Commit c4795fb20edf2fe2c862c8fe9f8b681edeb79ac1

Authored by Christoph Hellwig
Committed by Nicholas Bellinger
Parent: e26d99aed4

target: header reshuffle, part2

This reorganizes the headers under include/target into:

 - target_core_base.h stays as-is with all target-wide data structures and defines
 - target_core_backend.h contains the whole interface to I/O backends
 - target_core_fabric.h contains the whole interface to fabric modules

Except for those, only the various configfs macro headers remain.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
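
In practice, the change collapses the pile of fine-grained target headers a fabric module used to pull in (target_core_transport.h, target_core_fabric_ops.h, target_core_fabric_lib.h, target_core_device.h, target_core_tpg.h) into the single target_core_fabric.h. A minimal sketch of the include block a converted fabric module ends up with, matching what the updated generator script below emits:

	#include <target/target_core_base.h>	/* target-wide data structures and defines */
	#include <target/target_core_fabric.h>	/* whole interface to fabric modules */
	/* the configfs macro headers stay separate: */
	#include <target/target_core_fabric_configfs.h>
	#include <target/target_core_configfs.h>
	#include <target/configfs_macros.h>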

Showing 45 changed files with 391 additions and 565 deletions

Documentation/target/tcm_mod_builder.py
#!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: nab@kernel.org
#
import os, sys
import subprocess as sub
import string
import re
import optparse

tcm_dir = ""

fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""

def tcm_mod_err(msg):
	print msg
	sys.exit(1)

def tcm_mod_create_module_subdir(fabric_mod_dir_var):

	if os.path.isdir(fabric_mod_dir_var) == True:
		return 1

	print "Creating fabric_mod_dir: " + fabric_mod_dir_var
	ret = os.mkdir(fabric_mod_dir_var)
	if ret:
		tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)

	return

def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
	global fabric_mod_port
	global fabric_mod_init_port
	buf = ""

	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
	print "Writing file: " + f

	p = open(f, 'w');
	if not p:
		tcm_mod_err("Unable to open file: " + f)

	buf = "#define " + fabric_mod_name.upper() + "_VERSION	\"v0.1\"\n"
	buf += "#define " + fabric_mod_name.upper() + "_NAMELEN	32\n"
	buf += "\n"
	buf += "struct " + fabric_mod_name + "_nacl {\n"
	buf += "	/* Binary World Wide unique Port Name for FC Initiator Nport */\n"
	buf += "	u64 nport_wwpn;\n"
	buf += "	/* ASCII formatted WWPN for FC Initiator Nport */\n"
	buf += "	char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
	buf += "	struct se_node_acl se_node_acl;\n"
	buf += "};\n"
	buf += "\n"
	buf += "struct " + fabric_mod_name + "_tpg {\n"
	buf += "	/* FC lport target portal group tag for TCM */\n"
	buf += "	u16 lport_tpgt;\n"
	buf += "	/* Pointer back to " + fabric_mod_name + "_lport */\n"
	buf += "	struct " + fabric_mod_name + "_lport *lport;\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_tpg() */\n"
	buf += "	struct se_portal_group se_tpg;\n"
	buf += "};\n"
	buf += "\n"
	buf += "struct " + fabric_mod_name + "_lport {\n"
	buf += "	/* SCSI protocol the lport is providing */\n"
	buf += "	u8 lport_proto_id;\n"
	buf += "	/* Binary World Wide unique Port Name for FC Target Lport */\n"
	buf += "	u64 lport_wwpn;\n"
	buf += "	/* ASCII formatted WWPN for FC Target Lport */\n"
	buf += "	char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_lport() */\n"
	buf += "	struct se_wwn lport_wwn;\n"
	buf += "};\n"

	ret = p.write(buf)
	if ret:
		tcm_mod_err("Unable to write f: " + f)

	p.close()

	fabric_mod_port = "lport"
	fabric_mod_init_port = "nport"

	return

def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
	global fabric_mod_port
	global fabric_mod_init_port
	buf = ""

	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
	print "Writing file: " + f

	p = open(f, 'w');
	if not p:
		tcm_mod_err("Unable to open file: " + f)

	buf = "#define " + fabric_mod_name.upper() + "_VERSION	\"v0.1\"\n"
	buf += "#define " + fabric_mod_name.upper() + "_NAMELEN	32\n"
	buf += "\n"
	buf += "struct " + fabric_mod_name + "_nacl {\n"
	buf += "	/* Binary World Wide unique Port Name for SAS Initiator port */\n"
	buf += "	u64 iport_wwpn;\n"
	buf += "	/* ASCII formatted WWPN for Sas Initiator port */\n"
	buf += "	char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
	buf += "	struct se_node_acl se_node_acl;\n"
	buf += "};\n\n"
	buf += "struct " + fabric_mod_name + "_tpg {\n"
	buf += "	/* SAS port target portal group tag for TCM */\n"
	buf += "	u16 tport_tpgt;\n"
	buf += "	/* Pointer back to " + fabric_mod_name + "_tport */\n"
	buf += "	struct " + fabric_mod_name + "_tport *tport;\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_tpg() */\n"
	buf += "	struct se_portal_group se_tpg;\n"
	buf += "};\n\n"
	buf += "struct " + fabric_mod_name + "_tport {\n"
	buf += "	/* SCSI protocol the tport is providing */\n"
	buf += "	u8 tport_proto_id;\n"
	buf += "	/* Binary World Wide unique Port Name for SAS Target port */\n"
	buf += "	u64 tport_wwpn;\n"
	buf += "	/* ASCII formatted WWPN for SAS Target port */\n"
	buf += "	char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_tport() */\n"
	buf += "	struct se_wwn tport_wwn;\n"
	buf += "};\n"

	ret = p.write(buf)
	if ret:
		tcm_mod_err("Unable to write f: " + f)

	p.close()

	fabric_mod_port = "tport"
	fabric_mod_init_port = "iport"

	return

def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
	global fabric_mod_port
	global fabric_mod_init_port
	buf = ""

	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
	print "Writing file: " + f

	p = open(f, 'w');
	if not p:
		tcm_mod_err("Unable to open file: " + f)

	buf = "#define " + fabric_mod_name.upper() + "_VERSION	\"v0.1\"\n"
	buf += "#define " + fabric_mod_name.upper() + "_NAMELEN	32\n"
	buf += "\n"
	buf += "struct " + fabric_mod_name + "_nacl {\n"
	buf += "	/* ASCII formatted InitiatorName */\n"
	buf += "	char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
	buf += "	struct se_node_acl se_node_acl;\n"
	buf += "};\n\n"
	buf += "struct " + fabric_mod_name + "_tpg {\n"
	buf += "	/* iSCSI target portal group tag for TCM */\n"
	buf += "	u16 tport_tpgt;\n"
	buf += "	/* Pointer back to " + fabric_mod_name + "_tport */\n"
	buf += "	struct " + fabric_mod_name + "_tport *tport;\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_tpg() */\n"
	buf += "	struct se_portal_group se_tpg;\n"
	buf += "};\n\n"
	buf += "struct " + fabric_mod_name + "_tport {\n"
	buf += "	/* SCSI protocol the tport is providing */\n"
	buf += "	u8 tport_proto_id;\n"
	buf += "	/* ASCII formatted TargetName for IQN */\n"
	buf += "	char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_tport() */\n"
	buf += "	struct se_wwn tport_wwn;\n"
	buf += "};\n"

	ret = p.write(buf)
	if ret:
		tcm_mod_err("Unable to write f: " + f)

	p.close()

	fabric_mod_port = "tport"
	fabric_mod_init_port = "iport"

	return

def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):

	if proto_ident == "FC":
		tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
	elif proto_ident == "SAS":
		tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
	elif proto_ident == "iSCSI":
		tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
	else:
		print "Unsupported proto_ident: " + proto_ident
		sys.exit(1)

	return

def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
	buf = ""

	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
	print "Writing file: " + f

	p = open(f, 'w');
	if not p:
		tcm_mod_err("Unable to open file: " + f)

	buf = "#include <linux/module.h>\n"
	buf += "#include <linux/moduleparam.h>\n"
	buf += "#include <linux/version.h>\n"
	buf += "#include <generated/utsrelease.h>\n"
	buf += "#include <linux/utsname.h>\n"
	buf += "#include <linux/init.h>\n"
	buf += "#include <linux/slab.h>\n"
	buf += "#include <linux/kthread.h>\n"
	buf += "#include <linux/types.h>\n"
	buf += "#include <linux/string.h>\n"
	buf += "#include <linux/configfs.h>\n"
	buf += "#include <linux/ctype.h>\n"
	buf += "#include <asm/unaligned.h>\n\n"
	buf += "#include <target/target_core_base.h>\n"
-	buf += "#include <target/target_core_transport.h>\n"
-	buf += "#include <target/target_core_fabric_ops.h>\n"
+	buf += "#include <target/target_core_fabric.h>\n"
	buf += "#include <target/target_core_fabric_configfs.h>\n"
-	buf += "#include <target/target_core_fabric_lib.h>\n"
-	buf += "#include <target/target_core_device.h>\n"
-	buf += "#include <target/target_core_tpg.h>\n"
	buf += "#include <target/target_core_configfs.h>\n"
-	buf += "#include <target/target_core_base.h>\n"
	buf += "#include <target/configfs_macros.h>\n\n"
	buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
	buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"

	buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
	buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"

	buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
	buf += "	struct se_portal_group *se_tpg,\n"
	buf += "	struct config_group *group,\n"
	buf += "	const char *name)\n"
	buf += "{\n"
	buf += "	struct se_node_acl *se_nacl, *se_nacl_new;\n"
	buf += "	struct " + fabric_mod_name + "_nacl *nacl;\n"

	if proto_ident == "FC" or proto_ident == "SAS":
		buf += "	u64 wwpn = 0;\n"

	buf += "	u32 nexus_depth;\n\n"
	buf += "	/* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
	buf += "		return ERR_PTR(-EINVAL); */\n"
	buf += "	se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
	buf += "	if (!(se_nacl_new))\n"
	buf += "		return ERR_PTR(-ENOMEM);\n"
	buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
	buf += "	nexus_depth = 1;\n"
	buf += "	/*\n"
	buf += "	 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
	buf += "	 * when converting a NodeACL from demo mode -> explict\n"
	buf += "	 */\n"
	buf += "	se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
	buf += "				name, nexus_depth);\n"
	buf += "	if (IS_ERR(se_nacl)) {\n"
	buf += "		" + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
	buf += "		return se_nacl;\n"
	buf += "	}\n"
	buf += "	/*\n"
	buf += "	 * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
	buf += "	 */\n"
	buf += "	nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"

	if proto_ident == "FC" or proto_ident == "SAS":
		buf += "	nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"

	buf += "	/* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
	buf += "	return se_nacl;\n"
	buf += "}\n\n"
	buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
	buf += "{\n"
	buf += "	struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
	buf += "				struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
	buf += "	core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
	buf += "	kfree(nacl);\n"
	buf += "}\n\n"

	buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
	buf += "	struct se_wwn *wwn,\n"
	buf += "	struct config_group *group,\n"
	buf += "	const char *name)\n"
	buf += "{\n"
	buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
	buf += "			struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
	buf += "	struct " + fabric_mod_name + "_tpg *tpg;\n"
	buf += "	unsigned long tpgt;\n"
	buf += "	int ret;\n\n"
	buf += "	if (strstr(name, \"tpgt_\") != name)\n"
	buf += "		return ERR_PTR(-EINVAL);\n"
	buf += "	if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
	buf += "		return ERR_PTR(-EINVAL);\n\n"
	buf += "	tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
	buf += "	if (!(tpg)) {\n"
	buf += "		printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
	buf += "		return ERR_PTR(-ENOMEM);\n"
	buf += "	}\n"
	buf += "	tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
	buf += "	tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
	buf += "	ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
	buf += "				&tpg->se_tpg, (void *)tpg,\n"
	buf += "				TRANSPORT_TPG_TYPE_NORMAL);\n"
	buf += "	if (ret < 0) {\n"
	buf += "		kfree(tpg);\n"
	buf += "		return NULL;\n"
	buf += "	}\n"
	buf += "	return &tpg->se_tpg;\n"
	buf += "}\n\n"
	buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
	buf += "{\n"
	buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
	buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
	buf += "	core_tpg_deregister(se_tpg);\n"
	buf += "	kfree(tpg);\n"
	buf += "}\n\n"

	buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
	buf += "	struct target_fabric_configfs *tf,\n"
	buf += "	struct config_group *group,\n"
	buf += "	const char *name)\n"
	buf += "{\n"
	buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"

	if proto_ident == "FC" or proto_ident == "SAS":
		buf += "	u64 wwpn = 0;\n\n"

	buf += "	/* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
	buf += "		return ERR_PTR(-EINVAL); */\n\n"
	buf += "	" + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
	buf += "	if (!(" + fabric_mod_port + ")) {\n"
	buf += "		printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
	buf += "		return ERR_PTR(-ENOMEM);\n"
	buf += "	}\n"

	if proto_ident == "FC" or proto_ident == "SAS":
		buf += "	" + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"

	buf += "	/* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "__NAMELEN, wwpn); */\n\n"
	buf += "	return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
	buf += "}\n\n"
	buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
	buf += "{\n"
	buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
	buf += "				struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
	buf += "	kfree(" + fabric_mod_port + ");\n"
	buf += "}\n\n"
	buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
	buf += "	struct target_fabric_configfs *tf,\n"
	buf += "	char *page)\n"
	buf += "{\n"
	buf += "	return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
	buf += "		\"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
	buf += "		utsname()->machine);\n"
	buf += "}\n\n"
	buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
	buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
	buf += "	&" + fabric_mod_name + "_wwn_version.attr,\n"
	buf += "	NULL,\n"
	buf += "};\n\n"

	buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
	buf += "	.get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
	buf += "	.get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
	buf += "	.tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
	buf += "	.tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
	buf += "	.tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
	buf += "	.tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
	buf += "	.tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
	buf += "	.tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
	buf += "	.tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
	buf += "	.tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
	buf += "	.tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
	buf += "	.tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
	buf += "	.tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
	buf += "	.tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
	buf += "	.tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
	buf += "	.release_cmd_to_pool = " + fabric_mod_name + "_release_cmd,\n"
	buf += "	.release_cmd_direct = " + fabric_mod_name + "_release_cmd,\n"
	buf += "	.shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
	buf += "	.close_session = " + fabric_mod_name + "_close_session,\n"
	buf += "	.stop_session = " + fabric_mod_name + "_stop_session,\n"
	buf += "	.fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
	buf += "	.sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
	buf += "	.sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
	buf += "	.sess_get_initiator_sid = NULL,\n"
	buf += "	.write_pending = " + fabric_mod_name + "_write_pending,\n"
	buf += "	.write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
	buf += "	.set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
	buf += "	.get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
	buf += "	.get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
	buf += "	.new_cmd_failure = " + fabric_mod_name + "_new_cmd_failure,\n"
	buf += "	.queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
	buf += "	.queue_status = " + fabric_mod_name + "_queue_status,\n"
	buf += "	.queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
	buf += "	.get_fabric_sense_len = " + fabric_mod_name + "_get_fabric_sense_len,\n"
	buf += "	.set_fabric_sense_len = " + fabric_mod_name + "_set_fabric_sense_len,\n"
	buf += "	.is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
	buf += "	.pack_lun = " + fabric_mod_name + "_pack_lun,\n"
	buf += "	/*\n"
	buf += "	 * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
	buf += "	 */\n"
	buf += "	.fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
	buf += "	.fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
	buf += "	.fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
	buf += "	.fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
	buf += "	.fabric_post_link = NULL,\n"
	buf += "	.fabric_pre_unlink = NULL,\n"
	buf += "	.fabric_make_np = NULL,\n"
	buf += "	.fabric_drop_np = NULL,\n"
	buf += "	.fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
	buf += "	.fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
	buf += "};\n\n"

	buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
	buf += "{\n"
	buf += "	struct target_fabric_configfs *fabric;\n"
	buf += "	int ret;\n\n"
	buf += "	printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
	buf += "		\" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
	buf += "		utsname()->machine);\n"
	buf += "	/*\n"
	buf += "	 * Register the top level struct config_item_type with TCM core\n"
	buf += "	 */\n"
	buf += "	fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
	buf += "	if (!(fabric)) {\n"
	buf += "		printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
	buf += "		return -ENOMEM;\n"
	buf += "	}\n"
	buf += "	/*\n"
	buf += "	 * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
	buf += "	 */\n"
	buf += "	fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
	buf += "	/*\n"
	buf += "	 * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
	buf += "	 */\n"
	buf += "	TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
	buf += "	/*\n"
	buf += "	 * Register the fabric for use within TCM\n"
	buf += "	 */\n"
	buf += "	ret = target_fabric_configfs_register(fabric);\n"
	buf += "	if (ret < 0) {\n"
	buf += "		printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
	buf += "				\" for " + fabric_mod_name.upper() + "\\n\");\n"
	buf += "		return ret;\n"
	buf += "	}\n"
	buf += "	/*\n"
	buf += "	 * Setup our local pointer to *fabric\n"
	buf += "	 */\n"
	buf += "	" + fabric_mod_name + "_fabric_configfs = fabric;\n"
	buf += "	printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
	buf += "	return 0;\n"
	buf += "};\n\n"
	buf += "static void " + fabric_mod_name + "_deregister_configfs(void)\n"
	buf += "{\n"
	buf += "	if (!(" + fabric_mod_name + "_fabric_configfs))\n"
	buf += "		return;\n\n"
	buf += "	target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
	buf += "	" + fabric_mod_name + "_fabric_configfs = NULL;\n"
	buf += "	printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
	buf += "};\n\n"

	buf += "static int __init " + fabric_mod_name + "_init(void)\n"
	buf += "{\n"
	buf += "	int ret;\n\n"
	buf += "	ret = " + fabric_mod_name + "_register_configfs();\n"
	buf += "	if (ret < 0)\n"
	buf += "		return ret;\n\n"
	buf += "	return 0;\n"
	buf += "};\n\n"
	buf += "static void " + fabric_mod_name + "_exit(void)\n"
	buf += "{\n"
	buf += "	" + fabric_mod_name + "_deregister_configfs();\n"
	buf += "};\n\n"

	buf += "#ifdef MODULE\n"
	buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
	buf += "MODULE_LICENSE(\"GPL\");\n"
	buf += "module_init(" + fabric_mod_name + "_init);\n"
	buf += "module_exit(" + fabric_mod_name + "_exit);\n"
	buf += "#endif\n"

	ret = p.write(buf)
	if ret:
		tcm_mod_err("Unable to write f: " + f)

	p.close()

	return

def tcm_mod_scan_fabric_ops(tcm_dir):

-	fabric_ops_api = tcm_dir + "include/target/target_core_fabric_ops.h"
+	fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"

	print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
	process_fo = 0;

	p = open(fabric_ops_api, 'r')

	line = p.readline()
	while line:
		if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
			line = p.readline()
			continue

		if process_fo == 0:
			process_fo = 1;
			line = p.readline()
			# Search for function pointer
			if not re.search('\(\*', line):
				continue

			fabric_ops.append(line.rstrip())
			continue

		line = p.readline()
		# Search for function pointer
		if not re.search('\(\*', line):
			continue

		fabric_ops.append(line.rstrip())

	p.close()
	return

def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
	buf = ""
	bufi = ""

	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
	print "Writing file: " + f

	p = open(f, 'w')
	if not p:
		tcm_mod_err("Unable to open file: " + f)

	fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
	print "Writing file: " + fi

	pi = open(fi, 'w')
	if not pi:
		tcm_mod_err("Unable to open file: " + fi)

	buf = "#include <linux/slab.h>\n"
	buf += "#include <linux/kthread.h>\n"
	buf += "#include <linux/types.h>\n"
	buf += "#include <linux/list.h>\n"
	buf += "#include <linux/types.h>\n"
	buf += "#include <linux/string.h>\n"
	buf += "#include <linux/ctype.h>\n"
	buf += "#include <asm/unaligned.h>\n"
	buf += "#include <scsi/scsi.h>\n"
	buf += "#include <scsi/scsi_host.h>\n"
	buf += "#include <scsi/scsi_device.h>\n"
	buf += "#include <scsi/scsi_cmnd.h>\n"
	buf += "#include <scsi/libfc.h>\n\n"
	buf += "#include <target/target_core_base.h>\n"
-	buf += "#include <target/target_core_transport.h>\n"
-	buf += "#include <target/target_core_fabric_ops.h>\n"
-	buf += "#include <target/target_core_fabric_lib.h>\n"
-	buf += "#include <target/target_core_device.h>\n"
-	buf += "#include <target/target_core_tpg.h>\n"
+	buf += "#include <target/target_core_fabric.h>\n"
	buf += "#include <target/target_core_configfs.h>\n\n"
	buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
	buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"

	buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
	buf += "{\n"
	buf += "	return 1;\n"
	buf += "}\n\n"
	bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"

	buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
	buf += "{\n"
	buf += "	return 0;\n"
	buf += "}\n\n"
	bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"

	total_fabric_ops = len(fabric_ops)
	i = 0

	while i < total_fabric_ops:
		fo = fabric_ops[i]
		i += 1
		# print "fabric_ops: " + fo

		if re.search('get_fabric_name', fo):
			buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
			buf += "{\n"
			buf += "	return \"" + fabric_mod_name[4:] + "\";\n"
			buf += "}\n\n"
			bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
			continue

		if re.search('get_fabric_proto_ident', fo):
			buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
			buf += "{\n"
			buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
			buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
			buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
			buf += "	u8 proto_id;\n\n"
			buf += "	switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
			if proto_ident == "FC":
				buf += "	case SCSI_PROTOCOL_FCP:\n"
				buf += "	default:\n"
				buf += "		proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
				buf += "		break;\n"
			elif proto_ident == "SAS":
				buf += "	case SCSI_PROTOCOL_SAS:\n"
				buf += "	default:\n"
				buf += "		proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
				buf += "		break;\n"
			elif proto_ident == "iSCSI":
				buf += "	case SCSI_PROTOCOL_ISCSI:\n"
				buf += "	default:\n"
				buf += "		proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
				buf += "		break;\n"

			buf += "	}\n\n"
			buf += "	return proto_id;\n"
			buf += "}\n\n"
			bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"

		if re.search('get_wwn', fo):
			buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
			buf += "{\n"
			buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
			buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
			buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
			buf += "	return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
			buf += "}\n\n"
			bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"

		if re.search('get_tag', fo):
			buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
			buf += "{\n"
			buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
			buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
			buf += "	return tpg->" + fabric_mod_port + "_tpgt;\n"
			buf += "}\n\n"
			bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"

		if re.search('get_default_depth', fo):
			buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
			buf += "{\n"
			buf += "	return 1;\n"
			buf += "}\n\n"
			bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"

		if re.search('get_pr_transport_id\)\(', fo):
			buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
			buf += "	struct se_portal_group *se_tpg,\n"
			buf += "	struct se_node_acl *se_nacl,\n"
			buf += "	struct t10_pr_registration *pr_reg,\n"
			buf += "	int *format_code,\n"
			buf += "	unsigned char *buf)\n"
			buf += "{\n"
			buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
			buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
			buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
			buf += "	int ret = 0;\n\n"
			buf += "	switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
			if proto_ident == "FC":
				buf += "	case SCSI_PROTOCOL_FCP:\n"
				buf += "	default:\n"
				buf += "		ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
				buf += "					format_code, buf);\n"
				buf += "		break;\n"
			elif proto_ident == "SAS":
				buf += "	case SCSI_PROTOCOL_SAS:\n"
				buf += "	default:\n"
				buf += "		ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
				buf += "					format_code, buf);\n"
				buf += "		break;\n"
			elif proto_ident == "iSCSI":
				buf += "	case SCSI_PROTOCOL_ISCSI:\n"
				buf += "	default:\n"
				buf += "		ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
703 buf += " format_code, buf);\n" 694 buf += " format_code, buf);\n"
704 buf += " break;\n" 695 buf += " break;\n"
705 696
706 buf += " }\n\n" 697 buf += " }\n\n"
707 buf += " return ret;\n" 698 buf += " return ret;\n"
708 buf += "}\n\n" 699 buf += "}\n\n"
709 bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n" 700 bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
710 bufi += " struct se_node_acl *, struct t10_pr_registration *,\n" 701 bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
711 bufi += " int *, unsigned char *);\n" 702 bufi += " int *, unsigned char *);\n"
712 703
713 if re.search('get_pr_transport_id_len\)\(', fo): 704 if re.search('get_pr_transport_id_len\)\(', fo):
714 buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n" 705 buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
715 buf += " struct se_portal_group *se_tpg,\n" 706 buf += " struct se_portal_group *se_tpg,\n"
716 buf += " struct se_node_acl *se_nacl,\n" 707 buf += " struct se_node_acl *se_nacl,\n"
717 buf += " struct t10_pr_registration *pr_reg,\n" 708 buf += " struct t10_pr_registration *pr_reg,\n"
718 buf += " int *format_code)\n" 709 buf += " int *format_code)\n"
719 buf += "{\n" 710 buf += "{\n"
720 buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n" 711 buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
721 buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n" 712 buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
722 buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n" 713 buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
723 buf += " int ret = 0;\n\n" 714 buf += " int ret = 0;\n\n"
724 buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n" 715 buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
725 if proto_ident == "FC": 716 if proto_ident == "FC":
726 buf += " case SCSI_PROTOCOL_FCP:\n" 717 buf += " case SCSI_PROTOCOL_FCP:\n"
727 buf += " default:\n" 718 buf += " default:\n"
728 buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n" 719 buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
729 buf += " format_code);\n" 720 buf += " format_code);\n"
730 buf += " break;\n" 721 buf += " break;\n"
731 elif proto_ident == "SAS": 722 elif proto_ident == "SAS":
732 buf += " case SCSI_PROTOCOL_SAS:\n" 723 buf += " case SCSI_PROTOCOL_SAS:\n"
733 buf += " default:\n" 724 buf += " default:\n"
734 buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n" 725 buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
735 buf += " format_code);\n" 726 buf += " format_code);\n"
736 buf += " break;\n" 727 buf += " break;\n"
737 elif proto_ident == "iSCSI": 728 elif proto_ident == "iSCSI":
738 buf += " case SCSI_PROTOCOL_ISCSI:\n" 729 buf += " case SCSI_PROTOCOL_ISCSI:\n"
739 buf += " default:\n" 730 buf += " default:\n"
740 buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n" 731 buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
741 buf += " format_code);\n" 732 buf += " format_code);\n"
742 buf += " break;\n" 733 buf += " break;\n"
743 734
744 735
745 buf += " }\n\n" 736 buf += " }\n\n"
746 buf += " return ret;\n" 737 buf += " return ret;\n"
747 buf += "}\n\n" 738 buf += "}\n\n"
748 bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n" 739 bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
749 bufi += " struct se_node_acl *, struct t10_pr_registration *,\n" 740 bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
750 bufi += " int *);\n" 741 bufi += " int *);\n"
751 742
752 if re.search('parse_pr_out_transport_id\)\(', fo): 743 if re.search('parse_pr_out_transport_id\)\(', fo):
753 buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n" 744 buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
754 buf += " struct se_portal_group *se_tpg,\n" 745 buf += " struct se_portal_group *se_tpg,\n"
755 buf += " const char *buf,\n" 746 buf += " const char *buf,\n"
756 buf += " u32 *out_tid_len,\n" 747 buf += " u32 *out_tid_len,\n"
757 buf += " char **port_nexus_ptr)\n" 748 buf += " char **port_nexus_ptr)\n"
758 buf += "{\n" 749 buf += "{\n"
759 buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n" 750 buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
760 buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n" 751 buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
761 buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n" 752 buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
762 buf += " char *tid = NULL;\n\n" 753 buf += " char *tid = NULL;\n\n"
763 buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n" 754 buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
764 if proto_ident == "FC": 755 if proto_ident == "FC":
765 buf += " case SCSI_PROTOCOL_FCP:\n" 756 buf += " case SCSI_PROTOCOL_FCP:\n"
766 buf += " default:\n" 757 buf += " default:\n"
767 buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n" 758 buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
768 buf += " port_nexus_ptr);\n" 759 buf += " port_nexus_ptr);\n"
769 elif proto_ident == "SAS": 760 elif proto_ident == "SAS":
770 buf += " case SCSI_PROTOCOL_SAS:\n" 761 buf += " case SCSI_PROTOCOL_SAS:\n"
771 buf += " default:\n" 762 buf += " default:\n"
772 buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n" 763 buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
773 buf += " port_nexus_ptr);\n" 764 buf += " port_nexus_ptr);\n"
774 elif proto_ident == "iSCSI": 765 elif proto_ident == "iSCSI":
775 buf += " case SCSI_PROTOCOL_ISCSI:\n" 766 buf += " case SCSI_PROTOCOL_ISCSI:\n"
776 buf += " default:\n" 767 buf += " default:\n"
777 buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n" 768 buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
778 buf += " port_nexus_ptr);\n" 769 buf += " port_nexus_ptr);\n"
779 770
780 buf += " }\n\n" 771 buf += " }\n\n"
781 buf += " return tid;\n" 772 buf += " return tid;\n"
782 buf += "}\n\n" 773 buf += "}\n\n"
783 bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n" 774 bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
784 bufi += " const char *, u32 *, char **);\n" 775 bufi += " const char *, u32 *, char **);\n"
785 776
786 if re.search('alloc_fabric_acl\)\(', fo): 777 if re.search('alloc_fabric_acl\)\(', fo):
787 buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n" 778 buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
788 buf += "{\n" 779 buf += "{\n"
789 buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n" 780 buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
790 buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n" 781 buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
791 buf += " if (!(nacl)) {\n" 782 buf += " if (!(nacl)) {\n"
792 buf += "		printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n" 783 buf += "		printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n"
793 buf += " return NULL;\n" 784 buf += " return NULL;\n"
794 buf += " }\n\n" 785 buf += " }\n\n"
795 buf += " return &nacl->se_node_acl;\n" 786 buf += " return &nacl->se_node_acl;\n"
796 buf += "}\n\n" 787 buf += "}\n\n"
797 bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n" 788 bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
798 789
799 if re.search('release_fabric_acl\)\(', fo): 790 if re.search('release_fabric_acl\)\(', fo):
800 buf += "void " + fabric_mod_name + "_release_fabric_acl(\n" 791 buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
801 buf += " struct se_portal_group *se_tpg,\n" 792 buf += " struct se_portal_group *se_tpg,\n"
802 buf += " struct se_node_acl *se_nacl)\n" 793 buf += " struct se_node_acl *se_nacl)\n"
803 buf += "{\n" 794 buf += "{\n"
804 buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n" 795 buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
805 buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n" 796 buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
806 buf += " kfree(nacl);\n" 797 buf += " kfree(nacl);\n"
807 buf += "}\n\n" 798 buf += "}\n\n"
808 bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n" 799 bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
809 bufi += " struct se_node_acl *);\n" 800 bufi += " struct se_node_acl *);\n"
810 801
811 if re.search('tpg_get_inst_index\)\(', fo): 802 if re.search('tpg_get_inst_index\)\(', fo):
812 buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n" 803 buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
813 buf += "{\n" 804 buf += "{\n"
814 buf += " return 1;\n" 805 buf += " return 1;\n"
815 buf += "}\n\n" 806 buf += "}\n\n"
816 bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n" 807 bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
817 808
818 if re.search('release_cmd_to_pool', fo): 809 if re.search('release_cmd_to_pool', fo):
819 buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n" 810 buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
820 buf += "{\n" 811 buf += "{\n"
821 buf += " return;\n" 812 buf += " return;\n"
822 buf += "}\n\n" 813 buf += "}\n\n"
823 bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n" 814 bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
824 815
825 if re.search('shutdown_session\)\(', fo): 816 if re.search('shutdown_session\)\(', fo):
826 buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n" 817 buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
827 buf += "{\n" 818 buf += "{\n"
828 buf += " return 0;\n" 819 buf += " return 0;\n"
829 buf += "}\n\n" 820 buf += "}\n\n"
830 bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n" 821 bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
831 822
832 if re.search('close_session\)\(', fo): 823 if re.search('close_session\)\(', fo):
833 buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n" 824 buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
834 buf += "{\n" 825 buf += "{\n"
835 buf += " return;\n" 826 buf += " return;\n"
836 buf += "}\n\n" 827 buf += "}\n\n"
837 bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n" 828 bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
838 829
839 if re.search('stop_session\)\(', fo): 830 if re.search('stop_session\)\(', fo):
840 buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n" 831 buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
841 buf += "{\n" 832 buf += "{\n"
842 buf += " return;\n" 833 buf += " return;\n"
843 buf += "}\n\n" 834 buf += "}\n\n"
844 bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n" 835 bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
845 836
846 if re.search('fall_back_to_erl0\)\(', fo): 837 if re.search('fall_back_to_erl0\)\(', fo):
847 buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n" 838 buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
848 buf += "{\n" 839 buf += "{\n"
849 buf += " return;\n" 840 buf += " return;\n"
850 buf += "}\n\n" 841 buf += "}\n\n"
851 bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n" 842 bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
852 843
853 if re.search('sess_logged_in\)\(', fo): 844 if re.search('sess_logged_in\)\(', fo):
854 buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n" 845 buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
855 buf += "{\n" 846 buf += "{\n"
856 buf += " return 0;\n" 847 buf += " return 0;\n"
857 buf += "}\n\n" 848 buf += "}\n\n"
858 bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n" 849 bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
859 850
860 if re.search('sess_get_index\)\(', fo): 851 if re.search('sess_get_index\)\(', fo):
861 buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n" 852 buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
862 buf += "{\n" 853 buf += "{\n"
863 buf += " return 0;\n" 854 buf += " return 0;\n"
864 buf += "}\n\n" 855 buf += "}\n\n"
865 bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n" 856 bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
866 857
867 if re.search('write_pending\)\(', fo): 858 if re.search('write_pending\)\(', fo):
868 buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n" 859 buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
869 buf += "{\n" 860 buf += "{\n"
870 buf += " return 0;\n" 861 buf += " return 0;\n"
871 buf += "}\n\n" 862 buf += "}\n\n"
872 bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n" 863 bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
873 864
874 if re.search('write_pending_status\)\(', fo): 865 if re.search('write_pending_status\)\(', fo):
875 buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n" 866 buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
876 buf += "{\n" 867 buf += "{\n"
877 buf += " return 0;\n" 868 buf += " return 0;\n"
878 buf += "}\n\n" 869 buf += "}\n\n"
879 bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n" 870 bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
880 871
881 if re.search('set_default_node_attributes\)\(', fo): 872 if re.search('set_default_node_attributes\)\(', fo):
882 buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n" 873 buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
883 buf += "{\n" 874 buf += "{\n"
884 buf += " return;\n" 875 buf += " return;\n"
885 buf += "}\n\n" 876 buf += "}\n\n"
886 bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n" 877 bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
887 878
888 if re.search('get_task_tag\)\(', fo): 879 if re.search('get_task_tag\)\(', fo):
889 buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n" 880 buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
890 buf += "{\n" 881 buf += "{\n"
891 buf += " return 0;\n" 882 buf += " return 0;\n"
892 buf += "}\n\n" 883 buf += "}\n\n"
893 bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n" 884 bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
894 885
895 if re.search('get_cmd_state\)\(', fo): 886 if re.search('get_cmd_state\)\(', fo):
896 buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n" 887 buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
897 buf += "{\n" 888 buf += "{\n"
898 buf += " return 0;\n" 889 buf += " return 0;\n"
899 buf += "}\n\n" 890 buf += "}\n\n"
900 bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n" 891 bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
901 892
902 if re.search('new_cmd_failure\)\(', fo): 893 if re.search('new_cmd_failure\)\(', fo):
903 buf += "void " + fabric_mod_name + "_new_cmd_failure(struct se_cmd *se_cmd)\n" 894 buf += "void " + fabric_mod_name + "_new_cmd_failure(struct se_cmd *se_cmd)\n"
904 buf += "{\n" 895 buf += "{\n"
905 buf += " return;\n" 896 buf += " return;\n"
906 buf += "}\n\n" 897 buf += "}\n\n"
907 bufi += "void " + fabric_mod_name + "_new_cmd_failure(struct se_cmd *);\n" 898 bufi += "void " + fabric_mod_name + "_new_cmd_failure(struct se_cmd *);\n"
908 899
909 if re.search('queue_data_in\)\(', fo): 900 if re.search('queue_data_in\)\(', fo):
910 buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n" 901 buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
911 buf += "{\n" 902 buf += "{\n"
912 buf += " return 0;\n" 903 buf += " return 0;\n"
913 buf += "}\n\n" 904 buf += "}\n\n"
914 bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n" 905 bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
915 906
916 if re.search('queue_status\)\(', fo): 907 if re.search('queue_status\)\(', fo):
917 buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n" 908 buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
918 buf += "{\n" 909 buf += "{\n"
919 buf += " return 0;\n" 910 buf += " return 0;\n"
920 buf += "}\n\n" 911 buf += "}\n\n"
921 bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n" 912 bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
922 913
923 if re.search('queue_tm_rsp\)\(', fo): 914 if re.search('queue_tm_rsp\)\(', fo):
924 buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n" 915 buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
925 buf += "{\n" 916 buf += "{\n"
926 buf += " return 0;\n" 917 buf += " return 0;\n"
927 buf += "}\n\n" 918 buf += "}\n\n"
928 bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n" 919 bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
929 920
930 if re.search('get_fabric_sense_len\)\(', fo): 921 if re.search('get_fabric_sense_len\)\(', fo):
931 buf += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void)\n" 922 buf += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void)\n"
932 buf += "{\n" 923 buf += "{\n"
933 buf += " return 0;\n" 924 buf += " return 0;\n"
934 buf += "}\n\n" 925 buf += "}\n\n"
935 bufi += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void);\n" 926 bufi += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void);\n"
936 927
937 if re.search('set_fabric_sense_len\)\(', fo): 928 if re.search('set_fabric_sense_len\)\(', fo):
938 buf += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)\n" 929 buf += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)\n"
939 buf += "{\n" 930 buf += "{\n"
940 buf += " return 0;\n" 931 buf += " return 0;\n"
941 buf += "}\n\n" 932 buf += "}\n\n"
942 bufi += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *, u32);\n" 933 bufi += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *, u32);\n"
943 934
944 if re.search('is_state_remove\)\(', fo): 935 if re.search('is_state_remove\)\(', fo):
945 buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n" 936 buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
946 buf += "{\n" 937 buf += "{\n"
947 buf += " return 0;\n" 938 buf += " return 0;\n"
948 buf += "}\n\n" 939 buf += "}\n\n"
949 bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n" 940 bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
950 941
951 if re.search('pack_lun\)\(', fo): 942 if re.search('pack_lun\)\(', fo):
952 buf += "u64 " + fabric_mod_name + "_pack_lun(unsigned int lun)\n" 943 buf += "u64 " + fabric_mod_name + "_pack_lun(unsigned int lun)\n"
953 buf += "{\n" 944 buf += "{\n"
954 buf += " WARN_ON(lun >= 256);\n" 945 buf += " WARN_ON(lun >= 256);\n"
955 buf += " /* Caller wants this byte-swapped */\n" 946 buf += " /* Caller wants this byte-swapped */\n"
956 buf += " return cpu_to_le64((lun & 0xff) << 8);\n" 947 buf += " return cpu_to_le64((lun & 0xff) << 8);\n"
957 buf += "}\n\n" 948 buf += "}\n\n"
958 bufi += "u64 " + fabric_mod_name + "_pack_lun(unsigned int);\n" 949 bufi += "u64 " + fabric_mod_name + "_pack_lun(unsigned int);\n"
959 950
960 951
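Of the generated stubs, only pack_lun carries real logic: it encodes a single-level LUN (0-255) into the second byte of the 64-bit LUN field, byte-swapped for the caller. With the same hypothetical module name, the emitted code reads:

	u64 tcm_demo_pack_lun(unsigned int lun)
	{
		WARN_ON(lun >= 256);
		/* Caller wants this byte-swapped */
		return cpu_to_le64((lun & 0xff) << 8);
	}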
961 ret = p.write(buf) 952 ret = p.write(buf)
962 if ret: 953 if ret:
963 tcm_mod_err("Unable to write f: " + f) 954 tcm_mod_err("Unable to write f: " + f)
964 955
965 p.close() 956 p.close()
966 957
967 ret = pi.write(bufi) 958 ret = pi.write(bufi)
968 if ret: 959 if ret:
969 tcm_mod_err("Unable to write fi: " + fi) 960 tcm_mod_err("Unable to write fi: " + fi)
970 961
971 pi.close() 962 pi.close()
972 return 963 return
973 964
974 def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name): 965 def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
975 966
976 buf = "" 967 buf = ""
977 f = fabric_mod_dir_var + "/Makefile" 968 f = fabric_mod_dir_var + "/Makefile"
978 print "Writing file: " + f 969 print "Writing file: " + f
979 970
980 p = open(f, 'w') 971 p = open(f, 'w')
981 if not p: 972 if not p:
982 tcm_mod_err("Unable to open file: " + f) 973 tcm_mod_err("Unable to open file: " + f)
983 974
984 buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n" 975 buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
985 buf += " " + fabric_mod_name + "_configfs.o\n" 976 buf += " " + fabric_mod_name + "_configfs.o\n"
986 buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n" 977 buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
987 978
988 ret = p.write(buf) 979 ret = p.write(buf)
989 if ret: 980 if ret:
990 tcm_mod_err("Unable to write f: " + f) 981 tcm_mod_err("Unable to write f: " + f)
991 982
992 p.close() 983 p.close()
993 return 984 return
994 985
995 def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name): 986 def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
996 987
997 buf = "" 988 buf = ""
998 f = fabric_mod_dir_var + "/Kconfig" 989 f = fabric_mod_dir_var + "/Kconfig"
999 print "Writing file: " + f 990 print "Writing file: " + f
1000 991
1001 p = open(f, 'w') 992 p = open(f, 'w')
1002 if not p: 993 if not p:
1003 tcm_mod_err("Unable to open file: " + f) 994 tcm_mod_err("Unable to open file: " + f)
1004 995
1005 buf = "config " + fabric_mod_name.upper() + "\n" 996 buf = "config " + fabric_mod_name.upper() + "\n"
1006 buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n" 997 buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
1007 buf += " depends on TARGET_CORE && CONFIGFS_FS\n" 998 buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
1008 buf += " default n\n" 999 buf += " default n\n"
1009 buf += " ---help---\n" 1000 buf += " ---help---\n"
1010 buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n" 1001 buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
1011 1002
1012 ret = p.write(buf) 1003 ret = p.write(buf)
1013 if ret: 1004 if ret:
1014 tcm_mod_err("Unable to write f: " + f) 1005 tcm_mod_err("Unable to write f: " + f)
1015 1006
1016 p.close() 1007 p.close()
1017 return 1008 return
1018 1009
1019 def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name): 1010 def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
1020 buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n" 1011 buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
1021 kbuild = tcm_dir + "/drivers/target/Makefile" 1012 kbuild = tcm_dir + "/drivers/target/Makefile"
1022 1013
1023 f = open(kbuild, 'a') 1014 f = open(kbuild, 'a')
1024 f.write(buf) 1015 f.write(buf)
1025 f.close() 1016 f.close()
1026 return 1017 return
1027 1018
1028 def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name): 1019 def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
1029 buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n" 1020 buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
1030 kconfig = tcm_dir + "/drivers/target/Kconfig" 1021 kconfig = tcm_dir + "/drivers/target/Kconfig"
1031 1022
1032 f = open(kconfig, 'a') 1023 f = open(kconfig, 'a')
1033 f.write(buf) 1024 f.write(buf)
1034 f.close() 1025 f.close()
1035 return 1026 return
1036 1027
1037 def main(modname, proto_ident): 1028 def main(modname, proto_ident):
1038 # proto_ident = "FC" 1029 # proto_ident = "FC"
1039 # proto_ident = "SAS" 1030 # proto_ident = "SAS"
1040 # proto_ident = "iSCSI" 1031 # proto_ident = "iSCSI"
1041 1032
1042 tcm_dir = os.getcwd(); 1033 tcm_dir = os.getcwd();
1043 tcm_dir += "/../../" 1034 tcm_dir += "/../../"
1044 print "tcm_dir: " + tcm_dir 1035 print "tcm_dir: " + tcm_dir
1045 fabric_mod_name = modname 1036 fabric_mod_name = modname
1046 fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name 1037 fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
1047 print "Set fabric_mod_name: " + fabric_mod_name 1038 print "Set fabric_mod_name: " + fabric_mod_name
1048 print "Set fabric_mod_dir: " + fabric_mod_dir 1039 print "Set fabric_mod_dir: " + fabric_mod_dir
1049 print "Using proto_ident: " + proto_ident 1040 print "Using proto_ident: " + proto_ident
1050 1041
1051 if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI": 1042 if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
1052 print "Unsupported proto_ident: " + proto_ident 1043 print "Unsupported proto_ident: " + proto_ident
1053 sys.exit(1) 1044 sys.exit(1)
1054 1045
1055 ret = tcm_mod_create_module_subdir(fabric_mod_dir) 1046 ret = tcm_mod_create_module_subdir(fabric_mod_dir)
1056 if ret: 1047 if ret:
1057 print "tcm_mod_create_module_subdir() failed because module already exists!" 1048 print "tcm_mod_create_module_subdir() failed because module already exists!"
1058 sys.exit(1) 1049 sys.exit(1)
1059 1050
1060 tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name) 1051 tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
1061 tcm_mod_scan_fabric_ops(tcm_dir) 1052 tcm_mod_scan_fabric_ops(tcm_dir)
1062 tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name) 1053 tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
1063 tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name) 1054 tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
1064 tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name) 1055 tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
1065 tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name) 1056 tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
1066 1057
1067 input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Makefile? [yes,no]: ") 1058 input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Makefile? [yes,no]: ")
1068 if input == "yes" or input == "y": 1059 if input == "yes" or input == "y":
1069 tcm_mod_add_kbuild(tcm_dir, fabric_mod_name) 1060 tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
1070 1061
1071 input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Kconfig? [yes,no]: ") 1062 input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Kconfig? [yes,no]: ")
1072 if input == "yes" or input == "y": 1063 if input == "yes" or input == "y":
1073 tcm_mod_add_kconfig(tcm_dir, fabric_mod_name) 1064 tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
1074 1065
1075 return 1066 return
1076 1067
1077 parser = optparse.OptionParser() 1068 parser = optparse.OptionParser()
1078 parser.add_option('-m', '--modulename', help='Module name', dest='modname', 1069 parser.add_option('-m', '--modulename', help='Module name', dest='modname',
1079 action='store', nargs=1, type='string') 1070 action='store', nargs=1, type='string')
1080 parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident', 1071 parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
1081 action='store', nargs=1, type='string') 1072 action='store', nargs=1, type='string')
1082 1073
1083 (opts, args) = parser.parse_args() 1074 (opts, args) = parser.parse_args()
1084 1075
1085 mandatories = ['modname', 'protoident'] 1076 mandatories = ['modname', 'protoident']
1086 for m in mandatories: 1077 for m in mandatories:
1087 if not opts.__dict__[m]: 1078 if not opts.__dict__[m]:
1088 print "mandatory option is missing\n" 1079 print "mandatory option is missing\n"
1089 parser.print_help() 1080 parser.print_help()
1090 exit(-1) 1081 exit(-1)
1091 1082
1092 if __name__ == "__main__": 1083 if __name__ == "__main__":
1093 1084
1094 main(str(opts.modname), opts.protoident) 1085 main(str(opts.modname), opts.protoident)
1095 1086
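For reference, the generator is driven entirely by the two optparse options above; a hypothetical invocation (module name illustrative, -p limited to FC, SAS or iSCSI) would be:

	./tcm_mod_builder.py -m tcm_demo -p FC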
drivers/target/iscsi/iscsi_target.c
1 /******************************************************************************* 1 /*******************************************************************************
2 * This file contains main functions related to the iSCSI Target Core Driver. 2 * This file contains main functions related to the iSCSI Target Core Driver.
3 * 3 *
4 * © Copyright 2007-2011 RisingTide Systems LLC. 4 * © Copyright 2007-2011 RisingTide Systems LLC.
5 * 5 *
6 * Licensed to the Linux Foundation under the General Public License (GPL) version 2. 6 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
7 * 7 *
8 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> 8 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by 11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or 12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version. 13 * (at your option) any later version.
14 * 14 *
15 * This program is distributed in the hope that it will be useful, 15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details. 18 * GNU General Public License for more details.
19 ******************************************************************************/ 19 ******************************************************************************/
20 20
21 #include <linux/string.h> 21 #include <linux/string.h>
22 #include <linux/kthread.h> 22 #include <linux/kthread.h>
23 #include <linux/crypto.h> 23 #include <linux/crypto.h>
24 #include <linux/completion.h> 24 #include <linux/completion.h>
25 #include <linux/module.h> 25 #include <linux/module.h>
26 #include <asm/unaligned.h> 26 #include <asm/unaligned.h>
27 #include <scsi/scsi_device.h> 27 #include <scsi/scsi_device.h>
28 #include <scsi/iscsi_proto.h> 28 #include <scsi/iscsi_proto.h>
29 #include <target/target_core_base.h> 29 #include <target/target_core_base.h>
30 #include <target/target_core_tmr.h> 30 #include <target/target_core_fabric.h>
31 #include <target/target_core_transport.h>
32 31
33 #include "iscsi_target_core.h" 32 #include "iscsi_target_core.h"
34 #include "iscsi_target_parameters.h" 33 #include "iscsi_target_parameters.h"
35 #include "iscsi_target_seq_pdu_list.h" 34 #include "iscsi_target_seq_pdu_list.h"
36 #include "iscsi_target_tq.h" 35 #include "iscsi_target_tq.h"
37 #include "iscsi_target_configfs.h" 36 #include "iscsi_target_configfs.h"
38 #include "iscsi_target_datain_values.h" 37 #include "iscsi_target_datain_values.h"
39 #include "iscsi_target_erl0.h" 38 #include "iscsi_target_erl0.h"
40 #include "iscsi_target_erl1.h" 39 #include "iscsi_target_erl1.h"
41 #include "iscsi_target_erl2.h" 40 #include "iscsi_target_erl2.h"
42 #include "iscsi_target_login.h" 41 #include "iscsi_target_login.h"
43 #include "iscsi_target_tmr.h" 42 #include "iscsi_target_tmr.h"
44 #include "iscsi_target_tpg.h" 43 #include "iscsi_target_tpg.h"
45 #include "iscsi_target_util.h" 44 #include "iscsi_target_util.h"
46 #include "iscsi_target.h" 45 #include "iscsi_target.h"
47 #include "iscsi_target_device.h" 46 #include "iscsi_target_device.h"
48 #include "iscsi_target_stat.h" 47 #include "iscsi_target_stat.h"
49 48
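The include hunk above is the whole point of this commit, repeated across the fabric drivers: where a fabric module previously pulled the fabric API in piecemeal,

	#include <target/target_core_base.h>
	#include <target/target_core_tmr.h>
	#include <target/target_core_transport.h>

it now gets the entire fabric-facing interface from a single header:

	#include <target/target_core_base.h>
	#include <target/target_core_fabric.h>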
50 static LIST_HEAD(g_tiqn_list); 49 static LIST_HEAD(g_tiqn_list);
51 static LIST_HEAD(g_np_list); 50 static LIST_HEAD(g_np_list);
52 static DEFINE_SPINLOCK(tiqn_lock); 51 static DEFINE_SPINLOCK(tiqn_lock);
53 static DEFINE_SPINLOCK(np_lock); 52 static DEFINE_SPINLOCK(np_lock);
54 53
55 static struct idr tiqn_idr; 54 static struct idr tiqn_idr;
56 struct idr sess_idr; 55 struct idr sess_idr;
57 struct mutex auth_id_lock; 56 struct mutex auth_id_lock;
58 spinlock_t sess_idr_lock; 57 spinlock_t sess_idr_lock;
59 58
60 struct iscsit_global *iscsit_global; 59 struct iscsit_global *iscsit_global;
61 60
62 struct kmem_cache *lio_cmd_cache; 61 struct kmem_cache *lio_cmd_cache;
63 struct kmem_cache *lio_qr_cache; 62 struct kmem_cache *lio_qr_cache;
64 struct kmem_cache *lio_dr_cache; 63 struct kmem_cache *lio_dr_cache;
65 struct kmem_cache *lio_ooo_cache; 64 struct kmem_cache *lio_ooo_cache;
66 struct kmem_cache *lio_r2t_cache; 65 struct kmem_cache *lio_r2t_cache;
67 66
68 static int iscsit_handle_immediate_data(struct iscsi_cmd *, 67 static int iscsit_handle_immediate_data(struct iscsi_cmd *,
69 unsigned char *buf, u32); 68 unsigned char *buf, u32);
70 static int iscsit_logout_post_handler(struct iscsi_cmd *, struct iscsi_conn *); 69 static int iscsit_logout_post_handler(struct iscsi_cmd *, struct iscsi_conn *);
71 70
72 struct iscsi_tiqn *iscsit_get_tiqn_for_login(unsigned char *buf) 71 struct iscsi_tiqn *iscsit_get_tiqn_for_login(unsigned char *buf)
73 { 72 {
74 struct iscsi_tiqn *tiqn = NULL; 73 struct iscsi_tiqn *tiqn = NULL;
75 74
76 spin_lock(&tiqn_lock); 75 spin_lock(&tiqn_lock);
77 list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) { 76 list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) {
78 if (!strcmp(tiqn->tiqn, buf)) { 77 if (!strcmp(tiqn->tiqn, buf)) {
79 78
80 spin_lock(&tiqn->tiqn_state_lock); 79 spin_lock(&tiqn->tiqn_state_lock);
81 if (tiqn->tiqn_state == TIQN_STATE_ACTIVE) { 80 if (tiqn->tiqn_state == TIQN_STATE_ACTIVE) {
82 tiqn->tiqn_access_count++; 81 tiqn->tiqn_access_count++;
83 spin_unlock(&tiqn->tiqn_state_lock); 82 spin_unlock(&tiqn->tiqn_state_lock);
84 spin_unlock(&tiqn_lock); 83 spin_unlock(&tiqn_lock);
85 return tiqn; 84 return tiqn;
86 } 85 }
87 spin_unlock(&tiqn->tiqn_state_lock); 86 spin_unlock(&tiqn->tiqn_state_lock);
88 } 87 }
89 } 88 }
90 spin_unlock(&tiqn_lock); 89 spin_unlock(&tiqn_lock);
91 90
92 return NULL; 91 return NULL;
93 } 92 }
94 93
95 static int iscsit_set_tiqn_shutdown(struct iscsi_tiqn *tiqn) 94 static int iscsit_set_tiqn_shutdown(struct iscsi_tiqn *tiqn)
96 { 95 {
97 spin_lock(&tiqn->tiqn_state_lock); 96 spin_lock(&tiqn->tiqn_state_lock);
98 if (tiqn->tiqn_state == TIQN_STATE_ACTIVE) { 97 if (tiqn->tiqn_state == TIQN_STATE_ACTIVE) {
99 tiqn->tiqn_state = TIQN_STATE_SHUTDOWN; 98 tiqn->tiqn_state = TIQN_STATE_SHUTDOWN;
100 spin_unlock(&tiqn->tiqn_state_lock); 99 spin_unlock(&tiqn->tiqn_state_lock);
101 return 0; 100 return 0;
102 } 101 }
103 spin_unlock(&tiqn->tiqn_state_lock); 102 spin_unlock(&tiqn->tiqn_state_lock);
104 103
105 return -1; 104 return -1;
106 } 105 }
107 106
108 void iscsit_put_tiqn_for_login(struct iscsi_tiqn *tiqn) 107 void iscsit_put_tiqn_for_login(struct iscsi_tiqn *tiqn)
109 { 108 {
110 spin_lock(&tiqn->tiqn_state_lock); 109 spin_lock(&tiqn->tiqn_state_lock);
111 tiqn->tiqn_access_count--; 110 tiqn->tiqn_access_count--;
112 spin_unlock(&tiqn->tiqn_state_lock); 111 spin_unlock(&tiqn->tiqn_state_lock);
113 } 112 }
114 113
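Taken together, iscsit_get_tiqn_for_login() and iscsit_put_tiqn_for_login() form a get/put pair around tiqn_access_count, which iscsit_wait_for_tiqn() below spins on during shutdown. A minimal caller sketch (illustrative, not a verbatim caller from this commit):

	struct iscsi_tiqn *tiqn = iscsit_get_tiqn_for_login(buf);
	if (tiqn) {
		/* ... run the login against this active target IQN ... */
		iscsit_put_tiqn_for_login(tiqn);
	}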
115 /* 114 /*
116 * Note that IQN formatting is expected to be done in userspace, and 115 * Note that IQN formatting is expected to be done in userspace, and
117 * no explicit IQN format checks are done here. 116 * no explicit IQN format checks are done here.
118 */ 117 */
119 struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *buf) 118 struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *buf)
120 { 119 {
121 struct iscsi_tiqn *tiqn = NULL; 120 struct iscsi_tiqn *tiqn = NULL;
122 int ret; 121 int ret;
123 122
124 if (strlen(buf) >= ISCSI_IQN_LEN) { 123 if (strlen(buf) >= ISCSI_IQN_LEN) {
125 pr_err("Target IQN exceeds %d bytes\n", 124 pr_err("Target IQN exceeds %d bytes\n",
126 ISCSI_IQN_LEN); 125 ISCSI_IQN_LEN);
127 return ERR_PTR(-EINVAL); 126 return ERR_PTR(-EINVAL);
128 } 127 }
129 128
130 tiqn = kzalloc(sizeof(struct iscsi_tiqn), GFP_KERNEL); 129 tiqn = kzalloc(sizeof(struct iscsi_tiqn), GFP_KERNEL);
131 if (!tiqn) { 130 if (!tiqn) {
132 pr_err("Unable to allocate struct iscsi_tiqn\n"); 131 pr_err("Unable to allocate struct iscsi_tiqn\n");
133 return ERR_PTR(-ENOMEM); 132 return ERR_PTR(-ENOMEM);
134 } 133 }
135 134
136 sprintf(tiqn->tiqn, "%s", buf); 135 sprintf(tiqn->tiqn, "%s", buf);
137 INIT_LIST_HEAD(&tiqn->tiqn_list); 136 INIT_LIST_HEAD(&tiqn->tiqn_list);
138 INIT_LIST_HEAD(&tiqn->tiqn_tpg_list); 137 INIT_LIST_HEAD(&tiqn->tiqn_tpg_list);
139 spin_lock_init(&tiqn->tiqn_state_lock); 138 spin_lock_init(&tiqn->tiqn_state_lock);
140 spin_lock_init(&tiqn->tiqn_tpg_lock); 139 spin_lock_init(&tiqn->tiqn_tpg_lock);
141 spin_lock_init(&tiqn->sess_err_stats.lock); 140 spin_lock_init(&tiqn->sess_err_stats.lock);
142 spin_lock_init(&tiqn->login_stats.lock); 141 spin_lock_init(&tiqn->login_stats.lock);
143 spin_lock_init(&tiqn->logout_stats.lock); 142 spin_lock_init(&tiqn->logout_stats.lock);
144 143
145 if (!idr_pre_get(&tiqn_idr, GFP_KERNEL)) { 144 if (!idr_pre_get(&tiqn_idr, GFP_KERNEL)) {
146 pr_err("idr_pre_get() for tiqn_idr failed\n"); 145 pr_err("idr_pre_get() for tiqn_idr failed\n");
147 kfree(tiqn); 146 kfree(tiqn);
148 return ERR_PTR(-ENOMEM); 147 return ERR_PTR(-ENOMEM);
149 } 148 }
150 tiqn->tiqn_state = TIQN_STATE_ACTIVE; 149 tiqn->tiqn_state = TIQN_STATE_ACTIVE;
151 150
152 spin_lock(&tiqn_lock); 151 spin_lock(&tiqn_lock);
153 ret = idr_get_new(&tiqn_idr, NULL, &tiqn->tiqn_index); 152 ret = idr_get_new(&tiqn_idr, NULL, &tiqn->tiqn_index);
154 if (ret < 0) { 153 if (ret < 0) {
155 pr_err("idr_get_new() failed for tiqn->tiqn_index\n"); 154 pr_err("idr_get_new() failed for tiqn->tiqn_index\n");
156 spin_unlock(&tiqn_lock); 155 spin_unlock(&tiqn_lock);
157 kfree(tiqn); 156 kfree(tiqn);
158 return ERR_PTR(ret); 157 return ERR_PTR(ret);
159 } 158 }
160 list_add_tail(&tiqn->tiqn_list, &g_tiqn_list); 159 list_add_tail(&tiqn->tiqn_list, &g_tiqn_list);
161 spin_unlock(&tiqn_lock); 160 spin_unlock(&tiqn_lock);
162 161
163 pr_debug("CORE[0] - Added iSCSI Target IQN: %s\n", tiqn->tiqn); 162 pr_debug("CORE[0] - Added iSCSI Target IQN: %s\n", tiqn->tiqn);
164 163
165 return tiqn; 164 return tiqn;
166 165
167 } 166 }
168 167
169 static void iscsit_wait_for_tiqn(struct iscsi_tiqn *tiqn) 168 static void iscsit_wait_for_tiqn(struct iscsi_tiqn *tiqn)
170 { 169 {
171 /* 170 /*
172 * Wait for accesses to said struct iscsi_tiqn to end. 171 * Wait for accesses to said struct iscsi_tiqn to end.
173 */ 172 */
174 spin_lock(&tiqn->tiqn_state_lock); 173 spin_lock(&tiqn->tiqn_state_lock);
175 while (tiqn->tiqn_access_count != 0) { 174 while (tiqn->tiqn_access_count != 0) {
176 spin_unlock(&tiqn->tiqn_state_lock); 175 spin_unlock(&tiqn->tiqn_state_lock);
177 msleep(10); 176 msleep(10);
178 spin_lock(&tiqn->tiqn_state_lock); 177 spin_lock(&tiqn->tiqn_state_lock);
179 } 178 }
180 spin_unlock(&tiqn->tiqn_state_lock); 179 spin_unlock(&tiqn->tiqn_state_lock);
181 } 180 }
182 181
183 void iscsit_del_tiqn(struct iscsi_tiqn *tiqn) 182 void iscsit_del_tiqn(struct iscsi_tiqn *tiqn)
184 { 183 {
185 /* 184 /*
186 * iscsit_set_tiqn_shutdown sets tiqn->tiqn_state = TIQN_STATE_SHUTDOWN 185 * iscsit_set_tiqn_shutdown sets tiqn->tiqn_state = TIQN_STATE_SHUTDOWN
187 * while holding tiqn->tiqn_state_lock. This means that all subsequent 186 * while holding tiqn->tiqn_state_lock. This means that all subsequent
188 * attempts to access this struct iscsi_tiqn will fail from both transport 187 * attempts to access this struct iscsi_tiqn will fail from both transport
189 * fabric and control code paths. 188 * fabric and control code paths.
190 */ 189 */
191 if (iscsit_set_tiqn_shutdown(tiqn) < 0) { 190 if (iscsit_set_tiqn_shutdown(tiqn) < 0) {
192 pr_err("iscsit_set_tiqn_shutdown() failed\n"); 191 pr_err("iscsit_set_tiqn_shutdown() failed\n");
193 return; 192 return;
194 } 193 }
195 194
196 iscsit_wait_for_tiqn(tiqn); 195 iscsit_wait_for_tiqn(tiqn);
197 196
198 spin_lock(&tiqn_lock); 197 spin_lock(&tiqn_lock);
199 list_del(&tiqn->tiqn_list); 198 list_del(&tiqn->tiqn_list);
200 idr_remove(&tiqn_idr, tiqn->tiqn_index); 199 idr_remove(&tiqn_idr, tiqn->tiqn_index);
201 spin_unlock(&tiqn_lock); 200 spin_unlock(&tiqn_lock);
202 201
203 pr_debug("CORE[0] - Deleted iSCSI Target IQN: %s\n", 202 pr_debug("CORE[0] - Deleted iSCSI Target IQN: %s\n",
204 tiqn->tiqn); 203 tiqn->tiqn);
205 kfree(tiqn); 204 kfree(tiqn);
206 } 205 }
207 206
208 int iscsit_access_np(struct iscsi_np *np, struct iscsi_portal_group *tpg) 207 int iscsit_access_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
209 { 208 {
210 int ret; 209 int ret;
211 /* 210 /*
212 * Determine if the network portal is accepting storage traffic. 211 * Determine if the network portal is accepting storage traffic.
213 */ 212 */
214 spin_lock_bh(&np->np_thread_lock); 213 spin_lock_bh(&np->np_thread_lock);
215 if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) { 214 if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
216 spin_unlock_bh(&np->np_thread_lock); 215 spin_unlock_bh(&np->np_thread_lock);
217 return -1; 216 return -1;
218 } 217 }
219 if (np->np_login_tpg) { 218 if (np->np_login_tpg) {
220 pr_err("np->np_login_tpg is not NULL!\n"); 219 pr_err("np->np_login_tpg is not NULL!\n");
221 spin_unlock_bh(&np->np_thread_lock); 220 spin_unlock_bh(&np->np_thread_lock);
222 return -1; 221 return -1;
223 } 222 }
224 spin_unlock_bh(&np->np_thread_lock); 223 spin_unlock_bh(&np->np_thread_lock);
225 /* 224 /*
226 * Determine if the portal group is accepting storage traffic. 225 * Determine if the portal group is accepting storage traffic.
227 */ 226 */
228 spin_lock_bh(&tpg->tpg_state_lock); 227 spin_lock_bh(&tpg->tpg_state_lock);
229 if (tpg->tpg_state != TPG_STATE_ACTIVE) { 228 if (tpg->tpg_state != TPG_STATE_ACTIVE) {
230 spin_unlock_bh(&tpg->tpg_state_lock); 229 spin_unlock_bh(&tpg->tpg_state_lock);
231 return -1; 230 return -1;
232 } 231 }
233 spin_unlock_bh(&tpg->tpg_state_lock); 232 spin_unlock_bh(&tpg->tpg_state_lock);
234 233
235 /* 234 /*
236 * Here we serialize access across the TIQN+TPG Tuple. 235 * Here we serialize access across the TIQN+TPG Tuple.
237 */ 236 */
238 ret = mutex_lock_interruptible(&tpg->np_login_lock); 237 ret = mutex_lock_interruptible(&tpg->np_login_lock);
239 if ((ret != 0) || signal_pending(current)) 238 if ((ret != 0) || signal_pending(current))
240 return -1; 239 return -1;
241 240
242 spin_lock_bh(&np->np_thread_lock); 241 spin_lock_bh(&np->np_thread_lock);
243 np->np_login_tpg = tpg; 242 np->np_login_tpg = tpg;
244 spin_unlock_bh(&np->np_thread_lock); 243 spin_unlock_bh(&np->np_thread_lock);
245 244
246 return 0; 245 return 0;
247 } 246 }
248 247
249 int iscsit_deaccess_np(struct iscsi_np *np, struct iscsi_portal_group *tpg) 248 int iscsit_deaccess_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
250 { 249 {
251 struct iscsi_tiqn *tiqn = tpg->tpg_tiqn; 250 struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
252 251
253 spin_lock_bh(&np->np_thread_lock); 252 spin_lock_bh(&np->np_thread_lock);
254 np->np_login_tpg = NULL; 253 np->np_login_tpg = NULL;
255 spin_unlock_bh(&np->np_thread_lock); 254 spin_unlock_bh(&np->np_thread_lock);
256 255
257 mutex_unlock(&tpg->np_login_lock); 256 mutex_unlock(&tpg->np_login_lock);
258 257
259 if (tiqn) 258 if (tiqn)
260 iscsit_put_tiqn_for_login(tiqn); 259 iscsit_put_tiqn_for_login(tiqn);
261 260
262 return 0; 261 return 0;
263 } 262 }
264 263
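iscsit_access_np() and iscsit_deaccess_np() bracket a single login: the first checks that both the portal and the TPG are active and then takes tpg->np_login_lock, serializing logins across the TIQN+TPG tuple; the second clears np_login_tpg and releases the mutex. A minimal sketch of the intended pairing (illustrative):

	if (iscsit_access_np(np, tpg) < 0)
		return -1;
	/* ... process exactly one login on this network portal ... */
	iscsit_deaccess_np(np, tpg);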
265 static struct iscsi_np *iscsit_get_np( 264 static struct iscsi_np *iscsit_get_np(
266 struct __kernel_sockaddr_storage *sockaddr, 265 struct __kernel_sockaddr_storage *sockaddr,
267 int network_transport) 266 int network_transport)
268 { 267 {
269 struct sockaddr_in *sock_in, *sock_in_e; 268 struct sockaddr_in *sock_in, *sock_in_e;
270 struct sockaddr_in6 *sock_in6, *sock_in6_e; 269 struct sockaddr_in6 *sock_in6, *sock_in6_e;
271 struct iscsi_np *np; 270 struct iscsi_np *np;
272 int ip_match = 0; 271 int ip_match = 0;
273 u16 port; 272 u16 port;
274 273
275 spin_lock_bh(&np_lock); 274 spin_lock_bh(&np_lock);
276 list_for_each_entry(np, &g_np_list, np_list) { 275 list_for_each_entry(np, &g_np_list, np_list) {
277 spin_lock(&np->np_thread_lock); 276 spin_lock(&np->np_thread_lock);
278 if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) { 277 if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
279 spin_unlock(&np->np_thread_lock); 278 spin_unlock(&np->np_thread_lock);
280 continue; 279 continue;
281 } 280 }
282 281
283 if (sockaddr->ss_family == AF_INET6) { 282 if (sockaddr->ss_family == AF_INET6) {
284 sock_in6 = (struct sockaddr_in6 *)sockaddr; 283 sock_in6 = (struct sockaddr_in6 *)sockaddr;
285 sock_in6_e = (struct sockaddr_in6 *)&np->np_sockaddr; 284 sock_in6_e = (struct sockaddr_in6 *)&np->np_sockaddr;
286 285
287 if (!memcmp((void *)&sock_in6->sin6_addr.in6_u, 286 if (!memcmp((void *)&sock_in6->sin6_addr.in6_u,
288 (void *)&sock_in6_e->sin6_addr.in6_u, 287 (void *)&sock_in6_e->sin6_addr.in6_u,
289 sizeof(struct in6_addr))) 288 sizeof(struct in6_addr)))
290 ip_match = 1; 289 ip_match = 1;
291 290
292 port = ntohs(sock_in6->sin6_port); 291 port = ntohs(sock_in6->sin6_port);
293 } else { 292 } else {
294 sock_in = (struct sockaddr_in *)sockaddr; 293 sock_in = (struct sockaddr_in *)sockaddr;
295 sock_in_e = (struct sockaddr_in *)&np->np_sockaddr; 294 sock_in_e = (struct sockaddr_in *)&np->np_sockaddr;
296 295
297 if (sock_in->sin_addr.s_addr == 296 if (sock_in->sin_addr.s_addr ==
298 sock_in_e->sin_addr.s_addr) 297 sock_in_e->sin_addr.s_addr)
299 ip_match = 1; 298 ip_match = 1;
300 299
301 port = ntohs(sock_in->sin_port); 300 port = ntohs(sock_in->sin_port);
302 } 301 }
303 302
304 if ((ip_match == 1) && (np->np_port == port) && 303 if ((ip_match == 1) && (np->np_port == port) &&
305 (np->np_network_transport == network_transport)) { 304 (np->np_network_transport == network_transport)) {
306 /* 305 /*
307 * Increment the np_exports reference count now to 306 * Increment the np_exports reference count now to
308 * prevent iscsit_del_np() below from being called 307 * prevent iscsit_del_np() below from being called
309 * while iscsi_tpg_add_network_portal() is called. 308 * while iscsi_tpg_add_network_portal() is called.
310 */ 309 */
311 np->np_exports++; 310 np->np_exports++;
312 spin_unlock(&np->np_thread_lock); 311 spin_unlock(&np->np_thread_lock);
313 spin_unlock_bh(&np_lock); 312 spin_unlock_bh(&np_lock);
314 return np; 313 return np;
315 } 314 }
316 spin_unlock(&np->np_thread_lock); 315 spin_unlock(&np->np_thread_lock);
317 } 316 }
318 spin_unlock_bh(&np_lock); 317 spin_unlock_bh(&np_lock);
319 318
320 return NULL; 319 return NULL;
321 } 320 }
322 321
323 struct iscsi_np *iscsit_add_np( 322 struct iscsi_np *iscsit_add_np(
324 struct __kernel_sockaddr_storage *sockaddr, 323 struct __kernel_sockaddr_storage *sockaddr,
325 char *ip_str, 324 char *ip_str,
326 int network_transport) 325 int network_transport)
327 { 326 {
328 struct sockaddr_in *sock_in; 327 struct sockaddr_in *sock_in;
329 struct sockaddr_in6 *sock_in6; 328 struct sockaddr_in6 *sock_in6;
330 struct iscsi_np *np; 329 struct iscsi_np *np;
331 int ret; 330 int ret;
332 /* 331 /*
333 * Locate the existing struct iscsi_np if already active. 332 * Locate the existing struct iscsi_np if already active.
334 */ 333 */
335 np = iscsit_get_np(sockaddr, network_transport); 334 np = iscsit_get_np(sockaddr, network_transport);
336 if (np) 335 if (np)
337 return np; 336 return np;
338 337
339 np = kzalloc(sizeof(struct iscsi_np), GFP_KERNEL); 338 np = kzalloc(sizeof(struct iscsi_np), GFP_KERNEL);
340 if (!np) { 339 if (!np) {
341 pr_err("Unable to allocate memory for struct iscsi_np\n"); 340 pr_err("Unable to allocate memory for struct iscsi_np\n");
342 return ERR_PTR(-ENOMEM); 341 return ERR_PTR(-ENOMEM);
343 } 342 }
344 343
345 np->np_flags |= NPF_IP_NETWORK; 344 np->np_flags |= NPF_IP_NETWORK;
346 if (sockaddr->ss_family == AF_INET6) { 345 if (sockaddr->ss_family == AF_INET6) {
347 sock_in6 = (struct sockaddr_in6 *)sockaddr; 346 sock_in6 = (struct sockaddr_in6 *)sockaddr;
348 snprintf(np->np_ip, IPV6_ADDRESS_SPACE, "%s", ip_str); 347 snprintf(np->np_ip, IPV6_ADDRESS_SPACE, "%s", ip_str);
349 np->np_port = ntohs(sock_in6->sin6_port); 348 np->np_port = ntohs(sock_in6->sin6_port);
350 } else { 349 } else {
351 sock_in = (struct sockaddr_in *)sockaddr; 350 sock_in = (struct sockaddr_in *)sockaddr;
352 sprintf(np->np_ip, "%s", ip_str); 351 sprintf(np->np_ip, "%s", ip_str);
353 np->np_port = ntohs(sock_in->sin_port); 352 np->np_port = ntohs(sock_in->sin_port);
354 } 353 }
355 354
356 np->np_network_transport = network_transport; 355 np->np_network_transport = network_transport;
357 spin_lock_init(&np->np_thread_lock); 356 spin_lock_init(&np->np_thread_lock);
358 init_completion(&np->np_restart_comp); 357 init_completion(&np->np_restart_comp);
359 INIT_LIST_HEAD(&np->np_list); 358 INIT_LIST_HEAD(&np->np_list);
360 359
361 ret = iscsi_target_setup_login_socket(np, sockaddr); 360 ret = iscsi_target_setup_login_socket(np, sockaddr);
362 if (ret != 0) { 361 if (ret != 0) {
363 kfree(np); 362 kfree(np);
364 return ERR_PTR(ret); 363 return ERR_PTR(ret);
365 } 364 }
366 365
367 np->np_thread = kthread_run(iscsi_target_login_thread, np, "iscsi_np"); 366 np->np_thread = kthread_run(iscsi_target_login_thread, np, "iscsi_np");
368 if (IS_ERR(np->np_thread)) { 367 if (IS_ERR(np->np_thread)) {
369 pr_err("Unable to create kthread: iscsi_np\n"); 368 pr_err("Unable to create kthread: iscsi_np\n");
370 ret = PTR_ERR(np->np_thread); 369 ret = PTR_ERR(np->np_thread);
371 kfree(np); 370 kfree(np);
372 return ERR_PTR(ret); 371 return ERR_PTR(ret);
373 } 372 }
374 /* 373 /*
375 * Increment the np_exports reference count now to prevent 374 * Increment the np_exports reference count now to prevent
376 * iscsit_del_np() below from being run while a new call to 375 * iscsit_del_np() below from being run while a new call to
377 * iscsi_tpg_add_network_portal() for a matching iscsi_np is 376 * iscsi_tpg_add_network_portal() for a matching iscsi_np is
378 * active. We don't need to hold np->np_thread_lock at this 377 * active. We don't need to hold np->np_thread_lock at this
379 * point because iscsi_np has not been added to g_np_list yet. 378 * point because iscsi_np has not been added to g_np_list yet.
380 */ 379 */
381 np->np_exports = 1; 380 np->np_exports = 1;
382 381
383 spin_lock_bh(&np_lock); 382 spin_lock_bh(&np_lock);
384 list_add_tail(&np->np_list, &g_np_list); 383 list_add_tail(&np->np_list, &g_np_list);
385 spin_unlock_bh(&np_lock); 384 spin_unlock_bh(&np_lock);
386 385
387 pr_debug("CORE[0] - Added Network Portal: %s:%hu on %s\n", 386 pr_debug("CORE[0] - Added Network Portal: %s:%hu on %s\n",
388 np->np_ip, np->np_port, (np->np_network_transport == ISCSI_TCP) ? 387 np->np_ip, np->np_port, (np->np_network_transport == ISCSI_TCP) ?
389 "TCP" : "SCTP"); 388 "TCP" : "SCTP");
390 389
391 return np; 390 return np;
392 } 391 }
393 392
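Note the reference-counting contract: every successful iscsit_add_np() holds one np_exports reference, whether it matched an existing portal via iscsit_get_np() or created a new one. A balanced-usage sketch (illustrative; ISCSI_TCP as the transport):

	struct iscsi_np *np = iscsit_add_np(sockaddr, ip_str, ISCSI_TCP);
	if (!IS_ERR(np)) {
		/* ... export the portal to a TPG ... */
		iscsit_del_np(np);	/* drops this export; the last put shuts the portal down */
	}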
394 int iscsit_reset_np_thread( 393 int iscsit_reset_np_thread(
395 struct iscsi_np *np, 394 struct iscsi_np *np,
396 struct iscsi_tpg_np *tpg_np, 395 struct iscsi_tpg_np *tpg_np,
397 struct iscsi_portal_group *tpg) 396 struct iscsi_portal_group *tpg)
398 { 397 {
399 spin_lock_bh(&np->np_thread_lock); 398 spin_lock_bh(&np->np_thread_lock);
400 if (tpg && tpg_np) { 399 if (tpg && tpg_np) {
401 /* 400 /*
402 * The reset operation need only be performed when the 401 * The reset operation need only be performed when the
403 * passed struct iscsi_portal_group has a login in progress 402 * passed struct iscsi_portal_group has a login in progress
404 * to one of the network portals. 403 * to one of the network portals.
405 */ 404 */
406 if (tpg_np->tpg_np->np_login_tpg != tpg) { 405 if (tpg_np->tpg_np->np_login_tpg != tpg) {
407 spin_unlock_bh(&np->np_thread_lock); 406 spin_unlock_bh(&np->np_thread_lock);
408 return 0; 407 return 0;
409 } 408 }
410 } 409 }
411 if (np->np_thread_state == ISCSI_NP_THREAD_INACTIVE) { 410 if (np->np_thread_state == ISCSI_NP_THREAD_INACTIVE) {
412 spin_unlock_bh(&np->np_thread_lock); 411 spin_unlock_bh(&np->np_thread_lock);
413 return 0; 412 return 0;
414 } 413 }
415 np->np_thread_state = ISCSI_NP_THREAD_RESET; 414 np->np_thread_state = ISCSI_NP_THREAD_RESET;
416 415
417 if (np->np_thread) { 416 if (np->np_thread) {
418 spin_unlock_bh(&np->np_thread_lock); 417 spin_unlock_bh(&np->np_thread_lock);
419 send_sig(SIGINT, np->np_thread, 1); 418 send_sig(SIGINT, np->np_thread, 1);
420 wait_for_completion(&np->np_restart_comp); 419 wait_for_completion(&np->np_restart_comp);
421 spin_lock_bh(&np->np_thread_lock); 420 spin_lock_bh(&np->np_thread_lock);
422 } 421 }
423 spin_unlock_bh(&np->np_thread_lock); 422 spin_unlock_bh(&np->np_thread_lock);
424 423
425 return 0; 424 return 0;
426 } 425 }
427 426
428 int iscsit_del_np_comm(struct iscsi_np *np) 427 int iscsit_del_np_comm(struct iscsi_np *np)
429 { 428 {
430 if (!np->np_socket) 429 if (!np->np_socket)
431 return 0; 430 return 0;
432 431
433 /* 432 /*
434 * Some network transports allocate their own struct sock->file, 433 * Some network transports allocate their own struct sock->file,
435 * see if we need to free any additional allocated resources. 434 * see if we need to free any additional allocated resources.
436 */ 435 */
437 if (np->np_flags & NPF_SCTP_STRUCT_FILE) { 436 if (np->np_flags & NPF_SCTP_STRUCT_FILE) {
438 kfree(np->np_socket->file); 437 kfree(np->np_socket->file);
439 np->np_socket->file = NULL; 438 np->np_socket->file = NULL;
440 } 439 }
441 440
442 sock_release(np->np_socket); 441 sock_release(np->np_socket);
443 return 0; 442 return 0;
444 } 443 }
445 444
446 int iscsit_del_np(struct iscsi_np *np) 445 int iscsit_del_np(struct iscsi_np *np)
447 { 446 {
448 spin_lock_bh(&np->np_thread_lock); 447 spin_lock_bh(&np->np_thread_lock);
449 np->np_exports--; 448 np->np_exports--;
450 if (np->np_exports) { 449 if (np->np_exports) {
451 spin_unlock_bh(&np->np_thread_lock); 450 spin_unlock_bh(&np->np_thread_lock);
452 return 0; 451 return 0;
453 } 452 }
454 np->np_thread_state = ISCSI_NP_THREAD_SHUTDOWN; 453 np->np_thread_state = ISCSI_NP_THREAD_SHUTDOWN;
455 spin_unlock_bh(&np->np_thread_lock); 454 spin_unlock_bh(&np->np_thread_lock);
456 455
457 if (np->np_thread) { 456 if (np->np_thread) {
458 /* 457 /*
459 * We need to send the signal to wake up Linux/Net, 458 * We need to send the signal to wake up Linux/Net,
460 * which may be sleeping in sock_accept(). 459 * which may be sleeping in sock_accept().
461 */ 460 */
462 send_sig(SIGINT, np->np_thread, 1); 461 send_sig(SIGINT, np->np_thread, 1);
463 kthread_stop(np->np_thread); 462 kthread_stop(np->np_thread);
464 } 463 }
465 iscsit_del_np_comm(np); 464 iscsit_del_np_comm(np);
466 465
467 spin_lock_bh(&np_lock); 466 spin_lock_bh(&np_lock);
468 list_del(&np->np_list); 467 list_del(&np->np_list);
469 spin_unlock_bh(&np_lock); 468 spin_unlock_bh(&np_lock);
470 469
471 pr_debug("CORE[0] - Removed Network Portal: %s:%hu on %s\n", 470 pr_debug("CORE[0] - Removed Network Portal: %s:%hu on %s\n",
472 np->np_ip, np->np_port, (np->np_network_transport == ISCSI_TCP) ? 471 np->np_ip, np->np_port, (np->np_network_transport == ISCSI_TCP) ?
473 "TCP" : "SCTP"); 472 "TCP" : "SCTP");
474 473
475 kfree(np); 474 kfree(np);
476 return 0; 475 return 0;
477 } 476 }
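iscsit_del_np() only tears the portal down once np_exports drops to zero, with the decrement and the zero test both made under np_thread_lock so a concurrent iscsi_tpg_add_network_portal() cannot race the teardown. A hedged userspace sketch of the same last-reference-frees pattern; struct portal and portal_put() are illustrative names, not from the driver.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct portal {
	pthread_mutex_t lock;
	int exports;		/* analogous to np->np_exports */
};

/* Returns 1 if the caller dropped the last reference and must free. */
static int portal_put(struct portal *p)
{
	int last;

	pthread_mutex_lock(&p->lock);
	last = (--p->exports == 0);	/* decrement and test under the lock */
	pthread_mutex_unlock(&p->lock);
	return last;
}

int main(void)
{
	struct portal *p = malloc(sizeof(*p));

	pthread_mutex_init(&p->lock, NULL);
	p->exports = 2;

	if (!portal_put(p))
		printf("still exported, not freed\n");
	if (portal_put(p)) {
		printf("last reference dropped, freeing\n");
		free(p);
	}
	return 0;
}

As above, the actual teardown (stopping the thread, unlinking from the list) then runs outside the lock, exactly once.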
478 477
479 static int __init iscsi_target_init_module(void) 478 static int __init iscsi_target_init_module(void)
480 { 479 {
481 int ret = 0; 480 int ret = 0;
482 481
483 pr_debug("iSCSI-Target "ISCSIT_VERSION"\n"); 482 pr_debug("iSCSI-Target "ISCSIT_VERSION"\n");
484 483
485 iscsit_global = kzalloc(sizeof(struct iscsit_global), GFP_KERNEL); 484 iscsit_global = kzalloc(sizeof(struct iscsit_global), GFP_KERNEL);
486 if (!iscsit_global) { 485 if (!iscsit_global) {
487 pr_err("Unable to allocate memory for iscsit_global\n"); 486 pr_err("Unable to allocate memory for iscsit_global\n");
488 return -ENOMEM; 487 return -ENOMEM;
489 } 488 }
490 mutex_init(&auth_id_lock); 489 mutex_init(&auth_id_lock);
491 spin_lock_init(&sess_idr_lock); 490 spin_lock_init(&sess_idr_lock);
492 idr_init(&tiqn_idr); 491 idr_init(&tiqn_idr);
493 idr_init(&sess_idr); 492 idr_init(&sess_idr);
494 493
495 ret = iscsi_target_register_configfs(); 494 ret = iscsi_target_register_configfs();
496 if (ret < 0) 495 if (ret < 0)
497 goto out; 496 goto out;
498 497
499 ret = iscsi_thread_set_init(); 498 ret = iscsi_thread_set_init();
500 if (ret < 0) 499 if (ret < 0)
501 goto configfs_out; 500 goto configfs_out;
502 501
503 if (iscsi_allocate_thread_sets(TARGET_THREAD_SET_COUNT) != 502 if (iscsi_allocate_thread_sets(TARGET_THREAD_SET_COUNT) !=
504 TARGET_THREAD_SET_COUNT) { 503 TARGET_THREAD_SET_COUNT) {
505 pr_err("iscsi_allocate_thread_sets() returned" 504 pr_err("iscsi_allocate_thread_sets() returned"
506 " unexpected value!\n"); 505 " unexpected value!\n");
507 goto ts_out1; 506 goto ts_out1;
508 } 507 }
509 508
510 lio_cmd_cache = kmem_cache_create("lio_cmd_cache", 509 lio_cmd_cache = kmem_cache_create("lio_cmd_cache",
511 sizeof(struct iscsi_cmd), __alignof__(struct iscsi_cmd), 510 sizeof(struct iscsi_cmd), __alignof__(struct iscsi_cmd),
512 0, NULL); 511 0, NULL);
513 if (!lio_cmd_cache) { 512 if (!lio_cmd_cache) {
514 pr_err("Unable to kmem_cache_create() for" 513 pr_err("Unable to kmem_cache_create() for"
515 " lio_cmd_cache\n"); 514 " lio_cmd_cache\n");
516 goto ts_out2; 515 goto ts_out2;
517 } 516 }
518 517
519 lio_qr_cache = kmem_cache_create("lio_qr_cache", 518 lio_qr_cache = kmem_cache_create("lio_qr_cache",
520 sizeof(struct iscsi_queue_req), 519 sizeof(struct iscsi_queue_req),
521 __alignof__(struct iscsi_queue_req), 0, NULL); 520 __alignof__(struct iscsi_queue_req), 0, NULL);
522 if (!lio_qr_cache) { 521 if (!lio_qr_cache) {
523 pr_err("nable to kmem_cache_create() for" 522 pr_err("nable to kmem_cache_create() for"
524 " lio_qr_cache\n"); 523 " lio_qr_cache\n");
525 goto cmd_out; 524 goto cmd_out;
526 } 525 }
527 526
528 lio_dr_cache = kmem_cache_create("lio_dr_cache", 527 lio_dr_cache = kmem_cache_create("lio_dr_cache",
529 sizeof(struct iscsi_datain_req), 528 sizeof(struct iscsi_datain_req),
530 __alignof__(struct iscsi_datain_req), 0, NULL); 529 __alignof__(struct iscsi_datain_req), 0, NULL);
531 if (!lio_dr_cache) { 530 if (!lio_dr_cache) {
532 pr_err("Unable to kmem_cache_create() for" 531 pr_err("Unable to kmem_cache_create() for"
533 " lio_dr_cache\n"); 532 " lio_dr_cache\n");
534 goto qr_out; 533 goto qr_out;
535 } 534 }
536 535
537 lio_ooo_cache = kmem_cache_create("lio_ooo_cache", 536 lio_ooo_cache = kmem_cache_create("lio_ooo_cache",
538 sizeof(struct iscsi_ooo_cmdsn), 537 sizeof(struct iscsi_ooo_cmdsn),
539 __alignof__(struct iscsi_ooo_cmdsn), 0, NULL); 538 __alignof__(struct iscsi_ooo_cmdsn), 0, NULL);
540 if (!lio_ooo_cache) { 539 if (!lio_ooo_cache) {
541 pr_err("Unable to kmem_cache_create() for" 540 pr_err("Unable to kmem_cache_create() for"
542 " lio_ooo_cache\n"); 541 " lio_ooo_cache\n");
543 goto dr_out; 542 goto dr_out;
544 } 543 }
545 544
546 lio_r2t_cache = kmem_cache_create("lio_r2t_cache", 545 lio_r2t_cache = kmem_cache_create("lio_r2t_cache",
547 sizeof(struct iscsi_r2t), __alignof__(struct iscsi_r2t), 546 sizeof(struct iscsi_r2t), __alignof__(struct iscsi_r2t),
548 0, NULL); 547 0, NULL);
549 if (!lio_r2t_cache) { 548 if (!lio_r2t_cache) {
550 pr_err("Unable to kmem_cache_create() for" 549 pr_err("Unable to kmem_cache_create() for"
551 " lio_r2t_cache\n"); 550 " lio_r2t_cache\n");
552 goto ooo_out; 551 goto ooo_out;
553 } 552 }
554 553
555 if (iscsit_load_discovery_tpg() < 0) 554 if (iscsit_load_discovery_tpg() < 0)
556 goto r2t_out; 555 goto r2t_out;
557 556
558 return ret; 557 return ret;
559 r2t_out: 558 r2t_out:
560 kmem_cache_destroy(lio_r2t_cache); 559 kmem_cache_destroy(lio_r2t_cache);
561 ooo_out: 560 ooo_out:
562 kmem_cache_destroy(lio_ooo_cache); 561 kmem_cache_destroy(lio_ooo_cache);
563 dr_out: 562 dr_out:
564 kmem_cache_destroy(lio_dr_cache); 563 kmem_cache_destroy(lio_dr_cache);
565 qr_out: 564 qr_out:
566 kmem_cache_destroy(lio_qr_cache); 565 kmem_cache_destroy(lio_qr_cache);
567 cmd_out: 566 cmd_out:
568 kmem_cache_destroy(lio_cmd_cache); 567 kmem_cache_destroy(lio_cmd_cache);
569 ts_out2: 568 ts_out2:
570 iscsi_deallocate_thread_sets(); 569 iscsi_deallocate_thread_sets();
571 ts_out1: 570 ts_out1:
572 iscsi_thread_set_free(); 571 iscsi_thread_set_free();
573 configfs_out: 572 configfs_out:
574 iscsi_target_deregister_configfs(); 573 iscsi_target_deregister_configfs();
575 out: 574 out:
576 kfree(iscsit_global); 575 kfree(iscsit_global);
577 return -ENOMEM; 576 return -ENOMEM;
578 } 577 }
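The init path above unwinds with the usual goto ladder: each label undoes exactly one successful step, in reverse order of acquisition, so a failure at any point frees only what already exists. A self-contained sketch of the shape, using plain malloc/free as stand-in resources:

#include <stdio.h>
#include <stdlib.h>

/* Acquire three stand-in resources; on failure, unwind only the
 * steps that already succeeded, in reverse order. */
static int init_all(void)
{
	char *a, *b, *c;

	a = malloc(16);
	if (!a)
		goto out;
	b = malloc(16);
	if (!b)
		goto free_a;
	c = malloc(16);
	if (!c)
		goto free_b;

	printf("all resources acquired\n");
	free(c);
	free(b);
	free(a);
	return 0;

free_b:
	free(b);	/* reached only if c's allocation failed */
free_a:
	free(a);	/* fall-through: each label undoes one earlier step */
out:
	return -1;
}

int main(void)
{
	return init_all() ? 1 : 0;
}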
579 578
580 static void __exit iscsi_target_cleanup_module(void) 579 static void __exit iscsi_target_cleanup_module(void)
581 { 580 {
582 iscsi_deallocate_thread_sets(); 581 iscsi_deallocate_thread_sets();
583 iscsi_thread_set_free(); 582 iscsi_thread_set_free();
584 iscsit_release_discovery_tpg(); 583 iscsit_release_discovery_tpg();
585 kmem_cache_destroy(lio_cmd_cache); 584 kmem_cache_destroy(lio_cmd_cache);
586 kmem_cache_destroy(lio_qr_cache); 585 kmem_cache_destroy(lio_qr_cache);
587 kmem_cache_destroy(lio_dr_cache); 586 kmem_cache_destroy(lio_dr_cache);
588 kmem_cache_destroy(lio_ooo_cache); 587 kmem_cache_destroy(lio_ooo_cache);
589 kmem_cache_destroy(lio_r2t_cache); 588 kmem_cache_destroy(lio_r2t_cache);
590 589
591 iscsi_target_deregister_configfs(); 590 iscsi_target_deregister_configfs();
592 591
593 kfree(iscsit_global); 592 kfree(iscsit_global);
594 } 593 }
595 594
596 int iscsit_add_reject( 595 int iscsit_add_reject(
597 u8 reason, 596 u8 reason,
598 int fail_conn, 597 int fail_conn,
599 unsigned char *buf, 598 unsigned char *buf,
600 struct iscsi_conn *conn) 599 struct iscsi_conn *conn)
601 { 600 {
602 struct iscsi_cmd *cmd; 601 struct iscsi_cmd *cmd;
603 struct iscsi_reject *hdr; 602 struct iscsi_reject *hdr;
604 int ret; 603 int ret;
605 604
606 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); 605 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
607 if (!cmd) 606 if (!cmd)
608 return -1; 607 return -1;
609 608
610 cmd->iscsi_opcode = ISCSI_OP_REJECT; 609 cmd->iscsi_opcode = ISCSI_OP_REJECT;
611 if (fail_conn) 610 if (fail_conn)
612 cmd->cmd_flags |= ICF_REJECT_FAIL_CONN; 611 cmd->cmd_flags |= ICF_REJECT_FAIL_CONN;
613 612
614 hdr = (struct iscsi_reject *) cmd->pdu; 613 hdr = (struct iscsi_reject *) cmd->pdu;
615 hdr->reason = reason; 614 hdr->reason = reason;
616 615
617 cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL); 616 cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
618 if (!cmd->buf_ptr) { 617 if (!cmd->buf_ptr) {
619 pr_err("Unable to allocate memory for cmd->buf_ptr\n"); 618 pr_err("Unable to allocate memory for cmd->buf_ptr\n");
620 iscsit_release_cmd(cmd); 619 iscsit_release_cmd(cmd);
621 return -1; 620 return -1;
622 } 621 }
623 622
624 spin_lock_bh(&conn->cmd_lock); 623 spin_lock_bh(&conn->cmd_lock);
625 list_add_tail(&cmd->i_list, &conn->conn_cmd_list); 624 list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
626 spin_unlock_bh(&conn->cmd_lock); 625 spin_unlock_bh(&conn->cmd_lock);
627 626
628 cmd->i_state = ISTATE_SEND_REJECT; 627 cmd->i_state = ISTATE_SEND_REJECT;
629 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); 628 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
630 629
631 ret = wait_for_completion_interruptible(&cmd->reject_comp); 630 ret = wait_for_completion_interruptible(&cmd->reject_comp);
632 if (ret != 0) 631 if (ret != 0)
633 return -1; 632 return -1;
634 633
635 return (!fail_conn) ? 0 : -1; 634 return (!fail_conn) ? 0 : -1;
636 } 635 }
637 636
638 int iscsit_add_reject_from_cmd( 637 int iscsit_add_reject_from_cmd(
639 u8 reason, 638 u8 reason,
640 int fail_conn, 639 int fail_conn,
641 int add_to_conn, 640 int add_to_conn,
642 unsigned char *buf, 641 unsigned char *buf,
643 struct iscsi_cmd *cmd) 642 struct iscsi_cmd *cmd)
644 { 643 {
645 struct iscsi_conn *conn; 644 struct iscsi_conn *conn;
646 struct iscsi_reject *hdr; 645 struct iscsi_reject *hdr;
647 int ret; 646 int ret;
648 647
649 if (!cmd->conn) { 648 if (!cmd->conn) {
650 pr_err("cmd->conn is NULL for ITT: 0x%08x\n", 649 pr_err("cmd->conn is NULL for ITT: 0x%08x\n",
651 cmd->init_task_tag); 650 cmd->init_task_tag);
652 return -1; 651 return -1;
653 } 652 }
654 conn = cmd->conn; 653 conn = cmd->conn;
655 654
656 cmd->iscsi_opcode = ISCSI_OP_REJECT; 655 cmd->iscsi_opcode = ISCSI_OP_REJECT;
657 if (fail_conn) 656 if (fail_conn)
658 cmd->cmd_flags |= ICF_REJECT_FAIL_CONN; 657 cmd->cmd_flags |= ICF_REJECT_FAIL_CONN;
659 658
660 hdr = (struct iscsi_reject *) cmd->pdu; 659 hdr = (struct iscsi_reject *) cmd->pdu;
661 hdr->reason = reason; 660 hdr->reason = reason;
662 661
663 cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL); 662 cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
664 if (!cmd->buf_ptr) { 663 if (!cmd->buf_ptr) {
665 pr_err("Unable to allocate memory for cmd->buf_ptr\n"); 664 pr_err("Unable to allocate memory for cmd->buf_ptr\n");
666 iscsit_release_cmd(cmd); 665 iscsit_release_cmd(cmd);
667 return -1; 666 return -1;
668 } 667 }
669 668
670 if (add_to_conn) { 669 if (add_to_conn) {
671 spin_lock_bh(&conn->cmd_lock); 670 spin_lock_bh(&conn->cmd_lock);
672 list_add_tail(&cmd->i_list, &conn->conn_cmd_list); 671 list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
673 spin_unlock_bh(&conn->cmd_lock); 672 spin_unlock_bh(&conn->cmd_lock);
674 } 673 }
675 674
676 cmd->i_state = ISTATE_SEND_REJECT; 675 cmd->i_state = ISTATE_SEND_REJECT;
677 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); 676 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
678 677
679 ret = wait_for_completion_interruptible(&cmd->reject_comp); 678 ret = wait_for_completion_interruptible(&cmd->reject_comp);
680 if (ret != 0) 679 if (ret != 0)
681 return -1; 680 return -1;
682 681
683 return (!fail_conn) ? 0 : -1; 682 return (!fail_conn) ? 0 : -1;
684 } 683 }
685 684
686 /* 685 /*
687 * Map some portion of the allocated scatterlist to an iovec, suitable for 686 * Map some portion of the allocated scatterlist to an iovec, suitable for
688 * kernel sockets to copy data in/out. This handles both pages and slab-allocated 687 * kernel sockets to copy data in/out. This handles both pages and slab-allocated
689 * buffers, since we have been tricky and mapped t_mem_sg to the buffer in 688 * buffers, since we have been tricky and mapped t_mem_sg to the buffer in
690 * either case (see iscsit_alloc_buffs()). 689 * either case (see iscsit_alloc_buffs()).
691 */ 690 */
692 static int iscsit_map_iovec( 691 static int iscsit_map_iovec(
693 struct iscsi_cmd *cmd, 692 struct iscsi_cmd *cmd,
694 struct kvec *iov, 693 struct kvec *iov,
695 u32 data_offset, 694 u32 data_offset,
696 u32 data_length) 695 u32 data_length)
697 { 696 {
698 u32 i = 0; 697 u32 i = 0;
699 struct scatterlist *sg; 698 struct scatterlist *sg;
700 unsigned int page_off; 699 unsigned int page_off;
701 700
702 /* 701 /*
703 * We have a private mapping of the allocated pages in t_mem_sg. 702 * We have a private mapping of the allocated pages in t_mem_sg.
704 * At this point, we also know each contains a page. 703 * At this point, we also know each contains a page.
705 */ 704 */
706 sg = &cmd->t_mem_sg[data_offset / PAGE_SIZE]; 705 sg = &cmd->t_mem_sg[data_offset / PAGE_SIZE];
707 page_off = (data_offset % PAGE_SIZE); 706 page_off = (data_offset % PAGE_SIZE);
708 707
709 cmd->first_data_sg = sg; 708 cmd->first_data_sg = sg;
710 cmd->first_data_sg_off = page_off; 709 cmd->first_data_sg_off = page_off;
711 710
712 while (data_length) { 711 while (data_length) {
713 u32 cur_len = min_t(u32, data_length, sg->length - page_off); 712 u32 cur_len = min_t(u32, data_length, sg->length - page_off);
714 713
715 iov[i].iov_base = kmap(sg_page(sg)) + sg->offset + page_off; 714 iov[i].iov_base = kmap(sg_page(sg)) + sg->offset + page_off;
716 iov[i].iov_len = cur_len; 715 iov[i].iov_len = cur_len;
717 716
718 data_length -= cur_len; 717 data_length -= cur_len;
719 page_off = 0; 718 page_off = 0;
720 sg = sg_next(sg); 719 sg = sg_next(sg);
721 i++; 720 i++;
722 } 721 }
723 722
724 cmd->kmapped_nents = i; 723 cmd->kmapped_nents = i;
725 724
726 return i; 725 return i;
727 } 726 }
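iscsit_map_iovec() indexes straight to the first scatterlist entry touched by data_offset, then fills one kvec per entry, clipping the first and last entries to the requested window. The same walk over a flat buffer split into fixed-size chunks, as a userspace sketch; struct iovec stands in for struct kvec, and CHUNK for PAGE_SIZE.

#include <stdio.h>
#include <sys/uio.h>

#define CHUNK 4096	/* stands in for PAGE_SIZE */

/* Map [off, off+len) of a chunked buffer into iovecs, one entry
 * per chunk touched; returns the number of entries filled. */
static int map_iovec(char *buf, struct iovec *iov, size_t off, size_t len)
{
	size_t idx = off / CHUNK;	/* first chunk touched */
	size_t pgoff = off % CHUNK;	/* offset within it */
	int i = 0;

	while (len) {
		size_t cur = len < CHUNK - pgoff ? len : CHUNK - pgoff;

		iov[i].iov_base = buf + idx * CHUNK + pgoff;
		iov[i].iov_len = cur;
		len -= cur;
		pgoff = 0;	/* later chunks are used from their start */
		idx++;
		i++;
	}
	return i;
}

int main(void)
{
	static char buf[3 * CHUNK];
	struct iovec iov[4];
	int n = map_iovec(buf, iov, CHUNK - 100, 300);

	printf("%d iovecs\n", n);	/* 2: 100 bytes + 200 bytes */
	return 0;
}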
728 727
729 static void iscsit_unmap_iovec(struct iscsi_cmd *cmd) 728 static void iscsit_unmap_iovec(struct iscsi_cmd *cmd)
730 { 729 {
731 u32 i; 730 u32 i;
732 struct scatterlist *sg; 731 struct scatterlist *sg;
733 732
734 sg = cmd->first_data_sg; 733 sg = cmd->first_data_sg;
735 734
736 for (i = 0; i < cmd->kmapped_nents; i++) 735 for (i = 0; i < cmd->kmapped_nents; i++)
737 kunmap(sg_page(&sg[i])); 736 kunmap(sg_page(&sg[i]));
738 } 737 }
739 738
740 static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn) 739 static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
741 { 740 {
742 struct iscsi_cmd *cmd; 741 struct iscsi_cmd *cmd;
743 742
744 conn->exp_statsn = exp_statsn; 743 conn->exp_statsn = exp_statsn;
745 744
746 spin_lock_bh(&conn->cmd_lock); 745 spin_lock_bh(&conn->cmd_lock);
747 list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) { 746 list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
748 spin_lock(&cmd->istate_lock); 747 spin_lock(&cmd->istate_lock);
749 if ((cmd->i_state == ISTATE_SENT_STATUS) && 748 if ((cmd->i_state == ISTATE_SENT_STATUS) &&
750 (cmd->stat_sn < exp_statsn)) { 749 (cmd->stat_sn < exp_statsn)) {
751 cmd->i_state = ISTATE_REMOVE; 750 cmd->i_state = ISTATE_REMOVE;
752 spin_unlock(&cmd->istate_lock); 751 spin_unlock(&cmd->istate_lock);
753 iscsit_add_cmd_to_immediate_queue(cmd, conn, 752 iscsit_add_cmd_to_immediate_queue(cmd, conn,
754 cmd->i_state); 753 cmd->i_state);
755 continue; 754 continue;
756 } 755 }
757 spin_unlock(&cmd->istate_lock); 756 spin_unlock(&cmd->istate_lock);
758 } 757 }
759 spin_unlock_bh(&conn->cmd_lock); 758 spin_unlock_bh(&conn->cmd_lock);
760 } 759 }
761 760
762 static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd) 761 static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd)
763 { 762 {
764 u32 iov_count = (cmd->se_cmd.t_data_nents == 0) ? 1 : 763 u32 iov_count = (cmd->se_cmd.t_data_nents == 0) ? 1 :
765 cmd->se_cmd.t_data_nents; 764 cmd->se_cmd.t_data_nents;
766 765
767 iov_count += ISCSI_IOV_DATA_BUFFER; 766 iov_count += ISCSI_IOV_DATA_BUFFER;
768 767
769 cmd->iov_data = kzalloc(iov_count * sizeof(struct kvec), GFP_KERNEL); 768 cmd->iov_data = kzalloc(iov_count * sizeof(struct kvec), GFP_KERNEL);
770 if (!cmd->iov_data) { 769 if (!cmd->iov_data) {
771 pr_err("Unable to allocate cmd->iov_data\n"); 770 pr_err("Unable to allocate cmd->iov_data\n");
772 return -ENOMEM; 771 return -ENOMEM;
773 } 772 }
774 773
775 cmd->orig_iov_data_count = iov_count; 774 cmd->orig_iov_data_count = iov_count;
776 return 0; 775 return 0;
777 } 776 }
778 777
779 static int iscsit_alloc_buffs(struct iscsi_cmd *cmd) 778 static int iscsit_alloc_buffs(struct iscsi_cmd *cmd)
780 { 779 {
781 struct scatterlist *sgl; 780 struct scatterlist *sgl;
782 u32 length = cmd->se_cmd.data_length; 781 u32 length = cmd->se_cmd.data_length;
783 int nents = DIV_ROUND_UP(length, PAGE_SIZE); 782 int nents = DIV_ROUND_UP(length, PAGE_SIZE);
784 int i = 0, ret; 783 int i = 0, ret;
785 /* 784 /*
786 * If no SCSI payload is present, allocate the default iovecs used for 785 * If no SCSI payload is present, allocate the default iovecs used for
787 * iSCSI PDU Header 786 * iSCSI PDU Header
788 */ 787 */
789 if (!length) 788 if (!length)
790 return iscsit_allocate_iovecs(cmd); 789 return iscsit_allocate_iovecs(cmd);
791 790
792 sgl = kzalloc(sizeof(*sgl) * nents, GFP_KERNEL); 791 sgl = kzalloc(sizeof(*sgl) * nents, GFP_KERNEL);
793 if (!sgl) 792 if (!sgl)
794 return -ENOMEM; 793 return -ENOMEM;
795 794
796 sg_init_table(sgl, nents); 795 sg_init_table(sgl, nents);
797 796
798 while (length) { 797 while (length) {
799 int buf_size = min_t(int, length, PAGE_SIZE); 798 int buf_size = min_t(int, length, PAGE_SIZE);
800 struct page *page; 799 struct page *page;
801 800
802 page = alloc_page(GFP_KERNEL | __GFP_ZERO); 801 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
803 if (!page) 802 if (!page)
804 goto page_alloc_failed; 803 goto page_alloc_failed;
805 804
806 sg_set_page(&sgl[i], page, buf_size, 0); 805 sg_set_page(&sgl[i], page, buf_size, 0);
807 806
808 length -= buf_size; 807 length -= buf_size;
809 i++; 808 i++;
810 } 809 }
811 810
812 cmd->t_mem_sg = sgl; 811 cmd->t_mem_sg = sgl;
813 cmd->t_mem_sg_nents = nents; 812 cmd->t_mem_sg_nents = nents;
814 813
815 /* BIDI ops not supported */ 814 /* BIDI ops not supported */
816 815
817 /* Tell the core about our preallocated memory */ 816 /* Tell the core about our preallocated memory */
818 transport_generic_map_mem_to_cmd(&cmd->se_cmd, sgl, nents, NULL, 0); 817 transport_generic_map_mem_to_cmd(&cmd->se_cmd, sgl, nents, NULL, 0);
819 /* 818 /*
820 * Allocate iovecs for SCSI payload after transport_generic_map_mem_to_cmd 819 * Allocate iovecs for SCSI payload after transport_generic_map_mem_to_cmd
821 * so that cmd->se_cmd.t_tasks_se_num has been set. 820 * so that cmd->se_cmd.t_tasks_se_num has been set.
822 */ 821 */
823 ret = iscsit_allocate_iovecs(cmd); 822 ret = iscsit_allocate_iovecs(cmd);
824 if (ret < 0) 823 if (ret < 0)
825 goto page_alloc_failed; 824 goto page_alloc_failed;
826 825
827 return 0; 826 return 0;
828 827
829 page_alloc_failed: 828 page_alloc_failed:
830 while (i > 0) { 829 while (i > 0) {
831 i--; 830 i--;
832 __free_page(sg_page(&sgl[i])); 831 __free_page(sg_page(&sgl[i]));
833 } 832 }
834 kfree(cmd->t_mem_sg); 833 kfree(cmd->t_mem_sg);
835 cmd->t_mem_sg = NULL; 834 cmd->t_mem_sg = NULL;
836 return -ENOMEM; 835 return -ENOMEM;
837 } 836 }
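When an allocation fails partway through a loop like the one above, only slots 0..i-1 hold pages, so the unwind must free exactly those and stop short of index i, whose slot was never filled. A standalone sketch of that partial-allocation unwind; alloc_n() is an illustrative helper, not from the driver.

#include <stdlib.h>

/* Allocate n fixed-size buffers, freeing only the ones already
 * obtained if an allocation fails partway through. */
static void **alloc_n(int n, size_t sz)
{
	void **v = calloc(n, sizeof(*v));
	int i;

	if (!v)
		return NULL;
	for (i = 0; i < n; i++) {
		v[i] = malloc(sz);
		if (!v[i])
			goto undo;
	}
	return v;

undo:
	while (i > 0) {
		i--;
		free(v[i]);	/* only slots below the failed index were filled */
	}
	free(v);
	return NULL;
}

int main(void)
{
	void **v = alloc_n(4, 64);

	if (v) {
		for (int i = 0; i < 4; i++)
			free(v[i]);
		free(v);
	}
	return 0;
}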
838 837
839 static int iscsit_handle_scsi_cmd( 838 static int iscsit_handle_scsi_cmd(
840 struct iscsi_conn *conn, 839 struct iscsi_conn *conn,
841 unsigned char *buf) 840 unsigned char *buf)
842 { 841 {
843 int data_direction, cmdsn_ret = 0, immed_ret, ret, transport_ret; 842 int data_direction, cmdsn_ret = 0, immed_ret, ret, transport_ret;
844 int dump_immediate_data = 0, send_check_condition = 0, payload_length; 843 int dump_immediate_data = 0, send_check_condition = 0, payload_length;
845 struct iscsi_cmd *cmd = NULL; 844 struct iscsi_cmd *cmd = NULL;
846 struct iscsi_scsi_req *hdr; 845 struct iscsi_scsi_req *hdr;
847 846
848 spin_lock_bh(&conn->sess->session_stats_lock); 847 spin_lock_bh(&conn->sess->session_stats_lock);
849 conn->sess->cmd_pdus++; 848 conn->sess->cmd_pdus++;
850 if (conn->sess->se_sess->se_node_acl) { 849 if (conn->sess->se_sess->se_node_acl) {
851 spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock); 850 spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock);
852 conn->sess->se_sess->se_node_acl->num_cmds++; 851 conn->sess->se_sess->se_node_acl->num_cmds++;
853 spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock); 852 spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock);
854 } 853 }
855 spin_unlock_bh(&conn->sess->session_stats_lock); 854 spin_unlock_bh(&conn->sess->session_stats_lock);
856 855
857 hdr = (struct iscsi_scsi_req *) buf; 856 hdr = (struct iscsi_scsi_req *) buf;
858 payload_length = ntoh24(hdr->dlength); 857 payload_length = ntoh24(hdr->dlength);
859 hdr->itt = be32_to_cpu(hdr->itt); 858 hdr->itt = be32_to_cpu(hdr->itt);
860 hdr->data_length = be32_to_cpu(hdr->data_length); 859 hdr->data_length = be32_to_cpu(hdr->data_length);
861 hdr->cmdsn = be32_to_cpu(hdr->cmdsn); 860 hdr->cmdsn = be32_to_cpu(hdr->cmdsn);
862 hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn); 861 hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
863 862
864 /* FIXME; Add checks for AdditionalHeaderSegment */ 863 /* FIXME; Add checks for AdditionalHeaderSegment */
865 864
866 if (!(hdr->flags & ISCSI_FLAG_CMD_WRITE) && 865 if (!(hdr->flags & ISCSI_FLAG_CMD_WRITE) &&
867 !(hdr->flags & ISCSI_FLAG_CMD_FINAL)) { 866 !(hdr->flags & ISCSI_FLAG_CMD_FINAL)) {
868 pr_err("ISCSI_FLAG_CMD_WRITE & ISCSI_FLAG_CMD_FINAL" 867 pr_err("ISCSI_FLAG_CMD_WRITE & ISCSI_FLAG_CMD_FINAL"
869 " not set. Bad iSCSI Initiator.\n"); 868 " not set. Bad iSCSI Initiator.\n");
870 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1, 869 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
871 buf, conn); 870 buf, conn);
872 } 871 }
873 872
874 if (((hdr->flags & ISCSI_FLAG_CMD_READ) || 873 if (((hdr->flags & ISCSI_FLAG_CMD_READ) ||
875 (hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) { 874 (hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) {
876 /* 875 /*
877 * VMware ESX v3.0 uses a modified Cisco Initiator (v3.4.2) 876 * VMware ESX v3.0 uses a modified Cisco Initiator (v3.4.2)
878 * that adds support for RESERVE/RELEASE. There is a bug 877 * that adds support for RESERVE/RELEASE. There is a bug
879 * in this new functionality that sets R/W bits when 878 * in this new functionality that sets R/W bits when
880 * neither CDB carries any READ or WRITE data payloads. 879 * neither CDB carries any READ or WRITE data payloads.
881 */ 880 */
882 if ((hdr->cdb[0] == 0x16) || (hdr->cdb[0] == 0x17)) { 881 if ((hdr->cdb[0] == 0x16) || (hdr->cdb[0] == 0x17)) {
883 hdr->flags &= ~ISCSI_FLAG_CMD_READ; 882 hdr->flags &= ~ISCSI_FLAG_CMD_READ;
884 hdr->flags &= ~ISCSI_FLAG_CMD_WRITE; 883 hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
885 goto done; 884 goto done;
886 } 885 }
887 886
888 pr_err("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE" 887 pr_err("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
889 " set when Expected Data Transfer Length is 0 for" 888 " set when Expected Data Transfer Length is 0 for"
890 " CDB: 0x%02x. Bad iSCSI Initiator.\n", hdr->cdb[0]); 889 " CDB: 0x%02x. Bad iSCSI Initiator.\n", hdr->cdb[0]);
891 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1, 890 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
892 buf, conn); 891 buf, conn);
893 } 892 }
894 done: 893 done:
895 894
896 if (!(hdr->flags & ISCSI_FLAG_CMD_READ) && 895 if (!(hdr->flags & ISCSI_FLAG_CMD_READ) &&
897 !(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) { 896 !(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) {
898 pr_err("ISCSI_FLAG_CMD_READ and/or ISCSI_FLAG_CMD_WRITE" 897 pr_err("ISCSI_FLAG_CMD_READ and/or ISCSI_FLAG_CMD_WRITE"
899 " MUST be set if Expected Data Transfer Length is not 0." 898 " MUST be set if Expected Data Transfer Length is not 0."
900 " Bad iSCSI Initiator\n"); 899 " Bad iSCSI Initiator\n");
901 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1, 900 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
902 buf, conn); 901 buf, conn);
903 } 902 }
904 903
905 if ((hdr->flags & ISCSI_FLAG_CMD_READ) && 904 if ((hdr->flags & ISCSI_FLAG_CMD_READ) &&
906 (hdr->flags & ISCSI_FLAG_CMD_WRITE)) { 905 (hdr->flags & ISCSI_FLAG_CMD_WRITE)) {
907 pr_err("Bidirectional operations not supported!\n"); 906 pr_err("Bidirectional operations not supported!\n");
908 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1, 907 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
909 buf, conn); 908 buf, conn);
910 } 909 }
911 910
912 if (hdr->opcode & ISCSI_OP_IMMEDIATE) { 911 if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
913 pr_err("Illegally set Immediate Bit in iSCSI Initiator" 912 pr_err("Illegally set Immediate Bit in iSCSI Initiator"
914 " Scsi Command PDU.\n"); 913 " Scsi Command PDU.\n");
915 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1, 914 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
916 buf, conn); 915 buf, conn);
917 } 916 }
918 917
919 if (payload_length && !conn->sess->sess_ops->ImmediateData) { 918 if (payload_length && !conn->sess->sess_ops->ImmediateData) {
920 pr_err("ImmediateData=No but DataSegmentLength=%u," 919 pr_err("ImmediateData=No but DataSegmentLength=%u,"
921 " protocol error.\n", payload_length); 920 " protocol error.\n", payload_length);
922 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 921 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
923 buf, conn); 922 buf, conn);
924 } 923 }
925 924
926 if ((hdr->data_length == payload_length) && 925 if ((hdr->data_length == payload_length) &&
927 (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))) { 926 (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))) {
928 pr_err("Expected Data Transfer Length and Length of" 927 pr_err("Expected Data Transfer Length and Length of"
929 " Immediate Data are the same, but ISCSI_FLAG_CMD_FINAL" 928 " Immediate Data are the same, but ISCSI_FLAG_CMD_FINAL"
930 " bit is not set protocol error\n"); 929 " bit is not set protocol error\n");
931 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 930 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
932 buf, conn); 931 buf, conn);
933 } 932 }
934 933
935 if (payload_length > hdr->data_length) { 934 if (payload_length > hdr->data_length) {
936 pr_err("DataSegmentLength: %u is greater than" 935 pr_err("DataSegmentLength: %u is greater than"
937 " EDTL: %u, protocol error.\n", payload_length, 936 " EDTL: %u, protocol error.\n", payload_length,
938 hdr->data_length); 937 hdr->data_length);
939 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 938 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
940 buf, conn); 939 buf, conn);
941 } 940 }
942 941
943 if (payload_length > conn->conn_ops->MaxRecvDataSegmentLength) { 942 if (payload_length > conn->conn_ops->MaxRecvDataSegmentLength) {
944 pr_err("DataSegmentLength: %u is greater than" 943 pr_err("DataSegmentLength: %u is greater than"
945 " MaxRecvDataSegmentLength: %u, protocol error.\n", 944 " MaxRecvDataSegmentLength: %u, protocol error.\n",
946 payload_length, conn->conn_ops->MaxRecvDataSegmentLength); 945 payload_length, conn->conn_ops->MaxRecvDataSegmentLength);
947 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 946 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
948 buf, conn); 947 buf, conn);
949 } 948 }
950 949
951 if (payload_length > conn->sess->sess_ops->FirstBurstLength) { 950 if (payload_length > conn->sess->sess_ops->FirstBurstLength) {
952 pr_err("DataSegmentLength: %u is greater than" 951 pr_err("DataSegmentLength: %u is greater than"
953 " FirstBurstLength: %u, protocol error.\n", 952 " FirstBurstLength: %u, protocol error.\n",
954 payload_length, conn->sess->sess_ops->FirstBurstLength); 953 payload_length, conn->sess->sess_ops->FirstBurstLength);
955 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1, 954 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
956 buf, conn); 955 buf, conn);
957 } 956 }
958 957
959 data_direction = (hdr->flags & ISCSI_FLAG_CMD_WRITE) ? DMA_TO_DEVICE : 958 data_direction = (hdr->flags & ISCSI_FLAG_CMD_WRITE) ? DMA_TO_DEVICE :
960 (hdr->flags & ISCSI_FLAG_CMD_READ) ? DMA_FROM_DEVICE : 959 (hdr->flags & ISCSI_FLAG_CMD_READ) ? DMA_FROM_DEVICE :
961 DMA_NONE; 960 DMA_NONE;
962 961
963 cmd = iscsit_allocate_se_cmd(conn, hdr->data_length, data_direction, 962 cmd = iscsit_allocate_se_cmd(conn, hdr->data_length, data_direction,
964 (hdr->flags & ISCSI_FLAG_CMD_ATTR_MASK)); 963 (hdr->flags & ISCSI_FLAG_CMD_ATTR_MASK));
965 if (!cmd) 964 if (!cmd)
966 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1, 965 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1,
967 buf, conn); 966 buf, conn);
968 967
969 pr_debug("Got SCSI Command, ITT: 0x%08x, CmdSN: 0x%08x," 968 pr_debug("Got SCSI Command, ITT: 0x%08x, CmdSN: 0x%08x,"
970 " ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt, 969 " ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt,
971 hdr->cmdsn, hdr->data_length, payload_length, conn->cid); 970 hdr->cmdsn, hdr->data_length, payload_length, conn->cid);
972 971
973 cmd->iscsi_opcode = ISCSI_OP_SCSI_CMD; 972 cmd->iscsi_opcode = ISCSI_OP_SCSI_CMD;
974 cmd->i_state = ISTATE_NEW_CMD; 973 cmd->i_state = ISTATE_NEW_CMD;
975 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0); 974 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
976 cmd->immediate_data = (payload_length) ? 1 : 0; 975 cmd->immediate_data = (payload_length) ? 1 : 0;
977 cmd->unsolicited_data = ((!(hdr->flags & ISCSI_FLAG_CMD_FINAL) && 976 cmd->unsolicited_data = ((!(hdr->flags & ISCSI_FLAG_CMD_FINAL) &&
978 (hdr->flags & ISCSI_FLAG_CMD_WRITE)) ? 1 : 0); 977 (hdr->flags & ISCSI_FLAG_CMD_WRITE)) ? 1 : 0);
979 if (cmd->unsolicited_data) 978 if (cmd->unsolicited_data)
980 cmd->cmd_flags |= ICF_NON_IMMEDIATE_UNSOLICITED_DATA; 979 cmd->cmd_flags |= ICF_NON_IMMEDIATE_UNSOLICITED_DATA;
981 980
982 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt; 981 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
983 if (hdr->flags & ISCSI_FLAG_CMD_READ) { 982 if (hdr->flags & ISCSI_FLAG_CMD_READ) {
984 spin_lock_bh(&conn->sess->ttt_lock); 983 spin_lock_bh(&conn->sess->ttt_lock);
985 cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++; 984 cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
986 if (cmd->targ_xfer_tag == 0xFFFFFFFF) 985 if (cmd->targ_xfer_tag == 0xFFFFFFFF)
987 cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++; 986 cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
988 spin_unlock_bh(&conn->sess->ttt_lock); 987 spin_unlock_bh(&conn->sess->ttt_lock);
989 } else if (hdr->flags & ISCSI_FLAG_CMD_WRITE) 988 } else if (hdr->flags & ISCSI_FLAG_CMD_WRITE)
990 cmd->targ_xfer_tag = 0xFFFFFFFF; 989 cmd->targ_xfer_tag = 0xFFFFFFFF;
991 cmd->cmd_sn = hdr->cmdsn; 990 cmd->cmd_sn = hdr->cmdsn;
992 cmd->exp_stat_sn = hdr->exp_statsn; 991 cmd->exp_stat_sn = hdr->exp_statsn;
993 cmd->first_burst_len = payload_length; 992 cmd->first_burst_len = payload_length;
994 993
995 if (cmd->data_direction == DMA_FROM_DEVICE) { 994 if (cmd->data_direction == DMA_FROM_DEVICE) {
996 struct iscsi_datain_req *dr; 995 struct iscsi_datain_req *dr;
997 996
998 dr = iscsit_allocate_datain_req(); 997 dr = iscsit_allocate_datain_req();
999 if (!dr) 998 if (!dr)
1000 return iscsit_add_reject_from_cmd( 999 return iscsit_add_reject_from_cmd(
1001 ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1000 ISCSI_REASON_BOOKMARK_NO_RESOURCES,
1002 1, 1, buf, cmd); 1001 1, 1, buf, cmd);
1003 1002
1004 iscsit_attach_datain_req(cmd, dr); 1003 iscsit_attach_datain_req(cmd, dr);
1005 } 1004 }
1006 1005
1007 /* 1006 /*
1008 * The CDB is going to a struct se_device. 1007 * The CDB is going to a struct se_device.
1009 */ 1008 */
1010 ret = iscsit_get_lun_for_cmd(cmd, hdr->cdb, 1009 ret = iscsit_get_lun_for_cmd(cmd, hdr->cdb,
1011 get_unaligned_le64(&hdr->lun)); 1010 get_unaligned_le64(&hdr->lun));
1012 if (ret < 0) { 1011 if (ret < 0) {
1013 if (cmd->se_cmd.scsi_sense_reason == TCM_NON_EXISTENT_LUN) { 1012 if (cmd->se_cmd.scsi_sense_reason == TCM_NON_EXISTENT_LUN) {
1014 pr_debug("Responding to non-acl'ed," 1013 pr_debug("Responding to non-acl'ed,"
1015 " non-existent or non-exported iSCSI LUN:" 1014 " non-existent or non-exported iSCSI LUN:"
1016 " 0x%016Lx\n", get_unaligned_le64(&hdr->lun)); 1015 " 0x%016Lx\n", get_unaligned_le64(&hdr->lun));
1017 } 1016 }
1018 send_check_condition = 1; 1017 send_check_condition = 1;
1019 goto attach_cmd; 1018 goto attach_cmd;
1020 } 1019 }
1021 /* 1020 /*
1022 * The Initiator Node has access to the LUN (the addressing method 1021 * The Initiator Node has access to the LUN (the addressing method
1023 * is handled inside of iscsit_get_lun_for_cmd()). Now it's time to 1022 * is handled inside of iscsit_get_lun_for_cmd()). Now it's time to
1024 * allocate 1->N transport tasks (depending on sector count and 1023 * allocate 1->N transport tasks (depending on sector count and
1025 * maximum request size the physical HBA(s) can handle). 1024 * maximum request size the physical HBA(s) can handle).
1026 */ 1025 */
1027 transport_ret = transport_generic_allocate_tasks(&cmd->se_cmd, hdr->cdb); 1026 transport_ret = transport_generic_allocate_tasks(&cmd->se_cmd, hdr->cdb);
1028 if (transport_ret == -ENOMEM) { 1027 if (transport_ret == -ENOMEM) {
1029 return iscsit_add_reject_from_cmd( 1028 return iscsit_add_reject_from_cmd(
1030 ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1029 ISCSI_REASON_BOOKMARK_NO_RESOURCES,
1031 1, 1, buf, cmd); 1030 1, 1, buf, cmd);
1032 } else if (transport_ret == -EINVAL) { 1031 } else if (transport_ret == -EINVAL) {
1033 /* 1032 /*
1034 * Unsupported SAM Opcode. CHECK_CONDITION will be sent 1033 * Unsupported SAM Opcode. CHECK_CONDITION will be sent
1035 * in iscsit_execute_cmd() during the CmdSN OOO Execution 1034 * in iscsit_execute_cmd() during the CmdSN OOO Execution
1036 * Mechanism. 1035 * Mechanism.
1037 */ 1036 */
1038 send_check_condition = 1; 1037 send_check_condition = 1;
1039 } else { 1038 } else {
1040 cmd->data_length = cmd->se_cmd.data_length; 1039 cmd->data_length = cmd->se_cmd.data_length;
1041 1040
1042 if (iscsit_decide_list_to_build(cmd, payload_length) < 0) 1041 if (iscsit_decide_list_to_build(cmd, payload_length) < 0)
1043 return iscsit_add_reject_from_cmd( 1042 return iscsit_add_reject_from_cmd(
1044 ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1043 ISCSI_REASON_BOOKMARK_NO_RESOURCES,
1045 1, 1, buf, cmd); 1044 1, 1, buf, cmd);
1046 } 1045 }
1047 1046
1048 attach_cmd: 1047 attach_cmd:
1049 spin_lock_bh(&conn->cmd_lock); 1048 spin_lock_bh(&conn->cmd_lock);
1050 list_add_tail(&cmd->i_list, &conn->conn_cmd_list); 1049 list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
1051 spin_unlock_bh(&conn->cmd_lock); 1050 spin_unlock_bh(&conn->cmd_lock);
1052 /* 1051 /*
1053 * Check if we need to delay processing because of ALUA 1052 * Check if we need to delay processing because of ALUA
1054 * Active/NonOptimized primary access state.. 1053 * Active/NonOptimized primary access state..
1055 */ 1054 */
1056 core_alua_check_nonop_delay(&cmd->se_cmd); 1055 core_alua_check_nonop_delay(&cmd->se_cmd);
1057 /* 1056 /*
1058 * Allocate and setup SGL used with transport_generic_map_mem_to_cmd(). 1057 * Allocate and setup SGL used with transport_generic_map_mem_to_cmd().
1059 * also call iscsit_allocate_iovecs() 1058 * also call iscsit_allocate_iovecs()
1060 */ 1059 */
1061 ret = iscsit_alloc_buffs(cmd); 1060 ret = iscsit_alloc_buffs(cmd);
1062 if (ret < 0) 1061 if (ret < 0)
1063 return iscsit_add_reject_from_cmd( 1062 return iscsit_add_reject_from_cmd(
1064 ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1063 ISCSI_REASON_BOOKMARK_NO_RESOURCES,
1065 1, 1, buf, cmd); 1064 1, 1, buf, cmd);
1066 /* 1065 /*
1067 * Check the CmdSN against ExpCmdSN/MaxCmdSN here if 1066 * Check the CmdSN against ExpCmdSN/MaxCmdSN here if
1068 * the Immediate Bit is not set, and no Immediate 1067 * the Immediate Bit is not set, and no Immediate
1069 * Data is attached. 1068 * Data is attached.
1070 * 1069 *
1071 * A PDU/CmdSN carrying Immediate Data can only 1070 * A PDU/CmdSN carrying Immediate Data can only
1072 * be processed after the DataCRC has passed. 1071 * be processed after the DataCRC has passed.
1073 * If the DataCRC fails, the CmdSN MUST NOT 1072 * If the DataCRC fails, the CmdSN MUST NOT
1074 * be acknowledged. (See below) 1073 * be acknowledged. (See below)
1075 */ 1074 */
1076 if (!cmd->immediate_data) { 1075 if (!cmd->immediate_data) {
1077 cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn); 1076 cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
1078 if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) 1077 if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
1079 return 0; 1078 return 0;
1080 else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) 1079 else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
1081 return iscsit_add_reject_from_cmd( 1080 return iscsit_add_reject_from_cmd(
1082 ISCSI_REASON_PROTOCOL_ERROR, 1081 ISCSI_REASON_PROTOCOL_ERROR,
1083 1, 0, buf, cmd); 1082 1, 0, buf, cmd);
1084 } 1083 }
1085 1084
1086 iscsit_ack_from_expstatsn(conn, hdr->exp_statsn); 1085 iscsit_ack_from_expstatsn(conn, hdr->exp_statsn);
1087 1086
1088 /* 1087 /*
1089 * If no Immediate Data is attached, it's OK to return now. 1088 * If no Immediate Data is attached, it's OK to return now.
1090 */ 1089 */
1091 if (!cmd->immediate_data) { 1090 if (!cmd->immediate_data) {
1092 if (send_check_condition) 1091 if (send_check_condition)
1093 return 0; 1092 return 0;
1094 1093
1095 if (cmd->unsolicited_data) { 1094 if (cmd->unsolicited_data) {
1096 iscsit_set_dataout_sequence_values(cmd); 1095 iscsit_set_dataout_sequence_values(cmd);
1097 1096
1098 spin_lock_bh(&cmd->dataout_timeout_lock); 1097 spin_lock_bh(&cmd->dataout_timeout_lock);
1099 iscsit_start_dataout_timer(cmd, cmd->conn); 1098 iscsit_start_dataout_timer(cmd, cmd->conn);
1100 spin_unlock_bh(&cmd->dataout_timeout_lock); 1099 spin_unlock_bh(&cmd->dataout_timeout_lock);
1101 } 1100 }
1102 1101
1103 return 0; 1102 return 0;
1104 } 1103 }
1105 1104
1106 /* 1105 /*
1107 * Early CHECK_CONDITIONs never make it to the transport processing 1106 * Early CHECK_CONDITIONs never make it to the transport processing
1108 * thread. They are processed in CmdSN order by 1107 * thread. They are processed in CmdSN order by
1109 * iscsit_check_received_cmdsn() below. 1108 * iscsit_check_received_cmdsn() below.
1110 */ 1109 */
1111 if (send_check_condition) { 1110 if (send_check_condition) {
1112 immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION; 1111 immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
1113 dump_immediate_data = 1; 1112 dump_immediate_data = 1;
1114 goto after_immediate_data; 1113 goto after_immediate_data;
1115 } 1114 }
1116 /* 1115 /*
1117 * Call directly into transport_generic_new_cmd() to perform 1116 * Call directly into transport_generic_new_cmd() to perform
1118 * the backend memory allocation. 1117 * the backend memory allocation.
1119 */ 1118 */
1120 ret = transport_generic_new_cmd(&cmd->se_cmd); 1119 ret = transport_generic_new_cmd(&cmd->se_cmd);
1121 if (ret < 0) { 1120 if (ret < 0) {
1122 immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION; 1121 immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
1123 dump_immediate_data = 1; 1122 dump_immediate_data = 1;
1124 goto after_immediate_data; 1123 goto after_immediate_data;
1125 } 1124 }
1126 1125
1127 immed_ret = iscsit_handle_immediate_data(cmd, buf, payload_length); 1126 immed_ret = iscsit_handle_immediate_data(cmd, buf, payload_length);
1128 after_immediate_data: 1127 after_immediate_data:
1129 if (immed_ret == IMMEDIATE_DATA_NORMAL_OPERATION) { 1128 if (immed_ret == IMMEDIATE_DATA_NORMAL_OPERATION) {
1130 /* 1129 /*
1131 * A PDU/CmdSN carrying Immediate Data passed 1130 * A PDU/CmdSN carrying Immediate Data passed
1132 * DataCRC, check against ExpCmdSN/MaxCmdSN if 1131 * DataCRC, check against ExpCmdSN/MaxCmdSN if
1133 * Immediate Bit is not set. 1132 * Immediate Bit is not set.
1134 */ 1133 */
1135 cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn); 1134 cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
1136 /* 1135 /*
1137 * Special case for Unsupported SAM WRITE Opcodes 1136 * Special case for Unsupported SAM WRITE Opcodes
1138 * and ImmediateData=Yes. 1137 * and ImmediateData=Yes.
1139 */ 1138 */
1140 if (dump_immediate_data) { 1139 if (dump_immediate_data) {
1141 if (iscsit_dump_data_payload(conn, payload_length, 1) < 0) 1140 if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
1142 return -1; 1141 return -1;
1143 } else if (cmd->unsolicited_data) { 1142 } else if (cmd->unsolicited_data) {
1144 iscsit_set_dataout_sequence_values(cmd); 1143 iscsit_set_dataout_sequence_values(cmd);
1145 1144
1146 spin_lock_bh(&cmd->dataout_timeout_lock); 1145 spin_lock_bh(&cmd->dataout_timeout_lock);
1147 iscsit_start_dataout_timer(cmd, cmd->conn); 1146 iscsit_start_dataout_timer(cmd, cmd->conn);
1148 spin_unlock_bh(&cmd->dataout_timeout_lock); 1147 spin_unlock_bh(&cmd->dataout_timeout_lock);
1149 } 1148 }
1150 1149
1151 if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) 1150 if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
1152 return iscsit_add_reject_from_cmd( 1151 return iscsit_add_reject_from_cmd(
1153 ISCSI_REASON_PROTOCOL_ERROR, 1152 ISCSI_REASON_PROTOCOL_ERROR,
1154 1, 0, buf, cmd); 1153 1, 0, buf, cmd);
1155 1154
1156 } else if (immed_ret == IMMEDIATE_DATA_ERL1_CRC_FAILURE) { 1155 } else if (immed_ret == IMMEDIATE_DATA_ERL1_CRC_FAILURE) {
1157 /* 1156 /*
1158 * Immediate Data failed DataCRC and ERL>=1, 1157 * Immediate Data failed DataCRC and ERL>=1,
1159 * silently drop this PDU and let the initiator 1158 * silently drop this PDU and let the initiator
1160 * plug the CmdSN gap. 1159 * plug the CmdSN gap.
1161 * 1160 *
1162 * FIXME: Send Unsolicited NOPIN with reserved 1161 * FIXME: Send Unsolicited NOPIN with reserved
1163 * TTT here to help the initiator figure out 1162 * TTT here to help the initiator figure out
1164 * the missing CmdSN, although they should be 1163 * the missing CmdSN, although they should be
1165 * intelligent enough to determine the missing 1164 * intelligent enough to determine the missing
1166 * CmdSN and issue a retry to plug the sequence. 1165 * CmdSN and issue a retry to plug the sequence.
1167 */ 1166 */
1168 cmd->i_state = ISTATE_REMOVE; 1167 cmd->i_state = ISTATE_REMOVE;
1169 iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state); 1168 iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
1170 } else /* immed_ret == IMMEDIATE_DATA_CANNOT_RECOVER */ 1169 } else /* immed_ret == IMMEDIATE_DATA_CANNOT_RECOVER */
1171 return -1; 1170 return -1;
1172 1171
1173 return 0; 1172 return 0;
1174 } 1173 }
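The long run of checks near the top of iscsit_handle_scsi_cmd() encodes RFC 3720's rules tying the Read/Write/Final flags to the Expected Data Transfer Length. Collapsed into a single predicate they look roughly like the sketch below; the flag bits and the function are illustrative (the real wire encodings live in include/scsi/iscsi_proto.h), and the VMware RESERVE/RELEASE workaround is omitted.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative flag bits, not the wire encoding. */
#define F_FINAL 0x1
#define F_READ  0x2
#define F_WRITE 0x4

/* True if the flag/EDTL combination passes the checks made above:
 * non-WRITE PDUs must carry Final, R or W requires a non-zero EDTL,
 * a non-zero EDTL requires R or W, and bidirectional (R and W
 * together) is rejected by this target. */
static bool scsi_cmd_flags_valid(uint8_t flags, uint32_t edtl)
{
	bool r = flags & F_READ, w = flags & F_WRITE, f = flags & F_FINAL;

	if (!w && !f)
		return false;		/* non-WRITE must carry Final */
	if ((r || w) && !edtl)
		return false;		/* data bits set with zero EDTL */
	if (!r && !w && edtl)
		return false;		/* EDTL set but no data bits */
	if (r && w)
		return false;		/* BIDI unsupported here */
	return true;
}

int main(void)
{
	printf("%d\n", scsi_cmd_flags_valid(F_FINAL | F_READ, 512));	/* 1 */
	printf("%d\n", scsi_cmd_flags_valid(F_FINAL, 512));		/* 0 */
	return 0;
}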
1175 1174
1176 static u32 iscsit_do_crypto_hash_sg( 1175 static u32 iscsit_do_crypto_hash_sg(
1177 struct hash_desc *hash, 1176 struct hash_desc *hash,
1178 struct iscsi_cmd *cmd, 1177 struct iscsi_cmd *cmd,
1179 u32 data_offset, 1178 u32 data_offset,
1180 u32 data_length, 1179 u32 data_length,
1181 u32 padding, 1180 u32 padding,
1182 u8 *pad_bytes) 1181 u8 *pad_bytes)
1183 { 1182 {
1184 u32 data_crc; 1183 u32 data_crc;
1185 u32 i; 1184 u32 i;
1186 struct scatterlist *sg; 1185 struct scatterlist *sg;
1187 unsigned int page_off; 1186 unsigned int page_off;
1188 1187
1189 crypto_hash_init(hash); 1188 crypto_hash_init(hash);
1190 1189
1191 sg = cmd->first_data_sg; 1190 sg = cmd->first_data_sg;
1192 page_off = cmd->first_data_sg_off; 1191 page_off = cmd->first_data_sg_off;
1193 1192
1194 i = 0; 1193 i = 0;
1195 while (data_length) { 1194 while (data_length) {
1196 u32 cur_len = min_t(u32, data_length, (sg[i].length - page_off)); 1195 u32 cur_len = min_t(u32, data_length, (sg[i].length - page_off));
1197 1196
1198 crypto_hash_update(hash, &sg[i], cur_len); 1197 crypto_hash_update(hash, &sg[i], cur_len);
1199 1198
1200 data_length -= cur_len; 1199 data_length -= cur_len;
1201 page_off = 0; 1200 page_off = 0;
1202 i++; 1201 i++;
1203 } 1202 }
1204 1203
1205 if (padding) { 1204 if (padding) {
1206 struct scatterlist pad_sg; 1205 struct scatterlist pad_sg;
1207 1206
1208 sg_init_one(&pad_sg, pad_bytes, padding); 1207 sg_init_one(&pad_sg, pad_bytes, padding);
1209 crypto_hash_update(hash, &pad_sg, padding); 1208 crypto_hash_update(hash, &pad_sg, padding);
1210 } 1209 }
1211 crypto_hash_final(hash, (u8 *) &data_crc); 1210 crypto_hash_final(hash, (u8 *) &data_crc);
1212 1211
1213 return data_crc; 1212 return data_crc;
1214 } 1213 }
1215 1214
1216 static void iscsit_do_crypto_hash_buf( 1215 static void iscsit_do_crypto_hash_buf(
1217 struct hash_desc *hash, 1216 struct hash_desc *hash,
1218 unsigned char *buf, 1217 unsigned char *buf,
1219 u32 payload_length, 1218 u32 payload_length,
1220 u32 padding, 1219 u32 padding,
1221 u8 *pad_bytes, 1220 u8 *pad_bytes,
1222 u8 *data_crc) 1221 u8 *data_crc)
1223 { 1222 {
1224 struct scatterlist sg; 1223 struct scatterlist sg;
1225 1224
1226 crypto_hash_init(hash); 1225 crypto_hash_init(hash);
1227 1226
1228 sg_init_one(&sg, (u8 *)buf, payload_length); 1227 sg_init_one(&sg, (u8 *)buf, payload_length);
1229 crypto_hash_update(hash, &sg, payload_length); 1228 crypto_hash_update(hash, &sg, payload_length);
1230 1229
1231 if (padding) { 1230 if (padding) {
1232 sg_init_one(&sg, pad_bytes, padding); 1231 sg_init_one(&sg, pad_bytes, padding);
1233 crypto_hash_update(hash, &sg, padding); 1232 crypto_hash_update(hash, &sg, padding);
1234 } 1233 }
1235 crypto_hash_final(hash, data_crc); 1234 crypto_hash_final(hash, data_crc);
1236 } 1235 }
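Both digest helpers compute CRC32C over the payload plus any pad bytes through the kernel crypto API. The arithmetic itself can be sketched in userspace with a bitwise CRC32C (Castagnoli polynomial, reflected form 0x82F63B78); this is illustrative and slow, not how the kernel computes it.

#include <stdint.h>
#include <stdio.h>

/* Bitwise CRC32C (Castagnoli, reflected polynomial 0x82F63B78). */
static uint32_t crc32c(uint32_t crc, const void *buf, size_t len)
{
	const uint8_t *p = buf;

	while (len--) {
		crc ^= *p++;
		for (int k = 0; k < 8; k++)
			crc = (crc >> 1) ^ (0x82F63B78U & -(crc & 1U));
	}
	return crc;
}

int main(void)
{
	const char payload[] = "data-out!";	/* 9 bytes -> 3 pad bytes */
	size_t len = sizeof(payload) - 1;
	uint8_t pad[4] = { 0 };
	size_t padding = (-len) & 3;		/* pad to a 4-byte boundary */
	uint32_t crc = 0xFFFFFFFFU;

	crc = crc32c(crc, payload, len);
	crc = crc32c(crc, pad, padding);	/* digest covers the pad bytes too */
	printf("CRC32C: 0x%08x\n", crc ^ 0xFFFFFFFFU);
	return 0;
}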
1237 1236
1238 static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf) 1237 static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
1239 { 1238 {
1240 int iov_ret, ooo_cmdsn = 0, ret; 1239 int iov_ret, ooo_cmdsn = 0, ret;
1241 u8 data_crc_failed = 0; 1240 u8 data_crc_failed = 0;
1242 u32 checksum, iov_count = 0, padding = 0, rx_got = 0; 1241 u32 checksum, iov_count = 0, padding = 0, rx_got = 0;
1243 u32 rx_size = 0, payload_length; 1242 u32 rx_size = 0, payload_length;
1244 struct iscsi_cmd *cmd = NULL; 1243 struct iscsi_cmd *cmd = NULL;
1245 struct se_cmd *se_cmd; 1244 struct se_cmd *se_cmd;
1246 struct iscsi_data *hdr; 1245 struct iscsi_data *hdr;
1247 struct kvec *iov; 1246 struct kvec *iov;
1248 unsigned long flags; 1247 unsigned long flags;
1249 1248
1250 hdr = (struct iscsi_data *) buf; 1249 hdr = (struct iscsi_data *) buf;
1251 payload_length = ntoh24(hdr->dlength); 1250 payload_length = ntoh24(hdr->dlength);
1252 hdr->itt = be32_to_cpu(hdr->itt); 1251 hdr->itt = be32_to_cpu(hdr->itt);
1253 hdr->ttt = be32_to_cpu(hdr->ttt); 1252 hdr->ttt = be32_to_cpu(hdr->ttt);
1254 hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn); 1253 hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
1255 hdr->datasn = be32_to_cpu(hdr->datasn); 1254 hdr->datasn = be32_to_cpu(hdr->datasn);
1256 hdr->offset = be32_to_cpu(hdr->offset); 1255 hdr->offset = be32_to_cpu(hdr->offset);
1257 1256
1258 if (!payload_length) { 1257 if (!payload_length) {
1259 pr_err("DataOUT payload is ZERO, protocol error.\n"); 1258 pr_err("DataOUT payload is ZERO, protocol error.\n");
1260 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 1259 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
1261 buf, conn); 1260 buf, conn);
1262 } 1261 }
1263 1262
1264 /* iSCSI write */ 1263 /* iSCSI write */
1265 spin_lock_bh(&conn->sess->session_stats_lock); 1264 spin_lock_bh(&conn->sess->session_stats_lock);
1266 conn->sess->rx_data_octets += payload_length; 1265 conn->sess->rx_data_octets += payload_length;
1267 if (conn->sess->se_sess->se_node_acl) { 1266 if (conn->sess->se_sess->se_node_acl) {
1268 spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock); 1267 spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock);
1269 conn->sess->se_sess->se_node_acl->write_bytes += payload_length; 1268 conn->sess->se_sess->se_node_acl->write_bytes += payload_length;
1270 spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock); 1269 spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock);
1271 } 1270 }
1272 spin_unlock_bh(&conn->sess->session_stats_lock); 1271 spin_unlock_bh(&conn->sess->session_stats_lock);
1273 1272
1274 if (payload_length > conn->conn_ops->MaxRecvDataSegmentLength) { 1273 if (payload_length > conn->conn_ops->MaxRecvDataSegmentLength) {
1275 pr_err("DataSegmentLength: %u is greater than" 1274 pr_err("DataSegmentLength: %u is greater than"
1276 " MaxRecvDataSegmentLength: %u\n", payload_length, 1275 " MaxRecvDataSegmentLength: %u\n", payload_length,
1277 conn->conn_ops->MaxRecvDataSegmentLength); 1276 conn->conn_ops->MaxRecvDataSegmentLength);
1278 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 1277 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
1279 buf, conn); 1278 buf, conn);
1280 } 1279 }
1281 1280
1282 cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt, 1281 cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt,
1283 payload_length); 1282 payload_length);
1284 if (!cmd) 1283 if (!cmd)
1285 return 0; 1284 return 0;
1286 1285
1287 pr_debug("Got DataOut ITT: 0x%08x, TTT: 0x%08x," 1286 pr_debug("Got DataOut ITT: 0x%08x, TTT: 0x%08x,"
1288 " DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n", 1287 " DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n",
1289 hdr->itt, hdr->ttt, hdr->datasn, hdr->offset, 1288 hdr->itt, hdr->ttt, hdr->datasn, hdr->offset,
1290 payload_length, conn->cid); 1289 payload_length, conn->cid);
1291 1290
1292 if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) { 1291 if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) {
1293 pr_err("Command ITT: 0x%08x received DataOUT after" 1292 pr_err("Command ITT: 0x%08x received DataOUT after"
1294 " last DataOUT received, dumping payload\n", 1293 " last DataOUT received, dumping payload\n",
1295 cmd->init_task_tag); 1294 cmd->init_task_tag);
1296 return iscsit_dump_data_payload(conn, payload_length, 1); 1295 return iscsit_dump_data_payload(conn, payload_length, 1);
1297 } 1296 }
1298 1297
1299 if (cmd->data_direction != DMA_TO_DEVICE) { 1298 if (cmd->data_direction != DMA_TO_DEVICE) {
1300 pr_err("Command ITT: 0x%08x received DataOUT for a" 1299 pr_err("Command ITT: 0x%08x received DataOUT for a"
1301 " NON-WRITE command.\n", cmd->init_task_tag); 1300 " NON-WRITE command.\n", cmd->init_task_tag);
1302 return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR, 1301 return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR,
1303 1, 0, buf, cmd); 1302 1, 0, buf, cmd);
1304 } 1303 }
1305 se_cmd = &cmd->se_cmd; 1304 se_cmd = &cmd->se_cmd;
1306 iscsit_mod_dataout_timer(cmd); 1305 iscsit_mod_dataout_timer(cmd);
1307 1306
1308 if ((hdr->offset + payload_length) > cmd->data_length) { 1307 if ((hdr->offset + payload_length) > cmd->data_length) {
1309 pr_err("DataOut Offset: %u, Length %u greater than" 1308 pr_err("DataOut Offset: %u, Length %u greater than"
1310 " iSCSI Command EDTL %u, protocol error.\n", 1309 " iSCSI Command EDTL %u, protocol error.\n",
1311 hdr->offset, payload_length, cmd->data_length); 1310 hdr->offset, payload_length, cmd->data_length);
1312 return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID, 1311 return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
1313 1, 0, buf, cmd); 1312 1, 0, buf, cmd);
1314 } 1313 }
1315 1314
1316 if (cmd->unsolicited_data) { 1315 if (cmd->unsolicited_data) {
1317 int dump_unsolicited_data = 0; 1316 int dump_unsolicited_data = 0;
1318 1317
1319 if (conn->sess->sess_ops->InitialR2T) { 1318 if (conn->sess->sess_ops->InitialR2T) {
1320 pr_err("Received unexpected unsolicited data" 1319 pr_err("Received unexpected unsolicited data"
1321 " while InitialR2T=Yes, protocol error.\n"); 1320 " while InitialR2T=Yes, protocol error.\n");
1322 transport_send_check_condition_and_sense(&cmd->se_cmd, 1321 transport_send_check_condition_and_sense(&cmd->se_cmd,
1323 TCM_UNEXPECTED_UNSOLICITED_DATA, 0); 1322 TCM_UNEXPECTED_UNSOLICITED_DATA, 0);
1324 return -1; 1323 return -1;
1325 } 1324 }
1326 /* 1325 /*
1327 * Special case for dealing with Unsolicited DataOUT 1326 * Special case for dealing with Unsolicited DataOUT
1328 * and Unsupported SAM WRITE Opcodes and SE resource allocation 1327 * and Unsupported SAM WRITE Opcodes and SE resource allocation
1329 * failures. 1328 * failures.
1330 */ 1329 */
1331 1330
1332 /* Something's amiss if we're not in WRITE_PENDING state... */ 1331 /* Something's amiss if we're not in WRITE_PENDING state... */
1333 spin_lock_irqsave(&se_cmd->t_state_lock, flags); 1332 spin_lock_irqsave(&se_cmd->t_state_lock, flags);
1334 WARN_ON(se_cmd->t_state != TRANSPORT_WRITE_PENDING); 1333 WARN_ON(se_cmd->t_state != TRANSPORT_WRITE_PENDING);
1335 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); 1334 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
1336 1335
1337 spin_lock_irqsave(&se_cmd->t_state_lock, flags); 1336 spin_lock_irqsave(&se_cmd->t_state_lock, flags);
1338 if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) || 1337 if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) ||
1339 (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION)) 1338 (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION))
1340 dump_unsolicited_data = 1; 1339 dump_unsolicited_data = 1;
1341 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); 1340 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
1342 1341
1343 if (dump_unsolicited_data) { 1342 if (dump_unsolicited_data) {
1344 /* 1343 /*
1345 * Check if a delayed TASK_ABORTED status needs to 1344 * Check if a delayed TASK_ABORTED status needs to
1346 * be sent now if the ISCSI_FLAG_CMD_FINAL has been 1345 * be sent now if the ISCSI_FLAG_CMD_FINAL has been
1347 * received with the unsolicited data out. 1346 * received with the unsolicited data out.
1348 */ 1347 */
1349 if (hdr->flags & ISCSI_FLAG_CMD_FINAL) 1348 if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
1350 iscsit_stop_dataout_timer(cmd); 1349 iscsit_stop_dataout_timer(cmd);
1351 1350
1352 transport_check_aborted_status(se_cmd, 1351 transport_check_aborted_status(se_cmd,
1353 (hdr->flags & ISCSI_FLAG_CMD_FINAL)); 1352 (hdr->flags & ISCSI_FLAG_CMD_FINAL));
1354 return iscsit_dump_data_payload(conn, payload_length, 1); 1353 return iscsit_dump_data_payload(conn, payload_length, 1);
1355 } 1354 }
1356 } else { 1355 } else {
1357 /* 1356 /*
1358 * For the normal solicited data path: 1357 * For the normal solicited data path:
1359 * 1358 *
1360 * Check for a delayed TASK_ABORTED status and dump any 1359 * Check for a delayed TASK_ABORTED status and dump any
1361 * incoming data out payload if one exists. Also, when the 1360 * incoming data out payload if one exists. Also, when the
1362 * ISCSI_FLAG_CMD_FINAL is set to denote the end of the current 1361 * ISCSI_FLAG_CMD_FINAL is set to denote the end of the current
1363 * data out sequence, we decrement outstanding_r2ts. Once 1362 * data out sequence, we decrement outstanding_r2ts. Once
1364 * outstanding_r2ts reaches zero, go ahead and send the delayed 1363 * outstanding_r2ts reaches zero, go ahead and send the delayed
1365 * TASK_ABORTED status. 1364 * TASK_ABORTED status.
1366 */ 1365 */
1367 if (atomic_read(&se_cmd->t_transport_aborted) != 0) { 1366 if (atomic_read(&se_cmd->t_transport_aborted) != 0) {
1368 if (hdr->flags & ISCSI_FLAG_CMD_FINAL) 1367 if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
1369 if (--cmd->outstanding_r2ts < 1) { 1368 if (--cmd->outstanding_r2ts < 1) {
1370 iscsit_stop_dataout_timer(cmd); 1369 iscsit_stop_dataout_timer(cmd);
1371 transport_check_aborted_status( 1370 transport_check_aborted_status(
1372 se_cmd, 1); 1371 se_cmd, 1);
1373 } 1372 }
1374 1373
1375 return iscsit_dump_data_payload(conn, payload_length, 1); 1374 return iscsit_dump_data_payload(conn, payload_length, 1);
1376 } 1375 }
1377 } 1376 }
1378 /* 1377 /*
1379 * Perform DataSN, DataSequenceInOrder, DataPDUInOrder, and 1378 * Perform DataSN, DataSequenceInOrder, DataPDUInOrder, and
1380 * within-command recovery checks before receiving the payload. 1379 * within-command recovery checks before receiving the payload.
1381 */ 1380 */
1382 ret = iscsit_check_pre_dataout(cmd, buf); 1381 ret = iscsit_check_pre_dataout(cmd, buf);
1383 if (ret == DATAOUT_WITHIN_COMMAND_RECOVERY) 1382 if (ret == DATAOUT_WITHIN_COMMAND_RECOVERY)
1384 return 0; 1383 return 0;
1385 else if (ret == DATAOUT_CANNOT_RECOVER) 1384 else if (ret == DATAOUT_CANNOT_RECOVER)
1386 return -1; 1385 return -1;
1387 1386
1388 rx_size += payload_length; 1387 rx_size += payload_length;
1389 iov = &cmd->iov_data[0]; 1388 iov = &cmd->iov_data[0];
1390 1389
1391 iov_ret = iscsit_map_iovec(cmd, iov, hdr->offset, payload_length); 1390 iov_ret = iscsit_map_iovec(cmd, iov, hdr->offset, payload_length);
1392 if (iov_ret < 0) 1391 if (iov_ret < 0)
1393 return -1; 1392 return -1;
1394 1393
1395 iov_count += iov_ret; 1394 iov_count += iov_ret;
1396 1395
1397 padding = ((-payload_length) & 3); 1396 padding = ((-payload_length) & 3);
1398 if (padding != 0) { 1397 if (padding != 0) {
1399 iov[iov_count].iov_base = cmd->pad_bytes; 1398 iov[iov_count].iov_base = cmd->pad_bytes;
1400 iov[iov_count++].iov_len = padding; 1399 iov[iov_count++].iov_len = padding;
1401 rx_size += padding; 1400 rx_size += padding;
1402 pr_debug("Receiving %u padding bytes.\n", padding); 1401 pr_debug("Receiving %u padding bytes.\n", padding);
1403 } 1402 }
1404 1403
1405 if (conn->conn_ops->DataDigest) { 1404 if (conn->conn_ops->DataDigest) {
1406 iov[iov_count].iov_base = &checksum; 1405 iov[iov_count].iov_base = &checksum;
1407 iov[iov_count++].iov_len = ISCSI_CRC_LEN; 1406 iov[iov_count++].iov_len = ISCSI_CRC_LEN;
1408 rx_size += ISCSI_CRC_LEN; 1407 rx_size += ISCSI_CRC_LEN;
1409 } 1408 }
1410 1409
1411 rx_got = rx_data(conn, &cmd->iov_data[0], iov_count, rx_size); 1410 rx_got = rx_data(conn, &cmd->iov_data[0], iov_count, rx_size);
1412 1411
1413 iscsit_unmap_iovec(cmd); 1412 iscsit_unmap_iovec(cmd);
1414 1413
1415 if (rx_got != rx_size) 1414 if (rx_got != rx_size)
1416 return -1; 1415 return -1;
1417 1416
1418 if (conn->conn_ops->DataDigest) { 1417 if (conn->conn_ops->DataDigest) {
1419 u32 data_crc; 1418 u32 data_crc;
1420 1419
1421 data_crc = iscsit_do_crypto_hash_sg(&conn->conn_rx_hash, cmd, 1420 data_crc = iscsit_do_crypto_hash_sg(&conn->conn_rx_hash, cmd,
1422 hdr->offset, payload_length, padding, 1421 hdr->offset, payload_length, padding,
1423 cmd->pad_bytes); 1422 cmd->pad_bytes);
1424 1423
1425 if (checksum != data_crc) { 1424 if (checksum != data_crc) {
1426 pr_err("ITT: 0x%08x, Offset: %u, Length: %u," 1425 pr_err("ITT: 0x%08x, Offset: %u, Length: %u,"
1427 " DataSN: 0x%08x, CRC32C DataDigest 0x%08x" 1426 " DataSN: 0x%08x, CRC32C DataDigest 0x%08x"
1428 " does not match computed 0x%08x\n", 1427 " does not match computed 0x%08x\n",
1429 hdr->itt, hdr->offset, payload_length, 1428 hdr->itt, hdr->offset, payload_length,
1430 hdr->datasn, checksum, data_crc); 1429 hdr->datasn, checksum, data_crc);
1431 data_crc_failed = 1; 1430 data_crc_failed = 1;
1432 } else { 1431 } else {
1433 pr_debug("Got CRC32C DataDigest 0x%08x for" 1432 pr_debug("Got CRC32C DataDigest 0x%08x for"
1434 " %u bytes of Data Out\n", checksum, 1433 " %u bytes of Data Out\n", checksum,
1435 payload_length); 1434 payload_length);
1436 } 1435 }
1437 } 1436 }
1438 /* 1437 /*
1439 * Increment post receive data and CRC values or perform 1438 * Increment post receive data and CRC values or perform
1440 * within-command recovery. 1439 * within-command recovery.
1441 */ 1440 */
1442 ret = iscsit_check_post_dataout(cmd, buf, data_crc_failed); 1441 ret = iscsit_check_post_dataout(cmd, buf, data_crc_failed);
1443 if ((ret == DATAOUT_NORMAL) || (ret == DATAOUT_WITHIN_COMMAND_RECOVERY)) 1442 if ((ret == DATAOUT_NORMAL) || (ret == DATAOUT_WITHIN_COMMAND_RECOVERY))
1444 return 0; 1443 return 0;
1445 else if (ret == DATAOUT_SEND_R2T) { 1444 else if (ret == DATAOUT_SEND_R2T) {
1446 iscsit_set_dataout_sequence_values(cmd); 1445 iscsit_set_dataout_sequence_values(cmd);
1447 iscsit_build_r2ts_for_cmd(cmd, conn, 0); 1446 iscsit_build_r2ts_for_cmd(cmd, conn, 0);
1448 } else if (ret == DATAOUT_SEND_TO_TRANSPORT) { 1447 } else if (ret == DATAOUT_SEND_TO_TRANSPORT) {
1449 /* 1448 /*
1450 * Handle extra special case for out of order 1449 * Handle extra special case for out of order
1451 * Unsolicited Data Out. 1450 * Unsolicited Data Out.
1452 */ 1451 */
1453 spin_lock_bh(&cmd->istate_lock); 1452 spin_lock_bh(&cmd->istate_lock);
1454 ooo_cmdsn = (cmd->cmd_flags & ICF_OOO_CMDSN); 1453 ooo_cmdsn = (cmd->cmd_flags & ICF_OOO_CMDSN);
1455 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT; 1454 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
1456 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; 1455 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
1457 spin_unlock_bh(&cmd->istate_lock); 1456 spin_unlock_bh(&cmd->istate_lock);
1458 1457
1459 iscsit_stop_dataout_timer(cmd); 1458 iscsit_stop_dataout_timer(cmd);
1460 return (!ooo_cmdsn) ? transport_generic_handle_data( 1459 return (!ooo_cmdsn) ? transport_generic_handle_data(
1461 &cmd->se_cmd) : 0; 1460 &cmd->se_cmd) : 0;
1462 } else /* DATAOUT_CANNOT_RECOVER */ 1461 } else /* DATAOUT_CANNOT_RECOVER */
1463 return -1; 1462 return -1;
1464 1463
1465 return 0; 1464 return 0;
1466 } 1465 }
1467 1466
1468 static int iscsit_handle_nop_out( 1467 static int iscsit_handle_nop_out(
1469 struct iscsi_conn *conn, 1468 struct iscsi_conn *conn,
1470 unsigned char *buf) 1469 unsigned char *buf)
1471 { 1470 {
1472 unsigned char *ping_data = NULL; 1471 unsigned char *ping_data = NULL;
1473 int cmdsn_ret, niov = 0, ret = 0, rx_got, rx_size; 1472 int cmdsn_ret, niov = 0, ret = 0, rx_got, rx_size;
1474 u32 checksum, data_crc, padding = 0, payload_length; 1473 u32 checksum, data_crc, padding = 0, payload_length;
1475 u64 lun; 1474 u64 lun;
1476 struct iscsi_cmd *cmd = NULL; 1475 struct iscsi_cmd *cmd = NULL;
1477 struct kvec *iov = NULL; 1476 struct kvec *iov = NULL;
1478 struct iscsi_nopout *hdr; 1477 struct iscsi_nopout *hdr;
1479 1478
1480 hdr = (struct iscsi_nopout *) buf; 1479 hdr = (struct iscsi_nopout *) buf;
1481 payload_length = ntoh24(hdr->dlength); 1480 payload_length = ntoh24(hdr->dlength);
1482 lun = get_unaligned_le64(&hdr->lun); 1481 lun = get_unaligned_le64(&hdr->lun);
1483 hdr->itt = be32_to_cpu(hdr->itt); 1482 hdr->itt = be32_to_cpu(hdr->itt);
1484 hdr->ttt = be32_to_cpu(hdr->ttt); 1483 hdr->ttt = be32_to_cpu(hdr->ttt);
1485 hdr->cmdsn = be32_to_cpu(hdr->cmdsn); 1484 hdr->cmdsn = be32_to_cpu(hdr->cmdsn);
1486 hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn); 1485 hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
1487 1486
1488 if ((hdr->itt == 0xFFFFFFFF) && !(hdr->opcode & ISCSI_OP_IMMEDIATE)) { 1487 if ((hdr->itt == 0xFFFFFFFF) && !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
1489 pr_err("NOPOUT ITT is reserved, but Immediate Bit is" 1488 pr_err("NOPOUT ITT is reserved, but Immediate Bit is"
1490 " not set, protocol error.\n"); 1489 " not set, protocol error.\n");
1491 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 1490 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
1492 buf, conn); 1491 buf, conn);
1493 } 1492 }
1494 1493
1495 if (payload_length > conn->conn_ops->MaxRecvDataSegmentLength) { 1494 if (payload_length > conn->conn_ops->MaxRecvDataSegmentLength) {
1496 pr_err("NOPOUT Ping Data DataSegmentLength: %u is" 1495 pr_err("NOPOUT Ping Data DataSegmentLength: %u is"
1497 " greater than MaxRecvDataSegmentLength: %u, protocol" 1496 " greater than MaxRecvDataSegmentLength: %u, protocol"
1498 " error.\n", payload_length, 1497 " error.\n", payload_length,
1499 conn->conn_ops->MaxRecvDataSegmentLength); 1498 conn->conn_ops->MaxRecvDataSegmentLength);
1500 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 1499 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
1501 buf, conn); 1500 buf, conn);
1502 } 1501 }
1503 1502
1504 pr_debug("Got NOPOUT Ping %s ITT: 0x%08x, TTT: 0x%09x," 1503 pr_debug("Got NOPOUT Ping %s ITT: 0x%08x, TTT: 0x%09x,"
1505 " CmdSN: 0x%08x, ExpStatSN: 0x%08x, Length: %u\n", 1504 " CmdSN: 0x%08x, ExpStatSN: 0x%08x, Length: %u\n",
1506 (hdr->itt == 0xFFFFFFFF) ? "Response" : "Request", 1505 (hdr->itt == 0xFFFFFFFF) ? "Response" : "Request",
1507 hdr->itt, hdr->ttt, hdr->cmdsn, hdr->exp_statsn, 1506 hdr->itt, hdr->ttt, hdr->cmdsn, hdr->exp_statsn,
1508 payload_length); 1507 payload_length);
1509 /* 1508 /*
1510 * This is not a response to an Unsolicited NopIN, which means 1509 * This is not a response to an Unsolicited NopIN, which means
1511 * it can either be a NOPOUT ping request (with a valid ITT), 1510 * it can either be a NOPOUT ping request (with a valid ITT),
1512 * or a NOPOUT not requesting a NOPIN (with a reserved ITT). 1511 * or a NOPOUT not requesting a NOPIN (with a reserved ITT).
1513 * Either way, make sure we allocate a struct iscsi_cmd, as both 1512 * Either way, make sure we allocate a struct iscsi_cmd, as both
1514 * can contain ping data. 1513 * can contain ping data.
1515 */ 1514 */
1516 if (hdr->ttt == 0xFFFFFFFF) { 1515 if (hdr->ttt == 0xFFFFFFFF) {
1517 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); 1516 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
1518 if (!cmd) 1517 if (!cmd)
1519 return iscsit_add_reject( 1518 return iscsit_add_reject(
1520 ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1519 ISCSI_REASON_BOOKMARK_NO_RESOURCES,
1521 1, buf, conn); 1520 1, buf, conn);
1522 1521
1523 cmd->iscsi_opcode = ISCSI_OP_NOOP_OUT; 1522 cmd->iscsi_opcode = ISCSI_OP_NOOP_OUT;
1524 cmd->i_state = ISTATE_SEND_NOPIN; 1523 cmd->i_state = ISTATE_SEND_NOPIN;
1525 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1524 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ?
1526 1 : 0); 1525 1 : 0);
1527 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt; 1526 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
1528 cmd->targ_xfer_tag = 0xFFFFFFFF; 1527 cmd->targ_xfer_tag = 0xFFFFFFFF;
1529 cmd->cmd_sn = hdr->cmdsn; 1528 cmd->cmd_sn = hdr->cmdsn;
1530 cmd->exp_stat_sn = hdr->exp_statsn; 1529 cmd->exp_stat_sn = hdr->exp_statsn;
1531 cmd->data_direction = DMA_NONE; 1530 cmd->data_direction = DMA_NONE;
1532 } 1531 }
1533 1532
1534 if (payload_length && (hdr->ttt == 0xFFFFFFFF)) { 1533 if (payload_length && (hdr->ttt == 0xFFFFFFFF)) {
1535 rx_size = payload_length; 1534 rx_size = payload_length;
1536 ping_data = kzalloc(payload_length + 1, GFP_KERNEL); 1535 ping_data = kzalloc(payload_length + 1, GFP_KERNEL);
1537 if (!ping_data) { 1536 if (!ping_data) {
1538 pr_err("Unable to allocate memory for" 1537 pr_err("Unable to allocate memory for"
1539 " NOPOUT ping data.\n"); 1538 " NOPOUT ping data.\n");
1540 ret = -1; 1539 ret = -1;
1541 goto out; 1540 goto out;
1542 } 1541 }
1543 1542
1544 iov = &cmd->iov_misc[0]; 1543 iov = &cmd->iov_misc[0];
1545 iov[niov].iov_base = ping_data; 1544 iov[niov].iov_base = ping_data;
1546 iov[niov++].iov_len = payload_length; 1545 iov[niov++].iov_len = payload_length;
1547 1546
1548 padding = ((-payload_length) & 3); 1547 padding = ((-payload_length) & 3);
1549 if (padding != 0) { 1548 if (padding != 0) {
1550 pr_debug("Receiving %u additional bytes" 1549 pr_debug("Receiving %u additional bytes"
1551 " for padding.\n", padding); 1550 " for padding.\n", padding);
1552 iov[niov].iov_base = &cmd->pad_bytes; 1551 iov[niov].iov_base = &cmd->pad_bytes;
1553 iov[niov++].iov_len = padding; 1552 iov[niov++].iov_len = padding;
1554 rx_size += padding; 1553 rx_size += padding;
1555 } 1554 }
1556 if (conn->conn_ops->DataDigest) { 1555 if (conn->conn_ops->DataDigest) {
1557 iov[niov].iov_base = &checksum; 1556 iov[niov].iov_base = &checksum;
1558 iov[niov++].iov_len = ISCSI_CRC_LEN; 1557 iov[niov++].iov_len = ISCSI_CRC_LEN;
1559 rx_size += ISCSI_CRC_LEN; 1558 rx_size += ISCSI_CRC_LEN;
1560 } 1559 }
1561 1560
1562 rx_got = rx_data(conn, &cmd->iov_misc[0], niov, rx_size); 1561 rx_got = rx_data(conn, &cmd->iov_misc[0], niov, rx_size);
1563 if (rx_got != rx_size) { 1562 if (rx_got != rx_size) {
1564 ret = -1; 1563 ret = -1;
1565 goto out; 1564 goto out;
1566 } 1565 }
1567 1566
1568 if (conn->conn_ops->DataDigest) { 1567 if (conn->conn_ops->DataDigest) {
1569 iscsit_do_crypto_hash_buf(&conn->conn_rx_hash, 1568 iscsit_do_crypto_hash_buf(&conn->conn_rx_hash,
1570 ping_data, payload_length, 1569 ping_data, payload_length,
1571 padding, cmd->pad_bytes, 1570 padding, cmd->pad_bytes,
1572 (u8 *)&data_crc); 1571 (u8 *)&data_crc);
1573 1572
1574 if (checksum != data_crc) { 1573 if (checksum != data_crc) {
1575 pr_err("Ping data CRC32C DataDigest" 1574 pr_err("Ping data CRC32C DataDigest"
1576 " 0x%08x does not match computed 0x%08x\n", 1575 " 0x%08x does not match computed 0x%08x\n",
1577 checksum, data_crc); 1576 checksum, data_crc);
1578 if (!conn->sess->sess_ops->ErrorRecoveryLevel) { 1577 if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
1579 pr_err("Unable to recover from" 1578 pr_err("Unable to recover from"
1580 " NOPOUT Ping DataCRC failure while in" 1579 " NOPOUT Ping DataCRC failure while in"
1581 " ERL=0.\n"); 1580 " ERL=0.\n");
1582 ret = -1; 1581 ret = -1;
1583 goto out; 1582 goto out;
1584 } else { 1583 } else {
1585 /* 1584 /*
1586 * Silently drop this PDU and let the 1585 * Silently drop this PDU and let the
1587 * initiator plug the CmdSN gap. 1586 * initiator plug the CmdSN gap.
1588 */ 1587 */
1589 pr_debug("Dropping NOPOUT" 1588 pr_debug("Dropping NOPOUT"
1590 " Command CmdSN: 0x%08x due to" 1589 " Command CmdSN: 0x%08x due to"
1591 " DataCRC error.\n", hdr->cmdsn); 1590 " DataCRC error.\n", hdr->cmdsn);
1592 ret = 0; 1591 ret = 0;
1593 goto out; 1592 goto out;
1594 } 1593 }
1595 } else { 1594 } else {
1596 pr_debug("Got CRC32C DataDigest" 1595 pr_debug("Got CRC32C DataDigest"
1597 " 0x%08x for %u bytes of ping data.\n", 1596 " 0x%08x for %u bytes of ping data.\n",
1598 checksum, payload_length); 1597 checksum, payload_length);
1599 } 1598 }
1600 } 1599 }
1601 1600
1602 ping_data[payload_length] = '\0'; 1601 ping_data[payload_length] = '\0';
1603 /* 1602 /*
1604 * Attach ping data to struct iscsi_cmd->buf_ptr. 1603 * Attach ping data to struct iscsi_cmd->buf_ptr.
1605 */ 1604 */
1606 cmd->buf_ptr = (void *)ping_data; 1605 cmd->buf_ptr = (void *)ping_data;
1607 cmd->buf_ptr_size = payload_length; 1606 cmd->buf_ptr_size = payload_length;
1608 1607
1609 pr_debug("Got %u bytes of NOPOUT ping" 1608 pr_debug("Got %u bytes of NOPOUT ping"
1610 " data.\n", payload_length); 1609 " data.\n", payload_length);
1611 pr_debug("Ping Data: \"%s\"\n", ping_data); 1610 pr_debug("Ping Data: \"%s\"\n", ping_data);
1612 } 1611 }
1613 1612
1614 if (hdr->itt != 0xFFFFFFFF) { 1613 if (hdr->itt != 0xFFFFFFFF) {
1615 if (!cmd) { 1614 if (!cmd) {
1616 pr_err("Checking CmdSN for NOPOUT," 1615 pr_err("Checking CmdSN for NOPOUT,"
1617 " but cmd is NULL!\n"); 1616 " but cmd is NULL!\n");
1618 return -1; 1617 return -1;
1619 } 1618 }
1620 /* 1619 /*
1621 * Initiator is expecting a NopIN ping reply. 1620 * Initiator is expecting a NopIN ping reply.
1622 */ 1621 */
1623 spin_lock_bh(&conn->cmd_lock); 1622 spin_lock_bh(&conn->cmd_lock);
1624 list_add_tail(&cmd->i_list, &conn->conn_cmd_list); 1623 list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
1625 spin_unlock_bh(&conn->cmd_lock); 1624 spin_unlock_bh(&conn->cmd_lock);
1626 1625
1627 iscsit_ack_from_expstatsn(conn, hdr->exp_statsn); 1626 iscsit_ack_from_expstatsn(conn, hdr->exp_statsn);
1628 1627
1629 if (hdr->opcode & ISCSI_OP_IMMEDIATE) { 1628 if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
1630 iscsit_add_cmd_to_response_queue(cmd, conn, 1629 iscsit_add_cmd_to_response_queue(cmd, conn,
1631 cmd->i_state); 1630 cmd->i_state);
1632 return 0; 1631 return 0;
1633 } 1632 }
1634 1633
1635 cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn); 1634 cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
1636 if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) { 1635 if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
1637 ret = 0; 1636 ret = 0;
1638 goto ping_out; 1637 goto ping_out;
1639 } 1638 }
1640 if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) 1639 if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
1641 return iscsit_add_reject_from_cmd( 1640 return iscsit_add_reject_from_cmd(
1642 ISCSI_REASON_PROTOCOL_ERROR, 1641 ISCSI_REASON_PROTOCOL_ERROR,
1643 1, 0, buf, cmd); 1642 1, 0, buf, cmd);
1644 1643
1645 return 0; 1644 return 0;
1646 } 1645 }
1647 1646
1648 if (hdr->ttt != 0xFFFFFFFF) { 1647 if (hdr->ttt != 0xFFFFFFFF) {
1649 /* 1648 /*
1650 * This was a response to an unsolicited NOPIN ping. 1649 * This was a response to an unsolicited NOPIN ping.
1651 */ 1650 */
1652 cmd = iscsit_find_cmd_from_ttt(conn, hdr->ttt); 1651 cmd = iscsit_find_cmd_from_ttt(conn, hdr->ttt);
1653 if (!cmd) 1652 if (!cmd)
1654 return -1; 1653 return -1;
1655 1654
1656 iscsit_stop_nopin_response_timer(conn); 1655 iscsit_stop_nopin_response_timer(conn);
1657 1656
1658 cmd->i_state = ISTATE_REMOVE; 1657 cmd->i_state = ISTATE_REMOVE;
1659 iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state); 1658 iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
1660 iscsit_start_nopin_timer(conn); 1659 iscsit_start_nopin_timer(conn);
1661 } else { 1660 } else {
1662 /* 1661 /*
1663 * Initiator is not expecting a NOPIN in response. 1662 * Initiator is not expecting a NOPIN in response.
1664 * Just ignore for now. 1663 * Just ignore for now.
1665 * 1664 *
1666 * iSCSI v19-91 10.18 1665 * iSCSI v19-91 10.18
1667 * "A NOP-OUT may also be used to confirm a changed 1666 * "A NOP-OUT may also be used to confirm a changed
1668 * ExpStatSN if another PDU will not be available 1667 * ExpStatSN if another PDU will not be available
1669 * for a long time." 1668 * for a long time."
1670 */ 1669 */
1671 ret = 0; 1670 ret = 0;
1672 goto out; 1671 goto out;
1673 } 1672 }
1674 1673
1675 return 0; 1674 return 0;
1676 out: 1675 out:
1677 if (cmd) 1676 if (cmd)
1678 iscsit_release_cmd(cmd); 1677 iscsit_release_cmd(cmd);
1679 ping_out: 1678 ping_out:
1680 kfree(ping_data); 1679 kfree(ping_data);
1681 return ret; 1680 return ret;
1682 } 1681 }
1683 1682
1684 static int iscsit_handle_task_mgt_cmd( 1683 static int iscsit_handle_task_mgt_cmd(
1685 struct iscsi_conn *conn, 1684 struct iscsi_conn *conn,
1686 unsigned char *buf) 1685 unsigned char *buf)
1687 { 1686 {
1688 struct iscsi_cmd *cmd; 1687 struct iscsi_cmd *cmd;
1689 struct se_tmr_req *se_tmr; 1688 struct se_tmr_req *se_tmr;
1690 struct iscsi_tmr_req *tmr_req; 1689 struct iscsi_tmr_req *tmr_req;
1691 struct iscsi_tm *hdr; 1690 struct iscsi_tm *hdr;
1692 u32 payload_length; 1691 u32 payload_length;
1693 int out_of_order_cmdsn = 0; 1692 int out_of_order_cmdsn = 0;
1694 int ret; 1693 int ret;
1695 u8 function; 1694 u8 function;
1696 1695
1697 hdr = (struct iscsi_tm *) buf; 1696 hdr = (struct iscsi_tm *) buf;
1698 payload_length = ntoh24(hdr->dlength); 1697 payload_length = ntoh24(hdr->dlength);
1699 hdr->itt = be32_to_cpu(hdr->itt); 1698 hdr->itt = be32_to_cpu(hdr->itt);
1700 hdr->rtt = be32_to_cpu(hdr->rtt); 1699 hdr->rtt = be32_to_cpu(hdr->rtt);
1701 hdr->cmdsn = be32_to_cpu(hdr->cmdsn); 1700 hdr->cmdsn = be32_to_cpu(hdr->cmdsn);
1702 hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn); 1701 hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
1703 hdr->refcmdsn = be32_to_cpu(hdr->refcmdsn); 1702 hdr->refcmdsn = be32_to_cpu(hdr->refcmdsn);
1704 hdr->exp_datasn = be32_to_cpu(hdr->exp_datasn); 1703 hdr->exp_datasn = be32_to_cpu(hdr->exp_datasn);
1705 hdr->flags &= ~ISCSI_FLAG_CMD_FINAL; 1704 hdr->flags &= ~ISCSI_FLAG_CMD_FINAL;
1706 function = hdr->flags; 1705 function = hdr->flags;
1707 1706
1708 pr_debug("Got Task Management Request ITT: 0x%08x, CmdSN:" 1707 pr_debug("Got Task Management Request ITT: 0x%08x, CmdSN:"
1709 " 0x%08x, Function: 0x%02x, RefTaskTag: 0x%08x, RefCmdSN:" 1708 " 0x%08x, Function: 0x%02x, RefTaskTag: 0x%08x, RefCmdSN:"
1710 " 0x%08x, CID: %hu\n", hdr->itt, hdr->cmdsn, function, 1709 " 0x%08x, CID: %hu\n", hdr->itt, hdr->cmdsn, function,
1711 hdr->rtt, hdr->refcmdsn, conn->cid); 1710 hdr->rtt, hdr->refcmdsn, conn->cid);
1712 1711
1713 if ((function != ISCSI_TM_FUNC_ABORT_TASK) && 1712 if ((function != ISCSI_TM_FUNC_ABORT_TASK) &&
1714 ((function != ISCSI_TM_FUNC_TASK_REASSIGN) && 1713 ((function != ISCSI_TM_FUNC_TASK_REASSIGN) &&
1715 (hdr->rtt != ISCSI_RESERVED_TAG))) { 1714 (hdr->rtt != ISCSI_RESERVED_TAG))) {
1716 pr_err("RefTaskTag should be set to 0xFFFFFFFF.\n"); 1715 pr_err("RefTaskTag should be set to 0xFFFFFFFF.\n");
1717 hdr->rtt = ISCSI_RESERVED_TAG; 1716 hdr->rtt = ISCSI_RESERVED_TAG;
1718 } 1717 }
1719 1718
1720 if ((function == ISCSI_TM_FUNC_TASK_REASSIGN) && 1719 if ((function == ISCSI_TM_FUNC_TASK_REASSIGN) &&
1721 !(hdr->opcode & ISCSI_OP_IMMEDIATE)) { 1720 !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
1722 pr_err("Task Management Request TASK_REASSIGN not" 1721 pr_err("Task Management Request TASK_REASSIGN not"
1723 " issued as immediate command, bad iSCSI Initiator" 1722 " issued as immediate command, bad iSCSI Initiator"
1724 "implementation\n"); 1723 "implementation\n");
1725 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 1724 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
1726 buf, conn); 1725 buf, conn);
1727 } 1726 }
1728 if ((function != ISCSI_TM_FUNC_ABORT_TASK) && 1727 if ((function != ISCSI_TM_FUNC_ABORT_TASK) &&
1729 (hdr->refcmdsn != ISCSI_RESERVED_TAG)) 1728 (hdr->refcmdsn != ISCSI_RESERVED_TAG))
1730 hdr->refcmdsn = ISCSI_RESERVED_TAG; 1729 hdr->refcmdsn = ISCSI_RESERVED_TAG;
1731 1730
1732 cmd = iscsit_allocate_se_cmd_for_tmr(conn, function); 1731 cmd = iscsit_allocate_se_cmd_for_tmr(conn, function);
1733 if (!cmd) 1732 if (!cmd)
1734 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1733 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
1735 1, buf, conn); 1734 1, buf, conn);
1736 1735
1737 cmd->iscsi_opcode = ISCSI_OP_SCSI_TMFUNC; 1736 cmd->iscsi_opcode = ISCSI_OP_SCSI_TMFUNC;
1738 cmd->i_state = ISTATE_SEND_TASKMGTRSP; 1737 cmd->i_state = ISTATE_SEND_TASKMGTRSP;
1739 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0); 1738 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
1740 cmd->init_task_tag = hdr->itt; 1739 cmd->init_task_tag = hdr->itt;
1741 cmd->targ_xfer_tag = 0xFFFFFFFF; 1740 cmd->targ_xfer_tag = 0xFFFFFFFF;
1742 cmd->cmd_sn = hdr->cmdsn; 1741 cmd->cmd_sn = hdr->cmdsn;
1743 cmd->exp_stat_sn = hdr->exp_statsn; 1742 cmd->exp_stat_sn = hdr->exp_statsn;
1744 se_tmr = cmd->se_cmd.se_tmr_req; 1743 se_tmr = cmd->se_cmd.se_tmr_req;
1745 tmr_req = cmd->tmr_req; 1744 tmr_req = cmd->tmr_req;
1746 /* 1745 /*
1747 * Locate the struct se_lun for all TMRs not related to ERL=2 TASK_REASSIGN 1746 * Locate the struct se_lun for all TMRs not related to ERL=2 TASK_REASSIGN
1748 */ 1747 */
1749 if (function != ISCSI_TM_FUNC_TASK_REASSIGN) { 1748 if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {
1750 ret = iscsit_get_lun_for_tmr(cmd, 1749 ret = iscsit_get_lun_for_tmr(cmd,
1751 get_unaligned_le64(&hdr->lun)); 1750 get_unaligned_le64(&hdr->lun));
1752 if (ret < 0) { 1751 if (ret < 0) {
1753 cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 1752 cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1754 se_tmr->response = ISCSI_TMF_RSP_NO_LUN; 1753 se_tmr->response = ISCSI_TMF_RSP_NO_LUN;
1755 goto attach; 1754 goto attach;
1756 } 1755 }
1757 } 1756 }
1758 1757
1759 switch (function) { 1758 switch (function) {
1760 case ISCSI_TM_FUNC_ABORT_TASK: 1759 case ISCSI_TM_FUNC_ABORT_TASK:
1761 se_tmr->response = iscsit_tmr_abort_task(cmd, buf); 1760 se_tmr->response = iscsit_tmr_abort_task(cmd, buf);
1762 if (se_tmr->response != ISCSI_TMF_RSP_COMPLETE) { 1761 if (se_tmr->response != ISCSI_TMF_RSP_COMPLETE) {
1763 cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 1762 cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1764 goto attach; 1763 goto attach;
1765 } 1764 }
1766 break; 1765 break;
1767 case ISCSI_TM_FUNC_ABORT_TASK_SET: 1766 case ISCSI_TM_FUNC_ABORT_TASK_SET:
1768 case ISCSI_TM_FUNC_CLEAR_ACA: 1767 case ISCSI_TM_FUNC_CLEAR_ACA:
1769 case ISCSI_TM_FUNC_CLEAR_TASK_SET: 1768 case ISCSI_TM_FUNC_CLEAR_TASK_SET:
1770 case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET: 1769 case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
1771 break; 1770 break;
1772 case ISCSI_TM_FUNC_TARGET_WARM_RESET: 1771 case ISCSI_TM_FUNC_TARGET_WARM_RESET:
1773 if (iscsit_tmr_task_warm_reset(conn, tmr_req, buf) < 0) { 1772 if (iscsit_tmr_task_warm_reset(conn, tmr_req, buf) < 0) {
1774 cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 1773 cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1775 se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED; 1774 se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED;
1776 goto attach; 1775 goto attach;
1777 } 1776 }
1778 break; 1777 break;
1779 case ISCSI_TM_FUNC_TARGET_COLD_RESET: 1778 case ISCSI_TM_FUNC_TARGET_COLD_RESET:
1780 if (iscsit_tmr_task_cold_reset(conn, tmr_req, buf) < 0) { 1779 if (iscsit_tmr_task_cold_reset(conn, tmr_req, buf) < 0) {
1781 cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 1780 cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1782 se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED; 1781 se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED;
1783 goto attach; 1782 goto attach;
1784 } 1783 }
1785 break; 1784 break;
1786 case ISCSI_TM_FUNC_TASK_REASSIGN: 1785 case ISCSI_TM_FUNC_TASK_REASSIGN:
1787 se_tmr->response = iscsit_tmr_task_reassign(cmd, buf); 1786 se_tmr->response = iscsit_tmr_task_reassign(cmd, buf);
1788 /* 1787 /*
1789 * Perform sanity checks on the ExpDataSN only if the 1788 * Perform sanity checks on the ExpDataSN only if the
1790 * TASK_REASSIGN was successful. 1789 * TASK_REASSIGN was successful.
1791 */ 1790 */
1792 if (se_tmr->response != ISCSI_TMF_RSP_COMPLETE) 1791 if (se_tmr->response != ISCSI_TMF_RSP_COMPLETE)
1793 break; 1792 break;
1794 1793
1795 if (iscsit_check_task_reassign_expdatasn(tmr_req, conn) < 0) 1794 if (iscsit_check_task_reassign_expdatasn(tmr_req, conn) < 0)
1796 return iscsit_add_reject_from_cmd( 1795 return iscsit_add_reject_from_cmd(
1797 ISCSI_REASON_BOOKMARK_INVALID, 1, 1, 1796 ISCSI_REASON_BOOKMARK_INVALID, 1, 1,
1798 buf, cmd); 1797 buf, cmd);
1799 break; 1798 break;
1800 default: 1799 default:
1801 pr_err("Unknown TMR function: 0x%02x, protocol" 1800 pr_err("Unknown TMR function: 0x%02x, protocol"
1802 " error.\n", function); 1801 " error.\n", function);
1803 cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 1802 cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1804 se_tmr->response = ISCSI_TMF_RSP_NOT_SUPPORTED; 1803 se_tmr->response = ISCSI_TMF_RSP_NOT_SUPPORTED;
1805 goto attach; 1804 goto attach;
1806 } 1805 }
1807 1806
1808 if ((function != ISCSI_TM_FUNC_TASK_REASSIGN) && 1807 if ((function != ISCSI_TM_FUNC_TASK_REASSIGN) &&
1809 (se_tmr->response == ISCSI_TMF_RSP_COMPLETE)) 1808 (se_tmr->response == ISCSI_TMF_RSP_COMPLETE))
1810 se_tmr->call_transport = 1; 1809 se_tmr->call_transport = 1;
1811 attach: 1810 attach:
1812 spin_lock_bh(&conn->cmd_lock); 1811 spin_lock_bh(&conn->cmd_lock);
1813 list_add_tail(&cmd->i_list, &conn->conn_cmd_list); 1812 list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
1814 spin_unlock_bh(&conn->cmd_lock); 1813 spin_unlock_bh(&conn->cmd_lock);
1815 1814
1816 if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) { 1815 if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
1817 int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn); 1816 int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
1818 if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP) 1817 if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP)
1819 out_of_order_cmdsn = 1; 1818 out_of_order_cmdsn = 1;
1820 else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) 1819 else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
1821 return 0; 1820 return 0;
1822 else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) 1821 else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
1823 return iscsit_add_reject_from_cmd( 1822 return iscsit_add_reject_from_cmd(
1824 ISCSI_REASON_PROTOCOL_ERROR, 1823 ISCSI_REASON_PROTOCOL_ERROR,
1825 1, 0, buf, cmd); 1824 1, 0, buf, cmd);
1826 } 1825 }
1827 iscsit_ack_from_expstatsn(conn, hdr->exp_statsn); 1826 iscsit_ack_from_expstatsn(conn, hdr->exp_statsn);
1828 1827
1829 if (out_of_order_cmdsn || !(hdr->opcode & ISCSI_OP_IMMEDIATE)) 1828 if (out_of_order_cmdsn || !(hdr->opcode & ISCSI_OP_IMMEDIATE))
1830 return 0; 1829 return 0;
1831 /* 1830 /*
1832 * Found the referenced task, send to transport for processing. 1831 * Found the referenced task, send to transport for processing.
1833 */ 1832 */
1834 if (se_tmr->call_transport) 1833 if (se_tmr->call_transport)
1835 return transport_generic_handle_tmr(&cmd->se_cmd); 1834 return transport_generic_handle_tmr(&cmd->se_cmd);
1836 1835
1837 /* 1836 /*
1838 * Could not find the referenced LUN, task, or Task Management 1837 * Could not find the referenced LUN, task, or Task Management
1839 * command not authorized or supported. Change state and 1838 * command not authorized or supported. Change state and
1840 * let the tx_thread send the response. 1839 * let the tx_thread send the response.
1841 * 1840 *
1842 * For connection recovery, this is also the default action for 1841 * For connection recovery, this is also the default action for
1843 * TMR TASK_REASSIGN. 1842 * TMR TASK_REASSIGN.
1844 */ 1843 */
1845 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); 1844 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
1846 return 0; 1845 return 0;
1847 } 1846 }
1848 1847
1849 /* #warning FIXME: Support Text Command parameters besides SendTargets */ 1848 /* #warning FIXME: Support Text Command parameters besides SendTargets */
1850 static int iscsit_handle_text_cmd( 1849 static int iscsit_handle_text_cmd(
1851 struct iscsi_conn *conn, 1850 struct iscsi_conn *conn,
1852 unsigned char *buf) 1851 unsigned char *buf)
1853 { 1852 {
1854 char *text_ptr, *text_in; 1853 char *text_ptr, *text_in;
1855 int cmdsn_ret, niov = 0, rx_got, rx_size; 1854 int cmdsn_ret, niov = 0, rx_got, rx_size;
1856 u32 checksum = 0, data_crc = 0, payload_length; 1855 u32 checksum = 0, data_crc = 0, payload_length;
1857 u32 padding = 0, pad_bytes = 0, text_length = 0; 1856 u32 padding = 0, pad_bytes = 0, text_length = 0;
1858 struct iscsi_cmd *cmd; 1857 struct iscsi_cmd *cmd;
1859 struct kvec iov[3]; 1858 struct kvec iov[3];
1860 struct iscsi_text *hdr; 1859 struct iscsi_text *hdr;
1861 1860
1862 hdr = (struct iscsi_text *) buf; 1861 hdr = (struct iscsi_text *) buf;
1863 payload_length = ntoh24(hdr->dlength); 1862 payload_length = ntoh24(hdr->dlength);
1864 hdr->itt = be32_to_cpu(hdr->itt); 1863 hdr->itt = be32_to_cpu(hdr->itt);
1865 hdr->ttt = be32_to_cpu(hdr->ttt); 1864 hdr->ttt = be32_to_cpu(hdr->ttt);
1866 hdr->cmdsn = be32_to_cpu(hdr->cmdsn); 1865 hdr->cmdsn = be32_to_cpu(hdr->cmdsn);
1867 hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn); 1866 hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
1868 1867
1869 if (payload_length > conn->conn_ops->MaxRecvDataSegmentLength) { 1868 if (payload_length > conn->conn_ops->MaxRecvDataSegmentLength) {
1870 pr_err("Unable to accept text parameter length: %u" 1869 pr_err("Unable to accept text parameter length: %u"
1871 "greater than MaxRecvDataSegmentLength %u.\n", 1870 "greater than MaxRecvDataSegmentLength %u.\n",
1872 payload_length, conn->conn_ops->MaxRecvDataSegmentLength); 1871 payload_length, conn->conn_ops->MaxRecvDataSegmentLength);
1873 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 1872 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
1874 buf, conn); 1873 buf, conn);
1875 } 1874 }
1876 1875
1877 pr_debug("Got Text Request: ITT: 0x%08x, CmdSN: 0x%08x," 1876 pr_debug("Got Text Request: ITT: 0x%08x, CmdSN: 0x%08x,"
1878 " ExpStatSN: 0x%08x, Length: %u\n", hdr->itt, hdr->cmdsn, 1877 " ExpStatSN: 0x%08x, Length: %u\n", hdr->itt, hdr->cmdsn,
1879 hdr->exp_statsn, payload_length); 1878 hdr->exp_statsn, payload_length);
1880 1879
1881 rx_size = text_length = payload_length; 1880 rx_size = text_length = payload_length;
1882 if (text_length) { 1881 if (text_length) {
1883 text_in = kzalloc(text_length, GFP_KERNEL); 1882 text_in = kzalloc(text_length, GFP_KERNEL);
1884 if (!text_in) { 1883 if (!text_in) {
1885 pr_err("Unable to allocate memory for" 1884 pr_err("Unable to allocate memory for"
1886 " incoming text parameters\n"); 1885 " incoming text parameters\n");
1887 return -1; 1886 return -1;
1888 } 1887 }
1889 1888
1890 memset(iov, 0, 3 * sizeof(struct kvec)); 1889 memset(iov, 0, 3 * sizeof(struct kvec));
1891 iov[niov].iov_base = text_in; 1890 iov[niov].iov_base = text_in;
1892 iov[niov++].iov_len = text_length; 1891 iov[niov++].iov_len = text_length;
1893 1892
1894 padding = ((-payload_length) & 3); 1893 padding = ((-payload_length) & 3);
1895 if (padding != 0) { 1894 if (padding != 0) {
1896 iov[niov].iov_base = &pad_bytes; 1895 iov[niov].iov_base = &pad_bytes;
1897 iov[niov++].iov_len = padding; 1896 iov[niov++].iov_len = padding;
1898 rx_size += padding; 1897 rx_size += padding;
1899 pr_debug("Receiving %u additional bytes" 1898 pr_debug("Receiving %u additional bytes"
1900 " for padding.\n", padding); 1899 " for padding.\n", padding);
1901 } 1900 }
1902 if (conn->conn_ops->DataDigest) { 1901 if (conn->conn_ops->DataDigest) {
1903 iov[niov].iov_base = &checksum; 1902 iov[niov].iov_base = &checksum;
1904 iov[niov++].iov_len = ISCSI_CRC_LEN; 1903 iov[niov++].iov_len = ISCSI_CRC_LEN;
1905 rx_size += ISCSI_CRC_LEN; 1904 rx_size += ISCSI_CRC_LEN;
1906 } 1905 }
1907 1906
1908 rx_got = rx_data(conn, &iov[0], niov, rx_size); 1907 rx_got = rx_data(conn, &iov[0], niov, rx_size);
1909 if (rx_got != rx_size) { 1908 if (rx_got != rx_size) {
1910 kfree(text_in); 1909 kfree(text_in);
1911 return -1; 1910 return -1;
1912 } 1911 }
1913 1912
1914 if (conn->conn_ops->DataDigest) { 1913 if (conn->conn_ops->DataDigest) {
1915 iscsit_do_crypto_hash_buf(&conn->conn_rx_hash, 1914 iscsit_do_crypto_hash_buf(&conn->conn_rx_hash,
1916 text_in, text_length, 1915 text_in, text_length,
1917 padding, (u8 *)&pad_bytes, 1916 padding, (u8 *)&pad_bytes,
1918 (u8 *)&data_crc); 1917 (u8 *)&data_crc);
1919 1918
1920 if (checksum != data_crc) { 1919 if (checksum != data_crc) {
1921 pr_err("Text data CRC32C DataDigest" 1920 pr_err("Text data CRC32C DataDigest"
1922 " 0x%08x does not match computed" 1921 " 0x%08x does not match computed"
1923 " 0x%08x\n", checksum, data_crc); 1922 " 0x%08x\n", checksum, data_crc);
1924 if (!conn->sess->sess_ops->ErrorRecoveryLevel) { 1923 if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
1925 pr_err("Unable to recover from" 1924 pr_err("Unable to recover from"
1926 " Text Data digest failure while in" 1925 " Text Data digest failure while in"
1927 " ERL=0.\n"); 1926 " ERL=0.\n");
1928 kfree(text_in); 1927 kfree(text_in);
1929 return -1; 1928 return -1;
1930 } else { 1929 } else {
1931 /* 1930 /*
1932 * Silently drop this PDU and let the 1931 * Silently drop this PDU and let the
1933 * initiator plug the CmdSN gap. 1932 * initiator plug the CmdSN gap.
1934 */ 1933 */
1935 pr_debug("Dropping Text" 1934 pr_debug("Dropping Text"
1936 " Command CmdSN: 0x%08x due to" 1935 " Command CmdSN: 0x%08x due to"
1937 " DataCRC error.\n", hdr->cmdsn); 1936 " DataCRC error.\n", hdr->cmdsn);
1938 kfree(text_in); 1937 kfree(text_in);
1939 return 0; 1938 return 0;
1940 } 1939 }
1941 } else { 1940 } else {
1942 pr_debug("Got CRC32C DataDigest" 1941 pr_debug("Got CRC32C DataDigest"
1943 " 0x%08x for %u bytes of text data.\n", 1942 " 0x%08x for %u bytes of text data.\n",
1944 checksum, text_length); 1943 checksum, text_length);
1945 } 1944 }
1946 } 1945 }
1947 text_in[text_length - 1] = '\0'; 1946 text_in[text_length - 1] = '\0';
1948 pr_debug("Successfully read %u bytes of text" 1947 pr_debug("Successfully read %u bytes of text"
1949 " data.\n", text_length); 1948 " data.\n", text_length);
1950 1949
1951 if (strncmp("SendTargets", text_in, 11) != 0) { 1950 if (strncmp("SendTargets", text_in, 11) != 0) {
1952 pr_err("Received Text Data that is not" 1951 pr_err("Received Text Data that is not"
1953 " SendTargets, cannot continue.\n"); 1952 " SendTargets, cannot continue.\n");
1954 kfree(text_in); 1953 kfree(text_in);
1955 return -1; 1954 return -1;
1956 } 1955 }
1957 text_ptr = strchr(text_in, '='); 1956 text_ptr = strchr(text_in, '=');
1958 if (!text_ptr) { 1957 if (!text_ptr) {
1959 pr_err("No \"=\" separator found in Text Data," 1958 pr_err("No \"=\" separator found in Text Data,"
1960 " cannot continue.\n"); 1959 " cannot continue.\n");
1961 kfree(text_in); 1960 kfree(text_in);
1962 return -1; 1961 return -1;
1963 } 1962 }
1964 if (strncmp("=All", text_ptr, 4) != 0) { 1963 if (strncmp("=All", text_ptr, 4) != 0) {
1965 pr_err("Unable to locate All value for" 1964 pr_err("Unable to locate All value for"
1966 " SendTargets key, cannot continue.\n"); 1965 " SendTargets key, cannot continue.\n");
1967 kfree(text_in); 1966 kfree(text_in);
1968 return -1; 1967 return -1;
1969 } 1968 }
1970 /* #warning Support SendTargets=(iSCSI Target Name/Nothing) values. */ 1969 /* #warning Support SendTargets=(iSCSI Target Name/Nothing) values. */
1971 kfree(text_in); 1970 kfree(text_in);
1972 } 1971 }
1973 1972
1974 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); 1973 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
1975 if (!cmd) 1974 if (!cmd)
1976 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1975 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
1977 1, buf, conn); 1976 1, buf, conn);
1978 1977
1979 cmd->iscsi_opcode = ISCSI_OP_TEXT; 1978 cmd->iscsi_opcode = ISCSI_OP_TEXT;
1980 cmd->i_state = ISTATE_SEND_TEXTRSP; 1979 cmd->i_state = ISTATE_SEND_TEXTRSP;
1981 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0); 1980 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
1982 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt; 1981 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
1983 cmd->targ_xfer_tag = 0xFFFFFFFF; 1982 cmd->targ_xfer_tag = 0xFFFFFFFF;
1984 cmd->cmd_sn = hdr->cmdsn; 1983 cmd->cmd_sn = hdr->cmdsn;
1985 cmd->exp_stat_sn = hdr->exp_statsn; 1984 cmd->exp_stat_sn = hdr->exp_statsn;
1986 cmd->data_direction = DMA_NONE; 1985 cmd->data_direction = DMA_NONE;
1987 1986
1988 spin_lock_bh(&conn->cmd_lock); 1987 spin_lock_bh(&conn->cmd_lock);
1989 list_add_tail(&cmd->i_list, &conn->conn_cmd_list); 1988 list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
1990 spin_unlock_bh(&conn->cmd_lock); 1989 spin_unlock_bh(&conn->cmd_lock);
1991 1990
1992 iscsit_ack_from_expstatsn(conn, hdr->exp_statsn); 1991 iscsit_ack_from_expstatsn(conn, hdr->exp_statsn);
1993 1992
1994 if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) { 1993 if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
1995 cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn); 1994 cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
1996 if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) 1995 if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
1997 return iscsit_add_reject_from_cmd( 1996 return iscsit_add_reject_from_cmd(
1998 ISCSI_REASON_PROTOCOL_ERROR, 1997 ISCSI_REASON_PROTOCOL_ERROR,
1999 1, 0, buf, cmd); 1998 1, 0, buf, cmd);
2000 1999
2001 return 0; 2000 return 0;
2002 } 2001 }
2003 2002
2004 return iscsit_execute_cmd(cmd, 0); 2003 return iscsit_execute_cmd(cmd, 0);
2005 } 2004 }
2006 2005
2007 int iscsit_logout_closesession(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 2006 int iscsit_logout_closesession(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2008 { 2007 {
2009 struct iscsi_conn *conn_p; 2008 struct iscsi_conn *conn_p;
2010 struct iscsi_session *sess = conn->sess; 2009 struct iscsi_session *sess = conn->sess;
2011 2010
2012 pr_debug("Received logout request CLOSESESSION on CID: %hu" 2011 pr_debug("Received logout request CLOSESESSION on CID: %hu"
2013 " for SID: %u.\n", conn->cid, conn->sess->sid); 2012 " for SID: %u.\n", conn->cid, conn->sess->sid);
2014 2013
2015 atomic_set(&sess->session_logout, 1); 2014 atomic_set(&sess->session_logout, 1);
2016 atomic_set(&conn->conn_logout_remove, 1); 2015 atomic_set(&conn->conn_logout_remove, 1);
2017 conn->conn_logout_reason = ISCSI_LOGOUT_REASON_CLOSE_SESSION; 2016 conn->conn_logout_reason = ISCSI_LOGOUT_REASON_CLOSE_SESSION;
2018 2017
2019 iscsit_inc_conn_usage_count(conn); 2018 iscsit_inc_conn_usage_count(conn);
2020 iscsit_inc_session_usage_count(sess); 2019 iscsit_inc_session_usage_count(sess);
2021 2020
2022 spin_lock_bh(&sess->conn_lock); 2021 spin_lock_bh(&sess->conn_lock);
2023 list_for_each_entry(conn_p, &sess->sess_conn_list, conn_list) { 2022 list_for_each_entry(conn_p, &sess->sess_conn_list, conn_list) {
2024 if (conn_p->conn_state != TARG_CONN_STATE_LOGGED_IN) 2023 if (conn_p->conn_state != TARG_CONN_STATE_LOGGED_IN)
2025 continue; 2024 continue;
2026 2025
2027 pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n"); 2026 pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n");
2028 conn_p->conn_state = TARG_CONN_STATE_IN_LOGOUT; 2027 conn_p->conn_state = TARG_CONN_STATE_IN_LOGOUT;
2029 } 2028 }
2030 spin_unlock_bh(&sess->conn_lock); 2029 spin_unlock_bh(&sess->conn_lock);
2031 2030
2032 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); 2031 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2033 2032
2034 return 0; 2033 return 0;
2035 } 2034 }
2036 2035
2037 int iscsit_logout_closeconnection(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 2036 int iscsit_logout_closeconnection(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2038 { 2037 {
2039 struct iscsi_conn *l_conn; 2038 struct iscsi_conn *l_conn;
2040 struct iscsi_session *sess = conn->sess; 2039 struct iscsi_session *sess = conn->sess;
2041 2040
2042 pr_debug("Received logout request CLOSECONNECTION for CID:" 2041 pr_debug("Received logout request CLOSECONNECTION for CID:"
2043 " %hu on CID: %hu.\n", cmd->logout_cid, conn->cid); 2042 " %hu on CID: %hu.\n", cmd->logout_cid, conn->cid);
2044 2043
2045 /* 2044 /*
2046 * A Logout Request with a CLOSECONNECTION reason code for a CID 2045 * A Logout Request with a CLOSECONNECTION reason code for a CID
2047 * can arrive on a connection with a differing CID. 2046 * can arrive on a connection with a differing CID.
2048 */ 2047 */
2049 if (conn->cid == cmd->logout_cid) { 2048 if (conn->cid == cmd->logout_cid) {
2050 spin_lock_bh(&conn->state_lock); 2049 spin_lock_bh(&conn->state_lock);
2051 pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n"); 2050 pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n");
2052 conn->conn_state = TARG_CONN_STATE_IN_LOGOUT; 2051 conn->conn_state = TARG_CONN_STATE_IN_LOGOUT;
2053 2052
2054 atomic_set(&conn->conn_logout_remove, 1); 2053 atomic_set(&conn->conn_logout_remove, 1);
2055 conn->conn_logout_reason = ISCSI_LOGOUT_REASON_CLOSE_CONNECTION; 2054 conn->conn_logout_reason = ISCSI_LOGOUT_REASON_CLOSE_CONNECTION;
2056 iscsit_inc_conn_usage_count(conn); 2055 iscsit_inc_conn_usage_count(conn);
2057 2056
2058 spin_unlock_bh(&conn->state_lock); 2057 spin_unlock_bh(&conn->state_lock);
2059 } else { 2058 } else {
2060 /* 2059 /*
2061 * Handle all different cid CLOSECONNECTION requests in 2060 * Handle all different cid CLOSECONNECTION requests in
2062 * iscsit_logout_post_handler_diffcid() so as to give enough 2061 * iscsit_logout_post_handler_diffcid() so as to give enough
2063 * time for any non-immediate command's CmdSN to be 2062 * time for any non-immediate command's CmdSN to be
2064 * acknowledged on the connection in question. 2063 * acknowledged on the connection in question.
2065 * 2064 *
2066 * Here we simply make sure the CID is still around. 2065 * Here we simply make sure the CID is still around.
2067 */ 2066 */
2068 l_conn = iscsit_get_conn_from_cid(sess, 2067 l_conn = iscsit_get_conn_from_cid(sess,
2069 cmd->logout_cid); 2068 cmd->logout_cid);
2070 if (!l_conn) { 2069 if (!l_conn) {
2071 cmd->logout_response = ISCSI_LOGOUT_CID_NOT_FOUND; 2070 cmd->logout_response = ISCSI_LOGOUT_CID_NOT_FOUND;
2072 iscsit_add_cmd_to_response_queue(cmd, conn, 2071 iscsit_add_cmd_to_response_queue(cmd, conn,
2073 cmd->i_state); 2072 cmd->i_state);
2074 return 0; 2073 return 0;
2075 } 2074 }
2076 2075
2077 iscsit_dec_conn_usage_count(l_conn); 2076 iscsit_dec_conn_usage_count(l_conn);
2078 } 2077 }
2079 2078
2080 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); 2079 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2081 2080
2082 return 0; 2081 return 0;
2083 } 2082 }
2084 2083
2085 int iscsit_logout_removeconnforrecovery(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 2084 int iscsit_logout_removeconnforrecovery(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2086 { 2085 {
2087 struct iscsi_session *sess = conn->sess; 2086 struct iscsi_session *sess = conn->sess;
2088 2087
2089 pr_debug("Received explicit REMOVECONNFORRECOVERY logout for" 2088 pr_debug("Received explicit REMOVECONNFORRECOVERY logout for"
2090 " CID: %hu on CID: %hu.\n", cmd->logout_cid, conn->cid); 2089 " CID: %hu on CID: %hu.\n", cmd->logout_cid, conn->cid);
2091 2090
2092 if (sess->sess_ops->ErrorRecoveryLevel != 2) { 2091 if (sess->sess_ops->ErrorRecoveryLevel != 2) {
2093 pr_err("Received Logout Request REMOVECONNFORRECOVERY" 2092 pr_err("Received Logout Request REMOVECONNFORRECOVERY"
2094 " while ERL!=2.\n"); 2093 " while ERL!=2.\n");
2095 cmd->logout_response = ISCSI_LOGOUT_RECOVERY_UNSUPPORTED; 2094 cmd->logout_response = ISCSI_LOGOUT_RECOVERY_UNSUPPORTED;
2096 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); 2095 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2097 return 0; 2096 return 0;
2098 } 2097 }
2099 2098
2100 if (conn->cid == cmd->logout_cid) { 2099 if (conn->cid == cmd->logout_cid) {
2101 pr_err("Received Logout Request REMOVECONNFORRECOVERY" 2100 pr_err("Received Logout Request REMOVECONNFORRECOVERY"
2102 " with CID: %hu on CID: %hu, implementation error.\n", 2101 " with CID: %hu on CID: %hu, implementation error.\n",
2103 cmd->logout_cid, conn->cid); 2102 cmd->logout_cid, conn->cid);
2104 cmd->logout_response = ISCSI_LOGOUT_CLEANUP_FAILED; 2103 cmd->logout_response = ISCSI_LOGOUT_CLEANUP_FAILED;
2105 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); 2104 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2106 return 0; 2105 return 0;
2107 } 2106 }
2108 2107
2109 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); 2108 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
2110 2109
2111 return 0; 2110 return 0;
2112 } 2111 }
2113 2112
2114 static int iscsit_handle_logout_cmd( 2113 static int iscsit_handle_logout_cmd(
2115 struct iscsi_conn *conn, 2114 struct iscsi_conn *conn,
2116 unsigned char *buf) 2115 unsigned char *buf)
2117 { 2116 {
2118 int cmdsn_ret, logout_remove = 0; 2117 int cmdsn_ret, logout_remove = 0;
2119 u8 reason_code = 0; 2118 u8 reason_code = 0;
2120 struct iscsi_cmd *cmd; 2119 struct iscsi_cmd *cmd;
2121 struct iscsi_logout *hdr; 2120 struct iscsi_logout *hdr;
2122 struct iscsi_tiqn *tiqn = iscsit_snmp_get_tiqn(conn); 2121 struct iscsi_tiqn *tiqn = iscsit_snmp_get_tiqn(conn);
2123 2122
2124 hdr = (struct iscsi_logout *) buf; 2123 hdr = (struct iscsi_logout *) buf;
2125 reason_code = (hdr->flags & 0x7f); 2124 reason_code = (hdr->flags & 0x7f);
2126 hdr->itt = be32_to_cpu(hdr->itt); 2125 hdr->itt = be32_to_cpu(hdr->itt);
2127 hdr->cid = be16_to_cpu(hdr->cid); 2126 hdr->cid = be16_to_cpu(hdr->cid);
2128 hdr->cmdsn = be32_to_cpu(hdr->cmdsn); 2127 hdr->cmdsn = be32_to_cpu(hdr->cmdsn);
2129 hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn); 2128 hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
2130 2129
2131 if (tiqn) { 2130 if (tiqn) {
2132 spin_lock(&tiqn->logout_stats.lock); 2131 spin_lock(&tiqn->logout_stats.lock);
2133 if (reason_code == ISCSI_LOGOUT_REASON_CLOSE_SESSION) 2132 if (reason_code == ISCSI_LOGOUT_REASON_CLOSE_SESSION)
2134 tiqn->logout_stats.normal_logouts++; 2133 tiqn->logout_stats.normal_logouts++;
2135 else 2134 else
2136 tiqn->logout_stats.abnormal_logouts++; 2135 tiqn->logout_stats.abnormal_logouts++;
2137 spin_unlock(&tiqn->logout_stats.lock); 2136 spin_unlock(&tiqn->logout_stats.lock);
2138 } 2137 }
2139 2138
2140 pr_debug("Got Logout Request ITT: 0x%08x CmdSN: 0x%08x" 2139 pr_debug("Got Logout Request ITT: 0x%08x CmdSN: 0x%08x"
2141 " ExpStatSN: 0x%08x Reason: 0x%02x CID: %hu on CID: %hu\n", 2140 " ExpStatSN: 0x%08x Reason: 0x%02x CID: %hu on CID: %hu\n",
2142 hdr->itt, hdr->cmdsn, hdr->exp_statsn, reason_code, 2141 hdr->itt, hdr->cmdsn, hdr->exp_statsn, reason_code,
2143 hdr->cid, conn->cid); 2142 hdr->cid, conn->cid);
2144 2143
2145 if (conn->conn_state != TARG_CONN_STATE_LOGGED_IN) { 2144 if (conn->conn_state != TARG_CONN_STATE_LOGGED_IN) {
2146 pr_err("Received logout request on connection that" 2145 pr_err("Received logout request on connection that"
2147 " is not in logged in state, ignoring request.\n"); 2146 " is not in logged in state, ignoring request.\n");
2148 return 0; 2147 return 0;
2149 } 2148 }
2150 2149
2151 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); 2150 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
2152 if (!cmd) 2151 if (!cmd)
2153 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1, 2152 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1,
2154 buf, conn); 2153 buf, conn);
2155 2154
2156 cmd->iscsi_opcode = ISCSI_OP_LOGOUT; 2155 cmd->iscsi_opcode = ISCSI_OP_LOGOUT;
2157 cmd->i_state = ISTATE_SEND_LOGOUTRSP; 2156 cmd->i_state = ISTATE_SEND_LOGOUTRSP;
2158 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0); 2157 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
2159 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt; 2158 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
2160 cmd->targ_xfer_tag = 0xFFFFFFFF; 2159 cmd->targ_xfer_tag = 0xFFFFFFFF;
2161 cmd->cmd_sn = hdr->cmdsn; 2160 cmd->cmd_sn = hdr->cmdsn;
2162 cmd->exp_stat_sn = hdr->exp_statsn; 2161 cmd->exp_stat_sn = hdr->exp_statsn;
2163 cmd->logout_cid = hdr->cid; 2162 cmd->logout_cid = hdr->cid;
2164 cmd->logout_reason = reason_code; 2163 cmd->logout_reason = reason_code;
2165 cmd->data_direction = DMA_NONE; 2164 cmd->data_direction = DMA_NONE;
2166 2165
2167 /* 2166 /*
2168 * We need to sleep in these cases (by returning 1) until the Logout 2167 * We need to sleep in these cases (by returning 1) until the Logout
2169 * Response gets sent in the tx thread. 2168 * Response gets sent in the tx thread.
2170 */ 2169 */
2171 if ((reason_code == ISCSI_LOGOUT_REASON_CLOSE_SESSION) || 2170 if ((reason_code == ISCSI_LOGOUT_REASON_CLOSE_SESSION) ||
2172 ((reason_code == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION) && 2171 ((reason_code == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION) &&
2173 (hdr->cid == conn->cid))) 2172 (hdr->cid == conn->cid)))
2174 logout_remove = 1; 2173 logout_remove = 1;
2175 2174
2176 spin_lock_bh(&conn->cmd_lock); 2175 spin_lock_bh(&conn->cmd_lock);
2177 list_add_tail(&cmd->i_list, &conn->conn_cmd_list); 2176 list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
2178 spin_unlock_bh(&conn->cmd_lock); 2177 spin_unlock_bh(&conn->cmd_lock);
2179 2178
2180 if (reason_code != ISCSI_LOGOUT_REASON_RECOVERY) 2179 if (reason_code != ISCSI_LOGOUT_REASON_RECOVERY)
2181 iscsit_ack_from_expstatsn(conn, hdr->exp_statsn); 2180 iscsit_ack_from_expstatsn(conn, hdr->exp_statsn);
2182 2181
2183 /* 2182 /*
2184 * Immediate commands are executed, well, immediately. 2183 * Immediate commands are executed, well, immediately.
2185 * Non-Immediate Logout Commands are executed in CmdSN order. 2184 * Non-Immediate Logout Commands are executed in CmdSN order.
2186 */ 2185 */
2187 if (hdr->opcode & ISCSI_OP_IMMEDIATE) { 2186 if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
2188 int ret = iscsit_execute_cmd(cmd, 0); 2187 int ret = iscsit_execute_cmd(cmd, 0);
2189 2188
2190 if (ret < 0) 2189 if (ret < 0)
2191 return ret; 2190 return ret;
2192 } else { 2191 } else {
2193 cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn); 2192 cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
2194 if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) { 2193 if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
2195 logout_remove = 0; 2194 logout_remove = 0;
2196 } else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) { 2195 } else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) {
2197 return iscsit_add_reject_from_cmd( 2196 return iscsit_add_reject_from_cmd(
2198 ISCSI_REASON_PROTOCOL_ERROR, 2197 ISCSI_REASON_PROTOCOL_ERROR,
2199 1, 0, buf, cmd); 2198 1, 0, buf, cmd);
2200 } 2199 }
2201 } 2200 }
2202 2201
2203 return logout_remove; 2202 return logout_remove;
2204 } 2203 }
2205 2204
static int iscsit_handle_snack(
	struct iscsi_conn *conn,
	unsigned char *buf)
{
	u32 unpacked_lun;
	u64 lun;
	struct iscsi_snack *hdr;

	hdr = (struct iscsi_snack *) buf;
	hdr->flags &= ~ISCSI_FLAG_CMD_FINAL;
	lun = get_unaligned_le64(&hdr->lun);
	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
	hdr->itt = be32_to_cpu(hdr->itt);
	hdr->ttt = be32_to_cpu(hdr->ttt);
	hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
	hdr->begrun = be32_to_cpu(hdr->begrun);
	hdr->runlength = be32_to_cpu(hdr->runlength);

	pr_debug("Got ISCSI_INIT_SNACK, ITT: 0x%08x, ExpStatSN:"
		" 0x%08x, Type: 0x%02x, BegRun: 0x%08x, RunLength: 0x%08x,"
		" CID: %hu\n", hdr->itt, hdr->exp_statsn, hdr->flags,
		hdr->begrun, hdr->runlength, conn->cid);

	if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
		pr_err("Initiator sent SNACK request while in"
			" ErrorRecoveryLevel=0.\n");
		return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
					buf, conn);
	}
	/*
	 * SNACK_DATA and SNACK_R2T are both 0, so check which function to
	 * call from inside iscsi_send_recovery_datain_or_r2t().
	 */
	switch (hdr->flags & ISCSI_FLAG_SNACK_TYPE_MASK) {
	case 0:
		return iscsit_handle_recovery_datain_or_r2t(conn, buf,
			hdr->itt, hdr->ttt, hdr->begrun, hdr->runlength);
	case ISCSI_FLAG_SNACK_TYPE_STATUS:
		return iscsit_handle_status_snack(conn, hdr->itt, hdr->ttt,
			hdr->begrun, hdr->runlength);
	case ISCSI_FLAG_SNACK_TYPE_DATA_ACK:
		return iscsit_handle_data_ack(conn, hdr->ttt, hdr->begrun,
			hdr->runlength);
	case ISCSI_FLAG_SNACK_TYPE_RDATA:
		/* FIXME: Support R-Data SNACK */
		pr_err("R-Data SNACK Not Supported.\n");
		return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
					buf, conn);
	default:
		pr_err("Unknown SNACK type 0x%02x, protocol"
			" error.\n", hdr->flags & 0x0f);
		return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
					buf, conn);
	}

	return 0;
}

static void iscsit_rx_thread_wait_for_tcp(struct iscsi_conn *conn)
{
	if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) ||
	    (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) {
		wait_for_completion_interruptible_timeout(
			&conn->rx_half_close_comp,
			ISCSI_RX_THREAD_TCP_TIMEOUT * HZ);
	}
}

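/*
 * Receive the Immediate Data payload that arrived attached to a SCSI
 * Command PDU, directly into the iovecs mapped over the command's data
 * scatterlist at offset cmd->write_data_done.
 */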
static int iscsit_handle_immediate_data(
	struct iscsi_cmd *cmd,
	unsigned char *buf,
	u32 length)
{
	int iov_ret, rx_got = 0, rx_size = 0;
	u32 checksum, iov_count = 0, padding = 0;
	struct iscsi_conn *conn = cmd->conn;
	struct kvec *iov;

	iov_ret = iscsit_map_iovec(cmd, cmd->iov_data, cmd->write_data_done, length);
	if (iov_ret < 0)
		return IMMEDIATE_DATA_CANNOT_RECOVER;

	rx_size = length;
	iov_count = iov_ret;
	iov = &cmd->iov_data[0];

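	/*
	 * iSCSI data segments are padded out to a 4-byte boundary;
	 * (-length) & 3 yields the pad size, e.g. length == 5 -> 3 pad
	 * bytes, length == 8 -> 0.
	 */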
	padding = ((-length) & 3);
	if (padding != 0) {
		iov[iov_count].iov_base = cmd->pad_bytes;
		iov[iov_count++].iov_len = padding;
		rx_size += padding;
	}

	if (conn->conn_ops->DataDigest) {
		iov[iov_count].iov_base = &checksum;
		iov[iov_count++].iov_len = ISCSI_CRC_LEN;
		rx_size += ISCSI_CRC_LEN;
	}

	rx_got = rx_data(conn, &cmd->iov_data[0], iov_count, rx_size);

	iscsit_unmap_iovec(cmd);

	if (rx_got != rx_size) {
		iscsit_rx_thread_wait_for_tcp(conn);
		return IMMEDIATE_DATA_CANNOT_RECOVER;
	}

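	/*
	 * When DataDigest is negotiated, recompute the CRC32C over the
	 * received data plus padding and compare it against the digest
	 * that followed the data segment on the wire.
	 */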
	if (conn->conn_ops->DataDigest) {
		u32 data_crc;

		data_crc = iscsit_do_crypto_hash_sg(&conn->conn_rx_hash, cmd,
						cmd->write_data_done, length, padding,
						cmd->pad_bytes);

		if (checksum != data_crc) {
			pr_err("ImmediateData CRC32C DataDigest 0x%08x"
				" does not match computed 0x%08x\n", checksum,
				data_crc);

			if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
				pr_err("Unable to recover from"
					" Immediate Data digest failure while"
					" in ERL=0.\n");
				iscsit_add_reject_from_cmd(
						ISCSI_REASON_DATA_DIGEST_ERROR,
						1, 0, buf, cmd);
				return IMMEDIATE_DATA_CANNOT_RECOVER;
			} else {
				iscsit_add_reject_from_cmd(
						ISCSI_REASON_DATA_DIGEST_ERROR,
						0, 0, buf, cmd);
				return IMMEDIATE_DATA_ERL1_CRC_FAILURE;
			}
		} else {
			pr_debug("Got CRC32C DataDigest 0x%08x for"
				" %u bytes of Immediate Data\n", checksum,
				length);
		}
	}

	cmd->write_data_done += length;

	if (cmd->write_data_done == cmd->data_length) {
		spin_lock_bh(&cmd->istate_lock);
		cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
		cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
		spin_unlock_bh(&cmd->istate_lock);
	}

	return IMMEDIATE_DATA_NORMAL_OPERATION;
}

/*
 * Called with sess->conn_lock held.
 */
/* #warning iscsi_build_conn_drop_async_message() only sends out on connections
	with active network interface */
static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
{
	struct iscsi_cmd *cmd;
	struct iscsi_conn *conn_p;

	/*
	 * Only send an Asynchronous Message on connections whose network
	 * interface is still functional.
	 */
	list_for_each_entry(conn_p, &conn->sess->sess_conn_list, conn_list) {
		if (conn_p->conn_state == TARG_CONN_STATE_LOGGED_IN) {
			iscsit_inc_conn_usage_count(conn_p);
			break;
		}
	}

	if (!conn_p)
		return;

	cmd = iscsit_allocate_cmd(conn_p, GFP_KERNEL);
	if (!cmd) {
		iscsit_dec_conn_usage_count(conn_p);
		return;
	}

	cmd->logout_cid = conn->cid;
	cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT;
	cmd->i_state = ISTATE_SEND_ASYNCMSG;

	spin_lock_bh(&conn_p->cmd_lock);
	list_add_tail(&cmd->i_list, &conn_p->conn_cmd_list);
	spin_unlock_bh(&conn_p->cmd_lock);

	iscsit_add_cmd_to_response_queue(cmd, conn_p, cmd->i_state);
	iscsit_dec_conn_usage_count(conn_p);
}

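/*
 * Build the RFC 3720 Async Message with AsyncEvent
 * ISCSI_ASYNC_MSG_DROPPING_CONNECTION: Parameter1 carries the CID being
 * dropped, Parameter2 DefaultTime2Wait, and Parameter3 DefaultTime2Retain.
 */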
static int iscsit_send_conn_drop_async_message(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn)
{
	struct iscsi_async *hdr;

	cmd->tx_size = ISCSI_HDR_LEN;
	cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT;

	hdr = (struct iscsi_async *) cmd->pdu;
	hdr->opcode = ISCSI_OP_ASYNC_EVENT;
	hdr->flags = ISCSI_FLAG_CMD_FINAL;
	cmd->init_task_tag = 0xFFFFFFFF;
	cmd->targ_xfer_tag = 0xFFFFFFFF;
	put_unaligned_be64(0xFFFFFFFFFFFFFFFFULL, &hdr->rsvd4[0]);
	cmd->stat_sn = conn->stat_sn++;
	hdr->statsn = cpu_to_be32(cmd->stat_sn);
	hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
	hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
	hdr->async_event = ISCSI_ASYNC_MSG_DROPPING_CONNECTION;
	hdr->param1 = cpu_to_be16(cmd->logout_cid);
	hdr->param2 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Wait);
	hdr->param3 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Retain);

	if (conn->conn_ops->HeaderDigest) {
		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];

		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
				(unsigned char *)hdr, ISCSI_HDR_LEN,
				0, NULL, (u8 *)header_digest);

		cmd->tx_size += ISCSI_CRC_LEN;
		pr_debug("Attaching CRC32C HeaderDigest to"
			" Async Message 0x%08x\n", *header_digest);
	}

	cmd->iov_misc[0].iov_base = cmd->pdu;
	cmd->iov_misc[0].iov_len = cmd->tx_size;
	cmd->iov_misc_count = 1;

	pr_debug("Sending Connection Dropped Async Message StatSN:"
		" 0x%08x, for CID: %hu on CID: %hu\n", cmd->stat_sn,
		cmd->logout_cid, conn->cid);
	return 0;
}

static int iscsit_send_data_in(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn,
	int *eodr)
{
	int iov_ret = 0, set_statsn = 0;
	u32 iov_count = 0, tx_size = 0;
	struct iscsi_datain datain;
	struct iscsi_datain_req *dr;
	struct iscsi_data_rsp *hdr;
	struct kvec *iov;

	memset(&datain, 0, sizeof(struct iscsi_datain));
	dr = iscsit_get_datain_values(cmd, &datain);
	if (!dr) {
		pr_err("iscsit_get_datain_values failed for ITT: 0x%08x\n",
				cmd->init_task_tag);
		return -1;
	}

	/*
	 * Be paranoid and double check the logic for now.
	 */
	if ((datain.offset + datain.length) > cmd->data_length) {
		pr_err("Command ITT: 0x%08x, datain.offset: %u and"
			" datain.length: %u exceeds cmd->data_length: %u\n",
			cmd->init_task_tag, datain.offset, datain.length,
			cmd->data_length);
		return -1;
	}

	spin_lock_bh(&conn->sess->session_stats_lock);
	conn->sess->tx_data_octets += datain.length;
	if (conn->sess->se_sess->se_node_acl) {
		spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock);
		conn->sess->se_sess->se_node_acl->read_bytes += datain.length;
		spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock);
	}
	spin_unlock_bh(&conn->sess->session_stats_lock);
	/*
	 * Special case for successful execution w/ both DATAIN
	 * and Sense Data.
	 */
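	/*
	 * Phase collapse (the S-bit on the final Data-In) is not allowed
	 * when sense data is pending, so clear the status flag and let a
	 * separate SCSI Response PDU carry status and sense instead.
	 */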
	if ((datain.flags & ISCSI_FLAG_DATA_STATUS) &&
	    (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE))
		datain.flags &= ~ISCSI_FLAG_DATA_STATUS;
	else {
		if ((dr->dr_complete == DATAIN_COMPLETE_NORMAL) ||
		    (dr->dr_complete == DATAIN_COMPLETE_CONNECTION_RECOVERY)) {
			iscsit_increment_maxcmdsn(cmd, conn->sess);
			cmd->stat_sn = conn->stat_sn++;
			set_statsn = 1;
		} else if (dr->dr_complete ==
				DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY)
			set_statsn = 1;
	}

	hdr = (struct iscsi_data_rsp *) cmd->pdu;
	memset(hdr, 0, ISCSI_HDR_LEN);
	hdr->opcode = ISCSI_OP_SCSI_DATA_IN;
	hdr->flags = datain.flags;
	if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
		if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
			hdr->flags |= ISCSI_FLAG_DATA_OVERFLOW;
			hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
		} else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
			hdr->flags |= ISCSI_FLAG_DATA_UNDERFLOW;
			hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
		}
	}
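	/* DataSegmentLength is a 24-bit field; hton24() stores it big-endian. */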
	hton24(hdr->dlength, datain.length);
	if (hdr->flags & ISCSI_FLAG_DATA_ACK)
		int_to_scsilun(cmd->se_cmd.orig_fe_lun,
				(struct scsi_lun *)&hdr->lun);
	else
		put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun);

	hdr->itt = cpu_to_be32(cmd->init_task_tag);
	hdr->ttt = (hdr->flags & ISCSI_FLAG_DATA_ACK) ?
			cpu_to_be32(cmd->targ_xfer_tag) :
			0xFFFFFFFF;
	hdr->statsn = (set_statsn) ? cpu_to_be32(cmd->stat_sn) :
			0xFFFFFFFF;
	hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
	hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
	hdr->datasn = cpu_to_be32(datain.data_sn);
	hdr->offset = cpu_to_be32(datain.offset);

	iov = &cmd->iov_data[0];
	iov[iov_count].iov_base = cmd->pdu;
	iov[iov_count++].iov_len = ISCSI_HDR_LEN;
	tx_size += ISCSI_HDR_LEN;

	if (conn->conn_ops->HeaderDigest) {
		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];

		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
				(unsigned char *)hdr, ISCSI_HDR_LEN,
				0, NULL, (u8 *)header_digest);

		iov[0].iov_len += ISCSI_CRC_LEN;
		tx_size += ISCSI_CRC_LEN;

		pr_debug("Attaching CRC32 HeaderDigest"
			" for DataIN PDU 0x%08x\n", *header_digest);
	}

	iov_ret = iscsit_map_iovec(cmd, &cmd->iov_data[1], datain.offset, datain.length);
	if (iov_ret < 0)
		return -1;

	iov_count += iov_ret;
	tx_size += datain.length;

	cmd->padding = ((-datain.length) & 3);
	if (cmd->padding) {
		iov[iov_count].iov_base = cmd->pad_bytes;
		iov[iov_count++].iov_len = cmd->padding;
		tx_size += cmd->padding;

		pr_debug("Attaching %u padding bytes\n",
				cmd->padding);
	}
	if (conn->conn_ops->DataDigest) {
		cmd->data_crc = iscsit_do_crypto_hash_sg(&conn->conn_tx_hash, cmd,
				datain.offset, datain.length, cmd->padding, cmd->pad_bytes);

		iov[iov_count].iov_base = &cmd->data_crc;
		iov[iov_count++].iov_len = ISCSI_CRC_LEN;
		tx_size += ISCSI_CRC_LEN;

		pr_debug("Attached CRC32C DataDigest %d bytes, crc"
			" 0x%08x\n", datain.length+cmd->padding, cmd->data_crc);
	}

	cmd->iov_data_count = iov_count;
	cmd->tx_size = tx_size;

	pr_debug("Built DataIN ITT: 0x%08x, StatSN: 0x%08x,"
		" DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n",
		cmd->init_task_tag, ntohl(hdr->statsn), ntohl(hdr->datasn),
		ntohl(hdr->offset), datain.length, conn->cid);

	if (dr->dr_complete) {
		*eodr = (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ?
				2 : 1;
		iscsit_free_datain_req(cmd, dr);
	}

	return 0;
}

static int iscsit_send_logout_response(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn)
{
	int niov = 0, tx_size;
	struct iscsi_conn *logout_conn = NULL;
	struct iscsi_conn_recovery *cr = NULL;
	struct iscsi_session *sess = conn->sess;
	struct kvec *iov;
	struct iscsi_logout_rsp *hdr;
	/*
	 * The actual shutting down of Sessions and/or Connections
	 * for CLOSESESSION and CLOSECONNECTION Logout Requests
	 * is done in scsi_logout_post_handler().
	 */
	switch (cmd->logout_reason) {
	case ISCSI_LOGOUT_REASON_CLOSE_SESSION:
		pr_debug("iSCSI session logout successful, setting"
			" logout response to ISCSI_LOGOUT_SUCCESS.\n");
		cmd->logout_response = ISCSI_LOGOUT_SUCCESS;
		break;
	case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION:
		if (cmd->logout_response == ISCSI_LOGOUT_CID_NOT_FOUND)
			break;
		/*
		 * For CLOSECONNECTION logout requests carrying
		 * a matching logout CID -> local CID, the reference
		 * for the local CID will have been incremented in
		 * iscsi_logout_closeconnection().
		 *
		 * For CLOSECONNECTION logout requests carrying
		 * a different CID than the connection it arrived
		 * on, the connection responding to cmd->logout_cid
		 * is stopped in iscsit_logout_post_handler_diffcid().
		 */

		pr_debug("iSCSI CID: %hu logout on CID: %hu"
			" successful.\n", cmd->logout_cid, conn->cid);
		cmd->logout_response = ISCSI_LOGOUT_SUCCESS;
		break;
	case ISCSI_LOGOUT_REASON_RECOVERY:
		if ((cmd->logout_response == ISCSI_LOGOUT_RECOVERY_UNSUPPORTED) ||
		    (cmd->logout_response == ISCSI_LOGOUT_CLEANUP_FAILED))
			break;
		/*
		 * If the connection is still active from our point of view
		 * force connection recovery to occur.
		 */
		logout_conn = iscsit_get_conn_from_cid_rcfr(sess,
				cmd->logout_cid);
		if (logout_conn) {
			iscsit_connection_reinstatement_rcfr(logout_conn);
			iscsit_dec_conn_usage_count(logout_conn);
		}

		cr = iscsit_get_inactive_connection_recovery_entry(
				conn->sess, cmd->logout_cid);
		if (!cr) {
			pr_err("Unable to locate CID: %hu for"
				" REMOVECONNFORRECOVERY Logout Request.\n",
				cmd->logout_cid);
			cmd->logout_response = ISCSI_LOGOUT_CID_NOT_FOUND;
			break;
		}

		iscsit_discard_cr_cmds_by_expstatsn(cr, cmd->exp_stat_sn);

		pr_debug("iSCSI REMOVECONNFORRECOVERY logout"
			" for recovery for CID: %hu on CID: %hu successful.\n",
			cmd->logout_cid, conn->cid);
		cmd->logout_response = ISCSI_LOGOUT_SUCCESS;
		break;
	default:
		pr_err("Unknown cmd->logout_reason: 0x%02x\n",
			cmd->logout_reason);
		return -1;
	}

	tx_size = ISCSI_HDR_LEN;
	hdr = (struct iscsi_logout_rsp *)cmd->pdu;
	memset(hdr, 0, ISCSI_HDR_LEN);
	hdr->opcode = ISCSI_OP_LOGOUT_RSP;
	hdr->flags |= ISCSI_FLAG_CMD_FINAL;
	hdr->response = cmd->logout_response;
	hdr->itt = cpu_to_be32(cmd->init_task_tag);
	cmd->stat_sn = conn->stat_sn++;
	hdr->statsn = cpu_to_be32(cmd->stat_sn);

	iscsit_increment_maxcmdsn(cmd, conn->sess);
	hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
	hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);

	iov = &cmd->iov_misc[0];
	iov[niov].iov_base = cmd->pdu;
	iov[niov++].iov_len = ISCSI_HDR_LEN;

	if (conn->conn_ops->HeaderDigest) {
		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];

		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
				(unsigned char *)hdr, ISCSI_HDR_LEN,
				0, NULL, (u8 *)header_digest);

		iov[0].iov_len += ISCSI_CRC_LEN;
		tx_size += ISCSI_CRC_LEN;
		pr_debug("Attaching CRC32C HeaderDigest to"
			" Logout Response 0x%08x\n", *header_digest);
	}
	cmd->iov_misc_count = niov;
	cmd->tx_size = tx_size;

	pr_debug("Sending Logout Response ITT: 0x%08x StatSN:"
		" 0x%08x Response: 0x%02x CID: %hu on CID: %hu\n",
		cmd->init_task_tag, cmd->stat_sn, hdr->response,
		cmd->logout_cid, conn->cid);

	return 0;
}

/*
 * Unsolicited NOPIN, either requesting a response or not.
 */
static int iscsit_send_unsolicited_nopin(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn,
	int want_response)
{
	int tx_size = ISCSI_HDR_LEN;
	struct iscsi_nopin *hdr;

	hdr = (struct iscsi_nopin *) cmd->pdu;
	memset(hdr, 0, ISCSI_HDR_LEN);
	hdr->opcode = ISCSI_OP_NOOP_IN;
	hdr->flags |= ISCSI_FLAG_CMD_FINAL;
	hdr->itt = cpu_to_be32(cmd->init_task_tag);
	hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
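	/*
	 * Report the current StatSN but do not advance it for an
	 * unsolicited NOP-In.
	 */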
	cmd->stat_sn = conn->stat_sn;
	hdr->statsn = cpu_to_be32(cmd->stat_sn);
	hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
	hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);

	if (conn->conn_ops->HeaderDigest) {
		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];

		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
				(unsigned char *)hdr, ISCSI_HDR_LEN,
				0, NULL, (u8 *)header_digest);

		tx_size += ISCSI_CRC_LEN;
		pr_debug("Attaching CRC32C HeaderDigest to"
			" NopIN 0x%08x\n", *header_digest);
	}

	cmd->iov_misc[0].iov_base = cmd->pdu;
	cmd->iov_misc[0].iov_len = tx_size;
	cmd->iov_misc_count = 1;
	cmd->tx_size = tx_size;

	pr_debug("Sending Unsolicited NOPIN TTT: 0x%08x StatSN:"
		" 0x%08x CID: %hu\n", hdr->ttt, cmd->stat_sn, conn->cid);

	return 0;
}

static int iscsit_send_nopin_response(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn)
{
	int niov = 0, tx_size;
	u32 padding = 0;
	struct kvec *iov;
	struct iscsi_nopin *hdr;

	tx_size = ISCSI_HDR_LEN;
	hdr = (struct iscsi_nopin *) cmd->pdu;
	memset(hdr, 0, ISCSI_HDR_LEN);
	hdr->opcode = ISCSI_OP_NOOP_IN;
	hdr->flags |= ISCSI_FLAG_CMD_FINAL;
	hton24(hdr->dlength, cmd->buf_ptr_size);
	put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun);
	hdr->itt = cpu_to_be32(cmd->init_task_tag);
	hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
	cmd->stat_sn = conn->stat_sn++;
	hdr->statsn = cpu_to_be32(cmd->stat_sn);

	iscsit_increment_maxcmdsn(cmd, conn->sess);
	hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
	hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);

	iov = &cmd->iov_misc[0];
	iov[niov].iov_base = cmd->pdu;
	iov[niov++].iov_len = ISCSI_HDR_LEN;

	if (conn->conn_ops->HeaderDigest) {
		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];

		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
				(unsigned char *)hdr, ISCSI_HDR_LEN,
				0, NULL, (u8 *)header_digest);

		iov[0].iov_len += ISCSI_CRC_LEN;
		tx_size += ISCSI_CRC_LEN;
		pr_debug("Attaching CRC32C HeaderDigest"
			" to NopIn 0x%08x\n", *header_digest);
	}

	/*
	 * NOPOUT Ping Data is attached to struct iscsi_cmd->buf_ptr.
	 * NOPOUT DataSegmentLength is at struct iscsi_cmd->buf_ptr_size.
	 */
	if (cmd->buf_ptr_size) {
		iov[niov].iov_base = cmd->buf_ptr;
		iov[niov++].iov_len = cmd->buf_ptr_size;
		tx_size += cmd->buf_ptr_size;

		pr_debug("Echoing back %u bytes of ping"
			" data.\n", cmd->buf_ptr_size);

		padding = ((-cmd->buf_ptr_size) & 3);
		if (padding != 0) {
			iov[niov].iov_base = &cmd->pad_bytes;
			iov[niov++].iov_len = padding;
			tx_size += padding;
			pr_debug("Attaching %u additional"
				" padding bytes.\n", padding);
		}
		if (conn->conn_ops->DataDigest) {
			iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
				cmd->buf_ptr, cmd->buf_ptr_size,
				padding, (u8 *)&cmd->pad_bytes,
				(u8 *)&cmd->data_crc);

			iov[niov].iov_base = &cmd->data_crc;
			iov[niov++].iov_len = ISCSI_CRC_LEN;
			tx_size += ISCSI_CRC_LEN;
			pr_debug("Attached DataDigest for %u"
				" bytes of ping data, CRC 0x%08x\n",
				cmd->buf_ptr_size, cmd->data_crc);
		}
	}

	cmd->iov_misc_count = niov;
	cmd->tx_size = tx_size;

	pr_debug("Sending NOPIN Response ITT: 0x%08x, TTT:"
		" 0x%08x, StatSN: 0x%08x, Length %u\n", cmd->init_task_tag,
		cmd->targ_xfer_tag, cmd->stat_sn, cmd->buf_ptr_size);

	return 0;
}

int iscsit_send_r2t(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn)
{
	int tx_size = 0;
	struct iscsi_r2t *r2t;
	struct iscsi_r2t_rsp *hdr;

	r2t = iscsit_get_r2t_from_list(cmd);
	if (!r2t)
		return -1;

	hdr = (struct iscsi_r2t_rsp *) cmd->pdu;
	memset(hdr, 0, ISCSI_HDR_LEN);
	hdr->opcode = ISCSI_OP_R2T;
	hdr->flags |= ISCSI_FLAG_CMD_FINAL;
	int_to_scsilun(cmd->se_cmd.orig_fe_lun,
			(struct scsi_lun *)&hdr->lun);
	hdr->itt = cpu_to_be32(cmd->init_task_tag);
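	/*
	 * Allocate the next Target Transfer Tag under ttt_lock, skipping
	 * 0xFFFFFFFF, which iSCSI reserves to mean "no TTT".
	 */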
	spin_lock_bh(&conn->sess->ttt_lock);
	r2t->targ_xfer_tag = conn->sess->targ_xfer_tag++;
	if (r2t->targ_xfer_tag == 0xFFFFFFFF)
		r2t->targ_xfer_tag = conn->sess->targ_xfer_tag++;
	spin_unlock_bh(&conn->sess->ttt_lock);
	hdr->ttt = cpu_to_be32(r2t->targ_xfer_tag);
	hdr->statsn = cpu_to_be32(conn->stat_sn);
	hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
	hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
	hdr->r2tsn = cpu_to_be32(r2t->r2t_sn);
	hdr->data_offset = cpu_to_be32(r2t->offset);
	hdr->data_length = cpu_to_be32(r2t->xfer_len);

	cmd->iov_misc[0].iov_base = cmd->pdu;
	cmd->iov_misc[0].iov_len = ISCSI_HDR_LEN;
	tx_size += ISCSI_HDR_LEN;

	if (conn->conn_ops->HeaderDigest) {
		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];

		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
				(unsigned char *)hdr, ISCSI_HDR_LEN,
				0, NULL, (u8 *)header_digest);

		cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN;
		tx_size += ISCSI_CRC_LEN;
		pr_debug("Attaching CRC32 HeaderDigest for R2T"
			" PDU 0x%08x\n", *header_digest);
	}

	pr_debug("Built %sR2T, ITT: 0x%08x, TTT: 0x%08x, StatSN:"
		" 0x%08x, R2TSN: 0x%08x, Offset: %u, DDTL: %u, CID: %hu\n",
		(!r2t->recovery_r2t) ? "" : "Recovery ", cmd->init_task_tag,
		r2t->targ_xfer_tag, ntohl(hdr->statsn), r2t->r2t_sn,
		r2t->offset, r2t->xfer_len, conn->cid);

	cmd->iov_misc_count = 1;
	cmd->tx_size = tx_size;

	spin_lock_bh(&cmd->r2t_lock);
	r2t->sent_r2t = 1;
	spin_unlock_bh(&cmd->r2t_lock);

	return 0;
}

/*
 * type 0: Normal Operation.
 * type 1: Called from Storage Transport.
 * type 2: Called from iscsi_task_reassign_complete_write() for
 *	   connection recovery.
 */
int iscsit_build_r2ts_for_cmd(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn,
	int type)
{
	int first_r2t = 1;
	u32 offset = 0, xfer_len = 0;

	spin_lock_bh(&cmd->r2t_lock);
	if (cmd->cmd_flags & ICF_SENT_LAST_R2T) {
		spin_unlock_bh(&cmd->r2t_lock);
		return 0;
	}

	if (conn->sess->sess_ops->DataSequenceInOrder && (type != 2))
		if (cmd->r2t_offset < cmd->write_data_done)
			cmd->r2t_offset = cmd->write_data_done;

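	/*
	 * Each R2T solicits at most MaxBurstLength bytes; the final R2T is
	 * clamped to the remaining cmd->data_length. E.g. with
	 * MaxBurstLength = 65536, a 150000-byte write needs three R2Ts
	 * (65536 + 65536 + 18928 bytes).
	 */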
	while (cmd->outstanding_r2ts < conn->sess->sess_ops->MaxOutstandingR2T) {
		if (conn->sess->sess_ops->DataSequenceInOrder) {
			offset = cmd->r2t_offset;

			if (first_r2t && (type == 2)) {
				xfer_len = ((offset +
					(conn->sess->sess_ops->MaxBurstLength -
					cmd->next_burst_len) >
					cmd->data_length) ?
					(cmd->data_length - offset) :
					(conn->sess->sess_ops->MaxBurstLength -
					cmd->next_burst_len));
			} else {
				xfer_len = ((offset +
					conn->sess->sess_ops->MaxBurstLength) >
					cmd->data_length) ?
					(cmd->data_length - offset) :
					conn->sess->sess_ops->MaxBurstLength;
			}
			cmd->r2t_offset += xfer_len;

			if (cmd->r2t_offset == cmd->data_length)
				cmd->cmd_flags |= ICF_SENT_LAST_R2T;
		} else {
			struct iscsi_seq *seq;

			seq = iscsit_get_seq_holder_for_r2t(cmd);
			if (!seq) {
				spin_unlock_bh(&cmd->r2t_lock);
				return -1;
			}

			offset = seq->offset;
			xfer_len = seq->xfer_len;

			if (cmd->seq_send_order == cmd->seq_count)
				cmd->cmd_flags |= ICF_SENT_LAST_R2T;
		}
		cmd->outstanding_r2ts++;
		first_r2t = 0;

		if (iscsit_add_r2t_to_list(cmd, offset, xfer_len, 0, 0) < 0) {
			spin_unlock_bh(&cmd->r2t_lock);
			return -1;
		}

		if (cmd->cmd_flags & ICF_SENT_LAST_R2T)
			break;
	}
	spin_unlock_bh(&cmd->r2t_lock);

	return 0;
}

static int iscsit_send_status(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn)
{
	u8 iov_count = 0, recovery;
	u32 padding = 0, tx_size = 0;
	struct iscsi_scsi_rsp *hdr;
	struct kvec *iov;

	recovery = (cmd->i_state != ISTATE_SEND_STATUS);
	if (!recovery)
		cmd->stat_sn = conn->stat_sn++;

	spin_lock_bh(&conn->sess->session_stats_lock);
	conn->sess->rsp_pdus++;
	spin_unlock_bh(&conn->sess->session_stats_lock);

	hdr = (struct iscsi_scsi_rsp *) cmd->pdu;
	memset(hdr, 0, ISCSI_HDR_LEN);
	hdr->opcode = ISCSI_OP_SCSI_CMD_RSP;
	hdr->flags |= ISCSI_FLAG_CMD_FINAL;
	if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
		hdr->flags |= ISCSI_FLAG_CMD_OVERFLOW;
		hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
	} else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
		hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW;
		hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
	}
	hdr->response = cmd->iscsi_response;
	hdr->cmd_status = cmd->se_cmd.scsi_status;
	hdr->itt = cpu_to_be32(cmd->init_task_tag);
	hdr->statsn = cpu_to_be32(cmd->stat_sn);

	iscsit_increment_maxcmdsn(cmd, conn->sess);
	hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
	hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);

	iov = &cmd->iov_misc[0];
	iov[iov_count].iov_base = cmd->pdu;
	iov[iov_count++].iov_len = ISCSI_HDR_LEN;
	tx_size += ISCSI_HDR_LEN;

	/*
	 * Attach SENSE DATA payload to iSCSI Response PDU
	 */
	if (cmd->se_cmd.sense_buffer &&
	   ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
	    (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
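		/*
		 * Pad the sense payload to a 4-byte boundary, e.g. an
		 * 18-byte fixed-format sense buffer gets 2 pad bytes and
		 * goes out as 20 bytes.
		 */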
		padding = -(cmd->se_cmd.scsi_sense_length) & 3;
		hton24(hdr->dlength, cmd->se_cmd.scsi_sense_length);
		iov[iov_count].iov_base = cmd->se_cmd.sense_buffer;
		iov[iov_count++].iov_len =
				(cmd->se_cmd.scsi_sense_length + padding);
		tx_size += cmd->se_cmd.scsi_sense_length;

		if (padding) {
			memset(cmd->se_cmd.sense_buffer +
				cmd->se_cmd.scsi_sense_length, 0, padding);
			tx_size += padding;
			pr_debug("Adding %u bytes of padding to"
				" SENSE.\n", padding);
		}

		if (conn->conn_ops->DataDigest) {
			iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
				cmd->se_cmd.sense_buffer,
				(cmd->se_cmd.scsi_sense_length + padding),
				0, NULL, (u8 *)&cmd->data_crc);

			iov[iov_count].iov_base = &cmd->data_crc;
			iov[iov_count++].iov_len = ISCSI_CRC_LEN;
			tx_size += ISCSI_CRC_LEN;

			pr_debug("Attaching CRC32 DataDigest for"
				" SENSE, %u bytes CRC 0x%08x\n",
				(cmd->se_cmd.scsi_sense_length + padding),
				cmd->data_crc);
		}

		pr_debug("Attaching SENSE DATA: %u bytes to iSCSI"
				" Response PDU\n",
				cmd->se_cmd.scsi_sense_length);
	}

	if (conn->conn_ops->HeaderDigest) {
		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];

		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
				(unsigned char *)hdr, ISCSI_HDR_LEN,
				0, NULL, (u8 *)header_digest);

		iov[0].iov_len += ISCSI_CRC_LEN;
		tx_size += ISCSI_CRC_LEN;
		pr_debug("Attaching CRC32 HeaderDigest for Response"
				" PDU 0x%08x\n", *header_digest);
	}

	cmd->iov_misc_count = iov_count;
	cmd->tx_size = tx_size;

	pr_debug("Built %sSCSI Response, ITT: 0x%08x, StatSN: 0x%08x,"
		" Response: 0x%02x, SAM Status: 0x%02x, CID: %hu\n",
		(!recovery) ? "" : "Recovery ", cmd->init_task_tag,
		cmd->stat_sn, 0x00, cmd->se_cmd.scsi_status, conn->cid);

	return 0;
}

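/*
 * Map a target-core TMR response code onto the matching iSCSI Task
 * Management Function response value from RFC 3720.
 */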
3101 static u8 iscsit_convert_tcm_tmr_rsp(struct se_tmr_req *se_tmr) 3100 static u8 iscsit_convert_tcm_tmr_rsp(struct se_tmr_req *se_tmr)
3102 { 3101 {
3103 switch (se_tmr->response) { 3102 switch (se_tmr->response) {
3104 case TMR_FUNCTION_COMPLETE: 3103 case TMR_FUNCTION_COMPLETE:
3105 return ISCSI_TMF_RSP_COMPLETE; 3104 return ISCSI_TMF_RSP_COMPLETE;
3106 case TMR_TASK_DOES_NOT_EXIST: 3105 case TMR_TASK_DOES_NOT_EXIST:
3107 return ISCSI_TMF_RSP_NO_TASK; 3106 return ISCSI_TMF_RSP_NO_TASK;
3108 case TMR_LUN_DOES_NOT_EXIST: 3107 case TMR_LUN_DOES_NOT_EXIST:
3109 return ISCSI_TMF_RSP_NO_LUN; 3108 return ISCSI_TMF_RSP_NO_LUN;
3110 case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED: 3109 case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
3111 return ISCSI_TMF_RSP_NOT_SUPPORTED; 3110 return ISCSI_TMF_RSP_NOT_SUPPORTED;
3112 case TMR_FUNCTION_AUTHORIZATION_FAILED: 3111 case TMR_FUNCTION_AUTHORIZATION_FAILED:
3113 return ISCSI_TMF_RSP_AUTH_FAILED; 3112 return ISCSI_TMF_RSP_AUTH_FAILED;
3114 case TMR_FUNCTION_REJECTED: 3113 case TMR_FUNCTION_REJECTED:
3115 default: 3114 default:
3116 return ISCSI_TMF_RSP_REJECTED; 3115 return ISCSI_TMF_RSP_REJECTED;
3117 } 3116 }
3118 } 3117 }
3119 3118
3120 static int iscsit_send_task_mgt_rsp( 3119 static int iscsit_send_task_mgt_rsp(
3121 struct iscsi_cmd *cmd, 3120 struct iscsi_cmd *cmd,
3122 struct iscsi_conn *conn) 3121 struct iscsi_conn *conn)
3123 { 3122 {
3124 struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req; 3123 struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
3125 struct iscsi_tm_rsp *hdr; 3124 struct iscsi_tm_rsp *hdr;
3126 u32 tx_size = 0; 3125 u32 tx_size = 0;
3127 3126
3128 hdr = (struct iscsi_tm_rsp *) cmd->pdu; 3127 hdr = (struct iscsi_tm_rsp *) cmd->pdu;
3129 memset(hdr, 0, ISCSI_HDR_LEN); 3128 memset(hdr, 0, ISCSI_HDR_LEN);
3130 hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP; 3129 hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
3131 hdr->flags = ISCSI_FLAG_CMD_FINAL; 3130 hdr->flags = ISCSI_FLAG_CMD_FINAL;
3132 hdr->response = iscsit_convert_tcm_tmr_rsp(se_tmr); 3131 hdr->response = iscsit_convert_tcm_tmr_rsp(se_tmr);
3133 hdr->itt = cpu_to_be32(cmd->init_task_tag); 3132 hdr->itt = cpu_to_be32(cmd->init_task_tag);
3134 cmd->stat_sn = conn->stat_sn++; 3133 cmd->stat_sn = conn->stat_sn++;
3135 hdr->statsn = cpu_to_be32(cmd->stat_sn); 3134 hdr->statsn = cpu_to_be32(cmd->stat_sn);
3136 3135
3137 iscsit_increment_maxcmdsn(cmd, conn->sess); 3136 iscsit_increment_maxcmdsn(cmd, conn->sess);
3138 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 3137 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
3139 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 3138 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
3140 3139
3141 cmd->iov_misc[0].iov_base = cmd->pdu; 3140 cmd->iov_misc[0].iov_base = cmd->pdu;
3142 cmd->iov_misc[0].iov_len = ISCSI_HDR_LEN; 3141 cmd->iov_misc[0].iov_len = ISCSI_HDR_LEN;
3143 tx_size += ISCSI_HDR_LEN; 3142 tx_size += ISCSI_HDR_LEN;
3144 3143
3145 if (conn->conn_ops->HeaderDigest) { 3144 if (conn->conn_ops->HeaderDigest) {
3146 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 3145 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
3147 3146
3148 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 3147 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
3149 (unsigned char *)hdr, ISCSI_HDR_LEN, 3148 (unsigned char *)hdr, ISCSI_HDR_LEN,
3150 0, NULL, (u8 *)header_digest); 3149 0, NULL, (u8 *)header_digest);
3151 3150
3152 cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN; 3151 cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN;
3153 tx_size += ISCSI_CRC_LEN; 3152 tx_size += ISCSI_CRC_LEN;
3154 pr_debug("Attaching CRC32 HeaderDigest for Task" 3153 pr_debug("Attaching CRC32 HeaderDigest for Task"
3155 " Mgmt Response PDU 0x%08x\n", *header_digest); 3154 " Mgmt Response PDU 0x%08x\n", *header_digest);
3156 } 3155 }
3157 3156
3158 cmd->iov_misc_count = 1; 3157 cmd->iov_misc_count = 1;
3159 cmd->tx_size = tx_size; 3158 cmd->tx_size = tx_size;
3160 3159
3161 pr_debug("Built Task Management Response ITT: 0x%08x," 3160 pr_debug("Built Task Management Response ITT: 0x%08x,"
3162 " StatSN: 0x%08x, Response: 0x%02x, CID: %hu\n", 3161 " StatSN: 0x%08x, Response: 0x%02x, CID: %hu\n",
3163 cmd->init_task_tag, cmd->stat_sn, hdr->response, conn->cid); 3162 cmd->init_task_tag, cmd->stat_sn, hdr->response, conn->cid);
3164 3163
3165 return 0; 3164 return 0;
3166 } 3165 }
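
Both response builders above lay the digest out the same way: the CRC32C of the 48-byte Basic Header Segment is written into the four bytes that follow it in cmd->pdu, and iov_len/tx_size each grow by ISCSI_CRC_LEN. A minimal userspace sketch of that layout, assuming the standard bitwise CRC32C (reflected polynomial 0x82F63B78) rather than the kernel crypto API used by iscsit_do_crypto_hash_buf():

        #include <stdint.h>
        #include <stddef.h>
        #include <stdio.h>
        #include <string.h>

        /* Bitwise CRC32C (Castagnoli), reflected polynomial 0x82F63B78. */
        static uint32_t crc32c(const uint8_t *buf, size_t len)
        {
                uint32_t crc = 0xFFFFFFFFu;

                while (len--) {
                        crc ^= *buf++;
                        for (int k = 0; k < 8; k++)
                                crc = (crc >> 1) ^ (0x82F63B78u & (0u - (crc & 1)));
                }
                return crc ^ 0xFFFFFFFFu;
        }

        int main(void)
        {
                uint8_t pdu[48 + 4] = { 0 };    /* 48-byte BHS + 4-byte HeaderDigest */
                uint32_t digest = crc32c(pdu, 48);

                /* Byte-order handling elided; the kernel code stores the
                 * crypto API output verbatim after the header. */
                memcpy(&pdu[48], &digest, 4);
                printf("HeaderDigest 0x%08x\n", digest);
                return 0;
        }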
3167 3166
3168 static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd) 3167 static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
3169 { 3168 {
3170 char *payload = NULL; 3169 char *payload = NULL;
3171 struct iscsi_conn *conn = cmd->conn; 3170 struct iscsi_conn *conn = cmd->conn;
3172 struct iscsi_portal_group *tpg; 3171 struct iscsi_portal_group *tpg;
3173 struct iscsi_tiqn *tiqn; 3172 struct iscsi_tiqn *tiqn;
3174 struct iscsi_tpg_np *tpg_np; 3173 struct iscsi_tpg_np *tpg_np;
3175 int buffer_len, end_of_buf = 0, len = 0, payload_len = 0; 3174 int buffer_len, end_of_buf = 0, len = 0, payload_len = 0;
3176 unsigned char buf[256]; 3175 unsigned char buf[256];
3177 3176
3178 buffer_len = (conn->conn_ops->MaxRecvDataSegmentLength > 32768) ? 3177 buffer_len = (conn->conn_ops->MaxRecvDataSegmentLength > 32768) ?
3179 32768 : conn->conn_ops->MaxRecvDataSegmentLength; 3178 32768 : conn->conn_ops->MaxRecvDataSegmentLength;
3180 3179
3181 memset(buf, 0, 256); 3180 memset(buf, 0, 256);
3182 3181
3183 payload = kzalloc(buffer_len, GFP_KERNEL); 3182 payload = kzalloc(buffer_len, GFP_KERNEL);
3184 if (!payload) { 3183 if (!payload) {
3185 pr_err("Unable to allocate memory for sendtargets" 3184 pr_err("Unable to allocate memory for sendtargets"
3186 " response.\n"); 3185 " response.\n");
3187 return -ENOMEM; 3186 return -ENOMEM;
3188 } 3187 }
3189 3188
3190 spin_lock(&tiqn_lock); 3189 spin_lock(&tiqn_lock);
3191 list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) { 3190 list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) {
3192 len = sprintf(buf, "TargetName=%s", tiqn->tiqn); 3191 len = sprintf(buf, "TargetName=%s", tiqn->tiqn);
3193 len += 1; 3192 len += 1;
3194 3193
3195 if ((len + payload_len) > buffer_len) { 3194 if ((len + payload_len) > buffer_len) {
3196 spin_unlock(&tiqn->tiqn_tpg_lock); 3195 spin_unlock(&tiqn->tiqn_tpg_lock);
3197 end_of_buf = 1; 3196 end_of_buf = 1;
3198 goto eob; 3197 goto eob;
3199 } 3198 }
3200 memcpy((void *)payload + payload_len, buf, len); 3199 memcpy((void *)payload + payload_len, buf, len);
3201 payload_len += len; 3200 payload_len += len;
3202 3201
3203 spin_lock(&tiqn->tiqn_tpg_lock); 3202 spin_lock(&tiqn->tiqn_tpg_lock);
3204 list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) { 3203 list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
3205 3204
3206 spin_lock(&tpg->tpg_state_lock); 3205 spin_lock(&tpg->tpg_state_lock);
3207 if ((tpg->tpg_state == TPG_STATE_FREE) || 3206 if ((tpg->tpg_state == TPG_STATE_FREE) ||
3208 (tpg->tpg_state == TPG_STATE_INACTIVE)) { 3207 (tpg->tpg_state == TPG_STATE_INACTIVE)) {
3209 spin_unlock(&tpg->tpg_state_lock); 3208 spin_unlock(&tpg->tpg_state_lock);
3210 continue; 3209 continue;
3211 } 3210 }
3212 spin_unlock(&tpg->tpg_state_lock); 3211 spin_unlock(&tpg->tpg_state_lock);
3213 3212
3214 spin_lock(&tpg->tpg_np_lock); 3213 spin_lock(&tpg->tpg_np_lock);
3215 list_for_each_entry(tpg_np, &tpg->tpg_gnp_list, 3214 list_for_each_entry(tpg_np, &tpg->tpg_gnp_list,
3216 tpg_np_list) { 3215 tpg_np_list) {
3217 len = sprintf(buf, "TargetAddress=" 3216 len = sprintf(buf, "TargetAddress="
3218 "%s%s%s:%hu,%hu", 3217 "%s%s%s:%hu,%hu",
3219 (tpg_np->tpg_np->np_sockaddr.ss_family == AF_INET6) ? 3218 (tpg_np->tpg_np->np_sockaddr.ss_family == AF_INET6) ?
3220 "[" : "", tpg_np->tpg_np->np_ip, 3219 "[" : "", tpg_np->tpg_np->np_ip,
3221 (tpg_np->tpg_np->np_sockaddr.ss_family == AF_INET6) ? 3220 (tpg_np->tpg_np->np_sockaddr.ss_family == AF_INET6) ?
3222 "]" : "", tpg_np->tpg_np->np_port, 3221 "]" : "", tpg_np->tpg_np->np_port,
3223 tpg->tpgt); 3222 tpg->tpgt);
3224 len += 1; 3223 len += 1;
3225 3224
3226 if ((len + payload_len) > buffer_len) { 3225 if ((len + payload_len) > buffer_len) {
3227 spin_unlock(&tpg->tpg_np_lock); 3226 spin_unlock(&tpg->tpg_np_lock);
3228 spin_unlock(&tiqn->tiqn_tpg_lock); 3227 spin_unlock(&tiqn->tiqn_tpg_lock);
3229 end_of_buf = 1; 3228 end_of_buf = 1;
3230 goto eob; 3229 goto eob;
3231 } 3230 }
3232 memcpy((void *)payload + payload_len, buf, len); 3231 memcpy((void *)payload + payload_len, buf, len);
3233 payload_len += len; 3232 payload_len += len;
3234 } 3233 }
3235 spin_unlock(&tpg->tpg_np_lock); 3234 spin_unlock(&tpg->tpg_np_lock);
3236 } 3235 }
3237 spin_unlock(&tiqn->tiqn_tpg_lock); 3236 spin_unlock(&tiqn->tiqn_tpg_lock);
3238 eob: 3237 eob:
3239 if (end_of_buf) 3238 if (end_of_buf)
3240 break; 3239 break;
3241 } 3240 }
3242 spin_unlock(&tiqn_lock); 3241 spin_unlock(&tiqn_lock);
3243 3242
3244 cmd->buf_ptr = payload; 3243 cmd->buf_ptr = payload;
3245 3244
3246 return payload_len; 3245 return payload_len;
3247 } 3246 }
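
The `len += 1` after each sprintf() above is deliberate: iSCSI text responses are key=value pairs separated by NUL bytes (RFC 3720), so each string's terminator is part of the payload. A standalone sketch of the packing, with hypothetical target names and addresses:

        #include <stdio.h>

        int main(void)
        {
                char payload[128];
                int off = 0;

                /* sprintf returns the length without the NUL; the extra +1
                 * keeps the NUL in the buffer as the iSCSI key separator,
                 * matching the "len += 1" in the loop above. */
                off += sprintf(payload + off,
                               "TargetName=iqn.2003-01.org.example:t1") + 1;
                off += sprintf(payload + off,
                               "TargetAddress=10.0.0.1:3260,1") + 1;

                printf("payload_len=%d\n", off);  /* NUL separators included */
                return 0;
        }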
3248 3247
3249 /* 3248 /*
3250 * FIXME: Add support for F_BIT and C_BIT when the length is longer than 3249 * FIXME: Add support for F_BIT and C_BIT when the length is longer than
3251 * MaxRecvDataSegmentLength. 3250 * MaxRecvDataSegmentLength.
3252 */ 3251 */
3253 static int iscsit_send_text_rsp( 3252 static int iscsit_send_text_rsp(
3254 struct iscsi_cmd *cmd, 3253 struct iscsi_cmd *cmd,
3255 struct iscsi_conn *conn) 3254 struct iscsi_conn *conn)
3256 { 3255 {
3257 struct iscsi_text_rsp *hdr; 3256 struct iscsi_text_rsp *hdr;
3258 struct kvec *iov; 3257 struct kvec *iov;
3259 u32 padding = 0, tx_size = 0; 3258 u32 padding = 0, tx_size = 0;
3260 int text_length, iov_count = 0; 3259 int text_length, iov_count = 0;
3261 3260
3262 text_length = iscsit_build_sendtargets_response(cmd); 3261 text_length = iscsit_build_sendtargets_response(cmd);
3263 if (text_length < 0) 3262 if (text_length < 0)
3264 return text_length; 3263 return text_length;
3265 3264
3266 padding = ((-text_length) & 3); 3265 padding = ((-text_length) & 3);
3267 if (padding != 0) { 3266 if (padding != 0) {
3268 memset(cmd->buf_ptr + text_length, 0, padding); 3267 memset(cmd->buf_ptr + text_length, 0, padding);
3269 pr_debug("Attaching %u additional bytes for" 3268 pr_debug("Attaching %u additional bytes for"
3270 " padding.\n", padding); 3269 " padding.\n", padding);
3271 } 3270 }
3272 3271
3273 hdr = (struct iscsi_text_rsp *) cmd->pdu; 3272 hdr = (struct iscsi_text_rsp *) cmd->pdu;
3274 memset(hdr, 0, ISCSI_HDR_LEN); 3273 memset(hdr, 0, ISCSI_HDR_LEN);
3275 hdr->opcode = ISCSI_OP_TEXT_RSP; 3274 hdr->opcode = ISCSI_OP_TEXT_RSP;
3276 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 3275 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
3277 hton24(hdr->dlength, text_length); 3276 hton24(hdr->dlength, text_length);
3278 hdr->itt = cpu_to_be32(cmd->init_task_tag); 3277 hdr->itt = cpu_to_be32(cmd->init_task_tag);
3279 hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag); 3278 hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
3280 cmd->stat_sn = conn->stat_sn++; 3279 cmd->stat_sn = conn->stat_sn++;
3281 hdr->statsn = cpu_to_be32(cmd->stat_sn); 3280 hdr->statsn = cpu_to_be32(cmd->stat_sn);
3282 3281
3283 iscsit_increment_maxcmdsn(cmd, conn->sess); 3282 iscsit_increment_maxcmdsn(cmd, conn->sess);
3284 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 3283 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
3285 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 3284 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
3286 3285
3287 iov = &cmd->iov_misc[0]; 3286 iov = &cmd->iov_misc[0];
3288 3287
3289 iov[iov_count].iov_base = cmd->pdu; 3288 iov[iov_count].iov_base = cmd->pdu;
3290 iov[iov_count++].iov_len = ISCSI_HDR_LEN; 3289 iov[iov_count++].iov_len = ISCSI_HDR_LEN;
3291 iov[iov_count].iov_base = cmd->buf_ptr; 3290 iov[iov_count].iov_base = cmd->buf_ptr;
3292 iov[iov_count++].iov_len = text_length + padding; 3291 iov[iov_count++].iov_len = text_length + padding;
3293 3292
3294 tx_size += (ISCSI_HDR_LEN + text_length + padding); 3293 tx_size += (ISCSI_HDR_LEN + text_length + padding);
3295 3294
3296 if (conn->conn_ops->HeaderDigest) { 3295 if (conn->conn_ops->HeaderDigest) {
3297 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 3296 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
3298 3297
3299 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 3298 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
3300 (unsigned char *)hdr, ISCSI_HDR_LEN, 3299 (unsigned char *)hdr, ISCSI_HDR_LEN,
3301 0, NULL, (u8 *)header_digest); 3300 0, NULL, (u8 *)header_digest);
3302 3301
3303 iov[0].iov_len += ISCSI_CRC_LEN; 3302 iov[0].iov_len += ISCSI_CRC_LEN;
3304 tx_size += ISCSI_CRC_LEN; 3303 tx_size += ISCSI_CRC_LEN;
3305 pr_debug("Attaching CRC32 HeaderDigest for" 3304 pr_debug("Attaching CRC32 HeaderDigest for"
3306 " Text Response PDU 0x%08x\n", *header_digest); 3305 " Text Response PDU 0x%08x\n", *header_digest);
3307 } 3306 }
3308 3307
3309 if (conn->conn_ops->DataDigest) { 3308 if (conn->conn_ops->DataDigest) {
3310 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 3309 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
3311 cmd->buf_ptr, (text_length + padding), 3310 cmd->buf_ptr, (text_length + padding),
3312 0, NULL, (u8 *)&cmd->data_crc); 3311 0, NULL, (u8 *)&cmd->data_crc);
3313 3312
3314 iov[iov_count].iov_base = &cmd->data_crc; 3313 iov[iov_count].iov_base = &cmd->data_crc;
3315 iov[iov_count++].iov_len = ISCSI_CRC_LEN; 3314 iov[iov_count++].iov_len = ISCSI_CRC_LEN;
3316 tx_size += ISCSI_CRC_LEN; 3315 tx_size += ISCSI_CRC_LEN;
3317 3316
3318 pr_debug("Attaching DataDigest for %u bytes of text" 3317 pr_debug("Attaching DataDigest for %u bytes of text"
3319 " data, CRC 0x%08x\n", (text_length + padding), 3318 " data, CRC 0x%08x\n", (text_length + padding),
3320 cmd->data_crc); 3319 cmd->data_crc);
3321 } 3320 }
3322 3321
3323 cmd->iov_misc_count = iov_count; 3322 cmd->iov_misc_count = iov_count;
3324 cmd->tx_size = tx_size; 3323 cmd->tx_size = tx_size;
3325 3324
3326 pr_debug("Built Text Response: ITT: 0x%08x, StatSN: 0x%08x," 3325 pr_debug("Built Text Response: ITT: 0x%08x, StatSN: 0x%08x,"
3327 " Length: %u, CID: %hu\n", cmd->init_task_tag, cmd->stat_sn, 3326 " Length: %u, CID: %hu\n", cmd->init_task_tag, cmd->stat_sn,
3328 text_length, conn->cid); 3327 text_length, conn->cid);
3329 return 0; 3328 return 0;
3330 } 3329 }
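
iscsit_send_text_rsp() pads the text payload to a 4-byte boundary with the `(-text_length) & 3` idiom, which yields exactly the number of bytes needed to reach the next multiple of four, and zero when the length is already aligned. A quick standalone check of the idiom:

        #include <assert.h>

        int main(void)
        {
                for (int len = 0; len < 64; len++) {
                        int pad = (-len) & 3;           /* same idiom as above */

                        assert(pad >= 0 && pad <= 3);   /* never a whole extra word */
                        assert((len + pad) % 4 == 0);   /* always word-aligned */
                }
                return 0;
        }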
3331 3330
3332 static int iscsit_send_reject( 3331 static int iscsit_send_reject(
3333 struct iscsi_cmd *cmd, 3332 struct iscsi_cmd *cmd,
3334 struct iscsi_conn *conn) 3333 struct iscsi_conn *conn)
3335 { 3334 {
3336 u32 iov_count = 0, tx_size = 0; 3335 u32 iov_count = 0, tx_size = 0;
3337 struct iscsi_reject *hdr; 3336 struct iscsi_reject *hdr;
3338 struct kvec *iov; 3337 struct kvec *iov;
3339 3338
3340 hdr = (struct iscsi_reject *) cmd->pdu; 3339 hdr = (struct iscsi_reject *) cmd->pdu;
3341 hdr->opcode = ISCSI_OP_REJECT; 3340 hdr->opcode = ISCSI_OP_REJECT;
3342 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 3341 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
3343 hton24(hdr->dlength, ISCSI_HDR_LEN); 3342 hton24(hdr->dlength, ISCSI_HDR_LEN);
3344 cmd->stat_sn = conn->stat_sn++; 3343 cmd->stat_sn = conn->stat_sn++;
3345 hdr->statsn = cpu_to_be32(cmd->stat_sn); 3344 hdr->statsn = cpu_to_be32(cmd->stat_sn);
3346 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 3345 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
3347 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 3346 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
3348 3347
3349 iov = &cmd->iov_misc[0]; 3348 iov = &cmd->iov_misc[0];
3350 3349
3351 iov[iov_count].iov_base = cmd->pdu; 3350 iov[iov_count].iov_base = cmd->pdu;
3352 iov[iov_count++].iov_len = ISCSI_HDR_LEN; 3351 iov[iov_count++].iov_len = ISCSI_HDR_LEN;
3353 iov[iov_count].iov_base = cmd->buf_ptr; 3352 iov[iov_count].iov_base = cmd->buf_ptr;
3354 iov[iov_count++].iov_len = ISCSI_HDR_LEN; 3353 iov[iov_count++].iov_len = ISCSI_HDR_LEN;
3355 3354
3356 tx_size = (ISCSI_HDR_LEN + ISCSI_HDR_LEN); 3355 tx_size = (ISCSI_HDR_LEN + ISCSI_HDR_LEN);
3357 3356
3358 if (conn->conn_ops->HeaderDigest) { 3357 if (conn->conn_ops->HeaderDigest) {
3359 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 3358 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
3360 3359
3361 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 3360 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
3362 (unsigned char *)hdr, ISCSI_HDR_LEN, 3361 (unsigned char *)hdr, ISCSI_HDR_LEN,
3363 0, NULL, (u8 *)header_digest); 3362 0, NULL, (u8 *)header_digest);
3364 3363
3365 iov[0].iov_len += ISCSI_CRC_LEN; 3364 iov[0].iov_len += ISCSI_CRC_LEN;
3366 tx_size += ISCSI_CRC_LEN; 3365 tx_size += ISCSI_CRC_LEN;
3367 pr_debug("Attaching CRC32 HeaderDigest for" 3366 pr_debug("Attaching CRC32 HeaderDigest for"
3368 " REJECT PDU 0x%08x\n", *header_digest); 3367 " REJECT PDU 0x%08x\n", *header_digest);
3369 } 3368 }
3370 3369
3371 if (conn->conn_ops->DataDigest) { 3370 if (conn->conn_ops->DataDigest) {
3372 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 3371 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
3373 (unsigned char *)cmd->buf_ptr, ISCSI_HDR_LEN, 3372 (unsigned char *)cmd->buf_ptr, ISCSI_HDR_LEN,
3374 0, NULL, (u8 *)&cmd->data_crc); 3373 0, NULL, (u8 *)&cmd->data_crc);
3375 3374
3376 iov[iov_count].iov_base = &cmd->data_crc; 3375 iov[iov_count].iov_base = &cmd->data_crc;
3377 iov[iov_count++].iov_len = ISCSI_CRC_LEN; 3376 iov[iov_count++].iov_len = ISCSI_CRC_LEN;
3378 tx_size += ISCSI_CRC_LEN; 3377 tx_size += ISCSI_CRC_LEN;
3379 pr_debug("Attaching CRC32 DataDigest for REJECT" 3378 pr_debug("Attaching CRC32 DataDigest for REJECT"
3380 " PDU 0x%08x\n", cmd->data_crc); 3379 " PDU 0x%08x\n", cmd->data_crc);
3381 } 3380 }
3382 3381
3383 cmd->iov_misc_count = iov_count; 3382 cmd->iov_misc_count = iov_count;
3384 cmd->tx_size = tx_size; 3383 cmd->tx_size = tx_size;
3385 3384
3386 pr_debug("Built Reject PDU StatSN: 0x%08x, Reason: 0x%02x," 3385 pr_debug("Built Reject PDU StatSN: 0x%08x, Reason: 0x%02x,"
3387 " CID: %hu\n", ntohl(hdr->statsn), hdr->reason, conn->cid); 3386 " CID: %hu\n", ntohl(hdr->statsn), hdr->reason, conn->cid);
3388 3387
3389 return 0; 3388 return 0;
3390 } 3389 }
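
For REJECT PDUs the data segment is the offending 48-byte header itself, so hton24() stores ISCSI_HDR_LEN big-endian in the three-byte dlength field of the BHS. A sketch of that 24-bit packing, where put24be() is a hypothetical stand-in for hton24():

        #include <assert.h>
        #include <stdint.h>

        /* Pack a 24-bit value big-endian, as iSCSI dlength fields expect. */
        static void put24be(uint8_t out[3], uint32_t v)
        {
                out[0] = (v >> 16) & 0xff;
                out[1] = (v >> 8) & 0xff;
                out[2] = v & 0xff;
        }

        int main(void)
        {
                uint8_t dlength[3];

                put24be(dlength, 48);            /* ISCSI_HDR_LEN */
                assert(dlength[0] == 0 && dlength[1] == 0 && dlength[2] == 48);
                return 0;
        }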
3391 3390
3392 static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *conn) 3391 static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *conn)
3393 { 3392 {
3394 if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) || 3393 if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) ||
3395 (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) { 3394 (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) {
3396 wait_for_completion_interruptible_timeout( 3395 wait_for_completion_interruptible_timeout(
3397 &conn->tx_half_close_comp, 3396 &conn->tx_half_close_comp,
3398 ISCSI_TX_THREAD_TCP_TIMEOUT * HZ); 3397 ISCSI_TX_THREAD_TCP_TIMEOUT * HZ);
3399 } 3398 }
3400 } 3399 }
3401 3400
3402 #ifdef CONFIG_SMP 3401 #ifdef CONFIG_SMP
3403 3402
3404 void iscsit_thread_get_cpumask(struct iscsi_conn *conn) 3403 void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
3405 { 3404 {
3406 struct iscsi_thread_set *ts = conn->thread_set; 3405 struct iscsi_thread_set *ts = conn->thread_set;
3407 int ord, cpu; 3406 int ord, cpu;
3408 /* 3407 /*
3409 * thread_id is assigned from iscsit_global->ts_bitmap from 3408 * thread_id is assigned from iscsit_global->ts_bitmap from
3410 * within iscsi_thread_set.c:iscsi_allocate_thread_sets() 3409 * within iscsi_thread_set.c:iscsi_allocate_thread_sets()
3411 * 3410 *
3412 * Here we use thread_id to determine which CPU this 3411 * Here we use thread_id to determine which CPU this
3413 * iSCSI connection's iscsi_thread_set will be scheduled to 3412 * iSCSI connection's iscsi_thread_set will be scheduled to
3414 * execute upon. 3413 * execute upon.
3415 */ 3414 */
3416 ord = ts->thread_id % cpumask_weight(cpu_online_mask); 3415 ord = ts->thread_id % cpumask_weight(cpu_online_mask);
3417 #if 0 3416 #if 0
3418 pr_debug(">>>>>>>>>>>>>>>>>>>> Generated ord: %d from" 3417 pr_debug(">>>>>>>>>>>>>>>>>>>> Generated ord: %d from"
3419 " thread_id: %d\n", ord, ts->thread_id); 3418 " thread_id: %d\n", ord, ts->thread_id);
3420 #endif 3419 #endif
3421 for_each_online_cpu(cpu) { 3420 for_each_online_cpu(cpu) {
3422 if (ord-- == 0) { 3421 if (ord-- == 0) {
3423 cpumask_set_cpu(cpu, conn->conn_cpumask); 3422 cpumask_set_cpu(cpu, conn->conn_cpumask);
3424 return; 3423 return;
3425 } 3424 }
3426 } 3425 }
3427 /* 3426 /*
3428 * This should never be reached. 3427 * This should never be reached.
3429 */ 3428 */
3430 dump_stack(); 3429 dump_stack();
3431 cpumask_setall(conn->conn_cpumask); 3430 cpumask_setall(conn->conn_cpumask);
3432 } 3431 }
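
The placement policy described in the comment above reduces to: take thread_id modulo the number of online CPUs, then walk the online-CPU iterator until that ordinal is consumed. A userspace sketch of the same arithmetic, with a plain array standing in for cpu_online_mask:

        #include <stdio.h>

        int main(void)
        {
                int online[] = { 0, 1, 4, 5 };  /* hypothetical online CPU ids */
                int ncpus = sizeof(online) / sizeof(online[0]);

                for (int thread_id = 0; thread_id < 6; thread_id++) {
                        int ord = thread_id % ncpus;    /* same modulo as above */
                        printf("thread_id %d -> CPU %d\n", thread_id, online[ord]);
                }
                return 0;
        }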
3433 3432
3434 static inline void iscsit_thread_check_cpumask( 3433 static inline void iscsit_thread_check_cpumask(
3435 struct iscsi_conn *conn, 3434 struct iscsi_conn *conn,
3436 struct task_struct *p, 3435 struct task_struct *p,
3437 int mode) 3436 int mode)
3438 { 3437 {
3439 char buf[128]; 3438 char buf[128];
3440 /* 3439 /*
3441 * mode == 1 signals iscsi_target_tx_thread() usage. 3440 * mode == 1 signals iscsi_target_tx_thread() usage.
3442 * mode == 0 signals iscsi_target_rx_thread() usage. 3441 * mode == 0 signals iscsi_target_rx_thread() usage.
3443 */ 3442 */
3444 if (mode == 1) { 3443 if (mode == 1) {
3445 if (!conn->conn_tx_reset_cpumask) 3444 if (!conn->conn_tx_reset_cpumask)
3446 return; 3445 return;
3447 conn->conn_tx_reset_cpumask = 0; 3446 conn->conn_tx_reset_cpumask = 0;
3448 } else { 3447 } else {
3449 if (!conn->conn_rx_reset_cpumask) 3448 if (!conn->conn_rx_reset_cpumask)
3450 return; 3449 return;
3451 conn->conn_rx_reset_cpumask = 0; 3450 conn->conn_rx_reset_cpumask = 0;
3452 } 3451 }
3453 /* 3452 /*
3454 * Update the CPU mask for this single kthread so that 3453 * Update the CPU mask for this single kthread so that
3455 * both TX and RX kthreads are scheduled to run on the 3454 * both TX and RX kthreads are scheduled to run on the
3456 * same CPU. 3455 * same CPU.
3457 */ 3456 */
3458 memset(buf, 0, 128); 3457 memset(buf, 0, 128);
3459 cpumask_scnprintf(buf, 128, conn->conn_cpumask); 3458 cpumask_scnprintf(buf, 128, conn->conn_cpumask);
3460 #if 0 3459 #if 0
3461 pr_debug(">>>>>>>>>>>>>> Calling set_cpus_allowed_ptr():" 3460 pr_debug(">>>>>>>>>>>>>> Calling set_cpus_allowed_ptr():"
3462 " %s for %s\n", buf, p->comm); 3461 " %s for %s\n", buf, p->comm);
3463 #endif 3462 #endif
3464 set_cpus_allowed_ptr(p, conn->conn_cpumask); 3463 set_cpus_allowed_ptr(p, conn->conn_cpumask);
3465 } 3464 }
3466 3465
3467 #else 3466 #else
3468 3467
3469 void iscsit_thread_get_cpumask(struct iscsi_conn *conn) 3468 void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
3470 { 3469 {
3471 return; 3470 return;
3472 } 3471 }
3473 3472
3474 #define iscsit_thread_check_cpumask(X, Y, Z) ({}) 3473 #define iscsit_thread_check_cpumask(X, Y, Z) ({})
3475 #endif /* CONFIG_SMP */ 3474 #endif /* CONFIG_SMP */
3476 3475
3477 int iscsi_target_tx_thread(void *arg) 3476 int iscsi_target_tx_thread(void *arg)
3478 { 3477 {
3479 u8 state; 3478 u8 state;
3480 int eodr = 0; 3479 int eodr = 0;
3481 int ret = 0; 3480 int ret = 0;
3482 int sent_status = 0; 3481 int sent_status = 0;
3483 int use_misc = 0; 3482 int use_misc = 0;
3484 int map_sg = 0; 3483 int map_sg = 0;
3485 struct iscsi_cmd *cmd = NULL; 3484 struct iscsi_cmd *cmd = NULL;
3486 struct iscsi_conn *conn; 3485 struct iscsi_conn *conn;
3487 struct iscsi_queue_req *qr = NULL; 3486 struct iscsi_queue_req *qr = NULL;
3488 struct se_cmd *se_cmd; 3487 struct se_cmd *se_cmd;
3489 struct iscsi_thread_set *ts = (struct iscsi_thread_set *)arg; 3488 struct iscsi_thread_set *ts = (struct iscsi_thread_set *)arg;
3490 /* 3489 /*
3491 * Allow ourselves to be interrupted by SIGINT so that a 3490 * Allow ourselves to be interrupted by SIGINT so that a
3492 * connection recovery / failure event can be triggered externally. 3491 * connection recovery / failure event can be triggered externally.
3493 */ 3492 */
3494 allow_signal(SIGINT); 3493 allow_signal(SIGINT);
3495 3494
3496 restart: 3495 restart:
3497 conn = iscsi_tx_thread_pre_handler(ts); 3496 conn = iscsi_tx_thread_pre_handler(ts);
3498 if (!conn) 3497 if (!conn)
3499 goto out; 3498 goto out;
3500 3499
3501 eodr = map_sg = ret = sent_status = use_misc = 0; 3500 eodr = map_sg = ret = sent_status = use_misc = 0;
3502 3501
3503 while (!kthread_should_stop()) { 3502 while (!kthread_should_stop()) {
3504 /* 3503 /*
3505 * Ensure that both TX and RX per connection kthreads 3504 * Ensure that both TX and RX per connection kthreads
3506 * are scheduled to run on the same CPU. 3505 * are scheduled to run on the same CPU.
3507 */ 3506 */
3508 iscsit_thread_check_cpumask(conn, current, 1); 3507 iscsit_thread_check_cpumask(conn, current, 1);
3509 3508
3510 schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT); 3509 schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT);
3511 3510
3512 if ((ts->status == ISCSI_THREAD_SET_RESET) || 3511 if ((ts->status == ISCSI_THREAD_SET_RESET) ||
3513 signal_pending(current)) 3512 signal_pending(current))
3514 goto transport_err; 3513 goto transport_err;
3515 3514
3516 get_immediate: 3515 get_immediate:
3517 qr = iscsit_get_cmd_from_immediate_queue(conn); 3516 qr = iscsit_get_cmd_from_immediate_queue(conn);
3518 if (qr) { 3517 if (qr) {
3519 atomic_set(&conn->check_immediate_queue, 0); 3518 atomic_set(&conn->check_immediate_queue, 0);
3520 cmd = qr->cmd; 3519 cmd = qr->cmd;
3521 state = qr->state; 3520 state = qr->state;
3522 kmem_cache_free(lio_qr_cache, qr); 3521 kmem_cache_free(lio_qr_cache, qr);
3523 3522
3524 spin_lock_bh(&cmd->istate_lock); 3523 spin_lock_bh(&cmd->istate_lock);
3525 switch (state) { 3524 switch (state) {
3526 case ISTATE_SEND_R2T: 3525 case ISTATE_SEND_R2T:
3527 spin_unlock_bh(&cmd->istate_lock); 3526 spin_unlock_bh(&cmd->istate_lock);
3528 ret = iscsit_send_r2t(cmd, conn); 3527 ret = iscsit_send_r2t(cmd, conn);
3529 break; 3528 break;
3530 case ISTATE_REMOVE: 3529 case ISTATE_REMOVE:
3531 spin_unlock_bh(&cmd->istate_lock); 3530 spin_unlock_bh(&cmd->istate_lock);
3532 3531
3533 if (cmd->data_direction == DMA_TO_DEVICE) 3532 if (cmd->data_direction == DMA_TO_DEVICE)
3534 iscsit_stop_dataout_timer(cmd); 3533 iscsit_stop_dataout_timer(cmd);
3535 3534
3536 spin_lock_bh(&conn->cmd_lock); 3535 spin_lock_bh(&conn->cmd_lock);
3537 list_del(&cmd->i_list); 3536 list_del(&cmd->i_list);
3538 spin_unlock_bh(&conn->cmd_lock); 3537 spin_unlock_bh(&conn->cmd_lock);
3539 3538
3540 iscsit_free_cmd(cmd); 3539 iscsit_free_cmd(cmd);
3541 goto get_immediate; 3540 goto get_immediate;
3542 case ISTATE_SEND_NOPIN_WANT_RESPONSE: 3541 case ISTATE_SEND_NOPIN_WANT_RESPONSE:
3543 spin_unlock_bh(&cmd->istate_lock); 3542 spin_unlock_bh(&cmd->istate_lock);
3544 iscsit_mod_nopin_response_timer(conn); 3543 iscsit_mod_nopin_response_timer(conn);
3545 ret = iscsit_send_unsolicited_nopin(cmd, 3544 ret = iscsit_send_unsolicited_nopin(cmd,
3546 conn, 1); 3545 conn, 1);
3547 break; 3546 break;
3548 case ISTATE_SEND_NOPIN_NO_RESPONSE: 3547 case ISTATE_SEND_NOPIN_NO_RESPONSE:
3549 spin_unlock_bh(&cmd->istate_lock); 3548 spin_unlock_bh(&cmd->istate_lock);
3550 ret = iscsit_send_unsolicited_nopin(cmd, 3549 ret = iscsit_send_unsolicited_nopin(cmd,
3551 conn, 0); 3550 conn, 0);
3552 break; 3551 break;
3553 default: 3552 default:
3554 pr_err("Unknown Opcode: 0x%02x ITT:" 3553 pr_err("Unknown Opcode: 0x%02x ITT:"
3555 " 0x%08x, i_state: %d on CID: %hu\n", 3554 " 0x%08x, i_state: %d on CID: %hu\n",
3556 cmd->iscsi_opcode, cmd->init_task_tag, state, 3555 cmd->iscsi_opcode, cmd->init_task_tag, state,
3557 conn->cid); 3556 conn->cid);
3558 spin_unlock_bh(&cmd->istate_lock); 3557 spin_unlock_bh(&cmd->istate_lock);
3559 goto transport_err; 3558 goto transport_err;
3560 } 3559 }
3561 if (ret < 0) { 3560 if (ret < 0) {
3562 conn->tx_immediate_queue = 0; 3561 conn->tx_immediate_queue = 0;
3563 goto transport_err; 3562 goto transport_err;
3564 } 3563 }
3565 3564
3566 if (iscsit_send_tx_data(cmd, conn, 1) < 0) { 3565 if (iscsit_send_tx_data(cmd, conn, 1) < 0) {
3567 conn->tx_immediate_queue = 0; 3566 conn->tx_immediate_queue = 0;
3568 iscsit_tx_thread_wait_for_tcp(conn); 3567 iscsit_tx_thread_wait_for_tcp(conn);
3569 goto transport_err; 3568 goto transport_err;
3570 } 3569 }
3571 3570
3572 spin_lock_bh(&cmd->istate_lock); 3571 spin_lock_bh(&cmd->istate_lock);
3573 switch (state) { 3572 switch (state) {
3574 case ISTATE_SEND_R2T: 3573 case ISTATE_SEND_R2T:
3575 spin_unlock_bh(&cmd->istate_lock); 3574 spin_unlock_bh(&cmd->istate_lock);
3576 spin_lock_bh(&cmd->dataout_timeout_lock); 3575 spin_lock_bh(&cmd->dataout_timeout_lock);
3577 iscsit_start_dataout_timer(cmd, conn); 3576 iscsit_start_dataout_timer(cmd, conn);
3578 spin_unlock_bh(&cmd->dataout_timeout_lock); 3577 spin_unlock_bh(&cmd->dataout_timeout_lock);
3579 break; 3578 break;
3580 case ISTATE_SEND_NOPIN_WANT_RESPONSE: 3579 case ISTATE_SEND_NOPIN_WANT_RESPONSE:
3581 cmd->i_state = ISTATE_SENT_NOPIN_WANT_RESPONSE; 3580 cmd->i_state = ISTATE_SENT_NOPIN_WANT_RESPONSE;
3582 spin_unlock_bh(&cmd->istate_lock); 3581 spin_unlock_bh(&cmd->istate_lock);
3583 break; 3582 break;
3584 case ISTATE_SEND_NOPIN_NO_RESPONSE: 3583 case ISTATE_SEND_NOPIN_NO_RESPONSE:
3585 cmd->i_state = ISTATE_SENT_STATUS; 3584 cmd->i_state = ISTATE_SENT_STATUS;
3586 spin_unlock_bh(&cmd->istate_lock); 3585 spin_unlock_bh(&cmd->istate_lock);
3587 break; 3586 break;
3588 default: 3587 default:
3589 pr_err("Unknown Opcode: 0x%02x ITT:" 3588 pr_err("Unknown Opcode: 0x%02x ITT:"
3590 " 0x%08x, i_state: %d on CID: %hu\n", 3589 " 0x%08x, i_state: %d on CID: %hu\n",
3591 cmd->iscsi_opcode, cmd->init_task_tag, 3590 cmd->iscsi_opcode, cmd->init_task_tag,
3592 state, conn->cid); 3591 state, conn->cid);
3593 spin_unlock_bh(&cmd->istate_lock); 3592 spin_unlock_bh(&cmd->istate_lock);
3594 goto transport_err; 3593 goto transport_err;
3595 } 3594 }
3596 goto get_immediate; 3595 goto get_immediate;
3597 } else 3596 } else
3598 conn->tx_immediate_queue = 0; 3597 conn->tx_immediate_queue = 0;
3599 3598
3600 get_response: 3599 get_response:
3601 qr = iscsit_get_cmd_from_response_queue(conn); 3600 qr = iscsit_get_cmd_from_response_queue(conn);
3602 if (qr) { 3601 if (qr) {
3603 cmd = qr->cmd; 3602 cmd = qr->cmd;
3604 state = qr->state; 3603 state = qr->state;
3605 kmem_cache_free(lio_qr_cache, qr); 3604 kmem_cache_free(lio_qr_cache, qr);
3606 3605
3607 spin_lock_bh(&cmd->istate_lock); 3606 spin_lock_bh(&cmd->istate_lock);
3608 check_rsp_state: 3607 check_rsp_state:
3609 switch (state) { 3608 switch (state) {
3610 case ISTATE_SEND_DATAIN: 3609 case ISTATE_SEND_DATAIN:
3611 spin_unlock_bh(&cmd->istate_lock); 3610 spin_unlock_bh(&cmd->istate_lock);
3612 ret = iscsit_send_data_in(cmd, conn, 3611 ret = iscsit_send_data_in(cmd, conn,
3613 &eodr); 3612 &eodr);
3614 map_sg = 1; 3613 map_sg = 1;
3615 break; 3614 break;
3616 case ISTATE_SEND_STATUS: 3615 case ISTATE_SEND_STATUS:
3617 case ISTATE_SEND_STATUS_RECOVERY: 3616 case ISTATE_SEND_STATUS_RECOVERY:
3618 spin_unlock_bh(&cmd->istate_lock); 3617 spin_unlock_bh(&cmd->istate_lock);
3619 use_misc = 1; 3618 use_misc = 1;
3620 ret = iscsit_send_status(cmd, conn); 3619 ret = iscsit_send_status(cmd, conn);
3621 break; 3620 break;
3622 case ISTATE_SEND_LOGOUTRSP: 3621 case ISTATE_SEND_LOGOUTRSP:
3623 spin_unlock_bh(&cmd->istate_lock); 3622 spin_unlock_bh(&cmd->istate_lock);
3624 use_misc = 1; 3623 use_misc = 1;
3625 ret = iscsit_send_logout_response(cmd, conn); 3624 ret = iscsit_send_logout_response(cmd, conn);
3626 break; 3625 break;
3627 case ISTATE_SEND_ASYNCMSG: 3626 case ISTATE_SEND_ASYNCMSG:
3628 spin_unlock_bh(&cmd->istate_lock); 3627 spin_unlock_bh(&cmd->istate_lock);
3629 use_misc = 1; 3628 use_misc = 1;
3630 ret = iscsit_send_conn_drop_async_message( 3629 ret = iscsit_send_conn_drop_async_message(
3631 cmd, conn); 3630 cmd, conn);
3632 break; 3631 break;
3633 case ISTATE_SEND_NOPIN: 3632 case ISTATE_SEND_NOPIN:
3634 spin_unlock_bh(&cmd->istate_lock); 3633 spin_unlock_bh(&cmd->istate_lock);
3635 use_misc = 1; 3634 use_misc = 1;
3636 ret = iscsit_send_nopin_response(cmd, conn); 3635 ret = iscsit_send_nopin_response(cmd, conn);
3637 break; 3636 break;
3638 case ISTATE_SEND_REJECT: 3637 case ISTATE_SEND_REJECT:
3639 spin_unlock_bh(&cmd->istate_lock); 3638 spin_unlock_bh(&cmd->istate_lock);
3640 use_misc = 1; 3639 use_misc = 1;
3641 ret = iscsit_send_reject(cmd, conn); 3640 ret = iscsit_send_reject(cmd, conn);
3642 break; 3641 break;
3643 case ISTATE_SEND_TASKMGTRSP: 3642 case ISTATE_SEND_TASKMGTRSP:
3644 spin_unlock_bh(&cmd->istate_lock); 3643 spin_unlock_bh(&cmd->istate_lock);
3645 use_misc = 1; 3644 use_misc = 1;
3646 ret = iscsit_send_task_mgt_rsp(cmd, conn); 3645 ret = iscsit_send_task_mgt_rsp(cmd, conn);
3647 if (ret != 0) 3646 if (ret != 0)
3648 break; 3647 break;
3649 ret = iscsit_tmr_post_handler(cmd, conn); 3648 ret = iscsit_tmr_post_handler(cmd, conn);
3650 if (ret != 0) 3649 if (ret != 0)
3651 iscsit_fall_back_to_erl0(conn->sess); 3650 iscsit_fall_back_to_erl0(conn->sess);
3652 break; 3651 break;
3653 case ISTATE_SEND_TEXTRSP: 3652 case ISTATE_SEND_TEXTRSP:
3654 spin_unlock_bh(&cmd->istate_lock); 3653 spin_unlock_bh(&cmd->istate_lock);
3655 use_misc = 1; 3654 use_misc = 1;
3656 ret = iscsit_send_text_rsp(cmd, conn); 3655 ret = iscsit_send_text_rsp(cmd, conn);
3657 break; 3656 break;
3658 default: 3657 default:
3659 pr_err("Unknown Opcode: 0x%02x ITT:" 3658 pr_err("Unknown Opcode: 0x%02x ITT:"
3660 " 0x%08x, i_state: %d on CID: %hu\n", 3659 " 0x%08x, i_state: %d on CID: %hu\n",
3661 cmd->iscsi_opcode, cmd->init_task_tag, 3660 cmd->iscsi_opcode, cmd->init_task_tag,
3662 state, conn->cid); 3661 state, conn->cid);
3663 spin_unlock_bh(&cmd->istate_lock); 3662 spin_unlock_bh(&cmd->istate_lock);
3664 goto transport_err; 3663 goto transport_err;
3665 } 3664 }
3666 if (ret < 0) { 3665 if (ret < 0) {
3667 conn->tx_response_queue = 0; 3666 conn->tx_response_queue = 0;
3668 goto transport_err; 3667 goto transport_err;
3669 } 3668 }
3670 3669
3671 se_cmd = &cmd->se_cmd; 3670 se_cmd = &cmd->se_cmd;
3672 3671
3673 if (map_sg && !conn->conn_ops->IFMarker) { 3672 if (map_sg && !conn->conn_ops->IFMarker) {
3674 if (iscsit_fe_sendpage_sg(cmd, conn) < 0) { 3673 if (iscsit_fe_sendpage_sg(cmd, conn) < 0) {
3675 conn->tx_response_queue = 0; 3674 conn->tx_response_queue = 0;
3676 iscsit_tx_thread_wait_for_tcp(conn); 3675 iscsit_tx_thread_wait_for_tcp(conn);
3677 iscsit_unmap_iovec(cmd); 3676 iscsit_unmap_iovec(cmd);
3678 goto transport_err; 3677 goto transport_err;
3679 } 3678 }
3680 } else { 3679 } else {
3681 if (iscsit_send_tx_data(cmd, conn, use_misc) < 0) { 3680 if (iscsit_send_tx_data(cmd, conn, use_misc) < 0) {
3682 conn->tx_response_queue = 0; 3681 conn->tx_response_queue = 0;
3683 iscsit_tx_thread_wait_for_tcp(conn); 3682 iscsit_tx_thread_wait_for_tcp(conn);
3684 iscsit_unmap_iovec(cmd); 3683 iscsit_unmap_iovec(cmd);
3685 goto transport_err; 3684 goto transport_err;
3686 } 3685 }
3687 } 3686 }
3688 map_sg = 0; 3687 map_sg = 0;
3689 iscsit_unmap_iovec(cmd); 3688 iscsit_unmap_iovec(cmd);
3690 3689
3691 spin_lock_bh(&cmd->istate_lock); 3690 spin_lock_bh(&cmd->istate_lock);
3692 switch (state) { 3691 switch (state) {
3693 case ISTATE_SEND_DATAIN: 3692 case ISTATE_SEND_DATAIN:
3694 if (!eodr) 3693 if (!eodr)
3695 goto check_rsp_state; 3694 goto check_rsp_state;
3696 3695
3697 if (eodr == 1) { 3696 if (eodr == 1) {
3698 cmd->i_state = ISTATE_SENT_LAST_DATAIN; 3697 cmd->i_state = ISTATE_SENT_LAST_DATAIN;
3699 sent_status = 1; 3698 sent_status = 1;
3700 eodr = use_misc = 0; 3699 eodr = use_misc = 0;
3701 } else if (eodr == 2) { 3700 } else if (eodr == 2) {
3702 cmd->i_state = state = 3701 cmd->i_state = state =
3703 ISTATE_SEND_STATUS; 3702 ISTATE_SEND_STATUS;
3704 sent_status = 0; 3703 sent_status = 0;
3705 eodr = use_misc = 0; 3704 eodr = use_misc = 0;
3706 goto check_rsp_state; 3705 goto check_rsp_state;
3707 } 3706 }
3708 break; 3707 break;
3709 case ISTATE_SEND_STATUS: 3708 case ISTATE_SEND_STATUS:
3710 use_misc = 0; 3709 use_misc = 0;
3711 sent_status = 1; 3710 sent_status = 1;
3712 break; 3711 break;
3713 case ISTATE_SEND_ASYNCMSG: 3712 case ISTATE_SEND_ASYNCMSG:
3714 case ISTATE_SEND_NOPIN: 3713 case ISTATE_SEND_NOPIN:
3715 case ISTATE_SEND_STATUS_RECOVERY: 3714 case ISTATE_SEND_STATUS_RECOVERY:
3716 case ISTATE_SEND_TEXTRSP: 3715 case ISTATE_SEND_TEXTRSP:
3717 use_misc = 0; 3716 use_misc = 0;
3718 sent_status = 1; 3717 sent_status = 1;
3719 break; 3718 break;
3720 case ISTATE_SEND_REJECT: 3719 case ISTATE_SEND_REJECT:
3721 use_misc = 0; 3720 use_misc = 0;
3722 if (cmd->cmd_flags & ICF_REJECT_FAIL_CONN) { 3721 if (cmd->cmd_flags & ICF_REJECT_FAIL_CONN) {
3723 cmd->cmd_flags &= ~ICF_REJECT_FAIL_CONN; 3722 cmd->cmd_flags &= ~ICF_REJECT_FAIL_CONN;
3724 spin_unlock_bh(&cmd->istate_lock); 3723 spin_unlock_bh(&cmd->istate_lock);
3725 complete(&cmd->reject_comp); 3724 complete(&cmd->reject_comp);
3726 goto transport_err; 3725 goto transport_err;
3727 } 3726 }
3728 complete(&cmd->reject_comp); 3727 complete(&cmd->reject_comp);
3729 break; 3728 break;
3730 case ISTATE_SEND_TASKMGTRSP: 3729 case ISTATE_SEND_TASKMGTRSP:
3731 use_misc = 0; 3730 use_misc = 0;
3732 sent_status = 1; 3731 sent_status = 1;
3733 break; 3732 break;
3734 case ISTATE_SEND_LOGOUTRSP: 3733 case ISTATE_SEND_LOGOUTRSP:
3735 spin_unlock_bh(&cmd->istate_lock); 3734 spin_unlock_bh(&cmd->istate_lock);
3736 if (!iscsit_logout_post_handler(cmd, conn)) 3735 if (!iscsit_logout_post_handler(cmd, conn))
3737 goto restart; 3736 goto restart;
3738 spin_lock_bh(&cmd->istate_lock); 3737 spin_lock_bh(&cmd->istate_lock);
3739 use_misc = 0; 3738 use_misc = 0;
3740 sent_status = 1; 3739 sent_status = 1;
3741 break; 3740 break;
3742 default: 3741 default:
3743 pr_err("Unknown Opcode: 0x%02x ITT:" 3742 pr_err("Unknown Opcode: 0x%02x ITT:"
3744 " 0x%08x, i_state: %d on CID: %hu\n", 3743 " 0x%08x, i_state: %d on CID: %hu\n",
3745 cmd->iscsi_opcode, cmd->init_task_tag, 3744 cmd->iscsi_opcode, cmd->init_task_tag,
3746 cmd->i_state, conn->cid); 3745 cmd->i_state, conn->cid);
3747 spin_unlock_bh(&cmd->istate_lock); 3746 spin_unlock_bh(&cmd->istate_lock);
3748 goto transport_err; 3747 goto transport_err;
3749 } 3748 }
3750 3749
3751 if (sent_status) { 3750 if (sent_status) {
3752 cmd->i_state = ISTATE_SENT_STATUS; 3751 cmd->i_state = ISTATE_SENT_STATUS;
3753 sent_status = 0; 3752 sent_status = 0;
3754 } 3753 }
3755 spin_unlock_bh(&cmd->istate_lock); 3754 spin_unlock_bh(&cmd->istate_lock);
3756 3755
3757 if (atomic_read(&conn->check_immediate_queue)) 3756 if (atomic_read(&conn->check_immediate_queue))
3758 goto get_immediate; 3757 goto get_immediate;
3759 3758
3760 goto get_response; 3759 goto get_response;
3761 } else 3760 } else
3762 conn->tx_response_queue = 0; 3761 conn->tx_response_queue = 0;
3763 } 3762 }
3764 3763
3765 transport_err: 3764 transport_err:
3766 iscsit_take_action_for_connection_exit(conn); 3765 iscsit_take_action_for_connection_exit(conn);
3767 goto restart; 3766 goto restart;
3768 out: 3767 out:
3769 return 0; 3768 return 0;
3770 } 3769 }
3771 3770
3772 int iscsi_target_rx_thread(void *arg) 3771 int iscsi_target_rx_thread(void *arg)
3773 { 3772 {
3774 int ret; 3773 int ret;
3775 u8 buffer[ISCSI_HDR_LEN], opcode; 3774 u8 buffer[ISCSI_HDR_LEN], opcode;
3776 u32 checksum = 0, digest = 0; 3775 u32 checksum = 0, digest = 0;
3777 struct iscsi_conn *conn = NULL; 3776 struct iscsi_conn *conn = NULL;
3778 struct iscsi_thread_set *ts = (struct iscsi_thread_set *)arg; 3777 struct iscsi_thread_set *ts = (struct iscsi_thread_set *)arg;
3779 struct kvec iov; 3778 struct kvec iov;
3780 /* 3779 /*
3781 * Allow ourselves to be interrupted by SIGINT so that a 3780 * Allow ourselves to be interrupted by SIGINT so that a
3782 * connection recovery / failure event can be triggered externally. 3781 * connection recovery / failure event can be triggered externally.
3783 */ 3782 */
3784 allow_signal(SIGINT); 3783 allow_signal(SIGINT);
3785 3784
3786 restart: 3785 restart:
3787 conn = iscsi_rx_thread_pre_handler(ts); 3786 conn = iscsi_rx_thread_pre_handler(ts);
3788 if (!conn) 3787 if (!conn)
3789 goto out; 3788 goto out;
3790 3789
3791 while (!kthread_should_stop()) { 3790 while (!kthread_should_stop()) {
3792 /* 3791 /*
3793 * Ensure that both TX and RX per connection kthreads 3792 * Ensure that both TX and RX per connection kthreads
3794 * are scheduled to run on the same CPU. 3793 * are scheduled to run on the same CPU.
3795 */ 3794 */
3796 iscsit_thread_check_cpumask(conn, current, 0); 3795 iscsit_thread_check_cpumask(conn, current, 0);
3797 3796
3798 memset(buffer, 0, ISCSI_HDR_LEN); 3797 memset(buffer, 0, ISCSI_HDR_LEN);
3799 memset(&iov, 0, sizeof(struct kvec)); 3798 memset(&iov, 0, sizeof(struct kvec));
3800 3799
3801 iov.iov_base = buffer; 3800 iov.iov_base = buffer;
3802 iov.iov_len = ISCSI_HDR_LEN; 3801 iov.iov_len = ISCSI_HDR_LEN;
3803 3802
3804 ret = rx_data(conn, &iov, 1, ISCSI_HDR_LEN); 3803 ret = rx_data(conn, &iov, 1, ISCSI_HDR_LEN);
3805 if (ret != ISCSI_HDR_LEN) { 3804 if (ret != ISCSI_HDR_LEN) {
3806 iscsit_rx_thread_wait_for_tcp(conn); 3805 iscsit_rx_thread_wait_for_tcp(conn);
3807 goto transport_err; 3806 goto transport_err;
3808 } 3807 }
3809 3808
3810 /* 3809 /*
3811 * Set conn->bad_hdr for use with REJECT PDUs. 3810 * Set conn->bad_hdr for use with REJECT PDUs.
3812 */ 3811 */
3813 memcpy(&conn->bad_hdr, &buffer, ISCSI_HDR_LEN); 3812 memcpy(&conn->bad_hdr, &buffer, ISCSI_HDR_LEN);
3814 3813
3815 if (conn->conn_ops->HeaderDigest) { 3814 if (conn->conn_ops->HeaderDigest) {
3816 iov.iov_base = &digest; 3815 iov.iov_base = &digest;
3817 iov.iov_len = ISCSI_CRC_LEN; 3816 iov.iov_len = ISCSI_CRC_LEN;
3818 3817
3819 ret = rx_data(conn, &iov, 1, ISCSI_CRC_LEN); 3818 ret = rx_data(conn, &iov, 1, ISCSI_CRC_LEN);
3820 if (ret != ISCSI_CRC_LEN) { 3819 if (ret != ISCSI_CRC_LEN) {
3821 iscsit_rx_thread_wait_for_tcp(conn); 3820 iscsit_rx_thread_wait_for_tcp(conn);
3822 goto transport_err; 3821 goto transport_err;
3823 } 3822 }
3824 3823
3825 iscsit_do_crypto_hash_buf(&conn->conn_rx_hash, 3824 iscsit_do_crypto_hash_buf(&conn->conn_rx_hash,
3826 buffer, ISCSI_HDR_LEN, 3825 buffer, ISCSI_HDR_LEN,
3827 0, NULL, (u8 *)&checksum); 3826 0, NULL, (u8 *)&checksum);
3828 3827
3829 if (digest != checksum) { 3828 if (digest != checksum) {
3830 pr_err("HeaderDigest CRC32C failed," 3829 pr_err("HeaderDigest CRC32C failed,"
3831 " received 0x%08x, computed 0x%08x\n", 3830 " received 0x%08x, computed 0x%08x\n",
3832 digest, checksum); 3831 digest, checksum);
3833 /* 3832 /*
3834 * Set the PDU to 0xff so it will intentionally 3833 * Set the PDU to 0xff so it will intentionally
3835 * hit default in the switch below. 3834 * hit default in the switch below.
3836 */ 3835 */
3837 memset(buffer, 0xff, ISCSI_HDR_LEN); 3836 memset(buffer, 0xff, ISCSI_HDR_LEN);
3838 spin_lock_bh(&conn->sess->session_stats_lock); 3837 spin_lock_bh(&conn->sess->session_stats_lock);
3839 conn->sess->conn_digest_errors++; 3838 conn->sess->conn_digest_errors++;
3840 spin_unlock_bh(&conn->sess->session_stats_lock); 3839 spin_unlock_bh(&conn->sess->session_stats_lock);
3841 } else { 3840 } else {
3842 pr_debug("Got HeaderDigest CRC32C" 3841 pr_debug("Got HeaderDigest CRC32C"
3843 " 0x%08x\n", checksum); 3842 " 0x%08x\n", checksum);
3844 } 3843 }
3845 } 3844 }
3846 3845
3847 if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) 3846 if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)
3848 goto transport_err; 3847 goto transport_err;
3849 3848
3850 opcode = buffer[0] & ISCSI_OPCODE_MASK; 3849 opcode = buffer[0] & ISCSI_OPCODE_MASK;
3851 3850
3852 if (conn->sess->sess_ops->SessionType && 3851 if (conn->sess->sess_ops->SessionType &&
3853 ((!(opcode & ISCSI_OP_TEXT)) || 3852 ((!(opcode & ISCSI_OP_TEXT)) ||
3854 (!(opcode & ISCSI_OP_LOGOUT)))) { 3853 (!(opcode & ISCSI_OP_LOGOUT)))) {
3855 pr_err("Received illegal iSCSI Opcode: 0x%02x" 3854 pr_err("Received illegal iSCSI Opcode: 0x%02x"
3856 " while in Discovery Session, rejecting.\n", opcode); 3855 " while in Discovery Session, rejecting.\n", opcode);
3857 iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 3856 iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
3858 buffer, conn); 3857 buffer, conn);
3859 goto transport_err; 3858 goto transport_err;
3860 } 3859 }
3861 3860
3862 switch (opcode) { 3861 switch (opcode) {
3863 case ISCSI_OP_SCSI_CMD: 3862 case ISCSI_OP_SCSI_CMD:
3864 if (iscsit_handle_scsi_cmd(conn, buffer) < 0) 3863 if (iscsit_handle_scsi_cmd(conn, buffer) < 0)
3865 goto transport_err; 3864 goto transport_err;
3866 break; 3865 break;
3867 case ISCSI_OP_SCSI_DATA_OUT: 3866 case ISCSI_OP_SCSI_DATA_OUT:
3868 if (iscsit_handle_data_out(conn, buffer) < 0) 3867 if (iscsit_handle_data_out(conn, buffer) < 0)
3869 goto transport_err; 3868 goto transport_err;
3870 break; 3869 break;
3871 case ISCSI_OP_NOOP_OUT: 3870 case ISCSI_OP_NOOP_OUT:
3872 if (iscsit_handle_nop_out(conn, buffer) < 0) 3871 if (iscsit_handle_nop_out(conn, buffer) < 0)
3873 goto transport_err; 3872 goto transport_err;
3874 break; 3873 break;
3875 case ISCSI_OP_SCSI_TMFUNC: 3874 case ISCSI_OP_SCSI_TMFUNC:
3876 if (iscsit_handle_task_mgt_cmd(conn, buffer) < 0) 3875 if (iscsit_handle_task_mgt_cmd(conn, buffer) < 0)
3877 goto transport_err; 3876 goto transport_err;
3878 break; 3877 break;
3879 case ISCSI_OP_TEXT: 3878 case ISCSI_OP_TEXT:
3880 if (iscsit_handle_text_cmd(conn, buffer) < 0) 3879 if (iscsit_handle_text_cmd(conn, buffer) < 0)
3881 goto transport_err; 3880 goto transport_err;
3882 break; 3881 break;
3883 case ISCSI_OP_LOGOUT: 3882 case ISCSI_OP_LOGOUT:
3884 ret = iscsit_handle_logout_cmd(conn, buffer); 3883 ret = iscsit_handle_logout_cmd(conn, buffer);
3885 if (ret > 0) { 3884 if (ret > 0) {
3886 wait_for_completion_timeout(&conn->conn_logout_comp, 3885 wait_for_completion_timeout(&conn->conn_logout_comp,
3887 SECONDS_FOR_LOGOUT_COMP * HZ); 3886 SECONDS_FOR_LOGOUT_COMP * HZ);
3888 goto transport_err; 3887 goto transport_err;
3889 } else if (ret < 0) 3888 } else if (ret < 0)
3890 goto transport_err; 3889 goto transport_err;
3891 break; 3890 break;
3892 case ISCSI_OP_SNACK: 3891 case ISCSI_OP_SNACK:
3893 if (iscsit_handle_snack(conn, buffer) < 0) 3892 if (iscsit_handle_snack(conn, buffer) < 0)
3894 goto transport_err; 3893 goto transport_err;
3895 break; 3894 break;
3896 default: 3895 default:
3897 pr_err("Got unknown iSCSI OpCode: 0x%02x\n", 3896 pr_err("Got unknown iSCSI OpCode: 0x%02x\n",
3898 opcode); 3897 opcode);
3899 if (!conn->sess->sess_ops->ErrorRecoveryLevel) { 3898 if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
3900 pr_err("Cannot recover from unknown" 3899 pr_err("Cannot recover from unknown"
3901 " opcode while ERL=0, closing iSCSI connection" 3900 " opcode while ERL=0, closing iSCSI connection"
3902 ".\n"); 3901 ".\n");
3903 goto transport_err; 3902 goto transport_err;
3904 } 3903 }
3905 if (!conn->conn_ops->OFMarker) { 3904 if (!conn->conn_ops->OFMarker) {
3906 pr_err("Unable to recover from unknown" 3905 pr_err("Unable to recover from unknown"
3907 " opcode while OFMarker=No, closing iSCSI" 3906 " opcode while OFMarker=No, closing iSCSI"
3908 " connection.\n"); 3907 " connection.\n");
3909 goto transport_err; 3908 goto transport_err;
3910 } 3909 }
3911 if (iscsit_recover_from_unknown_opcode(conn) < 0) { 3910 if (iscsit_recover_from_unknown_opcode(conn) < 0) {
3912 pr_err("Unable to recover from unknown" 3911 pr_err("Unable to recover from unknown"
3913 " opcode, closing iSCSI connection.\n"); 3912 " opcode, closing iSCSI connection.\n");
3914 goto transport_err; 3913 goto transport_err;
3915 } 3914 }
3916 break; 3915 break;
3917 } 3916 }
3918 } 3917 }
3919 3918
3920 transport_err: 3919 transport_err:
3921 if (!signal_pending(current)) 3920 if (!signal_pending(current))
3922 atomic_set(&conn->transport_failed, 1); 3921 atomic_set(&conn->transport_failed, 1);
3923 iscsit_take_action_for_connection_exit(conn); 3922 iscsit_take_action_for_connection_exit(conn);
3924 goto restart; 3923 goto restart;
3925 out: 3924 out:
3926 return 0; 3925 return 0;
3927 } 3926 }
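
The RX loop above dispatches on `buffer[0] & ISCSI_OPCODE_MASK`, which keeps the low six opcode bits of the first BHS byte and strips the immediate-delivery flag (bit 0x40 per RFC 3720). A minimal illustration of the masking:

        #include <assert.h>

        int main(void)
        {
                unsigned char byte0 = 0x40 | 0x01;      /* I bit + SCSI Command opcode */
                unsigned char opcode = byte0 & 0x3f;    /* what ISCSI_OPCODE_MASK does */

                assert(opcode == 0x01);                 /* immediate flag stripped */
                return 0;
        }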
3928 3927
3929 static void iscsit_release_commands_from_conn(struct iscsi_conn *conn) 3928 static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
3930 { 3929 {
3931 struct iscsi_cmd *cmd = NULL, *cmd_tmp = NULL; 3930 struct iscsi_cmd *cmd = NULL, *cmd_tmp = NULL;
3932 struct iscsi_session *sess = conn->sess; 3931 struct iscsi_session *sess = conn->sess;
3933 /* 3932 /*
3934 * We expect this function to only ever be called from either RX or TX 3933 * We expect this function to only ever be called from either RX or TX
3935 * thread context via iscsit_close_connection() once the other context 3934 * thread context via iscsit_close_connection() once the other context
3936 * has been reset -> returned to the sleeping pre-handler state. 3935 * has been reset -> returned to the sleeping pre-handler state.
3937 */ 3936 */
3938 spin_lock_bh(&conn->cmd_lock); 3937 spin_lock_bh(&conn->cmd_lock);
3939 list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_list) { 3938 list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_list) {
3940 3939
3941 list_del(&cmd->i_list); 3940 list_del(&cmd->i_list);
3942 spin_unlock_bh(&conn->cmd_lock); 3941 spin_unlock_bh(&conn->cmd_lock);
3943 3942
3944 iscsit_increment_maxcmdsn(cmd, sess); 3943 iscsit_increment_maxcmdsn(cmd, sess);
3945 3944
3946 iscsit_free_cmd(cmd); 3945 iscsit_free_cmd(cmd);
3947 3946
3948 spin_lock_bh(&conn->cmd_lock); 3947 spin_lock_bh(&conn->cmd_lock);
3949 } 3948 }
3950 spin_unlock_bh(&conn->cmd_lock); 3949 spin_unlock_bh(&conn->cmd_lock);
3951 } 3950 }
3952 3951
3953 static void iscsit_stop_timers_for_cmds( 3952 static void iscsit_stop_timers_for_cmds(
3954 struct iscsi_conn *conn) 3953 struct iscsi_conn *conn)
3955 { 3954 {
3956 struct iscsi_cmd *cmd; 3955 struct iscsi_cmd *cmd;
3957 3956
3958 spin_lock_bh(&conn->cmd_lock); 3957 spin_lock_bh(&conn->cmd_lock);
3959 list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) { 3958 list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
3960 if (cmd->data_direction == DMA_TO_DEVICE) 3959 if (cmd->data_direction == DMA_TO_DEVICE)
3961 iscsit_stop_dataout_timer(cmd); 3960 iscsit_stop_dataout_timer(cmd);
3962 } 3961 }
3963 spin_unlock_bh(&conn->cmd_lock); 3962 spin_unlock_bh(&conn->cmd_lock);
3964 } 3963 }
3965 3964
3966 int iscsit_close_connection( 3965 int iscsit_close_connection(
3967 struct iscsi_conn *conn) 3966 struct iscsi_conn *conn)
3968 { 3967 {
3969 int conn_logout = (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT); 3968 int conn_logout = (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT);
3970 struct iscsi_session *sess = conn->sess; 3969 struct iscsi_session *sess = conn->sess;
3971 3970
3972 pr_debug("Closing iSCSI connection CID %hu on SID:" 3971 pr_debug("Closing iSCSI connection CID %hu on SID:"
3973 " %u\n", conn->cid, sess->sid); 3972 " %u\n", conn->cid, sess->sid);
3974 /* 3973 /*
3975 * Always up conn_logout_comp just in case the RX Thread is sleeping 3974 * Always up conn_logout_comp just in case the RX Thread is sleeping
3976 * and the logout response never got sent because the connection 3975 * and the logout response never got sent because the connection
3977 * failed. 3976 * failed.
3978 */ 3977 */
3979 complete(&conn->conn_logout_comp); 3978 complete(&conn->conn_logout_comp);
3980 3979
3981 iscsi_release_thread_set(conn); 3980 iscsi_release_thread_set(conn);
3982 3981
3983 iscsit_stop_timers_for_cmds(conn); 3982 iscsit_stop_timers_for_cmds(conn);
3984 iscsit_stop_nopin_response_timer(conn); 3983 iscsit_stop_nopin_response_timer(conn);
3985 iscsit_stop_nopin_timer(conn); 3984 iscsit_stop_nopin_timer(conn);
3986 iscsit_free_queue_reqs_for_conn(conn); 3985 iscsit_free_queue_reqs_for_conn(conn);
3987 3986
3988 /* 3987 /*
3989 * During Connection recovery drop unacknowledged out of order 3988 * During Connection recovery drop unacknowledged out of order
3990 * commands for this connection, and prepare the other commands 3989 * commands for this connection, and prepare the other commands
3991 * for reallegiance. 3990 * for reallegiance.
3992 * 3991 *
3993 * During normal operation clear the out of order commands (but 3992 * During normal operation clear the out of order commands (but
3994 * do not free the struct iscsi_ooo_cmdsn's) and release all 3993 * do not free the struct iscsi_ooo_cmdsn's) and release all
3995 * struct iscsi_cmds. 3994 * struct iscsi_cmds.
3996 */ 3995 */
3997 if (atomic_read(&conn->connection_recovery)) { 3996 if (atomic_read(&conn->connection_recovery)) {
3998 iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(conn); 3997 iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(conn);
3999 iscsit_prepare_cmds_for_realligance(conn); 3998 iscsit_prepare_cmds_for_realligance(conn);
4000 } else { 3999 } else {
4001 iscsit_clear_ooo_cmdsns_for_conn(conn); 4000 iscsit_clear_ooo_cmdsns_for_conn(conn);
4002 iscsit_release_commands_from_conn(conn); 4001 iscsit_release_commands_from_conn(conn);
4003 } 4002 }
4004 4003
4005 /* 4004 /*
4006 * Handle decrementing session or connection usage count if 4005 * Handle decrementing session or connection usage count if
4007 * a logout response was not able to be sent because the 4006 * a logout response was not able to be sent because the
4008 * connection failed. Fall back to Session Recovery here. 4007 * connection failed. Fall back to Session Recovery here.
4009 */ 4008 */
4010 if (atomic_read(&conn->conn_logout_remove)) { 4009 if (atomic_read(&conn->conn_logout_remove)) {
4011 if (conn->conn_logout_reason == ISCSI_LOGOUT_REASON_CLOSE_SESSION) { 4010 if (conn->conn_logout_reason == ISCSI_LOGOUT_REASON_CLOSE_SESSION) {
4012 iscsit_dec_conn_usage_count(conn); 4011 iscsit_dec_conn_usage_count(conn);
4013 iscsit_dec_session_usage_count(sess); 4012 iscsit_dec_session_usage_count(sess);
4014 } 4013 }
4015 if (conn->conn_logout_reason == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION) 4014 if (conn->conn_logout_reason == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION)
4016 iscsit_dec_conn_usage_count(conn); 4015 iscsit_dec_conn_usage_count(conn);
4017 4016
4018 atomic_set(&conn->conn_logout_remove, 0); 4017 atomic_set(&conn->conn_logout_remove, 0);
4019 atomic_set(&sess->session_reinstatement, 0); 4018 atomic_set(&sess->session_reinstatement, 0);
4020 atomic_set(&sess->session_fall_back_to_erl0, 1); 4019 atomic_set(&sess->session_fall_back_to_erl0, 1);
4021 } 4020 }
4022 4021
4023 spin_lock_bh(&sess->conn_lock); 4022 spin_lock_bh(&sess->conn_lock);
4024 list_del(&conn->conn_list); 4023 list_del(&conn->conn_list);
4025 4024
4026 /* 4025 /*
4027 * Attempt to let the Initiator know this connection failed by 4026 * Attempt to let the Initiator know this connection failed by
4028 * sending a Connection Dropped Async Message on another 4027 * sending a Connection Dropped Async Message on another
4029 * active connection. 4028 * active connection.
4030 */ 4029 */
4031 if (atomic_read(&conn->connection_recovery)) 4030 if (atomic_read(&conn->connection_recovery))
4032 iscsit_build_conn_drop_async_message(conn); 4031 iscsit_build_conn_drop_async_message(conn);
4033 4032
4034 spin_unlock_bh(&sess->conn_lock); 4033 spin_unlock_bh(&sess->conn_lock);
4035 4034
4036 /* 4035 /*
4037 * If connection reinstatement is being performed on this connection, 4036 * If connection reinstatement is being performed on this connection,
4038 * up the connection reinstatement semaphore that is being blocked on 4037 * up the connection reinstatement semaphore that is being blocked on
4039 * in iscsit_cause_connection_reinstatement(). 4038 * in iscsit_cause_connection_reinstatement().
4040 */ 4039 */
4041 spin_lock_bh(&conn->state_lock); 4040 spin_lock_bh(&conn->state_lock);
4042 if (atomic_read(&conn->sleep_on_conn_wait_comp)) { 4041 if (atomic_read(&conn->sleep_on_conn_wait_comp)) {
4043 spin_unlock_bh(&conn->state_lock); 4042 spin_unlock_bh(&conn->state_lock);
4044 complete(&conn->conn_wait_comp); 4043 complete(&conn->conn_wait_comp);
4045 wait_for_completion(&conn->conn_post_wait_comp); 4044 wait_for_completion(&conn->conn_post_wait_comp);
4046 spin_lock_bh(&conn->state_lock); 4045 spin_lock_bh(&conn->state_lock);
4047 } 4046 }
4048 4047
4049 /* 4048 /*
4050 * If connection reinstatement is being performed on this connection 4049 * If connection reinstatement is being performed on this connection
4051 * by receiving a REMOVECONNFORRECOVERY logout request, up the 4050 * by receiving a REMOVECONNFORRECOVERY logout request, up the
4052 * connection wait rcfr semaphore that is being blocked on 4051 * connection wait rcfr semaphore that is being blocked on
4052 * in iscsit_connection_reinstatement_rcfr(). 4051 * in iscsit_connection_reinstatement_rcfr().
4054 */ 4053 */
4055 if (atomic_read(&conn->connection_wait_rcfr)) { 4054 if (atomic_read(&conn->connection_wait_rcfr)) {
4056 spin_unlock_bh(&conn->state_lock); 4055 spin_unlock_bh(&conn->state_lock);
4057 complete(&conn->conn_wait_rcfr_comp); 4056 complete(&conn->conn_wait_rcfr_comp);
4058 wait_for_completion(&conn->conn_post_wait_comp); 4057 wait_for_completion(&conn->conn_post_wait_comp);
4059 spin_lock_bh(&conn->state_lock); 4058 spin_lock_bh(&conn->state_lock);
4060 } 4059 }
4061 atomic_set(&conn->connection_reinstatement, 1); 4060 atomic_set(&conn->connection_reinstatement, 1);
4062 spin_unlock_bh(&conn->state_lock); 4061 spin_unlock_bh(&conn->state_lock);
4063 4062
4064 /* 4063 /*
4065 * If any other processes are accessing this connection pointer we 4064 * If any other processes are accessing this connection pointer we
4066 * must wait until they have completed. 4065 * must wait until they have completed.
4067 */ 4066 */
4068 iscsit_check_conn_usage_count(conn); 4067 iscsit_check_conn_usage_count(conn);
4069 4068
4070 if (conn->conn_rx_hash.tfm) 4069 if (conn->conn_rx_hash.tfm)
4071 crypto_free_hash(conn->conn_rx_hash.tfm); 4070 crypto_free_hash(conn->conn_rx_hash.tfm);
4072 if (conn->conn_tx_hash.tfm) 4071 if (conn->conn_tx_hash.tfm)
4073 crypto_free_hash(conn->conn_tx_hash.tfm); 4072 crypto_free_hash(conn->conn_tx_hash.tfm);
4074 4073
4075 if (conn->conn_cpumask) 4074 if (conn->conn_cpumask)
4076 free_cpumask_var(conn->conn_cpumask); 4075 free_cpumask_var(conn->conn_cpumask);
4077 4076
4078 kfree(conn->conn_ops); 4077 kfree(conn->conn_ops);
4079 conn->conn_ops = NULL; 4078 conn->conn_ops = NULL;
4080 4079
4081 if (conn->sock) { 4080 if (conn->sock) {
4082 if (conn->conn_flags & CONNFLAG_SCTP_STRUCT_FILE) { 4081 if (conn->conn_flags & CONNFLAG_SCTP_STRUCT_FILE) {
4083 kfree(conn->sock->file); 4082 kfree(conn->sock->file);
4084 conn->sock->file = NULL; 4083 conn->sock->file = NULL;
4085 } 4084 }
4086 sock_release(conn->sock); 4085 sock_release(conn->sock);
4087 } 4086 }
4088 conn->thread_set = NULL; 4087 conn->thread_set = NULL;
4089 4088
4090 pr_debug("Moving to TARG_CONN_STATE_FREE.\n"); 4089 pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
4091 conn->conn_state = TARG_CONN_STATE_FREE; 4090 conn->conn_state = TARG_CONN_STATE_FREE;
4092 kfree(conn); 4091 kfree(conn);
4093 4092
4094 spin_lock_bh(&sess->conn_lock); 4093 spin_lock_bh(&sess->conn_lock);
4095 atomic_dec(&sess->nconn); 4094 atomic_dec(&sess->nconn);
4096 pr_debug("Decremented iSCSI connection count to %hu from node:" 4095 pr_debug("Decremented iSCSI connection count to %hu from node:"
4097 " %s\n", atomic_read(&sess->nconn), 4096 " %s\n", atomic_read(&sess->nconn),
4098 sess->sess_ops->InitiatorName); 4097 sess->sess_ops->InitiatorName);
4099 /* 4098 /*
4100 * Make sure that if one connection fails in a non ERL=2 iSCSI 4099 * Make sure that if one connection fails in a non ERL=2 iSCSI
4101 * Session that they all fail. 4100 * Session that they all fail.
4102 */ 4101 */
4103 if ((sess->sess_ops->ErrorRecoveryLevel != 2) && !conn_logout && 4102 if ((sess->sess_ops->ErrorRecoveryLevel != 2) && !conn_logout &&
4104 !atomic_read(&sess->session_logout)) 4103 !atomic_read(&sess->session_logout))
4105 atomic_set(&sess->session_fall_back_to_erl0, 1); 4104 atomic_set(&sess->session_fall_back_to_erl0, 1);
4106 4105
4107 /* 4106 /*
4108 * If this was not the last connection in the session, and we are 4107 * If this was not the last connection in the session, and we are
4109 * performing session reinstatement or falling back to ERL=0, call 4108 * performing session reinstatement or falling back to ERL=0, call
4110 * iscsit_stop_session() without sleeping to shut down the other 4109 * iscsit_stop_session() without sleeping to shut down the other
4111 * active connections. 4110 * active connections.
4112 */ 4111 */
4113 if (atomic_read(&sess->nconn)) { 4112 if (atomic_read(&sess->nconn)) {
4114 if (!atomic_read(&sess->session_reinstatement) && 4113 if (!atomic_read(&sess->session_reinstatement) &&
4115 !atomic_read(&sess->session_fall_back_to_erl0)) { 4114 !atomic_read(&sess->session_fall_back_to_erl0)) {
4116 spin_unlock_bh(&sess->conn_lock); 4115 spin_unlock_bh(&sess->conn_lock);
4117 return 0; 4116 return 0;
4118 } 4117 }
4119 if (!atomic_read(&sess->session_stop_active)) { 4118 if (!atomic_read(&sess->session_stop_active)) {
4120 atomic_set(&sess->session_stop_active, 1); 4119 atomic_set(&sess->session_stop_active, 1);
4121 spin_unlock_bh(&sess->conn_lock); 4120 spin_unlock_bh(&sess->conn_lock);
4122 iscsit_stop_session(sess, 0, 0); 4121 iscsit_stop_session(sess, 0, 0);
4123 return 0; 4122 return 0;
4124 } 4123 }
4125 spin_unlock_bh(&sess->conn_lock); 4124 spin_unlock_bh(&sess->conn_lock);
4126 return 0; 4125 return 0;
4127 } 4126 }
4128 4127
4129 /* 4128 /*
4130 * If this was the last connection in the session and one of the 4129 * If this was the last connection in the session and one of the
4131 * following is occurring: 4130 * following is occurring:
4132 * 4131 *
4133 * Session Reinstatement is not being performed and we are falling back 4132 * Session Reinstatement is not being performed and we are falling back
4134 * to ERL=0: call iscsit_close_session(). 4133 * to ERL=0: call iscsit_close_session().
4135 * 4134 *
4136 * Session Logout was requested. iscsit_close_session() will be called 4135 * Session Logout was requested. iscsit_close_session() will be called
4137 * elsewhere. 4136 * elsewhere.
4138 * 4137 *
4139 * Session Continuation is not being performed, start the Time2Retain 4138 * Session Continuation is not being performed, start the Time2Retain
4140 * handler and check if sleep_on_sess_wait_comp is active. 4139 * handler and check if sleep_on_sess_wait_comp is active.
4141 */ 4140 */
4142 if (!atomic_read(&sess->session_reinstatement) && 4141 if (!atomic_read(&sess->session_reinstatement) &&
4143 atomic_read(&sess->session_fall_back_to_erl0)) { 4142 atomic_read(&sess->session_fall_back_to_erl0)) {
4144 spin_unlock_bh(&sess->conn_lock); 4143 spin_unlock_bh(&sess->conn_lock);
4145 iscsit_close_session(sess); 4144 iscsit_close_session(sess);
4146 4145
4147 return 0; 4146 return 0;
4148 } else if (atomic_read(&sess->session_logout)) { 4147 } else if (atomic_read(&sess->session_logout)) {
4149 pr_debug("Moving to TARG_SESS_STATE_FREE.\n"); 4148 pr_debug("Moving to TARG_SESS_STATE_FREE.\n");
4150 sess->session_state = TARG_SESS_STATE_FREE; 4149 sess->session_state = TARG_SESS_STATE_FREE;
4151 spin_unlock_bh(&sess->conn_lock); 4150 spin_unlock_bh(&sess->conn_lock);
4152 4151
4153 if (atomic_read(&sess->sleep_on_sess_wait_comp)) 4152 if (atomic_read(&sess->sleep_on_sess_wait_comp))
4154 complete(&sess->session_wait_comp); 4153 complete(&sess->session_wait_comp);
4155 4154
4156 return 0; 4155 return 0;
4157 } else { 4156 } else {
4158 pr_debug("Moving to TARG_SESS_STATE_FAILED.\n"); 4157 pr_debug("Moving to TARG_SESS_STATE_FAILED.\n");
4159 sess->session_state = TARG_SESS_STATE_FAILED; 4158 sess->session_state = TARG_SESS_STATE_FAILED;
4160 4159
4161 if (!atomic_read(&sess->session_continuation)) { 4160 if (!atomic_read(&sess->session_continuation)) {
4162 spin_unlock_bh(&sess->conn_lock); 4161 spin_unlock_bh(&sess->conn_lock);
4163 iscsit_start_time2retain_handler(sess); 4162 iscsit_start_time2retain_handler(sess);
4164 } else 4163 } else
4165 spin_unlock_bh(&sess->conn_lock); 4164 spin_unlock_bh(&sess->conn_lock);
4166 4165
4167 if (atomic_read(&sess->sleep_on_sess_wait_comp)) 4166 if (atomic_read(&sess->sleep_on_sess_wait_comp))
4168 complete(&sess->session_wait_comp); 4167 complete(&sess->session_wait_comp);
4169 4168
4170 return 0; 4169 return 0;
4171 } 4170 }
4172 spin_unlock_bh(&sess->conn_lock); 4171 spin_unlock_bh(&sess->conn_lock);
4173 4172
4174 return 0; 4173 return 0;
4175 } 4174 }
4176 4175
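The state_lock/completion dance in the close path above is a two-sided handshake between teardown and a context sleeping in iscsit_cause_connection_reinstatement(). A minimal sketch of the pattern, reusing the field names from the driver (illustrative kernel-style code, not part of this commit):

	/* Context A: requests reinstatement and sleeps until teardown reacts */
	atomic_set(&conn->sleep_on_conn_wait_comp, 1);
	wait_for_completion(&conn->conn_wait_comp);	/* woken by teardown */
	/* ... finish reinstatement bookkeeping ... */
	complete(&conn->conn_post_wait_comp);		/* release teardown */

	/* Context B: teardown, as in the close path above */
	spin_lock_bh(&conn->state_lock);
	if (atomic_read(&conn->sleep_on_conn_wait_comp)) {
		spin_unlock_bh(&conn->state_lock);
		complete(&conn->conn_wait_comp);	/* wake context A */
		wait_for_completion(&conn->conn_post_wait_comp);
		spin_lock_bh(&conn->state_lock);
	}
	spin_unlock_bh(&conn->state_lock);

The second wait_for_completion() guarantees that context A is done touching the connection before teardown goes on to free it.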
4177 int iscsit_close_session(struct iscsi_session *sess) 4176 int iscsit_close_session(struct iscsi_session *sess)
4178 { 4177 {
4179 struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess); 4178 struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
4180 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; 4179 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
4181 4180
4182 if (atomic_read(&sess->nconn)) { 4181 if (atomic_read(&sess->nconn)) {
4183 pr_err("%d connection(s) still exist for iSCSI session" 4182 pr_err("%d connection(s) still exist for iSCSI session"
4184 " to %s\n", atomic_read(&sess->nconn), 4183 " to %s\n", atomic_read(&sess->nconn),
4185 sess->sess_ops->InitiatorName); 4184 sess->sess_ops->InitiatorName);
4186 BUG(); 4185 BUG();
4187 } 4186 }
4188 4187
4189 spin_lock_bh(&se_tpg->session_lock); 4188 spin_lock_bh(&se_tpg->session_lock);
4190 atomic_set(&sess->session_logout, 1); 4189 atomic_set(&sess->session_logout, 1);
4191 atomic_set(&sess->session_reinstatement, 1); 4190 atomic_set(&sess->session_reinstatement, 1);
4192 iscsit_stop_time2retain_timer(sess); 4191 iscsit_stop_time2retain_timer(sess);
4193 spin_unlock_bh(&se_tpg->session_lock); 4192 spin_unlock_bh(&se_tpg->session_lock);
4194 4193
4195 /* 4194 /*
4196 * transport_deregister_session_configfs() will clear the 4195 * transport_deregister_session_configfs() will clear the
4197 * struct se_node_acl->nacl_sess pointer now as an iscsi_np process context 4196 * struct se_node_acl->nacl_sess pointer now as an iscsi_np process context
4198 * can be setting it again with __transport_register_session() in 4197 * can be setting it again with __transport_register_session() in
4199 * iscsi_post_login_handler() again after the iscsit_stop_session() 4198 * iscsi_post_login_handler() again after the iscsit_stop_session()
4200 * completes in iscsi_np context. 4199 * completes in iscsi_np context.
4201 */ 4200 */
4202 transport_deregister_session_configfs(sess->se_sess); 4201 transport_deregister_session_configfs(sess->se_sess);
4203 4202
4204 /* 4203 /*
4205 * If any other processes are accessing this session pointer we must 4204 * If any other processes are accessing this session pointer we must
4206 * wait until they have completed. If we are in an interrupt (the 4205 * wait until they have completed. If we are in an interrupt (the
4207 * time2retain handler) and hold an active session usage count, we 4206 * time2retain handler) and hold an active session usage count, we
4208 * restart the timer and exit. 4207 * restart the timer and exit.
4209 */ 4208 */
4210 if (!in_interrupt()) { 4209 if (!in_interrupt()) {
4211 if (iscsit_check_session_usage_count(sess) == 1) 4210 if (iscsit_check_session_usage_count(sess) == 1)
4212 iscsit_stop_session(sess, 1, 1); 4211 iscsit_stop_session(sess, 1, 1);
4213 } else { 4212 } else {
4214 if (iscsit_check_session_usage_count(sess) == 2) { 4213 if (iscsit_check_session_usage_count(sess) == 2) {
4215 atomic_set(&sess->session_logout, 0); 4214 atomic_set(&sess->session_logout, 0);
4216 iscsit_start_time2retain_handler(sess); 4215 iscsit_start_time2retain_handler(sess);
4217 return 0; 4216 return 0;
4218 } 4217 }
4219 } 4218 }
4220 4219
4221 transport_deregister_session(sess->se_sess); 4220 transport_deregister_session(sess->se_sess);
4222 4221
4223 if (sess->sess_ops->ErrorRecoveryLevel == 2) 4222 if (sess->sess_ops->ErrorRecoveryLevel == 2)
4224 iscsit_free_connection_recovery_entires(sess); 4223 iscsit_free_connection_recovery_entires(sess);
4225 4224
4226 iscsit_free_all_ooo_cmdsns(sess); 4225 iscsit_free_all_ooo_cmdsns(sess);
4227 4226
4228 spin_lock_bh(&se_tpg->session_lock); 4227 spin_lock_bh(&se_tpg->session_lock);
4229 pr_debug("Moving to TARG_SESS_STATE_FREE.\n"); 4228 pr_debug("Moving to TARG_SESS_STATE_FREE.\n");
4230 sess->session_state = TARG_SESS_STATE_FREE; 4229 sess->session_state = TARG_SESS_STATE_FREE;
4231 pr_debug("Released iSCSI session from node: %s\n", 4230 pr_debug("Released iSCSI session from node: %s\n",
4232 sess->sess_ops->InitiatorName); 4231 sess->sess_ops->InitiatorName);
4233 tpg->nsessions--; 4232 tpg->nsessions--;
4234 if (tpg->tpg_tiqn) 4233 if (tpg->tpg_tiqn)
4235 tpg->tpg_tiqn->tiqn_nsessions--; 4234 tpg->tpg_tiqn->tiqn_nsessions--;
4236 4235
4237 pr_debug("Decremented number of active iSCSI Sessions on" 4236 pr_debug("Decremented number of active iSCSI Sessions on"
4238 " iSCSI TPG: %hu to %u\n", tpg->tpgt, tpg->nsessions); 4237 " iSCSI TPG: %hu to %u\n", tpg->tpgt, tpg->nsessions);
4239 4238
4240 spin_lock(&sess_idr_lock); 4239 spin_lock(&sess_idr_lock);
4241 idr_remove(&sess_idr, sess->session_index); 4240 idr_remove(&sess_idr, sess->session_index);
4242 spin_unlock(&sess_idr_lock); 4241 spin_unlock(&sess_idr_lock);
4243 4242
4244 kfree(sess->sess_ops); 4243 kfree(sess->sess_ops);
4245 sess->sess_ops = NULL; 4244 sess->sess_ops = NULL;
4246 spin_unlock_bh(&se_tpg->session_lock); 4245 spin_unlock_bh(&se_tpg->session_lock);
4247 4246
4248 kfree(sess); 4247 kfree(sess);
4249 return 0; 4248 return 0;
4250 } 4249 }
4251 4250
4252 static void iscsit_logout_post_handler_closesession( 4251 static void iscsit_logout_post_handler_closesession(
4253 struct iscsi_conn *conn) 4252 struct iscsi_conn *conn)
4254 { 4253 {
4255 struct iscsi_session *sess = conn->sess; 4254 struct iscsi_session *sess = conn->sess;
4256 4255
4257 iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD); 4256 iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD);
4258 iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD); 4257 iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD);
4259 4258
4260 atomic_set(&conn->conn_logout_remove, 0); 4259 atomic_set(&conn->conn_logout_remove, 0);
4261 complete(&conn->conn_logout_comp); 4260 complete(&conn->conn_logout_comp);
4262 4261
4263 iscsit_dec_conn_usage_count(conn); 4262 iscsit_dec_conn_usage_count(conn);
4264 iscsit_stop_session(sess, 1, 1); 4263 iscsit_stop_session(sess, 1, 1);
4265 iscsit_dec_session_usage_count(sess); 4264 iscsit_dec_session_usage_count(sess);
4266 iscsit_close_session(sess); 4265 iscsit_close_session(sess);
4267 } 4266 }
4268 4267
4269 static void iscsit_logout_post_handler_samecid( 4268 static void iscsit_logout_post_handler_samecid(
4270 struct iscsi_conn *conn) 4269 struct iscsi_conn *conn)
4271 { 4270 {
4272 iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD); 4271 iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD);
4273 iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD); 4272 iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD);
4274 4273
4275 atomic_set(&conn->conn_logout_remove, 0); 4274 atomic_set(&conn->conn_logout_remove, 0);
4276 complete(&conn->conn_logout_comp); 4275 complete(&conn->conn_logout_comp);
4277 4276
4278 iscsit_cause_connection_reinstatement(conn, 1); 4277 iscsit_cause_connection_reinstatement(conn, 1);
4279 iscsit_dec_conn_usage_count(conn); 4278 iscsit_dec_conn_usage_count(conn);
4280 } 4279 }
4281 4280
4282 static void iscsit_logout_post_handler_diffcid( 4281 static void iscsit_logout_post_handler_diffcid(
4283 struct iscsi_conn *conn, 4282 struct iscsi_conn *conn,
4284 u16 cid) 4283 u16 cid)
4285 { 4284 {
4286 struct iscsi_conn *l_conn; 4285 struct iscsi_conn *l_conn;
4287 struct iscsi_session *sess = conn->sess; 4286 struct iscsi_session *sess = conn->sess;
4288 4287
4289 if (!sess) 4288 if (!sess)
4290 return; 4289 return;
4291 4290
4292 spin_lock_bh(&sess->conn_lock); 4291 spin_lock_bh(&sess->conn_lock);
4293 list_for_each_entry(l_conn, &sess->sess_conn_list, conn_list) { 4292 list_for_each_entry(l_conn, &sess->sess_conn_list, conn_list) {
4294 if (l_conn->cid == cid) { 4293 if (l_conn->cid == cid) {
4295 iscsit_inc_conn_usage_count(l_conn); 4294 iscsit_inc_conn_usage_count(l_conn);
4296 break; 4295 break;
4297 } 4296 }
4298 } 4297 }
4299 spin_unlock_bh(&sess->conn_lock); 4298 spin_unlock_bh(&sess->conn_lock);
4300 4299
4301 if (!l_conn) 4300 if (!l_conn)
4302 return; 4301 return;
4303 4302
4304 if (l_conn->sock) 4303 if (l_conn->sock)
4305 l_conn->sock->ops->shutdown(l_conn->sock, RCV_SHUTDOWN); 4304 l_conn->sock->ops->shutdown(l_conn->sock, RCV_SHUTDOWN);
4306 4305
4307 spin_lock_bh(&l_conn->state_lock); 4306 spin_lock_bh(&l_conn->state_lock);
4308 pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n"); 4307 pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n");
4309 l_conn->conn_state = TARG_CONN_STATE_IN_LOGOUT; 4308 l_conn->conn_state = TARG_CONN_STATE_IN_LOGOUT;
4310 spin_unlock_bh(&l_conn->state_lock); 4309 spin_unlock_bh(&l_conn->state_lock);
4311 4310
4312 iscsit_cause_connection_reinstatement(l_conn, 1); 4311 iscsit_cause_connection_reinstatement(l_conn, 1);
4313 iscsit_dec_conn_usage_count(l_conn); 4312 iscsit_dec_conn_usage_count(l_conn);
4314 } 4313 }
4315 4314
4316 /* 4315 /*
4317 * Return of 0 causes the TX thread to restart. 4316 * Return of 0 causes the TX thread to restart.
4318 */ 4317 */
4319 static int iscsit_logout_post_handler( 4318 static int iscsit_logout_post_handler(
4320 struct iscsi_cmd *cmd, 4319 struct iscsi_cmd *cmd,
4321 struct iscsi_conn *conn) 4320 struct iscsi_conn *conn)
4322 { 4321 {
4323 int ret = 0; 4322 int ret = 0;
4324 4323
4325 switch (cmd->logout_reason) { 4324 switch (cmd->logout_reason) {
4326 case ISCSI_LOGOUT_REASON_CLOSE_SESSION: 4325 case ISCSI_LOGOUT_REASON_CLOSE_SESSION:
4327 switch (cmd->logout_response) { 4326 switch (cmd->logout_response) {
4328 case ISCSI_LOGOUT_SUCCESS: 4327 case ISCSI_LOGOUT_SUCCESS:
4329 case ISCSI_LOGOUT_CLEANUP_FAILED: 4328 case ISCSI_LOGOUT_CLEANUP_FAILED:
4330 default: 4329 default:
4331 iscsit_logout_post_handler_closesession(conn); 4330 iscsit_logout_post_handler_closesession(conn);
4332 break; 4331 break;
4333 } 4332 }
4334 ret = 0; 4333 ret = 0;
4335 break; 4334 break;
4336 case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION: 4335 case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION:
4337 if (conn->cid == cmd->logout_cid) { 4336 if (conn->cid == cmd->logout_cid) {
4338 switch (cmd->logout_response) { 4337 switch (cmd->logout_response) {
4339 case ISCSI_LOGOUT_SUCCESS: 4338 case ISCSI_LOGOUT_SUCCESS:
4340 case ISCSI_LOGOUT_CLEANUP_FAILED: 4339 case ISCSI_LOGOUT_CLEANUP_FAILED:
4341 default: 4340 default:
4342 iscsit_logout_post_handler_samecid(conn); 4341 iscsit_logout_post_handler_samecid(conn);
4343 break; 4342 break;
4344 } 4343 }
4345 ret = 0; 4344 ret = 0;
4346 } else { 4345 } else {
4347 switch (cmd->logout_response) { 4346 switch (cmd->logout_response) {
4348 case ISCSI_LOGOUT_SUCCESS: 4347 case ISCSI_LOGOUT_SUCCESS:
4349 iscsit_logout_post_handler_diffcid(conn, 4348 iscsit_logout_post_handler_diffcid(conn,
4350 cmd->logout_cid); 4349 cmd->logout_cid);
4351 break; 4350 break;
4352 case ISCSI_LOGOUT_CID_NOT_FOUND: 4351 case ISCSI_LOGOUT_CID_NOT_FOUND:
4353 case ISCSI_LOGOUT_CLEANUP_FAILED: 4352 case ISCSI_LOGOUT_CLEANUP_FAILED:
4354 default: 4353 default:
4355 break; 4354 break;
4356 } 4355 }
4357 ret = 1; 4356 ret = 1;
4358 } 4357 }
4359 break; 4358 break;
4360 case ISCSI_LOGOUT_REASON_RECOVERY: 4359 case ISCSI_LOGOUT_REASON_RECOVERY:
4361 switch (cmd->logout_response) { 4360 switch (cmd->logout_response) {
4362 case ISCSI_LOGOUT_SUCCESS: 4361 case ISCSI_LOGOUT_SUCCESS:
4363 case ISCSI_LOGOUT_CID_NOT_FOUND: 4362 case ISCSI_LOGOUT_CID_NOT_FOUND:
4364 case ISCSI_LOGOUT_RECOVERY_UNSUPPORTED: 4363 case ISCSI_LOGOUT_RECOVERY_UNSUPPORTED:
4365 case ISCSI_LOGOUT_CLEANUP_FAILED: 4364 case ISCSI_LOGOUT_CLEANUP_FAILED:
4366 default: 4365 default:
4367 break; 4366 break;
4368 } 4367 }
4369 ret = 1; 4368 ret = 1;
4370 break; 4369 break;
4371 default: 4370 default:
4372 break; 4371 break;
4373 4372
4374 } 4373 }
4375 return ret; 4374 return ret;
4376 } 4375 }
4377 4376
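Note the asymmetry in the handler above: the CLOSE_SESSION case and the same-CID CLOSE_CONNECTION case return 0 (the responding connection itself is being torn down, so per the comment above the TX thread restarts), while the different-CID and RECOVERY cases return 1 (only another connection, if any, is affected, so this connection's TX thread keeps running).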
4378 void iscsit_fail_session(struct iscsi_session *sess) 4377 void iscsit_fail_session(struct iscsi_session *sess)
4379 { 4378 {
4380 struct iscsi_conn *conn; 4379 struct iscsi_conn *conn;
4381 4380
4382 spin_lock_bh(&sess->conn_lock); 4381 spin_lock_bh(&sess->conn_lock);
4383 list_for_each_entry(conn, &sess->sess_conn_list, conn_list) { 4382 list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
4384 pr_debug("Moving to TARG_CONN_STATE_CLEANUP_WAIT.\n"); 4383 pr_debug("Moving to TARG_CONN_STATE_CLEANUP_WAIT.\n");
4385 conn->conn_state = TARG_CONN_STATE_CLEANUP_WAIT; 4384 conn->conn_state = TARG_CONN_STATE_CLEANUP_WAIT;
4386 } 4385 }
4387 spin_unlock_bh(&sess->conn_lock); 4386 spin_unlock_bh(&sess->conn_lock);
4388 4387
4389 pr_debug("Moving to TARG_SESS_STATE_FAILED.\n"); 4388 pr_debug("Moving to TARG_SESS_STATE_FAILED.\n");
4390 sess->session_state = TARG_SESS_STATE_FAILED; 4389 sess->session_state = TARG_SESS_STATE_FAILED;
4391 } 4390 }
4392 4391
4393 int iscsit_free_session(struct iscsi_session *sess) 4392 int iscsit_free_session(struct iscsi_session *sess)
4394 { 4393 {
4395 u16 conn_count = atomic_read(&sess->nconn); 4394 u16 conn_count = atomic_read(&sess->nconn);
4396 struct iscsi_conn *conn, *conn_tmp = NULL; 4395 struct iscsi_conn *conn, *conn_tmp = NULL;
4397 int is_last; 4396 int is_last;
4398 4397
4399 spin_lock_bh(&sess->conn_lock); 4398 spin_lock_bh(&sess->conn_lock);
4400 atomic_set(&sess->sleep_on_sess_wait_comp, 1); 4399 atomic_set(&sess->sleep_on_sess_wait_comp, 1);
4401 4400
4402 list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list, 4401 list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list,
4403 conn_list) { 4402 conn_list) {
4404 if (conn_count == 0) 4403 if (conn_count == 0)
4405 break; 4404 break;
4406 4405
4407 if (list_is_last(&conn->conn_list, &sess->sess_conn_list)) { 4406 if (list_is_last(&conn->conn_list, &sess->sess_conn_list)) {
4408 is_last = 1; 4407 is_last = 1;
4409 } else { 4408 } else {
4410 iscsit_inc_conn_usage_count(conn_tmp); 4409 iscsit_inc_conn_usage_count(conn_tmp);
4411 is_last = 0; 4410 is_last = 0;
4412 } 4411 }
4413 iscsit_inc_conn_usage_count(conn); 4412 iscsit_inc_conn_usage_count(conn);
4414 4413
4415 spin_unlock_bh(&sess->conn_lock); 4414 spin_unlock_bh(&sess->conn_lock);
4416 iscsit_cause_connection_reinstatement(conn, 1); 4415 iscsit_cause_connection_reinstatement(conn, 1);
4417 spin_lock_bh(&sess->conn_lock); 4416 spin_lock_bh(&sess->conn_lock);
4418 4417
4419 iscsit_dec_conn_usage_count(conn); 4418 iscsit_dec_conn_usage_count(conn);
4420 if (is_last == 0) 4419 if (is_last == 0)
4421 iscsit_dec_conn_usage_count(conn_tmp); 4420 iscsit_dec_conn_usage_count(conn_tmp);
4422 4421
4423 conn_count--; 4422 conn_count--;
4424 } 4423 }
4425 4424
4426 if (atomic_read(&sess->nconn)) { 4425 if (atomic_read(&sess->nconn)) {
4427 spin_unlock_bh(&sess->conn_lock); 4426 spin_unlock_bh(&sess->conn_lock);
4428 wait_for_completion(&sess->session_wait_comp); 4427 wait_for_completion(&sess->session_wait_comp);
4429 } else 4428 } else
4430 spin_unlock_bh(&sess->conn_lock); 4429 spin_unlock_bh(&sess->conn_lock);
4431 4430
4432 iscsit_close_session(sess); 4431 iscsit_close_session(sess);
4433 return 0; 4432 return 0;
4434 } 4433 }
4435 4434
4436 void iscsit_stop_session( 4435 void iscsit_stop_session(
4437 struct iscsi_session *sess, 4436 struct iscsi_session *sess,
4438 int session_sleep, 4437 int session_sleep,
4439 int connection_sleep) 4438 int connection_sleep)
4440 { 4439 {
4441 u16 conn_count = atomic_read(&sess->nconn); 4440 u16 conn_count = atomic_read(&sess->nconn);
4442 struct iscsi_conn *conn, *conn_tmp = NULL; 4441 struct iscsi_conn *conn, *conn_tmp = NULL;
4443 int is_last; 4442 int is_last;
4444 4443
4445 spin_lock_bh(&sess->conn_lock); 4444 spin_lock_bh(&sess->conn_lock);
4446 if (session_sleep) 4445 if (session_sleep)
4447 atomic_set(&sess->sleep_on_sess_wait_comp, 1); 4446 atomic_set(&sess->sleep_on_sess_wait_comp, 1);
4448 4447
4449 if (connection_sleep) { 4448 if (connection_sleep) {
4450 list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list, 4449 list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list,
4451 conn_list) { 4450 conn_list) {
4452 if (conn_count == 0) 4451 if (conn_count == 0)
4453 break; 4452 break;
4454 4453
4455 if (list_is_last(&conn->conn_list, &sess->sess_conn_list)) { 4454 if (list_is_last(&conn->conn_list, &sess->sess_conn_list)) {
4456 is_last = 1; 4455 is_last = 1;
4457 } else { 4456 } else {
4458 iscsit_inc_conn_usage_count(conn_tmp); 4457 iscsit_inc_conn_usage_count(conn_tmp);
4459 is_last = 0; 4458 is_last = 0;
4460 } 4459 }
4461 iscsit_inc_conn_usage_count(conn); 4460 iscsit_inc_conn_usage_count(conn);
4462 4461
4463 spin_unlock_bh(&sess->conn_lock); 4462 spin_unlock_bh(&sess->conn_lock);
4464 iscsit_cause_connection_reinstatement(conn, 1); 4463 iscsit_cause_connection_reinstatement(conn, 1);
4465 spin_lock_bh(&sess->conn_lock); 4464 spin_lock_bh(&sess->conn_lock);
4466 4465
4467 iscsit_dec_conn_usage_count(conn); 4466 iscsit_dec_conn_usage_count(conn);
4468 if (is_last == 0) 4467 if (is_last == 0)
4469 iscsit_dec_conn_usage_count(conn_tmp); 4468 iscsit_dec_conn_usage_count(conn_tmp);
4470 conn_count--; 4469 conn_count--;
4471 } 4470 }
4472 } else { 4471 } else {
4473 list_for_each_entry(conn, &sess->sess_conn_list, conn_list) 4472 list_for_each_entry(conn, &sess->sess_conn_list, conn_list)
4474 iscsit_cause_connection_reinstatement(conn, 0); 4473 iscsit_cause_connection_reinstatement(conn, 0);
4475 } 4474 }
4476 4475
4477 if (session_sleep && atomic_read(&sess->nconn)) { 4476 if (session_sleep && atomic_read(&sess->nconn)) {
4478 spin_unlock_bh(&sess->conn_lock); 4477 spin_unlock_bh(&sess->conn_lock);
4479 wait_for_completion(&sess->session_wait_comp); 4478 wait_for_completion(&sess->session_wait_comp);
4480 } else 4479 } else
4481 spin_unlock_bh(&sess->conn_lock); 4480 spin_unlock_bh(&sess->conn_lock);
4482 } 4481 }
4483 4482
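Both iscsit_free_session() and iscsit_stop_session() above use the same walk-and-sleep idiom: iscsit_cause_connection_reinstatement() can sleep, so sess->conn_lock must be dropped around it, and usage counts pin both the current connection and the iterator's next entry so that neither is freed while the lock is not held. Condensed from the loops above:

	spin_lock_bh(&sess->conn_lock);
	list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list,
				conn_list) {
		int is_last = list_is_last(&conn->conn_list,
					&sess->sess_conn_list);

		if (!is_last)
			iscsit_inc_conn_usage_count(conn_tmp);	/* pin next */
		iscsit_inc_conn_usage_count(conn);		/* pin current */

		spin_unlock_bh(&sess->conn_lock);
		iscsit_cause_connection_reinstatement(conn, 1);	/* may sleep */
		spin_lock_bh(&sess->conn_lock);

		iscsit_dec_conn_usage_count(conn);
		if (!is_last)
			iscsit_dec_conn_usage_count(conn_tmp);
	}
	spin_unlock_bh(&sess->conn_lock);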
4484 int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force) 4483 int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
4485 { 4484 {
4486 struct iscsi_session *sess; 4485 struct iscsi_session *sess;
4487 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; 4486 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
4488 struct se_session *se_sess, *se_sess_tmp; 4487 struct se_session *se_sess, *se_sess_tmp;
4489 int session_count = 0; 4488 int session_count = 0;
4490 4489
4491 spin_lock_bh(&se_tpg->session_lock); 4490 spin_lock_bh(&se_tpg->session_lock);
4492 if (tpg->nsessions && !force) { 4491 if (tpg->nsessions && !force) {
4493 spin_unlock_bh(&se_tpg->session_lock); 4492 spin_unlock_bh(&se_tpg->session_lock);
4494 return -1; 4493 return -1;
4495 } 4494 }
4496 4495
4497 list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list, 4496 list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list,
4498 sess_list) { 4497 sess_list) {
4499 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr; 4498 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
4500 4499
4501 spin_lock(&sess->conn_lock); 4500 spin_lock(&sess->conn_lock);
4502 if (atomic_read(&sess->session_fall_back_to_erl0) || 4501 if (atomic_read(&sess->session_fall_back_to_erl0) ||
4503 atomic_read(&sess->session_logout) || 4502 atomic_read(&sess->session_logout) ||
4504 (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) { 4503 (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
4505 spin_unlock(&sess->conn_lock); 4504 spin_unlock(&sess->conn_lock);
4506 continue; 4505 continue;
4507 } 4506 }
4508 atomic_set(&sess->session_reinstatement, 1); 4507 atomic_set(&sess->session_reinstatement, 1);
4509 spin_unlock(&sess->conn_lock); 4508 spin_unlock(&sess->conn_lock);
4510 spin_unlock_bh(&se_tpg->session_lock); 4509 spin_unlock_bh(&se_tpg->session_lock);
4511 4510
4512 iscsit_free_session(sess); 4511 iscsit_free_session(sess);
4513 spin_lock_bh(&se_tpg->session_lock); 4512 spin_lock_bh(&se_tpg->session_lock);
4514 4513
4515 session_count++; 4514 session_count++;
4516 } 4515 }
4517 spin_unlock_bh(&se_tpg->session_lock); 4516 spin_unlock_bh(&se_tpg->session_lock);
4518 4517
4519 pr_debug("Released %d iSCSI Session(s) from Target Portal" 4518 pr_debug("Released %d iSCSI Session(s) from Target Portal"
4520 " Group: %hu\n", session_count, tpg->tpgt); 4519 " Group: %hu\n", session_count, tpg->tpgt);
4521 return 0; 4520 return 0;
4522 } 4521 }
4523 4522
4524 MODULE_DESCRIPTION("iSCSI-Target Driver for mainline target infrastructure"); 4523 MODULE_DESCRIPTION("iSCSI-Target Driver for mainline target infrastructure");
4525 MODULE_VERSION("4.1.x"); 4524 MODULE_VERSION("4.1.x");
4526 MODULE_AUTHOR("nab@Linux-iSCSI.org"); 4525 MODULE_AUTHOR("nab@Linux-iSCSI.org");
4527 MODULE_LICENSE("GPL"); 4526 MODULE_LICENSE("GPL");
4528 4527
4529 module_init(iscsi_target_init_module); 4528 module_init(iscsi_target_init_module);
4530 module_exit(iscsi_target_cleanup_module); 4529 module_exit(iscsi_target_cleanup_module);
4531 4530
drivers/target/iscsi/iscsi_target_configfs.c
1 /******************************************************************************* 1 /*******************************************************************************
2 * This file contains the configfs implementation for iSCSI Target mode 2 * This file contains the configfs implementation for iSCSI Target mode
3 * from the LIO-Target Project. 3 * from the LIO-Target Project.
4 * 4 *
5 * © Copyright 2007-2011 RisingTide Systems LLC. 5 * © Copyright 2007-2011 RisingTide Systems LLC.
6 * 6 *
7 * Licensed to the Linux Foundation under the General Public License (GPL) version 2. 7 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
8 * 8 *
9 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> 9 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
10 * 10 *
11 * This program is free software; you can redistribute it and/or modify 11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by 12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or 13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version. 14 * (at your option) any later version.
15 * 15 *
16 * This program is distributed in the hope that it will be useful, 16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details. 19 * GNU General Public License for more details.
20 ****************************************************************************/ 20 ****************************************************************************/
21 21
22 #include <linux/configfs.h> 22 #include <linux/configfs.h>
23 #include <linux/export.h> 23 #include <linux/export.h>
24 #include <target/target_core_base.h> 24 #include <target/target_core_base.h>
25 #include <target/target_core_transport.h> 25 #include <target/target_core_fabric.h>
26 #include <target/target_core_fabric_ops.h>
27 #include <target/target_core_fabric_configfs.h> 26 #include <target/target_core_fabric_configfs.h>
28 #include <target/target_core_fabric_lib.h>
29 #include <target/target_core_device.h>
30 #include <target/target_core_tpg.h>
31 #include <target/target_core_configfs.h> 27 #include <target/target_core_configfs.h>
32 #include <target/configfs_macros.h> 28 #include <target/configfs_macros.h>
33 29
34 #include "iscsi_target_core.h" 30 #include "iscsi_target_core.h"
35 #include "iscsi_target_parameters.h" 31 #include "iscsi_target_parameters.h"
36 #include "iscsi_target_device.h" 32 #include "iscsi_target_device.h"
37 #include "iscsi_target_erl0.h" 33 #include "iscsi_target_erl0.h"
38 #include "iscsi_target_nodeattrib.h" 34 #include "iscsi_target_nodeattrib.h"
39 #include "iscsi_target_tpg.h" 35 #include "iscsi_target_tpg.h"
40 #include "iscsi_target_util.h" 36 #include "iscsi_target_util.h"
41 #include "iscsi_target.h" 37 #include "iscsi_target.h"
42 #include "iscsi_target_stat.h" 38 #include "iscsi_target_stat.h"
43 #include "iscsi_target_configfs.h" 39 #include "iscsi_target_configfs.h"
44 40
45 struct target_fabric_configfs *lio_target_fabric_configfs; 41 struct target_fabric_configfs *lio_target_fabric_configfs;
46 42
47 struct lio_target_configfs_attribute { 43 struct lio_target_configfs_attribute {
48 struct configfs_attribute attr; 44 struct configfs_attribute attr;
49 ssize_t (*show)(void *, char *); 45 ssize_t (*show)(void *, char *);
50 ssize_t (*store)(void *, const char *, size_t); 46 ssize_t (*store)(void *, const char *, size_t);
51 }; 47 };
52 48
53 struct iscsi_portal_group *lio_get_tpg_from_tpg_item( 49 struct iscsi_portal_group *lio_get_tpg_from_tpg_item(
54 struct config_item *item, 50 struct config_item *item,
55 struct iscsi_tiqn **tiqn_out) 51 struct iscsi_tiqn **tiqn_out)
56 { 52 {
57 struct se_portal_group *se_tpg = container_of(to_config_group(item), 53 struct se_portal_group *se_tpg = container_of(to_config_group(item),
58 struct se_portal_group, tpg_group); 54 struct se_portal_group, tpg_group);
59 struct iscsi_portal_group *tpg = 55 struct iscsi_portal_group *tpg =
60 (struct iscsi_portal_group *)se_tpg->se_tpg_fabric_ptr; 56 (struct iscsi_portal_group *)se_tpg->se_tpg_fabric_ptr;
61 int ret; 57 int ret;
62 58
63 if (!tpg) { 59 if (!tpg) {
64 pr_err("Unable to locate struct iscsi_portal_group " 60 pr_err("Unable to locate struct iscsi_portal_group "
65 "pointer\n"); 61 "pointer\n");
66 return NULL; 62 return NULL;
67 } 63 }
68 ret = iscsit_get_tpg(tpg); 64 ret = iscsit_get_tpg(tpg);
69 if (ret < 0) 65 if (ret < 0)
70 return NULL; 66 return NULL;
71 67
72 *tiqn_out = tpg->tpg_tiqn; 68 *tiqn_out = tpg->tpg_tiqn;
73 return tpg; 69 return tpg;
74 } 70 }
75 71
76 /* Start items for lio_target_portal_cit */ 72 /* Start items for lio_target_portal_cit */
77 73
78 static ssize_t lio_target_np_show_sctp( 74 static ssize_t lio_target_np_show_sctp(
79 struct se_tpg_np *se_tpg_np, 75 struct se_tpg_np *se_tpg_np,
80 char *page) 76 char *page)
81 { 77 {
82 struct iscsi_tpg_np *tpg_np = container_of(se_tpg_np, 78 struct iscsi_tpg_np *tpg_np = container_of(se_tpg_np,
83 struct iscsi_tpg_np, se_tpg_np); 79 struct iscsi_tpg_np, se_tpg_np);
84 struct iscsi_tpg_np *tpg_np_sctp; 80 struct iscsi_tpg_np *tpg_np_sctp;
85 ssize_t rb; 81 ssize_t rb;
86 82
87 tpg_np_sctp = iscsit_tpg_locate_child_np(tpg_np, ISCSI_SCTP_TCP); 83 tpg_np_sctp = iscsit_tpg_locate_child_np(tpg_np, ISCSI_SCTP_TCP);
88 if (tpg_np_sctp) 84 if (tpg_np_sctp)
89 rb = sprintf(page, "1\n"); 85 rb = sprintf(page, "1\n");
90 else 86 else
91 rb = sprintf(page, "0\n"); 87 rb = sprintf(page, "0\n");
92 88
93 return rb; 89 return rb;
94 } 90 }
95 91
96 static ssize_t lio_target_np_store_sctp( 92 static ssize_t lio_target_np_store_sctp(
97 struct se_tpg_np *se_tpg_np, 93 struct se_tpg_np *se_tpg_np,
98 const char *page, 94 const char *page,
99 size_t count) 95 size_t count)
100 { 96 {
101 struct iscsi_np *np; 97 struct iscsi_np *np;
102 struct iscsi_portal_group *tpg; 98 struct iscsi_portal_group *tpg;
103 struct iscsi_tpg_np *tpg_np = container_of(se_tpg_np, 99 struct iscsi_tpg_np *tpg_np = container_of(se_tpg_np,
104 struct iscsi_tpg_np, se_tpg_np); 100 struct iscsi_tpg_np, se_tpg_np);
105 struct iscsi_tpg_np *tpg_np_sctp = NULL; 101 struct iscsi_tpg_np *tpg_np_sctp = NULL;
106 char *endptr; 102 char *endptr;
107 u32 op; 103 u32 op;
108 int ret; 104 int ret;
109 105
110 op = simple_strtoul(page, &endptr, 0); 106 op = simple_strtoul(page, &endptr, 0);
111 if ((op != 1) && (op != 0)) { 107 if ((op != 1) && (op != 0)) {
112 pr_err("Illegal value for tpg_enable: %u\n", op); 108 pr_err("Illegal value for tpg_enable: %u\n", op);
113 return -EINVAL; 109 return -EINVAL;
114 } 110 }
115 np = tpg_np->tpg_np; 111 np = tpg_np->tpg_np;
116 if (!np) { 112 if (!np) {
117 pr_err("Unable to locate struct iscsi_np from" 113 pr_err("Unable to locate struct iscsi_np from"
118 " struct iscsi_tpg_np\n"); 114 " struct iscsi_tpg_np\n");
119 return -EINVAL; 115 return -EINVAL;
120 } 116 }
121 117
122 tpg = tpg_np->tpg; 118 tpg = tpg_np->tpg;
123 if (iscsit_get_tpg(tpg) < 0) 119 if (iscsit_get_tpg(tpg) < 0)
124 return -EINVAL; 120 return -EINVAL;
125 121
126 if (op) { 122 if (op) {
127 /* 123 /*
128 * Use existing np->np_sockaddr for SCTP network portal reference 124 * Use existing np->np_sockaddr for SCTP network portal reference
129 */ 125 */
130 tpg_np_sctp = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr, 126 tpg_np_sctp = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr,
131 np->np_ip, tpg_np, ISCSI_SCTP_TCP); 127 np->np_ip, tpg_np, ISCSI_SCTP_TCP);
132 if (!tpg_np_sctp || IS_ERR(tpg_np_sctp)) 128 if (!tpg_np_sctp || IS_ERR(tpg_np_sctp))
133 goto out; 129 goto out;
134 } else { 130 } else {
135 tpg_np_sctp = iscsit_tpg_locate_child_np(tpg_np, ISCSI_SCTP_TCP); 131 tpg_np_sctp = iscsit_tpg_locate_child_np(tpg_np, ISCSI_SCTP_TCP);
136 if (!tpg_np_sctp) 132 if (!tpg_np_sctp)
137 goto out; 133 goto out;
138 134
139 ret = iscsit_tpg_del_network_portal(tpg, tpg_np_sctp); 135 ret = iscsit_tpg_del_network_portal(tpg, tpg_np_sctp);
140 if (ret < 0) 136 if (ret < 0)
141 goto out; 137 goto out;
142 } 138 }
143 139
144 iscsit_put_tpg(tpg); 140 iscsit_put_tpg(tpg);
145 return count; 141 return count;
146 out: 142 out:
147 iscsit_put_tpg(tpg); 143 iscsit_put_tpg(tpg);
148 return -EINVAL; 144 return -EINVAL;
149 } 145 }
150 146
151 TF_NP_BASE_ATTR(lio_target, sctp, S_IRUGO | S_IWUSR); 147 TF_NP_BASE_ATTR(lio_target, sctp, S_IRUGO | S_IWUSR);
152 148
153 static struct configfs_attribute *lio_target_portal_attrs[] = { 149 static struct configfs_attribute *lio_target_portal_attrs[] = {
154 &lio_target_np_sctp.attr, 150 &lio_target_np_sctp.attr,
155 NULL, 151 NULL,
156 }; 152 };
157 153
158 /* Stop items for lio_target_portal_cit */ 154 /* Stop items for lio_target_portal_cit */
159 155
160 /* Start items for lio_target_np_cit */ 156 /* Start items for lio_target_np_cit */
161 157
162 #define MAX_PORTAL_LEN 256 158 #define MAX_PORTAL_LEN 256
163 159
164 struct se_tpg_np *lio_target_call_addnptotpg( 160 struct se_tpg_np *lio_target_call_addnptotpg(
165 struct se_portal_group *se_tpg, 161 struct se_portal_group *se_tpg,
166 struct config_group *group, 162 struct config_group *group,
167 const char *name) 163 const char *name)
168 { 164 {
169 struct iscsi_portal_group *tpg; 165 struct iscsi_portal_group *tpg;
170 struct iscsi_tpg_np *tpg_np; 166 struct iscsi_tpg_np *tpg_np;
171 char *str, *str2, *ip_str, *port_str; 167 char *str, *str2, *ip_str, *port_str;
172 struct __kernel_sockaddr_storage sockaddr; 168 struct __kernel_sockaddr_storage sockaddr;
173 struct sockaddr_in *sock_in; 169 struct sockaddr_in *sock_in;
174 struct sockaddr_in6 *sock_in6; 170 struct sockaddr_in6 *sock_in6;
175 unsigned long port; 171 unsigned long port;
176 int ret; 172 int ret;
177 char buf[MAX_PORTAL_LEN + 1]; 173 char buf[MAX_PORTAL_LEN + 1];
178 174
179 if (strlen(name) > MAX_PORTAL_LEN) { 175 if (strlen(name) > MAX_PORTAL_LEN) {
180 pr_err("strlen(name): %d exceeds MAX_PORTAL_LEN: %d\n", 176 pr_err("strlen(name): %d exceeds MAX_PORTAL_LEN: %d\n",
181 (int)strlen(name), MAX_PORTAL_LEN); 177 (int)strlen(name), MAX_PORTAL_LEN);
182 return ERR_PTR(-EOVERFLOW); 178 return ERR_PTR(-EOVERFLOW);
183 } 179 }
184 memset(buf, 0, MAX_PORTAL_LEN + 1); 180 memset(buf, 0, MAX_PORTAL_LEN + 1);
185 snprintf(buf, MAX_PORTAL_LEN + 1, "%s", name); 181 snprintf(buf, MAX_PORTAL_LEN + 1, "%s", name);
186 182
187 memset(&sockaddr, 0, sizeof(struct __kernel_sockaddr_storage)); 183 memset(&sockaddr, 0, sizeof(struct __kernel_sockaddr_storage));
188 184
189 str = strstr(buf, "["); 185 str = strstr(buf, "[");
190 if (str) { 186 if (str) {
191 const char *end; 187 const char *end;
192 188
193 str2 = strstr(str, "]"); 189 str2 = strstr(str, "]");
194 if (!str2) { 190 if (!str2) {
195 pr_err("Unable to locate trailing \"]\"" 191 pr_err("Unable to locate trailing \"]\""
196 " in IPv6 iSCSI network portal address\n"); 192 " in IPv6 iSCSI network portal address\n");
197 return ERR_PTR(-EINVAL); 193 return ERR_PTR(-EINVAL);
198 } 194 }
199 str++; /* Skip over leading "[" */ 195 str++; /* Skip over leading "[" */
200 *str2 = '\0'; /* Terminate the IPv6 address */ 196 *str2 = '\0'; /* Terminate the IPv6 address */
201 str2++; /* Skip over the "]" */ 197 str2++; /* Skip over the "]" */
202 port_str = strstr(str2, ":"); 198 port_str = strstr(str2, ":");
203 if (!port_str) { 199 if (!port_str) {
204 pr_err("Unable to locate \":port\"" 200 pr_err("Unable to locate \":port\""
205 " in IPv6 iSCSI network portal address\n"); 201 " in IPv6 iSCSI network portal address\n");
206 return ERR_PTR(-EINVAL); 202 return ERR_PTR(-EINVAL);
207 } 203 }
208 *port_str = '\0'; /* Terminate string for IP */ 204 *port_str = '\0'; /* Terminate string for IP */
209 port_str++; /* Skip over ":" */ 205 port_str++; /* Skip over ":" */
210 206
211 ret = strict_strtoul(port_str, 0, &port); 207 ret = strict_strtoul(port_str, 0, &port);
212 if (ret < 0) { 208 if (ret < 0) {
213 pr_err("strict_strtoul() failed for port_str: %d\n", ret); 209 pr_err("strict_strtoul() failed for port_str: %d\n", ret);
214 return ERR_PTR(ret); 210 return ERR_PTR(ret);
215 } 211 }
216 sock_in6 = (struct sockaddr_in6 *)&sockaddr; 212 sock_in6 = (struct sockaddr_in6 *)&sockaddr;
217 sock_in6->sin6_family = AF_INET6; 213 sock_in6->sin6_family = AF_INET6;
218 sock_in6->sin6_port = htons((unsigned short)port); 214 sock_in6->sin6_port = htons((unsigned short)port);
219 ret = in6_pton(str, IPV6_ADDRESS_SPACE, 215 ret = in6_pton(str, IPV6_ADDRESS_SPACE,
220 (void *)&sock_in6->sin6_addr.in6_u, -1, &end); 216 (void *)&sock_in6->sin6_addr.in6_u, -1, &end);
221 if (ret <= 0) { 217 if (ret <= 0) {
222 pr_err("in6_pton returned: %d\n", ret); 218 pr_err("in6_pton returned: %d\n", ret);
223 return ERR_PTR(-EINVAL); 219 return ERR_PTR(-EINVAL);
224 } 220 }
225 } else { 221 } else {
226 str = ip_str = &buf[0]; 222 str = ip_str = &buf[0];
227 port_str = strstr(ip_str, ":"); 223 port_str = strstr(ip_str, ":");
228 if (!port_str) { 224 if (!port_str) {
229 pr_err("Unable to locate \":port\"" 225 pr_err("Unable to locate \":port\""
230 " in IPv4 iSCSI network portal address\n"); 226 " in IPv4 iSCSI network portal address\n");
231 return ERR_PTR(-EINVAL); 227 return ERR_PTR(-EINVAL);
232 } 228 }
233 *port_str = '\0'; /* Terminate string for IP */ 229 *port_str = '\0'; /* Terminate string for IP */
234 port_str++; /* Skip over ":" */ 230 port_str++; /* Skip over ":" */
235 231
236 ret = strict_strtoul(port_str, 0, &port); 232 ret = strict_strtoul(port_str, 0, &port);
237 if (ret < 0) { 233 if (ret < 0) {
238 pr_err("strict_strtoul() failed for port_str: %d\n", ret); 234 pr_err("strict_strtoul() failed for port_str: %d\n", ret);
239 return ERR_PTR(ret); 235 return ERR_PTR(ret);
240 } 236 }
241 sock_in = (struct sockaddr_in *)&sockaddr; 237 sock_in = (struct sockaddr_in *)&sockaddr;
242 sock_in->sin_family = AF_INET; 238 sock_in->sin_family = AF_INET;
243 sock_in->sin_port = htons((unsigned short)port); 239 sock_in->sin_port = htons((unsigned short)port);
244 sock_in->sin_addr.s_addr = in_aton(ip_str); 240 sock_in->sin_addr.s_addr = in_aton(ip_str);
245 } 241 }
246 tpg = container_of(se_tpg, struct iscsi_portal_group, tpg_se_tpg); 242 tpg = container_of(se_tpg, struct iscsi_portal_group, tpg_se_tpg);
247 ret = iscsit_get_tpg(tpg); 243 ret = iscsit_get_tpg(tpg);
248 if (ret < 0) 244 if (ret < 0)
249 return ERR_PTR(-EINVAL); 245 return ERR_PTR(-EINVAL);
250 246
251 pr_debug("LIO_Target_ConfigFS: REGISTER -> %s TPGT: %hu" 247 pr_debug("LIO_Target_ConfigFS: REGISTER -> %s TPGT: %hu"
252 " PORTAL: %s\n", 248 " PORTAL: %s\n",
253 config_item_name(&se_tpg->se_tpg_wwn->wwn_group.cg_item), 249 config_item_name(&se_tpg->se_tpg_wwn->wwn_group.cg_item),
254 tpg->tpgt, name); 250 tpg->tpgt, name);
255 /* 251 /*
256 * Assume ISCSI_TCP by default. Other network portals for other 252 * Assume ISCSI_TCP by default. Other network portals for other
257 * iSCSI fabrics: 253 * iSCSI fabrics:
258 * 254 *
259 * Traditional iSCSI over SCTP (initial support) 255 * Traditional iSCSI over SCTP (initial support)
260 * iSER/TCP (TODO, hardware available) 256 * iSER/TCP (TODO, hardware available)
261 * iSER/SCTP (TODO, software emulation with osc-iwarp) 257 * iSER/SCTP (TODO, software emulation with osc-iwarp)
262 * iSER/IB (TODO, hardware available) 258 * iSER/IB (TODO, hardware available)
263 * 259 *
264 * can be enabled with attributes under 260 * can be enabled with attributes under
265 * sys/kernel/config/iscsi/$IQN/$TPG/np/$IP:$PORT/ 261 * sys/kernel/config/iscsi/$IQN/$TPG/np/$IP:$PORT/
266 * 262 *
267 */ 263 */
268 tpg_np = iscsit_tpg_add_network_portal(tpg, &sockaddr, str, NULL, 264 tpg_np = iscsit_tpg_add_network_portal(tpg, &sockaddr, str, NULL,
269 ISCSI_TCP); 265 ISCSI_TCP);
270 if (IS_ERR(tpg_np)) { 266 if (IS_ERR(tpg_np)) {
271 iscsit_put_tpg(tpg); 267 iscsit_put_tpg(tpg);
272 return ERR_CAST(tpg_np); 268 return ERR_CAST(tpg_np);
273 } 269 }
274 pr_debug("LIO_Target_ConfigFS: addnptotpg done!\n"); 270 pr_debug("LIO_Target_ConfigFS: addnptotpg done!\n");
275 271
276 iscsit_put_tpg(tpg); 272 iscsit_put_tpg(tpg);
277 return &tpg_np->se_tpg_np; 273 return &tpg_np->se_tpg_np;
278 } 274 }
279 275
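lio_target_call_addnptotpg() above accepts a portal string in either the IPv4 form 192.168.0.1:3260 or the bracketed IPv6 form [2001:db8::1]:3260. A stand-alone sketch of the same address/port split, for reference (hypothetical user-space helper, not driver code):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* Split "ip:port" or "[ipv6]:port" in place; returns 0 on success. */
	static int parse_portal(char *buf, char **ip, unsigned short *port)
	{
		char *p;

		if (buf[0] == '[') {			/* [ipv6]:port */
			*ip = buf + 1;
			p = strchr(buf, ']');
			if (!p || p[1] != ':')
				return -1;
			*p = '\0';			/* terminate IPv6 address */
			p += 2;				/* skip "]:" */
		} else {				/* ipv4:port */
			*ip = buf;
			p = strchr(buf, ':');
			if (!p)
				return -1;
			*p++ = '\0';			/* terminate IP, skip ":" */
		}
		*port = (unsigned short)strtoul(p, NULL, 0);
		return 0;
	}

	int main(void)
	{
		char buf[] = "[2001:db8::1]:3260";
		char *ip;
		unsigned short port;

		if (!parse_portal(buf, &ip, &port))
			printf("ip=%s port=%hu\n", ip, port);
		return 0;
	}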
280 static void lio_target_call_delnpfromtpg( 276 static void lio_target_call_delnpfromtpg(
281 struct se_tpg_np *se_tpg_np) 277 struct se_tpg_np *se_tpg_np)
282 { 278 {
283 struct iscsi_portal_group *tpg; 279 struct iscsi_portal_group *tpg;
284 struct iscsi_tpg_np *tpg_np; 280 struct iscsi_tpg_np *tpg_np;
285 struct se_portal_group *se_tpg; 281 struct se_portal_group *se_tpg;
286 int ret; 282 int ret;
287 283
288 tpg_np = container_of(se_tpg_np, struct iscsi_tpg_np, se_tpg_np); 284 tpg_np = container_of(se_tpg_np, struct iscsi_tpg_np, se_tpg_np);
289 tpg = tpg_np->tpg; 285 tpg = tpg_np->tpg;
290 ret = iscsit_get_tpg(tpg); 286 ret = iscsit_get_tpg(tpg);
291 if (ret < 0) 287 if (ret < 0)
292 return; 288 return;
293 289
294 se_tpg = &tpg->tpg_se_tpg; 290 se_tpg = &tpg->tpg_se_tpg;
295 pr_debug("LIO_Target_ConfigFS: DEREGISTER -> %s TPGT: %hu" 291 pr_debug("LIO_Target_ConfigFS: DEREGISTER -> %s TPGT: %hu"
296 " PORTAL: %s:%hu\n", config_item_name(&se_tpg->se_tpg_wwn->wwn_group.cg_item), 292 " PORTAL: %s:%hu\n", config_item_name(&se_tpg->se_tpg_wwn->wwn_group.cg_item),
297 tpg->tpgt, tpg_np->tpg_np->np_ip, tpg_np->tpg_np->np_port); 293 tpg->tpgt, tpg_np->tpg_np->np_ip, tpg_np->tpg_np->np_port);
298 294
299 ret = iscsit_tpg_del_network_portal(tpg, tpg_np); 295 ret = iscsit_tpg_del_network_portal(tpg, tpg_np);
300 if (ret < 0) 296 if (ret < 0)
301 goto out; 297 goto out;
302 298
303 pr_debug("LIO_Target_ConfigFS: delnpfromtpg done!\n"); 299 pr_debug("LIO_Target_ConfigFS: delnpfromtpg done!\n");
304 out: 300 out:
305 iscsit_put_tpg(tpg); 301 iscsit_put_tpg(tpg);
306 } 302 }
307 303
308 /* End items for lio_target_np_cit */ 304 /* End items for lio_target_np_cit */
309 305
310 /* Start items for lio_target_nacl_attrib_cit */ 306 /* Start items for lio_target_nacl_attrib_cit */
311 307
312 #define DEF_NACL_ATTRIB(name) \ 308 #define DEF_NACL_ATTRIB(name) \
313 static ssize_t iscsi_nacl_attrib_show_##name( \ 309 static ssize_t iscsi_nacl_attrib_show_##name( \
314 struct se_node_acl *se_nacl, \ 310 struct se_node_acl *se_nacl, \
315 char *page) \ 311 char *page) \
316 { \ 312 { \
317 struct iscsi_node_acl *nacl = container_of(se_nacl, struct iscsi_node_acl, \ 313 struct iscsi_node_acl *nacl = container_of(se_nacl, struct iscsi_node_acl, \
318 se_node_acl); \ 314 se_node_acl); \
319 \ 315 \
320 return sprintf(page, "%u\n", ISCSI_NODE_ATTRIB(nacl)->name); \ 316 return sprintf(page, "%u\n", ISCSI_NODE_ATTRIB(nacl)->name); \
321 } \ 317 } \
322 \ 318 \
323 static ssize_t iscsi_nacl_attrib_store_##name( \ 319 static ssize_t iscsi_nacl_attrib_store_##name( \
324 struct se_node_acl *se_nacl, \ 320 struct se_node_acl *se_nacl, \
325 const char *page, \ 321 const char *page, \
326 size_t count) \ 322 size_t count) \
327 { \ 323 { \
328 struct iscsi_node_acl *nacl = container_of(se_nacl, struct iscsi_node_acl, \ 324 struct iscsi_node_acl *nacl = container_of(se_nacl, struct iscsi_node_acl, \
329 se_node_acl); \ 325 se_node_acl); \
330 char *endptr; \ 326 char *endptr; \
331 u32 val; \ 327 u32 val; \
332 int ret; \ 328 int ret; \
333 \ 329 \
334 val = simple_strtoul(page, &endptr, 0); \ 330 val = simple_strtoul(page, &endptr, 0); \
335 ret = iscsit_na_##name(nacl, val); \ 331 ret = iscsit_na_##name(nacl, val); \
336 if (ret < 0) \ 332 if (ret < 0) \
337 return ret; \ 333 return ret; \
338 \ 334 \
339 return count; \ 335 return count; \
340 } 336 }
341 337
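For a given attribute name, e.g. dataout_timeout, the DEF_NACL_ATTRIB() macro above expands to a show/store pair along these lines (manual expansion for illustration only):

	static ssize_t iscsi_nacl_attrib_show_dataout_timeout(
		struct se_node_acl *se_nacl, char *page)
	{
		struct iscsi_node_acl *nacl = container_of(se_nacl,
				struct iscsi_node_acl, se_node_acl);

		return sprintf(page, "%u\n",
				ISCSI_NODE_ATTRIB(nacl)->dataout_timeout);
	}
	/* ...plus iscsi_nacl_attrib_store_dataout_timeout(), which parses
	 * the page into a u32 and calls iscsit_na_dataout_timeout(nacl, val);
	 * NACL_ATTR() below then wires the pair into a configfs_attribute. */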
342 #define NACL_ATTR(_name, _mode) TF_NACL_ATTRIB_ATTR(iscsi, _name, _mode); 338 #define NACL_ATTR(_name, _mode) TF_NACL_ATTRIB_ATTR(iscsi, _name, _mode);
343 /* 339 /*
344 * Define iscsi_node_attrib_s_dataout_timeout 340 * Define iscsi_node_attrib_s_dataout_timeout
345 */ 341 */
346 DEF_NACL_ATTRIB(dataout_timeout); 342 DEF_NACL_ATTRIB(dataout_timeout);
347 NACL_ATTR(dataout_timeout, S_IRUGO | S_IWUSR); 343 NACL_ATTR(dataout_timeout, S_IRUGO | S_IWUSR);
348 /* 344 /*
349 * Define iscsi_node_attrib_s_dataout_timeout_retries 345 * Define iscsi_node_attrib_s_dataout_timeout_retries
350 */ 346 */
351 DEF_NACL_ATTRIB(dataout_timeout_retries); 347 DEF_NACL_ATTRIB(dataout_timeout_retries);
352 NACL_ATTR(dataout_timeout_retries, S_IRUGO | S_IWUSR); 348 NACL_ATTR(dataout_timeout_retries, S_IRUGO | S_IWUSR);
353 /* 349 /*
354 * Define iscsi_node_attrib_s_default_erl 350 * Define iscsi_node_attrib_s_default_erl
355 */ 351 */
356 DEF_NACL_ATTRIB(default_erl); 352 DEF_NACL_ATTRIB(default_erl);
357 NACL_ATTR(default_erl, S_IRUGO | S_IWUSR); 353 NACL_ATTR(default_erl, S_IRUGO | S_IWUSR);
358 /* 354 /*
359 * Define iscsi_node_attrib_s_nopin_timeout 355 * Define iscsi_node_attrib_s_nopin_timeout
360 */ 356 */
361 DEF_NACL_ATTRIB(nopin_timeout); 357 DEF_NACL_ATTRIB(nopin_timeout);
362 NACL_ATTR(nopin_timeout, S_IRUGO | S_IWUSR); 358 NACL_ATTR(nopin_timeout, S_IRUGO | S_IWUSR);
363 /* 359 /*
364 * Define iscsi_node_attrib_s_nopin_response_timeout 360 * Define iscsi_node_attrib_s_nopin_response_timeout
365 */ 361 */
366 DEF_NACL_ATTRIB(nopin_response_timeout); 362 DEF_NACL_ATTRIB(nopin_response_timeout);
367 NACL_ATTR(nopin_response_timeout, S_IRUGO | S_IWUSR); 363 NACL_ATTR(nopin_response_timeout, S_IRUGO | S_IWUSR);
368 /* 364 /*
369 * Define iscsi_node_attrib_s_random_datain_pdu_offsets 365 * Define iscsi_node_attrib_s_random_datain_pdu_offsets
370 */ 366 */
371 DEF_NACL_ATTRIB(random_datain_pdu_offsets); 367 DEF_NACL_ATTRIB(random_datain_pdu_offsets);
372 NACL_ATTR(random_datain_pdu_offsets, S_IRUGO | S_IWUSR); 368 NACL_ATTR(random_datain_pdu_offsets, S_IRUGO | S_IWUSR);
373 /* 369 /*
374 * Define iscsi_node_attrib_s_random_datain_seq_offsets 370 * Define iscsi_node_attrib_s_random_datain_seq_offsets
375 */ 371 */
376 DEF_NACL_ATTRIB(random_datain_seq_offsets); 372 DEF_NACL_ATTRIB(random_datain_seq_offsets);
377 NACL_ATTR(random_datain_seq_offsets, S_IRUGO | S_IWUSR); 373 NACL_ATTR(random_datain_seq_offsets, S_IRUGO | S_IWUSR);
378 /* 374 /*
379 * Define iscsi_node_attrib_s_random_r2t_offsets 375 * Define iscsi_node_attrib_s_random_r2t_offsets
380 */ 376 */
381 DEF_NACL_ATTRIB(random_r2t_offsets); 377 DEF_NACL_ATTRIB(random_r2t_offsets);
382 NACL_ATTR(random_r2t_offsets, S_IRUGO | S_IWUSR); 378 NACL_ATTR(random_r2t_offsets, S_IRUGO | S_IWUSR);
383 379
384 static struct configfs_attribute *lio_target_nacl_attrib_attrs[] = { 380 static struct configfs_attribute *lio_target_nacl_attrib_attrs[] = {
385 &iscsi_nacl_attrib_dataout_timeout.attr, 381 &iscsi_nacl_attrib_dataout_timeout.attr,
386 &iscsi_nacl_attrib_dataout_timeout_retries.attr, 382 &iscsi_nacl_attrib_dataout_timeout_retries.attr,
387 &iscsi_nacl_attrib_default_erl.attr, 383 &iscsi_nacl_attrib_default_erl.attr,
388 &iscsi_nacl_attrib_nopin_timeout.attr, 384 &iscsi_nacl_attrib_nopin_timeout.attr,
389 &iscsi_nacl_attrib_nopin_response_timeout.attr, 385 &iscsi_nacl_attrib_nopin_response_timeout.attr,
390 &iscsi_nacl_attrib_random_datain_pdu_offsets.attr, 386 &iscsi_nacl_attrib_random_datain_pdu_offsets.attr,
391 &iscsi_nacl_attrib_random_datain_seq_offsets.attr, 387 &iscsi_nacl_attrib_random_datain_seq_offsets.attr,
392 &iscsi_nacl_attrib_random_r2t_offsets.attr, 388 &iscsi_nacl_attrib_random_r2t_offsets.attr,
393 NULL, 389 NULL,
394 }; 390 };
395 391
396 /* End items for lio_target_nacl_attrib_cit */ 392 /* End items for lio_target_nacl_attrib_cit */
397 393
398 /* Start items for lio_target_nacl_auth_cit */ 394 /* Start items for lio_target_nacl_auth_cit */
399 395
400 #define __DEF_NACL_AUTH_STR(prefix, name, flags) \ 396 #define __DEF_NACL_AUTH_STR(prefix, name, flags) \
401 static ssize_t __iscsi_##prefix##_show_##name( \ 397 static ssize_t __iscsi_##prefix##_show_##name( \
402 struct iscsi_node_acl *nacl, \ 398 struct iscsi_node_acl *nacl, \
403 char *page) \ 399 char *page) \
404 { \ 400 { \
405 struct iscsi_node_auth *auth = &nacl->node_auth; \ 401 struct iscsi_node_auth *auth = &nacl->node_auth; \
406 \ 402 \
407 if (!capable(CAP_SYS_ADMIN)) \ 403 if (!capable(CAP_SYS_ADMIN)) \
408 return -EPERM; \ 404 return -EPERM; \
409 return snprintf(page, PAGE_SIZE, "%s\n", auth->name); \ 405 return snprintf(page, PAGE_SIZE, "%s\n", auth->name); \
410 } \ 406 } \
411 \ 407 \
412 static ssize_t __iscsi_##prefix##_store_##name( \ 408 static ssize_t __iscsi_##prefix##_store_##name( \
413 struct iscsi_node_acl *nacl, \ 409 struct iscsi_node_acl *nacl, \
414 const char *page, \ 410 const char *page, \
415 size_t count) \ 411 size_t count) \
416 { \ 412 { \
417 struct iscsi_node_auth *auth = &nacl->node_auth; \ 413 struct iscsi_node_auth *auth = &nacl->node_auth; \
418 \ 414 \
419 if (!capable(CAP_SYS_ADMIN)) \ 415 if (!capable(CAP_SYS_ADMIN)) \
420 return -EPERM; \ 416 return -EPERM; \
421 \ 417 \
422 snprintf(auth->name, PAGE_SIZE, "%s", page); \ 418 snprintf(auth->name, PAGE_SIZE, "%s", page); \
423 if (!strncmp("NULL", auth->name, 4)) \ 419 if (!strncmp("NULL", auth->name, 4)) \
424 auth->naf_flags &= ~flags; \ 420 auth->naf_flags &= ~flags; \
425 else \ 421 else \
426 auth->naf_flags |= flags; \ 422 auth->naf_flags |= flags; \
427 \ 423 \
428 if ((auth->naf_flags & NAF_USERID_IN_SET) && \ 424 if ((auth->naf_flags & NAF_USERID_IN_SET) && \
429 (auth->naf_flags & NAF_PASSWORD_IN_SET)) \ 425 (auth->naf_flags & NAF_PASSWORD_IN_SET)) \
430 auth->authenticate_target = 1; \ 426 auth->authenticate_target = 1; \
431 else \ 427 else \
432 auth->authenticate_target = 0; \ 428 auth->authenticate_target = 0; \
433 \ 429 \
434 return count; \ 430 return count; \
435 } 431 }
436 432
437 #define __DEF_NACL_AUTH_INT(prefix, name) \ 433 #define __DEF_NACL_AUTH_INT(prefix, name) \
438 static ssize_t __iscsi_##prefix##_show_##name( \ 434 static ssize_t __iscsi_##prefix##_show_##name( \
439 struct iscsi_node_acl *nacl, \ 435 struct iscsi_node_acl *nacl, \
440 char *page) \ 436 char *page) \
441 { \ 437 { \
442 struct iscsi_node_auth *auth = &nacl->node_auth; \ 438 struct iscsi_node_auth *auth = &nacl->node_auth; \
443 \ 439 \
444 if (!capable(CAP_SYS_ADMIN)) \ 440 if (!capable(CAP_SYS_ADMIN)) \
445 return -EPERM; \ 441 return -EPERM; \
446 \ 442 \
447 return snprintf(page, PAGE_SIZE, "%d\n", auth->name); \ 443 return snprintf(page, PAGE_SIZE, "%d\n", auth->name); \
448 } 444 }
449 445
450 #define DEF_NACL_AUTH_STR(name, flags) \ 446 #define DEF_NACL_AUTH_STR(name, flags) \
451 __DEF_NACL_AUTH_STR(nacl_auth, name, flags) \ 447 __DEF_NACL_AUTH_STR(nacl_auth, name, flags) \
452 static ssize_t iscsi_nacl_auth_show_##name( \ 448 static ssize_t iscsi_nacl_auth_show_##name( \
453 struct se_node_acl *nacl, \ 449 struct se_node_acl *nacl, \
454 char *page) \ 450 char *page) \
455 { \ 451 { \
456 return __iscsi_nacl_auth_show_##name(container_of(nacl, \ 452 return __iscsi_nacl_auth_show_##name(container_of(nacl, \
457 struct iscsi_node_acl, se_node_acl), page); \ 453 struct iscsi_node_acl, se_node_acl), page); \
458 } \ 454 } \
459 static ssize_t iscsi_nacl_auth_store_##name( \ 455 static ssize_t iscsi_nacl_auth_store_##name( \
460 struct se_node_acl *nacl, \ 456 struct se_node_acl *nacl, \
461 const char *page, \ 457 const char *page, \
462 size_t count) \ 458 size_t count) \
463 { \ 459 { \
464 return __iscsi_nacl_auth_store_##name(container_of(nacl, \ 460 return __iscsi_nacl_auth_store_##name(container_of(nacl, \
465 struct iscsi_node_acl, se_node_acl), page, count); \ 461 struct iscsi_node_acl, se_node_acl), page, count); \
466 } 462 }
467 463
468 #define DEF_NACL_AUTH_INT(name) \ 464 #define DEF_NACL_AUTH_INT(name) \
469 __DEF_NACL_AUTH_INT(nacl_auth, name) \ 465 __DEF_NACL_AUTH_INT(nacl_auth, name) \
470 static ssize_t iscsi_nacl_auth_show_##name( \ 466 static ssize_t iscsi_nacl_auth_show_##name( \
471 struct se_node_acl *nacl, \ 467 struct se_node_acl *nacl, \
472 char *page) \ 468 char *page) \
473 { \ 469 { \
474 return __iscsi_nacl_auth_show_##name(container_of(nacl, \ 470 return __iscsi_nacl_auth_show_##name(container_of(nacl, \
475 struct iscsi_node_acl, se_node_acl), page); \ 471 struct iscsi_node_acl, se_node_acl), page); \
476 } 472 }
477 473
478 #define AUTH_ATTR(_name, _mode) TF_NACL_AUTH_ATTR(iscsi, _name, _mode); 474 #define AUTH_ATTR(_name, _mode) TF_NACL_AUTH_ATTR(iscsi, _name, _mode);
479 #define AUTH_ATTR_RO(_name) TF_NACL_AUTH_ATTR_RO(iscsi, _name); 475 #define AUTH_ATTR_RO(_name) TF_NACL_AUTH_ATTR_RO(iscsi, _name);
480 476
481 /* 477 /*
482 * One-way authentication userid 478 * One-way authentication userid
483 */ 479 */
484 DEF_NACL_AUTH_STR(userid, NAF_USERID_SET); 480 DEF_NACL_AUTH_STR(userid, NAF_USERID_SET);
485 AUTH_ATTR(userid, S_IRUGO | S_IWUSR); 481 AUTH_ATTR(userid, S_IRUGO | S_IWUSR);
486 /* 482 /*
487 * One-way authentication password 483 * One-way authentication password
488 */ 484 */
489 DEF_NACL_AUTH_STR(password, NAF_PASSWORD_SET); 485 DEF_NACL_AUTH_STR(password, NAF_PASSWORD_SET);
490 AUTH_ATTR(password, S_IRUGO | S_IWUSR); 486 AUTH_ATTR(password, S_IRUGO | S_IWUSR);
491 /* 487 /*
492 * Enforce mutual authentication 488 * Enforce mutual authentication
493 */ 489 */
494 DEF_NACL_AUTH_INT(authenticate_target); 490 DEF_NACL_AUTH_INT(authenticate_target);
495 AUTH_ATTR_RO(authenticate_target); 491 AUTH_ATTR_RO(authenticate_target);
496 /* 492 /*
497 * Mutual authentication userid 493 * Mutual authentication userid
498 */ 494 */
499 DEF_NACL_AUTH_STR(userid_mutual, NAF_USERID_IN_SET); 495 DEF_NACL_AUTH_STR(userid_mutual, NAF_USERID_IN_SET);
500 AUTH_ATTR(userid_mutual, S_IRUGO | S_IWUSR); 496 AUTH_ATTR(userid_mutual, S_IRUGO | S_IWUSR);
501 /* 497 /*
502 * Mutual authentication password 498 * Mutual authentication password
503 */ 499 */
504 DEF_NACL_AUTH_STR(password_mutual, NAF_PASSWORD_IN_SET); 500 DEF_NACL_AUTH_STR(password_mutual, NAF_PASSWORD_IN_SET);
505 AUTH_ATTR(password_mutual, S_IRUGO | S_IWUSR); 501 AUTH_ATTR(password_mutual, S_IRUGO | S_IWUSR);
506 502
507 static struct configfs_attribute *lio_target_nacl_auth_attrs[] = { 503 static struct configfs_attribute *lio_target_nacl_auth_attrs[] = {
508 &iscsi_nacl_auth_userid.attr, 504 &iscsi_nacl_auth_userid.attr,
509 &iscsi_nacl_auth_password.attr, 505 &iscsi_nacl_auth_password.attr,
510 &iscsi_nacl_auth_authenticate_target.attr, 506 &iscsi_nacl_auth_authenticate_target.attr,
511 &iscsi_nacl_auth_userid_mutual.attr, 507 &iscsi_nacl_auth_userid_mutual.attr,
512 &iscsi_nacl_auth_password_mutual.attr, 508 &iscsi_nacl_auth_password_mutual.attr,
513 NULL, 509 NULL,
514 }; 510 };
515 511
516 /* End items for lio_target_nacl_auth_cit */ 512 /* End items for lio_target_nacl_auth_cit */
517 513
518 /* Start items for lio_target_nacl_param_cit */ 514 /* Start items for lio_target_nacl_param_cit */
519 515
520 #define DEF_NACL_PARAM(name) \ 516 #define DEF_NACL_PARAM(name) \
521 static ssize_t iscsi_nacl_param_show_##name( \ 517 static ssize_t iscsi_nacl_param_show_##name( \
522 struct se_node_acl *se_nacl, \ 518 struct se_node_acl *se_nacl, \
523 char *page) \ 519 char *page) \
524 { \ 520 { \
525 struct iscsi_session *sess; \ 521 struct iscsi_session *sess; \
526 struct se_session *se_sess; \ 522 struct se_session *se_sess; \
527 ssize_t rb; \ 523 ssize_t rb; \
528 \ 524 \
529 spin_lock_bh(&se_nacl->nacl_sess_lock); \ 525 spin_lock_bh(&se_nacl->nacl_sess_lock); \
530 se_sess = se_nacl->nacl_sess; \ 526 se_sess = se_nacl->nacl_sess; \
531 if (!se_sess) { \ 527 if (!se_sess) { \
532 rb = snprintf(page, PAGE_SIZE, \ 528 rb = snprintf(page, PAGE_SIZE, \
533 "No Active iSCSI Session\n"); \ 529 "No Active iSCSI Session\n"); \
534 } else { \ 530 } else { \
535 sess = se_sess->fabric_sess_ptr; \ 531 sess = se_sess->fabric_sess_ptr; \
536 rb = snprintf(page, PAGE_SIZE, "%u\n", \ 532 rb = snprintf(page, PAGE_SIZE, "%u\n", \
537 (u32)sess->sess_ops->name); \ 533 (u32)sess->sess_ops->name); \
538 } \ 534 } \
539 spin_unlock_bh(&se_nacl->nacl_sess_lock); \ 535 spin_unlock_bh(&se_nacl->nacl_sess_lock); \
540 \ 536 \
541 return rb; \ 537 return rb; \
542 } 538 }
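For instance, DEF_NACL_PARAM(MaxConnections) generates iscsi_nacl_param_show_MaxConnections(); its body, written out mechanically from the macro above:

static ssize_t iscsi_nacl_param_show_MaxConnections(
	struct se_node_acl *se_nacl,
	char *page)
{
	struct iscsi_session *sess;
	struct se_session *se_sess;
	ssize_t rb;

	spin_lock_bh(&se_nacl->nacl_sess_lock);
	se_sess = se_nacl->nacl_sess;
	if (!se_sess) {
		rb = snprintf(page, PAGE_SIZE, "No Active iSCSI Session\n");
	} else {
		/* Report the value negotiated for this session */
		sess = se_sess->fabric_sess_ptr;
		rb = snprintf(page, PAGE_SIZE, "%u\n",
			      (u32)sess->sess_ops->MaxConnections);
	}
	spin_unlock_bh(&se_nacl->nacl_sess_lock);

	return rb;
}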
543 539
544 #define NACL_PARAM_ATTR(_name) TF_NACL_PARAM_ATTR_RO(iscsi, _name); 540 #define NACL_PARAM_ATTR(_name) TF_NACL_PARAM_ATTR_RO(iscsi, _name);
545 541
546 DEF_NACL_PARAM(MaxConnections); 542 DEF_NACL_PARAM(MaxConnections);
547 NACL_PARAM_ATTR(MaxConnections); 543 NACL_PARAM_ATTR(MaxConnections);
548 544
549 DEF_NACL_PARAM(InitialR2T); 545 DEF_NACL_PARAM(InitialR2T);
550 NACL_PARAM_ATTR(InitialR2T); 546 NACL_PARAM_ATTR(InitialR2T);
551 547
552 DEF_NACL_PARAM(ImmediateData); 548 DEF_NACL_PARAM(ImmediateData);
553 NACL_PARAM_ATTR(ImmediateData); 549 NACL_PARAM_ATTR(ImmediateData);
554 550
555 DEF_NACL_PARAM(MaxBurstLength); 551 DEF_NACL_PARAM(MaxBurstLength);
556 NACL_PARAM_ATTR(MaxBurstLength); 552 NACL_PARAM_ATTR(MaxBurstLength);
557 553
558 DEF_NACL_PARAM(FirstBurstLength); 554 DEF_NACL_PARAM(FirstBurstLength);
559 NACL_PARAM_ATTR(FirstBurstLength); 555 NACL_PARAM_ATTR(FirstBurstLength);
560 556
561 DEF_NACL_PARAM(DefaultTime2Wait); 557 DEF_NACL_PARAM(DefaultTime2Wait);
562 NACL_PARAM_ATTR(DefaultTime2Wait); 558 NACL_PARAM_ATTR(DefaultTime2Wait);
563 559
564 DEF_NACL_PARAM(DefaultTime2Retain); 560 DEF_NACL_PARAM(DefaultTime2Retain);
565 NACL_PARAM_ATTR(DefaultTime2Retain); 561 NACL_PARAM_ATTR(DefaultTime2Retain);
566 562
567 DEF_NACL_PARAM(MaxOutstandingR2T); 563 DEF_NACL_PARAM(MaxOutstandingR2T);
568 NACL_PARAM_ATTR(MaxOutstandingR2T); 564 NACL_PARAM_ATTR(MaxOutstandingR2T);
569 565
570 DEF_NACL_PARAM(DataPDUInOrder); 566 DEF_NACL_PARAM(DataPDUInOrder);
571 NACL_PARAM_ATTR(DataPDUInOrder); 567 NACL_PARAM_ATTR(DataPDUInOrder);
572 568
573 DEF_NACL_PARAM(DataSequenceInOrder); 569 DEF_NACL_PARAM(DataSequenceInOrder);
574 NACL_PARAM_ATTR(DataSequenceInOrder); 570 NACL_PARAM_ATTR(DataSequenceInOrder);
575 571
576 DEF_NACL_PARAM(ErrorRecoveryLevel); 572 DEF_NACL_PARAM(ErrorRecoveryLevel);
577 NACL_PARAM_ATTR(ErrorRecoveryLevel); 573 NACL_PARAM_ATTR(ErrorRecoveryLevel);
578 574
579 static struct configfs_attribute *lio_target_nacl_param_attrs[] = { 575 static struct configfs_attribute *lio_target_nacl_param_attrs[] = {
580 &iscsi_nacl_param_MaxConnections.attr, 576 &iscsi_nacl_param_MaxConnections.attr,
581 &iscsi_nacl_param_InitialR2T.attr, 577 &iscsi_nacl_param_InitialR2T.attr,
582 &iscsi_nacl_param_ImmediateData.attr, 578 &iscsi_nacl_param_ImmediateData.attr,
583 &iscsi_nacl_param_MaxBurstLength.attr, 579 &iscsi_nacl_param_MaxBurstLength.attr,
584 &iscsi_nacl_param_FirstBurstLength.attr, 580 &iscsi_nacl_param_FirstBurstLength.attr,
585 &iscsi_nacl_param_DefaultTime2Wait.attr, 581 &iscsi_nacl_param_DefaultTime2Wait.attr,
586 &iscsi_nacl_param_DefaultTime2Retain.attr, 582 &iscsi_nacl_param_DefaultTime2Retain.attr,
587 &iscsi_nacl_param_MaxOutstandingR2T.attr, 583 &iscsi_nacl_param_MaxOutstandingR2T.attr,
588 &iscsi_nacl_param_DataPDUInOrder.attr, 584 &iscsi_nacl_param_DataPDUInOrder.attr,
589 &iscsi_nacl_param_DataSequenceInOrder.attr, 585 &iscsi_nacl_param_DataSequenceInOrder.attr,
590 &iscsi_nacl_param_ErrorRecoveryLevel.attr, 586 &iscsi_nacl_param_ErrorRecoveryLevel.attr,
591 NULL, 587 NULL,
592 }; 588 };
593 589
594 /* End items for lio_target_nacl_param_cit */ 590 /* End items for lio_target_nacl_param_cit */
595 591
596 /* Start items for lio_target_acl_cit */ 592 /* Start items for lio_target_acl_cit */
597 593
598 static ssize_t lio_target_nacl_show_info( 594 static ssize_t lio_target_nacl_show_info(
599 struct se_node_acl *se_nacl, 595 struct se_node_acl *se_nacl,
600 char *page) 596 char *page)
601 { 597 {
602 struct iscsi_session *sess; 598 struct iscsi_session *sess;
603 struct iscsi_conn *conn; 599 struct iscsi_conn *conn;
604 struct se_session *se_sess; 600 struct se_session *se_sess;
605 ssize_t rb = 0; 601 ssize_t rb = 0;
606 602
607 spin_lock_bh(&se_nacl->nacl_sess_lock); 603 spin_lock_bh(&se_nacl->nacl_sess_lock);
608 se_sess = se_nacl->nacl_sess; 604 se_sess = se_nacl->nacl_sess;
609 if (!se_sess) { 605 if (!se_sess) {
610 rb += sprintf(page+rb, "No active iSCSI Session for Initiator" 606 rb += sprintf(page+rb, "No active iSCSI Session for Initiator"
611 " Endpoint: %s\n", se_nacl->initiatorname); 607 " Endpoint: %s\n", se_nacl->initiatorname);
612 } else { 608 } else {
613 sess = se_sess->fabric_sess_ptr; 609 sess = se_sess->fabric_sess_ptr;
614 610
615 if (sess->sess_ops->InitiatorName) 611 if (sess->sess_ops->InitiatorName)
616 rb += sprintf(page+rb, "InitiatorName: %s\n", 612 rb += sprintf(page+rb, "InitiatorName: %s\n",
617 sess->sess_ops->InitiatorName); 613 sess->sess_ops->InitiatorName);
618 if (sess->sess_ops->InitiatorAlias) 614 if (sess->sess_ops->InitiatorAlias)
619 rb += sprintf(page+rb, "InitiatorAlias: %s\n", 615 rb += sprintf(page+rb, "InitiatorAlias: %s\n",
620 sess->sess_ops->InitiatorAlias); 616 sess->sess_ops->InitiatorAlias);
621 617
622 rb += sprintf(page+rb, "LIO Session ID: %u " 618 rb += sprintf(page+rb, "LIO Session ID: %u "
623 "ISID: 0x%02x %02x %02x %02x %02x %02x " 619 "ISID: 0x%02x %02x %02x %02x %02x %02x "
624 "TSIH: %hu ", sess->sid, 620 "TSIH: %hu ", sess->sid,
625 sess->isid[0], sess->isid[1], sess->isid[2], 621 sess->isid[0], sess->isid[1], sess->isid[2],
626 sess->isid[3], sess->isid[4], sess->isid[5], 622 sess->isid[3], sess->isid[4], sess->isid[5],
627 sess->tsih); 623 sess->tsih);
628 rb += sprintf(page+rb, "SessionType: %s\n", 624 rb += sprintf(page+rb, "SessionType: %s\n",
629 (sess->sess_ops->SessionType) ? 625 (sess->sess_ops->SessionType) ?
630 "Discovery" : "Normal"); 626 "Discovery" : "Normal");
631 rb += sprintf(page+rb, "Session State: "); 627 rb += sprintf(page+rb, "Session State: ");
632 switch (sess->session_state) { 628 switch (sess->session_state) {
633 case TARG_SESS_STATE_FREE: 629 case TARG_SESS_STATE_FREE:
634 rb += sprintf(page+rb, "TARG_SESS_STATE_FREE\n"); 630 rb += sprintf(page+rb, "TARG_SESS_STATE_FREE\n");
635 break; 631 break;
636 case TARG_SESS_STATE_ACTIVE: 632 case TARG_SESS_STATE_ACTIVE:
637 rb += sprintf(page+rb, "TARG_SESS_STATE_ACTIVE\n"); 633 rb += sprintf(page+rb, "TARG_SESS_STATE_ACTIVE\n");
638 break; 634 break;
639 case TARG_SESS_STATE_LOGGED_IN: 635 case TARG_SESS_STATE_LOGGED_IN:
640 rb += sprintf(page+rb, "TARG_SESS_STATE_LOGGED_IN\n"); 636 rb += sprintf(page+rb, "TARG_SESS_STATE_LOGGED_IN\n");
641 break; 637 break;
642 case TARG_SESS_STATE_FAILED: 638 case TARG_SESS_STATE_FAILED:
643 rb += sprintf(page+rb, "TARG_SESS_STATE_FAILED\n"); 639 rb += sprintf(page+rb, "TARG_SESS_STATE_FAILED\n");
644 break; 640 break;
645 case TARG_SESS_STATE_IN_CONTINUE: 641 case TARG_SESS_STATE_IN_CONTINUE:
646 rb += sprintf(page+rb, "TARG_SESS_STATE_IN_CONTINUE\n"); 642 rb += sprintf(page+rb, "TARG_SESS_STATE_IN_CONTINUE\n");
647 break; 643 break;
648 default: 644 default:
649 rb += sprintf(page+rb, "ERROR: Unknown Session" 645 rb += sprintf(page+rb, "ERROR: Unknown Session"
650 " State!\n"); 646 " State!\n");
651 break; 647 break;
652 } 648 }
653 649
654 rb += sprintf(page+rb, "---------------------[iSCSI Session" 650 rb += sprintf(page+rb, "---------------------[iSCSI Session"
655 " Values]-----------------------\n"); 651 " Values]-----------------------\n");
656 rb += sprintf(page+rb, " CmdSN/WR : CmdSN/WC : ExpCmdSN" 652 rb += sprintf(page+rb, " CmdSN/WR : CmdSN/WC : ExpCmdSN"
657 " : MaxCmdSN : ITT : TTT\n"); 653 " : MaxCmdSN : ITT : TTT\n");
658 rb += sprintf(page+rb, " 0x%08x 0x%08x 0x%08x 0x%08x" 654 rb += sprintf(page+rb, " 0x%08x 0x%08x 0x%08x 0x%08x"
659 " 0x%08x 0x%08x\n", 655 " 0x%08x 0x%08x\n",
660 sess->cmdsn_window, 656 sess->cmdsn_window,
661 (sess->max_cmd_sn - sess->exp_cmd_sn) + 1, 657 (sess->max_cmd_sn - sess->exp_cmd_sn) + 1,
662 sess->exp_cmd_sn, sess->max_cmd_sn, 658 sess->exp_cmd_sn, sess->max_cmd_sn,
663 sess->init_task_tag, sess->targ_xfer_tag); 659 sess->init_task_tag, sess->targ_xfer_tag);
664 rb += sprintf(page+rb, "----------------------[iSCSI" 660 rb += sprintf(page+rb, "----------------------[iSCSI"
665 " Connections]-------------------------\n"); 661 " Connections]-------------------------\n");
666 662
667 spin_lock(&sess->conn_lock); 663 spin_lock(&sess->conn_lock);
668 list_for_each_entry(conn, &sess->sess_conn_list, conn_list) { 664 list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
669 rb += sprintf(page+rb, "CID: %hu Connection" 665 rb += sprintf(page+rb, "CID: %hu Connection"
670 " State: ", conn->cid); 666 " State: ", conn->cid);
671 switch (conn->conn_state) { 667 switch (conn->conn_state) {
672 case TARG_CONN_STATE_FREE: 668 case TARG_CONN_STATE_FREE:
673 rb += sprintf(page+rb, 669 rb += sprintf(page+rb,
674 "TARG_CONN_STATE_FREE\n"); 670 "TARG_CONN_STATE_FREE\n");
675 break; 671 break;
676 case TARG_CONN_STATE_XPT_UP: 672 case TARG_CONN_STATE_XPT_UP:
677 rb += sprintf(page+rb, 673 rb += sprintf(page+rb,
678 "TARG_CONN_STATE_XPT_UP\n"); 674 "TARG_CONN_STATE_XPT_UP\n");
679 break; 675 break;
680 case TARG_CONN_STATE_IN_LOGIN: 676 case TARG_CONN_STATE_IN_LOGIN:
681 rb += sprintf(page+rb, 677 rb += sprintf(page+rb,
682 "TARG_CONN_STATE_IN_LOGIN\n"); 678 "TARG_CONN_STATE_IN_LOGIN\n");
683 break; 679 break;
684 case TARG_CONN_STATE_LOGGED_IN: 680 case TARG_CONN_STATE_LOGGED_IN:
685 rb += sprintf(page+rb, 681 rb += sprintf(page+rb,
686 "TARG_CONN_STATE_LOGGED_IN\n"); 682 "TARG_CONN_STATE_LOGGED_IN\n");
687 break; 683 break;
688 case TARG_CONN_STATE_IN_LOGOUT: 684 case TARG_CONN_STATE_IN_LOGOUT:
689 rb += sprintf(page+rb, 685 rb += sprintf(page+rb,
690 "TARG_CONN_STATE_IN_LOGOUT\n"); 686 "TARG_CONN_STATE_IN_LOGOUT\n");
691 break; 687 break;
692 case TARG_CONN_STATE_LOGOUT_REQUESTED: 688 case TARG_CONN_STATE_LOGOUT_REQUESTED:
693 rb += sprintf(page+rb, 689 rb += sprintf(page+rb,
694 "TARG_CONN_STATE_LOGOUT_REQUESTED\n"); 690 "TARG_CONN_STATE_LOGOUT_REQUESTED\n");
695 break; 691 break;
696 case TARG_CONN_STATE_CLEANUP_WAIT: 692 case TARG_CONN_STATE_CLEANUP_WAIT:
697 rb += sprintf(page+rb, 693 rb += sprintf(page+rb,
698 "TARG_CONN_STATE_CLEANUP_WAIT\n"); 694 "TARG_CONN_STATE_CLEANUP_WAIT\n");
699 break; 695 break;
700 default: 696 default:
701 rb += sprintf(page+rb, 697 rb += sprintf(page+rb,
702 "ERROR: Unknown Connection State!\n"); 698 "ERROR: Unknown Connection State!\n");
703 break; 699 break;
704 } 700 }
705 701
706 rb += sprintf(page+rb, " Address %s %s", conn->login_ip, 702 rb += sprintf(page+rb, " Address %s %s", conn->login_ip,
707 (conn->network_transport == ISCSI_TCP) ? 703 (conn->network_transport == ISCSI_TCP) ?
708 "TCP" : "SCTP"); 704 "TCP" : "SCTP");
709 rb += sprintf(page+rb, " StatSN: 0x%08x\n", 705 rb += sprintf(page+rb, " StatSN: 0x%08x\n",
710 conn->stat_sn); 706 conn->stat_sn);
711 } 707 }
712 spin_unlock(&sess->conn_lock); 708 spin_unlock(&sess->conn_lock);
713 } 709 }
714 spin_unlock_bh(&se_nacl->nacl_sess_lock); 710 spin_unlock_bh(&se_nacl->nacl_sess_lock);
715 711
716 return rb; 712 return rb;
717 } 713 }
718 714
719 TF_NACL_BASE_ATTR_RO(lio_target, info); 715 TF_NACL_BASE_ATTR_RO(lio_target, info);
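One thing worth flagging in lio_target_nacl_show_info(): it accumulates into the single PAGE_SIZE configfs buffer with unbounded sprintf() calls, so a session with many connections could in principle overrun the page. A bounds-checked variant of one of those writes, as a sketch rather than what this commit actually does, would thread the remaining space through scnprintf():

/* Hypothetical defensive form of one write from the function above: */
rb += scnprintf(page + rb, PAGE_SIZE - rb, "InitiatorName: %s\n",
		sess->sess_ops->InitiatorName);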
720 716
721 static ssize_t lio_target_nacl_show_cmdsn_depth( 717 static ssize_t lio_target_nacl_show_cmdsn_depth(
722 struct se_node_acl *se_nacl, 718 struct se_node_acl *se_nacl,
723 char *page) 719 char *page)
724 { 720 {
725 return sprintf(page, "%u\n", se_nacl->queue_depth); 721 return sprintf(page, "%u\n", se_nacl->queue_depth);
726 } 722 }
727 723
728 static ssize_t lio_target_nacl_store_cmdsn_depth( 724 static ssize_t lio_target_nacl_store_cmdsn_depth(
729 struct se_node_acl *se_nacl, 725 struct se_node_acl *se_nacl,
730 const char *page, 726 const char *page,
731 size_t count) 727 size_t count)
732 { 728 {
733 struct se_portal_group *se_tpg = se_nacl->se_tpg; 729 struct se_portal_group *se_tpg = se_nacl->se_tpg;
734 struct iscsi_portal_group *tpg = container_of(se_tpg, 730 struct iscsi_portal_group *tpg = container_of(se_tpg,
735 struct iscsi_portal_group, tpg_se_tpg); 731 struct iscsi_portal_group, tpg_se_tpg);
736 struct config_item *acl_ci, *tpg_ci, *wwn_ci; 732 struct config_item *acl_ci, *tpg_ci, *wwn_ci;
737 char *endptr; 733 char *endptr;
738 u32 cmdsn_depth = 0; 734 u32 cmdsn_depth = 0;
739 int ret; 735 int ret;
740 736
741 cmdsn_depth = simple_strtoul(page, &endptr, 0); 737 cmdsn_depth = simple_strtoul(page, &endptr, 0);
742 if (cmdsn_depth > TA_DEFAULT_CMDSN_DEPTH_MAX) { 738 if (cmdsn_depth > TA_DEFAULT_CMDSN_DEPTH_MAX) {
743 pr_err("Passed cmdsn_depth: %u exceeds" 739 pr_err("Passed cmdsn_depth: %u exceeds"
744 " TA_DEFAULT_CMDSN_DEPTH_MAX: %u\n", cmdsn_depth, 740 " TA_DEFAULT_CMDSN_DEPTH_MAX: %u\n", cmdsn_depth,
745 TA_DEFAULT_CMDSN_DEPTH_MAX); 741 TA_DEFAULT_CMDSN_DEPTH_MAX);
746 return -EINVAL; 742 return -EINVAL;
747 } 743 }
748 acl_ci = &se_nacl->acl_group.cg_item; 744 acl_ci = &se_nacl->acl_group.cg_item;
749 if (!acl_ci) { 745 if (!acl_ci) {
750 pr_err("Unable to locatel acl_ci\n"); 746 pr_err("Unable to locatel acl_ci\n");
751 return -EINVAL; 747 return -EINVAL;
752 } 748 }
753 tpg_ci = &acl_ci->ci_parent->ci_group->cg_item; 749 tpg_ci = &acl_ci->ci_parent->ci_group->cg_item;
754 if (!tpg_ci) { 750 if (!tpg_ci) {
755 pr_err("Unable to locate tpg_ci\n"); 751 pr_err("Unable to locate tpg_ci\n");
756 return -EINVAL; 752 return -EINVAL;
757 } 753 }
758 wwn_ci = &tpg_ci->ci_group->cg_item; 754 wwn_ci = &tpg_ci->ci_group->cg_item;
759 if (!wwn_ci) { 755 if (!wwn_ci) {
760 pr_err("Unable to locate config_item wwn_ci\n"); 756 pr_err("Unable to locate config_item wwn_ci\n");
761 return -EINVAL; 757 return -EINVAL;
762 } 758 }
763 759
764 if (iscsit_get_tpg(tpg) < 0) 760 if (iscsit_get_tpg(tpg) < 0)
765 return -EINVAL; 761 return -EINVAL;
766 /* 762 /*
767 * iscsit_tpg_set_initiator_node_queue_depth() assumes force=1 763 * iscsit_tpg_set_initiator_node_queue_depth() assumes force=1
768 */ 764 */
769 ret = iscsit_tpg_set_initiator_node_queue_depth(tpg, 765 ret = iscsit_tpg_set_initiator_node_queue_depth(tpg,
770 config_item_name(acl_ci), cmdsn_depth, 1); 766 config_item_name(acl_ci), cmdsn_depth, 1);
771 767
772 pr_debug("LIO_Target_ConfigFS: %s/%s Set CmdSN Window: %u for" 768 pr_debug("LIO_Target_ConfigFS: %s/%s Set CmdSN Window: %u for"
773 "InitiatorName: %s\n", config_item_name(wwn_ci), 769 "InitiatorName: %s\n", config_item_name(wwn_ci),
774 config_item_name(tpg_ci), cmdsn_depth, 770 config_item_name(tpg_ci), cmdsn_depth,
775 config_item_name(acl_ci)); 771 config_item_name(acl_ci));
776 772
777 iscsit_put_tpg(tpg); 773 iscsit_put_tpg(tpg);
778 return (!ret) ? count : (ssize_t)ret; 774 return (!ret) ? count : (ssize_t)ret;
779 } 775 }
780 776
781 TF_NACL_BASE_ATTR(lio_target, cmdsn_depth, S_IRUGO | S_IWUSR); 777 TF_NACL_BASE_ATTR(lio_target, cmdsn_depth, S_IRUGO | S_IWUSR);
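The store routine above parses with simple_strtoul(), which silently ignores trailing junk. A stricter parse is sketched below with kstrtou32(), which rejects malformed input outright; this is an editorial alternative, not what the commit does:

u32 cmdsn_depth;
int ret;

ret = kstrtou32(page, 0, &cmdsn_depth);	/* -EINVAL on malformed input */
if (ret < 0)
	return ret;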
782 778
783 static struct configfs_attribute *lio_target_initiator_attrs[] = { 779 static struct configfs_attribute *lio_target_initiator_attrs[] = {
784 &lio_target_nacl_info.attr, 780 &lio_target_nacl_info.attr,
785 &lio_target_nacl_cmdsn_depth.attr, 781 &lio_target_nacl_cmdsn_depth.attr,
786 NULL, 782 NULL,
787 }; 783 };
788 784
789 static struct se_node_acl *lio_tpg_alloc_fabric_acl( 785 static struct se_node_acl *lio_tpg_alloc_fabric_acl(
790 struct se_portal_group *se_tpg) 786 struct se_portal_group *se_tpg)
791 { 787 {
792 struct iscsi_node_acl *acl; 788 struct iscsi_node_acl *acl;
793 789
794 acl = kzalloc(sizeof(struct iscsi_node_acl), GFP_KERNEL); 790 acl = kzalloc(sizeof(struct iscsi_node_acl), GFP_KERNEL);
795 if (!acl) { 791 if (!acl) {
796 pr_err("Unable to allocate memory for struct iscsi_node_acl\n"); 792 pr_err("Unable to allocate memory for struct iscsi_node_acl\n");
797 return NULL; 793 return NULL;
798 } 794 }
799 795
800 return &acl->se_node_acl; 796 return &acl->se_node_acl;
801 } 797 }
802 798
803 static struct se_node_acl *lio_target_make_nodeacl( 799 static struct se_node_acl *lio_target_make_nodeacl(
804 struct se_portal_group *se_tpg, 800 struct se_portal_group *se_tpg,
805 struct config_group *group, 801 struct config_group *group,
806 const char *name) 802 const char *name)
807 { 803 {
808 struct config_group *stats_cg; 804 struct config_group *stats_cg;
809 struct iscsi_node_acl *acl; 805 struct iscsi_node_acl *acl;
810 struct se_node_acl *se_nacl_new, *se_nacl; 806 struct se_node_acl *se_nacl_new, *se_nacl;
811 struct iscsi_portal_group *tpg = container_of(se_tpg, 807 struct iscsi_portal_group *tpg = container_of(se_tpg,
812 struct iscsi_portal_group, tpg_se_tpg); 808 struct iscsi_portal_group, tpg_se_tpg);
813 u32 cmdsn_depth; 809 u32 cmdsn_depth;
814 810
815 se_nacl_new = lio_tpg_alloc_fabric_acl(se_tpg); 811 se_nacl_new = lio_tpg_alloc_fabric_acl(se_tpg);
816 if (!se_nacl_new) 812 if (!se_nacl_new)
817 return ERR_PTR(-ENOMEM); 813 return ERR_PTR(-ENOMEM);
818 814
819 acl = container_of(se_nacl_new, struct iscsi_node_acl, 815 acl = container_of(se_nacl_new, struct iscsi_node_acl,
820 se_node_acl); 816 se_node_acl);
821 817
822 cmdsn_depth = ISCSI_TPG_ATTRIB(tpg)->default_cmdsn_depth; 818 cmdsn_depth = ISCSI_TPG_ATTRIB(tpg)->default_cmdsn_depth;
823 /* 819 /*
824 * se_nacl_new may be released by core_tpg_add_initiator_node_acl() 820 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
825 * when converting a NodeACL from demo mode -> explicit 821 * when converting a NodeACL from demo mode -> explicit
826 */ 822 */
827 se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new, 823 se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
828 name, cmdsn_depth); 824 name, cmdsn_depth);
829 if (IS_ERR(se_nacl)) 825 if (IS_ERR(se_nacl))
830 return se_nacl; 826 return se_nacl;
831 827
832 stats_cg = &acl->se_node_acl.acl_fabric_stat_group; 828 stats_cg = &acl->se_node_acl.acl_fabric_stat_group;
833 829
834 stats_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, 830 stats_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
835 GFP_KERNEL); 831 GFP_KERNEL);
836 if (!stats_cg->default_groups) { 832 if (!stats_cg->default_groups) {
837 pr_err("Unable to allocate memory for" 833 pr_err("Unable to allocate memory for"
838 " stats_cg->default_groups\n"); 834 " stats_cg->default_groups\n");
839 core_tpg_del_initiator_node_acl(se_tpg, se_nacl, 1); 835 core_tpg_del_initiator_node_acl(se_tpg, se_nacl, 1);
840 kfree(acl); 836 kfree(acl);
841 return ERR_PTR(-ENOMEM); 837 return ERR_PTR(-ENOMEM);
842 } 838 }
843 839
844 stats_cg->default_groups[0] = &NODE_STAT_GRPS(acl)->iscsi_sess_stats_group; 840 stats_cg->default_groups[0] = &NODE_STAT_GRPS(acl)->iscsi_sess_stats_group;
845 stats_cg->default_groups[1] = NULL; 841 stats_cg->default_groups[1] = NULL;
846 config_group_init_type_name(&NODE_STAT_GRPS(acl)->iscsi_sess_stats_group, 842 config_group_init_type_name(&NODE_STAT_GRPS(acl)->iscsi_sess_stats_group,
847 "iscsi_sess_stats", &iscsi_stat_sess_cit); 843 "iscsi_sess_stats", &iscsi_stat_sess_cit);
848 844
849 return se_nacl; 845 return se_nacl;
850 } 846 }
851 847
852 static void lio_target_drop_nodeacl( 848 static void lio_target_drop_nodeacl(
853 struct se_node_acl *se_nacl) 849 struct se_node_acl *se_nacl)
854 { 850 {
855 struct se_portal_group *se_tpg = se_nacl->se_tpg; 851 struct se_portal_group *se_tpg = se_nacl->se_tpg;
856 struct iscsi_node_acl *acl = container_of(se_nacl, 852 struct iscsi_node_acl *acl = container_of(se_nacl,
857 struct iscsi_node_acl, se_node_acl); 853 struct iscsi_node_acl, se_node_acl);
858 struct config_item *df_item; 854 struct config_item *df_item;
859 struct config_group *stats_cg; 855 struct config_group *stats_cg;
860 int i; 856 int i;
861 857
862 stats_cg = &acl->se_node_acl.acl_fabric_stat_group; 858 stats_cg = &acl->se_node_acl.acl_fabric_stat_group;
863 for (i = 0; stats_cg->default_groups[i]; i++) { 859 for (i = 0; stats_cg->default_groups[i]; i++) {
864 df_item = &stats_cg->default_groups[i]->cg_item; 860 df_item = &stats_cg->default_groups[i]->cg_item;
865 stats_cg->default_groups[i] = NULL; 861 stats_cg->default_groups[i] = NULL;
866 config_item_put(df_item); 862 config_item_put(df_item);
867 } 863 }
868 kfree(stats_cg->default_groups); 864 kfree(stats_cg->default_groups);
869 865
870 core_tpg_del_initiator_node_acl(se_tpg, se_nacl, 1); 866 core_tpg_del_initiator_node_acl(se_tpg, se_nacl, 1);
871 kfree(acl); 867 kfree(acl);
872 } 868 }
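Note the teardown ordering in lio_target_drop_nodeacl(): each default_groups[] slot is cleared before config_item_put() drops what may be the final reference, so no stale pointer survives the put. The same idiom reappears in lio_target_call_coredeltiqn() further down; in isolation (grp here is just an illustrative struct config_group pointer):

for (i = 0; grp->default_groups[i]; i++) {
	struct config_item *df_item = &grp->default_groups[i]->cg_item;

	grp->default_groups[i] = NULL;	/* clear the slot first... */
	config_item_put(df_item);	/* ...then drop the reference */
}
kfree(grp->default_groups);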
873 869
874 /* End items for lio_target_acl_cit */ 870 /* End items for lio_target_acl_cit */
875 871
876 /* Start items for lio_target_tpg_attrib_cit */ 872 /* Start items for lio_target_tpg_attrib_cit */
877 873
878 #define DEF_TPG_ATTRIB(name) \ 874 #define DEF_TPG_ATTRIB(name) \
879 \ 875 \
880 static ssize_t iscsi_tpg_attrib_show_##name( \ 876 static ssize_t iscsi_tpg_attrib_show_##name( \
881 struct se_portal_group *se_tpg, \ 877 struct se_portal_group *se_tpg, \
882 char *page) \ 878 char *page) \
883 { \ 879 { \
884 struct iscsi_portal_group *tpg = container_of(se_tpg, \ 880 struct iscsi_portal_group *tpg = container_of(se_tpg, \
885 struct iscsi_portal_group, tpg_se_tpg); \ 881 struct iscsi_portal_group, tpg_se_tpg); \
886 ssize_t rb; \ 882 ssize_t rb; \
887 \ 883 \
888 if (iscsit_get_tpg(tpg) < 0) \ 884 if (iscsit_get_tpg(tpg) < 0) \
889 return -EINVAL; \ 885 return -EINVAL; \
890 \ 886 \
891 rb = sprintf(page, "%u\n", ISCSI_TPG_ATTRIB(tpg)->name); \ 887 rb = sprintf(page, "%u\n", ISCSI_TPG_ATTRIB(tpg)->name); \
892 iscsit_put_tpg(tpg); \ 888 iscsit_put_tpg(tpg); \
893 return rb; \ 889 return rb; \
894 } \ 890 } \
895 \ 891 \
896 static ssize_t iscsi_tpg_attrib_store_##name( \ 892 static ssize_t iscsi_tpg_attrib_store_##name( \
897 struct se_portal_group *se_tpg, \ 893 struct se_portal_group *se_tpg, \
898 const char *page, \ 894 const char *page, \
899 size_t count) \ 895 size_t count) \
900 { \ 896 { \
901 struct iscsi_portal_group *tpg = container_of(se_tpg, \ 897 struct iscsi_portal_group *tpg = container_of(se_tpg, \
902 struct iscsi_portal_group, tpg_se_tpg); \ 898 struct iscsi_portal_group, tpg_se_tpg); \
903 char *endptr; \ 899 char *endptr; \
904 u32 val; \ 900 u32 val; \
905 int ret; \ 901 int ret; \
906 \ 902 \
907 if (iscsit_get_tpg(tpg) < 0) \ 903 if (iscsit_get_tpg(tpg) < 0) \
908 return -EINVAL; \ 904 return -EINVAL; \
909 \ 905 \
910 val = simple_strtoul(page, &endptr, 0); \ 906 val = simple_strtoul(page, &endptr, 0); \
911 ret = iscsit_ta_##name(tpg, val); \ 907 ret = iscsit_ta_##name(tpg, val); \
912 if (ret < 0) \ 908 if (ret < 0) \
913 goto out; \ 909 goto out; \
914 \ 910 \
915 iscsit_put_tpg(tpg); \ 911 iscsit_put_tpg(tpg); \
916 return count; \ 912 return count; \
917 out: \ 913 out: \
918 iscsit_put_tpg(tpg); \ 914 iscsit_put_tpg(tpg); \
919 return ret; \ 915 return ret; \
920 } 916 }
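So, for example, DEF_TPG_ATTRIB(authentication) produces a store routine whose only attribute-specific lines are the parse and the token-pasted call into the corresponding TPG-attribute setter:

/* Heart of the generated iscsi_tpg_attrib_store_authentication(): */
val = simple_strtoul(page, &endptr, 0);
ret = iscsit_ta_authentication(tpg, val);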
921 917
922 #define TPG_ATTR(_name, _mode) TF_TPG_ATTRIB_ATTR(iscsi, _name, _mode); 918 #define TPG_ATTR(_name, _mode) TF_TPG_ATTRIB_ATTR(iscsi, _name, _mode);
923 919
924 /* 920 /*
925 * Define iscsi_tpg_attrib_s_authentication 921 * Define iscsi_tpg_attrib_s_authentication
926 */ 922 */
927 DEF_TPG_ATTRIB(authentication); 923 DEF_TPG_ATTRIB(authentication);
928 TPG_ATTR(authentication, S_IRUGO | S_IWUSR); 924 TPG_ATTR(authentication, S_IRUGO | S_IWUSR);
929 /* 925 /*
930 * Define iscsi_tpg_attrib_s_login_timeout 926 * Define iscsi_tpg_attrib_s_login_timeout
931 */ 927 */
932 DEF_TPG_ATTRIB(login_timeout); 928 DEF_TPG_ATTRIB(login_timeout);
933 TPG_ATTR(login_timeout, S_IRUGO | S_IWUSR); 929 TPG_ATTR(login_timeout, S_IRUGO | S_IWUSR);
934 /* 930 /*
935 * Define iscsi_tpg_attrib_s_netif_timeout 931 * Define iscsi_tpg_attrib_s_netif_timeout
936 */ 932 */
937 DEF_TPG_ATTRIB(netif_timeout); 933 DEF_TPG_ATTRIB(netif_timeout);
938 TPG_ATTR(netif_timeout, S_IRUGO | S_IWUSR); 934 TPG_ATTR(netif_timeout, S_IRUGO | S_IWUSR);
939 /* 935 /*
940 * Define iscsi_tpg_attrib_s_generate_node_acls 936 * Define iscsi_tpg_attrib_s_generate_node_acls
941 */ 937 */
942 DEF_TPG_ATTRIB(generate_node_acls); 938 DEF_TPG_ATTRIB(generate_node_acls);
943 TPG_ATTR(generate_node_acls, S_IRUGO | S_IWUSR); 939 TPG_ATTR(generate_node_acls, S_IRUGO | S_IWUSR);
944 /* 940 /*
945 * Define iscsi_tpg_attrib_s_default_cmdsn_depth 941 * Define iscsi_tpg_attrib_s_default_cmdsn_depth
946 */ 942 */
947 DEF_TPG_ATTRIB(default_cmdsn_depth); 943 DEF_TPG_ATTRIB(default_cmdsn_depth);
948 TPG_ATTR(default_cmdsn_depth, S_IRUGO | S_IWUSR); 944 TPG_ATTR(default_cmdsn_depth, S_IRUGO | S_IWUSR);
949 /* 945 /*
950 * Define iscsi_tpg_attrib_s_cache_dynamic_acls 946 * Define iscsi_tpg_attrib_s_cache_dynamic_acls
951 */ 947 */
952 DEF_TPG_ATTRIB(cache_dynamic_acls); 948 DEF_TPG_ATTRIB(cache_dynamic_acls);
953 TPG_ATTR(cache_dynamic_acls, S_IRUGO | S_IWUSR); 949 TPG_ATTR(cache_dynamic_acls, S_IRUGO | S_IWUSR);
954 /* 950 /*
955 * Define iscsi_tpg_attrib_s_demo_mode_write_protect 951 * Define iscsi_tpg_attrib_s_demo_mode_write_protect
956 */ 952 */
957 DEF_TPG_ATTRIB(demo_mode_write_protect); 953 DEF_TPG_ATTRIB(demo_mode_write_protect);
958 TPG_ATTR(demo_mode_write_protect, S_IRUGO | S_IWUSR); 954 TPG_ATTR(demo_mode_write_protect, S_IRUGO | S_IWUSR);
959 /* 955 /*
960 * Define iscsi_tpg_attrib_s_prod_mode_write_protect 956 * Define iscsi_tpg_attrib_s_prod_mode_write_protect
961 */ 957 */
962 DEF_TPG_ATTRIB(prod_mode_write_protect); 958 DEF_TPG_ATTRIB(prod_mode_write_protect);
963 TPG_ATTR(prod_mode_write_protect, S_IRUGO | S_IWUSR); 959 TPG_ATTR(prod_mode_write_protect, S_IRUGO | S_IWUSR);
964 960
965 static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = { 961 static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
966 &iscsi_tpg_attrib_authentication.attr, 962 &iscsi_tpg_attrib_authentication.attr,
967 &iscsi_tpg_attrib_login_timeout.attr, 963 &iscsi_tpg_attrib_login_timeout.attr,
968 &iscsi_tpg_attrib_netif_timeout.attr, 964 &iscsi_tpg_attrib_netif_timeout.attr,
969 &iscsi_tpg_attrib_generate_node_acls.attr, 965 &iscsi_tpg_attrib_generate_node_acls.attr,
970 &iscsi_tpg_attrib_default_cmdsn_depth.attr, 966 &iscsi_tpg_attrib_default_cmdsn_depth.attr,
971 &iscsi_tpg_attrib_cache_dynamic_acls.attr, 967 &iscsi_tpg_attrib_cache_dynamic_acls.attr,
972 &iscsi_tpg_attrib_demo_mode_write_protect.attr, 968 &iscsi_tpg_attrib_demo_mode_write_protect.attr,
973 &iscsi_tpg_attrib_prod_mode_write_protect.attr, 969 &iscsi_tpg_attrib_prod_mode_write_protect.attr,
974 NULL, 970 NULL,
975 }; 971 };
976 972
977 /* End items for lio_target_tpg_attrib_cit */ 973 /* End items for lio_target_tpg_attrib_cit */
978 974
979 /* Start items for lio_target_tpg_param_cit */ 975 /* Start items for lio_target_tpg_param_cit */
980 976
981 #define DEF_TPG_PARAM(name) \ 977 #define DEF_TPG_PARAM(name) \
982 static ssize_t iscsi_tpg_param_show_##name( \ 978 static ssize_t iscsi_tpg_param_show_##name( \
983 struct se_portal_group *se_tpg, \ 979 struct se_portal_group *se_tpg, \
984 char *page) \ 980 char *page) \
985 { \ 981 { \
986 struct iscsi_portal_group *tpg = container_of(se_tpg, \ 982 struct iscsi_portal_group *tpg = container_of(se_tpg, \
987 struct iscsi_portal_group, tpg_se_tpg); \ 983 struct iscsi_portal_group, tpg_se_tpg); \
988 struct iscsi_param *param; \ 984 struct iscsi_param *param; \
989 ssize_t rb; \ 985 ssize_t rb; \
990 \ 986 \
991 if (iscsit_get_tpg(tpg) < 0) \ 987 if (iscsit_get_tpg(tpg) < 0) \
992 return -EINVAL; \ 988 return -EINVAL; \
993 \ 989 \
994 param = iscsi_find_param_from_key(__stringify(name), \ 990 param = iscsi_find_param_from_key(__stringify(name), \
995 tpg->param_list); \ 991 tpg->param_list); \
996 if (!param) { \ 992 if (!param) { \
997 iscsit_put_tpg(tpg); \ 993 iscsit_put_tpg(tpg); \
998 return -EINVAL; \ 994 return -EINVAL; \
999 } \ 995 } \
1000 rb = snprintf(page, PAGE_SIZE, "%s\n", param->value); \ 996 rb = snprintf(page, PAGE_SIZE, "%s\n", param->value); \
1001 \ 997 \
1002 iscsit_put_tpg(tpg); \ 998 iscsit_put_tpg(tpg); \
1003 return rb; \ 999 return rb; \
1004 } \ 1000 } \
1005 static ssize_t iscsi_tpg_param_store_##name( \ 1001 static ssize_t iscsi_tpg_param_store_##name( \
1006 struct se_portal_group *se_tpg, \ 1002 struct se_portal_group *se_tpg, \
1007 const char *page, \ 1003 const char *page, \
1008 size_t count) \ 1004 size_t count) \
1009 { \ 1005 { \
1010 struct iscsi_portal_group *tpg = container_of(se_tpg, \ 1006 struct iscsi_portal_group *tpg = container_of(se_tpg, \
1011 struct iscsi_portal_group, tpg_se_tpg); \ 1007 struct iscsi_portal_group, tpg_se_tpg); \
1012 char *buf; \ 1008 char *buf; \
1013 int ret; \ 1009 int ret; \
1014 \ 1010 \
1015 buf = kzalloc(PAGE_SIZE, GFP_KERNEL); \ 1011 buf = kzalloc(PAGE_SIZE, GFP_KERNEL); \
1016 if (!buf) \ 1012 if (!buf) \
1017 return -ENOMEM; \ 1013 return -ENOMEM; \
1018 snprintf(buf, PAGE_SIZE, "%s=%s", __stringify(name), page); \ 1014 snprintf(buf, PAGE_SIZE, "%s=%s", __stringify(name), page); \
1019 buf[strlen(buf)-1] = '\0'; /* Kill newline */ \ 1015 buf[strlen(buf)-1] = '\0'; /* Kill newline */ \
1020 \ 1016 \
1021 if (iscsit_get_tpg(tpg) < 0) { \ 1017 if (iscsit_get_tpg(tpg) < 0) { \
1022 kfree(buf); \ 1018 kfree(buf); \
1023 return -EINVAL; \ 1019 return -EINVAL; \
1024 } \ 1020 } \
1025 \ 1021 \
1026 ret = iscsi_change_param_value(buf, tpg->param_list, 1); \ 1022 ret = iscsi_change_param_value(buf, tpg->param_list, 1); \
1027 if (ret < 0) \ 1023 if (ret < 0) \
1028 goto out; \ 1024 goto out; \
1029 \ 1025 \
1030 kfree(buf); \ 1026 kfree(buf); \
1031 iscsit_put_tpg(tpg); \ 1027 iscsit_put_tpg(tpg); \
1032 return count; \ 1028 return count; \
1033 out: \ 1029 out: \
1034 kfree(buf); \ 1030 kfree(buf); \
1035 iscsit_put_tpg(tpg); \ 1031 iscsit_put_tpg(tpg); \
1036 return -EINVAL; \ 1032 return -EINVAL; \
1037 } 1033 }
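The store path above rebuilds the textual iSCSI key=value form before handing it to the negotiation code; note that the blanket buf[strlen(buf)-1] = '\0' assumes the written value ends in a newline. For example (illustrative value), writing "CRC32C\n" to the HeaderDigest attribute produces:

/* buf after the snprintf() and the newline strip:   */
/*   "HeaderDigest=CRC32C"                           */
/* ...which iscsi_change_param_value() then applies  */
/* to tpg->param_list.                               */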
1038 1034
1039 #define TPG_PARAM_ATTR(_name, _mode) TF_TPG_PARAM_ATTR(iscsi, _name, _mode); 1035 #define TPG_PARAM_ATTR(_name, _mode) TF_TPG_PARAM_ATTR(iscsi, _name, _mode);
1040 1036
1041 DEF_TPG_PARAM(AuthMethod); 1037 DEF_TPG_PARAM(AuthMethod);
1042 TPG_PARAM_ATTR(AuthMethod, S_IRUGO | S_IWUSR); 1038 TPG_PARAM_ATTR(AuthMethod, S_IRUGO | S_IWUSR);
1043 1039
1044 DEF_TPG_PARAM(HeaderDigest); 1040 DEF_TPG_PARAM(HeaderDigest);
1045 TPG_PARAM_ATTR(HeaderDigest, S_IRUGO | S_IWUSR); 1041 TPG_PARAM_ATTR(HeaderDigest, S_IRUGO | S_IWUSR);
1046 1042
1047 DEF_TPG_PARAM(DataDigest); 1043 DEF_TPG_PARAM(DataDigest);
1048 TPG_PARAM_ATTR(DataDigest, S_IRUGO | S_IWUSR); 1044 TPG_PARAM_ATTR(DataDigest, S_IRUGO | S_IWUSR);
1049 1045
1050 DEF_TPG_PARAM(MaxConnections); 1046 DEF_TPG_PARAM(MaxConnections);
1051 TPG_PARAM_ATTR(MaxConnections, S_IRUGO | S_IWUSR); 1047 TPG_PARAM_ATTR(MaxConnections, S_IRUGO | S_IWUSR);
1052 1048
1053 DEF_TPG_PARAM(TargetAlias); 1049 DEF_TPG_PARAM(TargetAlias);
1054 TPG_PARAM_ATTR(TargetAlias, S_IRUGO | S_IWUSR); 1050 TPG_PARAM_ATTR(TargetAlias, S_IRUGO | S_IWUSR);
1055 1051
1056 DEF_TPG_PARAM(InitialR2T); 1052 DEF_TPG_PARAM(InitialR2T);
1057 TPG_PARAM_ATTR(InitialR2T, S_IRUGO | S_IWUSR); 1053 TPG_PARAM_ATTR(InitialR2T, S_IRUGO | S_IWUSR);
1058 1054
1059 DEF_TPG_PARAM(ImmediateData); 1055 DEF_TPG_PARAM(ImmediateData);
1060 TPG_PARAM_ATTR(ImmediateData, S_IRUGO | S_IWUSR); 1056 TPG_PARAM_ATTR(ImmediateData, S_IRUGO | S_IWUSR);
1061 1057
1062 DEF_TPG_PARAM(MaxRecvDataSegmentLength); 1058 DEF_TPG_PARAM(MaxRecvDataSegmentLength);
1063 TPG_PARAM_ATTR(MaxRecvDataSegmentLength, S_IRUGO | S_IWUSR); 1059 TPG_PARAM_ATTR(MaxRecvDataSegmentLength, S_IRUGO | S_IWUSR);
1064 1060
1065 DEF_TPG_PARAM(MaxBurstLength); 1061 DEF_TPG_PARAM(MaxBurstLength);
1066 TPG_PARAM_ATTR(MaxBurstLength, S_IRUGO | S_IWUSR); 1062 TPG_PARAM_ATTR(MaxBurstLength, S_IRUGO | S_IWUSR);
1067 1063
1068 DEF_TPG_PARAM(FirstBurstLength); 1064 DEF_TPG_PARAM(FirstBurstLength);
1069 TPG_PARAM_ATTR(FirstBurstLength, S_IRUGO | S_IWUSR); 1065 TPG_PARAM_ATTR(FirstBurstLength, S_IRUGO | S_IWUSR);
1070 1066
1071 DEF_TPG_PARAM(DefaultTime2Wait); 1067 DEF_TPG_PARAM(DefaultTime2Wait);
1072 TPG_PARAM_ATTR(DefaultTime2Wait, S_IRUGO | S_IWUSR); 1068 TPG_PARAM_ATTR(DefaultTime2Wait, S_IRUGO | S_IWUSR);
1073 1069
1074 DEF_TPG_PARAM(DefaultTime2Retain); 1070 DEF_TPG_PARAM(DefaultTime2Retain);
1075 TPG_PARAM_ATTR(DefaultTime2Retain, S_IRUGO | S_IWUSR); 1071 TPG_PARAM_ATTR(DefaultTime2Retain, S_IRUGO | S_IWUSR);
1076 1072
1077 DEF_TPG_PARAM(MaxOutstandingR2T); 1073 DEF_TPG_PARAM(MaxOutstandingR2T);
1078 TPG_PARAM_ATTR(MaxOutstandingR2T, S_IRUGO | S_IWUSR); 1074 TPG_PARAM_ATTR(MaxOutstandingR2T, S_IRUGO | S_IWUSR);
1079 1075
1080 DEF_TPG_PARAM(DataPDUInOrder); 1076 DEF_TPG_PARAM(DataPDUInOrder);
1081 TPG_PARAM_ATTR(DataPDUInOrder, S_IRUGO | S_IWUSR); 1077 TPG_PARAM_ATTR(DataPDUInOrder, S_IRUGO | S_IWUSR);
1082 1078
1083 DEF_TPG_PARAM(DataSequenceInOrder); 1079 DEF_TPG_PARAM(DataSequenceInOrder);
1084 TPG_PARAM_ATTR(DataSequenceInOrder, S_IRUGO | S_IWUSR); 1080 TPG_PARAM_ATTR(DataSequenceInOrder, S_IRUGO | S_IWUSR);
1085 1081
1086 DEF_TPG_PARAM(ErrorRecoveryLevel); 1082 DEF_TPG_PARAM(ErrorRecoveryLevel);
1087 TPG_PARAM_ATTR(ErrorRecoveryLevel, S_IRUGO | S_IWUSR); 1083 TPG_PARAM_ATTR(ErrorRecoveryLevel, S_IRUGO | S_IWUSR);
1088 1084
1089 DEF_TPG_PARAM(IFMarker); 1085 DEF_TPG_PARAM(IFMarker);
1090 TPG_PARAM_ATTR(IFMarker, S_IRUGO | S_IWUSR); 1086 TPG_PARAM_ATTR(IFMarker, S_IRUGO | S_IWUSR);
1091 1087
1092 DEF_TPG_PARAM(OFMarker); 1088 DEF_TPG_PARAM(OFMarker);
1093 TPG_PARAM_ATTR(OFMarker, S_IRUGO | S_IWUSR); 1089 TPG_PARAM_ATTR(OFMarker, S_IRUGO | S_IWUSR);
1094 1090
1095 DEF_TPG_PARAM(IFMarkInt); 1091 DEF_TPG_PARAM(IFMarkInt);
1096 TPG_PARAM_ATTR(IFMarkInt, S_IRUGO | S_IWUSR); 1092 TPG_PARAM_ATTR(IFMarkInt, S_IRUGO | S_IWUSR);
1097 1093
1098 DEF_TPG_PARAM(OFMarkInt); 1094 DEF_TPG_PARAM(OFMarkInt);
1099 TPG_PARAM_ATTR(OFMarkInt, S_IRUGO | S_IWUSR); 1095 TPG_PARAM_ATTR(OFMarkInt, S_IRUGO | S_IWUSR);
1100 1096
1101 static struct configfs_attribute *lio_target_tpg_param_attrs[] = { 1097 static struct configfs_attribute *lio_target_tpg_param_attrs[] = {
1102 &iscsi_tpg_param_AuthMethod.attr, 1098 &iscsi_tpg_param_AuthMethod.attr,
1103 &iscsi_tpg_param_HeaderDigest.attr, 1099 &iscsi_tpg_param_HeaderDigest.attr,
1104 &iscsi_tpg_param_DataDigest.attr, 1100 &iscsi_tpg_param_DataDigest.attr,
1105 &iscsi_tpg_param_MaxConnections.attr, 1101 &iscsi_tpg_param_MaxConnections.attr,
1106 &iscsi_tpg_param_TargetAlias.attr, 1102 &iscsi_tpg_param_TargetAlias.attr,
1107 &iscsi_tpg_param_InitialR2T.attr, 1103 &iscsi_tpg_param_InitialR2T.attr,
1108 &iscsi_tpg_param_ImmediateData.attr, 1104 &iscsi_tpg_param_ImmediateData.attr,
1109 &iscsi_tpg_param_MaxRecvDataSegmentLength.attr, 1105 &iscsi_tpg_param_MaxRecvDataSegmentLength.attr,
1110 &iscsi_tpg_param_MaxBurstLength.attr, 1106 &iscsi_tpg_param_MaxBurstLength.attr,
1111 &iscsi_tpg_param_FirstBurstLength.attr, 1107 &iscsi_tpg_param_FirstBurstLength.attr,
1112 &iscsi_tpg_param_DefaultTime2Wait.attr, 1108 &iscsi_tpg_param_DefaultTime2Wait.attr,
1113 &iscsi_tpg_param_DefaultTime2Retain.attr, 1109 &iscsi_tpg_param_DefaultTime2Retain.attr,
1114 &iscsi_tpg_param_MaxOutstandingR2T.attr, 1110 &iscsi_tpg_param_MaxOutstandingR2T.attr,
1115 &iscsi_tpg_param_DataPDUInOrder.attr, 1111 &iscsi_tpg_param_DataPDUInOrder.attr,
1116 &iscsi_tpg_param_DataSequenceInOrder.attr, 1112 &iscsi_tpg_param_DataSequenceInOrder.attr,
1117 &iscsi_tpg_param_ErrorRecoveryLevel.attr, 1113 &iscsi_tpg_param_ErrorRecoveryLevel.attr,
1118 &iscsi_tpg_param_IFMarker.attr, 1114 &iscsi_tpg_param_IFMarker.attr,
1119 &iscsi_tpg_param_OFMarker.attr, 1115 &iscsi_tpg_param_OFMarker.attr,
1120 &iscsi_tpg_param_IFMarkInt.attr, 1116 &iscsi_tpg_param_IFMarkInt.attr,
1121 &iscsi_tpg_param_OFMarkInt.attr, 1117 &iscsi_tpg_param_OFMarkInt.attr,
1122 NULL, 1118 NULL,
1123 }; 1119 };
1124 1120
1125 /* End items for lio_target_tpg_param_cit */ 1121 /* End items for lio_target_tpg_param_cit */
1126 1122
1127 /* Start items for lio_target_tpg_cit */ 1123 /* Start items for lio_target_tpg_cit */
1128 1124
1129 static ssize_t lio_target_tpg_show_enable( 1125 static ssize_t lio_target_tpg_show_enable(
1130 struct se_portal_group *se_tpg, 1126 struct se_portal_group *se_tpg,
1131 char *page) 1127 char *page)
1132 { 1128 {
1133 struct iscsi_portal_group *tpg = container_of(se_tpg, 1129 struct iscsi_portal_group *tpg = container_of(se_tpg,
1134 struct iscsi_portal_group, tpg_se_tpg); 1130 struct iscsi_portal_group, tpg_se_tpg);
1135 ssize_t len; 1131 ssize_t len;
1136 1132
1137 spin_lock(&tpg->tpg_state_lock); 1133 spin_lock(&tpg->tpg_state_lock);
1138 len = sprintf(page, "%d\n", 1134 len = sprintf(page, "%d\n",
1139 (tpg->tpg_state == TPG_STATE_ACTIVE) ? 1 : 0); 1135 (tpg->tpg_state == TPG_STATE_ACTIVE) ? 1 : 0);
1140 spin_unlock(&tpg->tpg_state_lock); 1136 spin_unlock(&tpg->tpg_state_lock);
1141 1137
1142 return len; 1138 return len;
1143 } 1139 }
1144 1140
1145 static ssize_t lio_target_tpg_store_enable( 1141 static ssize_t lio_target_tpg_store_enable(
1146 struct se_portal_group *se_tpg, 1142 struct se_portal_group *se_tpg,
1147 const char *page, 1143 const char *page,
1148 size_t count) 1144 size_t count)
1149 { 1145 {
1150 struct iscsi_portal_group *tpg = container_of(se_tpg, 1146 struct iscsi_portal_group *tpg = container_of(se_tpg,
1151 struct iscsi_portal_group, tpg_se_tpg); 1147 struct iscsi_portal_group, tpg_se_tpg);
1152 char *endptr; 1148 char *endptr;
1153 u32 op; 1149 u32 op;
1154 int ret = 0; 1150 int ret = 0;
1155 1151
1156 op = simple_strtoul(page, &endptr, 0); 1152 op = simple_strtoul(page, &endptr, 0);
1157 if ((op != 1) && (op != 0)) { 1153 if ((op != 1) && (op != 0)) {
1158 pr_err("Illegal value for tpg_enable: %u\n", op); 1154 pr_err("Illegal value for tpg_enable: %u\n", op);
1159 return -EINVAL; 1155 return -EINVAL;
1160 } 1156 }
1161 1157
1162 ret = iscsit_get_tpg(tpg); 1158 ret = iscsit_get_tpg(tpg);
1163 if (ret < 0) 1159 if (ret < 0)
1164 return -EINVAL; 1160 return -EINVAL;
1165 1161
1166 if (op) { 1162 if (op) {
1167 ret = iscsit_tpg_enable_portal_group(tpg); 1163 ret = iscsit_tpg_enable_portal_group(tpg);
1168 if (ret < 0) 1164 if (ret < 0)
1169 goto out; 1165 goto out;
1170 } else { 1166 } else {
1171 /* 1167 /*
1172 * iscsit_tpg_disable_portal_group() assumes force=1 1168 * iscsit_tpg_disable_portal_group() assumes force=1
1173 */ 1169 */
1174 ret = iscsit_tpg_disable_portal_group(tpg, 1); 1170 ret = iscsit_tpg_disable_portal_group(tpg, 1);
1175 if (ret < 0) 1171 if (ret < 0)
1176 goto out; 1172 goto out;
1177 } 1173 }
1178 1174
1179 iscsit_put_tpg(tpg); 1175 iscsit_put_tpg(tpg);
1180 return count; 1176 return count;
1181 out: 1177 out:
1182 iscsit_put_tpg(tpg); 1178 iscsit_put_tpg(tpg);
1183 return -EINVAL; 1179 return -EINVAL;
1184 } 1180 }
1185 1181
1186 TF_TPG_BASE_ATTR(lio_target, enable, S_IRUGO | S_IWUSR); 1182 TF_TPG_BASE_ATTR(lio_target, enable, S_IRUGO | S_IWUSR);
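From userspace the enable attribute is driven through configfs; an illustrative session follows (the mount point is the conventional configfs location, and the IQN is an example, not a mandated name):

/*
 * echo 1 > /sys/kernel/config/target/iscsi/iqn.2003-01.org.example/tpgt_1/enable
 * cat /sys/kernel/config/target/iscsi/iqn.2003-01.org.example/tpgt_1/enable
 */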
1187 1183
1188 static struct configfs_attribute *lio_target_tpg_attrs[] = { 1184 static struct configfs_attribute *lio_target_tpg_attrs[] = {
1189 &lio_target_tpg_enable.attr, 1185 &lio_target_tpg_enable.attr,
1190 NULL, 1186 NULL,
1191 }; 1187 };
1192 1188
1193 /* End items for lio_target_tpg_cit */ 1189 /* End items for lio_target_tpg_cit */
1194 1190
1195 /* Start items for lio_target_tiqn_cit */ 1191 /* Start items for lio_target_tiqn_cit */
1196 1192
1197 struct se_portal_group *lio_target_tiqn_addtpg( 1193 struct se_portal_group *lio_target_tiqn_addtpg(
1198 struct se_wwn *wwn, 1194 struct se_wwn *wwn,
1199 struct config_group *group, 1195 struct config_group *group,
1200 const char *name) 1196 const char *name)
1201 { 1197 {
1202 struct iscsi_portal_group *tpg; 1198 struct iscsi_portal_group *tpg;
1203 struct iscsi_tiqn *tiqn; 1199 struct iscsi_tiqn *tiqn;
1204 char *tpgt_str, *end_ptr; 1200 char *tpgt_str, *end_ptr;
1205 int ret = 0; 1201 int ret = 0;
1206 unsigned short int tpgt; 1202 unsigned short int tpgt;
1207 1203
1208 tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn); 1204 tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn);
1209 /* 1205 /*
1210 * Only tpgt_# directory groups can be created below 1206 * Only tpgt_# directory groups can be created below
1211 * target/iscsi/iqn.superturbodiskarray/ 1207 * target/iscsi/iqn.superturbodiskarray/
1212 */ 1208 */
1213 tpgt_str = strstr(name, "tpgt_"); 1209 tpgt_str = strstr(name, "tpgt_");
1214 if (!tpgt_str) { 1210 if (!tpgt_str) {
1215 pr_err("Unable to locate \"tpgt_#\" directory" 1211 pr_err("Unable to locate \"tpgt_#\" directory"
1216 " group\n"); 1212 " group\n");
1217 return NULL; 1213 return NULL;
1218 } 1214 }
1219 tpgt_str += 5; /* Skip past "tpgt_" */ 1215 tpgt_str += 5; /* Skip past "tpgt_" */
1220 tpgt = (unsigned short int) simple_strtoul(tpgt_str, &end_ptr, 0); 1216 tpgt = (unsigned short int) simple_strtoul(tpgt_str, &end_ptr, 0);
1221 1217
1222 tpg = iscsit_alloc_portal_group(tiqn, tpgt); 1218 tpg = iscsit_alloc_portal_group(tiqn, tpgt);
1223 if (!tpg) 1219 if (!tpg)
1224 return NULL; 1220 return NULL;
1225 1221
1226 ret = core_tpg_register( 1222 ret = core_tpg_register(
1227 &lio_target_fabric_configfs->tf_ops, 1223 &lio_target_fabric_configfs->tf_ops,
1228 wwn, &tpg->tpg_se_tpg, (void *)tpg, 1224 wwn, &tpg->tpg_se_tpg, (void *)tpg,
1229 TRANSPORT_TPG_TYPE_NORMAL); 1225 TRANSPORT_TPG_TYPE_NORMAL);
1230 if (ret < 0) 1226 if (ret < 0)
1231 return NULL; 1227 return NULL;
1232 1228
1233 ret = iscsit_tpg_add_portal_group(tiqn, tpg); 1229 ret = iscsit_tpg_add_portal_group(tiqn, tpg);
1234 if (ret != 0) 1230 if (ret != 0)
1235 goto out; 1231 goto out;
1236 1232
1237 pr_debug("LIO_Target_ConfigFS: REGISTER -> %s\n", tiqn->tiqn); 1233 pr_debug("LIO_Target_ConfigFS: REGISTER -> %s\n", tiqn->tiqn);
1238 pr_debug("LIO_Target_ConfigFS: REGISTER -> Allocated TPG: %s\n", 1234 pr_debug("LIO_Target_ConfigFS: REGISTER -> Allocated TPG: %s\n",
1239 name); 1235 name);
1240 return &tpg->tpg_se_tpg; 1236 return &tpg->tpg_se_tpg;
1241 out: 1237 out:
1242 core_tpg_deregister(&tpg->tpg_se_tpg); 1238 core_tpg_deregister(&tpg->tpg_se_tpg);
1243 kfree(tpg); 1239 kfree(tpg);
1244 return NULL; 1240 return NULL;
1245 } 1241 }
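A small asymmetry worth noting in lio_target_tiqn_addtpg(): when core_tpg_register() fails, the function returns NULL without releasing the tpg obtained from iscsit_alloc_portal_group(), while the iscsit_tpg_add_portal_group() failure path both deregisters and frees. A symmetric error path would look like the following sketch (an editorial suggestion, not part of this commit):

ret = core_tpg_register(&lio_target_fabric_configfs->tf_ops,
		wwn, &tpg->tpg_se_tpg, (void *)tpg,
		TRANSPORT_TPG_TYPE_NORMAL);
if (ret < 0) {
	kfree(tpg);	/* mirror the existing out: path */
	return NULL;
}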
1246 1242
1247 void lio_target_tiqn_deltpg(struct se_portal_group *se_tpg) 1243 void lio_target_tiqn_deltpg(struct se_portal_group *se_tpg)
1248 { 1244 {
1249 struct iscsi_portal_group *tpg; 1245 struct iscsi_portal_group *tpg;
1250 struct iscsi_tiqn *tiqn; 1246 struct iscsi_tiqn *tiqn;
1251 1247
1252 tpg = container_of(se_tpg, struct iscsi_portal_group, tpg_se_tpg); 1248 tpg = container_of(se_tpg, struct iscsi_portal_group, tpg_se_tpg);
1253 tiqn = tpg->tpg_tiqn; 1249 tiqn = tpg->tpg_tiqn;
1254 /* 1250 /*
1255 * iscsit_tpg_del_portal_group() assumes force=1 1251 * iscsit_tpg_del_portal_group() assumes force=1
1256 */ 1252 */
1257 pr_debug("LIO_Target_ConfigFS: DEREGISTER -> Releasing TPG\n"); 1253 pr_debug("LIO_Target_ConfigFS: DEREGISTER -> Releasing TPG\n");
1258 iscsit_tpg_del_portal_group(tiqn, tpg, 1); 1254 iscsit_tpg_del_portal_group(tiqn, tpg, 1);
1259 } 1255 }
1260 1256
1261 /* End items for lio_target_tiqn_cit */ 1257 /* End items for lio_target_tiqn_cit */
1262 1258
1263 /* Start LIO-Target TIQN struct config_item lio_target_cit */ 1259 /* Start LIO-Target TIQN struct config_item lio_target_cit */
1264 1260
1265 static ssize_t lio_target_wwn_show_attr_lio_version( 1261 static ssize_t lio_target_wwn_show_attr_lio_version(
1266 struct target_fabric_configfs *tf, 1262 struct target_fabric_configfs *tf,
1267 char *page) 1263 char *page)
1268 { 1264 {
1269 return sprintf(page, "RisingTide Systems Linux-iSCSI Target "ISCSIT_VERSION"\n"); 1265 return sprintf(page, "RisingTide Systems Linux-iSCSI Target "ISCSIT_VERSION"\n");
1270 } 1266 }
1271 1267
1272 TF_WWN_ATTR_RO(lio_target, lio_version); 1268 TF_WWN_ATTR_RO(lio_target, lio_version);
1273 1269
1274 static struct configfs_attribute *lio_target_wwn_attrs[] = { 1270 static struct configfs_attribute *lio_target_wwn_attrs[] = {
1275 &lio_target_wwn_lio_version.attr, 1271 &lio_target_wwn_lio_version.attr,
1276 NULL, 1272 NULL,
1277 }; 1273 };
1278 1274
1279 struct se_wwn *lio_target_call_coreaddtiqn( 1275 struct se_wwn *lio_target_call_coreaddtiqn(
1280 struct target_fabric_configfs *tf, 1276 struct target_fabric_configfs *tf,
1281 struct config_group *group, 1277 struct config_group *group,
1282 const char *name) 1278 const char *name)
1283 { 1279 {
1284 struct config_group *stats_cg; 1280 struct config_group *stats_cg;
1285 struct iscsi_tiqn *tiqn; 1281 struct iscsi_tiqn *tiqn;
1286 1282
1287 tiqn = iscsit_add_tiqn((unsigned char *)name); 1283 tiqn = iscsit_add_tiqn((unsigned char *)name);
1288 if (IS_ERR(tiqn)) 1284 if (IS_ERR(tiqn))
1289 return ERR_CAST(tiqn); 1285 return ERR_CAST(tiqn);
1290 /* 1286 /*
1291 * Setup struct iscsi_wwn_stat_grps for se_wwn->fabric_stat_group. 1287 * Setup struct iscsi_wwn_stat_grps for se_wwn->fabric_stat_group.
1292 */ 1288 */
1293 stats_cg = &tiqn->tiqn_wwn.fabric_stat_group; 1289 stats_cg = &tiqn->tiqn_wwn.fabric_stat_group;
1294 1290
1295 stats_cg->default_groups = kzalloc(sizeof(struct config_group) * 6, 1291 stats_cg->default_groups = kzalloc(sizeof(struct config_group) * 6,
1296 GFP_KERNEL); 1292 GFP_KERNEL);
1297 if (!stats_cg->default_groups) { 1293 if (!stats_cg->default_groups) {
1298 pr_err("Unable to allocate memory for" 1294 pr_err("Unable to allocate memory for"
1299 " stats_cg->default_groups\n"); 1295 " stats_cg->default_groups\n");
1300 iscsit_del_tiqn(tiqn); 1296 iscsit_del_tiqn(tiqn);
1301 return ERR_PTR(-ENOMEM); 1297 return ERR_PTR(-ENOMEM);
1302 } 1298 }
1303 1299
1304 stats_cg->default_groups[0] = &WWN_STAT_GRPS(tiqn)->iscsi_instance_group; 1300 stats_cg->default_groups[0] = &WWN_STAT_GRPS(tiqn)->iscsi_instance_group;
1305 stats_cg->default_groups[1] = &WWN_STAT_GRPS(tiqn)->iscsi_sess_err_group; 1301 stats_cg->default_groups[1] = &WWN_STAT_GRPS(tiqn)->iscsi_sess_err_group;
1306 stats_cg->default_groups[2] = &WWN_STAT_GRPS(tiqn)->iscsi_tgt_attr_group; 1302 stats_cg->default_groups[2] = &WWN_STAT_GRPS(tiqn)->iscsi_tgt_attr_group;
1307 stats_cg->default_groups[3] = &WWN_STAT_GRPS(tiqn)->iscsi_login_stats_group; 1303 stats_cg->default_groups[3] = &WWN_STAT_GRPS(tiqn)->iscsi_login_stats_group;
1308 stats_cg->default_groups[4] = &WWN_STAT_GRPS(tiqn)->iscsi_logout_stats_group; 1304 stats_cg->default_groups[4] = &WWN_STAT_GRPS(tiqn)->iscsi_logout_stats_group;
1309 stats_cg->default_groups[5] = NULL; 1305 stats_cg->default_groups[5] = NULL;
1310 config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_instance_group, 1306 config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_instance_group,
1311 "iscsi_instance", &iscsi_stat_instance_cit); 1307 "iscsi_instance", &iscsi_stat_instance_cit);
1312 config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_sess_err_group, 1308 config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_sess_err_group,
1313 "iscsi_sess_err", &iscsi_stat_sess_err_cit); 1309 "iscsi_sess_err", &iscsi_stat_sess_err_cit);
1314 config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_tgt_attr_group, 1310 config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_tgt_attr_group,
1315 "iscsi_tgt_attr", &iscsi_stat_tgt_attr_cit); 1311 "iscsi_tgt_attr", &iscsi_stat_tgt_attr_cit);
1316 config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_login_stats_group, 1312 config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_login_stats_group,
1317 "iscsi_login_stats", &iscsi_stat_login_cit); 1313 "iscsi_login_stats", &iscsi_stat_login_cit);
1318 config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_logout_stats_group, 1314 config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_logout_stats_group,
1319 "iscsi_logout_stats", &iscsi_stat_logout_cit); 1315 "iscsi_logout_stats", &iscsi_stat_logout_cit);
1320 1316
1321 pr_debug("LIO_Target_ConfigFS: REGISTER -> %s\n", tiqn->tiqn); 1317 pr_debug("LIO_Target_ConfigFS: REGISTER -> %s\n", tiqn->tiqn);
1322 pr_debug("LIO_Target_ConfigFS: REGISTER -> Allocated Node:" 1318 pr_debug("LIO_Target_ConfigFS: REGISTER -> Allocated Node:"
1323 " %s\n", name); 1319 " %s\n", name);
1324 return &tiqn->tiqn_wwn; 1320 return &tiqn->tiqn_wwn;
1325 } 1321 }
1326 1322
1327 void lio_target_call_coredeltiqn( 1323 void lio_target_call_coredeltiqn(
1328 struct se_wwn *wwn) 1324 struct se_wwn *wwn)
1329 { 1325 {
1330 struct iscsi_tiqn *tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn); 1326 struct iscsi_tiqn *tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn);
1331 struct config_item *df_item; 1327 struct config_item *df_item;
1332 struct config_group *stats_cg; 1328 struct config_group *stats_cg;
1333 int i; 1329 int i;
1334 1330
1335 stats_cg = &tiqn->tiqn_wwn.fabric_stat_group; 1331 stats_cg = &tiqn->tiqn_wwn.fabric_stat_group;
1336 for (i = 0; stats_cg->default_groups[i]; i++) { 1332 for (i = 0; stats_cg->default_groups[i]; i++) {
1337 df_item = &stats_cg->default_groups[i]->cg_item; 1333 df_item = &stats_cg->default_groups[i]->cg_item;
1338 stats_cg->default_groups[i] = NULL; 1334 stats_cg->default_groups[i] = NULL;
1339 config_item_put(df_item); 1335 config_item_put(df_item);
1340 } 1336 }
1341 kfree(stats_cg->default_groups); 1337 kfree(stats_cg->default_groups);
1342 1338
1343 pr_debug("LIO_Target_ConfigFS: DEREGISTER -> %s\n", 1339 pr_debug("LIO_Target_ConfigFS: DEREGISTER -> %s\n",
1344 tiqn->tiqn); 1340 tiqn->tiqn);
1345 iscsit_del_tiqn(tiqn); 1341 iscsit_del_tiqn(tiqn);
1346 } 1342 }
1347 1343
1348 /* End LIO-Target TIQN struct config_item lio_target_cit */ 1344 /* End LIO-Target TIQN struct config_item lio_target_cit */
1349 1345
1350 /* Start lio_target_discovery_auth_cit */ 1346 /* Start lio_target_discovery_auth_cit */
1351 1347
1352 #define DEF_DISC_AUTH_STR(name, flags) \ 1348 #define DEF_DISC_AUTH_STR(name, flags) \
1353 __DEF_NACL_AUTH_STR(disc, name, flags) \ 1349 __DEF_NACL_AUTH_STR(disc, name, flags) \
1354 static ssize_t iscsi_disc_show_##name( \ 1350 static ssize_t iscsi_disc_show_##name( \
1355 struct target_fabric_configfs *tf, \ 1351 struct target_fabric_configfs *tf, \
1356 char *page) \ 1352 char *page) \
1357 { \ 1353 { \
1358 return __iscsi_disc_show_##name(&iscsit_global->discovery_acl, \ 1354 return __iscsi_disc_show_##name(&iscsit_global->discovery_acl, \
1359 page); \ 1355 page); \
1360 } \ 1356 } \
1361 static ssize_t iscsi_disc_store_##name( \ 1357 static ssize_t iscsi_disc_store_##name( \
1362 struct target_fabric_configfs *tf, \ 1358 struct target_fabric_configfs *tf, \
1363 const char *page, \ 1359 const char *page, \
1364 size_t count) \ 1360 size_t count) \
1365 { \ 1361 { \
1366 return __iscsi_disc_store_##name(&iscsit_global->discovery_acl, \ 1362 return __iscsi_disc_store_##name(&iscsit_global->discovery_acl, \
1367 page, count); \ 1363 page, count); \
1368 } 1364 }
1369 1365
1370 #define DEF_DISC_AUTH_INT(name) \ 1366 #define DEF_DISC_AUTH_INT(name) \
1371 __DEF_NACL_AUTH_INT(disc, name) \ 1367 __DEF_NACL_AUTH_INT(disc, name) \
1372 static ssize_t iscsi_disc_show_##name( \ 1368 static ssize_t iscsi_disc_show_##name( \
1373 struct target_fabric_configfs *tf, \ 1369 struct target_fabric_configfs *tf, \
1374 char *page) \ 1370 char *page) \
1375 { \ 1371 { \
1376 return __iscsi_disc_show_##name(&iscsit_global->discovery_acl, \ 1372 return __iscsi_disc_show_##name(&iscsit_global->discovery_acl, \
1377 page); \ 1373 page); \
1378 } 1374 }
1379 1375
1380 #define DISC_AUTH_ATTR(_name, _mode) TF_DISC_ATTR(iscsi, _name, _mode) 1376 #define DISC_AUTH_ATTR(_name, _mode) TF_DISC_ATTR(iscsi, _name, _mode)
1381 #define DISC_AUTH_ATTR_RO(_name) TF_DISC_ATTR_RO(iscsi, _name) 1377 #define DISC_AUTH_ATTR_RO(_name) TF_DISC_ATTR_RO(iscsi, _name)
1382 1378
1383 /* 1379 /*
1384 * One-way authentication userid 1380 * One-way authentication userid
1385 */ 1381 */
1386 DEF_DISC_AUTH_STR(userid, NAF_USERID_SET); 1382 DEF_DISC_AUTH_STR(userid, NAF_USERID_SET);
1387 DISC_AUTH_ATTR(userid, S_IRUGO | S_IWUSR); 1383 DISC_AUTH_ATTR(userid, S_IRUGO | S_IWUSR);
1388 /* 1384 /*
1389 * One-way authentication password 1385 * One-way authentication password
1390 */ 1386 */
1391 DEF_DISC_AUTH_STR(password, NAF_PASSWORD_SET); 1387 DEF_DISC_AUTH_STR(password, NAF_PASSWORD_SET);
1392 DISC_AUTH_ATTR(password, S_IRUGO | S_IWUSR); 1388 DISC_AUTH_ATTR(password, S_IRUGO | S_IWUSR);
1393 /* 1389 /*
1394 * Enforce mutual authentication 1390 * Enforce mutual authentication
1395 */ 1391 */
1396 DEF_DISC_AUTH_INT(authenticate_target); 1392 DEF_DISC_AUTH_INT(authenticate_target);
1397 DISC_AUTH_ATTR_RO(authenticate_target); 1393 DISC_AUTH_ATTR_RO(authenticate_target);
1398 /* 1394 /*
1399 * Mutual authentication userid 1395 * Mutual authentication userid
1400 */ 1396 */
1401 DEF_DISC_AUTH_STR(userid_mutual, NAF_USERID_IN_SET); 1397 DEF_DISC_AUTH_STR(userid_mutual, NAF_USERID_IN_SET);
1402 DISC_AUTH_ATTR(userid_mutual, S_IRUGO | S_IWUSR); 1398 DISC_AUTH_ATTR(userid_mutual, S_IRUGO | S_IWUSR);
1403 /* 1399 /*
1404 * Mutual authentication password 1400 * Mutual authentication password
1405 */ 1401 */
1406 DEF_DISC_AUTH_STR(password_mutual, NAF_PASSWORD_IN_SET); 1402 DEF_DISC_AUTH_STR(password_mutual, NAF_PASSWORD_IN_SET);
1407 DISC_AUTH_ATTR(password_mutual, S_IRUGO | S_IWUSR); 1403 DISC_AUTH_ATTR(password_mutual, S_IRUGO | S_IWUSR);
1408 1404
1409 /* 1405 /*
1410 * enforce_discovery_auth 1406 * enforce_discovery_auth
1411 */ 1407 */
1412 static ssize_t iscsi_disc_show_enforce_discovery_auth( 1408 static ssize_t iscsi_disc_show_enforce_discovery_auth(
1413 struct target_fabric_configfs *tf, 1409 struct target_fabric_configfs *tf,
1414 char *page) 1410 char *page)
1415 { 1411 {
1416 struct iscsi_node_auth *discovery_auth = &iscsit_global->discovery_acl.node_auth; 1412 struct iscsi_node_auth *discovery_auth = &iscsit_global->discovery_acl.node_auth;
1417 1413
1418 return sprintf(page, "%d\n", discovery_auth->enforce_discovery_auth); 1414 return sprintf(page, "%d\n", discovery_auth->enforce_discovery_auth);
1419 } 1415 }
1420 1416
1421 static ssize_t iscsi_disc_store_enforce_discovery_auth( 1417 static ssize_t iscsi_disc_store_enforce_discovery_auth(
1422 struct target_fabric_configfs *tf, 1418 struct target_fabric_configfs *tf,
1423 const char *page, 1419 const char *page,
1424 size_t count) 1420 size_t count)
1425 { 1421 {
1426 struct iscsi_param *param; 1422 struct iscsi_param *param;
1427 struct iscsi_portal_group *discovery_tpg = iscsit_global->discovery_tpg; 1423 struct iscsi_portal_group *discovery_tpg = iscsit_global->discovery_tpg;
1428 char *endptr; 1424 char *endptr;
1429 u32 op; 1425 u32 op;
1430 1426
1431 op = simple_strtoul(page, &endptr, 0); 1427 op = simple_strtoul(page, &endptr, 0);
1432 if ((op != 1) && (op != 0)) { 1428 if ((op != 1) && (op != 0)) {
1433 pr_err("Illegal value for enforce_discovery_auth:" 1429 pr_err("Illegal value for enforce_discovery_auth:"
1434 " %u\n", op); 1430 " %u\n", op);
1435 return -EINVAL; 1431 return -EINVAL;
1436 } 1432 }
1437 1433
1438 if (!discovery_tpg) { 1434 if (!discovery_tpg) {
1439 pr_err("iscsit_global->discovery_tpg is NULL\n"); 1435 pr_err("iscsit_global->discovery_tpg is NULL\n");
1440 return -EINVAL; 1436 return -EINVAL;
1441 } 1437 }
1442 1438
1443 param = iscsi_find_param_from_key(AUTHMETHOD, 1439 param = iscsi_find_param_from_key(AUTHMETHOD,
1444 discovery_tpg->param_list); 1440 discovery_tpg->param_list);
1445 if (!param) 1441 if (!param)
1446 return -EINVAL; 1442 return -EINVAL;
1447 1443
1448 if (op) { 1444 if (op) {
1449 /* 1445 /*
1450 * Reset the AuthMethod key to CHAP. 1446 * Reset the AuthMethod key to CHAP.
1451 */ 1447 */
1452 if (iscsi_update_param_value(param, CHAP) < 0) 1448 if (iscsi_update_param_value(param, CHAP) < 0)
1453 return -EINVAL; 1449 return -EINVAL;
1454 1450
1455 discovery_tpg->tpg_attrib.authentication = 1; 1451 discovery_tpg->tpg_attrib.authentication = 1;
1456 iscsit_global->discovery_acl.node_auth.enforce_discovery_auth = 1; 1452 iscsit_global->discovery_acl.node_auth.enforce_discovery_auth = 1;
1457 pr_debug("LIO-CORE[0] Successfully enabled" 1453 pr_debug("LIO-CORE[0] Successfully enabled"
1458 " authentication enforcement for iSCSI" 1454 " authentication enforcement for iSCSI"
1459 " Discovery TPG\n"); 1455 " Discovery TPG\n");
1460 } else { 1456 } else {
1461 /* 1457 /*
1462 * Reset the AuthMethod key to CHAP,None 1458 * Reset the AuthMethod key to CHAP,None
1463 */ 1459 */
1464 if (iscsi_update_param_value(param, "CHAP,None") < 0) 1460 if (iscsi_update_param_value(param, "CHAP,None") < 0)
1465 return -EINVAL; 1461 return -EINVAL;
1466 1462
1467 discovery_tpg->tpg_attrib.authentication = 0; 1463 discovery_tpg->tpg_attrib.authentication = 0;
1468 iscsit_global->discovery_acl.node_auth.enforce_discovery_auth = 0; 1464 iscsit_global->discovery_acl.node_auth.enforce_discovery_auth = 0;
1469 pr_debug("LIO-CORE[0] Successfully disabled" 1465 pr_debug("LIO-CORE[0] Successfully disabled"
1470 " authentication enforcement for iSCSI" 1466 " authentication enforcement for iSCSI"
1471 " Discovery TPG\n"); 1467 " Discovery TPG\n");
1472 } 1468 }
1473 1469
1474 return count; 1470 return count;
1475 } 1471 }
1476 1472
1477 DISC_AUTH_ATTR(enforce_discovery_auth, S_IRUGO | S_IWUSR); 1473 DISC_AUTH_ATTR(enforce_discovery_auth, S_IRUGO | S_IWUSR);
1478 1474
1479 static struct configfs_attribute *lio_target_discovery_auth_attrs[] = { 1475 static struct configfs_attribute *lio_target_discovery_auth_attrs[] = {
1480 &iscsi_disc_userid.attr, 1476 &iscsi_disc_userid.attr,
1481 &iscsi_disc_password.attr, 1477 &iscsi_disc_password.attr,
1482 &iscsi_disc_authenticate_target.attr, 1478 &iscsi_disc_authenticate_target.attr,
1483 &iscsi_disc_userid_mutual.attr, 1479 &iscsi_disc_userid_mutual.attr,
1484 &iscsi_disc_password_mutual.attr, 1480 &iscsi_disc_password_mutual.attr,
1485 &iscsi_disc_enforce_discovery_auth.attr, 1481 &iscsi_disc_enforce_discovery_auth.attr,
1486 NULL, 1482 NULL,
1487 }; 1483 };
1488 1484
1489 /* End lio_target_discovery_auth_cit */ 1485 /* End lio_target_discovery_auth_cit */
1490 1486
1491 /* Start functions for target_core_fabric_ops */ 1487 /* Start functions for target_core_fabric_ops */
1492 1488
1493 static char *iscsi_get_fabric_name(void) 1489 static char *iscsi_get_fabric_name(void)
1494 { 1490 {
1495 return "iSCSI"; 1491 return "iSCSI";
1496 } 1492 }
1497 1493
1498 static u32 iscsi_get_task_tag(struct se_cmd *se_cmd) 1494 static u32 iscsi_get_task_tag(struct se_cmd *se_cmd)
1499 { 1495 {
1500 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); 1496 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
1501 1497
1502 return cmd->init_task_tag; 1498 return cmd->init_task_tag;
1503 } 1499 }
1504 1500
1505 static int iscsi_get_cmd_state(struct se_cmd *se_cmd) 1501 static int iscsi_get_cmd_state(struct se_cmd *se_cmd)
1506 { 1502 {
1507 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); 1503 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
1508 1504
1509 return cmd->i_state; 1505 return cmd->i_state;
1510 } 1506 }
1511 1507
1512 static int iscsi_is_state_remove(struct se_cmd *se_cmd) 1508 static int iscsi_is_state_remove(struct se_cmd *se_cmd)
1513 { 1509 {
1514 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); 1510 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
1515 1511
1516 return (cmd->i_state == ISTATE_REMOVE); 1512 return (cmd->i_state == ISTATE_REMOVE);
1517 } 1513 }
1518 1514
1519 static int lio_sess_logged_in(struct se_session *se_sess) 1515 static int lio_sess_logged_in(struct se_session *se_sess)
1520 { 1516 {
1521 struct iscsi_session *sess = se_sess->fabric_sess_ptr; 1517 struct iscsi_session *sess = se_sess->fabric_sess_ptr;
1522 int ret; 1518 int ret;
1523 /* 1519 /*
1524 * Called with spin_lock_bh(&tpg_lock) and 1520 * Called with spin_lock_bh(&tpg_lock) and
1525 * spin_lock(&se_tpg->session_lock) held. 1521 * spin_lock(&se_tpg->session_lock) held.
1526 */ 1522 */
1527 spin_lock(&sess->conn_lock); 1523 spin_lock(&sess->conn_lock);
1528 ret = (sess->session_state != TARG_SESS_STATE_LOGGED_IN); 1524 ret = (sess->session_state != TARG_SESS_STATE_LOGGED_IN);
1529 spin_unlock(&sess->conn_lock); 1525 spin_unlock(&sess->conn_lock);
1530 1526
1531 return ret; 1527 return ret;
1532 } 1528 }
1533 1529
1534 static u32 lio_sess_get_index(struct se_session *se_sess) 1530 static u32 lio_sess_get_index(struct se_session *se_sess)
1535 { 1531 {
1536 struct iscsi_session *sess = se_sess->fabric_sess_ptr; 1532 struct iscsi_session *sess = se_sess->fabric_sess_ptr;
1537 1533
1538 return sess->session_index; 1534 return sess->session_index;
1539 } 1535 }
1540 1536
1541 static u32 lio_sess_get_initiator_sid( 1537 static u32 lio_sess_get_initiator_sid(
1542 struct se_session *se_sess, 1538 struct se_session *se_sess,
1543 unsigned char *buf, 1539 unsigned char *buf,
1544 u32 size) 1540 u32 size)
1545 { 1541 {
1546 struct iscsi_session *sess = se_sess->fabric_sess_ptr; 1542 struct iscsi_session *sess = se_sess->fabric_sess_ptr;
1547 /* 1543 /*
1548 * iSCSI Initiator Session Identifier from RFC-3720. 1544 * iSCSI Initiator Session Identifier from RFC-3720.
1549 */ 1545 */
1550 return snprintf(buf, size, "%02x%02x%02x%02x%02x%02x", 1546 return snprintf(buf, size, "%02x%02x%02x%02x%02x%02x",
1551 sess->isid[0], sess->isid[1], sess->isid[2], 1547 sess->isid[0], sess->isid[1], sess->isid[2],
1552 sess->isid[3], sess->isid[4], sess->isid[5]); 1548 sess->isid[3], sess->isid[4], sess->isid[5]);
1553 } 1549 }
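
The 6-byte ISID renders as exactly 12 hex characters, so callers need a buffer of at least 13 bytes to hold the NUL terminator as well. A minimal userspace sketch of the same formatting follows; the sample ISID value is made up for illustration, real values arrive in the initiator's login PDU.

#include <stdio.h>

/* Format a 6-byte iSCSI ISID as 12 lowercase hex characters,
 * mirroring lio_sess_get_initiator_sid(). Returns the number of
 * characters that would have been written, as snprintf() does. */
static int format_isid(char *buf, size_t size, const unsigned char isid[6])
{
        return snprintf(buf, size, "%02x%02x%02x%02x%02x%02x",
                        isid[0], isid[1], isid[2],
                        isid[3], isid[4], isid[5]);
}

int main(void)
{
        /* Hypothetical ISID, chosen only for this example. */
        const unsigned char isid[6] = { 0x80, 0x23, 0x4f, 0x00, 0x00, 0x01 };
        char buf[13];   /* 12 hex digits + NUL */

        format_isid(buf, sizeof(buf), isid);
        printf("%s\n", buf);    /* prints 80234f000001 */
        return 0;
}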
1554 1550
1555 static int lio_queue_data_in(struct se_cmd *se_cmd) 1551 static int lio_queue_data_in(struct se_cmd *se_cmd)
1556 { 1552 {
1557 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); 1553 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
1558 1554
1559 cmd->i_state = ISTATE_SEND_DATAIN; 1555 cmd->i_state = ISTATE_SEND_DATAIN;
1560 iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state); 1556 iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
1561 return 0; 1557 return 0;
1562 } 1558 }
1563 1559
1564 static int lio_write_pending(struct se_cmd *se_cmd) 1560 static int lio_write_pending(struct se_cmd *se_cmd)
1565 { 1561 {
1566 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); 1562 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
1567 1563
1568 if (!cmd->immediate_data && !cmd->unsolicited_data) 1564 if (!cmd->immediate_data && !cmd->unsolicited_data)
1569 return iscsit_build_r2ts_for_cmd(cmd, cmd->conn, 1); 1565 return iscsit_build_r2ts_for_cmd(cmd, cmd->conn, 1);
1570 1566
1571 return 0; 1567 return 0;
1572 } 1568 }
1573 1569
1574 static int lio_write_pending_status(struct se_cmd *se_cmd) 1570 static int lio_write_pending_status(struct se_cmd *se_cmd)
1575 { 1571 {
1576 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); 1572 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
1577 int ret; 1573 int ret;
1578 1574
1579 spin_lock_bh(&cmd->istate_lock); 1575 spin_lock_bh(&cmd->istate_lock);
1580 ret = !(cmd->cmd_flags & ICF_GOT_LAST_DATAOUT); 1576 ret = !(cmd->cmd_flags & ICF_GOT_LAST_DATAOUT);
1581 spin_unlock_bh(&cmd->istate_lock); 1577 spin_unlock_bh(&cmd->istate_lock);
1582 1578
1583 return ret; 1579 return ret;
1584 } 1580 }
1585 1581
1586 static int lio_queue_status(struct se_cmd *se_cmd) 1582 static int lio_queue_status(struct se_cmd *se_cmd)
1587 { 1583 {
1588 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); 1584 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
1589 1585
1590 cmd->i_state = ISTATE_SEND_STATUS; 1586 cmd->i_state = ISTATE_SEND_STATUS;
1591 iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state); 1587 iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
1592 return 0; 1588 return 0;
1593 } 1589 }
1594 1590
1595 static u16 lio_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length) 1591 static u16 lio_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)
1596 { 1592 {
1597 unsigned char *buffer = se_cmd->sense_buffer; 1593 unsigned char *buffer = se_cmd->sense_buffer;
1598 /* 1594 /*
1599 * From RFC-3720 10.4.7. Data Segment - Sense and Response Data Segment 1595 * From RFC-3720 10.4.7. Data Segment - Sense and Response Data Segment
1600 * 16-bit SenseLength. 1596 * 16-bit SenseLength.
1601 */ 1597 */
1602 buffer[0] = ((sense_length >> 8) & 0xff); 1598 buffer[0] = ((sense_length >> 8) & 0xff);
1603 buffer[1] = (sense_length & 0xff); 1599 buffer[1] = (sense_length & 0xff);
1604 /* 1600 /*
1605 * Return two byte offset into allocated sense_buffer. 1601 * Return two byte offset into allocated sense_buffer.
1606 */ 1602 */
1607 return 2; 1603 return 2;
1608 } 1604 }
1609 1605
1610 static u16 lio_get_fabric_sense_len(void) 1606 static u16 lio_get_fabric_sense_len(void)
1611 { 1607 {
1612 /* 1608 /*
1613 * Return two byte offset into allocated sense_buffer. 1609 * Return two byte offset into allocated sense_buffer.
1614 */ 1610 */
1615 return 2; 1611 return 2;
1616 } 1612 }
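
Both helpers encode the same RFC-3720 10.4.7 convention: the first two bytes of the sense buffer carry a big-endian 16-bit SenseLength, and the actual SCSI sense data starts at offset 2. A small standalone sketch of that header layout, with an illustrative 18-byte sense length:

#include <stdio.h>
#include <string.h>

/* Write the big-endian 16-bit SenseLength header prescribed by
 * RFC-3720 10.4.7 and return the offset of the sense data proper,
 * mirroring lio_set_fabric_sense_len(). */
static unsigned short set_sense_len(unsigned char *buffer,
                                    unsigned int sense_length)
{
        buffer[0] = (sense_length >> 8) & 0xff;
        buffer[1] = sense_length & 0xff;
        return 2;
}

int main(void)
{
        unsigned char sense[64];
        unsigned short off;

        memset(sense, 0, sizeof(sense));
        off = set_sense_len(sense, 18);   /* 18 bytes of sense data follow */
        printf("data starts at offset %u, header = %02x %02x\n",
               (unsigned)off, sense[0], sense[1]);  /* offset 2, header = 00 12 */
        return 0;
}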
1617 1613
1618 static int lio_queue_tm_rsp(struct se_cmd *se_cmd) 1614 static int lio_queue_tm_rsp(struct se_cmd *se_cmd)
1619 { 1615 {
1620 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); 1616 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
1621 1617
1622 cmd->i_state = ISTATE_SEND_TASKMGTRSP; 1618 cmd->i_state = ISTATE_SEND_TASKMGTRSP;
1623 iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state); 1619 iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
1624 return 0; 1620 return 0;
1625 } 1621 }
1626 1622
1627 static char *lio_tpg_get_endpoint_wwn(struct se_portal_group *se_tpg) 1623 static char *lio_tpg_get_endpoint_wwn(struct se_portal_group *se_tpg)
1628 { 1624 {
1629 struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr; 1625 struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
1630 1626
1631 return &tpg->tpg_tiqn->tiqn[0]; 1627 return &tpg->tpg_tiqn->tiqn[0];
1632 } 1628 }
1633 1629
1634 static u16 lio_tpg_get_tag(struct se_portal_group *se_tpg) 1630 static u16 lio_tpg_get_tag(struct se_portal_group *se_tpg)
1635 { 1631 {
1636 struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr; 1632 struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
1637 1633
1638 return tpg->tpgt; 1634 return tpg->tpgt;
1639 } 1635 }
1640 1636
1641 static u32 lio_tpg_get_default_depth(struct se_portal_group *se_tpg) 1637 static u32 lio_tpg_get_default_depth(struct se_portal_group *se_tpg)
1642 { 1638 {
1643 struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr; 1639 struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
1644 1640
1645 return ISCSI_TPG_ATTRIB(tpg)->default_cmdsn_depth; 1641 return ISCSI_TPG_ATTRIB(tpg)->default_cmdsn_depth;
1646 } 1642 }
1647 1643
1648 static int lio_tpg_check_demo_mode(struct se_portal_group *se_tpg) 1644 static int lio_tpg_check_demo_mode(struct se_portal_group *se_tpg)
1649 { 1645 {
1650 struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr; 1646 struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
1651 1647
1652 return ISCSI_TPG_ATTRIB(tpg)->generate_node_acls; 1648 return ISCSI_TPG_ATTRIB(tpg)->generate_node_acls;
1653 } 1649 }
1654 1650
1655 static int lio_tpg_check_demo_mode_cache(struct se_portal_group *se_tpg) 1651 static int lio_tpg_check_demo_mode_cache(struct se_portal_group *se_tpg)
1656 { 1652 {
1657 struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr; 1653 struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
1658 1654
1659 return ISCSI_TPG_ATTRIB(tpg)->cache_dynamic_acls; 1655 return ISCSI_TPG_ATTRIB(tpg)->cache_dynamic_acls;
1660 } 1656 }
1661 1657
1662 static int lio_tpg_check_demo_mode_write_protect( 1658 static int lio_tpg_check_demo_mode_write_protect(
1663 struct se_portal_group *se_tpg) 1659 struct se_portal_group *se_tpg)
1664 { 1660 {
1665 struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr; 1661 struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
1666 1662
1667 return ISCSI_TPG_ATTRIB(tpg)->demo_mode_write_protect; 1663 return ISCSI_TPG_ATTRIB(tpg)->demo_mode_write_protect;
1668 } 1664 }
1669 1665
1670 static int lio_tpg_check_prod_mode_write_protect( 1666 static int lio_tpg_check_prod_mode_write_protect(
1671 struct se_portal_group *se_tpg) 1667 struct se_portal_group *se_tpg)
1672 { 1668 {
1673 struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr; 1669 struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
1674 1670
1675 return ISCSI_TPG_ATTRIB(tpg)->prod_mode_write_protect; 1671 return ISCSI_TPG_ATTRIB(tpg)->prod_mode_write_protect;
1676 } 1672 }
1677 1673
1678 static void lio_tpg_release_fabric_acl( 1674 static void lio_tpg_release_fabric_acl(
1679 struct se_portal_group *se_tpg, 1675 struct se_portal_group *se_tpg,
1680 struct se_node_acl *se_acl) 1676 struct se_node_acl *se_acl)
1681 { 1677 {
1682 struct iscsi_node_acl *acl = container_of(se_acl, 1678 struct iscsi_node_acl *acl = container_of(se_acl,
1683 struct iscsi_node_acl, se_node_acl); 1679 struct iscsi_node_acl, se_node_acl);
1684 kfree(acl); 1680 kfree(acl);
1685 } 1681 }
1686 1682
1687 /* 1683 /*
1688 * Called with spin_lock_bh(struct se_portal_group->session_lock) held. 1684 * Called with spin_lock_bh(struct se_portal_group->session_lock) held.
1689 * 1685 *
1690 * Also, this function calls iscsit_inc_session_usage_count() on the 1686 * Also, this function calls iscsit_inc_session_usage_count() on the
1691 * struct iscsi_session in question. 1687 * struct iscsi_session in question.
1692 */ 1688 */
1693 static int lio_tpg_shutdown_session(struct se_session *se_sess) 1689 static int lio_tpg_shutdown_session(struct se_session *se_sess)
1694 { 1690 {
1695 struct iscsi_session *sess = se_sess->fabric_sess_ptr; 1691 struct iscsi_session *sess = se_sess->fabric_sess_ptr;
1696 1692
1697 spin_lock(&sess->conn_lock); 1693 spin_lock(&sess->conn_lock);
1698 if (atomic_read(&sess->session_fall_back_to_erl0) || 1694 if (atomic_read(&sess->session_fall_back_to_erl0) ||
1699 atomic_read(&sess->session_logout) || 1695 atomic_read(&sess->session_logout) ||
1700 (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) { 1696 (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
1701 spin_unlock(&sess->conn_lock); 1697 spin_unlock(&sess->conn_lock);
1702 return 0; 1698 return 0;
1703 } 1699 }
1704 atomic_set(&sess->session_reinstatement, 1); 1700 atomic_set(&sess->session_reinstatement, 1);
1705 spin_unlock(&sess->conn_lock); 1701 spin_unlock(&sess->conn_lock);
1706 1702
1707 iscsit_inc_session_usage_count(sess); 1703 iscsit_inc_session_usage_count(sess);
1708 iscsit_stop_time2retain_timer(sess); 1704 iscsit_stop_time2retain_timer(sess);
1709 1705
1710 return 1; 1706 return 1;
1711 } 1707 }
1712 1708
1713 /* 1709 /*
1714 * Calls iscsit_dec_session_usage_count() as inverse of 1710 * Calls iscsit_dec_session_usage_count() as inverse of
1715 * lio_tpg_shutdown_session() 1711 * lio_tpg_shutdown_session()
1716 */ 1712 */
1717 static void lio_tpg_close_session(struct se_session *se_sess) 1713 static void lio_tpg_close_session(struct se_session *se_sess)
1718 { 1714 {
1719 struct iscsi_session *sess = se_sess->fabric_sess_ptr; 1715 struct iscsi_session *sess = se_sess->fabric_sess_ptr;
1720 /* 1716 /*
1721 * If the iSCSI Session for the iSCSI Initiator Node exists, 1717 * If the iSCSI Session for the iSCSI Initiator Node exists,
1722 * forcefully shut down the iSCSI NEXUS. 1718 * forcefully shut down the iSCSI NEXUS.
1723 */ 1719 */
1724 iscsit_stop_session(sess, 1, 1); 1720 iscsit_stop_session(sess, 1, 1);
1725 iscsit_dec_session_usage_count(sess); 1721 iscsit_dec_session_usage_count(sess);
1726 iscsit_close_session(sess); 1722 iscsit_close_session(sess);
1727 } 1723 }
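
Taken together, the two callbacks form a reference-counted handshake: shutdown_session() takes a session usage reference when it decides teardown should proceed (returning 1), and close_session() is the path that drops that reference again before freeing the session. The toy model below sketches this pairing under simplified assumptions; the struct and driver function are illustrative stand-ins, not the actual target_core_mod symbols.

#include <stdio.h>

/* Toy model of the shutdown/close pairing. The real flow lives in
 * target_core_mod and the lio_tpg_* callbacks above. */
struct toy_session {
        int usage_count;
        int logged_out;
};

static int toy_shutdown_session(struct toy_session *sess)
{
        if (sess->logged_out)
                return 0;              /* already going away, nothing to do */
        sess->usage_count++;           /* like iscsit_inc_session_usage_count() */
        return 1;                      /* caller must invoke close_session() */
}

static void toy_close_session(struct toy_session *sess)
{
        sess->usage_count--;           /* like iscsit_dec_session_usage_count() */
        printf("session closed, usage_count=%d\n", sess->usage_count);
}

int main(void)
{
        struct toy_session sess = { .usage_count = 0, .logged_out = 0 };

        /* The core only calls close_session() when shutdown said yes. */
        if (toy_shutdown_session(&sess))
                toy_close_session(&sess);
        return 0;
}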
1728 1724
1729 static void lio_tpg_stop_session( 1725 static void lio_tpg_stop_session(
1730 struct se_session *se_sess, 1726 struct se_session *se_sess,
1731 int sess_sleep, 1727 int sess_sleep,
1732 int conn_sleep) 1728 int conn_sleep)
1733 { 1729 {
1734 struct iscsi_session *sess = se_sess->fabric_sess_ptr; 1730 struct iscsi_session *sess = se_sess->fabric_sess_ptr;
1735 1731
1736 iscsit_stop_session(sess, sess_sleep, conn_sleep); 1732 iscsit_stop_session(sess, sess_sleep, conn_sleep);
1737 } 1733 }
1738 1734
1739 static void lio_tpg_fall_back_to_erl0(struct se_session *se_sess) 1735 static void lio_tpg_fall_back_to_erl0(struct se_session *se_sess)
1740 { 1736 {
1741 struct iscsi_session *sess = se_sess->fabric_sess_ptr; 1737 struct iscsi_session *sess = se_sess->fabric_sess_ptr;
1742 1738
1743 iscsit_fall_back_to_erl0(sess); 1739 iscsit_fall_back_to_erl0(sess);
1744 } 1740 }
1745 1741
1746 static u32 lio_tpg_get_inst_index(struct se_portal_group *se_tpg) 1742 static u32 lio_tpg_get_inst_index(struct se_portal_group *se_tpg)
1747 { 1743 {
1748 struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr; 1744 struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
1749 1745
1750 return tpg->tpg_tiqn->tiqn_index; 1746 return tpg->tpg_tiqn->tiqn_index;
1751 } 1747 }
1752 1748
1753 static void lio_set_default_node_attributes(struct se_node_acl *se_acl) 1749 static void lio_set_default_node_attributes(struct se_node_acl *se_acl)
1754 { 1750 {
1755 struct iscsi_node_acl *acl = container_of(se_acl, struct iscsi_node_acl, 1751 struct iscsi_node_acl *acl = container_of(se_acl, struct iscsi_node_acl,
1756 se_node_acl); 1752 se_node_acl);
1757 1753
1758 ISCSI_NODE_ATTRIB(acl)->nacl = acl; 1754 ISCSI_NODE_ATTRIB(acl)->nacl = acl;
1759 iscsit_set_default_node_attribues(acl); 1755 iscsit_set_default_node_attribues(acl);
1760 } 1756 }
1761 1757
1762 static void lio_release_cmd(struct se_cmd *se_cmd) 1758 static void lio_release_cmd(struct se_cmd *se_cmd)
1763 { 1759 {
1764 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); 1760 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
1765 1761
1766 iscsit_release_cmd(cmd); 1762 iscsit_release_cmd(cmd);
1767 } 1763 }
1768 1764
1769 /* End functions for target_core_fabric_ops */ 1765 /* End functions for target_core_fabric_ops */
1770 1766
1771 int iscsi_target_register_configfs(void) 1767 int iscsi_target_register_configfs(void)
1772 { 1768 {
1773 struct target_fabric_configfs *fabric; 1769 struct target_fabric_configfs *fabric;
1774 int ret; 1770 int ret;
1775 1771
1776 lio_target_fabric_configfs = NULL; 1772 lio_target_fabric_configfs = NULL;
1777 fabric = target_fabric_configfs_init(THIS_MODULE, "iscsi"); 1773 fabric = target_fabric_configfs_init(THIS_MODULE, "iscsi");
1778 if (IS_ERR(fabric)) { 1774 if (IS_ERR(fabric)) {
1779 pr_err("target_fabric_configfs_init() for" 1775 pr_err("target_fabric_configfs_init() for"
1780 " LIO-Target failed!\n"); 1776 " LIO-Target failed!\n");
1781 return PTR_ERR(fabric); 1777 return PTR_ERR(fabric);
1782 } 1778 }
1783 /* 1779 /*
1784 * Set up the fabric API of function pointers used by target_core_mod. 1780 * Set up the fabric API of function pointers used by target_core_mod.
1785 */ 1781 */
1786 fabric->tf_ops.get_fabric_name = &iscsi_get_fabric_name; 1782 fabric->tf_ops.get_fabric_name = &iscsi_get_fabric_name;
1787 fabric->tf_ops.get_fabric_proto_ident = &iscsi_get_fabric_proto_ident; 1783 fabric->tf_ops.get_fabric_proto_ident = &iscsi_get_fabric_proto_ident;
1788 fabric->tf_ops.tpg_get_wwn = &lio_tpg_get_endpoint_wwn; 1784 fabric->tf_ops.tpg_get_wwn = &lio_tpg_get_endpoint_wwn;
1789 fabric->tf_ops.tpg_get_tag = &lio_tpg_get_tag; 1785 fabric->tf_ops.tpg_get_tag = &lio_tpg_get_tag;
1790 fabric->tf_ops.tpg_get_default_depth = &lio_tpg_get_default_depth; 1786 fabric->tf_ops.tpg_get_default_depth = &lio_tpg_get_default_depth;
1791 fabric->tf_ops.tpg_get_pr_transport_id = &iscsi_get_pr_transport_id; 1787 fabric->tf_ops.tpg_get_pr_transport_id = &iscsi_get_pr_transport_id;
1792 fabric->tf_ops.tpg_get_pr_transport_id_len = 1788 fabric->tf_ops.tpg_get_pr_transport_id_len =
1793 &iscsi_get_pr_transport_id_len; 1789 &iscsi_get_pr_transport_id_len;
1794 fabric->tf_ops.tpg_parse_pr_out_transport_id = 1790 fabric->tf_ops.tpg_parse_pr_out_transport_id =
1795 &iscsi_parse_pr_out_transport_id; 1791 &iscsi_parse_pr_out_transport_id;
1796 fabric->tf_ops.tpg_check_demo_mode = &lio_tpg_check_demo_mode; 1792 fabric->tf_ops.tpg_check_demo_mode = &lio_tpg_check_demo_mode;
1797 fabric->tf_ops.tpg_check_demo_mode_cache = 1793 fabric->tf_ops.tpg_check_demo_mode_cache =
1798 &lio_tpg_check_demo_mode_cache; 1794 &lio_tpg_check_demo_mode_cache;
1799 fabric->tf_ops.tpg_check_demo_mode_write_protect = 1795 fabric->tf_ops.tpg_check_demo_mode_write_protect =
1800 &lio_tpg_check_demo_mode_write_protect; 1796 &lio_tpg_check_demo_mode_write_protect;
1801 fabric->tf_ops.tpg_check_prod_mode_write_protect = 1797 fabric->tf_ops.tpg_check_prod_mode_write_protect =
1802 &lio_tpg_check_prod_mode_write_protect; 1798 &lio_tpg_check_prod_mode_write_protect;
1803 fabric->tf_ops.tpg_alloc_fabric_acl = &lio_tpg_alloc_fabric_acl; 1799 fabric->tf_ops.tpg_alloc_fabric_acl = &lio_tpg_alloc_fabric_acl;
1804 fabric->tf_ops.tpg_release_fabric_acl = &lio_tpg_release_fabric_acl; 1800 fabric->tf_ops.tpg_release_fabric_acl = &lio_tpg_release_fabric_acl;
1805 fabric->tf_ops.tpg_get_inst_index = &lio_tpg_get_inst_index; 1801 fabric->tf_ops.tpg_get_inst_index = &lio_tpg_get_inst_index;
1806 fabric->tf_ops.release_cmd = &lio_release_cmd; 1802 fabric->tf_ops.release_cmd = &lio_release_cmd;
1807 fabric->tf_ops.shutdown_session = &lio_tpg_shutdown_session; 1803 fabric->tf_ops.shutdown_session = &lio_tpg_shutdown_session;
1808 fabric->tf_ops.close_session = &lio_tpg_close_session; 1804 fabric->tf_ops.close_session = &lio_tpg_close_session;
1809 fabric->tf_ops.stop_session = &lio_tpg_stop_session; 1805 fabric->tf_ops.stop_session = &lio_tpg_stop_session;
1810 fabric->tf_ops.fall_back_to_erl0 = &lio_tpg_fall_back_to_erl0; 1806 fabric->tf_ops.fall_back_to_erl0 = &lio_tpg_fall_back_to_erl0;
1811 fabric->tf_ops.sess_logged_in = &lio_sess_logged_in; 1807 fabric->tf_ops.sess_logged_in = &lio_sess_logged_in;
1812 fabric->tf_ops.sess_get_index = &lio_sess_get_index; 1808 fabric->tf_ops.sess_get_index = &lio_sess_get_index;
1813 fabric->tf_ops.sess_get_initiator_sid = &lio_sess_get_initiator_sid; 1809 fabric->tf_ops.sess_get_initiator_sid = &lio_sess_get_initiator_sid;
1814 fabric->tf_ops.write_pending = &lio_write_pending; 1810 fabric->tf_ops.write_pending = &lio_write_pending;
1815 fabric->tf_ops.write_pending_status = &lio_write_pending_status; 1811 fabric->tf_ops.write_pending_status = &lio_write_pending_status;
1816 fabric->tf_ops.set_default_node_attributes = 1812 fabric->tf_ops.set_default_node_attributes =
1817 &lio_set_default_node_attributes; 1813 &lio_set_default_node_attributes;
1818 fabric->tf_ops.get_task_tag = &iscsi_get_task_tag; 1814 fabric->tf_ops.get_task_tag = &iscsi_get_task_tag;
1819 fabric->tf_ops.get_cmd_state = &iscsi_get_cmd_state; 1815 fabric->tf_ops.get_cmd_state = &iscsi_get_cmd_state;
1820 fabric->tf_ops.queue_data_in = &lio_queue_data_in; 1816 fabric->tf_ops.queue_data_in = &lio_queue_data_in;
1821 fabric->tf_ops.queue_status = &lio_queue_status; 1817 fabric->tf_ops.queue_status = &lio_queue_status;
1822 fabric->tf_ops.queue_tm_rsp = &lio_queue_tm_rsp; 1818 fabric->tf_ops.queue_tm_rsp = &lio_queue_tm_rsp;
1823 fabric->tf_ops.set_fabric_sense_len = &lio_set_fabric_sense_len; 1819 fabric->tf_ops.set_fabric_sense_len = &lio_set_fabric_sense_len;
1824 fabric->tf_ops.get_fabric_sense_len = &lio_get_fabric_sense_len; 1820 fabric->tf_ops.get_fabric_sense_len = &lio_get_fabric_sense_len;
1825 fabric->tf_ops.is_state_remove = &iscsi_is_state_remove; 1821 fabric->tf_ops.is_state_remove = &iscsi_is_state_remove;
1826 /* 1822 /*
1827 * Set up function pointers for generic logic in target_core_fabric_configfs.c 1823 * Set up function pointers for generic logic in target_core_fabric_configfs.c
1828 */ 1824 */
1829 fabric->tf_ops.fabric_make_wwn = &lio_target_call_coreaddtiqn; 1825 fabric->tf_ops.fabric_make_wwn = &lio_target_call_coreaddtiqn;
1830 fabric->tf_ops.fabric_drop_wwn = &lio_target_call_coredeltiqn; 1826 fabric->tf_ops.fabric_drop_wwn = &lio_target_call_coredeltiqn;
1831 fabric->tf_ops.fabric_make_tpg = &lio_target_tiqn_addtpg; 1827 fabric->tf_ops.fabric_make_tpg = &lio_target_tiqn_addtpg;
1832 fabric->tf_ops.fabric_drop_tpg = &lio_target_tiqn_deltpg; 1828 fabric->tf_ops.fabric_drop_tpg = &lio_target_tiqn_deltpg;
1833 fabric->tf_ops.fabric_post_link = NULL; 1829 fabric->tf_ops.fabric_post_link = NULL;
1834 fabric->tf_ops.fabric_pre_unlink = NULL; 1830 fabric->tf_ops.fabric_pre_unlink = NULL;
1835 fabric->tf_ops.fabric_make_np = &lio_target_call_addnptotpg; 1831 fabric->tf_ops.fabric_make_np = &lio_target_call_addnptotpg;
1836 fabric->tf_ops.fabric_drop_np = &lio_target_call_delnpfromtpg; 1832 fabric->tf_ops.fabric_drop_np = &lio_target_call_delnpfromtpg;
1837 fabric->tf_ops.fabric_make_nodeacl = &lio_target_make_nodeacl; 1833 fabric->tf_ops.fabric_make_nodeacl = &lio_target_make_nodeacl;
1838 fabric->tf_ops.fabric_drop_nodeacl = &lio_target_drop_nodeacl; 1834 fabric->tf_ops.fabric_drop_nodeacl = &lio_target_drop_nodeacl;
1839 /* 1835 /*
1840 * Set up default attribute lists for the various fabric->tf_cit_tmpl 1836 * Set up default attribute lists for the various fabric->tf_cit_tmpl
1841 * struct config_item_type's 1837 * struct config_item_type's
1842 */ 1838 */
1843 TF_CIT_TMPL(fabric)->tfc_discovery_cit.ct_attrs = lio_target_discovery_auth_attrs; 1839 TF_CIT_TMPL(fabric)->tfc_discovery_cit.ct_attrs = lio_target_discovery_auth_attrs;
1844 TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = lio_target_wwn_attrs; 1840 TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = lio_target_wwn_attrs;
1845 TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = lio_target_tpg_attrs; 1841 TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = lio_target_tpg_attrs;
1846 TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = lio_target_tpg_attrib_attrs; 1842 TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = lio_target_tpg_attrib_attrs;
1847 TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = lio_target_tpg_param_attrs; 1843 TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = lio_target_tpg_param_attrs;
1848 TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = lio_target_portal_attrs; 1844 TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = lio_target_portal_attrs;
1849 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = lio_target_initiator_attrs; 1845 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = lio_target_initiator_attrs;
1850 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = lio_target_nacl_attrib_attrs; 1846 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = lio_target_nacl_attrib_attrs;
1851 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = lio_target_nacl_auth_attrs; 1847 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = lio_target_nacl_auth_attrs;
1852 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = lio_target_nacl_param_attrs; 1848 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = lio_target_nacl_param_attrs;
1853 1849
1854 ret = target_fabric_configfs_register(fabric); 1850 ret = target_fabric_configfs_register(fabric);
1855 if (ret < 0) { 1851 if (ret < 0) {
1856 pr_err("target_fabric_configfs_register() for" 1852 pr_err("target_fabric_configfs_register() for"
1857 " LIO-Target failed!\n"); 1853 " LIO-Target failed!\n");
1858 target_fabric_configfs_free(fabric); 1854 target_fabric_configfs_free(fabric);
1859 return ret; 1855 return ret;
1860 } 1856 }
1861 1857
1862 lio_target_fabric_configfs = fabric; 1858 lio_target_fabric_configfs = fabric;
1863 pr_debug("LIO_TARGET[0] - Set fabric ->" 1859 pr_debug("LIO_TARGET[0] - Set fabric ->"
1864 " lio_target_fabric_configfs\n"); 1860 " lio_target_fabric_configfs\n");
1865 return 0; 1861 return 0;
1866 } 1862 }
1867 1863
1868 1864
1869 void iscsi_target_deregister_configfs(void) 1865 void iscsi_target_deregister_configfs(void)
1870 { 1866 {
1871 if (!lio_target_fabric_configfs) 1867 if (!lio_target_fabric_configfs)
1872 return; 1868 return;
1873 /* 1869 /*
1874 * Shutdown discovery sessions and disable discovery TPG 1870 * Shutdown discovery sessions and disable discovery TPG
1875 */ 1871 */
1876 if (iscsit_global->discovery_tpg) 1872 if (iscsit_global->discovery_tpg)
1877 iscsit_tpg_disable_portal_group(iscsit_global->discovery_tpg, 1); 1873 iscsit_tpg_disable_portal_group(iscsit_global->discovery_tpg, 1);
1878 1874
1879 target_fabric_configfs_deregister(lio_target_fabric_configfs); 1875 target_fabric_configfs_deregister(lio_target_fabric_configfs);
1880 lio_target_fabric_configfs = NULL; 1876 lio_target_fabric_configfs = NULL;
1881 pr_debug("LIO_TARGET[0] - Cleared" 1877 pr_debug("LIO_TARGET[0] - Cleared"
1882 " lio_target_fabric_configfs\n"); 1878 " lio_target_fabric_configfs\n");
1883 } 1879 }
1884 1880
drivers/target/iscsi/iscsi_target_device.c
1 /******************************************************************************* 1 /*******************************************************************************
2 * This file contains the iSCSI Virtual Device and Disk Transport 2 * This file contains the iSCSI Virtual Device and Disk Transport
3 * agnostic related functions. 3 * agnostic related functions.
4 * 4 *
5 © Copyright 2007-2011 RisingTide Systems LLC. 5 © Copyright 2007-2011 RisingTide Systems LLC.
6 * 6 *
7 * Licensed to the Linux Foundation under the General Public License (GPL) version 2. 7 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
8 * 8 *
9 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> 9 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
10 * 10 *
11 * This program is free software; you can redistribute it and/or modify 11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by 12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or 13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version. 14 * (at your option) any later version.
15 * 15 *
16 * This program is distributed in the hope that it will be useful, 16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details. 19 * GNU General Public License for more details.
20 ******************************************************************************/ 20 ******************************************************************************/
21 21
22 #include <scsi/scsi_device.h> 22 #include <scsi/scsi_device.h>
23 #include <target/target_core_base.h> 23 #include <target/target_core_base.h>
24 #include <target/target_core_device.h> 24 #include <target/target_core_fabric.h>
25 #include <target/target_core_transport.h>
26 25
27 #include "iscsi_target_core.h" 26 #include "iscsi_target_core.h"
28 #include "iscsi_target_device.h" 27 #include "iscsi_target_device.h"
29 #include "iscsi_target_tpg.h" 28 #include "iscsi_target_tpg.h"
30 #include "iscsi_target_util.h" 29 #include "iscsi_target_util.h"
31 30
32 int iscsit_get_lun_for_tmr( 31 int iscsit_get_lun_for_tmr(
33 struct iscsi_cmd *cmd, 32 struct iscsi_cmd *cmd,
34 u64 lun) 33 u64 lun)
35 { 34 {
36 u32 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun); 35 u32 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
37 36
38 return transport_lookup_tmr_lun(&cmd->se_cmd, unpacked_lun); 37 return transport_lookup_tmr_lun(&cmd->se_cmd, unpacked_lun);
39 } 38 }
40 39
41 int iscsit_get_lun_for_cmd( 40 int iscsit_get_lun_for_cmd(
42 struct iscsi_cmd *cmd, 41 struct iscsi_cmd *cmd,
43 unsigned char *cdb, 42 unsigned char *cdb,
44 u64 lun) 43 u64 lun)
45 { 44 {
46 u32 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun); 45 u32 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
47 46
48 return transport_lookup_cmd_lun(&cmd->se_cmd, unpacked_lun); 47 return transport_lookup_cmd_lun(&cmd->se_cmd, unpacked_lun);
49 } 48 }
50 49
51 void iscsit_determine_maxcmdsn(struct iscsi_session *sess) 50 void iscsit_determine_maxcmdsn(struct iscsi_session *sess)
52 { 51 {
53 struct se_node_acl *se_nacl; 52 struct se_node_acl *se_nacl;
54 53
55 /* 54 /*
56 * This is a discovery session, the single queue slot was already 55 * This is a discovery session, the single queue slot was already
57 * assigned in iscsi_login_zero_tsih(). Since only Logout and 56 * assigned in iscsi_login_zero_tsih(). Since only Logout and
58 * Text Opcodes are allowed during discovery, we do not have to worry 57 * Text Opcodes are allowed during discovery, we do not have to worry
59 * about the HBA's queue depth here. 58 * about the HBA's queue depth here.
60 */ 59 */
61 if (sess->sess_ops->SessionType) 60 if (sess->sess_ops->SessionType)
62 return; 61 return;
63 62
64 se_nacl = sess->se_sess->se_node_acl; 63 se_nacl = sess->se_sess->se_node_acl;
65 64
66 /* 65 /*
67 * This is a normal session, set the Session's CmdSN window to the 66 * This is a normal session, set the Session's CmdSN window to the
68 * struct se_node_acl->queue_depth. The value in struct se_node_acl->queue_depth 67 * struct se_node_acl->queue_depth. The value in struct se_node_acl->queue_depth
69 * has already been validated as a legal value in 68 * has already been validated as a legal value in
70 * core_set_queue_depth_for_node(). 69 * core_set_queue_depth_for_node().
71 */ 70 */
72 sess->cmdsn_window = se_nacl->queue_depth; 71 sess->cmdsn_window = se_nacl->queue_depth;
73 sess->max_cmd_sn = (sess->max_cmd_sn + se_nacl->queue_depth) - 1; 72 sess->max_cmd_sn = (sess->max_cmd_sn + se_nacl->queue_depth) - 1;
74 } 73 }
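
The window arithmetic here means that for a queue depth of N, the initiator may have CmdSNs ExpCmdSN through ExpCmdSN + N - 1 in flight at once. A quick worked example of the MaxCmdSN update, assuming (for illustration) that max_cmd_sn starts equal to the post-login ExpCmdSN:

#include <stdio.h>

int main(void)
{
        /* Illustrative values: queue depth from the node ACL, and
         * max_cmd_sn as it stands at the end of login. */
        unsigned int queue_depth = 32;
        unsigned int max_cmd_sn  = 0x10;   /* == ExpCmdSN after login */
        unsigned int cmdsn_window;

        /* Mirrors iscsit_determine_maxcmdsn(): the window spans
         * queue_depth commands, so MaxCmdSN advances by depth - 1. */
        cmdsn_window = queue_depth;
        max_cmd_sn = (max_cmd_sn + queue_depth) - 1;

        printf("window=%u commands, MaxCmdSN=0x%08x\n",
               cmdsn_window, max_cmd_sn);   /* window=32, MaxCmdSN=0x0000002f */
        return 0;
}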
75 74
76 void iscsit_increment_maxcmdsn(struct iscsi_cmd *cmd, struct iscsi_session *sess) 75 void iscsit_increment_maxcmdsn(struct iscsi_cmd *cmd, struct iscsi_session *sess)
77 { 76 {
78 if (cmd->immediate_cmd || cmd->maxcmdsn_inc) 77 if (cmd->immediate_cmd || cmd->maxcmdsn_inc)
79 return; 78 return;
80 79
81 cmd->maxcmdsn_inc = 1; 80 cmd->maxcmdsn_inc = 1;
82 81
83 mutex_lock(&sess->cmdsn_mutex); 82 mutex_lock(&sess->cmdsn_mutex);
84 sess->max_cmd_sn += 1; 83 sess->max_cmd_sn += 1;
85 pr_debug("Updated MaxCmdSN to 0x%08x\n", sess->max_cmd_sn); 84 pr_debug("Updated MaxCmdSN to 0x%08x\n", sess->max_cmd_sn);
86 mutex_unlock(&sess->cmdsn_mutex); 85 mutex_unlock(&sess->cmdsn_mutex);
87 } 86 }
88 87
drivers/target/iscsi/iscsi_target_erl0.c
1 /****************************************************************************** 1 /******************************************************************************
2 * This file contains error recovery level zero functions used by 2 * This file contains error recovery level zero functions used by
3 * the iSCSI Target driver. 3 * the iSCSI Target driver.
4 * 4 *
5 * © Copyright 2007-2011 RisingTide Systems LLC. 5 * © Copyright 2007-2011 RisingTide Systems LLC.
6 * 6 *
7 * Licensed to the Linux Foundation under the General Public License (GPL) version 2. 7 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
8 * 8 *
9 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> 9 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
10 * 10 *
11 * This program is free software; you can redistribute it and/or modify 11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by 12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or 13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version. 14 * (at your option) any later version.
15 * 15 *
16 * This program is distributed in the hope that it will be useful, 16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details. 19 * GNU General Public License for more details.
20 ******************************************************************************/ 20 ******************************************************************************/
21 21
22 #include <scsi/iscsi_proto.h> 22 #include <scsi/iscsi_proto.h>
23 #include <target/target_core_base.h> 23 #include <target/target_core_base.h>
24 #include <target/target_core_transport.h> 24 #include <target/target_core_fabric.h>
25 25
26 #include "iscsi_target_core.h" 26 #include "iscsi_target_core.h"
27 #include "iscsi_target_seq_pdu_list.h" 27 #include "iscsi_target_seq_pdu_list.h"
28 #include "iscsi_target_tq.h" 28 #include "iscsi_target_tq.h"
29 #include "iscsi_target_erl0.h" 29 #include "iscsi_target_erl0.h"
30 #include "iscsi_target_erl1.h" 30 #include "iscsi_target_erl1.h"
31 #include "iscsi_target_erl2.h" 31 #include "iscsi_target_erl2.h"
32 #include "iscsi_target_util.h" 32 #include "iscsi_target_util.h"
33 #include "iscsi_target.h" 33 #include "iscsi_target.h"
34 34
35 /* 35 /*
36 * Used to set values in struct iscsi_cmd that iscsit_dataout_check_sequence() 36 * Used to set values in struct iscsi_cmd that iscsit_dataout_check_sequence()
37 * checks against to determine a PDU's Offset+Length is within the current 37 * checks against to determine a PDU's Offset+Length is within the current
38 * DataOUT Sequence. Used for DataSequenceInOrder=Yes only. 38 * DataOUT Sequence. Used for DataSequenceInOrder=Yes only.
39 */ 39 */
40 void iscsit_set_dataout_sequence_values( 40 void iscsit_set_dataout_sequence_values(
41 struct iscsi_cmd *cmd) 41 struct iscsi_cmd *cmd)
42 { 42 {
43 struct iscsi_conn *conn = cmd->conn; 43 struct iscsi_conn *conn = cmd->conn;
44 /* 44 /*
45 * Still set seq_start_offset and seq_end_offset for Unsolicited 45 * Still set seq_start_offset and seq_end_offset for Unsolicited
46 * DataOUT, even if DataSequenceInOrder=No. 46 * DataOUT, even if DataSequenceInOrder=No.
47 */ 47 */
48 if (cmd->unsolicited_data) { 48 if (cmd->unsolicited_data) {
49 cmd->seq_start_offset = cmd->write_data_done; 49 cmd->seq_start_offset = cmd->write_data_done;
50 cmd->seq_end_offset = (cmd->write_data_done + 50 cmd->seq_end_offset = (cmd->write_data_done +
51 (cmd->data_length > 51 (cmd->data_length >
52 conn->sess->sess_ops->FirstBurstLength) ? 52 conn->sess->sess_ops->FirstBurstLength) ?
53 conn->sess->sess_ops->FirstBurstLength : cmd->data_length); 53 conn->sess->sess_ops->FirstBurstLength : cmd->data_length);
54 return; 54 return;
55 } 55 }
56 56
57 if (!conn->sess->sess_ops->DataSequenceInOrder) 57 if (!conn->sess->sess_ops->DataSequenceInOrder)
58 return; 58 return;
59 59
60 if (!cmd->seq_start_offset && !cmd->seq_end_offset) { 60 if (!cmd->seq_start_offset && !cmd->seq_end_offset) {
61 cmd->seq_start_offset = cmd->write_data_done; 61 cmd->seq_start_offset = cmd->write_data_done;
62 cmd->seq_end_offset = (cmd->data_length > 62 cmd->seq_end_offset = (cmd->data_length >
63 conn->sess->sess_ops->MaxBurstLength) ? 63 conn->sess->sess_ops->MaxBurstLength) ?
64 (cmd->write_data_done + 64 (cmd->write_data_done +
65 conn->sess->sess_ops->MaxBurstLength) : cmd->data_length; 65 conn->sess->sess_ops->MaxBurstLength) : cmd->data_length;
66 } else { 66 } else {
67 cmd->seq_start_offset = cmd->seq_end_offset; 67 cmd->seq_start_offset = cmd->seq_end_offset;
68 cmd->seq_end_offset = ((cmd->seq_end_offset + 68 cmd->seq_end_offset = ((cmd->seq_end_offset +
69 conn->sess->sess_ops->MaxBurstLength) >= 69 conn->sess->sess_ops->MaxBurstLength) >=
70 cmd->data_length) ? cmd->data_length : 70 cmd->data_length) ? cmd->data_length :
71 (cmd->seq_end_offset + 71 (cmd->seq_end_offset +
72 conn->sess->sess_ops->MaxBurstLength); 72 conn->sess->sess_ops->MaxBurstLength);
73 } 73 }
74 } 74 }
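
In other words, an unsolicited sequence is capped at FirstBurstLength, while each subsequent solicited sequence advances in MaxBurstLength-sized windows until ExpXferLen is reached. A simplified standalone sketch of that windowing follows; the negotiated values are made up, and the loop continues directly from the unsolicited window rather than tracking write_data_done as the kernel code does.

#include <stdio.h>

int main(void)
{
        /* Hypothetical negotiated parameters and transfer length. */
        unsigned int first_burst = 65536;    /* FirstBurstLength */
        unsigned int max_burst   = 262144;   /* MaxBurstLength */
        unsigned int data_length = 1000000;  /* ExpXferLen */
        unsigned int start = 0, end;

        /* Unsolicited DataOUT: capped at FirstBurstLength. */
        end = data_length > first_burst ? first_burst : data_length;
        printf("unsolicited: %u:%u\n", start, end);

        /* Solicited sequences: step by MaxBurstLength until done,
         * mirroring the else-branch of
         * iscsit_set_dataout_sequence_values(). */
        while (end < data_length) {
                start = end;
                end = (start + max_burst >= data_length) ?
                      data_length : start + max_burst;
                printf("sequence: %u:%u\n", start, end);
        }
        return 0;
}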
75 75
76 static int iscsit_dataout_within_command_recovery_check( 76 static int iscsit_dataout_within_command_recovery_check(
77 struct iscsi_cmd *cmd, 77 struct iscsi_cmd *cmd,
78 unsigned char *buf) 78 unsigned char *buf)
79 { 79 {
80 struct iscsi_conn *conn = cmd->conn; 80 struct iscsi_conn *conn = cmd->conn;
81 struct iscsi_data *hdr = (struct iscsi_data *) buf; 81 struct iscsi_data *hdr = (struct iscsi_data *) buf;
82 u32 payload_length = ntoh24(hdr->dlength); 82 u32 payload_length = ntoh24(hdr->dlength);
83 83
84 /* 84 /*
85 * We do the within-command recovery checks here as it is 85 * We do the within-command recovery checks here as it is
86 * the first function called in iscsit_check_pre_dataout(). 86 * the first function called in iscsit_check_pre_dataout().
87 * Basically, if we are in within-command recovery and 87 * Basically, if we are in within-command recovery and
88 * the PDU does not contain the offset the sequence needs, 88 * the PDU does not contain the offset the sequence needs,
89 * dump the payload. 89 * dump the payload.
90 * 90 *
91 * This only applies to DataPDUInOrder=Yes, for 91 * This only applies to DataPDUInOrder=Yes, for
92 * DataPDUInOrder=No we only re-request the failed PDU 92 * DataPDUInOrder=No we only re-request the failed PDU
93 * and check that all PDUs in a sequence are received 93 * and check that all PDUs in a sequence are received
94 * upon end of sequence. 94 * upon end of sequence.
95 */ 95 */
96 if (conn->sess->sess_ops->DataSequenceInOrder) { 96 if (conn->sess->sess_ops->DataSequenceInOrder) {
97 if ((cmd->cmd_flags & ICF_WITHIN_COMMAND_RECOVERY) && 97 if ((cmd->cmd_flags & ICF_WITHIN_COMMAND_RECOVERY) &&
98 (cmd->write_data_done != hdr->offset)) 98 (cmd->write_data_done != hdr->offset))
99 goto dump; 99 goto dump;
100 100
101 cmd->cmd_flags &= ~ICF_WITHIN_COMMAND_RECOVERY; 101 cmd->cmd_flags &= ~ICF_WITHIN_COMMAND_RECOVERY;
102 } else { 102 } else {
103 struct iscsi_seq *seq; 103 struct iscsi_seq *seq;
104 104
105 seq = iscsit_get_seq_holder(cmd, hdr->offset, payload_length); 105 seq = iscsit_get_seq_holder(cmd, hdr->offset, payload_length);
106 if (!seq) 106 if (!seq)
107 return DATAOUT_CANNOT_RECOVER; 107 return DATAOUT_CANNOT_RECOVER;
108 /* 108 /*
109 * Set the struct iscsi_seq pointer to reuse later. 109 * Set the struct iscsi_seq pointer to reuse later.
110 */ 110 */
111 cmd->seq_ptr = seq; 111 cmd->seq_ptr = seq;
112 112
113 if (conn->sess->sess_ops->DataPDUInOrder) { 113 if (conn->sess->sess_ops->DataPDUInOrder) {
114 if ((seq->status == 114 if ((seq->status ==
115 DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY) && 115 DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY) &&
116 ((seq->offset != hdr->offset) || 116 ((seq->offset != hdr->offset) ||
117 (seq->data_sn != hdr->datasn))) 117 (seq->data_sn != hdr->datasn)))
118 goto dump; 118 goto dump;
119 } else { 119 } else {
120 if ((seq->status == 120 if ((seq->status ==
121 DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY) && 121 DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY) &&
122 (seq->data_sn != hdr->datasn)) 122 (seq->data_sn != hdr->datasn))
123 goto dump; 123 goto dump;
124 } 124 }
125 125
126 if (seq->status == DATAOUT_SEQUENCE_COMPLETE) 126 if (seq->status == DATAOUT_SEQUENCE_COMPLETE)
127 goto dump; 127 goto dump;
128 128
129 if (seq->status != DATAOUT_SEQUENCE_COMPLETE) 129 if (seq->status != DATAOUT_SEQUENCE_COMPLETE)
130 seq->status = 0; 130 seq->status = 0;
131 } 131 }
132 132
133 return DATAOUT_NORMAL; 133 return DATAOUT_NORMAL;
134 134
135 dump: 135 dump:
136 pr_err("Dumping DataOUT PDU Offset: %u Length: %d DataSN:" 136 pr_err("Dumping DataOUT PDU Offset: %u Length: %d DataSN:"
137 " 0x%08x\n", hdr->offset, payload_length, hdr->datasn); 137 " 0x%08x\n", hdr->offset, payload_length, hdr->datasn);
138 return iscsit_dump_data_payload(conn, payload_length, 1); 138 return iscsit_dump_data_payload(conn, payload_length, 1);
139 } 139 }
140 140
141 static int iscsit_dataout_check_unsolicited_sequence( 141 static int iscsit_dataout_check_unsolicited_sequence(
142 struct iscsi_cmd *cmd, 142 struct iscsi_cmd *cmd,
143 unsigned char *buf) 143 unsigned char *buf)
144 { 144 {
145 u32 first_burst_len; 145 u32 first_burst_len;
146 struct iscsi_conn *conn = cmd->conn; 146 struct iscsi_conn *conn = cmd->conn;
147 struct iscsi_data *hdr = (struct iscsi_data *) buf; 147 struct iscsi_data *hdr = (struct iscsi_data *) buf;
148 u32 payload_length = ntoh24(hdr->dlength); 148 u32 payload_length = ntoh24(hdr->dlength);
149 149
150 150
151 if ((hdr->offset < cmd->seq_start_offset) || 151 if ((hdr->offset < cmd->seq_start_offset) ||
152 ((hdr->offset + payload_length) > cmd->seq_end_offset)) { 152 ((hdr->offset + payload_length) > cmd->seq_end_offset)) {
153 pr_err("Command ITT: 0x%08x with Offset: %u," 153 pr_err("Command ITT: 0x%08x with Offset: %u,"
154 " Length: %u outside of Unsolicited Sequence %u:%u while" 154 " Length: %u outside of Unsolicited Sequence %u:%u while"
155 " DataSequenceInOrder=Yes.\n", cmd->init_task_tag, 155 " DataSequenceInOrder=Yes.\n", cmd->init_task_tag,
156 hdr->offset, payload_length, cmd->seq_start_offset, 156 hdr->offset, payload_length, cmd->seq_start_offset,
157 cmd->seq_end_offset); 157 cmd->seq_end_offset);
158 return DATAOUT_CANNOT_RECOVER; 158 return DATAOUT_CANNOT_RECOVER;
159 } 159 }
160 160
161 first_burst_len = (cmd->first_burst_len + payload_length); 161 first_burst_len = (cmd->first_burst_len + payload_length);
162 162
163 if (first_burst_len > conn->sess->sess_ops->FirstBurstLength) { 163 if (first_burst_len > conn->sess->sess_ops->FirstBurstLength) {
164 pr_err("Total %u bytes exceeds FirstBurstLength: %u" 164 pr_err("Total %u bytes exceeds FirstBurstLength: %u"
165 " for this Unsolicited DataOut Burst.\n", 165 " for this Unsolicited DataOut Burst.\n",
166 first_burst_len, conn->sess->sess_ops->FirstBurstLength); 166 first_burst_len, conn->sess->sess_ops->FirstBurstLength);
167 transport_send_check_condition_and_sense(&cmd->se_cmd, 167 transport_send_check_condition_and_sense(&cmd->se_cmd,
168 TCM_INCORRECT_AMOUNT_OF_DATA, 0); 168 TCM_INCORRECT_AMOUNT_OF_DATA, 0);
169 return DATAOUT_CANNOT_RECOVER; 169 return DATAOUT_CANNOT_RECOVER;
170 } 170 }
171 171
172 /* 172 /*
173 * Perform various MaxBurstLength and ISCSI_FLAG_CMD_FINAL sanity 173 * Perform various MaxBurstLength and ISCSI_FLAG_CMD_FINAL sanity
174 * checks for the current Unsolicited DataOUT Sequence. 174 * checks for the current Unsolicited DataOUT Sequence.
175 */ 175 */
176 if (hdr->flags & ISCSI_FLAG_CMD_FINAL) { 176 if (hdr->flags & ISCSI_FLAG_CMD_FINAL) {
177 /* 177 /*
178 * Ignore ISCSI_FLAG_CMD_FINAL checks while DataPDUInOrder=No, end of 178 * Ignore ISCSI_FLAG_CMD_FINAL checks while DataPDUInOrder=No, end of
179 * sequence checks are handled in 179 * sequence checks are handled in
180 * iscsit_dataout_datapduinorder_no_fbit(). 180 * iscsit_dataout_datapduinorder_no_fbit().
181 */ 181 */
182 if (!conn->sess->sess_ops->DataPDUInOrder) 182 if (!conn->sess->sess_ops->DataPDUInOrder)
183 goto out; 183 goto out;
184 184
185 if ((first_burst_len != cmd->data_length) && 185 if ((first_burst_len != cmd->data_length) &&
186 (first_burst_len != conn->sess->sess_ops->FirstBurstLength)) { 186 (first_burst_len != conn->sess->sess_ops->FirstBurstLength)) {
187 pr_err("Unsolicited non-immediate data" 187 pr_err("Unsolicited non-immediate data"
188 " received %u does not equal FirstBurstLength: %u, and" 188 " received %u does not equal FirstBurstLength: %u, and"
189 " does not equal ExpXferLen %u.\n", first_burst_len, 189 " does not equal ExpXferLen %u.\n", first_burst_len,
190 conn->sess->sess_ops->FirstBurstLength, 190 conn->sess->sess_ops->FirstBurstLength,
191 cmd->data_length); 191 cmd->data_length);
192 transport_send_check_condition_and_sense(&cmd->se_cmd, 192 transport_send_check_condition_and_sense(&cmd->se_cmd,
193 TCM_INCORRECT_AMOUNT_OF_DATA, 0); 193 TCM_INCORRECT_AMOUNT_OF_DATA, 0);
194 return DATAOUT_CANNOT_RECOVER; 194 return DATAOUT_CANNOT_RECOVER;
195 } 195 }
196 } else { 196 } else {
197 if (first_burst_len == conn->sess->sess_ops->FirstBurstLength) { 197 if (first_burst_len == conn->sess->sess_ops->FirstBurstLength) {
198 pr_err("Command ITT: 0x%08x reached" 198 pr_err("Command ITT: 0x%08x reached"
199 " FirstBurstLength: %u, but ISCSI_FLAG_CMD_FINAL is not set. protocol" 199 " FirstBurstLength: %u, but ISCSI_FLAG_CMD_FINAL is not set. protocol"
200 " error.\n", cmd->init_task_tag, 200 " error.\n", cmd->init_task_tag,
201 conn->sess->sess_ops->FirstBurstLength); 201 conn->sess->sess_ops->FirstBurstLength);
202 return DATAOUT_CANNOT_RECOVER; 202 return DATAOUT_CANNOT_RECOVER;
203 } 203 }
204 if (first_burst_len == cmd->data_length) { 204 if (first_burst_len == cmd->data_length) {
205 pr_err("Command ITT: 0x%08x reached" 205 pr_err("Command ITT: 0x%08x reached"
206 " ExpXferLen: %u, but ISCSI_FLAG_CMD_FINAL is not set. protocol" 206 " ExpXferLen: %u, but ISCSI_FLAG_CMD_FINAL is not set. protocol"
207 " error.\n", cmd->init_task_tag, cmd->data_length); 207 " error.\n", cmd->init_task_tag, cmd->data_length);
208 return DATAOUT_CANNOT_RECOVER; 208 return DATAOUT_CANNOT_RECOVER;
209 } 209 }
210 } 210 }
211 211
212 out: 212 out:
213 return DATAOUT_NORMAL; 213 return DATAOUT_NORMAL;
214 } 214 }
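
For the DataPDUInOrder=Yes path, the F-bit rules above boil down to: the running unsolicited total may close the burst only at exactly FirstBurstLength or exactly ExpXferLen, and must close it once either boundary is hit. A compact sketch of that acceptance check with illustrative numbers (this models only the boundary logic, not the payload dumping or sense reporting):

#include <stdio.h>

/* Returns 0 if an unsolicited DataOUT PDU passes the F-bit and
 * FirstBurstLength boundary checks modeled on
 * iscsit_dataout_check_unsolicited_sequence(), -1 otherwise. */
static int check_unsolicited(unsigned int burst_so_far,
                             unsigned int payload_len,
                             unsigned int first_burst,
                             unsigned int xfer_len,
                             int f_bit)
{
        unsigned int total = burst_so_far + payload_len;

        if (total > first_burst)
                return -1;             /* exceeded FirstBurstLength */
        if (f_bit)
                return (total == xfer_len || total == first_burst) ? 0 : -1;
        /* Without the F bit we must not already be at a boundary. */
        return (total == first_burst || total == xfer_len) ? -1 : 0;
}

int main(void)
{
        /* Hypothetical: FirstBurstLength=64k, ExpXferLen=100k. */
        printf("%d\n", check_unsolicited(32768, 32768, 65536, 102400, 1)); /* 0: closes at 64k with F bit */
        printf("%d\n", check_unsolicited(32768, 32768, 65536, 102400, 0)); /* -1: at 64k without F bit */
        printf("%d\n", check_unsolicited(65536,  8192, 65536, 102400, 1)); /* -1: overruns FirstBurstLength */
        return 0;
}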
215 215
216 static int iscsit_dataout_check_sequence( 216 static int iscsit_dataout_check_sequence(
217 struct iscsi_cmd *cmd, 217 struct iscsi_cmd *cmd,
218 unsigned char *buf) 218 unsigned char *buf)
219 { 219 {
220 u32 next_burst_len; 220 u32 next_burst_len;
221 struct iscsi_conn *conn = cmd->conn; 221 struct iscsi_conn *conn = cmd->conn;
222 struct iscsi_seq *seq = NULL; 222 struct iscsi_seq *seq = NULL;
223 struct iscsi_data *hdr = (struct iscsi_data *) buf; 223 struct iscsi_data *hdr = (struct iscsi_data *) buf;
224 u32 payload_length = ntoh24(hdr->dlength); 224 u32 payload_length = ntoh24(hdr->dlength);
225 225
226 /* 226 /*
227 * For DataSequenceInOrder=Yes: Check that the offset and offset+length 227 * For DataSequenceInOrder=Yes: Check that the offset and offset+length
228 * is within range as defined by iscsit_set_dataout_sequence_values(). 228 * is within range as defined by iscsit_set_dataout_sequence_values().
229 * 229 *
230 * For DataSequenceInOrder=No: Check that a struct iscsi_seq exists for 230 * For DataSequenceInOrder=No: Check that a struct iscsi_seq exists for
231 * the offset+length tuple. 231 * the offset+length tuple.
232 */ 232 */
233 if (conn->sess->sess_ops->DataSequenceInOrder) { 233 if (conn->sess->sess_ops->DataSequenceInOrder) {
234 /* 234 /*
235 * Due to possibility of recovery DataOUT sent by the initiator 235 * Due to possibility of recovery DataOUT sent by the initiator
236 * fullfilling an Recovery R2T, it's best to just dump the 236 * fullfilling an Recovery R2T, it's best to just dump the
237 * payload here, instead of erroring out. 237 * payload here, instead of erroring out.
238 */ 238 */
239 if ((hdr->offset < cmd->seq_start_offset) || 239 if ((hdr->offset < cmd->seq_start_offset) ||
240 ((hdr->offset + payload_length) > cmd->seq_end_offset)) { 240 ((hdr->offset + payload_length) > cmd->seq_end_offset)) {
241 pr_err("Command ITT: 0x%08x with Offset: %u," 241 pr_err("Command ITT: 0x%08x with Offset: %u,"
242 " Length: %u outside of Sequence %u:%u while" 242 " Length: %u outside of Sequence %u:%u while"
243 " DataSequenceInOrder=Yes.\n", cmd->init_task_tag, 243 " DataSequenceInOrder=Yes.\n", cmd->init_task_tag,
244 hdr->offset, payload_length, cmd->seq_start_offset, 244 hdr->offset, payload_length, cmd->seq_start_offset,
245 cmd->seq_end_offset); 245 cmd->seq_end_offset);
246 246
247 if (iscsit_dump_data_payload(conn, payload_length, 1) < 0) 247 if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
248 return DATAOUT_CANNOT_RECOVER; 248 return DATAOUT_CANNOT_RECOVER;
249 return DATAOUT_WITHIN_COMMAND_RECOVERY; 249 return DATAOUT_WITHIN_COMMAND_RECOVERY;
250 } 250 }
251 251
252 next_burst_len = (cmd->next_burst_len + payload_length); 252 next_burst_len = (cmd->next_burst_len + payload_length);
253 } else { 253 } else {
254 seq = iscsit_get_seq_holder(cmd, hdr->offset, payload_length); 254 seq = iscsit_get_seq_holder(cmd, hdr->offset, payload_length);
255 if (!seq) 255 if (!seq)
256 return DATAOUT_CANNOT_RECOVER; 256 return DATAOUT_CANNOT_RECOVER;
257 /* 257 /*
258 * Set the struct iscsi_seq pointer to reuse later. 258 * Set the struct iscsi_seq pointer to reuse later.
259 */ 259 */
260 cmd->seq_ptr = seq; 260 cmd->seq_ptr = seq;
261 261
262 if (seq->status == DATAOUT_SEQUENCE_COMPLETE) { 262 if (seq->status == DATAOUT_SEQUENCE_COMPLETE) {
263 if (iscsit_dump_data_payload(conn, payload_length, 1) < 0) 263 if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
264 return DATAOUT_CANNOT_RECOVER; 264 return DATAOUT_CANNOT_RECOVER;
265 return DATAOUT_WITHIN_COMMAND_RECOVERY; 265 return DATAOUT_WITHIN_COMMAND_RECOVERY;
266 } 266 }
267 267
268 next_burst_len = (seq->next_burst_len + payload_length); 268 next_burst_len = (seq->next_burst_len + payload_length);
269 } 269 }
270 270
271 if (next_burst_len > conn->sess->sess_ops->MaxBurstLength) { 271 if (next_burst_len > conn->sess->sess_ops->MaxBurstLength) {
272 pr_err("Command ITT: 0x%08x, NextBurstLength: %u and" 272 pr_err("Command ITT: 0x%08x, NextBurstLength: %u and"
273 " Length: %u exceeds MaxBurstLength: %u. protocol" 273 " Length: %u exceeds MaxBurstLength: %u. protocol"
274 " error.\n", cmd->init_task_tag, 274 " error.\n", cmd->init_task_tag,
275 (next_burst_len - payload_length), 275 (next_burst_len - payload_length),
276 payload_length, conn->sess->sess_ops->MaxBurstLength); 276 payload_length, conn->sess->sess_ops->MaxBurstLength);
277 return DATAOUT_CANNOT_RECOVER; 277 return DATAOUT_CANNOT_RECOVER;
278 } 278 }
279 279
280 /* 280 /*
281 * Perform various MaxBurstLength and ISCSI_FLAG_CMD_FINAL sanity 281 * Perform various MaxBurstLength and ISCSI_FLAG_CMD_FINAL sanity
282 * checks for the current DataOUT Sequence. 282 * checks for the current DataOUT Sequence.
283 */ 283 */
284 if (hdr->flags & ISCSI_FLAG_CMD_FINAL) { 284 if (hdr->flags & ISCSI_FLAG_CMD_FINAL) {
285 /* 285 /*
286 * Ignore ISCSI_FLAG_CMD_FINAL checks while DataPDUInOrder=No, end of 286 * Ignore ISCSI_FLAG_CMD_FINAL checks while DataPDUInOrder=No, end of
287 * sequence checks are handled in 287 * sequence checks are handled in
288 * iscsit_dataout_datapduinorder_no_fbit(). 288 * iscsit_dataout_datapduinorder_no_fbit().
289 */ 289 */
290 if (!conn->sess->sess_ops->DataPDUInOrder) 290 if (!conn->sess->sess_ops->DataPDUInOrder)
291 goto out; 291 goto out;
292 292
293 if (conn->sess->sess_ops->DataSequenceInOrder) { 293 if (conn->sess->sess_ops->DataSequenceInOrder) {
294 if ((next_burst_len < 294 if ((next_burst_len <
295 conn->sess->sess_ops->MaxBurstLength) && 295 conn->sess->sess_ops->MaxBurstLength) &&
296 ((cmd->write_data_done + payload_length) < 296 ((cmd->write_data_done + payload_length) <
297 cmd->data_length)) { 297 cmd->data_length)) {
298 pr_err("Command ITT: 0x%08x set ISCSI_FLAG_CMD_FINAL" 298 pr_err("Command ITT: 0x%08x set ISCSI_FLAG_CMD_FINAL"
299 " before end of DataOUT sequence, protocol" 299 " before end of DataOUT sequence, protocol"
300 " error.\n", cmd->init_task_tag); 300 " error.\n", cmd->init_task_tag);
301 return DATAOUT_CANNOT_RECOVER; 301 return DATAOUT_CANNOT_RECOVER;
302 } 302 }
303 } else { 303 } else {
304 if (next_burst_len < seq->xfer_len) { 304 if (next_burst_len < seq->xfer_len) {
305 pr_err("Command ITT: 0x%08x set ISCSI_FLAG_CMD_FINAL" 305 pr_err("Command ITT: 0x%08x set ISCSI_FLAG_CMD_FINAL"
306 " before end of DataOUT sequence, protocol" 306 " before end of DataOUT sequence, protocol"
307 " error.\n", cmd->init_task_tag); 307 " error.\n", cmd->init_task_tag);
308 return DATAOUT_CANNOT_RECOVER; 308 return DATAOUT_CANNOT_RECOVER;
309 } 309 }
310 } 310 }
311 } else { 311 } else {
312 if (conn->sess->sess_ops->DataSequenceInOrder) { 312 if (conn->sess->sess_ops->DataSequenceInOrder) {
313 if (next_burst_len == 313 if (next_burst_len ==
314 conn->sess->sess_ops->MaxBurstLength) { 314 conn->sess->sess_ops->MaxBurstLength) {
315 pr_err("Command ITT: 0x%08x reached" 315 pr_err("Command ITT: 0x%08x reached"
316 " MaxBurstLength: %u, but ISCSI_FLAG_CMD_FINAL is" 316 " MaxBurstLength: %u, but ISCSI_FLAG_CMD_FINAL is"
317 " not set, protocol error.\n", cmd->init_task_tag, 317 " not set, protocol error.\n", cmd->init_task_tag,
318 conn->sess->sess_ops->MaxBurstLength); 318 conn->sess->sess_ops->MaxBurstLength);
319 return DATAOUT_CANNOT_RECOVER; 319 return DATAOUT_CANNOT_RECOVER;
320 } 320 }
321 if ((cmd->write_data_done + payload_length) == 321 if ((cmd->write_data_done + payload_length) ==
322 cmd->data_length) { 322 cmd->data_length) {
323 pr_err("Command ITT: 0x%08x reached" 323 pr_err("Command ITT: 0x%08x reached"
324 " last DataOUT PDU in sequence but ISCSI_FLAG_" 324 " last DataOUT PDU in sequence but ISCSI_FLAG_"
325 "CMD_FINAL is not set, protocol error.\n", 325 "CMD_FINAL is not set, protocol error.\n",
326 cmd->init_task_tag); 326 cmd->init_task_tag);
327 return DATAOUT_CANNOT_RECOVER; 327 return DATAOUT_CANNOT_RECOVER;
328 } 328 }
329 } else { 329 } else {
330 if (next_burst_len == seq->xfer_len) { 330 if (next_burst_len == seq->xfer_len) {
331 pr_err("Command ITT: 0x%08x reached" 331 pr_err("Command ITT: 0x%08x reached"
332 " last DataOUT PDU in sequence but ISCSI_FLAG_" 332 " last DataOUT PDU in sequence but ISCSI_FLAG_"
333 "CMD_FINAL is not set, protocol error.\n", 333 "CMD_FINAL is not set, protocol error.\n",
334 cmd->init_task_tag); 334 cmd->init_task_tag);
335 return DATAOUT_CANNOT_RECOVER; 335 return DATAOUT_CANNOT_RECOVER;
336 } 336 }
337 } 337 }
338 } 338 }
339 339
340 out: 340 out:
341 return DATAOUT_NORMAL; 341 return DATAOUT_NORMAL;
342 } 342 }
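
The F-bit cross-checks above reduce to a small decision table: with ISCSI_FLAG_CMD_FINAL set, neither the burst nor the command may still be unfinished; with it clear, neither may be complete. A minimal userspace sketch of the DataSequenceInOrder=Yes branch, with illustrative values standing in for the negotiated session parameters (next_burst_len here already includes the current payload, as in the code above):

#include <stdio.h>

/* Sketch only: simplified model of the ISCSI_FLAG_CMD_FINAL vs.
 * MaxBurstLength cross-check for DataSequenceInOrder=Yes. All
 * values are illustrative, not taken from a real session. */
static int check_final_flag(int f_bit, unsigned next_burst_len,
                            unsigned max_burst_len, unsigned written,
                            unsigned total)
{
        if (f_bit) {
                /* F set too early: burst and command both unfinished */
                if (next_burst_len < max_burst_len && written < total)
                        return -1;
        } else {
                /* F missing although the burst or the command is done */
                if (next_burst_len == max_burst_len || written == total)
                        return -1;
        }
        return 0;
}

int main(void)
{
        /* F set with half a 64k burst outstanding: protocol error (-1) */
        printf("%d\n", check_final_flag(1, 32768, 65536, 32768, 131072));
        /* burst completed exactly and F set: accepted (0) */
        printf("%d\n", check_final_flag(1, 65536, 65536, 65536, 131072));
        return 0;
}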
343 343
344 static int iscsit_dataout_check_datasn( 344 static int iscsit_dataout_check_datasn(
345 struct iscsi_cmd *cmd, 345 struct iscsi_cmd *cmd,
346 unsigned char *buf) 346 unsigned char *buf)
347 { 347 {
348 int dump = 0, recovery = 0; 348 int dump = 0, recovery = 0;
349 u32 data_sn = 0; 349 u32 data_sn = 0;
350 struct iscsi_conn *conn = cmd->conn; 350 struct iscsi_conn *conn = cmd->conn;
351 struct iscsi_data *hdr = (struct iscsi_data *) buf; 351 struct iscsi_data *hdr = (struct iscsi_data *) buf;
352 u32 payload_length = ntoh24(hdr->dlength); 352 u32 payload_length = ntoh24(hdr->dlength);
353 353
354 /* 354 /*
355 * Considering the target has no method of re-requesting DataOUT 355 * Considering the target has no method of re-requesting DataOUT
356 * by DataSN, if we receive a greater DataSN than expected we 356 * by DataSN, if we receive a greater DataSN than expected we
357 * assume the functions for DataPDUInOrder=[Yes,No] below will 357 * assume the functions for DataPDUInOrder=[Yes,No] below will
358 * handle it. 358 * handle it.
359 * 359 *
360 * If the DataSN is less than expected, dump the payload. 360 * If the DataSN is less than expected, dump the payload.
361 */ 361 */
362 if (conn->sess->sess_ops->DataSequenceInOrder) 362 if (conn->sess->sess_ops->DataSequenceInOrder)
363 data_sn = cmd->data_sn; 363 data_sn = cmd->data_sn;
364 else { 364 else {
365 struct iscsi_seq *seq = cmd->seq_ptr; 365 struct iscsi_seq *seq = cmd->seq_ptr;
366 data_sn = seq->data_sn; 366 data_sn = seq->data_sn;
367 } 367 }
368 368
369 if (hdr->datasn > data_sn) { 369 if (hdr->datasn > data_sn) {
370 pr_err("Command ITT: 0x%08x, received DataSN: 0x%08x" 370 pr_err("Command ITT: 0x%08x, received DataSN: 0x%08x"
371 " higher than expected 0x%08x.\n", cmd->init_task_tag, 371 " higher than expected 0x%08x.\n", cmd->init_task_tag,
372 hdr->datasn, data_sn); 372 hdr->datasn, data_sn);
373 recovery = 1; 373 recovery = 1;
374 goto recover; 374 goto recover;
375 } else if (hdr->datasn < data_sn) { 375 } else if (hdr->datasn < data_sn) {
376 pr_err("Command ITT: 0x%08x, received DataSN: 0x%08x" 376 pr_err("Command ITT: 0x%08x, received DataSN: 0x%08x"
377 " lower than expected 0x%08x, discarding payload.\n", 377 " lower than expected 0x%08x, discarding payload.\n",
378 cmd->init_task_tag, hdr->datasn, data_sn); 378 cmd->init_task_tag, hdr->datasn, data_sn);
379 dump = 1; 379 dump = 1;
380 goto dump; 380 goto dump;
381 } 381 }
382 382
383 return DATAOUT_NORMAL; 383 return DATAOUT_NORMAL;
384 384
385 recover: 385 recover:
386 if (!conn->sess->sess_ops->ErrorRecoveryLevel) { 386 if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
387 pr_err("Unable to perform within-command recovery" 387 pr_err("Unable to perform within-command recovery"
388 " while ERL=0.\n"); 388 " while ERL=0.\n");
389 return DATAOUT_CANNOT_RECOVER; 389 return DATAOUT_CANNOT_RECOVER;
390 } 390 }
391 dump: 391 dump:
392 if (iscsit_dump_data_payload(conn, payload_length, 1) < 0) 392 if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
393 return DATAOUT_CANNOT_RECOVER; 393 return DATAOUT_CANNOT_RECOVER;
394 394
395 return (recovery || dump) ? DATAOUT_WITHIN_COMMAND_RECOVERY : 395 return (recovery || dump) ? DATAOUT_WITHIN_COMMAND_RECOVERY :
396 DATAOUT_NORMAL; 396 DATAOUT_NORMAL;
397 } 397 }
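
The DataSN comparison above is a three-way triage: a PDU ahead of the expected sequence number enters within-command recovery (fatal at ERL=0), one behind it simply has its payload dumped, and an exact match proceeds normally. A standalone model of just that logic:

#include <stdio.h>

/* Sketch only: the three-way DataSN triage above. */
enum verdict { NORMAL, WITHIN_CMD_RECOVERY, CANNOT_RECOVER };

static enum verdict triage_datasn(unsigned received, unsigned expected,
                                  int erl)
{
        if (received > expected)        /* ahead: recover if ERL permits */
                return erl ? WITHIN_CMD_RECOVERY : CANNOT_RECOVER;
        if (received < expected)        /* behind: dump payload, carry on */
                return WITHIN_CMD_RECOVERY;
        return NORMAL;
}

int main(void)
{
        printf("%d %d %d\n",
               triage_datasn(3, 5, 0),  /* stale PDU: recoverable dump */
               triage_datasn(7, 5, 0),  /* ahead at ERL=0: fatal */
               triage_datasn(5, 5, 1)); /* in step: normal */
        return 0;
}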
398 398
399 static int iscsit_dataout_pre_datapduinorder_yes( 399 static int iscsit_dataout_pre_datapduinorder_yes(
400 struct iscsi_cmd *cmd, 400 struct iscsi_cmd *cmd,
401 unsigned char *buf) 401 unsigned char *buf)
402 { 402 {
403 int dump = 0, recovery = 0; 403 int dump = 0, recovery = 0;
404 struct iscsi_conn *conn = cmd->conn; 404 struct iscsi_conn *conn = cmd->conn;
405 struct iscsi_data *hdr = (struct iscsi_data *) buf; 405 struct iscsi_data *hdr = (struct iscsi_data *) buf;
406 u32 payload_length = ntoh24(hdr->dlength); 406 u32 payload_length = ntoh24(hdr->dlength);
407 407
408 /* 408 /*
409 * For DataSequenceInOrder=Yes: If the offset is greater than the global 409 * For DataSequenceInOrder=Yes: If the offset is greater than the global
410 * DataPDUInOrder=Yes offset counter in struct iscsi_cmd, a protocol error has 410 * DataPDUInOrder=Yes offset counter in struct iscsi_cmd, a protocol error has
411 * occurred, so fail the connection. 411 * occurred, so fail the connection.
412 * 412 *
413 * For DataSequenceInOrder=No: If the offset is greater than the per 413 * For DataSequenceInOrder=No: If the offset is greater than the per
414 * sequence DataPDUInOrder=Yes offset counter in struct iscsi_seq, a protocol 414 * sequence DataPDUInOrder=Yes offset counter in struct iscsi_seq, a protocol
415 * error has occurred, so fail the connection. 415 * error has occurred, so fail the connection.
416 */ 416 */
417 if (conn->sess->sess_ops->DataSequenceInOrder) { 417 if (conn->sess->sess_ops->DataSequenceInOrder) {
418 if (hdr->offset != cmd->write_data_done) { 418 if (hdr->offset != cmd->write_data_done) {
419 pr_err("Command ITT: 0x%08x, received offset" 419 pr_err("Command ITT: 0x%08x, received offset"
420 " %u different than expected %u.\n", cmd->init_task_tag, 420 " %u different than expected %u.\n", cmd->init_task_tag,
421 hdr->offset, cmd->write_data_done); 421 hdr->offset, cmd->write_data_done);
422 recovery = 1; 422 recovery = 1;
423 goto recover; 423 goto recover;
424 } 424 }
425 } else { 425 } else {
426 struct iscsi_seq *seq = cmd->seq_ptr; 426 struct iscsi_seq *seq = cmd->seq_ptr;
427 427
428 if (hdr->offset > seq->offset) { 428 if (hdr->offset > seq->offset) {
429 pr_err("Command ITT: 0x%08x, received offset" 429 pr_err("Command ITT: 0x%08x, received offset"
430 " %u greater than expected %u.\n", cmd->init_task_tag, 430 " %u greater than expected %u.\n", cmd->init_task_tag,
431 hdr->offset, seq->offset); 431 hdr->offset, seq->offset);
432 recovery = 1; 432 recovery = 1;
433 goto recover; 433 goto recover;
434 } else if (hdr->offset < seq->offset) { 434 } else if (hdr->offset < seq->offset) {
435 pr_err("Command ITT: 0x%08x, received offset" 435 pr_err("Command ITT: 0x%08x, received offset"
436 " %u less than expected %u, discarding payload.\n", 436 " %u less than expected %u, discarding payload.\n",
437 cmd->init_task_tag, hdr->offset, seq->offset); 437 cmd->init_task_tag, hdr->offset, seq->offset);
438 dump = 1; 438 dump = 1;
439 goto dump; 439 goto dump;
440 } 440 }
441 } 441 }
442 442
443 return DATAOUT_NORMAL; 443 return DATAOUT_NORMAL;
444 444
445 recover: 445 recover:
446 if (!conn->sess->sess_ops->ErrorRecoveryLevel) { 446 if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
447 pr_err("Unable to perform within-command recovery" 447 pr_err("Unable to perform within-command recovery"
448 " while ERL=0.\n"); 448 " while ERL=0.\n");
449 return DATAOUT_CANNOT_RECOVER; 449 return DATAOUT_CANNOT_RECOVER;
450 } 450 }
451 dump: 451 dump:
452 if (iscsit_dump_data_payload(conn, payload_length, 1) < 0) 452 if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
453 return DATAOUT_CANNOT_RECOVER; 453 return DATAOUT_CANNOT_RECOVER;
454 454
455 return (recovery) ? iscsit_recover_dataout_sequence(cmd, 455 return (recovery) ? iscsit_recover_dataout_sequence(cmd,
456 hdr->offset, payload_length) : 456 hdr->offset, payload_length) :
457 (dump) ? DATAOUT_WITHIN_COMMAND_RECOVERY : DATAOUT_NORMAL; 457 (dump) ? DATAOUT_WITHIN_COMMAND_RECOVERY : DATAOUT_NORMAL;
458 } 458 }
459 459
460 static int iscsit_dataout_pre_datapduinorder_no( 460 static int iscsit_dataout_pre_datapduinorder_no(
461 struct iscsi_cmd *cmd, 461 struct iscsi_cmd *cmd,
462 unsigned char *buf) 462 unsigned char *buf)
463 { 463 {
464 struct iscsi_pdu *pdu; 464 struct iscsi_pdu *pdu;
465 struct iscsi_data *hdr = (struct iscsi_data *) buf; 465 struct iscsi_data *hdr = (struct iscsi_data *) buf;
466 u32 payload_length = ntoh24(hdr->dlength); 466 u32 payload_length = ntoh24(hdr->dlength);
467 467
468 pdu = iscsit_get_pdu_holder(cmd, hdr->offset, payload_length); 468 pdu = iscsit_get_pdu_holder(cmd, hdr->offset, payload_length);
469 if (!pdu) 469 if (!pdu)
470 return DATAOUT_CANNOT_RECOVER; 470 return DATAOUT_CANNOT_RECOVER;
471 471
472 cmd->pdu_ptr = pdu; 472 cmd->pdu_ptr = pdu;
473 473
474 switch (pdu->status) { 474 switch (pdu->status) {
475 case ISCSI_PDU_NOT_RECEIVED: 475 case ISCSI_PDU_NOT_RECEIVED:
476 case ISCSI_PDU_CRC_FAILED: 476 case ISCSI_PDU_CRC_FAILED:
477 case ISCSI_PDU_TIMED_OUT: 477 case ISCSI_PDU_TIMED_OUT:
478 break; 478 break;
479 case ISCSI_PDU_RECEIVED_OK: 479 case ISCSI_PDU_RECEIVED_OK:
480 pr_err("Command ITT: 0x%08x received duplicate DataOUT for" 480 pr_err("Command ITT: 0x%08x received duplicate DataOUT for"
481 " Offset: %u, Length: %u\n", cmd->init_task_tag, 481 " Offset: %u, Length: %u\n", cmd->init_task_tag,
482 hdr->offset, payload_length); 482 hdr->offset, payload_length);
483 return iscsit_dump_data_payload(cmd->conn, payload_length, 1); 483 return iscsit_dump_data_payload(cmd->conn, payload_length, 1);
484 default: 484 default:
485 return DATAOUT_CANNOT_RECOVER; 485 return DATAOUT_CANNOT_RECOVER;
486 } 486 }
487 487
488 return DATAOUT_NORMAL; 488 return DATAOUT_NORMAL;
489 } 489 }
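
For DataPDUInOrder=No the target keeps a per-command table of expected PDU descriptors, and iscsit_get_pdu_holder() resolves the received offset/length pair against it. A hedged sketch of such a lookup; the struct layout and the linear scan are illustrative assumptions, not the driver's actual implementation:

#include <stddef.h>

/* Sketch only: an offset/length lookup of the kind implied by
 * iscsit_get_pdu_holder() above. */
struct pdu_slot {
        unsigned offset;
        unsigned length;
        int status;
};

static struct pdu_slot *find_slot(struct pdu_slot *tbl, unsigned n,
                                  unsigned offset, unsigned length)
{
        for (unsigned i = 0; i < n; i++)
                if (tbl[i].offset == offset && tbl[i].length == length)
                        return &tbl[i];
        return NULL;    /* no matching descriptor: caller cannot recover */
}

int main(void)
{
        struct pdu_slot tbl[] = { { 0, 4096, 0 }, { 4096, 4096, 0 } };

        return find_slot(tbl, 2, 4096, 4096) ? 0 : 1;
}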
490 490
491 static int iscsit_dataout_update_r2t(struct iscsi_cmd *cmd, u32 offset, u32 length) 491 static int iscsit_dataout_update_r2t(struct iscsi_cmd *cmd, u32 offset, u32 length)
492 { 492 {
493 struct iscsi_r2t *r2t; 493 struct iscsi_r2t *r2t;
494 494
495 if (cmd->unsolicited_data) 495 if (cmd->unsolicited_data)
496 return 0; 496 return 0;
497 497
498 r2t = iscsit_get_r2t_for_eos(cmd, offset, length); 498 r2t = iscsit_get_r2t_for_eos(cmd, offset, length);
499 if (!r2t) 499 if (!r2t)
500 return -1; 500 return -1;
501 501
502 spin_lock_bh(&cmd->r2t_lock); 502 spin_lock_bh(&cmd->r2t_lock);
503 r2t->seq_complete = 1; 503 r2t->seq_complete = 1;
504 cmd->outstanding_r2ts--; 504 cmd->outstanding_r2ts--;
505 spin_unlock_bh(&cmd->r2t_lock); 505 spin_unlock_bh(&cmd->r2t_lock);
506 506
507 return 0; 507 return 0;
508 } 508 }
509 509
510 static int iscsit_dataout_update_datapduinorder_no( 510 static int iscsit_dataout_update_datapduinorder_no(
511 struct iscsi_cmd *cmd, 511 struct iscsi_cmd *cmd,
512 u32 data_sn, 512 u32 data_sn,
513 int f_bit) 513 int f_bit)
514 { 514 {
515 int ret = 0; 515 int ret = 0;
516 struct iscsi_pdu *pdu = cmd->pdu_ptr; 516 struct iscsi_pdu *pdu = cmd->pdu_ptr;
517 517
518 pdu->data_sn = data_sn; 518 pdu->data_sn = data_sn;
519 519
520 switch (pdu->status) { 520 switch (pdu->status) {
521 case ISCSI_PDU_NOT_RECEIVED: 521 case ISCSI_PDU_NOT_RECEIVED:
522 pdu->status = ISCSI_PDU_RECEIVED_OK; 522 pdu->status = ISCSI_PDU_RECEIVED_OK;
523 break; 523 break;
524 case ISCSI_PDU_CRC_FAILED: 524 case ISCSI_PDU_CRC_FAILED:
525 pdu->status = ISCSI_PDU_RECEIVED_OK; 525 pdu->status = ISCSI_PDU_RECEIVED_OK;
526 break; 526 break;
527 case ISCSI_PDU_TIMED_OUT: 527 case ISCSI_PDU_TIMED_OUT:
528 pdu->status = ISCSI_PDU_RECEIVED_OK; 528 pdu->status = ISCSI_PDU_RECEIVED_OK;
529 break; 529 break;
530 default: 530 default:
531 return DATAOUT_CANNOT_RECOVER; 531 return DATAOUT_CANNOT_RECOVER;
532 } 532 }
533 533
534 if (f_bit) { 534 if (f_bit) {
535 ret = iscsit_dataout_datapduinorder_no_fbit(cmd, pdu); 535 ret = iscsit_dataout_datapduinorder_no_fbit(cmd, pdu);
536 if (ret == DATAOUT_CANNOT_RECOVER) 536 if (ret == DATAOUT_CANNOT_RECOVER)
537 return ret; 537 return ret;
538 } 538 }
539 539
540 return DATAOUT_NORMAL; 540 return DATAOUT_NORMAL;
541 } 541 }
542 542
543 static int iscsit_dataout_post_crc_passed( 543 static int iscsit_dataout_post_crc_passed(
544 struct iscsi_cmd *cmd, 544 struct iscsi_cmd *cmd,
545 unsigned char *buf) 545 unsigned char *buf)
546 { 546 {
547 int ret, send_r2t = 0; 547 int ret, send_r2t = 0;
548 struct iscsi_conn *conn = cmd->conn; 548 struct iscsi_conn *conn = cmd->conn;
549 struct iscsi_seq *seq = NULL; 549 struct iscsi_seq *seq = NULL;
550 struct iscsi_data *hdr = (struct iscsi_data *) buf; 550 struct iscsi_data *hdr = (struct iscsi_data *) buf;
551 u32 payload_length = ntoh24(hdr->dlength); 551 u32 payload_length = ntoh24(hdr->dlength);
552 552
553 if (cmd->unsolicited_data) { 553 if (cmd->unsolicited_data) {
554 if ((cmd->first_burst_len + payload_length) == 554 if ((cmd->first_burst_len + payload_length) ==
555 conn->sess->sess_ops->FirstBurstLength) { 555 conn->sess->sess_ops->FirstBurstLength) {
556 if (iscsit_dataout_update_r2t(cmd, hdr->offset, 556 if (iscsit_dataout_update_r2t(cmd, hdr->offset,
557 payload_length) < 0) 557 payload_length) < 0)
558 return DATAOUT_CANNOT_RECOVER; 558 return DATAOUT_CANNOT_RECOVER;
559 send_r2t = 1; 559 send_r2t = 1;
560 } 560 }
561 561
562 if (!conn->sess->sess_ops->DataPDUInOrder) { 562 if (!conn->sess->sess_ops->DataPDUInOrder) {
563 ret = iscsit_dataout_update_datapduinorder_no(cmd, 563 ret = iscsit_dataout_update_datapduinorder_no(cmd,
564 hdr->datasn, (hdr->flags & ISCSI_FLAG_CMD_FINAL)); 564 hdr->datasn, (hdr->flags & ISCSI_FLAG_CMD_FINAL));
565 if (ret == DATAOUT_CANNOT_RECOVER) 565 if (ret == DATAOUT_CANNOT_RECOVER)
566 return ret; 566 return ret;
567 } 567 }
568 568
569 cmd->first_burst_len += payload_length; 569 cmd->first_burst_len += payload_length;
570 570
571 if (conn->sess->sess_ops->DataSequenceInOrder) 571 if (conn->sess->sess_ops->DataSequenceInOrder)
572 cmd->data_sn++; 572 cmd->data_sn++;
573 else { 573 else {
574 seq = cmd->seq_ptr; 574 seq = cmd->seq_ptr;
575 seq->data_sn++; 575 seq->data_sn++;
576 seq->offset += payload_length; 576 seq->offset += payload_length;
577 } 577 }
578 578
579 if (send_r2t) { 579 if (send_r2t) {
580 if (seq) 580 if (seq)
581 seq->status = DATAOUT_SEQUENCE_COMPLETE; 581 seq->status = DATAOUT_SEQUENCE_COMPLETE;
582 cmd->first_burst_len = 0; 582 cmd->first_burst_len = 0;
583 cmd->unsolicited_data = 0; 583 cmd->unsolicited_data = 0;
584 } 584 }
585 } else { 585 } else {
586 if (conn->sess->sess_ops->DataSequenceInOrder) { 586 if (conn->sess->sess_ops->DataSequenceInOrder) {
587 if ((cmd->next_burst_len + payload_length) == 587 if ((cmd->next_burst_len + payload_length) ==
588 conn->sess->sess_ops->MaxBurstLength) { 588 conn->sess->sess_ops->MaxBurstLength) {
589 if (iscsit_dataout_update_r2t(cmd, hdr->offset, 589 if (iscsit_dataout_update_r2t(cmd, hdr->offset,
590 payload_length) < 0) 590 payload_length) < 0)
591 return DATAOUT_CANNOT_RECOVER; 591 return DATAOUT_CANNOT_RECOVER;
592 send_r2t = 1; 592 send_r2t = 1;
593 } 593 }
594 594
595 if (!conn->sess->sess_ops->DataPDUInOrder) { 595 if (!conn->sess->sess_ops->DataPDUInOrder) {
596 ret = iscsit_dataout_update_datapduinorder_no( 596 ret = iscsit_dataout_update_datapduinorder_no(
597 cmd, hdr->datasn, 597 cmd, hdr->datasn,
598 (hdr->flags & ISCSI_FLAG_CMD_FINAL)); 598 (hdr->flags & ISCSI_FLAG_CMD_FINAL));
599 if (ret == DATAOUT_CANNOT_RECOVER) 599 if (ret == DATAOUT_CANNOT_RECOVER)
600 return ret; 600 return ret;
601 } 601 }
602 602
603 cmd->next_burst_len += payload_length; 603 cmd->next_burst_len += payload_length;
604 cmd->data_sn++; 604 cmd->data_sn++;
605 605
606 if (send_r2t) 606 if (send_r2t)
607 cmd->next_burst_len = 0; 607 cmd->next_burst_len = 0;
608 } else { 608 } else {
609 seq = cmd->seq_ptr; 609 seq = cmd->seq_ptr;
610 610
611 if ((seq->next_burst_len + payload_length) == 611 if ((seq->next_burst_len + payload_length) ==
612 seq->xfer_len) { 612 seq->xfer_len) {
613 if (iscsit_dataout_update_r2t(cmd, hdr->offset, 613 if (iscsit_dataout_update_r2t(cmd, hdr->offset,
614 payload_length) < 0) 614 payload_length) < 0)
615 return DATAOUT_CANNOT_RECOVER; 615 return DATAOUT_CANNOT_RECOVER;
616 send_r2t = 1; 616 send_r2t = 1;
617 } 617 }
618 618
619 if (!conn->sess->sess_ops->DataPDUInOrder) { 619 if (!conn->sess->sess_ops->DataPDUInOrder) {
620 ret = iscsit_dataout_update_datapduinorder_no( 620 ret = iscsit_dataout_update_datapduinorder_no(
621 cmd, hdr->datasn, 621 cmd, hdr->datasn,
622 (hdr->flags & ISCSI_FLAG_CMD_FINAL)); 622 (hdr->flags & ISCSI_FLAG_CMD_FINAL));
623 if (ret == DATAOUT_CANNOT_RECOVER) 623 if (ret == DATAOUT_CANNOT_RECOVER)
624 return ret; 624 return ret;
625 } 625 }
626 626
627 seq->data_sn++; 627 seq->data_sn++;
628 seq->offset += payload_length; 628 seq->offset += payload_length;
629 seq->next_burst_len += payload_length; 629 seq->next_burst_len += payload_length;
630 630
631 if (send_r2t) { 631 if (send_r2t) {
632 seq->next_burst_len = 0; 632 seq->next_burst_len = 0;
633 seq->status = DATAOUT_SEQUENCE_COMPLETE; 633 seq->status = DATAOUT_SEQUENCE_COMPLETE;
634 } 634 }
635 } 635 }
636 } 636 }
637 637
638 if (send_r2t && conn->sess->sess_ops->DataSequenceInOrder) 638 if (send_r2t && conn->sess->sess_ops->DataSequenceInOrder)
639 cmd->data_sn = 0; 639 cmd->data_sn = 0;
640 640
641 cmd->write_data_done += payload_length; 641 cmd->write_data_done += payload_length;
642 642
643 return (cmd->write_data_done == cmd->data_length) ? 643 return (cmd->write_data_done == cmd->data_length) ?
644 DATAOUT_SEND_TO_TRANSPORT : (send_r2t) ? 644 DATAOUT_SEND_TO_TRANSPORT : (send_r2t) ?
645 DATAOUT_SEND_R2T : DATAOUT_NORMAL; 645 DATAOUT_SEND_R2T : DATAOUT_NORMAL;
646 } 646 }
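
The bookkeeping above is easier to see with concrete numbers. A userspace sketch of the solicited, DataSequenceInOrder=Yes branch, using illustrative parameters (8 KB PDUs, 64 KB MaxBurstLength, 128 KB of write data); in the real code a completed burst additionally queues an R2T, and the final PDU returns DATAOUT_SEND_TO_TRANSPORT:

#include <stdio.h>

/* Sketch only: burst accounting for the solicited,
 * DataSequenceInOrder=Yes branch above. Parameters are
 * illustrative: 8 KB PDUs, 64 KB MaxBurstLength, 128 KB command. */
int main(void)
{
        unsigned max_burst = 65536, total = 131072, pdu = 8192;
        unsigned next_burst_len = 0, write_data_done = 0, data_sn = 0;
        unsigned r2ts = 0;

        while (write_data_done < total) {
                int send_r2t = (next_burst_len + pdu == max_burst);

                next_burst_len += pdu;
                data_sn++;
                if (send_r2t) {
                        next_burst_len = 0;
                        data_sn = 0;    /* DataSN restarts with each burst */
                        r2ts++;
                }
                write_data_done += pdu;
        }
        printf("%u bytes, %u R2T completions, final data_sn %u\n",
               write_data_done, r2ts, data_sn);
        return 0;
}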
647 647
648 static int iscsit_dataout_post_crc_failed( 648 static int iscsit_dataout_post_crc_failed(
649 struct iscsi_cmd *cmd, 649 struct iscsi_cmd *cmd,
650 unsigned char *buf) 650 unsigned char *buf)
651 { 651 {
652 struct iscsi_conn *conn = cmd->conn; 652 struct iscsi_conn *conn = cmd->conn;
653 struct iscsi_pdu *pdu; 653 struct iscsi_pdu *pdu;
654 struct iscsi_data *hdr = (struct iscsi_data *) buf; 654 struct iscsi_data *hdr = (struct iscsi_data *) buf;
655 u32 payload_length = ntoh24(hdr->dlength); 655 u32 payload_length = ntoh24(hdr->dlength);
656 656
657 if (conn->sess->sess_ops->DataPDUInOrder) 657 if (conn->sess->sess_ops->DataPDUInOrder)
658 goto recover; 658 goto recover;
659 /* 659 /*
660 * The rest of this function is only reached when DataPDUInOrder=No. 660 * The rest of this function is only reached when DataPDUInOrder=No.
661 */ 661 */
662 pdu = cmd->pdu_ptr; 662 pdu = cmd->pdu_ptr;
663 663
664 switch (pdu->status) { 664 switch (pdu->status) {
665 case ISCSI_PDU_NOT_RECEIVED: 665 case ISCSI_PDU_NOT_RECEIVED:
666 pdu->status = ISCSI_PDU_CRC_FAILED; 666 pdu->status = ISCSI_PDU_CRC_FAILED;
667 break; 667 break;
668 case ISCSI_PDU_CRC_FAILED: 668 case ISCSI_PDU_CRC_FAILED:
669 break; 669 break;
670 case ISCSI_PDU_TIMED_OUT: 670 case ISCSI_PDU_TIMED_OUT:
671 pdu->status = ISCSI_PDU_CRC_FAILED; 671 pdu->status = ISCSI_PDU_CRC_FAILED;
672 break; 672 break;
673 default: 673 default:
674 return DATAOUT_CANNOT_RECOVER; 674 return DATAOUT_CANNOT_RECOVER;
675 } 675 }
676 676
677 recover: 677 recover:
678 return iscsit_recover_dataout_sequence(cmd, hdr->offset, payload_length); 678 return iscsit_recover_dataout_sequence(cmd, hdr->offset, payload_length);
679 } 679 }
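
Taken together with iscsit_dataout_update_datapduinorder_no() above, the per-PDU status field follows one small transition rule: any of the three "missing" states is promoted to ISCSI_PDU_RECEIVED_OK on a good CRC and (re)marked ISCSI_PDU_CRC_FAILED on a bad one, while anything else is unrecoverable. A compact model of that rule:

#include <stdio.h>

/* Sketch only: the per-PDU status transition shared by the
 * CRC-passed and CRC-failed paths above. */
enum pdu_status { NOT_RECEIVED, CRC_FAILED, TIMED_OUT, RECEIVED_OK };

static int mark_pdu(enum pdu_status *st, int crc_ok)
{
        switch (*st) {
        case NOT_RECEIVED:
        case CRC_FAILED:
        case TIMED_OUT:
                /* a good CRC completes the slot; a bad one re-arms it */
                *st = crc_ok ? RECEIVED_OK : CRC_FAILED;
                return 0;
        default:
                return -1;      /* already RECEIVED_OK: unrecoverable here */
        }
}

int main(void)
{
        enum pdu_status st = TIMED_OUT;

        mark_pdu(&st, 1);
        printf("status=%d\n", st);      /* 3 == RECEIVED_OK */
        return 0;
}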
680 680
681 /* 681 /*
682 * Called from iscsit_handle_data_out() before DataOUT Payload is received 682 * Called from iscsit_handle_data_out() before DataOUT Payload is received
683 * and CRC computed. 683 * and CRC computed.
684 */ 684 */
685 extern int iscsit_check_pre_dataout( 685 extern int iscsit_check_pre_dataout(
686 struct iscsi_cmd *cmd, 686 struct iscsi_cmd *cmd,
687 unsigned char *buf) 687 unsigned char *buf)
688 { 688 {
689 int ret; 689 int ret;
690 struct iscsi_conn *conn = cmd->conn; 690 struct iscsi_conn *conn = cmd->conn;
691 691
692 ret = iscsit_dataout_within_command_recovery_check(cmd, buf); 692 ret = iscsit_dataout_within_command_recovery_check(cmd, buf);
693 if ((ret == DATAOUT_WITHIN_COMMAND_RECOVERY) || 693 if ((ret == DATAOUT_WITHIN_COMMAND_RECOVERY) ||
694 (ret == DATAOUT_CANNOT_RECOVER)) 694 (ret == DATAOUT_CANNOT_RECOVER))
695 return ret; 695 return ret;
696 696
697 ret = iscsit_dataout_check_datasn(cmd, buf); 697 ret = iscsit_dataout_check_datasn(cmd, buf);
698 if ((ret == DATAOUT_WITHIN_COMMAND_RECOVERY) || 698 if ((ret == DATAOUT_WITHIN_COMMAND_RECOVERY) ||
699 (ret == DATAOUT_CANNOT_RECOVER)) 699 (ret == DATAOUT_CANNOT_RECOVER))
700 return ret; 700 return ret;
701 701
702 if (cmd->unsolicited_data) { 702 if (cmd->unsolicited_data) {
703 ret = iscsit_dataout_check_unsolicited_sequence(cmd, buf); 703 ret = iscsit_dataout_check_unsolicited_sequence(cmd, buf);
704 if ((ret == DATAOUT_WITHIN_COMMAND_RECOVERY) || 704 if ((ret == DATAOUT_WITHIN_COMMAND_RECOVERY) ||
705 (ret == DATAOUT_CANNOT_RECOVER)) 705 (ret == DATAOUT_CANNOT_RECOVER))
706 return ret; 706 return ret;
707 } else { 707 } else {
708 ret = iscsit_dataout_check_sequence(cmd, buf); 708 ret = iscsit_dataout_check_sequence(cmd, buf);
709 if ((ret == DATAOUT_WITHIN_COMMAND_RECOVERY) || 709 if ((ret == DATAOUT_WITHIN_COMMAND_RECOVERY) ||
710 (ret == DATAOUT_CANNOT_RECOVER)) 710 (ret == DATAOUT_CANNOT_RECOVER))
711 return ret; 711 return ret;
712 } 712 }
713 713
714 return (conn->sess->sess_ops->DataPDUInOrder) ? 714 return (conn->sess->sess_ops->DataPDUInOrder) ?
715 iscsit_dataout_pre_datapduinorder_yes(cmd, buf) : 715 iscsit_dataout_pre_datapduinorder_yes(cmd, buf) :
716 iscsit_dataout_pre_datapduinorder_no(cmd, buf); 716 iscsit_dataout_pre_datapduinorder_no(cmd, buf);
717 } 717 }
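
iscsit_check_pre_dataout() is a short-circuiting pipeline: each stage either passes the PDU on or returns a terminal verdict. The same shape, reduced to a table of checks in a userspace sketch (the stage names and the struct are illustrative):

#include <stdio.h>

/* Sketch only: the staged validation above as a short-circuiting
 * table of checks; stage names and the struct are illustrative. */
enum verdict { OK, RECOVER, FATAL };

struct pdu {
        unsigned datasn;
        unsigned offset;
};

static enum verdict check_recovery(struct pdu *p) { (void)p; return OK; }
static enum verdict check_datasn(struct pdu *p)
{
        return p->datasn == 0 ? OK : RECOVER;   /* expect DataSN 0 */
}

int main(void)
{
        enum verdict (*checks[])(struct pdu *) = {
                check_recovery,
                check_datasn,
        };
        struct pdu p = { .datasn = 1, .offset = 0 };
        enum verdict v = OK;

        /* stop at the first stage that does not return OK */
        for (unsigned i = 0; i < 2 && v == OK; i++)
                v = checks[i](&p);
        printf("verdict=%d\n", v);      /* 1 == RECOVER */
        return 0;
}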
718 718
719 /* 719 /*
720 * Called from iscsit_handle_data_out() after DataOUT Payload is received 720 * Called from iscsit_handle_data_out() after DataOUT Payload is received
721 * and CRC computed. 721 * and CRC computed.
722 */ 722 */
723 int iscsit_check_post_dataout( 723 int iscsit_check_post_dataout(
724 struct iscsi_cmd *cmd, 724 struct iscsi_cmd *cmd,
725 unsigned char *buf, 725 unsigned char *buf,
726 u8 data_crc_failed) 726 u8 data_crc_failed)
727 { 727 {
728 struct iscsi_conn *conn = cmd->conn; 728 struct iscsi_conn *conn = cmd->conn;
729 729
730 cmd->dataout_timeout_retries = 0; 730 cmd->dataout_timeout_retries = 0;
731 731
732 if (!data_crc_failed) 732 if (!data_crc_failed)
733 return iscsit_dataout_post_crc_passed(cmd, buf); 733 return iscsit_dataout_post_crc_passed(cmd, buf);
734 else { 734 else {
735 if (!conn->sess->sess_ops->ErrorRecoveryLevel) { 735 if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
736 pr_err("Unable to recover from DataOUT CRC" 736 pr_err("Unable to recover from DataOUT CRC"
737 " failure while ERL=0, closing session.\n"); 737 " failure while ERL=0, closing session.\n");
738 iscsit_add_reject_from_cmd(ISCSI_REASON_DATA_DIGEST_ERROR, 738 iscsit_add_reject_from_cmd(ISCSI_REASON_DATA_DIGEST_ERROR,
739 1, 0, buf, cmd); 739 1, 0, buf, cmd);
740 return DATAOUT_CANNOT_RECOVER; 740 return DATAOUT_CANNOT_RECOVER;
741 } 741 }
742 742
743 iscsit_add_reject_from_cmd(ISCSI_REASON_DATA_DIGEST_ERROR, 743 iscsit_add_reject_from_cmd(ISCSI_REASON_DATA_DIGEST_ERROR,
744 0, 0, buf, cmd); 744 0, 0, buf, cmd);
745 return iscsit_dataout_post_crc_failed(cmd, buf); 745 return iscsit_dataout_post_crc_failed(cmd, buf);
746 } 746 }
747 } 747 }
748 748
749 static void iscsit_handle_time2retain_timeout(unsigned long data) 749 static void iscsit_handle_time2retain_timeout(unsigned long data)
750 { 750 {
751 struct iscsi_session *sess = (struct iscsi_session *) data; 751 struct iscsi_session *sess = (struct iscsi_session *) data;
752 struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess); 752 struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
753 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; 753 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
754 754
755 spin_lock_bh(&se_tpg->session_lock); 755 spin_lock_bh(&se_tpg->session_lock);
756 if (sess->time2retain_timer_flags & ISCSI_TF_STOP) { 756 if (sess->time2retain_timer_flags & ISCSI_TF_STOP) {
757 spin_unlock_bh(&se_tpg->session_lock); 757 spin_unlock_bh(&se_tpg->session_lock);
758 return; 758 return;
759 } 759 }
760 if (atomic_read(&sess->session_reinstatement)) { 760 if (atomic_read(&sess->session_reinstatement)) {
761 pr_err("Exiting Time2Retain handler because" 761 pr_err("Exiting Time2Retain handler because"
762 " session_reinstatement=1\n"); 762 " session_reinstatement=1\n");
763 spin_unlock_bh(&se_tpg->session_lock); 763 spin_unlock_bh(&se_tpg->session_lock);
764 return; 764 return;
765 } 765 }
766 sess->time2retain_timer_flags |= ISCSI_TF_EXPIRED; 766 sess->time2retain_timer_flags |= ISCSI_TF_EXPIRED;
767 767
768 pr_err("Time2Retain timer expired for SID: %u, cleaning up" 768 pr_err("Time2Retain timer expired for SID: %u, cleaning up"
769 " iSCSI session.\n", sess->sid); 769 " iSCSI session.\n", sess->sid);
770 { 770 {
771 struct iscsi_tiqn *tiqn = tpg->tpg_tiqn; 771 struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
772 772
773 if (tiqn) { 773 if (tiqn) {
774 spin_lock(&tiqn->sess_err_stats.lock); 774 spin_lock(&tiqn->sess_err_stats.lock);
775 strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name, 775 strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
776 (void *)sess->sess_ops->InitiatorName); 776 (void *)sess->sess_ops->InitiatorName);
777 tiqn->sess_err_stats.last_sess_failure_type = 777 tiqn->sess_err_stats.last_sess_failure_type =
778 ISCSI_SESS_ERR_CXN_TIMEOUT; 778 ISCSI_SESS_ERR_CXN_TIMEOUT;
779 tiqn->sess_err_stats.cxn_timeout_errors++; 779 tiqn->sess_err_stats.cxn_timeout_errors++;
780 sess->conn_timeout_errors++; 780 sess->conn_timeout_errors++;
781 spin_unlock(&tiqn->sess_err_stats.lock); 781 spin_unlock(&tiqn->sess_err_stats.lock);
782 } 782 }
783 } 783 }
784 784
785 spin_unlock_bh(&se_tpg->session_lock); 785 spin_unlock_bh(&se_tpg->session_lock);
786 iscsit_close_session(sess); 786 iscsit_close_session(sess);
787 } 787 }
788 788
789 extern void iscsit_start_time2retain_handler(struct iscsi_session *sess) 789 extern void iscsit_start_time2retain_handler(struct iscsi_session *sess)
790 { 790 {
791 int tpg_active; 791 int tpg_active;
792 /* 792 /*
793 * Only start the Time2Retain timer when the associated TPG is still in 793 * Only start the Time2Retain timer when the associated TPG is still in
794 * an ACTIVE (i.e. not disabled or shut down) state. 794 * an ACTIVE (i.e. not disabled or shut down) state.
795 */ 795 */
796 spin_lock(&ISCSI_TPG_S(sess)->tpg_state_lock); 796 spin_lock(&ISCSI_TPG_S(sess)->tpg_state_lock);
797 tpg_active = (ISCSI_TPG_S(sess)->tpg_state == TPG_STATE_ACTIVE); 797 tpg_active = (ISCSI_TPG_S(sess)->tpg_state == TPG_STATE_ACTIVE);
798 spin_unlock(&ISCSI_TPG_S(sess)->tpg_state_lock); 798 spin_unlock(&ISCSI_TPG_S(sess)->tpg_state_lock);
799 799
800 if (!tpg_active) 800 if (!tpg_active)
801 return; 801 return;
802 802
803 if (sess->time2retain_timer_flags & ISCSI_TF_RUNNING) 803 if (sess->time2retain_timer_flags & ISCSI_TF_RUNNING)
804 return; 804 return;
805 805
806 pr_debug("Starting Time2Retain timer for %u seconds on" 806 pr_debug("Starting Time2Retain timer for %u seconds on"
807 " SID: %u\n", sess->sess_ops->DefaultTime2Retain, sess->sid); 807 " SID: %u\n", sess->sess_ops->DefaultTime2Retain, sess->sid);
808 808
809 init_timer(&sess->time2retain_timer); 809 init_timer(&sess->time2retain_timer);
810 sess->time2retain_timer.expires = 810 sess->time2retain_timer.expires =
811 (get_jiffies_64() + sess->sess_ops->DefaultTime2Retain * HZ); 811 (get_jiffies_64() + sess->sess_ops->DefaultTime2Retain * HZ);
812 sess->time2retain_timer.data = (unsigned long)sess; 812 sess->time2retain_timer.data = (unsigned long)sess;
813 sess->time2retain_timer.function = iscsit_handle_time2retain_timeout; 813 sess->time2retain_timer.function = iscsit_handle_time2retain_timeout;
814 sess->time2retain_timer_flags &= ~ISCSI_TF_STOP; 814 sess->time2retain_timer_flags &= ~ISCSI_TF_STOP;
815 sess->time2retain_timer_flags |= ISCSI_TF_RUNNING; 815 sess->time2retain_timer_flags |= ISCSI_TF_RUNNING;
816 add_timer(&sess->time2retain_timer); 816 add_timer(&sess->time2retain_timer);
817 } 817 }
818 818
819 /* 819 /*
820 * Called with spin_lock_bh(&struct se_portal_group->session_lock) held 820 * Called with spin_lock_bh(&struct se_portal_group->session_lock) held
821 */ 821 */
822 extern int iscsit_stop_time2retain_timer(struct iscsi_session *sess) 822 extern int iscsit_stop_time2retain_timer(struct iscsi_session *sess)
823 { 823 {
824 struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess); 824 struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
825 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; 825 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
826 826
827 if (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED) 827 if (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)
828 return -1; 828 return -1;
829 829
830 if (!(sess->time2retain_timer_flags & ISCSI_TF_RUNNING)) 830 if (!(sess->time2retain_timer_flags & ISCSI_TF_RUNNING))
831 return 0; 831 return 0;
832 832
833 sess->time2retain_timer_flags |= ISCSI_TF_STOP; 833 sess->time2retain_timer_flags |= ISCSI_TF_STOP;
834 spin_unlock_bh(&se_tpg->session_lock); 834 spin_unlock_bh(&se_tpg->session_lock);
835 835
836 del_timer_sync(&sess->time2retain_timer); 836 del_timer_sync(&sess->time2retain_timer);
837 837
838 spin_lock_bh(&se_tpg->session_lock); 838 spin_lock_bh(&se_tpg->session_lock);
839 sess->time2retain_timer_flags &= ~ISCSI_TF_RUNNING; 839 sess->time2retain_timer_flags &= ~ISCSI_TF_RUNNING;
840 pr_debug("Stopped Time2Retain Timer for SID: %u\n", 840 pr_debug("Stopped Time2Retain Timer for SID: %u\n",
841 sess->sid); 841 sess->sid);
842 return 0; 842 return 0;
843 } 843 }
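
The stop path above is a classic timer-teardown idiom: raise ISCSI_TF_STOP under the session lock so a concurrently firing handler bails out, drop the lock around del_timer_sync() (which may have to wait for that handler to finish), then clear RUNNING once the timer is known dead. A miniature model with plain flags standing in for the kernel primitives:

#include <stdio.h>

/* Sketch only: the stop protocol above in miniature. Plain flags
 * stand in for the locked timer state; the lock and
 * del_timer_sync() themselves are marked by comments. */
#define TF_RUNNING 0x01
#define TF_STOP    0x02
#define TF_EXPIRED 0x04

static int stop_timer(unsigned *flags)
{
        if (*flags & TF_EXPIRED)
                return -1;              /* the handler already won the race */
        if (!(*flags & TF_RUNNING))
                return 0;               /* never started, nothing to do */
        *flags |= TF_STOP;              /* set under the session lock */
        /* lock dropped here; del_timer_sync() waits out the handler */
        *flags &= ~TF_RUNNING;          /* lock retaken, timer is dead */
        return 0;
}

int main(void)
{
        unsigned flags = TF_RUNNING;

        printf("%d flags=0x%x\n", stop_timer(&flags), flags);
        return 0;
}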
844 844
845 void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *conn) 845 void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *conn)
846 { 846 {
847 spin_lock_bh(&conn->state_lock); 847 spin_lock_bh(&conn->state_lock);
848 if (atomic_read(&conn->connection_exit)) { 848 if (atomic_read(&conn->connection_exit)) {
849 spin_unlock_bh(&conn->state_lock); 849 spin_unlock_bh(&conn->state_lock);
850 goto sleep; 850 goto sleep;
851 } 851 }
852 852
853 if (atomic_read(&conn->transport_failed)) { 853 if (atomic_read(&conn->transport_failed)) {
854 spin_unlock_bh(&conn->state_lock); 854 spin_unlock_bh(&conn->state_lock);
855 goto sleep; 855 goto sleep;
856 } 856 }
857 spin_unlock_bh(&conn->state_lock); 857 spin_unlock_bh(&conn->state_lock);
858 858
859 iscsi_thread_set_force_reinstatement(conn); 859 iscsi_thread_set_force_reinstatement(conn);
860 860
861 sleep: 861 sleep:
862 wait_for_completion(&conn->conn_wait_rcfr_comp); 862 wait_for_completion(&conn->conn_wait_rcfr_comp);
863 complete(&conn->conn_post_wait_comp); 863 complete(&conn->conn_post_wait_comp);
864 } 864 }
865 865
866 void iscsit_cause_connection_reinstatement(struct iscsi_conn *conn, int sleep) 866 void iscsit_cause_connection_reinstatement(struct iscsi_conn *conn, int sleep)
867 { 867 {
868 spin_lock_bh(&conn->state_lock); 868 spin_lock_bh(&conn->state_lock);
869 if (atomic_read(&conn->connection_exit)) { 869 if (atomic_read(&conn->connection_exit)) {
870 spin_unlock_bh(&conn->state_lock); 870 spin_unlock_bh(&conn->state_lock);
871 return; 871 return;
872 } 872 }
873 873
874 if (atomic_read(&conn->transport_failed)) { 874 if (atomic_read(&conn->transport_failed)) {
875 spin_unlock_bh(&conn->state_lock); 875 spin_unlock_bh(&conn->state_lock);
876 return; 876 return;
877 } 877 }
878 878
879 if (atomic_read(&conn->connection_reinstatement)) { 879 if (atomic_read(&conn->connection_reinstatement)) {
880 spin_unlock_bh(&conn->state_lock); 880 spin_unlock_bh(&conn->state_lock);
881 return; 881 return;
882 } 882 }
883 883
884 if (iscsi_thread_set_force_reinstatement(conn) < 0) { 884 if (iscsi_thread_set_force_reinstatement(conn) < 0) {
885 spin_unlock_bh(&conn->state_lock); 885 spin_unlock_bh(&conn->state_lock);
886 return; 886 return;
887 } 887 }
888 888
889 atomic_set(&conn->connection_reinstatement, 1); 889 atomic_set(&conn->connection_reinstatement, 1);
890 if (!sleep) { 890 if (!sleep) {
891 spin_unlock_bh(&conn->state_lock); 891 spin_unlock_bh(&conn->state_lock);
892 return; 892 return;
893 } 893 }
894 894
895 atomic_set(&conn->sleep_on_conn_wait_comp, 1); 895 atomic_set(&conn->sleep_on_conn_wait_comp, 1);
896 spin_unlock_bh(&conn->state_lock); 896 spin_unlock_bh(&conn->state_lock);
897 897
898 wait_for_completion(&conn->conn_wait_comp); 898 wait_for_completion(&conn->conn_wait_comp);
899 complete(&conn->conn_post_wait_comp); 899 complete(&conn->conn_post_wait_comp);
900 } 900 }
901 901
902 void iscsit_fall_back_to_erl0(struct iscsi_session *sess) 902 void iscsit_fall_back_to_erl0(struct iscsi_session *sess)
903 { 903 {
904 pr_debug("Falling back to ErrorRecoveryLevel=0 for SID:" 904 pr_debug("Falling back to ErrorRecoveryLevel=0 for SID:"
905 " %u\n", sess->sid); 905 " %u\n", sess->sid);
906 906
907 atomic_set(&sess->session_fall_back_to_erl0, 1); 907 atomic_set(&sess->session_fall_back_to_erl0, 1);
908 } 908 }
909 909
910 static void iscsit_handle_connection_cleanup(struct iscsi_conn *conn) 910 static void iscsit_handle_connection_cleanup(struct iscsi_conn *conn)
911 { 911 {
912 struct iscsi_session *sess = conn->sess; 912 struct iscsi_session *sess = conn->sess;
913 913
914 if ((sess->sess_ops->ErrorRecoveryLevel == 2) && 914 if ((sess->sess_ops->ErrorRecoveryLevel == 2) &&
915 !atomic_read(&sess->session_reinstatement) && 915 !atomic_read(&sess->session_reinstatement) &&
916 !atomic_read(&sess->session_fall_back_to_erl0)) 916 !atomic_read(&sess->session_fall_back_to_erl0))
917 iscsit_connection_recovery_transport_reset(conn); 917 iscsit_connection_recovery_transport_reset(conn);
918 else { 918 else {
919 pr_debug("Performing cleanup for failed iSCSI" 919 pr_debug("Performing cleanup for failed iSCSI"
920 " Connection ID: %hu from %s\n", conn->cid, 920 " Connection ID: %hu from %s\n", conn->cid,
921 sess->sess_ops->InitiatorName); 921 sess->sess_ops->InitiatorName);
922 iscsit_close_connection(conn); 922 iscsit_close_connection(conn);
923 } 923 }
924 } 924 }
925 925
926 extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn) 926 extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
927 { 927 {
928 spin_lock_bh(&conn->state_lock); 928 spin_lock_bh(&conn->state_lock);
929 if (atomic_read(&conn->connection_exit)) { 929 if (atomic_read(&conn->connection_exit)) {
930 spin_unlock_bh(&conn->state_lock); 930 spin_unlock_bh(&conn->state_lock);
931 return; 931 return;
932 } 932 }
933 atomic_set(&conn->connection_exit, 1); 933 atomic_set(&conn->connection_exit, 1);
934 934
935 if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) { 935 if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
936 spin_unlock_bh(&conn->state_lock); 936 spin_unlock_bh(&conn->state_lock);
937 iscsit_close_connection(conn); 937 iscsit_close_connection(conn);
938 return; 938 return;
939 } 939 }
940 940
941 if (conn->conn_state == TARG_CONN_STATE_CLEANUP_WAIT) { 941 if (conn->conn_state == TARG_CONN_STATE_CLEANUP_WAIT) {
942 spin_unlock_bh(&conn->state_lock); 942 spin_unlock_bh(&conn->state_lock);
943 return; 943 return;
944 } 944 }
945 945
946 pr_debug("Moving to TARG_CONN_STATE_CLEANUP_WAIT.\n"); 946 pr_debug("Moving to TARG_CONN_STATE_CLEANUP_WAIT.\n");
947 conn->conn_state = TARG_CONN_STATE_CLEANUP_WAIT; 947 conn->conn_state = TARG_CONN_STATE_CLEANUP_WAIT;
948 spin_unlock_bh(&conn->state_lock); 948 spin_unlock_bh(&conn->state_lock);
949 949
950 iscsit_handle_connection_cleanup(conn); 950 iscsit_handle_connection_cleanup(conn);
951 } 951 }
952 952
953 /* 953 /*
954 * This is the simple function that makes the magic of 954 * This is the simple function that makes the magic of
955 * sync and steering happen in the following paradoxical order: 955 * sync and steering happen in the following paradoxical order:
956 * 956 *
957 * 0) Receive conn->of_marker (bytes left until next OFMarker) 957 * 0) Receive conn->of_marker (bytes left until next OFMarker)
958 * bytes into an offload buffer. When we pass the exact number 958 * bytes into an offload buffer. When we pass the exact number
959 * of bytes in conn->of_marker, iscsit_dump_data_payload() and hence 959 * of bytes in conn->of_marker, iscsit_dump_data_payload() and hence
960 * rx_data() will automatically receive the identical u32 marker 960 * rx_data() will automatically receive the identical u32 marker
961 * value (sent twice) and store it in conn->of_marker_offset; 961 * value (sent twice) and store it in conn->of_marker_offset;
962 * 1) Now conn->of_marker_offset will contain the offset to the start 962 * 1) Now conn->of_marker_offset will contain the offset to the start
963 * of the next iSCSI PDU. Dump these remaining bytes into another 963 * of the next iSCSI PDU. Dump these remaining bytes into another
964 * offload buffer. 964 * offload buffer.
965 * 2) We are done! 965 * 2) We are done!
966 * Next byte in the TCP stream will contain the next iSCSI PDU! 966 * Next byte in the TCP stream will contain the next iSCSI PDU!
967 * Cool Huh?! 967 * Cool Huh?!
968 */ 968 */
969 int iscsit_recover_from_unknown_opcode(struct iscsi_conn *conn) 969 int iscsit_recover_from_unknown_opcode(struct iscsi_conn *conn)
970 { 970 {
971 /* 971 /*
972 * Make sure the number of bytes remaining to the next marker is sane. 972 * Make sure the number of bytes remaining to the next marker is sane.
973 */ 973 */
974 if (conn->of_marker > (conn->conn_ops->OFMarkInt * 4)) { 974 if (conn->of_marker > (conn->conn_ops->OFMarkInt * 4)) {
975 pr_err("Remaining bytes to OFMarker: %u exceeds" 975 pr_err("Remaining bytes to OFMarker: %u exceeds"
976 " OFMarkInt bytes: %u.\n", conn->of_marker, 976 " OFMarkInt bytes: %u.\n", conn->of_marker,
977 conn->conn_ops->OFMarkInt * 4); 977 conn->conn_ops->OFMarkInt * 4);
978 return -1; 978 return -1;
979 } 979 }
980 980
981 pr_debug("Advancing %u bytes in TCP stream to get to the" 981 pr_debug("Advancing %u bytes in TCP stream to get to the"
982 " next OFMarker.\n", conn->of_marker); 982 " next OFMarker.\n", conn->of_marker);
983 983
984 if (iscsit_dump_data_payload(conn, conn->of_marker, 0) < 0) 984 if (iscsit_dump_data_payload(conn, conn->of_marker, 0) < 0)
985 return -1; 985 return -1;
986 986
987 /* 987 /*
988 * Make sure the offset marker we retrieved is a valid value. 988 * Make sure the offset marker we retrieved is a valid value.
989 */ 989 */
990 if (conn->of_marker_offset > (ISCSI_HDR_LEN + (ISCSI_CRC_LEN * 2) + 990 if (conn->of_marker_offset > (ISCSI_HDR_LEN + (ISCSI_CRC_LEN * 2) +
991 conn->conn_ops->MaxRecvDataSegmentLength)) { 991 conn->conn_ops->MaxRecvDataSegmentLength)) {
992 pr_err("OFMarker offset value: %u exceeds limit.\n", 992 pr_err("OFMarker offset value: %u exceeds limit.\n",
993 conn->of_marker_offset); 993 conn->of_marker_offset);
994 return -1; 994 return -1;
995 } 995 }
996 996
997 pr_debug("Discarding %u bytes of TCP stream to get to the" 997 pr_debug("Discarding %u bytes of TCP stream to get to the"
998 " next iSCSI Opcode.\n", conn->of_marker_offset); 998 " next iSCSI Opcode.\n", conn->of_marker_offset);
999 999
1000 if (iscsit_dump_data_payload(conn, conn->of_marker_offset, 0) < 0) 1000 if (iscsit_dump_data_payload(conn, conn->of_marker_offset, 0) < 0)
1001 return -1; 1001 return -1;
1002 1002
1003 return 0; 1003 return 0;
1004 } 1004 }
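
Both sanity limits above follow from the negotiation units: OFMarkInt is carried in 4-byte words, so the byte distance to the next marker can never legitimately exceed OFMarkInt * 4, and the steered offset can never point past one maximal PDU (48-byte header, two 4-byte digests, plus MaxRecvDataSegmentLength of data). A worked check with illustrative values:

#include <stdio.h>

/* Sketch only: the two bounds enforced above, with the usual iSCSI
 * constants (48-byte basic header segment, 4-byte CRC32C digests).
 * All other numbers are illustrative. */
#define ISCSI_HDR_LEN 48
#define ISCSI_CRC_LEN 4

int main(void)
{
        unsigned of_mark_int = 2048;    /* negotiated, in 4-byte words */
        unsigned mrdsl = 8192;          /* MaxRecvDataSegmentLength */
        unsigned of_marker = 6000;      /* bytes to the next marker */
        unsigned of_marker_offset = 5000; /* bytes to the next PDU */

        if (of_marker > of_mark_int * 4)
                printf("marker distance insane\n");
        if (of_marker_offset > ISCSI_HDR_LEN + ISCSI_CRC_LEN * 2 + mrdsl)
                printf("marker offset insane\n");
        else
                printf("both within bounds\n");
        return 0;
}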
1005 1005
drivers/target/iscsi/iscsi_target_erl1.c
1 /******************************************************************************* 1 /*******************************************************************************
2 * This file contains error recovery level one used by the iSCSI Target driver. 2 * This file contains error recovery level one used by the iSCSI Target driver.
3 * 3 *
4 * © Copyright 2007-2011 RisingTide Systems LLC. 4 * © Copyright 2007-2011 RisingTide Systems LLC.
5 * 5 *
6 * Licensed to the Linux Foundation under the General Public License (GPL) version 2. 6 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
7 * 7 *
8 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> 8 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by 11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or 12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version. 13 * (at your option) any later version.
14 * 14 *
15 * This program is distributed in the hope that it will be useful, 15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details. 18 * GNU General Public License for more details.
19 ******************************************************************************/ 19 ******************************************************************************/
20 20
21 #include <linux/list.h> 21 #include <linux/list.h>
22 #include <scsi/iscsi_proto.h> 22 #include <scsi/iscsi_proto.h>
23 #include <target/target_core_base.h> 23 #include <target/target_core_base.h>
24 #include <target/target_core_transport.h> 24 #include <target/target_core_fabric.h>
25 25
26 #include "iscsi_target_core.h" 26 #include "iscsi_target_core.h"
27 #include "iscsi_target_seq_pdu_list.h" 27 #include "iscsi_target_seq_pdu_list.h"
28 #include "iscsi_target_datain_values.h" 28 #include "iscsi_target_datain_values.h"
29 #include "iscsi_target_device.h" 29 #include "iscsi_target_device.h"
30 #include "iscsi_target_tpg.h" 30 #include "iscsi_target_tpg.h"
31 #include "iscsi_target_util.h" 31 #include "iscsi_target_util.h"
32 #include "iscsi_target_erl0.h" 32 #include "iscsi_target_erl0.h"
33 #include "iscsi_target_erl1.h" 33 #include "iscsi_target_erl1.h"
34 #include "iscsi_target_erl2.h" 34 #include "iscsi_target_erl2.h"
35 #include "iscsi_target.h" 35 #include "iscsi_target.h"
36 36
37 #define OFFLOAD_BUF_SIZE 32768 37 #define OFFLOAD_BUF_SIZE 32768
38 38
39 /* 39 /*
40 * Used to dump excess datain payload for certain error recovery 40 * Used to dump excess datain payload for certain error recovery
41 * situations. Receive at most OFFLOAD_BUF_SIZE of datain per rx_data(). 41 * situations. Receive at most OFFLOAD_BUF_SIZE of datain per rx_data().
42 * 42 *
43 * dump_padding_digest denotes if padding and data digests need 43 * dump_padding_digest denotes if padding and data digests need
44 * to be dumped. 44 * to be dumped.
45 */ 45 */
46 int iscsit_dump_data_payload( 46 int iscsit_dump_data_payload(
47 struct iscsi_conn *conn, 47 struct iscsi_conn *conn,
48 u32 buf_len, 48 u32 buf_len,
49 int dump_padding_digest) 49 int dump_padding_digest)
50 { 50 {
51 char *buf, pad_bytes[4]; 51 char *buf, pad_bytes[4];
52 int ret = DATAOUT_WITHIN_COMMAND_RECOVERY, rx_got; 52 int ret = DATAOUT_WITHIN_COMMAND_RECOVERY, rx_got;
53 u32 length, padding, offset = 0, size; 53 u32 length, padding, offset = 0, size;
54 struct kvec iov; 54 struct kvec iov;
55 55
56 length = (buf_len > OFFLOAD_BUF_SIZE) ? OFFLOAD_BUF_SIZE : buf_len; 56 length = (buf_len > OFFLOAD_BUF_SIZE) ? OFFLOAD_BUF_SIZE : buf_len;
57 57
58 buf = kzalloc(length, GFP_ATOMIC); 58 buf = kzalloc(length, GFP_ATOMIC);
59 if (!buf) { 59 if (!buf) {
60 pr_err("Unable to allocate %u bytes for offload" 60 pr_err("Unable to allocate %u bytes for offload"
61 " buffer.\n", length); 61 " buffer.\n", length);
62 return -1; 62 return -1;
63 } 63 }
64 memset(&iov, 0, sizeof(struct kvec)); 64 memset(&iov, 0, sizeof(struct kvec));
65 65
66 while (offset < buf_len) { 66 while (offset < buf_len) {
67 size = ((offset + length) > buf_len) ? 67 size = ((offset + length) > buf_len) ?
68 (buf_len - offset) : length; 68 (buf_len - offset) : length;
69 69
70 iov.iov_len = size; 70 iov.iov_len = size;
71 iov.iov_base = buf; 71 iov.iov_base = buf;
72 72
73 rx_got = rx_data(conn, &iov, 1, size); 73 rx_got = rx_data(conn, &iov, 1, size);
74 if (rx_got != size) { 74 if (rx_got != size) {
75 ret = DATAOUT_CANNOT_RECOVER; 75 ret = DATAOUT_CANNOT_RECOVER;
76 goto out; 76 goto out;
77 } 77 }
78 78
79 offset += size; 79 offset += size;
80 } 80 }
81 81
82 if (!dump_padding_digest) 82 if (!dump_padding_digest)
83 goto out; 83 goto out;
84 84
85 padding = ((-buf_len) & 3); 85 padding = ((-buf_len) & 3);
86 if (padding != 0) { 86 if (padding != 0) {
87 iov.iov_len = padding; 87 iov.iov_len = padding;
88 iov.iov_base = pad_bytes; 88 iov.iov_base = pad_bytes;
89 89
90 rx_got = rx_data(conn, &iov, 1, padding); 90 rx_got = rx_data(conn, &iov, 1, padding);
91 if (rx_got != padding) { 91 if (rx_got != padding) {
92 ret = DATAOUT_CANNOT_RECOVER; 92 ret = DATAOUT_CANNOT_RECOVER;
93 goto out; 93 goto out;
94 } 94 }
95 } 95 }
96 96
97 if (conn->conn_ops->DataDigest) { 97 if (conn->conn_ops->DataDigest) {
98 u32 data_crc; 98 u32 data_crc;
99 99
100 iov.iov_len = ISCSI_CRC_LEN; 100 iov.iov_len = ISCSI_CRC_LEN;
101 iov.iov_base = &data_crc; 101 iov.iov_base = &data_crc;
102 102
103 rx_got = rx_data(conn, &iov, 1, ISCSI_CRC_LEN); 103 rx_got = rx_data(conn, &iov, 1, ISCSI_CRC_LEN);
104 if (rx_got != ISCSI_CRC_LEN) { 104 if (rx_got != ISCSI_CRC_LEN) {
105 ret = DATAOUT_CANNOT_RECOVER; 105 ret = DATAOUT_CANNOT_RECOVER;
106 goto out; 106 goto out;
107 } 107 }
108 } 108 }
109 109
110 out: 110 out:
111 kfree(buf); 111 kfree(buf);
112 return ret; 112 return ret;
113 } 113 }
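
The padding computation in the function above, padding = ((-buf_len) & 3), is the branch-free way to round a length up to the 4-byte boundary iSCSI requires. A small demonstration:

#include <stdio.h>

/* Sketch only: ((-len) & 3) yields the pad-byte count to the next
 * 4-byte boundary without a branch or a division. */
int main(void)
{
        unsigned lens[] = { 8192, 8193, 8194, 8195, 8196 };

        for (int i = 0; i < 5; i++) {
                unsigned padding = (-lens[i]) & 3;
                printf("len %u -> %u pad byte(s)\n", lens[i], padding);
        }
        return 0;
}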
114 114
115 /* 115 /*
116 * Used for retransmitting R2Ts from an R2T SNACK request. 116 * Used for retransmitting R2Ts from an R2T SNACK request.
117 */ 117 */
118 static int iscsit_send_recovery_r2t_for_snack( 118 static int iscsit_send_recovery_r2t_for_snack(
119 struct iscsi_cmd *cmd, 119 struct iscsi_cmd *cmd,
120 struct iscsi_r2t *r2t) 120 struct iscsi_r2t *r2t)
121 { 121 {
122 /* 122 /*
123 * If the struct iscsi_r2t has not been sent yet, we can safely 123 * If the struct iscsi_r2t has not been sent yet, we can safely
124 * ignore retransmission 124 * ignore retransmission
125 * of the R2TSN in question. 125 * of the R2TSN in question.
126 */ 126 */
127 spin_lock_bh(&cmd->r2t_lock); 127 spin_lock_bh(&cmd->r2t_lock);
128 if (!r2t->sent_r2t) { 128 if (!r2t->sent_r2t) {
129 spin_unlock_bh(&cmd->r2t_lock); 129 spin_unlock_bh(&cmd->r2t_lock);
130 return 0; 130 return 0;
131 } 131 }
132 r2t->sent_r2t = 0; 132 r2t->sent_r2t = 0;
133 spin_unlock_bh(&cmd->r2t_lock); 133 spin_unlock_bh(&cmd->r2t_lock);
134 134
135 iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, ISTATE_SEND_R2T); 135 iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, ISTATE_SEND_R2T);
136 136
137 return 0; 137 return 0;
138 } 138 }
139 139
140 static int iscsit_handle_r2t_snack( 140 static int iscsit_handle_r2t_snack(
141 struct iscsi_cmd *cmd, 141 struct iscsi_cmd *cmd,
142 unsigned char *buf, 142 unsigned char *buf,
143 u32 begrun, 143 u32 begrun,
144 u32 runlength) 144 u32 runlength)
145 { 145 {
146 u32 last_r2tsn; 146 u32 last_r2tsn;
147 struct iscsi_r2t *r2t; 147 struct iscsi_r2t *r2t;
148 148
149 /* 149 /*
150 * Make sure the initiator is not requesting retransmission 150 * Make sure the initiator is not requesting retransmission
151 * of R2TSNs already acknowledged by a TMR TASK_REASSIGN. 151 * of R2TSNs already acknowledged by a TMR TASK_REASSIGN.
152 */ 152 */
153 if ((cmd->cmd_flags & ICF_GOT_DATACK_SNACK) && 153 if ((cmd->cmd_flags & ICF_GOT_DATACK_SNACK) &&
154 (begrun <= cmd->acked_data_sn)) { 154 (begrun <= cmd->acked_data_sn)) {
155 pr_err("ITT: 0x%08x, R2T SNACK requesting" 155 pr_err("ITT: 0x%08x, R2T SNACK requesting"
156 " retransmission of R2TSN: 0x%08x to 0x%08x but already" 156 " retransmission of R2TSN: 0x%08x to 0x%08x but already"
157 " acked to R2TSN: 0x%08x by TMR TASK_REASSIGN," 157 " acked to R2TSN: 0x%08x by TMR TASK_REASSIGN,"
158 " protocol error.\n", cmd->init_task_tag, begrun, 158 " protocol error.\n", cmd->init_task_tag, begrun,
159 (begrun + runlength), cmd->acked_data_sn); 159 (begrun + runlength), cmd->acked_data_sn);
160 160
161 return iscsit_add_reject_from_cmd( 161 return iscsit_add_reject_from_cmd(
162 ISCSI_REASON_PROTOCOL_ERROR, 162 ISCSI_REASON_PROTOCOL_ERROR,
163 1, 0, buf, cmd); 163 1, 0, buf, cmd);
164 } 164 }
165 165
166 if (runlength) { 166 if (runlength) {
167 if ((begrun + runlength) > cmd->r2t_sn) { 167 if ((begrun + runlength) > cmd->r2t_sn) {
168 pr_err("Command ITT: 0x%08x received R2T SNACK" 168 pr_err("Command ITT: 0x%08x received R2T SNACK"
169 " with BegRun: 0x%08x, RunLength: 0x%08x, exceeds" 169 " with BegRun: 0x%08x, RunLength: 0x%08x, exceeds"
170 " current R2TSN: 0x%08x, protocol error.\n", 170 " current R2TSN: 0x%08x, protocol error.\n",
171 cmd->init_task_tag, begrun, runlength, cmd->r2t_sn); 171 cmd->init_task_tag, begrun, runlength, cmd->r2t_sn);
172 return iscsit_add_reject_from_cmd( 172 return iscsit_add_reject_from_cmd(
173 ISCSI_REASON_BOOKMARK_INVALID, 1, 0, buf, cmd); 173 ISCSI_REASON_BOOKMARK_INVALID, 1, 0, buf, cmd);
174 } 174 }
175 last_r2tsn = (begrun + runlength); 175 last_r2tsn = (begrun + runlength);
176 } else 176 } else
177 last_r2tsn = cmd->r2t_sn; 177 last_r2tsn = cmd->r2t_sn;
178 178
179 while (begrun < last_r2tsn) { 179 while (begrun < last_r2tsn) {
180 r2t = iscsit_get_holder_for_r2tsn(cmd, begrun); 180 r2t = iscsit_get_holder_for_r2tsn(cmd, begrun);
181 if (!r2t) 181 if (!r2t)
182 return -1; 182 return -1;
183 if (iscsit_send_recovery_r2t_for_snack(cmd, r2t) < 0) 183 if (iscsit_send_recovery_r2t_for_snack(cmd, r2t) < 0)
184 return -1; 184 return -1;
185 185
186 begrun++; 186 begrun++;
187 } 187 }
188 188
189 return 0; 189 return 0;
190 } 190 }
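
The BegRun/RunLength pair decodes as in the loop above: RunLength == 0 means "everything from BegRun up to the current R2TSN", otherwise exactly RunLength sequence numbers starting at BegRun. A standalone sketch of that expansion:

#include <stdio.h>

/* Sketch only: BegRun/RunLength expansion as performed above. */
static void expand_snack(unsigned begrun, unsigned runlength,
                         unsigned current_r2t_sn)
{
        unsigned last = runlength ? begrun + runlength : current_r2t_sn;

        for (unsigned sn = begrun; sn < last; sn++)
                printf("retransmit R2TSN 0x%08x\n", sn);
}

int main(void)
{
        expand_snack(2, 0, 5);  /* retransmits R2TSNs 2, 3 and 4 */
        return 0;
}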
191 191
192 /* 192 /*
193 * Generates Offsets and NextBurstLength based on BegRun and RunLength 193 * Generates Offsets and NextBurstLength based on BegRun and RunLength
194 * carried in a Data SNACK or ExpDataSN in TMR TASK_REASSIGN. 194 * carried in a Data SNACK or ExpDataSN in TMR TASK_REASSIGN.
195 * 195 *
196 * For DataSequenceInOrder=Yes and DataPDUInOrder=[Yes,No] only. 196 * For DataSequenceInOrder=Yes and DataPDUInOrder=[Yes,No] only.
197 * 197 *
198 * FIXME: How is this handled for a RData SNACK? 198 * FIXME: How is this handled for a RData SNACK?
199 */ 199 */
200 int iscsit_create_recovery_datain_values_datasequenceinorder_yes( 200 int iscsit_create_recovery_datain_values_datasequenceinorder_yes(
201 struct iscsi_cmd *cmd, 201 struct iscsi_cmd *cmd,
202 struct iscsi_datain_req *dr) 202 struct iscsi_datain_req *dr)
203 { 203 {
204 u32 data_sn = 0, data_sn_count = 0; 204 u32 data_sn = 0, data_sn_count = 0;
205 u32 pdu_start = 0, seq_no = 0; 205 u32 pdu_start = 0, seq_no = 0;
206 u32 begrun = dr->begrun; 206 u32 begrun = dr->begrun;
207 struct iscsi_conn *conn = cmd->conn; 207 struct iscsi_conn *conn = cmd->conn;
208 208
209 while (begrun > data_sn++) { 209 while (begrun > data_sn++) {
210 data_sn_count++; 210 data_sn_count++;
211 if ((dr->next_burst_len + 211 if ((dr->next_burst_len +
212 conn->conn_ops->MaxRecvDataSegmentLength) < 212 conn->conn_ops->MaxRecvDataSegmentLength) <
213 conn->sess->sess_ops->MaxBurstLength) { 213 conn->sess->sess_ops->MaxBurstLength) {
214 dr->read_data_done += 214 dr->read_data_done +=
215 conn->conn_ops->MaxRecvDataSegmentLength; 215 conn->conn_ops->MaxRecvDataSegmentLength;
216 dr->next_burst_len += 216 dr->next_burst_len +=
217 conn->conn_ops->MaxRecvDataSegmentLength; 217 conn->conn_ops->MaxRecvDataSegmentLength;
218 } else { 218 } else {
219 dr->read_data_done += 219 dr->read_data_done +=
220 (conn->sess->sess_ops->MaxBurstLength - 220 (conn->sess->sess_ops->MaxBurstLength -
221 dr->next_burst_len); 221 dr->next_burst_len);
222 dr->next_burst_len = 0; 222 dr->next_burst_len = 0;
223 pdu_start += data_sn_count; 223 pdu_start += data_sn_count;
224 data_sn_count = 0; 224 data_sn_count = 0;
225 seq_no++; 225 seq_no++;
226 } 226 }
227 } 227 }
228 228
229 if (!conn->sess->sess_ops->DataPDUInOrder) { 229 if (!conn->sess->sess_ops->DataPDUInOrder) {
230 cmd->seq_no = seq_no; 230 cmd->seq_no = seq_no;
231 cmd->pdu_start = pdu_start; 231 cmd->pdu_start = pdu_start;
232 cmd->pdu_send_order = data_sn_count; 232 cmd->pdu_send_order = data_sn_count;
233 } 233 }
234 234
235 return 0; 235 return 0;
236 } 236 }
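
The loop above replays the DataIN stream arithmetic: each DataSN before the BegRun contributed either a full MaxRecvDataSegmentLength or, at a burst boundary, only the remainder of the MaxBurstLength. A userspace rerun with illustrative parameters (8 KB MRDSL, 32 KB MaxBurstLength, BegRun 6):

#include <stdio.h>

/* Sketch only: rebuilding read_data_done from a Data SNACK BegRun
 * for DataSequenceInOrder=Yes, as in the loop above. Parameters
 * are illustrative. */
int main(void)
{
        unsigned mrdsl = 8192, max_burst = 32768;
        unsigned begrun = 6;            /* first DataSN to retransmit */
        unsigned read_data_done = 0, next_burst_len = 0;

        for (unsigned data_sn = 0; data_sn < begrun; data_sn++) {
                if (next_burst_len + mrdsl < max_burst) {
                        read_data_done += mrdsl;
                        next_burst_len += mrdsl;
                } else {
                        /* burst boundary: only the remainder counts */
                        read_data_done += max_burst - next_burst_len;
                        next_burst_len = 0;
                }
        }
        printf("read_data_done=%u next_burst_len=%u\n",
               read_data_done, next_burst_len);
        return 0;
}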
237 237
238 /* 238 /*
239 * Generates Offsets and NextBurstLength based on BegRun and RunLength 239 * Generates Offsets and NextBurstLength based on BegRun and RunLength
240 * carried in a Data SNACK or ExpDataSN in TMR TASK_REASSIGN. 240 * carried in a Data SNACK or ExpDataSN in TMR TASK_REASSIGN.
241 * 241 *
242 * For DataSequenceInOrder=No and DataPDUInOrder=[Yes,No] only. 242 * For DataSequenceInOrder=No and DataPDUInOrder=[Yes,No] only.
243 * 243 *
244 * FIXME: How is this handled for a RData SNACK? 244 * FIXME: How is this handled for a RData SNACK?
245 */ 245 */
246 int iscsit_create_recovery_datain_values_datasequenceinorder_no( 246 int iscsit_create_recovery_datain_values_datasequenceinorder_no(
247 struct iscsi_cmd *cmd, 247 struct iscsi_cmd *cmd,
248 struct iscsi_datain_req *dr) 248 struct iscsi_datain_req *dr)
249 { 249 {
250 int found_seq = 0, i; 250 int found_seq = 0, i;
251 u32 data_sn, read_data_done = 0, seq_send_order = 0; 251 u32 data_sn, read_data_done = 0, seq_send_order = 0;
252 u32 begrun = dr->begrun; 252 u32 begrun = dr->begrun;
253 u32 runlength = dr->runlength; 253 u32 runlength = dr->runlength;
254 struct iscsi_conn *conn = cmd->conn; 254 struct iscsi_conn *conn = cmd->conn;
255 struct iscsi_seq *first_seq = NULL, *seq = NULL; 255 struct iscsi_seq *first_seq = NULL, *seq = NULL;
256 256
257 if (!cmd->seq_list) { 257 if (!cmd->seq_list) {
258 pr_err("struct iscsi_cmd->seq_list is NULL!\n"); 258 pr_err("struct iscsi_cmd->seq_list is NULL!\n");
259 return -1; 259 return -1;
260 } 260 }
261 261
262 /* 262 /*
263 * Calculate read_data_done for all sequences containing a 263 * Calculate read_data_done for all sequences containing a
264 * first_datasn and last_datasn less than the BegRun. 264 * first_datasn and last_datasn less than the BegRun.
265 * 265 *
266 * Locate the struct iscsi_seq the BegRun lies within and calculate 266 * Locate the struct iscsi_seq the BegRun lies within and calculate
267 * NextBurstLength up to the DataSN based on MaxRecvDataSegmentLength. 267 * NextBurstLength up to the DataSN based on MaxRecvDataSegmentLength.
268 * 268 *
269 * Also use struct iscsi_seq->seq_send_order to determine where to start. 269 * Also use struct iscsi_seq->seq_send_order to determine where to start.
270 */ 270 */
271 for (i = 0; i < cmd->seq_count; i++) { 271 for (i = 0; i < cmd->seq_count; i++) {
272 seq = &cmd->seq_list[i]; 272 seq = &cmd->seq_list[i];
273 273
274 if (!seq->seq_send_order) 274 if (!seq->seq_send_order)
275 first_seq = seq; 275 first_seq = seq;
276 276
277 /* 277 /*
278 * No data has been transferred for this DataIN sequence, so the 278 * No data has been transferred for this DataIN sequence, so the
279 * seq->first_datasn and seq->last_datasn have not been set. 279 * seq->first_datasn and seq->last_datasn have not been set.
280 */ 280 */
281 if (!seq->sent) { 281 if (!seq->sent) {
282 #if 0 282 #if 0
283 pr_err("Ignoring non-sent sequence 0x%08x ->" 283 pr_err("Ignoring non-sent sequence 0x%08x ->"
284 " 0x%08x\n\n", seq->first_datasn, 284 " 0x%08x\n\n", seq->first_datasn,
285 seq->last_datasn); 285 seq->last_datasn);
286 #endif 286 #endif
287 continue; 287 continue;
288 } 288 }
289 289
290 /* 290 /*
291 * This DataIN sequence precedes the received BegRun, so add the 291 * This DataIN sequence precedes the received BegRun, so add the
292 * total xfer_len of the sequence to read_data_done and reset 292 * total xfer_len of the sequence to read_data_done and reset
293 * seq->pdu_send_order. 293 * seq->pdu_send_order.
294 */ 294 */
295 if ((seq->first_datasn < begrun) && 295 if ((seq->first_datasn < begrun) &&
296 (seq->last_datasn < begrun)) { 296 (seq->last_datasn < begrun)) {
297 #if 0 297 #if 0
298 pr_err("Pre BegRun sequence 0x%08x ->" 298 pr_err("Pre BegRun sequence 0x%08x ->"
299 " 0x%08x\n", seq->first_datasn, 299 " 0x%08x\n", seq->first_datasn,
300 seq->last_datasn); 300 seq->last_datasn);
301 #endif 301 #endif
302 read_data_done += cmd->seq_list[i].xfer_len; 302 read_data_done += cmd->seq_list[i].xfer_len;
303 seq->next_burst_len = seq->pdu_send_order = 0; 303 seq->next_burst_len = seq->pdu_send_order = 0;
304 continue; 304 continue;
305 } 305 }
306 306
307 /* 307 /*
308 * The BegRun lies within this DataIN sequence. 308 * The BegRun lies within this DataIN sequence.
309 */ 309 */
310 if ((seq->first_datasn <= begrun) && 310 if ((seq->first_datasn <= begrun) &&
311 (seq->last_datasn >= begrun)) { 311 (seq->last_datasn >= begrun)) {
312 #if 0 312 #if 0
313 pr_err("Found sequence begrun: 0x%08x in" 313 pr_err("Found sequence begrun: 0x%08x in"
314 " 0x%08x -> 0x%08x\n", begrun, 314 " 0x%08x -> 0x%08x\n", begrun,
315 seq->first_datasn, seq->last_datasn); 315 seq->first_datasn, seq->last_datasn);
316 #endif 316 #endif
317 seq_send_order = seq->seq_send_order; 317 seq_send_order = seq->seq_send_order;
318 data_sn = seq->first_datasn; 318 data_sn = seq->first_datasn;
319 seq->next_burst_len = seq->pdu_send_order = 0; 319 seq->next_burst_len = seq->pdu_send_order = 0;
320 found_seq = 1; 320 found_seq = 1;
321 321
322 /* 322 /*
323 * For DataPDUInOrder=Yes, while the first DataSN of 323 * For DataPDUInOrder=Yes, while the first DataSN of
324 * the sequence is less than the received BegRun, add 324 * the sequence is less than the received BegRun, add
325 * the MaxRecvDataSegmentLength to read_data_done and 325 * the MaxRecvDataSegmentLength to read_data_done and
326 * to the sequence's next_burst_len. 326 * to the sequence's next_burst_len.
327 * 327 *
328 * For DataPDUInOrder=No, while the first DataSN of the 328 * For DataPDUInOrder=No, while the first DataSN of the
329 * sequence is less than the received BegRun, find the 329 * sequence is less than the received BegRun, find the
330 * struct iscsi_pdu of the DataSN in question and add the 330 * struct iscsi_pdu of the DataSN in question and add the
331 * MaxRecvDataSegmentLength to read_data_done and to the 331 * MaxRecvDataSegmentLength to read_data_done and to the
332 * sequence's next_burst_len. 332 * sequence's next_burst_len.
333 */ 333 */
334 if (conn->sess->sess_ops->DataPDUInOrder) { 334 if (conn->sess->sess_ops->DataPDUInOrder) {
335 while (data_sn < begrun) { 335 while (data_sn < begrun) {
336 seq->pdu_send_order++; 336 seq->pdu_send_order++;
337 read_data_done += 337 read_data_done +=
338 conn->conn_ops->MaxRecvDataSegmentLength; 338 conn->conn_ops->MaxRecvDataSegmentLength;
339 seq->next_burst_len += 339 seq->next_burst_len +=
340 conn->conn_ops->MaxRecvDataSegmentLength; 340 conn->conn_ops->MaxRecvDataSegmentLength;
341 data_sn++; 341 data_sn++;
342 } 342 }
343 } else { 343 } else {
344 int j; 344 int j;
345 struct iscsi_pdu *pdu; 345 struct iscsi_pdu *pdu;
346 346
347 while (data_sn < begrun) { 347 while (data_sn < begrun) {
348 seq->pdu_send_order++; 348 seq->pdu_send_order++;
349 349
350 for (j = 0; j < seq->pdu_count; j++) { 350 for (j = 0; j < seq->pdu_count; j++) {
351 pdu = &cmd->pdu_list[ 351 pdu = &cmd->pdu_list[
352 seq->pdu_start + j]; 352 seq->pdu_start + j];
353 if (pdu->data_sn == data_sn) { 353 if (pdu->data_sn == data_sn) {
354 read_data_done += 354 read_data_done +=
355 pdu->length; 355 pdu->length;
356 seq->next_burst_len += 356 seq->next_burst_len +=
357 pdu->length; 357 pdu->length;
358 } 358 }
359 } 359 }
360 data_sn++; 360 data_sn++;
361 } 361 }
362 } 362 }
363 continue; 363 continue;
364 } 364 }
365 365
366 /* 366 /*
367 * This DataIN sequence comes after the received BegRun; 367 * This DataIN sequence comes after the received BegRun;
368 * reset seq->pdu_send_order and continue. 368 * reset seq->pdu_send_order and continue.
369 */ 369 */
370 if ((seq->first_datasn > begrun) || 370 if ((seq->first_datasn > begrun) ||
371 (seq->last_datasn > begrun)) { 371 (seq->last_datasn > begrun)) {
372 #if 0 372 #if 0
373 pr_err("Post BegRun sequence 0x%08x -> 0x%08x\n", 373 pr_err("Post BegRun sequence 0x%08x -> 0x%08x\n",
374 seq->first_datasn, seq->last_datasn); 374 seq->first_datasn, seq->last_datasn);
375 #endif 375 #endif
376 seq->next_burst_len = seq->pdu_send_order = 0; 376 seq->next_burst_len = seq->pdu_send_order = 0;
377 continue; 377 continue;
378 } 378 }
379 } 379 }
380 380
381 if (!found_seq) { 381 if (!found_seq) {
382 if (!begrun) { 382 if (!begrun) {
383 if (!first_seq) { 383 if (!first_seq) {
384 pr_err("ITT: 0x%08x, Begrun: 0x%08x" 384 pr_err("ITT: 0x%08x, Begrun: 0x%08x"
385 " but first_seq is NULL\n", 385 " but first_seq is NULL\n",
386 cmd->init_task_tag, begrun); 386 cmd->init_task_tag, begrun);
387 return -1; 387 return -1;
388 } 388 }
389 seq_send_order = first_seq->seq_send_order; 389 seq_send_order = first_seq->seq_send_order;
390 seq->next_burst_len = seq->pdu_send_order = 0; 390 seq->next_burst_len = seq->pdu_send_order = 0;
391 goto done; 391 goto done;
392 } 392 }
393 393
394 pr_err("Unable to locate struct iscsi_seq for ITT: 0x%08x," 394 pr_err("Unable to locate struct iscsi_seq for ITT: 0x%08x,"
395 " BegRun: 0x%08x, RunLength: 0x%08x while" 395 " BegRun: 0x%08x, RunLength: 0x%08x while"
396 " DataSequenceInOrder=No and DataPDUInOrder=%s.\n", 396 " DataSequenceInOrder=No and DataPDUInOrder=%s.\n",
397 cmd->init_task_tag, begrun, runlength, 397 cmd->init_task_tag, begrun, runlength,
398 (conn->sess->sess_ops->DataPDUInOrder) ? "Yes" : "No"); 398 (conn->sess->sess_ops->DataPDUInOrder) ? "Yes" : "No");
399 return -1; 399 return -1;
400 } 400 }
401 401
402 done: 402 done:
403 dr->read_data_done = read_data_done; 403 dr->read_data_done = read_data_done;
404 dr->seq_send_order = seq_send_order; 404 dr->seq_send_order = seq_send_order;
405 405
406 return 0; 406 return 0;
407 } 407 }
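For the DataPDUInOrder=Yes case the loop above is pure arithmetic: every DataSN inside the located sequence that precedes the BegRun contributes one full MaxRecvDataSegmentLength PDU to both read_data_done and next_burst_len. A minimal userspace sketch of just that within-sequence accounting (the names here are illustrative, not driver API; preceding sequences would additionally add their whole xfer_len to read_data_done):

#include <stdint.h>

struct recovery_vals {
	uint32_t read_data_done;	/* payload bytes before the BegRun */
	uint32_t next_burst_len;	/* bytes already counted in the burst */
};

/*
 * Walk DataSNs from the sequence's first DataSN up to, but not
 * including, BegRun; DataPDUInOrder=Yes lets us assume full-sized PDUs.
 */
static struct recovery_vals recovery_within_seq(uint32_t first_datasn,
						uint32_t begrun,
						uint32_t mrdsl)
{
	struct recovery_vals v = { 0, 0 };
	uint32_t data_sn;

	for (data_sn = first_datasn; data_sn < begrun; data_sn++) {
		v.read_data_done += mrdsl;
		v.next_burst_len += mrdsl;
	}
	return v;
}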
408 408
409 static int iscsit_handle_recovery_datain( 409 static int iscsit_handle_recovery_datain(
410 struct iscsi_cmd *cmd, 410 struct iscsi_cmd *cmd,
411 unsigned char *buf, 411 unsigned char *buf,
412 u32 begrun, 412 u32 begrun,
413 u32 runlength) 413 u32 runlength)
414 { 414 {
415 struct iscsi_conn *conn = cmd->conn; 415 struct iscsi_conn *conn = cmd->conn;
416 struct iscsi_datain_req *dr; 416 struct iscsi_datain_req *dr;
417 struct se_cmd *se_cmd = &cmd->se_cmd; 417 struct se_cmd *se_cmd = &cmd->se_cmd;
418 418
419 if (!atomic_read(&se_cmd->t_transport_complete)) { 419 if (!atomic_read(&se_cmd->t_transport_complete)) {
420 pr_err("Ignoring ITT: 0x%08x Data SNACK\n", 420 pr_err("Ignoring ITT: 0x%08x Data SNACK\n",
421 cmd->init_task_tag); 421 cmd->init_task_tag);
422 return 0; 422 return 0;
423 } 423 }
424 424
425 /* 425 /*
426 * Make sure the initiator is not requesting retransmission 426 * Make sure the initiator is not requesting retransmission
427 * of DataSNs already acknowledged by a Data ACK SNACK. 427 * of DataSNs already acknowledged by a Data ACK SNACK.
428 */ 428 */
429 if ((cmd->cmd_flags & ICF_GOT_DATACK_SNACK) && 429 if ((cmd->cmd_flags & ICF_GOT_DATACK_SNACK) &&
430 (begrun <= cmd->acked_data_sn)) { 430 (begrun <= cmd->acked_data_sn)) {
431 pr_err("ITT: 0x%08x, Data SNACK requesting" 431 pr_err("ITT: 0x%08x, Data SNACK requesting"
432 " retransmission of DataSN: 0x%08x to 0x%08x but" 432 " retransmission of DataSN: 0x%08x to 0x%08x but"
433 " already acked to DataSN: 0x%08x by Data ACK SNACK," 433 " already acked to DataSN: 0x%08x by Data ACK SNACK,"
434 " protocol error.\n", cmd->init_task_tag, begrun, 434 " protocol error.\n", cmd->init_task_tag, begrun,
435 (begrun + runlength), cmd->acked_data_sn); 435 (begrun + runlength), cmd->acked_data_sn);
436 436
437 return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR, 437 return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR,
438 1, 0, buf, cmd); 438 1, 0, buf, cmd);
439 } 439 }
440 440
441 /* 441 /*
442 * Make sure BegRun and RunLength in the Data SNACK are sane. 442 * Make sure BegRun and RunLength in the Data SNACK are sane.
443 * Note: (cmd->data_sn - 1) will carry the maximum DataSN sent. 443 * Note: (cmd->data_sn - 1) will carry the maximum DataSN sent.
444 */ 444 */
445 if ((begrun + runlength) > (cmd->data_sn - 1)) { 445 if ((begrun + runlength) > (cmd->data_sn - 1)) {
446 pr_err("Initiator requesting BegRun: 0x%08x, RunLength" 446 pr_err("Initiator requesting BegRun: 0x%08x, RunLength"
447 ": 0x%08x greater than maximum DataSN: 0x%08x.\n", 447 ": 0x%08x greater than maximum DataSN: 0x%08x.\n",
448 begrun, runlength, (cmd->data_sn - 1)); 448 begrun, runlength, (cmd->data_sn - 1));
449 return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID, 449 return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
450 1, 0, buf, cmd); 450 1, 0, buf, cmd);
451 } 451 }
452 452
453 dr = iscsit_allocate_datain_req(); 453 dr = iscsit_allocate_datain_req();
454 if (!dr) 454 if (!dr)
455 return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_NO_RESOURCES, 455 return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
456 1, 0, buf, cmd); 456 1, 0, buf, cmd);
457 457
458 dr->data_sn = dr->begrun = begrun; 458 dr->data_sn = dr->begrun = begrun;
459 dr->runlength = runlength; 459 dr->runlength = runlength;
460 dr->generate_recovery_values = 1; 460 dr->generate_recovery_values = 1;
461 dr->recovery = DATAIN_WITHIN_COMMAND_RECOVERY; 461 dr->recovery = DATAIN_WITHIN_COMMAND_RECOVERY;
462 462
463 iscsit_attach_datain_req(cmd, dr); 463 iscsit_attach_datain_req(cmd, dr);
464 464
465 cmd->i_state = ISTATE_SEND_DATAIN; 465 cmd->i_state = ISTATE_SEND_DATAIN;
466 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); 466 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
467 467
468 return 0; 468 return 0;
469 } 469 }
470 470
471 int iscsit_handle_recovery_datain_or_r2t( 471 int iscsit_handle_recovery_datain_or_r2t(
472 struct iscsi_conn *conn, 472 struct iscsi_conn *conn,
473 unsigned char *buf, 473 unsigned char *buf,
474 u32 init_task_tag, 474 u32 init_task_tag,
475 u32 targ_xfer_tag, 475 u32 targ_xfer_tag,
476 u32 begrun, 476 u32 begrun,
477 u32 runlength) 477 u32 runlength)
478 { 478 {
479 struct iscsi_cmd *cmd; 479 struct iscsi_cmd *cmd;
480 480
481 cmd = iscsit_find_cmd_from_itt(conn, init_task_tag); 481 cmd = iscsit_find_cmd_from_itt(conn, init_task_tag);
482 if (!cmd) 482 if (!cmd)
483 return 0; 483 return 0;
484 484
485 /* 485 /*
486 * FIXME: This will not work for bidi commands. 486 * FIXME: This will not work for bidi commands.
487 */ 487 */
488 switch (cmd->data_direction) { 488 switch (cmd->data_direction) {
489 case DMA_TO_DEVICE: 489 case DMA_TO_DEVICE:
490 return iscsit_handle_r2t_snack(cmd, buf, begrun, runlength); 490 return iscsit_handle_r2t_snack(cmd, buf, begrun, runlength);
491 case DMA_FROM_DEVICE: 491 case DMA_FROM_DEVICE:
492 return iscsit_handle_recovery_datain(cmd, buf, begrun, 492 return iscsit_handle_recovery_datain(cmd, buf, begrun,
493 runlength); 493 runlength);
494 default: 494 default:
495 pr_err("Unknown cmd->data_direction: 0x%02x\n", 495 pr_err("Unknown cmd->data_direction: 0x%02x\n",
496 cmd->data_direction); 496 cmd->data_direction);
497 return -1; 497 return -1;
498 } 498 }
499 499
500 return 0; 500 return 0;
501 } 501 }
502 502
503 /* #warning FIXME: Status SNACK needs to be dependent on OPCODE!!! */ 503 /* #warning FIXME: Status SNACK needs to be dependent on OPCODE!!! */
504 int iscsit_handle_status_snack( 504 int iscsit_handle_status_snack(
505 struct iscsi_conn *conn, 505 struct iscsi_conn *conn,
506 u32 init_task_tag, 506 u32 init_task_tag,
507 u32 targ_xfer_tag, 507 u32 targ_xfer_tag,
508 u32 begrun, 508 u32 begrun,
509 u32 runlength) 509 u32 runlength)
510 { 510 {
511 struct iscsi_cmd *cmd = NULL; 511 struct iscsi_cmd *cmd = NULL;
512 u32 last_statsn; 512 u32 last_statsn;
513 int found_cmd; 513 int found_cmd;
514 514
515 if (conn->exp_statsn > begrun) { 515 if (conn->exp_statsn > begrun) {
516 pr_err("Got Status SNACK Begrun: 0x%08x, RunLength:" 516 pr_err("Got Status SNACK Begrun: 0x%08x, RunLength:"
517 " 0x%08x but already got ExpStatSN: 0x%08x on CID:" 517 " 0x%08x but already got ExpStatSN: 0x%08x on CID:"
518 " %hu.\n", begrun, runlength, conn->exp_statsn, 518 " %hu.\n", begrun, runlength, conn->exp_statsn,
519 conn->cid); 519 conn->cid);
520 return 0; 520 return 0;
521 } 521 }
522 522
523 last_statsn = (!runlength) ? conn->stat_sn : (begrun + runlength); 523 last_statsn = (!runlength) ? conn->stat_sn : (begrun + runlength);
524 524
525 while (begrun < last_statsn) { 525 while (begrun < last_statsn) {
526 found_cmd = 0; 526 found_cmd = 0;
527 527
528 spin_lock_bh(&conn->cmd_lock); 528 spin_lock_bh(&conn->cmd_lock);
529 list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) { 529 list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
530 if (cmd->stat_sn == begrun) { 530 if (cmd->stat_sn == begrun) {
531 found_cmd = 1; 531 found_cmd = 1;
532 break; 532 break;
533 } 533 }
534 } 534 }
535 spin_unlock_bh(&conn->cmd_lock); 535 spin_unlock_bh(&conn->cmd_lock);
536 536
537 if (!found_cmd) { 537 if (!found_cmd) {
538 pr_err("Unable to find StatSN: 0x%08x for" 538 pr_err("Unable to find StatSN: 0x%08x for"
539 " a Status SNACK, assuming this was a" 539 " a Status SNACK, assuming this was a"
540 " protactic SNACK for an untransmitted" 540 " protactic SNACK for an untransmitted"
541 " StatSN, ignoring.\n", begrun); 541 " StatSN, ignoring.\n", begrun);
542 begrun++; 542 begrun++;
543 continue; 543 continue;
544 } 544 }
545 545
546 spin_lock_bh(&cmd->istate_lock); 546 spin_lock_bh(&cmd->istate_lock);
547 if (cmd->i_state == ISTATE_SEND_DATAIN) { 547 if (cmd->i_state == ISTATE_SEND_DATAIN) {
548 spin_unlock_bh(&cmd->istate_lock); 548 spin_unlock_bh(&cmd->istate_lock);
549 pr_err("Ignoring Status SNACK for BegRun:" 549 pr_err("Ignoring Status SNACK for BegRun:"
550 " 0x%08x, RunLength: 0x%08x, assuming this was" 550 " 0x%08x, RunLength: 0x%08x, assuming this was"
551 " a protactic SNACK for an untransmitted" 551 " a protactic SNACK for an untransmitted"
552 " StatSN\n", begrun, runlength); 552 " StatSN\n", begrun, runlength);
553 begrun++; 553 begrun++;
554 continue; 554 continue;
555 } 555 }
556 spin_unlock_bh(&cmd->istate_lock); 556 spin_unlock_bh(&cmd->istate_lock);
557 557
558 cmd->i_state = ISTATE_SEND_STATUS_RECOVERY; 558 cmd->i_state = ISTATE_SEND_STATUS_RECOVERY;
559 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); 559 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
560 begrun++; 560 begrun++;
561 } 561 }
562 562
563 return 0; 563 return 0;
564 } 564 }
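The last_statsn selection above follows the iSCSI convention that a RunLength of zero asks for everything from BegRun up to the connection's current StatSN. Reduced to a one-line helper (illustrative only, not part of the driver):

#include <stdint.h>

/* RunLength 0 requests all StatSNs from begrun up to the current StatSN. */
static uint32_t status_snack_last_statsn(uint32_t begrun, uint32_t runlength,
					 uint32_t conn_stat_sn)
{
	return runlength ? begrun + runlength : conn_stat_sn;
}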
565 565
566 int iscsit_handle_data_ack( 566 int iscsit_handle_data_ack(
567 struct iscsi_conn *conn, 567 struct iscsi_conn *conn,
568 u32 targ_xfer_tag, 568 u32 targ_xfer_tag,
569 u32 begrun, 569 u32 begrun,
570 u32 runlength) 570 u32 runlength)
571 { 571 {
572 struct iscsi_cmd *cmd = NULL; 572 struct iscsi_cmd *cmd = NULL;
573 573
574 cmd = iscsit_find_cmd_from_ttt(conn, targ_xfer_tag); 574 cmd = iscsit_find_cmd_from_ttt(conn, targ_xfer_tag);
575 if (!cmd) { 575 if (!cmd) {
576 pr_err("Data ACK SNACK for TTT: 0x%08x is" 576 pr_err("Data ACK SNACK for TTT: 0x%08x is"
577 " invalid.\n", targ_xfer_tag); 577 " invalid.\n", targ_xfer_tag);
578 return -1; 578 return -1;
579 } 579 }
580 580
581 if (begrun <= cmd->acked_data_sn) { 581 if (begrun <= cmd->acked_data_sn) {
582 pr_err("ITT: 0x%08x Data ACK SNACK BegRUN: 0x%08x is" 582 pr_err("ITT: 0x%08x Data ACK SNACK BegRUN: 0x%08x is"
583 " less than the already acked DataSN: 0x%08x.\n", 583 " less than the already acked DataSN: 0x%08x.\n",
584 cmd->init_task_tag, begrun, cmd->acked_data_sn); 584 cmd->init_task_tag, begrun, cmd->acked_data_sn);
585 return -1; 585 return -1;
586 } 586 }
587 587
588 /* 588 /*
589 * For Data ACK SNACK, BegRun is the next expected DataSN. 589 * For Data ACK SNACK, BegRun is the next expected DataSN.
590 * (see iSCSI v19: 10.16.6) 590 * (see iSCSI v19: 10.16.6)
591 */ 591 */
592 cmd->cmd_flags |= ICF_GOT_DATACK_SNACK; 592 cmd->cmd_flags |= ICF_GOT_DATACK_SNACK;
593 cmd->acked_data_sn = (begrun - 1); 593 cmd->acked_data_sn = (begrun - 1);
594 594
595 pr_debug("Received Data ACK SNACK for ITT: 0x%08x," 595 pr_debug("Received Data ACK SNACK for ITT: 0x%08x,"
596 " updated acked DataSN to 0x%08x.\n", 596 " updated acked DataSN to 0x%08x.\n",
597 cmd->init_task_tag, cmd->acked_data_sn); 597 cmd->init_task_tag, cmd->acked_data_sn);
598 598
599 return 0; 599 return 0;
600 } 600 }
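Since a Data ACK SNACK carries the next expected DataSN in BegRun, everything up to begrun - 1 has been acknowledged and may never be re-requested; the check near the top of iscsit_handle_recovery_datain() enforces this. Expressed as a hypothetical predicate (not driver code):

#include <stdbool.h>
#include <stdint.h>

/*
 * A later Data SNACK whose BegRun falls at or below the DataSN already
 * covered by a Data ACK SNACK is a protocol error.
 */
static bool data_snack_allowed(bool got_datack_snack, uint32_t begrun,
			       uint32_t acked_data_sn)
{
	return !(got_datack_snack && begrun <= acked_data_sn);
}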
601 601
602 static int iscsit_send_recovery_r2t( 602 static int iscsit_send_recovery_r2t(
603 struct iscsi_cmd *cmd, 603 struct iscsi_cmd *cmd,
604 u32 offset, 604 u32 offset,
605 u32 xfer_len) 605 u32 xfer_len)
606 { 606 {
607 int ret; 607 int ret;
608 608
609 spin_lock_bh(&cmd->r2t_lock); 609 spin_lock_bh(&cmd->r2t_lock);
610 ret = iscsit_add_r2t_to_list(cmd, offset, xfer_len, 1, 0); 610 ret = iscsit_add_r2t_to_list(cmd, offset, xfer_len, 1, 0);
611 spin_unlock_bh(&cmd->r2t_lock); 611 spin_unlock_bh(&cmd->r2t_lock);
612 612
613 return ret; 613 return ret;
614 } 614 }
615 615
616 int iscsit_dataout_datapduinorder_no_fbit( 616 int iscsit_dataout_datapduinorder_no_fbit(
617 struct iscsi_cmd *cmd, 617 struct iscsi_cmd *cmd,
618 struct iscsi_pdu *pdu) 618 struct iscsi_pdu *pdu)
619 { 619 {
620 int i, send_recovery_r2t = 0, recovery = 0; 620 int i, send_recovery_r2t = 0, recovery = 0;
621 u32 length = 0, offset = 0, pdu_count = 0, xfer_len = 0; 621 u32 length = 0, offset = 0, pdu_count = 0, xfer_len = 0;
622 struct iscsi_conn *conn = cmd->conn; 622 struct iscsi_conn *conn = cmd->conn;
623 struct iscsi_pdu *first_pdu = NULL; 623 struct iscsi_pdu *first_pdu = NULL;
624 624
625 /* 625 /*
626 * Get a struct iscsi_pdu pointer to the first PDU, and the total PDU count 626 * Get a struct iscsi_pdu pointer to the first PDU, and the total PDU count
627 * of the DataOUT sequence. 627 * of the DataOUT sequence.
628 */ 628 */
629 if (conn->sess->sess_ops->DataSequenceInOrder) { 629 if (conn->sess->sess_ops->DataSequenceInOrder) {
630 for (i = 0; i < cmd->pdu_count; i++) { 630 for (i = 0; i < cmd->pdu_count; i++) {
631 if (cmd->pdu_list[i].seq_no == pdu->seq_no) { 631 if (cmd->pdu_list[i].seq_no == pdu->seq_no) {
632 if (!first_pdu) 632 if (!first_pdu)
633 first_pdu = &cmd->pdu_list[i]; 633 first_pdu = &cmd->pdu_list[i];
634 xfer_len += cmd->pdu_list[i].length; 634 xfer_len += cmd->pdu_list[i].length;
635 pdu_count++; 635 pdu_count++;
636 } else if (pdu_count) 636 } else if (pdu_count)
637 break; 637 break;
638 } 638 }
639 } else { 639 } else {
640 struct iscsi_seq *seq = cmd->seq_ptr; 640 struct iscsi_seq *seq = cmd->seq_ptr;
641 641
642 first_pdu = &cmd->pdu_list[seq->pdu_start]; 642 first_pdu = &cmd->pdu_list[seq->pdu_start];
643 pdu_count = seq->pdu_count; 643 pdu_count = seq->pdu_count;
644 } 644 }
645 645
646 if (!first_pdu || !pdu_count) 646 if (!first_pdu || !pdu_count)
647 return DATAOUT_CANNOT_RECOVER; 647 return DATAOUT_CANNOT_RECOVER;
648 648
649 /* 649 /*
650 * Loop through the ending DataOUT Sequence checking each struct iscsi_pdu. 650 * Loop through the ending DataOUT Sequence checking each struct iscsi_pdu.
651 * The following logic batches runs of PDUs that were not received. 651 * The following logic batches runs of PDUs that were not received.
652 */ 652 */
653 for (i = 0; i < pdu_count; i++) { 653 for (i = 0; i < pdu_count; i++) {
654 if (first_pdu[i].status == ISCSI_PDU_RECEIVED_OK) { 654 if (first_pdu[i].status == ISCSI_PDU_RECEIVED_OK) {
655 if (!send_recovery_r2t) 655 if (!send_recovery_r2t)
656 continue; 656 continue;
657 657
658 if (iscsit_send_recovery_r2t(cmd, offset, length) < 0) 658 if (iscsit_send_recovery_r2t(cmd, offset, length) < 0)
659 return DATAOUT_CANNOT_RECOVER; 659 return DATAOUT_CANNOT_RECOVER;
660 660
661 send_recovery_r2t = length = offset = 0; 661 send_recovery_r2t = length = offset = 0;
662 continue; 662 continue;
663 } 663 }
664 /* 664 /*
665 * Set recovery = 1 for any missing, CRC failed, or timed 665 * Set recovery = 1 for any missing, CRC failed, or timed
666 * out PDUs to let the DataOUT logic know that this sequence 666 * out PDUs to let the DataOUT logic know that this sequence
667 * has not been completed yet. 667 * has not been completed yet.
668 * 668 *
669 * Also, only send a Recovery R2T for ISCSI_PDU_NOT_RECEIVED. 669 * Also, only send a Recovery R2T for ISCSI_PDU_NOT_RECEIVED.
670 * We assume if the PDU either failed CRC or timed out 670 * We assume if the PDU either failed CRC or timed out
671 * that a Recovery R2T has already been sent. 671 * that a Recovery R2T has already been sent.
672 */ 672 */
673 recovery = 1; 673 recovery = 1;
674 674
675 if (first_pdu[i].status != ISCSI_PDU_NOT_RECEIVED) 675 if (first_pdu[i].status != ISCSI_PDU_NOT_RECEIVED)
676 continue; 676 continue;
677 677
678 if (!offset) 678 if (!offset)
679 offset = first_pdu[i].offset; 679 offset = first_pdu[i].offset;
680 length += first_pdu[i].length; 680 length += first_pdu[i].length;
681 681
682 send_recovery_r2t = 1; 682 send_recovery_r2t = 1;
683 } 683 }
684 684
685 if (send_recovery_r2t) 685 if (send_recovery_r2t)
686 if (iscsit_send_recovery_r2t(cmd, offset, length) < 0) 686 if (iscsit_send_recovery_r2t(cmd, offset, length) < 0)
687 return DATAOUT_CANNOT_RECOVER; 687 return DATAOUT_CANNOT_RECOVER;
688 688
689 return (!recovery) ? DATAOUT_NORMAL : DATAOUT_WITHIN_COMMAND_RECOVERY; 689 return (!recovery) ? DATAOUT_NORMAL : DATAOUT_WITHIN_COMMAND_RECOVERY;
690 } 690 }
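The batching above amounts to coalescing each run of consecutive ISCSI_PDU_NOT_RECEIVED entries into a single (offset, length) range, one recovery R2T per run; CRC-failed and timed-out PDUs only mark the sequence as still in recovery. A standalone sketch of the coalescing step (illustrative types, not the driver's):

#include <stdint.h>
#include <stdio.h>

enum pdu_status { PDU_RECEIVED_OK, PDU_NOT_RECEIVED };

struct pdu {
	uint32_t offset;
	uint32_t length;
	enum pdu_status status;
};

/* Merge adjacent missing PDUs into one recovery R2T range each. */
static void batch_missing_pdus(const struct pdu *pdus, int count)
{
	uint32_t offset = 0, length = 0;
	int i, run_open = 0;

	for (i = 0; i < count; i++) {
		if (pdus[i].status == PDU_RECEIVED_OK) {
			if (run_open)	/* close out the current run */
				printf("R2T: offset=%u length=%u\n",
				       offset, length);
			run_open = 0;
			offset = length = 0;
			continue;
		}
		if (!run_open)
			offset = pdus[i].offset;
		length += pdus[i].length;
		run_open = 1;
	}
	if (run_open)	/* flush a run that reaches the end */
		printf("R2T: offset=%u length=%u\n", offset, length);
}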
691 691
692 static int iscsit_recalculate_dataout_values( 692 static int iscsit_recalculate_dataout_values(
693 struct iscsi_cmd *cmd, 693 struct iscsi_cmd *cmd,
694 u32 pdu_offset, 694 u32 pdu_offset,
695 u32 pdu_length, 695 u32 pdu_length,
696 u32 *r2t_offset, 696 u32 *r2t_offset,
697 u32 *r2t_length) 697 u32 *r2t_length)
698 { 698 {
699 int i; 699 int i;
700 struct iscsi_conn *conn = cmd->conn; 700 struct iscsi_conn *conn = cmd->conn;
701 struct iscsi_pdu *pdu = NULL; 701 struct iscsi_pdu *pdu = NULL;
702 702
703 if (conn->sess->sess_ops->DataSequenceInOrder) { 703 if (conn->sess->sess_ops->DataSequenceInOrder) {
704 cmd->data_sn = 0; 704 cmd->data_sn = 0;
705 705
706 if (conn->sess->sess_ops->DataPDUInOrder) { 706 if (conn->sess->sess_ops->DataPDUInOrder) {
707 *r2t_offset = cmd->write_data_done; 707 *r2t_offset = cmd->write_data_done;
708 *r2t_length = (cmd->seq_end_offset - 708 *r2t_length = (cmd->seq_end_offset -
709 cmd->write_data_done); 709 cmd->write_data_done);
710 return 0; 710 return 0;
711 } 711 }
712 712
713 *r2t_offset = cmd->seq_start_offset; 713 *r2t_offset = cmd->seq_start_offset;
714 *r2t_length = (cmd->seq_end_offset - cmd->seq_start_offset); 714 *r2t_length = (cmd->seq_end_offset - cmd->seq_start_offset);
715 715
716 for (i = 0; i < cmd->pdu_count; i++) { 716 for (i = 0; i < cmd->pdu_count; i++) {
717 pdu = &cmd->pdu_list[i]; 717 pdu = &cmd->pdu_list[i];
718 718
719 if (pdu->status != ISCSI_PDU_RECEIVED_OK) 719 if (pdu->status != ISCSI_PDU_RECEIVED_OK)
720 continue; 720 continue;
721 721
722 if ((pdu->offset >= cmd->seq_start_offset) && 722 if ((pdu->offset >= cmd->seq_start_offset) &&
723 ((pdu->offset + pdu->length) <= 723 ((pdu->offset + pdu->length) <=
724 cmd->seq_end_offset)) { 724 cmd->seq_end_offset)) {
725 if (!cmd->unsolicited_data) 725 if (!cmd->unsolicited_data)
726 cmd->next_burst_len -= pdu->length; 726 cmd->next_burst_len -= pdu->length;
727 else 727 else
728 cmd->first_burst_len -= pdu->length; 728 cmd->first_burst_len -= pdu->length;
729 729
730 cmd->write_data_done -= pdu->length; 730 cmd->write_data_done -= pdu->length;
731 pdu->status = ISCSI_PDU_NOT_RECEIVED; 731 pdu->status = ISCSI_PDU_NOT_RECEIVED;
732 } 732 }
733 } 733 }
734 } else { 734 } else {
735 struct iscsi_seq *seq = NULL; 735 struct iscsi_seq *seq = NULL;
736 736
737 seq = iscsit_get_seq_holder(cmd, pdu_offset, pdu_length); 737 seq = iscsit_get_seq_holder(cmd, pdu_offset, pdu_length);
738 if (!seq) 738 if (!seq)
739 return -1; 739 return -1;
740 740
741 *r2t_offset = seq->orig_offset; 741 *r2t_offset = seq->orig_offset;
742 *r2t_length = seq->xfer_len; 742 *r2t_length = seq->xfer_len;
743 743
744 cmd->write_data_done -= (seq->offset - seq->orig_offset); 744 cmd->write_data_done -= (seq->offset - seq->orig_offset);
745 if (cmd->immediate_data) 745 if (cmd->immediate_data)
746 cmd->first_burst_len = cmd->write_data_done; 746 cmd->first_burst_len = cmd->write_data_done;
747 747
748 seq->data_sn = 0; 748 seq->data_sn = 0;
749 seq->offset = seq->orig_offset; 749 seq->offset = seq->orig_offset;
750 seq->next_burst_len = 0; 750 seq->next_burst_len = 0;
751 seq->status = DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY; 751 seq->status = DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY;
752 752
753 if (conn->sess->sess_ops->DataPDUInOrder) 753 if (conn->sess->sess_ops->DataPDUInOrder)
754 return 0; 754 return 0;
755 755
756 for (i = 0; i < seq->pdu_count; i++) { 756 for (i = 0; i < seq->pdu_count; i++) {
757 pdu = &cmd->pdu_list[i+seq->pdu_start]; 757 pdu = &cmd->pdu_list[i+seq->pdu_start];
758 758
759 if (pdu->status != ISCSI_PDU_RECEIVED_OK) 759 if (pdu->status != ISCSI_PDU_RECEIVED_OK)
760 continue; 760 continue;
761 761
762 pdu->status = ISCSI_PDU_NOT_RECEIVED; 762 pdu->status = ISCSI_PDU_NOT_RECEIVED;
763 } 763 }
764 } 764 }
765 765
766 return 0; 766 return 0;
767 } 767 }
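In the DataSequenceInOrder=No branch, rewinding a restarted sequence means handing back exactly the bytes received inside it: seq->offset - seq->orig_offset is that progress, so subtracting it from write_data_done lets the recovery R2T re-request the whole burst from its original offset. As a sketch (hypothetical helper):

#include <stdint.h>

/* Bytes received inside the restarted sequence are given back. */
static uint32_t rewind_write_data_done(uint32_t write_data_done,
				       uint32_t seq_offset,
				       uint32_t seq_orig_offset)
{
	return write_data_done - (seq_offset - seq_orig_offset);
}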
768 768
769 int iscsit_recover_dataout_sequence( 769 int iscsit_recover_dataout_sequence(
770 struct iscsi_cmd *cmd, 770 struct iscsi_cmd *cmd,
771 u32 pdu_offset, 771 u32 pdu_offset,
772 u32 pdu_length) 772 u32 pdu_length)
773 { 773 {
774 u32 r2t_length = 0, r2t_offset = 0; 774 u32 r2t_length = 0, r2t_offset = 0;
775 775
776 spin_lock_bh(&cmd->istate_lock); 776 spin_lock_bh(&cmd->istate_lock);
777 cmd->cmd_flags |= ICF_WITHIN_COMMAND_RECOVERY; 777 cmd->cmd_flags |= ICF_WITHIN_COMMAND_RECOVERY;
778 spin_unlock_bh(&cmd->istate_lock); 778 spin_unlock_bh(&cmd->istate_lock);
779 779
780 if (iscsit_recalculate_dataout_values(cmd, pdu_offset, pdu_length, 780 if (iscsit_recalculate_dataout_values(cmd, pdu_offset, pdu_length,
781 &r2t_offset, &r2t_length) < 0) 781 &r2t_offset, &r2t_length) < 0)
782 return DATAOUT_CANNOT_RECOVER; 782 return DATAOUT_CANNOT_RECOVER;
783 783
784 iscsit_send_recovery_r2t(cmd, r2t_offset, r2t_length); 784 iscsit_send_recovery_r2t(cmd, r2t_offset, r2t_length);
785 785
786 return DATAOUT_WITHIN_COMMAND_RECOVERY; 786 return DATAOUT_WITHIN_COMMAND_RECOVERY;
787 } 787 }
788 788
789 static struct iscsi_ooo_cmdsn *iscsit_allocate_ooo_cmdsn(void) 789 static struct iscsi_ooo_cmdsn *iscsit_allocate_ooo_cmdsn(void)
790 { 790 {
791 struct iscsi_ooo_cmdsn *ooo_cmdsn = NULL; 791 struct iscsi_ooo_cmdsn *ooo_cmdsn = NULL;
792 792
793 ooo_cmdsn = kmem_cache_zalloc(lio_ooo_cache, GFP_ATOMIC); 793 ooo_cmdsn = kmem_cache_zalloc(lio_ooo_cache, GFP_ATOMIC);
794 if (!ooo_cmdsn) { 794 if (!ooo_cmdsn) {
795 pr_err("Unable to allocate memory for" 795 pr_err("Unable to allocate memory for"
796 " struct iscsi_ooo_cmdsn.\n"); 796 " struct iscsi_ooo_cmdsn.\n");
797 return NULL; 797 return NULL;
798 } 798 }
799 INIT_LIST_HEAD(&ooo_cmdsn->ooo_list); 799 INIT_LIST_HEAD(&ooo_cmdsn->ooo_list);
800 800
801 return ooo_cmdsn; 801 return ooo_cmdsn;
802 } 802 }
803 803
804 /* 804 /*
805 * Called with sess->cmdsn_mutex held. 805 * Called with sess->cmdsn_mutex held.
806 */ 806 */
807 static int iscsit_attach_ooo_cmdsn( 807 static int iscsit_attach_ooo_cmdsn(
808 struct iscsi_session *sess, 808 struct iscsi_session *sess,
809 struct iscsi_ooo_cmdsn *ooo_cmdsn) 809 struct iscsi_ooo_cmdsn *ooo_cmdsn)
810 { 810 {
811 struct iscsi_ooo_cmdsn *ooo_tail, *ooo_tmp; 811 struct iscsi_ooo_cmdsn *ooo_tail, *ooo_tmp;
812 /* 812 /*
813 * We attach the struct iscsi_ooo_cmdsn entry to the out of order 813 * We attach the struct iscsi_ooo_cmdsn entry to the out of order
814 * list in increasing CmdSN order. 814 * list in increasing CmdSN order.
815 * This allows iscsit_execute_ooo_cmdsns() to detect any 815 * This allows iscsit_execute_ooo_cmdsns() to detect any
816 * additional CmdSN holes while performing delayed execution. 816 * additional CmdSN holes while performing delayed execution.
817 */ 817 */
818 if (list_empty(&sess->sess_ooo_cmdsn_list)) 818 if (list_empty(&sess->sess_ooo_cmdsn_list))
819 list_add_tail(&ooo_cmdsn->ooo_list, 819 list_add_tail(&ooo_cmdsn->ooo_list,
820 &sess->sess_ooo_cmdsn_list); 820 &sess->sess_ooo_cmdsn_list);
821 else { 821 else {
822 ooo_tail = list_entry(sess->sess_ooo_cmdsn_list.prev, 822 ooo_tail = list_entry(sess->sess_ooo_cmdsn_list.prev,
823 typeof(*ooo_tail), ooo_list); 823 typeof(*ooo_tail), ooo_list);
824 /* 824 /*
825 * CmdSN is greater than the tail of the list. 825 * CmdSN is greater than the tail of the list.
826 */ 826 */
827 if (ooo_tail->cmdsn < ooo_cmdsn->cmdsn) 827 if (ooo_tail->cmdsn < ooo_cmdsn->cmdsn)
828 list_add_tail(&ooo_cmdsn->ooo_list, 828 list_add_tail(&ooo_cmdsn->ooo_list,
829 &sess->sess_ooo_cmdsn_list); 829 &sess->sess_ooo_cmdsn_list);
830 else { 830 else {
831 /* 831 /*
832 * CmdSN is either lower than the head, or somewhere 832 * CmdSN is either lower than the head, or somewhere
833 * in the middle. 833 * in the middle.
834 */ 834 */
835 list_for_each_entry(ooo_tmp, &sess->sess_ooo_cmdsn_list, 835 list_for_each_entry(ooo_tmp, &sess->sess_ooo_cmdsn_list,
836 ooo_list) { 836 ooo_list) {
837 if (ooo_tmp->cmdsn < ooo_cmdsn->cmdsn) 837 if (ooo_tmp->cmdsn < ooo_cmdsn->cmdsn)
838 continue; 838 continue;
839 839
840 list_add(&ooo_cmdsn->ooo_list, 840 list_add(&ooo_cmdsn->ooo_list,
841 &ooo_tmp->ooo_list); 841 &ooo_tmp->ooo_list);
842 break; 842 break;
843 } 843 }
844 } 844 }
845 } 845 }
846 846
847 return 0; 847 return 0;
848 } 848 }
849 849
850 /* 850 /*
851 * Removes a struct iscsi_ooo_cmdsn from a session's list, 851 * Removes a struct iscsi_ooo_cmdsn from a session's list,
852 * called with struct iscsi_session->cmdsn_mutex held. 852 * called with struct iscsi_session->cmdsn_mutex held.
853 */ 853 */
854 void iscsit_remove_ooo_cmdsn( 854 void iscsit_remove_ooo_cmdsn(
855 struct iscsi_session *sess, 855 struct iscsi_session *sess,
856 struct iscsi_ooo_cmdsn *ooo_cmdsn) 856 struct iscsi_ooo_cmdsn *ooo_cmdsn)
857 { 857 {
858 list_del(&ooo_cmdsn->ooo_list); 858 list_del(&ooo_cmdsn->ooo_list);
859 kmem_cache_free(lio_ooo_cache, ooo_cmdsn); 859 kmem_cache_free(lio_ooo_cache, ooo_cmdsn);
860 } 860 }
861 861
862 void iscsit_clear_ooo_cmdsns_for_conn(struct iscsi_conn *conn) 862 void iscsit_clear_ooo_cmdsns_for_conn(struct iscsi_conn *conn)
863 { 863 {
864 struct iscsi_ooo_cmdsn *ooo_cmdsn; 864 struct iscsi_ooo_cmdsn *ooo_cmdsn;
865 struct iscsi_session *sess = conn->sess; 865 struct iscsi_session *sess = conn->sess;
866 866
867 mutex_lock(&sess->cmdsn_mutex); 867 mutex_lock(&sess->cmdsn_mutex);
868 list_for_each_entry(ooo_cmdsn, &sess->sess_ooo_cmdsn_list, ooo_list) { 868 list_for_each_entry(ooo_cmdsn, &sess->sess_ooo_cmdsn_list, ooo_list) {
869 if (ooo_cmdsn->cid != conn->cid) 869 if (ooo_cmdsn->cid != conn->cid)
870 continue; 870 continue;
871 871
872 ooo_cmdsn->cmd = NULL; 872 ooo_cmdsn->cmd = NULL;
873 } 873 }
874 mutex_unlock(&sess->cmdsn_mutex); 874 mutex_unlock(&sess->cmdsn_mutex);
875 } 875 }
876 876
877 /* 877 /*
878 * Called with sess->cmdsn_mutex held. 878 * Called with sess->cmdsn_mutex held.
879 */ 879 */
880 int iscsit_execute_ooo_cmdsns(struct iscsi_session *sess) 880 int iscsit_execute_ooo_cmdsns(struct iscsi_session *sess)
881 { 881 {
882 int ooo_count = 0; 882 int ooo_count = 0;
883 struct iscsi_cmd *cmd = NULL; 883 struct iscsi_cmd *cmd = NULL;
884 struct iscsi_ooo_cmdsn *ooo_cmdsn, *ooo_cmdsn_tmp; 884 struct iscsi_ooo_cmdsn *ooo_cmdsn, *ooo_cmdsn_tmp;
885 885
886 list_for_each_entry_safe(ooo_cmdsn, ooo_cmdsn_tmp, 886 list_for_each_entry_safe(ooo_cmdsn, ooo_cmdsn_tmp,
887 &sess->sess_ooo_cmdsn_list, ooo_list) { 887 &sess->sess_ooo_cmdsn_list, ooo_list) {
888 if (ooo_cmdsn->cmdsn != sess->exp_cmd_sn) 888 if (ooo_cmdsn->cmdsn != sess->exp_cmd_sn)
889 continue; 889 continue;
890 890
891 if (!ooo_cmdsn->cmd) { 891 if (!ooo_cmdsn->cmd) {
892 sess->exp_cmd_sn++; 892 sess->exp_cmd_sn++;
893 iscsit_remove_ooo_cmdsn(sess, ooo_cmdsn); 893 iscsit_remove_ooo_cmdsn(sess, ooo_cmdsn);
894 continue; 894 continue;
895 } 895 }
896 896
897 cmd = ooo_cmdsn->cmd; 897 cmd = ooo_cmdsn->cmd;
898 cmd->i_state = cmd->deferred_i_state; 898 cmd->i_state = cmd->deferred_i_state;
899 ooo_count++; 899 ooo_count++;
900 sess->exp_cmd_sn++; 900 sess->exp_cmd_sn++;
901 pr_debug("Executing out of order CmdSN: 0x%08x," 901 pr_debug("Executing out of order CmdSN: 0x%08x,"
902 " incremented ExpCmdSN to 0x%08x.\n", 902 " incremented ExpCmdSN to 0x%08x.\n",
903 cmd->cmd_sn, sess->exp_cmd_sn); 903 cmd->cmd_sn, sess->exp_cmd_sn);
904 904
905 iscsit_remove_ooo_cmdsn(sess, ooo_cmdsn); 905 iscsit_remove_ooo_cmdsn(sess, ooo_cmdsn);
906 906
907 if (iscsit_execute_cmd(cmd, 1) < 0) 907 if (iscsit_execute_cmd(cmd, 1) < 0)
908 return -1; 908 return -1;
909 909
910 continue; 910 continue;
911 } 911 }
912 912
913 return ooo_count; 913 return ooo_count;
914 } 914 }
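Because iscsit_attach_ooo_cmdsn() keeps the list sorted by CmdSN, the walk above effectively drains a contiguous prefix: execution stops advancing once the next queued CmdSN no longer matches ExpCmdSN, which is precisely a remaining hole. A simplified userspace model with a sorted array standing in for the list (illustrative, not driver code):

#include <stdint.h>
#include <stdio.h>

/* Drain queued CmdSNs while they match ExpCmdSN; stop at the first hole. */
static int drain_ooo_prefix(const uint32_t *sorted_cmdsns, int count,
			    uint32_t *exp_cmd_sn)
{
	int executed = 0;

	while (executed < count && sorted_cmdsns[executed] == *exp_cmd_sn) {
		printf("executing CmdSN 0x%08x\n", sorted_cmdsns[executed]);
		(*exp_cmd_sn)++;
		executed++;
	}
	return executed;	/* anything past this index is still queued */
}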
915 915
916 /* 916 /*
917 * Called either: 917 * Called either:
918 * 918 *
919 * 1. With sess->cmdsn_mutex held from iscsit_execute_ooo_cmdsns() 919 * 1. With sess->cmdsn_mutex held from iscsit_execute_ooo_cmdsns()
920 * or iscsit_check_received_cmdsn(). 920 * or iscsit_check_received_cmdsn().
921 * 2. With no locks held directly from iscsi_handle_XXX_pdu() functions 921 * 2. With no locks held directly from iscsi_handle_XXX_pdu() functions
922 * for immediate commands. 922 * for immediate commands.
923 */ 923 */
924 int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo) 924 int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
925 { 925 {
926 struct se_cmd *se_cmd = &cmd->se_cmd; 926 struct se_cmd *se_cmd = &cmd->se_cmd;
927 int lr = 0; 927 int lr = 0;
928 928
929 spin_lock_bh(&cmd->istate_lock); 929 spin_lock_bh(&cmd->istate_lock);
930 if (ooo) 930 if (ooo)
931 cmd->cmd_flags &= ~ICF_OOO_CMDSN; 931 cmd->cmd_flags &= ~ICF_OOO_CMDSN;
932 932
933 switch (cmd->iscsi_opcode) { 933 switch (cmd->iscsi_opcode) {
934 case ISCSI_OP_SCSI_CMD: 934 case ISCSI_OP_SCSI_CMD:
935 /* 935 /*
936 * Go ahead and send the CHECK_CONDITION status for 936 * Go ahead and send the CHECK_CONDITION status for
937 * any SCSI CDB exceptions that may have occurred, also 937 * any SCSI CDB exceptions that may have occurred, also
938 * handle the SCF_SCSI_RESERVATION_CONFLICT case here as well. 938 * handle the SCF_SCSI_RESERVATION_CONFLICT case here as well.
939 */ 939 */
940 if (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION) { 940 if (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION) {
941 if (se_cmd->scsi_sense_reason == TCM_RESERVATION_CONFLICT) { 941 if (se_cmd->scsi_sense_reason == TCM_RESERVATION_CONFLICT) {
942 cmd->i_state = ISTATE_SEND_STATUS; 942 cmd->i_state = ISTATE_SEND_STATUS;
943 spin_unlock_bh(&cmd->istate_lock); 943 spin_unlock_bh(&cmd->istate_lock);
944 iscsit_add_cmd_to_response_queue(cmd, cmd->conn, 944 iscsit_add_cmd_to_response_queue(cmd, cmd->conn,
945 cmd->i_state); 945 cmd->i_state);
946 return 0; 946 return 0;
947 } 947 }
948 spin_unlock_bh(&cmd->istate_lock); 948 spin_unlock_bh(&cmd->istate_lock);
949 /* 949 /*
950 * Determine if delayed TASK_ABORTED status for WRITEs 950 * Determine if delayed TASK_ABORTED status for WRITEs
951 * should be sent now if no unsolicited data out 951 * should be sent now if no unsolicited data out
952 * payloads are expected, or if the delayed status 952 * payloads are expected, or if the delayed status
953 * should be sent after unsolicited data out with 953 * should be sent after unsolicited data out with
954 * ISCSI_FLAG_CMD_FINAL set in iscsi_handle_data_out() 954 * ISCSI_FLAG_CMD_FINAL set in iscsi_handle_data_out()
955 */ 955 */
956 if (transport_check_aborted_status(se_cmd, 956 if (transport_check_aborted_status(se_cmd,
957 (cmd->unsolicited_data == 0)) != 0) 957 (cmd->unsolicited_data == 0)) != 0)
958 return 0; 958 return 0;
959 /* 959 /*
960 * Otherwise send CHECK_CONDITION and sense for 960 * Otherwise send CHECK_CONDITION and sense for
961 * exception 961 * exception
962 */ 962 */
963 return transport_send_check_condition_and_sense(se_cmd, 963 return transport_send_check_condition_and_sense(se_cmd,
964 se_cmd->scsi_sense_reason, 0); 964 se_cmd->scsi_sense_reason, 0);
965 } 965 }
966 /* 966 /*
967 * Special case for delayed CmdSN with Immediate 967 * Special case for delayed CmdSN with Immediate
968 * Data and/or Unsolicited Data Out attached. 968 * Data and/or Unsolicited Data Out attached.
969 */ 969 */
970 if (cmd->immediate_data) { 970 if (cmd->immediate_data) {
971 if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) { 971 if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) {
972 spin_unlock_bh(&cmd->istate_lock); 972 spin_unlock_bh(&cmd->istate_lock);
973 return transport_generic_handle_data( 973 return transport_generic_handle_data(
974 &cmd->se_cmd); 974 &cmd->se_cmd);
975 } 975 }
976 spin_unlock_bh(&cmd->istate_lock); 976 spin_unlock_bh(&cmd->istate_lock);
977 977
978 if (!(cmd->cmd_flags & 978 if (!(cmd->cmd_flags &
979 ICF_NON_IMMEDIATE_UNSOLICITED_DATA)) { 979 ICF_NON_IMMEDIATE_UNSOLICITED_DATA)) {
980 /* 980 /*
981 * Send the delayed TASK_ABORTED status for 981 * Send the delayed TASK_ABORTED status for
982 * WRITEs if no more unsolicited data is 982 * WRITEs if no more unsolicited data is
983 * expected. 983 * expected.
984 */ 984 */
985 if (transport_check_aborted_status(se_cmd, 1) 985 if (transport_check_aborted_status(se_cmd, 1)
986 != 0) 986 != 0)
987 return 0; 987 return 0;
988 988
989 iscsit_set_dataout_sequence_values(cmd); 989 iscsit_set_dataout_sequence_values(cmd);
990 iscsit_build_r2ts_for_cmd(cmd, cmd->conn, 0); 990 iscsit_build_r2ts_for_cmd(cmd, cmd->conn, 0);
991 } 991 }
992 return 0; 992 return 0;
993 } 993 }
994 /* 994 /*
995 * The default handler. 995 * The default handler.
996 */ 996 */
997 spin_unlock_bh(&cmd->istate_lock); 997 spin_unlock_bh(&cmd->istate_lock);
998 998
999 if ((cmd->data_direction == DMA_TO_DEVICE) && 999 if ((cmd->data_direction == DMA_TO_DEVICE) &&
1000 !(cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA)) { 1000 !(cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA)) {
1001 /* 1001 /*
1002 * Send the delayed TASK_ABORTED status for WRITEs if 1002 * Send the delayed TASK_ABORTED status for WRITEs if
1003 * no more unsolicited data is expected. 1003 * no more unsolicited data is expected.
1004 */ 1004 */
1005 if (transport_check_aborted_status(se_cmd, 1) != 0) 1005 if (transport_check_aborted_status(se_cmd, 1) != 0)
1006 return 0; 1006 return 0;
1007 1007
1008 iscsit_set_dataout_sequence_values(cmd); 1008 iscsit_set_dataout_sequence_values(cmd);
1009 spin_lock_bh(&cmd->dataout_timeout_lock); 1009 spin_lock_bh(&cmd->dataout_timeout_lock);
1010 iscsit_start_dataout_timer(cmd, cmd->conn); 1010 iscsit_start_dataout_timer(cmd, cmd->conn);
1011 spin_unlock_bh(&cmd->dataout_timeout_lock); 1011 spin_unlock_bh(&cmd->dataout_timeout_lock);
1012 } 1012 }
1013 return transport_handle_cdb_direct(&cmd->se_cmd); 1013 return transport_handle_cdb_direct(&cmd->se_cmd);
1014 1014
1015 case ISCSI_OP_NOOP_OUT: 1015 case ISCSI_OP_NOOP_OUT:
1016 case ISCSI_OP_TEXT: 1016 case ISCSI_OP_TEXT:
1017 spin_unlock_bh(&cmd->istate_lock); 1017 spin_unlock_bh(&cmd->istate_lock);
1018 iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state); 1018 iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
1019 break; 1019 break;
1020 case ISCSI_OP_SCSI_TMFUNC: 1020 case ISCSI_OP_SCSI_TMFUNC:
1021 if (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION) { 1021 if (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION) {
1022 spin_unlock_bh(&cmd->istate_lock); 1022 spin_unlock_bh(&cmd->istate_lock);
1023 iscsit_add_cmd_to_response_queue(cmd, cmd->conn, 1023 iscsit_add_cmd_to_response_queue(cmd, cmd->conn,
1024 cmd->i_state); 1024 cmd->i_state);
1025 return 0; 1025 return 0;
1026 } 1026 }
1027 spin_unlock_bh(&cmd->istate_lock); 1027 spin_unlock_bh(&cmd->istate_lock);
1028 1028
1029 return transport_generic_handle_tmr(&cmd->se_cmd); 1029 return transport_generic_handle_tmr(&cmd->se_cmd);
1030 case ISCSI_OP_LOGOUT: 1030 case ISCSI_OP_LOGOUT:
1031 spin_unlock_bh(&cmd->istate_lock); 1031 spin_unlock_bh(&cmd->istate_lock);
1032 switch (cmd->logout_reason) { 1032 switch (cmd->logout_reason) {
1033 case ISCSI_LOGOUT_REASON_CLOSE_SESSION: 1033 case ISCSI_LOGOUT_REASON_CLOSE_SESSION:
1034 lr = iscsit_logout_closesession(cmd, cmd->conn); 1034 lr = iscsit_logout_closesession(cmd, cmd->conn);
1035 break; 1035 break;
1036 case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION: 1036 case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION:
1037 lr = iscsit_logout_closeconnection(cmd, cmd->conn); 1037 lr = iscsit_logout_closeconnection(cmd, cmd->conn);
1038 break; 1038 break;
1039 case ISCSI_LOGOUT_REASON_RECOVERY: 1039 case ISCSI_LOGOUT_REASON_RECOVERY:
1040 lr = iscsit_logout_removeconnforrecovery(cmd, cmd->conn); 1040 lr = iscsit_logout_removeconnforrecovery(cmd, cmd->conn);
1041 break; 1041 break;
1042 default: 1042 default:
1043 pr_err("Unknown iSCSI Logout Request Code:" 1043 pr_err("Unknown iSCSI Logout Request Code:"
1044 " 0x%02x\n", cmd->logout_reason); 1044 " 0x%02x\n", cmd->logout_reason);
1045 return -1; 1045 return -1;
1046 } 1046 }
1047 1047
1048 return lr; 1048 return lr;
1049 default: 1049 default:
1050 spin_unlock_bh(&cmd->istate_lock); 1050 spin_unlock_bh(&cmd->istate_lock);
1051 pr_err("Cannot perform out of order execution for" 1051 pr_err("Cannot perform out of order execution for"
1052 " unknown iSCSI Opcode: 0x%02x\n", cmd->iscsi_opcode); 1052 " unknown iSCSI Opcode: 0x%02x\n", cmd->iscsi_opcode);
1053 return -1; 1053 return -1;
1054 } 1054 }
1055 1055
1056 return 0; 1056 return 0;
1057 } 1057 }
1058 1058
1059 void iscsit_free_all_ooo_cmdsns(struct iscsi_session *sess) 1059 void iscsit_free_all_ooo_cmdsns(struct iscsi_session *sess)
1060 { 1060 {
1061 struct iscsi_ooo_cmdsn *ooo_cmdsn, *ooo_cmdsn_tmp; 1061 struct iscsi_ooo_cmdsn *ooo_cmdsn, *ooo_cmdsn_tmp;
1062 1062
1063 mutex_lock(&sess->cmdsn_mutex); 1063 mutex_lock(&sess->cmdsn_mutex);
1064 list_for_each_entry_safe(ooo_cmdsn, ooo_cmdsn_tmp, 1064 list_for_each_entry_safe(ooo_cmdsn, ooo_cmdsn_tmp,
1065 &sess->sess_ooo_cmdsn_list, ooo_list) { 1065 &sess->sess_ooo_cmdsn_list, ooo_list) {
1066 1066
1067 list_del(&ooo_cmdsn->ooo_list); 1067 list_del(&ooo_cmdsn->ooo_list);
1068 kmem_cache_free(lio_ooo_cache, ooo_cmdsn); 1068 kmem_cache_free(lio_ooo_cache, ooo_cmdsn);
1069 } 1069 }
1070 mutex_unlock(&sess->cmdsn_mutex); 1070 mutex_unlock(&sess->cmdsn_mutex);
1071 } 1071 }
1072 1072
1073 int iscsit_handle_ooo_cmdsn( 1073 int iscsit_handle_ooo_cmdsn(
1074 struct iscsi_session *sess, 1074 struct iscsi_session *sess,
1075 struct iscsi_cmd *cmd, 1075 struct iscsi_cmd *cmd,
1076 u32 cmdsn) 1076 u32 cmdsn)
1077 { 1077 {
1078 int batch = 0; 1078 int batch = 0;
1079 struct iscsi_ooo_cmdsn *ooo_cmdsn = NULL, *ooo_tail = NULL; 1079 struct iscsi_ooo_cmdsn *ooo_cmdsn = NULL, *ooo_tail = NULL;
1080 1080
1081 cmd->deferred_i_state = cmd->i_state; 1081 cmd->deferred_i_state = cmd->i_state;
1082 cmd->i_state = ISTATE_DEFERRED_CMD; 1082 cmd->i_state = ISTATE_DEFERRED_CMD;
1083 cmd->cmd_flags |= ICF_OOO_CMDSN; 1083 cmd->cmd_flags |= ICF_OOO_CMDSN;
1084 1084
1085 if (list_empty(&sess->sess_ooo_cmdsn_list)) 1085 if (list_empty(&sess->sess_ooo_cmdsn_list))
1086 batch = 1; 1086 batch = 1;
1087 else { 1087 else {
1088 ooo_tail = list_entry(sess->sess_ooo_cmdsn_list.prev, 1088 ooo_tail = list_entry(sess->sess_ooo_cmdsn_list.prev,
1089 typeof(*ooo_tail), ooo_list); 1089 typeof(*ooo_tail), ooo_list);
1090 if (ooo_tail->cmdsn != (cmdsn - 1)) 1090 if (ooo_tail->cmdsn != (cmdsn - 1))
1091 batch = 1; 1091 batch = 1;
1092 } 1092 }
1093 1093
1094 ooo_cmdsn = iscsit_allocate_ooo_cmdsn(); 1094 ooo_cmdsn = iscsit_allocate_ooo_cmdsn();
1095 if (!ooo_cmdsn) 1095 if (!ooo_cmdsn)
1096 return CMDSN_ERROR_CANNOT_RECOVER; 1096 return CMDSN_ERROR_CANNOT_RECOVER;
1097 1097
1098 ooo_cmdsn->cmd = cmd; 1098 ooo_cmdsn->cmd = cmd;
1099 ooo_cmdsn->batch_count = (batch) ? 1099 ooo_cmdsn->batch_count = (batch) ?
1100 (cmdsn - sess->exp_cmd_sn) : 1; 1100 (cmdsn - sess->exp_cmd_sn) : 1;
1101 ooo_cmdsn->cid = cmd->conn->cid; 1101 ooo_cmdsn->cid = cmd->conn->cid;
1102 ooo_cmdsn->exp_cmdsn = sess->exp_cmd_sn; 1102 ooo_cmdsn->exp_cmdsn = sess->exp_cmd_sn;
1103 ooo_cmdsn->cmdsn = cmdsn; 1103 ooo_cmdsn->cmdsn = cmdsn;
1104 1104
1105 if (iscsit_attach_ooo_cmdsn(sess, ooo_cmdsn) < 0) { 1105 if (iscsit_attach_ooo_cmdsn(sess, ooo_cmdsn) < 0) {
1106 kmem_cache_free(lio_ooo_cache, ooo_cmdsn); 1106 kmem_cache_free(lio_ooo_cache, ooo_cmdsn);
1107 return CMDSN_ERROR_CANNOT_RECOVER; 1107 return CMDSN_ERROR_CANNOT_RECOVER;
1108 } 1108 }
1109 1109
1110 return CMDSN_HIGHER_THAN_EXP; 1110 return CMDSN_HIGHER_THAN_EXP;
1111 } 1111 }
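The batch accounting above charges the entire CmdSN gap back to ExpCmdSN to a new entry whenever it does not directly extend the tail of the pending list, and counts it as one otherwise. Reduced to a helper with illustrative names:

#include <stdbool.h>
#include <stdint.h>

static uint32_t ooo_batch_count(bool list_empty, uint32_t tail_cmdsn,
				uint32_t cmdsn, uint32_t exp_cmd_sn)
{
	bool batch = list_empty || tail_cmdsn != cmdsn - 1;

	return batch ? cmdsn - exp_cmd_sn : 1;
}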
1112 1112
1113 static int iscsit_set_dataout_timeout_values( 1113 static int iscsit_set_dataout_timeout_values(
1114 struct iscsi_cmd *cmd, 1114 struct iscsi_cmd *cmd,
1115 u32 *offset, 1115 u32 *offset,
1116 u32 *length) 1116 u32 *length)
1117 { 1117 {
1118 struct iscsi_conn *conn = cmd->conn; 1118 struct iscsi_conn *conn = cmd->conn;
1119 struct iscsi_r2t *r2t; 1119 struct iscsi_r2t *r2t;
1120 1120
1121 if (cmd->unsolicited_data) { 1121 if (cmd->unsolicited_data) {
1122 *offset = 0; 1122 *offset = 0;
1123 *length = (conn->sess->sess_ops->FirstBurstLength > 1123 *length = (conn->sess->sess_ops->FirstBurstLength >
1124 cmd->data_length) ? 1124 cmd->data_length) ?
1125 cmd->data_length : 1125 cmd->data_length :
1126 conn->sess->sess_ops->FirstBurstLength; 1126 conn->sess->sess_ops->FirstBurstLength;
1127 return 0; 1127 return 0;
1128 } 1128 }
1129 1129
1130 spin_lock_bh(&cmd->r2t_lock); 1130 spin_lock_bh(&cmd->r2t_lock);
1131 if (list_empty(&cmd->cmd_r2t_list)) { 1131 if (list_empty(&cmd->cmd_r2t_list)) {
1132 pr_err("cmd->cmd_r2t_list is empty!\n"); 1132 pr_err("cmd->cmd_r2t_list is empty!\n");
1133 spin_unlock_bh(&cmd->r2t_lock); 1133 spin_unlock_bh(&cmd->r2t_lock);
1134 return -1; 1134 return -1;
1135 } 1135 }
1136 1136
1137 list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) { 1137 list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
1138 if (r2t->sent_r2t && !r2t->recovery_r2t && !r2t->seq_complete) { 1138 if (r2t->sent_r2t && !r2t->recovery_r2t && !r2t->seq_complete) {
1139 *offset = r2t->offset; 1139 *offset = r2t->offset;
1140 *length = r2t->xfer_len; 1140 *length = r2t->xfer_len;
1141 spin_unlock_bh(&cmd->r2t_lock); 1141 spin_unlock_bh(&cmd->r2t_lock);
1142 return 0; 1142 return 0;
1143 } 1143 }
1144 } 1144 }
1145 spin_unlock_bh(&cmd->r2t_lock); 1145 spin_unlock_bh(&cmd->r2t_lock);
1146 1146
1147 pr_err("Unable to locate any incomplete DataOUT" 1147 pr_err("Unable to locate any incomplete DataOUT"
1148 " sequences for ITT: 0x%08x.\n", cmd->init_task_tag); 1148 " sequences for ITT: 0x%08x.\n", cmd->init_task_tag);
1149 1149
1150 return -1; 1150 return -1;
1151 } 1151 }
1152 1152
1153 /* 1153 /*
1154 * NOTE: Called from interrupt (timer) context. 1154 * NOTE: Called from interrupt (timer) context.
1155 */ 1155 */
1156 static void iscsit_handle_dataout_timeout(unsigned long data) 1156 static void iscsit_handle_dataout_timeout(unsigned long data)
1157 { 1157 {
1158 u32 pdu_length = 0, pdu_offset = 0; 1158 u32 pdu_length = 0, pdu_offset = 0;
1159 u32 r2t_length = 0, r2t_offset = 0; 1159 u32 r2t_length = 0, r2t_offset = 0;
1160 struct iscsi_cmd *cmd = (struct iscsi_cmd *) data; 1160 struct iscsi_cmd *cmd = (struct iscsi_cmd *) data;
1161 struct iscsi_conn *conn = cmd->conn; 1161 struct iscsi_conn *conn = cmd->conn;
1162 struct iscsi_session *sess = NULL; 1162 struct iscsi_session *sess = NULL;
1163 struct iscsi_node_attrib *na; 1163 struct iscsi_node_attrib *na;
1164 1164
1165 iscsit_inc_conn_usage_count(conn); 1165 iscsit_inc_conn_usage_count(conn);
1166 1166
1167 spin_lock_bh(&cmd->dataout_timeout_lock); 1167 spin_lock_bh(&cmd->dataout_timeout_lock);
1168 if (cmd->dataout_timer_flags & ISCSI_TF_STOP) { 1168 if (cmd->dataout_timer_flags & ISCSI_TF_STOP) {
1169 spin_unlock_bh(&cmd->dataout_timeout_lock); 1169 spin_unlock_bh(&cmd->dataout_timeout_lock);
1170 iscsit_dec_conn_usage_count(conn); 1170 iscsit_dec_conn_usage_count(conn);
1171 return; 1171 return;
1172 } 1172 }
1173 cmd->dataout_timer_flags &= ~ISCSI_TF_RUNNING; 1173 cmd->dataout_timer_flags &= ~ISCSI_TF_RUNNING;
1174 sess = conn->sess; 1174 sess = conn->sess;
1175 na = iscsit_tpg_get_node_attrib(sess); 1175 na = iscsit_tpg_get_node_attrib(sess);
1176 1176
1177 if (!sess->sess_ops->ErrorRecoveryLevel) { 1177 if (!sess->sess_ops->ErrorRecoveryLevel) {
1178 pr_debug("Unable to recover from DataOut timeout while" 1178 pr_debug("Unable to recover from DataOut timeout while"
1179 " in ERL=0.\n"); 1179 " in ERL=0.\n");
1180 goto failure; 1180 goto failure;
1181 } 1181 }
1182 1182
1183 if (++cmd->dataout_timeout_retries == na->dataout_timeout_retries) { 1183 if (++cmd->dataout_timeout_retries == na->dataout_timeout_retries) {
1184 pr_debug("Command ITT: 0x%08x exceeded max retries" 1184 pr_debug("Command ITT: 0x%08x exceeded max retries"
1185 " for DataOUT timeout %u, closing iSCSI connection.\n", 1185 " for DataOUT timeout %u, closing iSCSI connection.\n",
1186 cmd->init_task_tag, na->dataout_timeout_retries); 1186 cmd->init_task_tag, na->dataout_timeout_retries);
1187 goto failure; 1187 goto failure;
1188 } 1188 }
1189 1189
1190 cmd->cmd_flags |= ICF_WITHIN_COMMAND_RECOVERY; 1190 cmd->cmd_flags |= ICF_WITHIN_COMMAND_RECOVERY;
1191 1191
1192 if (conn->sess->sess_ops->DataSequenceInOrder) { 1192 if (conn->sess->sess_ops->DataSequenceInOrder) {
1193 if (conn->sess->sess_ops->DataPDUInOrder) { 1193 if (conn->sess->sess_ops->DataPDUInOrder) {
1194 pdu_offset = cmd->write_data_done; 1194 pdu_offset = cmd->write_data_done;
1195 if ((pdu_offset + (conn->sess->sess_ops->MaxBurstLength - 1195 if ((pdu_offset + (conn->sess->sess_ops->MaxBurstLength -
1196 cmd->next_burst_len)) > cmd->data_length) 1196 cmd->next_burst_len)) > cmd->data_length)
1197 pdu_length = (cmd->data_length - 1197 pdu_length = (cmd->data_length -
1198 cmd->write_data_done); 1198 cmd->write_data_done);
1199 else 1199 else
1200 pdu_length = (conn->sess->sess_ops->MaxBurstLength - 1200 pdu_length = (conn->sess->sess_ops->MaxBurstLength -
1201 cmd->next_burst_len); 1201 cmd->next_burst_len);
1202 } else { 1202 } else {
1203 pdu_offset = cmd->seq_start_offset; 1203 pdu_offset = cmd->seq_start_offset;
1204 pdu_length = (cmd->seq_end_offset - 1204 pdu_length = (cmd->seq_end_offset -
1205 cmd->seq_start_offset); 1205 cmd->seq_start_offset);
1206 } 1206 }
1207 } else { 1207 } else {
1208 if (iscsit_set_dataout_timeout_values(cmd, &pdu_offset, 1208 if (iscsit_set_dataout_timeout_values(cmd, &pdu_offset,
1209 &pdu_length) < 0) 1209 &pdu_length) < 0)
1210 goto failure; 1210 goto failure;
1211 } 1211 }
1212 1212
1213 if (iscsit_recalculate_dataout_values(cmd, pdu_offset, pdu_length, 1213 if (iscsit_recalculate_dataout_values(cmd, pdu_offset, pdu_length,
1214 &r2t_offset, &r2t_length) < 0) 1214 &r2t_offset, &r2t_length) < 0)
1215 goto failure; 1215 goto failure;
1216 1216
1217 pr_debug("Command ITT: 0x%08x timed out waiting for" 1217 pr_debug("Command ITT: 0x%08x timed out waiting for"
1218 " completion of %sDataOUT Sequence Offset: %u, Length: %u\n", 1218 " completion of %sDataOUT Sequence Offset: %u, Length: %u\n",
1219 cmd->init_task_tag, (cmd->unsolicited_data) ? "Unsolicited " : 1219 cmd->init_task_tag, (cmd->unsolicited_data) ? "Unsolicited " :
1220 "", r2t_offset, r2t_length); 1220 "", r2t_offset, r2t_length);
1221 1221
1222 if (iscsit_send_recovery_r2t(cmd, r2t_offset, r2t_length) < 0) 1222 if (iscsit_send_recovery_r2t(cmd, r2t_offset, r2t_length) < 0)
1223 goto failure; 1223 goto failure;
1224 1224
1225 iscsit_start_dataout_timer(cmd, conn); 1225 iscsit_start_dataout_timer(cmd, conn);
1226 spin_unlock_bh(&cmd->dataout_timeout_lock); 1226 spin_unlock_bh(&cmd->dataout_timeout_lock);
1227 iscsit_dec_conn_usage_count(conn); 1227 iscsit_dec_conn_usage_count(conn);
1228 1228
1229 return; 1229 return;
1230 1230
1231 failure: 1231 failure:
1232 spin_unlock_bh(&cmd->dataout_timeout_lock); 1232 spin_unlock_bh(&cmd->dataout_timeout_lock);
1233 iscsit_cause_connection_reinstatement(conn, 0); 1233 iscsit_cause_connection_reinstatement(conn, 0);
1234 iscsit_dec_conn_usage_count(conn); 1234 iscsit_dec_conn_usage_count(conn);
1235 } 1235 }
1236 1236
1237 void iscsit_mod_dataout_timer(struct iscsi_cmd *cmd) 1237 void iscsit_mod_dataout_timer(struct iscsi_cmd *cmd)
1238 { 1238 {
1239 struct iscsi_conn *conn = cmd->conn; 1239 struct iscsi_conn *conn = cmd->conn;
1240 struct iscsi_session *sess = conn->sess; 1240 struct iscsi_session *sess = conn->sess;
1241 struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess); 1241 struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
1242 1242
1243 spin_lock_bh(&cmd->dataout_timeout_lock); 1243 spin_lock_bh(&cmd->dataout_timeout_lock);
1244 if (!(cmd->dataout_timer_flags & ISCSI_TF_RUNNING)) { 1244 if (!(cmd->dataout_timer_flags & ISCSI_TF_RUNNING)) {
1245 spin_unlock_bh(&cmd->dataout_timeout_lock); 1245 spin_unlock_bh(&cmd->dataout_timeout_lock);
1246 return; 1246 return;
1247 } 1247 }
1248 1248
1249 mod_timer(&cmd->dataout_timer, 1249 mod_timer(&cmd->dataout_timer,
1250 (get_jiffies_64() + na->dataout_timeout * HZ)); 1250 (get_jiffies_64() + na->dataout_timeout * HZ));
1251 pr_debug("Updated DataOUT timer for ITT: 0x%08x", 1251 pr_debug("Updated DataOUT timer for ITT: 0x%08x",
1252 cmd->init_task_tag); 1252 cmd->init_task_tag);
1253 spin_unlock_bh(&cmd->dataout_timeout_lock); 1253 spin_unlock_bh(&cmd->dataout_timeout_lock);
1254 } 1254 }
1255 1255
1256 /* 1256 /*
1257 * Called with cmd->dataout_timeout_lock held. 1257 * Called with cmd->dataout_timeout_lock held.
1258 */ 1258 */
1259 void iscsit_start_dataout_timer( 1259 void iscsit_start_dataout_timer(
1260 struct iscsi_cmd *cmd, 1260 struct iscsi_cmd *cmd,
1261 struct iscsi_conn *conn) 1261 struct iscsi_conn *conn)
1262 { 1262 {
1263 struct iscsi_session *sess = conn->sess; 1263 struct iscsi_session *sess = conn->sess;
1264 struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess); 1264 struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
1265 1265
1266 if (cmd->dataout_timer_flags & ISCSI_TF_RUNNING) 1266 if (cmd->dataout_timer_flags & ISCSI_TF_RUNNING)
1267 return; 1267 return;
1268 1268
1269 pr_debug("Starting DataOUT timer for ITT: 0x%08x on" 1269 pr_debug("Starting DataOUT timer for ITT: 0x%08x on"
1270 " CID: %hu.\n", cmd->init_task_tag, conn->cid); 1270 " CID: %hu.\n", cmd->init_task_tag, conn->cid);
1271 1271
1272 init_timer(&cmd->dataout_timer); 1272 init_timer(&cmd->dataout_timer);
1273 cmd->dataout_timer.expires = (get_jiffies_64() + na->dataout_timeout * HZ); 1273 cmd->dataout_timer.expires = (get_jiffies_64() + na->dataout_timeout * HZ);
1274 cmd->dataout_timer.data = (unsigned long)cmd; 1274 cmd->dataout_timer.data = (unsigned long)cmd;
1275 cmd->dataout_timer.function = iscsit_handle_dataout_timeout; 1275 cmd->dataout_timer.function = iscsit_handle_dataout_timeout;
1276 cmd->dataout_timer_flags &= ~ISCSI_TF_STOP; 1276 cmd->dataout_timer_flags &= ~ISCSI_TF_STOP;
1277 cmd->dataout_timer_flags |= ISCSI_TF_RUNNING; 1277 cmd->dataout_timer_flags |= ISCSI_TF_RUNNING;
1278 add_timer(&cmd->dataout_timer); 1278 add_timer(&cmd->dataout_timer);
1279 } 1279 }
1280 1280
1281 void iscsit_stop_dataout_timer(struct iscsi_cmd *cmd) 1281 void iscsit_stop_dataout_timer(struct iscsi_cmd *cmd)
1282 { 1282 {
1283 spin_lock_bh(&cmd->dataout_timeout_lock); 1283 spin_lock_bh(&cmd->dataout_timeout_lock);
1284 if (!(cmd->dataout_timer_flags & ISCSI_TF_RUNNING)) { 1284 if (!(cmd->dataout_timer_flags & ISCSI_TF_RUNNING)) {
1285 spin_unlock_bh(&cmd->dataout_timeout_lock); 1285 spin_unlock_bh(&cmd->dataout_timeout_lock);
1286 return; 1286 return;
1287 } 1287 }
1288 cmd->dataout_timer_flags |= ISCSI_TF_STOP; 1288 cmd->dataout_timer_flags |= ISCSI_TF_STOP;
1289 spin_unlock_bh(&cmd->dataout_timeout_lock); 1289 spin_unlock_bh(&cmd->dataout_timeout_lock);
1290 1290
1291 del_timer_sync(&cmd->dataout_timer); 1291 del_timer_sync(&cmd->dataout_timer);
1292 1292
1293 spin_lock_bh(&cmd->dataout_timeout_lock); 1293 spin_lock_bh(&cmd->dataout_timeout_lock);
1294 cmd->dataout_timer_flags &= ~ISCSI_TF_RUNNING; 1294 cmd->dataout_timer_flags &= ~ISCSI_TF_RUNNING;
1295 pr_debug("Stopped DataOUT Timer for ITT: 0x%08x\n", 1295 pr_debug("Stopped DataOUT Timer for ITT: 0x%08x\n",
1296 cmd->init_task_tag); 1296 cmd->init_task_tag);
1297 spin_unlock_bh(&cmd->dataout_timeout_lock); 1297 spin_unlock_bh(&cmd->dataout_timeout_lock);
1298 } 1298 }
1299 1299
drivers/target/iscsi/iscsi_target_erl2.c
1 /******************************************************************************* 1 /*******************************************************************************
2 * This file contains error recovery level two functions used by 2 * This file contains error recovery level two functions used by
3 * the iSCSI Target driver. 3 * the iSCSI Target driver.
4 * 4 *
5 * © Copyright 2007-2011 RisingTide Systems LLC. 5 * © Copyright 2007-2011 RisingTide Systems LLC.
6 * 6 *
7 * Licensed to the Linux Foundation under the General Public License (GPL) version 2. 7 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
8 * 8 *
9 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> 9 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
10 * 10 *
11 * This program is free software; you can redistribute it and/or modify 11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by 12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or 13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version. 14 * (at your option) any later version.
15 * 15 *
16 * This program is distributed in the hope that it will be useful, 16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details. 19 * GNU General Public License for more details.
20 ******************************************************************************/ 20 ******************************************************************************/
21 21
22 #include <scsi/iscsi_proto.h> 22 #include <scsi/iscsi_proto.h>
23 #include <target/target_core_base.h> 23 #include <target/target_core_base.h>
24 #include <target/target_core_transport.h> 24 #include <target/target_core_fabric.h>
25 25
26 #include "iscsi_target_core.h" 26 #include "iscsi_target_core.h"
27 #include "iscsi_target_datain_values.h" 27 #include "iscsi_target_datain_values.h"
28 #include "iscsi_target_util.h" 28 #include "iscsi_target_util.h"
29 #include "iscsi_target_erl0.h" 29 #include "iscsi_target_erl0.h"
30 #include "iscsi_target_erl1.h" 30 #include "iscsi_target_erl1.h"
31 #include "iscsi_target_erl2.h" 31 #include "iscsi_target_erl2.h"
32 #include "iscsi_target.h" 32 #include "iscsi_target.h"
33 33
34 /* 34 /*
35 * FIXME: Does RData SNACK apply here as well? 35 * FIXME: Does RData SNACK apply here as well?
36 */ 36 */
37 void iscsit_create_conn_recovery_datain_values( 37 void iscsit_create_conn_recovery_datain_values(
38 struct iscsi_cmd *cmd, 38 struct iscsi_cmd *cmd,
39 u32 exp_data_sn) 39 u32 exp_data_sn)
40 { 40 {
41 u32 data_sn = 0; 41 u32 data_sn = 0;
42 struct iscsi_conn *conn = cmd->conn; 42 struct iscsi_conn *conn = cmd->conn;
43 43
44 cmd->next_burst_len = 0; 44 cmd->next_burst_len = 0;
45 cmd->read_data_done = 0; 45 cmd->read_data_done = 0;
46 46
47 while (exp_data_sn > data_sn) { 47 while (exp_data_sn > data_sn) {
48 if ((cmd->next_burst_len + 48 if ((cmd->next_burst_len +
49 conn->conn_ops->MaxRecvDataSegmentLength) < 49 conn->conn_ops->MaxRecvDataSegmentLength) <
50 conn->sess->sess_ops->MaxBurstLength) { 50 conn->sess->sess_ops->MaxBurstLength) {
51 cmd->read_data_done += 51 cmd->read_data_done +=
52 conn->conn_ops->MaxRecvDataSegmentLength; 52 conn->conn_ops->MaxRecvDataSegmentLength;
53 cmd->next_burst_len += 53 cmd->next_burst_len +=
54 conn->conn_ops->MaxRecvDataSegmentLength; 54 conn->conn_ops->MaxRecvDataSegmentLength;
55 } else { 55 } else {
56 cmd->read_data_done += 56 cmd->read_data_done +=
57 (conn->sess->sess_ops->MaxBurstLength - 57 (conn->sess->sess_ops->MaxBurstLength -
58 cmd->next_burst_len); 58 cmd->next_burst_len);
59 cmd->next_burst_len = 0; 59 cmd->next_burst_len = 0;
60 } 60 }
61 data_sn++; 61 data_sn++;
62 } 62 }
63 } 63 }
64 64
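To make the DataIN burst accounting above concrete, take MaxRecvDataSegmentLength = 8192 and MaxBurstLength = 65536 (example values only, not mandated by the code): each loop iteration accounts one DataIN PDU, and every eighth PDU closes out a full burst and resets next_burst_len. A self-contained userspace sketch of the same arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int mrdsl = 8192, mbl = 65536;		/* example values */
	unsigned int exp_data_sn = 10, data_sn = 0;
	unsigned int read_data_done = 0, next_burst_len = 0;

	while (exp_data_sn > data_sn) {
		if (next_burst_len + mrdsl < mbl) {
			read_data_done += mrdsl;
			next_burst_len += mrdsl;
		} else {
			/* last PDU of the burst: take the remainder */
			read_data_done += mbl - next_burst_len;
			next_burst_len = 0;
		}
		data_sn++;
	}
	/* prints "81920 16384": one full 64k burst plus two 8k PDUs */
	printf("%u %u\n", read_data_done, next_burst_len);
	return 0;
}
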
65 void iscsit_create_conn_recovery_dataout_values( 65 void iscsit_create_conn_recovery_dataout_values(
66 struct iscsi_cmd *cmd) 66 struct iscsi_cmd *cmd)
67 { 67 {
68 u32 write_data_done = 0; 68 u32 write_data_done = 0;
69 struct iscsi_conn *conn = cmd->conn; 69 struct iscsi_conn *conn = cmd->conn;
70 70
71 cmd->data_sn = 0; 71 cmd->data_sn = 0;
72 cmd->next_burst_len = 0; 72 cmd->next_burst_len = 0;
73 73
74 while (cmd->write_data_done > write_data_done) { 74 while (cmd->write_data_done > write_data_done) {
75 if ((write_data_done + conn->sess->sess_ops->MaxBurstLength) <= 75 if ((write_data_done + conn->sess->sess_ops->MaxBurstLength) <=
76 cmd->write_data_done) 76 cmd->write_data_done)
77 write_data_done += conn->sess->sess_ops->MaxBurstLength; 77 write_data_done += conn->sess->sess_ops->MaxBurstLength;
78 else 78 else
79 break; 79 break;
80 } 80 }
81 81
82 cmd->write_data_done = write_data_done; 82 cmd->write_data_done = write_data_done;
83 } 83 }
84 84
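The DataOUT counterpart above reduces to rounding the received byte count down to a whole number of bursts; for example, cmd->write_data_done = 150000 with MaxBurstLength = 65536 yields 131072. An equivalent expression (sketch only, stated here for clarity rather than as a proposed change):

write_data_done = (cmd->write_data_done / conn->sess->sess_ops->MaxBurstLength) *
			conn->sess->sess_ops->MaxBurstLength;
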
85 static int iscsit_attach_active_connection_recovery_entry( 85 static int iscsit_attach_active_connection_recovery_entry(
86 struct iscsi_session *sess, 86 struct iscsi_session *sess,
87 struct iscsi_conn_recovery *cr) 87 struct iscsi_conn_recovery *cr)
88 { 88 {
89 spin_lock(&sess->cr_a_lock); 89 spin_lock(&sess->cr_a_lock);
90 list_add_tail(&cr->cr_list, &sess->cr_active_list); 90 list_add_tail(&cr->cr_list, &sess->cr_active_list);
91 spin_unlock(&sess->cr_a_lock); 91 spin_unlock(&sess->cr_a_lock);
92 92
93 return 0; 93 return 0;
94 } 94 }
95 95
96 static int iscsit_attach_inactive_connection_recovery_entry( 96 static int iscsit_attach_inactive_connection_recovery_entry(
97 struct iscsi_session *sess, 97 struct iscsi_session *sess,
98 struct iscsi_conn_recovery *cr) 98 struct iscsi_conn_recovery *cr)
99 { 99 {
100 spin_lock(&sess->cr_i_lock); 100 spin_lock(&sess->cr_i_lock);
101 list_add_tail(&cr->cr_list, &sess->cr_inactive_list); 101 list_add_tail(&cr->cr_list, &sess->cr_inactive_list);
102 102
103 sess->conn_recovery_count++; 103 sess->conn_recovery_count++;
104 pr_debug("Incremented connection recovery count to %u for" 104 pr_debug("Incremented connection recovery count to %u for"
105 " SID: %u\n", sess->conn_recovery_count, sess->sid); 105 " SID: %u\n", sess->conn_recovery_count, sess->sid);
106 spin_unlock(&sess->cr_i_lock); 106 spin_unlock(&sess->cr_i_lock);
107 107
108 return 0; 108 return 0;
109 } 109 }
110 110
111 struct iscsi_conn_recovery *iscsit_get_inactive_connection_recovery_entry( 111 struct iscsi_conn_recovery *iscsit_get_inactive_connection_recovery_entry(
112 struct iscsi_session *sess, 112 struct iscsi_session *sess,
113 u16 cid) 113 u16 cid)
114 { 114 {
115 struct iscsi_conn_recovery *cr; 115 struct iscsi_conn_recovery *cr;
116 116
117 spin_lock(&sess->cr_i_lock); 117 spin_lock(&sess->cr_i_lock);
118 list_for_each_entry(cr, &sess->cr_inactive_list, cr_list) { 118 list_for_each_entry(cr, &sess->cr_inactive_list, cr_list) {
119 if (cr->cid == cid) { 119 if (cr->cid == cid) {
120 spin_unlock(&sess->cr_i_lock); 120 spin_unlock(&sess->cr_i_lock);
121 return cr; 121 return cr;
122 } 122 }
123 } 123 }
124 spin_unlock(&sess->cr_i_lock); 124 spin_unlock(&sess->cr_i_lock);
125 125
126 return NULL; 126 return NULL;
127 } 127 }
128 128
129 void iscsit_free_connection_recovery_entires(struct iscsi_session *sess) 129 void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
130 { 130 {
131 struct iscsi_cmd *cmd, *cmd_tmp; 131 struct iscsi_cmd *cmd, *cmd_tmp;
132 struct iscsi_conn_recovery *cr, *cr_tmp; 132 struct iscsi_conn_recovery *cr, *cr_tmp;
133 133
134 spin_lock(&sess->cr_a_lock); 134 spin_lock(&sess->cr_a_lock);
135 list_for_each_entry_safe(cr, cr_tmp, &sess->cr_active_list, cr_list) { 135 list_for_each_entry_safe(cr, cr_tmp, &sess->cr_active_list, cr_list) {
136 list_del(&cr->cr_list); 136 list_del(&cr->cr_list);
137 spin_unlock(&sess->cr_a_lock); 137 spin_unlock(&sess->cr_a_lock);
138 138
139 spin_lock(&cr->conn_recovery_cmd_lock); 139 spin_lock(&cr->conn_recovery_cmd_lock);
140 list_for_each_entry_safe(cmd, cmd_tmp, 140 list_for_each_entry_safe(cmd, cmd_tmp,
141 &cr->conn_recovery_cmd_list, i_list) { 141 &cr->conn_recovery_cmd_list, i_list) {
142 142
143 list_del(&cmd->i_list); 143 list_del(&cmd->i_list);
144 cmd->conn = NULL; 144 cmd->conn = NULL;
145 spin_unlock(&cr->conn_recovery_cmd_lock); 145 spin_unlock(&cr->conn_recovery_cmd_lock);
146 iscsit_free_cmd(cmd); 146 iscsit_free_cmd(cmd);
147 spin_lock(&cr->conn_recovery_cmd_lock); 147 spin_lock(&cr->conn_recovery_cmd_lock);
148 } 148 }
149 spin_unlock(&cr->conn_recovery_cmd_lock); 149 spin_unlock(&cr->conn_recovery_cmd_lock);
150 spin_lock(&sess->cr_a_lock); 150 spin_lock(&sess->cr_a_lock);
151 151
152 kfree(cr); 152 kfree(cr);
153 } 153 }
154 spin_unlock(&sess->cr_a_lock); 154 spin_unlock(&sess->cr_a_lock);
155 155
156 spin_lock(&sess->cr_i_lock); 156 spin_lock(&sess->cr_i_lock);
157 list_for_each_entry_safe(cr, cr_tmp, &sess->cr_inactive_list, cr_list) { 157 list_for_each_entry_safe(cr, cr_tmp, &sess->cr_inactive_list, cr_list) {
158 list_del(&cr->cr_list); 158 list_del(&cr->cr_list);
159 spin_unlock(&sess->cr_i_lock); 159 spin_unlock(&sess->cr_i_lock);
160 160
161 spin_lock(&cr->conn_recovery_cmd_lock); 161 spin_lock(&cr->conn_recovery_cmd_lock);
162 list_for_each_entry_safe(cmd, cmd_tmp, 162 list_for_each_entry_safe(cmd, cmd_tmp,
163 &cr->conn_recovery_cmd_list, i_list) { 163 &cr->conn_recovery_cmd_list, i_list) {
164 164
165 list_del(&cmd->i_list); 165 list_del(&cmd->i_list);
166 cmd->conn = NULL; 166 cmd->conn = NULL;
167 spin_unlock(&cr->conn_recovery_cmd_lock); 167 spin_unlock(&cr->conn_recovery_cmd_lock);
168 iscsit_free_cmd(cmd); 168 iscsit_free_cmd(cmd);
169 spin_lock(&cr->conn_recovery_cmd_lock); 169 spin_lock(&cr->conn_recovery_cmd_lock);
170 } 170 }
171 spin_unlock(&cr->conn_recovery_cmd_lock); 171 spin_unlock(&cr->conn_recovery_cmd_lock);
172 spin_lock(&sess->cr_i_lock); 172 spin_lock(&sess->cr_i_lock);
173 173
174 kfree(cr); 174 kfree(cr);
175 } 175 }
176 spin_unlock(&sess->cr_i_lock); 176 spin_unlock(&sess->cr_i_lock);
177 } 177 }
178 178
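Both passes in iscsit_free_connection_recovery_entires() rely on the same idiom: detach the entry under its spinlock, drop the lock around iscsit_free_cmd() (which may block), then retake it before the safe iteration continues. A generic sketch of the pattern, with hypothetical struct item and release_item() standing in for the iSCSI types:

struct item {
	struct list_head node;
};

static void drain_list(struct list_head *head, spinlock_t *lock)
{
	struct item *it, *tmp;

	spin_lock(lock);
	list_for_each_entry_safe(it, tmp, head, node) {
		list_del(&it->node);
		spin_unlock(lock);	/* release_item() may sleep, so the */
		release_item(it);	/* spinlock cannot be held across it */
		spin_lock(lock);
	}
	spin_unlock(lock);
}
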
179 int iscsit_remove_active_connection_recovery_entry( 179 int iscsit_remove_active_connection_recovery_entry(
180 struct iscsi_conn_recovery *cr, 180 struct iscsi_conn_recovery *cr,
181 struct iscsi_session *sess) 181 struct iscsi_session *sess)
182 { 182 {
183 spin_lock(&sess->cr_a_lock); 183 spin_lock(&sess->cr_a_lock);
184 list_del(&cr->cr_list); 184 list_del(&cr->cr_list);
185 185
186 sess->conn_recovery_count--; 186 sess->conn_recovery_count--;
187 pr_debug("Decremented connection recovery count to %u for" 187 pr_debug("Decremented connection recovery count to %u for"
188 " SID: %u\n", sess->conn_recovery_count, sess->sid); 188 " SID: %u\n", sess->conn_recovery_count, sess->sid);
189 spin_unlock(&sess->cr_a_lock); 189 spin_unlock(&sess->cr_a_lock);
190 190
191 kfree(cr); 191 kfree(cr);
192 192
193 return 0; 193 return 0;
194 } 194 }
195 195
196 int iscsit_remove_inactive_connection_recovery_entry( 196 int iscsit_remove_inactive_connection_recovery_entry(
197 struct iscsi_conn_recovery *cr, 197 struct iscsi_conn_recovery *cr,
198 struct iscsi_session *sess) 198 struct iscsi_session *sess)
199 { 199 {
200 spin_lock(&sess->cr_i_lock); 200 spin_lock(&sess->cr_i_lock);
201 list_del(&cr->cr_list); 201 list_del(&cr->cr_list);
202 spin_unlock(&sess->cr_i_lock); 202 spin_unlock(&sess->cr_i_lock);
203 203
204 return 0; 204 return 0;
205 } 205 }
206 206
207 /* 207 /*
208 * Called with cr->conn_recovery_cmd_lock held. 208 * Called with cr->conn_recovery_cmd_lock held.
209 */ 209 */
210 int iscsit_remove_cmd_from_connection_recovery( 210 int iscsit_remove_cmd_from_connection_recovery(
211 struct iscsi_cmd *cmd, 211 struct iscsi_cmd *cmd,
212 struct iscsi_session *sess) 212 struct iscsi_session *sess)
213 { 213 {
214 struct iscsi_conn_recovery *cr; 214 struct iscsi_conn_recovery *cr;
215 215
216 if (!cmd->cr) { 216 if (!cmd->cr) {
217 pr_err("struct iscsi_conn_recovery pointer for ITT: 0x%08x" 217 pr_err("struct iscsi_conn_recovery pointer for ITT: 0x%08x"
218 " is NULL!\n", cmd->init_task_tag); 218 " is NULL!\n", cmd->init_task_tag);
219 BUG(); 219 BUG();
220 } 220 }
221 cr = cmd->cr; 221 cr = cmd->cr;
222 222
223 list_del(&cmd->i_list); 223 list_del(&cmd->i_list);
224 return --cr->cmd_count; 224 return --cr->cmd_count;
225 } 225 }
226 226
227 void iscsit_discard_cr_cmds_by_expstatsn( 227 void iscsit_discard_cr_cmds_by_expstatsn(
228 struct iscsi_conn_recovery *cr, 228 struct iscsi_conn_recovery *cr,
229 u32 exp_statsn) 229 u32 exp_statsn)
230 { 230 {
231 u32 dropped_count = 0; 231 u32 dropped_count = 0;
232 struct iscsi_cmd *cmd, *cmd_tmp; 232 struct iscsi_cmd *cmd, *cmd_tmp;
233 struct iscsi_session *sess = cr->sess; 233 struct iscsi_session *sess = cr->sess;
234 234
235 spin_lock(&cr->conn_recovery_cmd_lock); 235 spin_lock(&cr->conn_recovery_cmd_lock);
236 list_for_each_entry_safe(cmd, cmd_tmp, 236 list_for_each_entry_safe(cmd, cmd_tmp,
237 &cr->conn_recovery_cmd_list, i_list) { 237 &cr->conn_recovery_cmd_list, i_list) {
238 238
239 if (((cmd->deferred_i_state != ISTATE_SENT_STATUS) && 239 if (((cmd->deferred_i_state != ISTATE_SENT_STATUS) &&
240 (cmd->deferred_i_state != ISTATE_REMOVE)) || 240 (cmd->deferred_i_state != ISTATE_REMOVE)) ||
241 (cmd->stat_sn >= exp_statsn)) { 241 (cmd->stat_sn >= exp_statsn)) {
242 continue; 242 continue;
243 } 243 }
244 244
245 dropped_count++; 245 dropped_count++;
246 pr_debug("Dropping Acknowledged ITT: 0x%08x, StatSN:" 246 pr_debug("Dropping Acknowledged ITT: 0x%08x, StatSN:"
247 " 0x%08x, CID: %hu.\n", cmd->init_task_tag, 247 " 0x%08x, CID: %hu.\n", cmd->init_task_tag,
248 cmd->stat_sn, cr->cid); 248 cmd->stat_sn, cr->cid);
249 249
250 iscsit_remove_cmd_from_connection_recovery(cmd, sess); 250 iscsit_remove_cmd_from_connection_recovery(cmd, sess);
251 251
252 spin_unlock(&cr->conn_recovery_cmd_lock); 252 spin_unlock(&cr->conn_recovery_cmd_lock);
253 iscsit_free_cmd(cmd); 253 iscsit_free_cmd(cmd);
254 spin_lock(&cr->conn_recovery_cmd_lock); 254 spin_lock(&cr->conn_recovery_cmd_lock);
255 } 255 }
256 spin_unlock(&cr->conn_recovery_cmd_lock); 256 spin_unlock(&cr->conn_recovery_cmd_lock);
257 257
258 pr_debug("Dropped %u total acknowledged commands on" 258 pr_debug("Dropped %u total acknowledged commands on"
259 " CID: %hu less than old ExpStatSN: 0x%08x\n", 259 " CID: %hu less than old ExpStatSN: 0x%08x\n",
260 dropped_count, cr->cid, exp_statsn); 260 dropped_count, cr->cid, exp_statsn);
261 261
262 if (!cr->cmd_count) { 262 if (!cr->cmd_count) {
263 pr_debug("No commands to be reassigned for failed" 263 pr_debug("No commands to be reassigned for failed"
264 " connection CID: %hu on SID: %u\n", 264 " connection CID: %hu on SID: %u\n",
265 cr->cid, sess->sid); 265 cr->cid, sess->sid);
266 iscsit_remove_inactive_connection_recovery_entry(cr, sess); 266 iscsit_remove_inactive_connection_recovery_entry(cr, sess);
267 iscsit_attach_active_connection_recovery_entry(sess, cr); 267 iscsit_attach_active_connection_recovery_entry(sess, cr);
268 pr_debug("iSCSI connection recovery successful for CID:" 268 pr_debug("iSCSI connection recovery successful for CID:"
269 " %hu on SID: %u\n", cr->cid, sess->sid); 269 " %hu on SID: %u\n", cr->cid, sess->sid);
270 iscsit_remove_active_connection_recovery_entry(cr, sess); 270 iscsit_remove_active_connection_recovery_entry(cr, sess);
271 } else { 271 } else {
272 iscsit_remove_inactive_connection_recovery_entry(cr, sess); 272 iscsit_remove_inactive_connection_recovery_entry(cr, sess);
273 iscsit_attach_active_connection_recovery_entry(sess, cr); 273 iscsit_attach_active_connection_recovery_entry(sess, cr);
274 } 274 }
275 } 275 }
276 276
277 int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn) 277 int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn)
278 { 278 {
279 u32 dropped_count = 0; 279 u32 dropped_count = 0;
280 struct iscsi_cmd *cmd, *cmd_tmp; 280 struct iscsi_cmd *cmd, *cmd_tmp;
281 struct iscsi_ooo_cmdsn *ooo_cmdsn, *ooo_cmdsn_tmp; 281 struct iscsi_ooo_cmdsn *ooo_cmdsn, *ooo_cmdsn_tmp;
282 struct iscsi_session *sess = conn->sess; 282 struct iscsi_session *sess = conn->sess;
283 283
284 mutex_lock(&sess->cmdsn_mutex); 284 mutex_lock(&sess->cmdsn_mutex);
285 list_for_each_entry_safe(ooo_cmdsn, ooo_cmdsn_tmp, 285 list_for_each_entry_safe(ooo_cmdsn, ooo_cmdsn_tmp,
286 &sess->sess_ooo_cmdsn_list, ooo_list) { 286 &sess->sess_ooo_cmdsn_list, ooo_list) {
287 287
288 if (ooo_cmdsn->cid != conn->cid) 288 if (ooo_cmdsn->cid != conn->cid)
289 continue; 289 continue;
290 290
291 dropped_count++; 291 dropped_count++;
292 pr_debug("Dropping unacknowledged CmdSN:" 292 pr_debug("Dropping unacknowledged CmdSN:"
293 " 0x%08x during connection recovery on CID: %hu\n", 293 " 0x%08x during connection recovery on CID: %hu\n",
294 ooo_cmdsn->cmdsn, conn->cid); 294 ooo_cmdsn->cmdsn, conn->cid);
295 iscsit_remove_ooo_cmdsn(sess, ooo_cmdsn); 295 iscsit_remove_ooo_cmdsn(sess, ooo_cmdsn);
296 } 296 }
297 mutex_unlock(&sess->cmdsn_mutex); 297 mutex_unlock(&sess->cmdsn_mutex);
298 298
299 spin_lock_bh(&conn->cmd_lock); 299 spin_lock_bh(&conn->cmd_lock);
300 list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_list) { 300 list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_list) {
301 if (!(cmd->cmd_flags & ICF_OOO_CMDSN)) 301 if (!(cmd->cmd_flags & ICF_OOO_CMDSN))
302 continue; 302 continue;
303 303
304 list_del(&cmd->i_list); 304 list_del(&cmd->i_list);
305 305
306 spin_unlock_bh(&conn->cmd_lock); 306 spin_unlock_bh(&conn->cmd_lock);
307 iscsit_free_cmd(cmd); 307 iscsit_free_cmd(cmd);
308 spin_lock_bh(&conn->cmd_lock); 308 spin_lock_bh(&conn->cmd_lock);
309 } 309 }
310 spin_unlock_bh(&conn->cmd_lock); 310 spin_unlock_bh(&conn->cmd_lock);
311 311
312 pr_debug("Dropped %u total unacknowledged commands on CID:" 312 pr_debug("Dropped %u total unacknowledged commands on CID:"
313 " %hu for ExpCmdSN: 0x%08x.\n", dropped_count, conn->cid, 313 " %hu for ExpCmdSN: 0x%08x.\n", dropped_count, conn->cid,
314 sess->exp_cmd_sn); 314 sess->exp_cmd_sn);
315 return 0; 315 return 0;
316 } 316 }
317 317
318 int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) 318 int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
319 { 319 {
320 u32 cmd_count = 0; 320 u32 cmd_count = 0;
321 struct iscsi_cmd *cmd, *cmd_tmp; 321 struct iscsi_cmd *cmd, *cmd_tmp;
322 struct iscsi_conn_recovery *cr; 322 struct iscsi_conn_recovery *cr;
323 323
324 /* 324 /*
325 	 * Allocate a struct iscsi_conn_recovery for this connection. 325 	 * Allocate a struct iscsi_conn_recovery for this connection.
326 	 * Each struct iscsi_cmd contains a struct iscsi_conn_recovery pointer 326 	 * Each struct iscsi_cmd contains a struct iscsi_conn_recovery pointer
327 * (struct iscsi_cmd->cr) so we need to allocate this before preparing the 327 * (struct iscsi_cmd->cr) so we need to allocate this before preparing the
328 * connection's command list for connection recovery. 328 * connection's command list for connection recovery.
329 */ 329 */
330 cr = kzalloc(sizeof(struct iscsi_conn_recovery), GFP_KERNEL); 330 cr = kzalloc(sizeof(struct iscsi_conn_recovery), GFP_KERNEL);
331 if (!cr) { 331 if (!cr) {
332 pr_err("Unable to allocate memory for" 332 pr_err("Unable to allocate memory for"
333 " struct iscsi_conn_recovery.\n"); 333 " struct iscsi_conn_recovery.\n");
334 return -1; 334 return -1;
335 } 335 }
336 INIT_LIST_HEAD(&cr->cr_list); 336 INIT_LIST_HEAD(&cr->cr_list);
337 INIT_LIST_HEAD(&cr->conn_recovery_cmd_list); 337 INIT_LIST_HEAD(&cr->conn_recovery_cmd_list);
338 spin_lock_init(&cr->conn_recovery_cmd_lock); 338 spin_lock_init(&cr->conn_recovery_cmd_lock);
339 /* 339 /*
340 * Only perform connection recovery on ISCSI_OP_SCSI_CMD or 340 * Only perform connection recovery on ISCSI_OP_SCSI_CMD or
341 * ISCSI_OP_NOOP_OUT opcodes. For all other opcodes call 341 * ISCSI_OP_NOOP_OUT opcodes. For all other opcodes call
342 * list_del(&cmd->i_list); to release the command to the 342 * list_del(&cmd->i_list); to release the command to the
343 * session pool and remove it from the connection's list. 343 * session pool and remove it from the connection's list.
344 * 344 *
345 * Also stop the DataOUT timer, which will be restarted after 345 * Also stop the DataOUT timer, which will be restarted after
346 * sending the TMR response. 346 * sending the TMR response.
347 */ 347 */
348 spin_lock_bh(&conn->cmd_lock); 348 spin_lock_bh(&conn->cmd_lock);
349 list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_list) { 349 list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_list) {
350 350
351 if ((cmd->iscsi_opcode != ISCSI_OP_SCSI_CMD) && 351 if ((cmd->iscsi_opcode != ISCSI_OP_SCSI_CMD) &&
352 (cmd->iscsi_opcode != ISCSI_OP_NOOP_OUT)) { 352 (cmd->iscsi_opcode != ISCSI_OP_NOOP_OUT)) {
353 pr_debug("Not performing realligence on" 353 pr_debug("Not performing realligence on"
354 " Opcode: 0x%02x, ITT: 0x%08x, CmdSN: 0x%08x," 354 " Opcode: 0x%02x, ITT: 0x%08x, CmdSN: 0x%08x,"
355 " CID: %hu\n", cmd->iscsi_opcode, 355 " CID: %hu\n", cmd->iscsi_opcode,
356 cmd->init_task_tag, cmd->cmd_sn, conn->cid); 356 cmd->init_task_tag, cmd->cmd_sn, conn->cid);
357 357
358 list_del(&cmd->i_list); 358 list_del(&cmd->i_list);
359 spin_unlock_bh(&conn->cmd_lock); 359 spin_unlock_bh(&conn->cmd_lock);
360 iscsit_free_cmd(cmd); 360 iscsit_free_cmd(cmd);
361 spin_lock_bh(&conn->cmd_lock); 361 spin_lock_bh(&conn->cmd_lock);
362 continue; 362 continue;
363 } 363 }
364 364
365 /* 365 /*
366 * Special case where commands greater than or equal to 366 * Special case where commands greater than or equal to
367 * the session's ExpCmdSN are attached to the connection 367 * the session's ExpCmdSN are attached to the connection
368 * list but not to the out of order CmdSN list. The one 368 * list but not to the out of order CmdSN list. The one
369 * obvious case is when a command with immediate data 369 * obvious case is when a command with immediate data
370 * attached must only check the CmdSN against ExpCmdSN 370 * attached must only check the CmdSN against ExpCmdSN
371 * after the data is received. The special case below 371 * after the data is received. The special case below
372 * is when the connection fails before data is received, 372 * is when the connection fails before data is received,
373 * but also may apply to other PDUs, so it has been 373 * but also may apply to other PDUs, so it has been
374 * made generic here. 374 * made generic here.
375 */ 375 */
376 if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd && 376 if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd &&
377 (cmd->cmd_sn >= conn->sess->exp_cmd_sn)) { 377 (cmd->cmd_sn >= conn->sess->exp_cmd_sn)) {
378 list_del(&cmd->i_list); 378 list_del(&cmd->i_list);
379 spin_unlock_bh(&conn->cmd_lock); 379 spin_unlock_bh(&conn->cmd_lock);
380 iscsit_free_cmd(cmd); 380 iscsit_free_cmd(cmd);
381 spin_lock_bh(&conn->cmd_lock); 381 spin_lock_bh(&conn->cmd_lock);
382 continue; 382 continue;
383 } 383 }
384 384
385 cmd_count++; 385 cmd_count++;
386 pr_debug("Preparing Opcode: 0x%02x, ITT: 0x%08x," 386 pr_debug("Preparing Opcode: 0x%02x, ITT: 0x%08x,"
387 " CmdSN: 0x%08x, StatSN: 0x%08x, CID: %hu for" 387 " CmdSN: 0x%08x, StatSN: 0x%08x, CID: %hu for"
388 " realligence.\n", cmd->iscsi_opcode, 388 " realligence.\n", cmd->iscsi_opcode,
389 cmd->init_task_tag, cmd->cmd_sn, cmd->stat_sn, 389 cmd->init_task_tag, cmd->cmd_sn, cmd->stat_sn,
390 conn->cid); 390 conn->cid);
391 391
392 cmd->deferred_i_state = cmd->i_state; 392 cmd->deferred_i_state = cmd->i_state;
393 cmd->i_state = ISTATE_IN_CONNECTION_RECOVERY; 393 cmd->i_state = ISTATE_IN_CONNECTION_RECOVERY;
394 394
395 if (cmd->data_direction == DMA_TO_DEVICE) 395 if (cmd->data_direction == DMA_TO_DEVICE)
396 iscsit_stop_dataout_timer(cmd); 396 iscsit_stop_dataout_timer(cmd);
397 397
398 cmd->sess = conn->sess; 398 cmd->sess = conn->sess;
399 399
400 list_del(&cmd->i_list); 400 list_del(&cmd->i_list);
401 spin_unlock_bh(&conn->cmd_lock); 401 spin_unlock_bh(&conn->cmd_lock);
402 402
403 iscsit_free_all_datain_reqs(cmd); 403 iscsit_free_all_datain_reqs(cmd);
404 404
405 transport_wait_for_tasks(&cmd->se_cmd); 405 transport_wait_for_tasks(&cmd->se_cmd);
406 /* 406 /*
407 * Add the struct iscsi_cmd to the connection recovery cmd list 407 * Add the struct iscsi_cmd to the connection recovery cmd list
408 */ 408 */
409 spin_lock(&cr->conn_recovery_cmd_lock); 409 spin_lock(&cr->conn_recovery_cmd_lock);
410 list_add_tail(&cmd->i_list, &cr->conn_recovery_cmd_list); 410 list_add_tail(&cmd->i_list, &cr->conn_recovery_cmd_list);
411 spin_unlock(&cr->conn_recovery_cmd_lock); 411 spin_unlock(&cr->conn_recovery_cmd_lock);
412 412
413 spin_lock_bh(&conn->cmd_lock); 413 spin_lock_bh(&conn->cmd_lock);
414 cmd->cr = cr; 414 cmd->cr = cr;
415 cmd->conn = NULL; 415 cmd->conn = NULL;
416 } 416 }
417 spin_unlock_bh(&conn->cmd_lock); 417 spin_unlock_bh(&conn->cmd_lock);
418 /* 418 /*
419 * Fill in the various values in the preallocated struct iscsi_conn_recovery. 419 * Fill in the various values in the preallocated struct iscsi_conn_recovery.
420 */ 420 */
421 cr->cid = conn->cid; 421 cr->cid = conn->cid;
422 cr->cmd_count = cmd_count; 422 cr->cmd_count = cmd_count;
423 cr->maxrecvdatasegmentlength = conn->conn_ops->MaxRecvDataSegmentLength; 423 cr->maxrecvdatasegmentlength = conn->conn_ops->MaxRecvDataSegmentLength;
424 cr->sess = conn->sess; 424 cr->sess = conn->sess;
425 425
426 iscsit_attach_inactive_connection_recovery_entry(conn->sess, cr); 426 iscsit_attach_inactive_connection_recovery_entry(conn->sess, cr);
427 427
428 return 0; 428 return 0;
429 } 429 }
430 430
431 int iscsit_connection_recovery_transport_reset(struct iscsi_conn *conn) 431 int iscsit_connection_recovery_transport_reset(struct iscsi_conn *conn)
432 { 432 {
433 atomic_set(&conn->connection_recovery, 1); 433 atomic_set(&conn->connection_recovery, 1);
434 434
435 if (iscsit_close_connection(conn) < 0) 435 if (iscsit_close_connection(conn) < 0)
436 return -1; 436 return -1;
437 437
438 return 0; 438 return 0;
439 } 439 }
440 440
drivers/target/iscsi/iscsi_target_login.c
1 /******************************************************************************* 1 /*******************************************************************************
2 * This file contains the login functions used by the iSCSI Target driver. 2 * This file contains the login functions used by the iSCSI Target driver.
3 * 3 *
4 * © Copyright 2007-2011 RisingTide Systems LLC. 4 * © Copyright 2007-2011 RisingTide Systems LLC.
5 * 5 *
6 * Licensed to the Linux Foundation under the General Public License (GPL) version 2. 6 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
7 * 7 *
8 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> 8 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by 11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or 12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version. 13 * (at your option) any later version.
14 * 14 *
15 * This program is distributed in the hope that it will be useful, 15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details. 18 * GNU General Public License for more details.
19 ******************************************************************************/ 19 ******************************************************************************/
20 20
21 #include <linux/string.h> 21 #include <linux/string.h>
22 #include <linux/kthread.h> 22 #include <linux/kthread.h>
23 #include <linux/crypto.h> 23 #include <linux/crypto.h>
24 #include <scsi/iscsi_proto.h> 24 #include <scsi/iscsi_proto.h>
25 #include <target/target_core_base.h> 25 #include <target/target_core_base.h>
26 #include <target/target_core_transport.h> 26 #include <target/target_core_fabric.h>
27 27
28 #include "iscsi_target_core.h" 28 #include "iscsi_target_core.h"
29 #include "iscsi_target_tq.h" 29 #include "iscsi_target_tq.h"
30 #include "iscsi_target_device.h" 30 #include "iscsi_target_device.h"
31 #include "iscsi_target_nego.h" 31 #include "iscsi_target_nego.h"
32 #include "iscsi_target_erl0.h" 32 #include "iscsi_target_erl0.h"
33 #include "iscsi_target_erl2.h" 33 #include "iscsi_target_erl2.h"
34 #include "iscsi_target_login.h" 34 #include "iscsi_target_login.h"
35 #include "iscsi_target_stat.h" 35 #include "iscsi_target_stat.h"
36 #include "iscsi_target_tpg.h" 36 #include "iscsi_target_tpg.h"
37 #include "iscsi_target_util.h" 37 #include "iscsi_target_util.h"
38 #include "iscsi_target.h" 38 #include "iscsi_target.h"
39 #include "iscsi_target_parameters.h" 39 #include "iscsi_target_parameters.h"
40 40
41 extern struct idr sess_idr; 41 extern struct idr sess_idr;
42 extern struct mutex auth_id_lock; 42 extern struct mutex auth_id_lock;
43 extern spinlock_t sess_idr_lock; 43 extern spinlock_t sess_idr_lock;
44 44
45 static int iscsi_login_init_conn(struct iscsi_conn *conn) 45 static int iscsi_login_init_conn(struct iscsi_conn *conn)
46 { 46 {
47 INIT_LIST_HEAD(&conn->conn_list); 47 INIT_LIST_HEAD(&conn->conn_list);
48 INIT_LIST_HEAD(&conn->conn_cmd_list); 48 INIT_LIST_HEAD(&conn->conn_cmd_list);
49 INIT_LIST_HEAD(&conn->immed_queue_list); 49 INIT_LIST_HEAD(&conn->immed_queue_list);
50 INIT_LIST_HEAD(&conn->response_queue_list); 50 INIT_LIST_HEAD(&conn->response_queue_list);
51 init_completion(&conn->conn_post_wait_comp); 51 init_completion(&conn->conn_post_wait_comp);
52 init_completion(&conn->conn_wait_comp); 52 init_completion(&conn->conn_wait_comp);
53 init_completion(&conn->conn_wait_rcfr_comp); 53 init_completion(&conn->conn_wait_rcfr_comp);
54 init_completion(&conn->conn_waiting_on_uc_comp); 54 init_completion(&conn->conn_waiting_on_uc_comp);
55 init_completion(&conn->conn_logout_comp); 55 init_completion(&conn->conn_logout_comp);
56 init_completion(&conn->rx_half_close_comp); 56 init_completion(&conn->rx_half_close_comp);
57 init_completion(&conn->tx_half_close_comp); 57 init_completion(&conn->tx_half_close_comp);
58 spin_lock_init(&conn->cmd_lock); 58 spin_lock_init(&conn->cmd_lock);
59 spin_lock_init(&conn->conn_usage_lock); 59 spin_lock_init(&conn->conn_usage_lock);
60 spin_lock_init(&conn->immed_queue_lock); 60 spin_lock_init(&conn->immed_queue_lock);
61 spin_lock_init(&conn->nopin_timer_lock); 61 spin_lock_init(&conn->nopin_timer_lock);
62 spin_lock_init(&conn->response_queue_lock); 62 spin_lock_init(&conn->response_queue_lock);
63 spin_lock_init(&conn->state_lock); 63 spin_lock_init(&conn->state_lock);
64 64
65 if (!zalloc_cpumask_var(&conn->conn_cpumask, GFP_KERNEL)) { 65 if (!zalloc_cpumask_var(&conn->conn_cpumask, GFP_KERNEL)) {
66 pr_err("Unable to allocate conn->conn_cpumask\n"); 66 pr_err("Unable to allocate conn->conn_cpumask\n");
67 return -ENOMEM; 67 return -ENOMEM;
68 } 68 }
69 69
70 return 0; 70 return 0;
71 } 71 }
72 72
73 /* 73 /*
74 * Used by iscsi_target_nego.c:iscsi_target_locate_portal() to set up 74 * Used by iscsi_target_nego.c:iscsi_target_locate_portal() to set up
75 * per struct iscsi_conn libcrypto contexts for crc32c and crc32-intel 75 * per struct iscsi_conn libcrypto contexts for crc32c and crc32-intel
76 */ 76 */
77 int iscsi_login_setup_crypto(struct iscsi_conn *conn) 77 int iscsi_login_setup_crypto(struct iscsi_conn *conn)
78 { 78 {
79 /* 79 /*
80 * Setup slicing by CRC32C algorithm for RX and TX libcrypto contexts 80 * Setup slicing by CRC32C algorithm for RX and TX libcrypto contexts
81 * which will default to crc32c_intel.ko for cpu_has_xmm4_2, or fallback 81 * which will default to crc32c_intel.ko for cpu_has_xmm4_2, or fallback
82 * to software 1x8 byte slicing from crc32c.ko 82 * to software 1x8 byte slicing from crc32c.ko
83 */ 83 */
84 conn->conn_rx_hash.flags = 0; 84 conn->conn_rx_hash.flags = 0;
85 conn->conn_rx_hash.tfm = crypto_alloc_hash("crc32c", 0, 85 conn->conn_rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
86 CRYPTO_ALG_ASYNC); 86 CRYPTO_ALG_ASYNC);
87 if (IS_ERR(conn->conn_rx_hash.tfm)) { 87 if (IS_ERR(conn->conn_rx_hash.tfm)) {
88 pr_err("crypto_alloc_hash() failed for conn_rx_tfm\n"); 88 pr_err("crypto_alloc_hash() failed for conn_rx_tfm\n");
89 return -ENOMEM; 89 return -ENOMEM;
90 } 90 }
91 91
92 conn->conn_tx_hash.flags = 0; 92 conn->conn_tx_hash.flags = 0;
93 conn->conn_tx_hash.tfm = crypto_alloc_hash("crc32c", 0, 93 conn->conn_tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
94 CRYPTO_ALG_ASYNC); 94 CRYPTO_ALG_ASYNC);
95 if (IS_ERR(conn->conn_tx_hash.tfm)) { 95 if (IS_ERR(conn->conn_tx_hash.tfm)) {
96 pr_err("crypto_alloc_hash() failed for conn_tx_tfm\n"); 96 pr_err("crypto_alloc_hash() failed for conn_tx_tfm\n");
97 crypto_free_hash(conn->conn_rx_hash.tfm); 97 crypto_free_hash(conn->conn_rx_hash.tfm);
98 return -ENOMEM; 98 return -ENOMEM;
99 } 99 }
100 100
101 return 0; 101 return 0;
102 } 102 }
103 103
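Once allocated, each context is driven through the legacy crypto_hash interface. A minimal sketch of computing a CRC32C digest over a received header with the RX context (buf and len are assumed to exist here; error checking omitted):

struct scatterlist sg;
u32 crc;

sg_init_one(&sg, buf, len);
crypto_hash_init(&conn->conn_rx_hash);
crypto_hash_update(&conn->conn_rx_hash, &sg, len);
crypto_hash_final(&conn->conn_rx_hash, (u8 *)&crc);
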
104 static int iscsi_login_check_initiator_version( 104 static int iscsi_login_check_initiator_version(
105 struct iscsi_conn *conn, 105 struct iscsi_conn *conn,
106 u8 version_max, 106 u8 version_max,
107 u8 version_min) 107 u8 version_min)
108 { 108 {
109 if ((version_max != 0x00) || (version_min != 0x00)) { 109 if ((version_max != 0x00) || (version_min != 0x00)) {
110 pr_err("Unsupported iSCSI IETF Pre-RFC Revision," 110 pr_err("Unsupported iSCSI IETF Pre-RFC Revision,"
111 " version Min/Max 0x%02x/0x%02x, rejecting login.\n", 111 " version Min/Max 0x%02x/0x%02x, rejecting login.\n",
112 version_min, version_max); 112 version_min, version_max);
113 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR, 113 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
114 ISCSI_LOGIN_STATUS_NO_VERSION); 114 ISCSI_LOGIN_STATUS_NO_VERSION);
115 return -1; 115 return -1;
116 } 116 }
117 117
118 return 0; 118 return 0;
119 } 119 }
120 120
121 int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn) 121 int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
122 { 122 {
123 int sessiontype; 123 int sessiontype;
124 struct iscsi_param *initiatorname_param = NULL, *sessiontype_param = NULL; 124 struct iscsi_param *initiatorname_param = NULL, *sessiontype_param = NULL;
125 struct iscsi_portal_group *tpg = conn->tpg; 125 struct iscsi_portal_group *tpg = conn->tpg;
126 struct iscsi_session *sess = NULL, *sess_p = NULL; 126 struct iscsi_session *sess = NULL, *sess_p = NULL;
127 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; 127 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
128 struct se_session *se_sess, *se_sess_tmp; 128 struct se_session *se_sess, *se_sess_tmp;
129 129
130 initiatorname_param = iscsi_find_param_from_key( 130 initiatorname_param = iscsi_find_param_from_key(
131 INITIATORNAME, conn->param_list); 131 INITIATORNAME, conn->param_list);
132 if (!initiatorname_param) 132 if (!initiatorname_param)
133 return -1; 133 return -1;
134 134
135 sessiontype_param = iscsi_find_param_from_key( 135 sessiontype_param = iscsi_find_param_from_key(
136 SESSIONTYPE, conn->param_list); 136 SESSIONTYPE, conn->param_list);
137 if (!sessiontype_param) 137 if (!sessiontype_param)
138 return -1; 138 return -1;
139 139
140 sessiontype = (strncmp(sessiontype_param->value, NORMAL, 6)) ? 1 : 0; 140 sessiontype = (strncmp(sessiontype_param->value, NORMAL, 6)) ? 1 : 0;
141 141
142 spin_lock_bh(&se_tpg->session_lock); 142 spin_lock_bh(&se_tpg->session_lock);
143 list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list, 143 list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list,
144 sess_list) { 144 sess_list) {
145 145
146 sess_p = (struct iscsi_session *)se_sess->fabric_sess_ptr; 146 sess_p = (struct iscsi_session *)se_sess->fabric_sess_ptr;
147 spin_lock(&sess_p->conn_lock); 147 spin_lock(&sess_p->conn_lock);
148 if (atomic_read(&sess_p->session_fall_back_to_erl0) || 148 if (atomic_read(&sess_p->session_fall_back_to_erl0) ||
149 atomic_read(&sess_p->session_logout) || 149 atomic_read(&sess_p->session_logout) ||
150 (sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED)) { 150 (sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
151 spin_unlock(&sess_p->conn_lock); 151 spin_unlock(&sess_p->conn_lock);
152 continue; 152 continue;
153 } 153 }
154 if (!memcmp((void *)sess_p->isid, (void *)conn->sess->isid, 6) && 154 if (!memcmp((void *)sess_p->isid, (void *)conn->sess->isid, 6) &&
155 (!strcmp((void *)sess_p->sess_ops->InitiatorName, 155 (!strcmp((void *)sess_p->sess_ops->InitiatorName,
156 (void *)initiatorname_param->value) && 156 (void *)initiatorname_param->value) &&
157 (sess_p->sess_ops->SessionType == sessiontype))) { 157 (sess_p->sess_ops->SessionType == sessiontype))) {
158 atomic_set(&sess_p->session_reinstatement, 1); 158 atomic_set(&sess_p->session_reinstatement, 1);
159 spin_unlock(&sess_p->conn_lock); 159 spin_unlock(&sess_p->conn_lock);
160 iscsit_inc_session_usage_count(sess_p); 160 iscsit_inc_session_usage_count(sess_p);
161 iscsit_stop_time2retain_timer(sess_p); 161 iscsit_stop_time2retain_timer(sess_p);
162 sess = sess_p; 162 sess = sess_p;
163 break; 163 break;
164 } 164 }
165 spin_unlock(&sess_p->conn_lock); 165 spin_unlock(&sess_p->conn_lock);
166 } 166 }
167 spin_unlock_bh(&se_tpg->session_lock); 167 spin_unlock_bh(&se_tpg->session_lock);
168 /* 168 /*
169 * If the Time2Retain handler has expired, the session is already gone. 169 * If the Time2Retain handler has expired, the session is already gone.
170 */ 170 */
171 if (!sess) 171 if (!sess)
172 return 0; 172 return 0;
173 173
174 pr_debug("%s iSCSI Session SID %u is still active for %s," 174 pr_debug("%s iSCSI Session SID %u is still active for %s,"
175 " preforming session reinstatement.\n", (sessiontype) ? 175 " preforming session reinstatement.\n", (sessiontype) ?
176 "Discovery" : "Normal", sess->sid, 176 "Discovery" : "Normal", sess->sid,
177 sess->sess_ops->InitiatorName); 177 sess->sess_ops->InitiatorName);
178 178
179 spin_lock_bh(&sess->conn_lock); 179 spin_lock_bh(&sess->conn_lock);
180 if (sess->session_state == TARG_SESS_STATE_FAILED) { 180 if (sess->session_state == TARG_SESS_STATE_FAILED) {
181 spin_unlock_bh(&sess->conn_lock); 181 spin_unlock_bh(&sess->conn_lock);
182 iscsit_dec_session_usage_count(sess); 182 iscsit_dec_session_usage_count(sess);
183 return iscsit_close_session(sess); 183 return iscsit_close_session(sess);
184 } 184 }
185 spin_unlock_bh(&sess->conn_lock); 185 spin_unlock_bh(&sess->conn_lock);
186 186
187 iscsit_stop_session(sess, 1, 1); 187 iscsit_stop_session(sess, 1, 1);
188 iscsit_dec_session_usage_count(sess); 188 iscsit_dec_session_usage_count(sess);
189 189
190 return iscsit_close_session(sess); 190 return iscsit_close_session(sess);
191 } 191 }
192 192
193 static void iscsi_login_set_conn_values( 193 static void iscsi_login_set_conn_values(
194 struct iscsi_session *sess, 194 struct iscsi_session *sess,
195 struct iscsi_conn *conn, 195 struct iscsi_conn *conn,
196 u16 cid) 196 u16 cid)
197 { 197 {
198 conn->sess = sess; 198 conn->sess = sess;
199 conn->cid = cid; 199 conn->cid = cid;
200 /* 200 /*
201 * Generate a random Status sequence number (statsn) for the new 201 * Generate a random Status sequence number (statsn) for the new
202 * iSCSI connection. 202 * iSCSI connection.
203 */ 203 */
204 get_random_bytes(&conn->stat_sn, sizeof(u32)); 204 get_random_bytes(&conn->stat_sn, sizeof(u32));
205 205
206 mutex_lock(&auth_id_lock); 206 mutex_lock(&auth_id_lock);
207 conn->auth_id = iscsit_global->auth_id++; 207 conn->auth_id = iscsit_global->auth_id++;
208 mutex_unlock(&auth_id_lock); 208 mutex_unlock(&auth_id_lock);
209 } 209 }
210 210
211 /* 211 /*
212 * This is the leading connection of a new session, 212 * This is the leading connection of a new session,
213 * or session reinstatement. 213 * or session reinstatement.
214 */ 214 */
215 static int iscsi_login_zero_tsih_s1( 215 static int iscsi_login_zero_tsih_s1(
216 struct iscsi_conn *conn, 216 struct iscsi_conn *conn,
217 unsigned char *buf) 217 unsigned char *buf)
218 { 218 {
219 struct iscsi_session *sess = NULL; 219 struct iscsi_session *sess = NULL;
220 struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf; 220 struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
221 221
222 sess = kzalloc(sizeof(struct iscsi_session), GFP_KERNEL); 222 sess = kzalloc(sizeof(struct iscsi_session), GFP_KERNEL);
223 if (!sess) { 223 if (!sess) {
224 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 224 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
225 ISCSI_LOGIN_STATUS_NO_RESOURCES); 225 ISCSI_LOGIN_STATUS_NO_RESOURCES);
226 pr_err("Could not allocate memory for session\n"); 226 pr_err("Could not allocate memory for session\n");
227 return -ENOMEM; 227 return -ENOMEM;
228 } 228 }
229 229
230 iscsi_login_set_conn_values(sess, conn, pdu->cid); 230 iscsi_login_set_conn_values(sess, conn, pdu->cid);
231 sess->init_task_tag = pdu->itt; 231 sess->init_task_tag = pdu->itt;
232 memcpy((void *)&sess->isid, (void *)pdu->isid, 6); 232 memcpy((void *)&sess->isid, (void *)pdu->isid, 6);
233 sess->exp_cmd_sn = pdu->cmdsn; 233 sess->exp_cmd_sn = pdu->cmdsn;
234 INIT_LIST_HEAD(&sess->sess_conn_list); 234 INIT_LIST_HEAD(&sess->sess_conn_list);
235 INIT_LIST_HEAD(&sess->sess_ooo_cmdsn_list); 235 INIT_LIST_HEAD(&sess->sess_ooo_cmdsn_list);
236 INIT_LIST_HEAD(&sess->cr_active_list); 236 INIT_LIST_HEAD(&sess->cr_active_list);
237 INIT_LIST_HEAD(&sess->cr_inactive_list); 237 INIT_LIST_HEAD(&sess->cr_inactive_list);
238 init_completion(&sess->async_msg_comp); 238 init_completion(&sess->async_msg_comp);
239 init_completion(&sess->reinstatement_comp); 239 init_completion(&sess->reinstatement_comp);
240 init_completion(&sess->session_wait_comp); 240 init_completion(&sess->session_wait_comp);
241 init_completion(&sess->session_waiting_on_uc_comp); 241 init_completion(&sess->session_waiting_on_uc_comp);
242 mutex_init(&sess->cmdsn_mutex); 242 mutex_init(&sess->cmdsn_mutex);
243 spin_lock_init(&sess->conn_lock); 243 spin_lock_init(&sess->conn_lock);
244 spin_lock_init(&sess->cr_a_lock); 244 spin_lock_init(&sess->cr_a_lock);
245 spin_lock_init(&sess->cr_i_lock); 245 spin_lock_init(&sess->cr_i_lock);
246 spin_lock_init(&sess->session_usage_lock); 246 spin_lock_init(&sess->session_usage_lock);
247 spin_lock_init(&sess->ttt_lock); 247 spin_lock_init(&sess->ttt_lock);
248 248
249 if (!idr_pre_get(&sess_idr, GFP_KERNEL)) { 249 if (!idr_pre_get(&sess_idr, GFP_KERNEL)) {
250 pr_err("idr_pre_get() for sess_idr failed\n"); 250 pr_err("idr_pre_get() for sess_idr failed\n");
251 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 251 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
252 ISCSI_LOGIN_STATUS_NO_RESOURCES); 252 ISCSI_LOGIN_STATUS_NO_RESOURCES);
253 kfree(sess); 253 kfree(sess);
254 return -ENOMEM; 254 return -ENOMEM;
255 } 255 }
256 spin_lock(&sess_idr_lock); 256 spin_lock(&sess_idr_lock);
257 idr_get_new(&sess_idr, NULL, &sess->session_index); 257 idr_get_new(&sess_idr, NULL, &sess->session_index);
258 spin_unlock(&sess_idr_lock); 258 spin_unlock(&sess_idr_lock);
259 259
260 sess->creation_time = get_jiffies_64(); 260 sess->creation_time = get_jiffies_64();
261 spin_lock_init(&sess->session_stats_lock); 261 spin_lock_init(&sess->session_stats_lock);
262 /* 262 /*
263 * The FFP CmdSN window values will be allocated from the TPG's 263 * The FFP CmdSN window values will be allocated from the TPG's
264 * Initiator Node's ACL once the login has been successfully completed. 264 * Initiator Node's ACL once the login has been successfully completed.
265 */ 265 */
266 sess->max_cmd_sn = pdu->cmdsn; 266 sess->max_cmd_sn = pdu->cmdsn;
267 267
268 sess->sess_ops = kzalloc(sizeof(struct iscsi_sess_ops), GFP_KERNEL); 268 sess->sess_ops = kzalloc(sizeof(struct iscsi_sess_ops), GFP_KERNEL);
269 if (!sess->sess_ops) { 269 if (!sess->sess_ops) {
270 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 270 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
271 ISCSI_LOGIN_STATUS_NO_RESOURCES); 271 ISCSI_LOGIN_STATUS_NO_RESOURCES);
272 pr_err("Unable to allocate memory for" 272 pr_err("Unable to allocate memory for"
273 " struct iscsi_sess_ops.\n"); 273 " struct iscsi_sess_ops.\n");
274 kfree(sess); 274 kfree(sess);
275 return -ENOMEM; 275 return -ENOMEM;
276 } 276 }
277 277
278 sess->se_sess = transport_init_session(); 278 sess->se_sess = transport_init_session();
279 if (IS_ERR(sess->se_sess)) { 279 if (IS_ERR(sess->se_sess)) {
280 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 280 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
281 ISCSI_LOGIN_STATUS_NO_RESOURCES); 281 ISCSI_LOGIN_STATUS_NO_RESOURCES);
282 kfree(sess); 282 kfree(sess);
283 return -ENOMEM; 283 return -ENOMEM;
284 } 284 }
285 285
286 return 0; 286 return 0;
287 } 287 }
288 288
289 static int iscsi_login_zero_tsih_s2( 289 static int iscsi_login_zero_tsih_s2(
290 struct iscsi_conn *conn) 290 struct iscsi_conn *conn)
291 { 291 {
292 struct iscsi_node_attrib *na; 292 struct iscsi_node_attrib *na;
293 struct iscsi_session *sess = conn->sess; 293 struct iscsi_session *sess = conn->sess;
294 unsigned char buf[32]; 294 unsigned char buf[32];
295 295
296 sess->tpg = conn->tpg; 296 sess->tpg = conn->tpg;
297 297
298 /* 298 /*
299 * Assign a new TPG Session Handle. Note this is protected with 299 * Assign a new TPG Session Handle. Note this is protected with
300 * struct iscsi_portal_group->np_login_sem from iscsit_access_np(). 300 * struct iscsi_portal_group->np_login_sem from iscsit_access_np().
301 */ 301 */
302 sess->tsih = ++ISCSI_TPG_S(sess)->ntsih; 302 sess->tsih = ++ISCSI_TPG_S(sess)->ntsih;
303 if (!sess->tsih) 303 if (!sess->tsih)
304 sess->tsih = ++ISCSI_TPG_S(sess)->ntsih; 304 sess->tsih = ++ISCSI_TPG_S(sess)->ntsih;
305 305
306 /* 306 /*
307 	 * Create the default params from user-defined values. 307 	 * Create the default params from user-defined values.
308 */ 308 */
309 if (iscsi_copy_param_list(&conn->param_list, 309 if (iscsi_copy_param_list(&conn->param_list,
310 ISCSI_TPG_C(conn)->param_list, 1) < 0) { 310 ISCSI_TPG_C(conn)->param_list, 1) < 0) {
311 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 311 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
312 ISCSI_LOGIN_STATUS_NO_RESOURCES); 312 ISCSI_LOGIN_STATUS_NO_RESOURCES);
313 return -1; 313 return -1;
314 } 314 }
315 315
316 iscsi_set_keys_to_negotiate(0, conn->param_list); 316 iscsi_set_keys_to_negotiate(0, conn->param_list);
317 317
318 if (sess->sess_ops->SessionType) 318 if (sess->sess_ops->SessionType)
319 return iscsi_set_keys_irrelevant_for_discovery( 319 return iscsi_set_keys_irrelevant_for_discovery(
320 conn->param_list); 320 conn->param_list);
321 321
322 na = iscsit_tpg_get_node_attrib(sess); 322 na = iscsit_tpg_get_node_attrib(sess);
323 323
324 /* 324 /*
325 * Need to send TargetPortalGroupTag back in first login response 325 * Need to send TargetPortalGroupTag back in first login response
326 * on any iSCSI connection where the Initiator provides TargetName. 326 * on any iSCSI connection where the Initiator provides TargetName.
327 * See 5.3.1. Login Phase Start 327 * See 5.3.1. Login Phase Start
328 * 328 *
329 * In our case, we have already located the struct iscsi_tiqn at this point. 329 * In our case, we have already located the struct iscsi_tiqn at this point.
330 */ 330 */
331 memset(buf, 0, 32); 331 memset(buf, 0, 32);
332 sprintf(buf, "TargetPortalGroupTag=%hu", ISCSI_TPG_S(sess)->tpgt); 332 sprintf(buf, "TargetPortalGroupTag=%hu", ISCSI_TPG_S(sess)->tpgt);
333 if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) { 333 if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
334 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 334 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
335 ISCSI_LOGIN_STATUS_NO_RESOURCES); 335 ISCSI_LOGIN_STATUS_NO_RESOURCES);
336 return -1; 336 return -1;
337 } 337 }
338 338
339 /* 339 /*
340 * Workaround for Initiators that have broken connection recovery logic. 340 * Workaround for Initiators that have broken connection recovery logic.
341 * 341 *
342 * "We would really like to get rid of this." Linux-iSCSI.org team 342 * "We would really like to get rid of this." Linux-iSCSI.org team
343 */ 343 */
344 memset(buf, 0, 32); 344 memset(buf, 0, 32);
345 sprintf(buf, "ErrorRecoveryLevel=%d", na->default_erl); 345 sprintf(buf, "ErrorRecoveryLevel=%d", na->default_erl);
346 if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) { 346 if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
347 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 347 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
348 ISCSI_LOGIN_STATUS_NO_RESOURCES); 348 ISCSI_LOGIN_STATUS_NO_RESOURCES);
349 return -1; 349 return -1;
350 } 350 }
351 351
352 if (iscsi_login_disable_FIM_keys(conn->param_list, conn) < 0) 352 if (iscsi_login_disable_FIM_keys(conn->param_list, conn) < 0)
353 return -1; 353 return -1;
354 354
355 return 0; 355 return 0;
356 } 356 }
357 357
358 /* 358 /*
359 * Remove PSTATE_NEGOTIATE for the four FIM related keys. 359 * Remove PSTATE_NEGOTIATE for the four FIM related keys.
360 * The Initiator node will be able to enable FIM by proposing them itself. 360 * The Initiator node will be able to enable FIM by proposing them itself.
361 */ 361 */
362 int iscsi_login_disable_FIM_keys( 362 int iscsi_login_disable_FIM_keys(
363 struct iscsi_param_list *param_list, 363 struct iscsi_param_list *param_list,
364 struct iscsi_conn *conn) 364 struct iscsi_conn *conn)
365 { 365 {
366 struct iscsi_param *param; 366 struct iscsi_param *param;
367 367
368 param = iscsi_find_param_from_key("OFMarker", param_list); 368 param = iscsi_find_param_from_key("OFMarker", param_list);
369 if (!param) { 369 if (!param) {
370 pr_err("iscsi_find_param_from_key() for" 370 pr_err("iscsi_find_param_from_key() for"
371 " OFMarker failed\n"); 371 " OFMarker failed\n");
372 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 372 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
373 ISCSI_LOGIN_STATUS_NO_RESOURCES); 373 ISCSI_LOGIN_STATUS_NO_RESOURCES);
374 return -1; 374 return -1;
375 } 375 }
376 param->state &= ~PSTATE_NEGOTIATE; 376 param->state &= ~PSTATE_NEGOTIATE;
377 377
378 param = iscsi_find_param_from_key("OFMarkInt", param_list); 378 param = iscsi_find_param_from_key("OFMarkInt", param_list);
379 if (!param) { 379 if (!param) {
380 pr_err("iscsi_find_param_from_key() for" 380 pr_err("iscsi_find_param_from_key() for"
381 " IFMarker failed\n"); 381 " IFMarker failed\n");
382 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 382 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
383 ISCSI_LOGIN_STATUS_NO_RESOURCES); 383 ISCSI_LOGIN_STATUS_NO_RESOURCES);
384 return -1; 384 return -1;
385 } 385 }
386 param->state &= ~PSTATE_NEGOTIATE; 386 param->state &= ~PSTATE_NEGOTIATE;
387 387
388 param = iscsi_find_param_from_key("IFMarker", param_list); 388 param = iscsi_find_param_from_key("IFMarker", param_list);
389 if (!param) { 389 if (!param) {
390 pr_err("iscsi_find_param_from_key() for" 390 pr_err("iscsi_find_param_from_key() for"
391 " IFMarker failed\n"); 391 " IFMarker failed\n");
392 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 392 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
393 ISCSI_LOGIN_STATUS_NO_RESOURCES); 393 ISCSI_LOGIN_STATUS_NO_RESOURCES);
394 return -1; 394 return -1;
395 } 395 }
396 param->state &= ~PSTATE_NEGOTIATE; 396 param->state &= ~PSTATE_NEGOTIATE;
397 397
398 param = iscsi_find_param_from_key("IFMarkInt", param_list); 398 param = iscsi_find_param_from_key("IFMarkInt", param_list);
399 if (!param) { 399 if (!param) {
400 pr_err("iscsi_find_param_from_key() for" 400 pr_err("iscsi_find_param_from_key() for"
401 " IFMarker failed\n"); 401 " IFMarker failed\n");
402 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 402 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
403 ISCSI_LOGIN_STATUS_NO_RESOURCES); 403 ISCSI_LOGIN_STATUS_NO_RESOURCES);
404 return -1; 404 return -1;
405 } 405 }
406 param->state &= ~PSTATE_NEGOTIATE; 406 param->state &= ~PSTATE_NEGOTIATE;
407 407
408 return 0; 408 return 0;
409 } 409 }
410 410
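The four lookups in iscsi_login_disable_FIM_keys() differ only in the key string, and a table-driven form would keep each error message in sync with its key automatically. A sketch of such a refactor (illustrative only, not part of this commit):

static char *fim_keys[] = {
	"OFMarker", "OFMarkInt", "IFMarker", "IFMarkInt",
};

static int example_disable_fim_keys(
	struct iscsi_param_list *param_list,
	struct iscsi_conn *conn)
{
	struct iscsi_param *param;
	int i;

	for (i = 0; i < ARRAY_SIZE(fim_keys); i++) {
		param = iscsi_find_param_from_key(fim_keys[i], param_list);
		if (!param) {
			pr_err("iscsi_find_param_from_key() for %s failed\n",
				fim_keys[i]);
			iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
					ISCSI_LOGIN_STATUS_NO_RESOURCES);
			return -1;
		}
		param->state &= ~PSTATE_NEGOTIATE;
	}

	return 0;
}
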
411 static int iscsi_login_non_zero_tsih_s1( 411 static int iscsi_login_non_zero_tsih_s1(
412 struct iscsi_conn *conn, 412 struct iscsi_conn *conn,
413 unsigned char *buf) 413 unsigned char *buf)
414 { 414 {
415 struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf; 415 struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
416 416
417 iscsi_login_set_conn_values(NULL, conn, pdu->cid); 417 iscsi_login_set_conn_values(NULL, conn, pdu->cid);
418 return 0; 418 return 0;
419 } 419 }
420 420
421 /* 421 /*
422 * Add a new connection to an existing session. 422 * Add a new connection to an existing session.
423 */ 423 */
424 static int iscsi_login_non_zero_tsih_s2( 424 static int iscsi_login_non_zero_tsih_s2(
425 struct iscsi_conn *conn, 425 struct iscsi_conn *conn,
426 unsigned char *buf) 426 unsigned char *buf)
427 { 427 {
428 struct iscsi_portal_group *tpg = conn->tpg; 428 struct iscsi_portal_group *tpg = conn->tpg;
429 struct iscsi_session *sess = NULL, *sess_p = NULL; 429 struct iscsi_session *sess = NULL, *sess_p = NULL;
430 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; 430 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
431 struct se_session *se_sess, *se_sess_tmp; 431 struct se_session *se_sess, *se_sess_tmp;
432 struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf; 432 struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
433 433
434 spin_lock_bh(&se_tpg->session_lock); 434 spin_lock_bh(&se_tpg->session_lock);
435 list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list, 435 list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list,
436 sess_list) { 436 sess_list) {
437 437
438 sess_p = (struct iscsi_session *)se_sess->fabric_sess_ptr; 438 sess_p = (struct iscsi_session *)se_sess->fabric_sess_ptr;
439 if (atomic_read(&sess_p->session_fall_back_to_erl0) || 439 if (atomic_read(&sess_p->session_fall_back_to_erl0) ||
440 atomic_read(&sess_p->session_logout) || 440 atomic_read(&sess_p->session_logout) ||
441 (sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED)) 441 (sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED))
442 continue; 442 continue;
443 if (!memcmp((const void *)sess_p->isid, 443 if (!memcmp((const void *)sess_p->isid,
444 (const void *)pdu->isid, 6) && 444 (const void *)pdu->isid, 6) &&
445 (sess_p->tsih == pdu->tsih)) { 445 (sess_p->tsih == pdu->tsih)) {
446 iscsit_inc_session_usage_count(sess_p); 446 iscsit_inc_session_usage_count(sess_p);
447 iscsit_stop_time2retain_timer(sess_p); 447 iscsit_stop_time2retain_timer(sess_p);
448 sess = sess_p; 448 sess = sess_p;
449 break; 449 break;
450 } 450 }
451 } 451 }
452 spin_unlock_bh(&se_tpg->session_lock); 452 spin_unlock_bh(&se_tpg->session_lock);
453 453
454 /* 454 /*
455 * If the Time2Retain handler has expired, the session is already gone. 455 * If the Time2Retain handler has expired, the session is already gone.
456 */ 456 */
457 if (!sess) { 457 if (!sess) {
458 pr_err("Initiator attempting to add a connection to" 458 pr_err("Initiator attempting to add a connection to"
459 " a non-existent session, rejecting iSCSI Login.\n"); 459 " a non-existent session, rejecting iSCSI Login.\n");
460 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR, 460 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
461 ISCSI_LOGIN_STATUS_NO_SESSION); 461 ISCSI_LOGIN_STATUS_NO_SESSION);
462 return -1; 462 return -1;
463 } 463 }
464 464
465 /* 465 /*
466 * Stop the Time2Retain timer if this is a failed session, we restart 466 * Stop the Time2Retain timer if this is a failed session, we restart
467 * the timer if the login is not successful. 467 * the timer if the login is not successful.
468 */ 468 */
469 spin_lock_bh(&sess->conn_lock); 469 spin_lock_bh(&sess->conn_lock);
470 if (sess->session_state == TARG_SESS_STATE_FAILED) 470 if (sess->session_state == TARG_SESS_STATE_FAILED)
471 atomic_set(&sess->session_continuation, 1); 471 atomic_set(&sess->session_continuation, 1);
472 spin_unlock_bh(&sess->conn_lock); 472 spin_unlock_bh(&sess->conn_lock);
473 473
474 iscsi_login_set_conn_values(sess, conn, pdu->cid); 474 iscsi_login_set_conn_values(sess, conn, pdu->cid);
475 475
476 if (iscsi_copy_param_list(&conn->param_list, 476 if (iscsi_copy_param_list(&conn->param_list,
477 ISCSI_TPG_C(conn)->param_list, 0) < 0) { 477 ISCSI_TPG_C(conn)->param_list, 0) < 0) {
478 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 478 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
479 ISCSI_LOGIN_STATUS_NO_RESOURCES); 479 ISCSI_LOGIN_STATUS_NO_RESOURCES);
480 return -1; 480 return -1;
481 } 481 }
482 482
483 iscsi_set_keys_to_negotiate(0, conn->param_list); 483 iscsi_set_keys_to_negotiate(0, conn->param_list);
484 /* 484 /*
485 * Need to send TargetPortalGroupTag back in first login response 485 * Need to send TargetPortalGroupTag back in first login response
486 * on any iSCSI connection where the Initiator provides TargetName. 486 * on any iSCSI connection where the Initiator provides TargetName.
487 * See 5.3.1. Login Phase Start 487 * See 5.3.1. Login Phase Start
488 * 488 *
489 * In our case, we have already located the struct iscsi_tiqn at this point. 489 * In our case, we have already located the struct iscsi_tiqn at this point.
490 */ 490 */
491 memset(buf, 0, 32); 491 memset(buf, 0, 32);
492 sprintf(buf, "TargetPortalGroupTag=%hu", ISCSI_TPG_S(sess)->tpgt); 492 sprintf(buf, "TargetPortalGroupTag=%hu", ISCSI_TPG_S(sess)->tpgt);
493 if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) { 493 if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
494 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 494 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
495 ISCSI_LOGIN_STATUS_NO_RESOURCES); 495 ISCSI_LOGIN_STATUS_NO_RESOURCES);
496 return -1; 496 return -1;
497 } 497 }
498 498
499 return iscsi_login_disable_FIM_keys(conn->param_list, conn); 499 return iscsi_login_disable_FIM_keys(conn->param_list, conn);
500 } 500 }
501 501
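iscsi_login_non_zero_tsih_s2() reuses the caller's 32-byte scratch buffer to push a TargetPortalGroupTag=<tpgt> key into the parameter list. A minimal userspace sketch of the same key=value formatting; the tpgt value is made up here (the driver reads ISCSI_TPG_S(sess)->tpgt), and snprintf() stands in for the unbounded sprintf():

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned short tpgt = 1;	/* illustrative; the real value comes from the TPG */
	char buf[32];

	memset(buf, 0, sizeof(buf));
	snprintf(buf, sizeof(buf), "TargetPortalGroupTag=%hu", tpgt);
	printf("%s\n", buf);		/* key echoed in the first Login Response */
	return 0;
}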
502 int iscsi_login_post_auth_non_zero_tsih( 502 int iscsi_login_post_auth_non_zero_tsih(
503 struct iscsi_conn *conn, 503 struct iscsi_conn *conn,
504 u16 cid, 504 u16 cid,
505 u32 exp_statsn) 505 u32 exp_statsn)
506 { 506 {
507 struct iscsi_conn *conn_ptr = NULL; 507 struct iscsi_conn *conn_ptr = NULL;
508 struct iscsi_conn_recovery *cr = NULL; 508 struct iscsi_conn_recovery *cr = NULL;
509 struct iscsi_session *sess = conn->sess; 509 struct iscsi_session *sess = conn->sess;
510 510
511 /* 511 /*
512 * By following item 5 in the login table, if we have found 512 * By following item 5 in the login table, if we have found
513 * an existing ISID and a valid/existing TSIH and an existing 513 * an existing ISID and a valid/existing TSIH and an existing
514 * CID, we do connection reinstatement. Currently we do not 514 * CID, we do connection reinstatement. Currently we do not
515 * support it, so we send back a non-zero status class to the 515 * support it, so we send back a non-zero status class to the
516 * initiator and release the new connection. 516 * initiator and release the new connection.
517 */ 517 */
518 conn_ptr = iscsit_get_conn_from_cid_rcfr(sess, cid); 518 conn_ptr = iscsit_get_conn_from_cid_rcfr(sess, cid);
519 if ((conn_ptr)) { 519 if ((conn_ptr)) {
520 pr_err("Connection exists with CID %hu for %s," 520 pr_err("Connection exists with CID %hu for %s,"
521 " performing connection reinstatement.\n", 521 " performing connection reinstatement.\n",
522 conn_ptr->cid, sess->sess_ops->InitiatorName); 522 conn_ptr->cid, sess->sess_ops->InitiatorName);
523 523
524 iscsit_connection_reinstatement_rcfr(conn_ptr); 524 iscsit_connection_reinstatement_rcfr(conn_ptr);
525 iscsit_dec_conn_usage_count(conn_ptr); 525 iscsit_dec_conn_usage_count(conn_ptr);
526 } 526 }
527 527
528 /* 528 /*
529 * Check for any connection recovery entries containing CID. 529 * Check for any connection recovery entries containing CID.
530 * We use the original ExpStatSN sent in the first login request 530 * We use the original ExpStatSN sent in the first login request
531 * to acknowledge commands for the failed connection. 531 * to acknowledge commands for the failed connection.
532 * 532 *
533 * Also note that an explicit logout may have already been sent, 533 * Also note that an explicit logout may have already been sent,
534 * but the response may not be sent due to additional connection 534 * but the response may not be sent due to additional connection
535 * loss. 535 * loss.
536 */ 536 */
537 if (sess->sess_ops->ErrorRecoveryLevel == 2) { 537 if (sess->sess_ops->ErrorRecoveryLevel == 2) {
538 cr = iscsit_get_inactive_connection_recovery_entry( 538 cr = iscsit_get_inactive_connection_recovery_entry(
539 sess, cid); 539 sess, cid);
540 if ((cr)) { 540 if ((cr)) {
541 pr_debug("Performing implicit logout" 541 pr_debug("Performing implicit logout"
542 " for connection recovery on CID: %hu\n", 542 " for connection recovery on CID: %hu\n",
543 conn->cid); 543 conn->cid);
544 iscsit_discard_cr_cmds_by_expstatsn(cr, exp_statsn); 544 iscsit_discard_cr_cmds_by_expstatsn(cr, exp_statsn);
545 } 545 }
546 } 546 }
547 547
548 /* 548 /*
549 * Otherwise we follow item 4 from the login table: we have 549 * Otherwise we follow item 4 from the login table: we have
550 * found an existing ISID and a valid/existing TSIH and a new 550 * found an existing ISID and a valid/existing TSIH and a new
551 * CID, so we go ahead and add a new connection to the 551 * CID, so we go ahead and add a new connection to the
552 * session. 552 * session.
553 */ 553 */
554 pr_debug("Adding CID %hu to existing session for %s.\n", 554 pr_debug("Adding CID %hu to existing session for %s.\n",
555 cid, sess->sess_ops->InitiatorName); 555 cid, sess->sess_ops->InitiatorName);
556 556
557 if ((atomic_read(&sess->nconn) + 1) > sess->sess_ops->MaxConnections) { 557 if ((atomic_read(&sess->nconn) + 1) > sess->sess_ops->MaxConnections) {
558 pr_err("Adding additional connection to this session" 558 pr_err("Adding additional connection to this session"
559 " would exceed MaxConnections %d, login failed.\n", 559 " would exceed MaxConnections %d, login failed.\n",
560 sess->sess_ops->MaxConnections); 560 sess->sess_ops->MaxConnections);
561 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR, 561 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
562 ISCSI_LOGIN_STATUS_ISID_ERROR); 562 ISCSI_LOGIN_STATUS_ISID_ERROR);
563 return -1; 563 return -1;
564 } 564 }
565 565
566 return 0; 566 return 0;
567 } 567 }
568 568
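iscsi_login_post_auth_non_zero_tsih() walks items 5 and 4 of the login table in order: an existing CID is torn down via connection reinstatement, ERL=2 sessions get an implicit logout for any matching recovery entry, and the new connection is then added (subject to MaxConnections). A condensed, runnable sketch of that decision order; the helpers are toy stand-ins, not the driver's iscsit_* API:

#include <stdio.h>

/* Toy stand-ins for the iscsit_* lookups; return values are illustrative. */
static int cid_in_use(unsigned short cid)            { return cid == 1; }
static int recovery_entry_exists(unsigned short cid) { return cid == 2; }

static int post_auth_non_zero_tsih(unsigned short cid, int erl)
{
	if (cid_in_use(cid))	/* login table item 5 */
		printf("CID %hu: connection reinstatement\n", cid);

	if (erl == 2 && recovery_entry_exists(cid))
		printf("CID %hu: implicit logout for connection recovery\n", cid);

	/* login table item 4: fall through and add the connection */
	printf("CID %hu: added to existing session\n", cid);
	return 0;
}

int main(void)
{
	post_auth_non_zero_tsih(1, 0);
	post_auth_non_zero_tsih(2, 2);
	post_auth_non_zero_tsih(3, 0);
	return 0;
}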
569 static void iscsi_post_login_start_timers(struct iscsi_conn *conn) 569 static void iscsi_post_login_start_timers(struct iscsi_conn *conn)
570 { 570 {
571 struct iscsi_session *sess = conn->sess; 571 struct iscsi_session *sess = conn->sess;
572 572
573 if (!sess->sess_ops->SessionType) 573 if (!sess->sess_ops->SessionType)
574 iscsit_start_nopin_timer(conn); 574 iscsit_start_nopin_timer(conn);
575 } 575 }
576 576
577 static int iscsi_post_login_handler( 577 static int iscsi_post_login_handler(
578 struct iscsi_np *np, 578 struct iscsi_np *np,
579 struct iscsi_conn *conn, 579 struct iscsi_conn *conn,
580 u8 zero_tsih) 580 u8 zero_tsih)
581 { 581 {
582 int stop_timer = 0; 582 int stop_timer = 0;
583 struct iscsi_session *sess = conn->sess; 583 struct iscsi_session *sess = conn->sess;
584 struct se_session *se_sess = sess->se_sess; 584 struct se_session *se_sess = sess->se_sess;
585 struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess); 585 struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
586 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; 586 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
587 struct iscsi_thread_set *ts; 587 struct iscsi_thread_set *ts;
588 588
589 iscsit_inc_conn_usage_count(conn); 589 iscsit_inc_conn_usage_count(conn);
590 590
591 iscsit_collect_login_stats(conn, ISCSI_STATUS_CLS_SUCCESS, 591 iscsit_collect_login_stats(conn, ISCSI_STATUS_CLS_SUCCESS,
592 ISCSI_LOGIN_STATUS_ACCEPT); 592 ISCSI_LOGIN_STATUS_ACCEPT);
593 593
594 pr_debug("Moving to TARG_CONN_STATE_LOGGED_IN.\n"); 594 pr_debug("Moving to TARG_CONN_STATE_LOGGED_IN.\n");
595 conn->conn_state = TARG_CONN_STATE_LOGGED_IN; 595 conn->conn_state = TARG_CONN_STATE_LOGGED_IN;
596 596
597 iscsi_set_connection_parameters(conn->conn_ops, conn->param_list); 597 iscsi_set_connection_parameters(conn->conn_ops, conn->param_list);
598 iscsit_set_sync_and_steering_values(conn); 598 iscsit_set_sync_and_steering_values(conn);
599 /* 599 /*
600 * SCSI Initiator -> SCSI Target Port Mapping 600 * SCSI Initiator -> SCSI Target Port Mapping
601 */ 601 */
602 ts = iscsi_get_thread_set(); 602 ts = iscsi_get_thread_set();
603 if (!zero_tsih) { 603 if (!zero_tsih) {
604 iscsi_set_session_parameters(sess->sess_ops, 604 iscsi_set_session_parameters(sess->sess_ops,
605 conn->param_list, 0); 605 conn->param_list, 0);
606 iscsi_release_param_list(conn->param_list); 606 iscsi_release_param_list(conn->param_list);
607 conn->param_list = NULL; 607 conn->param_list = NULL;
608 608
609 spin_lock_bh(&sess->conn_lock); 609 spin_lock_bh(&sess->conn_lock);
610 atomic_set(&sess->session_continuation, 0); 610 atomic_set(&sess->session_continuation, 0);
611 if (sess->session_state == TARG_SESS_STATE_FAILED) { 611 if (sess->session_state == TARG_SESS_STATE_FAILED) {
612 pr_debug("Moving to" 612 pr_debug("Moving to"
613 " TARG_SESS_STATE_LOGGED_IN.\n"); 613 " TARG_SESS_STATE_LOGGED_IN.\n");
614 sess->session_state = TARG_SESS_STATE_LOGGED_IN; 614 sess->session_state = TARG_SESS_STATE_LOGGED_IN;
615 stop_timer = 1; 615 stop_timer = 1;
616 } 616 }
617 617
618 pr_debug("iSCSI Login successful on CID: %hu from %s to" 618 pr_debug("iSCSI Login successful on CID: %hu from %s to"
619 " %s:%hu,%hu\n", conn->cid, conn->login_ip, np->np_ip, 619 " %s:%hu,%hu\n", conn->cid, conn->login_ip, np->np_ip,
620 np->np_port, tpg->tpgt); 620 np->np_port, tpg->tpgt);
621 621
622 list_add_tail(&conn->conn_list, &sess->sess_conn_list); 622 list_add_tail(&conn->conn_list, &sess->sess_conn_list);
623 atomic_inc(&sess->nconn); 623 atomic_inc(&sess->nconn);
624 pr_debug("Incremented iSCSI Connection count to %hu" 624 pr_debug("Incremented iSCSI Connection count to %hu"
625 " from node: %s\n", atomic_read(&sess->nconn), 625 " from node: %s\n", atomic_read(&sess->nconn),
626 sess->sess_ops->InitiatorName); 626 sess->sess_ops->InitiatorName);
627 spin_unlock_bh(&sess->conn_lock); 627 spin_unlock_bh(&sess->conn_lock);
628 628
629 iscsi_post_login_start_timers(conn); 629 iscsi_post_login_start_timers(conn);
630 iscsi_activate_thread_set(conn, ts); 630 iscsi_activate_thread_set(conn, ts);
631 /* 631 /*
632 * Determine CPU mask to ensure connection's RX and TX kthreads 632 * Determine CPU mask to ensure connection's RX and TX kthreads
633 * are scheduled on the same CPU. 633 * are scheduled on the same CPU.
634 */ 634 */
635 iscsit_thread_get_cpumask(conn); 635 iscsit_thread_get_cpumask(conn);
636 conn->conn_rx_reset_cpumask = 1; 636 conn->conn_rx_reset_cpumask = 1;
637 conn->conn_tx_reset_cpumask = 1; 637 conn->conn_tx_reset_cpumask = 1;
638 638
639 iscsit_dec_conn_usage_count(conn); 639 iscsit_dec_conn_usage_count(conn);
640 if (stop_timer) { 640 if (stop_timer) {
641 spin_lock_bh(&se_tpg->session_lock); 641 spin_lock_bh(&se_tpg->session_lock);
642 iscsit_stop_time2retain_timer(sess); 642 iscsit_stop_time2retain_timer(sess);
643 spin_unlock_bh(&se_tpg->session_lock); 643 spin_unlock_bh(&se_tpg->session_lock);
644 } 644 }
645 iscsit_dec_session_usage_count(sess); 645 iscsit_dec_session_usage_count(sess);
646 return 0; 646 return 0;
647 } 647 }
648 648
649 iscsi_set_session_parameters(sess->sess_ops, conn->param_list, 1); 649 iscsi_set_session_parameters(sess->sess_ops, conn->param_list, 1);
650 iscsi_release_param_list(conn->param_list); 650 iscsi_release_param_list(conn->param_list);
651 conn->param_list = NULL; 651 conn->param_list = NULL;
652 652
653 iscsit_determine_maxcmdsn(sess); 653 iscsit_determine_maxcmdsn(sess);
654 654
655 spin_lock_bh(&se_tpg->session_lock); 655 spin_lock_bh(&se_tpg->session_lock);
656 __transport_register_session(&sess->tpg->tpg_se_tpg, 656 __transport_register_session(&sess->tpg->tpg_se_tpg,
657 se_sess->se_node_acl, se_sess, (void *)sess); 657 se_sess->se_node_acl, se_sess, (void *)sess);
658 pr_debug("Moving to TARG_SESS_STATE_LOGGED_IN.\n"); 658 pr_debug("Moving to TARG_SESS_STATE_LOGGED_IN.\n");
659 sess->session_state = TARG_SESS_STATE_LOGGED_IN; 659 sess->session_state = TARG_SESS_STATE_LOGGED_IN;
660 660
661 pr_debug("iSCSI Login successful on CID: %hu from %s to %s:%hu,%hu\n", 661 pr_debug("iSCSI Login successful on CID: %hu from %s to %s:%hu,%hu\n",
662 conn->cid, conn->login_ip, np->np_ip, np->np_port, tpg->tpgt); 662 conn->cid, conn->login_ip, np->np_ip, np->np_port, tpg->tpgt);
663 663
664 spin_lock_bh(&sess->conn_lock); 664 spin_lock_bh(&sess->conn_lock);
665 list_add_tail(&conn->conn_list, &sess->sess_conn_list); 665 list_add_tail(&conn->conn_list, &sess->sess_conn_list);
666 atomic_inc(&sess->nconn); 666 atomic_inc(&sess->nconn);
667 pr_debug("Incremented iSCSI Connection count to %hu from node:" 667 pr_debug("Incremented iSCSI Connection count to %hu from node:"
668 " %s\n", atomic_read(&sess->nconn), 668 " %s\n", atomic_read(&sess->nconn),
669 sess->sess_ops->InitiatorName); 669 sess->sess_ops->InitiatorName);
670 spin_unlock_bh(&sess->conn_lock); 670 spin_unlock_bh(&sess->conn_lock);
671 671
672 sess->sid = tpg->sid++; 672 sess->sid = tpg->sid++;
673 if (!sess->sid) 673 if (!sess->sid)
674 sess->sid = tpg->sid++; 674 sess->sid = tpg->sid++;
675 pr_debug("Established iSCSI session from node: %s\n", 675 pr_debug("Established iSCSI session from node: %s\n",
676 sess->sess_ops->InitiatorName); 676 sess->sess_ops->InitiatorName);
677 677
678 tpg->nsessions++; 678 tpg->nsessions++;
679 if (tpg->tpg_tiqn) 679 if (tpg->tpg_tiqn)
680 tpg->tpg_tiqn->tiqn_nsessions++; 680 tpg->tpg_tiqn->tiqn_nsessions++;
681 681
682 pr_debug("Incremented number of active iSCSI sessions to %u on" 682 pr_debug("Incremented number of active iSCSI sessions to %u on"
683 " iSCSI Target Portal Group: %hu\n", tpg->nsessions, tpg->tpgt); 683 " iSCSI Target Portal Group: %hu\n", tpg->nsessions, tpg->tpgt);
684 spin_unlock_bh(&se_tpg->session_lock); 684 spin_unlock_bh(&se_tpg->session_lock);
685 685
686 iscsi_post_login_start_timers(conn); 686 iscsi_post_login_start_timers(conn);
687 iscsi_activate_thread_set(conn, ts); 687 iscsi_activate_thread_set(conn, ts);
688 /* 688 /*
689 * Determine CPU mask to ensure connection's RX and TX kthreads 689 * Determine CPU mask to ensure connection's RX and TX kthreads
690 * are scheduled on the same CPU. 690 * are scheduled on the same CPU.
691 */ 691 */
692 iscsit_thread_get_cpumask(conn); 692 iscsit_thread_get_cpumask(conn);
693 conn->conn_rx_reset_cpumask = 1; 693 conn->conn_rx_reset_cpumask = 1;
694 conn->conn_tx_reset_cpumask = 1; 694 conn->conn_tx_reset_cpumask = 1;
695 695
696 iscsit_dec_conn_usage_count(conn); 696 iscsit_dec_conn_usage_count(conn);
697 697
698 return 0; 698 return 0;
699 } 699 }
700 700
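The sid assignment in the leading-connection path post-increments a per-TPG counter and skips 0 on wraparound, so a live session never carries sid 0. The same allocator in isolation:

#include <stdio.h>

static unsigned int next_sid;	/* per-TPG counter in the driver */

static unsigned int alloc_sid(void)
{
	unsigned int sid = next_sid++;

	if (!sid)		/* 0 is never handed out, even after wraparound */
		sid = next_sid++;
	return sid;
}

int main(void)
{
	next_sid = 0xffffffffu;	/* force a wrap to show the skip */
	printf("%u %u %u\n", alloc_sid(), alloc_sid(), alloc_sid());
	return 0;
}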
701 static void iscsi_handle_login_thread_timeout(unsigned long data) 701 static void iscsi_handle_login_thread_timeout(unsigned long data)
702 { 702 {
703 struct iscsi_np *np = (struct iscsi_np *) data; 703 struct iscsi_np *np = (struct iscsi_np *) data;
704 704
705 spin_lock_bh(&np->np_thread_lock); 705 spin_lock_bh(&np->np_thread_lock);
706 pr_err("iSCSI Login timeout on Network Portal %s:%hu\n", 706 pr_err("iSCSI Login timeout on Network Portal %s:%hu\n",
707 np->np_ip, np->np_port); 707 np->np_ip, np->np_port);
708 708
709 if (np->np_login_timer_flags & ISCSI_TF_STOP) { 709 if (np->np_login_timer_flags & ISCSI_TF_STOP) {
710 spin_unlock_bh(&np->np_thread_lock); 710 spin_unlock_bh(&np->np_thread_lock);
711 return; 711 return;
712 } 712 }
713 713
714 if (np->np_thread) 714 if (np->np_thread)
715 send_sig(SIGINT, np->np_thread, 1); 715 send_sig(SIGINT, np->np_thread, 1);
716 716
717 np->np_login_timer_flags &= ~ISCSI_TF_RUNNING; 717 np->np_login_timer_flags &= ~ISCSI_TF_RUNNING;
718 spin_unlock_bh(&np->np_thread_lock); 718 spin_unlock_bh(&np->np_thread_lock);
719 } 719 }
720 720
721 static void iscsi_start_login_thread_timer(struct iscsi_np *np) 721 static void iscsi_start_login_thread_timer(struct iscsi_np *np)
722 { 722 {
723 /* 723 /*
724 * This uses the TA_LOGIN_TIMEOUT constant because at this 724 * This uses the TA_LOGIN_TIMEOUT constant because at this
725 * point we do not have access to ISCSI_TPG_ATTRIB(tpg)->login_timeout 725 * point we do not have access to ISCSI_TPG_ATTRIB(tpg)->login_timeout
726 */ 726 */
727 spin_lock_bh(&np->np_thread_lock); 727 spin_lock_bh(&np->np_thread_lock);
728 init_timer(&np->np_login_timer); 728 init_timer(&np->np_login_timer);
729 np->np_login_timer.expires = (get_jiffies_64() + TA_LOGIN_TIMEOUT * HZ); 729 np->np_login_timer.expires = (get_jiffies_64() + TA_LOGIN_TIMEOUT * HZ);
730 np->np_login_timer.data = (unsigned long)np; 730 np->np_login_timer.data = (unsigned long)np;
731 np->np_login_timer.function = iscsi_handle_login_thread_timeout; 731 np->np_login_timer.function = iscsi_handle_login_thread_timeout;
732 np->np_login_timer_flags &= ~ISCSI_TF_STOP; 732 np->np_login_timer_flags &= ~ISCSI_TF_STOP;
733 np->np_login_timer_flags |= ISCSI_TF_RUNNING; 733 np->np_login_timer_flags |= ISCSI_TF_RUNNING;
734 add_timer(&np->np_login_timer); 734 add_timer(&np->np_login_timer);
735 735
736 pr_debug("Added timeout timer to iSCSI login request for" 736 pr_debug("Added timeout timer to iSCSI login request for"
737 " %u seconds.\n", TA_LOGIN_TIMEOUT); 737 " %u seconds.\n", TA_LOGIN_TIMEOUT);
738 spin_unlock_bh(&np->np_thread_lock); 738 spin_unlock_bh(&np->np_thread_lock);
739 } 739 }
740 740
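The init_timer()/add_timer() sequence above is the manual timer-wiring idiom of this kernel generation (before timer_setup() existed): the callback takes an unsigned long cookie, and expires is absolute jiffies. A stripped-down sketch of the pattern; kernel-only code, and plain jiffies is used here where the driver reads get_jiffies_64():

#include <linux/timer.h>
#include <linux/jiffies.h>

struct login_timeout_ctx {
	struct timer_list timer;
	int fired;
};

static void login_timeout_fn(unsigned long data)
{
	struct login_timeout_ctx *ctx = (struct login_timeout_ctx *)data;

	ctx->fired = 1;		/* runs in timer (softirq) context */
}

static void arm_login_timeout(struct login_timeout_ctx *ctx, unsigned int secs)
{
	init_timer(&ctx->timer);
	ctx->timer.expires = jiffies + secs * HZ;
	ctx->timer.data = (unsigned long)ctx;
	ctx->timer.function = login_timeout_fn;
	add_timer(&ctx->timer);
}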
741 static void iscsi_stop_login_thread_timer(struct iscsi_np *np) 741 static void iscsi_stop_login_thread_timer(struct iscsi_np *np)
742 { 742 {
743 spin_lock_bh(&np->np_thread_lock); 743 spin_lock_bh(&np->np_thread_lock);
744 if (!(np->np_login_timer_flags & ISCSI_TF_RUNNING)) { 744 if (!(np->np_login_timer_flags & ISCSI_TF_RUNNING)) {
745 spin_unlock_bh(&np->np_thread_lock); 745 spin_unlock_bh(&np->np_thread_lock);
746 return; 746 return;
747 } 747 }
748 np->np_login_timer_flags |= ISCSI_TF_STOP; 748 np->np_login_timer_flags |= ISCSI_TF_STOP;
749 spin_unlock_bh(&np->np_thread_lock); 749 spin_unlock_bh(&np->np_thread_lock);
750 750
751 del_timer_sync(&np->np_login_timer); 751 del_timer_sync(&np->np_login_timer);
752 752
753 spin_lock_bh(&np->np_thread_lock); 753 spin_lock_bh(&np->np_thread_lock);
754 np->np_login_timer_flags &= ~ISCSI_TF_RUNNING; 754 np->np_login_timer_flags &= ~ISCSI_TF_RUNNING;
755 spin_unlock_bh(&np->np_thread_lock); 755 spin_unlock_bh(&np->np_thread_lock);
756 } 756 }
757 757
758 int iscsi_target_setup_login_socket( 758 int iscsi_target_setup_login_socket(
759 struct iscsi_np *np, 759 struct iscsi_np *np,
760 struct __kernel_sockaddr_storage *sockaddr) 760 struct __kernel_sockaddr_storage *sockaddr)
761 { 761 {
762 struct socket *sock; 762 struct socket *sock;
763 int backlog = 5, ret, opt = 0, len; 763 int backlog = 5, ret, opt = 0, len;
764 764
765 switch (np->np_network_transport) { 765 switch (np->np_network_transport) {
766 case ISCSI_TCP: 766 case ISCSI_TCP:
767 np->np_ip_proto = IPPROTO_TCP; 767 np->np_ip_proto = IPPROTO_TCP;
768 np->np_sock_type = SOCK_STREAM; 768 np->np_sock_type = SOCK_STREAM;
769 break; 769 break;
770 case ISCSI_SCTP_TCP: 770 case ISCSI_SCTP_TCP:
771 np->np_ip_proto = IPPROTO_SCTP; 771 np->np_ip_proto = IPPROTO_SCTP;
772 np->np_sock_type = SOCK_STREAM; 772 np->np_sock_type = SOCK_STREAM;
773 break; 773 break;
774 case ISCSI_SCTP_UDP: 774 case ISCSI_SCTP_UDP:
775 np->np_ip_proto = IPPROTO_SCTP; 775 np->np_ip_proto = IPPROTO_SCTP;
776 np->np_sock_type = SOCK_SEQPACKET; 776 np->np_sock_type = SOCK_SEQPACKET;
777 break; 777 break;
778 case ISCSI_IWARP_TCP: 778 case ISCSI_IWARP_TCP:
779 case ISCSI_IWARP_SCTP: 779 case ISCSI_IWARP_SCTP:
780 case ISCSI_INFINIBAND: 780 case ISCSI_INFINIBAND:
781 default: 781 default:
782 pr_err("Unsupported network_transport: %d\n", 782 pr_err("Unsupported network_transport: %d\n",
783 np->np_network_transport); 783 np->np_network_transport);
784 return -EINVAL; 784 return -EINVAL;
785 } 785 }
786 786
787 ret = sock_create(sockaddr->ss_family, np->np_sock_type, 787 ret = sock_create(sockaddr->ss_family, np->np_sock_type,
788 np->np_ip_proto, &sock); 788 np->np_ip_proto, &sock);
789 if (ret < 0) { 789 if (ret < 0) {
790 pr_err("sock_create() failed.\n"); 790 pr_err("sock_create() failed.\n");
791 return ret; 791 return ret;
792 } 792 }
793 np->np_socket = sock; 793 np->np_socket = sock;
794 /* 794 /*
795 * The SCTP stack needs struct socket->file. 795 * The SCTP stack needs struct socket->file.
796 */ 796 */
797 if ((np->np_network_transport == ISCSI_SCTP_TCP) || 797 if ((np->np_network_transport == ISCSI_SCTP_TCP) ||
798 (np->np_network_transport == ISCSI_SCTP_UDP)) { 798 (np->np_network_transport == ISCSI_SCTP_UDP)) {
799 if (!sock->file) { 799 if (!sock->file) {
800 sock->file = kzalloc(sizeof(struct file), GFP_KERNEL); 800 sock->file = kzalloc(sizeof(struct file), GFP_KERNEL);
801 if (!sock->file) { 801 if (!sock->file) {
802 pr_err("Unable to allocate struct" 802 pr_err("Unable to allocate struct"
803 " file for SCTP\n"); 803 " file for SCTP\n");
804 ret = -ENOMEM; 804 ret = -ENOMEM;
805 goto fail; 805 goto fail;
806 } 806 }
807 np->np_flags |= NPF_SCTP_STRUCT_FILE; 807 np->np_flags |= NPF_SCTP_STRUCT_FILE;
808 } 808 }
809 } 809 }
810 /* 810 /*
811 * Setup the np->np_sockaddr from the passed sockaddr setup 811 * Setup the np->np_sockaddr from the passed sockaddr setup
812 * in iscsi_target_configfs.c code.. 812 * in iscsi_target_configfs.c code..
813 */ 813 */
814 memcpy((void *)&np->np_sockaddr, (void *)sockaddr, 814 memcpy((void *)&np->np_sockaddr, (void *)sockaddr,
815 sizeof(struct __kernel_sockaddr_storage)); 815 sizeof(struct __kernel_sockaddr_storage));
816 816
817 if (sockaddr->ss_family == AF_INET6) 817 if (sockaddr->ss_family == AF_INET6)
818 len = sizeof(struct sockaddr_in6); 818 len = sizeof(struct sockaddr_in6);
819 else 819 else
820 len = sizeof(struct sockaddr_in); 820 len = sizeof(struct sockaddr_in);
821 /* 821 /*
822 * Set SO_REUSEADDR, and disable the Nagle algorithm with TCP_NODELAY. 822 * Set SO_REUSEADDR, and disable the Nagle algorithm with TCP_NODELAY.
823 */ 823 */
824 opt = 1; 824 opt = 1;
825 if (np->np_network_transport == ISCSI_TCP) { 825 if (np->np_network_transport == ISCSI_TCP) {
826 ret = kernel_setsockopt(sock, IPPROTO_TCP, TCP_NODELAY, 826 ret = kernel_setsockopt(sock, IPPROTO_TCP, TCP_NODELAY,
827 (char *)&opt, sizeof(opt)); 827 (char *)&opt, sizeof(opt));
828 if (ret < 0) { 828 if (ret < 0) {
829 pr_err("kernel_setsockopt() for TCP_NODELAY" 829 pr_err("kernel_setsockopt() for TCP_NODELAY"
830 " failed: %d\n", ret); 830 " failed: %d\n", ret);
831 goto fail; 831 goto fail;
832 } 832 }
833 } 833 }
834 834
835 ret = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, 835 ret = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
836 (char *)&opt, sizeof(opt)); 836 (char *)&opt, sizeof(opt));
837 if (ret < 0) { 837 if (ret < 0) {
838 pr_err("kernel_setsockopt() for SO_REUSEADDR" 838 pr_err("kernel_setsockopt() for SO_REUSEADDR"
839 " failed\n"); 839 " failed\n");
840 goto fail; 840 goto fail;
841 } 841 }
842 842
843 ret = kernel_bind(sock, (struct sockaddr *)&np->np_sockaddr, len); 843 ret = kernel_bind(sock, (struct sockaddr *)&np->np_sockaddr, len);
844 if (ret < 0) { 844 if (ret < 0) {
845 pr_err("kernel_bind() failed: %d\n", ret); 845 pr_err("kernel_bind() failed: %d\n", ret);
846 goto fail; 846 goto fail;
847 } 847 }
848 848
849 ret = kernel_listen(sock, backlog); 849 ret = kernel_listen(sock, backlog);
850 if (ret != 0) { 850 if (ret != 0) {
851 pr_err("kernel_listen() failed: %d\n", ret); 851 pr_err("kernel_listen() failed: %d\n", ret);
852 goto fail; 852 goto fail;
853 } 853 }
854 854
855 return 0; 855 return 0;
856 856
857 fail: 857 fail:
858 np->np_socket = NULL; 858 np->np_socket = NULL;
859 if (sock) { 859 if (sock) {
860 if (np->np_flags & NPF_SCTP_STRUCT_FILE) { 860 if (np->np_flags & NPF_SCTP_STRUCT_FILE) {
861 kfree(sock->file); 861 kfree(sock->file);
862 sock->file = NULL; 862 sock->file = NULL;
863 } 863 }
864 864
865 sock_release(sock); 865 sock_release(sock);
866 } 866 }
867 return ret; 867 return ret;
868 } 868 }
869 869
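iscsi_target_setup_login_socket() is the in-kernel version of the classic listen-socket bring-up: create, set TCP_NODELAY and SO_REUSEADDR, bind, then listen with a backlog of 5. The same sequence in userspace POSIX calls, IPv4 TCP assumed:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static int setup_login_socket(unsigned short port)
{
	struct sockaddr_in addr;
	int fd, opt = 1;

	fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
	if (fd < 0)
		return -1;

	/* Mirror the kernel_setsockopt() calls above. */
	setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &opt, sizeof(opt));
	setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt));

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	addr.sin_port = htons(port);

	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
	    listen(fd, 5) < 0) {	/* backlog = 5, as in the driver */
		close(fd);
		return -1;
	}
	return fd;
}

int main(void)
{
	int fd = setup_login_socket(3260);	/* 3260 is the IANA iSCSI port */

	if (fd >= 0)
		close(fd);
	return fd < 0;
}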
870 static int __iscsi_target_login_thread(struct iscsi_np *np) 870 static int __iscsi_target_login_thread(struct iscsi_np *np)
871 { 871 {
872 u8 buffer[ISCSI_HDR_LEN], iscsi_opcode, zero_tsih = 0; 872 u8 buffer[ISCSI_HDR_LEN], iscsi_opcode, zero_tsih = 0;
873 int err, ret = 0, ip_proto, sock_type, set_sctp_conn_flag, stop; 873 int err, ret = 0, ip_proto, sock_type, set_sctp_conn_flag, stop;
874 struct iscsi_conn *conn = NULL; 874 struct iscsi_conn *conn = NULL;
875 struct iscsi_login *login; 875 struct iscsi_login *login;
876 struct iscsi_portal_group *tpg = NULL; 876 struct iscsi_portal_group *tpg = NULL;
877 struct socket *new_sock, *sock; 877 struct socket *new_sock, *sock;
878 struct kvec iov; 878 struct kvec iov;
879 struct iscsi_login_req *pdu; 879 struct iscsi_login_req *pdu;
880 struct sockaddr_in sock_in; 880 struct sockaddr_in sock_in;
881 struct sockaddr_in6 sock_in6; 881 struct sockaddr_in6 sock_in6;
882 882
883 flush_signals(current); 883 flush_signals(current);
884 set_sctp_conn_flag = 0; 884 set_sctp_conn_flag = 0;
885 sock = np->np_socket; 885 sock = np->np_socket;
886 ip_proto = np->np_ip_proto; 886 ip_proto = np->np_ip_proto;
887 sock_type = np->np_sock_type; 887 sock_type = np->np_sock_type;
888 888
889 spin_lock_bh(&np->np_thread_lock); 889 spin_lock_bh(&np->np_thread_lock);
890 if (np->np_thread_state == ISCSI_NP_THREAD_RESET) { 890 if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
891 np->np_thread_state = ISCSI_NP_THREAD_ACTIVE; 891 np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
892 complete(&np->np_restart_comp); 892 complete(&np->np_restart_comp);
893 } else { 893 } else {
894 np->np_thread_state = ISCSI_NP_THREAD_ACTIVE; 894 np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
895 } 895 }
896 spin_unlock_bh(&np->np_thread_lock); 896 spin_unlock_bh(&np->np_thread_lock);
897 897
898 if (kernel_accept(sock, &new_sock, 0) < 0) { 898 if (kernel_accept(sock, &new_sock, 0) < 0) {
899 spin_lock_bh(&np->np_thread_lock); 899 spin_lock_bh(&np->np_thread_lock);
900 if (np->np_thread_state == ISCSI_NP_THREAD_RESET) { 900 if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
901 spin_unlock_bh(&np->np_thread_lock); 901 spin_unlock_bh(&np->np_thread_lock);
902 complete(&np->np_restart_comp); 902 complete(&np->np_restart_comp);
903 /* Get another socket */ 903 /* Get another socket */
904 return 1; 904 return 1;
905 } 905 }
906 spin_unlock_bh(&np->np_thread_lock); 906 spin_unlock_bh(&np->np_thread_lock);
907 goto out; 907 goto out;
908 } 908 }
909 /* 909 /*
910 * The SCTP stack needs struct socket->file. 910 * The SCTP stack needs struct socket->file.
911 */ 911 */
912 if ((np->np_network_transport == ISCSI_SCTP_TCP) || 912 if ((np->np_network_transport == ISCSI_SCTP_TCP) ||
913 (np->np_network_transport == ISCSI_SCTP_UDP)) { 913 (np->np_network_transport == ISCSI_SCTP_UDP)) {
914 if (!new_sock->file) { 914 if (!new_sock->file) {
915 new_sock->file = kzalloc( 915 new_sock->file = kzalloc(
916 sizeof(struct file), GFP_KERNEL); 916 sizeof(struct file), GFP_KERNEL);
917 if (!new_sock->file) { 917 if (!new_sock->file) {
918 pr_err("Unable to allocate struct" 918 pr_err("Unable to allocate struct"
919 " file for SCTP\n"); 919 " file for SCTP\n");
920 sock_release(new_sock); 920 sock_release(new_sock);
921 /* Get another socket */ 921 /* Get another socket */
922 return 1; 922 return 1;
923 } 923 }
924 set_sctp_conn_flag = 1; 924 set_sctp_conn_flag = 1;
925 } 925 }
926 } 926 }
927 927
928 iscsi_start_login_thread_timer(np); 928 iscsi_start_login_thread_timer(np);
929 929
930 conn = kzalloc(sizeof(struct iscsi_conn), GFP_KERNEL); 930 conn = kzalloc(sizeof(struct iscsi_conn), GFP_KERNEL);
931 if (!conn) { 931 if (!conn) {
932 pr_err("Could not allocate memory for" 932 pr_err("Could not allocate memory for"
933 " new connection\n"); 933 " new connection\n");
934 if (set_sctp_conn_flag) { 934 if (set_sctp_conn_flag) {
935 kfree(new_sock->file); 935 kfree(new_sock->file);
936 new_sock->file = NULL; 936 new_sock->file = NULL;
937 } 937 }
938 sock_release(new_sock); 938 sock_release(new_sock);
939 /* Get another socket */ 939 /* Get another socket */
940 return 1; 940 return 1;
941 } 941 }
942 942
943 pr_debug("Moving to TARG_CONN_STATE_FREE.\n"); 943 pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
944 conn->conn_state = TARG_CONN_STATE_FREE; 944 conn->conn_state = TARG_CONN_STATE_FREE;
945 conn->sock = new_sock; 945 conn->sock = new_sock;
946 946
947 if (set_sctp_conn_flag) 947 if (set_sctp_conn_flag)
948 conn->conn_flags |= CONNFLAG_SCTP_STRUCT_FILE; 948 conn->conn_flags |= CONNFLAG_SCTP_STRUCT_FILE;
949 949
950 pr_debug("Moving to TARG_CONN_STATE_XPT_UP.\n"); 950 pr_debug("Moving to TARG_CONN_STATE_XPT_UP.\n");
951 conn->conn_state = TARG_CONN_STATE_XPT_UP; 951 conn->conn_state = TARG_CONN_STATE_XPT_UP;
952 952
953 /* 953 /*
954 * Allocate conn->conn_ops early, as a failure path below 954 * Allocate conn->conn_ops early, as a failure path below
955 * calls iscsit_tx_login_rsp(), which in turn calls tx_data(). 955 * calls iscsit_tx_login_rsp(), which in turn calls tx_data().
956 */ 956 */
957 conn->conn_ops = kzalloc(sizeof(struct iscsi_conn_ops), GFP_KERNEL); 957 conn->conn_ops = kzalloc(sizeof(struct iscsi_conn_ops), GFP_KERNEL);
958 if (!conn->conn_ops) { 958 if (!conn->conn_ops) {
959 pr_err("Unable to allocate memory for" 959 pr_err("Unable to allocate memory for"
960 " struct iscsi_conn_ops.\n"); 960 " struct iscsi_conn_ops.\n");
961 goto new_sess_out; 961 goto new_sess_out;
962 } 962 }
963 /* 963 /*
964 * Perform the remaining iSCSI connection initialization items. 964 * Perform the remaining iSCSI connection initialization items.
965 */ 965 */
966 if (iscsi_login_init_conn(conn) < 0) 966 if (iscsi_login_init_conn(conn) < 0)
967 goto new_sess_out; 967 goto new_sess_out;
968 968
969 memset(buffer, 0, ISCSI_HDR_LEN); 969 memset(buffer, 0, ISCSI_HDR_LEN);
970 memset(&iov, 0, sizeof(struct kvec)); 970 memset(&iov, 0, sizeof(struct kvec));
971 iov.iov_base = buffer; 971 iov.iov_base = buffer;
972 iov.iov_len = ISCSI_HDR_LEN; 972 iov.iov_len = ISCSI_HDR_LEN;
973 973
974 if (rx_data(conn, &iov, 1, ISCSI_HDR_LEN) <= 0) { 974 if (rx_data(conn, &iov, 1, ISCSI_HDR_LEN) <= 0) {
975 pr_err("rx_data() returned an error.\n"); 975 pr_err("rx_data() returned an error.\n");
976 goto new_sess_out; 976 goto new_sess_out;
977 } 977 }
978 978
979 iscsi_opcode = (buffer[0] & ISCSI_OPCODE_MASK); 979 iscsi_opcode = (buffer[0] & ISCSI_OPCODE_MASK);
980 if (!(iscsi_opcode & ISCSI_OP_LOGIN)) { 980 if (!(iscsi_opcode & ISCSI_OP_LOGIN)) {
981 pr_err("First opcode is not login request," 981 pr_err("First opcode is not login request,"
982 " failing login request.\n"); 982 " failing login request.\n");
983 goto new_sess_out; 983 goto new_sess_out;
984 } 984 }
985 985
986 pdu = (struct iscsi_login_req *) buffer; 986 pdu = (struct iscsi_login_req *) buffer;
987 pdu->cid = be16_to_cpu(pdu->cid); 987 pdu->cid = be16_to_cpu(pdu->cid);
988 pdu->tsih = be16_to_cpu(pdu->tsih); 988 pdu->tsih = be16_to_cpu(pdu->tsih);
989 pdu->itt = be32_to_cpu(pdu->itt); 989 pdu->itt = be32_to_cpu(pdu->itt);
990 pdu->cmdsn = be32_to_cpu(pdu->cmdsn); 990 pdu->cmdsn = be32_to_cpu(pdu->cmdsn);
991 pdu->exp_statsn = be32_to_cpu(pdu->exp_statsn); 991 pdu->exp_statsn = be32_to_cpu(pdu->exp_statsn);
992 /* 992 /*
993 * Used by iscsit_tx_login_rsp() for Login Response PDUs 993 * Used by iscsit_tx_login_rsp() for Login Response PDUs
994 * when Status-Class != 0. 994 * when Status-Class != 0.
995 */ 995 */
996 conn->login_itt = pdu->itt; 996 conn->login_itt = pdu->itt;
997 997
998 spin_lock_bh(&np->np_thread_lock); 998 spin_lock_bh(&np->np_thread_lock);
999 if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) { 999 if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
1000 spin_unlock_bh(&np->np_thread_lock); 1000 spin_unlock_bh(&np->np_thread_lock);
1001 pr_err("iSCSI Network Portal on %s:%hu currently not" 1001 pr_err("iSCSI Network Portal on %s:%hu currently not"
1002 " active.\n", np->np_ip, np->np_port); 1002 " active.\n", np->np_ip, np->np_port);
1003 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 1003 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
1004 ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE); 1004 ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
1005 goto new_sess_out; 1005 goto new_sess_out;
1006 } 1006 }
1007 spin_unlock_bh(&np->np_thread_lock); 1007 spin_unlock_bh(&np->np_thread_lock);
1008 1008
1009 if (np->np_sockaddr.ss_family == AF_INET6) { 1009 if (np->np_sockaddr.ss_family == AF_INET6) {
1010 memset(&sock_in6, 0, sizeof(struct sockaddr_in6)); 1010 memset(&sock_in6, 0, sizeof(struct sockaddr_in6));
1011 1011
1012 if (conn->sock->ops->getname(conn->sock, 1012 if (conn->sock->ops->getname(conn->sock,
1013 (struct sockaddr *)&sock_in6, &err, 1) < 0) { 1013 (struct sockaddr *)&sock_in6, &err, 1) < 0) {
1014 pr_err("sock_ops->getname() failed.\n"); 1014 pr_err("sock_ops->getname() failed.\n");
1015 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 1015 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
1016 ISCSI_LOGIN_STATUS_TARGET_ERROR); 1016 ISCSI_LOGIN_STATUS_TARGET_ERROR);
1017 goto new_sess_out; 1017 goto new_sess_out;
1018 } 1018 }
1019 snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c", 1019 snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
1020 &sock_in6.sin6_addr.in6_u); 1020 &sock_in6.sin6_addr.in6_u);
1021 conn->login_port = ntohs(sock_in6.sin6_port); 1021 conn->login_port = ntohs(sock_in6.sin6_port);
1022 } else { 1022 } else {
1023 memset(&sock_in, 0, sizeof(struct sockaddr_in)); 1023 memset(&sock_in, 0, sizeof(struct sockaddr_in));
1024 1024
1025 if (conn->sock->ops->getname(conn->sock, 1025 if (conn->sock->ops->getname(conn->sock,
1026 (struct sockaddr *)&sock_in, &err, 1) < 0) { 1026 (struct sockaddr *)&sock_in, &err, 1) < 0) {
1027 pr_err("sock_ops->getname() failed.\n"); 1027 pr_err("sock_ops->getname() failed.\n");
1028 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 1028 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
1029 ISCSI_LOGIN_STATUS_TARGET_ERROR); 1029 ISCSI_LOGIN_STATUS_TARGET_ERROR);
1030 goto new_sess_out; 1030 goto new_sess_out;
1031 } 1031 }
1032 sprintf(conn->login_ip, "%pI4", &sock_in.sin_addr.s_addr); 1032 sprintf(conn->login_ip, "%pI4", &sock_in.sin_addr.s_addr);
1033 conn->login_port = ntohs(sock_in.sin_port); 1033 conn->login_port = ntohs(sock_in.sin_port);
1034 } 1034 }
1035 1035
1036 conn->network_transport = np->np_network_transport; 1036 conn->network_transport = np->np_network_transport;
1037 1037
1038 pr_debug("Received iSCSI login request from %s on %s Network" 1038 pr_debug("Received iSCSI login request from %s on %s Network"
1039 " Portal %s:%hu\n", conn->login_ip, 1039 " Portal %s:%hu\n", conn->login_ip,
1040 (conn->network_transport == ISCSI_TCP) ? "TCP" : "SCTP", 1040 (conn->network_transport == ISCSI_TCP) ? "TCP" : "SCTP",
1041 np->np_ip, np->np_port); 1041 np->np_ip, np->np_port);
1042 1042
1043 pr_debug("Moving to TARG_CONN_STATE_IN_LOGIN.\n"); 1043 pr_debug("Moving to TARG_CONN_STATE_IN_LOGIN.\n");
1044 conn->conn_state = TARG_CONN_STATE_IN_LOGIN; 1044 conn->conn_state = TARG_CONN_STATE_IN_LOGIN;
1045 1045
1046 if (iscsi_login_check_initiator_version(conn, pdu->max_version, 1046 if (iscsi_login_check_initiator_version(conn, pdu->max_version,
1047 pdu->min_version) < 0) 1047 pdu->min_version) < 0)
1048 goto new_sess_out; 1048 goto new_sess_out;
1049 1049
1050 zero_tsih = (pdu->tsih == 0x0000); 1050 zero_tsih = (pdu->tsih == 0x0000);
1051 if ((zero_tsih)) { 1051 if ((zero_tsih)) {
1052 /* 1052 /*
1053 * This is the leading connection of a new session. 1053 * This is the leading connection of a new session.
1054 * We wait until after authentication to check for 1054 * We wait until after authentication to check for
1055 * session reinstatement. 1055 * session reinstatement.
1056 */ 1056 */
1057 if (iscsi_login_zero_tsih_s1(conn, buffer) < 0) 1057 if (iscsi_login_zero_tsih_s1(conn, buffer) < 0)
1058 goto new_sess_out; 1058 goto new_sess_out;
1059 } else { 1059 } else {
1060 /* 1060 /*
1061 * Add a new connection to an existing session. 1061 * Add a new connection to an existing session.
1062 * We check for a non-existent session in 1062 * We check for a non-existent session in
1063 * iscsi_login_non_zero_tsih_s2() below based 1063 * iscsi_login_non_zero_tsih_s2() below based
1064 * on ISID/TSIH, but wait until after authentication 1064 * on ISID/TSIH, but wait until after authentication
1065 * to check for connection reinstatement, etc. 1065 * to check for connection reinstatement, etc.
1066 */ 1066 */
1067 if (iscsi_login_non_zero_tsih_s1(conn, buffer) < 0) 1067 if (iscsi_login_non_zero_tsih_s1(conn, buffer) < 0)
1068 goto new_sess_out; 1068 goto new_sess_out;
1069 } 1069 }
1070 1070
1071 /* 1071 /*
1072 * This will process the first login request, and call 1072 * This will process the first login request, and call
1073 * iscsi_target_locate_portal(), and return a valid struct iscsi_login. 1073 * iscsi_target_locate_portal(), and return a valid struct iscsi_login.
1074 */ 1074 */
1075 login = iscsi_target_init_negotiation(np, conn, buffer); 1075 login = iscsi_target_init_negotiation(np, conn, buffer);
1076 if (!login) { 1076 if (!login) {
1077 tpg = conn->tpg; 1077 tpg = conn->tpg;
1078 goto new_sess_out; 1078 goto new_sess_out;
1079 } 1079 }
1080 1080
1081 tpg = conn->tpg; 1081 tpg = conn->tpg;
1082 if (!tpg) { 1082 if (!tpg) {
1083 pr_err("Unable to locate struct iscsi_conn->tpg\n"); 1083 pr_err("Unable to locate struct iscsi_conn->tpg\n");
1084 goto new_sess_out; 1084 goto new_sess_out;
1085 } 1085 }
1086 1086
1087 if (zero_tsih) { 1087 if (zero_tsih) {
1088 if (iscsi_login_zero_tsih_s2(conn) < 0) { 1088 if (iscsi_login_zero_tsih_s2(conn) < 0) {
1089 iscsi_target_nego_release(login, conn); 1089 iscsi_target_nego_release(login, conn);
1090 goto new_sess_out; 1090 goto new_sess_out;
1091 } 1091 }
1092 } else { 1092 } else {
1093 if (iscsi_login_non_zero_tsih_s2(conn, buffer) < 0) { 1093 if (iscsi_login_non_zero_tsih_s2(conn, buffer) < 0) {
1094 iscsi_target_nego_release(login, conn); 1094 iscsi_target_nego_release(login, conn);
1095 goto old_sess_out; 1095 goto old_sess_out;
1096 } 1096 }
1097 } 1097 }
1098 1098
1099 if (iscsi_target_start_negotiation(login, conn) < 0) 1099 if (iscsi_target_start_negotiation(login, conn) < 0)
1100 goto new_sess_out; 1100 goto new_sess_out;
1101 1101
1102 if (!conn->sess) { 1102 if (!conn->sess) {
1103 pr_err("struct iscsi_conn session pointer is NULL!\n"); 1103 pr_err("struct iscsi_conn session pointer is NULL!\n");
1104 goto new_sess_out; 1104 goto new_sess_out;
1105 } 1105 }
1106 1106
1107 iscsi_stop_login_thread_timer(np); 1107 iscsi_stop_login_thread_timer(np);
1108 1108
1109 if (signal_pending(current)) 1109 if (signal_pending(current))
1110 goto new_sess_out; 1110 goto new_sess_out;
1111 1111
1112 ret = iscsi_post_login_handler(np, conn, zero_tsih); 1112 ret = iscsi_post_login_handler(np, conn, zero_tsih);
1113 1113
1114 if (ret < 0) 1114 if (ret < 0)
1115 goto new_sess_out; 1115 goto new_sess_out;
1116 1116
1117 iscsit_deaccess_np(np, tpg); 1117 iscsit_deaccess_np(np, tpg);
1118 tpg = NULL; 1118 tpg = NULL;
1119 /* Get another socket */ 1119 /* Get another socket */
1120 return 1; 1120 return 1;
1121 1121
1122 new_sess_out: 1122 new_sess_out:
1123 pr_err("iSCSI Login negotiation failed.\n"); 1123 pr_err("iSCSI Login negotiation failed.\n");
1124 iscsit_collect_login_stats(conn, ISCSI_STATUS_CLS_INITIATOR_ERR, 1124 iscsit_collect_login_stats(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
1125 ISCSI_LOGIN_STATUS_INIT_ERR); 1125 ISCSI_LOGIN_STATUS_INIT_ERR);
1126 if (!zero_tsih || !conn->sess) 1126 if (!zero_tsih || !conn->sess)
1127 goto old_sess_out; 1127 goto old_sess_out;
1128 if (conn->sess->se_sess) 1128 if (conn->sess->se_sess)
1129 transport_free_session(conn->sess->se_sess); 1129 transport_free_session(conn->sess->se_sess);
1130 if (conn->sess->session_index != 0) { 1130 if (conn->sess->session_index != 0) {
1131 spin_lock_bh(&sess_idr_lock); 1131 spin_lock_bh(&sess_idr_lock);
1132 idr_remove(&sess_idr, conn->sess->session_index); 1132 idr_remove(&sess_idr, conn->sess->session_index);
1133 spin_unlock_bh(&sess_idr_lock); 1133 spin_unlock_bh(&sess_idr_lock);
1134 } 1134 }
1135 if (conn->sess->sess_ops) 1135 if (conn->sess->sess_ops)
1136 kfree(conn->sess->sess_ops); 1136 kfree(conn->sess->sess_ops);
1137 if (conn->sess) 1137 if (conn->sess)
1138 kfree(conn->sess); 1138 kfree(conn->sess);
1139 old_sess_out: 1139 old_sess_out:
1140 iscsi_stop_login_thread_timer(np); 1140 iscsi_stop_login_thread_timer(np);
1141 /* 1141 /*
1142 * If login negotiation fails, check whether the Time2Retain timer 1142 * If login negotiation fails, check whether the Time2Retain timer
1143 * needs to be restarted. 1143 * needs to be restarted.
1144 */ 1144 */
1145 if (!zero_tsih && conn->sess) { 1145 if (!zero_tsih && conn->sess) {
1146 spin_lock_bh(&conn->sess->conn_lock); 1146 spin_lock_bh(&conn->sess->conn_lock);
1147 if (conn->sess->session_state == TARG_SESS_STATE_FAILED) { 1147 if (conn->sess->session_state == TARG_SESS_STATE_FAILED) {
1148 struct se_portal_group *se_tpg = 1148 struct se_portal_group *se_tpg =
1149 &ISCSI_TPG_C(conn)->tpg_se_tpg; 1149 &ISCSI_TPG_C(conn)->tpg_se_tpg;
1150 1150
1151 atomic_set(&conn->sess->session_continuation, 0); 1151 atomic_set(&conn->sess->session_continuation, 0);
1152 spin_unlock_bh(&conn->sess->conn_lock); 1152 spin_unlock_bh(&conn->sess->conn_lock);
1153 spin_lock_bh(&se_tpg->session_lock); 1153 spin_lock_bh(&se_tpg->session_lock);
1154 iscsit_start_time2retain_handler(conn->sess); 1154 iscsit_start_time2retain_handler(conn->sess);
1155 spin_unlock_bh(&se_tpg->session_lock); 1155 spin_unlock_bh(&se_tpg->session_lock);
1156 } else 1156 } else
1157 spin_unlock_bh(&conn->sess->conn_lock); 1157 spin_unlock_bh(&conn->sess->conn_lock);
1158 iscsit_dec_session_usage_count(conn->sess); 1158 iscsit_dec_session_usage_count(conn->sess);
1159 } 1159 }
1160 1160
1161 if (!IS_ERR(conn->conn_rx_hash.tfm)) 1161 if (!IS_ERR(conn->conn_rx_hash.tfm))
1162 crypto_free_hash(conn->conn_rx_hash.tfm); 1162 crypto_free_hash(conn->conn_rx_hash.tfm);
1163 if (!IS_ERR(conn->conn_tx_hash.tfm)) 1163 if (!IS_ERR(conn->conn_tx_hash.tfm))
1164 crypto_free_hash(conn->conn_tx_hash.tfm); 1164 crypto_free_hash(conn->conn_tx_hash.tfm);
1165 1165
1166 if (conn->conn_cpumask) 1166 if (conn->conn_cpumask)
1167 free_cpumask_var(conn->conn_cpumask); 1167 free_cpumask_var(conn->conn_cpumask);
1168 1168
1169 kfree(conn->conn_ops); 1169 kfree(conn->conn_ops);
1170 1170
1171 if (conn->param_list) { 1171 if (conn->param_list) {
1172 iscsi_release_param_list(conn->param_list); 1172 iscsi_release_param_list(conn->param_list);
1173 conn->param_list = NULL; 1173 conn->param_list = NULL;
1174 } 1174 }
1175 if (conn->sock) { 1175 if (conn->sock) {
1176 if (conn->conn_flags & CONNFLAG_SCTP_STRUCT_FILE) { 1176 if (conn->conn_flags & CONNFLAG_SCTP_STRUCT_FILE) {
1177 kfree(conn->sock->file); 1177 kfree(conn->sock->file);
1178 conn->sock->file = NULL; 1178 conn->sock->file = NULL;
1179 } 1179 }
1180 sock_release(conn->sock); 1180 sock_release(conn->sock);
1181 } 1181 }
1182 kfree(conn); 1182 kfree(conn);
1183 1183
1184 if (tpg) { 1184 if (tpg) {
1185 iscsit_deaccess_np(np, tpg); 1185 iscsit_deaccess_np(np, tpg);
1186 tpg = NULL; 1186 tpg = NULL;
1187 } 1187 }
1188 1188
1189 out: 1189 out:
1190 stop = kthread_should_stop(); 1190 stop = kthread_should_stop();
1191 if (!stop && signal_pending(current)) { 1191 if (!stop && signal_pending(current)) {
1192 spin_lock_bh(&np->np_thread_lock); 1192 spin_lock_bh(&np->np_thread_lock);
1193 stop = (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN); 1193 stop = (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN);
1194 spin_unlock_bh(&np->np_thread_lock); 1194 spin_unlock_bh(&np->np_thread_lock);
1195 } 1195 }
1196 /* Wait for another socket.. */ 1196 /* Wait for another socket.. */
1197 if (!stop) 1197 if (!stop)
1198 return 1; 1198 return 1;
1199 1199
1200 iscsi_stop_login_thread_timer(np); 1200 iscsi_stop_login_thread_timer(np);
1201 spin_lock_bh(&np->np_thread_lock); 1201 spin_lock_bh(&np->np_thread_lock);
1202 np->np_thread_state = ISCSI_NP_THREAD_EXIT; 1202 np->np_thread_state = ISCSI_NP_THREAD_EXIT;
1203 spin_unlock_bh(&np->np_thread_lock); 1203 spin_unlock_bh(&np->np_thread_lock);
1204 return 0; 1204 return 0;
1205 } 1205 }
1206 1206
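__iscsi_target_login_thread() byte-swaps the CID, TSIH, ITT, CmdSN and ExpStatSN fields of the Login Request header in place before inspecting them. A userspace sketch of the same parse into a separate struct, with offsets taken from the RFC 3720 Login Request layout rather than the kernel's struct iscsi_login_req:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

#define ISCSI_HDR_LEN		48
#define ISCSI_OPCODE_MASK	0x3f

struct login_req_fields {
	uint8_t  opcode;
	uint16_t cid, tsih;
	uint32_t itt, cmdsn, exp_statsn;
};

/* Field offsets per the RFC 3720 Login Request BHS. */
static void parse_login_req(const uint8_t *bhs, struct login_req_fields *f)
{
	uint16_t v16;
	uint32_t v32;

	f->opcode = bhs[0] & ISCSI_OPCODE_MASK;
	memcpy(&v16, bhs + 14, 2); f->tsih = ntohs(v16);
	memcpy(&v32, bhs + 16, 4); f->itt = ntohl(v32);
	memcpy(&v16, bhs + 20, 2); f->cid = ntohs(v16);
	memcpy(&v32, bhs + 24, 4); f->cmdsn = ntohl(v32);
	memcpy(&v32, bhs + 28, 4); f->exp_statsn = ntohl(v32);
}

int main(void)
{
	uint8_t bhs[ISCSI_HDR_LEN] = { 0x03 };	/* Login Request opcode */
	struct login_req_fields f;

	parse_login_req(bhs, &f);
	printf("opcode=0x%02x zero_tsih=%d\n", f.opcode, f.tsih == 0);
	return 0;
}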
1207 int iscsi_target_login_thread(void *arg) 1207 int iscsi_target_login_thread(void *arg)
1208 { 1208 {
1209 struct iscsi_np *np = (struct iscsi_np *)arg; 1209 struct iscsi_np *np = (struct iscsi_np *)arg;
1210 int ret; 1210 int ret;
1211 1211
1212 allow_signal(SIGINT); 1212 allow_signal(SIGINT);
1213 1213
1214 while (!kthread_should_stop()) { 1214 while (!kthread_should_stop()) {
1215 ret = __iscsi_target_login_thread(np); 1215 ret = __iscsi_target_login_thread(np);
1216 /* 1216 /*
1217 * We break and exit here unless another sock_accept() call 1217 * We break and exit here unless another sock_accept() call
1218 * is expected. 1218 * is expected.
1219 */ 1219 */
1220 if (ret != 1) 1220 if (ret != 1)
1221 break; 1221 break;
1222 } 1222 }
1223 1223
1224 return 0; 1224 return 0;
1225 } 1225 }
1226 1226
drivers/target/iscsi/iscsi_target_nego.c
1 /******************************************************************************* 1 /*******************************************************************************
2 * This file contains main functions related to iSCSI Parameter negotiation. 2 * This file contains main functions related to iSCSI Parameter negotiation.
3 * 3 *
4 * © Copyright 2007-2011 RisingTide Systems LLC. 4 * © Copyright 2007-2011 RisingTide Systems LLC.
5 * 5 *
6 * Licensed to the Linux Foundation under the General Public License (GPL) version 2. 6 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
7 * 7 *
8 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> 8 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by 11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or 12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version. 13 * (at your option) any later version.
14 * 14 *
15 * This program is distributed in the hope that it will be useful, 15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details. 18 * GNU General Public License for more details.
19 ******************************************************************************/ 19 ******************************************************************************/
20 20
21 #include <linux/ctype.h> 21 #include <linux/ctype.h>
22 #include <scsi/iscsi_proto.h> 22 #include <scsi/iscsi_proto.h>
23 #include <target/target_core_base.h> 23 #include <target/target_core_base.h>
24 #include <target/target_core_tpg.h> 24 #include <target/target_core_fabric.h>
25 25
26 #include "iscsi_target_core.h" 26 #include "iscsi_target_core.h"
27 #include "iscsi_target_parameters.h" 27 #include "iscsi_target_parameters.h"
28 #include "iscsi_target_login.h" 28 #include "iscsi_target_login.h"
29 #include "iscsi_target_nego.h" 29 #include "iscsi_target_nego.h"
30 #include "iscsi_target_tpg.h" 30 #include "iscsi_target_tpg.h"
31 #include "iscsi_target_util.h" 31 #include "iscsi_target_util.h"
32 #include "iscsi_target.h" 32 #include "iscsi_target.h"
33 #include "iscsi_target_auth.h" 33 #include "iscsi_target_auth.h"
34 34
35 #define MAX_LOGIN_PDUS 7 35 #define MAX_LOGIN_PDUS 7
36 #define TEXT_LEN 4096 36 #define TEXT_LEN 4096
37 37
38 void convert_null_to_semi(char *buf, int len) 38 void convert_null_to_semi(char *buf, int len)
39 { 39 {
40 int i; 40 int i;
41 41
42 for (i = 0; i < len; i++) 42 for (i = 0; i < len; i++)
43 if (buf[i] == '\0') 43 if (buf[i] == '\0')
44 buf[i] = ';'; 44 buf[i] = ';';
45 } 45 }
46 46
47 int strlen_semi(char *buf) 47 int strlen_semi(char *buf)
48 { 48 {
49 int i = 0; 49 int i = 0;
50 50
51 while (buf[i] != '\0') { 51 while (buf[i] != '\0') {
52 if (buf[i] == ';') 52 if (buf[i] == ';')
53 return i; 53 return i;
54 i++; 54 i++;
55 } 55 }
56 56
57 return -1; 57 return -1;
58 } 58 }
59 59
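On the wire, login keys arrive as NUL-separated key=value pairs; convert_null_to_semi() rewrites them into the ';'-separated form the rest of the negotiation code scans, and strlen_semi() measures one value up to the next separator. A short demo driving both (the payload content is illustrative, and the two helpers above are assumed linked in):

#include <stdio.h>

void convert_null_to_semi(char *buf, int len);
int strlen_semi(char *buf);

int main(void)
{
	char payload[] = "SessionType=Discovery\0AuthMethod=None\0";
	int len = sizeof(payload) - 1;	/* cover both embedded NULs */

	convert_null_to_semi(payload, len);
	printf("%s\n", payload);	/* "SessionType=Discovery;AuthMethod=None;" */
	printf("first pair: %d bytes\n", strlen_semi(payload));
	return 0;
}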
60 int extract_param( 60 int extract_param(
61 const char *in_buf, 61 const char *in_buf,
62 const char *pattern, 62 const char *pattern,
63 unsigned int max_length, 63 unsigned int max_length,
64 char *out_buf, 64 char *out_buf,
65 unsigned char *type) 65 unsigned char *type)
66 { 66 {
67 char *ptr; 67 char *ptr;
68 int len; 68 int len;
69 69
70 if (!in_buf || !pattern || !out_buf || !type) 70 if (!in_buf || !pattern || !out_buf || !type)
71 return -1; 71 return -1;
72 72
73 ptr = strstr(in_buf, pattern); 73 ptr = strstr(in_buf, pattern);
74 if (!ptr) 74 if (!ptr)
75 return -1; 75 return -1;
76 76
77 ptr = strstr(ptr, "="); 77 ptr = strstr(ptr, "=");
78 if (!ptr) 78 if (!ptr)
79 return -1; 79 return -1;
80 80
81 ptr += 1; 81 ptr += 1;
82 if (*ptr == '0' && (*(ptr+1) == 'x' || *(ptr+1) == 'X')) { 82 if (*ptr == '0' && (*(ptr+1) == 'x' || *(ptr+1) == 'X')) {
83 ptr += 2; /* skip 0x */ 83 ptr += 2; /* skip 0x */
84 *type = HEX; 84 *type = HEX;
85 } else 85 } else
86 *type = DECIMAL; 86 *type = DECIMAL;
87 87
88 len = strlen_semi(ptr); 88 len = strlen_semi(ptr);
89 if (len < 0) 89 if (len < 0)
90 return -1; 90 return -1;
91 91
92 if (len > max_length) { 92 if (len > max_length) {
93 pr_err("Length of input: %d exeeds max_length:" 93 pr_err("Length of input: %d exeeds max_length:"
94 " %d\n", len, max_length); 94 " %d\n", len, max_length);
95 return -1; 95 return -1;
96 } 96 }
97 memcpy(out_buf, ptr, len); 97 memcpy(out_buf, ptr, len);
98 out_buf[len] = '\0'; 98 out_buf[len] = '\0';
99 99
100 return 0; 100 return 0;
101 } 101 }
102 102
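extract_param() scans a ';'-separated buffer for one key, strips an optional 0x/0X prefix (reporting HEX vs DECIMAL through *type), and copies the value out NUL-terminated. A small usage sketch; HEX and DECIMAL are enum values defined in the auth headers, so the demo just prints the raw type byte:

#include <stdio.h>

/* Prototype of the helper defined above, assumed linked in. */
int extract_param(const char *in_buf, const char *pattern,
		  unsigned int max_length, char *out_buf,
		  unsigned char *type);

int main(void)
{
	char value[64];
	unsigned char type;

	/* CHAP_C carries the hex-encoded CHAP challenge. */
	if (extract_param("CHAP_A=5;CHAP_I=1;CHAP_C=0x001122;", "CHAP_C",
			  sizeof(value) - 1, value, &type) < 0)
		return 1;
	printf("value=%s type=%u\n", value, type);	/* "001122", 0x prefix consumed */
	return 0;
}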
103 static u32 iscsi_handle_authentication( 103 static u32 iscsi_handle_authentication(
104 struct iscsi_conn *conn, 104 struct iscsi_conn *conn,
105 char *in_buf, 105 char *in_buf,
106 char *out_buf, 106 char *out_buf,
107 int in_length, 107 int in_length,
108 int *out_length, 108 int *out_length,
109 unsigned char *authtype) 109 unsigned char *authtype)
110 { 110 {
111 struct iscsi_session *sess = conn->sess; 111 struct iscsi_session *sess = conn->sess;
112 struct iscsi_node_auth *auth; 112 struct iscsi_node_auth *auth;
113 struct iscsi_node_acl *iscsi_nacl; 113 struct iscsi_node_acl *iscsi_nacl;
114 struct se_node_acl *se_nacl; 114 struct se_node_acl *se_nacl;
115 115
116 if (!sess->sess_ops->SessionType) { 116 if (!sess->sess_ops->SessionType) {
117 /* 117 /*
118 * For SessionType=Normal 118 * For SessionType=Normal
119 */ 119 */
120 se_nacl = conn->sess->se_sess->se_node_acl; 120 se_nacl = conn->sess->se_sess->se_node_acl;
121 if (!se_nacl) { 121 if (!se_nacl) {
122 pr_err("Unable to locate struct se_node_acl for" 122 pr_err("Unable to locate struct se_node_acl for"
123 " CHAP auth\n"); 123 " CHAP auth\n");
124 return -1; 124 return -1;
125 } 125 }
126 iscsi_nacl = container_of(se_nacl, struct iscsi_node_acl, 126 iscsi_nacl = container_of(se_nacl, struct iscsi_node_acl,
127 se_node_acl); 127 se_node_acl);
128 if (!iscsi_nacl) { 128 if (!iscsi_nacl) {
129 pr_err("Unable to locate struct iscsi_node_acl for" 129 pr_err("Unable to locate struct iscsi_node_acl for"
130 " CHAP auth\n"); 130 " CHAP auth\n");
131 return -1; 131 return -1;
132 } 132 }
133 133
134 auth = ISCSI_NODE_AUTH(iscsi_nacl); 134 auth = ISCSI_NODE_AUTH(iscsi_nacl);
135 } else { 135 } else {
136 /* 136 /*
137 * For SessionType=Discovery 137 * For SessionType=Discovery
138 */ 138 */
139 auth = &iscsit_global->discovery_acl.node_auth; 139 auth = &iscsit_global->discovery_acl.node_auth;
140 } 140 }
141 141
142 if (strstr("CHAP", authtype)) 142 if (strstr("CHAP", authtype))
143 strcpy(conn->sess->auth_type, "CHAP"); 143 strcpy(conn->sess->auth_type, "CHAP");
144 else 144 else
145 strcpy(conn->sess->auth_type, NONE); 145 strcpy(conn->sess->auth_type, NONE);
146 146
147 if (strstr("None", authtype)) 147 if (strstr("None", authtype))
148 return 1; 148 return 1;
149 #ifdef CANSRP 149 #ifdef CANSRP
150 else if (strstr("SRP", authtype)) 150 else if (strstr("SRP", authtype))
151 return srp_main_loop(conn, auth, in_buf, out_buf, 151 return srp_main_loop(conn, auth, in_buf, out_buf,
152 &in_length, out_length); 152 &in_length, out_length);
153 #endif 153 #endif
154 else if (strstr("CHAP", authtype)) 154 else if (strstr("CHAP", authtype))
155 return chap_main_loop(conn, auth, in_buf, out_buf, 155 return chap_main_loop(conn, auth, in_buf, out_buf,
156 &in_length, out_length); 156 &in_length, out_length);
157 else if (strstr("SPKM1", authtype)) 157 else if (strstr("SPKM1", authtype))
158 return 2; 158 return 2;
159 else if (strstr("SPKM2", authtype)) 159 else if (strstr("SPKM2", authtype))
160 return 2; 160 return 2;
161 else if (strstr("KRB5", authtype)) 161 else if (strstr("KRB5", authtype))
162 return 2; 162 return 2;
163 else 163 else
164 return 2; 164 return 2;
165 } 165 }
166 166
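One subtlety in the authtype dispatch above: strstr() takes the haystack first, so strstr("CHAP", authtype) succeeds when the initiator-supplied authtype is a substring of "CHAP", not the reverse. A quick userspace check of that argument order:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* haystack first, needle second */
	printf("%d\n", strstr("CHAP", "CHAP") != NULL);	/* 1: exact match */
	printf("%d\n", strstr("CHAP", "CH") != NULL);	/* 1: substring also matches */
	printf("%d\n", strstr("CHAP", "None") != NULL);	/* 0: no match */
	return 0;
}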
167 static void iscsi_remove_failed_auth_entry(struct iscsi_conn *conn) 167 static void iscsi_remove_failed_auth_entry(struct iscsi_conn *conn)
168 { 168 {
169 kfree(conn->auth_protocol); 169 kfree(conn->auth_protocol);
170 } 170 }
171 171
172 static int iscsi_target_check_login_request( 172 static int iscsi_target_check_login_request(
173 struct iscsi_conn *conn, 173 struct iscsi_conn *conn,
174 struct iscsi_login *login) 174 struct iscsi_login *login)
175 { 175 {
176 int req_csg, req_nsg, rsp_csg, rsp_nsg; 176 int req_csg, req_nsg, rsp_csg, rsp_nsg;
177 u32 payload_length; 177 u32 payload_length;
178 struct iscsi_login_req *login_req; 178 struct iscsi_login_req *login_req;
179 struct iscsi_login_rsp *login_rsp; 179 struct iscsi_login_rsp *login_rsp;
180 180
181 login_req = (struct iscsi_login_req *) login->req; 181 login_req = (struct iscsi_login_req *) login->req;
182 login_rsp = (struct iscsi_login_rsp *) login->rsp; 182 login_rsp = (struct iscsi_login_rsp *) login->rsp;
183 payload_length = ntoh24(login_req->dlength); 183 payload_length = ntoh24(login_req->dlength);
184 184
185 switch (login_req->opcode & ISCSI_OPCODE_MASK) { 185 switch (login_req->opcode & ISCSI_OPCODE_MASK) {
186 case ISCSI_OP_LOGIN: 186 case ISCSI_OP_LOGIN:
187 break; 187 break;
188 default: 188 default:
189 pr_err("Received unknown opcode 0x%02x.\n", 189 pr_err("Received unknown opcode 0x%02x.\n",
190 login_req->opcode & ISCSI_OPCODE_MASK); 190 login_req->opcode & ISCSI_OPCODE_MASK);
191 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR, 191 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
192 ISCSI_LOGIN_STATUS_INIT_ERR); 192 ISCSI_LOGIN_STATUS_INIT_ERR);
193 return -1; 193 return -1;
194 } 194 }
195 195
196 if ((login_req->flags & ISCSI_FLAG_LOGIN_CONTINUE) && 196 if ((login_req->flags & ISCSI_FLAG_LOGIN_CONTINUE) &&
197 (login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT)) { 197 (login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT)) {
198 pr_err("Login request has both ISCSI_FLAG_LOGIN_CONTINUE" 198 pr_err("Login request has both ISCSI_FLAG_LOGIN_CONTINUE"
199 " and ISCSI_FLAG_LOGIN_TRANSIT set, protocol error.\n"); 199 " and ISCSI_FLAG_LOGIN_TRANSIT set, protocol error.\n");
200 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR, 200 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
201 ISCSI_LOGIN_STATUS_INIT_ERR); 201 ISCSI_LOGIN_STATUS_INIT_ERR);
202 return -1; 202 return -1;
203 } 203 }
204 204
205 req_csg = (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2; 205 req_csg = (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2;
206 rsp_csg = (login_rsp->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2; 206 rsp_csg = (login_rsp->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2;
207 req_nsg = (login_req->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE_MASK); 207 req_nsg = (login_req->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE_MASK);
208 rsp_nsg = (login_rsp->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE_MASK); 208 rsp_nsg = (login_rsp->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE_MASK);
209 209
210 if (req_csg != login->current_stage) { 210 if (req_csg != login->current_stage) {
211 pr_err("Initiator unexpectedly changed login stage" 211 pr_err("Initiator unexpectedly changed login stage"
212 " from %d to %d, login failed.\n", login->current_stage, 212 " from %d to %d, login failed.\n", login->current_stage,
213 req_csg); 213 req_csg);
214 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR, 214 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
215 ISCSI_LOGIN_STATUS_INIT_ERR); 215 ISCSI_LOGIN_STATUS_INIT_ERR);
216 return -1; 216 return -1;
217 } 217 }
218 218
219 if ((req_nsg == 2) || (req_csg >= 2) || 219 if ((req_nsg == 2) || (req_csg >= 2) ||
220 ((login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT) && 220 ((login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT) &&
221 (req_nsg <= req_csg))) { 221 (req_nsg <= req_csg))) {
222 pr_err("Illegal login_req->flags Combination, CSG: %d," 222 pr_err("Illegal login_req->flags Combination, CSG: %d,"
223 " NSG: %d, ISCSI_FLAG_LOGIN_TRANSIT: %d.\n", req_csg, 223 " NSG: %d, ISCSI_FLAG_LOGIN_TRANSIT: %d.\n", req_csg,
224 req_nsg, (login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT)); 224 req_nsg, (login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT));
225 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR, 225 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
226 ISCSI_LOGIN_STATUS_INIT_ERR); 226 ISCSI_LOGIN_STATUS_INIT_ERR);
227 return -1; 227 return -1;
228 } 228 }
229 229
230 if ((login_req->max_version != login->version_max) || 230 if ((login_req->max_version != login->version_max) ||
231 (login_req->min_version != login->version_min)) { 231 (login_req->min_version != login->version_min)) {
232 pr_err("Login request changed Version Max/Nin" 232 pr_err("Login request changed Version Max/Nin"
233 " unexpectedly to 0x%02x/0x%02x, protocol error\n", 233 " unexpectedly to 0x%02x/0x%02x, protocol error\n",
234 login_req->max_version, login_req->min_version); 234 login_req->max_version, login_req->min_version);
235 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR, 235 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
236 ISCSI_LOGIN_STATUS_INIT_ERR); 236 ISCSI_LOGIN_STATUS_INIT_ERR);
237 return -1; 237 return -1;
238 } 238 }
239 239
240 if (memcmp(login_req->isid, login->isid, 6) != 0) { 240 if (memcmp(login_req->isid, login->isid, 6) != 0) {
241 pr_err("Login request changed ISID unexpectedly," 241 pr_err("Login request changed ISID unexpectedly,"
242 " protocol error.\n"); 242 " protocol error.\n");
243 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR, 243 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
244 ISCSI_LOGIN_STATUS_INIT_ERR); 244 ISCSI_LOGIN_STATUS_INIT_ERR);
245 return -1; 245 return -1;
246 } 246 }
247 247
248 if (login_req->itt != login->init_task_tag) { 248 if (login_req->itt != login->init_task_tag) {
249 pr_err("Login request changed ITT unexpectedly to" 249 pr_err("Login request changed ITT unexpectedly to"
250 " 0x%08x, protocol error.\n", login_req->itt); 250 " 0x%08x, protocol error.\n", login_req->itt);
251 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR, 251 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
252 ISCSI_LOGIN_STATUS_INIT_ERR); 252 ISCSI_LOGIN_STATUS_INIT_ERR);
253 return -1; 253 return -1;
254 } 254 }
255 255
256 if (payload_length > MAX_KEY_VALUE_PAIRS) { 256 if (payload_length > MAX_KEY_VALUE_PAIRS) {
257 pr_err("Login request payload exceeds default" 257 pr_err("Login request payload exceeds default"
258 " MaxRecvDataSegmentLength: %u, protocol error.\n", 258 " MaxRecvDataSegmentLength: %u, protocol error.\n",
259 MAX_KEY_VALUE_PAIRS); 259 MAX_KEY_VALUE_PAIRS);
260 return -1; 260 return -1;
261 } 261 }
262 262
263 return 0; 263 return 0;
264 } 264 }
265 265
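For reference, the CSG/NSG arithmetic in iscsi_target_check_login_request() depends on the layout of the login PDU flags byte: the Current Stage occupies bits 2-3 and the Next Stage bits 0-1, with Transit in bit 7. A minimal user-space sketch of the same extraction (mask values assumed to match the ISCSI_FLAG_LOGIN_* definitions in include/scsi/iscsi_proto.h):

#include <stdint.h>
#include <stdio.h>

#define LOGIN_CURRENT_STAGE_MASK 0x0C	/* bits 2-3 of flags */
#define LOGIN_NEXT_STAGE_MASK    0x03	/* bits 0-1 of flags */
#define LOGIN_TRANSIT            0x80	/* bit 7 of flags */

int main(void)
{
	uint8_t flags = 0x87;	/* Transit set, CSG=1, NSG=3 */

	/* Same shifts and masks as the req_csg/req_nsg computation above. */
	printf("csg=%d nsg=%d transit=%d\n",
	       (flags & LOGIN_CURRENT_STAGE_MASK) >> 2,
	       flags & LOGIN_NEXT_STAGE_MASK,
	       !!(flags & LOGIN_TRANSIT));
	return 0;
}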
266 static int iscsi_target_check_first_request( 266 static int iscsi_target_check_first_request(
267 struct iscsi_conn *conn, 267 struct iscsi_conn *conn,
268 struct iscsi_login *login) 268 struct iscsi_login *login)
269 { 269 {
270 struct iscsi_param *param = NULL; 270 struct iscsi_param *param = NULL;
271 struct se_node_acl *se_nacl; 271 struct se_node_acl *se_nacl;
272 272
273 login->first_request = 0; 273 login->first_request = 0;
274 274
275 list_for_each_entry(param, &conn->param_list->param_list, p_list) { 275 list_for_each_entry(param, &conn->param_list->param_list, p_list) {
276 if (!strncmp(param->name, SESSIONTYPE, 11)) { 276 if (!strncmp(param->name, SESSIONTYPE, 11)) {
277 if (!IS_PSTATE_ACCEPTOR(param)) { 277 if (!IS_PSTATE_ACCEPTOR(param)) {
278 pr_err("SessionType key not received" 278 pr_err("SessionType key not received"
279 " in first login request.\n"); 279 " in first login request.\n");
280 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR, 280 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
281 ISCSI_LOGIN_STATUS_MISSING_FIELDS); 281 ISCSI_LOGIN_STATUS_MISSING_FIELDS);
282 return -1; 282 return -1;
283 } 283 }
284 if (!strncmp(param->value, DISCOVERY, 9)) 284 if (!strncmp(param->value, DISCOVERY, 9))
285 return 0; 285 return 0;
286 } 286 }
287 287
288 if (!strncmp(param->name, INITIATORNAME, 13)) { 288 if (!strncmp(param->name, INITIATORNAME, 13)) {
289 if (!IS_PSTATE_ACCEPTOR(param)) { 289 if (!IS_PSTATE_ACCEPTOR(param)) {
290 if (!login->leading_connection) 290 if (!login->leading_connection)
291 continue; 291 continue;
292 292
293 pr_err("InitiatorName key not received" 293 pr_err("InitiatorName key not received"
294 " in first login request.\n"); 294 " in first login request.\n");
295 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR, 295 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
296 ISCSI_LOGIN_STATUS_MISSING_FIELDS); 296 ISCSI_LOGIN_STATUS_MISSING_FIELDS);
297 return -1; 297 return -1;
298 } 298 }
299 299
300 /* 300 /*
301 * For non-leading connections, double check that the 301 * For non-leading connections, double check that the
302 * received InitiatorName matches the existing session's 302 * received InitiatorName matches the existing session's
303 * struct iscsi_node_acl. 303 * struct iscsi_node_acl.
304 */ 304 */
305 if (!login->leading_connection) { 305 if (!login->leading_connection) {
306 se_nacl = conn->sess->se_sess->se_node_acl; 306 se_nacl = conn->sess->se_sess->se_node_acl;
307 if (!se_nacl) { 307 if (!se_nacl) {
308 pr_err("Unable to locate" 308 pr_err("Unable to locate"
309 " struct se_node_acl\n"); 309 " struct se_node_acl\n");
310 iscsit_tx_login_rsp(conn, 310 iscsit_tx_login_rsp(conn,
311 ISCSI_STATUS_CLS_INITIATOR_ERR, 311 ISCSI_STATUS_CLS_INITIATOR_ERR,
312 ISCSI_LOGIN_STATUS_TGT_NOT_FOUND); 312 ISCSI_LOGIN_STATUS_TGT_NOT_FOUND);
313 return -1; 313 return -1;
314 } 314 }
315 315
316 if (strcmp(param->value, 316 if (strcmp(param->value,
317 se_nacl->initiatorname)) { 317 se_nacl->initiatorname)) {
318 pr_err("Incorrect" 318 pr_err("Incorrect"
319 " InitiatorName: %s for this" 319 " InitiatorName: %s for this"
320 " iSCSI Initiator Node.\n", 320 " iSCSI Initiator Node.\n",
321 param->value); 321 param->value);
322 iscsit_tx_login_rsp(conn, 322 iscsit_tx_login_rsp(conn,
323 ISCSI_STATUS_CLS_INITIATOR_ERR, 323 ISCSI_STATUS_CLS_INITIATOR_ERR,
324 ISCSI_LOGIN_STATUS_TGT_NOT_FOUND); 324 ISCSI_LOGIN_STATUS_TGT_NOT_FOUND);
325 return -1; 325 return -1;
326 } 326 }
327 } 327 }
328 } 328 }
329 } 329 }
330 330
331 return 0; 331 return 0;
332 } 332 }
333 333
334 static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_login *login) 334 static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_login *login)
335 { 335 {
336 u32 padding = 0; 336 u32 padding = 0;
337 struct iscsi_session *sess = conn->sess; 337 struct iscsi_session *sess = conn->sess;
338 struct iscsi_login_rsp *login_rsp; 338 struct iscsi_login_rsp *login_rsp;
339 339
340 login_rsp = (struct iscsi_login_rsp *) login->rsp; 340 login_rsp = (struct iscsi_login_rsp *) login->rsp;
341 341
342 login_rsp->opcode = ISCSI_OP_LOGIN_RSP; 342 login_rsp->opcode = ISCSI_OP_LOGIN_RSP;
343 hton24(login_rsp->dlength, login->rsp_length); 343 hton24(login_rsp->dlength, login->rsp_length);
344 memcpy(login_rsp->isid, login->isid, 6); 344 memcpy(login_rsp->isid, login->isid, 6);
345 login_rsp->tsih = cpu_to_be16(login->tsih); 345 login_rsp->tsih = cpu_to_be16(login->tsih);
346 login_rsp->itt = cpu_to_be32(login->init_task_tag); 346 login_rsp->itt = cpu_to_be32(login->init_task_tag);
347 login_rsp->statsn = cpu_to_be32(conn->stat_sn++); 347 login_rsp->statsn = cpu_to_be32(conn->stat_sn++);
348 login_rsp->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 348 login_rsp->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
349 login_rsp->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 349 login_rsp->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
350 350
351 pr_debug("Sending Login Response, Flags: 0x%02x, ITT: 0x%08x," 351 pr_debug("Sending Login Response, Flags: 0x%02x, ITT: 0x%08x,"
352 " ExpCmdSN; 0x%08x, MaxCmdSN: 0x%08x, StatSN: 0x%08x, Length:" 352 " ExpCmdSN; 0x%08x, MaxCmdSN: 0x%08x, StatSN: 0x%08x, Length:"
353 " %u\n", login_rsp->flags, ntohl(login_rsp->itt), 353 " %u\n", login_rsp->flags, ntohl(login_rsp->itt),
354 ntohl(login_rsp->exp_cmdsn), ntohl(login_rsp->max_cmdsn), 354 ntohl(login_rsp->exp_cmdsn), ntohl(login_rsp->max_cmdsn),
355 ntohl(login_rsp->statsn), login->rsp_length); 355 ntohl(login_rsp->statsn), login->rsp_length);
356 356
357 padding = ((-login->rsp_length) & 3); 357 padding = ((-login->rsp_length) & 3);
358 358
359 if (iscsi_login_tx_data( 359 if (iscsi_login_tx_data(
360 conn, 360 conn,
361 login->rsp, 361 login->rsp,
362 login->rsp_buf, 362 login->rsp_buf,
363 login->rsp_length + padding) < 0) 363 login->rsp_length + padding) < 0)
364 return -1; 364 return -1;
365 365
366 login->rsp_length = 0; 366 login->rsp_length = 0;
367 login_rsp->tsih = be16_to_cpu(login_rsp->tsih); 367 login_rsp->tsih = be16_to_cpu(login_rsp->tsih);
368 login_rsp->itt = be32_to_cpu(login_rsp->itt); 368 login_rsp->itt = be32_to_cpu(login_rsp->itt);
369 login_rsp->statsn = be32_to_cpu(login_rsp->statsn); 369 login_rsp->statsn = be32_to_cpu(login_rsp->statsn);
370 mutex_lock(&sess->cmdsn_mutex); 370 mutex_lock(&sess->cmdsn_mutex);
371 login_rsp->exp_cmdsn = be32_to_cpu(sess->exp_cmd_sn); 371 login_rsp->exp_cmdsn = be32_to_cpu(sess->exp_cmd_sn);
372 login_rsp->max_cmdsn = be32_to_cpu(sess->max_cmd_sn); 372 login_rsp->max_cmdsn = be32_to_cpu(sess->max_cmd_sn);
373 mutex_unlock(&sess->cmdsn_mutex); 373 mutex_unlock(&sess->cmdsn_mutex);
374 374
375 return 0; 375 return 0;
376 } 376 }
377 377
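The ((-len) & 3) expression used for padding above is the usual trick for computing how many pad bytes round a data segment up to the 4-byte boundary that iSCSI PDUs require. A standalone check of the arithmetic:

#include <stdint.h>
#include <stdio.h>

/* Pad bytes needed to align len to 4; same as ((-len) & 3) above.
 * Unary minus on an unsigned value is well-defined modular arithmetic. */
static uint32_t iscsi_pad(uint32_t len)
{
	return (-len) & 3;
}

int main(void)
{
	uint32_t len;

	for (len = 0; len < 8; len++)
		printf("len=%u pad=%u total=%u\n",
		       len, iscsi_pad(len), len + iscsi_pad(len));
	return 0;
}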
378 static int iscsi_target_do_rx_login_io(struct iscsi_conn *conn, struct iscsi_login *login) 378 static int iscsi_target_do_rx_login_io(struct iscsi_conn *conn, struct iscsi_login *login)
379 { 379 {
380 u32 padding = 0, payload_length; 380 u32 padding = 0, payload_length;
381 struct iscsi_login_req *login_req; 381 struct iscsi_login_req *login_req;
382 382
383 if (iscsi_login_rx_data(conn, login->req, ISCSI_HDR_LEN) < 0) 383 if (iscsi_login_rx_data(conn, login->req, ISCSI_HDR_LEN) < 0)
384 return -1; 384 return -1;
385 385
386 login_req = (struct iscsi_login_req *) login->req; 386 login_req = (struct iscsi_login_req *) login->req;
387 payload_length = ntoh24(login_req->dlength); 387 payload_length = ntoh24(login_req->dlength);
388 login_req->tsih = be16_to_cpu(login_req->tsih); 388 login_req->tsih = be16_to_cpu(login_req->tsih);
389 login_req->itt = be32_to_cpu(login_req->itt); 389 login_req->itt = be32_to_cpu(login_req->itt);
390 login_req->cid = be16_to_cpu(login_req->cid); 390 login_req->cid = be16_to_cpu(login_req->cid);
391 login_req->cmdsn = be32_to_cpu(login_req->cmdsn); 391 login_req->cmdsn = be32_to_cpu(login_req->cmdsn);
392 login_req->exp_statsn = be32_to_cpu(login_req->exp_statsn); 392 login_req->exp_statsn = be32_to_cpu(login_req->exp_statsn);
393 393
394 pr_debug("Got Login Command, Flags 0x%02x, ITT: 0x%08x," 394 pr_debug("Got Login Command, Flags 0x%02x, ITT: 0x%08x,"
395 " CmdSN: 0x%08x, ExpStatSN: 0x%08x, CID: %hu, Length: %u\n", 395 " CmdSN: 0x%08x, ExpStatSN: 0x%08x, CID: %hu, Length: %u\n",
396 login_req->flags, login_req->itt, login_req->cmdsn, 396 login_req->flags, login_req->itt, login_req->cmdsn,
397 login_req->exp_statsn, login_req->cid, payload_length); 397 login_req->exp_statsn, login_req->cid, payload_length);
398 398
399 if (iscsi_target_check_login_request(conn, login) < 0) 399 if (iscsi_target_check_login_request(conn, login) < 0)
400 return -1; 400 return -1;
401 401
402 padding = ((-payload_length) & 3); 402 padding = ((-payload_length) & 3);
403 memset(login->req_buf, 0, MAX_KEY_VALUE_PAIRS); 403 memset(login->req_buf, 0, MAX_KEY_VALUE_PAIRS);
404 404
405 if (iscsi_login_rx_data( 405 if (iscsi_login_rx_data(
406 conn, 406 conn,
407 login->req_buf, 407 login->req_buf,
408 payload_length + padding) < 0) 408 payload_length + padding) < 0)
409 return -1; 409 return -1;
410 410
411 return 0; 411 return 0;
412 } 412 }
413 413
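hton24()/ntoh24() above pack and unpack the 24-bit big-endian dlength field of the PDU header. Functionally equivalent user-space helpers, as a sketch (the kernel's real versions are macros in include/scsi/iscsi_proto.h):

#include <stdint.h>
#include <stdio.h>

/* dlength is a 3-byte big-endian field in the iSCSI PDU header. */
static uint32_t ntoh24_eq(const uint8_t p[3])
{
	return (p[0] << 16) | (p[1] << 8) | p[2];
}

static void hton24_eq(uint8_t p[3], uint32_t v)
{
	p[0] = (v >> 16) & 0xff;
	p[1] = (v >> 8) & 0xff;
	p[2] = v & 0xff;
}

int main(void)
{
	uint8_t dlength[3];

	hton24_eq(dlength, 8192);	/* e.g. an 8k data segment */
	printf("0x%02x 0x%02x 0x%02x -> %u\n",
	       dlength[0], dlength[1], dlength[2], ntoh24_eq(dlength));
	return 0;
}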
414 static int iscsi_target_do_login_io(struct iscsi_conn *conn, struct iscsi_login *login) 414 static int iscsi_target_do_login_io(struct iscsi_conn *conn, struct iscsi_login *login)
415 { 415 {
416 if (iscsi_target_do_tx_login_io(conn, login) < 0) 416 if (iscsi_target_do_tx_login_io(conn, login) < 0)
417 return -1; 417 return -1;
418 418
419 if (iscsi_target_do_rx_login_io(conn, login) < 0) 419 if (iscsi_target_do_rx_login_io(conn, login) < 0)
420 return -1; 420 return -1;
421 421
422 return 0; 422 return 0;
423 } 423 }
424 424
425 static int iscsi_target_get_initial_payload( 425 static int iscsi_target_get_initial_payload(
426 struct iscsi_conn *conn, 426 struct iscsi_conn *conn,
427 struct iscsi_login *login) 427 struct iscsi_login *login)
428 { 428 {
429 u32 padding = 0, payload_length; 429 u32 padding = 0, payload_length;
430 struct iscsi_login_req *login_req; 430 struct iscsi_login_req *login_req;
431 431
432 login_req = (struct iscsi_login_req *) login->req; 432 login_req = (struct iscsi_login_req *) login->req;
433 payload_length = ntoh24(login_req->dlength); 433 payload_length = ntoh24(login_req->dlength);
434 434
435 pr_debug("Got Login Command, Flags 0x%02x, ITT: 0x%08x," 435 pr_debug("Got Login Command, Flags 0x%02x, ITT: 0x%08x,"
436 " CmdSN: 0x%08x, ExpStatSN: 0x%08x, Length: %u\n", 436 " CmdSN: 0x%08x, ExpStatSN: 0x%08x, Length: %u\n",
437 login_req->flags, login_req->itt, login_req->cmdsn, 437 login_req->flags, login_req->itt, login_req->cmdsn,
438 login_req->exp_statsn, payload_length); 438 login_req->exp_statsn, payload_length);
439 439
440 if (iscsi_target_check_login_request(conn, login) < 0) 440 if (iscsi_target_check_login_request(conn, login) < 0)
441 return -1; 441 return -1;
442 442
443 padding = ((-payload_length) & 3); 443 padding = ((-payload_length) & 3);
444 444
445 if (iscsi_login_rx_data( 445 if (iscsi_login_rx_data(
446 conn, 446 conn,
447 login->req_buf, 447 login->req_buf,
448 payload_length + padding) < 0) 448 payload_length + padding) < 0)
449 return -1; 449 return -1;
450 450
451 return 0; 451 return 0;
452 } 452 }
453 453
454 /* 454 /*
455 * NOTE: We check for existing sessions or connections AFTER the initiator 455 * NOTE: We check for existing sessions or connections AFTER the initiator
456 * has been successfully authenticated in order to protect against faked 456 * has been successfully authenticated in order to protect against faked
457 * ISID/TSIH combinations. 457 * ISID/TSIH combinations.
458 */ 458 */
459 static int iscsi_target_check_for_existing_instances( 459 static int iscsi_target_check_for_existing_instances(
460 struct iscsi_conn *conn, 460 struct iscsi_conn *conn,
461 struct iscsi_login *login) 461 struct iscsi_login *login)
462 { 462 {
463 if (login->checked_for_existing) 463 if (login->checked_for_existing)
464 return 0; 464 return 0;
465 465
466 login->checked_for_existing = 1; 466 login->checked_for_existing = 1;
467 467
468 if (!login->tsih) 468 if (!login->tsih)
469 return iscsi_check_for_session_reinstatement(conn); 469 return iscsi_check_for_session_reinstatement(conn);
470 else 470 else
471 return iscsi_login_post_auth_non_zero_tsih(conn, login->cid, 471 return iscsi_login_post_auth_non_zero_tsih(conn, login->cid,
472 login->initial_exp_statsn); 472 login->initial_exp_statsn);
473 } 473 }
474 474
475 static int iscsi_target_do_authentication( 475 static int iscsi_target_do_authentication(
476 struct iscsi_conn *conn, 476 struct iscsi_conn *conn,
477 struct iscsi_login *login) 477 struct iscsi_login *login)
478 { 478 {
479 int authret; 479 int authret;
480 u32 payload_length; 480 u32 payload_length;
481 struct iscsi_param *param; 481 struct iscsi_param *param;
482 struct iscsi_login_req *login_req; 482 struct iscsi_login_req *login_req;
483 struct iscsi_login_rsp *login_rsp; 483 struct iscsi_login_rsp *login_rsp;
484 484
485 login_req = (struct iscsi_login_req *) login->req; 485 login_req = (struct iscsi_login_req *) login->req;
486 login_rsp = (struct iscsi_login_rsp *) login->rsp; 486 login_rsp = (struct iscsi_login_rsp *) login->rsp;
487 payload_length = ntoh24(login_req->dlength); 487 payload_length = ntoh24(login_req->dlength);
488 488
489 param = iscsi_find_param_from_key(AUTHMETHOD, conn->param_list); 489 param = iscsi_find_param_from_key(AUTHMETHOD, conn->param_list);
490 if (!param) 490 if (!param)
491 return -1; 491 return -1;
492 492
493 authret = iscsi_handle_authentication( 493 authret = iscsi_handle_authentication(
494 conn, 494 conn,
495 login->req_buf, 495 login->req_buf,
496 login->rsp_buf, 496 login->rsp_buf,
497 payload_length, 497 payload_length,
498 &login->rsp_length, 498 &login->rsp_length,
499 param->value); 499 param->value);
500 switch (authret) { 500 switch (authret) {
501 case 0: 501 case 0:
502 pr_debug("Received OK response" 502 pr_debug("Received OK response"
503 " from LIO Authentication, continuing.\n"); 503 " from LIO Authentication, continuing.\n");
504 break; 504 break;
505 case 1: 505 case 1:
506 pr_debug("iSCSI security negotiation" 506 pr_debug("iSCSI security negotiation"
507 " completed successfully.\n"); 507 " completed successfully.\n");
508 login->auth_complete = 1; 508 login->auth_complete = 1;
509 if ((login_req->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE1) && 509 if ((login_req->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE1) &&
510 (login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT)) { 510 (login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT)) {
511 login_rsp->flags |= (ISCSI_FLAG_LOGIN_NEXT_STAGE1 | 511 login_rsp->flags |= (ISCSI_FLAG_LOGIN_NEXT_STAGE1 |
512 ISCSI_FLAG_LOGIN_TRANSIT); 512 ISCSI_FLAG_LOGIN_TRANSIT);
513 login->current_stage = 1; 513 login->current_stage = 1;
514 } 514 }
515 return iscsi_target_check_for_existing_instances( 515 return iscsi_target_check_for_existing_instances(
516 conn, login); 516 conn, login);
517 case 2: 517 case 2:
518 pr_err("Security negotiation" 518 pr_err("Security negotiation"
519 " failed.\n"); 519 " failed.\n");
520 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR, 520 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
521 ISCSI_LOGIN_STATUS_AUTH_FAILED); 521 ISCSI_LOGIN_STATUS_AUTH_FAILED);
522 return -1; 522 return -1;
523 default: 523 default:
524 pr_err("Received unknown error %d from LIO" 524 pr_err("Received unknown error %d from LIO"
525 " Authentication\n", authret); 525 " Authentication\n", authret);
526 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 526 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
527 ISCSI_LOGIN_STATUS_TARGET_ERROR); 527 ISCSI_LOGIN_STATUS_TARGET_ERROR);
528 return -1; 528 return -1;
529 } 529 }
530 530
531 return 0; 531 return 0;
532 } 532 }
533 533
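The switch in iscsi_target_do_authentication() implies a small return-code contract for iscsi_handle_authentication(). A hedged restatement of that contract as a standalone table (the description strings are illustrative, not from the kernel source):

#include <stdio.h>

static const char *auth_step(int authret)
{
	switch (authret) {
	case 0:
		return "keys accepted, security negotiation continues";
	case 1:
		return "authentication complete, may transit to CSG 1";
	case 2:
		return "authentication failed, login rejected";
	default:
		return "internal authentication error, login rejected";
	}
}

int main(void)
{
	int r;

	for (r = 0; r <= 3; r++)
		printf("authret=%d: %s\n", r, auth_step(r));
	return 0;
}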
534 static int iscsi_target_handle_csg_zero( 534 static int iscsi_target_handle_csg_zero(
535 struct iscsi_conn *conn, 535 struct iscsi_conn *conn,
536 struct iscsi_login *login) 536 struct iscsi_login *login)
537 { 537 {
538 int ret; 538 int ret;
539 u32 payload_length; 539 u32 payload_length;
540 struct iscsi_param *param; 540 struct iscsi_param *param;
541 struct iscsi_login_req *login_req; 541 struct iscsi_login_req *login_req;
542 struct iscsi_login_rsp *login_rsp; 542 struct iscsi_login_rsp *login_rsp;
543 543
544 login_req = (struct iscsi_login_req *) login->req; 544 login_req = (struct iscsi_login_req *) login->req;
545 login_rsp = (struct iscsi_login_rsp *) login->rsp; 545 login_rsp = (struct iscsi_login_rsp *) login->rsp;
546 payload_length = ntoh24(login_req->dlength); 546 payload_length = ntoh24(login_req->dlength);
547 547
548 param = iscsi_find_param_from_key(AUTHMETHOD, conn->param_list); 548 param = iscsi_find_param_from_key(AUTHMETHOD, conn->param_list);
549 if (!param) 549 if (!param)
550 return -1; 550 return -1;
551 551
552 ret = iscsi_decode_text_input( 552 ret = iscsi_decode_text_input(
553 PHASE_SECURITY|PHASE_DECLARATIVE, 553 PHASE_SECURITY|PHASE_DECLARATIVE,
554 SENDER_INITIATOR|SENDER_RECEIVER, 554 SENDER_INITIATOR|SENDER_RECEIVER,
555 login->req_buf, 555 login->req_buf,
556 payload_length, 556 payload_length,
557 conn->param_list); 557 conn->param_list);
558 if (ret < 0) 558 if (ret < 0)
559 return -1; 559 return -1;
560 560
561 if (ret > 0) { 561 if (ret > 0) {
562 if (login->auth_complete) { 562 if (login->auth_complete) {
563 pr_err("Initiator has already been" 563 pr_err("Initiator has already been"
564 " successfully authenticated, but is still" 564 " successfully authenticated, but is still"
565 " sending %s keys.\n", param->value); 565 " sending %s keys.\n", param->value);
566 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR, 566 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
567 ISCSI_LOGIN_STATUS_INIT_ERR); 567 ISCSI_LOGIN_STATUS_INIT_ERR);
568 return -1; 568 return -1;
569 } 569 }
570 570
571 goto do_auth; 571 goto do_auth;
572 } 572 }
573 573
574 if (login->first_request) 574 if (login->first_request)
575 if (iscsi_target_check_first_request(conn, login) < 0) 575 if (iscsi_target_check_first_request(conn, login) < 0)
576 return -1; 576 return -1;
577 577
578 ret = iscsi_encode_text_output( 578 ret = iscsi_encode_text_output(
579 PHASE_SECURITY|PHASE_DECLARATIVE, 579 PHASE_SECURITY|PHASE_DECLARATIVE,
580 SENDER_TARGET, 580 SENDER_TARGET,
581 login->rsp_buf, 581 login->rsp_buf,
582 &login->rsp_length, 582 &login->rsp_length,
583 conn->param_list); 583 conn->param_list);
584 if (ret < 0) 584 if (ret < 0)
585 return -1; 585 return -1;
586 586
587 if (!iscsi_check_negotiated_keys(conn->param_list)) { 587 if (!iscsi_check_negotiated_keys(conn->param_list)) {
588 if (ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication && 588 if (ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication &&
589 !strncmp(param->value, NONE, 4)) { 589 !strncmp(param->value, NONE, 4)) {
590 pr_err("Initiator sent AuthMethod=None but" 590 pr_err("Initiator sent AuthMethod=None but"
591 " Target is enforcing iSCSI Authentication," 591 " Target is enforcing iSCSI Authentication,"
592 " login failed.\n"); 592 " login failed.\n");
593 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR, 593 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
594 ISCSI_LOGIN_STATUS_AUTH_FAILED); 594 ISCSI_LOGIN_STATUS_AUTH_FAILED);
595 return -1; 595 return -1;
596 } 596 }
597 597
598 if (ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication && 598 if (ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication &&
599 !login->auth_complete) 599 !login->auth_complete)
600 return 0; 600 return 0;
601 601
602 if (strncmp(param->value, NONE, 4) && !login->auth_complete) 602 if (strncmp(param->value, NONE, 4) && !login->auth_complete)
603 return 0; 603 return 0;
604 604
605 if ((login_req->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE1) && 605 if ((login_req->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE1) &&
606 (login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT)) { 606 (login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT)) {
607 login_rsp->flags |= ISCSI_FLAG_LOGIN_NEXT_STAGE1 | 607 login_rsp->flags |= ISCSI_FLAG_LOGIN_NEXT_STAGE1 |
608 ISCSI_FLAG_LOGIN_TRANSIT; 608 ISCSI_FLAG_LOGIN_TRANSIT;
609 login->current_stage = 1; 609 login->current_stage = 1;
610 } 610 }
611 } 611 }
612 612
613 return 0; 613 return 0;
614 do_auth: 614 do_auth:
615 return iscsi_target_do_authentication(conn, login); 615 return iscsi_target_do_authentication(conn, login);
616 } 616 }
617 617
618 static int iscsi_target_handle_csg_one(struct iscsi_conn *conn, struct iscsi_login *login) 618 static int iscsi_target_handle_csg_one(struct iscsi_conn *conn, struct iscsi_login *login)
619 { 619 {
620 int ret; 620 int ret;
621 u32 payload_length; 621 u32 payload_length;
622 struct iscsi_login_req *login_req; 622 struct iscsi_login_req *login_req;
623 struct iscsi_login_rsp *login_rsp; 623 struct iscsi_login_rsp *login_rsp;
624 624
625 login_req = (struct iscsi_login_req *) login->req; 625 login_req = (struct iscsi_login_req *) login->req;
626 login_rsp = (struct iscsi_login_rsp *) login->rsp; 626 login_rsp = (struct iscsi_login_rsp *) login->rsp;
627 payload_length = ntoh24(login_req->dlength); 627 payload_length = ntoh24(login_req->dlength);
628 628
629 ret = iscsi_decode_text_input( 629 ret = iscsi_decode_text_input(
630 PHASE_OPERATIONAL|PHASE_DECLARATIVE, 630 PHASE_OPERATIONAL|PHASE_DECLARATIVE,
631 SENDER_INITIATOR|SENDER_RECEIVER, 631 SENDER_INITIATOR|SENDER_RECEIVER,
632 login->req_buf, 632 login->req_buf,
633 payload_length, 633 payload_length,
634 conn->param_list); 634 conn->param_list);
635 if (ret < 0) 635 if (ret < 0)
636 return -1; 636 return -1;
637 637
638 if (login->first_request) 638 if (login->first_request)
639 if (iscsi_target_check_first_request(conn, login) < 0) 639 if (iscsi_target_check_first_request(conn, login) < 0)
640 return -1; 640 return -1;
641 641
642 if (iscsi_target_check_for_existing_instances(conn, login) < 0) 642 if (iscsi_target_check_for_existing_instances(conn, login) < 0)
643 return -1; 643 return -1;
644 644
645 ret = iscsi_encode_text_output( 645 ret = iscsi_encode_text_output(
646 PHASE_OPERATIONAL|PHASE_DECLARATIVE, 646 PHASE_OPERATIONAL|PHASE_DECLARATIVE,
647 SENDER_TARGET, 647 SENDER_TARGET,
648 login->rsp_buf, 648 login->rsp_buf,
649 &login->rsp_length, 649 &login->rsp_length,
650 conn->param_list); 650 conn->param_list);
651 if (ret < 0) 651 if (ret < 0)
652 return -1; 652 return -1;
653 653
654 if (!login->auth_complete && 654 if (!login->auth_complete &&
655 ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication) { 655 ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication) {
656 pr_err("Initiator is requesting CSG: 1, has not been" 656 pr_err("Initiator is requesting CSG: 1, has not been"
657 " successfully authenticated, and the Target is" 657 " successfully authenticated, and the Target is"
658 " enforcing iSCSI Authentication, login failed.\n"); 658 " enforcing iSCSI Authentication, login failed.\n");
659 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR, 659 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
660 ISCSI_LOGIN_STATUS_AUTH_FAILED); 660 ISCSI_LOGIN_STATUS_AUTH_FAILED);
661 return -1; 661 return -1;
662 } 662 }
663 663
664 if (!iscsi_check_negotiated_keys(conn->param_list)) 664 if (!iscsi_check_negotiated_keys(conn->param_list))
665 if ((login_req->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE3) && 665 if ((login_req->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE3) &&
666 (login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT)) 666 (login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT))
667 login_rsp->flags |= ISCSI_FLAG_LOGIN_NEXT_STAGE3 | 667 login_rsp->flags |= ISCSI_FLAG_LOGIN_NEXT_STAGE3 |
668 ISCSI_FLAG_LOGIN_TRANSIT; 668 ISCSI_FLAG_LOGIN_TRANSIT;
669 669
670 return 0; 670 return 0;
671 } 671 }
672 672
673 static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *login) 673 static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *login)
674 { 674 {
675 int pdu_count = 0; 675 int pdu_count = 0;
676 struct iscsi_login_req *login_req; 676 struct iscsi_login_req *login_req;
677 struct iscsi_login_rsp *login_rsp; 677 struct iscsi_login_rsp *login_rsp;
678 678
679 login_req = (struct iscsi_login_req *) login->req; 679 login_req = (struct iscsi_login_req *) login->req;
680 login_rsp = (struct iscsi_login_rsp *) login->rsp; 680 login_rsp = (struct iscsi_login_rsp *) login->rsp;
681 681
682 while (1) { 682 while (1) {
683 if (++pdu_count > MAX_LOGIN_PDUS) { 683 if (++pdu_count > MAX_LOGIN_PDUS) {
684 pr_err("MAX_LOGIN_PDUS count reached.\n"); 684 pr_err("MAX_LOGIN_PDUS count reached.\n");
685 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 685 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
686 ISCSI_LOGIN_STATUS_TARGET_ERROR); 686 ISCSI_LOGIN_STATUS_TARGET_ERROR);
687 return -1; 687 return -1;
688 } 688 }
689 689
690 switch ((login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2) { 690 switch ((login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2) {
691 case 0: 691 case 0:
692 login_rsp->flags |= (0 & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK); 692 login_rsp->flags |= (0 & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK);
693 if (iscsi_target_handle_csg_zero(conn, login) < 0) 693 if (iscsi_target_handle_csg_zero(conn, login) < 0)
694 return -1; 694 return -1;
695 break; 695 break;
696 case 1: 696 case 1:
697 login_rsp->flags |= ISCSI_FLAG_LOGIN_CURRENT_STAGE1; 697 login_rsp->flags |= ISCSI_FLAG_LOGIN_CURRENT_STAGE1;
698 if (iscsi_target_handle_csg_one(conn, login) < 0) 698 if (iscsi_target_handle_csg_one(conn, login) < 0)
699 return -1; 699 return -1;
700 if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) { 700 if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) {
701 login->tsih = conn->sess->tsih; 701 login->tsih = conn->sess->tsih;
702 if (iscsi_target_do_tx_login_io(conn, 702 if (iscsi_target_do_tx_login_io(conn,
703 login) < 0) 703 login) < 0)
704 return -1; 704 return -1;
705 return 0; 705 return 0;
706 } 706 }
707 break; 707 break;
708 default: 708 default:
709 pr_err("Illegal CSG: %d received from" 709 pr_err("Illegal CSG: %d received from"
710 " Initiator, protocol error.\n", 710 " Initiator, protocol error.\n",
711 (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) 711 (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
712 >> 2); 712 >> 2);
713 break; 713 break;
714 } 714 }
715 715
716 if (iscsi_target_do_login_io(conn, login) < 0) 716 if (iscsi_target_do_login_io(conn, login) < 0)
717 return -1; 717 return -1;
718 718
719 if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) { 719 if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) {
720 login_rsp->flags &= ~ISCSI_FLAG_LOGIN_TRANSIT; 720 login_rsp->flags &= ~ISCSI_FLAG_LOGIN_TRANSIT;
721 login_rsp->flags &= ~ISCSI_FLAG_LOGIN_NEXT_STAGE_MASK; 721 login_rsp->flags &= ~ISCSI_FLAG_LOGIN_NEXT_STAGE_MASK;
722 } 722 }
723 } 723 }
724 724
725 return 0; 725 return 0;
726 } 726 }
727 727
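For orientation, the CSG values dispatched in iscsi_target_do_login() correspond to the login stages defined in RFC 3720 section 5.3. The enum below is an illustrative summary, not a kernel definition:

/* Login stage codes seen in the CSG/NSG fields above. */
enum login_stage {
	STAGE_SECURITY     = 0,	/* handled by iscsi_target_handle_csg_zero() */
	STAGE_OPERATIONAL  = 1,	/* handled by iscsi_target_handle_csg_one() */
	/* 2 is reserved, which is why req_nsg == 2 is rejected earlier */
	STAGE_FULL_FEATURE = 3,	/* NSG only: transit out of the login phase */
};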
728 static void iscsi_initiatorname_tolower( 728 static void iscsi_initiatorname_tolower(
729 char *param_buf) 729 char *param_buf)
730 { 730 {
731 char *c; 731 char *c;
732 u32 iqn_size = strlen(param_buf), i; 732 u32 iqn_size = strlen(param_buf), i;
733 733
734 for (i = 0; i < iqn_size; i++) { 734 for (i = 0; i < iqn_size; i++) {
735 c = (char *)&param_buf[i]; 735 c = (char *)&param_buf[i];
736 if (!isupper(*c)) 736 if (!isupper(*c))
737 continue; 737 continue;
738 738
739 *c = tolower(*c); 739 *c = tolower(*c);
740 } 740 }
741 } 741 }
742 742
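The fold above matters because IQN matching elsewhere in this file uses a case-sensitive strcmp() against se_node_acl->initiatorname. A quick user-space demonstration of the same normalization (the IQN string is made up for illustration):

#include <ctype.h>
#include <stdio.h>

int main(void)
{
	char iqn[] = "iqn.1993-08.org.debian:01:ABCDEF"; /* made-up IQN */
	size_t i;

	/* Same fold as iscsi_initiatorname_tolower() above; RFC 3720
	 * 3.2.6.1(c) says IQNs are not case sensitive. */
	for (i = 0; iqn[i]; i++)
		iqn[i] = tolower((unsigned char)iqn[i]);

	printf("%s\n", iqn);	/* iqn.1993-08.org.debian:01:abcdef */
	return 0;
}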
743 /* 743 /*
744 * Processes the first Login Request. 744 * Processes the first Login Request.
745 */ 745 */
746 static int iscsi_target_locate_portal( 746 static int iscsi_target_locate_portal(
747 struct iscsi_np *np, 747 struct iscsi_np *np,
748 struct iscsi_conn *conn, 748 struct iscsi_conn *conn,
749 struct iscsi_login *login) 749 struct iscsi_login *login)
750 { 750 {
751 char *i_buf = NULL, *s_buf = NULL, *t_buf = NULL; 751 char *i_buf = NULL, *s_buf = NULL, *t_buf = NULL;
752 char *tmpbuf, *start = NULL, *end = NULL, *key, *value; 752 char *tmpbuf, *start = NULL, *end = NULL, *key, *value;
753 struct iscsi_session *sess = conn->sess; 753 struct iscsi_session *sess = conn->sess;
754 struct iscsi_tiqn *tiqn; 754 struct iscsi_tiqn *tiqn;
755 struct iscsi_login_req *login_req; 755 struct iscsi_login_req *login_req;
756 struct iscsi_targ_login_rsp *login_rsp; 756 struct iscsi_targ_login_rsp *login_rsp;
757 u32 payload_length; 757 u32 payload_length;
758 int sessiontype = 0, ret = 0; 758 int sessiontype = 0, ret = 0;
759 759
760 login_req = (struct iscsi_login_req *) login->req; 760 login_req = (struct iscsi_login_req *) login->req;
761 login_rsp = (struct iscsi_targ_login_rsp *) login->rsp; 761 login_rsp = (struct iscsi_targ_login_rsp *) login->rsp;
762 payload_length = ntoh24(login_req->dlength); 762 payload_length = ntoh24(login_req->dlength);
763 763
764 login->first_request = 1; 764 login->first_request = 1;
765 login->leading_connection = (!login_req->tsih) ? 1 : 0; 765 login->leading_connection = (!login_req->tsih) ? 1 : 0;
766 login->current_stage = 766 login->current_stage =
767 (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2; 767 (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2;
768 login->version_min = login_req->min_version; 768 login->version_min = login_req->min_version;
769 login->version_max = login_req->max_version; 769 login->version_max = login_req->max_version;
770 memcpy(login->isid, login_req->isid, 6); 770 memcpy(login->isid, login_req->isid, 6);
771 login->cmd_sn = login_req->cmdsn; 771 login->cmd_sn = login_req->cmdsn;
772 login->init_task_tag = login_req->itt; 772 login->init_task_tag = login_req->itt;
773 login->initial_exp_statsn = login_req->exp_statsn; 773 login->initial_exp_statsn = login_req->exp_statsn;
774 login->cid = login_req->cid; 774 login->cid = login_req->cid;
775 login->tsih = login_req->tsih; 775 login->tsih = login_req->tsih;
776 776
777 if (iscsi_target_get_initial_payload(conn, login) < 0) 777 if (iscsi_target_get_initial_payload(conn, login) < 0)
778 return -1; 778 return -1;
779 779
780 tmpbuf = kzalloc(payload_length + 1, GFP_KERNEL); 780 tmpbuf = kzalloc(payload_length + 1, GFP_KERNEL);
781 if (!tmpbuf) { 781 if (!tmpbuf) {
782 pr_err("Unable to allocate memory for tmpbuf.\n"); 782 pr_err("Unable to allocate memory for tmpbuf.\n");
783 return -1; 783 return -1;
784 } 784 }
785 785
786 memcpy(tmpbuf, login->req_buf, payload_length); 786 memcpy(tmpbuf, login->req_buf, payload_length);
787 tmpbuf[payload_length] = '\0'; 787 tmpbuf[payload_length] = '\0';
788 start = tmpbuf; 788 start = tmpbuf;
789 end = (start + payload_length); 789 end = (start + payload_length);
790 790
791 /* 791 /*
792 * Locate the initial keys expected from the Initiator node in 792 * Locate the initial keys expected from the Initiator node in
793 * the first login request in order to progress with the login phase. 793 * the first login request in order to progress with the login phase.
794 */ 794 */
795 while (start < end) { 795 while (start < end) {
796 if (iscsi_extract_key_value(start, &key, &value) < 0) { 796 if (iscsi_extract_key_value(start, &key, &value) < 0) {
797 ret = -1; 797 ret = -1;
798 goto out; 798 goto out;
799 } 799 }
800 800
801 if (!strncmp(key, "InitiatorName", 13)) 801 if (!strncmp(key, "InitiatorName", 13))
802 i_buf = value; 802 i_buf = value;
803 else if (!strncmp(key, "SessionType", 11)) 803 else if (!strncmp(key, "SessionType", 11))
804 s_buf = value; 804 s_buf = value;
805 else if (!strncmp(key, "TargetName", 10)) 805 else if (!strncmp(key, "TargetName", 10))
806 t_buf = value; 806 t_buf = value;
807 807
808 start += strlen(key) + strlen(value) + 2; 808 start += strlen(key) + strlen(value) + 2;
809 } 809 }
810 810
811 /* 811 /*
812 * See 5.3. Login Phase. 812 * See 5.3. Login Phase.
813 */ 813 */
814 if (!i_buf) { 814 if (!i_buf) {
815 pr_err("InitiatorName key not received" 815 pr_err("InitiatorName key not received"
816 " in first login request.\n"); 816 " in first login request.\n");
817 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR, 817 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
818 ISCSI_LOGIN_STATUS_MISSING_FIELDS); 818 ISCSI_LOGIN_STATUS_MISSING_FIELDS);
819 ret = -1; 819 ret = -1;
820 goto out; 820 goto out;
821 } 821 }
822 /* 822 /*
823 * Convert the incoming InitiatorName to lowercase following 823 * Convert the incoming InitiatorName to lowercase following
824 * RFC-3720 3.2.6.1. section c) that says that iSCSI IQNs 824 * RFC-3720 3.2.6.1. section c) that says that iSCSI IQNs
825 * are NOT case sensitive. 825 * are NOT case sensitive.
826 */ 826 */
827 iscsi_initiatorname_tolower(i_buf); 827 iscsi_initiatorname_tolower(i_buf);
828 828
829 if (!s_buf) { 829 if (!s_buf) {
830 if (!login->leading_connection) 830 if (!login->leading_connection)
831 goto get_target; 831 goto get_target;
832 832
833 pr_err("SessionType key not received" 833 pr_err("SessionType key not received"
834 " in first login request.\n"); 834 " in first login request.\n");
835 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR, 835 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
836 ISCSI_LOGIN_STATUS_MISSING_FIELDS); 836 ISCSI_LOGIN_STATUS_MISSING_FIELDS);
837 ret = -1; 837 ret = -1;
838 goto out; 838 goto out;
839 } 839 }
840 840
841 /* 841 /*
842 * Use default portal group for discovery sessions. 842 * Use default portal group for discovery sessions.
843 */ 843 */
844 sessiontype = strncmp(s_buf, DISCOVERY, 9); 844 sessiontype = strncmp(s_buf, DISCOVERY, 9);
845 if (!sessiontype) { 845 if (!sessiontype) {
846 conn->tpg = iscsit_global->discovery_tpg; 846 conn->tpg = iscsit_global->discovery_tpg;
847 if (!login->leading_connection) 847 if (!login->leading_connection)
848 goto get_target; 848 goto get_target;
849 849
850 sess->sess_ops->SessionType = 1; 850 sess->sess_ops->SessionType = 1;
851 /* 851 /*
852 * Setup crc32c modules from libcrypto 852 * Setup crc32c modules from libcrypto
853 */ 853 */
854 if (iscsi_login_setup_crypto(conn) < 0) { 854 if (iscsi_login_setup_crypto(conn) < 0) {
855 pr_err("iscsi_login_setup_crypto() failed\n"); 855 pr_err("iscsi_login_setup_crypto() failed\n");
856 ret = -1; 856 ret = -1;
857 goto out; 857 goto out;
858 } 858 }
859 /* 859 /*
860 * Serialize access across the discovery struct iscsi_portal_group to 860 * Serialize access across the discovery struct iscsi_portal_group to
861 * process login attempt. 861 * process login attempt.
862 */ 862 */
863 if (iscsit_access_np(np, conn->tpg) < 0) { 863 if (iscsit_access_np(np, conn->tpg) < 0) {
864 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 864 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
865 ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE); 865 ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
866 ret = -1; 866 ret = -1;
867 goto out; 867 goto out;
868 } 868 }
869 ret = 0; 869 ret = 0;
870 goto out; 870 goto out;
871 } 871 }
872 872
873 get_target: 873 get_target:
874 if (!t_buf) { 874 if (!t_buf) {
875 pr_err("TargetName key not received" 875 pr_err("TargetName key not received"
876 " in first login request while" 876 " in first login request while"
877 " SessionType=Normal.\n"); 877 " SessionType=Normal.\n");
878 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR, 878 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
879 ISCSI_LOGIN_STATUS_MISSING_FIELDS); 879 ISCSI_LOGIN_STATUS_MISSING_FIELDS);
880 ret = -1; 880 ret = -1;
881 goto out; 881 goto out;
882 } 882 }
883 883
884 /* 884 /*
885 * Locate Target IQN from Storage Node. 885 * Locate Target IQN from Storage Node.
886 */ 886 */
887 tiqn = iscsit_get_tiqn_for_login(t_buf); 887 tiqn = iscsit_get_tiqn_for_login(t_buf);
888 if (!tiqn) { 888 if (!tiqn) {
889 pr_err("Unable to locate Target IQN: %s in" 889 pr_err("Unable to locate Target IQN: %s in"
890 " Storage Node\n", t_buf); 890 " Storage Node\n", t_buf);
891 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 891 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
892 ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE); 892 ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
893 ret = -1; 893 ret = -1;
894 goto out; 894 goto out;
895 } 895 }
896 pr_debug("Located Storage Object: %s\n", tiqn->tiqn); 896 pr_debug("Located Storage Object: %s\n", tiqn->tiqn);
897 897
898 /* 898 /*
899 * Locate Target Portal Group from Storage Node. 899 * Locate Target Portal Group from Storage Node.
900 */ 900 */
901 conn->tpg = iscsit_get_tpg_from_np(tiqn, np); 901 conn->tpg = iscsit_get_tpg_from_np(tiqn, np);
902 if (!conn->tpg) { 902 if (!conn->tpg) {
903 pr_err("Unable to locate Target Portal Group" 903 pr_err("Unable to locate Target Portal Group"
904 " on %s\n", tiqn->tiqn); 904 " on %s\n", tiqn->tiqn);
905 iscsit_put_tiqn_for_login(tiqn); 905 iscsit_put_tiqn_for_login(tiqn);
906 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 906 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
907 ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE); 907 ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
908 ret = -1; 908 ret = -1;
909 goto out; 909 goto out;
910 } 910 }
911 pr_debug("Located Portal Group Object: %hu\n", conn->tpg->tpgt); 911 pr_debug("Located Portal Group Object: %hu\n", conn->tpg->tpgt);
912 /* 912 /*
913 * Setup crc32c modules from libcrypto 913 * Setup crc32c modules from libcrypto
914 */ 914 */
915 if (iscsi_login_setup_crypto(conn) < 0) { 915 if (iscsi_login_setup_crypto(conn) < 0) {
916 pr_err("iscsi_login_setup_crypto() failed\n"); 916 pr_err("iscsi_login_setup_crypto() failed\n");
917 ret = -1; 917 ret = -1;
918 goto out; 918 goto out;
919 } 919 }
920 /* 920 /*
921 * Serialize access across the struct iscsi_portal_group to 921 * Serialize access across the struct iscsi_portal_group to
922 * process login attempt. 922 * process login attempt.
923 */ 923 */
924 if (iscsit_access_np(np, conn->tpg) < 0) { 924 if (iscsit_access_np(np, conn->tpg) < 0) {
925 iscsit_put_tiqn_for_login(tiqn); 925 iscsit_put_tiqn_for_login(tiqn);
926 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 926 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
927 ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE); 927 ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
928 ret = -1; 928 ret = -1;
929 conn->tpg = NULL; 929 conn->tpg = NULL;
930 goto out; 930 goto out;
931 } 931 }
932 932
933 /* 933 /*
934 * conn->sess->node_acl will be set when the referenced 934 * conn->sess->node_acl will be set when the referenced
935 * struct iscsi_session is located from received ISID+TSIH in 935 * struct iscsi_session is located from received ISID+TSIH in
936 * iscsi_login_non_zero_tsih_s2(). 936 * iscsi_login_non_zero_tsih_s2().
937 */ 937 */
938 if (!login->leading_connection) { 938 if (!login->leading_connection) {
939 ret = 0; 939 ret = 0;
940 goto out; 940 goto out;
941 } 941 }
942 942
943 /* 943 /*
944 * This value is required in iscsi_login_zero_tsih_s2() 944 * This value is required in iscsi_login_zero_tsih_s2()
945 */ 945 */
946 sess->sess_ops->SessionType = 0; 946 sess->sess_ops->SessionType = 0;
947 947
948 /* 948 /*
949 * Locate incoming Initiator IQN reference from Storage Node. 949 * Locate incoming Initiator IQN reference from Storage Node.
950 */ 950 */
951 sess->se_sess->se_node_acl = core_tpg_check_initiator_node_acl( 951 sess->se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
952 &conn->tpg->tpg_se_tpg, i_buf); 952 &conn->tpg->tpg_se_tpg, i_buf);
953 if (!sess->se_sess->se_node_acl) { 953 if (!sess->se_sess->se_node_acl) {
954 pr_err("iSCSI Initiator Node: %s is not authorized to" 954 pr_err("iSCSI Initiator Node: %s is not authorized to"
955 " access iSCSI target portal group: %hu.\n", 955 " access iSCSI target portal group: %hu.\n",
956 i_buf, conn->tpg->tpgt); 956 i_buf, conn->tpg->tpgt);
957 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR, 957 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
958 ISCSI_LOGIN_STATUS_TGT_FORBIDDEN); 958 ISCSI_LOGIN_STATUS_TGT_FORBIDDEN);
959 ret = -1; 959 ret = -1;
960 goto out; 960 goto out;
961 } 961 }
962 962
963 ret = 0; 963 ret = 0;
964 out: 964 out:
965 kfree(tmpbuf); 965 kfree(tmpbuf);
966 return ret; 966 return ret;
967 } 967 }
968 968
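The parsing loop in iscsi_target_locate_portal() walks login text as a sequence of NUL-terminated Key=Value pairs; the strlen(key) + strlen(value) + 2 advance accounts for the '=' separator and the trailing '\0' of each pair. A toy user-space version of the same walk, with iscsi_extract_key_value() replaced by a simple strchr() split for illustration:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Two pairs, each terminated by an embedded NUL byte. */
	char buf[] = "InitiatorName=iqn.x\0SessionType=Normal\0";
	char *start = buf, *end = buf + sizeof(buf) - 1;

	while (start < end) {
		char *key = start, *value = strchr(start, '=');

		if (!value)
			break;
		*value++ = '\0';
		printf("key=%s value=%s\n", key, value);
		/* Skip past "key=value\0": '=' and '\0' are the +2. */
		start += strlen(key) + strlen(value) + 2;
	}
	return 0;
}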
969 struct iscsi_login *iscsi_target_init_negotiation( 969 struct iscsi_login *iscsi_target_init_negotiation(
970 struct iscsi_np *np, 970 struct iscsi_np *np,
971 struct iscsi_conn *conn, 971 struct iscsi_conn *conn,
972 char *login_pdu) 972 char *login_pdu)
973 { 973 {
974 struct iscsi_login *login; 974 struct iscsi_login *login;
975 975
976 login = kzalloc(sizeof(struct iscsi_login), GFP_KERNEL); 976 login = kzalloc(sizeof(struct iscsi_login), GFP_KERNEL);
977 if (!login) { 977 if (!login) {
978 pr_err("Unable to allocate memory for struct iscsi_login.\n"); 978 pr_err("Unable to allocate memory for struct iscsi_login.\n");
979 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 979 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
980 ISCSI_LOGIN_STATUS_NO_RESOURCES); 980 ISCSI_LOGIN_STATUS_NO_RESOURCES);
981 return NULL; 981 return NULL;
982 } 982 }
983 983
984 login->req = kmemdup(login_pdu, ISCSI_HDR_LEN, GFP_KERNEL); 984 login->req = kmemdup(login_pdu, ISCSI_HDR_LEN, GFP_KERNEL);
985 if (!login->req) { 985 if (!login->req) {
986 pr_err("Unable to allocate memory for Login Request.\n"); 986 pr_err("Unable to allocate memory for Login Request.\n");
987 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 987 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
988 ISCSI_LOGIN_STATUS_NO_RESOURCES); 988 ISCSI_LOGIN_STATUS_NO_RESOURCES);
989 goto out; 989 goto out;
990 } 990 }
991 991
992 login->req_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL); 992 login->req_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL);
993 if (!login->req_buf) { 993 if (!login->req_buf) {
994 pr_err("Unable to allocate memory for response buffer.\n"); 994 pr_err("Unable to allocate memory for response buffer.\n");
995 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 995 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
996 ISCSI_LOGIN_STATUS_NO_RESOURCES); 996 ISCSI_LOGIN_STATUS_NO_RESOURCES);
997 goto out; 997 goto out;
998 } 998 }
999 /* 999 /*
1000 * SessionType: Discovery 1000 * SessionType: Discovery
1001 * 1001 *
1002 * Locates Default Portal 1002 * Locates Default Portal
1003 * 1003 *
1004 * SessionType: Normal 1004 * SessionType: Normal
1005 * 1005 *
1006 * Locates Target Portal from NP -> Target IQN 1006 * Locates Target Portal from NP -> Target IQN
1007 */ 1007 */
1008 if (iscsi_target_locate_portal(np, conn, login) < 0) { 1008 if (iscsi_target_locate_portal(np, conn, login) < 0) {
1009 pr_err("iSCSI Login negotiation failed.\n"); 1009 pr_err("iSCSI Login negotiation failed.\n");
1010 goto out; 1010 goto out;
1011 } 1011 }
1012 1012
1013 return login; 1013 return login;
1014 out: 1014 out:
1015 kfree(login->req); 1015 kfree(login->req);
1016 kfree(login->req_buf); 1016 kfree(login->req_buf);
1017 kfree(login); 1017 kfree(login);
1018 1018
1019 return NULL; 1019 return NULL;
1020 } 1020 }
1021 1021
1022 int iscsi_target_start_negotiation( 1022 int iscsi_target_start_negotiation(
1023 struct iscsi_login *login, 1023 struct iscsi_login *login,
1024 struct iscsi_conn *conn) 1024 struct iscsi_conn *conn)
1025 { 1025 {
1026 int ret = -1; 1026 int ret = -1;
1027 1027
1028 login->rsp = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL); 1028 login->rsp = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
1029 if (!login->rsp) { 1029 if (!login->rsp) {
1030 pr_err("Unable to allocate memory for" 1030 pr_err("Unable to allocate memory for"
1031 " Login Response.\n"); 1031 " Login Response.\n");
1032 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 1032 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
1033 ISCSI_LOGIN_STATUS_NO_RESOURCES); 1033 ISCSI_LOGIN_STATUS_NO_RESOURCES);
1034 ret = -1; 1034 ret = -1;
1035 goto out; 1035 goto out;
1036 } 1036 }
1037 1037
1038 login->rsp_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL); 1038 login->rsp_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL);
1039 if (!login->rsp_buf) { 1039 if (!login->rsp_buf) {
1040 pr_err("Unable to allocate memory for" 1040 pr_err("Unable to allocate memory for"
1041 " request buffer.\n"); 1041 " request buffer.\n");
1042 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 1042 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
1043 ISCSI_LOGIN_STATUS_NO_RESOURCES); 1043 ISCSI_LOGIN_STATUS_NO_RESOURCES);
1044 ret = -1; 1044 ret = -1;
1045 goto out; 1045 goto out;
1046 } 1046 }
1047 1047
1048 ret = iscsi_target_do_login(conn, login); 1048 ret = iscsi_target_do_login(conn, login);
1049 out: 1049 out:
1050 if (ret != 0) 1050 if (ret != 0)
1051 iscsi_remove_failed_auth_entry(conn); 1051 iscsi_remove_failed_auth_entry(conn);
1052 1052
1053 iscsi_target_nego_release(login, conn); 1053 iscsi_target_nego_release(login, conn);
1054 return ret; 1054 return ret;
1055 } 1055 }
1056 1056
1057 void iscsi_target_nego_release( 1057 void iscsi_target_nego_release(
1058 struct iscsi_login *login, 1058 struct iscsi_login *login,
1059 struct iscsi_conn *conn) 1059 struct iscsi_conn *conn)
1060 { 1060 {
1061 kfree(login->req); 1061 kfree(login->req);
1062 kfree(login->rsp); 1062 kfree(login->rsp);
1063 kfree(login->req_buf); 1063 kfree(login->req_buf);
1064 kfree(login->rsp_buf); 1064 kfree(login->rsp_buf);
1065 kfree(login); 1065 kfree(login);
1066 } 1066 }
1067 1067
drivers/target/iscsi/iscsi_target_nodeattrib.c
1 /******************************************************************************* 1 /*******************************************************************************
2 * This file contains the main functions related to Initiator Node Attributes. 2 * This file contains the main functions related to Initiator Node Attributes.
3 * 3 *
4 * © Copyright 2007-2011 RisingTide Systems LLC. 4 * © Copyright 2007-2011 RisingTide Systems LLC.
5 * 5 *
6 * Licensed to the Linux Foundation under the General Public License (GPL) version 2. 6 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
7 * 7 *
8 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> 8 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by 11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or 12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version. 13 * (at your option) any later version.
14 * 14 *
15 * This program is distributed in the hope that it will be useful, 15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details. 18 * GNU General Public License for more details.
19 ******************************************************************************/ 19 ******************************************************************************/
20 20
21 #include <target/target_core_base.h> 21 #include <target/target_core_base.h>
22 #include <target/target_core_transport.h>
23 22
24 #include "iscsi_target_core.h" 23 #include "iscsi_target_core.h"
25 #include "iscsi_target_device.h" 24 #include "iscsi_target_device.h"
26 #include "iscsi_target_tpg.h" 25 #include "iscsi_target_tpg.h"
27 #include "iscsi_target_util.h" 26 #include "iscsi_target_util.h"
28 #include "iscsi_target_nodeattrib.h" 27 #include "iscsi_target_nodeattrib.h"
29 28
30 static inline char *iscsit_na_get_initiatorname( 29 static inline char *iscsit_na_get_initiatorname(
31 struct iscsi_node_acl *nacl) 30 struct iscsi_node_acl *nacl)
32 { 31 {
33 struct se_node_acl *se_nacl = &nacl->se_node_acl; 32 struct se_node_acl *se_nacl = &nacl->se_node_acl;
34 33
35 return &se_nacl->initiatorname[0]; 34 return &se_nacl->initiatorname[0];
36 } 35 }
37 36
38 void iscsit_set_default_node_attribues( 37 void iscsit_set_default_node_attribues(
39 struct iscsi_node_acl *acl) 38 struct iscsi_node_acl *acl)
40 { 39 {
41 struct iscsi_node_attrib *a = &acl->node_attrib; 40 struct iscsi_node_attrib *a = &acl->node_attrib;
42 41
43 a->dataout_timeout = NA_DATAOUT_TIMEOUT; 42 a->dataout_timeout = NA_DATAOUT_TIMEOUT;
44 a->dataout_timeout_retries = NA_DATAOUT_TIMEOUT_RETRIES; 43 a->dataout_timeout_retries = NA_DATAOUT_TIMEOUT_RETRIES;
45 a->nopin_timeout = NA_NOPIN_TIMEOUT; 44 a->nopin_timeout = NA_NOPIN_TIMEOUT;
46 a->nopin_response_timeout = NA_NOPIN_RESPONSE_TIMEOUT; 45 a->nopin_response_timeout = NA_NOPIN_RESPONSE_TIMEOUT;
47 a->random_datain_pdu_offsets = NA_RANDOM_DATAIN_PDU_OFFSETS; 46 a->random_datain_pdu_offsets = NA_RANDOM_DATAIN_PDU_OFFSETS;
48 a->random_datain_seq_offsets = NA_RANDOM_DATAIN_SEQ_OFFSETS; 47 a->random_datain_seq_offsets = NA_RANDOM_DATAIN_SEQ_OFFSETS;
49 a->random_r2t_offsets = NA_RANDOM_R2T_OFFSETS; 48 a->random_r2t_offsets = NA_RANDOM_R2T_OFFSETS;
50 a->default_erl = NA_DEFAULT_ERL; 49 a->default_erl = NA_DEFAULT_ERL;
51 } 50 }
52 51
53 extern int iscsit_na_dataout_timeout( 52 extern int iscsit_na_dataout_timeout(
54 struct iscsi_node_acl *acl, 53 struct iscsi_node_acl *acl,
55 u32 dataout_timeout) 54 u32 dataout_timeout)
56 { 55 {
57 struct iscsi_node_attrib *a = &acl->node_attrib; 56 struct iscsi_node_attrib *a = &acl->node_attrib;
58 57
59 if (dataout_timeout > NA_DATAOUT_TIMEOUT_MAX) { 58 if (dataout_timeout > NA_DATAOUT_TIMEOUT_MAX) {
60 pr_err("Requested DataOut Timeout %u larger than" 59 pr_err("Requested DataOut Timeout %u larger than"
61 " maximum %u\n", dataout_timeout, 60 " maximum %u\n", dataout_timeout,
62 NA_DATAOUT_TIMEOUT_MAX); 61 NA_DATAOUT_TIMEOUT_MAX);
63 return -EINVAL; 62 return -EINVAL;
64 } else if (dataout_timeout < NA_DATAOUT_TIMEOUT_MIX) { 63 } else if (dataout_timeout < NA_DATAOUT_TIMEOUT_MIX) {
65 pr_err("Requested DataOut Timeout %u smaller than" 64 pr_err("Requested DataOut Timeout %u smaller than"
66 " minimum %u\n", dataout_timeout, 65 " minimum %u\n", dataout_timeout,
67 NA_DATAOUT_TIMEOUT_MIX); 66 NA_DATAOUT_TIMEOUT_MIX);
68 return -EINVAL; 67 return -EINVAL;
69 } 68 }
70 69
71 a->dataout_timeout = dataout_timeout; 70 a->dataout_timeout = dataout_timeout;
72 pr_debug("Set DataOut Timeout to %u for Initiator Node" 71 pr_debug("Set DataOut Timeout to %u for Initiator Node"
73 " %s\n", a->dataout_timeout, iscsit_na_get_initiatorname(acl)); 72 " %s\n", a->dataout_timeout, iscsit_na_get_initiatorname(acl));
74 73
75 return 0; 74 return 0;
76 } 75 }
77 76
78 extern int iscsit_na_dataout_timeout_retries( 77 extern int iscsit_na_dataout_timeout_retries(
79 struct iscsi_node_acl *acl, 78 struct iscsi_node_acl *acl,
80 u32 dataout_timeout_retries) 79 u32 dataout_timeout_retries)
81 { 80 {
82 struct iscsi_node_attrib *a = &acl->node_attrib; 81 struct iscsi_node_attrib *a = &acl->node_attrib;
83 82
84 if (dataout_timeout_retries > NA_DATAOUT_TIMEOUT_RETRIES_MAX) { 83 if (dataout_timeout_retries > NA_DATAOUT_TIMEOUT_RETRIES_MAX) {
85 pr_err("Requested DataOut Timeout Retries %u larger" 84 pr_err("Requested DataOut Timeout Retries %u larger"
86 " than maximum %u", dataout_timeout_retries, 85 " than maximum %u", dataout_timeout_retries,
87 NA_DATAOUT_TIMEOUT_RETRIES_MAX); 86 NA_DATAOUT_TIMEOUT_RETRIES_MAX);
88 return -EINVAL; 87 return -EINVAL;
89 } else if (dataout_timeout_retries < NA_DATAOUT_TIMEOUT_RETRIES_MIN) { 88 } else if (dataout_timeout_retries < NA_DATAOUT_TIMEOUT_RETRIES_MIN) {
90 pr_err("Requested DataOut Timeout Retries %u smaller" 89 pr_err("Requested DataOut Timeout Retries %u smaller"
91 " than minimum %u", dataout_timeout_retries, 90 " than minimum %u", dataout_timeout_retries,
92 NA_DATAOUT_TIMEOUT_RETRIES_MIN); 91 NA_DATAOUT_TIMEOUT_RETRIES_MIN);
93 return -EINVAL; 92 return -EINVAL;
94 } 93 }
95 94
96 a->dataout_timeout_retries = dataout_timeout_retries; 95 a->dataout_timeout_retries = dataout_timeout_retries;
97 pr_debug("Set DataOut Timeout Retries to %u for" 96 pr_debug("Set DataOut Timeout Retries to %u for"
98 " Initiator Node %s\n", a->dataout_timeout_retries, 97 " Initiator Node %s\n", a->dataout_timeout_retries,
99 iscsit_na_get_initiatorname(acl)); 98 iscsit_na_get_initiatorname(acl));
100 99
101 return 0; 100 return 0;
102 } 101 }
103 102
104 extern int iscsit_na_nopin_timeout( 103 extern int iscsit_na_nopin_timeout(
105 struct iscsi_node_acl *acl, 104 struct iscsi_node_acl *acl,
106 u32 nopin_timeout) 105 u32 nopin_timeout)
107 { 106 {
108 struct iscsi_node_attrib *a = &acl->node_attrib; 107 struct iscsi_node_attrib *a = &acl->node_attrib;
109 struct iscsi_session *sess; 108 struct iscsi_session *sess;
110 struct iscsi_conn *conn; 109 struct iscsi_conn *conn;
111 struct se_node_acl *se_nacl = &a->nacl->se_node_acl; 110 struct se_node_acl *se_nacl = &a->nacl->se_node_acl;
112 struct se_session *se_sess; 111 struct se_session *se_sess;
113 u32 orig_nopin_timeout = a->nopin_timeout; 112 u32 orig_nopin_timeout = a->nopin_timeout;
114 113
115 if (nopin_timeout > NA_NOPIN_TIMEOUT_MAX) { 114 if (nopin_timeout > NA_NOPIN_TIMEOUT_MAX) {
116 pr_err("Requested NopIn Timeout %u larger than maximum" 115 pr_err("Requested NopIn Timeout %u larger than maximum"
117 " %u\n", nopin_timeout, NA_NOPIN_TIMEOUT_MAX); 116 " %u\n", nopin_timeout, NA_NOPIN_TIMEOUT_MAX);
118 return -EINVAL; 117 return -EINVAL;
119 } else if ((nopin_timeout < NA_NOPIN_TIMEOUT_MIN) && 118 } else if ((nopin_timeout < NA_NOPIN_TIMEOUT_MIN) &&
120 (nopin_timeout != 0)) { 119 (nopin_timeout != 0)) {
121 pr_err("Requested NopIn Timeout %u smaller than" 120 pr_err("Requested NopIn Timeout %u smaller than"
122 " minimum %u and not 0\n", nopin_timeout, 121 " minimum %u and not 0\n", nopin_timeout,
123 NA_NOPIN_TIMEOUT_MIN); 122 NA_NOPIN_TIMEOUT_MIN);
124 return -EINVAL; 123 return -EINVAL;
125 } 124 }
126 125
127 a->nopin_timeout = nopin_timeout; 126 a->nopin_timeout = nopin_timeout;
128 pr_debug("Set NopIn Timeout to %u for Initiator" 127 pr_debug("Set NopIn Timeout to %u for Initiator"
129 " Node %s\n", a->nopin_timeout, 128 " Node %s\n", a->nopin_timeout,
130 iscsit_na_get_initiatorname(acl)); 129 iscsit_na_get_initiatorname(acl));
131 /* 130 /*
132 * Reenable disabled nopin_timeout timer for all iSCSI connections. 131 * Reenable disabled nopin_timeout timer for all iSCSI connections.
133 */ 132 */
134 if (!orig_nopin_timeout) { 133 if (!orig_nopin_timeout) {
135 spin_lock_bh(&se_nacl->nacl_sess_lock); 134 spin_lock_bh(&se_nacl->nacl_sess_lock);
136 se_sess = se_nacl->nacl_sess; 135 se_sess = se_nacl->nacl_sess;
137 if (se_sess) { 136 if (se_sess) {
138 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr; 137 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
139 138
140 spin_lock(&sess->conn_lock); 139 spin_lock(&sess->conn_lock);
141 list_for_each_entry(conn, &sess->sess_conn_list, 140 list_for_each_entry(conn, &sess->sess_conn_list,
142 conn_list) { 141 conn_list) {
143 if (conn->conn_state != 142 if (conn->conn_state !=
144 TARG_CONN_STATE_LOGGED_IN) 143 TARG_CONN_STATE_LOGGED_IN)
145 continue; 144 continue;
146 145
147 spin_lock(&conn->nopin_timer_lock); 146 spin_lock(&conn->nopin_timer_lock);
148 __iscsit_start_nopin_timer(conn); 147 __iscsit_start_nopin_timer(conn);
149 spin_unlock(&conn->nopin_timer_lock); 148 spin_unlock(&conn->nopin_timer_lock);
150 } 149 }
151 spin_unlock(&sess->conn_lock); 150 spin_unlock(&sess->conn_lock);
152 } 151 }
153 spin_unlock_bh(&se_nacl->nacl_sess_lock); 152 spin_unlock_bh(&se_nacl->nacl_sess_lock);
154 } 153 }
155 154
156 return 0; 155 return 0;
157 } 156 }
158 157
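iscsit_na_nopin_timeout() above is the one setter that does more than validate and assign: a nopin_timeout of 0 means the timer was never armed, so when the value moves from 0 to non-zero the timers of already-logged-in connections have to be started by hand. Condensing the walk to its lock nesting (names from this file, loop bodies elided):

	spin_lock_bh(&se_nacl->nacl_sess_lock);	/* pins se_sess */
	spin_lock(&sess->conn_lock);		/* pins the connection list */
	spin_lock(&conn->nopin_timer_lock);	/* guards the per-conn timer */
	__iscsit_start_nopin_timer(conn);
	spin_unlock(&conn->nopin_timer_lock);
	spin_unlock(&sess->conn_lock);
	spin_unlock_bh(&se_nacl->nacl_sess_lock);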
159 extern int iscsit_na_nopin_response_timeout( 158 extern int iscsit_na_nopin_response_timeout(
160 struct iscsi_node_acl *acl, 159 struct iscsi_node_acl *acl,
161 u32 nopin_response_timeout) 160 u32 nopin_response_timeout)
162 { 161 {
163 struct iscsi_node_attrib *a = &acl->node_attrib; 162 struct iscsi_node_attrib *a = &acl->node_attrib;
164 163
165 if (nopin_response_timeout > NA_NOPIN_RESPONSE_TIMEOUT_MAX) { 164 if (nopin_response_timeout > NA_NOPIN_RESPONSE_TIMEOUT_MAX) {
166 pr_err("Requested NopIn Response Timeout %u larger" 165 pr_err("Requested NopIn Response Timeout %u larger"
167 " than maximum %u\n", nopin_response_timeout, 166 " than maximum %u\n", nopin_response_timeout,
168 NA_NOPIN_RESPONSE_TIMEOUT_MAX); 167 NA_NOPIN_RESPONSE_TIMEOUT_MAX);
169 return -EINVAL; 168 return -EINVAL;
170 } else if (nopin_response_timeout < NA_NOPIN_RESPONSE_TIMEOUT_MIN) { 169 } else if (nopin_response_timeout < NA_NOPIN_RESPONSE_TIMEOUT_MIN) {
171 pr_err("Requested NopIn Response Timeout %u smaller" 170 pr_err("Requested NopIn Response Timeout %u smaller"
172 " than minimum %u\n", nopin_response_timeout, 171 " than minimum %u\n", nopin_response_timeout,
173 NA_NOPIN_RESPONSE_TIMEOUT_MIN); 172 NA_NOPIN_RESPONSE_TIMEOUT_MIN);
174 return -EINVAL; 173 return -EINVAL;
175 } 174 }
176 175
177 a->nopin_response_timeout = nopin_response_timeout; 176 a->nopin_response_timeout = nopin_response_timeout;
178 pr_debug("Set NopIn Response Timeout to %u for" 177 pr_debug("Set NopIn Response Timeout to %u for"
179 " Initiator Node %s\n", a->nopin_timeout, 178 " Initiator Node %s\n", a->nopin_timeout,

180 iscsit_na_get_initiatorname(acl)); 179 iscsit_na_get_initiatorname(acl));
181 180
182 return 0; 181 return 0;
183 } 182 }
184 183
185 extern int iscsit_na_random_datain_pdu_offsets( 184 extern int iscsit_na_random_datain_pdu_offsets(
186 struct iscsi_node_acl *acl, 185 struct iscsi_node_acl *acl,
187 u32 random_datain_pdu_offsets) 186 u32 random_datain_pdu_offsets)
188 { 187 {
189 struct iscsi_node_attrib *a = &acl->node_attrib; 188 struct iscsi_node_attrib *a = &acl->node_attrib;
190 189
191 if (random_datain_pdu_offsets != 0 && random_datain_pdu_offsets != 1) { 190 if (random_datain_pdu_offsets != 0 && random_datain_pdu_offsets != 1) {
192 pr_err("Requested Random DataIN PDU Offsets: %u not" 191 pr_err("Requested Random DataIN PDU Offsets: %u not"
193 " 0 or 1\n", random_datain_pdu_offsets); 192 " 0 or 1\n", random_datain_pdu_offsets);
194 return -EINVAL; 193 return -EINVAL;
195 } 194 }
196 195
197 a->random_datain_pdu_offsets = random_datain_pdu_offsets; 196 a->random_datain_pdu_offsets = random_datain_pdu_offsets;
198 pr_debug("Set Random DataIN PDU Offsets to %u for" 197 pr_debug("Set Random DataIN PDU Offsets to %u for"
199 " Initiator Node %s\n", a->random_datain_pdu_offsets, 198 " Initiator Node %s\n", a->random_datain_pdu_offsets,
200 iscsit_na_get_initiatorname(acl)); 199 iscsit_na_get_initiatorname(acl));
201 200
202 return 0; 201 return 0;
203 } 202 }
204 203
205 extern int iscsit_na_random_datain_seq_offsets( 204 extern int iscsit_na_random_datain_seq_offsets(
206 struct iscsi_node_acl *acl, 205 struct iscsi_node_acl *acl,
207 u32 random_datain_seq_offsets) 206 u32 random_datain_seq_offsets)
208 { 207 {
209 struct iscsi_node_attrib *a = &acl->node_attrib; 208 struct iscsi_node_attrib *a = &acl->node_attrib;
210 209
211 if (random_datain_seq_offsets != 0 && random_datain_seq_offsets != 1) { 210 if (random_datain_seq_offsets != 0 && random_datain_seq_offsets != 1) {
212 pr_err("Requested Random DataIN Sequence Offsets: %u" 211 pr_err("Requested Random DataIN Sequence Offsets: %u"
213 " not 0 or 1\n", random_datain_seq_offsets); 212 " not 0 or 1\n", random_datain_seq_offsets);
214 return -EINVAL; 213 return -EINVAL;
215 } 214 }
216 215
217 a->random_datain_seq_offsets = random_datain_seq_offsets; 216 a->random_datain_seq_offsets = random_datain_seq_offsets;
218 pr_debug("Set Random DataIN Sequence Offsets to %u for" 217 pr_debug("Set Random DataIN Sequence Offsets to %u for"
219 " Initiator Node %s\n", a->random_datain_seq_offsets, 218 " Initiator Node %s\n", a->random_datain_seq_offsets,
220 iscsit_na_get_initiatorname(acl)); 219 iscsit_na_get_initiatorname(acl));
221 220
222 return 0; 221 return 0;
223 } 222 }
224 223
225 extern int iscsit_na_random_r2t_offsets( 224 extern int iscsit_na_random_r2t_offsets(
226 struct iscsi_node_acl *acl, 225 struct iscsi_node_acl *acl,
227 u32 random_r2t_offsets) 226 u32 random_r2t_offsets)
228 { 227 {
229 struct iscsi_node_attrib *a = &acl->node_attrib; 228 struct iscsi_node_attrib *a = &acl->node_attrib;
230 229
231 if (random_r2t_offsets != 0 && random_r2t_offsets != 1) { 230 if (random_r2t_offsets != 0 && random_r2t_offsets != 1) {
232 pr_err("Requested Random R2T Offsets: %u not" 231 pr_err("Requested Random R2T Offsets: %u not"
233 " 0 or 1\n", random_r2t_offsets); 232 " 0 or 1\n", random_r2t_offsets);
234 return -EINVAL; 233 return -EINVAL;
235 } 234 }
236 235
237 a->random_r2t_offsets = random_r2t_offsets; 236 a->random_r2t_offsets = random_r2t_offsets;
238 pr_debug("Set Random R2T Offsets to %u for" 237 pr_debug("Set Random R2T Offsets to %u for"
239 " Initiator Node %s\n", a->random_r2t_offsets, 238 " Initiator Node %s\n", a->random_r2t_offsets,
240 iscsit_na_get_initiatorname(acl)); 239 iscsit_na_get_initiatorname(acl));
241 240
242 return 0; 241 return 0;
243 } 242 }
244 243
245 extern int iscsit_na_default_erl( 244 extern int iscsit_na_default_erl(
246 struct iscsi_node_acl *acl, 245 struct iscsi_node_acl *acl,
247 u32 default_erl) 246 u32 default_erl)
248 { 247 {
249 struct iscsi_node_attrib *a = &acl->node_attrib; 248 struct iscsi_node_attrib *a = &acl->node_attrib;
250 249
251 if (default_erl != 0 && default_erl != 1 && default_erl != 2) { 250 if (default_erl != 0 && default_erl != 1 && default_erl != 2) {
252 pr_err("Requested default ERL: %u not 0, 1, or 2\n", 251 pr_err("Requested default ERL: %u not 0, 1, or 2\n",
253 default_erl); 252 default_erl);
254 return -EINVAL; 253 return -EINVAL;
255 } 254 }
256 255
257 a->default_erl = default_erl; 256 a->default_erl = default_erl;
258 pr_debug("Set use ERL0 flag to %u for Initiator" 257 pr_debug("Set use ERL0 flag to %u for Initiator"
259 " Node %s\n", a->default_erl, 258 " Node %s\n", a->default_erl,
260 iscsit_na_get_initiatorname(acl)); 259 iscsit_na_get_initiatorname(acl));
261 260
262 return 0; 261 return 0;
263 } 262 }
264 263
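These extern setters are the backing implementation for the per-ACL configfs attributes; the store handlers that call them live elsewhere in the iSCSI target. A hedged sketch of how one would be driven from a store handler — the function name and the use of kstrtou32() are assumptions for illustration, not code from this commit:

	static ssize_t example_store_nopin_timeout(struct iscsi_node_acl *acl,
				const char *page, size_t count)
	{
		u32 val;
		int ret;

		ret = kstrtou32(page, 0, &val);		/* parse user input */
		if (ret < 0)
			return ret;

		ret = iscsit_na_nopin_timeout(acl, val);	/* range-checked */
		if (ret < 0)
			return ret;		/* -EINVAL if out of range */

		return count;			/* configfs store convention */
	}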
drivers/target/iscsi/iscsi_target_stat.c
1 /******************************************************************************* 1 /*******************************************************************************
2 * Modern ConfigFS group context specific iSCSI statistics based on original 2 * Modern ConfigFS group context specific iSCSI statistics based on original
3 * iscsi_target_mib.c code 3 * iscsi_target_mib.c code
4 * 4 *
5 * Copyright (c) 2011 Rising Tide Systems 5 * Copyright (c) 2011 Rising Tide Systems
6 * 6 *
7 * Licensed to the Linux Foundation under the General Public License (GPL) version 2. 7 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
8 * 8 *
9 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> 9 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
10 * 10 *
11 * This program is free software; you can redistribute it and/or modify 11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by 12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or 13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version. 14 * (at your option) any later version.
15 * 15 *
16 * This program is distributed in the hope that it will be useful, 16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details. 19 * GNU General Public License for more details.
20 ******************************************************************************/ 20 ******************************************************************************/
21 21
22 #include <linux/configfs.h> 22 #include <linux/configfs.h>
23 #include <linux/export.h> 23 #include <linux/export.h>
24 #include <scsi/iscsi_proto.h> 24 #include <scsi/iscsi_proto.h>
25 #include <target/target_core_base.h> 25 #include <target/target_core_base.h>
26 #include <target/target_core_transport.h>
27 #include <target/configfs_macros.h> 26 #include <target/configfs_macros.h>
28 27
29 #include "iscsi_target_core.h" 28 #include "iscsi_target_core.h"
30 #include "iscsi_target_parameters.h" 29 #include "iscsi_target_parameters.h"
31 #include "iscsi_target_device.h" 30 #include "iscsi_target_device.h"
32 #include "iscsi_target_tpg.h" 31 #include "iscsi_target_tpg.h"
33 #include "iscsi_target_util.h" 32 #include "iscsi_target_util.h"
34 #include "iscsi_target_stat.h" 33 #include "iscsi_target_stat.h"
35 34
36 #ifndef INITIAL_JIFFIES 35 #ifndef INITIAL_JIFFIES
37 #define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ)) 36 #define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
38 #endif 37 #endif
39 38
40 /* Instance Attributes Table */ 39 /* Instance Attributes Table */
41 #define ISCSI_INST_NUM_NODES 1 40 #define ISCSI_INST_NUM_NODES 1
42 #define ISCSI_INST_DESCR "Storage Engine Target" 41 #define ISCSI_INST_DESCR "Storage Engine Target"
43 #define ISCSI_INST_LAST_FAILURE_TYPE 0 42 #define ISCSI_INST_LAST_FAILURE_TYPE 0
44 #define ISCSI_DISCONTINUITY_TIME 0 43 #define ISCSI_DISCONTINUITY_TIME 0
45 44
46 #define ISCSI_NODE_INDEX 1 45 #define ISCSI_NODE_INDEX 1
47 46
48 #define ISPRINT(a) ((a >= ' ') && (a <= '~')) 47 #define ISPRINT(a) ((a >= ' ') && (a <= '~'))
49 48
50 /**************************************************************************** 49 /****************************************************************************
51 * iSCSI MIB Tables 50 * iSCSI MIB Tables
52 ****************************************************************************/ 51 ****************************************************************************/
53 /* 52 /*
54 * Instance Attributes Table 53 * Instance Attributes Table
55 */ 54 */
56 CONFIGFS_EATTR_STRUCT(iscsi_stat_instance, iscsi_wwn_stat_grps); 55 CONFIGFS_EATTR_STRUCT(iscsi_stat_instance, iscsi_wwn_stat_grps);
57 #define ISCSI_STAT_INSTANCE_ATTR(_name, _mode) \ 56 #define ISCSI_STAT_INSTANCE_ATTR(_name, _mode) \
58 static struct iscsi_stat_instance_attribute \ 57 static struct iscsi_stat_instance_attribute \
59 iscsi_stat_instance_##_name = \ 58 iscsi_stat_instance_##_name = \
60 __CONFIGFS_EATTR(_name, _mode, \ 59 __CONFIGFS_EATTR(_name, _mode, \
61 iscsi_stat_instance_show_attr_##_name, \ 60 iscsi_stat_instance_show_attr_##_name, \
62 iscsi_stat_instance_store_attr_##_name); 61 iscsi_stat_instance_store_attr_##_name);
63 62
64 #define ISCSI_STAT_INSTANCE_ATTR_RO(_name) \ 63 #define ISCSI_STAT_INSTANCE_ATTR_RO(_name) \
65 static struct iscsi_stat_instance_attribute \ 64 static struct iscsi_stat_instance_attribute \
66 iscsi_stat_instance_##_name = \ 65 iscsi_stat_instance_##_name = \
67 __CONFIGFS_EATTR_RO(_name, \ 66 __CONFIGFS_EATTR_RO(_name, \
68 iscsi_stat_instance_show_attr_##_name); 67 iscsi_stat_instance_show_attr_##_name);
69 68
70 static ssize_t iscsi_stat_instance_show_attr_inst( 69 static ssize_t iscsi_stat_instance_show_attr_inst(
71 struct iscsi_wwn_stat_grps *igrps, char *page) 70 struct iscsi_wwn_stat_grps *igrps, char *page)
72 { 71 {
73 struct iscsi_tiqn *tiqn = container_of(igrps, 72 struct iscsi_tiqn *tiqn = container_of(igrps,
74 struct iscsi_tiqn, tiqn_stat_grps); 73 struct iscsi_tiqn, tiqn_stat_grps);
75 74
76 return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index); 75 return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
77 } 76 }
78 ISCSI_STAT_INSTANCE_ATTR_RO(inst); 77 ISCSI_STAT_INSTANCE_ATTR_RO(inst);
79 78
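For readers new to the extended-attribute macros: CONFIGFS_EATTR_STRUCT() and the _ATTR_RO() wrapper reduce to a typed attribute struct plus a named, statically initialized instance of it. Roughly — a from-memory sketch of the target/configfs_macros.h conventions, not part of this diff:

	struct iscsi_stat_instance_attribute {
		struct configfs_attribute attr;
		ssize_t (*show)(struct iscsi_wwn_stat_grps *, char *);
		ssize_t (*store)(struct iscsi_wwn_stat_grps *,
				 const char *, size_t);
	};

ISCSI_STAT_INSTANCE_ATTR_RO(inst) then declares iscsi_stat_instance_inst with .show wired to the function above and no .store.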
80 static ssize_t iscsi_stat_instance_show_attr_min_ver( 79 static ssize_t iscsi_stat_instance_show_attr_min_ver(
81 struct iscsi_wwn_stat_grps *igrps, char *page) 80 struct iscsi_wwn_stat_grps *igrps, char *page)
82 { 81 {
83 return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DRAFT20_VERSION); 82 return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DRAFT20_VERSION);
84 } 83 }
85 ISCSI_STAT_INSTANCE_ATTR_RO(min_ver); 84 ISCSI_STAT_INSTANCE_ATTR_RO(min_ver);
86 85
87 static ssize_t iscsi_stat_instance_show_attr_max_ver( 86 static ssize_t iscsi_stat_instance_show_attr_max_ver(
88 struct iscsi_wwn_stat_grps *igrps, char *page) 87 struct iscsi_wwn_stat_grps *igrps, char *page)
89 { 88 {
90 return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DRAFT20_VERSION); 89 return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DRAFT20_VERSION);
91 } 90 }
92 ISCSI_STAT_INSTANCE_ATTR_RO(max_ver); 91 ISCSI_STAT_INSTANCE_ATTR_RO(max_ver);
93 92
94 static ssize_t iscsi_stat_instance_show_attr_portals( 93 static ssize_t iscsi_stat_instance_show_attr_portals(
95 struct iscsi_wwn_stat_grps *igrps, char *page) 94 struct iscsi_wwn_stat_grps *igrps, char *page)
96 { 95 {
97 struct iscsi_tiqn *tiqn = container_of(igrps, 96 struct iscsi_tiqn *tiqn = container_of(igrps,
98 struct iscsi_tiqn, tiqn_stat_grps); 97 struct iscsi_tiqn, tiqn_stat_grps);
99 98
100 return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_num_tpg_nps); 99 return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_num_tpg_nps);
101 } 100 }
102 ISCSI_STAT_INSTANCE_ATTR_RO(portals); 101 ISCSI_STAT_INSTANCE_ATTR_RO(portals);
103 102
104 static ssize_t iscsi_stat_instance_show_attr_nodes( 103 static ssize_t iscsi_stat_instance_show_attr_nodes(
105 struct iscsi_wwn_stat_grps *igrps, char *page) 104 struct iscsi_wwn_stat_grps *igrps, char *page)
106 { 105 {
107 return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_INST_NUM_NODES); 106 return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_INST_NUM_NODES);
108 } 107 }
109 ISCSI_STAT_INSTANCE_ATTR_RO(nodes); 108 ISCSI_STAT_INSTANCE_ATTR_RO(nodes);
110 109
111 static ssize_t iscsi_stat_instance_show_attr_sessions( 110 static ssize_t iscsi_stat_instance_show_attr_sessions(
112 struct iscsi_wwn_stat_grps *igrps, char *page) 111 struct iscsi_wwn_stat_grps *igrps, char *page)
113 { 112 {
114 struct iscsi_tiqn *tiqn = container_of(igrps, 113 struct iscsi_tiqn *tiqn = container_of(igrps,
115 struct iscsi_tiqn, tiqn_stat_grps); 114 struct iscsi_tiqn, tiqn_stat_grps);
116 115
117 return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_nsessions); 116 return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_nsessions);
118 } 117 }
119 ISCSI_STAT_INSTANCE_ATTR_RO(sessions); 118 ISCSI_STAT_INSTANCE_ATTR_RO(sessions);
120 119
121 static ssize_t iscsi_stat_instance_show_attr_fail_sess( 120 static ssize_t iscsi_stat_instance_show_attr_fail_sess(
122 struct iscsi_wwn_stat_grps *igrps, char *page) 121 struct iscsi_wwn_stat_grps *igrps, char *page)
123 { 122 {
124 struct iscsi_tiqn *tiqn = container_of(igrps, 123 struct iscsi_tiqn *tiqn = container_of(igrps,
125 struct iscsi_tiqn, tiqn_stat_grps); 124 struct iscsi_tiqn, tiqn_stat_grps);
126 struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats; 125 struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
127 u32 sess_err_count; 126 u32 sess_err_count;
128 127
129 spin_lock_bh(&sess_err->lock); 128 spin_lock_bh(&sess_err->lock);
130 sess_err_count = (sess_err->digest_errors + 129 sess_err_count = (sess_err->digest_errors +
131 sess_err->cxn_timeout_errors + 130 sess_err->cxn_timeout_errors +
132 sess_err->pdu_format_errors); 131 sess_err->pdu_format_errors);
133 spin_unlock_bh(&sess_err->lock); 132 spin_unlock_bh(&sess_err->lock);
134 133
135 return snprintf(page, PAGE_SIZE, "%u\n", sess_err_count); 134 return snprintf(page, PAGE_SIZE, "%u\n", sess_err_count);
136 } 135 }
137 ISCSI_STAT_INSTANCE_ATTR_RO(fail_sess); 136 ISCSI_STAT_INSTANCE_ATTR_RO(fail_sess);
138 137
139 static ssize_t iscsi_stat_instance_show_attr_fail_type( 138 static ssize_t iscsi_stat_instance_show_attr_fail_type(
140 struct iscsi_wwn_stat_grps *igrps, char *page) 139 struct iscsi_wwn_stat_grps *igrps, char *page)
141 { 140 {
142 struct iscsi_tiqn *tiqn = container_of(igrps, 141 struct iscsi_tiqn *tiqn = container_of(igrps,
143 struct iscsi_tiqn, tiqn_stat_grps); 142 struct iscsi_tiqn, tiqn_stat_grps);
144 struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats; 143 struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
145 144
146 return snprintf(page, PAGE_SIZE, "%u\n", 145 return snprintf(page, PAGE_SIZE, "%u\n",
147 sess_err->last_sess_failure_type); 146 sess_err->last_sess_failure_type);
148 } 147 }
149 ISCSI_STAT_INSTANCE_ATTR_RO(fail_type); 148 ISCSI_STAT_INSTANCE_ATTR_RO(fail_type);
150 149
151 static ssize_t iscsi_stat_instance_show_attr_fail_rem_name( 150 static ssize_t iscsi_stat_instance_show_attr_fail_rem_name(
152 struct iscsi_wwn_stat_grps *igrps, char *page) 151 struct iscsi_wwn_stat_grps *igrps, char *page)
153 { 152 {
154 struct iscsi_tiqn *tiqn = container_of(igrps, 153 struct iscsi_tiqn *tiqn = container_of(igrps,
155 struct iscsi_tiqn, tiqn_stat_grps); 154 struct iscsi_tiqn, tiqn_stat_grps);
156 struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats; 155 struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
157 156
158 return snprintf(page, PAGE_SIZE, "%s\n", 157 return snprintf(page, PAGE_SIZE, "%s\n",
159 sess_err->last_sess_fail_rem_name[0] ? 158 sess_err->last_sess_fail_rem_name[0] ?
160 sess_err->last_sess_fail_rem_name : NONE); 159 sess_err->last_sess_fail_rem_name : NONE);
161 } 160 }
162 ISCSI_STAT_INSTANCE_ATTR_RO(fail_rem_name); 161 ISCSI_STAT_INSTANCE_ATTR_RO(fail_rem_name);
163 162
164 static ssize_t iscsi_stat_instance_show_attr_disc_time( 163 static ssize_t iscsi_stat_instance_show_attr_disc_time(
165 struct iscsi_wwn_stat_grps *igrps, char *page) 164 struct iscsi_wwn_stat_grps *igrps, char *page)
166 { 165 {
167 return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DISCONTINUITY_TIME); 166 return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DISCONTINUITY_TIME);
168 } 167 }
169 ISCSI_STAT_INSTANCE_ATTR_RO(disc_time); 168 ISCSI_STAT_INSTANCE_ATTR_RO(disc_time);
170 169
171 static ssize_t iscsi_stat_instance_show_attr_description( 170 static ssize_t iscsi_stat_instance_show_attr_description(
172 struct iscsi_wwn_stat_grps *igrps, char *page) 171 struct iscsi_wwn_stat_grps *igrps, char *page)
173 { 172 {
174 return snprintf(page, PAGE_SIZE, "%s\n", ISCSI_INST_DESCR); 173 return snprintf(page, PAGE_SIZE, "%s\n", ISCSI_INST_DESCR);
175 } 174 }
176 ISCSI_STAT_INSTANCE_ATTR_RO(description); 175 ISCSI_STAT_INSTANCE_ATTR_RO(description);
177 176
178 static ssize_t iscsi_stat_instance_show_attr_vendor( 177 static ssize_t iscsi_stat_instance_show_attr_vendor(
179 struct iscsi_wwn_stat_grps *igrps, char *page) 178 struct iscsi_wwn_stat_grps *igrps, char *page)
180 { 179 {
181 return snprintf(page, PAGE_SIZE, "RisingTide Systems iSCSI-Target\n"); 180 return snprintf(page, PAGE_SIZE, "RisingTide Systems iSCSI-Target\n");
182 } 181 }
183 ISCSI_STAT_INSTANCE_ATTR_RO(vendor); 182 ISCSI_STAT_INSTANCE_ATTR_RO(vendor);
184 183
185 static ssize_t iscsi_stat_instance_show_attr_version( 184 static ssize_t iscsi_stat_instance_show_attr_version(
186 struct iscsi_wwn_stat_grps *igrps, char *page) 185 struct iscsi_wwn_stat_grps *igrps, char *page)
187 { 186 {
188 return snprintf(page, PAGE_SIZE, "%s\n", ISCSIT_VERSION); 187 return snprintf(page, PAGE_SIZE, "%s\n", ISCSIT_VERSION);
189 } 188 }
190 ISCSI_STAT_INSTANCE_ATTR_RO(version); 189 ISCSI_STAT_INSTANCE_ATTR_RO(version);
191 190
192 CONFIGFS_EATTR_OPS(iscsi_stat_instance, iscsi_wwn_stat_grps, 191 CONFIGFS_EATTR_OPS(iscsi_stat_instance, iscsi_wwn_stat_grps,
193 iscsi_instance_group); 192 iscsi_instance_group);
194 193
195 static struct configfs_attribute *iscsi_stat_instance_attrs[] = { 194 static struct configfs_attribute *iscsi_stat_instance_attrs[] = {
196 &iscsi_stat_instance_inst.attr, 195 &iscsi_stat_instance_inst.attr,
197 &iscsi_stat_instance_min_ver.attr, 196 &iscsi_stat_instance_min_ver.attr,
198 &iscsi_stat_instance_max_ver.attr, 197 &iscsi_stat_instance_max_ver.attr,
199 &iscsi_stat_instance_portals.attr, 198 &iscsi_stat_instance_portals.attr,
200 &iscsi_stat_instance_nodes.attr, 199 &iscsi_stat_instance_nodes.attr,
201 &iscsi_stat_instance_sessions.attr, 200 &iscsi_stat_instance_sessions.attr,
202 &iscsi_stat_instance_fail_sess.attr, 201 &iscsi_stat_instance_fail_sess.attr,
203 &iscsi_stat_instance_fail_type.attr, 202 &iscsi_stat_instance_fail_type.attr,
204 &iscsi_stat_instance_fail_rem_name.attr, 203 &iscsi_stat_instance_fail_rem_name.attr,
205 &iscsi_stat_instance_disc_time.attr, 204 &iscsi_stat_instance_disc_time.attr,
206 &iscsi_stat_instance_description.attr, 205 &iscsi_stat_instance_description.attr,
207 &iscsi_stat_instance_vendor.attr, 206 &iscsi_stat_instance_vendor.attr,
208 &iscsi_stat_instance_version.attr, 207 &iscsi_stat_instance_version.attr,
209 NULL, 208 NULL,
210 }; 209 };
211 210
212 static struct configfs_item_operations iscsi_stat_instance_item_ops = { 211 static struct configfs_item_operations iscsi_stat_instance_item_ops = {
213 .show_attribute = iscsi_stat_instance_attr_show, 212 .show_attribute = iscsi_stat_instance_attr_show,
214 .store_attribute = iscsi_stat_instance_attr_store, 213 .store_attribute = iscsi_stat_instance_attr_store,
215 }; 214 };
216 215
217 struct config_item_type iscsi_stat_instance_cit = { 216 struct config_item_type iscsi_stat_instance_cit = {
218 .ct_item_ops = &iscsi_stat_instance_item_ops, 217 .ct_item_ops = &iscsi_stat_instance_item_ops,
219 .ct_attrs = iscsi_stat_instance_attrs, 218 .ct_attrs = iscsi_stat_instance_attrs,
220 .ct_owner = THIS_MODULE, 219 .ct_owner = THIS_MODULE,
221 }; 220 };
222 221
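The instance table above establishes the template that every stats table in this file repeats, so it is worth spelling out once:

	/*
	 * 1. CONFIGFS_EATTR_STRUCT(<table>, <container>)  - typed attribute
	 * 2. show functions + <TABLE>_ATTR_RO(<name>)     - one per attribute
	 * 3. struct configfs_attribute *<table>_attrs[]   - NULL-terminated
	 * 4. configfs_item_operations + config_item_type  - wires the
	 *    generated <table>_attr_show/_store dispatchers into configfs
	 */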
223 /* 222 /*
224 * Instance Session Failure Stats Table 223 * Instance Session Failure Stats Table
225 */ 224 */
226 CONFIGFS_EATTR_STRUCT(iscsi_stat_sess_err, iscsi_wwn_stat_grps); 225 CONFIGFS_EATTR_STRUCT(iscsi_stat_sess_err, iscsi_wwn_stat_grps);
227 #define ISCSI_STAT_SESS_ERR_ATTR(_name, _mode) \ 226 #define ISCSI_STAT_SESS_ERR_ATTR(_name, _mode) \
228 static struct iscsi_stat_sess_err_attribute \ 227 static struct iscsi_stat_sess_err_attribute \
229 iscsi_stat_sess_err_##_name = \ 228 iscsi_stat_sess_err_##_name = \
230 __CONFIGFS_EATTR(_name, _mode, \ 229 __CONFIGFS_EATTR(_name, _mode, \
231 iscsi_stat_sess_err_show_attr_##_name, \ 230 iscsi_stat_sess_err_show_attr_##_name, \
232 iscsi_stat_sess_err_store_attr_##_name); 231 iscsi_stat_sess_err_store_attr_##_name);
233 232
234 #define ISCSI_STAT_SESS_ERR_ATTR_RO(_name) \ 233 #define ISCSI_STAT_SESS_ERR_ATTR_RO(_name) \
235 static struct iscsi_stat_sess_err_attribute \ 234 static struct iscsi_stat_sess_err_attribute \
236 iscsi_stat_sess_err_##_name = \ 235 iscsi_stat_sess_err_##_name = \
237 __CONFIGFS_EATTR_RO(_name, \ 236 __CONFIGFS_EATTR_RO(_name, \
238 iscsi_stat_sess_err_show_attr_##_name); 237 iscsi_stat_sess_err_show_attr_##_name);
239 238
240 static ssize_t iscsi_stat_sess_err_show_attr_inst( 239 static ssize_t iscsi_stat_sess_err_show_attr_inst(
241 struct iscsi_wwn_stat_grps *igrps, char *page) 240 struct iscsi_wwn_stat_grps *igrps, char *page)
242 { 241 {
243 struct iscsi_tiqn *tiqn = container_of(igrps, 242 struct iscsi_tiqn *tiqn = container_of(igrps,
244 struct iscsi_tiqn, tiqn_stat_grps); 243 struct iscsi_tiqn, tiqn_stat_grps);
245 244
246 return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index); 245 return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
247 } 246 }
248 ISCSI_STAT_SESS_ERR_ATTR_RO(inst); 247 ISCSI_STAT_SESS_ERR_ATTR_RO(inst);
249 248
250 static ssize_t iscsi_stat_sess_err_show_attr_digest_errors( 249 static ssize_t iscsi_stat_sess_err_show_attr_digest_errors(
251 struct iscsi_wwn_stat_grps *igrps, char *page) 250 struct iscsi_wwn_stat_grps *igrps, char *page)
252 { 251 {
253 struct iscsi_tiqn *tiqn = container_of(igrps, 252 struct iscsi_tiqn *tiqn = container_of(igrps,
254 struct iscsi_tiqn, tiqn_stat_grps); 253 struct iscsi_tiqn, tiqn_stat_grps);
255 struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats; 254 struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
256 255
257 return snprintf(page, PAGE_SIZE, "%u\n", sess_err->digest_errors); 256 return snprintf(page, PAGE_SIZE, "%u\n", sess_err->digest_errors);
258 } 257 }
259 ISCSI_STAT_SESS_ERR_ATTR_RO(digest_errors); 258 ISCSI_STAT_SESS_ERR_ATTR_RO(digest_errors);
260 259
261 static ssize_t iscsi_stat_sess_err_show_attr_cxn_errors( 260 static ssize_t iscsi_stat_sess_err_show_attr_cxn_errors(
262 struct iscsi_wwn_stat_grps *igrps, char *page) 261 struct iscsi_wwn_stat_grps *igrps, char *page)
263 { 262 {
264 struct iscsi_tiqn *tiqn = container_of(igrps, 263 struct iscsi_tiqn *tiqn = container_of(igrps,
265 struct iscsi_tiqn, tiqn_stat_grps); 264 struct iscsi_tiqn, tiqn_stat_grps);
266 struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats; 265 struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
267 266
268 return snprintf(page, PAGE_SIZE, "%u\n", sess_err->cxn_timeout_errors); 267 return snprintf(page, PAGE_SIZE, "%u\n", sess_err->cxn_timeout_errors);
269 } 268 }
270 ISCSI_STAT_SESS_ERR_ATTR_RO(cxn_errors); 269 ISCSI_STAT_SESS_ERR_ATTR_RO(cxn_errors);
271 270
272 static ssize_t iscsi_stat_sess_err_show_attr_format_errors( 271 static ssize_t iscsi_stat_sess_err_show_attr_format_errors(
273 struct iscsi_wwn_stat_grps *igrps, char *page) 272 struct iscsi_wwn_stat_grps *igrps, char *page)
274 { 273 {
275 struct iscsi_tiqn *tiqn = container_of(igrps, 274 struct iscsi_tiqn *tiqn = container_of(igrps,
276 struct iscsi_tiqn, tiqn_stat_grps); 275 struct iscsi_tiqn, tiqn_stat_grps);
277 struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats; 276 struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
278 277
279 return snprintf(page, PAGE_SIZE, "%u\n", sess_err->pdu_format_errors); 278 return snprintf(page, PAGE_SIZE, "%u\n", sess_err->pdu_format_errors);
280 } 279 }
281 ISCSI_STAT_SESS_ERR_ATTR_RO(format_errors); 280 ISCSI_STAT_SESS_ERR_ATTR_RO(format_errors);
282 281
283 CONFIGFS_EATTR_OPS(iscsi_stat_sess_err, iscsi_wwn_stat_grps, 282 CONFIGFS_EATTR_OPS(iscsi_stat_sess_err, iscsi_wwn_stat_grps,
284 iscsi_sess_err_group); 283 iscsi_sess_err_group);
285 284
286 static struct configfs_attribute *iscsi_stat_sess_err_attrs[] = { 285 static struct configfs_attribute *iscsi_stat_sess_err_attrs[] = {
287 &iscsi_stat_sess_err_inst.attr, 286 &iscsi_stat_sess_err_inst.attr,
288 &iscsi_stat_sess_err_digest_errors.attr, 287 &iscsi_stat_sess_err_digest_errors.attr,
289 &iscsi_stat_sess_err_cxn_errors.attr, 288 &iscsi_stat_sess_err_cxn_errors.attr,
290 &iscsi_stat_sess_err_format_errors.attr, 289 &iscsi_stat_sess_err_format_errors.attr,
291 NULL, 290 NULL,
292 }; 291 };
293 292
294 static struct configfs_item_operations iscsi_stat_sess_err_item_ops = { 293 static struct configfs_item_operations iscsi_stat_sess_err_item_ops = {
295 .show_attribute = iscsi_stat_sess_err_attr_show, 294 .show_attribute = iscsi_stat_sess_err_attr_show,
296 .store_attribute = iscsi_stat_sess_err_attr_store, 295 .store_attribute = iscsi_stat_sess_err_attr_store,
297 }; 296 };
298 297
299 struct config_item_type iscsi_stat_sess_err_cit = { 298 struct config_item_type iscsi_stat_sess_err_cit = {
300 .ct_item_ops = &iscsi_stat_sess_err_item_ops, 299 .ct_item_ops = &iscsi_stat_sess_err_item_ops,
301 .ct_attrs = iscsi_stat_sess_err_attrs, 300 .ct_attrs = iscsi_stat_sess_err_attrs,
302 .ct_owner = THIS_MODULE, 301 .ct_owner = THIS_MODULE,
303 }; 302 };
304 303
305 /* 304 /*
306 * Target Attributes Table 305 * Target Attributes Table
307 */ 306 */
308 CONFIGFS_EATTR_STRUCT(iscsi_stat_tgt_attr, iscsi_wwn_stat_grps); 307 CONFIGFS_EATTR_STRUCT(iscsi_stat_tgt_attr, iscsi_wwn_stat_grps);
309 #define ISCSI_STAT_TGT_ATTR(_name, _mode) \ 308 #define ISCSI_STAT_TGT_ATTR(_name, _mode) \
310 static struct iscsi_stat_tgt_attr_attribute \ 309 static struct iscsi_stat_tgt_attr_attribute \
311 iscsi_stat_tgt_attr_##_name = \ 310 iscsi_stat_tgt_attr_##_name = \
312 __CONFIGFS_EATTR(_name, _mode, \ 311 __CONFIGFS_EATTR(_name, _mode, \
313 iscsi_stat_tgt_attr_show_attr_##_name, \ 312 iscsi_stat_tgt_attr_show_attr_##_name, \
314 iscsi_stat_tgt_attr_store_attr_##_name); 313 iscsi_stat_tgt_attr_store_attr_##_name);
315 314
316 #define ISCSI_STAT_TGT_ATTR_RO(_name) \ 315 #define ISCSI_STAT_TGT_ATTR_RO(_name) \
317 static struct iscsi_stat_tgt_attr_attribute \ 316 static struct iscsi_stat_tgt_attr_attribute \
318 iscsi_stat_tgt_attr_##_name = \ 317 iscsi_stat_tgt_attr_##_name = \
319 __CONFIGFS_EATTR_RO(_name, \ 318 __CONFIGFS_EATTR_RO(_name, \
320 iscsi_stat_tgt_attr_show_attr_##_name); 319 iscsi_stat_tgt_attr_show_attr_##_name);
321 320
322 static ssize_t iscsi_stat_tgt_attr_show_attr_inst( 321 static ssize_t iscsi_stat_tgt_attr_show_attr_inst(
323 struct iscsi_wwn_stat_grps *igrps, char *page) 322 struct iscsi_wwn_stat_grps *igrps, char *page)
324 { 323 {
325 struct iscsi_tiqn *tiqn = container_of(igrps, 324 struct iscsi_tiqn *tiqn = container_of(igrps,
326 struct iscsi_tiqn, tiqn_stat_grps); 325 struct iscsi_tiqn, tiqn_stat_grps);
327 326
328 return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index); 327 return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
329 } 328 }
330 ISCSI_STAT_TGT_ATTR_RO(inst); 329 ISCSI_STAT_TGT_ATTR_RO(inst);
331 330
332 static ssize_t iscsi_stat_tgt_attr_show_attr_indx( 331 static ssize_t iscsi_stat_tgt_attr_show_attr_indx(
333 struct iscsi_wwn_stat_grps *igrps, char *page) 332 struct iscsi_wwn_stat_grps *igrps, char *page)
334 { 333 {
335 return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_NODE_INDEX); 334 return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_NODE_INDEX);
336 } 335 }
337 ISCSI_STAT_TGT_ATTR_RO(indx); 336 ISCSI_STAT_TGT_ATTR_RO(indx);
338 337
339 static ssize_t iscsi_stat_tgt_attr_show_attr_login_fails( 338 static ssize_t iscsi_stat_tgt_attr_show_attr_login_fails(
340 struct iscsi_wwn_stat_grps *igrps, char *page) 339 struct iscsi_wwn_stat_grps *igrps, char *page)
341 { 340 {
342 struct iscsi_tiqn *tiqn = container_of(igrps, 341 struct iscsi_tiqn *tiqn = container_of(igrps,
343 struct iscsi_tiqn, tiqn_stat_grps); 342 struct iscsi_tiqn, tiqn_stat_grps);
344 struct iscsi_login_stats *lstat = &tiqn->login_stats; 343 struct iscsi_login_stats *lstat = &tiqn->login_stats;
345 u32 fail_count; 344 u32 fail_count;
346 345
347 spin_lock(&lstat->lock); 346 spin_lock(&lstat->lock);
348 fail_count = (lstat->redirects + lstat->authorize_fails + 347 fail_count = (lstat->redirects + lstat->authorize_fails +
349 lstat->authenticate_fails + lstat->negotiate_fails + 348 lstat->authenticate_fails + lstat->negotiate_fails +
350 lstat->other_fails); 349 lstat->other_fails);
351 spin_unlock(&lstat->lock); 350 spin_unlock(&lstat->lock);
352 351
353 return snprintf(page, PAGE_SIZE, "%u\n", fail_count); 352 return snprintf(page, PAGE_SIZE, "%u\n", fail_count);
354 } 353 }
355 ISCSI_STAT_TGT_ATTR_RO(login_fails); 354 ISCSI_STAT_TGT_ATTR_RO(login_fails);
356 355
357 static ssize_t iscsi_stat_tgt_attr_show_attr_last_fail_time( 356 static ssize_t iscsi_stat_tgt_attr_show_attr_last_fail_time(
358 struct iscsi_wwn_stat_grps *igrps, char *page) 357 struct iscsi_wwn_stat_grps *igrps, char *page)
359 { 358 {
360 struct iscsi_tiqn *tiqn = container_of(igrps, 359 struct iscsi_tiqn *tiqn = container_of(igrps,
361 struct iscsi_tiqn, tiqn_stat_grps); 360 struct iscsi_tiqn, tiqn_stat_grps);
362 struct iscsi_login_stats *lstat = &tiqn->login_stats; 361 struct iscsi_login_stats *lstat = &tiqn->login_stats;
363 u32 last_fail_time; 362 u32 last_fail_time;
364 363
365 spin_lock(&lstat->lock); 364 spin_lock(&lstat->lock);
366 last_fail_time = lstat->last_fail_time ? 365 last_fail_time = lstat->last_fail_time ?
367 (u32)(((u32)lstat->last_fail_time - 366 (u32)(((u32)lstat->last_fail_time -
368 INITIAL_JIFFIES) * 100 / HZ) : 0; 367 INITIAL_JIFFIES) * 100 / HZ) : 0;
369 spin_unlock(&lstat->lock); 368 spin_unlock(&lstat->lock);
370 369
371 return snprintf(page, PAGE_SIZE, "%u\n", last_fail_time); 370 return snprintf(page, PAGE_SIZE, "%u\n", last_fail_time);
372 } 371 }
373 ISCSI_STAT_TGT_ATTR_RO(last_fail_time); 372 ISCSI_STAT_TGT_ATTR_RO(last_fail_time);
374 373
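The last_fail_time conversion above turns a jiffies timestamp into hundredths of a second since boot, the TimeTicks unit the iSCSI MIB expects; subtracting INITIAL_JIFFIES normalizes for the kernel starting its jiffies counter offset from zero. As a hypothetical helper, not present in this file:

	/* jiffies -> SNMP TimeTicks (1/100 s); e.g. with HZ == 1000,
	 * 2500 jiffies past INITIAL_JIFFIES reads back as 250 (2.5 s). */
	static inline u32 jiffies_to_timeticks(unsigned long j)
	{
		return (u32)((j - INITIAL_JIFFIES) * 100 / HZ);
	}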
375 static ssize_t iscsi_stat_tgt_attr_show_attr_last_fail_type( 374 static ssize_t iscsi_stat_tgt_attr_show_attr_last_fail_type(
376 struct iscsi_wwn_stat_grps *igrps, char *page) 375 struct iscsi_wwn_stat_grps *igrps, char *page)
377 { 376 {
378 struct iscsi_tiqn *tiqn = container_of(igrps, 377 struct iscsi_tiqn *tiqn = container_of(igrps,
379 struct iscsi_tiqn, tiqn_stat_grps); 378 struct iscsi_tiqn, tiqn_stat_grps);
380 struct iscsi_login_stats *lstat = &tiqn->login_stats; 379 struct iscsi_login_stats *lstat = &tiqn->login_stats;
381 u32 last_fail_type; 380 u32 last_fail_type;
382 381
383 spin_lock(&lstat->lock); 382 spin_lock(&lstat->lock);
384 last_fail_type = lstat->last_fail_type; 383 last_fail_type = lstat->last_fail_type;
385 spin_unlock(&lstat->lock); 384 spin_unlock(&lstat->lock);
386 385
387 return snprintf(page, PAGE_SIZE, "%u\n", last_fail_type); 386 return snprintf(page, PAGE_SIZE, "%u\n", last_fail_type);
388 } 387 }
389 ISCSI_STAT_TGT_ATTR_RO(last_fail_type); 388 ISCSI_STAT_TGT_ATTR_RO(last_fail_type);
390 389
391 static ssize_t iscsi_stat_tgt_attr_show_attr_fail_intr_name( 390 static ssize_t iscsi_stat_tgt_attr_show_attr_fail_intr_name(
392 struct iscsi_wwn_stat_grps *igrps, char *page) 391 struct iscsi_wwn_stat_grps *igrps, char *page)
393 { 392 {
394 struct iscsi_tiqn *tiqn = container_of(igrps, 393 struct iscsi_tiqn *tiqn = container_of(igrps,
395 struct iscsi_tiqn, tiqn_stat_grps); 394 struct iscsi_tiqn, tiqn_stat_grps);
396 struct iscsi_login_stats *lstat = &tiqn->login_stats; 395 struct iscsi_login_stats *lstat = &tiqn->login_stats;
397 unsigned char buf[224]; 396 unsigned char buf[224];
398 397
399 spin_lock(&lstat->lock); 398 spin_lock(&lstat->lock);
400 snprintf(buf, 224, "%s", lstat->last_intr_fail_name[0] ? 399 snprintf(buf, 224, "%s", lstat->last_intr_fail_name[0] ?
401 lstat->last_intr_fail_name : NONE); 400 lstat->last_intr_fail_name : NONE);
402 spin_unlock(&lstat->lock); 401 spin_unlock(&lstat->lock);
403 402
404 return snprintf(page, PAGE_SIZE, "%s\n", buf); 403 return snprintf(page, PAGE_SIZE, "%s\n", buf);
405 } 404 }
406 ISCSI_STAT_TGT_ATTR_RO(fail_intr_name); 405 ISCSI_STAT_TGT_ATTR_RO(fail_intr_name);
407 406
408 static ssize_t iscsi_stat_tgt_attr_show_attr_fail_intr_addr_type( 407 static ssize_t iscsi_stat_tgt_attr_show_attr_fail_intr_addr_type(
409 struct iscsi_wwn_stat_grps *igrps, char *page) 408 struct iscsi_wwn_stat_grps *igrps, char *page)
410 { 409 {
411 struct iscsi_tiqn *tiqn = container_of(igrps, 410 struct iscsi_tiqn *tiqn = container_of(igrps,
412 struct iscsi_tiqn, tiqn_stat_grps); 411 struct iscsi_tiqn, tiqn_stat_grps);
413 struct iscsi_login_stats *lstat = &tiqn->login_stats; 412 struct iscsi_login_stats *lstat = &tiqn->login_stats;
414 unsigned char buf[8]; 413 unsigned char buf[8];
415 414
416 spin_lock(&lstat->lock); 415 spin_lock(&lstat->lock);
417 snprintf(buf, 8, "%s", (lstat->last_intr_fail_ip_family == AF_INET6) ? 416 snprintf(buf, 8, "%s", (lstat->last_intr_fail_ip_family == AF_INET6) ?
418 "ipv6" : "ipv4"); 417 "ipv6" : "ipv4");
419 spin_unlock(&lstat->lock); 418 spin_unlock(&lstat->lock);
420 419
421 return snprintf(page, PAGE_SIZE, "%s\n", buf); 420 return snprintf(page, PAGE_SIZE, "%s\n", buf);
422 } 421 }
423 ISCSI_STAT_TGT_ATTR_RO(fail_intr_addr_type); 422 ISCSI_STAT_TGT_ATTR_RO(fail_intr_addr_type);
424 423
425 static ssize_t iscsi_stat_tgt_attr_show_attr_fail_intr_addr( 424 static ssize_t iscsi_stat_tgt_attr_show_attr_fail_intr_addr(
426 struct iscsi_wwn_stat_grps *igrps, char *page) 425 struct iscsi_wwn_stat_grps *igrps, char *page)
427 { 426 {
428 struct iscsi_tiqn *tiqn = container_of(igrps, 427 struct iscsi_tiqn *tiqn = container_of(igrps,
429 struct iscsi_tiqn, tiqn_stat_grps); 428 struct iscsi_tiqn, tiqn_stat_grps);
430 struct iscsi_login_stats *lstat = &tiqn->login_stats; 429 struct iscsi_login_stats *lstat = &tiqn->login_stats;
431 unsigned char buf[32]; 430 unsigned char buf[32];
432 431
433 spin_lock(&lstat->lock); 432 spin_lock(&lstat->lock);
434 if (lstat->last_intr_fail_ip_family == AF_INET6) 433 if (lstat->last_intr_fail_ip_family == AF_INET6)
435 snprintf(buf, 32, "[%s]", lstat->last_intr_fail_ip_addr); 434 snprintf(buf, 32, "[%s]", lstat->last_intr_fail_ip_addr);
436 else 435 else
437 snprintf(buf, 32, "%s", lstat->last_intr_fail_ip_addr); 436 snprintf(buf, 32, "%s", lstat->last_intr_fail_ip_addr);
438 spin_unlock(&lstat->lock); 437 spin_unlock(&lstat->lock);
439 438
440 return snprintf(page, PAGE_SIZE, "%s\n", buf); 439 return snprintf(page, PAGE_SIZE, "%s\n", buf);
441 } 440 }
442 ISCSI_STAT_TGT_ATTR_RO(fail_intr_addr); 441 ISCSI_STAT_TGT_ATTR_RO(fail_intr_addr);
443 442
444 CONFIGFS_EATTR_OPS(iscsi_stat_tgt_attr, iscsi_wwn_stat_grps, 443 CONFIGFS_EATTR_OPS(iscsi_stat_tgt_attr, iscsi_wwn_stat_grps,
445 iscsi_tgt_attr_group); 444 iscsi_tgt_attr_group);
446 445
447 static struct configfs_attribute *iscsi_stat_tgt_attr_attrs[] = { 446 static struct configfs_attribute *iscsi_stat_tgt_attr_attrs[] = {
448 &iscsi_stat_tgt_attr_inst.attr, 447 &iscsi_stat_tgt_attr_inst.attr,
449 &iscsi_stat_tgt_attr_indx.attr, 448 &iscsi_stat_tgt_attr_indx.attr,
450 &iscsi_stat_tgt_attr_login_fails.attr, 449 &iscsi_stat_tgt_attr_login_fails.attr,
451 &iscsi_stat_tgt_attr_last_fail_time.attr, 450 &iscsi_stat_tgt_attr_last_fail_time.attr,
452 &iscsi_stat_tgt_attr_last_fail_type.attr, 451 &iscsi_stat_tgt_attr_last_fail_type.attr,
453 &iscsi_stat_tgt_attr_fail_intr_name.attr, 452 &iscsi_stat_tgt_attr_fail_intr_name.attr,
454 &iscsi_stat_tgt_attr_fail_intr_addr_type.attr, 453 &iscsi_stat_tgt_attr_fail_intr_addr_type.attr,
455 &iscsi_stat_tgt_attr_fail_intr_addr.attr, 454 &iscsi_stat_tgt_attr_fail_intr_addr.attr,
456 NULL, 455 NULL,
457 }; 456 };
458 457
459 static struct configfs_item_operations iscsi_stat_tgt_attr_item_ops = { 458 static struct configfs_item_operations iscsi_stat_tgt_attr_item_ops = {
460 .show_attribute = iscsi_stat_tgt_attr_attr_show, 459 .show_attribute = iscsi_stat_tgt_attr_attr_show,
461 .store_attribute = iscsi_stat_tgt_attr_attr_store, 460 .store_attribute = iscsi_stat_tgt_attr_attr_store,
462 }; 461 };
463 462
464 struct config_item_type iscsi_stat_tgt_attr_cit = { 463 struct config_item_type iscsi_stat_tgt_attr_cit = {
465 .ct_item_ops = &iscsi_stat_tgt_attr_item_ops, 464 .ct_item_ops = &iscsi_stat_tgt_attr_item_ops,
466 .ct_attrs = iscsi_stat_tgt_attr_attrs, 465 .ct_attrs = iscsi_stat_tgt_attr_attrs,
467 .ct_owner = THIS_MODULE, 466 .ct_owner = THIS_MODULE,
468 }; 467 };
469 468
470 /* 469 /*
471 * Target Login Stats Table 470 * Target Login Stats Table
472 */ 471 */
473 CONFIGFS_EATTR_STRUCT(iscsi_stat_login, iscsi_wwn_stat_grps); 472 CONFIGFS_EATTR_STRUCT(iscsi_stat_login, iscsi_wwn_stat_grps);
474 #define ISCSI_STAT_LOGIN(_name, _mode) \ 473 #define ISCSI_STAT_LOGIN(_name, _mode) \
475 static struct iscsi_stat_login_attribute \ 474 static struct iscsi_stat_login_attribute \
476 iscsi_stat_login_##_name = \ 475 iscsi_stat_login_##_name = \
477 __CONFIGFS_EATTR(_name, _mode, \ 476 __CONFIGFS_EATTR(_name, _mode, \
478 iscsi_stat_login_show_attr_##_name, \ 477 iscsi_stat_login_show_attr_##_name, \
479 iscsi_stat_login_store_attr_##_name); 478 iscsi_stat_login_store_attr_##_name);
480 479
481 #define ISCSI_STAT_LOGIN_RO(_name) \ 480 #define ISCSI_STAT_LOGIN_RO(_name) \
482 static struct iscsi_stat_login_attribute \ 481 static struct iscsi_stat_login_attribute \
483 iscsi_stat_login_##_name = \ 482 iscsi_stat_login_##_name = \
484 __CONFIGFS_EATTR_RO(_name, \ 483 __CONFIGFS_EATTR_RO(_name, \
485 iscsi_stat_login_show_attr_##_name); 484 iscsi_stat_login_show_attr_##_name);
486 485
487 static ssize_t iscsi_stat_login_show_attr_inst( 486 static ssize_t iscsi_stat_login_show_attr_inst(
488 struct iscsi_wwn_stat_grps *igrps, char *page) 487 struct iscsi_wwn_stat_grps *igrps, char *page)
489 { 488 {
490 struct iscsi_tiqn *tiqn = container_of(igrps, 489 struct iscsi_tiqn *tiqn = container_of(igrps,
491 struct iscsi_tiqn, tiqn_stat_grps); 490 struct iscsi_tiqn, tiqn_stat_grps);
492 491
493 return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index); 492 return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
494 } 493 }
495 ISCSI_STAT_LOGIN_RO(inst); 494 ISCSI_STAT_LOGIN_RO(inst);
496 495
497 static ssize_t iscsi_stat_login_show_attr_indx( 496 static ssize_t iscsi_stat_login_show_attr_indx(
498 struct iscsi_wwn_stat_grps *igrps, char *page) 497 struct iscsi_wwn_stat_grps *igrps, char *page)
499 { 498 {
500 return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_NODE_INDEX); 499 return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_NODE_INDEX);
501 } 500 }
502 ISCSI_STAT_LOGIN_RO(indx); 501 ISCSI_STAT_LOGIN_RO(indx);
503 502
504 static ssize_t iscsi_stat_login_show_attr_accepts( 503 static ssize_t iscsi_stat_login_show_attr_accepts(
505 struct iscsi_wwn_stat_grps *igrps, char *page) 504 struct iscsi_wwn_stat_grps *igrps, char *page)
506 { 505 {
507 struct iscsi_tiqn *tiqn = container_of(igrps, 506 struct iscsi_tiqn *tiqn = container_of(igrps,
508 struct iscsi_tiqn, tiqn_stat_grps); 507 struct iscsi_tiqn, tiqn_stat_grps);
509 struct iscsi_login_stats *lstat = &tiqn->login_stats; 508 struct iscsi_login_stats *lstat = &tiqn->login_stats;
510 ssize_t ret; 509 ssize_t ret;
511 510
512 spin_lock(&lstat->lock); 511 spin_lock(&lstat->lock);
513 ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->accepts); 512 ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->accepts);
514 spin_unlock(&lstat->lock); 513 spin_unlock(&lstat->lock);
515 514
516 return ret; 515 return ret;
517 } 516 }
518 ISCSI_STAT_LOGIN_RO(accepts); 517 ISCSI_STAT_LOGIN_RO(accepts);
519 518
520 static ssize_t iscsi_stat_login_show_attr_other_fails( 519 static ssize_t iscsi_stat_login_show_attr_other_fails(
521 struct iscsi_wwn_stat_grps *igrps, char *page) 520 struct iscsi_wwn_stat_grps *igrps, char *page)
522 { 521 {
523 struct iscsi_tiqn *tiqn = container_of(igrps, 522 struct iscsi_tiqn *tiqn = container_of(igrps,
524 struct iscsi_tiqn, tiqn_stat_grps); 523 struct iscsi_tiqn, tiqn_stat_grps);
525 struct iscsi_login_stats *lstat = &tiqn->login_stats; 524 struct iscsi_login_stats *lstat = &tiqn->login_stats;
526 ssize_t ret; 525 ssize_t ret;
527 526
528 spin_lock(&lstat->lock); 527 spin_lock(&lstat->lock);
529 ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->other_fails); 528 ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->other_fails);
530 spin_unlock(&lstat->lock); 529 spin_unlock(&lstat->lock);
531 530
532 return ret; 531 return ret;
533 } 532 }
534 ISCSI_STAT_LOGIN_RO(other_fails); 533 ISCSI_STAT_LOGIN_RO(other_fails);
535 534
536 static ssize_t iscsi_stat_login_show_attr_redirects( 535 static ssize_t iscsi_stat_login_show_attr_redirects(
537 struct iscsi_wwn_stat_grps *igrps, char *page) 536 struct iscsi_wwn_stat_grps *igrps, char *page)
538 { 537 {
539 struct iscsi_tiqn *tiqn = container_of(igrps, 538 struct iscsi_tiqn *tiqn = container_of(igrps,
540 struct iscsi_tiqn, tiqn_stat_grps); 539 struct iscsi_tiqn, tiqn_stat_grps);
541 struct iscsi_login_stats *lstat = &tiqn->login_stats; 540 struct iscsi_login_stats *lstat = &tiqn->login_stats;
542 ssize_t ret; 541 ssize_t ret;
543 542
544 spin_lock(&lstat->lock); 543 spin_lock(&lstat->lock);
545 ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->redirects); 544 ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->redirects);
546 spin_unlock(&lstat->lock); 545 spin_unlock(&lstat->lock);
547 546
548 return ret; 547 return ret;
549 } 548 }
550 ISCSI_STAT_LOGIN_RO(redirects); 549 ISCSI_STAT_LOGIN_RO(redirects);
551 550
552 static ssize_t iscsi_stat_login_show_attr_authorize_fails( 551 static ssize_t iscsi_stat_login_show_attr_authorize_fails(
553 struct iscsi_wwn_stat_grps *igrps, char *page) 552 struct iscsi_wwn_stat_grps *igrps, char *page)
554 { 553 {
555 struct iscsi_tiqn *tiqn = container_of(igrps, 554 struct iscsi_tiqn *tiqn = container_of(igrps,
556 struct iscsi_tiqn, tiqn_stat_grps); 555 struct iscsi_tiqn, tiqn_stat_grps);
557 struct iscsi_login_stats *lstat = &tiqn->login_stats; 556 struct iscsi_login_stats *lstat = &tiqn->login_stats;
558 ssize_t ret; 557 ssize_t ret;
559 558
560 spin_lock(&lstat->lock); 559 spin_lock(&lstat->lock);
561 ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->authorize_fails); 560 ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->authorize_fails);
562 spin_unlock(&lstat->lock); 561 spin_unlock(&lstat->lock);
563 562
564 return ret; 563 return ret;
565 } 564 }
566 ISCSI_STAT_LOGIN_RO(authorize_fails); 565 ISCSI_STAT_LOGIN_RO(authorize_fails);
567 566
568 static ssize_t iscsi_stat_login_show_attr_authenticate_fails( 567 static ssize_t iscsi_stat_login_show_attr_authenticate_fails(
569 struct iscsi_wwn_stat_grps *igrps, char *page) 568 struct iscsi_wwn_stat_grps *igrps, char *page)
570 { 569 {
571 struct iscsi_tiqn *tiqn = container_of(igrps, 570 struct iscsi_tiqn *tiqn = container_of(igrps,
572 struct iscsi_tiqn, tiqn_stat_grps); 571 struct iscsi_tiqn, tiqn_stat_grps);
573 struct iscsi_login_stats *lstat = &tiqn->login_stats; 572 struct iscsi_login_stats *lstat = &tiqn->login_stats;
574 ssize_t ret; 573 ssize_t ret;
575 574
576 spin_lock(&lstat->lock); 575 spin_lock(&lstat->lock);
577 ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->authenticate_fails); 576 ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->authenticate_fails);
578 spin_unlock(&lstat->lock); 577 spin_unlock(&lstat->lock);
579 578
580 return ret; 579 return ret;
581 } 580 }
582 ISCSI_STAT_LOGIN_RO(authenticate_fails); 581 ISCSI_STAT_LOGIN_RO(authenticate_fails);
583 582
584 static ssize_t iscsi_stat_login_show_attr_negotiate_fails( 583 static ssize_t iscsi_stat_login_show_attr_negotiate_fails(
585 struct iscsi_wwn_stat_grps *igrps, char *page) 584 struct iscsi_wwn_stat_grps *igrps, char *page)
586 { 585 {
587 struct iscsi_tiqn *tiqn = container_of(igrps, 586 struct iscsi_tiqn *tiqn = container_of(igrps,
588 struct iscsi_tiqn, tiqn_stat_grps); 587 struct iscsi_tiqn, tiqn_stat_grps);
589 struct iscsi_login_stats *lstat = &tiqn->login_stats; 588 struct iscsi_login_stats *lstat = &tiqn->login_stats;
590 ssize_t ret; 589 ssize_t ret;
591 590
592 spin_lock(&lstat->lock); 591 spin_lock(&lstat->lock);
593 ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->negotiate_fails); 592 ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->negotiate_fails);
594 spin_unlock(&lstat->lock); 593 spin_unlock(&lstat->lock);
595 594
596 return ret; 595 return ret;
597 } 596 }
598 ISCSI_STAT_LOGIN_RO(negotiate_fails); 597 ISCSI_STAT_LOGIN_RO(negotiate_fails);
599 598
600 CONFIGFS_EATTR_OPS(iscsi_stat_login, iscsi_wwn_stat_grps, 599 CONFIGFS_EATTR_OPS(iscsi_stat_login, iscsi_wwn_stat_grps,
601 iscsi_login_stats_group); 600 iscsi_login_stats_group);
602 601
603 static struct configfs_attribute *iscsi_stat_login_stats_attrs[] = { 602 static struct configfs_attribute *iscsi_stat_login_stats_attrs[] = {
604 &iscsi_stat_login_inst.attr, 603 &iscsi_stat_login_inst.attr,
605 &iscsi_stat_login_indx.attr, 604 &iscsi_stat_login_indx.attr,
606 &iscsi_stat_login_accepts.attr, 605 &iscsi_stat_login_accepts.attr,
607 &iscsi_stat_login_other_fails.attr, 606 &iscsi_stat_login_other_fails.attr,
608 &iscsi_stat_login_redirects.attr, 607 &iscsi_stat_login_redirects.attr,
609 &iscsi_stat_login_authorize_fails.attr, 608 &iscsi_stat_login_authorize_fails.attr,
610 &iscsi_stat_login_authenticate_fails.attr, 609 &iscsi_stat_login_authenticate_fails.attr,
611 &iscsi_stat_login_negotiate_fails.attr, 610 &iscsi_stat_login_negotiate_fails.attr,
612 NULL, 611 NULL,
613 }; 612 };
614 613
615 static struct configfs_item_operations iscsi_stat_login_stats_item_ops = { 614 static struct configfs_item_operations iscsi_stat_login_stats_item_ops = {
616 .show_attribute = iscsi_stat_login_attr_show, 615 .show_attribute = iscsi_stat_login_attr_show,
617 .store_attribute = iscsi_stat_login_attr_store, 616 .store_attribute = iscsi_stat_login_attr_store,
618 }; 617 };
619 618
620 struct config_item_type iscsi_stat_login_cit = { 619 struct config_item_type iscsi_stat_login_cit = {
621 .ct_item_ops = &iscsi_stat_login_stats_item_ops, 620 .ct_item_ops = &iscsi_stat_login_stats_item_ops,
622 .ct_attrs = iscsi_stat_login_stats_attrs, 621 .ct_attrs = iscsi_stat_login_stats_attrs,
623 .ct_owner = THIS_MODULE, 622 .ct_owner = THIS_MODULE,
624 }; 623 };
625 624
626 /* 625 /*
627 * Target Logout Stats Table 626 * Target Logout Stats Table
628 */ 627 */
629 628
630 CONFIGFS_EATTR_STRUCT(iscsi_stat_logout, iscsi_wwn_stat_grps); 629 CONFIGFS_EATTR_STRUCT(iscsi_stat_logout, iscsi_wwn_stat_grps);
631 #define ISCSI_STAT_LOGOUT(_name, _mode) \ 630 #define ISCSI_STAT_LOGOUT(_name, _mode) \
632 static struct iscsi_stat_logout_attribute \ 631 static struct iscsi_stat_logout_attribute \
633 iscsi_stat_logout_##_name = \ 632 iscsi_stat_logout_##_name = \
634 __CONFIGFS_EATTR(_name, _mode, \ 633 __CONFIGFS_EATTR(_name, _mode, \
635 iscsi_stat_logout_show_attr_##_name, \ 634 iscsi_stat_logout_show_attr_##_name, \
636 iscsi_stat_logout_store_attr_##_name); 635 iscsi_stat_logout_store_attr_##_name);
637 636
638 #define ISCSI_STAT_LOGOUT_RO(_name) \ 637 #define ISCSI_STAT_LOGOUT_RO(_name) \
639 static struct iscsi_stat_logout_attribute \ 638 static struct iscsi_stat_logout_attribute \
640 iscsi_stat_logout_##_name = \ 639 iscsi_stat_logout_##_name = \
641 __CONFIGFS_EATTR_RO(_name, \ 640 __CONFIGFS_EATTR_RO(_name, \
642 iscsi_stat_logout_show_attr_##_name); 641 iscsi_stat_logout_show_attr_##_name);
643 642
644 static ssize_t iscsi_stat_logout_show_attr_inst( 643 static ssize_t iscsi_stat_logout_show_attr_inst(
645 struct iscsi_wwn_stat_grps *igrps, char *page) 644 struct iscsi_wwn_stat_grps *igrps, char *page)
646 { 645 {
647 struct iscsi_tiqn *tiqn = container_of(igrps, 646 struct iscsi_tiqn *tiqn = container_of(igrps,
648 struct iscsi_tiqn, tiqn_stat_grps); 647 struct iscsi_tiqn, tiqn_stat_grps);
649 648
650 return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index); 649 return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
651 } 650 }
652 ISCSI_STAT_LOGOUT_RO(inst); 651 ISCSI_STAT_LOGOUT_RO(inst);
653 652
654 static ssize_t iscsi_stat_logout_show_attr_indx( 653 static ssize_t iscsi_stat_logout_show_attr_indx(
655 struct iscsi_wwn_stat_grps *igrps, char *page) 654 struct iscsi_wwn_stat_grps *igrps, char *page)
656 { 655 {
657 return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_NODE_INDEX); 656 return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_NODE_INDEX);
658 } 657 }
659 ISCSI_STAT_LOGOUT_RO(indx); 658 ISCSI_STAT_LOGOUT_RO(indx);
660 659
661 static ssize_t iscsi_stat_logout_show_attr_normal_logouts( 660 static ssize_t iscsi_stat_logout_show_attr_normal_logouts(
662 struct iscsi_wwn_stat_grps *igrps, char *page) 661 struct iscsi_wwn_stat_grps *igrps, char *page)
663 { 662 {
664 struct iscsi_tiqn *tiqn = container_of(igrps, 663 struct iscsi_tiqn *tiqn = container_of(igrps,
665 struct iscsi_tiqn, tiqn_stat_grps); 664 struct iscsi_tiqn, tiqn_stat_grps);
666 struct iscsi_logout_stats *lstats = &tiqn->logout_stats; 665 struct iscsi_logout_stats *lstats = &tiqn->logout_stats;
667 666
668 return snprintf(page, PAGE_SIZE, "%u\n", lstats->normal_logouts); 667 return snprintf(page, PAGE_SIZE, "%u\n", lstats->normal_logouts);
669 } 668 }
670 ISCSI_STAT_LOGOUT_RO(normal_logouts); 669 ISCSI_STAT_LOGOUT_RO(normal_logouts);
671 670
672 static ssize_t iscsi_stat_logout_show_attr_abnormal_logouts( 671 static ssize_t iscsi_stat_logout_show_attr_abnormal_logouts(
673 struct iscsi_wwn_stat_grps *igrps, char *page) 672 struct iscsi_wwn_stat_grps *igrps, char *page)
674 { 673 {
675 struct iscsi_tiqn *tiqn = container_of(igrps, 674 struct iscsi_tiqn *tiqn = container_of(igrps,
676 struct iscsi_tiqn, tiqn_stat_grps); 675 struct iscsi_tiqn, tiqn_stat_grps);
677 struct iscsi_logout_stats *lstats = &tiqn->logout_stats; 676 struct iscsi_logout_stats *lstats = &tiqn->logout_stats;
678 677
679 return snprintf(page, PAGE_SIZE, "%u\n", lstats->abnormal_logouts); 678 return snprintf(page, PAGE_SIZE, "%u\n", lstats->abnormal_logouts);
680 } 679 }
681 ISCSI_STAT_LOGOUT_RO(abnormal_logouts); 680 ISCSI_STAT_LOGOUT_RO(abnormal_logouts);
682 681
683 CONFIGFS_EATTR_OPS(iscsi_stat_logout, iscsi_wwn_stat_grps, 682 CONFIGFS_EATTR_OPS(iscsi_stat_logout, iscsi_wwn_stat_grps,
684 iscsi_logout_stats_group); 683 iscsi_logout_stats_group);
685 684
686 static struct configfs_attribute *iscsi_stat_logout_stats_attrs[] = { 685 static struct configfs_attribute *iscsi_stat_logout_stats_attrs[] = {
687 &iscsi_stat_logout_inst.attr, 686 &iscsi_stat_logout_inst.attr,
688 &iscsi_stat_logout_indx.attr, 687 &iscsi_stat_logout_indx.attr,
689 &iscsi_stat_logout_normal_logouts.attr, 688 &iscsi_stat_logout_normal_logouts.attr,
690 &iscsi_stat_logout_abnormal_logouts.attr, 689 &iscsi_stat_logout_abnormal_logouts.attr,
691 NULL, 690 NULL,
692 }; 691 };
693 692
694 static struct configfs_item_operations iscsi_stat_logout_stats_item_ops = { 693 static struct configfs_item_operations iscsi_stat_logout_stats_item_ops = {
695 .show_attribute = iscsi_stat_logout_attr_show, 694 .show_attribute = iscsi_stat_logout_attr_show,
696 .store_attribute = iscsi_stat_logout_attr_store, 695 .store_attribute = iscsi_stat_logout_attr_store,
697 }; 696 };
698 697
699 struct config_item_type iscsi_stat_logout_cit = { 698 struct config_item_type iscsi_stat_logout_cit = {
700 .ct_item_ops = &iscsi_stat_logout_stats_item_ops, 699 .ct_item_ops = &iscsi_stat_logout_stats_item_ops,
701 .ct_attrs = iscsi_stat_logout_stats_attrs, 700 .ct_attrs = iscsi_stat_logout_stats_attrs,
702 .ct_owner = THIS_MODULE, 701 .ct_owner = THIS_MODULE,
703 }; 702 };
704 703
705 /* 704 /*
706 * Session Stats Table 705 * Session Stats Table
707 */ 706 */
708 707
709 CONFIGFS_EATTR_STRUCT(iscsi_stat_sess, iscsi_node_stat_grps); 708 CONFIGFS_EATTR_STRUCT(iscsi_stat_sess, iscsi_node_stat_grps);
710 #define ISCSI_STAT_SESS(_name, _mode) \ 709 #define ISCSI_STAT_SESS(_name, _mode) \
711 static struct iscsi_stat_sess_attribute \ 710 static struct iscsi_stat_sess_attribute \
712 iscsi_stat_sess_##_name = \ 711 iscsi_stat_sess_##_name = \
713 __CONFIGFS_EATTR(_name, _mode, \ 712 __CONFIGFS_EATTR(_name, _mode, \
714 iscsi_stat_sess_show_attr_##_name, \ 713 iscsi_stat_sess_show_attr_##_name, \
715 iscsi_stat_sess_store_attr_##_name); 714 iscsi_stat_sess_store_attr_##_name);
716 715
717 #define ISCSI_STAT_SESS_RO(_name) \ 716 #define ISCSI_STAT_SESS_RO(_name) \
718 static struct iscsi_stat_sess_attribute \ 717 static struct iscsi_stat_sess_attribute \
719 iscsi_stat_sess_##_name = \ 718 iscsi_stat_sess_##_name = \
720 __CONFIGFS_EATTR_RO(_name, \ 719 __CONFIGFS_EATTR_RO(_name, \
721 iscsi_stat_sess_show_attr_##_name); 720 iscsi_stat_sess_show_attr_##_name);
722 721
723 static ssize_t iscsi_stat_sess_show_attr_inst( 722 static ssize_t iscsi_stat_sess_show_attr_inst(
724 struct iscsi_node_stat_grps *igrps, char *page) 723 struct iscsi_node_stat_grps *igrps, char *page)
725 { 724 {
726 struct iscsi_node_acl *acl = container_of(igrps, 725 struct iscsi_node_acl *acl = container_of(igrps,
727 struct iscsi_node_acl, node_stat_grps); 726 struct iscsi_node_acl, node_stat_grps);
728 struct se_wwn *wwn = acl->se_node_acl.se_tpg->se_tpg_wwn; 727 struct se_wwn *wwn = acl->se_node_acl.se_tpg->se_tpg_wwn;
729 struct iscsi_tiqn *tiqn = container_of(wwn, 728 struct iscsi_tiqn *tiqn = container_of(wwn,
730 struct iscsi_tiqn, tiqn_wwn); 729 struct iscsi_tiqn, tiqn_wwn);
731 730
732 return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index); 731 return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
733 } 732 }
734 ISCSI_STAT_SESS_RO(inst); 733 ISCSI_STAT_SESS_RO(inst);
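
For reference, a minimal expansion of the ISCSI_STAT_SESS_RO(inst) invocation above, derived mechanically from the macro defined earlier (the __CONFIGFS_EATTR_RO internals belong to the configfs extended-attribute helpers and are left unexpanded):

	static struct iscsi_stat_sess_attribute
		iscsi_stat_sess_inst =
		__CONFIGFS_EATTR_RO(inst,
				iscsi_stat_sess_show_attr_inst);

The resulting iscsi_stat_sess_inst.attr is the configfs_attribute that is later collected into iscsi_stat_sess_stats_attrs[].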
735 734
736 static ssize_t iscsi_stat_sess_show_attr_node( 735 static ssize_t iscsi_stat_sess_show_attr_node(
737 struct iscsi_node_stat_grps *igrps, char *page) 736 struct iscsi_node_stat_grps *igrps, char *page)
738 { 737 {
739 struct iscsi_node_acl *acl = container_of(igrps, 738 struct iscsi_node_acl *acl = container_of(igrps,
740 struct iscsi_node_acl, node_stat_grps); 739 struct iscsi_node_acl, node_stat_grps);
741 struct se_node_acl *se_nacl = &acl->se_node_acl; 740 struct se_node_acl *se_nacl = &acl->se_node_acl;
742 struct iscsi_session *sess; 741 struct iscsi_session *sess;
743 struct se_session *se_sess; 742 struct se_session *se_sess;
744 ssize_t ret = 0; 743 ssize_t ret = 0;
745 744
746 spin_lock_bh(&se_nacl->nacl_sess_lock); 745 spin_lock_bh(&se_nacl->nacl_sess_lock);
747 se_sess = se_nacl->nacl_sess; 746 se_sess = se_nacl->nacl_sess;
748 if (se_sess) { 747 if (se_sess) {
749 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr; 748 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
750 if (sess) 749 if (sess)
751 ret = snprintf(page, PAGE_SIZE, "%u\n", 750 ret = snprintf(page, PAGE_SIZE, "%u\n",
752 sess->sess_ops->SessionType ? 0 : ISCSI_NODE_INDEX); 751 sess->sess_ops->SessionType ? 0 : ISCSI_NODE_INDEX);
753 } 752 }
754 spin_unlock_bh(&se_nacl->nacl_sess_lock); 753 spin_unlock_bh(&se_nacl->nacl_sess_lock);
755 754
756 return ret; 755 return ret;
757 } 756 }
758 ISCSI_STAT_SESS_RO(node); 757 ISCSI_STAT_SESS_RO(node);
759 758
760 static ssize_t iscsi_stat_sess_show_attr_indx( 759 static ssize_t iscsi_stat_sess_show_attr_indx(
761 struct iscsi_node_stat_grps *igrps, char *page) 760 struct iscsi_node_stat_grps *igrps, char *page)
762 { 761 {
763 struct iscsi_node_acl *acl = container_of(igrps, 762 struct iscsi_node_acl *acl = container_of(igrps,
764 struct iscsi_node_acl, node_stat_grps); 763 struct iscsi_node_acl, node_stat_grps);
765 struct se_node_acl *se_nacl = &acl->se_node_acl; 764 struct se_node_acl *se_nacl = &acl->se_node_acl;
766 struct iscsi_session *sess; 765 struct iscsi_session *sess;
767 struct se_session *se_sess; 766 struct se_session *se_sess;
768 ssize_t ret = 0; 767 ssize_t ret = 0;
769 768
770 spin_lock_bh(&se_nacl->nacl_sess_lock); 769 spin_lock_bh(&se_nacl->nacl_sess_lock);
771 se_sess = se_nacl->nacl_sess; 770 se_sess = se_nacl->nacl_sess;
772 if (se_sess) { 771 if (se_sess) {
773 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr; 772 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
774 if (sess) 773 if (sess)
775 ret = snprintf(page, PAGE_SIZE, "%u\n", 774 ret = snprintf(page, PAGE_SIZE, "%u\n",
776 sess->session_index); 775 sess->session_index);
777 } 776 }
778 spin_unlock_bh(&se_nacl->nacl_sess_lock); 777 spin_unlock_bh(&se_nacl->nacl_sess_lock);
779 778
780 return ret; 779 return ret;
781 } 780 }
782 ISCSI_STAT_SESS_RO(indx); 781 ISCSI_STAT_SESS_RO(indx);
783 782
784 static ssize_t iscsi_stat_sess_show_attr_cmd_pdus( 783 static ssize_t iscsi_stat_sess_show_attr_cmd_pdus(
785 struct iscsi_node_stat_grps *igrps, char *page) 784 struct iscsi_node_stat_grps *igrps, char *page)
786 { 785 {
787 struct iscsi_node_acl *acl = container_of(igrps, 786 struct iscsi_node_acl *acl = container_of(igrps,
788 struct iscsi_node_acl, node_stat_grps); 787 struct iscsi_node_acl, node_stat_grps);
789 struct se_node_acl *se_nacl = &acl->se_node_acl; 788 struct se_node_acl *se_nacl = &acl->se_node_acl;
790 struct iscsi_session *sess; 789 struct iscsi_session *sess;
791 struct se_session *se_sess; 790 struct se_session *se_sess;
792 ssize_t ret = 0; 791 ssize_t ret = 0;
793 792
794 spin_lock_bh(&se_nacl->nacl_sess_lock); 793 spin_lock_bh(&se_nacl->nacl_sess_lock);
795 se_sess = se_nacl->nacl_sess; 794 se_sess = se_nacl->nacl_sess;
796 if (se_sess) { 795 if (se_sess) {
797 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr; 796 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
798 if (sess) 797 if (sess)
799 ret = snprintf(page, PAGE_SIZE, "%u\n", sess->cmd_pdus); 798 ret = snprintf(page, PAGE_SIZE, "%u\n", sess->cmd_pdus);
800 } 799 }
801 spin_unlock_bh(&se_nacl->nacl_sess_lock); 800 spin_unlock_bh(&se_nacl->nacl_sess_lock);
802 801
803 return ret; 802 return ret;
804 } 803 }
805 ISCSI_STAT_SESS_RO(cmd_pdus); 804 ISCSI_STAT_SESS_RO(cmd_pdus);
806 805
807 static ssize_t iscsi_stat_sess_show_attr_rsp_pdus( 806 static ssize_t iscsi_stat_sess_show_attr_rsp_pdus(
808 struct iscsi_node_stat_grps *igrps, char *page) 807 struct iscsi_node_stat_grps *igrps, char *page)
809 { 808 {
810 struct iscsi_node_acl *acl = container_of(igrps, 809 struct iscsi_node_acl *acl = container_of(igrps,
811 struct iscsi_node_acl, node_stat_grps); 810 struct iscsi_node_acl, node_stat_grps);
812 struct se_node_acl *se_nacl = &acl->se_node_acl; 811 struct se_node_acl *se_nacl = &acl->se_node_acl;
813 struct iscsi_session *sess; 812 struct iscsi_session *sess;
814 struct se_session *se_sess; 813 struct se_session *se_sess;
815 ssize_t ret = 0; 814 ssize_t ret = 0;
816 815
817 spin_lock_bh(&se_nacl->nacl_sess_lock); 816 spin_lock_bh(&se_nacl->nacl_sess_lock);
818 se_sess = se_nacl->nacl_sess; 817 se_sess = se_nacl->nacl_sess;
819 if (se_sess) { 818 if (se_sess) {
820 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr; 819 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
821 if (sess) 820 if (sess)
822 ret = snprintf(page, PAGE_SIZE, "%u\n", sess->rsp_pdus); 821 ret = snprintf(page, PAGE_SIZE, "%u\n", sess->rsp_pdus);
823 } 822 }
824 spin_unlock_bh(&se_nacl->nacl_sess_lock); 823 spin_unlock_bh(&se_nacl->nacl_sess_lock);
825 824
826 return ret; 825 return ret;
827 } 826 }
828 ISCSI_STAT_SESS_RO(rsp_pdus); 827 ISCSI_STAT_SESS_RO(rsp_pdus);
829 828
830 static ssize_t iscsi_stat_sess_show_attr_txdata_octs( 829 static ssize_t iscsi_stat_sess_show_attr_txdata_octs(
831 struct iscsi_node_stat_grps *igrps, char *page) 830 struct iscsi_node_stat_grps *igrps, char *page)
832 { 831 {
833 struct iscsi_node_acl *acl = container_of(igrps, 832 struct iscsi_node_acl *acl = container_of(igrps,
834 struct iscsi_node_acl, node_stat_grps); 833 struct iscsi_node_acl, node_stat_grps);
835 struct se_node_acl *se_nacl = &acl->se_node_acl; 834 struct se_node_acl *se_nacl = &acl->se_node_acl;
836 struct iscsi_session *sess; 835 struct iscsi_session *sess;
837 struct se_session *se_sess; 836 struct se_session *se_sess;
838 ssize_t ret = 0; 837 ssize_t ret = 0;
839 838
840 spin_lock_bh(&se_nacl->nacl_sess_lock); 839 spin_lock_bh(&se_nacl->nacl_sess_lock);
841 se_sess = se_nacl->nacl_sess; 840 se_sess = se_nacl->nacl_sess;
842 if (se_sess) { 841 if (se_sess) {
843 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr; 842 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
844 if (sess) 843 if (sess)
845 ret = snprintf(page, PAGE_SIZE, "%llu\n", 844 ret = snprintf(page, PAGE_SIZE, "%llu\n",
846 (unsigned long long)sess->tx_data_octets); 845 (unsigned long long)sess->tx_data_octets);
847 } 846 }
848 spin_unlock_bh(&se_nacl->nacl_sess_lock); 847 spin_unlock_bh(&se_nacl->nacl_sess_lock);
849 848
850 return ret; 849 return ret;
851 } 850 }
852 ISCSI_STAT_SESS_RO(txdata_octs); 851 ISCSI_STAT_SESS_RO(txdata_octs);
853 852
854 static ssize_t iscsi_stat_sess_show_attr_rxdata_octs( 853 static ssize_t iscsi_stat_sess_show_attr_rxdata_octs(
855 struct iscsi_node_stat_grps *igrps, char *page) 854 struct iscsi_node_stat_grps *igrps, char *page)
856 { 855 {
857 struct iscsi_node_acl *acl = container_of(igrps, 856 struct iscsi_node_acl *acl = container_of(igrps,
858 struct iscsi_node_acl, node_stat_grps); 857 struct iscsi_node_acl, node_stat_grps);
859 struct se_node_acl *se_nacl = &acl->se_node_acl; 858 struct se_node_acl *se_nacl = &acl->se_node_acl;
860 struct iscsi_session *sess; 859 struct iscsi_session *sess;
861 struct se_session *se_sess; 860 struct se_session *se_sess;
862 ssize_t ret = 0; 861 ssize_t ret = 0;
863 862
864 spin_lock_bh(&se_nacl->nacl_sess_lock); 863 spin_lock_bh(&se_nacl->nacl_sess_lock);
865 se_sess = se_nacl->nacl_sess; 864 se_sess = se_nacl->nacl_sess;
866 if (se_sess) { 865 if (se_sess) {
867 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr; 866 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
868 if (sess) 867 if (sess)
869 ret = snprintf(page, PAGE_SIZE, "%llu\n", 868 ret = snprintf(page, PAGE_SIZE, "%llu\n",
870 (unsigned long long)sess->rx_data_octets); 869 (unsigned long long)sess->rx_data_octets);
871 } 870 }
872 spin_unlock_bh(&se_nacl->nacl_sess_lock); 871 spin_unlock_bh(&se_nacl->nacl_sess_lock);
873 872
874 return ret; 873 return ret;
875 } 874 }
876 ISCSI_STAT_SESS_RO(rxdata_octs); 875 ISCSI_STAT_SESS_RO(rxdata_octs);
877 876
878 static ssize_t iscsi_stat_sess_show_attr_conn_digest_errors( 877 static ssize_t iscsi_stat_sess_show_attr_conn_digest_errors(
879 struct iscsi_node_stat_grps *igrps, char *page) 878 struct iscsi_node_stat_grps *igrps, char *page)
880 { 879 {
881 struct iscsi_node_acl *acl = container_of(igrps, 880 struct iscsi_node_acl *acl = container_of(igrps,
882 struct iscsi_node_acl, node_stat_grps); 881 struct iscsi_node_acl, node_stat_grps);
883 struct se_node_acl *se_nacl = &acl->se_node_acl; 882 struct se_node_acl *se_nacl = &acl->se_node_acl;
884 struct iscsi_session *sess; 883 struct iscsi_session *sess;
885 struct se_session *se_sess; 884 struct se_session *se_sess;
886 ssize_t ret = 0; 885 ssize_t ret = 0;
887 886
888 spin_lock_bh(&se_nacl->nacl_sess_lock); 887 spin_lock_bh(&se_nacl->nacl_sess_lock);
889 se_sess = se_nacl->nacl_sess; 888 se_sess = se_nacl->nacl_sess;
890 if (se_sess) { 889 if (se_sess) {
891 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr; 890 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
892 if (sess) 891 if (sess)
893 ret = snprintf(page, PAGE_SIZE, "%u\n", 892 ret = snprintf(page, PAGE_SIZE, "%u\n",
894 sess->conn_digest_errors); 893 sess->conn_digest_errors);
895 } 894 }
896 spin_unlock_bh(&se_nacl->nacl_sess_lock); 895 spin_unlock_bh(&se_nacl->nacl_sess_lock);
897 896
898 return ret; 897 return ret;
899 } 898 }
900 ISCSI_STAT_SESS_RO(conn_digest_errors); 899 ISCSI_STAT_SESS_RO(conn_digest_errors);
901 900
902 static ssize_t iscsi_stat_sess_show_attr_conn_timeout_errors( 901 static ssize_t iscsi_stat_sess_show_attr_conn_timeout_errors(
903 struct iscsi_node_stat_grps *igrps, char *page) 902 struct iscsi_node_stat_grps *igrps, char *page)
904 { 903 {
905 struct iscsi_node_acl *acl = container_of(igrps, 904 struct iscsi_node_acl *acl = container_of(igrps,
906 struct iscsi_node_acl, node_stat_grps); 905 struct iscsi_node_acl, node_stat_grps);
907 struct se_node_acl *se_nacl = &acl->se_node_acl; 906 struct se_node_acl *se_nacl = &acl->se_node_acl;
908 struct iscsi_session *sess; 907 struct iscsi_session *sess;
909 struct se_session *se_sess; 908 struct se_session *se_sess;
910 ssize_t ret = 0; 909 ssize_t ret = 0;
911 910
912 spin_lock_bh(&se_nacl->nacl_sess_lock); 911 spin_lock_bh(&se_nacl->nacl_sess_lock);
913 se_sess = se_nacl->nacl_sess; 912 se_sess = se_nacl->nacl_sess;
914 if (se_sess) { 913 if (se_sess) {
915 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr; 914 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
916 if (sess) 915 if (sess)
917 ret = snprintf(page, PAGE_SIZE, "%u\n", 916 ret = snprintf(page, PAGE_SIZE, "%u\n",
918 sess->conn_timeout_errors); 917 sess->conn_timeout_errors);
919 } 918 }
920 spin_unlock_bh(&se_nacl->nacl_sess_lock); 919 spin_unlock_bh(&se_nacl->nacl_sess_lock);
921 920
922 return ret; 921 return ret;
923 } 922 }
924 ISCSI_STAT_SESS_RO(conn_timeout_errors); 923 ISCSI_STAT_SESS_RO(conn_timeout_errors);
925 924
926 CONFIGFS_EATTR_OPS(iscsi_stat_sess, iscsi_node_stat_grps, 925 CONFIGFS_EATTR_OPS(iscsi_stat_sess, iscsi_node_stat_grps,
927 iscsi_sess_stats_group); 926 iscsi_sess_stats_group);
928 927
929 static struct configfs_attribute *iscsi_stat_sess_stats_attrs[] = { 928 static struct configfs_attribute *iscsi_stat_sess_stats_attrs[] = {
930 &iscsi_stat_sess_inst.attr, 929 &iscsi_stat_sess_inst.attr,
931 &iscsi_stat_sess_node.attr, 930 &iscsi_stat_sess_node.attr,
932 &iscsi_stat_sess_indx.attr, 931 &iscsi_stat_sess_indx.attr,
933 &iscsi_stat_sess_cmd_pdus.attr, 932 &iscsi_stat_sess_cmd_pdus.attr,
934 &iscsi_stat_sess_rsp_pdus.attr, 933 &iscsi_stat_sess_rsp_pdus.attr,
935 &iscsi_stat_sess_txdata_octs.attr, 934 &iscsi_stat_sess_txdata_octs.attr,
936 &iscsi_stat_sess_rxdata_octs.attr, 935 &iscsi_stat_sess_rxdata_octs.attr,
937 &iscsi_stat_sess_conn_digest_errors.attr, 936 &iscsi_stat_sess_conn_digest_errors.attr,
938 &iscsi_stat_sess_conn_timeout_errors.attr, 937 &iscsi_stat_sess_conn_timeout_errors.attr,
939 NULL, 938 NULL,
940 }; 939 };
941 940
942 static struct configfs_item_operations iscsi_stat_sess_stats_item_ops = { 941 static struct configfs_item_operations iscsi_stat_sess_stats_item_ops = {
943 .show_attribute = iscsi_stat_sess_attr_show, 942 .show_attribute = iscsi_stat_sess_attr_show,
944 .store_attribute = iscsi_stat_sess_attr_store, 943 .store_attribute = iscsi_stat_sess_attr_store,
945 }; 944 };
946 945
947 struct config_item_type iscsi_stat_sess_cit = { 946 struct config_item_type iscsi_stat_sess_cit = {
948 .ct_item_ops = &iscsi_stat_sess_stats_item_ops, 947 .ct_item_ops = &iscsi_stat_sess_stats_item_ops,
949 .ct_attrs = iscsi_stat_sess_stats_attrs, 948 .ct_attrs = iscsi_stat_sess_stats_attrs,
950 .ct_owner = THIS_MODULE, 949 .ct_owner = THIS_MODULE,
951 }; 950 };
952 951
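Every show routine in this file recovers its parent object with container_of(). As a self-contained illustration of that pattern, a user-space sketch with hypothetical type names (the kernel's own container_of adds type checking on top of this offsetof arithmetic):

	#include <stddef.h>
	#include <stdio.h>

	/* classic container_of: recover the enclosing object from a member pointer */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct stat_grps { int dummy; };		/* stands in for iscsi_wwn_stat_grps */

	struct tiqn {					/* stands in for iscsi_tiqn */
		unsigned int tiqn_index;
		struct stat_grps tiqn_stat_grps;	/* embedded, as in the real struct */
	};

	static unsigned int show_inst(struct stat_grps *igrps)
	{
		struct tiqn *t = container_of(igrps, struct tiqn, tiqn_stat_grps);
		return t->tiqn_index;
	}

	int main(void)
	{
		struct tiqn t = { .tiqn_index = 7 };
		printf("%u\n", show_inst(&t.tiqn_stat_grps));	/* prints 7 */
		return 0;
	}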
drivers/target/iscsi/iscsi_target_tmr.c
1 /******************************************************************************* 1 /*******************************************************************************
2 * This file contains the iSCSI Target specific Task Management functions. 2 * This file contains the iSCSI Target specific Task Management functions.
3 * 3 *
4 * © Copyright 2007-2011 RisingTide Systems LLC. 4 * © Copyright 2007-2011 RisingTide Systems LLC.
5 * 5 *
6 * Licensed to the Linux Foundation under the General Public License (GPL) version 2. 6 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
7 * 7 *
8 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> 8 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by 11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or 12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version. 13 * (at your option) any later version.
14 * 14 *
15 * This program is distributed in the hope that it will be useful, 15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details. 18 * GNU General Public License for more details.
19 ******************************************************************************/ 19 ******************************************************************************/
20 20
21 #include <asm/unaligned.h> 21 #include <asm/unaligned.h>
22 #include <scsi/iscsi_proto.h> 22 #include <scsi/iscsi_proto.h>
23 #include <target/target_core_base.h> 23 #include <target/target_core_base.h>
24 #include <target/target_core_transport.h> 24 #include <target/target_core_fabric.h>
25 25
26 #include "iscsi_target_core.h" 26 #include "iscsi_target_core.h"
27 #include "iscsi_target_seq_pdu_list.h" 27 #include "iscsi_target_seq_pdu_list.h"
28 #include "iscsi_target_datain_values.h" 28 #include "iscsi_target_datain_values.h"
29 #include "iscsi_target_device.h" 29 #include "iscsi_target_device.h"
30 #include "iscsi_target_erl0.h" 30 #include "iscsi_target_erl0.h"
31 #include "iscsi_target_erl1.h" 31 #include "iscsi_target_erl1.h"
32 #include "iscsi_target_erl2.h" 32 #include "iscsi_target_erl2.h"
33 #include "iscsi_target_tmr.h" 33 #include "iscsi_target_tmr.h"
34 #include "iscsi_target_tpg.h" 34 #include "iscsi_target_tpg.h"
35 #include "iscsi_target_util.h" 35 #include "iscsi_target_util.h"
36 #include "iscsi_target.h" 36 #include "iscsi_target.h"
37 37
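The only change this patch makes to this file is the include swap above; the generic transport entry points called below, transport_generic_handle_data() and transport_handle_cdb_direct(), are assumed to now be declared by target/target_core_fabric.h rather than the old target/target_core_transport.h.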
38 u8 iscsit_tmr_abort_task( 38 u8 iscsit_tmr_abort_task(
39 struct iscsi_cmd *cmd, 39 struct iscsi_cmd *cmd,
40 unsigned char *buf) 40 unsigned char *buf)
41 { 41 {
42 struct iscsi_cmd *ref_cmd; 42 struct iscsi_cmd *ref_cmd;
43 struct iscsi_conn *conn = cmd->conn; 43 struct iscsi_conn *conn = cmd->conn;
44 struct iscsi_tmr_req *tmr_req = cmd->tmr_req; 44 struct iscsi_tmr_req *tmr_req = cmd->tmr_req;
45 struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req; 45 struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
46 struct iscsi_tm *hdr = (struct iscsi_tm *) buf; 46 struct iscsi_tm *hdr = (struct iscsi_tm *) buf;
47 47
48 ref_cmd = iscsit_find_cmd_from_itt(conn, hdr->rtt); 48 ref_cmd = iscsit_find_cmd_from_itt(conn, hdr->rtt);
49 if (!ref_cmd) { 49 if (!ref_cmd) {
50 pr_err("Unable to locate RefTaskTag: 0x%08x on CID:" 50 pr_err("Unable to locate RefTaskTag: 0x%08x on CID:"
51 " %hu.\n", hdr->rtt, conn->cid); 51 " %hu.\n", hdr->rtt, conn->cid);
52 return ((hdr->refcmdsn >= conn->sess->exp_cmd_sn) && 52 return ((hdr->refcmdsn >= conn->sess->exp_cmd_sn) &&
53 (hdr->refcmdsn <= conn->sess->max_cmd_sn)) ? 53 (hdr->refcmdsn <= conn->sess->max_cmd_sn)) ?
54 ISCSI_TMF_RSP_COMPLETE : ISCSI_TMF_RSP_NO_TASK; 54 ISCSI_TMF_RSP_COMPLETE : ISCSI_TMF_RSP_NO_TASK;
55 } 55 }
56 if (ref_cmd->cmd_sn != hdr->refcmdsn) { 56 if (ref_cmd->cmd_sn != hdr->refcmdsn) {
57 pr_err("RefCmdSN 0x%08x does not equal" 57 pr_err("RefCmdSN 0x%08x does not equal"
58 " task's CmdSN 0x%08x. Rejecting ABORT_TASK.\n", 58 " task's CmdSN 0x%08x. Rejecting ABORT_TASK.\n",
59 hdr->refcmdsn, ref_cmd->cmd_sn); 59 hdr->refcmdsn, ref_cmd->cmd_sn);
60 return ISCSI_TMF_RSP_REJECTED; 60 return ISCSI_TMF_RSP_REJECTED;
61 } 61 }
62 62
63 se_tmr->ref_task_tag = hdr->rtt; 63 se_tmr->ref_task_tag = hdr->rtt;
64 se_tmr->ref_cmd = &ref_cmd->se_cmd; 64 se_tmr->ref_cmd = &ref_cmd->se_cmd;
65 tmr_req->ref_cmd_sn = hdr->refcmdsn; 65 tmr_req->ref_cmd_sn = hdr->refcmdsn;
66 tmr_req->exp_data_sn = hdr->exp_datasn; 66 tmr_req->exp_data_sn = hdr->exp_datasn;
67 67
68 return ISCSI_TMF_RSP_COMPLETE; 68 return ISCSI_TMF_RSP_COMPLETE;
69 } 69 }
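
A worked example of the fallback window check above, with hypothetical sequence numbers:

	/*
	 * Suppose exp_cmd_sn = 0x10 and max_cmd_sn = 0x18 when the RefTaskTag
	 * lookup fails. A RefCmdSN of 0x14 lies inside [exp_cmd_sn, max_cmd_sn],
	 * so ISCSI_TMF_RSP_COMPLETE is returned on the assumption that the
	 * task already completed; a RefCmdSN of 0x08 lies outside the window
	 * and yields ISCSI_TMF_RSP_NO_TASK instead.
	 */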
70 70
71 /* 71 /*
72 * Called from iscsit_handle_task_mgt_cmd(). 72 * Called from iscsit_handle_task_mgt_cmd().
73 */ 73 */
74 int iscsit_tmr_task_warm_reset( 74 int iscsit_tmr_task_warm_reset(
75 struct iscsi_conn *conn, 75 struct iscsi_conn *conn,
76 struct iscsi_tmr_req *tmr_req, 76 struct iscsi_tmr_req *tmr_req,
77 unsigned char *buf) 77 unsigned char *buf)
78 { 78 {
79 struct iscsi_session *sess = conn->sess; 79 struct iscsi_session *sess = conn->sess;
80 struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess); 80 struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
81 #if 0 81 #if 0
82 struct iscsi_init_task_mgt_cmnd *hdr = 82 struct iscsi_init_task_mgt_cmnd *hdr =
83 (struct iscsi_init_task_mgt_cmnd *) buf; 83 (struct iscsi_init_task_mgt_cmnd *) buf;
84 #endif 84 #endif
85 if (!na->tmr_warm_reset) { 85 if (!na->tmr_warm_reset) {
86 pr_err("TMR Opcode TARGET_WARM_RESET authorization" 86 pr_err("TMR Opcode TARGET_WARM_RESET authorization"
87 " failed for Initiator Node: %s\n", 87 " failed for Initiator Node: %s\n",
88 sess->se_sess->se_node_acl->initiatorname); 88 sess->se_sess->se_node_acl->initiatorname);
89 return -1; 89 return -1;
90 } 90 }
91 /* 91 /*
92 * Do the real work in transport_generic_do_tmr(). 92 * Do the real work in transport_generic_do_tmr().
93 */ 93 */
94 return 0; 94 return 0;
95 } 95 }
96 96
97 int iscsit_tmr_task_cold_reset( 97 int iscsit_tmr_task_cold_reset(
98 struct iscsi_conn *conn, 98 struct iscsi_conn *conn,
99 struct iscsi_tmr_req *tmr_req, 99 struct iscsi_tmr_req *tmr_req,
100 unsigned char *buf) 100 unsigned char *buf)
101 { 101 {
102 struct iscsi_session *sess = conn->sess; 102 struct iscsi_session *sess = conn->sess;
103 struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess); 103 struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
104 104
105 if (!na->tmr_cold_reset) { 105 if (!na->tmr_cold_reset) {
106 pr_err("TMR Opcode TARGET_COLD_RESET authorization" 106 pr_err("TMR Opcode TARGET_COLD_RESET authorization"
107 " failed for Initiator Node: %s\n", 107 " failed for Initiator Node: %s\n",
108 sess->se_sess->se_node_acl->initiatorname); 108 sess->se_sess->se_node_acl->initiatorname);
109 return -1; 109 return -1;
110 } 110 }
111 /* 111 /*
112 * Do the real work in transport_generic_do_tmr(). 112 * Do the real work in transport_generic_do_tmr().
113 */ 113 */
114 return 0; 114 return 0;
115 } 115 }
116 116
117 u8 iscsit_tmr_task_reassign( 117 u8 iscsit_tmr_task_reassign(
118 struct iscsi_cmd *cmd, 118 struct iscsi_cmd *cmd,
119 unsigned char *buf) 119 unsigned char *buf)
120 { 120 {
121 struct iscsi_cmd *ref_cmd = NULL; 121 struct iscsi_cmd *ref_cmd = NULL;
122 struct iscsi_conn *conn = cmd->conn; 122 struct iscsi_conn *conn = cmd->conn;
123 struct iscsi_conn_recovery *cr = NULL; 123 struct iscsi_conn_recovery *cr = NULL;
124 struct iscsi_tmr_req *tmr_req = cmd->tmr_req; 124 struct iscsi_tmr_req *tmr_req = cmd->tmr_req;
125 struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req; 125 struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
126 struct iscsi_tm *hdr = (struct iscsi_tm *) buf; 126 struct iscsi_tm *hdr = (struct iscsi_tm *) buf;
127 int ret; 127 int ret;
128 128
129 pr_debug("Got TASK_REASSIGN TMR ITT: 0x%08x," 129 pr_debug("Got TASK_REASSIGN TMR ITT: 0x%08x,"
130 " RefTaskTag: 0x%08x, ExpDataSN: 0x%08x, CID: %hu\n", 130 " RefTaskTag: 0x%08x, ExpDataSN: 0x%08x, CID: %hu\n",
131 hdr->itt, hdr->rtt, hdr->exp_datasn, conn->cid); 131 hdr->itt, hdr->rtt, hdr->exp_datasn, conn->cid);
132 132
133 if (conn->sess->sess_ops->ErrorRecoveryLevel != 2) { 133 if (conn->sess->sess_ops->ErrorRecoveryLevel != 2) {
134 pr_err("TMR TASK_REASSIGN not supported in ERL<2," 134 pr_err("TMR TASK_REASSIGN not supported in ERL<2,"
135 " ignoring request.\n"); 135 " ignoring request.\n");
136 return ISCSI_TMF_RSP_NOT_SUPPORTED; 136 return ISCSI_TMF_RSP_NOT_SUPPORTED;
137 } 137 }
138 138
139 ret = iscsit_find_cmd_for_recovery(conn->sess, &ref_cmd, &cr, hdr->rtt); 139 ret = iscsit_find_cmd_for_recovery(conn->sess, &ref_cmd, &cr, hdr->rtt);
140 if (ret == -2) { 140 if (ret == -2) {
141 pr_err("Command ITT: 0x%08x is still alligent to CID:" 141 pr_err("Command ITT: 0x%08x is still alligent to CID:"
142 " %hu\n", ref_cmd->init_task_tag, cr->cid); 142 " %hu\n", ref_cmd->init_task_tag, cr->cid);
143 return ISCSI_TMF_RSP_TASK_ALLEGIANT; 143 return ISCSI_TMF_RSP_TASK_ALLEGIANT;
144 } else if (ret == -1) { 144 } else if (ret == -1) {
145 pr_err("Unable to locate RefTaskTag: 0x%08x in" 145 pr_err("Unable to locate RefTaskTag: 0x%08x in"
146 " connection recovery command list.\n", hdr->rtt); 146 " connection recovery command list.\n", hdr->rtt);
147 return ISCSI_TMF_RSP_NO_TASK; 147 return ISCSI_TMF_RSP_NO_TASK;
148 } 148 }
149 /* 149 /*
150 * Temporary check to prevent connection recovery for 150 * Temporary check to prevent connection recovery for
151 * connections with a differing MaxRecvDataSegmentLength. 151 * connections with a differing MaxRecvDataSegmentLength.
152 */ 152 */
153 if (cr->maxrecvdatasegmentlength != 153 if (cr->maxrecvdatasegmentlength !=
154 conn->conn_ops->MaxRecvDataSegmentLength) { 154 conn->conn_ops->MaxRecvDataSegmentLength) {
155 pr_err("Unable to perform connection recovery for" 155 pr_err("Unable to perform connection recovery for"
156 " differing MaxRecvDataSegmentLength, rejecting" 156 " differing MaxRecvDataSegmentLength, rejecting"
157 " TMR TASK_REASSIGN.\n"); 157 " TMR TASK_REASSIGN.\n");
158 return ISCSI_TMF_RSP_REJECTED; 158 return ISCSI_TMF_RSP_REJECTED;
159 } 159 }
160 160
161 se_tmr->ref_task_tag = hdr->rtt; 161 se_tmr->ref_task_tag = hdr->rtt;
162 se_tmr->ref_cmd = &ref_cmd->se_cmd; 162 se_tmr->ref_cmd = &ref_cmd->se_cmd;
163 se_tmr->ref_task_lun = get_unaligned_le64(&hdr->lun); 163 se_tmr->ref_task_lun = get_unaligned_le64(&hdr->lun);
164 tmr_req->ref_cmd_sn = hdr->refcmdsn; 164 tmr_req->ref_cmd_sn = hdr->refcmdsn;
165 tmr_req->exp_data_sn = hdr->exp_datasn; 165 tmr_req->exp_data_sn = hdr->exp_datasn;
166 tmr_req->conn_recovery = cr; 166 tmr_req->conn_recovery = cr;
167 tmr_req->task_reassign = 1; 167 tmr_req->task_reassign = 1;
168 /* 168 /*
169 * Command can now be reassigned to a new connection. 169 * Command can now be reassigned to a new connection.
170 * The task management response must be sent before the 170 * The task management response must be sent before the
171 * reassignment actually happens. See iscsi_tmr_post_handler(). 171 * reassignment actually happens. See iscsi_tmr_post_handler().
172 */ 172 */
173 return ISCSI_TMF_RSP_COMPLETE; 173 return ISCSI_TMF_RSP_COMPLETE;
174 } 174 }
175 175
176 static void iscsit_task_reassign_remove_cmd( 176 static void iscsit_task_reassign_remove_cmd(
177 struct iscsi_cmd *cmd, 177 struct iscsi_cmd *cmd,
178 struct iscsi_conn_recovery *cr, 178 struct iscsi_conn_recovery *cr,
179 struct iscsi_session *sess) 179 struct iscsi_session *sess)
180 { 180 {
181 int ret; 181 int ret;
182 182
183 spin_lock(&cr->conn_recovery_cmd_lock); 183 spin_lock(&cr->conn_recovery_cmd_lock);
184 ret = iscsit_remove_cmd_from_connection_recovery(cmd, sess); 184 ret = iscsit_remove_cmd_from_connection_recovery(cmd, sess);
185 spin_unlock(&cr->conn_recovery_cmd_lock); 185 spin_unlock(&cr->conn_recovery_cmd_lock);
186 if (!ret) { 186 if (!ret) {
187 pr_debug("iSCSI connection recovery successful for CID:" 187 pr_debug("iSCSI connection recovery successful for CID:"
188 " %hu on SID: %u\n", cr->cid, sess->sid); 188 " %hu on SID: %u\n", cr->cid, sess->sid);
189 iscsit_remove_active_connection_recovery_entry(cr, sess); 189 iscsit_remove_active_connection_recovery_entry(cr, sess);
190 } 190 }
191 } 191 }
192 192
193 static int iscsit_task_reassign_complete_nop_out( 193 static int iscsit_task_reassign_complete_nop_out(
194 struct iscsi_tmr_req *tmr_req, 194 struct iscsi_tmr_req *tmr_req,
195 struct iscsi_conn *conn) 195 struct iscsi_conn *conn)
196 { 196 {
197 struct se_tmr_req *se_tmr = tmr_req->se_tmr_req; 197 struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
198 struct se_cmd *se_cmd = se_tmr->ref_cmd; 198 struct se_cmd *se_cmd = se_tmr->ref_cmd;
199 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); 199 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
200 struct iscsi_conn_recovery *cr; 200 struct iscsi_conn_recovery *cr;
201 201
202 if (!cmd->cr) { 202 if (!cmd->cr) {
203 pr_err("struct iscsi_conn_recovery pointer for ITT: 0x%08x" 203 pr_err("struct iscsi_conn_recovery pointer for ITT: 0x%08x"
204 " is NULL!\n", cmd->init_task_tag); 204 " is NULL!\n", cmd->init_task_tag);
205 return -1; 205 return -1;
206 } 206 }
207 cr = cmd->cr; 207 cr = cmd->cr;
208 208
209 /* 209 /*
210 * Reset the StatSN so a new one for this command's new connection 210 * Reset the StatSN so a new one for this command's new connection
211 * will be assigned. 211 * will be assigned.
212 * Reset the ExpStatSN as well so we may receive Status SNACKs. 212 * Reset the ExpStatSN as well so we may receive Status SNACKs.
213 */ 213 */
214 cmd->stat_sn = cmd->exp_stat_sn = 0; 214 cmd->stat_sn = cmd->exp_stat_sn = 0;
215 215
216 iscsit_task_reassign_remove_cmd(cmd, cr, conn->sess); 216 iscsit_task_reassign_remove_cmd(cmd, cr, conn->sess);
217 217
218 spin_lock_bh(&conn->cmd_lock); 218 spin_lock_bh(&conn->cmd_lock);
219 list_add_tail(&cmd->i_list, &conn->conn_cmd_list); 219 list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
220 spin_unlock_bh(&conn->cmd_lock); 220 spin_unlock_bh(&conn->cmd_lock);
221 221
222 cmd->i_state = ISTATE_SEND_NOPIN; 222 cmd->i_state = ISTATE_SEND_NOPIN;
223 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); 223 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
224 return 0; 224 return 0;
225 } 225 }
226 226
227 static int iscsit_task_reassign_complete_write( 227 static int iscsit_task_reassign_complete_write(
228 struct iscsi_cmd *cmd, 228 struct iscsi_cmd *cmd,
229 struct iscsi_tmr_req *tmr_req) 229 struct iscsi_tmr_req *tmr_req)
230 { 230 {
231 int no_build_r2ts = 0; 231 int no_build_r2ts = 0;
232 u32 length = 0, offset = 0; 232 u32 length = 0, offset = 0;
233 struct iscsi_conn *conn = cmd->conn; 233 struct iscsi_conn *conn = cmd->conn;
234 struct se_cmd *se_cmd = &cmd->se_cmd; 234 struct se_cmd *se_cmd = &cmd->se_cmd;
235 /* 235 /*
236 * The Initiator must not send an R2T SNACK with a BegRun less than 236 * The Initiator must not send an R2T SNACK with a BegRun less than
237 * the TMR TASK_REASSIGN's ExpDataSN. 237 * the TMR TASK_REASSIGN's ExpDataSN.
238 */ 238 */
239 if (!tmr_req->exp_data_sn) { 239 if (!tmr_req->exp_data_sn) {
240 cmd->cmd_flags &= ~ICF_GOT_DATACK_SNACK; 240 cmd->cmd_flags &= ~ICF_GOT_DATACK_SNACK;
241 cmd->acked_data_sn = 0; 241 cmd->acked_data_sn = 0;
242 } else { 242 } else {
243 cmd->cmd_flags |= ICF_GOT_DATACK_SNACK; 243 cmd->cmd_flags |= ICF_GOT_DATACK_SNACK;
244 cmd->acked_data_sn = (tmr_req->exp_data_sn - 1); 244 cmd->acked_data_sn = (tmr_req->exp_data_sn - 1);
245 } 245 }
246 246
247 /* 247 /*
248 * The TMR TASK_REASSIGN's ExpDataSN contains the next R2TSN the 248 * The TMR TASK_REASSIGN's ExpDataSN contains the next R2TSN the
249 * Initiator is expecting. The Target controls all WRITE operations 249 * Initiator is expecting. The Target controls all WRITE operations
250 * so if we have received all DataOUT we can safely ignore the Initiator. 250 * so if we have received all DataOUT we can safely ignore the Initiator.
251 */ 251 */
252 if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) { 252 if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) {
253 if (!atomic_read(&cmd->se_cmd.t_transport_sent)) { 253 if (!atomic_read(&cmd->se_cmd.t_transport_sent)) {
254 pr_debug("WRITE ITT: 0x%08x: t_state: %d" 254 pr_debug("WRITE ITT: 0x%08x: t_state: %d"
255 " never sent to transport\n", 255 " never sent to transport\n",
256 cmd->init_task_tag, cmd->se_cmd.t_state); 256 cmd->init_task_tag, cmd->se_cmd.t_state);
257 return transport_generic_handle_data(se_cmd); 257 return transport_generic_handle_data(se_cmd);
258 } 258 }
259 259
260 cmd->i_state = ISTATE_SEND_STATUS; 260 cmd->i_state = ISTATE_SEND_STATUS;
261 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); 261 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
262 return 0; 262 return 0;
263 } 263 }
264 264
265 /* 265 /*
266 * Special case to deal with DataSequenceInOrder=No and Non-Immediate 266 * Special case to deal with DataSequenceInOrder=No and Non-Immediate
267 * Unsolicited DataOut. 267 * Unsolicited DataOut.
268 */ 268 */
269 if (cmd->unsolicited_data) { 269 if (cmd->unsolicited_data) {
270 cmd->unsolicited_data = 0; 270 cmd->unsolicited_data = 0;
271 271
272 offset = cmd->next_burst_len = cmd->write_data_done; 272 offset = cmd->next_burst_len = cmd->write_data_done;
273 273
274 if ((conn->sess->sess_ops->FirstBurstLength - offset) >= 274 if ((conn->sess->sess_ops->FirstBurstLength - offset) >=
275 cmd->data_length) { 275 cmd->data_length) {
276 no_build_r2ts = 1; 276 no_build_r2ts = 1;
277 length = (cmd->data_length - offset); 277 length = (cmd->data_length - offset);
278 } else 278 } else
279 length = (conn->sess->sess_ops->FirstBurstLength - offset); 279 length = (conn->sess->sess_ops->FirstBurstLength - offset);
280 280
281 spin_lock_bh(&cmd->r2t_lock); 281 spin_lock_bh(&cmd->r2t_lock);
282 if (iscsit_add_r2t_to_list(cmd, offset, length, 0, 0) < 0) { 282 if (iscsit_add_r2t_to_list(cmd, offset, length, 0, 0) < 0) {
283 spin_unlock_bh(&cmd->r2t_lock); 283 spin_unlock_bh(&cmd->r2t_lock);
284 return -1; 284 return -1;
285 } 285 }
286 cmd->outstanding_r2ts++; 286 cmd->outstanding_r2ts++;
287 spin_unlock_bh(&cmd->r2t_lock); 287 spin_unlock_bh(&cmd->r2t_lock);
288 288
289 if (no_build_r2ts) 289 if (no_build_r2ts)
290 return 0; 290 return 0;
291 } 291 }
292 /* 292 /*
293 * iscsit_build_r2ts_for_cmd() can handle the rest from here. 293 * iscsit_build_r2ts_for_cmd() can handle the rest from here.
294 */ 294 */
295 return iscsit_build_r2ts_for_cmd(cmd, conn, 2); 295 return iscsit_build_r2ts_for_cmd(cmd, conn, 2);
296 } 296 }
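
A worked example of the unsolicited-data branch above, using hypothetical negotiated values:

	/*
	 * With FirstBurstLength = 64k, write_data_done = 8k and
	 * data_length = 48k: offset = next_burst_len = 8k, and since
	 * 64k - 8k >= 48k the command fits inside the first burst, so a
	 * single R2T for data_length - offset = 40k is queued and
	 * no_build_r2ts suppresses the iscsit_build_r2ts_for_cmd() call.
	 * With data_length = 96k instead, the queued R2T covers
	 * 64k - 8k = 56k and iscsit_build_r2ts_for_cmd() builds the R2Ts
	 * for the remainder.
	 */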
297 297
298 static int iscsit_task_reassign_complete_read( 298 static int iscsit_task_reassign_complete_read(
299 struct iscsi_cmd *cmd, 299 struct iscsi_cmd *cmd,
300 struct iscsi_tmr_req *tmr_req) 300 struct iscsi_tmr_req *tmr_req)
301 { 301 {
302 struct iscsi_conn *conn = cmd->conn; 302 struct iscsi_conn *conn = cmd->conn;
303 struct iscsi_datain_req *dr; 303 struct iscsi_datain_req *dr;
304 struct se_cmd *se_cmd = &cmd->se_cmd; 304 struct se_cmd *se_cmd = &cmd->se_cmd;
305 /* 305 /*
306 * The Initiator must not send a Data SNACK with a BegRun less than 306 * The Initiator must not send a Data SNACK with a BegRun less than
307 * the TMR TASK_REASSIGN's ExpDataSN. 307 * the TMR TASK_REASSIGN's ExpDataSN.
308 */ 308 */
309 if (!tmr_req->exp_data_sn) { 309 if (!tmr_req->exp_data_sn) {
310 cmd->cmd_flags &= ~ICF_GOT_DATACK_SNACK; 310 cmd->cmd_flags &= ~ICF_GOT_DATACK_SNACK;
311 cmd->acked_data_sn = 0; 311 cmd->acked_data_sn = 0;
312 } else { 312 } else {
313 cmd->cmd_flags |= ICF_GOT_DATACK_SNACK; 313 cmd->cmd_flags |= ICF_GOT_DATACK_SNACK;
314 cmd->acked_data_sn = (tmr_req->exp_data_sn - 1); 314 cmd->acked_data_sn = (tmr_req->exp_data_sn - 1);
315 } 315 }
316 316
317 if (!atomic_read(&cmd->se_cmd.t_transport_sent)) { 317 if (!atomic_read(&cmd->se_cmd.t_transport_sent)) {
318 pr_debug("READ ITT: 0x%08x: t_state: %d never sent to" 318 pr_debug("READ ITT: 0x%08x: t_state: %d never sent to"
319 " transport\n", cmd->init_task_tag, 319 " transport\n", cmd->init_task_tag,
320 cmd->se_cmd.t_state); 320 cmd->se_cmd.t_state);
321 transport_handle_cdb_direct(se_cmd); 321 transport_handle_cdb_direct(se_cmd);
322 return 0; 322 return 0;
323 } 323 }
324 324
325 if (!atomic_read(&se_cmd->t_transport_complete)) { 325 if (!atomic_read(&se_cmd->t_transport_complete)) {
326 pr_err("READ ITT: 0x%08x: t_state: %d, never returned" 326 pr_err("READ ITT: 0x%08x: t_state: %d, never returned"
327 " from transport\n", cmd->init_task_tag, 327 " from transport\n", cmd->init_task_tag,
328 cmd->se_cmd.t_state); 328 cmd->se_cmd.t_state);
329 return -1; 329 return -1;
330 } 330 }
331 331
332 dr = iscsit_allocate_datain_req(); 332 dr = iscsit_allocate_datain_req();
333 if (!dr) 333 if (!dr)
334 return -1; 334 return -1;
335 /* 335 /*
336 * The TMR TASK_REASSIGN's ExpDataSN contains the next DataSN the 336 * The TMR TASK_REASSIGN's ExpDataSN contains the next DataSN the
337 * Initiator is expecting. 337 * Initiator is expecting.
338 */ 338 */
339 dr->data_sn = dr->begrun = tmr_req->exp_data_sn; 339 dr->data_sn = dr->begrun = tmr_req->exp_data_sn;
340 dr->runlength = 0; 340 dr->runlength = 0;
341 dr->generate_recovery_values = 1; 341 dr->generate_recovery_values = 1;
342 dr->recovery = DATAIN_CONNECTION_RECOVERY; 342 dr->recovery = DATAIN_CONNECTION_RECOVERY;
343 343
344 iscsit_attach_datain_req(cmd, dr); 344 iscsit_attach_datain_req(cmd, dr);
345 345
346 cmd->i_state = ISTATE_SEND_DATAIN; 346 cmd->i_state = ISTATE_SEND_DATAIN;
347 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); 347 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
348 return 0; 348 return 0;
349 } 349 }
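
A short example of the recovery values set above, assuming a hypothetical ExpDataSN from the reassigning initiator:

	/*
	 * If the TMR carried ExpDataSN = 5, then begrun = data_sn = 5 and
	 * runlength = 0, so DataIN generation restarts at DataSN 5 and runs
	 * to the end of the transfer on the new connection, with the
	 * recovery values regenerated along the way.
	 */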
350 350
351 static int iscsit_task_reassign_complete_none( 351 static int iscsit_task_reassign_complete_none(
352 struct iscsi_cmd *cmd, 352 struct iscsi_cmd *cmd,
353 struct iscsi_tmr_req *tmr_req) 353 struct iscsi_tmr_req *tmr_req)
354 { 354 {
355 struct iscsi_conn *conn = cmd->conn; 355 struct iscsi_conn *conn = cmd->conn;
356 356
357 cmd->i_state = ISTATE_SEND_STATUS; 357 cmd->i_state = ISTATE_SEND_STATUS;
358 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); 358 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
359 return 0; 359 return 0;
360 } 360 }
361 361
362 static int iscsit_task_reassign_complete_scsi_cmnd( 362 static int iscsit_task_reassign_complete_scsi_cmnd(
363 struct iscsi_tmr_req *tmr_req, 363 struct iscsi_tmr_req *tmr_req,
364 struct iscsi_conn *conn) 364 struct iscsi_conn *conn)
365 { 365 {
366 struct se_tmr_req *se_tmr = tmr_req->se_tmr_req; 366 struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
367 struct se_cmd *se_cmd = se_tmr->ref_cmd; 367 struct se_cmd *se_cmd = se_tmr->ref_cmd;
368 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); 368 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
369 struct iscsi_conn_recovery *cr; 369 struct iscsi_conn_recovery *cr;
370 370
371 if (!cmd->cr) { 371 if (!cmd->cr) {
372 pr_err("struct iscsi_conn_recovery pointer for ITT: 0x%08x" 372 pr_err("struct iscsi_conn_recovery pointer for ITT: 0x%08x"
373 " is NULL!\n", cmd->init_task_tag); 373 " is NULL!\n", cmd->init_task_tag);
374 return -1; 374 return -1;
375 } 375 }
376 cr = cmd->cr; 376 cr = cmd->cr;
377 377
378 /* 378 /*
379 * Reset the StatSN so a new one for this command's new connection 379 * Reset the StatSN so a new one for this command's new connection
380 * will be assigned. 380 * will be assigned.
381 * Reset the ExpStatSN as well so we may receive Status SNACKs. 381 * Reset the ExpStatSN as well so we may receive Status SNACKs.
382 */ 382 */
383 cmd->stat_sn = cmd->exp_stat_sn = 0; 383 cmd->stat_sn = cmd->exp_stat_sn = 0;
384 384
385 iscsit_task_reassign_remove_cmd(cmd, cr, conn->sess); 385 iscsit_task_reassign_remove_cmd(cmd, cr, conn->sess);
386 386
387 spin_lock_bh(&conn->cmd_lock); 387 spin_lock_bh(&conn->cmd_lock);
388 list_add_tail(&cmd->i_list, &conn->conn_cmd_list); 388 list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
389 spin_unlock_bh(&conn->cmd_lock); 389 spin_unlock_bh(&conn->cmd_lock);
390 390
391 if (se_cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { 391 if (se_cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
392 cmd->i_state = ISTATE_SEND_STATUS; 392 cmd->i_state = ISTATE_SEND_STATUS;
393 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); 393 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
394 return 0; 394 return 0;
395 } 395 }
396 396
397 switch (cmd->data_direction) { 397 switch (cmd->data_direction) {
398 case DMA_TO_DEVICE: 398 case DMA_TO_DEVICE:
399 return iscsit_task_reassign_complete_write(cmd, tmr_req); 399 return iscsit_task_reassign_complete_write(cmd, tmr_req);
400 case DMA_FROM_DEVICE: 400 case DMA_FROM_DEVICE:
401 return iscsit_task_reassign_complete_read(cmd, tmr_req); 401 return iscsit_task_reassign_complete_read(cmd, tmr_req);
402 case DMA_NONE: 402 case DMA_NONE:
403 return iscsit_task_reassign_complete_none(cmd, tmr_req); 403 return iscsit_task_reassign_complete_none(cmd, tmr_req);
404 default: 404 default:
405 pr_err("Unknown cmd->data_direction: 0x%02x\n", 405 pr_err("Unknown cmd->data_direction: 0x%02x\n",
406 cmd->data_direction); 406 cmd->data_direction);
407 return -1; 407 return -1;
408 } 408 }
409 409
410 return 0; 410 return 0;
411 } 411 }
412 412
413 static int iscsit_task_reassign_complete( 413 static int iscsit_task_reassign_complete(
414 struct iscsi_tmr_req *tmr_req, 414 struct iscsi_tmr_req *tmr_req,
415 struct iscsi_conn *conn) 415 struct iscsi_conn *conn)
416 { 416 {
417 struct se_tmr_req *se_tmr = tmr_req->se_tmr_req; 417 struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
418 struct se_cmd *se_cmd; 418 struct se_cmd *se_cmd;
419 struct iscsi_cmd *cmd; 419 struct iscsi_cmd *cmd;
420 int ret = 0; 420 int ret = 0;
421 421
422 if (!se_tmr->ref_cmd) { 422 if (!se_tmr->ref_cmd) {
423 pr_err("TMR Request is missing a RefCmd struct iscsi_cmd.\n"); 423 pr_err("TMR Request is missing a RefCmd struct iscsi_cmd.\n");
424 return -1; 424 return -1;
425 } 425 }
426 se_cmd = se_tmr->ref_cmd; 426 se_cmd = se_tmr->ref_cmd;
427 cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); 427 cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
428 428
429 cmd->conn = conn; 429 cmd->conn = conn;
430 430
431 switch (cmd->iscsi_opcode) { 431 switch (cmd->iscsi_opcode) {
432 case ISCSI_OP_NOOP_OUT: 432 case ISCSI_OP_NOOP_OUT:
433 ret = iscsit_task_reassign_complete_nop_out(tmr_req, conn); 433 ret = iscsit_task_reassign_complete_nop_out(tmr_req, conn);
434 break; 434 break;
435 case ISCSI_OP_SCSI_CMD: 435 case ISCSI_OP_SCSI_CMD:
436 ret = iscsit_task_reassign_complete_scsi_cmnd(tmr_req, conn); 436 ret = iscsit_task_reassign_complete_scsi_cmnd(tmr_req, conn);
437 break; 437 break;
438 default: 438 default:
439 pr_err("Illegal iSCSI Opcode 0x%02x during" 439 pr_err("Illegal iSCSI Opcode 0x%02x during"
440 " command realligence\n", cmd->iscsi_opcode); 440 " command realligence\n", cmd->iscsi_opcode);
441 return -1; 441 return -1;
442 } 442 }
443 443
444 if (ret != 0) 444 if (ret != 0)
445 return ret; 445 return ret;
446 446
447 pr_debug("Completed connection realligence for Opcode: 0x%02x," 447 pr_debug("Completed connection realligence for Opcode: 0x%02x,"
448 " ITT: 0x%08x to CID: %hu.\n", cmd->iscsi_opcode, 448 " ITT: 0x%08x to CID: %hu.\n", cmd->iscsi_opcode,
449 cmd->init_task_tag, conn->cid); 449 cmd->init_task_tag, conn->cid);
450 450
451 return 0; 451 return 0;
452 } 452 }
453 453
454 /* 454 /*
455 * Handles special after-the-fact actions related to TMRs. 455 * Handles special after-the-fact actions related to TMRs.
456 * Right now the only one that it's really needed for is 456 * Right now the only one that it's really needed for is
457 * connection recovery related TASK_REASSIGN. 457 * connection recovery related TASK_REASSIGN.
458 */ 458 */
459 int iscsit_tmr_post_handler(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 459 int iscsit_tmr_post_handler(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
460 { 460 {
461 struct iscsi_tmr_req *tmr_req = cmd->tmr_req; 461 struct iscsi_tmr_req *tmr_req = cmd->tmr_req;
462 struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req; 462 struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
463 463
464 if (tmr_req->task_reassign && 464 if (tmr_req->task_reassign &&
465 (se_tmr->response == ISCSI_TMF_RSP_COMPLETE)) 465 (se_tmr->response == ISCSI_TMF_RSP_COMPLETE))
466 return iscsit_task_reassign_complete(tmr_req, conn); 466 return iscsit_task_reassign_complete(tmr_req, conn);
467 467
468 return 0; 468 return 0;
469 } 469 }
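
As the comment in iscsit_tmr_task_reassign() points out, the TMF response must reach the initiator before the reassignment takes effect. A hedged sketch of the expected ordering in the response path (the actual call site lives outside this diff):

	/*
	 * Hypothetical ordering, not the literal call site:
	 *
	 *	se_tmr->response = ISCSI_TMF_RSP_COMPLETE;
	 *	...transmit the TMR response PDU to the initiator...
	 *	iscsit_tmr_post_handler(cmd, conn);
	 */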
470 470
471 /* 471 /*
472 * Nothing to do here, but leave it for good measure. :-) 472 * Nothing to do here, but leave it for good measure. :-)
473 */ 473 */
474 int iscsit_task_reassign_prepare_read( 474 int iscsit_task_reassign_prepare_read(
475 struct iscsi_tmr_req *tmr_req, 475 struct iscsi_tmr_req *tmr_req,
476 struct iscsi_conn *conn) 476 struct iscsi_conn *conn)
477 { 477 {
478 return 0; 478 return 0;
479 } 479 }
480 480
481 static void iscsit_task_reassign_prepare_unsolicited_dataout( 481 static void iscsit_task_reassign_prepare_unsolicited_dataout(
482 struct iscsi_cmd *cmd, 482 struct iscsi_cmd *cmd,
483 struct iscsi_conn *conn) 483 struct iscsi_conn *conn)
484 { 484 {
485 int i, j; 485 int i, j;
486 struct iscsi_pdu *pdu = NULL; 486 struct iscsi_pdu *pdu = NULL;
487 struct iscsi_seq *seq = NULL; 487 struct iscsi_seq *seq = NULL;
488 488
489 if (conn->sess->sess_ops->DataSequenceInOrder) { 489 if (conn->sess->sess_ops->DataSequenceInOrder) {
490 cmd->data_sn = 0; 490 cmd->data_sn = 0;
491 491
492 if (cmd->immediate_data) 492 if (cmd->immediate_data)
493 cmd->r2t_offset += (cmd->first_burst_len - 493 cmd->r2t_offset += (cmd->first_burst_len -
494 cmd->seq_start_offset); 494 cmd->seq_start_offset);
495 495
496 if (conn->sess->sess_ops->DataPDUInOrder) { 496 if (conn->sess->sess_ops->DataPDUInOrder) {
497 cmd->write_data_done -= (cmd->immediate_data) ? 497 cmd->write_data_done -= (cmd->immediate_data) ?
498 (cmd->first_burst_len - 498 (cmd->first_burst_len -
499 cmd->seq_start_offset) : 499 cmd->seq_start_offset) :
500 cmd->first_burst_len; 500 cmd->first_burst_len;
501 cmd->first_burst_len = 0; 501 cmd->first_burst_len = 0;
502 return; 502 return;
503 } 503 }
504 504
505 for (i = 0; i < cmd->pdu_count; i++) { 505 for (i = 0; i < cmd->pdu_count; i++) {
506 pdu = &cmd->pdu_list[i]; 506 pdu = &cmd->pdu_list[i];
507 507
508 if (pdu->status != ISCSI_PDU_RECEIVED_OK) 508 if (pdu->status != ISCSI_PDU_RECEIVED_OK)
509 continue; 509 continue;
510 510
511 if ((pdu->offset >= cmd->seq_start_offset) && 511 if ((pdu->offset >= cmd->seq_start_offset) &&
512 ((pdu->offset + pdu->length) <= 512 ((pdu->offset + pdu->length) <=
513 cmd->seq_end_offset)) { 513 cmd->seq_end_offset)) {
514 cmd->first_burst_len -= pdu->length; 514 cmd->first_burst_len -= pdu->length;
515 cmd->write_data_done -= pdu->length; 515 cmd->write_data_done -= pdu->length;
516 pdu->status = ISCSI_PDU_NOT_RECEIVED; 516 pdu->status = ISCSI_PDU_NOT_RECEIVED;
517 } 517 }
518 } 518 }
519 } else { 519 } else {
520 for (i = 0; i < cmd->seq_count; i++) { 520 for (i = 0; i < cmd->seq_count; i++) {
521 seq = &cmd->seq_list[i]; 521 seq = &cmd->seq_list[i];
522 522
523 if (seq->type != SEQTYPE_UNSOLICITED) 523 if (seq->type != SEQTYPE_UNSOLICITED)
524 continue; 524 continue;
525 525
526 cmd->write_data_done -= 526 cmd->write_data_done -=
527 (seq->offset - seq->orig_offset); 527 (seq->offset - seq->orig_offset);
528 cmd->first_burst_len = 0; 528 cmd->first_burst_len = 0;
529 seq->data_sn = 0; 529 seq->data_sn = 0;
530 seq->offset = seq->orig_offset; 530 seq->offset = seq->orig_offset;
531 seq->next_burst_len = 0; 531 seq->next_burst_len = 0;
532 seq->status = DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY; 532 seq->status = DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY;
533 533
534 if (conn->sess->sess_ops->DataPDUInOrder) 534 if (conn->sess->sess_ops->DataPDUInOrder)
535 continue; 535 continue;
536 536
537 for (j = 0; j < seq->pdu_count; j++) { 537 for (j = 0; j < seq->pdu_count; j++) {
538 pdu = &cmd->pdu_list[j+seq->pdu_start]; 538 pdu = &cmd->pdu_list[j+seq->pdu_start];
539 539
540 if (pdu->status != ISCSI_PDU_RECEIVED_OK) 540 if (pdu->status != ISCSI_PDU_RECEIVED_OK)
541 continue; 541 continue;
542 542
543 pdu->status = ISCSI_PDU_NOT_RECEIVED; 543 pdu->status = ISCSI_PDU_NOT_RECEIVED;
544 } 544 }
545 } 545 }
546 } 546 }
547 } 547 }
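
A worked example of the DataSequenceInOrder=Yes, DataPDUInOrder=Yes rollback above (hypothetical values, no immediate data):

	/*
	 * With seq_start_offset = 0, first_burst_len = 24k and
	 * write_data_done = 24k, the !immediate_data case subtracts the full
	 * first_burst_len, leaving write_data_done = 0 and
	 * first_burst_len = 0; the whole unsolicited burst is then
	 * re-requested with R2Ts once the reassignment completes.
	 */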
548 548
549 int iscsit_task_reassign_prepare_write( 549 int iscsit_task_reassign_prepare_write(
550 struct iscsi_tmr_req *tmr_req, 550 struct iscsi_tmr_req *tmr_req,
551 struct iscsi_conn *conn) 551 struct iscsi_conn *conn)
552 { 552 {
553 struct se_tmr_req *se_tmr = tmr_req->se_tmr_req; 553 struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
554 struct se_cmd *se_cmd = se_tmr->ref_cmd; 554 struct se_cmd *se_cmd = se_tmr->ref_cmd;
555 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); 555 struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
556 struct iscsi_pdu *pdu = NULL; 556 struct iscsi_pdu *pdu = NULL;
557 struct iscsi_r2t *r2t = NULL, *r2t_tmp; 557 struct iscsi_r2t *r2t = NULL, *r2t_tmp;
558 int first_incomplete_r2t = 1, i = 0; 558 int first_incomplete_r2t = 1, i = 0;
559 559
560 /* 560 /*
561 * The command was in the process of receiving Unsolicited DataOUT when 561 * The command was in the process of receiving Unsolicited DataOUT when
562 * the connection failed. 562 * the connection failed.
563 */ 563 */
564 if (cmd->unsolicited_data) 564 if (cmd->unsolicited_data)
565 iscsit_task_reassign_prepare_unsolicited_dataout(cmd, conn); 565 iscsit_task_reassign_prepare_unsolicited_dataout(cmd, conn);
566 566
567 /* 567 /*
568 * The Initiator is requesting R2Ts starting from zero, so skip 568 * The Initiator is requesting R2Ts starting from zero, so skip
569 * checking acknowledged R2Ts and start checking struct iscsi_r2ts 569 * checking acknowledged R2Ts and start checking struct iscsi_r2ts
570 * greater than zero. 570 * greater than zero.
571 */ 571 */
572 if (!tmr_req->exp_data_sn) 572 if (!tmr_req->exp_data_sn)
573 goto drop_unacknowledged_r2ts; 573 goto drop_unacknowledged_r2ts;
574 574
575 /* 575 /*
576 * We now check that the PDUs in DataOUT sequences below 576 * We now check that the PDUs in DataOUT sequences below
577 * the TMR TASK_REASSIGN ExpDataSN (R2TSN the Initiator is 577 * the TMR TASK_REASSIGN ExpDataSN (R2TSN the Initiator is
578 * expecting next) have all the DataOUT they require to complete 578 * expecting next) have all the DataOUT they require to complete
579 * the DataOUT sequence. First scan from R2TSN 0 to TMR 579 * the DataOUT sequence. First scan from R2TSN 0 to TMR
580 * TASK_REASSIGN ExpDataSN-1. 580 * TASK_REASSIGN ExpDataSN-1.
581 * 581 *
582 * If we have not received all DataOUT in question, we must 582 * If we have not received all DataOUT in question, we must
583 * make sure to make the appropriate changes to values in 583 * make sure to make the appropriate changes to values in
584 * struct iscsi_cmd (and elsewhere depending on session parameters) 584 * struct iscsi_cmd (and elsewhere depending on session parameters)
585 * so iscsit_build_r2ts_for_cmd() in iscsit_task_reassign_complete_write() 585 * so iscsit_build_r2ts_for_cmd() in iscsit_task_reassign_complete_write()
586 * will resend a new R2T for the DataOUT sequences in question. 586 * will resend a new R2T for the DataOUT sequences in question.
587 */ 587 */
588 spin_lock_bh(&cmd->r2t_lock); 588 spin_lock_bh(&cmd->r2t_lock);
589 if (list_empty(&cmd->cmd_r2t_list)) { 589 if (list_empty(&cmd->cmd_r2t_list)) {
590 spin_unlock_bh(&cmd->r2t_lock); 590 spin_unlock_bh(&cmd->r2t_lock);
591 return -1; 591 return -1;
592 } 592 }
593 593
594 list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) { 594 list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
595 595
596 if (r2t->r2t_sn >= tmr_req->exp_data_sn) 596 if (r2t->r2t_sn >= tmr_req->exp_data_sn)
597 continue; 597 continue;
598 /* 598 /*
599 * Safely ignore Recovery R2Ts and R2Ts that have completed 599 * Safely ignore Recovery R2Ts and R2Ts that have completed
600 * DataOUT sequences. 600 * DataOUT sequences.
601 */ 601 */
602 if (r2t->seq_complete) 602 if (r2t->seq_complete)
603 continue; 603 continue;
604 604
605 if (r2t->recovery_r2t) 605 if (r2t->recovery_r2t)
606 continue; 606 continue;
607 607
608 /* 608 /*
609 * DataSequenceInOrder=Yes: 609 * DataSequenceInOrder=Yes:
610 * 610 *
611 * Taking into account the iSCSI implementation requirement of 611 * Taking into account the iSCSI implementation requirement of
612 * MaxOutstandingR2T=1 while ErrorRecoveryLevel>0 and 612 * MaxOutstandingR2T=1 while ErrorRecoveryLevel>0 and
613 * DataSequenceInOrder=Yes, we must take into consideration 613 * DataSequenceInOrder=Yes, we must take into consideration
614 * the following: 614 * the following:
615 * 615 *
616 * DataSequenceInOrder=No: 616 * DataSequenceInOrder=No:
617 * 617 *
618 * Taking into account that the Initiator controls the (possibly 618 * Taking into account that the Initiator controls the (possibly
619 * random) PDU Order in (possibly random) Sequence Order of 619 * random) PDU Order in (possibly random) Sequence Order of
620 * DataOUT the target requests with R2Ts, we must take into 620 * DataOUT the target requests with R2Ts, we must take into
621 * consideration the following: 621 * consideration the following:
622 * 622 *
623 * DataPDUInOrder=Yes for DataSequenceInOrder=[Yes,No]: 623 * DataPDUInOrder=Yes for DataSequenceInOrder=[Yes,No]:
624 * 624 *
625 * While processing non-complete R2T DataOUT sequence requests 625 * While processing non-complete R2T DataOUT sequence requests
626 * the Target will re-request only the total sequence length 626 * the Target will re-request only the total sequence length
627 * minus current received offset. This is because we must 627 * minus current received offset. This is because we must
628 * assume the initiator will continue sending DataOUT from the 628 * assume the initiator will continue sending DataOUT from the
629 * last PDU before the connection failed. 629 * last PDU before the connection failed.
630 * 630 *
631 * DataPDUInOrder=No for DataSequenceInOrder=[Yes,No]: 631 * DataPDUInOrder=No for DataSequenceInOrder=[Yes,No]:
632 * 632 *
633 * While processing non-complete R2T DataOUT sequence requests 633 * While processing non-complete R2T DataOUT sequence requests
634 * the Target will re-request the entire DataOUT sequence if 634 * the Target will re-request the entire DataOUT sequence if
635 * any single PDU is missing from the sequence. This is because 635 * any single PDU is missing from the sequence. This is because
636 * we have no logical method to determine the next PDU offset, 636 * we have no logical method to determine the next PDU offset,
637 * and we must assume the Initiator will be sending any random 637 * and we must assume the Initiator will be sending any random
638 * PDU offset in the current sequence after TASK_REASSIGN 638 * PDU offset in the current sequence after TASK_REASSIGN
639 * has completed. 639 * has completed.
640 */ 640 */
641 if (conn->sess->sess_ops->DataSequenceInOrder) { 641 if (conn->sess->sess_ops->DataSequenceInOrder) {
642 if (!first_incomplete_r2t) { 642 if (!first_incomplete_r2t) {
643 cmd->r2t_offset -= r2t->xfer_len; 643 cmd->r2t_offset -= r2t->xfer_len;
644 goto next; 644 goto next;
645 } 645 }
646 646
647 if (conn->sess->sess_ops->DataPDUInOrder) { 647 if (conn->sess->sess_ops->DataPDUInOrder) {
648 cmd->data_sn = 0; 648 cmd->data_sn = 0;
649 cmd->r2t_offset -= (r2t->xfer_len - 649 cmd->r2t_offset -= (r2t->xfer_len -
650 cmd->next_burst_len); 650 cmd->next_burst_len);
651 first_incomplete_r2t = 0; 651 first_incomplete_r2t = 0;
652 goto next; 652 goto next;
653 } 653 }
654 654
655 cmd->data_sn = 0; 655 cmd->data_sn = 0;
656 cmd->r2t_offset -= r2t->xfer_len; 656 cmd->r2t_offset -= r2t->xfer_len;
657 657
658 for (i = 0; i < cmd->pdu_count; i++) { 658 for (i = 0; i < cmd->pdu_count; i++) {
659 pdu = &cmd->pdu_list[i]; 659 pdu = &cmd->pdu_list[i];
660 660
661 if (pdu->status != ISCSI_PDU_RECEIVED_OK) 661 if (pdu->status != ISCSI_PDU_RECEIVED_OK)
662 continue; 662 continue;
663 663
664 if ((pdu->offset >= r2t->offset) && 664 if ((pdu->offset >= r2t->offset) &&
665 (pdu->offset < (r2t->offset + 665 (pdu->offset < (r2t->offset +
666 r2t->xfer_len))) { 666 r2t->xfer_len))) {
667 cmd->next_burst_len -= pdu->length; 667 cmd->next_burst_len -= pdu->length;
668 cmd->write_data_done -= pdu->length; 668 cmd->write_data_done -= pdu->length;
669 pdu->status = ISCSI_PDU_NOT_RECEIVED; 669 pdu->status = ISCSI_PDU_NOT_RECEIVED;
670 } 670 }
671 } 671 }
672 672
673 first_incomplete_r2t = 0; 673 first_incomplete_r2t = 0;
674 } else { 674 } else {
675 struct iscsi_seq *seq; 675 struct iscsi_seq *seq;
676 676
677 seq = iscsit_get_seq_holder(cmd, r2t->offset, 677 seq = iscsit_get_seq_holder(cmd, r2t->offset,
678 r2t->xfer_len); 678 r2t->xfer_len);
679 if (!seq) { 679 if (!seq) {
680 spin_unlock_bh(&cmd->r2t_lock); 680 spin_unlock_bh(&cmd->r2t_lock);
681 return -1; 681 return -1;
682 } 682 }
683 683
684 cmd->write_data_done -= 684 cmd->write_data_done -=
685 (seq->offset - seq->orig_offset); 685 (seq->offset - seq->orig_offset);
686 seq->data_sn = 0; 686 seq->data_sn = 0;
687 seq->offset = seq->orig_offset; 687 seq->offset = seq->orig_offset;
688 seq->next_burst_len = 0; 688 seq->next_burst_len = 0;
689 seq->status = DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY; 689 seq->status = DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY;
690 690
691 cmd->seq_send_order--; 691 cmd->seq_send_order--;
692 692
693 if (conn->sess->sess_ops->DataPDUInOrder) 693 if (conn->sess->sess_ops->DataPDUInOrder)
694 goto next; 694 goto next;
695 695
696 for (i = 0; i < seq->pdu_count; i++) { 696 for (i = 0; i < seq->pdu_count; i++) {
697 pdu = &cmd->pdu_list[i+seq->pdu_start]; 697 pdu = &cmd->pdu_list[i+seq->pdu_start];
698 698
699 if (pdu->status != ISCSI_PDU_RECEIVED_OK) 699 if (pdu->status != ISCSI_PDU_RECEIVED_OK)
700 continue; 700 continue;
701 701
702 pdu->status = ISCSI_PDU_NOT_RECEIVED; 702 pdu->status = ISCSI_PDU_NOT_RECEIVED;
703 } 703 }
704 } 704 }
705 705
706 next: 706 next:
707 cmd->outstanding_r2ts--; 707 cmd->outstanding_r2ts--;
708 } 708 }
709 spin_unlock_bh(&cmd->r2t_lock); 709 spin_unlock_bh(&cmd->r2t_lock);
710 710
711 /* 711 /*
712 * We now drop all unacknowledged R2Ts, i.e. ExpDataSN from the TMR 712 * We now drop all unacknowledged R2Ts, i.e. ExpDataSN from the TMR
713 * TASK_REASSIGN to the last R2T in the list. We are also careful 713 * TASK_REASSIGN to the last R2T in the list. We are also careful
714 * to check that the Initiator is not requesting R2Ts for DataOUT 714 * to check that the Initiator is not requesting R2Ts for DataOUT
715 * sequences it has already completed. 715 * sequences it has already completed.
716 * 716 *
717 * Free each R2T in question and adjust values in struct iscsi_cmd 717 * Free each R2T in question and adjust values in struct iscsi_cmd
718 * accordingly so iscsit_build_r2ts_for_cmd() does the rest of 718 * accordingly so iscsit_build_r2ts_for_cmd() does the rest of
719 * the work after the TMR TASK_REASSIGN Response is sent. 719 * the work after the TMR TASK_REASSIGN Response is sent.
720 */ 720 */
721 drop_unacknowledged_r2ts: 721 drop_unacknowledged_r2ts:
722 722
723 cmd->cmd_flags &= ~ICF_SENT_LAST_R2T; 723 cmd->cmd_flags &= ~ICF_SENT_LAST_R2T;
724 cmd->r2t_sn = tmr_req->exp_data_sn; 724 cmd->r2t_sn = tmr_req->exp_data_sn;
725 725
726 spin_lock_bh(&cmd->r2t_lock); 726 spin_lock_bh(&cmd->r2t_lock);
727 list_for_each_entry_safe(r2t, r2t_tmp, &cmd->cmd_r2t_list, r2t_list) { 727 list_for_each_entry_safe(r2t, r2t_tmp, &cmd->cmd_r2t_list, r2t_list) {
728 /* 728 /*
729 * Skip up to the R2T Sequence number provided by the 729 * Skip up to the R2T Sequence number provided by the
730 * iSCSI TASK_REASSIGN TMR 730 * iSCSI TASK_REASSIGN TMR
731 */ 731 */
732 if (r2t->r2t_sn < tmr_req->exp_data_sn) 732 if (r2t->r2t_sn < tmr_req->exp_data_sn)
733 continue; 733 continue;
734 734
735 if (r2t->seq_complete) { 735 if (r2t->seq_complete) {
736 pr_err("Initiator is requesting R2Ts from" 736 pr_err("Initiator is requesting R2Ts from"
737 " R2TSN: 0x%08x, but R2TSN: 0x%08x, Offset: %u," 737 " R2TSN: 0x%08x, but R2TSN: 0x%08x, Offset: %u,"
738 " Length: %u is already complete." 738 " Length: %u is already complete."
739 " BAD INITIATOR ERL=2 IMPLEMENTATION!\n", 739 " BAD INITIATOR ERL=2 IMPLEMENTATION!\n",
740 tmr_req->exp_data_sn, r2t->r2t_sn, 740 tmr_req->exp_data_sn, r2t->r2t_sn,
741 r2t->offset, r2t->xfer_len); 741 r2t->offset, r2t->xfer_len);
742 spin_unlock_bh(&cmd->r2t_lock); 742 spin_unlock_bh(&cmd->r2t_lock);
743 return -1; 743 return -1;
744 } 744 }
745 745
746 if (r2t->recovery_r2t) { 746 if (r2t->recovery_r2t) {
747 iscsit_free_r2t(r2t, cmd); 747 iscsit_free_r2t(r2t, cmd);
748 continue; 748 continue;
749 } 749 }
750 750
751 /* DataSequenceInOrder=Yes: 751 /* DataSequenceInOrder=Yes:
752 * 752 *
753 * Taking into account the iSCSI implementation requirement of 753 * Taking into account the iSCSI implementation requirement of
754 * MaxOutstandingR2T=1 while ErrorRecoveryLevel>0 and 754 * MaxOutstandingR2T=1 while ErrorRecoveryLevel>0 and
755 * DataSequenceInOrder=Yes, it's safe to subtract the R2T's 755 * DataSequenceInOrder=Yes, it's safe to subtract the R2T's
756 * entire transfer length from the command's R2T offset marker. 756 * entire transfer length from the command's R2T offset marker.
757 * 757 *
758 * DataSequenceInOrder=No: 758 * DataSequenceInOrder=No:
759 * 759 *
760 * We subtract the difference in struct iscsi_seq between the 760 * We subtract the difference in struct iscsi_seq between the
761 * current offset and original offset from cmd->write_data_done 761 * current offset and original offset from cmd->write_data_done
762 * to account for DataOUT PDUs already received. Then reset 762 * to account for DataOUT PDUs already received. Then reset
763 * the current offset to the original and zero out the current 763 * the current offset to the original and zero out the current
764 * burst length, to make sure we re-request the entire DataOUT 764 * burst length, to make sure we re-request the entire DataOUT
765 * sequence. 765 * sequence.
766 */ 766 */
767 if (conn->sess->sess_ops->DataSequenceInOrder) 767 if (conn->sess->sess_ops->DataSequenceInOrder)
768 cmd->r2t_offset -= r2t->xfer_len; 768 cmd->r2t_offset -= r2t->xfer_len;
769 else 769 else
770 cmd->seq_send_order--; 770 cmd->seq_send_order--;
771 771
772 cmd->outstanding_r2ts--; 772 cmd->outstanding_r2ts--;
773 iscsit_free_r2t(r2t, cmd); 773 iscsit_free_r2t(r2t, cmd);
774 } 774 }
775 spin_unlock_bh(&cmd->r2t_lock); 775 spin_unlock_bh(&cmd->r2t_lock);
776 776
777 return 0; 777 return 0;
778 } 778 }
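The subtle case in the rewind above is the first incomplete R2T when DataPDUInOrder=Yes: only the unreceived tail of the current burst (xfer_len minus next_burst_len) is re-requested, while every later outstanding R2T is rewound in full. A self-contained sketch of that arithmetic; the structs here are simplified stand-ins for illustration, not the kernel definitions:

#include <assert.h>
#include <stdio.h>

/* Simplified stand-ins for struct iscsi_r2t / struct iscsi_cmd. */
struct r2t_ex { unsigned int xfer_len; };
struct cmd_ex { unsigned int r2t_offset, next_burst_len; };

/*
 * Rewind cmd->r2t_offset for one outstanding R2T during TASK_REASSIGN,
 * mirroring the DataSequenceInOrder=Yes, DataPDUInOrder=Yes branch above.
 */
static void rewind_r2t(struct cmd_ex *cmd, const struct r2t_ex *r2t,
		       int first_incomplete)
{
	if (first_incomplete)
		/* Re-request only the unreceived tail of the burst. */
		cmd->r2t_offset -= r2t->xfer_len - cmd->next_burst_len;
	else
		/* Every later R2T is re-requested in full. */
		cmd->r2t_offset -= r2t->xfer_len;
}

int main(void)
{
	struct cmd_ex cmd = { .r2t_offset = 196608, .next_burst_len = 32768 };
	struct r2t_ex r2t = { .xfer_len = 65536 };

	rewind_r2t(&cmd, &r2t, 1);
	assert(cmd.r2t_offset == 163840);	/* 196608 - (65536 - 32768) */
	printf("rewound r2t_offset: %u\n", cmd.r2t_offset);
	return 0;
}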
779 779
780 /* 780 /*
781 * Performs sanity checks on a TMR TASK_REASSIGN's ExpDataSN for 781 * Performs sanity checks on a TMR TASK_REASSIGN's ExpDataSN for
782 * a given struct iscsi_cmd. 782 * a given struct iscsi_cmd.
783 */ 783 */
784 int iscsit_check_task_reassign_expdatasn( 784 int iscsit_check_task_reassign_expdatasn(
785 struct iscsi_tmr_req *tmr_req, 785 struct iscsi_tmr_req *tmr_req,
786 struct iscsi_conn *conn) 786 struct iscsi_conn *conn)
787 { 787 {
788 struct se_tmr_req *se_tmr = tmr_req->se_tmr_req; 788 struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
789 struct se_cmd *se_cmd = se_tmr->ref_cmd; 789 struct se_cmd *se_cmd = se_tmr->ref_cmd;
790 struct iscsi_cmd *ref_cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); 790 struct iscsi_cmd *ref_cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
791 791
792 if (ref_cmd->iscsi_opcode != ISCSI_OP_SCSI_CMD) 792 if (ref_cmd->iscsi_opcode != ISCSI_OP_SCSI_CMD)
793 return 0; 793 return 0;
794 794
795 if (se_cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) 795 if (se_cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION)
796 return 0; 796 return 0;
797 797
798 if (ref_cmd->data_direction == DMA_NONE) 798 if (ref_cmd->data_direction == DMA_NONE)
799 return 0; 799 return 0;
800 800
801 /* 801 /*
802 * For READs the TMR TASK_REASSIGN's ExpDataSN contains the next DataSN 802 * For READs the TMR TASK_REASSIGN's ExpDataSN contains the next DataSN
803 * of DataIN the Initiator is expecting. 803 * of DataIN the Initiator is expecting.
804 * 804 *
805 * Also check that the Initiator is not re-requesting DataIN that has 805 * Also check that the Initiator is not re-requesting DataIN that has
806 * already been acknowledged with a DataAck SNACK. 806 * already been acknowledged with a DataAck SNACK.
807 */ 807 */
808 if (ref_cmd->data_direction == DMA_FROM_DEVICE) { 808 if (ref_cmd->data_direction == DMA_FROM_DEVICE) {
809 if (tmr_req->exp_data_sn > ref_cmd->data_sn) { 809 if (tmr_req->exp_data_sn > ref_cmd->data_sn) {
810 pr_err("Received ExpDataSN: 0x%08x for READ" 810 pr_err("Received ExpDataSN: 0x%08x for READ"
811 " in TMR TASK_REASSIGN greater than command's" 811 " in TMR TASK_REASSIGN greater than command's"
812 " DataSN: 0x%08x.\n", tmr_req->exp_data_sn, 812 " DataSN: 0x%08x.\n", tmr_req->exp_data_sn,
813 ref_cmd->data_sn); 813 ref_cmd->data_sn);
814 return -1; 814 return -1;
815 } 815 }
816 if ((ref_cmd->cmd_flags & ICF_GOT_DATACK_SNACK) && 816 if ((ref_cmd->cmd_flags & ICF_GOT_DATACK_SNACK) &&
817 (tmr_req->exp_data_sn <= ref_cmd->acked_data_sn)) { 817 (tmr_req->exp_data_sn <= ref_cmd->acked_data_sn)) {
818 pr_err("Received ExpDataSN: 0x%08x for READ" 818 pr_err("Received ExpDataSN: 0x%08x for READ"
819 " in TMR TASK_REASSIGN for previously" 819 " in TMR TASK_REASSIGN for previously"
820 " acknowledged DataIN: 0x%08x," 820 " acknowledged DataIN: 0x%08x,"
821 " protocol error\n", tmr_req->exp_data_sn, 821 " protocol error\n", tmr_req->exp_data_sn,
822 ref_cmd->acked_data_sn); 822 ref_cmd->acked_data_sn);
823 return -1; 823 return -1;
824 } 824 }
825 return iscsit_task_reassign_prepare_read(tmr_req, conn); 825 return iscsit_task_reassign_prepare_read(tmr_req, conn);
826 } 826 }
827 827
828 /* 828 /*
829 * For WRITEs the TMR TASK_REASSIGN's ExpDataSN contains the next R2TSN 829 * For WRITEs the TMR TASK_REASSIGN's ExpDataSN contains the next R2TSN
830 * for R2Ts the Initiator is expecting. 830 * for R2Ts the Initiator is expecting.
831 * 831 *
832 * Do the magic in iscsit_task_reassign_prepare_write(). 832 * Do the magic in iscsit_task_reassign_prepare_write().
833 */ 833 */
834 if (ref_cmd->data_direction == DMA_TO_DEVICE) { 834 if (ref_cmd->data_direction == DMA_TO_DEVICE) {
835 if (tmr_req->exp_data_sn > ref_cmd->r2t_sn) { 835 if (tmr_req->exp_data_sn > ref_cmd->r2t_sn) {
836 pr_err("Received ExpDataSN: 0x%08x for WRITE" 836 pr_err("Received ExpDataSN: 0x%08x for WRITE"
837 " in TMR TASK_REASSIGN greater than command's" 837 " in TMR TASK_REASSIGN greater than command's"
838 " R2TSN: 0x%08x.\n", tmr_req->exp_data_sn, 838 " R2TSN: 0x%08x.\n", tmr_req->exp_data_sn,
839 ref_cmd->r2t_sn); 839 ref_cmd->r2t_sn);
840 return -1; 840 return -1;
841 } 841 }
842 return iscsit_task_reassign_prepare_write(tmr_req, conn); 842 return iscsit_task_reassign_prepare_write(tmr_req, conn);
843 } 843 }
844 844
845 pr_err("Unknown iSCSI data_direction: 0x%02x\n", 845 pr_err("Unknown iSCSI data_direction: 0x%02x\n",
846 ref_cmd->data_direction); 846 ref_cmd->data_direction);
847 847
848 return -1; 848 return -1;
849 } 849 }
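Condensed, the two direction-specific checks bound ExpDataSN to a window: never beyond the next DataSN (READ) or R2TSN (WRITE) the target would issue, and for READs never at or below a DataSN already covered by a DataAck SNACK. A userspace sketch of just that window test, with simplified fields standing in for the kernel state:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the reference command's sequence state. */
struct ref_ex {
	bool is_write;			/* DMA_TO_DEVICE vs. DMA_FROM_DEVICE */
	bool got_datack_snack;		/* ICF_GOT_DATACK_SNACK */
	unsigned int data_sn;		/* next DataSN for READs */
	unsigned int r2t_sn;		/* next R2TSN for WRITEs */
	unsigned int acked_data_sn;	/* last DataAck'ed DataSN */
};

/* True when exp_data_sn would pass the TASK_REASSIGN sanity checks above. */
static bool expdatasn_ok(const struct ref_ex *ref, unsigned int exp_data_sn)
{
	if (ref->is_write)
		return exp_data_sn <= ref->r2t_sn;
	if (exp_data_sn > ref->data_sn)
		return false;	/* beyond what was ever sent */
	if (ref->got_datack_snack && exp_data_sn <= ref->acked_data_sn)
		return false;	/* re-requesting acknowledged DataIN */
	return true;
}

int main(void)
{
	struct ref_ex r = { .is_write = false, .got_datack_snack = true,
			    .data_sn = 8, .acked_data_sn = 4 };

	printf("%d %d %d\n",
	       expdatasn_ok(&r, 5),	/* 1: inside the window */
	       expdatasn_ok(&r, 4),	/* 0: already acknowledged */
	       expdatasn_ok(&r, 9));	/* 0: beyond the next DataSN */
	return 0;
}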
850 850
drivers/target/iscsi/iscsi_target_tpg.c
1 /******************************************************************************* 1 /*******************************************************************************
2 * This file contains iSCSI Target Portal Group related functions. 2 * This file contains iSCSI Target Portal Group related functions.
3 * 3 *
4 * © Copyright 2007-2011 RisingTide Systems LLC. 4 * © Copyright 2007-2011 RisingTide Systems LLC.
5 * 5 *
6 * Licensed to the Linux Foundation under the General Public License (GPL) version 2. 6 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
7 * 7 *
8 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> 8 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by 11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or 12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version. 13 * (at your option) any later version.
14 * 14 *
15 * This program is distributed in the hope that it will be useful, 15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details. 18 * GNU General Public License for more details.
19 ******************************************************************************/ 19 ******************************************************************************/
20 20
21 #include <target/target_core_base.h> 21 #include <target/target_core_base.h>
22 #include <target/target_core_transport.h> 22 #include <target/target_core_fabric.h>
23 #include <target/target_core_fabric_ops.h>
24 #include <target/target_core_configfs.h> 23 #include <target/target_core_configfs.h>
25 #include <target/target_core_tpg.h>
26 24
27 #include "iscsi_target_core.h" 25 #include "iscsi_target_core.h"
28 #include "iscsi_target_erl0.h" 26 #include "iscsi_target_erl0.h"
29 #include "iscsi_target_login.h" 27 #include "iscsi_target_login.h"
30 #include "iscsi_target_nodeattrib.h" 28 #include "iscsi_target_nodeattrib.h"
31 #include "iscsi_target_tpg.h" 29 #include "iscsi_target_tpg.h"
32 #include "iscsi_target_util.h" 30 #include "iscsi_target_util.h"
33 #include "iscsi_target.h" 31 #include "iscsi_target.h"
34 #include "iscsi_target_parameters.h" 32 #include "iscsi_target_parameters.h"
35 33
36 struct iscsi_portal_group *iscsit_alloc_portal_group(struct iscsi_tiqn *tiqn, u16 tpgt) 34 struct iscsi_portal_group *iscsit_alloc_portal_group(struct iscsi_tiqn *tiqn, u16 tpgt)
37 { 35 {
38 struct iscsi_portal_group *tpg; 36 struct iscsi_portal_group *tpg;
39 37
40 tpg = kzalloc(sizeof(struct iscsi_portal_group), GFP_KERNEL); 38 tpg = kzalloc(sizeof(struct iscsi_portal_group), GFP_KERNEL);
41 if (!tpg) { 39 if (!tpg) {
42 pr_err("Unable to allocate struct iscsi_portal_group\n"); 40 pr_err("Unable to allocate struct iscsi_portal_group\n");
43 return NULL; 41 return NULL;
44 } 42 }
45 43
46 tpg->tpgt = tpgt; 44 tpg->tpgt = tpgt;
47 tpg->tpg_state = TPG_STATE_FREE; 45 tpg->tpg_state = TPG_STATE_FREE;
48 tpg->tpg_tiqn = tiqn; 46 tpg->tpg_tiqn = tiqn;
49 INIT_LIST_HEAD(&tpg->tpg_gnp_list); 47 INIT_LIST_HEAD(&tpg->tpg_gnp_list);
50 INIT_LIST_HEAD(&tpg->tpg_list); 48 INIT_LIST_HEAD(&tpg->tpg_list);
51 mutex_init(&tpg->tpg_access_lock); 49 mutex_init(&tpg->tpg_access_lock);
52 mutex_init(&tpg->np_login_lock); 50 mutex_init(&tpg->np_login_lock);
53 spin_lock_init(&tpg->tpg_state_lock); 51 spin_lock_init(&tpg->tpg_state_lock);
54 spin_lock_init(&tpg->tpg_np_lock); 52 spin_lock_init(&tpg->tpg_np_lock);
55 53
56 return tpg; 54 return tpg;
57 } 55 }
58 56
59 static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *); 57 static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *);
60 58
61 int iscsit_load_discovery_tpg(void) 59 int iscsit_load_discovery_tpg(void)
62 { 60 {
63 struct iscsi_param *param; 61 struct iscsi_param *param;
64 struct iscsi_portal_group *tpg; 62 struct iscsi_portal_group *tpg;
65 int ret; 63 int ret;
66 64
67 tpg = iscsit_alloc_portal_group(NULL, 1); 65 tpg = iscsit_alloc_portal_group(NULL, 1);
68 if (!tpg) { 66 if (!tpg) {
69 pr_err("Unable to allocate struct iscsi_portal_group\n"); 67 pr_err("Unable to allocate struct iscsi_portal_group\n");
70 return -1; 68 return -1;
71 } 69 }
72 70
73 ret = core_tpg_register( 71 ret = core_tpg_register(
74 &lio_target_fabric_configfs->tf_ops, 72 &lio_target_fabric_configfs->tf_ops,
75 NULL, &tpg->tpg_se_tpg, (void *)tpg, 73 NULL, &tpg->tpg_se_tpg, (void *)tpg,
76 TRANSPORT_TPG_TYPE_DISCOVERY); 74 TRANSPORT_TPG_TYPE_DISCOVERY);
77 if (ret < 0) { 75 if (ret < 0) {
78 kfree(tpg); 76 kfree(tpg);
79 return -1; 77 return -1;
80 } 78 }
81 79
82 tpg->sid = 1; /* First Assigned LIO Session ID */ 80 tpg->sid = 1; /* First Assigned LIO Session ID */
83 iscsit_set_default_tpg_attribs(tpg); 81 iscsit_set_default_tpg_attribs(tpg);
84 82
85 if (iscsi_create_default_params(&tpg->param_list) < 0) 83 if (iscsi_create_default_params(&tpg->param_list) < 0)
86 goto out; 84 goto out;
87 /* 85 /*
88 * By default we disable authentication for discovery sessions, 86 * By default we disable authentication for discovery sessions,
89 * this can be changed with: 87 * this can be changed with:
90 * 88 *
91 * /sys/kernel/config/target/iscsi/discovery_auth/enforce_discovery_auth 89 * /sys/kernel/config/target/iscsi/discovery_auth/enforce_discovery_auth
92 */ 90 */
93 param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list); 91 param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list);
94 if (!param) 92 if (!param)
95 goto out; 93 goto out;
96 94
97 if (iscsi_update_param_value(param, "CHAP,None") < 0) 95 if (iscsi_update_param_value(param, "CHAP,None") < 0)
98 goto out; 96 goto out;
99 97
100 tpg->tpg_attrib.authentication = 0; 98 tpg->tpg_attrib.authentication = 0;
101 99
102 spin_lock(&tpg->tpg_state_lock); 100 spin_lock(&tpg->tpg_state_lock);
103 tpg->tpg_state = TPG_STATE_ACTIVE; 101 tpg->tpg_state = TPG_STATE_ACTIVE;
104 spin_unlock(&tpg->tpg_state_lock); 102 spin_unlock(&tpg->tpg_state_lock);
105 103
106 iscsit_global->discovery_tpg = tpg; 104 iscsit_global->discovery_tpg = tpg;
107 pr_debug("CORE[0] - Allocated Discovery TPG\n"); 105 pr_debug("CORE[0] - Allocated Discovery TPG\n");
108 106
109 return 0; 107 return 0;
110 out: 108 out:
111 if (tpg->sid == 1) 109 if (tpg->sid == 1)
112 core_tpg_deregister(&tpg->tpg_se_tpg); 110 core_tpg_deregister(&tpg->tpg_se_tpg);
113 kfree(tpg); 111 kfree(tpg);
114 return -1; 112 return -1;
115 } 113 }
116 114
117 void iscsit_release_discovery_tpg(void) 115 void iscsit_release_discovery_tpg(void)
118 { 116 {
119 struct iscsi_portal_group *tpg = iscsit_global->discovery_tpg; 117 struct iscsi_portal_group *tpg = iscsit_global->discovery_tpg;
120 118
121 if (!tpg) 119 if (!tpg)
122 return; 120 return;
123 121
124 core_tpg_deregister(&tpg->tpg_se_tpg); 122 core_tpg_deregister(&tpg->tpg_se_tpg);
125 123
126 kfree(tpg); 124 kfree(tpg);
127 iscsit_global->discovery_tpg = NULL; 125 iscsit_global->discovery_tpg = NULL;
128 } 126 }
129 127
130 struct iscsi_portal_group *iscsit_get_tpg_from_np( 128 struct iscsi_portal_group *iscsit_get_tpg_from_np(
131 struct iscsi_tiqn *tiqn, 129 struct iscsi_tiqn *tiqn,
132 struct iscsi_np *np) 130 struct iscsi_np *np)
133 { 131 {
134 struct iscsi_portal_group *tpg = NULL; 132 struct iscsi_portal_group *tpg = NULL;
135 struct iscsi_tpg_np *tpg_np; 133 struct iscsi_tpg_np *tpg_np;
136 134
137 spin_lock(&tiqn->tiqn_tpg_lock); 135 spin_lock(&tiqn->tiqn_tpg_lock);
138 list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) { 136 list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
139 137
140 spin_lock(&tpg->tpg_state_lock); 138 spin_lock(&tpg->tpg_state_lock);
141 if (tpg->tpg_state == TPG_STATE_FREE) { 139 if (tpg->tpg_state == TPG_STATE_FREE) {
142 spin_unlock(&tpg->tpg_state_lock); 140 spin_unlock(&tpg->tpg_state_lock);
143 continue; 141 continue;
144 } 142 }
145 spin_unlock(&tpg->tpg_state_lock); 143 spin_unlock(&tpg->tpg_state_lock);
146 144
147 spin_lock(&tpg->tpg_np_lock); 145 spin_lock(&tpg->tpg_np_lock);
148 list_for_each_entry(tpg_np, &tpg->tpg_gnp_list, tpg_np_list) { 146 list_for_each_entry(tpg_np, &tpg->tpg_gnp_list, tpg_np_list) {
149 if (tpg_np->tpg_np == np) { 147 if (tpg_np->tpg_np == np) {
150 spin_unlock(&tpg->tpg_np_lock); 148 spin_unlock(&tpg->tpg_np_lock);
151 spin_unlock(&tiqn->tiqn_tpg_lock); 149 spin_unlock(&tiqn->tiqn_tpg_lock);
152 return tpg; 150 return tpg;
153 } 151 }
154 } 152 }
155 spin_unlock(&tpg->tpg_np_lock); 153 spin_unlock(&tpg->tpg_np_lock);
156 } 154 }
157 spin_unlock(&tiqn->tiqn_tpg_lock); 155 spin_unlock(&tiqn->tiqn_tpg_lock);
158 156
159 return NULL; 157 return NULL;
160 } 158 }
161 159
162 int iscsit_get_tpg( 160 int iscsit_get_tpg(
163 struct iscsi_portal_group *tpg) 161 struct iscsi_portal_group *tpg)
164 { 162 {
165 int ret; 163 int ret;
166 164
167 ret = mutex_lock_interruptible(&tpg->tpg_access_lock); 165 ret = mutex_lock_interruptible(&tpg->tpg_access_lock);
168 return ((ret != 0) || signal_pending(current)) ? -1 : 0; 166 return ((ret != 0) || signal_pending(current)) ? -1 : 0;
169 } 167 }
170 168
171 void iscsit_put_tpg(struct iscsi_portal_group *tpg) 169 void iscsit_put_tpg(struct iscsi_portal_group *tpg)
172 { 170 {
173 mutex_unlock(&tpg->tpg_access_lock); 171 mutex_unlock(&tpg->tpg_access_lock);
174 } 172 }
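Because the access mutex is taken interruptibly, iscsit_get_tpg() behaves like a conditional lock: a negative return means no reference was taken and iscsit_put_tpg() must not be called. A caller-side sketch (the caller is hypothetical, and the fragment assumes the iSCSI target headers and process context rather than being standalone-buildable):

/* Hypothetical caller illustrating the get/put pairing. */
static int example_tpg_op(struct iscsi_portal_group *tpg)
{
	if (iscsit_get_tpg(tpg) < 0)
		return -EINTR;	/* interrupted: no reference held, no put */

	/* ... operate on the TPG under tpg_access_lock ... */

	iscsit_put_tpg(tpg);
	return 0;
}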
175 173
176 static void iscsit_clear_tpg_np_login_thread( 174 static void iscsit_clear_tpg_np_login_thread(
177 struct iscsi_tpg_np *tpg_np, 175 struct iscsi_tpg_np *tpg_np,
178 struct iscsi_portal_group *tpg) 176 struct iscsi_portal_group *tpg)
179 { 177 {
180 if (!tpg_np->tpg_np) { 178 if (!tpg_np->tpg_np) {
181 pr_err("struct iscsi_tpg_np->tpg_np is NULL!\n"); 179 pr_err("struct iscsi_tpg_np->tpg_np is NULL!\n");
182 return; 180 return;
183 } 181 }
184 182
185 iscsit_reset_np_thread(tpg_np->tpg_np, tpg_np, tpg); 183 iscsit_reset_np_thread(tpg_np->tpg_np, tpg_np, tpg);
186 } 184 }
187 185
188 void iscsit_clear_tpg_np_login_threads( 186 void iscsit_clear_tpg_np_login_threads(
189 struct iscsi_portal_group *tpg) 187 struct iscsi_portal_group *tpg)
190 { 188 {
191 struct iscsi_tpg_np *tpg_np; 189 struct iscsi_tpg_np *tpg_np;
192 190
193 spin_lock(&tpg->tpg_np_lock); 191 spin_lock(&tpg->tpg_np_lock);
194 list_for_each_entry(tpg_np, &tpg->tpg_gnp_list, tpg_np_list) { 192 list_for_each_entry(tpg_np, &tpg->tpg_gnp_list, tpg_np_list) {
195 if (!tpg_np->tpg_np) { 193 if (!tpg_np->tpg_np) {
196 pr_err("struct iscsi_tpg_np->tpg_np is NULL!\n"); 194 pr_err("struct iscsi_tpg_np->tpg_np is NULL!\n");
197 continue; 195 continue;
198 } 196 }
199 spin_unlock(&tpg->tpg_np_lock); 197 spin_unlock(&tpg->tpg_np_lock);
200 iscsit_clear_tpg_np_login_thread(tpg_np, tpg); 198 iscsit_clear_tpg_np_login_thread(tpg_np, tpg);
201 spin_lock(&tpg->tpg_np_lock); 199 spin_lock(&tpg->tpg_np_lock);
202 } 200 }
203 spin_unlock(&tpg->tpg_np_lock); 201 spin_unlock(&tpg->tpg_np_lock);
204 } 202 }
205 203
206 void iscsit_tpg_dump_params(struct iscsi_portal_group *tpg) 204 void iscsit_tpg_dump_params(struct iscsi_portal_group *tpg)
207 { 205 {
208 iscsi_print_params(tpg->param_list); 206 iscsi_print_params(tpg->param_list);
209 } 207 }
210 208
211 static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *tpg) 209 static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *tpg)
212 { 210 {
213 struct iscsi_tpg_attrib *a = &tpg->tpg_attrib; 211 struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
214 212
215 a->authentication = TA_AUTHENTICATION; 213 a->authentication = TA_AUTHENTICATION;
216 a->login_timeout = TA_LOGIN_TIMEOUT; 214 a->login_timeout = TA_LOGIN_TIMEOUT;
217 a->netif_timeout = TA_NETIF_TIMEOUT; 215 a->netif_timeout = TA_NETIF_TIMEOUT;
218 a->default_cmdsn_depth = TA_DEFAULT_CMDSN_DEPTH; 216 a->default_cmdsn_depth = TA_DEFAULT_CMDSN_DEPTH;
219 a->generate_node_acls = TA_GENERATE_NODE_ACLS; 217 a->generate_node_acls = TA_GENERATE_NODE_ACLS;
220 a->cache_dynamic_acls = TA_CACHE_DYNAMIC_ACLS; 218 a->cache_dynamic_acls = TA_CACHE_DYNAMIC_ACLS;
221 a->demo_mode_write_protect = TA_DEMO_MODE_WRITE_PROTECT; 219 a->demo_mode_write_protect = TA_DEMO_MODE_WRITE_PROTECT;
222 a->prod_mode_write_protect = TA_PROD_MODE_WRITE_PROTECT; 220 a->prod_mode_write_protect = TA_PROD_MODE_WRITE_PROTECT;
223 } 221 }
224 222
225 int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg) 223 int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg)
226 { 224 {
227 if (tpg->tpg_state != TPG_STATE_FREE) { 225 if (tpg->tpg_state != TPG_STATE_FREE) {
228 pr_err("Unable to add iSCSI Target Portal Group: %d" 226 pr_err("Unable to add iSCSI Target Portal Group: %d"
229 " while not in TPG_STATE_FREE state.\n", tpg->tpgt); 227 " while not in TPG_STATE_FREE state.\n", tpg->tpgt);
230 return -EEXIST; 228 return -EEXIST;
231 } 229 }
232 iscsit_set_default_tpg_attribs(tpg); 230 iscsit_set_default_tpg_attribs(tpg);
233 231
234 if (iscsi_create_default_params(&tpg->param_list) < 0) 232 if (iscsi_create_default_params(&tpg->param_list) < 0)
235 goto err_out; 233 goto err_out;
236 234
237 ISCSI_TPG_ATTRIB(tpg)->tpg = tpg; 235 ISCSI_TPG_ATTRIB(tpg)->tpg = tpg;
238 236
239 spin_lock(&tpg->tpg_state_lock); 237 spin_lock(&tpg->tpg_state_lock);
240 tpg->tpg_state = TPG_STATE_INACTIVE; 238 tpg->tpg_state = TPG_STATE_INACTIVE;
241 spin_unlock(&tpg->tpg_state_lock); 239 spin_unlock(&tpg->tpg_state_lock);
242 240
243 spin_lock(&tiqn->tiqn_tpg_lock); 241 spin_lock(&tiqn->tiqn_tpg_lock);
244 list_add_tail(&tpg->tpg_list, &tiqn->tiqn_tpg_list); 242 list_add_tail(&tpg->tpg_list, &tiqn->tiqn_tpg_list);
245 tiqn->tiqn_ntpgs++; 243 tiqn->tiqn_ntpgs++;
246 pr_debug("CORE[%s]_TPG[%hu] - Added iSCSI Target Portal Group\n", 244 pr_debug("CORE[%s]_TPG[%hu] - Added iSCSI Target Portal Group\n",
247 tiqn->tiqn, tpg->tpgt); 245 tiqn->tiqn, tpg->tpgt);
248 spin_unlock(&tiqn->tiqn_tpg_lock); 246 spin_unlock(&tiqn->tiqn_tpg_lock);
249 247
250 return 0; 248 return 0;
251 err_out: 249 err_out:
252 if (tpg->param_list) { 250 if (tpg->param_list) {
253 iscsi_release_param_list(tpg->param_list); 251 iscsi_release_param_list(tpg->param_list);
254 tpg->param_list = NULL; 252 tpg->param_list = NULL;
255 } 253 }
256 kfree(tpg); 254 kfree(tpg);
257 return -ENOMEM; 255 return -ENOMEM;
258 } 256 }
259 257
260 int iscsit_tpg_del_portal_group( 258 int iscsit_tpg_del_portal_group(
261 struct iscsi_tiqn *tiqn, 259 struct iscsi_tiqn *tiqn,
262 struct iscsi_portal_group *tpg, 260 struct iscsi_portal_group *tpg,
263 int force) 261 int force)
264 { 262 {
265 u8 old_state = tpg->tpg_state; 263 u8 old_state = tpg->tpg_state;
266 264
267 spin_lock(&tpg->tpg_state_lock); 265 spin_lock(&tpg->tpg_state_lock);
268 tpg->tpg_state = TPG_STATE_INACTIVE; 266 tpg->tpg_state = TPG_STATE_INACTIVE;
269 spin_unlock(&tpg->tpg_state_lock); 267 spin_unlock(&tpg->tpg_state_lock);
270 268
271 if (iscsit_release_sessions_for_tpg(tpg, force) < 0) { 269 if (iscsit_release_sessions_for_tpg(tpg, force) < 0) {
272 pr_err("Unable to delete iSCSI Target Portal Group:" 270 pr_err("Unable to delete iSCSI Target Portal Group:"
273 " %hu while active sessions exist, and force=0\n", 271 " %hu while active sessions exist, and force=0\n",
274 tpg->tpgt); 272 tpg->tpgt);
275 tpg->tpg_state = old_state; 273 tpg->tpg_state = old_state;
276 return -EPERM; 274 return -EPERM;
277 } 275 }
278 276
279 core_tpg_clear_object_luns(&tpg->tpg_se_tpg); 277 core_tpg_clear_object_luns(&tpg->tpg_se_tpg);
280 278
281 if (tpg->param_list) { 279 if (tpg->param_list) {
282 iscsi_release_param_list(tpg->param_list); 280 iscsi_release_param_list(tpg->param_list);
283 tpg->param_list = NULL; 281 tpg->param_list = NULL;
284 } 282 }
285 283
286 core_tpg_deregister(&tpg->tpg_se_tpg); 284 core_tpg_deregister(&tpg->tpg_se_tpg);
287 285
288 spin_lock(&tpg->tpg_state_lock); 286 spin_lock(&tpg->tpg_state_lock);
289 tpg->tpg_state = TPG_STATE_FREE; 287 tpg->tpg_state = TPG_STATE_FREE;
290 spin_unlock(&tpg->tpg_state_lock); 288 spin_unlock(&tpg->tpg_state_lock);
291 289
292 spin_lock(&tiqn->tiqn_tpg_lock); 290 spin_lock(&tiqn->tiqn_tpg_lock);
293 tiqn->tiqn_ntpgs--; 291 tiqn->tiqn_ntpgs--;
294 list_del(&tpg->tpg_list); 292 list_del(&tpg->tpg_list);
295 spin_unlock(&tiqn->tiqn_tpg_lock); 293 spin_unlock(&tiqn->tiqn_tpg_lock);
296 294
297 pr_debug("CORE[%s]_TPG[%hu] - Deleted iSCSI Target Portal Group\n", 295 pr_debug("CORE[%s]_TPG[%hu] - Deleted iSCSI Target Portal Group\n",
298 tiqn->tiqn, tpg->tpgt); 296 tiqn->tiqn, tpg->tpgt);
299 297
300 kfree(tpg); 298 kfree(tpg);
301 return 0; 299 return 0;
302 } 300 }
303 301
304 int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg) 302 int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg)
305 { 303 {
306 struct iscsi_param *param; 304 struct iscsi_param *param;
307 struct iscsi_tiqn *tiqn = tpg->tpg_tiqn; 305 struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
308 306
309 spin_lock(&tpg->tpg_state_lock); 307 spin_lock(&tpg->tpg_state_lock);
310 if (tpg->tpg_state == TPG_STATE_ACTIVE) { 308 if (tpg->tpg_state == TPG_STATE_ACTIVE) {
311 pr_err("iSCSI target portal group: %hu is already" 309 pr_err("iSCSI target portal group: %hu is already"
312 " active, ignoring request.\n", tpg->tpgt); 310 " active, ignoring request.\n", tpg->tpgt);
313 spin_unlock(&tpg->tpg_state_lock); 311 spin_unlock(&tpg->tpg_state_lock);
314 return -EINVAL; 312 return -EINVAL;
315 } 313 }
316 /* 314 /*
317 * Make sure that AuthMethod does not contain None as an option 315 * Make sure that AuthMethod does not contain None as an option
318 * unless explicitly disabled. Set the default to CHAP if authentication 316 * unless explicitly disabled. Set the default to CHAP if authentication
319 * is enforced (the default), and remove the NONE option. 317 * is enforced (the default), and remove the NONE option.
320 */ 318 */
321 param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list); 319 param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list);
322 if (!param) { 320 if (!param) {
323 spin_unlock(&tpg->tpg_state_lock); 321 spin_unlock(&tpg->tpg_state_lock);
324 return -ENOMEM; 322 return -ENOMEM;
325 } 323 }
326 324
327 if (ISCSI_TPG_ATTRIB(tpg)->authentication) { 325 if (ISCSI_TPG_ATTRIB(tpg)->authentication) {
328 if (!strcmp(param->value, NONE)) 326 if (!strcmp(param->value, NONE))
329 if (iscsi_update_param_value(param, CHAP) < 0) { 327 if (iscsi_update_param_value(param, CHAP) < 0) {
330 spin_unlock(&tpg->tpg_state_lock); 328 spin_unlock(&tpg->tpg_state_lock);
331 return -ENOMEM; 329 return -ENOMEM;
332 } 330 }
333 if (iscsit_ta_authentication(tpg, 1) < 0) { 331 if (iscsit_ta_authentication(tpg, 1) < 0) {
334 spin_unlock(&tpg->tpg_state_lock); 332 spin_unlock(&tpg->tpg_state_lock);
335 return -ENOMEM; 333 return -ENOMEM;
336 } 334 }
337 } 335 }
338 336
339 tpg->tpg_state = TPG_STATE_ACTIVE; 337 tpg->tpg_state = TPG_STATE_ACTIVE;
340 spin_unlock(&tpg->tpg_state_lock); 338 spin_unlock(&tpg->tpg_state_lock);
341 339
342 spin_lock(&tiqn->tiqn_tpg_lock); 340 spin_lock(&tiqn->tiqn_tpg_lock);
343 tiqn->tiqn_active_tpgs++; 341 tiqn->tiqn_active_tpgs++;
344 pr_debug("iSCSI_TPG[%hu] - Enabled iSCSI Target Portal Group\n", 342 pr_debug("iSCSI_TPG[%hu] - Enabled iSCSI Target Portal Group\n",
345 tpg->tpgt); 343 tpg->tpgt);
346 spin_unlock(&tiqn->tiqn_tpg_lock); 344 spin_unlock(&tiqn->tiqn_tpg_lock);
347 345
348 return 0; 346 return 0;
349 } 347 }
350 348
351 int iscsit_tpg_disable_portal_group(struct iscsi_portal_group *tpg, int force) 349 int iscsit_tpg_disable_portal_group(struct iscsi_portal_group *tpg, int force)
352 { 350 {
353 struct iscsi_tiqn *tiqn; 351 struct iscsi_tiqn *tiqn;
354 u8 old_state = tpg->tpg_state; 352 u8 old_state = tpg->tpg_state;
355 353
356 spin_lock(&tpg->tpg_state_lock); 354 spin_lock(&tpg->tpg_state_lock);
357 if (tpg->tpg_state == TPG_STATE_INACTIVE) { 355 if (tpg->tpg_state == TPG_STATE_INACTIVE) {
358 pr_err("iSCSI Target Portal Group: %hu is already" 356 pr_err("iSCSI Target Portal Group: %hu is already"
359 " inactive, ignoring request.\n", tpg->tpgt); 357 " inactive, ignoring request.\n", tpg->tpgt);
360 spin_unlock(&tpg->tpg_state_lock); 358 spin_unlock(&tpg->tpg_state_lock);
361 return -EINVAL; 359 return -EINVAL;
362 } 360 }
363 tpg->tpg_state = TPG_STATE_INACTIVE; 361 tpg->tpg_state = TPG_STATE_INACTIVE;
364 spin_unlock(&tpg->tpg_state_lock); 362 spin_unlock(&tpg->tpg_state_lock);
365 363
366 iscsit_clear_tpg_np_login_threads(tpg); 364 iscsit_clear_tpg_np_login_threads(tpg);
367 365
368 if (iscsit_release_sessions_for_tpg(tpg, force) < 0) { 366 if (iscsit_release_sessions_for_tpg(tpg, force) < 0) {
369 spin_lock(&tpg->tpg_state_lock); 367 spin_lock(&tpg->tpg_state_lock);
370 tpg->tpg_state = old_state; 368 tpg->tpg_state = old_state;
371 spin_unlock(&tpg->tpg_state_lock); 369 spin_unlock(&tpg->tpg_state_lock);
372 pr_err("Unable to disable iSCSI Target Portal Group:" 370 pr_err("Unable to disable iSCSI Target Portal Group:"
373 " %hu while active sessions exist, and force=0\n", 371 " %hu while active sessions exist, and force=0\n",
374 tpg->tpgt); 372 tpg->tpgt);
375 return -EPERM; 373 return -EPERM;
376 } 374 }
377 375
378 tiqn = tpg->tpg_tiqn; 376 tiqn = tpg->tpg_tiqn;
379 if (!tiqn || (tpg == iscsit_global->discovery_tpg)) 377 if (!tiqn || (tpg == iscsit_global->discovery_tpg))
380 return 0; 378 return 0;
381 379
382 spin_lock(&tiqn->tiqn_tpg_lock); 380 spin_lock(&tiqn->tiqn_tpg_lock);
383 tiqn->tiqn_active_tpgs--; 381 tiqn->tiqn_active_tpgs--;
384 pr_debug("iSCSI_TPG[%hu] - Disabled iSCSI Target Portal Group\n", 382 pr_debug("iSCSI_TPG[%hu] - Disabled iSCSI Target Portal Group\n",
385 tpg->tpgt); 383 tpg->tpgt);
386 spin_unlock(&tiqn->tiqn_tpg_lock); 384 spin_unlock(&tiqn->tiqn_tpg_lock);
387 385
388 return 0; 386 return 0;
389 } 387 }
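Enable and disable are symmetric guarded transitions: both take tpg_state_lock, reject a redundant request with -EINVAL, roll the state back if releasing sessions fails, and only then adjust the tiqn's active-TPG accounting. A minimal userspace model of just the redundancy guard (names and error handling are illustrative, not the kernel's):

#include <errno.h>
#include <stdio.h>

/* Illustrative states; the kernel uses TPG_STATE_{FREE,INACTIVE,ACTIVE}. */
enum tpg_state_ex { TPG_EX_INACTIVE, TPG_EX_ACTIVE };

/* Guard mirroring the "already active/inactive" checks above. */
static int tpg_set_state(enum tpg_state_ex *state, enum tpg_state_ex want)
{
	if (*state == want)
		return -EINVAL;	/* redundant request is rejected */
	*state = want;
	return 0;
}

int main(void)
{
	enum tpg_state_ex state = TPG_EX_INACTIVE;

	printf("%d ", tpg_set_state(&state, TPG_EX_ACTIVE));	/* 0 */
	printf("%d\n", tpg_set_state(&state, TPG_EX_ACTIVE));	/* -22 */
	return 0;
}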
390 388
391 struct iscsi_node_attrib *iscsit_tpg_get_node_attrib( 389 struct iscsi_node_attrib *iscsit_tpg_get_node_attrib(
392 struct iscsi_session *sess) 390 struct iscsi_session *sess)
393 { 391 {
394 struct se_session *se_sess = sess->se_sess; 392 struct se_session *se_sess = sess->se_sess;
395 struct se_node_acl *se_nacl = se_sess->se_node_acl; 393 struct se_node_acl *se_nacl = se_sess->se_node_acl;
396 struct iscsi_node_acl *acl = container_of(se_nacl, struct iscsi_node_acl, 394 struct iscsi_node_acl *acl = container_of(se_nacl, struct iscsi_node_acl,
397 se_node_acl); 395 se_node_acl);
398 396
399 return &acl->node_attrib; 397 return &acl->node_attrib;
400 } 398 }
401 399
402 struct iscsi_tpg_np *iscsit_tpg_locate_child_np( 400 struct iscsi_tpg_np *iscsit_tpg_locate_child_np(
403 struct iscsi_tpg_np *tpg_np, 401 struct iscsi_tpg_np *tpg_np,
404 int network_transport) 402 int network_transport)
405 { 403 {
406 struct iscsi_tpg_np *tpg_np_child, *tpg_np_child_tmp; 404 struct iscsi_tpg_np *tpg_np_child, *tpg_np_child_tmp;
407 405
408 spin_lock(&tpg_np->tpg_np_parent_lock); 406 spin_lock(&tpg_np->tpg_np_parent_lock);
409 list_for_each_entry_safe(tpg_np_child, tpg_np_child_tmp, 407 list_for_each_entry_safe(tpg_np_child, tpg_np_child_tmp,
410 &tpg_np->tpg_np_parent_list, tpg_np_child_list) { 408 &tpg_np->tpg_np_parent_list, tpg_np_child_list) {
411 if (tpg_np_child->tpg_np->np_network_transport == 409 if (tpg_np_child->tpg_np->np_network_transport ==
412 network_transport) { 410 network_transport) {
413 spin_unlock(&tpg_np->tpg_np_parent_lock); 411 spin_unlock(&tpg_np->tpg_np_parent_lock);
414 return tpg_np_child; 412 return tpg_np_child;
415 } 413 }
416 } 414 }
417 spin_unlock(&tpg_np->tpg_np_parent_lock); 415 spin_unlock(&tpg_np->tpg_np_parent_lock);
418 416
419 return NULL; 417 return NULL;
420 } 418 }
421 419
422 struct iscsi_tpg_np *iscsit_tpg_add_network_portal( 420 struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
423 struct iscsi_portal_group *tpg, 421 struct iscsi_portal_group *tpg,
424 struct __kernel_sockaddr_storage *sockaddr, 422 struct __kernel_sockaddr_storage *sockaddr,
425 char *ip_str, 423 char *ip_str,
426 struct iscsi_tpg_np *tpg_np_parent, 424 struct iscsi_tpg_np *tpg_np_parent,
427 int network_transport) 425 int network_transport)
428 { 426 {
429 struct iscsi_np *np; 427 struct iscsi_np *np;
430 struct iscsi_tpg_np *tpg_np; 428 struct iscsi_tpg_np *tpg_np;
431 429
432 tpg_np = kzalloc(sizeof(struct iscsi_tpg_np), GFP_KERNEL); 430 tpg_np = kzalloc(sizeof(struct iscsi_tpg_np), GFP_KERNEL);
433 if (!tpg_np) { 431 if (!tpg_np) {
434 pr_err("Unable to allocate memory for" 432 pr_err("Unable to allocate memory for"
435 " struct iscsi_tpg_np.\n"); 433 " struct iscsi_tpg_np.\n");
436 return ERR_PTR(-ENOMEM); 434 return ERR_PTR(-ENOMEM);
437 } 435 }
438 436
439 np = iscsit_add_np(sockaddr, ip_str, network_transport); 437 np = iscsit_add_np(sockaddr, ip_str, network_transport);
440 if (IS_ERR(np)) { 438 if (IS_ERR(np)) {
441 kfree(tpg_np); 439 kfree(tpg_np);
442 return ERR_CAST(np); 440 return ERR_CAST(np);
443 } 441 }
444 442
445 INIT_LIST_HEAD(&tpg_np->tpg_np_list); 443 INIT_LIST_HEAD(&tpg_np->tpg_np_list);
446 INIT_LIST_HEAD(&tpg_np->tpg_np_child_list); 444 INIT_LIST_HEAD(&tpg_np->tpg_np_child_list);
447 INIT_LIST_HEAD(&tpg_np->tpg_np_parent_list); 445 INIT_LIST_HEAD(&tpg_np->tpg_np_parent_list);
448 spin_lock_init(&tpg_np->tpg_np_parent_lock); 446 spin_lock_init(&tpg_np->tpg_np_parent_lock);
449 tpg_np->tpg_np = np; 447 tpg_np->tpg_np = np;
450 tpg_np->tpg = tpg; 448 tpg_np->tpg = tpg;
451 449
452 spin_lock(&tpg->tpg_np_lock); 450 spin_lock(&tpg->tpg_np_lock);
453 list_add_tail(&tpg_np->tpg_np_list, &tpg->tpg_gnp_list); 451 list_add_tail(&tpg_np->tpg_np_list, &tpg->tpg_gnp_list);
454 tpg->num_tpg_nps++; 452 tpg->num_tpg_nps++;
455 if (tpg->tpg_tiqn) 453 if (tpg->tpg_tiqn)
456 tpg->tpg_tiqn->tiqn_num_tpg_nps++; 454 tpg->tpg_tiqn->tiqn_num_tpg_nps++;
457 spin_unlock(&tpg->tpg_np_lock); 455 spin_unlock(&tpg->tpg_np_lock);
458 456
459 if (tpg_np_parent) { 457 if (tpg_np_parent) {
460 tpg_np->tpg_np_parent = tpg_np_parent; 458 tpg_np->tpg_np_parent = tpg_np_parent;
461 spin_lock(&tpg_np_parent->tpg_np_parent_lock); 459 spin_lock(&tpg_np_parent->tpg_np_parent_lock);
462 list_add_tail(&tpg_np->tpg_np_child_list, 460 list_add_tail(&tpg_np->tpg_np_child_list,
463 &tpg_np_parent->tpg_np_parent_list); 461 &tpg_np_parent->tpg_np_parent_list);
464 spin_unlock(&tpg_np_parent->tpg_np_parent_lock); 462 spin_unlock(&tpg_np_parent->tpg_np_parent_lock);
465 } 463 }
466 464
467 pr_debug("CORE[%s] - Added Network Portal: %s:%hu,%hu on %s\n", 465 pr_debug("CORE[%s] - Added Network Portal: %s:%hu,%hu on %s\n",
468 tpg->tpg_tiqn->tiqn, np->np_ip, np->np_port, tpg->tpgt, 466 tpg->tpg_tiqn->tiqn, np->np_ip, np->np_port, tpg->tpgt,
469 (np->np_network_transport == ISCSI_TCP) ? "TCP" : "SCTP"); 467 (np->np_network_transport == ISCSI_TCP) ? "TCP" : "SCTP");
470 468
471 return tpg_np; 469 return tpg_np;
472 } 470 }
473 471
474 static int iscsit_tpg_release_np( 472 static int iscsit_tpg_release_np(
475 struct iscsi_tpg_np *tpg_np, 473 struct iscsi_tpg_np *tpg_np,
476 struct iscsi_portal_group *tpg, 474 struct iscsi_portal_group *tpg,
477 struct iscsi_np *np) 475 struct iscsi_np *np)
478 { 476 {
479 iscsit_clear_tpg_np_login_thread(tpg_np, tpg); 477 iscsit_clear_tpg_np_login_thread(tpg_np, tpg);
480 478
481 pr_debug("CORE[%s] - Removed Network Portal: %s:%hu,%hu on %s\n", 479 pr_debug("CORE[%s] - Removed Network Portal: %s:%hu,%hu on %s\n",
482 tpg->tpg_tiqn->tiqn, np->np_ip, np->np_port, tpg->tpgt, 480 tpg->tpg_tiqn->tiqn, np->np_ip, np->np_port, tpg->tpgt,
483 (np->np_network_transport == ISCSI_TCP) ? "TCP" : "SCTP"); 481 (np->np_network_transport == ISCSI_TCP) ? "TCP" : "SCTP");
484 482
485 tpg_np->tpg_np = NULL; 483 tpg_np->tpg_np = NULL;
486 tpg_np->tpg = NULL; 484 tpg_np->tpg = NULL;
487 kfree(tpg_np); 485 kfree(tpg_np);
488 /* 486 /*
489 * iscsit_del_np() will shutdown struct iscsi_np when last TPG reference is released. 487 * iscsit_del_np() will shutdown struct iscsi_np when last TPG reference is released.
490 */ 488 */
491 return iscsit_del_np(np); 489 return iscsit_del_np(np);
492 } 490 }
493 491
494 int iscsit_tpg_del_network_portal( 492 int iscsit_tpg_del_network_portal(
495 struct iscsi_portal_group *tpg, 493 struct iscsi_portal_group *tpg,
496 struct iscsi_tpg_np *tpg_np) 494 struct iscsi_tpg_np *tpg_np)
497 { 495 {
498 struct iscsi_np *np; 496 struct iscsi_np *np;
499 struct iscsi_tpg_np *tpg_np_child, *tpg_np_child_tmp; 497 struct iscsi_tpg_np *tpg_np_child, *tpg_np_child_tmp;
500 int ret = 0; 498 int ret = 0;
501 499
502 np = tpg_np->tpg_np; 500 np = tpg_np->tpg_np;
503 if (!np) { 501 if (!np) {
504 pr_err("Unable to locate struct iscsi_np from" 502 pr_err("Unable to locate struct iscsi_np from"
505 " struct iscsi_tpg_np\n"); 503 " struct iscsi_tpg_np\n");
506 return -EINVAL; 504 return -EINVAL;
507 } 505 }
508 506
509 if (!tpg_np->tpg_np_parent) { 507 if (!tpg_np->tpg_np_parent) {
510 /* 508 /*
511 * We are the parent tpg network portal. Release all of the 509 * We are the parent tpg network portal. Release all of the
512 * child tpg_np's (e.g. the non-ISCSI_TCP ones) on our parent 510 * child tpg_np's (e.g. the non-ISCSI_TCP ones) on our parent
513 * list first. 511 * list first.
514 */ 512 */
515 list_for_each_entry_safe(tpg_np_child, tpg_np_child_tmp, 513 list_for_each_entry_safe(tpg_np_child, tpg_np_child_tmp,
516 &tpg_np->tpg_np_parent_list, 514 &tpg_np->tpg_np_parent_list,
517 tpg_np_child_list) { 515 tpg_np_child_list) {
518 ret = iscsit_tpg_del_network_portal(tpg, tpg_np_child); 516 ret = iscsit_tpg_del_network_portal(tpg, tpg_np_child);
519 if (ret < 0) 517 if (ret < 0)
520 pr_err("iscsit_tpg_del_network_portal()" 518 pr_err("iscsit_tpg_del_network_portal()"
521 " failed: %d\n", ret); 519 " failed: %d\n", ret);
522 } 520 }
523 } else { 521 } else {
524 /* 522 /*
525 * We are not the parent ISCSI_TCP tpg network portal. Release 523 * We are not the parent ISCSI_TCP tpg network portal. Release
526 * our own network portals from the child list. 524 * our own network portals from the child list.
527 */ 525 */
528 spin_lock(&tpg_np->tpg_np_parent->tpg_np_parent_lock); 526 spin_lock(&tpg_np->tpg_np_parent->tpg_np_parent_lock);
529 list_del(&tpg_np->tpg_np_child_list); 527 list_del(&tpg_np->tpg_np_child_list);
530 spin_unlock(&tpg_np->tpg_np_parent->tpg_np_parent_lock); 528 spin_unlock(&tpg_np->tpg_np_parent->tpg_np_parent_lock);
531 } 529 }
532 530
533 spin_lock(&tpg->tpg_np_lock); 531 spin_lock(&tpg->tpg_np_lock);
534 list_del(&tpg_np->tpg_np_list); 532 list_del(&tpg_np->tpg_np_list);
535 tpg->num_tpg_nps--; 533 tpg->num_tpg_nps--;
536 if (tpg->tpg_tiqn) 534 if (tpg->tpg_tiqn)
537 tpg->tpg_tiqn->tiqn_num_tpg_nps--; 535 tpg->tpg_tiqn->tiqn_num_tpg_nps--;
538 spin_unlock(&tpg->tpg_np_lock); 536 spin_unlock(&tpg->tpg_np_lock);
539 537
540 return iscsit_tpg_release_np(tpg_np, tpg, np); 538 return iscsit_tpg_release_np(tpg_np, tpg, np);
541 } 539 }
542 540
543 int iscsit_tpg_set_initiator_node_queue_depth( 541 int iscsit_tpg_set_initiator_node_queue_depth(
544 struct iscsi_portal_group *tpg, 542 struct iscsi_portal_group *tpg,
545 unsigned char *initiatorname, 543 unsigned char *initiatorname,
546 u32 queue_depth, 544 u32 queue_depth,
547 int force) 545 int force)
548 { 546 {
549 return core_tpg_set_initiator_node_queue_depth(&tpg->tpg_se_tpg, 547 return core_tpg_set_initiator_node_queue_depth(&tpg->tpg_se_tpg,
550 initiatorname, queue_depth, force); 548 initiatorname, queue_depth, force);
551 } 549 }
552 550
553 int iscsit_ta_authentication(struct iscsi_portal_group *tpg, u32 authentication) 551 int iscsit_ta_authentication(struct iscsi_portal_group *tpg, u32 authentication)
554 { 552 {
555 unsigned char buf1[256], buf2[256], *none = NULL; 553 unsigned char buf1[256], buf2[256], *none = NULL;
556 int len; 554 int len;
557 struct iscsi_param *param; 555 struct iscsi_param *param;
558 struct iscsi_tpg_attrib *a = &tpg->tpg_attrib; 556 struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
559 557
560 if ((authentication != 1) && (authentication != 0)) { 558 if ((authentication != 1) && (authentication != 0)) {
561 pr_err("Illegal value for authentication parameter:" 559 pr_err("Illegal value for authentication parameter:"
562 " %u, ignoring request.\n", authentication); 560 " %u, ignoring request.\n", authentication);
563 return -1; 561 return -1;
564 } 562 }
565 563
566 memset(buf1, 0, sizeof(buf1)); 564 memset(buf1, 0, sizeof(buf1));
567 memset(buf2, 0, sizeof(buf2)); 565 memset(buf2, 0, sizeof(buf2));
568 566
569 param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list); 567 param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list);
570 if (!param) 568 if (!param)
571 return -EINVAL; 569 return -EINVAL;
572 570
573 if (authentication) { 571 if (authentication) {
574 snprintf(buf1, sizeof(buf1), "%s", param->value); 572 snprintf(buf1, sizeof(buf1), "%s", param->value);
575 none = strstr(buf1, NONE); 573 none = strstr(buf1, NONE);
576 if (!none) 574 if (!none)
577 goto out; 575 goto out;
578 if (!strncmp(none + 4, ",", 1)) { 576 if (!strncmp(none + 4, ",", 1)) {
579 if (!strcmp(buf1, none)) 577 if (!strcmp(buf1, none))
580 sprintf(buf2, "%s", none+5); 578 sprintf(buf2, "%s", none+5);
581 else { 579 else {
582 none--; 580 none--;
583 *none = '\0'; 581 *none = '\0';
584 len = sprintf(buf2, "%s", buf1); 582 len = sprintf(buf2, "%s", buf1);
585 none += 5; 583 none += 5;
586 sprintf(buf2 + len, "%s", none); 584 sprintf(buf2 + len, "%s", none);
587 } 585 }
588 } else { 586 } else {
589 none--; 587 none--;
590 *none = '\0'; 588 *none = '\0';
591 sprintf(buf2, "%s", buf1); 589 sprintf(buf2, "%s", buf1);
592 } 590 }
593 if (iscsi_update_param_value(param, buf2) < 0) 591 if (iscsi_update_param_value(param, buf2) < 0)
594 return -EINVAL; 592 return -EINVAL;
595 } else { 593 } else {
596 snprintf(buf1, sizeof(buf1), "%s", param->value); 594 snprintf(buf1, sizeof(buf1), "%s", param->value);
597 none = strstr(buf1, NONE); 595 none = strstr(buf1, NONE);
598 if ((none)) 596 if ((none))
599 goto out; 597 goto out;
600 strncat(buf1, ",", strlen(",")); 598 strncat(buf1, ",", strlen(","));
601 strncat(buf1, NONE, strlen(NONE)); 599 strncat(buf1, NONE, strlen(NONE));
602 if (iscsi_update_param_value(param, buf1) < 0) 600 if (iscsi_update_param_value(param, buf1) < 0)
603 return -EINVAL; 601 return -EINVAL;
604 } 602 }
605 603
606 out: 604 out:
607 a->authentication = authentication; 605 a->authentication = authentication;
608 pr_debug("%s iSCSI Authentication Methods for TPG: %hu.\n", 606 pr_debug("%s iSCSI Authentication Methods for TPG: %hu.\n",
609 a->authentication ? "Enforcing" : "Disabling", tpg->tpgt); 607 a->authentication ? "Enforcing" : "Disabling", tpg->tpgt);
610 608
611 return 0; 609 return 0;
612 } 610 }
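The buffer juggling above deletes the single "None" token from the comma-separated AuthMethod value wherever it sits: leading ("None,CHAP"), trailing ("CHAP,None"), or alone. A standalone sketch of the same edit without the fixed-size scratch buffers (it assumes at most one "None" occurrence, as the kernel code does):

#include <stdio.h>
#include <string.h>

/* Strip a single "None" entry from a comma-separated AuthMethod value. */
static void strip_none(char *val)
{
	char *none = strstr(val, "None");

	if (!none)
		return;
	if (none[4] == ',')		/* "None,CHAP" -> "CHAP" */
		memmove(none, none + 5, strlen(none + 5) + 1);
	else if (none != val && none[-1] == ',')
		none[-1] = '\0';	/* "CHAP,None" -> "CHAP" */
	else
		*none = '\0';		/* sole value -> "" */
}

int main(void)
{
	char a[] = "CHAP,None", b[] = "None,CHAP", c[] = "None";

	strip_none(a);
	strip_none(b);
	strip_none(c);
	printf("'%s' '%s' '%s'\n", a, b, c);	/* 'CHAP' 'CHAP' '' */
	return 0;
}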
613 611
614 int iscsit_ta_login_timeout( 612 int iscsit_ta_login_timeout(
615 struct iscsi_portal_group *tpg, 613 struct iscsi_portal_group *tpg,
616 u32 login_timeout) 614 u32 login_timeout)
617 { 615 {
618 struct iscsi_tpg_attrib *a = &tpg->tpg_attrib; 616 struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
619 617
620 if (login_timeout > TA_LOGIN_TIMEOUT_MAX) { 618 if (login_timeout > TA_LOGIN_TIMEOUT_MAX) {
621 pr_err("Requested Login Timeout %u larger than maximum" 619 pr_err("Requested Login Timeout %u larger than maximum"
622 " %u\n", login_timeout, TA_LOGIN_TIMEOUT_MAX); 620 " %u\n", login_timeout, TA_LOGIN_TIMEOUT_MAX);
623 return -EINVAL; 621 return -EINVAL;
624 } else if (login_timeout < TA_LOGIN_TIMEOUT_MIN) { 622 } else if (login_timeout < TA_LOGIN_TIMEOUT_MIN) {
625 pr_err("Requested Logout Timeout %u smaller than" 623 pr_err("Requested Logout Timeout %u smaller than"
626 " minimum %u\n", login_timeout, TA_LOGIN_TIMEOUT_MIN); 624 " minimum %u\n", login_timeout, TA_LOGIN_TIMEOUT_MIN);
627 return -EINVAL; 625 return -EINVAL;
628 } 626 }
629 627
630 a->login_timeout = login_timeout; 628 a->login_timeout = login_timeout;
631 pr_debug("Set Logout Timeout to %u for Target Portal Group" 629 pr_debug("Set Logout Timeout to %u for Target Portal Group"
632 " %hu\n", a->login_timeout, tpg->tpgt); 630 " %hu\n", a->login_timeout, tpg->tpgt);
633 631
634 return 0; 632 return 0;
635 } 633 }
636 634
637 int iscsit_ta_netif_timeout( 635 int iscsit_ta_netif_timeout(
638 struct iscsi_portal_group *tpg, 636 struct iscsi_portal_group *tpg,
639 u32 netif_timeout) 637 u32 netif_timeout)
640 { 638 {
641 struct iscsi_tpg_attrib *a = &tpg->tpg_attrib; 639 struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
642 640
643 if (netif_timeout > TA_NETIF_TIMEOUT_MAX) { 641 if (netif_timeout > TA_NETIF_TIMEOUT_MAX) {
644 pr_err("Requested Network Interface Timeout %u larger" 642 pr_err("Requested Network Interface Timeout %u larger"
645 " than maximum %u\n", netif_timeout, 643 " than maximum %u\n", netif_timeout,
646 TA_NETIF_TIMEOUT_MAX); 644 TA_NETIF_TIMEOUT_MAX);
647 return -EINVAL; 645 return -EINVAL;
648 } else if (netif_timeout < TA_NETIF_TIMEOUT_MIN) { 646 } else if (netif_timeout < TA_NETIF_TIMEOUT_MIN) {
649 pr_err("Requested Network Interface Timeout %u smaller" 647 pr_err("Requested Network Interface Timeout %u smaller"
650 " than minimum %u\n", netif_timeout, 648 " than minimum %u\n", netif_timeout,
651 TA_NETIF_TIMEOUT_MIN); 649 TA_NETIF_TIMEOUT_MIN);
652 return -EINVAL; 650 return -EINVAL;
653 } 651 }
654 652
655 a->netif_timeout = netif_timeout; 653 a->netif_timeout = netif_timeout;
656 pr_debug("Set Network Interface Timeout to %u for" 654 pr_debug("Set Network Interface Timeout to %u for"
657 " Target Portal Group %hu\n", a->netif_timeout, tpg->tpgt); 655 " Target Portal Group %hu\n", a->netif_timeout, tpg->tpgt);
658 656
659 return 0; 657 return 0;
660 } 658 }
661 659
662 int iscsit_ta_generate_node_acls( 660 int iscsit_ta_generate_node_acls(
663 struct iscsi_portal_group *tpg, 661 struct iscsi_portal_group *tpg,
664 u32 flag) 662 u32 flag)
665 { 663 {
666 struct iscsi_tpg_attrib *a = &tpg->tpg_attrib; 664 struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
667 665
668 if ((flag != 0) && (flag != 1)) { 666 if ((flag != 0) && (flag != 1)) {
669 pr_err("Illegal value %d\n", flag); 667 pr_err("Illegal value %d\n", flag);
670 return -EINVAL; 668 return -EINVAL;
671 } 669 }
672 670
673 a->generate_node_acls = flag; 671 a->generate_node_acls = flag;
674 pr_debug("iSCSI_TPG[%hu] - Generate Initiator Portal Group ACLs: %s\n", 672 pr_debug("iSCSI_TPG[%hu] - Generate Initiator Portal Group ACLs: %s\n",
675 tpg->tpgt, (a->generate_node_acls) ? "Enabled" : "Disabled"); 673 tpg->tpgt, (a->generate_node_acls) ? "Enabled" : "Disabled");
676 674
677 return 0; 675 return 0;
678 } 676 }
679 677
680 int iscsit_ta_default_cmdsn_depth( 678 int iscsit_ta_default_cmdsn_depth(
681 struct iscsi_portal_group *tpg, 679 struct iscsi_portal_group *tpg,
682 u32 tcq_depth) 680 u32 tcq_depth)
683 { 681 {
684 struct iscsi_tpg_attrib *a = &tpg->tpg_attrib; 682 struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
685 683
686 if (tcq_depth > TA_DEFAULT_CMDSN_DEPTH_MAX) { 684 if (tcq_depth > TA_DEFAULT_CMDSN_DEPTH_MAX) {
687 pr_err("Requested Default Queue Depth: %u larger" 685 pr_err("Requested Default Queue Depth: %u larger"
688 " than maximum %u\n", tcq_depth, 686 " than maximum %u\n", tcq_depth,
689 TA_DEFAULT_CMDSN_DEPTH_MAX); 687 TA_DEFAULT_CMDSN_DEPTH_MAX);
690 return -EINVAL; 688 return -EINVAL;
691 } else if (tcq_depth < TA_DEFAULT_CMDSN_DEPTH_MIN) { 689 } else if (tcq_depth < TA_DEFAULT_CMDSN_DEPTH_MIN) {
692 pr_err("Requested Default Queue Depth: %u smaller" 690 pr_err("Requested Default Queue Depth: %u smaller"
693 " than minimum %u\n", tcq_depth, 691 " than minimum %u\n", tcq_depth,
694 TA_DEFAULT_CMDSN_DEPTH_MIN); 692 TA_DEFAULT_CMDSN_DEPTH_MIN);
695 return -EINVAL; 693 return -EINVAL;
696 } 694 }
697 695
698 a->default_cmdsn_depth = tcq_depth; 696 a->default_cmdsn_depth = tcq_depth;
699 pr_debug("iSCSI_TPG[%hu] - Set Default CmdSN TCQ Depth to %u\n", 697 pr_debug("iSCSI_TPG[%hu] - Set Default CmdSN TCQ Depth to %u\n",
700 tpg->tpgt, a->default_cmdsn_depth); 698 tpg->tpgt, a->default_cmdsn_depth);
701 699
702 return 0; 700 return 0;
703 } 701 }
704 702
705 int iscsit_ta_cache_dynamic_acls( 703 int iscsit_ta_cache_dynamic_acls(
706 struct iscsi_portal_group *tpg, 704 struct iscsi_portal_group *tpg,
707 u32 flag) 705 u32 flag)
708 { 706 {
709 struct iscsi_tpg_attrib *a = &tpg->tpg_attrib; 707 struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
710 708
711 if ((flag != 0) && (flag != 1)) { 709 if ((flag != 0) && (flag != 1)) {
712 pr_err("Illegal value %d\n", flag); 710 pr_err("Illegal value %d\n", flag);
713 return -EINVAL; 711 return -EINVAL;
714 } 712 }
715 713
716 a->cache_dynamic_acls = flag; 714 a->cache_dynamic_acls = flag;
717 pr_debug("iSCSI_TPG[%hu] - Cache Dynamic Initiator Portal Group" 715 pr_debug("iSCSI_TPG[%hu] - Cache Dynamic Initiator Portal Group"
718 " ACLs %s\n", tpg->tpgt, (a->cache_dynamic_acls) ? 716 " ACLs %s\n", tpg->tpgt, (a->cache_dynamic_acls) ?
719 "Enabled" : "Disabled"); 717 "Enabled" : "Disabled");
720 718
721 return 0; 719 return 0;
722 } 720 }
723 721
724 int iscsit_ta_demo_mode_write_protect( 722 int iscsit_ta_demo_mode_write_protect(
725 struct iscsi_portal_group *tpg, 723 struct iscsi_portal_group *tpg,
726 u32 flag) 724 u32 flag)
727 { 725 {
728 struct iscsi_tpg_attrib *a = &tpg->tpg_attrib; 726 struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
729 727
730 if ((flag != 0) && (flag != 1)) { 728 if ((flag != 0) && (flag != 1)) {
731 pr_err("Illegal value %d\n", flag); 729 pr_err("Illegal value %d\n", flag);
732 return -EINVAL; 730 return -EINVAL;
733 } 731 }
734 732
735 a->demo_mode_write_protect = flag; 733 a->demo_mode_write_protect = flag;
736 pr_debug("iSCSI_TPG[%hu] - Demo Mode Write Protect bit: %s\n", 734 pr_debug("iSCSI_TPG[%hu] - Demo Mode Write Protect bit: %s\n",
737 tpg->tpgt, (a->demo_mode_write_protect) ? "ON" : "OFF"); 735 tpg->tpgt, (a->demo_mode_write_protect) ? "ON" : "OFF");
738 736
739 return 0; 737 return 0;
740 } 738 }
741 739
742 int iscsit_ta_prod_mode_write_protect( 740 int iscsit_ta_prod_mode_write_protect(
743 struct iscsi_portal_group *tpg, 741 struct iscsi_portal_group *tpg,
744 u32 flag) 742 u32 flag)
745 { 743 {
746 struct iscsi_tpg_attrib *a = &tpg->tpg_attrib; 744 struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
747 745
748 if ((flag != 0) && (flag != 1)) { 746 if ((flag != 0) && (flag != 1)) {
749 pr_err("Illegal value %d\n", flag); 747 pr_err("Illegal value %d\n", flag);
750 return -EINVAL; 748 return -EINVAL;
751 } 749 }
752 750
753 a->prod_mode_write_protect = flag; 751 a->prod_mode_write_protect = flag;
754 pr_debug("iSCSI_TPG[%hu] - Production Mode Write Protect bit:" 752 pr_debug("iSCSI_TPG[%hu] - Production Mode Write Protect bit:"
755 " %s\n", tpg->tpgt, (a->prod_mode_write_protect) ? 753 " %s\n", tpg->tpgt, (a->prod_mode_write_protect) ?
756 "ON" : "OFF"); 754 "ON" : "OFF");
757 755
758 return 0; 756 return 0;
759 } 757 }
760 758
drivers/target/iscsi/iscsi_target_util.c
1 /******************************************************************************* 1 /*******************************************************************************
2 * This file contains the iSCSI Target specific utility functions. 2 * This file contains the iSCSI Target specific utility functions.
3 * 3 *
4 * © Copyright 2007-2011 RisingTide Systems LLC. 4 * © Copyright 2007-2011 RisingTide Systems LLC.
5 * 5 *
6 * Licensed to the Linux Foundation under the General Public License (GPL) version 2. 6 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
7 * 7 *
8 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> 8 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by 11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or 12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version. 13 * (at your option) any later version.
14 * 14 *
15 * This program is distributed in the hope that it will be useful, 15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details. 18 * GNU General Public License for more details.
19 ******************************************************************************/ 19 ******************************************************************************/
20 20
21 #include <linux/list.h> 21 #include <linux/list.h>
22 #include <scsi/scsi_tcq.h> 22 #include <scsi/scsi_tcq.h>
23 #include <scsi/iscsi_proto.h> 23 #include <scsi/iscsi_proto.h>
24 #include <target/target_core_base.h> 24 #include <target/target_core_base.h>
25 #include <target/target_core_transport.h> 25 #include <target/target_core_fabric.h>
26 #include <target/target_core_tmr.h>
27 #include <target/target_core_fabric_ops.h>
28 #include <target/target_core_configfs.h> 26 #include <target/target_core_configfs.h>
29 27
30 #include "iscsi_target_core.h" 28 #include "iscsi_target_core.h"
31 #include "iscsi_target_parameters.h" 29 #include "iscsi_target_parameters.h"
32 #include "iscsi_target_seq_pdu_list.h" 30 #include "iscsi_target_seq_pdu_list.h"
33 #include "iscsi_target_datain_values.h" 31 #include "iscsi_target_datain_values.h"
34 #include "iscsi_target_erl0.h" 32 #include "iscsi_target_erl0.h"
35 #include "iscsi_target_erl1.h" 33 #include "iscsi_target_erl1.h"
36 #include "iscsi_target_erl2.h" 34 #include "iscsi_target_erl2.h"
37 #include "iscsi_target_tpg.h" 35 #include "iscsi_target_tpg.h"
38 #include "iscsi_target_tq.h" 36 #include "iscsi_target_tq.h"
39 #include "iscsi_target_util.h" 37 #include "iscsi_target_util.h"
40 #include "iscsi_target.h" 38 #include "iscsi_target.h"
41 39
42 #define PRINT_BUFF(buff, len) \ 40 #define PRINT_BUFF(buff, len) \
43 { \ 41 { \
44 int zzz; \ 42 int zzz; \
45 \ 43 \
46 pr_debug("%d:\n", __LINE__); \ 44 pr_debug("%d:\n", __LINE__); \
47 for (zzz = 0; zzz < len; zzz++) { \ 45 for (zzz = 0; zzz < len; zzz++) { \
48 if (zzz % 16 == 0) { \ 46 if (zzz % 16 == 0) { \
49 if (zzz) \ 47 if (zzz) \
50 pr_debug("\n"); \ 48 pr_debug("\n"); \
51 pr_debug("%4i: ", zzz); \ 49 pr_debug("%4i: ", zzz); \
52 } \ 50 } \
53 pr_debug("%02x ", (unsigned char) (buff)[zzz]); \ 51 pr_debug("%02x ", (unsigned char) (buff)[zzz]); \
54 } \ 52 } \
55 if ((len + 1) % 16) \ 53 if ((len + 1) % 16) \
56 pr_debug("\n"); \ 54 pr_debug("\n"); \
57 } 55 }
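
A quick usage sketch of the PRINT_BUFF() helper defined above, assuming a hypothetical call site (example_dump_hdr is illustrative and not part of this commit):

/* Hex-dump a received 48-byte iSCSI basic header segment for debugging. */
static void example_dump_hdr(unsigned char *buf)
{
	PRINT_BUFF(buf, ISCSI_HDR_LEN);	/* walks the buffer, hex bytes grouped 16 per row */
}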
58 56
59 extern struct list_head g_tiqn_list; 57 extern struct list_head g_tiqn_list;
60 extern spinlock_t tiqn_lock; 58 extern spinlock_t tiqn_lock;
61 59
62 /* 60 /*
63 * Called with cmd->r2t_lock held. 61 * Called with cmd->r2t_lock held.
64 */ 62 */
65 int iscsit_add_r2t_to_list( 63 int iscsit_add_r2t_to_list(
66 struct iscsi_cmd *cmd, 64 struct iscsi_cmd *cmd,
67 u32 offset, 65 u32 offset,
68 u32 xfer_len, 66 u32 xfer_len,
69 int recovery, 67 int recovery,
70 u32 r2t_sn) 68 u32 r2t_sn)
71 { 69 {
72 struct iscsi_r2t *r2t; 70 struct iscsi_r2t *r2t;
73 71
74 r2t = kmem_cache_zalloc(lio_r2t_cache, GFP_ATOMIC); 72 r2t = kmem_cache_zalloc(lio_r2t_cache, GFP_ATOMIC);
75 if (!r2t) { 73 if (!r2t) {
76 pr_err("Unable to allocate memory for struct iscsi_r2t.\n"); 74 pr_err("Unable to allocate memory for struct iscsi_r2t.\n");
77 return -1; 75 return -1;
78 } 76 }
79 INIT_LIST_HEAD(&r2t->r2t_list); 77 INIT_LIST_HEAD(&r2t->r2t_list);
80 78
81 r2t->recovery_r2t = recovery; 79 r2t->recovery_r2t = recovery;
82 r2t->r2t_sn = (!r2t_sn) ? cmd->r2t_sn++ : r2t_sn; 80 r2t->r2t_sn = (!r2t_sn) ? cmd->r2t_sn++ : r2t_sn;
83 r2t->offset = offset; 81 r2t->offset = offset;
84 r2t->xfer_len = xfer_len; 82 r2t->xfer_len = xfer_len;
85 list_add_tail(&r2t->r2t_list, &cmd->cmd_r2t_list); 83 list_add_tail(&r2t->r2t_list, &cmd->cmd_r2t_list);
86 spin_unlock_bh(&cmd->r2t_lock); 84 spin_unlock_bh(&cmd->r2t_lock);
87 85
88 iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, ISTATE_SEND_R2T); 86 iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, ISTATE_SEND_R2T);
89 87
90 spin_lock_bh(&cmd->r2t_lock); 88 spin_lock_bh(&cmd->r2t_lock);
91 return 0; 89 return 0;
92 } 90 }
93 91
94 struct iscsi_r2t *iscsit_get_r2t_for_eos( 92 struct iscsi_r2t *iscsit_get_r2t_for_eos(
95 struct iscsi_cmd *cmd, 93 struct iscsi_cmd *cmd,
96 u32 offset, 94 u32 offset,
97 u32 length) 95 u32 length)
98 { 96 {
99 struct iscsi_r2t *r2t; 97 struct iscsi_r2t *r2t;
100 98
101 spin_lock_bh(&cmd->r2t_lock); 99 spin_lock_bh(&cmd->r2t_lock);
102 list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) { 100 list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
103 if ((r2t->offset <= offset) && 101 if ((r2t->offset <= offset) &&
104 (r2t->offset + r2t->xfer_len) >= (offset + length)) { 102 (r2t->offset + r2t->xfer_len) >= (offset + length)) {
105 spin_unlock_bh(&cmd->r2t_lock); 103 spin_unlock_bh(&cmd->r2t_lock);
106 return r2t; 104 return r2t;
107 } 105 }
108 } 106 }
109 spin_unlock_bh(&cmd->r2t_lock); 107 spin_unlock_bh(&cmd->r2t_lock);
110 108
111 pr_err("Unable to locate R2T for Offset: %u, Length:" 109 pr_err("Unable to locate R2T for Offset: %u, Length:"
112 " %u\n", offset, length); 110 " %u\n", offset, length);
113 return NULL; 111 return NULL;
114 } 112 }
115 113
116 struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *cmd) 114 struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *cmd)
117 { 115 {
118 struct iscsi_r2t *r2t; 116 struct iscsi_r2t *r2t;
119 117
120 spin_lock_bh(&cmd->r2t_lock); 118 spin_lock_bh(&cmd->r2t_lock);
121 list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) { 119 list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
122 if (!r2t->sent_r2t) { 120 if (!r2t->sent_r2t) {
123 spin_unlock_bh(&cmd->r2t_lock); 121 spin_unlock_bh(&cmd->r2t_lock);
124 return r2t; 122 return r2t;
125 } 123 }
126 } 124 }
127 spin_unlock_bh(&cmd->r2t_lock); 125 spin_unlock_bh(&cmd->r2t_lock);
128 126
129 pr_err("Unable to locate next R2T to send for ITT:" 127 pr_err("Unable to locate next R2T to send for ITT:"
130 " 0x%08x.\n", cmd->init_task_tag); 128 " 0x%08x.\n", cmd->init_task_tag);
131 return NULL; 129 return NULL;
132 } 130 }
133 131
134 /* 132 /*
135 * Called with cmd->r2t_lock held. 133 * Called with cmd->r2t_lock held.
136 */ 134 */
137 void iscsit_free_r2t(struct iscsi_r2t *r2t, struct iscsi_cmd *cmd) 135 void iscsit_free_r2t(struct iscsi_r2t *r2t, struct iscsi_cmd *cmd)
138 { 136 {
139 list_del(&r2t->r2t_list); 137 list_del(&r2t->r2t_list);
140 kmem_cache_free(lio_r2t_cache, r2t); 138 kmem_cache_free(lio_r2t_cache, r2t);
141 } 139 }
142 140
143 void iscsit_free_r2ts_from_list(struct iscsi_cmd *cmd) 141 void iscsit_free_r2ts_from_list(struct iscsi_cmd *cmd)
144 { 142 {
145 struct iscsi_r2t *r2t, *r2t_tmp; 143 struct iscsi_r2t *r2t, *r2t_tmp;
146 144
147 spin_lock_bh(&cmd->r2t_lock); 145 spin_lock_bh(&cmd->r2t_lock);
148 list_for_each_entry_safe(r2t, r2t_tmp, &cmd->cmd_r2t_list, r2t_list) 146 list_for_each_entry_safe(r2t, r2t_tmp, &cmd->cmd_r2t_list, r2t_list)
149 iscsit_free_r2t(r2t, cmd); 147 iscsit_free_r2t(r2t, cmd);
150 spin_unlock_bh(&cmd->r2t_lock); 148 spin_unlock_bh(&cmd->r2t_lock);
151 } 149 }
152 150
153 /* 151 /*
154 * May be called from software interrupt (timer) context for allocating 152 * May be called from software interrupt (timer) context for allocating
155 * iSCSI NopINs. 153 * iSCSI NopINs.
156 */ 154 */
157 struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp_mask) 155 struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp_mask)
158 { 156 {
159 struct iscsi_cmd *cmd; 157 struct iscsi_cmd *cmd;
160 158
161 cmd = kmem_cache_zalloc(lio_cmd_cache, gfp_mask); 159 cmd = kmem_cache_zalloc(lio_cmd_cache, gfp_mask);
162 if (!cmd) { 160 if (!cmd) {
163 pr_err("Unable to allocate memory for struct iscsi_cmd.\n"); 161 pr_err("Unable to allocate memory for struct iscsi_cmd.\n");
164 return NULL; 162 return NULL;
165 } 163 }
166 164
167 cmd->conn = conn; 165 cmd->conn = conn;
168 INIT_LIST_HEAD(&cmd->i_list); 166 INIT_LIST_HEAD(&cmd->i_list);
169 INIT_LIST_HEAD(&cmd->datain_list); 167 INIT_LIST_HEAD(&cmd->datain_list);
170 INIT_LIST_HEAD(&cmd->cmd_r2t_list); 168 INIT_LIST_HEAD(&cmd->cmd_r2t_list);
171 init_completion(&cmd->reject_comp); 169 init_completion(&cmd->reject_comp);
172 spin_lock_init(&cmd->datain_lock); 170 spin_lock_init(&cmd->datain_lock);
173 spin_lock_init(&cmd->dataout_timeout_lock); 171 spin_lock_init(&cmd->dataout_timeout_lock);
174 spin_lock_init(&cmd->istate_lock); 172 spin_lock_init(&cmd->istate_lock);
175 spin_lock_init(&cmd->error_lock); 173 spin_lock_init(&cmd->error_lock);
176 spin_lock_init(&cmd->r2t_lock); 174 spin_lock_init(&cmd->r2t_lock);
177 175
178 return cmd; 176 return cmd;
179 } 177 }
180 178
181 /* 179 /*
182 * Called from iscsi_handle_scsi_cmd() 180 * Called from iscsi_handle_scsi_cmd()
183 */ 181 */
184 struct iscsi_cmd *iscsit_allocate_se_cmd( 182 struct iscsi_cmd *iscsit_allocate_se_cmd(
185 struct iscsi_conn *conn, 183 struct iscsi_conn *conn,
186 u32 data_length, 184 u32 data_length,
187 int data_direction, 185 int data_direction,
188 int iscsi_task_attr) 186 int iscsi_task_attr)
189 { 187 {
190 struct iscsi_cmd *cmd; 188 struct iscsi_cmd *cmd;
191 struct se_cmd *se_cmd; 189 struct se_cmd *se_cmd;
192 int sam_task_attr; 190 int sam_task_attr;
193 191
194 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); 192 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
195 if (!cmd) 193 if (!cmd)
196 return NULL; 194 return NULL;
197 195
198 cmd->data_direction = data_direction; 196 cmd->data_direction = data_direction;
199 cmd->data_length = data_length; 197 cmd->data_length = data_length;
200 /* 198 /*
201 * Figure out the SAM Task Attribute for the incoming SCSI CDB 199 * Figure out the SAM Task Attribute for the incoming SCSI CDB
202 */ 200 */
203 if ((iscsi_task_attr == ISCSI_ATTR_UNTAGGED) || 201 if ((iscsi_task_attr == ISCSI_ATTR_UNTAGGED) ||
204 (iscsi_task_attr == ISCSI_ATTR_SIMPLE)) 202 (iscsi_task_attr == ISCSI_ATTR_SIMPLE))
205 sam_task_attr = MSG_SIMPLE_TAG; 203 sam_task_attr = MSG_SIMPLE_TAG;
206 else if (iscsi_task_attr == ISCSI_ATTR_ORDERED) 204 else if (iscsi_task_attr == ISCSI_ATTR_ORDERED)
207 sam_task_attr = MSG_ORDERED_TAG; 205 sam_task_attr = MSG_ORDERED_TAG;
208 else if (iscsi_task_attr == ISCSI_ATTR_HEAD_OF_QUEUE) 206 else if (iscsi_task_attr == ISCSI_ATTR_HEAD_OF_QUEUE)
209 sam_task_attr = MSG_HEAD_TAG; 207 sam_task_attr = MSG_HEAD_TAG;
210 else if (iscsi_task_attr == ISCSI_ATTR_ACA) 208 else if (iscsi_task_attr == ISCSI_ATTR_ACA)
211 sam_task_attr = MSG_ACA_TAG; 209 sam_task_attr = MSG_ACA_TAG;
212 else { 210 else {
213 pr_debug("Unknown iSCSI Task Attribute: 0x%02x, using" 211 pr_debug("Unknown iSCSI Task Attribute: 0x%02x, using"
214 " MSG_SIMPLE_TAG\n", iscsi_task_attr); 212 " MSG_SIMPLE_TAG\n", iscsi_task_attr);
215 sam_task_attr = MSG_SIMPLE_TAG; 213 sam_task_attr = MSG_SIMPLE_TAG;
216 } 214 }
217 215
218 se_cmd = &cmd->se_cmd; 216 se_cmd = &cmd->se_cmd;
219 /* 217 /*
220 * Initialize struct se_cmd descriptor from target_core_mod infrastructure 218 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
221 */ 219 */
222 transport_init_se_cmd(se_cmd, &lio_target_fabric_configfs->tf_ops, 220 transport_init_se_cmd(se_cmd, &lio_target_fabric_configfs->tf_ops,
223 conn->sess->se_sess, data_length, data_direction, 221 conn->sess->se_sess, data_length, data_direction,
224 sam_task_attr, &cmd->sense_buffer[0]); 222 sam_task_attr, &cmd->sense_buffer[0]);
225 return cmd; 223 return cmd;
226 } 224 }
227 225
228 struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr( 226 struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(
229 struct iscsi_conn *conn, 227 struct iscsi_conn *conn,
230 u8 function) 228 u8 function)
231 { 229 {
232 struct iscsi_cmd *cmd; 230 struct iscsi_cmd *cmd;
233 struct se_cmd *se_cmd; 231 struct se_cmd *se_cmd;
234 u8 tcm_function; 232 u8 tcm_function;
235 233
236 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); 234 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
237 if (!cmd) 235 if (!cmd)
238 return NULL; 236 return NULL;
239 237
240 cmd->data_direction = DMA_NONE; 238 cmd->data_direction = DMA_NONE;
241 239
242 cmd->tmr_req = kzalloc(sizeof(struct iscsi_tmr_req), GFP_KERNEL); 240 cmd->tmr_req = kzalloc(sizeof(struct iscsi_tmr_req), GFP_KERNEL);
243 if (!cmd->tmr_req) { 241 if (!cmd->tmr_req) {
244 pr_err("Unable to allocate memory for" 242 pr_err("Unable to allocate memory for"
245 " Task Management command!\n"); 243 " Task Management command!\n");
246 goto out; 244 goto out;
247 } 245 }
248 /* 246 /*
249 * TASK_REASSIGN for ERL=2 / connection stays inside of 247 * TASK_REASSIGN for ERL=2 / connection stays inside of
250 * LIO-Target $FABRIC_MOD 248 * LIO-Target $FABRIC_MOD
251 */ 249 */
252 if (function == ISCSI_TM_FUNC_TASK_REASSIGN) 250 if (function == ISCSI_TM_FUNC_TASK_REASSIGN)
253 return cmd; 251 return cmd;
254 252
255 se_cmd = &cmd->se_cmd; 253 se_cmd = &cmd->se_cmd;
256 /* 254 /*
257 * Initialize struct se_cmd descriptor from target_core_mod infrastructure 255 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
258 */ 256 */
259 transport_init_se_cmd(se_cmd, &lio_target_fabric_configfs->tf_ops, 257 transport_init_se_cmd(se_cmd, &lio_target_fabric_configfs->tf_ops,
260 conn->sess->se_sess, 0, DMA_NONE, 258 conn->sess->se_sess, 0, DMA_NONE,
261 MSG_SIMPLE_TAG, &cmd->sense_buffer[0]); 259 MSG_SIMPLE_TAG, &cmd->sense_buffer[0]);
262 260
263 switch (function) { 261 switch (function) {
264 case ISCSI_TM_FUNC_ABORT_TASK: 262 case ISCSI_TM_FUNC_ABORT_TASK:
265 tcm_function = TMR_ABORT_TASK; 263 tcm_function = TMR_ABORT_TASK;
266 break; 264 break;
267 case ISCSI_TM_FUNC_ABORT_TASK_SET: 265 case ISCSI_TM_FUNC_ABORT_TASK_SET:
268 tcm_function = TMR_ABORT_TASK_SET; 266 tcm_function = TMR_ABORT_TASK_SET;
269 break; 267 break;
270 case ISCSI_TM_FUNC_CLEAR_ACA: 268 case ISCSI_TM_FUNC_CLEAR_ACA:
271 tcm_function = TMR_CLEAR_ACA; 269 tcm_function = TMR_CLEAR_ACA;
272 break; 270 break;
273 case ISCSI_TM_FUNC_CLEAR_TASK_SET: 271 case ISCSI_TM_FUNC_CLEAR_TASK_SET:
274 tcm_function = TMR_CLEAR_TASK_SET; 272 tcm_function = TMR_CLEAR_TASK_SET;
275 break; 273 break;
276 case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET: 274 case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
277 tcm_function = TMR_LUN_RESET; 275 tcm_function = TMR_LUN_RESET;
278 break; 276 break;
279 case ISCSI_TM_FUNC_TARGET_WARM_RESET: 277 case ISCSI_TM_FUNC_TARGET_WARM_RESET:
280 tcm_function = TMR_TARGET_WARM_RESET; 278 tcm_function = TMR_TARGET_WARM_RESET;
281 break; 279 break;
282 case ISCSI_TM_FUNC_TARGET_COLD_RESET: 280 case ISCSI_TM_FUNC_TARGET_COLD_RESET:
283 tcm_function = TMR_TARGET_COLD_RESET; 281 tcm_function = TMR_TARGET_COLD_RESET;
284 break; 282 break;
285 default: 283 default:
286 pr_err("Unknown iSCSI TMR Function:" 284 pr_err("Unknown iSCSI TMR Function:"
287 " 0x%02x\n", function); 285 " 0x%02x\n", function);
288 goto out; 286 goto out;
289 } 287 }
290 288
291 se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, 289 se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd,
292 (void *)cmd->tmr_req, tcm_function, 290 (void *)cmd->tmr_req, tcm_function,
293 GFP_KERNEL); 291 GFP_KERNEL);
294 if (!se_cmd->se_tmr_req) 292 if (!se_cmd->se_tmr_req)
295 goto out; 293 goto out;
296 294
297 cmd->tmr_req->se_tmr_req = se_cmd->se_tmr_req; 295 cmd->tmr_req->se_tmr_req = se_cmd->se_tmr_req;
298 296
299 return cmd; 297 return cmd;
300 out: 298 out:
301 iscsit_release_cmd(cmd); 299 iscsit_release_cmd(cmd);
302 return NULL; 300 return NULL;
303 } 301 }
304 302
305 int iscsit_decide_list_to_build( 303 int iscsit_decide_list_to_build(
306 struct iscsi_cmd *cmd, 304 struct iscsi_cmd *cmd,
307 u32 immediate_data_length) 305 u32 immediate_data_length)
308 { 306 {
309 struct iscsi_build_list bl; 307 struct iscsi_build_list bl;
310 struct iscsi_conn *conn = cmd->conn; 308 struct iscsi_conn *conn = cmd->conn;
311 struct iscsi_session *sess = conn->sess; 309 struct iscsi_session *sess = conn->sess;
312 struct iscsi_node_attrib *na; 310 struct iscsi_node_attrib *na;
313 311
314 if (sess->sess_ops->DataSequenceInOrder && 312 if (sess->sess_ops->DataSequenceInOrder &&
315 sess->sess_ops->DataPDUInOrder) 313 sess->sess_ops->DataPDUInOrder)
316 return 0; 314 return 0;
317 315
318 if (cmd->data_direction == DMA_NONE) 316 if (cmd->data_direction == DMA_NONE)
319 return 0; 317 return 0;
320 318
321 na = iscsit_tpg_get_node_attrib(sess); 319 na = iscsit_tpg_get_node_attrib(sess);
322 memset(&bl, 0, sizeof(struct iscsi_build_list)); 320 memset(&bl, 0, sizeof(struct iscsi_build_list));
323 321
324 if (cmd->data_direction == DMA_FROM_DEVICE) { 322 if (cmd->data_direction == DMA_FROM_DEVICE) {
325 bl.data_direction = ISCSI_PDU_READ; 323 bl.data_direction = ISCSI_PDU_READ;
326 bl.type = PDULIST_NORMAL; 324 bl.type = PDULIST_NORMAL;
327 if (na->random_datain_pdu_offsets) 325 if (na->random_datain_pdu_offsets)
328 bl.randomize |= RANDOM_DATAIN_PDU_OFFSETS; 326 bl.randomize |= RANDOM_DATAIN_PDU_OFFSETS;
329 if (na->random_datain_seq_offsets) 327 if (na->random_datain_seq_offsets)
330 bl.randomize |= RANDOM_DATAIN_SEQ_OFFSETS; 328 bl.randomize |= RANDOM_DATAIN_SEQ_OFFSETS;
331 } else { 329 } else {
332 bl.data_direction = ISCSI_PDU_WRITE; 330 bl.data_direction = ISCSI_PDU_WRITE;
333 bl.immediate_data_length = immediate_data_length; 331 bl.immediate_data_length = immediate_data_length;
334 if (na->random_r2t_offsets) 332 if (na->random_r2t_offsets)
335 bl.randomize |= RANDOM_R2T_OFFSETS; 333 bl.randomize |= RANDOM_R2T_OFFSETS;
336 334
337 if (!cmd->immediate_data && !cmd->unsolicited_data) 335 if (!cmd->immediate_data && !cmd->unsolicited_data)
338 bl.type = PDULIST_NORMAL; 336 bl.type = PDULIST_NORMAL;
339 else if (cmd->immediate_data && !cmd->unsolicited_data) 337 else if (cmd->immediate_data && !cmd->unsolicited_data)
340 bl.type = PDULIST_IMMEDIATE; 338 bl.type = PDULIST_IMMEDIATE;
341 else if (!cmd->immediate_data && cmd->unsolicited_data) 339 else if (!cmd->immediate_data && cmd->unsolicited_data)
342 bl.type = PDULIST_UNSOLICITED; 340 bl.type = PDULIST_UNSOLICITED;
343 else if (cmd->immediate_data && cmd->unsolicited_data) 341 else if (cmd->immediate_data && cmd->unsolicited_data)
344 bl.type = PDULIST_IMMEDIATE_AND_UNSOLICITED; 342 bl.type = PDULIST_IMMEDIATE_AND_UNSOLICITED;
345 } 343 }
346 344
347 return iscsit_do_build_list(cmd, &bl); 345 return iscsit_do_build_list(cmd, &bl);
348 } 346 }
349 347
350 struct iscsi_seq *iscsit_get_seq_holder_for_datain( 348 struct iscsi_seq *iscsit_get_seq_holder_for_datain(
351 struct iscsi_cmd *cmd, 349 struct iscsi_cmd *cmd,
352 u32 seq_send_order) 350 u32 seq_send_order)
353 { 351 {
354 u32 i; 352 u32 i;
355 353
356 for (i = 0; i < cmd->seq_count; i++) 354 for (i = 0; i < cmd->seq_count; i++)
357 if (cmd->seq_list[i].seq_send_order == seq_send_order) 355 if (cmd->seq_list[i].seq_send_order == seq_send_order)
358 return &cmd->seq_list[i]; 356 return &cmd->seq_list[i];
359 357
360 return NULL; 358 return NULL;
361 } 359 }
362 360
363 struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *cmd) 361 struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *cmd)
364 { 362 {
365 u32 i; 363 u32 i;
366 364
367 if (!cmd->seq_list) { 365 if (!cmd->seq_list) {
368 pr_err("struct iscsi_cmd->seq_list is NULL!\n"); 366 pr_err("struct iscsi_cmd->seq_list is NULL!\n");
369 return NULL; 367 return NULL;
370 } 368 }
371 369
372 for (i = 0; i < cmd->seq_count; i++) { 370 for (i = 0; i < cmd->seq_count; i++) {
373 if (cmd->seq_list[i].type != SEQTYPE_NORMAL) 371 if (cmd->seq_list[i].type != SEQTYPE_NORMAL)
374 continue; 372 continue;
375 if (cmd->seq_list[i].seq_send_order == cmd->seq_send_order) { 373 if (cmd->seq_list[i].seq_send_order == cmd->seq_send_order) {
376 cmd->seq_send_order++; 374 cmd->seq_send_order++;
377 return &cmd->seq_list[i]; 375 return &cmd->seq_list[i];
378 } 376 }
379 } 377 }
380 378
381 return NULL; 379 return NULL;
382 } 380 }
383 381
384 struct iscsi_r2t *iscsit_get_holder_for_r2tsn( 382 struct iscsi_r2t *iscsit_get_holder_for_r2tsn(
385 struct iscsi_cmd *cmd, 383 struct iscsi_cmd *cmd,
386 u32 r2t_sn) 384 u32 r2t_sn)
387 { 385 {
388 struct iscsi_r2t *r2t; 386 struct iscsi_r2t *r2t;
389 387
390 spin_lock_bh(&cmd->r2t_lock); 388 spin_lock_bh(&cmd->r2t_lock);
391 list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) { 389 list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
392 if (r2t->r2t_sn == r2t_sn) { 390 if (r2t->r2t_sn == r2t_sn) {
393 spin_unlock_bh(&cmd->r2t_lock); 391 spin_unlock_bh(&cmd->r2t_lock);
394 return r2t; 392 return r2t;
395 } 393 }
396 } 394 }
397 spin_unlock_bh(&cmd->r2t_lock); 395 spin_unlock_bh(&cmd->r2t_lock);
398 396
399 return NULL; 397 return NULL;
400 } 398 }
401 399
402 static inline int iscsit_check_received_cmdsn(struct iscsi_session *sess, u32 cmdsn) 400 static inline int iscsit_check_received_cmdsn(struct iscsi_session *sess, u32 cmdsn)
403 { 401 {
404 int ret; 402 int ret;
405 403
406 /* 404 /*
407 * This is the proper method of checking received CmdSN against 405 * This is the proper method of checking received CmdSN against
408 * ExpCmdSN and MaxCmdSN values, as well as accounting for out 406 * ExpCmdSN and MaxCmdSN values, as well as accounting for out
409 * of order CmdSNs due to multiple connection sessions and/or 407 * of order CmdSNs due to multiple connection sessions and/or
410 * CRC failures. 408 * CRC failures.
411 */ 409 */
412 if (iscsi_sna_gt(cmdsn, sess->max_cmd_sn)) { 410 if (iscsi_sna_gt(cmdsn, sess->max_cmd_sn)) {
413 pr_err("Received CmdSN: 0x%08x is greater than" 411 pr_err("Received CmdSN: 0x%08x is greater than"
414 " MaxCmdSN: 0x%08x, protocol error.\n", cmdsn, 412 " MaxCmdSN: 0x%08x, protocol error.\n", cmdsn,
415 sess->max_cmd_sn); 413 sess->max_cmd_sn);
416 ret = CMDSN_ERROR_CANNOT_RECOVER; 414 ret = CMDSN_ERROR_CANNOT_RECOVER;
417 415
418 } else if (cmdsn == sess->exp_cmd_sn) { 416 } else if (cmdsn == sess->exp_cmd_sn) {
419 sess->exp_cmd_sn++; 417 sess->exp_cmd_sn++;
420 pr_debug("Received CmdSN matches ExpCmdSN," 418 pr_debug("Received CmdSN matches ExpCmdSN,"
421 " incremented ExpCmdSN to: 0x%08x\n", 419 " incremented ExpCmdSN to: 0x%08x\n",
422 sess->exp_cmd_sn); 420 sess->exp_cmd_sn);
423 ret = CMDSN_NORMAL_OPERATION; 421 ret = CMDSN_NORMAL_OPERATION;
424 422
425 } else if (iscsi_sna_gt(cmdsn, sess->exp_cmd_sn)) { 423 } else if (iscsi_sna_gt(cmdsn, sess->exp_cmd_sn)) {
426 pr_debug("Received CmdSN: 0x%08x is greater" 424 pr_debug("Received CmdSN: 0x%08x is greater"
427 " than ExpCmdSN: 0x%08x, not acknowledging.\n", 425 " than ExpCmdSN: 0x%08x, not acknowledging.\n",
428 cmdsn, sess->exp_cmd_sn); 426 cmdsn, sess->exp_cmd_sn);
429 ret = CMDSN_HIGHER_THAN_EXP; 427 ret = CMDSN_HIGHER_THAN_EXP;
430 428
431 } else { 429 } else {
432 pr_err("Received CmdSN: 0x%08x is less than" 430 pr_err("Received CmdSN: 0x%08x is less than"
433 " ExpCmdSN: 0x%08x, ignoring.\n", cmdsn, 431 " ExpCmdSN: 0x%08x, ignoring.\n", cmdsn,
434 sess->exp_cmd_sn); 432 sess->exp_cmd_sn);
435 ret = CMDSN_LOWER_THAN_EXP; 433 ret = CMDSN_LOWER_THAN_EXP;
436 } 434 }
437 435
438 return ret; 436 return ret;
439 } 437 }
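
For reference, the iscsi_sna_gt() comparison used above follows RFC 1982 serial number arithmetic, so the CmdSN window checks stay correct across 32-bit wraparound. A minimal sketch of the idea (example_sna_gt is illustrative, not the kernel's definition):

static inline int example_sna_gt(u32 a, u32 b)
{
	/* a is "greater" iff it lies within half the sequence space ahead of b */
	return (s32)(a - b) > 0;
}
/* e.g. example_sna_gt(0x00000002, 0xFFFFFFFE) is true: 0x2 follows 0xFFFFFFFE. */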
440 438
441 /* 439 /*
442 * Commands may be received out of order if MC/S is in use. 440 * Commands may be received out of order if MC/S is in use.
443 * Ensure they are executed in CmdSN order. 441 * Ensure they are executed in CmdSN order.
444 */ 442 */
445 int iscsit_sequence_cmd( 443 int iscsit_sequence_cmd(
446 struct iscsi_conn *conn, 444 struct iscsi_conn *conn,
447 struct iscsi_cmd *cmd, 445 struct iscsi_cmd *cmd,
448 u32 cmdsn) 446 u32 cmdsn)
449 { 447 {
450 int ret; 448 int ret;
451 int cmdsn_ret; 449 int cmdsn_ret;
452 450
453 mutex_lock(&conn->sess->cmdsn_mutex); 451 mutex_lock(&conn->sess->cmdsn_mutex);
454 452
455 cmdsn_ret = iscsit_check_received_cmdsn(conn->sess, cmdsn); 453 cmdsn_ret = iscsit_check_received_cmdsn(conn->sess, cmdsn);
456 switch (cmdsn_ret) { 454 switch (cmdsn_ret) {
457 case CMDSN_NORMAL_OPERATION: 455 case CMDSN_NORMAL_OPERATION:
458 ret = iscsit_execute_cmd(cmd, 0); 456 ret = iscsit_execute_cmd(cmd, 0);
459 if ((ret >= 0) && !list_empty(&conn->sess->sess_ooo_cmdsn_list)) 457 if ((ret >= 0) && !list_empty(&conn->sess->sess_ooo_cmdsn_list))
460 iscsit_execute_ooo_cmdsns(conn->sess); 458 iscsit_execute_ooo_cmdsns(conn->sess);
461 break; 459 break;
462 case CMDSN_HIGHER_THAN_EXP: 460 case CMDSN_HIGHER_THAN_EXP:
463 ret = iscsit_handle_ooo_cmdsn(conn->sess, cmd, cmdsn); 461 ret = iscsit_handle_ooo_cmdsn(conn->sess, cmd, cmdsn);
464 break; 462 break;
465 case CMDSN_LOWER_THAN_EXP: 463 case CMDSN_LOWER_THAN_EXP:
466 cmd->i_state = ISTATE_REMOVE; 464 cmd->i_state = ISTATE_REMOVE;
467 iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state); 465 iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
468 ret = cmdsn_ret; 466 ret = cmdsn_ret;
469 break; 467 break;
470 default: 468 default:
471 ret = cmdsn_ret; 469 ret = cmdsn_ret;
472 break; 470 break;
473 } 471 }
474 mutex_unlock(&conn->sess->cmdsn_mutex); 472 mutex_unlock(&conn->sess->cmdsn_mutex);
475 473
476 return ret; 474 return ret;
477 } 475 }
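
A sketch of how a receive path might consume the verdict from iscsit_sequence_cmd(); the real callers live elsewhere in the iSCSI target, so example_rx_dispatch and its error policy are assumptions for illustration only:

static int example_rx_dispatch(struct iscsi_conn *conn,
			       struct iscsi_cmd *cmd, u32 cmdsn)
{
	int ret = iscsit_sequence_cmd(conn, cmd, cmdsn);

	if (ret == CMDSN_ERROR_CANNOT_RECOVER)
		return -1;	/* CmdSN above MaxCmdSN: treat as protocol error */
	/*
	 * CMDSN_HIGHER_THAN_EXP: cmd was parked on the out-of-order list;
	 * CMDSN_LOWER_THAN_EXP: cmd was already queued for removal above.
	 */
	return 0;
}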
478 476
479 int iscsit_check_unsolicited_dataout(struct iscsi_cmd *cmd, unsigned char *buf) 477 int iscsit_check_unsolicited_dataout(struct iscsi_cmd *cmd, unsigned char *buf)
480 { 478 {
481 struct iscsi_conn *conn = cmd->conn; 479 struct iscsi_conn *conn = cmd->conn;
482 struct se_cmd *se_cmd = &cmd->se_cmd; 480 struct se_cmd *se_cmd = &cmd->se_cmd;
483 struct iscsi_data *hdr = (struct iscsi_data *) buf; 481 struct iscsi_data *hdr = (struct iscsi_data *) buf;
484 u32 payload_length = ntoh24(hdr->dlength); 482 u32 payload_length = ntoh24(hdr->dlength);
485 483
486 if (conn->sess->sess_ops->InitialR2T) { 484 if (conn->sess->sess_ops->InitialR2T) {
487 pr_err("Received unexpected unsolicited data" 485 pr_err("Received unexpected unsolicited data"
488 " while InitialR2T=Yes, protocol error.\n"); 486 " while InitialR2T=Yes, protocol error.\n");
489 transport_send_check_condition_and_sense(se_cmd, 487 transport_send_check_condition_and_sense(se_cmd,
490 TCM_UNEXPECTED_UNSOLICITED_DATA, 0); 488 TCM_UNEXPECTED_UNSOLICITED_DATA, 0);
491 return -1; 489 return -1;
492 } 490 }
493 491
494 if ((cmd->first_burst_len + payload_length) > 492 if ((cmd->first_burst_len + payload_length) >
495 conn->sess->sess_ops->FirstBurstLength) { 493 conn->sess->sess_ops->FirstBurstLength) {
496 pr_err("Total %u bytes exceeds FirstBurstLength: %u" 494 pr_err("Total %u bytes exceeds FirstBurstLength: %u"
497 " for this Unsolicited DataOut Burst.\n", 495 " for this Unsolicited DataOut Burst.\n",
498 (cmd->first_burst_len + payload_length), 496 (cmd->first_burst_len + payload_length),
499 conn->sess->sess_ops->FirstBurstLength); 497 conn->sess->sess_ops->FirstBurstLength);
500 transport_send_check_condition_and_sense(se_cmd, 498 transport_send_check_condition_and_sense(se_cmd,
501 TCM_INCORRECT_AMOUNT_OF_DATA, 0); 499 TCM_INCORRECT_AMOUNT_OF_DATA, 0);
502 return -1; 500 return -1;
503 } 501 }
504 502
505 if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL)) 503 if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))
506 return 0; 504 return 0;
507 505
508 if (((cmd->first_burst_len + payload_length) != cmd->data_length) && 506 if (((cmd->first_burst_len + payload_length) != cmd->data_length) &&
509 ((cmd->first_burst_len + payload_length) != 507 ((cmd->first_burst_len + payload_length) !=
510 conn->sess->sess_ops->FirstBurstLength)) { 508 conn->sess->sess_ops->FirstBurstLength)) {
511 pr_err("Unsolicited non-immediate data received %u" 509 pr_err("Unsolicited non-immediate data received %u"
512 " does not equal FirstBurstLength: %u, and does" 510 " does not equal FirstBurstLength: %u, and does"
513 " not equal ExpXferLen %u.\n", 511 " not equal ExpXferLen %u.\n",
514 (cmd->first_burst_len + payload_length), 512 (cmd->first_burst_len + payload_length),
515 conn->sess->sess_ops->FirstBurstLength, cmd->data_length); 513 conn->sess->sess_ops->FirstBurstLength, cmd->data_length);
516 transport_send_check_condition_and_sense(se_cmd, 514 transport_send_check_condition_and_sense(se_cmd,
517 TCM_INCORRECT_AMOUNT_OF_DATA, 0); 515 TCM_INCORRECT_AMOUNT_OF_DATA, 0);
518 return -1; 516 return -1;
519 } 517 }
520 return 0; 518 return 0;
521 } 519 }
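
A worked example of the FirstBurstLength accounting above, with illustrative numbers: if FirstBurstLength is 65536 and first_burst_len is already 61440, an unsolicited DataOut carrying an 8192-byte payload gives 61440 + 8192 = 69632 > 65536, so the command is failed with TCM_INCORRECT_AMOUNT_OF_DATA.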
522 520
523 struct iscsi_cmd *iscsit_find_cmd_from_itt( 521 struct iscsi_cmd *iscsit_find_cmd_from_itt(
524 struct iscsi_conn *conn, 522 struct iscsi_conn *conn,
525 u32 init_task_tag) 523 u32 init_task_tag)
526 { 524 {
527 struct iscsi_cmd *cmd; 525 struct iscsi_cmd *cmd;
528 526
529 spin_lock_bh(&conn->cmd_lock); 527 spin_lock_bh(&conn->cmd_lock);
530 list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) { 528 list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
531 if (cmd->init_task_tag == init_task_tag) { 529 if (cmd->init_task_tag == init_task_tag) {
532 spin_unlock_bh(&conn->cmd_lock); 530 spin_unlock_bh(&conn->cmd_lock);
533 return cmd; 531 return cmd;
534 } 532 }
535 } 533 }
536 spin_unlock_bh(&conn->cmd_lock); 534 spin_unlock_bh(&conn->cmd_lock);
537 535
538 pr_err("Unable to locate ITT: 0x%08x on CID: %hu", 536 pr_err("Unable to locate ITT: 0x%08x on CID: %hu",
539 init_task_tag, conn->cid); 537 init_task_tag, conn->cid);
540 return NULL; 538 return NULL;
541 } 539 }
542 540
543 struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump( 541 struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(
544 struct iscsi_conn *conn, 542 struct iscsi_conn *conn,
545 u32 init_task_tag, 543 u32 init_task_tag,
546 u32 length) 544 u32 length)
547 { 545 {
548 struct iscsi_cmd *cmd; 546 struct iscsi_cmd *cmd;
549 547
550 spin_lock_bh(&conn->cmd_lock); 548 spin_lock_bh(&conn->cmd_lock);
551 list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) { 549 list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
552 if (cmd->init_task_tag == init_task_tag) { 550 if (cmd->init_task_tag == init_task_tag) {
553 spin_unlock_bh(&conn->cmd_lock); 551 spin_unlock_bh(&conn->cmd_lock);
554 return cmd; 552 return cmd;
555 } 553 }
556 } 554 }
557 spin_unlock_bh(&conn->cmd_lock); 555 spin_unlock_bh(&conn->cmd_lock);
558 556
559 pr_err("Unable to locate ITT: 0x%08x on CID: %hu," 557 pr_err("Unable to locate ITT: 0x%08x on CID: %hu,"
560 " dumping payload\n", init_task_tag, conn->cid); 558 " dumping payload\n", init_task_tag, conn->cid);
561 if (length) 559 if (length)
562 iscsit_dump_data_payload(conn, length, 1); 560 iscsit_dump_data_payload(conn, length, 1);
563 561
564 return NULL; 562 return NULL;
565 } 563 }
566 564
567 struct iscsi_cmd *iscsit_find_cmd_from_ttt( 565 struct iscsi_cmd *iscsit_find_cmd_from_ttt(
568 struct iscsi_conn *conn, 566 struct iscsi_conn *conn,
569 u32 targ_xfer_tag) 567 u32 targ_xfer_tag)
570 { 568 {
571 struct iscsi_cmd *cmd = NULL; 569 struct iscsi_cmd *cmd = NULL;
572 570
573 spin_lock_bh(&conn->cmd_lock); 571 spin_lock_bh(&conn->cmd_lock);
574 list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) { 572 list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
575 if (cmd->targ_xfer_tag == targ_xfer_tag) { 573 if (cmd->targ_xfer_tag == targ_xfer_tag) {
576 spin_unlock_bh(&conn->cmd_lock); 574 spin_unlock_bh(&conn->cmd_lock);
577 return cmd; 575 return cmd;
578 } 576 }
579 } 577 }
580 spin_unlock_bh(&conn->cmd_lock); 578 spin_unlock_bh(&conn->cmd_lock);
581 579
582 pr_err("Unable to locate TTT: 0x%08x on CID: %hu\n", 580 pr_err("Unable to locate TTT: 0x%08x on CID: %hu\n",
583 targ_xfer_tag, conn->cid); 581 targ_xfer_tag, conn->cid);
584 return NULL; 582 return NULL;
585 } 583 }
586 584
587 int iscsit_find_cmd_for_recovery( 585 int iscsit_find_cmd_for_recovery(
588 struct iscsi_session *sess, 586 struct iscsi_session *sess,
589 struct iscsi_cmd **cmd_ptr, 587 struct iscsi_cmd **cmd_ptr,
590 struct iscsi_conn_recovery **cr_ptr, 588 struct iscsi_conn_recovery **cr_ptr,
591 u32 init_task_tag) 589 u32 init_task_tag)
592 { 590 {
593 struct iscsi_cmd *cmd = NULL; 591 struct iscsi_cmd *cmd = NULL;
594 struct iscsi_conn_recovery *cr; 592 struct iscsi_conn_recovery *cr;
595 /* 593 /*
596 * Scan through the inactive connection recovery list's command list. 594 * Scan through the inactive connection recovery list's command list.
597 * If init_task_tag matches, the command is still outstanding. 595 * If init_task_tag matches, the command is still outstanding.
598 */ 596 */
599 spin_lock(&sess->cr_i_lock); 597 spin_lock(&sess->cr_i_lock);
600 list_for_each_entry(cr, &sess->cr_inactive_list, cr_list) { 598 list_for_each_entry(cr, &sess->cr_inactive_list, cr_list) {
601 spin_lock(&cr->conn_recovery_cmd_lock); 599 spin_lock(&cr->conn_recovery_cmd_lock);
602 list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_list) { 600 list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_list) {
603 if (cmd->init_task_tag == init_task_tag) { 601 if (cmd->init_task_tag == init_task_tag) {
604 spin_unlock(&cr->conn_recovery_cmd_lock); 602 spin_unlock(&cr->conn_recovery_cmd_lock);
605 spin_unlock(&sess->cr_i_lock); 603 spin_unlock(&sess->cr_i_lock);
606 604
607 *cr_ptr = cr; 605 *cr_ptr = cr;
608 *cmd_ptr = cmd; 606 *cmd_ptr = cmd;
609 return -2; 607 return -2;
610 } 608 }
611 } 609 }
612 spin_unlock(&cr->conn_recovery_cmd_lock); 610 spin_unlock(&cr->conn_recovery_cmd_lock);
613 } 611 }
614 spin_unlock(&sess->cr_i_lock); 612 spin_unlock(&sess->cr_i_lock);
615 /* 613 /*
616 * Scan through the active connection recovery list's command list. 614 * Scan through the active connection recovery list's command list.
617 * If init_task_tag matches, the command is ready to be reassigned. 615 * If init_task_tag matches, the command is ready to be reassigned.
618 */ 616 */
619 spin_lock(&sess->cr_a_lock); 617 spin_lock(&sess->cr_a_lock);
620 list_for_each_entry(cr, &sess->cr_active_list, cr_list) { 618 list_for_each_entry(cr, &sess->cr_active_list, cr_list) {
621 spin_lock(&cr->conn_recovery_cmd_lock); 619 spin_lock(&cr->conn_recovery_cmd_lock);
622 list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_list) { 620 list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_list) {
623 if (cmd->init_task_tag == init_task_tag) { 621 if (cmd->init_task_tag == init_task_tag) {
624 spin_unlock(&cr->conn_recovery_cmd_lock); 622 spin_unlock(&cr->conn_recovery_cmd_lock);
625 spin_unlock(&sess->cr_a_lock); 623 spin_unlock(&sess->cr_a_lock);
626 624
627 *cr_ptr = cr; 625 *cr_ptr = cr;
628 *cmd_ptr = cmd; 626 *cmd_ptr = cmd;
629 return 0; 627 return 0;
630 } 628 }
631 } 629 }
632 spin_unlock(&cr->conn_recovery_cmd_lock); 630 spin_unlock(&cr->conn_recovery_cmd_lock);
633 } 631 }
634 spin_unlock(&sess->cr_a_lock); 632 spin_unlock(&sess->cr_a_lock);
635 633
636 return -1; 634 return -1;
637 } 635 }
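
A sketch interpreting the three return codes of iscsit_find_cmd_for_recovery() above (example_recovery_lookup is hypothetical):

static void example_recovery_lookup(struct iscsi_session *sess, u32 itt)
{
	struct iscsi_cmd *cmd;
	struct iscsi_conn_recovery *cr;

	switch (iscsit_find_cmd_for_recovery(sess, &cmd, &cr, itt)) {
	case -2:	/* found on an inactive connection recovery list */
		break;
	case 0:		/* found on an active list: ready for TASK_REASSIGN */
		break;
	default:	/* -1: ITT not present in any recovery list */
		break;
	}
}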
638 636
639 void iscsit_add_cmd_to_immediate_queue( 637 void iscsit_add_cmd_to_immediate_queue(
640 struct iscsi_cmd *cmd, 638 struct iscsi_cmd *cmd,
641 struct iscsi_conn *conn, 639 struct iscsi_conn *conn,
642 u8 state) 640 u8 state)
643 { 641 {
644 struct iscsi_queue_req *qr; 642 struct iscsi_queue_req *qr;
645 643
646 qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC); 644 qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC);
647 if (!qr) { 645 if (!qr) {
648 pr_err("Unable to allocate memory for" 646 pr_err("Unable to allocate memory for"
649 " struct iscsi_queue_req\n"); 647 " struct iscsi_queue_req\n");
650 return; 648 return;
651 } 649 }
652 INIT_LIST_HEAD(&qr->qr_list); 650 INIT_LIST_HEAD(&qr->qr_list);
653 qr->cmd = cmd; 651 qr->cmd = cmd;
654 qr->state = state; 652 qr->state = state;
655 653
656 spin_lock_bh(&conn->immed_queue_lock); 654 spin_lock_bh(&conn->immed_queue_lock);
657 list_add_tail(&qr->qr_list, &conn->immed_queue_list); 655 list_add_tail(&qr->qr_list, &conn->immed_queue_list);
658 atomic_inc(&cmd->immed_queue_count); 656 atomic_inc(&cmd->immed_queue_count);
659 atomic_set(&conn->check_immediate_queue, 1); 657 atomic_set(&conn->check_immediate_queue, 1);
660 spin_unlock_bh(&conn->immed_queue_lock); 658 spin_unlock_bh(&conn->immed_queue_lock);
661 659
662 wake_up_process(conn->thread_set->tx_thread); 660 wake_up_process(conn->thread_set->tx_thread);
663 } 661 }
664 662
665 struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *conn) 663 struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *conn)
666 { 664 {
667 struct iscsi_queue_req *qr; 665 struct iscsi_queue_req *qr;
668 666
669 spin_lock_bh(&conn->immed_queue_lock); 667 spin_lock_bh(&conn->immed_queue_lock);
670 if (list_empty(&conn->immed_queue_list)) { 668 if (list_empty(&conn->immed_queue_list)) {
671 spin_unlock_bh(&conn->immed_queue_lock); 669 spin_unlock_bh(&conn->immed_queue_lock);
672 return NULL; 670 return NULL;
673 } 671 }
674 list_for_each_entry(qr, &conn->immed_queue_list, qr_list) 672 list_for_each_entry(qr, &conn->immed_queue_list, qr_list)
675 break; 673 break;
676 674
677 list_del(&qr->qr_list); 675 list_del(&qr->qr_list);
678 if (qr->cmd) 676 if (qr->cmd)
679 atomic_dec(&qr->cmd->immed_queue_count); 677 atomic_dec(&qr->cmd->immed_queue_count);
680 spin_unlock_bh(&conn->immed_queue_lock); 678 spin_unlock_bh(&conn->immed_queue_lock);
681 679
682 return qr; 680 return qr;
683 } 681 }
684 682
685 static void iscsit_remove_cmd_from_immediate_queue( 683 static void iscsit_remove_cmd_from_immediate_queue(
686 struct iscsi_cmd *cmd, 684 struct iscsi_cmd *cmd,
687 struct iscsi_conn *conn) 685 struct iscsi_conn *conn)
688 { 686 {
689 struct iscsi_queue_req *qr, *qr_tmp; 687 struct iscsi_queue_req *qr, *qr_tmp;
690 688
691 spin_lock_bh(&conn->immed_queue_lock); 689 spin_lock_bh(&conn->immed_queue_lock);
692 if (!atomic_read(&cmd->immed_queue_count)) { 690 if (!atomic_read(&cmd->immed_queue_count)) {
693 spin_unlock_bh(&conn->immed_queue_lock); 691 spin_unlock_bh(&conn->immed_queue_lock);
694 return; 692 return;
695 } 693 }
696 694
697 list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) { 695 list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) {
698 if (qr->cmd != cmd) 696 if (qr->cmd != cmd)
699 continue; 697 continue;
700 698
701 atomic_dec(&qr->cmd->immed_queue_count); 699 atomic_dec(&qr->cmd->immed_queue_count);
702 list_del(&qr->qr_list); 700 list_del(&qr->qr_list);
703 kmem_cache_free(lio_qr_cache, qr); 701 kmem_cache_free(lio_qr_cache, qr);
704 } 702 }
705 spin_unlock_bh(&conn->immed_queue_lock); 703 spin_unlock_bh(&conn->immed_queue_lock);
706 704
707 if (atomic_read(&cmd->immed_queue_count)) { 705 if (atomic_read(&cmd->immed_queue_count)) {
708 pr_err("ITT: 0x%08x immed_queue_count: %d\n", 706 pr_err("ITT: 0x%08x immed_queue_count: %d\n",
709 cmd->init_task_tag, 707 cmd->init_task_tag,
710 atomic_read(&cmd->immed_queue_count)); 708 atomic_read(&cmd->immed_queue_count));
711 } 709 }
712 } 710 }
713 711
714 void iscsit_add_cmd_to_response_queue( 712 void iscsit_add_cmd_to_response_queue(
715 struct iscsi_cmd *cmd, 713 struct iscsi_cmd *cmd,
716 struct iscsi_conn *conn, 714 struct iscsi_conn *conn,
717 u8 state) 715 u8 state)
718 { 716 {
719 struct iscsi_queue_req *qr; 717 struct iscsi_queue_req *qr;
720 718
721 qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC); 719 qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC);
722 if (!qr) { 720 if (!qr) {
723 pr_err("Unable to allocate memory for" 721 pr_err("Unable to allocate memory for"
724 " struct iscsi_queue_req\n"); 722 " struct iscsi_queue_req\n");
725 return; 723 return;
726 } 724 }
727 INIT_LIST_HEAD(&qr->qr_list); 725 INIT_LIST_HEAD(&qr->qr_list);
728 qr->cmd = cmd; 726 qr->cmd = cmd;
729 qr->state = state; 727 qr->state = state;
730 728
731 spin_lock_bh(&conn->response_queue_lock); 729 spin_lock_bh(&conn->response_queue_lock);
732 list_add_tail(&qr->qr_list, &conn->response_queue_list); 730 list_add_tail(&qr->qr_list, &conn->response_queue_list);
733 atomic_inc(&cmd->response_queue_count); 731 atomic_inc(&cmd->response_queue_count);
734 spin_unlock_bh(&conn->response_queue_lock); 732 spin_unlock_bh(&conn->response_queue_lock);
735 733
736 wake_up_process(conn->thread_set->tx_thread); 734 wake_up_process(conn->thread_set->tx_thread);
737 } 735 }
738 736
739 struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *conn) 737 struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *conn)
740 { 738 {
741 struct iscsi_queue_req *qr; 739 struct iscsi_queue_req *qr;
742 740
743 spin_lock_bh(&conn->response_queue_lock); 741 spin_lock_bh(&conn->response_queue_lock);
744 if (list_empty(&conn->response_queue_list)) { 742 if (list_empty(&conn->response_queue_list)) {
745 spin_unlock_bh(&conn->response_queue_lock); 743 spin_unlock_bh(&conn->response_queue_lock);
746 return NULL; 744 return NULL;
747 } 745 }
748 746
749 list_for_each_entry(qr, &conn->response_queue_list, qr_list) 747 list_for_each_entry(qr, &conn->response_queue_list, qr_list)
750 break; 748 break;
751 749
752 list_del(&qr->qr_list); 750 list_del(&qr->qr_list);
753 if (qr->cmd) 751 if (qr->cmd)
754 atomic_dec(&qr->cmd->response_queue_count); 752 atomic_dec(&qr->cmd->response_queue_count);
755 spin_unlock_bh(&conn->response_queue_lock); 753 spin_unlock_bh(&conn->response_queue_lock);
756 754
757 return qr; 755 return qr;
758 } 756 }
759 757
760 static void iscsit_remove_cmd_from_response_queue( 758 static void iscsit_remove_cmd_from_response_queue(
761 struct iscsi_cmd *cmd, 759 struct iscsi_cmd *cmd,
762 struct iscsi_conn *conn) 760 struct iscsi_conn *conn)
763 { 761 {
764 struct iscsi_queue_req *qr, *qr_tmp; 762 struct iscsi_queue_req *qr, *qr_tmp;
765 763
766 spin_lock_bh(&conn->response_queue_lock); 764 spin_lock_bh(&conn->response_queue_lock);
767 if (!atomic_read(&cmd->response_queue_count)) { 765 if (!atomic_read(&cmd->response_queue_count)) {
768 spin_unlock_bh(&conn->response_queue_lock); 766 spin_unlock_bh(&conn->response_queue_lock);
769 return; 767 return;
770 } 768 }
771 769
772 list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list, 770 list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list,
773 qr_list) { 771 qr_list) {
774 if (qr->cmd != cmd) 772 if (qr->cmd != cmd)
775 continue; 773 continue;
776 774
777 atomic_dec(&qr->cmd->response_queue_count); 775 atomic_dec(&qr->cmd->response_queue_count);
778 list_del(&qr->qr_list); 776 list_del(&qr->qr_list);
779 kmem_cache_free(lio_qr_cache, qr); 777 kmem_cache_free(lio_qr_cache, qr);
780 } 778 }
781 spin_unlock_bh(&conn->response_queue_lock); 779 spin_unlock_bh(&conn->response_queue_lock);
782 780
783 if (atomic_read(&cmd->response_queue_count)) { 781 if (atomic_read(&cmd->response_queue_count)) {
784 pr_err("ITT: 0x%08x response_queue_count: %d\n", 782 pr_err("ITT: 0x%08x response_queue_count: %d\n",
785 cmd->init_task_tag, 783 cmd->init_task_tag,
786 atomic_read(&cmd->response_queue_count)); 784 atomic_read(&cmd->response_queue_count));
787 } 785 }
788 } 786 }
789 787
790 void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *conn) 788 void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *conn)
791 { 789 {
792 struct iscsi_queue_req *qr, *qr_tmp; 790 struct iscsi_queue_req *qr, *qr_tmp;
793 791
794 spin_lock_bh(&conn->immed_queue_lock); 792 spin_lock_bh(&conn->immed_queue_lock);
795 list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) { 793 list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) {
796 list_del(&qr->qr_list); 794 list_del(&qr->qr_list);
797 if (qr->cmd) 795 if (qr->cmd)
798 atomic_dec(&qr->cmd->immed_queue_count); 796 atomic_dec(&qr->cmd->immed_queue_count);
799 797
800 kmem_cache_free(lio_qr_cache, qr); 798 kmem_cache_free(lio_qr_cache, qr);
801 } 799 }
802 spin_unlock_bh(&conn->immed_queue_lock); 800 spin_unlock_bh(&conn->immed_queue_lock);
803 801
804 spin_lock_bh(&conn->response_queue_lock); 802 spin_lock_bh(&conn->response_queue_lock);
805 list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list, 803 list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list,
806 qr_list) { 804 qr_list) {
807 list_del(&qr->qr_list); 805 list_del(&qr->qr_list);
808 if (qr->cmd) 806 if (qr->cmd)
809 atomic_dec(&qr->cmd->response_queue_count); 807 atomic_dec(&qr->cmd->response_queue_count);
810 808
811 kmem_cache_free(lio_qr_cache, qr); 809 kmem_cache_free(lio_qr_cache, qr);
812 } 810 }
813 spin_unlock_bh(&conn->response_queue_lock); 811 spin_unlock_bh(&conn->response_queue_lock);
814 } 812 }
815 813
816 void iscsit_release_cmd(struct iscsi_cmd *cmd) 814 void iscsit_release_cmd(struct iscsi_cmd *cmd)
817 { 815 {
818 struct iscsi_conn *conn = cmd->conn; 816 struct iscsi_conn *conn = cmd->conn;
819 int i; 817 int i;
820 818
821 iscsit_free_r2ts_from_list(cmd); 819 iscsit_free_r2ts_from_list(cmd);
822 iscsit_free_all_datain_reqs(cmd); 820 iscsit_free_all_datain_reqs(cmd);
823 821
824 kfree(cmd->buf_ptr); 822 kfree(cmd->buf_ptr);
825 kfree(cmd->pdu_list); 823 kfree(cmd->pdu_list);
826 kfree(cmd->seq_list); 824 kfree(cmd->seq_list);
827 kfree(cmd->tmr_req); 825 kfree(cmd->tmr_req);
828 kfree(cmd->iov_data); 826 kfree(cmd->iov_data);
829 827
830 for (i = 0; i < cmd->t_mem_sg_nents; i++) 828 for (i = 0; i < cmd->t_mem_sg_nents; i++)
831 __free_page(sg_page(&cmd->t_mem_sg[i])); 829 __free_page(sg_page(&cmd->t_mem_sg[i]));
832 830
833 kfree(cmd->t_mem_sg); 831 kfree(cmd->t_mem_sg);
834 832
835 if (conn) { 833 if (conn) {
836 iscsit_remove_cmd_from_immediate_queue(cmd, conn); 834 iscsit_remove_cmd_from_immediate_queue(cmd, conn);
837 iscsit_remove_cmd_from_response_queue(cmd, conn); 835 iscsit_remove_cmd_from_response_queue(cmd, conn);
838 } 836 }
839 837
840 kmem_cache_free(lio_cmd_cache, cmd); 838 kmem_cache_free(lio_cmd_cache, cmd);
841 } 839 }
842 840
843 void iscsit_free_cmd(struct iscsi_cmd *cmd) 841 void iscsit_free_cmd(struct iscsi_cmd *cmd)
844 { 842 {
845 /* 843 /*
846 * Determine if a struct se_cmd is associated with 844 * Determine if a struct se_cmd is associated with
847 * this struct iscsi_cmd. 845 * this struct iscsi_cmd.
848 */ 846 */
849 switch (cmd->iscsi_opcode) { 847 switch (cmd->iscsi_opcode) {
850 case ISCSI_OP_SCSI_CMD: 848 case ISCSI_OP_SCSI_CMD:
851 case ISCSI_OP_SCSI_TMFUNC: 849 case ISCSI_OP_SCSI_TMFUNC:
852 transport_generic_free_cmd(&cmd->se_cmd, 1); 850 transport_generic_free_cmd(&cmd->se_cmd, 1);
853 break; 851 break;
854 default: 852 default:
855 iscsit_release_cmd(cmd); 853 iscsit_release_cmd(cmd);
856 break; 854 break;
857 } 855 }
858 } 856 }
859 857
860 int iscsit_check_session_usage_count(struct iscsi_session *sess) 858 int iscsit_check_session_usage_count(struct iscsi_session *sess)
861 { 859 {
862 spin_lock_bh(&sess->session_usage_lock); 860 spin_lock_bh(&sess->session_usage_lock);
863 if (sess->session_usage_count != 0) { 861 if (sess->session_usage_count != 0) {
864 sess->session_waiting_on_uc = 1; 862 sess->session_waiting_on_uc = 1;
865 spin_unlock_bh(&sess->session_usage_lock); 863 spin_unlock_bh(&sess->session_usage_lock);
866 if (in_interrupt()) 864 if (in_interrupt())
867 return 2; 865 return 2;
868 866
869 wait_for_completion(&sess->session_waiting_on_uc_comp); 867 wait_for_completion(&sess->session_waiting_on_uc_comp);
870 return 1; 868 return 1;
871 } 869 }
872 spin_unlock_bh(&sess->session_usage_lock); 870 spin_unlock_bh(&sess->session_usage_lock);
873 871
874 return 0; 872 return 0;
875 } 873 }
876 874
877 void iscsit_dec_session_usage_count(struct iscsi_session *sess) 875 void iscsit_dec_session_usage_count(struct iscsi_session *sess)
878 { 876 {
879 spin_lock_bh(&sess->session_usage_lock); 877 spin_lock_bh(&sess->session_usage_lock);
880 sess->session_usage_count--; 878 sess->session_usage_count--;
881 879
882 if (!sess->session_usage_count && sess->session_waiting_on_uc) 880 if (!sess->session_usage_count && sess->session_waiting_on_uc)
883 complete(&sess->session_waiting_on_uc_comp); 881 complete(&sess->session_waiting_on_uc_comp);
884 882
885 spin_unlock_bh(&sess->session_usage_lock); 883 spin_unlock_bh(&sess->session_usage_lock);
886 } 884 }
887 885
888 void iscsit_inc_session_usage_count(struct iscsi_session *sess) 886 void iscsit_inc_session_usage_count(struct iscsi_session *sess)
889 { 887 {
890 spin_lock_bh(&sess->session_usage_lock); 888 spin_lock_bh(&sess->session_usage_lock);
891 sess->session_usage_count++; 889 sess->session_usage_count++;
892 spin_unlock_bh(&sess->session_usage_lock); 890 spin_unlock_bh(&sess->session_usage_lock);
893 } 891 }
894 892
895 /* 893 /*
896 * Setup conn->if_marker and conn->of_marker values based upon 894 * Setup conn->if_marker and conn->of_marker values based upon
897 * the initial marker-less interval. (see iSCSI v19 A.2) 895 * the initial marker-less interval. (see iSCSI v19 A.2)
898 */ 896 */
899 int iscsit_set_sync_and_steering_values(struct iscsi_conn *conn) 897 int iscsit_set_sync_and_steering_values(struct iscsi_conn *conn)
900 { 898 {
901 int login_ifmarker_count = 0, login_ofmarker_count = 0, next_marker = 0; 899 int login_ifmarker_count = 0, login_ofmarker_count = 0, next_marker = 0;
902 /* 900 /*
903 * IFMarkInt and OFMarkInt are negotiated as 32-bit words. 901 * IFMarkInt and OFMarkInt are negotiated as 32-bit words.
904 */ 902 */
905 u32 IFMarkInt = (conn->conn_ops->IFMarkInt * 4); 903 u32 IFMarkInt = (conn->conn_ops->IFMarkInt * 4);
906 u32 OFMarkInt = (conn->conn_ops->OFMarkInt * 4); 904 u32 OFMarkInt = (conn->conn_ops->OFMarkInt * 4);
907 905
908 if (conn->conn_ops->OFMarker) { 906 if (conn->conn_ops->OFMarker) {
909 /* 907 /*
910 * Account for the first Login Command received not 908 * Account for the first Login Command received not
911 * via iscsi_recv_msg(). 909 * via iscsi_recv_msg().
912 */ 910 */
913 conn->of_marker += ISCSI_HDR_LEN; 911 conn->of_marker += ISCSI_HDR_LEN;
914 if (conn->of_marker <= OFMarkInt) { 912 if (conn->of_marker <= OFMarkInt) {
915 conn->of_marker = (OFMarkInt - conn->of_marker); 913 conn->of_marker = (OFMarkInt - conn->of_marker);
916 } else { 914 } else {
917 login_ofmarker_count = (conn->of_marker / OFMarkInt); 915 login_ofmarker_count = (conn->of_marker / OFMarkInt);
918 next_marker = (OFMarkInt * (login_ofmarker_count + 1)) + 916 next_marker = (OFMarkInt * (login_ofmarker_count + 1)) +
919 (login_ofmarker_count * MARKER_SIZE); 917 (login_ofmarker_count * MARKER_SIZE);
920 conn->of_marker = (next_marker - conn->of_marker); 918 conn->of_marker = (next_marker - conn->of_marker);
921 } 919 }
922 conn->of_marker_offset = 0; 920 conn->of_marker_offset = 0;
923 pr_debug("Setting OFMarker value to %u based on Initial" 921 pr_debug("Setting OFMarker value to %u based on Initial"
924 " Markerless Interval.\n", conn->of_marker); 922 " Markerless Interval.\n", conn->of_marker);
925 } 923 }
926 924
927 if (conn->conn_ops->IFMarker) { 925 if (conn->conn_ops->IFMarker) {
928 if (conn->if_marker <= IFMarkInt) { 926 if (conn->if_marker <= IFMarkInt) {
929 conn->if_marker = (IFMarkInt - conn->if_marker); 927 conn->if_marker = (IFMarkInt - conn->if_marker);
930 } else { 928 } else {
931 login_ifmarker_count = (conn->if_marker / IFMarkInt); 929 login_ifmarker_count = (conn->if_marker / IFMarkInt);
932 next_marker = (IFMarkInt * (login_ifmarker_count + 1)) + 930 next_marker = (IFMarkInt * (login_ifmarker_count + 1)) +
933 (login_ifmarker_count * MARKER_SIZE); 931 (login_ifmarker_count * MARKER_SIZE);
934 conn->if_marker = (next_marker - conn->if_marker); 932 conn->if_marker = (next_marker - conn->if_marker);
935 } 933 }
936 pr_debug("Setting IFMarker value to %u based on Initial" 934 pr_debug("Setting IFMarker value to %u based on Initial"
937 " Markerless Interval.\n", conn->if_marker); 935 " Markerless Interval.\n", conn->if_marker);
938 } 936 }
939 937
940 return 0; 938 return 0;
941 } 939 }
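
A worked example of the marker math above, with an illustrative negotiation: if OFMarkInt is negotiated as 2048 words, the byte interval is 2048 * 4 = 8192; the initial Login PDU header already consumed ISCSI_HDR_LEN = 48 bytes, so of_marker becomes 8192 - 48 = 8144, i.e. the first sync-and-steering marker falls due after 8144 more bytes.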
942 940
943 struct iscsi_conn *iscsit_get_conn_from_cid(struct iscsi_session *sess, u16 cid) 941 struct iscsi_conn *iscsit_get_conn_from_cid(struct iscsi_session *sess, u16 cid)
944 { 942 {
945 struct iscsi_conn *conn; 943 struct iscsi_conn *conn;
946 944
947 spin_lock_bh(&sess->conn_lock); 945 spin_lock_bh(&sess->conn_lock);
948 list_for_each_entry(conn, &sess->sess_conn_list, conn_list) { 946 list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
949 if ((conn->cid == cid) && 947 if ((conn->cid == cid) &&
950 (conn->conn_state == TARG_CONN_STATE_LOGGED_IN)) { 948 (conn->conn_state == TARG_CONN_STATE_LOGGED_IN)) {
951 iscsit_inc_conn_usage_count(conn); 949 iscsit_inc_conn_usage_count(conn);
952 spin_unlock_bh(&sess->conn_lock); 950 spin_unlock_bh(&sess->conn_lock);
953 return conn; 951 return conn;
954 } 952 }
955 } 953 }
956 spin_unlock_bh(&sess->conn_lock); 954 spin_unlock_bh(&sess->conn_lock);
957 955
958 return NULL; 956 return NULL;
959 } 957 }
960 958
961 struct iscsi_conn *iscsit_get_conn_from_cid_rcfr(struct iscsi_session *sess, u16 cid) 959 struct iscsi_conn *iscsit_get_conn_from_cid_rcfr(struct iscsi_session *sess, u16 cid)
962 { 960 {
963 struct iscsi_conn *conn; 961 struct iscsi_conn *conn;
964 962
965 spin_lock_bh(&sess->conn_lock); 963 spin_lock_bh(&sess->conn_lock);
966 list_for_each_entry(conn, &sess->sess_conn_list, conn_list) { 964 list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
967 if (conn->cid == cid) { 965 if (conn->cid == cid) {
968 iscsit_inc_conn_usage_count(conn); 966 iscsit_inc_conn_usage_count(conn);
969 spin_lock(&conn->state_lock); 967 spin_lock(&conn->state_lock);
970 atomic_set(&conn->connection_wait_rcfr, 1); 968 atomic_set(&conn->connection_wait_rcfr, 1);
971 spin_unlock(&conn->state_lock); 969 spin_unlock(&conn->state_lock);
972 spin_unlock_bh(&sess->conn_lock); 970 spin_unlock_bh(&sess->conn_lock);
973 return conn; 971 return conn;
974 } 972 }
975 } 973 }
976 spin_unlock_bh(&sess->conn_lock); 974 spin_unlock_bh(&sess->conn_lock);
977 975
978 return NULL; 976 return NULL;
979 } 977 }
980 978
981 void iscsit_check_conn_usage_count(struct iscsi_conn *conn) 979 void iscsit_check_conn_usage_count(struct iscsi_conn *conn)
982 { 980 {
983 spin_lock_bh(&conn->conn_usage_lock); 981 spin_lock_bh(&conn->conn_usage_lock);
984 if (conn->conn_usage_count != 0) { 982 if (conn->conn_usage_count != 0) {
985 conn->conn_waiting_on_uc = 1; 983 conn->conn_waiting_on_uc = 1;
986 spin_unlock_bh(&conn->conn_usage_lock); 984 spin_unlock_bh(&conn->conn_usage_lock);
987 985
988 wait_for_completion(&conn->conn_waiting_on_uc_comp); 986 wait_for_completion(&conn->conn_waiting_on_uc_comp);
989 return; 987 return;
990 } 988 }
991 spin_unlock_bh(&conn->conn_usage_lock); 989 spin_unlock_bh(&conn->conn_usage_lock);
992 } 990 }
993 991
994 void iscsit_dec_conn_usage_count(struct iscsi_conn *conn) 992 void iscsit_dec_conn_usage_count(struct iscsi_conn *conn)
995 { 993 {
996 spin_lock_bh(&conn->conn_usage_lock); 994 spin_lock_bh(&conn->conn_usage_lock);
997 conn->conn_usage_count--; 995 conn->conn_usage_count--;
998 996
999 if (!conn->conn_usage_count && conn->conn_waiting_on_uc) 997 if (!conn->conn_usage_count && conn->conn_waiting_on_uc)
1000 complete(&conn->conn_waiting_on_uc_comp); 998 complete(&conn->conn_waiting_on_uc_comp);
1001 999
1002 spin_unlock_bh(&conn->conn_usage_lock); 1000 spin_unlock_bh(&conn->conn_usage_lock);
1003 } 1001 }
1004 1002
1005 void iscsit_inc_conn_usage_count(struct iscsi_conn *conn) 1003 void iscsit_inc_conn_usage_count(struct iscsi_conn *conn)
1006 { 1004 {
1007 spin_lock_bh(&conn->conn_usage_lock); 1005 spin_lock_bh(&conn->conn_usage_lock);
1008 conn->conn_usage_count++; 1006 conn->conn_usage_count++;
1009 spin_unlock_bh(&conn->conn_usage_lock); 1007 spin_unlock_bh(&conn->conn_usage_lock);
1010 } 1008 }
1011 1009
1012 static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response) 1010 static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response)
1013 { 1011 {
1014 u8 state; 1012 u8 state;
1015 struct iscsi_cmd *cmd; 1013 struct iscsi_cmd *cmd;
1016 1014
1017 cmd = iscsit_allocate_cmd(conn, GFP_ATOMIC); 1015 cmd = iscsit_allocate_cmd(conn, GFP_ATOMIC);
1018 if (!cmd) 1016 if (!cmd)
1019 return -1; 1017 return -1;
1020 1018
1021 cmd->iscsi_opcode = ISCSI_OP_NOOP_IN; 1019 cmd->iscsi_opcode = ISCSI_OP_NOOP_IN;
1022 state = (want_response) ? ISTATE_SEND_NOPIN_WANT_RESPONSE : 1020 state = (want_response) ? ISTATE_SEND_NOPIN_WANT_RESPONSE :
1023 ISTATE_SEND_NOPIN_NO_RESPONSE; 1021 ISTATE_SEND_NOPIN_NO_RESPONSE;
1024 cmd->init_task_tag = 0xFFFFFFFF; 1022 cmd->init_task_tag = 0xFFFFFFFF;
1025 spin_lock_bh(&conn->sess->ttt_lock); 1023 spin_lock_bh(&conn->sess->ttt_lock);
1026 cmd->targ_xfer_tag = (want_response) ? conn->sess->targ_xfer_tag++ : 1024 cmd->targ_xfer_tag = (want_response) ? conn->sess->targ_xfer_tag++ :
1027 0xFFFFFFFF; 1025 0xFFFFFFFF;
1028 if (want_response && (cmd->targ_xfer_tag == 0xFFFFFFFF)) 1026 if (want_response && (cmd->targ_xfer_tag == 0xFFFFFFFF))
1029 cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++; 1027 cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
1030 spin_unlock_bh(&conn->sess->ttt_lock); 1028 spin_unlock_bh(&conn->sess->ttt_lock);
1031 1029
1032 spin_lock_bh(&conn->cmd_lock); 1030 spin_lock_bh(&conn->cmd_lock);
1033 list_add_tail(&cmd->i_list, &conn->conn_cmd_list); 1031 list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
1034 spin_unlock_bh(&conn->cmd_lock); 1032 spin_unlock_bh(&conn->cmd_lock);
1035 1033
1036 if (want_response) 1034 if (want_response)
1037 iscsit_start_nopin_response_timer(conn); 1035 iscsit_start_nopin_response_timer(conn);
1038 iscsit_add_cmd_to_immediate_queue(cmd, conn, state); 1036 iscsit_add_cmd_to_immediate_queue(cmd, conn, state);
1039 1037
1040 return 0; 1038 return 0;
1041 } 1039 }
1042 1040
1043 static void iscsit_handle_nopin_response_timeout(unsigned long data) 1041 static void iscsit_handle_nopin_response_timeout(unsigned long data)
1044 { 1042 {
1045 struct iscsi_conn *conn = (struct iscsi_conn *) data; 1043 struct iscsi_conn *conn = (struct iscsi_conn *) data;
1046 1044
1047 iscsit_inc_conn_usage_count(conn); 1045 iscsit_inc_conn_usage_count(conn);
1048 1046
1049 spin_lock_bh(&conn->nopin_timer_lock); 1047 spin_lock_bh(&conn->nopin_timer_lock);
1050 if (conn->nopin_response_timer_flags & ISCSI_TF_STOP) { 1048 if (conn->nopin_response_timer_flags & ISCSI_TF_STOP) {
1051 spin_unlock_bh(&conn->nopin_timer_lock); 1049 spin_unlock_bh(&conn->nopin_timer_lock);
1052 iscsit_dec_conn_usage_count(conn); 1050 iscsit_dec_conn_usage_count(conn);
1053 return; 1051 return;
1054 } 1052 }
1055 1053
1056 pr_debug("Did not receive response to NOPIN on CID: %hu on" 1054 pr_debug("Did not receive response to NOPIN on CID: %hu on"
1057 " SID: %u, failing connection.\n", conn->cid, 1055 " SID: %u, failing connection.\n", conn->cid,
1058 conn->sess->sid); 1056 conn->sess->sid);
1059 conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING; 1057 conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
1060 spin_unlock_bh(&conn->nopin_timer_lock); 1058 spin_unlock_bh(&conn->nopin_timer_lock);
1061 1059
1062 { 1060 {
1063 struct iscsi_portal_group *tpg = conn->sess->tpg; 1061 struct iscsi_portal_group *tpg = conn->sess->tpg;
1064 struct iscsi_tiqn *tiqn = tpg->tpg_tiqn; 1062 struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
1065 1063
1066 if (tiqn) { 1064 if (tiqn) {
1067 spin_lock_bh(&tiqn->sess_err_stats.lock); 1065 spin_lock_bh(&tiqn->sess_err_stats.lock);
1068 strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name, 1066 strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
1069 (void *)conn->sess->sess_ops->InitiatorName); 1067 (void *)conn->sess->sess_ops->InitiatorName);
1070 tiqn->sess_err_stats.last_sess_failure_type = 1068 tiqn->sess_err_stats.last_sess_failure_type =
1071 ISCSI_SESS_ERR_CXN_TIMEOUT; 1069 ISCSI_SESS_ERR_CXN_TIMEOUT;
1072 tiqn->sess_err_stats.cxn_timeout_errors++; 1070 tiqn->sess_err_stats.cxn_timeout_errors++;
1073 conn->sess->conn_timeout_errors++; 1071 conn->sess->conn_timeout_errors++;
1074 spin_unlock_bh(&tiqn->sess_err_stats.lock); 1072 spin_unlock_bh(&tiqn->sess_err_stats.lock);
1075 } 1073 }
1076 } 1074 }
1077 1075
1078 iscsit_cause_connection_reinstatement(conn, 0); 1076 iscsit_cause_connection_reinstatement(conn, 0);
1079 iscsit_dec_conn_usage_count(conn); 1077 iscsit_dec_conn_usage_count(conn);
1080 } 1078 }
1081 1079
1082 void iscsit_mod_nopin_response_timer(struct iscsi_conn *conn) 1080 void iscsit_mod_nopin_response_timer(struct iscsi_conn *conn)
1083 { 1081 {
1084 struct iscsi_session *sess = conn->sess; 1082 struct iscsi_session *sess = conn->sess;
1085 struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess); 1083 struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
1086 1084
1087 spin_lock_bh(&conn->nopin_timer_lock); 1085 spin_lock_bh(&conn->nopin_timer_lock);
1088 if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) { 1086 if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) {
1089 spin_unlock_bh(&conn->nopin_timer_lock); 1087 spin_unlock_bh(&conn->nopin_timer_lock);
1090 return; 1088 return;
1091 } 1089 }
1092 1090
1093 mod_timer(&conn->nopin_response_timer, 1091 mod_timer(&conn->nopin_response_timer,
1094 (get_jiffies_64() + na->nopin_response_timeout * HZ)); 1092 (get_jiffies_64() + na->nopin_response_timeout * HZ));
1095 spin_unlock_bh(&conn->nopin_timer_lock); 1093 spin_unlock_bh(&conn->nopin_timer_lock);
1096 } 1094 }
1097 1095
1098 /* 1096 /*
1099 * Called with conn->nopin_timer_lock held. 1097 * Called with conn->nopin_timer_lock held.
1100 */ 1098 */
1101 void iscsit_start_nopin_response_timer(struct iscsi_conn *conn) 1099 void iscsit_start_nopin_response_timer(struct iscsi_conn *conn)
1102 { 1100 {
1103 struct iscsi_session *sess = conn->sess; 1101 struct iscsi_session *sess = conn->sess;
1104 struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess); 1102 struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
1105 1103
1106 spin_lock_bh(&conn->nopin_timer_lock); 1104 spin_lock_bh(&conn->nopin_timer_lock);
1107 if (conn->nopin_response_timer_flags & ISCSI_TF_RUNNING) { 1105 if (conn->nopin_response_timer_flags & ISCSI_TF_RUNNING) {
1108 spin_unlock_bh(&conn->nopin_timer_lock); 1106 spin_unlock_bh(&conn->nopin_timer_lock);
1109 return; 1107 return;
1110 } 1108 }
1111 1109
1112 init_timer(&conn->nopin_response_timer); 1110 init_timer(&conn->nopin_response_timer);
1113 conn->nopin_response_timer.expires = 1111 conn->nopin_response_timer.expires =
1114 (get_jiffies_64() + na->nopin_response_timeout * HZ); 1112 (get_jiffies_64() + na->nopin_response_timeout * HZ);
1115 conn->nopin_response_timer.data = (unsigned long)conn; 1113 conn->nopin_response_timer.data = (unsigned long)conn;
1116 conn->nopin_response_timer.function = iscsit_handle_nopin_response_timeout; 1114 conn->nopin_response_timer.function = iscsit_handle_nopin_response_timeout;
1117 conn->nopin_response_timer_flags &= ~ISCSI_TF_STOP; 1115 conn->nopin_response_timer_flags &= ~ISCSI_TF_STOP;
1118 conn->nopin_response_timer_flags |= ISCSI_TF_RUNNING; 1116 conn->nopin_response_timer_flags |= ISCSI_TF_RUNNING;
1119 add_timer(&conn->nopin_response_timer); 1117 add_timer(&conn->nopin_response_timer);
1120 1118
1121 pr_debug("Started NOPIN Response Timer on CID: %d to %u" 1119 pr_debug("Started NOPIN Response Timer on CID: %d to %u"
1122 " seconds\n", conn->cid, na->nopin_response_timeout); 1120 " seconds\n", conn->cid, na->nopin_response_timeout);
1123 spin_unlock_bh(&conn->nopin_timer_lock); 1121 spin_unlock_bh(&conn->nopin_timer_lock);
1124 } 1122 }
1125 1123
1126 void iscsit_stop_nopin_response_timer(struct iscsi_conn *conn) 1124 void iscsit_stop_nopin_response_timer(struct iscsi_conn *conn)
1127 { 1125 {
1128 spin_lock_bh(&conn->nopin_timer_lock); 1126 spin_lock_bh(&conn->nopin_timer_lock);
1129 if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) { 1127 if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) {
1130 spin_unlock_bh(&conn->nopin_timer_lock); 1128 spin_unlock_bh(&conn->nopin_timer_lock);
1131 return; 1129 return;
1132 } 1130 }
1133 conn->nopin_response_timer_flags |= ISCSI_TF_STOP; 1131 conn->nopin_response_timer_flags |= ISCSI_TF_STOP;
1134 spin_unlock_bh(&conn->nopin_timer_lock); 1132 spin_unlock_bh(&conn->nopin_timer_lock);
1135 1133
1136 del_timer_sync(&conn->nopin_response_timer); 1134 del_timer_sync(&conn->nopin_response_timer);
1137 1135
1138 spin_lock_bh(&conn->nopin_timer_lock); 1136 spin_lock_bh(&conn->nopin_timer_lock);
1139 conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING; 1137 conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
1140 spin_unlock_bh(&conn->nopin_timer_lock); 1138 spin_unlock_bh(&conn->nopin_timer_lock);
1141 } 1139 }
1142 1140
1143 static void iscsit_handle_nopin_timeout(unsigned long data) 1141 static void iscsit_handle_nopin_timeout(unsigned long data)
1144 { 1142 {
1145 struct iscsi_conn *conn = (struct iscsi_conn *) data; 1143 struct iscsi_conn *conn = (struct iscsi_conn *) data;
1146 1144
1147 iscsit_inc_conn_usage_count(conn); 1145 iscsit_inc_conn_usage_count(conn);
1148 1146
1149 spin_lock_bh(&conn->nopin_timer_lock); 1147 spin_lock_bh(&conn->nopin_timer_lock);
1150 if (conn->nopin_timer_flags & ISCSI_TF_STOP) { 1148 if (conn->nopin_timer_flags & ISCSI_TF_STOP) {
1151 spin_unlock_bh(&conn->nopin_timer_lock); 1149 spin_unlock_bh(&conn->nopin_timer_lock);
1152 iscsit_dec_conn_usage_count(conn); 1150 iscsit_dec_conn_usage_count(conn);
1153 return; 1151 return;
1154 } 1152 }
1155 conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING; 1153 conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
1156 spin_unlock_bh(&conn->nopin_timer_lock); 1154 spin_unlock_bh(&conn->nopin_timer_lock);
1157 1155
1158 iscsit_add_nopin(conn, 1); 1156 iscsit_add_nopin(conn, 1);
1159 iscsit_dec_conn_usage_count(conn); 1157 iscsit_dec_conn_usage_count(conn);
1160 } 1158 }
1161 1159
1162 /* 1160 /*
1163 * Called with conn->nopin_timer_lock held. 1161 * Called with conn->nopin_timer_lock held.
1164 */ 1162 */
1165 void __iscsit_start_nopin_timer(struct iscsi_conn *conn) 1163 void __iscsit_start_nopin_timer(struct iscsi_conn *conn)
1166 { 1164 {
1167 struct iscsi_session *sess = conn->sess; 1165 struct iscsi_session *sess = conn->sess;
1168 struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess); 1166 struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
1169 /* 1167 /*
1170 * NOPIN timeout is disabled. 1168 * NOPIN timeout is disabled.
1171 */ 1169 */
1172 if (!na->nopin_timeout) 1170 if (!na->nopin_timeout)
1173 return; 1171 return;
1174 1172
1175 if (conn->nopin_timer_flags & ISCSI_TF_RUNNING) 1173 if (conn->nopin_timer_flags & ISCSI_TF_RUNNING)
1176 return; 1174 return;
1177 1175
1178 init_timer(&conn->nopin_timer); 1176 init_timer(&conn->nopin_timer);
1179 conn->nopin_timer.expires = (get_jiffies_64() + na->nopin_timeout * HZ); 1177 conn->nopin_timer.expires = (get_jiffies_64() + na->nopin_timeout * HZ);
1180 conn->nopin_timer.data = (unsigned long)conn; 1178 conn->nopin_timer.data = (unsigned long)conn;
1181 conn->nopin_timer.function = iscsit_handle_nopin_timeout; 1179 conn->nopin_timer.function = iscsit_handle_nopin_timeout;
1182 conn->nopin_timer_flags &= ~ISCSI_TF_STOP; 1180 conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
1183 conn->nopin_timer_flags |= ISCSI_TF_RUNNING; 1181 conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
1184 add_timer(&conn->nopin_timer); 1182 add_timer(&conn->nopin_timer);
1185 1183
1186 pr_debug("Started NOPIN Timer on CID: %d at %u second" 1184 pr_debug("Started NOPIN Timer on CID: %d at %u second"
1187 " interval\n", conn->cid, na->nopin_timeout); 1185 " interval\n", conn->cid, na->nopin_timeout);
1188 } 1186 }
1189 1187
1190 void iscsit_start_nopin_timer(struct iscsi_conn *conn) 1188 void iscsit_start_nopin_timer(struct iscsi_conn *conn)
1191 { 1189 {
1192 struct iscsi_session *sess = conn->sess; 1190 struct iscsi_session *sess = conn->sess;
1193 struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess); 1191 struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
1194 /* 1192 /*
1195 * NOPIN timeout is disabled. 1193 * NOPIN timeout is disabled.
1196 */ 1194 */
1197 if (!na->nopin_timeout) 1195 if (!na->nopin_timeout)
1198 return; 1196 return;
1199 1197
1200 spin_lock_bh(&conn->nopin_timer_lock); 1198 spin_lock_bh(&conn->nopin_timer_lock);
1201 if (conn->nopin_timer_flags & ISCSI_TF_RUNNING) { 1199 if (conn->nopin_timer_flags & ISCSI_TF_RUNNING) {
1202 spin_unlock_bh(&conn->nopin_timer_lock); 1200 spin_unlock_bh(&conn->nopin_timer_lock);
1203 return; 1201 return;
1204 } 1202 }
1205 1203
1206 init_timer(&conn->nopin_timer); 1204 init_timer(&conn->nopin_timer);
1207 conn->nopin_timer.expires = (get_jiffies_64() + na->nopin_timeout * HZ); 1205 conn->nopin_timer.expires = (get_jiffies_64() + na->nopin_timeout * HZ);
1208 conn->nopin_timer.data = (unsigned long)conn; 1206 conn->nopin_timer.data = (unsigned long)conn;
1209 conn->nopin_timer.function = iscsit_handle_nopin_timeout; 1207 conn->nopin_timer.function = iscsit_handle_nopin_timeout;
1210 conn->nopin_timer_flags &= ~ISCSI_TF_STOP; 1208 conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
1211 conn->nopin_timer_flags |= ISCSI_TF_RUNNING; 1209 conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
1212 add_timer(&conn->nopin_timer); 1210 add_timer(&conn->nopin_timer);
1213 1211
1214 pr_debug("Started NOPIN Timer on CID: %d at %u second" 1212 pr_debug("Started NOPIN Timer on CID: %d at %u second"
1215 " interval\n", conn->cid, na->nopin_timeout); 1213 " interval\n", conn->cid, na->nopin_timeout);
1216 spin_unlock_bh(&conn->nopin_timer_lock); 1214 spin_unlock_bh(&conn->nopin_timer_lock);
1217 } 1215 }
1218 1216
1219 void iscsit_stop_nopin_timer(struct iscsi_conn *conn) 1217 void iscsit_stop_nopin_timer(struct iscsi_conn *conn)
1220 { 1218 {
1221 spin_lock_bh(&conn->nopin_timer_lock); 1219 spin_lock_bh(&conn->nopin_timer_lock);
1222 if (!(conn->nopin_timer_flags & ISCSI_TF_RUNNING)) { 1220 if (!(conn->nopin_timer_flags & ISCSI_TF_RUNNING)) {
1223 spin_unlock_bh(&conn->nopin_timer_lock); 1221 spin_unlock_bh(&conn->nopin_timer_lock);
1224 return; 1222 return;
1225 } 1223 }
1226 conn->nopin_timer_flags |= ISCSI_TF_STOP; 1224 conn->nopin_timer_flags |= ISCSI_TF_STOP;
1227 spin_unlock_bh(&conn->nopin_timer_lock); 1225 spin_unlock_bh(&conn->nopin_timer_lock);
1228 1226
1229 del_timer_sync(&conn->nopin_timer); 1227 del_timer_sync(&conn->nopin_timer);
1230 1228
1231 spin_lock_bh(&conn->nopin_timer_lock); 1229 spin_lock_bh(&conn->nopin_timer_lock);
1232 conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING; 1230 conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
1233 spin_unlock_bh(&conn->nopin_timer_lock); 1231 spin_unlock_bh(&conn->nopin_timer_lock);
1234 } 1232 }
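Both stop routines above share the same three-step protocol against a concurrently firing handler: mark ISCSI_TF_STOP under the lock so a handler that already fired bails out early, call del_timer_sync() with the lock dropped (the handler itself takes that lock, so holding it here would deadlock), then clear ISCSI_TF_RUNNING. A kernel-style distillation of that shared shape, not code from this patch (timer, flags and lock stand in for the conn fields):

	static void stop_flagged_timer(struct timer_list *timer,
				       unsigned int *flags, spinlock_t *lock)
	{
		spin_lock_bh(lock);
		if (!(*flags & ISCSI_TF_RUNNING)) {	/* never started, or already stopped */
			spin_unlock_bh(lock);
			return;
		}
		*flags |= ISCSI_TF_STOP;	/* a handler that beat us will bail out */
		spin_unlock_bh(lock);

		del_timer_sync(timer);		/* wait out any in-flight handler */

		spin_lock_bh(lock);
		*flags &= ~ISCSI_TF_RUNNING;
		spin_unlock_bh(lock);
	}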
1235 1233
1236 int iscsit_send_tx_data( 1234 int iscsit_send_tx_data(
1237 struct iscsi_cmd *cmd, 1235 struct iscsi_cmd *cmd,
1238 struct iscsi_conn *conn, 1236 struct iscsi_conn *conn,
1239 int use_misc) 1237 int use_misc)
1240 { 1238 {
1241 int tx_sent, tx_size; 1239 int tx_sent, tx_size;
1242 u32 iov_count; 1240 u32 iov_count;
1243 struct kvec *iov; 1241 struct kvec *iov;
1244 1242
1245 send_data: 1243 send_data:
1246 tx_size = cmd->tx_size; 1244 tx_size = cmd->tx_size;
1247 1245
1248 if (!use_misc) { 1246 if (!use_misc) {
1249 iov = &cmd->iov_data[0]; 1247 iov = &cmd->iov_data[0];
1250 iov_count = cmd->iov_data_count; 1248 iov_count = cmd->iov_data_count;
1251 } else { 1249 } else {
1252 iov = &cmd->iov_misc[0]; 1250 iov = &cmd->iov_misc[0];
1253 iov_count = cmd->iov_misc_count; 1251 iov_count = cmd->iov_misc_count;
1254 } 1252 }
1255 1253
1256 tx_sent = tx_data(conn, &iov[0], iov_count, tx_size); 1254 tx_sent = tx_data(conn, &iov[0], iov_count, tx_size);
1257 if (tx_size != tx_sent) { 1255 if (tx_size != tx_sent) {
1258 if (tx_sent == -EAGAIN) { 1256 if (tx_sent == -EAGAIN) {
1259 pr_err("tx_data() returned -EAGAIN\n"); 1257 pr_err("tx_data() returned -EAGAIN\n");
1260 goto send_data; 1258 goto send_data;
1261 } else 1259 } else
1262 return -1; 1260 return -1;
1263 } 1261 }
1264 cmd->tx_size = 0; 1262 cmd->tx_size = 0;
1265 1263
1266 return 0; 1264 return 0;
1267 } 1265 }
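iscsit_send_tx_data() treats only -EAGAIN as retryable and resubmits the whole iovec from the top; any other short result is a hard failure. A userspace sketch of the same shape (send_all and fd are illustrative, not part of the patch; like the code above, it leaves partial-write accounting to the lower layer, which here is iscsit_do_tx_data()):

	#include <errno.h>
	#include <sys/uio.h>

	/* Retry only on EAGAIN; fail hard on anything else short of 'total'. */
	static int send_all(int fd, const struct iovec *iov, int iovcnt, size_t total)
	{
		for (;;) {
			ssize_t sent = writev(fd, iov, iovcnt);

			if (sent == (ssize_t)total)
				return 0;
			if (sent < 0 && errno == EAGAIN)
				continue;	/* transient; resubmit from the top */
			return -1;		/* hard error or unexpected short write */
		}
	}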
1268 1266
1269 int iscsit_fe_sendpage_sg( 1267 int iscsit_fe_sendpage_sg(
1270 struct iscsi_cmd *cmd, 1268 struct iscsi_cmd *cmd,
1271 struct iscsi_conn *conn) 1269 struct iscsi_conn *conn)
1272 { 1270 {
1273 struct scatterlist *sg = cmd->first_data_sg; 1271 struct scatterlist *sg = cmd->first_data_sg;
1274 struct kvec iov; 1272 struct kvec iov;
1275 u32 tx_hdr_size, data_len; 1273 u32 tx_hdr_size, data_len;
1276 u32 offset = cmd->first_data_sg_off; 1274 u32 offset = cmd->first_data_sg_off;
1277 int tx_sent, iov_off; 1275 int tx_sent, iov_off;
1278 1276
1279 send_hdr: 1277 send_hdr:
1280 tx_hdr_size = ISCSI_HDR_LEN; 1278 tx_hdr_size = ISCSI_HDR_LEN;
1281 if (conn->conn_ops->HeaderDigest) 1279 if (conn->conn_ops->HeaderDigest)
1282 tx_hdr_size += ISCSI_CRC_LEN; 1280 tx_hdr_size += ISCSI_CRC_LEN;
1283 1281
1284 iov.iov_base = cmd->pdu; 1282 iov.iov_base = cmd->pdu;
1285 iov.iov_len = tx_hdr_size; 1283 iov.iov_len = tx_hdr_size;
1286 1284
1287 tx_sent = tx_data(conn, &iov, 1, tx_hdr_size); 1285 tx_sent = tx_data(conn, &iov, 1, tx_hdr_size);
1288 if (tx_hdr_size != tx_sent) { 1286 if (tx_hdr_size != tx_sent) {
1289 if (tx_sent == -EAGAIN) { 1287 if (tx_sent == -EAGAIN) {
1290 pr_err("tx_data() returned -EAGAIN\n"); 1288 pr_err("tx_data() returned -EAGAIN\n");
1291 goto send_hdr; 1289 goto send_hdr;
1292 } 1290 }
1293 return -1; 1291 return -1;
1294 } 1292 }
1295 1293
1296 data_len = cmd->tx_size - tx_hdr_size - cmd->padding; 1294 data_len = cmd->tx_size - tx_hdr_size - cmd->padding;
1297 /* 1295 /*
1298 * Set iov_off used by padding and data digest tx_data() calls below 1296 * Set iov_off used by padding and data digest tx_data() calls below
1299 * in order to determine proper offset into cmd->iov_data[] 1297 * in order to determine proper offset into cmd->iov_data[]
1300 */ 1298 */
1301 if (conn->conn_ops->DataDigest) { 1299 if (conn->conn_ops->DataDigest) {
1302 data_len -= ISCSI_CRC_LEN; 1300 data_len -= ISCSI_CRC_LEN;
1303 if (cmd->padding) 1301 if (cmd->padding)
1304 iov_off = (cmd->iov_data_count - 2); 1302 iov_off = (cmd->iov_data_count - 2);
1305 else 1303 else
1306 iov_off = (cmd->iov_data_count - 1); 1304 iov_off = (cmd->iov_data_count - 1);
1307 } else { 1305 } else {
1308 iov_off = (cmd->iov_data_count - 1); 1306 iov_off = (cmd->iov_data_count - 1);
1309 } 1307 }
1310 /* 1308 /*
1311 * Perform sendpage() for each page in the scatterlist 1309 * Perform sendpage() for each page in the scatterlist
1312 */ 1310 */
1313 while (data_len) { 1311 while (data_len) {
1314 u32 space = (sg->length - offset); 1312 u32 space = (sg->length - offset);
1315 u32 sub_len = min_t(u32, data_len, space); 1313 u32 sub_len = min_t(u32, data_len, space);
1316 send_pg: 1314 send_pg:
1317 tx_sent = conn->sock->ops->sendpage(conn->sock, 1315 tx_sent = conn->sock->ops->sendpage(conn->sock,
1318 sg_page(sg), sg->offset + offset, sub_len, 0); 1316 sg_page(sg), sg->offset + offset, sub_len, 0);
1319 if (tx_sent != sub_len) { 1317 if (tx_sent != sub_len) {
1320 if (tx_sent == -EAGAIN) { 1318 if (tx_sent == -EAGAIN) {
1321 pr_err("tcp_sendpage() returned" 1319 pr_err("tcp_sendpage() returned"
1322 " -EAGAIN\n"); 1320 " -EAGAIN\n");
1323 goto send_pg; 1321 goto send_pg;
1324 } 1322 }
1325 1323
1326 pr_err("tcp_sendpage() failure: %d\n", 1324 pr_err("tcp_sendpage() failure: %d\n",
1327 tx_sent); 1325 tx_sent);
1328 return -1; 1326 return -1;
1329 } 1327 }
1330 1328
1331 data_len -= sub_len; 1329 data_len -= sub_len;
1332 offset = 0; 1330 offset = 0;
1333 sg = sg_next(sg); 1331 sg = sg_next(sg);
1334 } 1332 }
1335 1333
1336 send_padding: 1334 send_padding:
1337 if (cmd->padding) { 1335 if (cmd->padding) {
1338 struct kvec *iov_p = &cmd->iov_data[iov_off++]; 1336 struct kvec *iov_p = &cmd->iov_data[iov_off++];
1339 1337
1340 tx_sent = tx_data(conn, iov_p, 1, cmd->padding); 1338 tx_sent = tx_data(conn, iov_p, 1, cmd->padding);
1341 if (cmd->padding != tx_sent) { 1339 if (cmd->padding != tx_sent) {
1342 if (tx_sent == -EAGAIN) { 1340 if (tx_sent == -EAGAIN) {
1343 pr_err("tx_data() returned -EAGAIN\n"); 1341 pr_err("tx_data() returned -EAGAIN\n");
1344 goto send_padding; 1342 goto send_padding;
1345 } 1343 }
1346 return -1; 1344 return -1;
1347 } 1345 }
1348 } 1346 }
1349 1347
1350 send_datacrc: 1348 send_datacrc:
1351 if (conn->conn_ops->DataDigest) { 1349 if (conn->conn_ops->DataDigest) {
1352 struct kvec *iov_d = &cmd->iov_data[iov_off]; 1350 struct kvec *iov_d = &cmd->iov_data[iov_off];
1353 1351
1354 tx_sent = tx_data(conn, iov_d, 1, ISCSI_CRC_LEN); 1352 tx_sent = tx_data(conn, iov_d, 1, ISCSI_CRC_LEN);
1355 if (ISCSI_CRC_LEN != tx_sent) { 1353 if (ISCSI_CRC_LEN != tx_sent) {
1356 if (tx_sent == -EAGAIN) { 1354 if (tx_sent == -EAGAIN) {
1357 pr_err("tx_data() returned -EAGAIN\n"); 1355 pr_err("tx_data() returned -EAGAIN\n");
1358 goto send_datacrc; 1356 goto send_datacrc;
1359 } 1357 }
1360 return -1; 1358 return -1;
1361 } 1359 }
1362 } 1360 }
1363 1361
1364 return 0; 1362 return 0;
1365 } 1363 }
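Concretely, for the iov_off computation above: cmd->iov_data[] ends with the optional trailers [pad][crc], so with both padding and DataDigest present the pad kvec sits at iov_data_count - 2 and the CRC at iov_data_count - 1; with only one trailer present it sits at iov_data_count - 1. A hedged restatement of those branches:

	/* Index of the first trailing kvec (padding if present, else data CRC).
	 * Mirrors the branches above; iov_data_count counts data + trailers. */
	static int first_trailer_index(int iov_data_count, int has_padding,
				       int has_data_digest)
	{
		if (has_data_digest && has_padding)
			return iov_data_count - 2;	/* [pad][crc] at the tail */
		return iov_data_count - 1;		/* single trailer at the tail */
	}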
1366 1364
1367 /* 1365 /*
1368 * This function is mainly used for sending an ISCSI_TARG_LOGIN_RSP PDU 1366 * This function is mainly used for sending an ISCSI_TARG_LOGIN_RSP PDU
1369 * back to the Initiator when an exception condition occurs, with the 1367 * back to the Initiator when an exception condition occurs, with the
1370 * errors set in status_class and status_detail. 1368 * errors set in status_class and status_detail.
1371 * 1369 *
1372 * Parameters: iSCSI Connection, Status Class, Status Detail. 1370 * Parameters: iSCSI Connection, Status Class, Status Detail.
1373 * Returns: 0 on success, -1 on error. 1371 * Returns: 0 on success, -1 on error.
1374 */ 1372 */
1375 int iscsit_tx_login_rsp(struct iscsi_conn *conn, u8 status_class, u8 status_detail) 1373 int iscsit_tx_login_rsp(struct iscsi_conn *conn, u8 status_class, u8 status_detail)
1376 { 1374 {
1377 u8 iscsi_hdr[ISCSI_HDR_LEN]; 1375 u8 iscsi_hdr[ISCSI_HDR_LEN];
1378 int err; 1376 int err;
1379 struct kvec iov; 1377 struct kvec iov;
1380 struct iscsi_login_rsp *hdr; 1378 struct iscsi_login_rsp *hdr;
1381 1379
1382 iscsit_collect_login_stats(conn, status_class, status_detail); 1380 iscsit_collect_login_stats(conn, status_class, status_detail);
1383 1381
1384 memset(&iov, 0, sizeof(struct kvec)); 1382 memset(&iov, 0, sizeof(struct kvec));
1385 memset(&iscsi_hdr, 0x0, ISCSI_HDR_LEN); 1383 memset(&iscsi_hdr, 0x0, ISCSI_HDR_LEN);
1386 1384
1387 hdr = (struct iscsi_login_rsp *)&iscsi_hdr; 1385 hdr = (struct iscsi_login_rsp *)&iscsi_hdr;
1388 hdr->opcode = ISCSI_OP_LOGIN_RSP; 1386 hdr->opcode = ISCSI_OP_LOGIN_RSP;
1389 hdr->status_class = status_class; 1387 hdr->status_class = status_class;
1390 hdr->status_detail = status_detail; 1388 hdr->status_detail = status_detail;
1391 hdr->itt = cpu_to_be32(conn->login_itt); 1389 hdr->itt = cpu_to_be32(conn->login_itt);
1392 1390
1393 iov.iov_base = &iscsi_hdr; 1391 iov.iov_base = &iscsi_hdr;
1394 iov.iov_len = ISCSI_HDR_LEN; 1392 iov.iov_len = ISCSI_HDR_LEN;
1395 1393
1396 PRINT_BUFF(iscsi_hdr, ISCSI_HDR_LEN); 1394 PRINT_BUFF(iscsi_hdr, ISCSI_HDR_LEN);
1397 1395
1398 err = tx_data(conn, &iov, 1, ISCSI_HDR_LEN); 1396 err = tx_data(conn, &iov, 1, ISCSI_HDR_LEN);
1399 if (err != ISCSI_HDR_LEN) { 1397 if (err != ISCSI_HDR_LEN) {
1400 pr_err("tx_data returned less than expected\n"); 1398 pr_err("tx_data returned less than expected\n");
1401 return -1; 1399 return -1;
1402 } 1400 }
1403 1401
1404 return 0; 1402 return 0;
1405 } 1403 }
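For illustration, a hypothetical call site rejecting a login on an authentication failure; the status constants are the same ones iscsit_collect_login_stats() keys off further down:

	/* Early login failure: class = initiator error, detail = auth failed. */
	if (iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
				ISCSI_LOGIN_STATUS_AUTH_FAILED) < 0)
		return -1;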
1406 1404
1407 void iscsit_print_session_params(struct iscsi_session *sess) 1405 void iscsit_print_session_params(struct iscsi_session *sess)
1408 { 1406 {
1409 struct iscsi_conn *conn; 1407 struct iscsi_conn *conn;
1410 1408
1411 pr_debug("-----------------------------[Session Params for" 1409 pr_debug("-----------------------------[Session Params for"
1412 " SID: %u]-----------------------------\n", sess->sid); 1410 " SID: %u]-----------------------------\n", sess->sid);
1413 spin_lock_bh(&sess->conn_lock); 1411 spin_lock_bh(&sess->conn_lock);
1414 list_for_each_entry(conn, &sess->sess_conn_list, conn_list) 1412 list_for_each_entry(conn, &sess->sess_conn_list, conn_list)
1415 iscsi_dump_conn_ops(conn->conn_ops); 1413 iscsi_dump_conn_ops(conn->conn_ops);
1416 spin_unlock_bh(&sess->conn_lock); 1414 spin_unlock_bh(&sess->conn_lock);
1417 1415
1418 iscsi_dump_sess_ops(sess->sess_ops); 1416 iscsi_dump_sess_ops(sess->sess_ops);
1419 } 1417 }
1420 1418
1421 static int iscsit_do_rx_data( 1419 static int iscsit_do_rx_data(
1422 struct iscsi_conn *conn, 1420 struct iscsi_conn *conn,
1423 struct iscsi_data_count *count) 1421 struct iscsi_data_count *count)
1424 { 1422 {
1425 int data = count->data_length, rx_loop = 0, total_rx = 0, iov_len; 1423 int data = count->data_length, rx_loop = 0, total_rx = 0, iov_len;
1426 struct kvec *iov_p; 1424 struct kvec *iov_p;
1427 struct msghdr msg; 1425 struct msghdr msg;
1428 1426
1429 if (!conn || !conn->sock || !conn->conn_ops) 1427 if (!conn || !conn->sock || !conn->conn_ops)
1430 return -1; 1428 return -1;
1431 1429
1432 memset(&msg, 0, sizeof(struct msghdr)); 1430 memset(&msg, 0, sizeof(struct msghdr));
1433 1431
1434 iov_p = count->iov; 1432 iov_p = count->iov;
1435 iov_len = count->iov_count; 1433 iov_len = count->iov_count;
1436 1434
1437 while (total_rx < data) { 1435 while (total_rx < data) {
1438 rx_loop = kernel_recvmsg(conn->sock, &msg, iov_p, iov_len, 1436 rx_loop = kernel_recvmsg(conn->sock, &msg, iov_p, iov_len,
1439 (data - total_rx), MSG_WAITALL); 1437 (data - total_rx), MSG_WAITALL);
1440 if (rx_loop <= 0) { 1438 if (rx_loop <= 0) {
1441 pr_debug("rx_loop: %d total_rx: %d\n", 1439 pr_debug("rx_loop: %d total_rx: %d\n",
1442 rx_loop, total_rx); 1440 rx_loop, total_rx);
1443 return rx_loop; 1441 return rx_loop;
1444 } 1442 }
1445 total_rx += rx_loop; 1443 total_rx += rx_loop;
1446 pr_debug("rx_loop: %d, total_rx: %d, data: %d\n", 1444 pr_debug("rx_loop: %d, total_rx: %d, data: %d\n",
1447 rx_loop, total_rx, data); 1445 rx_loop, total_rx, data);
1448 } 1446 }
1449 1447
1450 return total_rx; 1448 return total_rx;
1451 } 1449 }
1452 1450
1453 static int iscsit_do_tx_data( 1451 static int iscsit_do_tx_data(
1454 struct iscsi_conn *conn, 1452 struct iscsi_conn *conn,
1455 struct iscsi_data_count *count) 1453 struct iscsi_data_count *count)
1456 { 1454 {
1457 int data = count->data_length, total_tx = 0, tx_loop = 0, iov_len; 1455 int data = count->data_length, total_tx = 0, tx_loop = 0, iov_len;
1458 struct kvec *iov_p; 1456 struct kvec *iov_p;
1459 struct msghdr msg; 1457 struct msghdr msg;
1460 1458
1461 if (!conn || !conn->sock || !conn->conn_ops) 1459 if (!conn || !conn->sock || !conn->conn_ops)
1462 return -1; 1460 return -1;
1463 1461
1464 if (data <= 0) { 1462 if (data <= 0) {
1465 pr_err("Data length is: %d\n", data); 1463 pr_err("Data length is: %d\n", data);
1466 return -1; 1464 return -1;
1467 } 1465 }
1468 1466
1469 memset(&msg, 0, sizeof(struct msghdr)); 1467 memset(&msg, 0, sizeof(struct msghdr));
1470 1468
1471 iov_p = count->iov; 1469 iov_p = count->iov;
1472 iov_len = count->iov_count; 1470 iov_len = count->iov_count;
1473 1471
1474 while (total_tx < data) { 1472 while (total_tx < data) {
1475 tx_loop = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len, 1473 tx_loop = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
1476 (data - total_tx)); 1474 (data - total_tx));
1477 if (tx_loop <= 0) { 1475 if (tx_loop <= 0) {
1478 pr_debug("tx_loop: %d total_tx %d\n", 1476 pr_debug("tx_loop: %d total_tx %d\n",
1479 tx_loop, total_tx); 1477 tx_loop, total_tx);
1480 return tx_loop; 1478 return tx_loop;
1481 } 1479 }
1482 total_tx += tx_loop; 1480 total_tx += tx_loop;
1483 pr_debug("tx_loop: %d, total_tx: %d, data: %d\n", 1481 pr_debug("tx_loop: %d, total_tx: %d, data: %d\n",
1484 tx_loop, total_tx, data); 1482 tx_loop, total_tx, data);
1485 } 1483 }
1486 1484
1487 return total_tx; 1485 return total_tx;
1488 } 1486 }
1489 1487
1490 int rx_data( 1488 int rx_data(
1491 struct iscsi_conn *conn, 1489 struct iscsi_conn *conn,
1492 struct kvec *iov, 1490 struct kvec *iov,
1493 int iov_count, 1491 int iov_count,
1494 int data) 1492 int data)
1495 { 1493 {
1496 struct iscsi_data_count c; 1494 struct iscsi_data_count c;
1497 1495
1498 if (!conn || !conn->sock || !conn->conn_ops) 1496 if (!conn || !conn->sock || !conn->conn_ops)
1499 return -1; 1497 return -1;
1500 1498
1501 memset(&c, 0, sizeof(struct iscsi_data_count)); 1499 memset(&c, 0, sizeof(struct iscsi_data_count));
1502 c.iov = iov; 1500 c.iov = iov;
1503 c.iov_count = iov_count; 1501 c.iov_count = iov_count;
1504 c.data_length = data; 1502 c.data_length = data;
1505 c.type = ISCSI_RX_DATA; 1503 c.type = ISCSI_RX_DATA;
1506 1504
1507 return iscsit_do_rx_data(conn, &c); 1505 return iscsit_do_rx_data(conn, &c);
1508 } 1506 }
1509 1507
1510 int tx_data( 1508 int tx_data(
1511 struct iscsi_conn *conn, 1509 struct iscsi_conn *conn,
1512 struct kvec *iov, 1510 struct kvec *iov,
1513 int iov_count, 1511 int iov_count,
1514 int data) 1512 int data)
1515 { 1513 {
1516 struct iscsi_data_count c; 1514 struct iscsi_data_count c;
1517 1515
1518 if (!conn || !conn->sock || !conn->conn_ops) 1516 if (!conn || !conn->sock || !conn->conn_ops)
1519 return -1; 1517 return -1;
1520 1518
1521 memset(&c, 0, sizeof(struct iscsi_data_count)); 1519 memset(&c, 0, sizeof(struct iscsi_data_count));
1522 c.iov = iov; 1520 c.iov = iov;
1523 c.iov_count = iov_count; 1521 c.iov_count = iov_count;
1524 c.data_length = data; 1522 c.data_length = data;
1525 c.type = ISCSI_TX_DATA; 1523 c.type = ISCSI_TX_DATA;
1526 1524
1527 return iscsit_do_tx_data(conn, &c); 1525 return iscsit_do_tx_data(conn, &c);
1528 } 1526 }
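A minimal caller of the rx_data() wrapper above, pulling one fixed-size iSCSI header off the socket (recv_hdr is a hypothetical helper for illustration, not part of the patch):

	static int recv_hdr(struct iscsi_conn *conn, unsigned char *buf)
	{
		struct kvec iov = {
			.iov_base = buf,
			.iov_len  = ISCSI_HDR_LEN,
		};

		/* rx_data() returns bytes received, or <= 0 on error/shutdown */
		if (rx_data(conn, &iov, 1, ISCSI_HDR_LEN) != ISCSI_HDR_LEN)
			return -1;
		return 0;
	}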
1529 1527
1530 void iscsit_collect_login_stats( 1528 void iscsit_collect_login_stats(
1531 struct iscsi_conn *conn, 1529 struct iscsi_conn *conn,
1532 u8 status_class, 1530 u8 status_class,
1533 u8 status_detail) 1531 u8 status_detail)
1534 { 1532 {
1535 struct iscsi_param *intrname = NULL; 1533 struct iscsi_param *intrname = NULL;
1536 struct iscsi_tiqn *tiqn; 1534 struct iscsi_tiqn *tiqn;
1537 struct iscsi_login_stats *ls; 1535 struct iscsi_login_stats *ls;
1538 1536
1539 tiqn = iscsit_snmp_get_tiqn(conn); 1537 tiqn = iscsit_snmp_get_tiqn(conn);
1540 if (!tiqn) 1538 if (!tiqn)
1541 return; 1539 return;
1542 1540
1543 ls = &tiqn->login_stats; 1541 ls = &tiqn->login_stats;
1544 1542
1545 spin_lock(&ls->lock); 1543 spin_lock(&ls->lock);
1546 if (!strcmp(conn->login_ip, ls->last_intr_fail_ip_addr) && 1544 if (!strcmp(conn->login_ip, ls->last_intr_fail_ip_addr) &&
1547 ((get_jiffies_64() - ls->last_fail_time) < 10)) { 1545 ((get_jiffies_64() - ls->last_fail_time) < 10)) {
1548 /* We already have the failure info for this login */ 1546 /* We already have the failure info for this login */
1549 spin_unlock(&ls->lock); 1547 spin_unlock(&ls->lock);
1550 return; 1548 return;
1551 } 1549 }
1552 1550
1553 if (status_class == ISCSI_STATUS_CLS_SUCCESS) 1551 if (status_class == ISCSI_STATUS_CLS_SUCCESS)
1554 ls->accepts++; 1552 ls->accepts++;
1555 else if (status_class == ISCSI_STATUS_CLS_REDIRECT) { 1553 else if (status_class == ISCSI_STATUS_CLS_REDIRECT) {
1556 ls->redirects++; 1554 ls->redirects++;
1557 ls->last_fail_type = ISCSI_LOGIN_FAIL_REDIRECT; 1555 ls->last_fail_type = ISCSI_LOGIN_FAIL_REDIRECT;
1558 } else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) && 1556 } else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
1559 (status_detail == ISCSI_LOGIN_STATUS_AUTH_FAILED)) { 1557 (status_detail == ISCSI_LOGIN_STATUS_AUTH_FAILED)) {
1560 ls->authenticate_fails++; 1558 ls->authenticate_fails++;
1561 ls->last_fail_type = ISCSI_LOGIN_FAIL_AUTHENTICATE; 1559 ls->last_fail_type = ISCSI_LOGIN_FAIL_AUTHENTICATE;
1562 } else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) && 1560 } else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
1563 (status_detail == ISCSI_LOGIN_STATUS_TGT_FORBIDDEN)) { 1561 (status_detail == ISCSI_LOGIN_STATUS_TGT_FORBIDDEN)) {
1564 ls->authorize_fails++; 1562 ls->authorize_fails++;
1565 ls->last_fail_type = ISCSI_LOGIN_FAIL_AUTHORIZE; 1563 ls->last_fail_type = ISCSI_LOGIN_FAIL_AUTHORIZE;
1566 } else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) && 1564 } else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
1567 (status_detail == ISCSI_LOGIN_STATUS_INIT_ERR)) { 1565 (status_detail == ISCSI_LOGIN_STATUS_INIT_ERR)) {
1568 ls->negotiate_fails++; 1566 ls->negotiate_fails++;
1569 ls->last_fail_type = ISCSI_LOGIN_FAIL_NEGOTIATE; 1567 ls->last_fail_type = ISCSI_LOGIN_FAIL_NEGOTIATE;
1570 } else { 1568 } else {
1571 ls->other_fails++; 1569 ls->other_fails++;
1572 ls->last_fail_type = ISCSI_LOGIN_FAIL_OTHER; 1570 ls->last_fail_type = ISCSI_LOGIN_FAIL_OTHER;
1573 } 1571 }
1574 1572
1575 /* Save initiator name, ip address and time, if it is a failed login */ 1573 /* Save initiator name, ip address and time, if it is a failed login */
1576 if (status_class != ISCSI_STATUS_CLS_SUCCESS) { 1574 if (status_class != ISCSI_STATUS_CLS_SUCCESS) {
1577 if (conn->param_list) 1575 if (conn->param_list)
1578 intrname = iscsi_find_param_from_key(INITIATORNAME, 1576 intrname = iscsi_find_param_from_key(INITIATORNAME,
1579 conn->param_list); 1577 conn->param_list);
1580 strcpy(ls->last_intr_fail_name, 1578 strcpy(ls->last_intr_fail_name,
1581 (intrname ? intrname->value : "Unknown")); 1579 (intrname ? intrname->value : "Unknown"));
1582 1580
1583 ls->last_intr_fail_ip_family = conn->sock->sk->sk_family; 1581 ls->last_intr_fail_ip_family = conn->sock->sk->sk_family;
1584 snprintf(ls->last_intr_fail_ip_addr, IPV6_ADDRESS_SPACE, 1582 snprintf(ls->last_intr_fail_ip_addr, IPV6_ADDRESS_SPACE,
1585 "%s", conn->login_ip); 1583 "%s", conn->login_ip);
1586 ls->last_fail_time = get_jiffies_64(); 1584 ls->last_fail_time = get_jiffies_64();
1587 } 1585 }
1588 1586
1589 spin_unlock(&ls->lock); 1587 spin_unlock(&ls->lock);
1590 } 1588 }
1591 1589
1592 struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsi_conn *conn) 1590 struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsi_conn *conn)
1593 { 1591 {
1594 struct iscsi_portal_group *tpg; 1592 struct iscsi_portal_group *tpg;
1595 1593
1596 if (!conn || !conn->sess) 1594 if (!conn || !conn->sess)
1597 return NULL; 1595 return NULL;
1598 1596
1599 tpg = conn->sess->tpg; 1597 tpg = conn->sess->tpg;
1600 if (!tpg) 1598 if (!tpg)
1601 return NULL; 1599 return NULL;
1602 1600
1603 if (!tpg->tpg_tiqn) 1601 if (!tpg->tpg_tiqn)
1604 return NULL; 1602 return NULL;
1605 1603
1606 return tpg->tpg_tiqn; 1604 return tpg->tpg_tiqn;
1607 } 1605 }
1608 1606
drivers/target/loopback/tcm_loop.c
1 /******************************************************************************* 1 /*******************************************************************************
2 * 2 *
3 * This file contains the Linux/SCSI LLD virtual SCSI initiator driver 3 * This file contains the Linux/SCSI LLD virtual SCSI initiator driver
4 * for emulated SAS initiator ports 4 * for emulated SAS initiator ports
5 * 5 *
6 * © Copyright 2011 RisingTide Systems LLC. 6 * © Copyright 2011 RisingTide Systems LLC.
7 * 7 *
8 * Licensed to the Linux Foundation under the General Public License (GPL) version 2. 8 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
9 * 9 *
10 * Author: Nicholas A. Bellinger <nab@risingtidesystems.com> 10 * Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
11 * 11 *
12 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by 13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or 14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version. 15 * (at your option) any later version.
16 * 16 *
17 * This program is distributed in the hope that it will be useful, 17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of 18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details. 20 * GNU General Public License for more details.
21 ****************************************************************************/ 21 ****************************************************************************/
22 22
23 #include <linux/module.h> 23 #include <linux/module.h>
24 #include <linux/moduleparam.h> 24 #include <linux/moduleparam.h>
25 #include <linux/init.h> 25 #include <linux/init.h>
26 #include <linux/slab.h> 26 #include <linux/slab.h>
27 #include <linux/types.h> 27 #include <linux/types.h>
28 #include <linux/configfs.h> 28 #include <linux/configfs.h>
29 #include <scsi/scsi.h> 29 #include <scsi/scsi.h>
30 #include <scsi/scsi_tcq.h> 30 #include <scsi/scsi_tcq.h>
31 #include <scsi/scsi_host.h> 31 #include <scsi/scsi_host.h>
32 #include <scsi/scsi_device.h> 32 #include <scsi/scsi_device.h>
33 #include <scsi/scsi_cmnd.h> 33 #include <scsi/scsi_cmnd.h>
34 34
35 #include <target/target_core_base.h> 35 #include <target/target_core_base.h>
36 #include <target/target_core_transport.h> 36 #include <target/target_core_fabric.h>
37 #include <target/target_core_fabric_ops.h>
38 #include <target/target_core_fabric_configfs.h> 37 #include <target/target_core_fabric_configfs.h>
39 #include <target/target_core_fabric_lib.h>
40 #include <target/target_core_configfs.h> 38 #include <target/target_core_configfs.h>
41 #include <target/target_core_device.h>
42 #include <target/target_core_tpg.h>
43 #include <target/target_core_tmr.h>
44 39
45 #include "tcm_loop.h" 40 #include "tcm_loop.h"
46 41
47 #define to_tcm_loop_hba(hba) container_of(hba, struct tcm_loop_hba, dev) 42 #define to_tcm_loop_hba(hba) container_of(hba, struct tcm_loop_hba, dev)
48 43
49 /* Local pointer to allocated TCM configfs fabric module */ 44 /* Local pointer to allocated TCM configfs fabric module */
50 static struct target_fabric_configfs *tcm_loop_fabric_configfs; 45 static struct target_fabric_configfs *tcm_loop_fabric_configfs;
51 46
52 static struct kmem_cache *tcm_loop_cmd_cache; 47 static struct kmem_cache *tcm_loop_cmd_cache;
53 48
54 static int tcm_loop_hba_no_cnt; 49 static int tcm_loop_hba_no_cnt;
55 50
56 /* 51 /*
57 * Allocate a tcm_loop cmd descriptor from target_core_mod code 52 * Allocate a tcm_loop cmd descriptor from target_core_mod code
58 * 53 *
59 * Can be called from interrupt context in tcm_loop_queuecommand() below 54 * Can be called from interrupt context in tcm_loop_queuecommand() below
60 */ 55 */
61 static struct se_cmd *tcm_loop_allocate_core_cmd( 56 static struct se_cmd *tcm_loop_allocate_core_cmd(
62 struct tcm_loop_hba *tl_hba, 57 struct tcm_loop_hba *tl_hba,
63 struct se_portal_group *se_tpg, 58 struct se_portal_group *se_tpg,
64 struct scsi_cmnd *sc) 59 struct scsi_cmnd *sc)
65 { 60 {
66 struct se_cmd *se_cmd; 61 struct se_cmd *se_cmd;
67 struct se_session *se_sess; 62 struct se_session *se_sess;
68 struct tcm_loop_nexus *tl_nexus = tl_hba->tl_nexus; 63 struct tcm_loop_nexus *tl_nexus = tl_hba->tl_nexus;
69 struct tcm_loop_cmd *tl_cmd; 64 struct tcm_loop_cmd *tl_cmd;
70 int sam_task_attr; 65 int sam_task_attr;
71 66
72 if (!tl_nexus) { 67 if (!tl_nexus) {
73 scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus" 68 scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
74 " does not exist\n"); 69 " does not exist\n");
75 set_host_byte(sc, DID_ERROR); 70 set_host_byte(sc, DID_ERROR);
76 return NULL; 71 return NULL;
77 } 72 }
78 se_sess = tl_nexus->se_sess; 73 se_sess = tl_nexus->se_sess;
79 74
80 tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC); 75 tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
81 if (!tl_cmd) { 76 if (!tl_cmd) {
82 pr_err("Unable to allocate struct tcm_loop_cmd\n"); 77 pr_err("Unable to allocate struct tcm_loop_cmd\n");
83 set_host_byte(sc, DID_ERROR); 78 set_host_byte(sc, DID_ERROR);
84 return NULL; 79 return NULL;
85 } 80 }
86 se_cmd = &tl_cmd->tl_se_cmd; 81 se_cmd = &tl_cmd->tl_se_cmd;
87 /* 82 /*
88 * Save the pointer to struct scsi_cmnd *sc 83 * Save the pointer to struct scsi_cmnd *sc
89 */ 84 */
90 tl_cmd->sc = sc; 85 tl_cmd->sc = sc;
91 /* 86 /*
92 * Locate the SAM Task Attr from struct scsi_cmnd * 87 * Locate the SAM Task Attr from struct scsi_cmnd *
93 */ 88 */
94 if (sc->device->tagged_supported) { 89 if (sc->device->tagged_supported) {
95 switch (sc->tag) { 90 switch (sc->tag) {
96 case HEAD_OF_QUEUE_TAG: 91 case HEAD_OF_QUEUE_TAG:
97 sam_task_attr = MSG_HEAD_TAG; 92 sam_task_attr = MSG_HEAD_TAG;
98 break; 93 break;
99 case ORDERED_QUEUE_TAG: 94 case ORDERED_QUEUE_TAG:
100 sam_task_attr = MSG_ORDERED_TAG; 95 sam_task_attr = MSG_ORDERED_TAG;
101 break; 96 break;
102 default: 97 default:
103 sam_task_attr = MSG_SIMPLE_TAG; 98 sam_task_attr = MSG_SIMPLE_TAG;
104 break; 99 break;
105 } 100 }
106 } else 101 } else
107 sam_task_attr = MSG_SIMPLE_TAG; 102 sam_task_attr = MSG_SIMPLE_TAG;
108 103
109 /* 104 /*
110 * Initialize struct se_cmd descriptor from target_core_mod infrastructure 105 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
111 */ 106 */
112 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 107 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
113 scsi_bufflen(sc), sc->sc_data_direction, sam_task_attr, 108 scsi_bufflen(sc), sc->sc_data_direction, sam_task_attr,
114 &tl_cmd->tl_sense_buf[0]); 109 &tl_cmd->tl_sense_buf[0]);
115 110
116 if (scsi_bidi_cmnd(sc)) 111 if (scsi_bidi_cmnd(sc))
117 se_cmd->se_cmd_flags |= SCF_BIDI; 112 se_cmd->se_cmd_flags |= SCF_BIDI;
118 113
119 /* 114 /*
120 * Locate the struct se_lun pointer and attach it to struct se_cmd 115 * Locate the struct se_lun pointer and attach it to struct se_cmd
121 */ 116 */
122 if (transport_lookup_cmd_lun(se_cmd, tl_cmd->sc->device->lun) < 0) { 117 if (transport_lookup_cmd_lun(se_cmd, tl_cmd->sc->device->lun) < 0) {
123 kmem_cache_free(tcm_loop_cmd_cache, tl_cmd); 118 kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
124 set_host_byte(sc, DID_NO_CONNECT); 119 set_host_byte(sc, DID_NO_CONNECT);
125 return NULL; 120 return NULL;
126 } 121 }
127 122
128 return se_cmd; 123 return se_cmd;
129 } 124 }
130 125
131 /* 126 /*
132 * Called by struct target_core_fabric_ops->new_cmd_map() 127 * Called by struct target_core_fabric_ops->new_cmd_map()
133 * 128 *
134 * Always called in process context. A non zero return value 129 * Always called in process context. A non zero return value
135 * here will signal to handle an exception based on the return code. 130 * here will signal to handle an exception based on the return code.
136 */ 131 */
137 static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd) 132 static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
138 { 133 {
139 struct tcm_loop_cmd *tl_cmd = container_of(se_cmd, 134 struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
140 struct tcm_loop_cmd, tl_se_cmd); 135 struct tcm_loop_cmd, tl_se_cmd);
141 struct scsi_cmnd *sc = tl_cmd->sc; 136 struct scsi_cmnd *sc = tl_cmd->sc;
142 struct scatterlist *sgl_bidi = NULL; 137 struct scatterlist *sgl_bidi = NULL;
143 u32 sgl_bidi_count = 0; 138 u32 sgl_bidi_count = 0;
144 int ret; 139 int ret;
145 /* 140 /*
146 * Allocate the necessary tasks to complete the received CDB+data 141 * Allocate the necessary tasks to complete the received CDB+data
147 */ 142 */
148 ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd); 143 ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd);
149 if (ret != 0) 144 if (ret != 0)
150 return ret; 145 return ret;
151 /* 146 /*
152 * For BIDI commands, pass in the extra READ buffer 147 * For BIDI commands, pass in the extra READ buffer
153 * to transport_generic_map_mem_to_cmd() below. 148 * to transport_generic_map_mem_to_cmd() below.
154 */ 149 */
155 if (se_cmd->se_cmd_flags & SCF_BIDI) { 150 if (se_cmd->se_cmd_flags & SCF_BIDI) {
156 struct scsi_data_buffer *sdb = scsi_in(sc); 151 struct scsi_data_buffer *sdb = scsi_in(sc);
157 152
158 sgl_bidi = sdb->table.sgl; 153 sgl_bidi = sdb->table.sgl;
159 sgl_bidi_count = sdb->table.nents; 154 sgl_bidi_count = sdb->table.nents;
160 } 155 }
161 /* 156 /*
162 * Because some userspace code via scsi-generic does not memset its 157 * Because some userspace code via scsi-generic does not memset its
163 * associated read buffers, go ahead and do that here for type 158 * associated read buffers, go ahead and do that here for type
164 * SCF_SCSI_CONTROL_SG_IO_CDB. Also note that this is currently 159 * SCF_SCSI_CONTROL_SG_IO_CDB. Also note that this is currently
165 * guaranteed to be a single SGL for SCF_SCSI_CONTROL_SG_IO_CDB 160 * guaranteed to be a single SGL for SCF_SCSI_CONTROL_SG_IO_CDB
166 * by target core in transport_generic_allocate_tasks() -> 161 * by target core in transport_generic_allocate_tasks() ->
167 * transport_generic_cmd_sequencer(). 162 * transport_generic_cmd_sequencer().
168 */ 163 */
169 if (se_cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB && 164 if (se_cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB &&
170 se_cmd->data_direction == DMA_FROM_DEVICE) { 165 se_cmd->data_direction == DMA_FROM_DEVICE) {
171 struct scatterlist *sg = scsi_sglist(sc); 166 struct scatterlist *sg = scsi_sglist(sc);
172 unsigned char *buf = kmap(sg_page(sg)) + sg->offset; 167 unsigned char *buf = kmap(sg_page(sg)) + sg->offset;
173 168
174 if (buf != NULL) { 169 if (buf != NULL) {
175 memset(buf, 0, sg->length); 170 memset(buf, 0, sg->length);
176 kunmap(sg_page(sg)); 171 kunmap(sg_page(sg));
177 } 172 }
178 } 173 }
179 174
180 /* Tell the core about our preallocated memory */ 175 /* Tell the core about our preallocated memory */
181 return transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc), 176 return transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
182 scsi_sg_count(sc), sgl_bidi, sgl_bidi_count); 177 scsi_sg_count(sc), sgl_bidi, sgl_bidi_count);
183 } 178 }
184 179
185 /* 180 /*
186 * Called from struct target_core_fabric_ops->check_stop_free() 181 * Called from struct target_core_fabric_ops->check_stop_free()
187 */ 182 */
188 static int tcm_loop_check_stop_free(struct se_cmd *se_cmd) 183 static int tcm_loop_check_stop_free(struct se_cmd *se_cmd)
189 { 184 {
190 /* 185 /*
191 * Do not release struct se_cmd's containing a valid TMR 186 * Do not release struct se_cmd's containing a valid TMR
192 * pointer. These will be released directly in tcm_loop_device_reset() 187 * pointer. These will be released directly in tcm_loop_device_reset()
193 * with transport_generic_free_cmd(). 188 * with transport_generic_free_cmd().
194 */ 189 */
195 if (se_cmd->se_tmr_req) 190 if (se_cmd->se_tmr_req)
196 return 0; 191 return 0;
197 /* 192 /*
198 * Release the struct se_cmd, which will make a callback to release 193 * Release the struct se_cmd, which will make a callback to release
199 * struct tcm_loop_cmd * in tcm_loop_deallocate_core_cmd() 194 * struct tcm_loop_cmd * in tcm_loop_deallocate_core_cmd()
200 */ 195 */
201 transport_generic_free_cmd(se_cmd, 0); 196 transport_generic_free_cmd(se_cmd, 0);
202 return 1; 197 return 1;
203 } 198 }
204 199
205 static void tcm_loop_release_cmd(struct se_cmd *se_cmd) 200 static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
206 { 201 {
207 struct tcm_loop_cmd *tl_cmd = container_of(se_cmd, 202 struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
208 struct tcm_loop_cmd, tl_se_cmd); 203 struct tcm_loop_cmd, tl_se_cmd);
209 204
210 kmem_cache_free(tcm_loop_cmd_cache, tl_cmd); 205 kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
211 } 206 }
212 207
213 static int tcm_loop_proc_info(struct Scsi_Host *host, char *buffer, 208 static int tcm_loop_proc_info(struct Scsi_Host *host, char *buffer,
214 char **start, off_t offset, 209 char **start, off_t offset,
215 int length, int inout) 210 int length, int inout)
216 { 211 {
217 return sprintf(buffer, "tcm_loop_proc_info()\n"); 212 return sprintf(buffer, "tcm_loop_proc_info()\n");
218 } 213 }
219 214
220 static int tcm_loop_driver_probe(struct device *); 215 static int tcm_loop_driver_probe(struct device *);
221 static int tcm_loop_driver_remove(struct device *); 216 static int tcm_loop_driver_remove(struct device *);
222 217
223 static int pseudo_lld_bus_match(struct device *dev, 218 static int pseudo_lld_bus_match(struct device *dev,
224 struct device_driver *dev_driver) 219 struct device_driver *dev_driver)
225 { 220 {
226 return 1; 221 return 1;
227 } 222 }
228 223
229 static struct bus_type tcm_loop_lld_bus = { 224 static struct bus_type tcm_loop_lld_bus = {
230 .name = "tcm_loop_bus", 225 .name = "tcm_loop_bus",
231 .match = pseudo_lld_bus_match, 226 .match = pseudo_lld_bus_match,
232 .probe = tcm_loop_driver_probe, 227 .probe = tcm_loop_driver_probe,
233 .remove = tcm_loop_driver_remove, 228 .remove = tcm_loop_driver_remove,
234 }; 229 };
235 230
236 static struct device_driver tcm_loop_driverfs = { 231 static struct device_driver tcm_loop_driverfs = {
237 .name = "tcm_loop", 232 .name = "tcm_loop",
238 .bus = &tcm_loop_lld_bus, 233 .bus = &tcm_loop_lld_bus,
239 }; 234 };
240 /* 235 /*
241 * Used with root_device_register() in tcm_loop_alloc_core_bus() below 236 * Used with root_device_register() in tcm_loop_alloc_core_bus() below
242 */ 237 */
243 struct device *tcm_loop_primary; 238 struct device *tcm_loop_primary;
244 239
245 /* 240 /*
246 * Copied from drivers/scsi/libfc/fc_fcp.c:fc_change_queue_depth() and 241 * Copied from drivers/scsi/libfc/fc_fcp.c:fc_change_queue_depth() and
247 * drivers/scsi/libiscsi.c:iscsi_change_queue_depth() 242 * drivers/scsi/libiscsi.c:iscsi_change_queue_depth()
248 */ 243 */
249 static int tcm_loop_change_queue_depth( 244 static int tcm_loop_change_queue_depth(
250 struct scsi_device *sdev, 245 struct scsi_device *sdev,
251 int depth, 246 int depth,
252 int reason) 247 int reason)
253 { 248 {
254 switch (reason) { 249 switch (reason) {
255 case SCSI_QDEPTH_DEFAULT: 250 case SCSI_QDEPTH_DEFAULT:
256 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth); 251 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
257 break; 252 break;
258 case SCSI_QDEPTH_QFULL: 253 case SCSI_QDEPTH_QFULL:
259 scsi_track_queue_full(sdev, depth); 254 scsi_track_queue_full(sdev, depth);
260 break; 255 break;
261 case SCSI_QDEPTH_RAMP_UP: 256 case SCSI_QDEPTH_RAMP_UP:
262 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth); 257 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
263 break; 258 break;
264 default: 259 default:
265 return -EOPNOTSUPP; 260 return -EOPNOTSUPP;
266 } 261 }
267 return sdev->queue_depth; 262 return sdev->queue_depth;
268 } 263 }
269 264
270 /* 265 /*
271 * Main entry point from struct scsi_host_template for incoming SCSI CDB+Data 266 * Main entry point from struct scsi_host_template for incoming SCSI CDB+Data
272 * from Linux/SCSI subsystem for SCSI low level device drivers (LLDs) 267 * from Linux/SCSI subsystem for SCSI low level device drivers (LLDs)
273 */ 268 */
274 static int tcm_loop_queuecommand( 269 static int tcm_loop_queuecommand(
275 struct Scsi_Host *sh, 270 struct Scsi_Host *sh,
276 struct scsi_cmnd *sc) 271 struct scsi_cmnd *sc)
277 { 272 {
278 struct se_cmd *se_cmd; 273 struct se_cmd *se_cmd;
279 struct se_portal_group *se_tpg; 274 struct se_portal_group *se_tpg;
280 struct tcm_loop_hba *tl_hba; 275 struct tcm_loop_hba *tl_hba;
281 struct tcm_loop_tpg *tl_tpg; 276 struct tcm_loop_tpg *tl_tpg;
282 277
283 pr_debug("tcm_loop_queuecommand() %d:%d:%d:%d got CDB: 0x%02x" 278 pr_debug("tcm_loop_queuecommand() %d:%d:%d:%d got CDB: 0x%02x"
284 " scsi_buf_len: %u\n", sc->device->host->host_no, 279 " scsi_buf_len: %u\n", sc->device->host->host_no,
285 sc->device->id, sc->device->channel, sc->device->lun, 280 sc->device->id, sc->device->channel, sc->device->lun,
286 sc->cmnd[0], scsi_bufflen(sc)); 281 sc->cmnd[0], scsi_bufflen(sc));
287 /* 282 /*
288 * Locate the struct tcm_loop_hba pointer 283 * Locate the struct tcm_loop_hba pointer
289 */ 284 */
290 tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); 285 tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
291 tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; 286 tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
292 /* 287 /*
293 * Ensure that this tl_tpg reference from the incoming sc->device->id 288 * Ensure that this tl_tpg reference from the incoming sc->device->id
294 * has already been configured via tcm_loop_make_naa_tpg(). 289 * has already been configured via tcm_loop_make_naa_tpg().
295 */ 290 */
296 if (!tl_tpg->tl_hba) { 291 if (!tl_tpg->tl_hba) {
297 set_host_byte(sc, DID_NO_CONNECT); 292 set_host_byte(sc, DID_NO_CONNECT);
298 sc->scsi_done(sc); 293 sc->scsi_done(sc);
299 return 0; 294 return 0;
300 } 295 }
301 se_tpg = &tl_tpg->tl_se_tpg; 296 se_tpg = &tl_tpg->tl_se_tpg;
302 /* 297 /*
303 * Determine the SAM Task Attribute and allocate tl_cmd and 298 * Determine the SAM Task Attribute and allocate tl_cmd and
304 * tl_cmd->tl_se_cmd from TCM infrastructure 299 * tl_cmd->tl_se_cmd from TCM infrastructure
305 */ 300 */
306 se_cmd = tcm_loop_allocate_core_cmd(tl_hba, se_tpg, sc); 301 se_cmd = tcm_loop_allocate_core_cmd(tl_hba, se_tpg, sc);
307 if (!se_cmd) { 302 if (!se_cmd) {
308 sc->scsi_done(sc); 303 sc->scsi_done(sc);
309 return 0; 304 return 0;
310 } 305 }
311 /* 306 /*
312 * Queue up the newly allocated command to be processed in TCM thread context. 307 * Queue up the newly allocated command to be processed in TCM thread context.
313 */ 308 */
314 transport_generic_handle_cdb_map(se_cmd); 309 transport_generic_handle_cdb_map(se_cmd);
315 return 0; 310 return 0;
316 } 311 }
317 312
318 /* 313 /*
319 * Called from SCSI EH process context to issue a LUN_RESET TMR 314 * Called from SCSI EH process context to issue a LUN_RESET TMR
320 * to struct scsi_device 315 * to struct scsi_device
321 */ 316 */
322 static int tcm_loop_device_reset(struct scsi_cmnd *sc) 317 static int tcm_loop_device_reset(struct scsi_cmnd *sc)
323 { 318 {
324 struct se_cmd *se_cmd = NULL; 319 struct se_cmd *se_cmd = NULL;
325 struct se_portal_group *se_tpg; 320 struct se_portal_group *se_tpg;
326 struct se_session *se_sess; 321 struct se_session *se_sess;
327 struct tcm_loop_cmd *tl_cmd = NULL; 322 struct tcm_loop_cmd *tl_cmd = NULL;
328 struct tcm_loop_hba *tl_hba; 323 struct tcm_loop_hba *tl_hba;
329 struct tcm_loop_nexus *tl_nexus; 324 struct tcm_loop_nexus *tl_nexus;
330 struct tcm_loop_tmr *tl_tmr = NULL; 325 struct tcm_loop_tmr *tl_tmr = NULL;
331 struct tcm_loop_tpg *tl_tpg; 326 struct tcm_loop_tpg *tl_tpg;
332 int ret = FAILED; 327 int ret = FAILED;
333 /* 328 /*
334 * Locate the struct tcm_loop_hba pointer 329 * Locate the struct tcm_loop_hba pointer
335 */ 330 */
336 tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); 331 tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
337 /* 332 /*
338 * Locate the tl_nexus and se_sess pointers 333 * Locate the tl_nexus and se_sess pointers
339 */ 334 */
340 tl_nexus = tl_hba->tl_nexus; 335 tl_nexus = tl_hba->tl_nexus;
341 if (!tl_nexus) { 336 if (!tl_nexus) {
342 pr_err("Unable to perform device reset without" 337 pr_err("Unable to perform device reset without"
343 " active I_T Nexus\n"); 338 " active I_T Nexus\n");
344 return FAILED; 339 return FAILED;
345 } 340 }
346 se_sess = tl_nexus->se_sess; 341 se_sess = tl_nexus->se_sess;
347 /* 342 /*
348 * Locate the tl_tpg and se_tpg pointers from TargetID in sc->device->id 343 * Locate the tl_tpg and se_tpg pointers from TargetID in sc->device->id
349 */ 344 */
350 tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; 345 tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
351 se_tpg = &tl_tpg->tl_se_tpg; 346 se_tpg = &tl_tpg->tl_se_tpg;
352 347
353 tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL); 348 tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
354 if (!tl_cmd) { 349 if (!tl_cmd) {
355 pr_err("Unable to allocate memory for tl_cmd\n"); 350 pr_err("Unable to allocate memory for tl_cmd\n");
356 return FAILED; 351 return FAILED;
357 } 352 }
358 353
359 tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL); 354 tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL);
360 if (!tl_tmr) { 355 if (!tl_tmr) {
361 pr_err("Unable to allocate memory for tl_tmr\n"); 356 pr_err("Unable to allocate memory for tl_tmr\n");
362 goto release; 357 goto release;
363 } 358 }
364 init_waitqueue_head(&tl_tmr->tl_tmr_wait); 359 init_waitqueue_head(&tl_tmr->tl_tmr_wait);
365 360
366 se_cmd = &tl_cmd->tl_se_cmd; 361 se_cmd = &tl_cmd->tl_se_cmd;
367 /* 362 /*
368 * Initialize struct se_cmd descriptor from target_core_mod infrastructure 363 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
369 */ 364 */
370 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0, 365 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0,
371 DMA_NONE, MSG_SIMPLE_TAG, 366 DMA_NONE, MSG_SIMPLE_TAG,
372 &tl_cmd->tl_sense_buf[0]); 367 &tl_cmd->tl_sense_buf[0]);
373 /* 368 /*
374 * Allocate the LUN_RESET TMR 369 * Allocate the LUN_RESET TMR
375 */ 370 */
376 se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, tl_tmr, 371 se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, tl_tmr,
377 TMR_LUN_RESET, GFP_KERNEL); 372 TMR_LUN_RESET, GFP_KERNEL);
378 if (IS_ERR(se_cmd->se_tmr_req)) 373 if (IS_ERR(se_cmd->se_tmr_req))
379 goto release; 374 goto release;
380 /* 375 /*
381 * Locate the underlying TCM struct se_lun from sc->device->lun 376 * Locate the underlying TCM struct se_lun from sc->device->lun
382 */ 377 */
383 if (transport_lookup_tmr_lun(se_cmd, sc->device->lun) < 0) 378 if (transport_lookup_tmr_lun(se_cmd, sc->device->lun) < 0)
384 goto release; 379 goto release;
385 /* 380 /*
386 * Queue the TMR to TCM Core and sleep waiting for tcm_loop_queue_tm_rsp() 381 * Queue the TMR to TCM Core and sleep waiting for tcm_loop_queue_tm_rsp()
387 * to wake us up. 382 * to wake us up.
388 */ 383 */
389 transport_generic_handle_tmr(se_cmd); 384 transport_generic_handle_tmr(se_cmd);
390 wait_event(tl_tmr->tl_tmr_wait, atomic_read(&tl_tmr->tmr_complete)); 385 wait_event(tl_tmr->tl_tmr_wait, atomic_read(&tl_tmr->tmr_complete));
391 /* 386 /*
392 * The TMR LUN_RESET has completed, check the response status and 387 * The TMR LUN_RESET has completed, check the response status and
393 * then release allocations. 388 * then release allocations.
394 */ 389 */
395 ret = (se_cmd->se_tmr_req->response == TMR_FUNCTION_COMPLETE) ? 390 ret = (se_cmd->se_tmr_req->response == TMR_FUNCTION_COMPLETE) ?
396 SUCCESS : FAILED; 391 SUCCESS : FAILED;
397 release: 392 release:
398 if (se_cmd) 393 if (se_cmd)
399 transport_generic_free_cmd(se_cmd, 1); 394 transport_generic_free_cmd(se_cmd, 1);
400 else 395 else
401 kmem_cache_free(tcm_loop_cmd_cache, tl_cmd); 396 kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
402 kfree(tl_tmr); 397 kfree(tl_tmr);
403 return ret; 398 return ret;
404 } 399 }
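The wait_event() above blocks until tmr_complete flips; per the comment, tcm_loop_queue_tm_rsp() (not part of this hunk) provides the wake-up. A hedged sketch of what that completer side is expected to do, for illustration only:

	static void tl_tmr_done(struct tcm_loop_tmr *tl_tmr)
	{
		/* publish completion, then wake the sleeper in tcm_loop_device_reset() */
		atomic_set(&tl_tmr->tmr_complete, 1);
		wake_up(&tl_tmr->tl_tmr_wait);
	}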
405 400
406 static int tcm_loop_slave_alloc(struct scsi_device *sd) 401 static int tcm_loop_slave_alloc(struct scsi_device *sd)
407 { 402 {
408 set_bit(QUEUE_FLAG_BIDI, &sd->request_queue->queue_flags); 403 set_bit(QUEUE_FLAG_BIDI, &sd->request_queue->queue_flags);
409 return 0; 404 return 0;
410 } 405 }
411 406
412 static int tcm_loop_slave_configure(struct scsi_device *sd) 407 static int tcm_loop_slave_configure(struct scsi_device *sd)
413 { 408 {
414 return 0; 409 return 0;
415 } 410 }
416 411
417 static struct scsi_host_template tcm_loop_driver_template = { 412 static struct scsi_host_template tcm_loop_driver_template = {
418 .proc_info = tcm_loop_proc_info, 413 .proc_info = tcm_loop_proc_info,
419 .proc_name = "tcm_loopback", 414 .proc_name = "tcm_loopback",
420 .name = "TCM_Loopback", 415 .name = "TCM_Loopback",
421 .queuecommand = tcm_loop_queuecommand, 416 .queuecommand = tcm_loop_queuecommand,
422 .change_queue_depth = tcm_loop_change_queue_depth, 417 .change_queue_depth = tcm_loop_change_queue_depth,
423 .eh_device_reset_handler = tcm_loop_device_reset, 418 .eh_device_reset_handler = tcm_loop_device_reset,
424 .can_queue = TL_SCSI_CAN_QUEUE, 419 .can_queue = TL_SCSI_CAN_QUEUE,
425 .this_id = -1, 420 .this_id = -1,
426 .sg_tablesize = TL_SCSI_SG_TABLESIZE, 421 .sg_tablesize = TL_SCSI_SG_TABLESIZE,
427 .cmd_per_lun = TL_SCSI_CMD_PER_LUN, 422 .cmd_per_lun = TL_SCSI_CMD_PER_LUN,
428 .max_sectors = TL_SCSI_MAX_SECTORS, 423 .max_sectors = TL_SCSI_MAX_SECTORS,
429 .use_clustering = DISABLE_CLUSTERING, 424 .use_clustering = DISABLE_CLUSTERING,
430 .slave_alloc = tcm_loop_slave_alloc, 425 .slave_alloc = tcm_loop_slave_alloc,
431 .slave_configure = tcm_loop_slave_configure, 426 .slave_configure = tcm_loop_slave_configure,
432 .module = THIS_MODULE, 427 .module = THIS_MODULE,
433 }; 428 };
434 429
435 static int tcm_loop_driver_probe(struct device *dev) 430 static int tcm_loop_driver_probe(struct device *dev)
436 { 431 {
437 struct tcm_loop_hba *tl_hba; 432 struct tcm_loop_hba *tl_hba;
438 struct Scsi_Host *sh; 433 struct Scsi_Host *sh;
439 int error; 434 int error;
440 435
441 tl_hba = to_tcm_loop_hba(dev); 436 tl_hba = to_tcm_loop_hba(dev);
442 437
443 sh = scsi_host_alloc(&tcm_loop_driver_template, 438 sh = scsi_host_alloc(&tcm_loop_driver_template,
444 sizeof(struct tcm_loop_hba)); 439 sizeof(struct tcm_loop_hba));
445 if (!sh) { 440 if (!sh) {
446 pr_err("Unable to allocate struct scsi_host\n"); 441 pr_err("Unable to allocate struct scsi_host\n");
447 return -ENODEV; 442 return -ENODEV;
448 } 443 }
449 tl_hba->sh = sh; 444 tl_hba->sh = sh;
450 445
451 /* 446 /*
452 * Assign the struct tcm_loop_hba pointer to struct Scsi_Host->hostdata 447 * Assign the struct tcm_loop_hba pointer to struct Scsi_Host->hostdata
453 */ 448 */
454 *((struct tcm_loop_hba **)sh->hostdata) = tl_hba; 449 *((struct tcm_loop_hba **)sh->hostdata) = tl_hba;
455 /* 450 /*
456 * Set up a single ID, Channel and LUN for now.. 451 * Set up a single ID, Channel and LUN for now..
457 */ 452 */
458 sh->max_id = 2; 453 sh->max_id = 2;
459 sh->max_lun = 0; 454 sh->max_lun = 0;
460 sh->max_channel = 0; 455 sh->max_channel = 0;
461 sh->max_cmd_len = TL_SCSI_MAX_CMD_LEN; 456 sh->max_cmd_len = TL_SCSI_MAX_CMD_LEN;
462 457
463 error = scsi_add_host(sh, &tl_hba->dev); 458 error = scsi_add_host(sh, &tl_hba->dev);
464 if (error) { 459 if (error) {
465 pr_err("%s: scsi_add_host failed\n", __func__); 460 pr_err("%s: scsi_add_host failed\n", __func__);
466 scsi_host_put(sh); 461 scsi_host_put(sh);
467 return -ENODEV; 462 return -ENODEV;
468 } 463 }
469 return 0; 464 return 0;
470 } 465 }
471 466
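The hostdata assignment in tcm_loop_driver_probe() above stashes the adapter pointer in the private area that scsi_host_alloc() reserved; getting it back is a single cast. A sketch of the retrieval side, assuming the driver's own struct tcm_loop_hba (the helper name is hypothetical):

static struct tcm_loop_hba *tl_hba_from_scsi_host(struct Scsi_Host *sh)
{
	/* Inverse of the probe-time store into sh->hostdata */
	return *(struct tcm_loop_hba **)sh->hostdata;
}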
472 static int tcm_loop_driver_remove(struct device *dev) 467 static int tcm_loop_driver_remove(struct device *dev)
473 { 468 {
474 struct tcm_loop_hba *tl_hba; 469 struct tcm_loop_hba *tl_hba;
475 struct Scsi_Host *sh; 470 struct Scsi_Host *sh;
476 471
477 tl_hba = to_tcm_loop_hba(dev); 472 tl_hba = to_tcm_loop_hba(dev);
478 sh = tl_hba->sh; 473 sh = tl_hba->sh;
479 474
480 scsi_remove_host(sh); 475 scsi_remove_host(sh);
481 scsi_host_put(sh); 476 scsi_host_put(sh);
482 return 0; 477 return 0;
483 } 478 }
484 479
485 static void tcm_loop_release_adapter(struct device *dev) 480 static void tcm_loop_release_adapter(struct device *dev)
486 { 481 {
487 struct tcm_loop_hba *tl_hba = to_tcm_loop_hba(dev); 482 struct tcm_loop_hba *tl_hba = to_tcm_loop_hba(dev);
488 483
489 kfree(tl_hba); 484 kfree(tl_hba);
490 } 485 }
491 486
492 /* 487 /*
493 * Called from tcm_loop_make_scsi_hba() below 488 * Called from tcm_loop_make_scsi_hba() below
494 */ 489 */
495 static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host_id) 490 static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host_id)
496 { 491 {
497 int ret; 492 int ret;
498 493
499 tl_hba->dev.bus = &tcm_loop_lld_bus; 494 tl_hba->dev.bus = &tcm_loop_lld_bus;
500 tl_hba->dev.parent = tcm_loop_primary; 495 tl_hba->dev.parent = tcm_loop_primary;
501 tl_hba->dev.release = &tcm_loop_release_adapter; 496 tl_hba->dev.release = &tcm_loop_release_adapter;
502 dev_set_name(&tl_hba->dev, "tcm_loop_adapter_%d", tcm_loop_host_id); 497 dev_set_name(&tl_hba->dev, "tcm_loop_adapter_%d", tcm_loop_host_id);
503 498
504 ret = device_register(&tl_hba->dev); 499 ret = device_register(&tl_hba->dev);
505 if (ret) { 500 if (ret) {
506 pr_err("device_register() failed for" 501 pr_err("device_register() failed for"
507 " tl_hba->dev: %d\n", ret); 502 " tl_hba->dev: %d\n", ret);
508 return -ENODEV; 503 return -ENODEV;
509 } 504 }
510 505
511 return 0; 506 return 0;
512 } 507 }
513 508
514 /* 509 /*
515 * Called from tcm_loop_fabric_init() in tcm_loop_fabric.c to load the emulated 510 * Called from tcm_loop_fabric_init() in tcm_loop_fabric.c to load the emulated
516 * tcm_loop SCSI bus. 511 * tcm_loop SCSI bus.
517 */ 512 */
518 static int tcm_loop_alloc_core_bus(void) 513 static int tcm_loop_alloc_core_bus(void)
519 { 514 {
520 int ret; 515 int ret;
521 516
522 tcm_loop_primary = root_device_register("tcm_loop_0"); 517 tcm_loop_primary = root_device_register("tcm_loop_0");
523 if (IS_ERR(tcm_loop_primary)) { 518 if (IS_ERR(tcm_loop_primary)) {
524 pr_err("Unable to allocate tcm_loop_primary\n"); 519 pr_err("Unable to allocate tcm_loop_primary\n");
525 return PTR_ERR(tcm_loop_primary); 520 return PTR_ERR(tcm_loop_primary);
526 } 521 }
527 522
528 ret = bus_register(&tcm_loop_lld_bus); 523 ret = bus_register(&tcm_loop_lld_bus);
529 if (ret) { 524 if (ret) {
530 pr_err("bus_register() failed for tcm_loop_lld_bus\n"); 525 pr_err("bus_register() failed for tcm_loop_lld_bus\n");
531 goto dev_unreg; 526 goto dev_unreg;
532 } 527 }
533 528
534 ret = driver_register(&tcm_loop_driverfs); 529 ret = driver_register(&tcm_loop_driverfs);
535 if (ret) { 530 if (ret) {
536 pr_err("driver_register() failed for" 531 pr_err("driver_register() failed for"
537 "tcm_loop_driverfs\n"); 532 "tcm_loop_driverfs\n");
538 goto bus_unreg; 533 goto bus_unreg;
539 } 534 }
540 535
541 pr_debug("Initialized TCM Loop Core Bus\n"); 536 pr_debug("Initialized TCM Loop Core Bus\n");
542 return ret; 537 return ret;
543 538
544 bus_unreg: 539 bus_unreg:
545 bus_unregister(&tcm_loop_lld_bus); 540 bus_unregister(&tcm_loop_lld_bus);
546 dev_unreg: 541 dev_unreg:
547 root_device_unregister(tcm_loop_primary); 542 root_device_unregister(tcm_loop_primary);
548 return ret; 543 return ret;
549 } 544 }
550 545
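tcm_loop_alloc_core_bus() above uses the usual register-then-unwind shape: each goto label releases exactly what was acquired before the failing step, in reverse order. The same shape in isolation, with placeholder resources (purely illustrative, not this commit's code):

static int reg_one(void) { return 0; }	/* placeholder resources */
static void unreg_one(void) { }
static int reg_two(void) { return 0; }
static void unreg_two(void) { }
static int reg_three(void) { return 0; }

static int register_all(void)
{
	int ret;

	ret = reg_one();
	if (ret)
		return ret;
	ret = reg_two();
	if (ret)
		goto out_one;
	ret = reg_three();
	if (ret)
		goto out_two;
	return 0;

out_two:
	unreg_two();
out_one:
	unreg_one();
	return ret;
}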
551 static void tcm_loop_release_core_bus(void) 546 static void tcm_loop_release_core_bus(void)
552 { 547 {
553 driver_unregister(&tcm_loop_driverfs); 548 driver_unregister(&tcm_loop_driverfs);
554 bus_unregister(&tcm_loop_lld_bus); 549 bus_unregister(&tcm_loop_lld_bus);
555 root_device_unregister(tcm_loop_primary); 550 root_device_unregister(tcm_loop_primary);
556 551
557 pr_debug("Releasing TCM Loop Core BUS\n"); 552 pr_debug("Releasing TCM Loop Core BUS\n");
558 } 553 }
559 554
560 static char *tcm_loop_get_fabric_name(void) 555 static char *tcm_loop_get_fabric_name(void)
561 { 556 {
562 return "loopback"; 557 return "loopback";
563 } 558 }
564 559
565 static u8 tcm_loop_get_fabric_proto_ident(struct se_portal_group *se_tpg) 560 static u8 tcm_loop_get_fabric_proto_ident(struct se_portal_group *se_tpg)
566 { 561 {
567 struct tcm_loop_tpg *tl_tpg = 562 struct tcm_loop_tpg *tl_tpg =
568 (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr; 563 (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
569 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; 564 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
570 /* 565 /*
571 * tl_proto_id is set at tcm_loop_make_scsi_hba() 566 * tl_proto_id is set at tcm_loop_make_scsi_hba()
572 * time based on the protocol dependent prefix of the passed configfs group. 567 * time based on the protocol dependent prefix of the passed configfs group.
573 * 568 *
574 * Based upon tl_proto_id, TCM_Loop emulates the requested fabric 569 * Based upon tl_proto_id, TCM_Loop emulates the requested fabric
575 * ProtocolID using target_core_fabric_lib.c symbols. 570 * ProtocolID using target_core_fabric_lib.c symbols.
576 */ 571 */
577 switch (tl_hba->tl_proto_id) { 572 switch (tl_hba->tl_proto_id) {
578 case SCSI_PROTOCOL_SAS: 573 case SCSI_PROTOCOL_SAS:
579 return sas_get_fabric_proto_ident(se_tpg); 574 return sas_get_fabric_proto_ident(se_tpg);
580 case SCSI_PROTOCOL_FCP: 575 case SCSI_PROTOCOL_FCP:
581 return fc_get_fabric_proto_ident(se_tpg); 576 return fc_get_fabric_proto_ident(se_tpg);
582 case SCSI_PROTOCOL_ISCSI: 577 case SCSI_PROTOCOL_ISCSI:
583 return iscsi_get_fabric_proto_ident(se_tpg); 578 return iscsi_get_fabric_proto_ident(se_tpg);
584 default: 579 default:
585 pr_err("Unknown tl_proto_id: 0x%02x, using" 580 pr_err("Unknown tl_proto_id: 0x%02x, using"
586 " SAS emulation\n", tl_hba->tl_proto_id); 581 " SAS emulation\n", tl_hba->tl_proto_id);
587 break; 582 break;
588 } 583 }
589 584
590 return sas_get_fabric_proto_ident(se_tpg); 585 return sas_get_fabric_proto_ident(se_tpg);
591 } 586 }
592 587
593 static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg) 588 static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
594 { 589 {
595 struct tcm_loop_tpg *tl_tpg = 590 struct tcm_loop_tpg *tl_tpg =
596 (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr; 591 (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
597 /* 592 /*
598 * Return the passed NAA identifier for the SAS Target Port 593 * Return the passed NAA identifier for the SAS Target Port
599 */ 594 */
600 return &tl_tpg->tl_hba->tl_wwn_address[0]; 595 return &tl_tpg->tl_hba->tl_wwn_address[0];
601 } 596 }
602 597
603 static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg) 598 static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg)
604 { 599 {
605 struct tcm_loop_tpg *tl_tpg = 600 struct tcm_loop_tpg *tl_tpg =
606 (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr; 601 (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
607 /* 602 /*
608 * This Tag is used when forming SCSI Name identifier in EVPD=1 0x83 603 * This Tag is used when forming SCSI Name identifier in EVPD=1 0x83
609 * to represent the SCSI Target Port. 604 * to represent the SCSI Target Port.
610 */ 605 */
611 return tl_tpg->tl_tpgt; 606 return tl_tpg->tl_tpgt;
612 } 607 }
613 608
614 static u32 tcm_loop_get_default_depth(struct se_portal_group *se_tpg) 609 static u32 tcm_loop_get_default_depth(struct se_portal_group *se_tpg)
615 { 610 {
616 return 1; 611 return 1;
617 } 612 }
618 613
619 static u32 tcm_loop_get_pr_transport_id( 614 static u32 tcm_loop_get_pr_transport_id(
620 struct se_portal_group *se_tpg, 615 struct se_portal_group *se_tpg,
621 struct se_node_acl *se_nacl, 616 struct se_node_acl *se_nacl,
622 struct t10_pr_registration *pr_reg, 617 struct t10_pr_registration *pr_reg,
623 int *format_code, 618 int *format_code,
624 unsigned char *buf) 619 unsigned char *buf)
625 { 620 {
626 struct tcm_loop_tpg *tl_tpg = 621 struct tcm_loop_tpg *tl_tpg =
627 (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr; 622 (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
628 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; 623 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
629 624
630 switch (tl_hba->tl_proto_id) { 625 switch (tl_hba->tl_proto_id) {
631 case SCSI_PROTOCOL_SAS: 626 case SCSI_PROTOCOL_SAS:
632 return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg, 627 return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
633 format_code, buf); 628 format_code, buf);
634 case SCSI_PROTOCOL_FCP: 629 case SCSI_PROTOCOL_FCP:
635 return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg, 630 return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
636 format_code, buf); 631 format_code, buf);
637 case SCSI_PROTOCOL_ISCSI: 632 case SCSI_PROTOCOL_ISCSI:
638 return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg, 633 return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
639 format_code, buf); 634 format_code, buf);
640 default: 635 default:
641 pr_err("Unknown tl_proto_id: 0x%02x, using" 636 pr_err("Unknown tl_proto_id: 0x%02x, using"
642 " SAS emulation\n", tl_hba->tl_proto_id); 637 " SAS emulation\n", tl_hba->tl_proto_id);
643 break; 638 break;
644 } 639 }
645 640
646 return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg, 641 return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
647 format_code, buf); 642 format_code, buf);
648 } 643 }
649 644
650 static u32 tcm_loop_get_pr_transport_id_len( 645 static u32 tcm_loop_get_pr_transport_id_len(
651 struct se_portal_group *se_tpg, 646 struct se_portal_group *se_tpg,
652 struct se_node_acl *se_nacl, 647 struct se_node_acl *se_nacl,
653 struct t10_pr_registration *pr_reg, 648 struct t10_pr_registration *pr_reg,
654 int *format_code) 649 int *format_code)
655 { 650 {
656 struct tcm_loop_tpg *tl_tpg = 651 struct tcm_loop_tpg *tl_tpg =
657 (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr; 652 (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
658 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; 653 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
659 654
660 switch (tl_hba->tl_proto_id) { 655 switch (tl_hba->tl_proto_id) {
661 case SCSI_PROTOCOL_SAS: 656 case SCSI_PROTOCOL_SAS:
662 return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, 657 return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
663 format_code); 658 format_code);
664 case SCSI_PROTOCOL_FCP: 659 case SCSI_PROTOCOL_FCP:
665 return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, 660 return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
666 format_code); 661 format_code);
667 case SCSI_PROTOCOL_ISCSI: 662 case SCSI_PROTOCOL_ISCSI:
668 return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, 663 return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
669 format_code); 664 format_code);
670 default: 665 default:
671 pr_err("Unknown tl_proto_id: 0x%02x, using" 666 pr_err("Unknown tl_proto_id: 0x%02x, using"
672 " SAS emulation\n", tl_hba->tl_proto_id); 667 " SAS emulation\n", tl_hba->tl_proto_id);
673 break; 668 break;
674 } 669 }
675 670
676 return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, 671 return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
677 format_code); 672 format_code);
678 } 673 }
679 674
680 /* 675 /*
681 * Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above 676 * Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above
682 * Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations. 677 * Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations.
683 */ 678 */
684 static char *tcm_loop_parse_pr_out_transport_id( 679 static char *tcm_loop_parse_pr_out_transport_id(
685 struct se_portal_group *se_tpg, 680 struct se_portal_group *se_tpg,
686 const char *buf, 681 const char *buf,
687 u32 *out_tid_len, 682 u32 *out_tid_len,
688 char **port_nexus_ptr) 683 char **port_nexus_ptr)
689 { 684 {
690 struct tcm_loop_tpg *tl_tpg = 685 struct tcm_loop_tpg *tl_tpg =
691 (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr; 686 (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
692 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; 687 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
693 688
694 switch (tl_hba->tl_proto_id) { 689 switch (tl_hba->tl_proto_id) {
695 case SCSI_PROTOCOL_SAS: 690 case SCSI_PROTOCOL_SAS:
696 return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, 691 return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
697 port_nexus_ptr); 692 port_nexus_ptr);
698 case SCSI_PROTOCOL_FCP: 693 case SCSI_PROTOCOL_FCP:
699 return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, 694 return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
700 port_nexus_ptr); 695 port_nexus_ptr);
701 case SCSI_PROTOCOL_ISCSI: 696 case SCSI_PROTOCOL_ISCSI:
702 return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, 697 return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
703 port_nexus_ptr); 698 port_nexus_ptr);
704 default: 699 default:
705 pr_err("Unknown tl_proto_id: 0x%02x, using" 700 pr_err("Unknown tl_proto_id: 0x%02x, using"
706 " SAS emulation\n", tl_hba->tl_proto_id); 701 " SAS emulation\n", tl_hba->tl_proto_id);
707 break; 702 break;
708 } 703 }
709 704
710 return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, 705 return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
711 port_nexus_ptr); 706 port_nexus_ptr);
712 } 707 }
713 708
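The same tl_proto_id switch, including the fall-back-to-SAS default, now appears in four callbacks above (proto ident, transport id, transport id length, and transport id parsing). One possible consolidation, sketched here and not part of this commit, normalizes the protocol id once so each caller can dispatch without duplicating the error path:

static u8 tl_proto_id_or_sas(struct tcm_loop_hba *tl_hba)
{
	switch (tl_hba->tl_proto_id) {
	case SCSI_PROTOCOL_SAS:
	case SCSI_PROTOCOL_FCP:
	case SCSI_PROTOCOL_ISCSI:
		return tl_hba->tl_proto_id;
	default:
		pr_err("Unknown tl_proto_id: 0x%02x, using SAS emulation\n",
			tl_hba->tl_proto_id);
		return SCSI_PROTOCOL_SAS;
	}
}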
714 /* 709 /*
715 * Returning 1 here allows target_core_mod to generate a struct se_node_acl 710 * Returning 1 here allows target_core_mod to generate a struct se_node_acl
716 * based upon the incoming fabric dependent SCSI Initiator Port. 711 * based upon the incoming fabric dependent SCSI Initiator Port.
717 */ 712 */
718 static int tcm_loop_check_demo_mode(struct se_portal_group *se_tpg) 713 static int tcm_loop_check_demo_mode(struct se_portal_group *se_tpg)
719 { 714 {
720 return 1; 715 return 1;
721 } 716 }
722 717
723 static int tcm_loop_check_demo_mode_cache(struct se_portal_group *se_tpg) 718 static int tcm_loop_check_demo_mode_cache(struct se_portal_group *se_tpg)
724 { 719 {
725 return 0; 720 return 0;
726 } 721 }
727 722
728 /* 723 /*
729 * Allow I_T Nexus full READ-WRITE access without explicit Initiator Node ACLs for 724 * Allow I_T Nexus full READ-WRITE access without explicit Initiator Node ACLs for
730 * local virtual Linux/SCSI LLD passthrough into VM hypervisor guest 725 * local virtual Linux/SCSI LLD passthrough into VM hypervisor guest
731 */ 726 */
732 static int tcm_loop_check_demo_mode_write_protect(struct se_portal_group *se_tpg) 727 static int tcm_loop_check_demo_mode_write_protect(struct se_portal_group *se_tpg)
733 { 728 {
734 return 0; 729 return 0;
735 } 730 }
736 731
737 /* 732 /*
738 * Because TCM_Loop does not use explicit ACLs and MappedLUNs, this will 733 * Because TCM_Loop does not use explicit ACLs and MappedLUNs, this will
739 * never be called for TCM_Loop by target_core_fabric_configfs.c code. 734 * never be called for TCM_Loop by target_core_fabric_configfs.c code.
740 * It has been added here as a nop for target_fabric_tf_ops_check() 735 * It has been added here as a nop for target_fabric_tf_ops_check()
741 */ 736 */
742 static int tcm_loop_check_prod_mode_write_protect(struct se_portal_group *se_tpg) 737 static int tcm_loop_check_prod_mode_write_protect(struct se_portal_group *se_tpg)
743 { 738 {
744 return 0; 739 return 0;
745 } 740 }
746 741
747 static struct se_node_acl *tcm_loop_tpg_alloc_fabric_acl( 742 static struct se_node_acl *tcm_loop_tpg_alloc_fabric_acl(
748 struct se_portal_group *se_tpg) 743 struct se_portal_group *se_tpg)
749 { 744 {
750 struct tcm_loop_nacl *tl_nacl; 745 struct tcm_loop_nacl *tl_nacl;
751 746
752 tl_nacl = kzalloc(sizeof(struct tcm_loop_nacl), GFP_KERNEL); 747 tl_nacl = kzalloc(sizeof(struct tcm_loop_nacl), GFP_KERNEL);
753 if (!tl_nacl) { 748 if (!tl_nacl) {
754 pr_err("Unable to allocate struct tcm_loop_nacl\n"); 749 pr_err("Unable to allocate struct tcm_loop_nacl\n");
755 return NULL; 750 return NULL;
756 } 751 }
757 752
758 return &tl_nacl->se_node_acl; 753 return &tl_nacl->se_node_acl;
759 } 754 }
760 755
761 static void tcm_loop_tpg_release_fabric_acl( 756 static void tcm_loop_tpg_release_fabric_acl(
762 struct se_portal_group *se_tpg, 757 struct se_portal_group *se_tpg,
763 struct se_node_acl *se_nacl) 758 struct se_node_acl *se_nacl)
764 { 759 {
765 struct tcm_loop_nacl *tl_nacl = container_of(se_nacl, 760 struct tcm_loop_nacl *tl_nacl = container_of(se_nacl,
766 struct tcm_loop_nacl, se_node_acl); 761 struct tcm_loop_nacl, se_node_acl);
767 762
768 kfree(tl_nacl); 763 kfree(tl_nacl);
769 } 764 }
770 765
771 static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg) 766 static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg)
772 { 767 {
773 return 1; 768 return 1;
774 } 769 }
775 770
776 static int tcm_loop_is_state_remove(struct se_cmd *se_cmd) 771 static int tcm_loop_is_state_remove(struct se_cmd *se_cmd)
777 { 772 {
778 /* 773 /*
779 * Assume struct scsi_cmnd is not in remove state.. 774 * Assume struct scsi_cmnd is not in remove state..
780 */ 775 */
781 return 0; 776 return 0;
782 } 777 }
783 778
784 static int tcm_loop_sess_logged_in(struct se_session *se_sess) 779 static int tcm_loop_sess_logged_in(struct se_session *se_sess)
785 { 780 {
786 /* 781 /*
787 * Assume that TL Nexus is always active 782 * Assume that TL Nexus is always active
788 */ 783 */
789 return 1; 784 return 1;
790 } 785 }
791 786
792 static u32 tcm_loop_sess_get_index(struct se_session *se_sess) 787 static u32 tcm_loop_sess_get_index(struct se_session *se_sess)
793 { 788 {
794 return 1; 789 return 1;
795 } 790 }
796 791
797 static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl) 792 static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl)
798 { 793 {
799 return; 794 return;
800 } 795 }
801 796
802 static u32 tcm_loop_get_task_tag(struct se_cmd *se_cmd) 797 static u32 tcm_loop_get_task_tag(struct se_cmd *se_cmd)
803 { 798 {
804 return 1; 799 return 1;
805 } 800 }
806 801
807 static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd) 802 static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd)
808 { 803 {
809 struct tcm_loop_cmd *tl_cmd = container_of(se_cmd, 804 struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
810 struct tcm_loop_cmd, tl_se_cmd); 805 struct tcm_loop_cmd, tl_se_cmd);
811 806
812 return tl_cmd->sc_cmd_state; 807 return tl_cmd->sc_cmd_state;
813 } 808 }
814 809
815 static int tcm_loop_shutdown_session(struct se_session *se_sess) 810 static int tcm_loop_shutdown_session(struct se_session *se_sess)
816 { 811 {
817 return 0; 812 return 0;
818 } 813 }
819 814
820 static void tcm_loop_close_session(struct se_session *se_sess) 815 static void tcm_loop_close_session(struct se_session *se_sess)
821 { 816 {
822 return; 817 return;
823 } 818 }
824 819
825 static void tcm_loop_stop_session( 820 static void tcm_loop_stop_session(
826 struct se_session *se_sess, 821 struct se_session *se_sess,
827 int sess_sleep, 822 int sess_sleep,
828 int conn_sleep) 823 int conn_sleep)
829 { 824 {
830 return; 825 return;
831 } 826 }
832 827
833 static void tcm_loop_fall_back_to_erl0(struct se_session *se_sess) 828 static void tcm_loop_fall_back_to_erl0(struct se_session *se_sess)
834 { 829 {
835 return; 830 return;
836 } 831 }
837 832
838 static int tcm_loop_write_pending(struct se_cmd *se_cmd) 833 static int tcm_loop_write_pending(struct se_cmd *se_cmd)
839 { 834 {
840 /* 835 /*
841 * Since Linux/SCSI has already sent down a struct scsi_cmnd with 836 * Since Linux/SCSI has already sent down a struct scsi_cmnd with
842 * sc->sc_data_direction of DMA_TO_DEVICE and struct scatterlist array 837 * sc->sc_data_direction of DMA_TO_DEVICE and struct scatterlist array
843 * memory, which has already been mapped to struct se_cmd->t_mem_list 838 * memory, which has already been mapped to struct se_cmd->t_mem_list
844 * format with transport_generic_map_mem_to_cmd(). 839 * format with transport_generic_map_mem_to_cmd().
845 * 840 *
846 * We now tell TCM to add this WRITE CDB directly into the TCM storage 841 * We now tell TCM to add this WRITE CDB directly into the TCM storage
847 * object execution queue. 842 * object execution queue.
848 */ 843 */
849 transport_generic_process_write(se_cmd); 844 transport_generic_process_write(se_cmd);
850 return 0; 845 return 0;
851 } 846 }
852 847
853 static int tcm_loop_write_pending_status(struct se_cmd *se_cmd) 848 static int tcm_loop_write_pending_status(struct se_cmd *se_cmd)
854 { 849 {
855 return 0; 850 return 0;
856 } 851 }
857 852
858 static int tcm_loop_queue_data_in(struct se_cmd *se_cmd) 853 static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
859 { 854 {
860 struct tcm_loop_cmd *tl_cmd = container_of(se_cmd, 855 struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
861 struct tcm_loop_cmd, tl_se_cmd); 856 struct tcm_loop_cmd, tl_se_cmd);
862 struct scsi_cmnd *sc = tl_cmd->sc; 857 struct scsi_cmnd *sc = tl_cmd->sc;
863 858
864 pr_debug("tcm_loop_queue_data_in() called for scsi_cmnd: %p" 859 pr_debug("tcm_loop_queue_data_in() called for scsi_cmnd: %p"
865 " cdb: 0x%02x\n", sc, sc->cmnd[0]); 860 " cdb: 0x%02x\n", sc, sc->cmnd[0]);
866 861
867 sc->result = SAM_STAT_GOOD; 862 sc->result = SAM_STAT_GOOD;
868 set_host_byte(sc, DID_OK); 863 set_host_byte(sc, DID_OK);
869 sc->scsi_done(sc); 864 sc->scsi_done(sc);
870 return 0; 865 return 0;
871 } 866 }
872 867
873 static int tcm_loop_queue_status(struct se_cmd *se_cmd) 868 static int tcm_loop_queue_status(struct se_cmd *se_cmd)
874 { 869 {
875 struct tcm_loop_cmd *tl_cmd = container_of(se_cmd, 870 struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
876 struct tcm_loop_cmd, tl_se_cmd); 871 struct tcm_loop_cmd, tl_se_cmd);
877 struct scsi_cmnd *sc = tl_cmd->sc; 872 struct scsi_cmnd *sc = tl_cmd->sc;
878 873
879 pr_debug("tcm_loop_queue_status() called for scsi_cmnd: %p" 874 pr_debug("tcm_loop_queue_status() called for scsi_cmnd: %p"
880 " cdb: 0x%02x\n", sc, sc->cmnd[0]); 875 " cdb: 0x%02x\n", sc, sc->cmnd[0]);
881 876
882 if (se_cmd->sense_buffer && 877 if (se_cmd->sense_buffer &&
883 ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) || 878 ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
884 (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) { 879 (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
885 880
886 memcpy(sc->sense_buffer, se_cmd->sense_buffer, 881 memcpy(sc->sense_buffer, se_cmd->sense_buffer,
887 SCSI_SENSE_BUFFERSIZE); 882 SCSI_SENSE_BUFFERSIZE);
888 sc->result = SAM_STAT_CHECK_CONDITION; 883 sc->result = SAM_STAT_CHECK_CONDITION;
889 set_driver_byte(sc, DRIVER_SENSE); 884 set_driver_byte(sc, DRIVER_SENSE);
890 } else 885 } else
891 sc->result = se_cmd->scsi_status; 886 sc->result = se_cmd->scsi_status;
892 887
893 set_host_byte(sc, DID_OK); 888 set_host_byte(sc, DID_OK);
894 sc->scsi_done(sc); 889 sc->scsi_done(sc);
895 return 0; 890 return 0;
896 } 891 }
897 892
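tcm_loop_queue_data_in() and tcm_loop_queue_status() above assemble sc->result using the classic Linux/SCSI byte layout that set_host_byte() and set_driver_byte() manipulate: one byte each for SCSI status, message, host, and driver, from least to most significant. As a standalone illustration (the helper is hypothetical, not in the driver):

static inline u32 tl_pack_scsi_result(u8 status, u8 msg, u8 host, u8 driver)
{
	/* sc->result layout: status | msg << 8 | host << 16 | driver << 24 */
	return status | (msg << 8) | (host << 16) | ((u32)driver << 24);
}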
898 static int tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd) 893 static int tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
899 { 894 {
900 struct se_tmr_req *se_tmr = se_cmd->se_tmr_req; 895 struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
901 struct tcm_loop_tmr *tl_tmr = se_tmr->fabric_tmr_ptr; 896 struct tcm_loop_tmr *tl_tmr = se_tmr->fabric_tmr_ptr;
902 /* 897 /*
903 * The SCSI EH thread will be sleeping on tl_tmr->tl_tmr_wait, go ahead 898 * The SCSI EH thread will be sleeping on tl_tmr->tl_tmr_wait, go ahead
904 * and wake up the wait_queue_head_t in tcm_loop_device_reset() 899 * and wake up the wait_queue_head_t in tcm_loop_device_reset()
905 */ 900 */
906 atomic_set(&tl_tmr->tmr_complete, 1); 901 atomic_set(&tl_tmr->tmr_complete, 1);
907 wake_up(&tl_tmr->tl_tmr_wait); 902 wake_up(&tl_tmr->tl_tmr_wait);
908 return 0; 903 return 0;
909 } 904 }
910 905
911 static u16 tcm_loop_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length) 906 static u16 tcm_loop_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)
912 { 907 {
913 return 0; 908 return 0;
914 } 909 }
915 910
916 static u16 tcm_loop_get_fabric_sense_len(void) 911 static u16 tcm_loop_get_fabric_sense_len(void)
917 { 912 {
918 return 0; 913 return 0;
919 } 914 }
920 915
921 static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba) 916 static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
922 { 917 {
923 switch (tl_hba->tl_proto_id) { 918 switch (tl_hba->tl_proto_id) {
924 case SCSI_PROTOCOL_SAS: 919 case SCSI_PROTOCOL_SAS:
925 return "SAS"; 920 return "SAS";
926 case SCSI_PROTOCOL_FCP: 921 case SCSI_PROTOCOL_FCP:
927 return "FCP"; 922 return "FCP";
928 case SCSI_PROTOCOL_ISCSI: 923 case SCSI_PROTOCOL_ISCSI:
929 return "iSCSI"; 924 return "iSCSI";
930 default: 925 default:
931 break; 926 break;
932 } 927 }
933 928
934 return "Unknown"; 929 return "Unknown";
935 } 930 }
936 931
937 /* Start items for tcm_loop_port_cit */ 932 /* Start items for tcm_loop_port_cit */
938 933
939 static int tcm_loop_port_link( 934 static int tcm_loop_port_link(
940 struct se_portal_group *se_tpg, 935 struct se_portal_group *se_tpg,
941 struct se_lun *lun) 936 struct se_lun *lun)
942 { 937 {
943 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, 938 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
944 struct tcm_loop_tpg, tl_se_tpg); 939 struct tcm_loop_tpg, tl_se_tpg);
945 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; 940 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
946 941
947 atomic_inc(&tl_tpg->tl_tpg_port_count); 942 atomic_inc(&tl_tpg->tl_tpg_port_count);
948 smp_mb__after_atomic_inc(); 943 smp_mb__after_atomic_inc();
949 /* 944 /*
950 * Add Linux/SCSI struct scsi_device by HCTL 945 * Add Linux/SCSI struct scsi_device by HCTL
951 */ 946 */
952 scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun); 947 scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun);
953 948
954 pr_debug("TCM_Loop_ConfigFS: Port Link Successful\n"); 949 pr_debug("TCM_Loop_ConfigFS: Port Link Successful\n");
955 return 0; 950 return 0;
956 } 951 }
957 952
958 static void tcm_loop_port_unlink( 953 static void tcm_loop_port_unlink(
959 struct se_portal_group *se_tpg, 954 struct se_portal_group *se_tpg,
960 struct se_lun *se_lun) 955 struct se_lun *se_lun)
961 { 956 {
962 struct scsi_device *sd; 957 struct scsi_device *sd;
963 struct tcm_loop_hba *tl_hba; 958 struct tcm_loop_hba *tl_hba;
964 struct tcm_loop_tpg *tl_tpg; 959 struct tcm_loop_tpg *tl_tpg;
965 960
966 tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg); 961 tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
967 tl_hba = tl_tpg->tl_hba; 962 tl_hba = tl_tpg->tl_hba;
968 963
969 sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt, 964 sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt,
970 se_lun->unpacked_lun); 965 se_lun->unpacked_lun);
971 if (!sd) { 966 if (!sd) {
972 pr_err("Unable to locate struct scsi_device for %d:%d:" 967 pr_err("Unable to locate struct scsi_device for %d:%d:"
973 "%d\n", 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun); 968 "%d\n", 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
974 return; 969 return;
975 } 970 }
976 /* 971 /*
977 * Remove Linux/SCSI struct scsi_device by HCTL 972 * Remove Linux/SCSI struct scsi_device by HCTL
978 */ 973 */
979 scsi_remove_device(sd); 974 scsi_remove_device(sd);
980 scsi_device_put(sd); 975 scsi_device_put(sd);
981 976
982 atomic_dec(&tl_tpg->tl_tpg_port_count); 977 atomic_dec(&tl_tpg->tl_tpg_port_count);
983 smp_mb__after_atomic_dec(); 978 smp_mb__after_atomic_dec();
984 979
985 pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n"); 980 pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
986 } 981 }
987 982
988 /* End items for tcm_loop_port_cit */ 983 /* End items for tcm_loop_port_cit */
989 984
990 /* Start items for tcm_loop_nexus_cit */ 985 /* Start items for tcm_loop_nexus_cit */
991 986
992 static int tcm_loop_make_nexus( 987 static int tcm_loop_make_nexus(
993 struct tcm_loop_tpg *tl_tpg, 988 struct tcm_loop_tpg *tl_tpg,
994 const char *name) 989 const char *name)
995 { 990 {
996 struct se_portal_group *se_tpg; 991 struct se_portal_group *se_tpg;
997 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; 992 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
998 struct tcm_loop_nexus *tl_nexus; 993 struct tcm_loop_nexus *tl_nexus;
999 int ret = -ENOMEM; 994 int ret = -ENOMEM;
1000 995
1001 if (tl_tpg->tl_hba->tl_nexus) { 996 if (tl_tpg->tl_hba->tl_nexus) {
1002 pr_debug("tl_tpg->tl_hba->tl_nexus already exists\n"); 997 pr_debug("tl_tpg->tl_hba->tl_nexus already exists\n");
1003 return -EEXIST; 998 return -EEXIST;
1004 } 999 }
1005 se_tpg = &tl_tpg->tl_se_tpg; 1000 se_tpg = &tl_tpg->tl_se_tpg;
1006 1001
1007 tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL); 1002 tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL);
1008 if (!tl_nexus) { 1003 if (!tl_nexus) {
1009 pr_err("Unable to allocate struct tcm_loop_nexus\n"); 1004 pr_err("Unable to allocate struct tcm_loop_nexus\n");
1010 return -ENOMEM; 1005 return -ENOMEM;
1011 } 1006 }
1012 /* 1007 /*
1013 * Initialize the struct se_session pointer 1008 * Initialize the struct se_session pointer
1014 */ 1009 */
1015 tl_nexus->se_sess = transport_init_session(); 1010 tl_nexus->se_sess = transport_init_session();
1016 if (IS_ERR(tl_nexus->se_sess)) { 1011 if (IS_ERR(tl_nexus->se_sess)) {
1017 ret = PTR_ERR(tl_nexus->se_sess); 1012 ret = PTR_ERR(tl_nexus->se_sess);
1018 goto out; 1013 goto out;
1019 } 1014 }
1020 /* 1015 /*
1021 * Since we are running in 'demo mode' this call will generate a 1016 * Since we are running in 'demo mode' this call will generate a
1022 * struct se_node_acl for the tcm_loop struct se_portal_group with the SCSI 1017 * struct se_node_acl for the tcm_loop struct se_portal_group with the SCSI
1023 * Initiator port name of the passed configfs group 'name'. 1018 * Initiator port name of the passed configfs group 'name'.
1024 */ 1019 */
1025 tl_nexus->se_sess->se_node_acl = core_tpg_check_initiator_node_acl( 1020 tl_nexus->se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
1026 se_tpg, (unsigned char *)name); 1021 se_tpg, (unsigned char *)name);
1027 if (!tl_nexus->se_sess->se_node_acl) { 1022 if (!tl_nexus->se_sess->se_node_acl) {
1028 transport_free_session(tl_nexus->se_sess); 1023 transport_free_session(tl_nexus->se_sess);
1029 goto out; 1024 goto out;
1030 } 1025 }
1031 /* 1026 /*
1032 * Now, register the SAS I_T Nexus as active with the call to 1027 * Now, register the SAS I_T Nexus as active with the call to
1033 * __transport_register_session() 1028 * __transport_register_session()
1034 */ 1029 */
1035 __transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl, 1030 __transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
1036 tl_nexus->se_sess, tl_nexus); 1031 tl_nexus->se_sess, tl_nexus);
1037 tl_tpg->tl_hba->tl_nexus = tl_nexus; 1032 tl_tpg->tl_hba->tl_nexus = tl_nexus;
1038 pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated" 1033 pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
1039 " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba), 1034 " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
1040 name); 1035 name);
1041 return 0; 1036 return 0;
1042 1037
1043 out: 1038 out:
1044 kfree(tl_nexus); 1039 kfree(tl_nexus);
1045 return ret; 1040 return ret;
1046 } 1041 }
1047 1042
1048 static int tcm_loop_drop_nexus( 1043 static int tcm_loop_drop_nexus(
1049 struct tcm_loop_tpg *tpg) 1044 struct tcm_loop_tpg *tpg)
1050 { 1045 {
1051 struct se_session *se_sess; 1046 struct se_session *se_sess;
1052 struct tcm_loop_nexus *tl_nexus; 1047 struct tcm_loop_nexus *tl_nexus;
1053 struct tcm_loop_hba *tl_hba = tpg->tl_hba; 1048 struct tcm_loop_hba *tl_hba = tpg->tl_hba;
1054 1049
1055 tl_nexus = tpg->tl_hba->tl_nexus; 1050 tl_nexus = tpg->tl_hba->tl_nexus;
1056 if (!tl_nexus) 1051 if (!tl_nexus)
1057 return -ENODEV; 1052 return -ENODEV;
1058 1053
1059 se_sess = tl_nexus->se_sess; 1054 se_sess = tl_nexus->se_sess;
1060 if (!se_sess) 1055 if (!se_sess)
1061 return -ENODEV; 1056 return -ENODEV;
1062 1057
1063 if (atomic_read(&tpg->tl_tpg_port_count)) { 1058 if (atomic_read(&tpg->tl_tpg_port_count)) {
1064 pr_err("Unable to remove TCM_Loop I_T Nexus with" 1059 pr_err("Unable to remove TCM_Loop I_T Nexus with"
1065 " active TPG port count: %d\n", 1060 " active TPG port count: %d\n",
1066 atomic_read(&tpg->tl_tpg_port_count)); 1061 atomic_read(&tpg->tl_tpg_port_count));
1067 return -EPERM; 1062 return -EPERM;
1068 } 1063 }
1069 1064
1070 pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated" 1065 pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
1071 " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba), 1066 " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
1072 tl_nexus->se_sess->se_node_acl->initiatorname); 1067 tl_nexus->se_sess->se_node_acl->initiatorname);
1073 /* 1068 /*
1074 * Release the SCSI I_T Nexus to the emulated SAS Target Port 1069 * Release the SCSI I_T Nexus to the emulated SAS Target Port
1075 */ 1070 */
1076 transport_deregister_session(tl_nexus->se_sess); 1071 transport_deregister_session(tl_nexus->se_sess);
1077 tpg->tl_hba->tl_nexus = NULL; 1072 tpg->tl_hba->tl_nexus = NULL;
1078 kfree(tl_nexus); 1073 kfree(tl_nexus);
1079 return 0; 1074 return 0;
1080 } 1075 }
1081 1076
1082 /* End items for tcm_loop_nexus_cit */ 1077 /* End items for tcm_loop_nexus_cit */
1083 1078
1084 static ssize_t tcm_loop_tpg_show_nexus( 1079 static ssize_t tcm_loop_tpg_show_nexus(
1085 struct se_portal_group *se_tpg, 1080 struct se_portal_group *se_tpg,
1086 char *page) 1081 char *page)
1087 { 1082 {
1088 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, 1083 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
1089 struct tcm_loop_tpg, tl_se_tpg); 1084 struct tcm_loop_tpg, tl_se_tpg);
1090 struct tcm_loop_nexus *tl_nexus; 1085 struct tcm_loop_nexus *tl_nexus;
1091 ssize_t ret; 1086 ssize_t ret;
1092 1087
1093 tl_nexus = tl_tpg->tl_hba->tl_nexus; 1088 tl_nexus = tl_tpg->tl_hba->tl_nexus;
1094 if (!tl_nexus) 1089 if (!tl_nexus)
1095 return -ENODEV; 1090 return -ENODEV;
1096 1091
1097 ret = snprintf(page, PAGE_SIZE, "%s\n", 1092 ret = snprintf(page, PAGE_SIZE, "%s\n",
1098 tl_nexus->se_sess->se_node_acl->initiatorname); 1093 tl_nexus->se_sess->se_node_acl->initiatorname);
1099 1094
1100 return ret; 1095 return ret;
1101 } 1096 }
1102 1097
1103 static ssize_t tcm_loop_tpg_store_nexus( 1098 static ssize_t tcm_loop_tpg_store_nexus(
1104 struct se_portal_group *se_tpg, 1099 struct se_portal_group *se_tpg,
1105 const char *page, 1100 const char *page,
1106 size_t count) 1101 size_t count)
1107 { 1102 {
1108 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, 1103 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
1109 struct tcm_loop_tpg, tl_se_tpg); 1104 struct tcm_loop_tpg, tl_se_tpg);
1110 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; 1105 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
1111 unsigned char i_port[TL_WWN_ADDR_LEN], *ptr, *port_ptr; 1106 unsigned char i_port[TL_WWN_ADDR_LEN], *ptr, *port_ptr;
1112 int ret; 1107 int ret;
1113 /* 1108 /*
1114 * Shut down the active I_T nexus if 'NULL' is passed.. 1109 * Shut down the active I_T nexus if 'NULL' is passed..
1115 */ 1110 */
1116 if (!strncmp(page, "NULL", 4)) { 1111 if (!strncmp(page, "NULL", 4)) {
1117 ret = tcm_loop_drop_nexus(tl_tpg); 1112 ret = tcm_loop_drop_nexus(tl_tpg);
1118 return (!ret) ? count : ret; 1113 return (!ret) ? count : ret;
1119 } 1114 }
1120 /* 1115 /*
1121 * Otherwise make sure the passed virtual Initiator port WWN matches 1116 * Otherwise make sure the passed virtual Initiator port WWN matches
1122 * the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call 1117 * the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call
1123 * tcm_loop_make_nexus() 1118 * tcm_loop_make_nexus()
1124 */ 1119 */
1125 if (strlen(page) >= TL_WWN_ADDR_LEN) { 1120 if (strlen(page) >= TL_WWN_ADDR_LEN) {
1126 pr_err("Emulated NAA Sas Address: %s, exceeds" 1121 pr_err("Emulated NAA Sas Address: %s, exceeds"
1127 " max: %d\n", page, TL_WWN_ADDR_LEN); 1122 " max: %d\n", page, TL_WWN_ADDR_LEN);
1128 return -EINVAL; 1123 return -EINVAL;
1129 } 1124 }
1130 snprintf(&i_port[0], TL_WWN_ADDR_LEN, "%s", page); 1125 snprintf(&i_port[0], TL_WWN_ADDR_LEN, "%s", page);
1131 1126
1132 ptr = strstr(i_port, "naa."); 1127 ptr = strstr(i_port, "naa.");
1133 if (ptr) { 1128 if (ptr) {
1134 if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) { 1129 if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) {
1135 pr_err("Passed SAS Initiator Port %s does not" 1130 pr_err("Passed SAS Initiator Port %s does not"
1136 " match target port protoid: %s\n", i_port, 1131 " match target port protoid: %s\n", i_port,
1137 tcm_loop_dump_proto_id(tl_hba)); 1132 tcm_loop_dump_proto_id(tl_hba));
1138 return -EINVAL; 1133 return -EINVAL;
1139 } 1134 }
1140 port_ptr = &i_port[0]; 1135 port_ptr = &i_port[0];
1141 goto check_newline; 1136 goto check_newline;
1142 } 1137 }
1143 ptr = strstr(i_port, "fc."); 1138 ptr = strstr(i_port, "fc.");
1144 if (ptr) { 1139 if (ptr) {
1145 if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) { 1140 if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) {
1146 pr_err("Passed FCP Initiator Port %s does not" 1141 pr_err("Passed FCP Initiator Port %s does not"
1147 " match target port protoid: %s\n", i_port, 1142 " match target port protoid: %s\n", i_port,
1148 tcm_loop_dump_proto_id(tl_hba)); 1143 tcm_loop_dump_proto_id(tl_hba));
1149 return -EINVAL; 1144 return -EINVAL;
1150 } 1145 }
1151 port_ptr = &i_port[3]; /* Skip over "fc." */ 1146 port_ptr = &i_port[3]; /* Skip over "fc." */
1152 goto check_newline; 1147 goto check_newline;
1153 } 1148 }
1154 ptr = strstr(i_port, "iqn."); 1149 ptr = strstr(i_port, "iqn.");
1155 if (ptr) { 1150 if (ptr) {
1156 if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) { 1151 if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) {
1157 pr_err("Passed iSCSI Initiator Port %s does not" 1152 pr_err("Passed iSCSI Initiator Port %s does not"
1158 " match target port protoid: %s\n", i_port, 1153 " match target port protoid: %s\n", i_port,
1159 tcm_loop_dump_proto_id(tl_hba)); 1154 tcm_loop_dump_proto_id(tl_hba));
1160 return -EINVAL; 1155 return -EINVAL;
1161 } 1156 }
1162 port_ptr = &i_port[0]; 1157 port_ptr = &i_port[0];
1163 goto check_newline; 1158 goto check_newline;
1164 } 1159 }
1165 pr_err("Unable to locate prefix for emulated Initiator Port:" 1160 pr_err("Unable to locate prefix for emulated Initiator Port:"
1166 " %s\n", i_port); 1161 " %s\n", i_port);
1167 return -EINVAL; 1162 return -EINVAL;
1168 /* 1163 /*
1169 * Clear any trailing newline for the NAA WWN 1164 * Clear any trailing newline for the NAA WWN
1170 */ 1165 */
1171 check_newline: 1166 check_newline:
1172 if (i_port[strlen(i_port)-1] == '\n') 1167 if (i_port[strlen(i_port)-1] == '\n')
1173 i_port[strlen(i_port)-1] = '\0'; 1168 i_port[strlen(i_port)-1] = '\0';
1174 1169
1175 ret = tcm_loop_make_nexus(tl_tpg, port_ptr); 1170 ret = tcm_loop_make_nexus(tl_tpg, port_ptr);
1176 if (ret < 0) 1171 if (ret < 0)
1177 return ret; 1172 return ret;
1178 1173
1179 return count; 1174 return count;
1180 } 1175 }
1181 1176
1182 TF_TPG_BASE_ATTR(tcm_loop, nexus, S_IRUGO | S_IWUSR); 1177 TF_TPG_BASE_ATTR(tcm_loop, nexus, S_IRUGO | S_IWUSR);
1183 1178
1184 static struct configfs_attribute *tcm_loop_tpg_attrs[] = { 1179 static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
1185 &tcm_loop_tpg_nexus.attr, 1180 &tcm_loop_tpg_nexus.attr,
1186 NULL, 1181 NULL,
1187 }; 1182 };
1188 1183
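tcm_loop_tpg_store_nexus() above and tcm_loop_make_scsi_hba() below recognize the same three WWN prefixes through repeated strstr() blocks. The mapping they implement, written out as a lookup table (a sketch under the assumption that the per-prefix checks stay equivalent; not this commit's code):

static const struct {
	const char	*prefix;	/* configfs name prefix */
	u8		proto_id;	/* matching tl_proto_id */
	int		skip;		/* bytes stripped before use */
} tl_wwn_prefix_tbl[] = {
	{ "naa.", SCSI_PROTOCOL_SAS,   0 },
	{ "fc.",  SCSI_PROTOCOL_FCP,   3 },	/* skip over "fc." */
	{ "iqn.", SCSI_PROTOCOL_ISCSI, 0 },
};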
1189 /* Start items for tcm_loop_naa_cit */ 1184 /* Start items for tcm_loop_naa_cit */
1190 1185
1191 struct se_portal_group *tcm_loop_make_naa_tpg( 1186 struct se_portal_group *tcm_loop_make_naa_tpg(
1192 struct se_wwn *wwn, 1187 struct se_wwn *wwn,
1193 struct config_group *group, 1188 struct config_group *group,
1194 const char *name) 1189 const char *name)
1195 { 1190 {
1196 struct tcm_loop_hba *tl_hba = container_of(wwn, 1191 struct tcm_loop_hba *tl_hba = container_of(wwn,
1197 struct tcm_loop_hba, tl_hba_wwn); 1192 struct tcm_loop_hba, tl_hba_wwn);
1198 struct tcm_loop_tpg *tl_tpg; 1193 struct tcm_loop_tpg *tl_tpg;
1199 char *tpgt_str, *end_ptr; 1194 char *tpgt_str, *end_ptr;
1200 int ret; 1195 int ret;
1201 unsigned short int tpgt; 1196 unsigned short int tpgt;
1202 1197
1203 tpgt_str = strstr(name, "tpgt_"); 1198 tpgt_str = strstr(name, "tpgt_");
1204 if (!tpgt_str) { 1199 if (!tpgt_str) {
1205 pr_err("Unable to locate \"tpgt_#\" directory" 1200 pr_err("Unable to locate \"tpgt_#\" directory"
1206 " group\n"); 1201 " group\n");
1207 return ERR_PTR(-EINVAL); 1202 return ERR_PTR(-EINVAL);
1208 } 1203 }
1209 tpgt_str += 5; /* Skip ahead of "tpgt_" */ 1204 tpgt_str += 5; /* Skip ahead of "tpgt_" */
1210 tpgt = (unsigned short int) simple_strtoul(tpgt_str, &end_ptr, 0); 1205 tpgt = (unsigned short int) simple_strtoul(tpgt_str, &end_ptr, 0);
1211 1206
1212 if (tpgt >= TL_TPGS_PER_HBA) { 1207 if (tpgt >= TL_TPGS_PER_HBA) {
1213 pr_err("Passed tpgt: %hu exceeds TL_TPGS_PER_HBA:" 1208 pr_err("Passed tpgt: %hu exceeds TL_TPGS_PER_HBA:"
1214 " %u\n", tpgt, TL_TPGS_PER_HBA); 1209 " %u\n", tpgt, TL_TPGS_PER_HBA);
1215 return ERR_PTR(-EINVAL); 1210 return ERR_PTR(-EINVAL);
1216 } 1211 }
1217 tl_tpg = &tl_hba->tl_hba_tpgs[tpgt]; 1212 tl_tpg = &tl_hba->tl_hba_tpgs[tpgt];
1218 tl_tpg->tl_hba = tl_hba; 1213 tl_tpg->tl_hba = tl_hba;
1219 tl_tpg->tl_tpgt = tpgt; 1214 tl_tpg->tl_tpgt = tpgt;
1220 /* 1215 /*
1221 * Register the tl_tpg as an emulated SAS TCM Target Endpoint 1216 * Register the tl_tpg as an emulated SAS TCM Target Endpoint
1222 */ 1217 */
1223 ret = core_tpg_register(&tcm_loop_fabric_configfs->tf_ops, 1218 ret = core_tpg_register(&tcm_loop_fabric_configfs->tf_ops,
1224 wwn, &tl_tpg->tl_se_tpg, tl_tpg, 1219 wwn, &tl_tpg->tl_se_tpg, tl_tpg,
1225 TRANSPORT_TPG_TYPE_NORMAL); 1220 TRANSPORT_TPG_TYPE_NORMAL);
1226 if (ret < 0) 1221 if (ret < 0)
1227 return ERR_PTR(ret); 1222 return ERR_PTR(ret);
1228 1223
1229 pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s" 1224 pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s"
1230 " Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba), 1225 " Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
1231 config_item_name(&wwn->wwn_group.cg_item), tpgt); 1226 config_item_name(&wwn->wwn_group.cg_item), tpgt);
1232 1227
1233 return &tl_tpg->tl_se_tpg; 1228 return &tl_tpg->tl_se_tpg;
1234 } 1229 }
1235 1230
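The tpgt parse in tcm_loop_make_naa_tpg() above uses simple_strtoul(), which performs no overflow checking before the unsigned short truncation. A hedged alternative using the checked kstrto* helpers (a sketch only; it rejects trailing garbage that simple_strtoul() would silently ignore):

u16 tpgt;

if (kstrtou16(tpgt_str, 0, &tpgt) || tpgt >= TL_TPGS_PER_HBA)
	return ERR_PTR(-EINVAL);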
1236 void tcm_loop_drop_naa_tpg( 1231 void tcm_loop_drop_naa_tpg(
1237 struct se_portal_group *se_tpg) 1232 struct se_portal_group *se_tpg)
1238 { 1233 {
1239 struct se_wwn *wwn = se_tpg->se_tpg_wwn; 1234 struct se_wwn *wwn = se_tpg->se_tpg_wwn;
1240 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, 1235 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
1241 struct tcm_loop_tpg, tl_se_tpg); 1236 struct tcm_loop_tpg, tl_se_tpg);
1242 struct tcm_loop_hba *tl_hba; 1237 struct tcm_loop_hba *tl_hba;
1243 unsigned short tpgt; 1238 unsigned short tpgt;
1244 1239
1245 tl_hba = tl_tpg->tl_hba; 1240 tl_hba = tl_tpg->tl_hba;
1246 tpgt = tl_tpg->tl_tpgt; 1241 tpgt = tl_tpg->tl_tpgt;
1247 /* 1242 /*
1248 * Release the I_T Nexus for the Virtual SAS link if present 1243 * Release the I_T Nexus for the Virtual SAS link if present
1249 */ 1244 */
1250 tcm_loop_drop_nexus(tl_tpg); 1245 tcm_loop_drop_nexus(tl_tpg);
1251 /* 1246 /*
1247 * Deregister the tl_tpg as an emulated SAS TCM Target Endpoint 1242 * Deregister the tl_tpg as an emulated SAS TCM Target Endpoint
1253 */ 1248 */
1254 core_tpg_deregister(se_tpg); 1249 core_tpg_deregister(se_tpg);
1255 1250
1256 tl_tpg->tl_hba = NULL; 1251 tl_tpg->tl_hba = NULL;
1257 tl_tpg->tl_tpgt = 0; 1252 tl_tpg->tl_tpgt = 0;
1258 1253
1259 pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s" 1254 pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s"
1260 " Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba), 1255 " Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
1261 config_item_name(&wwn->wwn_group.cg_item), tpgt); 1256 config_item_name(&wwn->wwn_group.cg_item), tpgt);
1262 } 1257 }
1263 1258
1264 /* End items for tcm_loop_naa_cit */ 1259 /* End items for tcm_loop_naa_cit */
1265 1260
1266 /* Start items for tcm_loop_cit */ 1261 /* Start items for tcm_loop_cit */
1267 1262
1268 struct se_wwn *tcm_loop_make_scsi_hba( 1263 struct se_wwn *tcm_loop_make_scsi_hba(
1269 struct target_fabric_configfs *tf, 1264 struct target_fabric_configfs *tf,
1270 struct config_group *group, 1265 struct config_group *group,
1271 const char *name) 1266 const char *name)
1272 { 1267 {
1273 struct tcm_loop_hba *tl_hba; 1268 struct tcm_loop_hba *tl_hba;
1274 struct Scsi_Host *sh; 1269 struct Scsi_Host *sh;
1275 char *ptr; 1270 char *ptr;
1276 int ret, off = 0; 1271 int ret, off = 0;
1277 1272
1278 tl_hba = kzalloc(sizeof(struct tcm_loop_hba), GFP_KERNEL); 1273 tl_hba = kzalloc(sizeof(struct tcm_loop_hba), GFP_KERNEL);
1279 if (!tl_hba) { 1274 if (!tl_hba) {
1280 pr_err("Unable to allocate struct tcm_loop_hba\n"); 1275 pr_err("Unable to allocate struct tcm_loop_hba\n");
1281 return ERR_PTR(-ENOMEM); 1276 return ERR_PTR(-ENOMEM);
1282 } 1277 }
1283 /* 1278 /*
1284 * Determine the emulated Protocol Identifier and Target Port Name 1279 * Determine the emulated Protocol Identifier and Target Port Name
1285 * based on the incoming configfs directory name. 1280 * based on the incoming configfs directory name.
1286 */ 1281 */
1287 ptr = strstr(name, "naa."); 1282 ptr = strstr(name, "naa.");
1288 if (ptr) { 1283 if (ptr) {
1289 tl_hba->tl_proto_id = SCSI_PROTOCOL_SAS; 1284 tl_hba->tl_proto_id = SCSI_PROTOCOL_SAS;
1290 goto check_len; 1285 goto check_len;
1291 } 1286 }
1292 ptr = strstr(name, "fc."); 1287 ptr = strstr(name, "fc.");
1293 if (ptr) { 1288 if (ptr) {
1294 tl_hba->tl_proto_id = SCSI_PROTOCOL_FCP; 1289 tl_hba->tl_proto_id = SCSI_PROTOCOL_FCP;
1295 off = 3; /* Skip over "fc." */ 1290 off = 3; /* Skip over "fc." */
1296 goto check_len; 1291 goto check_len;
1297 } 1292 }
1298 ptr = strstr(name, "iqn."); 1293 ptr = strstr(name, "iqn.");
1299 if (!ptr) { 1294 if (!ptr) {
1300 pr_err("Unable to locate prefix for emulated Target " 1295 pr_err("Unable to locate prefix for emulated Target "
1301 "Port: %s\n", name); 1296 "Port: %s\n", name);
1302 ret = -EINVAL; 1297 ret = -EINVAL;
1303 goto out; 1298 goto out;
1304 } 1299 }
1305 tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI; 1300 tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI;
1306 1301
1307 check_len: 1302 check_len:
1308 if (strlen(name) >= TL_WWN_ADDR_LEN) { 1303 if (strlen(name) >= TL_WWN_ADDR_LEN) {
1309 pr_err("Emulated NAA %s Address: %s, exceeds" 1304 pr_err("Emulated NAA %s Address: %s, exceeds"
1310 " max: %d\n", name, tcm_loop_dump_proto_id(tl_hba), 1305 " max: %d\n", name, tcm_loop_dump_proto_id(tl_hba),
1311 TL_WWN_ADDR_LEN); 1306 TL_WWN_ADDR_LEN);
1312 ret = -EINVAL; 1307 ret = -EINVAL;
1313 goto out; 1308 goto out;
1314 } 1309 }
1315 snprintf(&tl_hba->tl_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]); 1310 snprintf(&tl_hba->tl_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]);
1316 1311
1317 /* 1312 /*
1318 * Call device_register(&tl_hba->dev) to register the emulated 1313 * Call device_register(&tl_hba->dev) to register the emulated
1319 * Linux/SCSI LLD; the struct Scsi_Host at tl_hba->sh is allocated by 1314 * Linux/SCSI LLD; the struct Scsi_Host at tl_hba->sh is allocated by
1320 * the resulting probe callback, tcm_loop_driver_probe(). 1315 * the resulting probe callback, tcm_loop_driver_probe().
1321 */ 1316 */
1322 ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt); 1317 ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt);
1323 if (ret) 1318 if (ret)
1324 goto out; 1319 goto out;
1325 1320
1326 sh = tl_hba->sh; 1321 sh = tl_hba->sh;
1327 tcm_loop_hba_no_cnt++; 1322 tcm_loop_hba_no_cnt++;
1328 pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target" 1323 pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target"
1329 " %s Address: %s at Linux/SCSI Host ID: %d\n", 1324 " %s Address: %s at Linux/SCSI Host ID: %d\n",
1330 tcm_loop_dump_proto_id(tl_hba), name, sh->host_no); 1325 tcm_loop_dump_proto_id(tl_hba), name, sh->host_no);
1331 1326
1332 return &tl_hba->tl_hba_wwn; 1327 return &tl_hba->tl_hba_wwn;
1333 out: 1328 out:
1334 kfree(tl_hba); 1329 kfree(tl_hba);
1335 return ERR_PTR(ret); 1330 return ERR_PTR(ret);
1336 } 1331 }
1337 1332
1338 void tcm_loop_drop_scsi_hba( 1333 void tcm_loop_drop_scsi_hba(
1339 struct se_wwn *wwn) 1334 struct se_wwn *wwn)
1340 { 1335 {
1341 struct tcm_loop_hba *tl_hba = container_of(wwn, 1336 struct tcm_loop_hba *tl_hba = container_of(wwn,
1342 struct tcm_loop_hba, tl_hba_wwn); 1337 struct tcm_loop_hba, tl_hba_wwn);
1343 1338
1344 pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target" 1339 pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target"
1345 " SAS Address: %s at Linux/SCSI Host ID: %d\n", 1340 " SAS Address: %s at Linux/SCSI Host ID: %d\n",
1346 tl_hba->tl_wwn_address, tl_hba->sh->host_no); 1341 tl_hba->tl_wwn_address, tl_hba->sh->host_no);
1347 /* 1342 /*
1348 * Call device_unregister() on the original tl_hba->dev. 1343 * Call device_unregister() on the original tl_hba->dev.
1349 * tcm_loop_release_adapter() above will 1344 * tcm_loop_release_adapter() above will
1350 * release *tl_hba. 1345 * release *tl_hba.
1351 */ 1346 */
1352 device_unregister(&tl_hba->dev); 1347 device_unregister(&tl_hba->dev);
1353 } 1348 }
1354 1349
1355 /* Start items for tcm_loop_cit */ 1350 /* Start items for tcm_loop_cit */
1356 static ssize_t tcm_loop_wwn_show_attr_version( 1351 static ssize_t tcm_loop_wwn_show_attr_version(
1357 struct target_fabric_configfs *tf, 1352 struct target_fabric_configfs *tf,
1358 char *page) 1353 char *page)
1359 { 1354 {
1360 return sprintf(page, "TCM Loopback Fabric module %s\n", TCM_LOOP_VERSION); 1355 return sprintf(page, "TCM Loopback Fabric module %s\n", TCM_LOOP_VERSION);
1361 } 1356 }
1362 1357
1363 TF_WWN_ATTR_RO(tcm_loop, version); 1358 TF_WWN_ATTR_RO(tcm_loop, version);
1364 1359
1365 static struct configfs_attribute *tcm_loop_wwn_attrs[] = { 1360 static struct configfs_attribute *tcm_loop_wwn_attrs[] = {
1366 &tcm_loop_wwn_version.attr, 1361 &tcm_loop_wwn_version.attr,
1367 NULL, 1362 NULL,
1368 }; 1363 };
1369 1364
1370 /* End items for tcm_loop_cit */ 1365 /* End items for tcm_loop_cit */
1371 1366
1372 static int tcm_loop_register_configfs(void) 1367 static int tcm_loop_register_configfs(void)
1373 { 1368 {
1374 struct target_fabric_configfs *fabric; 1369 struct target_fabric_configfs *fabric;
1375 struct config_group *tf_cg; 1370 struct config_group *tf_cg;
1376 int ret; 1371 int ret;
1377 /* 1372 /*
1378 * Set the TCM Loop HBA counter to zero 1373 * Set the TCM Loop HBA counter to zero
1379 */ 1374 */
1380 tcm_loop_hba_no_cnt = 0; 1375 tcm_loop_hba_no_cnt = 0;
1381 /* 1376 /*
1382 * Register the top level struct config_item_type with TCM core 1377 * Register the top level struct config_item_type with TCM core
1383 */ 1378 */
1384 fabric = target_fabric_configfs_init(THIS_MODULE, "loopback"); 1379 fabric = target_fabric_configfs_init(THIS_MODULE, "loopback");
1385 if (IS_ERR(fabric)) { 1380 if (IS_ERR(fabric)) {
1386 pr_err("tcm_loop_register_configfs() failed!\n"); 1381 pr_err("tcm_loop_register_configfs() failed!\n");
1387 return PTR_ERR(fabric); 1382 return PTR_ERR(fabric);
1388 } 1383 }
1389 /* 1384 /*
1390 * Setup the fabric API of function pointers used by target_core_mod 1385 * Setup the fabric API of function pointers used by target_core_mod
1391 */ 1386 */
1392 fabric->tf_ops.get_fabric_name = &tcm_loop_get_fabric_name; 1387 fabric->tf_ops.get_fabric_name = &tcm_loop_get_fabric_name;
1393 fabric->tf_ops.get_fabric_proto_ident = &tcm_loop_get_fabric_proto_ident; 1388 fabric->tf_ops.get_fabric_proto_ident = &tcm_loop_get_fabric_proto_ident;
1394 fabric->tf_ops.tpg_get_wwn = &tcm_loop_get_endpoint_wwn; 1389 fabric->tf_ops.tpg_get_wwn = &tcm_loop_get_endpoint_wwn;
1395 fabric->tf_ops.tpg_get_tag = &tcm_loop_get_tag; 1390 fabric->tf_ops.tpg_get_tag = &tcm_loop_get_tag;
1396 fabric->tf_ops.tpg_get_default_depth = &tcm_loop_get_default_depth; 1391 fabric->tf_ops.tpg_get_default_depth = &tcm_loop_get_default_depth;
1397 fabric->tf_ops.tpg_get_pr_transport_id = &tcm_loop_get_pr_transport_id; 1392 fabric->tf_ops.tpg_get_pr_transport_id = &tcm_loop_get_pr_transport_id;
1398 fabric->tf_ops.tpg_get_pr_transport_id_len = 1393 fabric->tf_ops.tpg_get_pr_transport_id_len =
1399 &tcm_loop_get_pr_transport_id_len; 1394 &tcm_loop_get_pr_transport_id_len;
1400 fabric->tf_ops.tpg_parse_pr_out_transport_id = 1395 fabric->tf_ops.tpg_parse_pr_out_transport_id =
1401 &tcm_loop_parse_pr_out_transport_id; 1396 &tcm_loop_parse_pr_out_transport_id;
1402 fabric->tf_ops.tpg_check_demo_mode = &tcm_loop_check_demo_mode; 1397 fabric->tf_ops.tpg_check_demo_mode = &tcm_loop_check_demo_mode;
1403 fabric->tf_ops.tpg_check_demo_mode_cache = 1398 fabric->tf_ops.tpg_check_demo_mode_cache =
1404 &tcm_loop_check_demo_mode_cache; 1399 &tcm_loop_check_demo_mode_cache;
1405 fabric->tf_ops.tpg_check_demo_mode_write_protect = 1400 fabric->tf_ops.tpg_check_demo_mode_write_protect =
1406 &tcm_loop_check_demo_mode_write_protect; 1401 &tcm_loop_check_demo_mode_write_protect;
1407 fabric->tf_ops.tpg_check_prod_mode_write_protect = 1402 fabric->tf_ops.tpg_check_prod_mode_write_protect =
1408 &tcm_loop_check_prod_mode_write_protect; 1403 &tcm_loop_check_prod_mode_write_protect;
1409 /* 1404 /*
1410 * The TCM loopback fabric module runs in demo-mode to a local 1405 * The TCM loopback fabric module runs in demo-mode to a local
1411 * virtual SCSI device, so fabric dependent initiator ACLs are 1406 * virtual SCSI device, so fabric dependent initiator ACLs are
1412 * not required. 1407 * not required.
1413 */ 1408 */
1414 fabric->tf_ops.tpg_alloc_fabric_acl = &tcm_loop_tpg_alloc_fabric_acl; 1409 fabric->tf_ops.tpg_alloc_fabric_acl = &tcm_loop_tpg_alloc_fabric_acl;
1415 fabric->tf_ops.tpg_release_fabric_acl = 1410 fabric->tf_ops.tpg_release_fabric_acl =
1416 &tcm_loop_tpg_release_fabric_acl; 1411 &tcm_loop_tpg_release_fabric_acl;
1417 fabric->tf_ops.tpg_get_inst_index = &tcm_loop_get_inst_index; 1412 fabric->tf_ops.tpg_get_inst_index = &tcm_loop_get_inst_index;
1418 /* 1413 /*
1419 * Used for setting up remaining TCM resources in process context 1414 * Used for setting up remaining TCM resources in process context
1420 */ 1415 */
1421 fabric->tf_ops.new_cmd_map = &tcm_loop_new_cmd_map; 1416 fabric->tf_ops.new_cmd_map = &tcm_loop_new_cmd_map;
1422 fabric->tf_ops.check_stop_free = &tcm_loop_check_stop_free; 1417 fabric->tf_ops.check_stop_free = &tcm_loop_check_stop_free;
1423 fabric->tf_ops.release_cmd = &tcm_loop_release_cmd; 1418 fabric->tf_ops.release_cmd = &tcm_loop_release_cmd;
1424 fabric->tf_ops.shutdown_session = &tcm_loop_shutdown_session; 1419 fabric->tf_ops.shutdown_session = &tcm_loop_shutdown_session;
1425 fabric->tf_ops.close_session = &tcm_loop_close_session; 1420 fabric->tf_ops.close_session = &tcm_loop_close_session;
1426 fabric->tf_ops.stop_session = &tcm_loop_stop_session; 1421 fabric->tf_ops.stop_session = &tcm_loop_stop_session;
1427 fabric->tf_ops.fall_back_to_erl0 = &tcm_loop_fall_back_to_erl0; 1422 fabric->tf_ops.fall_back_to_erl0 = &tcm_loop_fall_back_to_erl0;
1428 fabric->tf_ops.sess_logged_in = &tcm_loop_sess_logged_in; 1423 fabric->tf_ops.sess_logged_in = &tcm_loop_sess_logged_in;
1429 fabric->tf_ops.sess_get_index = &tcm_loop_sess_get_index; 1424 fabric->tf_ops.sess_get_index = &tcm_loop_sess_get_index;
1430 fabric->tf_ops.sess_get_initiator_sid = NULL; 1425 fabric->tf_ops.sess_get_initiator_sid = NULL;
1431 fabric->tf_ops.write_pending = &tcm_loop_write_pending; 1426 fabric->tf_ops.write_pending = &tcm_loop_write_pending;
1432 fabric->tf_ops.write_pending_status = &tcm_loop_write_pending_status; 1427 fabric->tf_ops.write_pending_status = &tcm_loop_write_pending_status;
1433 /* 1428 /*
1434 * Not used for TCM loopback 1429 * Not used for TCM loopback
1435 */ 1430 */
1436 fabric->tf_ops.set_default_node_attributes = 1431 fabric->tf_ops.set_default_node_attributes =
1437 &tcm_loop_set_default_node_attributes; 1432 &tcm_loop_set_default_node_attributes;
1438 fabric->tf_ops.get_task_tag = &tcm_loop_get_task_tag; 1433 fabric->tf_ops.get_task_tag = &tcm_loop_get_task_tag;
1439 fabric->tf_ops.get_cmd_state = &tcm_loop_get_cmd_state; 1434 fabric->tf_ops.get_cmd_state = &tcm_loop_get_cmd_state;
1440 fabric->tf_ops.queue_data_in = &tcm_loop_queue_data_in; 1435 fabric->tf_ops.queue_data_in = &tcm_loop_queue_data_in;
1441 fabric->tf_ops.queue_status = &tcm_loop_queue_status; 1436 fabric->tf_ops.queue_status = &tcm_loop_queue_status;
1442 fabric->tf_ops.queue_tm_rsp = &tcm_loop_queue_tm_rsp; 1437 fabric->tf_ops.queue_tm_rsp = &tcm_loop_queue_tm_rsp;
1443 fabric->tf_ops.set_fabric_sense_len = &tcm_loop_set_fabric_sense_len; 1438 fabric->tf_ops.set_fabric_sense_len = &tcm_loop_set_fabric_sense_len;
1444 fabric->tf_ops.get_fabric_sense_len = &tcm_loop_get_fabric_sense_len; 1439 fabric->tf_ops.get_fabric_sense_len = &tcm_loop_get_fabric_sense_len;
1445 fabric->tf_ops.is_state_remove = &tcm_loop_is_state_remove; 1440 fabric->tf_ops.is_state_remove = &tcm_loop_is_state_remove;
1446 1441
1447 tf_cg = &fabric->tf_group; 1442 tf_cg = &fabric->tf_group;
1448 /* 1443 /*
1449 * Setup function pointers for generic logic in target_core_fabric_configfs.c 1444 * Setup function pointers for generic logic in target_core_fabric_configfs.c
1450 */ 1445 */
1451 fabric->tf_ops.fabric_make_wwn = &tcm_loop_make_scsi_hba; 1446 fabric->tf_ops.fabric_make_wwn = &tcm_loop_make_scsi_hba;
1452 fabric->tf_ops.fabric_drop_wwn = &tcm_loop_drop_scsi_hba; 1447 fabric->tf_ops.fabric_drop_wwn = &tcm_loop_drop_scsi_hba;
1453 fabric->tf_ops.fabric_make_tpg = &tcm_loop_make_naa_tpg; 1448 fabric->tf_ops.fabric_make_tpg = &tcm_loop_make_naa_tpg;
1454 fabric->tf_ops.fabric_drop_tpg = &tcm_loop_drop_naa_tpg; 1449 fabric->tf_ops.fabric_drop_tpg = &tcm_loop_drop_naa_tpg;
1455 /* 1450 /*
1456 * fabric_post_link() and fabric_pre_unlink() are used for 1451 * fabric_post_link() and fabric_pre_unlink() are used for
1457 * registration and release of TCM Loop Virtual SCSI LUNs. 1452 * registration and release of TCM Loop Virtual SCSI LUNs.
1458 */ 1453 */
1459 fabric->tf_ops.fabric_post_link = &tcm_loop_port_link; 1454 fabric->tf_ops.fabric_post_link = &tcm_loop_port_link;
1460 fabric->tf_ops.fabric_pre_unlink = &tcm_loop_port_unlink; 1455 fabric->tf_ops.fabric_pre_unlink = &tcm_loop_port_unlink;
1461 fabric->tf_ops.fabric_make_np = NULL; 1456 fabric->tf_ops.fabric_make_np = NULL;
1462 fabric->tf_ops.fabric_drop_np = NULL; 1457 fabric->tf_ops.fabric_drop_np = NULL;
1463 /* 1458 /*
1464 * Setup default attribute lists for various fabric->tf_cit_tmpl 1459 * Setup default attribute lists for various fabric->tf_cit_tmpl
1465 */ 1460 */
1466 TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_loop_wwn_attrs; 1461 TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_loop_wwn_attrs;
1467 TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_loop_tpg_attrs; 1462 TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_loop_tpg_attrs;
1468 TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL; 1463 TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
1469 TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL; 1464 TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
1470 TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; 1465 TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
1471 /* 1466 /*
1472 * Once fabric->tf_ops has been setup, now register the fabric for 1467 * Once fabric->tf_ops has been setup, now register the fabric for
1473 * use within TCM 1468 * use within TCM
1474 */ 1469 */
1475 ret = target_fabric_configfs_register(fabric); 1470 ret = target_fabric_configfs_register(fabric);
1476 if (ret < 0) { 1471 if (ret < 0) {
1477 pr_err("target_fabric_configfs_register() for" 1472 pr_err("target_fabric_configfs_register() for"
1478 " TCM_Loop failed!\n"); 1473 " TCM_Loop failed!\n");
1479 target_fabric_configfs_free(fabric); 1474 target_fabric_configfs_free(fabric);
1480 return -1; 1475 return -1;
1481 } 1476 }
1482 /* 1477 /*
1483 * Setup our local pointer to *fabric. 1478 * Setup our local pointer to *fabric.
1484 */ 1479 */
1485 tcm_loop_fabric_configfs = fabric; 1480 tcm_loop_fabric_configfs = fabric;
1486 pr_debug("TCM_LOOP[0] - Set fabric ->" 1481 pr_debug("TCM_LOOP[0] - Set fabric ->"
1487 " tcm_loop_fabric_configfs\n"); 1482 " tcm_loop_fabric_configfs\n");
1488 return 0; 1483 return 0;
1489 } 1484 }
1490 1485
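The function above is the canonical registration shape: allocate the fabric with target_fabric_configfs_init(), populate tf_ops and the TF_CIT_TMPL() attribute lists, register, and free the allocation if registration fails. A condensed sketch of the same shape for a hypothetical fabric "foo" (all foo_* names are illustrative; unlike the code above it propagates the error from target_fabric_configfs_register() rather than returning -1):

    static struct target_fabric_configfs *foo_fabric_configfs;

    static int foo_register_configfs(void)
    {
            struct target_fabric_configfs *fabric;
            int ret;

            fabric = target_fabric_configfs_init(THIS_MODULE, "foo");
            if (IS_ERR(fabric))
                    return PTR_ERR(fabric);

            /* wire up the mandatory tf_ops callbacks; NULL the unused ones */
            fabric->tf_ops.get_fabric_name = &foo_get_fabric_name;
            fabric->tf_ops.fabric_make_np = NULL;

            /* default attribute lists hang off the tf_cit_tmpl templates */
            TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = foo_wwn_attrs;

            ret = target_fabric_configfs_register(fabric);
            if (ret < 0) {
                    target_fabric_configfs_free(fabric);
                    return ret;
            }
            foo_fabric_configfs = fabric;
            return 0;
    }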
1491 static void tcm_loop_deregister_configfs(void) 1486 static void tcm_loop_deregister_configfs(void)
1492 { 1487 {
1493 if (!tcm_loop_fabric_configfs) 1488 if (!tcm_loop_fabric_configfs)
1494 return; 1489 return;
1495 1490
1496 target_fabric_configfs_deregister(tcm_loop_fabric_configfs); 1491 target_fabric_configfs_deregister(tcm_loop_fabric_configfs);
1497 tcm_loop_fabric_configfs = NULL; 1492 tcm_loop_fabric_configfs = NULL;
1498 pr_debug("TCM_LOOP[0] - Cleared" 1493 pr_debug("TCM_LOOP[0] - Cleared"
1499 " tcm_loop_fabric_configfs\n"); 1494 " tcm_loop_fabric_configfs\n");
1500 } 1495 }
1501 1496
1502 static int __init tcm_loop_fabric_init(void) 1497 static int __init tcm_loop_fabric_init(void)
1503 { 1498 {
1504 int ret; 1499 int ret;
1505 1500
1506 tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache", 1501 tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache",
1507 sizeof(struct tcm_loop_cmd), 1502 sizeof(struct tcm_loop_cmd),
1508 __alignof__(struct tcm_loop_cmd), 1503 __alignof__(struct tcm_loop_cmd),
1509 0, NULL); 1504 0, NULL);
1510 if (!tcm_loop_cmd_cache) { 1505 if (!tcm_loop_cmd_cache) {
1511 pr_debug("kmem_cache_create() for" 1506 pr_debug("kmem_cache_create() for"
1512 " tcm_loop_cmd_cache failed\n"); 1507 " tcm_loop_cmd_cache failed\n");
1513 return -ENOMEM; 1508 return -ENOMEM;
1514 } 1509 }
1515 1510
1516 ret = tcm_loop_alloc_core_bus(); 1511 ret = tcm_loop_alloc_core_bus();
1517 if (ret) 1512 if (ret)
1518 return ret; 1513 return ret;
1519 1514
1520 ret = tcm_loop_register_configfs(); 1515 ret = tcm_loop_register_configfs();
1521 if (ret) { 1516 if (ret) {
1522 tcm_loop_release_core_bus(); 1517 tcm_loop_release_core_bus();
1523 return ret; 1518 return ret;
1524 } 1519 }
1525 1520
1526 return 0; 1521 return 0;
1527 } 1522 }
1528 1523
1529 static void __exit tcm_loop_fabric_exit(void) 1524 static void __exit tcm_loop_fabric_exit(void)
1530 { 1525 {
1531 tcm_loop_deregister_configfs(); 1526 tcm_loop_deregister_configfs();
1532 tcm_loop_release_core_bus(); 1527 tcm_loop_release_core_bus();
1533 kmem_cache_destroy(tcm_loop_cmd_cache); 1528 kmem_cache_destroy(tcm_loop_cmd_cache);
1534 } 1529 }
1535 1530
1536 MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module"); 1531 MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module");
1537 MODULE_AUTHOR("Nicholas A. Bellinger <nab@risingtidesystems.com>"); 1532 MODULE_AUTHOR("Nicholas A. Bellinger <nab@risingtidesystems.com>");
1538 MODULE_LICENSE("GPL"); 1533 MODULE_LICENSE("GPL");
1539 module_init(tcm_loop_fabric_init); 1534 module_init(tcm_loop_fabric_init);
1540 module_exit(tcm_loop_fabric_exit); 1535 module_exit(tcm_loop_fabric_exit);
1541 1536
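One thing worth noting in tcm_loop_fabric_init() above: if tcm_loop_alloc_core_bus() fails, the function returns without destroying the tcm_loop_cmd_cache it just created. A sketch of the same init sequence with goto-based unwind, so teardown always mirrors setup in reverse (all example_* names are hypothetical stand-ins for the helpers above):

    static int __init example_fabric_init(void)
    {
            int ret;

            example_cmd_cache = kmem_cache_create("example_cmd_cache",
                            sizeof(struct example_cmd),
                            __alignof__(struct example_cmd), 0, NULL);
            if (!example_cmd_cache)
                    return -ENOMEM;

            ret = example_alloc_core_bus();
            if (ret)
                    goto out_destroy_cache;

            ret = example_register_configfs();
            if (ret)
                    goto out_release_bus;

            return 0;

    out_release_bus:
            example_release_core_bus();
    out_destroy_cache:
            kmem_cache_destroy(example_cmd_cache);
            return ret;
    }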
drivers/target/target_core_alua.c
1 /******************************************************************************* 1 /*******************************************************************************
2 * Filename: target_core_alua.c 2 * Filename: target_core_alua.c
3 * 3 *
4 * This file contains SPC-3 compliant asymmetric logical unit assignment (ALUA) 4 * This file contains SPC-3 compliant asymmetric logical unit assignment (ALUA)
5 * 5 *
6 * Copyright (c) 2009-2010 Rising Tide Systems 6 * Copyright (c) 2009-2010 Rising Tide Systems
7 * Copyright (c) 2009-2010 Linux-iSCSI.org 7 * Copyright (c) 2009-2010 Linux-iSCSI.org
8 * 8 *
9 * Nicholas A. Bellinger <nab@kernel.org> 9 * Nicholas A. Bellinger <nab@kernel.org>
10 * 10 *
11 * This program is free software; you can redistribute it and/or modify 11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by 12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or 13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version. 14 * (at your option) any later version.
15 * 15 *
16 * This program is distributed in the hope that it will be useful, 16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details. 19 * GNU General Public License for more details.
20 * 20 *
21 * You should have received a copy of the GNU General Public License 21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software 22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 23 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 * 24 *
25 ******************************************************************************/ 25 ******************************************************************************/
26 26
27 #include <linux/slab.h> 27 #include <linux/slab.h>
28 #include <linux/spinlock.h> 28 #include <linux/spinlock.h>
29 #include <linux/configfs.h> 29 #include <linux/configfs.h>
30 #include <linux/export.h> 30 #include <linux/export.h>
31 #include <scsi/scsi.h> 31 #include <scsi/scsi.h>
32 #include <scsi/scsi_cmnd.h> 32 #include <scsi/scsi_cmnd.h>
33 33
34 #include <target/target_core_base.h> 34 #include <target/target_core_base.h>
35 #include <target/target_core_device.h> 35 #include <target/target_core_backend.h>
36 #include <target/target_core_transport.h> 36 #include <target/target_core_fabric.h>
37 #include <target/target_core_fabric_ops.h>
38 #include <target/target_core_configfs.h> 37 #include <target/target_core_configfs.h>
39 38
40 #include "target_core_internal.h" 39 #include "target_core_internal.h"
41 #include "target_core_alua.h" 40 #include "target_core_alua.h"
42 #include "target_core_ua.h" 41 #include "target_core_ua.h"
43 42
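The include block above is the mechanical pattern this commit applies to every file it touches: the fine-grained headers the file used to pull in (target_core_device.h, target_core_transport.h, target_core_fabric_ops.h) are replaced by the two consolidated ones. A minimal sketch of the resulting include block for a hypothetical new consumer (the file is illustrative; whether it needs backend.h, fabric.h, or both depends on which side of the interface it sits on):

    /* sketch: includes for a hypothetical drivers/target file after this commit */
    #include <target/target_core_base.h>     /* core data structures shared by everything */
    #include <target/target_core_backend.h>  /* only if the file implements an I/O backend */
    #include <target/target_core_fabric.h>   /* only if the file is, or calls into, a fabric module */
    #include <target/target_core_configfs.h> /* configfs glue, where still needed */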
44 static int core_alua_check_transition(int state, int *primary); 43 static int core_alua_check_transition(int state, int *primary);
45 static int core_alua_set_tg_pt_secondary_state( 44 static int core_alua_set_tg_pt_secondary_state(
46 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, 45 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
47 struct se_port *port, int explict, int offline); 46 struct se_port *port, int explict, int offline);
48 47
49 static u16 alua_lu_gps_counter; 48 static u16 alua_lu_gps_counter;
50 static u32 alua_lu_gps_count; 49 static u32 alua_lu_gps_count;
51 50
52 static DEFINE_SPINLOCK(lu_gps_lock); 51 static DEFINE_SPINLOCK(lu_gps_lock);
53 static LIST_HEAD(lu_gps_list); 52 static LIST_HEAD(lu_gps_list);
54 53
55 struct t10_alua_lu_gp *default_lu_gp; 54 struct t10_alua_lu_gp *default_lu_gp;
56 55
57 /* 56 /*
58 * REPORT_TARGET_PORT_GROUPS 57 * REPORT_TARGET_PORT_GROUPS
59 * 58 *
60 * See spc4r17 section 6.27 59 * See spc4r17 section 6.27
61 */ 60 */
62 int target_emulate_report_target_port_groups(struct se_task *task) 61 int target_emulate_report_target_port_groups(struct se_task *task)
63 { 62 {
64 struct se_cmd *cmd = task->task_se_cmd; 63 struct se_cmd *cmd = task->task_se_cmd;
65 struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev; 64 struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
66 struct se_port *port; 65 struct se_port *port;
67 struct t10_alua_tg_pt_gp *tg_pt_gp; 66 struct t10_alua_tg_pt_gp *tg_pt_gp;
68 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; 67 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
69 unsigned char *buf; 68 unsigned char *buf;
70 u32 rd_len = 0, off = 4; /* Skip over RESERVED area to first 69 u32 rd_len = 0, off = 4; /* Skip over RESERVED area to first
71 Target port group descriptor */ 70 Target port group descriptor */
72 /* 71 /*
73 * Need at least 4 bytes of response data or else we can't 72 * Need at least 4 bytes of response data or else we can't
74 * even fit the return data length. 73 * even fit the return data length.
75 */ 74 */
76 if (cmd->data_length < 4) { 75 if (cmd->data_length < 4) {
77 pr_warn("REPORT TARGET PORT GROUPS allocation length %u" 76 pr_warn("REPORT TARGET PORT GROUPS allocation length %u"
78 " too small\n", cmd->data_length); 77 " too small\n", cmd->data_length);
79 return -EINVAL; 78 return -EINVAL;
80 } 79 }
81 80
82 buf = transport_kmap_first_data_page(cmd); 81 buf = transport_kmap_first_data_page(cmd);
83 82
84 spin_lock(&su_dev->t10_alua.tg_pt_gps_lock); 83 spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
85 list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list, 84 list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
86 tg_pt_gp_list) { 85 tg_pt_gp_list) {
87 /* 86 /*
88 * Check if the Target port group and Target port descriptor list 87 * Check if the Target port group and Target port descriptor list
89 * based on tg_pt_gp_members count will fit into the response payload. 88 * based on tg_pt_gp_members count will fit into the response payload.
90 * Otherwise, bump rd_len to let the initiator know we have exceeded 89 * Otherwise, bump rd_len to let the initiator know we have exceeded
91 * the allocation length and the response is truncated. 90 * the allocation length and the response is truncated.
92 */ 91 */
93 if ((off + 8 + (tg_pt_gp->tg_pt_gp_members * 4)) > 92 if ((off + 8 + (tg_pt_gp->tg_pt_gp_members * 4)) >
94 cmd->data_length) { 93 cmd->data_length) {
95 rd_len += 8 + (tg_pt_gp->tg_pt_gp_members * 4); 94 rd_len += 8 + (tg_pt_gp->tg_pt_gp_members * 4);
96 continue; 95 continue;
97 } 96 }
98 /* 97 /*
99 * PREF: Preferred target port bit, determine if this 98 * PREF: Preferred target port bit, determine if this
100 * bit should be set for port group. 99 * bit should be set for port group.
101 */ 100 */
102 if (tg_pt_gp->tg_pt_gp_pref) 101 if (tg_pt_gp->tg_pt_gp_pref)
103 buf[off] = 0x80; 102 buf[off] = 0x80;
104 /* 103 /*
105 * Set the ASYMMETRIC ACCESS State 104 * Set the ASYMMETRIC ACCESS State
106 */ 105 */
107 buf[off++] |= (atomic_read( 106 buf[off++] |= (atomic_read(
108 &tg_pt_gp->tg_pt_gp_alua_access_state) & 0xff); 107 &tg_pt_gp->tg_pt_gp_alua_access_state) & 0xff);
109 /* 108 /*
110 * Set supported ASYMMETRIC ACCESS State bits 109 * Set supported ASYMMETRIC ACCESS State bits
111 */ 110 */
112 buf[off] = 0x80; /* T_SUP */ 111 buf[off] = 0x80; /* T_SUP */
113 buf[off] |= 0x40; /* O_SUP */ 112 buf[off] |= 0x40; /* O_SUP */
114 buf[off] |= 0x8; /* U_SUP */ 113 buf[off] |= 0x8; /* U_SUP */
115 buf[off] |= 0x4; /* S_SUP */ 114 buf[off] |= 0x4; /* S_SUP */
116 buf[off] |= 0x2; /* AN_SUP */ 115 buf[off] |= 0x2; /* AN_SUP */
117 buf[off++] |= 0x1; /* AO_SUP */ 116 buf[off++] |= 0x1; /* AO_SUP */
118 /* 117 /*
119 * TARGET PORT GROUP 118 * TARGET PORT GROUP
120 */ 119 */
121 buf[off++] = ((tg_pt_gp->tg_pt_gp_id >> 8) & 0xff); 120 buf[off++] = ((tg_pt_gp->tg_pt_gp_id >> 8) & 0xff);
122 buf[off++] = (tg_pt_gp->tg_pt_gp_id & 0xff); 121 buf[off++] = (tg_pt_gp->tg_pt_gp_id & 0xff);
123 122
124 off++; /* Skip over Reserved */ 123 off++; /* Skip over Reserved */
125 /* 124 /*
126 * STATUS CODE 125 * STATUS CODE
127 */ 126 */
128 buf[off++] = (tg_pt_gp->tg_pt_gp_alua_access_status & 0xff); 127 buf[off++] = (tg_pt_gp->tg_pt_gp_alua_access_status & 0xff);
129 /* 128 /*
130 * Vendor Specific field 129 * Vendor Specific field
131 */ 130 */
132 buf[off++] = 0x00; 131 buf[off++] = 0x00;
133 /* 132 /*
134 * TARGET PORT COUNT 133 * TARGET PORT COUNT
135 */ 134 */
136 buf[off++] = (tg_pt_gp->tg_pt_gp_members & 0xff); 135 buf[off++] = (tg_pt_gp->tg_pt_gp_members & 0xff);
137 rd_len += 8; 136 rd_len += 8;
138 137
139 spin_lock(&tg_pt_gp->tg_pt_gp_lock); 138 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
140 list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list, 139 list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list,
141 tg_pt_gp_mem_list) { 140 tg_pt_gp_mem_list) {
142 port = tg_pt_gp_mem->tg_pt; 141 port = tg_pt_gp_mem->tg_pt;
143 /* 142 /*
144 * Start Target Port descriptor format 143 * Start Target Port descriptor format
145 * 144 *
146 * See spc4r17 section 6.2.7 Table 247 145 * See spc4r17 section 6.2.7 Table 247
147 */ 146 */
148 off += 2; /* Skip over Obsolete */ 147 off += 2; /* Skip over Obsolete */
149 /* 148 /*
150 * Set RELATIVE TARGET PORT IDENTIFIER 149 * Set RELATIVE TARGET PORT IDENTIFIER
151 */ 150 */
152 buf[off++] = ((port->sep_rtpi >> 8) & 0xff); 151 buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
153 buf[off++] = (port->sep_rtpi & 0xff); 152 buf[off++] = (port->sep_rtpi & 0xff);
154 rd_len += 4; 153 rd_len += 4;
155 } 154 }
156 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 155 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
157 } 156 }
158 spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); 157 spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
159 /* 158 /*
160 * Set the RETURN DATA LENGTH set in the header of the DataIN Payload 159 * Set the RETURN DATA LENGTH set in the header of the DataIN Payload
161 */ 160 */
162 buf[0] = ((rd_len >> 24) & 0xff); 161 buf[0] = ((rd_len >> 24) & 0xff);
163 buf[1] = ((rd_len >> 16) & 0xff); 162 buf[1] = ((rd_len >> 16) & 0xff);
164 buf[2] = ((rd_len >> 8) & 0xff); 163 buf[2] = ((rd_len >> 8) & 0xff);
165 buf[3] = (rd_len & 0xff); 164 buf[3] = (rd_len & 0xff);
166 165
167 transport_kunmap_first_data_page(cmd); 166 transport_kunmap_first_data_page(cmd);
168 167
169 task->task_scsi_status = GOOD; 168 task->task_scsi_status = GOOD;
170 transport_complete_task(task, 1); 169 transport_complete_task(task, 1);
171 return 0; 170 return 0;
172 } 171 }
173 172
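The length accounting in target_emulate_report_target_port_groups() is easy to restate in isolation: the response carries a 4-byte header whose RETURN DATA LENGTH covers one 8-byte descriptor per target port group plus 4 bytes per member port, and rd_len keeps counting even for groups that no longer fit, so the initiator can tell the response was truncated. A standalone sketch of that arithmetic (userspace C, purely illustrative):

    #include <stdio.h>

    /* 4-byte header, then 8 bytes per group plus 4 per member port;
     * rd_len counts every group, fitting or not, like the loop above. */
    static unsigned int rtpg_rd_len(const unsigned int *members,
                                    unsigned int ngroups,
                                    unsigned int alloc_len)
    {
            unsigned int off = 4, rd_len = 0, i;

            for (i = 0; i < ngroups; i++) {
                    unsigned int desc = 8 + 4 * members[i];

                    rd_len += desc;
                    if (off + desc <= alloc_len)
                            off += desc;    /* descriptor fits in the payload */
            }
            return rd_len;  /* written big-endian into bytes 0..3 */
    }

    int main(void)
    {
            unsigned int members[] = { 2, 3 };

            printf("%u\n", rtpg_rd_len(members, 2, 64));    /* prints 36 */
            return 0;
    }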
174 /* 173 /*
175 * SET_TARGET_PORT_GROUPS for explicit ALUA operation. 174 * SET_TARGET_PORT_GROUPS for explicit ALUA operation.
176 * 175 *
177 * See spc4r17 section 6.35 176 * See spc4r17 section 6.35
178 */ 177 */
179 int target_emulate_set_target_port_groups(struct se_task *task) 178 int target_emulate_set_target_port_groups(struct se_task *task)
180 { 179 {
181 struct se_cmd *cmd = task->task_se_cmd; 180 struct se_cmd *cmd = task->task_se_cmd;
182 struct se_device *dev = cmd->se_dev; 181 struct se_device *dev = cmd->se_dev;
183 struct se_subsystem_dev *su_dev = dev->se_sub_dev; 182 struct se_subsystem_dev *su_dev = dev->se_sub_dev;
184 struct se_port *port, *l_port = cmd->se_lun->lun_sep; 183 struct se_port *port, *l_port = cmd->se_lun->lun_sep;
185 struct se_node_acl *nacl = cmd->se_sess->se_node_acl; 184 struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
186 struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp; 185 struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
187 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem; 186 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem;
188 unsigned char *buf; 187 unsigned char *buf;
189 unsigned char *ptr; 188 unsigned char *ptr;
190 u32 len = 4; /* Skip over RESERVED area in header */ 189 u32 len = 4; /* Skip over RESERVED area in header */
191 int alua_access_state, primary = 0, rc; 190 int alua_access_state, primary = 0, rc;
192 u16 tg_pt_id, rtpi; 191 u16 tg_pt_id, rtpi;
193 192
194 if (!l_port) { 193 if (!l_port) {
195 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 194 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
196 return -EINVAL; 195 return -EINVAL;
197 } 196 }
198 buf = transport_kmap_first_data_page(cmd); 197 buf = transport_kmap_first_data_page(cmd);
199 198
200 /* 199 /*
201 * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed 200 * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed
202 * for the local tg_pt_gp. 201 * for the local tg_pt_gp.
203 */ 202 */
204 l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem; 203 l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
205 if (!l_tg_pt_gp_mem) { 204 if (!l_tg_pt_gp_mem) {
206 pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n"); 205 pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
207 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; 206 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
208 rc = -EINVAL; 207 rc = -EINVAL;
209 goto out; 208 goto out;
210 } 209 }
211 spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock); 210 spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
212 l_tg_pt_gp = l_tg_pt_gp_mem->tg_pt_gp; 211 l_tg_pt_gp = l_tg_pt_gp_mem->tg_pt_gp;
213 if (!l_tg_pt_gp) { 212 if (!l_tg_pt_gp) {
214 spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock); 213 spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
215 pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n"); 214 pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
216 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; 215 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
217 rc = -EINVAL; 216 rc = -EINVAL;
218 goto out; 217 goto out;
219 } 218 }
220 rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA); 219 rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA);
221 spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock); 220 spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
222 221
223 if (!rc) { 222 if (!rc) {
224 pr_debug("Unable to process SET_TARGET_PORT_GROUPS" 223 pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
225 " while TPGS_EXPLICT_ALUA is disabled\n"); 224 " while TPGS_EXPLICT_ALUA is disabled\n");
226 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; 225 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
227 rc = -EINVAL; 226 rc = -EINVAL;
228 goto out; 227 goto out;
229 } 228 }
230 229
231 ptr = &buf[4]; /* Skip over RESERVED area in header */ 230 ptr = &buf[4]; /* Skip over RESERVED area in header */
232 231
233 while (len < cmd->data_length) { 232 while (len < cmd->data_length) {
234 alua_access_state = (ptr[0] & 0x0f); 233 alua_access_state = (ptr[0] & 0x0f);
235 /* 234 /*
236 * Check the received ALUA access state, and determine if 235 * Check the received ALUA access state, and determine if
237 * the state is a primary or secondary target port asymmetric 236 * the state is a primary or secondary target port asymmetric
238 * access state. 237 * access state.
239 */ 238 */
240 rc = core_alua_check_transition(alua_access_state, &primary); 239 rc = core_alua_check_transition(alua_access_state, &primary);
241 if (rc != 0) { 240 if (rc != 0) {
242 /* 241 /*
243 * If the SET TARGET PORT GROUPS attempts to establish 242 * If the SET TARGET PORT GROUPS attempts to establish
244 * an invalid combination of target port asymmetric 243 * an invalid combination of target port asymmetric
245 * access states or attempts to establish an 244 * access states or attempts to establish an
246 * unsupported target port asymmetric access state, 245 * unsupported target port asymmetric access state,
247 * then the command shall be terminated with CHECK 246 * then the command shall be terminated with CHECK
248 * CONDITION status, with the sense key set to ILLEGAL 247 * CONDITION status, with the sense key set to ILLEGAL
249 * REQUEST, and the additional sense code set to INVALID 248 * REQUEST, and the additional sense code set to INVALID
250 * FIELD IN PARAMETER LIST. 249 * FIELD IN PARAMETER LIST.
251 */ 250 */
252 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; 251 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
253 rc = -EINVAL; 252 rc = -EINVAL;
254 goto out; 253 goto out;
255 } 254 }
256 rc = -1; 255 rc = -1;
257 /* 256 /*
258 * If the ASYMMETRIC ACCESS STATE field (see table 267) 257 * If the ASYMMETRIC ACCESS STATE field (see table 267)
259 * specifies a primary target port asymmetric access state, 258 * specifies a primary target port asymmetric access state,
260 * then the TARGET PORT GROUP OR TARGET PORT field specifies 259 * then the TARGET PORT GROUP OR TARGET PORT field specifies
261 * a primary target port group for which the primary target 260 * a primary target port group for which the primary target
262 * port asymmetric access state shall be changed. If the 261 * port asymmetric access state shall be changed. If the
263 * ASYMMETRIC ACCESS STATE field specifies a secondary target 262 * ASYMMETRIC ACCESS STATE field specifies a secondary target
264 * port asymmetric access state, then the TARGET PORT GROUP OR 263 * port asymmetric access state, then the TARGET PORT GROUP OR
265 * TARGET PORT field specifies the relative target port 264 * TARGET PORT field specifies the relative target port
266 * identifier (see 3.1.120) of the target port for which the 265 * identifier (see 3.1.120) of the target port for which the
267 * secondary target port asymmetric access state shall be 266 * secondary target port asymmetric access state shall be
268 * changed. 267 * changed.
269 */ 268 */
270 if (primary) { 269 if (primary) {
271 tg_pt_id = ((ptr[2] << 8) & 0xff00); 270 tg_pt_id = ((ptr[2] << 8) & 0xff00);
272 tg_pt_id |= (ptr[3] & 0xff); 271 tg_pt_id |= (ptr[3] & 0xff);
273 /* 272 /*
274 * Locate the matching target port group ID from 273 * Locate the matching target port group ID from
275 * the global tg_pt_gp list 274 * the global tg_pt_gp list
276 */ 275 */
277 spin_lock(&su_dev->t10_alua.tg_pt_gps_lock); 276 spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
278 list_for_each_entry(tg_pt_gp, 277 list_for_each_entry(tg_pt_gp,
279 &su_dev->t10_alua.tg_pt_gps_list, 278 &su_dev->t10_alua.tg_pt_gps_list,
280 tg_pt_gp_list) { 279 tg_pt_gp_list) {
281 if (!tg_pt_gp->tg_pt_gp_valid_id) 280 if (!tg_pt_gp->tg_pt_gp_valid_id)
282 continue; 281 continue;
283 282
284 if (tg_pt_id != tg_pt_gp->tg_pt_gp_id) 283 if (tg_pt_id != tg_pt_gp->tg_pt_gp_id)
285 continue; 284 continue;
286 285
287 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); 286 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
288 smp_mb__after_atomic_inc(); 287 smp_mb__after_atomic_inc();
289 spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); 288 spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
290 289
291 rc = core_alua_do_port_transition(tg_pt_gp, 290 rc = core_alua_do_port_transition(tg_pt_gp,
292 dev, l_port, nacl, 291 dev, l_port, nacl,
293 alua_access_state, 1); 292 alua_access_state, 1);
294 293
295 spin_lock(&su_dev->t10_alua.tg_pt_gps_lock); 294 spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
296 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); 295 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
297 smp_mb__after_atomic_dec(); 296 smp_mb__after_atomic_dec();
298 break; 297 break;
299 } 298 }
300 spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); 299 spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
301 /* 300 /*
302 * If no matching target port group ID can be located, 301 * If no matching target port group ID can be located,
303 * throw an exception with ASCQ: INVALID_PARAMETER_LIST 302 * throw an exception with ASCQ: INVALID_PARAMETER_LIST
304 */ 303 */
305 if (rc != 0) { 304 if (rc != 0) {
306 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; 305 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
307 rc = -EINVAL; 306 rc = -EINVAL;
308 goto out; 307 goto out;
309 } 308 }
310 } else { 309 } else {
311 /* 310 /*
312 * Extract the RELATIVE TARGET PORT IDENTIFIER to identify 311 * Extract the RELATIVE TARGET PORT IDENTIFIER to identify
313 * the Target Port in question for the incoming 312 * the Target Port in question for the incoming
314 * SET_TARGET_PORT_GROUPS op. 313 * SET_TARGET_PORT_GROUPS op.
315 */ 314 */
316 rtpi = ((ptr[2] << 8) & 0xff00); 315 rtpi = ((ptr[2] << 8) & 0xff00);
317 rtpi |= (ptr[3] & 0xff); 316 rtpi |= (ptr[3] & 0xff);
318 /* 317 /*
319 * Locate the matching relative target port identifier 318 * Locate the matching relative target port identifier
320 * for the struct se_device storage object. 319 * for the struct se_device storage object.
321 */ 320 */
322 spin_lock(&dev->se_port_lock); 321 spin_lock(&dev->se_port_lock);
323 list_for_each_entry(port, &dev->dev_sep_list, 322 list_for_each_entry(port, &dev->dev_sep_list,
324 sep_list) { 323 sep_list) {
325 if (port->sep_rtpi != rtpi) 324 if (port->sep_rtpi != rtpi)
326 continue; 325 continue;
327 326
328 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; 327 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
329 spin_unlock(&dev->se_port_lock); 328 spin_unlock(&dev->se_port_lock);
330 329
331 rc = core_alua_set_tg_pt_secondary_state( 330 rc = core_alua_set_tg_pt_secondary_state(
332 tg_pt_gp_mem, port, 1, 1); 331 tg_pt_gp_mem, port, 1, 1);
333 332
334 spin_lock(&dev->se_port_lock); 333 spin_lock(&dev->se_port_lock);
335 break; 334 break;
336 } 335 }
337 spin_unlock(&dev->se_port_lock); 336 spin_unlock(&dev->se_port_lock);
338 /* 337 /*
339 * If no matching relative target port identifier can 338 * If no matching relative target port identifier can
340 * be located, throw an exception with ASCQ: 339 * be located, throw an exception with ASCQ:
341 * INVALID_PARAMETER_LIST 340 * INVALID_PARAMETER_LIST
342 */ 341 */
343 if (rc != 0) { 342 if (rc != 0) {
344 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; 343 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
345 rc = -EINVAL; 344 rc = -EINVAL;
346 goto out; 345 goto out;
347 } 346 }
348 } 347 }
349 348
350 ptr += 4; 349 ptr += 4;
351 len += 4; 350 len += 4;
352 } 351 }
353 352
354 out: 353 out:
355 transport_kunmap_first_data_page(cmd); 354 transport_kunmap_first_data_page(cmd);
356 task->task_scsi_status = GOOD; 355 task->task_scsi_status = GOOD;
357 transport_complete_task(task, 1); 356 transport_complete_task(task, 1);
358 return 0; 357 return 0;
359 } 358 }
360 359
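Each STPG parameter descriptor parsed above is 4 bytes: the ASYMMETRIC ACCESS STATE in the low nibble of byte 0, and a big-endian 16-bit TARGET PORT GROUP (or RELATIVE TARGET PORT IDENTIFIER) in bytes 2..3. A standalone sketch of the byte extraction, which also shows why the pre-fix ((ptr[2] << 8) & 0xff) masking could never work: masking the already-shifted high byte with 0xff always yields zero.

    #include <stdint.h>
    #include <stdio.h>

    /* big-endian 16-bit field from bytes 2..3 of a 4-byte STPG descriptor */
    static uint16_t stpg_id(const unsigned char *ptr)
    {
            return (uint16_t)((ptr[2] << 8) | ptr[3]);
    }

    int main(void)
    {
            unsigned char desc[4] = { 0x00, 0x00, 0x01, 0x02 };

            printf("0x%04x\n", stpg_id(desc));      /* prints 0x0102 */
            return 0;
    }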
361 static inline int core_alua_state_nonoptimized( 360 static inline int core_alua_state_nonoptimized(
362 struct se_cmd *cmd, 361 struct se_cmd *cmd,
363 unsigned char *cdb, 362 unsigned char *cdb,
364 int nonop_delay_msecs, 363 int nonop_delay_msecs,
365 u8 *alua_ascq) 364 u8 *alua_ascq)
366 { 365 {
367 /* 366 /*
368 * Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked 367 * Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked
369 * later to determine if processing of this cmd needs to be 368 * later to determine if processing of this cmd needs to be
370 * temporarily delayed for the Active/NonOptimized primary access state. 369 * temporarily delayed for the Active/NonOptimized primary access state.
371 */ 370 */
372 cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED; 371 cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED;
373 cmd->alua_nonop_delay = nonop_delay_msecs; 372 cmd->alua_nonop_delay = nonop_delay_msecs;
374 return 0; 373 return 0;
375 } 374 }
376 375
377 static inline int core_alua_state_standby( 376 static inline int core_alua_state_standby(
378 struct se_cmd *cmd, 377 struct se_cmd *cmd,
379 unsigned char *cdb, 378 unsigned char *cdb,
380 u8 *alua_ascq) 379 u8 *alua_ascq)
381 { 380 {
382 /* 381 /*
383 * Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by 382 * Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by
384 * spc4r17 section 5.9.2.4.4 383 * spc4r17 section 5.9.2.4.4
385 */ 384 */
386 switch (cdb[0]) { 385 switch (cdb[0]) {
387 case INQUIRY: 386 case INQUIRY:
388 case LOG_SELECT: 387 case LOG_SELECT:
389 case LOG_SENSE: 388 case LOG_SENSE:
390 case MODE_SELECT: 389 case MODE_SELECT:
391 case MODE_SENSE: 390 case MODE_SENSE:
392 case REPORT_LUNS: 391 case REPORT_LUNS:
393 case RECEIVE_DIAGNOSTIC: 392 case RECEIVE_DIAGNOSTIC:
394 case SEND_DIAGNOSTIC: 393 case SEND_DIAGNOSTIC:
395 case MAINTENANCE_IN: 394 case MAINTENANCE_IN:
396 switch (cdb[1]) { 395 switch (cdb[1]) {
397 case MI_REPORT_TARGET_PGS: 396 case MI_REPORT_TARGET_PGS:
398 return 0; 397 return 0;
399 default: 398 default:
400 *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY; 399 *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
401 return 1; 400 return 1;
402 } 401 }
403 case MAINTENANCE_OUT: 402 case MAINTENANCE_OUT:
404 switch (cdb[1]) { 403 switch (cdb[1]) {
405 case MO_SET_TARGET_PGS: 404 case MO_SET_TARGET_PGS:
406 return 0; 405 return 0;
407 default: 406 default:
408 *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY; 407 *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
409 return 1; 408 return 1;
410 } 409 }
411 case REQUEST_SENSE: 410 case REQUEST_SENSE:
412 case PERSISTENT_RESERVE_IN: 411 case PERSISTENT_RESERVE_IN:
413 case PERSISTENT_RESERVE_OUT: 412 case PERSISTENT_RESERVE_OUT:
414 case READ_BUFFER: 413 case READ_BUFFER:
415 case WRITE_BUFFER: 414 case WRITE_BUFFER:
416 return 0; 415 return 0;
417 default: 416 default:
418 *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY; 417 *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
419 return 1; 418 return 1;
420 } 419 }
421 420
422 return 0; 421 return 0;
423 } 422 }
424 423
425 static inline int core_alua_state_unavailable( 424 static inline int core_alua_state_unavailable(
426 struct se_cmd *cmd, 425 struct se_cmd *cmd,
427 unsigned char *cdb, 426 unsigned char *cdb,
428 u8 *alua_ascq) 427 u8 *alua_ascq)
429 { 428 {
430 /* 429 /*
431 * Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by 430 * Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by
432 * spc4r17 section 5.9.2.4.5 431 * spc4r17 section 5.9.2.4.5
433 */ 432 */
434 switch (cdb[0]) { 433 switch (cdb[0]) {
435 case INQUIRY: 434 case INQUIRY:
436 case REPORT_LUNS: 435 case REPORT_LUNS:
437 case MAINTENANCE_IN: 436 case MAINTENANCE_IN:
438 switch (cdb[1]) { 437 switch (cdb[1]) {
439 case MI_REPORT_TARGET_PGS: 438 case MI_REPORT_TARGET_PGS:
440 return 0; 439 return 0;
441 default: 440 default:
442 *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE; 441 *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
443 return 1; 442 return 1;
444 } 443 }
445 case MAINTENANCE_OUT: 444 case MAINTENANCE_OUT:
446 switch (cdb[1]) { 445 switch (cdb[1]) {
447 case MO_SET_TARGET_PGS: 446 case MO_SET_TARGET_PGS:
448 return 0; 447 return 0;
449 default: 448 default:
450 *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE; 449 *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
451 return 1; 450 return 1;
452 } 451 }
453 case REQUEST_SENSE: 452 case REQUEST_SENSE:
454 case READ_BUFFER: 453 case READ_BUFFER:
455 case WRITE_BUFFER: 454 case WRITE_BUFFER:
456 return 0; 455 return 0;
457 default: 456 default:
458 *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE; 457 *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
459 return 1; 458 return 1;
460 } 459 }
461 460
462 return 0; 461 return 0;
463 } 462 }
464 463
465 static inline int core_alua_state_transition( 464 static inline int core_alua_state_transition(
466 struct se_cmd *cmd, 465 struct se_cmd *cmd,
467 unsigned char *cdb, 466 unsigned char *cdb,
468 u8 *alua_ascq) 467 u8 *alua_ascq)
469 { 468 {
470 /* 469 /*
471 * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by 470 * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by
472 * spc4r17 section 5.9.2.5 471 * spc4r17 section 5.9.2.5
473 */ 472 */
474 switch (cdb[0]) { 473 switch (cdb[0]) {
475 case INQUIRY: 474 case INQUIRY:
476 case REPORT_LUNS: 475 case REPORT_LUNS:
477 case MAINTENANCE_IN: 476 case MAINTENANCE_IN:
478 switch (cdb[1]) { 477 switch (cdb[1]) {
479 case MI_REPORT_TARGET_PGS: 478 case MI_REPORT_TARGET_PGS:
480 return 0; 479 return 0;
481 default: 480 default:
482 *alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION; 481 *alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
483 return 1; 482 return 1;
484 } 483 }
485 case REQUEST_SENSE: 484 case REQUEST_SENSE:
486 case READ_BUFFER: 485 case READ_BUFFER:
487 case WRITE_BUFFER: 486 case WRITE_BUFFER:
488 return 0; 487 return 0;
489 default: 488 default:
490 *alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION; 489 *alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
491 return 1; 490 return 1;
492 } 491 }
493 492
494 return 0; 493 return 0;
495 } 494 }
496 495
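The three helpers above share one shape: per access state, whitelist a short list of CDB opcodes (with a sub-opcode check for MAINTENANCE_IN/OUT), and for anything else set a state-specific ASCQ and return 1. A standalone table-driven sketch of that filtering (the MAINTENANCE_IN/OUT sub-opcode case is deliberately omitted; the opcode values are the standard SCSI ones):

    #include <stdio.h>

    #define REQUEST_SENSE   0x03
    #define INQUIRY         0x12
    #define REPORT_LUNS     0xa0

    /* return 1 if op is in the state's whitelist; the caller would
     * otherwise set *alua_ascq and fail the command, as above */
    static int cdb_allowed(unsigned char op, const unsigned char *allowed,
                           unsigned int n)
    {
            unsigned int i;

            for (i = 0; i < n; i++)
                    if (allowed[i] == op)
                            return 1;
            return 0;
    }

    int main(void)
    {
            const unsigned char standby_ok[] = {
                    INQUIRY, REPORT_LUNS, REQUEST_SENSE,
            };

            printf("%d %d\n",
                   cdb_allowed(INQUIRY, standby_ok, 3),
                   cdb_allowed(0x2a /* WRITE_10 */, standby_ok, 3));
            return 0;
    }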
497 /* 496 /*
498 * Used for alua_type SPC_ALUA_PASSTHROUGH and SPC2_ALUA_DISABLED 497 * Used for alua_type SPC_ALUA_PASSTHROUGH and SPC2_ALUA_DISABLED
499 * in transport_cmd_sequencer(). This function is assigned to 498 * in transport_cmd_sequencer(). This function is assigned to
500 * struct t10_alua *->state_check() in core_setup_alua() 499 * struct t10_alua *->state_check() in core_setup_alua()
501 */ 500 */
502 static int core_alua_state_check_nop( 501 static int core_alua_state_check_nop(
503 struct se_cmd *cmd, 502 struct se_cmd *cmd,
504 unsigned char *cdb, 503 unsigned char *cdb,
505 u8 *alua_ascq) 504 u8 *alua_ascq)
506 { 505 {
507 return 0; 506 return 0;
508 } 507 }
509 508
510 /* 509 /*
511 * Used for alua_type SPC3_ALUA_EMULATED in transport_cmd_sequencer(). 510 * Used for alua_type SPC3_ALUA_EMULATED in transport_cmd_sequencer().
512 * This function is assigned to struct t10_alua *->state_check() in 511 * This function is assigned to struct t10_alua *->state_check() in
513 * core_setup_alua() 512 * core_setup_alua()
514 * 513 *
515 * Also, this function can return three different return codes to 514 * Also, this function can return three different return codes to
516 * signal transport_generic_cmd_sequencer() 515 * signal transport_generic_cmd_sequencer()
517 * 516 *
518 * return 1: Is used to signal LUN not accessible, and check condition/not ready 517 * return 1: Is used to signal LUN not accessible, and check condition/not ready
519 * return 0: Used to signal success 518 * return 0: Used to signal success
520 * return -1: Used to signal failure, and invalid cdb field 519 * return -1: Used to signal failure, and invalid cdb field
521 */ 520 */
522 static int core_alua_state_check( 521 static int core_alua_state_check(
523 struct se_cmd *cmd, 522 struct se_cmd *cmd,
524 unsigned char *cdb, 523 unsigned char *cdb,
525 u8 *alua_ascq) 524 u8 *alua_ascq)
526 { 525 {
527 struct se_lun *lun = cmd->se_lun; 526 struct se_lun *lun = cmd->se_lun;
528 struct se_port *port = lun->lun_sep; 527 struct se_port *port = lun->lun_sep;
529 struct t10_alua_tg_pt_gp *tg_pt_gp; 528 struct t10_alua_tg_pt_gp *tg_pt_gp;
530 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; 529 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
531 int out_alua_state, nonop_delay_msecs; 530 int out_alua_state, nonop_delay_msecs;
532 531
533 if (!port) 532 if (!port)
534 return 0; 533 return 0;
535 /* 534 /*
536 * First, check for a struct se_port specific secondary ALUA target port 535 * First, check for a struct se_port specific secondary ALUA target port
537 * access state: OFFLINE 536 * access state: OFFLINE
538 */ 537 */
539 if (atomic_read(&port->sep_tg_pt_secondary_offline)) { 538 if (atomic_read(&port->sep_tg_pt_secondary_offline)) {
540 *alua_ascq = ASCQ_04H_ALUA_OFFLINE; 539 *alua_ascq = ASCQ_04H_ALUA_OFFLINE;
541 pr_debug("ALUA: Got secondary offline status for local" 540 pr_debug("ALUA: Got secondary offline status for local"
542 " target port\n"); 541 " target port\n");
543 *alua_ascq = ASCQ_04H_ALUA_OFFLINE; 542 *alua_ascq = ASCQ_04H_ALUA_OFFLINE;
544 return 1; 543 return 1;
545 } 544 }
546 /* 545 /*
547 * Second, obtain the struct t10_alua_tg_pt_gp_member pointer to the 546 * Second, obtain the struct t10_alua_tg_pt_gp_member pointer to the
548 * ALUA target port group, to obtain current ALUA access state. 547 * ALUA target port group, to obtain current ALUA access state.
549 * Otherwise look for the underlying struct se_device association with 548 * Otherwise look for the underlying struct se_device association with
550 * a ALUA logical unit group. 549 * a ALUA logical unit group.
551 */ 550 */
552 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; 551 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
553 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 552 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
554 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; 553 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
555 out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state); 554 out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
556 nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs; 555 nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
557 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 556 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
558 /* 557 /*
559 * Process ALUA_ACCESS_STATE_ACTIVE_OPTMIZED in a separate conditional 558 * Process ALUA_ACCESS_STATE_ACTIVE_OPTMIZED in a separate conditional
560 * statement so the compiler knows explicitly to check this case first. 559 * statement so the compiler knows explicitly to check this case first.
561 * For the Optimized ALUA access state case, we want to process the 560 * For the Optimized ALUA access state case, we want to process the
562 * incoming fabric cmd ASAP. 561 * incoming fabric cmd ASAP.
563 */ 562 */
564 if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTMIZED) 563 if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTMIZED)
565 return 0; 564 return 0;
566 565
567 switch (out_alua_state) { 566 switch (out_alua_state) {
568 case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: 567 case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
569 return core_alua_state_nonoptimized(cmd, cdb, 568 return core_alua_state_nonoptimized(cmd, cdb,
570 nonop_delay_msecs, alua_ascq); 569 nonop_delay_msecs, alua_ascq);
571 case ALUA_ACCESS_STATE_STANDBY: 570 case ALUA_ACCESS_STATE_STANDBY:
572 return core_alua_state_standby(cmd, cdb, alua_ascq); 571 return core_alua_state_standby(cmd, cdb, alua_ascq);
573 case ALUA_ACCESS_STATE_UNAVAILABLE: 572 case ALUA_ACCESS_STATE_UNAVAILABLE:
574 return core_alua_state_unavailable(cmd, cdb, alua_ascq); 573 return core_alua_state_unavailable(cmd, cdb, alua_ascq);
575 case ALUA_ACCESS_STATE_TRANSITION: 574 case ALUA_ACCESS_STATE_TRANSITION:
576 return core_alua_state_transition(cmd, cdb, alua_ascq); 575 return core_alua_state_transition(cmd, cdb, alua_ascq);
577 /* 576 /*
578 * OFFLINE is a secondary ALUA target port group access state, that is 577 * OFFLINE is a secondary ALUA target port group access state, that is
579 * handled above with struct se_port->sep_tg_pt_secondary_offline=1 578 * handled above with struct se_port->sep_tg_pt_secondary_offline=1
580 */ 579 */
581 case ALUA_ACCESS_STATE_OFFLINE: 580 case ALUA_ACCESS_STATE_OFFLINE:
582 default: 581 default:
583 pr_err("Unknown ALUA access state: 0x%02x\n", 582 pr_err("Unknown ALUA access state: 0x%02x\n",
584 out_alua_state); 583 out_alua_state);
585 return -EINVAL; 584 return -EINVAL;
586 } 585 }
587 586
588 return 0; 587 return 0;
589 } 588 }
590 589
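core_alua_state_check() hands its caller a three-way contract, spelled out in the comment block above: 0 lets the command through, 1 fails it as not-ready with the ASCQ chosen by the state helper, and a negative value flags an invalid CDB field. A tiny standalone sketch of a caller honoring that contract (the helper name and the 0x0b ASCQ value are illustrative):

    #include <stdio.h>

    /* stand-in for the state_check() callback: INQUIRY passes,
     * everything else is refused with a standby-style ASCQ */
    static int demo_state_check(unsigned char op, unsigned char *ascq)
    {
            if (op == 0x12)         /* INQUIRY */
                    return 0;
            *ascq = 0x0b;           /* e.g. "target port in standby state" */
            return 1;
    }

    int main(void)
    {
            unsigned char ascq = 0;
            int rc = demo_state_check(0x2a, &ascq);

            if (rc < 0)
                    printf("invalid CDB field\n");
            else if (rc > 0)
                    printf("not ready, ASCQ 0x%02x\n", ascq);
            else
                    printf("dispatch the command\n");
            return 0;
    }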
591 /* 590 /*
592 * Check implicit and explicit ALUA state change requests. 591 * Check implicit and explicit ALUA state change requests.
593 */ 592 */
594 static int core_alua_check_transition(int state, int *primary) 593 static int core_alua_check_transition(int state, int *primary)
595 { 594 {
596 switch (state) { 595 switch (state) {
597 case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED: 596 case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED:
598 case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: 597 case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
599 case ALUA_ACCESS_STATE_STANDBY: 598 case ALUA_ACCESS_STATE_STANDBY:
600 case ALUA_ACCESS_STATE_UNAVAILABLE: 599 case ALUA_ACCESS_STATE_UNAVAILABLE:
601 /* 600 /*
602 * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are 601 * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
603 * defined as primary target port asymmetric access states. 602 * defined as primary target port asymmetric access states.
604 */ 603 */
605 *primary = 1; 604 *primary = 1;
606 break; 605 break;
607 case ALUA_ACCESS_STATE_OFFLINE: 606 case ALUA_ACCESS_STATE_OFFLINE:
608 /* 607 /*
609 * OFFLINE state is defined as a secondary target port 608 * OFFLINE state is defined as a secondary target port
610 * asymmetric access state. 609 * asymmetric access state.
611 */ 610 */
612 *primary = 0; 611 *primary = 0;
613 break; 612 break;
614 default: 613 default:
615 pr_err("Unknown ALUA access state: 0x%02x\n", state); 614 pr_err("Unknown ALUA access state: 0x%02x\n", state);
616 return -EINVAL; 615 return -EINVAL;
617 } 616 }
618 617
619 return 0; 618 return 0;
620 } 619 }
621 620
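core_alua_check_transition() encodes the SPC-4 split the comments describe: Optimized, Non-optimized, Standby, and Unavailable are primary states (the STPG descriptor then names a target port group), while Offline is the lone secondary state (the descriptor then names a relative target port). A standalone sketch of that classification:

    #include <stdio.h>

    enum alua_state { AO, ANO, STANDBY, UNAVAIL, OFFLINE };

    static int check_transition(enum alua_state s, int *primary)
    {
            switch (s) {
            case AO:
            case ANO:
            case STANDBY:
            case UNAVAIL:
                    *primary = 1;   /* primary target port access states */
                    return 0;
            case OFFLINE:
                    *primary = 0;   /* the only secondary access state */
                    return 0;
            default:
                    return -1;      /* unknown state: reject the request */
            }
    }

    int main(void)
    {
            int p = -1;

            check_transition(OFFLINE, &p);
            printf("%d\n", p);      /* prints 0 */
            return 0;
    }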
622 static char *core_alua_dump_state(int state) 621 static char *core_alua_dump_state(int state)
623 { 622 {
624 switch (state) { 623 switch (state) {
625 case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED: 624 case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED:
626 return "Active/Optimized"; 625 return "Active/Optimized";
627 case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: 626 case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
628 return "Active/NonOptimized"; 627 return "Active/NonOptimized";
629 case ALUA_ACCESS_STATE_STANDBY: 628 case ALUA_ACCESS_STATE_STANDBY:
630 return "Standby"; 629 return "Standby";
631 case ALUA_ACCESS_STATE_UNAVAILABLE: 630 case ALUA_ACCESS_STATE_UNAVAILABLE:
632 return "Unavailable"; 631 return "Unavailable";
633 case ALUA_ACCESS_STATE_OFFLINE: 632 case ALUA_ACCESS_STATE_OFFLINE:
634 return "Offline"; 633 return "Offline";
635 default: 634 default:
636 return "Unknown"; 635 return "Unknown";
637 } 636 }
638 637
639 return NULL; 638 return NULL;
640 } 639 }
641 640
642 char *core_alua_dump_status(int status) 641 char *core_alua_dump_status(int status)
643 { 642 {
644 switch (status) { 643 switch (status) {
645 case ALUA_STATUS_NONE: 644 case ALUA_STATUS_NONE:
646 return "None"; 645 return "None";
647 case ALUA_STATUS_ALTERED_BY_EXPLICT_STPG: 646 case ALUA_STATUS_ALTERED_BY_EXPLICT_STPG:
648 return "Altered by Explict STPG"; 647 return "Altered by Explict STPG";
649 case ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA: 648 case ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA:
650 return "Altered by Implict ALUA"; 649 return "Altered by Implict ALUA";
651 default: 650 default:
652 return "Unknown"; 651 return "Unknown";
653 } 652 }
654 653
655 return NULL; 654 return NULL;
656 } 655 }
657 656
658 /* 657 /*
659 * Used by fabric modules to determine when we need to delay processing 658 * Used by fabric modules to determine when we need to delay processing
660 * for the Active/NonOptimized paths. 659 * for the Active/NonOptimized paths.
661 */ 660 */
662 int core_alua_check_nonop_delay( 661 int core_alua_check_nonop_delay(
663 struct se_cmd *cmd) 662 struct se_cmd *cmd)
664 { 663 {
665 if (!(cmd->se_cmd_flags & SCF_ALUA_NON_OPTIMIZED)) 664 if (!(cmd->se_cmd_flags & SCF_ALUA_NON_OPTIMIZED))
666 return 0; 665 return 0;
667 if (in_interrupt()) 666 if (in_interrupt())
668 return 0; 667 return 0;
669 /* 668 /*
670 * The ALUA Active/NonOptimized access state delay can be disabled 669 * The ALUA Active/NonOptimized access state delay can be disabled
671 * via configfs with a value of zero 670 * via configfs with a value of zero
672 */ 671 */
673 if (!cmd->alua_nonop_delay) 672 if (!cmd->alua_nonop_delay)
674 return 0; 673 return 0;
675 /* 674 /*
676 * struct se_cmd->alua_nonop_delay gets set by a target port group 675 * struct se_cmd->alua_nonop_delay gets set by a target port group
677 * defined interval in core_alua_state_nonoptimized() 676 * defined interval in core_alua_state_nonoptimized()
678 */ 677 */
679 msleep_interruptible(cmd->alua_nonop_delay); 678 msleep_interruptible(cmd->alua_nonop_delay);
680 return 0; 679 return 0;
681 } 680 }
682 EXPORT_SYMBOL(core_alua_check_nonop_delay); 681 EXPORT_SYMBOL(core_alua_check_nonop_delay);
683 682
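core_alua_check_nonop_delay() only ever sleeps when three gates all open: the command was flagged SCF_ALUA_NON_OPTIMIZED by the state helper, the caller is in process context, and the configfs-tunable delay is nonzero. A standalone sketch of that gating (names are illustrative):

    #include <stdio.h>

    #define FLAG_NON_OPTIMIZED 0x1

    static int check_nonop_delay(unsigned int flags, int in_irq, int delay_ms)
    {
            if (!(flags & FLAG_NON_OPTIMIZED))
                    return 0;       /* path is Optimized, nothing to do */
            if (in_irq)
                    return 0;       /* never sleep in interrupt context */
            if (!delay_ms)
                    return 0;       /* delay disabled via configfs */
            printf("would msleep %d ms here\n", delay_ms);
            return 0;
    }

    int main(void)
    {
            check_nonop_delay(FLAG_NON_OPTIMIZED, 0, 100);
            return 0;
    }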
684 /* 683 /*
685 * Called with tg_pt_gp->tg_pt_gp_md_mutex or tg_pt_gp_mem->sep_tg_pt_md_mutex 684 * Called with tg_pt_gp->tg_pt_gp_md_mutex or tg_pt_gp_mem->sep_tg_pt_md_mutex
686 * 685 *
687 */ 686 */
688 static int core_alua_write_tpg_metadata( 687 static int core_alua_write_tpg_metadata(
689 const char *path, 688 const char *path,
690 unsigned char *md_buf, 689 unsigned char *md_buf,
691 u32 md_buf_len) 690 u32 md_buf_len)
692 { 691 {
693 mm_segment_t old_fs; 692 mm_segment_t old_fs;
694 struct file *file; 693 struct file *file;
695 struct iovec iov[1]; 694 struct iovec iov[1];
696 int flags = O_RDWR | O_CREAT | O_TRUNC, ret; 695 int flags = O_RDWR | O_CREAT | O_TRUNC, ret;
697 696
698 memset(iov, 0, sizeof(struct iovec)); 697 memset(iov, 0, sizeof(struct iovec));
699 698
700 file = filp_open(path, flags, 0600); 699 file = filp_open(path, flags, 0600);
701 if (IS_ERR(file) || !file || !file->f_dentry) { 700 if (IS_ERR(file) || !file || !file->f_dentry) {
702 pr_err("filp_open(%s) for ALUA metadata failed\n", 701 pr_err("filp_open(%s) for ALUA metadata failed\n",
703 path); 702 path);
704 return -ENODEV; 703 return -ENODEV;
705 } 704 }
706 705
707 iov[0].iov_base = &md_buf[0]; 706 iov[0].iov_base = &md_buf[0];
708 iov[0].iov_len = md_buf_len; 707 iov[0].iov_len = md_buf_len;
709 708
710 old_fs = get_fs(); 709 old_fs = get_fs();
711 set_fs(get_ds()); 710 set_fs(get_ds());
712 ret = vfs_writev(file, &iov[0], 1, &file->f_pos); 711 ret = vfs_writev(file, &iov[0], 1, &file->f_pos);
713 set_fs(old_fs); 712 set_fs(old_fs);
714 713
715 if (ret < 0) { 714 if (ret < 0) {
716 pr_err("Error writing ALUA metadata file: %s\n", path); 715 pr_err("Error writing ALUA metadata file: %s\n", path);
717 filp_close(file, NULL); 716 filp_close(file, NULL);
718 return -EIO; 717 return -EIO;
719 } 718 }
720 filp_close(file, NULL); 719 filp_close(file, NULL);
721 720
722 return 0; 721 return 0;
723 } 722 }
724 723
725 /* 724 /*
726 * Called with tg_pt_gp->tg_pt_gp_md_mutex held 725 * Called with tg_pt_gp->tg_pt_gp_md_mutex held
727 */ 726 */
728 static int core_alua_update_tpg_primary_metadata( 727 static int core_alua_update_tpg_primary_metadata(
729 struct t10_alua_tg_pt_gp *tg_pt_gp, 728 struct t10_alua_tg_pt_gp *tg_pt_gp,
730 int primary_state, 729 int primary_state,
731 unsigned char *md_buf) 730 unsigned char *md_buf)
732 { 731 {
733 struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev; 732 struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
734 struct t10_wwn *wwn = &su_dev->t10_wwn; 733 struct t10_wwn *wwn = &su_dev->t10_wwn;
735 char path[ALUA_METADATA_PATH_LEN]; 734 char path[ALUA_METADATA_PATH_LEN];
736 int len; 735 int len;
737 736
738 memset(path, 0, ALUA_METADATA_PATH_LEN); 737 memset(path, 0, ALUA_METADATA_PATH_LEN);
739 738
740 len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len, 739 len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
741 "tg_pt_gp_id=%hu\n" 740 "tg_pt_gp_id=%hu\n"
742 "alua_access_state=0x%02x\n" 741 "alua_access_state=0x%02x\n"
743 "alua_access_status=0x%02x\n", 742 "alua_access_status=0x%02x\n",
744 tg_pt_gp->tg_pt_gp_id, primary_state, 743 tg_pt_gp->tg_pt_gp_id, primary_state,
745 tg_pt_gp->tg_pt_gp_alua_access_status); 744 tg_pt_gp->tg_pt_gp_alua_access_status);
746 745
747 snprintf(path, ALUA_METADATA_PATH_LEN, 746 snprintf(path, ALUA_METADATA_PATH_LEN,
748 "/var/target/alua/tpgs_%s/%s", &wwn->unit_serial[0], 747 "/var/target/alua/tpgs_%s/%s", &wwn->unit_serial[0],
749 config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item)); 748 config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
750 749
751 return core_alua_write_tpg_metadata(path, md_buf, len); 750 return core_alua_write_tpg_metadata(path, md_buf, len);
752 } 751 }
753 752
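The primary-state metadata written above is just a three-key text record, stored under /var/target/alua/tpgs_<unit_serial>/<tg_pt_gp name>. A standalone sketch that produces the identical record layout (the values are made up; the format string matches the snprintf() above):

    #include <stdio.h>

    int main(void)
    {
            char buf[128];
            int len = snprintf(buf, sizeof(buf),
                               "tg_pt_gp_id=%hu\n"
                               "alua_access_state=0x%02x\n"
                               "alua_access_status=0x%02x\n",
                               (unsigned short)1, 0x01, 0x02);

            fwrite(buf, 1, len, stdout);
            return 0;
    }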
754 static int core_alua_do_transition_tg_pt( 753 static int core_alua_do_transition_tg_pt(
755 struct t10_alua_tg_pt_gp *tg_pt_gp, 754 struct t10_alua_tg_pt_gp *tg_pt_gp,
756 struct se_port *l_port, 755 struct se_port *l_port,
757 struct se_node_acl *nacl, 756 struct se_node_acl *nacl,
758 unsigned char *md_buf, 757 unsigned char *md_buf,
759 int new_state, 758 int new_state,
760 int explict) 759 int explict)
761 { 760 {
762 struct se_dev_entry *se_deve; 761 struct se_dev_entry *se_deve;
763 struct se_lun_acl *lacl; 762 struct se_lun_acl *lacl;
764 struct se_port *port; 763 struct se_port *port;
765 struct t10_alua_tg_pt_gp_member *mem; 764 struct t10_alua_tg_pt_gp_member *mem;
766 int old_state = 0; 765 int old_state = 0;
767 /* 766 /*
768 * Save the old primary ALUA access state, and set the current state 767 * Save the old primary ALUA access state, and set the current state
769 * to ALUA_ACCESS_STATE_TRANSITION. 768 * to ALUA_ACCESS_STATE_TRANSITION.
770 */ 769 */
771 old_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state); 770 old_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
772 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, 771 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
773 ALUA_ACCESS_STATE_TRANSITION); 772 ALUA_ACCESS_STATE_TRANSITION);
774 tg_pt_gp->tg_pt_gp_alua_access_status = (explict) ? 773 tg_pt_gp->tg_pt_gp_alua_access_status = (explict) ?
775 ALUA_STATUS_ALTERED_BY_EXPLICT_STPG : 774 ALUA_STATUS_ALTERED_BY_EXPLICT_STPG :
776 ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA; 775 ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA;
777 /* 776 /*
778 * Check for the optional ALUA primary state transition delay 777 * Check for the optional ALUA primary state transition delay
779 */ 778 */
780 if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0) 779 if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
781 msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs); 780 msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);
782 781
783 spin_lock(&tg_pt_gp->tg_pt_gp_lock); 782 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
784 list_for_each_entry(mem, &tg_pt_gp->tg_pt_gp_mem_list, 783 list_for_each_entry(mem, &tg_pt_gp->tg_pt_gp_mem_list,
785 tg_pt_gp_mem_list) { 784 tg_pt_gp_mem_list) {
786 port = mem->tg_pt; 785 port = mem->tg_pt;
787 /* 786 /*
788 * After an implicit target port asymmetric access state 787 * After an implicit target port asymmetric access state
789 * change, a device server shall establish a unit attention 788 * change, a device server shall establish a unit attention
790 * condition for the initiator port associated with every I_T 789 * condition for the initiator port associated with every I_T
791 * nexus with the additional sense code set to ASYMMETRIC 790 * nexus with the additional sense code set to ASYMMETRIC
792 * ACCESS STATE CHANGED. 791 * ACCESS STATE CHANGED.
793 * 792 *
794 * After an explicit target port asymmetric access state 793 * After an explicit target port asymmetric access state
795 * change, a device server shall establish a unit attention 794 * change, a device server shall establish a unit attention
796 * condition with the additional sense code set to ASYMMETRIC 795 * condition with the additional sense code set to ASYMMETRIC
797 * ACCESS STATE CHANGED for the initiator port associated with 796 * ACCESS STATE CHANGED for the initiator port associated with
798 * every I_T nexus other than the I_T nexus on which the SET 797 * every I_T nexus other than the I_T nexus on which the SET
799 * TARGET PORT GROUPS command was received. 798 * TARGET PORT GROUPS command was received.
800 */ 799 */
801 atomic_inc(&mem->tg_pt_gp_mem_ref_cnt); 800 atomic_inc(&mem->tg_pt_gp_mem_ref_cnt);
802 smp_mb__after_atomic_inc(); 801 smp_mb__after_atomic_inc();
803 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 802 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
804 803
805 spin_lock_bh(&port->sep_alua_lock); 804 spin_lock_bh(&port->sep_alua_lock);
806 list_for_each_entry(se_deve, &port->sep_alua_list, 805 list_for_each_entry(se_deve, &port->sep_alua_list,
807 alua_port_list) { 806 alua_port_list) {
808 lacl = se_deve->se_lun_acl; 807 lacl = se_deve->se_lun_acl;
809 /* 808 /*
810 * se_deve->se_lun_acl pointer may be NULL for an 809 * se_deve->se_lun_acl pointer may be NULL for an
811 * entry created without explicit Node+MappedLUN ACLs 810 * entry created without explicit Node+MappedLUN ACLs
812 */ 811 */
813 if (!lacl) 812 if (!lacl)
814 continue; 813 continue;
815 814
816 if (explict && 815 if (explict &&
817 (nacl != NULL) && (nacl == lacl->se_lun_nacl) && 816 (nacl != NULL) && (nacl == lacl->se_lun_nacl) &&
818 (l_port != NULL) && (l_port == port)) 817 (l_port != NULL) && (l_port == port))
819 continue; 818 continue;
820 819
821 core_scsi3_ua_allocate(lacl->se_lun_nacl, 820 core_scsi3_ua_allocate(lacl->se_lun_nacl,
822 se_deve->mapped_lun, 0x2A, 821 se_deve->mapped_lun, 0x2A,
823 ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED); 822 ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED);
824 } 823 }
825 spin_unlock_bh(&port->sep_alua_lock); 824 spin_unlock_bh(&port->sep_alua_lock);
826 825
827 spin_lock(&tg_pt_gp->tg_pt_gp_lock); 826 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
828 atomic_dec(&mem->tg_pt_gp_mem_ref_cnt); 827 atomic_dec(&mem->tg_pt_gp_mem_ref_cnt);
829 smp_mb__after_atomic_dec(); 828 smp_mb__after_atomic_dec();
830 } 829 }
831 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 830 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
832 /* 831 /*
833 * Update the ALUA metadata buf that has been allocated in 832 * Update the ALUA metadata buf that has been allocated in
834 * core_alua_do_port_transition(), this metadata will be written 833 * core_alua_do_port_transition(), this metadata will be written
835 * to struct file. 834 * to struct file.
836 * 835 *
837 * Note that there is the case where we do not want to update the 836 * Note that there is the case where we do not want to update the
838 * metadata when the saved metadata is being parsed in userspace 837 * metadata when the saved metadata is being parsed in userspace
839 * when setting the existing port access state and access status. 838 * when setting the existing port access state and access status.
840 * 839 *
841 * Also note that the failure to write out the ALUA metadata to 840 * Also note that the failure to write out the ALUA metadata to
842 * struct file does NOT affect the actual ALUA transition. 841 * struct file does NOT affect the actual ALUA transition.
843 */ 842 */
844 if (tg_pt_gp->tg_pt_gp_write_metadata) { 843 if (tg_pt_gp->tg_pt_gp_write_metadata) {
845 mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex); 844 mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex);
846 core_alua_update_tpg_primary_metadata(tg_pt_gp, 845 core_alua_update_tpg_primary_metadata(tg_pt_gp,
847 new_state, md_buf); 846 new_state, md_buf);
848 mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex); 847 mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex);
849 } 848 }
850 /* 849 /*
851 * Set the current primary ALUA access state to the requested new state 850 * Set the current primary ALUA access state to the requested new state
852 */ 851 */
853 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state); 852 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state);
854 853
855 pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu" 854 pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
856 " from primary access state %s to %s\n", (explict) ? "explict" : 855 " from primary access state %s to %s\n", (explict) ? "explict" :
857 "implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), 856 "implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
858 tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state), 857 tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state),
859 core_alua_dump_state(new_state)); 858 core_alua_dump_state(new_state));
860 859
861 return 0; 860 return 0;
862 } 861 }
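
The function above fixes the ordering of a primary transition: park the group in ALUA_ACCESS_STATE_TRANSITION, record whether the change came from an explicit STPG or an implicit decision, optionally delay, raise unit attentions, write metadata, and only then commit the new state. A minimal userspace C model of that ordering follows; the enum, alua_name() and do_transition() names are invented for the sketch and are not kernel API.

    #include <stdio.h>

    /* Illustrative subset of the ALUA primary access states. */
    enum alua_state { AO, ANO, STANDBY, UNAVAILABLE, TRANSITION };

    static const char *alua_name(enum alua_state s)
    {
            switch (s) {
            case AO:          return "Active/Optimized";
            case ANO:         return "Active/NonOptimized";
            case STANDBY:     return "Standby";
            case UNAVAILABLE: return "Unavailable";
            case TRANSITION:  return "Transition";
            }
            return "?";
    }

    /* Model of the sequence: old -> TRANSITION -> (delay, UA, metadata) -> new. */
    static void do_transition(enum alua_state *cur, enum alua_state new_state,
                              int explicit_stpg)
    {
            enum alua_state old = *cur;

            *cur = TRANSITION;      /* 1. park the group in TRANSITION        */
            /* 2. optional tg_pt_gp_trans_delay_msecs sleep would happen here */
            /* 3. unit attention per I_T nexus and 4. metadata write elided   */
            *cur = new_state;       /* 5. commit the requested state          */

            printf("%s ALUA transition from %s to %s\n",
                   explicit_stpg ? "explicit" : "implicit",
                   alua_name(old), alua_name(new_state));
    }

    int main(void)
    {
            enum alua_state group_state = AO;

            do_transition(&group_state, STANDBY, 1);
            return 0;
    }
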
863 862
864 int core_alua_do_port_transition( 863 int core_alua_do_port_transition(
865 struct t10_alua_tg_pt_gp *l_tg_pt_gp, 864 struct t10_alua_tg_pt_gp *l_tg_pt_gp,
866 struct se_device *l_dev, 865 struct se_device *l_dev,
867 struct se_port *l_port, 866 struct se_port *l_port,
868 struct se_node_acl *l_nacl, 867 struct se_node_acl *l_nacl,
869 int new_state, 868 int new_state,
870 int explict) 869 int explict)
871 { 870 {
872 struct se_device *dev; 871 struct se_device *dev;
873 struct se_port *port; 872 struct se_port *port;
874 struct se_subsystem_dev *su_dev; 873 struct se_subsystem_dev *su_dev;
875 struct se_node_acl *nacl; 874 struct se_node_acl *nacl;
876 struct t10_alua_lu_gp *lu_gp; 875 struct t10_alua_lu_gp *lu_gp;
877 struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem; 876 struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem;
878 struct t10_alua_tg_pt_gp *tg_pt_gp; 877 struct t10_alua_tg_pt_gp *tg_pt_gp;
879 unsigned char *md_buf; 878 unsigned char *md_buf;
880 int primary; 879 int primary;
881 880
882 if (core_alua_check_transition(new_state, &primary) != 0) 881 if (core_alua_check_transition(new_state, &primary) != 0)
883 return -EINVAL; 882 return -EINVAL;
884 883
885 md_buf = kzalloc(l_tg_pt_gp->tg_pt_gp_md_buf_len, GFP_KERNEL); 884 md_buf = kzalloc(l_tg_pt_gp->tg_pt_gp_md_buf_len, GFP_KERNEL);
886 if (!md_buf) { 885 if (!md_buf) {
887 pr_err("Unable to allocate buf for ALUA metadata\n"); 886 pr_err("Unable to allocate buf for ALUA metadata\n");
888 return -ENOMEM; 887 return -ENOMEM;
889 } 888 }
890 889
891 local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem; 890 local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem;
892 spin_lock(&local_lu_gp_mem->lu_gp_mem_lock); 891 spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
893 lu_gp = local_lu_gp_mem->lu_gp; 892 lu_gp = local_lu_gp_mem->lu_gp;
894 atomic_inc(&lu_gp->lu_gp_ref_cnt); 893 atomic_inc(&lu_gp->lu_gp_ref_cnt);
895 smp_mb__after_atomic_inc(); 894 smp_mb__after_atomic_inc();
896 spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock); 895 spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock);
897 /* 896 /*
898 * For storage objects that are members of the 'default_lu_gp', 897 * For storage objects that are members of the 'default_lu_gp',
899 * we only do the transition on the passed *l_tg_pt_gp, and not 898 * we only do the transition on the passed *l_tg_pt_gp, and not
900 * on all of the matching target port groups IDs in default_lu_gp. 899 * on all of the matching target port groups IDs in default_lu_gp.
901 */ 900 */
902 if (!lu_gp->lu_gp_id) { 901 if (!lu_gp->lu_gp_id) {
903 /* 902 /*
904 * core_alua_do_transition_tg_pt() will always return 903 * core_alua_do_transition_tg_pt() will always return
905 * success. 904 * success.
906 */ 905 */
907 core_alua_do_transition_tg_pt(l_tg_pt_gp, l_port, l_nacl, 906 core_alua_do_transition_tg_pt(l_tg_pt_gp, l_port, l_nacl,
908 md_buf, new_state, explict); 907 md_buf, new_state, explict);
909 atomic_dec(&lu_gp->lu_gp_ref_cnt); 908 atomic_dec(&lu_gp->lu_gp_ref_cnt);
910 smp_mb__after_atomic_dec(); 909 smp_mb__after_atomic_dec();
911 kfree(md_buf); 910 kfree(md_buf);
912 return 0; 911 return 0;
913 } 912 }
914 /* 913 /*
915 * For all other LU groups aside from 'default_lu_gp', walk all of 914 * For all other LU groups aside from 'default_lu_gp', walk all of
916 * the associated storage objects looking for a matching target port 915 * the associated storage objects looking for a matching target port
917 * group ID from the local target port group. 916 * group ID from the local target port group.
918 */ 917 */
919 spin_lock(&lu_gp->lu_gp_lock); 918 spin_lock(&lu_gp->lu_gp_lock);
920 list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, 919 list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list,
921 lu_gp_mem_list) { 920 lu_gp_mem_list) {
922 921
923 dev = lu_gp_mem->lu_gp_mem_dev; 922 dev = lu_gp_mem->lu_gp_mem_dev;
924 su_dev = dev->se_sub_dev; 923 su_dev = dev->se_sub_dev;
925 atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt); 924 atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt);
926 smp_mb__after_atomic_inc(); 925 smp_mb__after_atomic_inc();
927 spin_unlock(&lu_gp->lu_gp_lock); 926 spin_unlock(&lu_gp->lu_gp_lock);
928 927
929 spin_lock(&su_dev->t10_alua.tg_pt_gps_lock); 928 spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
930 list_for_each_entry(tg_pt_gp, 929 list_for_each_entry(tg_pt_gp,
931 &su_dev->t10_alua.tg_pt_gps_list, 930 &su_dev->t10_alua.tg_pt_gps_list,
932 tg_pt_gp_list) { 931 tg_pt_gp_list) {
933 932
934 if (!tg_pt_gp->tg_pt_gp_valid_id) 933 if (!tg_pt_gp->tg_pt_gp_valid_id)
935 continue; 934 continue;
936 /* 935 /*
937 * If the target port asymmetric access state is 936 * If the target port asymmetric access state is
938 * changed for any target port group accessible via 937 * changed for any target port group accessible via
939 * a logical unit within a LU group, the target port 938 * a logical unit within a LU group, the target port
940 * asymmetric access states for the same target port 939 * asymmetric access states for the same target port
941 * group accessible via other logical units in that 940 * group accessible via other logical units in that
942 * LU group will also change. 941 * LU group will also change.
943 */ 942 */
944 if (l_tg_pt_gp->tg_pt_gp_id != tg_pt_gp->tg_pt_gp_id) 943 if (l_tg_pt_gp->tg_pt_gp_id != tg_pt_gp->tg_pt_gp_id)
945 continue; 944 continue;
946 945
947 if (l_tg_pt_gp == tg_pt_gp) { 946 if (l_tg_pt_gp == tg_pt_gp) {
948 port = l_port; 947 port = l_port;
949 nacl = l_nacl; 948 nacl = l_nacl;
950 } else { 949 } else {
951 port = NULL; 950 port = NULL;
952 nacl = NULL; 951 nacl = NULL;
953 } 952 }
954 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); 953 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
955 smp_mb__after_atomic_inc(); 954 smp_mb__after_atomic_inc();
956 spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); 955 spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
957 /* 956 /*
958 * core_alua_do_transition_tg_pt() will always return 957 * core_alua_do_transition_tg_pt() will always return
959 * success. 958 * success.
960 */ 959 */
961 core_alua_do_transition_tg_pt(tg_pt_gp, port, 960 core_alua_do_transition_tg_pt(tg_pt_gp, port,
962 nacl, md_buf, new_state, explict); 961 nacl, md_buf, new_state, explict);
963 962
964 spin_lock(&su_dev->t10_alua.tg_pt_gps_lock); 963 spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
965 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); 964 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
966 smp_mb__after_atomic_dec(); 965 smp_mb__after_atomic_dec();
967 } 966 }
968 spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); 967 spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
969 968
970 spin_lock(&lu_gp->lu_gp_lock); 969 spin_lock(&lu_gp->lu_gp_lock);
971 atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt); 970 atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt);
972 smp_mb__after_atomic_dec(); 971 smp_mb__after_atomic_dec();
973 } 972 }
974 spin_unlock(&lu_gp->lu_gp_lock); 973 spin_unlock(&lu_gp->lu_gp_lock);
975 974
976 pr_debug("Successfully processed LU Group: %s all ALUA TG PT" 975 pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
977 " Group IDs: %hu %s transition to primary state: %s\n", 976 " Group IDs: %hu %s transition to primary state: %s\n",
978 config_item_name(&lu_gp->lu_gp_group.cg_item), 977 config_item_name(&lu_gp->lu_gp_group.cg_item),
979 l_tg_pt_gp->tg_pt_gp_id, (explict) ? "explict" : "implict", 978 l_tg_pt_gp->tg_pt_gp_id, (explict) ? "explict" : "implict",
980 core_alua_dump_state(new_state)); 979 core_alua_dump_state(new_state));
981 980
982 atomic_dec(&lu_gp->lu_gp_ref_cnt); 981 atomic_dec(&lu_gp->lu_gp_ref_cnt);
983 smp_mb__after_atomic_dec(); 982 smp_mb__after_atomic_dec();
984 kfree(md_buf); 983 kfree(md_buf);
985 return 0; 984 return 0;
986 } 985 }
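
core_alua_do_port_transition() fans one request out across the LU group: every member device's target port group list is scanned, and any group whose valid ID equals the ID of the group the command arrived on is transitioned as well. A standalone C sketch of just that matching rule, assuming plain arrays in place of the kernel's locked lists (all struct and variable names here are made up):

    #include <stdio.h>

    struct tg_pt_gp { unsigned short id; int valid_id; };
    struct dev      { const char *name; struct tg_pt_gp gps[2]; };

    int main(void)
    {
            /* Two devices in one LU group, each with two port groups. */
            struct dev lu_group[] = {
                    { "dev0", { { 1, 1 }, { 2, 1 } } },
                    { "dev1", { { 2, 1 }, { 3, 0 } } },  /* ID 3 not valid */
            };
            unsigned short local_id = 2;    /* ID of the group the STPG hit */

            for (int d = 0; d < 2; d++)
                    for (int g = 0; g < 2; g++) {
                            struct tg_pt_gp *gp = &lu_group[d].gps[g];

                            if (!gp->valid_id || gp->id != local_id)
                                    continue;   /* same skip rules as above */
                            printf("transition %s tg_pt_gp id %hu\n",
                                   lu_group[d].name, gp->id);
                    }
            return 0;
    }
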
987 986
988 /* 987 /*
989 * Called with port->sep_tg_pt_md_mutex held 988 * Called with port->sep_tg_pt_md_mutex held
990 */ 989 */
991 static int core_alua_update_tpg_secondary_metadata( 990 static int core_alua_update_tpg_secondary_metadata(
992 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, 991 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
993 struct se_port *port, 992 struct se_port *port,
994 unsigned char *md_buf, 993 unsigned char *md_buf,
995 u32 md_buf_len) 994 u32 md_buf_len)
996 { 995 {
997 struct se_portal_group *se_tpg = port->sep_tpg; 996 struct se_portal_group *se_tpg = port->sep_tpg;
998 char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN]; 997 char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
999 int len; 998 int len;
1000 999
1001 memset(path, 0, ALUA_METADATA_PATH_LEN); 1000 memset(path, 0, ALUA_METADATA_PATH_LEN);
1002 memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN); 1001 memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
1003 1002
1004 len = snprintf(wwn, ALUA_SECONDARY_METADATA_WWN_LEN, "%s", 1003 len = snprintf(wwn, ALUA_SECONDARY_METADATA_WWN_LEN, "%s",
1005 se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg)); 1004 se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg));
1006 1005
1007 if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL) 1006 if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL)
1008 snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu", 1007 snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu",
1009 se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg)); 1008 se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
1010 1009
1011 len = snprintf(md_buf, md_buf_len, "alua_tg_pt_offline=%d\n" 1010 len = snprintf(md_buf, md_buf_len, "alua_tg_pt_offline=%d\n"
1012 "alua_tg_pt_status=0x%02x\n", 1011 "alua_tg_pt_status=0x%02x\n",
1013 atomic_read(&port->sep_tg_pt_secondary_offline), 1012 atomic_read(&port->sep_tg_pt_secondary_offline),
1014 port->sep_tg_pt_secondary_stat); 1013 port->sep_tg_pt_secondary_stat);
1015 1014
1016 snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/%s/%s/lun_%u", 1015 snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/%s/%s/lun_%u",
1017 se_tpg->se_tpg_tfo->get_fabric_name(), wwn, 1016 se_tpg->se_tpg_tfo->get_fabric_name(), wwn,
1018 port->sep_lun->unpacked_lun); 1017 port->sep_lun->unpacked_lun);
1019 1018
1020 return core_alua_write_tpg_metadata(path, md_buf, len); 1019 return core_alua_write_tpg_metadata(path, md_buf, len);
1021 } 1020 }
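
The secondary metadata persisted here is only two key=value lines, written to a per-LUN path under /var/target/alua built from the fabric name, the WWN (plus an optional "+tpgt" suffix) and the unpacked LUN. A userspace C sketch of the same snprintf() formatting; the fabric name, WWN, tag and LUN below are sample values:

    #include <stdio.h>

    #define PATH_LEN 512
    #define WWN_LEN  256

    int main(void)
    {
            char path[PATH_LEN], wwn[WWN_LEN], buf[128];
            int len, offline = 1;
            unsigned int status = 0x01;     /* e.g. altered by explicit STPG */

            /* WWN plus optional "+tpgt" suffix, as in the kernel helper. */
            len = snprintf(wwn, WWN_LEN, "%s", "iqn.2003-01.org.example:sn1");
            snprintf(wwn + len, WWN_LEN - len, "+%hu", (unsigned short)1);

            len = snprintf(buf, sizeof(buf),
                           "alua_tg_pt_offline=%d\n"
                           "alua_tg_pt_status=0x%02x\n", offline, status);
            snprintf(path, PATH_LEN, "/var/target/alua/%s/%s/lun_%u",
                     "iscsi", wwn, 0u);      /* sample fabric name and LUN */

            printf("%s (%d bytes):\n%s", path, len, buf);
            return 0;
    }
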
1022 1021
1023 static int core_alua_set_tg_pt_secondary_state( 1022 static int core_alua_set_tg_pt_secondary_state(
1024 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, 1023 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
1025 struct se_port *port, 1024 struct se_port *port,
1026 int explict, 1025 int explict,
1027 int offline) 1026 int offline)
1028 { 1027 {
1029 struct t10_alua_tg_pt_gp *tg_pt_gp; 1028 struct t10_alua_tg_pt_gp *tg_pt_gp;
1030 unsigned char *md_buf; 1029 unsigned char *md_buf;
1031 u32 md_buf_len; 1030 u32 md_buf_len;
1032 int trans_delay_msecs; 1031 int trans_delay_msecs;
1033 1032
1034 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1033 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1035 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; 1034 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
1036 if (!tg_pt_gp) { 1035 if (!tg_pt_gp) {
1037 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1036 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1038 pr_err("Unable to complete secondary state" 1037 pr_err("Unable to complete secondary state"
1039 " transition\n"); 1038 " transition\n");
1040 return -EINVAL; 1039 return -EINVAL;
1041 } 1040 }
1042 trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs; 1041 trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs;
1043 /* 1042 /*
1044 * Set the secondary ALUA target port access state to OFFLINE 1043 * Set the secondary ALUA target port access state to OFFLINE
1045 * or clear the previously set secondary state for struct se_port 1044 * or clear the previously set secondary state for struct se_port
1046 */ 1045 */
1047 if (offline) 1046 if (offline)
1048 atomic_set(&port->sep_tg_pt_secondary_offline, 1); 1047 atomic_set(&port->sep_tg_pt_secondary_offline, 1);
1049 else 1048 else
1050 atomic_set(&port->sep_tg_pt_secondary_offline, 0); 1049 atomic_set(&port->sep_tg_pt_secondary_offline, 0);
1051 1050
1052 md_buf_len = tg_pt_gp->tg_pt_gp_md_buf_len; 1051 md_buf_len = tg_pt_gp->tg_pt_gp_md_buf_len;
1053 port->sep_tg_pt_secondary_stat = (explict) ? 1052 port->sep_tg_pt_secondary_stat = (explict) ?
1054 ALUA_STATUS_ALTERED_BY_EXPLICT_STPG : 1053 ALUA_STATUS_ALTERED_BY_EXPLICT_STPG :
1055 ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA; 1054 ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA;
1056 1055
1057 pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu" 1056 pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
1058 " to secondary access state: %s\n", (explict) ? "explict" : 1057 " to secondary access state: %s\n", (explict) ? "explict" :
1059 "implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), 1058 "implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
1060 tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE"); 1059 tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");
1061 1060
1062 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1061 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1063 /* 1062 /*
1064 * Do the optional transition delay after we set the secondary 1063 * Do the optional transition delay after we set the secondary
1065 * ALUA access state. 1064 * ALUA access state.
1066 */ 1065 */
1067 if (trans_delay_msecs != 0) 1066 if (trans_delay_msecs != 0)
1068 msleep_interruptible(trans_delay_msecs); 1067 msleep_interruptible(trans_delay_msecs);
1069 /* 1068 /*
1070 * See if we need to update the ALUA fabric port metadata for 1069 * See if we need to update the ALUA fabric port metadata for
1071 * secondary state and status 1070 * secondary state and status
1072 */ 1071 */
1073 if (port->sep_tg_pt_secondary_write_md) { 1072 if (port->sep_tg_pt_secondary_write_md) {
1074 md_buf = kzalloc(md_buf_len, GFP_KERNEL); 1073 md_buf = kzalloc(md_buf_len, GFP_KERNEL);
1075 if (!md_buf) { 1074 if (!md_buf) {
1076 pr_err("Unable to allocate md_buf for" 1075 pr_err("Unable to allocate md_buf for"
1077 " secondary ALUA access metadata\n"); 1076 " secondary ALUA access metadata\n");
1078 return -ENOMEM; 1077 return -ENOMEM;
1079 } 1078 }
1080 mutex_lock(&port->sep_tg_pt_md_mutex); 1079 mutex_lock(&port->sep_tg_pt_md_mutex);
1081 core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port, 1080 core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port,
1082 md_buf, md_buf_len); 1081 md_buf, md_buf_len);
1083 mutex_unlock(&port->sep_tg_pt_md_mutex); 1082 mutex_unlock(&port->sep_tg_pt_md_mutex);
1084 1083
1085 kfree(md_buf); 1084 kfree(md_buf);
1086 } 1085 }
1087 1086
1088 return 0; 1087 return 0;
1089 } 1088 }
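
For the secondary (per-port) state only two facts are tracked: an offline bit and a status value recording whether an explicit STPG or implicit ALUA made the change. A compact C model of that bookkeeping; the two status constants are arbitrary stand-ins, not the kernel's ALUA_STATUS_* values:

    #include <stdio.h>

    enum { ALTERED_BY_EXPLICIT_STPG = 0x01, ALTERED_BY_IMPLICIT_ALUA = 0x02 };

    struct sep {                    /* stands in for struct se_port */
            int secondary_offline;
            int secondary_stat;
    };

    static void set_secondary_state(struct sep *p, int explicit_stpg,
                                    int offline)
    {
            p->secondary_offline = offline ? 1 : 0;
            p->secondary_stat = explicit_stpg ? ALTERED_BY_EXPLICIT_STPG :
                                                ALTERED_BY_IMPLICIT_ALUA;
    }

    int main(void)
    {
            struct sep port = { 0, 0 };

            set_secondary_state(&port, 1, 1);
            printf("offline=%d stat=0x%02x\n",
                   port.secondary_offline, port.secondary_stat);
            return 0;
    }
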
1090 1089
1091 struct t10_alua_lu_gp * 1090 struct t10_alua_lu_gp *
1092 core_alua_allocate_lu_gp(const char *name, int def_group) 1091 core_alua_allocate_lu_gp(const char *name, int def_group)
1093 { 1092 {
1094 struct t10_alua_lu_gp *lu_gp; 1093 struct t10_alua_lu_gp *lu_gp;
1095 1094
1096 lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL); 1095 lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL);
1097 if (!lu_gp) { 1096 if (!lu_gp) {
1098 pr_err("Unable to allocate struct t10_alua_lu_gp\n"); 1097 pr_err("Unable to allocate struct t10_alua_lu_gp\n");
1099 return ERR_PTR(-ENOMEM); 1098 return ERR_PTR(-ENOMEM);
1100 } 1099 }
1101 INIT_LIST_HEAD(&lu_gp->lu_gp_node); 1100 INIT_LIST_HEAD(&lu_gp->lu_gp_node);
1102 INIT_LIST_HEAD(&lu_gp->lu_gp_mem_list); 1101 INIT_LIST_HEAD(&lu_gp->lu_gp_mem_list);
1103 spin_lock_init(&lu_gp->lu_gp_lock); 1102 spin_lock_init(&lu_gp->lu_gp_lock);
1104 atomic_set(&lu_gp->lu_gp_ref_cnt, 0); 1103 atomic_set(&lu_gp->lu_gp_ref_cnt, 0);
1105 1104
1106 if (def_group) { 1105 if (def_group) {
1107 lu_gp->lu_gp_id = alua_lu_gps_counter++; 1106 lu_gp->lu_gp_id = alua_lu_gps_counter++;
1108 lu_gp->lu_gp_valid_id = 1; 1107 lu_gp->lu_gp_valid_id = 1;
1109 alua_lu_gps_count++; 1108 alua_lu_gps_count++;
1110 } 1109 }
1111 1110
1112 return lu_gp; 1111 return lu_gp;
1113 } 1112 }
1114 1113
1115 int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id) 1114 int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id)
1116 { 1115 {
1117 struct t10_alua_lu_gp *lu_gp_tmp; 1116 struct t10_alua_lu_gp *lu_gp_tmp;
1118 u16 lu_gp_id_tmp; 1117 u16 lu_gp_id_tmp;
1119 /* 1118 /*
1120 * The lu_gp->lu_gp_id may only be set once.. 1119 * The lu_gp->lu_gp_id may only be set once..
1121 */ 1120 */
1122 if (lu_gp->lu_gp_valid_id) { 1121 if (lu_gp->lu_gp_valid_id) {
1123 pr_warn("ALUA LU Group already has a valid ID," 1122 pr_warn("ALUA LU Group already has a valid ID,"
1124 " ignoring request\n"); 1123 " ignoring request\n");
1125 return -EINVAL; 1124 return -EINVAL;
1126 } 1125 }
1127 1126
1128 spin_lock(&lu_gps_lock); 1127 spin_lock(&lu_gps_lock);
1129 if (alua_lu_gps_count == 0x0000ffff) { 1128 if (alua_lu_gps_count == 0x0000ffff) {
1130 pr_err("Maximum ALUA alua_lu_gps_count:" 1129 pr_err("Maximum ALUA alua_lu_gps_count:"
1131 " 0x0000ffff reached\n"); 1130 " 0x0000ffff reached\n");
1132 spin_unlock(&lu_gps_lock); 1131 spin_unlock(&lu_gps_lock);
1133 kmem_cache_free(t10_alua_lu_gp_cache, lu_gp); 1132 kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
1134 return -ENOSPC; 1133 return -ENOSPC;
1135 } 1134 }
1136 again: 1135 again:
1137 lu_gp_id_tmp = (lu_gp_id != 0) ? lu_gp_id : 1136 lu_gp_id_tmp = (lu_gp_id != 0) ? lu_gp_id :
1138 alua_lu_gps_counter++; 1137 alua_lu_gps_counter++;
1139 1138
1140 list_for_each_entry(lu_gp_tmp, &lu_gps_list, lu_gp_node) { 1139 list_for_each_entry(lu_gp_tmp, &lu_gps_list, lu_gp_node) {
1141 if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) { 1140 if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) {
1142 if (!lu_gp_id) 1141 if (!lu_gp_id)
1143 goto again; 1142 goto again;
1144 1143
1145 pr_warn("ALUA Logical Unit Group ID: %hu" 1144 pr_warn("ALUA Logical Unit Group ID: %hu"
1146 " already exists, ignoring request\n", 1145 " already exists, ignoring request\n",
1147 lu_gp_id); 1146 lu_gp_id);
1148 spin_unlock(&lu_gps_lock); 1147 spin_unlock(&lu_gps_lock);
1149 return -EINVAL; 1148 return -EINVAL;
1150 } 1149 }
1151 } 1150 }
1152 1151
1153 lu_gp->lu_gp_id = lu_gp_id_tmp; 1152 lu_gp->lu_gp_id = lu_gp_id_tmp;
1154 lu_gp->lu_gp_valid_id = 1; 1153 lu_gp->lu_gp_valid_id = 1;
1155 list_add_tail(&lu_gp->lu_gp_node, &lu_gps_list); 1154 list_add_tail(&lu_gp->lu_gp_node, &lu_gps_list);
1156 alua_lu_gps_count++; 1155 alua_lu_gps_count++;
1157 spin_unlock(&lu_gps_lock); 1156 spin_unlock(&lu_gps_lock);
1158 1157
1159 return 0; 1158 return 0;
1160 } 1159 }
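
The ID assignment above (and its twin core_alua_set_tg_pt_gp_id() later in this file) follows one rule: a caller-supplied nonzero ID must be unique or the request is rejected, while ID 0 means "allocate the next free one", retrying the counter on collision via the again: label. A standalone C version of that loop, using a fixed-size array where the kernel walks a locked list:

    #include <stdio.h>

    #define MAX_GROUPS 8

    static unsigned short used[MAX_GROUPS];
    static int nused;
    static unsigned short counter = 1;

    /* Returns the assigned ID, or 0 on failure (0 is never a valid ID). */
    static unsigned short set_gp_id(unsigned short requested)
    {
            unsigned short id;

            if (nused == MAX_GROUPS)
                    return 0;                /* table full            */
    again:
            id = requested ? requested : counter++;
            for (int i = 0; i < nused; i++)
                    if (used[i] == id) {
                            if (!requested)
                                    goto again;  /* auto: try next ID */
                            return 0;            /* explicit ID taken */
                    }
            used[nused++] = id;
            return id;
    }

    int main(void)
    {
            printf("%hu\n", set_gp_id(5));  /* explicit: 5           */
            printf("%hu\n", set_gp_id(0));  /* auto-allocated: 1     */
            printf("%hu\n", set_gp_id(5));  /* collision: returns 0  */
            return 0;
    }
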
1161 1160
1162 static struct t10_alua_lu_gp_member * 1161 static struct t10_alua_lu_gp_member *
1163 core_alua_allocate_lu_gp_mem(struct se_device *dev) 1162 core_alua_allocate_lu_gp_mem(struct se_device *dev)
1164 { 1163 {
1165 struct t10_alua_lu_gp_member *lu_gp_mem; 1164 struct t10_alua_lu_gp_member *lu_gp_mem;
1166 1165
1167 lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL); 1166 lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL);
1168 if (!lu_gp_mem) { 1167 if (!lu_gp_mem) {
1169 pr_err("Unable to allocate struct t10_alua_lu_gp_member\n"); 1168 pr_err("Unable to allocate struct t10_alua_lu_gp_member\n");
1170 return ERR_PTR(-ENOMEM); 1169 return ERR_PTR(-ENOMEM);
1171 } 1170 }
1172 INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list); 1171 INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list);
1173 spin_lock_init(&lu_gp_mem->lu_gp_mem_lock); 1172 spin_lock_init(&lu_gp_mem->lu_gp_mem_lock);
1174 atomic_set(&lu_gp_mem->lu_gp_mem_ref_cnt, 0); 1173 atomic_set(&lu_gp_mem->lu_gp_mem_ref_cnt, 0);
1175 1174
1176 lu_gp_mem->lu_gp_mem_dev = dev; 1175 lu_gp_mem->lu_gp_mem_dev = dev;
1177 dev->dev_alua_lu_gp_mem = lu_gp_mem; 1176 dev->dev_alua_lu_gp_mem = lu_gp_mem;
1178 1177
1179 return lu_gp_mem; 1178 return lu_gp_mem;
1180 } 1179 }
1181 1180
1182 void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp) 1181 void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
1183 { 1182 {
1184 struct t10_alua_lu_gp_member *lu_gp_mem, *lu_gp_mem_tmp; 1183 struct t10_alua_lu_gp_member *lu_gp_mem, *lu_gp_mem_tmp;
1185 /* 1184 /*
1186 * Once we have reached this point, config_item_put() has 1185 * Once we have reached this point, config_item_put() has
1187 * already been called from target_core_alua_drop_lu_gp(). 1186 * already been called from target_core_alua_drop_lu_gp().
1188 * 1187 *
1189 * Here, we remove the *lu_gp from the global list so that 1188 * Here, we remove the *lu_gp from the global list so that
1190 * no associations can be made while we are releasing 1189 * no associations can be made while we are releasing
1191 * struct t10_alua_lu_gp. 1190 * struct t10_alua_lu_gp.
1192 */ 1191 */
1193 spin_lock(&lu_gps_lock); 1192 spin_lock(&lu_gps_lock);
1194 list_del(&lu_gp->lu_gp_node); 1193 list_del(&lu_gp->lu_gp_node);
1195 alua_lu_gps_count--; 1194 alua_lu_gps_count--;
1196 spin_unlock(&lu_gps_lock); 1195 spin_unlock(&lu_gps_lock);
1197 /* 1196 /*
1198 * Allow struct t10_alua_lu_gp * referenced by core_alua_get_lu_gp_by_name() 1197 * Allow struct t10_alua_lu_gp * referenced by core_alua_get_lu_gp_by_name()
1199 * in target_core_configfs.c:target_core_store_alua_lu_gp() to be 1198 * in target_core_configfs.c:target_core_store_alua_lu_gp() to be
1200 * released with core_alua_put_lu_gp_from_name() 1199 * released with core_alua_put_lu_gp_from_name()
1201 */ 1200 */
1202 while (atomic_read(&lu_gp->lu_gp_ref_cnt)) 1201 while (atomic_read(&lu_gp->lu_gp_ref_cnt))
1203 cpu_relax(); 1202 cpu_relax();
1204 /* 1203 /*
1205 * Release reference to struct t10_alua_lu_gp * from all associated 1204 * Release reference to struct t10_alua_lu_gp * from all associated
1206 * struct se_device. 1205 * struct se_device.
1207 */ 1206 */
1208 spin_lock(&lu_gp->lu_gp_lock); 1207 spin_lock(&lu_gp->lu_gp_lock);
1209 list_for_each_entry_safe(lu_gp_mem, lu_gp_mem_tmp, 1208 list_for_each_entry_safe(lu_gp_mem, lu_gp_mem_tmp,
1210 &lu_gp->lu_gp_mem_list, lu_gp_mem_list) { 1209 &lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
1211 if (lu_gp_mem->lu_gp_assoc) { 1210 if (lu_gp_mem->lu_gp_assoc) {
1212 list_del(&lu_gp_mem->lu_gp_mem_list); 1211 list_del(&lu_gp_mem->lu_gp_mem_list);
1213 lu_gp->lu_gp_members--; 1212 lu_gp->lu_gp_members--;
1214 lu_gp_mem->lu_gp_assoc = 0; 1213 lu_gp_mem->lu_gp_assoc = 0;
1215 } 1214 }
1216 spin_unlock(&lu_gp->lu_gp_lock); 1215 spin_unlock(&lu_gp->lu_gp_lock);
1217 /* 1216 /*
1218 * 1217 *
1219 * lu_gp_mem is associated with a single 1218 * lu_gp_mem is associated with a single
1220 * struct se_device->dev_alua_lu_gp_mem, and is released when 1219 * struct se_device->dev_alua_lu_gp_mem, and is released when
1221 * struct se_device is released via core_alua_free_lu_gp_mem(). 1220 * struct se_device is released via core_alua_free_lu_gp_mem().
1222 * 1221 *
1223 * If the passed lu_gp does NOT match the default_lu_gp, assume 1222 * If the passed lu_gp does NOT match the default_lu_gp, assume
1224 * we want to re-associate a given lu_gp_mem with default_lu_gp. 1223 * we want to re-associate a given lu_gp_mem with default_lu_gp.
1225 */ 1224 */
1226 spin_lock(&lu_gp_mem->lu_gp_mem_lock); 1225 spin_lock(&lu_gp_mem->lu_gp_mem_lock);
1227 if (lu_gp != default_lu_gp) 1226 if (lu_gp != default_lu_gp)
1228 __core_alua_attach_lu_gp_mem(lu_gp_mem, 1227 __core_alua_attach_lu_gp_mem(lu_gp_mem,
1229 default_lu_gp); 1228 default_lu_gp);
1230 else 1229 else
1231 lu_gp_mem->lu_gp = NULL; 1230 lu_gp_mem->lu_gp = NULL;
1232 spin_unlock(&lu_gp_mem->lu_gp_mem_lock); 1231 spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1233 1232
1234 spin_lock(&lu_gp->lu_gp_lock); 1233 spin_lock(&lu_gp->lu_gp_lock);
1235 } 1234 }
1236 spin_unlock(&lu_gp->lu_gp_lock); 1235 spin_unlock(&lu_gp->lu_gp_lock);
1237 1236
1238 kmem_cache_free(t10_alua_lu_gp_cache, lu_gp); 1237 kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
1239 } 1238 }
1240 1239
1241 void core_alua_free_lu_gp_mem(struct se_device *dev) 1240 void core_alua_free_lu_gp_mem(struct se_device *dev)
1242 { 1241 {
1243 struct se_subsystem_dev *su_dev = dev->se_sub_dev; 1242 struct se_subsystem_dev *su_dev = dev->se_sub_dev;
1244 struct t10_alua *alua = &su_dev->t10_alua; 1243 struct t10_alua *alua = &su_dev->t10_alua;
1245 struct t10_alua_lu_gp *lu_gp; 1244 struct t10_alua_lu_gp *lu_gp;
1246 struct t10_alua_lu_gp_member *lu_gp_mem; 1245 struct t10_alua_lu_gp_member *lu_gp_mem;
1247 1246
1248 if (alua->alua_type != SPC3_ALUA_EMULATED) 1247 if (alua->alua_type != SPC3_ALUA_EMULATED)
1249 return; 1248 return;
1250 1249
1251 lu_gp_mem = dev->dev_alua_lu_gp_mem; 1250 lu_gp_mem = dev->dev_alua_lu_gp_mem;
1252 if (!lu_gp_mem) 1251 if (!lu_gp_mem)
1253 return; 1252 return;
1254 1253
1255 while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt)) 1254 while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt))
1256 cpu_relax(); 1255 cpu_relax();
1257 1256
1258 spin_lock(&lu_gp_mem->lu_gp_mem_lock); 1257 spin_lock(&lu_gp_mem->lu_gp_mem_lock);
1259 lu_gp = lu_gp_mem->lu_gp; 1258 lu_gp = lu_gp_mem->lu_gp;
1260 if (lu_gp) { 1259 if (lu_gp) {
1261 spin_lock(&lu_gp->lu_gp_lock); 1260 spin_lock(&lu_gp->lu_gp_lock);
1262 if (lu_gp_mem->lu_gp_assoc) { 1261 if (lu_gp_mem->lu_gp_assoc) {
1263 list_del(&lu_gp_mem->lu_gp_mem_list); 1262 list_del(&lu_gp_mem->lu_gp_mem_list);
1264 lu_gp->lu_gp_members--; 1263 lu_gp->lu_gp_members--;
1265 lu_gp_mem->lu_gp_assoc = 0; 1264 lu_gp_mem->lu_gp_assoc = 0;
1266 } 1265 }
1267 spin_unlock(&lu_gp->lu_gp_lock); 1266 spin_unlock(&lu_gp->lu_gp_lock);
1268 lu_gp_mem->lu_gp = NULL; 1267 lu_gp_mem->lu_gp = NULL;
1269 } 1268 }
1270 spin_unlock(&lu_gp_mem->lu_gp_mem_lock); 1269 spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1271 1270
1272 kmem_cache_free(t10_alua_lu_gp_mem_cache, lu_gp_mem); 1271 kmem_cache_free(t10_alua_lu_gp_mem_cache, lu_gp_mem);
1273 } 1272 }
1274 1273
1275 struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name) 1274 struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name)
1276 { 1275 {
1277 struct t10_alua_lu_gp *lu_gp; 1276 struct t10_alua_lu_gp *lu_gp;
1278 struct config_item *ci; 1277 struct config_item *ci;
1279 1278
1280 spin_lock(&lu_gps_lock); 1279 spin_lock(&lu_gps_lock);
1281 list_for_each_entry(lu_gp, &lu_gps_list, lu_gp_node) { 1280 list_for_each_entry(lu_gp, &lu_gps_list, lu_gp_node) {
1282 if (!lu_gp->lu_gp_valid_id) 1281 if (!lu_gp->lu_gp_valid_id)
1283 continue; 1282 continue;
1284 ci = &lu_gp->lu_gp_group.cg_item; 1283 ci = &lu_gp->lu_gp_group.cg_item;
1285 if (!strcmp(config_item_name(ci), name)) { 1284 if (!strcmp(config_item_name(ci), name)) {
1286 atomic_inc(&lu_gp->lu_gp_ref_cnt); 1285 atomic_inc(&lu_gp->lu_gp_ref_cnt);
1287 spin_unlock(&lu_gps_lock); 1286 spin_unlock(&lu_gps_lock);
1288 return lu_gp; 1287 return lu_gp;
1289 } 1288 }
1290 } 1289 }
1291 spin_unlock(&lu_gps_lock); 1290 spin_unlock(&lu_gps_lock);
1292 1291
1293 return NULL; 1292 return NULL;
1294 } 1293 }
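
This lookup illustrates the get/put pattern used throughout the file: take the list lock, match on the configfs item name, and bump the reference count before unlocking so the group cannot be torn down while the caller uses it; core_alua_put_lu_gp_from_name() then drops that reference under the same lock. A pthread-based userspace sketch of the pairing (the lu_gp struct and the two-entry list here are invented):

    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    struct lu_gp { const char *name; int ref_cnt; struct lu_gp *next; };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct lu_gp b = { "backup",  0, NULL };
    static struct lu_gp a = { "default", 0, &b };
    static struct lu_gp *groups = &a;

    static struct lu_gp *get_by_name(const char *name)
    {
            pthread_mutex_lock(&lock);
            for (struct lu_gp *gp = groups; gp; gp = gp->next)
                    if (!strcmp(gp->name, name)) {
                            gp->ref_cnt++;          /* pin before unlock */
                            pthread_mutex_unlock(&lock);
                            return gp;
                    }
            pthread_mutex_unlock(&lock);
            return NULL;
    }

    static void put_from_name(struct lu_gp *gp)
    {
            pthread_mutex_lock(&lock);
            gp->ref_cnt--;
            pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
            struct lu_gp *gp = get_by_name("backup");

            if (gp) {
                    printf("%s ref_cnt=%d\n", gp->name, gp->ref_cnt);
                    put_from_name(gp);
            }
            return 0;
    }

The pinned reference taken here is exactly what the free path's busy-wait on the ref count, seen in core_alua_free_lu_gp() above, is waiting to drain.
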
1295 1294
1296 void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp) 1295 void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp)
1297 { 1296 {
1298 spin_lock(&lu_gps_lock); 1297 spin_lock(&lu_gps_lock);
1299 atomic_dec(&lu_gp->lu_gp_ref_cnt); 1298 atomic_dec(&lu_gp->lu_gp_ref_cnt);
1300 spin_unlock(&lu_gps_lock); 1299 spin_unlock(&lu_gps_lock);
1301 } 1300 }
1302 1301
1303 /* 1302 /*
1304 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock 1303 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock
1305 */ 1304 */
1306 void __core_alua_attach_lu_gp_mem( 1305 void __core_alua_attach_lu_gp_mem(
1307 struct t10_alua_lu_gp_member *lu_gp_mem, 1306 struct t10_alua_lu_gp_member *lu_gp_mem,
1308 struct t10_alua_lu_gp *lu_gp) 1307 struct t10_alua_lu_gp *lu_gp)
1309 { 1308 {
1310 spin_lock(&lu_gp->lu_gp_lock); 1309 spin_lock(&lu_gp->lu_gp_lock);
1311 lu_gp_mem->lu_gp = lu_gp; 1310 lu_gp_mem->lu_gp = lu_gp;
1312 lu_gp_mem->lu_gp_assoc = 1; 1311 lu_gp_mem->lu_gp_assoc = 1;
1313 list_add_tail(&lu_gp_mem->lu_gp_mem_list, &lu_gp->lu_gp_mem_list); 1312 list_add_tail(&lu_gp_mem->lu_gp_mem_list, &lu_gp->lu_gp_mem_list);
1314 lu_gp->lu_gp_members++; 1313 lu_gp->lu_gp_members++;
1315 spin_unlock(&lu_gp->lu_gp_lock); 1314 spin_unlock(&lu_gp->lu_gp_lock);
1316 } 1315 }
1317 1316
1318 /* 1317 /*
1319 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock 1318 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock
1320 */ 1319 */
1321 void __core_alua_drop_lu_gp_mem( 1320 void __core_alua_drop_lu_gp_mem(
1322 struct t10_alua_lu_gp_member *lu_gp_mem, 1321 struct t10_alua_lu_gp_member *lu_gp_mem,
1323 struct t10_alua_lu_gp *lu_gp) 1322 struct t10_alua_lu_gp *lu_gp)
1324 { 1323 {
1325 spin_lock(&lu_gp->lu_gp_lock); 1324 spin_lock(&lu_gp->lu_gp_lock);
1326 list_del(&lu_gp_mem->lu_gp_mem_list); 1325 list_del(&lu_gp_mem->lu_gp_mem_list);
1327 lu_gp_mem->lu_gp = NULL; 1326 lu_gp_mem->lu_gp = NULL;
1328 lu_gp_mem->lu_gp_assoc = 0; 1327 lu_gp_mem->lu_gp_assoc = 0;
1329 lu_gp->lu_gp_members--; 1328 lu_gp->lu_gp_members--;
1330 spin_unlock(&lu_gp->lu_gp_lock); 1329 spin_unlock(&lu_gp->lu_gp_lock);
1331 } 1330 }
1332 1331
1333 struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp( 1332 struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
1334 struct se_subsystem_dev *su_dev, 1333 struct se_subsystem_dev *su_dev,
1335 const char *name, 1334 const char *name,
1336 int def_group) 1335 int def_group)
1337 { 1336 {
1338 struct t10_alua_tg_pt_gp *tg_pt_gp; 1337 struct t10_alua_tg_pt_gp *tg_pt_gp;
1339 1338
1340 tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL); 1339 tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL);
1341 if (!tg_pt_gp) { 1340 if (!tg_pt_gp) {
1342 pr_err("Unable to allocate struct t10_alua_tg_pt_gp\n"); 1341 pr_err("Unable to allocate struct t10_alua_tg_pt_gp\n");
1343 return NULL; 1342 return NULL;
1344 } 1343 }
1345 INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list); 1344 INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
1346 INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_mem_list); 1345 INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_mem_list);
1347 mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex); 1346 mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
1348 spin_lock_init(&tg_pt_gp->tg_pt_gp_lock); 1347 spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
1349 atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0); 1348 atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
1350 tg_pt_gp->tg_pt_gp_su_dev = su_dev; 1349 tg_pt_gp->tg_pt_gp_su_dev = su_dev;
1351 tg_pt_gp->tg_pt_gp_md_buf_len = ALUA_MD_BUF_LEN; 1350 tg_pt_gp->tg_pt_gp_md_buf_len = ALUA_MD_BUF_LEN;
1352 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, 1351 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
1353 ALUA_ACCESS_STATE_ACTIVE_OPTMIZED); 1352 ALUA_ACCESS_STATE_ACTIVE_OPTMIZED);
1354 /* 1353 /*
1355 * Enable both explicit and implicit ALUA support by default 1354 * Enable both explicit and implicit ALUA support by default
1356 */ 1355 */
1357 tg_pt_gp->tg_pt_gp_alua_access_type = 1356 tg_pt_gp->tg_pt_gp_alua_access_type =
1358 TPGS_EXPLICT_ALUA | TPGS_IMPLICT_ALUA; 1357 TPGS_EXPLICT_ALUA | TPGS_IMPLICT_ALUA;
1359 /* 1358 /*
1360 * Set the default Active/NonOptimized Delay in milliseconds 1359 * Set the default Active/NonOptimized Delay in milliseconds
1361 */ 1360 */
1362 tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS; 1361 tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS;
1363 tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS; 1362 tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS;
1364 1363
1365 if (def_group) { 1364 if (def_group) {
1366 spin_lock(&su_dev->t10_alua.tg_pt_gps_lock); 1365 spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
1367 tg_pt_gp->tg_pt_gp_id = 1366 tg_pt_gp->tg_pt_gp_id =
1368 su_dev->t10_alua.alua_tg_pt_gps_counter++; 1367 su_dev->t10_alua.alua_tg_pt_gps_counter++;
1369 tg_pt_gp->tg_pt_gp_valid_id = 1; 1368 tg_pt_gp->tg_pt_gp_valid_id = 1;
1370 su_dev->t10_alua.alua_tg_pt_gps_count++; 1369 su_dev->t10_alua.alua_tg_pt_gps_count++;
1371 list_add_tail(&tg_pt_gp->tg_pt_gp_list, 1370 list_add_tail(&tg_pt_gp->tg_pt_gp_list,
1372 &su_dev->t10_alua.tg_pt_gps_list); 1371 &su_dev->t10_alua.tg_pt_gps_list);
1373 spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); 1372 spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
1374 } 1373 }
1375 1374
1376 return tg_pt_gp; 1375 return tg_pt_gp;
1377 } 1376 }
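
tg_pt_gp_alua_access_type is a bitmask, so a group can advertise explicit STPG support, implicit transitions, both (the default set just above), or neither. A tiny C illustration of testing such a mask; the flag values are stand-ins, not the TPGS_EXPLICT_ALUA/TPGS_IMPLICT_ALUA macros themselves:

    #include <stdio.h>

    #define EXPLICIT_ALUA 0x01      /* stand-in for TPGS_EXPLICT_ALUA */
    #define IMPLICIT_ALUA 0x02      /* stand-in for TPGS_IMPLICT_ALUA */

    int main(void)
    {
            int access_type = EXPLICIT_ALUA | IMPLICIT_ALUA;  /* default */

            if (access_type & EXPLICIT_ALUA)
                    printf("SET TARGET PORT GROUPS accepted\n");
            if (access_type & IMPLICIT_ALUA)
                    printf("target may transition states on its own\n");
            if (!access_type)
                    printf("ALUA not supported\n");
            return 0;
    }
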
1378 1377
1379 int core_alua_set_tg_pt_gp_id( 1378 int core_alua_set_tg_pt_gp_id(
1380 struct t10_alua_tg_pt_gp *tg_pt_gp, 1379 struct t10_alua_tg_pt_gp *tg_pt_gp,
1381 u16 tg_pt_gp_id) 1380 u16 tg_pt_gp_id)
1382 { 1381 {
1383 struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev; 1382 struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
1384 struct t10_alua_tg_pt_gp *tg_pt_gp_tmp; 1383 struct t10_alua_tg_pt_gp *tg_pt_gp_tmp;
1385 u16 tg_pt_gp_id_tmp; 1384 u16 tg_pt_gp_id_tmp;
1386 /* 1385 /*
1387 * The tg_pt_gp->tg_pt_gp_id may only be set once.. 1386 * The tg_pt_gp->tg_pt_gp_id may only be set once..
1388 */ 1387 */
1389 if (tg_pt_gp->tg_pt_gp_valid_id) { 1388 if (tg_pt_gp->tg_pt_gp_valid_id) {
1390 pr_warn("ALUA TG PT Group already has a valid ID," 1389 pr_warn("ALUA TG PT Group already has a valid ID,"
1391 " ignoring request\n"); 1390 " ignoring request\n");
1392 return -EINVAL; 1391 return -EINVAL;
1393 } 1392 }
1394 1393
1395 spin_lock(&su_dev->t10_alua.tg_pt_gps_lock); 1394 spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
1396 if (su_dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) { 1395 if (su_dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) {
1397 pr_err("Maximum ALUA alua_tg_pt_gps_count:" 1396 pr_err("Maximum ALUA alua_tg_pt_gps_count:"
1398 " 0x0000ffff reached\n"); 1397 " 0x0000ffff reached\n");
1399 spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); 1398 spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
1400 kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp); 1399 kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
1401 return -ENOSPC; 1400 return -ENOSPC;
1402 } 1401 }
1403 again: 1402 again:
1404 tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id : 1403 tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id :
1405 su_dev->t10_alua.alua_tg_pt_gps_counter++; 1404 su_dev->t10_alua.alua_tg_pt_gps_counter++;
1406 1405
1407 list_for_each_entry(tg_pt_gp_tmp, &su_dev->t10_alua.tg_pt_gps_list, 1406 list_for_each_entry(tg_pt_gp_tmp, &su_dev->t10_alua.tg_pt_gps_list,
1408 tg_pt_gp_list) { 1407 tg_pt_gp_list) {
1409 if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) { 1408 if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) {
1410 if (!tg_pt_gp_id) 1409 if (!tg_pt_gp_id)
1411 goto again; 1410 goto again;
1412 1411
1413 pr_err("ALUA Target Port Group ID: %hu already" 1412 pr_err("ALUA Target Port Group ID: %hu already"
1414 " exists, ignoring request\n", tg_pt_gp_id); 1413 " exists, ignoring request\n", tg_pt_gp_id);
1415 spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); 1414 spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
1416 return -EINVAL; 1415 return -EINVAL;
1417 } 1416 }
1418 } 1417 }
1419 1418
1420 tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp; 1419 tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp;
1421 tg_pt_gp->tg_pt_gp_valid_id = 1; 1420 tg_pt_gp->tg_pt_gp_valid_id = 1;
1422 list_add_tail(&tg_pt_gp->tg_pt_gp_list, 1421 list_add_tail(&tg_pt_gp->tg_pt_gp_list,
1423 &su_dev->t10_alua.tg_pt_gps_list); 1422 &su_dev->t10_alua.tg_pt_gps_list);
1424 su_dev->t10_alua.alua_tg_pt_gps_count++; 1423 su_dev->t10_alua.alua_tg_pt_gps_count++;
1425 spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); 1424 spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
1426 1425
1427 return 0; 1426 return 0;
1428 } 1427 }
1429 1428
1430 struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem( 1429 struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
1431 struct se_port *port) 1430 struct se_port *port)
1432 { 1431 {
1433 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; 1432 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
1434 1433
1435 tg_pt_gp_mem = kmem_cache_zalloc(t10_alua_tg_pt_gp_mem_cache, 1434 tg_pt_gp_mem = kmem_cache_zalloc(t10_alua_tg_pt_gp_mem_cache,
1436 GFP_KERNEL); 1435 GFP_KERNEL);
1437 if (!tg_pt_gp_mem) { 1436 if (!tg_pt_gp_mem) {
1438 pr_err("Unable to allocate struct t10_alua_tg_pt_gp_member\n"); 1437 pr_err("Unable to allocate struct t10_alua_tg_pt_gp_member\n");
1439 return ERR_PTR(-ENOMEM); 1438 return ERR_PTR(-ENOMEM);
1440 } 1439 }
1441 INIT_LIST_HEAD(&tg_pt_gp_mem->tg_pt_gp_mem_list); 1440 INIT_LIST_HEAD(&tg_pt_gp_mem->tg_pt_gp_mem_list);
1442 spin_lock_init(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1441 spin_lock_init(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1443 atomic_set(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt, 0); 1442 atomic_set(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt, 0);
1444 1443
1445 tg_pt_gp_mem->tg_pt = port; 1444 tg_pt_gp_mem->tg_pt = port;
1446 port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem; 1445 port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem;
1447 1446
1448 return tg_pt_gp_mem; 1447 return tg_pt_gp_mem;
1449 } 1448 }
1450 1449
1451 void core_alua_free_tg_pt_gp( 1450 void core_alua_free_tg_pt_gp(
1452 struct t10_alua_tg_pt_gp *tg_pt_gp) 1451 struct t10_alua_tg_pt_gp *tg_pt_gp)
1453 { 1452 {
1454 struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev; 1453 struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
1455 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *tg_pt_gp_mem_tmp; 1454 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *tg_pt_gp_mem_tmp;
1456 /* 1455 /*
1457 * Once we have reached this point, config_item_put() has already 1456 * Once we have reached this point, config_item_put() has already
1458 * been called from target_core_alua_drop_tg_pt_gp(). 1457 * been called from target_core_alua_drop_tg_pt_gp().
1459 * 1458 *
1460 * Here we remove *tg_pt_gp from the global list so that 1459 * Here we remove *tg_pt_gp from the global list so that
1461 * no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS 1460 * no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS
1462 * can be made while we are releasing struct t10_alua_tg_pt_gp. 1461 * can be made while we are releasing struct t10_alua_tg_pt_gp.
1463 */ 1462 */
1464 spin_lock(&su_dev->t10_alua.tg_pt_gps_lock); 1463 spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
1465 list_del(&tg_pt_gp->tg_pt_gp_list); 1464 list_del(&tg_pt_gp->tg_pt_gp_list);
1466 su_dev->t10_alua.alua_tg_pt_gps_count--; 1465 su_dev->t10_alua.alua_tg_pt_gps_count--;
1467 spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); 1466 spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
1468 /* 1467 /*
1469 * Allow a struct t10_alua_tg_pt_gp * referenced by 1468 * Allow a struct t10_alua_tg_pt_gp * referenced by
1470 * core_alua_get_tg_pt_gp_by_name() in 1469 * core_alua_get_tg_pt_gp_by_name() in
1471 * target_core_configfs.c:target_core_store_alua_tg_pt_gp() 1470 * target_core_configfs.c:target_core_store_alua_tg_pt_gp()
1472 * to be released with core_alua_put_tg_pt_gp_from_name(). 1471 * to be released with core_alua_put_tg_pt_gp_from_name().
1473 */ 1472 */
1474 while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt)) 1473 while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt))
1475 cpu_relax(); 1474 cpu_relax();
1476 /* 1475 /*
1477 * Release reference to struct t10_alua_tg_pt_gp from all associated 1476 * Release reference to struct t10_alua_tg_pt_gp from all associated
1478 * struct se_port. 1477 * struct se_port.
1479 */ 1478 */
1480 spin_lock(&tg_pt_gp->tg_pt_gp_lock); 1479 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1481 list_for_each_entry_safe(tg_pt_gp_mem, tg_pt_gp_mem_tmp, 1480 list_for_each_entry_safe(tg_pt_gp_mem, tg_pt_gp_mem_tmp,
1482 &tg_pt_gp->tg_pt_gp_mem_list, tg_pt_gp_mem_list) { 1481 &tg_pt_gp->tg_pt_gp_mem_list, tg_pt_gp_mem_list) {
1483 if (tg_pt_gp_mem->tg_pt_gp_assoc) { 1482 if (tg_pt_gp_mem->tg_pt_gp_assoc) {
1484 list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list); 1483 list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
1485 tg_pt_gp->tg_pt_gp_members--; 1484 tg_pt_gp->tg_pt_gp_members--;
1486 tg_pt_gp_mem->tg_pt_gp_assoc = 0; 1485 tg_pt_gp_mem->tg_pt_gp_assoc = 0;
1487 } 1486 }
1488 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 1487 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1489 /* 1488 /*
1490 * tg_pt_gp_mem is associated with a single 1489 * tg_pt_gp_mem is associated with a single
1491 * se_port->sep_alua_tg_pt_gp_mem, and is released via 1490 * se_port->sep_alua_tg_pt_gp_mem, and is released via
1492 * core_alua_free_tg_pt_gp_mem(). 1491 * core_alua_free_tg_pt_gp_mem().
1493 * 1492 *
1494 * If the passed tg_pt_gp does NOT match the default_tg_pt_gp, 1493 * If the passed tg_pt_gp does NOT match the default_tg_pt_gp,
1495 * assume we want to re-associate a given tg_pt_gp_mem with 1494 * assume we want to re-associate a given tg_pt_gp_mem with
1496 * default_tg_pt_gp. 1495 * default_tg_pt_gp.
1497 */ 1496 */
1498 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1497 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1499 if (tg_pt_gp != su_dev->t10_alua.default_tg_pt_gp) { 1498 if (tg_pt_gp != su_dev->t10_alua.default_tg_pt_gp) {
1500 __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, 1499 __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
1501 su_dev->t10_alua.default_tg_pt_gp); 1500 su_dev->t10_alua.default_tg_pt_gp);
1502 } else 1501 } else
1503 tg_pt_gp_mem->tg_pt_gp = NULL; 1502 tg_pt_gp_mem->tg_pt_gp = NULL;
1504 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1503 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1505 1504
1506 spin_lock(&tg_pt_gp->tg_pt_gp_lock); 1505 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1507 } 1506 }
1508 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 1507 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1509 1508
1510 kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp); 1509 kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
1511 } 1510 }
1512 1511
1513 void core_alua_free_tg_pt_gp_mem(struct se_port *port) 1512 void core_alua_free_tg_pt_gp_mem(struct se_port *port)
1514 { 1513 {
1515 struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev; 1514 struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
1516 struct t10_alua *alua = &su_dev->t10_alua; 1515 struct t10_alua *alua = &su_dev->t10_alua;
1517 struct t10_alua_tg_pt_gp *tg_pt_gp; 1516 struct t10_alua_tg_pt_gp *tg_pt_gp;
1518 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; 1517 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
1519 1518
1520 if (alua->alua_type != SPC3_ALUA_EMULATED) 1519 if (alua->alua_type != SPC3_ALUA_EMULATED)
1521 return; 1520 return;
1522 1521
1523 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; 1522 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
1524 if (!tg_pt_gp_mem) 1523 if (!tg_pt_gp_mem)
1525 return; 1524 return;
1526 1525
1527 while (atomic_read(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt)) 1526 while (atomic_read(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt))
1528 cpu_relax(); 1527 cpu_relax();
1529 1528
1530 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1529 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1531 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; 1530 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
1532 if (tg_pt_gp) { 1531 if (tg_pt_gp) {
1533 spin_lock(&tg_pt_gp->tg_pt_gp_lock); 1532 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1534 if (tg_pt_gp_mem->tg_pt_gp_assoc) { 1533 if (tg_pt_gp_mem->tg_pt_gp_assoc) {
1535 list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list); 1534 list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
1536 tg_pt_gp->tg_pt_gp_members--; 1535 tg_pt_gp->tg_pt_gp_members--;
1537 tg_pt_gp_mem->tg_pt_gp_assoc = 0; 1536 tg_pt_gp_mem->tg_pt_gp_assoc = 0;
1538 } 1537 }
1539 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 1538 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1540 tg_pt_gp_mem->tg_pt_gp = NULL; 1539 tg_pt_gp_mem->tg_pt_gp = NULL;
1541 } 1540 }
1542 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1541 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1543 1542
1544 kmem_cache_free(t10_alua_tg_pt_gp_mem_cache, tg_pt_gp_mem); 1543 kmem_cache_free(t10_alua_tg_pt_gp_mem_cache, tg_pt_gp_mem);
1545 } 1544 }
1546 1545
1547 static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name( 1546 static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
1548 struct se_subsystem_dev *su_dev, 1547 struct se_subsystem_dev *su_dev,
1549 const char *name) 1548 const char *name)
1550 { 1549 {
1551 struct t10_alua_tg_pt_gp *tg_pt_gp; 1550 struct t10_alua_tg_pt_gp *tg_pt_gp;
1552 struct config_item *ci; 1551 struct config_item *ci;
1553 1552
1554 spin_lock(&su_dev->t10_alua.tg_pt_gps_lock); 1553 spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
1555 list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list, 1554 list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
1556 tg_pt_gp_list) { 1555 tg_pt_gp_list) {
1557 if (!tg_pt_gp->tg_pt_gp_valid_id) 1556 if (!tg_pt_gp->tg_pt_gp_valid_id)
1558 continue; 1557 continue;
1559 ci = &tg_pt_gp->tg_pt_gp_group.cg_item; 1558 ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
1560 if (!strcmp(config_item_name(ci), name)) { 1559 if (!strcmp(config_item_name(ci), name)) {
1561 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); 1560 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
1562 spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); 1561 spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
1563 return tg_pt_gp; 1562 return tg_pt_gp;
1564 } 1563 }
1565 } 1564 }
1566 spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); 1565 spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
1567 1566
1568 return NULL; 1567 return NULL;
1569 } 1568 }
1570 1569
1571 static void core_alua_put_tg_pt_gp_from_name( 1570 static void core_alua_put_tg_pt_gp_from_name(
1572 struct t10_alua_tg_pt_gp *tg_pt_gp) 1571 struct t10_alua_tg_pt_gp *tg_pt_gp)
1573 { 1572 {
1574 struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev; 1573 struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
1575 1574
1576 spin_lock(&su_dev->t10_alua.tg_pt_gps_lock); 1575 spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
1577 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); 1576 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
1578 spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); 1577 spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
1579 } 1578 }
1580 1579
1581 /* 1580 /*
1582 * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held 1581 * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held
1583 */ 1582 */
1584 void __core_alua_attach_tg_pt_gp_mem( 1583 void __core_alua_attach_tg_pt_gp_mem(
1585 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, 1584 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
1586 struct t10_alua_tg_pt_gp *tg_pt_gp) 1585 struct t10_alua_tg_pt_gp *tg_pt_gp)
1587 { 1586 {
1588 spin_lock(&tg_pt_gp->tg_pt_gp_lock); 1587 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1589 tg_pt_gp_mem->tg_pt_gp = tg_pt_gp; 1588 tg_pt_gp_mem->tg_pt_gp = tg_pt_gp;
1590 tg_pt_gp_mem->tg_pt_gp_assoc = 1; 1589 tg_pt_gp_mem->tg_pt_gp_assoc = 1;
1591 list_add_tail(&tg_pt_gp_mem->tg_pt_gp_mem_list, 1590 list_add_tail(&tg_pt_gp_mem->tg_pt_gp_mem_list,
1592 &tg_pt_gp->tg_pt_gp_mem_list); 1591 &tg_pt_gp->tg_pt_gp_mem_list);
1593 tg_pt_gp->tg_pt_gp_members++; 1592 tg_pt_gp->tg_pt_gp_members++;
1594 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 1593 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1595 } 1594 }
1596 1595
1597 /* 1596 /*
1598 * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held 1597 * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held
1599 */ 1598 */
1600 static void __core_alua_drop_tg_pt_gp_mem( 1599 static void __core_alua_drop_tg_pt_gp_mem(
1601 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, 1600 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
1602 struct t10_alua_tg_pt_gp *tg_pt_gp) 1601 struct t10_alua_tg_pt_gp *tg_pt_gp)
1603 { 1602 {
1604 spin_lock(&tg_pt_gp->tg_pt_gp_lock); 1603 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1605 list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list); 1604 list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
1606 tg_pt_gp_mem->tg_pt_gp = NULL; 1605 tg_pt_gp_mem->tg_pt_gp = NULL;
1607 tg_pt_gp_mem->tg_pt_gp_assoc = 0; 1606 tg_pt_gp_mem->tg_pt_gp_assoc = 0;
1608 tg_pt_gp->tg_pt_gp_members--; 1607 tg_pt_gp->tg_pt_gp_members--;
1609 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 1608 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1610 } 1609 }
1611 1610
1612 ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page) 1611 ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page)
1613 { 1612 {
1614 struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev; 1613 struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
1615 struct config_item *tg_pt_ci; 1614 struct config_item *tg_pt_ci;
1616 struct t10_alua *alua = &su_dev->t10_alua; 1615 struct t10_alua *alua = &su_dev->t10_alua;
1617 struct t10_alua_tg_pt_gp *tg_pt_gp; 1616 struct t10_alua_tg_pt_gp *tg_pt_gp;
1618 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; 1617 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
1619 ssize_t len = 0; 1618 ssize_t len = 0;
1620 1619
1621 if (alua->alua_type != SPC3_ALUA_EMULATED) 1620 if (alua->alua_type != SPC3_ALUA_EMULATED)
1622 return len; 1621 return len;
1623 1622
1624 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; 1623 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
1625 if (!tg_pt_gp_mem) 1624 if (!tg_pt_gp_mem)
1626 return len; 1625 return len;
1627 1626
1628 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1627 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1629 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; 1628 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
1630 if (tg_pt_gp) { 1629 if (tg_pt_gp) {
1631 tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item; 1630 tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
1632 len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:" 1631 len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:"
1633 " %hu\nTG Port Primary Access State: %s\nTG Port " 1632 " %hu\nTG Port Primary Access State: %s\nTG Port "
1634 "Primary Access Status: %s\nTG Port Secondary Access" 1633 "Primary Access Status: %s\nTG Port Secondary Access"
1635 " State: %s\nTG Port Secondary Access Status: %s\n", 1634 " State: %s\nTG Port Secondary Access Status: %s\n",
1636 config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id, 1635 config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id,
1637 core_alua_dump_state(atomic_read( 1636 core_alua_dump_state(atomic_read(
1638 &tg_pt_gp->tg_pt_gp_alua_access_state)), 1637 &tg_pt_gp->tg_pt_gp_alua_access_state)),
1639 core_alua_dump_status( 1638 core_alua_dump_status(
1640 tg_pt_gp->tg_pt_gp_alua_access_status), 1639 tg_pt_gp->tg_pt_gp_alua_access_status),
1641 (atomic_read(&port->sep_tg_pt_secondary_offline)) ? 1640 (atomic_read(&port->sep_tg_pt_secondary_offline)) ?
1642 "Offline" : "None", 1641 "Offline" : "None",
1643 core_alua_dump_status(port->sep_tg_pt_secondary_stat)); 1642 core_alua_dump_status(port->sep_tg_pt_secondary_stat));
1644 } 1643 }
1645 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1644 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1646 1645
1647 return len; 1646 return len;
1648 } 1647 }
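
The show handler renders five labelled fields into the caller's page buffer with a single sprintf() while holding tg_pt_gp_mem_lock. A userspace C approximation of the resulting text, using sample values for the alias, ID and states:

    #include <stdio.h>

    int main(void)
    {
            char page[4096];
            int len;

            len = sprintf(page, "TG Port Alias: %s\nTG Port Group ID:"
                          " %hu\nTG Port Primary Access State: %s\nTG Port "
                          "Primary Access Status: %s\nTG Port Secondary Access"
                          " State: %s\nTG Port Secondary Access Status: %s\n",
                          "tg_pt_gp_1", (unsigned short)1,
                          "Active/Optimized", "None", "None", "None");

            fputs(page, stdout);
            printf("(%d bytes)\n", len);
            return 0;
    }
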
1649 1648
1650 ssize_t core_alua_store_tg_pt_gp_info( 1649 ssize_t core_alua_store_tg_pt_gp_info(
1651 struct se_port *port, 1650 struct se_port *port,
1652 const char *page, 1651 const char *page,
1653 size_t count) 1652 size_t count)
1654 { 1653 {
1655 struct se_portal_group *tpg; 1654 struct se_portal_group *tpg;
1656 struct se_lun *lun; 1655 struct se_lun *lun;
1657 struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev; 1656 struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
1658 struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL; 1657 struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL;
1659 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; 1658 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
1660 unsigned char buf[TG_PT_GROUP_NAME_BUF]; 1659 unsigned char buf[TG_PT_GROUP_NAME_BUF];
1661 int move = 0; 1660 int move = 0;
1662 1661
1663 tpg = port->sep_tpg; 1662 tpg = port->sep_tpg;
1664 lun = port->sep_lun; 1663 lun = port->sep_lun;
1665 1664
1666 if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) { 1665 if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) {
1667 pr_warn("SPC3_ALUA_EMULATED not enabled for" 1666 pr_warn("SPC3_ALUA_EMULATED not enabled for"
1668 " %s/tpgt_%hu/%s\n", tpg->se_tpg_tfo->tpg_get_wwn(tpg), 1667 " %s/tpgt_%hu/%s\n", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
1669 tpg->se_tpg_tfo->tpg_get_tag(tpg), 1668 tpg->se_tpg_tfo->tpg_get_tag(tpg),
1670 config_item_name(&lun->lun_group.cg_item)); 1669 config_item_name(&lun->lun_group.cg_item));
1671 return -EINVAL; 1670 return -EINVAL;
1672 } 1671 }
1673 1672
1674 if (count > TG_PT_GROUP_NAME_BUF) { 1673 if (count > TG_PT_GROUP_NAME_BUF) {
1675 pr_err("ALUA Target Port Group alias too large!\n"); 1674 pr_err("ALUA Target Port Group alias too large!\n");
1676 return -EINVAL; 1675 return -EINVAL;
1677 } 1676 }
1678 memset(buf, 0, TG_PT_GROUP_NAME_BUF); 1677 memset(buf, 0, TG_PT_GROUP_NAME_BUF);
1679 memcpy(buf, page, count); 1678 memcpy(buf, page, count);
1680 /* 1679 /*
1681 * Any ALUA target port group alias besides "NULL" means we will be 1680 * Any ALUA target port group alias besides "NULL" means we will be
1682 * making a new group association. 1681 * making a new group association.
1683 */ 1682 */
1684 if (strcmp(strstrip(buf), "NULL")) { 1683 if (strcmp(strstrip(buf), "NULL")) {
1685 /* 1684 /*
1686 * core_alua_get_tg_pt_gp_by_name() will increment reference to 1685 * core_alua_get_tg_pt_gp_by_name() will increment reference to
1687 * struct t10_alua_tg_pt_gp. This reference is released with 1686 * struct t10_alua_tg_pt_gp. This reference is released with
1688 * core_alua_put_tg_pt_gp_from_name() below. 1687 * core_alua_put_tg_pt_gp_from_name() below.
1689 */ 1688 */
1690 tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(su_dev, 1689 tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(su_dev,
1691 strstrip(buf)); 1690 strstrip(buf));
1692 if (!tg_pt_gp_new) 1691 if (!tg_pt_gp_new)
1693 return -ENODEV; 1692 return -ENODEV;
1694 } 1693 }
1695 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; 1694 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
1696 if (!tg_pt_gp_mem) { 1695 if (!tg_pt_gp_mem) {
1697 if (tg_pt_gp_new) 1696 if (tg_pt_gp_new)
1698 core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new); 1697 core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
1699 pr_err("NULL struct se_port->sep_alua_tg_pt_gp_mem pointer\n"); 1698 pr_err("NULL struct se_port->sep_alua_tg_pt_gp_mem pointer\n");
1700 return -EINVAL; 1699 return -EINVAL;
1701 } 1700 }
1702 1701
1703 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1702 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1704 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; 1703 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
1705 if (tg_pt_gp) { 1704 if (tg_pt_gp) {
1706 /* 1705 /*
1707 * Clearing an existing tg_pt_gp association, and replacing 1706 * Clearing an existing tg_pt_gp association, and replacing
1708 * with the default_tg_pt_gp. 1707 * with the default_tg_pt_gp.
1709 */ 1708 */
1710 if (!tg_pt_gp_new) { 1709 if (!tg_pt_gp_new) {
1711 pr_debug("Target_Core_ConfigFS: Moving" 1710 pr_debug("Target_Core_ConfigFS: Moving"
1712 " %s/tpgt_%hu/%s from ALUA Target Port Group:" 1711 " %s/tpgt_%hu/%s from ALUA Target Port Group:"
1713 " alua/%s, ID: %hu back to" 1712 " alua/%s, ID: %hu back to"
1714 " default_tg_pt_gp\n", 1713 " default_tg_pt_gp\n",
1715 tpg->se_tpg_tfo->tpg_get_wwn(tpg), 1714 tpg->se_tpg_tfo->tpg_get_wwn(tpg),
1716 tpg->se_tpg_tfo->tpg_get_tag(tpg), 1715 tpg->se_tpg_tfo->tpg_get_tag(tpg),
1717 config_item_name(&lun->lun_group.cg_item), 1716 config_item_name(&lun->lun_group.cg_item),
1718 config_item_name( 1717 config_item_name(
1719 &tg_pt_gp->tg_pt_gp_group.cg_item), 1718 &tg_pt_gp->tg_pt_gp_group.cg_item),
1720 tg_pt_gp->tg_pt_gp_id); 1719 tg_pt_gp->tg_pt_gp_id);
1721 1720
1722 __core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp); 1721 __core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
1723 __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, 1722 __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
1724 su_dev->t10_alua.default_tg_pt_gp); 1723 su_dev->t10_alua.default_tg_pt_gp);
1725 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1724 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1726 1725
1727 return count; 1726 return count;
1728 } 1727 }
1729 /* 1728 /*
1730 * Removing existing association of tg_pt_gp_mem with tg_pt_gp 1729 * Removing existing association of tg_pt_gp_mem with tg_pt_gp
1731 */ 1730 */
1732 __core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp); 1731 __core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
1733 move = 1; 1732 move = 1;
1734 } 1733 }
1735 /* 1734 /*
1736 * Associate tg_pt_gp_mem with tg_pt_gp_new. 1735 * Associate tg_pt_gp_mem with tg_pt_gp_new.
1737 */ 1736 */
1738 __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp_new); 1737 __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp_new);
1739 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1738 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1740 pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA" 1739 pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
1741 " Target Port Group: alua/%s, ID: %hu\n", (move) ? 1740 " Target Port Group: alua/%s, ID: %hu\n", (move) ?
1742 "Moving" : "Adding", tpg->se_tpg_tfo->tpg_get_wwn(tpg), 1741 "Moving" : "Adding", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
1743 tpg->se_tpg_tfo->tpg_get_tag(tpg), 1742 tpg->se_tpg_tfo->tpg_get_tag(tpg),
1744 config_item_name(&lun->lun_group.cg_item), 1743 config_item_name(&lun->lun_group.cg_item),
1745 config_item_name(&tg_pt_gp_new->tg_pt_gp_group.cg_item), 1744 config_item_name(&tg_pt_gp_new->tg_pt_gp_group.cg_item),
1746 tg_pt_gp_new->tg_pt_gp_id); 1745 tg_pt_gp_new->tg_pt_gp_id);
1747 1746
1748 core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new); 1747 core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
1749 return count; 1748 return count;
1750 } 1749 }
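
The alias convention in the store handler above is worth calling out: a trimmed value of "NULL" detaches the port from its current target port group and reattaches it to default_tg_pt_gp, while any other value is resolved by name. A minimal user-space sketch of that convention (not kernel code; classify_alias() and the sample group name are hypothetical, and the newline trim stands in for strstrip()):

    #include <stdio.h>
    #include <string.h>

    static const char *classify_alias(char *buf)
    {
            buf[strcspn(buf, "\n")] = '\0';   /* trim, as strstrip() would */
            if (!strcmp(buf, "NULL"))
                    return "detach: reattach to default_tg_pt_gp";
            return "attach: core_alua_get_tg_pt_gp_by_name() lookup";
    }

    int main(void)
    {
            char a[] = "NULL\n", b[] = "my_tg_pt_gp\n";

            printf("NULL        -> %s\n", classify_alias(a));
            printf("my_tg_pt_gp -> %s\n", classify_alias(b));
            return 0;
    }
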
1751 1750
1752 ssize_t core_alua_show_access_type( 1751 ssize_t core_alua_show_access_type(
1753 struct t10_alua_tg_pt_gp *tg_pt_gp, 1752 struct t10_alua_tg_pt_gp *tg_pt_gp,
1754 char *page) 1753 char *page)
1755 { 1754 {
1756 if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA) && 1755 if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA) &&
1757 (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)) 1756 (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA))
1758 return sprintf(page, "Implict and Explict\n"); 1757 return sprintf(page, "Implict and Explict\n");
1759 else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA) 1758 else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)
1760 return sprintf(page, "Implict\n"); 1759 return sprintf(page, "Implict\n");
1761 else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA) 1760 else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA)
1762 return sprintf(page, "Explict\n"); 1761 return sprintf(page, "Explict\n");
1763 else 1762 else
1764 return sprintf(page, "None\n"); 1763 return sprintf(page, "None\n");
1765 } 1764 }
1766 1765
1767 ssize_t core_alua_store_access_type( 1766 ssize_t core_alua_store_access_type(
1768 struct t10_alua_tg_pt_gp *tg_pt_gp, 1767 struct t10_alua_tg_pt_gp *tg_pt_gp,
1769 const char *page, 1768 const char *page,
1770 size_t count) 1769 size_t count)
1771 { 1770 {
1772 unsigned long tmp; 1771 unsigned long tmp;
1773 int ret; 1772 int ret;
1774 1773
1775 ret = strict_strtoul(page, 0, &tmp); 1774 ret = strict_strtoul(page, 0, &tmp);
1776 if (ret < 0) { 1775 if (ret < 0) {
1777 pr_err("Unable to extract alua_access_type\n"); 1776 pr_err("Unable to extract alua_access_type\n");
1778 return -EINVAL; 1777 return -EINVAL;
1779 } 1778 }
1780 if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) { 1779 if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) {
1781 pr_err("Illegal value for alua_access_type:" 1780 pr_err("Illegal value for alua_access_type:"
1782 " %lu\n", tmp); 1781 " %lu\n", tmp);
1783 return -EINVAL; 1782 return -EINVAL;
1784 } 1783 }
1785 if (tmp == 3) 1784 if (tmp == 3)
1786 tg_pt_gp->tg_pt_gp_alua_access_type = 1785 tg_pt_gp->tg_pt_gp_alua_access_type =
1787 TPGS_IMPLICT_ALUA | TPGS_EXPLICT_ALUA; 1786 TPGS_IMPLICT_ALUA | TPGS_EXPLICT_ALUA;
1788 else if (tmp == 2) 1787 else if (tmp == 2)
1789 tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICT_ALUA; 1788 tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICT_ALUA;
1790 else if (tmp == 1) 1789 else if (tmp == 1)
1791 tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICT_ALUA; 1790 tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICT_ALUA;
1792 else 1791 else
1793 tg_pt_gp->tg_pt_gp_alua_access_type = 0; 1792 tg_pt_gp->tg_pt_gp_alua_access_type = 0;
1794 1793
1795 return count; 1794 return count;
1796 } 1795 }
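
The handler above maps a small integer from configfs onto the TPGS bits reported in INQUIRY byte 5 (spc4r17 table 135). A sketch of the mapping; the 0x10/0x20 values mirror TPGS_IMPLICT_ALUA and TPGS_EXPLICT_ALUA from target_core_base.h and are written out literally here only for illustration:

    static int alua_access_type_to_tpgs(unsigned long tmp)
    {
            switch (tmp) {
            case 0: return 0;             /* ALUA reporting disabled */
            case 1: return 0x10;          /* implicit only */
            case 2: return 0x20;          /* explicit only */
            case 3: return 0x10 | 0x20;   /* implicit and explicit */
            default: return -1;           /* handler rejects with -EINVAL */
            }
    }
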
1797 1796
1798 ssize_t core_alua_show_nonop_delay_msecs( 1797 ssize_t core_alua_show_nonop_delay_msecs(
1799 struct t10_alua_tg_pt_gp *tg_pt_gp, 1798 struct t10_alua_tg_pt_gp *tg_pt_gp,
1800 char *page) 1799 char *page)
1801 { 1800 {
1802 return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_nonop_delay_msecs); 1801 return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_nonop_delay_msecs);
1803 } 1802 }
1804 1803
1805 ssize_t core_alua_store_nonop_delay_msecs( 1804 ssize_t core_alua_store_nonop_delay_msecs(
1806 struct t10_alua_tg_pt_gp *tg_pt_gp, 1805 struct t10_alua_tg_pt_gp *tg_pt_gp,
1807 const char *page, 1806 const char *page,
1808 size_t count) 1807 size_t count)
1809 { 1808 {
1810 unsigned long tmp; 1809 unsigned long tmp;
1811 int ret; 1810 int ret;
1812 1811
1813 ret = strict_strtoul(page, 0, &tmp); 1812 ret = strict_strtoul(page, 0, &tmp);
1814 if (ret < 0) { 1813 if (ret < 0) {
1815 pr_err("Unable to extract nonop_delay_msecs\n"); 1814 pr_err("Unable to extract nonop_delay_msecs\n");
1816 return -EINVAL; 1815 return -EINVAL;
1817 } 1816 }
1818 if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) { 1817 if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) {
1819 pr_err("Passed nonop_delay_msecs: %lu, exceeds" 1818 pr_err("Passed nonop_delay_msecs: %lu, exceeds"
1820 " ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp, 1819 " ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp,
1821 ALUA_MAX_NONOP_DELAY_MSECS); 1820 ALUA_MAX_NONOP_DELAY_MSECS);
1822 return -EINVAL; 1821 return -EINVAL;
1823 } 1822 }
1824 tg_pt_gp->tg_pt_gp_nonop_delay_msecs = (int)tmp; 1823 tg_pt_gp->tg_pt_gp_nonop_delay_msecs = (int)tmp;
1825 1824
1826 return count; 1825 return count;
1827 } 1826 }
1828 1827
1829 ssize_t core_alua_show_trans_delay_msecs( 1828 ssize_t core_alua_show_trans_delay_msecs(
1830 struct t10_alua_tg_pt_gp *tg_pt_gp, 1829 struct t10_alua_tg_pt_gp *tg_pt_gp,
1831 char *page) 1830 char *page)
1832 { 1831 {
1833 return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_trans_delay_msecs); 1832 return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_trans_delay_msecs);
1834 } 1833 }
1835 1834
1836 ssize_t core_alua_store_trans_delay_msecs( 1835 ssize_t core_alua_store_trans_delay_msecs(
1837 struct t10_alua_tg_pt_gp *tg_pt_gp, 1836 struct t10_alua_tg_pt_gp *tg_pt_gp,
1838 const char *page, 1837 const char *page,
1839 size_t count) 1838 size_t count)
1840 { 1839 {
1841 unsigned long tmp; 1840 unsigned long tmp;
1842 int ret; 1841 int ret;
1843 1842
1844 ret = strict_strtoul(page, 0, &tmp); 1843 ret = strict_strtoul(page, 0, &tmp);
1845 if (ret < 0) { 1844 if (ret < 0) {
1846 pr_err("Unable to extract trans_delay_msecs\n"); 1845 pr_err("Unable to extract trans_delay_msecs\n");
1847 return -EINVAL; 1846 return -EINVAL;
1848 } 1847 }
1849 if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) { 1848 if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) {
1850 pr_err("Passed trans_delay_msecs: %lu, exceeds" 1849 pr_err("Passed trans_delay_msecs: %lu, exceeds"
1851 " ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp, 1850 " ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp,
1852 ALUA_MAX_TRANS_DELAY_MSECS); 1851 ALUA_MAX_TRANS_DELAY_MSECS);
1853 return -EINVAL; 1852 return -EINVAL;
1854 } 1853 }
1855 tg_pt_gp->tg_pt_gp_trans_delay_msecs = (int)tmp; 1854 tg_pt_gp->tg_pt_gp_trans_delay_msecs = (int)tmp;
1856 1855
1857 return count; 1856 return count;
1858 } 1857 }
1859 1858
1860 ssize_t core_alua_show_preferred_bit( 1859 ssize_t core_alua_show_preferred_bit(
1861 struct t10_alua_tg_pt_gp *tg_pt_gp, 1860 struct t10_alua_tg_pt_gp *tg_pt_gp,
1862 char *page) 1861 char *page)
1863 { 1862 {
1864 return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_pref); 1863 return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_pref);
1865 } 1864 }
1866 1865
1867 ssize_t core_alua_store_preferred_bit( 1866 ssize_t core_alua_store_preferred_bit(
1868 struct t10_alua_tg_pt_gp *tg_pt_gp, 1867 struct t10_alua_tg_pt_gp *tg_pt_gp,
1869 const char *page, 1868 const char *page,
1870 size_t count) 1869 size_t count)
1871 { 1870 {
1872 unsigned long tmp; 1871 unsigned long tmp;
1873 int ret; 1872 int ret;
1874 1873
1875 ret = strict_strtoul(page, 0, &tmp); 1874 ret = strict_strtoul(page, 0, &tmp);
1876 if (ret < 0) { 1875 if (ret < 0) {
1877 pr_err("Unable to extract preferred ALUA value\n"); 1876 pr_err("Unable to extract preferred ALUA value\n");
1878 return -EINVAL; 1877 return -EINVAL;
1879 } 1878 }
1880 if ((tmp != 0) && (tmp != 1)) { 1879 if ((tmp != 0) && (tmp != 1)) {
1881 pr_err("Illegal value for preferred ALUA: %lu\n", tmp); 1880 pr_err("Illegal value for preferred ALUA: %lu\n", tmp);
1882 return -EINVAL; 1881 return -EINVAL;
1883 } 1882 }
1884 tg_pt_gp->tg_pt_gp_pref = (int)tmp; 1883 tg_pt_gp->tg_pt_gp_pref = (int)tmp;
1885 1884
1886 return count; 1885 return count;
1887 } 1886 }
1888 1887
1889 ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page) 1888 ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page)
1890 { 1889 {
1891 if (!lun->lun_sep) 1890 if (!lun->lun_sep)
1892 return -ENODEV; 1891 return -ENODEV;
1893 1892
1894 return sprintf(page, "%d\n", 1893 return sprintf(page, "%d\n",
1895 atomic_read(&lun->lun_sep->sep_tg_pt_secondary_offline)); 1894 atomic_read(&lun->lun_sep->sep_tg_pt_secondary_offline));
1896 } 1895 }
1897 1896
1898 ssize_t core_alua_store_offline_bit( 1897 ssize_t core_alua_store_offline_bit(
1899 struct se_lun *lun, 1898 struct se_lun *lun,
1900 const char *page, 1899 const char *page,
1901 size_t count) 1900 size_t count)
1902 { 1901 {
1903 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; 1902 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
1904 unsigned long tmp; 1903 unsigned long tmp;
1905 int ret; 1904 int ret;
1906 1905
1907 if (!lun->lun_sep) 1906 if (!lun->lun_sep)
1908 return -ENODEV; 1907 return -ENODEV;
1909 1908
1910 ret = strict_strtoul(page, 0, &tmp); 1909 ret = strict_strtoul(page, 0, &tmp);
1911 if (ret < 0) { 1910 if (ret < 0) {
1912 pr_err("Unable to extract alua_tg_pt_offline value\n"); 1911 pr_err("Unable to extract alua_tg_pt_offline value\n");
1913 return -EINVAL; 1912 return -EINVAL;
1914 } 1913 }
1915 if ((tmp != 0) && (tmp != 1)) { 1914 if ((tmp != 0) && (tmp != 1)) {
1916 pr_err("Illegal value for alua_tg_pt_offline: %lu\n", 1915 pr_err("Illegal value for alua_tg_pt_offline: %lu\n",
1917 tmp); 1916 tmp);
1918 return -EINVAL; 1917 return -EINVAL;
1919 } 1918 }
1920 tg_pt_gp_mem = lun->lun_sep->sep_alua_tg_pt_gp_mem; 1919 tg_pt_gp_mem = lun->lun_sep->sep_alua_tg_pt_gp_mem;
1921 if (!tg_pt_gp_mem) { 1920 if (!tg_pt_gp_mem) {
1922 pr_err("Unable to locate *tg_pt_gp_mem\n"); 1921 pr_err("Unable to locate *tg_pt_gp_mem\n");
1923 return -EINVAL; 1922 return -EINVAL;
1924 } 1923 }
1925 1924
1926 ret = core_alua_set_tg_pt_secondary_state(tg_pt_gp_mem, 1925 ret = core_alua_set_tg_pt_secondary_state(tg_pt_gp_mem,
1927 lun->lun_sep, 0, (int)tmp); 1926 lun->lun_sep, 0, (int)tmp);
1928 if (ret < 0) 1927 if (ret < 0)
1929 return -EINVAL; 1928 return -EINVAL;
1930 1929
1931 return count; 1930 return count;
1932 } 1931 }
1933 1932
1934 ssize_t core_alua_show_secondary_status( 1933 ssize_t core_alua_show_secondary_status(
1935 struct se_lun *lun, 1934 struct se_lun *lun,
1936 char *page) 1935 char *page)
1937 { 1936 {
1938 return sprintf(page, "%d\n", lun->lun_sep->sep_tg_pt_secondary_stat); 1937 return sprintf(page, "%d\n", lun->lun_sep->sep_tg_pt_secondary_stat);
1939 } 1938 }
1940 1939
1941 ssize_t core_alua_store_secondary_status( 1940 ssize_t core_alua_store_secondary_status(
1942 struct se_lun *lun, 1941 struct se_lun *lun,
1943 const char *page, 1942 const char *page,
1944 size_t count) 1943 size_t count)
1945 { 1944 {
1946 unsigned long tmp; 1945 unsigned long tmp;
1947 int ret; 1946 int ret;
1948 1947
1949 ret = strict_strtoul(page, 0, &tmp); 1948 ret = strict_strtoul(page, 0, &tmp);
1950 if (ret < 0) { 1949 if (ret < 0) {
1951 pr_err("Unable to extract alua_tg_pt_status\n"); 1950 pr_err("Unable to extract alua_tg_pt_status\n");
1952 return -EINVAL; 1951 return -EINVAL;
1953 } 1952 }
1954 if ((tmp != ALUA_STATUS_NONE) && 1953 if ((tmp != ALUA_STATUS_NONE) &&
1955 (tmp != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) && 1954 (tmp != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
1956 (tmp != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) { 1955 (tmp != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) {
1957 pr_err("Illegal value for alua_tg_pt_status: %lu\n", 1956 pr_err("Illegal value for alua_tg_pt_status: %lu\n",
1958 tmp); 1957 tmp);
1959 return -EINVAL; 1958 return -EINVAL;
1960 } 1959 }
1961 lun->lun_sep->sep_tg_pt_secondary_stat = (int)tmp; 1960 lun->lun_sep->sep_tg_pt_secondary_stat = (int)tmp;
1962 1961
1963 return count; 1962 return count;
1964 } 1963 }
1965 1964
1966 ssize_t core_alua_show_secondary_write_metadata( 1965 ssize_t core_alua_show_secondary_write_metadata(
1967 struct se_lun *lun, 1966 struct se_lun *lun,
1968 char *page) 1967 char *page)
1969 { 1968 {
1970 return sprintf(page, "%d\n", 1969 return sprintf(page, "%d\n",
1971 lun->lun_sep->sep_tg_pt_secondary_write_md); 1970 lun->lun_sep->sep_tg_pt_secondary_write_md);
1972 } 1971 }
1973 1972
1974 ssize_t core_alua_store_secondary_write_metadata( 1973 ssize_t core_alua_store_secondary_write_metadata(
1975 struct se_lun *lun, 1974 struct se_lun *lun,
1976 const char *page, 1975 const char *page,
1977 size_t count) 1976 size_t count)
1978 { 1977 {
1979 unsigned long tmp; 1978 unsigned long tmp;
1980 int ret; 1979 int ret;
1981 1980
1982 ret = strict_strtoul(page, 0, &tmp); 1981 ret = strict_strtoul(page, 0, &tmp);
1983 if (ret < 0) { 1982 if (ret < 0) {
1984 pr_err("Unable to extract alua_tg_pt_write_md\n"); 1983 pr_err("Unable to extract alua_tg_pt_write_md\n");
1985 return -EINVAL; 1984 return -EINVAL;
1986 } 1985 }
1987 if ((tmp != 0) && (tmp != 1)) { 1986 if ((tmp != 0) && (tmp != 1)) {
1988 pr_err("Illegal value for alua_tg_pt_write_md:" 1987 pr_err("Illegal value for alua_tg_pt_write_md:"
1989 " %lu\n", tmp); 1988 " %lu\n", tmp);
1990 return -EINVAL; 1989 return -EINVAL;
1991 } 1990 }
1992 lun->lun_sep->sep_tg_pt_secondary_write_md = (int)tmp; 1991 lun->lun_sep->sep_tg_pt_secondary_write_md = (int)tmp;
1993 1992
1994 return count; 1993 return count;
1995 } 1994 }
1996 1995
1997 int core_setup_alua(struct se_device *dev, int force_pt) 1996 int core_setup_alua(struct se_device *dev, int force_pt)
1998 { 1997 {
1999 struct se_subsystem_dev *su_dev = dev->se_sub_dev; 1998 struct se_subsystem_dev *su_dev = dev->se_sub_dev;
2000 struct t10_alua *alua = &su_dev->t10_alua; 1999 struct t10_alua *alua = &su_dev->t10_alua;
2001 struct t10_alua_lu_gp_member *lu_gp_mem; 2000 struct t10_alua_lu_gp_member *lu_gp_mem;
2002 /* 2001 /*
2003 * If this device is from Target_Core_Mod/pSCSI, use the ALUA logic 2002 * If this device is from Target_Core_Mod/pSCSI, use the ALUA logic
2004 * of the Underlying SCSI hardware. In Linux/SCSI terms, this can 2003 * of the Underlying SCSI hardware. In Linux/SCSI terms, this can
2005 * cause a problem because libata and some SATA RAID HBAs appear 2004 * cause a problem because libata and some SATA RAID HBAs appear
2006 * under Linux/SCSI, but emulate SCSI logic themselves. 2005 * under Linux/SCSI, but emulate SCSI logic themselves.
2007 */ 2006 */
2008 if (((dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) && 2007 if (((dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) &&
2009 !(dev->se_sub_dev->se_dev_attrib.emulate_alua)) || force_pt) { 2008 !(dev->se_sub_dev->se_dev_attrib.emulate_alua)) || force_pt) {
2010 alua->alua_type = SPC_ALUA_PASSTHROUGH; 2009 alua->alua_type = SPC_ALUA_PASSTHROUGH;
2011 alua->alua_state_check = &core_alua_state_check_nop; 2010 alua->alua_state_check = &core_alua_state_check_nop;
2012 pr_debug("%s: Using SPC_ALUA_PASSTHROUGH, no ALUA" 2011 pr_debug("%s: Using SPC_ALUA_PASSTHROUGH, no ALUA"
2013 " emulation\n", dev->transport->name); 2012 " emulation\n", dev->transport->name);
2014 return 0; 2013 return 0;
2015 } 2014 }
2016 /* 2015 /*
2017 * If SPC-3 or above is reported by real or emulated struct se_device, 2016 * If SPC-3 or above is reported by real or emulated struct se_device,
2018 * use emulated ALUA. 2017 * use emulated ALUA.
2019 */ 2018 */
2020 if (dev->transport->get_device_rev(dev) >= SCSI_3) { 2019 if (dev->transport->get_device_rev(dev) >= SCSI_3) {
2021 pr_debug("%s: Enabling ALUA Emulation for SPC-3" 2020 pr_debug("%s: Enabling ALUA Emulation for SPC-3"
2022 " device\n", dev->transport->name); 2021 " device\n", dev->transport->name);
2023 /* 2022 /*
2024 * Associate this struct se_device with the default ALUA 2023 * Associate this struct se_device with the default ALUA
2025 * LUN Group. 2024 * LUN Group.
2026 */ 2025 */
2027 lu_gp_mem = core_alua_allocate_lu_gp_mem(dev); 2026 lu_gp_mem = core_alua_allocate_lu_gp_mem(dev);
2028 if (IS_ERR(lu_gp_mem)) 2027 if (IS_ERR(lu_gp_mem))
2029 return PTR_ERR(lu_gp_mem); 2028 return PTR_ERR(lu_gp_mem);
2030 2029
2031 alua->alua_type = SPC3_ALUA_EMULATED; 2030 alua->alua_type = SPC3_ALUA_EMULATED;
2032 alua->alua_state_check = &core_alua_state_check; 2031 alua->alua_state_check = &core_alua_state_check;
2033 spin_lock(&lu_gp_mem->lu_gp_mem_lock); 2032 spin_lock(&lu_gp_mem->lu_gp_mem_lock);
2034 __core_alua_attach_lu_gp_mem(lu_gp_mem, 2033 __core_alua_attach_lu_gp_mem(lu_gp_mem,
2035 default_lu_gp); 2034 default_lu_gp);
2036 spin_unlock(&lu_gp_mem->lu_gp_mem_lock); 2035 spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
2037 2036
2038 pr_debug("%s: Adding to default ALUA LU Group:" 2037 pr_debug("%s: Adding to default ALUA LU Group:"
2039 " core/alua/lu_gps/default_lu_gp\n", 2038 " core/alua/lu_gps/default_lu_gp\n",
2040 dev->transport->name); 2039 dev->transport->name);
2041 } else { 2040 } else {
2042 alua->alua_type = SPC2_ALUA_DISABLED; 2041 alua->alua_type = SPC2_ALUA_DISABLED;
2043 alua->alua_state_check = &core_alua_state_check_nop; 2042 alua->alua_state_check = &core_alua_state_check_nop;
2044 pr_debug("%s: Disabling ALUA Emulation for SPC-2" 2043 pr_debug("%s: Disabling ALUA Emulation for SPC-2"
2045 " device\n", dev->transport->name); 2044 " device\n", dev->transport->name);
2046 } 2045 }
2047 2046
2048 return 0; 2047 return 0;
2049 } 2048 }
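
core_setup_alua() boils down to a three-way mode selection. A condensed sketch, with booleans standing in for the checks above (the parameter names are inventions for illustration, not kernel fields):

    #include <stdbool.h>

    enum alua_mode { PASSTHROUGH, SPC3_EMULATED, SPC2_DISABLED };

    static enum alua_mode pick_alua_mode(bool pscsi_without_emulation,
                                         bool force_pt, bool spc3_or_newer)
    {
            if (pscsi_without_emulation || force_pt)
                    return PASSTHROUGH;    /* defer to the real hardware */
            if (spc3_or_newer)
                    return SPC3_EMULATED;  /* emulate; join default_lu_gp */
            return SPC2_DISABLED;          /* no ALUA below SPC-3 */
    }

Only the emulated path allocates a LU group member and installs the real core_alua_state_check(); the other two modes install the nop check.
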
2050 2049
drivers/target/target_core_cdb.c
1 /* 1 /*
2 * CDB emulation for non-READ/WRITE commands. 2 * CDB emulation for non-READ/WRITE commands.
3 * 3 *
4 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc. 4 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
5 * Copyright (c) 2005, 2006, 2007 SBE, Inc. 5 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
6 * Copyright (c) 2007-2010 Rising Tide Systems 6 * Copyright (c) 2007-2010 Rising Tide Systems
7 * Copyright (c) 2008-2010 Linux-iSCSI.org 7 * Copyright (c) 2008-2010 Linux-iSCSI.org
8 * 8 *
9 * Nicholas A. Bellinger <nab@kernel.org> 9 * Nicholas A. Bellinger <nab@kernel.org>
10 * 10 *
11 * This program is free software; you can redistribute it and/or modify 11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by 12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or 13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version. 14 * (at your option) any later version.
15 * 15 *
16 * This program is distributed in the hope that it will be useful, 16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details. 19 * GNU General Public License for more details.
20 * 20 *
21 * You should have received a copy of the GNU General Public License 21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software 22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 23 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 */ 24 */
25 25
26 #include <linux/kernel.h> 26 #include <linux/kernel.h>
27 #include <linux/module.h> 27 #include <linux/module.h>
28 #include <asm/unaligned.h> 28 #include <asm/unaligned.h>
29 #include <scsi/scsi.h> 29 #include <scsi/scsi.h>
30 30
31 #include <target/target_core_base.h> 31 #include <target/target_core_base.h>
32 #include <target/target_core_transport.h> 32 #include <target/target_core_backend.h>
33 #include <target/target_core_fabric_ops.h> 33 #include <target/target_core_fabric.h>
34 34
35 #include "target_core_internal.h" 35 #include "target_core_internal.h"
36 #include "target_core_ua.h" 36 #include "target_core_ua.h"
37 37
38 static void 38 static void
39 target_fill_alua_data(struct se_port *port, unsigned char *buf) 39 target_fill_alua_data(struct se_port *port, unsigned char *buf)
40 { 40 {
41 struct t10_alua_tg_pt_gp *tg_pt_gp; 41 struct t10_alua_tg_pt_gp *tg_pt_gp;
42 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; 42 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
43 43
44 /* 44 /*
45 * Set SCCS for MAINTENANCE_IN + REPORT_TARGET_PORT_GROUPS. 45 * Set SCCS for MAINTENANCE_IN + REPORT_TARGET_PORT_GROUPS.
46 */ 46 */
47 buf[5] = 0x80; 47 buf[5] = 0x80;
48 48
49 /* 49 /*
50 * Set TPGS field for explict and/or implict ALUA access type 50 * Set TPGS field for explict and/or implict ALUA access type
51 * and opteration. 51 * and opteration.
52 * 52 *
53 * See spc4r17 section 6.4.2 Table 135 53 * See spc4r17 section 6.4.2 Table 135
54 */ 54 */
55 if (!port) 55 if (!port)
56 return; 56 return;
57 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; 57 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
58 if (!tg_pt_gp_mem) 58 if (!tg_pt_gp_mem)
59 return; 59 return;
60 60
61 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 61 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
62 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; 62 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
63 if (tg_pt_gp) 63 if (tg_pt_gp)
64 buf[5] |= tg_pt_gp->tg_pt_gp_alua_access_type; 64 buf[5] |= tg_pt_gp->tg_pt_gp_alua_access_type;
65 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 65 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
66 } 66 }
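
For reference, the byte built above combines the unconditional SCCS bit with the port group's TPGS bits, so a device offering both implicit and explicit ALUA reports 0x80 | 0x30 = 0xb0 in INQUIRY byte 5. As a one-line sketch:

    /* SCCS (bit 7) | TPGS bits copied from tg_pt_gp_alua_access_type */
    static unsigned char inquiry_byte5(unsigned char tpgs_bits)
    {
            return 0x80 | tpgs_bits;
    }
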
67 67
68 static int 68 static int
69 target_emulate_inquiry_std(struct se_cmd *cmd) 69 target_emulate_inquiry_std(struct se_cmd *cmd)
70 { 70 {
71 struct se_lun *lun = cmd->se_lun; 71 struct se_lun *lun = cmd->se_lun;
72 struct se_device *dev = cmd->se_dev; 72 struct se_device *dev = cmd->se_dev;
73 struct se_portal_group *tpg = lun->lun_sep->sep_tpg; 73 struct se_portal_group *tpg = lun->lun_sep->sep_tpg;
74 unsigned char *buf; 74 unsigned char *buf;
75 75
76 /* 76 /*
77 * Make sure we at least have 6 bytes of INQUIRY response 77 * Make sure we at least have 6 bytes of INQUIRY response
78 * payload going back for EVPD=0 78 * payload going back for EVPD=0
79 */ 79 */
80 if (cmd->data_length < 6) { 80 if (cmd->data_length < 6) {
81 pr_err("SCSI Inquiry payload length: %u" 81 pr_err("SCSI Inquiry payload length: %u"
82 " too small for EVPD=0\n", cmd->data_length); 82 " too small for EVPD=0\n", cmd->data_length);
83 return -EINVAL; 83 return -EINVAL;
84 } 84 }
85 85
86 buf = transport_kmap_first_data_page(cmd); 86 buf = transport_kmap_first_data_page(cmd);
87 87
88 if (dev == tpg->tpg_virt_lun0.lun_se_dev) { 88 if (dev == tpg->tpg_virt_lun0.lun_se_dev) {
89 buf[0] = 0x3f; /* Not connected */ 89 buf[0] = 0x3f; /* Not connected */
90 } else { 90 } else {
91 buf[0] = dev->transport->get_device_type(dev); 91 buf[0] = dev->transport->get_device_type(dev);
92 if (buf[0] == TYPE_TAPE) 92 if (buf[0] == TYPE_TAPE)
93 buf[1] = 0x80; 93 buf[1] = 0x80;
94 } 94 }
95 buf[2] = dev->transport->get_device_rev(dev); 95 buf[2] = dev->transport->get_device_rev(dev);
96 96
97 /* 97 /*
98 * Enable SCCS and TPGS fields for Emulated ALUA 98 * Enable SCCS and TPGS fields for Emulated ALUA
99 */ 99 */
100 if (dev->se_sub_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) 100 if (dev->se_sub_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED)
101 target_fill_alua_data(lun->lun_sep, buf); 101 target_fill_alua_data(lun->lun_sep, buf);
102 102
103 if (cmd->data_length < 8) { 103 if (cmd->data_length < 8) {
104 buf[4] = 1; /* Set additional length to 1 */ 104 buf[4] = 1; /* Set additional length to 1 */
105 goto out; 105 goto out;
106 } 106 }
107 107
108 buf[7] = 0x32; /* Sync=1 and CmdQue=1 */ 108 buf[7] = 0x32; /* Sync=1 and CmdQue=1 */
109 109
110 /* 110 /*
111 * Do not include vendor, product, revision info in INQUIRY 111 * Do not include vendor, product, revision info in INQUIRY
112 * response payload for cdbs with a small allocation length. 112 * response payload for cdbs with a small allocation length.
113 */ 113 */
114 if (cmd->data_length < 36) { 114 if (cmd->data_length < 36) {
115 buf[4] = 3; /* Set additional length to 3 */ 115 buf[4] = 3; /* Set additional length to 3 */
116 goto out; 116 goto out;
117 } 117 }
118 118
119 snprintf((unsigned char *)&buf[8], 8, "LIO-ORG"); 119 snprintf((unsigned char *)&buf[8], 8, "LIO-ORG");
120 snprintf((unsigned char *)&buf[16], 16, "%s", 120 snprintf((unsigned char *)&buf[16], 16, "%s",
121 &dev->se_sub_dev->t10_wwn.model[0]); 121 &dev->se_sub_dev->t10_wwn.model[0]);
122 snprintf((unsigned char *)&buf[32], 4, "%s", 122 snprintf((unsigned char *)&buf[32], 4, "%s",
123 &dev->se_sub_dev->t10_wwn.revision[0]); 123 &dev->se_sub_dev->t10_wwn.revision[0]);
124 buf[4] = 31; /* Set additional length to 31 */ 124 buf[4] = 31; /* Set additional length to 31 */
125 125
126 out: 126 out:
127 transport_kunmap_first_data_page(cmd); 127 transport_kunmap_first_data_page(cmd);
128 return 0; 128 return 0;
129 } 129 }
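
The ADDITIONAL LENGTH byte set in the branches above is always the total response size minus 5, clamped to what the initiator's allocation length can hold. Summarizing the three cases (derived from the code above, not a separate spec table):

    /* buf[4] = total INQUIRY response bytes - 5:
     *   data_length < 8   -> 1   (6-byte response, header only)
     *   data_length < 36  -> 3   (8-byte response, no vendor/product/revision)
     *   otherwise         -> 31  (standard 36-byte response)
     */
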
130 130
131 /* unit serial number */ 131 /* unit serial number */
132 static int 132 static int
133 target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf) 133 target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
134 { 134 {
135 struct se_device *dev = cmd->se_dev; 135 struct se_device *dev = cmd->se_dev;
136 u16 len = 0; 136 u16 len = 0;
137 137
138 if (dev->se_sub_dev->su_dev_flags & 138 if (dev->se_sub_dev->su_dev_flags &
139 SDF_EMULATED_VPD_UNIT_SERIAL) { 139 SDF_EMULATED_VPD_UNIT_SERIAL) {
140 u32 unit_serial_len; 140 u32 unit_serial_len;
141 141
142 unit_serial_len = 142 unit_serial_len =
143 strlen(&dev->se_sub_dev->t10_wwn.unit_serial[0]); 143 strlen(&dev->se_sub_dev->t10_wwn.unit_serial[0]);
144 unit_serial_len++; /* For NULL Terminator */ 144 unit_serial_len++; /* For NULL Terminator */
145 145
146 if (((len + 4) + unit_serial_len) > cmd->data_length) { 146 if (((len + 4) + unit_serial_len) > cmd->data_length) {
147 len += unit_serial_len; 147 len += unit_serial_len;
148 buf[2] = ((len >> 8) & 0xff); 148 buf[2] = ((len >> 8) & 0xff);
149 buf[3] = (len & 0xff); 149 buf[3] = (len & 0xff);
150 return 0; 150 return 0;
151 } 151 }
152 len += sprintf((unsigned char *)&buf[4], "%s", 152 len += sprintf((unsigned char *)&buf[4], "%s",
153 &dev->se_sub_dev->t10_wwn.unit_serial[0]); 153 &dev->se_sub_dev->t10_wwn.unit_serial[0]);
154 len++; /* Extra Byte for NULL Terminator */ 154 len++; /* Extra Byte for NULL Terminator */
155 buf[3] = len; 155 buf[3] = len;
156 } 156 }
157 return 0; 157 return 0;
158 } 158 }
159 159
160 static void 160 static void
161 target_parse_naa_6h_vendor_specific(struct se_device *dev, unsigned char *buf) 161 target_parse_naa_6h_vendor_specific(struct se_device *dev, unsigned char *buf)
162 { 162 {
163 unsigned char *p = &dev->se_sub_dev->t10_wwn.unit_serial[0]; 163 unsigned char *p = &dev->se_sub_dev->t10_wwn.unit_serial[0];
164 int cnt; 164 int cnt;
165 bool next = true; 165 bool next = true;
166 166
167 /* 167 /*
168 * Generate up to 36 bits of VENDOR SPECIFIC IDENTIFIER starting on 168 * Generate up to 36 bits of VENDOR SPECIFIC IDENTIFIER starting on
169 * byte 3 bit 3-0 for NAA IEEE Registered Extended DESIGNATOR field 169 * byte 3 bit 3-0 for NAA IEEE Registered Extended DESIGNATOR field
170 * format, followed by 64 bits of VENDOR SPECIFIC IDENTIFIER EXTENSION 170 * format, followed by 64 bits of VENDOR SPECIFIC IDENTIFIER EXTENSION
171 * to complete the payload. These are based on VPD=0x80 PRODUCT SERIAL 171 * to complete the payload. These are based on VPD=0x80 PRODUCT SERIAL
172 * NUMBER set via vpd_unit_serial in target_core_configfs.c to ensure 172 * NUMBER set via vpd_unit_serial in target_core_configfs.c to ensure
173 * per device uniqueness. 173 * per device uniqueness.
174 */ 174 */
175 for (cnt = 0; *p && cnt < 13; p++) { 175 for (cnt = 0; *p && cnt < 13; p++) {
176 int val = hex_to_bin(*p); 176 int val = hex_to_bin(*p);
177 177
178 if (val < 0) 178 if (val < 0)
179 continue; 179 continue;
180 180
181 if (next) { 181 if (next) {
182 next = false; 182 next = false;
183 buf[cnt++] |= val; 183 buf[cnt++] |= val;
184 } else { 184 } else {
185 next = true; 185 next = true;
186 buf[cnt] = val << 4; 186 buf[cnt] = val << 4;
187 } 187 }
188 } 188 }
189 } 189 }
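
A user-space demo of the nibble packing above (not kernel code: hex_to_bin() is replaced by an inline conversion, and the serial "abc123" is a made-up stand-in for vpd_unit_serial). The caller has already written 0x5 into the high nibble of the first byte, which is why the very first hex digit lands in a low nibble:

    #include <stdio.h>
    #include <ctype.h>
    #include <stdbool.h>

    int main(void)
    {
            const char *p = "abc123";         /* hypothetical unit serial */
            unsigned char buf[13] = { 0x50 }; /* 0x5 left by the caller */
            bool next = true;
            int cnt = 0;

            for (; *p && cnt < 13; p++) {
                    if (!isxdigit((unsigned char)*p))
                            continue;         /* the hex_to_bin() < 0 case */
                    int val = isdigit((unsigned char)*p) ? *p - '0'
                            : tolower((unsigned char)*p) - 'a' + 10;
                    if (next) {
                            next = false;
                            buf[cnt++] |= val;    /* fill low nibble */
                    } else {
                            next = true;
                            buf[cnt] = val << 4;  /* start high nibble */
                    }
            }
            printf("%02x %02x %02x\n", buf[0], buf[1], buf[2]);
            /* prints: 5a bc 12 */
            return 0;
    }
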
190 190
191 /* 191 /*
192 * Device identification VPD, for a complete list of 192 * Device identification VPD, for a complete list of
193 * DESIGNATOR TYPEs see spc4r17 Table 459. 193 * DESIGNATOR TYPEs see spc4r17 Table 459.
194 */ 194 */
195 static int 195 static int
196 target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf) 196 target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
197 { 197 {
198 struct se_device *dev = cmd->se_dev; 198 struct se_device *dev = cmd->se_dev;
199 struct se_lun *lun = cmd->se_lun; 199 struct se_lun *lun = cmd->se_lun;
200 struct se_port *port = NULL; 200 struct se_port *port = NULL;
201 struct se_portal_group *tpg = NULL; 201 struct se_portal_group *tpg = NULL;
202 struct t10_alua_lu_gp_member *lu_gp_mem; 202 struct t10_alua_lu_gp_member *lu_gp_mem;
203 struct t10_alua_tg_pt_gp *tg_pt_gp; 203 struct t10_alua_tg_pt_gp *tg_pt_gp;
204 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; 204 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
205 unsigned char *prod = &dev->se_sub_dev->t10_wwn.model[0]; 205 unsigned char *prod = &dev->se_sub_dev->t10_wwn.model[0];
206 u32 prod_len; 206 u32 prod_len;
207 u32 unit_serial_len, off = 0; 207 u32 unit_serial_len, off = 0;
208 u16 len = 0, id_len; 208 u16 len = 0, id_len;
209 209
210 off = 4; 210 off = 4;
211 211
212 /* 212 /*
213 * NAA IEEE Registered Extended Assigned designator format, see 213 * NAA IEEE Registered Extended Assigned designator format, see
214 * spc4r17 section 7.7.3.6.5 214 * spc4r17 section 7.7.3.6.5
215 * 215 *
216 * We depend upon a target_core_mod/ConfigFS provided 216 * We depend upon a target_core_mod/ConfigFS provided
217 * /sys/kernel/config/target/core/$HBA/$DEV/wwn/vpd_unit_serial 217 * /sys/kernel/config/target/core/$HBA/$DEV/wwn/vpd_unit_serial
218 * value in order to return the NAA id. 218 * value in order to return the NAA id.
219 */ 219 */
220 if (!(dev->se_sub_dev->su_dev_flags & SDF_EMULATED_VPD_UNIT_SERIAL)) 220 if (!(dev->se_sub_dev->su_dev_flags & SDF_EMULATED_VPD_UNIT_SERIAL))
221 goto check_t10_vend_desc; 221 goto check_t10_vend_desc;
222 222
223 if (off + 20 > cmd->data_length) 223 if (off + 20 > cmd->data_length)
224 goto check_t10_vend_desc; 224 goto check_t10_vend_desc;
225 225
226 /* CODE SET == Binary */ 226 /* CODE SET == Binary */
227 buf[off++] = 0x1; 227 buf[off++] = 0x1;
228 228
229 /* Set ASSOCIATION == addressed logical unit: 0)b */ 229 /* Set ASSOCIATION == addressed logical unit: 0)b */
230 buf[off] = 0x00; 230 buf[off] = 0x00;
231 231
232 /* Identifier/Designator type == NAA identifier */ 232 /* Identifier/Designator type == NAA identifier */
233 buf[off++] |= 0x3; 233 buf[off++] |= 0x3;
234 off++; 234 off++;
235 235
236 /* Identifier/Designator length */ 236 /* Identifier/Designator length */
237 buf[off++] = 0x10; 237 buf[off++] = 0x10;
238 238
239 /* 239 /*
240 * Start NAA IEEE Registered Extended Identifier/Designator 240 * Start NAA IEEE Registered Extended Identifier/Designator
241 */ 241 */
242 buf[off++] = (0x6 << 4); 242 buf[off++] = (0x6 << 4);
243 243
244 /* 244 /*
245 * Use OpenFabrics IEEE Company ID: 00 14 05 245 * Use OpenFabrics IEEE Company ID: 00 14 05
246 */ 246 */
247 buf[off++] = 0x01; 247 buf[off++] = 0x01;
248 buf[off++] = 0x40; 248 buf[off++] = 0x40;
249 buf[off] = (0x5 << 4); 249 buf[off] = (0x5 << 4);
250 250
251 /* 251 /*
252 * Return ConfigFS Unit Serial Number information for 252 * Return ConfigFS Unit Serial Number information for
253 * VENDOR_SPECIFIC_IDENTIFIER and 253 * VENDOR_SPECIFIC_IDENTIFIER and
254 * VENDOR_SPECIFIC_IDENTIFIER_EXTENSION 254 * VENDOR_SPECIFIC_IDENTIFIER_EXTENSION
255 */ 255 */
256 target_parse_naa_6h_vendor_specific(dev, &buf[off]); 256 target_parse_naa_6h_vendor_specific(dev, &buf[off]);
257 257
258 len = 20; 258 len = 20;
259 off = (len + 4); 259 off = (len + 4);
260 260
261 check_t10_vend_desc: 261 check_t10_vend_desc:
262 /* 262 /*
263 * T10 Vendor Identifier Page, see spc4r17 section 7.7.3.4 263 * T10 Vendor Identifier Page, see spc4r17 section 7.7.3.4
264 */ 264 */
265 id_len = 8; /* For Vendor field */ 265 id_len = 8; /* For Vendor field */
266 prod_len = 4; /* For VPD Header */ 266 prod_len = 4; /* For VPD Header */
267 prod_len += 8; /* For Vendor field */ 267 prod_len += 8; /* For Vendor field */
268 prod_len += strlen(prod); 268 prod_len += strlen(prod);
269 prod_len++; /* For : */ 269 prod_len++; /* For : */
270 270
271 if (dev->se_sub_dev->su_dev_flags & 271 if (dev->se_sub_dev->su_dev_flags &
272 SDF_EMULATED_VPD_UNIT_SERIAL) { 272 SDF_EMULATED_VPD_UNIT_SERIAL) {
273 unit_serial_len = 273 unit_serial_len =
274 strlen(&dev->se_sub_dev->t10_wwn.unit_serial[0]); 274 strlen(&dev->se_sub_dev->t10_wwn.unit_serial[0]);
275 unit_serial_len++; /* For NULL Terminator */ 275 unit_serial_len++; /* For NULL Terminator */
276 276
277 if ((len + (id_len + 4) + 277 if ((len + (id_len + 4) +
278 (prod_len + unit_serial_len)) > 278 (prod_len + unit_serial_len)) >
279 cmd->data_length) { 279 cmd->data_length) {
280 len += (prod_len + unit_serial_len); 280 len += (prod_len + unit_serial_len);
281 goto check_port; 281 goto check_port;
282 } 282 }
283 id_len += sprintf((unsigned char *)&buf[off+12], 283 id_len += sprintf((unsigned char *)&buf[off+12],
284 "%s:%s", prod, 284 "%s:%s", prod,
285 &dev->se_sub_dev->t10_wwn.unit_serial[0]); 285 &dev->se_sub_dev->t10_wwn.unit_serial[0]);
286 } 286 }
287 buf[off] = 0x2; /* ASCII */ 287 buf[off] = 0x2; /* ASCII */
288 buf[off+1] = 0x1; /* T10 Vendor ID */ 288 buf[off+1] = 0x1; /* T10 Vendor ID */
289 buf[off+2] = 0x0; 289 buf[off+2] = 0x0;
290 memcpy((unsigned char *)&buf[off+4], "LIO-ORG", 8); 290 memcpy((unsigned char *)&buf[off+4], "LIO-ORG", 8);
291 /* Extra Byte for NULL Terminator */ 291 /* Extra Byte for NULL Terminator */
292 id_len++; 292 id_len++;
293 /* Identifier Length */ 293 /* Identifier Length */
294 buf[off+3] = id_len; 294 buf[off+3] = id_len;
295 /* Header size for Designation descriptor */ 295 /* Header size for Designation descriptor */
296 len += (id_len + 4); 296 len += (id_len + 4);
297 off += (id_len + 4); 297 off += (id_len + 4);
298 /* 298 /*
299 * struct se_port is only set for INQUIRY VPD=1 through $FABRIC_MOD 299 * struct se_port is only set for INQUIRY VPD=1 through $FABRIC_MOD
300 */ 300 */
301 check_port: 301 check_port:
302 port = lun->lun_sep; 302 port = lun->lun_sep;
303 if (port) { 303 if (port) {
304 struct t10_alua_lu_gp *lu_gp; 304 struct t10_alua_lu_gp *lu_gp;
305 u32 padding, scsi_name_len; 305 u32 padding, scsi_name_len;
306 u16 lu_gp_id = 0; 306 u16 lu_gp_id = 0;
307 u16 tg_pt_gp_id = 0; 307 u16 tg_pt_gp_id = 0;
308 u16 tpgt; 308 u16 tpgt;
309 309
310 tpg = port->sep_tpg; 310 tpg = port->sep_tpg;
311 /* 311 /*
312 * Relative target port identifier, see spc4r17 312 * Relative target port identifier, see spc4r17
313 * section 7.7.3.7 313 * section 7.7.3.7
314 * 314 *
315 * Get the PROTOCOL IDENTIFIER as defined by spc4r17 315 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
316 * section 7.5.1 Table 362 316 * section 7.5.1 Table 362
317 */ 317 */
318 if (((len + 4) + 8) > cmd->data_length) { 318 if (((len + 4) + 8) > cmd->data_length) {
319 len += 8; 319 len += 8;
320 goto check_tpgi; 320 goto check_tpgi;
321 } 321 }
322 buf[off] = 322 buf[off] =
323 (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4); 323 (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
324 buf[off++] |= 0x1; /* CODE SET == Binary */ 324 buf[off++] |= 0x1; /* CODE SET == Binary */
325 buf[off] = 0x80; /* Set PIV=1 */ 325 buf[off] = 0x80; /* Set PIV=1 */
326 /* Set ASSOCIATION == target port: 01b */ 326 /* Set ASSOCIATION == target port: 01b */
327 buf[off] |= 0x10; 327 buf[off] |= 0x10;
328 /* DESIGNATOR TYPE == Relative target port identifier */ 328 /* DESIGNATOR TYPE == Relative target port identifier */
329 buf[off++] |= 0x4; 329 buf[off++] |= 0x4;
330 off++; /* Skip over Reserved */ 330 off++; /* Skip over Reserved */
331 buf[off++] = 4; /* DESIGNATOR LENGTH */ 331 buf[off++] = 4; /* DESIGNATOR LENGTH */
332 /* Skip over Obsolete field in RTPI payload 332 /* Skip over Obsolete field in RTPI payload
333 * in Table 472 */ 333 * in Table 472 */
334 off += 2; 334 off += 2;
335 buf[off++] = ((port->sep_rtpi >> 8) & 0xff); 335 buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
336 buf[off++] = (port->sep_rtpi & 0xff); 336 buf[off++] = (port->sep_rtpi & 0xff);
337 len += 8; /* Header size + Designation descriptor */ 337 len += 8; /* Header size + Designation descriptor */
338 /* 338 /*
339 * Target port group identifier, see spc4r17 339 * Target port group identifier, see spc4r17
340 * section 7.7.3.8 340 * section 7.7.3.8
341 * 341 *
342 * Get the PROTOCOL IDENTIFIER as defined by spc4r17 342 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
343 * section 7.5.1 Table 362 343 * section 7.5.1 Table 362
344 */ 344 */
345 check_tpgi: 345 check_tpgi:
346 if (dev->se_sub_dev->t10_alua.alua_type != 346 if (dev->se_sub_dev->t10_alua.alua_type !=
347 SPC3_ALUA_EMULATED) 347 SPC3_ALUA_EMULATED)
348 goto check_scsi_name; 348 goto check_scsi_name;
349 349
350 if (((len + 4) + 8) > cmd->data_length) { 350 if (((len + 4) + 8) > cmd->data_length) {
351 len += 8; 351 len += 8;
352 goto check_lu_gp; 352 goto check_lu_gp;
353 } 353 }
354 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; 354 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
355 if (!tg_pt_gp_mem) 355 if (!tg_pt_gp_mem)
356 goto check_lu_gp; 356 goto check_lu_gp;
357 357
358 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 358 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
359 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; 359 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
360 if (!tg_pt_gp) { 360 if (!tg_pt_gp) {
361 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 361 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
362 goto check_lu_gp; 362 goto check_lu_gp;
363 } 363 }
364 tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id; 364 tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id;
365 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 365 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
366 366
367 buf[off] = 367 buf[off] =
368 (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4); 368 (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
369 buf[off++] |= 0x1; /* CODE SET == Binary */ 369 buf[off++] |= 0x1; /* CODE SET == Binary */
370 buf[off] = 0x80; /* Set PIV=1 */ 370 buf[off] = 0x80; /* Set PIV=1 */
371 /* Set ASSOCIATION == target port: 01b */ 371 /* Set ASSOCIATION == target port: 01b */
372 buf[off] |= 0x10; 372 buf[off] |= 0x10;
373 /* DESIGNATOR TYPE == Target port group identifier */ 373 /* DESIGNATOR TYPE == Target port group identifier */
374 buf[off++] |= 0x5; 374 buf[off++] |= 0x5;
375 off++; /* Skip over Reserved */ 375 off++; /* Skip over Reserved */
376 buf[off++] = 4; /* DESIGNATOR LENGTH */ 376 buf[off++] = 4; /* DESIGNATOR LENGTH */
377 off += 2; /* Skip over Reserved Field */ 377 off += 2; /* Skip over Reserved Field */
378 buf[off++] = ((tg_pt_gp_id >> 8) & 0xff); 378 buf[off++] = ((tg_pt_gp_id >> 8) & 0xff);
379 buf[off++] = (tg_pt_gp_id & 0xff); 379 buf[off++] = (tg_pt_gp_id & 0xff);
380 len += 8; /* Header size + Designation descriptor */ 380 len += 8; /* Header size + Designation descriptor */
381 /* 381 /*
382 * Logical Unit Group identifier, see spc4r17 382 * Logical Unit Group identifier, see spc4r17
383 * section 7.7.3.8 383 * section 7.7.3.8
384 */ 384 */
385 check_lu_gp: 385 check_lu_gp:
386 if (((len + 4) + 8) > cmd->data_length) { 386 if (((len + 4) + 8) > cmd->data_length) {
387 len += 8; 387 len += 8;
388 goto check_scsi_name; 388 goto check_scsi_name;
389 } 389 }
390 lu_gp_mem = dev->dev_alua_lu_gp_mem; 390 lu_gp_mem = dev->dev_alua_lu_gp_mem;
391 if (!lu_gp_mem) 391 if (!lu_gp_mem)
392 goto check_scsi_name; 392 goto check_scsi_name;
393 393
394 spin_lock(&lu_gp_mem->lu_gp_mem_lock); 394 spin_lock(&lu_gp_mem->lu_gp_mem_lock);
395 lu_gp = lu_gp_mem->lu_gp; 395 lu_gp = lu_gp_mem->lu_gp;
396 if (!lu_gp) { 396 if (!lu_gp) {
397 spin_unlock(&lu_gp_mem->lu_gp_mem_lock); 397 spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
398 goto check_scsi_name; 398 goto check_scsi_name;
399 } 399 }
400 lu_gp_id = lu_gp->lu_gp_id; 400 lu_gp_id = lu_gp->lu_gp_id;
401 spin_unlock(&lu_gp_mem->lu_gp_mem_lock); 401 spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
402 402
403 buf[off++] |= 0x1; /* CODE SET == Binary */ 403 buf[off++] |= 0x1; /* CODE SET == Binary */
404 /* DESIGNATOR TYPE == Logical Unit Group identifier */ 404 /* DESIGNATOR TYPE == Logical Unit Group identifier */
405 buf[off++] |= 0x6; 405 buf[off++] |= 0x6;
406 off++; /* Skip over Reserved */ 406 off++; /* Skip over Reserved */
407 buf[off++] = 4; /* DESIGNATOR LENGTH */ 407 buf[off++] = 4; /* DESIGNATOR LENGTH */
408 off += 2; /* Skip over Reserved Field */ 408 off += 2; /* Skip over Reserved Field */
409 buf[off++] = ((lu_gp_id >> 8) & 0xff); 409 buf[off++] = ((lu_gp_id >> 8) & 0xff);
410 buf[off++] = (lu_gp_id & 0xff); 410 buf[off++] = (lu_gp_id & 0xff);
411 len += 8; /* Header size + Designation descriptor */ 411 len += 8; /* Header size + Designation descriptor */
412 /* 412 /*
413 * SCSI name string designator, see spc4r17 413 * SCSI name string designator, see spc4r17
414 * section 7.7.3.11 414 * section 7.7.3.11
415 * 415 *
416 * Get the PROTOCOL IDENTIFIER as defined by spc4r17 416 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
417 * section 7.5.1 Table 362 417 * section 7.5.1 Table 362
418 */ 418 */
419 check_scsi_name: 419 check_scsi_name:
420 scsi_name_len = strlen(tpg->se_tpg_tfo->tpg_get_wwn(tpg)); 420 scsi_name_len = strlen(tpg->se_tpg_tfo->tpg_get_wwn(tpg));
421 /* UTF-8 ",t,0x<16-bit TPGT>" + NULL Terminator */ 421 /* UTF-8 ",t,0x<16-bit TPGT>" + NULL Terminator */
422 scsi_name_len += 10; 422 scsi_name_len += 10;
423 /* Check for 4-byte padding */ 423 /* Check for 4-byte padding */
424 padding = ((-scsi_name_len) & 3); 424 padding = ((-scsi_name_len) & 3);
425 if (padding != 0) 425 if (padding != 0)
426 scsi_name_len += padding; 426 scsi_name_len += padding;
427 /* Header size + Designation descriptor */ 427 /* Header size + Designation descriptor */
428 scsi_name_len += 4; 428 scsi_name_len += 4;
429 429
430 if (((len + 4) + scsi_name_len) > cmd->data_length) { 430 if (((len + 4) + scsi_name_len) > cmd->data_length) {
431 len += scsi_name_len; 431 len += scsi_name_len;
432 goto set_len; 432 goto set_len;
433 } 433 }
434 buf[off] = 434 buf[off] =
435 (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4); 435 (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
436 buf[off++] |= 0x3; /* CODE SET == UTF-8 */ 436 buf[off++] |= 0x3; /* CODE SET == UTF-8 */
437 buf[off] = 0x80; /* Set PIV=1 */ 437 buf[off] = 0x80; /* Set PIV=1 */
438 /* Set ASSOCIATION == target port: 01b */ 438 /* Set ASSOCIATION == target port: 01b */
439 buf[off] |= 0x10; 439 buf[off] |= 0x10;
440 /* DESIGNATOR TYPE == SCSI name string */ 440 /* DESIGNATOR TYPE == SCSI name string */
441 buf[off++] |= 0x8; 441 buf[off++] |= 0x8;
442 off += 2; /* Skip over Reserved and length */ 442 off += 2; /* Skip over Reserved and length */
443 /* 443 /*
444 * SCSI name string identifer containing, $FABRIC_MOD 444 * SCSI name string identifer containing, $FABRIC_MOD
445 * dependent information. For LIO-Target and iSCSI 445 * dependent information. For LIO-Target and iSCSI
446 * Target Port, this means "<iSCSI name>,t,0x<TPGT> in 446 * Target Port, this means "<iSCSI name>,t,0x<TPGT> in
447 * UTF-8 encoding. 447 * UTF-8 encoding.
448 */ 448 */
449 tpgt = tpg->se_tpg_tfo->tpg_get_tag(tpg); 449 tpgt = tpg->se_tpg_tfo->tpg_get_tag(tpg);
450 scsi_name_len = sprintf(&buf[off], "%s,t,0x%04x", 450 scsi_name_len = sprintf(&buf[off], "%s,t,0x%04x",
451 tpg->se_tpg_tfo->tpg_get_wwn(tpg), tpgt); 451 tpg->se_tpg_tfo->tpg_get_wwn(tpg), tpgt);
452 scsi_name_len += 1 /* Include NULL terminator */; 452 scsi_name_len += 1 /* Include NULL terminator */;
453 /* 453 /*
454 * The null-terminated, null-padded (see 4.4.2) SCSI 454 * The null-terminated, null-padded (see 4.4.2) SCSI
455 * NAME STRING field contains a UTF-8 format string. 455 * NAME STRING field contains a UTF-8 format string.
456 * The number of bytes in the SCSI NAME STRING field 456 * The number of bytes in the SCSI NAME STRING field
457 * (i.e., the value in the DESIGNATOR LENGTH field) 457 * (i.e., the value in the DESIGNATOR LENGTH field)
458 * shall be no larger than 256 and shall be a multiple 458 * shall be no larger than 256 and shall be a multiple
459 * of four. 459 * of four.
460 */ 460 */
461 if (padding) 461 if (padding)
462 scsi_name_len += padding; 462 scsi_name_len += padding;
463 463
464 buf[off-1] = scsi_name_len; 464 buf[off-1] = scsi_name_len;
465 off += scsi_name_len; 465 off += scsi_name_len;
466 /* Header size + Designation descriptor */ 466 /* Header size + Designation descriptor */
467 len += (scsi_name_len + 4); 467 len += (scsi_name_len + 4);
468 } 468 }
469 set_len: 469 set_len:
470 buf[2] = ((len >> 8) & 0xff); 470 buf[2] = ((len >> 8) & 0xff);
471 buf[3] = (len & 0xff); /* Page Length for VPD 0x83 */ 471 buf[3] = (len & 0xff); /* Page Length for VPD 0x83 */
472 return 0; 472 return 0;
473 } 473 }
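
Every designation descriptor emitted above follows the same 4-byte header shape from spc4r17 section 7.7.3.1, sketched here for orientation:

    /* byte 0: PROTOCOL IDENTIFIER (bits 7-4) | CODE SET (bits 3-0)
     * byte 1: PIV (bit 7) | ASSOCIATION (bits 5-4) | DESIGNATOR TYPE (3-0)
     * byte 2: reserved
     * byte 3: DESIGNATOR LENGTH n
     * bytes 4..4+n-1: the designator itself
     */

Note that len is still advanced when a descriptor is skipped for lack of buffer space, so the PAGE LENGTH stored at set_len reports the full size needed and the initiator can retry with a larger allocation length.
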
474 474
475 /* Extended INQUIRY Data VPD Page */ 475 /* Extended INQUIRY Data VPD Page */
476 static int 476 static int
477 target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf) 477 target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
478 { 478 {
479 if (cmd->data_length < 60) 479 if (cmd->data_length < 60)
480 return 0; 480 return 0;
481 481
482 buf[3] = 0x3c; 482 buf[3] = 0x3c;
483 /* Set HEADSUP, ORDSUP, SIMPSUP */ 483 /* Set HEADSUP, ORDSUP, SIMPSUP */
484 buf[5] = 0x07; 484 buf[5] = 0x07;
485 485
486 /* If WriteCache emulation is enabled, set V_SUP */ 486 /* If WriteCache emulation is enabled, set V_SUP */
487 if (cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) 487 if (cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0)
488 buf[6] = 0x01; 488 buf[6] = 0x01;
489 return 0; 489 return 0;
490 } 490 }
491 491
492 /* Block Limits VPD page */ 492 /* Block Limits VPD page */
493 static int 493 static int
494 target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) 494 target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
495 { 495 {
496 struct se_device *dev = cmd->se_dev; 496 struct se_device *dev = cmd->se_dev;
497 int have_tp = 0; 497 int have_tp = 0;
498 498
499 /* 499 /*
500 * Following sbc3r22 section 6.5.3 Block Limits VPD page, when 500 * Following sbc3r22 section 6.5.3 Block Limits VPD page, when
501 * emulate_tpu=1 or emulate_tpws=1 we expect a 501 * emulate_tpu=1 or emulate_tpws=1 we expect a
502 * different page length for Thin Provisioning. 502 * different page length for Thin Provisioning.
503 */ 503 */
504 if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws) 504 if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
505 have_tp = 1; 505 have_tp = 1;
506 506
507 if (cmd->data_length < (0x10 + 4)) { 507 if (cmd->data_length < (0x10 + 4)) {
508 pr_debug("Received data_length: %u" 508 pr_debug("Received data_length: %u"
509 " too small for EVPD 0xb0\n", 509 " too small for EVPD 0xb0\n",
510 cmd->data_length); 510 cmd->data_length);
511 return -EINVAL; 511 return -EINVAL;
512 } 512 }
513 513
514 if (have_tp && cmd->data_length < (0x3c + 4)) { 514 if (have_tp && cmd->data_length < (0x3c + 4)) {
515 pr_debug("Received data_length: %u" 515 pr_debug("Received data_length: %u"
516 " too small for TPE=1 EVPD 0xb0\n", 516 " too small for TPE=1 EVPD 0xb0\n",
517 cmd->data_length); 517 cmd->data_length);
518 have_tp = 0; 518 have_tp = 0;
519 } 519 }
520 520
521 buf[0] = dev->transport->get_device_type(dev); 521 buf[0] = dev->transport->get_device_type(dev);
522 buf[3] = have_tp ? 0x3c : 0x10; 522 buf[3] = have_tp ? 0x3c : 0x10;
523 523
524 /* Set WSNZ to 1 */ 524 /* Set WSNZ to 1 */
525 buf[4] = 0x01; 525 buf[4] = 0x01;
526 526
527 /* 527 /*
528 * Set OPTIMAL TRANSFER LENGTH GRANULARITY 528 * Set OPTIMAL TRANSFER LENGTH GRANULARITY
529 */ 529 */
530 put_unaligned_be16(1, &buf[6]); 530 put_unaligned_be16(1, &buf[6]);
531 531
532 /* 532 /*
533 * Set MAXIMUM TRANSFER LENGTH 533 * Set MAXIMUM TRANSFER LENGTH
534 */ 534 */
535 put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_sectors, &buf[8]); 535 put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_sectors, &buf[8]);
536 536
537 /* 537 /*
538 * Set OPTIMAL TRANSFER LENGTH 538 * Set OPTIMAL TRANSFER LENGTH
539 */ 539 */
540 put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.optimal_sectors, &buf[12]); 540 put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.optimal_sectors, &buf[12]);
541 541
542 /* 542 /*
543 * Exit now if we don't support TP or the initiator sent a too 543 * Exit now if we don't support TP or the initiator sent a too
544 * short buffer. 544 * short buffer.
545 */ 545 */
546 if (!have_tp || cmd->data_length < (0x3c + 4)) 546 if (!have_tp || cmd->data_length < (0x3c + 4))
547 return 0; 547 return 0;
548 548
549 /* 549 /*
550 * Set MAXIMUM UNMAP LBA COUNT 550 * Set MAXIMUM UNMAP LBA COUNT
551 */ 551 */
552 put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count, &buf[20]); 552 put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count, &buf[20]);
553 553
554 /* 554 /*
555 * Set MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT 555 * Set MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT
556 */ 556 */
557 put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count, 557 put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count,
558 &buf[24]); 558 &buf[24]);
559 559
560 /* 560 /*
561 * Set OPTIMAL UNMAP GRANULARITY 561 * Set OPTIMAL UNMAP GRANULARITY
562 */ 562 */
563 put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.unmap_granularity, &buf[28]); 563 put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.unmap_granularity, &buf[28]);
564 564
565 /* 565 /*
566 * UNMAP GRANULARITY ALIGNMENT 566 * UNMAP GRANULARITY ALIGNMENT
567 */ 567 */
568 put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment, 568 put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment,
569 &buf[32]); 569 &buf[32]);
570 if (dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment != 0) 570 if (dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment != 0)
571 buf[32] |= 0x80; /* Set the UGAVALID bit */ 571 buf[32] |= 0x80; /* Set the UGAVALID bit */
572 572
573 return 0; 573 return 0;
574 } 574 }
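
All multi-byte fields in this page are stored big-endian through the put_unaligned_be16/32() helpers, which write the most significant byte first regardless of host endianness or buffer alignment. The offsets populated above, summarized:

    /* VPD 0xb0 (Block Limits) layout as filled in here:
     *   buf[6]  be16  OPTIMAL TRANSFER LENGTH GRANULARITY  = 1
     *   buf[8]  be32  MAXIMUM TRANSFER LENGTH              = max_sectors
     *   buf[12] be32  OPTIMAL TRANSFER LENGTH              = optimal_sectors
     *   buf[20] be32  MAXIMUM UNMAP LBA COUNT              (TP only)
     *   buf[24] be32  MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT (TP only)
     *   buf[28] be32  OPTIMAL UNMAP GRANULARITY            (TP only)
     *   buf[32] be32  UNMAP GRANULARITY ALIGNMENT, UGAVALID in bit 7 (TP only)
     */
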
575 575
576 /* Block Device Characteristics VPD page */ 576 /* Block Device Characteristics VPD page */
577 static int 577 static int
578 target_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf) 578 target_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf)
579 { 579 {
580 struct se_device *dev = cmd->se_dev; 580 struct se_device *dev = cmd->se_dev;
581 581
582 buf[0] = dev->transport->get_device_type(dev); 582 buf[0] = dev->transport->get_device_type(dev);
583 buf[3] = 0x3c; 583 buf[3] = 0x3c;
584 584
585 if (cmd->data_length >= 5 && 585 if (cmd->data_length >= 5 &&
586 dev->se_sub_dev->se_dev_attrib.is_nonrot) 586 dev->se_sub_dev->se_dev_attrib.is_nonrot)
587 buf[5] = 1; 587 buf[5] = 1;
588 588
589 return 0; 589 return 0;
590 } 590 }
591 591
592 /* Thin Provisioning VPD */ 592 /* Thin Provisioning VPD */
593 static int 593 static int
594 target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf) 594 target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
595 { 595 {
596 struct se_device *dev = cmd->se_dev; 596 struct se_device *dev = cmd->se_dev;
597 597
598 /* 598 /*
599 * From sbc3r22 section 6.5.4 Thin Provisioning VPD page: 599 * From sbc3r22 section 6.5.4 Thin Provisioning VPD page:
600 * 600 *
601 * The PAGE LENGTH field is defined in SPC-4. If the DP bit is set to 601 * The PAGE LENGTH field is defined in SPC-4. If the DP bit is set to
602 * zero, then the page length shall be set to 0004h. If the DP bit 602 * zero, then the page length shall be set to 0004h. If the DP bit
603 * is set to one, then the page length shall be set to the value 603 * is set to one, then the page length shall be set to the value
604 * defined in table 162. 604 * defined in table 162.
605 */ 605 */
606 buf[0] = dev->transport->get_device_type(dev); 606 buf[0] = dev->transport->get_device_type(dev);
607 607
608 /* 608 /*
609 * Set Hardcoded length mentioned above for DP=0 609 * Set Hardcoded length mentioned above for DP=0
610 */ 610 */
611 put_unaligned_be16(0x0004, &buf[2]); 611 put_unaligned_be16(0x0004, &buf[2]);
612 612
613 /* 613 /*
614 * The THRESHOLD EXPONENT field indicates the threshold set size in 614 * The THRESHOLD EXPONENT field indicates the threshold set size in
615 * LBAs as a power of 2 (i.e., the threshold set size is equal to 615 * LBAs as a power of 2 (i.e., the threshold set size is equal to
616 * 2^(threshold exponent)). 616 * 2^(threshold exponent)).
617 * 617 *
618 * Note that this is currently set to 0x00 as mkp says it will be 618 * Note that this is currently set to 0x00 as mkp says it will be
619 * changing again. We can enable this once it has settled in T10 619 * changing again. We can enable this once it has settled in T10
620 * and is actually used by Linux/SCSI ML code. 620 * and is actually used by Linux/SCSI ML code.
621 */ 621 */
622 buf[4] = 0x00; 622 buf[4] = 0x00;
623 623
624 /* 624 /*
625 * A TPU bit set to one indicates that the device server supports 625 * A TPU bit set to one indicates that the device server supports
626 * the UNMAP command (see 5.25). A TPU bit set to zero indicates 626 * the UNMAP command (see 5.25). A TPU bit set to zero indicates
627 * that the device server does not support the UNMAP command. 627 * that the device server does not support the UNMAP command.
628 */ 628 */
629 if (dev->se_sub_dev->se_dev_attrib.emulate_tpu != 0) 629 if (dev->se_sub_dev->se_dev_attrib.emulate_tpu != 0)
630 buf[5] = 0x80; 630 buf[5] = 0x80;
631 631
632 /* 632 /*
633 * A TPWS bit set to one indicates that the device server supports 633 * A TPWS bit set to one indicates that the device server supports
634 * the use of the WRITE SAME (16) command (see 5.42) to unmap LBAs. 634 * the use of the WRITE SAME (16) command (see 5.42) to unmap LBAs.
635 * A TPWS bit set to zero indicates that the device server does not 635 * A TPWS bit set to zero indicates that the device server does not
636 * support the use of the WRITE SAME (16) command to unmap LBAs. 636 * support the use of the WRITE SAME (16) command to unmap LBAs.
637 */ 637 */
638 if (dev->se_sub_dev->se_dev_attrib.emulate_tpws != 0) 638 if (dev->se_sub_dev->se_dev_attrib.emulate_tpws != 0)
639 buf[5] |= 0x40; 639 buf[5] |= 0x40;
640 640
641 return 0; 641 return 0;
642 } 642 }
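
For reference, byte 5 of the Thin Provisioning VPD page built above packs TPU into bit 7 and TPWS into bit 6. A minimal user-space sketch of decoding that byte from an INQUIRY 0xb2 response, assuming the sbc3r22 layout (the helper name is hypothetical and not part of the target code):

    #include <stdio.h>

    /* Hypothetical user-space helper: decode byte 5 of the 0xb2 VPD page. */
    static void decode_tp_vpd_byte5(const unsigned char *vpd)
    {
            int tpu  = (vpd[5] & 0x80) != 0; /* device server supports UNMAP */
            int tpws = (vpd[5] & 0x40) != 0; /* supports WRITE SAME (16) unmap */

            printf("TPU=%d TPWS=%d\n", tpu, tpws);
    }
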
643 643
644 static int 644 static int
645 target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf); 645 target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf);
646 646
647 static struct { 647 static struct {
648 uint8_t page; 648 uint8_t page;
649 int (*emulate)(struct se_cmd *, unsigned char *); 649 int (*emulate)(struct se_cmd *, unsigned char *);
650 } evpd_handlers[] = { 650 } evpd_handlers[] = {
651 { .page = 0x00, .emulate = target_emulate_evpd_00 }, 651 { .page = 0x00, .emulate = target_emulate_evpd_00 },
652 { .page = 0x80, .emulate = target_emulate_evpd_80 }, 652 { .page = 0x80, .emulate = target_emulate_evpd_80 },
653 { .page = 0x83, .emulate = target_emulate_evpd_83 }, 653 { .page = 0x83, .emulate = target_emulate_evpd_83 },
654 { .page = 0x86, .emulate = target_emulate_evpd_86 }, 654 { .page = 0x86, .emulate = target_emulate_evpd_86 },
655 { .page = 0xb0, .emulate = target_emulate_evpd_b0 }, 655 { .page = 0xb0, .emulate = target_emulate_evpd_b0 },
656 { .page = 0xb1, .emulate = target_emulate_evpd_b1 }, 656 { .page = 0xb1, .emulate = target_emulate_evpd_b1 },
657 { .page = 0xb2, .emulate = target_emulate_evpd_b2 }, 657 { .page = 0xb2, .emulate = target_emulate_evpd_b2 },
658 }; 658 };
659 659
660 /* supported vital product data pages */ 660 /* supported vital product data pages */
661 static int 661 static int
662 target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf) 662 target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
663 { 663 {
664 int p; 664 int p;
665 665
666 if (cmd->data_length < 8) 666 if (cmd->data_length < 8)
667 return 0; 667 return 0;
668 /* 668 /*
669 * Only report the INQUIRY EVPD=1 pages after a valid NAA 669 * Only report the INQUIRY EVPD=1 pages after a valid NAA
670 * Registered Extended LUN WWN has been set via ConfigFS 670 * Registered Extended LUN WWN has been set via ConfigFS
671 * during device creation/restart. 671 * during device creation/restart.
672 */ 672 */
673 if (cmd->se_dev->se_sub_dev->su_dev_flags & 673 if (cmd->se_dev->se_sub_dev->su_dev_flags &
674 SDF_EMULATED_VPD_UNIT_SERIAL) { 674 SDF_EMULATED_VPD_UNIT_SERIAL) {
675 buf[3] = ARRAY_SIZE(evpd_handlers); 675 buf[3] = ARRAY_SIZE(evpd_handlers);
676 for (p = 0; p < min_t(int, ARRAY_SIZE(evpd_handlers), 676 for (p = 0; p < min_t(int, ARRAY_SIZE(evpd_handlers),
677 cmd->data_length - 4); ++p) 677 cmd->data_length - 4); ++p)
678 buf[p + 4] = evpd_handlers[p].page; 678 buf[p + 4] = evpd_handlers[p].page;
679 } 679 }
680 680
681 return 0; 681 return 0;
682 } 682 }
683 683
684 int target_emulate_inquiry(struct se_task *task) 684 int target_emulate_inquiry(struct se_task *task)
685 { 685 {
686 struct se_cmd *cmd = task->task_se_cmd; 686 struct se_cmd *cmd = task->task_se_cmd;
687 struct se_device *dev = cmd->se_dev; 687 struct se_device *dev = cmd->se_dev;
688 unsigned char *buf; 688 unsigned char *buf;
689 unsigned char *cdb = cmd->t_task_cdb; 689 unsigned char *cdb = cmd->t_task_cdb;
690 int p, ret; 690 int p, ret;
691 691
692 if (!(cdb[1] & 0x1)) { 692 if (!(cdb[1] & 0x1)) {
693 ret = target_emulate_inquiry_std(cmd); 693 ret = target_emulate_inquiry_std(cmd);
694 goto out; 694 goto out;
695 } 695 }
696 696
697 /* 697 /*
698 * Make sure we at least have 4 bytes of INQUIRY response 698 * Make sure we at least have 4 bytes of INQUIRY response
699 * payload for 0x00 going back for EVPD=1. Note that 0x80 699 * payload for 0x00 going back for EVPD=1. Note that 0x80
700 * and 0x83 will check for enough payload data length and 700 * and 0x83 will check for enough payload data length and
701 * jump to set_len: label when there is not enough inquiry EVPD 701 * jump to set_len: label when there is not enough inquiry EVPD
702 * payload length left for the next outgoing EVPD metadata 702 * payload length left for the next outgoing EVPD metadata
703 */ 703 */
704 if (cmd->data_length < 4) { 704 if (cmd->data_length < 4) {
705 pr_err("SCSI Inquiry payload length: %u" 705 pr_err("SCSI Inquiry payload length: %u"
706 " too small for EVPD=1\n", cmd->data_length); 706 " too small for EVPD=1\n", cmd->data_length);
707 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; 707 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
708 return -EINVAL; 708 return -EINVAL;
709 } 709 }
710 710
711 buf = transport_kmap_first_data_page(cmd); 711 buf = transport_kmap_first_data_page(cmd);
712 712
713 buf[0] = dev->transport->get_device_type(dev); 713 buf[0] = dev->transport->get_device_type(dev);
714 714
715 for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p) { 715 for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p) {
716 if (cdb[2] == evpd_handlers[p].page) { 716 if (cdb[2] == evpd_handlers[p].page) {
717 buf[1] = cdb[2]; 717 buf[1] = cdb[2];
718 ret = evpd_handlers[p].emulate(cmd, buf); 718 ret = evpd_handlers[p].emulate(cmd, buf);
719 goto out_unmap; 719 goto out_unmap;
720 } 720 }
721 } 721 }
722 722
723 pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]); 723 pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
724 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; 724 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
725 ret = -EINVAL; 725 ret = -EINVAL;
726 726
727 out_unmap: 727 out_unmap:
728 transport_kunmap_first_data_page(cmd); 728 transport_kunmap_first_data_page(cmd);
729 out: 729 out:
730 if (!ret) { 730 if (!ret) {
731 task->task_scsi_status = GOOD; 731 task->task_scsi_status = GOOD;
732 transport_complete_task(task, 1); 732 transport_complete_task(task, 1);
733 } 733 }
734 return ret; 734 return ret;
735 } 735 }
736 736
737 int target_emulate_readcapacity(struct se_task *task) 737 int target_emulate_readcapacity(struct se_task *task)
738 { 738 {
739 struct se_cmd *cmd = task->task_se_cmd; 739 struct se_cmd *cmd = task->task_se_cmd;
740 struct se_device *dev = cmd->se_dev; 740 struct se_device *dev = cmd->se_dev;
741 unsigned char *buf; 741 unsigned char *buf;
742 unsigned long long blocks_long = dev->transport->get_blocks(dev); 742 unsigned long long blocks_long = dev->transport->get_blocks(dev);
743 u32 blocks; 743 u32 blocks;
744 744
745 if (blocks_long >= 0x00000000ffffffff) 745 if (blocks_long >= 0x00000000ffffffff)
746 blocks = 0xffffffff; 746 blocks = 0xffffffff;
747 else 747 else
748 blocks = (u32)blocks_long; 748 blocks = (u32)blocks_long;
749 749
750 buf = transport_kmap_first_data_page(cmd); 750 buf = transport_kmap_first_data_page(cmd);
751 751
752 buf[0] = (blocks >> 24) & 0xff; 752 buf[0] = (blocks >> 24) & 0xff;
753 buf[1] = (blocks >> 16) & 0xff; 753 buf[1] = (blocks >> 16) & 0xff;
754 buf[2] = (blocks >> 8) & 0xff; 754 buf[2] = (blocks >> 8) & 0xff;
755 buf[3] = blocks & 0xff; 755 buf[3] = blocks & 0xff;
756 buf[4] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff; 756 buf[4] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff;
757 buf[5] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff; 757 buf[5] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff;
758 buf[6] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff; 758 buf[6] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff;
759 buf[7] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff; 759 buf[7] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff;
760 /* 760 /*
761 * Set max 32-bit blocks to signal SERVICE ACTION READ_CAPACITY_16 761 * Set max 32-bit blocks to signal SERVICE ACTION READ_CAPACITY_16
762 */ 762 */
763 if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws) 763 if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
764 put_unaligned_be32(0xFFFFFFFF, &buf[0]); 764 put_unaligned_be32(0xFFFFFFFF, &buf[0]);
765 765
766 transport_kunmap_first_data_page(cmd); 766 transport_kunmap_first_data_page(cmd);
767 767
768 task->task_scsi_status = GOOD; 768 task->task_scsi_status = GOOD;
769 transport_complete_task(task, 1); 769 transport_complete_task(task, 1);
770 return 0; 770 return 0;
771 } 771 }
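
The shift-and-mask stores above emit the block count and block size big-endian by hand; the same eight bytes could equally be written with the put_unaligned_be32() helper already used at the end of the function. An equivalent formulation, shown only for clarity:

    put_unaligned_be32(blocks, &buf[0]);
    put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.block_size, &buf[4]);
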
772 772
773 int target_emulate_readcapacity_16(struct se_task *task) 773 int target_emulate_readcapacity_16(struct se_task *task)
774 { 774 {
775 struct se_cmd *cmd = task->task_se_cmd; 775 struct se_cmd *cmd = task->task_se_cmd;
776 struct se_device *dev = cmd->se_dev; 776 struct se_device *dev = cmd->se_dev;
777 unsigned char *buf; 777 unsigned char *buf;
778 unsigned long long blocks = dev->transport->get_blocks(dev); 778 unsigned long long blocks = dev->transport->get_blocks(dev);
779 779
780 buf = transport_kmap_first_data_page(cmd); 780 buf = transport_kmap_first_data_page(cmd);
781 781
782 buf[0] = (blocks >> 56) & 0xff; 782 buf[0] = (blocks >> 56) & 0xff;
783 buf[1] = (blocks >> 48) & 0xff; 783 buf[1] = (blocks >> 48) & 0xff;
784 buf[2] = (blocks >> 40) & 0xff; 784 buf[2] = (blocks >> 40) & 0xff;
785 buf[3] = (blocks >> 32) & 0xff; 785 buf[3] = (blocks >> 32) & 0xff;
786 buf[4] = (blocks >> 24) & 0xff; 786 buf[4] = (blocks >> 24) & 0xff;
787 buf[5] = (blocks >> 16) & 0xff; 787 buf[5] = (blocks >> 16) & 0xff;
788 buf[6] = (blocks >> 8) & 0xff; 788 buf[6] = (blocks >> 8) & 0xff;
789 buf[7] = blocks & 0xff; 789 buf[7] = blocks & 0xff;
790 buf[8] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff; 790 buf[8] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff;
791 buf[9] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff; 791 buf[9] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff;
792 buf[10] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff; 792 buf[10] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff;
793 buf[11] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff; 793 buf[11] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff;
794 /* 794 /*
795 * Set Thin Provisioning Enable bit following sbc3r22 in section 795 * Set Thin Provisioning Enable bit following sbc3r22 in section
796 * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled. 796 * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
797 */ 797 */
798 if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws) 798 if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
799 buf[14] = 0x80; 799 buf[14] = 0x80;
800 800
801 transport_kunmap_first_data_page(cmd); 801 transport_kunmap_first_data_page(cmd);
802 802
803 task->task_scsi_status = GOOD; 803 task->task_scsi_status = GOOD;
804 transport_complete_task(task, 1); 804 transport_complete_task(task, 1);
805 return 0; 805 return 0;
806 } 806 }
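
On the initiator side, the parameter data filled in above carries the RETURNED LOGICAL BLOCK ADDRESS in bytes 0-7 and the TPE bit in bit 7 of byte 14. A hypothetical user-space decode, assuming at least 15 bytes of READ CAPACITY (16) response data:

    #include <stdint.h>

    /* Hypothetical decode of READ CAPACITY (16) parameter data (sbc3r22). */
    static uint64_t rc16_last_lba(const unsigned char *p)
    {
            uint64_t lba = 0;
            int i;

            for (i = 0; i < 8; i++)          /* bytes 0-7: big-endian LBA */
                    lba = (lba << 8) | p[i];
            return lba;
    }

    static int rc16_tpe(const unsigned char *p)
    {
            return (p[14] & 0x80) != 0;      /* Thin Provisioning Enabled */
    }
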
807 807
808 static int 808 static int
809 target_modesense_rwrecovery(unsigned char *p) 809 target_modesense_rwrecovery(unsigned char *p)
810 { 810 {
811 p[0] = 0x01; 811 p[0] = 0x01;
812 p[1] = 0x0a; 812 p[1] = 0x0a;
813 813
814 return 12; 814 return 12;
815 } 815 }
816 816
817 static int 817 static int
818 target_modesense_control(struct se_device *dev, unsigned char *p) 818 target_modesense_control(struct se_device *dev, unsigned char *p)
819 { 819 {
820 p[0] = 0x0a; 820 p[0] = 0x0a;
821 p[1] = 0x0a; 821 p[1] = 0x0a;
822 p[2] = 2; 822 p[2] = 2;
823 /* 823 /*
824 * From spc4r23, 7.4.7 Control mode page 824 * From spc4r23, 7.4.7 Control mode page
825 * 825 *
826 * The QUEUE ALGORITHM MODIFIER field (see table 368) specifies 826 * The QUEUE ALGORITHM MODIFIER field (see table 368) specifies
827 * restrictions on the algorithm used for reordering commands 827 * restrictions on the algorithm used for reordering commands
828 * having the SIMPLE task attribute (see SAM-4). 828 * having the SIMPLE task attribute (see SAM-4).
829 * 829 *
830 * Table 368 -- QUEUE ALGORITHM MODIFIER field 830 * Table 368 -- QUEUE ALGORITHM MODIFIER field
831 * Code Description 831 * Code Description
832 * 0h Restricted reordering 832 * 0h Restricted reordering
833 * 1h Unrestricted reordering allowed 833 * 1h Unrestricted reordering allowed
834 * 2h to 7h Reserved 834 * 2h to 7h Reserved
835 * 8h to Fh Vendor specific 835 * 8h to Fh Vendor specific
836 * 836 *
837 * A value of zero in the QUEUE ALGORITHM MODIFIER field specifies that 837 * A value of zero in the QUEUE ALGORITHM MODIFIER field specifies that
838 * the device server shall order the processing sequence of commands 838 * the device server shall order the processing sequence of commands
839 * having the SIMPLE task attribute such that data integrity is maintained 839 * having the SIMPLE task attribute such that data integrity is maintained
840 * for that I_T nexus (i.e., if the transmission of new SCSI transport protocol 840 * for that I_T nexus (i.e., if the transmission of new SCSI transport protocol
841 * requests is halted at any time, the final value of all data observable 841 * requests is halted at any time, the final value of all data observable
842 * on the medium shall be the same as if all the commands had been processed 842 * on the medium shall be the same as if all the commands had been processed
843 * with the ORDERED task attribute). 843 * with the ORDERED task attribute).
844 * 844 *
845 * A value of one in the QUEUE ALGORITHM MODIFIER field specifies that the 845 * A value of one in the QUEUE ALGORITHM MODIFIER field specifies that the
846 * device server may reorder the processing sequence of commands having the 846 * device server may reorder the processing sequence of commands having the
847 * SIMPLE task attribute in any manner. Any data integrity exposures related to 847 * SIMPLE task attribute in any manner. Any data integrity exposures related to
848 * command sequence order shall be explicitly handled by the application client 848 * command sequence order shall be explicitly handled by the application client
 849 * through the selection of appropriate commands and task attributes. 849 * through the selection of appropriate commands and task attributes.
850 */ 850 */
851 p[3] = (dev->se_sub_dev->se_dev_attrib.emulate_rest_reord == 1) ? 0x00 : 0x10; 851 p[3] = (dev->se_sub_dev->se_dev_attrib.emulate_rest_reord == 1) ? 0x00 : 0x10;
852 /* 852 /*
853 * From spc4r17, section 7.4.6 Control mode Page 853 * From spc4r17, section 7.4.6 Control mode Page
854 * 854 *
855 * Unit Attention interlocks control (UN_INTLCK_CTRL) to code 00b 855 * Unit Attention interlocks control (UN_INTLCK_CTRL) to code 00b
856 * 856 *
857 * 00b: The logical unit shall clear any unit attention condition 857 * 00b: The logical unit shall clear any unit attention condition
858 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION 858 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
859 * status and shall not establish a unit attention condition when a com- 859 * status and shall not establish a unit attention condition when a com-
860 * mand is completed with BUSY, TASK SET FULL, or RESERVATION CONFLICT 860 * mand is completed with BUSY, TASK SET FULL, or RESERVATION CONFLICT
861 * status. 861 * status.
862 * 862 *
863 * 10b: The logical unit shall not clear any unit attention condition 863 * 10b: The logical unit shall not clear any unit attention condition
864 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION 864 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
865 * status and shall not establish a unit attention condition when 865 * status and shall not establish a unit attention condition when
866 * a command is completed with BUSY, TASK SET FULL, or RESERVATION 866 * a command is completed with BUSY, TASK SET FULL, or RESERVATION
867 * CONFLICT status. 867 * CONFLICT status.
868 * 868 *
 869 * 11b: The logical unit shall not clear any unit attention condition 869 * 11b: The logical unit shall not clear any unit attention condition
870 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION 870 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
871 * status and shall establish a unit attention condition for the 871 * status and shall establish a unit attention condition for the
872 * initiator port associated with the I_T nexus on which the BUSY, 872 * initiator port associated with the I_T nexus on which the BUSY,
873 * TASK SET FULL, or RESERVATION CONFLICT status is being returned. 873 * TASK SET FULL, or RESERVATION CONFLICT status is being returned.
874 * Depending on the status, the additional sense code shall be set to 874 * Depending on the status, the additional sense code shall be set to
875 * PREVIOUS BUSY STATUS, PREVIOUS TASK SET FULL STATUS, or PREVIOUS 875 * PREVIOUS BUSY STATUS, PREVIOUS TASK SET FULL STATUS, or PREVIOUS
876 * RESERVATION CONFLICT STATUS. Until it is cleared by a REQUEST SENSE 876 * RESERVATION CONFLICT STATUS. Until it is cleared by a REQUEST SENSE
877 * command, a unit attention condition shall be established only once 877 * command, a unit attention condition shall be established only once
878 * for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless 878 * for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless
879 * to the number of commands completed with one of those status codes. 879 * to the number of commands completed with one of those status codes.
880 */ 880 */
881 p[4] = (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2) ? 0x30 : 881 p[4] = (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2) ? 0x30 :
882 (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00; 882 (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00;
883 /* 883 /*
884 * From spc4r17, section 7.4.6 Control mode Page 884 * From spc4r17, section 7.4.6 Control mode Page
885 * 885 *
886 * Task Aborted Status (TAS) bit set to zero. 886 * Task Aborted Status (TAS) bit set to zero.
887 * 887 *
888 * A task aborted status (TAS) bit set to zero specifies that aborted 888 * A task aborted status (TAS) bit set to zero specifies that aborted
889 * tasks shall be terminated by the device server without any response 889 * tasks shall be terminated by the device server without any response
890 * to the application client. A TAS bit set to one specifies that tasks 890 * to the application client. A TAS bit set to one specifies that tasks
891 * aborted by the actions of an I_T nexus other than the I_T nexus on 891 * aborted by the actions of an I_T nexus other than the I_T nexus on
892 * which the command was received shall be completed with TASK ABORTED 892 * which the command was received shall be completed with TASK ABORTED
893 * status (see SAM-4). 893 * status (see SAM-4).
894 */ 894 */
895 p[5] = (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? 0x40 : 0x00; 895 p[5] = (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? 0x40 : 0x00;
896 p[8] = 0xff; 896 p[8] = 0xff;
897 p[9] = 0xff; 897 p[9] = 0xff;
898 p[11] = 30; 898 p[11] = 30;
899 899
900 return 12; 900 return 12;
901 } 901 }
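
To make the encodings above concrete: the QUEUE ALGORITHM MODIFIER occupies bits 7:4 of byte 3, UN_INTLCK_CTRL bits 5:4 of byte 4, and TAS bit 6 of byte 5. A hypothetical decode of a returned Control mode page, mirroring the stores in this function:

    #include <stdio.h>

    /* Hypothetical: extract the fields set above (spc4r23, 7.4.7). */
    static void decode_control_page(const unsigned char *p)
    {
            int qam       = (p[3] >> 4) & 0x0f; /* 0h restricted, 1h unrestricted */
            int un_intlck = (p[4] >> 4) & 0x03; /* 00b, 10b or 11b as quoted above */
            int tas       = (p[5] >> 6) & 0x01; /* Task Aborted Status */

            printf("QAM=%d UN_INTLCK_CTRL=%d TAS=%d\n", qam, un_intlck, tas);
    }
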
902 902
903 static int 903 static int
904 target_modesense_caching(struct se_device *dev, unsigned char *p) 904 target_modesense_caching(struct se_device *dev, unsigned char *p)
905 { 905 {
906 p[0] = 0x08; 906 p[0] = 0x08;
907 p[1] = 0x12; 907 p[1] = 0x12;
908 if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) 908 if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0)
909 p[2] = 0x04; /* Write Cache Enable */ 909 p[2] = 0x04; /* Write Cache Enable */
910 p[12] = 0x20; /* Disabled Read Ahead */ 910 p[12] = 0x20; /* Disabled Read Ahead */
911 911
912 return 20; 912 return 20;
913 } 913 }
914 914
915 static void 915 static void
916 target_modesense_write_protect(unsigned char *buf, int type) 916 target_modesense_write_protect(unsigned char *buf, int type)
917 { 917 {
918 /* 918 /*
919 * I believe that the WP bit (bit 7) in the mode header is the same for 919 * I believe that the WP bit (bit 7) in the mode header is the same for
920 * all device types.. 920 * all device types..
921 */ 921 */
922 switch (type) { 922 switch (type) {
923 case TYPE_DISK: 923 case TYPE_DISK:
924 case TYPE_TAPE: 924 case TYPE_TAPE:
925 default: 925 default:
926 buf[0] |= 0x80; /* WP bit */ 926 buf[0] |= 0x80; /* WP bit */
927 break; 927 break;
928 } 928 }
929 } 929 }
930 930
931 static void 931 static void
932 target_modesense_dpofua(unsigned char *buf, int type) 932 target_modesense_dpofua(unsigned char *buf, int type)
933 { 933 {
934 switch (type) { 934 switch (type) {
935 case TYPE_DISK: 935 case TYPE_DISK:
936 buf[0] |= 0x10; /* DPOFUA bit */ 936 buf[0] |= 0x10; /* DPOFUA bit */
937 break; 937 break;
938 default: 938 default:
939 break; 939 break;
940 } 940 }
941 } 941 }
942 942
943 int target_emulate_modesense(struct se_task *task) 943 int target_emulate_modesense(struct se_task *task)
944 { 944 {
945 struct se_cmd *cmd = task->task_se_cmd; 945 struct se_cmd *cmd = task->task_se_cmd;
946 struct se_device *dev = cmd->se_dev; 946 struct se_device *dev = cmd->se_dev;
947 char *cdb = cmd->t_task_cdb; 947 char *cdb = cmd->t_task_cdb;
948 unsigned char *rbuf; 948 unsigned char *rbuf;
949 int type = dev->transport->get_device_type(dev); 949 int type = dev->transport->get_device_type(dev);
950 int ten = (cmd->t_task_cdb[0] == MODE_SENSE_10); 950 int ten = (cmd->t_task_cdb[0] == MODE_SENSE_10);
951 int offset = ten ? 8 : 4; 951 int offset = ten ? 8 : 4;
952 int length = 0; 952 int length = 0;
953 unsigned char buf[SE_MODE_PAGE_BUF]; 953 unsigned char buf[SE_MODE_PAGE_BUF];
954 954
955 memset(buf, 0, SE_MODE_PAGE_BUF); 955 memset(buf, 0, SE_MODE_PAGE_BUF);
956 956
957 switch (cdb[2] & 0x3f) { 957 switch (cdb[2] & 0x3f) {
958 case 0x01: 958 case 0x01:
959 length = target_modesense_rwrecovery(&buf[offset]); 959 length = target_modesense_rwrecovery(&buf[offset]);
960 break; 960 break;
961 case 0x08: 961 case 0x08:
962 length = target_modesense_caching(dev, &buf[offset]); 962 length = target_modesense_caching(dev, &buf[offset]);
963 break; 963 break;
964 case 0x0a: 964 case 0x0a:
965 length = target_modesense_control(dev, &buf[offset]); 965 length = target_modesense_control(dev, &buf[offset]);
966 break; 966 break;
967 case 0x3f: 967 case 0x3f:
968 length = target_modesense_rwrecovery(&buf[offset]); 968 length = target_modesense_rwrecovery(&buf[offset]);
969 length += target_modesense_caching(dev, &buf[offset+length]); 969 length += target_modesense_caching(dev, &buf[offset+length]);
970 length += target_modesense_control(dev, &buf[offset+length]); 970 length += target_modesense_control(dev, &buf[offset+length]);
971 break; 971 break;
972 default: 972 default:
973 pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n", 973 pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
974 cdb[2] & 0x3f, cdb[3]); 974 cdb[2] & 0x3f, cdb[3]);
975 cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE; 975 cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
976 return -EINVAL; 976 return -EINVAL;
977 } 977 }
978 offset += length; 978 offset += length;
979 979
980 if (ten) { 980 if (ten) {
981 offset -= 2; 981 offset -= 2;
982 buf[0] = (offset >> 8) & 0xff; 982 buf[0] = (offset >> 8) & 0xff;
983 buf[1] = offset & 0xff; 983 buf[1] = offset & 0xff;
984 984
985 if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) || 985 if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
986 (cmd->se_deve && 986 (cmd->se_deve &&
987 (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY))) 987 (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
988 target_modesense_write_protect(&buf[3], type); 988 target_modesense_write_protect(&buf[3], type);
989 989
990 if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) && 990 if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) &&
991 (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0)) 991 (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0))
992 target_modesense_dpofua(&buf[3], type); 992 target_modesense_dpofua(&buf[3], type);
993 993
994 if ((offset + 2) > cmd->data_length) 994 if ((offset + 2) > cmd->data_length)
995 offset = cmd->data_length; 995 offset = cmd->data_length;
996 996
997 } else { 997 } else {
998 offset -= 1; 998 offset -= 1;
999 buf[0] = offset & 0xff; 999 buf[0] = offset & 0xff;
1000 1000
1001 if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) || 1001 if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
1002 (cmd->se_deve && 1002 (cmd->se_deve &&
1003 (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY))) 1003 (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
1004 target_modesense_write_protect(&buf[2], type); 1004 target_modesense_write_protect(&buf[2], type);
1005 1005
1006 if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) && 1006 if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) &&
1007 (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0)) 1007 (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0))
1008 target_modesense_dpofua(&buf[2], type); 1008 target_modesense_dpofua(&buf[2], type);
1009 1009
1010 if ((offset + 1) > cmd->data_length) 1010 if ((offset + 1) > cmd->data_length)
1011 offset = cmd->data_length; 1011 offset = cmd->data_length;
1012 } 1012 }
1013 1013
1014 rbuf = transport_kmap_first_data_page(cmd); 1014 rbuf = transport_kmap_first_data_page(cmd);
1015 memcpy(rbuf, buf, offset); 1015 memcpy(rbuf, buf, offset);
1016 transport_kunmap_first_data_page(cmd); 1016 transport_kunmap_first_data_page(cmd);
1017 1017
1018 task->task_scsi_status = GOOD; 1018 task->task_scsi_status = GOOD;
1019 transport_complete_task(task, 1); 1019 transport_complete_task(task, 1);
1020 return 0; 1020 return 0;
1021 } 1021 }
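
The header arithmetic above follows SPC-4: MODE SENSE (6) has a 4-byte header whose byte 0 holds the MODE DATA LENGTH (the total length minus the length byte itself), while MODE SENSE (10) has an 8-byte header with a 2-byte big-endian length (total minus 2). A worked example for a response carrying only the 20-byte caching page:

    /* MODE SENSE (6):  4-byte header + 20-byte page = 24 bytes total,
     *                  buf[0] = 24 - 1 = 23.
     * MODE SENSE (10): 8-byte header + 20-byte page = 28 bytes total,
     *                  buf[0..1] = be16(28 - 2) = 26.
     */
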
1022 1022
1023 int target_emulate_request_sense(struct se_task *task) 1023 int target_emulate_request_sense(struct se_task *task)
1024 { 1024 {
1025 struct se_cmd *cmd = task->task_se_cmd; 1025 struct se_cmd *cmd = task->task_se_cmd;
1026 unsigned char *cdb = cmd->t_task_cdb; 1026 unsigned char *cdb = cmd->t_task_cdb;
1027 unsigned char *buf; 1027 unsigned char *buf;
1028 u8 ua_asc = 0, ua_ascq = 0; 1028 u8 ua_asc = 0, ua_ascq = 0;
1029 int err = 0; 1029 int err = 0;
1030 1030
1031 if (cdb[1] & 0x01) { 1031 if (cdb[1] & 0x01) {
1032 pr_err("REQUEST_SENSE description emulation not" 1032 pr_err("REQUEST_SENSE description emulation not"
1033 " supported\n"); 1033 " supported\n");
1034 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; 1034 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
1035 return -ENOSYS; 1035 return -ENOSYS;
1036 } 1036 }
1037 1037
1038 buf = transport_kmap_first_data_page(cmd); 1038 buf = transport_kmap_first_data_page(cmd);
1039 1039
1040 if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) { 1040 if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) {
1041 /* 1041 /*
1042 * CURRENT ERROR, UNIT ATTENTION 1042 * CURRENT ERROR, UNIT ATTENTION
1043 */ 1043 */
1044 buf[0] = 0x70; 1044 buf[0] = 0x70;
1045 buf[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION; 1045 buf[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
1046 /* 1046 /*
1047 * Make sure request data length is enough for additional 1047 * Make sure request data length is enough for additional
1048 * sense data. 1048 * sense data.
1049 */ 1049 */
1050 if (cmd->data_length <= 18) { 1050 if (cmd->data_length <= 18) {
1051 buf[7] = 0x00; 1051 buf[7] = 0x00;
1052 err = -EINVAL; 1052 err = -EINVAL;
1053 goto end; 1053 goto end;
1054 } 1054 }
1055 /* 1055 /*
1056 * The Additional Sense Code (ASC) from the UNIT ATTENTION 1056 * The Additional Sense Code (ASC) from the UNIT ATTENTION
1057 */ 1057 */
1058 buf[SPC_ASC_KEY_OFFSET] = ua_asc; 1058 buf[SPC_ASC_KEY_OFFSET] = ua_asc;
1059 buf[SPC_ASCQ_KEY_OFFSET] = ua_ascq; 1059 buf[SPC_ASCQ_KEY_OFFSET] = ua_ascq;
1060 buf[7] = 0x0A; 1060 buf[7] = 0x0A;
1061 } else { 1061 } else {
1062 /* 1062 /*
1063 * CURRENT ERROR, NO SENSE 1063 * CURRENT ERROR, NO SENSE
1064 */ 1064 */
1065 buf[0] = 0x70; 1065 buf[0] = 0x70;
1066 buf[SPC_SENSE_KEY_OFFSET] = NO_SENSE; 1066 buf[SPC_SENSE_KEY_OFFSET] = NO_SENSE;
1067 /* 1067 /*
1068 * Make sure request data length is enough for additional 1068 * Make sure request data length is enough for additional
1069 * sense data. 1069 * sense data.
1070 */ 1070 */
1071 if (cmd->data_length <= 18) { 1071 if (cmd->data_length <= 18) {
1072 buf[7] = 0x00; 1072 buf[7] = 0x00;
1073 err = -EINVAL; 1073 err = -EINVAL;
1074 goto end; 1074 goto end;
1075 } 1075 }
1076 /* 1076 /*
1077 * NO ADDITIONAL SENSE INFORMATION 1077 * NO ADDITIONAL SENSE INFORMATION
1078 */ 1078 */
1079 buf[SPC_ASC_KEY_OFFSET] = 0x00; 1079 buf[SPC_ASC_KEY_OFFSET] = 0x00;
1080 buf[7] = 0x0A; 1080 buf[7] = 0x0A;
1081 } 1081 }
1082 1082
1083 end: 1083 end:
1084 transport_kunmap_first_data_page(cmd); 1084 transport_kunmap_first_data_page(cmd);
1085 task->task_scsi_status = GOOD; 1085 task->task_scsi_status = GOOD;
1086 transport_complete_task(task, 1); 1086 transport_complete_task(task, 1);
1087 return 0; 1087 return 0;
1088 } 1088 }
1089 1089
1090 /* 1090 /*
1091 * Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support. 1091 * Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support.
1092 * Note this is not used for TCM/pSCSI passthrough 1092 * Note this is not used for TCM/pSCSI passthrough
1093 */ 1093 */
1094 int target_emulate_unmap(struct se_task *task) 1094 int target_emulate_unmap(struct se_task *task)
1095 { 1095 {
1096 struct se_cmd *cmd = task->task_se_cmd; 1096 struct se_cmd *cmd = task->task_se_cmd;
1097 struct se_device *dev = cmd->se_dev; 1097 struct se_device *dev = cmd->se_dev;
1098 unsigned char *buf, *ptr = NULL; 1098 unsigned char *buf, *ptr = NULL;
1099 unsigned char *cdb = &cmd->t_task_cdb[0]; 1099 unsigned char *cdb = &cmd->t_task_cdb[0];
1100 sector_t lba; 1100 sector_t lba;
1101 unsigned int size = cmd->data_length, range; 1101 unsigned int size = cmd->data_length, range;
1102 int ret = 0, offset; 1102 int ret = 0, offset;
1103 unsigned short dl, bd_dl; 1103 unsigned short dl, bd_dl;
1104 1104
1105 if (!dev->transport->do_discard) { 1105 if (!dev->transport->do_discard) {
1106 pr_err("UNMAP emulation not supported for: %s\n", 1106 pr_err("UNMAP emulation not supported for: %s\n",
1107 dev->transport->name); 1107 dev->transport->name);
1108 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; 1108 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
1109 return -ENOSYS; 1109 return -ENOSYS;
1110 } 1110 }
1111 1111
1112 /* First UNMAP block descriptor starts at 8 byte offset */ 1112 /* First UNMAP block descriptor starts at 8 byte offset */
1113 offset = 8; 1113 offset = 8;
1114 size -= 8; 1114 size -= 8;
1115 dl = get_unaligned_be16(&cdb[0]); 1115 dl = get_unaligned_be16(&cdb[0]);
1116 bd_dl = get_unaligned_be16(&cdb[2]); 1116 bd_dl = get_unaligned_be16(&cdb[2]);
1117 1117
1118 buf = transport_kmap_first_data_page(cmd); 1118 buf = transport_kmap_first_data_page(cmd);
1119 1119
1120 ptr = &buf[offset]; 1120 ptr = &buf[offset];
1121 pr_debug("UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %hu" 1121 pr_debug("UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %hu"
1122 " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr); 1122 " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);
1123 1123
1124 while (size) { 1124 while (size) {
1125 lba = get_unaligned_be64(&ptr[0]); 1125 lba = get_unaligned_be64(&ptr[0]);
1126 range = get_unaligned_be32(&ptr[8]); 1126 range = get_unaligned_be32(&ptr[8]);
1127 pr_debug("UNMAP: Using lba: %llu and range: %u\n", 1127 pr_debug("UNMAP: Using lba: %llu and range: %u\n",
1128 (unsigned long long)lba, range); 1128 (unsigned long long)lba, range);
1129 1129
1130 ret = dev->transport->do_discard(dev, lba, range); 1130 ret = dev->transport->do_discard(dev, lba, range);
1131 if (ret < 0) { 1131 if (ret < 0) {
1132 pr_err("blkdev_issue_discard() failed: %d\n", 1132 pr_err("blkdev_issue_discard() failed: %d\n",
1133 ret); 1133 ret);
1134 goto err; 1134 goto err;
1135 } 1135 }
1136 1136
1137 ptr += 16; 1137 ptr += 16;
1138 size -= 16; 1138 size -= 16;
1139 } 1139 }
1140 1140
1141 err: 1141 err:
1142 transport_kunmap_first_data_page(cmd); 1142 transport_kunmap_first_data_page(cmd);
1143 if (!ret) { 1143 if (!ret) {
1144 task->task_scsi_status = GOOD; 1144 task->task_scsi_status = GOOD;
1145 transport_complete_task(task, 1); 1145 transport_complete_task(task, 1);
1146 } 1146 }
1147 return ret; 1147 return ret;
1148 } 1148 }
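
Each descriptor consumed by the loop above is 16 bytes: an 8-byte big-endian LBA, a 4-byte block count, and 4 reserved bytes, preceded by the 8-byte parameter list header. A hypothetical initiator-side sketch of building a one-descriptor parameter list, with field values per sbc3r22 (the helper is illustrative only):

    #include <stdint.h>
    #include <string.h>

    /* Hypothetical: UNMAP parameter list with a single block descriptor. */
    static void build_unmap_one(unsigned char *buf, uint64_t lba, uint32_t nblocks)
    {
            int i;

            memset(buf, 0, 24);
            buf[1] = 22;                 /* UNMAP DATA LENGTH: bytes after this field */
            buf[3] = 16;                 /* BLOCK DESCRIPTOR DATA LENGTH */
            for (i = 0; i < 8; i++)      /* bytes 8-15: big-endian LBA */
                    buf[8 + i] = lba >> (8 * (7 - i));
            for (i = 0; i < 4; i++)      /* bytes 16-19: big-endian block count */
                    buf[16 + i] = nblocks >> (8 * (3 - i));
    }
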
1149 1149
1150 /* 1150 /*
1151 * Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support. 1151 * Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support.
1152 * Note this is not used for TCM/pSCSI passthrough 1152 * Note this is not used for TCM/pSCSI passthrough
1153 */ 1153 */
1154 int target_emulate_write_same(struct se_task *task) 1154 int target_emulate_write_same(struct se_task *task)
1155 { 1155 {
1156 struct se_cmd *cmd = task->task_se_cmd; 1156 struct se_cmd *cmd = task->task_se_cmd;
1157 struct se_device *dev = cmd->se_dev; 1157 struct se_device *dev = cmd->se_dev;
1158 sector_t range; 1158 sector_t range;
1159 sector_t lba = cmd->t_task_lba; 1159 sector_t lba = cmd->t_task_lba;
1160 u32 num_blocks; 1160 u32 num_blocks;
1161 int ret; 1161 int ret;
1162 1162
1163 if (!dev->transport->do_discard) { 1163 if (!dev->transport->do_discard) {
1164 pr_err("WRITE_SAME emulation not supported" 1164 pr_err("WRITE_SAME emulation not supported"
1165 " for: %s\n", dev->transport->name); 1165 " for: %s\n", dev->transport->name);
1166 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; 1166 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
1167 return -ENOSYS; 1167 return -ENOSYS;
1168 } 1168 }
1169 1169
1170 if (cmd->t_task_cdb[0] == WRITE_SAME) 1170 if (cmd->t_task_cdb[0] == WRITE_SAME)
1171 num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]); 1171 num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]);
1172 else if (cmd->t_task_cdb[0] == WRITE_SAME_16) 1172 else if (cmd->t_task_cdb[0] == WRITE_SAME_16)
1173 num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]); 1173 num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
1174 else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */ 1174 else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */
1175 num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]); 1175 num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);
1176 1176
1177 /* 1177 /*
1178 * Use the explicit range when non zero is supplied, otherwise calculate 1178 * Use the explicit range when non zero is supplied, otherwise calculate
1179 * the remaining range based on ->get_blocks() - starting LBA. 1179 * the remaining range based on ->get_blocks() - starting LBA.
1180 */ 1180 */
1181 if (num_blocks != 0) 1181 if (num_blocks != 0)
1182 range = num_blocks; 1182 range = num_blocks;
1183 else 1183 else
1184 range = (dev->transport->get_blocks(dev) - lba); 1184 range = (dev->transport->get_blocks(dev) - lba);
1185 1185
1186 pr_debug("WRITE_SAME UNMAP: LBA: %llu Range: %llu\n", 1186 pr_debug("WRITE_SAME UNMAP: LBA: %llu Range: %llu\n",
1187 (unsigned long long)lba, (unsigned long long)range); 1187 (unsigned long long)lba, (unsigned long long)range);
1188 1188
1189 ret = dev->transport->do_discard(dev, lba, range); 1189 ret = dev->transport->do_discard(dev, lba, range);
1190 if (ret < 0) { 1190 if (ret < 0) {
1191 pr_debug("blkdev_issue_discard() failed for WRITE_SAME\n"); 1191 pr_debug("blkdev_issue_discard() failed for WRITE_SAME\n");
1192 return ret; 1192 return ret;
1193 } 1193 }
1194 1194
1195 task->task_scsi_status = GOOD; 1195 task->task_scsi_status = GOOD;
1196 transport_complete_task(task, 1); 1196 transport_complete_task(task, 1);
1197 return 0; 1197 return 0;
1198 } 1198 }
1199 1199
1200 int target_emulate_synchronize_cache(struct se_task *task) 1200 int target_emulate_synchronize_cache(struct se_task *task)
1201 { 1201 {
1202 struct se_device *dev = task->task_se_cmd->se_dev; 1202 struct se_device *dev = task->task_se_cmd->se_dev;
1203 struct se_cmd *cmd = task->task_se_cmd; 1203 struct se_cmd *cmd = task->task_se_cmd;
1204 1204
1205 if (!dev->transport->do_sync_cache) { 1205 if (!dev->transport->do_sync_cache) {
1206 pr_err("SYNCHRONIZE_CACHE emulation not supported" 1206 pr_err("SYNCHRONIZE_CACHE emulation not supported"
1207 " for: %s\n", dev->transport->name); 1207 " for: %s\n", dev->transport->name);
1208 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; 1208 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
1209 return -ENOSYS; 1209 return -ENOSYS;
1210 } 1210 }
1211 1211
1212 dev->transport->do_sync_cache(task); 1212 dev->transport->do_sync_cache(task);
1213 return 0; 1213 return 0;
1214 } 1214 }
1215 1215
1216 int target_emulate_noop(struct se_task *task) 1216 int target_emulate_noop(struct se_task *task)
1217 { 1217 {
1218 task->task_scsi_status = GOOD; 1218 task->task_scsi_status = GOOD;
1219 transport_complete_task(task, 1); 1219 transport_complete_task(task, 1);
1220 return 0; 1220 return 0;
1221 } 1221 }
1222 1222
1223 /* 1223 /*
 1224 * Write a CDB into @cdb that is based on the one the initiator sent us, 1224 * Write a CDB into @cdb that is based on the one the initiator sent us,
1225 * but updated to only cover the sectors that the current task handles. 1225 * but updated to only cover the sectors that the current task handles.
1226 */ 1226 */
1227 void target_get_task_cdb(struct se_task *task, unsigned char *cdb) 1227 void target_get_task_cdb(struct se_task *task, unsigned char *cdb)
1228 { 1228 {
1229 struct se_cmd *cmd = task->task_se_cmd; 1229 struct se_cmd *cmd = task->task_se_cmd;
1230 unsigned int cdb_len = scsi_command_size(cmd->t_task_cdb); 1230 unsigned int cdb_len = scsi_command_size(cmd->t_task_cdb);
1231 1231
1232 memcpy(cdb, cmd->t_task_cdb, cdb_len); 1232 memcpy(cdb, cmd->t_task_cdb, cdb_len);
1233 if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { 1233 if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
1234 unsigned long long lba = task->task_lba; 1234 unsigned long long lba = task->task_lba;
1235 u32 sectors = task->task_sectors; 1235 u32 sectors = task->task_sectors;
1236 1236
1237 switch (cdb_len) { 1237 switch (cdb_len) {
1238 case 6: 1238 case 6:
1239 /* 21-bit LBA and 8-bit sectors */ 1239 /* 21-bit LBA and 8-bit sectors */
1240 cdb[1] = (lba >> 16) & 0x1f; 1240 cdb[1] = (lba >> 16) & 0x1f;
1241 cdb[2] = (lba >> 8) & 0xff; 1241 cdb[2] = (lba >> 8) & 0xff;
1242 cdb[3] = lba & 0xff; 1242 cdb[3] = lba & 0xff;
1243 cdb[4] = sectors & 0xff; 1243 cdb[4] = sectors & 0xff;
1244 break; 1244 break;
1245 case 10: 1245 case 10:
1246 /* 32-bit LBA and 16-bit sectors */ 1246 /* 32-bit LBA and 16-bit sectors */
1247 put_unaligned_be32(lba, &cdb[2]); 1247 put_unaligned_be32(lba, &cdb[2]);
1248 put_unaligned_be16(sectors, &cdb[7]); 1248 put_unaligned_be16(sectors, &cdb[7]);
1249 break; 1249 break;
1250 case 12: 1250 case 12:
1251 /* 32-bit LBA and 32-bit sectors */ 1251 /* 32-bit LBA and 32-bit sectors */
1252 put_unaligned_be32(lba, &cdb[2]); 1252 put_unaligned_be32(lba, &cdb[2]);
1253 put_unaligned_be32(sectors, &cdb[6]); 1253 put_unaligned_be32(sectors, &cdb[6]);
1254 break; 1254 break;
1255 case 16: 1255 case 16:
1256 /* 64-bit LBA and 32-bit sectors */ 1256 /* 64-bit LBA and 32-bit sectors */
1257 put_unaligned_be64(lba, &cdb[2]); 1257 put_unaligned_be64(lba, &cdb[2]);
1258 put_unaligned_be32(sectors, &cdb[10]); 1258 put_unaligned_be32(sectors, &cdb[10]);
1259 break; 1259 break;
1260 case 32: 1260 case 32:
1261 /* 64-bit LBA and 32-bit sectors, extended CDB */ 1261 /* 64-bit LBA and 32-bit sectors, extended CDB */
1262 put_unaligned_be64(lba, &cdb[12]); 1262 put_unaligned_be64(lba, &cdb[12]);
1263 put_unaligned_be32(sectors, &cdb[28]); 1263 put_unaligned_be32(sectors, &cdb[28]);
1264 break; 1264 break;
1265 default: 1265 default:
1266 BUG(); 1266 BUG();
1267 } 1267 }
1268 } 1268 }
1269 } 1269 }
1270 EXPORT_SYMBOL(target_get_task_cdb); 1270 EXPORT_SYMBOL(target_get_task_cdb);
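
As a concrete instance of the rewrite above: for a READ (10) task covering LBA 0x1000 with 8 sectors, the case-10 branch produces the following bytes (a worked example, not program output):

    /* READ (10), task_lba = 0x1000, task_sectors = 8:
     *   cdb[0]    = 0x28         (opcode, copied from the original CDB)
     *   cdb[2..5] = 00 00 10 00  (put_unaligned_be32(lba, &cdb[2]))
     *   cdb[7..8] = 00 08        (put_unaligned_be16(sectors, &cdb[7]))
     */
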
1271 1271
drivers/target/target_core_configfs.c
1 /******************************************************************************* 1 /*******************************************************************************
2 * Filename: target_core_configfs.c 2 * Filename: target_core_configfs.c
3 * 3 *
4 * This file contains ConfigFS logic for the Generic Target Engine project. 4 * This file contains ConfigFS logic for the Generic Target Engine project.
5 * 5 *
6 * Copyright (c) 2008-2011 Rising Tide Systems 6 * Copyright (c) 2008-2011 Rising Tide Systems
7 * Copyright (c) 2008-2011 Linux-iSCSI.org 7 * Copyright (c) 2008-2011 Linux-iSCSI.org
8 * 8 *
9 * Nicholas A. Bellinger <nab@kernel.org> 9 * Nicholas A. Bellinger <nab@kernel.org>
10 * 10 *
11 * based on configfs Copyright (C) 2005 Oracle. All rights reserved. 11 * based on configfs Copyright (C) 2005 Oracle. All rights reserved.
12 * 12 *
13 * This program is free software; you can redistribute it and/or modify 13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by 14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or 15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version. 16 * (at your option) any later version.
17 * 17 *
18 * This program is distributed in the hope that it will be useful, 18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of 19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details. 21 * GNU General Public License for more details.
22 ****************************************************************************/ 22 ****************************************************************************/
23 23
24 #include <linux/module.h> 24 #include <linux/module.h>
25 #include <linux/moduleparam.h> 25 #include <linux/moduleparam.h>
26 #include <generated/utsrelease.h> 26 #include <generated/utsrelease.h>
27 #include <linux/utsname.h> 27 #include <linux/utsname.h>
28 #include <linux/init.h> 28 #include <linux/init.h>
29 #include <linux/fs.h> 29 #include <linux/fs.h>
30 #include <linux/namei.h> 30 #include <linux/namei.h>
31 #include <linux/slab.h> 31 #include <linux/slab.h>
32 #include <linux/types.h> 32 #include <linux/types.h>
33 #include <linux/delay.h> 33 #include <linux/delay.h>
34 #include <linux/unistd.h> 34 #include <linux/unistd.h>
35 #include <linux/string.h> 35 #include <linux/string.h>
36 #include <linux/parser.h> 36 #include <linux/parser.h>
37 #include <linux/syscalls.h> 37 #include <linux/syscalls.h>
38 #include <linux/configfs.h> 38 #include <linux/configfs.h>
39 #include <linux/spinlock.h> 39 #include <linux/spinlock.h>
40 40
41 #include <target/target_core_base.h> 41 #include <target/target_core_base.h>
42 #include <target/target_core_device.h> 42 #include <target/target_core_backend.h>
43 #include <target/target_core_transport.h> 43 #include <target/target_core_fabric.h>
44 #include <target/target_core_fabric_ops.h>
45 #include <target/target_core_fabric_configfs.h> 44 #include <target/target_core_fabric_configfs.h>
46 #include <target/target_core_configfs.h> 45 #include <target/target_core_configfs.h>
47 #include <target/configfs_macros.h> 46 #include <target/configfs_macros.h>
48 47
49 #include "target_core_internal.h" 48 #include "target_core_internal.h"
50 #include "target_core_alua.h" 49 #include "target_core_alua.h"
51 #include "target_core_pr.h" 50 #include "target_core_pr.h"
52 #include "target_core_rd.h" 51 #include "target_core_rd.h"
53 52
54 extern struct t10_alua_lu_gp *default_lu_gp; 53 extern struct t10_alua_lu_gp *default_lu_gp;
55 54
56 static struct list_head g_tf_list; 55 static struct list_head g_tf_list;
57 static struct mutex g_tf_lock; 56 static struct mutex g_tf_lock;
58 57
59 struct target_core_configfs_attribute { 58 struct target_core_configfs_attribute {
60 struct configfs_attribute attr; 59 struct configfs_attribute attr;
61 ssize_t (*show)(void *, char *); 60 ssize_t (*show)(void *, char *);
62 ssize_t (*store)(void *, const char *, size_t); 61 ssize_t (*store)(void *, const char *, size_t);
63 }; 62 };
64 63
65 static struct config_group target_core_hbagroup; 64 static struct config_group target_core_hbagroup;
66 static struct config_group alua_group; 65 static struct config_group alua_group;
67 static struct config_group alua_lu_gps_group; 66 static struct config_group alua_lu_gps_group;
68 67
69 static inline struct se_hba * 68 static inline struct se_hba *
70 item_to_hba(struct config_item *item) 69 item_to_hba(struct config_item *item)
71 { 70 {
72 return container_of(to_config_group(item), struct se_hba, hba_group); 71 return container_of(to_config_group(item), struct se_hba, hba_group);
73 } 72 }
74 73
75 /* 74 /*
76 * Attributes for /sys/kernel/config/target/ 75 * Attributes for /sys/kernel/config/target/
77 */ 76 */
78 static ssize_t target_core_attr_show(struct config_item *item, 77 static ssize_t target_core_attr_show(struct config_item *item,
79 struct configfs_attribute *attr, 78 struct configfs_attribute *attr,
80 char *page) 79 char *page)
81 { 80 {
82 return sprintf(page, "Target Engine Core ConfigFS Infrastructure %s" 81 return sprintf(page, "Target Engine Core ConfigFS Infrastructure %s"
83 " on %s/%s on "UTS_RELEASE"\n", TARGET_CORE_CONFIGFS_VERSION, 82 " on %s/%s on "UTS_RELEASE"\n", TARGET_CORE_CONFIGFS_VERSION,
84 utsname()->sysname, utsname()->machine); 83 utsname()->sysname, utsname()->machine);
85 } 84 }
86 85
87 static struct configfs_item_operations target_core_fabric_item_ops = { 86 static struct configfs_item_operations target_core_fabric_item_ops = {
88 .show_attribute = target_core_attr_show, 87 .show_attribute = target_core_attr_show,
89 }; 88 };
90 89
91 static struct configfs_attribute target_core_item_attr_version = { 90 static struct configfs_attribute target_core_item_attr_version = {
92 .ca_owner = THIS_MODULE, 91 .ca_owner = THIS_MODULE,
93 .ca_name = "version", 92 .ca_name = "version",
94 .ca_mode = S_IRUGO, 93 .ca_mode = S_IRUGO,
95 }; 94 };
96 95
97 static struct target_fabric_configfs *target_core_get_fabric( 96 static struct target_fabric_configfs *target_core_get_fabric(
98 const char *name) 97 const char *name)
99 { 98 {
100 struct target_fabric_configfs *tf; 99 struct target_fabric_configfs *tf;
101 100
102 if (!name) 101 if (!name)
103 return NULL; 102 return NULL;
104 103
105 mutex_lock(&g_tf_lock); 104 mutex_lock(&g_tf_lock);
106 list_for_each_entry(tf, &g_tf_list, tf_list) { 105 list_for_each_entry(tf, &g_tf_list, tf_list) {
107 if (!strcmp(tf->tf_name, name)) { 106 if (!strcmp(tf->tf_name, name)) {
108 atomic_inc(&tf->tf_access_cnt); 107 atomic_inc(&tf->tf_access_cnt);
109 mutex_unlock(&g_tf_lock); 108 mutex_unlock(&g_tf_lock);
110 return tf; 109 return tf;
111 } 110 }
112 } 111 }
113 mutex_unlock(&g_tf_lock); 112 mutex_unlock(&g_tf_lock);
114 113
115 return NULL; 114 return NULL;
116 } 115 }
117 116
118 /* 117 /*
119 * Called from struct target_core_group_ops->make_group() 118 * Called from struct target_core_group_ops->make_group()
120 */ 119 */
121 static struct config_group *target_core_register_fabric( 120 static struct config_group *target_core_register_fabric(
122 struct config_group *group, 121 struct config_group *group,
123 const char *name) 122 const char *name)
124 { 123 {
125 struct target_fabric_configfs *tf; 124 struct target_fabric_configfs *tf;
126 int ret; 125 int ret;
127 126
128 pr_debug("Target_Core_ConfigFS: REGISTER -> group: %p name:" 127 pr_debug("Target_Core_ConfigFS: REGISTER -> group: %p name:"
129 " %s\n", group, name); 128 " %s\n", group, name);
130 /* 129 /*
131 * Below are some hardcoded request_module() calls to automatically 130 * Below are some hardcoded request_module() calls to automatically
 132 * load fabric modules when the following is called: 131 * load fabric modules when the following is called:
133 * 132 *
134 * mkdir -p /sys/kernel/config/target/$MODULE_NAME 133 * mkdir -p /sys/kernel/config/target/$MODULE_NAME
135 * 134 *
136 * Note that this does not limit which TCM fabric module can be 135 * Note that this does not limit which TCM fabric module can be
 137 * registered, but simply provides auto loading logic for modules with 136 * registered, but simply provides auto loading logic for modules with
138 * mkdir(2) system calls with known TCM fabric modules. 137 * mkdir(2) system calls with known TCM fabric modules.
139 */ 138 */
140 if (!strncmp(name, "iscsi", 5)) { 139 if (!strncmp(name, "iscsi", 5)) {
141 /* 140 /*
142 * Automatically load the LIO Target fabric module when the 141 * Automatically load the LIO Target fabric module when the
143 * following is called: 142 * following is called:
144 * 143 *
145 * mkdir -p $CONFIGFS/target/iscsi 144 * mkdir -p $CONFIGFS/target/iscsi
146 */ 145 */
147 ret = request_module("iscsi_target_mod"); 146 ret = request_module("iscsi_target_mod");
148 if (ret < 0) { 147 if (ret < 0) {
149 pr_err("request_module() failed for" 148 pr_err("request_module() failed for"
150 " iscsi_target_mod.ko: %d\n", ret); 149 " iscsi_target_mod.ko: %d\n", ret);
151 return ERR_PTR(-EINVAL); 150 return ERR_PTR(-EINVAL);
152 } 151 }
153 } else if (!strncmp(name, "loopback", 8)) { 152 } else if (!strncmp(name, "loopback", 8)) {
154 /* 153 /*
155 * Automatically load the tcm_loop fabric module when the 154 * Automatically load the tcm_loop fabric module when the
156 * following is called: 155 * following is called:
157 * 156 *
158 * mkdir -p $CONFIGFS/target/loopback 157 * mkdir -p $CONFIGFS/target/loopback
159 */ 158 */
160 ret = request_module("tcm_loop"); 159 ret = request_module("tcm_loop");
161 if (ret < 0) { 160 if (ret < 0) {
162 pr_err("request_module() failed for" 161 pr_err("request_module() failed for"
163 " tcm_loop.ko: %d\n", ret); 162 " tcm_loop.ko: %d\n", ret);
164 return ERR_PTR(-EINVAL); 163 return ERR_PTR(-EINVAL);
165 } 164 }
166 } 165 }
167 166
168 tf = target_core_get_fabric(name); 167 tf = target_core_get_fabric(name);
169 if (!tf) { 168 if (!tf) {
170 pr_err("target_core_get_fabric() failed for %s\n", 169 pr_err("target_core_get_fabric() failed for %s\n",
171 name); 170 name);
172 return ERR_PTR(-EINVAL); 171 return ERR_PTR(-EINVAL);
173 } 172 }
174 pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:" 173 pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:"
175 " %s\n", tf->tf_name); 174 " %s\n", tf->tf_name);
176 /* 175 /*
 177 * On a successful target_core_get_fabric() lookup, the returned 176 * On a successful target_core_get_fabric() lookup, the returned
178 * struct target_fabric_configfs *tf will contain a usage reference. 177 * struct target_fabric_configfs *tf will contain a usage reference.
179 */ 178 */
180 pr_debug("Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n", 179 pr_debug("Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n",
181 &TF_CIT_TMPL(tf)->tfc_wwn_cit); 180 &TF_CIT_TMPL(tf)->tfc_wwn_cit);
182 181
183 tf->tf_group.default_groups = tf->tf_default_groups; 182 tf->tf_group.default_groups = tf->tf_default_groups;
184 tf->tf_group.default_groups[0] = &tf->tf_disc_group; 183 tf->tf_group.default_groups[0] = &tf->tf_disc_group;
185 tf->tf_group.default_groups[1] = NULL; 184 tf->tf_group.default_groups[1] = NULL;
186 185
187 config_group_init_type_name(&tf->tf_group, name, 186 config_group_init_type_name(&tf->tf_group, name,
188 &TF_CIT_TMPL(tf)->tfc_wwn_cit); 187 &TF_CIT_TMPL(tf)->tfc_wwn_cit);
189 config_group_init_type_name(&tf->tf_disc_group, "discovery_auth", 188 config_group_init_type_name(&tf->tf_disc_group, "discovery_auth",
190 &TF_CIT_TMPL(tf)->tfc_discovery_cit); 189 &TF_CIT_TMPL(tf)->tfc_discovery_cit);
191 190
192 pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:" 191 pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:"
193 " %s\n", tf->tf_group.cg_item.ci_name); 192 " %s\n", tf->tf_group.cg_item.ci_name);
194 /* 193 /*
         * Setup tf_ops.tf_subsys pointer for usage with configfs_depend_item()
         */
        tf->tf_ops.tf_subsys = tf->tf_subsys;
        tf->tf_fabric = &tf->tf_group.cg_item;
        pr_debug("Target_Core_ConfigFS: REGISTER -> Set tf->tf_fabric"
                        " for %s\n", name);

        return &tf->tf_group;
}

/*
 * Called from struct target_core_group_ops->drop_item()
 */
static void target_core_deregister_fabric(
        struct config_group *group,
        struct config_item *item)
{
        struct target_fabric_configfs *tf = container_of(
                to_config_group(item), struct target_fabric_configfs, tf_group);
        struct config_group *tf_group;
        struct config_item *df_item;
        int i;

        pr_debug("Target_Core_ConfigFS: DEREGISTER -> Looking up %s in"
                " tf list\n", config_item_name(item));

        pr_debug("Target_Core_ConfigFS: DEREGISTER -> located fabric:"
                        " %s\n", tf->tf_name);
        atomic_dec(&tf->tf_access_cnt);

        pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing"
                        " tf->tf_fabric for %s\n", tf->tf_name);
        tf->tf_fabric = NULL;

        pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing ci"
                        " %s\n", config_item_name(item));

        tf_group = &tf->tf_group;
        for (i = 0; tf_group->default_groups[i]; i++) {
                df_item = &tf_group->default_groups[i]->cg_item;
                tf_group->default_groups[i] = NULL;
                config_item_put(df_item);
        }
        config_item_put(item);
}

static struct configfs_group_operations target_core_fabric_group_ops = {
        .make_group     = &target_core_register_fabric,
        .drop_item      = &target_core_deregister_fabric,
};

/*
 * All item attributes appearing in /sys/kernel/config/target/ appear here.
 */
static struct configfs_attribute *target_core_fabric_item_attrs[] = {
        &target_core_item_attr_version,
        NULL,
};

/*
 * Provides Fabrics Groups and Item Attributes for /sys/kernel/config/target/
 */
static struct config_item_type target_core_fabrics_item = {
        .ct_item_ops    = &target_core_fabric_item_ops,
        .ct_group_ops   = &target_core_fabric_group_ops,
        .ct_attrs       = target_core_fabric_item_attrs,
        .ct_owner       = THIS_MODULE,
};

static struct configfs_subsystem target_core_fabrics = {
        .su_group = {
                .cg_item = {
                        .ci_namebuf = "target",
                        .ci_type = &target_core_fabrics_item,
                },
        },
};

static struct configfs_subsystem *target_core_subsystem[] = {
        &target_core_fabrics,
        NULL,
};

/*##############################################################################
// Start functions called by external Target Fabrics Modules
//############################################################################*/

/*
 * First function called by fabric modules to:
 *
 * 1) Allocate a struct target_fabric_configfs and save the *fabric_cit pointer.
 * 2) Add struct target_fabric_configfs to g_tf_list
 * 3) Return struct target_fabric_configfs to fabric module to be passed
 *    into target_fabric_configfs_register().
 */
struct target_fabric_configfs *target_fabric_configfs_init(
        struct module *fabric_mod,
        const char *name)
{
        struct target_fabric_configfs *tf;

        if (!name) {
                pr_err("Unable to locate passed fabric name\n");
                return ERR_PTR(-EINVAL);
        }
        if (strlen(name) >= TARGET_FABRIC_NAME_SIZE) {
                pr_err("Passed name: %s exceeds TARGET_FABRIC"
                        "_NAME_SIZE\n", name);
                return ERR_PTR(-EINVAL);
        }

        tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL);
        if (!tf)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&tf->tf_list);
        atomic_set(&tf->tf_access_cnt, 0);
        /*
         * Setup the default generic struct config_item_type's (cits) in
         * struct target_fabric_configfs->tf_cit_tmpl
         */
        tf->tf_module = fabric_mod;
        target_fabric_setup_cits(tf);

        tf->tf_subsys = target_core_subsystem[0];
        snprintf(tf->tf_name, TARGET_FABRIC_NAME_SIZE, "%s", name);

        mutex_lock(&g_tf_lock);
        list_add_tail(&tf->tf_list, &g_tf_list);
        mutex_unlock(&g_tf_lock);

        pr_debug("<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>"
                        ">>>>>>>>>>>>>>\n");
        pr_debug("Initialized struct target_fabric_configfs: %p for"
                        " %s\n", tf, tf->tf_name);
        return tf;
}
EXPORT_SYMBOL(target_fabric_configfs_init);
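
For orientation, this is how a fabric module is expected to drive the API: a
minimal, hypothetical sketch of the caller side (the "demo" names and the
demo_ops table, itself sketched after the ops check below, are illustrative
and not part of the target core; real modules also wire up attribute lists in
fabric->tf_cit_tmpl before registering):

/* Hypothetical fabric-module side of the registration flow */
static int demo_register_configfs(void)
{
        struct target_fabric_configfs *fabric;
        int ret;

        fabric = target_fabric_configfs_init(THIS_MODULE, "demo");
        if (IS_ERR(fabric))
                return PTR_ERR(fabric);
        /*
         * Fill in the callbacks verified by target_fabric_tf_ops_check()
         * below, then complete registration.
         */
        fabric->tf_ops = demo_ops;
        ret = target_fabric_configfs_register(fabric);
        if (ret < 0) {
                /* Free only after a FAILED register, per the comment below */
                target_fabric_configfs_free(fabric);
                return ret;
        }
        return 0;
}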

/*
 * Called by fabric plugins after a FAILED target_fabric_configfs_register()
 * call.
 */
void target_fabric_configfs_free(
        struct target_fabric_configfs *tf)
{
        mutex_lock(&g_tf_lock);
        list_del(&tf->tf_list);
        mutex_unlock(&g_tf_lock);

        kfree(tf);
}
EXPORT_SYMBOL(target_fabric_configfs_free);

/*
 * Perform a sanity check of the passed tf->tf_ops before completing
 * TCM fabric module registration.
 */
static int target_fabric_tf_ops_check(
        struct target_fabric_configfs *tf)
{
        struct target_core_fabric_ops *tfo = &tf->tf_ops;

        if (!tfo->get_fabric_name) {
                pr_err("Missing tfo->get_fabric_name()\n");
                return -EINVAL;
        }
        if (!tfo->get_fabric_proto_ident) {
                pr_err("Missing tfo->get_fabric_proto_ident()\n");
                return -EINVAL;
        }
        if (!tfo->tpg_get_wwn) {
                pr_err("Missing tfo->tpg_get_wwn()\n");
                return -EINVAL;
        }
        if (!tfo->tpg_get_tag) {
                pr_err("Missing tfo->tpg_get_tag()\n");
                return -EINVAL;
        }
        if (!tfo->tpg_get_default_depth) {
                pr_err("Missing tfo->tpg_get_default_depth()\n");
                return -EINVAL;
        }
        if (!tfo->tpg_get_pr_transport_id) {
                pr_err("Missing tfo->tpg_get_pr_transport_id()\n");
                return -EINVAL;
        }
        if (!tfo->tpg_get_pr_transport_id_len) {
                pr_err("Missing tfo->tpg_get_pr_transport_id_len()\n");
                return -EINVAL;
        }
        if (!tfo->tpg_check_demo_mode) {
                pr_err("Missing tfo->tpg_check_demo_mode()\n");
                return -EINVAL;
        }
        if (!tfo->tpg_check_demo_mode_cache) {
                pr_err("Missing tfo->tpg_check_demo_mode_cache()\n");
                return -EINVAL;
        }
        if (!tfo->tpg_check_demo_mode_write_protect) {
                pr_err("Missing tfo->tpg_check_demo_mode_write_protect()\n");
                return -EINVAL;
        }
        if (!tfo->tpg_check_prod_mode_write_protect) {
                pr_err("Missing tfo->tpg_check_prod_mode_write_protect()\n");
                return -EINVAL;
        }
        if (!tfo->tpg_alloc_fabric_acl) {
                pr_err("Missing tfo->tpg_alloc_fabric_acl()\n");
                return -EINVAL;
        }
        if (!tfo->tpg_release_fabric_acl) {
                pr_err("Missing tfo->tpg_release_fabric_acl()\n");
                return -EINVAL;
        }
        if (!tfo->tpg_get_inst_index) {
                pr_err("Missing tfo->tpg_get_inst_index()\n");
                return -EINVAL;
        }
        if (!tfo->release_cmd) {
                pr_err("Missing tfo->release_cmd()\n");
                return -EINVAL;
        }
        if (!tfo->shutdown_session) {
                pr_err("Missing tfo->shutdown_session()\n");
                return -EINVAL;
        }
        if (!tfo->close_session) {
                pr_err("Missing tfo->close_session()\n");
                return -EINVAL;
        }
        if (!tfo->stop_session) {
                pr_err("Missing tfo->stop_session()\n");
                return -EINVAL;
        }
        if (!tfo->fall_back_to_erl0) {
                pr_err("Missing tfo->fall_back_to_erl0()\n");
                return -EINVAL;
        }
        if (!tfo->sess_logged_in) {
                pr_err("Missing tfo->sess_logged_in()\n");
                return -EINVAL;
        }
        if (!tfo->sess_get_index) {
                pr_err("Missing tfo->sess_get_index()\n");
                return -EINVAL;
        }
        if (!tfo->write_pending) {
                pr_err("Missing tfo->write_pending()\n");
                return -EINVAL;
        }
        if (!tfo->write_pending_status) {
                pr_err("Missing tfo->write_pending_status()\n");
                return -EINVAL;
        }
        if (!tfo->set_default_node_attributes) {
                pr_err("Missing tfo->set_default_node_attributes()\n");
                return -EINVAL;
        }
        if (!tfo->get_task_tag) {
                pr_err("Missing tfo->get_task_tag()\n");
                return -EINVAL;
        }
        if (!tfo->get_cmd_state) {
                pr_err("Missing tfo->get_cmd_state()\n");
                return -EINVAL;
        }
        if (!tfo->queue_data_in) {
                pr_err("Missing tfo->queue_data_in()\n");
                return -EINVAL;
        }
        if (!tfo->queue_status) {
                pr_err("Missing tfo->queue_status()\n");
                return -EINVAL;
        }
        if (!tfo->queue_tm_rsp) {
                pr_err("Missing tfo->queue_tm_rsp()\n");
                return -EINVAL;
        }
        if (!tfo->set_fabric_sense_len) {
                pr_err("Missing tfo->set_fabric_sense_len()\n");
                return -EINVAL;
        }
        if (!tfo->get_fabric_sense_len) {
                pr_err("Missing tfo->get_fabric_sense_len()\n");
                return -EINVAL;
        }
        if (!tfo->is_state_remove) {
                pr_err("Missing tfo->is_state_remove()\n");
                return -EINVAL;
        }
        /*
         * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn(),
         * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
         * target_core_fabric_configfs.c WWN+TPG group context code.
         */
        if (!tfo->fabric_make_wwn) {
                pr_err("Missing tfo->fabric_make_wwn()\n");
                return -EINVAL;
        }
        if (!tfo->fabric_drop_wwn) {
                pr_err("Missing tfo->fabric_drop_wwn()\n");
                return -EINVAL;
        }
        if (!tfo->fabric_make_tpg) {
                pr_err("Missing tfo->fabric_make_tpg()\n");
                return -EINVAL;
        }
        if (!tfo->fabric_drop_tpg) {
                pr_err("Missing tfo->fabric_drop_tpg()\n");
                return -EINVAL;
        }

        return 0;
}
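
Concretely, every callback rejected above must be wired up before
target_fabric_configfs_register() can succeed. A heavily abbreviated,
hypothetical ops table (only the first callback is shown, with an assumed
char *(void) signature; the elided entries run through
fabric_make_tpg()/fabric_drop_tpg()):

static char *demo_get_fabric_name(void)
{
        return "demo";
}

static struct target_core_fabric_ops demo_ops = {
        .get_fabric_name        = demo_get_fabric_name,
        /*
         * ... plus every other callback checked by
         * target_fabric_tf_ops_check() above, through
         * .fabric_make_wwn/.fabric_drop_wwn and
         * .fabric_make_tpg/.fabric_drop_tpg ...
         */
};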

/*
 * Called 2nd from fabric module with returned parameter of
 * struct target_fabric_configfs * from target_fabric_configfs_init().
 *
 * Upon a successful registration, the new fabric's struct config_item is
 * returned. Also, a pointer to this struct is set in the passed
 * struct target_fabric_configfs.
 */
int target_fabric_configfs_register(
        struct target_fabric_configfs *tf)
{
        int ret;

        if (!tf) {
                pr_err("Unable to locate target_fabric_configfs"
                        " pointer\n");
                return -EINVAL;
        }
        if (!tf->tf_subsys) {
                pr_err("Unable to locate struct config_subsystem"
                        " pointer\n");
                return -EINVAL;
        }
        ret = target_fabric_tf_ops_check(tf);
        if (ret < 0)
                return ret;

        pr_debug("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>"
                ">>>>>>>>>>\n");
        return 0;
}
EXPORT_SYMBOL(target_fabric_configfs_register);

void target_fabric_configfs_deregister(
        struct target_fabric_configfs *tf)
{
        struct configfs_subsystem *su;

        if (!tf) {
                pr_err("Unable to locate passed target_fabric_"
                        "configfs\n");
                return;
        }
        su = tf->tf_subsys;
        if (!su) {
                pr_err("Unable to locate passed tf->tf_subsys"
                        " pointer\n");
                return;
        }
        pr_debug("<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>>>"
                ">>>>>>>>>>>>\n");
        mutex_lock(&g_tf_lock);
        if (atomic_read(&tf->tf_access_cnt)) {
                mutex_unlock(&g_tf_lock);
                pr_err("Non zero tf->tf_access_cnt for fabric %s\n",
                        tf->tf_name);
                BUG();
        }
        list_del(&tf->tf_list);
        mutex_unlock(&g_tf_lock);

        pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing tf:"
                " %s\n", tf->tf_name);
        tf->tf_module = NULL;
        tf->tf_subsys = NULL;
        kfree(tf);

        pr_debug("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>>>>>>"
                ">>>>>\n");
}
EXPORT_SYMBOL(target_fabric_configfs_deregister);

/*##############################################################################
// Stop functions called by external Target Fabrics Modules
//############################################################################*/

/* Start functions for struct config_item_type target_core_dev_attrib_cit */

#define DEF_DEV_ATTRIB_SHOW(_name) \
static ssize_t target_core_dev_show_attr_##_name( \
        struct se_dev_attrib *da, \
        char *page) \
{ \
        struct se_device *dev; \
        struct se_subsystem_dev *se_dev = da->da_sub_dev; \
        ssize_t rb; \
 \
        spin_lock(&se_dev->se_dev_lock); \
        dev = se_dev->se_dev_ptr; \
        if (!dev) { \
                spin_unlock(&se_dev->se_dev_lock); \
                return -ENODEV; \
        } \
        rb = snprintf(page, PAGE_SIZE, "%u\n", \
                (u32)dev->se_sub_dev->se_dev_attrib._name); \
        spin_unlock(&se_dev->se_dev_lock); \
 \
        return rb; \
}

#define DEF_DEV_ATTRIB_STORE(_name) \
static ssize_t target_core_dev_store_attr_##_name( \
        struct se_dev_attrib *da, \
        const char *page, \
        size_t count) \
{ \
        struct se_device *dev; \
        struct se_subsystem_dev *se_dev = da->da_sub_dev; \
        unsigned long val; \
        int ret; \
 \
        spin_lock(&se_dev->se_dev_lock); \
        dev = se_dev->se_dev_ptr; \
        if (!dev) { \
                spin_unlock(&se_dev->se_dev_lock); \
                return -ENODEV; \
        } \
        ret = strict_strtoul(page, 0, &val); \
        if (ret < 0) { \
                spin_unlock(&se_dev->se_dev_lock); \
                pr_err("strict_strtoul() failed with" \
                        " ret: %d\n", ret); \
                return -EINVAL; \
        } \
        ret = se_dev_set_##_name(dev, (u32)val); \
        spin_unlock(&se_dev->se_dev_lock); \
 \
        return (!ret) ? count : -EINVAL; \
}

#define DEF_DEV_ATTRIB(_name) \
        DEF_DEV_ATTRIB_SHOW(_name); \
        DEF_DEV_ATTRIB_STORE(_name);

#define DEF_DEV_ATTRIB_RO(_name) \
        DEF_DEV_ATTRIB_SHOW(_name);

CONFIGFS_EATTR_STRUCT(target_core_dev_attrib, se_dev_attrib);
#define SE_DEV_ATTR(_name, _mode) \
static struct target_core_dev_attrib_attribute \
                        target_core_dev_attrib_##_name = \
                __CONFIGFS_EATTR(_name, _mode, \
                target_core_dev_show_attr_##_name, \
                target_core_dev_store_attr_##_name);

#define SE_DEV_ATTR_RO(_name) \
static struct target_core_dev_attrib_attribute \
                        target_core_dev_attrib_##_name = \
        __CONFIGFS_EATTR_RO(_name, \
        target_core_dev_show_attr_##_name);

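For reference, hand-expanding one of the read-only invocations that follow,
DEF_DEV_ATTRIB_RO(hw_block_size), yields the routine below (a sketch; the
real text comes from the macros above):

static ssize_t target_core_dev_show_attr_hw_block_size(
        struct se_dev_attrib *da,
        char *page)
{
        struct se_device *dev;
        struct se_subsystem_dev *se_dev = da->da_sub_dev;
        ssize_t rb;

        spin_lock(&se_dev->se_dev_lock);
        dev = se_dev->se_dev_ptr;
        if (!dev) {
                spin_unlock(&se_dev->se_dev_lock);
                return -ENODEV;
        }
        rb = snprintf(page, PAGE_SIZE, "%u\n",
                (u32)dev->se_sub_dev->se_dev_attrib.hw_block_size);
        spin_unlock(&se_dev->se_dev_lock);

        return rb;
}

SE_DEV_ATTR_RO(hw_block_size) then wraps this routine in a struct
target_core_dev_attrib_attribute named target_core_dev_attrib_hw_block_size,
whose address the attribute array further below collects.
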
DEF_DEV_ATTRIB(emulate_dpo);
SE_DEV_ATTR(emulate_dpo, S_IRUGO | S_IWUSR);

DEF_DEV_ATTRIB(emulate_fua_write);
SE_DEV_ATTR(emulate_fua_write, S_IRUGO | S_IWUSR);

DEF_DEV_ATTRIB(emulate_fua_read);
SE_DEV_ATTR(emulate_fua_read, S_IRUGO | S_IWUSR);

DEF_DEV_ATTRIB(emulate_write_cache);
SE_DEV_ATTR(emulate_write_cache, S_IRUGO | S_IWUSR);

DEF_DEV_ATTRIB(emulate_ua_intlck_ctrl);
SE_DEV_ATTR(emulate_ua_intlck_ctrl, S_IRUGO | S_IWUSR);

DEF_DEV_ATTRIB(emulate_tas);
SE_DEV_ATTR(emulate_tas, S_IRUGO | S_IWUSR);

DEF_DEV_ATTRIB(emulate_tpu);
SE_DEV_ATTR(emulate_tpu, S_IRUGO | S_IWUSR);

DEF_DEV_ATTRIB(emulate_tpws);
SE_DEV_ATTR(emulate_tpws, S_IRUGO | S_IWUSR);

DEF_DEV_ATTRIB(enforce_pr_isids);
SE_DEV_ATTR(enforce_pr_isids, S_IRUGO | S_IWUSR);

DEF_DEV_ATTRIB(is_nonrot);
SE_DEV_ATTR(is_nonrot, S_IRUGO | S_IWUSR);

DEF_DEV_ATTRIB(emulate_rest_reord);
SE_DEV_ATTR(emulate_rest_reord, S_IRUGO | S_IWUSR);

DEF_DEV_ATTRIB_RO(hw_block_size);
SE_DEV_ATTR_RO(hw_block_size);

DEF_DEV_ATTRIB(block_size);
SE_DEV_ATTR(block_size, S_IRUGO | S_IWUSR);

DEF_DEV_ATTRIB_RO(hw_max_sectors);
SE_DEV_ATTR_RO(hw_max_sectors);

DEF_DEV_ATTRIB(max_sectors);
SE_DEV_ATTR(max_sectors, S_IRUGO | S_IWUSR);

DEF_DEV_ATTRIB(optimal_sectors);
SE_DEV_ATTR(optimal_sectors, S_IRUGO | S_IWUSR);

DEF_DEV_ATTRIB_RO(hw_queue_depth);
SE_DEV_ATTR_RO(hw_queue_depth);

DEF_DEV_ATTRIB(queue_depth);
SE_DEV_ATTR(queue_depth, S_IRUGO | S_IWUSR);

DEF_DEV_ATTRIB(max_unmap_lba_count);
SE_DEV_ATTR(max_unmap_lba_count, S_IRUGO | S_IWUSR);

DEF_DEV_ATTRIB(max_unmap_block_desc_count);
SE_DEV_ATTR(max_unmap_block_desc_count, S_IRUGO | S_IWUSR);

DEF_DEV_ATTRIB(unmap_granularity);
SE_DEV_ATTR(unmap_granularity, S_IRUGO | S_IWUSR);

DEF_DEV_ATTRIB(unmap_granularity_alignment);
SE_DEV_ATTR(unmap_granularity_alignment, S_IRUGO | S_IWUSR);

CONFIGFS_EATTR_OPS(target_core_dev_attrib, se_dev_attrib, da_group);

static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
        &target_core_dev_attrib_emulate_dpo.attr,
        &target_core_dev_attrib_emulate_fua_write.attr,
        &target_core_dev_attrib_emulate_fua_read.attr,
        &target_core_dev_attrib_emulate_write_cache.attr,
        &target_core_dev_attrib_emulate_ua_intlck_ctrl.attr,
        &target_core_dev_attrib_emulate_tas.attr,
        &target_core_dev_attrib_emulate_tpu.attr,
        &target_core_dev_attrib_emulate_tpws.attr,
        &target_core_dev_attrib_enforce_pr_isids.attr,
        &target_core_dev_attrib_is_nonrot.attr,
        &target_core_dev_attrib_emulate_rest_reord.attr,
        &target_core_dev_attrib_hw_block_size.attr,
        &target_core_dev_attrib_block_size.attr,
        &target_core_dev_attrib_hw_max_sectors.attr,
        &target_core_dev_attrib_max_sectors.attr,
        &target_core_dev_attrib_optimal_sectors.attr,
        &target_core_dev_attrib_hw_queue_depth.attr,
        &target_core_dev_attrib_queue_depth.attr,
        &target_core_dev_attrib_max_unmap_lba_count.attr,
        &target_core_dev_attrib_max_unmap_block_desc_count.attr,
        &target_core_dev_attrib_unmap_granularity.attr,
        &target_core_dev_attrib_unmap_granularity_alignment.attr,
        NULL,
};

static struct configfs_item_operations target_core_dev_attrib_ops = {
        .show_attribute         = target_core_dev_attrib_attr_show,
        .store_attribute        = target_core_dev_attrib_attr_store,
};

static struct config_item_type target_core_dev_attrib_cit = {
        .ct_item_ops            = &target_core_dev_attrib_ops,
        .ct_attrs               = target_core_dev_attrib_attrs,
        .ct_owner               = THIS_MODULE,
};

/* End functions for struct config_item_type target_core_dev_attrib_cit */

/* Start functions for struct config_item_type target_core_dev_wwn_cit */

CONFIGFS_EATTR_STRUCT(target_core_dev_wwn, t10_wwn);
#define SE_DEV_WWN_ATTR(_name, _mode) \
static struct target_core_dev_wwn_attribute target_core_dev_wwn_##_name = \
                __CONFIGFS_EATTR(_name, _mode, \
                target_core_dev_wwn_show_attr_##_name, \
                target_core_dev_wwn_store_attr_##_name);

#define SE_DEV_WWN_ATTR_RO(_name) \
static struct target_core_dev_wwn_attribute \
                target_core_dev_wwn_##_name = \
        __CONFIGFS_EATTR_RO(_name, \
        target_core_dev_wwn_show_attr_##_name);

/*
 * VPD page 0x80 Unit serial
 */
static ssize_t target_core_dev_wwn_show_attr_vpd_unit_serial(
        struct t10_wwn *t10_wwn,
        char *page)
{
        struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev;
        struct se_device *dev;

        dev = se_dev->se_dev_ptr;
        if (!dev)
                return -ENODEV;

        return sprintf(page, "T10 VPD Unit Serial Number: %s\n",
                &t10_wwn->unit_serial[0]);
}

static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
        struct t10_wwn *t10_wwn,
        const char *page,
        size_t count)
{
        struct se_subsystem_dev *su_dev = t10_wwn->t10_sub_dev;
        struct se_device *dev;
        unsigned char buf[INQUIRY_VPD_SERIAL_LEN];

        /*
         * If the Linux/SCSI subsystem_api_t plugin got a VPD Unit Serial
         * from the struct scsi_device level firmware, do not allow
         * VPD Unit Serial to be emulated.
         *
         * Note this struct scsi_device could also be emulating VPD
         * information from its drivers/scsi LLD. But for now we assume
         * it is doing 'the right thing' wrt a world wide unique
         * VPD Unit Serial Number that OS dependent multipath can depend on.
         */
        if (su_dev->su_dev_flags & SDF_FIRMWARE_VPD_UNIT_SERIAL) {
                pr_err("Underlying SCSI device firmware provided VPD"
                        " Unit Serial, ignoring request\n");
                return -EOPNOTSUPP;
        }

        if (strlen(page) >= INQUIRY_VPD_SERIAL_LEN) {
                pr_err("Emulated VPD Unit Serial exceeds"
                " INQUIRY_VPD_SERIAL_LEN: %d\n", INQUIRY_VPD_SERIAL_LEN);
                return -EOVERFLOW;
        }
        /*
         * Check to see if any active $FABRIC_MOD exports exist. If they
         * do exist, fail here as changing this information on the fly
         * (underneath the initiator side OS dependent multipath code)
         * could cause negative effects.
         */
        dev = su_dev->se_dev_ptr;
        if (dev) {
                if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
                        pr_err("Unable to set VPD Unit Serial while"
                                " active %d $FABRIC_MOD exports exist\n",
                                atomic_read(&dev->dev_export_obj.obj_access_count));
                        return -EINVAL;
                }
        }
        /*
         * This currently assumes ASCII encoding for the emulated VPD
         * Unit Serial.
         *
         * Also, strip any newline added from the userspace
         * echo $UUID > $TARGET/$HBA/$STORAGE_OBJECT/wwn/vpd_unit_serial
         */
        memset(buf, 0, INQUIRY_VPD_SERIAL_LEN);
        snprintf(buf, INQUIRY_VPD_SERIAL_LEN, "%s", page);
        snprintf(su_dev->t10_wwn.unit_serial, INQUIRY_VPD_SERIAL_LEN,
                        "%s", strstrip(buf));
        su_dev->su_dev_flags |= SDF_EMULATED_VPD_UNIT_SERIAL;

        pr_debug("Target_Core_ConfigFS: Set emulated VPD Unit Serial:"
                        " %s\n", su_dev->t10_wwn.unit_serial);

        return count;
}
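
From userspace this store handler is reached by writing the configfs
attribute file. A hypothetical sketch (the iblock_0/demo_dev path components
are made up; the real path depends on the configured HBA and storage object):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int demo_set_unit_serial(const char *serial)
{
        int fd;
        ssize_t ret;

        fd = open("/sys/kernel/config/target/core/iblock_0/demo_dev"
                  "/wwn/vpd_unit_serial", O_WRONLY);
        if (fd < 0)
                return -1;
        /* A trailing newline is fine; the handler strstrip()s it away */
        ret = write(fd, serial, strlen(serial));
        close(fd);
        return (ret < 0) ? -1 : 0;
}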

SE_DEV_WWN_ATTR(vpd_unit_serial, S_IRUGO | S_IWUSR);

/*
 * VPD page 0x83 Protocol Identifier
 */
static ssize_t target_core_dev_wwn_show_attr_vpd_protocol_identifier(
        struct t10_wwn *t10_wwn,
        char *page)
{
        struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev;
        struct se_device *dev;
        struct t10_vpd *vpd;
        unsigned char buf[VPD_TMP_BUF_SIZE];
        ssize_t len = 0;

        dev = se_dev->se_dev_ptr;
        if (!dev)
                return -ENODEV;

        memset(buf, 0, VPD_TMP_BUF_SIZE);

        spin_lock(&t10_wwn->t10_vpd_lock);
        list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) {
                if (!vpd->protocol_identifier_set)
                        continue;

                transport_dump_vpd_proto_id(vpd, buf, VPD_TMP_BUF_SIZE);

                if (len + strlen(buf) >= PAGE_SIZE)
                        break;

                len += sprintf(page+len, "%s", buf);
        }
        spin_unlock(&t10_wwn->t10_vpd_lock);

        return len;
}

static ssize_t target_core_dev_wwn_store_attr_vpd_protocol_identifier(
        struct t10_wwn *t10_wwn,
        const char *page,
        size_t count)
{
        return -ENOSYS;
}

SE_DEV_WWN_ATTR(vpd_protocol_identifier, S_IRUGO | S_IWUSR);

/*
 * Generic wrapper for dumping VPD identifiers by association.
 */
#define DEF_DEV_WWN_ASSOC_SHOW(_name, _assoc) \
static ssize_t target_core_dev_wwn_show_attr_##_name( \
        struct t10_wwn *t10_wwn, \
        char *page) \
{ \
        struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev; \
        struct se_device *dev; \
        struct t10_vpd *vpd; \
        unsigned char buf[VPD_TMP_BUF_SIZE]; \
        ssize_t len = 0; \
 \
        dev = se_dev->se_dev_ptr; \
        if (!dev) \
                return -ENODEV; \
 \
        spin_lock(&t10_wwn->t10_vpd_lock); \
        list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) { \
                if (vpd->association != _assoc) \
                        continue; \
 \
                memset(buf, 0, VPD_TMP_BUF_SIZE); \
                transport_dump_vpd_assoc(vpd, buf, VPD_TMP_BUF_SIZE); \
                if (len + strlen(buf) >= PAGE_SIZE) \
                        break; \
                len += sprintf(page+len, "%s", buf); \
 \
                memset(buf, 0, VPD_TMP_BUF_SIZE); \
                transport_dump_vpd_ident_type(vpd, buf, VPD_TMP_BUF_SIZE); \
                if (len + strlen(buf) >= PAGE_SIZE) \
                        break; \
                len += sprintf(page+len, "%s", buf); \
 \
                memset(buf, 0, VPD_TMP_BUF_SIZE); \
                transport_dump_vpd_ident(vpd, buf, VPD_TMP_BUF_SIZE); \
                if (len + strlen(buf) >= PAGE_SIZE) \
                        break; \
                len += sprintf(page+len, "%s", buf); \
        } \
        spin_unlock(&t10_wwn->t10_vpd_lock); \
 \
        return len; \
}

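Each of the three invocations below stamps out one such show routine; for
example the first expands, in sketch form, to:

/*
 * Sketch: DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_logical_unit, 0x00) emits a
 * show routine that walks t10_vpd_list and dumps only descriptors whose
 * vpd->association equals 0x00 (Logical Unit association).
 */
static ssize_t target_core_dev_wwn_show_attr_vpd_assoc_logical_unit(
        struct t10_wwn *t10_wwn,
        char *page);
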
960 /* 959 /*
961 * VPD page 0x83 Association: Logical Unit 960 * VPD page 0x83 Association: Logical Unit
962 */ 961 */
963 DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_logical_unit, 0x00); 962 DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_logical_unit, 0x00);
964 963
965 static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_logical_unit( 964 static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_logical_unit(
966 struct t10_wwn *t10_wwn, 965 struct t10_wwn *t10_wwn,
967 const char *page, 966 const char *page,
968 size_t count) 967 size_t count)
969 { 968 {
970 return -ENOSYS; 969 return -ENOSYS;
971 } 970 }
972 971
973 SE_DEV_WWN_ATTR(vpd_assoc_logical_unit, S_IRUGO | S_IWUSR); 972 SE_DEV_WWN_ATTR(vpd_assoc_logical_unit, S_IRUGO | S_IWUSR);
974 973
975 /* 974 /*
976 * VPD page 0x83 Association: Target Port 975 * VPD page 0x83 Association: Target Port
977 */ 976 */
978 DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_target_port, 0x10); 977 DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_target_port, 0x10);
979 978
980 static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_target_port( 979 static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_target_port(
981 struct t10_wwn *t10_wwn, 980 struct t10_wwn *t10_wwn,
982 const char *page, 981 const char *page,
983 size_t count) 982 size_t count)
984 { 983 {
985 return -ENOSYS; 984 return -ENOSYS;
986 } 985 }
987 986
988 SE_DEV_WWN_ATTR(vpd_assoc_target_port, S_IRUGO | S_IWUSR); 987 SE_DEV_WWN_ATTR(vpd_assoc_target_port, S_IRUGO | S_IWUSR);
989 988
990 /* 989 /*
991 * VPD page 0x83 Association: SCSI Target Device 990 * VPD page 0x83 Association: SCSI Target Device
992 */ 991 */
993 DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_scsi_target_device, 0x20); 992 DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_scsi_target_device, 0x20);
994 993
995 static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_scsi_target_device( 994 static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_scsi_target_device(
996 struct t10_wwn *t10_wwn, 995 struct t10_wwn *t10_wwn,
997 const char *page, 996 const char *page,
998 size_t count) 997 size_t count)
999 { 998 {
1000 return -ENOSYS; 999 return -ENOSYS;
1001 } 1000 }
1002 1001
1003 SE_DEV_WWN_ATTR(vpd_assoc_scsi_target_device, S_IRUGO | S_IWUSR); 1002 SE_DEV_WWN_ATTR(vpd_assoc_scsi_target_device, S_IRUGO | S_IWUSR);
1004 1003
1005 CONFIGFS_EATTR_OPS(target_core_dev_wwn, t10_wwn, t10_wwn_group); 1004 CONFIGFS_EATTR_OPS(target_core_dev_wwn, t10_wwn, t10_wwn_group);
1006 1005
1007 static struct configfs_attribute *target_core_dev_wwn_attrs[] = { 1006 static struct configfs_attribute *target_core_dev_wwn_attrs[] = {
1008 &target_core_dev_wwn_vpd_unit_serial.attr, 1007 &target_core_dev_wwn_vpd_unit_serial.attr,
1009 &target_core_dev_wwn_vpd_protocol_identifier.attr, 1008 &target_core_dev_wwn_vpd_protocol_identifier.attr,
1010 &target_core_dev_wwn_vpd_assoc_logical_unit.attr, 1009 &target_core_dev_wwn_vpd_assoc_logical_unit.attr,
1011 &target_core_dev_wwn_vpd_assoc_target_port.attr, 1010 &target_core_dev_wwn_vpd_assoc_target_port.attr,
1012 &target_core_dev_wwn_vpd_assoc_scsi_target_device.attr, 1011 &target_core_dev_wwn_vpd_assoc_scsi_target_device.attr,
1013 NULL, 1012 NULL,
1014 }; 1013 };
1015 1014
1016 static struct configfs_item_operations target_core_dev_wwn_ops = { 1015 static struct configfs_item_operations target_core_dev_wwn_ops = {
1017 .show_attribute = target_core_dev_wwn_attr_show, 1016 .show_attribute = target_core_dev_wwn_attr_show,
1018 .store_attribute = target_core_dev_wwn_attr_store, 1017 .store_attribute = target_core_dev_wwn_attr_store,
1019 }; 1018 };
1020 1019
1021 static struct config_item_type target_core_dev_wwn_cit = { 1020 static struct config_item_type target_core_dev_wwn_cit = {
1022 .ct_item_ops = &target_core_dev_wwn_ops, 1021 .ct_item_ops = &target_core_dev_wwn_ops,
1023 .ct_attrs = target_core_dev_wwn_attrs, 1022 .ct_attrs = target_core_dev_wwn_attrs,
1024 .ct_owner = THIS_MODULE, 1023 .ct_owner = THIS_MODULE,
1025 }; 1024 };
1026 1025
1027 /* End functions for struct config_item_type target_core_dev_wwn_cit */ 1026 /* End functions for struct config_item_type target_core_dev_wwn_cit */
1028 1027
1029 /* Start functions for struct config_item_type target_core_dev_pr_cit */ 1028 /* Start functions for struct config_item_type target_core_dev_pr_cit */
1030 1029
1031 CONFIGFS_EATTR_STRUCT(target_core_dev_pr, se_subsystem_dev); 1030 CONFIGFS_EATTR_STRUCT(target_core_dev_pr, se_subsystem_dev);
1032 #define SE_DEV_PR_ATTR(_name, _mode) \ 1031 #define SE_DEV_PR_ATTR(_name, _mode) \
1033 static struct target_core_dev_pr_attribute target_core_dev_pr_##_name = \ 1032 static struct target_core_dev_pr_attribute target_core_dev_pr_##_name = \
1034 __CONFIGFS_EATTR(_name, _mode, \ 1033 __CONFIGFS_EATTR(_name, _mode, \
1035 target_core_dev_pr_show_attr_##_name, \ 1034 target_core_dev_pr_show_attr_##_name, \
1036 target_core_dev_pr_store_attr_##_name); 1035 target_core_dev_pr_store_attr_##_name);
1037 1036
1038 #define SE_DEV_PR_ATTR_RO(_name); \ 1037 #define SE_DEV_PR_ATTR_RO(_name); \
1039 static struct target_core_dev_pr_attribute target_core_dev_pr_##_name = \ 1038 static struct target_core_dev_pr_attribute target_core_dev_pr_##_name = \
1040 __CONFIGFS_EATTR_RO(_name, \ 1039 __CONFIGFS_EATTR_RO(_name, \
1041 target_core_dev_pr_show_attr_##_name); 1040 target_core_dev_pr_show_attr_##_name);
1042 1041
1043 /* 1042 /*
1044 * res_holder 1043 * res_holder
1045 */ 1044 */
1046 static ssize_t target_core_dev_pr_show_spc3_res( 1045 static ssize_t target_core_dev_pr_show_spc3_res(
1047 struct se_device *dev, 1046 struct se_device *dev,
1048 char *page, 1047 char *page,
1049 ssize_t *len) 1048 ssize_t *len)
1050 { 1049 {
1051 struct se_node_acl *se_nacl; 1050 struct se_node_acl *se_nacl;
1052 struct t10_pr_registration *pr_reg; 1051 struct t10_pr_registration *pr_reg;
1053 char i_buf[PR_REG_ISID_ID_LEN]; 1052 char i_buf[PR_REG_ISID_ID_LEN];
1054 int prf_isid; 1053 int prf_isid;
1055 1054
1056 memset(i_buf, 0, PR_REG_ISID_ID_LEN); 1055 memset(i_buf, 0, PR_REG_ISID_ID_LEN);
1057 1056
1058 spin_lock(&dev->dev_reservation_lock); 1057 spin_lock(&dev->dev_reservation_lock);
1059 pr_reg = dev->dev_pr_res_holder; 1058 pr_reg = dev->dev_pr_res_holder;
1060 if (!pr_reg) { 1059 if (!pr_reg) {
1061 *len += sprintf(page + *len, "No SPC-3 Reservation holder\n"); 1060 *len += sprintf(page + *len, "No SPC-3 Reservation holder\n");
1062 spin_unlock(&dev->dev_reservation_lock); 1061 spin_unlock(&dev->dev_reservation_lock);
1063 return *len; 1062 return *len;
1064 } 1063 }
1065 se_nacl = pr_reg->pr_reg_nacl; 1064 se_nacl = pr_reg->pr_reg_nacl;
1066 prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0], 1065 prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
1067 PR_REG_ISID_ID_LEN); 1066 PR_REG_ISID_ID_LEN);
1068 1067
1069 *len += sprintf(page + *len, "SPC-3 Reservation: %s Initiator: %s%s\n", 1068 *len += sprintf(page + *len, "SPC-3 Reservation: %s Initiator: %s%s\n",
1070 se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(), 1069 se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
1071 se_nacl->initiatorname, (prf_isid) ? &i_buf[0] : ""); 1070 se_nacl->initiatorname, (prf_isid) ? &i_buf[0] : "");
1072 spin_unlock(&dev->dev_reservation_lock); 1071 spin_unlock(&dev->dev_reservation_lock);
1073 1072
1074 return *len; 1073 return *len;
1075 } 1074 }
1076 1075
1077 static ssize_t target_core_dev_pr_show_spc2_res( 1076 static ssize_t target_core_dev_pr_show_spc2_res(
1078 struct se_device *dev, 1077 struct se_device *dev,
1079 char *page, 1078 char *page,
1080 ssize_t *len) 1079 ssize_t *len)
1081 { 1080 {
1082 struct se_node_acl *se_nacl; 1081 struct se_node_acl *se_nacl;
1083 1082
1084 spin_lock(&dev->dev_reservation_lock); 1083 spin_lock(&dev->dev_reservation_lock);
1085 se_nacl = dev->dev_reserved_node_acl; 1084 se_nacl = dev->dev_reserved_node_acl;
1086 if (!se_nacl) { 1085 if (!se_nacl) {
1087 *len += sprintf(page + *len, "No SPC-2 Reservation holder\n"); 1086 *len += sprintf(page + *len, "No SPC-2 Reservation holder\n");
1088 spin_unlock(&dev->dev_reservation_lock); 1087 spin_unlock(&dev->dev_reservation_lock);
1089 return *len; 1088 return *len;
1090 } 1089 }
1091 *len += sprintf(page + *len, "SPC-2 Reservation: %s Initiator: %s\n", 1090 *len += sprintf(page + *len, "SPC-2 Reservation: %s Initiator: %s\n",
1092 se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(), 1091 se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
1093 se_nacl->initiatorname); 1092 se_nacl->initiatorname);
1094 spin_unlock(&dev->dev_reservation_lock); 1093 spin_unlock(&dev->dev_reservation_lock);
1095 1094
1096 return *len; 1095 return *len;
1097 } 1096 }
1098 1097
1099 static ssize_t target_core_dev_pr_show_attr_res_holder( 1098 static ssize_t target_core_dev_pr_show_attr_res_holder(
1100 struct se_subsystem_dev *su_dev, 1099 struct se_subsystem_dev *su_dev,
1101 char *page) 1100 char *page)
1102 { 1101 {
1103 ssize_t len = 0; 1102 ssize_t len = 0;
1104 1103
1105 if (!su_dev->se_dev_ptr) 1104 if (!su_dev->se_dev_ptr)
1106 return -ENODEV; 1105 return -ENODEV;
1107 1106
1108 switch (su_dev->t10_pr.res_type) { 1107 switch (su_dev->t10_pr.res_type) {
1109 case SPC3_PERSISTENT_RESERVATIONS: 1108 case SPC3_PERSISTENT_RESERVATIONS:
1110 target_core_dev_pr_show_spc3_res(su_dev->se_dev_ptr, 1109 target_core_dev_pr_show_spc3_res(su_dev->se_dev_ptr,
1111 page, &len); 1110 page, &len);
1112 break; 1111 break;
1113 case SPC2_RESERVATIONS: 1112 case SPC2_RESERVATIONS:
1114 target_core_dev_pr_show_spc2_res(su_dev->se_dev_ptr, 1113 target_core_dev_pr_show_spc2_res(su_dev->se_dev_ptr,
1115 page, &len); 1114 page, &len);
1116 break; 1115 break;
1117 case SPC_PASSTHROUGH: 1116 case SPC_PASSTHROUGH:
1118 len += sprintf(page+len, "Passthrough\n"); 1117 len += sprintf(page+len, "Passthrough\n");
1119 break; 1118 break;
1120 default: 1119 default:
1121 len += sprintf(page+len, "Unknown\n"); 1120 len += sprintf(page+len, "Unknown\n");
1122 break; 1121 break;
1123 } 1122 }
1124 1123
1125 return len; 1124 return len;
1126 } 1125 }
1127 1126
1128 SE_DEV_PR_ATTR_RO(res_holder); 1127 SE_DEV_PR_ATTR_RO(res_holder);
1129 1128
1130 /* 1129 /*
1131 * res_pr_all_tgt_pts 1130 * res_pr_all_tgt_pts
1132 */ 1131 */
1133 static ssize_t target_core_dev_pr_show_attr_res_pr_all_tgt_pts( 1132 static ssize_t target_core_dev_pr_show_attr_res_pr_all_tgt_pts(
1134 struct se_subsystem_dev *su_dev, 1133 struct se_subsystem_dev *su_dev,
1135 char *page) 1134 char *page)
1136 { 1135 {
1137 struct se_device *dev; 1136 struct se_device *dev;
1138 struct t10_pr_registration *pr_reg; 1137 struct t10_pr_registration *pr_reg;
1139 ssize_t len = 0; 1138 ssize_t len = 0;
1140 1139
1141 dev = su_dev->se_dev_ptr; 1140 dev = su_dev->se_dev_ptr;
1142 if (!dev) 1141 if (!dev)
1143 return -ENODEV; 1142 return -ENODEV;
1144 1143
1145 if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) 1144 if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
1146 return len; 1145 return len;
1147 1146
1148 spin_lock(&dev->dev_reservation_lock); 1147 spin_lock(&dev->dev_reservation_lock);
1149 pr_reg = dev->dev_pr_res_holder; 1148 pr_reg = dev->dev_pr_res_holder;
1150 if (!pr_reg) { 1149 if (!pr_reg) {
1151 len = sprintf(page, "No SPC-3 Reservation holder\n"); 1150 len = sprintf(page, "No SPC-3 Reservation holder\n");
1152 spin_unlock(&dev->dev_reservation_lock); 1151 spin_unlock(&dev->dev_reservation_lock);
1153 return len; 1152 return len;
1154 } 1153 }
1155 /* 1154 /*
1156 * See All Target Ports (ALL_TG_PT) bit in spcr17, section 6.14.3 1155 * See All Target Ports (ALL_TG_PT) bit in spcr17, section 6.14.3
1157 * Basic PERSISTENT RESERVER OUT parameter list, page 290 1156 * Basic PERSISTENT RESERVER OUT parameter list, page 290
1158 */ 1157 */
1159 if (pr_reg->pr_reg_all_tg_pt) 1158 if (pr_reg->pr_reg_all_tg_pt)
1160 len = sprintf(page, "SPC-3 Reservation: All Target" 1159 len = sprintf(page, "SPC-3 Reservation: All Target"
1161 " Ports registration\n"); 1160 " Ports registration\n");
1162 else 1161 else
1163 len = sprintf(page, "SPC-3 Reservation: Single" 1162 len = sprintf(page, "SPC-3 Reservation: Single"
1164 " Target Port registration\n"); 1163 " Target Port registration\n");
1165 spin_unlock(&dev->dev_reservation_lock); 1164 spin_unlock(&dev->dev_reservation_lock);
1166 1165
1167 return len; 1166 return len;
1168 } 1167 }
1169 1168
1170 SE_DEV_PR_ATTR_RO(res_pr_all_tgt_pts); 1169 SE_DEV_PR_ATTR_RO(res_pr_all_tgt_pts);
1171 1170
1172 /* 1171 /*
1173 * res_pr_generation 1172 * res_pr_generation
1174 */ 1173 */
1175 static ssize_t target_core_dev_pr_show_attr_res_pr_generation( 1174 static ssize_t target_core_dev_pr_show_attr_res_pr_generation(
1176 struct se_subsystem_dev *su_dev, 1175 struct se_subsystem_dev *su_dev,
1177 char *page) 1176 char *page)
1178 { 1177 {
1179 if (!su_dev->se_dev_ptr) 1178 if (!su_dev->se_dev_ptr)
1180 return -ENODEV; 1179 return -ENODEV;
1181 1180
1182 if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) 1181 if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
1183 return 0; 1182 return 0;
1184 1183
1185 return sprintf(page, "0x%08x\n", su_dev->t10_pr.pr_generation); 1184 return sprintf(page, "0x%08x\n", su_dev->t10_pr.pr_generation);
1186 } 1185 }
1187 1186
1188 SE_DEV_PR_ATTR_RO(res_pr_generation); 1187 SE_DEV_PR_ATTR_RO(res_pr_generation);
1189 1188
1190 /* 1189 /*
1191 * res_pr_holder_tg_port 1190 * res_pr_holder_tg_port
1192 */ 1191 */
1193 static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port( 1192 static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(
1194 struct se_subsystem_dev *su_dev, 1193 struct se_subsystem_dev *su_dev,
1195 char *page) 1194 char *page)
1196 { 1195 {
1197 struct se_device *dev; 1196 struct se_device *dev;
1198 struct se_node_acl *se_nacl; 1197 struct se_node_acl *se_nacl;
1199 struct se_lun *lun; 1198 struct se_lun *lun;
1200 struct se_portal_group *se_tpg; 1199 struct se_portal_group *se_tpg;
1201 struct t10_pr_registration *pr_reg; 1200 struct t10_pr_registration *pr_reg;
1202 struct target_core_fabric_ops *tfo; 1201 struct target_core_fabric_ops *tfo;
1203 ssize_t len = 0; 1202 ssize_t len = 0;
1204 1203
1205 dev = su_dev->se_dev_ptr; 1204 dev = su_dev->se_dev_ptr;
1206 if (!dev) 1205 if (!dev)
1207 return -ENODEV; 1206 return -ENODEV;
1208 1207
1209 if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) 1208 if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
1210 return len; 1209 return len;
1211 1210
1212 spin_lock(&dev->dev_reservation_lock); 1211 spin_lock(&dev->dev_reservation_lock);
1213 pr_reg = dev->dev_pr_res_holder; 1212 pr_reg = dev->dev_pr_res_holder;
1214 if (!pr_reg) { 1213 if (!pr_reg) {
1215 len = sprintf(page, "No SPC-3 Reservation holder\n"); 1214 len = sprintf(page, "No SPC-3 Reservation holder\n");
1216 spin_unlock(&dev->dev_reservation_lock); 1215 spin_unlock(&dev->dev_reservation_lock);
1217 return len; 1216 return len;
1218 } 1217 }
1219 se_nacl = pr_reg->pr_reg_nacl; 1218 se_nacl = pr_reg->pr_reg_nacl;
1220 se_tpg = se_nacl->se_tpg; 1219 se_tpg = se_nacl->se_tpg;
1221 lun = pr_reg->pr_reg_tg_pt_lun; 1220 lun = pr_reg->pr_reg_tg_pt_lun;
1222 tfo = se_tpg->se_tpg_tfo; 1221 tfo = se_tpg->se_tpg_tfo;
1223 1222
1224 len += sprintf(page+len, "SPC-3 Reservation: %s" 1223 len += sprintf(page+len, "SPC-3 Reservation: %s"
1225 " Target Node Endpoint: %s\n", tfo->get_fabric_name(), 1224 " Target Node Endpoint: %s\n", tfo->get_fabric_name(),
1226 tfo->tpg_get_wwn(se_tpg)); 1225 tfo->tpg_get_wwn(se_tpg));
1227 len += sprintf(page+len, "SPC-3 Reservation: Relative Port" 1226 len += sprintf(page+len, "SPC-3 Reservation: Relative Port"
1228 " Identifer Tag: %hu %s Portal Group Tag: %hu" 1227 " Identifer Tag: %hu %s Portal Group Tag: %hu"
1229 " %s Logical Unit: %u\n", lun->lun_sep->sep_rtpi, 1228 " %s Logical Unit: %u\n", lun->lun_sep->sep_rtpi,
1230 tfo->get_fabric_name(), tfo->tpg_get_tag(se_tpg), 1229 tfo->get_fabric_name(), tfo->tpg_get_tag(se_tpg),
1231 tfo->get_fabric_name(), lun->unpacked_lun); 1230 tfo->get_fabric_name(), lun->unpacked_lun);
1232 spin_unlock(&dev->dev_reservation_lock); 1231 spin_unlock(&dev->dev_reservation_lock);
1233 1232
1234 return len; 1233 return len;
1235 } 1234 }
1236 1235
1237 SE_DEV_PR_ATTR_RO(res_pr_holder_tg_port); 1236 SE_DEV_PR_ATTR_RO(res_pr_holder_tg_port);
1238 1237
1239 /* 1238 /*
1240 * res_pr_registered_i_pts 1239 * res_pr_registered_i_pts
1241 */ 1240 */
1242 static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts( 1241 static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
1243 struct se_subsystem_dev *su_dev, 1242 struct se_subsystem_dev *su_dev,
1244 char *page) 1243 char *page)
1245 { 1244 {
1246 struct target_core_fabric_ops *tfo; 1245 struct target_core_fabric_ops *tfo;
1247 struct t10_pr_registration *pr_reg; 1246 struct t10_pr_registration *pr_reg;
1248 unsigned char buf[384]; 1247 unsigned char buf[384];
1249 char i_buf[PR_REG_ISID_ID_LEN]; 1248 char i_buf[PR_REG_ISID_ID_LEN];
1250 ssize_t len = 0; 1249 ssize_t len = 0;
1251 int reg_count = 0, prf_isid; 1250 int reg_count = 0, prf_isid;
1252 1251
1253 if (!su_dev->se_dev_ptr) 1252 if (!su_dev->se_dev_ptr)
1254 return -ENODEV; 1253 return -ENODEV;
1255 1254
1256 if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) 1255 if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
1257 return len; 1256 return len;
1258 1257
1259 len += sprintf(page+len, "SPC-3 PR Registrations:\n"); 1258 len += sprintf(page+len, "SPC-3 PR Registrations:\n");
1260 1259
1261 spin_lock(&su_dev->t10_pr.registration_lock); 1260 spin_lock(&su_dev->t10_pr.registration_lock);
1262 list_for_each_entry(pr_reg, &su_dev->t10_pr.registration_list, 1261 list_for_each_entry(pr_reg, &su_dev->t10_pr.registration_list,
1263 pr_reg_list) { 1262 pr_reg_list) {
1264 1263
1265 memset(buf, 0, 384); 1264 memset(buf, 0, 384);
1266 memset(i_buf, 0, PR_REG_ISID_ID_LEN); 1265 memset(i_buf, 0, PR_REG_ISID_ID_LEN);
1267 tfo = pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo; 1266 tfo = pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo;
1268 prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0], 1267 prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
1269 PR_REG_ISID_ID_LEN); 1268 PR_REG_ISID_ID_LEN);
1270 sprintf(buf, "%s Node: %s%s Key: 0x%016Lx PRgen: 0x%08x\n", 1269 sprintf(buf, "%s Node: %s%s Key: 0x%016Lx PRgen: 0x%08x\n",
1271 tfo->get_fabric_name(), 1270 tfo->get_fabric_name(),
1272 pr_reg->pr_reg_nacl->initiatorname, (prf_isid) ? 1271 pr_reg->pr_reg_nacl->initiatorname, (prf_isid) ?
1273 &i_buf[0] : "", pr_reg->pr_res_key, 1272 &i_buf[0] : "", pr_reg->pr_res_key,
1274 pr_reg->pr_res_generation); 1273 pr_reg->pr_res_generation);
1275 1274
1276 if (len + strlen(buf) >= PAGE_SIZE) 1275 if (len + strlen(buf) >= PAGE_SIZE)
1277 break; 1276 break;
1278 1277
1279 len += sprintf(page+len, "%s", buf); 1278 len += sprintf(page+len, "%s", buf);
1280 reg_count++; 1279 reg_count++;
1281 } 1280 }
1282 spin_unlock(&su_dev->t10_pr.registration_lock); 1281 spin_unlock(&su_dev->t10_pr.registration_lock);
1283 1282
1284 if (!reg_count) 1283 if (!reg_count)
1285 len += sprintf(page+len, "None\n"); 1284 len += sprintf(page+len, "None\n");
1286 1285
1287 return len; 1286 return len;
1288 } 1287 }
1289 1288
1290 SE_DEV_PR_ATTR_RO(res_pr_registered_i_pts); 1289 SE_DEV_PR_ATTR_RO(res_pr_registered_i_pts);
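res_pr_registered_i_pts demonstrates the bounded-append pattern used for multi-record show handlers: each record is staged in a fixed scratch buffer and the loop stops before the output would exceed PAGE_SIZE. A standalone sketch of the same technique, with names invented purely for illustration:

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

/* Append one formatted record at a time; stop before overflowing page. */
static size_t emit_records(char *page, const char *const *recs, int nr)
{
        char buf[384];
        size_t len = 0;
        int i;

        for (i = 0; i < nr; i++) {
                snprintf(buf, sizeof(buf), "Record: %s\n", recs[i]);
                if (len + strlen(buf) >= PAGE_SIZE)
                        break;
                len += sprintf(page + len, "%s", buf);
        }
        return len;
}

int main(void)
{
        static const char *const recs[] = { "alpha", "beta", "gamma" };
        char page[PAGE_SIZE];

        fwrite(page, 1, emit_records(page, recs, 3), stdout);
        return 0;
}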
1291 1290
1292 /* 1291 /*
1293 * res_pr_type 1292 * res_pr_type
1294 */ 1293 */
1295 static ssize_t target_core_dev_pr_show_attr_res_pr_type( 1294 static ssize_t target_core_dev_pr_show_attr_res_pr_type(
1296 struct se_subsystem_dev *su_dev, 1295 struct se_subsystem_dev *su_dev,
1297 char *page) 1296 char *page)
1298 { 1297 {
1299 struct se_device *dev; 1298 struct se_device *dev;
1300 struct t10_pr_registration *pr_reg; 1299 struct t10_pr_registration *pr_reg;
1301 ssize_t len = 0; 1300 ssize_t len = 0;
1302 1301
1303 dev = su_dev->se_dev_ptr; 1302 dev = su_dev->se_dev_ptr;
1304 if (!dev) 1303 if (!dev)
1305 return -ENODEV; 1304 return -ENODEV;
1306 1305
1307 if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) 1306 if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
1308 return len; 1307 return len;
1309 1308
1310 spin_lock(&dev->dev_reservation_lock); 1309 spin_lock(&dev->dev_reservation_lock);
1311 pr_reg = dev->dev_pr_res_holder; 1310 pr_reg = dev->dev_pr_res_holder;
1312 if (!pr_reg) { 1311 if (!pr_reg) {
1313 len = sprintf(page, "No SPC-3 Reservation holder\n"); 1312 len = sprintf(page, "No SPC-3 Reservation holder\n");
1314 spin_unlock(&dev->dev_reservation_lock); 1313 spin_unlock(&dev->dev_reservation_lock);
1315 return len; 1314 return len;
1316 } 1315 }
1317 len = sprintf(page, "SPC-3 Reservation Type: %s\n", 1316 len = sprintf(page, "SPC-3 Reservation Type: %s\n",
1318 core_scsi3_pr_dump_type(pr_reg->pr_res_type)); 1317 core_scsi3_pr_dump_type(pr_reg->pr_res_type));
1319 spin_unlock(&dev->dev_reservation_lock); 1318 spin_unlock(&dev->dev_reservation_lock);
1320 1319
1321 return len; 1320 return len;
1322 } 1321 }
1323 1322
1324 SE_DEV_PR_ATTR_RO(res_pr_type); 1323 SE_DEV_PR_ATTR_RO(res_pr_type);
1325 1324
1326 /* 1325 /*
1327 * res_type 1326 * res_type
1328 */ 1327 */
1329 static ssize_t target_core_dev_pr_show_attr_res_type( 1328 static ssize_t target_core_dev_pr_show_attr_res_type(
1330 struct se_subsystem_dev *su_dev, 1329 struct se_subsystem_dev *su_dev,
1331 char *page) 1330 char *page)
1332 { 1331 {
1333 ssize_t len = 0; 1332 ssize_t len = 0;
1334 1333
1335 if (!su_dev->se_dev_ptr) 1334 if (!su_dev->se_dev_ptr)
1336 return -ENODEV; 1335 return -ENODEV;
1337 1336
1338 switch (su_dev->t10_pr.res_type) { 1337 switch (su_dev->t10_pr.res_type) {
1339 case SPC3_PERSISTENT_RESERVATIONS: 1338 case SPC3_PERSISTENT_RESERVATIONS:
1340 len = sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n"); 1339 len = sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n");
1341 break; 1340 break;
1342 case SPC2_RESERVATIONS: 1341 case SPC2_RESERVATIONS:
1343 len = sprintf(page, "SPC2_RESERVATIONS\n"); 1342 len = sprintf(page, "SPC2_RESERVATIONS\n");
1344 break; 1343 break;
1345 case SPC_PASSTHROUGH: 1344 case SPC_PASSTHROUGH:
1346 len = sprintf(page, "SPC_PASSTHROUGH\n"); 1345 len = sprintf(page, "SPC_PASSTHROUGH\n");
1347 break; 1346 break;
1348 default: 1347 default:
1349 len = sprintf(page, "UNKNOWN\n"); 1348 len = sprintf(page, "UNKNOWN\n");
1350 break; 1349 break;
1351 } 1350 }
1352 1351
1353 return len; 1352 return len;
1354 } 1353 }
1355 1354
1356 SE_DEV_PR_ATTR_RO(res_type); 1355 SE_DEV_PR_ATTR_RO(res_type);
1357 1356
1358 /* 1357 /*
1359 * res_aptpl_active 1358 * res_aptpl_active
1360 */ 1359 */
1361 1360
1362 static ssize_t target_core_dev_pr_show_attr_res_aptpl_active( 1361 static ssize_t target_core_dev_pr_show_attr_res_aptpl_active(
1363 struct se_subsystem_dev *su_dev, 1362 struct se_subsystem_dev *su_dev,
1364 char *page) 1363 char *page)
1365 { 1364 {
1366 if (!su_dev->se_dev_ptr) 1365 if (!su_dev->se_dev_ptr)
1367 return -ENODEV; 1366 return -ENODEV;
1368 1367
1369 if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) 1368 if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
1370 return 0; 1369 return 0;
1371 1370
1372 return sprintf(page, "APTPL Bit Status: %s\n", 1371 return sprintf(page, "APTPL Bit Status: %s\n",
1373 (su_dev->t10_pr.pr_aptpl_active) ? "Activated" : "Disabled"); 1372 (su_dev->t10_pr.pr_aptpl_active) ? "Activated" : "Disabled");
1374 } 1373 }
1375 1374
1376 SE_DEV_PR_ATTR_RO(res_aptpl_active); 1375 SE_DEV_PR_ATTR_RO(res_aptpl_active);
1377 1376
1378 /* 1377 /*
1379 * res_aptpl_metadata 1378 * res_aptpl_metadata
1380 */ 1379 */
1381 static ssize_t target_core_dev_pr_show_attr_res_aptpl_metadata( 1380 static ssize_t target_core_dev_pr_show_attr_res_aptpl_metadata(
1382 struct se_subsystem_dev *su_dev, 1381 struct se_subsystem_dev *su_dev,
1383 char *page) 1382 char *page)
1384 { 1383 {
1385 if (!su_dev->se_dev_ptr) 1384 if (!su_dev->se_dev_ptr)
1386 return -ENODEV; 1385 return -ENODEV;
1387 1386
1388 if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) 1387 if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
1389 return 0; 1388 return 0;
1390 1389
1391 return sprintf(page, "Ready to process PR APTPL metadata..\n"); 1390 return sprintf(page, "Ready to process PR APTPL metadata..\n");
1392 } 1391 }
1393 1392
1394 enum { 1393 enum {
1395 Opt_initiator_fabric, Opt_initiator_node, Opt_initiator_sid, 1394 Opt_initiator_fabric, Opt_initiator_node, Opt_initiator_sid,
1396 Opt_sa_res_key, Opt_res_holder, Opt_res_type, Opt_res_scope, 1395 Opt_sa_res_key, Opt_res_holder, Opt_res_type, Opt_res_scope,
1397 Opt_res_all_tg_pt, Opt_mapped_lun, Opt_target_fabric, 1396 Opt_res_all_tg_pt, Opt_mapped_lun, Opt_target_fabric,
1398 Opt_target_node, Opt_tpgt, Opt_port_rtpi, Opt_target_lun, Opt_err 1397 Opt_target_node, Opt_tpgt, Opt_port_rtpi, Opt_target_lun, Opt_err
1399 }; 1398 };
1400 1399
1401 static match_table_t tokens = { 1400 static match_table_t tokens = {
1402 {Opt_initiator_fabric, "initiator_fabric=%s"}, 1401 {Opt_initiator_fabric, "initiator_fabric=%s"},
1403 {Opt_initiator_node, "initiator_node=%s"}, 1402 {Opt_initiator_node, "initiator_node=%s"},
1404 {Opt_initiator_sid, "initiator_sid=%s"}, 1403 {Opt_initiator_sid, "initiator_sid=%s"},
1405 {Opt_sa_res_key, "sa_res_key=%s"}, 1404 {Opt_sa_res_key, "sa_res_key=%s"},
1406 {Opt_res_holder, "res_holder=%d"}, 1405 {Opt_res_holder, "res_holder=%d"},
1407 {Opt_res_type, "res_type=%d"}, 1406 {Opt_res_type, "res_type=%d"},
1408 {Opt_res_scope, "res_scope=%d"}, 1407 {Opt_res_scope, "res_scope=%d"},
1409 {Opt_res_all_tg_pt, "res_all_tg_pt=%d"}, 1408 {Opt_res_all_tg_pt, "res_all_tg_pt=%d"},
1410 {Opt_mapped_lun, "mapped_lun=%d"}, 1409 {Opt_mapped_lun, "mapped_lun=%d"},
1411 {Opt_target_fabric, "target_fabric=%s"}, 1410 {Opt_target_fabric, "target_fabric=%s"},
1412 {Opt_target_node, "target_node=%s"}, 1411 {Opt_target_node, "target_node=%s"},
1413 {Opt_tpgt, "tpgt=%d"}, 1412 {Opt_tpgt, "tpgt=%d"},
1414 {Opt_port_rtpi, "port_rtpi=%d"}, 1413 {Opt_port_rtpi, "port_rtpi=%d"},
1415 {Opt_target_lun, "target_lun=%d"}, 1414 {Opt_target_lun, "target_lun=%d"},
1416 {Opt_err, NULL} 1415 {Opt_err, NULL}
1417 }; 1416 };
1418 1417
1419 static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( 1418 static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
1420 struct se_subsystem_dev *su_dev, 1419 struct se_subsystem_dev *su_dev,
1421 const char *page, 1420 const char *page,
1422 size_t count) 1421 size_t count)
1423 { 1422 {
1424 struct se_device *dev; 1423 struct se_device *dev;
1425 unsigned char *i_fabric = NULL, *i_port = NULL, *isid = NULL; 1424 unsigned char *i_fabric = NULL, *i_port = NULL, *isid = NULL;
1426 unsigned char *t_fabric = NULL, *t_port = NULL; 1425 unsigned char *t_fabric = NULL, *t_port = NULL;
1427 char *orig, *ptr, *arg_p, *opts; 1426 char *orig, *ptr, *arg_p, *opts;
1428 substring_t args[MAX_OPT_ARGS]; 1427 substring_t args[MAX_OPT_ARGS];
1429 unsigned long long tmp_ll; 1428 unsigned long long tmp_ll;
1430 u64 sa_res_key = 0; 1429 u64 sa_res_key = 0;
1431 u32 mapped_lun = 0, target_lun = 0; 1430 u32 mapped_lun = 0, target_lun = 0;
1432 int ret = -1, res_holder = 0, all_tg_pt = 0, arg, token; 1431 int ret = -1, res_holder = 0, all_tg_pt = 0, arg, token;
1433 u16 port_rtpi = 0, tpgt = 0; 1432 u16 port_rtpi = 0, tpgt = 0;
1434 u8 type = 0, scope; 1433 u8 type = 0, scope;
1435 1434
1436 dev = su_dev->se_dev_ptr; 1435 dev = su_dev->se_dev_ptr;
1437 if (!dev) 1436 if (!dev)
1438 return -ENODEV; 1437 return -ENODEV;
1439 1438
1440 if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) 1439 if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
1441 return 0; 1440 return 0;
1442 1441
1443 if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 1442 if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1444 pr_debug("Unable to process APTPL metadata while" 1443 pr_debug("Unable to process APTPL metadata while"
1445 " active fabric exports exist\n"); 1444 " active fabric exports exist\n");
1446 return -EINVAL; 1445 return -EINVAL;
1447 } 1446 }
1448 1447
1449 opts = kstrdup(page, GFP_KERNEL); 1448 opts = kstrdup(page, GFP_KERNEL);
1450 if (!opts) 1449 if (!opts)
1451 return -ENOMEM; 1450 return -ENOMEM;
1452 1451
1453 orig = opts; 1452 orig = opts;
1454 while ((ptr = strsep(&opts, ",")) != NULL) { 1453 while ((ptr = strsep(&opts, ",")) != NULL) {
1455 if (!*ptr) 1454 if (!*ptr)
1456 continue; 1455 continue;
1457 1456
1458 token = match_token(ptr, tokens, args); 1457 token = match_token(ptr, tokens, args);
1459 switch (token) { 1458 switch (token) {
1460 case Opt_initiator_fabric: 1459 case Opt_initiator_fabric:
1461 i_fabric = match_strdup(&args[0]); 1460 i_fabric = match_strdup(&args[0]);
1462 if (!i_fabric) { 1461 if (!i_fabric) {
1463 ret = -ENOMEM; 1462 ret = -ENOMEM;
1464 goto out; 1463 goto out;
1465 } 1464 }
1466 break; 1465 break;
1467 case Opt_initiator_node: 1466 case Opt_initiator_node:
1468 i_port = match_strdup(&args[0]); 1467 i_port = match_strdup(&args[0]);
1469 if (!i_port) { 1468 if (!i_port) {
1470 ret = -ENOMEM; 1469 ret = -ENOMEM;
1471 goto out; 1470 goto out;
1472 } 1471 }
1473 if (strlen(i_port) >= PR_APTPL_MAX_IPORT_LEN) { 1472 if (strlen(i_port) >= PR_APTPL_MAX_IPORT_LEN) {
1474 pr_err("APTPL metadata initiator_node=" 1473 pr_err("APTPL metadata initiator_node="
1475 " exceeds PR_APTPL_MAX_IPORT_LEN: %d\n", 1474 " exceeds PR_APTPL_MAX_IPORT_LEN: %d\n",
1476 PR_APTPL_MAX_IPORT_LEN); 1475 PR_APTPL_MAX_IPORT_LEN);
1477 ret = -EINVAL; 1476 ret = -EINVAL;
1478 break; 1477 break;
1479 } 1478 }
1480 break; 1479 break;
1481 case Opt_initiator_sid: 1480 case Opt_initiator_sid:
1482 isid = match_strdup(&args[0]); 1481 isid = match_strdup(&args[0]);
1483 if (!isid) { 1482 if (!isid) {
1484 ret = -ENOMEM; 1483 ret = -ENOMEM;
1485 goto out; 1484 goto out;
1486 } 1485 }
1487 if (strlen(isid) >= PR_REG_ISID_LEN) { 1486 if (strlen(isid) >= PR_REG_ISID_LEN) {
1488 pr_err("APTPL metadata initiator_isid" 1487 pr_err("APTPL metadata initiator_isid"
1489 "= exceeds PR_REG_ISID_LEN: %d\n", 1488 "= exceeds PR_REG_ISID_LEN: %d\n",
1490 PR_REG_ISID_LEN); 1489 PR_REG_ISID_LEN);
1491 ret = -EINVAL; 1490 ret = -EINVAL;
1492 break; 1491 break;
1493 } 1492 }
1494 break; 1493 break;
1495 case Opt_sa_res_key: 1494 case Opt_sa_res_key:
1496 arg_p = match_strdup(&args[0]); 1495 arg_p = match_strdup(&args[0]);
1497 if (!arg_p) { 1496 if (!arg_p) {
1498 ret = -ENOMEM; 1497 ret = -ENOMEM;
1499 goto out; 1498 goto out;
1500 } 1499 }
1501 ret = strict_strtoull(arg_p, 0, &tmp_ll); 1500 ret = strict_strtoull(arg_p, 0, &tmp_ll);
1502 if (ret < 0) { 1501 if (ret < 0) {
1503 pr_err("strict_strtoull() failed for" 1502 pr_err("strict_strtoull() failed for"
1504 " sa_res_key=\n"); 1503 " sa_res_key=\n");
1505 goto out; 1504 goto out;
1506 } 1505 }
1507 sa_res_key = (u64)tmp_ll; 1506 sa_res_key = (u64)tmp_ll;
1508 break; 1507 break;
1509 /* 1508 /*
1510 * PR APTPL Metadata for Reservation 1509 * PR APTPL Metadata for Reservation
1511 */ 1510 */
1512 case Opt_res_holder: 1511 case Opt_res_holder:
1513 match_int(args, &arg); 1512 match_int(args, &arg);
1514 res_holder = arg; 1513 res_holder = arg;
1515 break; 1514 break;
1516 case Opt_res_type: 1515 case Opt_res_type:
1517 match_int(args, &arg); 1516 match_int(args, &arg);
1518 type = (u8)arg; 1517 type = (u8)arg;
1519 break; 1518 break;
1520 case Opt_res_scope: 1519 case Opt_res_scope:
1521 match_int(args, &arg); 1520 match_int(args, &arg);
1522 scope = (u8)arg; 1521 scope = (u8)arg;
1523 break; 1522 break;
1524 case Opt_res_all_tg_pt: 1523 case Opt_res_all_tg_pt:
1525 match_int(args, &arg); 1524 match_int(args, &arg);
1526 all_tg_pt = (int)arg; 1525 all_tg_pt = (int)arg;
1527 break; 1526 break;
1528 case Opt_mapped_lun: 1527 case Opt_mapped_lun:
1529 match_int(args, &arg); 1528 match_int(args, &arg);
1530 mapped_lun = (u32)arg; 1529 mapped_lun = (u32)arg;
1531 break; 1530 break;
1532 /* 1531 /*
1533 * PR APTPL Metadata for Target Port 1532 * PR APTPL Metadata for Target Port
1534 */ 1533 */
1535 case Opt_target_fabric: 1534 case Opt_target_fabric:
1536 t_fabric = match_strdup(&args[0]); 1535 t_fabric = match_strdup(&args[0]);
1537 if (!t_fabric) { 1536 if (!t_fabric) {
1538 ret = -ENOMEM; 1537 ret = -ENOMEM;
1539 goto out; 1538 goto out;
1540 } 1539 }
1541 break; 1540 break;
1542 case Opt_target_node: 1541 case Opt_target_node:
1543 t_port = match_strdup(&args[0]); 1542 t_port = match_strdup(&args[0]);
1544 if (!t_port) { 1543 if (!t_port) {
1545 ret = -ENOMEM; 1544 ret = -ENOMEM;
1546 goto out; 1545 goto out;
1547 } 1546 }
1548 if (strlen(t_port) >= PR_APTPL_MAX_TPORT_LEN) { 1547 if (strlen(t_port) >= PR_APTPL_MAX_TPORT_LEN) {
1549 pr_err("APTPL metadata target_node=" 1548 pr_err("APTPL metadata target_node="
1550 " exceeds PR_APTPL_MAX_TPORT_LEN: %d\n", 1549 " exceeds PR_APTPL_MAX_TPORT_LEN: %d\n",
1551 PR_APTPL_MAX_TPORT_LEN); 1550 PR_APTPL_MAX_TPORT_LEN);
1552 ret = -EINVAL; 1551 ret = -EINVAL;
1553 break; 1552 break;
1554 } 1553 }
1555 break; 1554 break;
1556 case Opt_tpgt: 1555 case Opt_tpgt:
1557 match_int(args, &arg); 1556 match_int(args, &arg);
1558 tpgt = (u16)arg; 1557 tpgt = (u16)arg;
1559 break; 1558 break;
1560 case Opt_port_rtpi: 1559 case Opt_port_rtpi:
1561 match_int(args, &arg); 1560 match_int(args, &arg);
1562 port_rtpi = (u16)arg; 1561 port_rtpi = (u16)arg;
1563 break; 1562 break;
1564 case Opt_target_lun: 1563 case Opt_target_lun:
1565 match_int(args, &arg); 1564 match_int(args, &arg);
1566 target_lun = (u32)arg; 1565 target_lun = (u32)arg;
1567 break; 1566 break;
1568 default: 1567 default:
1569 break; 1568 break;
1570 } 1569 }
1571 } 1570 }
1572 1571
1573 if (!i_port || !t_port || !sa_res_key) { 1572 if (!i_port || !t_port || !sa_res_key) {
1574 pr_err("Illegal parameters for APTPL registration\n"); 1573 pr_err("Illegal parameters for APTPL registration\n");
1575 ret = -EINVAL; 1574 ret = -EINVAL;
1576 goto out; 1575 goto out;
1577 } 1576 }
1578 1577
1579 if (res_holder && !(type)) { 1578 if (res_holder && !(type)) {
1580 pr_err("Illegal PR type: 0x%02x for reservation" 1579 pr_err("Illegal PR type: 0x%02x for reservation"
1581 " holder\n", type); 1580 " holder\n", type);
1582 ret = -EINVAL; 1581 ret = -EINVAL;
1583 goto out; 1582 goto out;
1584 } 1583 }
1585 1584
1586 ret = core_scsi3_alloc_aptpl_registration(&su_dev->t10_pr, sa_res_key, 1585 ret = core_scsi3_alloc_aptpl_registration(&su_dev->t10_pr, sa_res_key,
1587 i_port, isid, mapped_lun, t_port, tpgt, target_lun, 1586 i_port, isid, mapped_lun, t_port, tpgt, target_lun,
1588 res_holder, all_tg_pt, type); 1587 res_holder, all_tg_pt, type);
1589 out: 1588 out:
1590 kfree(i_fabric); 1589 kfree(i_fabric);
1591 kfree(i_port); 1590 kfree(i_port);
1592 kfree(isid); 1591 kfree(isid);
1593 kfree(t_fabric); 1592 kfree(t_fabric);
1594 kfree(t_port); 1593 kfree(t_port);
1595 kfree(orig); 1594 kfree(orig);
1596 return (ret == 0) ? count : ret; 1595 return (ret == 0) ? count : ret;
1597 } 1596 }
1598 1597
1599 SE_DEV_PR_ATTR(res_aptpl_metadata, S_IRUGO | S_IWUSR); 1598 SE_DEV_PR_ATTR(res_aptpl_metadata, S_IRUGO | S_IWUSR);
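Writes to res_aptpl_metadata take one comma-separated key=value record matching the token table above; the handler insists on initiator_node, target_node and a non-zero sa_res_key, and refuses to run while active fabric exports exist. A hedged sketch of composing such a record from userspace, assuming an iSCSI fabric and made-up IQNs and paths:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        const char *attr = "/sys/kernel/config/target/core/"
                           "fileio_0/mydev/pr/res_aptpl_metadata";
        /* initiator_node, target_node and sa_res_key are mandatory;
         * the remaining fields default to zero in the store handler. */
        const char *rec =
                "initiator_fabric=iSCSI,"
                "initiator_node=iqn.1994-05.com.example:client,"
                "sa_res_key=0x1234abcd,"
                "target_fabric=iSCSI,"
                "target_node=iqn.2003-01.org.example:target,"
                "tpgt=1,mapped_lun=0,target_lun=0";
        int fd = open(attr, O_WRONLY);

        if (fd < 0 || write(fd, rec, strlen(rec)) < 0) {
                perror(attr);
                return 1;
        }
        close(fd);
        return 0;
}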
1600 1599
1601 CONFIGFS_EATTR_OPS(target_core_dev_pr, se_subsystem_dev, se_dev_pr_group); 1600 CONFIGFS_EATTR_OPS(target_core_dev_pr, se_subsystem_dev, se_dev_pr_group);
1602 1601
1603 static struct configfs_attribute *target_core_dev_pr_attrs[] = { 1602 static struct configfs_attribute *target_core_dev_pr_attrs[] = {
1604 &target_core_dev_pr_res_holder.attr, 1603 &target_core_dev_pr_res_holder.attr,
1605 &target_core_dev_pr_res_pr_all_tgt_pts.attr, 1604 &target_core_dev_pr_res_pr_all_tgt_pts.attr,
1606 &target_core_dev_pr_res_pr_generation.attr, 1605 &target_core_dev_pr_res_pr_generation.attr,
1607 &target_core_dev_pr_res_pr_holder_tg_port.attr, 1606 &target_core_dev_pr_res_pr_holder_tg_port.attr,
1608 &target_core_dev_pr_res_pr_registered_i_pts.attr, 1607 &target_core_dev_pr_res_pr_registered_i_pts.attr,
1609 &target_core_dev_pr_res_pr_type.attr, 1608 &target_core_dev_pr_res_pr_type.attr,
1610 &target_core_dev_pr_res_type.attr, 1609 &target_core_dev_pr_res_type.attr,
1611 &target_core_dev_pr_res_aptpl_active.attr, 1610 &target_core_dev_pr_res_aptpl_active.attr,
1612 &target_core_dev_pr_res_aptpl_metadata.attr, 1611 &target_core_dev_pr_res_aptpl_metadata.attr,
1613 NULL, 1612 NULL,
1614 }; 1613 };
1615 1614
1616 static struct configfs_item_operations target_core_dev_pr_ops = { 1615 static struct configfs_item_operations target_core_dev_pr_ops = {
1617 .show_attribute = target_core_dev_pr_attr_show, 1616 .show_attribute = target_core_dev_pr_attr_show,
1618 .store_attribute = target_core_dev_pr_attr_store, 1617 .store_attribute = target_core_dev_pr_attr_store,
1619 }; 1618 };
1620 1619
1621 static struct config_item_type target_core_dev_pr_cit = { 1620 static struct config_item_type target_core_dev_pr_cit = {
1622 .ct_item_ops = &target_core_dev_pr_ops, 1621 .ct_item_ops = &target_core_dev_pr_ops,
1623 .ct_attrs = target_core_dev_pr_attrs, 1622 .ct_attrs = target_core_dev_pr_attrs,
1624 .ct_owner = THIS_MODULE, 1623 .ct_owner = THIS_MODULE,
1625 }; 1624 };
1626 1625
1627 /* End functions for struct config_item_type target_core_dev_pr_cit */ 1626 /* End functions for struct config_item_type target_core_dev_pr_cit */
1628 1627
1629 /* Start functions for struct config_item_type target_core_dev_cit */ 1628 /* Start functions for struct config_item_type target_core_dev_cit */
1630 1629
1631 static ssize_t target_core_show_dev_info(void *p, char *page) 1630 static ssize_t target_core_show_dev_info(void *p, char *page)
1632 { 1631 {
1633 struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p; 1632 struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
1634 struct se_hba *hba = se_dev->se_dev_hba; 1633 struct se_hba *hba = se_dev->se_dev_hba;
1635 struct se_subsystem_api *t = hba->transport; 1634 struct se_subsystem_api *t = hba->transport;
1636 int bl = 0; 1635 int bl = 0;
1637 ssize_t read_bytes = 0; 1636 ssize_t read_bytes = 0;
1638 1637
1639 if (!se_dev->se_dev_ptr) 1638 if (!se_dev->se_dev_ptr)
1640 return -ENODEV; 1639 return -ENODEV;
1641 1640
1642 transport_dump_dev_state(se_dev->se_dev_ptr, page, &bl); 1641 transport_dump_dev_state(se_dev->se_dev_ptr, page, &bl);
1643 read_bytes += bl; 1642 read_bytes += bl;
1644 read_bytes += t->show_configfs_dev_params(hba, se_dev, page+read_bytes); 1643 read_bytes += t->show_configfs_dev_params(hba, se_dev, page+read_bytes);
1645 return read_bytes; 1644 return read_bytes;
1646 } 1645 }
1647 1646
1648 static struct target_core_configfs_attribute target_core_attr_dev_info = { 1647 static struct target_core_configfs_attribute target_core_attr_dev_info = {
1649 .attr = { .ca_owner = THIS_MODULE, 1648 .attr = { .ca_owner = THIS_MODULE,
1650 .ca_name = "info", 1649 .ca_name = "info",
1651 .ca_mode = S_IRUGO }, 1650 .ca_mode = S_IRUGO },
1652 .show = target_core_show_dev_info, 1651 .show = target_core_show_dev_info,
1653 .store = NULL, 1652 .store = NULL,
1654 }; 1653 };
1655 1654
1656 static ssize_t target_core_store_dev_control( 1655 static ssize_t target_core_store_dev_control(
1657 void *p, 1656 void *p,
1658 const char *page, 1657 const char *page,
1659 size_t count) 1658 size_t count)
1660 { 1659 {
1661 struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p; 1660 struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
1662 struct se_hba *hba = se_dev->se_dev_hba; 1661 struct se_hba *hba = se_dev->se_dev_hba;
1663 struct se_subsystem_api *t = hba->transport; 1662 struct se_subsystem_api *t = hba->transport;
1664 1663
1665 if (!se_dev->se_dev_su_ptr) { 1664 if (!se_dev->se_dev_su_ptr) {
1666 pr_err("Unable to locate struct se_subsystem_dev>se" 1665 pr_err("Unable to locate struct se_subsystem_dev>se"
1667 "_dev_su_ptr\n"); 1666 "_dev_su_ptr\n");
1668 return -EINVAL; 1667 return -EINVAL;
1669 } 1668 }
1670 1669
1671 return t->set_configfs_dev_params(hba, se_dev, page, count); 1670 return t->set_configfs_dev_params(hba, se_dev, page, count);
1672 } 1671 }
1673 1672
1674 static struct target_core_configfs_attribute target_core_attr_dev_control = { 1673 static struct target_core_configfs_attribute target_core_attr_dev_control = {
1675 .attr = { .ca_owner = THIS_MODULE, 1674 .attr = { .ca_owner = THIS_MODULE,
1676 .ca_name = "control", 1675 .ca_name = "control",
1677 .ca_mode = S_IWUSR }, 1676 .ca_mode = S_IWUSR },
1678 .show = NULL, 1677 .show = NULL,
1679 .store = target_core_store_dev_control, 1678 .store = target_core_store_dev_control,
1680 }; 1679 };
1681 1680
1682 static ssize_t target_core_show_dev_alias(void *p, char *page) 1681 static ssize_t target_core_show_dev_alias(void *p, char *page)
1683 { 1682 {
1684 struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p; 1683 struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
1685 1684
1686 if (!(se_dev->su_dev_flags & SDF_USING_ALIAS)) 1685 if (!(se_dev->su_dev_flags & SDF_USING_ALIAS))
1687 return 0; 1686 return 0;
1688 1687
1689 return snprintf(page, PAGE_SIZE, "%s\n", se_dev->se_dev_alias); 1688 return snprintf(page, PAGE_SIZE, "%s\n", se_dev->se_dev_alias);
1690 } 1689 }
1691 1690
1692 static ssize_t target_core_store_dev_alias( 1691 static ssize_t target_core_store_dev_alias(
1693 void *p, 1692 void *p,
1694 const char *page, 1693 const char *page,
1695 size_t count) 1694 size_t count)
1696 { 1695 {
1697 struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p; 1696 struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
1698 struct se_hba *hba = se_dev->se_dev_hba; 1697 struct se_hba *hba = se_dev->se_dev_hba;
1699 ssize_t read_bytes; 1698 ssize_t read_bytes;
1700 1699
1701 if (count > (SE_DEV_ALIAS_LEN-1)) { 1700 if (count > (SE_DEV_ALIAS_LEN-1)) {
1702 pr_err("alias count: %d exceeds" 1701 pr_err("alias count: %d exceeds"
1703 " SE_DEV_ALIAS_LEN-1: %u\n", (int)count, 1702 " SE_DEV_ALIAS_LEN-1: %u\n", (int)count,
1704 SE_DEV_ALIAS_LEN-1); 1703 SE_DEV_ALIAS_LEN-1);
1705 return -EINVAL; 1704 return -EINVAL;
1706 } 1705 }
1707 1706
1708 se_dev->su_dev_flags |= SDF_USING_ALIAS; 1707 se_dev->su_dev_flags |= SDF_USING_ALIAS;
1709 read_bytes = snprintf(&se_dev->se_dev_alias[0], SE_DEV_ALIAS_LEN, 1708 read_bytes = snprintf(&se_dev->se_dev_alias[0], SE_DEV_ALIAS_LEN,
1710 "%s", page); 1709 "%s", page);
1711 1710
1712 pr_debug("Target_Core_ConfigFS: %s/%s set alias: %s\n", 1711 pr_debug("Target_Core_ConfigFS: %s/%s set alias: %s\n",
1713 config_item_name(&hba->hba_group.cg_item), 1712 config_item_name(&hba->hba_group.cg_item),
1714 config_item_name(&se_dev->se_dev_group.cg_item), 1713 config_item_name(&se_dev->se_dev_group.cg_item),
1715 se_dev->se_dev_alias); 1714 se_dev->se_dev_alias);
1716 1715
1717 return read_bytes; 1716 return read_bytes;
1718 } 1717 }
1719 1718
1720 static struct target_core_configfs_attribute target_core_attr_dev_alias = { 1719 static struct target_core_configfs_attribute target_core_attr_dev_alias = {
1721 .attr = { .ca_owner = THIS_MODULE, 1720 .attr = { .ca_owner = THIS_MODULE,
1722 .ca_name = "alias", 1721 .ca_name = "alias",
1723 .ca_mode = S_IRUGO | S_IWUSR }, 1722 .ca_mode = S_IRUGO | S_IWUSR },
1724 .show = target_core_show_dev_alias, 1723 .show = target_core_show_dev_alias,
1725 .store = target_core_store_dev_alias, 1724 .store = target_core_store_dev_alias,
1726 }; 1725 };
1727 1726
1728 static ssize_t target_core_show_dev_udev_path(void *p, char *page) 1727 static ssize_t target_core_show_dev_udev_path(void *p, char *page)
1729 { 1728 {
1730 struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p; 1729 struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
1731 1730
1732 if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) 1731 if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH))
1733 return 0; 1732 return 0;
1734 1733
1735 return snprintf(page, PAGE_SIZE, "%s\n", se_dev->se_dev_udev_path); 1734 return snprintf(page, PAGE_SIZE, "%s\n", se_dev->se_dev_udev_path);
1736 } 1735 }
1737 1736
1738 static ssize_t target_core_store_dev_udev_path( 1737 static ssize_t target_core_store_dev_udev_path(
1739 void *p, 1738 void *p,
1740 const char *page, 1739 const char *page,
1741 size_t count) 1740 size_t count)
1742 { 1741 {
1743 struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p; 1742 struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
1744 struct se_hba *hba = se_dev->se_dev_hba; 1743 struct se_hba *hba = se_dev->se_dev_hba;
1745 ssize_t read_bytes; 1744 ssize_t read_bytes;
1746 1745
1747 if (count > (SE_UDEV_PATH_LEN-1)) { 1746 if (count > (SE_UDEV_PATH_LEN-1)) {
1748 pr_err("udev_path count: %d exceeds" 1747 pr_err("udev_path count: %d exceeds"
1749 " SE_UDEV_PATH_LEN-1: %u\n", (int)count, 1748 " SE_UDEV_PATH_LEN-1: %u\n", (int)count,
1750 SE_UDEV_PATH_LEN-1); 1749 SE_UDEV_PATH_LEN-1);
1751 return -EINVAL; 1750 return -EINVAL;
1752 } 1751 }
1753 1752
1754 se_dev->su_dev_flags |= SDF_USING_UDEV_PATH; 1753 se_dev->su_dev_flags |= SDF_USING_UDEV_PATH;
1755 read_bytes = snprintf(&se_dev->se_dev_udev_path[0], SE_UDEV_PATH_LEN, 1754 read_bytes = snprintf(&se_dev->se_dev_udev_path[0], SE_UDEV_PATH_LEN,
1756 "%s", page); 1755 "%s", page);
1757 1756
1758 pr_debug("Target_Core_ConfigFS: %s/%s set udev_path: %s\n", 1757 pr_debug("Target_Core_ConfigFS: %s/%s set udev_path: %s\n",
1759 config_item_name(&hba->hba_group.cg_item), 1758 config_item_name(&hba->hba_group.cg_item),
1760 config_item_name(&se_dev->se_dev_group.cg_item), 1759 config_item_name(&se_dev->se_dev_group.cg_item),
1761 se_dev->se_dev_udev_path); 1760 se_dev->se_dev_udev_path);
1762 1761
1763 return read_bytes; 1762 return read_bytes;
1764 } 1763 }
1765 1764
1766 static struct target_core_configfs_attribute target_core_attr_dev_udev_path = { 1765 static struct target_core_configfs_attribute target_core_attr_dev_udev_path = {
1767 .attr = { .ca_owner = THIS_MODULE, 1766 .attr = { .ca_owner = THIS_MODULE,
1768 .ca_name = "udev_path", 1767 .ca_name = "udev_path",
1769 .ca_mode = S_IRUGO | S_IWUSR }, 1768 .ca_mode = S_IRUGO | S_IWUSR },
1770 .show = target_core_show_dev_udev_path, 1769 .show = target_core_show_dev_udev_path,
1771 .store = target_core_store_dev_udev_path, 1770 .store = target_core_store_dev_udev_path,
1772 }; 1771 };
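Both alias and udev_path use the same bounded store: writes longer than SE_DEV_ALIAS_LEN-1 / SE_UDEV_PATH_LEN-1 bytes are rejected, and the page is copied verbatim with snprintf(). A small sketch setting udev_path, all paths hypothetical:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        const char *attr = "/sys/kernel/config/target/core/"
                           "fileio_0/mydev/udev_path";
        const char *val = "/dev/disk/by-id/example-disk\n";
        int fd = open(attr, O_WRONLY);

        if (fd < 0 || write(fd, val, strlen(val)) < 0) {
                perror(attr);
                return 1;
        }
        close(fd);
        return 0;
}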
1773 1772
1774 static ssize_t target_core_store_dev_enable( 1773 static ssize_t target_core_store_dev_enable(
1775 void *p, 1774 void *p,
1776 const char *page, 1775 const char *page,
1777 size_t count) 1776 size_t count)
1778 { 1777 {
1779 struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p; 1778 struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
1780 struct se_device *dev; 1779 struct se_device *dev;
1781 struct se_hba *hba = se_dev->se_dev_hba; 1780 struct se_hba *hba = se_dev->se_dev_hba;
1782 struct se_subsystem_api *t = hba->transport; 1781 struct se_subsystem_api *t = hba->transport;
1783 char *ptr; 1782 char *ptr;
1784 1783
1785 ptr = strstr(page, "1"); 1784 ptr = strstr(page, "1");
1786 if (!ptr) { 1785 if (!ptr) {
1787 pr_err("For dev_enable ops, only valid value" 1786 pr_err("For dev_enable ops, only valid value"
1788 " is \"1\"\n"); 1787 " is \"1\"\n");
1789 return -EINVAL; 1788 return -EINVAL;
1790 } 1789 }
1791 if (se_dev->se_dev_ptr) { 1790 if (se_dev->se_dev_ptr) {
1792 pr_err("se_dev->se_dev_ptr already set for storage" 1791 pr_err("se_dev->se_dev_ptr already set for storage"
1793 " object\n"); 1792 " object\n");
1794 return -EEXIST; 1793 return -EEXIST;
1795 } 1794 }
1796 1795
1797 if (t->check_configfs_dev_params(hba, se_dev) < 0) 1796 if (t->check_configfs_dev_params(hba, se_dev) < 0)
1798 return -EINVAL; 1797 return -EINVAL;
1799 1798
1800 dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr); 1799 dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
1801 if (IS_ERR(dev)) 1800 if (IS_ERR(dev))
1802 return PTR_ERR(dev); 1801 return PTR_ERR(dev);
1803 else if (!dev) 1802 else if (!dev)
1804 return -EINVAL; 1803 return -EINVAL;
1805 1804
1806 se_dev->se_dev_ptr = dev; 1805 se_dev->se_dev_ptr = dev;
1807 pr_debug("Target_Core_ConfigFS: Registered se_dev->se_dev_ptr:" 1806 pr_debug("Target_Core_ConfigFS: Registered se_dev->se_dev_ptr:"
1808 " %p\n", se_dev->se_dev_ptr); 1807 " %p\n", se_dev->se_dev_ptr);
1809 1808
1810 return count; 1809 return count;
1811 } 1810 }
1812 1811
1813 static struct target_core_configfs_attribute target_core_attr_dev_enable = { 1812 static struct target_core_configfs_attribute target_core_attr_dev_enable = {
1814 .attr = { .ca_owner = THIS_MODULE, 1813 .attr = { .ca_owner = THIS_MODULE,
1815 .ca_name = "enable", 1814 .ca_name = "enable",
1816 .ca_mode = S_IWUSR }, 1815 .ca_mode = S_IWUSR },
1817 .show = NULL, 1816 .show = NULL,
1818 .store = target_core_store_dev_enable, 1817 .store = target_core_store_dev_enable,
1819 }; 1818 };
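Device enable is a one-shot trigger: the store only accepts a buffer containing the character "1", calls the backend's create_virtdevice(), and fails with -EEXIST on a second attempt. The usual invocation, expressed as a userspace sketch with hypothetical HBA/device names:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        const char *path = "/sys/kernel/config/target/core/"
                           "fileio_0/mydev/enable";
        int fd = open(path, O_WRONLY);

        if (fd < 0 || write(fd, "1", 1) != 1) {
                perror(path);
                return 1;
        }
        close(fd);
        return 0;
}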
1820 1819
1821 static ssize_t target_core_show_alua_lu_gp(void *p, char *page) 1820 static ssize_t target_core_show_alua_lu_gp(void *p, char *page)
1822 { 1821 {
1823 struct se_device *dev; 1822 struct se_device *dev;
1824 struct se_subsystem_dev *su_dev = (struct se_subsystem_dev *)p; 1823 struct se_subsystem_dev *su_dev = (struct se_subsystem_dev *)p;
1825 struct config_item *lu_ci; 1824 struct config_item *lu_ci;
1826 struct t10_alua_lu_gp *lu_gp; 1825 struct t10_alua_lu_gp *lu_gp;
1827 struct t10_alua_lu_gp_member *lu_gp_mem; 1826 struct t10_alua_lu_gp_member *lu_gp_mem;
1828 ssize_t len = 0; 1827 ssize_t len = 0;
1829 1828
1830 dev = su_dev->se_dev_ptr; 1829 dev = su_dev->se_dev_ptr;
1831 if (!dev) 1830 if (!dev)
1832 return -ENODEV; 1831 return -ENODEV;
1833 1832
1834 if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) 1833 if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED)
1835 return len; 1834 return len;
1836 1835
1837 lu_gp_mem = dev->dev_alua_lu_gp_mem; 1836 lu_gp_mem = dev->dev_alua_lu_gp_mem;
1838 if (!lu_gp_mem) { 1837 if (!lu_gp_mem) {
1839 pr_err("NULL struct se_device->dev_alua_lu_gp_mem" 1838 pr_err("NULL struct se_device->dev_alua_lu_gp_mem"
1840 " pointer\n"); 1839 " pointer\n");
1841 return -EINVAL; 1840 return -EINVAL;
1842 } 1841 }
1843 1842
1844 spin_lock(&lu_gp_mem->lu_gp_mem_lock); 1843 spin_lock(&lu_gp_mem->lu_gp_mem_lock);
1845 lu_gp = lu_gp_mem->lu_gp; 1844 lu_gp = lu_gp_mem->lu_gp;
1846 if (lu_gp) { 1845 if (lu_gp) {
1847 lu_ci = &lu_gp->lu_gp_group.cg_item; 1846 lu_ci = &lu_gp->lu_gp_group.cg_item;
1848 len += sprintf(page, "LU Group Alias: %s\nLU Group ID: %hu\n", 1847 len += sprintf(page, "LU Group Alias: %s\nLU Group ID: %hu\n",
1849 config_item_name(lu_ci), lu_gp->lu_gp_id); 1848 config_item_name(lu_ci), lu_gp->lu_gp_id);
1850 } 1849 }
1851 spin_unlock(&lu_gp_mem->lu_gp_mem_lock); 1850 spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1852 1851
1853 return len; 1852 return len;
1854 } 1853 }
1855 1854
1856 static ssize_t target_core_store_alua_lu_gp( 1855 static ssize_t target_core_store_alua_lu_gp(
1857 void *p, 1856 void *p,
1858 const char *page, 1857 const char *page,
1859 size_t count) 1858 size_t count)
1860 { 1859 {
1861 struct se_device *dev; 1860 struct se_device *dev;
1862 struct se_subsystem_dev *su_dev = (struct se_subsystem_dev *)p; 1861 struct se_subsystem_dev *su_dev = (struct se_subsystem_dev *)p;
1863 struct se_hba *hba = su_dev->se_dev_hba; 1862 struct se_hba *hba = su_dev->se_dev_hba;
1864 struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL; 1863 struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL;
1865 struct t10_alua_lu_gp_member *lu_gp_mem; 1864 struct t10_alua_lu_gp_member *lu_gp_mem;
1866 unsigned char buf[LU_GROUP_NAME_BUF]; 1865 unsigned char buf[LU_GROUP_NAME_BUF];
1867 int move = 0; 1866 int move = 0;
1868 1867
1869 dev = su_dev->se_dev_ptr; 1868 dev = su_dev->se_dev_ptr;
1870 if (!dev) 1869 if (!dev)
1871 return -ENODEV; 1870 return -ENODEV;
1872 1871
1873 if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) { 1872 if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) {
1874 pr_warn("SPC3_ALUA_EMULATED not enabled for %s/%s\n", 1873 pr_warn("SPC3_ALUA_EMULATED not enabled for %s/%s\n",
1875 config_item_name(&hba->hba_group.cg_item), 1874 config_item_name(&hba->hba_group.cg_item),
1876 config_item_name(&su_dev->se_dev_group.cg_item)); 1875 config_item_name(&su_dev->se_dev_group.cg_item));
1877 return -EINVAL; 1876 return -EINVAL;
1878 } 1877 }
1879 if (count >= LU_GROUP_NAME_BUF) { 1878 if (count >= LU_GROUP_NAME_BUF) {
1880 pr_err("ALUA LU Group Alias too large!\n"); 1879 pr_err("ALUA LU Group Alias too large!\n");
1881 return -EINVAL; 1880 return -EINVAL;
1882 } 1881 }
1883 memset(buf, 0, LU_GROUP_NAME_BUF); 1882 memset(buf, 0, LU_GROUP_NAME_BUF);
1884 memcpy(buf, page, count); 1883 memcpy(buf, page, count);
1885 /* 1884 /*
1886 * Any ALUA logical unit alias besides "NULL" means we will be 1885 * Any ALUA logical unit alias besides "NULL" means we will be
1887 * making a new group association. 1886 * making a new group association.
1888 */ 1887 */
1889 if (strcmp(strstrip(buf), "NULL")) { 1888 if (strcmp(strstrip(buf), "NULL")) {
1890 /* 1889 /*
1891 * core_alua_get_lu_gp_by_name() will increment reference to 1890 * core_alua_get_lu_gp_by_name() will increment reference to
1892 * struct t10_alua_lu_gp. This reference is released with 1891 * struct t10_alua_lu_gp. This reference is released with
1893 * core_alua_put_lu_gp_from_name() below. 1892 * core_alua_put_lu_gp_from_name() below.
1894 */ 1893 */
1895 lu_gp_new = core_alua_get_lu_gp_by_name(strstrip(buf)); 1894 lu_gp_new = core_alua_get_lu_gp_by_name(strstrip(buf));
1896 if (!lu_gp_new) 1895 if (!lu_gp_new)
1897 return -ENODEV; 1896 return -ENODEV;
1898 } 1897 }
1899 lu_gp_mem = dev->dev_alua_lu_gp_mem; 1898 lu_gp_mem = dev->dev_alua_lu_gp_mem;
1900 if (!lu_gp_mem) { 1899 if (!lu_gp_mem) {
1901 if (lu_gp_new) 1900 if (lu_gp_new)
1902 core_alua_put_lu_gp_from_name(lu_gp_new); 1901 core_alua_put_lu_gp_from_name(lu_gp_new);
1903 pr_err("NULL struct se_device->dev_alua_lu_gp_mem" 1902 pr_err("NULL struct se_device->dev_alua_lu_gp_mem"
1904 " pointer\n"); 1903 " pointer\n");
1905 return -EINVAL; 1904 return -EINVAL;
1906 } 1905 }
1907 1906
1908 spin_lock(&lu_gp_mem->lu_gp_mem_lock); 1907 spin_lock(&lu_gp_mem->lu_gp_mem_lock);
1909 lu_gp = lu_gp_mem->lu_gp; 1908 lu_gp = lu_gp_mem->lu_gp;
1910 if (lu_gp) { 1909 if (lu_gp) {
1911 /* 1910 /*
1912 * Clearing an existing lu_gp association, and replacing 1911 * Clearing an existing lu_gp association, and replacing
1913 * with NULL 1912 * with NULL
1914 */ 1913 */
1915 if (!lu_gp_new) { 1914 if (!lu_gp_new) {
1916 pr_debug("Target_Core_ConfigFS: Releasing %s/%s" 1915 pr_debug("Target_Core_ConfigFS: Releasing %s/%s"
1917 " from ALUA LU Group: core/alua/lu_gps/%s, ID:" 1916 " from ALUA LU Group: core/alua/lu_gps/%s, ID:"
1918 " %hu\n", 1917 " %hu\n",
1919 config_item_name(&hba->hba_group.cg_item), 1918 config_item_name(&hba->hba_group.cg_item),
1920 config_item_name(&su_dev->se_dev_group.cg_item), 1919 config_item_name(&su_dev->se_dev_group.cg_item),
1921 config_item_name(&lu_gp->lu_gp_group.cg_item), 1920 config_item_name(&lu_gp->lu_gp_group.cg_item),
1922 lu_gp->lu_gp_id); 1921 lu_gp->lu_gp_id);
1923 1922
1924 __core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp); 1923 __core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp);
1925 spin_unlock(&lu_gp_mem->lu_gp_mem_lock); 1924 spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1926 1925
1927 return count; 1926 return count;
1928 } 1927 }
1929 /* 1928 /*
1930 * Removing existing association of lu_gp_mem with lu_gp 1929 * Removing existing association of lu_gp_mem with lu_gp
1931 */ 1930 */
1932 __core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp); 1931 __core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp);
1933 move = 1; 1932 move = 1;
1934 } 1933 }
1935 /* 1934 /*
1936 * Associate lu_gp_mem with lu_gp_new. 1935 * Associate lu_gp_mem with lu_gp_new.
1937 */ 1936 */
1938 __core_alua_attach_lu_gp_mem(lu_gp_mem, lu_gp_new); 1937 __core_alua_attach_lu_gp_mem(lu_gp_mem, lu_gp_new);
1939 spin_unlock(&lu_gp_mem->lu_gp_mem_lock); 1938 spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1940 1939
1941 pr_debug("Target_Core_ConfigFS: %s %s/%s to ALUA LU Group:" 1940 pr_debug("Target_Core_ConfigFS: %s %s/%s to ALUA LU Group:"
1942 " core/alua/lu_gps/%s, ID: %hu\n", 1941 " core/alua/lu_gps/%s, ID: %hu\n",
1943 (move) ? "Moving" : "Adding", 1942 (move) ? "Moving" : "Adding",
1944 config_item_name(&hba->hba_group.cg_item), 1943 config_item_name(&hba->hba_group.cg_item),
1945 config_item_name(&su_dev->se_dev_group.cg_item), 1944 config_item_name(&su_dev->se_dev_group.cg_item),
1946 config_item_name(&lu_gp_new->lu_gp_group.cg_item), 1945 config_item_name(&lu_gp_new->lu_gp_group.cg_item),
1947 lu_gp_new->lu_gp_id); 1946 lu_gp_new->lu_gp_id);
1948 1947
1949 core_alua_put_lu_gp_from_name(lu_gp_new); 1948 core_alua_put_lu_gp_from_name(lu_gp_new);
1950 return count; 1949 return count;
1951 } 1950 }
1952 1951
1953 static struct target_core_configfs_attribute target_core_attr_dev_alua_lu_gp = { 1952 static struct target_core_configfs_attribute target_core_attr_dev_alua_lu_gp = {
1954 .attr = { .ca_owner = THIS_MODULE, 1953 .attr = { .ca_owner = THIS_MODULE,
1955 .ca_name = "alua_lu_gp", 1954 .ca_name = "alua_lu_gp",
1956 .ca_mode = S_IRUGO | S_IWUSR }, 1955 .ca_mode = S_IRUGO | S_IWUSR },
1957 .show = target_core_show_alua_lu_gp, 1956 .show = target_core_show_alua_lu_gp,
1958 .store = target_core_store_alua_lu_gp, 1957 .store = target_core_store_alua_lu_gp,
1959 }; 1958 };
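The alua_lu_gp store associates the device with a named LU group, moves it if it already belongs to one, and clears the association when the literal string "NULL" is written; it only applies while SPC3_ALUA_EMULATED is active. A sketch of both operations, group and device names invented:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int store(const char *path, const char *val)
{
        int fd = open(path, O_WRONLY);

        if (fd < 0)
                return -1;
        if (write(fd, val, strlen(val)) < 0) {
                close(fd);
                return -1;
        }
        return close(fd);
}

int main(void)
{
        const char *attr = "/sys/kernel/config/target/core/"
                           "fileio_0/mydev/alua_lu_gp";

        if (store(attr, "my_lu_gp\n"))          /* join (or move to) group */
                perror("join");
        if (store(attr, "NULL\n"))              /* drop the association */
                perror("clear");
        return 0;
}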
1960 1959
1961 static struct configfs_attribute *lio_core_dev_attrs[] = { 1960 static struct configfs_attribute *lio_core_dev_attrs[] = {
1962 &target_core_attr_dev_info.attr, 1961 &target_core_attr_dev_info.attr,
1963 &target_core_attr_dev_control.attr, 1962 &target_core_attr_dev_control.attr,
1964 &target_core_attr_dev_alias.attr, 1963 &target_core_attr_dev_alias.attr,
1965 &target_core_attr_dev_udev_path.attr, 1964 &target_core_attr_dev_udev_path.attr,
1966 &target_core_attr_dev_enable.attr, 1965 &target_core_attr_dev_enable.attr,
1967 &target_core_attr_dev_alua_lu_gp.attr, 1966 &target_core_attr_dev_alua_lu_gp.attr,
1968 NULL, 1967 NULL,
1969 }; 1968 };
1970 1969
1971 static void target_core_dev_release(struct config_item *item) 1970 static void target_core_dev_release(struct config_item *item)
1972 { 1971 {
1973 struct se_subsystem_dev *se_dev = container_of(to_config_group(item), 1972 struct se_subsystem_dev *se_dev = container_of(to_config_group(item),
1974 struct se_subsystem_dev, se_dev_group); 1973 struct se_subsystem_dev, se_dev_group);
1975 struct se_hba *hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item); 1974 struct se_hba *hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item);
1976 struct se_subsystem_api *t = hba->transport; 1975 struct se_subsystem_api *t = hba->transport;
1977 struct config_group *dev_cg = &se_dev->se_dev_group; 1976 struct config_group *dev_cg = &se_dev->se_dev_group;
1978 1977
1979 kfree(dev_cg->default_groups); 1978 kfree(dev_cg->default_groups);
1980 /* 1979 /*
1981 * This pointer will be set when the storage is enabled with: 1980 * This pointer will be set when the storage is enabled with:
1982 * `echo 1 > $CONFIGFS/core/$HBA/$DEV/enable` 1981 * `echo 1 > $CONFIGFS/core/$HBA/$DEV/enable`
1983 */ 1982 */
1984 if (se_dev->se_dev_ptr) { 1983 if (se_dev->se_dev_ptr) {
1985 pr_debug("Target_Core_ConfigFS: Calling se_free_" 1984 pr_debug("Target_Core_ConfigFS: Calling se_free_"
1986 "virtual_device() for se_dev_ptr: %p\n", 1985 "virtual_device() for se_dev_ptr: %p\n",
1987 se_dev->se_dev_ptr); 1986 se_dev->se_dev_ptr);
1988 1987
1989 se_free_virtual_device(se_dev->se_dev_ptr, hba); 1988 se_free_virtual_device(se_dev->se_dev_ptr, hba);
1990 } else { 1989 } else {
1991 /* 1990 /*
1992 * Release struct se_subsystem_dev->se_dev_su_ptr.. 1991 * Release struct se_subsystem_dev->se_dev_su_ptr..
1993 */ 1992 */
1994 pr_debug("Target_Core_ConfigFS: Calling t->free_" 1993 pr_debug("Target_Core_ConfigFS: Calling t->free_"
1995 "device() for se_dev_su_ptr: %p\n", 1994 "device() for se_dev_su_ptr: %p\n",
1996 se_dev->se_dev_su_ptr); 1995 se_dev->se_dev_su_ptr);
1997 1996
1998 t->free_device(se_dev->se_dev_su_ptr); 1997 t->free_device(se_dev->se_dev_su_ptr);
1999 } 1998 }
2000 1999
2001 pr_debug("Target_Core_ConfigFS: Deallocating se_subsystem" 2000 pr_debug("Target_Core_ConfigFS: Deallocating se_subsystem"
2002 "_dev_t: %p\n", se_dev); 2001 "_dev_t: %p\n", se_dev);
2003 kfree(se_dev); 2002 kfree(se_dev);
2004 } 2003 }
2005 2004
2006 static ssize_t target_core_dev_show(struct config_item *item, 2005 static ssize_t target_core_dev_show(struct config_item *item,
2007 struct configfs_attribute *attr, 2006 struct configfs_attribute *attr,
2008 char *page) 2007 char *page)
2009 { 2008 {
2010 struct se_subsystem_dev *se_dev = container_of( 2009 struct se_subsystem_dev *se_dev = container_of(
2011 to_config_group(item), struct se_subsystem_dev, 2010 to_config_group(item), struct se_subsystem_dev,
2012 se_dev_group); 2011 se_dev_group);
2013 struct target_core_configfs_attribute *tc_attr = container_of( 2012 struct target_core_configfs_attribute *tc_attr = container_of(
2014 attr, struct target_core_configfs_attribute, attr); 2013 attr, struct target_core_configfs_attribute, attr);
2015 2014
2016 if (!tc_attr->show) 2015 if (!tc_attr->show)
2017 return -EINVAL; 2016 return -EINVAL;
2018 2017
2019 return tc_attr->show(se_dev, page); 2018 return tc_attr->show(se_dev, page);
2020 } 2019 }
2021 2020
2022 static ssize_t target_core_dev_store(struct config_item *item, 2021 static ssize_t target_core_dev_store(struct config_item *item,
2023 struct configfs_attribute *attr, 2022 struct configfs_attribute *attr,
2024 const char *page, size_t count) 2023 const char *page, size_t count)
2025 { 2024 {
2026 struct se_subsystem_dev *se_dev = container_of( 2025 struct se_subsystem_dev *se_dev = container_of(
2027 to_config_group(item), struct se_subsystem_dev, 2026 to_config_group(item), struct se_subsystem_dev,
2028 se_dev_group); 2027 se_dev_group);
2029 struct target_core_configfs_attribute *tc_attr = container_of( 2028 struct target_core_configfs_attribute *tc_attr = container_of(
2030 attr, struct target_core_configfs_attribute, attr); 2029 attr, struct target_core_configfs_attribute, attr);
2031 2030
2032 if (!tc_attr->store) 2031 if (!tc_attr->store)
2033 return -EINVAL; 2032 return -EINVAL;
2034 2033
2035 return tc_attr->store(se_dev, page, count); 2034 return tc_attr->store(se_dev, page, count);
2036 } 2035 }
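target_core_dev_show()/store() dispatch through container_of(): the generic configfs_attribute pointer is mapped back to the enclosing target_core_configfs_attribute to reach its show/store callbacks. The same idiom in a self-contained userspace sketch:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct attr { const char *name; };

struct wrapped_attr {
        struct attr attr;               /* embedded, generic view */
        int (*show)(char *page);        /* type-specific callback */
};

static int demo_show(char *page) { return sprintf(page, "hello\n"); }

static struct wrapped_attr demo = {
        .attr = { .name = "info" },
        .show = demo_show,
};

int main(void)
{
        struct attr *a = &demo.attr;    /* callers only see this pointer */
        struct wrapped_attr *w = container_of(a, struct wrapped_attr, attr);
        char page[64];

        w->show(page);
        fputs(page, stdout);
        return 0;
}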
2037 2036
2038 static struct configfs_item_operations target_core_dev_item_ops = { 2037 static struct configfs_item_operations target_core_dev_item_ops = {
2039 .release = target_core_dev_release, 2038 .release = target_core_dev_release,
2040 .show_attribute = target_core_dev_show, 2039 .show_attribute = target_core_dev_show,
2041 .store_attribute = target_core_dev_store, 2040 .store_attribute = target_core_dev_store,
2042 }; 2041 };
2043 2042
2044 static struct config_item_type target_core_dev_cit = { 2043 static struct config_item_type target_core_dev_cit = {
2045 .ct_item_ops = &target_core_dev_item_ops, 2044 .ct_item_ops = &target_core_dev_item_ops,
2046 .ct_attrs = lio_core_dev_attrs, 2045 .ct_attrs = lio_core_dev_attrs,
2047 .ct_owner = THIS_MODULE, 2046 .ct_owner = THIS_MODULE,
2048 }; 2047 };
2049 2048
2050 /* End functions for struct config_item_type target_core_dev_cit */ 2049 /* End functions for struct config_item_type target_core_dev_cit */
2051 2050
2052 /* Start functions for struct config_item_type target_core_alua_lu_gp_cit */ 2051 /* Start functions for struct config_item_type target_core_alua_lu_gp_cit */
2053 2052
2054 CONFIGFS_EATTR_STRUCT(target_core_alua_lu_gp, t10_alua_lu_gp); 2053 CONFIGFS_EATTR_STRUCT(target_core_alua_lu_gp, t10_alua_lu_gp);
2055 #define SE_DEV_ALUA_LU_ATTR(_name, _mode) \ 2054 #define SE_DEV_ALUA_LU_ATTR(_name, _mode) \
2056 static struct target_core_alua_lu_gp_attribute \ 2055 static struct target_core_alua_lu_gp_attribute \
2057 target_core_alua_lu_gp_##_name = \ 2056 target_core_alua_lu_gp_##_name = \
2058 __CONFIGFS_EATTR(_name, _mode, \ 2057 __CONFIGFS_EATTR(_name, _mode, \
2059 target_core_alua_lu_gp_show_attr_##_name, \ 2058 target_core_alua_lu_gp_show_attr_##_name, \
2060 target_core_alua_lu_gp_store_attr_##_name); 2059 target_core_alua_lu_gp_store_attr_##_name);
2061 2060
2062 #define SE_DEV_ALUA_LU_ATTR_RO(_name) \ 2061 #define SE_DEV_ALUA_LU_ATTR_RO(_name) \
2063 static struct target_core_alua_lu_gp_attribute \ 2062 static struct target_core_alua_lu_gp_attribute \
2064 target_core_alua_lu_gp_##_name = \ 2063 target_core_alua_lu_gp_##_name = \
2065 __CONFIGFS_EATTR_RO(_name, \ 2064 __CONFIGFS_EATTR_RO(_name, \
2066 target_core_alua_lu_gp_show_attr_##_name); 2065 target_core_alua_lu_gp_show_attr_##_name);
2067 2066
2068 /* 2067 /*
2069 * lu_gp_id 2068 * lu_gp_id
2070 */ 2069 */
2071 static ssize_t target_core_alua_lu_gp_show_attr_lu_gp_id( 2070 static ssize_t target_core_alua_lu_gp_show_attr_lu_gp_id(
2072 struct t10_alua_lu_gp *lu_gp, 2071 struct t10_alua_lu_gp *lu_gp,
2073 char *page) 2072 char *page)
2074 { 2073 {
2075 if (!lu_gp->lu_gp_valid_id) 2074 if (!lu_gp->lu_gp_valid_id)
2076 return 0; 2075 return 0;
2077 2076
2078 return sprintf(page, "%hu\n", lu_gp->lu_gp_id); 2077 return sprintf(page, "%hu\n", lu_gp->lu_gp_id);
2079 } 2078 }
2080 2079
2081 static ssize_t target_core_alua_lu_gp_store_attr_lu_gp_id( 2080 static ssize_t target_core_alua_lu_gp_store_attr_lu_gp_id(
2082 struct t10_alua_lu_gp *lu_gp, 2081 struct t10_alua_lu_gp *lu_gp,
2083 const char *page, 2082 const char *page,
2084 size_t count) 2083 size_t count)
2085 { 2084 {
2086 struct config_group *alua_lu_gp_cg = &lu_gp->lu_gp_group; 2085 struct config_group *alua_lu_gp_cg = &lu_gp->lu_gp_group;
2087 unsigned long lu_gp_id; 2086 unsigned long lu_gp_id;
2088 int ret; 2087 int ret;
2089 2088
2090 ret = strict_strtoul(page, 0, &lu_gp_id); 2089 ret = strict_strtoul(page, 0, &lu_gp_id);
2091 if (ret < 0) { 2090 if (ret < 0) {
2092 pr_err("strict_strtoul() returned %d for" 2091 pr_err("strict_strtoul() returned %d for"
2093 " lu_gp_id\n", ret); 2092 " lu_gp_id\n", ret);
2094 return -EINVAL; 2093 return -EINVAL;
2095 } 2094 }
2096 if (lu_gp_id > 0x0000ffff) { 2095 if (lu_gp_id > 0x0000ffff) {
2097 pr_err("ALUA lu_gp_id: %lu exceeds maximum:" 2096 pr_err("ALUA lu_gp_id: %lu exceeds maximum:"
2098 " 0x0000ffff\n", lu_gp_id); 2097 " 0x0000ffff\n", lu_gp_id);
2099 return -EINVAL; 2098 return -EINVAL;
2100 } 2099 }
2101 2100
2102 ret = core_alua_set_lu_gp_id(lu_gp, (u16)lu_gp_id); 2101 ret = core_alua_set_lu_gp_id(lu_gp, (u16)lu_gp_id);
2103 if (ret < 0) 2102 if (ret < 0)
2104 return -EINVAL; 2103 return -EINVAL;
2105 2104
2106 pr_debug("Target_Core_ConfigFS: Set ALUA Logical Unit" 2105 pr_debug("Target_Core_ConfigFS: Set ALUA Logical Unit"
2107 " Group: core/alua/lu_gps/%s to ID: %hu\n", 2106 " Group: core/alua/lu_gps/%s to ID: %hu\n",
2108 config_item_name(&alua_lu_gp_cg->cg_item), 2107 config_item_name(&alua_lu_gp_cg->cg_item),
2109 lu_gp->lu_gp_id); 2108 lu_gp->lu_gp_id);
2110 2109
2111 return count; 2110 return count;
2112 } 2111 }
2113 2112
2114 SE_DEV_ALUA_LU_ATTR(lu_gp_id, S_IRUGO | S_IWUSR); 2113 SE_DEV_ALUA_LU_ATTR(lu_gp_id, S_IRUGO | S_IWUSR);
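lu_gp_id is parsed with base 0 (decimal, octal or hex) and capped at 0x0000ffff before being handed to core_alua_set_lu_gp_id(). A sketch assigning an ID, group name hypothetical:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        const char *attr = "/sys/kernel/config/target/core/"
                           "alua/lu_gps/my_lu_gp/lu_gp_id";
        const char *id = "0x17\n";      /* anything above 0x0000ffff is rejected */
        int fd = open(attr, O_WRONLY);

        if (fd < 0 || write(fd, id, strlen(id)) < 0) {
                perror(attr);
                return 1;
        }
        close(fd);
        return 0;
}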
2115 2114
2116 /* 2115 /*
2117 * members 2116 * members
2118 */ 2117 */
2119 static ssize_t target_core_alua_lu_gp_show_attr_members( 2118 static ssize_t target_core_alua_lu_gp_show_attr_members(
2120 struct t10_alua_lu_gp *lu_gp, 2119 struct t10_alua_lu_gp *lu_gp,
2121 char *page) 2120 char *page)
2122 { 2121 {
2123 struct se_device *dev; 2122 struct se_device *dev;
2124 struct se_hba *hba; 2123 struct se_hba *hba;
2125 struct se_subsystem_dev *su_dev; 2124 struct se_subsystem_dev *su_dev;
2126 struct t10_alua_lu_gp_member *lu_gp_mem; 2125 struct t10_alua_lu_gp_member *lu_gp_mem;
2127 ssize_t len = 0, cur_len; 2126 ssize_t len = 0, cur_len;
2128 unsigned char buf[LU_GROUP_NAME_BUF]; 2127 unsigned char buf[LU_GROUP_NAME_BUF];
2129 2128
2130 memset(buf, 0, LU_GROUP_NAME_BUF); 2129 memset(buf, 0, LU_GROUP_NAME_BUF);
2131 2130
2132 spin_lock(&lu_gp->lu_gp_lock); 2131 spin_lock(&lu_gp->lu_gp_lock);
2133 list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) { 2132 list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
2134 dev = lu_gp_mem->lu_gp_mem_dev; 2133 dev = lu_gp_mem->lu_gp_mem_dev;
2135 su_dev = dev->se_sub_dev; 2134 su_dev = dev->se_sub_dev;
2136 hba = su_dev->se_dev_hba; 2135 hba = su_dev->se_dev_hba;
2137 2136
2138 cur_len = snprintf(buf, LU_GROUP_NAME_BUF, "%s/%s\n", 2137 cur_len = snprintf(buf, LU_GROUP_NAME_BUF, "%s/%s\n",
2139 config_item_name(&hba->hba_group.cg_item), 2138 config_item_name(&hba->hba_group.cg_item),
2140 config_item_name(&su_dev->se_dev_group.cg_item)); 2139 config_item_name(&su_dev->se_dev_group.cg_item));
2141 cur_len++; /* Extra byte for NULL terminator */ 2140 cur_len++; /* Extra byte for NULL terminator */
2142 2141
2143 if ((cur_len + len) > PAGE_SIZE) { 2142 if ((cur_len + len) > PAGE_SIZE) {
2144 pr_warn("Ran out of lu_gp_show_attr" 2143 pr_warn("Ran out of lu_gp_show_attr"
2145 "_members buffer\n"); 2144 "_members buffer\n");
2146 break; 2145 break;
2147 } 2146 }
2148 memcpy(page+len, buf, cur_len); 2147 memcpy(page+len, buf, cur_len);
2149 len += cur_len; 2148 len += cur_len;
2150 } 2149 }
2151 spin_unlock(&lu_gp->lu_gp_lock); 2150 spin_unlock(&lu_gp->lu_gp_lock);
2152 2151
2153 return len; 2152 return len;
2154 } 2153 }
2155 2154
2156 SE_DEV_ALUA_LU_ATTR_RO(members); 2155 SE_DEV_ALUA_LU_ATTR_RO(members);
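The members attribute emits one <hba>/<device> pair per line and truncates rather than overflowing the page. Reading it from userspace, group name hypothetical:

#include <stdio.h>

int main(void)
{
        const char *path = "/sys/kernel/config/target/core/"
                           "alua/lu_gps/my_lu_gp/members";
        char line[256];
        FILE *f = fopen(path, "r");

        if (!f) {
                perror("fopen");
                return 1;
        }
        while (fgets(line, sizeof(line), f))    /* "<hba>/<dev>" per line */
                fputs(line, stdout);
        fclose(f);
        return 0;
}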
2157 2156
2158 CONFIGFS_EATTR_OPS(target_core_alua_lu_gp, t10_alua_lu_gp, lu_gp_group); 2157 CONFIGFS_EATTR_OPS(target_core_alua_lu_gp, t10_alua_lu_gp, lu_gp_group);
2159 2158
2160 static struct configfs_attribute *target_core_alua_lu_gp_attrs[] = { 2159 static struct configfs_attribute *target_core_alua_lu_gp_attrs[] = {
2161 &target_core_alua_lu_gp_lu_gp_id.attr, 2160 &target_core_alua_lu_gp_lu_gp_id.attr,
2162 &target_core_alua_lu_gp_members.attr, 2161 &target_core_alua_lu_gp_members.attr,
2163 NULL, 2162 NULL,
2164 }; 2163 };
2165 2164
2166 static void target_core_alua_lu_gp_release(struct config_item *item) 2165 static void target_core_alua_lu_gp_release(struct config_item *item)
2167 { 2166 {
2168 struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item), 2167 struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
2169 struct t10_alua_lu_gp, lu_gp_group); 2168 struct t10_alua_lu_gp, lu_gp_group);
2170 2169
2171 core_alua_free_lu_gp(lu_gp); 2170 core_alua_free_lu_gp(lu_gp);
2172 } 2171 }
2173 2172
2174 static struct configfs_item_operations target_core_alua_lu_gp_ops = { 2173 static struct configfs_item_operations target_core_alua_lu_gp_ops = {
2175 .release = target_core_alua_lu_gp_release, 2174 .release = target_core_alua_lu_gp_release,
2176 .show_attribute = target_core_alua_lu_gp_attr_show, 2175 .show_attribute = target_core_alua_lu_gp_attr_show,
2177 .store_attribute = target_core_alua_lu_gp_attr_store, 2176 .store_attribute = target_core_alua_lu_gp_attr_store,
2178 }; 2177 };
2179 2178
2180 static struct config_item_type target_core_alua_lu_gp_cit = { 2179 static struct config_item_type target_core_alua_lu_gp_cit = {
2181 .ct_item_ops = &target_core_alua_lu_gp_ops, 2180 .ct_item_ops = &target_core_alua_lu_gp_ops,
2182 .ct_attrs = target_core_alua_lu_gp_attrs, 2181 .ct_attrs = target_core_alua_lu_gp_attrs,
2183 .ct_owner = THIS_MODULE, 2182 .ct_owner = THIS_MODULE,
2184 }; 2183 };
2185 2184
2186 /* End functions for struct config_item_type target_core_alua_lu_gp_cit */ 2185 /* End functions for struct config_item_type target_core_alua_lu_gp_cit */
2187 2186
2188 /* Start functions for struct config_item_type target_core_alua_lu_gps_cit */ 2187 /* Start functions for struct config_item_type target_core_alua_lu_gps_cit */
2189 2188
2190 static struct config_group *target_core_alua_create_lu_gp( 2189 static struct config_group *target_core_alua_create_lu_gp(
2191 struct config_group *group, 2190 struct config_group *group,
2192 const char *name) 2191 const char *name)
2193 { 2192 {
2194 struct t10_alua_lu_gp *lu_gp; 2193 struct t10_alua_lu_gp *lu_gp;
2195 struct config_group *alua_lu_gp_cg = NULL; 2194 struct config_group *alua_lu_gp_cg = NULL;
2196 struct config_item *alua_lu_gp_ci = NULL; 2195 struct config_item *alua_lu_gp_ci = NULL;
2197 2196
2198 lu_gp = core_alua_allocate_lu_gp(name, 0); 2197 lu_gp = core_alua_allocate_lu_gp(name, 0);
2199 if (IS_ERR(lu_gp)) 2198 if (IS_ERR(lu_gp))
2200 return NULL; 2199 return NULL;
2201 2200
2202 alua_lu_gp_cg = &lu_gp->lu_gp_group; 2201 alua_lu_gp_cg = &lu_gp->lu_gp_group;
2203 alua_lu_gp_ci = &alua_lu_gp_cg->cg_item; 2202 alua_lu_gp_ci = &alua_lu_gp_cg->cg_item;
2204 2203
2205 config_group_init_type_name(alua_lu_gp_cg, name, 2204 config_group_init_type_name(alua_lu_gp_cg, name,
2206 &target_core_alua_lu_gp_cit); 2205 &target_core_alua_lu_gp_cit);
2207 2206
2208 pr_debug("Target_Core_ConfigFS: Allocated ALUA Logical Unit" 2207 pr_debug("Target_Core_ConfigFS: Allocated ALUA Logical Unit"
2209 " Group: core/alua/lu_gps/%s\n", 2208 " Group: core/alua/lu_gps/%s\n",
2210 config_item_name(alua_lu_gp_ci)); 2209 config_item_name(alua_lu_gp_ci));
2211 2210
2212 return alua_lu_gp_cg; 2211 return alua_lu_gp_cg;
2213 2212
2214 } 2213 }
2215 2214
2216 static void target_core_alua_drop_lu_gp( 2215 static void target_core_alua_drop_lu_gp(
2217 struct config_group *group, 2216 struct config_group *group,
2218 struct config_item *item) 2217 struct config_item *item)
2219 { 2218 {
2220 struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item), 2219 struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
2221 struct t10_alua_lu_gp, lu_gp_group); 2220 struct t10_alua_lu_gp, lu_gp_group);
2222 2221
2223 pr_debug("Target_Core_ConfigFS: Releasing ALUA Logical Unit" 2222 pr_debug("Target_Core_ConfigFS: Releasing ALUA Logical Unit"
2224 " Group: core/alua/lu_gps/%s, ID: %hu\n", 2223 " Group: core/alua/lu_gps/%s, ID: %hu\n",
2225 config_item_name(item), lu_gp->lu_gp_id); 2224 config_item_name(item), lu_gp->lu_gp_id);
2226 /* 2225 /*
2227 * core_alua_free_lu_gp() is called from target_core_alua_lu_gp_ops->release() 2226 * core_alua_free_lu_gp() is called from target_core_alua_lu_gp_ops->release()
2228 * -> target_core_alua_lu_gp_release() 2227 * -> target_core_alua_lu_gp_release()
2229 */ 2228 */
2230 config_item_put(item); 2229 config_item_put(item);
2231 } 2230 }
2232 2231
2233 static struct configfs_group_operations target_core_alua_lu_gps_group_ops = { 2232 static struct configfs_group_operations target_core_alua_lu_gps_group_ops = {
2234 .make_group = &target_core_alua_create_lu_gp, 2233 .make_group = &target_core_alua_create_lu_gp,
2235 .drop_item = &target_core_alua_drop_lu_gp, 2234 .drop_item = &target_core_alua_drop_lu_gp,
2236 }; 2235 };
2237 2236
2238 static struct config_item_type target_core_alua_lu_gps_cit = { 2237 static struct config_item_type target_core_alua_lu_gps_cit = {
2239 .ct_item_ops = NULL, 2238 .ct_item_ops = NULL,
2240 .ct_group_ops = &target_core_alua_lu_gps_group_ops, 2239 .ct_group_ops = &target_core_alua_lu_gps_group_ops,
2241 .ct_owner = THIS_MODULE, 2240 .ct_owner = THIS_MODULE,
2242 }; 2241 };
2243 2242
2244 /* End functions for struct config_item_type target_core_alua_lu_gps_cit */ 2243 /* End functions for struct config_item_type target_core_alua_lu_gps_cit */
2245 2244
2246 /* Start functions for struct config_item_type target_core_alua_tg_pt_gp_cit */ 2245 /* Start functions for struct config_item_type target_core_alua_tg_pt_gp_cit */
2247 2246
2248 CONFIGFS_EATTR_STRUCT(target_core_alua_tg_pt_gp, t10_alua_tg_pt_gp); 2247 CONFIGFS_EATTR_STRUCT(target_core_alua_tg_pt_gp, t10_alua_tg_pt_gp);
2249 #define SE_DEV_ALUA_TG_PT_ATTR(_name, _mode) \ 2248 #define SE_DEV_ALUA_TG_PT_ATTR(_name, _mode) \
2250 static struct target_core_alua_tg_pt_gp_attribute \ 2249 static struct target_core_alua_tg_pt_gp_attribute \
2251 target_core_alua_tg_pt_gp_##_name = \ 2250 target_core_alua_tg_pt_gp_##_name = \
2252 __CONFIGFS_EATTR(_name, _mode, \ 2251 __CONFIGFS_EATTR(_name, _mode, \
2253 target_core_alua_tg_pt_gp_show_attr_##_name, \ 2252 target_core_alua_tg_pt_gp_show_attr_##_name, \
2254 target_core_alua_tg_pt_gp_store_attr_##_name); 2253 target_core_alua_tg_pt_gp_store_attr_##_name);
2255 2254
2256 #define SE_DEV_ALUA_TG_PT_ATTR_RO(_name) \ 2255 #define SE_DEV_ALUA_TG_PT_ATTR_RO(_name) \
2257 static struct target_core_alua_tg_pt_gp_attribute \ 2256 static struct target_core_alua_tg_pt_gp_attribute \
2258 target_core_alua_tg_pt_gp_##_name = \ 2257 target_core_alua_tg_pt_gp_##_name = \
2259 __CONFIGFS_EATTR_RO(_name, \ 2258 __CONFIGFS_EATTR_RO(_name, \
2260 target_core_alua_tg_pt_gp_show_attr_##_name); 2259 target_core_alua_tg_pt_gp_show_attr_##_name);
2261 2260
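These wrappers hide the configfs extended-attribute boilerplate from configfs_macros.h. As a rough sketch (field layout per __CONFIGFS_EATTR() of this era, so treat it as illustrative), SE_DEV_ALUA_TG_PT_ATTR(alua_access_state, S_IRUGO | S_IWUSR) expands to approximately:

	static struct target_core_alua_tg_pt_gp_attribute
			target_core_alua_tg_pt_gp_alua_access_state = {
		.attr = {
			.ca_name  = "alua_access_state",
			.ca_mode  = S_IRUGO | S_IWUSR,
			.ca_owner = THIS_MODULE,
		},
		.show  = target_core_alua_tg_pt_gp_show_attr_alua_access_state,
		.store = target_core_alua_tg_pt_gp_store_attr_alua_access_state,
	};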
2262 /* 2261 /*
2263 * alua_access_state 2262 * alua_access_state
2264 */ 2263 */
2265 static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_state( 2264 static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_state(
2266 struct t10_alua_tg_pt_gp *tg_pt_gp, 2265 struct t10_alua_tg_pt_gp *tg_pt_gp,
2267 char *page) 2266 char *page)
2268 { 2267 {
2269 return sprintf(page, "%d\n", 2268 return sprintf(page, "%d\n",
2270 atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state)); 2269 atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state));
2271 } 2270 }
2272 2271
2273 static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state( 2272 static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(
2274 struct t10_alua_tg_pt_gp *tg_pt_gp, 2273 struct t10_alua_tg_pt_gp *tg_pt_gp,
2275 const char *page, 2274 const char *page,
2276 size_t count) 2275 size_t count)
2277 { 2276 {
2278 struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev; 2277 struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
2279 unsigned long tmp; 2278 unsigned long tmp;
2280 int new_state, ret; 2279 int new_state, ret;
2281 2280
2282 if (!tg_pt_gp->tg_pt_gp_valid_id) { 2281 if (!tg_pt_gp->tg_pt_gp_valid_id) {
2283 pr_err("Unable to do implict ALUA on non valid" 2282 pr_err("Unable to do implict ALUA on non valid"
2284 " tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id); 2283 " tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id);
2285 return -EINVAL; 2284 return -EINVAL;
2286 } 2285 }
2287 2286
2288 ret = strict_strtoul(page, 0, &tmp); 2287 ret = strict_strtoul(page, 0, &tmp);
2289 if (ret < 0) { 2288 if (ret < 0) {
2290 pr_err("Unable to extract new ALUA access state from" 2289 pr_err("Unable to extract new ALUA access state from"
2291 " %s\n", page); 2290 " %s\n", page);
2292 return -EINVAL; 2291 return -EINVAL;
2293 } 2292 }
2294 new_state = (int)tmp; 2293 new_state = (int)tmp;
2295 2294
2296 if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)) { 2295 if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)) {
2297 pr_err("Unable to process implicit configfs ALUA" 2296 pr_err("Unable to process implicit configfs ALUA"
2298 " transition while TPGS_IMPLICT_ALUA is disabled\n"); 2297 " transition while TPGS_IMPLICT_ALUA is disabled\n");
2299 return -EINVAL; 2298 return -EINVAL;
2300 } 2299 }
2301 2300
2302 ret = core_alua_do_port_transition(tg_pt_gp, su_dev->se_dev_ptr, 2301 ret = core_alua_do_port_transition(tg_pt_gp, su_dev->se_dev_ptr,
2303 NULL, NULL, new_state, 0); 2302 NULL, NULL, new_state, 0);
2304 return (!ret) ? count : -EINVAL; 2303 return (!ret) ? count : -EINVAL;
2305 } 2304 }
2306 2305
2307 SE_DEV_ALUA_TG_PT_ATTR(alua_access_state, S_IRUGO | S_IWUSR); 2306 SE_DEV_ALUA_TG_PT_ATTR(alua_access_state, S_IRUGO | S_IWUSR);
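The integer written to alua_access_state is an SPC-4 primary asymmetric access state. For reference (the corresponding constants live in target_core_base.h; values per the T10 spec):

	/*
	 * 0x0 Active/Optimized        0x1 Active/Non-optimized
	 * 0x2 Standby                 0x3 Unavailable
	 * 0xe Offline                 0xf Transitioning
	 */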
2308 2307
2309 /* 2308 /*
2310 * alua_access_status 2309 * alua_access_status
2311 */ 2310 */
2312 static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_status( 2311 static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_status(
2313 struct t10_alua_tg_pt_gp *tg_pt_gp, 2312 struct t10_alua_tg_pt_gp *tg_pt_gp,
2314 char *page) 2313 char *page)
2315 { 2314 {
2316 return sprintf(page, "%s\n", 2315 return sprintf(page, "%s\n",
2317 core_alua_dump_status(tg_pt_gp->tg_pt_gp_alua_access_status)); 2316 core_alua_dump_status(tg_pt_gp->tg_pt_gp_alua_access_status));
2318 } 2317 }
2319 2318
2320 static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status( 2319 static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status(
2321 struct t10_alua_tg_pt_gp *tg_pt_gp, 2320 struct t10_alua_tg_pt_gp *tg_pt_gp,
2322 const char *page, 2321 const char *page,
2323 size_t count) 2322 size_t count)
2324 { 2323 {
2325 unsigned long tmp; 2324 unsigned long tmp;
2326 int new_status, ret; 2325 int new_status, ret;
2327 2326
2328 if (!tg_pt_gp->tg_pt_gp_valid_id) { 2327 if (!tg_pt_gp->tg_pt_gp_valid_id) {
2329 pr_err("Unable to set ALUA access status on" 2328 pr_err("Unable to set ALUA access status on"
2330 " non-valid tg_pt_gp ID: %hu\n", 2329 " non-valid tg_pt_gp ID: %hu\n",
2331 tg_pt_gp->tg_pt_gp_valid_id); 2330 tg_pt_gp->tg_pt_gp_valid_id);
2332 return -EINVAL; 2331 return -EINVAL;
2333 } 2332 }
2334 2333
2335 ret = strict_strtoul(page, 0, &tmp); 2334 ret = strict_strtoul(page, 0, &tmp);
2336 if (ret < 0) { 2335 if (ret < 0) {
2337 pr_err("Unable to extract new ALUA access status" 2336 pr_err("Unable to extract new ALUA access status"
2338 " from %s\n", page); 2337 " from %s\n", page);
2339 return -EINVAL; 2338 return -EINVAL;
2340 } 2339 }
2341 new_status = (int)tmp; 2340 new_status = (int)tmp;
2342 2341
2343 if ((new_status != ALUA_STATUS_NONE) && 2342 if ((new_status != ALUA_STATUS_NONE) &&
2344 (new_status != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) && 2343 (new_status != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
2345 (new_status != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) { 2344 (new_status != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) {
2346 pr_err("Illegal ALUA access status: 0x%02x\n", 2345 pr_err("Illegal ALUA access status: 0x%02x\n",
2347 new_status); 2346 new_status);
2348 return -EINVAL; 2347 return -EINVAL;
2349 } 2348 }
2350 2349
2351 tg_pt_gp->tg_pt_gp_alua_access_status = new_status; 2350 tg_pt_gp->tg_pt_gp_alua_access_status = new_status;
2352 return count; 2351 return count;
2353 } 2352 }
2354 2353
2355 SE_DEV_ALUA_TG_PT_ATTR(alua_access_status, S_IRUGO | S_IWUSR); 2354 SE_DEV_ALUA_TG_PT_ATTR(alua_access_status, S_IRUGO | S_IWUSR);
2356 2355
2357 /* 2356 /*
2358 * alua_access_type 2357 * alua_access_type
2359 */ 2358 */
2360 static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_type( 2359 static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_type(
2361 struct t10_alua_tg_pt_gp *tg_pt_gp, 2360 struct t10_alua_tg_pt_gp *tg_pt_gp,
2362 char *page) 2361 char *page)
2363 { 2362 {
2364 return core_alua_show_access_type(tg_pt_gp, page); 2363 return core_alua_show_access_type(tg_pt_gp, page);
2365 } 2364 }
2366 2365
2367 static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_type( 2366 static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_type(
2368 struct t10_alua_tg_pt_gp *tg_pt_gp, 2367 struct t10_alua_tg_pt_gp *tg_pt_gp,
2369 const char *page, 2368 const char *page,
2370 size_t count) 2369 size_t count)
2371 { 2370 {
2372 return core_alua_store_access_type(tg_pt_gp, page, count); 2371 return core_alua_store_access_type(tg_pt_gp, page, count);
2373 } 2372 }
2374 2373
2375 SE_DEV_ALUA_TG_PT_ATTR(alua_access_type, S_IRUGO | S_IWUSR); 2374 SE_DEV_ALUA_TG_PT_ATTR(alua_access_type, S_IRUGO | S_IWUSR);
2376 2375
2377 /* 2376 /*
2378 * alua_write_metadata 2377 * alua_write_metadata
2379 */ 2378 */
2380 static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_write_metadata( 2379 static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_write_metadata(
2381 struct t10_alua_tg_pt_gp *tg_pt_gp, 2380 struct t10_alua_tg_pt_gp *tg_pt_gp,
2382 char *page) 2381 char *page)
2383 { 2382 {
2384 return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_write_metadata); 2383 return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_write_metadata);
2385 } 2384 }
2386 2385
2387 static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_write_metadata( 2386 static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_write_metadata(
2388 struct t10_alua_tg_pt_gp *tg_pt_gp, 2387 struct t10_alua_tg_pt_gp *tg_pt_gp,
2389 const char *page, 2388 const char *page,
2390 size_t count) 2389 size_t count)
2391 { 2390 {
2392 unsigned long tmp; 2391 unsigned long tmp;
2393 int ret; 2392 int ret;
2394 2393
2395 ret = strict_strtoul(page, 0, &tmp); 2394 ret = strict_strtoul(page, 0, &tmp);
2396 if (ret < 0) { 2395 if (ret < 0) {
2397 pr_err("Unable to extract alua_write_metadata\n"); 2396 pr_err("Unable to extract alua_write_metadata\n");
2398 return -EINVAL; 2397 return -EINVAL;
2399 } 2398 }
2400 2399
2401 if ((tmp != 0) && (tmp != 1)) { 2400 if ((tmp != 0) && (tmp != 1)) {
2402 pr_err("Illegal value for alua_write_metadata:" 2401 pr_err("Illegal value for alua_write_metadata:"
2403 " %lu\n", tmp); 2402 " %lu\n", tmp);
2404 return -EINVAL; 2403 return -EINVAL;
2405 } 2404 }
2406 tg_pt_gp->tg_pt_gp_write_metadata = (int)tmp; 2405 tg_pt_gp->tg_pt_gp_write_metadata = (int)tmp;
2407 2406
2408 return count; 2407 return count;
2409 } 2408 }
2410 2409
2411 SE_DEV_ALUA_TG_PT_ATTR(alua_write_metadata, S_IRUGO | S_IWUSR); 2410 SE_DEV_ALUA_TG_PT_ATTR(alua_write_metadata, S_IRUGO | S_IWUSR);
2412 2411
2413 2412
2414 2413
2415 /* 2414 /*
2416 * nonop_delay_msecs 2415 * nonop_delay_msecs
2417 */ 2416 */
2418 static ssize_t target_core_alua_tg_pt_gp_show_attr_nonop_delay_msecs( 2417 static ssize_t target_core_alua_tg_pt_gp_show_attr_nonop_delay_msecs(
2419 struct t10_alua_tg_pt_gp *tg_pt_gp, 2418 struct t10_alua_tg_pt_gp *tg_pt_gp,
2420 char *page) 2419 char *page)
2421 { 2420 {
2422 return core_alua_show_nonop_delay_msecs(tg_pt_gp, page); 2421 return core_alua_show_nonop_delay_msecs(tg_pt_gp, page);
2424 } 2423 }
2425 2424
2426 static ssize_t target_core_alua_tg_pt_gp_store_attr_nonop_delay_msecs( 2425 static ssize_t target_core_alua_tg_pt_gp_store_attr_nonop_delay_msecs(
2427 struct t10_alua_tg_pt_gp *tg_pt_gp, 2426 struct t10_alua_tg_pt_gp *tg_pt_gp,
2428 const char *page, 2427 const char *page,
2429 size_t count) 2428 size_t count)
2430 { 2429 {
2431 return core_alua_store_nonop_delay_msecs(tg_pt_gp, page, count); 2430 return core_alua_store_nonop_delay_msecs(tg_pt_gp, page, count);
2432 } 2431 }
2433 2432
2434 SE_DEV_ALUA_TG_PT_ATTR(nonop_delay_msecs, S_IRUGO | S_IWUSR); 2433 SE_DEV_ALUA_TG_PT_ATTR(nonop_delay_msecs, S_IRUGO | S_IWUSR);
2435 2434
2436 /* 2435 /*
2437 * trans_delay_msecs 2436 * trans_delay_msecs
2438 */ 2437 */
2439 static ssize_t target_core_alua_tg_pt_gp_show_attr_trans_delay_msecs( 2438 static ssize_t target_core_alua_tg_pt_gp_show_attr_trans_delay_msecs(
2440 struct t10_alua_tg_pt_gp *tg_pt_gp, 2439 struct t10_alua_tg_pt_gp *tg_pt_gp,
2441 char *page) 2440 char *page)
2442 { 2441 {
2443 return core_alua_show_trans_delay_msecs(tg_pt_gp, page); 2442 return core_alua_show_trans_delay_msecs(tg_pt_gp, page);
2444 } 2443 }
2445 2444
2446 static ssize_t target_core_alua_tg_pt_gp_store_attr_trans_delay_msecs( 2445 static ssize_t target_core_alua_tg_pt_gp_store_attr_trans_delay_msecs(
2447 struct t10_alua_tg_pt_gp *tg_pt_gp, 2446 struct t10_alua_tg_pt_gp *tg_pt_gp,
2448 const char *page, 2447 const char *page,
2449 size_t count) 2448 size_t count)
2450 { 2449 {
2451 return core_alua_store_trans_delay_msecs(tg_pt_gp, page, count); 2450 return core_alua_store_trans_delay_msecs(tg_pt_gp, page, count);
2452 } 2451 }
2453 2452
2454 SE_DEV_ALUA_TG_PT_ATTR(trans_delay_msecs, S_IRUGO | S_IWUSR); 2453 SE_DEV_ALUA_TG_PT_ATTR(trans_delay_msecs, S_IRUGO | S_IWUSR);
2455 2454
2456 /* 2455 /*
2457 * preferred 2456 * preferred
2458 */ 2457 */
2459 2458
2460 static ssize_t target_core_alua_tg_pt_gp_show_attr_preferred( 2459 static ssize_t target_core_alua_tg_pt_gp_show_attr_preferred(
2461 struct t10_alua_tg_pt_gp *tg_pt_gp, 2460 struct t10_alua_tg_pt_gp *tg_pt_gp,
2462 char *page) 2461 char *page)
2463 { 2462 {
2464 return core_alua_show_preferred_bit(tg_pt_gp, page); 2463 return core_alua_show_preferred_bit(tg_pt_gp, page);
2465 } 2464 }
2466 2465
2467 static ssize_t target_core_alua_tg_pt_gp_store_attr_preferred( 2466 static ssize_t target_core_alua_tg_pt_gp_store_attr_preferred(
2468 struct t10_alua_tg_pt_gp *tg_pt_gp, 2467 struct t10_alua_tg_pt_gp *tg_pt_gp,
2469 const char *page, 2468 const char *page,
2470 size_t count) 2469 size_t count)
2471 { 2470 {
2472 return core_alua_store_preferred_bit(tg_pt_gp, page, count); 2471 return core_alua_store_preferred_bit(tg_pt_gp, page, count);
2473 } 2472 }
2474 2473
2475 SE_DEV_ALUA_TG_PT_ATTR(preferred, S_IRUGO | S_IWUSR); 2474 SE_DEV_ALUA_TG_PT_ATTR(preferred, S_IRUGO | S_IWUSR);
2476 2475
2477 /* 2476 /*
2478 * tg_pt_gp_id 2477 * tg_pt_gp_id
2479 */ 2478 */
2480 static ssize_t target_core_alua_tg_pt_gp_show_attr_tg_pt_gp_id( 2479 static ssize_t target_core_alua_tg_pt_gp_show_attr_tg_pt_gp_id(
2481 struct t10_alua_tg_pt_gp *tg_pt_gp, 2480 struct t10_alua_tg_pt_gp *tg_pt_gp,
2482 char *page) 2481 char *page)
2483 { 2482 {
2484 if (!tg_pt_gp->tg_pt_gp_valid_id) 2483 if (!tg_pt_gp->tg_pt_gp_valid_id)
2485 return 0; 2484 return 0;
2486 2485
2487 return sprintf(page, "%hu\n", tg_pt_gp->tg_pt_gp_id); 2486 return sprintf(page, "%hu\n", tg_pt_gp->tg_pt_gp_id);
2488 } 2487 }
2489 2488
2490 static ssize_t target_core_alua_tg_pt_gp_store_attr_tg_pt_gp_id( 2489 static ssize_t target_core_alua_tg_pt_gp_store_attr_tg_pt_gp_id(
2491 struct t10_alua_tg_pt_gp *tg_pt_gp, 2490 struct t10_alua_tg_pt_gp *tg_pt_gp,
2492 const char *page, 2491 const char *page,
2493 size_t count) 2492 size_t count)
2494 { 2493 {
2495 struct config_group *alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group; 2494 struct config_group *alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
2496 unsigned long tg_pt_gp_id; 2495 unsigned long tg_pt_gp_id;
2497 int ret; 2496 int ret;
2498 2497
2499 ret = strict_strtoul(page, 0, &tg_pt_gp_id); 2498 ret = strict_strtoul(page, 0, &tg_pt_gp_id);
2500 if (ret < 0) { 2499 if (ret < 0) {
2501 pr_err("strict_strtoul() returned %d for" 2500 pr_err("strict_strtoul() returned %d for"
2502 " tg_pt_gp_id\n", ret); 2501 " tg_pt_gp_id\n", ret);
2503 return -EINVAL; 2502 return -EINVAL;
2504 } 2503 }
2505 if (tg_pt_gp_id > 0x0000ffff) { 2504 if (tg_pt_gp_id > 0x0000ffff) {
2506 pr_err("ALUA tg_pt_gp_id: %lu exceeds maximum:" 2505 pr_err("ALUA tg_pt_gp_id: %lu exceeds maximum:"
2507 " 0x0000ffff\n", tg_pt_gp_id); 2506 " 0x0000ffff\n", tg_pt_gp_id);
2508 return -EINVAL; 2507 return -EINVAL;
2509 } 2508 }
2510 2509
2511 ret = core_alua_set_tg_pt_gp_id(tg_pt_gp, (u16)tg_pt_gp_id); 2510 ret = core_alua_set_tg_pt_gp_id(tg_pt_gp, (u16)tg_pt_gp_id);
2512 if (ret < 0) 2511 if (ret < 0)
2513 return -EINVAL; 2512 return -EINVAL;
2514 2513
2515 pr_debug("Target_Core_ConfigFS: Set ALUA Target Port Group: " 2514 pr_debug("Target_Core_ConfigFS: Set ALUA Target Port Group: "
2516 "core/alua/tg_pt_gps/%s to ID: %hu\n", 2515 "core/alua/tg_pt_gps/%s to ID: %hu\n",
2517 config_item_name(&alua_tg_pt_gp_cg->cg_item), 2516 config_item_name(&alua_tg_pt_gp_cg->cg_item),
2518 tg_pt_gp->tg_pt_gp_id); 2517 tg_pt_gp->tg_pt_gp_id);
2519 2518
2520 return count; 2519 return count;
2521 } 2520 }
2522 2521
2523 SE_DEV_ALUA_TG_PT_ATTR(tg_pt_gp_id, S_IRUGO | S_IWUSR); 2522 SE_DEV_ALUA_TG_PT_ATTR(tg_pt_gp_id, S_IRUGO | S_IWUSR);
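strict_strtoul() plus a manual 0x0000ffff range check was the idiom of the day. On kernels that provide kstrtou16(), the same validation collapses into the helper itself; a minimal sketch (not part of this change):

	u16 id;
	int ret = kstrtou16(page, 0, &id);

	if (ret < 0)
		return ret;	/* rejects non-numeric input and values > 0xffff */
	ret = core_alua_set_tg_pt_gp_id(tg_pt_gp, id);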
2524 2523
2525 /* 2524 /*
2526 * members 2525 * members
2527 */ 2526 */
2528 static ssize_t target_core_alua_tg_pt_gp_show_attr_members( 2527 static ssize_t target_core_alua_tg_pt_gp_show_attr_members(
2529 struct t10_alua_tg_pt_gp *tg_pt_gp, 2528 struct t10_alua_tg_pt_gp *tg_pt_gp,
2530 char *page) 2529 char *page)
2531 { 2530 {
2532 struct se_port *port; 2531 struct se_port *port;
2533 struct se_portal_group *tpg; 2532 struct se_portal_group *tpg;
2534 struct se_lun *lun; 2533 struct se_lun *lun;
2535 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; 2534 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
2536 ssize_t len = 0, cur_len; 2535 ssize_t len = 0, cur_len;
2537 unsigned char buf[TG_PT_GROUP_NAME_BUF]; 2536 unsigned char buf[TG_PT_GROUP_NAME_BUF];
2538 2537
2539 memset(buf, 0, TG_PT_GROUP_NAME_BUF); 2538 memset(buf, 0, TG_PT_GROUP_NAME_BUF);
2540 2539
2541 spin_lock(&tg_pt_gp->tg_pt_gp_lock); 2540 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
2542 list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list, 2541 list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list,
2543 tg_pt_gp_mem_list) { 2542 tg_pt_gp_mem_list) {
2544 port = tg_pt_gp_mem->tg_pt; 2543 port = tg_pt_gp_mem->tg_pt;
2545 tpg = port->sep_tpg; 2544 tpg = port->sep_tpg;
2546 lun = port->sep_lun; 2545 lun = port->sep_lun;
2547 2546
2548 cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu" 2547 cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu"
2549 "/%s\n", tpg->se_tpg_tfo->get_fabric_name(), 2548 "/%s\n", tpg->se_tpg_tfo->get_fabric_name(),
2550 tpg->se_tpg_tfo->tpg_get_wwn(tpg), 2549 tpg->se_tpg_tfo->tpg_get_wwn(tpg),
2551 tpg->se_tpg_tfo->tpg_get_tag(tpg), 2550 tpg->se_tpg_tfo->tpg_get_tag(tpg),
2552 config_item_name(&lun->lun_group.cg_item)); 2551 config_item_name(&lun->lun_group.cg_item));
2553 cur_len++; /* Extra byte for NULL terminator */ 2552 cur_len++; /* Extra byte for NULL terminator */
2554 2553
2555 if ((cur_len + len) > PAGE_SIZE) { 2554 if ((cur_len + len) > PAGE_SIZE) {
2556 pr_warn("Ran out of tg_pt_gp_show_attr" 2555 pr_warn("Ran out of tg_pt_gp_show_attr"
2557 "_members buffer\n"); 2556 "_members buffer\n");
2558 break; 2557 break;
2559 } 2558 }
2560 memcpy(page+len, buf, cur_len); 2559 memcpy(page+len, buf, cur_len);
2561 len += cur_len; 2560 len += cur_len;
2562 } 2561 }
2563 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 2562 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
2564 2563
2565 return len; 2564 return len;
2566 } 2565 }
2567 2566
2568 SE_DEV_ALUA_TG_PT_ATTR_RO(members); 2567 SE_DEV_ALUA_TG_PT_ATTR_RO(members);
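Analogous to the LU group members file above, but each member here is a fabric port. With a hypothetical iSCSI export, a read would yield lines of the form:

	iscsi/iqn.2003-01.org.linux-iscsi.example/tpgt_1/lun_0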
2569 2568
2570 CONFIGFS_EATTR_OPS(target_core_alua_tg_pt_gp, t10_alua_tg_pt_gp, 2569 CONFIGFS_EATTR_OPS(target_core_alua_tg_pt_gp, t10_alua_tg_pt_gp,
2571 tg_pt_gp_group); 2570 tg_pt_gp_group);
2572 2571
2573 static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = { 2572 static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = {
2574 &target_core_alua_tg_pt_gp_alua_access_state.attr, 2573 &target_core_alua_tg_pt_gp_alua_access_state.attr,
2575 &target_core_alua_tg_pt_gp_alua_access_status.attr, 2574 &target_core_alua_tg_pt_gp_alua_access_status.attr,
2576 &target_core_alua_tg_pt_gp_alua_access_type.attr, 2575 &target_core_alua_tg_pt_gp_alua_access_type.attr,
2577 &target_core_alua_tg_pt_gp_alua_write_metadata.attr, 2576 &target_core_alua_tg_pt_gp_alua_write_metadata.attr,
2578 &target_core_alua_tg_pt_gp_nonop_delay_msecs.attr, 2577 &target_core_alua_tg_pt_gp_nonop_delay_msecs.attr,
2579 &target_core_alua_tg_pt_gp_trans_delay_msecs.attr, 2578 &target_core_alua_tg_pt_gp_trans_delay_msecs.attr,
2580 &target_core_alua_tg_pt_gp_preferred.attr, 2579 &target_core_alua_tg_pt_gp_preferred.attr,
2581 &target_core_alua_tg_pt_gp_tg_pt_gp_id.attr, 2580 &target_core_alua_tg_pt_gp_tg_pt_gp_id.attr,
2582 &target_core_alua_tg_pt_gp_members.attr, 2581 &target_core_alua_tg_pt_gp_members.attr,
2583 NULL, 2582 NULL,
2584 }; 2583 };
2585 2584
2586 static void target_core_alua_tg_pt_gp_release(struct config_item *item) 2585 static void target_core_alua_tg_pt_gp_release(struct config_item *item)
2587 { 2586 {
2588 struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item), 2587 struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
2589 struct t10_alua_tg_pt_gp, tg_pt_gp_group); 2588 struct t10_alua_tg_pt_gp, tg_pt_gp_group);
2590 2589
2591 core_alua_free_tg_pt_gp(tg_pt_gp); 2590 core_alua_free_tg_pt_gp(tg_pt_gp);
2592 } 2591 }
2593 2592
2594 static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = { 2593 static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = {
2595 .release = target_core_alua_tg_pt_gp_release, 2594 .release = target_core_alua_tg_pt_gp_release,
2596 .show_attribute = target_core_alua_tg_pt_gp_attr_show, 2595 .show_attribute = target_core_alua_tg_pt_gp_attr_show,
2597 .store_attribute = target_core_alua_tg_pt_gp_attr_store, 2596 .store_attribute = target_core_alua_tg_pt_gp_attr_store,
2598 }; 2597 };
2599 2598
2600 static struct config_item_type target_core_alua_tg_pt_gp_cit = { 2599 static struct config_item_type target_core_alua_tg_pt_gp_cit = {
2601 .ct_item_ops = &target_core_alua_tg_pt_gp_ops, 2600 .ct_item_ops = &target_core_alua_tg_pt_gp_ops,
2602 .ct_attrs = target_core_alua_tg_pt_gp_attrs, 2601 .ct_attrs = target_core_alua_tg_pt_gp_attrs,
2603 .ct_owner = THIS_MODULE, 2602 .ct_owner = THIS_MODULE,
2604 }; 2603 };
2605 2604
2606 /* End functions for struct config_item_type target_core_alua_tg_pt_gp_cit */ 2605 /* End functions for struct config_item_type target_core_alua_tg_pt_gp_cit */
2607 2606
2608 /* Start functions for struct config_item_type target_core_alua_tg_pt_gps_cit */ 2607 /* Start functions for struct config_item_type target_core_alua_tg_pt_gps_cit */
2609 2608
2610 static struct config_group *target_core_alua_create_tg_pt_gp( 2609 static struct config_group *target_core_alua_create_tg_pt_gp(
2611 struct config_group *group, 2610 struct config_group *group,
2612 const char *name) 2611 const char *name)
2613 { 2612 {
2614 struct t10_alua *alua = container_of(group, struct t10_alua, 2613 struct t10_alua *alua = container_of(group, struct t10_alua,
2615 alua_tg_pt_gps_group); 2614 alua_tg_pt_gps_group);
2616 struct t10_alua_tg_pt_gp *tg_pt_gp; 2615 struct t10_alua_tg_pt_gp *tg_pt_gp;
2617 struct se_subsystem_dev *su_dev = alua->t10_sub_dev; 2616 struct se_subsystem_dev *su_dev = alua->t10_sub_dev;
2618 struct config_group *alua_tg_pt_gp_cg = NULL; 2617 struct config_group *alua_tg_pt_gp_cg = NULL;
2619 struct config_item *alua_tg_pt_gp_ci = NULL; 2618 struct config_item *alua_tg_pt_gp_ci = NULL;
2620 2619
2621 tg_pt_gp = core_alua_allocate_tg_pt_gp(su_dev, name, 0); 2620 tg_pt_gp = core_alua_allocate_tg_pt_gp(su_dev, name, 0);
2622 if (!tg_pt_gp) 2621 if (!tg_pt_gp)
2623 return NULL; 2622 return NULL;
2624 2623
2625 alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group; 2624 alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
2626 alua_tg_pt_gp_ci = &alua_tg_pt_gp_cg->cg_item; 2625 alua_tg_pt_gp_ci = &alua_tg_pt_gp_cg->cg_item;
2627 2626
2628 config_group_init_type_name(alua_tg_pt_gp_cg, name, 2627 config_group_init_type_name(alua_tg_pt_gp_cg, name,
2629 &target_core_alua_tg_pt_gp_cit); 2628 &target_core_alua_tg_pt_gp_cit);
2630 2629
2631 pr_debug("Target_Core_ConfigFS: Allocated ALUA Target Port" 2630 pr_debug("Target_Core_ConfigFS: Allocated ALUA Target Port"
2632 " Group: alua/tg_pt_gps/%s\n", 2631 " Group: alua/tg_pt_gps/%s\n",
2633 config_item_name(alua_tg_pt_gp_ci)); 2632 config_item_name(alua_tg_pt_gp_ci));
2634 2633
2635 return alua_tg_pt_gp_cg; 2634 return alua_tg_pt_gp_cg;
2636 } 2635 }
2637 2636
2638 static void target_core_alua_drop_tg_pt_gp( 2637 static void target_core_alua_drop_tg_pt_gp(
2639 struct config_group *group, 2638 struct config_group *group,
2640 struct config_item *item) 2639 struct config_item *item)
2641 { 2640 {
2642 struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item), 2641 struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
2643 struct t10_alua_tg_pt_gp, tg_pt_gp_group); 2642 struct t10_alua_tg_pt_gp, tg_pt_gp_group);
2644 2643
2645 pr_debug("Target_Core_ConfigFS: Releasing ALUA Target Port" 2644 pr_debug("Target_Core_ConfigFS: Releasing ALUA Target Port"
2646 " Group: alua/tg_pt_gps/%s, ID: %hu\n", 2645 " Group: alua/tg_pt_gps/%s, ID: %hu\n",
2647 config_item_name(item), tg_pt_gp->tg_pt_gp_id); 2646 config_item_name(item), tg_pt_gp->tg_pt_gp_id);
2648 /* 2647 /*
2649 * core_alua_free_tg_pt_gp() is called from target_core_alua_tg_pt_gp_ops->release() 2648 * core_alua_free_tg_pt_gp() is called from target_core_alua_tg_pt_gp_ops->release()
2650 * -> target_core_alua_tg_pt_gp_release(). 2649 * -> target_core_alua_tg_pt_gp_release().
2651 */ 2650 */
2652 config_item_put(item); 2651 config_item_put(item);
2653 } 2652 }
2654 2653
2655 static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = { 2654 static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = {
2656 .make_group = &target_core_alua_create_tg_pt_gp, 2655 .make_group = &target_core_alua_create_tg_pt_gp,
2657 .drop_item = &target_core_alua_drop_tg_pt_gp, 2656 .drop_item = &target_core_alua_drop_tg_pt_gp,
2658 }; 2657 };
2659 2658
2660 static struct config_item_type target_core_alua_tg_pt_gps_cit = { 2659 static struct config_item_type target_core_alua_tg_pt_gps_cit = {
2661 .ct_group_ops = &target_core_alua_tg_pt_gps_group_ops, 2660 .ct_group_ops = &target_core_alua_tg_pt_gps_group_ops,
2662 .ct_owner = THIS_MODULE, 2661 .ct_owner = THIS_MODULE,
2663 }; 2662 };
2664 2663
2665 /* End functions for struct config_item_type target_core_alua_tg_pt_gps_cit */ 2664 /* End functions for struct config_item_type target_core_alua_tg_pt_gps_cit */
2666 2665
2667 /* Start functions for struct config_item_type target_core_alua_cit */ 2666 /* Start functions for struct config_item_type target_core_alua_cit */
2668 2667
2669 /* 2668 /*
2670 * target_core_alua_cit is a ConfigFS group that lives under 2669 * target_core_alua_cit is a ConfigFS group that lives under
2671 * /sys/kernel/config/target/core/alua. There are default groups 2670 * /sys/kernel/config/target/core/alua. There are default groups
2672 * core/alua/lu_gps and core/alua/tg_pt_gps that are attached to 2671 * core/alua/lu_gps and core/alua/tg_pt_gps that are attached to
2673 * target_core_alua_cit in target_core_init_configfs() below. 2672 * target_core_alua_cit in target_core_init_configfs() below.
2674 */ 2673 */
2675 static struct config_item_type target_core_alua_cit = { 2674 static struct config_item_type target_core_alua_cit = {
2676 .ct_item_ops = NULL, 2675 .ct_item_ops = NULL,
2677 .ct_attrs = NULL, 2676 .ct_attrs = NULL,
2678 .ct_owner = THIS_MODULE, 2677 .ct_owner = THIS_MODULE,
2679 }; 2678 };
2680 2679
2681 /* End functions for struct config_item_type target_core_alua_cit */ 2680 /* End functions for struct config_item_type target_core_alua_cit */
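Pulling the item types in this file together, the ALUA-related portion of the configfs tree looks roughly like this (a sketch; <lu_gp> stands for any group created by userspace):

	/sys/kernel/config/target/core/
		alua/lu_gps/			(target_core_alua_lu_gps_cit)
			<lu_gp>/		(target_core_alua_lu_gp_cit)
				lu_gp_id
				members
		$HBA/$DEV/alua/
			default_tg_pt_gp/	(target_core_alua_tg_pt_gp_cit)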
2682 2681
2683 /* Start functions for struct config_item_type target_core_stat_cit */ 2682 /* Start functions for struct config_item_type target_core_stat_cit */
2684 2683
2685 static struct config_group *target_core_stat_mkdir( 2684 static struct config_group *target_core_stat_mkdir(
2686 struct config_group *group, 2685 struct config_group *group,
2687 const char *name) 2686 const char *name)
2688 { 2687 {
2689 return ERR_PTR(-ENOSYS); 2688 return ERR_PTR(-ENOSYS);
2690 } 2689 }
2691 2690
2692 static void target_core_stat_rmdir( 2691 static void target_core_stat_rmdir(
2693 struct config_group *group, 2692 struct config_group *group,
2694 struct config_item *item) 2693 struct config_item *item)
2695 { 2694 {
2696 return; 2695 return;
2697 } 2696 }
2698 2697
2699 static struct configfs_group_operations target_core_stat_group_ops = { 2698 static struct configfs_group_operations target_core_stat_group_ops = {
2700 .make_group = &target_core_stat_mkdir, 2699 .make_group = &target_core_stat_mkdir,
2701 .drop_item = &target_core_stat_rmdir, 2700 .drop_item = &target_core_stat_rmdir,
2702 }; 2701 };
2703 2702
2704 static struct config_item_type target_core_stat_cit = { 2703 static struct config_item_type target_core_stat_cit = {
2705 .ct_group_ops = &target_core_stat_group_ops, 2704 .ct_group_ops = &target_core_stat_group_ops,
2706 .ct_owner = THIS_MODULE, 2705 .ct_owner = THIS_MODULE,
2707 }; 2706 };
2708 2707
2709 /* End functions for struct config_item_type target_core_stat_cit */ 2708 /* End functions for struct config_item_type target_core_stat_cit */
2710 2709
2711 /* Start functions for struct config_item_type target_core_hba_cit */ 2710 /* Start functions for struct config_item_type target_core_hba_cit */
2712 2711
2713 static struct config_group *target_core_make_subdev( 2712 static struct config_group *target_core_make_subdev(
2714 struct config_group *group, 2713 struct config_group *group,
2715 const char *name) 2714 const char *name)
2716 { 2715 {
2717 struct t10_alua_tg_pt_gp *tg_pt_gp; 2716 struct t10_alua_tg_pt_gp *tg_pt_gp;
2718 struct se_subsystem_dev *se_dev; 2717 struct se_subsystem_dev *se_dev;
2719 struct se_subsystem_api *t; 2718 struct se_subsystem_api *t;
2720 struct config_item *hba_ci = &group->cg_item; 2719 struct config_item *hba_ci = &group->cg_item;
2721 struct se_hba *hba = item_to_hba(hba_ci); 2720 struct se_hba *hba = item_to_hba(hba_ci);
2722 struct config_group *dev_cg = NULL, *tg_pt_gp_cg = NULL; 2721 struct config_group *dev_cg = NULL, *tg_pt_gp_cg = NULL;
2723 struct config_group *dev_stat_grp = NULL; 2722 struct config_group *dev_stat_grp = NULL;
2724 int errno = -ENOMEM, ret; 2723 int errno = -ENOMEM, ret;
2725 2724
2726 ret = mutex_lock_interruptible(&hba->hba_access_mutex); 2725 ret = mutex_lock_interruptible(&hba->hba_access_mutex);
2727 if (ret) 2726 if (ret)
2728 return ERR_PTR(ret); 2727 return ERR_PTR(ret);
2729 /* 2728 /*
2730 * Locate the struct se_subsystem_api from parent's struct se_hba. 2729 * Locate the struct se_subsystem_api from parent's struct se_hba.
2731 */ 2730 */
2732 t = hba->transport; 2731 t = hba->transport;
2733 2732
2734 se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL); 2733 se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
2735 if (!se_dev) { 2734 if (!se_dev) {
2736 pr_err("Unable to allocate memory for" 2735 pr_err("Unable to allocate memory for"
2737 " struct se_subsystem_dev\n"); 2736 " struct se_subsystem_dev\n");
2738 goto unlock; 2737 goto unlock;
2739 } 2738 }
2740 INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list); 2739 INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
2741 spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock); 2740 spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
2742 INIT_LIST_HEAD(&se_dev->t10_pr.registration_list); 2741 INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
2743 INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list); 2742 INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list);
2744 spin_lock_init(&se_dev->t10_pr.registration_lock); 2743 spin_lock_init(&se_dev->t10_pr.registration_lock);
2745 spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock); 2744 spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock);
2746 INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list); 2745 INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
2747 spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock); 2746 spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
2748 spin_lock_init(&se_dev->se_dev_lock); 2747 spin_lock_init(&se_dev->se_dev_lock);
2749 se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN; 2748 se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
2750 se_dev->t10_wwn.t10_sub_dev = se_dev; 2749 se_dev->t10_wwn.t10_sub_dev = se_dev;
2751 se_dev->t10_alua.t10_sub_dev = se_dev; 2750 se_dev->t10_alua.t10_sub_dev = se_dev;
2752 se_dev->se_dev_attrib.da_sub_dev = se_dev; 2751 se_dev->se_dev_attrib.da_sub_dev = se_dev;
2753 2752
2754 se_dev->se_dev_hba = hba; 2753 se_dev->se_dev_hba = hba;
2755 dev_cg = &se_dev->se_dev_group; 2754 dev_cg = &se_dev->se_dev_group;
2756 2755
2757 dev_cg->default_groups = kzalloc(sizeof(struct config_group *) * 7, 2756 dev_cg->default_groups = kzalloc(sizeof(struct config_group *) * 7,
2758 GFP_KERNEL); 2757 GFP_KERNEL);
2759 if (!dev_cg->default_groups) 2758 if (!dev_cg->default_groups)
2760 goto out; 2759 goto out;
2761 /* 2760 /*
2762 * Set se_dev_su_ptr from struct se_subsystem_api returned void ptr 2761 * Set se_dev_su_ptr from struct se_subsystem_api returned void ptr
2763 * for ->allocate_virtdevice() 2762 * for ->allocate_virtdevice()
2764 * 2763 *
2765 * se_dev->se_dev_ptr will be set after ->create_virtdevice() 2764 * se_dev->se_dev_ptr will be set after ->create_virtdevice()
2766 * has been called successfully in the next level up in the 2765 * has been called successfully in the next level up in the
2767 * configfs tree for device object's struct config_group. 2766 * configfs tree for device object's struct config_group.
2768 */ 2767 */
2769 se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, name); 2768 se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, name);
2770 if (!se_dev->se_dev_su_ptr) { 2769 if (!se_dev->se_dev_su_ptr) {
2771 pr_err("Unable to locate subsystem dependent pointer" 2770 pr_err("Unable to locate subsystem dependent pointer"
2772 " from allocate_virtdevice()\n"); 2771 " from allocate_virtdevice()\n");
2773 goto out; 2772 goto out;
2774 } 2773 }
2775 2774
2776 config_group_init_type_name(&se_dev->se_dev_group, name, 2775 config_group_init_type_name(&se_dev->se_dev_group, name,
2777 &target_core_dev_cit); 2776 &target_core_dev_cit);
2778 config_group_init_type_name(&se_dev->se_dev_attrib.da_group, "attrib", 2777 config_group_init_type_name(&se_dev->se_dev_attrib.da_group, "attrib",
2779 &target_core_dev_attrib_cit); 2778 &target_core_dev_attrib_cit);
2780 config_group_init_type_name(&se_dev->se_dev_pr_group, "pr", 2779 config_group_init_type_name(&se_dev->se_dev_pr_group, "pr",
2781 &target_core_dev_pr_cit); 2780 &target_core_dev_pr_cit);
2782 config_group_init_type_name(&se_dev->t10_wwn.t10_wwn_group, "wwn", 2781 config_group_init_type_name(&se_dev->t10_wwn.t10_wwn_group, "wwn",
2783 &target_core_dev_wwn_cit); 2782 &target_core_dev_wwn_cit);
2784 config_group_init_type_name(&se_dev->t10_alua.alua_tg_pt_gps_group, 2783 config_group_init_type_name(&se_dev->t10_alua.alua_tg_pt_gps_group,
2785 "alua", &target_core_alua_tg_pt_gps_cit); 2784 "alua", &target_core_alua_tg_pt_gps_cit);
2786 config_group_init_type_name(&se_dev->dev_stat_grps.stat_group, 2785 config_group_init_type_name(&se_dev->dev_stat_grps.stat_group,
2787 "statistics", &target_core_stat_cit); 2786 "statistics", &target_core_stat_cit);
2788 2787
2789 dev_cg->default_groups[0] = &se_dev->se_dev_attrib.da_group; 2788 dev_cg->default_groups[0] = &se_dev->se_dev_attrib.da_group;
2790 dev_cg->default_groups[1] = &se_dev->se_dev_pr_group; 2789 dev_cg->default_groups[1] = &se_dev->se_dev_pr_group;
2791 dev_cg->default_groups[2] = &se_dev->t10_wwn.t10_wwn_group; 2790 dev_cg->default_groups[2] = &se_dev->t10_wwn.t10_wwn_group;
2792 dev_cg->default_groups[3] = &se_dev->t10_alua.alua_tg_pt_gps_group; 2791 dev_cg->default_groups[3] = &se_dev->t10_alua.alua_tg_pt_gps_group;
2793 dev_cg->default_groups[4] = &se_dev->dev_stat_grps.stat_group; 2792 dev_cg->default_groups[4] = &se_dev->dev_stat_grps.stat_group;
2794 dev_cg->default_groups[5] = NULL; 2793 dev_cg->default_groups[5] = NULL;
2795 /* 2794 /*
2796 * Add core/$HBA/$DEV/alua/default_tg_pt_gp 2795 * Add core/$HBA/$DEV/alua/default_tg_pt_gp
2797 */ 2796 */
2798 tg_pt_gp = core_alua_allocate_tg_pt_gp(se_dev, "default_tg_pt_gp", 1); 2797 tg_pt_gp = core_alua_allocate_tg_pt_gp(se_dev, "default_tg_pt_gp", 1);
2799 if (!tg_pt_gp) 2798 if (!tg_pt_gp)
2800 goto out; 2799 goto out;
2801 2800
2802 tg_pt_gp_cg = &se_dev->t10_alua.alua_tg_pt_gps_group; 2801 tg_pt_gp_cg = &se_dev->t10_alua.alua_tg_pt_gps_group;
2803 tg_pt_gp_cg->default_groups = kzalloc(sizeof(struct config_group *) * 2, 2802 tg_pt_gp_cg->default_groups = kzalloc(sizeof(struct config_group *) * 2,
2804 GFP_KERNEL); 2803 GFP_KERNEL);
2805 if (!tg_pt_gp_cg->default_groups) { 2804 if (!tg_pt_gp_cg->default_groups) {
2806 pr_err("Unable to allocate tg_pt_gp_cg->" 2805 pr_err("Unable to allocate tg_pt_gp_cg->"
2807 "default_groups\n"); 2806 "default_groups\n");
2808 goto out; 2807 goto out;
2809 } 2808 }
2810 2809
2811 config_group_init_type_name(&tg_pt_gp->tg_pt_gp_group, 2810 config_group_init_type_name(&tg_pt_gp->tg_pt_gp_group,
2812 "default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit); 2811 "default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit);
2813 tg_pt_gp_cg->default_groups[0] = &tg_pt_gp->tg_pt_gp_group; 2812 tg_pt_gp_cg->default_groups[0] = &tg_pt_gp->tg_pt_gp_group;
2814 tg_pt_gp_cg->default_groups[1] = NULL; 2813 tg_pt_gp_cg->default_groups[1] = NULL;
2815 se_dev->t10_alua.default_tg_pt_gp = tg_pt_gp; 2814 se_dev->t10_alua.default_tg_pt_gp = tg_pt_gp;
2816 /* 2815 /*
2817 * Add core/$HBA/$DEV/statistics/ default groups 2816 * Add core/$HBA/$DEV/statistics/ default groups
2818 */ 2817 */
2819 dev_stat_grp = &se_dev->dev_stat_grps.stat_group; 2818 dev_stat_grp = &se_dev->dev_stat_grps.stat_group;
2820 dev_stat_grp->default_groups = kzalloc(sizeof(struct config_group *) * 4, 2819 dev_stat_grp->default_groups = kzalloc(sizeof(struct config_group *) * 4,
2821 GFP_KERNEL); 2820 GFP_KERNEL);
2822 if (!dev_stat_grp->default_groups) { 2821 if (!dev_stat_grp->default_groups) {
2823 pr_err("Unable to allocate dev_stat_grp->default_groups\n"); 2822 pr_err("Unable to allocate dev_stat_grp->default_groups\n");
2824 goto out; 2823 goto out;
2825 } 2824 }
2826 target_stat_setup_dev_default_groups(se_dev); 2825 target_stat_setup_dev_default_groups(se_dev);
2827 2826
2828 pr_debug("Target_Core_ConfigFS: Allocated struct se_subsystem_dev:" 2827 pr_debug("Target_Core_ConfigFS: Allocated struct se_subsystem_dev:"
2829 " %p se_dev_su_ptr: %p\n", se_dev, se_dev->se_dev_su_ptr); 2828 " %p se_dev_su_ptr: %p\n", se_dev, se_dev->se_dev_su_ptr);
2830 2829
2831 mutex_unlock(&hba->hba_access_mutex); 2830 mutex_unlock(&hba->hba_access_mutex);
2832 return &se_dev->se_dev_group; 2831 return &se_dev->se_dev_group;
2833 out: 2832 out:
2834 if (se_dev->t10_alua.default_tg_pt_gp) { 2833 if (se_dev->t10_alua.default_tg_pt_gp) {
2835 core_alua_free_tg_pt_gp(se_dev->t10_alua.default_tg_pt_gp); 2834 core_alua_free_tg_pt_gp(se_dev->t10_alua.default_tg_pt_gp);
2836 se_dev->t10_alua.default_tg_pt_gp = NULL; 2835 se_dev->t10_alua.default_tg_pt_gp = NULL;
2837 } 2836 }
2838 if (dev_stat_grp) 2837 if (dev_stat_grp)
2839 kfree(dev_stat_grp->default_groups); 2838 kfree(dev_stat_grp->default_groups);
2840 if (tg_pt_gp_cg) 2839 if (tg_pt_gp_cg)
2841 kfree(tg_pt_gp_cg->default_groups); 2840 kfree(tg_pt_gp_cg->default_groups);
2842 if (dev_cg) 2841 if (dev_cg)
2843 kfree(dev_cg->default_groups); 2842 kfree(dev_cg->default_groups);
2844 if (se_dev->se_dev_su_ptr) 2843 if (se_dev->se_dev_su_ptr)
2845 t->free_device(se_dev->se_dev_su_ptr); 2844 t->free_device(se_dev->se_dev_su_ptr);
2846 kfree(se_dev); 2845 kfree(se_dev);
2847 unlock: 2846 unlock:
2848 mutex_unlock(&hba->hba_access_mutex); 2847 mutex_unlock(&hba->hba_access_mutex);
2849 return ERR_PTR(errno); 2848 return ERR_PTR(errno);
2850 } 2849 }
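The two-phase creation noted in the comments above relies on the backend contract that this commit collects into target_core_backend.h. Abridged sketch of the struct se_subsystem_api callbacks involved (other members omitted):

	struct se_subsystem_api {
		/* phase 1: mkdir -> opaque per-device config, stored in
		 * se_dev->se_dev_su_ptr */
		void *(*allocate_virtdevice)(struct se_hba *, const char *);
		/* phase 2: device enable -> the live struct se_device */
		struct se_device *(*create_virtdevice)(struct se_hba *,
				struct se_subsystem_dev *, void *);
		/* teardown of the phase 1 allocation */
		void (*free_device)(void *);
	};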
2851 2850
2852 static void target_core_drop_subdev( 2851 static void target_core_drop_subdev(
2853 struct config_group *group, 2852 struct config_group *group,
2854 struct config_item *item) 2853 struct config_item *item)
2855 { 2854 {
2856 struct se_subsystem_dev *se_dev = container_of(to_config_group(item), 2855 struct se_subsystem_dev *se_dev = container_of(to_config_group(item),
2857 struct se_subsystem_dev, se_dev_group); 2856 struct se_subsystem_dev, se_dev_group);
2858 struct se_hba *hba; 2857 struct se_hba *hba;
2859 struct se_subsystem_api *t; 2858 struct se_subsystem_api *t;
2860 struct config_item *df_item; 2859 struct config_item *df_item;
2861 struct config_group *dev_cg, *tg_pt_gp_cg, *dev_stat_grp; 2860 struct config_group *dev_cg, *tg_pt_gp_cg, *dev_stat_grp;
2862 int i; 2861 int i;
2863 2862
2864 hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item); 2863 hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item);
2865 2864
2866 mutex_lock(&hba->hba_access_mutex); 2865 mutex_lock(&hba->hba_access_mutex);
2867 t = hba->transport; 2866 t = hba->transport;
2868 2867
2869 dev_stat_grp = &se_dev->dev_stat_grps.stat_group; 2868 dev_stat_grp = &se_dev->dev_stat_grps.stat_group;
2870 for (i = 0; dev_stat_grp->default_groups[i]; i++) { 2869 for (i = 0; dev_stat_grp->default_groups[i]; i++) {
2871 df_item = &dev_stat_grp->default_groups[i]->cg_item; 2870 df_item = &dev_stat_grp->default_groups[i]->cg_item;
2872 dev_stat_grp->default_groups[i] = NULL; 2871 dev_stat_grp->default_groups[i] = NULL;
2873 config_item_put(df_item); 2872 config_item_put(df_item);
2874 } 2873 }
2875 kfree(dev_stat_grp->default_groups); 2874 kfree(dev_stat_grp->default_groups);
2876 2875
2877 tg_pt_gp_cg = &se_dev->t10_alua.alua_tg_pt_gps_group; 2876 tg_pt_gp_cg = &se_dev->t10_alua.alua_tg_pt_gps_group;
2878 for (i = 0; tg_pt_gp_cg->default_groups[i]; i++) { 2877 for (i = 0; tg_pt_gp_cg->default_groups[i]; i++) {
2879 df_item = &tg_pt_gp_cg->default_groups[i]->cg_item; 2878 df_item = &tg_pt_gp_cg->default_groups[i]->cg_item;
2880 tg_pt_gp_cg->default_groups[i] = NULL; 2879 tg_pt_gp_cg->default_groups[i] = NULL;
2881 config_item_put(df_item); 2880 config_item_put(df_item);
2882 } 2881 }
2883 kfree(tg_pt_gp_cg->default_groups); 2882 kfree(tg_pt_gp_cg->default_groups);
2884 /* 2883 /*
2885 * core_alua_free_tg_pt_gp() for the ->default_tg_pt_gp is called 2884 * core_alua_free_tg_pt_gp() for the ->default_tg_pt_gp is called
2886 * directly from target_core_alua_tg_pt_gp_release(). 2885 * directly from target_core_alua_tg_pt_gp_release().
2887 */ 2886 */
2888 se_dev->t10_alua.default_tg_pt_gp = NULL; 2887 se_dev->t10_alua.default_tg_pt_gp = NULL;
2889 2888
2890 dev_cg = &se_dev->se_dev_group; 2889 dev_cg = &se_dev->se_dev_group;
2891 for (i = 0; dev_cg->default_groups[i]; i++) { 2890 for (i = 0; dev_cg->default_groups[i]; i++) {
2892 df_item = &dev_cg->default_groups[i]->cg_item; 2891 df_item = &dev_cg->default_groups[i]->cg_item;
2893 dev_cg->default_groups[i] = NULL; 2892 dev_cg->default_groups[i] = NULL;
2894 config_item_put(df_item); 2893 config_item_put(df_item);
2895 } 2894 }
2896 /* 2895 /*
2897 * The releasing of se_dev and associated se_dev->se_dev_ptr is done 2896 * The releasing of se_dev and associated se_dev->se_dev_ptr is done
2898 * from target_core_dev_item_ops->release() ->target_core_dev_release(). 2897 * from target_core_dev_item_ops->release() ->target_core_dev_release().
2899 */ 2898 */
2900 config_item_put(item); 2899 config_item_put(item);
2901 mutex_unlock(&hba->hba_access_mutex); 2900 mutex_unlock(&hba->hba_access_mutex);
2902 } 2901 }
2903 2902
2904 static struct configfs_group_operations target_core_hba_group_ops = { 2903 static struct configfs_group_operations target_core_hba_group_ops = {
2905 .make_group = target_core_make_subdev, 2904 .make_group = target_core_make_subdev,
2906 .drop_item = target_core_drop_subdev, 2905 .drop_item = target_core_drop_subdev,
2907 }; 2906 };
2908 2907
2909 CONFIGFS_EATTR_STRUCT(target_core_hba, se_hba); 2908 CONFIGFS_EATTR_STRUCT(target_core_hba, se_hba);
2910 #define SE_HBA_ATTR(_name, _mode) \ 2909 #define SE_HBA_ATTR(_name, _mode) \
2911 static struct target_core_hba_attribute \ 2910 static struct target_core_hba_attribute \
2912 target_core_hba_##_name = \ 2911 target_core_hba_##_name = \
2913 __CONFIGFS_EATTR(_name, _mode, \ 2912 __CONFIGFS_EATTR(_name, _mode, \
2914 target_core_hba_show_attr_##_name, \ 2913 target_core_hba_show_attr_##_name, \
2915 target_core_hba_store_attr_##_name); 2914 target_core_hba_store_attr_##_name);
2916 2915
2917 #define SE_HBA_ATTR_RO(_name) \ 2916 #define SE_HBA_ATTR_RO(_name) \
2918 static struct target_core_hba_attribute \ 2917 static struct target_core_hba_attribute \
2919 target_core_hba_##_name = \ 2918 target_core_hba_##_name = \
2920 __CONFIGFS_EATTR_RO(_name, \ 2919 __CONFIGFS_EATTR_RO(_name, \
2921 target_core_hba_show_attr_##_name); 2920 target_core_hba_show_attr_##_name);
2922 2921
2923 static ssize_t target_core_hba_show_attr_hba_info( 2922 static ssize_t target_core_hba_show_attr_hba_info(
2924 struct se_hba *hba, 2923 struct se_hba *hba,
2925 char *page) 2924 char *page)
2926 { 2925 {
2927 return sprintf(page, "HBA Index: %d plugin: %s version: %s\n", 2926 return sprintf(page, "HBA Index: %d plugin: %s version: %s\n",
2928 hba->hba_id, hba->transport->name, 2927 hba->hba_id, hba->transport->name,
2929 TARGET_CORE_CONFIGFS_VERSION); 2928 TARGET_CORE_CONFIGFS_VERSION);
2930 } 2929 }
2931 2930
2932 SE_HBA_ATTR_RO(hba_info); 2931 SE_HBA_ATTR_RO(hba_info);
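For a hypothetical iblock HBA, reading hba_info produces a single line of the form (version string elided):

	HBA Index: 1 plugin: iblock version: <TARGET_CORE_CONFIGFS_VERSION>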
2933 2932
2934 static ssize_t target_core_hba_show_attr_hba_mode(struct se_hba *hba, 2933 static ssize_t target_core_hba_show_attr_hba_mode(struct se_hba *hba,
2935 char *page) 2934 char *page)
2936 { 2935 {
2937 int hba_mode = 0; 2936 int hba_mode = 0;
2938 2937
2939 if (hba->hba_flags & HBA_FLAGS_PSCSI_MODE) 2938 if (hba->hba_flags & HBA_FLAGS_PSCSI_MODE)
2940 hba_mode = 1; 2939 hba_mode = 1;
2941 2940
2942 return sprintf(page, "%d\n", hba_mode); 2941 return sprintf(page, "%d\n", hba_mode);
2943 } 2942 }
2944 2943
2945 static ssize_t target_core_hba_store_attr_hba_mode(struct se_hba *hba, 2944 static ssize_t target_core_hba_store_attr_hba_mode(struct se_hba *hba,
2946 const char *page, size_t count) 2945 const char *page, size_t count)
2947 { 2946 {
2948 struct se_subsystem_api *transport = hba->transport; 2947 struct se_subsystem_api *transport = hba->transport;
2949 unsigned long mode_flag; 2948 unsigned long mode_flag;
2950 int ret; 2949 int ret;
2951 2950
2952 if (transport->pmode_enable_hba == NULL) 2951 if (transport->pmode_enable_hba == NULL)
2953 return -EINVAL; 2952 return -EINVAL;
2954 2953
2955 ret = strict_strtoul(page, 0, &mode_flag); 2954 ret = strict_strtoul(page, 0, &mode_flag);
2956 if (ret < 0) { 2955 if (ret < 0) {
2957 pr_err("Unable to extract hba mode flag: %d\n", ret); 2956 pr_err("Unable to extract hba mode flag: %d\n", ret);
2958 return -EINVAL; 2957 return -EINVAL;
2959 } 2958 }
2960 2959
2961 spin_lock(&hba->device_lock); 2960 spin_lock(&hba->device_lock);
2962 if (!list_empty(&hba->hba_dev_list)) { 2961 if (!list_empty(&hba->hba_dev_list)) {
2963 pr_err("Unable to set hba_mode with active devices\n"); 2962 pr_err("Unable to set hba_mode with active devices\n");
2964 spin_unlock(&hba->device_lock); 2963 spin_unlock(&hba->device_lock);
2965 return -EINVAL; 2964 return -EINVAL;
2966 } 2965 }
2967 spin_unlock(&hba->device_lock); 2966 spin_unlock(&hba->device_lock);
2968 2967
2969 ret = transport->pmode_enable_hba(hba, mode_flag); 2968 ret = transport->pmode_enable_hba(hba, mode_flag);
2970 if (ret < 0) 2969 if (ret < 0)
2971 return -EINVAL; 2970 return -EINVAL;
2972 if (ret > 0) 2971 if (ret > 0)
2973 hba->hba_flags |= HBA_FLAGS_PSCSI_MODE; 2972 hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
2974 else if (ret == 0) 2973 else if (ret == 0)
2975 hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE; 2974 hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
2976 2975
2977 return count; 2976 return count;
2978 } 2977 }
2979 2978
2980 SE_HBA_ATTR(hba_mode, S_IRUGO | S_IWUSR); 2979 SE_HBA_ATTR(hba_mode, S_IRUGO | S_IWUSR);
2981 2980
2982 CONFIGFS_EATTR_OPS(target_core_hba, se_hba, hba_group); 2981 CONFIGFS_EATTR_OPS(target_core_hba, se_hba, hba_group);
2983 2982
2984 static void target_core_hba_release(struct config_item *item) 2983 static void target_core_hba_release(struct config_item *item)
2985 { 2984 {
2986 struct se_hba *hba = container_of(to_config_group(item), 2985 struct se_hba *hba = container_of(to_config_group(item),
2987 struct se_hba, hba_group); 2986 struct se_hba, hba_group);
2988 core_delete_hba(hba); 2987 core_delete_hba(hba);
2989 } 2988 }
2990 2989
2991 static struct configfs_attribute *target_core_hba_attrs[] = { 2990 static struct configfs_attribute *target_core_hba_attrs[] = {
2992 &target_core_hba_hba_info.attr, 2991 &target_core_hba_hba_info.attr,
2993 &target_core_hba_hba_mode.attr, 2992 &target_core_hba_hba_mode.attr,
2994 NULL, 2993 NULL,
2995 }; 2994 };
2996 2995
2997 static struct configfs_item_operations target_core_hba_item_ops = { 2996 static struct configfs_item_operations target_core_hba_item_ops = {
2998 .release = target_core_hba_release, 2997 .release = target_core_hba_release,
2999 .show_attribute = target_core_hba_attr_show, 2998 .show_attribute = target_core_hba_attr_show,
3000 .store_attribute = target_core_hba_attr_store, 2999 .store_attribute = target_core_hba_attr_store,
3001 }; 3000 };
3002 3001
3003 static struct config_item_type target_core_hba_cit = { 3002 static struct config_item_type target_core_hba_cit = {
3004 .ct_item_ops = &target_core_hba_item_ops, 3003 .ct_item_ops = &target_core_hba_item_ops,
3005 .ct_group_ops = &target_core_hba_group_ops, 3004 .ct_group_ops = &target_core_hba_group_ops,
3006 .ct_attrs = target_core_hba_attrs, 3005 .ct_attrs = target_core_hba_attrs,
3007 .ct_owner = THIS_MODULE, 3006 .ct_owner = THIS_MODULE,
3008 }; 3007 };
3009 3008
3010 static struct config_group *target_core_call_addhbatotarget( 3009 static struct config_group *target_core_call_addhbatotarget(
3011 struct config_group *group, 3010 struct config_group *group,
3012 const char *name) 3011 const char *name)
3013 { 3012 {
3014 char *se_plugin_str, *str, *str2; 3013 char *se_plugin_str, *str, *str2;
3015 struct se_hba *hba; 3014 struct se_hba *hba;
3016 char buf[TARGET_CORE_NAME_MAX_LEN]; 3015 char buf[TARGET_CORE_NAME_MAX_LEN];
3017 unsigned long plugin_dep_id = 0; 3016 unsigned long plugin_dep_id = 0;
3018 int ret; 3017 int ret;
3019 3018
3020 memset(buf, 0, TARGET_CORE_NAME_MAX_LEN); 3019 memset(buf, 0, TARGET_CORE_NAME_MAX_LEN);
3021 if (strlen(name) >= TARGET_CORE_NAME_MAX_LEN) { 3020 if (strlen(name) >= TARGET_CORE_NAME_MAX_LEN) {
3022 pr_err("Passed *name strlen(): %d exceeds" 3021 pr_err("Passed *name strlen(): %d exceeds"
3023 " TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name), 3022 " TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name),
3024 TARGET_CORE_NAME_MAX_LEN); 3023 TARGET_CORE_NAME_MAX_LEN);
3025 return ERR_PTR(-ENAMETOOLONG); 3024 return ERR_PTR(-ENAMETOOLONG);
3026 } 3025 }
3027 snprintf(buf, TARGET_CORE_NAME_MAX_LEN, "%s", name); 3026 snprintf(buf, TARGET_CORE_NAME_MAX_LEN, "%s", name);
3028 3027
3029 str = strstr(buf, "_"); 3028 str = strstr(buf, "_");
3030 if (!str) { 3029 if (!str) {
3031 pr_err("Unable to locate \"_\" for $SUBSYSTEM_PLUGIN_$HOST_ID\n"); 3030 pr_err("Unable to locate \"_\" for $SUBSYSTEM_PLUGIN_$HOST_ID\n");
3032 return ERR_PTR(-EINVAL); 3031 return ERR_PTR(-EINVAL);
3033 } 3032 }
3034 se_plugin_str = buf; 3033 se_plugin_str = buf;
3035 /* 3034 /*
3036 * Special case for subsystem plugins that have "_" in their names. 3035 * Special case for subsystem plugins that have "_" in their names.
3037 * Namely rd_direct and rd_mcp. 3036 * Namely rd_direct and rd_mcp.
3038 */ 3037 */
3039 str2 = strstr(str+1, "_"); 3038 str2 = strstr(str+1, "_");
3040 if (str2) { 3039 if (str2) {
3041 *str2 = '\0'; /* Terminate for *se_plugin_str */ 3040 *str2 = '\0'; /* Terminate for *se_plugin_str */
3042 str2++; /* Skip to start of plugin dependent ID */ 3041 str2++; /* Skip to start of plugin dependent ID */
3043 str = str2; 3042 str = str2;
3044 } else { 3043 } else {
3045 *str = '\0'; /* Terminate for *se_plugin_str */ 3044 *str = '\0'; /* Terminate for *se_plugin_str */
3046 str++; /* Skip to start of plugin dependent ID */ 3045 str++; /* Skip to start of plugin dependent ID */
3047 } 3046 }
3048 3047
3049 ret = strict_strtoul(str, 0, &plugin_dep_id); 3048 ret = strict_strtoul(str, 0, &plugin_dep_id);
3050 if (ret < 0) { 3049 if (ret < 0) {
3051 pr_err("strict_strtoul() returned %d for" 3050 pr_err("strict_strtoul() returned %d for"
3052 " plugin_dep_id\n", ret); 3051 " plugin_dep_id\n", ret);
3053 return ERR_PTR(-EINVAL); 3052 return ERR_PTR(-EINVAL);
3054 } 3053 }
3055 /* 3054 /*
3056 * Load up TCM subsystem plugins if they have not already been loaded. 3055 * Load up TCM subsystem plugins if they have not already been loaded.
3057 */ 3056 */
3058 transport_subsystem_check_init(); 3057 transport_subsystem_check_init();
3059 3058
3060 hba = core_alloc_hba(se_plugin_str, plugin_dep_id, 0); 3059 hba = core_alloc_hba(se_plugin_str, plugin_dep_id, 0);
3061 if (IS_ERR(hba)) 3060 if (IS_ERR(hba))
3062 return ERR_CAST(hba); 3061 return ERR_CAST(hba);
3063 3062
3064 config_group_init_type_name(&hba->hba_group, name, 3063 config_group_init_type_name(&hba->hba_group, name,
3065 &target_core_hba_cit); 3064 &target_core_hba_cit);
3066 3065
3067 return &hba->hba_group; 3066 return &hba->hba_group;
3068 } 3067 }
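This make_group handler is what backs mkdir(2) under /sys/kernel/config/target/core/. As a worked example of the parsing above: creating a group named "iblock_0" yields se_plugin_str "iblock" with plugin_dep_id 0, while "rd_mcp_2" hits the two-underscore special case and splits into "rd_mcp" and 2.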
3069 3068
3070 static void target_core_call_delhbafromtarget( 3069 static void target_core_call_delhbafromtarget(
3071 struct config_group *group, 3070 struct config_group *group,
3072 struct config_item *item) 3071 struct config_item *item)
3073 { 3072 {
3074 /* 3073 /*
3075 * core_delete_hba() is called from target_core_hba_item_ops->release() 3074 * core_delete_hba() is called from target_core_hba_item_ops->release()
3076 * -> target_core_hba_release() 3075 * -> target_core_hba_release()
3077 */ 3076 */
3078 config_item_put(item); 3077 config_item_put(item);
3079 } 3078 }
3080 3079
3081 static struct configfs_group_operations target_core_group_ops = { 3080 static struct configfs_group_operations target_core_group_ops = {
3082 .make_group = target_core_call_addhbatotarget, 3081 .make_group = target_core_call_addhbatotarget,
3083 .drop_item = target_core_call_delhbafromtarget, 3082 .drop_item = target_core_call_delhbafromtarget,
3084 }; 3083 };
3085 3084
3086 static struct config_item_type target_core_cit = { 3085 static struct config_item_type target_core_cit = {
3087 .ct_item_ops = NULL, 3086 .ct_item_ops = NULL,
3088 .ct_group_ops = &target_core_group_ops, 3087 .ct_group_ops = &target_core_group_ops,
3089 .ct_attrs = NULL, 3088 .ct_attrs = NULL,
3090 .ct_owner = THIS_MODULE, 3089 .ct_owner = THIS_MODULE,
3091 }; 3090 };
3092 3091
3093 /* Stop functions for struct config_item_type target_core_hba_cit */ 3092 /* Stop functions for struct config_item_type target_core_hba_cit */
3094 3093
3095 static int __init target_core_init_configfs(void) 3094 static int __init target_core_init_configfs(void)
3096 { 3095 {
3097 struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL; 3096 struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL;
3098 struct config_group *lu_gp_cg = NULL; 3097 struct config_group *lu_gp_cg = NULL;
3099 struct configfs_subsystem *subsys; 3098 struct configfs_subsystem *subsys;
3100 struct t10_alua_lu_gp *lu_gp; 3099 struct t10_alua_lu_gp *lu_gp;
3101 int ret; 3100 int ret;
3102 3101
3103 pr_debug("TARGET_CORE[0]: Loading Generic Kernel Storage" 3102 pr_debug("TARGET_CORE[0]: Loading Generic Kernel Storage"
3104 " Engine: %s on %s/%s on "UTS_RELEASE"\n", 3103 " Engine: %s on %s/%s on "UTS_RELEASE"\n",
3105 TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine); 3104 TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine);
3106 3105
3107 subsys = target_core_subsystem[0]; 3106 subsys = target_core_subsystem[0];
3108 config_group_init(&subsys->su_group); 3107 config_group_init(&subsys->su_group);
3109 mutex_init(&subsys->su_mutex); 3108 mutex_init(&subsys->su_mutex);
3110 3109
3111 INIT_LIST_HEAD(&g_tf_list); 3110 INIT_LIST_HEAD(&g_tf_list);
3112 mutex_init(&g_tf_lock); 3111 mutex_init(&g_tf_lock);
3113 ret = init_se_kmem_caches(); 3112 ret = init_se_kmem_caches();
3114 if (ret < 0) 3113 if (ret < 0)
3115 return ret; 3114 return ret;
3116 /* 3115 /*
3117 * Create $CONFIGFS/target/core default group for HBA <-> Storage Object 3116 * Create $CONFIGFS/target/core default group for HBA <-> Storage Object
3118 * and ALUA Logical Unit Group and Target Port Group infrastructure. 3117 * and ALUA Logical Unit Group and Target Port Group infrastructure.
3119 */ 3118 */
3120 target_cg = &subsys->su_group; 3119 target_cg = &subsys->su_group;
3121 target_cg->default_groups = kzalloc(sizeof(struct config_group *) * 2, 3120 target_cg->default_groups = kzalloc(sizeof(struct config_group *) * 2,
3122 GFP_KERNEL); 3121 GFP_KERNEL);
3123 if (!target_cg->default_groups) { 3122 if (!target_cg->default_groups) {
3124 pr_err("Unable to allocate target_cg->default_groups\n"); 3123 pr_err("Unable to allocate target_cg->default_groups\n");
3125 goto out_global; 3124 goto out_global;
3126 } 3125 }
3127 3126
3128 config_group_init_type_name(&target_core_hbagroup, 3127 config_group_init_type_name(&target_core_hbagroup,
3129 "core", &target_core_cit); 3128 "core", &target_core_cit);
3130 target_cg->default_groups[0] = &target_core_hbagroup; 3129 target_cg->default_groups[0] = &target_core_hbagroup;
3131 target_cg->default_groups[1] = NULL; 3130 target_cg->default_groups[1] = NULL;
3132 /* 3131 /*
3133 * Create ALUA infrastructure under /sys/kernel/config/target/core/alua/ 3132 * Create ALUA infrastructure under /sys/kernel/config/target/core/alua/
3134 */ 3133 */
3135 hba_cg = &target_core_hbagroup; 3134 hba_cg = &target_core_hbagroup;
3136 hba_cg->default_groups = kzalloc(sizeof(struct config_group *) * 2, 3135 hba_cg->default_groups = kzalloc(sizeof(struct config_group *) * 2,
3137 GFP_KERNEL); 3136 GFP_KERNEL);
3138 if (!hba_cg->default_groups) { 3137 if (!hba_cg->default_groups) {
3139 pr_err("Unable to allocate hba_cg->default_groups\n"); 3138 pr_err("Unable to allocate hba_cg->default_groups\n");
3140 goto out_global; 3139 goto out_global;
3141 } 3140 }
3142 config_group_init_type_name(&alua_group, 3141 config_group_init_type_name(&alua_group,
3143 "alua", &target_core_alua_cit); 3142 "alua", &target_core_alua_cit);
3144 hba_cg->default_groups[0] = &alua_group; 3143 hba_cg->default_groups[0] = &alua_group;
3145 hba_cg->default_groups[1] = NULL; 3144 hba_cg->default_groups[1] = NULL;
3146 /* 3145 /*
3147 * Add ALUA Logical Unit Group and Target Port Group ConfigFS 3146 * Add ALUA Logical Unit Group and Target Port Group ConfigFS
3148 * groups under /sys/kernel/config/target/core/alua/ 3147 * groups under /sys/kernel/config/target/core/alua/
3149 */ 3148 */
3150 alua_cg = &alua_group; 3149 alua_cg = &alua_group;
3151 alua_cg->default_groups = kzalloc(sizeof(struct config_group *) * 2, 3150 alua_cg->default_groups = kzalloc(sizeof(struct config_group *) * 2,
3152 GFP_KERNEL); 3151 GFP_KERNEL);
3153 if (!alua_cg->default_groups) { 3152 if (!alua_cg->default_groups) {
3154 pr_err("Unable to allocate alua_cg->default_groups\n"); 3153 pr_err("Unable to allocate alua_cg->default_groups\n");
3155 goto out_global; 3154 goto out_global;
3156 } 3155 }
3157 3156
3158 config_group_init_type_name(&alua_lu_gps_group, 3157 config_group_init_type_name(&alua_lu_gps_group,
3159 "lu_gps", &target_core_alua_lu_gps_cit); 3158 "lu_gps", &target_core_alua_lu_gps_cit);
3160 alua_cg->default_groups[0] = &alua_lu_gps_group; 3159 alua_cg->default_groups[0] = &alua_lu_gps_group;
3161 alua_cg->default_groups[1] = NULL; 3160 alua_cg->default_groups[1] = NULL;
3162 /* 3161 /*
3163 * Add core/alua/lu_gps/default_lu_gp 3162 * Add core/alua/lu_gps/default_lu_gp
3164 */ 3163 */
3165 lu_gp = core_alua_allocate_lu_gp("default_lu_gp", 1); 3164 lu_gp = core_alua_allocate_lu_gp("default_lu_gp", 1);
3166 if (IS_ERR(lu_gp)) 3165 if (IS_ERR(lu_gp))
3167 goto out_global; 3166 goto out_global;
3168 3167
3169 lu_gp_cg = &alua_lu_gps_group; 3168 lu_gp_cg = &alua_lu_gps_group;
3170 lu_gp_cg->default_groups = kzalloc(sizeof(struct config_group *) * 2, 3169 lu_gp_cg->default_groups = kzalloc(sizeof(struct config_group *) * 2,
3171 GFP_KERNEL); 3170 GFP_KERNEL);
3172 if (!lu_gp_cg->default_groups) { 3171 if (!lu_gp_cg->default_groups) {
3173 pr_err("Unable to allocate lu_gp_cg->default_groups\n"); 3172 pr_err("Unable to allocate lu_gp_cg->default_groups\n");
3174 goto out_global; 3173 goto out_global;
3175 } 3174 }
3176 3175
3177 config_group_init_type_name(&lu_gp->lu_gp_group, "default_lu_gp", 3176 config_group_init_type_name(&lu_gp->lu_gp_group, "default_lu_gp",
3178 &target_core_alua_lu_gp_cit); 3177 &target_core_alua_lu_gp_cit);
3179 lu_gp_cg->default_groups[0] = &lu_gp->lu_gp_group; 3178 lu_gp_cg->default_groups[0] = &lu_gp->lu_gp_group;
3180 lu_gp_cg->default_groups[1] = NULL; 3179 lu_gp_cg->default_groups[1] = NULL;
3181 default_lu_gp = lu_gp; 3180 default_lu_gp = lu_gp;
3182 /* 3181 /*
3183 * Register the target_core_mod subsystem with configfs. 3182 * Register the target_core_mod subsystem with configfs.
3184 */ 3183 */
3185 ret = configfs_register_subsystem(subsys); 3184 ret = configfs_register_subsystem(subsys);
3186 if (ret < 0) { 3185 if (ret < 0) {
3187 pr_err("Error %d while registering subsystem %s\n", 3186 pr_err("Error %d while registering subsystem %s\n",
3188 ret, subsys->su_group.cg_item.ci_namebuf); 3187 ret, subsys->su_group.cg_item.ci_namebuf);
3189 goto out_global; 3188 goto out_global;
3190 } 3189 }
3191 pr_debug("TARGET_CORE[0]: Initialized ConfigFS Fabric" 3190 pr_debug("TARGET_CORE[0]: Initialized ConfigFS Fabric"
3192 " Infrastructure: "TARGET_CORE_CONFIGFS_VERSION" on %s/%s" 3191 " Infrastructure: "TARGET_CORE_CONFIGFS_VERSION" on %s/%s"
3193 " on "UTS_RELEASE"\n", utsname()->sysname, utsname()->machine); 3192 " on "UTS_RELEASE"\n", utsname()->sysname, utsname()->machine);
3194 /* 3193 /*
3195 * Register built-in RAMDISK subsystem logic for virtual LUN 0 3194 * Register built-in RAMDISK subsystem logic for virtual LUN 0
3196 */ 3195 */
3197 ret = rd_module_init(); 3196 ret = rd_module_init();
3198 if (ret < 0) 3197 if (ret < 0)
3199 goto out; 3198 goto out;
3200 3199
3201 if (core_dev_setup_virtual_lun0() < 0) 3200 if (core_dev_setup_virtual_lun0() < 0)
3202 goto out; 3201 goto out;
3203 3202
3204 return 0; 3203 return 0;
3205 3204
3206 out: 3205 out:
3207 configfs_unregister_subsystem(subsys); 3206 configfs_unregister_subsystem(subsys);
3208 core_dev_release_virtual_lun0(); 3207 core_dev_release_virtual_lun0();
3209 rd_module_exit(); 3208 rd_module_exit();
3210 out_global: 3209 out_global:
3211 if (default_lu_gp) { 3210 if (default_lu_gp) {
3212 core_alua_free_lu_gp(default_lu_gp); 3211 core_alua_free_lu_gp(default_lu_gp);
3213 default_lu_gp = NULL; 3212 default_lu_gp = NULL;
3214 } 3213 }
3215 if (lu_gp_cg) 3214 if (lu_gp_cg)
3216 kfree(lu_gp_cg->default_groups); 3215 kfree(lu_gp_cg->default_groups);
3217 if (alua_cg) 3216 if (alua_cg)
3218 kfree(alua_cg->default_groups); 3217 kfree(alua_cg->default_groups);
3219 if (hba_cg) 3218 if (hba_cg)
3220 kfree(hba_cg->default_groups); 3219 kfree(hba_cg->default_groups);
3221 kfree(target_cg->default_groups); 3220 kfree(target_cg->default_groups);
3222 release_se_kmem_caches(); 3221 release_se_kmem_caches();
3223 return ret; 3222 return ret;
3224 } 3223 }
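For orientation, the default configfs hierarchy assembled by this init path (before any fabric module registers) is:

    /sys/kernel/config/target/
        core/
            alua/
                lu_gps/
                    default_lu_gp/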
3225 3224
3226 static void __exit target_core_exit_configfs(void) 3225 static void __exit target_core_exit_configfs(void)
3227 { 3226 {
3228 struct configfs_subsystem *subsys; 3227 struct configfs_subsystem *subsys;
3229 struct config_group *hba_cg, *alua_cg, *lu_gp_cg; 3228 struct config_group *hba_cg, *alua_cg, *lu_gp_cg;
3230 struct config_item *item; 3229 struct config_item *item;
3231 int i; 3230 int i;
3232 3231
3233 subsys = target_core_subsystem[0]; 3232 subsys = target_core_subsystem[0];
3234 3233
3235 lu_gp_cg = &alua_lu_gps_group; 3234 lu_gp_cg = &alua_lu_gps_group;
3236 for (i = 0; lu_gp_cg->default_groups[i]; i++) { 3235 for (i = 0; lu_gp_cg->default_groups[i]; i++) {
3237 item = &lu_gp_cg->default_groups[i]->cg_item; 3236 item = &lu_gp_cg->default_groups[i]->cg_item;
3238 lu_gp_cg->default_groups[i] = NULL; 3237 lu_gp_cg->default_groups[i] = NULL;
3239 config_item_put(item); 3238 config_item_put(item);
3240 } 3239 }
3241 kfree(lu_gp_cg->default_groups); 3240 kfree(lu_gp_cg->default_groups);
3242 lu_gp_cg->default_groups = NULL; 3241 lu_gp_cg->default_groups = NULL;
3243 3242
3244 alua_cg = &alua_group; 3243 alua_cg = &alua_group;
3245 for (i = 0; alua_cg->default_groups[i]; i++) { 3244 for (i = 0; alua_cg->default_groups[i]; i++) {
3246 item = &alua_cg->default_groups[i]->cg_item; 3245 item = &alua_cg->default_groups[i]->cg_item;
3247 alua_cg->default_groups[i] = NULL; 3246 alua_cg->default_groups[i] = NULL;
3248 config_item_put(item); 3247 config_item_put(item);
3249 } 3248 }
3250 kfree(alua_cg->default_groups); 3249 kfree(alua_cg->default_groups);
3251 alua_cg->default_groups = NULL; 3250 alua_cg->default_groups = NULL;
3252 3251
3253 hba_cg = &target_core_hbagroup; 3252 hba_cg = &target_core_hbagroup;
3254 for (i = 0; hba_cg->default_groups[i]; i++) { 3253 for (i = 0; hba_cg->default_groups[i]; i++) {
3255 item = &hba_cg->default_groups[i]->cg_item; 3254 item = &hba_cg->default_groups[i]->cg_item;
3256 hba_cg->default_groups[i] = NULL; 3255 hba_cg->default_groups[i] = NULL;
3257 config_item_put(item); 3256 config_item_put(item);
3258 } 3257 }
3259 kfree(hba_cg->default_groups); 3258 kfree(hba_cg->default_groups);
3260 hba_cg->default_groups = NULL; 3259 hba_cg->default_groups = NULL;
3261 /* 3260 /*
3262 * We expect subsys->su_group.default_groups to be released 3261 * We expect subsys->su_group.default_groups to be released
3263 * by configfs subsystem provider logic.. 3262 * by configfs subsystem provider logic..
3264 */ 3263 */
3265 configfs_unregister_subsystem(subsys); 3264 configfs_unregister_subsystem(subsys);
3266 kfree(subsys->su_group.default_groups); 3265 kfree(subsys->su_group.default_groups);
3267 3266
3268 core_alua_free_lu_gp(default_lu_gp); 3267 core_alua_free_lu_gp(default_lu_gp);
3269 default_lu_gp = NULL; 3268 default_lu_gp = NULL;
3270 3269
3271 pr_debug("TARGET_CORE[0]: Released ConfigFS Fabric" 3270 pr_debug("TARGET_CORE[0]: Released ConfigFS Fabric"
3272 " Infrastructure\n"); 3271 " Infrastructure\n");
3273 3272
3274 core_dev_release_virtual_lun0(); 3273 core_dev_release_virtual_lun0();
3275 rd_module_exit(); 3274 rd_module_exit();
3276 release_se_kmem_caches(); 3275 release_se_kmem_caches();
3277 } 3276 }
3278 3277
3279 MODULE_DESCRIPTION("Target_Core_Mod/ConfigFS"); 3278 MODULE_DESCRIPTION("Target_Core_Mod/ConfigFS");
3280 MODULE_AUTHOR("nab@Linux-iSCSI.org"); 3279 MODULE_AUTHOR("nab@Linux-iSCSI.org");
3281 MODULE_LICENSE("GPL"); 3280 MODULE_LICENSE("GPL");
3282 3281
3283 module_init(target_core_init_configfs); 3282 module_init(target_core_init_configfs);
3284 module_exit(target_core_exit_configfs); 3283 module_exit(target_core_exit_configfs);
3285 3284
drivers/target/target_core_device.c
1 /******************************************************************************* 1 /*******************************************************************************
2 * Filename: target_core_device.c (based on iscsi_target_device.c) 2 * Filename: target_core_device.c (based on iscsi_target_device.c)
3 * 3 *
4 * This file contains the TCM Virtual Device and Disk Transport 4 * This file contains the TCM Virtual Device and Disk Transport
5 * agnostic related functions. 5 * agnostic related functions.
6 * 6 *
7 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc. 7 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
8 * Copyright (c) 2005-2006 SBE, Inc. All Rights Reserved. 8 * Copyright (c) 2005-2006 SBE, Inc. All Rights Reserved.
9 * Copyright (c) 2007-2010 Rising Tide Systems 9 * Copyright (c) 2007-2010 Rising Tide Systems
10 * Copyright (c) 2008-2010 Linux-iSCSI.org 10 * Copyright (c) 2008-2010 Linux-iSCSI.org
11 * 11 *
12 * Nicholas A. Bellinger <nab@kernel.org> 12 * Nicholas A. Bellinger <nab@kernel.org>
13 * 13 *
14 * This program is free software; you can redistribute it and/or modify 14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by 15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or 16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version. 17 * (at your option) any later version.
18 * 18 *
19 * This program is distributed in the hope that it will be useful, 19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of 20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details. 22 * GNU General Public License for more details.
23 * 23 *
24 * You should have received a copy of the GNU General Public License 24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software 25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 26 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
27 * 27 *
28 ******************************************************************************/ 28 ******************************************************************************/
29 29
30 #include <linux/net.h> 30 #include <linux/net.h>
31 #include <linux/string.h> 31 #include <linux/string.h>
32 #include <linux/delay.h> 32 #include <linux/delay.h>
33 #include <linux/timer.h> 33 #include <linux/timer.h>
34 #include <linux/slab.h> 34 #include <linux/slab.h>
35 #include <linux/spinlock.h> 35 #include <linux/spinlock.h>
36 #include <linux/kthread.h> 36 #include <linux/kthread.h>
37 #include <linux/in.h> 37 #include <linux/in.h>
38 #include <linux/export.h> 38 #include <linux/export.h>
39 #include <net/sock.h> 39 #include <net/sock.h>
40 #include <net/tcp.h> 40 #include <net/tcp.h>
41 #include <scsi/scsi.h> 41 #include <scsi/scsi.h>
42 #include <scsi/scsi_device.h> 42 #include <scsi/scsi_device.h>
43 43
44 #include <target/target_core_base.h> 44 #include <target/target_core_base.h>
45 #include <target/target_core_device.h> 45 #include <target/target_core_backend.h>
46 #include <target/target_core_tpg.h> 46 #include <target/target_core_fabric.h>
47 #include <target/target_core_transport.h>
48 #include <target/target_core_fabric_ops.h>
49 47
50 #include "target_core_internal.h" 48 #include "target_core_internal.h"
51 #include "target_core_alua.h" 49 #include "target_core_alua.h"
52 #include "target_core_pr.h" 50 #include "target_core_pr.h"
53 #include "target_core_ua.h" 51 #include "target_core_ua.h"
54 52
55 static void se_dev_start(struct se_device *dev); 53 static void se_dev_start(struct se_device *dev);
56 static void se_dev_stop(struct se_device *dev); 54 static void se_dev_stop(struct se_device *dev);
57 55
58 static struct se_hba *lun0_hba; 56 static struct se_hba *lun0_hba;
59 static struct se_subsystem_dev *lun0_su_dev; 57 static struct se_subsystem_dev *lun0_su_dev;
60 /* not static, needed by tpg.c */ 58 /* not static, needed by tpg.c */
61 struct se_device *g_lun0_dev; 59 struct se_device *g_lun0_dev;
62 60
63 int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun) 61 int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
64 { 62 {
65 struct se_lun *se_lun = NULL; 63 struct se_lun *se_lun = NULL;
66 struct se_session *se_sess = se_cmd->se_sess; 64 struct se_session *se_sess = se_cmd->se_sess;
67 struct se_device *dev; 65 struct se_device *dev;
68 unsigned long flags; 66 unsigned long flags;
69 67
70 if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) { 68 if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
71 se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; 69 se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
72 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 70 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
73 return -ENODEV; 71 return -ENODEV;
74 } 72 }
75 73
76 spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags); 74 spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
77 se_cmd->se_deve = &se_sess->se_node_acl->device_list[unpacked_lun]; 75 se_cmd->se_deve = &se_sess->se_node_acl->device_list[unpacked_lun];
78 if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { 76 if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
79 struct se_dev_entry *deve = se_cmd->se_deve; 77 struct se_dev_entry *deve = se_cmd->se_deve;
80 78
81 deve->total_cmds++; 79 deve->total_cmds++;
82 deve->total_bytes += se_cmd->data_length; 80 deve->total_bytes += se_cmd->data_length;
83 81
84 if ((se_cmd->data_direction == DMA_TO_DEVICE) && 82 if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
85 (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) { 83 (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
86 se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; 84 se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
87 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 85 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
88 pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN" 86 pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
89 " Access for 0x%08x\n", 87 " Access for 0x%08x\n",
90 se_cmd->se_tfo->get_fabric_name(), 88 se_cmd->se_tfo->get_fabric_name(),
91 unpacked_lun); 89 unpacked_lun);
92 spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags); 90 spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
93 return -EACCES; 91 return -EACCES;
94 } 92 }
95 93
96 if (se_cmd->data_direction == DMA_TO_DEVICE) 94 if (se_cmd->data_direction == DMA_TO_DEVICE)
97 deve->write_bytes += se_cmd->data_length; 95 deve->write_bytes += se_cmd->data_length;
98 else if (se_cmd->data_direction == DMA_FROM_DEVICE) 96 else if (se_cmd->data_direction == DMA_FROM_DEVICE)
99 deve->read_bytes += se_cmd->data_length; 97 deve->read_bytes += se_cmd->data_length;
100 98
101 deve->deve_cmds++; 99 deve->deve_cmds++;
102 100
103 se_lun = deve->se_lun; 101 se_lun = deve->se_lun;
104 se_cmd->se_lun = deve->se_lun; 102 se_cmd->se_lun = deve->se_lun;
105 se_cmd->pr_res_key = deve->pr_res_key; 103 se_cmd->pr_res_key = deve->pr_res_key;
106 se_cmd->orig_fe_lun = unpacked_lun; 104 se_cmd->orig_fe_lun = unpacked_lun;
107 se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; 105 se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
108 } 106 }
109 spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags); 107 spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
110 108
111 if (!se_lun) { 109 if (!se_lun) {
112 /* 110 /*
113 * Use the se_portal_group->tpg_virt_lun0 to allow for 111 * Use the se_portal_group->tpg_virt_lun0 to allow for
114 * REPORT_LUNS, et al to be returned when no active 112 * REPORT_LUNS, et al to be returned when no active
115 * MappedLUN=0 exists for this Initiator Port. 113 * MappedLUN=0 exists for this Initiator Port.
116 */ 114 */
117 if (unpacked_lun != 0) { 115 if (unpacked_lun != 0) {
118 se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; 116 se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
119 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 117 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
120 pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" 118 pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
121 " Access for 0x%08x\n", 119 " Access for 0x%08x\n",
122 se_cmd->se_tfo->get_fabric_name(), 120 se_cmd->se_tfo->get_fabric_name(),
123 unpacked_lun); 121 unpacked_lun);
124 return -ENODEV; 122 return -ENODEV;
125 } 123 }
126 /* 124 /*
127 * Force WRITE PROTECT for virtual LUN 0 125 * Force WRITE PROTECT for virtual LUN 0
128 */ 126 */
129 if ((se_cmd->data_direction != DMA_FROM_DEVICE) && 127 if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
130 (se_cmd->data_direction != DMA_NONE)) { 128 (se_cmd->data_direction != DMA_NONE)) {
131 se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; 129 se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
132 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 130 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
133 return -EACCES; 131 return -EACCES;
134 } 132 }
135 133
136 se_lun = &se_sess->se_tpg->tpg_virt_lun0; 134 se_lun = &se_sess->se_tpg->tpg_virt_lun0;
137 se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0; 135 se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
138 se_cmd->orig_fe_lun = 0; 136 se_cmd->orig_fe_lun = 0;
139 se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; 137 se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
140 } 138 }
141 /* 139 /*
142 * Determine if the struct se_lun is online. 140 * Determine if the struct se_lun is online.
143 * FIXME: Check for LUN_RESET + UNIT Attention 141 * FIXME: Check for LUN_RESET + UNIT Attention
144 */ 142 */
145 if (se_dev_check_online(se_lun->lun_se_dev) != 0) { 143 if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
146 se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; 144 se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
147 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 145 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
148 return -ENODEV; 146 return -ENODEV;
149 } 147 }
150 148
151 /* Directly associate cmd with se_dev */ 149 /* Directly associate cmd with se_dev */
152 se_cmd->se_dev = se_lun->lun_se_dev; 150 se_cmd->se_dev = se_lun->lun_se_dev;
153 151
154 /* TODO: get rid of this and use atomics for stats */ 152 /* TODO: get rid of this and use atomics for stats */
155 dev = se_lun->lun_se_dev; 153 dev = se_lun->lun_se_dev;
156 spin_lock_irqsave(&dev->stats_lock, flags); 154 spin_lock_irqsave(&dev->stats_lock, flags);
157 dev->num_cmds++; 155 dev->num_cmds++;
158 if (se_cmd->data_direction == DMA_TO_DEVICE) 156 if (se_cmd->data_direction == DMA_TO_DEVICE)
159 dev->write_bytes += se_cmd->data_length; 157 dev->write_bytes += se_cmd->data_length;
160 else if (se_cmd->data_direction == DMA_FROM_DEVICE) 158 else if (se_cmd->data_direction == DMA_FROM_DEVICE)
161 dev->read_bytes += se_cmd->data_length; 159 dev->read_bytes += se_cmd->data_length;
162 spin_unlock_irqrestore(&dev->stats_lock, flags); 160 spin_unlock_irqrestore(&dev->stats_lock, flags);
163 161
164 /* 162 /*
165 * Add the struct se_cmd to the struct se_lun's cmd list. This list is used 163 * Add the struct se_cmd to the struct se_lun's cmd list. This list is used
166 * for tracking state of struct se_cmds during LUN shutdown events. 164 * for tracking state of struct se_cmds during LUN shutdown events.
167 */ 165 */
168 spin_lock_irqsave(&se_lun->lun_cmd_lock, flags); 166 spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
169 list_add_tail(&se_cmd->se_lun_node, &se_lun->lun_cmd_list); 167 list_add_tail(&se_cmd->se_lun_node, &se_lun->lun_cmd_list);
170 atomic_set(&se_cmd->transport_lun_active, 1); 168 atomic_set(&se_cmd->transport_lun_active, 1);
171 spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags); 169 spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);
172 170
173 return 0; 171 return 0;
174 } 172 }
175 EXPORT_SYMBOL(transport_lookup_cmd_lun); 173 EXPORT_SYMBOL(transport_lookup_cmd_lun);
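transport_lookup_cmd_lun() is exported here as part of the fabric-module interface that this commit consolidates into target_core_fabric.h. A minimal sketch of a caller, assuming an already-built se_cmd; the demo_ function name is invented for illustration:

#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

/*
 * Hypothetical fabric-side dispatch step: resolve the unpacked LUN
 * before submitting the CDB. On failure transport_lookup_cmd_lun()
 * has already set se_cmd->scsi_sense_reason, so the fabric just
 * returns CHECK CONDITION to the initiator.
 */
static int demo_fabric_dispatch(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	int ret;

	ret = transport_lookup_cmd_lun(se_cmd, unpacked_lun);
	if (ret < 0)
		return ret;	/* e.g. TCM_NON_EXISTENT_LUN, TCM_WRITE_PROTECTED */

	/* se_cmd->se_lun and se_cmd->se_dev are valid from here on. */
	return 0;
}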
176 174
177 int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun) 175 int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
178 { 176 {
179 struct se_dev_entry *deve; 177 struct se_dev_entry *deve;
180 struct se_lun *se_lun = NULL; 178 struct se_lun *se_lun = NULL;
181 struct se_session *se_sess = se_cmd->se_sess; 179 struct se_session *se_sess = se_cmd->se_sess;
182 struct se_tmr_req *se_tmr = se_cmd->se_tmr_req; 180 struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
183 unsigned long flags; 181 unsigned long flags;
184 182
185 if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) { 183 if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
186 se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; 184 se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
187 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 185 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
188 return -ENODEV; 186 return -ENODEV;
189 } 187 }
190 188
191 spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags); 189 spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
192 se_cmd->se_deve = &se_sess->se_node_acl->device_list[unpacked_lun]; 190 se_cmd->se_deve = &se_sess->se_node_acl->device_list[unpacked_lun];
193 deve = se_cmd->se_deve; 191 deve = se_cmd->se_deve;
194 192
195 if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { 193 if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
196 se_tmr->tmr_lun = deve->se_lun; 194 se_tmr->tmr_lun = deve->se_lun;
197 se_cmd->se_lun = deve->se_lun; 195 se_cmd->se_lun = deve->se_lun;
198 se_lun = deve->se_lun; 196 se_lun = deve->se_lun;
199 se_cmd->pr_res_key = deve->pr_res_key; 197 se_cmd->pr_res_key = deve->pr_res_key;
200 se_cmd->orig_fe_lun = unpacked_lun; 198 se_cmd->orig_fe_lun = unpacked_lun;
201 } 199 }
202 spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags); 200 spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
203 201
204 if (!se_lun) { 202 if (!se_lun) {
205 pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" 203 pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
206 " Access for 0x%08x\n", 204 " Access for 0x%08x\n",
207 se_cmd->se_tfo->get_fabric_name(), 205 se_cmd->se_tfo->get_fabric_name(),
208 unpacked_lun); 206 unpacked_lun);
209 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 207 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
210 return -ENODEV; 208 return -ENODEV;
211 } 209 }
212 /* 210 /*
213 * Determine if the struct se_lun is online. 211 * Determine if the struct se_lun is online.
214 * FIXME: Check for LUN_RESET + UNIT Attention 212 * FIXME: Check for LUN_RESET + UNIT Attention
215 */ 213 */
216 if (se_dev_check_online(se_lun->lun_se_dev) != 0) { 214 if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
217 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 215 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
218 return -ENODEV; 216 return -ENODEV;
219 } 217 }
220 218
221 /* Directly associate cmd with se_dev */ 219 /* Directly associate cmd with se_dev */
222 se_cmd->se_dev = se_lun->lun_se_dev; 220 se_cmd->se_dev = se_lun->lun_se_dev;
223 se_tmr->tmr_dev = se_lun->lun_se_dev; 221 se_tmr->tmr_dev = se_lun->lun_se_dev;
224 222
225 spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags); 223 spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
226 list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list); 224 list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
227 spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags); 225 spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);
228 226
229 return 0; 227 return 0;
230 } 228 }
231 EXPORT_SYMBOL(transport_lookup_tmr_lun); 229 EXPORT_SYMBOL(transport_lookup_tmr_lun);
232 230
233 /* 231 /*
234 * This function is called from core_scsi3_emulate_pro_register_and_move() 232 * This function is called from core_scsi3_emulate_pro_register_and_move()
235 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count 233 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count
236 * when a matching rtpi is found. 234 * when a matching rtpi is found.
237 */ 235 */
238 struct se_dev_entry *core_get_se_deve_from_rtpi( 236 struct se_dev_entry *core_get_se_deve_from_rtpi(
239 struct se_node_acl *nacl, 237 struct se_node_acl *nacl,
240 u16 rtpi) 238 u16 rtpi)
241 { 239 {
242 struct se_dev_entry *deve; 240 struct se_dev_entry *deve;
243 struct se_lun *lun; 241 struct se_lun *lun;
244 struct se_port *port; 242 struct se_port *port;
245 struct se_portal_group *tpg = nacl->se_tpg; 243 struct se_portal_group *tpg = nacl->se_tpg;
246 u32 i; 244 u32 i;
247 245
248 spin_lock_irq(&nacl->device_list_lock); 246 spin_lock_irq(&nacl->device_list_lock);
249 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { 247 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
250 deve = &nacl->device_list[i]; 248 deve = &nacl->device_list[i];
251 249
252 if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) 250 if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
253 continue; 251 continue;
254 252
255 lun = deve->se_lun; 253 lun = deve->se_lun;
256 if (!lun) { 254 if (!lun) {
257 pr_err("%s device entries device pointer is" 255 pr_err("%s device entries device pointer is"
258 " NULL, but Initiator has access.\n", 256 " NULL, but Initiator has access.\n",
259 tpg->se_tpg_tfo->get_fabric_name()); 257 tpg->se_tpg_tfo->get_fabric_name());
260 continue; 258 continue;
261 } 259 }
262 port = lun->lun_sep; 260 port = lun->lun_sep;
263 if (!port) { 261 if (!port) {
264 pr_err("%s device entries device pointer is" 262 pr_err("%s device entries device pointer is"
265 " NULL, but Initiator has access.\n", 263 " NULL, but Initiator has access.\n",
266 tpg->se_tpg_tfo->get_fabric_name()); 264 tpg->se_tpg_tfo->get_fabric_name());
267 continue; 265 continue;
268 } 266 }
269 if (port->sep_rtpi != rtpi) 267 if (port->sep_rtpi != rtpi)
270 continue; 268 continue;
271 269
272 atomic_inc(&deve->pr_ref_count); 270 atomic_inc(&deve->pr_ref_count);
273 smp_mb__after_atomic_inc(); 271 smp_mb__after_atomic_inc();
274 spin_unlock_irq(&nacl->device_list_lock); 272 spin_unlock_irq(&nacl->device_list_lock);
275 273
276 return deve; 274 return deve;
277 } 275 }
278 spin_unlock_irq(&nacl->device_list_lock); 276 spin_unlock_irq(&nacl->device_list_lock);
279 277
280 return NULL; 278 return NULL;
281 } 279 }
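Note the ordering contract here: the atomic_inc() on deve->pr_ref_count is taken while device_list_lock is held, and it is exactly this reference that the while (atomic_read(&deve->pr_ref_count)) cpu_relax(); loop in core_update_device_list_for_node() below spins on, so a MappedLUN entry cannot be torn down while a SPEC_I_PT or REGISTER_AND_MOVE operation still holds it.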
282 280
283 int core_free_device_list_for_node( 281 int core_free_device_list_for_node(
284 struct se_node_acl *nacl, 282 struct se_node_acl *nacl,
285 struct se_portal_group *tpg) 283 struct se_portal_group *tpg)
286 { 284 {
287 struct se_dev_entry *deve; 285 struct se_dev_entry *deve;
288 struct se_lun *lun; 286 struct se_lun *lun;
289 u32 i; 287 u32 i;
290 288
291 if (!nacl->device_list) 289 if (!nacl->device_list)
292 return 0; 290 return 0;
293 291
294 spin_lock_irq(&nacl->device_list_lock); 292 spin_lock_irq(&nacl->device_list_lock);
295 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { 293 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
296 deve = &nacl->device_list[i]; 294 deve = &nacl->device_list[i];
297 295
298 if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) 296 if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
299 continue; 297 continue;
300 298
301 if (!deve->se_lun) { 299 if (!deve->se_lun) {
302 pr_err("%s device entries device pointer is" 300 pr_err("%s device entries device pointer is"
303 " NULL, but Initiator has access.\n", 301 " NULL, but Initiator has access.\n",
304 tpg->se_tpg_tfo->get_fabric_name()); 302 tpg->se_tpg_tfo->get_fabric_name());
305 continue; 303 continue;
306 } 304 }
307 lun = deve->se_lun; 305 lun = deve->se_lun;
308 306
309 spin_unlock_irq(&nacl->device_list_lock); 307 spin_unlock_irq(&nacl->device_list_lock);
310 core_update_device_list_for_node(lun, NULL, deve->mapped_lun, 308 core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
311 TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0); 309 TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
312 spin_lock_irq(&nacl->device_list_lock); 310 spin_lock_irq(&nacl->device_list_lock);
313 } 311 }
314 spin_unlock_irq(&nacl->device_list_lock); 312 spin_unlock_irq(&nacl->device_list_lock);
315 313
316 kfree(nacl->device_list); 314 kfree(nacl->device_list);
317 nacl->device_list = NULL; 315 nacl->device_list = NULL;
318 316
319 return 0; 317 return 0;
320 } 318 }
321 319
322 void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd) 320 void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd)
323 { 321 {
324 struct se_dev_entry *deve; 322 struct se_dev_entry *deve;
325 323
326 spin_lock_irq(&se_nacl->device_list_lock); 324 spin_lock_irq(&se_nacl->device_list_lock);
327 deve = &se_nacl->device_list[se_cmd->orig_fe_lun]; 325 deve = &se_nacl->device_list[se_cmd->orig_fe_lun];
328 deve->deve_cmds--; 326 deve->deve_cmds--;
329 spin_unlock_irq(&se_nacl->device_list_lock); 327 spin_unlock_irq(&se_nacl->device_list_lock);
330 } 328 }
331 329
332 void core_update_device_list_access( 330 void core_update_device_list_access(
333 u32 mapped_lun, 331 u32 mapped_lun,
334 u32 lun_access, 332 u32 lun_access,
335 struct se_node_acl *nacl) 333 struct se_node_acl *nacl)
336 { 334 {
337 struct se_dev_entry *deve; 335 struct se_dev_entry *deve;
338 336
339 spin_lock_irq(&nacl->device_list_lock); 337 spin_lock_irq(&nacl->device_list_lock);
340 deve = &nacl->device_list[mapped_lun]; 338 deve = &nacl->device_list[mapped_lun];
341 if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) { 339 if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
342 deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY; 340 deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
343 deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE; 341 deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
344 } else { 342 } else {
345 deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE; 343 deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
346 deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY; 344 deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
347 } 345 }
348 spin_unlock_irq(&nacl->device_list_lock); 346 spin_unlock_irq(&nacl->device_list_lock);
349 } 347 }
350 348
351 /* core_update_device_list_for_node(): 349 /* core_update_device_list_for_node():
352 * 350 *
353 * 351 *
354 */ 352 */
355 int core_update_device_list_for_node( 353 int core_update_device_list_for_node(
356 struct se_lun *lun, 354 struct se_lun *lun,
357 struct se_lun_acl *lun_acl, 355 struct se_lun_acl *lun_acl,
358 u32 mapped_lun, 356 u32 mapped_lun,
359 u32 lun_access, 357 u32 lun_access,
360 struct se_node_acl *nacl, 358 struct se_node_acl *nacl,
361 struct se_portal_group *tpg, 359 struct se_portal_group *tpg,
362 int enable) 360 int enable)
363 { 361 {
364 struct se_port *port = lun->lun_sep; 362 struct se_port *port = lun->lun_sep;
365 struct se_dev_entry *deve = &nacl->device_list[mapped_lun]; 363 struct se_dev_entry *deve = &nacl->device_list[mapped_lun];
366 int trans = 0; 364 int trans = 0;
367 /* 365 /*
368 * If the MappedLUN entry is being disabled, the entry in 366 * If the MappedLUN entry is being disabled, the entry in
369 * port->sep_alua_list must be removed now before clearing the 367 * port->sep_alua_list must be removed now before clearing the
370 * struct se_dev_entry pointers below as logic in 368 * struct se_dev_entry pointers below as logic in
371 * core_alua_do_transition_tg_pt() depends on these being present. 369 * core_alua_do_transition_tg_pt() depends on these being present.
372 */ 370 */
373 if (!enable) { 371 if (!enable) {
374 /* 372 /*
375 * deve->se_lun_acl will be NULL for demo-mode created LUNs 373 * deve->se_lun_acl will be NULL for demo-mode created LUNs
376 * that have not been explicitly converted to MappedLUNs -> 374 * that have not been explicitly converted to MappedLUNs ->
377 * struct se_lun_acl, but we remove deve->alua_port_list from 375 * struct se_lun_acl, but we remove deve->alua_port_list from
378 * port->sep_alua_list. This also means that active UAs and 376 * port->sep_alua_list. This also means that active UAs and
379 * NodeACL context specific PR metadata for demo-mode 377 * NodeACL context specific PR metadata for demo-mode
380 * MappedLUN *deve will be released below.. 378 * MappedLUN *deve will be released below..
381 */ 379 */
382 spin_lock_bh(&port->sep_alua_lock); 380 spin_lock_bh(&port->sep_alua_lock);
383 list_del(&deve->alua_port_list); 381 list_del(&deve->alua_port_list);
384 spin_unlock_bh(&port->sep_alua_lock); 382 spin_unlock_bh(&port->sep_alua_lock);
385 } 383 }
386 384
387 spin_lock_irq(&nacl->device_list_lock); 385 spin_lock_irq(&nacl->device_list_lock);
388 if (enable) { 386 if (enable) {
389 /* 387 /*
390 * Check if the call is handling demo mode -> explicit LUN ACL 388 * Check if the call is handling demo mode -> explicit LUN ACL
391 * transition. This transition must be for the same struct se_lun 389 * transition. This transition must be for the same struct se_lun
392 * + mapped_lun that was setup in demo mode.. 390 * + mapped_lun that was setup in demo mode..
393 */ 391 */
394 if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { 392 if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
395 if (deve->se_lun_acl != NULL) { 393 if (deve->se_lun_acl != NULL) {
396 pr_err("struct se_dev_entry->se_lun_acl" 394 pr_err("struct se_dev_entry->se_lun_acl"
397 " already set for demo mode -> explict" 395 " already set for demo mode -> explict"
398 " LUN ACL transition\n"); 396 " LUN ACL transition\n");
399 spin_unlock_irq(&nacl->device_list_lock); 397 spin_unlock_irq(&nacl->device_list_lock);
400 return -EINVAL; 398 return -EINVAL;
401 } 399 }
402 if (deve->se_lun != lun) { 400 if (deve->se_lun != lun) {
403 pr_err("struct se_dev_entry->se_lun does" 401 pr_err("struct se_dev_entry->se_lun does"
404 " match passed struct se_lun for demo mode" 402 " match passed struct se_lun for demo mode"
405 " -> explict LUN ACL transition\n"); 403 " -> explict LUN ACL transition\n");
406 spin_unlock_irq(&nacl->device_list_lock); 404 spin_unlock_irq(&nacl->device_list_lock);
407 return -EINVAL; 405 return -EINVAL;
408 } 406 }
409 deve->se_lun_acl = lun_acl; 407 deve->se_lun_acl = lun_acl;
410 trans = 1; 408 trans = 1;
411 } else { 409 } else {
412 deve->se_lun = lun; 410 deve->se_lun = lun;
413 deve->se_lun_acl = lun_acl; 411 deve->se_lun_acl = lun_acl;
414 deve->mapped_lun = mapped_lun; 412 deve->mapped_lun = mapped_lun;
415 deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS; 413 deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;
416 } 414 }
417 415
418 if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) { 416 if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
419 deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY; 417 deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
420 deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE; 418 deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
421 } else { 419 } else {
422 deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE; 420 deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
423 deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY; 421 deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
424 } 422 }
425 423
426 if (trans) { 424 if (trans) {
427 spin_unlock_irq(&nacl->device_list_lock); 425 spin_unlock_irq(&nacl->device_list_lock);
428 return 0; 426 return 0;
429 } 427 }
430 deve->creation_time = get_jiffies_64(); 428 deve->creation_time = get_jiffies_64();
431 deve->attach_count++; 429 deve->attach_count++;
432 spin_unlock_irq(&nacl->device_list_lock); 430 spin_unlock_irq(&nacl->device_list_lock);
433 431
434 spin_lock_bh(&port->sep_alua_lock); 432 spin_lock_bh(&port->sep_alua_lock);
435 list_add_tail(&deve->alua_port_list, &port->sep_alua_list); 433 list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
436 spin_unlock_bh(&port->sep_alua_lock); 434 spin_unlock_bh(&port->sep_alua_lock);
437 435
438 return 0; 436 return 0;
439 } 437 }
440 /* 438 /*
441 * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE 439 * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE
442 * PR operation to complete. 440 * PR operation to complete.
443 */ 441 */
444 spin_unlock_irq(&nacl->device_list_lock); 442 spin_unlock_irq(&nacl->device_list_lock);
445 while (atomic_read(&deve->pr_ref_count) != 0) 443 while (atomic_read(&deve->pr_ref_count) != 0)
446 cpu_relax(); 444 cpu_relax();
447 spin_lock_irq(&nacl->device_list_lock); 445 spin_lock_irq(&nacl->device_list_lock);
448 /* 446 /*
449 * Disable struct se_dev_entry LUN ACL mapping 447 * Disable struct se_dev_entry LUN ACL mapping
450 */ 448 */
451 core_scsi3_ua_release_all(deve); 449 core_scsi3_ua_release_all(deve);
452 deve->se_lun = NULL; 450 deve->se_lun = NULL;
453 deve->se_lun_acl = NULL; 451 deve->se_lun_acl = NULL;
454 deve->lun_flags = 0; 452 deve->lun_flags = 0;
455 deve->creation_time = 0; 453 deve->creation_time = 0;
456 deve->attach_count--; 454 deve->attach_count--;
457 spin_unlock_irq(&nacl->device_list_lock); 455 spin_unlock_irq(&nacl->device_list_lock);
458 456
459 core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl); 457 core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
460 return 0; 458 return 0;
461 } 459 }
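Concretely, the enable path above distinguishes two cases: if the initiator already holds a demo-mode entry for this LUN (INITIATOR_ACCESS set but se_lun_acl still NULL), only the new explicit struct se_lun_acl is attached and the function returns early via trans = 1, skipping the alua_port_list insertion that was already done at demo-mode setup; otherwise a fresh struct se_dev_entry is populated and linked onto port->sep_alua_list.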
462 460
463 /* core_clear_lun_from_tpg(): 461 /* core_clear_lun_from_tpg():
464 * 462 *
465 * 463 *
466 */ 464 */
467 void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg) 465 void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
468 { 466 {
469 struct se_node_acl *nacl; 467 struct se_node_acl *nacl;
470 struct se_dev_entry *deve; 468 struct se_dev_entry *deve;
471 u32 i; 469 u32 i;
472 470
473 spin_lock_irq(&tpg->acl_node_lock); 471 spin_lock_irq(&tpg->acl_node_lock);
474 list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) { 472 list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
475 spin_unlock_irq(&tpg->acl_node_lock); 473 spin_unlock_irq(&tpg->acl_node_lock);
476 474
477 spin_lock_irq(&nacl->device_list_lock); 475 spin_lock_irq(&nacl->device_list_lock);
478 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { 476 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
479 deve = &nacl->device_list[i]; 477 deve = &nacl->device_list[i];
480 if (lun != deve->se_lun) 478 if (lun != deve->se_lun)
481 continue; 479 continue;
482 spin_unlock_irq(&nacl->device_list_lock); 480 spin_unlock_irq(&nacl->device_list_lock);
483 481
484 core_update_device_list_for_node(lun, NULL, 482 core_update_device_list_for_node(lun, NULL,
485 deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS, 483 deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
486 nacl, tpg, 0); 484 nacl, tpg, 0);
487 485
488 spin_lock_irq(&nacl->device_list_lock); 486 spin_lock_irq(&nacl->device_list_lock);
489 } 487 }
490 spin_unlock_irq(&nacl->device_list_lock); 488 spin_unlock_irq(&nacl->device_list_lock);
491 489
492 spin_lock_irq(&tpg->acl_node_lock); 490 spin_lock_irq(&tpg->acl_node_lock);
493 } 491 }
494 spin_unlock_irq(&tpg->acl_node_lock); 492 spin_unlock_irq(&tpg->acl_node_lock);
495 } 493 }
496 494
497 static struct se_port *core_alloc_port(struct se_device *dev) 495 static struct se_port *core_alloc_port(struct se_device *dev)
498 { 496 {
499 struct se_port *port, *port_tmp; 497 struct se_port *port, *port_tmp;
500 498
501 port = kzalloc(sizeof(struct se_port), GFP_KERNEL); 499 port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
502 if (!port) { 500 if (!port) {
503 pr_err("Unable to allocate struct se_port\n"); 501 pr_err("Unable to allocate struct se_port\n");
504 return ERR_PTR(-ENOMEM); 502 return ERR_PTR(-ENOMEM);
505 } 503 }
506 INIT_LIST_HEAD(&port->sep_alua_list); 504 INIT_LIST_HEAD(&port->sep_alua_list);
507 INIT_LIST_HEAD(&port->sep_list); 505 INIT_LIST_HEAD(&port->sep_list);
508 atomic_set(&port->sep_tg_pt_secondary_offline, 0); 506 atomic_set(&port->sep_tg_pt_secondary_offline, 0);
509 spin_lock_init(&port->sep_alua_lock); 507 spin_lock_init(&port->sep_alua_lock);
510 mutex_init(&port->sep_tg_pt_md_mutex); 508 mutex_init(&port->sep_tg_pt_md_mutex);
511 509
512 spin_lock(&dev->se_port_lock); 510 spin_lock(&dev->se_port_lock);
513 if (dev->dev_port_count == 0x0000ffff) { 511 if (dev->dev_port_count == 0x0000ffff) {
514 pr_warn("Reached dev->dev_port_count ==" 512 pr_warn("Reached dev->dev_port_count =="
515 " 0x0000ffff\n"); 513 " 0x0000ffff\n");
516 spin_unlock(&dev->se_port_lock); 514 spin_unlock(&dev->se_port_lock);
517 return ERR_PTR(-ENOSPC); 515 return ERR_PTR(-ENOSPC);
518 } 516 }
519 again: 517 again:
520 /* 518 /*
521 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device 519 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
522 * Here is the table from spc4r17 section 7.7.3.8. 520 * Here is the table from spc4r17 section 7.7.3.8.
523 * 521 *
524 * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field 522 * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
525 * 523 *
526 * Code Description 524 * Code Description
527 * 0h Reserved 525 * 0h Reserved
528 * 1h Relative port 1, historically known as port A 526 * 1h Relative port 1, historically known as port A
529 * 2h Relative port 2, historically known as port B 527 * 2h Relative port 2, historically known as port B
530 * 3h to FFFFh Relative port 3 through 65 535 528 * 3h to FFFFh Relative port 3 through 65 535
531 */ 529 */
532 port->sep_rtpi = dev->dev_rpti_counter++; 530 port->sep_rtpi = dev->dev_rpti_counter++;
533 if (!port->sep_rtpi) 531 if (!port->sep_rtpi)
534 goto again; 532 goto again;
535 533
536 list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) { 534 list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
537 /* 535 /*
538 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique 536 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
539 * for 16-bit wrap.. 537 * for 16-bit wrap..
540 */ 538 */
541 if (port->sep_rtpi == port_tmp->sep_rtpi) 539 if (port->sep_rtpi == port_tmp->sep_rtpi)
542 goto again; 540 goto again;
543 } 541 }
544 spin_unlock(&dev->se_port_lock); 542 spin_unlock(&dev->se_port_lock);
545 543
546 return port; 544 return port;
547 } 545 }
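The again: loop implements a wrap-safe 16-bit identifier allocator: 0h is skipped because spc4r17 reserves it, and the dev_sep_list scan guards against duplicates once the counter wraps. A standalone sketch of the same counter pattern, with the uniqueness scan omitted and the demo_ name invented for illustration:

/*
 * Sketch only: hand out non-zero 16-bit identifiers from a free-running
 * counter. Value 0 is reserved (spc4r17 table 473), so skip it on wrap.
 */
static u16 demo_next_rtpi(u16 *counter)
{
	u16 rtpi;

	do {
		rtpi = (*counter)++;	/* wraps naturally at 0xffff */
	} while (rtpi == 0);

	return rtpi;
}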
548 546
549 static void core_export_port( 547 static void core_export_port(
550 struct se_device *dev, 548 struct se_device *dev,
551 struct se_portal_group *tpg, 549 struct se_portal_group *tpg,
552 struct se_port *port, 550 struct se_port *port,
553 struct se_lun *lun) 551 struct se_lun *lun)
554 { 552 {
555 struct se_subsystem_dev *su_dev = dev->se_sub_dev; 553 struct se_subsystem_dev *su_dev = dev->se_sub_dev;
556 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL; 554 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;
557 555
558 spin_lock(&dev->se_port_lock); 556 spin_lock(&dev->se_port_lock);
559 spin_lock(&lun->lun_sep_lock); 557 spin_lock(&lun->lun_sep_lock);
560 port->sep_tpg = tpg; 558 port->sep_tpg = tpg;
561 port->sep_lun = lun; 559 port->sep_lun = lun;
562 lun->lun_sep = port; 560 lun->lun_sep = port;
563 spin_unlock(&lun->lun_sep_lock); 561 spin_unlock(&lun->lun_sep_lock);
564 562
565 list_add_tail(&port->sep_list, &dev->dev_sep_list); 563 list_add_tail(&port->sep_list, &dev->dev_sep_list);
566 spin_unlock(&dev->se_port_lock); 564 spin_unlock(&dev->se_port_lock);
567 565
568 if (su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) { 566 if (su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
569 tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port); 567 tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
570 if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) { 568 if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
571 pr_err("Unable to allocate t10_alua_tg_pt" 569 pr_err("Unable to allocate t10_alua_tg_pt"
572 "_gp_member_t\n"); 570 "_gp_member_t\n");
573 return; 571 return;
574 } 572 }
575 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 573 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
576 __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, 574 __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
577 su_dev->t10_alua.default_tg_pt_gp); 575 su_dev->t10_alua.default_tg_pt_gp);
578 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 576 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
579 pr_debug("%s/%s: Adding to default ALUA Target Port" 577 pr_debug("%s/%s: Adding to default ALUA Target Port"
580 " Group: alua/default_tg_pt_gp\n", 578 " Group: alua/default_tg_pt_gp\n",
581 dev->transport->name, tpg->se_tpg_tfo->get_fabric_name()); 579 dev->transport->name, tpg->se_tpg_tfo->get_fabric_name());
582 } 580 }
583 581
584 dev->dev_port_count++; 582 dev->dev_port_count++;
585 port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */ 583 port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
586 } 584 }
587 585
588 /* 586 /*
589 * Called with struct se_device->se_port_lock spinlock held. 587 * Called with struct se_device->se_port_lock spinlock held.
590 */ 588 */
591 static void core_release_port(struct se_device *dev, struct se_port *port) 589 static void core_release_port(struct se_device *dev, struct se_port *port)
592 __releases(&dev->se_port_lock) __acquires(&dev->se_port_lock) 590 __releases(&dev->se_port_lock) __acquires(&dev->se_port_lock)
593 { 591 {
594 /* 592 /*
595 * Wait for any port reference for PR ALL_TG_PT=1 operation 593 * Wait for any port reference for PR ALL_TG_PT=1 operation
596 * to complete in __core_scsi3_alloc_registration() 594 * to complete in __core_scsi3_alloc_registration()
597 */ 595 */
598 spin_unlock(&dev->se_port_lock); 596 spin_unlock(&dev->se_port_lock);
599 while (atomic_read(&port->sep_tg_pt_ref_cnt)) 597 while (atomic_read(&port->sep_tg_pt_ref_cnt))
600 cpu_relax(); 598 cpu_relax();
601 spin_lock(&dev->se_port_lock); 599 spin_lock(&dev->se_port_lock);
602 600
603 core_alua_free_tg_pt_gp_mem(port); 601 core_alua_free_tg_pt_gp_mem(port);
604 602
605 list_del(&port->sep_list); 603 list_del(&port->sep_list);
606 dev->dev_port_count--; 604 dev->dev_port_count--;
607 kfree(port); 605 kfree(port);
608 } 606 }
609 607
610 int core_dev_export( 608 int core_dev_export(
611 struct se_device *dev, 609 struct se_device *dev,
612 struct se_portal_group *tpg, 610 struct se_portal_group *tpg,
613 struct se_lun *lun) 611 struct se_lun *lun)
614 { 612 {
615 struct se_port *port; 613 struct se_port *port;
616 614
617 port = core_alloc_port(dev); 615 port = core_alloc_port(dev);
618 if (IS_ERR(port)) 616 if (IS_ERR(port))
619 return PTR_ERR(port); 617 return PTR_ERR(port);
620 618
621 lun->lun_se_dev = dev; 619 lun->lun_se_dev = dev;
622 se_dev_start(dev); 620 se_dev_start(dev);
623 621
624 atomic_inc(&dev->dev_export_obj.obj_access_count); 622 atomic_inc(&dev->dev_export_obj.obj_access_count);
625 core_export_port(dev, tpg, port, lun); 623 core_export_port(dev, tpg, port, lun);
626 return 0; 624 return 0;
627 } 625 }
628 626
629 void core_dev_unexport( 627 void core_dev_unexport(
630 struct se_device *dev, 628 struct se_device *dev,
631 struct se_portal_group *tpg, 629 struct se_portal_group *tpg,
632 struct se_lun *lun) 630 struct se_lun *lun)
633 { 631 {
634 struct se_port *port = lun->lun_sep; 632 struct se_port *port = lun->lun_sep;
635 633
636 spin_lock(&lun->lun_sep_lock); 634 spin_lock(&lun->lun_sep_lock);
637 if (lun->lun_se_dev == NULL) { 635 if (lun->lun_se_dev == NULL) {
638 spin_unlock(&lun->lun_sep_lock); 636 spin_unlock(&lun->lun_sep_lock);
639 return; 637 return;
640 } 638 }
641 spin_unlock(&lun->lun_sep_lock); 639 spin_unlock(&lun->lun_sep_lock);
642 640
643 spin_lock(&dev->se_port_lock); 641 spin_lock(&dev->se_port_lock);
644 atomic_dec(&dev->dev_export_obj.obj_access_count); 642 atomic_dec(&dev->dev_export_obj.obj_access_count);
645 core_release_port(dev, port); 643 core_release_port(dev, port);
646 spin_unlock(&dev->se_port_lock); 644 spin_unlock(&dev->se_port_lock);
647 645
648 se_dev_stop(dev); 646 se_dev_stop(dev);
649 lun->lun_se_dev = NULL; 647 lun->lun_se_dev = NULL;
650 } 648 }
651 649
652 int target_report_luns(struct se_task *se_task) 650 int target_report_luns(struct se_task *se_task)
653 { 651 {
654 struct se_cmd *se_cmd = se_task->task_se_cmd; 652 struct se_cmd *se_cmd = se_task->task_se_cmd;
655 struct se_dev_entry *deve; 653 struct se_dev_entry *deve;
656 struct se_lun *se_lun; 654 struct se_lun *se_lun;
657 struct se_session *se_sess = se_cmd->se_sess; 655 struct se_session *se_sess = se_cmd->se_sess;
658 unsigned char *buf; 656 unsigned char *buf;
659 u32 cdb_offset = 0, lun_count = 0, offset = 8, i; 657 u32 cdb_offset = 0, lun_count = 0, offset = 8, i;
660 658
661 buf = transport_kmap_first_data_page(se_cmd); 659 buf = transport_kmap_first_data_page(se_cmd);
662 660
663 /* 661 /*
664 * If no struct se_session pointer is present, this struct se_cmd is 662 * If no struct se_session pointer is present, this struct se_cmd is
665 * coming via a target_core_mod PASSTHROUGH op, and not through 663 * coming via a target_core_mod PASSTHROUGH op, and not through
666 * a $FABRIC_MOD. In that case, report LUN=0 only. 664 * a $FABRIC_MOD. In that case, report LUN=0 only.
667 */ 665 */
668 if (!se_sess) { 666 if (!se_sess) {
669 int_to_scsilun(0, (struct scsi_lun *)&buf[offset]); 667 int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
670 lun_count = 1; 668 lun_count = 1;
671 goto done; 669 goto done;
672 } 670 }
673 671
674 spin_lock_irq(&se_sess->se_node_acl->device_list_lock); 672 spin_lock_irq(&se_sess->se_node_acl->device_list_lock);
675 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { 673 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
676 deve = &se_sess->se_node_acl->device_list[i]; 674 deve = &se_sess->se_node_acl->device_list[i];
677 if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) 675 if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
678 continue; 676 continue;
679 se_lun = deve->se_lun; 677 se_lun = deve->se_lun;
680 /* 678 /*
681 * We determine the correct LUN LIST LENGTH even once we 679 * We determine the correct LUN LIST LENGTH even once we
682 * have reached the initial allocation length. 680 * have reached the initial allocation length.
683 * See SPC2-R20 7.19. 681 * See SPC2-R20 7.19.
684 */ 682 */
685 lun_count++; 683 lun_count++;
686 if ((cdb_offset + 8) >= se_cmd->data_length) 684 if ((cdb_offset + 8) >= se_cmd->data_length)
687 continue; 685 continue;
688 686
689 int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]); 687 int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
690 offset += 8; 688 offset += 8;
691 cdb_offset += 8; 689 cdb_offset += 8;
692 } 690 }
693 spin_unlock_irq(&se_sess->se_node_acl->device_list_lock); 691 spin_unlock_irq(&se_sess->se_node_acl->device_list_lock);
694 692
695 /* 693 /*
696 * See SPC3 r07, page 159. 694 * See SPC3 r07, page 159.
697 */ 695 */
698 done: 696 done:
699 lun_count *= 8; 697 lun_count *= 8;
700 buf[0] = ((lun_count >> 24) & 0xff); 698 buf[0] = ((lun_count >> 24) & 0xff);
701 buf[1] = ((lun_count >> 16) & 0xff); 699 buf[1] = ((lun_count >> 16) & 0xff);
702 buf[2] = ((lun_count >> 8) & 0xff); 700 buf[2] = ((lun_count >> 8) & 0xff);
703 buf[3] = (lun_count & 0xff); 701 buf[3] = (lun_count & 0xff);
704 transport_kunmap_first_data_page(se_cmd); 702 transport_kunmap_first_data_page(se_cmd);
705 703
706 se_task->task_scsi_status = GOOD; 704 se_task->task_scsi_status = GOOD;
707 transport_complete_task(se_task, 1); 705 transport_complete_task(se_task, 1);
708 return 0; 706 return 0;
709 } 707 }
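
A note on the header math above: per SPC-3, REPORT LUNS parameter data starts with a 4-byte big-endian LUN LIST LENGTH that counts LUN-entry bytes (8 per LUN), then 4 reserved bytes, with the entries themselves starting at offset 8. A minimal standalone sketch of that encoding (illustrative only, not part of this patch; the helper name is made up):

	#include <string.h>

	/* Hypothetical helper: encode the SPC-3 REPORT LUNS header. */
	static void example_report_luns_header(unsigned char *buf,
					       unsigned int lun_count)
	{
		unsigned int lun_list_len = lun_count * 8; /* LUN-entry bytes */

		buf[0] = (lun_list_len >> 24) & 0xff;
		buf[1] = (lun_list_len >> 16) & 0xff;
		buf[2] = (lun_list_len >> 8) & 0xff;
		buf[3] = lun_list_len & 0xff;
		memset(&buf[4], 0, 4); /* bytes 4-7 are reserved */
	}
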
710 708
711 /* se_release_device_for_hba(): 709 /* se_release_device_for_hba():
712 * 710 *
713 * 711 *
714 */ 712 */
715 void se_release_device_for_hba(struct se_device *dev) 713 void se_release_device_for_hba(struct se_device *dev)
716 { 714 {
717 struct se_hba *hba = dev->se_hba; 715 struct se_hba *hba = dev->se_hba;
718 716
719 if ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) || 717 if ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
720 (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) || 718 (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) ||
721 (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) || 719 (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) ||
722 (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_ACTIVATED) || 720 (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_ACTIVATED) ||
723 (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_DEACTIVATED)) 721 (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_DEACTIVATED))
724 se_dev_stop(dev); 722 se_dev_stop(dev);
725 723
726 if (dev->dev_ptr) { 724 if (dev->dev_ptr) {
727 kthread_stop(dev->process_thread); 725 kthread_stop(dev->process_thread);
728 if (dev->transport->free_device) 726 if (dev->transport->free_device)
729 dev->transport->free_device(dev->dev_ptr); 727 dev->transport->free_device(dev->dev_ptr);
730 } 728 }
731 729
732 spin_lock(&hba->device_lock); 730 spin_lock(&hba->device_lock);
733 list_del(&dev->dev_list); 731 list_del(&dev->dev_list);
734 hba->dev_count--; 732 hba->dev_count--;
735 spin_unlock(&hba->device_lock); 733 spin_unlock(&hba->device_lock);
736 734
737 core_scsi3_free_all_registrations(dev); 735 core_scsi3_free_all_registrations(dev);
738 se_release_vpd_for_dev(dev); 736 se_release_vpd_for_dev(dev);
739 737
740 kfree(dev); 738 kfree(dev);
741 } 739 }
742 740
743 void se_release_vpd_for_dev(struct se_device *dev) 741 void se_release_vpd_for_dev(struct se_device *dev)
744 { 742 {
745 struct t10_vpd *vpd, *vpd_tmp; 743 struct t10_vpd *vpd, *vpd_tmp;
746 744
747 spin_lock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock); 745 spin_lock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock);
748 list_for_each_entry_safe(vpd, vpd_tmp, 746 list_for_each_entry_safe(vpd, vpd_tmp,
749 &dev->se_sub_dev->t10_wwn.t10_vpd_list, vpd_list) { 747 &dev->se_sub_dev->t10_wwn.t10_vpd_list, vpd_list) {
750 list_del(&vpd->vpd_list); 748 list_del(&vpd->vpd_list);
751 kfree(vpd); 749 kfree(vpd);
752 } 750 }
753 spin_unlock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock); 751 spin_unlock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock);
754 } 752 }
755 753
756 /* se_free_virtual_device(): 754 /* se_free_virtual_device():
757 * 755 *
758 * Used for IBLOCK, RAMDISK, and FILEIO Transport Drivers. 756 * Used for IBLOCK, RAMDISK, and FILEIO Transport Drivers.
759 */ 757 */
760 int se_free_virtual_device(struct se_device *dev, struct se_hba *hba) 758 int se_free_virtual_device(struct se_device *dev, struct se_hba *hba)
761 { 759 {
762 if (!list_empty(&dev->dev_sep_list)) 760 if (!list_empty(&dev->dev_sep_list))
763 dump_stack(); 761 dump_stack();
764 762
765 core_alua_free_lu_gp_mem(dev); 763 core_alua_free_lu_gp_mem(dev);
766 se_release_device_for_hba(dev); 764 se_release_device_for_hba(dev);
767 765
768 return 0; 766 return 0;
769 } 767 }
770 768
771 static void se_dev_start(struct se_device *dev) 769 static void se_dev_start(struct se_device *dev)
772 { 770 {
773 struct se_hba *hba = dev->se_hba; 771 struct se_hba *hba = dev->se_hba;
774 772
775 spin_lock(&hba->device_lock); 773 spin_lock(&hba->device_lock);
776 atomic_inc(&dev->dev_obj.obj_access_count); 774 atomic_inc(&dev->dev_obj.obj_access_count);
777 if (atomic_read(&dev->dev_obj.obj_access_count) == 1) { 775 if (atomic_read(&dev->dev_obj.obj_access_count) == 1) {
778 if (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) { 776 if (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) {
779 dev->dev_status &= ~TRANSPORT_DEVICE_DEACTIVATED; 777 dev->dev_status &= ~TRANSPORT_DEVICE_DEACTIVATED;
780 dev->dev_status |= TRANSPORT_DEVICE_ACTIVATED; 778 dev->dev_status |= TRANSPORT_DEVICE_ACTIVATED;
781 } else if (dev->dev_status & 779 } else if (dev->dev_status &
782 TRANSPORT_DEVICE_OFFLINE_DEACTIVATED) { 780 TRANSPORT_DEVICE_OFFLINE_DEACTIVATED) {
783 dev->dev_status &= 781 dev->dev_status &=
784 ~TRANSPORT_DEVICE_OFFLINE_DEACTIVATED; 782 ~TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
785 dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_ACTIVATED; 783 dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
786 } 784 }
787 } 785 }
788 spin_unlock(&hba->device_lock); 786 spin_unlock(&hba->device_lock);
789 } 787 }
790 788
791 static void se_dev_stop(struct se_device *dev) 789 static void se_dev_stop(struct se_device *dev)
792 { 790 {
793 struct se_hba *hba = dev->se_hba; 791 struct se_hba *hba = dev->se_hba;
794 792
795 spin_lock(&hba->device_lock); 793 spin_lock(&hba->device_lock);
796 atomic_dec(&dev->dev_obj.obj_access_count); 794 atomic_dec(&dev->dev_obj.obj_access_count);
797 if (atomic_read(&dev->dev_obj.obj_access_count) == 0) { 795 if (atomic_read(&dev->dev_obj.obj_access_count) == 0) {
798 if (dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) { 796 if (dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) {
799 dev->dev_status &= ~TRANSPORT_DEVICE_ACTIVATED; 797 dev->dev_status &= ~TRANSPORT_DEVICE_ACTIVATED;
800 dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED; 798 dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
801 } else if (dev->dev_status & 799 } else if (dev->dev_status &
802 TRANSPORT_DEVICE_OFFLINE_ACTIVATED) { 800 TRANSPORT_DEVICE_OFFLINE_ACTIVATED) {
803 dev->dev_status &= ~TRANSPORT_DEVICE_OFFLINE_ACTIVATED; 801 dev->dev_status &= ~TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
804 dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_DEACTIVATED; 802 dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
805 } 803 }
806 } 804 }
807 spin_unlock(&hba->device_lock); 805 spin_unlock(&hba->device_lock);
808 } 806 }
809 807
810 int se_dev_check_online(struct se_device *dev) 808 int se_dev_check_online(struct se_device *dev)
811 { 809 {
812 unsigned long flags; 810 unsigned long flags;
813 int ret; 811 int ret;
814 812
815 spin_lock_irqsave(&dev->dev_status_lock, flags); 813 spin_lock_irqsave(&dev->dev_status_lock, flags);
816 ret = ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) || 814 ret = ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
817 (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED)) ? 0 : 1; 815 (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED)) ? 0 : 1;
818 spin_unlock_irqrestore(&dev->dev_status_lock, flags); 816 spin_unlock_irqrestore(&dev->dev_status_lock, flags);
819 817
820 return ret; 818 return ret;
821 } 819 }
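
Note the return convention is inverted relative to the name: 0 means the device is ACTIVATED or DEACTIVATED and therefore usable, nonzero means it is not. A hedged sketch of the typical caller pattern (the wrapper function below is hypothetical):

	/* Hypothetical caller: any nonzero se_dev_check_online() is offline. */
	static int example_claim_device(struct se_device *dev)
	{
		if (se_dev_check_online(dev) != 0)
			return -ENODEV;
		/* ... safe to queue I/O against dev here ... */
		return 0;
	}
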
822 820
823 int se_dev_check_shutdown(struct se_device *dev) 821 int se_dev_check_shutdown(struct se_device *dev)
824 { 822 {
825 int ret; 823 int ret;
826 824
827 spin_lock_irq(&dev->dev_status_lock); 825 spin_lock_irq(&dev->dev_status_lock);
828 ret = (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN); 826 ret = (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN);
829 spin_unlock_irq(&dev->dev_status_lock); 827 spin_unlock_irq(&dev->dev_status_lock);
830 828
831 return ret; 829 return ret;
832 } 830 }
833 831
834 u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size) 832 u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
835 { 833 {
836 u32 tmp, aligned_max_sectors; 834 u32 tmp, aligned_max_sectors;
837 /* 835 /*
838 * Limit max_sectors to a PAGE_SIZE aligned value for modern 836 * Limit max_sectors to a PAGE_SIZE aligned value for modern
839 * transport_allocate_data_tasks() operation. 837 * transport_allocate_data_tasks() operation.
840 */ 838 */
841 tmp = rounddown((max_sectors * block_size), PAGE_SIZE); 839 tmp = rounddown((max_sectors * block_size), PAGE_SIZE);
842 aligned_max_sectors = (tmp / block_size); 840 aligned_max_sectors = (tmp / block_size);
843 if (max_sectors != aligned_max_sectors) { 841 if (max_sectors != aligned_max_sectors) {
844 pr_info("Rounding down aligned max_sectors from %u" 842 pr_info("Rounding down aligned max_sectors from %u"
845 " to %u\n", max_sectors, aligned_max_sectors); 843 " to %u\n", max_sectors, aligned_max_sectors);
846 return aligned_max_sectors; 844 return aligned_max_sectors;
847 } 845 }
848 846
849 return max_sectors; 847 return max_sectors;
850 } 848 }
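
A worked example of the alignment, assuming PAGE_SIZE is 4096: with 512-byte blocks there are 8 sectors per page, so max_sectors = 1003 (513536 bytes) is rounded down to 1000 sectors (512000 bytes), the largest page-multiple payload not exceeding the original limit. The same math as a standalone check:

	#include <assert.h>

	int main(void)
	{
		unsigned int block_size = 512, max_sectors = 1003;
		unsigned int page_size = 4096; /* assumed PAGE_SIZE */
		unsigned int bytes = max_sectors * block_size; /* 513536 */
		unsigned int aligned = (bytes / page_size) * page_size; /* 512000 */

		assert(aligned / block_size == 1000);
		return 0;
	}
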
851 849
852 void se_dev_set_default_attribs( 850 void se_dev_set_default_attribs(
853 struct se_device *dev, 851 struct se_device *dev,
854 struct se_dev_limits *dev_limits) 852 struct se_dev_limits *dev_limits)
855 { 853 {
856 struct queue_limits *limits = &dev_limits->limits; 854 struct queue_limits *limits = &dev_limits->limits;
857 855
858 dev->se_sub_dev->se_dev_attrib.emulate_dpo = DA_EMULATE_DPO; 856 dev->se_sub_dev->se_dev_attrib.emulate_dpo = DA_EMULATE_DPO;
859 dev->se_sub_dev->se_dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE; 857 dev->se_sub_dev->se_dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
860 dev->se_sub_dev->se_dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ; 858 dev->se_sub_dev->se_dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
861 dev->se_sub_dev->se_dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE; 859 dev->se_sub_dev->se_dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
862 dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL; 860 dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
863 dev->se_sub_dev->se_dev_attrib.emulate_tas = DA_EMULATE_TAS; 861 dev->se_sub_dev->se_dev_attrib.emulate_tas = DA_EMULATE_TAS;
864 dev->se_sub_dev->se_dev_attrib.emulate_tpu = DA_EMULATE_TPU; 862 dev->se_sub_dev->se_dev_attrib.emulate_tpu = DA_EMULATE_TPU;
865 dev->se_sub_dev->se_dev_attrib.emulate_tpws = DA_EMULATE_TPWS; 863 dev->se_sub_dev->se_dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
866 dev->se_sub_dev->se_dev_attrib.emulate_reservations = DA_EMULATE_RESERVATIONS; 864 dev->se_sub_dev->se_dev_attrib.emulate_reservations = DA_EMULATE_RESERVATIONS;
867 dev->se_sub_dev->se_dev_attrib.emulate_alua = DA_EMULATE_ALUA; 865 dev->se_sub_dev->se_dev_attrib.emulate_alua = DA_EMULATE_ALUA;
868 dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS; 866 dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
869 dev->se_sub_dev->se_dev_attrib.is_nonrot = DA_IS_NONROT; 867 dev->se_sub_dev->se_dev_attrib.is_nonrot = DA_IS_NONROT;
870 dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD; 868 dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
871 /* 869 /*
872 * The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK 870 * The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK
873 * iblock_create_virtdevice() from struct queue_limits values 871 * iblock_create_virtdevice() from struct queue_limits values
874 * if blk_queue_discard()==1 872 * if blk_queue_discard()==1
875 */ 873 */
876 dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT; 874 dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
877 dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 875 dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
878 DA_MAX_UNMAP_BLOCK_DESC_COUNT; 876 DA_MAX_UNMAP_BLOCK_DESC_COUNT;
879 dev->se_sub_dev->se_dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT; 877 dev->se_sub_dev->se_dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
880 dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = 878 dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
881 DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT; 879 DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
882 /* 880 /*
883 * block_size is based on subsystem plugin dependent requirements. 881 * block_size is based on subsystem plugin dependent requirements.
884 */ 882 */
885 dev->se_sub_dev->se_dev_attrib.hw_block_size = limits->logical_block_size; 883 dev->se_sub_dev->se_dev_attrib.hw_block_size = limits->logical_block_size;
886 dev->se_sub_dev->se_dev_attrib.block_size = limits->logical_block_size; 884 dev->se_sub_dev->se_dev_attrib.block_size = limits->logical_block_size;
887 /* 885 /*
888 * max_sectors is based on subsystem plugin dependent requirements. 886 * max_sectors is based on subsystem plugin dependent requirements.
889 */ 887 */
890 dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors; 888 dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors;
891 /* 889 /*
892 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks() 890 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
893 */ 891 */
894 limits->max_sectors = se_dev_align_max_sectors(limits->max_sectors, 892 limits->max_sectors = se_dev_align_max_sectors(limits->max_sectors,
895 limits->logical_block_size); 893 limits->logical_block_size);
896 dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors; 894 dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors;
897 /* 895 /*
898 * Set optimal_sectors from max_sectors, which can be lowered via 896 * Set optimal_sectors from max_sectors, which can be lowered via
899 * configfs. 897 * configfs.
900 */ 898 */
901 dev->se_sub_dev->se_dev_attrib.optimal_sectors = limits->max_sectors; 899 dev->se_sub_dev->se_dev_attrib.optimal_sectors = limits->max_sectors;
902 /* 900 /*
903 * queue_depth is based on subsystem plugin dependent requirements. 901 * queue_depth is based on subsystem plugin dependent requirements.
904 */ 902 */
905 dev->se_sub_dev->se_dev_attrib.hw_queue_depth = dev_limits->hw_queue_depth; 903 dev->se_sub_dev->se_dev_attrib.hw_queue_depth = dev_limits->hw_queue_depth;
906 dev->se_sub_dev->se_dev_attrib.queue_depth = dev_limits->queue_depth; 904 dev->se_sub_dev->se_dev_attrib.queue_depth = dev_limits->queue_depth;
907 } 905 }
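
The queue_limits consumed above come from the subsystem plugin at device creation time. A hedged sketch of how a block-backed backend might seed them from its request_queue; iblock does something similar in iblock_create_virtdevice(), but the function name and queue depth below are illustrative:

	/* Illustrative only: seed se_dev_limits from a block device queue. */
	static void example_fill_dev_limits(struct request_queue *q,
					    struct se_dev_limits *dev_limits)
	{
		struct queue_limits *limits = &dev_limits->limits;

		limits->logical_block_size = queue_logical_block_size(q);
		limits->max_hw_sectors = queue_max_hw_sectors(q);
		limits->max_sectors = queue_max_sectors(q);
		dev_limits->hw_queue_depth = 128; /* assumed backend depth */
		dev_limits->queue_depth = 128;
	}
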
908 906
909 int se_dev_set_max_unmap_lba_count( 907 int se_dev_set_max_unmap_lba_count(
910 struct se_device *dev, 908 struct se_device *dev,
911 u32 max_unmap_lba_count) 909 u32 max_unmap_lba_count)
912 { 910 {
913 dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = max_unmap_lba_count; 911 dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
914 pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n", 912 pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n",
915 dev, dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count); 913 dev, dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count);
916 return 0; 914 return 0;
917 } 915 }
918 916
919 int se_dev_set_max_unmap_block_desc_count( 917 int se_dev_set_max_unmap_block_desc_count(
920 struct se_device *dev, 918 struct se_device *dev,
921 u32 max_unmap_block_desc_count) 919 u32 max_unmap_block_desc_count)
922 { 920 {
923 dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 921 dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
924 max_unmap_block_desc_count; 922 max_unmap_block_desc_count;
925 pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n", 923 pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n",
926 dev, dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count); 924 dev, dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count);
927 return 0; 925 return 0;
928 } 926 }
929 927
930 int se_dev_set_unmap_granularity( 928 int se_dev_set_unmap_granularity(
931 struct se_device *dev, 929 struct se_device *dev,
932 u32 unmap_granularity) 930 u32 unmap_granularity)
933 { 931 {
934 dev->se_sub_dev->se_dev_attrib.unmap_granularity = unmap_granularity; 932 dev->se_sub_dev->se_dev_attrib.unmap_granularity = unmap_granularity;
935 pr_debug("dev[%p]: Set unmap_granularity: %u\n", 933 pr_debug("dev[%p]: Set unmap_granularity: %u\n",
936 dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity); 934 dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity);
937 return 0; 935 return 0;
938 } 936 }
939 937
940 int se_dev_set_unmap_granularity_alignment( 938 int se_dev_set_unmap_granularity_alignment(
941 struct se_device *dev, 939 struct se_device *dev,
942 u32 unmap_granularity_alignment) 940 u32 unmap_granularity_alignment)
943 { 941 {
944 dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment; 942 dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
945 pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n", 943 pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n",
946 dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment); 944 dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment);
947 return 0; 945 return 0;
948 } 946 }
949 947
950 int se_dev_set_emulate_dpo(struct se_device *dev, int flag) 948 int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
951 { 949 {
952 if (flag != 0 && flag != 1) { 950 if (flag != 0 && flag != 1) {
953 pr_err("Illegal value %d\n", flag); 951 pr_err("Illegal value %d\n", flag);
954 return -EINVAL; 952 return -EINVAL;
955 } 953 }
956 954
957 if (flag) { 955 if (flag) {
958 pr_err("dpo_emulated not supported\n"); 956 pr_err("dpo_emulated not supported\n");
959 return -EINVAL; 957 return -EINVAL;
960 } 958 }
961 959
962 return 0; 960 return 0;
963 } 961 }
964 962
965 int se_dev_set_emulate_fua_write(struct se_device *dev, int flag) 963 int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
966 { 964 {
967 if (flag != 0 && flag != 1) { 965 if (flag != 0 && flag != 1) {
968 pr_err("Illegal value %d\n", flag); 966 pr_err("Illegal value %d\n", flag);
969 return -EINVAL; 967 return -EINVAL;
970 } 968 }
971 969
972 if (flag && dev->transport->fua_write_emulated == 0) { 970 if (flag && dev->transport->fua_write_emulated == 0) {
973 pr_err("fua_write_emulated not supported\n"); 971 pr_err("fua_write_emulated not supported\n");
974 return -EINVAL; 972 return -EINVAL;
975 } 973 }
976 dev->se_sub_dev->se_dev_attrib.emulate_fua_write = flag; 974 dev->se_sub_dev->se_dev_attrib.emulate_fua_write = flag;
977 pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n", 975 pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
978 dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_write); 976 dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_write);
979 return 0; 977 return 0;
980 } 978 }
981 979
982 int se_dev_set_emulate_fua_read(struct se_device *dev, int flag) 980 int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
983 { 981 {
984 if (flag != 0 && flag != 1) { 982 if (flag != 0 && flag != 1) {
985 pr_err("Illegal value %d\n", flag); 983 pr_err("Illegal value %d\n", flag);
986 return -EINVAL; 984 return -EINVAL;
987 } 985 }
988 986
989 if (flag) { 987 if (flag) {
990 pr_err("ua read emulated not supported\n"); 988 pr_err("ua read emulated not supported\n");
991 return -EINVAL; 989 return -EINVAL;
992 } 990 }
993 991
994 return 0; 992 return 0;
995 } 993 }
996 994
997 int se_dev_set_emulate_write_cache(struct se_device *dev, int flag) 995 int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
998 { 996 {
999 if (flag != 0 && flag != 1) { 997 if (flag != 0 && flag != 1) {
1000 pr_err("Illegal value %d\n", flag); 998 pr_err("Illegal value %d\n", flag);
1001 return -EINVAL; 999 return -EINVAL;
1002 } 1000 }
1003 if (flag && dev->transport->write_cache_emulated == 0) { 1001 if (flag && dev->transport->write_cache_emulated == 0) {
1004 pr_err("write_cache_emulated not supported\n"); 1002 pr_err("write_cache_emulated not supported\n");
1005 return -EINVAL; 1003 return -EINVAL;
1006 } 1004 }
1007 dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag; 1005 dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag;
1008 pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n", 1006 pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
1009 dev, dev->se_sub_dev->se_dev_attrib.emulate_write_cache); 1007 dev, dev->se_sub_dev->se_dev_attrib.emulate_write_cache);
1010 return 0; 1008 return 0;
1011 } 1009 }
1012 1010
1013 int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag) 1011 int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
1014 { 1012 {
1015 if ((flag != 0) && (flag != 1) && (flag != 2)) { 1013 if ((flag != 0) && (flag != 1) && (flag != 2)) {
1016 pr_err("Illegal value %d\n", flag); 1014 pr_err("Illegal value %d\n", flag);
1017 return -EINVAL; 1015 return -EINVAL;
1018 } 1016 }
1019 1017
1020 if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 1018 if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1021 pr_err("dev[%p]: Unable to change SE Device" 1019 pr_err("dev[%p]: Unable to change SE Device"
1022 " UA_INTRLCK_CTRL while dev_export_obj: %d count" 1020 " UA_INTRLCK_CTRL while dev_export_obj: %d count"
1023 " exists\n", dev, 1021 " exists\n", dev,
1024 atomic_read(&dev->dev_export_obj.obj_access_count)); 1022 atomic_read(&dev->dev_export_obj.obj_access_count));
1025 return -EINVAL; 1023 return -EINVAL;
1026 } 1024 }
1027 dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = flag; 1025 dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = flag;
1028 pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n", 1026 pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
1029 dev, dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl); 1027 dev, dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl);
1030 1028
1031 return 0; 1029 return 0;
1032 } 1030 }
1033 1031
1034 int se_dev_set_emulate_tas(struct se_device *dev, int flag) 1032 int se_dev_set_emulate_tas(struct se_device *dev, int flag)
1035 { 1033 {
1036 if ((flag != 0) && (flag != 1)) { 1034 if ((flag != 0) && (flag != 1)) {
1037 pr_err("Illegal value %d\n", flag); 1035 pr_err("Illegal value %d\n", flag);
1038 return -EINVAL; 1036 return -EINVAL;
1039 } 1037 }
1040 1038
1041 if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 1039 if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1042 pr_err("dev[%p]: Unable to change SE Device TAS while" 1040 pr_err("dev[%p]: Unable to change SE Device TAS while"
1043 " dev_export_obj: %d count exists\n", dev, 1041 " dev_export_obj: %d count exists\n", dev,
1044 atomic_read(&dev->dev_export_obj.obj_access_count)); 1042 atomic_read(&dev->dev_export_obj.obj_access_count));
1045 return -EINVAL; 1043 return -EINVAL;
1046 } 1044 }
1047 dev->se_sub_dev->se_dev_attrib.emulate_tas = flag; 1045 dev->se_sub_dev->se_dev_attrib.emulate_tas = flag;
1048 pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n", 1046 pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
1049 dev, (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? "Enabled" : "Disabled"); 1047 dev, (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? "Enabled" : "Disabled");
1050 1048
1051 return 0; 1049 return 0;
1052 } 1050 }
1053 1051
1054 int se_dev_set_emulate_tpu(struct se_device *dev, int flag) 1052 int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
1055 { 1053 {
1056 if ((flag != 0) && (flag != 1)) { 1054 if ((flag != 0) && (flag != 1)) {
1057 pr_err("Illegal value %d\n", flag); 1055 pr_err("Illegal value %d\n", flag);
1058 return -EINVAL; 1056 return -EINVAL;
1059 } 1057 }
1060 /* 1058 /*
1061 * We expect this value to be non-zero when generic Block Layer 1059 * We expect this value to be non-zero when generic Block Layer
1062 * Discard support is detected in iblock_create_virtdevice(). 1060 * Discard support is detected in iblock_create_virtdevice().
1063 */ 1061 */
1064 if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) { 1062 if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
1065 pr_err("Generic Block Discard not supported\n"); 1063 pr_err("Generic Block Discard not supported\n");
1066 return -ENOSYS; 1064 return -ENOSYS;
1067 } 1065 }
1068 1066
1069 dev->se_sub_dev->se_dev_attrib.emulate_tpu = flag; 1067 dev->se_sub_dev->se_dev_attrib.emulate_tpu = flag;
1070 pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n", 1068 pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
1071 dev, flag); 1069 dev, flag);
1072 return 0; 1070 return 0;
1073 } 1071 }
1074 1072
1075 int se_dev_set_emulate_tpws(struct se_device *dev, int flag) 1073 int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
1076 { 1074 {
1077 if ((flag != 0) && (flag != 1)) { 1075 if ((flag != 0) && (flag != 1)) {
1078 pr_err("Illegal value %d\n", flag); 1076 pr_err("Illegal value %d\n", flag);
1079 return -EINVAL; 1077 return -EINVAL;
1080 } 1078 }
1081 /* 1079 /*
1082 * We expect this value to be non-zero when generic Block Layer 1080 * We expect this value to be non-zero when generic Block Layer
1083 * Discard support is detected in iblock_create_virtdevice(). 1081 * Discard support is detected in iblock_create_virtdevice().
1084 */ 1082 */
1085 if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) { 1083 if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
1086 pr_err("Generic Block Discard not supported\n"); 1084 pr_err("Generic Block Discard not supported\n");
1087 return -ENOSYS; 1085 return -ENOSYS;
1088 } 1086 }
1089 1087
1090 dev->se_sub_dev->se_dev_attrib.emulate_tpws = flag; 1088 dev->se_sub_dev->se_dev_attrib.emulate_tpws = flag;
1091 pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n", 1089 pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
1092 dev, flag); 1090 dev, flag);
1093 return 0; 1091 return 0;
1094 } 1092 }
1095 1093
1096 int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag) 1094 int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
1097 { 1095 {
1098 if ((flag != 0) && (flag != 1)) { 1096 if ((flag != 0) && (flag != 1)) {
1099 pr_err("Illegal value %d\n", flag); 1097 pr_err("Illegal value %d\n", flag);
1100 return -EINVAL; 1098 return -EINVAL;
1101 } 1099 }
1102 dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = flag; 1100 dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = flag;
1103 pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev, 1101 pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
1104 (dev->se_sub_dev->se_dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled"); 1102 (dev->se_sub_dev->se_dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
1105 return 0; 1103 return 0;
1106 } 1104 }
1107 1105
1108 int se_dev_set_is_nonrot(struct se_device *dev, int flag) 1106 int se_dev_set_is_nonrot(struct se_device *dev, int flag)
1109 { 1107 {
1110 if ((flag != 0) && (flag != 1)) { 1108 if ((flag != 0) && (flag != 1)) {
1111 pr_err("Illegal value %d\n", flag); 1109 pr_err("Illegal value %d\n", flag);
1112 return -EINVAL; 1110 return -EINVAL;
1113 } 1111 }
1114 dev->se_sub_dev->se_dev_attrib.is_nonrot = flag; 1112 dev->se_sub_dev->se_dev_attrib.is_nonrot = flag;
1115 pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n", 1113 pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n",
1116 dev, flag); 1114 dev, flag);
1117 return 0; 1115 return 0;
1118 } 1116 }
1119 1117
1120 int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag) 1118 int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
1121 { 1119 {
1122 if (flag != 0) { 1120 if (flag != 0) {
1123 pr_err("dev[%p]: SE Device emulation of restricted 1121 pr_err("dev[%p]: SE Device emulation of restricted
1124 " reordering not implemented\n", dev); 1122 " reordering not implemented\n", dev);
1125 return -ENOSYS; 1123 return -ENOSYS;
1126 } 1124 }
1127 dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = flag; 1125 dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = flag;
1128 pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag); 1126 pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);
1129 return 0; 1127 return 0;
1130 } 1128 }
1131 1129
1132 /* 1130 /*
1133 * Note, this can only be called on unexported SE Device Object. 1131 * Note, this can only be called on unexported SE Device Object.
1134 */ 1132 */
1135 int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth) 1133 int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
1136 { 1134 {
1137 u32 orig_queue_depth = dev->queue_depth; 1135 u32 orig_queue_depth = dev->queue_depth;
1138 1136
1139 if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 1137 if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1140 pr_err("dev[%p]: Unable to change SE Device TCQ while" 1138 pr_err("dev[%p]: Unable to change SE Device TCQ while"
1141 " dev_export_obj: %d count exists\n", dev, 1139 " dev_export_obj: %d count exists\n", dev,
1142 atomic_read(&dev->dev_export_obj.obj_access_count)); 1140 atomic_read(&dev->dev_export_obj.obj_access_count));
1143 return -EINVAL; 1141 return -EINVAL;
1144 } 1142 }
1145 if (!queue_depth) { 1143 if (!queue_depth) {
1146 pr_err("dev[%p]: Illegal ZERO value for queue" 1144 pr_err("dev[%p]: Illegal ZERO value for queue"
1147 "_depth\n", dev); 1145 "_depth\n", dev);
1148 return -EINVAL; 1146 return -EINVAL;
1149 } 1147 }
1150 1148
1151 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { 1149 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1152 if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) { 1150 if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
1153 pr_err("dev[%p]: Passed queue_depth: %u" 1151 pr_err("dev[%p]: Passed queue_depth: %u"
1154 " exceeds TCM/SE_Device TCQ: %u\n", 1152 " exceeds TCM/SE_Device TCQ: %u\n",
1155 dev, queue_depth, 1153 dev, queue_depth,
1156 dev->se_sub_dev->se_dev_attrib.hw_queue_depth); 1154 dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
1157 return -EINVAL; 1155 return -EINVAL;
1158 } 1156 }
1159 } else { 1157 } else {
1160 if (queue_depth > dev->se_sub_dev->se_dev_attrib.queue_depth) { 1158 if (queue_depth > dev->se_sub_dev->se_dev_attrib.queue_depth) {
1161 if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) { 1159 if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
1162 pr_err("dev[%p]: Passed queue_depth:" 1160 pr_err("dev[%p]: Passed queue_depth:"
1163 " %u exceeds TCM/SE_Device MAX" 1161 " %u exceeds TCM/SE_Device MAX"
1164 " TCQ: %u\n", dev, queue_depth, 1162 " TCQ: %u\n", dev, queue_depth,
1165 dev->se_sub_dev->se_dev_attrib.hw_queue_depth); 1163 dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
1166 return -EINVAL; 1164 return -EINVAL;
1167 } 1165 }
1168 } 1166 }
1169 } 1167 }
1170 1168
1171 dev->se_sub_dev->se_dev_attrib.queue_depth = dev->queue_depth = queue_depth; 1169 dev->se_sub_dev->se_dev_attrib.queue_depth = dev->queue_depth = queue_depth;
1172 if (queue_depth > orig_queue_depth) 1170 if (queue_depth > orig_queue_depth)
1173 atomic_add(queue_depth - orig_queue_depth, &dev->depth_left); 1171 atomic_add(queue_depth - orig_queue_depth, &dev->depth_left);
1174 else if (queue_depth < orig_queue_depth) 1172 else if (queue_depth < orig_queue_depth)
1175 atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left); 1173 atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left);
1176 1174
1177 pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n", 1175 pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
1178 dev, queue_depth); 1176 dev, queue_depth);
1179 return 0; 1177 return 0;
1180 } 1178 }
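
Because only the delta is applied to depth_left, in-flight accounting survives a resize. A worked example with plain integers standing in for the atomic (illustrative only):

	/* Resizing 32 -> 64 with 10 commands in flight: credits go 22 -> 54. */
	int main(void)
	{
		int orig_depth = 32, new_depth = 64, in_flight = 10;
		int depth_left = orig_depth - in_flight; /* 22 */

		if (new_depth > orig_depth)
			depth_left += new_depth - orig_depth;
		else if (new_depth < orig_depth)
			depth_left -= orig_depth - new_depth;

		return depth_left == 54 ? 0 : 1; /* in_flight is still 10 */
	}
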
1181 1179
1182 int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors) 1180 int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
1183 { 1181 {
1184 int force = 0; /* Force setting for VDEVS */ 1182 int force = 0; /* Force setting for VDEVS */
1185 1183
1186 if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 1184 if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1187 pr_err("dev[%p]: Unable to change SE Device" 1185 pr_err("dev[%p]: Unable to change SE Device"
1188 " max_sectors while dev_export_obj: %d count exists\n", 1186 " max_sectors while dev_export_obj: %d count exists\n",
1189 dev, atomic_read(&dev->dev_export_obj.obj_access_count)); 1187 dev, atomic_read(&dev->dev_export_obj.obj_access_count));
1190 return -EINVAL; 1188 return -EINVAL;
1191 } 1189 }
1192 if (!max_sectors) { 1190 if (!max_sectors) {
1193 pr_err("dev[%p]: Illegal ZERO value for" 1191 pr_err("dev[%p]: Illegal ZERO value for"
1194 " max_sectors\n", dev); 1192 " max_sectors\n", dev);
1195 return -EINVAL; 1193 return -EINVAL;
1196 } 1194 }
1197 if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) { 1195 if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
1198 pr_err("dev[%p]: Passed max_sectors: %u less than" 1196 pr_err("dev[%p]: Passed max_sectors: %u less than"
1199 " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors, 1197 " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors,
1200 DA_STATUS_MAX_SECTORS_MIN); 1198 DA_STATUS_MAX_SECTORS_MIN);
1201 return -EINVAL; 1199 return -EINVAL;
1202 } 1200 }
1203 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { 1201 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1204 if (max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) { 1202 if (max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) {
1205 pr_err("dev[%p]: Passed max_sectors: %u" 1203 pr_err("dev[%p]: Passed max_sectors: %u"
1206 " greater than TCM/SE_Device max_sectors:" 1204 " greater than TCM/SE_Device max_sectors:"
1207 " %u\n", dev, max_sectors, 1205 " %u\n", dev, max_sectors,
1208 dev->se_sub_dev->se_dev_attrib.hw_max_sectors); 1206 dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
1209 return -EINVAL; 1207 return -EINVAL;
1210 } 1208 }
1211 } else { 1209 } else {
1212 if (!force && (max_sectors > 1210 if (!force && (max_sectors >
1213 dev->se_sub_dev->se_dev_attrib.hw_max_sectors)) { 1211 dev->se_sub_dev->se_dev_attrib.hw_max_sectors)) {
1214 pr_err("dev[%p]: Passed max_sectors: %u" 1212 pr_err("dev[%p]: Passed max_sectors: %u"
1215 " greater than TCM/SE_Device max_sectors" 1213 " greater than TCM/SE_Device max_sectors"
1216 ": %u, use force=1 to override.\n", dev, 1214 ": %u, use force=1 to override.\n", dev,
1217 max_sectors, dev->se_sub_dev->se_dev_attrib.hw_max_sectors); 1215 max_sectors, dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
1218 return -EINVAL; 1216 return -EINVAL;
1219 } 1217 }
1220 if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) { 1218 if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
1221 pr_err("dev[%p]: Passed max_sectors: %u" 1219 pr_err("dev[%p]: Passed max_sectors: %u"
1222 " greater than DA_STATUS_MAX_SECTORS_MAX:" 1220 " greater than DA_STATUS_MAX_SECTORS_MAX:"
1223 " %u\n", dev, max_sectors, 1221 " %u\n", dev, max_sectors,
1224 DA_STATUS_MAX_SECTORS_MAX); 1222 DA_STATUS_MAX_SECTORS_MAX);
1225 return -EINVAL; 1223 return -EINVAL;
1226 } 1224 }
1227 } 1225 }
1228 /* 1226 /*
1229 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks() 1227 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
1230 */ 1228 */
1231 max_sectors = se_dev_align_max_sectors(max_sectors, 1229 max_sectors = se_dev_align_max_sectors(max_sectors,
1232 dev->se_sub_dev->se_dev_attrib.block_size); 1230 dev->se_sub_dev->se_dev_attrib.block_size);
1233 1231
1234 dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors; 1232 dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors;
1235 pr_debug("dev[%p]: SE Device max_sectors changed to %u\n", 1233 pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
1236 dev, max_sectors); 1234 dev, max_sectors);
1237 return 0; 1235 return 0;
1238 } 1236 }
1239 1237
1240 int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors) 1238 int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
1241 { 1239 {
1242 if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 1240 if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1243 pr_err("dev[%p]: Unable to change SE Device" 1241 pr_err("dev[%p]: Unable to change SE Device"
1244 " optimal_sectors while dev_export_obj: %d count exists\n", 1242 " optimal_sectors while dev_export_obj: %d count exists\n",
1245 dev, atomic_read(&dev->dev_export_obj.obj_access_count)); 1243 dev, atomic_read(&dev->dev_export_obj.obj_access_count));
1246 return -EINVAL; 1244 return -EINVAL;
1247 } 1245 }
1248 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { 1246 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1249 pr_err("dev[%p]: Passed optimal_sectors cannot be" 1247 pr_err("dev[%p]: Passed optimal_sectors cannot be"
1250 " changed for TCM/pSCSI\n", dev); 1248 " changed for TCM/pSCSI\n", dev);
1251 return -EINVAL; 1249 return -EINVAL;
1252 } 1250 }
1253 if (optimal_sectors > dev->se_sub_dev->se_dev_attrib.max_sectors) { 1251 if (optimal_sectors > dev->se_sub_dev->se_dev_attrib.max_sectors) {
1254 pr_err("dev[%p]: Passed optimal_sectors %u cannot be" 1252 pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
1255 " greater than max_sectors: %u\n", dev, 1253 " greater than max_sectors: %u\n", dev,
1256 optimal_sectors, dev->se_sub_dev->se_dev_attrib.max_sectors); 1254 optimal_sectors, dev->se_sub_dev->se_dev_attrib.max_sectors);
1257 return -EINVAL; 1255 return -EINVAL;
1258 } 1256 }
1259 1257
1260 dev->se_sub_dev->se_dev_attrib.optimal_sectors = optimal_sectors; 1258 dev->se_sub_dev->se_dev_attrib.optimal_sectors = optimal_sectors;
1261 pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n", 1259 pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
1262 dev, optimal_sectors); 1260 dev, optimal_sectors);
1263 return 0; 1261 return 0;
1264 } 1262 }
1265 1263
1266 int se_dev_set_block_size(struct se_device *dev, u32 block_size) 1264 int se_dev_set_block_size(struct se_device *dev, u32 block_size)
1267 { 1265 {
1268 if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 1266 if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
1269 pr_err("dev[%p]: Unable to change SE Device block_size" 1267 pr_err("dev[%p]: Unable to change SE Device block_size"
1270 " while dev_export_obj: %d count exists\n", dev, 1268 " while dev_export_obj: %d count exists\n", dev,
1271 atomic_read(&dev->dev_export_obj.obj_access_count)); 1269 atomic_read(&dev->dev_export_obj.obj_access_count));
1272 return -EINVAL; 1270 return -EINVAL;
1273 } 1271 }
1274 1272
1275 if ((block_size != 512) && 1273 if ((block_size != 512) &&
1276 (block_size != 1024) && 1274 (block_size != 1024) &&
1277 (block_size != 2048) && 1275 (block_size != 2048) &&
1278 (block_size != 4096)) { 1276 (block_size != 4096)) {
1279 pr_err("dev[%p]: Illegal value for block_device: %u" 1277 pr_err("dev[%p]: Illegal value for block_device: %u"
1280 " for SE device, must be 512, 1024, 2048 or 4096\n", 1278 " for SE device, must be 512, 1024, 2048 or 4096\n",
1281 dev, block_size); 1279 dev, block_size);
1282 return -EINVAL; 1280 return -EINVAL;
1283 } 1281 }
1284 1282
1285 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { 1283 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1286 pr_err("dev[%p]: Not allowed to change block_size for" 1284 pr_err("dev[%p]: Not allowed to change block_size for"
1287 " Physical Device, use for Linux/SCSI to change" 1285 " Physical Device, use for Linux/SCSI to change"
1288 " block_size for underlying hardware\n", dev); 1286 " block_size for underlying hardware\n", dev);
1289 return -EINVAL; 1287 return -EINVAL;
1290 } 1288 }
1291 1289
1292 dev->se_sub_dev->se_dev_attrib.block_size = block_size; 1290 dev->se_sub_dev->se_dev_attrib.block_size = block_size;
1293 pr_debug("dev[%p]: SE Device block_size changed to %u\n", 1291 pr_debug("dev[%p]: SE Device block_size changed to %u\n",
1294 dev, block_size); 1292 dev, block_size);
1295 return 0; 1293 return 0;
1296 } 1294 }
1297 1295
1298 struct se_lun *core_dev_add_lun( 1296 struct se_lun *core_dev_add_lun(
1299 struct se_portal_group *tpg, 1297 struct se_portal_group *tpg,
1300 struct se_hba *hba, 1298 struct se_hba *hba,
1301 struct se_device *dev, 1299 struct se_device *dev,
1302 u32 lun) 1300 u32 lun)
1303 { 1301 {
1304 struct se_lun *lun_p; 1302 struct se_lun *lun_p;
1305 u32 lun_access = 0; 1303 u32 lun_access = 0;
1306 1304
1307 if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) { 1305 if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) {
1308 pr_err("Unable to export struct se_device while dev_access_obj: %d\n", 1306 pr_err("Unable to export struct se_device while dev_access_obj: %d\n",
1309 atomic_read(&dev->dev_access_obj.obj_access_count)); 1307 atomic_read(&dev->dev_access_obj.obj_access_count));
1310 return NULL; 1308 return NULL;
1311 } 1309 }
1312 1310
1313 lun_p = core_tpg_pre_addlun(tpg, lun); 1311 lun_p = core_tpg_pre_addlun(tpg, lun);
1314 if ((IS_ERR(lun_p)) || !lun_p) 1312 if ((IS_ERR(lun_p)) || !lun_p)
1315 return NULL; 1313 return NULL;
1316 1314
1317 if (dev->dev_flags & DF_READ_ONLY) 1315 if (dev->dev_flags & DF_READ_ONLY)
1318 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; 1316 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
1319 else 1317 else
1320 lun_access = TRANSPORT_LUNFLAGS_READ_WRITE; 1318 lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
1321 1319
1322 if (core_tpg_post_addlun(tpg, lun_p, lun_access, dev) < 0) 1320 if (core_tpg_post_addlun(tpg, lun_p, lun_access, dev) < 0)
1323 return NULL; 1321 return NULL;
1324 1322
1325 pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from" 1323 pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
1326 " CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(), 1324 " CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
1327 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun, 1325 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun,
1328 tpg->se_tpg_tfo->get_fabric_name(), hba->hba_id); 1326 tpg->se_tpg_tfo->get_fabric_name(), hba->hba_id);
1329 /* 1327 /*
1330 * Update LUN maps for dynamically added initiators when 1328 * Update LUN maps for dynamically added initiators when
1331 * generate_node_acl is enabled. 1329 * generate_node_acl is enabled.
1332 */ 1330 */
1333 if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) { 1331 if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
1334 struct se_node_acl *acl; 1332 struct se_node_acl *acl;
1335 spin_lock_irq(&tpg->acl_node_lock); 1333 spin_lock_irq(&tpg->acl_node_lock);
1336 list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { 1334 list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
1337 if (acl->dynamic_node_acl && 1335 if (acl->dynamic_node_acl &&
1338 (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only || 1336 (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
1339 !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) { 1337 !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
1340 spin_unlock_irq(&tpg->acl_node_lock); 1338 spin_unlock_irq(&tpg->acl_node_lock);
1341 core_tpg_add_node_to_devs(acl, tpg); 1339 core_tpg_add_node_to_devs(acl, tpg);
1342 spin_lock_irq(&tpg->acl_node_lock); 1340 spin_lock_irq(&tpg->acl_node_lock);
1343 } 1341 }
1344 } 1342 }
1345 spin_unlock_irq(&tpg->acl_node_lock); 1343 spin_unlock_irq(&tpg->acl_node_lock);
1346 } 1344 }
1347 1345
1348 return lun_p; 1346 return lun_p;
1349 } 1347 }
1350 1348
1351 /* core_dev_del_lun(): 1349 /* core_dev_del_lun():
1352 * 1350 *
1353 * 1351 *
1354 */ 1352 */
1355 int core_dev_del_lun( 1353 int core_dev_del_lun(
1356 struct se_portal_group *tpg, 1354 struct se_portal_group *tpg,
1357 u32 unpacked_lun) 1355 u32 unpacked_lun)
1358 { 1356 {
1359 struct se_lun *lun; 1357 struct se_lun *lun;
1360 int ret = 0; 1358 int ret = 0;
1361 1359
1362 lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret); 1360 lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret);
1363 if (!lun) 1361 if (!lun)
1364 return ret; 1362 return ret;
1365 1363
1366 core_tpg_post_dellun(tpg, lun); 1364 core_tpg_post_dellun(tpg, lun);
1367 1365
1368 pr_debug("%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from" 1366 pr_debug("%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
1369 " device object\n", tpg->se_tpg_tfo->get_fabric_name(), 1367 " device object\n", tpg->se_tpg_tfo->get_fabric_name(),
1370 tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, 1368 tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun,
1371 tpg->se_tpg_tfo->get_fabric_name()); 1369 tpg->se_tpg_tfo->get_fabric_name());
1372 1370
1373 return 0; 1371 return 0;
1374 } 1372 }
1375 1373
1376 struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun) 1374 struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
1377 { 1375 {
1378 struct se_lun *lun; 1376 struct se_lun *lun;
1379 1377
1380 spin_lock(&tpg->tpg_lun_lock); 1378 spin_lock(&tpg->tpg_lun_lock);
1381 if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { 1379 if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
1382 pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS" 1380 pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
1383 "_PER_TPG-1: %u for Target Portal Group: %hu\n", 1381 "_PER_TPG-1: %u for Target Portal Group: %hu\n",
1384 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, 1382 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
1385 TRANSPORT_MAX_LUNS_PER_TPG-1, 1383 TRANSPORT_MAX_LUNS_PER_TPG-1,
1386 tpg->se_tpg_tfo->tpg_get_tag(tpg)); 1384 tpg->se_tpg_tfo->tpg_get_tag(tpg));
1387 spin_unlock(&tpg->tpg_lun_lock); 1385 spin_unlock(&tpg->tpg_lun_lock);
1388 return NULL; 1386 return NULL;
1389 } 1387 }
1390 lun = &tpg->tpg_lun_list[unpacked_lun]; 1388 lun = &tpg->tpg_lun_list[unpacked_lun];
1391 1389
1392 if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) { 1390 if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
1393 pr_err("%s Logical Unit Number: %u is not free on" 1391 pr_err("%s Logical Unit Number: %u is not free on"
1394 " Target Portal Group: %hu, ignoring request.\n", 1392 " Target Portal Group: %hu, ignoring request.\n",
1395 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, 1393 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
1396 tpg->se_tpg_tfo->tpg_get_tag(tpg)); 1394 tpg->se_tpg_tfo->tpg_get_tag(tpg));
1397 spin_unlock(&tpg->tpg_lun_lock); 1395 spin_unlock(&tpg->tpg_lun_lock);
1398 return NULL; 1396 return NULL;
1399 } 1397 }
1400 spin_unlock(&tpg->tpg_lun_lock); 1398 spin_unlock(&tpg->tpg_lun_lock);
1401 1399
1402 return lun; 1400 return lun;
1403 } 1401 }
1404 1402
1405 /* core_dev_get_lun(): 1403 /* core_dev_get_lun():
1406 * 1404 *
1407 * 1405 *
1408 */ 1406 */
1409 static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun) 1407 static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun)
1410 { 1408 {
1411 struct se_lun *lun; 1409 struct se_lun *lun;
1412 1410
1413 spin_lock(&tpg->tpg_lun_lock); 1411 spin_lock(&tpg->tpg_lun_lock);
1414 if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { 1412 if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
1415 pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER" 1413 pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
1416 "_TPG-1: %u for Target Portal Group: %hu\n", 1414 "_TPG-1: %u for Target Portal Group: %hu\n",
1417 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, 1415 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
1418 TRANSPORT_MAX_LUNS_PER_TPG-1, 1416 TRANSPORT_MAX_LUNS_PER_TPG-1,
1419 tpg->se_tpg_tfo->tpg_get_tag(tpg)); 1417 tpg->se_tpg_tfo->tpg_get_tag(tpg));
1420 spin_unlock(&tpg->tpg_lun_lock); 1418 spin_unlock(&tpg->tpg_lun_lock);
1421 return NULL; 1419 return NULL;
1422 } 1420 }
1423 lun = &tpg->tpg_lun_list[unpacked_lun]; 1421 lun = &tpg->tpg_lun_list[unpacked_lun];
1424 1422
1425 if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) { 1423 if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
1426 pr_err("%s Logical Unit Number: %u is not active on" 1424 pr_err("%s Logical Unit Number: %u is not active on"
1427 " Target Portal Group: %hu, ignoring request.\n", 1425 " Target Portal Group: %hu, ignoring request.\n",
1428 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, 1426 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
1429 tpg->se_tpg_tfo->tpg_get_tag(tpg)); 1427 tpg->se_tpg_tfo->tpg_get_tag(tpg));
1430 spin_unlock(&tpg->tpg_lun_lock); 1428 spin_unlock(&tpg->tpg_lun_lock);
1431 return NULL; 1429 return NULL;
1432 } 1430 }
1433 spin_unlock(&tpg->tpg_lun_lock); 1431 spin_unlock(&tpg->tpg_lun_lock);
1434 1432
1435 return lun; 1433 return lun;
1436 } 1434 }
1437 1435
1438 struct se_lun_acl *core_dev_init_initiator_node_lun_acl( 1436 struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
1439 struct se_portal_group *tpg, 1437 struct se_portal_group *tpg,
1440 u32 mapped_lun, 1438 u32 mapped_lun,
1441 char *initiatorname, 1439 char *initiatorname,
1442 int *ret) 1440 int *ret)
1443 { 1441 {
1444 struct se_lun_acl *lacl; 1442 struct se_lun_acl *lacl;
1445 struct se_node_acl *nacl; 1443 struct se_node_acl *nacl;
1446 1444
1447 if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) { 1445 if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) {
1448 pr_err("%s InitiatorName exceeds maximum size.\n", 1446 pr_err("%s InitiatorName exceeds maximum size.\n",
1449 tpg->se_tpg_tfo->get_fabric_name()); 1447 tpg->se_tpg_tfo->get_fabric_name());
1450 *ret = -EOVERFLOW; 1448 *ret = -EOVERFLOW;
1451 return NULL; 1449 return NULL;
1452 } 1450 }
1453 nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname); 1451 nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
1454 if (!nacl) { 1452 if (!nacl) {
1455 *ret = -EINVAL; 1453 *ret = -EINVAL;
1456 return NULL; 1454 return NULL;
1457 } 1455 }
1458 lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL); 1456 lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
1459 if (!lacl) { 1457 if (!lacl) {
1460 pr_err("Unable to allocate memory for struct se_lun_acl.\n"); 1458 pr_err("Unable to allocate memory for struct se_lun_acl.\n");
1461 *ret = -ENOMEM; 1459 *ret = -ENOMEM;
1462 return NULL; 1460 return NULL;
1463 } 1461 }
1464 1462
1465 INIT_LIST_HEAD(&lacl->lacl_list); 1463 INIT_LIST_HEAD(&lacl->lacl_list);
1466 lacl->mapped_lun = mapped_lun; 1464 lacl->mapped_lun = mapped_lun;
1467 lacl->se_lun_nacl = nacl; 1465 lacl->se_lun_nacl = nacl;
1468 snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); 1466 snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
1469 1467
1470 return lacl; 1468 return lacl;
1471 } 1469 }
1472 1470
1473 int core_dev_add_initiator_node_lun_acl( 1471 int core_dev_add_initiator_node_lun_acl(
1474 struct se_portal_group *tpg, 1472 struct se_portal_group *tpg,
1475 struct se_lun_acl *lacl, 1473 struct se_lun_acl *lacl,
1476 u32 unpacked_lun, 1474 u32 unpacked_lun,
1477 u32 lun_access) 1475 u32 lun_access)
1478 { 1476 {
1479 struct se_lun *lun; 1477 struct se_lun *lun;
1480 struct se_node_acl *nacl; 1478 struct se_node_acl *nacl;
1481 1479
1482 lun = core_dev_get_lun(tpg, unpacked_lun); 1480 lun = core_dev_get_lun(tpg, unpacked_lun);
1483 if (!lun) { 1481 if (!lun) {
1484 pr_err("%s Logical Unit Number: %u is not active on" 1482 pr_err("%s Logical Unit Number: %u is not active on"
1485 " Target Portal Group: %hu, ignoring request.\n", 1483 " Target Portal Group: %hu, ignoring request.\n",
1486 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, 1484 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
1487 tpg->se_tpg_tfo->tpg_get_tag(tpg)); 1485 tpg->se_tpg_tfo->tpg_get_tag(tpg));
1488 return -EINVAL; 1486 return -EINVAL;
1489 } 1487 }
1490 1488
1491 nacl = lacl->se_lun_nacl; 1489 nacl = lacl->se_lun_nacl;
1492 if (!nacl) 1490 if (!nacl)
1493 return -EINVAL; 1491 return -EINVAL;
1494 1492
1495 if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) && 1493 if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
1496 (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE)) 1494 (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
1497 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; 1495 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
1498 1496
1499 lacl->se_lun = lun; 1497 lacl->se_lun = lun;
1500 1498
1501 if (core_update_device_list_for_node(lun, lacl, lacl->mapped_lun, 1499 if (core_update_device_list_for_node(lun, lacl, lacl->mapped_lun,
1502 lun_access, nacl, tpg, 1) < 0) 1500 lun_access, nacl, tpg, 1) < 0)
1503 return -EINVAL; 1501 return -EINVAL;
1504 1502
1505 spin_lock(&lun->lun_acl_lock); 1503 spin_lock(&lun->lun_acl_lock);
1506 list_add_tail(&lacl->lacl_list, &lun->lun_acl_list); 1504 list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
1507 atomic_inc(&lun->lun_acl_count); 1505 atomic_inc(&lun->lun_acl_count);
1508 smp_mb__after_atomic_inc(); 1506 smp_mb__after_atomic_inc();
1509 spin_unlock(&lun->lun_acl_lock); 1507 spin_unlock(&lun->lun_acl_lock);
1510 1508
1511 pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for " 1509 pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for "
1512 " InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(), 1510 " InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
1513 tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun, 1511 tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
1514 (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO", 1512 (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
1515 lacl->initiatorname); 1513 lacl->initiatorname);
1516 /* 1514 /*
1517 * Check to see if there are any existing persistent reservation APTPL 1515 * Check to see if there are any existing persistent reservation APTPL
1518 * pre-registrations that need to be enabled for this LUN ACL.. 1516 * pre-registrations that need to be enabled for this LUN ACL..
1519 */ 1517 */
1520 core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl); 1518 core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl);
1521 return 0; 1519 return 0;
1522 } 1520 }
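
The lun_access masking above ensures a per-initiator ACL can never grant wider access than the LUN itself was exported with. The same policy pulled out as a standalone helper (hypothetical name, for illustration only):

	/* A read-only LUN caps any READ_WRITE ACL request at READ_ONLY. */
	static u32 example_effective_lun_access(u32 lun_flags, u32 requested)
	{
		if ((lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) &&
		    (requested & TRANSPORT_LUNFLAGS_READ_WRITE))
			return TRANSPORT_LUNFLAGS_READ_ONLY;
		return requested;
	}
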
1523 1521
1524 /* core_dev_del_initiator_node_lun_acl(): 1522 /* core_dev_del_initiator_node_lun_acl():
1525 * 1523 *
1526 * 1524 *
1527 */ 1525 */
1528 int core_dev_del_initiator_node_lun_acl( 1526 int core_dev_del_initiator_node_lun_acl(
1529 struct se_portal_group *tpg, 1527 struct se_portal_group *tpg,
1530 struct se_lun *lun, 1528 struct se_lun *lun,
1531 struct se_lun_acl *lacl) 1529 struct se_lun_acl *lacl)
1532 { 1530 {
1533 struct se_node_acl *nacl; 1531 struct se_node_acl *nacl;
1534 1532
1535 nacl = lacl->se_lun_nacl; 1533 nacl = lacl->se_lun_nacl;
1536 if (!nacl) 1534 if (!nacl)
1537 return -EINVAL; 1535 return -EINVAL;
1538 1536
1539 spin_lock(&lun->lun_acl_lock); 1537 spin_lock(&lun->lun_acl_lock);
1540 list_del(&lacl->lacl_list); 1538 list_del(&lacl->lacl_list);
1541 atomic_dec(&lun->lun_acl_count); 1539 atomic_dec(&lun->lun_acl_count);
1542 smp_mb__after_atomic_dec(); 1540 smp_mb__after_atomic_dec();
1543 spin_unlock(&lun->lun_acl_lock); 1541 spin_unlock(&lun->lun_acl_lock);
1544 1542
1545 core_update_device_list_for_node(lun, NULL, lacl->mapped_lun, 1543 core_update_device_list_for_node(lun, NULL, lacl->mapped_lun,
1546 TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0); 1544 TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
1547 1545
1548 lacl->se_lun = NULL; 1546 lacl->se_lun = NULL;
1549 1547
1550 pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for" 1548 pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for"
1551 " InitiatorNode: %s Mapped LUN: %u\n", 1549 " InitiatorNode: %s Mapped LUN: %u\n",
1552 tpg->se_tpg_tfo->get_fabric_name(), 1550 tpg->se_tpg_tfo->get_fabric_name(),
1553 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, 1551 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
1554 lacl->initiatorname, lacl->mapped_lun); 1552 lacl->initiatorname, lacl->mapped_lun);
1555 1553
1556 return 0; 1554 return 0;
1557 } 1555 }
1558 1556
1559 void core_dev_free_initiator_node_lun_acl( 1557 void core_dev_free_initiator_node_lun_acl(
1560 struct se_portal_group *tpg, 1558 struct se_portal_group *tpg,
1561 struct se_lun_acl *lacl) 1559 struct se_lun_acl *lacl)
1562 { 1560 {
1563 pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s" 1561 pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
1564 " Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(), 1562 " Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
1565 tpg->se_tpg_tfo->tpg_get_tag(tpg), 1563 tpg->se_tpg_tfo->tpg_get_tag(tpg),
1566 tpg->se_tpg_tfo->get_fabric_name(), 1564 tpg->se_tpg_tfo->get_fabric_name(),
1567 lacl->initiatorname, lacl->mapped_lun); 1565 lacl->initiatorname, lacl->mapped_lun);
1568 1566
1569 kfree(lacl); 1567 kfree(lacl);
1570 } 1568 }
1571 1569
1572 int core_dev_setup_virtual_lun0(void) 1570 int core_dev_setup_virtual_lun0(void)
1573 { 1571 {
1574 struct se_hba *hba; 1572 struct se_hba *hba;
1575 struct se_device *dev; 1573 struct se_device *dev;
1576 struct se_subsystem_dev *se_dev = NULL; 1574 struct se_subsystem_dev *se_dev = NULL;
1577 struct se_subsystem_api *t; 1575 struct se_subsystem_api *t;
1578 char buf[16]; 1576 char buf[16];
1579 int ret; 1577 int ret;
1580 1578
1581 hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE); 1579 hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
1582 if (IS_ERR(hba)) 1580 if (IS_ERR(hba))
1583 return PTR_ERR(hba); 1581 return PTR_ERR(hba);
1584 1582
1585 lun0_hba = hba; 1583 lun0_hba = hba;
1586 t = hba->transport; 1584 t = hba->transport;
1587 1585
1588 se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL); 1586 se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
1589 if (!se_dev) { 1587 if (!se_dev) {
1590 pr_err("Unable to allocate memory for" 1588 pr_err("Unable to allocate memory for"
1591 " struct se_subsystem_dev\n"); 1589 " struct se_subsystem_dev\n");
1592 ret = -ENOMEM; 1590 ret = -ENOMEM;
1593 goto out; 1591 goto out;
1594 } 1592 }
1595 INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list); 1593 INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
1596 spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock); 1594 spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
1597 INIT_LIST_HEAD(&se_dev->t10_pr.registration_list); 1595 INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
1598 INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list); 1596 INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list);
1599 spin_lock_init(&se_dev->t10_pr.registration_lock); 1597 spin_lock_init(&se_dev->t10_pr.registration_lock);
1600 spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock); 1598 spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock);
1601 INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list); 1599 INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
1602 spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock); 1600 spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
1603 spin_lock_init(&se_dev->se_dev_lock); 1601 spin_lock_init(&se_dev->se_dev_lock);
1604 se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN; 1602 se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
1605 se_dev->t10_wwn.t10_sub_dev = se_dev; 1603 se_dev->t10_wwn.t10_sub_dev = se_dev;
1606 se_dev->t10_alua.t10_sub_dev = se_dev; 1604 se_dev->t10_alua.t10_sub_dev = se_dev;
1607 se_dev->se_dev_attrib.da_sub_dev = se_dev; 1605 se_dev->se_dev_attrib.da_sub_dev = se_dev;
1608 se_dev->se_dev_hba = hba; 1606 se_dev->se_dev_hba = hba;
1609 1607
1610 se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0"); 1608 se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0");
1611 if (!se_dev->se_dev_su_ptr) { 1609 if (!se_dev->se_dev_su_ptr) {
1612 pr_err("Unable to locate subsystem dependent pointer" 1610 pr_err("Unable to locate subsystem dependent pointer"
1613 " from allocate_virtdevice()\n"); 1611 " from allocate_virtdevice()\n");
1614 ret = -ENOMEM; 1612 ret = -ENOMEM;
1615 goto out; 1613 goto out;
1616 } 1614 }
1617 lun0_su_dev = se_dev; 1615 lun0_su_dev = se_dev;
1618 1616
1619 memset(buf, 0, 16); 1617 memset(buf, 0, 16);
1620 sprintf(buf, "rd_pages=8"); 1618 sprintf(buf, "rd_pages=8");
1621 t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf)); 1619 t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf));
1622 1620
1623 dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr); 1621 dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
1624 if (IS_ERR(dev)) { 1622 if (IS_ERR(dev)) {
1625 ret = PTR_ERR(dev); 1623 ret = PTR_ERR(dev);
1626 goto out; 1624 goto out;
1627 } 1625 }
1628 se_dev->se_dev_ptr = dev; 1626 se_dev->se_dev_ptr = dev;
1629 g_lun0_dev = dev; 1627 g_lun0_dev = dev;
1630 1628
1631 return 0; 1629 return 0;
1632 out: 1630 out:
1633 lun0_su_dev = NULL; 1631 lun0_su_dev = NULL;
1634 kfree(se_dev); 1632 kfree(se_dev);
1635 if (lun0_hba) { 1633 if (lun0_hba) {
1636 core_delete_hba(lun0_hba); 1634 core_delete_hba(lun0_hba);
1637 lun0_hba = NULL; 1635 lun0_hba = NULL;
1638 } 1636 }
1639 return ret; 1637 return ret;
1640 } 1638 }
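core_dev_setup_virtual_lun0() doubles as a compact tour of the struct se_subsystem_api backend interface: allocate_virtdevice() returns a subsystem-private cookie, set_configfs_dev_params() feeds it an option string, and create_virtdevice() turns the pair into a live struct se_device. A minimal sketch of that sequence, with the error handling trimmed and reusing the rd_mcp "rd_pages=8" parameter from the function above (illustrative only, not part of this diff):

static struct se_device *example_backend_bringup(struct se_hba *hba,
				struct se_subsystem_dev *se_dev)
{
	struct se_subsystem_api *t = hba->transport;
	char buf[16];

	/* 1) backend allocates its private per-device state */
	se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0");
	if (!se_dev->se_dev_su_ptr)
		return ERR_PTR(-ENOMEM);

	/* 2) configure it exactly as a configfs control-file write would */
	snprintf(buf, sizeof(buf), "rd_pages=8");
	t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf));

	/* 3) instantiate the struct se_device (may return ERR_PTR) */
	return t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
}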
1641 1639
1642 1640
1643 void core_dev_release_virtual_lun0(void) 1641 void core_dev_release_virtual_lun0(void)
1644 { 1642 {
1645 struct se_hba *hba = lun0_hba; 1643 struct se_hba *hba = lun0_hba;
1646 struct se_subsystem_dev *su_dev = lun0_su_dev; 1644 struct se_subsystem_dev *su_dev = lun0_su_dev;
1647 1645
1648 if (!hba) 1646 if (!hba)
1649 return; 1647 return;
1650 1648
1651 if (g_lun0_dev) 1649 if (g_lun0_dev)
1652 se_free_virtual_device(g_lun0_dev, hba); 1650 se_free_virtual_device(g_lun0_dev, hba);
1653 1651
1654 kfree(su_dev); 1652 kfree(su_dev);
1655 core_delete_hba(hba); 1653 core_delete_hba(hba);
1656 } 1654 }
1657 1655
drivers/target/target_core_fabric_configfs.c
1 /******************************************************************************* 1 /*******************************************************************************
2 * Filename: target_core_fabric_configfs.c 2 * Filename: target_core_fabric_configfs.c
3 * 3 *
4 * This file contains generic fabric module configfs infrastructure for 4 * This file contains generic fabric module configfs infrastructure for
5 * TCM v4.x code 5 * TCM v4.x code
6 * 6 *
7 * Copyright (c) 2010,2011 Rising Tide Systems 7 * Copyright (c) 2010,2011 Rising Tide Systems
8 * Copyright (c) 2010,2011 Linux-iSCSI.org 8 * Copyright (c) 2010,2011 Linux-iSCSI.org
9 * 9 *
10 * Copyright (c) Nicholas A. Bellinger <nab@linux-iscsi.org> 10 * Copyright (c) Nicholas A. Bellinger <nab@linux-iscsi.org>
11 * 11 *
12 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by 13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or 14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version. 15 * (at your option) any later version.
16 * 16 *
17 * This program is distributed in the hope that it will be useful, 17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of 18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details. 20 * GNU General Public License for more details.
21 ****************************************************************************/ 21 ****************************************************************************/
22 22
23 #include <linux/module.h> 23 #include <linux/module.h>
24 #include <linux/moduleparam.h> 24 #include <linux/moduleparam.h>
25 #include <generated/utsrelease.h> 25 #include <generated/utsrelease.h>
26 #include <linux/utsname.h> 26 #include <linux/utsname.h>
27 #include <linux/init.h> 27 #include <linux/init.h>
28 #include <linux/fs.h> 28 #include <linux/fs.h>
29 #include <linux/namei.h> 29 #include <linux/namei.h>
30 #include <linux/slab.h> 30 #include <linux/slab.h>
31 #include <linux/types.h> 31 #include <linux/types.h>
32 #include <linux/delay.h> 32 #include <linux/delay.h>
33 #include <linux/unistd.h> 33 #include <linux/unistd.h>
34 #include <linux/string.h> 34 #include <linux/string.h>
35 #include <linux/syscalls.h> 35 #include <linux/syscalls.h>
36 #include <linux/configfs.h> 36 #include <linux/configfs.h>
37 37
38 #include <target/target_core_base.h> 38 #include <target/target_core_base.h>
39 #include <target/target_core_device.h> 39 #include <target/target_core_fabric.h>
40 #include <target/target_core_tpg.h>
41 #include <target/target_core_transport.h>
42 #include <target/target_core_fabric_ops.h>
43 #include <target/target_core_fabric_configfs.h> 40 #include <target/target_core_fabric_configfs.h>
44 #include <target/target_core_configfs.h> 41 #include <target/target_core_configfs.h>
45 #include <target/configfs_macros.h> 42 #include <target/configfs_macros.h>
46 43
47 #include "target_core_internal.h" 44 #include "target_core_internal.h"
48 #include "target_core_alua.h" 45 #include "target_core_alua.h"
49 #include "target_core_pr.h" 46 #include "target_core_pr.h"
50 47
51 #define TF_CIT_SETUP(_name, _item_ops, _group_ops, _attrs) \ 48 #define TF_CIT_SETUP(_name, _item_ops, _group_ops, _attrs) \
52 static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) \ 49 static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) \
53 { \ 50 { \
54 struct target_fabric_configfs_template *tfc = &tf->tf_cit_tmpl; \ 51 struct target_fabric_configfs_template *tfc = &tf->tf_cit_tmpl; \
55 struct config_item_type *cit = &tfc->tfc_##_name##_cit; \ 52 struct config_item_type *cit = &tfc->tfc_##_name##_cit; \
56 \ 53 \
57 cit->ct_item_ops = _item_ops; \ 54 cit->ct_item_ops = _item_ops; \
58 cit->ct_group_ops = _group_ops; \ 55 cit->ct_group_ops = _group_ops; \
59 cit->ct_attrs = _attrs; \ 56 cit->ct_attrs = _attrs; \
60 cit->ct_owner = tf->tf_module; \ 57 cit->ct_owner = tf->tf_module; \
61 pr_debug("Setup generic %s\n", __stringify(_name)); \ 58 pr_debug("Setup generic %s\n", __stringify(_name)); \
62 } 59 }
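TF_CIT_SETUP() stamps out one setup helper per config_item_type in the fabric template. As a worked example, the TF_CIT_SETUP(tpg_mappedlun, ...) invocation at the end of the mappedlun section below expands (modulo whitespace) to:

static void target_fabric_setup_tpg_mappedlun_cit(struct target_fabric_configfs *tf)
{
	struct target_fabric_configfs_template *tfc = &tf->tf_cit_tmpl;
	struct config_item_type *cit = &tfc->tfc_tpg_mappedlun_cit;

	cit->ct_item_ops = &target_fabric_mappedlun_item_ops;
	cit->ct_group_ops = NULL;
	cit->ct_attrs = target_fabric_mappedlun_attrs;
	cit->ct_owner = tf->tf_module;
	pr_debug("Setup generic %s\n", "tpg_mappedlun");	/* __stringify() result */
}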
63 60
64 /* Start of tfc_tpg_mappedlun_cit */ 61 /* Start of tfc_tpg_mappedlun_cit */
65 62
66 static int target_fabric_mappedlun_link( 63 static int target_fabric_mappedlun_link(
67 struct config_item *lun_acl_ci, 64 struct config_item *lun_acl_ci,
68 struct config_item *lun_ci) 65 struct config_item *lun_ci)
69 { 66 {
70 struct se_dev_entry *deve; 67 struct se_dev_entry *deve;
71 struct se_lun *lun = container_of(to_config_group(lun_ci), 68 struct se_lun *lun = container_of(to_config_group(lun_ci),
72 struct se_lun, lun_group); 69 struct se_lun, lun_group);
73 struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci), 70 struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci),
74 struct se_lun_acl, se_lun_group); 71 struct se_lun_acl, se_lun_group);
75 struct se_portal_group *se_tpg; 72 struct se_portal_group *se_tpg;
76 struct config_item *nacl_ci, *tpg_ci, *tpg_ci_s, *wwn_ci, *wwn_ci_s; 73 struct config_item *nacl_ci, *tpg_ci, *tpg_ci_s, *wwn_ci, *wwn_ci_s;
77 int ret = 0, lun_access; 74 int ret = 0, lun_access;
78 /* 75 /*
79 * Ensure that the source port exists 76 * Ensure that the source port exists
80 */ 77 */
81 if (!lun->lun_sep || !lun->lun_sep->sep_tpg) { 78 if (!lun->lun_sep || !lun->lun_sep->sep_tpg) {
82 pr_err("Source se_lun->lun_sep or lun->lun_sep->sep" 79 pr_err("Source se_lun->lun_sep or lun->lun_sep->sep"
83 "_tpg does not exist\n"); 80 "_tpg does not exist\n");
84 return -EINVAL; 81 return -EINVAL;
85 } 82 }
86 se_tpg = lun->lun_sep->sep_tpg; 83 se_tpg = lun->lun_sep->sep_tpg;
87 84
88 nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item; 85 nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item;
89 tpg_ci = &nacl_ci->ci_group->cg_item; 86 tpg_ci = &nacl_ci->ci_group->cg_item;
90 wwn_ci = &tpg_ci->ci_group->cg_item; 87 wwn_ci = &tpg_ci->ci_group->cg_item;
91 tpg_ci_s = &lun_ci->ci_parent->ci_group->cg_item; 88 tpg_ci_s = &lun_ci->ci_parent->ci_group->cg_item;
92 wwn_ci_s = &tpg_ci_s->ci_group->cg_item; 89 wwn_ci_s = &tpg_ci_s->ci_group->cg_item;
93 /* 90 /*
94 * Make sure the SymLink is going to the same $FABRIC/$WWN/tpgt_$TPGT 91 * Make sure the SymLink is going to the same $FABRIC/$WWN/tpgt_$TPGT
95 */ 92 */
96 if (strcmp(config_item_name(wwn_ci), config_item_name(wwn_ci_s))) { 93 if (strcmp(config_item_name(wwn_ci), config_item_name(wwn_ci_s))) {
97 pr_err("Illegal Initiator ACL SymLink outside of %s\n", 94 pr_err("Illegal Initiator ACL SymLink outside of %s\n",
98 config_item_name(wwn_ci)); 95 config_item_name(wwn_ci));
99 return -EINVAL; 96 return -EINVAL;
100 } 97 }
101 if (strcmp(config_item_name(tpg_ci), config_item_name(tpg_ci_s))) { 98 if (strcmp(config_item_name(tpg_ci), config_item_name(tpg_ci_s))) {
102 pr_err("Illegal Initiator ACL Symlink outside of %s" 99 pr_err("Illegal Initiator ACL Symlink outside of %s"
103 " TPGT: %s\n", config_item_name(wwn_ci), 100 " TPGT: %s\n", config_item_name(wwn_ci),
104 config_item_name(tpg_ci)); 101 config_item_name(tpg_ci));
105 return -EINVAL; 102 return -EINVAL;
106 } 103 }
107 /* 104 /*
108 * If this struct se_node_acl was dynamically generated with 105 * If this struct se_node_acl was dynamically generated with
109 * tpg_1/attrib/generate_node_acls=1, use the existing deve->lun_flags, 106 * tpg_1/attrib/generate_node_acls=1, use the existing deve->lun_flags,
110 * which will be write protected (READ-ONLY) when 107 * which will be write protected (READ-ONLY) when
111 * tpg_1/attrib/demo_mode_write_protect=1 108 * tpg_1/attrib/demo_mode_write_protect=1
112 */ 109 */
113 spin_lock_irq(&lacl->se_lun_nacl->device_list_lock); 110 spin_lock_irq(&lacl->se_lun_nacl->device_list_lock);
114 deve = &lacl->se_lun_nacl->device_list[lacl->mapped_lun]; 111 deve = &lacl->se_lun_nacl->device_list[lacl->mapped_lun];
115 if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) 112 if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)
116 lun_access = deve->lun_flags; 113 lun_access = deve->lun_flags;
117 else 114 else
118 lun_access = 115 lun_access =
119 (se_tpg->se_tpg_tfo->tpg_check_prod_mode_write_protect( 116 (se_tpg->se_tpg_tfo->tpg_check_prod_mode_write_protect(
120 se_tpg)) ? TRANSPORT_LUNFLAGS_READ_ONLY : 117 se_tpg)) ? TRANSPORT_LUNFLAGS_READ_ONLY :
121 TRANSPORT_LUNFLAGS_READ_WRITE; 118 TRANSPORT_LUNFLAGS_READ_WRITE;
122 spin_unlock_irq(&lacl->se_lun_nacl->device_list_lock); 119 spin_unlock_irq(&lacl->se_lun_nacl->device_list_lock);
123 /* 120 /*
124 * Determine the actual mapped LUN value the user wants.. 121 * Determine the actual mapped LUN value the user wants..
125 * 122 *
126 * This is the value the SCSI Initiator actually sees for 123 * This is the value the SCSI Initiator actually sees for
127 * iscsi/$IQN/$TPGT/lun/lun_* on its SCSI Initiator Ports. 124 * iscsi/$IQN/$TPGT/lun/lun_* on its SCSI Initiator Ports.
128 */ 125 */
129 ret = core_dev_add_initiator_node_lun_acl(se_tpg, lacl, 126 ret = core_dev_add_initiator_node_lun_acl(se_tpg, lacl,
130 lun->unpacked_lun, lun_access); 127 lun->unpacked_lun, lun_access);
131 128
132 return (ret < 0) ? -EINVAL : 0; 129 return (ret < 0) ? -EINVAL : 0;
133 } 130 }
134 131
135 static int target_fabric_mappedlun_unlink( 132 static int target_fabric_mappedlun_unlink(
136 struct config_item *lun_acl_ci, 133 struct config_item *lun_acl_ci,
137 struct config_item *lun_ci) 134 struct config_item *lun_ci)
138 { 135 {
139 struct se_lun *lun; 136 struct se_lun *lun;
140 struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci), 137 struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci),
141 struct se_lun_acl, se_lun_group); 138 struct se_lun_acl, se_lun_group);
142 struct se_node_acl *nacl = lacl->se_lun_nacl; 139 struct se_node_acl *nacl = lacl->se_lun_nacl;
143 struct se_dev_entry *deve = &nacl->device_list[lacl->mapped_lun]; 140 struct se_dev_entry *deve = &nacl->device_list[lacl->mapped_lun];
144 struct se_portal_group *se_tpg; 141 struct se_portal_group *se_tpg;
145 /* 142 /*
146 * Determine if the underlying MappedLUN has already been released.. 143 * Determine if the underlying MappedLUN has already been released..
147 */ 144 */
148 if (!deve->se_lun) 145 if (!deve->se_lun)
149 return 0; 146 return 0;
150 147
151 lun = container_of(to_config_group(lun_ci), struct se_lun, lun_group); 148 lun = container_of(to_config_group(lun_ci), struct se_lun, lun_group);
152 se_tpg = lun->lun_sep->sep_tpg; 149 se_tpg = lun->lun_sep->sep_tpg;
153 150
154 core_dev_del_initiator_node_lun_acl(se_tpg, lun, lacl); 151 core_dev_del_initiator_node_lun_acl(se_tpg, lun, lacl);
155 return 0; 152 return 0;
156 } 153 }
157 154
158 CONFIGFS_EATTR_STRUCT(target_fabric_mappedlun, se_lun_acl); 155 CONFIGFS_EATTR_STRUCT(target_fabric_mappedlun, se_lun_acl);
159 #define TCM_MAPPEDLUN_ATTR(_name, _mode) \ 156 #define TCM_MAPPEDLUN_ATTR(_name, _mode) \
160 static struct target_fabric_mappedlun_attribute target_fabric_mappedlun_##_name = \ 157 static struct target_fabric_mappedlun_attribute target_fabric_mappedlun_##_name = \
161 __CONFIGFS_EATTR(_name, _mode, \ 158 __CONFIGFS_EATTR(_name, _mode, \
162 target_fabric_mappedlun_show_##_name, \ 159 target_fabric_mappedlun_show_##_name, \
163 target_fabric_mappedlun_store_##_name); 160 target_fabric_mappedlun_store_##_name);
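TCM_MAPPEDLUN_ATTR() is a thin wrapper over the configfs extended-attribute helpers. Assuming the usual __CONFIGFS_EATTR() definition from target/configfs_macros.h (a designated initializer filling the embedded struct configfs_attribute plus the show/store hooks), TCM_MAPPEDLUN_ATTR(write_protect, S_IRUGO | S_IWUSR) below amounts to:

static struct target_fabric_mappedlun_attribute
			target_fabric_mappedlun_write_protect = {
	.attr	= { .ca_name  = "write_protect",
		    .ca_mode  = S_IRUGO | S_IWUSR,
		    .ca_owner = THIS_MODULE },
	.show	= target_fabric_mappedlun_show_write_protect,
	.store	= target_fabric_mappedlun_store_write_protect,
};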
164 161
165 static ssize_t target_fabric_mappedlun_show_write_protect( 162 static ssize_t target_fabric_mappedlun_show_write_protect(
166 struct se_lun_acl *lacl, 163 struct se_lun_acl *lacl,
167 char *page) 164 char *page)
168 { 165 {
169 struct se_node_acl *se_nacl = lacl->se_lun_nacl; 166 struct se_node_acl *se_nacl = lacl->se_lun_nacl;
170 struct se_dev_entry *deve; 167 struct se_dev_entry *deve;
171 ssize_t len; 168 ssize_t len;
172 169
173 spin_lock_irq(&se_nacl->device_list_lock); 170 spin_lock_irq(&se_nacl->device_list_lock);
174 deve = &se_nacl->device_list[lacl->mapped_lun]; 171 deve = &se_nacl->device_list[lacl->mapped_lun];
175 len = sprintf(page, "%d\n", 172 len = sprintf(page, "%d\n",
176 (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) ? 173 (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) ?
177 1 : 0); 174 1 : 0);
178 spin_unlock_irq(&se_nacl->device_list_lock); 175 spin_unlock_irq(&se_nacl->device_list_lock);
179 176
180 return len; 177 return len;
181 } 178 }
182 179
183 static ssize_t target_fabric_mappedlun_store_write_protect( 180 static ssize_t target_fabric_mappedlun_store_write_protect(
184 struct se_lun_acl *lacl, 181 struct se_lun_acl *lacl,
185 const char *page, 182 const char *page,
186 size_t count) 183 size_t count)
187 { 184 {
188 struct se_node_acl *se_nacl = lacl->se_lun_nacl; 185 struct se_node_acl *se_nacl = lacl->se_lun_nacl;
189 struct se_portal_group *se_tpg = se_nacl->se_tpg; 186 struct se_portal_group *se_tpg = se_nacl->se_tpg;
190 unsigned long op; 187 unsigned long op;
191 188
192 if (strict_strtoul(page, 0, &op)) 189 if (strict_strtoul(page, 0, &op))
193 return -EINVAL; 190 return -EINVAL;
194 191
195 if ((op != 1) && (op != 0)) 192 if ((op != 1) && (op != 0))
196 return -EINVAL; 193 return -EINVAL;
197 194
198 core_update_device_list_access(lacl->mapped_lun, (op) ? 195 core_update_device_list_access(lacl->mapped_lun, (op) ?
199 TRANSPORT_LUNFLAGS_READ_ONLY : 196 TRANSPORT_LUNFLAGS_READ_ONLY :
200 TRANSPORT_LUNFLAGS_READ_WRITE, 197 TRANSPORT_LUNFLAGS_READ_WRITE,
201 lacl->se_lun_nacl); 198 lacl->se_lun_nacl);
202 199
203 pr_debug("%s_ConfigFS: Changed Initiator ACL: %s" 200 pr_debug("%s_ConfigFS: Changed Initiator ACL: %s"
204 " Mapped LUN: %u Write Protect bit to %s\n", 201 " Mapped LUN: %u Write Protect bit to %s\n",
205 se_tpg->se_tpg_tfo->get_fabric_name(), 202 se_tpg->se_tpg_tfo->get_fabric_name(),
206 lacl->initiatorname, lacl->mapped_lun, (op) ? "ON" : "OFF"); 203 lacl->initiatorname, lacl->mapped_lun, (op) ? "ON" : "OFF");
207 204
208 return count; 205 return count;
209 206
210 } 207 }
211 208
212 TCM_MAPPEDLUN_ATTR(write_protect, S_IRUGO | S_IWUSR); 209 TCM_MAPPEDLUN_ATTR(write_protect, S_IRUGO | S_IWUSR);
213 210
214 CONFIGFS_EATTR_OPS(target_fabric_mappedlun, se_lun_acl, se_lun_group); 211 CONFIGFS_EATTR_OPS(target_fabric_mappedlun, se_lun_acl, se_lun_group);
215 212
216 static void target_fabric_mappedlun_release(struct config_item *item) 213 static void target_fabric_mappedlun_release(struct config_item *item)
217 { 214 {
218 struct se_lun_acl *lacl = container_of(to_config_group(item), 215 struct se_lun_acl *lacl = container_of(to_config_group(item),
219 struct se_lun_acl, se_lun_group); 216 struct se_lun_acl, se_lun_group);
220 struct se_portal_group *se_tpg = lacl->se_lun_nacl->se_tpg; 217 struct se_portal_group *se_tpg = lacl->se_lun_nacl->se_tpg;
221 218
222 core_dev_free_initiator_node_lun_acl(se_tpg, lacl); 219 core_dev_free_initiator_node_lun_acl(se_tpg, lacl);
223 } 220 }
224 221
225 static struct configfs_attribute *target_fabric_mappedlun_attrs[] = { 222 static struct configfs_attribute *target_fabric_mappedlun_attrs[] = {
226 &target_fabric_mappedlun_write_protect.attr, 223 &target_fabric_mappedlun_write_protect.attr,
227 NULL, 224 NULL,
228 }; 225 };
229 226
230 static struct configfs_item_operations target_fabric_mappedlun_item_ops = { 227 static struct configfs_item_operations target_fabric_mappedlun_item_ops = {
231 .release = target_fabric_mappedlun_release, 228 .release = target_fabric_mappedlun_release,
232 .show_attribute = target_fabric_mappedlun_attr_show, 229 .show_attribute = target_fabric_mappedlun_attr_show,
233 .store_attribute = target_fabric_mappedlun_attr_store, 230 .store_attribute = target_fabric_mappedlun_attr_store,
234 .allow_link = target_fabric_mappedlun_link, 231 .allow_link = target_fabric_mappedlun_link,
235 .drop_link = target_fabric_mappedlun_unlink, 232 .drop_link = target_fabric_mappedlun_unlink,
236 }; 233 };
237 234
238 TF_CIT_SETUP(tpg_mappedlun, &target_fabric_mappedlun_item_ops, NULL, 235 TF_CIT_SETUP(tpg_mappedlun, &target_fabric_mappedlun_item_ops, NULL,
239 target_fabric_mappedlun_attrs); 236 target_fabric_mappedlun_attrs);
240 237
241 /* End of tfc_tpg_mappedlun_cit */ 238 /* End of tfc_tpg_mappedlun_cit */
242 239
243 /* Start of tfc_tpg_mappedlun_port_cit */ 240 /* Start of tfc_tpg_mappedlun_port_cit */
244 241
245 static struct config_group *target_core_mappedlun_stat_mkdir( 242 static struct config_group *target_core_mappedlun_stat_mkdir(
246 struct config_group *group, 243 struct config_group *group,
247 const char *name) 244 const char *name)
248 { 245 {
249 return ERR_PTR(-ENOSYS); 246 return ERR_PTR(-ENOSYS);
250 } 247 }
251 248
252 static void target_core_mappedlun_stat_rmdir( 249 static void target_core_mappedlun_stat_rmdir(
253 struct config_group *group, 250 struct config_group *group,
254 struct config_item *item) 251 struct config_item *item)
255 { 252 {
256 return; 253 return;
257 } 254 }
258 255
259 static struct configfs_group_operations target_fabric_mappedlun_stat_group_ops = { 256 static struct configfs_group_operations target_fabric_mappedlun_stat_group_ops = {
260 .make_group = target_core_mappedlun_stat_mkdir, 257 .make_group = target_core_mappedlun_stat_mkdir,
261 .drop_item = target_core_mappedlun_stat_rmdir, 258 .drop_item = target_core_mappedlun_stat_rmdir,
262 }; 259 };
263 260
264 TF_CIT_SETUP(tpg_mappedlun_stat, NULL, &target_fabric_mappedlun_stat_group_ops, 261 TF_CIT_SETUP(tpg_mappedlun_stat, NULL, &target_fabric_mappedlun_stat_group_ops,
265 NULL); 262 NULL);
266 263
267 /* End of tfc_tpg_mappedlun_port_cit */ 264 /* End of tfc_tpg_mappedlun_port_cit */
268 265
269 /* Start of tfc_tpg_nacl_attrib_cit */ 266 /* Start of tfc_tpg_nacl_attrib_cit */
270 267
271 CONFIGFS_EATTR_OPS(target_fabric_nacl_attrib, se_node_acl, acl_attrib_group); 268 CONFIGFS_EATTR_OPS(target_fabric_nacl_attrib, se_node_acl, acl_attrib_group);
272 269
273 static struct configfs_item_operations target_fabric_nacl_attrib_item_ops = { 270 static struct configfs_item_operations target_fabric_nacl_attrib_item_ops = {
274 .show_attribute = target_fabric_nacl_attrib_attr_show, 271 .show_attribute = target_fabric_nacl_attrib_attr_show,
275 .store_attribute = target_fabric_nacl_attrib_attr_store, 272 .store_attribute = target_fabric_nacl_attrib_attr_store,
276 }; 273 };
277 274
278 TF_CIT_SETUP(tpg_nacl_attrib, &target_fabric_nacl_attrib_item_ops, NULL, NULL); 275 TF_CIT_SETUP(tpg_nacl_attrib, &target_fabric_nacl_attrib_item_ops, NULL, NULL);
279 276
280 /* End of tfc_tpg_nacl_attrib_cit */ 277 /* End of tfc_tpg_nacl_attrib_cit */
281 278
282 /* Start of tfc_tpg_nacl_auth_cit */ 279 /* Start of tfc_tpg_nacl_auth_cit */
283 280
284 CONFIGFS_EATTR_OPS(target_fabric_nacl_auth, se_node_acl, acl_auth_group); 281 CONFIGFS_EATTR_OPS(target_fabric_nacl_auth, se_node_acl, acl_auth_group);
285 282
286 static struct configfs_item_operations target_fabric_nacl_auth_item_ops = { 283 static struct configfs_item_operations target_fabric_nacl_auth_item_ops = {
287 .show_attribute = target_fabric_nacl_auth_attr_show, 284 .show_attribute = target_fabric_nacl_auth_attr_show,
288 .store_attribute = target_fabric_nacl_auth_attr_store, 285 .store_attribute = target_fabric_nacl_auth_attr_store,
289 }; 286 };
290 287
291 TF_CIT_SETUP(tpg_nacl_auth, &target_fabric_nacl_auth_item_ops, NULL, NULL); 288 TF_CIT_SETUP(tpg_nacl_auth, &target_fabric_nacl_auth_item_ops, NULL, NULL);
292 289
293 /* End of tfc_tpg_nacl_auth_cit */ 290 /* End of tfc_tpg_nacl_auth_cit */
294 291
295 /* Start of tfc_tpg_nacl_param_cit */ 292 /* Start of tfc_tpg_nacl_param_cit */
296 293
297 CONFIGFS_EATTR_OPS(target_fabric_nacl_param, se_node_acl, acl_param_group); 294 CONFIGFS_EATTR_OPS(target_fabric_nacl_param, se_node_acl, acl_param_group);
298 295
299 static struct configfs_item_operations target_fabric_nacl_param_item_ops = { 296 static struct configfs_item_operations target_fabric_nacl_param_item_ops = {
300 .show_attribute = target_fabric_nacl_param_attr_show, 297 .show_attribute = target_fabric_nacl_param_attr_show,
301 .store_attribute = target_fabric_nacl_param_attr_store, 298 .store_attribute = target_fabric_nacl_param_attr_store,
302 }; 299 };
303 300
304 TF_CIT_SETUP(tpg_nacl_param, &target_fabric_nacl_param_item_ops, NULL, NULL); 301 TF_CIT_SETUP(tpg_nacl_param, &target_fabric_nacl_param_item_ops, NULL, NULL);
305 302
306 /* End of tfc_tpg_nacl_param_cit */ 303 /* End of tfc_tpg_nacl_param_cit */
307 304
308 /* Start of tfc_tpg_nacl_base_cit */ 305 /* Start of tfc_tpg_nacl_base_cit */
309 306
310 CONFIGFS_EATTR_OPS(target_fabric_nacl_base, se_node_acl, acl_group); 307 CONFIGFS_EATTR_OPS(target_fabric_nacl_base, se_node_acl, acl_group);
311 308
312 static struct config_group *target_fabric_make_mappedlun( 309 static struct config_group *target_fabric_make_mappedlun(
313 struct config_group *group, 310 struct config_group *group,
314 const char *name) 311 const char *name)
315 { 312 {
316 struct se_node_acl *se_nacl = container_of(group, 313 struct se_node_acl *se_nacl = container_of(group,
317 struct se_node_acl, acl_group); 314 struct se_node_acl, acl_group);
318 struct se_portal_group *se_tpg = se_nacl->se_tpg; 315 struct se_portal_group *se_tpg = se_nacl->se_tpg;
319 struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; 316 struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
320 struct se_lun_acl *lacl; 317 struct se_lun_acl *lacl;
321 struct config_item *acl_ci; 318 struct config_item *acl_ci;
322 struct config_group *lacl_cg = NULL, *ml_stat_grp = NULL; 319 struct config_group *lacl_cg = NULL, *ml_stat_grp = NULL;
323 char *buf; 320 char *buf;
324 unsigned long mapped_lun; 321 unsigned long mapped_lun;
325 int ret = 0; 322 int ret = 0;
326 323
327 acl_ci = &group->cg_item; 324 acl_ci = &group->cg_item;
328 if (!acl_ci) { 325 if (!acl_ci) {
329 pr_err("Unable to locatel acl_ci\n"); 326 pr_err("Unable to locatel acl_ci\n");
330 return NULL; 327 return NULL;
331 } 328 }
332 329
333 buf = kzalloc(strlen(name) + 1, GFP_KERNEL); 330 buf = kzalloc(strlen(name) + 1, GFP_KERNEL);
334 if (!buf) { 331 if (!buf) {
335 pr_err("Unable to allocate memory for name buf\n"); 332 pr_err("Unable to allocate memory for name buf\n");
336 return ERR_PTR(-ENOMEM); 333 return ERR_PTR(-ENOMEM);
337 } 334 }
338 snprintf(buf, strlen(name) + 1, "%s", name); 335 snprintf(buf, strlen(name) + 1, "%s", name);
339 /* 336 /*
340 * Make sure user is creating iscsi/$IQN/$TPGT/acls/$INITIATOR/lun_$ID. 337 * Make sure user is creating iscsi/$IQN/$TPGT/acls/$INITIATOR/lun_$ID.
341 */ 338 */
342 if (strstr(buf, "lun_") != buf) { 339 if (strstr(buf, "lun_") != buf) {
343 pr_err("Unable to locate \"lun_\" from buf: %s" 340 pr_err("Unable to locate \"lun_\" from buf: %s"
344 " name: %s\n", buf, name); 341 " name: %s\n", buf, name);
345 ret = -EINVAL; 342 ret = -EINVAL;
346 goto out; 343 goto out;
347 } 344 }
348 /* 345 /*
349 * Determine the Mapped LUN value. This is what the SCSI Initiator 346 * Determine the Mapped LUN value. This is what the SCSI Initiator
350 * Port will actually see. 347 * Port will actually see.
351 */ 348 */
352 if (strict_strtoul(buf + 4, 0, &mapped_lun) || mapped_lun > UINT_MAX) { 349 if (strict_strtoul(buf + 4, 0, &mapped_lun) || mapped_lun > UINT_MAX) {
353 ret = -EINVAL; 350 ret = -EINVAL;
354 goto out; 351 goto out;
355 } 352 }
356 353
357 lacl = core_dev_init_initiator_node_lun_acl(se_tpg, mapped_lun, 354 lacl = core_dev_init_initiator_node_lun_acl(se_tpg, mapped_lun,
358 config_item_name(acl_ci), &ret); 355 config_item_name(acl_ci), &ret);
359 if (!lacl) { 356 if (!lacl) {
360 ret = -EINVAL; 357 ret = -EINVAL;
361 goto out; 358 goto out;
362 } 359 }
363 360
364 lacl_cg = &lacl->se_lun_group; 361 lacl_cg = &lacl->se_lun_group;
365 lacl_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, 362 lacl_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
366 GFP_KERNEL); 363 GFP_KERNEL);
367 if (!lacl_cg->default_groups) { 364 if (!lacl_cg->default_groups) {
368 pr_err("Unable to allocate lacl_cg->default_groups\n"); 365 pr_err("Unable to allocate lacl_cg->default_groups\n");
369 ret = -ENOMEM; 366 ret = -ENOMEM;
370 goto out; 367 goto out;
371 } 368 }
372 369
373 config_group_init_type_name(&lacl->se_lun_group, name, 370 config_group_init_type_name(&lacl->se_lun_group, name,
374 &TF_CIT_TMPL(tf)->tfc_tpg_mappedlun_cit); 371 &TF_CIT_TMPL(tf)->tfc_tpg_mappedlun_cit);
375 config_group_init_type_name(&lacl->ml_stat_grps.stat_group, 372 config_group_init_type_name(&lacl->ml_stat_grps.stat_group,
376 "statistics", &TF_CIT_TMPL(tf)->tfc_tpg_mappedlun_stat_cit); 373 "statistics", &TF_CIT_TMPL(tf)->tfc_tpg_mappedlun_stat_cit);
377 lacl_cg->default_groups[0] = &lacl->ml_stat_grps.stat_group; 374 lacl_cg->default_groups[0] = &lacl->ml_stat_grps.stat_group;
378 lacl_cg->default_groups[1] = NULL; 375 lacl_cg->default_groups[1] = NULL;
379 376
380 ml_stat_grp = &lacl->ml_stat_grps.stat_group; 377 ml_stat_grp = &lacl->ml_stat_grps.stat_group;
381 ml_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3, 378 ml_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3,
382 GFP_KERNEL); 379 GFP_KERNEL);
383 if (!ml_stat_grp->default_groups) { 380 if (!ml_stat_grp->default_groups) {
384 pr_err("Unable to allocate ml_stat_grp->default_groups\n"); 381 pr_err("Unable to allocate ml_stat_grp->default_groups\n");
385 ret = -ENOMEM; 382 ret = -ENOMEM;
386 goto out; 383 goto out;
387 } 384 }
388 target_stat_setup_mappedlun_default_groups(lacl); 385 target_stat_setup_mappedlun_default_groups(lacl);
389 386
390 kfree(buf); 387 kfree(buf);
391 return &lacl->se_lun_group; 388 return &lacl->se_lun_group;
392 out: 389 out:
393 if (lacl_cg) 390 if (lacl_cg)
394 kfree(lacl_cg->default_groups); 391 kfree(lacl_cg->default_groups);
395 kfree(buf); 392 kfree(buf);
396 return ERR_PTR(ret); 393 return ERR_PTR(ret);
397 } 394 }
398 395
399 static void target_fabric_drop_mappedlun( 396 static void target_fabric_drop_mappedlun(
400 struct config_group *group, 397 struct config_group *group,
401 struct config_item *item) 398 struct config_item *item)
402 { 399 {
403 struct se_lun_acl *lacl = container_of(to_config_group(item), 400 struct se_lun_acl *lacl = container_of(to_config_group(item),
404 struct se_lun_acl, se_lun_group); 401 struct se_lun_acl, se_lun_group);
405 struct config_item *df_item; 402 struct config_item *df_item;
406 struct config_group *lacl_cg = NULL, *ml_stat_grp = NULL; 403 struct config_group *lacl_cg = NULL, *ml_stat_grp = NULL;
407 int i; 404 int i;
408 405
409 ml_stat_grp = &lacl->ml_stat_grps.stat_group; 406 ml_stat_grp = &lacl->ml_stat_grps.stat_group;
410 for (i = 0; ml_stat_grp->default_groups[i]; i++) { 407 for (i = 0; ml_stat_grp->default_groups[i]; i++) {
411 df_item = &ml_stat_grp->default_groups[i]->cg_item; 408 df_item = &ml_stat_grp->default_groups[i]->cg_item;
412 ml_stat_grp->default_groups[i] = NULL; 409 ml_stat_grp->default_groups[i] = NULL;
413 config_item_put(df_item); 410 config_item_put(df_item);
414 } 411 }
415 kfree(ml_stat_grp->default_groups); 412 kfree(ml_stat_grp->default_groups);
416 413
417 lacl_cg = &lacl->se_lun_group; 414 lacl_cg = &lacl->se_lun_group;
418 for (i = 0; lacl_cg->default_groups[i]; i++) { 415 for (i = 0; lacl_cg->default_groups[i]; i++) {
419 df_item = &lacl_cg->default_groups[i]->cg_item; 416 df_item = &lacl_cg->default_groups[i]->cg_item;
420 lacl_cg->default_groups[i] = NULL; 417 lacl_cg->default_groups[i] = NULL;
421 config_item_put(df_item); 418 config_item_put(df_item);
422 } 419 }
423 kfree(lacl_cg->default_groups); 420 kfree(lacl_cg->default_groups);
424 421
425 config_item_put(item); 422 config_item_put(item);
426 } 423 }
427 424
428 static void target_fabric_nacl_base_release(struct config_item *item) 425 static void target_fabric_nacl_base_release(struct config_item *item)
429 { 426 {
430 struct se_node_acl *se_nacl = container_of(to_config_group(item), 427 struct se_node_acl *se_nacl = container_of(to_config_group(item),
431 struct se_node_acl, acl_group); 428 struct se_node_acl, acl_group);
432 struct se_portal_group *se_tpg = se_nacl->se_tpg; 429 struct se_portal_group *se_tpg = se_nacl->se_tpg;
433 struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; 430 struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
434 431
435 tf->tf_ops.fabric_drop_nodeacl(se_nacl); 432 tf->tf_ops.fabric_drop_nodeacl(se_nacl);
436 } 433 }
437 434
438 static struct configfs_item_operations target_fabric_nacl_base_item_ops = { 435 static struct configfs_item_operations target_fabric_nacl_base_item_ops = {
439 .release = target_fabric_nacl_base_release, 436 .release = target_fabric_nacl_base_release,
440 .show_attribute = target_fabric_nacl_base_attr_show, 437 .show_attribute = target_fabric_nacl_base_attr_show,
441 .store_attribute = target_fabric_nacl_base_attr_store, 438 .store_attribute = target_fabric_nacl_base_attr_store,
442 }; 439 };
443 440
444 static struct configfs_group_operations target_fabric_nacl_base_group_ops = { 441 static struct configfs_group_operations target_fabric_nacl_base_group_ops = {
445 .make_group = target_fabric_make_mappedlun, 442 .make_group = target_fabric_make_mappedlun,
446 .drop_item = target_fabric_drop_mappedlun, 443 .drop_item = target_fabric_drop_mappedlun,
447 }; 444 };
448 445
449 TF_CIT_SETUP(tpg_nacl_base, &target_fabric_nacl_base_item_ops, 446 TF_CIT_SETUP(tpg_nacl_base, &target_fabric_nacl_base_item_ops,
450 &target_fabric_nacl_base_group_ops, NULL); 447 &target_fabric_nacl_base_group_ops, NULL);
451 448
452 /* End of tfc_tpg_nacl_base_cit */ 449 /* End of tfc_tpg_nacl_base_cit */
453 450
454 /* Start of tfc_node_fabric_stats_cit */ 451 /* Start of tfc_node_fabric_stats_cit */
455 /* 452 /*
456 * This is used as a placeholder for struct se_node_acl->acl_fabric_stat_group 453 * This is used as a placeholder for struct se_node_acl->acl_fabric_stat_group
457 * to allow fabrics access to ->acl_fabric_stat_group->default_groups[] 454 * to allow fabrics access to ->acl_fabric_stat_group->default_groups[]
458 */ 455 */
459 TF_CIT_SETUP(tpg_nacl_stat, NULL, NULL, NULL); 456 TF_CIT_SETUP(tpg_nacl_stat, NULL, NULL, NULL);
460 457
461 /* End of tfc_wwn_fabric_stats_cit */ 458 /* End of tfc_wwn_fabric_stats_cit */
462 459
463 /* Start of tfc_tpg_nacl_cit */ 460 /* Start of tfc_tpg_nacl_cit */
464 461
465 static struct config_group *target_fabric_make_nodeacl( 462 static struct config_group *target_fabric_make_nodeacl(
466 struct config_group *group, 463 struct config_group *group,
467 const char *name) 464 const char *name)
468 { 465 {
469 struct se_portal_group *se_tpg = container_of(group, 466 struct se_portal_group *se_tpg = container_of(group,
470 struct se_portal_group, tpg_acl_group); 467 struct se_portal_group, tpg_acl_group);
471 struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; 468 struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
472 struct se_node_acl *se_nacl; 469 struct se_node_acl *se_nacl;
473 struct config_group *nacl_cg; 470 struct config_group *nacl_cg;
474 471
475 if (!tf->tf_ops.fabric_make_nodeacl) { 472 if (!tf->tf_ops.fabric_make_nodeacl) {
476 pr_err("tf->tf_ops.fabric_make_nodeacl is NULL\n"); 473 pr_err("tf->tf_ops.fabric_make_nodeacl is NULL\n");
477 return ERR_PTR(-ENOSYS); 474 return ERR_PTR(-ENOSYS);
478 } 475 }
479 476
480 se_nacl = tf->tf_ops.fabric_make_nodeacl(se_tpg, group, name); 477 se_nacl = tf->tf_ops.fabric_make_nodeacl(se_tpg, group, name);
481 if (IS_ERR(se_nacl)) 478 if (IS_ERR(se_nacl))
482 return ERR_CAST(se_nacl); 479 return ERR_CAST(se_nacl);
483 480
484 nacl_cg = &se_nacl->acl_group; 481 nacl_cg = &se_nacl->acl_group;
485 nacl_cg->default_groups = se_nacl->acl_default_groups; 482 nacl_cg->default_groups = se_nacl->acl_default_groups;
486 nacl_cg->default_groups[0] = &se_nacl->acl_attrib_group; 483 nacl_cg->default_groups[0] = &se_nacl->acl_attrib_group;
487 nacl_cg->default_groups[1] = &se_nacl->acl_auth_group; 484 nacl_cg->default_groups[1] = &se_nacl->acl_auth_group;
488 nacl_cg->default_groups[2] = &se_nacl->acl_param_group; 485 nacl_cg->default_groups[2] = &se_nacl->acl_param_group;
489 nacl_cg->default_groups[3] = &se_nacl->acl_fabric_stat_group; 486 nacl_cg->default_groups[3] = &se_nacl->acl_fabric_stat_group;
490 nacl_cg->default_groups[4] = NULL; 487 nacl_cg->default_groups[4] = NULL;
491 488
492 config_group_init_type_name(&se_nacl->acl_group, name, 489 config_group_init_type_name(&se_nacl->acl_group, name,
493 &TF_CIT_TMPL(tf)->tfc_tpg_nacl_base_cit); 490 &TF_CIT_TMPL(tf)->tfc_tpg_nacl_base_cit);
494 config_group_init_type_name(&se_nacl->acl_attrib_group, "attrib", 491 config_group_init_type_name(&se_nacl->acl_attrib_group, "attrib",
495 &TF_CIT_TMPL(tf)->tfc_tpg_nacl_attrib_cit); 492 &TF_CIT_TMPL(tf)->tfc_tpg_nacl_attrib_cit);
496 config_group_init_type_name(&se_nacl->acl_auth_group, "auth", 493 config_group_init_type_name(&se_nacl->acl_auth_group, "auth",
497 &TF_CIT_TMPL(tf)->tfc_tpg_nacl_auth_cit); 494 &TF_CIT_TMPL(tf)->tfc_tpg_nacl_auth_cit);
498 config_group_init_type_name(&se_nacl->acl_param_group, "param", 495 config_group_init_type_name(&se_nacl->acl_param_group, "param",
499 &TF_CIT_TMPL(tf)->tfc_tpg_nacl_param_cit); 496 &TF_CIT_TMPL(tf)->tfc_tpg_nacl_param_cit);
500 config_group_init_type_name(&se_nacl->acl_fabric_stat_group, 497 config_group_init_type_name(&se_nacl->acl_fabric_stat_group,
501 "fabric_statistics", 498 "fabric_statistics",
502 &TF_CIT_TMPL(tf)->tfc_tpg_nacl_stat_cit); 499 &TF_CIT_TMPL(tf)->tfc_tpg_nacl_stat_cit);
503 500
504 return &se_nacl->acl_group; 501 return &se_nacl->acl_group;
505 } 502 }
506 503
507 static void target_fabric_drop_nodeacl( 504 static void target_fabric_drop_nodeacl(
508 struct config_group *group, 505 struct config_group *group,
509 struct config_item *item) 506 struct config_item *item)
510 { 507 {
511 struct se_node_acl *se_nacl = container_of(to_config_group(item), 508 struct se_node_acl *se_nacl = container_of(to_config_group(item),
512 struct se_node_acl, acl_group); 509 struct se_node_acl, acl_group);
513 struct config_item *df_item; 510 struct config_item *df_item;
514 struct config_group *nacl_cg; 511 struct config_group *nacl_cg;
515 int i; 512 int i;
516 513
517 nacl_cg = &se_nacl->acl_group; 514 nacl_cg = &se_nacl->acl_group;
518 for (i = 0; nacl_cg->default_groups[i]; i++) { 515 for (i = 0; nacl_cg->default_groups[i]; i++) {
519 df_item = &nacl_cg->default_groups[i]->cg_item; 516 df_item = &nacl_cg->default_groups[i]->cg_item;
520 nacl_cg->default_groups[i] = NULL; 517 nacl_cg->default_groups[i] = NULL;
521 config_item_put(df_item); 518 config_item_put(df_item);
522 } 519 }
523 /* 520 /*
524 * struct se_node_acl free is done in target_fabric_nacl_base_release() 521 * struct se_node_acl free is done in target_fabric_nacl_base_release()
525 */ 522 */
526 config_item_put(item); 523 config_item_put(item);
527 } 524 }
528 525
529 static struct configfs_group_operations target_fabric_nacl_group_ops = { 526 static struct configfs_group_operations target_fabric_nacl_group_ops = {
530 .make_group = target_fabric_make_nodeacl, 527 .make_group = target_fabric_make_nodeacl,
531 .drop_item = target_fabric_drop_nodeacl, 528 .drop_item = target_fabric_drop_nodeacl,
532 }; 529 };
533 530
534 TF_CIT_SETUP(tpg_nacl, NULL, &target_fabric_nacl_group_ops, NULL); 531 TF_CIT_SETUP(tpg_nacl, NULL, &target_fabric_nacl_group_ops, NULL);
535 532
536 /* End of tfc_tpg_nacl_cit */ 533 /* End of tfc_tpg_nacl_cit */
537 534
538 /* Start of tfc_tpg_np_base_cit */ 535 /* Start of tfc_tpg_np_base_cit */
539 536
540 CONFIGFS_EATTR_OPS(target_fabric_np_base, se_tpg_np, tpg_np_group); 537 CONFIGFS_EATTR_OPS(target_fabric_np_base, se_tpg_np, tpg_np_group);
541 538
542 static void target_fabric_np_base_release(struct config_item *item) 539 static void target_fabric_np_base_release(struct config_item *item)
543 { 540 {
544 struct se_tpg_np *se_tpg_np = container_of(to_config_group(item), 541 struct se_tpg_np *se_tpg_np = container_of(to_config_group(item),
545 struct se_tpg_np, tpg_np_group); 542 struct se_tpg_np, tpg_np_group);
546 struct se_portal_group *se_tpg = se_tpg_np->tpg_np_parent; 543 struct se_portal_group *se_tpg = se_tpg_np->tpg_np_parent;
547 struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; 544 struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
548 545
549 tf->tf_ops.fabric_drop_np(se_tpg_np); 546 tf->tf_ops.fabric_drop_np(se_tpg_np);
550 } 547 }
551 548
552 static struct configfs_item_operations target_fabric_np_base_item_ops = { 549 static struct configfs_item_operations target_fabric_np_base_item_ops = {
553 .release = target_fabric_np_base_release, 550 .release = target_fabric_np_base_release,
554 .show_attribute = target_fabric_np_base_attr_show, 551 .show_attribute = target_fabric_np_base_attr_show,
555 .store_attribute = target_fabric_np_base_attr_store, 552 .store_attribute = target_fabric_np_base_attr_store,
556 }; 553 };
557 554
558 TF_CIT_SETUP(tpg_np_base, &target_fabric_np_base_item_ops, NULL, NULL); 555 TF_CIT_SETUP(tpg_np_base, &target_fabric_np_base_item_ops, NULL, NULL);
559 556
560 /* End of tfc_tpg_np_base_cit */ 557 /* End of tfc_tpg_np_base_cit */
561 558
562 /* Start of tfc_tpg_np_cit */ 559 /* Start of tfc_tpg_np_cit */
563 560
564 static struct config_group *target_fabric_make_np( 561 static struct config_group *target_fabric_make_np(
565 struct config_group *group, 562 struct config_group *group,
566 const char *name) 563 const char *name)
567 { 564 {
568 struct se_portal_group *se_tpg = container_of(group, 565 struct se_portal_group *se_tpg = container_of(group,
569 struct se_portal_group, tpg_np_group); 566 struct se_portal_group, tpg_np_group);
570 struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; 567 struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
571 struct se_tpg_np *se_tpg_np; 568 struct se_tpg_np *se_tpg_np;
572 569
573 if (!tf->tf_ops.fabric_make_np) { 570 if (!tf->tf_ops.fabric_make_np) {
574 pr_err("tf->tf_ops.fabric_make_np is NULL\n"); 571 pr_err("tf->tf_ops.fabric_make_np is NULL\n");
575 return ERR_PTR(-ENOSYS); 572 return ERR_PTR(-ENOSYS);
576 } 573 }
577 574
578 se_tpg_np = tf->tf_ops.fabric_make_np(se_tpg, group, name); 575 se_tpg_np = tf->tf_ops.fabric_make_np(se_tpg, group, name);
579 if (!se_tpg_np || IS_ERR(se_tpg_np)) 576 if (!se_tpg_np || IS_ERR(se_tpg_np))
580 return ERR_PTR(-EINVAL); 577 return ERR_PTR(-EINVAL);
581 578
582 se_tpg_np->tpg_np_parent = se_tpg; 579 se_tpg_np->tpg_np_parent = se_tpg;
583 config_group_init_type_name(&se_tpg_np->tpg_np_group, name, 580 config_group_init_type_name(&se_tpg_np->tpg_np_group, name,
584 &TF_CIT_TMPL(tf)->tfc_tpg_np_base_cit); 581 &TF_CIT_TMPL(tf)->tfc_tpg_np_base_cit);
585 582
586 return &se_tpg_np->tpg_np_group; 583 return &se_tpg_np->tpg_np_group;
587 } 584 }
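target_fabric_make_np() delegates portal allocation entirely to the fabric module and only requires a struct se_tpg_np (or NULL/ERR_PTR, both of which the caller rejects) in return. A hypothetical fabric_make_np() callback, using an illustrative fabric-private wrapper struct rather than any in-tree module's code, could look like:

struct example_tpg_np {
	/* fabric-private network portal state (address, socket, ...) */
	struct se_tpg_np se_tpg_np;
};

static struct se_tpg_np *example_make_np(struct se_portal_group *se_tpg,
					 struct config_group *group,
					 const char *name)
{
	struct example_tpg_np *np;

	np = kzalloc(sizeof(*np), GFP_KERNEL);
	if (!np)
		return ERR_PTR(-ENOMEM);
	/* parse 'name' into an address/port and bring the portal up here */
	return &np->se_tpg_np;
}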
588 585
589 static void target_fabric_drop_np( 586 static void target_fabric_drop_np(
590 struct config_group *group, 587 struct config_group *group,
591 struct config_item *item) 588 struct config_item *item)
592 { 589 {
593 /* 590 /*
594 * struct se_tpg_np is released via target_fabric_np_base_release() 591 * struct se_tpg_np is released via target_fabric_np_base_release()
595 */ 592 */
596 config_item_put(item); 593 config_item_put(item);
597 } 594 }
598 595
599 static struct configfs_group_operations target_fabric_np_group_ops = { 596 static struct configfs_group_operations target_fabric_np_group_ops = {
600 .make_group = &target_fabric_make_np, 597 .make_group = &target_fabric_make_np,
601 .drop_item = &target_fabric_drop_np, 598 .drop_item = &target_fabric_drop_np,
602 }; 599 };
603 600
604 TF_CIT_SETUP(tpg_np, NULL, &target_fabric_np_group_ops, NULL); 601 TF_CIT_SETUP(tpg_np, NULL, &target_fabric_np_group_ops, NULL);
605 602
606 /* End of tfc_tpg_np_cit */ 603 /* End of tfc_tpg_np_cit */
607 604
608 /* Start of tfc_tpg_port_cit */ 605 /* Start of tfc_tpg_port_cit */
609 606
610 CONFIGFS_EATTR_STRUCT(target_fabric_port, se_lun); 607 CONFIGFS_EATTR_STRUCT(target_fabric_port, se_lun);
611 #define TCM_PORT_ATTR(_name, _mode) \ 608 #define TCM_PORT_ATTR(_name, _mode) \
612 static struct target_fabric_port_attribute target_fabric_port_##_name = \ 609 static struct target_fabric_port_attribute target_fabric_port_##_name = \
613 __CONFIGFS_EATTR(_name, _mode, \ 610 __CONFIGFS_EATTR(_name, _mode, \
614 target_fabric_port_show_attr_##_name, \ 611 target_fabric_port_show_attr_##_name, \
615 target_fabric_port_store_attr_##_name); 612 target_fabric_port_store_attr_##_name);
616 613
617 #define TCM_PORT_ATTR_RO(_name) \ 614 #define TCM_PORT_ATTR_RO(_name) \
618 __CONFIGFS_EATTR_RO(_name, \ 615 __CONFIGFS_EATTR_RO(_name, \
619 target_fabric_port_show_attr_##_name); 616 target_fabric_port_show_attr_##_name);
620 617
621 /* 618 /*
622 * alua_tg_pt_gp 619 * alua_tg_pt_gp
623 */ 620 */
624 static ssize_t target_fabric_port_show_attr_alua_tg_pt_gp( 621 static ssize_t target_fabric_port_show_attr_alua_tg_pt_gp(
625 struct se_lun *lun, 622 struct se_lun *lun,
626 char *page) 623 char *page)
627 { 624 {
628 if (!lun || !lun->lun_sep) 625 if (!lun || !lun->lun_sep)
629 return -ENODEV; 626 return -ENODEV;
630 627
631 return core_alua_show_tg_pt_gp_info(lun->lun_sep, page); 628 return core_alua_show_tg_pt_gp_info(lun->lun_sep, page);
632 } 629 }
633 630
634 static ssize_t target_fabric_port_store_attr_alua_tg_pt_gp( 631 static ssize_t target_fabric_port_store_attr_alua_tg_pt_gp(
635 struct se_lun *lun, 632 struct se_lun *lun,
636 const char *page, 633 const char *page,
637 size_t count) 634 size_t count)
638 { 635 {
639 if (!lun || !lun->lun_sep) 636 if (!lun || !lun->lun_sep)
640 return -ENODEV; 637 return -ENODEV;
641 638
642 return core_alua_store_tg_pt_gp_info(lun->lun_sep, page, count); 639 return core_alua_store_tg_pt_gp_info(lun->lun_sep, page, count);
643 } 640 }
644 641
645 TCM_PORT_ATTR(alua_tg_pt_gp, S_IRUGO | S_IWUSR); 642 TCM_PORT_ATTR(alua_tg_pt_gp, S_IRUGO | S_IWUSR);
646 643
647 /* 644 /*
648 * alua_tg_pt_offline 645 * alua_tg_pt_offline
649 */ 646 */
650 static ssize_t target_fabric_port_show_attr_alua_tg_pt_offline( 647 static ssize_t target_fabric_port_show_attr_alua_tg_pt_offline(
651 struct se_lun *lun, 648 struct se_lun *lun,
652 char *page) 649 char *page)
653 { 650 {
654 if (!lun || !lun->lun_sep) 651 if (!lun || !lun->lun_sep)
655 return -ENODEV; 652 return -ENODEV;
656 653
657 return core_alua_show_offline_bit(lun, page); 654 return core_alua_show_offline_bit(lun, page);
658 } 655 }
659 656
660 static ssize_t target_fabric_port_store_attr_alua_tg_pt_offline( 657 static ssize_t target_fabric_port_store_attr_alua_tg_pt_offline(
661 struct se_lun *lun, 658 struct se_lun *lun,
662 const char *page, 659 const char *page,
663 size_t count) 660 size_t count)
664 { 661 {
665 if (!lun || !lun->lun_sep) 662 if (!lun || !lun->lun_sep)
666 return -ENODEV; 663 return -ENODEV;
667 664
668 return core_alua_store_offline_bit(lun, page, count); 665 return core_alua_store_offline_bit(lun, page, count);
669 } 666 }
670 667
671 TCM_PORT_ATTR(alua_tg_pt_offline, S_IRUGO | S_IWUSR); 668 TCM_PORT_ATTR(alua_tg_pt_offline, S_IRUGO | S_IWUSR);
672 669
673 /* 670 /*
674 * alua_tg_pt_status 671 * alua_tg_pt_status
675 */ 672 */
676 static ssize_t target_fabric_port_show_attr_alua_tg_pt_status( 673 static ssize_t target_fabric_port_show_attr_alua_tg_pt_status(
677 struct se_lun *lun, 674 struct se_lun *lun,
678 char *page) 675 char *page)
679 { 676 {
680 if (!lun || !lun->lun_sep) 677 if (!lun || !lun->lun_sep)
681 return -ENODEV; 678 return -ENODEV;
682 679
683 return core_alua_show_secondary_status(lun, page); 680 return core_alua_show_secondary_status(lun, page);
684 } 681 }
685 682
686 static ssize_t target_fabric_port_store_attr_alua_tg_pt_status( 683 static ssize_t target_fabric_port_store_attr_alua_tg_pt_status(
687 struct se_lun *lun, 684 struct se_lun *lun,
688 const char *page, 685 const char *page,
689 size_t count) 686 size_t count)
690 { 687 {
691 if (!lun || !lun->lun_sep) 688 if (!lun || !lun->lun_sep)
692 return -ENODEV; 689 return -ENODEV;
693 690
694 return core_alua_store_secondary_status(lun, page, count); 691 return core_alua_store_secondary_status(lun, page, count);
695 } 692 }
696 693
697 TCM_PORT_ATTR(alua_tg_pt_status, S_IRUGO | S_IWUSR); 694 TCM_PORT_ATTR(alua_tg_pt_status, S_IRUGO | S_IWUSR);
698 695
699 /* 696 /*
700 * alua_tg_pt_write_md 697 * alua_tg_pt_write_md
701 */ 698 */
702 static ssize_t target_fabric_port_show_attr_alua_tg_pt_write_md( 699 static ssize_t target_fabric_port_show_attr_alua_tg_pt_write_md(
703 struct se_lun *lun, 700 struct se_lun *lun,
704 char *page) 701 char *page)
705 { 702 {
706 if (!lun || !lun->lun_sep) 703 if (!lun || !lun->lun_sep)
707 return -ENODEV; 704 return -ENODEV;
708 705
709 return core_alua_show_secondary_write_metadata(lun, page); 706 return core_alua_show_secondary_write_metadata(lun, page);
710 } 707 }
711 708
712 static ssize_t target_fabric_port_store_attr_alua_tg_pt_write_md( 709 static ssize_t target_fabric_port_store_attr_alua_tg_pt_write_md(
713 struct se_lun *lun, 710 struct se_lun *lun,
714 const char *page, 711 const char *page,
715 size_t count) 712 size_t count)
716 { 713 {
717 if (!lun || !lun->lun_sep) 714 if (!lun || !lun->lun_sep)
718 return -ENODEV; 715 return -ENODEV;
719 716
720 return core_alua_store_secondary_write_metadata(lun, page, count); 717 return core_alua_store_secondary_write_metadata(lun, page, count);
721 } 718 }
722 719
723 TCM_PORT_ATTR(alua_tg_pt_write_md, S_IRUGO | S_IWUSR); 720 TCM_PORT_ATTR(alua_tg_pt_write_md, S_IRUGO | S_IWUSR);
724 721
725 722
726 static struct configfs_attribute *target_fabric_port_attrs[] = { 723 static struct configfs_attribute *target_fabric_port_attrs[] = {
727 &target_fabric_port_alua_tg_pt_gp.attr, 724 &target_fabric_port_alua_tg_pt_gp.attr,
728 &target_fabric_port_alua_tg_pt_offline.attr, 725 &target_fabric_port_alua_tg_pt_offline.attr,
729 &target_fabric_port_alua_tg_pt_status.attr, 726 &target_fabric_port_alua_tg_pt_status.attr,
730 &target_fabric_port_alua_tg_pt_write_md.attr, 727 &target_fabric_port_alua_tg_pt_write_md.attr,
731 NULL, 728 NULL,
732 }; 729 };
733 730
734 CONFIGFS_EATTR_OPS(target_fabric_port, se_lun, lun_group); 731 CONFIGFS_EATTR_OPS(target_fabric_port, se_lun, lun_group);
735 732
736 static int target_fabric_port_link( 733 static int target_fabric_port_link(
737 struct config_item *lun_ci, 734 struct config_item *lun_ci,
738 struct config_item *se_dev_ci) 735 struct config_item *se_dev_ci)
739 { 736 {
740 struct config_item *tpg_ci; 737 struct config_item *tpg_ci;
741 struct se_device *dev; 738 struct se_device *dev;
742 struct se_lun *lun = container_of(to_config_group(lun_ci), 739 struct se_lun *lun = container_of(to_config_group(lun_ci),
743 struct se_lun, lun_group); 740 struct se_lun, lun_group);
744 struct se_lun *lun_p; 741 struct se_lun *lun_p;
745 struct se_portal_group *se_tpg; 742 struct se_portal_group *se_tpg;
746 struct se_subsystem_dev *se_dev = container_of( 743 struct se_subsystem_dev *se_dev = container_of(
747 to_config_group(se_dev_ci), struct se_subsystem_dev, 744 to_config_group(se_dev_ci), struct se_subsystem_dev,
748 se_dev_group); 745 se_dev_group);
749 struct target_fabric_configfs *tf; 746 struct target_fabric_configfs *tf;
750 int ret; 747 int ret;
751 748
752 tpg_ci = &lun_ci->ci_parent->ci_group->cg_item; 749 tpg_ci = &lun_ci->ci_parent->ci_group->cg_item;
753 se_tpg = container_of(to_config_group(tpg_ci), 750 se_tpg = container_of(to_config_group(tpg_ci),
754 struct se_portal_group, tpg_group); 751 struct se_portal_group, tpg_group);
755 tf = se_tpg->se_tpg_wwn->wwn_tf; 752 tf = se_tpg->se_tpg_wwn->wwn_tf;
756 753
757 if (lun->lun_se_dev != NULL) { 754 if (lun->lun_se_dev != NULL) {
758 pr_err("Port Symlink already exists\n"); 755 pr_err("Port Symlink already exists\n");
759 return -EEXIST; 756 return -EEXIST;
760 } 757 }
761 758
762 dev = se_dev->se_dev_ptr; 759 dev = se_dev->se_dev_ptr;
763 if (!dev) { 760 if (!dev) {
764 pr_err("Unable to locate struct se_device pointer from" 761 pr_err("Unable to locate struct se_device pointer from"
765 " %s\n", config_item_name(se_dev_ci)); 762 " %s\n", config_item_name(se_dev_ci));
766 ret = -ENODEV; 763 ret = -ENODEV;
767 goto out; 764 goto out;
768 } 765 }
769 766
770 lun_p = core_dev_add_lun(se_tpg, dev->se_hba, dev, 767 lun_p = core_dev_add_lun(se_tpg, dev->se_hba, dev,
771 lun->unpacked_lun); 768 lun->unpacked_lun);
772 if (IS_ERR(lun_p) || !lun_p) { 769 if (IS_ERR(lun_p) || !lun_p) {
773 pr_err("core_dev_add_lun() failed\n"); 770 pr_err("core_dev_add_lun() failed\n");
774 ret = -EINVAL; 771 ret = -EINVAL;
775 goto out; 772 goto out;
776 } 773 }
777 774
778 if (tf->tf_ops.fabric_post_link) { 775 if (tf->tf_ops.fabric_post_link) {
779 /* 776 /*
780 * Call the optional fabric_post_link() to allow a 777 * Call the optional fabric_post_link() to allow a
781 * fabric module to setup any additional state once 778 * fabric module to setup any additional state once
782 * core_dev_add_lun() has been called.. 779 * core_dev_add_lun() has been called..
783 */ 780 */
784 tf->tf_ops.fabric_post_link(se_tpg, lun); 781 tf->tf_ops.fabric_post_link(se_tpg, lun);
785 } 782 }
786 783
787 return 0; 784 return 0;
788 out: 785 out:
789 return ret; 786 return ret;
790 } 787 }
791 788
792 static int target_fabric_port_unlink( 789 static int target_fabric_port_unlink(
793 struct config_item *lun_ci, 790 struct config_item *lun_ci,
794 struct config_item *se_dev_ci) 791 struct config_item *se_dev_ci)
795 { 792 {
796 struct se_lun *lun = container_of(to_config_group(lun_ci), 793 struct se_lun *lun = container_of(to_config_group(lun_ci),
797 struct se_lun, lun_group); 794 struct se_lun, lun_group);
798 struct se_portal_group *se_tpg = lun->lun_sep->sep_tpg; 795 struct se_portal_group *se_tpg = lun->lun_sep->sep_tpg;
799 struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; 796 struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
800 797
801 if (tf->tf_ops.fabric_pre_unlink) { 798 if (tf->tf_ops.fabric_pre_unlink) {
802 /* 799 /*
803 * Call the optional fabric_pre_unlink() to allow a 800 * Call the optional fabric_pre_unlink() to allow a
804 * fabric module to release any additional state before 801 * fabric module to release any additional state before
805 * core_dev_del_lun() is called. 802 * core_dev_del_lun() is called.
806 */ 803 */
807 tf->tf_ops.fabric_pre_unlink(se_tpg, lun); 804 tf->tf_ops.fabric_pre_unlink(se_tpg, lun);
808 } 805 }
809 806
810 core_dev_del_lun(se_tpg, lun->unpacked_lun); 807 core_dev_del_lun(se_tpg, lun->unpacked_lun);
811 return 0; 808 return 0;
812 } 809 }
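The fabric_post_link()/fabric_pre_unlink() calls above are the only fabric-visible hooks in the port symlink path, and both are optional. A pair of hypothetical implementations, purely to show the expected signatures (the bodies are illustrative):

static void example_fabric_post_link(struct se_portal_group *se_tpg,
				     struct se_lun *lun)
{
	/* export the freshly added LUN on the fabric side, now that
	 * core_dev_add_lun() has completed */
}

static void example_fabric_pre_unlink(struct se_portal_group *se_tpg,
				      struct se_lun *lun)
{
	/* quiesce fabric-side state before core_dev_del_lun() runs */
}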
813 810
814 static struct configfs_item_operations target_fabric_port_item_ops = { 811 static struct configfs_item_operations target_fabric_port_item_ops = {
815 .show_attribute = target_fabric_port_attr_show, 812 .show_attribute = target_fabric_port_attr_show,
816 .store_attribute = target_fabric_port_attr_store, 813 .store_attribute = target_fabric_port_attr_store,
817 .allow_link = target_fabric_port_link, 814 .allow_link = target_fabric_port_link,
818 .drop_link = target_fabric_port_unlink, 815 .drop_link = target_fabric_port_unlink,
819 }; 816 };
820 817
821 TF_CIT_SETUP(tpg_port, &target_fabric_port_item_ops, NULL, target_fabric_port_attrs); 818 TF_CIT_SETUP(tpg_port, &target_fabric_port_item_ops, NULL, target_fabric_port_attrs);
822 819
823 /* End of tfc_tpg_port_cit */ 820 /* End of tfc_tpg_port_cit */
824 821
825 /* Start of tfc_tpg_port_stat_cit */ 822 /* Start of tfc_tpg_port_stat_cit */
826 823
827 static struct config_group *target_core_port_stat_mkdir( 824 static struct config_group *target_core_port_stat_mkdir(
828 struct config_group *group, 825 struct config_group *group,
829 const char *name) 826 const char *name)
830 { 827 {
831 return ERR_PTR(-ENOSYS); 828 return ERR_PTR(-ENOSYS);
832 } 829 }
833 830
834 static void target_core_port_stat_rmdir( 831 static void target_core_port_stat_rmdir(
835 struct config_group *group, 832 struct config_group *group,
836 struct config_item *item) 833 struct config_item *item)
837 { 834 {
838 return; 835 return;
839 } 836 }
840 837
841 static struct configfs_group_operations target_fabric_port_stat_group_ops = { 838 static struct configfs_group_operations target_fabric_port_stat_group_ops = {
842 .make_group = target_core_port_stat_mkdir, 839 .make_group = target_core_port_stat_mkdir,
843 .drop_item = target_core_port_stat_rmdir, 840 .drop_item = target_core_port_stat_rmdir,
844 }; 841 };
845 842
846 TF_CIT_SETUP(tpg_port_stat, NULL, &target_fabric_port_stat_group_ops, NULL); 843 TF_CIT_SETUP(tpg_port_stat, NULL, &target_fabric_port_stat_group_ops, NULL);
847 844
848 /* End of tfc_tpg_port_stat_cit */ 845 /* End of tfc_tpg_port_stat_cit */
849 846
850 /* Start of tfc_tpg_lun_cit */ 847 /* Start of tfc_tpg_lun_cit */
851 848
852 static struct config_group *target_fabric_make_lun( 849 static struct config_group *target_fabric_make_lun(
853 struct config_group *group, 850 struct config_group *group,
854 const char *name) 851 const char *name)
855 { 852 {
856 struct se_lun *lun; 853 struct se_lun *lun;
857 struct se_portal_group *se_tpg = container_of(group, 854 struct se_portal_group *se_tpg = container_of(group,
858 struct se_portal_group, tpg_lun_group); 855 struct se_portal_group, tpg_lun_group);
859 struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; 856 struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
860 struct config_group *lun_cg = NULL, *port_stat_grp = NULL; 857 struct config_group *lun_cg = NULL, *port_stat_grp = NULL;
861 unsigned long unpacked_lun; 858 unsigned long unpacked_lun;
862 int errno; 859 int errno;
863 860
864 if (strstr(name, "lun_") != name) { 861 if (strstr(name, "lun_") != name) {
865 pr_err("Unable to locate \'_\' in" 862 pr_err("Unable to locate \'_\' in"
866 " \"lun_$LUN_NUMBER\"\n"); 863 " \"lun_$LUN_NUMBER\"\n");
867 return ERR_PTR(-EINVAL); 864 return ERR_PTR(-EINVAL);
868 } 865 }
869 if (strict_strtoul(name + 4, 0, &unpacked_lun) || unpacked_lun > UINT_MAX) 866 if (strict_strtoul(name + 4, 0, &unpacked_lun) || unpacked_lun > UINT_MAX)
870 return ERR_PTR(-EINVAL); 867 return ERR_PTR(-EINVAL);
871 868
872 lun = core_get_lun_from_tpg(se_tpg, unpacked_lun); 869 lun = core_get_lun_from_tpg(se_tpg, unpacked_lun);
873 if (!lun) 870 if (!lun)
874 return ERR_PTR(-EINVAL); 871 return ERR_PTR(-EINVAL);
875 872
876 lun_cg = &lun->lun_group; 873 lun_cg = &lun->lun_group;
877 lun_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, 874 lun_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
878 GFP_KERNEL); 875 GFP_KERNEL);
879 if (!lun_cg->default_groups) { 876 if (!lun_cg->default_groups) {
880 pr_err("Unable to allocate lun_cg->default_groups\n"); 877 pr_err("Unable to allocate lun_cg->default_groups\n");
881 return ERR_PTR(-ENOMEM); 878 return ERR_PTR(-ENOMEM);
882 } 879 }
883 880
884 config_group_init_type_name(&lun->lun_group, name, 881 config_group_init_type_name(&lun->lun_group, name,
885 &TF_CIT_TMPL(tf)->tfc_tpg_port_cit); 882 &TF_CIT_TMPL(tf)->tfc_tpg_port_cit);
886 config_group_init_type_name(&lun->port_stat_grps.stat_group, 883 config_group_init_type_name(&lun->port_stat_grps.stat_group,
887 "statistics", &TF_CIT_TMPL(tf)->tfc_tpg_port_stat_cit); 884 "statistics", &TF_CIT_TMPL(tf)->tfc_tpg_port_stat_cit);
888 lun_cg->default_groups[0] = &lun->port_stat_grps.stat_group; 885 lun_cg->default_groups[0] = &lun->port_stat_grps.stat_group;
889 lun_cg->default_groups[1] = NULL; 886 lun_cg->default_groups[1] = NULL;
890 887
891 port_stat_grp = &lun->port_stat_grps.stat_group; 888 port_stat_grp = &lun->port_stat_grps.stat_group;
892 port_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3, 889 port_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3,
893 GFP_KERNEL); 890 GFP_KERNEL);
894 if (!port_stat_grp->default_groups) { 891 if (!port_stat_grp->default_groups) {
895 pr_err("Unable to allocate port_stat_grp->default_groups\n"); 892 pr_err("Unable to allocate port_stat_grp->default_groups\n");
896 errno = -ENOMEM; 893 errno = -ENOMEM;
897 goto out; 894 goto out;
898 } 895 }
899 target_stat_setup_port_default_groups(lun); 896 target_stat_setup_port_default_groups(lun);
900 897
901 return &lun->lun_group; 898 return &lun->lun_group;
902 out: 899 out:
903 if (lun_cg) 900 if (lun_cg)
904 kfree(lun_cg->default_groups); 901 kfree(lun_cg->default_groups);
905 return ERR_PTR(errno); 902 return ERR_PTR(errno);
906 } 903 }
907 904
908 static void target_fabric_drop_lun( 905 static void target_fabric_drop_lun(
909 struct config_group *group, 906 struct config_group *group,
910 struct config_item *item) 907 struct config_item *item)
911 { 908 {
912 struct se_lun *lun = container_of(to_config_group(item), 909 struct se_lun *lun = container_of(to_config_group(item),
913 struct se_lun, lun_group); 910 struct se_lun, lun_group);
914 struct config_item *df_item; 911 struct config_item *df_item;
915 struct config_group *lun_cg, *port_stat_grp; 912 struct config_group *lun_cg, *port_stat_grp;
916 int i; 913 int i;
917 914
918 port_stat_grp = &lun->port_stat_grps.stat_group; 915 port_stat_grp = &lun->port_stat_grps.stat_group;
919 for (i = 0; port_stat_grp->default_groups[i]; i++) { 916 for (i = 0; port_stat_grp->default_groups[i]; i++) {
920 df_item = &port_stat_grp->default_groups[i]->cg_item; 917 df_item = &port_stat_grp->default_groups[i]->cg_item;
921 port_stat_grp->default_groups[i] = NULL; 918 port_stat_grp->default_groups[i] = NULL;
922 config_item_put(df_item); 919 config_item_put(df_item);
923 } 920 }
924 kfree(port_stat_grp->default_groups); 921 kfree(port_stat_grp->default_groups);
925 922
926 lun_cg = &lun->lun_group; 923 lun_cg = &lun->lun_group;
927 for (i = 0; lun_cg->default_groups[i]; i++) { 924 for (i = 0; lun_cg->default_groups[i]; i++) {
928 df_item = &lun_cg->default_groups[i]->cg_item; 925 df_item = &lun_cg->default_groups[i]->cg_item;
929 lun_cg->default_groups[i] = NULL; 926 lun_cg->default_groups[i] = NULL;
930 config_item_put(df_item); 927 config_item_put(df_item);
931 } 928 }
932 kfree(lun_cg->default_groups); 929 kfree(lun_cg->default_groups);
933 930
934 config_item_put(item); 931 config_item_put(item);
935 } 932 }
936 933
937 static struct configfs_group_operations target_fabric_lun_group_ops = { 934 static struct configfs_group_operations target_fabric_lun_group_ops = {
938 .make_group = &target_fabric_make_lun, 935 .make_group = &target_fabric_make_lun,
939 .drop_item = &target_fabric_drop_lun, 936 .drop_item = &target_fabric_drop_lun,
940 }; 937 };
941 938
942 TF_CIT_SETUP(tpg_lun, NULL, &target_fabric_lun_group_ops, NULL); 939 TF_CIT_SETUP(tpg_lun, NULL, &target_fabric_lun_group_ops, NULL);
943 940
944 /* End of tfc_tpg_lun_cit */ 941 /* End of tfc_tpg_lun_cit */
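Tying the two group_ops above together: .make_group fires on a userspace mkdir(2) beneath the tpg's lun/ directory, and the name parsing in target_fabric_make_lun() is all that maps the directory name onto a LUN number. A minimal sketch of that flow, with an invented fabric name, WWN and tpg tag:

/*
 * Illustrative only -- the path components below are made up; the
 * parsing steps mirror target_fabric_make_lun() above.
 *
 *   mkdir /sys/kernel/config/target/xyz/naa.6001405abcdefff1/tpgt_1/lun/lun_0
 *
 *   name == "lun_0"
 *     strstr(name, "lun_") == name               -> prefix check passes
 *     strict_strtoul(name + 4, 0, &unpacked_lun) -> unpacked_lun == 0
 *     core_get_lun_from_tpg(se_tpg, 0)           -> se_lun backing lun_0/
 */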
945 942
946 /* Start of tfc_tpg_attrib_cit */ 943 /* Start of tfc_tpg_attrib_cit */
947 944
948 CONFIGFS_EATTR_OPS(target_fabric_tpg_attrib, se_portal_group, tpg_attrib_group); 945 CONFIGFS_EATTR_OPS(target_fabric_tpg_attrib, se_portal_group, tpg_attrib_group);
949 946
950 static struct configfs_item_operations target_fabric_tpg_attrib_item_ops = { 947 static struct configfs_item_operations target_fabric_tpg_attrib_item_ops = {
951 .show_attribute = target_fabric_tpg_attrib_attr_show, 948 .show_attribute = target_fabric_tpg_attrib_attr_show,
952 .store_attribute = target_fabric_tpg_attrib_attr_store, 949 .store_attribute = target_fabric_tpg_attrib_attr_store,
953 }; 950 };
954 951
955 TF_CIT_SETUP(tpg_attrib, &target_fabric_tpg_attrib_item_ops, NULL, NULL); 952 TF_CIT_SETUP(tpg_attrib, &target_fabric_tpg_attrib_item_ops, NULL, NULL);
956 953
957 /* End of tfc_tpg_attrib_cit */ 954 /* End of tfc_tpg_attrib_cit */
958 955
959 /* Start of tfc_tpg_param_cit */ 956 /* Start of tfc_tpg_param_cit */
960 957
961 CONFIGFS_EATTR_OPS(target_fabric_tpg_param, se_portal_group, tpg_param_group); 958 CONFIGFS_EATTR_OPS(target_fabric_tpg_param, se_portal_group, tpg_param_group);
962 959
963 static struct configfs_item_operations target_fabric_tpg_param_item_ops = { 960 static struct configfs_item_operations target_fabric_tpg_param_item_ops = {
964 .show_attribute = target_fabric_tpg_param_attr_show, 961 .show_attribute = target_fabric_tpg_param_attr_show,
965 .store_attribute = target_fabric_tpg_param_attr_store, 962 .store_attribute = target_fabric_tpg_param_attr_store,
966 }; 963 };
967 964
968 TF_CIT_SETUP(tpg_param, &target_fabric_tpg_param_item_ops, NULL, NULL); 965 TF_CIT_SETUP(tpg_param, &target_fabric_tpg_param_item_ops, NULL, NULL);
969 966
970 /* End of tfc_tpg_param_cit */ 967 /* End of tfc_tpg_param_cit */
971 968
972 /* Start of tfc_tpg_base_cit */ 969 /* Start of tfc_tpg_base_cit */
973 /* 970 /*
974 * For use with TF_TPG_ATTR() and TF_TPG_ATTR_RO() 971 * For use with TF_TPG_ATTR() and TF_TPG_ATTR_RO()
975 */ 972 */
976 CONFIGFS_EATTR_OPS(target_fabric_tpg, se_portal_group, tpg_group); 973 CONFIGFS_EATTR_OPS(target_fabric_tpg, se_portal_group, tpg_group);
977 974
978 static void target_fabric_tpg_release(struct config_item *item) 975 static void target_fabric_tpg_release(struct config_item *item)
979 { 976 {
980 struct se_portal_group *se_tpg = container_of(to_config_group(item), 977 struct se_portal_group *se_tpg = container_of(to_config_group(item),
981 struct se_portal_group, tpg_group); 978 struct se_portal_group, tpg_group);
982 struct se_wwn *wwn = se_tpg->se_tpg_wwn; 979 struct se_wwn *wwn = se_tpg->se_tpg_wwn;
983 struct target_fabric_configfs *tf = wwn->wwn_tf; 980 struct target_fabric_configfs *tf = wwn->wwn_tf;
984 981
985 tf->tf_ops.fabric_drop_tpg(se_tpg); 982 tf->tf_ops.fabric_drop_tpg(se_tpg);
986 } 983 }
987 984
988 static struct configfs_item_operations target_fabric_tpg_base_item_ops = { 985 static struct configfs_item_operations target_fabric_tpg_base_item_ops = {
989 .release = target_fabric_tpg_release, 986 .release = target_fabric_tpg_release,
990 .show_attribute = target_fabric_tpg_attr_show, 987 .show_attribute = target_fabric_tpg_attr_show,
991 .store_attribute = target_fabric_tpg_attr_store, 988 .store_attribute = target_fabric_tpg_attr_store,
992 }; 989 };
993 990
994 TF_CIT_SETUP(tpg_base, &target_fabric_tpg_base_item_ops, NULL, NULL); 991 TF_CIT_SETUP(tpg_base, &target_fabric_tpg_base_item_ops, NULL, NULL);
995 992
996 /* End of tfc_tpg_base_cit */ 993 /* End of tfc_tpg_base_cit */
997 994
998 /* Start of tfc_tpg_cit */ 995 /* Start of tfc_tpg_cit */
999 996
1000 static struct config_group *target_fabric_make_tpg( 997 static struct config_group *target_fabric_make_tpg(
1001 struct config_group *group, 998 struct config_group *group,
1002 const char *name) 999 const char *name)
1003 { 1000 {
1004 struct se_wwn *wwn = container_of(group, struct se_wwn, wwn_group); 1001 struct se_wwn *wwn = container_of(group, struct se_wwn, wwn_group);
1005 struct target_fabric_configfs *tf = wwn->wwn_tf; 1002 struct target_fabric_configfs *tf = wwn->wwn_tf;
1006 struct se_portal_group *se_tpg; 1003 struct se_portal_group *se_tpg;
1007 1004
1008 if (!tf->tf_ops.fabric_make_tpg) { 1005 if (!tf->tf_ops.fabric_make_tpg) {
1009 pr_err("tf->tf_ops.fabric_make_tpg is NULL\n"); 1006 pr_err("tf->tf_ops.fabric_make_tpg is NULL\n");
1010 return ERR_PTR(-ENOSYS); 1007 return ERR_PTR(-ENOSYS);
1011 } 1008 }
1012 1009
1013 se_tpg = tf->tf_ops.fabric_make_tpg(wwn, group, name); 1010 se_tpg = tf->tf_ops.fabric_make_tpg(wwn, group, name);
1014 if (!se_tpg || IS_ERR(se_tpg)) 1011 if (!se_tpg || IS_ERR(se_tpg))
1015 return ERR_PTR(-EINVAL); 1012 return ERR_PTR(-EINVAL);
1016 /* 1013 /*
1017 * Setup default groups from pre-allocated se_tpg->tpg_default_groups 1014 * Setup default groups from pre-allocated se_tpg->tpg_default_groups
1018 */ 1015 */
1019 se_tpg->tpg_group.default_groups = se_tpg->tpg_default_groups; 1016 se_tpg->tpg_group.default_groups = se_tpg->tpg_default_groups;
1020 se_tpg->tpg_group.default_groups[0] = &se_tpg->tpg_lun_group; 1017 se_tpg->tpg_group.default_groups[0] = &se_tpg->tpg_lun_group;
1021 se_tpg->tpg_group.default_groups[1] = &se_tpg->tpg_np_group; 1018 se_tpg->tpg_group.default_groups[1] = &se_tpg->tpg_np_group;
1022 se_tpg->tpg_group.default_groups[2] = &se_tpg->tpg_acl_group; 1019 se_tpg->tpg_group.default_groups[2] = &se_tpg->tpg_acl_group;
1023 se_tpg->tpg_group.default_groups[3] = &se_tpg->tpg_attrib_group; 1020 se_tpg->tpg_group.default_groups[3] = &se_tpg->tpg_attrib_group;
1024 se_tpg->tpg_group.default_groups[4] = &se_tpg->tpg_param_group; 1021 se_tpg->tpg_group.default_groups[4] = &se_tpg->tpg_param_group;
1025 se_tpg->tpg_group.default_groups[5] = NULL; 1022 se_tpg->tpg_group.default_groups[5] = NULL;
1026 1023
1027 config_group_init_type_name(&se_tpg->tpg_group, name, 1024 config_group_init_type_name(&se_tpg->tpg_group, name,
1028 &TF_CIT_TMPL(tf)->tfc_tpg_base_cit); 1025 &TF_CIT_TMPL(tf)->tfc_tpg_base_cit);
1029 config_group_init_type_name(&se_tpg->tpg_lun_group, "lun", 1026 config_group_init_type_name(&se_tpg->tpg_lun_group, "lun",
1030 &TF_CIT_TMPL(tf)->tfc_tpg_lun_cit); 1027 &TF_CIT_TMPL(tf)->tfc_tpg_lun_cit);
1031 config_group_init_type_name(&se_tpg->tpg_np_group, "np", 1028 config_group_init_type_name(&se_tpg->tpg_np_group, "np",
1032 &TF_CIT_TMPL(tf)->tfc_tpg_np_cit); 1029 &TF_CIT_TMPL(tf)->tfc_tpg_np_cit);
1033 config_group_init_type_name(&se_tpg->tpg_acl_group, "acls", 1030 config_group_init_type_name(&se_tpg->tpg_acl_group, "acls",
1034 &TF_CIT_TMPL(tf)->tfc_tpg_nacl_cit); 1031 &TF_CIT_TMPL(tf)->tfc_tpg_nacl_cit);
1035 config_group_init_type_name(&se_tpg->tpg_attrib_group, "attrib", 1032 config_group_init_type_name(&se_tpg->tpg_attrib_group, "attrib",
1036 &TF_CIT_TMPL(tf)->tfc_tpg_attrib_cit); 1033 &TF_CIT_TMPL(tf)->tfc_tpg_attrib_cit);
1037 config_group_init_type_name(&se_tpg->tpg_param_group, "param", 1034 config_group_init_type_name(&se_tpg->tpg_param_group, "param",
1038 &TF_CIT_TMPL(tf)->tfc_tpg_param_cit); 1035 &TF_CIT_TMPL(tf)->tfc_tpg_param_cit);
1039 1036
1040 return &se_tpg->tpg_group; 1037 return &se_tpg->tpg_group;
1041 } 1038 }
1042 1039
1043 static void target_fabric_drop_tpg( 1040 static void target_fabric_drop_tpg(
1044 struct config_group *group, 1041 struct config_group *group,
1045 struct config_item *item) 1042 struct config_item *item)
1046 { 1043 {
1047 struct se_portal_group *se_tpg = container_of(to_config_group(item), 1044 struct se_portal_group *se_tpg = container_of(to_config_group(item),
1048 struct se_portal_group, tpg_group); 1045 struct se_portal_group, tpg_group);
1049 struct config_group *tpg_cg = &se_tpg->tpg_group; 1046 struct config_group *tpg_cg = &se_tpg->tpg_group;
1050 struct config_item *df_item; 1047 struct config_item *df_item;
1051 int i; 1048 int i;
1052 /* 1049 /*
1053 * Release default groups, but do not release tpg_cg->default_groups 1050 * Release default groups, but do not release tpg_cg->default_groups
1054 * memory as it is statically allocated at se_tpg->tpg_default_groups. 1051 * memory as it is statically allocated at se_tpg->tpg_default_groups.
1055 */ 1052 */
1056 for (i = 0; tpg_cg->default_groups[i]; i++) { 1053 for (i = 0; tpg_cg->default_groups[i]; i++) {
1057 df_item = &tpg_cg->default_groups[i]->cg_item; 1054 df_item = &tpg_cg->default_groups[i]->cg_item;
1058 tpg_cg->default_groups[i] = NULL; 1055 tpg_cg->default_groups[i] = NULL;
1059 config_item_put(df_item); 1056 config_item_put(df_item);
1060 } 1057 }
1061 1058
1062 config_item_put(item); 1059 config_item_put(item);
1063 } 1060 }
1064 1061
1065 static void target_fabric_release_wwn(struct config_item *item) 1062 static void target_fabric_release_wwn(struct config_item *item)
1066 { 1063 {
1067 struct se_wwn *wwn = container_of(to_config_group(item), 1064 struct se_wwn *wwn = container_of(to_config_group(item),
1068 struct se_wwn, wwn_group); 1065 struct se_wwn, wwn_group);
1069 struct target_fabric_configfs *tf = wwn->wwn_tf; 1066 struct target_fabric_configfs *tf = wwn->wwn_tf;
1070 1067
1071 tf->tf_ops.fabric_drop_wwn(wwn); 1068 tf->tf_ops.fabric_drop_wwn(wwn);
1072 } 1069 }
1073 1070
1074 static struct configfs_item_operations target_fabric_tpg_item_ops = { 1071 static struct configfs_item_operations target_fabric_tpg_item_ops = {
1075 .release = target_fabric_release_wwn, 1072 .release = target_fabric_release_wwn,
1076 }; 1073 };
1077 1074
1078 static struct configfs_group_operations target_fabric_tpg_group_ops = { 1075 static struct configfs_group_operations target_fabric_tpg_group_ops = {
1079 .make_group = target_fabric_make_tpg, 1076 .make_group = target_fabric_make_tpg,
1080 .drop_item = target_fabric_drop_tpg, 1077 .drop_item = target_fabric_drop_tpg,
1081 }; 1078 };
1082 1079
1083 TF_CIT_SETUP(tpg, &target_fabric_tpg_item_ops, &target_fabric_tpg_group_ops, 1080 TF_CIT_SETUP(tpg, &target_fabric_tpg_item_ops, &target_fabric_tpg_group_ops,
1084 NULL); 1081 NULL);
1085 1082
1086 /* End of tfc_tpg_cit */ 1083 /* End of tfc_tpg_cit */
1087 1084
1088 /* Start of tfc_wwn_fabric_stats_cit */ 1085 /* Start of tfc_wwn_fabric_stats_cit */
1089 /* 1086 /*
1090 * This is used as a placeholder for struct se_wwn->fabric_stat_group 1087 * This is used as a placeholder for struct se_wwn->fabric_stat_group
1091 * to allow fabrics access to ->fabric_stat_group->default_groups[] 1088 * to allow fabrics access to ->fabric_stat_group->default_groups[]
1092 */ 1089 */
1093 TF_CIT_SETUP(wwn_fabric_stats, NULL, NULL, NULL); 1090 TF_CIT_SETUP(wwn_fabric_stats, NULL, NULL, NULL);
1094 1091
1095 /* End of tfc_wwn_fabric_stats_cit */ 1092 /* End of tfc_wwn_fabric_stats_cit */
1096 1093
1097 /* Start of tfc_wwn_cit */ 1094 /* Start of tfc_wwn_cit */
1098 1095
1099 static struct config_group *target_fabric_make_wwn( 1096 static struct config_group *target_fabric_make_wwn(
1100 struct config_group *group, 1097 struct config_group *group,
1101 const char *name) 1098 const char *name)
1102 { 1099 {
1103 struct target_fabric_configfs *tf = container_of(group, 1100 struct target_fabric_configfs *tf = container_of(group,
1104 struct target_fabric_configfs, tf_group); 1101 struct target_fabric_configfs, tf_group);
1105 struct se_wwn *wwn; 1102 struct se_wwn *wwn;
1106 1103
1107 if (!tf->tf_ops.fabric_make_wwn) { 1104 if (!tf->tf_ops.fabric_make_wwn) {
1108 pr_err("tf->tf_ops.fabric_make_wwn is NULL\n"); 1105 pr_err("tf->tf_ops.fabric_make_wwn is NULL\n");
1109 return ERR_PTR(-ENOSYS); 1106 return ERR_PTR(-ENOSYS);
1110 } 1107 }
1111 1108
1112 wwn = tf->tf_ops.fabric_make_wwn(tf, group, name); 1109 wwn = tf->tf_ops.fabric_make_wwn(tf, group, name);
1113 if (!wwn || IS_ERR(wwn)) 1110 if (!wwn || IS_ERR(wwn))
1114 return ERR_PTR(-EINVAL); 1111 return ERR_PTR(-EINVAL);
1115 1112
1116 wwn->wwn_tf = tf; 1113 wwn->wwn_tf = tf;
1117 /* 1114 /*
1118 * Setup default groups from pre-allocated wwn->wwn_default_groups 1115 * Setup default groups from pre-allocated wwn->wwn_default_groups
1119 */ 1116 */
1120 wwn->wwn_group.default_groups = wwn->wwn_default_groups; 1117 wwn->wwn_group.default_groups = wwn->wwn_default_groups;
1121 wwn->wwn_group.default_groups[0] = &wwn->fabric_stat_group; 1118 wwn->wwn_group.default_groups[0] = &wwn->fabric_stat_group;
1122 wwn->wwn_group.default_groups[1] = NULL; 1119 wwn->wwn_group.default_groups[1] = NULL;
1123 1120
1124 config_group_init_type_name(&wwn->wwn_group, name, 1121 config_group_init_type_name(&wwn->wwn_group, name,
1125 &TF_CIT_TMPL(tf)->tfc_tpg_cit); 1122 &TF_CIT_TMPL(tf)->tfc_tpg_cit);
1126 config_group_init_type_name(&wwn->fabric_stat_group, "fabric_statistics", 1123 config_group_init_type_name(&wwn->fabric_stat_group, "fabric_statistics",
1127 &TF_CIT_TMPL(tf)->tfc_wwn_fabric_stats_cit); 1124 &TF_CIT_TMPL(tf)->tfc_wwn_fabric_stats_cit);
1128 1125
1129 return &wwn->wwn_group; 1126 return &wwn->wwn_group;
1130 } 1127 }
1131 1128
1132 static void target_fabric_drop_wwn( 1129 static void target_fabric_drop_wwn(
1133 struct config_group *group, 1130 struct config_group *group,
1134 struct config_item *item) 1131 struct config_item *item)
1135 { 1132 {
1136 struct se_wwn *wwn = container_of(to_config_group(item), 1133 struct se_wwn *wwn = container_of(to_config_group(item),
1137 struct se_wwn, wwn_group); 1134 struct se_wwn, wwn_group);
1138 struct config_item *df_item; 1135 struct config_item *df_item;
1139 struct config_group *cg = &wwn->wwn_group; 1136 struct config_group *cg = &wwn->wwn_group;
1140 int i; 1137 int i;
1141 1138
1142 for (i = 0; cg->default_groups[i]; i++) { 1139 for (i = 0; cg->default_groups[i]; i++) {
1143 df_item = &cg->default_groups[i]->cg_item; 1140 df_item = &cg->default_groups[i]->cg_item;
1144 cg->default_groups[i] = NULL; 1141 cg->default_groups[i] = NULL;
1145 config_item_put(df_item); 1142 config_item_put(df_item);
1146 } 1143 }
1147 1144
1148 config_item_put(item); 1145 config_item_put(item);
1149 } 1146 }
1150 1147
1151 static struct configfs_group_operations target_fabric_wwn_group_ops = { 1148 static struct configfs_group_operations target_fabric_wwn_group_ops = {
1152 .make_group = target_fabric_make_wwn, 1149 .make_group = target_fabric_make_wwn,
1153 .drop_item = target_fabric_drop_wwn, 1150 .drop_item = target_fabric_drop_wwn,
1154 }; 1151 };
1155 /* 1152 /*
1156 * For use with TF_WWN_ATTR() and TF_WWN_ATTR_RO() 1153 * For use with TF_WWN_ATTR() and TF_WWN_ATTR_RO()
1157 */ 1154 */
1158 CONFIGFS_EATTR_OPS(target_fabric_wwn, target_fabric_configfs, tf_group); 1155 CONFIGFS_EATTR_OPS(target_fabric_wwn, target_fabric_configfs, tf_group);
1159 1156
1160 static struct configfs_item_operations target_fabric_wwn_item_ops = { 1157 static struct configfs_item_operations target_fabric_wwn_item_ops = {
1161 .show_attribute = target_fabric_wwn_attr_show, 1158 .show_attribute = target_fabric_wwn_attr_show,
1162 .store_attribute = target_fabric_wwn_attr_store, 1159 .store_attribute = target_fabric_wwn_attr_store,
1163 }; 1160 };
1164 1161
1165 TF_CIT_SETUP(wwn, &target_fabric_wwn_item_ops, &target_fabric_wwn_group_ops, NULL); 1162 TF_CIT_SETUP(wwn, &target_fabric_wwn_item_ops, &target_fabric_wwn_group_ops, NULL);
1166 1163
1167 /* End of tfc_wwn_cit */ 1164 /* End of tfc_wwn_cit */
1168 1165
1169 /* Start of tfc_discovery_cit */ 1166 /* Start of tfc_discovery_cit */
1170 1167
1171 CONFIGFS_EATTR_OPS(target_fabric_discovery, target_fabric_configfs, 1168 CONFIGFS_EATTR_OPS(target_fabric_discovery, target_fabric_configfs,
1172 tf_disc_group); 1169 tf_disc_group);
1173 1170
1174 static struct configfs_item_operations target_fabric_discovery_item_ops = { 1171 static struct configfs_item_operations target_fabric_discovery_item_ops = {
1175 .show_attribute = target_fabric_discovery_attr_show, 1172 .show_attribute = target_fabric_discovery_attr_show,
1176 .store_attribute = target_fabric_discovery_attr_store, 1173 .store_attribute = target_fabric_discovery_attr_store,
1177 }; 1174 };
1178 1175
1179 TF_CIT_SETUP(discovery, &target_fabric_discovery_item_ops, NULL, NULL); 1176 TF_CIT_SETUP(discovery, &target_fabric_discovery_item_ops, NULL, NULL);
1180 1177
1181 /* End of tfc_discovery_cit */ 1178 /* End of tfc_discovery_cit */
1182 1179
1183 int target_fabric_setup_cits(struct target_fabric_configfs *tf) 1180 int target_fabric_setup_cits(struct target_fabric_configfs *tf)
1184 { 1181 {
1185 target_fabric_setup_discovery_cit(tf); 1182 target_fabric_setup_discovery_cit(tf);
1186 target_fabric_setup_wwn_cit(tf); 1183 target_fabric_setup_wwn_cit(tf);
1187 target_fabric_setup_wwn_fabric_stats_cit(tf); 1184 target_fabric_setup_wwn_fabric_stats_cit(tf);
1188 target_fabric_setup_tpg_cit(tf); 1185 target_fabric_setup_tpg_cit(tf);
1189 target_fabric_setup_tpg_base_cit(tf); 1186 target_fabric_setup_tpg_base_cit(tf);
1190 target_fabric_setup_tpg_port_cit(tf); 1187 target_fabric_setup_tpg_port_cit(tf);
1191 target_fabric_setup_tpg_port_stat_cit(tf); 1188 target_fabric_setup_tpg_port_stat_cit(tf);
1192 target_fabric_setup_tpg_lun_cit(tf); 1189 target_fabric_setup_tpg_lun_cit(tf);
1193 target_fabric_setup_tpg_np_cit(tf); 1190 target_fabric_setup_tpg_np_cit(tf);
1194 target_fabric_setup_tpg_np_base_cit(tf); 1191 target_fabric_setup_tpg_np_base_cit(tf);
1195 target_fabric_setup_tpg_attrib_cit(tf); 1192 target_fabric_setup_tpg_attrib_cit(tf);
1196 target_fabric_setup_tpg_param_cit(tf); 1193 target_fabric_setup_tpg_param_cit(tf);
1197 target_fabric_setup_tpg_nacl_cit(tf); 1194 target_fabric_setup_tpg_nacl_cit(tf);
1198 target_fabric_setup_tpg_nacl_base_cit(tf); 1195 target_fabric_setup_tpg_nacl_base_cit(tf);
1199 target_fabric_setup_tpg_nacl_attrib_cit(tf); 1196 target_fabric_setup_tpg_nacl_attrib_cit(tf);
1200 target_fabric_setup_tpg_nacl_auth_cit(tf); 1197 target_fabric_setup_tpg_nacl_auth_cit(tf);
1201 target_fabric_setup_tpg_nacl_param_cit(tf); 1198 target_fabric_setup_tpg_nacl_param_cit(tf);
1202 target_fabric_setup_tpg_nacl_stat_cit(tf); 1199 target_fabric_setup_tpg_nacl_stat_cit(tf);
1203 target_fabric_setup_tpg_mappedlun_cit(tf); 1200 target_fabric_setup_tpg_mappedlun_cit(tf);
1204 target_fabric_setup_tpg_mappedlun_stat_cit(tf); 1201 target_fabric_setup_tpg_mappedlun_stat_cit(tf);
1205 1202
1206 return 0; 1203 return 0;
1207 } 1204 }
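For orientation, each cit initialized above backs one directory level of a fabric module's configfs tree. A sketch of the resulting layout follows; the fabric name, WWN and tpg tag are invented, while the fixed child directory names come from the config_group_init_type_name() calls earlier in this file:

/*
 *   /sys/kernel/config/target/xyz/  <- tf_group, wwn cit
 *     naa.6001405abcdefff1/         <- se_wwn->wwn_group, tpg cit
 *       fabric_statistics/          <- wwn_fabric_stats cit
 *       tpgt_1/                     <- se_tpg->tpg_group, tpg_base cit
 *         lun/                      <- tpg_lun cit
 *           lun_0/                  <- se_lun->lun_group, tpg_port cit
 *             statistics/           <- tpg_port_stat cit
 *         np/                       <- tpg_np cit
 *         acls/                     <- tpg_nacl cit
 *         attrib/                   <- tpg_attrib cit
 *         param/                    <- tpg_param cit
 */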
1208 1205
drivers/target/target_core_fabric_lib.c
1 /******************************************************************************* 1 /*******************************************************************************
2 * Filename: target_core_fabric_lib.c 2 * Filename: target_core_fabric_lib.c
3 * 3 *
4 * This file contains generic high level protocol identifier and PR 4 * This file contains generic high level protocol identifier and PR
5 * handlers for TCM fabric modules 5 * handlers for TCM fabric modules
6 * 6 *
7 * Copyright (c) 2010 Rising Tide Systems, Inc. 7 * Copyright (c) 2010 Rising Tide Systems, Inc.
8 * Copyright (c) 2010 Linux-iSCSI.org 8 * Copyright (c) 2010 Linux-iSCSI.org
9 * 9 *
10 * Nicholas A. Bellinger <nab@linux-iscsi.org> 10 * Nicholas A. Bellinger <nab@linux-iscsi.org>
11 * 11 *
12 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by 13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or 14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version. 15 * (at your option) any later version.
16 * 16 *
17 * This program is distributed in the hope that it will be useful, 17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of 18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details. 20 * GNU General Public License for more details.
21 * 21 *
22 * You should have received a copy of the GNU General Public License 22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software 23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 24 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
25 * 25 *
26 ******************************************************************************/ 26 ******************************************************************************/
27 27
28 #include <linux/kernel.h> 28 #include <linux/kernel.h>
29 #include <linux/string.h> 29 #include <linux/string.h>
30 #include <linux/ctype.h> 30 #include <linux/ctype.h>
31 #include <linux/spinlock.h> 31 #include <linux/spinlock.h>
32 #include <linux/export.h> 32 #include <linux/export.h>
33 #include <scsi/scsi.h> 33 #include <scsi/scsi.h>
34 #include <scsi/scsi_cmnd.h> 34 #include <scsi/scsi_cmnd.h>
35 35
36 #include <target/target_core_base.h> 36 #include <target/target_core_base.h>
37 #include <target/target_core_device.h> 37 #include <target/target_core_fabric.h>
38 #include <target/target_core_transport.h>
39 #include <target/target_core_fabric_lib.h>
40 #include <target/target_core_fabric_ops.h>
41 #include <target/target_core_configfs.h> 38 #include <target/target_core_configfs.h>
42 39
43 #include "target_core_internal.h" 40 #include "target_core_internal.h"
44 #include "target_core_pr.h" 41 #include "target_core_pr.h"
45 42
46 /* 43 /*
47 * Handlers for Serial Attached SCSI (SAS) 44 * Handlers for Serial Attached SCSI (SAS)
48 */ 45 */
49 u8 sas_get_fabric_proto_ident(struct se_portal_group *se_tpg) 46 u8 sas_get_fabric_proto_ident(struct se_portal_group *se_tpg)
50 { 47 {
51 /* 48 /*
52 * Return a SAS Serial SCSI Protocol identifier for loopback operations 49 * Return a SAS Serial SCSI Protocol identifier for loopback operations
53 * This is defined in section 7.5.1 Table 362 in spc4r17 50 * This is defined in section 7.5.1 Table 362 in spc4r17
54 */ 51 */
55 return 0x6; 52 return 0x6;
56 } 53 }
57 EXPORT_SYMBOL(sas_get_fabric_proto_ident); 54 EXPORT_SYMBOL(sas_get_fabric_proto_ident);
58 55
59 u32 sas_get_pr_transport_id( 56 u32 sas_get_pr_transport_id(
60 struct se_portal_group *se_tpg, 57 struct se_portal_group *se_tpg,
61 struct se_node_acl *se_nacl, 58 struct se_node_acl *se_nacl,
62 struct t10_pr_registration *pr_reg, 59 struct t10_pr_registration *pr_reg,
63 int *format_code, 60 int *format_code,
64 unsigned char *buf) 61 unsigned char *buf)
65 { 62 {
66 unsigned char *ptr; 63 unsigned char *ptr;
67 int ret; 64 int ret;
68 65
69 /* 66 /*
70 * Set PROTOCOL IDENTIFIER to 6h for SAS 67 * Set PROTOCOL IDENTIFIER to 6h for SAS
71 */ 68 */
72 buf[0] = 0x06; 69 buf[0] = 0x06;
73 /* 70 /*
74 * From spc4r17, 7.5.4.7 TransportID for initiator ports using SCSI 71 * From spc4r17, 7.5.4.7 TransportID for initiator ports using SCSI
75 * over SAS Serial SCSI Protocol 72 * over SAS Serial SCSI Protocol
76 */ 73 */
77 ptr = &se_nacl->initiatorname[4]; /* Skip over 'naa.' prefix */ 74 ptr = &se_nacl->initiatorname[4]; /* Skip over 'naa.' prefix */
78 75
79 ret = hex2bin(&buf[4], ptr, 8); 76 ret = hex2bin(&buf[4], ptr, 8);
80 if (ret < 0) 77 if (ret < 0)
81 pr_debug("sas transport_id: invalid hex string\n"); 78 pr_debug("sas transport_id: invalid hex string\n");
82 79
83 /* 80 /*
84 * The SAS Transport ID is a hardcoded 24-byte length 81 * The SAS Transport ID is a hardcoded 24-byte length
85 */ 82 */
86 return 24; 83 return 24;
87 } 84 }
88 EXPORT_SYMBOL(sas_get_pr_transport_id); 85 EXPORT_SYMBOL(sas_get_pr_transport_id);
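Assuming the caller handed in a zeroed buffer, the resulting 24-byte SAS TransportID for a hypothetical ACL named "naa.5001405e4d9f8f4e" (value invented for illustration) lays out as:

/*
 *   buf[0]       0x06   PROTOCOL IDENTIFIER: SAS
 *   buf[1..3]    0x00   reserved (left zeroed by the caller)
 *   buf[4..11]   50 01 40 5e 4d 9f 8f 4e
 *                       hex2bin() of the 16 hex digits after "naa."
 *   buf[12..23]  0x00   reserved, padding out the fixed 24 bytes
 */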
89 86
90 u32 sas_get_pr_transport_id_len( 87 u32 sas_get_pr_transport_id_len(
91 struct se_portal_group *se_tpg, 88 struct se_portal_group *se_tpg,
92 struct se_node_acl *se_nacl, 89 struct se_node_acl *se_nacl,
93 struct t10_pr_registration *pr_reg, 90 struct t10_pr_registration *pr_reg,
94 int *format_code) 91 int *format_code)
95 { 92 {
96 *format_code = 0; 93 *format_code = 0;
97 /* 94 /*
98 * From spc4r17, 7.5.4.7 TransportID for initiator ports using SCSI 95 * From spc4r17, 7.5.4.7 TransportID for initiator ports using SCSI
99 * over SAS Serial SCSI Protocol 96 * over SAS Serial SCSI Protocol
100 * 97 *
101 * The SAS Transport ID is a hardcoded 24-byte length 98 * The SAS Transport ID is a hardcoded 24-byte length
102 */ 99 */
103 return 24; 100 return 24;
104 } 101 }
105 EXPORT_SYMBOL(sas_get_pr_transport_id_len); 102 EXPORT_SYMBOL(sas_get_pr_transport_id_len);
106 103
107 /* 104 /*
108 * Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above 105 * Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above
109 * Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations. 106 * Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations.
110 */ 107 */
111 char *sas_parse_pr_out_transport_id( 108 char *sas_parse_pr_out_transport_id(
112 struct se_portal_group *se_tpg, 109 struct se_portal_group *se_tpg,
113 const char *buf, 110 const char *buf,
114 u32 *out_tid_len, 111 u32 *out_tid_len,
115 char **port_nexus_ptr) 112 char **port_nexus_ptr)
116 { 113 {
117 /* 114 /*
118 * Assume the FORMAT CODE 00b from spc4r17, 7.5.4.7 TransportID 115 * Assume the FORMAT CODE 00b from spc4r17, 7.5.4.7 TransportID
119 * for initiator ports using SCSI over SAS Serial SCSI Protocol 116 * for initiator ports using SCSI over SAS Serial SCSI Protocol
120 * 117 *
121 * The TransportID for a SAS Initiator Port is of fixed size of 118 * The TransportID for a SAS Initiator Port is of fixed size of
122 * 24 bytes, and SAS does not contain an I_T nexus identifier, 119 * 24 bytes, and SAS does not contain an I_T nexus identifier,
123 * so we return with *port_nexus_ptr set to NULL. 120 * so we return with *port_nexus_ptr set to NULL.
124 */ 121 */
125 *port_nexus_ptr = NULL; 122 *port_nexus_ptr = NULL;
126 *out_tid_len = 24; 123 *out_tid_len = 24;
127 124
128 return (char *)&buf[4]; 125 return (char *)&buf[4];
129 } 126 }
130 EXPORT_SYMBOL(sas_parse_pr_out_transport_id); 127 EXPORT_SYMBOL(sas_parse_pr_out_transport_id);
131 128
132 /* 129 /*
133 * Handlers for Fibre Channel Protocol (FCP) 130 * Handlers for Fibre Channel Protocol (FCP)
134 */ 131 */
135 u8 fc_get_fabric_proto_ident(struct se_portal_group *se_tpg) 132 u8 fc_get_fabric_proto_ident(struct se_portal_group *se_tpg)
136 { 133 {
137 return 0x0; /* 0 = fcp-2 per SPC4 section 7.5.1 */ 134 return 0x0; /* 0 = fcp-2 per SPC4 section 7.5.1 */
138 } 135 }
139 EXPORT_SYMBOL(fc_get_fabric_proto_ident); 136 EXPORT_SYMBOL(fc_get_fabric_proto_ident);
140 137
141 u32 fc_get_pr_transport_id_len( 138 u32 fc_get_pr_transport_id_len(
142 struct se_portal_group *se_tpg, 139 struct se_portal_group *se_tpg,
143 struct se_node_acl *se_nacl, 140 struct se_node_acl *se_nacl,
144 struct t10_pr_registration *pr_reg, 141 struct t10_pr_registration *pr_reg,
145 int *format_code) 142 int *format_code)
146 { 143 {
147 *format_code = 0; 144 *format_code = 0;
148 /* 145 /*
149 * The FC Transport ID is a hardcoded 24-byte length 146 * The FC Transport ID is a hardcoded 24-byte length
150 */ 147 */
151 return 24; 148 return 24;
152 } 149 }
153 EXPORT_SYMBOL(fc_get_pr_transport_id_len); 150 EXPORT_SYMBOL(fc_get_pr_transport_id_len);
154 151
155 u32 fc_get_pr_transport_id( 152 u32 fc_get_pr_transport_id(
156 struct se_portal_group *se_tpg, 153 struct se_portal_group *se_tpg,
157 struct se_node_acl *se_nacl, 154 struct se_node_acl *se_nacl,
158 struct t10_pr_registration *pr_reg, 155 struct t10_pr_registration *pr_reg,
159 int *format_code, 156 int *format_code,
160 unsigned char *buf) 157 unsigned char *buf)
161 { 158 {
162 unsigned char *ptr; 159 unsigned char *ptr;
163 int i, ret; 160 int i, ret;
164 u32 off = 8; 161 u32 off = 8;
165 162
166 /* 163 /*
167 * PROTOCOL IDENTIFIER is 0h for FCP-2 164 * PROTOCOL IDENTIFIER is 0h for FCP-2
168 * 165 *
169 * From spc4r17, 7.5.4.2 TransportID for initiator ports using 166 * From spc4r17, 7.5.4.2 TransportID for initiator ports using
170 * SCSI over Fibre Channel 167 * SCSI over Fibre Channel
171 * 168 *
172 * We convert the ASCII formatted N Port name into a binary 169 * We convert the ASCII formatted N Port name into a binary
173 * encoded TransportID. 170 * encoded TransportID.
174 */ 171 */
175 ptr = &se_nacl->initiatorname[0]; 172 ptr = &se_nacl->initiatorname[0];
176 173
177 for (i = 0; i < 24; ) { 174 for (i = 0; i < 24; ) {
178 if (!strncmp(&ptr[i], ":", 1)) { 175 if (!strncmp(&ptr[i], ":", 1)) {
179 i++; 176 i++;
180 continue; 177 continue;
181 } 178 }
182 ret = hex2bin(&buf[off++], &ptr[i], 1); 179 ret = hex2bin(&buf[off++], &ptr[i], 1);
183 if (ret < 0) 180 if (ret < 0)
184 pr_debug("fc transport_id: invalid hex string\n"); 181 pr_debug("fc transport_id: invalid hex string\n");
185 i += 2; 182 i += 2;
186 } 183 }
187 /* 184 /*
188 * The FC Transport ID is a hardcoded 24-byte length 185 * The FC Transport ID is a hardcoded 24-byte length
189 */ 186 */
190 return 24; 187 return 24;
191 } 188 }
192 EXPORT_SYMBOL(fc_get_pr_transport_id); 189 EXPORT_SYMBOL(fc_get_pr_transport_id);
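The loop above walks the colon-separated ASCII WWPN two hex digits at a time. For a hypothetical initiator named "21:00:00:e0:8b:05:05:04" (value invented for illustration), and again assuming a zeroed buffer so buf[0] already reads 0h for FCP-2:

/*
 *   "21:00:00:e0:8b:05:05:04"   23 ASCII characters, so i stays < 24
 *     -> buf[8..15] = 21 00 00 e0 8b 05 05 04
 *
 * Each ':' bumps i by one and is skipped; each hex2bin() call consumes
 * two digits and writes a single byte at buf[off++].
 */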
193 190
194 char *fc_parse_pr_out_transport_id( 191 char *fc_parse_pr_out_transport_id(
195 struct se_portal_group *se_tpg, 192 struct se_portal_group *se_tpg,
196 const char *buf, 193 const char *buf,
197 u32 *out_tid_len, 194 u32 *out_tid_len,
198 char **port_nexus_ptr) 195 char **port_nexus_ptr)
199 { 196 {
200 /* 197 /*
201 * The TransportID for a FC N Port is of fixed size of 198 * The TransportID for a FC N Port is of fixed size of
202 * 24 bytes, and FC does not contain an I_T nexus identifier, 199 * 24 bytes, and FC does not contain an I_T nexus identifier,
203 * so we return with *port_nexus_ptr set to NULL. 200 * so we return with *port_nexus_ptr set to NULL.
204 */ 201 */
205 *port_nexus_ptr = NULL; 202 *port_nexus_ptr = NULL;
206 *out_tid_len = 24; 203 *out_tid_len = 24;
207 204
208 return (char *)&buf[8]; 205 return (char *)&buf[8];
209 } 206 }
210 EXPORT_SYMBOL(fc_parse_pr_out_transport_id); 207 EXPORT_SYMBOL(fc_parse_pr_out_transport_id);
211 208
212 /* 209 /*
213 * Handlers for Internet Small Computer Systems Interface (iSCSI) 210 * Handlers for Internet Small Computer Systems Interface (iSCSI)
214 */ 211 */
215 212
216 u8 iscsi_get_fabric_proto_ident(struct se_portal_group *se_tpg) 213 u8 iscsi_get_fabric_proto_ident(struct se_portal_group *se_tpg)
217 { 214 {
218 /* 215 /*
219 * This value is defined for "Internet SCSI (iSCSI)" 216 * This value is defined for "Internet SCSI (iSCSI)"
220 * in spc4r17 section 7.5.1 Table 362 217 * in spc4r17 section 7.5.1 Table 362
221 */ 218 */
222 return 0x5; 219 return 0x5;
223 } 220 }
224 EXPORT_SYMBOL(iscsi_get_fabric_proto_ident); 221 EXPORT_SYMBOL(iscsi_get_fabric_proto_ident);
225 222
226 u32 iscsi_get_pr_transport_id( 223 u32 iscsi_get_pr_transport_id(
227 struct se_portal_group *se_tpg, 224 struct se_portal_group *se_tpg,
228 struct se_node_acl *se_nacl, 225 struct se_node_acl *se_nacl,
229 struct t10_pr_registration *pr_reg, 226 struct t10_pr_registration *pr_reg,
230 int *format_code, 227 int *format_code,
231 unsigned char *buf) 228 unsigned char *buf)
232 { 229 {
233 u32 off = 4, padding = 0; 230 u32 off = 4, padding = 0;
234 u16 len = 0; 231 u16 len = 0;
235 232
236 spin_lock_irq(&se_nacl->nacl_sess_lock); 233 spin_lock_irq(&se_nacl->nacl_sess_lock);
237 /* 234 /*
238 * Set PROTOCOL IDENTIFIER to 5h for iSCSI 235 * Set PROTOCOL IDENTIFIER to 5h for iSCSI
239 */ 236 */
240 buf[0] = 0x05; 237 buf[0] = 0x05;
241 /* 238 /*
242 * From spc4r17 Section 7.5.4.6: TransportID for initiator 239 * From spc4r17 Section 7.5.4.6: TransportID for initiator
243 * ports using SCSI over iSCSI. 240 * ports using SCSI over iSCSI.
244 * 241 *
245 * The null-terminated, null-padded (see 4.4.2) ISCSI NAME field 242 * The null-terminated, null-padded (see 4.4.2) ISCSI NAME field
246 * shall contain the iSCSI name of an iSCSI initiator node (see 243 * shall contain the iSCSI name of an iSCSI initiator node (see
247 * RFC 3720). The first ISCSI NAME field byte containing an ASCII 244 * RFC 3720). The first ISCSI NAME field byte containing an ASCII
248 * null character terminates the ISCSI NAME field without regard for 245 * null character terminates the ISCSI NAME field without regard for
249 * the specified length of the iSCSI TransportID or the contents of 246 * the specified length of the iSCSI TransportID or the contents of
250 * the ADDITIONAL LENGTH field. 247 * the ADDITIONAL LENGTH field.
251 */ 248 */
252 len = sprintf(&buf[off], "%s", se_nacl->initiatorname); 249 len = sprintf(&buf[off], "%s", se_nacl->initiatorname);
253 /* 250 /*
254 * Add Extra byte for NULL terminator 251 * Add Extra byte for NULL terminator
255 */ 252 */
256 len++; 253 len++;
257 /* 254 /*
258 * If there is an ISID present with the registration and *format_code 255 * If there is an ISID present with the registration and *format_code
259 * == 1, use the iSCSI Initiator port TransportID format. 256 * == 1, use the iSCSI Initiator port TransportID format.
260 * 257 *
261 * Otherwise use iSCSI Initiator device TransportID format that 258 * Otherwise use iSCSI Initiator device TransportID format that
262 * does not contain the ASCII encoded iSCSI Initiator iSID value 259 * does not contain the ASCII encoded iSCSI Initiator iSID value
263 * provided by the iSCSI Initiator during the iSCSI login process. 260 * provided by the iSCSI Initiator during the iSCSI login process.
264 */ 261 */
265 if ((*format_code == 1) && (pr_reg->isid_present_at_reg)) { 262 if ((*format_code == 1) && (pr_reg->isid_present_at_reg)) {
266 /* 263 /*
267 * Set FORMAT CODE 01b for iSCSI Initiator port TransportID 264 * Set FORMAT CODE 01b for iSCSI Initiator port TransportID
268 * format. 265 * format.
269 */ 266 */
270 buf[0] |= 0x40; 267 buf[0] |= 0x40;
271 /* 268 /*
272 * From spc4r17 Section 7.5.4.6: TransportID for initiator 269 * From spc4r17 Section 7.5.4.6: TransportID for initiator
273 * ports using SCSI over iSCSI. Table 390 270 * ports using SCSI over iSCSI. Table 390
274 * 271 *
275 * The SEPARATOR field shall contain the five ASCII 272 * The SEPARATOR field shall contain the five ASCII
276 * characters ",i,0x". 273 * characters ",i,0x".
277 * 274 *
278 * The null-terminated, null-padded ISCSI INITIATOR SESSION ID 275 * The null-terminated, null-padded ISCSI INITIATOR SESSION ID
279 * field shall contain the iSCSI initiator session identifier 276 * field shall contain the iSCSI initiator session identifier
280 * (see RFC 3720) in the form of ASCII characters that are the 277 * (see RFC 3720) in the form of ASCII characters that are the
281 * hexadecimal digits converted from the binary iSCSI initiator 278 * hexadecimal digits converted from the binary iSCSI initiator
282 * session identifier value. The first ISCSI INITIATOR SESSION 279 * session identifier value. The first ISCSI INITIATOR SESSION
283 * ID field byte containing an ASCII null character terminates it. 280 * ID field byte containing an ASCII null character terminates it.
284 */ 281 */
285 buf[off+len] = 0x2c; off++; /* ASCII Character: "," */ 282 buf[off+len] = 0x2c; off++; /* ASCII Character: "," */
286 buf[off+len] = 0x69; off++; /* ASCII Character: "i" */ 283 buf[off+len] = 0x69; off++; /* ASCII Character: "i" */
287 buf[off+len] = 0x2c; off++; /* ASCII Character: "," */ 284 buf[off+len] = 0x2c; off++; /* ASCII Character: "," */
288 buf[off+len] = 0x30; off++; /* ASCII Character: "0" */ 285 buf[off+len] = 0x30; off++; /* ASCII Character: "0" */
289 buf[off+len] = 0x78; off++; /* ASCII Character: "x" */ 286 buf[off+len] = 0x78; off++; /* ASCII Character: "x" */
290 len += 5; 287 len += 5;
291 buf[off+len] = pr_reg->pr_reg_isid[0]; off++; 288 buf[off+len] = pr_reg->pr_reg_isid[0]; off++;
292 buf[off+len] = pr_reg->pr_reg_isid[1]; off++; 289 buf[off+len] = pr_reg->pr_reg_isid[1]; off++;
293 buf[off+len] = pr_reg->pr_reg_isid[2]; off++; 290 buf[off+len] = pr_reg->pr_reg_isid[2]; off++;
294 buf[off+len] = pr_reg->pr_reg_isid[3]; off++; 291 buf[off+len] = pr_reg->pr_reg_isid[3]; off++;
295 buf[off+len] = pr_reg->pr_reg_isid[4]; off++; 292 buf[off+len] = pr_reg->pr_reg_isid[4]; off++;
296 buf[off+len] = pr_reg->pr_reg_isid[5]; off++; 293 buf[off+len] = pr_reg->pr_reg_isid[5]; off++;
297 buf[off+len] = '\0'; off++; 294 buf[off+len] = '\0'; off++;
298 len += 7; 295 len += 7;
299 } 296 }
300 spin_unlock_irq(&se_nacl->nacl_sess_lock); 297 spin_unlock_irq(&se_nacl->nacl_sess_lock);
301 /* 298 /*
302 * The ADDITIONAL LENGTH field specifies the number of bytes that follow 299 * The ADDITIONAL LENGTH field specifies the number of bytes that follow
303 * in the TransportID. The additional length shall be at least 20 and 300 * in the TransportID. The additional length shall be at least 20 and
304 * shall be a multiple of four. 301 * shall be a multiple of four.
305 */ 302 */
306 padding = ((-len) & 3); 303 padding = ((-len) & 3);
307 if (padding != 0) 304 if (padding != 0)
308 len += padding; 305 len += padding;
309 306
310 buf[2] = ((len >> 8) & 0xff); 307 buf[2] = ((len >> 8) & 0xff);
311 buf[3] = (len & 0xff); 308 buf[3] = (len & 0xff);
312 /* 309 /*
313 * Increment value for total payload + header length for 310 * Increment value for total payload + header length for
314 * full status descriptor 311 * full status descriptor
315 */ 312 */
316 len += 4; 313 len += 4;
317 314
318 return len; 315 return len;
319 } 316 }
320 EXPORT_SYMBOL(iscsi_get_pr_transport_id); 317 EXPORT_SYMBOL(iscsi_get_pr_transport_id);
321 318
322 u32 iscsi_get_pr_transport_id_len( 319 u32 iscsi_get_pr_transport_id_len(
323 struct se_portal_group *se_tpg, 320 struct se_portal_group *se_tpg,
324 struct se_node_acl *se_nacl, 321 struct se_node_acl *se_nacl,
325 struct t10_pr_registration *pr_reg, 322 struct t10_pr_registration *pr_reg,
326 int *format_code) 323 int *format_code)
327 { 324 {
328 u32 len = 0, padding = 0; 325 u32 len = 0, padding = 0;
329 326
330 spin_lock_irq(&se_nacl->nacl_sess_lock); 327 spin_lock_irq(&se_nacl->nacl_sess_lock);
331 len = strlen(se_nacl->initiatorname); 328 len = strlen(se_nacl->initiatorname);
332 /* 329 /*
333 * Add extra byte for NULL terminator 330 * Add extra byte for NULL terminator
334 */ 331 */
335 len++; 332 len++;
336 /* 333 /*
337 * If there is an ISID present with the registration, use format code: 334 * If there is an ISID present with the registration, use format code:
338 * 01b: iSCSI Initiator port TransportID format 335 * 01b: iSCSI Initiator port TransportID format
339 * 336 *
340 * If there is not an active iSCSI session, use format code: 337 * If there is not an active iSCSI session, use format code:
341 * 00b: iSCSI Initiator device TransportID format 338 * 00b: iSCSI Initiator device TransportID format
342 */ 339 */
343 if (pr_reg->isid_present_at_reg) { 340 if (pr_reg->isid_present_at_reg) {
344 len += 5; /* For ",i,0x" ASCII separator */ 341 len += 5; /* For ",i,0x" ASCII separator */
345 len += 7; /* For iSCSI Initiator Session ID + Null terminator */ 342 len += 7; /* For iSCSI Initiator Session ID + Null terminator */
346 *format_code = 1; 343 *format_code = 1;
347 } else 344 } else
348 *format_code = 0; 345 *format_code = 0;
349 spin_unlock_irq(&se_nacl->nacl_sess_lock); 346 spin_unlock_irq(&se_nacl->nacl_sess_lock);
350 /* 347 /*
351 * The ADDITIONAL LENGTH field specifies the number of bytes that follow 348 * The ADDITIONAL LENGTH field specifies the number of bytes that follow
352 * in the TransportID. The additional length shall be at least 20 and 349 * in the TransportID. The additional length shall be at least 20 and
353 * shall be a multiple of four. 350 * shall be a multiple of four.
354 */ 351 */
355 padding = ((-len) & 3); 352 padding = ((-len) & 3);
356 if (padding != 0) 353 if (padding != 0)
357 len += padding; 354 len += padding;
358 /* 355 /*
359 * Increment value for total payload + header length for 356 * Increment value for total payload + header length for
360 * full status descriptor 357 * full status descriptor
361 */ 358 */
362 len += 4; 359 len += 4;
363 360
364 return len; 361 return len;
365 } 362 }
366 EXPORT_SYMBOL(iscsi_get_pr_transport_id_len); 363 EXPORT_SYMBOL(iscsi_get_pr_transport_id_len);
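The ((-len) & 3) idiom used in both functions above rounds len up to the next multiple of four, which is exactly the "shall be a multiple of four" clause. A worked example with an invented initiator name:

/*
 *   strlen("iqn.1993-08.org.debian:01:abcdef")  == 32
 *   + 1  NULL terminator                         -> 33
 *   + 5  ",i,0x" separator (ISID present)        -> 38
 *   + 7  ISID + NULL terminator                  -> 45
 *   padding = (-45) & 3 == 3                     -> 48 (multiple of 4)
 *   + 4  header for the full status descriptor   -> 52 returned
 */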
367 364
368 char *iscsi_parse_pr_out_transport_id( 365 char *iscsi_parse_pr_out_transport_id(
369 struct se_portal_group *se_tpg, 366 struct se_portal_group *se_tpg,
370 const char *buf, 367 const char *buf,
371 u32 *out_tid_len, 368 u32 *out_tid_len,
372 char **port_nexus_ptr) 369 char **port_nexus_ptr)
373 { 370 {
374 char *p; 371 char *p;
375 u32 tid_len, padding; 372 u32 tid_len, padding;
376 int i; 373 int i;
377 u16 add_len; 374 u16 add_len;
378 u8 format_code = (buf[0] & 0xc0); 375 u8 format_code = (buf[0] & 0xc0);
379 /* 376 /*
380 * Check for FORMAT CODE 00b or 01b from spc4r17, section 7.5.4.6: 377 * Check for FORMAT CODE 00b or 01b from spc4r17, section 7.5.4.6:
381 * 378 *
382 * TransportID for initiator ports using SCSI over iSCSI, 379 * TransportID for initiator ports using SCSI over iSCSI,
383 * from Table 388 -- iSCSI TransportID formats. 380 * from Table 388 -- iSCSI TransportID formats.
384 * 381 *
385 * 00b Initiator port is identified using the world wide unique 382 * 00b Initiator port is identified using the world wide unique
386 * SCSI device name of the iSCSI initiator 383 * SCSI device name of the iSCSI initiator
387 * device containing the initiator port (see table 389). 384 * device containing the initiator port (see table 389).
388 * 01b Initiator port is identified using the world wide unique 385 * 01b Initiator port is identified using the world wide unique
389 * initiator port identifier (see table 390). 10b to 11b 386 * initiator port identifier (see table 390). 10b to 11b
390 * Reserved 387 * Reserved
391 */ 388 */
392 if ((format_code != 0x00) && (format_code != 0x40)) { 389 if ((format_code != 0x00) && (format_code != 0x40)) {
393 pr_err("Illegal format code: 0x%02x for iSCSI" 390 pr_err("Illegal format code: 0x%02x for iSCSI"
394 " Initiator Transport ID\n", format_code); 391 " Initiator Transport ID\n", format_code);
395 return NULL; 392 return NULL;
396 } 393 }
397 /* 394 /*
398 * If the caller wants the TransportID Length, we set that value for the 395 * If the caller wants the TransportID Length, we set that value for the
399 * entire iSCSI Transport ID now. 396 * entire iSCSI Transport ID now.
400 */ 397 */
401 if (out_tid_len != NULL) { 398 if (out_tid_len != NULL) {
402 add_len = ((buf[2] & 0xff) << 8); 399 add_len = ((buf[2] & 0xff) << 8);
403 add_len |= (buf[3] & 0xff); 400 add_len |= (buf[3] & 0xff);
404 401
405 tid_len = strlen((char *)&buf[4]); 402 tid_len = strlen((char *)&buf[4]);
406 tid_len += 4; /* Add four bytes for iSCSI Transport ID header */ 403 tid_len += 4; /* Add four bytes for iSCSI Transport ID header */
407 tid_len += 1; /* Add one byte for NULL terminator */ 404 tid_len += 1; /* Add one byte for NULL terminator */
408 padding = ((-tid_len) & 3); 405 padding = ((-tid_len) & 3);
409 if (padding != 0) 406 if (padding != 0)
410 tid_len += padding; 407 tid_len += padding;
411 408
412 if ((add_len + 4) != tid_len) { 409 if ((add_len + 4) != tid_len) {
413 pr_debug("LIO-Target Extracted add_len: %hu " 410 pr_debug("LIO-Target Extracted add_len: %hu "
414 "does not match calculated tid_len: %u," 411 "does not match calculated tid_len: %u,"
415 " using tid_len instead\n", add_len+4, tid_len); 412 " using tid_len instead\n", add_len+4, tid_len);
416 *out_tid_len = tid_len; 413 *out_tid_len = tid_len;
417 } else 414 } else
418 *out_tid_len = (add_len + 4); 415 *out_tid_len = (add_len + 4);
419 } 416 }
420 /* 417 /*
421 * Check for ',i,0x' separator between iSCSI Name and iSCSI Initiator 418 * Check for ',i,0x' separator between iSCSI Name and iSCSI Initiator
422 * Session ID as defined in Table 390 - iSCSI initiator port TransportID 419 * Session ID as defined in Table 390 - iSCSI initiator port TransportID
423 * format. 420 * format.
424 */ 421 */
425 if (format_code == 0x40) { 422 if (format_code == 0x40) {
426 p = strstr((char *)&buf[4], ",i,0x"); 423 p = strstr((char *)&buf[4], ",i,0x");
427 if (!p) { 424 if (!p) {
428 pr_err("Unable to locate \",i,0x\" separator" 425 pr_err("Unable to locate \",i,0x\" separator"
429 " for Initiator port identifier: %s\n", 426 " for Initiator port identifier: %s\n",
430 (char *)&buf[4]); 427 (char *)&buf[4]);
431 return NULL; 428 return NULL;
432 } 429 }
433 *p = '\0'; /* Terminate iSCSI Name */ 430 *p = '\0'; /* Terminate iSCSI Name */
434 p += 5; /* Skip over ",i,0x" separator */ 431 p += 5; /* Skip over ",i,0x" separator */
435 432
436 *port_nexus_ptr = p; 433 *port_nexus_ptr = p;
437 /* 434 /*
438 * Go ahead and do the lower case conversion of the received 435 * Go ahead and do the lower case conversion of the received
439 * 12 ASCII characters representing the ISID in the TransportID 436 * 12 ASCII characters representing the ISID in the TransportID
440 * for comparison against the running iSCSI session's ISID from 437 * for comparison against the running iSCSI session's ISID from
441 * iscsi_target.c:lio_sess_get_initiator_sid() 438 * iscsi_target.c:lio_sess_get_initiator_sid()
442 */ 439 */
443 for (i = 0; i < 12; i++) { 440 for (i = 0; i < 12; i++) {
444 if (isdigit(*p)) { 441 if (isdigit(*p)) {
445 p++; 442 p++;
446 continue; 443 continue;
447 } 444 }
448 *p = tolower(*p); 445 *p = tolower(*p);
449 p++; 446 p++;
450 } 447 }
451 } 448 }
452 449
453 return (char *)&buf[4]; 450 return (char *)&buf[4];
454 } 451 }
455 EXPORT_SYMBOL(iscsi_parse_pr_out_transport_id); 452 EXPORT_SYMBOL(iscsi_parse_pr_out_transport_id);
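Putting the parser together: for a hypothetical FORMAT CODE 01b TransportID whose payload at &buf[4] reads "iqn.1993-08.org.debian:01:abcdef,i,0x00023D000051" (contents invented for illustration), the in-place edits above yield:

/*
 *   returned pointer -> "iqn.1993-08.org.debian:01:abcdef"
 *                       (the ',' was overwritten with '\0')
 *   *port_nexus_ptr  -> "00023d000051", past ",i,0x", with the hex
 *                       digits lower-cased in place for the ISID compare
 *   *out_tid_len     -> the 4-byte header plus the padded payload length
 */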
456 453
drivers/target/target_core_file.c
1 /******************************************************************************* 1 /*******************************************************************************
2 * Filename: target_core_file.c 2 * Filename: target_core_file.c
3 * 3 *
4 * This file contains the Storage Engine <-> FILEIO transport specific functions 4 * This file contains the Storage Engine <-> FILEIO transport specific functions
5 * 5 *
6 * Copyright (c) 2005 PyX Technologies, Inc. 6 * Copyright (c) 2005 PyX Technologies, Inc.
7 * Copyright (c) 2005-2006 SBE, Inc. All Rights Reserved. 7 * Copyright (c) 2005-2006 SBE, Inc. All Rights Reserved.
8 * Copyright (c) 2007-2010 Rising Tide Systems 8 * Copyright (c) 2007-2010 Rising Tide Systems
9 * Copyright (c) 2008-2010 Linux-iSCSI.org 9 * Copyright (c) 2008-2010 Linux-iSCSI.org
10 * 10 *
11 * Nicholas A. Bellinger <nab@kernel.org> 11 * Nicholas A. Bellinger <nab@kernel.org>
12 * 12 *
13 * This program is free software; you can redistribute it and/or modify 13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by 14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or 15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version. 16 * (at your option) any later version.
17 * 17 *
18 * This program is distributed in the hope that it will be useful, 18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of 19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details. 21 * GNU General Public License for more details.
22 * 22 *
23 * You should have received a copy of the GNU General Public License 23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software 24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 25 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
26 * 26 *
27 ******************************************************************************/ 27 ******************************************************************************/
28 28
29 #include <linux/string.h> 29 #include <linux/string.h>
30 #include <linux/parser.h> 30 #include <linux/parser.h>
31 #include <linux/timer.h> 31 #include <linux/timer.h>
32 #include <linux/blkdev.h> 32 #include <linux/blkdev.h>
33 #include <linux/slab.h> 33 #include <linux/slab.h>
34 #include <linux/spinlock.h> 34 #include <linux/spinlock.h>
35 #include <linux/module.h> 35 #include <linux/module.h>
36 #include <scsi/scsi.h> 36 #include <scsi/scsi.h>
37 #include <scsi/scsi_host.h> 37 #include <scsi/scsi_host.h>
38 38
39 #include <target/target_core_base.h> 39 #include <target/target_core_base.h>
40 #include <target/target_core_device.h> 40 #include <target/target_core_backend.h>
41 #include <target/target_core_transport.h>
42 41
43 #include "target_core_file.h" 42 #include "target_core_file.h"
44 43
45 static struct se_subsystem_api fileio_template; 44 static struct se_subsystem_api fileio_template;
46 45
47 /* fd_attach_hba(): (Part of se_subsystem_api_t template) 46 /* fd_attach_hba(): (Part of se_subsystem_api_t template)
48 * 47 *
49 * 48 *
50 */ 49 */
51 static int fd_attach_hba(struct se_hba *hba, u32 host_id) 50 static int fd_attach_hba(struct se_hba *hba, u32 host_id)
52 { 51 {
53 struct fd_host *fd_host; 52 struct fd_host *fd_host;
54 53
55 fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL); 54 fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL);
56 if (!fd_host) { 55 if (!fd_host) {
57 pr_err("Unable to allocate memory for struct fd_host\n"); 56 pr_err("Unable to allocate memory for struct fd_host\n");
58 return -ENOMEM; 57 return -ENOMEM;
59 } 58 }
60 59
61 fd_host->fd_host_id = host_id; 60 fd_host->fd_host_id = host_id;
62 61
63 hba->hba_ptr = fd_host; 62 hba->hba_ptr = fd_host;
64 63
65 pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic" 64 pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
66 " Target Core Stack %s\n", hba->hba_id, FD_VERSION, 65 " Target Core Stack %s\n", hba->hba_id, FD_VERSION,
67 TARGET_CORE_MOD_VERSION); 66 TARGET_CORE_MOD_VERSION);
68 pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic" 67 pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
69 " MaxSectors: %u\n", 68 " MaxSectors: %u\n",
70 hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS); 69 hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS);
71 70
72 return 0; 71 return 0;
73 } 72 }
74 73
75 static void fd_detach_hba(struct se_hba *hba) 74 static void fd_detach_hba(struct se_hba *hba)
76 { 75 {
77 struct fd_host *fd_host = hba->hba_ptr; 76 struct fd_host *fd_host = hba->hba_ptr;
78 77
79 pr_debug("CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic" 78 pr_debug("CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic"
80 " Target Core\n", hba->hba_id, fd_host->fd_host_id); 79 " Target Core\n", hba->hba_id, fd_host->fd_host_id);
81 80
82 kfree(fd_host); 81 kfree(fd_host);
83 hba->hba_ptr = NULL; 82 hba->hba_ptr = NULL;
84 } 83 }
85 84
86 static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name) 85 static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name)
87 { 86 {
88 struct fd_dev *fd_dev; 87 struct fd_dev *fd_dev;
89 struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr; 88 struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;
90 89
91 fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL); 90 fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL);
92 if (!fd_dev) { 91 if (!fd_dev) {
93 pr_err("Unable to allocate memory for struct fd_dev\n"); 92 pr_err("Unable to allocate memory for struct fd_dev\n");
94 return NULL; 93 return NULL;
95 } 94 }
96 95
97 fd_dev->fd_host = fd_host; 96 fd_dev->fd_host = fd_host;
98 97
99 pr_debug("FILEIO: Allocated fd_dev for %p\n", name); 98 pr_debug("FILEIO: Allocated fd_dev for %p\n", name);
100 99
101 return fd_dev; 100 return fd_dev;
102 } 101 }
103 102
104 /* fd_create_virtdevice(): (Part of se_subsystem_api_t template) 103 /* fd_create_virtdevice(): (Part of se_subsystem_api_t template)
105 * 104 *
106 * 105 *
107 */ 106 */
108 static struct se_device *fd_create_virtdevice( 107 static struct se_device *fd_create_virtdevice(
109 struct se_hba *hba, 108 struct se_hba *hba,
110 struct se_subsystem_dev *se_dev, 109 struct se_subsystem_dev *se_dev,
111 void *p) 110 void *p)
112 { 111 {
113 char *dev_p = NULL; 112 char *dev_p = NULL;
114 struct se_device *dev; 113 struct se_device *dev;
115 struct se_dev_limits dev_limits; 114 struct se_dev_limits dev_limits;
116 struct queue_limits *limits; 115 struct queue_limits *limits;
117 struct fd_dev *fd_dev = (struct fd_dev *) p; 116 struct fd_dev *fd_dev = (struct fd_dev *) p;
118 struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr; 117 struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;
119 mm_segment_t old_fs; 118 mm_segment_t old_fs;
120 struct file *file; 119 struct file *file;
121 struct inode *inode = NULL; 120 struct inode *inode = NULL;
122 int dev_flags = 0, flags, ret = -EINVAL; 121 int dev_flags = 0, flags, ret = -EINVAL;
123 122
124 memset(&dev_limits, 0, sizeof(struct se_dev_limits)); 123 memset(&dev_limits, 0, sizeof(struct se_dev_limits));
125 124
126 old_fs = get_fs(); 125 old_fs = get_fs();
127 set_fs(get_ds()); 126 set_fs(get_ds());
128 dev_p = getname(fd_dev->fd_dev_name); 127 dev_p = getname(fd_dev->fd_dev_name);
129 set_fs(old_fs); 128 set_fs(old_fs);
130 129
131 if (IS_ERR(dev_p)) { 130 if (IS_ERR(dev_p)) {
132 pr_err("getname(%s) failed: %lu\n", 131 pr_err("getname(%s) failed: %lu\n",
133 fd_dev->fd_dev_name, IS_ERR(dev_p)); 132 fd_dev->fd_dev_name, IS_ERR(dev_p));
134 ret = PTR_ERR(dev_p); 133 ret = PTR_ERR(dev_p);
135 goto fail; 134 goto fail;
136 } 135 }
137 #if 0 136 #if 0
138 if (di->no_create_file) 137 if (di->no_create_file)
139 flags = O_RDWR | O_LARGEFILE; 138 flags = O_RDWR | O_LARGEFILE;
140 else 139 else
141 flags = O_RDWR | O_CREAT | O_LARGEFILE; 140 flags = O_RDWR | O_CREAT | O_LARGEFILE;
142 #else 141 #else
143 flags = O_RDWR | O_CREAT | O_LARGEFILE; 142 flags = O_RDWR | O_CREAT | O_LARGEFILE;
144 #endif 143 #endif
145 /* flags |= O_DIRECT; */ 144 /* flags |= O_DIRECT; */
146 /* 145 /*
147 * If fd_buffered_io=1 has not been set explicitly (the default), 146 * If fd_buffered_io=1 has not been set explicitly (the default),
148 * use O_SYNC to force FILEIO writes to disk. 147 * use O_SYNC to force FILEIO writes to disk.
149 */ 148 */
150 if (!(fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO)) 149 if (!(fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO))
151 flags |= O_SYNC; 150 flags |= O_SYNC;
152 151
153 file = filp_open(dev_p, flags, 0600); 152 file = filp_open(dev_p, flags, 0600);
154 if (IS_ERR(file)) { 153 if (IS_ERR(file)) {
155 pr_err("filp_open(%s) failed\n", dev_p); 154 pr_err("filp_open(%s) failed\n", dev_p);
156 ret = PTR_ERR(file); 155 ret = PTR_ERR(file);
157 goto fail; 156 goto fail;
158 } 157 }
159 if (!file || !file->f_dentry) { 158 if (!file || !file->f_dentry) {
160 pr_err("filp_open(%s) failed\n", dev_p); 159 pr_err("filp_open(%s) failed\n", dev_p);
161 goto fail; 160 goto fail;
162 } 161 }
163 fd_dev->fd_file = file; 162 fd_dev->fd_file = file;
164 /* 163 /*
165 * If using a block backend with this struct file, we extract 164 * If using a block backend with this struct file, we extract
166 * fd_dev->fd_[block,dev]_size from struct block_device. 165 * fd_dev->fd_[block,dev]_size from struct block_device.
167 * 166 *
168 * Otherwise, we use the passed fd_size= from configfs 167 * Otherwise, we use the passed fd_size= from configfs
169 */ 168 */
170 inode = file->f_mapping->host; 169 inode = file->f_mapping->host;
171 if (S_ISBLK(inode->i_mode)) { 170 if (S_ISBLK(inode->i_mode)) {
172 struct request_queue *q; 171 struct request_queue *q;
173 /* 172 /*
174 * Setup the local scope queue_limits from struct request_queue->limits 173 * Setup the local scope queue_limits from struct request_queue->limits
175 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits. 174 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
176 */ 175 */
177 q = bdev_get_queue(inode->i_bdev); 176 q = bdev_get_queue(inode->i_bdev);
178 limits = &dev_limits.limits; 177 limits = &dev_limits.limits;
179 limits->logical_block_size = bdev_logical_block_size(inode->i_bdev); 178 limits->logical_block_size = bdev_logical_block_size(inode->i_bdev);
180 limits->max_hw_sectors = queue_max_hw_sectors(q); 179 limits->max_hw_sectors = queue_max_hw_sectors(q);
181 limits->max_sectors = queue_max_sectors(q); 180 limits->max_sectors = queue_max_sectors(q);
182 /* 181 /*
183 * Determine the number of bytes from i_size_read() minus 182 * Determine the number of bytes from i_size_read() minus
184 * one (1) logical sector from underlying struct block_device 183 * one (1) logical sector from underlying struct block_device
185 */ 184 */
186 fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev); 185 fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
187 fd_dev->fd_dev_size = (i_size_read(file->f_mapping->host) - 186 fd_dev->fd_dev_size = (i_size_read(file->f_mapping->host) -
188 fd_dev->fd_block_size); 187 fd_dev->fd_block_size);
189 188
190 pr_debug("FILEIO: Using size: %llu bytes from struct" 189 pr_debug("FILEIO: Using size: %llu bytes from struct"
191 " block_device blocks: %llu logical_block_size: %d\n", 190 " block_device blocks: %llu logical_block_size: %d\n",
192 fd_dev->fd_dev_size, 191 fd_dev->fd_dev_size,
193 div_u64(fd_dev->fd_dev_size, fd_dev->fd_block_size), 192 div_u64(fd_dev->fd_dev_size, fd_dev->fd_block_size),
194 fd_dev->fd_block_size); 193 fd_dev->fd_block_size);
195 } else { 194 } else {
196 if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) { 195 if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
197 pr_err("FILEIO: Missing fd_dev_size=" 196 pr_err("FILEIO: Missing fd_dev_size="
198 " parameter, and no backing struct" 197 " parameter, and no backing struct"
199 " block_device\n"); 198 " block_device\n");
200 goto fail; 199 goto fail;
201 } 200 }
202 201
203 limits = &dev_limits.limits; 202 limits = &dev_limits.limits;
204 limits->logical_block_size = FD_BLOCKSIZE; 203 limits->logical_block_size = FD_BLOCKSIZE;
205 limits->max_hw_sectors = FD_MAX_SECTORS; 204 limits->max_hw_sectors = FD_MAX_SECTORS;
206 limits->max_sectors = FD_MAX_SECTORS; 205 limits->max_sectors = FD_MAX_SECTORS;
207 fd_dev->fd_block_size = FD_BLOCKSIZE; 206 fd_dev->fd_block_size = FD_BLOCKSIZE;
208 } 207 }
209 208
210 dev_limits.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH; 209 dev_limits.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
211 dev_limits.queue_depth = FD_DEVICE_QUEUE_DEPTH; 210 dev_limits.queue_depth = FD_DEVICE_QUEUE_DEPTH;
212 211
213 dev = transport_add_device_to_core_hba(hba, &fileio_template, 212 dev = transport_add_device_to_core_hba(hba, &fileio_template,
214 se_dev, dev_flags, fd_dev, 213 se_dev, dev_flags, fd_dev,
215 &dev_limits, "FILEIO", FD_VERSION); 214 &dev_limits, "FILEIO", FD_VERSION);
216 if (!dev) 215 if (!dev)
217 goto fail; 216 goto fail;
218 217
219 fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++; 218 fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
220 fd_dev->fd_queue_depth = dev->queue_depth; 219 fd_dev->fd_queue_depth = dev->queue_depth;
221 220
222 pr_debug("CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s," 221 pr_debug("CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
223 " %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id, 222 " %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
224 fd_dev->fd_dev_name, fd_dev->fd_dev_size); 223 fd_dev->fd_dev_name, fd_dev->fd_dev_size);
225 224
226 putname(dev_p); 225 putname(dev_p);
227 return dev; 226 return dev;
228 fail: 227 fail:
229 if (fd_dev->fd_file) { 228 if (fd_dev->fd_file) {
230 filp_close(fd_dev->fd_file, NULL); 229 filp_close(fd_dev->fd_file, NULL);
231 fd_dev->fd_file = NULL; 230 fd_dev->fd_file = NULL;
232 } 231 }
233 putname(dev_p); 232 putname(dev_p);
234 return ERR_PTR(ret); 233 return ERR_PTR(ret);
235 } 234 }
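The size computation in fd_create_virtdevice() differs by backing type: for a block device, fd_dev_size is derived from i_size_read() minus one logical block, while a regular file relies on the fd_dev_size= configfs parameter. A standalone sketch of the block-device case, with hypothetical sizes (not kernel code):

#include <stdio.h>

int main(void)
{
	/* Hypothetical 1 GiB backing block device, 512-byte logical blocks */
	unsigned long long i_size = 1073741824ULL;   /* i_size_read() */
	unsigned int block_size = 512;               /* bdev_logical_block_size() */

	unsigned long long fd_dev_size = i_size - block_size;
	printf("fd_dev_size=%llu blocks=%llu\n",
	       fd_dev_size, fd_dev_size / block_size);  /* 1073741312, 2097151 */
	return 0;
}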
236 235
237 /* fd_free_device(): (Part of se_subsystem_api_t template) 236 /* fd_free_device(): (Part of se_subsystem_api_t template)
238 * 237 *
239 * 238 *
240 */ 239 */
241 static void fd_free_device(void *p) 240 static void fd_free_device(void *p)
242 { 241 {
243 struct fd_dev *fd_dev = (struct fd_dev *) p; 242 struct fd_dev *fd_dev = (struct fd_dev *) p;
244 243
245 if (fd_dev->fd_file) { 244 if (fd_dev->fd_file) {
246 filp_close(fd_dev->fd_file, NULL); 245 filp_close(fd_dev->fd_file, NULL);
247 fd_dev->fd_file = NULL; 246 fd_dev->fd_file = NULL;
248 } 247 }
249 248
250 kfree(fd_dev); 249 kfree(fd_dev);
251 } 250 }
252 251
253 static inline struct fd_request *FILE_REQ(struct se_task *task) 252 static inline struct fd_request *FILE_REQ(struct se_task *task)
254 { 253 {
255 return container_of(task, struct fd_request, fd_task); 254 return container_of(task, struct fd_request, fd_task);
256 } 255 }
257 256
258 257
259 static struct se_task * 258 static struct se_task *
260 fd_alloc_task(unsigned char *cdb) 259 fd_alloc_task(unsigned char *cdb)
261 { 260 {
262 struct fd_request *fd_req; 261 struct fd_request *fd_req;
263 262
264 fd_req = kzalloc(sizeof(struct fd_request), GFP_KERNEL); 263 fd_req = kzalloc(sizeof(struct fd_request), GFP_KERNEL);
265 if (!fd_req) { 264 if (!fd_req) {
266 pr_err("Unable to allocate struct fd_request\n"); 265 pr_err("Unable to allocate struct fd_request\n");
267 return NULL; 266 return NULL;
268 } 267 }
269 268
270 return &fd_req->fd_task; 269 return &fd_req->fd_task;
271 } 270 }
272 271
273 static int fd_do_readv(struct se_task *task) 272 static int fd_do_readv(struct se_task *task)
274 { 273 {
275 struct fd_request *req = FILE_REQ(task); 274 struct fd_request *req = FILE_REQ(task);
276 struct se_device *se_dev = req->fd_task.task_se_cmd->se_dev; 275 struct se_device *se_dev = req->fd_task.task_se_cmd->se_dev;
277 struct fd_dev *dev = se_dev->dev_ptr; 276 struct fd_dev *dev = se_dev->dev_ptr;
278 struct file *fd = dev->fd_file; 277 struct file *fd = dev->fd_file;
279 struct scatterlist *sg = task->task_sg; 278 struct scatterlist *sg = task->task_sg;
280 struct iovec *iov; 279 struct iovec *iov;
281 mm_segment_t old_fs; 280 mm_segment_t old_fs;
282 loff_t pos = (task->task_lba * 281 loff_t pos = (task->task_lba *
283 se_dev->se_sub_dev->se_dev_attrib.block_size); 282 se_dev->se_sub_dev->se_dev_attrib.block_size);
284 int ret = 0, i; 283 int ret = 0, i;
285 284
286 iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL); 285 iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL);
287 if (!iov) { 286 if (!iov) {
288 pr_err("Unable to allocate fd_do_readv iov[]\n"); 287 pr_err("Unable to allocate fd_do_readv iov[]\n");
289 return -ENOMEM; 288 return -ENOMEM;
290 } 289 }
291 290
292 for_each_sg(task->task_sg, sg, task->task_sg_nents, i) { 291 for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
293 iov[i].iov_len = sg->length; 292 iov[i].iov_len = sg->length;
294 iov[i].iov_base = sg_virt(sg); 293 iov[i].iov_base = sg_virt(sg);
295 } 294 }
296 295
297 old_fs = get_fs(); 296 old_fs = get_fs();
298 set_fs(get_ds()); 297 set_fs(get_ds());
299 ret = vfs_readv(fd, &iov[0], task->task_sg_nents, &pos); 298 ret = vfs_readv(fd, &iov[0], task->task_sg_nents, &pos);
300 set_fs(old_fs); 299 set_fs(old_fs);
301 300
302 kfree(iov); 301 kfree(iov);
303 /* 302 /*
304 * Return zeros and GOOD status even if the READ did not return 303 * Return zeros and GOOD status even if the READ did not return
305 * the expected virt_size for struct file w/o a backing struct 304 * the expected virt_size for struct file w/o a backing struct
306 * block_device. 305 * block_device.
307 */ 306 */
308 if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) { 307 if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) {
309 if (ret < 0 || ret != task->task_size) { 308 if (ret < 0 || ret != task->task_size) {
310 pr_err("vfs_readv() returned %d," 309 pr_err("vfs_readv() returned %d,"
311 " expecting %d for S_ISBLK\n", ret, 310 " expecting %d for S_ISBLK\n", ret,
312 (int)task->task_size); 311 (int)task->task_size);
313 return (ret < 0 ? ret : -EINVAL); 312 return (ret < 0 ? ret : -EINVAL);
314 } 313 }
315 } else { 314 } else {
316 if (ret < 0) { 315 if (ret < 0) {
317 pr_err("vfs_readv() returned %d for non" 316 pr_err("vfs_readv() returned %d for non"
318 " S_ISBLK\n", ret); 317 " S_ISBLK\n", ret);
319 return ret; 318 return ret;
320 } 319 }
321 } 320 }
322 321
323 return 1; 322 return 1;
324 } 323 }
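fd_do_readv() (and fd_do_writev() below) builds one iovec per scatterlist entry and issues a single vectored VFS call at the byte offset task_lba * block_size. A user-space analogue using preadv(2); the path and sizes are hypothetical:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/uio.h>

int main(void)
{
	int fd = open("/tmp/fileio0.img", O_RDONLY);	/* illustrative path */
	if (fd < 0) { perror("open"); return 1; }

	unsigned long long lba = 8;	/* starting logical block */
	unsigned int block_size = 512;	/* hypothetical block size */
	off_t pos = lba * block_size;	/* same offset math as fd_do_readv() */

	char a[512], b[512];		/* stand-ins for two scatterlist entries */
	struct iovec iov[2] = {
		{ .iov_base = a, .iov_len = sizeof(a) },
		{ .iov_base = b, .iov_len = sizeof(b) },
	};

	ssize_t ret = preadv(fd, iov, 2, pos);	/* the kernel side uses vfs_readv() */
	printf("read %zd bytes at offset %lld\n", ret, (long long)pos);
	close(fd);
	return 0;
}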
325 324
326 static int fd_do_writev(struct se_task *task) 325 static int fd_do_writev(struct se_task *task)
327 { 326 {
328 struct fd_request *req = FILE_REQ(task); 327 struct fd_request *req = FILE_REQ(task);
329 struct se_device *se_dev = req->fd_task.task_se_cmd->se_dev; 328 struct se_device *se_dev = req->fd_task.task_se_cmd->se_dev;
330 struct fd_dev *dev = se_dev->dev_ptr; 329 struct fd_dev *dev = se_dev->dev_ptr;
331 struct file *fd = dev->fd_file; 330 struct file *fd = dev->fd_file;
332 struct scatterlist *sg = task->task_sg; 331 struct scatterlist *sg = task->task_sg;
333 struct iovec *iov; 332 struct iovec *iov;
334 mm_segment_t old_fs; 333 mm_segment_t old_fs;
335 loff_t pos = (task->task_lba * 334 loff_t pos = (task->task_lba *
336 se_dev->se_sub_dev->se_dev_attrib.block_size); 335 se_dev->se_sub_dev->se_dev_attrib.block_size);
337 int ret, i = 0; 336 int ret, i = 0;
338 337
339 iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL); 338 iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL);
340 if (!iov) { 339 if (!iov) {
341 pr_err("Unable to allocate fd_do_writev iov[]\n"); 340 pr_err("Unable to allocate fd_do_writev iov[]\n");
342 return -ENOMEM; 341 return -ENOMEM;
343 } 342 }
344 343
345 for_each_sg(task->task_sg, sg, task->task_sg_nents, i) { 344 for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
346 iov[i].iov_len = sg->length; 345 iov[i].iov_len = sg->length;
347 iov[i].iov_base = sg_virt(sg); 346 iov[i].iov_base = sg_virt(sg);
348 } 347 }
349 348
350 old_fs = get_fs(); 349 old_fs = get_fs();
351 set_fs(get_ds()); 350 set_fs(get_ds());
352 ret = vfs_writev(fd, &iov[0], task->task_sg_nents, &pos); 351 ret = vfs_writev(fd, &iov[0], task->task_sg_nents, &pos);
353 set_fs(old_fs); 352 set_fs(old_fs);
354 353
355 kfree(iov); 354 kfree(iov);
356 355
357 if (ret < 0 || ret != task->task_size) { 356 if (ret < 0 || ret != task->task_size) {
358 pr_err("vfs_writev() returned %d\n", ret); 357 pr_err("vfs_writev() returned %d\n", ret);
359 return (ret < 0 ? ret : -EINVAL); 358 return (ret < 0 ? ret : -EINVAL);
360 } 359 }
361 360
362 return 1; 361 return 1;
363 } 362 }
364 363
365 static void fd_emulate_sync_cache(struct se_task *task) 364 static void fd_emulate_sync_cache(struct se_task *task)
366 { 365 {
367 struct se_cmd *cmd = task->task_se_cmd; 366 struct se_cmd *cmd = task->task_se_cmd;
368 struct se_device *dev = cmd->se_dev; 367 struct se_device *dev = cmd->se_dev;
369 struct fd_dev *fd_dev = dev->dev_ptr; 368 struct fd_dev *fd_dev = dev->dev_ptr;
370 int immed = (cmd->t_task_cdb[1] & 0x2); 369 int immed = (cmd->t_task_cdb[1] & 0x2);
371 loff_t start, end; 370 loff_t start, end;
372 int ret; 371 int ret;
373 372
374 /* 373 /*
375 * If the Immediate bit is set, queue up the GOOD response 374 * If the Immediate bit is set, queue up the GOOD response
376 * for this SYNCHRONIZE_CACHE op 375 * for this SYNCHRONIZE_CACHE op
377 */ 376 */
378 if (immed) 377 if (immed)
379 transport_complete_sync_cache(cmd, 1); 378 transport_complete_sync_cache(cmd, 1);
380 379
381 /* 380 /*
382 * Determine if we will be flushing the entire device. 381 * Determine if we will be flushing the entire device.
383 */ 382 */
384 if (cmd->t_task_lba == 0 && cmd->data_length == 0) { 383 if (cmd->t_task_lba == 0 && cmd->data_length == 0) {
385 start = 0; 384 start = 0;
386 end = LLONG_MAX; 385 end = LLONG_MAX;
387 } else { 386 } else {
388 start = cmd->t_task_lba * dev->se_sub_dev->se_dev_attrib.block_size; 387 start = cmd->t_task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
389 if (cmd->data_length) 388 if (cmd->data_length)
390 end = start + cmd->data_length; 389 end = start + cmd->data_length;
391 else 390 else
392 end = LLONG_MAX; 391 end = LLONG_MAX;
393 } 392 }
394 393
395 ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1); 394 ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
396 if (ret != 0) 395 if (ret != 0)
397 pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret); 396 pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);
398 397
399 if (!immed) 398 if (!immed)
400 transport_complete_sync_cache(cmd, ret == 0); 399 transport_complete_sync_cache(cmd, ret == 0);
401 } 400 }
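The flush-range selection in fd_emulate_sync_cache() treats LBA 0 with a zero data_length as "flush the whole device". The same decision, extracted into a standalone helper (a sketch, not kernel code):

#include <limits.h>

struct flush_range { long long start; long long end; };

/* Mirrors fd_emulate_sync_cache(): pick the byte range handed to
 * vfs_fsync_range() from the SYNCHRONIZE_CACHE LBA and length. */
static struct flush_range pick_flush_range(unsigned long long lba,
					   unsigned int data_length,
					   unsigned int block_size)
{
	struct flush_range r;

	if (lba == 0 && data_length == 0) {
		r.start = 0;
		r.end = LLONG_MAX;		/* entire device */
	} else {
		r.start = lba * block_size;
		r.end = data_length ? r.start + (long long)data_length
				    : LLONG_MAX;
	}
	return r;
}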
402 401
403 /* 402 /*
404 * WRITE Force Unit Access (FUA) emulation on a per struct se_task 403 * WRITE Force Unit Access (FUA) emulation on a per struct se_task
405 * LBA range basis. 404 * LBA range basis.
406 */ 405 */
407 static void fd_emulate_write_fua(struct se_cmd *cmd, struct se_task *task) 406 static void fd_emulate_write_fua(struct se_cmd *cmd, struct se_task *task)
408 { 407 {
409 struct se_device *dev = cmd->se_dev; 408 struct se_device *dev = cmd->se_dev;
410 struct fd_dev *fd_dev = dev->dev_ptr; 409 struct fd_dev *fd_dev = dev->dev_ptr;
411 loff_t start = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size; 410 loff_t start = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
412 loff_t end = start + task->task_size; 411 loff_t end = start + task->task_size;
413 int ret; 412 int ret;
414 413
415 pr_debug("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n", 414 pr_debug("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n",
416 task->task_lba, task->task_size); 415 task->task_lba, task->task_size);
417 416
418 ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1); 417 ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
419 if (ret != 0) 418 if (ret != 0)
420 pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret); 419 pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);
421 } 420 }
422 421
423 static int fd_do_task(struct se_task *task) 422 static int fd_do_task(struct se_task *task)
424 { 423 {
425 struct se_cmd *cmd = task->task_se_cmd; 424 struct se_cmd *cmd = task->task_se_cmd;
426 struct se_device *dev = cmd->se_dev; 425 struct se_device *dev = cmd->se_dev;
427 int ret = 0; 426 int ret = 0;
428 427
429 /* 428 /*
430 * Call vectorized fileio functions to map struct scatterlist 429 * Call vectorized fileio functions to map struct scatterlist
431 * physical memory addresses to struct iovec virtual memory. 430 * physical memory addresses to struct iovec virtual memory.
432 */ 431 */
433 if (task->task_data_direction == DMA_FROM_DEVICE) { 432 if (task->task_data_direction == DMA_FROM_DEVICE) {
434 ret = fd_do_readv(task); 433 ret = fd_do_readv(task);
435 } else { 434 } else {
436 ret = fd_do_writev(task); 435 ret = fd_do_writev(task);
437 436
438 if (ret > 0 && 437 if (ret > 0 &&
439 dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 && 438 dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 &&
440 dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && 439 dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
441 (cmd->se_cmd_flags & SCF_FUA)) { 440 (cmd->se_cmd_flags & SCF_FUA)) {
442 /* 441 /*
443 * We might need to be a bit smarter here 442 * We might need to be a bit smarter here
444 * and return some sense data to let the initiator 443 * and return some sense data to let the initiator
445 * know the FUA WRITE cache sync failed..? 444 * know the FUA WRITE cache sync failed..?
446 */ 445 */
447 fd_emulate_write_fua(cmd, task); 446 fd_emulate_write_fua(cmd, task);
448 } 447 }
449 448
450 } 449 }
451 450
452 if (ret < 0) { 451 if (ret < 0) {
453 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 452 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
454 return ret; 453 return ret;
455 } 454 }
456 if (ret) { 455 if (ret) {
457 task->task_scsi_status = GOOD; 456 task->task_scsi_status = GOOD;
458 transport_complete_task(task, 1); 457 transport_complete_task(task, 1);
459 } 458 }
460 return 0; 459 return 0;
461 } 460 }
462 461
463 /* fd_free_task(): (Part of se_subsystem_api_t template) 462 /* fd_free_task(): (Part of se_subsystem_api_t template)
464 * 463 *
465 * 464 *
466 */ 465 */
467 static void fd_free_task(struct se_task *task) 466 static void fd_free_task(struct se_task *task)
468 { 467 {
469 struct fd_request *req = FILE_REQ(task); 468 struct fd_request *req = FILE_REQ(task);
470 469
471 kfree(req); 470 kfree(req);
472 } 471 }
473 472
474 enum { 473 enum {
475 Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io, Opt_err 474 Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io, Opt_err
476 }; 475 };
477 476
478 static match_table_t tokens = { 477 static match_table_t tokens = {
479 {Opt_fd_dev_name, "fd_dev_name=%s"}, 478 {Opt_fd_dev_name, "fd_dev_name=%s"},
480 {Opt_fd_dev_size, "fd_dev_size=%s"}, 479 {Opt_fd_dev_size, "fd_dev_size=%s"},
481 {Opt_fd_buffered_io, "fd_buffered_io=%d"}, 480 {Opt_fd_buffered_io, "fd_buffered_io=%d"},
482 {Opt_err, NULL} 481 {Opt_err, NULL}
483 }; 482 };
484 483
485 static ssize_t fd_set_configfs_dev_params( 484 static ssize_t fd_set_configfs_dev_params(
486 struct se_hba *hba, 485 struct se_hba *hba,
487 struct se_subsystem_dev *se_dev, 486 struct se_subsystem_dev *se_dev,
488 const char *page, ssize_t count) 487 const char *page, ssize_t count)
489 { 488 {
490 struct fd_dev *fd_dev = se_dev->se_dev_su_ptr; 489 struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
491 char *orig, *ptr, *arg_p, *opts; 490 char *orig, *ptr, *arg_p, *opts;
492 substring_t args[MAX_OPT_ARGS]; 491 substring_t args[MAX_OPT_ARGS];
493 int ret = 0, arg, token; 492 int ret = 0, arg, token;
494 493
495 opts = kstrdup(page, GFP_KERNEL); 494 opts = kstrdup(page, GFP_KERNEL);
496 if (!opts) 495 if (!opts)
497 return -ENOMEM; 496 return -ENOMEM;
498 497
499 orig = opts; 498 orig = opts;
500 499
501 while ((ptr = strsep(&opts, ",")) != NULL) { 500 while ((ptr = strsep(&opts, ",")) != NULL) {
502 if (!*ptr) 501 if (!*ptr)
503 continue; 502 continue;
504 503
505 token = match_token(ptr, tokens, args); 504 token = match_token(ptr, tokens, args);
506 switch (token) { 505 switch (token) {
507 case Opt_fd_dev_name: 506 case Opt_fd_dev_name:
508 arg_p = match_strdup(&args[0]); 507 arg_p = match_strdup(&args[0]);
509 if (!arg_p) { 508 if (!arg_p) {
510 ret = -ENOMEM; 509 ret = -ENOMEM;
511 break; 510 break;
512 } 511 }
513 snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME, 512 snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME,
514 "%s", arg_p); 513 "%s", arg_p);
515 kfree(arg_p); 514 kfree(arg_p);
516 pr_debug("FILEIO: Referencing Path: %s\n", 515 pr_debug("FILEIO: Referencing Path: %s\n",
517 fd_dev->fd_dev_name); 516 fd_dev->fd_dev_name);
518 fd_dev->fbd_flags |= FBDF_HAS_PATH; 517 fd_dev->fbd_flags |= FBDF_HAS_PATH;
519 break; 518 break;
520 case Opt_fd_dev_size: 519 case Opt_fd_dev_size:
521 arg_p = match_strdup(&args[0]); 520 arg_p = match_strdup(&args[0]);
522 if (!arg_p) { 521 if (!arg_p) {
523 ret = -ENOMEM; 522 ret = -ENOMEM;
524 break; 523 break;
525 } 524 }
526 ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size); 525 ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size);
527 kfree(arg_p); 526 kfree(arg_p);
528 if (ret < 0) { 527 if (ret < 0) {
529 pr_err("strict_strtoull() failed for" 528 pr_err("strict_strtoull() failed for"
530 " fd_dev_size=\n"); 529 " fd_dev_size=\n");
531 goto out; 530 goto out;
532 } 531 }
533 pr_debug("FILEIO: Referencing Size: %llu" 532 pr_debug("FILEIO: Referencing Size: %llu"
534 " bytes\n", fd_dev->fd_dev_size); 533 " bytes\n", fd_dev->fd_dev_size);
535 fd_dev->fbd_flags |= FBDF_HAS_SIZE; 534 fd_dev->fbd_flags |= FBDF_HAS_SIZE;
536 break; 535 break;
537 case Opt_fd_buffered_io: 536 case Opt_fd_buffered_io:
538 match_int(args, &arg); 537 match_int(args, &arg);
539 if (arg != 1) { 538 if (arg != 1) {
540 pr_err("bogus fd_buffered_io=%d value\n", arg); 539 pr_err("bogus fd_buffered_io=%d value\n", arg);
541 ret = -EINVAL; 540 ret = -EINVAL;
542 goto out; 541 goto out;
543 } 542 }
544 543
545 pr_debug("FILEIO: Using buffered I/O" 544 pr_debug("FILEIO: Using buffered I/O"
546 " operations for struct fd_dev\n"); 545 " operations for struct fd_dev\n");
547 546
548 fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO; 547 fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO;
549 break; 548 break;
550 default: 549 default:
551 break; 550 break;
552 } 551 }
553 } 552 }
554 553
555 out: 554 out:
556 kfree(orig); 555 kfree(orig);
557 return (!ret) ? count : ret; 556 return (!ret) ? count : ret;
558 } 557 }
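For context, the tokens parsed above arrive through the device's configfs "control" attribute. A hedged user-space sketch writing the option string; the configfs path below is illustrative and depends on the local target layout:

#include <stdio.h>

int main(void)
{
	/* Illustrative path: fileio HBA 0, device "disk0" */
	const char *ctl =
		"/sys/kernel/config/target/core/fileio_0/disk0/control";
	FILE *f = fopen(ctl, "w");
	if (!f) { perror("fopen"); return 1; }

	/* fd_dev_size= is only required when backing with a regular file */
	fprintf(f, "fd_dev_name=/tmp/fileio0.img,fd_dev_size=4194304");
	fclose(f);
	return 0;
}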
559 558
560 static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev) 559 static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
561 { 560 {
562 struct fd_dev *fd_dev = (struct fd_dev *) se_dev->se_dev_su_ptr; 561 struct fd_dev *fd_dev = (struct fd_dev *) se_dev->se_dev_su_ptr;
563 562
564 if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) { 563 if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
565 pr_err("Missing fd_dev_name=\n"); 564 pr_err("Missing fd_dev_name=\n");
566 return -EINVAL; 565 return -EINVAL;
567 } 566 }
568 567
569 return 0; 568 return 0;
570 } 569 }
571 570
572 static ssize_t fd_show_configfs_dev_params( 571 static ssize_t fd_show_configfs_dev_params(
573 struct se_hba *hba, 572 struct se_hba *hba,
574 struct se_subsystem_dev *se_dev, 573 struct se_subsystem_dev *se_dev,
575 char *b) 574 char *b)
576 { 575 {
577 struct fd_dev *fd_dev = se_dev->se_dev_su_ptr; 576 struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
578 ssize_t bl = 0; 577 ssize_t bl = 0;
579 578
580 bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id); 579 bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
581 bl += sprintf(b + bl, " File: %s Size: %llu Mode: %s\n", 580 bl += sprintf(b + bl, " File: %s Size: %llu Mode: %s\n",
582 fd_dev->fd_dev_name, fd_dev->fd_dev_size, 581 fd_dev->fd_dev_name, fd_dev->fd_dev_size,
583 (fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO) ? 582 (fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO) ?
584 "Buffered" : "Synchronous"); 583 "Buffered" : "Synchronous");
585 return bl; 584 return bl;
586 } 585 }
587 586
588 /* fd_get_device_rev(): (Part of se_subsystem_api_t template) 587 /* fd_get_device_rev(): (Part of se_subsystem_api_t template)
589 * 588 *
590 * 589 *
591 */ 590 */
592 static u32 fd_get_device_rev(struct se_device *dev) 591 static u32 fd_get_device_rev(struct se_device *dev)
593 { 592 {
594 return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */ 593 return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
595 } 594 }
596 595
597 /* fd_get_device_type(): (Part of se_subsystem_api_t template) 596 /* fd_get_device_type(): (Part of se_subsystem_api_t template)
598 * 597 *
599 * 598 *
600 */ 599 */
601 static u32 fd_get_device_type(struct se_device *dev) 600 static u32 fd_get_device_type(struct se_device *dev)
602 { 601 {
603 return TYPE_DISK; 602 return TYPE_DISK;
604 } 603 }
605 604
606 static sector_t fd_get_blocks(struct se_device *dev) 605 static sector_t fd_get_blocks(struct se_device *dev)
607 { 606 {
608 struct fd_dev *fd_dev = dev->dev_ptr; 607 struct fd_dev *fd_dev = dev->dev_ptr;
609 unsigned long long blocks_long = div_u64(fd_dev->fd_dev_size, 608 unsigned long long blocks_long = div_u64(fd_dev->fd_dev_size,
610 dev->se_sub_dev->se_dev_attrib.block_size); 609 dev->se_sub_dev->se_dev_attrib.block_size);
611 610
612 return blocks_long; 611 return blocks_long;
613 } 612 }
614 613
615 static struct se_subsystem_api fileio_template = { 614 static struct se_subsystem_api fileio_template = {
616 .name = "fileio", 615 .name = "fileio",
617 .owner = THIS_MODULE, 616 .owner = THIS_MODULE,
618 .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV, 617 .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV,
619 .write_cache_emulated = 1, 618 .write_cache_emulated = 1,
620 .fua_write_emulated = 1, 619 .fua_write_emulated = 1,
621 .attach_hba = fd_attach_hba, 620 .attach_hba = fd_attach_hba,
622 .detach_hba = fd_detach_hba, 621 .detach_hba = fd_detach_hba,
623 .allocate_virtdevice = fd_allocate_virtdevice, 622 .allocate_virtdevice = fd_allocate_virtdevice,
624 .create_virtdevice = fd_create_virtdevice, 623 .create_virtdevice = fd_create_virtdevice,
625 .free_device = fd_free_device, 624 .free_device = fd_free_device,
626 .alloc_task = fd_alloc_task, 625 .alloc_task = fd_alloc_task,
627 .do_task = fd_do_task, 626 .do_task = fd_do_task,
628 .do_sync_cache = fd_emulate_sync_cache, 627 .do_sync_cache = fd_emulate_sync_cache,
629 .free_task = fd_free_task, 628 .free_task = fd_free_task,
630 .check_configfs_dev_params = fd_check_configfs_dev_params, 629 .check_configfs_dev_params = fd_check_configfs_dev_params,
631 .set_configfs_dev_params = fd_set_configfs_dev_params, 630 .set_configfs_dev_params = fd_set_configfs_dev_params,
632 .show_configfs_dev_params = fd_show_configfs_dev_params, 631 .show_configfs_dev_params = fd_show_configfs_dev_params,
633 .get_device_rev = fd_get_device_rev, 632 .get_device_rev = fd_get_device_rev,
634 .get_device_type = fd_get_device_type, 633 .get_device_type = fd_get_device_type,
635 .get_blocks = fd_get_blocks, 634 .get_blocks = fd_get_blocks,
636 }; 635 };
637 636
638 static int __init fileio_module_init(void) 637 static int __init fileio_module_init(void)
639 { 638 {
640 return transport_subsystem_register(&fileio_template); 639 return transport_subsystem_register(&fileio_template);
641 } 640 }
642 641
643 static void fileio_module_exit(void) 642 static void fileio_module_exit(void)
644 { 643 {
645 transport_subsystem_release(&fileio_template); 644 transport_subsystem_release(&fileio_template);
646 } 645 }
647 646
648 MODULE_DESCRIPTION("TCM FILEIO subsystem plugin"); 647 MODULE_DESCRIPTION("TCM FILEIO subsystem plugin");
649 MODULE_AUTHOR("nab@Linux-iSCSI.org"); 648 MODULE_AUTHOR("nab@Linux-iSCSI.org");
650 MODULE_LICENSE("GPL"); 649 MODULE_LICENSE("GPL");
651 650
652 module_init(fileio_module_init); 651 module_init(fileio_module_init);
653 module_exit(fileio_module_exit); 652 module_exit(fileio_module_exit);
654 653
drivers/target/target_core_hba.c
1 /******************************************************************************* 1 /*******************************************************************************
2 * Filename: target_core_hba.c 2 * Filename: target_core_hba.c
3 * 3 *
4 * This file contains the TCM HBA Transport related functions. 4 * This file contains the TCM HBA Transport related functions.
5 * 5 *
6 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc. 6 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
7 * Copyright (c) 2005, 2006, 2007 SBE, Inc. 7 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
8 * Copyright (c) 2007-2010 Rising Tide Systems 8 * Copyright (c) 2007-2010 Rising Tide Systems
9 * Copyright (c) 2008-2010 Linux-iSCSI.org 9 * Copyright (c) 2008-2010 Linux-iSCSI.org
10 * 10 *
11 * Nicholas A. Bellinger <nab@kernel.org> 11 * Nicholas A. Bellinger <nab@kernel.org>
12 * 12 *
13 * This program is free software; you can redistribute it and/or modify 13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by 14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or 15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version. 16 * (at your option) any later version.
17 * 17 *
18 * This program is distributed in the hope that it will be useful, 18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of 19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details. 21 * GNU General Public License for more details.
22 * 22 *
23 * You should have received a copy of the GNU General Public License 23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software 24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 25 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
26 * 26 *
27 ******************************************************************************/ 27 ******************************************************************************/
28 28
29 #include <linux/net.h> 29 #include <linux/net.h>
30 #include <linux/string.h> 30 #include <linux/string.h>
31 #include <linux/timer.h> 31 #include <linux/timer.h>
32 #include <linux/slab.h> 32 #include <linux/slab.h>
33 #include <linux/spinlock.h> 33 #include <linux/spinlock.h>
34 #include <linux/in.h> 34 #include <linux/in.h>
35 #include <linux/module.h> 35 #include <linux/module.h>
36 #include <net/sock.h> 36 #include <net/sock.h>
37 #include <net/tcp.h> 37 #include <net/tcp.h>
38 38
39 #include <target/target_core_base.h> 39 #include <target/target_core_base.h>
40 #include <target/target_core_device.h> 40 #include <target/target_core_backend.h>
41 #include <target/target_core_tpg.h> 41 #include <target/target_core_fabric.h>
42 #include <target/target_core_transport.h>
43 42
44 #include "target_core_internal.h" 43 #include "target_core_internal.h"
45 44
46 static LIST_HEAD(subsystem_list); 45 static LIST_HEAD(subsystem_list);
47 static DEFINE_MUTEX(subsystem_mutex); 46 static DEFINE_MUTEX(subsystem_mutex);
48 47
49 static u32 hba_id_counter; 48 static u32 hba_id_counter;
50 49
51 static DEFINE_SPINLOCK(hba_lock); 50 static DEFINE_SPINLOCK(hba_lock);
52 static LIST_HEAD(hba_list); 51 static LIST_HEAD(hba_list);
53 52
54 int transport_subsystem_register(struct se_subsystem_api *sub_api) 53 int transport_subsystem_register(struct se_subsystem_api *sub_api)
55 { 54 {
56 struct se_subsystem_api *s; 55 struct se_subsystem_api *s;
57 56
58 INIT_LIST_HEAD(&sub_api->sub_api_list); 57 INIT_LIST_HEAD(&sub_api->sub_api_list);
59 58
60 mutex_lock(&subsystem_mutex); 59 mutex_lock(&subsystem_mutex);
61 list_for_each_entry(s, &subsystem_list, sub_api_list) { 60 list_for_each_entry(s, &subsystem_list, sub_api_list) {
62 if (!strcmp(s->name, sub_api->name)) { 61 if (!strcmp(s->name, sub_api->name)) {
63 pr_err("%p is already registered with" 62 pr_err("%p is already registered with"
64 " duplicate name %s, unable to process" 63 " duplicate name %s, unable to process"
65 " request\n", s, s->name); 64 " request\n", s, s->name);
66 mutex_unlock(&subsystem_mutex); 65 mutex_unlock(&subsystem_mutex);
67 return -EEXIST; 66 return -EEXIST;
68 } 67 }
69 } 68 }
70 list_add_tail(&sub_api->sub_api_list, &subsystem_list); 69 list_add_tail(&sub_api->sub_api_list, &subsystem_list);
71 mutex_unlock(&subsystem_mutex); 70 mutex_unlock(&subsystem_mutex);
72 71
73 pr_debug("TCM: Registered subsystem plugin: %s struct module:" 72 pr_debug("TCM: Registered subsystem plugin: %s struct module:"
74 " %p\n", sub_api->name, sub_api->owner); 73 " %p\n", sub_api->name, sub_api->owner);
75 return 0; 74 return 0;
76 } 75 }
77 EXPORT_SYMBOL(transport_subsystem_register); 76 EXPORT_SYMBOL(transport_subsystem_register);
78 77
79 void transport_subsystem_release(struct se_subsystem_api *sub_api) 78 void transport_subsystem_release(struct se_subsystem_api *sub_api)
80 { 79 {
81 mutex_lock(&subsystem_mutex); 80 mutex_lock(&subsystem_mutex);
82 list_del(&sub_api->sub_api_list); 81 list_del(&sub_api->sub_api_list);
83 mutex_unlock(&subsystem_mutex); 82 mutex_unlock(&subsystem_mutex);
84 } 83 }
85 EXPORT_SYMBOL(transport_subsystem_release); 84 EXPORT_SYMBOL(transport_subsystem_release);
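transport_subsystem_register()/release() bracket a backend's lifetime, exactly as fileio_module_init()/exit() above use them. A minimal module skeleton following that pattern; the "demo" names are hypothetical, and only the two exported calls come from this file:

#include <linux/module.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>

static struct se_subsystem_api demo_template = {
	.name		= "demo",	/* must be unique, see the -EEXIST check */
	.owner		= THIS_MODULE,
	.transport_type	= TRANSPORT_PLUGIN_VHBA_PDEV,
	/* .attach_hba, .detach_hba, .do_task, ... filled in by a real backend */
};

static int __init demo_module_init(void)
{
	return transport_subsystem_register(&demo_template);
}

static void __exit demo_module_exit(void)
{
	transport_subsystem_release(&demo_template);
}

module_init(demo_module_init);
module_exit(demo_module_exit);
MODULE_LICENSE("GPL");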
86 85
87 static struct se_subsystem_api *core_get_backend(const char *sub_name) 86 static struct se_subsystem_api *core_get_backend(const char *sub_name)
88 { 87 {
89 struct se_subsystem_api *s; 88 struct se_subsystem_api *s;
90 89
91 mutex_lock(&subsystem_mutex); 90 mutex_lock(&subsystem_mutex);
92 list_for_each_entry(s, &subsystem_list, sub_api_list) { 91 list_for_each_entry(s, &subsystem_list, sub_api_list) {
93 if (!strcmp(s->name, sub_name)) 92 if (!strcmp(s->name, sub_name))
94 goto found; 93 goto found;
95 } 94 }
96 mutex_unlock(&subsystem_mutex); 95 mutex_unlock(&subsystem_mutex);
97 return NULL; 96 return NULL;
98 found: 97 found:
99 if (s->owner && !try_module_get(s->owner)) 98 if (s->owner && !try_module_get(s->owner))
100 s = NULL; 99 s = NULL;
101 mutex_unlock(&subsystem_mutex); 100 mutex_unlock(&subsystem_mutex);
102 return s; 101 return s;
103 } 102 }
104 103
105 struct se_hba * 104 struct se_hba *
106 core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags) 105 core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
107 { 106 {
108 struct se_hba *hba; 107 struct se_hba *hba;
109 int ret = 0; 108 int ret = 0;
110 109
111 hba = kzalloc(sizeof(*hba), GFP_KERNEL); 110 hba = kzalloc(sizeof(*hba), GFP_KERNEL);
112 if (!hba) { 111 if (!hba) {
113 pr_err("Unable to allocate struct se_hba\n"); 112 pr_err("Unable to allocate struct se_hba\n");
114 return ERR_PTR(-ENOMEM); 113 return ERR_PTR(-ENOMEM);
115 } 114 }
116 115
117 INIT_LIST_HEAD(&hba->hba_dev_list); 116 INIT_LIST_HEAD(&hba->hba_dev_list);
118 spin_lock_init(&hba->device_lock); 117 spin_lock_init(&hba->device_lock);
119 mutex_init(&hba->hba_access_mutex); 118 mutex_init(&hba->hba_access_mutex);
120 119
121 hba->hba_index = scsi_get_new_index(SCSI_INST_INDEX); 120 hba->hba_index = scsi_get_new_index(SCSI_INST_INDEX);
122 hba->hba_flags |= hba_flags; 121 hba->hba_flags |= hba_flags;
123 122
124 hba->transport = core_get_backend(plugin_name); 123 hba->transport = core_get_backend(plugin_name);
125 if (!hba->transport) { 124 if (!hba->transport) {
126 ret = -EINVAL; 125 ret = -EINVAL;
127 goto out_free_hba; 126 goto out_free_hba;
128 } 127 }
129 128
130 ret = hba->transport->attach_hba(hba, plugin_dep_id); 129 ret = hba->transport->attach_hba(hba, plugin_dep_id);
131 if (ret < 0) 130 if (ret < 0)
132 goto out_module_put; 131 goto out_module_put;
133 132
134 spin_lock(&hba_lock); 133 spin_lock(&hba_lock);
135 hba->hba_id = hba_id_counter++; 134 hba->hba_id = hba_id_counter++;
136 list_add_tail(&hba->hba_node, &hba_list); 135 list_add_tail(&hba->hba_node, &hba_list);
137 spin_unlock(&hba_lock); 136 spin_unlock(&hba_lock);
138 137
139 pr_debug("CORE_HBA[%d] - Attached HBA to Generic Target" 138 pr_debug("CORE_HBA[%d] - Attached HBA to Generic Target"
140 " Core\n", hba->hba_id); 139 " Core\n", hba->hba_id);
141 140
142 return hba; 141 return hba;
143 142
144 out_module_put: 143 out_module_put:
145 if (hba->transport->owner) 144 if (hba->transport->owner)
146 module_put(hba->transport->owner); 145 module_put(hba->transport->owner);
147 hba->transport = NULL; 146 hba->transport = NULL;
148 out_free_hba: 147 out_free_hba:
149 kfree(hba); 148 kfree(hba);
150 return ERR_PTR(ret); 149 return ERR_PTR(ret);
151 } 150 }
152 151
153 int 152 int
154 core_delete_hba(struct se_hba *hba) 153 core_delete_hba(struct se_hba *hba)
155 { 154 {
156 if (!list_empty(&hba->hba_dev_list)) 155 if (!list_empty(&hba->hba_dev_list))
157 dump_stack(); 156 dump_stack();
158 157
159 hba->transport->detach_hba(hba); 158 hba->transport->detach_hba(hba);
160 159
161 spin_lock(&hba_lock); 160 spin_lock(&hba_lock);
162 list_del(&hba->hba_node); 161 list_del(&hba->hba_node);
163 spin_unlock(&hba_lock); 162 spin_unlock(&hba_lock);
164 163
165 pr_debug("CORE_HBA[%d] - Detached HBA from Generic Target" 164 pr_debug("CORE_HBA[%d] - Detached HBA from Generic Target"
166 " Core\n", hba->hba_id); 165 " Core\n", hba->hba_id);
167 166
168 if (hba->transport->owner) 167 if (hba->transport->owner)
169 module_put(hba->transport->owner); 168 module_put(hba->transport->owner);
170 169
171 hba->transport = NULL; 170 hba->transport = NULL;
172 kfree(hba); 171 kfree(hba);
173 return 0; 172 return 0;
174 } 173 }
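core_alloc_hba() and core_delete_hba() are the pairing the configfs layer drives when an HBA group is created and removed; both are presumably exposed via target_core_internal.h, which this file includes. A hedged sketch of one round trip ("fileio" matches the backend name registered by fileio_template):

static int demo_hba_roundtrip(void)
{
	struct se_hba *hba;

	/* Looks up the "fileio" backend, takes a module ref, attaches */
	hba = core_alloc_hba("fileio", 0, 0);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	/* Detaches from the backend and drops the module reference */
	return core_delete_hba(hba);
}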
175 174
drivers/target/target_core_iblock.c
1 /******************************************************************************* 1 /*******************************************************************************
2 * Filename: target_core_iblock.c 2 * Filename: target_core_iblock.c
3 * 3 *
4 * This file contains the Storage Engine <-> Linux BlockIO transport 4 * This file contains the Storage Engine <-> Linux BlockIO transport
5 * specific functions. 5 * specific functions.
6 * 6 *
7 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc. 7 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
8 * Copyright (c) 2005, 2006, 2007 SBE, Inc. 8 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
9 * Copyright (c) 2007-2010 Rising Tide Systems 9 * Copyright (c) 2007-2010 Rising Tide Systems
10 * Copyright (c) 2008-2010 Linux-iSCSI.org 10 * Copyright (c) 2008-2010 Linux-iSCSI.org
11 * 11 *
12 * Nicholas A. Bellinger <nab@kernel.org> 12 * Nicholas A. Bellinger <nab@kernel.org>
13 * 13 *
14 * This program is free software; you can redistribute it and/or modify 14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by 15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or 16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version. 17 * (at your option) any later version.
18 * 18 *
19 * This program is distributed in the hope that it will be useful, 19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of 20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details. 22 * GNU General Public License for more details.
23 * 23 *
24 * You should have received a copy of the GNU General Public License 24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software 25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 26 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
27 * 27 *
28 ******************************************************************************/ 28 ******************************************************************************/
29 29
30 #include <linux/string.h> 30 #include <linux/string.h>
31 #include <linux/parser.h> 31 #include <linux/parser.h>
32 #include <linux/timer.h> 32 #include <linux/timer.h>
33 #include <linux/fs.h> 33 #include <linux/fs.h>
34 #include <linux/blkdev.h> 34 #include <linux/blkdev.h>
35 #include <linux/slab.h> 35 #include <linux/slab.h>
36 #include <linux/spinlock.h> 36 #include <linux/spinlock.h>
37 #include <linux/bio.h> 37 #include <linux/bio.h>
38 #include <linux/genhd.h> 38 #include <linux/genhd.h>
39 #include <linux/file.h> 39 #include <linux/file.h>
40 #include <linux/module.h> 40 #include <linux/module.h>
41 #include <scsi/scsi.h> 41 #include <scsi/scsi.h>
42 #include <scsi/scsi_host.h> 42 #include <scsi/scsi_host.h>
43 43
44 #include <target/target_core_base.h> 44 #include <target/target_core_base.h>
45 #include <target/target_core_device.h> 45 #include <target/target_core_backend.h>
46 #include <target/target_core_transport.h>
47 46
48 #include "target_core_iblock.h" 47 #include "target_core_iblock.h"
49 48
50 static struct se_subsystem_api iblock_template; 49 static struct se_subsystem_api iblock_template;
51 50
52 static void iblock_bio_done(struct bio *, int); 51 static void iblock_bio_done(struct bio *, int);
53 52
54 /* iblock_attach_hba(): (Part of se_subsystem_api_t template) 53 /* iblock_attach_hba(): (Part of se_subsystem_api_t template)
55 * 54 *
56 * 55 *
57 */ 56 */
58 static int iblock_attach_hba(struct se_hba *hba, u32 host_id) 57 static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
59 { 58 {
60 struct iblock_hba *ib_host; 59 struct iblock_hba *ib_host;
61 60
62 ib_host = kzalloc(sizeof(struct iblock_hba), GFP_KERNEL); 61 ib_host = kzalloc(sizeof(struct iblock_hba), GFP_KERNEL);
63 if (!ib_host) { 62 if (!ib_host) {
64 pr_err("Unable to allocate memory for" 63 pr_err("Unable to allocate memory for"
65 " struct iblock_hba\n"); 64 " struct iblock_hba\n");
66 return -ENOMEM; 65 return -ENOMEM;
67 } 66 }
68 67
69 ib_host->iblock_host_id = host_id; 68 ib_host->iblock_host_id = host_id;
70 69
71 hba->hba_ptr = ib_host; 70 hba->hba_ptr = ib_host;
72 71
73 pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on" 72 pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
74 " Generic Target Core Stack %s\n", hba->hba_id, 73 " Generic Target Core Stack %s\n", hba->hba_id,
75 IBLOCK_VERSION, TARGET_CORE_MOD_VERSION); 74 IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);
76 75
77 pr_debug("CORE_HBA[%d] - Attached iBlock HBA: %u to Generic\n", 76 pr_debug("CORE_HBA[%d] - Attached iBlock HBA: %u to Generic\n",
78 hba->hba_id, ib_host->iblock_host_id); 77 hba->hba_id, ib_host->iblock_host_id);
79 78
80 return 0; 79 return 0;
81 } 80 }
82 81
83 static void iblock_detach_hba(struct se_hba *hba) 82 static void iblock_detach_hba(struct se_hba *hba)
84 { 83 {
85 struct iblock_hba *ib_host = hba->hba_ptr; 84 struct iblock_hba *ib_host = hba->hba_ptr;
86 85
87 pr_debug("CORE_HBA[%d] - Detached iBlock HBA: %u from Generic" 86 pr_debug("CORE_HBA[%d] - Detached iBlock HBA: %u from Generic"
88 " Target Core\n", hba->hba_id, ib_host->iblock_host_id); 87 " Target Core\n", hba->hba_id, ib_host->iblock_host_id);
89 88
90 kfree(ib_host); 89 kfree(ib_host);
91 hba->hba_ptr = NULL; 90 hba->hba_ptr = NULL;
92 } 91 }
93 92
94 static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name) 93 static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name)
95 { 94 {
96 struct iblock_dev *ib_dev = NULL; 95 struct iblock_dev *ib_dev = NULL;
97 struct iblock_hba *ib_host = hba->hba_ptr; 96 struct iblock_hba *ib_host = hba->hba_ptr;
98 97
99 ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL); 98 ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
100 if (!ib_dev) { 99 if (!ib_dev) {
101 pr_err("Unable to allocate struct iblock_dev\n"); 100 pr_err("Unable to allocate struct iblock_dev\n");
102 return NULL; 101 return NULL;
103 } 102 }
104 ib_dev->ibd_host = ib_host; 103 ib_dev->ibd_host = ib_host;
105 104
106 pr_debug( "IBLOCK: Allocated ib_dev for %s\n", name); 105 pr_debug( "IBLOCK: Allocated ib_dev for %s\n", name);
107 106
108 return ib_dev; 107 return ib_dev;
109 } 108 }
110 109
111 static struct se_device *iblock_create_virtdevice( 110 static struct se_device *iblock_create_virtdevice(
112 struct se_hba *hba, 111 struct se_hba *hba,
113 struct se_subsystem_dev *se_dev, 112 struct se_subsystem_dev *se_dev,
114 void *p) 113 void *p)
115 { 114 {
116 struct iblock_dev *ib_dev = p; 115 struct iblock_dev *ib_dev = p;
117 struct se_device *dev; 116 struct se_device *dev;
118 struct se_dev_limits dev_limits; 117 struct se_dev_limits dev_limits;
119 struct block_device *bd = NULL; 118 struct block_device *bd = NULL;
120 struct request_queue *q; 119 struct request_queue *q;
121 struct queue_limits *limits; 120 struct queue_limits *limits;
122 u32 dev_flags = 0; 121 u32 dev_flags = 0;
123 int ret = -EINVAL; 122 int ret = -EINVAL;
124 123
125 if (!ib_dev) { 124 if (!ib_dev) {
126 pr_err("Unable to locate struct iblock_dev parameter\n"); 125 pr_err("Unable to locate struct iblock_dev parameter\n");
127 return ERR_PTR(ret); 126 return ERR_PTR(ret);
128 } 127 }
129 memset(&dev_limits, 0, sizeof(struct se_dev_limits)); 128 memset(&dev_limits, 0, sizeof(struct se_dev_limits));
130 /* 129 /*
131 * These settings need to be made tunable.. 130 * These settings need to be made tunable..
132 */ 131 */
133 ib_dev->ibd_bio_set = bioset_create(32, 64); 132 ib_dev->ibd_bio_set = bioset_create(32, 64);
134 if (!ib_dev->ibd_bio_set) { 133 if (!ib_dev->ibd_bio_set) {
135 pr_err("IBLOCK: Unable to create bioset()\n"); 134 pr_err("IBLOCK: Unable to create bioset()\n");
136 return ERR_PTR(-ENOMEM); 135 return ERR_PTR(-ENOMEM);
137 } 136 }
138 pr_debug("IBLOCK: Created bio_set()\n"); 137 pr_debug("IBLOCK: Created bio_set()\n");
139 /* 138 /*
140 * iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path 139 * iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path
141 * must already have been set in order for echo 1 > $HBA/$DEV/enable to run. 140 * must already have been set in order for echo 1 > $HBA/$DEV/enable to run.
142 */ 141 */
143 pr_debug( "IBLOCK: Claiming struct block_device: %s\n", 142 pr_debug( "IBLOCK: Claiming struct block_device: %s\n",
144 ib_dev->ibd_udev_path); 143 ib_dev->ibd_udev_path);
145 144
146 bd = blkdev_get_by_path(ib_dev->ibd_udev_path, 145 bd = blkdev_get_by_path(ib_dev->ibd_udev_path,
147 FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev); 146 FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev);
148 if (IS_ERR(bd)) { 147 if (IS_ERR(bd)) {
149 ret = PTR_ERR(bd); 148 ret = PTR_ERR(bd);
150 goto failed; 149 goto failed;
151 } 150 }
152 /* 151 /*
153 * Setup the local scope queue_limits from struct request_queue->limits 152 * Setup the local scope queue_limits from struct request_queue->limits
154 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits. 153 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
155 */ 154 */
156 q = bdev_get_queue(bd); 155 q = bdev_get_queue(bd);
157 limits = &dev_limits.limits; 156 limits = &dev_limits.limits;
158 limits->logical_block_size = bdev_logical_block_size(bd); 157 limits->logical_block_size = bdev_logical_block_size(bd);
159 limits->max_hw_sectors = queue_max_hw_sectors(q); 158 limits->max_hw_sectors = queue_max_hw_sectors(q);
160 limits->max_sectors = queue_max_sectors(q); 159 limits->max_sectors = queue_max_sectors(q);
161 dev_limits.hw_queue_depth = q->nr_requests; 160 dev_limits.hw_queue_depth = q->nr_requests;
162 dev_limits.queue_depth = q->nr_requests; 161 dev_limits.queue_depth = q->nr_requests;
163 162
164 ib_dev->ibd_bd = bd; 163 ib_dev->ibd_bd = bd;
165 164
166 dev = transport_add_device_to_core_hba(hba, 165 dev = transport_add_device_to_core_hba(hba,
167 &iblock_template, se_dev, dev_flags, ib_dev, 166 &iblock_template, se_dev, dev_flags, ib_dev,
168 &dev_limits, "IBLOCK", IBLOCK_VERSION); 167 &dev_limits, "IBLOCK", IBLOCK_VERSION);
169 if (!dev) 168 if (!dev)
170 goto failed; 169 goto failed;
171 170
172 /* 171 /*
173 * Check if the underlying struct block_device request_queue supports 172 * Check if the underlying struct block_device request_queue supports
174 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM 173 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
175 * in ATA and we need to set TPE=1 174 * in ATA and we need to set TPE=1
176 */ 175 */
177 if (blk_queue_discard(q)) { 176 if (blk_queue_discard(q)) {
178 dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = 177 dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count =
179 q->limits.max_discard_sectors; 178 q->limits.max_discard_sectors;
180 /* 179 /*
181 * Currently hardcoded to 1 in Linux/SCSI code.. 180 * Currently hardcoded to 1 in Linux/SCSI code..
182 */ 181 */
183 dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 1; 182 dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 1;
184 dev->se_sub_dev->se_dev_attrib.unmap_granularity = 183 dev->se_sub_dev->se_dev_attrib.unmap_granularity =
185 q->limits.discard_granularity; 184 q->limits.discard_granularity;
186 dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = 185 dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
187 q->limits.discard_alignment; 186 q->limits.discard_alignment;
188 187
189 pr_debug("IBLOCK: BLOCK Discard support available," 188 pr_debug("IBLOCK: BLOCK Discard support available,"
190 " disabled by default\n"); 189 " disabled by default\n");
191 } 190 }
192 191
193 if (blk_queue_nonrot(q)) 192 if (blk_queue_nonrot(q))
194 dev->se_sub_dev->se_dev_attrib.is_nonrot = 1; 193 dev->se_sub_dev->se_dev_attrib.is_nonrot = 1;
195 194
196 return dev; 195 return dev;
197 196
198 failed: 197 failed:
199 if (ib_dev->ibd_bio_set) { 198 if (ib_dev->ibd_bio_set) {
200 bioset_free(ib_dev->ibd_bio_set); 199 bioset_free(ib_dev->ibd_bio_set);
201 ib_dev->ibd_bio_set = NULL; 200 ib_dev->ibd_bio_set = NULL;
202 } 201 }
203 ib_dev->ibd_bd = NULL; 202 ib_dev->ibd_bd = NULL;
204 return ERR_PTR(ret); 203 return ERR_PTR(ret);
205 } 204 }
206 205
207 static void iblock_free_device(void *p) 206 static void iblock_free_device(void *p)
208 { 207 {
209 struct iblock_dev *ib_dev = p; 208 struct iblock_dev *ib_dev = p;
210 209
211 if (ib_dev->ibd_bd != NULL) 210 if (ib_dev->ibd_bd != NULL)
212 blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); 211 blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
213 if (ib_dev->ibd_bio_set != NULL) 212 if (ib_dev->ibd_bio_set != NULL)
214 bioset_free(ib_dev->ibd_bio_set); 213 bioset_free(ib_dev->ibd_bio_set);
215 kfree(ib_dev); 214 kfree(ib_dev);
216 } 215 }
217 216
218 static inline struct iblock_req *IBLOCK_REQ(struct se_task *task) 217 static inline struct iblock_req *IBLOCK_REQ(struct se_task *task)
219 { 218 {
220 return container_of(task, struct iblock_req, ib_task); 219 return container_of(task, struct iblock_req, ib_task);
221 } 220 }
222 221
223 static struct se_task * 222 static struct se_task *
224 iblock_alloc_task(unsigned char *cdb) 223 iblock_alloc_task(unsigned char *cdb)
225 { 224 {
226 struct iblock_req *ib_req; 225 struct iblock_req *ib_req;
227 226
228 ib_req = kzalloc(sizeof(struct iblock_req), GFP_KERNEL); 227 ib_req = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
229 if (!ib_req) { 228 if (!ib_req) {
230 pr_err("Unable to allocate memory for struct iblock_req\n"); 229 pr_err("Unable to allocate memory for struct iblock_req\n");
231 return NULL; 230 return NULL;
232 } 231 }
233 232
234 atomic_set(&ib_req->ib_bio_cnt, 0); 233 atomic_set(&ib_req->ib_bio_cnt, 0);
235 return &ib_req->ib_task; 234 return &ib_req->ib_task;
236 } 235 }
237 236
238 static unsigned long long iblock_emulate_read_cap_with_block_size( 237 static unsigned long long iblock_emulate_read_cap_with_block_size(
239 struct se_device *dev, 238 struct se_device *dev,
240 struct block_device *bd, 239 struct block_device *bd,
241 struct request_queue *q) 240 struct request_queue *q)
242 { 241 {
243 unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode), 242 unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
244 bdev_logical_block_size(bd)) - 1); 243 bdev_logical_block_size(bd)) - 1);
245 u32 block_size = bdev_logical_block_size(bd); 244 u32 block_size = bdev_logical_block_size(bd);
246 245
247 if (block_size == dev->se_sub_dev->se_dev_attrib.block_size) 246 if (block_size == dev->se_sub_dev->se_dev_attrib.block_size)
248 return blocks_long; 247 return blocks_long;
249 248
250 switch (block_size) { 249 switch (block_size) {
251 case 4096: 250 case 4096:
252 switch (dev->se_sub_dev->se_dev_attrib.block_size) { 251 switch (dev->se_sub_dev->se_dev_attrib.block_size) {
253 case 2048: 252 case 2048:
254 blocks_long <<= 1; 253 blocks_long <<= 1;
255 break; 254 break;
256 case 1024: 255 case 1024:
257 blocks_long <<= 2; 256 blocks_long <<= 2;
258 break; 257 break;
259 case 512: 258 case 512:
260 blocks_long <<= 3; 259 blocks_long <<= 3;
261 default: 260 default:
262 break; 261 break;
263 } 262 }
264 break; 263 break;
265 case 2048: 264 case 2048:
266 switch (dev->se_sub_dev->se_dev_attrib.block_size) { 265 switch (dev->se_sub_dev->se_dev_attrib.block_size) {
267 case 4096: 266 case 4096:
268 blocks_long >>= 1; 267 blocks_long >>= 1;
269 break; 268 break;
270 case 1024: 269 case 1024:
271 blocks_long <<= 1; 270 blocks_long <<= 1;
272 break; 271 break;
273 case 512: 272 case 512:
274 blocks_long <<= 2; 273 blocks_long <<= 2;
275 break; 274 break;
276 default: 275 default:
277 break; 276 break;
278 } 277 }
279 break; 278 break;
280 case 1024: 279 case 1024:
281 switch (dev->se_sub_dev->se_dev_attrib.block_size) { 280 switch (dev->se_sub_dev->se_dev_attrib.block_size) {
282 case 4096: 281 case 4096:
283 blocks_long >>= 2; 282 blocks_long >>= 2;
284 break; 283 break;
285 case 2048: 284 case 2048:
286 blocks_long >>= 1; 285 blocks_long >>= 1;
287 break; 286 break;
288 case 512: 287 case 512:
289 blocks_long <<= 1; 288 blocks_long <<= 1;
290 break; 289 break;
291 default: 290 default:
292 break; 291 break;
293 } 292 }
294 break; 293 break;
295 case 512: 294 case 512:
296 switch (dev->se_sub_dev->se_dev_attrib.block_size) { 295 switch (dev->se_sub_dev->se_dev_attrib.block_size) {
297 case 4096: 296 case 4096:
298 blocks_long >>= 3; 297 blocks_long >>= 3;
299 break; 298 break;
300 case 2048: 299 case 2048:
301 blocks_long >>= 2; 300 blocks_long >>= 2;
302 break; 301 break;
303 case 1024: 302 case 1024:
304 blocks_long >>= 1; 303 blocks_long >>= 1;
305 break; 304 break;
306 default: 305 default:
307 break; 306 break;
308 } 307 }
309 break; 308 break;
310 default: 309 default:
311 break; 310 break;
312 } 311 }
313 312
314 return blocks_long; 313 return blocks_long;
315 } 314 }
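
Since every block size the driver supports is a power of two, the nested switch above is equivalent to shifting by the difference of the log2 block sizes. A standalone, hypothetical helper (convert_blocks() is not part of the driver) showing the same conversion:

/* Standalone sketch: shift by the log2 difference of the block sizes. */
#include <stdio.h>

static unsigned long long convert_blocks(unsigned long long blocks,
					 unsigned int from_bs,
					 unsigned int to_bs)
{
	/* Both sizes must be powers of two (512..4096 in the driver) */
	int shift = __builtin_ctz(from_bs) - __builtin_ctz(to_bs);

	return shift >= 0 ? blocks << shift : blocks >> -shift;
}

int main(void)
{
	/* 1000 blocks of 4096 bytes == 8000 blocks of 512 bytes */
	printf("%llu\n", convert_blocks(1000, 4096, 512));
	/* 8000 blocks of 512 bytes == 1000 blocks of 4096 bytes */
	printf("%llu\n", convert_blocks(8000, 512, 4096));
	return 0;
}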
316 315
317 static void iblock_end_io_flush(struct bio *bio, int err) 316 static void iblock_end_io_flush(struct bio *bio, int err)
318 { 317 {
319 struct se_cmd *cmd = bio->bi_private; 318 struct se_cmd *cmd = bio->bi_private;
320 319
321 if (err) 320 if (err)
322 pr_err("IBLOCK: cache flush failed: %d\n", err); 321 pr_err("IBLOCK: cache flush failed: %d\n", err);
323 322
324 if (cmd) 323 if (cmd)
325 transport_complete_sync_cache(cmd, err == 0); 324 transport_complete_sync_cache(cmd, err == 0);
326 bio_put(bio); 325 bio_put(bio);
327 } 326 }
328 327
329 /* 328 /*
330 * Implement SYNCHRONIZE CACHE. Note that we can't handle LBA ranges and must 329 * Implement SYNCHRONIZE CACHE. Note that we can't handle LBA ranges and must
331 * always flush the whole cache. 330 * always flush the whole cache.
332 */ 331 */
333 static void iblock_emulate_sync_cache(struct se_task *task) 332 static void iblock_emulate_sync_cache(struct se_task *task)
334 { 333 {
335 struct se_cmd *cmd = task->task_se_cmd; 334 struct se_cmd *cmd = task->task_se_cmd;
336 struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr; 335 struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
337 int immed = (cmd->t_task_cdb[1] & 0x2); 336 int immed = (cmd->t_task_cdb[1] & 0x2);
338 struct bio *bio; 337 struct bio *bio;
339 338
340 /* 339 /*
341 * If the Immediate bit is set, queue up the GOOD response 340 * If the Immediate bit is set, queue up the GOOD response
342 * for this SYNCHRONIZE_CACHE op. 341 * for this SYNCHRONIZE_CACHE op.
343 */ 342 */
344 if (immed) 343 if (immed)
345 transport_complete_sync_cache(cmd, 1); 344 transport_complete_sync_cache(cmd, 1);
346 345
347 bio = bio_alloc(GFP_KERNEL, 0); 346 bio = bio_alloc(GFP_KERNEL, 0);
348 bio->bi_end_io = iblock_end_io_flush; 347 bio->bi_end_io = iblock_end_io_flush;
349 bio->bi_bdev = ib_dev->ibd_bd; 348 bio->bi_bdev = ib_dev->ibd_bd;
350 if (!immed) 349 if (!immed)
351 bio->bi_private = cmd; 350 bio->bi_private = cmd;
352 submit_bio(WRITE_FLUSH, bio); 351 submit_bio(WRITE_FLUSH, bio);
353 } 352 }
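
For reference, the only SYNCHRONIZE CACHE (10) field the function above acts on is the IMMED bit in CDB byte 1; the LBA and block-count fields are ignored because the whole device cache is flushed regardless. A minimal standalone sketch of that decode (the CDB contents are an assumed example):

/* Standalone sketch: decode the IMMED bit of SYNCHRONIZE CACHE (10). */
#include <stdio.h>

int main(void)
{
	/* 0x35 is the SYNCHRONIZE CACHE (10) opcode; byte 1 bit 1 is IMMED */
	unsigned char cdb[10] = { 0x35, 0x02, 0, 0, 0, 0, 0, 0, 0, 0 };
	int immed = cdb[1] & 0x2;

	printf("IMMED=%d: %s\n", !!immed,
	       immed ? "GOOD status queued before the flush bio completes"
		     : "GOOD status deferred to the flush bio's end_io");
	return 0;
}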
354 353
355 static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range) 354 static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range)
356 { 355 {
357 struct iblock_dev *ibd = dev->dev_ptr; 356 struct iblock_dev *ibd = dev->dev_ptr;
358 struct block_device *bd = ibd->ibd_bd; 357 struct block_device *bd = ibd->ibd_bd;
359 int barrier = 0; 358 int barrier = 0;
360 359
361 return blkdev_issue_discard(bd, lba, range, GFP_KERNEL, barrier); 360 return blkdev_issue_discard(bd, lba, range, GFP_KERNEL, barrier);
362 } 361 }
363 362
364 static void iblock_free_task(struct se_task *task) 363 static void iblock_free_task(struct se_task *task)
365 { 364 {
366 kfree(IBLOCK_REQ(task)); 365 kfree(IBLOCK_REQ(task));
367 } 366 }
368 367
369 enum { 368 enum {
370 Opt_udev_path, Opt_force, Opt_err 369 Opt_udev_path, Opt_force, Opt_err
371 }; 370 };
372 371
373 static match_table_t tokens = { 372 static match_table_t tokens = {
374 {Opt_udev_path, "udev_path=%s"}, 373 {Opt_udev_path, "udev_path=%s"},
375 {Opt_force, "force=%d"}, 374 {Opt_force, "force=%d"},
376 {Opt_err, NULL} 375 {Opt_err, NULL}
377 }; 376 };
378 377
379 static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba, 378 static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
380 struct se_subsystem_dev *se_dev, 379 struct se_subsystem_dev *se_dev,
381 const char *page, ssize_t count) 380 const char *page, ssize_t count)
382 { 381 {
383 struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr; 382 struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr;
384 char *orig, *ptr, *arg_p, *opts; 383 char *orig, *ptr, *arg_p, *opts;
385 substring_t args[MAX_OPT_ARGS]; 384 substring_t args[MAX_OPT_ARGS];
386 int ret = 0, token; 385 int ret = 0, token;
387 386
388 opts = kstrdup(page, GFP_KERNEL); 387 opts = kstrdup(page, GFP_KERNEL);
389 if (!opts) 388 if (!opts)
390 return -ENOMEM; 389 return -ENOMEM;
391 390
392 orig = opts; 391 orig = opts;
393 392
394 while ((ptr = strsep(&opts, ",")) != NULL) { 393 while ((ptr = strsep(&opts, ",")) != NULL) {
395 if (!*ptr) 394 if (!*ptr)
396 continue; 395 continue;
397 396
398 token = match_token(ptr, tokens, args); 397 token = match_token(ptr, tokens, args);
399 switch (token) { 398 switch (token) {
400 case Opt_udev_path: 399 case Opt_udev_path:
401 if (ib_dev->ibd_bd) { 400 if (ib_dev->ibd_bd) {
402 pr_err("Unable to set udev_path= while" 401 pr_err("Unable to set udev_path= while"
403 " ib_dev->ibd_bd exists\n"); 402 " ib_dev->ibd_bd exists\n");
404 ret = -EEXIST; 403 ret = -EEXIST;
405 goto out; 404 goto out;
406 } 405 }
407 arg_p = match_strdup(&args[0]); 406 arg_p = match_strdup(&args[0]);
408 if (!arg_p) { 407 if (!arg_p) {
409 ret = -ENOMEM; 408 ret = -ENOMEM;
410 break; 409 break;
411 } 410 }
412 snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN, 411 snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN,
413 "%s", arg_p); 412 "%s", arg_p);
414 kfree(arg_p); 413 kfree(arg_p);
415 pr_debug("IBLOCK: Referencing UDEV path: %s\n", 414 pr_debug("IBLOCK: Referencing UDEV path: %s\n",
416 ib_dev->ibd_udev_path); 415 ib_dev->ibd_udev_path);
417 ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH; 416 ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
418 break; 417 break;
419 case Opt_force: 418 case Opt_force:
420 break; 419 break;
421 default: 420 default:
422 break; 421 break;
423 } 422 }
424 } 423 }
425 424
426 out: 425 out:
427 kfree(orig); 426 kfree(orig);
428 return (!ret) ? count : ret; 427 return (!ret) ? count : ret;
429 } 428 }
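
The parser above walks a comma-separated "key=value" option string with strsep() and the kernel's match_token(). Below is a standalone userspace model of the same loop, with sscanf() standing in for match_token(); the input string "udev_path=/dev/sdb,force=1" is an assumed example of what configfs would hand the backend.

/* Standalone userspace model of the option parsing loop above. */
#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char *opts = strdup("udev_path=/dev/sdb,force=1");
	char *orig = opts, *ptr;
	char path[256];
	int force;

	while ((ptr = strsep(&opts, ",")) != NULL) {
		if (!*ptr)
			continue;
		/* sscanf() stands in for the kernel's match_token() */
		if (sscanf(ptr, "udev_path=%255s", path) == 1)
			printf("udev_path: %s\n", path);
		else if (sscanf(ptr, "force=%d", &force) == 1)
			printf("force: %d (accepted and ignored)\n", force);
	}
	free(orig);
	return 0;
}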
430 429
431 static ssize_t iblock_check_configfs_dev_params( 430 static ssize_t iblock_check_configfs_dev_params(
432 struct se_hba *hba, 431 struct se_hba *hba,
433 struct se_subsystem_dev *se_dev) 432 struct se_subsystem_dev *se_dev)
434 { 433 {
435 struct iblock_dev *ibd = se_dev->se_dev_su_ptr; 434 struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
436 435
437 if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) { 436 if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) {
438 pr_err("Missing udev_path= parameters for IBLOCK\n"); 437 pr_err("Missing udev_path= parameters for IBLOCK\n");
439 return -EINVAL; 438 return -EINVAL;
440 } 439 }
441 440
442 return 0; 441 return 0;
443 } 442 }
444 443
445 static ssize_t iblock_show_configfs_dev_params( 444 static ssize_t iblock_show_configfs_dev_params(
446 struct se_hba *hba, 445 struct se_hba *hba,
447 struct se_subsystem_dev *se_dev, 446 struct se_subsystem_dev *se_dev,
448 char *b) 447 char *b)
449 { 448 {
450 struct iblock_dev *ibd = se_dev->se_dev_su_ptr; 449 struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
451 struct block_device *bd = ibd->ibd_bd; 450 struct block_device *bd = ibd->ibd_bd;
452 char buf[BDEVNAME_SIZE]; 451 char buf[BDEVNAME_SIZE];
453 ssize_t bl = 0; 452 ssize_t bl = 0;
454 453
455 if (bd) 454 if (bd)
456 bl += sprintf(b + bl, "iBlock device: %s", 455 bl += sprintf(b + bl, "iBlock device: %s",
457 bdevname(bd, buf)); 456 bdevname(bd, buf));
458 if (ibd->ibd_flags & IBDF_HAS_UDEV_PATH) { 457 if (ibd->ibd_flags & IBDF_HAS_UDEV_PATH) {
459 bl += sprintf(b + bl, " UDEV PATH: %s\n", 458 bl += sprintf(b + bl, " UDEV PATH: %s\n",
460 ibd->ibd_udev_path); 459 ibd->ibd_udev_path);
461 } else 460 } else
462 bl += sprintf(b + bl, "\n"); 461 bl += sprintf(b + bl, "\n");
463 462
464 bl += sprintf(b + bl, " "); 463 bl += sprintf(b + bl, " ");
465 if (bd) { 464 if (bd) {
466 bl += sprintf(b + bl, "Major: %d Minor: %d %s\n", 465 bl += sprintf(b + bl, "Major: %d Minor: %d %s\n",
467 MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ? 466 MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
468 "" : (bd->bd_holder == (struct iblock_dev *)ibd) ? 467 "" : (bd->bd_holder == (struct iblock_dev *)ibd) ?
469 "CLAIMED: IBLOCK" : "CLAIMED: OS"); 468 "CLAIMED: IBLOCK" : "CLAIMED: OS");
470 } else { 469 } else {
471 bl += sprintf(b + bl, "Major: 0 Minor: 0\n"); 470 bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
472 } 471 }
473 472
474 return bl; 473 return bl;
475 } 474 }
476 475
477 static void iblock_bio_destructor(struct bio *bio) 476 static void iblock_bio_destructor(struct bio *bio)
478 { 477 {
479 struct se_task *task = bio->bi_private; 478 struct se_task *task = bio->bi_private;
480 struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr; 479 struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr;
481 480
482 bio_free(bio, ib_dev->ibd_bio_set); 481 bio_free(bio, ib_dev->ibd_bio_set);
483 } 482 }
484 483
485 static struct bio * 484 static struct bio *
486 iblock_get_bio(struct se_task *task, sector_t lba, u32 sg_num) 485 iblock_get_bio(struct se_task *task, sector_t lba, u32 sg_num)
487 { 486 {
488 struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr; 487 struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr;
489 struct iblock_req *ib_req = IBLOCK_REQ(task); 488 struct iblock_req *ib_req = IBLOCK_REQ(task);
490 struct bio *bio; 489 struct bio *bio;
491 490
492 bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set); 491 bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
493 if (!bio) { 492 if (!bio) {
494 pr_err("Unable to allocate memory for bio\n"); 493 pr_err("Unable to allocate memory for bio\n");
495 return NULL; 494 return NULL;
496 } 495 }
497 496
498 pr_debug("Allocated bio: %p task_sg_nents: %u using ibd_bio_set:" 497 pr_debug("Allocated bio: %p task_sg_nents: %u using ibd_bio_set:"
499 " %p\n", bio, task->task_sg_nents, ib_dev->ibd_bio_set); 498 " %p\n", bio, task->task_sg_nents, ib_dev->ibd_bio_set);
500 pr_debug("Allocated bio: %p task_size: %u\n", bio, task->task_size); 499 pr_debug("Allocated bio: %p task_size: %u\n", bio, task->task_size);
501 500
502 bio->bi_bdev = ib_dev->ibd_bd; 501 bio->bi_bdev = ib_dev->ibd_bd;
503 bio->bi_private = task; 502 bio->bi_private = task;
504 bio->bi_destructor = iblock_bio_destructor; 503 bio->bi_destructor = iblock_bio_destructor;
505 bio->bi_end_io = &iblock_bio_done; 504 bio->bi_end_io = &iblock_bio_done;
506 bio->bi_sector = lba; 505 bio->bi_sector = lba;
507 atomic_inc(&ib_req->ib_bio_cnt); 506 atomic_inc(&ib_req->ib_bio_cnt);
508 507
509 pr_debug("Set bio->bi_sector: %llu\n", (unsigned long long)bio->bi_sector); 508 pr_debug("Set bio->bi_sector: %llu\n", (unsigned long long)bio->bi_sector);
510 pr_debug("Set ib_req->ib_bio_cnt: %d\n", 509 pr_debug("Set ib_req->ib_bio_cnt: %d\n",
511 atomic_read(&ib_req->ib_bio_cnt)); 510 atomic_read(&ib_req->ib_bio_cnt));
512 return bio; 511 return bio;
513 } 512 }
514 513
515 static int iblock_do_task(struct se_task *task) 514 static int iblock_do_task(struct se_task *task)
516 { 515 {
517 struct se_cmd *cmd = task->task_se_cmd; 516 struct se_cmd *cmd = task->task_se_cmd;
518 struct se_device *dev = cmd->se_dev; 517 struct se_device *dev = cmd->se_dev;
519 struct bio *bio; 518 struct bio *bio;
520 struct bio_list list; 519 struct bio_list list;
521 struct scatterlist *sg; 520 struct scatterlist *sg;
522 u32 i, sg_num = task->task_sg_nents; 521 u32 i, sg_num = task->task_sg_nents;
523 sector_t block_lba; 522 sector_t block_lba;
524 struct blk_plug plug; 523 struct blk_plug plug;
525 int rw; 524 int rw;
526 525
527 if (task->task_data_direction == DMA_TO_DEVICE) { 526 if (task->task_data_direction == DMA_TO_DEVICE) {
528 /* 527 /*
529 * Force data to disk if we pretend to not have a volatile 528 * Force data to disk if we pretend to not have a volatile
530 * write cache, or the initiator set the Force Unit Access bit. 529 * write cache, or the initiator set the Force Unit Access bit.
531 */ 530 */
532 if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 || 531 if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
533 (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && 532 (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
534 (cmd->se_cmd_flags & SCF_FUA))) 533 (cmd->se_cmd_flags & SCF_FUA)))
535 rw = WRITE_FUA; 534 rw = WRITE_FUA;
536 else 535 else
537 rw = WRITE; 536 rw = WRITE;
538 } else { 537 } else {
539 rw = READ; 538 rw = READ;
540 } 539 }
541 540
542 /* 541 /*
543 * Convert the starting LBA from the (possibly non 512-byte) struct 542 * Convert the starting LBA from the (possibly non 512-byte) struct
544 * se_task SCSI blocksize into Linux/Block 512-byte units for the BIO. 543 * se_task SCSI blocksize into Linux/Block 512-byte units for the BIO.
545 */ 544 */
546 if (dev->se_sub_dev->se_dev_attrib.block_size == 4096) 545 if (dev->se_sub_dev->se_dev_attrib.block_size == 4096)
547 block_lba = (task->task_lba << 3); 546 block_lba = (task->task_lba << 3);
548 else if (dev->se_sub_dev->se_dev_attrib.block_size == 2048) 547 else if (dev->se_sub_dev->se_dev_attrib.block_size == 2048)
549 block_lba = (task->task_lba << 2); 548 block_lba = (task->task_lba << 2);
550 else if (dev->se_sub_dev->se_dev_attrib.block_size == 1024) 549 else if (dev->se_sub_dev->se_dev_attrib.block_size == 1024)
551 block_lba = (task->task_lba << 1); 550 block_lba = (task->task_lba << 1);
552 else if (dev->se_sub_dev->se_dev_attrib.block_size == 512) 551 else if (dev->se_sub_dev->se_dev_attrib.block_size == 512)
553 block_lba = task->task_lba; 552 block_lba = task->task_lba;
554 else { 553 else {
555 pr_err("Unsupported SCSI -> BLOCK LBA conversion:" 554 pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
556 " %u\n", dev->se_sub_dev->se_dev_attrib.block_size); 555 " %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
557 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 556 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
558 return -ENOSYS; 557 return -ENOSYS;
559 } 558 }
560 559
561 bio = iblock_get_bio(task, block_lba, sg_num); 560 bio = iblock_get_bio(task, block_lba, sg_num);
562 if (!bio) { 561 if (!bio) {
563 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 562 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
564 return -ENOMEM; 563 return -ENOMEM;
565 } 564 }
566 565
567 bio_list_init(&list); 566 bio_list_init(&list);
568 bio_list_add(&list, bio); 567 bio_list_add(&list, bio);
569 568
570 for_each_sg(task->task_sg, sg, task->task_sg_nents, i) { 569 for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
571 /* 570 /*
572 * XXX: if the length the device accepts is shorter than the 571 * XXX: if the length the device accepts is shorter than the
573 * length of the S/G list entry this will cause an 572 * length of the S/G list entry this will cause an
574 * endless loop. Better hope no driver uses huge pages. 573 * endless loop. Better hope no driver uses huge pages.
575 */ 574 */
576 while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset) 575 while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
577 != sg->length) { 576 != sg->length) {
578 bio = iblock_get_bio(task, block_lba, sg_num); 577 bio = iblock_get_bio(task, block_lba, sg_num);
579 if (!bio) 578 if (!bio)
580 goto fail; 579 goto fail;
581 bio_list_add(&list, bio); 580 bio_list_add(&list, bio);
582 } 581 }
583 582
584 /* Always in 512 byte units for Linux/Block */ 583 /* Always in 512 byte units for Linux/Block */
585 block_lba += sg->length >> IBLOCK_LBA_SHIFT; 584 block_lba += sg->length >> IBLOCK_LBA_SHIFT;
586 sg_num--; 585 sg_num--;
587 } 586 }
588 587
589 blk_start_plug(&plug); 588 blk_start_plug(&plug);
590 while ((bio = bio_list_pop(&list))) 589 while ((bio = bio_list_pop(&list)))
591 submit_bio(rw, bio); 590 submit_bio(rw, bio);
592 blk_finish_plug(&plug); 591 blk_finish_plug(&plug);
593 592
594 return 0; 593 return 0;
595 594
596 fail: 595 fail:
597 while ((bio = bio_list_pop(&list))) 596 while ((bio = bio_list_pop(&list)))
598 bio_put(bio); 597 bio_put(bio);
599 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 598 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
600 return -ENOMEM; 599 return -ENOMEM;
601 } 600 }
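
The shifts at the top of iblock_do_task() scale a SCSI LBA in the emulated block size down to the 512-byte sectors the block layer expects. A one-line worked example as a standalone sketch: LBA 100 at a 4096-byte block size starts at byte 409600, i.e. 512-byte sector 800.

/* Standalone sketch: SCSI LBA in a 4096-byte blocksize -> 512-byte sector. */
#include <stdio.h>

int main(void)
{
	unsigned long long task_lba = 100;		/* in 4096-byte blocks */
	unsigned long long block_lba = task_lba << 3;	/* 4096/512 == 8 */

	printf("byte offset %llu -> 512-byte sector %llu\n",
	       task_lba * 4096, block_lba);
	return 0;
}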
602 601
603 static u32 iblock_get_device_rev(struct se_device *dev) 602 static u32 iblock_get_device_rev(struct se_device *dev)
604 { 603 {
605 return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */ 604 return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
606 } 605 }
607 606
608 static u32 iblock_get_device_type(struct se_device *dev) 607 static u32 iblock_get_device_type(struct se_device *dev)
609 { 608 {
610 return TYPE_DISK; 609 return TYPE_DISK;
611 } 610 }
612 611
613 static sector_t iblock_get_blocks(struct se_device *dev) 612 static sector_t iblock_get_blocks(struct se_device *dev)
614 { 613 {
615 struct iblock_dev *ibd = dev->dev_ptr; 614 struct iblock_dev *ibd = dev->dev_ptr;
616 struct block_device *bd = ibd->ibd_bd; 615 struct block_device *bd = ibd->ibd_bd;
617 struct request_queue *q = bdev_get_queue(bd); 616 struct request_queue *q = bdev_get_queue(bd);
618 617
619 return iblock_emulate_read_cap_with_block_size(dev, bd, q); 618 return iblock_emulate_read_cap_with_block_size(dev, bd, q);
620 } 619 }
621 620
622 static void iblock_bio_done(struct bio *bio, int err) 621 static void iblock_bio_done(struct bio *bio, int err)
623 { 622 {
624 struct se_task *task = bio->bi_private; 623 struct se_task *task = bio->bi_private;
625 struct iblock_req *ibr = IBLOCK_REQ(task); 624 struct iblock_req *ibr = IBLOCK_REQ(task);
626 625
627 /* 626 /*
628 * Set -EIO if !BIO_UPTODATE and the passed err is still 0 627 * Set -EIO if !BIO_UPTODATE and the passed err is still 0
629 */ 628 */
630 if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err) 629 if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
631 err = -EIO; 630 err = -EIO;
632 631
633 if (err != 0) { 632 if (err != 0) {
634 pr_err("test_bit(BIO_UPTODATE) failed for bio: %p," 633 pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
635 " err: %d\n", bio, err); 634 " err: %d\n", bio, err);
636 /* 635 /*
637 * Bump the ib_bio_err_cnt and release bio. 636 * Bump the ib_bio_err_cnt and release bio.
638 */ 637 */
639 atomic_inc(&ibr->ib_bio_err_cnt); 638 atomic_inc(&ibr->ib_bio_err_cnt);
640 smp_mb__after_atomic_inc(); 639 smp_mb__after_atomic_inc();
641 } 640 }
642 641
643 bio_put(bio); 642 bio_put(bio);
644 643
645 if (!atomic_dec_and_test(&ibr->ib_bio_cnt)) 644 if (!atomic_dec_and_test(&ibr->ib_bio_cnt))
646 return; 645 return;
647 646
648 pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n", 647 pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n",
649 task, bio, task->task_lba, 648 task, bio, task->task_lba,
650 (unsigned long long)bio->bi_sector, err); 649 (unsigned long long)bio->bi_sector, err);
651 650
652 transport_complete_task(task, !atomic_read(&ibr->ib_bio_err_cnt)); 651 transport_complete_task(task, !atomic_read(&ibr->ib_bio_err_cnt));
653 } 652 }
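
iblock_bio_done() uses a per-task reference count: every bio completion drops one reference, errors only bump a separate error counter, and whichever completion drops the count to zero finishes the task. A standalone model of that pattern, using C11 atomics in place of the kernel's atomic_t (bio_done() and the counters are local stand-ins):

/* Standalone model of the bio completion refcount, using C11 atomics. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int bio_cnt;	/* stands in for ibr->ib_bio_cnt */
static atomic_int bio_err_cnt;	/* stands in for ibr->ib_bio_err_cnt */

static void bio_done(int err)
{
	if (err)
		atomic_fetch_add(&bio_err_cnt, 1);
	/* The completion that drops the count to zero finishes the task */
	if (atomic_fetch_sub(&bio_cnt, 1) == 1)
		printf("task complete, success=%d\n",
		       atomic_load(&bio_err_cnt) == 0);
}

int main(void)
{
	atomic_store(&bio_cnt, 3);	/* three bios in flight */
	bio_done(0);
	bio_done(-5);			/* one -EIO style failure */
	bio_done(0);			/* last drop completes the task */
	return 0;
}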
654 653
655 static struct se_subsystem_api iblock_template = { 654 static struct se_subsystem_api iblock_template = {
656 .name = "iblock", 655 .name = "iblock",
657 .owner = THIS_MODULE, 656 .owner = THIS_MODULE,
658 .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV, 657 .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV,
659 .write_cache_emulated = 1, 658 .write_cache_emulated = 1,
660 .fua_write_emulated = 1, 659 .fua_write_emulated = 1,
661 .attach_hba = iblock_attach_hba, 660 .attach_hba = iblock_attach_hba,
662 .detach_hba = iblock_detach_hba, 661 .detach_hba = iblock_detach_hba,
663 .allocate_virtdevice = iblock_allocate_virtdevice, 662 .allocate_virtdevice = iblock_allocate_virtdevice,
664 .create_virtdevice = iblock_create_virtdevice, 663 .create_virtdevice = iblock_create_virtdevice,
665 .free_device = iblock_free_device, 664 .free_device = iblock_free_device,
666 .alloc_task = iblock_alloc_task, 665 .alloc_task = iblock_alloc_task,
667 .do_task = iblock_do_task, 666 .do_task = iblock_do_task,
668 .do_discard = iblock_do_discard, 667 .do_discard = iblock_do_discard,
669 .do_sync_cache = iblock_emulate_sync_cache, 668 .do_sync_cache = iblock_emulate_sync_cache,
670 .free_task = iblock_free_task, 669 .free_task = iblock_free_task,
671 .check_configfs_dev_params = iblock_check_configfs_dev_params, 670 .check_configfs_dev_params = iblock_check_configfs_dev_params,
672 .set_configfs_dev_params = iblock_set_configfs_dev_params, 671 .set_configfs_dev_params = iblock_set_configfs_dev_params,
673 .show_configfs_dev_params = iblock_show_configfs_dev_params, 672 .show_configfs_dev_params = iblock_show_configfs_dev_params,
674 .get_device_rev = iblock_get_device_rev, 673 .get_device_rev = iblock_get_device_rev,
675 .get_device_type = iblock_get_device_type, 674 .get_device_type = iblock_get_device_type,
676 .get_blocks = iblock_get_blocks, 675 .get_blocks = iblock_get_blocks,
677 }; 676 };
678 677
679 static int __init iblock_module_init(void) 678 static int __init iblock_module_init(void)
680 { 679 {
681 return transport_subsystem_register(&iblock_template); 680 return transport_subsystem_register(&iblock_template);
682 } 681 }
683 682
684 static void iblock_module_exit(void) 683 static void iblock_module_exit(void)
685 { 684 {
686 transport_subsystem_release(&iblock_template); 685 transport_subsystem_release(&iblock_template);
687 } 686 }
688 687
689 MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin"); 688 MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
690 MODULE_AUTHOR("nab@Linux-iSCSI.org"); 689 MODULE_AUTHOR("nab@Linux-iSCSI.org");
691 MODULE_LICENSE("GPL"); 690 MODULE_LICENSE("GPL");
692 691
693 module_init(iblock_module_init); 692 module_init(iblock_module_init);
694 module_exit(iblock_module_exit); 693 module_exit(iblock_module_exit);
695 694
drivers/target/target_core_pr.c
1 /******************************************************************************* 1 /*******************************************************************************
2 * Filename: target_core_pr.c 2 * Filename: target_core_pr.c
3 * 3 *
4 * This file contains SPC-3 compliant persistent reservations and 4 * This file contains SPC-3 compliant persistent reservations and
5 * legacy SPC-2 reservations with compatible reservation handling (CRH=1) 5 * legacy SPC-2 reservations with compatible reservation handling (CRH=1)
6 * 6 *
7 * Copyright (c) 2009, 2010 Rising Tide Systems 7 * Copyright (c) 2009, 2010 Rising Tide Systems
8 * Copyright (c) 2009, 2010 Linux-iSCSI.org 8 * Copyright (c) 2009, 2010 Linux-iSCSI.org
9 * 9 *
10 * Nicholas A. Bellinger <nab@kernel.org> 10 * Nicholas A. Bellinger <nab@kernel.org>
11 * 11 *
12 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by 13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or 14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version. 15 * (at your option) any later version.
16 * 16 *
17 * This program is distributed in the hope that it will be useful, 17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of 18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details. 20 * GNU General Public License for more details.
21 * 21 *
22 * You should have received a copy of the GNU General Public License 22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software 23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 24 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
25 * 25 *
26 ******************************************************************************/ 26 ******************************************************************************/
27 27
28 #include <linux/slab.h> 28 #include <linux/slab.h>
29 #include <linux/spinlock.h> 29 #include <linux/spinlock.h>
30 #include <linux/list.h> 30 #include <linux/list.h>
31 #include <scsi/scsi.h> 31 #include <scsi/scsi.h>
32 #include <scsi/scsi_cmnd.h> 32 #include <scsi/scsi_cmnd.h>
33 #include <asm/unaligned.h> 33 #include <asm/unaligned.h>
34 34
35 #include <target/target_core_base.h> 35 #include <target/target_core_base.h>
36 #include <target/target_core_device.h> 36 #include <target/target_core_backend.h>
37 #include <target/target_core_tmr.h> 37 #include <target/target_core_fabric.h>
38 #include <target/target_core_tpg.h>
39 #include <target/target_core_transport.h>
40 #include <target/target_core_fabric_ops.h>
41 #include <target/target_core_configfs.h> 38 #include <target/target_core_configfs.h>
42 39
43 #include "target_core_internal.h" 40 #include "target_core_internal.h"
44 #include "target_core_pr.h" 41 #include "target_core_pr.h"
45 #include "target_core_ua.h" 42 #include "target_core_ua.h"
46 43
47 /* 44 /*
48 * Used for Specify Initiator Ports Capable Bit (SPEC_I_PT) 45 * Used for Specify Initiator Ports Capable Bit (SPEC_I_PT)
49 */ 46 */
50 struct pr_transport_id_holder { 47 struct pr_transport_id_holder {
51 int dest_local_nexus; 48 int dest_local_nexus;
52 struct t10_pr_registration *dest_pr_reg; 49 struct t10_pr_registration *dest_pr_reg;
53 struct se_portal_group *dest_tpg; 50 struct se_portal_group *dest_tpg;
54 struct se_node_acl *dest_node_acl; 51 struct se_node_acl *dest_node_acl;
55 struct se_dev_entry *dest_se_deve; 52 struct se_dev_entry *dest_se_deve;
56 struct list_head dest_list; 53 struct list_head dest_list;
57 }; 54 };
58 55
59 int core_pr_dump_initiator_port( 56 int core_pr_dump_initiator_port(
60 struct t10_pr_registration *pr_reg, 57 struct t10_pr_registration *pr_reg,
61 char *buf, 58 char *buf,
62 u32 size) 59 u32 size)
63 { 60 {
64 if (!pr_reg->isid_present_at_reg) 61 if (!pr_reg->isid_present_at_reg)
65 return 0; 62 return 0;
66 63
67 snprintf(buf, size, ",i,0x%s", &pr_reg->pr_reg_isid[0]); 64 snprintf(buf, size, ",i,0x%s", &pr_reg->pr_reg_isid[0]);
68 return 1; 65 return 1;
69 } 66 }
70 67
71 static void __core_scsi3_complete_pro_release(struct se_device *, struct se_node_acl *, 68 static void __core_scsi3_complete_pro_release(struct se_device *, struct se_node_acl *,
72 struct t10_pr_registration *, int); 69 struct t10_pr_registration *, int);
73 70
74 static int core_scsi2_reservation_seq_non_holder( 71 static int core_scsi2_reservation_seq_non_holder(
75 struct se_cmd *cmd, 72 struct se_cmd *cmd,
76 unsigned char *cdb, 73 unsigned char *cdb,
77 u32 pr_reg_type) 74 u32 pr_reg_type)
78 { 75 {
79 switch (cdb[0]) { 76 switch (cdb[0]) {
80 case INQUIRY: 77 case INQUIRY:
81 case RELEASE: 78 case RELEASE:
82 case RELEASE_10: 79 case RELEASE_10:
83 return 0; 80 return 0;
84 default: 81 default:
85 return 1; 82 return 1;
86 } 83 }
87 84
88 return 1; 85 return 1;
89 } 86 }
90 87
91 static int core_scsi2_reservation_check(struct se_cmd *cmd, u32 *pr_reg_type) 88 static int core_scsi2_reservation_check(struct se_cmd *cmd, u32 *pr_reg_type)
92 { 89 {
93 struct se_device *dev = cmd->se_dev; 90 struct se_device *dev = cmd->se_dev;
94 struct se_session *sess = cmd->se_sess; 91 struct se_session *sess = cmd->se_sess;
95 int ret; 92 int ret;
96 93
97 if (!sess) 94 if (!sess)
98 return 0; 95 return 0;
99 96
100 spin_lock(&dev->dev_reservation_lock); 97 spin_lock(&dev->dev_reservation_lock);
101 if (!dev->dev_reserved_node_acl || !sess) { 98 if (!dev->dev_reserved_node_acl || !sess) {
102 spin_unlock(&dev->dev_reservation_lock); 99 spin_unlock(&dev->dev_reservation_lock);
103 return 0; 100 return 0;
104 } 101 }
105 if (dev->dev_reserved_node_acl != sess->se_node_acl) { 102 if (dev->dev_reserved_node_acl != sess->se_node_acl) {
106 spin_unlock(&dev->dev_reservation_lock); 103 spin_unlock(&dev->dev_reservation_lock);
107 return -EINVAL; 104 return -EINVAL;
108 } 105 }
109 if (!(dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID)) { 106 if (!(dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID)) {
110 spin_unlock(&dev->dev_reservation_lock); 107 spin_unlock(&dev->dev_reservation_lock);
111 return 0; 108 return 0;
112 } 109 }
113 ret = (dev->dev_res_bin_isid == sess->sess_bin_isid) ? 0 : -EINVAL; 110 ret = (dev->dev_res_bin_isid == sess->sess_bin_isid) ? 0 : -EINVAL;
114 spin_unlock(&dev->dev_reservation_lock); 111 spin_unlock(&dev->dev_reservation_lock);
115 112
116 return ret; 113 return ret;
117 } 114 }
118 115
119 static struct t10_pr_registration *core_scsi3_locate_pr_reg(struct se_device *, 116 static struct t10_pr_registration *core_scsi3_locate_pr_reg(struct se_device *,
120 struct se_node_acl *, struct se_session *); 117 struct se_node_acl *, struct se_session *);
121 static void core_scsi3_put_pr_reg(struct t10_pr_registration *); 118 static void core_scsi3_put_pr_reg(struct t10_pr_registration *);
122 119
123 static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd, int *ret) 120 static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd, int *ret)
124 { 121 {
125 struct se_session *se_sess = cmd->se_sess; 122 struct se_session *se_sess = cmd->se_sess;
126 struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev; 123 struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
127 struct t10_pr_registration *pr_reg; 124 struct t10_pr_registration *pr_reg;
128 struct t10_reservation *pr_tmpl = &su_dev->t10_pr; 125 struct t10_reservation *pr_tmpl = &su_dev->t10_pr;
129 int crh = (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS); 126 int crh = (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS);
130 int conflict = 0; 127 int conflict = 0;
131 128
132 if (!crh) 129 if (!crh)
133 return false; 130 return false;
134 131
135 pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, 132 pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
136 se_sess); 133 se_sess);
137 if (pr_reg) { 134 if (pr_reg) {
138 /* 135 /*
139 * From spc4r17 5.7.3 Exceptions to SPC-2 RESERVE and RELEASE 136 * From spc4r17 5.7.3 Exceptions to SPC-2 RESERVE and RELEASE
140 * behavior 137 * behavior
141 * 138 *
142 * A RESERVE(6) or RESERVE(10) command shall complete with GOOD 139 * A RESERVE(6) or RESERVE(10) command shall complete with GOOD
143 * status, but no reservation shall be established and the 140 * status, but no reservation shall be established and the
144 * persistent reservation shall not be changed, if the command 141 * persistent reservation shall not be changed, if the command
145 * is received from a) and b) below. 142 * is received from a) and b) below.
146 * 143 *
147 * A RELEASE(6) or RELEASE(10) command shall complete with GOOD 144 * A RELEASE(6) or RELEASE(10) command shall complete with GOOD
148 * status, but the persistent reservation shall not be released, 145 * status, but the persistent reservation shall not be released,
149 * if the command is received from a) and b) 146 * if the command is received from a) and b)
150 * 147 *
151 * a) An I_T nexus that is a persistent reservation holder; or 148 * a) An I_T nexus that is a persistent reservation holder; or
152 * b) An I_T nexus that is registered if a registrants only or 149 * b) An I_T nexus that is registered if a registrants only or
153 * all registrants type persistent reservation is present. 150 * all registrants type persistent reservation is present.
154 * 151 *
155 * In all other cases, a RESERVE(6) command, RESERVE(10) command, 152 * In all other cases, a RESERVE(6) command, RESERVE(10) command,
156 * RELEASE(6) command, or RELEASE(10) command shall be processed 153 * RELEASE(6) command, or RELEASE(10) command shall be processed
157 * as defined in SPC-2. 154 * as defined in SPC-2.
158 */ 155 */
159 if (pr_reg->pr_res_holder) { 156 if (pr_reg->pr_res_holder) {
160 core_scsi3_put_pr_reg(pr_reg); 157 core_scsi3_put_pr_reg(pr_reg);
161 *ret = 0; 158 *ret = 0;
162 return false; 159 return false;
163 } 160 }
164 if ((pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY) || 161 if ((pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY) ||
165 (pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY) || 162 (pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY) ||
166 (pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) || 163 (pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
167 (pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) { 164 (pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
168 core_scsi3_put_pr_reg(pr_reg); 165 core_scsi3_put_pr_reg(pr_reg);
169 *ret = 0; 166 *ret = 0;
170 return true; 167 return true;
171 } 168 }
172 core_scsi3_put_pr_reg(pr_reg); 169 core_scsi3_put_pr_reg(pr_reg);
173 conflict = 1; 170 conflict = 1;
174 } else { 171 } else {
175 /* 172 /*
176 * Following spc2r20 5.5.1 Reservations overview: 173 * Following spc2r20 5.5.1 Reservations overview:
177 * 174 *
178 * If a logical unit has executed a PERSISTENT RESERVE OUT 175 * If a logical unit has executed a PERSISTENT RESERVE OUT
179 * command with the REGISTER or the REGISTER AND IGNORE 176 * command with the REGISTER or the REGISTER AND IGNORE
180 * EXISTING KEY service action and is still registered by any 177 * EXISTING KEY service action and is still registered by any
181 * initiator, all RESERVE commands and all RELEASE commands 178 * initiator, all RESERVE commands and all RELEASE commands
182 * regardless of initiator shall conflict and shall terminate 179 * regardless of initiator shall conflict and shall terminate
183 * with a RESERVATION CONFLICT status. 180 * with a RESERVATION CONFLICT status.
184 */ 181 */
185 spin_lock(&pr_tmpl->registration_lock); 182 spin_lock(&pr_tmpl->registration_lock);
186 conflict = (list_empty(&pr_tmpl->registration_list)) ? 0 : 1; 183 conflict = (list_empty(&pr_tmpl->registration_list)) ? 0 : 1;
187 spin_unlock(&pr_tmpl->registration_lock); 184 spin_unlock(&pr_tmpl->registration_lock);
188 } 185 }
189 186
190 if (conflict) { 187 if (conflict) {
191 pr_err("Received legacy SPC-2 RESERVE/RELEASE" 188 pr_err("Received legacy SPC-2 RESERVE/RELEASE"
192 " while active SPC-3 registrations exist," 189 " while active SPC-3 registrations exist,"
193 " returning RESERVATION_CONFLICT\n"); 190 " returning RESERVATION_CONFLICT\n");
194 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; 191 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
195 return true; 192 return true;
196 } 193 }
197 194
198 return false; 195 return false;
199 } 196 }
200 197
201 int target_scsi2_reservation_release(struct se_task *task) 198 int target_scsi2_reservation_release(struct se_task *task)
202 { 199 {
203 struct se_cmd *cmd = task->task_se_cmd; 200 struct se_cmd *cmd = task->task_se_cmd;
204 struct se_device *dev = cmd->se_dev; 201 struct se_device *dev = cmd->se_dev;
205 struct se_session *sess = cmd->se_sess; 202 struct se_session *sess = cmd->se_sess;
206 struct se_portal_group *tpg = sess->se_tpg; 203 struct se_portal_group *tpg = sess->se_tpg;
207 int ret = 0; 204 int ret = 0;
208 205
209 if (!sess || !tpg) 206 if (!sess || !tpg)
210 goto out; 207 goto out;
211 if (target_check_scsi2_reservation_conflict(cmd, &ret)) 208 if (target_check_scsi2_reservation_conflict(cmd, &ret))
212 goto out; 209 goto out;
213 210
214 ret = 0; 211 ret = 0;
215 spin_lock(&dev->dev_reservation_lock); 212 spin_lock(&dev->dev_reservation_lock);
216 if (!dev->dev_reserved_node_acl || !sess) 213 if (!dev->dev_reserved_node_acl || !sess)
217 goto out_unlock; 214 goto out_unlock;
218 215
219 if (dev->dev_reserved_node_acl != sess->se_node_acl) 216 if (dev->dev_reserved_node_acl != sess->se_node_acl)
220 goto out_unlock; 217 goto out_unlock;
221 218
222 dev->dev_reserved_node_acl = NULL; 219 dev->dev_reserved_node_acl = NULL;
223 dev->dev_flags &= ~DF_SPC2_RESERVATIONS; 220 dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
224 if (dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID) { 221 if (dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID) {
225 dev->dev_res_bin_isid = 0; 222 dev->dev_res_bin_isid = 0;
226 dev->dev_flags &= ~DF_SPC2_RESERVATIONS_WITH_ISID; 223 dev->dev_flags &= ~DF_SPC2_RESERVATIONS_WITH_ISID;
227 } 224 }
228 pr_debug("SCSI-2 Released reservation for %s LUN: %u ->" 225 pr_debug("SCSI-2 Released reservation for %s LUN: %u ->"
229 " MAPPED LUN: %u for %s\n", tpg->se_tpg_tfo->get_fabric_name(), 226 " MAPPED LUN: %u for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
230 cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun, 227 cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun,
231 sess->se_node_acl->initiatorname); 228 sess->se_node_acl->initiatorname);
232 229
233 out_unlock: 230 out_unlock:
234 spin_unlock(&dev->dev_reservation_lock); 231 spin_unlock(&dev->dev_reservation_lock);
235 out: 232 out:
236 if (!ret) { 233 if (!ret) {
237 task->task_scsi_status = GOOD; 234 task->task_scsi_status = GOOD;
238 transport_complete_task(task, 1); 235 transport_complete_task(task, 1);
239 } 236 }
240 return ret; 237 return ret;
241 } 238 }
242 239
243 int target_scsi2_reservation_reserve(struct se_task *task) 240 int target_scsi2_reservation_reserve(struct se_task *task)
244 { 241 {
245 struct se_cmd *cmd = task->task_se_cmd; 242 struct se_cmd *cmd = task->task_se_cmd;
246 struct se_device *dev = cmd->se_dev; 243 struct se_device *dev = cmd->se_dev;
247 struct se_session *sess = cmd->se_sess; 244 struct se_session *sess = cmd->se_sess;
248 struct se_portal_group *tpg = sess->se_tpg; 245 struct se_portal_group *tpg = sess->se_tpg;
249 int ret = 0; 246 int ret = 0;
250 247
251 if ((cmd->t_task_cdb[1] & 0x01) && 248 if ((cmd->t_task_cdb[1] & 0x01) &&
252 (cmd->t_task_cdb[1] & 0x02)) { 249 (cmd->t_task_cdb[1] & 0x02)) {
253 pr_err("LongIO and Obselete Bits set, returning" 250 pr_err("LongIO and Obselete Bits set, returning"
254 " ILLEGAL_REQUEST\n"); 251 " ILLEGAL_REQUEST\n");
255 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; 252 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
256 ret = -EINVAL; 253 ret = -EINVAL;
257 goto out; 254 goto out;
258 } 255 }
259 /* 256 /*
260 * This is currently the case for target_core_mod passthrough struct se_cmd 257 * This is currently the case for target_core_mod passthrough struct se_cmd
261 * ops 258 * ops
262 */ 259 */
263 if (!sess || !tpg) 260 if (!sess || !tpg)
264 goto out; 261 goto out;
265 if (target_check_scsi2_reservation_conflict(cmd, &ret)) 262 if (target_check_scsi2_reservation_conflict(cmd, &ret))
266 goto out; 263 goto out;
267 264
268 ret = 0; 265 ret = 0;
269 spin_lock(&dev->dev_reservation_lock); 266 spin_lock(&dev->dev_reservation_lock);
270 if (dev->dev_reserved_node_acl && 267 if (dev->dev_reserved_node_acl &&
271 (dev->dev_reserved_node_acl != sess->se_node_acl)) { 268 (dev->dev_reserved_node_acl != sess->se_node_acl)) {
272 pr_err("SCSI-2 RESERVATION CONFLIFT for %s fabric\n", 269 pr_err("SCSI-2 RESERVATION CONFLIFT for %s fabric\n",
273 tpg->se_tpg_tfo->get_fabric_name()); 270 tpg->se_tpg_tfo->get_fabric_name());
274 pr_err("Original reserver LUN: %u %s\n", 271 pr_err("Original reserver LUN: %u %s\n",
275 cmd->se_lun->unpacked_lun, 272 cmd->se_lun->unpacked_lun,
276 dev->dev_reserved_node_acl->initiatorname); 273 dev->dev_reserved_node_acl->initiatorname);
277 pr_err("Current attempt - LUN: %u -> MAPPED LUN: %u" 274 pr_err("Current attempt - LUN: %u -> MAPPED LUN: %u"
278 " from %s \n", cmd->se_lun->unpacked_lun, 275 " from %s \n", cmd->se_lun->unpacked_lun,
279 cmd->se_deve->mapped_lun, 276 cmd->se_deve->mapped_lun,
280 sess->se_node_acl->initiatorname); 277 sess->se_node_acl->initiatorname);
281 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; 278 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
282 ret = -EINVAL; 279 ret = -EINVAL;
283 goto out_unlock; 280 goto out_unlock;
284 } 281 }
285 282
286 dev->dev_reserved_node_acl = sess->se_node_acl; 283 dev->dev_reserved_node_acl = sess->se_node_acl;
287 dev->dev_flags |= DF_SPC2_RESERVATIONS; 284 dev->dev_flags |= DF_SPC2_RESERVATIONS;
288 if (sess->sess_bin_isid != 0) { 285 if (sess->sess_bin_isid != 0) {
289 dev->dev_res_bin_isid = sess->sess_bin_isid; 286 dev->dev_res_bin_isid = sess->sess_bin_isid;
290 dev->dev_flags |= DF_SPC2_RESERVATIONS_WITH_ISID; 287 dev->dev_flags |= DF_SPC2_RESERVATIONS_WITH_ISID;
291 } 288 }
292 pr_debug("SCSI-2 Reserved %s LUN: %u -> MAPPED LUN: %u" 289 pr_debug("SCSI-2 Reserved %s LUN: %u -> MAPPED LUN: %u"
293 " for %s\n", tpg->se_tpg_tfo->get_fabric_name(), 290 " for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
294 cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun, 291 cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun,
295 sess->se_node_acl->initiatorname); 292 sess->se_node_acl->initiatorname);
296 293
297 out_unlock: 294 out_unlock:
298 spin_unlock(&dev->dev_reservation_lock); 295 spin_unlock(&dev->dev_reservation_lock);
299 out: 296 out:
300 if (!ret) { 297 if (!ret) {
301 task->task_scsi_status = GOOD; 298 task->task_scsi_status = GOOD;
302 transport_complete_task(task, 1); 299 transport_complete_task(task, 1);
303 } 300 }
304 return ret; 301 return ret;
305 } 302 }
306 303
307 304
308 /* 305 /*
309 * Begin SPC-3/SPC-4 Persistent Reservations emulation support 306 * Begin SPC-3/SPC-4 Persistent Reservations emulation support
310 * 307 *
311 * This function is called by those initiator ports that are *NOT* 308 * This function is called by those initiator ports that are *NOT*
312 * the active PR reservation holder when a reservation is present. 309 * the active PR reservation holder when a reservation is present.
313 */ 310 */
314 static int core_scsi3_pr_seq_non_holder( 311 static int core_scsi3_pr_seq_non_holder(
315 struct se_cmd *cmd, 312 struct se_cmd *cmd,
316 unsigned char *cdb, 313 unsigned char *cdb,
317 u32 pr_reg_type) 314 u32 pr_reg_type)
318 { 315 {
319 struct se_dev_entry *se_deve; 316 struct se_dev_entry *se_deve;
320 struct se_session *se_sess = cmd->se_sess; 317 struct se_session *se_sess = cmd->se_sess;
321 int other_cdb = 0, ignore_reg; 318 int other_cdb = 0, ignore_reg;
322 int registered_nexus = 0, ret = 1; /* Conflict by default */ 319 int registered_nexus = 0, ret = 1; /* Conflict by default */
323 int all_reg = 0, reg_only = 0; /* ALL_REG, REG_ONLY */ 320 int all_reg = 0, reg_only = 0; /* ALL_REG, REG_ONLY */
324 int we = 0; /* Write Exclusive */ 321 int we = 0; /* Write Exclusive */
325 int legacy = 0; /* Act like a legacy device and return 322 int legacy = 0; /* Act like a legacy device and return
326 * RESERVATION CONFLICT on some CDBs */ 323 * RESERVATION CONFLICT on some CDBs */
327 /* 324 /*
328 * A legacy SPC-2 reservation is being held. 325 * A legacy SPC-2 reservation is being held.
329 */ 326 */
330 if (cmd->se_dev->dev_flags & DF_SPC2_RESERVATIONS) 327 if (cmd->se_dev->dev_flags & DF_SPC2_RESERVATIONS)
331 return core_scsi2_reservation_seq_non_holder(cmd, 328 return core_scsi2_reservation_seq_non_holder(cmd,
332 cdb, pr_reg_type); 329 cdb, pr_reg_type);
333 330
334 se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; 331 se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
335 /* 332 /*
336 * Determine if the registration should be ignored due to 333 * Determine if the registration should be ignored due to
337 * non-matching ISIDs in core_scsi3_pr_reservation_check(). 334 * non-matching ISIDs in core_scsi3_pr_reservation_check().
338 */ 335 */
339 ignore_reg = (pr_reg_type & 0x80000000); 336 ignore_reg = (pr_reg_type & 0x80000000);
340 if (ignore_reg) 337 if (ignore_reg)
341 pr_reg_type &= ~0x80000000; 338 pr_reg_type &= ~0x80000000;
342 339
343 switch (pr_reg_type) { 340 switch (pr_reg_type) {
344 case PR_TYPE_WRITE_EXCLUSIVE: 341 case PR_TYPE_WRITE_EXCLUSIVE:
345 we = 1; 342 we = 1;
346 case PR_TYPE_EXCLUSIVE_ACCESS: 343 case PR_TYPE_EXCLUSIVE_ACCESS:
347 /* 344 /*
348 * Some commands are only allowed for the persistent reservation 345 * Some commands are only allowed for the persistent reservation
349 * holder. 346 * holder.
350 */ 347 */
351 if ((se_deve->def_pr_registered) && !(ignore_reg)) 348 if ((se_deve->def_pr_registered) && !(ignore_reg))
352 registered_nexus = 1; 349 registered_nexus = 1;
353 break; 350 break;
354 case PR_TYPE_WRITE_EXCLUSIVE_REGONLY: 351 case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
355 we = 1; 352 we = 1;
356 case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY: 353 case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
357 /* 354 /*
358 * Some commands are only allowed for registered I_T Nexuses. 355 * Some commands are only allowed for registered I_T Nexuses.
359 */ 356 */
360 reg_only = 1; 357 reg_only = 1;
361 if ((se_deve->def_pr_registered) && !(ignore_reg)) 358 if ((se_deve->def_pr_registered) && !(ignore_reg))
362 registered_nexus = 1; 359 registered_nexus = 1;
363 break; 360 break;
364 case PR_TYPE_WRITE_EXCLUSIVE_ALLREG: 361 case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
365 we = 1; 362 we = 1;
366 case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG: 363 case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
367 /* 364 /*
368 * Each registered I_T Nexus is a reservation holder. 365 * Each registered I_T Nexus is a reservation holder.
369 */ 366 */
370 all_reg = 1; 367 all_reg = 1;
371 if ((se_deve->def_pr_registered) && !(ignore_reg)) 368 if ((se_deve->def_pr_registered) && !(ignore_reg))
372 registered_nexus = 1; 369 registered_nexus = 1;
373 break; 370 break;
374 default: 371 default:
375 return -EINVAL; 372 return -EINVAL;
376 } 373 }
377 /* 374 /*
378 * Referenced from spc4r17 table 45 for *NON* PR holder access 375 * Referenced from spc4r17 table 45 for *NON* PR holder access
379 */ 376 */
380 switch (cdb[0]) { 377 switch (cdb[0]) {
381 case SECURITY_PROTOCOL_IN: 378 case SECURITY_PROTOCOL_IN:
382 if (registered_nexus) 379 if (registered_nexus)
383 return 0; 380 return 0;
384 ret = (we) ? 0 : 1; 381 ret = (we) ? 0 : 1;
385 break; 382 break;
386 case MODE_SENSE: 383 case MODE_SENSE:
387 case MODE_SENSE_10: 384 case MODE_SENSE_10:
388 case READ_ATTRIBUTE: 385 case READ_ATTRIBUTE:
389 case READ_BUFFER: 386 case READ_BUFFER:
390 case RECEIVE_DIAGNOSTIC: 387 case RECEIVE_DIAGNOSTIC:
391 if (legacy) { 388 if (legacy) {
392 ret = 1; 389 ret = 1;
393 break; 390 break;
394 } 391 }
395 if (registered_nexus) { 392 if (registered_nexus) {
396 ret = 0; 393 ret = 0;
397 break; 394 break;
398 } 395 }
399 ret = (we) ? 0 : 1; /* Allowed Write Exclusive */ 396 ret = (we) ? 0 : 1; /* Allowed Write Exclusive */
400 break; 397 break;
401 case PERSISTENT_RESERVE_OUT: 398 case PERSISTENT_RESERVE_OUT:
402 /* 399 /*
403 * This follows PERSISTENT_RESERVE_OUT service actions that 400 * This follows PERSISTENT_RESERVE_OUT service actions that
404 * are allowed in the presence of various reservations. 401 * are allowed in the presence of various reservations.
405 * See spc4r17, table 46 402 * See spc4r17, table 46
406 */ 403 */
407 switch (cdb[1] & 0x1f) { 404 switch (cdb[1] & 0x1f) {
408 case PRO_CLEAR: 405 case PRO_CLEAR:
409 case PRO_PREEMPT: 406 case PRO_PREEMPT:
410 case PRO_PREEMPT_AND_ABORT: 407 case PRO_PREEMPT_AND_ABORT:
411 ret = (registered_nexus) ? 0 : 1; 408 ret = (registered_nexus) ? 0 : 1;
412 break; 409 break;
413 case PRO_REGISTER: 410 case PRO_REGISTER:
414 case PRO_REGISTER_AND_IGNORE_EXISTING_KEY: 411 case PRO_REGISTER_AND_IGNORE_EXISTING_KEY:
415 ret = 0; 412 ret = 0;
416 break; 413 break;
417 case PRO_REGISTER_AND_MOVE: 414 case PRO_REGISTER_AND_MOVE:
418 case PRO_RESERVE: 415 case PRO_RESERVE:
419 ret = 1; 416 ret = 1;
420 break; 417 break;
421 case PRO_RELEASE: 418 case PRO_RELEASE:
422 ret = (registered_nexus) ? 0 : 1; 419 ret = (registered_nexus) ? 0 : 1;
423 break; 420 break;
424 default: 421 default:
425 pr_err("Unknown PERSISTENT_RESERVE_OUT service" 422 pr_err("Unknown PERSISTENT_RESERVE_OUT service"
426 " action: 0x%02x\n", cdb[1] & 0x1f); 423 " action: 0x%02x\n", cdb[1] & 0x1f);
427 return -EINVAL; 424 return -EINVAL;
428 } 425 }
429 break; 426 break;
430 case RELEASE: 427 case RELEASE:
431 case RELEASE_10: 428 case RELEASE_10:
432 /* Handled by CRH=1 in target_scsi2_reservation_release() */ 429 /* Handled by CRH=1 in target_scsi2_reservation_release() */
433 ret = 0; 430 ret = 0;
434 break; 431 break;
435 case RESERVE: 432 case RESERVE:
436 case RESERVE_10: 433 case RESERVE_10:
437 /* Handled by CRH=1 in target_scsi2_reservation_reserve() */ 434 /* Handled by CRH=1 in target_scsi2_reservation_reserve() */
438 ret = 0; 435 ret = 0;
439 break; 436 break;
440 case TEST_UNIT_READY: 437 case TEST_UNIT_READY:
441 ret = (legacy) ? 1 : 0; /* Conflict for legacy */ 438 ret = (legacy) ? 1 : 0; /* Conflict for legacy */
442 break; 439 break;
443 case MAINTENANCE_IN: 440 case MAINTENANCE_IN:
444 switch (cdb[1] & 0x1f) { 441 switch (cdb[1] & 0x1f) {
445 case MI_MANAGEMENT_PROTOCOL_IN: 442 case MI_MANAGEMENT_PROTOCOL_IN:
446 if (registered_nexus) { 443 if (registered_nexus) {
447 ret = 0; 444 ret = 0;
448 break; 445 break;
449 } 446 }
450 ret = (we) ? 0 : 1; /* Allowed Write Exclusive */ 447 ret = (we) ? 0 : 1; /* Allowed Write Exclusive */
451 break; 448 break;
452 case MI_REPORT_SUPPORTED_OPERATION_CODES: 449 case MI_REPORT_SUPPORTED_OPERATION_CODES:
453 case MI_REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS: 450 case MI_REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS:
454 if (legacy) { 451 if (legacy) {
455 ret = 1; 452 ret = 1;
456 break; 453 break;
457 } 454 }
458 if (registered_nexus) { 455 if (registered_nexus) {
459 ret = 0; 456 ret = 0;
460 break; 457 break;
461 } 458 }
462 ret = (we) ? 0 : 1; /* Allowed Write Exclusive */ 459 ret = (we) ? 0 : 1; /* Allowed Write Exclusive */
463 break; 460 break;
464 case MI_REPORT_ALIASES: 461 case MI_REPORT_ALIASES:
465 case MI_REPORT_IDENTIFYING_INFORMATION: 462 case MI_REPORT_IDENTIFYING_INFORMATION:
466 case MI_REPORT_PRIORITY: 463 case MI_REPORT_PRIORITY:
467 case MI_REPORT_TARGET_PGS: 464 case MI_REPORT_TARGET_PGS:
468 case MI_REPORT_TIMESTAMP: 465 case MI_REPORT_TIMESTAMP:
469 ret = 0; /* Allowed */ 466 ret = 0; /* Allowed */
470 break; 467 break;
471 default: 468 default:
472 pr_err("Unknown MI Service Action: 0x%02x\n", 469 pr_err("Unknown MI Service Action: 0x%02x\n",
473 (cdb[1] & 0x1f)); 470 (cdb[1] & 0x1f));
474 return -EINVAL; 471 return -EINVAL;
475 } 472 }
476 break; 473 break;
477 case ACCESS_CONTROL_IN: 474 case ACCESS_CONTROL_IN:
478 case ACCESS_CONTROL_OUT: 475 case ACCESS_CONTROL_OUT:
479 case INQUIRY: 476 case INQUIRY:
480 case LOG_SENSE: 477 case LOG_SENSE:
481 case READ_MEDIA_SERIAL_NUMBER: 478 case READ_MEDIA_SERIAL_NUMBER:
482 case REPORT_LUNS: 479 case REPORT_LUNS:
483 case REQUEST_SENSE: 480 case REQUEST_SENSE:
484 ret = 0; /* Allowed CDBs */ 481 ret = 0; /* Allowed CDBs */
485 break; 482 break;
486 default: 483 default:
487 other_cdb = 1; 484 other_cdb = 1;
488 break; 485 break;
489 } 486 }
490 /* 487 /*
491 * Case where the CDB is explicitly allowed in the above switch 488 * Case where the CDB is explicitly allowed in the above switch
492 * statement. 489 * statement.
493 */ 490 */
494 if (!ret && !other_cdb) { 491 if (!ret && !other_cdb) {
495 #if 0 492 #if 0
496 pr_debug("Allowing explict CDB: 0x%02x for %s" 493 pr_debug("Allowing explict CDB: 0x%02x for %s"
497 " reservation holder\n", cdb[0], 494 " reservation holder\n", cdb[0],
498 core_scsi3_pr_dump_type(pr_reg_type)); 495 core_scsi3_pr_dump_type(pr_reg_type));
499 #endif 496 #endif
500 return ret; 497 return ret;
501 } 498 }
502 /* 499 /*
503 * Check if write exclusive initiator ports *NOT* holding the 500 * Check if write exclusive initiator ports *NOT* holding the
504 * WRITE_EXCLUSIVE_* reservation. 501 * WRITE_EXCLUSIVE_* reservation.
505 */ 502 */
506 if ((we) && !(registered_nexus)) { 503 if ((we) && !(registered_nexus)) {
507 if (cmd->data_direction == DMA_TO_DEVICE) { 504 if (cmd->data_direction == DMA_TO_DEVICE) {
508 /* 505 /*
509 * Conflict for write exclusive 506 * Conflict for write exclusive
510 */ 507 */
511 pr_debug("%s Conflict for unregistered nexus" 508 pr_debug("%s Conflict for unregistered nexus"
512 " %s CDB: 0x%02x to %s reservation\n", 509 " %s CDB: 0x%02x to %s reservation\n",
513 transport_dump_cmd_direction(cmd), 510 transport_dump_cmd_direction(cmd),
514 se_sess->se_node_acl->initiatorname, cdb[0], 511 se_sess->se_node_acl->initiatorname, cdb[0],
515 core_scsi3_pr_dump_type(pr_reg_type)); 512 core_scsi3_pr_dump_type(pr_reg_type));
516 return 1; 513 return 1;
517 } else { 514 } else {
518 /* 515 /*
519 * Allow non WRITE CDBs for all Write Exclusive 516 * Allow non WRITE CDBs for all Write Exclusive
520 * PR TYPEs to pass for registered and 517 * PR TYPEs to pass for registered and
521 * non-registered nexuses NOT holding the reservation. 518 * non-registered nexuses NOT holding the reservation.
522 * 519 *
523 * We only make noise for the unregistered nexuses, 520 * We only make noise for the unregistered nexuses,
524 * as we expect registered non-reservation holding 521 * as we expect registered non-reservation holding
525 * nexuses to issue CDBs. 522 * nexuses to issue CDBs.
526 */ 523 */
527 #if 0 524 #if 0
528 if (!registered_nexus) { 525 if (!registered_nexus) {
529 pr_debug("Allowing implict CDB: 0x%02x" 526 pr_debug("Allowing implict CDB: 0x%02x"
530 " for %s reservation on unregistered" 527 " for %s reservation on unregistered"
531 " nexus\n", cdb[0], 528 " nexus\n", cdb[0],
532 core_scsi3_pr_dump_type(pr_reg_type)); 529 core_scsi3_pr_dump_type(pr_reg_type));
533 } 530 }
534 #endif 531 #endif
535 return 0; 532 return 0;
536 } 533 }
537 } else if ((reg_only) || (all_reg)) { 534 } else if ((reg_only) || (all_reg)) {
538 if (registered_nexus) { 535 if (registered_nexus) {
539 /* 536 /*
540 * For PR_*_REG_ONLY and PR_*_ALL_REG reservations, 537 * For PR_*_REG_ONLY and PR_*_ALL_REG reservations,
541 * allow commands from registered nexuses. 538 * allow commands from registered nexuses.
542 */ 539 */
543 #if 0 540 #if 0
544 pr_debug("Allowing implict CDB: 0x%02x for %s" 541 pr_debug("Allowing implict CDB: 0x%02x for %s"
545 " reservation\n", cdb[0], 542 " reservation\n", cdb[0],
546 core_scsi3_pr_dump_type(pr_reg_type)); 543 core_scsi3_pr_dump_type(pr_reg_type));
547 #endif 544 #endif
548 return 0; 545 return 0;
549 } 546 }
550 } 547 }
551 pr_debug("%s Conflict for %sregistered nexus %s CDB: 0x%2x" 548 pr_debug("%s Conflict for %sregistered nexus %s CDB: 0x%2x"
552 " for %s reservation\n", transport_dump_cmd_direction(cmd), 549 " for %s reservation\n", transport_dump_cmd_direction(cmd),
553 (registered_nexus) ? "" : "un", 550 (registered_nexus) ? "" : "un",
554 se_sess->se_node_acl->initiatorname, cdb[0], 551 se_sess->se_node_acl->initiatorname, cdb[0],
555 core_scsi3_pr_dump_type(pr_reg_type)); 552 core_scsi3_pr_dump_type(pr_reg_type));
556 553
557 return 1; /* Conflict by default */ 554 return 1; /* Conflict by default */
558 } 555 }
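The tail of this function collapses to a small decision table. As a reading aid, here is a minimal standalone restatement of those branches; the helper name and bool parameters are illustrative, not part of the driver:

#include <stdbool.h>

/* 1 = reservation conflict, 0 = CDB allowed; mirrors the branch order above. */
static int seq_non_holder_tail(bool we, bool reg_only, bool all_reg,
                               bool registered_nexus, bool is_write)
{
        if (we && !registered_nexus)
                return is_write ? 1 : 0; /* writes conflict, other CDBs pass */
        else if (reg_only || all_reg) {
                if (registered_nexus)
                        return 0;        /* registered nexuses are allowed */
        }
        return 1;                        /* conflict by default */
}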
559 556
560 static u32 core_scsi3_pr_generation(struct se_device *dev) 557 static u32 core_scsi3_pr_generation(struct se_device *dev)
561 { 558 {
562 struct se_subsystem_dev *su_dev = dev->se_sub_dev; 559 struct se_subsystem_dev *su_dev = dev->se_sub_dev;
563 u32 prg; 560 u32 prg;
564 /* 561 /*
565 * PRGeneration field shall contain the value of a 32-bit wrapping 562 * PRGeneration field shall contain the value of a 32-bit wrapping
566 * counter maintained by the device server. 563 * counter maintained by the device server.
567 * 564 *
568 * Note that this is done regardless of Active Persist across 565 * Note that this is done regardless of Active Persist across
569 * Target PowerLoss (APTPL) 566 * Target PowerLoss (APTPL)
570 * 567 *
571 * See spc4r17 section 6.3.12 READ_KEYS service action 568 * See spc4r17 section 6.3.12 READ_KEYS service action
572 */ 569 */
573 spin_lock(&dev->dev_reservation_lock); 570 spin_lock(&dev->dev_reservation_lock);
574 prg = su_dev->t10_pr.pr_generation++; 571 prg = su_dev->t10_pr.pr_generation++;
575 spin_unlock(&dev->dev_reservation_lock); 572 spin_unlock(&dev->dev_reservation_lock);
576 573
577 return prg; 574 return prg;
578 } 575 }
579 576
580 static int core_scsi3_pr_reservation_check( 577 static int core_scsi3_pr_reservation_check(
581 struct se_cmd *cmd, 578 struct se_cmd *cmd,
582 u32 *pr_reg_type) 579 u32 *pr_reg_type)
583 { 580 {
584 struct se_device *dev = cmd->se_dev; 581 struct se_device *dev = cmd->se_dev;
585 struct se_session *sess = cmd->se_sess; 582 struct se_session *sess = cmd->se_sess;
586 int ret; 583 int ret;
587 584
588 if (!sess) 585 if (!sess)
589 return 0; 586 return 0;
590 /* 587 /*
591 * A legacy SPC-2 reservation is being held. 588 * A legacy SPC-2 reservation is being held.
592 */ 589 */
593 if (dev->dev_flags & DF_SPC2_RESERVATIONS) 590 if (dev->dev_flags & DF_SPC2_RESERVATIONS)
594 return core_scsi2_reservation_check(cmd, pr_reg_type); 591 return core_scsi2_reservation_check(cmd, pr_reg_type);
595 592
596 spin_lock(&dev->dev_reservation_lock); 593 spin_lock(&dev->dev_reservation_lock);
597 if (!dev->dev_pr_res_holder) { 594 if (!dev->dev_pr_res_holder) {
598 spin_unlock(&dev->dev_reservation_lock); 595 spin_unlock(&dev->dev_reservation_lock);
599 return 0; 596 return 0;
600 } 597 }
601 *pr_reg_type = dev->dev_pr_res_holder->pr_res_type; 598 *pr_reg_type = dev->dev_pr_res_holder->pr_res_type;
602 cmd->pr_res_key = dev->dev_pr_res_holder->pr_res_key; 599 cmd->pr_res_key = dev->dev_pr_res_holder->pr_res_key;
603 if (dev->dev_pr_res_holder->pr_reg_nacl != sess->se_node_acl) { 600 if (dev->dev_pr_res_holder->pr_reg_nacl != sess->se_node_acl) {
604 spin_unlock(&dev->dev_reservation_lock); 601 spin_unlock(&dev->dev_reservation_lock);
605 return -EINVAL; 602 return -EINVAL;
606 } 603 }
607 if (!dev->dev_pr_res_holder->isid_present_at_reg) { 604 if (!dev->dev_pr_res_holder->isid_present_at_reg) {
608 spin_unlock(&dev->dev_reservation_lock); 605 spin_unlock(&dev->dev_reservation_lock);
609 return 0; 606 return 0;
610 } 607 }
611 ret = (dev->dev_pr_res_holder->pr_reg_bin_isid == 608 ret = (dev->dev_pr_res_holder->pr_reg_bin_isid ==
612 sess->sess_bin_isid) ? 0 : -EINVAL; 609 sess->sess_bin_isid) ? 0 : -EINVAL;
613 /* 610 /*
614 * Use a bit in *pr_reg_type to flag an ISID mismatch in 611 * Use a bit in *pr_reg_type to flag an ISID mismatch in
615 * core_scsi3_pr_seq_non_holder(). 612 * core_scsi3_pr_seq_non_holder().
616 */ 613 */
617 if (ret != 0) 614 if (ret != 0)
618 *pr_reg_type |= 0x80000000; 615 *pr_reg_type |= 0x80000000;
619 spin_unlock(&dev->dev_reservation_lock); 616 spin_unlock(&dev->dev_reservation_lock);
620 617
621 return ret; 618 return ret;
622 } 619 }
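Callers receive the reservation type and the ISID-mismatch marker packed into the same 32-bit value. A minimal sketch of how a consumer such as core_scsi3_pr_seq_non_holder() could unpack it; PR_REG_ISID_MISMATCH and both helper names are illustrative, only the 0x80000000 bit comes from the code above:

#include <stdint.h>
#include <stdbool.h>

#define PR_REG_ISID_MISMATCH 0x80000000u   /* bit set by the check above */

static bool pr_isid_mismatched(uint32_t pr_reg_type)
{
        return (pr_reg_type & PR_REG_ISID_MISMATCH) != 0;
}

static uint32_t pr_reservation_type(uint32_t pr_reg_type)
{
        return pr_reg_type & ~PR_REG_ISID_MISMATCH; /* strip the marker */
}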
623 620
624 static struct t10_pr_registration *__core_scsi3_do_alloc_registration( 621 static struct t10_pr_registration *__core_scsi3_do_alloc_registration(
625 struct se_device *dev, 622 struct se_device *dev,
626 struct se_node_acl *nacl, 623 struct se_node_acl *nacl,
627 struct se_dev_entry *deve, 624 struct se_dev_entry *deve,
628 unsigned char *isid, 625 unsigned char *isid,
629 u64 sa_res_key, 626 u64 sa_res_key,
630 int all_tg_pt, 627 int all_tg_pt,
631 int aptpl) 628 int aptpl)
632 { 629 {
633 struct se_subsystem_dev *su_dev = dev->se_sub_dev; 630 struct se_subsystem_dev *su_dev = dev->se_sub_dev;
634 struct t10_pr_registration *pr_reg; 631 struct t10_pr_registration *pr_reg;
635 632
636 pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_ATOMIC); 633 pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_ATOMIC);
637 if (!pr_reg) { 634 if (!pr_reg) {
638 pr_err("Unable to allocate struct t10_pr_registration\n"); 635 pr_err("Unable to allocate struct t10_pr_registration\n");
639 return NULL; 636 return NULL;
640 } 637 }
641 638
642 pr_reg->pr_aptpl_buf = kzalloc(su_dev->t10_pr.pr_aptpl_buf_len, 639 pr_reg->pr_aptpl_buf = kzalloc(su_dev->t10_pr.pr_aptpl_buf_len,
643 GFP_ATOMIC); 640 GFP_ATOMIC);
644 if (!pr_reg->pr_aptpl_buf) { 641 if (!pr_reg->pr_aptpl_buf) {
645 pr_err("Unable to allocate pr_reg->pr_aptpl_buf\n"); 642 pr_err("Unable to allocate pr_reg->pr_aptpl_buf\n");
646 kmem_cache_free(t10_pr_reg_cache, pr_reg); 643 kmem_cache_free(t10_pr_reg_cache, pr_reg);
647 return NULL; 644 return NULL;
648 } 645 }
649 646
650 INIT_LIST_HEAD(&pr_reg->pr_reg_list); 647 INIT_LIST_HEAD(&pr_reg->pr_reg_list);
651 INIT_LIST_HEAD(&pr_reg->pr_reg_abort_list); 648 INIT_LIST_HEAD(&pr_reg->pr_reg_abort_list);
652 INIT_LIST_HEAD(&pr_reg->pr_reg_aptpl_list); 649 INIT_LIST_HEAD(&pr_reg->pr_reg_aptpl_list);
653 INIT_LIST_HEAD(&pr_reg->pr_reg_atp_list); 650 INIT_LIST_HEAD(&pr_reg->pr_reg_atp_list);
654 INIT_LIST_HEAD(&pr_reg->pr_reg_atp_mem_list); 651 INIT_LIST_HEAD(&pr_reg->pr_reg_atp_mem_list);
655 atomic_set(&pr_reg->pr_res_holders, 0); 652 atomic_set(&pr_reg->pr_res_holders, 0);
656 pr_reg->pr_reg_nacl = nacl; 653 pr_reg->pr_reg_nacl = nacl;
657 pr_reg->pr_reg_deve = deve; 654 pr_reg->pr_reg_deve = deve;
658 pr_reg->pr_res_mapped_lun = deve->mapped_lun; 655 pr_reg->pr_res_mapped_lun = deve->mapped_lun;
659 pr_reg->pr_aptpl_target_lun = deve->se_lun->unpacked_lun; 656 pr_reg->pr_aptpl_target_lun = deve->se_lun->unpacked_lun;
660 pr_reg->pr_res_key = sa_res_key; 657 pr_reg->pr_res_key = sa_res_key;
661 pr_reg->pr_reg_all_tg_pt = all_tg_pt; 658 pr_reg->pr_reg_all_tg_pt = all_tg_pt;
662 pr_reg->pr_reg_aptpl = aptpl; 659 pr_reg->pr_reg_aptpl = aptpl;
663 pr_reg->pr_reg_tg_pt_lun = deve->se_lun; 660 pr_reg->pr_reg_tg_pt_lun = deve->se_lun;
664 /* 661 /*
665 * If an ISID value for this SCSI Initiator Port exists, 662 * If an ISID value for this SCSI Initiator Port exists,
666 * save it to the registration now. 663 * save it to the registration now.
667 */ 664 */
668 if (isid != NULL) { 665 if (isid != NULL) {
669 pr_reg->pr_reg_bin_isid = get_unaligned_be64(isid); 666 pr_reg->pr_reg_bin_isid = get_unaligned_be64(isid);
670 snprintf(pr_reg->pr_reg_isid, PR_REG_ISID_LEN, "%s", isid); 667 snprintf(pr_reg->pr_reg_isid, PR_REG_ISID_LEN, "%s", isid);
671 pr_reg->isid_present_at_reg = 1; 668 pr_reg->isid_present_at_reg = 1;
672 } 669 }
673 670
674 return pr_reg; 671 return pr_reg;
675 } 672 }
676 673
677 static int core_scsi3_lunacl_depend_item(struct se_dev_entry *); 674 static int core_scsi3_lunacl_depend_item(struct se_dev_entry *);
678 static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *); 675 static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *);
679 676
680 /* 677 /*
681 * Function used for handling PR registrations for ALL_TG_PT=1 and ALL_TG_PT=0 678 * Function used for handling PR registrations for ALL_TG_PT=1 and ALL_TG_PT=0
682 * modes. 679 * modes.
683 */ 680 */
684 static struct t10_pr_registration *__core_scsi3_alloc_registration( 681 static struct t10_pr_registration *__core_scsi3_alloc_registration(
685 struct se_device *dev, 682 struct se_device *dev,
686 struct se_node_acl *nacl, 683 struct se_node_acl *nacl,
687 struct se_dev_entry *deve, 684 struct se_dev_entry *deve,
688 unsigned char *isid, 685 unsigned char *isid,
689 u64 sa_res_key, 686 u64 sa_res_key,
690 int all_tg_pt, 687 int all_tg_pt,
691 int aptpl) 688 int aptpl)
692 { 689 {
693 struct se_dev_entry *deve_tmp; 690 struct se_dev_entry *deve_tmp;
694 struct se_node_acl *nacl_tmp; 691 struct se_node_acl *nacl_tmp;
695 struct se_port *port, *port_tmp; 692 struct se_port *port, *port_tmp;
696 struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo; 693 struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
697 struct t10_pr_registration *pr_reg, *pr_reg_atp, *pr_reg_tmp, *pr_reg_tmp_safe; 694 struct t10_pr_registration *pr_reg, *pr_reg_atp, *pr_reg_tmp, *pr_reg_tmp_safe;
698 int ret; 695 int ret;
699 /* 696 /*
700 * Create a registration for the I_T Nexus upon which the 697 * Create a registration for the I_T Nexus upon which the
701 * PROUT REGISTER was received. 698 * PROUT REGISTER was received.
702 */ 699 */
703 pr_reg = __core_scsi3_do_alloc_registration(dev, nacl, deve, isid, 700 pr_reg = __core_scsi3_do_alloc_registration(dev, nacl, deve, isid,
704 sa_res_key, all_tg_pt, aptpl); 701 sa_res_key, all_tg_pt, aptpl);
705 if (!pr_reg) 702 if (!pr_reg)
706 return NULL; 703 return NULL;
707 /* 704 /*
708 * Return pointer to pr_reg for ALL_TG_PT=0 705 * Return pointer to pr_reg for ALL_TG_PT=0
709 */ 706 */
710 if (!all_tg_pt) 707 if (!all_tg_pt)
711 return pr_reg; 708 return pr_reg;
712 /* 709 /*
713 * Create list of matching SCSI Initiator Port registrations 710 * Create list of matching SCSI Initiator Port registrations
714 * for ALL_TG_PT=1 711 * for ALL_TG_PT=1
715 */ 712 */
716 spin_lock(&dev->se_port_lock); 713 spin_lock(&dev->se_port_lock);
717 list_for_each_entry_safe(port, port_tmp, &dev->dev_sep_list, sep_list) { 714 list_for_each_entry_safe(port, port_tmp, &dev->dev_sep_list, sep_list) {
718 atomic_inc(&port->sep_tg_pt_ref_cnt); 715 atomic_inc(&port->sep_tg_pt_ref_cnt);
719 smp_mb__after_atomic_inc(); 716 smp_mb__after_atomic_inc();
720 spin_unlock(&dev->se_port_lock); 717 spin_unlock(&dev->se_port_lock);
721 718
722 spin_lock_bh(&port->sep_alua_lock); 719 spin_lock_bh(&port->sep_alua_lock);
723 list_for_each_entry(deve_tmp, &port->sep_alua_list, 720 list_for_each_entry(deve_tmp, &port->sep_alua_list,
724 alua_port_list) { 721 alua_port_list) {
725 /* 722 /*
726 * This pointer will be NULL for demo mode MappedLUNs 723 * This pointer will be NULL for demo mode MappedLUNs
727 * that have not been made explicit via a ConfigFS 724 * that have not been made explicit via a ConfigFS
728 * MappedLUN group for the SCSI Initiator Node ACL. 725 * MappedLUN group for the SCSI Initiator Node ACL.
729 */ 726 */
730 if (!deve_tmp->se_lun_acl) 727 if (!deve_tmp->se_lun_acl)
731 continue; 728 continue;
732 729
733 nacl_tmp = deve_tmp->se_lun_acl->se_lun_nacl; 730 nacl_tmp = deve_tmp->se_lun_acl->se_lun_nacl;
734 /* 731 /*
735 * Skip the matching struct se_node_acl that is allocated 732 * Skip the matching struct se_node_acl that is allocated
736 * above.. 733 * above..
737 */ 734 */
738 if (nacl == nacl_tmp) 735 if (nacl == nacl_tmp)
739 continue; 736 continue;
740 /* 737 /*
741 * Only perform PR registrations for target ports on 738 * Only perform PR registrations for target ports on
742 * the same fabric module as the REGISTER w/ ALL_TG_PT=1 739 * the same fabric module as the REGISTER w/ ALL_TG_PT=1
743 * arrived. 740 * arrived.
744 */ 741 */
745 if (tfo != nacl_tmp->se_tpg->se_tpg_tfo) 742 if (tfo != nacl_tmp->se_tpg->se_tpg_tfo)
746 continue; 743 continue;
747 /* 744 /*
748 * Look for a matching Initiator Node ACL in ASCII format 745 * Look for a matching Initiator Node ACL in ASCII format
749 */ 746 */
750 if (strcmp(nacl->initiatorname, nacl_tmp->initiatorname)) 747 if (strcmp(nacl->initiatorname, nacl_tmp->initiatorname))
751 continue; 748 continue;
752 749
753 atomic_inc(&deve_tmp->pr_ref_count); 750 atomic_inc(&deve_tmp->pr_ref_count);
754 smp_mb__after_atomic_inc(); 751 smp_mb__after_atomic_inc();
755 spin_unlock_bh(&port->sep_alua_lock); 752 spin_unlock_bh(&port->sep_alua_lock);
756 /* 753 /*
757 * Grab a configfs group dependency that is released 754 * Grab a configfs group dependency that is released
758 * in the exception path at label out: below, or upon 755 * in the exception path at label out: below, or upon
759 * completion of adding ALL_TG_PT=1 registrations in 756 * completion of adding ALL_TG_PT=1 registrations in
760 * __core_scsi3_add_registration() 757 * __core_scsi3_add_registration()
761 */ 758 */
762 ret = core_scsi3_lunacl_depend_item(deve_tmp); 759 ret = core_scsi3_lunacl_depend_item(deve_tmp);
763 if (ret < 0) { 760 if (ret < 0) {
764 pr_err("core_scsi3_lunacl_depend" 761 pr_err("core_scsi3_lunacl_depend"
765 "_item() failed\n"); 762 "_item() failed\n");
766 atomic_dec(&port->sep_tg_pt_ref_cnt); 763 atomic_dec(&port->sep_tg_pt_ref_cnt);
767 smp_mb__after_atomic_dec(); 764 smp_mb__after_atomic_dec();
768 atomic_dec(&deve_tmp->pr_ref_count); 765 atomic_dec(&deve_tmp->pr_ref_count);
769 smp_mb__after_atomic_dec(); 766 smp_mb__after_atomic_dec();
770 goto out; 767 goto out;
771 } 768 }
772 /* 769 /*
773 * Located a matching SCSI Initiator Port on a different 770 * Located a matching SCSI Initiator Port on a different
774 * port, allocate the pr_reg_atp and attach it to the 771 * port, allocate the pr_reg_atp and attach it to the
775 * pr_reg->pr_reg_atp_list that will be processed once 772 * pr_reg->pr_reg_atp_list that will be processed once
776 * the original *pr_reg is processed in 773 * the original *pr_reg is processed in
777 * __core_scsi3_add_registration() 774 * __core_scsi3_add_registration()
778 */ 775 */
779 pr_reg_atp = __core_scsi3_do_alloc_registration(dev, 776 pr_reg_atp = __core_scsi3_do_alloc_registration(dev,
780 nacl_tmp, deve_tmp, NULL, 777 nacl_tmp, deve_tmp, NULL,
781 sa_res_key, all_tg_pt, aptpl); 778 sa_res_key, all_tg_pt, aptpl);
782 if (!pr_reg_atp) { 779 if (!pr_reg_atp) {
783 atomic_dec(&port->sep_tg_pt_ref_cnt); 780 atomic_dec(&port->sep_tg_pt_ref_cnt);
784 smp_mb__after_atomic_dec(); 781 smp_mb__after_atomic_dec();
785 atomic_dec(&deve_tmp->pr_ref_count); 782 atomic_dec(&deve_tmp->pr_ref_count);
786 smp_mb__after_atomic_dec(); 783 smp_mb__after_atomic_dec();
787 core_scsi3_lunacl_undepend_item(deve_tmp); 784 core_scsi3_lunacl_undepend_item(deve_tmp);
788 goto out; 785 goto out;
789 } 786 }
790 787
791 list_add_tail(&pr_reg_atp->pr_reg_atp_mem_list, 788 list_add_tail(&pr_reg_atp->pr_reg_atp_mem_list,
792 &pr_reg->pr_reg_atp_list); 789 &pr_reg->pr_reg_atp_list);
793 spin_lock_bh(&port->sep_alua_lock); 790 spin_lock_bh(&port->sep_alua_lock);
794 } 791 }
795 spin_unlock_bh(&port->sep_alua_lock); 792 spin_unlock_bh(&port->sep_alua_lock);
796 793
797 spin_lock(&dev->se_port_lock); 794 spin_lock(&dev->se_port_lock);
798 atomic_dec(&port->sep_tg_pt_ref_cnt); 795 atomic_dec(&port->sep_tg_pt_ref_cnt);
799 smp_mb__after_atomic_dec(); 796 smp_mb__after_atomic_dec();
800 } 797 }
801 spin_unlock(&dev->se_port_lock); 798 spin_unlock(&dev->se_port_lock);
802 799
803 return pr_reg; 800 return pr_reg;
804 out: 801 out:
805 list_for_each_entry_safe(pr_reg_tmp, pr_reg_tmp_safe, 802 list_for_each_entry_safe(pr_reg_tmp, pr_reg_tmp_safe,
806 &pr_reg->pr_reg_atp_list, pr_reg_atp_mem_list) { 803 &pr_reg->pr_reg_atp_list, pr_reg_atp_mem_list) {
807 list_del(&pr_reg_tmp->pr_reg_atp_mem_list); 804 list_del(&pr_reg_tmp->pr_reg_atp_mem_list);
808 core_scsi3_lunacl_undepend_item(pr_reg_tmp->pr_reg_deve); 805 core_scsi3_lunacl_undepend_item(pr_reg_tmp->pr_reg_deve);
809 kmem_cache_free(t10_pr_reg_cache, pr_reg_tmp); 806 kmem_cache_free(t10_pr_reg_cache, pr_reg_tmp);
810 } 807 }
811 kmem_cache_free(t10_pr_reg_cache, pr_reg); 808 kmem_cache_free(t10_pr_reg_cache, pr_reg);
812 return NULL; 809 return NULL;
813 } 810 }
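The ALL_TG_PT=1 walk above registers the same initiator across every matching target port, and its three skip conditions reduce to a single predicate. This standalone sketch restates them with simplified stand-in types; struct acl and all_tg_pt_match() are illustrative, not driver code:

#include <string.h>
#include <stdbool.h>

struct fabric_ops;                        /* opaque fabric module identity */
struct acl {
        const struct fabric_ops *tfo;     /* se_tpg->se_tpg_tfo in the driver */
        const char *initiatorname;
};

/*
 * Register only ACLs that are a different object, on the same fabric
 * module, with the same initiator name, mirroring the loop above.
 */
static bool all_tg_pt_match(const struct acl *orig, const struct acl *cand)
{
        if (cand == orig)
                return false;             /* skip the original registration */
        if (cand->tfo != orig->tfo)
                return false;             /* different fabric module */
        return strcmp(orig->initiatorname, cand->initiatorname) == 0;
}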
814 811
815 int core_scsi3_alloc_aptpl_registration( 812 int core_scsi3_alloc_aptpl_registration(
816 struct t10_reservation *pr_tmpl, 813 struct t10_reservation *pr_tmpl,
817 u64 sa_res_key, 814 u64 sa_res_key,
818 unsigned char *i_port, 815 unsigned char *i_port,
819 unsigned char *isid, 816 unsigned char *isid,
820 u32 mapped_lun, 817 u32 mapped_lun,
821 unsigned char *t_port, 818 unsigned char *t_port,
822 u16 tpgt, 819 u16 tpgt,
823 u32 target_lun, 820 u32 target_lun,
824 int res_holder, 821 int res_holder,
825 int all_tg_pt, 822 int all_tg_pt,
826 u8 type) 823 u8 type)
827 { 824 {
828 struct t10_pr_registration *pr_reg; 825 struct t10_pr_registration *pr_reg;
829 826
830 if (!i_port || !t_port || !sa_res_key) { 827 if (!i_port || !t_port || !sa_res_key) {
831 pr_err("Illegal parameters for APTPL registration\n"); 828 pr_err("Illegal parameters for APTPL registration\n");
832 return -EINVAL; 829 return -EINVAL;
833 } 830 }
834 831
835 pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_KERNEL); 832 pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_KERNEL);
836 if (!pr_reg) { 833 if (!pr_reg) {
837 pr_err("Unable to allocate struct t10_pr_registration\n"); 834 pr_err("Unable to allocate struct t10_pr_registration\n");
838 return -ENOMEM; 835 return -ENOMEM;
839 } 836 }
840 pr_reg->pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len, GFP_KERNEL); 837 pr_reg->pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len, GFP_KERNEL);
841 838
842 INIT_LIST_HEAD(&pr_reg->pr_reg_list); 839 INIT_LIST_HEAD(&pr_reg->pr_reg_list);
843 INIT_LIST_HEAD(&pr_reg->pr_reg_abort_list); 840 INIT_LIST_HEAD(&pr_reg->pr_reg_abort_list);
844 INIT_LIST_HEAD(&pr_reg->pr_reg_aptpl_list); 841 INIT_LIST_HEAD(&pr_reg->pr_reg_aptpl_list);
845 INIT_LIST_HEAD(&pr_reg->pr_reg_atp_list); 842 INIT_LIST_HEAD(&pr_reg->pr_reg_atp_list);
846 INIT_LIST_HEAD(&pr_reg->pr_reg_atp_mem_list); 843 INIT_LIST_HEAD(&pr_reg->pr_reg_atp_mem_list);
847 atomic_set(&pr_reg->pr_res_holders, 0); 844 atomic_set(&pr_reg->pr_res_holders, 0);
848 pr_reg->pr_reg_nacl = NULL; 845 pr_reg->pr_reg_nacl = NULL;
849 pr_reg->pr_reg_deve = NULL; 846 pr_reg->pr_reg_deve = NULL;
850 pr_reg->pr_res_mapped_lun = mapped_lun; 847 pr_reg->pr_res_mapped_lun = mapped_lun;
851 pr_reg->pr_aptpl_target_lun = target_lun; 848 pr_reg->pr_aptpl_target_lun = target_lun;
852 pr_reg->pr_res_key = sa_res_key; 849 pr_reg->pr_res_key = sa_res_key;
853 pr_reg->pr_reg_all_tg_pt = all_tg_pt; 850 pr_reg->pr_reg_all_tg_pt = all_tg_pt;
854 pr_reg->pr_reg_aptpl = 1; 851 pr_reg->pr_reg_aptpl = 1;
855 pr_reg->pr_reg_tg_pt_lun = NULL; 852 pr_reg->pr_reg_tg_pt_lun = NULL;
856 pr_reg->pr_res_scope = 0; /* Always LUN_SCOPE */ 853 pr_reg->pr_res_scope = 0; /* Always LUN_SCOPE */
857 pr_reg->pr_res_type = type; 854 pr_reg->pr_res_type = type;
858 /* 855 /*
859 * If an ISID value had been saved in APTPL metadata for this 856 * If an ISID value had been saved in APTPL metadata for this
860 * SCSI Initiator Port, restore it now. 857 * SCSI Initiator Port, restore it now.
861 */ 858 */
862 if (isid != NULL) { 859 if (isid != NULL) {
863 pr_reg->pr_reg_bin_isid = get_unaligned_be64(isid); 860 pr_reg->pr_reg_bin_isid = get_unaligned_be64(isid);
864 snprintf(pr_reg->pr_reg_isid, PR_REG_ISID_LEN, "%s", isid); 861 snprintf(pr_reg->pr_reg_isid, PR_REG_ISID_LEN, "%s", isid);
865 pr_reg->isid_present_at_reg = 1; 862 pr_reg->isid_present_at_reg = 1;
866 } 863 }
867 /* 864 /*
868 * Copy the i_port and t_port information from caller. 865 * Copy the i_port and t_port information from caller.
869 */ 866 */
870 snprintf(pr_reg->pr_iport, PR_APTPL_MAX_IPORT_LEN, "%s", i_port); 867 snprintf(pr_reg->pr_iport, PR_APTPL_MAX_IPORT_LEN, "%s", i_port);
871 snprintf(pr_reg->pr_tport, PR_APTPL_MAX_TPORT_LEN, "%s", t_port); 868 snprintf(pr_reg->pr_tport, PR_APTPL_MAX_TPORT_LEN, "%s", t_port);
872 pr_reg->pr_reg_tpgt = tpgt; 869 pr_reg->pr_reg_tpgt = tpgt;
873 /* 870 /*
874 * Set pr_res_holder from caller, the pr_reg who is the reservation 871 * Set pr_res_holder from caller, the pr_reg who is the reservation
875 * holder will get it's pointer set in core_scsi3_aptpl_reserve() once 872 * holder will get it's pointer set in core_scsi3_aptpl_reserve() once
876 * the Initiator Node LUN ACL from the fabric module is created for 873 * the Initiator Node LUN ACL from the fabric module is created for
877 * this registration. 874 * this registration.
878 */ 875 */
879 pr_reg->pr_res_holder = res_holder; 876 pr_reg->pr_res_holder = res_holder;
880 877
881 list_add_tail(&pr_reg->pr_reg_aptpl_list, &pr_tmpl->aptpl_reg_list); 878 list_add_tail(&pr_reg->pr_reg_aptpl_list, &pr_tmpl->aptpl_reg_list);
882 pr_debug("SPC-3 PR APTPL Successfully added registration%s from" 879 pr_debug("SPC-3 PR APTPL Successfully added registration%s from"
883 " metadata\n", (res_holder) ? "+reservation" : ""); 880 " metadata\n", (res_holder) ? "+reservation" : "");
884 return 0; 881 return 0;
885 } 882 }
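For reference, a hedged sketch of how a metadata-restore path might invoke this function once per parsed APTPL record. Every literal below is a made-up example value, and restore_one_aptpl_entry() is not part of the driver:

/* Hypothetical restore helper; all argument values are illustrative only. */
static int restore_one_aptpl_entry(struct t10_reservation *pr_tmpl)
{
        return core_scsi3_alloc_aptpl_registration(pr_tmpl,
                0xABCD1234ULL,                                   /* sa_res_key */
                (unsigned char *)"iqn.1994-05.com.example:init", /* i_port */
                NULL,                                            /* isid, optional */
                0,                                               /* mapped_lun */
                (unsigned char *)"iqn.2003-01.org.example:tgt",  /* t_port */
                1,                                               /* tpgt */
                0,                                               /* target_lun */
                0,                                               /* res_holder */
                0,                                               /* all_tg_pt */
                0x05);  /* type, e.g. Write Exclusive, Registrants Only */
}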
886 883
887 static void core_scsi3_aptpl_reserve( 884 static void core_scsi3_aptpl_reserve(
888 struct se_device *dev, 885 struct se_device *dev,
889 struct se_portal_group *tpg, 886 struct se_portal_group *tpg,
890 struct se_node_acl *node_acl, 887 struct se_node_acl *node_acl,
891 struct t10_pr_registration *pr_reg) 888 struct t10_pr_registration *pr_reg)
892 { 889 {
893 char i_buf[PR_REG_ISID_ID_LEN]; 890 char i_buf[PR_REG_ISID_ID_LEN];
894 int prf_isid; 891 int prf_isid;
895 892
896 memset(i_buf, 0, PR_REG_ISID_ID_LEN); 893 memset(i_buf, 0, PR_REG_ISID_ID_LEN);
897 prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0], 894 prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
898 PR_REG_ISID_ID_LEN); 895 PR_REG_ISID_ID_LEN);
899 896
900 spin_lock(&dev->dev_reservation_lock); 897 spin_lock(&dev->dev_reservation_lock);
901 dev->dev_pr_res_holder = pr_reg; 898 dev->dev_pr_res_holder = pr_reg;
902 spin_unlock(&dev->dev_reservation_lock); 899 spin_unlock(&dev->dev_reservation_lock);
903 900
904 pr_debug("SPC-3 PR [%s] Service Action: APTPL RESERVE created" 901 pr_debug("SPC-3 PR [%s] Service Action: APTPL RESERVE created"
905 " new reservation holder TYPE: %s ALL_TG_PT: %d\n", 902 " new reservation holder TYPE: %s ALL_TG_PT: %d\n",
906 tpg->se_tpg_tfo->get_fabric_name(), 903 tpg->se_tpg_tfo->get_fabric_name(),
907 core_scsi3_pr_dump_type(pr_reg->pr_res_type), 904 core_scsi3_pr_dump_type(pr_reg->pr_res_type),
908 (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); 905 (pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
909 pr_debug("SPC-3 PR [%s] RESERVE Node: %s%s\n", 906 pr_debug("SPC-3 PR [%s] RESERVE Node: %s%s\n",
910 tpg->se_tpg_tfo->get_fabric_name(), node_acl->initiatorname, 907 tpg->se_tpg_tfo->get_fabric_name(), node_acl->initiatorname,
911 (prf_isid) ? &i_buf[0] : ""); 908 (prf_isid) ? &i_buf[0] : "");
912 } 909 }
913 910
914 static void __core_scsi3_add_registration(struct se_device *, struct se_node_acl *, 911 static void __core_scsi3_add_registration(struct se_device *, struct se_node_acl *,
915 struct t10_pr_registration *, int, int); 912 struct t10_pr_registration *, int, int);
916 913
917 static int __core_scsi3_check_aptpl_registration( 914 static int __core_scsi3_check_aptpl_registration(
918 struct se_device *dev, 915 struct se_device *dev,
919 struct se_portal_group *tpg, 916 struct se_portal_group *tpg,
920 struct se_lun *lun, 917 struct se_lun *lun,
921 u32 target_lun, 918 u32 target_lun,
922 struct se_node_acl *nacl, 919 struct se_node_acl *nacl,
923 struct se_dev_entry *deve) 920 struct se_dev_entry *deve)
924 { 921 {
925 struct t10_pr_registration *pr_reg, *pr_reg_tmp; 922 struct t10_pr_registration *pr_reg, *pr_reg_tmp;
926 struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; 923 struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
927 unsigned char i_port[PR_APTPL_MAX_IPORT_LEN]; 924 unsigned char i_port[PR_APTPL_MAX_IPORT_LEN];
928 unsigned char t_port[PR_APTPL_MAX_TPORT_LEN]; 925 unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
929 u16 tpgt; 926 u16 tpgt;
930 927
931 memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN); 928 memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN);
932 memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN); 929 memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN);
933 /* 930 /*
934 * Copy Initiator Port information from struct se_node_acl 931 * Copy Initiator Port information from struct se_node_acl
935 */ 932 */
936 snprintf(i_port, PR_APTPL_MAX_IPORT_LEN, "%s", nacl->initiatorname); 933 snprintf(i_port, PR_APTPL_MAX_IPORT_LEN, "%s", nacl->initiatorname);
937 snprintf(t_port, PR_APTPL_MAX_TPORT_LEN, "%s", 934 snprintf(t_port, PR_APTPL_MAX_TPORT_LEN, "%s",
938 tpg->se_tpg_tfo->tpg_get_wwn(tpg)); 935 tpg->se_tpg_tfo->tpg_get_wwn(tpg));
939 tpgt = tpg->se_tpg_tfo->tpg_get_tag(tpg); 936 tpgt = tpg->se_tpg_tfo->tpg_get_tag(tpg);
940 /* 937 /*
941 * Look for the matching registrations+reservation from those 938 * Look for the matching registrations+reservation from those
942 * created from APTPL metadata. Note that multiple registrations 939 * created from APTPL metadata. Note that multiple registrations
943 * may exist for fabrics that use ISIDs in their SCSI Initiator Port 940 * may exist for fabrics that use ISIDs in their SCSI Initiator Port
944 * TransportIDs. 941 * TransportIDs.
945 */ 942 */
946 spin_lock(&pr_tmpl->aptpl_reg_lock); 943 spin_lock(&pr_tmpl->aptpl_reg_lock);
947 list_for_each_entry_safe(pr_reg, pr_reg_tmp, &pr_tmpl->aptpl_reg_list, 944 list_for_each_entry_safe(pr_reg, pr_reg_tmp, &pr_tmpl->aptpl_reg_list,
948 pr_reg_aptpl_list) { 945 pr_reg_aptpl_list) {
949 if (!strcmp(pr_reg->pr_iport, i_port) && 946 if (!strcmp(pr_reg->pr_iport, i_port) &&
950 (pr_reg->pr_res_mapped_lun == deve->mapped_lun) && 947 (pr_reg->pr_res_mapped_lun == deve->mapped_lun) &&
951 !(strcmp(pr_reg->pr_tport, t_port)) && 948 !(strcmp(pr_reg->pr_tport, t_port)) &&
952 (pr_reg->pr_reg_tpgt == tpgt) && 949 (pr_reg->pr_reg_tpgt == tpgt) &&
953 (pr_reg->pr_aptpl_target_lun == target_lun)) { 950 (pr_reg->pr_aptpl_target_lun == target_lun)) {
954 951
955 pr_reg->pr_reg_nacl = nacl; 952 pr_reg->pr_reg_nacl = nacl;
956 pr_reg->pr_reg_deve = deve; 953 pr_reg->pr_reg_deve = deve;
957 pr_reg->pr_reg_tg_pt_lun = lun; 954 pr_reg->pr_reg_tg_pt_lun = lun;
958 955
959 list_del(&pr_reg->pr_reg_aptpl_list); 956 list_del(&pr_reg->pr_reg_aptpl_list);
960 spin_unlock(&pr_tmpl->aptpl_reg_lock); 957 spin_unlock(&pr_tmpl->aptpl_reg_lock);
961 /* 958 /*
962 * At this point all of the pointers in *pr_reg will 959 * At this point all of the pointers in *pr_reg will
963 * be set up, so go ahead and add the registration. 960 * be set up, so go ahead and add the registration.
964 */ 961 */
965 962
966 __core_scsi3_add_registration(dev, nacl, pr_reg, 0, 0); 963 __core_scsi3_add_registration(dev, nacl, pr_reg, 0, 0);
967 /* 964 /*
968 * If this registration is the reservation holder, 965 * If this registration is the reservation holder,
969 * make that happen now.. 966 * make that happen now..
970 */ 967 */
971 if (pr_reg->pr_res_holder) 968 if (pr_reg->pr_res_holder)
972 core_scsi3_aptpl_reserve(dev, tpg, 969 core_scsi3_aptpl_reserve(dev, tpg,
973 nacl, pr_reg); 970 nacl, pr_reg);
974 /* 971 /*
975 * Reenable pr_aptpl_active to accept new metadata 972 * Reenable pr_aptpl_active to accept new metadata
976 * updates once the SCSI device is active again.. 973 * updates once the SCSI device is active again..
977 */ 974 */
978 spin_lock(&pr_tmpl->aptpl_reg_lock); 975 spin_lock(&pr_tmpl->aptpl_reg_lock);
979 pr_tmpl->pr_aptpl_active = 1; 976 pr_tmpl->pr_aptpl_active = 1;
980 } 977 }
981 } 978 }
982 spin_unlock(&pr_tmpl->aptpl_reg_lock); 979 spin_unlock(&pr_tmpl->aptpl_reg_lock);
983 980
984 return 0; 981 return 0;
985 } 982 }
986 983
987 int core_scsi3_check_aptpl_registration( 984 int core_scsi3_check_aptpl_registration(
988 struct se_device *dev, 985 struct se_device *dev,
989 struct se_portal_group *tpg, 986 struct se_portal_group *tpg,
990 struct se_lun *lun, 987 struct se_lun *lun,
991 struct se_lun_acl *lun_acl) 988 struct se_lun_acl *lun_acl)
992 { 989 {
993 struct se_subsystem_dev *su_dev = dev->se_sub_dev; 990 struct se_subsystem_dev *su_dev = dev->se_sub_dev;
994 struct se_node_acl *nacl = lun_acl->se_lun_nacl; 991 struct se_node_acl *nacl = lun_acl->se_lun_nacl;
995 struct se_dev_entry *deve = &nacl->device_list[lun_acl->mapped_lun]; 992 struct se_dev_entry *deve = &nacl->device_list[lun_acl->mapped_lun];
996 993
997 if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) 994 if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
998 return 0; 995 return 0;
999 996
1000 return __core_scsi3_check_aptpl_registration(dev, tpg, lun, 997 return __core_scsi3_check_aptpl_registration(dev, tpg, lun,
1001 lun->unpacked_lun, nacl, deve); 998 lun->unpacked_lun, nacl, deve);
1002 } 999 }
1003 1000
1004 static void __core_scsi3_dump_registration( 1001 static void __core_scsi3_dump_registration(
1005 struct target_core_fabric_ops *tfo, 1002 struct target_core_fabric_ops *tfo,
1006 struct se_device *dev, 1003 struct se_device *dev,
1007 struct se_node_acl *nacl, 1004 struct se_node_acl *nacl,
1008 struct t10_pr_registration *pr_reg, 1005 struct t10_pr_registration *pr_reg,
1009 int register_type) 1006 int register_type)
1010 { 1007 {
1011 struct se_portal_group *se_tpg = nacl->se_tpg; 1008 struct se_portal_group *se_tpg = nacl->se_tpg;
1012 char i_buf[PR_REG_ISID_ID_LEN]; 1009 char i_buf[PR_REG_ISID_ID_LEN];
1013 int prf_isid; 1010 int prf_isid;
1014 1011
1015 memset(&i_buf[0], 0, PR_REG_ISID_ID_LEN); 1012 memset(&i_buf[0], 0, PR_REG_ISID_ID_LEN);
1016 prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0], 1013 prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
1017 PR_REG_ISID_ID_LEN); 1014 PR_REG_ISID_ID_LEN);
1018 1015
1019 pr_debug("SPC-3 PR [%s] Service Action: REGISTER%s Initiator" 1016 pr_debug("SPC-3 PR [%s] Service Action: REGISTER%s Initiator"
1020 " Node: %s%s\n", tfo->get_fabric_name(), (register_type == 2) ? 1017 " Node: %s%s\n", tfo->get_fabric_name(), (register_type == 2) ?
1021 "_AND_MOVE" : (register_type == 1) ? 1018 "_AND_MOVE" : (register_type == 1) ?
1022 "_AND_IGNORE_EXISTING_KEY" : "", nacl->initiatorname, 1019 "_AND_IGNORE_EXISTING_KEY" : "", nacl->initiatorname,
1023 (prf_isid) ? i_buf : ""); 1020 (prf_isid) ? i_buf : "");
1024 pr_debug("SPC-3 PR [%s] registration on Target Port: %s,0x%04x\n", 1021 pr_debug("SPC-3 PR [%s] registration on Target Port: %s,0x%04x\n",
1025 tfo->get_fabric_name(), tfo->tpg_get_wwn(se_tpg), 1022 tfo->get_fabric_name(), tfo->tpg_get_wwn(se_tpg),
1026 tfo->tpg_get_tag(se_tpg)); 1023 tfo->tpg_get_tag(se_tpg));
1027 pr_debug("SPC-3 PR [%s] for %s TCM Subsystem %s Object Target" 1024 pr_debug("SPC-3 PR [%s] for %s TCM Subsystem %s Object Target"
1028 " Port(s)\n", tfo->get_fabric_name(), 1025 " Port(s)\n", tfo->get_fabric_name(),
1029 (pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE", 1026 (pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE",
1030 dev->transport->name); 1027 dev->transport->name);
1031 pr_debug("SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:" 1028 pr_debug("SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:"
1032 " 0x%08x APTPL: %d\n", tfo->get_fabric_name(), 1029 " 0x%08x APTPL: %d\n", tfo->get_fabric_name(),
1033 pr_reg->pr_res_key, pr_reg->pr_res_generation, 1030 pr_reg->pr_res_key, pr_reg->pr_res_generation,
1034 pr_reg->pr_reg_aptpl); 1031 pr_reg->pr_reg_aptpl);
1035 } 1032 }
1036 1033
1037 /* 1034 /*
1038 * this function can be called with struct se_device->dev_reservation_lock 1035 * this function can be called with struct se_device->dev_reservation_lock
1039 * held when register_move = 1 1036 * held when register_move = 1
1040 */ 1037 */
1041 static void __core_scsi3_add_registration( 1038 static void __core_scsi3_add_registration(
1042 struct se_device *dev, 1039 struct se_device *dev,
1043 struct se_node_acl *nacl, 1040 struct se_node_acl *nacl,
1044 struct t10_pr_registration *pr_reg, 1041 struct t10_pr_registration *pr_reg,
1045 int register_type, 1042 int register_type,
1046 int register_move) 1043 int register_move)
1047 { 1044 {
1048 struct se_subsystem_dev *su_dev = dev->se_sub_dev; 1045 struct se_subsystem_dev *su_dev = dev->se_sub_dev;
1049 struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo; 1046 struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
1050 struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe; 1047 struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe;
1051 struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; 1048 struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
1052 1049
1053 /* 1050 /*
1054 * Increment PRgeneration counter for struct se_device upon a successful 1051 * Increment PRgeneration counter for struct se_device upon a successful
1055 * REGISTER, see spc4r17 section 6.3.2 READ_KEYS service action 1052 * REGISTER, see spc4r17 section 6.3.2 READ_KEYS service action
1056 * 1053 *
1057 * Also, when register_move = 1 for PROUT REGISTER_AND_MOVE service 1054 * Also, when register_move = 1 for PROUT REGISTER_AND_MOVE service
1058 * action, the struct se_device->dev_reservation_lock will already be held, 1055 * action, the struct se_device->dev_reservation_lock will already be held,
1059 * so we do not call core_scsi3_pr_generation() which grabs the lock 1056 * so we do not call core_scsi3_pr_generation() which grabs the lock
1060 * for the REGISTER. 1057 * for the REGISTER.
1061 */ 1058 */
1062 pr_reg->pr_res_generation = (register_move) ? 1059 pr_reg->pr_res_generation = (register_move) ?
1063 su_dev->t10_pr.pr_generation++ : 1060 su_dev->t10_pr.pr_generation++ :
1064 core_scsi3_pr_generation(dev); 1061 core_scsi3_pr_generation(dev);
1065 1062
1066 spin_lock(&pr_tmpl->registration_lock); 1063 spin_lock(&pr_tmpl->registration_lock);
1067 list_add_tail(&pr_reg->pr_reg_list, &pr_tmpl->registration_list); 1064 list_add_tail(&pr_reg->pr_reg_list, &pr_tmpl->registration_list);
1068 pr_reg->pr_reg_deve->def_pr_registered = 1; 1065 pr_reg->pr_reg_deve->def_pr_registered = 1;
1069 1066
1070 __core_scsi3_dump_registration(tfo, dev, nacl, pr_reg, register_type); 1067 __core_scsi3_dump_registration(tfo, dev, nacl, pr_reg, register_type);
1071 spin_unlock(&pr_tmpl->registration_lock); 1068 spin_unlock(&pr_tmpl->registration_lock);
1072 /* 1069 /*
1073 * Skip extra processing for ALL_TG_PT=0 or REGISTER_AND_MOVE. 1070 * Skip extra processing for ALL_TG_PT=0 or REGISTER_AND_MOVE.
1074 */ 1071 */
1075 if (!pr_reg->pr_reg_all_tg_pt || register_move) 1072 if (!pr_reg->pr_reg_all_tg_pt || register_move)
1076 return; 1073 return;
1077 /* 1074 /*
1078 * Walk pr_reg->pr_reg_atp_list and add registrations for ALL_TG_PT=1 1075 * Walk pr_reg->pr_reg_atp_list and add registrations for ALL_TG_PT=1
1079 * allocated in __core_scsi3_alloc_registration() 1076 * allocated in __core_scsi3_alloc_registration()
1080 */ 1077 */
1081 list_for_each_entry_safe(pr_reg_tmp, pr_reg_tmp_safe, 1078 list_for_each_entry_safe(pr_reg_tmp, pr_reg_tmp_safe,
1082 &pr_reg->pr_reg_atp_list, pr_reg_atp_mem_list) { 1079 &pr_reg->pr_reg_atp_list, pr_reg_atp_mem_list) {
1083 list_del(&pr_reg_tmp->pr_reg_atp_mem_list); 1080 list_del(&pr_reg_tmp->pr_reg_atp_mem_list);
1084 1081
1085 pr_reg_tmp->pr_res_generation = core_scsi3_pr_generation(dev); 1082 pr_reg_tmp->pr_res_generation = core_scsi3_pr_generation(dev);
1086 1083
1087 spin_lock(&pr_tmpl->registration_lock); 1084 spin_lock(&pr_tmpl->registration_lock);
1088 list_add_tail(&pr_reg_tmp->pr_reg_list, 1085 list_add_tail(&pr_reg_tmp->pr_reg_list,
1089 &pr_tmpl->registration_list); 1086 &pr_tmpl->registration_list);
1090 pr_reg_tmp->pr_reg_deve->def_pr_registered = 1; 1087 pr_reg_tmp->pr_reg_deve->def_pr_registered = 1;
1091 1088
1092 __core_scsi3_dump_registration(tfo, dev, 1089 __core_scsi3_dump_registration(tfo, dev,
1093 pr_reg_tmp->pr_reg_nacl, pr_reg_tmp, 1090 pr_reg_tmp->pr_reg_nacl, pr_reg_tmp,
1094 register_type); 1091 register_type);
1095 spin_unlock(&pr_tmpl->registration_lock); 1092 spin_unlock(&pr_tmpl->registration_lock);
1096 /* 1093 /*
1097 * Drop configfs group dependency reference from 1094 * Drop configfs group dependency reference from
1098 * __core_scsi3_alloc_registration() 1095 * __core_scsi3_alloc_registration()
1099 */ 1096 */
1100 core_scsi3_lunacl_undepend_item(pr_reg_tmp->pr_reg_deve); 1097 core_scsi3_lunacl_undepend_item(pr_reg_tmp->pr_reg_deve);
1101 } 1098 }
1102 } 1099 }
1103 1100
1104 static int core_scsi3_alloc_registration( 1101 static int core_scsi3_alloc_registration(
1105 struct se_device *dev, 1102 struct se_device *dev,
1106 struct se_node_acl *nacl, 1103 struct se_node_acl *nacl,
1107 struct se_dev_entry *deve, 1104 struct se_dev_entry *deve,
1108 unsigned char *isid, 1105 unsigned char *isid,
1109 u64 sa_res_key, 1106 u64 sa_res_key,
1110 int all_tg_pt, 1107 int all_tg_pt,
1111 int aptpl, 1108 int aptpl,
1112 int register_type, 1109 int register_type,
1113 int register_move) 1110 int register_move)
1114 { 1111 {
1115 struct t10_pr_registration *pr_reg; 1112 struct t10_pr_registration *pr_reg;
1116 1113
1117 pr_reg = __core_scsi3_alloc_registration(dev, nacl, deve, isid, 1114 pr_reg = __core_scsi3_alloc_registration(dev, nacl, deve, isid,
1118 sa_res_key, all_tg_pt, aptpl); 1115 sa_res_key, all_tg_pt, aptpl);
1119 if (!pr_reg) 1116 if (!pr_reg)
1120 return -EPERM; 1117 return -EPERM;
1121 1118
1122 __core_scsi3_add_registration(dev, nacl, pr_reg, 1119 __core_scsi3_add_registration(dev, nacl, pr_reg,
1123 register_type, register_move); 1120 register_type, register_move);
1124 return 0; 1121 return 0;
1125 } 1122 }
1126 1123
1127 static struct t10_pr_registration *__core_scsi3_locate_pr_reg( 1124 static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
1128 struct se_device *dev, 1125 struct se_device *dev,
1129 struct se_node_acl *nacl, 1126 struct se_node_acl *nacl,
1130 unsigned char *isid) 1127 unsigned char *isid)
1131 { 1128 {
1132 struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; 1129 struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
1133 struct t10_pr_registration *pr_reg, *pr_reg_tmp; 1130 struct t10_pr_registration *pr_reg, *pr_reg_tmp;
1134 struct se_portal_group *tpg; 1131 struct se_portal_group *tpg;
1135 1132
1136 spin_lock(&pr_tmpl->registration_lock); 1133 spin_lock(&pr_tmpl->registration_lock);
1137 list_for_each_entry_safe(pr_reg, pr_reg_tmp, 1134 list_for_each_entry_safe(pr_reg, pr_reg_tmp,
1138 &pr_tmpl->registration_list, pr_reg_list) { 1135 &pr_tmpl->registration_list, pr_reg_list) {
1139 /* 1136 /*
1140 * First look for a matching struct se_node_acl 1137 * First look for a matching struct se_node_acl
1141 */ 1138 */
1142 if (pr_reg->pr_reg_nacl != nacl) 1139 if (pr_reg->pr_reg_nacl != nacl)
1143 continue; 1140 continue;
1144 1141
1145 tpg = pr_reg->pr_reg_nacl->se_tpg; 1142 tpg = pr_reg->pr_reg_nacl->se_tpg;
1146 /* 1143 /*
1147 * If this registration does NOT contain a fabric provided 1144 * If this registration does NOT contain a fabric provided
1148 * ISID, then we have found a match. 1145 * ISID, then we have found a match.
1149 */ 1146 */
1150 if (!pr_reg->isid_present_at_reg) { 1147 if (!pr_reg->isid_present_at_reg) {
1151 /* 1148 /*
1152 * Determine if this SCSI device server requires that 1149 * Determine if this SCSI device server requires that
1153 * SCSI Initiator TransportID w/ ISIDs is enforced 1150 * SCSI Initiator TransportID w/ ISIDs is enforced
1154 * for fabric modules (iSCSI) requiring them. 1151 * for fabric modules (iSCSI) requiring them.
1155 */ 1152 */
1156 if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) { 1153 if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
1157 if (dev->se_sub_dev->se_dev_attrib.enforce_pr_isids) 1154 if (dev->se_sub_dev->se_dev_attrib.enforce_pr_isids)
1158 continue; 1155 continue;
1159 } 1156 }
1160 atomic_inc(&pr_reg->pr_res_holders); 1157 atomic_inc(&pr_reg->pr_res_holders);
1161 smp_mb__after_atomic_inc(); 1158 smp_mb__after_atomic_inc();
1162 spin_unlock(&pr_tmpl->registration_lock); 1159 spin_unlock(&pr_tmpl->registration_lock);
1163 return pr_reg; 1160 return pr_reg;
1164 } 1161 }
1165 /* 1162 /*
1166 * If the *pr_reg contains a fabric defined ISID for multi-value 1163 * If the *pr_reg contains a fabric defined ISID for multi-value
1167 * SCSI Initiator Port TransportIDs, then we expect a valid 1164 * SCSI Initiator Port TransportIDs, then we expect a valid
1168 * matching ISID to be provided by the local SCSI Initiator Port. 1165 * matching ISID to be provided by the local SCSI Initiator Port.
1169 */ 1166 */
1170 if (!isid) 1167 if (!isid)
1171 continue; 1168 continue;
1172 if (strcmp(isid, pr_reg->pr_reg_isid)) 1169 if (strcmp(isid, pr_reg->pr_reg_isid))
1173 continue; 1170 continue;
1174 1171
1175 atomic_inc(&pr_reg->pr_res_holders); 1172 atomic_inc(&pr_reg->pr_res_holders);
1176 smp_mb__after_atomic_inc(); 1173 smp_mb__after_atomic_inc();
1177 spin_unlock(&pr_tmpl->registration_lock); 1174 spin_unlock(&pr_tmpl->registration_lock);
1178 return pr_reg; 1175 return pr_reg;
1179 } 1176 }
1180 spin_unlock(&pr_tmpl->registration_lock); 1177 spin_unlock(&pr_tmpl->registration_lock);
1181 1178
1182 return NULL; 1179 return NULL;
1183 } 1180 }
1184 1181
1185 static struct t10_pr_registration *core_scsi3_locate_pr_reg( 1182 static struct t10_pr_registration *core_scsi3_locate_pr_reg(
1186 struct se_device *dev, 1183 struct se_device *dev,
1187 struct se_node_acl *nacl, 1184 struct se_node_acl *nacl,
1188 struct se_session *sess) 1185 struct se_session *sess)
1189 { 1186 {
1190 struct se_portal_group *tpg = nacl->se_tpg; 1187 struct se_portal_group *tpg = nacl->se_tpg;
1191 unsigned char buf[PR_REG_ISID_LEN], *isid_ptr = NULL; 1188 unsigned char buf[PR_REG_ISID_LEN], *isid_ptr = NULL;
1192 1189
1193 if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) { 1190 if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
1194 memset(&buf[0], 0, PR_REG_ISID_LEN); 1191 memset(&buf[0], 0, PR_REG_ISID_LEN);
1195 tpg->se_tpg_tfo->sess_get_initiator_sid(sess, &buf[0], 1192 tpg->se_tpg_tfo->sess_get_initiator_sid(sess, &buf[0],
1196 PR_REG_ISID_LEN); 1193 PR_REG_ISID_LEN);
1197 isid_ptr = &buf[0]; 1194 isid_ptr = &buf[0];
1198 } 1195 }
1199 1196
1200 return __core_scsi3_locate_pr_reg(dev, nacl, isid_ptr); 1197 return __core_scsi3_locate_pr_reg(dev, nacl, isid_ptr);
1201 } 1198 }
1202 1199
1203 static void core_scsi3_put_pr_reg(struct t10_pr_registration *pr_reg) 1200 static void core_scsi3_put_pr_reg(struct t10_pr_registration *pr_reg)
1204 { 1201 {
1205 atomic_dec(&pr_reg->pr_res_holders); 1202 atomic_dec(&pr_reg->pr_res_holders);
1206 smp_mb__after_atomic_dec(); 1203 smp_mb__after_atomic_dec();
1207 } 1204 }
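core_scsi3_locate_pr_reg() returns with pr_res_holders elevated, so every successful lookup must eventually be balanced by core_scsi3_put_pr_reg(). A sketch of the expected pairing; example_use_registration() is hypothetical:

static int example_use_registration(struct se_device *dev,
                                    struct se_node_acl *nacl,
                                    struct se_session *sess)
{
        struct t10_pr_registration *pr_reg;

        pr_reg = core_scsi3_locate_pr_reg(dev, nacl, sess);
        if (!pr_reg)
                return -ENOENT;          /* no matching registration */

        /* ... act on pr_reg while the reference pins it ... */

        core_scsi3_put_pr_reg(pr_reg);   /* drop pr_res_holders reference */
        return 0;
}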
1208 1205
1209 static int core_scsi3_check_implict_release( 1206 static int core_scsi3_check_implict_release(
1210 struct se_device *dev, 1207 struct se_device *dev,
1211 struct t10_pr_registration *pr_reg) 1208 struct t10_pr_registration *pr_reg)
1212 { 1209 {
1213 struct se_node_acl *nacl = pr_reg->pr_reg_nacl; 1210 struct se_node_acl *nacl = pr_reg->pr_reg_nacl;
1214 struct t10_pr_registration *pr_res_holder; 1211 struct t10_pr_registration *pr_res_holder;
1215 int ret = 0; 1212 int ret = 0;
1216 1213
1217 spin_lock(&dev->dev_reservation_lock); 1214 spin_lock(&dev->dev_reservation_lock);
1218 pr_res_holder = dev->dev_pr_res_holder; 1215 pr_res_holder = dev->dev_pr_res_holder;
1219 if (!pr_res_holder) { 1216 if (!pr_res_holder) {
1220 spin_unlock(&dev->dev_reservation_lock); 1217 spin_unlock(&dev->dev_reservation_lock);
1221 return ret; 1218 return ret;
1222 } 1219 }
1223 if (pr_res_holder == pr_reg) { 1220 if (pr_res_holder == pr_reg) {
1224 /* 1221 /*
1225 * Perform an implicit RELEASE if the registration that 1222 * Perform an implicit RELEASE if the registration that
1226 * is being released is holding the reservation. 1223 * is being released is holding the reservation.
1227 * 1224 *
1228 * From spc4r17, section 5.7.11.1: 1225 * From spc4r17, section 5.7.11.1:
1229 * 1226 *
1230 * e) If the I_T nexus is the persistent reservation holder 1227 * e) If the I_T nexus is the persistent reservation holder
1231 * and the persistent reservation is not an all registrants 1228 * and the persistent reservation is not an all registrants
1232 * type, then a PERSISTENT RESERVE OUT command with REGISTER 1229 * type, then a PERSISTENT RESERVE OUT command with REGISTER
1233 * service action or REGISTER AND IGNORE EXISTING KEY 1230 * service action or REGISTER AND IGNORE EXISTING KEY
1234 * service action with the SERVICE ACTION RESERVATION KEY 1231 * service action with the SERVICE ACTION RESERVATION KEY
1235 * field set to zero (see 5.7.11.3). 1232 * field set to zero (see 5.7.11.3).
1236 */ 1233 */
1237 __core_scsi3_complete_pro_release(dev, nacl, pr_reg, 0); 1234 __core_scsi3_complete_pro_release(dev, nacl, pr_reg, 0);
1238 ret = 1; 1235 ret = 1;
1239 /* 1236 /*
1240 * For 'All Registrants' reservation types, all existing 1237 * For 'All Registrants' reservation types, all existing
1241 * registrations are still processed as reservation holders 1238 * registrations are still processed as reservation holders
1242 * in core_scsi3_pr_seq_non_holder() after the initial 1239 * in core_scsi3_pr_seq_non_holder() after the initial
1243 * reservation holder is implicitly released here. 1240 * reservation holder is implicitly released here.
1244 */ 1241 */
1245 } else if (pr_reg->pr_reg_all_tg_pt && 1242 } else if (pr_reg->pr_reg_all_tg_pt &&
1246 (!strcmp(pr_res_holder->pr_reg_nacl->initiatorname, 1243 (!strcmp(pr_res_holder->pr_reg_nacl->initiatorname,
1247 pr_reg->pr_reg_nacl->initiatorname)) && 1244 pr_reg->pr_reg_nacl->initiatorname)) &&
1248 (pr_res_holder->pr_res_key == pr_reg->pr_res_key)) { 1245 (pr_res_holder->pr_res_key == pr_reg->pr_res_key)) {
1249 pr_err("SPC-3 PR: Unable to perform ALL_TG_PT=1" 1246 pr_err("SPC-3 PR: Unable to perform ALL_TG_PT=1"
1250 " UNREGISTER while existing reservation with matching" 1247 " UNREGISTER while existing reservation with matching"
1251 " key 0x%016Lx is present from another SCSI Initiator" 1248 " key 0x%016Lx is present from another SCSI Initiator"
1252 " Port\n", pr_reg->pr_res_key); 1249 " Port\n", pr_reg->pr_res_key);
1253 ret = -EPERM; 1250 ret = -EPERM;
1254 } 1251 }
1255 spin_unlock(&dev->dev_reservation_lock); 1252 spin_unlock(&dev->dev_reservation_lock);
1256 1253
1257 return ret; 1254 return ret;
1258 } 1255 }
1259 1256
1260 /* 1257 /*
1261 * Called with struct t10_reservation->registration_lock held. 1258 * Called with struct t10_reservation->registration_lock held.
1262 */ 1259 */
1263 static void __core_scsi3_free_registration( 1260 static void __core_scsi3_free_registration(
1264 struct se_device *dev, 1261 struct se_device *dev,
1265 struct t10_pr_registration *pr_reg, 1262 struct t10_pr_registration *pr_reg,
1266 struct list_head *preempt_and_abort_list, 1263 struct list_head *preempt_and_abort_list,
1267 int dec_holders) 1264 int dec_holders)
1268 { 1265 {
1269 struct target_core_fabric_ops *tfo = 1266 struct target_core_fabric_ops *tfo =
1270 pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo; 1267 pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo;
1271 struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; 1268 struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
1272 char i_buf[PR_REG_ISID_ID_LEN]; 1269 char i_buf[PR_REG_ISID_ID_LEN];
1273 int prf_isid; 1270 int prf_isid;
1274 1271
1275 memset(i_buf, 0, PR_REG_ISID_ID_LEN); 1272 memset(i_buf, 0, PR_REG_ISID_ID_LEN);
1276 prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0], 1273 prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
1277 PR_REG_ISID_ID_LEN); 1274 PR_REG_ISID_ID_LEN);
1278 1275
1279 pr_reg->pr_reg_deve->def_pr_registered = 0; 1276 pr_reg->pr_reg_deve->def_pr_registered = 0;
1280 pr_reg->pr_reg_deve->pr_res_key = 0; 1277 pr_reg->pr_reg_deve->pr_res_key = 0;
1281 list_del(&pr_reg->pr_reg_list); 1278 list_del(&pr_reg->pr_reg_list);
1282 /* 1279 /*
1283 * Caller accessing *pr_reg using core_scsi3_locate_pr_reg(), 1280 * Caller accessing *pr_reg using core_scsi3_locate_pr_reg(),
1284 * so call core_scsi3_put_pr_reg() to decrement our reference. 1281 * so call core_scsi3_put_pr_reg() to decrement our reference.
1285 */ 1282 */
1286 if (dec_holders) 1283 if (dec_holders)
1287 core_scsi3_put_pr_reg(pr_reg); 1284 core_scsi3_put_pr_reg(pr_reg);
1288 /* 1285 /*
1289 * Wait until all references from any other I_T nexuses for this 1286 * Wait until all references from any other I_T nexuses for this
1290 * *pr_reg have been released. Because list_del() is called above, 1287 * *pr_reg have been released. Because list_del() is called above,
1291 * the last core_scsi3_put_pr_reg(pr_reg) will release this reference 1288 * the last core_scsi3_put_pr_reg(pr_reg) will release this reference
1292 * count back to zero, and we release *pr_reg. 1289 * count back to zero, and we release *pr_reg.
1293 */ 1290 */
1294 while (atomic_read(&pr_reg->pr_res_holders) != 0) { 1291 while (atomic_read(&pr_reg->pr_res_holders) != 0) {
1295 spin_unlock(&pr_tmpl->registration_lock); 1292 spin_unlock(&pr_tmpl->registration_lock);
1296 pr_debug("SPC-3 PR [%s] waiting for pr_res_holders\n", 1293 pr_debug("SPC-3 PR [%s] waiting for pr_res_holders\n",
1297 tfo->get_fabric_name()); 1294 tfo->get_fabric_name());
1298 cpu_relax(); 1295 cpu_relax();
1299 spin_lock(&pr_tmpl->registration_lock); 1296 spin_lock(&pr_tmpl->registration_lock);
1300 } 1297 }
1301 1298
1302 pr_debug("SPC-3 PR [%s] Service Action: UNREGISTER Initiator" 1299 pr_debug("SPC-3 PR [%s] Service Action: UNREGISTER Initiator"
1303 " Node: %s%s\n", tfo->get_fabric_name(), 1300 " Node: %s%s\n", tfo->get_fabric_name(),
1304 pr_reg->pr_reg_nacl->initiatorname, 1301 pr_reg->pr_reg_nacl->initiatorname,
1305 (prf_isid) ? &i_buf[0] : ""); 1302 (prf_isid) ? &i_buf[0] : "");
1306 pr_debug("SPC-3 PR [%s] for %s TCM Subsystem %s Object Target" 1303 pr_debug("SPC-3 PR [%s] for %s TCM Subsystem %s Object Target"
1307 " Port(s)\n", tfo->get_fabric_name(), 1304 " Port(s)\n", tfo->get_fabric_name(),
1308 (pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE", 1305 (pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE",
1309 dev->transport->name); 1306 dev->transport->name);
1310 pr_debug("SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:" 1307 pr_debug("SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:"
1311 " 0x%08x\n", tfo->get_fabric_name(), pr_reg->pr_res_key, 1308 " 0x%08x\n", tfo->get_fabric_name(), pr_reg->pr_res_key,
1312 pr_reg->pr_res_generation); 1309 pr_reg->pr_res_generation);
1313 1310
1314 if (!preempt_and_abort_list) { 1311 if (!preempt_and_abort_list) {
1315 pr_reg->pr_reg_deve = NULL; 1312 pr_reg->pr_reg_deve = NULL;
1316 pr_reg->pr_reg_nacl = NULL; 1313 pr_reg->pr_reg_nacl = NULL;
1317 kfree(pr_reg->pr_aptpl_buf); 1314 kfree(pr_reg->pr_aptpl_buf);
1318 kmem_cache_free(t10_pr_reg_cache, pr_reg); 1315 kmem_cache_free(t10_pr_reg_cache, pr_reg);
1319 return; 1316 return;
1320 } 1317 }
1321 /* 1318 /*
1322 * For PREEMPT_AND_ABORT, the *pr_reg entries on preempt_and_abort_list 1319 * For PREEMPT_AND_ABORT, the *pr_reg entries on preempt_and_abort_list
1323 * are released once the ABORT_TASK_SET has completed.. 1320 * are released once the ABORT_TASK_SET has completed..
1324 */ 1321 */
1325 list_add_tail(&pr_reg->pr_reg_abort_list, preempt_and_abort_list); 1322 list_add_tail(&pr_reg->pr_reg_abort_list, preempt_and_abort_list);
1326 } 1323 }
1327 1324
1328 void core_scsi3_free_pr_reg_from_nacl( 1325 void core_scsi3_free_pr_reg_from_nacl(
1329 struct se_device *dev, 1326 struct se_device *dev,
1330 struct se_node_acl *nacl) 1327 struct se_node_acl *nacl)
1331 { 1328 {
1332 struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; 1329 struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
1333 struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder; 1330 struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder;
1334 /* 1331 /*
1335 * If the passed se_node_acl matches the reservation holder, 1332 * If the passed se_node_acl matches the reservation holder,
1336 * release the reservation. 1333 * release the reservation.
1337 */ 1334 */
1338 spin_lock(&dev->dev_reservation_lock); 1335 spin_lock(&dev->dev_reservation_lock);
1339 pr_res_holder = dev->dev_pr_res_holder; 1336 pr_res_holder = dev->dev_pr_res_holder;
1340 if ((pr_res_holder != NULL) && 1337 if ((pr_res_holder != NULL) &&
1341 (pr_res_holder->pr_reg_nacl == nacl)) 1338 (pr_res_holder->pr_reg_nacl == nacl))
1342 __core_scsi3_complete_pro_release(dev, nacl, pr_res_holder, 0); 1339 __core_scsi3_complete_pro_release(dev, nacl, pr_res_holder, 0);
1343 spin_unlock(&dev->dev_reservation_lock); 1340 spin_unlock(&dev->dev_reservation_lock);
1344 /* 1341 /*
1345 * Release any registration associated with the struct se_node_acl. 1342 * Release any registration associated with the struct se_node_acl.
1346 */ 1343 */
1347 spin_lock(&pr_tmpl->registration_lock); 1344 spin_lock(&pr_tmpl->registration_lock);
1348 list_for_each_entry_safe(pr_reg, pr_reg_tmp, 1345 list_for_each_entry_safe(pr_reg, pr_reg_tmp,
1349 &pr_tmpl->registration_list, pr_reg_list) { 1346 &pr_tmpl->registration_list, pr_reg_list) {
1350 1347
1351 if (pr_reg->pr_reg_nacl != nacl) 1348 if (pr_reg->pr_reg_nacl != nacl)
1352 continue; 1349 continue;
1353 1350
1354 __core_scsi3_free_registration(dev, pr_reg, NULL, 0); 1351 __core_scsi3_free_registration(dev, pr_reg, NULL, 0);
1355 } 1352 }
1356 spin_unlock(&pr_tmpl->registration_lock); 1353 spin_unlock(&pr_tmpl->registration_lock);
1357 } 1354 }
1358 1355
1359 void core_scsi3_free_all_registrations( 1356 void core_scsi3_free_all_registrations(
1360 struct se_device *dev) 1357 struct se_device *dev)
1361 { 1358 {
1362 struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; 1359 struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
1363 struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder; 1360 struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder;
1364 1361
1365 spin_lock(&dev->dev_reservation_lock); 1362 spin_lock(&dev->dev_reservation_lock);
1366 pr_res_holder = dev->dev_pr_res_holder; 1363 pr_res_holder = dev->dev_pr_res_holder;
1367 if (pr_res_holder != NULL) { 1364 if (pr_res_holder != NULL) {
1368 struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl; 1365 struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
1369 __core_scsi3_complete_pro_release(dev, pr_res_nacl, 1366 __core_scsi3_complete_pro_release(dev, pr_res_nacl,
1370 pr_res_holder, 0); 1367 pr_res_holder, 0);
1371 } 1368 }
1372 spin_unlock(&dev->dev_reservation_lock); 1369 spin_unlock(&dev->dev_reservation_lock);
1373 1370
1374 spin_lock(&pr_tmpl->registration_lock); 1371 spin_lock(&pr_tmpl->registration_lock);
1375 list_for_each_entry_safe(pr_reg, pr_reg_tmp, 1372 list_for_each_entry_safe(pr_reg, pr_reg_tmp,
1376 &pr_tmpl->registration_list, pr_reg_list) { 1373 &pr_tmpl->registration_list, pr_reg_list) {
1377 1374
1378 __core_scsi3_free_registration(dev, pr_reg, NULL, 0); 1375 __core_scsi3_free_registration(dev, pr_reg, NULL, 0);
1379 } 1376 }
1380 spin_unlock(&pr_tmpl->registration_lock); 1377 spin_unlock(&pr_tmpl->registration_lock);
1381 1378
1382 spin_lock(&pr_tmpl->aptpl_reg_lock); 1379 spin_lock(&pr_tmpl->aptpl_reg_lock);
1383 list_for_each_entry_safe(pr_reg, pr_reg_tmp, &pr_tmpl->aptpl_reg_list, 1380 list_for_each_entry_safe(pr_reg, pr_reg_tmp, &pr_tmpl->aptpl_reg_list,
1384 pr_reg_aptpl_list) { 1381 pr_reg_aptpl_list) {
1385 list_del(&pr_reg->pr_reg_aptpl_list); 1382 list_del(&pr_reg->pr_reg_aptpl_list);
1386 kfree(pr_reg->pr_aptpl_buf); 1383 kfree(pr_reg->pr_aptpl_buf);
1387 kmem_cache_free(t10_pr_reg_cache, pr_reg); 1384 kmem_cache_free(t10_pr_reg_cache, pr_reg);
1388 } 1385 }
1389 spin_unlock(&pr_tmpl->aptpl_reg_lock); 1386 spin_unlock(&pr_tmpl->aptpl_reg_lock);
1390 } 1387 }
1391 1388
1392 static int core_scsi3_tpg_depend_item(struct se_portal_group *tpg) 1389 static int core_scsi3_tpg_depend_item(struct se_portal_group *tpg)
1393 { 1390 {
1394 return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys, 1391 return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys,
1395 &tpg->tpg_group.cg_item); 1392 &tpg->tpg_group.cg_item);
1396 } 1393 }
1397 1394
1398 static void core_scsi3_tpg_undepend_item(struct se_portal_group *tpg) 1395 static void core_scsi3_tpg_undepend_item(struct se_portal_group *tpg)
1399 { 1396 {
1400 configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys, 1397 configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys,
1401 &tpg->tpg_group.cg_item); 1398 &tpg->tpg_group.cg_item);
1402 1399
1403 atomic_dec(&tpg->tpg_pr_ref_count); 1400 atomic_dec(&tpg->tpg_pr_ref_count);
1404 smp_mb__after_atomic_dec(); 1401 smp_mb__after_atomic_dec();
1405 } 1402 }
1406 1403
1407 static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl) 1404 static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl)
1408 { 1405 {
1409 struct se_portal_group *tpg = nacl->se_tpg; 1406 struct se_portal_group *tpg = nacl->se_tpg;
1410 1407
1411 if (nacl->dynamic_node_acl) 1408 if (nacl->dynamic_node_acl)
1412 return 0; 1409 return 0;
1413 1410
1414 return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys, 1411 return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys,
1415 &nacl->acl_group.cg_item); 1412 &nacl->acl_group.cg_item);
1416 } 1413 }
1417 1414
1418 static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl) 1415 static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl)
1419 { 1416 {
1420 struct se_portal_group *tpg = nacl->se_tpg; 1417 struct se_portal_group *tpg = nacl->se_tpg;
1421 1418
1422 if (nacl->dynamic_node_acl) { 1419 if (nacl->dynamic_node_acl) {
1423 atomic_dec(&nacl->acl_pr_ref_count); 1420 atomic_dec(&nacl->acl_pr_ref_count);
1424 smp_mb__after_atomic_dec(); 1421 smp_mb__after_atomic_dec();
1425 return; 1422 return;
1426 } 1423 }
1427 1424
1428 configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys, 1425 configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys,
1429 &nacl->acl_group.cg_item); 1426 &nacl->acl_group.cg_item);
1430 1427
1431 atomic_dec(&nacl->acl_pr_ref_count); 1428 atomic_dec(&nacl->acl_pr_ref_count);
1432 smp_mb__after_atomic_dec(); 1429 smp_mb__after_atomic_dec();
1433 } 1430 }
1434 1431
1435 static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve) 1432 static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve)
1436 { 1433 {
1437 struct se_lun_acl *lun_acl = se_deve->se_lun_acl; 1434 struct se_lun_acl *lun_acl = se_deve->se_lun_acl;
1438 struct se_node_acl *nacl; 1435 struct se_node_acl *nacl;
1439 struct se_portal_group *tpg; 1436 struct se_portal_group *tpg;
1440 /* 1437 /*
1441 * For nacl->dynamic_node_acl=1 1438 * For nacl->dynamic_node_acl=1
1442 */ 1439 */
1443 if (!lun_acl) 1440 if (!lun_acl)
1444 return 0; 1441 return 0;
1445 1442
1446 nacl = lun_acl->se_lun_nacl; 1443 nacl = lun_acl->se_lun_nacl;
1447 tpg = nacl->se_tpg; 1444 tpg = nacl->se_tpg;
1448 1445
1449 return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys, 1446 return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys,
1450 &lun_acl->se_lun_group.cg_item); 1447 &lun_acl->se_lun_group.cg_item);
1451 } 1448 }
1452 1449
1453 static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve) 1450 static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
1454 { 1451 {
1455 struct se_lun_acl *lun_acl = se_deve->se_lun_acl; 1452 struct se_lun_acl *lun_acl = se_deve->se_lun_acl;
1456 struct se_node_acl *nacl; 1453 struct se_node_acl *nacl;
1457 struct se_portal_group *tpg; 1454 struct se_portal_group *tpg;
1458 /* 1455 /*
1459 * For nacl->dynamic_node_acl=1 1456 * For nacl->dynamic_node_acl=1
1460 */ 1457 */
1461 if (!lun_acl) { 1458 if (!lun_acl) {
1462 atomic_dec(&se_deve->pr_ref_count); 1459 atomic_dec(&se_deve->pr_ref_count);
1463 smp_mb__after_atomic_dec(); 1460 smp_mb__after_atomic_dec();
1464 return; 1461 return;
1465 } 1462 }
1466 nacl = lun_acl->se_lun_nacl; 1463 nacl = lun_acl->se_lun_nacl;
1467 tpg = nacl->se_tpg; 1464 tpg = nacl->se_tpg;
1468 1465
1469 configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys, 1466 configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys,
1470 &lun_acl->se_lun_group.cg_item); 1467 &lun_acl->se_lun_group.cg_item);
1471 1468
1472 atomic_dec(&se_deve->pr_ref_count); 1469 atomic_dec(&se_deve->pr_ref_count);
1473 smp_mb__after_atomic_dec(); 1470 smp_mb__after_atomic_dec();
1474 } 1471 }
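/*
 * The three depend/undepend pairs above share one pattern: pin the fabric
 * object in configfs so it cannot be removed while PR code still references
 * it, alongside an atomic *_pr_ref_count that removal paths can wait on.
 * A hedged sketch of that shape, with a hypothetical my_obj type standing
 * in for the tpg/nacl/deve cases:
 */
#include <linux/atomic.h>
#include <linux/configfs.h>

struct my_obj {
	struct configfs_subsystem *subsys;
	struct config_group group;
	atomic_t pr_ref_count;
};

static int my_obj_depend(struct my_obj *obj)
{
	/* Callers bump pr_ref_count before calling this, as the
	 * SPEC_I_PT loop below does under dev->se_port_lock. */
	return configfs_depend_item(obj->subsys, &obj->group.cg_item);
}

static void my_obj_undepend(struct my_obj *obj)
{
	configfs_undepend_item(obj->subsys, &obj->group.cg_item);

	atomic_dec(&obj->pr_ref_count);
	smp_mb__after_atomic_dec();
}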
1475 1472
1476 static int core_scsi3_decode_spec_i_port( 1473 static int core_scsi3_decode_spec_i_port(
1477 struct se_cmd *cmd, 1474 struct se_cmd *cmd,
1478 struct se_portal_group *tpg, 1475 struct se_portal_group *tpg,
1479 unsigned char *l_isid, 1476 unsigned char *l_isid,
1480 u64 sa_res_key, 1477 u64 sa_res_key,
1481 int all_tg_pt, 1478 int all_tg_pt,
1482 int aptpl) 1479 int aptpl)
1483 { 1480 {
1484 struct se_device *dev = cmd->se_dev; 1481 struct se_device *dev = cmd->se_dev;
1485 struct se_port *tmp_port; 1482 struct se_port *tmp_port;
1486 struct se_portal_group *dest_tpg = NULL, *tmp_tpg; 1483 struct se_portal_group *dest_tpg = NULL, *tmp_tpg;
1487 struct se_session *se_sess = cmd->se_sess; 1484 struct se_session *se_sess = cmd->se_sess;
1488 struct se_node_acl *dest_node_acl = NULL; 1485 struct se_node_acl *dest_node_acl = NULL;
1489 struct se_dev_entry *dest_se_deve = NULL, *local_se_deve; 1486 struct se_dev_entry *dest_se_deve = NULL, *local_se_deve;
1490 struct t10_pr_registration *dest_pr_reg, *local_pr_reg, *pr_reg_e; 1487 struct t10_pr_registration *dest_pr_reg, *local_pr_reg, *pr_reg_e;
1491 struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe; 1488 struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe;
1492 struct list_head tid_dest_list; 1489 struct list_head tid_dest_list;
1493 struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp; 1490 struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp;
1494 struct target_core_fabric_ops *tmp_tf_ops; 1491 struct target_core_fabric_ops *tmp_tf_ops;
1495 unsigned char *buf; 1492 unsigned char *buf;
1496 unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident; 1493 unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident;
1497 char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN]; 1494 char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
1498 u32 tpdl, tid_len = 0; 1495 u32 tpdl, tid_len = 0;
1499 int ret, dest_local_nexus, prf_isid; 1496 int ret, dest_local_nexus, prf_isid;
1500 u32 dest_rtpi = 0; 1497 u32 dest_rtpi = 0;
1501 1498
1502 memset(dest_iport, 0, 64); 1499 memset(dest_iport, 0, 64);
1503 INIT_LIST_HEAD(&tid_dest_list); 1500 INIT_LIST_HEAD(&tid_dest_list);
1504 1501
1505 local_se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; 1502 local_se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
1506 /* 1503 /*
1507 * Allocate a struct pr_transport_id_holder and setup the 1504 * Allocate a struct pr_transport_id_holder and setup the
1508 * local_node_acl and local_se_deve pointers and add to 1505 * local_node_acl and local_se_deve pointers and add to
1509 * struct list_head tid_dest_list for registration 1506 * struct list_head tid_dest_list for registration
1510 * processing in the tid_dest_list loop below. 1507 * processing in the tid_dest_list loop below.
1511 */ 1508 */
1512 tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL); 1509 tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL);
1513 if (!tidh_new) { 1510 if (!tidh_new) {
1514 pr_err("Unable to allocate tidh_new\n"); 1511 pr_err("Unable to allocate tidh_new\n");
1515 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1512 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1516 return -EINVAL; 1513 return -EINVAL;
1517 } 1514 }
1518 INIT_LIST_HEAD(&tidh_new->dest_list); 1515 INIT_LIST_HEAD(&tidh_new->dest_list);
1519 tidh_new->dest_tpg = tpg; 1516 tidh_new->dest_tpg = tpg;
1520 tidh_new->dest_node_acl = se_sess->se_node_acl; 1517 tidh_new->dest_node_acl = se_sess->se_node_acl;
1521 tidh_new->dest_se_deve = local_se_deve; 1518 tidh_new->dest_se_deve = local_se_deve;
1522 1519
1523 local_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev, 1520 local_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev,
1524 se_sess->se_node_acl, local_se_deve, l_isid, 1521 se_sess->se_node_acl, local_se_deve, l_isid,
1525 sa_res_key, all_tg_pt, aptpl); 1522 sa_res_key, all_tg_pt, aptpl);
1526 if (!local_pr_reg) { 1523 if (!local_pr_reg) {
1527 kfree(tidh_new); 1524 kfree(tidh_new);
1528 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1525 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1529 return -ENOMEM; 1526 return -ENOMEM;
1530 } 1527 }
1531 tidh_new->dest_pr_reg = local_pr_reg; 1528 tidh_new->dest_pr_reg = local_pr_reg;
1532 /* 1529 /*
1533 * The local I_T nexus does not hold any configfs dependencies, 1530 * The local I_T nexus does not hold any configfs dependencies,
1534 * so we set tid_h->dest_local_nexus=1 to prevent the 1531 * so we set tid_h->dest_local_nexus=1 to prevent the
1535 * configfs_undepend_item() calls in the tid_dest_list loops below. 1532 * configfs_undepend_item() calls in the tid_dest_list loops below.
1536 */ 1533 */
1537 tidh_new->dest_local_nexus = 1; 1534 tidh_new->dest_local_nexus = 1;
1538 list_add_tail(&tidh_new->dest_list, &tid_dest_list); 1535 list_add_tail(&tidh_new->dest_list, &tid_dest_list);
1539 1536
1540 buf = transport_kmap_first_data_page(cmd); 1537 buf = transport_kmap_first_data_page(cmd);
1541 /* 1538 /*
1542 * For a PERSISTENT RESERVE OUT specify initiator ports payload, 1539 * For a PERSISTENT RESERVE OUT specify initiator ports payload,
1543 * first extract TransportID Parameter Data Length, and make sure 1540 * first extract TransportID Parameter Data Length, and make sure
1544 * the value matches up to the SCSI expected data transfer length. 1541 * the value matches up to the SCSI expected data transfer length.
1545 */ 1542 */
1546 tpdl = (buf[24] & 0xff) << 24; 1543 tpdl = (buf[24] & 0xff) << 24;
1547 tpdl |= (buf[25] & 0xff) << 16; 1544 tpdl |= (buf[25] & 0xff) << 16;
1548 tpdl |= (buf[26] & 0xff) << 8; 1545 tpdl |= (buf[26] & 0xff) << 8;
1549 tpdl |= buf[27] & 0xff; 1546 tpdl |= buf[27] & 0xff;
1550 1547
1551 if ((tpdl + 28) != cmd->data_length) { 1548 if ((tpdl + 28) != cmd->data_length) {
1552 pr_err("SPC-3 PR: Illegal tpdl: %u + 28 byte header" 1549 pr_err("SPC-3 PR: Illegal tpdl: %u + 28 byte header"
1553 " does not equal CDB data_length: %u\n", tpdl, 1550 " does not equal CDB data_length: %u\n", tpdl,
1554 cmd->data_length); 1551 cmd->data_length);
1555 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; 1552 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
1556 ret = -EINVAL; 1553 ret = -EINVAL;
1557 goto out; 1554 goto out;
1558 } 1555 }
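/*
 * The four shift-and-OR lines above decode the big-endian TransportID
 * Parameter Data Length at bytes 24..27 of the PROUT parameter list, then
 * require the 28-byte header plus tpdl to equal the transfer length. An
 * equivalent decode as a sketch, assuming the standard kernel unaligned
 * helpers; check_tpdl() is a hypothetical name:
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <asm/unaligned.h>

static int check_tpdl(const unsigned char *buf, u32 data_length)
{
	u32 tpdl = get_unaligned_be32(&buf[24]);

	/* the 28-byte parameter list header precedes the TransportIDs */
	return (tpdl + 28 == data_length) ? 0 : -EINVAL;
}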
1559 /* 1556 /*
1560 * Start processing the received transport IDs using the 1557 * Start processing the received transport IDs using the
1561 * receiving I_T Nexus portal's fabric dependent methods to 1558 * receiving I_T Nexus portal's fabric dependent methods to
1562 * obtain the SCSI Initiator Port/Device Identifiers. 1559 * obtain the SCSI Initiator Port/Device Identifiers.
1563 */ 1560 */
1564 ptr = &buf[28]; 1561 ptr = &buf[28];
1565 1562
1566 while (tpdl > 0) { 1563 while (tpdl > 0) {
1567 proto_ident = (ptr[0] & 0x0f); 1564 proto_ident = (ptr[0] & 0x0f);
1568 dest_tpg = NULL; 1565 dest_tpg = NULL;
1569 1566
1570 spin_lock(&dev->se_port_lock); 1567 spin_lock(&dev->se_port_lock);
1571 list_for_each_entry(tmp_port, &dev->dev_sep_list, sep_list) { 1568 list_for_each_entry(tmp_port, &dev->dev_sep_list, sep_list) {
1572 tmp_tpg = tmp_port->sep_tpg; 1569 tmp_tpg = tmp_port->sep_tpg;
1573 if (!tmp_tpg) 1570 if (!tmp_tpg)
1574 continue; 1571 continue;
1575 tmp_tf_ops = tmp_tpg->se_tpg_tfo; 1572 tmp_tf_ops = tmp_tpg->se_tpg_tfo;
1576 if (!tmp_tf_ops) 1573 if (!tmp_tf_ops)
1577 continue; 1574 continue;
1578 if (!tmp_tf_ops->get_fabric_proto_ident || 1575 if (!tmp_tf_ops->get_fabric_proto_ident ||
1579 !tmp_tf_ops->tpg_parse_pr_out_transport_id) 1576 !tmp_tf_ops->tpg_parse_pr_out_transport_id)
1580 continue; 1577 continue;
1581 /* 1578 /*
1582 * Look for the matching proto_ident provided by 1579 * Look for the matching proto_ident provided by
1583 * the received TransportID 1580 * the received TransportID
1584 */ 1581 */
1585 tmp_proto_ident = tmp_tf_ops->get_fabric_proto_ident(tmp_tpg); 1582 tmp_proto_ident = tmp_tf_ops->get_fabric_proto_ident(tmp_tpg);
1586 if (tmp_proto_ident != proto_ident) 1583 if (tmp_proto_ident != proto_ident)
1587 continue; 1584 continue;
1588 dest_rtpi = tmp_port->sep_rtpi; 1585 dest_rtpi = tmp_port->sep_rtpi;
1589 1586
1590 i_str = tmp_tf_ops->tpg_parse_pr_out_transport_id( 1587 i_str = tmp_tf_ops->tpg_parse_pr_out_transport_id(
1591 tmp_tpg, (const char *)ptr, &tid_len, 1588 tmp_tpg, (const char *)ptr, &tid_len,
1592 &iport_ptr); 1589 &iport_ptr);
1593 if (!i_str) 1590 if (!i_str)
1594 continue; 1591 continue;
1595 1592
1596 atomic_inc(&tmp_tpg->tpg_pr_ref_count); 1593 atomic_inc(&tmp_tpg->tpg_pr_ref_count);
1597 smp_mb__after_atomic_inc(); 1594 smp_mb__after_atomic_inc();
1598 spin_unlock(&dev->se_port_lock); 1595 spin_unlock(&dev->se_port_lock);
1599 1596
1600 ret = core_scsi3_tpg_depend_item(tmp_tpg); 1597 ret = core_scsi3_tpg_depend_item(tmp_tpg);
1601 if (ret != 0) { 1598 if (ret != 0) {
1602 pr_err(" core_scsi3_tpg_depend_item()" 1599 pr_err(" core_scsi3_tpg_depend_item()"
1603 " for tmp_tpg\n"); 1600 " for tmp_tpg\n");
1604 atomic_dec(&tmp_tpg->tpg_pr_ref_count); 1601 atomic_dec(&tmp_tpg->tpg_pr_ref_count);
1605 smp_mb__after_atomic_dec(); 1602 smp_mb__after_atomic_dec();
1606 cmd->scsi_sense_reason = 1603 cmd->scsi_sense_reason =
1607 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1604 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1608 ret = -EINVAL; 1605 ret = -EINVAL;
1609 goto out; 1606 goto out;
1610 } 1607 }
1611 /* 1608 /*
1612 * Locate the destination initiator ACL to be registered 1609 * Locate the destination initiator ACL to be registered
1613 * from the decoded fabric module specific TransportID 1610 * from the decoded fabric module specific TransportID
1614 * at *i_str. 1611 * at *i_str.
1615 */ 1612 */
1616 spin_lock_irq(&tmp_tpg->acl_node_lock); 1613 spin_lock_irq(&tmp_tpg->acl_node_lock);
1617 dest_node_acl = __core_tpg_get_initiator_node_acl( 1614 dest_node_acl = __core_tpg_get_initiator_node_acl(
1618 tmp_tpg, i_str); 1615 tmp_tpg, i_str);
1619 if (dest_node_acl) { 1616 if (dest_node_acl) {
1620 atomic_inc(&dest_node_acl->acl_pr_ref_count); 1617 atomic_inc(&dest_node_acl->acl_pr_ref_count);
1621 smp_mb__after_atomic_inc(); 1618 smp_mb__after_atomic_inc();
1622 } 1619 }
1623 spin_unlock_irq(&tmp_tpg->acl_node_lock); 1620 spin_unlock_irq(&tmp_tpg->acl_node_lock);
1624 1621
1625 if (!dest_node_acl) { 1622 if (!dest_node_acl) {
1626 core_scsi3_tpg_undepend_item(tmp_tpg); 1623 core_scsi3_tpg_undepend_item(tmp_tpg);
1627 spin_lock(&dev->se_port_lock); 1624 spin_lock(&dev->se_port_lock);
1628 continue; 1625 continue;
1629 } 1626 }
1630 1627
1631 ret = core_scsi3_nodeacl_depend_item(dest_node_acl); 1628 ret = core_scsi3_nodeacl_depend_item(dest_node_acl);
1632 if (ret != 0) { 1629 if (ret != 0) {
1633 pr_err("configfs_depend_item() failed" 1630 pr_err("configfs_depend_item() failed"
1634 " for dest_node_acl->acl_group\n"); 1631 " for dest_node_acl->acl_group\n");
1635 atomic_dec(&dest_node_acl->acl_pr_ref_count); 1632 atomic_dec(&dest_node_acl->acl_pr_ref_count);
1636 smp_mb__after_atomic_dec(); 1633 smp_mb__after_atomic_dec();
1637 core_scsi3_tpg_undepend_item(tmp_tpg); 1634 core_scsi3_tpg_undepend_item(tmp_tpg);
1638 cmd->scsi_sense_reason = 1635 cmd->scsi_sense_reason =
1639 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1636 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1640 ret = -EINVAL; 1637 ret = -EINVAL;
1641 goto out; 1638 goto out;
1642 } 1639 }
1643 1640
1644 dest_tpg = tmp_tpg; 1641 dest_tpg = tmp_tpg;
1645 pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node:" 1642 pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node:"
1646 " %s Port RTPI: %hu\n", 1643 " %s Port RTPI: %hu\n",
1647 dest_tpg->se_tpg_tfo->get_fabric_name(), 1644 dest_tpg->se_tpg_tfo->get_fabric_name(),
1648 dest_node_acl->initiatorname, dest_rtpi); 1645 dest_node_acl->initiatorname, dest_rtpi);
1649 1646
1650 spin_lock(&dev->se_port_lock); 1647 spin_lock(&dev->se_port_lock);
1651 break; 1648 break;
1652 } 1649 }
1653 spin_unlock(&dev->se_port_lock); 1650 spin_unlock(&dev->se_port_lock);
1654 1651
1655 if (!dest_tpg) { 1652 if (!dest_tpg) {
1656 pr_err("SPC-3 PR SPEC_I_PT: Unable to locate" 1653 pr_err("SPC-3 PR SPEC_I_PT: Unable to locate"
1657 " dest_tpg\n"); 1654 " dest_tpg\n");
1658 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; 1655 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
1659 ret = -EINVAL; 1656 ret = -EINVAL;
1660 goto out; 1657 goto out;
1661 } 1658 }
1662 #if 0 1659 #if 0
1663 pr_debug("SPC-3 PR SPEC_I_PT: Got %s data_length: %u tpdl: %u" 1660 pr_debug("SPC-3 PR SPEC_I_PT: Got %s data_length: %u tpdl: %u"
1664 " tid_len: %d for %s + %s\n", 1661 " tid_len: %d for %s + %s\n",
1665 dest_tpg->se_tpg_tfo->get_fabric_name(), cmd->data_length, 1662 dest_tpg->se_tpg_tfo->get_fabric_name(), cmd->data_length,
1666 tpdl, tid_len, i_str, iport_ptr); 1663 tpdl, tid_len, i_str, iport_ptr);
1667 #endif 1664 #endif
1668 if (tid_len > tpdl) { 1665 if (tid_len > tpdl) {
1669 pr_err("SPC-3 PR SPEC_I_PT: Illegal tid_len:" 1666 pr_err("SPC-3 PR SPEC_I_PT: Illegal tid_len:"
1670 " %u for Transport ID: %s\n", tid_len, ptr); 1667 " %u for Transport ID: %s\n", tid_len, ptr);
1671 core_scsi3_nodeacl_undepend_item(dest_node_acl); 1668 core_scsi3_nodeacl_undepend_item(dest_node_acl);
1672 core_scsi3_tpg_undepend_item(dest_tpg); 1669 core_scsi3_tpg_undepend_item(dest_tpg);
1673 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; 1670 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
1674 ret = -EINVAL; 1671 ret = -EINVAL;
1675 goto out; 1672 goto out;
1676 } 1673 }
1677 /* 1674 /*
1678 * Locate the destination struct se_dev_entry pointer for matching 1675 * Locate the destination struct se_dev_entry pointer for matching
1679 * RELATIVE TARGET PORT IDENTIFIER on the receiving I_T Nexus 1676 * RELATIVE TARGET PORT IDENTIFIER on the receiving I_T Nexus
1680 * Target Port. 1677 * Target Port.
1681 */ 1678 */
1682 dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl, 1679 dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl,
1683 dest_rtpi); 1680 dest_rtpi);
1684 if (!dest_se_deve) { 1681 if (!dest_se_deve) {
1685 pr_err("Unable to locate %s dest_se_deve" 1682 pr_err("Unable to locate %s dest_se_deve"
1686 " from destination RTPI: %hu\n", 1683 " from destination RTPI: %hu\n",
1687 dest_tpg->se_tpg_tfo->get_fabric_name(), 1684 dest_tpg->se_tpg_tfo->get_fabric_name(),
1688 dest_rtpi); 1685 dest_rtpi);
1689 1686
1690 core_scsi3_nodeacl_undepend_item(dest_node_acl); 1687 core_scsi3_nodeacl_undepend_item(dest_node_acl);
1691 core_scsi3_tpg_undepend_item(dest_tpg); 1688 core_scsi3_tpg_undepend_item(dest_tpg);
1692 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; 1689 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
1693 ret = -EINVAL; 1690 ret = -EINVAL;
1694 goto out; 1691 goto out;
1695 } 1692 }
1696 1693
1697 ret = core_scsi3_lunacl_depend_item(dest_se_deve); 1694 ret = core_scsi3_lunacl_depend_item(dest_se_deve);
1698 if (ret < 0) { 1695 if (ret < 0) {
1699 pr_err("core_scsi3_lunacl_depend_item()" 1696 pr_err("core_scsi3_lunacl_depend_item()"
1700 " failed\n"); 1697 " failed\n");
1701 atomic_dec(&dest_se_deve->pr_ref_count); 1698 atomic_dec(&dest_se_deve->pr_ref_count);
1702 smp_mb__after_atomic_dec(); 1699 smp_mb__after_atomic_dec();
1703 core_scsi3_nodeacl_undepend_item(dest_node_acl); 1700 core_scsi3_nodeacl_undepend_item(dest_node_acl);
1704 core_scsi3_tpg_undepend_item(dest_tpg); 1701 core_scsi3_tpg_undepend_item(dest_tpg);
1705 cmd->scsi_sense_reason = 1702 cmd->scsi_sense_reason =
1706 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1703 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1707 ret = -EINVAL; 1704 ret = -EINVAL;
1708 goto out; 1705 goto out;
1709 } 1706 }
1710 #if 0 1707 #if 0
1711 pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node: %s" 1708 pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node: %s"
1712 " dest_se_deve mapped_lun: %u\n", 1709 " dest_se_deve mapped_lun: %u\n",
1713 dest_tpg->se_tpg_tfo->get_fabric_name(), 1710 dest_tpg->se_tpg_tfo->get_fabric_name(),
1714 dest_node_acl->initiatorname, dest_se_deve->mapped_lun); 1711 dest_node_acl->initiatorname, dest_se_deve->mapped_lun);
1715 #endif 1712 #endif
1716 /* 1713 /*
1717 * Skip any TransportIDs that already have a registration for 1714 * Skip any TransportIDs that already have a registration for
1718 * this target port. 1715 * this target port.
1719 */ 1716 */
1720 pr_reg_e = __core_scsi3_locate_pr_reg(dev, dest_node_acl, 1717 pr_reg_e = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
1721 iport_ptr); 1718 iport_ptr);
1722 if (pr_reg_e) { 1719 if (pr_reg_e) {
1723 core_scsi3_put_pr_reg(pr_reg_e); 1720 core_scsi3_put_pr_reg(pr_reg_e);
1724 core_scsi3_lunacl_undepend_item(dest_se_deve); 1721 core_scsi3_lunacl_undepend_item(dest_se_deve);
1725 core_scsi3_nodeacl_undepend_item(dest_node_acl); 1722 core_scsi3_nodeacl_undepend_item(dest_node_acl);
1726 core_scsi3_tpg_undepend_item(dest_tpg); 1723 core_scsi3_tpg_undepend_item(dest_tpg);
1727 ptr += tid_len; 1724 ptr += tid_len;
1728 tpdl -= tid_len; 1725 tpdl -= tid_len;
1729 tid_len = 0; 1726 tid_len = 0;
1730 continue; 1727 continue;
1731 } 1728 }
1732 /* 1729 /*
1733 * Allocate a struct pr_transport_id_holder and setup 1730 * Allocate a struct pr_transport_id_holder and setup
1734 * the dest_node_acl and dest_se_deve pointers for the 1731 * the dest_node_acl and dest_se_deve pointers for the
1735 * loop below. 1732 * loop below.
1736 */ 1733 */
1737 tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), 1734 tidh_new = kzalloc(sizeof(struct pr_transport_id_holder),
1738 GFP_KERNEL); 1735 GFP_KERNEL);
1739 if (!tidh_new) { 1736 if (!tidh_new) {
1740 pr_err("Unable to allocate tidh_new\n"); 1737 pr_err("Unable to allocate tidh_new\n");
1741 core_scsi3_lunacl_undepend_item(dest_se_deve); 1738 core_scsi3_lunacl_undepend_item(dest_se_deve);
1742 core_scsi3_nodeacl_undepend_item(dest_node_acl); 1739 core_scsi3_nodeacl_undepend_item(dest_node_acl);
1743 core_scsi3_tpg_undepend_item(dest_tpg); 1740 core_scsi3_tpg_undepend_item(dest_tpg);
1744 cmd->scsi_sense_reason = 1741 cmd->scsi_sense_reason =
1745 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1742 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1746 ret = -ENOMEM; 1743 ret = -ENOMEM;
1747 goto out; 1744 goto out;
1748 } 1745 }
1749 INIT_LIST_HEAD(&tidh_new->dest_list); 1746 INIT_LIST_HEAD(&tidh_new->dest_list);
1750 tidh_new->dest_tpg = dest_tpg; 1747 tidh_new->dest_tpg = dest_tpg;
1751 tidh_new->dest_node_acl = dest_node_acl; 1748 tidh_new->dest_node_acl = dest_node_acl;
1752 tidh_new->dest_se_deve = dest_se_deve; 1749 tidh_new->dest_se_deve = dest_se_deve;
1753 1750
1754 /* 1751 /*
1755 * Allocate, but do NOT add the registration for the 1752 * Allocate, but do NOT add the registration for the
1756 * TransportID referenced SCSI Initiator port. This 1753 * TransportID referenced SCSI Initiator port. This
1757 * is done because of the following from spc4r17 in section 1754 * is done because of the following from spc4r17 in section
1758 * 6.14.3 wrt SPEC_I_PT: 1755 * 6.14.3 wrt SPEC_I_PT:
1759 * 1756 *
1760 * "If a registration fails for any initiator port (e.g., if th 1757 * "If a registration fails for any initiator port (e.g., if th
1761 * logical unit does not have enough resources available to 1758 * logical unit does not have enough resources available to
1762 * hold the registration information), no registrations shall be 1759 * hold the registration information), no registrations shall be
1763 * made, and the command shall be terminated with 1760 * made, and the command shall be terminated with
1764 * CHECK CONDITION status." 1761 * CHECK CONDITION status."
1765 * 1762 *
1766 * That means we call __core_scsi3_alloc_registration() here, 1763 * That means we call __core_scsi3_alloc_registration() here,
1767 * and then call __core_scsi3_add_registration() in the 1764 * and then call __core_scsi3_add_registration() in the
1768 * 2nd loop which will never fail. 1765 * 2nd loop which will never fail.
1769 */ 1766 */
1770 dest_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev, 1767 dest_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev,
1771 dest_node_acl, dest_se_deve, iport_ptr, 1768 dest_node_acl, dest_se_deve, iport_ptr,
1772 sa_res_key, all_tg_pt, aptpl); 1769 sa_res_key, all_tg_pt, aptpl);
1773 if (!dest_pr_reg) { 1770 if (!dest_pr_reg) {
1774 core_scsi3_lunacl_undepend_item(dest_se_deve); 1771 core_scsi3_lunacl_undepend_item(dest_se_deve);
1775 core_scsi3_nodeacl_undepend_item(dest_node_acl); 1772 core_scsi3_nodeacl_undepend_item(dest_node_acl);
1776 core_scsi3_tpg_undepend_item(dest_tpg); 1773 core_scsi3_tpg_undepend_item(dest_tpg);
1777 kfree(tidh_new); 1774 kfree(tidh_new);
1778 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; 1775 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
1779 ret = -EINVAL; 1776 ret = -EINVAL;
1780 goto out; 1777 goto out;
1781 } 1778 }
1782 tidh_new->dest_pr_reg = dest_pr_reg; 1779 tidh_new->dest_pr_reg = dest_pr_reg;
1783 list_add_tail(&tidh_new->dest_list, &tid_dest_list); 1780 list_add_tail(&tidh_new->dest_list, &tid_dest_list);
1784 1781
1785 ptr += tid_len; 1782 ptr += tid_len;
1786 tpdl -= tid_len; 1783 tpdl -= tid_len;
1787 tid_len = 0; 1784 tid_len = 0;
1788 1785
1789 } 1786 }
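/*
 * Note the two-phase shape of the loop that just ended: registrations are
 * only *allocated* in it, and __core_scsi3_add_registration() runs in the
 * second loop further below, which cannot fail. That satisfies the
 * all-or-nothing rule quoted above from spc4r17 6.14.3. A hedged sketch of
 * the pattern; alloc_one(), commit_one() and free_one() are hypothetical:
 */
#include <linux/errno.h>
#include <linux/list.h>

struct input { struct list_head list; };
struct staged_reg { struct list_head list; };

static struct staged_reg *alloc_one(struct input *in);	/* may fail */
static void commit_one(struct staged_reg *reg);		/* cannot fail */
static void free_one(struct staged_reg *reg);

static int two_phase_register(struct list_head *inputs)
{
	LIST_HEAD(staged);
	struct staged_reg *reg, *tmp;
	struct input *in;

	/* Phase 1: allocate everything; any failure unwinds it all. */
	list_for_each_entry(in, inputs, list) {
		reg = alloc_one(in);
		if (!reg)
			goto unwind;
		list_add_tail(&reg->list, &staged);
	}
	/* Phase 2: commit each staged entry; nothing can fail here. */
	list_for_each_entry_safe(reg, tmp, &staged, list) {
		list_del(&reg->list);
		commit_one(reg);
	}
	return 0;
unwind:
	list_for_each_entry_safe(reg, tmp, &staged, list) {
		list_del(&reg->list);
		free_one(reg);
	}
	return -ENOMEM;
}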
1790 1787
1791 transport_kunmap_first_data_page(cmd); 1788 transport_kunmap_first_data_page(cmd);
1792 1789
1793 /* 1790 /*
1794 * Go ahead and create registrations from tid_dest_list for the 1791 * Go ahead and create registrations from tid_dest_list for the
1795 * SPEC_I_PT provided TransportID for the *tidh referenced dest_node_acl 1792 * SPEC_I_PT provided TransportID for the *tidh referenced dest_node_acl
1796 * and dest_se_deve. 1793 * and dest_se_deve.
1797 * 1794 *
1798 * The SA Reservation Key from the PROUT is set for the 1795 * The SA Reservation Key from the PROUT is set for the
1799 * registration, and ALL_TG_PT is also passed. ALL_TG_PT=1 1796 * registration, and ALL_TG_PT is also passed. ALL_TG_PT=1
1800 * means that the TransportID Initiator port will be 1797 * means that the TransportID Initiator port will be
1801 * registered on all of the target ports in the SCSI target device. 1798 * registered on all of the target ports in the SCSI target device.
1802 * ALL_TG_PT=0 means the registration will only be for the 1799 * ALL_TG_PT=0 means the registration will only be for the
1803 * SCSI target port on which the PROUT REGISTER with SPEC_I_PT=1 1800 * SCSI target port on which the PROUT REGISTER with SPEC_I_PT=1
1804 * was received. 1801 * was received.
1805 */ 1802 */
1806 list_for_each_entry_safe(tidh, tidh_tmp, &tid_dest_list, dest_list) { 1803 list_for_each_entry_safe(tidh, tidh_tmp, &tid_dest_list, dest_list) {
1807 dest_tpg = tidh->dest_tpg; 1804 dest_tpg = tidh->dest_tpg;
1808 dest_node_acl = tidh->dest_node_acl; 1805 dest_node_acl = tidh->dest_node_acl;
1809 dest_se_deve = tidh->dest_se_deve; 1806 dest_se_deve = tidh->dest_se_deve;
1810 dest_pr_reg = tidh->dest_pr_reg; 1807 dest_pr_reg = tidh->dest_pr_reg;
1811 dest_local_nexus = tidh->dest_local_nexus; 1808 dest_local_nexus = tidh->dest_local_nexus;
1812 1809
1813 list_del(&tidh->dest_list); 1810 list_del(&tidh->dest_list);
1814 kfree(tidh); 1811 kfree(tidh);
1815 1812
1816 memset(i_buf, 0, PR_REG_ISID_ID_LEN); 1813 memset(i_buf, 0, PR_REG_ISID_ID_LEN);
1817 prf_isid = core_pr_dump_initiator_port(dest_pr_reg, &i_buf[0], 1814 prf_isid = core_pr_dump_initiator_port(dest_pr_reg, &i_buf[0],
1818 PR_REG_ISID_ID_LEN); 1815 PR_REG_ISID_ID_LEN);
1819 1816
1820 __core_scsi3_add_registration(cmd->se_dev, dest_node_acl, 1817 __core_scsi3_add_registration(cmd->se_dev, dest_node_acl,
1821 dest_pr_reg, 0, 0); 1818 dest_pr_reg, 0, 0);
1822 1819
1823 pr_debug("SPC-3 PR [%s] SPEC_I_PT: Successfully" 1820 pr_debug("SPC-3 PR [%s] SPEC_I_PT: Successfully"
1824 " registered Transport ID for Node: %s%s Mapped LUN:" 1821 " registered Transport ID for Node: %s%s Mapped LUN:"
1825 " %u\n", dest_tpg->se_tpg_tfo->get_fabric_name(), 1822 " %u\n", dest_tpg->se_tpg_tfo->get_fabric_name(),
1826 dest_node_acl->initiatorname, (prf_isid) ? 1823 dest_node_acl->initiatorname, (prf_isid) ?
1827 &i_buf[0] : "", dest_se_deve->mapped_lun); 1824 &i_buf[0] : "", dest_se_deve->mapped_lun);
1828 1825
1829 if (dest_local_nexus) 1826 if (dest_local_nexus)
1830 continue; 1827 continue;
1831 1828
1832 core_scsi3_lunacl_undepend_item(dest_se_deve); 1829 core_scsi3_lunacl_undepend_item(dest_se_deve);
1833 core_scsi3_nodeacl_undepend_item(dest_node_acl); 1830 core_scsi3_nodeacl_undepend_item(dest_node_acl);
1834 core_scsi3_tpg_undepend_item(dest_tpg); 1831 core_scsi3_tpg_undepend_item(dest_tpg);
1835 } 1832 }
1836 1833
1837 return 0; 1834 return 0;
1838 out: 1835 out:
1839 transport_kunmap_first_data_page(cmd); 1836 transport_kunmap_first_data_page(cmd);
1840 /* 1837 /*
1841 * For the failure case, release everything from tid_dest_list 1838 * For the failure case, release everything from tid_dest_list
1842 * including *dest_pr_reg and the configfs dependencies.. 1839 * including *dest_pr_reg and the configfs dependencies..
1843 */ 1840 */
1844 list_for_each_entry_safe(tidh, tidh_tmp, &tid_dest_list, dest_list) { 1841 list_for_each_entry_safe(tidh, tidh_tmp, &tid_dest_list, dest_list) {
1845 dest_tpg = tidh->dest_tpg; 1842 dest_tpg = tidh->dest_tpg;
1846 dest_node_acl = tidh->dest_node_acl; 1843 dest_node_acl = tidh->dest_node_acl;
1847 dest_se_deve = tidh->dest_se_deve; 1844 dest_se_deve = tidh->dest_se_deve;
1848 dest_pr_reg = tidh->dest_pr_reg; 1845 dest_pr_reg = tidh->dest_pr_reg;
1849 dest_local_nexus = tidh->dest_local_nexus; 1846 dest_local_nexus = tidh->dest_local_nexus;
1850 1847
1851 list_del(&tidh->dest_list); 1848 list_del(&tidh->dest_list);
1852 kfree(tidh); 1849 kfree(tidh);
1853 /* 1850 /*
1854 * Release any extra ALL_TG_PT=1 registrations for 1851 * Release any extra ALL_TG_PT=1 registrations for
1855 * the SPEC_I_PT=1 case. 1852 * the SPEC_I_PT=1 case.
1856 */ 1853 */
1857 list_for_each_entry_safe(pr_reg_tmp, pr_reg_tmp_safe, 1854 list_for_each_entry_safe(pr_reg_tmp, pr_reg_tmp_safe,
1858 &dest_pr_reg->pr_reg_atp_list, 1855 &dest_pr_reg->pr_reg_atp_list,
1859 pr_reg_atp_mem_list) { 1856 pr_reg_atp_mem_list) {
1860 list_del(&pr_reg_tmp->pr_reg_atp_mem_list); 1857 list_del(&pr_reg_tmp->pr_reg_atp_mem_list);
1861 core_scsi3_lunacl_undepend_item(pr_reg_tmp->pr_reg_deve); 1858 core_scsi3_lunacl_undepend_item(pr_reg_tmp->pr_reg_deve);
1862 kmem_cache_free(t10_pr_reg_cache, pr_reg_tmp); 1859 kmem_cache_free(t10_pr_reg_cache, pr_reg_tmp);
1863 } 1860 }
1864 1861
1865 kfree(dest_pr_reg->pr_aptpl_buf); 1862 kfree(dest_pr_reg->pr_aptpl_buf);
1866 kmem_cache_free(t10_pr_reg_cache, dest_pr_reg); 1863 kmem_cache_free(t10_pr_reg_cache, dest_pr_reg);
1867 1864
1868 if (dest_local_nexus) 1865 if (dest_local_nexus)
1869 continue; 1866 continue;
1870 1867
1871 core_scsi3_lunacl_undepend_item(dest_se_deve); 1868 core_scsi3_lunacl_undepend_item(dest_se_deve);
1872 core_scsi3_nodeacl_undepend_item(dest_node_acl); 1869 core_scsi3_nodeacl_undepend_item(dest_node_acl);
1873 core_scsi3_tpg_undepend_item(dest_tpg); 1870 core_scsi3_tpg_undepend_item(dest_tpg);
1874 } 1871 }
1875 return ret; 1872 return ret;
1876 } 1873 }
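/*
 * Every failure site in core_scsi3_decode_spec_i_port() above sets
 * cmd->scsi_sense_reason and jumps to the single out: label, which first
 * unmaps the payload and then releases every staged holder along with its
 * configfs dependencies, so the kmap is paired with exactly one kunmap on
 * every path. A minimal sketch of that shape, assuming the target core
 * headers for struct se_cmd; payload_ok() is a hypothetical check:
 */
#include <linux/types.h>

static bool payload_ok(unsigned char *buf);	/* hypothetical */

static int parse_payload(struct se_cmd *cmd)
{
	unsigned char *buf = transport_kmap_first_data_page(cmd);
	int ret = 0;

	if (!payload_ok(buf)) {
		ret = -EINVAL;
		goto out;
	}
	/* ... parse TransportIDs ... */
out:
	transport_kunmap_first_data_page(cmd);
	return ret;
}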
1877 1874
1878 /* 1875 /*
1879 * Called with struct se_device->dev_reservation_lock held 1876 * Called with struct se_device->dev_reservation_lock held
1880 */ 1877 */
1881 static int __core_scsi3_update_aptpl_buf( 1878 static int __core_scsi3_update_aptpl_buf(
1882 struct se_device *dev, 1879 struct se_device *dev,
1883 unsigned char *buf, 1880 unsigned char *buf,
1884 u32 pr_aptpl_buf_len, 1881 u32 pr_aptpl_buf_len,
1885 int clear_aptpl_metadata) 1882 int clear_aptpl_metadata)
1886 { 1883 {
1887 struct se_lun *lun; 1884 struct se_lun *lun;
1888 struct se_portal_group *tpg; 1885 struct se_portal_group *tpg;
1889 struct se_subsystem_dev *su_dev = dev->se_sub_dev; 1886 struct se_subsystem_dev *su_dev = dev->se_sub_dev;
1890 struct t10_pr_registration *pr_reg; 1887 struct t10_pr_registration *pr_reg;
1891 unsigned char tmp[512], isid_buf[32]; 1888 unsigned char tmp[512], isid_buf[32];
1892 ssize_t len = 0; 1889 ssize_t len = 0;
1893 int reg_count = 0; 1890 int reg_count = 0;
1894 1891
1895 memset(buf, 0, pr_aptpl_buf_len); 1892 memset(buf, 0, pr_aptpl_buf_len);
1896 /* 1893 /*
1897 * Called to clear metadata once APTPL has been deactivated. 1894 * Called to clear metadata once APTPL has been deactivated.
1898 */ 1895 */
1899 if (clear_aptpl_metadata) { 1896 if (clear_aptpl_metadata) {
1900 snprintf(buf, pr_aptpl_buf_len, 1897 snprintf(buf, pr_aptpl_buf_len,
1901 "No Registrations or Reservations\n"); 1898 "No Registrations or Reservations\n");
1902 return 0; 1899 return 0;
1903 } 1900 }
1904 /* 1901 /*
1905 * Walk the registration list.. 1902 * Walk the registration list..
1906 */ 1903 */
1907 spin_lock(&su_dev->t10_pr.registration_lock); 1904 spin_lock(&su_dev->t10_pr.registration_lock);
1908 list_for_each_entry(pr_reg, &su_dev->t10_pr.registration_list, 1905 list_for_each_entry(pr_reg, &su_dev->t10_pr.registration_list,
1909 pr_reg_list) { 1906 pr_reg_list) {
1910 1907
1911 tmp[0] = '\0'; 1908 tmp[0] = '\0';
1912 isid_buf[0] = '\0'; 1909 isid_buf[0] = '\0';
1913 tpg = pr_reg->pr_reg_nacl->se_tpg; 1910 tpg = pr_reg->pr_reg_nacl->se_tpg;
1914 lun = pr_reg->pr_reg_tg_pt_lun; 1911 lun = pr_reg->pr_reg_tg_pt_lun;
1915 /* 1912 /*
1916 * Write out any ISID value to APTPL metadata that was included 1913 * Write out any ISID value to APTPL metadata that was included
1917 * in the original registration. 1914 * in the original registration.
1918 */ 1915 */
1919 if (pr_reg->isid_present_at_reg) 1916 if (pr_reg->isid_present_at_reg)
1920 snprintf(isid_buf, 32, "initiator_sid=%s\n", 1917 snprintf(isid_buf, 32, "initiator_sid=%s\n",
1921 pr_reg->pr_reg_isid); 1918 pr_reg->pr_reg_isid);
1922 /* 1919 /*
1923 * Include special metadata if the pr_reg matches the 1920 * Include special metadata if the pr_reg matches the
1924 * reservation holder. 1921 * reservation holder.
1925 */ 1922 */
1926 if (dev->dev_pr_res_holder == pr_reg) { 1923 if (dev->dev_pr_res_holder == pr_reg) {
1927 snprintf(tmp, 512, "PR_REG_START: %d" 1924 snprintf(tmp, 512, "PR_REG_START: %d"
1928 "\ninitiator_fabric=%s\n" 1925 "\ninitiator_fabric=%s\n"
1929 "initiator_node=%s\n%s" 1926 "initiator_node=%s\n%s"
1930 "sa_res_key=%llu\n" 1927 "sa_res_key=%llu\n"
1931 "res_holder=1\nres_type=%02x\n" 1928 "res_holder=1\nres_type=%02x\n"
1932 "res_scope=%02x\nres_all_tg_pt=%d\n" 1929 "res_scope=%02x\nres_all_tg_pt=%d\n"
1933 "mapped_lun=%u\n", reg_count, 1930 "mapped_lun=%u\n", reg_count,
1934 tpg->se_tpg_tfo->get_fabric_name(), 1931 tpg->se_tpg_tfo->get_fabric_name(),
1935 pr_reg->pr_reg_nacl->initiatorname, isid_buf, 1932 pr_reg->pr_reg_nacl->initiatorname, isid_buf,
1936 pr_reg->pr_res_key, pr_reg->pr_res_type, 1933 pr_reg->pr_res_key, pr_reg->pr_res_type,
1937 pr_reg->pr_res_scope, pr_reg->pr_reg_all_tg_pt, 1934 pr_reg->pr_res_scope, pr_reg->pr_reg_all_tg_pt,
1938 pr_reg->pr_res_mapped_lun); 1935 pr_reg->pr_res_mapped_lun);
1939 } else { 1936 } else {
1940 snprintf(tmp, 512, "PR_REG_START: %d\n" 1937 snprintf(tmp, 512, "PR_REG_START: %d\n"
1941 "initiator_fabric=%s\ninitiator_node=%s\n%s" 1938 "initiator_fabric=%s\ninitiator_node=%s\n%s"
1942 "sa_res_key=%llu\nres_holder=0\n" 1939 "sa_res_key=%llu\nres_holder=0\n"
1943 "res_all_tg_pt=%d\nmapped_lun=%u\n", 1940 "res_all_tg_pt=%d\nmapped_lun=%u\n",
1944 reg_count, tpg->se_tpg_tfo->get_fabric_name(), 1941 reg_count, tpg->se_tpg_tfo->get_fabric_name(),
1945 pr_reg->pr_reg_nacl->initiatorname, isid_buf, 1942 pr_reg->pr_reg_nacl->initiatorname, isid_buf,
1946 pr_reg->pr_res_key, pr_reg->pr_reg_all_tg_pt, 1943 pr_reg->pr_res_key, pr_reg->pr_reg_all_tg_pt,
1947 pr_reg->pr_res_mapped_lun); 1944 pr_reg->pr_res_mapped_lun);
1948 } 1945 }
1949 1946
1950 if ((len + strlen(tmp) >= pr_aptpl_buf_len)) { 1947 if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
1951 pr_err("Unable to update renaming" 1948 pr_err("Unable to update renaming"
1952 " APTPL metadata\n"); 1949 " APTPL metadata\n");
1953 spin_unlock(&su_dev->t10_pr.registration_lock); 1950 spin_unlock(&su_dev->t10_pr.registration_lock);
1954 return -EMSGSIZE; 1951 return -EMSGSIZE;
1955 } 1952 }
1956 len += sprintf(buf+len, "%s", tmp); 1953 len += sprintf(buf+len, "%s", tmp);
1957 1954
1958 /* 1955 /*
1959 * Include information about the associated SCSI target port. 1956 * Include information about the associated SCSI target port.
1960 */ 1957 */
1961 snprintf(tmp, 512, "target_fabric=%s\ntarget_node=%s\n" 1958 snprintf(tmp, 512, "target_fabric=%s\ntarget_node=%s\n"
1962 "tpgt=%hu\nport_rtpi=%hu\ntarget_lun=%u\nPR_REG_END:" 1959 "tpgt=%hu\nport_rtpi=%hu\ntarget_lun=%u\nPR_REG_END:"
1963 " %d\n", tpg->se_tpg_tfo->get_fabric_name(), 1960 " %d\n", tpg->se_tpg_tfo->get_fabric_name(),
1964 tpg->se_tpg_tfo->tpg_get_wwn(tpg), 1961 tpg->se_tpg_tfo->tpg_get_wwn(tpg),
1965 tpg->se_tpg_tfo->tpg_get_tag(tpg), 1962 tpg->se_tpg_tfo->tpg_get_tag(tpg),
1966 lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count); 1963 lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count);
1967 1964
1968 if ((len + strlen(tmp) >= pr_aptpl_buf_len)) { 1965 if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
1969 pr_err("Unable to update renaming" 1966 pr_err("Unable to update renaming"
1970 " APTPL metadata\n"); 1967 " APTPL metadata\n");
1971 spin_unlock(&su_dev->t10_pr.registration_lock); 1968 spin_unlock(&su_dev->t10_pr.registration_lock);
1972 return -EMSGSIZE; 1969 return -EMSGSIZE;
1973 } 1970 }
1974 len += sprintf(buf+len, "%s", tmp); 1971 len += sprintf(buf+len, "%s", tmp);
1975 reg_count++; 1972 reg_count++;
1976 } 1973 }
1977 spin_unlock(&su_dev->t10_pr.registration_lock); 1974 spin_unlock(&su_dev->t10_pr.registration_lock);
1978 1975
1979 if (!reg_count) 1976 if (!reg_count)
1980 len += sprintf(buf+len, "No Registrations or Reservations"); 1977 len += sprintf(buf+len, "No Registrations or Reservations");
1981 1978
1982 return 0; 1979 return 0;
1983 } 1980 }
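/*
 * Pieced together from the snprintf() formats above, a single non-holder
 * record in the APTPL buffer comes out roughly as below (all values are
 * illustrative, not taken from a real configuration):
 *
 *   PR_REG_START: 0
 *   initiator_fabric=iSCSI
 *   initiator_node=iqn.1994-05.com.example:initiator
 *   initiator_sid=80          <- only if an ISID was present at registration
 *   sa_res_key=1234
 *   res_holder=0
 *   res_all_tg_pt=0
 *   mapped_lun=0
 *   target_fabric=iSCSI
 *   target_node=iqn.2003-01.org.example:target
 *   tpgt=1
 *   port_rtpi=1
 *   target_lun=0
 *   PR_REG_END: 0
 *
 * The reservation holder's record additionally carries res_holder=1,
 * res_type and res_scope, per the dev_pr_res_holder branch above.
 */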
1984 1981
1985 static int core_scsi3_update_aptpl_buf( 1982 static int core_scsi3_update_aptpl_buf(
1986 struct se_device *dev, 1983 struct se_device *dev,
1987 unsigned char *buf, 1984 unsigned char *buf,
1988 u32 pr_aptpl_buf_len, 1985 u32 pr_aptpl_buf_len,
1989 int clear_aptpl_metadata) 1986 int clear_aptpl_metadata)
1990 { 1987 {
1991 int ret; 1988 int ret;
1992 1989
1993 spin_lock(&dev->dev_reservation_lock); 1990 spin_lock(&dev->dev_reservation_lock);
1994 ret = __core_scsi3_update_aptpl_buf(dev, buf, pr_aptpl_buf_len, 1991 ret = __core_scsi3_update_aptpl_buf(dev, buf, pr_aptpl_buf_len,
1995 clear_aptpl_metadata); 1992 clear_aptpl_metadata);
1996 spin_unlock(&dev->dev_reservation_lock); 1993 spin_unlock(&dev->dev_reservation_lock);
1997 1994
1998 return ret; 1995 return ret;
1999 } 1996 }
2000 1997
2001 /* 1998 /*
2002 * Called with struct se_device->aptpl_file_mutex held 1999 * Called with struct se_device->aptpl_file_mutex held
2003 */ 2000 */
2004 static int __core_scsi3_write_aptpl_to_file( 2001 static int __core_scsi3_write_aptpl_to_file(
2005 struct se_device *dev, 2002 struct se_device *dev,
2006 unsigned char *buf, 2003 unsigned char *buf,
2007 u32 pr_aptpl_buf_len) 2004 u32 pr_aptpl_buf_len)
2008 { 2005 {
2009 struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn; 2006 struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
2010 struct file *file; 2007 struct file *file;
2011 struct iovec iov[1]; 2008 struct iovec iov[1];
2012 mm_segment_t old_fs; 2009 mm_segment_t old_fs;
2013 int flags = O_RDWR | O_CREAT | O_TRUNC; 2010 int flags = O_RDWR | O_CREAT | O_TRUNC;
2014 char path[512]; 2011 char path[512];
2015 int ret; 2012 int ret;
2016 2013
2017 memset(iov, 0, sizeof(struct iovec)); 2014 memset(iov, 0, sizeof(struct iovec));
2018 memset(path, 0, 512); 2015 memset(path, 0, 512);
2019 2016
2020 if (strlen(&wwn->unit_serial[0]) >= 512) { 2017 if (strlen(&wwn->unit_serial[0]) >= 512) {
2021 pr_err("WWN value for struct se_device does not fit" 2018 pr_err("WWN value for struct se_device does not fit"
2022 " into path buffer\n"); 2019 " into path buffer\n");
2023 return -EMSGSIZE; 2020 return -EMSGSIZE;
2024 } 2021 }
2025 2022
2026 snprintf(path, 512, "/var/target/pr/aptpl_%s", &wwn->unit_serial[0]); 2023 snprintf(path, 512, "/var/target/pr/aptpl_%s", &wwn->unit_serial[0]);
2027 file = filp_open(path, flags, 0600); 2024 file = filp_open(path, flags, 0600);
2028 if (IS_ERR(file) || !file || !file->f_dentry) { 2025 if (IS_ERR(file) || !file || !file->f_dentry) {
2029 pr_err("filp_open(%s) for APTPL metadata" 2026 pr_err("filp_open(%s) for APTPL metadata"
2030 " failed\n", path); 2027 " failed\n", path);
2031 return (PTR_ERR(file) < 0 ? PTR_ERR(file) : -ENOENT); 2028 return (PTR_ERR(file) < 0 ? PTR_ERR(file) : -ENOENT);
2032 } 2029 }
2033 2030
2034 iov[0].iov_base = &buf[0]; 2031 iov[0].iov_base = &buf[0];
2035 if (!pr_aptpl_buf_len) 2032 if (!pr_aptpl_buf_len)
2036 iov[0].iov_len = (strlen(&buf[0]) + 1); /* Add extra for NULL */ 2033 iov[0].iov_len = (strlen(&buf[0]) + 1); /* Add extra for NULL */
2037 else 2034 else
2038 iov[0].iov_len = pr_aptpl_buf_len; 2035 iov[0].iov_len = pr_aptpl_buf_len;
2039 2036
2040 old_fs = get_fs(); 2037 old_fs = get_fs();
2041 set_fs(get_ds()); 2038 set_fs(get_ds());
2042 ret = vfs_writev(file, &iov[0], 1, &file->f_pos); 2039 ret = vfs_writev(file, &iov[0], 1, &file->f_pos);
2043 set_fs(old_fs); 2040 set_fs(old_fs);
2044 2041
2045 if (ret < 0) { 2042 if (ret < 0) {
2046 pr_debug("Error writing APTPL metadata file: %s\n", path); 2043 pr_debug("Error writing APTPL metadata file: %s\n", path);
2047 filp_close(file, NULL); 2044 filp_close(file, NULL);
2048 return -EIO; 2045 return -EIO;
2049 } 2046 }
2050 filp_close(file, NULL); 2047 filp_close(file, NULL);
2051 2048
2052 return 0; 2049 return 0;
2053 } 2050 }
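/*
 * On the set_fs() dance above: vfs_writev() expects iov_base to be a
 * user-space pointer, so the function temporarily widens the addressable
 * segment to KERNEL_DS before writing a kernel buffer, then restores it.
 * A hedged sketch of the idiom (error handling trimmed; write_kbuf is a
 * made-up name, not an API from this file):
 */
#include <linux/fs.h>
#include <linux/uio.h>
#include <asm/uaccess.h>

static ssize_t write_kbuf(struct file *file, void *kbuf, size_t len)
{
	struct iovec iov = { .iov_base = kbuf, .iov_len = len };
	mm_segment_t old_fs = get_fs();
	ssize_t ret;

	set_fs(get_ds());	/* accept kernel pointers in "user" iov */
	ret = vfs_writev(file, &iov, 1, &file->f_pos);
	set_fs(old_fs);		/* always restore the previous segment */

	return ret;
}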
2054 2051
2055 static int core_scsi3_update_and_write_aptpl( 2052 static int core_scsi3_update_and_write_aptpl(
2056 struct se_device *dev, 2053 struct se_device *dev,
2057 unsigned char *in_buf, 2054 unsigned char *in_buf,
2058 u32 in_pr_aptpl_buf_len) 2055 u32 in_pr_aptpl_buf_len)
2059 { 2056 {
2060 unsigned char null_buf[64], *buf; 2057 unsigned char null_buf[64], *buf;
2061 u32 pr_aptpl_buf_len; 2058 u32 pr_aptpl_buf_len;
2062 int ret, clear_aptpl_metadata = 0; 2059 int ret, clear_aptpl_metadata = 0;
2063 /* 2060 /*
2064 * Can be called with a NULL pointer from PROUT service action CLEAR 2061 * Can be called with a NULL pointer from PROUT service action CLEAR
2065 */ 2062 */
2066 if (!in_buf) { 2063 if (!in_buf) {
2067 memset(null_buf, 0, 64); 2064 memset(null_buf, 0, 64);
2068 buf = &null_buf[0]; 2065 buf = &null_buf[0];
2069 /* 2066 /*
2070 * This will clear the APTPL metadata to: 2067 * This will clear the APTPL metadata to:
2071 * "No Registrations or Reservations" status 2068 * "No Registrations or Reservations" status
2072 */ 2069 */
2073 pr_aptpl_buf_len = 64; 2070 pr_aptpl_buf_len = 64;
2074 clear_aptpl_metadata = 1; 2071 clear_aptpl_metadata = 1;
2075 } else { 2072 } else {
2076 buf = in_buf; 2073 buf = in_buf;
2077 pr_aptpl_buf_len = in_pr_aptpl_buf_len; 2074 pr_aptpl_buf_len = in_pr_aptpl_buf_len;
2078 } 2075 }
2079 2076
2080 ret = core_scsi3_update_aptpl_buf(dev, buf, pr_aptpl_buf_len, 2077 ret = core_scsi3_update_aptpl_buf(dev, buf, pr_aptpl_buf_len,
2081 clear_aptpl_metadata); 2078 clear_aptpl_metadata);
2082 if (ret != 0) 2079 if (ret != 0)
2083 return ret; 2080 return ret;
2084 /* 2081 /*
2085 * __core_scsi3_write_aptpl_to_file() will call strlen() 2082 * __core_scsi3_write_aptpl_to_file() will call strlen()
2086 * on the passed buf to determine pr_aptpl_buf_len. 2083 * on the passed buf to determine pr_aptpl_buf_len.
2087 */ 2084 */
2088 ret = __core_scsi3_write_aptpl_to_file(dev, buf, 0); 2085 ret = __core_scsi3_write_aptpl_to_file(dev, buf, 0);
2089 if (ret != 0) 2086 if (ret != 0)
2090 return ret; 2087 return ret;
2091 2088
2092 return ret; 2089 return ret;
2093 } 2090 }
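/*
 * Hedged usage sketch of the helper above, showing the two call shapes
 * that appear later in this file (dev, pr_reg and pr_tmpl are assumed to
 * be in scope): a NULL buffer resets the metadata file to "No
 * Registrations or Reservations" (PROUT CLEAR, or APTPL=0), while an
 * active registration regenerates into its preallocated buffer.
 */
	/* APTPL deactivated: clear the on-disk metadata */
	core_scsi3_update_and_write_aptpl(dev, NULL, 0);

	/* APTPL activated: rewrite metadata from the registration list */
	ret = core_scsi3_update_and_write_aptpl(dev, &pr_reg->pr_aptpl_buf[0],
				pr_tmpl->pr_aptpl_buf_len);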
2094 2091
2095 static int core_scsi3_emulate_pro_register( 2092 static int core_scsi3_emulate_pro_register(
2096 struct se_cmd *cmd, 2093 struct se_cmd *cmd,
2097 u64 res_key, 2094 u64 res_key,
2098 u64 sa_res_key, 2095 u64 sa_res_key,
2099 int aptpl, 2096 int aptpl,
2100 int all_tg_pt, 2097 int all_tg_pt,
2101 int spec_i_pt, 2098 int spec_i_pt,
2102 int ignore_key) 2099 int ignore_key)
2103 { 2100 {
2104 struct se_session *se_sess = cmd->se_sess; 2101 struct se_session *se_sess = cmd->se_sess;
2105 struct se_device *dev = cmd->se_dev; 2102 struct se_device *dev = cmd->se_dev;
2106 struct se_dev_entry *se_deve; 2103 struct se_dev_entry *se_deve;
2107 struct se_lun *se_lun = cmd->se_lun; 2104 struct se_lun *se_lun = cmd->se_lun;
2108 struct se_portal_group *se_tpg; 2105 struct se_portal_group *se_tpg;
2109 struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_reg_tmp, *pr_reg_e; 2106 struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_reg_tmp, *pr_reg_e;
2110 struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; 2107 struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
2111 /* Used for APTPL metadata w/ UNREGISTER */ 2108 /* Used for APTPL metadata w/ UNREGISTER */
2112 unsigned char *pr_aptpl_buf = NULL; 2109 unsigned char *pr_aptpl_buf = NULL;
2113 unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL; 2110 unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL;
2114 int pr_holder = 0, ret = 0, type; 2111 int pr_holder = 0, ret = 0, type;
2115 2112
2116 if (!se_sess || !se_lun) { 2113 if (!se_sess || !se_lun) {
2117 pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); 2114 pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
2118 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2115 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2119 return -EINVAL; 2116 return -EINVAL;
2120 } 2117 }
2121 se_tpg = se_sess->se_tpg; 2118 se_tpg = se_sess->se_tpg;
2122 se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; 2119 se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
2123 2120
2124 if (se_tpg->se_tpg_tfo->sess_get_initiator_sid) { 2121 if (se_tpg->se_tpg_tfo->sess_get_initiator_sid) {
2125 memset(&isid_buf[0], 0, PR_REG_ISID_LEN); 2122 memset(&isid_buf[0], 0, PR_REG_ISID_LEN);
2126 se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, &isid_buf[0], 2123 se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, &isid_buf[0],
2127 PR_REG_ISID_LEN); 2124 PR_REG_ISID_LEN);
2128 isid_ptr = &isid_buf[0]; 2125 isid_ptr = &isid_buf[0];
2129 } 2126 }
2130 /* 2127 /*
2131 * Follow logic from spc4r17 Section 5.7.7, Register Behaviors Table 47 2128 * Follow logic from spc4r17 Section 5.7.7, Register Behaviors Table 47
2132 */ 2129 */
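/*
 * A hedged summary of the Register Behaviors the code below implements
 * (after spc4r17 Table 47), as this function branches on them:
 *
 *   no registration, res_key != 0                 -> RESERVATION CONFLICT
 *   no registration, sa_res_key == 0              -> no-op, GOOD status
 *   no registration, sa_res_key != 0              -> create registration(s);
 *                                                    SPEC_I_PT=1 also decodes
 *                                                    the TransportID list
 *   registration exists, res_key mismatch
 *     (unless REGISTER_AND_IGNORE_EXISTING_KEY)   -> RESERVATION CONFLICT
 *   registration exists, sa_res_key == 0          -> unregister the nexus
 *   registration exists, sa_res_key != 0          -> change reservation key
 */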
2133 pr_reg_e = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess); 2130 pr_reg_e = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess);
2134 if (!pr_reg_e) { 2131 if (!pr_reg_e) {
2135 if (res_key) { 2132 if (res_key) {
2136 pr_warn("SPC-3 PR: Reservation Key non-zero" 2133 pr_warn("SPC-3 PR: Reservation Key non-zero"
2137 " for SA REGISTER, returning CONFLICT\n"); 2134 " for SA REGISTER, returning CONFLICT\n");
2138 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; 2135 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
2139 return -EINVAL; 2136 return -EINVAL;
2140 } 2137 }
2141 /* 2138 /*
2142 * Do nothing but return GOOD status. 2139 * Do nothing but return GOOD status.
2143 */ 2140 */
2144 if (!sa_res_key) 2141 if (!sa_res_key)
2145 return 0; 2142 return 0;
2146 2143
2147 if (!spec_i_pt) { 2144 if (!spec_i_pt) {
2148 /* 2145 /*
2149 * Perform the Service Action REGISTER on the Initiator 2146 * Perform the Service Action REGISTER on the Initiator
2150 * Port Endpoint that the PRO was received from on the 2147 * Port Endpoint that the PRO was received from on the
2151 * Logical Unit of the SCSI device server. 2148 * Logical Unit of the SCSI device server.
2152 */ 2149 */
2153 ret = core_scsi3_alloc_registration(cmd->se_dev, 2150 ret = core_scsi3_alloc_registration(cmd->se_dev,
2154 se_sess->se_node_acl, se_deve, isid_ptr, 2151 se_sess->se_node_acl, se_deve, isid_ptr,
2155 sa_res_key, all_tg_pt, aptpl, 2152 sa_res_key, all_tg_pt, aptpl,
2156 ignore_key, 0); 2153 ignore_key, 0);
2157 if (ret != 0) { 2154 if (ret != 0) {
2158 pr_err("Unable to allocate" 2155 pr_err("Unable to allocate"
2159 " struct t10_pr_registration\n"); 2156 " struct t10_pr_registration\n");
2160 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; 2157 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
2161 return -EINVAL; 2158 return -EINVAL;
2162 } 2159 }
2163 } else { 2160 } else {
2164 /* 2161 /*
2165 * Register both the Initiator port that received 2162 * Register both the Initiator port that received
2166 * PROUT SA REGISTER + SPEC_I_PT=1 and extract SCSI 2163 * PROUT SA REGISTER + SPEC_I_PT=1 and extract SCSI
2167 * TransportID from Parameter list and loop through 2164 * TransportID from Parameter list and loop through
2168 * fabric dependent parameter list while calling 2165 * fabric dependent parameter list while calling
2169 * logic from core_scsi3_alloc_registration() for 2166 * logic from core_scsi3_alloc_registration() for
2170 * each TransportID provided SCSI Initiator Port/Device 2167 * each TransportID provided SCSI Initiator Port/Device
2171 */ 2168 */
2172 ret = core_scsi3_decode_spec_i_port(cmd, se_tpg, 2169 ret = core_scsi3_decode_spec_i_port(cmd, se_tpg,
2173 isid_ptr, sa_res_key, all_tg_pt, aptpl); 2170 isid_ptr, sa_res_key, all_tg_pt, aptpl);
2174 if (ret != 0) 2171 if (ret != 0)
2175 return ret; 2172 return ret;
2176 } 2173 }
2177 /* 2174 /*
2178 * Nothing left to do for the APTPL=0 case. 2175 * Nothing left to do for the APTPL=0 case.
2179 */ 2176 */
2180 if (!aptpl) { 2177 if (!aptpl) {
2181 pr_tmpl->pr_aptpl_active = 0; 2178 pr_tmpl->pr_aptpl_active = 0;
2182 core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0); 2179 core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0);
2183 pr_debug("SPC-3 PR: Set APTPL Bit Deactivated for" 2180 pr_debug("SPC-3 PR: Set APTPL Bit Deactivated for"
2184 " REGISTER\n"); 2181 " REGISTER\n");
2185 return 0; 2182 return 0;
2186 } 2183 }
2187 /* 2184 /*
2188 * Locate the newly allocated local I_T Nexus *pr_reg, and 2185 * Locate the newly allocated local I_T Nexus *pr_reg, and
2189 * update the APTPL metadata information using its 2186 * update the APTPL metadata information using its
2190 * preallocated *pr_reg->pr_aptpl_buf. 2187 * preallocated *pr_reg->pr_aptpl_buf.
2191 */ 2188 */
2192 pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, 2189 pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev,
2193 se_sess->se_node_acl, se_sess); 2190 se_sess->se_node_acl, se_sess);
2194 2191
2195 ret = core_scsi3_update_and_write_aptpl(cmd->se_dev, 2192 ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
2196 &pr_reg->pr_aptpl_buf[0], 2193 &pr_reg->pr_aptpl_buf[0],
2197 pr_tmpl->pr_aptpl_buf_len); 2194 pr_tmpl->pr_aptpl_buf_len);
2198 if (!ret) { 2195 if (!ret) {
2199 pr_tmpl->pr_aptpl_active = 1; 2196 pr_tmpl->pr_aptpl_active = 1;
2200 pr_debug("SPC-3 PR: Set APTPL Bit Activated for REGISTER\n"); 2197 pr_debug("SPC-3 PR: Set APTPL Bit Activated for REGISTER\n");
2201 } 2198 }
2202 2199
2203 core_scsi3_put_pr_reg(pr_reg); 2200 core_scsi3_put_pr_reg(pr_reg);
2204 return ret; 2201 return ret;
2205 } else { 2202 } else {
2206 /* 2203 /*
2207 * Locate the existing *pr_reg via struct se_node_acl pointers 2204 * Locate the existing *pr_reg via struct se_node_acl pointers
2208 */ 2205 */
2209 pr_reg = pr_reg_e; 2206 pr_reg = pr_reg_e;
2210 type = pr_reg->pr_res_type; 2207 type = pr_reg->pr_res_type;
2211 2208
2212 if (!ignore_key) { 2209 if (!ignore_key) {
2213 if (res_key != pr_reg->pr_res_key) { 2210 if (res_key != pr_reg->pr_res_key) {
2214 pr_err("SPC-3 PR REGISTER: Received" 2211 pr_err("SPC-3 PR REGISTER: Received"
2215 " res_key: 0x%016Lx does not match" 2212 " res_key: 0x%016Lx does not match"
2216 " existing SA REGISTER res_key:" 2213 " existing SA REGISTER res_key:"
2217 " 0x%016Lx\n", res_key, 2214 " 0x%016Lx\n", res_key,
2218 pr_reg->pr_res_key); 2215 pr_reg->pr_res_key);
2219 core_scsi3_put_pr_reg(pr_reg); 2216 core_scsi3_put_pr_reg(pr_reg);
2220 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; 2217 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
2221 return -EINVAL; 2218 return -EINVAL;
2222 } 2219 }
2223 } 2220 }
2224 if (spec_i_pt) { 2221 if (spec_i_pt) {
2225 pr_err("SPC-3 PR UNREGISTER: SPEC_I_PT" 2222 pr_err("SPC-3 PR UNREGISTER: SPEC_I_PT"
2226 " set while sa_res_key=0\n"); 2223 " set while sa_res_key=0\n");
2227 core_scsi3_put_pr_reg(pr_reg); 2224 core_scsi3_put_pr_reg(pr_reg);
2228 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; 2225 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
2229 return -EINVAL; 2226 return -EINVAL;
2230 } 2227 }
2231 /* 2228 /*
2232 * An existing ALL_TG_PT=1 registration being released 2229 * An existing ALL_TG_PT=1 registration being released
2233 * must also set ALL_TG_PT=1 in the incoming PROUT. 2230 * must also set ALL_TG_PT=1 in the incoming PROUT.
2234 */ 2231 */
2235 if (pr_reg->pr_reg_all_tg_pt && !(all_tg_pt)) { 2232 if (pr_reg->pr_reg_all_tg_pt && !(all_tg_pt)) {
2236 pr_err("SPC-3 PR UNREGISTER: ALL_TG_PT=1" 2233 pr_err("SPC-3 PR UNREGISTER: ALL_TG_PT=1"
2237 " registration exists, but ALL_TG_PT=1 bit not" 2234 " registration exists, but ALL_TG_PT=1 bit not"
2238 " present in received PROUT\n"); 2235 " present in received PROUT\n");
2239 core_scsi3_put_pr_reg(pr_reg); 2236 core_scsi3_put_pr_reg(pr_reg);
2240 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; 2237 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
2241 return -EINVAL; 2238 return -EINVAL;
2242 } 2239 }
2243 /* 2240 /*
2244 * Allocate APTPL metadata buffer used for UNREGISTER ops 2241 * Allocate APTPL metadata buffer used for UNREGISTER ops
2245 */ 2242 */
2246 if (aptpl) { 2243 if (aptpl) {
2247 pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len, 2244 pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len,
2248 GFP_KERNEL); 2245 GFP_KERNEL);
2249 if (!pr_aptpl_buf) { 2246 if (!pr_aptpl_buf) {
2250 pr_err("Unable to allocate" 2247 pr_err("Unable to allocate"
2251 " pr_aptpl_buf\n"); 2248 " pr_aptpl_buf\n");
2252 core_scsi3_put_pr_reg(pr_reg); 2249 core_scsi3_put_pr_reg(pr_reg);
2253 cmd->scsi_sense_reason = 2250 cmd->scsi_sense_reason =
2254 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2251 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2255 return -EINVAL; 2252 return -EINVAL;
2256 } 2253 }
2257 } 2254 }
2258 /* 2255 /*
2259 * sa_res_key=0 Unregister Reservation Key for registered I_T 2256 * sa_res_key=0 Unregister Reservation Key for registered I_T
2260 * Nexus. sa_res_key=1 Change Reservation Key for registered I_T 2257 * Nexus. sa_res_key=1 Change Reservation Key for registered I_T
2261 * Nexus. 2258 * Nexus.
2262 */ 2259 */
2263 if (!sa_res_key) { 2260 if (!sa_res_key) {
2264 pr_holder = core_scsi3_check_implict_release( 2261 pr_holder = core_scsi3_check_implict_release(
2265 cmd->se_dev, pr_reg); 2262 cmd->se_dev, pr_reg);
2266 if (pr_holder < 0) { 2263 if (pr_holder < 0) {
2267 kfree(pr_aptpl_buf); 2264 kfree(pr_aptpl_buf);
2268 core_scsi3_put_pr_reg(pr_reg); 2265 core_scsi3_put_pr_reg(pr_reg);
2269 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; 2266 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
2270 return -EINVAL; 2267 return -EINVAL;
2271 } 2268 }
2272 2269
2273 spin_lock(&pr_tmpl->registration_lock); 2270 spin_lock(&pr_tmpl->registration_lock);
2274 /* 2271 /*
2275 * Release all ALL_TG_PT=1 for the matching SCSI Initiator Port 2272 * Release all ALL_TG_PT=1 for the matching SCSI Initiator Port
2276 * and matching pr_res_key. 2273 * and matching pr_res_key.
2277 */ 2274 */
2278 if (pr_reg->pr_reg_all_tg_pt) { 2275 if (pr_reg->pr_reg_all_tg_pt) {
2279 list_for_each_entry_safe(pr_reg_p, pr_reg_tmp, 2276 list_for_each_entry_safe(pr_reg_p, pr_reg_tmp,
2280 &pr_tmpl->registration_list, 2277 &pr_tmpl->registration_list,
2281 pr_reg_list) { 2278 pr_reg_list) {
2282 2279
2283 if (!pr_reg_p->pr_reg_all_tg_pt) 2280 if (!pr_reg_p->pr_reg_all_tg_pt)
2284 continue; 2281 continue;
2285 2282
2286 if (pr_reg_p->pr_res_key != res_key) 2283 if (pr_reg_p->pr_res_key != res_key)
2287 continue; 2284 continue;
2288 2285
2289 if (pr_reg == pr_reg_p) 2286 if (pr_reg == pr_reg_p)
2290 continue; 2287 continue;
2291 2288
2292 if (strcmp(pr_reg->pr_reg_nacl->initiatorname, 2289 if (strcmp(pr_reg->pr_reg_nacl->initiatorname,
2293 pr_reg_p->pr_reg_nacl->initiatorname)) 2290 pr_reg_p->pr_reg_nacl->initiatorname))
2294 continue; 2291 continue;
2295 2292
2296 __core_scsi3_free_registration(dev, 2293 __core_scsi3_free_registration(dev,
2297 pr_reg_p, NULL, 0); 2294 pr_reg_p, NULL, 0);
2298 } 2295 }
2299 } 2296 }
2300 /* 2297 /*
2301 * Release the calling I_T Nexus registration now.. 2298 * Release the calling I_T Nexus registration now..
2302 */ 2299 */
2303 __core_scsi3_free_registration(cmd->se_dev, pr_reg, 2300 __core_scsi3_free_registration(cmd->se_dev, pr_reg,
2304 NULL, 1); 2301 NULL, 1);
2305 /* 2302 /*
2306 * From spc4r17, section 5.7.11.3 Unregistering 2303 * From spc4r17, section 5.7.11.3 Unregistering
2307 * 2304 *
2308 * If the persistent reservation is a registrants only 2305 * If the persistent reservation is a registrants only
2309 * type, the device server shall establish a unit 2306 * type, the device server shall establish a unit
2310 * attention condition for the initiator port associated 2307 * attention condition for the initiator port associated
2311 * with every registered I_T nexus except for the I_T 2308 * with every registered I_T nexus except for the I_T
2312 * nexus on which the PERSISTENT RESERVE OUT command was 2309 * nexus on which the PERSISTENT RESERVE OUT command was
2313 * received, with the additional sense code set to 2310 * received, with the additional sense code set to
2314 * RESERVATIONS RELEASED. 2311 * RESERVATIONS RELEASED.
2315 */ 2312 */
2316 if (pr_holder && 2313 if (pr_holder &&
2317 ((type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY) || 2314 ((type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY) ||
2318 (type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY))) { 2315 (type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY))) {
2319 list_for_each_entry(pr_reg_p, 2316 list_for_each_entry(pr_reg_p,
2320 &pr_tmpl->registration_list, 2317 &pr_tmpl->registration_list,
2321 pr_reg_list) { 2318 pr_reg_list) {
2322 2319
2323 core_scsi3_ua_allocate( 2320 core_scsi3_ua_allocate(
2324 pr_reg_p->pr_reg_nacl, 2321 pr_reg_p->pr_reg_nacl,
2325 pr_reg_p->pr_res_mapped_lun, 2322 pr_reg_p->pr_res_mapped_lun,
2326 0x2A, 2323 0x2A,
2327 ASCQ_2AH_RESERVATIONS_RELEASED); 2324 ASCQ_2AH_RESERVATIONS_RELEASED);
2328 } 2325 }
2329 } 2326 }
2330 spin_unlock(&pr_tmpl->registration_lock); 2327 spin_unlock(&pr_tmpl->registration_lock);
2331 2328
2332 if (!aptpl) { 2329 if (!aptpl) {
2333 pr_tmpl->pr_aptpl_active = 0; 2330 pr_tmpl->pr_aptpl_active = 0;
2334 core_scsi3_update_and_write_aptpl(dev, NULL, 0); 2331 core_scsi3_update_and_write_aptpl(dev, NULL, 0);
2335 pr_debug("SPC-3 PR: Set APTPL Bit Deactivated" 2332 pr_debug("SPC-3 PR: Set APTPL Bit Deactivated"
2336 " for UNREGISTER\n"); 2333 " for UNREGISTER\n");
2337 return 0; 2334 return 0;
2338 } 2335 }
2339 2336
2340 ret = core_scsi3_update_and_write_aptpl(dev, 2337 ret = core_scsi3_update_and_write_aptpl(dev,
2341 &pr_aptpl_buf[0], 2338 &pr_aptpl_buf[0],
2342 pr_tmpl->pr_aptpl_buf_len); 2339 pr_tmpl->pr_aptpl_buf_len);
2343 if (!ret) { 2340 if (!ret) {
2344 pr_tmpl->pr_aptpl_active = 1; 2341 pr_tmpl->pr_aptpl_active = 1;
2345 pr_debug("SPC-3 PR: Set APTPL Bit Activated" 2342 pr_debug("SPC-3 PR: Set APTPL Bit Activated"
2346 " for UNREGISTER\n"); 2343 " for UNREGISTER\n");
2347 } 2344 }
2348 2345
2349 kfree(pr_aptpl_buf); 2346 kfree(pr_aptpl_buf);
2350 return ret; 2347 return ret;
2351 } else { 2348 } else {
2352 /* 2349 /*
2353 * Increment PRgeneration counter for struct se_device 2350 * Increment PRgeneration counter for struct se_device
2354 * upon a successful REGISTER, see spc4r17 section 6.3.2 2351 * upon a successful REGISTER, see spc4r17 section 6.3.2
2355 * READ_KEYS service action. 2352 * READ_KEYS service action.
2356 */ 2353 */
2357 pr_reg->pr_res_generation = core_scsi3_pr_generation( 2354 pr_reg->pr_res_generation = core_scsi3_pr_generation(
2358 cmd->se_dev); 2355 cmd->se_dev);
2359 pr_reg->pr_res_key = sa_res_key; 2356 pr_reg->pr_res_key = sa_res_key;
2360 pr_debug("SPC-3 PR [%s] REGISTER%s: Changed Reservation" 2357 pr_debug("SPC-3 PR [%s] REGISTER%s: Changed Reservation"
2361 " Key for %s to: 0x%016Lx PRgeneration:" 2358 " Key for %s to: 0x%016Lx PRgeneration:"
2362 " 0x%08x\n", cmd->se_tfo->get_fabric_name(), 2359 " 0x%08x\n", cmd->se_tfo->get_fabric_name(),
2363 (ignore_key) ? "_AND_IGNORE_EXISTING_KEY" : "", 2360 (ignore_key) ? "_AND_IGNORE_EXISTING_KEY" : "",
2364 pr_reg->pr_reg_nacl->initiatorname, 2361 pr_reg->pr_reg_nacl->initiatorname,
2365 pr_reg->pr_res_key, pr_reg->pr_res_generation); 2362 pr_reg->pr_res_key, pr_reg->pr_res_generation);
2366 2363
2367 if (!aptpl) { 2364 if (!aptpl) {
2368 pr_tmpl->pr_aptpl_active = 0; 2365 pr_tmpl->pr_aptpl_active = 0;
2369 core_scsi3_update_and_write_aptpl(dev, NULL, 0); 2366 core_scsi3_update_and_write_aptpl(dev, NULL, 0);
2370 core_scsi3_put_pr_reg(pr_reg); 2367 core_scsi3_put_pr_reg(pr_reg);
2371 pr_debug("SPC-3 PR: Set APTPL Bit Deactivated" 2368 pr_debug("SPC-3 PR: Set APTPL Bit Deactivated"
2372 " for REGISTER\n"); 2369 " for REGISTER\n");
2373 return 0; 2370 return 0;
2374 } 2371 }
2375 2372
2376 ret = core_scsi3_update_and_write_aptpl(dev, 2373 ret = core_scsi3_update_and_write_aptpl(dev,
2377 &pr_aptpl_buf[0], 2374 &pr_aptpl_buf[0],
2378 pr_tmpl->pr_aptpl_buf_len); 2375 pr_tmpl->pr_aptpl_buf_len);
2379 if (!ret) { 2376 if (!ret) {
2380 pr_tmpl->pr_aptpl_active = 1; 2377 pr_tmpl->pr_aptpl_active = 1;
2381 pr_debug("SPC-3 PR: Set APTPL Bit Activated" 2378 pr_debug("SPC-3 PR: Set APTPL Bit Activated"
2382 " for REGISTER\n"); 2379 " for REGISTER\n");
2383 } 2380 }
2384 2381
2385 kfree(pr_aptpl_buf); 2382 kfree(pr_aptpl_buf);
2386 core_scsi3_put_pr_reg(pr_reg); 2383 core_scsi3_put_pr_reg(pr_reg);
2387 } 2384 }
2388 } 2385 }
2389 return 0; 2386 return 0;
2390 } 2387 }
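
The REGISTER paths above branch entirely on the SERVICE ACTION RESERVATION KEY: zero unregisters the calling I_T nexus, non-zero changes its key, and APTPL decides whether the update is written out. As a minimal initiator-side sketch (a hypothetical helper, not part of the target core), the basic 24-byte PERSISTENT RESERVE OUT parameter list that feeds these paths packs the reservation key into bytes 0-7, the service action reservation key into bytes 8-15, and APTPL into bit 0 of byte 20, per spc4r17:

#include <stdint.h>
#include <string.h>

/* Hypothetical helper: build the basic spc4r17 PROUT parameter list.
 * sa_res_key == 0 drives the unregister branch above; a non-zero key
 * drives the change-of-key branch. */
static void build_prout_param_list(uint8_t buf[24], uint64_t res_key,
				   uint64_t sa_res_key, int aptpl)
{
	int i;

	memset(buf, 0, 24);
	for (i = 0; i < 8; i++) {
		buf[i] = (uint8_t)(res_key >> (56 - i * 8));
		buf[8 + i] = (uint8_t)(sa_res_key >> (56 - i * 8));
	}
	if (aptpl)
		buf[20] |= 0x01;	/* APTPL: byte 20, bit 0 */
}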
2391 2388
2392 unsigned char *core_scsi3_pr_dump_type(int type) 2389 unsigned char *core_scsi3_pr_dump_type(int type)
2393 { 2390 {
2394 switch (type) { 2391 switch (type) {
2395 case PR_TYPE_WRITE_EXCLUSIVE: 2392 case PR_TYPE_WRITE_EXCLUSIVE:
2396 return "Write Exclusive Access"; 2393 return "Write Exclusive Access";
2397 case PR_TYPE_EXCLUSIVE_ACCESS: 2394 case PR_TYPE_EXCLUSIVE_ACCESS:
2398 return "Exclusive Access"; 2395 return "Exclusive Access";
2399 case PR_TYPE_WRITE_EXCLUSIVE_REGONLY: 2396 case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
2400 return "Write Exclusive Access, Registrants Only"; 2397 return "Write Exclusive Access, Registrants Only";
2401 case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY: 2398 case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
2402 return "Exclusive Access, Registrants Only"; 2399 return "Exclusive Access, Registrants Only";
2403 case PR_TYPE_WRITE_EXCLUSIVE_ALLREG: 2400 case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
2404 return "Write Exclusive Access, All Registrants"; 2401 return "Write Exclusive Access, All Registrants";
2405 case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG: 2402 case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
2406 return "Exclusive Access, All Registrants"; 2403 return "Exclusive Access, All Registrants";
2407 default: 2404 default:
2408 break; 2405 break;
2409 } 2406 }
2410 2407
2411 return "Unknown SPC-3 PR Type"; 2408 return "Unknown SPC-3 PR Type";
2412 } 2409 }
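
The six cases above decode the TYPE field of the PERSISTENT RESERVE OUT CDB. For reference, a standalone enum of those codes as assigned by SPC-3 (the in-kernel PR_TYPE_* macros carry the same values):

/* SPC-3 PROUT TYPE field codes, mirrored here for reference only */
enum spc3_pr_type {
	SPC3_PR_WRITE_EXCLUSIVE		= 0x01,
	SPC3_PR_EXCLUSIVE_ACCESS	= 0x03,
	SPC3_PR_WRITE_EXCL_REGONLY	= 0x05,
	SPC3_PR_EXCL_ACCESS_REGONLY	= 0x06,
	SPC3_PR_WRITE_EXCL_ALLREG	= 0x07,
	SPC3_PR_EXCL_ACCESS_ALLREG	= 0x08,
};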
2413 2410
2414 static int core_scsi3_pro_reserve( 2411 static int core_scsi3_pro_reserve(
2415 struct se_cmd *cmd, 2412 struct se_cmd *cmd,
2416 struct se_device *dev, 2413 struct se_device *dev,
2417 int type, 2414 int type,
2418 int scope, 2415 int scope,
2419 u64 res_key) 2416 u64 res_key)
2420 { 2417 {
2421 struct se_session *se_sess = cmd->se_sess; 2418 struct se_session *se_sess = cmd->se_sess;
2422 struct se_dev_entry *se_deve; 2419 struct se_dev_entry *se_deve;
2423 struct se_lun *se_lun = cmd->se_lun; 2420 struct se_lun *se_lun = cmd->se_lun;
2424 struct se_portal_group *se_tpg; 2421 struct se_portal_group *se_tpg;
2425 struct t10_pr_registration *pr_reg, *pr_res_holder; 2422 struct t10_pr_registration *pr_reg, *pr_res_holder;
2426 struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; 2423 struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
2427 char i_buf[PR_REG_ISID_ID_LEN]; 2424 char i_buf[PR_REG_ISID_ID_LEN];
2428 int ret, prf_isid; 2425 int ret, prf_isid;
2429 2426
2430 memset(i_buf, 0, PR_REG_ISID_ID_LEN); 2427 memset(i_buf, 0, PR_REG_ISID_ID_LEN);
2431 2428
2432 if (!se_sess || !se_lun) { 2429 if (!se_sess || !se_lun) {
2433 pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); 2430 pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
2434 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2431 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2435 return -EINVAL; 2432 return -EINVAL;
2436 } 2433 }
2437 se_tpg = se_sess->se_tpg; 2434 se_tpg = se_sess->se_tpg;
2438 se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; 2435 se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
2439 /* 2436 /*
2440 * Locate the existing *pr_reg via struct se_node_acl pointers 2437 * Locate the existing *pr_reg via struct se_node_acl pointers
2441 */ 2438 */
2442 pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, 2439 pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
2443 se_sess); 2440 se_sess);
2444 if (!pr_reg) { 2441 if (!pr_reg) {
2445 pr_err("SPC-3 PR: Unable to locate" 2442 pr_err("SPC-3 PR: Unable to locate"
2446 " PR_REGISTERED *pr_reg for RESERVE\n"); 2443 " PR_REGISTERED *pr_reg for RESERVE\n");
2447 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2444 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2448 return -EINVAL; 2445 return -EINVAL;
2449 } 2446 }
2450 /* 2447 /*
2451 * From spc4r17 Section 5.7.9: Reserving: 2448 * From spc4r17 Section 5.7.9: Reserving:
2452 * 2449 *
2453 * An application client creates a persistent reservation by issuing 2450 * An application client creates a persistent reservation by issuing
2454 * a PERSISTENT RESERVE OUT command with RESERVE service action through 2451 * a PERSISTENT RESERVE OUT command with RESERVE service action through
2455 * a registered I_T nexus with the following parameters: 2452 * a registered I_T nexus with the following parameters:
2456 * a) RESERVATION KEY set to the value of the reservation key that is 2453 * a) RESERVATION KEY set to the value of the reservation key that is
2457 * registered with the logical unit for the I_T nexus; and 2454 * registered with the logical unit for the I_T nexus; and
2458 */ 2455 */
2459 if (res_key != pr_reg->pr_res_key) { 2456 if (res_key != pr_reg->pr_res_key) {
2460 pr_err("SPC-3 PR RESERVE: Received res_key: 0x%016Lx" 2457 pr_err("SPC-3 PR RESERVE: Received res_key: 0x%016Lx"
2461 " does not match existing SA REGISTER res_key:" 2458 " does not match existing SA REGISTER res_key:"
2462 " 0x%016Lx\n", res_key, pr_reg->pr_res_key); 2459 " 0x%016Lx\n", res_key, pr_reg->pr_res_key);
2463 core_scsi3_put_pr_reg(pr_reg); 2460 core_scsi3_put_pr_reg(pr_reg);
2464 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; 2461 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
2465 return -EINVAL; 2462 return -EINVAL;
2466 } 2463 }
2467 /* 2464 /*
2468 * From spc4r17 Section 5.7.9: Reserving: 2465 * From spc4r17 Section 5.7.9: Reserving:
2469 * 2466 *
2470 * From above: 2467 * From above:
2471 * b) TYPE field and SCOPE field set to the persistent reservation 2468 * b) TYPE field and SCOPE field set to the persistent reservation
2472 * being created. 2469 * being created.
2473 * 2470 *
2474 * Only one persistent reservation is allowed at a time per logical unit 2471 * Only one persistent reservation is allowed at a time per logical unit
2475 * and that persistent reservation has a scope of LU_SCOPE. 2472 * and that persistent reservation has a scope of LU_SCOPE.
2476 */ 2473 */
2477 if (scope != PR_SCOPE_LU_SCOPE) { 2474 if (scope != PR_SCOPE_LU_SCOPE) {
2478 pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope); 2475 pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
2479 core_scsi3_put_pr_reg(pr_reg); 2476 core_scsi3_put_pr_reg(pr_reg);
2480 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; 2477 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
2481 return -EINVAL; 2478 return -EINVAL;
2482 } 2479 }
2483 /* 2480 /*
2484 * See if we have an existing PR reservation holder pointer at 2481 * See if we have an existing PR reservation holder pointer at
2485 * struct se_device->dev_pr_res_holder in the form struct t10_pr_registration 2482 * struct se_device->dev_pr_res_holder in the form struct t10_pr_registration
2486 * *pr_res_holder. 2483 * *pr_res_holder.
2487 */ 2484 */
2488 spin_lock(&dev->dev_reservation_lock); 2485 spin_lock(&dev->dev_reservation_lock);
2489 pr_res_holder = dev->dev_pr_res_holder; 2486 pr_res_holder = dev->dev_pr_res_holder;
2490 if (pr_res_holder) { 2487 if (pr_res_holder) {
2491 /* 2488 /*
2492 * From spc4r17 Section 5.7.9: Reserving: 2489 * From spc4r17 Section 5.7.9: Reserving:
2493 * 2490 *
2494 * If the device server receives a PERSISTENT RESERVE OUT 2491 * If the device server receives a PERSISTENT RESERVE OUT
2495 * command from an I_T nexus other than a persistent reservation 2492 * command from an I_T nexus other than a persistent reservation
2496 * holder (see 5.7.10) that attempts to create a persistent 2493 * holder (see 5.7.10) that attempts to create a persistent
2497 * reservation when a persistent reservation already exists for 2494 * reservation when a persistent reservation already exists for
2498 * the logical unit, then the command shall be completed with 2495 * the logical unit, then the command shall be completed with
2499 * RESERVATION CONFLICT status. 2496 * RESERVATION CONFLICT status.
2500 */ 2497 */
2501 if (pr_res_holder != pr_reg) { 2498 if (pr_res_holder != pr_reg) {
2502 struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl; 2499 struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
2503 pr_err("SPC-3 PR: Attempted RESERVE from" 2500 pr_err("SPC-3 PR: Attempted RESERVE from"
2504 " [%s]: %s while reservation already held by" 2501 " [%s]: %s while reservation already held by"
2505 " [%s]: %s, returning RESERVATION_CONFLICT\n", 2502 " [%s]: %s, returning RESERVATION_CONFLICT\n",
2506 cmd->se_tfo->get_fabric_name(), 2503 cmd->se_tfo->get_fabric_name(),
2507 se_sess->se_node_acl->initiatorname, 2504 se_sess->se_node_acl->initiatorname,
2508 pr_res_nacl->se_tpg->se_tpg_tfo->get_fabric_name(), 2505 pr_res_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
2509 pr_res_holder->pr_reg_nacl->initiatorname); 2506 pr_res_holder->pr_reg_nacl->initiatorname);
2510 2507
2511 spin_unlock(&dev->dev_reservation_lock); 2508 spin_unlock(&dev->dev_reservation_lock);
2512 core_scsi3_put_pr_reg(pr_reg); 2509 core_scsi3_put_pr_reg(pr_reg);
2513 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; 2510 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
2514 return -EINVAL; 2511 return -EINVAL;
2515 } 2512 }
2516 /* 2513 /*
2517 * From spc4r17 Section 5.7.9: Reserving: 2514 * From spc4r17 Section 5.7.9: Reserving:
2518 * 2515 *
2519 * If a persistent reservation holder attempts to modify the 2516 * If a persistent reservation holder attempts to modify the
2520 * type or scope of an existing persistent reservation, the 2517 * type or scope of an existing persistent reservation, the
2521 * command shall be completed with RESERVATION CONFLICT status. 2518 * command shall be completed with RESERVATION CONFLICT status.
2522 */ 2519 */
2523 if ((pr_res_holder->pr_res_type != type) || 2520 if ((pr_res_holder->pr_res_type != type) ||
2524 (pr_res_holder->pr_res_scope != scope)) { 2521 (pr_res_holder->pr_res_scope != scope)) {
2525 struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl; 2522 struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
2526 pr_err("SPC-3 PR: Attempted RESERVE from" 2523 pr_err("SPC-3 PR: Attempted RESERVE from"
2527 " [%s]: %s trying to change TYPE and/or SCOPE," 2524 " [%s]: %s trying to change TYPE and/or SCOPE,"
2528 " while reservation already held by [%s]: %s," 2525 " while reservation already held by [%s]: %s,"
2529 " returning RESERVATION_CONFLICT\n", 2526 " returning RESERVATION_CONFLICT\n",
2530 cmd->se_tfo->get_fabric_name(), 2527 cmd->se_tfo->get_fabric_name(),
2531 se_sess->se_node_acl->initiatorname, 2528 se_sess->se_node_acl->initiatorname,
2532 pr_res_nacl->se_tpg->se_tpg_tfo->get_fabric_name(), 2529 pr_res_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
2533 pr_res_holder->pr_reg_nacl->initiatorname); 2530 pr_res_holder->pr_reg_nacl->initiatorname);
2534 2531
2535 spin_unlock(&dev->dev_reservation_lock); 2532 spin_unlock(&dev->dev_reservation_lock);
2536 core_scsi3_put_pr_reg(pr_reg); 2533 core_scsi3_put_pr_reg(pr_reg);
2537 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; 2534 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
2538 return -EINVAL; 2535 return -EINVAL;
2539 } 2536 }
2540 /* 2537 /*
2541 * From spc4r17 Section 5.7.9: Reserving: 2538 * From spc4r17 Section 5.7.9: Reserving:
2542 * 2539 *
2543 * If the device server receives a PERSISTENT RESERVE OUT 2540 * If the device server receives a PERSISTENT RESERVE OUT
2544 * command with RESERVE service action where the TYPE field and 2541 * command with RESERVE service action where the TYPE field and
2545 * the SCOPE field contain the same values as the existing type 2542 * the SCOPE field contain the same values as the existing type
2546 * and scope from a persistent reservation holder, it shall not 2543 * and scope from a persistent reservation holder, it shall not
2547 * make any change to the existing persistent reservation and 2544 * make any change to the existing persistent reservation and
2548 * shall complete the command with GOOD status. 2545 * shall complete the command with GOOD status.
2549 */ 2546 */
2550 spin_unlock(&dev->dev_reservation_lock); 2547 spin_unlock(&dev->dev_reservation_lock);
2551 core_scsi3_put_pr_reg(pr_reg); 2548 core_scsi3_put_pr_reg(pr_reg);
2552 return 0; 2549 return 0;
2553 } 2550 }
2554 /* 2551 /*
2555 * Otherwise, our *pr_reg becomes the PR reservation holder for said 2552 * Otherwise, our *pr_reg becomes the PR reservation holder for said
2556 * TYPE/SCOPE. Also set the received scope and type in *pr_reg. 2553 * TYPE/SCOPE. Also set the received scope and type in *pr_reg.
2557 */ 2554 */
2558 pr_reg->pr_res_scope = scope; 2555 pr_reg->pr_res_scope = scope;
2559 pr_reg->pr_res_type = type; 2556 pr_reg->pr_res_type = type;
2560 pr_reg->pr_res_holder = 1; 2557 pr_reg->pr_res_holder = 1;
2561 dev->dev_pr_res_holder = pr_reg; 2558 dev->dev_pr_res_holder = pr_reg;
2562 prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0], 2559 prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
2563 PR_REG_ISID_ID_LEN); 2560 PR_REG_ISID_ID_LEN);
2564 2561
2565 pr_debug("SPC-3 PR [%s] Service Action: RESERVE created new" 2562 pr_debug("SPC-3 PR [%s] Service Action: RESERVE created new"
2566 " reservation holder TYPE: %s ALL_TG_PT: %d\n", 2563 " reservation holder TYPE: %s ALL_TG_PT: %d\n",
2567 cmd->se_tfo->get_fabric_name(), core_scsi3_pr_dump_type(type), 2564 cmd->se_tfo->get_fabric_name(), core_scsi3_pr_dump_type(type),
2568 (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); 2565 (pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
2569 pr_debug("SPC-3 PR [%s] RESERVE Node: %s%s\n", 2566 pr_debug("SPC-3 PR [%s] RESERVE Node: %s%s\n",
2570 cmd->se_tfo->get_fabric_name(), 2567 cmd->se_tfo->get_fabric_name(),
2571 se_sess->se_node_acl->initiatorname, 2568 se_sess->se_node_acl->initiatorname,
2572 (prf_isid) ? &i_buf[0] : ""); 2569 (prf_isid) ? &i_buf[0] : "");
2573 spin_unlock(&dev->dev_reservation_lock); 2570 spin_unlock(&dev->dev_reservation_lock);
2574 2571
2575 if (pr_tmpl->pr_aptpl_active) { 2572 if (pr_tmpl->pr_aptpl_active) {
2576 ret = core_scsi3_update_and_write_aptpl(cmd->se_dev, 2573 ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
2577 &pr_reg->pr_aptpl_buf[0], 2574 &pr_reg->pr_aptpl_buf[0],
2578 pr_tmpl->pr_aptpl_buf_len); 2575 pr_tmpl->pr_aptpl_buf_len);
2579 if (!ret) 2576 if (!ret)
2580 pr_debug("SPC-3 PR: Updated APTPL metadata" 2577 pr_debug("SPC-3 PR: Updated APTPL metadata"
2581 " for RESERVE\n"); 2578 " for RESERVE\n");
2582 } 2579 }
2583 2580
2584 core_scsi3_put_pr_reg(pr_reg); 2581 core_scsi3_put_pr_reg(pr_reg);
2585 return 0; 2582 return 0;
2586 } 2583 }
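
Stripped of locking, logging, and reference counting, the RESERVE handler above is a short decision tree. A hypothetical distillation with the kernel state reduced to booleans, running the same checks in the same order:

enum reserve_result { RES_GOOD, RES_CONFLICT, RES_INVALID_PARAM };

/* Sketch of core_scsi3_pro_reserve()'s outcome logic; all inputs are
 * hypothetical flags standing in for the pr_reg/pr_res_holder state. */
static enum reserve_result pro_reserve_outcome(int key_matches, int lu_scope,
					       int held_by_other, int held_by_us,
					       int same_type_and_scope)
{
	if (!key_matches)
		return RES_CONFLICT;		/* wrong reservation key */
	if (!lu_scope)
		return RES_INVALID_PARAM;	/* only LU_SCOPE is legal */
	if (held_by_other)
		return RES_CONFLICT;		/* someone else holds the PR */
	if (held_by_us && !same_type_and_scope)
		return RES_CONFLICT;		/* may not change TYPE/SCOPE */
	return RES_GOOD;	/* no-op re-reserve, or become the holder */
}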
2587 2584
2588 static int core_scsi3_emulate_pro_reserve( 2585 static int core_scsi3_emulate_pro_reserve(
2589 struct se_cmd *cmd, 2586 struct se_cmd *cmd,
2590 int type, 2587 int type,
2591 int scope, 2588 int scope,
2592 u64 res_key) 2589 u64 res_key)
2593 { 2590 {
2594 struct se_device *dev = cmd->se_dev; 2591 struct se_device *dev = cmd->se_dev;
2595 int ret = 0; 2592 int ret = 0;
2596 2593
2597 switch (type) { 2594 switch (type) {
2598 case PR_TYPE_WRITE_EXCLUSIVE: 2595 case PR_TYPE_WRITE_EXCLUSIVE:
2599 case PR_TYPE_EXCLUSIVE_ACCESS: 2596 case PR_TYPE_EXCLUSIVE_ACCESS:
2600 case PR_TYPE_WRITE_EXCLUSIVE_REGONLY: 2597 case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
2601 case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY: 2598 case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
2602 case PR_TYPE_WRITE_EXCLUSIVE_ALLREG: 2599 case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
2603 case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG: 2600 case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
2604 ret = core_scsi3_pro_reserve(cmd, dev, type, scope, res_key); 2601 ret = core_scsi3_pro_reserve(cmd, dev, type, scope, res_key);
2605 break; 2602 break;
2606 default: 2603 default:
2607 pr_err("SPC-3 PR: Unknown Service Action RESERVE Type:" 2604 pr_err("SPC-3 PR: Unknown Service Action RESERVE Type:"
2608 " 0x%02x\n", type); 2605 " 0x%02x\n", type);
2609 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; 2606 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
2610 return -EINVAL; 2607 return -EINVAL;
2611 } 2608 }
2612 2609
2613 return ret; 2610 return ret;
2614 } 2611 }
2615 2612
2616 /* 2613 /*
2617 * Called with struct se_device->dev_reservation_lock held. 2614 * Called with struct se_device->dev_reservation_lock held.
2618 */ 2615 */
2619 static void __core_scsi3_complete_pro_release( 2616 static void __core_scsi3_complete_pro_release(
2620 struct se_device *dev, 2617 struct se_device *dev,
2621 struct se_node_acl *se_nacl, 2618 struct se_node_acl *se_nacl,
2622 struct t10_pr_registration *pr_reg, 2619 struct t10_pr_registration *pr_reg,
2623 int explict) 2620 int explict)
2624 { 2621 {
2625 struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo; 2622 struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo;
2626 char i_buf[PR_REG_ISID_ID_LEN]; 2623 char i_buf[PR_REG_ISID_ID_LEN];
2627 int prf_isid; 2624 int prf_isid;
2628 2625
2629 memset(i_buf, 0, PR_REG_ISID_ID_LEN); 2626 memset(i_buf, 0, PR_REG_ISID_ID_LEN);
2630 prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0], 2627 prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
2631 PR_REG_ISID_ID_LEN); 2628 PR_REG_ISID_ID_LEN);
2632 /* 2629 /*
2633 * Go ahead and release the current PR reservation holder. 2630 * Go ahead and release the current PR reservation holder.
2634 */ 2631 */
2635 dev->dev_pr_res_holder = NULL; 2632 dev->dev_pr_res_holder = NULL;
2636 2633
2637 pr_debug("SPC-3 PR [%s] Service Action: %s RELEASE cleared" 2634 pr_debug("SPC-3 PR [%s] Service Action: %s RELEASE cleared"
2638 " reservation holder TYPE: %s ALL_TG_PT: %d\n", 2635 " reservation holder TYPE: %s ALL_TG_PT: %d\n",
2639 tfo->get_fabric_name(), (explict) ? "explicit" : "implicit", 2636 tfo->get_fabric_name(), (explict) ? "explicit" : "implicit",
2640 core_scsi3_pr_dump_type(pr_reg->pr_res_type), 2637 core_scsi3_pr_dump_type(pr_reg->pr_res_type),
2641 (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); 2638 (pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
2642 pr_debug("SPC-3 PR [%s] RELEASE Node: %s%s\n", 2639 pr_debug("SPC-3 PR [%s] RELEASE Node: %s%s\n",
2643 tfo->get_fabric_name(), se_nacl->initiatorname, 2640 tfo->get_fabric_name(), se_nacl->initiatorname,
2644 (prf_isid) ? &i_buf[0] : ""); 2641 (prf_isid) ? &i_buf[0] : "");
2645 /* 2642 /*
2646 * Clear TYPE and SCOPE for the next PROUT Service Action: RESERVE 2643 * Clear TYPE and SCOPE for the next PROUT Service Action: RESERVE
2647 */ 2644 */
2648 pr_reg->pr_res_holder = pr_reg->pr_res_type = pr_reg->pr_res_scope = 0; 2645 pr_reg->pr_res_holder = pr_reg->pr_res_type = pr_reg->pr_res_scope = 0;
2649 } 2646 }
2650 2647
2651 static int core_scsi3_emulate_pro_release( 2648 static int core_scsi3_emulate_pro_release(
2652 struct se_cmd *cmd, 2649 struct se_cmd *cmd,
2653 int type, 2650 int type,
2654 int scope, 2651 int scope,
2655 u64 res_key) 2652 u64 res_key)
2656 { 2653 {
2657 struct se_device *dev = cmd->se_dev; 2654 struct se_device *dev = cmd->se_dev;
2658 struct se_session *se_sess = cmd->se_sess; 2655 struct se_session *se_sess = cmd->se_sess;
2659 struct se_lun *se_lun = cmd->se_lun; 2656 struct se_lun *se_lun = cmd->se_lun;
2660 struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_res_holder; 2657 struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_res_holder;
2661 struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; 2658 struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
2662 int ret, all_reg = 0; 2659 int ret, all_reg = 0;
2663 2660
2664 if (!se_sess || !se_lun) { 2661 if (!se_sess || !se_lun) {
2665 pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); 2662 pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
2666 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2663 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2667 return -EINVAL; 2664 return -EINVAL;
2668 } 2665 }
2669 /* 2666 /*
2670 * Locate the existing *pr_reg via struct se_node_acl pointers 2667 * Locate the existing *pr_reg via struct se_node_acl pointers
2671 */ 2668 */
2672 pr_reg = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess); 2669 pr_reg = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess);
2673 if (!pr_reg) { 2670 if (!pr_reg) {
2674 pr_err("SPC-3 PR: Unable to locate" 2671 pr_err("SPC-3 PR: Unable to locate"
2675 " PR_REGISTERED *pr_reg for RELEASE\n"); 2672 " PR_REGISTERED *pr_reg for RELEASE\n");
2676 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2673 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2677 return -EINVAL; 2674 return -EINVAL;
2678 } 2675 }
2679 /* 2676 /*
2680 * From spc4r17 Section 5.7.11.2 Releasing: 2677 * From spc4r17 Section 5.7.11.2 Releasing:
2681 * 2678 *
2682 * If there is no persistent reservation or in response to a persistent 2679 * If there is no persistent reservation or in response to a persistent
2683 * reservation release request from a registered I_T nexus that is not a 2680 * reservation release request from a registered I_T nexus that is not a
2684 * persistent reservation holder (see 5.7.10), the device server shall 2681 * persistent reservation holder (see 5.7.10), the device server shall
2685 * do the following: 2682 * do the following:
2686 * 2683 *
2687 * a) Not release the persistent reservation, if any; 2684 * a) Not release the persistent reservation, if any;
2688 * b) Not remove any registrations; and 2685 * b) Not remove any registrations; and
2689 * c) Complete the command with GOOD status. 2686 * c) Complete the command with GOOD status.
2690 */ 2687 */
2691 spin_lock(&dev->dev_reservation_lock); 2688 spin_lock(&dev->dev_reservation_lock);
2692 pr_res_holder = dev->dev_pr_res_holder; 2689 pr_res_holder = dev->dev_pr_res_holder;
2693 if (!pr_res_holder) { 2690 if (!pr_res_holder) {
2694 /* 2691 /*
2695 * No persistent reservation, return GOOD status. 2692 * No persistent reservation, return GOOD status.
2696 */ 2693 */
2697 spin_unlock(&dev->dev_reservation_lock); 2694 spin_unlock(&dev->dev_reservation_lock);
2698 core_scsi3_put_pr_reg(pr_reg); 2695 core_scsi3_put_pr_reg(pr_reg);
2699 return 0; 2696 return 0;
2700 } 2697 }
2701 if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) || 2698 if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
2702 (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) 2699 (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))
2703 all_reg = 1; 2700 all_reg = 1;
2704 2701
2705 if ((all_reg == 0) && (pr_res_holder != pr_reg)) { 2702 if ((all_reg == 0) && (pr_res_holder != pr_reg)) {
2706 /* 2703 /*
2707 * Non 'All Registrants' PR Type cases.. 2704 * Non 'All Registrants' PR Type cases..
2708 * Release request from a registered I_T nexus that is not a 2705 * Release request from a registered I_T nexus that is not a
2709 * persistent reservation holder. Return GOOD status. 2706 * persistent reservation holder. Return GOOD status.
2710 */ 2707 */
2711 spin_unlock(&dev->dev_reservation_lock); 2708 spin_unlock(&dev->dev_reservation_lock);
2712 core_scsi3_put_pr_reg(pr_reg); 2709 core_scsi3_put_pr_reg(pr_reg);
2713 return 0; 2710 return 0;
2714 } 2711 }
2715 /* 2712 /*
2716 * From spc4r17 Section 5.7.11.2 Releasing: 2713 * From spc4r17 Section 5.7.11.2 Releasing:
2717 * 2714 *
2718 * Only the persistent reservation holder (see 5.7.10) is allowed to 2715 * Only the persistent reservation holder (see 5.7.10) is allowed to
2719 * release a persistent reservation. 2716 * release a persistent reservation.
2720 * 2717 *
2721 * An application client releases the persistent reservation by issuing 2718 * An application client releases the persistent reservation by issuing
2722 * a PERSISTENT RESERVE OUT command with RELEASE service action through 2719 * a PERSISTENT RESERVE OUT command with RELEASE service action through
2723 * an I_T nexus that is a persistent reservation holder with the 2720 * an I_T nexus that is a persistent reservation holder with the
2724 * following parameters: 2721 * following parameters:
2725 * 2722 *
2726 * a) RESERVATION KEY field set to the value of the reservation key 2723 * a) RESERVATION KEY field set to the value of the reservation key
2727 * that is registered with the logical unit for the I_T nexus; 2724 * that is registered with the logical unit for the I_T nexus;
2728 */ 2725 */
2729 if (res_key != pr_reg->pr_res_key) { 2726 if (res_key != pr_reg->pr_res_key) {
2730 pr_err("SPC-3 PR RELEASE: Received res_key: 0x%016Lx" 2727 pr_err("SPC-3 PR RELEASE: Received res_key: 0x%016Lx"
2731 " does not match existing SA REGISTER res_key:" 2728 " does not match existing SA REGISTER res_key:"
2732 " 0x%016Lx\n", res_key, pr_reg->pr_res_key); 2729 " 0x%016Lx\n", res_key, pr_reg->pr_res_key);
2733 spin_unlock(&dev->dev_reservation_lock); 2730 spin_unlock(&dev->dev_reservation_lock);
2734 core_scsi3_put_pr_reg(pr_reg); 2731 core_scsi3_put_pr_reg(pr_reg);
2735 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; 2732 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
2736 return -EINVAL; 2733 return -EINVAL;
2737 } 2734 }
2738 /* 2735 /*
2739 * From spc4r17 Section 5.7.11.2 Releasing and above: 2736 * From spc4r17 Section 5.7.11.2 Releasing and above:
2740 * 2737 *
2741 * b) TYPE field and SCOPE field set to match the persistent 2738 * b) TYPE field and SCOPE field set to match the persistent
2742 * reservation being released. 2739 * reservation being released.
2743 */ 2740 */
2744 if ((pr_res_holder->pr_res_type != type) || 2741 if ((pr_res_holder->pr_res_type != type) ||
2745 (pr_res_holder->pr_res_scope != scope)) { 2742 (pr_res_holder->pr_res_scope != scope)) {
2746 struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl; 2743 struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
2747 pr_err("SPC-3 PR RELEASE: Attempted to release" 2744 pr_err("SPC-3 PR RELEASE: Attempted to release"
2748 " reservation from [%s]: %s with different TYPE " 2745 " reservation from [%s]: %s with different TYPE "
2749 "and/or SCOPE while reservation already held by" 2746 "and/or SCOPE while reservation already held by"
2750 " [%s]: %s, returning RESERVATION_CONFLICT\n", 2747 " [%s]: %s, returning RESERVATION_CONFLICT\n",
2751 cmd->se_tfo->get_fabric_name(), 2748 cmd->se_tfo->get_fabric_name(),
2752 se_sess->se_node_acl->initiatorname, 2749 se_sess->se_node_acl->initiatorname,
2753 pr_res_nacl->se_tpg->se_tpg_tfo->get_fabric_name(), 2750 pr_res_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
2754 pr_res_holder->pr_reg_nacl->initiatorname); 2751 pr_res_holder->pr_reg_nacl->initiatorname);
2755 2752
2756 spin_unlock(&dev->dev_reservation_lock); 2753 spin_unlock(&dev->dev_reservation_lock);
2757 core_scsi3_put_pr_reg(pr_reg); 2754 core_scsi3_put_pr_reg(pr_reg);
2758 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; 2755 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
2759 return -EINVAL; 2756 return -EINVAL;
2760 } 2757 }
2761 /* 2758 /*
2762 * In response to a persistent reservation release request from the 2759 * In response to a persistent reservation release request from the
2763 * persistent reservation holder the device server shall perform a 2760 * persistent reservation holder the device server shall perform a
2764 * release by doing the following as an uninterrupted series of actions: 2761 * release by doing the following as an uninterrupted series of actions:
2765 * a) Release the persistent reservation; 2762 * a) Release the persistent reservation;
2766 * b) Not remove any registration(s); 2763 * b) Not remove any registration(s);
2767 * c) If the released persistent reservation is a registrants only type 2764 * c) If the released persistent reservation is a registrants only type
2768 * or all registrants type persistent reservation, 2765 * or all registrants type persistent reservation,
2769 * the device server shall establish a unit attention condition for 2766 * the device server shall establish a unit attention condition for
2770 * the initiator port associated with every registered 2767 * the initiator port associated with every registered
2771 * I_T nexus other than the I_T nexus on which the PERSISTENT 2768 * I_T nexus other than the I_T nexus on which the PERSISTENT
2772 * RESERVE OUT command with RELEASE service action was received, 2769 * RESERVE OUT command with RELEASE service action was received,
2773 * with the additional sense code set to RESERVATIONS RELEASED; and 2770 * with the additional sense code set to RESERVATIONS RELEASED; and
2774 * d) If the persistent reservation is of any other type, the device 2771 * d) If the persistent reservation is of any other type, the device
2775 * server shall not establish a unit attention condition. 2772 * server shall not establish a unit attention condition.
2776 */ 2773 */
2777 __core_scsi3_complete_pro_release(dev, se_sess->se_node_acl, 2774 __core_scsi3_complete_pro_release(dev, se_sess->se_node_acl,
2778 pr_reg, 1); 2775 pr_reg, 1);
2779 2776
2780 spin_unlock(&dev->dev_reservation_lock); 2777 spin_unlock(&dev->dev_reservation_lock);
2781 2778
2782 if ((type != PR_TYPE_WRITE_EXCLUSIVE_REGONLY) && 2779 if ((type != PR_TYPE_WRITE_EXCLUSIVE_REGONLY) &&
2783 (type != PR_TYPE_EXCLUSIVE_ACCESS_REGONLY) && 2780 (type != PR_TYPE_EXCLUSIVE_ACCESS_REGONLY) &&
2784 (type != PR_TYPE_WRITE_EXCLUSIVE_ALLREG) && 2781 (type != PR_TYPE_WRITE_EXCLUSIVE_ALLREG) &&
2785 (type != PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) { 2782 (type != PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
2786 /* 2783 /*
2787 * If no UNIT ATTENTION conditions will be established for 2784 * If no UNIT ATTENTION conditions will be established for
2788 * PR_TYPE_WRITE_EXCLUSIVE or PR_TYPE_EXCLUSIVE_ACCESS 2785 * PR_TYPE_WRITE_EXCLUSIVE or PR_TYPE_EXCLUSIVE_ACCESS
2789 * go ahead and check for APTPL=1 update+write below 2786 * go ahead and check for APTPL=1 update+write below
2790 */ 2787 */
2791 goto write_aptpl; 2788 goto write_aptpl;
2792 } 2789 }
2793 2790
2794 spin_lock(&pr_tmpl->registration_lock); 2791 spin_lock(&pr_tmpl->registration_lock);
2795 list_for_each_entry(pr_reg_p, &pr_tmpl->registration_list, 2792 list_for_each_entry(pr_reg_p, &pr_tmpl->registration_list,
2796 pr_reg_list) { 2793 pr_reg_list) {
2797 /* 2794 /*
2798 * Do not establish a UNIT ATTENTION condition 2795 * Do not establish a UNIT ATTENTION condition
2799 * for the calling I_T Nexus 2796 * for the calling I_T Nexus
2800 */ 2797 */
2801 if (pr_reg_p == pr_reg) 2798 if (pr_reg_p == pr_reg)
2802 continue; 2799 continue;
2803 2800
2804 core_scsi3_ua_allocate(pr_reg_p->pr_reg_nacl, 2801 core_scsi3_ua_allocate(pr_reg_p->pr_reg_nacl,
2805 pr_reg_p->pr_res_mapped_lun, 2802 pr_reg_p->pr_res_mapped_lun,
2806 0x2A, ASCQ_2AH_RESERVATIONS_RELEASED); 2803 0x2A, ASCQ_2AH_RESERVATIONS_RELEASED);
2807 } 2804 }
2808 spin_unlock(&pr_tmpl->registration_lock); 2805 spin_unlock(&pr_tmpl->registration_lock);
2809 2806
2810 write_aptpl: 2807 write_aptpl:
2811 if (pr_tmpl->pr_aptpl_active) { 2808 if (pr_tmpl->pr_aptpl_active) {
2812 ret = core_scsi3_update_and_write_aptpl(cmd->se_dev, 2809 ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
2813 &pr_reg->pr_aptpl_buf[0], 2810 &pr_reg->pr_aptpl_buf[0],
2814 pr_tmpl->pr_aptpl_buf_len); 2811 pr_tmpl->pr_aptpl_buf_len);
2815 if (!ret) 2812 if (!ret)
2816 pr_debug("SPC-3 PR: Updated APTPL metadata for RELEASE\n"); 2813 pr_debug("SPC-3 PR: Updated APTPL metadata for RELEASE\n");
2817 } 2814 }
2818 2815
2819 core_scsi3_put_pr_reg(pr_reg); 2816 core_scsi3_put_pr_reg(pr_reg);
2820 return 0; 2817 return 0;
2821 } 2818 }
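
The RELEASE handler follows the same shape, with the twist that spc4r17 requires two of the failure-looking cases to complete with GOOD status. A hypothetical distillation:

enum release_result { REL_GOOD, REL_CONFLICT };

/* Sketch of the RELEASE outcome logic above; inputs are hypothetical
 * flags for the holder/registration state. */
static enum release_result pro_release_outcome(int held, int all_reg,
					       int we_hold, int key_matches,
					       int same_type_and_scope)
{
	if (!held)
		return REL_GOOD;	/* no reservation: still GOOD */
	if (!all_reg && !we_hold)
		return REL_GOOD;	/* non-holder release: no-op, GOOD */
	if (!key_matches)
		return REL_CONFLICT;
	if (!same_type_and_scope)
		return REL_CONFLICT;
	return REL_GOOD;	/* released; UAs follow for reg-only and
				 * all-registrants types */
}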
2822 2819
2823 static int core_scsi3_emulate_pro_clear( 2820 static int core_scsi3_emulate_pro_clear(
2824 struct se_cmd *cmd, 2821 struct se_cmd *cmd,
2825 u64 res_key) 2822 u64 res_key)
2826 { 2823 {
2827 struct se_device *dev = cmd->se_dev; 2824 struct se_device *dev = cmd->se_dev;
2828 struct se_node_acl *pr_reg_nacl; 2825 struct se_node_acl *pr_reg_nacl;
2829 struct se_session *se_sess = cmd->se_sess; 2826 struct se_session *se_sess = cmd->se_sess;
2830 struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; 2827 struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
2831 struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder; 2828 struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder;
2832 u32 pr_res_mapped_lun = 0; 2829 u32 pr_res_mapped_lun = 0;
2833 int calling_it_nexus = 0; 2830 int calling_it_nexus = 0;
2834 /* 2831 /*
2835 * Locate the existing *pr_reg via struct se_node_acl pointers 2832 * Locate the existing *pr_reg via struct se_node_acl pointers
2836 */ 2833 */
2837 pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, 2834 pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev,
2838 se_sess->se_node_acl, se_sess); 2835 se_sess->se_node_acl, se_sess);
2839 if (!pr_reg_n) { 2836 if (!pr_reg_n) {
2840 pr_err("SPC-3 PR: Unable to locate" 2837 pr_err("SPC-3 PR: Unable to locate"
2841 " PR_REGISTERED *pr_reg for CLEAR\n"); 2838 " PR_REGISTERED *pr_reg for CLEAR\n");
2842 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2839 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2843 return -EINVAL; 2840 return -EINVAL;
2844 } 2841 }
2845 /* 2842 /*
2846 * From spc4r17 section 5.7.11.6, Clearing: 2843 * From spc4r17 section 5.7.11.6, Clearing:
2847 * 2844 *
2848 * Any application client may release the persistent reservation and 2845 * Any application client may release the persistent reservation and
2849 * remove all registrations from a device server by issuing a 2846 * remove all registrations from a device server by issuing a
2850 * PERSISTENT RESERVE OUT command with CLEAR service action through a 2847 * PERSISTENT RESERVE OUT command with CLEAR service action through a
2851 * registered I_T nexus with the following parameter: 2848 * registered I_T nexus with the following parameter:
2852 * 2849 *
2853 * a) RESERVATION KEY field set to the value of the reservation key 2850 * a) RESERVATION KEY field set to the value of the reservation key
2854 * that is registered with the logical unit for the I_T nexus. 2851 * that is registered with the logical unit for the I_T nexus.
2855 */ 2852 */
2856 if (res_key != pr_reg_n->pr_res_key) { 2853 if (res_key != pr_reg_n->pr_res_key) {
2857 pr_err("SPC-3 PR REGISTER: Received" 2854 pr_err("SPC-3 PR REGISTER: Received"
2858 " res_key: 0x%016Lx does not match" 2855 " res_key: 0x%016Lx does not match"
2859 " existing SA REGISTER res_key:" 2856 " existing SA REGISTER res_key:"
2860 " 0x%016Lx\n", res_key, pr_reg_n->pr_res_key); 2857 " 0x%016Lx\n", res_key, pr_reg_n->pr_res_key);
2861 core_scsi3_put_pr_reg(pr_reg_n); 2858 core_scsi3_put_pr_reg(pr_reg_n);
2862 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; 2859 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
2863 return -EINVAL; 2860 return -EINVAL;
2864 } 2861 }
2865 /* 2862 /*
2866 * a) Release the persistent reservation, if any; 2863 * a) Release the persistent reservation, if any;
2867 */ 2864 */
2868 spin_lock(&dev->dev_reservation_lock); 2865 spin_lock(&dev->dev_reservation_lock);
2869 pr_res_holder = dev->dev_pr_res_holder; 2866 pr_res_holder = dev->dev_pr_res_holder;
2870 if (pr_res_holder) { 2867 if (pr_res_holder) {
2871 struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl; 2868 struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
2872 __core_scsi3_complete_pro_release(dev, pr_res_nacl, 2869 __core_scsi3_complete_pro_release(dev, pr_res_nacl,
2873 pr_res_holder, 0); 2870 pr_res_holder, 0);
2874 } 2871 }
2875 spin_unlock(&dev->dev_reservation_lock); 2872 spin_unlock(&dev->dev_reservation_lock);
2876 /* 2873 /*
2877 * b) Remove all registration(s) (see spc4r17 5.7.7); 2874 * b) Remove all registration(s) (see spc4r17 5.7.7);
2878 */ 2875 */
2879 spin_lock(&pr_tmpl->registration_lock); 2876 spin_lock(&pr_tmpl->registration_lock);
2880 list_for_each_entry_safe(pr_reg, pr_reg_tmp, 2877 list_for_each_entry_safe(pr_reg, pr_reg_tmp,
2881 &pr_tmpl->registration_list, pr_reg_list) { 2878 &pr_tmpl->registration_list, pr_reg_list) {
2882 2879
2883 calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0; 2880 calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
2884 pr_reg_nacl = pr_reg->pr_reg_nacl; 2881 pr_reg_nacl = pr_reg->pr_reg_nacl;
2885 pr_res_mapped_lun = pr_reg->pr_res_mapped_lun; 2882 pr_res_mapped_lun = pr_reg->pr_res_mapped_lun;
2886 __core_scsi3_free_registration(dev, pr_reg, NULL, 2883 __core_scsi3_free_registration(dev, pr_reg, NULL,
2887 calling_it_nexus); 2884 calling_it_nexus);
2888 /* 2885 /*
2889 * e) Establish a unit attention condition for the initiator 2886 * e) Establish a unit attention condition for the initiator
2890 * port associated with every registered I_T nexus other 2887 * port associated with every registered I_T nexus other
2891 * than the I_T nexus on which the PERSISTENT RESERVE OUT 2888 * than the I_T nexus on which the PERSISTENT RESERVE OUT
2892 * command with CLEAR service action was received, with the 2889 * command with CLEAR service action was received, with the
2893 * additional sense code set to RESERVATIONS PREEMPTED. 2890 * additional sense code set to RESERVATIONS PREEMPTED.
2894 */ 2891 */
2895 if (!calling_it_nexus) 2892 if (!calling_it_nexus)
2896 core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun, 2893 core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun,
2897 0x2A, ASCQ_2AH_RESERVATIONS_PREEMPTED); 2894 0x2A, ASCQ_2AH_RESERVATIONS_PREEMPTED);
2898 } 2895 }
2899 spin_unlock(&pr_tmpl->registration_lock); 2896 spin_unlock(&pr_tmpl->registration_lock);
2900 2897
2901 pr_debug("SPC-3 PR [%s] Service Action: CLEAR complete\n", 2898 pr_debug("SPC-3 PR [%s] Service Action: CLEAR complete\n",
2902 cmd->se_tfo->get_fabric_name()); 2899 cmd->se_tfo->get_fabric_name());
2903 2900
2904 if (pr_tmpl->pr_aptpl_active) { 2901 if (pr_tmpl->pr_aptpl_active) {
2905 core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0); 2902 core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0);
2906 pr_debug("SPC-3 PR: Updated APTPL metadata" 2903 pr_debug("SPC-3 PR: Updated APTPL metadata"
2907 " for CLEAR\n"); 2904 " for CLEAR\n");
2908 } 2905 }
2909 2906
2910 core_scsi3_pr_generation(dev); 2907 core_scsi3_pr_generation(dev);
2911 return 0; 2908 return 0;
2912 } 2909 }
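
RELEASE and CLEAR both fan a unit attention out to every registered I_T nexus except the caller, built on ASC 0x2A. For reference, the sense-code pairs involved (per the SPC-3 ASC/ASCQ assignments; the ASCQ_2AH_* macros used above carry these ASCQ values):

/* ASC 0x2A is the "PARAMETERS CHANGED" family in SPC-3 */
#define ASC_PARAMETERS_CHANGED		0x2A
#define ASCQ_RESERVATIONS_PREEMPTED	0x03	/* used by CLEAR/PREEMPT */
#define ASCQ_RESERVATIONS_RELEASED	0x04	/* used by RELEASE/UNREGISTER */
#define ASCQ_REGISTRATIONS_PREEMPTED	0x05	/* cited in 5.7.11.4.4 */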
2913 2910
2914 /* 2911 /*
2915 * Called with struct se_device->dev_reservation_lock held. 2912 * Called with struct se_device->dev_reservation_lock held.
2916 */ 2913 */
2917 static void __core_scsi3_complete_pro_preempt( 2914 static void __core_scsi3_complete_pro_preempt(
2918 struct se_device *dev, 2915 struct se_device *dev,
2919 struct t10_pr_registration *pr_reg, 2916 struct t10_pr_registration *pr_reg,
2920 struct list_head *preempt_and_abort_list, 2917 struct list_head *preempt_and_abort_list,
2921 int type, 2918 int type,
2922 int scope, 2919 int scope,
2923 int abort) 2920 int abort)
2924 { 2921 {
2925 struct se_node_acl *nacl = pr_reg->pr_reg_nacl; 2922 struct se_node_acl *nacl = pr_reg->pr_reg_nacl;
2926 struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo; 2923 struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
2927 char i_buf[PR_REG_ISID_ID_LEN]; 2924 char i_buf[PR_REG_ISID_ID_LEN];
2928 int prf_isid; 2925 int prf_isid;
2929 2926
2930 memset(i_buf, 0, PR_REG_ISID_ID_LEN); 2927 memset(i_buf, 0, PR_REG_ISID_ID_LEN);
2931 prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0], 2928 prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
2932 PR_REG_ISID_ID_LEN); 2929 PR_REG_ISID_ID_LEN);
2933 /* 2930 /*
2934 * Do an implicit RELEASE of the existing reservation. 2931 * Do an implicit RELEASE of the existing reservation.
2935 */ 2932 */
2936 if (dev->dev_pr_res_holder) 2933 if (dev->dev_pr_res_holder)
2937 __core_scsi3_complete_pro_release(dev, nacl, 2934 __core_scsi3_complete_pro_release(dev, nacl,
2938 dev->dev_pr_res_holder, 0); 2935 dev->dev_pr_res_holder, 0);
2939 2936
2940 dev->dev_pr_res_holder = pr_reg; 2937 dev->dev_pr_res_holder = pr_reg;
2941 pr_reg->pr_res_holder = 1; 2938 pr_reg->pr_res_holder = 1;
2942 pr_reg->pr_res_type = type; 2939 pr_reg->pr_res_type = type;
2943 pr_reg->pr_res_scope = scope; 2940 pr_reg->pr_res_scope = scope;
2944 2941
2945 pr_debug("SPC-3 PR [%s] Service Action: PREEMPT%s created new" 2942 pr_debug("SPC-3 PR [%s] Service Action: PREEMPT%s created new"
2946 " reservation holder TYPE: %s ALL_TG_PT: %d\n", 2943 " reservation holder TYPE: %s ALL_TG_PT: %d\n",
2947 tfo->get_fabric_name(), (abort) ? "_AND_ABORT" : "", 2944 tfo->get_fabric_name(), (abort) ? "_AND_ABORT" : "",
2948 core_scsi3_pr_dump_type(type), 2945 core_scsi3_pr_dump_type(type),
2949 (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); 2946 (pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
2950 pr_debug("SPC-3 PR [%s] PREEMPT%s from Node: %s%s\n", 2947 pr_debug("SPC-3 PR [%s] PREEMPT%s from Node: %s%s\n",
2951 tfo->get_fabric_name(), (abort) ? "_AND_ABORT" : "", 2948 tfo->get_fabric_name(), (abort) ? "_AND_ABORT" : "",
2952 nacl->initiatorname, (prf_isid) ? &i_buf[0] : ""); 2949 nacl->initiatorname, (prf_isid) ? &i_buf[0] : "");
2953 /* 2950 /*
2954 * For PREEMPT_AND_ABORT, add the preempting reservation's 2951 * For PREEMPT_AND_ABORT, add the preempting reservation's
2955 * struct t10_pr_registration to the list that will be compared 2952 * struct t10_pr_registration to the list that will be compared
2956 * against received CDBs.. 2953 * against received CDBs..
2957 */ 2954 */
2958 if (preempt_and_abort_list) 2955 if (preempt_and_abort_list)
2959 list_add_tail(&pr_reg->pr_reg_abort_list, 2956 list_add_tail(&pr_reg->pr_reg_abort_list,
2960 preempt_and_abort_list); 2957 preempt_and_abort_list);
2961 } 2958 }
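
Under dev_reservation_lock, completing a preempt is a holder swap: implicitly release whoever currently holds the reservation, then install the preemptor with the requested TYPE and SCOPE. A hypothetical sketch with the kernel structures reduced to a toy type:

struct pr_holder { int type, scope; };

/* Sketch of the swap above: 'cur' stands in for dev->dev_pr_res_holder,
 * 'preemptor' for the winning t10_pr_registration. */
static void complete_preempt(struct pr_holder **cur,
			     struct pr_holder *preemptor,
			     int type, int scope)
{
	if (*cur)			/* implicit release of old holder */
		(*cur)->type = (*cur)->scope = 0;
	preemptor->type = type;		/* reservation continues under the */
	preemptor->scope = scope;	/* preemptor's TYPE and SCOPE */
	*cur = preemptor;
}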
2962 2959
2963 static void core_scsi3_release_preempt_and_abort( 2960 static void core_scsi3_release_preempt_and_abort(
2964 struct list_head *preempt_and_abort_list, 2961 struct list_head *preempt_and_abort_list,
2965 struct t10_pr_registration *pr_reg_holder) 2962 struct t10_pr_registration *pr_reg_holder)
2966 { 2963 {
2967 struct t10_pr_registration *pr_reg, *pr_reg_tmp; 2964 struct t10_pr_registration *pr_reg, *pr_reg_tmp;
2968 2965
2969 list_for_each_entry_safe(pr_reg, pr_reg_tmp, preempt_and_abort_list, 2966 list_for_each_entry_safe(pr_reg, pr_reg_tmp, preempt_and_abort_list,
2970 pr_reg_abort_list) { 2967 pr_reg_abort_list) {
2971 2968
2972 list_del(&pr_reg->pr_reg_abort_list); 2969 list_del(&pr_reg->pr_reg_abort_list);
2973 if (pr_reg_holder == pr_reg) 2970 if (pr_reg_holder == pr_reg)
2974 continue; 2971 continue;
2975 if (pr_reg->pr_res_holder) { 2972 if (pr_reg->pr_res_holder) {
2976 pr_warn("pr_reg->pr_res_holder still set\n"); 2973 pr_warn("pr_reg->pr_res_holder still set\n");
2977 continue; 2974 continue;
2978 } 2975 }
2979 2976
2980 pr_reg->pr_reg_deve = NULL; 2977 pr_reg->pr_reg_deve = NULL;
2981 pr_reg->pr_reg_nacl = NULL; 2978 pr_reg->pr_reg_nacl = NULL;
2982 kfree(pr_reg->pr_aptpl_buf); 2979 kfree(pr_reg->pr_aptpl_buf);
2983 kmem_cache_free(t10_pr_reg_cache, pr_reg); 2980 kmem_cache_free(t10_pr_reg_cache, pr_reg);
2984 } 2981 }
2985 } 2982 }
2986 2983
2987 int core_scsi3_check_cdb_abort_and_preempt( 2984 int core_scsi3_check_cdb_abort_and_preempt(
2988 struct list_head *preempt_and_abort_list, 2985 struct list_head *preempt_and_abort_list,
2989 struct se_cmd *cmd) 2986 struct se_cmd *cmd)
2990 { 2987 {
2991 struct t10_pr_registration *pr_reg, *pr_reg_tmp; 2988 struct t10_pr_registration *pr_reg, *pr_reg_tmp;
2992 2989
2993 list_for_each_entry_safe(pr_reg, pr_reg_tmp, preempt_and_abort_list, 2990 list_for_each_entry_safe(pr_reg, pr_reg_tmp, preempt_and_abort_list,
2994 pr_reg_abort_list) { 2991 pr_reg_abort_list) {
2995 if (pr_reg->pr_res_key == cmd->pr_res_key) 2992 if (pr_reg->pr_res_key == cmd->pr_res_key)
2996 return 0; 2993 return 0;
2997 } 2994 }
2998 2995
2999 return 1; 2996 return 1;
3000 } 2997 }
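
The helper above answers one question during PREEMPT_AND_ABORT processing: does a command's reservation key appear on the preempt-and-abort list built while registrations were freed? Reduced to an array walk over hypothetical types, the contract looks like this:

#include <stdint.h>

/* Returns 0 when cmd_key is on the preempt-and-abort list, 1 otherwise,
 * matching the list walk above. */
static int check_abort_and_preempt(const uint64_t *abort_keys, int nr_keys,
				   uint64_t cmd_key)
{
	int i;

	for (i = 0; i < nr_keys; i++)
		if (abort_keys[i] == cmd_key)
			return 0;
	return 1;
}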
3001 2998
3002 static int core_scsi3_pro_preempt( 2999 static int core_scsi3_pro_preempt(
3003 struct se_cmd *cmd, 3000 struct se_cmd *cmd,
3004 int type, 3001 int type,
3005 int scope, 3002 int scope,
3006 u64 res_key, 3003 u64 res_key,
3007 u64 sa_res_key, 3004 u64 sa_res_key,
3008 int abort) 3005 int abort)
3009 { 3006 {
3010 struct se_device *dev = cmd->se_dev; 3007 struct se_device *dev = cmd->se_dev;
3011 struct se_dev_entry *se_deve; 3008 struct se_dev_entry *se_deve;
3012 struct se_node_acl *pr_reg_nacl; 3009 struct se_node_acl *pr_reg_nacl;
3013 struct se_session *se_sess = cmd->se_sess; 3010 struct se_session *se_sess = cmd->se_sess;
3014 struct list_head preempt_and_abort_list; 3011 struct list_head preempt_and_abort_list;
3015 struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder; 3012 struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder;
3016 struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; 3013 struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
3017 u32 pr_res_mapped_lun = 0; 3014 u32 pr_res_mapped_lun = 0;
3018 int all_reg = 0, calling_it_nexus = 0, released_regs = 0; 3015 int all_reg = 0, calling_it_nexus = 0, released_regs = 0;
3019 int prh_type = 0, prh_scope = 0, ret; 3016 int prh_type = 0, prh_scope = 0, ret;
3020 3017
3021 if (!se_sess) { 3018 if (!se_sess) {
3022 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 3019 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3023 return -EINVAL; 3020 return -EINVAL;
3024 } 3021 }
3025 3022
3026 se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; 3023 se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
3027 pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, 3024 pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
3028 se_sess); 3025 se_sess);
3029 if (!pr_reg_n) { 3026 if (!pr_reg_n) {
3030 pr_err("SPC-3 PR: Unable to locate" 3027 pr_err("SPC-3 PR: Unable to locate"
3031 " PR_REGISTERED *pr_reg for PREEMPT%s\n", 3028 " PR_REGISTERED *pr_reg for PREEMPT%s\n",
3032 (abort) ? "_AND_ABORT" : ""); 3029 (abort) ? "_AND_ABORT" : "");
3033 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; 3030 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
3034 return -EINVAL; 3031 return -EINVAL;
3035 } 3032 }
3036 if (pr_reg_n->pr_res_key != res_key) { 3033 if (pr_reg_n->pr_res_key != res_key) {
3037 core_scsi3_put_pr_reg(pr_reg_n); 3034 core_scsi3_put_pr_reg(pr_reg_n);
3038 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; 3035 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
3039 return -EINVAL; 3036 return -EINVAL;
3040 } 3037 }
3041 if (scope != PR_SCOPE_LU_SCOPE) { 3038 if (scope != PR_SCOPE_LU_SCOPE) {
3042 pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope); 3039 pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
3043 core_scsi3_put_pr_reg(pr_reg_n); 3040 core_scsi3_put_pr_reg(pr_reg_n);
3044 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; 3041 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3045 return -EINVAL; 3042 return -EINVAL;
3046 } 3043 }
3047 INIT_LIST_HEAD(&preempt_and_abort_list); 3044 INIT_LIST_HEAD(&preempt_and_abort_list);
3048 3045
3049 spin_lock(&dev->dev_reservation_lock); 3046 spin_lock(&dev->dev_reservation_lock);
3050 pr_res_holder = dev->dev_pr_res_holder; 3047 pr_res_holder = dev->dev_pr_res_holder;
3051 if (pr_res_holder && 3048 if (pr_res_holder &&
3052 ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) || 3049 ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
3053 (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))) 3050 (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)))
3054 all_reg = 1; 3051 all_reg = 1;
3055 3052
3056 if (!all_reg && !sa_res_key) { 3053 if (!all_reg && !sa_res_key) {
3057 spin_unlock(&dev->dev_reservation_lock); 3054 spin_unlock(&dev->dev_reservation_lock);
3058 core_scsi3_put_pr_reg(pr_reg_n); 3055 core_scsi3_put_pr_reg(pr_reg_n);
3059 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; 3056 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3060 return -EINVAL; 3057 return -EINVAL;
3061 } 3058 }
3062 /* 3059 /*
3063 * From spc4r17, section 5.7.11.4.4 Removing Registrations: 3060 * From spc4r17, section 5.7.11.4.4 Removing Registrations:
3064 * 3061 *
3065 * If the SERVICE ACTION RESERVATION KEY field does not identify a 3062 * If the SERVICE ACTION RESERVATION KEY field does not identify a
3066 * persistent reservation holder or there is no persistent reservation 3063 * persistent reservation holder or there is no persistent reservation
3067 * holder (i.e., there is no persistent reservation), then the device 3064 * holder (i.e., there is no persistent reservation), then the device
3068 * server shall perform a preempt by doing the following in an 3065 * server shall perform a preempt by doing the following in an
3069 * uninterrupted series of actions. (See below..) 3066 * uninterrupted series of actions. (See below..)
3070 */ 3067 */
3071 if (!pr_res_holder || (pr_res_holder->pr_res_key != sa_res_key)) { 3068 if (!pr_res_holder || (pr_res_holder->pr_res_key != sa_res_key)) {
3072 /* 3069 /*
3073 * No existing or SA Reservation Key matching reservations.. 3070 * No existing or SA Reservation Key matching reservations..
3074 * 3071 *
3075 * PROUT SA PREEMPT with All Registrant type reservations are 3072 * PROUT SA PREEMPT with All Registrant type reservations are
3076 * allowed to be processed without a matching SA Reservation Key 3073 * allowed to be processed without a matching SA Reservation Key
3077 */ 3074 */
3078 spin_lock(&pr_tmpl->registration_lock); 3075 spin_lock(&pr_tmpl->registration_lock);
3079 list_for_each_entry_safe(pr_reg, pr_reg_tmp, 3076 list_for_each_entry_safe(pr_reg, pr_reg_tmp,
3080 &pr_tmpl->registration_list, pr_reg_list) { 3077 &pr_tmpl->registration_list, pr_reg_list) {
3081 /* 3078 /*
3082 * Removal of registrations for non all registrants 3079 * Removal of registrations for non all registrants
3083 * type reservations without a matching SA reservation 3080 * type reservations without a matching SA reservation
3084 * key. 3081 * key.
3085 * 3082 *
3086 * a) Remove the registrations for all I_T nexuses 3083 * a) Remove the registrations for all I_T nexuses
3087 * specified by the SERVICE ACTION RESERVATION KEY 3084 * specified by the SERVICE ACTION RESERVATION KEY
3088 * field; 3085 * field;
3089 * b) Ignore the contents of the SCOPE and TYPE fields; 3086 * b) Ignore the contents of the SCOPE and TYPE fields;
3090 * c) Process tasks as defined in 5.7.1; and 3087 * c) Process tasks as defined in 5.7.1; and
3091 * d) Establish a unit attention condition for the 3088 * d) Establish a unit attention condition for the
3092 * initiator port associated with every I_T nexus 3089 * initiator port associated with every I_T nexus
3093 * that lost its registration other than the I_T 3090 * that lost its registration other than the I_T
3094 * nexus on which the PERSISTENT RESERVE OUT command 3091 * nexus on which the PERSISTENT RESERVE OUT command
3095 * was received, with the additional sense code set 3092 * was received, with the additional sense code set
3096 * to REGISTRATIONS PREEMPTED. 3093 * to REGISTRATIONS PREEMPTED.
3097 */ 3094 */
3098 if (!all_reg) { 3095 if (!all_reg) {
3099 if (pr_reg->pr_res_key != sa_res_key) 3096 if (pr_reg->pr_res_key != sa_res_key)
3100 continue; 3097 continue;
3101 3098
3102 calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0; 3099 calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
3103 pr_reg_nacl = pr_reg->pr_reg_nacl; 3100 pr_reg_nacl = pr_reg->pr_reg_nacl;
3104 pr_res_mapped_lun = pr_reg->pr_res_mapped_lun; 3101 pr_res_mapped_lun = pr_reg->pr_res_mapped_lun;
3105 __core_scsi3_free_registration(dev, pr_reg, 3102 __core_scsi3_free_registration(dev, pr_reg,
3106 (abort) ? &preempt_and_abort_list : 3103 (abort) ? &preempt_and_abort_list :
3107 NULL, calling_it_nexus); 3104 NULL, calling_it_nexus);
3108 released_regs++; 3105 released_regs++;
3109 } else { 3106 } else {
3110 /* 3107 /*
3111 * Case for any existing all registrants type 3108 * Case for any existing all registrants type
3112 * reservation, follow logic in spc4r17 section 3109 * reservation, follow logic in spc4r17 section
3113 * 5.7.11.4 Preempting, Table 52 and Figure 7. 3110 * 5.7.11.4 Preempting, Table 52 and Figure 7.
3114 * 3111 *
3115 * For a ZERO SA Reservation key, release 3112 * For a ZERO SA Reservation key, release
3116 * all other registrations and do an implicit 3113 * all other registrations and do an implicit
3117 * release of the active persistent reservation. 3114 * release of the active persistent reservation.
3118 * 3115 *
3119 * For a non-ZERO SA Reservation key, only 3116 * For a non-ZERO SA Reservation key, only
3120 * release the matching reservation key from 3117 * release the matching reservation key from
3121 * registrations. 3118 * registrations.
3122 */ 3119 */
3123 if ((sa_res_key) && 3120 if ((sa_res_key) &&
3124 (pr_reg->pr_res_key != sa_res_key)) 3121 (pr_reg->pr_res_key != sa_res_key))
3125 continue; 3122 continue;
3126 3123
3127 calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0; 3124 calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
3128 if (calling_it_nexus) 3125 if (calling_it_nexus)
3129 continue; 3126 continue;
3130 3127
3131 pr_reg_nacl = pr_reg->pr_reg_nacl; 3128 pr_reg_nacl = pr_reg->pr_reg_nacl;
3132 pr_res_mapped_lun = pr_reg->pr_res_mapped_lun; 3129 pr_res_mapped_lun = pr_reg->pr_res_mapped_lun;
3133 __core_scsi3_free_registration(dev, pr_reg, 3130 __core_scsi3_free_registration(dev, pr_reg,
3134 (abort) ? &preempt_and_abort_list : 3131 (abort) ? &preempt_and_abort_list :
3135 NULL, 0); 3132 NULL, 0);
3136 released_regs++; 3133 released_regs++;
3137 } 3134 }
3138 if (!calling_it_nexus) 3135 if (!calling_it_nexus)
3139 core_scsi3_ua_allocate(pr_reg_nacl, 3136 core_scsi3_ua_allocate(pr_reg_nacl,
3140 pr_res_mapped_lun, 0x2A, 3137 pr_res_mapped_lun, 0x2A,
3141 ASCQ_2AH_RESERVATIONS_PREEMPTED); 3138 ASCQ_2AH_RESERVATIONS_PREEMPTED);
3142 } 3139 }
3143 spin_unlock(&pr_tmpl->registration_lock); 3140 spin_unlock(&pr_tmpl->registration_lock);
3144 /* 3141 /*
3145 * If a PERSISTENT RESERVE OUT with a PREEMPT service action or 3142 * If a PERSISTENT RESERVE OUT with a PREEMPT service action or
3146 * a PREEMPT AND ABORT service action sets the SERVICE ACTION 3143 * a PREEMPT AND ABORT service action sets the SERVICE ACTION
3147 * RESERVATION KEY field to a value that does not match any 3144 * RESERVATION KEY field to a value that does not match any
3148 * registered reservation key, then the device server shall 3145 * registered reservation key, then the device server shall
3149 * complete the command with RESERVATION CONFLICT status. 3146 * complete the command with RESERVATION CONFLICT status.
3150 */ 3147 */
3151 if (!released_regs) { 3148 if (!released_regs) {
3152 spin_unlock(&dev->dev_reservation_lock); 3149 spin_unlock(&dev->dev_reservation_lock);
3153 core_scsi3_put_pr_reg(pr_reg_n); 3150 core_scsi3_put_pr_reg(pr_reg_n);
3154 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; 3151 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
3155 return -EINVAL; 3152 return -EINVAL;
3156 } 3153 }
3157 /* 3154 /*
3158 * For an existing all registrants type reservation 3155 * For an existing all registrants type reservation
3159 * with a zero SA reservation key, preempt the existing 3156 * with a zero SA reservation key, preempt the existing
3160 * reservation with the new PR type and scope. 3157 * reservation with the new PR type and scope.
3161 */ 3158 */
3162 if (pr_res_holder && all_reg && !(sa_res_key)) { 3159 if (pr_res_holder && all_reg && !(sa_res_key)) {
3163 __core_scsi3_complete_pro_preempt(dev, pr_reg_n, 3160 __core_scsi3_complete_pro_preempt(dev, pr_reg_n,
3164 (abort) ? &preempt_and_abort_list : NULL, 3161 (abort) ? &preempt_and_abort_list : NULL,
3165 type, scope, abort); 3162 type, scope, abort);
3166 3163
3167 if (abort) 3164 if (abort)
3168 core_scsi3_release_preempt_and_abort( 3165 core_scsi3_release_preempt_and_abort(
3169 &preempt_and_abort_list, pr_reg_n); 3166 &preempt_and_abort_list, pr_reg_n);
3170 } 3167 }
3171 spin_unlock(&dev->dev_reservation_lock); 3168 spin_unlock(&dev->dev_reservation_lock);
3172 3169
3173 if (pr_tmpl->pr_aptpl_active) { 3170 if (pr_tmpl->pr_aptpl_active) {
3174 ret = core_scsi3_update_and_write_aptpl(cmd->se_dev, 3171 ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
3175 &pr_reg_n->pr_aptpl_buf[0], 3172 &pr_reg_n->pr_aptpl_buf[0],
3176 pr_tmpl->pr_aptpl_buf_len); 3173 pr_tmpl->pr_aptpl_buf_len);
3177 if (!ret) 3174 if (!ret)
3178 pr_debug("SPC-3 PR: Updated APTPL" 3175 pr_debug("SPC-3 PR: Updated APTPL"
3179 " metadata for PREEMPT%s\n", (abort) ? 3176 " metadata for PREEMPT%s\n", (abort) ?
3180 "_AND_ABORT" : ""); 3177 "_AND_ABORT" : "");
3181 } 3178 }
3182 3179
3183 core_scsi3_put_pr_reg(pr_reg_n); 3180 core_scsi3_put_pr_reg(pr_reg_n);
3184 core_scsi3_pr_generation(cmd->se_dev); 3181 core_scsi3_pr_generation(cmd->se_dev);
3185 return 0; 3182 return 0;
3186 } 3183 }
3187 /* 3184 /*
3188 * The PREEMPTing SA reservation key matches that of the 3185 * The PREEMPTing SA reservation key matches that of the
3189 * existing persistent reservation. First, check whether 3186 * existing persistent reservation. First, check whether
3190 * we are preempting our own reservation. 3187 * we are preempting our own reservation.
3191 * From spc4r17, section 5.7.11.4.3 Preempting 3188 * From spc4r17, section 5.7.11.4.3 Preempting
3192 * persistent reservations and registration handling 3189 * persistent reservations and registration handling
3193 * 3190 *
3194 * If an all registrants persistent reservation is not 3191 * If an all registrants persistent reservation is not
3195 * present, it is not an error for the persistent 3192 * present, it is not an error for the persistent
3196 * reservation holder to preempt itself (i.e., a 3193 * reservation holder to preempt itself (i.e., a
3197 * PERSISTENT RESERVE OUT with a PREEMPT service action 3194 * PERSISTENT RESERVE OUT with a PREEMPT service action
3198 * or a PREEMPT AND ABORT service action with the 3195 * or a PREEMPT AND ABORT service action with the
3199 * SERVICE ACTION RESERVATION KEY value equal to the 3196 * SERVICE ACTION RESERVATION KEY value equal to the
3200 * persistent reservation holder's reservation key that 3197 * persistent reservation holder's reservation key that
3201 * is received from the persistent reservation holder). 3198 * is received from the persistent reservation holder).
3202 * In that case, the device server shall establish the 3199 * In that case, the device server shall establish the
3203 * new persistent reservation and maintain the 3200 * new persistent reservation and maintain the
3204 * registration. 3201 * registration.
3205 */ 3202 */
3206 prh_type = pr_res_holder->pr_res_type; 3203 prh_type = pr_res_holder->pr_res_type;
3207 prh_scope = pr_res_holder->pr_res_scope; 3204 prh_scope = pr_res_holder->pr_res_scope;
3208 /* 3205 /*
3209 * If the SERVICE ACTION RESERVATION KEY field identifies a 3206 * If the SERVICE ACTION RESERVATION KEY field identifies a
3210 * persistent reservation holder (see 5.7.10), the device 3207 * persistent reservation holder (see 5.7.10), the device
3211 * server shall perform a preempt by doing the following as 3208 * server shall perform a preempt by doing the following as
3212 * an uninterrupted series of actions: 3209 * an uninterrupted series of actions:
3213 * 3210 *
3214 * a) Release the persistent reservation for the holder 3211 * a) Release the persistent reservation for the holder
3215 * identified by the SERVICE ACTION RESERVATION KEY field; 3212 * identified by the SERVICE ACTION RESERVATION KEY field;
3216 */ 3213 */
3217 if (pr_reg_n != pr_res_holder) 3214 if (pr_reg_n != pr_res_holder)
3218 __core_scsi3_complete_pro_release(dev, 3215 __core_scsi3_complete_pro_release(dev,
3219 pr_res_holder->pr_reg_nacl, 3216 pr_res_holder->pr_reg_nacl,
3220 dev->dev_pr_res_holder, 0); 3217 dev->dev_pr_res_holder, 0);
3221 /* 3218 /*
3222 * b) Remove the registrations for all I_T nexuses identified 3219 * b) Remove the registrations for all I_T nexuses identified
3223 * by the SERVICE ACTION RESERVATION KEY field, except the 3220 * by the SERVICE ACTION RESERVATION KEY field, except the
3224 * I_T nexus that is being used for the PERSISTENT RESERVE 3221 * I_T nexus that is being used for the PERSISTENT RESERVE
3225 * OUT command. If an all registrants persistent reservation 3222 * OUT command. If an all registrants persistent reservation
3226 * is present and the SERVICE ACTION RESERVATION KEY field 3223 * is present and the SERVICE ACTION RESERVATION KEY field
3227 * is set to zero, then all registrations shall be removed 3224 * is set to zero, then all registrations shall be removed
3228 * except for that of the I_T nexus that is being used for 3225 * except for that of the I_T nexus that is being used for
3229 * the PERSISTENT RESERVE OUT command; 3226 * the PERSISTENT RESERVE OUT command;
3230 */ 3227 */
3231 spin_lock(&pr_tmpl->registration_lock); 3228 spin_lock(&pr_tmpl->registration_lock);
3232 list_for_each_entry_safe(pr_reg, pr_reg_tmp, 3229 list_for_each_entry_safe(pr_reg, pr_reg_tmp,
3233 &pr_tmpl->registration_list, pr_reg_list) { 3230 &pr_tmpl->registration_list, pr_reg_list) {
3234 3231
3235 calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0; 3232 calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
3236 if (calling_it_nexus) 3233 if (calling_it_nexus)
3237 continue; 3234 continue;
3238 3235
3239 if (pr_reg->pr_res_key != sa_res_key) 3236 if (pr_reg->pr_res_key != sa_res_key)
3240 continue; 3237 continue;
3241 3238
3242 pr_reg_nacl = pr_reg->pr_reg_nacl; 3239 pr_reg_nacl = pr_reg->pr_reg_nacl;
3243 pr_res_mapped_lun = pr_reg->pr_res_mapped_lun; 3240 pr_res_mapped_lun = pr_reg->pr_res_mapped_lun;
3244 __core_scsi3_free_registration(dev, pr_reg, 3241 __core_scsi3_free_registration(dev, pr_reg,
3245 (abort) ? &preempt_and_abort_list : NULL, 3242 (abort) ? &preempt_and_abort_list : NULL,
3246 calling_it_nexus); 3243 calling_it_nexus);
3247 /* 3244 /*
3248 * e) Establish a unit attention condition for the initiator 3245 * e) Establish a unit attention condition for the initiator
3249 * port associated with every I_T nexus that lost its 3246 * port associated with every I_T nexus that lost its
3250 * persistent reservation and/or registration, with the 3247 * persistent reservation and/or registration, with the
3251 * additional sense code set to REGISTRATIONS PREEMPTED; 3248 * additional sense code set to REGISTRATIONS PREEMPTED;
3252 */ 3249 */
3253 core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun, 0x2A, 3250 core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun, 0x2A,
3254 ASCQ_2AH_RESERVATIONS_PREEMPTED); 3251 ASCQ_2AH_RESERVATIONS_PREEMPTED);
3255 } 3252 }
3256 spin_unlock(&pr_tmpl->registration_lock); 3253 spin_unlock(&pr_tmpl->registration_lock);
3257 /* 3254 /*
3258 * c) Establish a persistent reservation for the preempting 3255 * c) Establish a persistent reservation for the preempting
3259 * I_T nexus using the contents of the SCOPE and TYPE fields; 3256 * I_T nexus using the contents of the SCOPE and TYPE fields;
3260 */ 3257 */
3261 __core_scsi3_complete_pro_preempt(dev, pr_reg_n, 3258 __core_scsi3_complete_pro_preempt(dev, pr_reg_n,
3262 (abort) ? &preempt_and_abort_list : NULL, 3259 (abort) ? &preempt_and_abort_list : NULL,
3263 type, scope, abort); 3260 type, scope, abort);
3264 /* 3261 /*
3265 * d) Process tasks as defined in 5.7.1; 3262 * d) Process tasks as defined in 5.7.1;
3266 * e) See above.. 3263 * e) See above..
3267 * f) If the type or scope has changed, then for every I_T nexus 3264 * f) If the type or scope has changed, then for every I_T nexus
3268 * whose reservation key was not removed, except for the I_T 3265 * whose reservation key was not removed, except for the I_T
3269 * nexus on which the PERSISTENT RESERVE OUT command was 3266 * nexus on which the PERSISTENT RESERVE OUT command was
3270 * received, the device server shall establish a unit 3267 * received, the device server shall establish a unit
3271 * attention condition for the initiator port associated with 3268 * attention condition for the initiator port associated with
3272 * that I_T nexus, with the additional sense code set to 3269 * that I_T nexus, with the additional sense code set to
3273 * RESERVATIONS RELEASED. If the type or scope have not 3270 * RESERVATIONS RELEASED. If the type or scope have not
3274 * changed, then no unit attention condition(s) shall be 3271 * changed, then no unit attention condition(s) shall be
3275 * established for this reason. 3272 * established for this reason.
3276 */ 3273 */
3277 if ((prh_type != type) || (prh_scope != scope)) { 3274 if ((prh_type != type) || (prh_scope != scope)) {
3278 spin_lock(&pr_tmpl->registration_lock); 3275 spin_lock(&pr_tmpl->registration_lock);
3279 list_for_each_entry_safe(pr_reg, pr_reg_tmp, 3276 list_for_each_entry_safe(pr_reg, pr_reg_tmp,
3280 &pr_tmpl->registration_list, pr_reg_list) { 3277 &pr_tmpl->registration_list, pr_reg_list) {
3281 3278
3282 calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0; 3279 calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
3283 if (calling_it_nexus) 3280 if (calling_it_nexus)
3284 continue; 3281 continue;
3285 3282
3286 core_scsi3_ua_allocate(pr_reg->pr_reg_nacl, 3283 core_scsi3_ua_allocate(pr_reg->pr_reg_nacl,
3287 pr_reg->pr_res_mapped_lun, 0x2A, 3284 pr_reg->pr_res_mapped_lun, 0x2A,
3288 ASCQ_2AH_RESERVATIONS_RELEASED); 3285 ASCQ_2AH_RESERVATIONS_RELEASED);
3289 } 3286 }
3290 spin_unlock(&pr_tmpl->registration_lock); 3287 spin_unlock(&pr_tmpl->registration_lock);
3291 } 3288 }
3292 spin_unlock(&dev->dev_reservation_lock); 3289 spin_unlock(&dev->dev_reservation_lock);
3293 /* 3290 /*
3294 * Call LUN_RESET logic upon the list of struct t10_pr_registration; 3291 * Call LUN_RESET logic upon the list of struct t10_pr_registration;
3295 * all received CDBs for the matching existing reservation and 3292 * all received CDBs for the matching existing reservation and
3296 * registrations undergo ABORT_TASK logic. 3293 * registrations undergo ABORT_TASK logic.
3297 * 3294 *
3298 * From there, core_scsi3_release_preempt_and_abort() will 3295 * From there, core_scsi3_release_preempt_and_abort() will
3299 * release every registration in the list (all of which have 3296 * release every registration in the list (all of which have
3300 * already been removed from the primary pr_reg list), except the 3297 * already been removed from the primary pr_reg list), except the
3301 * new persistent reservation holder, the calling Initiator Port. 3298 * new persistent reservation holder, the calling Initiator Port.
3302 */ 3299 */
3303 if (abort) { 3300 if (abort) {
3304 core_tmr_lun_reset(dev, NULL, &preempt_and_abort_list, cmd); 3301 core_tmr_lun_reset(dev, NULL, &preempt_and_abort_list, cmd);
3305 core_scsi3_release_preempt_and_abort(&preempt_and_abort_list, 3302 core_scsi3_release_preempt_and_abort(&preempt_and_abort_list,
3306 pr_reg_n); 3303 pr_reg_n);
3307 } 3304 }
3308 3305
3309 if (pr_tmpl->pr_aptpl_active) { 3306 if (pr_tmpl->pr_aptpl_active) {
3310 ret = core_scsi3_update_and_write_aptpl(cmd->se_dev, 3307 ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
3311 &pr_reg_n->pr_aptpl_buf[0], 3308 &pr_reg_n->pr_aptpl_buf[0],
3312 pr_tmpl->pr_aptpl_buf_len); 3309 pr_tmpl->pr_aptpl_buf_len);
3313 if (!ret) 3310 if (!ret)
3314 pr_debug("SPC-3 PR: Updated APTPL metadata for PREEMPT" 3311 pr_debug("SPC-3 PR: Updated APTPL metadata for PREEMPT"
3315 "%s\n", (abort) ? "_AND_ABORT" : ""); 3312 "%s\n", (abort) ? "_AND_ABORT" : "");
3316 } 3313 }
3317 3314
3318 core_scsi3_put_pr_reg(pr_reg_n); 3315 core_scsi3_put_pr_reg(pr_reg_n);
3319 core_scsi3_pr_generation(cmd->se_dev); 3316 core_scsi3_pr_generation(cmd->se_dev);
3320 return 0; 3317 return 0;
3321 } 3318 }
3322 3319
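[Editor's note] The preempt path above interleaves the spc4r17 Table 52 decisions with locking and list surgery, which makes the key-matching rules easy to lose. As a reading aid, here is a minimal standalone sketch of just the removal decision; the helper name and parameters are illustrative stand-ins, not code from this patch:

	#include <linux/types.h>

	/*
	 * Sketch of the spc4r17 Table 52 decision inside core_scsi3_pro_preempt():
	 * should a PREEMPT remove this registration? Illustrative only.
	 */
	static bool preempt_removes_registration(bool all_reg, u64 sa_res_key,
						 u64 reg_key, bool calling_it_nexus)
	{
		if (!all_reg)
			/* Non-all-registrants: remove only matching keys. */
			return reg_key == sa_res_key;
		/*
		 * All-registrants reservation: a zero SA key implies an implicit
		 * release and removes every other registration; a non-zero SA key
		 * removes only matching registrations, again sparing the caller.
		 */
		if (sa_res_key && reg_key != sa_res_key)
			return false;
		return !calling_it_nexus;
	}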
3323 static int core_scsi3_emulate_pro_preempt( 3320 static int core_scsi3_emulate_pro_preempt(
3324 struct se_cmd *cmd, 3321 struct se_cmd *cmd,
3325 int type, 3322 int type,
3326 int scope, 3323 int scope,
3327 u64 res_key, 3324 u64 res_key,
3328 u64 sa_res_key, 3325 u64 sa_res_key,
3329 int abort) 3326 int abort)
3330 { 3327 {
3331 int ret = 0; 3328 int ret = 0;
3332 3329
3333 switch (type) { 3330 switch (type) {
3334 case PR_TYPE_WRITE_EXCLUSIVE: 3331 case PR_TYPE_WRITE_EXCLUSIVE:
3335 case PR_TYPE_EXCLUSIVE_ACCESS: 3332 case PR_TYPE_EXCLUSIVE_ACCESS:
3336 case PR_TYPE_WRITE_EXCLUSIVE_REGONLY: 3333 case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
3337 case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY: 3334 case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
3338 case PR_TYPE_WRITE_EXCLUSIVE_ALLREG: 3335 case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
3339 case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG: 3336 case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
3340 ret = core_scsi3_pro_preempt(cmd, type, scope, 3337 ret = core_scsi3_pro_preempt(cmd, type, scope,
3341 res_key, sa_res_key, abort); 3338 res_key, sa_res_key, abort);
3342 break; 3339 break;
3343 default: 3340 default:
3344 pr_err("SPC-3 PR: Unknown Service Action PREEMPT%s" 3341 pr_err("SPC-3 PR: Unknown Service Action PREEMPT%s"
3345 " Type: 0x%02x\n", (abort) ? "_AND_ABORT" : "", type); 3342 " Type: 0x%02x\n", (abort) ? "_AND_ABORT" : "", type);
3346 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; 3343 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
3347 return -EINVAL; 3344 return -EINVAL;
3348 } 3345 }
3349 3346
3350 return ret; 3347 return ret;
3351 } 3348 }
3352 3349
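[Editor's note] For reference, the six TYPE codes accepted by the switch above are the SPC-4 PERSISTENT RESERVE OUT TYPE field values; the enum below is an illustrative rendering of what the PR_TYPE_* constants are expected to encode, not a definition taken from this patch:

	/* spc4r17 PERSISTENT RESERVE OUT TYPE field values (illustrative) */
	enum {
		ILLUSTRATIVE_PR_WRITE_EXCLUSIVE          = 0x01,
		ILLUSTRATIVE_PR_EXCLUSIVE_ACCESS         = 0x03,
		ILLUSTRATIVE_PR_WRITE_EXCLUSIVE_REGONLY  = 0x05,
		ILLUSTRATIVE_PR_EXCLUSIVE_ACCESS_REGONLY = 0x06,
		ILLUSTRATIVE_PR_WRITE_EXCLUSIVE_ALLREG   = 0x07,
		ILLUSTRATIVE_PR_EXCLUSIVE_ACCESS_ALLREG  = 0x08,
	};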
3353 3350
3354 static int core_scsi3_emulate_pro_register_and_move( 3351 static int core_scsi3_emulate_pro_register_and_move(
3355 struct se_cmd *cmd, 3352 struct se_cmd *cmd,
3356 u64 res_key, 3353 u64 res_key,
3357 u64 sa_res_key, 3354 u64 sa_res_key,
3358 int aptpl, 3355 int aptpl,
3359 int unreg) 3356 int unreg)
3360 { 3357 {
3361 struct se_session *se_sess = cmd->se_sess; 3358 struct se_session *se_sess = cmd->se_sess;
3362 struct se_device *dev = cmd->se_dev; 3359 struct se_device *dev = cmd->se_dev;
3363 struct se_dev_entry *se_deve, *dest_se_deve = NULL; 3360 struct se_dev_entry *se_deve, *dest_se_deve = NULL;
3364 struct se_lun *se_lun = cmd->se_lun; 3361 struct se_lun *se_lun = cmd->se_lun;
3365 struct se_node_acl *pr_res_nacl, *pr_reg_nacl, *dest_node_acl = NULL; 3362 struct se_node_acl *pr_res_nacl, *pr_reg_nacl, *dest_node_acl = NULL;
3366 struct se_port *se_port; 3363 struct se_port *se_port;
3367 struct se_portal_group *se_tpg, *dest_se_tpg = NULL; 3364 struct se_portal_group *se_tpg, *dest_se_tpg = NULL;
3368 struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops; 3365 struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops;
3369 struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg; 3366 struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg;
3370 struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; 3367 struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
3371 unsigned char *buf; 3368 unsigned char *buf;
3372 unsigned char *initiator_str; 3369 unsigned char *initiator_str;
3373 char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN]; 3370 char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
3374 u32 tid_len, tmp_tid_len; 3371 u32 tid_len, tmp_tid_len;
3375 int new_reg = 0, type, scope, ret, matching_iname, prf_isid; 3372 int new_reg = 0, type, scope, ret, matching_iname, prf_isid;
3376 unsigned short rtpi; 3373 unsigned short rtpi;
3377 unsigned char proto_ident; 3374 unsigned char proto_ident;
3378 3375
3379 if (!se_sess || !se_lun) { 3376 if (!se_sess || !se_lun) {
3380 pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); 3377 pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
3381 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 3378 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3382 return -EINVAL; 3379 return -EINVAL;
3383 } 3380 }
3384 memset(dest_iport, 0, 64); 3381 memset(dest_iport, 0, 64);
3385 memset(i_buf, 0, PR_REG_ISID_ID_LEN); 3382 memset(i_buf, 0, PR_REG_ISID_ID_LEN);
3386 se_tpg = se_sess->se_tpg; 3383 se_tpg = se_sess->se_tpg;
3387 tf_ops = se_tpg->se_tpg_tfo; 3384 tf_ops = se_tpg->se_tpg_tfo;
3388 se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; 3385 se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
3389 /* 3386 /*
3390 * Follow logic from spc4r17 Section 5.7.8, Table 50 -- 3387 * Follow logic from spc4r17 Section 5.7.8, Table 50 --
3391 * Register behaviors for a REGISTER AND MOVE service action 3388 * Register behaviors for a REGISTER AND MOVE service action
3392 * 3389 *
3393 * Locate the existing *pr_reg via struct se_node_acl pointers 3390 * Locate the existing *pr_reg via struct se_node_acl pointers
3394 */ 3391 */
3395 pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, 3392 pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
3396 se_sess); 3393 se_sess);
3397 if (!pr_reg) { 3394 if (!pr_reg) {
3398 pr_err("SPC-3 PR: Unable to locate PR_REGISTERED" 3395 pr_err("SPC-3 PR: Unable to locate PR_REGISTERED"
3399 " *pr_reg for REGISTER_AND_MOVE\n"); 3396 " *pr_reg for REGISTER_AND_MOVE\n");
3400 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 3397 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3401 return -EINVAL; 3398 return -EINVAL;
3402 } 3399 }
3403 /* 3400 /*
3404 * The provided reservation key must match the existing reservation key 3401 * The provided reservation key must match the existing reservation key
3405 * provided during this initiator's I_T nexus registration. 3402 * provided during this initiator's I_T nexus registration.
3406 */ 3403 */
3407 if (res_key != pr_reg->pr_res_key) { 3404 if (res_key != pr_reg->pr_res_key) {
3408 pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received" 3405 pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received"
3409 " res_key: 0x%016Lx does not match existing SA REGISTER" 3406 " res_key: 0x%016Lx does not match existing SA REGISTER"
3410 " res_key: 0x%016Lx\n", res_key, pr_reg->pr_res_key); 3407 " res_key: 0x%016Lx\n", res_key, pr_reg->pr_res_key);
3411 core_scsi3_put_pr_reg(pr_reg); 3408 core_scsi3_put_pr_reg(pr_reg);
3412 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; 3409 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
3413 return -EINVAL; 3410 return -EINVAL;
3414 } 3411 }
3415 /* 3412 /*
3416 * The service action reservation key needs to be non-zero 3413 * The service action reservation key needs to be non-zero
3417 */ 3414 */
3418 if (!sa_res_key) { 3415 if (!sa_res_key) {
3419 pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received zero" 3416 pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received zero"
3420 " sa_res_key\n"); 3417 " sa_res_key\n");
3421 core_scsi3_put_pr_reg(pr_reg); 3418 core_scsi3_put_pr_reg(pr_reg);
3422 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; 3419 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3423 return -EINVAL; 3420 return -EINVAL;
3424 } 3421 }
3425 3422
3426 /* 3423 /*
3427 * Determine the Relative Target Port Identifier to which the 3424 * Determine the Relative Target Port Identifier to which the
3428 * reservation will be moved, and the length of the TransportID 3425 * reservation will be moved, and the length of the TransportID
3429 * containing the SCSI initiator WWN information. 3426 * containing the SCSI initiator WWN information.
3430 */ 3427 */
3431 buf = transport_kmap_first_data_page(cmd); 3428 buf = transport_kmap_first_data_page(cmd);
3432 rtpi = (buf[18] & 0xff) << 8; 3429 rtpi = (buf[18] & 0xff) << 8;
3433 rtpi |= buf[19] & 0xff; 3430 rtpi |= buf[19] & 0xff;
3434 tid_len = (buf[20] & 0xff) << 24; 3431 tid_len = (buf[20] & 0xff) << 24;
3435 tid_len |= (buf[21] & 0xff) << 16; 3432 tid_len |= (buf[21] & 0xff) << 16;
3436 tid_len |= (buf[22] & 0xff) << 8; 3433 tid_len |= (buf[22] & 0xff) << 8;
3437 tid_len |= buf[23] & 0xff; 3434 tid_len |= buf[23] & 0xff;
3438 transport_kunmap_first_data_page(cmd); 3435 transport_kunmap_first_data_page(cmd);
3439 buf = NULL; 3436 buf = NULL;
3440 3437
3441 if ((tid_len + 24) != cmd->data_length) { 3438 if ((tid_len + 24) != cmd->data_length) {
3442 pr_err("SPC-3 PR: Illegal tid_len: %u + 24 byte header" 3439 pr_err("SPC-3 PR: Illegal tid_len: %u + 24 byte header"
3443 " does not equal CDB data_length: %u\n", tid_len, 3440 " does not equal CDB data_length: %u\n", tid_len,
3444 cmd->data_length); 3441 cmd->data_length);
3445 core_scsi3_put_pr_reg(pr_reg); 3442 core_scsi3_put_pr_reg(pr_reg);
3446 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; 3443 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3447 return -EINVAL; 3444 return -EINVAL;
3448 } 3445 }
3449 3446
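[Editor's note] The shift-and-OR assembly of rtpi and tid_len above is simply a big-endian read of bytes 18-19 and 20-23 of the parameter data. A sketch of the equivalent using the kernel's unaligned helpers (the wrapper name is hypothetical; get_unaligned_be16/be32 come from asm/unaligned.h):

	#include <linux/types.h>
	#include <asm/unaligned.h>

	/* Illustrative equivalent of the open-coded shifts above. */
	static void pr_parse_reg_and_move_hdr(const unsigned char *buf,
					      unsigned short *rtpi, u32 *tid_len)
	{
		*rtpi = get_unaligned_be16(&buf[18]);	/* RELATIVE TARGET PORT IDENTIFIER */
		*tid_len = get_unaligned_be32(&buf[20]);	/* TRANSPORTID LENGTH */
	}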
3450 spin_lock(&dev->se_port_lock); 3447 spin_lock(&dev->se_port_lock);
3451 list_for_each_entry(se_port, &dev->dev_sep_list, sep_list) { 3448 list_for_each_entry(se_port, &dev->dev_sep_list, sep_list) {
3452 if (se_port->sep_rtpi != rtpi) 3449 if (se_port->sep_rtpi != rtpi)
3453 continue; 3450 continue;
3454 dest_se_tpg = se_port->sep_tpg; 3451 dest_se_tpg = se_port->sep_tpg;
3455 if (!dest_se_tpg) 3452 if (!dest_se_tpg)
3456 continue; 3453 continue;
3457 dest_tf_ops = dest_se_tpg->se_tpg_tfo; 3454 dest_tf_ops = dest_se_tpg->se_tpg_tfo;
3458 if (!dest_tf_ops) 3455 if (!dest_tf_ops)
3459 continue; 3456 continue;
3460 3457
3461 atomic_inc(&dest_se_tpg->tpg_pr_ref_count); 3458 atomic_inc(&dest_se_tpg->tpg_pr_ref_count);
3462 smp_mb__after_atomic_inc(); 3459 smp_mb__after_atomic_inc();
3463 spin_unlock(&dev->se_port_lock); 3460 spin_unlock(&dev->se_port_lock);
3464 3461
3465 ret = core_scsi3_tpg_depend_item(dest_se_tpg); 3462 ret = core_scsi3_tpg_depend_item(dest_se_tpg);
3466 if (ret != 0) { 3463 if (ret != 0) {
3467 pr_err("core_scsi3_tpg_depend_item() failed" 3464 pr_err("core_scsi3_tpg_depend_item() failed"
3468 " for dest_se_tpg\n"); 3465 " for dest_se_tpg\n");
3469 atomic_dec(&dest_se_tpg->tpg_pr_ref_count); 3466 atomic_dec(&dest_se_tpg->tpg_pr_ref_count);
3470 smp_mb__after_atomic_dec(); 3467 smp_mb__after_atomic_dec();
3471 core_scsi3_put_pr_reg(pr_reg); 3468 core_scsi3_put_pr_reg(pr_reg);
3472 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 3469 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3473 return -EINVAL; 3470 return -EINVAL;
3474 } 3471 }
3475 3472
3476 spin_lock(&dev->se_port_lock); 3473 spin_lock(&dev->se_port_lock);
3477 break; 3474 break;
3478 } 3475 }
3479 spin_unlock(&dev->se_port_lock); 3476 spin_unlock(&dev->se_port_lock);
3480 3477
3481 if (!dest_se_tpg || !dest_tf_ops) { 3478 if (!dest_se_tpg || !dest_tf_ops) {
3482 pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate" 3479 pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
3483 " fabric ops from Relative Target Port Identifier:" 3480 " fabric ops from Relative Target Port Identifier:"
3484 " %hu\n", rtpi); 3481 " %hu\n", rtpi);
3485 core_scsi3_put_pr_reg(pr_reg); 3482 core_scsi3_put_pr_reg(pr_reg);
3486 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; 3483 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3487 return -EINVAL; 3484 return -EINVAL;
3488 } 3485 }
3489 3486
3490 buf = transport_kmap_first_data_page(cmd); 3487 buf = transport_kmap_first_data_page(cmd);
3491 proto_ident = (buf[24] & 0x0f); 3488 proto_ident = (buf[24] & 0x0f);
3492 #if 0 3489 #if 0
3493 pr_debug("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:" 3490 pr_debug("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:"
3494 " 0x%02x\n", proto_ident); 3491 " 0x%02x\n", proto_ident);
3495 #endif 3492 #endif
3496 if (proto_ident != dest_tf_ops->get_fabric_proto_ident(dest_se_tpg)) { 3493 if (proto_ident != dest_tf_ops->get_fabric_proto_ident(dest_se_tpg)) {
3497 pr_err("SPC-3 PR REGISTER_AND_MOVE: Received" 3494 pr_err("SPC-3 PR REGISTER_AND_MOVE: Received"
3498 " proto_ident: 0x%02x does not match ident: 0x%02x" 3495 " proto_ident: 0x%02x does not match ident: 0x%02x"
3499 " from fabric: %s\n", proto_ident, 3496 " from fabric: %s\n", proto_ident,
3500 dest_tf_ops->get_fabric_proto_ident(dest_se_tpg), 3497 dest_tf_ops->get_fabric_proto_ident(dest_se_tpg),
3501 dest_tf_ops->get_fabric_name()); 3498 dest_tf_ops->get_fabric_name());
3502 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; 3499 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3503 ret = -EINVAL; 3500 ret = -EINVAL;
3504 goto out; 3501 goto out;
3505 } 3502 }
3506 if (dest_tf_ops->tpg_parse_pr_out_transport_id == NULL) { 3503 if (dest_tf_ops->tpg_parse_pr_out_transport_id == NULL) {
3507 pr_err("SPC-3 PR REGISTER_AND_MOVE: Fabric does not" 3504 pr_err("SPC-3 PR REGISTER_AND_MOVE: Fabric does not"
3508 " containg a valid tpg_parse_pr_out_transport_id" 3505 " containg a valid tpg_parse_pr_out_transport_id"
3509 " function pointer\n"); 3506 " function pointer\n");
3510 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 3507 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3511 ret = -EINVAL; 3508 ret = -EINVAL;
3512 goto out; 3509 goto out;
3513 } 3510 }
3514 initiator_str = dest_tf_ops->tpg_parse_pr_out_transport_id(dest_se_tpg, 3511 initiator_str = dest_tf_ops->tpg_parse_pr_out_transport_id(dest_se_tpg,
3515 (const char *)&buf[24], &tmp_tid_len, &iport_ptr); 3512 (const char *)&buf[24], &tmp_tid_len, &iport_ptr);
3516 if (!initiator_str) { 3513 if (!initiator_str) {
3517 pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate" 3514 pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
3518 " initiator_str from Transport ID\n"); 3515 " initiator_str from Transport ID\n");
3519 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; 3516 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3520 ret = -EINVAL; 3517 ret = -EINVAL;
3521 goto out; 3518 goto out;
3522 } 3519 }
3523 3520
3524 transport_kunmap_first_data_page(cmd); 3521 transport_kunmap_first_data_page(cmd);
3525 buf = NULL; 3522 buf = NULL;
3526 3523
3527 pr_debug("SPC-3 PR [%s] Extracted initiator %s identifier: %s" 3524 pr_debug("SPC-3 PR [%s] Extracted initiator %s identifier: %s"
3528 " %s\n", dest_tf_ops->get_fabric_name(), (iport_ptr != NULL) ? 3525 " %s\n", dest_tf_ops->get_fabric_name(), (iport_ptr != NULL) ?
3529 "port" : "device", initiator_str, (iport_ptr != NULL) ? 3526 "port" : "device", initiator_str, (iport_ptr != NULL) ?
3530 iport_ptr : ""); 3527 iport_ptr : "");
3531 /* 3528 /*
3532 * If a PERSISTENT RESERVE OUT command with a REGISTER AND MOVE service 3529 * If a PERSISTENT RESERVE OUT command with a REGISTER AND MOVE service
3533 * action specifies a TransportID that is the same as the initiator port 3530 * action specifies a TransportID that is the same as the initiator port
3534 * of the I_T nexus for the command received, then the command shall 3531 * of the I_T nexus for the command received, then the command shall
3535 * be terminated with CHECK CONDITION status, with the sense key set to 3532 * be terminated with CHECK CONDITION status, with the sense key set to
3536 * ILLEGAL REQUEST, and the additional sense code set to INVALID FIELD 3533 * ILLEGAL REQUEST, and the additional sense code set to INVALID FIELD
3537 * IN PARAMETER LIST. 3534 * IN PARAMETER LIST.
3538 */ 3535 */
3539 pr_reg_nacl = pr_reg->pr_reg_nacl; 3536 pr_reg_nacl = pr_reg->pr_reg_nacl;
3540 matching_iname = (!strcmp(initiator_str, 3537 matching_iname = (!strcmp(initiator_str,
3541 pr_reg_nacl->initiatorname)) ? 1 : 0; 3538 pr_reg_nacl->initiatorname)) ? 1 : 0;
3542 if (!matching_iname) 3539 if (!matching_iname)
3543 goto after_iport_check; 3540 goto after_iport_check;
3544 3541
3545 if (!iport_ptr || !pr_reg->isid_present_at_reg) { 3542 if (!iport_ptr || !pr_reg->isid_present_at_reg) {
3546 pr_err("SPC-3 PR REGISTER_AND_MOVE: TransportID: %s" 3543 pr_err("SPC-3 PR REGISTER_AND_MOVE: TransportID: %s"
3547 " matches: %s on received I_T Nexus\n", initiator_str, 3544 " matches: %s on received I_T Nexus\n", initiator_str,
3548 pr_reg_nacl->initiatorname); 3545 pr_reg_nacl->initiatorname);
3549 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; 3546 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3550 ret = -EINVAL; 3547 ret = -EINVAL;
3551 goto out; 3548 goto out;
3552 } 3549 }
3553 if (!strcmp(iport_ptr, pr_reg->pr_reg_isid)) { 3550 if (!strcmp(iport_ptr, pr_reg->pr_reg_isid)) {
3554 pr_err("SPC-3 PR REGISTER_AND_MOVE: TransportID: %s %s" 3551 pr_err("SPC-3 PR REGISTER_AND_MOVE: TransportID: %s %s"
3555 " matches: %s %s on received I_T Nexus\n", 3552 " matches: %s %s on received I_T Nexus\n",
3556 initiator_str, iport_ptr, pr_reg_nacl->initiatorname, 3553 initiator_str, iport_ptr, pr_reg_nacl->initiatorname,
3557 pr_reg->pr_reg_isid); 3554 pr_reg->pr_reg_isid);
3558 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; 3555 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3559 ret = -EINVAL; 3556 ret = -EINVAL;
3560 goto out; 3557 goto out;
3561 } 3558 }
3562 after_iport_check: 3559 after_iport_check:
3563 /* 3560 /*
3564 * Locate the destination struct se_node_acl from the received Transport ID 3561 * Locate the destination struct se_node_acl from the received Transport ID
3565 */ 3562 */
3566 spin_lock_irq(&dest_se_tpg->acl_node_lock); 3563 spin_lock_irq(&dest_se_tpg->acl_node_lock);
3567 dest_node_acl = __core_tpg_get_initiator_node_acl(dest_se_tpg, 3564 dest_node_acl = __core_tpg_get_initiator_node_acl(dest_se_tpg,
3568 initiator_str); 3565 initiator_str);
3569 if (dest_node_acl) { 3566 if (dest_node_acl) {
3570 atomic_inc(&dest_node_acl->acl_pr_ref_count); 3567 atomic_inc(&dest_node_acl->acl_pr_ref_count);
3571 smp_mb__after_atomic_inc(); 3568 smp_mb__after_atomic_inc();
3572 } 3569 }
3573 spin_unlock_irq(&dest_se_tpg->acl_node_lock); 3570 spin_unlock_irq(&dest_se_tpg->acl_node_lock);
3574 3571
3575 if (!dest_node_acl) { 3572 if (!dest_node_acl) {
3576 pr_err("Unable to locate %s dest_node_acl for" 3573 pr_err("Unable to locate %s dest_node_acl for"
3577 " TransportID%s\n", dest_tf_ops->get_fabric_name(), 3574 " TransportID%s\n", dest_tf_ops->get_fabric_name(),
3578 initiator_str); 3575 initiator_str);
3579 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; 3576 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3580 ret = -EINVAL; 3577 ret = -EINVAL;
3581 goto out; 3578 goto out;
3582 } 3579 }
3583 ret = core_scsi3_nodeacl_depend_item(dest_node_acl); 3580 ret = core_scsi3_nodeacl_depend_item(dest_node_acl);
3584 if (ret != 0) { 3581 if (ret != 0) {
3585 pr_err("core_scsi3_nodeacl_depend_item() for" 3582 pr_err("core_scsi3_nodeacl_depend_item() for"
3586 " dest_node_acl\n"); 3583 " dest_node_acl\n");
3587 atomic_dec(&dest_node_acl->acl_pr_ref_count); 3584 atomic_dec(&dest_node_acl->acl_pr_ref_count);
3588 smp_mb__after_atomic_dec(); 3585 smp_mb__after_atomic_dec();
3589 dest_node_acl = NULL; 3586 dest_node_acl = NULL;
3590 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; 3587 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3591 ret = -EINVAL; 3588 ret = -EINVAL;
3592 goto out; 3589 goto out;
3593 } 3590 }
3594 #if 0 3591 #if 0
3595 pr_debug("SPC-3 PR REGISTER_AND_MOVE: Found %s dest_node_acl:" 3592 pr_debug("SPC-3 PR REGISTER_AND_MOVE: Found %s dest_node_acl:"
3596 " %s from TransportID\n", dest_tf_ops->get_fabric_name(), 3593 " %s from TransportID\n", dest_tf_ops->get_fabric_name(),
3597 dest_node_acl->initiatorname); 3594 dest_node_acl->initiatorname);
3598 #endif 3595 #endif
3599 /* 3596 /*
3600 * Locate the struct se_dev_entry pointer for the matching RELATIVE TARGET 3597 * Locate the struct se_dev_entry pointer for the matching RELATIVE TARGET
3601 * PORT IDENTIFIER. 3598 * PORT IDENTIFIER.
3602 */ 3599 */
3603 dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl, rtpi); 3600 dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl, rtpi);
3604 if (!dest_se_deve) { 3601 if (!dest_se_deve) {
3605 pr_err("Unable to locate %s dest_se_deve from RTPI:" 3602 pr_err("Unable to locate %s dest_se_deve from RTPI:"
3606 " %hu\n", dest_tf_ops->get_fabric_name(), rtpi); 3603 " %hu\n", dest_tf_ops->get_fabric_name(), rtpi);
3607 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; 3604 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3608 ret = -EINVAL; 3605 ret = -EINVAL;
3609 goto out; 3606 goto out;
3610 } 3607 }
3611 3608
3612 ret = core_scsi3_lunacl_depend_item(dest_se_deve); 3609 ret = core_scsi3_lunacl_depend_item(dest_se_deve);
3613 if (ret < 0) { 3610 if (ret < 0) {
3614 pr_err("core_scsi3_lunacl_depend_item() failed\n"); 3611 pr_err("core_scsi3_lunacl_depend_item() failed\n");
3615 atomic_dec(&dest_se_deve->pr_ref_count); 3612 atomic_dec(&dest_se_deve->pr_ref_count);
3616 smp_mb__after_atomic_dec(); 3613 smp_mb__after_atomic_dec();
3617 dest_se_deve = NULL; 3614 dest_se_deve = NULL;
3618 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 3615 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3619 ret = -EINVAL; 3616 ret = -EINVAL;
3620 goto out; 3617 goto out;
3621 } 3618 }
3622 #if 0 3619 #if 0
3623 pr_debug("SPC-3 PR REGISTER_AND_MOVE: Located %s node %s LUN" 3620 pr_debug("SPC-3 PR REGISTER_AND_MOVE: Located %s node %s LUN"
3624 " ACL for dest_se_deve->mapped_lun: %u\n", 3621 " ACL for dest_se_deve->mapped_lun: %u\n",
3625 dest_tf_ops->get_fabric_name(), dest_node_acl->initiatorname, 3622 dest_tf_ops->get_fabric_name(), dest_node_acl->initiatorname,
3626 dest_se_deve->mapped_lun); 3623 dest_se_deve->mapped_lun);
3627 #endif 3624 #endif
3628 /* 3625 /*
3629 * A persistent reservation needs to already exist in order to 3626 * A persistent reservation needs to already exist in order to
3630 * successfully complete the REGISTER_AND_MOVE service action.. 3627 * successfully complete the REGISTER_AND_MOVE service action..
3631 */ 3628 */
3632 spin_lock(&dev->dev_reservation_lock); 3629 spin_lock(&dev->dev_reservation_lock);
3633 pr_res_holder = dev->dev_pr_res_holder; 3630 pr_res_holder = dev->dev_pr_res_holder;
3634 if (!pr_res_holder) { 3631 if (!pr_res_holder) {
3635 pr_warn("SPC-3 PR REGISTER_AND_MOVE: No reservation" 3632 pr_warn("SPC-3 PR REGISTER_AND_MOVE: No reservation"
3636 " currently held\n"); 3633 " currently held\n");
3637 spin_unlock(&dev->dev_reservation_lock); 3634 spin_unlock(&dev->dev_reservation_lock);
3638 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; 3635 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
3639 ret = -EINVAL; 3636 ret = -EINVAL;
3640 goto out; 3637 goto out;
3641 } 3638 }
3642 /* 3639 /*
3643 * The I_T Nexus on which the command was received must be the reservation holder. 3640 * The I_T Nexus on which the command was received must be the reservation holder.
3644 * 3641 *
3645 * From spc4r17 section 5.7.8 Table 50 -- 3642 * From spc4r17 section 5.7.8 Table 50 --
3646 * Register behaviors for a REGISTER AND MOVE service action 3643 * Register behaviors for a REGISTER AND MOVE service action
3647 */ 3644 */
3648 if (pr_res_holder != pr_reg) { 3645 if (pr_res_holder != pr_reg) {
3649 pr_warn("SPC-3 PR REGISTER_AND_MOVE: Calling I_T" 3646 pr_warn("SPC-3 PR REGISTER_AND_MOVE: Calling I_T"
3650 " Nexus is not reservation holder\n"); 3647 " Nexus is not reservation holder\n");
3651 spin_unlock(&dev->dev_reservation_lock); 3648 spin_unlock(&dev->dev_reservation_lock);
3652 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; 3649 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
3653 ret = -EINVAL; 3650 ret = -EINVAL;
3654 goto out; 3651 goto out;
3655 } 3652 }
3656 /* 3653 /*
3657 * From spc4r17 section 5.7.8: registering and moving reservation 3654 * From spc4r17 section 5.7.8: registering and moving reservation
3658 * 3655 *
3659 * If a PERSISTENT RESERVE OUT command with a REGISTER AND MOVE service 3656 * If a PERSISTENT RESERVE OUT command with a REGISTER AND MOVE service
3660 * action is received and the established persistent reservation is a 3657 * action is received and the established persistent reservation is a
3661 * Write Exclusive - All Registrants type or Exclusive Access - 3658 * Write Exclusive - All Registrants type or Exclusive Access -
3662 * All Registrants type reservation, then the command shall be completed 3659 * All Registrants type reservation, then the command shall be completed
3663 * with RESERVATION CONFLICT status. 3660 * with RESERVATION CONFLICT status.
3664 */ 3661 */
3665 if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) || 3662 if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
3666 (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) { 3663 (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
3667 pr_warn("SPC-3 PR REGISTER_AND_MOVE: Unable to move" 3664 pr_warn("SPC-3 PR REGISTER_AND_MOVE: Unable to move"
3668 " reservation for type: %s\n", 3665 " reservation for type: %s\n",
3669 core_scsi3_pr_dump_type(pr_res_holder->pr_res_type)); 3666 core_scsi3_pr_dump_type(pr_res_holder->pr_res_type));
3670 spin_unlock(&dev->dev_reservation_lock); 3667 spin_unlock(&dev->dev_reservation_lock);
3671 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; 3668 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
3672 ret = -EINVAL; 3669 ret = -EINVAL;
3673 goto out; 3670 goto out;
3674 } 3671 }
3675 pr_res_nacl = pr_res_holder->pr_reg_nacl; 3672 pr_res_nacl = pr_res_holder->pr_reg_nacl;
3676 /* 3673 /*
3677 * b) Ignore the contents of the (received) SCOPE and TYPE fields; 3674 * b) Ignore the contents of the (received) SCOPE and TYPE fields;
3678 */ 3675 */
3679 type = pr_res_holder->pr_res_type; 3676 type = pr_res_holder->pr_res_type;
3680 scope = pr_res_holder->pr_res_scope; 3677 scope = pr_res_holder->pr_res_scope;
3681 /* 3678 /*
3682 * c) Associate the reservation key specified in the SERVICE ACTION 3679 * c) Associate the reservation key specified in the SERVICE ACTION
3683 * RESERVATION KEY field with the I_T nexus specified as the 3680 * RESERVATION KEY field with the I_T nexus specified as the
3684 * destination of the register and move, where: 3681 * destination of the register and move, where:
3685 * A) The I_T nexus is specified by the TransportID and the 3682 * A) The I_T nexus is specified by the TransportID and the
3686 * RELATIVE TARGET PORT IDENTIFIER field (see 6.14.4); and 3683 * RELATIVE TARGET PORT IDENTIFIER field (see 6.14.4); and
3687 * B) Regardless of the TransportID format used, the association for 3684 * B) Regardless of the TransportID format used, the association for
3688 * the initiator port is based on either the initiator port name 3685 * the initiator port is based on either the initiator port name
3689 * (see 3.1.71) on SCSI transport protocols where port names are 3686 * (see 3.1.71) on SCSI transport protocols where port names are
3690 * required or the initiator port identifier (see 3.1.70) on SCSI 3687 * required or the initiator port identifier (see 3.1.70) on SCSI
3691 * transport protocols where port names are not required; 3688 * transport protocols where port names are not required;
3692 * d) Register the reservation key specified in the SERVICE ACTION 3689 * d) Register the reservation key specified in the SERVICE ACTION
3693 * RESERVATION KEY field; 3690 * RESERVATION KEY field;
3694 * e) Retain the reservation key specified in the SERVICE ACTION 3691 * e) Retain the reservation key specified in the SERVICE ACTION
3695 * RESERVATION KEY field and associated information; 3692 * RESERVATION KEY field and associated information;
3696 * 3693 *
3697 * Also, it is not an error for a REGISTER AND MOVE service action to 3694 * Also, it is not an error for a REGISTER AND MOVE service action to
3698 * register an I_T nexus that is already registered with the same 3695 * register an I_T nexus that is already registered with the same
3699 * reservation key or a different reservation key. 3696 * reservation key or a different reservation key.
3700 */ 3697 */
3701 dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl, 3698 dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
3702 iport_ptr); 3699 iport_ptr);
3703 if (!dest_pr_reg) { 3700 if (!dest_pr_reg) {
3704 ret = core_scsi3_alloc_registration(cmd->se_dev, 3701 ret = core_scsi3_alloc_registration(cmd->se_dev,
3705 dest_node_acl, dest_se_deve, iport_ptr, 3702 dest_node_acl, dest_se_deve, iport_ptr,
3706 sa_res_key, 0, aptpl, 2, 1); 3703 sa_res_key, 0, aptpl, 2, 1);
3707 if (ret != 0) { 3704 if (ret != 0) {
3708 spin_unlock(&dev->dev_reservation_lock); 3705 spin_unlock(&dev->dev_reservation_lock);
3709 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; 3706 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3710 ret = -EINVAL; 3707 ret = -EINVAL;
3711 goto out; 3708 goto out;
3712 } 3709 }
3713 dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl, 3710 dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
3714 iport_ptr); 3711 iport_ptr);
3715 new_reg = 1; 3712 new_reg = 1;
3716 } 3713 }
3717 /* 3714 /*
3718 * f) Release the persistent reservation for the persistent reservation 3715 * f) Release the persistent reservation for the persistent reservation
3719 * holder (i.e., the I_T nexus on which the command was received); 3716 * holder (i.e., the I_T nexus on which the command was received);
3720 */ 3717 */
3721 __core_scsi3_complete_pro_release(dev, pr_res_nacl, 3718 __core_scsi3_complete_pro_release(dev, pr_res_nacl,
3722 dev->dev_pr_res_holder, 0); 3719 dev->dev_pr_res_holder, 0);
3723 /* 3720 /*
3724 * g) Move the persistent reservation to the specified I_T nexus using 3721 * g) Move the persistent reservation to the specified I_T nexus using
3725 * the same scope and type as the persistent reservation released in 3722 * the same scope and type as the persistent reservation released in
3726 * item f); and 3723 * item f); and
3727 */ 3724 */
3728 dev->dev_pr_res_holder = dest_pr_reg; 3725 dev->dev_pr_res_holder = dest_pr_reg;
3729 dest_pr_reg->pr_res_holder = 1; 3726 dest_pr_reg->pr_res_holder = 1;
3730 dest_pr_reg->pr_res_type = type; 3727 dest_pr_reg->pr_res_type = type;
3731 dest_pr_reg->pr_res_scope = scope; 3728 dest_pr_reg->pr_res_scope = scope;
3732 prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0], 3729 prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
3733 PR_REG_ISID_ID_LEN); 3730 PR_REG_ISID_ID_LEN);
3734 /* 3731 /*
3735 * Increment PRGeneration for existing registrations.. 3732 * Increment PRGeneration for existing registrations..
3736 */ 3733 */
3737 if (!new_reg) 3734 if (!new_reg)
3738 dest_pr_reg->pr_res_generation = pr_tmpl->pr_generation++; 3735 dest_pr_reg->pr_res_generation = pr_tmpl->pr_generation++;
3739 spin_unlock(&dev->dev_reservation_lock); 3736 spin_unlock(&dev->dev_reservation_lock);
3740 3737
3741 pr_debug("SPC-3 PR [%s] Service Action: REGISTER_AND_MOVE" 3738 pr_debug("SPC-3 PR [%s] Service Action: REGISTER_AND_MOVE"
3742 " created new reservation holder TYPE: %s on object RTPI:" 3739 " created new reservation holder TYPE: %s on object RTPI:"
3743 " %hu PRGeneration: 0x%08x\n", dest_tf_ops->get_fabric_name(), 3740 " %hu PRGeneration: 0x%08x\n", dest_tf_ops->get_fabric_name(),
3744 core_scsi3_pr_dump_type(type), rtpi, 3741 core_scsi3_pr_dump_type(type), rtpi,
3745 dest_pr_reg->pr_res_generation); 3742 dest_pr_reg->pr_res_generation);
3746 pr_debug("SPC-3 PR Successfully moved reservation from" 3743 pr_debug("SPC-3 PR Successfully moved reservation from"
3747 " %s Fabric Node: %s%s -> %s Fabric Node: %s %s\n", 3744 " %s Fabric Node: %s%s -> %s Fabric Node: %s %s\n",
3748 tf_ops->get_fabric_name(), pr_reg_nacl->initiatorname, 3745 tf_ops->get_fabric_name(), pr_reg_nacl->initiatorname,
3749 (prf_isid) ? &i_buf[0] : "", dest_tf_ops->get_fabric_name(), 3746 (prf_isid) ? &i_buf[0] : "", dest_tf_ops->get_fabric_name(),
3750 dest_node_acl->initiatorname, (iport_ptr != NULL) ? 3747 dest_node_acl->initiatorname, (iport_ptr != NULL) ?
3751 iport_ptr : ""); 3748 iport_ptr : "");
3752 /* 3749 /*
3753 * It is now safe to release configfs group dependencies for destination 3750 * It is now safe to release configfs group dependencies for destination
3754 * of Transport ID Initiator Device/Port Identifier 3751 * of Transport ID Initiator Device/Port Identifier
3755 */ 3752 */
3756 core_scsi3_lunacl_undepend_item(dest_se_deve); 3753 core_scsi3_lunacl_undepend_item(dest_se_deve);
3757 core_scsi3_nodeacl_undepend_item(dest_node_acl); 3754 core_scsi3_nodeacl_undepend_item(dest_node_acl);
3758 core_scsi3_tpg_undepend_item(dest_se_tpg); 3755 core_scsi3_tpg_undepend_item(dest_se_tpg);
3759 /* 3756 /*
3760 * h) If the UNREG bit is set to one, unregister (see 5.7.11.3) the I_T 3757 * h) If the UNREG bit is set to one, unregister (see 5.7.11.3) the I_T
3761 * nexus on which PERSISTENT RESERVE OUT command was received. 3758 * nexus on which PERSISTENT RESERVE OUT command was received.
3762 */ 3759 */
3763 if (unreg) { 3760 if (unreg) {
3764 spin_lock(&pr_tmpl->registration_lock); 3761 spin_lock(&pr_tmpl->registration_lock);
3765 __core_scsi3_free_registration(dev, pr_reg, NULL, 1); 3762 __core_scsi3_free_registration(dev, pr_reg, NULL, 1);
3766 spin_unlock(&pr_tmpl->registration_lock); 3763 spin_unlock(&pr_tmpl->registration_lock);
3767 } else 3764 } else
3768 core_scsi3_put_pr_reg(pr_reg); 3765 core_scsi3_put_pr_reg(pr_reg);
3769 3766
3770 /* 3767 /*
3771 * Clear the APTPL metadata if APTPL has been disabled, otherwise 3768 * Clear the APTPL metadata if APTPL has been disabled, otherwise
3772 * write out the updated metadata to struct file for this SCSI device. 3769 * write out the updated metadata to struct file for this SCSI device.
3773 */ 3770 */
3774 if (!aptpl) { 3771 if (!aptpl) {
3775 pr_tmpl->pr_aptpl_active = 0; 3772 pr_tmpl->pr_aptpl_active = 0;
3776 core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0); 3773 core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0);
3777 pr_debug("SPC-3 PR: Set APTPL Bit Deactivated for" 3774 pr_debug("SPC-3 PR: Set APTPL Bit Deactivated for"
3778 " REGISTER_AND_MOVE\n"); 3775 " REGISTER_AND_MOVE\n");
3779 } else { 3776 } else {
3780 pr_tmpl->pr_aptpl_active = 1; 3777 pr_tmpl->pr_aptpl_active = 1;
3781 ret = core_scsi3_update_and_write_aptpl(cmd->se_dev, 3778 ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
3782 &dest_pr_reg->pr_aptpl_buf[0], 3779 &dest_pr_reg->pr_aptpl_buf[0],
3783 pr_tmpl->pr_aptpl_buf_len); 3780 pr_tmpl->pr_aptpl_buf_len);
3784 if (!ret) 3781 if (!ret)
3785 pr_debug("SPC-3 PR: Set APTPL Bit Activated for" 3782 pr_debug("SPC-3 PR: Set APTPL Bit Activated for"
3786 " REGISTER_AND_MOVE\n"); 3783 " REGISTER_AND_MOVE\n");
3787 } 3784 }
3788 3785
3789 transport_kunmap_first_data_page(cmd); 3786 transport_kunmap_first_data_page(cmd);
3790 3787
3791 core_scsi3_put_pr_reg(dest_pr_reg); 3788 core_scsi3_put_pr_reg(dest_pr_reg);
3792 return 0; 3789 return 0;
3793 out: 3790 out:
3794 if (buf) 3791 if (buf)
3795 transport_kunmap_first_data_page(cmd); 3792 transport_kunmap_first_data_page(cmd);
3796 if (dest_se_deve) 3793 if (dest_se_deve)
3797 core_scsi3_lunacl_undepend_item(dest_se_deve); 3794 core_scsi3_lunacl_undepend_item(dest_se_deve);
3798 if (dest_node_acl) 3795 if (dest_node_acl)
3799 core_scsi3_nodeacl_undepend_item(dest_node_acl); 3796 core_scsi3_nodeacl_undepend_item(dest_node_acl);
3800 core_scsi3_tpg_undepend_item(dest_se_tpg); 3797 core_scsi3_tpg_undepend_item(dest_se_tpg);
3801 core_scsi3_put_pr_reg(pr_reg); 3798 core_scsi3_put_pr_reg(pr_reg);
3802 return ret; 3799 return ret;
3803 } 3800 }
3804 3801
3805 static unsigned long long core_scsi3_extract_reservation_key(unsigned char *cdb) 3802 static unsigned long long core_scsi3_extract_reservation_key(unsigned char *cdb)
3806 { 3803 {
3807 unsigned int __v1, __v2; 3804 unsigned int __v1, __v2;
3808 3805
3809 __v1 = (cdb[0] << 24) | (cdb[1] << 16) | (cdb[2] << 8) | cdb[3]; 3806 __v1 = (cdb[0] << 24) | (cdb[1] << 16) | (cdb[2] << 8) | cdb[3];
3810 __v2 = (cdb[4] << 24) | (cdb[5] << 16) | (cdb[6] << 8) | cdb[7]; 3807 __v2 = (cdb[4] << 24) | (cdb[5] << 16) | (cdb[6] << 8) | cdb[7];
3811 3808
3812 return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; 3809 return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
3813 } 3810 }
3814 3811
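[Editor's note] core_scsi3_extract_reservation_key() is a plain big-endian 64-bit read (note that despite the cdb-named argument, it is handed parameter-list bytes below). Assuming asm/unaligned.h, an equivalent one-liner would be:

	#include <linux/types.h>
	#include <asm/unaligned.h>

	/* Illustrative equivalent of core_scsi3_extract_reservation_key(). */
	static u64 pr_extract_res_key(const unsigned char *p)
	{
		return get_unaligned_be64(p);
	}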
3815 /* 3812 /*
3816 * See spc4r17 section 6.14 Table 170 3813 * See spc4r17 section 6.14 Table 170
3817 */ 3814 */
3818 int target_scsi3_emulate_pr_out(struct se_task *task) 3815 int target_scsi3_emulate_pr_out(struct se_task *task)
3819 { 3816 {
3820 struct se_cmd *cmd = task->task_se_cmd; 3817 struct se_cmd *cmd = task->task_se_cmd;
3821 unsigned char *cdb = &cmd->t_task_cdb[0]; 3818 unsigned char *cdb = &cmd->t_task_cdb[0];
3822 unsigned char *buf; 3819 unsigned char *buf;
3823 u64 res_key, sa_res_key; 3820 u64 res_key, sa_res_key;
3824 int sa, scope, type, aptpl; 3821 int sa, scope, type, aptpl;
3825 int spec_i_pt = 0, all_tg_pt = 0, unreg = 0; 3822 int spec_i_pt = 0, all_tg_pt = 0, unreg = 0;
3826 int ret; 3823 int ret;
3827 3824
3828 /* 3825 /*
3829 * Following spc2r20 5.5.1 Reservations overview: 3826 * Following spc2r20 5.5.1 Reservations overview:
3830 * 3827 *
3831 * If a logical unit has been reserved by any RESERVE command and is 3828 * If a logical unit has been reserved by any RESERVE command and is
3832 * still reserved by any initiator, all PERSISTENT RESERVE IN and all 3829 * still reserved by any initiator, all PERSISTENT RESERVE IN and all
3833 * PERSISTENT RESERVE OUT commands shall conflict regardless of 3830 * PERSISTENT RESERVE OUT commands shall conflict regardless of
3834 * initiator or service action and shall terminate with a RESERVATION 3831 * initiator or service action and shall terminate with a RESERVATION
3835 * CONFLICT status. 3832 * CONFLICT status.
3836 */ 3833 */
3837 if (cmd->se_dev->dev_flags & DF_SPC2_RESERVATIONS) { 3834 if (cmd->se_dev->dev_flags & DF_SPC2_RESERVATIONS) {
3838 pr_err("Received PERSISTENT_RESERVE CDB while legacy" 3835 pr_err("Received PERSISTENT_RESERVE CDB while legacy"
3839 " SPC-2 reservation is held, returning" 3836 " SPC-2 reservation is held, returning"
3840 " RESERVATION_CONFLICT\n"); 3837 " RESERVATION_CONFLICT\n");
3841 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; 3838 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
3842 ret = -EINVAL; 3839 ret = -EINVAL;
3843 goto out; 3840 goto out;
3844 } 3841 }
3845 3842
3846 /* 3843 /*
3847 * FIXME: A NULL struct se_session pointer means this is not coming from 3844 * FIXME: A NULL struct se_session pointer means this is not coming from
3848 * a $FABRIC_MOD's nexus, but from internal passthrough ops. 3845 * a $FABRIC_MOD's nexus, but from internal passthrough ops.
3849 */ 3846 */
3850 if (!cmd->se_sess) { 3847 if (!cmd->se_sess) {
3851 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 3848 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3852 return -EINVAL; 3849 return -EINVAL;
3853 } 3850 }
3854 3851
3855 if (cmd->data_length < 24) { 3852 if (cmd->data_length < 24) {
3856 pr_warn("SPC-PR: Received PR OUT parameter list" 3853 pr_warn("SPC-PR: Received PR OUT parameter list"
3857 " length too small: %u\n", cmd->data_length); 3854 " length too small: %u\n", cmd->data_length);
3858 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; 3855 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3859 ret = -EINVAL; 3856 ret = -EINVAL;
3860 goto out; 3857 goto out;
3861 } 3858 }
3862 /* 3859 /*
3863 * From the PERSISTENT_RESERVE_OUT command descriptor block (CDB) 3860 * From the PERSISTENT_RESERVE_OUT command descriptor block (CDB)
3864 */ 3861 */
3865 sa = (cdb[1] & 0x1f); 3862 sa = (cdb[1] & 0x1f);
3866 scope = (cdb[2] & 0xf0); 3863 scope = (cdb[2] & 0xf0);
3867 type = (cdb[2] & 0x0f); 3864 type = (cdb[2] & 0x0f);
3868 3865
3869 buf = transport_kmap_first_data_page(cmd); 3866 buf = transport_kmap_first_data_page(cmd);
3870 /* 3867 /*
3871 * From PERSISTENT_RESERVE_OUT parameter list (payload) 3868 * From PERSISTENT_RESERVE_OUT parameter list (payload)
3872 */ 3869 */
3873 res_key = core_scsi3_extract_reservation_key(&buf[0]); 3870 res_key = core_scsi3_extract_reservation_key(&buf[0]);
3874 sa_res_key = core_scsi3_extract_reservation_key(&buf[8]); 3871 sa_res_key = core_scsi3_extract_reservation_key(&buf[8]);
3875 /* 3872 /*
3876 * REGISTER_AND_MOVE uses a different SA parameter list containing 3873 * REGISTER_AND_MOVE uses a different SA parameter list containing
3877 * SCSI TransportIDs. 3874 * SCSI TransportIDs.
3878 */ 3875 */
3879 if (sa != PRO_REGISTER_AND_MOVE) { 3876 if (sa != PRO_REGISTER_AND_MOVE) {
3880 spec_i_pt = (buf[20] & 0x08); 3877 spec_i_pt = (buf[20] & 0x08);
3881 all_tg_pt = (buf[20] & 0x04); 3878 all_tg_pt = (buf[20] & 0x04);
3882 aptpl = (buf[20] & 0x01); 3879 aptpl = (buf[20] & 0x01);
3883 } else { 3880 } else {
3884 aptpl = (buf[17] & 0x01); 3881 aptpl = (buf[17] & 0x01);
3885 unreg = (buf[17] & 0x02); 3882 unreg = (buf[17] & 0x02);
3886 } 3883 }
3887 transport_kunmap_first_data_page(cmd); 3884 transport_kunmap_first_data_page(cmd);
3888 buf = NULL; 3885 buf = NULL;
3889 3886
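[Editor's note] The fixed offsets read above follow the basic PERSISTENT RESERVE OUT parameter list from spc4r17 section 6.14. An illustrative packed layout of the 24-byte form (field names are descriptive only, not kernel definitions; all multi-byte fields are big-endian):

	#include <linux/types.h>

	/* Illustrative layout of the basic PR OUT parameter list. */
	struct pr_out_param_list {
		__be64	reservation_key;		/* bytes 0-7 */
		__be64	service_action_res_key;		/* bytes 8-15 */
		__be32	obsolete;			/* bytes 16-19 */
		__u8	flags;				/* byte 20: SPEC_I_PT=0x08, ALL_TG_PT=0x04, APTPL=0x01 */
		__u8	reserved;			/* byte 21 */
		__be16	obsolete2;			/* bytes 22-23 */
	} __attribute__((packed));

REGISTER AND MOVE uses a different layout, which is why the code above instead reads the UNREG (0x02) and APTPL (0x01) bits from buf[17].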
3890 /* 3887 /*
3891 * SPEC_I_PT=1 is only valid for Service action: REGISTER 3888 * SPEC_I_PT=1 is only valid for Service action: REGISTER
3892 */ 3889 */
3893 if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER)) { 3890 if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER)) {
3894 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; 3891 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3895 ret = -EINVAL; 3892 ret = -EINVAL;
3896 goto out; 3893 goto out;
3897 } 3894 }
3898 3895
3899 /* 3896 /*
3900 * From spc4r17 section 6.14: 3897 * From spc4r17 section 6.14:
3901 * 3898 *
3902 * If the SPEC_I_PT bit is set to zero, the service action is not 3899 * If the SPEC_I_PT bit is set to zero, the service action is not
3903 * REGISTER AND MOVE, and the parameter list length is not 24, then 3900 * REGISTER AND MOVE, and the parameter list length is not 24, then
3904 * the command shall be terminated with CHECK CONDITION status, with 3901 * the command shall be terminated with CHECK CONDITION status, with
3905 * the sense key set to ILLEGAL REQUEST, and the additional sense 3902 * the sense key set to ILLEGAL REQUEST, and the additional sense
3906 * code set to PARAMETER LIST LENGTH ERROR. 3903 * code set to PARAMETER LIST LENGTH ERROR.
3907 */ 3904 */
3908 if (!spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER_AND_MOVE) && 3905 if (!spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER_AND_MOVE) &&
3909 (cmd->data_length != 24)) { 3906 (cmd->data_length != 24)) {
3910 pr_warn("SPC-PR: Received PR OUT illegal parameter" 3907 pr_warn("SPC-PR: Received PR OUT illegal parameter"
3911 " list length: %u\n", cmd->data_length); 3908 " list length: %u\n", cmd->data_length);
3912 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; 3909 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
3913 ret = -EINVAL; 3910 ret = -EINVAL;
3914 goto out; 3911 goto out;
3915 } 3912 }
3916 /* 3913 /*
3917 * core_scsi3_emulate_pro_* function parameters 3914 * core_scsi3_emulate_pro_* function parameters
3918 * are defined by spc4r17 Table 174: 3915 * are defined by spc4r17 Table 174:
3919 * PERSISTENT_RESERVE_OUT service actions and valid parameters. 3916 * PERSISTENT_RESERVE_OUT service actions and valid parameters.
3920 */ 3917 */
3921 switch (sa) { 3918 switch (sa) {
3922 case PRO_REGISTER: 3919 case PRO_REGISTER:
3923 ret = core_scsi3_emulate_pro_register(cmd, 3920 ret = core_scsi3_emulate_pro_register(cmd,
3924 res_key, sa_res_key, aptpl, all_tg_pt, spec_i_pt, 0); 3921 res_key, sa_res_key, aptpl, all_tg_pt, spec_i_pt, 0);
3925 break; 3922 break;
3926 case PRO_RESERVE: 3923 case PRO_RESERVE:
3927 ret = core_scsi3_emulate_pro_reserve(cmd, type, scope, res_key); 3924 ret = core_scsi3_emulate_pro_reserve(cmd, type, scope, res_key);
3928 break; 3925 break;
3929 case PRO_RELEASE: 3926 case PRO_RELEASE:
3930 ret = core_scsi3_emulate_pro_release(cmd, type, scope, res_key); 3927 ret = core_scsi3_emulate_pro_release(cmd, type, scope, res_key);
3931 break; 3928 break;
3932 case PRO_CLEAR: 3929 case PRO_CLEAR:
3933 ret = core_scsi3_emulate_pro_clear(cmd, res_key); 3930 ret = core_scsi3_emulate_pro_clear(cmd, res_key);
3934 break; 3931 break;
3935 case PRO_PREEMPT: 3932 case PRO_PREEMPT:
3936 ret = core_scsi3_emulate_pro_preempt(cmd, type, scope, 3933 ret = core_scsi3_emulate_pro_preempt(cmd, type, scope,
3937 res_key, sa_res_key, 0); 3934 res_key, sa_res_key, 0);
3938 break; 3935 break;
3939 case PRO_PREEMPT_AND_ABORT: 3936 case PRO_PREEMPT_AND_ABORT:
3940 ret = core_scsi3_emulate_pro_preempt(cmd, type, scope, 3937 ret = core_scsi3_emulate_pro_preempt(cmd, type, scope,
3941 res_key, sa_res_key, 1); 3938 res_key, sa_res_key, 1);
3942 break; 3939 break;
3943 case PRO_REGISTER_AND_IGNORE_EXISTING_KEY: 3940 case PRO_REGISTER_AND_IGNORE_EXISTING_KEY:
3944 ret = core_scsi3_emulate_pro_register(cmd, 3941 ret = core_scsi3_emulate_pro_register(cmd,
3945 0, sa_res_key, aptpl, all_tg_pt, spec_i_pt, 1); 3942 0, sa_res_key, aptpl, all_tg_pt, spec_i_pt, 1);
3946 break; 3943 break;
3947 case PRO_REGISTER_AND_MOVE: 3944 case PRO_REGISTER_AND_MOVE:
3948 ret = core_scsi3_emulate_pro_register_and_move(cmd, res_key, 3945 ret = core_scsi3_emulate_pro_register_and_move(cmd, res_key,
3949 sa_res_key, aptpl, unreg); 3946 sa_res_key, aptpl, unreg);
3950 break; 3947 break;
3951 default: 3948 default:
3952 pr_err("Unknown PERSISTENT_RESERVE_OUT service" 3949 pr_err("Unknown PERSISTENT_RESERVE_OUT service"
3953 " action: 0x%02x\n", cdb[1] & 0x1f); 3950 " action: 0x%02x\n", cdb[1] & 0x1f);
3954 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; 3951 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
3955 ret = -EINVAL; 3952 ret = -EINVAL;
3956 break; 3953 break;
3957 } 3954 }
3958 3955
3959 out: 3956 out:
3960 if (!ret) { 3957 if (!ret) {
3961 task->task_scsi_status = GOOD; 3958 task->task_scsi_status = GOOD;
3962 transport_complete_task(task, 1); 3959 transport_complete_task(task, 1);
3963 } 3960 }
3964 return ret; 3961 return ret;
3965 } 3962 }
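
A decode sketch of the CDB fields consumed by the switch above (spc4r17 section 6.14). The struct and helper names here are hypothetical, for illustration only; they are not part of this patch:

	/*
	 * Illustrative decode of the PERSISTENT_RESERVE_OUT CDB fields
	 * used above (spc4r17 section 6.14); hypothetical helper, not
	 * in-tree code.
	 */
	struct pr_out_cdb_fields {
		u8 sa;		/* SERVICE ACTION: cdb[1], bits 4..0 */
		u8 scope;	/* SCOPE: cdb[2], bits 7..4, kept shifted-up */
		u8 type;	/* TYPE: cdb[2], bits 3..0 */
	};

	static void pr_out_decode_cdb(const unsigned char *cdb,
				      struct pr_out_cdb_fields *f)
	{
		f->sa = cdb[1] & 0x1f;
		f->scope = cdb[2] & 0xf0;
		f->type = cdb[2] & 0x0f;
	}
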
3966 3963
3967 /* 3964 /*
3968 * PERSISTENT_RESERVE_IN Service Action READ_KEYS 3965 * PERSISTENT_RESERVE_IN Service Action READ_KEYS
3969 * 3966 *
3970 * See spc4r17 section 5.7.6.2 and section 6.13.2, Table 160 3967 * See spc4r17 section 5.7.6.2 and section 6.13.2, Table 160
3971 */ 3968 */
3972 static int core_scsi3_pri_read_keys(struct se_cmd *cmd) 3969 static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
3973 { 3970 {
3974 struct se_device *se_dev = cmd->se_dev; 3971 struct se_device *se_dev = cmd->se_dev;
3975 struct se_subsystem_dev *su_dev = se_dev->se_sub_dev; 3972 struct se_subsystem_dev *su_dev = se_dev->se_sub_dev;
3976 struct t10_pr_registration *pr_reg; 3973 struct t10_pr_registration *pr_reg;
3977 unsigned char *buf; 3974 unsigned char *buf;
3978 u32 add_len = 0, off = 8; 3975 u32 add_len = 0, off = 8;
3979 3976
3980 if (cmd->data_length < 8) { 3977 if (cmd->data_length < 8) {
3981 pr_err("PRIN SA READ_KEYS SCSI Data Length: %u" 3978 pr_err("PRIN SA READ_KEYS SCSI Data Length: %u"
3982 " too small\n", cmd->data_length); 3979 " too small\n", cmd->data_length);
3983 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; 3980 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
3984 return -EINVAL; 3981 return -EINVAL;
3985 } 3982 }
3986 3983
3987 buf = transport_kmap_first_data_page(cmd); 3984 buf = transport_kmap_first_data_page(cmd);
3988 buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff); 3985 buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff);
3989 buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff); 3986 buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff);
3990 buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff); 3987 buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff);
3991 buf[3] = (su_dev->t10_pr.pr_generation & 0xff); 3988 buf[3] = (su_dev->t10_pr.pr_generation & 0xff);
3992 3989
3993 spin_lock(&su_dev->t10_pr.registration_lock); 3990 spin_lock(&su_dev->t10_pr.registration_lock);
3994 list_for_each_entry(pr_reg, &su_dev->t10_pr.registration_list, 3991 list_for_each_entry(pr_reg, &su_dev->t10_pr.registration_list,
3995 pr_reg_list) { 3992 pr_reg_list) {
3996 /* 3993 /*
3997 * Check for overflow of 8-byte PRI READ_KEYS payload and 3994 * Check for overflow of 8-byte PRI READ_KEYS payload and
3998 * next reservation key list descriptor. 3995 * next reservation key list descriptor.
3999 */ 3996 */
4000 if ((add_len + 8) > (cmd->data_length - 8)) 3997 if ((add_len + 8) > (cmd->data_length - 8))
4001 break; 3998 break;
4002 3999
4003 buf[off++] = ((pr_reg->pr_res_key >> 56) & 0xff); 4000 buf[off++] = ((pr_reg->pr_res_key >> 56) & 0xff);
4004 buf[off++] = ((pr_reg->pr_res_key >> 48) & 0xff); 4001 buf[off++] = ((pr_reg->pr_res_key >> 48) & 0xff);
4005 buf[off++] = ((pr_reg->pr_res_key >> 40) & 0xff); 4002 buf[off++] = ((pr_reg->pr_res_key >> 40) & 0xff);
4006 buf[off++] = ((pr_reg->pr_res_key >> 32) & 0xff); 4003 buf[off++] = ((pr_reg->pr_res_key >> 32) & 0xff);
4007 buf[off++] = ((pr_reg->pr_res_key >> 24) & 0xff); 4004 buf[off++] = ((pr_reg->pr_res_key >> 24) & 0xff);
4008 buf[off++] = ((pr_reg->pr_res_key >> 16) & 0xff); 4005 buf[off++] = ((pr_reg->pr_res_key >> 16) & 0xff);
4009 buf[off++] = ((pr_reg->pr_res_key >> 8) & 0xff); 4006 buf[off++] = ((pr_reg->pr_res_key >> 8) & 0xff);
4010 buf[off++] = (pr_reg->pr_res_key & 0xff); 4007 buf[off++] = (pr_reg->pr_res_key & 0xff);
4011 4008
4012 add_len += 8; 4009 add_len += 8;
4013 } 4010 }
4014 spin_unlock(&su_dev->t10_pr.registration_lock); 4011 spin_unlock(&su_dev->t10_pr.registration_lock);
4015 4012
4016 buf[4] = ((add_len >> 24) & 0xff); 4013 buf[4] = ((add_len >> 24) & 0xff);
4017 buf[5] = ((add_len >> 16) & 0xff); 4014 buf[5] = ((add_len >> 16) & 0xff);
4018 buf[6] = ((add_len >> 8) & 0xff); 4015 buf[6] = ((add_len >> 8) & 0xff);
4019 buf[7] = (add_len & 0xff); 4016 buf[7] = (add_len & 0xff);
4020 4017
4021 transport_kunmap_first_data_page(cmd); 4018 transport_kunmap_first_data_page(cmd);
4022 4019
4023 return 0; 4020 return 0;
4024 } 4021 }
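
The descriptor packing above open-codes big-endian stores one byte at a time. The same payload could be built with the kernel's unaligned big-endian helpers from <asm/unaligned.h>; a sketch follows (illustrative only, this patch deliberately leaves the open-coded shifts untouched):

	#include <asm/unaligned.h>

	/*
	 * Sketch: pack one 8-byte reservation key descriptor of the
	 * READ_KEYS payload, equivalent to the eight buf[off++] stores
	 * in the loop above.
	 */
	static void pri_pack_res_key(unsigned char *buf, u32 *off, u64 key)
	{
		put_unaligned_be64(key, &buf[*off]);
		*off += 8;
	}

	/*
	 * PRGENERATION (buf[0..3]) and ADDITIONAL LENGTH (buf[4..7]) are
	 * 32-bit big-endian fields and would map onto put_unaligned_be32().
	 */
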
4025 4022
4026 /* 4023 /*
4027 * PERSISTENT_RESERVE_IN Service Action READ_RESERVATION 4024 * PERSISTENT_RESERVE_IN Service Action READ_RESERVATION
4028 * 4025 *
4029 * See spc4r17 section 5.7.6.3 and section 6.13.3.2 Table 161 and 162 4026 * See spc4r17 section 5.7.6.3 and section 6.13.3.2 Table 161 and 162
4030 */ 4027 */
4031 static int core_scsi3_pri_read_reservation(struct se_cmd *cmd) 4028 static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
4032 { 4029 {
4033 struct se_device *se_dev = cmd->se_dev; 4030 struct se_device *se_dev = cmd->se_dev;
4034 struct se_subsystem_dev *su_dev = se_dev->se_sub_dev; 4031 struct se_subsystem_dev *su_dev = se_dev->se_sub_dev;
4035 struct t10_pr_registration *pr_reg; 4032 struct t10_pr_registration *pr_reg;
4036 unsigned char *buf; 4033 unsigned char *buf;
4037 u64 pr_res_key; 4034 u64 pr_res_key;
4038 u32 add_len = 16; /* Hardcoded to 16 when a reservation is held. */ 4035 u32 add_len = 16; /* Hardcoded to 16 when a reservation is held. */
4039 4036
4040 if (cmd->data_length < 8) { 4037 if (cmd->data_length < 8) {
4041 pr_err("PRIN SA READ_RESERVATIONS SCSI Data Length: %u" 4038 pr_err("PRIN SA READ_RESERVATIONS SCSI Data Length: %u"
4042 " too small\n", cmd->data_length); 4039 " too small\n", cmd->data_length);
4043 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; 4040 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
4044 return -EINVAL; 4041 return -EINVAL;
4045 } 4042 }
4046 4043
4047 buf = transport_kmap_first_data_page(cmd); 4044 buf = transport_kmap_first_data_page(cmd);
4048 buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff); 4045 buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff);
4049 buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff); 4046 buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff);
4050 buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff); 4047 buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff);
4051 buf[3] = (su_dev->t10_pr.pr_generation & 0xff); 4048 buf[3] = (su_dev->t10_pr.pr_generation & 0xff);
4052 4049
4053 spin_lock(&se_dev->dev_reservation_lock); 4050 spin_lock(&se_dev->dev_reservation_lock);
4054 pr_reg = se_dev->dev_pr_res_holder; 4051 pr_reg = se_dev->dev_pr_res_holder;
4055 if (pr_reg) { 4052 if (pr_reg) {
4056 /* 4053 /*
4057 * Set the hardcoded Additional Length 4054 * Set the hardcoded Additional Length
4058 */ 4055 */
4059 buf[4] = ((add_len >> 24) & 0xff); 4056 buf[4] = ((add_len >> 24) & 0xff);
4060 buf[5] = ((add_len >> 16) & 0xff); 4057 buf[5] = ((add_len >> 16) & 0xff);
4061 buf[6] = ((add_len >> 8) & 0xff); 4058 buf[6] = ((add_len >> 8) & 0xff);
4062 buf[7] = (add_len & 0xff); 4059 buf[7] = (add_len & 0xff);
4063 4060
4064 if (cmd->data_length < 22) 4061 if (cmd->data_length < 22)
4065 goto err; 4062 goto err;
4066 4063
4067 /* 4064 /*
4068 * Set the Reservation key. 4065 * Set the Reservation key.
4069 * 4066 *
4070 * From spc4r17, section 5.7.10: 4067 * From spc4r17, section 5.7.10:
4071 * A persistent reservation holder has its reservation key 4068 * A persistent reservation holder has its reservation key
4072 * returned in the parameter data from a PERSISTENT 4069 * returned in the parameter data from a PERSISTENT
4073 * RESERVE IN command with READ RESERVATION service action as 4070 * RESERVE IN command with READ RESERVATION service action as
4074 * follows: 4071 * follows:
4075 * a) For a persistent reservation of the type Write Exclusive 4072 * a) For a persistent reservation of the type Write Exclusive
4076 * - All Registrants or Exclusive Access - All Registrants, 4073 * - All Registrants or Exclusive Access - All Registrants,
4077 * the reservation key shall be set to zero; or 4074 * the reservation key shall be set to zero; or
4078 * b) For all other persistent reservation types, the 4075 * b) For all other persistent reservation types, the
4079 * reservation key shall be set to the registered 4076 * reservation key shall be set to the registered
4080 * reservation key for the I_T nexus that holds the 4077 * reservation key for the I_T nexus that holds the
4081 * persistent reservation. 4078 * persistent reservation.
4082 */ 4079 */
4083 if ((pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) || 4080 if ((pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
4084 (pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) 4081 (pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))
4085 pr_res_key = 0; 4082 pr_res_key = 0;
4086 else 4083 else
4087 pr_res_key = pr_reg->pr_res_key; 4084 pr_res_key = pr_reg->pr_res_key;
4088 4085
4089 buf[8] = ((pr_res_key >> 56) & 0xff); 4086 buf[8] = ((pr_res_key >> 56) & 0xff);
4090 buf[9] = ((pr_res_key >> 48) & 0xff); 4087 buf[9] = ((pr_res_key >> 48) & 0xff);
4091 buf[10] = ((pr_res_key >> 40) & 0xff); 4088 buf[10] = ((pr_res_key >> 40) & 0xff);
4092 buf[11] = ((pr_res_key >> 32) & 0xff); 4089 buf[11] = ((pr_res_key >> 32) & 0xff);
4093 buf[12] = ((pr_res_key >> 24) & 0xff); 4090 buf[12] = ((pr_res_key >> 24) & 0xff);
4094 buf[13] = ((pr_res_key >> 16) & 0xff); 4091 buf[13] = ((pr_res_key >> 16) & 0xff);
4095 buf[14] = ((pr_res_key >> 8) & 0xff); 4092 buf[14] = ((pr_res_key >> 8) & 0xff);
4096 buf[15] = (pr_res_key & 0xff); 4093 buf[15] = (pr_res_key & 0xff);
4097 /* 4094 /*
4098 * Set the SCOPE and TYPE 4095 * Set the SCOPE and TYPE
4099 */ 4096 */
4100 buf[21] = (pr_reg->pr_res_scope & 0xf0) | 4097 buf[21] = (pr_reg->pr_res_scope & 0xf0) |
4101 (pr_reg->pr_res_type & 0x0f); 4098 (pr_reg->pr_res_type & 0x0f);
4102 } 4099 }
4103 4100
4104 err: 4101 err:
4105 spin_unlock(&se_dev->dev_reservation_lock); 4102 spin_unlock(&se_dev->dev_reservation_lock);
4106 transport_kunmap_first_data_page(cmd); 4103 transport_kunmap_first_data_page(cmd);
4107 4104
4108 return 0; 4105 return 0;
4109 } 4106 }
4110 4107
4111 /* 4108 /*
4112 * PERSISTENT_RESERVE_IN Service Action REPORT_CAPABILITIES 4109 * PERSISTENT_RESERVE_IN Service Action REPORT_CAPABILITIES
4113 * 4110 *
4114 * See spc4r17 section 6.13.4 Table 165 4111 * See spc4r17 section 6.13.4 Table 165
4115 */ 4112 */
4116 static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd) 4113 static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
4117 { 4114 {
4118 struct se_device *dev = cmd->se_dev; 4115 struct se_device *dev = cmd->se_dev;
4119 struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; 4116 struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
4120 unsigned char *buf; 4117 unsigned char *buf;
4121 u16 add_len = 8; /* Hardcoded to 8. */ 4118 u16 add_len = 8; /* Hardcoded to 8. */
4122 4119
4123 if (cmd->data_length < 6) { 4120 if (cmd->data_length < 6) {
4124 pr_err("PRIN SA REPORT_CAPABILITIES SCSI Data Length:" 4121 pr_err("PRIN SA REPORT_CAPABILITIES SCSI Data Length:"
4125 " %u too small\n", cmd->data_length); 4122 " %u too small\n", cmd->data_length);
4126 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; 4123 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
4127 return -EINVAL; 4124 return -EINVAL;
4128 } 4125 }
4129 4126
4130 buf = transport_kmap_first_data_page(cmd); 4127 buf = transport_kmap_first_data_page(cmd);
4131 4128
4132 buf[0] = ((add_len >> 8) & 0xff); 4129 buf[0] = ((add_len >> 8) & 0xff);
4133 buf[1] = (add_len & 0xff); 4130 buf[1] = (add_len & 0xff);
4134 buf[2] |= 0x10; /* CRH: Compatible Reservation Handling bit. */ 4131 buf[2] |= 0x10; /* CRH: Compatible Reservation Handling bit. */
4135 buf[2] |= 0x08; /* SIP_C: Specify Initiator Ports Capable bit */ 4132 buf[2] |= 0x08; /* SIP_C: Specify Initiator Ports Capable bit */
4136 buf[2] |= 0x04; /* ATP_C: All Target Ports Capable bit */ 4133 buf[2] |= 0x04; /* ATP_C: All Target Ports Capable bit */
4137 buf[2] |= 0x01; /* PTPL_C: Persistence across Target Power Loss bit */ 4134 buf[2] |= 0x01; /* PTPL_C: Persistence across Target Power Loss bit */
4138 /* 4135 /*
4139 * We are filling in the PERSISTENT RESERVATION TYPE MASK below, so 4136 * We are filling in the PERSISTENT RESERVATION TYPE MASK below, so
4140 * set the TMV: Type Mask Valid bit. 4137 * set the TMV: Type Mask Valid bit.
4141 */ 4138 */
4142 buf[3] |= 0x80; 4139 buf[3] |= 0x80;
4143 /* 4140 /*
4144 * Change ALLOW COMMANDs to 0x20 or 0x40 later from Table 166 4141 * Change ALLOW COMMANDs to 0x20 or 0x40 later from Table 166
4145 */ 4142 */
4146 buf[3] |= 0x10; /* ALLOW COMMANDs field 001b */ 4143 buf[3] |= 0x10; /* ALLOW COMMANDs field 001b */
4147 /* 4144 /*
4148 * PTPL_A: Persistence across Target Power Loss Active bit 4145 * PTPL_A: Persistence across Target Power Loss Active bit
4149 */ 4146 */
4150 if (pr_tmpl->pr_aptpl_active) 4147 if (pr_tmpl->pr_aptpl_active)
4151 buf[3] |= 0x01; 4148 buf[3] |= 0x01;
4152 /* 4149 /*
4153 * Setup the PERSISTENT RESERVATION TYPE MASK from Table 167 4150 * Setup the PERSISTENT RESERVATION TYPE MASK from Table 167
4154 */ 4151 */
4155 buf[4] |= 0x80; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */ 4152 buf[4] |= 0x80; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */
4156 buf[4] |= 0x40; /* PR_TYPE_EXCLUSIVE_ACCESS_REGONLY */ 4153 buf[4] |= 0x40; /* PR_TYPE_EXCLUSIVE_ACCESS_REGONLY */
4157 buf[4] |= 0x20; /* PR_TYPE_WRITE_EXCLUSIVE_REGONLY */ 4154 buf[4] |= 0x20; /* PR_TYPE_WRITE_EXCLUSIVE_REGONLY */
4158 buf[4] |= 0x08; /* PR_TYPE_EXCLUSIVE_ACCESS */ 4155 buf[4] |= 0x08; /* PR_TYPE_EXCLUSIVE_ACCESS */
4159 buf[4] |= 0x02; /* PR_TYPE_WRITE_EXCLUSIVE */ 4156 buf[4] |= 0x02; /* PR_TYPE_WRITE_EXCLUSIVE */
4160 buf[5] |= 0x01; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */ 4157 buf[5] |= 0x01; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */
4161 4158
4162 transport_kunmap_first_data_page(cmd); 4159 transport_kunmap_first_data_page(cmd);
4163 4160
4164 return 0; 4161 return 0;
4165 } 4162 }
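
The raw masks above come straight from spc4r17 Table 165. As named constants (hypothetical names; the in-tree code keeps the literals) the mapping reads:

	/* REPORT CAPABILITIES bits set above (spc4r17 Table 165). */
	#define PR_CAP_CRH	0x10	/* buf[2]: Compatible Reservation Handling */
	#define PR_CAP_SIP_C	0x08	/* buf[2]: Specify Initiator Ports Capable */
	#define PR_CAP_ATP_C	0x04	/* buf[2]: All Target Ports Capable */
	#define PR_CAP_PTPL_C	0x01	/* buf[2]: Persistence across Target Power Loss */
	#define PR_CAP_TMV	0x80	/* buf[3]: Type Mask Valid */
	#define PR_CAP_PTPL_A	0x01	/* buf[3]: PTPL currently Active */
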
4166 4163
4167 /* 4164 /*
4168 * PERSISTENT_RESERVE_IN Service Action READ_FULL_STATUS 4165 * PERSISTENT_RESERVE_IN Service Action READ_FULL_STATUS
4169 * 4166 *
4170 * See spc4r17 section 6.13.5 Table 168 and 169 4167 * See spc4r17 section 6.13.5 Table 168 and 169
4171 */ 4168 */
4172 static int core_scsi3_pri_read_full_status(struct se_cmd *cmd) 4169 static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
4173 { 4170 {
4174 struct se_device *se_dev = cmd->se_dev; 4171 struct se_device *se_dev = cmd->se_dev;
4175 struct se_node_acl *se_nacl; 4172 struct se_node_acl *se_nacl;
4176 struct se_subsystem_dev *su_dev = se_dev->se_sub_dev; 4173 struct se_subsystem_dev *su_dev = se_dev->se_sub_dev;
4177 struct se_portal_group *se_tpg; 4174 struct se_portal_group *se_tpg;
4178 struct t10_pr_registration *pr_reg, *pr_reg_tmp; 4175 struct t10_pr_registration *pr_reg, *pr_reg_tmp;
4179 struct t10_reservation *pr_tmpl = &se_dev->se_sub_dev->t10_pr; 4176 struct t10_reservation *pr_tmpl = &se_dev->se_sub_dev->t10_pr;
4180 unsigned char *buf; 4177 unsigned char *buf;
4181 u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len; 4178 u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len;
4182 u32 off = 8; /* off into first Full Status descriptor */ 4179 u32 off = 8; /* off into first Full Status descriptor */
4183 int format_code = 0; 4180 int format_code = 0;
4184 4181
4185 if (cmd->data_length < 8) { 4182 if (cmd->data_length < 8) {
4186 pr_err("PRIN SA READ_FULL_STATUS SCSI Data Length: %u" 4183 pr_err("PRIN SA READ_FULL_STATUS SCSI Data Length: %u"
4187 " too small\n", cmd->data_length); 4184 " too small\n", cmd->data_length);
4188 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; 4185 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
4189 return -EINVAL; 4186 return -EINVAL;
4190 } 4187 }
4191 4188
4192 buf = transport_kmap_first_data_page(cmd); 4189 buf = transport_kmap_first_data_page(cmd);
4193 4190
4194 buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff); 4191 buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff);
4195 buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff); 4192 buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff);
4196 buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff); 4193 buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff);
4197 buf[3] = (su_dev->t10_pr.pr_generation & 0xff); 4194 buf[3] = (su_dev->t10_pr.pr_generation & 0xff);
4198 4195
4199 spin_lock(&pr_tmpl->registration_lock); 4196 spin_lock(&pr_tmpl->registration_lock);
4200 list_for_each_entry_safe(pr_reg, pr_reg_tmp, 4197 list_for_each_entry_safe(pr_reg, pr_reg_tmp,
4201 &pr_tmpl->registration_list, pr_reg_list) { 4198 &pr_tmpl->registration_list, pr_reg_list) {
4202 4199
4203 se_nacl = pr_reg->pr_reg_nacl; 4200 se_nacl = pr_reg->pr_reg_nacl;
4204 se_tpg = pr_reg->pr_reg_nacl->se_tpg; 4201 se_tpg = pr_reg->pr_reg_nacl->se_tpg;
4205 add_desc_len = 0; 4202 add_desc_len = 0;
4206 4203
4207 atomic_inc(&pr_reg->pr_res_holders); 4204 atomic_inc(&pr_reg->pr_res_holders);
4208 smp_mb__after_atomic_inc(); 4205 smp_mb__after_atomic_inc();
4209 spin_unlock(&pr_tmpl->registration_lock); 4206 spin_unlock(&pr_tmpl->registration_lock);
4210 /* 4207 /*
4211 * Determine expected length of $FABRIC_MOD specific 4208 * Determine expected length of $FABRIC_MOD specific
4212 * TransportID full status descriptor. 4209 * TransportID full status descriptor.
4213 */ 4210 */
4214 exp_desc_len = se_tpg->se_tpg_tfo->tpg_get_pr_transport_id_len( 4211 exp_desc_len = se_tpg->se_tpg_tfo->tpg_get_pr_transport_id_len(
4215 se_tpg, se_nacl, pr_reg, &format_code); 4212 se_tpg, se_nacl, pr_reg, &format_code);
4216 4213
4217 if ((exp_desc_len + add_len) > cmd->data_length) { 4214 if ((exp_desc_len + add_len) > cmd->data_length) {
4218 pr_warn("SPC-3 PRIN READ_FULL_STATUS ran" 4215 pr_warn("SPC-3 PRIN READ_FULL_STATUS ran"
4219 " out of buffer: %d\n", cmd->data_length); 4216 " out of buffer: %d\n", cmd->data_length);
4220 spin_lock(&pr_tmpl->registration_lock); 4217 spin_lock(&pr_tmpl->registration_lock);
4221 atomic_dec(&pr_reg->pr_res_holders); 4218 atomic_dec(&pr_reg->pr_res_holders);
4222 smp_mb__after_atomic_dec(); 4219 smp_mb__after_atomic_dec();
4223 break; 4220 break;
4224 } 4221 }
4225 /* 4222 /*
4226 * Set RESERVATION KEY 4223 * Set RESERVATION KEY
4227 */ 4224 */
4228 buf[off++] = ((pr_reg->pr_res_key >> 56) & 0xff); 4225 buf[off++] = ((pr_reg->pr_res_key >> 56) & 0xff);
4229 buf[off++] = ((pr_reg->pr_res_key >> 48) & 0xff); 4226 buf[off++] = ((pr_reg->pr_res_key >> 48) & 0xff);
4230 buf[off++] = ((pr_reg->pr_res_key >> 40) & 0xff); 4227 buf[off++] = ((pr_reg->pr_res_key >> 40) & 0xff);
4231 buf[off++] = ((pr_reg->pr_res_key >> 32) & 0xff); 4228 buf[off++] = ((pr_reg->pr_res_key >> 32) & 0xff);
4232 buf[off++] = ((pr_reg->pr_res_key >> 24) & 0xff); 4229 buf[off++] = ((pr_reg->pr_res_key >> 24) & 0xff);
4233 buf[off++] = ((pr_reg->pr_res_key >> 16) & 0xff); 4230 buf[off++] = ((pr_reg->pr_res_key >> 16) & 0xff);
4234 buf[off++] = ((pr_reg->pr_res_key >> 8) & 0xff); 4231 buf[off++] = ((pr_reg->pr_res_key >> 8) & 0xff);
4235 buf[off++] = (pr_reg->pr_res_key & 0xff); 4232 buf[off++] = (pr_reg->pr_res_key & 0xff);
4236 off += 4; /* Skip Over Reserved area */ 4233 off += 4; /* Skip Over Reserved area */
4237 4234
4238 /* 4235 /*
4239 * Set ALL_TG_PT bit if PROUT SA REGISTER had this set. 4236 * Set ALL_TG_PT bit if PROUT SA REGISTER had this set.
4240 */ 4237 */
4241 if (pr_reg->pr_reg_all_tg_pt) 4238 if (pr_reg->pr_reg_all_tg_pt)
4242 buf[off] = 0x02; 4239 buf[off] = 0x02;
4243 /* 4240 /*
4244 * The struct se_lun pointer will be present for the 4241 * The struct se_lun pointer will be present for the
4245 * reservation holder for PR_HOLDER bit. 4242 * reservation holder for PR_HOLDER bit.
4246 * 4243 *
4247 * Also, if this registration is the reservation 4244 * Also, if this registration is the reservation
4248 * holder, fill in SCOPE and TYPE in the next byte. 4245 * holder, fill in SCOPE and TYPE in the next byte.
4249 */ 4246 */
4250 if (pr_reg->pr_res_holder) { 4247 if (pr_reg->pr_res_holder) {
4251 buf[off++] |= 0x01; 4248 buf[off++] |= 0x01;
4252 buf[off++] = (pr_reg->pr_res_scope & 0xf0) | 4249 buf[off++] = (pr_reg->pr_res_scope & 0xf0) |
4253 (pr_reg->pr_res_type & 0x0f); 4250 (pr_reg->pr_res_type & 0x0f);
4254 } else 4251 } else
4255 off += 2; 4252 off += 2;
4256 4253
4257 off += 4; /* Skip over reserved area */ 4254 off += 4; /* Skip over reserved area */
4258 /* 4255 /*
4259 * From spc4r17 6.3.15: 4256 * From spc4r17 6.3.15:
4260 * 4257 *
4261 * If the ALL_TG_PT bit is set to zero, the RELATIVE TARGET PORT 4258 * If the ALL_TG_PT bit is set to zero, the RELATIVE TARGET PORT
4262 * IDENTIFIER field contains the relative port identifier (see 4259 * IDENTIFIER field contains the relative port identifier (see
4263 * 3.1.120) of the target port that is part of the I_T nexus 4260 * 3.1.120) of the target port that is part of the I_T nexus
4264 * described by this full status descriptor. If the ALL_TG_PT 4261 * described by this full status descriptor. If the ALL_TG_PT
4265 * bit is set to one, the contents of the RELATIVE TARGET PORT 4262 * bit is set to one, the contents of the RELATIVE TARGET PORT
4266 * IDENTIFIER field are not defined by this standard. 4263 * IDENTIFIER field are not defined by this standard.
4267 */ 4264 */
4268 if (!pr_reg->pr_reg_all_tg_pt) { 4265 if (!pr_reg->pr_reg_all_tg_pt) {
4269 struct se_port *port = pr_reg->pr_reg_tg_pt_lun->lun_sep; 4266 struct se_port *port = pr_reg->pr_reg_tg_pt_lun->lun_sep;
4270 4267
4271 buf[off++] = ((port->sep_rtpi >> 8) & 0xff); 4268 buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
4272 buf[off++] = (port->sep_rtpi & 0xff); 4269 buf[off++] = (port->sep_rtpi & 0xff);
4273 } else 4270 } else
4274 off += 2; /* Skip over RELATIVE TARGET PORT IDENTIFIER */ 4271 off += 2; /* Skip over RELATIVE TARGET PORT IDENTIFIER */
4275 4272
4276 /* 4273 /*
4277 * Now, have the $FABRIC_MOD fill in the protocol identifier 4274 * Now, have the $FABRIC_MOD fill in the protocol identifier
4278 */ 4275 */
4279 desc_len = se_tpg->se_tpg_tfo->tpg_get_pr_transport_id(se_tpg, 4276 desc_len = se_tpg->se_tpg_tfo->tpg_get_pr_transport_id(se_tpg,
4280 se_nacl, pr_reg, &format_code, &buf[off+4]); 4277 se_nacl, pr_reg, &format_code, &buf[off+4]);
4281 4278
4282 spin_lock(&pr_tmpl->registration_lock); 4279 spin_lock(&pr_tmpl->registration_lock);
4283 atomic_dec(&pr_reg->pr_res_holders); 4280 atomic_dec(&pr_reg->pr_res_holders);
4284 smp_mb__after_atomic_dec(); 4281 smp_mb__after_atomic_dec();
4285 /* 4282 /*
4286 * Set the ADDITIONAL DESCRIPTOR LENGTH 4283 * Set the ADDITIONAL DESCRIPTOR LENGTH
4287 */ 4284 */
4288 buf[off++] = ((desc_len >> 24) & 0xff); 4285 buf[off++] = ((desc_len >> 24) & 0xff);
4289 buf[off++] = ((desc_len >> 16) & 0xff); 4286 buf[off++] = ((desc_len >> 16) & 0xff);
4290 buf[off++] = ((desc_len >> 8) & 0xff); 4287 buf[off++] = ((desc_len >> 8) & 0xff);
4291 buf[off++] = (desc_len & 0xff); 4288 buf[off++] = (desc_len & 0xff);
4292 /* 4289 /*
4293 * Size of full descriptor header minus TransportID 4290 * Size of full descriptor header minus TransportID
4294 * containing ($FABRIC_MOD specific) initiator device/port 4291 * containing ($FABRIC_MOD specific) initiator device/port
4295 * WWN information. 4292 * WWN information.
4296 * 4293 *
4297 * See spc4r17 Section 6.13.5 Table 169 4294 * See spc4r17 Section 6.13.5 Table 169
4298 */ 4295 */
4299 add_desc_len = (24 + desc_len); 4296 add_desc_len = (24 + desc_len);
4300 4297
4301 off += desc_len; 4298 off += desc_len;
4302 add_len += add_desc_len; 4299 add_len += add_desc_len;
4303 } 4300 }
4304 spin_unlock(&pr_tmpl->registration_lock); 4301 spin_unlock(&pr_tmpl->registration_lock);
4305 /* 4302 /*
4306 * Set ADDITIONAL_LENGTH 4303 * Set ADDITIONAL_LENGTH
4307 */ 4304 */
4308 buf[4] = ((add_len >> 24) & 0xff); 4305 buf[4] = ((add_len >> 24) & 0xff);
4309 buf[5] = ((add_len >> 16) & 0xff); 4306 buf[5] = ((add_len >> 16) & 0xff);
4310 buf[6] = ((add_len >> 8) & 0xff); 4307 buf[6] = ((add_len >> 8) & 0xff);
4311 buf[7] = (add_len & 0xff); 4308 buf[7] = (add_len & 0xff);
4312 4309
4313 transport_kunmap_first_data_page(cmd); 4310 transport_kunmap_first_data_page(cmd);
4314 4311
4315 return 0; 4312 return 0;
4316 } 4313 }
4317 4314
4318 int target_scsi3_emulate_pr_in(struct se_task *task) 4315 int target_scsi3_emulate_pr_in(struct se_task *task)
4319 { 4316 {
4320 struct se_cmd *cmd = task->task_se_cmd; 4317 struct se_cmd *cmd = task->task_se_cmd;
4321 int ret; 4318 int ret;
4322 4319
4323 /* 4320 /*
4324 * Following spc2r20 5.5.1 Reservations overview: 4321 * Following spc2r20 5.5.1 Reservations overview:
4325 * 4322 *
4326 * If a logical unit has been reserved by any RESERVE command and is 4323 * If a logical unit has been reserved by any RESERVE command and is
4327 * still reserved by any initiator, all PERSISTENT RESERVE IN and all 4324 * still reserved by any initiator, all PERSISTENT RESERVE IN and all
4328 * PERSISTENT RESERVE OUT commands shall conflict regardless of 4325 * PERSISTENT RESERVE OUT commands shall conflict regardless of
4329 * initiator or service action and shall terminate with a RESERVATION 4326 * initiator or service action and shall terminate with a RESERVATION
4330 * CONFLICT status. 4327 * CONFLICT status.
4331 */ 4328 */
4332 if (cmd->se_dev->dev_flags & DF_SPC2_RESERVATIONS) { 4329 if (cmd->se_dev->dev_flags & DF_SPC2_RESERVATIONS) {
4333 pr_err("Received PERSISTENT_RESERVE CDB while legacy" 4330 pr_err("Received PERSISTENT_RESERVE CDB while legacy"
4334 " SPC-2 reservation is held, returning" 4331 " SPC-2 reservation is held, returning"
4335 " RESERVATION_CONFLICT\n"); 4332 " RESERVATION_CONFLICT\n");
4336 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; 4333 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
4337 return -EINVAL; 4334 return -EINVAL;
4338 } 4335 }
4339 4336
4340 switch (cmd->t_task_cdb[1] & 0x1f) { 4337 switch (cmd->t_task_cdb[1] & 0x1f) {
4341 case PRI_READ_KEYS: 4338 case PRI_READ_KEYS:
4342 ret = core_scsi3_pri_read_keys(cmd); 4339 ret = core_scsi3_pri_read_keys(cmd);
4343 break; 4340 break;
4344 case PRI_READ_RESERVATION: 4341 case PRI_READ_RESERVATION:
4345 ret = core_scsi3_pri_read_reservation(cmd); 4342 ret = core_scsi3_pri_read_reservation(cmd);
4346 break; 4343 break;
4347 case PRI_REPORT_CAPABILITIES: 4344 case PRI_REPORT_CAPABILITIES:
4348 ret = core_scsi3_pri_report_capabilities(cmd); 4345 ret = core_scsi3_pri_report_capabilities(cmd);
4349 break; 4346 break;
4350 case PRI_READ_FULL_STATUS: 4347 case PRI_READ_FULL_STATUS:
4351 ret = core_scsi3_pri_read_full_status(cmd); 4348 ret = core_scsi3_pri_read_full_status(cmd);
4352 break; 4349 break;
4353 default: 4350 default:
4354 pr_err("Unknown PERSISTENT_RESERVE_IN service" 4351 pr_err("Unknown PERSISTENT_RESERVE_IN service"
4355 " action: 0x%02x\n", cmd->t_task_cdb[1] & 0x1f); 4352 " action: 0x%02x\n", cmd->t_task_cdb[1] & 0x1f);
4356 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; 4353 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
4357 ret = -EINVAL; 4354 ret = -EINVAL;
4358 break; 4355 break;
4359 } 4356 }
4360 4357
4361 if (!ret) { 4358 if (!ret) {
4362 task->task_scsi_status = GOOD; 4359 task->task_scsi_status = GOOD;
4363 transport_complete_task(task, 1); 4360 transport_complete_task(task, 1);
4364 } 4361 }
4365 return ret; 4362 return ret;
4366 } 4363 }
4367 4364
4368 static int core_pt_reservation_check(struct se_cmd *cmd, u32 *pr_res_type) 4365 static int core_pt_reservation_check(struct se_cmd *cmd, u32 *pr_res_type)
4369 { 4366 {
4370 return 0; 4367 return 0;
4371 } 4368 }
4372 4369
4373 static int core_pt_seq_non_holder( 4370 static int core_pt_seq_non_holder(
4374 struct se_cmd *cmd, 4371 struct se_cmd *cmd,
4375 unsigned char *cdb, 4372 unsigned char *cdb,
4376 u32 pr_reg_type) 4373 u32 pr_reg_type)
4377 { 4374 {
4378 return 0; 4375 return 0;
4379 } 4376 }
4380 4377
4381 int core_setup_reservations(struct se_device *dev, int force_pt) 4378 int core_setup_reservations(struct se_device *dev, int force_pt)
4382 { 4379 {
4383 struct se_subsystem_dev *su_dev = dev->se_sub_dev; 4380 struct se_subsystem_dev *su_dev = dev->se_sub_dev;
4384 struct t10_reservation *rest = &su_dev->t10_pr; 4381 struct t10_reservation *rest = &su_dev->t10_pr;
4385 /* 4382 /*
4386 * If this device is from Target_Core_Mod/pSCSI, use the reservations 4383 * If this device is from Target_Core_Mod/pSCSI, use the reservations
4387 * of the underlying SCSI hardware. In Linux/SCSI terms, this can 4384 * of the underlying SCSI hardware. In Linux/SCSI terms, this can
4388 * cause a problem because libata and some SATA RAID HBAs appear 4385 * cause a problem because libata and some SATA RAID HBAs appear
4389 * under Linux/SCSI, but emulate reservations themselves. 4386 * under Linux/SCSI, but emulate reservations themselves.
4390 */ 4387 */
4391 if (((dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) && 4388 if (((dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) &&
4392 !(dev->se_sub_dev->se_dev_attrib.emulate_reservations)) || force_pt) { 4389 !(dev->se_sub_dev->se_dev_attrib.emulate_reservations)) || force_pt) {
4393 rest->res_type = SPC_PASSTHROUGH; 4390 rest->res_type = SPC_PASSTHROUGH;
4394 rest->pr_ops.t10_reservation_check = &core_pt_reservation_check; 4391 rest->pr_ops.t10_reservation_check = &core_pt_reservation_check;
4395 rest->pr_ops.t10_seq_non_holder = &core_pt_seq_non_holder; 4392 rest->pr_ops.t10_seq_non_holder = &core_pt_seq_non_holder;
4396 pr_debug("%s: Using SPC_PASSTHROUGH, no reservation" 4393 pr_debug("%s: Using SPC_PASSTHROUGH, no reservation"
4397 " emulation\n", dev->transport->name); 4394 " emulation\n", dev->transport->name);
4398 return 0; 4395 return 0;
4399 } 4396 }
4400 /* 4397 /*
4401 * If SPC-3 or above is reported by real or emulated struct se_device, 4398 * If SPC-3 or above is reported by real or emulated struct se_device,
4402 * use emulated Persistent Reservations. 4399 * use emulated Persistent Reservations.
4403 */ 4400 */
4404 if (dev->transport->get_device_rev(dev) >= SCSI_3) { 4401 if (dev->transport->get_device_rev(dev) >= SCSI_3) {
4405 rest->res_type = SPC3_PERSISTENT_RESERVATIONS; 4402 rest->res_type = SPC3_PERSISTENT_RESERVATIONS;
4406 rest->pr_ops.t10_reservation_check = &core_scsi3_pr_reservation_check; 4403 rest->pr_ops.t10_reservation_check = &core_scsi3_pr_reservation_check;
4407 rest->pr_ops.t10_seq_non_holder = &core_scsi3_pr_seq_non_holder; 4404 rest->pr_ops.t10_seq_non_holder = &core_scsi3_pr_seq_non_holder;
4408 pr_debug("%s: Using SPC3_PERSISTENT_RESERVATIONS" 4405 pr_debug("%s: Using SPC3_PERSISTENT_RESERVATIONS"
4409 " emulation\n", dev->transport->name); 4406 " emulation\n", dev->transport->name);
4410 } else { 4407 } else {
4411 rest->res_type = SPC2_RESERVATIONS; 4408 rest->res_type = SPC2_RESERVATIONS;
4412 rest->pr_ops.t10_reservation_check = &core_scsi2_reservation_check; 4409 rest->pr_ops.t10_reservation_check = &core_scsi2_reservation_check;
4413 rest->pr_ops.t10_seq_non_holder = 4410 rest->pr_ops.t10_seq_non_holder =
4414 &core_scsi2_reservation_seq_non_holder; 4411 &core_scsi2_reservation_seq_non_holder;
4415 pr_debug("%s: Using SPC2_RESERVATIONS emulation\n", 4412 pr_debug("%s: Using SPC2_RESERVATIONS emulation\n",
4416 dev->transport->name); 4413 dev->transport->name);
4417 } 4414 }
4418 4415
4419 return 0; 4416 return 0;
4420 } 4417 }
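
The pr_ops function pointers installed above are the reservation entry points the rest of the core dispatches through. A simplified, hypothetical consumer (not the actual in-tree call site) would look like:

	/*
	 * Sketch of a pr_ops consumer; assumes a struct se_cmd *cmd whose
	 * device has already been through core_setup_reservations().
	 */
	static int check_reservation(struct se_cmd *cmd)
	{
		struct t10_reservation *rest = &cmd->se_dev->se_sub_dev->t10_pr;
		u32 pr_type = 0;

		if (rest->pr_ops.t10_reservation_check(cmd, &pr_type) != 0) {
			/* Another I_T nexus holds the reservation: conflict. */
			cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
			return -EINVAL;
		}
		return 0;
	}
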
4421 4418
drivers/target/target_core_pscsi.c
1 /******************************************************************************* 1 /*******************************************************************************
2 * Filename: target_core_pscsi.c 2 * Filename: target_core_pscsi.c
3 * 3 *
4 * This file contains the generic target mode <-> Linux SCSI subsystem plugin. 4 * This file contains the generic target mode <-> Linux SCSI subsystem plugin.
5 * 5 *
6 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc. 6 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
7 * Copyright (c) 2005, 2006, 2007 SBE, Inc. 7 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
8 * Copyright (c) 2007-2010 Rising Tide Systems 8 * Copyright (c) 2007-2010 Rising Tide Systems
9 * Copyright (c) 2008-2010 Linux-iSCSI.org 9 * Copyright (c) 2008-2010 Linux-iSCSI.org
10 * 10 *
11 * Nicholas A. Bellinger <nab@kernel.org> 11 * Nicholas A. Bellinger <nab@kernel.org>
12 * 12 *
13 * This program is free software; you can redistribute it and/or modify 13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by 14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or 15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version. 16 * (at your option) any later version.
17 * 17 *
18 * This program is distributed in the hope that it will be useful, 18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of 19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details. 21 * GNU General Public License for more details.
22 * 22 *
23 * You should have received a copy of the GNU General Public License 23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software 24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 25 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
26 * 26 *
27 ******************************************************************************/ 27 ******************************************************************************/
28 28
29 #include <linux/string.h> 29 #include <linux/string.h>
30 #include <linux/parser.h> 30 #include <linux/parser.h>
31 #include <linux/timer.h> 31 #include <linux/timer.h>
32 #include <linux/blkdev.h> 32 #include <linux/blkdev.h>
33 #include <linux/blk_types.h> 33 #include <linux/blk_types.h>
34 #include <linux/slab.h> 34 #include <linux/slab.h>
35 #include <linux/spinlock.h> 35 #include <linux/spinlock.h>
36 #include <linux/genhd.h> 36 #include <linux/genhd.h>
37 #include <linux/cdrom.h> 37 #include <linux/cdrom.h>
38 #include <linux/file.h> 38 #include <linux/file.h>
39 #include <linux/module.h> 39 #include <linux/module.h>
40 #include <scsi/scsi.h> 40 #include <scsi/scsi.h>
41 #include <scsi/scsi_device.h> 41 #include <scsi/scsi_device.h>
42 #include <scsi/scsi_cmnd.h> 42 #include <scsi/scsi_cmnd.h>
43 #include <scsi/scsi_host.h> 43 #include <scsi/scsi_host.h>
44 #include <scsi/scsi_tcq.h> 44 #include <scsi/scsi_tcq.h>
45 45
46 #include <target/target_core_base.h> 46 #include <target/target_core_base.h>
47 #include <target/target_core_device.h> 47 #include <target/target_core_backend.h>
48 #include <target/target_core_transport.h>
49 48
50 #include "target_core_pscsi.h" 49 #include "target_core_pscsi.h"
51 50
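
The include hunk above is the substance of the patch for this file: target_core_device.h and target_core_transport.h drop out, and the backend driver's target-facing includes reduce to:

	#include <target/target_core_base.h>
	#include <target/target_core_backend.h>
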
52 #define ISPRINT(a) ((a >= ' ') && (a <= '~')) 51 #define ISPRINT(a) ((a >= ' ') && (a <= '~'))
53 52
54 static struct se_subsystem_api pscsi_template; 53 static struct se_subsystem_api pscsi_template;
55 54
56 static void pscsi_req_done(struct request *, int); 55 static void pscsi_req_done(struct request *, int);
57 56
58 /* pscsi_attach_hba(): 57 /* pscsi_attach_hba():
59 * 58 *
60 * pscsi_get_sh() uses scsi_host_lookup() to locate struct Scsi_Host 59 * pscsi_get_sh() uses scsi_host_lookup() to locate struct Scsi_Host
61 * from the passed SCSI Host ID. 60 * from the passed SCSI Host ID.
62 */ 61 */
63 static int pscsi_attach_hba(struct se_hba *hba, u32 host_id) 62 static int pscsi_attach_hba(struct se_hba *hba, u32 host_id)
64 { 63 {
65 struct pscsi_hba_virt *phv; 64 struct pscsi_hba_virt *phv;
66 65
67 phv = kzalloc(sizeof(struct pscsi_hba_virt), GFP_KERNEL); 66 phv = kzalloc(sizeof(struct pscsi_hba_virt), GFP_KERNEL);
68 if (!phv) { 67 if (!phv) {
69 pr_err("Unable to allocate struct pscsi_hba_virt\n"); 68 pr_err("Unable to allocate struct pscsi_hba_virt\n");
70 return -ENOMEM; 69 return -ENOMEM;
71 } 70 }
72 phv->phv_host_id = host_id; 71 phv->phv_host_id = host_id;
73 phv->phv_mode = PHV_VIRUTAL_HOST_ID; 72 phv->phv_mode = PHV_VIRUTAL_HOST_ID;
74 73
75 hba->hba_ptr = phv; 74 hba->hba_ptr = phv;
76 75
77 pr_debug("CORE_HBA[%d] - TCM SCSI HBA Driver %s on" 76 pr_debug("CORE_HBA[%d] - TCM SCSI HBA Driver %s on"
78 " Generic Target Core Stack %s\n", hba->hba_id, 77 " Generic Target Core Stack %s\n", hba->hba_id,
79 PSCSI_VERSION, TARGET_CORE_MOD_VERSION); 78 PSCSI_VERSION, TARGET_CORE_MOD_VERSION);
80 pr_debug("CORE_HBA[%d] - Attached SCSI HBA to Generic\n", 79 pr_debug("CORE_HBA[%d] - Attached SCSI HBA to Generic\n",
81 hba->hba_id); 80 hba->hba_id);
82 81
83 return 0; 82 return 0;
84 } 83 }
85 84
86 static void pscsi_detach_hba(struct se_hba *hba) 85 static void pscsi_detach_hba(struct se_hba *hba)
87 { 86 {
88 struct pscsi_hba_virt *phv = hba->hba_ptr; 87 struct pscsi_hba_virt *phv = hba->hba_ptr;
89 struct Scsi_Host *scsi_host = phv->phv_lld_host; 88 struct Scsi_Host *scsi_host = phv->phv_lld_host;
90 89
91 if (scsi_host) { 90 if (scsi_host) {
92 scsi_host_put(scsi_host); 91 scsi_host_put(scsi_host);
93 92
94 pr_debug("CORE_HBA[%d] - Detached SCSI HBA: %s from" 93 pr_debug("CORE_HBA[%d] - Detached SCSI HBA: %s from"
95 " Generic Target Core\n", hba->hba_id, 94 " Generic Target Core\n", hba->hba_id,
96 (scsi_host->hostt->name) ? (scsi_host->hostt->name) : 95 (scsi_host->hostt->name) ? (scsi_host->hostt->name) :
97 "Unknown"); 96 "Unknown");
98 } else 97 } else
99 pr_debug("CORE_HBA[%d] - Detached Virtual SCSI HBA" 98 pr_debug("CORE_HBA[%d] - Detached Virtual SCSI HBA"
100 " from Generic Target Core\n", hba->hba_id); 99 " from Generic Target Core\n", hba->hba_id);
101 100
102 kfree(phv); 101 kfree(phv);
103 hba->hba_ptr = NULL; 102 hba->hba_ptr = NULL;
104 } 103 }
105 104
106 static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag) 105 static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
107 { 106 {
108 struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr; 107 struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr;
109 struct Scsi_Host *sh = phv->phv_lld_host; 108 struct Scsi_Host *sh = phv->phv_lld_host;
110 /* 109 /*
111 * Release the struct Scsi_Host 110 * Release the struct Scsi_Host
112 */ 111 */
113 if (!mode_flag) { 112 if (!mode_flag) {
114 if (!sh) 113 if (!sh)
115 return 0; 114 return 0;
116 115
117 phv->phv_lld_host = NULL; 116 phv->phv_lld_host = NULL;
118 phv->phv_mode = PHV_VIRUTAL_HOST_ID; 117 phv->phv_mode = PHV_VIRUTAL_HOST_ID;
119 118
120 pr_debug("CORE_HBA[%d] - Disabled pSCSI HBA Passthrough" 119 pr_debug("CORE_HBA[%d] - Disabled pSCSI HBA Passthrough"
121 " %s\n", hba->hba_id, (sh->hostt->name) ? 120 " %s\n", hba->hba_id, (sh->hostt->name) ?
122 (sh->hostt->name) : "Unknown"); 121 (sh->hostt->name) : "Unknown");
123 122
124 scsi_host_put(sh); 123 scsi_host_put(sh);
125 return 0; 124 return 0;
126 } 125 }
127 /* 126 /*
128 * Otherwise, locate struct Scsi_Host from the original passed 127 * Otherwise, locate struct Scsi_Host from the original passed
129 * pSCSI Host ID and enable for phba mode 128 * pSCSI Host ID and enable for phba mode
130 */ 129 */
131 sh = scsi_host_lookup(phv->phv_host_id); 130 sh = scsi_host_lookup(phv->phv_host_id);
132 if (IS_ERR(sh)) { 131 if (IS_ERR(sh)) {
133 pr_err("pSCSI: Unable to locate SCSI Host for" 132 pr_err("pSCSI: Unable to locate SCSI Host for"
134 " phv_host_id: %d\n", phv->phv_host_id); 133 " phv_host_id: %d\n", phv->phv_host_id);
135 return PTR_ERR(sh); 134 return PTR_ERR(sh);
136 } 135 }
137 136
138 phv->phv_lld_host = sh; 137 phv->phv_lld_host = sh;
139 phv->phv_mode = PHV_LLD_SCSI_HOST_NO; 138 phv->phv_mode = PHV_LLD_SCSI_HOST_NO;
140 139
141 pr_debug("CORE_HBA[%d] - Enabled pSCSI HBA Passthrough %s\n", 140 pr_debug("CORE_HBA[%d] - Enabled pSCSI HBA Passthrough %s\n",
142 hba->hba_id, (sh->hostt->name) ? (sh->hostt->name) : "Unknown"); 141 hba->hba_id, (sh->hostt->name) ? (sh->hostt->name) : "Unknown");
143 142
144 return 1; 143 return 1;
145 } 144 }
146 145
147 static void pscsi_tape_read_blocksize(struct se_device *dev, 146 static void pscsi_tape_read_blocksize(struct se_device *dev,
148 struct scsi_device *sdev) 147 struct scsi_device *sdev)
149 { 148 {
150 unsigned char cdb[MAX_COMMAND_SIZE], *buf; 149 unsigned char cdb[MAX_COMMAND_SIZE], *buf;
151 int ret; 150 int ret;
152 151
153 buf = kzalloc(12, GFP_KERNEL); 152 buf = kzalloc(12, GFP_KERNEL);
154 if (!buf) 153 if (!buf)
155 return; 154 return;
156 155
157 memset(cdb, 0, MAX_COMMAND_SIZE); 156 memset(cdb, 0, MAX_COMMAND_SIZE);
158 cdb[0] = MODE_SENSE; 157 cdb[0] = MODE_SENSE;
159 cdb[4] = 0x0c; /* 12 bytes */ 158 cdb[4] = 0x0c; /* 12 bytes */
160 159
161 ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf, 12, NULL, 160 ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf, 12, NULL,
162 HZ, 1, NULL); 161 HZ, 1, NULL);
163 if (ret) 162 if (ret)
164 goto out_free; 163 goto out_free;
165 164
166 /* 165 /*
167 * If MODE_SENSE returns a zero block size, default to 1024. 166 * If MODE_SENSE returns a zero block size, default to 1024.
168 */ 167 */
169 sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]); 168 sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]);
170 if (!sdev->sector_size) 169 if (!sdev->sector_size)
171 sdev->sector_size = 1024; 170 sdev->sector_size = 1024;
172 out_free: 171 out_free:
173 kfree(buf); 172 kfree(buf);
174 } 173 }
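
The sector_size assignment relies on a fixed MODE SENSE(6) response layout. A layout sketch for the buf[9..11] indexing (standard SPC/SSC format; this assumption is not spelled out in the code itself):

	/*
	 * MODE SENSE(6) response layout assumed above: a 4-byte mode
	 * parameter header followed by an 8-byte block descriptor, with
	 * the block length in the descriptor's last three bytes.
	 *
	 *   buf[0..3]   mode parameter header
	 *   buf[4..11]  block descriptor
	 *   buf[9..11]  BLOCK LENGTH, 24-bit big-endian
	 */
	u32 blocksize = (buf[9] << 16) | (buf[10] << 8) | buf[11];
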
175 174
176 static void 175 static void
177 pscsi_set_inquiry_info(struct scsi_device *sdev, struct t10_wwn *wwn) 176 pscsi_set_inquiry_info(struct scsi_device *sdev, struct t10_wwn *wwn)
178 { 177 {
179 unsigned char *buf; 178 unsigned char *buf;
180 179
181 if (sdev->inquiry_len < INQUIRY_LEN) 180 if (sdev->inquiry_len < INQUIRY_LEN)
182 return; 181 return;
183 182
184 buf = sdev->inquiry; 183 buf = sdev->inquiry;
185 if (!buf) 184 if (!buf)
186 return; 185 return;
187 /* 186 /*
188 * Use sdev->inquiry from drivers/scsi/scsi_scan.c:scsi_alloc_sdev() 187 * Use sdev->inquiry from drivers/scsi/scsi_scan.c:scsi_alloc_sdev()
189 */ 188 */
190 memcpy(&wwn->vendor[0], &buf[8], sizeof(wwn->vendor)); 189 memcpy(&wwn->vendor[0], &buf[8], sizeof(wwn->vendor));
191 memcpy(&wwn->model[0], &buf[16], sizeof(wwn->model)); 190 memcpy(&wwn->model[0], &buf[16], sizeof(wwn->model));
192 memcpy(&wwn->revision[0], &buf[32], sizeof(wwn->revision)); 191 memcpy(&wwn->revision[0], &buf[32], sizeof(wwn->revision));
193 } 192 }
194 193
195 static int 194 static int
196 pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn) 195 pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn)
197 { 196 {
198 unsigned char cdb[MAX_COMMAND_SIZE], *buf; 197 unsigned char cdb[MAX_COMMAND_SIZE], *buf;
199 int ret; 198 int ret;
200 199
201 buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL); 200 buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL);
202 if (!buf) 201 if (!buf)
203 return -ENOMEM; 202 return -ENOMEM;
204 203
205 memset(cdb, 0, MAX_COMMAND_SIZE); 204 memset(cdb, 0, MAX_COMMAND_SIZE);
206 cdb[0] = INQUIRY; 205 cdb[0] = INQUIRY;
207 cdb[1] = 0x01; /* Query VPD */ 206 cdb[1] = 0x01; /* Query VPD */
208 cdb[2] = 0x80; /* Unit Serial Number */ 207 cdb[2] = 0x80; /* Unit Serial Number */
209 cdb[3] = (INQUIRY_VPD_SERIAL_LEN >> 8) & 0xff; 208 cdb[3] = (INQUIRY_VPD_SERIAL_LEN >> 8) & 0xff;
210 cdb[4] = (INQUIRY_VPD_SERIAL_LEN & 0xff); 209 cdb[4] = (INQUIRY_VPD_SERIAL_LEN & 0xff);
211 210
212 ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf, 211 ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf,
213 INQUIRY_VPD_SERIAL_LEN, NULL, HZ, 1, NULL); 212 INQUIRY_VPD_SERIAL_LEN, NULL, HZ, 1, NULL);
214 if (ret) 213 if (ret)
215 goto out_free; 214 goto out_free;
216 215
217 snprintf(&wwn->unit_serial[0], INQUIRY_VPD_SERIAL_LEN, "%s", &buf[4]); 216 snprintf(&wwn->unit_serial[0], INQUIRY_VPD_SERIAL_LEN, "%s", &buf[4]);
218 217
219 wwn->t10_sub_dev->su_dev_flags |= SDF_FIRMWARE_VPD_UNIT_SERIAL; 218 wwn->t10_sub_dev->su_dev_flags |= SDF_FIRMWARE_VPD_UNIT_SERIAL;
220 219
221 kfree(buf); 220 kfree(buf);
222 return 0; 221 return 0;
223 222
224 out_free: 223 out_free:
225 kfree(buf); 224 kfree(buf);
226 return -EPERM; 225 return -EPERM;
227 } 226 }
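
The snprintf() from &buf[4] assumes the standard Unit Serial Number VPD layout, in which the ASCII serial directly follows a fixed 4-byte page header; sketched below for reference:

	/*
	 * INQUIRY EVPD page 0x80 (Unit Serial Number) layout assumed above:
	 *
	 *   buf[0]    peripheral qualifier / device type
	 *   buf[1]    page code (0x80)
	 *   buf[2]    reserved
	 *   buf[3]    PAGE LENGTH (n)
	 *   buf[4..]  PRODUCT SERIAL NUMBER, n bytes of ASCII
	 */
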
228 227
229 static void 228 static void
230 pscsi_get_inquiry_vpd_device_ident(struct scsi_device *sdev, 229 pscsi_get_inquiry_vpd_device_ident(struct scsi_device *sdev,
231 struct t10_wwn *wwn) 230 struct t10_wwn *wwn)
232 { 231 {
233 unsigned char cdb[MAX_COMMAND_SIZE], *buf, *page_83; 232 unsigned char cdb[MAX_COMMAND_SIZE], *buf, *page_83;
234 int ident_len, page_len, off = 4, ret; 233 int ident_len, page_len, off = 4, ret;
235 struct t10_vpd *vpd; 234 struct t10_vpd *vpd;
236 235
237 buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL); 236 buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL);
238 if (!buf) 237 if (!buf)
239 return; 238 return;
240 239
241 memset(cdb, 0, MAX_COMMAND_SIZE); 240 memset(cdb, 0, MAX_COMMAND_SIZE);
242 cdb[0] = INQUIRY; 241 cdb[0] = INQUIRY;
243 cdb[1] = 0x01; /* Query VPD */ 242 cdb[1] = 0x01; /* Query VPD */
244 cdb[2] = 0x83; /* Device Identifier */ 243 cdb[2] = 0x83; /* Device Identifier */
245 cdb[3] = (INQUIRY_VPD_DEVICE_IDENTIFIER_LEN >> 8) & 0xff; 244 cdb[3] = (INQUIRY_VPD_DEVICE_IDENTIFIER_LEN >> 8) & 0xff;
246 cdb[4] = (INQUIRY_VPD_DEVICE_IDENTIFIER_LEN & 0xff); 245 cdb[4] = (INQUIRY_VPD_DEVICE_IDENTIFIER_LEN & 0xff);
247 246
248 ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf, 247 ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf,
249 INQUIRY_VPD_DEVICE_IDENTIFIER_LEN, 248 INQUIRY_VPD_DEVICE_IDENTIFIER_LEN,
250 NULL, HZ, 1, NULL); 249 NULL, HZ, 1, NULL);
251 if (ret) 250 if (ret)
252 goto out; 251 goto out;
253 252
254 page_len = (buf[2] << 8) | buf[3]; 253 page_len = (buf[2] << 8) | buf[3];
255 while (page_len > 0) { 254 while (page_len > 0) {
256 /* Grab a pointer to the Identification descriptor */ 255 /* Grab a pointer to the Identification descriptor */
257 page_83 = &buf[off]; 256 page_83 = &buf[off];
258 ident_len = page_83[3]; 257 ident_len = page_83[3];
259 if (!ident_len) { 258 if (!ident_len) {
260 pr_err("page_83[3]: identifier" 259 pr_err("page_83[3]: identifier"
261 " length zero!\n"); 260 " length zero!\n");
262 break; 261 break;
263 } 262 }
264 pr_debug("T10 VPD Identifer Length: %d\n", ident_len); 263 pr_debug("T10 VPD Identifer Length: %d\n", ident_len);
265 264
266 vpd = kzalloc(sizeof(struct t10_vpd), GFP_KERNEL); 265 vpd = kzalloc(sizeof(struct t10_vpd), GFP_KERNEL);
267 if (!vpd) { 266 if (!vpd) {
268 pr_err("Unable to allocate memory for" 267 pr_err("Unable to allocate memory for"
269 " struct t10_vpd\n"); 268 " struct t10_vpd\n");
270 goto out; 269 goto out;
271 } 270 }
272 INIT_LIST_HEAD(&vpd->vpd_list); 271 INIT_LIST_HEAD(&vpd->vpd_list);
273 272
274 transport_set_vpd_proto_id(vpd, page_83); 273 transport_set_vpd_proto_id(vpd, page_83);
275 transport_set_vpd_assoc(vpd, page_83); 274 transport_set_vpd_assoc(vpd, page_83);
276 275
277 if (transport_set_vpd_ident_type(vpd, page_83) < 0) { 276 if (transport_set_vpd_ident_type(vpd, page_83) < 0) {
278 off += (ident_len + 4); 277 off += (ident_len + 4);
279 page_len -= (ident_len + 4); 278 page_len -= (ident_len + 4);
280 kfree(vpd); 279 kfree(vpd);
281 continue; 280 continue;
282 } 281 }
283 if (transport_set_vpd_ident(vpd, page_83) < 0) { 282 if (transport_set_vpd_ident(vpd, page_83) < 0) {
284 off += (ident_len + 4); 283 off += (ident_len + 4);
285 page_len -= (ident_len + 4); 284 page_len -= (ident_len + 4);
286 kfree(vpd); 285 kfree(vpd);
287 continue; 286 continue;
288 } 287 }
289 288
290 list_add_tail(&vpd->vpd_list, &wwn->t10_vpd_list); 289 list_add_tail(&vpd->vpd_list, &wwn->t10_vpd_list);
291 off += (ident_len + 4); 290 off += (ident_len + 4);
292 page_len -= (ident_len + 4); 291 page_len -= (ident_len + 4);
293 } 292 }
294 293
295 out: 294 out:
296 kfree(buf); 295 kfree(buf);
297 } 296 }
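
The (ident_len + 4) stride in the loop above matches the fixed 4-byte designator header of VPD page 0x83. The per-descriptor layout the parser assumes:

	/*
	 * One Device Identification (0x83) designator, as walked above:
	 *
	 *   page_83[0]   protocol identifier / code set
	 *   page_83[1]   PIV / ASSOCIATION / designator type
	 *   page_83[2]   reserved
	 *   page_83[3]   designator length (ident_len)
	 *   page_83[4..] the designator itself, ident_len bytes
	 *
	 * hence off and page_len both advance by (ident_len + 4).
	 */
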
298 297
299 /* pscsi_add_device_to_list(): 298 /* pscsi_add_device_to_list():
300 * 299 *
301 * 300 *
302 */ 301 */
303 static struct se_device *pscsi_add_device_to_list( 302 static struct se_device *pscsi_add_device_to_list(
304 struct se_hba *hba, 303 struct se_hba *hba,
305 struct se_subsystem_dev *se_dev, 304 struct se_subsystem_dev *se_dev,
306 struct pscsi_dev_virt *pdv, 305 struct pscsi_dev_virt *pdv,
307 struct scsi_device *sd, 306 struct scsi_device *sd,
308 int dev_flags) 307 int dev_flags)
309 { 308 {
310 struct se_device *dev; 309 struct se_device *dev;
311 struct se_dev_limits dev_limits; 310 struct se_dev_limits dev_limits;
312 struct request_queue *q; 311 struct request_queue *q;
313 struct queue_limits *limits; 312 struct queue_limits *limits;
314 313
315 memset(&dev_limits, 0, sizeof(struct se_dev_limits)); 314 memset(&dev_limits, 0, sizeof(struct se_dev_limits));
316 315
317 if (!sd->queue_depth) { 316 if (!sd->queue_depth) {
318 sd->queue_depth = PSCSI_DEFAULT_QUEUEDEPTH; 317 sd->queue_depth = PSCSI_DEFAULT_QUEUEDEPTH;
319 318
320 pr_err("Set broken SCSI Device %d:%d:%d" 319 pr_err("Set broken SCSI Device %d:%d:%d"
321 " queue_depth to %d\n", sd->channel, sd->id, 320 " queue_depth to %d\n", sd->channel, sd->id,
322 sd->lun, sd->queue_depth); 321 sd->lun, sd->queue_depth);
323 } 322 }
324 /* 323 /*
325 * Setup the local scope queue_limits from struct request_queue->limits 324 * Setup the local scope queue_limits from struct request_queue->limits
326 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits. 325 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
327 */ 326 */
328 q = sd->request_queue; 327 q = sd->request_queue;
329 limits = &dev_limits.limits; 328 limits = &dev_limits.limits;
330 limits->logical_block_size = sd->sector_size; 329 limits->logical_block_size = sd->sector_size;
331 limits->max_hw_sectors = min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q)); 330 limits->max_hw_sectors = min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q));
332 limits->max_sectors = min_t(int, sd->host->max_sectors, queue_max_sectors(q)); 331 limits->max_sectors = min_t(int, sd->host->max_sectors, queue_max_sectors(q));
333 dev_limits.hw_queue_depth = sd->queue_depth; 332 dev_limits.hw_queue_depth = sd->queue_depth;
334 dev_limits.queue_depth = sd->queue_depth; 333 dev_limits.queue_depth = sd->queue_depth;
335 /* 334 /*
336 * Setup our standard INQUIRY info into se_dev->t10_wwn 335 * Setup our standard INQUIRY info into se_dev->t10_wwn
337 */ 336 */
338 pscsi_set_inquiry_info(sd, &se_dev->t10_wwn); 337 pscsi_set_inquiry_info(sd, &se_dev->t10_wwn);
339 338
340 /* 339 /*
341 * Set the pointer pdv->pdv_sd from the passed struct scsi_device, 340 * Set the pointer pdv->pdv_sd from the passed struct scsi_device,
342 * which has already been referenced with Linux SCSI code with 341 * which has already been referenced with Linux SCSI code with
343 * scsi_device_get() in this file's pscsi_create_virtdevice(). 342 * scsi_device_get() in this file's pscsi_create_virtdevice().
344 * 343 *
345 * The passthrough operations called by the transport_add_device_* 344 * The passthrough operations called by the transport_add_device_*
346 * function below will require this pointer to be set for passthrough 345 * function below will require this pointer to be set for passthrough
347 * ops. 346 * ops.
348 * 347 *
349 * For the shutdown case in pscsi_free_device(), this struct 348 * For the shutdown case in pscsi_free_device(), this struct
350 * scsi_device reference is released with Linux SCSI code 349 * scsi_device reference is released with Linux SCSI code
351 * scsi_device_put() and the pdv->pdv_sd cleared. 350 * scsi_device_put() and the pdv->pdv_sd cleared.
352 */ 351 */
353 pdv->pdv_sd = sd; 352 pdv->pdv_sd = sd;
354 353
355 dev = transport_add_device_to_core_hba(hba, &pscsi_template, 354 dev = transport_add_device_to_core_hba(hba, &pscsi_template,
356 se_dev, dev_flags, pdv, 355 se_dev, dev_flags, pdv,
357 &dev_limits, NULL, NULL); 356 &dev_limits, NULL, NULL);
358 if (!dev) { 357 if (!dev) {
359 pdv->pdv_sd = NULL; 358 pdv->pdv_sd = NULL;
360 return NULL; 359 return NULL;
361 } 360 }
362 361
363 /* 362 /*
364 * Locate VPD WWN Information used for various purposes within 363 * Locate VPD WWN Information used for various purposes within
365 * the Storage Engine. 364 * the Storage Engine.
366 */ 365 */
367 if (!pscsi_get_inquiry_vpd_serial(sd, &se_dev->t10_wwn)) { 366 if (!pscsi_get_inquiry_vpd_serial(sd, &se_dev->t10_wwn)) {
368 /* 367 /*
369 * If VPD Unit Serial returned GOOD status, try 368 * If VPD Unit Serial returned GOOD status, try
370 * VPD Device Identification page (0x83). 369 * VPD Device Identification page (0x83).
371 */ 370 */
372 pscsi_get_inquiry_vpd_device_ident(sd, &se_dev->t10_wwn); 371 pscsi_get_inquiry_vpd_device_ident(sd, &se_dev->t10_wwn);
373 } 372 }
374 373
375 /* 374 /*
376 * For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE. 375 * For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE.
377 */ 376 */
378 if (sd->type == TYPE_TAPE) 377 if (sd->type == TYPE_TAPE)
379 pscsi_tape_read_blocksize(dev, sd); 378 pscsi_tape_read_blocksize(dev, sd);
380 return dev; 379 return dev;
381 } 380 }
382 381
383 static void *pscsi_allocate_virtdevice(struct se_hba *hba, const char *name) 382 static void *pscsi_allocate_virtdevice(struct se_hba *hba, const char *name)
384 { 383 {
385 struct pscsi_dev_virt *pdv; 384 struct pscsi_dev_virt *pdv;
386 385
387 pdv = kzalloc(sizeof(struct pscsi_dev_virt), GFP_KERNEL); 386 pdv = kzalloc(sizeof(struct pscsi_dev_virt), GFP_KERNEL);
388 if (!pdv) { 387 if (!pdv) {
389 pr_err("Unable to allocate memory for struct pscsi_dev_virt\n"); 388 pr_err("Unable to allocate memory for struct pscsi_dev_virt\n");
390 return NULL; 389 return NULL;
391 } 390 }
392 pdv->pdv_se_hba = hba; 391 pdv->pdv_se_hba = hba;
393 392
394 pr_debug("PSCSI: Allocated pdv: %p for %s\n", pdv, name); 393 pr_debug("PSCSI: Allocated pdv: %p for %s\n", pdv, name);
395 return pdv; 394 return pdv;
396 } 395 }
397 396
398 /* 397 /*
399 * Called with struct Scsi_Host->host_lock held. 398 * Called with struct Scsi_Host->host_lock held.
400 */ 399 */
401 static struct se_device *pscsi_create_type_disk( 400 static struct se_device *pscsi_create_type_disk(
402 struct scsi_device *sd, 401 struct scsi_device *sd,
403 struct pscsi_dev_virt *pdv, 402 struct pscsi_dev_virt *pdv,
404 struct se_subsystem_dev *se_dev, 403 struct se_subsystem_dev *se_dev,
405 struct se_hba *hba) 404 struct se_hba *hba)
406 __releases(sh->host_lock) 405 __releases(sh->host_lock)
407 { 406 {
408 struct se_device *dev; 407 struct se_device *dev;
409 struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr; 408 struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr;
410 struct Scsi_Host *sh = sd->host; 409 struct Scsi_Host *sh = sd->host;
411 struct block_device *bd; 410 struct block_device *bd;
412 u32 dev_flags = 0; 411 u32 dev_flags = 0;
413 412
414 if (scsi_device_get(sd)) { 413 if (scsi_device_get(sd)) {
415 pr_err("scsi_device_get() failed for %d:%d:%d:%d\n", 414 pr_err("scsi_device_get() failed for %d:%d:%d:%d\n",
416 sh->host_no, sd->channel, sd->id, sd->lun); 415 sh->host_no, sd->channel, sd->id, sd->lun);
417 spin_unlock_irq(sh->host_lock); 416 spin_unlock_irq(sh->host_lock);
418 return NULL; 417 return NULL;
419 } 418 }
420 spin_unlock_irq(sh->host_lock); 419 spin_unlock_irq(sh->host_lock);
421 /* 420 /*
422 * Claim exclusive struct block_device access to struct scsi_device 421 * Claim exclusive struct block_device access to struct scsi_device
423 * for TYPE_DISK using supplied udev_path 422 * for TYPE_DISK using supplied udev_path
424 */ 423 */
425 bd = blkdev_get_by_path(se_dev->se_dev_udev_path, 424 bd = blkdev_get_by_path(se_dev->se_dev_udev_path,
426 FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv); 425 FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv);
427 if (IS_ERR(bd)) { 426 if (IS_ERR(bd)) {
428 pr_err("pSCSI: blkdev_get_by_path() failed\n"); 427 pr_err("pSCSI: blkdev_get_by_path() failed\n");
429 scsi_device_put(sd); 428 scsi_device_put(sd);
430 return NULL; 429 return NULL;
431 } 430 }
432 pdv->pdv_bd = bd; 431 pdv->pdv_bd = bd;
433 432
434 dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags); 433 dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
435 if (!dev) { 434 if (!dev) {
436 blkdev_put(pdv->pdv_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); 435 blkdev_put(pdv->pdv_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
437 scsi_device_put(sd); 436 scsi_device_put(sd);
438 return NULL; 437 return NULL;
439 } 438 }
440 pr_debug("CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%d\n", 439 pr_debug("CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%d\n",
441 phv->phv_host_id, sh->host_no, sd->channel, sd->id, sd->lun); 440 phv->phv_host_id, sh->host_no, sd->channel, sd->id, sd->lun);
442 441
443 return dev; 442 return dev;
444 } 443 }
445 444
446 /* 445 /*
447 * Called with struct Scsi_Host->host_lock held. 446 * Called with struct Scsi_Host->host_lock held.
448 */ 447 */
449 static struct se_device *pscsi_create_type_rom( 448 static struct se_device *pscsi_create_type_rom(
450 struct scsi_device *sd, 449 struct scsi_device *sd,
451 struct pscsi_dev_virt *pdv, 450 struct pscsi_dev_virt *pdv,
452 struct se_subsystem_dev *se_dev, 451 struct se_subsystem_dev *se_dev,
453 struct se_hba *hba) 452 struct se_hba *hba)
454 __releases(sh->host_lock) 453 __releases(sh->host_lock)
455 { 454 {
456 struct se_device *dev; 455 struct se_device *dev;
457 struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr; 456 struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr;
458 struct Scsi_Host *sh = sd->host; 457 struct Scsi_Host *sh = sd->host;
459 u32 dev_flags = 0; 458 u32 dev_flags = 0;
460 459
461 if (scsi_device_get(sd)) { 460 if (scsi_device_get(sd)) {
462 pr_err("scsi_device_get() failed for %d:%d:%d:%d\n", 461 pr_err("scsi_device_get() failed for %d:%d:%d:%d\n",
463 sh->host_no, sd->channel, sd->id, sd->lun); 462 sh->host_no, sd->channel, sd->id, sd->lun);
464 spin_unlock_irq(sh->host_lock); 463 spin_unlock_irq(sh->host_lock);
465 return NULL; 464 return NULL;
466 } 465 }
467 spin_unlock_irq(sh->host_lock); 466 spin_unlock_irq(sh->host_lock);
468 467
469 dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags); 468 dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
470 if (!dev) { 469 if (!dev) {
471 scsi_device_put(sd); 470 scsi_device_put(sd);
472 return NULL; 471 return NULL;
473 } 472 }
474 pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n", 473 pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
475 phv->phv_host_id, scsi_device_type(sd->type), sh->host_no, 474 phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
476 sd->channel, sd->id, sd->lun); 475 sd->channel, sd->id, sd->lun);
477 476
478 return dev; 477 return dev;
479 } 478 }
480 479
481 /* 480 /*
482 * Called with struct Scsi_Host->host_lock held. 481 * Called with struct Scsi_Host->host_lock held.
483 */ 482 */
484 static struct se_device *pscsi_create_type_other( 483 static struct se_device *pscsi_create_type_other(
485 struct scsi_device *sd, 484 struct scsi_device *sd,
486 struct pscsi_dev_virt *pdv, 485 struct pscsi_dev_virt *pdv,
487 struct se_subsystem_dev *se_dev, 486 struct se_subsystem_dev *se_dev,
488 struct se_hba *hba) 487 struct se_hba *hba)
489 __releases(sh->host_lock) 488 __releases(sh->host_lock)
490 { 489 {
491 struct se_device *dev; 490 struct se_device *dev;
492 struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr; 491 struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr;
493 struct Scsi_Host *sh = sd->host; 492 struct Scsi_Host *sh = sd->host;
494 u32 dev_flags = 0; 493 u32 dev_flags = 0;
495 494
496 spin_unlock_irq(sh->host_lock); 495 spin_unlock_irq(sh->host_lock);
497 dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags); 496 dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
498 if (!dev) 497 if (!dev)
499 return NULL; 498 return NULL;
500 499
501 pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n", 500 pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
502 phv->phv_host_id, scsi_device_type(sd->type), sh->host_no, 501 phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
503 sd->channel, sd->id, sd->lun); 502 sd->channel, sd->id, sd->lun);
504 503
505 return dev; 504 return dev;
506 } 505 }
507 506
508 static struct se_device *pscsi_create_virtdevice( 507 static struct se_device *pscsi_create_virtdevice(
509 struct se_hba *hba, 508 struct se_hba *hba,
510 struct se_subsystem_dev *se_dev, 509 struct se_subsystem_dev *se_dev,
511 void *p) 510 void *p)
512 { 511 {
513 struct pscsi_dev_virt *pdv = (struct pscsi_dev_virt *)p; 512 struct pscsi_dev_virt *pdv = (struct pscsi_dev_virt *)p;
514 struct se_device *dev; 513 struct se_device *dev;
515 struct scsi_device *sd; 514 struct scsi_device *sd;
516 struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr; 515 struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr;
517 struct Scsi_Host *sh = phv->phv_lld_host; 516 struct Scsi_Host *sh = phv->phv_lld_host;
518 int legacy_mode_enable = 0; 517 int legacy_mode_enable = 0;
519 518
520 if (!pdv) { 519 if (!pdv) {
521 pr_err("Unable to locate struct pscsi_dev_virt" 520 pr_err("Unable to locate struct pscsi_dev_virt"
522 " parameter\n"); 521 " parameter\n");
523 return ERR_PTR(-EINVAL); 522 return ERR_PTR(-EINVAL);
524 } 523 }
525 /* 524 /*
526 * If not running in PHV_LLD_SCSI_HOST_NO mode, locate the 525 * If not running in PHV_LLD_SCSI_HOST_NO mode, locate the
527 * struct Scsi_Host we will need to bring the TCM/pSCSI object online 526 * struct Scsi_Host we will need to bring the TCM/pSCSI object online
528 */ 527 */
529 if (!sh) { 528 if (!sh) {
530 if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) { 529 if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
531 pr_err("pSCSI: Unable to locate struct" 530 pr_err("pSCSI: Unable to locate struct"
532 " Scsi_Host for PHV_LLD_SCSI_HOST_NO\n"); 531 " Scsi_Host for PHV_LLD_SCSI_HOST_NO\n");
533 return ERR_PTR(-ENODEV); 532 return ERR_PTR(-ENODEV);
534 } 533 }
535 /* 534 /*
536 * For the newer PHV_VIRUTAL_HOST_ID struct scsi_device 535 * For the newer PHV_VIRUTAL_HOST_ID struct scsi_device
537 * reference, we enforce that udev_path has been set 536 * reference, we enforce that udev_path has been set
538 */ 537 */
539 if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) { 538 if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) {
540 pr_err("pSCSI: udev_path attribute has not" 539 pr_err("pSCSI: udev_path attribute has not"
541 " been set before ENABLE=1\n"); 540 " been set before ENABLE=1\n");
542 return ERR_PTR(-EINVAL); 541 return ERR_PTR(-EINVAL);
543 } 542 }
544 /* 543 /*
545 * If no scsi_host_id= was passed for PHV_VIRUTAL_HOST_ID, 544 * If no scsi_host_id= was passed for PHV_VIRUTAL_HOST_ID,
546 * use the original TCM hba ID to reference Linux/SCSI Host No 545 * use the original TCM hba ID to reference Linux/SCSI Host No
547 * and enable for PHV_LLD_SCSI_HOST_NO mode. 546 * and enable for PHV_LLD_SCSI_HOST_NO mode.
548 */ 547 */
549 if (!(pdv->pdv_flags & PDF_HAS_VIRT_HOST_ID)) { 548 if (!(pdv->pdv_flags & PDF_HAS_VIRT_HOST_ID)) {
550 spin_lock(&hba->device_lock); 549 spin_lock(&hba->device_lock);
551 if (!list_empty(&hba->hba_dev_list)) { 550 if (!list_empty(&hba->hba_dev_list)) {
552 pr_err("pSCSI: Unable to set hba_mode" 551 pr_err("pSCSI: Unable to set hba_mode"
553 " with active devices\n"); 552 " with active devices\n");
554 spin_unlock(&hba->device_lock); 553 spin_unlock(&hba->device_lock);
555 return ERR_PTR(-EEXIST); 554 return ERR_PTR(-EEXIST);
556 } 555 }
557 spin_unlock(&hba->device_lock); 556 spin_unlock(&hba->device_lock);
558 557
559 if (pscsi_pmode_enable_hba(hba, 1) != 1) 558 if (pscsi_pmode_enable_hba(hba, 1) != 1)
560 return ERR_PTR(-ENODEV); 559 return ERR_PTR(-ENODEV);
561 560
562 legacy_mode_enable = 1; 561 legacy_mode_enable = 1;
563 hba->hba_flags |= HBA_FLAGS_PSCSI_MODE; 562 hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
564 sh = phv->phv_lld_host; 563 sh = phv->phv_lld_host;
565 } else { 564 } else {
566 sh = scsi_host_lookup(pdv->pdv_host_id); 565 sh = scsi_host_lookup(pdv->pdv_host_id);
567 if (IS_ERR(sh)) { 566 if (IS_ERR(sh)) {
568 pr_err("pSCSI: Unable to locate" 567 pr_err("pSCSI: Unable to locate"
569 " pdv_host_id: %d\n", pdv->pdv_host_id); 568 " pdv_host_id: %d\n", pdv->pdv_host_id);
570 return ERR_CAST(sh); 569 return ERR_CAST(sh);
571 } 570 }
572 } 571 }
573 } else { 572 } else {
574 if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) { 573 if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) {
575 pr_err("pSCSI: PHV_VIRUTAL_HOST_ID set while" 574 pr_err("pSCSI: PHV_VIRUTAL_HOST_ID set while"
576 " struct Scsi_Host exists\n"); 575 " struct Scsi_Host exists\n");
577 return ERR_PTR(-EEXIST); 576 return ERR_PTR(-EEXIST);
578 } 577 }
579 } 578 }
580 579
581 spin_lock_irq(sh->host_lock); 580 spin_lock_irq(sh->host_lock);
582 list_for_each_entry(sd, &sh->__devices, siblings) { 581 list_for_each_entry(sd, &sh->__devices, siblings) {
583 if ((pdv->pdv_channel_id != sd->channel) || 582 if ((pdv->pdv_channel_id != sd->channel) ||
584 (pdv->pdv_target_id != sd->id) || 583 (pdv->pdv_target_id != sd->id) ||
585 (pdv->pdv_lun_id != sd->lun)) 584 (pdv->pdv_lun_id != sd->lun))
586 continue; 585 continue;
587 /* 586 /*
588 * Functions will release the held struct scsi_host->host_lock 587 * Functions will release the held struct scsi_host->host_lock
589 * before calling pscsi_add_device_to_list() to register 588 * before calling pscsi_add_device_to_list() to register
590 * struct scsi_device with target_core_mod. 589 * struct scsi_device with target_core_mod.
591 */ 590 */
592 switch (sd->type) { 591 switch (sd->type) {
593 case TYPE_DISK: 592 case TYPE_DISK:
594 dev = pscsi_create_type_disk(sd, pdv, se_dev, hba); 593 dev = pscsi_create_type_disk(sd, pdv, se_dev, hba);
595 break; 594 break;
596 case TYPE_ROM: 595 case TYPE_ROM:
597 dev = pscsi_create_type_rom(sd, pdv, se_dev, hba); 596 dev = pscsi_create_type_rom(sd, pdv, se_dev, hba);
598 break; 597 break;
599 default: 598 default:
600 dev = pscsi_create_type_other(sd, pdv, se_dev, hba); 599 dev = pscsi_create_type_other(sd, pdv, se_dev, hba);
601 break; 600 break;
602 } 601 }
603 602
604 if (!dev) { 603 if (!dev) {
605 if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) 604 if (phv->phv_mode == PHV_VIRUTAL_HOST_ID)
606 scsi_host_put(sh); 605 scsi_host_put(sh);
607 else if (legacy_mode_enable) { 606 else if (legacy_mode_enable) {
608 pscsi_pmode_enable_hba(hba, 0); 607 pscsi_pmode_enable_hba(hba, 0);
609 hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE; 608 hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
610 } 609 }
611 pdv->pdv_sd = NULL; 610 pdv->pdv_sd = NULL;
612 return ERR_PTR(-ENODEV); 611 return ERR_PTR(-ENODEV);
613 } 612 }
614 return dev; 613 return dev;
615 } 614 }
616 spin_unlock_irq(sh->host_lock); 615 spin_unlock_irq(sh->host_lock);
617 616
618 pr_err("pSCSI: Unable to locate %d:%d:%d:%d\n", sh->host_no, 617 pr_err("pSCSI: Unable to locate %d:%d:%d:%d\n", sh->host_no,
619 pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id); 618 pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id);
620 619
621 if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) 620 if (phv->phv_mode == PHV_VIRUTAL_HOST_ID)
622 scsi_host_put(sh); 621 scsi_host_put(sh);
623 else if (legacy_mode_enable) { 622 else if (legacy_mode_enable) {
624 pscsi_pmode_enable_hba(hba, 0); 623 pscsi_pmode_enable_hba(hba, 0);
625 hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE; 624 hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
626 } 625 }
627 626
628 return ERR_PTR(-ENODEV); 627 return ERR_PTR(-ENODEV);
629 } 628 }
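pscsi_create_virtdevice() mixes NULL returns from its helpers with ERR_PTR()-encoded errnos, plus ERR_CAST() to forward the error from scsi_host_lookup(). The sketch below is a minimal userspace mirror of that <linux/err.h> convention; the definitions are copied here purely for illustration and follow the kernel's classic form, not necessarily this tree verbatim.

#include <stdio.h>

#define MAX_ERRNO	4095
#define IS_ERR_VALUE(x)	((x) >= (unsigned long)-MAX_ERRNO)

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return IS_ERR_VALUE((unsigned long)ptr);
}

int main(void)
{
	void *dev = ERR_PTR(-19);	/* -19 == -ENODEV, as returned above */
	if (IS_ERR(dev))
		printf("error %ld\n", PTR_ERR(dev));	/* prints error -19 */
	return 0;
}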
630 629
631 /* pscsi_free_device(): (Part of se_subsystem_api_t template) 630 /* pscsi_free_device(): (Part of se_subsystem_api_t template)
632 * 631 *
633 * 632 *
634 */ 633 */
635 static void pscsi_free_device(void *p) 634 static void pscsi_free_device(void *p)
636 { 635 {
637 struct pscsi_dev_virt *pdv = p; 636 struct pscsi_dev_virt *pdv = p;
638 struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr; 637 struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
639 struct scsi_device *sd = pdv->pdv_sd; 638 struct scsi_device *sd = pdv->pdv_sd;
640 639
641 if (sd) { 640 if (sd) {
642 /* 641 /*
643 * Release exclusive pSCSI internal struct block_device claim for 642 * Release exclusive pSCSI internal struct block_device claim for
644 * struct scsi_device with TYPE_DISK from pscsi_create_type_disk() 643 * struct scsi_device with TYPE_DISK from pscsi_create_type_disk()
645 */ 644 */
646 if ((sd->type == TYPE_DISK) && pdv->pdv_bd) { 645 if ((sd->type == TYPE_DISK) && pdv->pdv_bd) {
647 blkdev_put(pdv->pdv_bd, 646 blkdev_put(pdv->pdv_bd,
648 FMODE_WRITE|FMODE_READ|FMODE_EXCL); 647 FMODE_WRITE|FMODE_READ|FMODE_EXCL);
649 pdv->pdv_bd = NULL; 648 pdv->pdv_bd = NULL;
650 } 649 }
651 /* 650 /*
652 * For HBA mode PHV_LLD_SCSI_HOST_NO, release the reference 651 * For HBA mode PHV_LLD_SCSI_HOST_NO, release the reference
653 * to struct Scsi_Host now. 652 * to struct Scsi_Host now.
654 */ 653 */
655 if ((phv->phv_mode == PHV_LLD_SCSI_HOST_NO) && 654 if ((phv->phv_mode == PHV_LLD_SCSI_HOST_NO) &&
656 (phv->phv_lld_host != NULL)) 655 (phv->phv_lld_host != NULL))
657 scsi_host_put(phv->phv_lld_host); 656 scsi_host_put(phv->phv_lld_host);
658 657
659 if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM)) 658 if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM))
660 scsi_device_put(sd); 659 scsi_device_put(sd);
661 660
662 pdv->pdv_sd = NULL; 661 pdv->pdv_sd = NULL;
663 } 662 }
664 663
665 kfree(pdv); 664 kfree(pdv);
666 } 665 }
667 666
668 static inline struct pscsi_plugin_task *PSCSI_TASK(struct se_task *task) 667 static inline struct pscsi_plugin_task *PSCSI_TASK(struct se_task *task)
669 { 668 {
670 return container_of(task, struct pscsi_plugin_task, pscsi_task); 669 return container_of(task, struct pscsi_plugin_task, pscsi_task);
671 } 670 }
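PSCSI_TASK() recovers the enclosing pscsi_plugin_task from the embedded se_task member via container_of(). A standalone sketch of the same pointer arithmetic, using hypothetical inner/outer structs and the classic offsetof() form (the kernel macro layers type checking on top of this):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct inner { int x; };			/* stands in for se_task */
struct outer { long tag; struct inner in; };	/* stands in for pscsi_plugin_task */

int main(void)
{
	struct outer o = { .tag = 42 };
	struct inner *ip = &o.in;
	/* Subtract the member offset to get back to the outer struct. */
	struct outer *op = container_of(ip, struct outer, in);
	printf("tag = %ld\n", op->tag);		/* prints tag = 42 */
	return 0;
}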
672 671
673 672
674 /* pscsi_transport_complete(): 673 /* pscsi_transport_complete():
675 * 674 *
676 * 675 *
677 */ 676 */
678 static int pscsi_transport_complete(struct se_task *task) 677 static int pscsi_transport_complete(struct se_task *task)
679 { 678 {
680 struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr; 679 struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
681 struct scsi_device *sd = pdv->pdv_sd; 680 struct scsi_device *sd = pdv->pdv_sd;
682 int result; 681 int result;
683 struct pscsi_plugin_task *pt = PSCSI_TASK(task); 682 struct pscsi_plugin_task *pt = PSCSI_TASK(task);
684 unsigned char *cdb = &pt->pscsi_cdb[0]; 683 unsigned char *cdb = &pt->pscsi_cdb[0];
685 684
686 result = pt->pscsi_result; 685 result = pt->pscsi_result;
687 /* 686 /*
688 * Hack to make sure that Write-Protect modepage is set if R/O mode is 687 * Hack to make sure that Write-Protect modepage is set if R/O mode is
689 * forced. 688 * forced.
690 */ 689 */
691 if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) && 690 if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) &&
692 (status_byte(result) << 1) == SAM_STAT_GOOD) { 691 (status_byte(result) << 1) == SAM_STAT_GOOD) {
693 if (!task->task_se_cmd->se_deve) 692 if (!task->task_se_cmd->se_deve)
694 goto after_mode_sense; 693 goto after_mode_sense;
695 694
696 if (task->task_se_cmd->se_deve->lun_flags & 695 if (task->task_se_cmd->se_deve->lun_flags &
697 TRANSPORT_LUNFLAGS_READ_ONLY) { 696 TRANSPORT_LUNFLAGS_READ_ONLY) {
698 unsigned char *buf = transport_kmap_first_data_page(task->task_se_cmd); 697 unsigned char *buf = transport_kmap_first_data_page(task->task_se_cmd);
699 698
700 if (cdb[0] == MODE_SENSE_10) { 699 if (cdb[0] == MODE_SENSE_10) {
701 if (!(buf[3] & 0x80)) 700 if (!(buf[3] & 0x80))
702 buf[3] |= 0x80; 701 buf[3] |= 0x80;
703 } else { 702 } else {
704 if (!(buf[2] & 0x80)) 703 if (!(buf[2] & 0x80))
705 buf[2] |= 0x80; 704 buf[2] |= 0x80;
706 } 705 }
707 706
708 transport_kunmap_first_data_page(task->task_se_cmd); 707 transport_kunmap_first_data_page(task->task_se_cmd);
709 } 708 }
710 } 709 }
711 after_mode_sense: 710 after_mode_sense:
712 711
713 if (sd->type != TYPE_TAPE) 712 if (sd->type != TYPE_TAPE)
714 goto after_mode_select; 713 goto after_mode_select;
715 714
716 /* 715 /*
717 * Hack to correctly obtain the initiator requested blocksize for 716 * Hack to correctly obtain the initiator requested blocksize for
718 * TYPE_TAPE. Since this value depends on the loaded tape medium, 717 * TYPE_TAPE. Since this value depends on the loaded tape medium,
719 * struct scsi_device->sector_size will not contain the correct value 718 * struct scsi_device->sector_size will not contain the correct value
720 * by default, so we go ahead and set it so 719 * by default, so we go ahead and set it so
721 * TRANSPORT(dev)->get_blockdev() returns the correct value to the 720 * TRANSPORT(dev)->get_blockdev() returns the correct value to the
722 * storage engine. 721 * storage engine.
723 */ 722 */
724 if (((cdb[0] == MODE_SELECT) || (cdb[0] == MODE_SELECT_10)) && 723 if (((cdb[0] == MODE_SELECT) || (cdb[0] == MODE_SELECT_10)) &&
725 (status_byte(result) << 1) == SAM_STAT_GOOD) { 724 (status_byte(result) << 1) == SAM_STAT_GOOD) {
726 unsigned char *buf; 725 unsigned char *buf;
727 struct scatterlist *sg = task->task_sg; 726 struct scatterlist *sg = task->task_sg;
728 u16 bdl; 727 u16 bdl;
729 u32 blocksize; 728 u32 blocksize;
730 729
731 buf = sg_virt(&sg[0]); 730 buf = sg_virt(&sg[0]);
732 if (!buf) { 731 if (!buf) {
733 pr_err("Unable to get buf for scatterlist\n"); 732 pr_err("Unable to get buf for scatterlist\n");
734 goto after_mode_select; 733 goto after_mode_select;
735 } 734 }
736 735
737 if (cdb[0] == MODE_SELECT) 736 if (cdb[0] == MODE_SELECT)
738 bdl = (buf[3]); 737 bdl = (buf[3]);
739 else 738 else
740 bdl = (buf[6] << 8) | (buf[7]); 739 bdl = (buf[6] << 8) | (buf[7]);
741 740
742 if (!bdl) 741 if (!bdl)
743 goto after_mode_select; 742 goto after_mode_select;
744 743
745 if (cdb[0] == MODE_SELECT) 744 if (cdb[0] == MODE_SELECT)
746 blocksize = (buf[9] << 16) | (buf[10] << 8) | 745 blocksize = (buf[9] << 16) | (buf[10] << 8) |
747 (buf[11]); 746 (buf[11]);
748 else 747 else
749 blocksize = (buf[13] << 16) | (buf[14] << 8) | 748 blocksize = (buf[13] << 16) | (buf[14] << 8) |
750 (buf[15]); 749 (buf[15]);
751 750
752 sd->sector_size = blocksize; 751 sd->sector_size = blocksize;
753 } 752 }
754 after_mode_select: 753 after_mode_select:
755 754
756 if (status_byte(result) & CHECK_CONDITION) 755 if (status_byte(result) & CHECK_CONDITION)
757 return 1; 756 return 1;
758 757
759 return 0; 758 return 0;
760 } 759 }
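Both hacks above key off (status_byte(result) << 1) == SAM_STAT_GOOD, which only reads sensibly once you know how the SCSI midlayer of this era packed a command result: driver, host, and message bytes in the upper three bytes, the raw SAM status in the low byte, and status_byte() returning that status shifted right by one (the historical "shifted" codes, e.g. CHECK_CONDITION == 0x01). A userspace sketch, with the macros assumed to match <scsi/scsi.h> of the time:

#include <stdio.h>

#define status_byte(result)	(((result) >> 1) & 0x7f)	/* SAM status >> 1 */
#define host_byte(result)	(((result) >> 16) & 0xff)

#define SAM_STAT_GOOD			0x00
#define SAM_STAT_CHECK_CONDITION	0x02
#define CHECK_CONDITION			0x01	/* shifted form */
#define DID_OK				0x00

int main(void)
{
	int result = SAM_STAT_CHECK_CONDITION;	/* low byte, as stored by the midlayer */

	/* The <<1 undoes status_byte()'s >>1 to compare against SAM values. */
	printf("good? %d\n", (status_byte(result) << 1) == SAM_STAT_GOOD);
	/* The check at the end of pscsi_transport_complete(): */
	printf("check condition? %d\n", status_byte(result) & CHECK_CONDITION);
	printf("host ok? %d\n", host_byte(result) == DID_OK);
	return 0;
}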
761 760
762 static struct se_task * 761 static struct se_task *
763 pscsi_alloc_task(unsigned char *cdb) 762 pscsi_alloc_task(unsigned char *cdb)
764 { 763 {
765 struct pscsi_plugin_task *pt; 764 struct pscsi_plugin_task *pt;
766 765
767 /* 766 /*
768 * Dynamically alloc cdb space, since it may be larger than 767 * Dynamically alloc cdb space, since it may be larger than
769 * TCM_MAX_COMMAND_SIZE 768 * TCM_MAX_COMMAND_SIZE
770 */ 769 */
771 pt = kzalloc(sizeof(*pt) + scsi_command_size(cdb), GFP_KERNEL); 770 pt = kzalloc(sizeof(*pt) + scsi_command_size(cdb), GFP_KERNEL);
772 if (!pt) { 771 if (!pt) {
773 pr_err("Unable to allocate struct pscsi_plugin_task\n"); 772 pr_err("Unable to allocate struct pscsi_plugin_task\n");
774 return NULL; 773 return NULL;
775 } 774 }
776 775
777 return &pt->pscsi_task; 776 return &pt->pscsi_task;
778 } 777 }
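pscsi_alloc_task() sizes a single kzalloc() as sizeof(*pt) + scsi_command_size(cdb), which implies the CDB buffer trails the struct itself (pscsi_plugin_task presumably ends in a flexible or zero-length pscsi_cdb[] array). A userspace sketch of that one-allocation pattern, with a hypothetical demo_task standing in:

#include <stdlib.h>
#include <string.h>

struct demo_task {
	size_t cdb_len;
	unsigned char cdb[];	/* flexible array member, no pointer chase */
};

static struct demo_task *demo_alloc_task(const unsigned char *cdb, size_t len)
{
	/* One calloc() covers the header and a CDB of any length. */
	struct demo_task *t = calloc(1, sizeof(*t) + len);
	if (!t)
		return NULL;
	t->cdb_len = len;
	memcpy(t->cdb, cdb, len);
	return t;
}

A single kfree() in pscsi_free_task() below then releases header and CDB together.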
779 778
780 static void pscsi_free_task(struct se_task *task) 779 static void pscsi_free_task(struct se_task *task)
781 { 780 {
782 struct pscsi_plugin_task *pt = PSCSI_TASK(task); 781 struct pscsi_plugin_task *pt = PSCSI_TASK(task);
783 782
784 /* 783 /*
785 * We do not release the bio(s) here associated with this task, as 784 * We do not release the bio(s) here associated with this task, as
786 * this is handled by bio_put() and pscsi_bi_endio(). 785 * this is handled by bio_put() and pscsi_bi_endio().
787 */ 786 */
788 kfree(pt); 787 kfree(pt);
789 } 788 }
790 789
791 enum { 790 enum {
792 Opt_scsi_host_id, Opt_scsi_channel_id, Opt_scsi_target_id, 791 Opt_scsi_host_id, Opt_scsi_channel_id, Opt_scsi_target_id,
793 Opt_scsi_lun_id, Opt_err 792 Opt_scsi_lun_id, Opt_err
794 }; 793 };
795 794
796 static match_table_t tokens = { 795 static match_table_t tokens = {
797 {Opt_scsi_host_id, "scsi_host_id=%d"}, 796 {Opt_scsi_host_id, "scsi_host_id=%d"},
798 {Opt_scsi_channel_id, "scsi_channel_id=%d"}, 797 {Opt_scsi_channel_id, "scsi_channel_id=%d"},
799 {Opt_scsi_target_id, "scsi_target_id=%d"}, 798 {Opt_scsi_target_id, "scsi_target_id=%d"},
800 {Opt_scsi_lun_id, "scsi_lun_id=%d"}, 799 {Opt_scsi_lun_id, "scsi_lun_id=%d"},
801 {Opt_err, NULL} 800 {Opt_err, NULL}
802 }; 801 };
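This match_table_t defines the grammar pscsi_set_configfs_dev_params() accepts: a comma-separated list of key=int tokens written to the device's configfs control attribute. A rough userspace approximation of the strsep() loop below, with sscanf() standing in for the kernel's match_token()/match_int():

#define _DEFAULT_SOURCE		/* for strsep() on glibc */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[] = "scsi_channel_id=0,scsi_target_id=1,scsi_lun_id=0";
	char *opts = buf, *ptr;
	int arg;

	while ((ptr = strsep(&opts, ",")) != NULL) {
		if (!*ptr)
			continue;	/* skip empty tokens, as below */
		if (sscanf(ptr, "scsi_channel_id=%d", &arg) == 1)
			printf("channel %d\n", arg);
		else if (sscanf(ptr, "scsi_target_id=%d", &arg) == 1)
			printf("target %d\n", arg);
		else if (sscanf(ptr, "scsi_lun_id=%d", &arg) == 1)
			printf("lun %d\n", arg);
	}
	return 0;
}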
803 802
804 static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba, 803 static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba,
805 struct se_subsystem_dev *se_dev, 804 struct se_subsystem_dev *se_dev,
806 const char *page, 805 const char *page,
807 ssize_t count) 806 ssize_t count)
808 { 807 {
809 struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr; 808 struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr;
810 struct pscsi_hba_virt *phv = hba->hba_ptr; 809 struct pscsi_hba_virt *phv = hba->hba_ptr;
811 char *orig, *ptr, *opts; 810 char *orig, *ptr, *opts;
812 substring_t args[MAX_OPT_ARGS]; 811 substring_t args[MAX_OPT_ARGS];
813 int ret = 0, arg, token; 812 int ret = 0, arg, token;
814 813
815 opts = kstrdup(page, GFP_KERNEL); 814 opts = kstrdup(page, GFP_KERNEL);
816 if (!opts) 815 if (!opts)
817 return -ENOMEM; 816 return -ENOMEM;
818 817
819 orig = opts; 818 orig = opts;
820 819
821 while ((ptr = strsep(&opts, ",")) != NULL) { 820 while ((ptr = strsep(&opts, ",")) != NULL) {
822 if (!*ptr) 821 if (!*ptr)
823 continue; 822 continue;
824 823
825 token = match_token(ptr, tokens, args); 824 token = match_token(ptr, tokens, args);
826 switch (token) { 825 switch (token) {
827 case Opt_scsi_host_id: 826 case Opt_scsi_host_id:
828 if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) { 827 if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
829 pr_err("PSCSI[%d]: Unable to accept" 828 pr_err("PSCSI[%d]: Unable to accept"
830 " scsi_host_id while phv_mode ==" 829 " scsi_host_id while phv_mode =="
831 " PHV_LLD_SCSI_HOST_NO\n", 830 " PHV_LLD_SCSI_HOST_NO\n",
832 phv->phv_host_id); 831 phv->phv_host_id);
833 ret = -EINVAL; 832 ret = -EINVAL;
834 goto out; 833 goto out;
835 } 834 }
836 match_int(args, &arg); 835 match_int(args, &arg);
837 pdv->pdv_host_id = arg; 836 pdv->pdv_host_id = arg;
838 pr_debug("PSCSI[%d]: Referencing SCSI Host ID:" 837 pr_debug("PSCSI[%d]: Referencing SCSI Host ID:"
839 " %d\n", phv->phv_host_id, pdv->pdv_host_id); 838 " %d\n", phv->phv_host_id, pdv->pdv_host_id);
840 pdv->pdv_flags |= PDF_HAS_VIRT_HOST_ID; 839 pdv->pdv_flags |= PDF_HAS_VIRT_HOST_ID;
841 break; 840 break;
842 case Opt_scsi_channel_id: 841 case Opt_scsi_channel_id:
843 match_int(args, &arg); 842 match_int(args, &arg);
844 pdv->pdv_channel_id = arg; 843 pdv->pdv_channel_id = arg;
845 pr_debug("PSCSI[%d]: Referencing SCSI Channel" 844 pr_debug("PSCSI[%d]: Referencing SCSI Channel"
846 " ID: %d\n", phv->phv_host_id, 845 " ID: %d\n", phv->phv_host_id,
847 pdv->pdv_channel_id); 846 pdv->pdv_channel_id);
848 pdv->pdv_flags |= PDF_HAS_CHANNEL_ID; 847 pdv->pdv_flags |= PDF_HAS_CHANNEL_ID;
849 break; 848 break;
850 case Opt_scsi_target_id: 849 case Opt_scsi_target_id:
851 match_int(args, &arg); 850 match_int(args, &arg);
852 pdv->pdv_target_id = arg; 851 pdv->pdv_target_id = arg;
853 pr_debug("PSCSI[%d]: Referencing SCSI Target" 852 pr_debug("PSCSI[%d]: Referencing SCSI Target"
854 " ID: %d\n", phv->phv_host_id, 853 " ID: %d\n", phv->phv_host_id,
855 pdv->pdv_target_id); 854 pdv->pdv_target_id);
856 pdv->pdv_flags |= PDF_HAS_TARGET_ID; 855 pdv->pdv_flags |= PDF_HAS_TARGET_ID;
857 break; 856 break;
858 case Opt_scsi_lun_id: 857 case Opt_scsi_lun_id:
859 match_int(args, &arg); 858 match_int(args, &arg);
860 pdv->pdv_lun_id = arg; 859 pdv->pdv_lun_id = arg;
861 pr_debug("PSCSI[%d]: Referencing SCSI LUN ID:" 860 pr_debug("PSCSI[%d]: Referencing SCSI LUN ID:"
862 " %d\n", phv->phv_host_id, pdv->pdv_lun_id); 861 " %d\n", phv->phv_host_id, pdv->pdv_lun_id);
863 pdv->pdv_flags |= PDF_HAS_LUN_ID; 862 pdv->pdv_flags |= PDF_HAS_LUN_ID;
864 break; 863 break;
865 default: 864 default:
866 break; 865 break;
867 } 866 }
868 } 867 }
869 868
870 out: 869 out:
871 kfree(orig); 870 kfree(orig);
872 return (!ret) ? count : ret; 871 return (!ret) ? count : ret;
873 } 872 }
874 873
875 static ssize_t pscsi_check_configfs_dev_params( 874 static ssize_t pscsi_check_configfs_dev_params(
876 struct se_hba *hba, 875 struct se_hba *hba,
877 struct se_subsystem_dev *se_dev) 876 struct se_subsystem_dev *se_dev)
878 { 877 {
879 struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr; 878 struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr;
880 879
881 if (!(pdv->pdv_flags & PDF_HAS_CHANNEL_ID) || 880 if (!(pdv->pdv_flags & PDF_HAS_CHANNEL_ID) ||
882 !(pdv->pdv_flags & PDF_HAS_TARGET_ID) || 881 !(pdv->pdv_flags & PDF_HAS_TARGET_ID) ||
883 !(pdv->pdv_flags & PDF_HAS_LUN_ID)) { 882 !(pdv->pdv_flags & PDF_HAS_LUN_ID)) {
884 pr_err("Missing scsi_channel_id=, scsi_target_id= and" 883 pr_err("Missing scsi_channel_id=, scsi_target_id= and"
885 " scsi_lun_id= parameters\n"); 884 " scsi_lun_id= parameters\n");
886 return -EINVAL; 885 return -EINVAL;
887 } 886 }
888 887
889 return 0; 888 return 0;
890 } 889 }
891 890
892 static ssize_t pscsi_show_configfs_dev_params(struct se_hba *hba, 891 static ssize_t pscsi_show_configfs_dev_params(struct se_hba *hba,
893 struct se_subsystem_dev *se_dev, 892 struct se_subsystem_dev *se_dev,
894 char *b) 893 char *b)
895 { 894 {
896 struct pscsi_hba_virt *phv = hba->hba_ptr; 895 struct pscsi_hba_virt *phv = hba->hba_ptr;
897 struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr; 896 struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr;
898 struct scsi_device *sd = pdv->pdv_sd; 897 struct scsi_device *sd = pdv->pdv_sd;
899 unsigned char host_id[16]; 898 unsigned char host_id[16];
900 ssize_t bl; 899 ssize_t bl;
901 int i; 900 int i;
902 901
903 if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) 902 if (phv->phv_mode == PHV_VIRUTAL_HOST_ID)
904 snprintf(host_id, 16, "%d", pdv->pdv_host_id); 903 snprintf(host_id, 16, "%d", pdv->pdv_host_id);
905 else 904 else
906 snprintf(host_id, 16, "PHBA Mode"); 905 snprintf(host_id, 16, "PHBA Mode");
907 906
908 bl = sprintf(b, "SCSI Device Bus Location:" 907 bl = sprintf(b, "SCSI Device Bus Location:"
909 " Channel ID: %d Target ID: %d LUN: %d Host ID: %s\n", 908 " Channel ID: %d Target ID: %d LUN: %d Host ID: %s\n",
910 pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id, 909 pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id,
911 host_id); 910 host_id);
912 911
913 if (sd) { 912 if (sd) {
914 bl += sprintf(b + bl, " "); 913 bl += sprintf(b + bl, " ");
915 bl += sprintf(b + bl, "Vendor: "); 914 bl += sprintf(b + bl, "Vendor: ");
916 for (i = 0; i < 8; i++) { 915 for (i = 0; i < 8; i++) {
917 if (ISPRINT(sd->vendor[i])) /* printable character? */ 916 if (ISPRINT(sd->vendor[i])) /* printable character? */
918 bl += sprintf(b + bl, "%c", sd->vendor[i]); 917 bl += sprintf(b + bl, "%c", sd->vendor[i]);
919 else 918 else
920 bl += sprintf(b + bl, " "); 919 bl += sprintf(b + bl, " ");
921 } 920 }
922 bl += sprintf(b + bl, " Model: "); 921 bl += sprintf(b + bl, " Model: ");
923 for (i = 0; i < 16; i++) { 922 for (i = 0; i < 16; i++) {
924 if (ISPRINT(sd->model[i])) /* printable character ? */ 923 if (ISPRINT(sd->model[i])) /* printable character ? */
925 bl += sprintf(b + bl, "%c", sd->model[i]); 924 bl += sprintf(b + bl, "%c", sd->model[i]);
926 else 925 else
927 bl += sprintf(b + bl, " "); 926 bl += sprintf(b + bl, " ");
928 } 927 }
929 bl += sprintf(b + bl, " Rev: "); 928 bl += sprintf(b + bl, " Rev: ");
930 for (i = 0; i < 4; i++) { 929 for (i = 0; i < 4; i++) {
931 if (ISPRINT(sd->rev[i])) /* printable character ? */ 930 if (ISPRINT(sd->rev[i])) /* printable character ? */
932 bl += sprintf(b + bl, "%c", sd->rev[i]); 931 bl += sprintf(b + bl, "%c", sd->rev[i]);
933 else 932 else
934 bl += sprintf(b + bl, " "); 933 bl += sprintf(b + bl, " ");
935 } 934 }
936 bl += sprintf(b + bl, "\n"); 935 bl += sprintf(b + bl, "\n");
937 } 936 }
938 return bl; 937 return bl;
939 } 938 }
940 939
941 static void pscsi_bi_endio(struct bio *bio, int error) 940 static void pscsi_bi_endio(struct bio *bio, int error)
942 { 941 {
943 bio_put(bio); 942 bio_put(bio);
944 } 943 }
945 944
946 static inline struct bio *pscsi_get_bio(int sg_num) 945 static inline struct bio *pscsi_get_bio(int sg_num)
947 { 946 {
948 struct bio *bio; 947 struct bio *bio;
949 /* 948 /*
950 * Use bio_kmalloc() following the comment for bio -> struct request 949 * Use bio_kmalloc() following the comment for bio -> struct request
951 * in block/blk-core.c:blk_make_request() 950 * in block/blk-core.c:blk_make_request()
952 */ 951 */
953 bio = bio_kmalloc(GFP_KERNEL, sg_num); 952 bio = bio_kmalloc(GFP_KERNEL, sg_num);
954 if (!bio) { 953 if (!bio) {
955 pr_err("PSCSI: bio_kmalloc() failed\n"); 954 pr_err("PSCSI: bio_kmalloc() failed\n");
956 return NULL; 955 return NULL;
957 } 956 }
958 bio->bi_end_io = pscsi_bi_endio; 957 bio->bi_end_io = pscsi_bi_endio;
959 958
960 return bio; 959 return bio;
961 } 960 }
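pscsi_map_sg() below strings these bios into a singly linked chain through the *hbio/tbio head-and-tail pair, so a payload larger than one bio can still be handed to blk_make_request() and torn down as a single list. A generic sketch of that head/tail append idiom, with a hypothetical struct node in place of struct bio and next in place of bi_next:

#include <stdio.h>
#include <stdlib.h>

struct node { int id; struct node *next; };

int main(void)
{
	struct node *head = NULL, *tail = NULL;

	for (int i = 0; i < 3; i++) {
		struct node *n = calloc(1, sizeof(*n));
		if (!n)
			break;
		n->id = i;
		if (!head)
			head = tail = n;	/* first node: set both ends */
		else
			tail = tail->next = n;	/* O(1) append at the tail */
	}
	/* Consume head-first, exactly the shape of the fail: unwind below. */
	while (head) {
		struct node *n = head;
		head = head->next;
		printf("node %d\n", n->id);
		free(n);
	}
	return 0;
}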
962 961
963 static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg, 962 static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg,
964 struct bio **hbio) 963 struct bio **hbio)
965 { 964 {
966 struct se_cmd *cmd = task->task_se_cmd; 965 struct se_cmd *cmd = task->task_se_cmd;
967 struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr; 966 struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
968 u32 task_sg_num = task->task_sg_nents; 967 u32 task_sg_num = task->task_sg_nents;
969 struct bio *bio = NULL, *tbio = NULL; 968 struct bio *bio = NULL, *tbio = NULL;
970 struct page *page; 969 struct page *page;
971 struct scatterlist *sg; 970 struct scatterlist *sg;
972 u32 data_len = task->task_size, i, len, bytes, off; 971 u32 data_len = task->task_size, i, len, bytes, off;
973 int nr_pages = (task->task_size + task_sg[0].offset + 972 int nr_pages = (task->task_size + task_sg[0].offset +
974 PAGE_SIZE - 1) >> PAGE_SHIFT; 973 PAGE_SIZE - 1) >> PAGE_SHIFT;
975 int nr_vecs = 0, rc; 974 int nr_vecs = 0, rc;
976 int rw = (task->task_data_direction == DMA_TO_DEVICE); 975 int rw = (task->task_data_direction == DMA_TO_DEVICE);
977 976
978 *hbio = NULL; 977 *hbio = NULL;
979 978
980 pr_debug("PSCSI: nr_pages: %d\n", nr_pages); 979 pr_debug("PSCSI: nr_pages: %d\n", nr_pages);
981 980
982 for_each_sg(task_sg, sg, task_sg_num, i) { 981 for_each_sg(task_sg, sg, task_sg_num, i) {
983 page = sg_page(sg); 982 page = sg_page(sg);
984 off = sg->offset; 983 off = sg->offset;
985 len = sg->length; 984 len = sg->length;
986 985
987 pr_debug("PSCSI: i: %d page: %p len: %d off: %d\n", i, 986 pr_debug("PSCSI: i: %d page: %p len: %d off: %d\n", i,
988 page, len, off); 987 page, len, off);
989 988
990 while (len > 0 && data_len > 0) { 989 while (len > 0 && data_len > 0) {
991 bytes = min_t(unsigned int, len, PAGE_SIZE - off); 990 bytes = min_t(unsigned int, len, PAGE_SIZE - off);
992 bytes = min(bytes, data_len); 991 bytes = min(bytes, data_len);
993 992
994 if (!bio) { 993 if (!bio) {
995 nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages); 994 nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
996 nr_pages -= nr_vecs; 995 nr_pages -= nr_vecs;
997 /* 996 /*
998 * Calls bio_kmalloc() and sets bio->bi_end_io() 997 * Calls bio_kmalloc() and sets bio->bi_end_io()
999 */ 998 */
1000 bio = pscsi_get_bio(nr_vecs); 999 bio = pscsi_get_bio(nr_vecs);
1001 if (!bio) 1000 if (!bio)
1002 goto fail; 1001 goto fail;
1003 1002
1004 if (rw) 1003 if (rw)
1005 bio->bi_rw |= REQ_WRITE; 1004 bio->bi_rw |= REQ_WRITE;
1006 1005
1007 pr_debug("PSCSI: Allocated bio: %p," 1006 pr_debug("PSCSI: Allocated bio: %p,"
1008 " dir: %s nr_vecs: %d\n", bio, 1007 " dir: %s nr_vecs: %d\n", bio,
1009 (rw) ? "rw" : "r", nr_vecs); 1008 (rw) ? "rw" : "r", nr_vecs);
1010 /* 1009 /*
1011 * Set *hbio pointer to handle the case: 1010 * Set *hbio pointer to handle the case:
1012 * nr_pages > BIO_MAX_PAGES, where additional 1011 * nr_pages > BIO_MAX_PAGES, where additional
1013 * bios need to be added to complete a given 1012 * bios need to be added to complete a given
1014 * struct se_task 1013 * struct se_task
1015 */ 1014 */
1016 if (!*hbio) 1015 if (!*hbio)
1017 *hbio = tbio = bio; 1016 *hbio = tbio = bio;
1018 else 1017 else
1019 tbio = tbio->bi_next = bio; 1018 tbio = tbio->bi_next = bio;
1020 } 1019 }
1021 1020
1022 pr_debug("PSCSI: Calling bio_add_pc_page() i: %d" 1021 pr_debug("PSCSI: Calling bio_add_pc_page() i: %d"
1023 " bio: %p page: %p len: %d off: %d\n", i, bio, 1022 " bio: %p page: %p len: %d off: %d\n", i, bio,
1024 page, len, off); 1023 page, len, off);
1025 1024
1026 rc = bio_add_pc_page(pdv->pdv_sd->request_queue, 1025 rc = bio_add_pc_page(pdv->pdv_sd->request_queue,
1027 bio, page, bytes, off); 1026 bio, page, bytes, off);
1028 if (rc != bytes) 1027 if (rc != bytes)
1029 goto fail; 1028 goto fail;
1030 1029
1031 pr_debug("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n", 1030 pr_debug("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n",
1032 bio->bi_vcnt, nr_vecs); 1031 bio->bi_vcnt, nr_vecs);
1033 1032
1034 if (bio->bi_vcnt > nr_vecs) { 1033 if (bio->bi_vcnt > nr_vecs) {
1035 pr_debug("PSCSI: Reached bio->bi_vcnt max:" 1034 pr_debug("PSCSI: Reached bio->bi_vcnt max:"
1036 " %d i: %d bio: %p, allocating another" 1035 " %d i: %d bio: %p, allocating another"
1037 " bio\n", bio->bi_vcnt, i, bio); 1036 " bio\n", bio->bi_vcnt, i, bio);
1038 /* 1037 /*
1039 * Clear the pointer so that another bio will 1038 * Clear the pointer so that another bio will
1040 * be allocated with pscsi_get_bio() above, the 1039 * be allocated with pscsi_get_bio() above, the
1041 * current bio has already been stored in *tbio and 1040 * current bio has already been stored in *tbio and
1042 * linked via bio->bi_next. 1041 * linked via bio->bi_next.
1043 */ 1042 */
1044 bio = NULL; 1043 bio = NULL;
1045 } 1044 }
1046 1045
1047 page++; 1046 page++;
1048 len -= bytes; 1047 len -= bytes;
1049 data_len -= bytes; 1048 data_len -= bytes;
1050 off = 0; 1049 off = 0;
1051 } 1050 }
1052 } 1051 }
1053 1052
1054 return task->task_sg_nents; 1053 return task->task_sg_nents;
1055 fail: 1054 fail:
1056 while (*hbio) { 1055 while (*hbio) {
1057 bio = *hbio; 1056 bio = *hbio;
1058 *hbio = (*hbio)->bi_next; 1057 *hbio = (*hbio)->bi_next;
1059 bio->bi_next = NULL; 1058 bio->bi_next = NULL;
1060 bio_endio(bio, 0); /* XXX: should be error */ 1059 bio_endio(bio, 0); /* XXX: should be error */
1061 } 1060 }
1062 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1061 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1063 return -ENOMEM; 1062 return -ENOMEM;
1064 } 1063 }
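The nr_pages value computed at the top of pscsi_map_sg() is a ceiling division: the payload size plus the first scatterlist entry's intra-page offset, rounded up to whole pages with the usual add-then-shift trick. A small worked example (PAGE_SHIFT of 12, i.e. 4 KiB pages, assumed):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

static unsigned long nr_pages(unsigned long task_size, unsigned long off)
{
	/* ceil((task_size + off) / PAGE_SIZE) without a division */
	return (task_size + off + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

int main(void)
{
	/* 8 KiB starting 512 bytes into a page touches 3 pages, not 2. */
	printf("%lu\n", nr_pages(8192, 512));	/* prints 3 */
	printf("%lu\n", nr_pages(8192, 0));	/* prints 2 */
	return 0;
}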
1065 1064
1066 static int pscsi_do_task(struct se_task *task) 1065 static int pscsi_do_task(struct se_task *task)
1067 { 1066 {
1068 struct se_cmd *cmd = task->task_se_cmd; 1067 struct se_cmd *cmd = task->task_se_cmd;
1069 struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr; 1068 struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
1070 struct pscsi_plugin_task *pt = PSCSI_TASK(task); 1069 struct pscsi_plugin_task *pt = PSCSI_TASK(task);
1071 struct request *req; 1070 struct request *req;
1072 struct bio *hbio; 1071 struct bio *hbio;
1073 int ret; 1072 int ret;
1074 1073
1075 target_get_task_cdb(task, pt->pscsi_cdb); 1074 target_get_task_cdb(task, pt->pscsi_cdb);
1076 1075
1077 if (task->task_se_cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) { 1076 if (task->task_se_cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) {
1078 req = blk_get_request(pdv->pdv_sd->request_queue, 1077 req = blk_get_request(pdv->pdv_sd->request_queue,
1079 (task->task_data_direction == DMA_TO_DEVICE), 1078 (task->task_data_direction == DMA_TO_DEVICE),
1080 GFP_KERNEL); 1079 GFP_KERNEL);
1081 if (!req || IS_ERR(req)) { 1080 if (!req || IS_ERR(req)) {
1082 pr_err("PSCSI: blk_get_request() failed: %ld\n", 1081 pr_err("PSCSI: blk_get_request() failed: %ld\n",
1083 req ? PTR_ERR(req) : -ENOMEM); 1082 req ? PTR_ERR(req) : -ENOMEM);
1084 cmd->scsi_sense_reason = 1083 cmd->scsi_sense_reason =
1085 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1084 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1086 return -ENODEV; 1085 return -ENODEV;
1087 } 1086 }
1088 } else { 1087 } else {
1089 BUG_ON(!task->task_size); 1088 BUG_ON(!task->task_size);
1090 1089
1091 /* 1090 /*
1092 * Setup the main struct request for the task->task_sg[] payload 1091 * Setup the main struct request for the task->task_sg[] payload
1093 */ 1092 */
1094 ret = pscsi_map_sg(task, task->task_sg, &hbio); 1093 ret = pscsi_map_sg(task, task->task_sg, &hbio);
1095 if (ret < 0) { 1094 if (ret < 0) {
1096 cmd->scsi_sense_reason = 1095 cmd->scsi_sense_reason =
1097 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1096 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1098 return ret; 1097 return ret;
1099 } 1098 }
1100 1099
1101 req = blk_make_request(pdv->pdv_sd->request_queue, hbio, 1100 req = blk_make_request(pdv->pdv_sd->request_queue, hbio,
1102 GFP_KERNEL); 1101 GFP_KERNEL);
1103 if (IS_ERR(req)) { 1102 if (IS_ERR(req)) {
1104 pr_err("pSCSI: blk_make_request() failed\n"); 1103 pr_err("pSCSI: blk_make_request() failed\n");
1105 goto fail; 1104 goto fail;
1106 } 1105 }
1107 } 1106 }
1108 1107
1109 req->cmd_type = REQ_TYPE_BLOCK_PC; 1108 req->cmd_type = REQ_TYPE_BLOCK_PC;
1110 req->end_io = pscsi_req_done; 1109 req->end_io = pscsi_req_done;
1111 req->end_io_data = task; 1110 req->end_io_data = task;
1112 req->cmd_len = scsi_command_size(pt->pscsi_cdb); 1111 req->cmd_len = scsi_command_size(pt->pscsi_cdb);
1113 req->cmd = &pt->pscsi_cdb[0]; 1112 req->cmd = &pt->pscsi_cdb[0];
1114 req->sense = &pt->pscsi_sense[0]; 1113 req->sense = &pt->pscsi_sense[0];
1115 req->sense_len = 0; 1114 req->sense_len = 0;
1116 if (pdv->pdv_sd->type == TYPE_DISK) 1115 if (pdv->pdv_sd->type == TYPE_DISK)
1117 req->timeout = PS_TIMEOUT_DISK; 1116 req->timeout = PS_TIMEOUT_DISK;
1118 else 1117 else
1119 req->timeout = PS_TIMEOUT_OTHER; 1118 req->timeout = PS_TIMEOUT_OTHER;
1120 req->retries = PS_RETRY; 1119 req->retries = PS_RETRY;
1121 1120
1122 blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, req, 1121 blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, req,
1123 (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG), 1122 (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG),
1124 pscsi_req_done); 1123 pscsi_req_done);
1125 1124
1126 return 0; 1125 return 0;
1127 1126
1128 fail: 1127 fail:
1129 while (hbio) { 1128 while (hbio) {
1130 struct bio *bio = hbio; 1129 struct bio *bio = hbio;
1131 hbio = hbio->bi_next; 1130 hbio = hbio->bi_next;
1132 bio->bi_next = NULL; 1131 bio->bi_next = NULL;
1133 bio_endio(bio, 0); /* XXX: should be error */ 1132 bio_endio(bio, 0); /* XXX: should be error */
1134 } 1133 }
1135 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1134 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1136 return -ENOMEM; 1135 return -ENOMEM;
1137 } 1136 }
1138 1137
1139 /* pscsi_get_sense_buffer(): 1138 /* pscsi_get_sense_buffer():
1140 * 1139 *
1141 * 1140 *
1142 */ 1141 */
1143 static unsigned char *pscsi_get_sense_buffer(struct se_task *task) 1142 static unsigned char *pscsi_get_sense_buffer(struct se_task *task)
1144 { 1143 {
1145 struct pscsi_plugin_task *pt = PSCSI_TASK(task); 1144 struct pscsi_plugin_task *pt = PSCSI_TASK(task);
1146 1145
1147 return (unsigned char *)&pt->pscsi_sense[0]; 1146 return (unsigned char *)&pt->pscsi_sense[0];
1148 } 1147 }
1149 1148
1150 /* pscsi_get_device_rev(): 1149 /* pscsi_get_device_rev():
1151 * 1150 *
1152 * 1151 *
1153 */ 1152 */
1154 static u32 pscsi_get_device_rev(struct se_device *dev) 1153 static u32 pscsi_get_device_rev(struct se_device *dev)
1155 { 1154 {
1156 struct pscsi_dev_virt *pdv = dev->dev_ptr; 1155 struct pscsi_dev_virt *pdv = dev->dev_ptr;
1157 struct scsi_device *sd = pdv->pdv_sd; 1156 struct scsi_device *sd = pdv->pdv_sd;
1158 1157
1159 return (sd->scsi_level - 1) ? sd->scsi_level - 1 : 1; 1158 return (sd->scsi_level - 1) ? sd->scsi_level - 1 : 1;
1160 } 1159 }
1161 1160
1162 /* pscsi_get_device_type(): 1161 /* pscsi_get_device_type():
1163 * 1162 *
1164 * 1163 *
1165 */ 1164 */
1166 static u32 pscsi_get_device_type(struct se_device *dev) 1165 static u32 pscsi_get_device_type(struct se_device *dev)
1167 { 1166 {
1168 struct pscsi_dev_virt *pdv = dev->dev_ptr; 1167 struct pscsi_dev_virt *pdv = dev->dev_ptr;
1169 struct scsi_device *sd = pdv->pdv_sd; 1168 struct scsi_device *sd = pdv->pdv_sd;
1170 1169
1171 return sd->type; 1170 return sd->type;
1172 } 1171 }
1173 1172
1174 static sector_t pscsi_get_blocks(struct se_device *dev) 1173 static sector_t pscsi_get_blocks(struct se_device *dev)
1175 { 1174 {
1176 struct pscsi_dev_virt *pdv = dev->dev_ptr; 1175 struct pscsi_dev_virt *pdv = dev->dev_ptr;
1177 1176
1178 if (pdv->pdv_bd && pdv->pdv_bd->bd_part) 1177 if (pdv->pdv_bd && pdv->pdv_bd->bd_part)
1179 return pdv->pdv_bd->bd_part->nr_sects; 1178 return pdv->pdv_bd->bd_part->nr_sects;
1180 1179
1181 dump_stack(); 1180 dump_stack();
1182 return 0; 1181 return 0;
1183 } 1182 }
1184 1183
1185 /* pscsi_handle_SAM_STATUS_failures(): 1184 /* pscsi_handle_SAM_STATUS_failures():
1186 * 1185 *
1187 * 1186 *
1188 */ 1187 */
1189 static inline void pscsi_process_SAM_status( 1188 static inline void pscsi_process_SAM_status(
1190 struct se_task *task, 1189 struct se_task *task,
1191 struct pscsi_plugin_task *pt) 1190 struct pscsi_plugin_task *pt)
1192 { 1191 {
1193 task->task_scsi_status = status_byte(pt->pscsi_result); 1192 task->task_scsi_status = status_byte(pt->pscsi_result);
1194 if (task->task_scsi_status) { 1193 if (task->task_scsi_status) {
1195 task->task_scsi_status <<= 1; 1194 task->task_scsi_status <<= 1;
1196 pr_debug("PSCSI Status Byte exception at task: %p CDB:" 1195 pr_debug("PSCSI Status Byte exception at task: %p CDB:"
1197 " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0], 1196 " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
1198 pt->pscsi_result); 1197 pt->pscsi_result);
1199 } 1198 }
1200 1199
1201 switch (host_byte(pt->pscsi_result)) { 1200 switch (host_byte(pt->pscsi_result)) {
1202 case DID_OK: 1201 case DID_OK:
1203 transport_complete_task(task, (!task->task_scsi_status)); 1202 transport_complete_task(task, (!task->task_scsi_status));
1204 break; 1203 break;
1205 default: 1204 default:
1206 pr_debug("PSCSI Host Byte exception at task: %p CDB:" 1205 pr_debug("PSCSI Host Byte exception at task: %p CDB:"
1207 " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0], 1206 " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
1208 pt->pscsi_result); 1207 pt->pscsi_result);
1209 task->task_scsi_status = SAM_STAT_CHECK_CONDITION; 1208 task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
1210 task->task_se_cmd->scsi_sense_reason = 1209 task->task_se_cmd->scsi_sense_reason =
1211 TCM_UNSUPPORTED_SCSI_OPCODE; 1210 TCM_UNSUPPORTED_SCSI_OPCODE;
1212 transport_complete_task(task, 0); 1211 transport_complete_task(task, 0);
1213 break; 1212 break;
1214 } 1213 }
1215 } 1214 }
1216 1215
1217 static void pscsi_req_done(struct request *req, int uptodate) 1216 static void pscsi_req_done(struct request *req, int uptodate)
1218 { 1217 {
1219 struct se_task *task = req->end_io_data; 1218 struct se_task *task = req->end_io_data;
1220 struct pscsi_plugin_task *pt = PSCSI_TASK(task); 1219 struct pscsi_plugin_task *pt = PSCSI_TASK(task);
1221 1220
1222 pt->pscsi_result = req->errors; 1221 pt->pscsi_result = req->errors;
1223 pt->pscsi_resid = req->resid_len; 1222 pt->pscsi_resid = req->resid_len;
1224 1223
1225 pscsi_process_SAM_status(task, pt); 1224 pscsi_process_SAM_status(task, pt);
1226 __blk_put_request(req->q, req); 1225 __blk_put_request(req->q, req);
1227 } 1226 }
1228 1227
1229 static struct se_subsystem_api pscsi_template = { 1228 static struct se_subsystem_api pscsi_template = {
1230 .name = "pscsi", 1229 .name = "pscsi",
1231 .owner = THIS_MODULE, 1230 .owner = THIS_MODULE,
1232 .transport_type = TRANSPORT_PLUGIN_PHBA_PDEV, 1231 .transport_type = TRANSPORT_PLUGIN_PHBA_PDEV,
1233 .attach_hba = pscsi_attach_hba, 1232 .attach_hba = pscsi_attach_hba,
1234 .detach_hba = pscsi_detach_hba, 1233 .detach_hba = pscsi_detach_hba,
1235 .pmode_enable_hba = pscsi_pmode_enable_hba, 1234 .pmode_enable_hba = pscsi_pmode_enable_hba,
1236 .allocate_virtdevice = pscsi_allocate_virtdevice, 1235 .allocate_virtdevice = pscsi_allocate_virtdevice,
1237 .create_virtdevice = pscsi_create_virtdevice, 1236 .create_virtdevice = pscsi_create_virtdevice,
1238 .free_device = pscsi_free_device, 1237 .free_device = pscsi_free_device,
1239 .transport_complete = pscsi_transport_complete, 1238 .transport_complete = pscsi_transport_complete,
1240 .alloc_task = pscsi_alloc_task, 1239 .alloc_task = pscsi_alloc_task,
1241 .do_task = pscsi_do_task, 1240 .do_task = pscsi_do_task,
1242 .free_task = pscsi_free_task, 1241 .free_task = pscsi_free_task,
1243 .check_configfs_dev_params = pscsi_check_configfs_dev_params, 1242 .check_configfs_dev_params = pscsi_check_configfs_dev_params,
1244 .set_configfs_dev_params = pscsi_set_configfs_dev_params, 1243 .set_configfs_dev_params = pscsi_set_configfs_dev_params,
1245 .show_configfs_dev_params = pscsi_show_configfs_dev_params, 1244 .show_configfs_dev_params = pscsi_show_configfs_dev_params,
1246 .get_sense_buffer = pscsi_get_sense_buffer, 1245 .get_sense_buffer = pscsi_get_sense_buffer,
1247 .get_device_rev = pscsi_get_device_rev, 1246 .get_device_rev = pscsi_get_device_rev,
1248 .get_device_type = pscsi_get_device_type, 1247 .get_device_type = pscsi_get_device_type,
1249 .get_blocks = pscsi_get_blocks, 1248 .get_blocks = pscsi_get_blocks,
1250 }; 1249 };
1251 1250
1252 static int __init pscsi_module_init(void) 1251 static int __init pscsi_module_init(void)
1253 { 1252 {
1254 return transport_subsystem_register(&pscsi_template); 1253 return transport_subsystem_register(&pscsi_template);
1255 } 1254 }
1256 1255
1257 static void pscsi_module_exit(void) 1256 static void pscsi_module_exit(void)
1258 { 1257 {
1259 transport_subsystem_release(&pscsi_template); 1258 transport_subsystem_release(&pscsi_template);
1260 } 1259 }
1261 1260
1262 MODULE_DESCRIPTION("TCM PSCSI subsystem plugin"); 1261 MODULE_DESCRIPTION("TCM PSCSI subsystem plugin");
1263 MODULE_AUTHOR("nab@Linux-iSCSI.org"); 1262 MODULE_AUTHOR("nab@Linux-iSCSI.org");
1264 MODULE_LICENSE("GPL"); 1263 MODULE_LICENSE("GPL");
1265 1264
1266 module_init(pscsi_module_init); 1265 module_init(pscsi_module_init);
1267 module_exit(pscsi_module_exit); 1266 module_exit(pscsi_module_exit);
1268 1267
drivers/target/target_core_rd.c
1 /******************************************************************************* 1 /*******************************************************************************
2 * Filename: target_core_rd.c 2 * Filename: target_core_rd.c
3 * 3 *
4 * This file contains the Storage Engine <-> Ramdisk transport 4 * This file contains the Storage Engine <-> Ramdisk transport
5 * specific functions. 5 * specific functions.
6 * 6 *
7 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc. 7 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
8 * Copyright (c) 2005, 2006, 2007 SBE, Inc. 8 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
9 * Copyright (c) 2007-2010 Rising Tide Systems 9 * Copyright (c) 2007-2010 Rising Tide Systems
10 * Copyright (c) 2008-2010 Linux-iSCSI.org 10 * Copyright (c) 2008-2010 Linux-iSCSI.org
11 * 11 *
12 * Nicholas A. Bellinger <nab@kernel.org> 12 * Nicholas A. Bellinger <nab@kernel.org>
13 * 13 *
14 * This program is free software; you can redistribute it and/or modify 14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by 15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or 16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version. 17 * (at your option) any later version.
18 * 18 *
19 * This program is distributed in the hope that it will be useful, 19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of 20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details. 22 * GNU General Public License for more details.
23 * 23 *
24 * You should have received a copy of the GNU General Public License 24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software 25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 26 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
27 * 27 *
28 ******************************************************************************/ 28 ******************************************************************************/
29 29
30 #include <linux/string.h> 30 #include <linux/string.h>
31 #include <linux/parser.h> 31 #include <linux/parser.h>
32 #include <linux/timer.h> 32 #include <linux/timer.h>
33 #include <linux/blkdev.h> 33 #include <linux/blkdev.h>
34 #include <linux/slab.h> 34 #include <linux/slab.h>
35 #include <linux/spinlock.h> 35 #include <linux/spinlock.h>
36 #include <scsi/scsi.h> 36 #include <scsi/scsi.h>
37 #include <scsi/scsi_host.h> 37 #include <scsi/scsi_host.h>
38 38
39 #include <target/target_core_base.h> 39 #include <target/target_core_base.h>
40 #include <target/target_core_device.h> 40 #include <target/target_core_backend.h>
41 #include <target/target_core_transport.h>
42 #include <target/target_core_fabric_ops.h>
43 41
44 #include "target_core_rd.h" 42 #include "target_core_rd.h"
45 43
46 static struct se_subsystem_api rd_mcp_template; 44 static struct se_subsystem_api rd_mcp_template;
47 45
48 /* rd_attach_hba(): (Part of se_subsystem_api_t template) 46 /* rd_attach_hba(): (Part of se_subsystem_api_t template)
49 * 47 *
50 * 48 *
51 */ 49 */
52 static int rd_attach_hba(struct se_hba *hba, u32 host_id) 50 static int rd_attach_hba(struct se_hba *hba, u32 host_id)
53 { 51 {
54 struct rd_host *rd_host; 52 struct rd_host *rd_host;
55 53
56 rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL); 54 rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL);
57 if (!rd_host) { 55 if (!rd_host) {
58 pr_err("Unable to allocate memory for struct rd_host\n"); 56 pr_err("Unable to allocate memory for struct rd_host\n");
59 return -ENOMEM; 57 return -ENOMEM;
60 } 58 }
61 59
62 rd_host->rd_host_id = host_id; 60 rd_host->rd_host_id = host_id;
63 61
64 hba->hba_ptr = rd_host; 62 hba->hba_ptr = rd_host;
65 63
66 pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on" 64 pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
67 " Generic Target Core Stack %s\n", hba->hba_id, 65 " Generic Target Core Stack %s\n", hba->hba_id,
68 RD_HBA_VERSION, TARGET_CORE_MOD_VERSION); 66 RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);
69 pr_debug("CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic" 67 pr_debug("CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic"
70 " MaxSectors: %u\n", hba->hba_id, 68 " MaxSectors: %u\n", hba->hba_id,
71 rd_host->rd_host_id, RD_MAX_SECTORS); 69 rd_host->rd_host_id, RD_MAX_SECTORS);
72 70
73 return 0; 71 return 0;
74 } 72 }
75 73
76 static void rd_detach_hba(struct se_hba *hba) 74 static void rd_detach_hba(struct se_hba *hba)
77 { 75 {
78 struct rd_host *rd_host = hba->hba_ptr; 76 struct rd_host *rd_host = hba->hba_ptr;
79 77
80 pr_debug("CORE_HBA[%d] - Detached Ramdisk HBA: %u from" 78 pr_debug("CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
81 " Generic Target Core\n", hba->hba_id, rd_host->rd_host_id); 79 " Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);
82 80
83 kfree(rd_host); 81 kfree(rd_host);
84 hba->hba_ptr = NULL; 82 hba->hba_ptr = NULL;
85 } 83 }
86 84
87 /* rd_release_device_space(): 85 /* rd_release_device_space():
88 * 86 *
89 * 87 *
90 */ 88 */
91 static void rd_release_device_space(struct rd_dev *rd_dev) 89 static void rd_release_device_space(struct rd_dev *rd_dev)
92 { 90 {
93 u32 i, j, page_count = 0, sg_per_table; 91 u32 i, j, page_count = 0, sg_per_table;
94 struct rd_dev_sg_table *sg_table; 92 struct rd_dev_sg_table *sg_table;
95 struct page *pg; 93 struct page *pg;
96 struct scatterlist *sg; 94 struct scatterlist *sg;
97 95
98 if (!rd_dev->sg_table_array || !rd_dev->sg_table_count) 96 if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
99 return; 97 return;
100 98
101 sg_table = rd_dev->sg_table_array; 99 sg_table = rd_dev->sg_table_array;
102 100
103 for (i = 0; i < rd_dev->sg_table_count; i++) { 101 for (i = 0; i < rd_dev->sg_table_count; i++) {
104 sg = sg_table[i].sg_table; 102 sg = sg_table[i].sg_table;
105 sg_per_table = sg_table[i].rd_sg_count; 103 sg_per_table = sg_table[i].rd_sg_count;
106 104
107 for (j = 0; j < sg_per_table; j++) { 105 for (j = 0; j < sg_per_table; j++) {
108 pg = sg_page(&sg[j]); 106 pg = sg_page(&sg[j]);
109 if (pg) { 107 if (pg) {
110 __free_page(pg); 108 __free_page(pg);
111 page_count++; 109 page_count++;
112 } 110 }
113 } 111 }
114 112
115 kfree(sg); 113 kfree(sg);
116 } 114 }
117 115
118 pr_debug("CORE_RD[%u] - Released device space for Ramdisk" 116 pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
119 " Device ID: %u, pages %u in %u tables total bytes %lu\n", 117 " Device ID: %u, pages %u in %u tables total bytes %lu\n",
120 rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count, 118 rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
121 rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE); 119 rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);
122 120
123 kfree(sg_table); 121 kfree(sg_table);
124 rd_dev->sg_table_array = NULL; 122 rd_dev->sg_table_array = NULL;
125 rd_dev->sg_table_count = 0; 123 rd_dev->sg_table_count = 0;
126 } 124 }
127 125
128 126
129 /* rd_build_device_space(): 127 /* rd_build_device_space():
130 * Allocate the scatterlist tables and zeroed backing pages covering rd_page_count pages. 128 * Allocate the scatterlist tables and zeroed backing pages covering rd_page_count pages.
132 */ 130 */
133 static int rd_build_device_space(struct rd_dev *rd_dev) 131 static int rd_build_device_space(struct rd_dev *rd_dev)
134 { 132 {
135 u32 i = 0, j, page_offset = 0, sg_per_table, sg_tables, total_sg_needed; 133 u32 i = 0, j, page_offset = 0, sg_per_table, sg_tables, total_sg_needed;
136 u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE / 134 u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
137 sizeof(struct scatterlist)); 135 sizeof(struct scatterlist));
138 struct rd_dev_sg_table *sg_table; 136 struct rd_dev_sg_table *sg_table;
139 struct page *pg; 137 struct page *pg;
140 struct scatterlist *sg; 138 struct scatterlist *sg;
141 139
142 if (rd_dev->rd_page_count <= 0) { 140 if (rd_dev->rd_page_count <= 0) {
143 pr_err("Illegal page count: %u for Ramdisk device\n", 141 pr_err("Illegal page count: %u for Ramdisk device\n",
144 rd_dev->rd_page_count); 142 rd_dev->rd_page_count);
145 return -EINVAL; 143 return -EINVAL;
146 } 144 }
147 total_sg_needed = rd_dev->rd_page_count; 145 total_sg_needed = rd_dev->rd_page_count;
148 146
149 sg_tables = (total_sg_needed / max_sg_per_table) + 1; 147 sg_tables = (total_sg_needed / max_sg_per_table) + 1;
150 148
151 sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL); 149 sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
152 if (!sg_table) { 150 if (!sg_table) {
153 pr_err("Unable to allocate memory for Ramdisk" 151 pr_err("Unable to allocate memory for Ramdisk"
154 " scatterlist tables\n"); 152 " scatterlist tables\n");
155 return -ENOMEM; 153 return -ENOMEM;
156 } 154 }
157 155
158 rd_dev->sg_table_array = sg_table; 156 rd_dev->sg_table_array = sg_table;
159 rd_dev->sg_table_count = sg_tables; 157 rd_dev->sg_table_count = sg_tables;
160 158
161 while (total_sg_needed) { 159 while (total_sg_needed) {
162 sg_per_table = (total_sg_needed > max_sg_per_table) ? 160 sg_per_table = (total_sg_needed > max_sg_per_table) ?
163 max_sg_per_table : total_sg_needed; 161 max_sg_per_table : total_sg_needed;
164 162
165 sg = kzalloc(sg_per_table * sizeof(struct scatterlist), 163 sg = kzalloc(sg_per_table * sizeof(struct scatterlist),
166 GFP_KERNEL); 164 GFP_KERNEL);
167 if (!sg) { 165 if (!sg) {
168 pr_err("Unable to allocate scatterlist array" 166 pr_err("Unable to allocate scatterlist array"
169 " for struct rd_dev\n"); 167 " for struct rd_dev\n");
170 return -ENOMEM; 168 return -ENOMEM;
171 } 169 }
172 170
173 sg_init_table(sg, sg_per_table); 171 sg_init_table(sg, sg_per_table);
174 172
175 sg_table[i].sg_table = sg; 173 sg_table[i].sg_table = sg;
176 sg_table[i].rd_sg_count = sg_per_table; 174 sg_table[i].rd_sg_count = sg_per_table;
177 sg_table[i].page_start_offset = page_offset; 175 sg_table[i].page_start_offset = page_offset;
178 sg_table[i++].page_end_offset = (page_offset + sg_per_table) 176 sg_table[i++].page_end_offset = (page_offset + sg_per_table)
179 - 1; 177 - 1;
180 178
181 for (j = 0; j < sg_per_table; j++) { 179 for (j = 0; j < sg_per_table; j++) {
182 pg = alloc_pages(GFP_KERNEL, 0); 180 pg = alloc_pages(GFP_KERNEL, 0);
183 if (!pg) { 181 if (!pg) {
184 pr_err("Unable to allocate scatterlist" 182 pr_err("Unable to allocate scatterlist"
185 " pages for struct rd_dev_sg_table\n"); 183 " pages for struct rd_dev_sg_table\n");
186 return -ENOMEM; 184 return -ENOMEM;
187 } 185 }
188 sg_assign_page(&sg[j], pg); 186 sg_assign_page(&sg[j], pg);
189 sg[j].length = PAGE_SIZE; 187 sg[j].length = PAGE_SIZE;
190 } 188 }
191 189
192 page_offset += sg_per_table; 190 page_offset += sg_per_table;
193 total_sg_needed -= sg_per_table; 191 total_sg_needed -= sg_per_table;
194 } 192 }
195 193
196 pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of" 194 pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
197 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id, 195 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
198 rd_dev->rd_dev_id, rd_dev->rd_page_count, 196 rd_dev->rd_dev_id, rd_dev->rd_page_count,
199 rd_dev->sg_table_count); 197 rd_dev->sg_table_count);
200 198
201 return 0; 199 return 0;
202 } 200 }
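
The sizing math above is worth spelling out. A minimal userspace sketch of the same arithmetic, assuming an illustrative 65536-byte allocation cap and 32-byte scatterlist entries (both values are assumptions for the example, not taken from this diff):

        #include <stdio.h>

        int main(void)
        {
                unsigned int max_alloc = 65536;         /* assumed kzalloc cap */
                unsigned int sg_entry  = 32;            /* assumed sizeof(struct scatterlist) */
                unsigned int max_sg_per_table = max_alloc / sg_entry;   /* 2048 */
                unsigned int rd_page_count = 65536;     /* 256 MiB of 4 KiB pages */
                unsigned int sg_tables = (rd_page_count / max_sg_per_table) + 1;

                printf("%u pages -> %u tables, up to %u entries each\n",
                       rd_page_count, sg_tables, max_sg_per_table);
                return 0;
        }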
203 201
204 static void *rd_allocate_virtdevice( 202 static void *rd_allocate_virtdevice(
205 struct se_hba *hba, 203 struct se_hba *hba,
206 const char *name, 204 const char *name,
207 int rd_direct) 205 int rd_direct)
208 { 206 {
209 struct rd_dev *rd_dev; 207 struct rd_dev *rd_dev;
210 struct rd_host *rd_host = hba->hba_ptr; 208 struct rd_host *rd_host = hba->hba_ptr;
211 209
212 rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL); 210 rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL);
213 if (!rd_dev) { 211 if (!rd_dev) {
214 pr_err("Unable to allocate memory for struct rd_dev\n"); 212 pr_err("Unable to allocate memory for struct rd_dev\n");
215 return NULL; 213 return NULL;
216 } 214 }
217 215
218 rd_dev->rd_host = rd_host; 216 rd_dev->rd_host = rd_host;
219 rd_dev->rd_direct = rd_direct; 217 rd_dev->rd_direct = rd_direct;
220 218
221 return rd_dev; 219 return rd_dev;
222 } 220 }
223 221
224 static void *rd_MEMCPY_allocate_virtdevice(struct se_hba *hba, const char *name) 222 static void *rd_MEMCPY_allocate_virtdevice(struct se_hba *hba, const char *name)
225 { 223 {
226 return rd_allocate_virtdevice(hba, name, 0); 224 return rd_allocate_virtdevice(hba, name, 0);
227 } 225 }
228 226
229 /* rd_create_virtdevice(): 227 /* rd_create_virtdevice():
230 * Build the device space and register the ramdisk with the target core HBA. 228 * Build the device space and register the ramdisk with the target core HBA.
232 */ 230 */
233 static struct se_device *rd_create_virtdevice( 231 static struct se_device *rd_create_virtdevice(
234 struct se_hba *hba, 232 struct se_hba *hba,
235 struct se_subsystem_dev *se_dev, 233 struct se_subsystem_dev *se_dev,
236 void *p, 234 void *p,
237 int rd_direct) 235 int rd_direct)
238 { 236 {
239 struct se_device *dev; 237 struct se_device *dev;
240 struct se_dev_limits dev_limits; 238 struct se_dev_limits dev_limits;
241 struct rd_dev *rd_dev = p; 239 struct rd_dev *rd_dev = p;
242 struct rd_host *rd_host = hba->hba_ptr; 240 struct rd_host *rd_host = hba->hba_ptr;
243 int dev_flags = 0, ret; 241 int dev_flags = 0, ret;
244 char prod[16], rev[4]; 242 char prod[16], rev[4];
245 243
246 memset(&dev_limits, 0, sizeof(struct se_dev_limits)); 244 memset(&dev_limits, 0, sizeof(struct se_dev_limits));
247 245
248 ret = rd_build_device_space(rd_dev); 246 ret = rd_build_device_space(rd_dev);
249 if (ret < 0) 247 if (ret < 0)
250 goto fail; 248 goto fail;
251 249
252 snprintf(prod, 16, "RAMDISK-%s", (rd_dev->rd_direct) ? "DR" : "MCP"); 250 snprintf(prod, 16, "RAMDISK-%s", (rd_dev->rd_direct) ? "DR" : "MCP");
253 snprintf(rev, 4, "%s", (rd_dev->rd_direct) ? RD_DR_VERSION : 251 snprintf(rev, 4, "%s", (rd_dev->rd_direct) ? RD_DR_VERSION :
254 RD_MCP_VERSION); 252 RD_MCP_VERSION);
255 253
256 dev_limits.limits.logical_block_size = RD_BLOCKSIZE; 254 dev_limits.limits.logical_block_size = RD_BLOCKSIZE;
257 dev_limits.limits.max_hw_sectors = RD_MAX_SECTORS; 255 dev_limits.limits.max_hw_sectors = RD_MAX_SECTORS;
258 dev_limits.limits.max_sectors = RD_MAX_SECTORS; 256 dev_limits.limits.max_sectors = RD_MAX_SECTORS;
259 dev_limits.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH; 257 dev_limits.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
260 dev_limits.queue_depth = RD_DEVICE_QUEUE_DEPTH; 258 dev_limits.queue_depth = RD_DEVICE_QUEUE_DEPTH;
261 259
262 dev = transport_add_device_to_core_hba(hba, 260 dev = transport_add_device_to_core_hba(hba,
263 &rd_mcp_template, se_dev, dev_flags, rd_dev, 261 &rd_mcp_template, se_dev, dev_flags, rd_dev,
264 &dev_limits, prod, rev); 262 &dev_limits, prod, rev);
265 if (!dev) 263 if (!dev)
266 goto fail; 264 goto fail;
267 265
268 rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++; 266 rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
269 rd_dev->rd_queue_depth = dev->queue_depth; 267 rd_dev->rd_queue_depth = dev->queue_depth;
270 268
271 pr_debug("CORE_RD[%u] - Added TCM %s Ramdisk Device ID: %u of" 269 pr_debug("CORE_RD[%u] - Added TCM %s Ramdisk Device ID: %u of"
272 " %u pages in %u tables, %lu total bytes\n", 270 " %u pages in %u tables, %lu total bytes\n",
273 rd_host->rd_host_id, (!rd_dev->rd_direct) ? "MEMCPY" : 271 rd_host->rd_host_id, (!rd_dev->rd_direct) ? "MEMCPY" :
274 "DIRECT", rd_dev->rd_dev_id, rd_dev->rd_page_count, 272 "DIRECT", rd_dev->rd_dev_id, rd_dev->rd_page_count,
275 rd_dev->sg_table_count, 273 rd_dev->sg_table_count,
276 (unsigned long)(rd_dev->rd_page_count * PAGE_SIZE)); 274 (unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));
277 275
278 return dev; 276 return dev;
279 277
280 fail: 278 fail:
281 rd_release_device_space(rd_dev); 279 rd_release_device_space(rd_dev);
282 return ERR_PTR(ret); 280 return ERR_PTR(ret);
283 } 281 }
284 282
285 static struct se_device *rd_MEMCPY_create_virtdevice( 283 static struct se_device *rd_MEMCPY_create_virtdevice(
286 struct se_hba *hba, 284 struct se_hba *hba,
287 struct se_subsystem_dev *se_dev, 285 struct se_subsystem_dev *se_dev,
288 void *p) 286 void *p)
289 { 287 {
290 return rd_create_virtdevice(hba, se_dev, p, 0); 288 return rd_create_virtdevice(hba, se_dev, p, 0);
291 } 289 }
292 290
293 /* rd_free_device(): (Part of se_subsystem_api_t template) 291 /* rd_free_device(): (Part of se_subsystem_api_t template)
294 * Release the device space and free the struct rd_dev itself. 292 * Release the device space and free the struct rd_dev itself.
296 */ 294 */
297 static void rd_free_device(void *p) 295 static void rd_free_device(void *p)
298 { 296 {
299 struct rd_dev *rd_dev = p; 297 struct rd_dev *rd_dev = p;
300 298
301 rd_release_device_space(rd_dev); 299 rd_release_device_space(rd_dev);
302 kfree(rd_dev); 300 kfree(rd_dev);
303 } 301 }
304 302
305 static inline struct rd_request *RD_REQ(struct se_task *task) 303 static inline struct rd_request *RD_REQ(struct se_task *task)
306 { 304 {
307 return container_of(task, struct rd_request, rd_task); 305 return container_of(task, struct rd_request, rd_task);
308 } 306 }
309 307
310 static struct se_task * 308 static struct se_task *
311 rd_alloc_task(unsigned char *cdb) 309 rd_alloc_task(unsigned char *cdb)
312 { 310 {
313 struct rd_request *rd_req; 311 struct rd_request *rd_req;
314 312
315 rd_req = kzalloc(sizeof(struct rd_request), GFP_KERNEL); 313 rd_req = kzalloc(sizeof(struct rd_request), GFP_KERNEL);
316 if (!rd_req) { 314 if (!rd_req) {
317 pr_err("Unable to allocate struct rd_request\n"); 315 pr_err("Unable to allocate struct rd_request\n");
318 return NULL; 316 return NULL;
319 } 317 }
320 318
321 return &rd_req->rd_task; 319 return &rd_req->rd_task;
322 } 320 }
323 321
324 /* rd_get_sg_table(): 322 /* rd_get_sg_table():
325 * Look up the scatterlist table whose page range covers the given page index. 323 * Look up the scatterlist table whose page range covers the given page index.
327 */ 325 */
328 static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page) 326 static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
329 { 327 {
330 u32 i; 328 u32 i;
331 struct rd_dev_sg_table *sg_table; 329 struct rd_dev_sg_table *sg_table;
332 330
333 for (i = 0; i < rd_dev->sg_table_count; i++) { 331 for (i = 0; i < rd_dev->sg_table_count; i++) {
334 sg_table = &rd_dev->sg_table_array[i]; 332 sg_table = &rd_dev->sg_table_array[i];
335 if ((sg_table->page_start_offset <= page) && 333 if ((sg_table->page_start_offset <= page) &&
336 (sg_table->page_end_offset >= page)) 334 (sg_table->page_end_offset >= page))
337 return sg_table; 335 return sg_table;
338 } 336 }
339 337
340 pr_err("Unable to locate struct rd_dev_sg_table for page: %u\n", 338 pr_err("Unable to locate struct rd_dev_sg_table for page: %u\n",
341 page); 339 page);
342 340
343 return NULL; 341 return NULL;
344 } 342 }
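
For example, with (say) 2048 entries per table the covered ranges run 0-2047, 2048-4095, 4096-6143, and so on, so a lookup for page 5000 lands in the third table; the caller then indexes page - page_start_offset into that table's scatterlist, exactly as rd_MEMCPY does below.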
345 343
346 static int rd_MEMCPY(struct rd_request *req, u32 read_rd) 344 static int rd_MEMCPY(struct rd_request *req, u32 read_rd)
347 { 345 {
348 struct se_task *task = &req->rd_task; 346 struct se_task *task = &req->rd_task;
349 struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr; 347 struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr;
350 struct rd_dev_sg_table *table; 348 struct rd_dev_sg_table *table;
351 struct scatterlist *rd_sg; 349 struct scatterlist *rd_sg;
352 struct sg_mapping_iter m; 350 struct sg_mapping_iter m;
353 u32 rd_offset = req->rd_offset; 351 u32 rd_offset = req->rd_offset;
354 u32 src_len; 352 u32 src_len;
355 353
356 table = rd_get_sg_table(dev, req->rd_page); 354 table = rd_get_sg_table(dev, req->rd_page);
357 if (!table) 355 if (!table)
358 return -EINVAL; 356 return -EINVAL;
359 357
360 rd_sg = &table->sg_table[req->rd_page - table->page_start_offset]; 358 rd_sg = &table->sg_table[req->rd_page - table->page_start_offset];
361 359
362 pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n", 360 pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
363 dev->rd_dev_id, read_rd ? "Read" : "Write", 361 dev->rd_dev_id, read_rd ? "Read" : "Write",
364 task->task_lba, req->rd_size, req->rd_page, 362 task->task_lba, req->rd_size, req->rd_page,
365 rd_offset); 363 rd_offset);
366 364
367 src_len = PAGE_SIZE - rd_offset; 365 src_len = PAGE_SIZE - rd_offset;
368 sg_miter_start(&m, task->task_sg, task->task_sg_nents, 366 sg_miter_start(&m, task->task_sg, task->task_sg_nents,
369 read_rd ? SG_MITER_TO_SG : SG_MITER_FROM_SG); 367 read_rd ? SG_MITER_TO_SG : SG_MITER_FROM_SG);
370 while (req->rd_size) { 368 while (req->rd_size) {
371 u32 len; 369 u32 len;
372 void *rd_addr; 370 void *rd_addr;
373 371
374 sg_miter_next(&m); 372 sg_miter_next(&m);
375 len = min((u32)m.length, src_len); 373 len = min((u32)m.length, src_len);
376 m.consumed = len; 374 m.consumed = len;
377 375
378 rd_addr = sg_virt(rd_sg) + rd_offset; 376 rd_addr = sg_virt(rd_sg) + rd_offset;
379 377
380 if (read_rd) 378 if (read_rd)
381 memcpy(m.addr, rd_addr, len); 379 memcpy(m.addr, rd_addr, len);
382 else 380 else
383 memcpy(rd_addr, m.addr, len); 381 memcpy(rd_addr, m.addr, len);
384 382
385 req->rd_size -= len; 383 req->rd_size -= len;
386 if (!req->rd_size) 384 if (!req->rd_size)
387 continue; 385 continue;
388 386
389 src_len -= len; 387 src_len -= len;
390 if (src_len) { 388 if (src_len) {
391 rd_offset += len; 389 rd_offset += len;
392 continue; 390 continue;
393 } 391 }
394 392
395 /* rd page completed, next one please */ 393 /* rd page completed, next one please */
396 req->rd_page++; 394 req->rd_page++;
397 rd_offset = 0; 395 rd_offset = 0;
398 src_len = PAGE_SIZE; 396 src_len = PAGE_SIZE;
399 if (req->rd_page <= table->page_end_offset) { 397 if (req->rd_page <= table->page_end_offset) {
400 rd_sg++; 398 rd_sg++;
401 continue; 399 continue;
402 } 400 }
403 401
404 table = rd_get_sg_table(dev, req->rd_page); 402 table = rd_get_sg_table(dev, req->rd_page);
405 if (!table) { 403 if (!table) {
406 sg_miter_stop(&m); 404 sg_miter_stop(&m);
407 return -EINVAL; 405 return -EINVAL;
408 } 406 }
409 407
410 /* since we increment, the first sg entry is correct */ 408 /* since we increment, the first sg entry is correct */
411 rd_sg = table->sg_table; 409 rd_sg = table->sg_table;
412 } 410 }
413 sg_miter_stop(&m); 411 sg_miter_stop(&m);
414 return 0; 412 return 0;
415 } 413 }
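
Stripped of the sg_miter machinery and the per-table bookkeeping, the page walk above reduces to the following userspace sketch (hypothetical names, a flat page array, and fixed 4 KiB pages are all assumptions for illustration):

        #include <stdio.h>
        #include <string.h>

        #define PAGE_SZ 4096

        /* Copy `size` bytes starting at (page, offset) out of an array of
         * PAGE_SZ-byte pages, moving to the next page when the offset wraps. */
        static void rd_copy_out(char pages[][PAGE_SZ], unsigned int page,
                                unsigned int offset, char *dst, size_t size)
        {
                while (size) {
                        size_t len = PAGE_SZ - offset;  /* room left in this page */

                        if (len > size)
                                len = size;
                        memcpy(dst, &pages[page][offset], len);
                        dst += len;
                        size -= len;
                        page++;                         /* next page starts at offset 0 */
                        offset = 0;
                }
        }

        int main(void)
        {
                static char pages[2][PAGE_SZ];
                char buf[16];

                memcpy(&pages[0][PAGE_SZ - 8], "01234567", 8);
                memcpy(&pages[1][0], "89abcdef", 8);
                rd_copy_out(pages, 0, PAGE_SZ - 8, buf, 16);    /* spans both pages */
                printf("%.16s\n", buf);
                return 0;
        }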
416 414
417 /* rd_MEMCPY_do_task(): (Part of se_subsystem_api_t template) 415 /* rd_MEMCPY_do_task(): (Part of se_subsystem_api_t template)
418 * Translate the task LBA into a (page, offset) pair and perform the memcpy I/O. 416 * Translate the task LBA into a (page, offset) pair and perform the memcpy I/O.
420 */ 418 */
421 static int rd_MEMCPY_do_task(struct se_task *task) 419 static int rd_MEMCPY_do_task(struct se_task *task)
422 { 420 {
423 struct se_device *dev = task->task_se_cmd->se_dev; 421 struct se_device *dev = task->task_se_cmd->se_dev;
424 struct rd_request *req = RD_REQ(task); 422 struct rd_request *req = RD_REQ(task);
425 u64 tmp; 423 u64 tmp;
426 int ret; 424 int ret;
427 425
428 tmp = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size; 426 tmp = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
429 req->rd_offset = do_div(tmp, PAGE_SIZE); 427 req->rd_offset = do_div(tmp, PAGE_SIZE);
430 req->rd_page = tmp; 428 req->rd_page = tmp;
431 req->rd_size = task->task_size; 429 req->rd_size = task->task_size;
432 430
433 ret = rd_MEMCPY(req, task->task_data_direction == DMA_FROM_DEVICE); 431 ret = rd_MEMCPY(req, task->task_data_direction == DMA_FROM_DEVICE);
434 if (ret != 0) 432 if (ret != 0)
435 return ret; 433 return ret;
436 434
437 task->task_scsi_status = GOOD; 435 task->task_scsi_status = GOOD;
438 transport_complete_task(task, 1); 436 transport_complete_task(task, 1);
439 return 0; 437 return 0;
440 } 438 }
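
do_div() leaves the quotient in its first argument and returns the remainder, so the three assignments above split a byte address into a page index and an intra-page offset. The plain C equivalent, with example numbers (a 512-byte block size and 4 KiB pages are assumptions for illustration):

        #include <stdio.h>

        int main(void)
        {
                unsigned long long lba = 9;             /* example LBA */
                unsigned int block_size = 512, page_size = 4096;
                unsigned long long byte_off = lba * block_size;         /* 4608 */
                unsigned long long rd_page = byte_off / page_size;      /* 1 */
                unsigned int rd_offset = byte_off % page_size;          /* 512 */

                printf("LBA %llu -> page %llu, offset %u\n", lba, rd_page, rd_offset);
                return 0;
        }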
441 439
442 /* rd_free_task(): (Part of se_subsystem_api_t template) 440 /* rd_free_task(): (Part of se_subsystem_api_t template)
443 * Free the struct rd_request that embeds the completed task. 441 * Free the struct rd_request that embeds the completed task.
445 */ 443 */
446 static void rd_free_task(struct se_task *task) 444 static void rd_free_task(struct se_task *task)
447 { 445 {
448 kfree(RD_REQ(task)); 446 kfree(RD_REQ(task));
449 } 447 }
450 448
451 enum { 449 enum {
452 Opt_rd_pages, Opt_err 450 Opt_rd_pages, Opt_err
453 }; 451 };
454 452
455 static match_table_t tokens = { 453 static match_table_t tokens = {
456 {Opt_rd_pages, "rd_pages=%d"}, 454 {Opt_rd_pages, "rd_pages=%d"},
457 {Opt_err, NULL} 455 {Opt_err, NULL}
458 }; 456 };
459 457
460 static ssize_t rd_set_configfs_dev_params( 458 static ssize_t rd_set_configfs_dev_params(
461 struct se_hba *hba, 459 struct se_hba *hba,
462 struct se_subsystem_dev *se_dev, 460 struct se_subsystem_dev *se_dev,
463 const char *page, 461 const char *page,
464 ssize_t count) 462 ssize_t count)
465 { 463 {
466 struct rd_dev *rd_dev = se_dev->se_dev_su_ptr; 464 struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
467 char *orig, *ptr, *opts; 465 char *orig, *ptr, *opts;
468 substring_t args[MAX_OPT_ARGS]; 466 substring_t args[MAX_OPT_ARGS];
469 int ret = 0, arg, token; 467 int ret = 0, arg, token;
470 468
471 opts = kstrdup(page, GFP_KERNEL); 469 opts = kstrdup(page, GFP_KERNEL);
472 if (!opts) 470 if (!opts)
473 return -ENOMEM; 471 return -ENOMEM;
474 472
475 orig = opts; 473 orig = opts;
476 474
477 while ((ptr = strsep(&opts, ",")) != NULL) { 475 while ((ptr = strsep(&opts, ",")) != NULL) {
478 if (!*ptr) 476 if (!*ptr)
479 continue; 477 continue;
480 478
481 token = match_token(ptr, tokens, args); 479 token = match_token(ptr, tokens, args);
482 switch (token) { 480 switch (token) {
483 case Opt_rd_pages: 481 case Opt_rd_pages:
484 match_int(args, &arg); 482 match_int(args, &arg);
485 rd_dev->rd_page_count = arg; 483 rd_dev->rd_page_count = arg;
486 pr_debug("RAMDISK: Referencing Page" 484 pr_debug("RAMDISK: Referencing Page"
487 " Count: %u\n", rd_dev->rd_page_count); 485 " Count: %u\n", rd_dev->rd_page_count);
488 rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT; 486 rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
489 break; 487 break;
490 default: 488 default:
491 break; 489 break;
492 } 490 }
493 } 491 }
494 492
495 kfree(orig); 493 kfree(orig);
496 return (!ret) ? count : ret; 494 return (!ret) ? count : ret;
497 } 495 }
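
In practice this parser is fed through the device's configfs control file: writing a string such as rd_pages=65536 there (the exact configfs path depends on how the HBA and device were laid out) is what sets RDF_HAS_PAGE_COUNT and satisfies the check below.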
498 496
499 static ssize_t rd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev) 497 static ssize_t rd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
500 { 498 {
501 struct rd_dev *rd_dev = se_dev->se_dev_su_ptr; 499 struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
502 500
503 if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) { 501 if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
504 pr_debug("Missing rd_pages= parameter\n"); 502 pr_debug("Missing rd_pages= parameter\n");
505 return -EINVAL; 503 return -EINVAL;
506 } 504 }
507 505
508 return 0; 506 return 0;
509 } 507 }
510 508
511 static ssize_t rd_show_configfs_dev_params( 509 static ssize_t rd_show_configfs_dev_params(
512 struct se_hba *hba, 510 struct se_hba *hba,
513 struct se_subsystem_dev *se_dev, 511 struct se_subsystem_dev *se_dev,
514 char *b) 512 char *b)
515 { 513 {
516 struct rd_dev *rd_dev = se_dev->se_dev_su_ptr; 514 struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
517 ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: %s\n", 515 ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: %s\n",
518 rd_dev->rd_dev_id, (rd_dev->rd_direct) ? 516 rd_dev->rd_dev_id, (rd_dev->rd_direct) ?
519 "rd_direct" : "rd_mcp"); 517 "rd_direct" : "rd_mcp");
520 bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu" 518 bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu"
521 " SG_table_count: %u\n", rd_dev->rd_page_count, 519 " SG_table_count: %u\n", rd_dev->rd_page_count,
522 PAGE_SIZE, rd_dev->sg_table_count); 520 PAGE_SIZE, rd_dev->sg_table_count);
523 return bl; 521 return bl;
524 } 522 }
525 523
526 static u32 rd_get_device_rev(struct se_device *dev) 524 static u32 rd_get_device_rev(struct se_device *dev)
527 { 525 {
528 return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */ 526 return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
529 } 527 }
530 528
531 static u32 rd_get_device_type(struct se_device *dev) 529 static u32 rd_get_device_type(struct se_device *dev)
532 { 530 {
533 return TYPE_DISK; 531 return TYPE_DISK;
534 } 532 }
535 533
536 static sector_t rd_get_blocks(struct se_device *dev) 534 static sector_t rd_get_blocks(struct se_device *dev)
537 { 535 {
538 struct rd_dev *rd_dev = dev->dev_ptr; 536 struct rd_dev *rd_dev = dev->dev_ptr;
539 unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) / 537 unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
540 dev->se_sub_dev->se_dev_attrib.block_size) - 1; 538 dev->se_sub_dev->se_dev_attrib.block_size) - 1;
541 539
542 return blocks_long; 540 return blocks_long;
543 } 541 }
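
The trailing - 1 is because get_blocks() reports the last addressable LBA rather than the block count (as READ CAPACITY expects): for example, 65536 pages of 4096 bytes at a 512-byte block size give 524288 blocks, so rd_get_blocks() returns 524287.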
544 542
545 static struct se_subsystem_api rd_mcp_template = { 543 static struct se_subsystem_api rd_mcp_template = {
546 .name = "rd_mcp", 544 .name = "rd_mcp",
547 .transport_type = TRANSPORT_PLUGIN_VHBA_VDEV, 545 .transport_type = TRANSPORT_PLUGIN_VHBA_VDEV,
548 .attach_hba = rd_attach_hba, 546 .attach_hba = rd_attach_hba,
549 .detach_hba = rd_detach_hba, 547 .detach_hba = rd_detach_hba,
550 .allocate_virtdevice = rd_MEMCPY_allocate_virtdevice, 548 .allocate_virtdevice = rd_MEMCPY_allocate_virtdevice,
551 .create_virtdevice = rd_MEMCPY_create_virtdevice, 549 .create_virtdevice = rd_MEMCPY_create_virtdevice,
552 .free_device = rd_free_device, 550 .free_device = rd_free_device,
553 .alloc_task = rd_alloc_task, 551 .alloc_task = rd_alloc_task,
554 .do_task = rd_MEMCPY_do_task, 552 .do_task = rd_MEMCPY_do_task,
555 .free_task = rd_free_task, 553 .free_task = rd_free_task,
556 .check_configfs_dev_params = rd_check_configfs_dev_params, 554 .check_configfs_dev_params = rd_check_configfs_dev_params,
557 .set_configfs_dev_params = rd_set_configfs_dev_params, 555 .set_configfs_dev_params = rd_set_configfs_dev_params,
558 .show_configfs_dev_params = rd_show_configfs_dev_params, 556 .show_configfs_dev_params = rd_show_configfs_dev_params,
559 .get_device_rev = rd_get_device_rev, 557 .get_device_rev = rd_get_device_rev,
560 .get_device_type = rd_get_device_type, 558 .get_device_type = rd_get_device_type,
561 .get_blocks = rd_get_blocks, 559 .get_blocks = rd_get_blocks,
562 }; 560 };
563 561
564 int __init rd_module_init(void) 562 int __init rd_module_init(void)
565 { 563 {
566 int ret; 564 int ret;
567 565
568 ret = transport_subsystem_register(&rd_mcp_template); 566 ret = transport_subsystem_register(&rd_mcp_template);
569 if (ret < 0) 567 if (ret < 0)
570 return ret; 568 return ret;
572 570
573 return 0; 571 return 0;
574 } 572 }
575 573
576 void rd_module_exit(void) 574 void rd_module_exit(void)
577 { 575 {
578 transport_subsystem_release(&rd_mcp_template); 576 transport_subsystem_release(&rd_mcp_template);
579 } 577 }
580 578
drivers/target/target_core_stat.c
1 /******************************************************************************* 1 /*******************************************************************************
2 * Filename: target_core_stat.c 2 * Filename: target_core_stat.c
3 * 3 *
4 * Copyright (c) 2011 Rising Tide Systems 4 * Copyright (c) 2011 Rising Tide Systems
5 * Copyright (c) 2011 Linux-iSCSI.org 5 * Copyright (c) 2011 Linux-iSCSI.org
6 * 6 *
7 * Modern ConfigFS group context specific statistics based on original 7 * Modern ConfigFS group context specific statistics based on original
8 * target_core_mib.c code 8 * target_core_mib.c code
9 * 9 *
10 * Copyright (c) 2006-2007 SBE, Inc. All Rights Reserved. 10 * Copyright (c) 2006-2007 SBE, Inc. All Rights Reserved.
11 * 11 *
12 * Nicholas A. Bellinger <nab@linux-iscsi.org> 12 * Nicholas A. Bellinger <nab@linux-iscsi.org>
13 * 13 *
14 * This program is free software; you can redistribute it and/or modify 14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by 15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or 16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version. 17 * (at your option) any later version.
18 * 18 *
19 * This program is distributed in the hope that it will be useful, 19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of 20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details. 22 * GNU General Public License for more details.
23 * 23 *
24 * You should have received a copy of the GNU General Public License 24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software 25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 26 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
27 * 27 *
28 ******************************************************************************/ 28 ******************************************************************************/
29 29
30 #include <linux/kernel.h> 30 #include <linux/kernel.h>
31 #include <linux/module.h> 31 #include <linux/module.h>
32 #include <linux/delay.h> 32 #include <linux/delay.h>
33 #include <linux/timer.h> 33 #include <linux/timer.h>
34 #include <linux/string.h> 34 #include <linux/string.h>
35 #include <generated/utsrelease.h> 35 #include <generated/utsrelease.h>
36 #include <linux/utsname.h> 36 #include <linux/utsname.h>
37 #include <linux/proc_fs.h> 37 #include <linux/proc_fs.h>
38 #include <linux/seq_file.h> 38 #include <linux/seq_file.h>
39 #include <linux/blkdev.h> 39 #include <linux/blkdev.h>
40 #include <linux/configfs.h> 40 #include <linux/configfs.h>
41 #include <scsi/scsi.h> 41 #include <scsi/scsi.h>
42 #include <scsi/scsi_device.h> 42 #include <scsi/scsi_device.h>
43 #include <scsi/scsi_host.h> 43 #include <scsi/scsi_host.h>
44 44
45 #include <target/target_core_base.h> 45 #include <target/target_core_base.h>
46 #include <target/target_core_transport.h> 46 #include <target/target_core_backend.h>
47 #include <target/target_core_fabric_ops.h> 47 #include <target/target_core_fabric.h>
48 #include <target/target_core_configfs.h> 48 #include <target/target_core_configfs.h>
49 #include <target/configfs_macros.h> 49 #include <target/configfs_macros.h>
50 50
51 #include "target_core_internal.h" 51 #include "target_core_internal.h"
52 52
53 #ifndef INITIAL_JIFFIES 53 #ifndef INITIAL_JIFFIES
54 #define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ)) 54 #define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
55 #endif 55 #endif
56 56
57 #define NONE "None" 57 #define NONE "None"
58 #define ISPRINT(a) (((a) >= ' ') && ((a) <= '~')) 58 #define ISPRINT(a) (((a) >= ' ') && ((a) <= '~'))
59 59
60 #define SCSI_LU_INDEX 1 60 #define SCSI_LU_INDEX 1
61 #define LU_COUNT 1 61 #define LU_COUNT 1
62 62
63 /* 63 /*
64 * SCSI Device Table 64 * SCSI Device Table
65 */ 65 */
66 66
67 CONFIGFS_EATTR_STRUCT(target_stat_scsi_dev, se_dev_stat_grps); 67 CONFIGFS_EATTR_STRUCT(target_stat_scsi_dev, se_dev_stat_grps);
68 #define DEV_STAT_SCSI_DEV_ATTR(_name, _mode) \ 68 #define DEV_STAT_SCSI_DEV_ATTR(_name, _mode) \
69 static struct target_stat_scsi_dev_attribute \ 69 static struct target_stat_scsi_dev_attribute \
70 target_stat_scsi_dev_##_name = \ 70 target_stat_scsi_dev_##_name = \
71 __CONFIGFS_EATTR(_name, _mode, \ 71 __CONFIGFS_EATTR(_name, _mode, \
72 target_stat_scsi_dev_show_attr_##_name, \ 72 target_stat_scsi_dev_show_attr_##_name, \
73 target_stat_scsi_dev_store_attr_##_name); 73 target_stat_scsi_dev_store_attr_##_name);
74 74
75 #define DEV_STAT_SCSI_DEV_ATTR_RO(_name) \ 75 #define DEV_STAT_SCSI_DEV_ATTR_RO(_name) \
76 static struct target_stat_scsi_dev_attribute \ 76 static struct target_stat_scsi_dev_attribute \
77 target_stat_scsi_dev_##_name = \ 77 target_stat_scsi_dev_##_name = \
78 __CONFIGFS_EATTR_RO(_name, \ 78 __CONFIGFS_EATTR_RO(_name, \
79 target_stat_scsi_dev_show_attr_##_name); 79 target_stat_scsi_dev_show_attr_##_name);
80 80
81 static ssize_t target_stat_scsi_dev_show_attr_inst( 81 static ssize_t target_stat_scsi_dev_show_attr_inst(
82 struct se_dev_stat_grps *sgrps, char *page) 82 struct se_dev_stat_grps *sgrps, char *page)
83 { 83 {
84 struct se_subsystem_dev *se_subdev = container_of(sgrps, 84 struct se_subsystem_dev *se_subdev = container_of(sgrps,
85 struct se_subsystem_dev, dev_stat_grps); 85 struct se_subsystem_dev, dev_stat_grps);
86 struct se_hba *hba = se_subdev->se_dev_hba; 86 struct se_hba *hba = se_subdev->se_dev_hba;
87 struct se_device *dev = se_subdev->se_dev_ptr; 87 struct se_device *dev = se_subdev->se_dev_ptr;
88 88
89 if (!dev) 89 if (!dev)
90 return -ENODEV; 90 return -ENODEV;
91 91
92 return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index); 92 return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
93 } 93 }
94 DEV_STAT_SCSI_DEV_ATTR_RO(inst); 94 DEV_STAT_SCSI_DEV_ATTR_RO(inst);
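
To make the macro soup concrete, the DEV_STAT_SCSI_DEV_ATTR_RO(inst) line just above is purely the mechanical preprocessor expansion shown below (__CONFIGFS_EATTR_RO itself expands further in target/configfs_macros.h):

        static struct target_stat_scsi_dev_attribute
                        target_stat_scsi_dev_inst =
                __CONFIGFS_EATTR_RO(inst,
                target_stat_scsi_dev_show_attr_inst);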
95 95
96 static ssize_t target_stat_scsi_dev_show_attr_indx( 96 static ssize_t target_stat_scsi_dev_show_attr_indx(
97 struct se_dev_stat_grps *sgrps, char *page) 97 struct se_dev_stat_grps *sgrps, char *page)
98 { 98 {
99 struct se_subsystem_dev *se_subdev = container_of(sgrps, 99 struct se_subsystem_dev *se_subdev = container_of(sgrps,
100 struct se_subsystem_dev, dev_stat_grps); 100 struct se_subsystem_dev, dev_stat_grps);
101 struct se_device *dev = se_subdev->se_dev_ptr; 101 struct se_device *dev = se_subdev->se_dev_ptr;
102 102
103 if (!dev) 103 if (!dev)
104 return -ENODEV; 104 return -ENODEV;
105 105
106 return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index); 106 return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
107 } 107 }
108 DEV_STAT_SCSI_DEV_ATTR_RO(indx); 108 DEV_STAT_SCSI_DEV_ATTR_RO(indx);
109 109
110 static ssize_t target_stat_scsi_dev_show_attr_role( 110 static ssize_t target_stat_scsi_dev_show_attr_role(
111 struct se_dev_stat_grps *sgrps, char *page) 111 struct se_dev_stat_grps *sgrps, char *page)
112 { 112 {
113 struct se_subsystem_dev *se_subdev = container_of(sgrps, 113 struct se_subsystem_dev *se_subdev = container_of(sgrps,
114 struct se_subsystem_dev, dev_stat_grps); 114 struct se_subsystem_dev, dev_stat_grps);
115 struct se_device *dev = se_subdev->se_dev_ptr; 115 struct se_device *dev = se_subdev->se_dev_ptr;
116 116
117 if (!dev) 117 if (!dev)
118 return -ENODEV; 118 return -ENODEV;
119 119
120 return snprintf(page, PAGE_SIZE, "Target\n"); 120 return snprintf(page, PAGE_SIZE, "Target\n");
121 } 121 }
122 DEV_STAT_SCSI_DEV_ATTR_RO(role); 122 DEV_STAT_SCSI_DEV_ATTR_RO(role);
123 123
124 static ssize_t target_stat_scsi_dev_show_attr_ports( 124 static ssize_t target_stat_scsi_dev_show_attr_ports(
125 struct se_dev_stat_grps *sgrps, char *page) 125 struct se_dev_stat_grps *sgrps, char *page)
126 { 126 {
127 struct se_subsystem_dev *se_subdev = container_of(sgrps, 127 struct se_subsystem_dev *se_subdev = container_of(sgrps,
128 struct se_subsystem_dev, dev_stat_grps); 128 struct se_subsystem_dev, dev_stat_grps);
129 struct se_device *dev = se_subdev->se_dev_ptr; 129 struct se_device *dev = se_subdev->se_dev_ptr;
130 130
131 if (!dev) 131 if (!dev)
132 return -ENODEV; 132 return -ENODEV;
133 133
134 return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_port_count); 134 return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_port_count);
135 } 135 }
136 DEV_STAT_SCSI_DEV_ATTR_RO(ports); 136 DEV_STAT_SCSI_DEV_ATTR_RO(ports);
137 137
138 CONFIGFS_EATTR_OPS(target_stat_scsi_dev, se_dev_stat_grps, scsi_dev_group); 138 CONFIGFS_EATTR_OPS(target_stat_scsi_dev, se_dev_stat_grps, scsi_dev_group);
139 139
140 static struct configfs_attribute *target_stat_scsi_dev_attrs[] = { 140 static struct configfs_attribute *target_stat_scsi_dev_attrs[] = {
141 &target_stat_scsi_dev_inst.attr, 141 &target_stat_scsi_dev_inst.attr,
142 &target_stat_scsi_dev_indx.attr, 142 &target_stat_scsi_dev_indx.attr,
143 &target_stat_scsi_dev_role.attr, 143 &target_stat_scsi_dev_role.attr,
144 &target_stat_scsi_dev_ports.attr, 144 &target_stat_scsi_dev_ports.attr,
145 NULL, 145 NULL,
146 }; 146 };
147 147
148 static struct configfs_item_operations target_stat_scsi_dev_attrib_ops = { 148 static struct configfs_item_operations target_stat_scsi_dev_attrib_ops = {
149 .show_attribute = target_stat_scsi_dev_attr_show, 149 .show_attribute = target_stat_scsi_dev_attr_show,
150 .store_attribute = target_stat_scsi_dev_attr_store, 150 .store_attribute = target_stat_scsi_dev_attr_store,
151 }; 151 };
152 152
153 static struct config_item_type target_stat_scsi_dev_cit = { 153 static struct config_item_type target_stat_scsi_dev_cit = {
154 .ct_item_ops = &target_stat_scsi_dev_attrib_ops, 154 .ct_item_ops = &target_stat_scsi_dev_attrib_ops,
155 .ct_attrs = target_stat_scsi_dev_attrs, 155 .ct_attrs = target_stat_scsi_dev_attrs,
156 .ct_owner = THIS_MODULE, 156 .ct_owner = THIS_MODULE,
157 }; 157 };
158 158
159 /* 159 /*
160 * SCSI Target Device Table 160 * SCSI Target Device Table
161 */ 161 */
162 162
163 CONFIGFS_EATTR_STRUCT(target_stat_scsi_tgt_dev, se_dev_stat_grps); 163 CONFIGFS_EATTR_STRUCT(target_stat_scsi_tgt_dev, se_dev_stat_grps);
164 #define DEV_STAT_SCSI_TGT_DEV_ATTR(_name, _mode) \ 164 #define DEV_STAT_SCSI_TGT_DEV_ATTR(_name, _mode) \
165 static struct target_stat_scsi_tgt_dev_attribute \ 165 static struct target_stat_scsi_tgt_dev_attribute \
166 target_stat_scsi_tgt_dev_##_name = \ 166 target_stat_scsi_tgt_dev_##_name = \
167 __CONFIGFS_EATTR(_name, _mode, \ 167 __CONFIGFS_EATTR(_name, _mode, \
168 target_stat_scsi_tgt_dev_show_attr_##_name, \ 168 target_stat_scsi_tgt_dev_show_attr_##_name, \
169 target_stat_scsi_tgt_dev_store_attr_##_name); 169 target_stat_scsi_tgt_dev_store_attr_##_name);
170 170
171 #define DEV_STAT_SCSI_TGT_DEV_ATTR_RO(_name) \ 171 #define DEV_STAT_SCSI_TGT_DEV_ATTR_RO(_name) \
172 static struct target_stat_scsi_tgt_dev_attribute \ 172 static struct target_stat_scsi_tgt_dev_attribute \
173 target_stat_scsi_tgt_dev_##_name = \ 173 target_stat_scsi_tgt_dev_##_name = \
174 __CONFIGFS_EATTR_RO(_name, \ 174 __CONFIGFS_EATTR_RO(_name, \
175 target_stat_scsi_tgt_dev_show_attr_##_name); 175 target_stat_scsi_tgt_dev_show_attr_##_name);
176 176
177 static ssize_t target_stat_scsi_tgt_dev_show_attr_inst( 177 static ssize_t target_stat_scsi_tgt_dev_show_attr_inst(
178 struct se_dev_stat_grps *sgrps, char *page) 178 struct se_dev_stat_grps *sgrps, char *page)
179 { 179 {
180 struct se_subsystem_dev *se_subdev = container_of(sgrps, 180 struct se_subsystem_dev *se_subdev = container_of(sgrps,
181 struct se_subsystem_dev, dev_stat_grps); 181 struct se_subsystem_dev, dev_stat_grps);
182 struct se_hba *hba = se_subdev->se_dev_hba; 182 struct se_hba *hba = se_subdev->se_dev_hba;
183 struct se_device *dev = se_subdev->se_dev_ptr; 183 struct se_device *dev = se_subdev->se_dev_ptr;
184 184
185 if (!dev) 185 if (!dev)
186 return -ENODEV; 186 return -ENODEV;
187 187
188 return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index); 188 return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
189 } 189 }
190 DEV_STAT_SCSI_TGT_DEV_ATTR_RO(inst); 190 DEV_STAT_SCSI_TGT_DEV_ATTR_RO(inst);
191 191
192 static ssize_t target_stat_scsi_tgt_dev_show_attr_indx( 192 static ssize_t target_stat_scsi_tgt_dev_show_attr_indx(
193 struct se_dev_stat_grps *sgrps, char *page) 193 struct se_dev_stat_grps *sgrps, char *page)
194 { 194 {
195 struct se_subsystem_dev *se_subdev = container_of(sgrps, 195 struct se_subsystem_dev *se_subdev = container_of(sgrps,
196 struct se_subsystem_dev, dev_stat_grps); 196 struct se_subsystem_dev, dev_stat_grps);
197 struct se_device *dev = se_subdev->se_dev_ptr; 197 struct se_device *dev = se_subdev->se_dev_ptr;
198 198
199 if (!dev) 199 if (!dev)
200 return -ENODEV; 200 return -ENODEV;
201 201
202 return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index); 202 return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
203 } 203 }
204 DEV_STAT_SCSI_TGT_DEV_ATTR_RO(indx); 204 DEV_STAT_SCSI_TGT_DEV_ATTR_RO(indx);
205 205
206 static ssize_t target_stat_scsi_tgt_dev_show_attr_num_lus( 206 static ssize_t target_stat_scsi_tgt_dev_show_attr_num_lus(
207 struct se_dev_stat_grps *sgrps, char *page) 207 struct se_dev_stat_grps *sgrps, char *page)
208 { 208 {
209 struct se_subsystem_dev *se_subdev = container_of(sgrps, 209 struct se_subsystem_dev *se_subdev = container_of(sgrps,
210 struct se_subsystem_dev, dev_stat_grps); 210 struct se_subsystem_dev, dev_stat_grps);
211 struct se_device *dev = se_subdev->se_dev_ptr; 211 struct se_device *dev = se_subdev->se_dev_ptr;
212 212
213 if (!dev) 213 if (!dev)
214 return -ENODEV; 214 return -ENODEV;
215 215
216 return snprintf(page, PAGE_SIZE, "%u\n", LU_COUNT); 216 return snprintf(page, PAGE_SIZE, "%u\n", LU_COUNT);
217 } 217 }
218 DEV_STAT_SCSI_TGT_DEV_ATTR_RO(num_lus); 218 DEV_STAT_SCSI_TGT_DEV_ATTR_RO(num_lus);
219 219
220 static ssize_t target_stat_scsi_tgt_dev_show_attr_status( 220 static ssize_t target_stat_scsi_tgt_dev_show_attr_status(
221 struct se_dev_stat_grps *sgrps, char *page) 221 struct se_dev_stat_grps *sgrps, char *page)
222 { 222 {
223 struct se_subsystem_dev *se_subdev = container_of(sgrps, 223 struct se_subsystem_dev *se_subdev = container_of(sgrps,
224 struct se_subsystem_dev, dev_stat_grps); 224 struct se_subsystem_dev, dev_stat_grps);
225 struct se_device *dev = se_subdev->se_dev_ptr; 225 struct se_device *dev = se_subdev->se_dev_ptr;
226 char status[16]; 226 char status[16];
227 227
228 if (!dev) 228 if (!dev)
229 return -ENODEV; 229 return -ENODEV;
230 230
231 switch (dev->dev_status) { 231 switch (dev->dev_status) {
232 case TRANSPORT_DEVICE_ACTIVATED: 232 case TRANSPORT_DEVICE_ACTIVATED:
233 strcpy(status, "activated"); 233 strcpy(status, "activated");
234 break; 234 break;
235 case TRANSPORT_DEVICE_DEACTIVATED: 235 case TRANSPORT_DEVICE_DEACTIVATED:
236 strcpy(status, "deactivated"); 236 strcpy(status, "deactivated");
237 break; 237 break;
238 case TRANSPORT_DEVICE_SHUTDOWN: 238 case TRANSPORT_DEVICE_SHUTDOWN:
239 strcpy(status, "shutdown"); 239 strcpy(status, "shutdown");
240 break; 240 break;
241 case TRANSPORT_DEVICE_OFFLINE_ACTIVATED: 241 case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
242 case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED: 242 case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
243 strcpy(status, "offline"); 243 strcpy(status, "offline");
244 break; 244 break;
245 default: 245 default:
246 sprintf(status, "unknown(%d)", dev->dev_status); 246 sprintf(status, "unknown(%d)", dev->dev_status);
247 break; 247 break;
248 } 248 }
249 249
250 return snprintf(page, PAGE_SIZE, "%s\n", status); 250 return snprintf(page, PAGE_SIZE, "%s\n", status);
251 } 251 }
252 DEV_STAT_SCSI_TGT_DEV_ATTR_RO(status); 252 DEV_STAT_SCSI_TGT_DEV_ATTR_RO(status);
253 253
254 static ssize_t target_stat_scsi_tgt_dev_show_attr_non_access_lus( 254 static ssize_t target_stat_scsi_tgt_dev_show_attr_non_access_lus(
255 struct se_dev_stat_grps *sgrps, char *page) 255 struct se_dev_stat_grps *sgrps, char *page)
256 { 256 {
257 struct se_subsystem_dev *se_subdev = container_of(sgrps, 257 struct se_subsystem_dev *se_subdev = container_of(sgrps,
258 struct se_subsystem_dev, dev_stat_grps); 258 struct se_subsystem_dev, dev_stat_grps);
259 struct se_device *dev = se_subdev->se_dev_ptr; 259 struct se_device *dev = se_subdev->se_dev_ptr;
260 int non_accessible_lus; 260 int non_accessible_lus;
261 261
262 if (!dev) 262 if (!dev)
263 return -ENODEV; 263 return -ENODEV;
264 264
265 switch (dev->dev_status) { 265 switch (dev->dev_status) {
266 case TRANSPORT_DEVICE_ACTIVATED: 266 case TRANSPORT_DEVICE_ACTIVATED:
267 non_accessible_lus = 0; 267 non_accessible_lus = 0;
268 break; 268 break;
269 case TRANSPORT_DEVICE_DEACTIVATED: 269 case TRANSPORT_DEVICE_DEACTIVATED:
270 case TRANSPORT_DEVICE_SHUTDOWN: 270 case TRANSPORT_DEVICE_SHUTDOWN:
271 case TRANSPORT_DEVICE_OFFLINE_ACTIVATED: 271 case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
272 case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED: 272 case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
273 default: 273 default:
274 non_accessible_lus = 1; 274 non_accessible_lus = 1;
275 break; 275 break;
276 } 276 }
277 277
278 return snprintf(page, PAGE_SIZE, "%u\n", non_accessible_lus); 278 return snprintf(page, PAGE_SIZE, "%u\n", non_accessible_lus);
279 } 279 }
280 DEV_STAT_SCSI_TGT_DEV_ATTR_RO(non_access_lus); 280 DEV_STAT_SCSI_TGT_DEV_ATTR_RO(non_access_lus);
281 281
282 static ssize_t target_stat_scsi_tgt_dev_show_attr_resets( 282 static ssize_t target_stat_scsi_tgt_dev_show_attr_resets(
283 struct se_dev_stat_grps *sgrps, char *page) 283 struct se_dev_stat_grps *sgrps, char *page)
284 { 284 {
285 struct se_subsystem_dev *se_subdev = container_of(sgrps, 285 struct se_subsystem_dev *se_subdev = container_of(sgrps,
286 struct se_subsystem_dev, dev_stat_grps); 286 struct se_subsystem_dev, dev_stat_grps);
287 struct se_device *dev = se_subdev->se_dev_ptr; 287 struct se_device *dev = se_subdev->se_dev_ptr;
288 288
289 if (!dev) 289 if (!dev)
290 return -ENODEV; 290 return -ENODEV;
291 291
292 return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets); 292 return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets);
293 } 293 }
294 DEV_STAT_SCSI_TGT_DEV_ATTR_RO(resets); 294 DEV_STAT_SCSI_TGT_DEV_ATTR_RO(resets);
295 295
296 296
297 CONFIGFS_EATTR_OPS(target_stat_scsi_tgt_dev, se_dev_stat_grps, scsi_tgt_dev_group); 297 CONFIGFS_EATTR_OPS(target_stat_scsi_tgt_dev, se_dev_stat_grps, scsi_tgt_dev_group);
298 298
299 static struct configfs_attribute *target_stat_scsi_tgt_dev_attrs[] = { 299 static struct configfs_attribute *target_stat_scsi_tgt_dev_attrs[] = {
300 &target_stat_scsi_tgt_dev_inst.attr, 300 &target_stat_scsi_tgt_dev_inst.attr,
301 &target_stat_scsi_tgt_dev_indx.attr, 301 &target_stat_scsi_tgt_dev_indx.attr,
302 &target_stat_scsi_tgt_dev_num_lus.attr, 302 &target_stat_scsi_tgt_dev_num_lus.attr,
303 &target_stat_scsi_tgt_dev_status.attr, 303 &target_stat_scsi_tgt_dev_status.attr,
304 &target_stat_scsi_tgt_dev_non_access_lus.attr, 304 &target_stat_scsi_tgt_dev_non_access_lus.attr,
305 &target_stat_scsi_tgt_dev_resets.attr, 305 &target_stat_scsi_tgt_dev_resets.attr,
306 NULL, 306 NULL,
307 }; 307 };
308 308
309 static struct configfs_item_operations target_stat_scsi_tgt_dev_attrib_ops = { 309 static struct configfs_item_operations target_stat_scsi_tgt_dev_attrib_ops = {
310 .show_attribute = target_stat_scsi_tgt_dev_attr_show, 310 .show_attribute = target_stat_scsi_tgt_dev_attr_show,
311 .store_attribute = target_stat_scsi_tgt_dev_attr_store, 311 .store_attribute = target_stat_scsi_tgt_dev_attr_store,
312 }; 312 };
313 313
314 static struct config_item_type target_stat_scsi_tgt_dev_cit = { 314 static struct config_item_type target_stat_scsi_tgt_dev_cit = {
315 .ct_item_ops = &target_stat_scsi_tgt_dev_attrib_ops, 315 .ct_item_ops = &target_stat_scsi_tgt_dev_attrib_ops,
316 .ct_attrs = target_stat_scsi_tgt_dev_attrs, 316 .ct_attrs = target_stat_scsi_tgt_dev_attrs,
317 .ct_owner = THIS_MODULE, 317 .ct_owner = THIS_MODULE,
318 }; 318 };
319 319
320 /* 320 /*
321 * SCSI Logical Unit Table 321 * SCSI Logical Unit Table
322 */ 322 */
323 323
324 CONFIGFS_EATTR_STRUCT(target_stat_scsi_lu, se_dev_stat_grps); 324 CONFIGFS_EATTR_STRUCT(target_stat_scsi_lu, se_dev_stat_grps);
325 #define DEV_STAT_SCSI_LU_ATTR(_name, _mode) \ 325 #define DEV_STAT_SCSI_LU_ATTR(_name, _mode) \
326 static struct target_stat_scsi_lu_attribute target_stat_scsi_lu_##_name = \ 326 static struct target_stat_scsi_lu_attribute target_stat_scsi_lu_##_name = \
327 __CONFIGFS_EATTR(_name, _mode, \ 327 __CONFIGFS_EATTR(_name, _mode, \
328 target_stat_scsi_lu_show_attr_##_name, \ 328 target_stat_scsi_lu_show_attr_##_name, \
329 target_stat_scsi_lu_store_attr_##_name); 329 target_stat_scsi_lu_store_attr_##_name);
330 330
331 #define DEV_STAT_SCSI_LU_ATTR_RO(_name) \ 331 #define DEV_STAT_SCSI_LU_ATTR_RO(_name) \
332 static struct target_stat_scsi_lu_attribute target_stat_scsi_lu_##_name = \ 332 static struct target_stat_scsi_lu_attribute target_stat_scsi_lu_##_name = \
333 __CONFIGFS_EATTR_RO(_name, \ 333 __CONFIGFS_EATTR_RO(_name, \
334 target_stat_scsi_lu_show_attr_##_name); 334 target_stat_scsi_lu_show_attr_##_name);
335 335
336 static ssize_t target_stat_scsi_lu_show_attr_inst( 336 static ssize_t target_stat_scsi_lu_show_attr_inst(
337 struct se_dev_stat_grps *sgrps, char *page) 337 struct se_dev_stat_grps *sgrps, char *page)
338 { 338 {
339 struct se_subsystem_dev *se_subdev = container_of(sgrps, 339 struct se_subsystem_dev *se_subdev = container_of(sgrps,
340 struct se_subsystem_dev, dev_stat_grps); 340 struct se_subsystem_dev, dev_stat_grps);
341 struct se_hba *hba = se_subdev->se_dev_hba; 341 struct se_hba *hba = se_subdev->se_dev_hba;
342 struct se_device *dev = se_subdev->se_dev_ptr; 342 struct se_device *dev = se_subdev->se_dev_ptr;
343 343
344 if (!dev) 344 if (!dev)
345 return -ENODEV; 345 return -ENODEV;
346 346
347 return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index); 347 return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
348 } 348 }
349 DEV_STAT_SCSI_LU_ATTR_RO(inst); 349 DEV_STAT_SCSI_LU_ATTR_RO(inst);
350 350
351 static ssize_t target_stat_scsi_lu_show_attr_dev( 351 static ssize_t target_stat_scsi_lu_show_attr_dev(
352 struct se_dev_stat_grps *sgrps, char *page) 352 struct se_dev_stat_grps *sgrps, char *page)
353 { 353 {
354 struct se_subsystem_dev *se_subdev = container_of(sgrps, 354 struct se_subsystem_dev *se_subdev = container_of(sgrps,
355 struct se_subsystem_dev, dev_stat_grps); 355 struct se_subsystem_dev, dev_stat_grps);
356 struct se_device *dev = se_subdev->se_dev_ptr; 356 struct se_device *dev = se_subdev->se_dev_ptr;
357 357
358 if (!dev) 358 if (!dev)
359 return -ENODEV; 359 return -ENODEV;
360 360
361 return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index); 361 return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
362 } 362 }
363 DEV_STAT_SCSI_LU_ATTR_RO(dev); 363 DEV_STAT_SCSI_LU_ATTR_RO(dev);
364 364
365 static ssize_t target_stat_scsi_lu_show_attr_indx( 365 static ssize_t target_stat_scsi_lu_show_attr_indx(
366 struct se_dev_stat_grps *sgrps, char *page) 366 struct se_dev_stat_grps *sgrps, char *page)
367 { 367 {
368 struct se_subsystem_dev *se_subdev = container_of(sgrps, 368 struct se_subsystem_dev *se_subdev = container_of(sgrps,
369 struct se_subsystem_dev, dev_stat_grps); 369 struct se_subsystem_dev, dev_stat_grps);
370 struct se_device *dev = se_subdev->se_dev_ptr; 370 struct se_device *dev = se_subdev->se_dev_ptr;
371 371
372 if (!dev) 372 if (!dev)
373 return -ENODEV; 373 return -ENODEV;
374 374
375 return snprintf(page, PAGE_SIZE, "%u\n", SCSI_LU_INDEX); 375 return snprintf(page, PAGE_SIZE, "%u\n", SCSI_LU_INDEX);
376 } 376 }
377 DEV_STAT_SCSI_LU_ATTR_RO(indx); 377 DEV_STAT_SCSI_LU_ATTR_RO(indx);
378 378
379 static ssize_t target_stat_scsi_lu_show_attr_lun( 379 static ssize_t target_stat_scsi_lu_show_attr_lun(
380 struct se_dev_stat_grps *sgrps, char *page) 380 struct se_dev_stat_grps *sgrps, char *page)
381 { 381 {
382 struct se_subsystem_dev *se_subdev = container_of(sgrps, 382 struct se_subsystem_dev *se_subdev = container_of(sgrps,
383 struct se_subsystem_dev, dev_stat_grps); 383 struct se_subsystem_dev, dev_stat_grps);
384 struct se_device *dev = se_subdev->se_dev_ptr; 384 struct se_device *dev = se_subdev->se_dev_ptr;
385 385
386 if (!dev) 386 if (!dev)
387 return -ENODEV; 387 return -ENODEV;
388 /* FIXME: scsiLuDefaultLun */ 388 /* FIXME: scsiLuDefaultLun */
389 return snprintf(page, PAGE_SIZE, "%llu\n", (unsigned long long)0); 389 return snprintf(page, PAGE_SIZE, "%llu\n", (unsigned long long)0);
390 } 390 }
391 DEV_STAT_SCSI_LU_ATTR_RO(lun); 391 DEV_STAT_SCSI_LU_ATTR_RO(lun);
392 392
393 static ssize_t target_stat_scsi_lu_show_attr_lu_name( 393 static ssize_t target_stat_scsi_lu_show_attr_lu_name(
394 struct se_dev_stat_grps *sgrps, char *page) 394 struct se_dev_stat_grps *sgrps, char *page)
395 { 395 {
396 struct se_subsystem_dev *se_subdev = container_of(sgrps, 396 struct se_subsystem_dev *se_subdev = container_of(sgrps,
397 struct se_subsystem_dev, dev_stat_grps); 397 struct se_subsystem_dev, dev_stat_grps);
398 struct se_device *dev = se_subdev->se_dev_ptr; 398 struct se_device *dev = se_subdev->se_dev_ptr;
399 399
400 if (!dev) 400 if (!dev)
401 return -ENODEV; 401 return -ENODEV;
402 /* scsiLuWwnName */ 402 /* scsiLuWwnName */
403 return snprintf(page, PAGE_SIZE, "%s\n", 403 return snprintf(page, PAGE_SIZE, "%s\n",
404 (strlen(dev->se_sub_dev->t10_wwn.unit_serial)) ? 404 (strlen(dev->se_sub_dev->t10_wwn.unit_serial)) ?
405 dev->se_sub_dev->t10_wwn.unit_serial : "None"); 405 dev->se_sub_dev->t10_wwn.unit_serial : "None");
406 } 406 }
407 DEV_STAT_SCSI_LU_ATTR_RO(lu_name); 407 DEV_STAT_SCSI_LU_ATTR_RO(lu_name);
408 408
409 static ssize_t target_stat_scsi_lu_show_attr_vend( 409 static ssize_t target_stat_scsi_lu_show_attr_vend(
410 struct se_dev_stat_grps *sgrps, char *page) 410 struct se_dev_stat_grps *sgrps, char *page)
411 { 411 {
412 struct se_subsystem_dev *se_subdev = container_of(sgrps, 412 struct se_subsystem_dev *se_subdev = container_of(sgrps,
413 struct se_subsystem_dev, dev_stat_grps); 413 struct se_subsystem_dev, dev_stat_grps);
414 struct se_device *dev = se_subdev->se_dev_ptr; 414 struct se_device *dev = se_subdev->se_dev_ptr;
415 int i; 415 int i;
416 char str[sizeof(dev->se_sub_dev->t10_wwn.vendor)+1]; 416 char str[sizeof(dev->se_sub_dev->t10_wwn.vendor)+1];
417 417
418 if (!dev) 418 if (!dev)
419 return -ENODEV; 419 return -ENODEV;
420 420
421 /* scsiLuVendorId */ 421 /* scsiLuVendorId */
422 for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.vendor); i++) 422 for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.vendor); i++)
423 str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.vendor[i]) ? 423 str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.vendor[i]) ?
424 dev->se_sub_dev->t10_wwn.vendor[i] : ' '; 424 dev->se_sub_dev->t10_wwn.vendor[i] : ' ';
425 str[i] = '\0'; 425 str[i] = '\0';
426 return snprintf(page, PAGE_SIZE, "%s\n", str); 426 return snprintf(page, PAGE_SIZE, "%s\n", str);
427 } 427 }
428 DEV_STAT_SCSI_LU_ATTR_RO(vend); 428 DEV_STAT_SCSI_LU_ATTR_RO(vend);
429 429
430 static ssize_t target_stat_scsi_lu_show_attr_prod( 430 static ssize_t target_stat_scsi_lu_show_attr_prod(
431 struct se_dev_stat_grps *sgrps, char *page) 431 struct se_dev_stat_grps *sgrps, char *page)
432 { 432 {
433 struct se_subsystem_dev *se_subdev = container_of(sgrps, 433 struct se_subsystem_dev *se_subdev = container_of(sgrps,
434 struct se_subsystem_dev, dev_stat_grps); 434 struct se_subsystem_dev, dev_stat_grps);
435 struct se_device *dev = se_subdev->se_dev_ptr; 435 struct se_device *dev = se_subdev->se_dev_ptr;
436 int i; 436 int i;
437 char str[sizeof(dev->se_sub_dev->t10_wwn.model)+1]; 437 char str[sizeof(dev->se_sub_dev->t10_wwn.model)+1];
438 438
439 if (!dev) 439 if (!dev)
440 return -ENODEV; 440 return -ENODEV;
441 441
442 /* scsiLuProductId */ 442 /* scsiLuProductId */
443 for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.model); i++) 443 for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.model); i++)
444 str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.model[i]) ? 444 str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.model[i]) ?
445 dev->se_sub_dev->t10_wwn.model[i] : ' '; 445 dev->se_sub_dev->t10_wwn.model[i] : ' ';
446 str[i] = '\0'; 446 str[i] = '\0';
447 return snprintf(page, PAGE_SIZE, "%s\n", str); 447 return snprintf(page, PAGE_SIZE, "%s\n", str);
448 } 448 }
449 DEV_STAT_SCSI_LU_ATTR_RO(prod); 449 DEV_STAT_SCSI_LU_ATTR_RO(prod);
450 450
451 static ssize_t target_stat_scsi_lu_show_attr_rev( 451 static ssize_t target_stat_scsi_lu_show_attr_rev(
452 struct se_dev_stat_grps *sgrps, char *page) 452 struct se_dev_stat_grps *sgrps, char *page)
453 { 453 {
454 struct se_subsystem_dev *se_subdev = container_of(sgrps, 454 struct se_subsystem_dev *se_subdev = container_of(sgrps,
455 struct se_subsystem_dev, dev_stat_grps); 455 struct se_subsystem_dev, dev_stat_grps);
456 struct se_device *dev = se_subdev->se_dev_ptr; 456 struct se_device *dev = se_subdev->se_dev_ptr;
457 int i; 457 int i;
458 char str[sizeof(dev->se_sub_dev->t10_wwn.revision)+1]; 458 char str[sizeof(dev->se_sub_dev->t10_wwn.revision)+1];
459 459
460 if (!dev) 460 if (!dev)
461 return -ENODEV; 461 return -ENODEV;
462 462
463 /* scsiLuRevisionId */ 463 /* scsiLuRevisionId */
464 for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.revision); i++) 464 for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.revision); i++)
465 str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.revision[i]) ? 465 str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.revision[i]) ?
466 dev->se_sub_dev->t10_wwn.revision[i] : ' '; 466 dev->se_sub_dev->t10_wwn.revision[i] : ' ';
467 str[i] = '\0'; 467 str[i] = '\0';
468 return snprintf(page, PAGE_SIZE, "%s\n", str); 468 return snprintf(page, PAGE_SIZE, "%s\n", str);
469 } 469 }
470 DEV_STAT_SCSI_LU_ATTR_RO(rev); 470 DEV_STAT_SCSI_LU_ATTR_RO(rev);
471 471
472 static ssize_t target_stat_scsi_lu_show_attr_dev_type( 472 static ssize_t target_stat_scsi_lu_show_attr_dev_type(
473 struct se_dev_stat_grps *sgrps, char *page) 473 struct se_dev_stat_grps *sgrps, char *page)
474 { 474 {
475 struct se_subsystem_dev *se_subdev = container_of(sgrps, 475 struct se_subsystem_dev *se_subdev = container_of(sgrps,
476 struct se_subsystem_dev, dev_stat_grps); 476 struct se_subsystem_dev, dev_stat_grps);
477 struct se_device *dev = se_subdev->se_dev_ptr; 477 struct se_device *dev = se_subdev->se_dev_ptr;
478 478
479 if (!dev) 479 if (!dev)
480 return -ENODEV; 480 return -ENODEV;
481 481
482 /* scsiLuPeripheralType */ 482 /* scsiLuPeripheralType */
483 return snprintf(page, PAGE_SIZE, "%u\n", 483 return snprintf(page, PAGE_SIZE, "%u\n",
484 dev->transport->get_device_type(dev)); 484 dev->transport->get_device_type(dev));
485 } 485 }
486 DEV_STAT_SCSI_LU_ATTR_RO(dev_type); 486 DEV_STAT_SCSI_LU_ATTR_RO(dev_type);
487 487
488 static ssize_t target_stat_scsi_lu_show_attr_status( 488 static ssize_t target_stat_scsi_lu_show_attr_status(
489 struct se_dev_stat_grps *sgrps, char *page) 489 struct se_dev_stat_grps *sgrps, char *page)
490 { 490 {
491 struct se_subsystem_dev *se_subdev = container_of(sgrps, 491 struct se_subsystem_dev *se_subdev = container_of(sgrps,
492 struct se_subsystem_dev, dev_stat_grps); 492 struct se_subsystem_dev, dev_stat_grps);
493 struct se_device *dev = se_subdev->se_dev_ptr; 493 struct se_device *dev = se_subdev->se_dev_ptr;
494 494
495 if (!dev) 495 if (!dev)
496 return -ENODEV; 496 return -ENODEV;
497 497
498 /* scsiLuStatus */ 498 /* scsiLuStatus */
499 return snprintf(page, PAGE_SIZE, "%s\n", 499 return snprintf(page, PAGE_SIZE, "%s\n",
500 (dev->dev_status == TRANSPORT_DEVICE_ACTIVATED) ? 500 (dev->dev_status == TRANSPORT_DEVICE_ACTIVATED) ?
501 "available" : "notavailable"); 501 "available" : "notavailable");
502 } 502 }
503 DEV_STAT_SCSI_LU_ATTR_RO(status); 503 DEV_STAT_SCSI_LU_ATTR_RO(status);
504 504
505 static ssize_t target_stat_scsi_lu_show_attr_state_bit( 505 static ssize_t target_stat_scsi_lu_show_attr_state_bit(
506 struct se_dev_stat_grps *sgrps, char *page) 506 struct se_dev_stat_grps *sgrps, char *page)
507 { 507 {
508 struct se_subsystem_dev *se_subdev = container_of(sgrps, 508 struct se_subsystem_dev *se_subdev = container_of(sgrps,
509 struct se_subsystem_dev, dev_stat_grps); 509 struct se_subsystem_dev, dev_stat_grps);
510 struct se_device *dev = se_subdev->se_dev_ptr; 510 struct se_device *dev = se_subdev->se_dev_ptr;
511 511
512 if (!dev) 512 if (!dev)
513 return -ENODEV; 513 return -ENODEV;
514 514
515 /* scsiLuState */ 515 /* scsiLuState */
516 return snprintf(page, PAGE_SIZE, "exposed\n"); 516 return snprintf(page, PAGE_SIZE, "exposed\n");
517 } 517 }
518 DEV_STAT_SCSI_LU_ATTR_RO(state_bit); 518 DEV_STAT_SCSI_LU_ATTR_RO(state_bit);
519 519
520 static ssize_t target_stat_scsi_lu_show_attr_num_cmds( 520 static ssize_t target_stat_scsi_lu_show_attr_num_cmds(
521 struct se_dev_stat_grps *sgrps, char *page) 521 struct se_dev_stat_grps *sgrps, char *page)
522 { 522 {
523 struct se_subsystem_dev *se_subdev = container_of(sgrps, 523 struct se_subsystem_dev *se_subdev = container_of(sgrps,
524 struct se_subsystem_dev, dev_stat_grps); 524 struct se_subsystem_dev, dev_stat_grps);
525 struct se_device *dev = se_subdev->se_dev_ptr; 525 struct se_device *dev = se_subdev->se_dev_ptr;
526 526
527 if (!dev) 527 if (!dev)
528 return -ENODEV; 528 return -ENODEV;
529 529
530 /* scsiLuNumCommands */ 530 /* scsiLuNumCommands */
531 return snprintf(page, PAGE_SIZE, "%llu\n", 531 return snprintf(page, PAGE_SIZE, "%llu\n",
532 (unsigned long long)dev->num_cmds); 532 (unsigned long long)dev->num_cmds);
533 } 533 }
534 DEV_STAT_SCSI_LU_ATTR_RO(num_cmds); 534 DEV_STAT_SCSI_LU_ATTR_RO(num_cmds);
535 535
536 static ssize_t target_stat_scsi_lu_show_attr_read_mbytes( 536 static ssize_t target_stat_scsi_lu_show_attr_read_mbytes(
537 struct se_dev_stat_grps *sgrps, char *page) 537 struct se_dev_stat_grps *sgrps, char *page)
538 { 538 {
539 struct se_subsystem_dev *se_subdev = container_of(sgrps, 539 struct se_subsystem_dev *se_subdev = container_of(sgrps,
540 struct se_subsystem_dev, dev_stat_grps); 540 struct se_subsystem_dev, dev_stat_grps);
541 struct se_device *dev = se_subdev->se_dev_ptr; 541 struct se_device *dev = se_subdev->se_dev_ptr;
542 542
543 if (!dev) 543 if (!dev)
544 return -ENODEV; 544 return -ENODEV;
545 545
546 /* scsiLuReadMegaBytes */ 546 /* scsiLuReadMegaBytes */
547 return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->read_bytes >> 20)); 547 return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->read_bytes >> 20));
548 } 548 }
549 DEV_STAT_SCSI_LU_ATTR_RO(read_mbytes); 549 DEV_STAT_SCSI_LU_ATTR_RO(read_mbytes);
550 550
551 static ssize_t target_stat_scsi_lu_show_attr_write_mbytes( 551 static ssize_t target_stat_scsi_lu_show_attr_write_mbytes(
552 struct se_dev_stat_grps *sgrps, char *page) 552 struct se_dev_stat_grps *sgrps, char *page)
553 { 553 {
554 struct se_subsystem_dev *se_subdev = container_of(sgrps, 554 struct se_subsystem_dev *se_subdev = container_of(sgrps,
555 struct se_subsystem_dev, dev_stat_grps); 555 struct se_subsystem_dev, dev_stat_grps);
556 struct se_device *dev = se_subdev->se_dev_ptr; 556 struct se_device *dev = se_subdev->se_dev_ptr;
557 557
558 if (!dev) 558 if (!dev)
559 return -ENODEV; 559 return -ENODEV;
560 560
561 /* scsiLuWrittenMegaBytes */ 561 /* scsiLuWrittenMegaBytes */
562 return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->write_bytes >> 20)); 562 return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->write_bytes >> 20));
563 } 563 }
564 DEV_STAT_SCSI_LU_ATTR_RO(write_mbytes); 564 DEV_STAT_SCSI_LU_ATTR_RO(write_mbytes);
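[Note: the read_mbytes and write_mbytes attributes above report mebibytes by shifting the byte counters right by 20 bits, i.e. an integer divide by 2^20; for example, 3221225472 bytes of reads shows up as 3072. The scsi_tgt_port table later in this file applies the same conversion to rx_data_octets and tx_data_octets.]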
565 565
566 static ssize_t target_stat_scsi_lu_show_attr_resets( 566 static ssize_t target_stat_scsi_lu_show_attr_resets(
567 struct se_dev_stat_grps *sgrps, char *page) 567 struct se_dev_stat_grps *sgrps, char *page)
568 { 568 {
569 struct se_subsystem_dev *se_subdev = container_of(sgrps, 569 struct se_subsystem_dev *se_subdev = container_of(sgrps,
570 struct se_subsystem_dev, dev_stat_grps); 570 struct se_subsystem_dev, dev_stat_grps);
571 struct se_device *dev = se_subdev->se_dev_ptr; 571 struct se_device *dev = se_subdev->se_dev_ptr;
572 572
573 if (!dev) 573 if (!dev)
574 return -ENODEV; 574 return -ENODEV;
575 575
576 /* scsiLuInResets */ 576 /* scsiLuInResets */
577 return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets); 577 return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets);
578 } 578 }
579 DEV_STAT_SCSI_LU_ATTR_RO(resets); 579 DEV_STAT_SCSI_LU_ATTR_RO(resets);
580 580
581 static ssize_t target_stat_scsi_lu_show_attr_full_stat( 581 static ssize_t target_stat_scsi_lu_show_attr_full_stat(
582 struct se_dev_stat_grps *sgrps, char *page) 582 struct se_dev_stat_grps *sgrps, char *page)
583 { 583 {
584 struct se_subsystem_dev *se_subdev = container_of(sgrps, 584 struct se_subsystem_dev *se_subdev = container_of(sgrps,
585 struct se_subsystem_dev, dev_stat_grps); 585 struct se_subsystem_dev, dev_stat_grps);
586 struct se_device *dev = se_subdev->se_dev_ptr; 586 struct se_device *dev = se_subdev->se_dev_ptr;
587 587
588 if (!dev) 588 if (!dev)
589 return -ENODEV; 589 return -ENODEV;
590 590
591 /* FIXME: scsiLuOutTaskSetFullStatus */ 591 /* FIXME: scsiLuOutTaskSetFullStatus */
592 return snprintf(page, PAGE_SIZE, "%u\n", 0); 592 return snprintf(page, PAGE_SIZE, "%u\n", 0);
593 } 593 }
594 DEV_STAT_SCSI_LU_ATTR_RO(full_stat); 594 DEV_STAT_SCSI_LU_ATTR_RO(full_stat);
595 595
596 static ssize_t target_stat_scsi_lu_show_attr_hs_num_cmds( 596 static ssize_t target_stat_scsi_lu_show_attr_hs_num_cmds(
597 struct se_dev_stat_grps *sgrps, char *page) 597 struct se_dev_stat_grps *sgrps, char *page)
598 { 598 {
599 struct se_subsystem_dev *se_subdev = container_of(sgrps, 599 struct se_subsystem_dev *se_subdev = container_of(sgrps,
600 struct se_subsystem_dev, dev_stat_grps); 600 struct se_subsystem_dev, dev_stat_grps);
601 struct se_device *dev = se_subdev->se_dev_ptr; 601 struct se_device *dev = se_subdev->se_dev_ptr;
602 602
603 if (!dev) 603 if (!dev)
604 return -ENODEV; 604 return -ENODEV;
605 605
606 /* FIXME: scsiLuHSInCommands */ 606 /* FIXME: scsiLuHSInCommands */
607 return snprintf(page, PAGE_SIZE, "%u\n", 0); 607 return snprintf(page, PAGE_SIZE, "%u\n", 0);
608 } 608 }
609 DEV_STAT_SCSI_LU_ATTR_RO(hs_num_cmds); 609 DEV_STAT_SCSI_LU_ATTR_RO(hs_num_cmds);
610 610
611 static ssize_t target_stat_scsi_lu_show_attr_creation_time( 611 static ssize_t target_stat_scsi_lu_show_attr_creation_time(
612 struct se_dev_stat_grps *sgrps, char *page) 612 struct se_dev_stat_grps *sgrps, char *page)
613 { 613 {
614 struct se_subsystem_dev *se_subdev = container_of(sgrps, 614 struct se_subsystem_dev *se_subdev = container_of(sgrps,
615 struct se_subsystem_dev, dev_stat_grps); 615 struct se_subsystem_dev, dev_stat_grps);
616 struct se_device *dev = se_subdev->se_dev_ptr; 616 struct se_device *dev = se_subdev->se_dev_ptr;
617 617
618 if (!dev) 618 if (!dev)
619 return -ENODEV; 619 return -ENODEV;
620 620
621 /* scsiLuCreationTime */ 621 /* scsiLuCreationTime */
622 return snprintf(page, PAGE_SIZE, "%u\n", (u32)(((u32)dev->creation_time - 622 return snprintf(page, PAGE_SIZE, "%u\n", (u32)(((u32)dev->creation_time -
623 INITIAL_JIFFIES) * 100 / HZ)); 623 INITIAL_JIFFIES) * 100 / HZ));
624 } 624 }
625 DEV_STAT_SCSI_LU_ATTR_RO(creation_time); 625 DEV_STAT_SCSI_LU_ATTR_RO(creation_time);
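[Note: scsiLuCreationTime is exported in SNMP TimeTicks style, hundredths of a second: (creation_time - INITIAL_JIFFIES) * 100 / HZ. As a worked example, with HZ=250 a device created 2500 jiffies after the INITIAL_JIFFIES baseline reads back as (2500 * 100) / 250 = 1000, i.e. 10 seconds.]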
626 626
627 CONFIGFS_EATTR_OPS(target_stat_scsi_lu, se_dev_stat_grps, scsi_lu_group); 627 CONFIGFS_EATTR_OPS(target_stat_scsi_lu, se_dev_stat_grps, scsi_lu_group);
628 628
629 static struct configfs_attribute *target_stat_scsi_lu_attrs[] = { 629 static struct configfs_attribute *target_stat_scsi_lu_attrs[] = {
630 &target_stat_scsi_lu_inst.attr, 630 &target_stat_scsi_lu_inst.attr,
631 &target_stat_scsi_lu_dev.attr, 631 &target_stat_scsi_lu_dev.attr,
632 &target_stat_scsi_lu_indx.attr, 632 &target_stat_scsi_lu_indx.attr,
633 &target_stat_scsi_lu_lun.attr, 633 &target_stat_scsi_lu_lun.attr,
634 &target_stat_scsi_lu_lu_name.attr, 634 &target_stat_scsi_lu_lu_name.attr,
635 &target_stat_scsi_lu_vend.attr, 635 &target_stat_scsi_lu_vend.attr,
636 &target_stat_scsi_lu_prod.attr, 636 &target_stat_scsi_lu_prod.attr,
637 &target_stat_scsi_lu_rev.attr, 637 &target_stat_scsi_lu_rev.attr,
638 &target_stat_scsi_lu_dev_type.attr, 638 &target_stat_scsi_lu_dev_type.attr,
639 &target_stat_scsi_lu_status.attr, 639 &target_stat_scsi_lu_status.attr,
640 &target_stat_scsi_lu_state_bit.attr, 640 &target_stat_scsi_lu_state_bit.attr,
641 &target_stat_scsi_lu_num_cmds.attr, 641 &target_stat_scsi_lu_num_cmds.attr,
642 &target_stat_scsi_lu_read_mbytes.attr, 642 &target_stat_scsi_lu_read_mbytes.attr,
643 &target_stat_scsi_lu_write_mbytes.attr, 643 &target_stat_scsi_lu_write_mbytes.attr,
644 &target_stat_scsi_lu_resets.attr, 644 &target_stat_scsi_lu_resets.attr,
645 &target_stat_scsi_lu_full_stat.attr, 645 &target_stat_scsi_lu_full_stat.attr,
646 &target_stat_scsi_lu_hs_num_cmds.attr, 646 &target_stat_scsi_lu_hs_num_cmds.attr,
647 &target_stat_scsi_lu_creation_time.attr, 647 &target_stat_scsi_lu_creation_time.attr,
648 NULL, 648 NULL,
649 }; 649 };
650 650
651 static struct configfs_item_operations target_stat_scsi_lu_attrib_ops = { 651 static struct configfs_item_operations target_stat_scsi_lu_attrib_ops = {
652 .show_attribute = target_stat_scsi_lu_attr_show, 652 .show_attribute = target_stat_scsi_lu_attr_show,
653 .store_attribute = target_stat_scsi_lu_attr_store, 653 .store_attribute = target_stat_scsi_lu_attr_store,
654 }; 654 };
655 655
656 static struct config_item_type target_stat_scsi_lu_cit = { 656 static struct config_item_type target_stat_scsi_lu_cit = {
657 .ct_item_ops = &target_stat_scsi_lu_attrib_ops, 657 .ct_item_ops = &target_stat_scsi_lu_attrib_ops,
658 .ct_attrs = target_stat_scsi_lu_attrs, 658 .ct_attrs = target_stat_scsi_lu_attrs,
659 .ct_owner = THIS_MODULE, 659 .ct_owner = THIS_MODULE,
660 }; 660 };
661 661
662 /* 662 /*
663 * Called from target_core_configfs.c:target_core_make_subdev() to setup 663 * Called from target_core_configfs.c:target_core_make_subdev() to setup
664 * the target statistics groups + configfs CITs located in target_core_stat.c 664 * the target statistics groups + configfs CITs located in target_core_stat.c
665 */ 665 */
666 void target_stat_setup_dev_default_groups(struct se_subsystem_dev *se_subdev) 666 void target_stat_setup_dev_default_groups(struct se_subsystem_dev *se_subdev)
667 { 667 {
668 struct config_group *dev_stat_grp = &se_subdev->dev_stat_grps.stat_group; 668 struct config_group *dev_stat_grp = &se_subdev->dev_stat_grps.stat_group;
669 669
670 config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_dev_group, 670 config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_dev_group,
671 "scsi_dev", &target_stat_scsi_dev_cit); 671 "scsi_dev", &target_stat_scsi_dev_cit);
672 config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_tgt_dev_group, 672 config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_tgt_dev_group,
673 "scsi_tgt_dev", &target_stat_scsi_tgt_dev_cit); 673 "scsi_tgt_dev", &target_stat_scsi_tgt_dev_cit);
674 config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_lu_group, 674 config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_lu_group,
675 "scsi_lu", &target_stat_scsi_lu_cit); 675 "scsi_lu", &target_stat_scsi_lu_cit);
676 676
677 dev_stat_grp->default_groups[0] = &se_subdev->dev_stat_grps.scsi_dev_group; 677 dev_stat_grp->default_groups[0] = &se_subdev->dev_stat_grps.scsi_dev_group;
678 dev_stat_grp->default_groups[1] = &se_subdev->dev_stat_grps.scsi_tgt_dev_group; 678 dev_stat_grp->default_groups[1] = &se_subdev->dev_stat_grps.scsi_tgt_dev_group;
679 dev_stat_grp->default_groups[2] = &se_subdev->dev_stat_grps.scsi_lu_group; 679 dev_stat_grp->default_groups[2] = &se_subdev->dev_stat_grps.scsi_lu_group;
680 dev_stat_grp->default_groups[3] = NULL; 680 dev_stat_grp->default_groups[3] = NULL;
681 } 681 }
682 682
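[Note: the helper above wires three named child groups into the device's NULL-terminated default_groups[] array, so configfs instantiates them automatically when the parent statistics group is registered. Assuming the usual target configfs layout (illustrative paths, not part of this patch), a backstore then exposes:

	.../target/core/$HBA/$DEV/statistics/scsi_dev
	.../target/core/$HBA/$DEV/statistics/scsi_tgt_dev
	.../target/core/$HBA/$DEV/statistics/scsi_lu]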
683 /* 683 /*
684 * SCSI Port Table 684 * SCSI Port Table
685 */ 685 */
686 686
687 CONFIGFS_EATTR_STRUCT(target_stat_scsi_port, se_port_stat_grps); 687 CONFIGFS_EATTR_STRUCT(target_stat_scsi_port, se_port_stat_grps);
688 #define DEV_STAT_SCSI_PORT_ATTR(_name, _mode) \ 688 #define DEV_STAT_SCSI_PORT_ATTR(_name, _mode) \
689 static struct target_stat_scsi_port_attribute \ 689 static struct target_stat_scsi_port_attribute \
690 target_stat_scsi_port_##_name = \ 690 target_stat_scsi_port_##_name = \
691 __CONFIGFS_EATTR(_name, _mode, \ 691 __CONFIGFS_EATTR(_name, _mode, \
692 target_stat_scsi_port_show_attr_##_name, \ 692 target_stat_scsi_port_show_attr_##_name, \
693 target_stat_scsi_port_store_attr_##_name); 693 target_stat_scsi_port_store_attr_##_name);
694 694
695 #define DEV_STAT_SCSI_PORT_ATTR_RO(_name) \ 695 #define DEV_STAT_SCSI_PORT_ATTR_RO(_name) \
696 static struct target_stat_scsi_port_attribute \ 696 static struct target_stat_scsi_port_attribute \
697 target_stat_scsi_port_##_name = \ 697 target_stat_scsi_port_##_name = \
698 __CONFIGFS_EATTR_RO(_name, \ 698 __CONFIGFS_EATTR_RO(_name, \
699 target_stat_scsi_port_show_attr_##_name); 699 target_stat_scsi_port_show_attr_##_name);
700 700
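[Note: these wrapper macros only paste identifiers together. Expanding DEV_STAT_SCSI_PORT_ATTR_RO(inst) by hand (a sketch of the textual substitution, not code added by this patch) gives:

	static struct target_stat_scsi_port_attribute
			target_stat_scsi_port_inst =
		__CONFIGFS_EATTR_RO(inst,
			target_stat_scsi_port_show_attr_inst);

The scsi_tgt_port, scsi_transport, and scsi_auth_intr tables below repeat the same ATTR/ATTR_RO pattern with their own name prefixes.]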
701 static ssize_t target_stat_scsi_port_show_attr_inst( 701 static ssize_t target_stat_scsi_port_show_attr_inst(
702 struct se_port_stat_grps *pgrps, char *page) 702 struct se_port_stat_grps *pgrps, char *page)
703 { 703 {
704 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); 704 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
705 struct se_port *sep; 705 struct se_port *sep;
706 struct se_device *dev = lun->lun_se_dev; 706 struct se_device *dev = lun->lun_se_dev;
707 struct se_hba *hba; 707 struct se_hba *hba;
708 ssize_t ret; 708 ssize_t ret;
709 709
710 spin_lock(&lun->lun_sep_lock); 710 spin_lock(&lun->lun_sep_lock);
711 sep = lun->lun_sep; 711 sep = lun->lun_sep;
712 if (!sep) { 712 if (!sep) {
713 spin_unlock(&lun->lun_sep_lock); 713 spin_unlock(&lun->lun_sep_lock);
714 return -ENODEV; 714 return -ENODEV;
715 } 715 }
716 hba = dev->se_hba; 716 hba = dev->se_hba;
717 ret = snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index); 717 ret = snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
718 spin_unlock(&lun->lun_sep_lock); 718 spin_unlock(&lun->lun_sep_lock);
719 return ret; 719 return ret;
720 } 720 }
721 DEV_STAT_SCSI_PORT_ATTR_RO(inst); 721 DEV_STAT_SCSI_PORT_ATTR_RO(inst);
722 722
723 static ssize_t target_stat_scsi_port_show_attr_dev( 723 static ssize_t target_stat_scsi_port_show_attr_dev(
724 struct se_port_stat_grps *pgrps, char *page) 724 struct se_port_stat_grps *pgrps, char *page)
725 { 725 {
726 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); 726 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
727 struct se_port *sep; 727 struct se_port *sep;
728 struct se_device *dev = lun->lun_se_dev; 728 struct se_device *dev = lun->lun_se_dev;
729 ssize_t ret; 729 ssize_t ret;
730 730
731 spin_lock(&lun->lun_sep_lock); 731 spin_lock(&lun->lun_sep_lock);
732 sep = lun->lun_sep; 732 sep = lun->lun_sep;
733 if (!sep) { 733 if (!sep) {
734 spin_unlock(&lun->lun_sep_lock); 734 spin_unlock(&lun->lun_sep_lock);
735 return -ENODEV; 735 return -ENODEV;
736 } 736 }
737 ret = snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index); 737 ret = snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
738 spin_unlock(&lun->lun_sep_lock); 738 spin_unlock(&lun->lun_sep_lock);
739 return ret; 739 return ret;
740 } 740 }
741 DEV_STAT_SCSI_PORT_ATTR_RO(dev); 741 DEV_STAT_SCSI_PORT_ATTR_RO(dev);
742 742
743 static ssize_t target_stat_scsi_port_show_attr_indx( 743 static ssize_t target_stat_scsi_port_show_attr_indx(
744 struct se_port_stat_grps *pgrps, char *page) 744 struct se_port_stat_grps *pgrps, char *page)
745 { 745 {
746 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); 746 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
747 struct se_port *sep; 747 struct se_port *sep;
748 ssize_t ret; 748 ssize_t ret;
749 749
750 spin_lock(&lun->lun_sep_lock); 750 spin_lock(&lun->lun_sep_lock);
751 sep = lun->lun_sep; 751 sep = lun->lun_sep;
752 if (!sep) { 752 if (!sep) {
753 spin_unlock(&lun->lun_sep_lock); 753 spin_unlock(&lun->lun_sep_lock);
754 return -ENODEV; 754 return -ENODEV;
755 } 755 }
756 ret = snprintf(page, PAGE_SIZE, "%u\n", sep->sep_index); 756 ret = snprintf(page, PAGE_SIZE, "%u\n", sep->sep_index);
757 spin_unlock(&lun->lun_sep_lock); 757 spin_unlock(&lun->lun_sep_lock);
758 return ret; 758 return ret;
759 } 759 }
760 DEV_STAT_SCSI_PORT_ATTR_RO(indx); 760 DEV_STAT_SCSI_PORT_ATTR_RO(indx);
761 761
762 static ssize_t target_stat_scsi_port_show_attr_role( 762 static ssize_t target_stat_scsi_port_show_attr_role(
763 struct se_port_stat_grps *pgrps, char *page) 763 struct se_port_stat_grps *pgrps, char *page)
764 { 764 {
765 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); 765 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
766 struct se_device *dev = lun->lun_se_dev; 766 struct se_device *dev = lun->lun_se_dev;
767 struct se_port *sep; 767 struct se_port *sep;
768 ssize_t ret; 768 ssize_t ret;
769 769
770 if (!dev) 770 if (!dev)
771 return -ENODEV; 771 return -ENODEV;
772 772
773 spin_lock(&lun->lun_sep_lock); 773 spin_lock(&lun->lun_sep_lock);
774 sep = lun->lun_sep; 774 sep = lun->lun_sep;
775 if (!sep) { 775 if (!sep) {
776 spin_unlock(&lun->lun_sep_lock); 776 spin_unlock(&lun->lun_sep_lock);
777 return -ENODEV; 777 return -ENODEV;
778 } 778 }
779 ret = snprintf(page, PAGE_SIZE, "%s%u\n", "Device", dev->dev_index); 779 ret = snprintf(page, PAGE_SIZE, "%s%u\n", "Device", dev->dev_index);
780 spin_unlock(&lun->lun_sep_lock); 780 spin_unlock(&lun->lun_sep_lock);
781 return ret; 781 return ret;
782 } 782 }
783 DEV_STAT_SCSI_PORT_ATTR_RO(role); 783 DEV_STAT_SCSI_PORT_ATTR_RO(role);
784 784
785 static ssize_t target_stat_scsi_port_show_attr_busy_count( 785 static ssize_t target_stat_scsi_port_show_attr_busy_count(
786 struct se_port_stat_grps *pgrps, char *page) 786 struct se_port_stat_grps *pgrps, char *page)
787 { 787 {
788 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); 788 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
789 struct se_port *sep; 789 struct se_port *sep;
790 ssize_t ret; 790 ssize_t ret;
791 791
792 spin_lock(&lun->lun_sep_lock); 792 spin_lock(&lun->lun_sep_lock);
793 sep = lun->lun_sep; 793 sep = lun->lun_sep;
794 if (!sep) { 794 if (!sep) {
795 spin_unlock(&lun->lun_sep_lock); 795 spin_unlock(&lun->lun_sep_lock);
796 return -ENODEV; 796 return -ENODEV;
797 } 797 }
798 /* FIXME: scsiPortBusyStatuses */ 798 /* FIXME: scsiPortBusyStatuses */
799 ret = snprintf(page, PAGE_SIZE, "%u\n", 0); 799 ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
800 spin_unlock(&lun->lun_sep_lock); 800 spin_unlock(&lun->lun_sep_lock);
801 return ret; 801 return ret;
802 } 802 }
803 DEV_STAT_SCSI_PORT_ATTR_RO(busy_count); 803 DEV_STAT_SCSI_PORT_ATTR_RO(busy_count);
804 804
805 CONFIGFS_EATTR_OPS(target_stat_scsi_port, se_port_stat_grps, scsi_port_group); 805 CONFIGFS_EATTR_OPS(target_stat_scsi_port, se_port_stat_grps, scsi_port_group);
806 806
807 static struct configfs_attribute *target_stat_scsi_port_attrs[] = { 807 static struct configfs_attribute *target_stat_scsi_port_attrs[] = {
808 &target_stat_scsi_port_inst.attr, 808 &target_stat_scsi_port_inst.attr,
809 &target_stat_scsi_port_dev.attr, 809 &target_stat_scsi_port_dev.attr,
810 &target_stat_scsi_port_indx.attr, 810 &target_stat_scsi_port_indx.attr,
811 &target_stat_scsi_port_role.attr, 811 &target_stat_scsi_port_role.attr,
812 &target_stat_scsi_port_busy_count.attr, 812 &target_stat_scsi_port_busy_count.attr,
813 NULL, 813 NULL,
814 }; 814 };
815 815
816 static struct configfs_item_operations target_stat_scsi_port_attrib_ops = { 816 static struct configfs_item_operations target_stat_scsi_port_attrib_ops = {
817 .show_attribute = target_stat_scsi_port_attr_show, 817 .show_attribute = target_stat_scsi_port_attr_show,
818 .store_attribute = target_stat_scsi_port_attr_store, 818 .store_attribute = target_stat_scsi_port_attr_store,
819 }; 819 };
820 820
821 static struct config_item_type target_stat_scsi_port_cit = { 821 static struct config_item_type target_stat_scsi_port_cit = {
822 .ct_item_ops = &target_stat_scsi_port_attrib_ops, 822 .ct_item_ops = &target_stat_scsi_port_attrib_ops,
823 .ct_attrs = target_stat_scsi_port_attrs, 823 .ct_attrs = target_stat_scsi_port_attrs,
824 .ct_owner = THIS_MODULE, 824 .ct_owner = THIS_MODULE,
825 }; 825 };
826 826
827 /* 827 /*
828 * SCSI Target Port Table 828 * SCSI Target Port Table
829 */ 829 */
830 CONFIGFS_EATTR_STRUCT(target_stat_scsi_tgt_port, se_port_stat_grps); 830 CONFIGFS_EATTR_STRUCT(target_stat_scsi_tgt_port, se_port_stat_grps);
831 #define DEV_STAT_SCSI_TGT_PORT_ATTR(_name, _mode) \ 831 #define DEV_STAT_SCSI_TGT_PORT_ATTR(_name, _mode) \
832 static struct target_stat_scsi_tgt_port_attribute \ 832 static struct target_stat_scsi_tgt_port_attribute \
833 target_stat_scsi_tgt_port_##_name = \ 833 target_stat_scsi_tgt_port_##_name = \
834 __CONFIGFS_EATTR(_name, _mode, \ 834 __CONFIGFS_EATTR(_name, _mode, \
835 target_stat_scsi_tgt_port_show_attr_##_name, \ 835 target_stat_scsi_tgt_port_show_attr_##_name, \
836 target_stat_scsi_tgt_port_store_attr_##_name); 836 target_stat_scsi_tgt_port_store_attr_##_name);
837 837
838 #define DEV_STAT_SCSI_TGT_PORT_ATTR_RO(_name) \ 838 #define DEV_STAT_SCSI_TGT_PORT_ATTR_RO(_name) \
839 static struct target_stat_scsi_tgt_port_attribute \ 839 static struct target_stat_scsi_tgt_port_attribute \
840 target_stat_scsi_tgt_port_##_name = \ 840 target_stat_scsi_tgt_port_##_name = \
841 __CONFIGFS_EATTR_RO(_name, \ 841 __CONFIGFS_EATTR_RO(_name, \
842 target_stat_scsi_tgt_port_show_attr_##_name); 842 target_stat_scsi_tgt_port_show_attr_##_name);
843 843
844 static ssize_t target_stat_scsi_tgt_port_show_attr_inst( 844 static ssize_t target_stat_scsi_tgt_port_show_attr_inst(
845 struct se_port_stat_grps *pgrps, char *page) 845 struct se_port_stat_grps *pgrps, char *page)
846 { 846 {
847 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); 847 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
848 struct se_device *dev = lun->lun_se_dev; 848 struct se_device *dev = lun->lun_se_dev;
849 struct se_port *sep; 849 struct se_port *sep;
850 struct se_hba *hba; 850 struct se_hba *hba;
851 ssize_t ret; 851 ssize_t ret;
852 852
853 spin_lock(&lun->lun_sep_lock); 853 spin_lock(&lun->lun_sep_lock);
854 sep = lun->lun_sep; 854 sep = lun->lun_sep;
855 if (!sep) { 855 if (!sep) {
856 spin_unlock(&lun->lun_sep_lock); 856 spin_unlock(&lun->lun_sep_lock);
857 return -ENODEV; 857 return -ENODEV;
858 } 858 }
859 hba = dev->se_hba; 859 hba = dev->se_hba;
860 ret = snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index); 860 ret = snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
861 spin_unlock(&lun->lun_sep_lock); 861 spin_unlock(&lun->lun_sep_lock);
862 return ret; 862 return ret;
863 } 863 }
864 DEV_STAT_SCSI_TGT_PORT_ATTR_RO(inst); 864 DEV_STAT_SCSI_TGT_PORT_ATTR_RO(inst);
865 865
866 static ssize_t target_stat_scsi_tgt_port_show_attr_dev( 866 static ssize_t target_stat_scsi_tgt_port_show_attr_dev(
867 struct se_port_stat_grps *pgrps, char *page) 867 struct se_port_stat_grps *pgrps, char *page)
868 { 868 {
869 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); 869 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
870 struct se_device *dev = lun->lun_se_dev; 870 struct se_device *dev = lun->lun_se_dev;
871 struct se_port *sep; 871 struct se_port *sep;
872 ssize_t ret; 872 ssize_t ret;
873 873
874 spin_lock(&lun->lun_sep_lock); 874 spin_lock(&lun->lun_sep_lock);
875 sep = lun->lun_sep; 875 sep = lun->lun_sep;
876 if (!sep) { 876 if (!sep) {
877 spin_unlock(&lun->lun_sep_lock); 877 spin_unlock(&lun->lun_sep_lock);
878 return -ENODEV; 878 return -ENODEV;
879 } 879 }
880 ret = snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index); 880 ret = snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
881 spin_unlock(&lun->lun_sep_lock); 881 spin_unlock(&lun->lun_sep_lock);
882 return ret; 882 return ret;
883 } 883 }
884 DEV_STAT_SCSI_TGT_PORT_ATTR_RO(dev); 884 DEV_STAT_SCSI_TGT_PORT_ATTR_RO(dev);
885 885
886 static ssize_t target_stat_scsi_tgt_port_show_attr_indx( 886 static ssize_t target_stat_scsi_tgt_port_show_attr_indx(
887 struct se_port_stat_grps *pgrps, char *page) 887 struct se_port_stat_grps *pgrps, char *page)
888 { 888 {
889 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); 889 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
890 struct se_port *sep; 890 struct se_port *sep;
891 ssize_t ret; 891 ssize_t ret;
892 892
893 spin_lock(&lun->lun_sep_lock); 893 spin_lock(&lun->lun_sep_lock);
894 sep = lun->lun_sep; 894 sep = lun->lun_sep;
895 if (!sep) { 895 if (!sep) {
896 spin_unlock(&lun->lun_sep_lock); 896 spin_unlock(&lun->lun_sep_lock);
897 return -ENODEV; 897 return -ENODEV;
898 } 898 }
899 ret = snprintf(page, PAGE_SIZE, "%u\n", sep->sep_index); 899 ret = snprintf(page, PAGE_SIZE, "%u\n", sep->sep_index);
900 spin_unlock(&lun->lun_sep_lock); 900 spin_unlock(&lun->lun_sep_lock);
901 return ret; 901 return ret;
902 } 902 }
903 DEV_STAT_SCSI_TGT_PORT_ATTR_RO(indx); 903 DEV_STAT_SCSI_TGT_PORT_ATTR_RO(indx);
904 904
905 static ssize_t target_stat_scsi_tgt_port_show_attr_name( 905 static ssize_t target_stat_scsi_tgt_port_show_attr_name(
906 struct se_port_stat_grps *pgrps, char *page) 906 struct se_port_stat_grps *pgrps, char *page)
907 { 907 {
908 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); 908 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
909 struct se_port *sep; 909 struct se_port *sep;
910 struct se_portal_group *tpg; 910 struct se_portal_group *tpg;
911 ssize_t ret; 911 ssize_t ret;
912 912
913 spin_lock(&lun->lun_sep_lock); 913 spin_lock(&lun->lun_sep_lock);
914 sep = lun->lun_sep; 914 sep = lun->lun_sep;
915 if (!sep) { 915 if (!sep) {
916 spin_unlock(&lun->lun_sep_lock); 916 spin_unlock(&lun->lun_sep_lock);
917 return -ENODEV; 917 return -ENODEV;
918 } 918 }
919 tpg = sep->sep_tpg; 919 tpg = sep->sep_tpg;
920 920
921 ret = snprintf(page, PAGE_SIZE, "%sPort#%u\n", 921 ret = snprintf(page, PAGE_SIZE, "%sPort#%u\n",
922 tpg->se_tpg_tfo->get_fabric_name(), sep->sep_index); 922 tpg->se_tpg_tfo->get_fabric_name(), sep->sep_index);
923 spin_unlock(&lun->lun_sep_lock); 923 spin_unlock(&lun->lun_sep_lock);
924 return ret; 924 return ret;
925 } 925 }
926 DEV_STAT_SCSI_TGT_PORT_ATTR_RO(name); 926 DEV_STAT_SCSI_TGT_PORT_ATTR_RO(name);
927 927
928 static ssize_t target_stat_scsi_tgt_port_show_attr_port_index( 928 static ssize_t target_stat_scsi_tgt_port_show_attr_port_index(
929 struct se_port_stat_grps *pgrps, char *page) 929 struct se_port_stat_grps *pgrps, char *page)
930 { 930 {
931 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); 931 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
932 struct se_port *sep; 932 struct se_port *sep;
933 struct se_portal_group *tpg; 933 struct se_portal_group *tpg;
934 ssize_t ret; 934 ssize_t ret;
935 935
936 spin_lock(&lun->lun_sep_lock); 936 spin_lock(&lun->lun_sep_lock);
937 sep = lun->lun_sep; 937 sep = lun->lun_sep;
938 if (!sep) { 938 if (!sep) {
939 spin_unlock(&lun->lun_sep_lock); 939 spin_unlock(&lun->lun_sep_lock);
940 return -ENODEV; 940 return -ENODEV;
941 } 941 }
942 tpg = sep->sep_tpg; 942 tpg = sep->sep_tpg;
943 943
944 ret = snprintf(page, PAGE_SIZE, "%s%s%d\n", 944 ret = snprintf(page, PAGE_SIZE, "%s%s%d\n",
945 tpg->se_tpg_tfo->tpg_get_wwn(tpg), "+t+", 945 tpg->se_tpg_tfo->tpg_get_wwn(tpg), "+t+",
946 tpg->se_tpg_tfo->tpg_get_tag(tpg)); 946 tpg->se_tpg_tfo->tpg_get_tag(tpg));
947 spin_unlock(&lun->lun_sep_lock); 947 spin_unlock(&lun->lun_sep_lock);
948 return ret; 948 return ret;
949 } 949 }
950 DEV_STAT_SCSI_TGT_PORT_ATTR_RO(port_index); 950 DEV_STAT_SCSI_TGT_PORT_ATTR_RO(port_index);
951 951
952 static ssize_t target_stat_scsi_tgt_port_show_attr_in_cmds( 952 static ssize_t target_stat_scsi_tgt_port_show_attr_in_cmds(
953 struct se_port_stat_grps *pgrps, char *page) 953 struct se_port_stat_grps *pgrps, char *page)
954 { 954 {
955 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); 955 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
956 struct se_port *sep; 956 struct se_port *sep;
957 struct se_portal_group *tpg; 957 struct se_portal_group *tpg;
958 ssize_t ret; 958 ssize_t ret;
959 959
960 spin_lock(&lun->lun_sep_lock); 960 spin_lock(&lun->lun_sep_lock);
961 sep = lun->lun_sep; 961 sep = lun->lun_sep;
962 if (!sep) { 962 if (!sep) {
963 spin_unlock(&lun->lun_sep_lock); 963 spin_unlock(&lun->lun_sep_lock);
964 return -ENODEV; 964 return -ENODEV;
965 } 965 }
966 tpg = sep->sep_tpg; 966 tpg = sep->sep_tpg;
967 967
968 ret = snprintf(page, PAGE_SIZE, "%llu\n", sep->sep_stats.cmd_pdus); 968 ret = snprintf(page, PAGE_SIZE, "%llu\n", sep->sep_stats.cmd_pdus);
969 spin_unlock(&lun->lun_sep_lock); 969 spin_unlock(&lun->lun_sep_lock);
970 return ret; 970 return ret;
971 } 971 }
972 DEV_STAT_SCSI_TGT_PORT_ATTR_RO(in_cmds); 972 DEV_STAT_SCSI_TGT_PORT_ATTR_RO(in_cmds);
973 973
974 static ssize_t target_stat_scsi_tgt_port_show_attr_write_mbytes( 974 static ssize_t target_stat_scsi_tgt_port_show_attr_write_mbytes(
975 struct se_port_stat_grps *pgrps, char *page) 975 struct se_port_stat_grps *pgrps, char *page)
976 { 976 {
977 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); 977 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
978 struct se_port *sep; 978 struct se_port *sep;
979 struct se_portal_group *tpg; 979 struct se_portal_group *tpg;
980 ssize_t ret; 980 ssize_t ret;
981 981
982 spin_lock(&lun->lun_sep_lock); 982 spin_lock(&lun->lun_sep_lock);
983 sep = lun->lun_sep; 983 sep = lun->lun_sep;
984 if (!sep) { 984 if (!sep) {
985 spin_unlock(&lun->lun_sep_lock); 985 spin_unlock(&lun->lun_sep_lock);
986 return -ENODEV; 986 return -ENODEV;
987 } 987 }
988 tpg = sep->sep_tpg; 988 tpg = sep->sep_tpg;
989 989
990 ret = snprintf(page, PAGE_SIZE, "%u\n", 990 ret = snprintf(page, PAGE_SIZE, "%u\n",
991 (u32)(sep->sep_stats.rx_data_octets >> 20)); 991 (u32)(sep->sep_stats.rx_data_octets >> 20));
992 spin_unlock(&lun->lun_sep_lock); 992 spin_unlock(&lun->lun_sep_lock);
993 return ret; 993 return ret;
994 } 994 }
995 DEV_STAT_SCSI_TGT_PORT_ATTR_RO(write_mbytes); 995 DEV_STAT_SCSI_TGT_PORT_ATTR_RO(write_mbytes);
996 996
997 static ssize_t target_stat_scsi_tgt_port_show_attr_read_mbytes( 997 static ssize_t target_stat_scsi_tgt_port_show_attr_read_mbytes(
998 struct se_port_stat_grps *pgrps, char *page) 998 struct se_port_stat_grps *pgrps, char *page)
999 { 999 {
1000 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); 1000 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
1001 struct se_port *sep; 1001 struct se_port *sep;
1002 struct se_portal_group *tpg; 1002 struct se_portal_group *tpg;
1003 ssize_t ret; 1003 ssize_t ret;
1004 1004
1005 spin_lock(&lun->lun_sep_lock); 1005 spin_lock(&lun->lun_sep_lock);
1006 sep = lun->lun_sep; 1006 sep = lun->lun_sep;
1007 if (!sep) { 1007 if (!sep) {
1008 spin_unlock(&lun->lun_sep_lock); 1008 spin_unlock(&lun->lun_sep_lock);
1009 return -ENODEV; 1009 return -ENODEV;
1010 } 1010 }
1011 tpg = sep->sep_tpg; 1011 tpg = sep->sep_tpg;
1012 1012
1013 ret = snprintf(page, PAGE_SIZE, "%u\n", 1013 ret = snprintf(page, PAGE_SIZE, "%u\n",
1014 (u32)(sep->sep_stats.tx_data_octets >> 20)); 1014 (u32)(sep->sep_stats.tx_data_octets >> 20));
1015 spin_unlock(&lun->lun_sep_lock); 1015 spin_unlock(&lun->lun_sep_lock);
1016 return ret; 1016 return ret;
1017 } 1017 }
1018 DEV_STAT_SCSI_TGT_PORT_ATTR_RO(read_mbytes); 1018 DEV_STAT_SCSI_TGT_PORT_ATTR_RO(read_mbytes);
1019 1019
1020 static ssize_t target_stat_scsi_tgt_port_show_attr_hs_in_cmds( 1020 static ssize_t target_stat_scsi_tgt_port_show_attr_hs_in_cmds(
1021 struct se_port_stat_grps *pgrps, char *page) 1021 struct se_port_stat_grps *pgrps, char *page)
1022 { 1022 {
1023 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); 1023 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
1024 struct se_port *sep; 1024 struct se_port *sep;
1025 struct se_portal_group *tpg; 1025 struct se_portal_group *tpg;
1026 ssize_t ret; 1026 ssize_t ret;
1027 1027
1028 spin_lock(&lun->lun_sep_lock); 1028 spin_lock(&lun->lun_sep_lock);
1029 sep = lun->lun_sep; 1029 sep = lun->lun_sep;
1030 if (!sep) { 1030 if (!sep) {
1031 spin_unlock(&lun->lun_sep_lock); 1031 spin_unlock(&lun->lun_sep_lock);
1032 return -ENODEV; 1032 return -ENODEV;
1033 } 1033 }
1034 tpg = sep->sep_tpg; 1034 tpg = sep->sep_tpg;
1035 1035
1036 /* FIXME: scsiTgtPortHsInCommands */ 1036 /* FIXME: scsiTgtPortHsInCommands */
1037 ret = snprintf(page, PAGE_SIZE, "%u\n", 0); 1037 ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
1038 spin_unlock(&lun->lun_sep_lock); 1038 spin_unlock(&lun->lun_sep_lock);
1039 return ret; 1039 return ret;
1040 } 1040 }
1041 DEV_STAT_SCSI_TGT_PORT_ATTR_RO(hs_in_cmds); 1041 DEV_STAT_SCSI_TGT_PORT_ATTR_RO(hs_in_cmds);
1042 1042
1043 CONFIGFS_EATTR_OPS(target_stat_scsi_tgt_port, se_port_stat_grps, 1043 CONFIGFS_EATTR_OPS(target_stat_scsi_tgt_port, se_port_stat_grps,
1044 scsi_tgt_port_group); 1044 scsi_tgt_port_group);
1045 1045
1046 static struct configfs_attribute *target_stat_scsi_tgt_port_attrs[] = { 1046 static struct configfs_attribute *target_stat_scsi_tgt_port_attrs[] = {
1047 &target_stat_scsi_tgt_port_inst.attr, 1047 &target_stat_scsi_tgt_port_inst.attr,
1048 &target_stat_scsi_tgt_port_dev.attr, 1048 &target_stat_scsi_tgt_port_dev.attr,
1049 &target_stat_scsi_tgt_port_indx.attr, 1049 &target_stat_scsi_tgt_port_indx.attr,
1050 &target_stat_scsi_tgt_port_name.attr, 1050 &target_stat_scsi_tgt_port_name.attr,
1051 &target_stat_scsi_tgt_port_port_index.attr, 1051 &target_stat_scsi_tgt_port_port_index.attr,
1052 &target_stat_scsi_tgt_port_in_cmds.attr, 1052 &target_stat_scsi_tgt_port_in_cmds.attr,
1053 &target_stat_scsi_tgt_port_write_mbytes.attr, 1053 &target_stat_scsi_tgt_port_write_mbytes.attr,
1054 &target_stat_scsi_tgt_port_read_mbytes.attr, 1054 &target_stat_scsi_tgt_port_read_mbytes.attr,
1055 &target_stat_scsi_tgt_port_hs_in_cmds.attr, 1055 &target_stat_scsi_tgt_port_hs_in_cmds.attr,
1056 NULL, 1056 NULL,
1057 }; 1057 };
1058 1058
1059 static struct configfs_item_operations target_stat_scsi_tgt_port_attrib_ops = { 1059 static struct configfs_item_operations target_stat_scsi_tgt_port_attrib_ops = {
1060 .show_attribute = target_stat_scsi_tgt_port_attr_show, 1060 .show_attribute = target_stat_scsi_tgt_port_attr_show,
1061 .store_attribute = target_stat_scsi_tgt_port_attr_store, 1061 .store_attribute = target_stat_scsi_tgt_port_attr_store,
1062 }; 1062 };
1063 1063
1064 static struct config_item_type target_stat_scsi_tgt_port_cit = { 1064 static struct config_item_type target_stat_scsi_tgt_port_cit = {
1065 .ct_item_ops = &target_stat_scsi_tgt_port_attrib_ops, 1065 .ct_item_ops = &target_stat_scsi_tgt_port_attrib_ops,
1066 .ct_attrs = target_stat_scsi_tgt_port_attrs, 1066 .ct_attrs = target_stat_scsi_tgt_port_attrs,
1067 .ct_owner = THIS_MODULE, 1067 .ct_owner = THIS_MODULE,
1068 }; 1068 };
1069 1069
1070 /* 1070 /*
1071 * SCSI Transport Table 1071 * SCSI Transport Table
1072 */ 1072 */
1073 1073
1074 CONFIGFS_EATTR_STRUCT(target_stat_scsi_transport, se_port_stat_grps); 1074 CONFIGFS_EATTR_STRUCT(target_stat_scsi_transport, se_port_stat_grps);
1075 #define DEV_STAT_SCSI_TRANSPORT_ATTR(_name, _mode) \ 1075 #define DEV_STAT_SCSI_TRANSPORT_ATTR(_name, _mode) \
1076 static struct target_stat_scsi_transport_attribute \ 1076 static struct target_stat_scsi_transport_attribute \
1077 target_stat_scsi_transport_##_name = \ 1077 target_stat_scsi_transport_##_name = \
1078 __CONFIGFS_EATTR(_name, _mode, \ 1078 __CONFIGFS_EATTR(_name, _mode, \
1079 target_stat_scsi_transport_show_attr_##_name, \ 1079 target_stat_scsi_transport_show_attr_##_name, \
1080 target_stat_scsi_transport_store_attr_##_name); 1080 target_stat_scsi_transport_store_attr_##_name);
1081 1081
1082 #define DEV_STAT_SCSI_TRANSPORT_ATTR_RO(_name) \ 1082 #define DEV_STAT_SCSI_TRANSPORT_ATTR_RO(_name) \
1083 static struct target_stat_scsi_transport_attribute \ 1083 static struct target_stat_scsi_transport_attribute \
1084 target_stat_scsi_transport_##_name = \ 1084 target_stat_scsi_transport_##_name = \
1085 __CONFIGFS_EATTR_RO(_name, \ 1085 __CONFIGFS_EATTR_RO(_name, \
1086 target_stat_scsi_transport_show_attr_##_name); 1086 target_stat_scsi_transport_show_attr_##_name);
1087 1087
1088 static ssize_t target_stat_scsi_transport_show_attr_inst( 1088 static ssize_t target_stat_scsi_transport_show_attr_inst(
1089 struct se_port_stat_grps *pgrps, char *page) 1089 struct se_port_stat_grps *pgrps, char *page)
1090 { 1090 {
1091 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); 1091 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
1092 struct se_device *dev = lun->lun_se_dev; 1092 struct se_device *dev = lun->lun_se_dev;
1093 struct se_port *sep; 1093 struct se_port *sep;
1094 struct se_hba *hba; 1094 struct se_hba *hba;
1095 ssize_t ret; 1095 ssize_t ret;
1096 1096
1097 spin_lock(&lun->lun_sep_lock); 1097 spin_lock(&lun->lun_sep_lock);
1098 sep = lun->lun_sep; 1098 sep = lun->lun_sep;
1099 if (!sep) { 1099 if (!sep) {
1100 spin_unlock(&lun->lun_sep_lock); 1100 spin_unlock(&lun->lun_sep_lock);
1101 return -ENODEV; 1101 return -ENODEV;
1102 } 1102 }
1103 1103
1104 hba = dev->se_hba; 1104 hba = dev->se_hba;
1105 ret = snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index); 1105 ret = snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
1106 spin_unlock(&lun->lun_sep_lock); 1106 spin_unlock(&lun->lun_sep_lock);
1107 return ret; 1107 return ret;
1108 } 1108 }
1109 DEV_STAT_SCSI_TRANSPORT_ATTR_RO(inst); 1109 DEV_STAT_SCSI_TRANSPORT_ATTR_RO(inst);
1110 1110
1111 static ssize_t target_stat_scsi_transport_show_attr_device( 1111 static ssize_t target_stat_scsi_transport_show_attr_device(
1112 struct se_port_stat_grps *pgrps, char *page) 1112 struct se_port_stat_grps *pgrps, char *page)
1113 { 1113 {
1114 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); 1114 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
1115 struct se_port *sep; 1115 struct se_port *sep;
1116 struct se_portal_group *tpg; 1116 struct se_portal_group *tpg;
1117 ssize_t ret; 1117 ssize_t ret;
1118 1118
1119 spin_lock(&lun->lun_sep_lock); 1119 spin_lock(&lun->lun_sep_lock);
1120 sep = lun->lun_sep; 1120 sep = lun->lun_sep;
1121 if (!sep) { 1121 if (!sep) {
1122 spin_unlock(&lun->lun_sep_lock); 1122 spin_unlock(&lun->lun_sep_lock);
1123 return -ENODEV; 1123 return -ENODEV;
1124 } 1124 }
1125 tpg = sep->sep_tpg; 1125 tpg = sep->sep_tpg;
1126 /* scsiTransportType */ 1126 /* scsiTransportType */
1127 ret = snprintf(page, PAGE_SIZE, "scsiTransport%s\n", 1127 ret = snprintf(page, PAGE_SIZE, "scsiTransport%s\n",
1128 tpg->se_tpg_tfo->get_fabric_name()); 1128 tpg->se_tpg_tfo->get_fabric_name());
1129 spin_unlock(&lun->lun_sep_lock); 1129 spin_unlock(&lun->lun_sep_lock);
1130 return ret; 1130 return ret;
1131 } 1131 }
1132 DEV_STAT_SCSI_TRANSPORT_ATTR_RO(device); 1132 DEV_STAT_SCSI_TRANSPORT_ATTR_RO(device);
1133 1133
1134 static ssize_t target_stat_scsi_transport_show_attr_indx( 1134 static ssize_t target_stat_scsi_transport_show_attr_indx(
1135 struct se_port_stat_grps *pgrps, char *page) 1135 struct se_port_stat_grps *pgrps, char *page)
1136 { 1136 {
1137 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); 1137 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
1138 struct se_port *sep; 1138 struct se_port *sep;
1139 struct se_portal_group *tpg; 1139 struct se_portal_group *tpg;
1140 ssize_t ret; 1140 ssize_t ret;
1141 1141
1142 spin_lock(&lun->lun_sep_lock); 1142 spin_lock(&lun->lun_sep_lock);
1143 sep = lun->lun_sep; 1143 sep = lun->lun_sep;
1144 if (!sep) { 1144 if (!sep) {
1145 spin_unlock(&lun->lun_sep_lock); 1145 spin_unlock(&lun->lun_sep_lock);
1146 return -ENODEV; 1146 return -ENODEV;
1147 } 1147 }
1148 tpg = sep->sep_tpg; 1148 tpg = sep->sep_tpg;
1149 ret = snprintf(page, PAGE_SIZE, "%u\n", 1149 ret = snprintf(page, PAGE_SIZE, "%u\n",
1150 tpg->se_tpg_tfo->tpg_get_inst_index(tpg)); 1150 tpg->se_tpg_tfo->tpg_get_inst_index(tpg));
1151 spin_unlock(&lun->lun_sep_lock); 1151 spin_unlock(&lun->lun_sep_lock);
1152 return ret; 1152 return ret;
1153 } 1153 }
1154 DEV_STAT_SCSI_TRANSPORT_ATTR_RO(indx); 1154 DEV_STAT_SCSI_TRANSPORT_ATTR_RO(indx);
1155 1155
1156 static ssize_t target_stat_scsi_transport_show_attr_dev_name( 1156 static ssize_t target_stat_scsi_transport_show_attr_dev_name(
1157 struct se_port_stat_grps *pgrps, char *page) 1157 struct se_port_stat_grps *pgrps, char *page)
1158 { 1158 {
1159 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); 1159 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
1160 struct se_device *dev = lun->lun_se_dev; 1160 struct se_device *dev = lun->lun_se_dev;
1161 struct se_port *sep; 1161 struct se_port *sep;
1162 struct se_portal_group *tpg; 1162 struct se_portal_group *tpg;
1163 struct t10_wwn *wwn; 1163 struct t10_wwn *wwn;
1164 ssize_t ret; 1164 ssize_t ret;
1165 1165
1166 spin_lock(&lun->lun_sep_lock); 1166 spin_lock(&lun->lun_sep_lock);
1167 sep = lun->lun_sep; 1167 sep = lun->lun_sep;
1168 if (!sep) { 1168 if (!sep) {
1169 spin_unlock(&lun->lun_sep_lock); 1169 spin_unlock(&lun->lun_sep_lock);
1170 return -ENODEV; 1170 return -ENODEV;
1171 } 1171 }
1172 tpg = sep->sep_tpg; 1172 tpg = sep->sep_tpg;
1173 wwn = &dev->se_sub_dev->t10_wwn; 1173 wwn = &dev->se_sub_dev->t10_wwn;
1174 /* scsiTransportDevName */ 1174 /* scsiTransportDevName */
1175 ret = snprintf(page, PAGE_SIZE, "%s+%s\n", 1175 ret = snprintf(page, PAGE_SIZE, "%s+%s\n",
1176 tpg->se_tpg_tfo->tpg_get_wwn(tpg), 1176 tpg->se_tpg_tfo->tpg_get_wwn(tpg),
1177 (strlen(wwn->unit_serial)) ? wwn->unit_serial : 1177 (strlen(wwn->unit_serial)) ? wwn->unit_serial :
1178 wwn->vendor); 1178 wwn->vendor);
1179 spin_unlock(&lun->lun_sep_lock); 1179 spin_unlock(&lun->lun_sep_lock);
1180 return ret; 1180 return ret;
1181 } 1181 }
1182 DEV_STAT_SCSI_TRANSPORT_ATTR_RO(dev_name); 1182 DEV_STAT_SCSI_TRANSPORT_ATTR_RO(dev_name);
1183 1183
1184 CONFIGFS_EATTR_OPS(target_stat_scsi_transport, se_port_stat_grps, 1184 CONFIGFS_EATTR_OPS(target_stat_scsi_transport, se_port_stat_grps,
1185 scsi_transport_group); 1185 scsi_transport_group);
1186 1186
1187 static struct configfs_attribute *target_stat_scsi_transport_attrs[] = { 1187 static struct configfs_attribute *target_stat_scsi_transport_attrs[] = {
1188 &target_stat_scsi_transport_inst.attr, 1188 &target_stat_scsi_transport_inst.attr,
1189 &target_stat_scsi_transport_device.attr, 1189 &target_stat_scsi_transport_device.attr,
1190 &target_stat_scsi_transport_indx.attr, 1190 &target_stat_scsi_transport_indx.attr,
1191 &target_stat_scsi_transport_dev_name.attr, 1191 &target_stat_scsi_transport_dev_name.attr,
1192 NULL, 1192 NULL,
1193 }; 1193 };
1194 1194
1195 static struct configfs_item_operations target_stat_scsi_transport_attrib_ops = { 1195 static struct configfs_item_operations target_stat_scsi_transport_attrib_ops = {
1196 .show_attribute = target_stat_scsi_transport_attr_show, 1196 .show_attribute = target_stat_scsi_transport_attr_show,
1197 .store_attribute = target_stat_scsi_transport_attr_store, 1197 .store_attribute = target_stat_scsi_transport_attr_store,
1198 }; 1198 };
1199 1199
1200 static struct config_item_type target_stat_scsi_transport_cit = { 1200 static struct config_item_type target_stat_scsi_transport_cit = {
1201 .ct_item_ops = &target_stat_scsi_transport_attrib_ops, 1201 .ct_item_ops = &target_stat_scsi_transport_attrib_ops,
1202 .ct_attrs = target_stat_scsi_transport_attrs, 1202 .ct_attrs = target_stat_scsi_transport_attrs,
1203 .ct_owner = THIS_MODULE, 1203 .ct_owner = THIS_MODULE,
1204 }; 1204 };
1205 1205
1206 /* 1206 /*
1207 * Called from target_core_fabric_configfs.c:target_fabric_make_lun() to setup 1207 * Called from target_core_fabric_configfs.c:target_fabric_make_lun() to setup
1208 * the target port statistics groups + configfs CITs located in target_core_stat.c 1208 * the target port statistics groups + configfs CITs located in target_core_stat.c
1209 */ 1209 */
1210 void target_stat_setup_port_default_groups(struct se_lun *lun) 1210 void target_stat_setup_port_default_groups(struct se_lun *lun)
1211 { 1211 {
1212 struct config_group *port_stat_grp = &lun->port_stat_grps.stat_group; 1212 struct config_group *port_stat_grp = &lun->port_stat_grps.stat_group;
1213 1213
1214 config_group_init_type_name(&lun->port_stat_grps.scsi_port_group, 1214 config_group_init_type_name(&lun->port_stat_grps.scsi_port_group,
1215 "scsi_port", &target_stat_scsi_port_cit); 1215 "scsi_port", &target_stat_scsi_port_cit);
1216 config_group_init_type_name(&lun->port_stat_grps.scsi_tgt_port_group, 1216 config_group_init_type_name(&lun->port_stat_grps.scsi_tgt_port_group,
1217 "scsi_tgt_port", &target_stat_scsi_tgt_port_cit); 1217 "scsi_tgt_port", &target_stat_scsi_tgt_port_cit);
1218 config_group_init_type_name(&lun->port_stat_grps.scsi_transport_group, 1218 config_group_init_type_name(&lun->port_stat_grps.scsi_transport_group,
1219 "scsi_transport", &target_stat_scsi_transport_cit); 1219 "scsi_transport", &target_stat_scsi_transport_cit);
1220 1220
1221 port_stat_grp->default_groups[0] = &lun->port_stat_grps.scsi_port_group; 1221 port_stat_grp->default_groups[0] = &lun->port_stat_grps.scsi_port_group;
1222 port_stat_grp->default_groups[1] = &lun->port_stat_grps.scsi_tgt_port_group; 1222 port_stat_grp->default_groups[1] = &lun->port_stat_grps.scsi_tgt_port_group;
1223 port_stat_grp->default_groups[2] = &lun->port_stat_grps.scsi_transport_group; 1223 port_stat_grp->default_groups[2] = &lun->port_stat_grps.scsi_transport_group;
1224 port_stat_grp->default_groups[3] = NULL; 1224 port_stat_grp->default_groups[3] = NULL;
1225 } 1225 }
1226 1226
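[Note: as with the device groups, the three port statistics groups are parented by the per-LUN statistics group and appear once the fabric LUN is created; illustratively (paths are an assumption, mirroring the device case):

	.../$FABRIC/$WWN/tpgt_$TAG/lun/lun_$N/statistics/scsi_port
	.../$FABRIC/$WWN/tpgt_$TAG/lun/lun_$N/statistics/scsi_tgt_port
	.../$FABRIC/$WWN/tpgt_$TAG/lun/lun_$N/statistics/scsi_transport]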
1227 /* 1227 /*
1228 * SCSI Authorized Initiator Table 1228 * SCSI Authorized Initiator Table
1229 */ 1229 */
1230 1230
1231 CONFIGFS_EATTR_STRUCT(target_stat_scsi_auth_intr, se_ml_stat_grps); 1231 CONFIGFS_EATTR_STRUCT(target_stat_scsi_auth_intr, se_ml_stat_grps);
1232 #define DEV_STAT_SCSI_AUTH_INTR_ATTR(_name, _mode) \ 1232 #define DEV_STAT_SCSI_AUTH_INTR_ATTR(_name, _mode) \
1233 static struct target_stat_scsi_auth_intr_attribute \ 1233 static struct target_stat_scsi_auth_intr_attribute \
1234 target_stat_scsi_auth_intr_##_name = \ 1234 target_stat_scsi_auth_intr_##_name = \
1235 __CONFIGFS_EATTR(_name, _mode, \ 1235 __CONFIGFS_EATTR(_name, _mode, \
1236 target_stat_scsi_auth_intr_show_attr_##_name, \ 1236 target_stat_scsi_auth_intr_show_attr_##_name, \
1237 target_stat_scsi_auth_intr_store_attr_##_name); 1237 target_stat_scsi_auth_intr_store_attr_##_name);
1238 1238
1239 #define DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(_name) \ 1239 #define DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(_name) \
1240 static struct target_stat_scsi_auth_intr_attribute \ 1240 static struct target_stat_scsi_auth_intr_attribute \
1241 target_stat_scsi_auth_intr_##_name = \ 1241 target_stat_scsi_auth_intr_##_name = \
1242 __CONFIGFS_EATTR_RO(_name, \ 1242 __CONFIGFS_EATTR_RO(_name, \
1243 target_stat_scsi_auth_intr_show_attr_##_name); 1243 target_stat_scsi_auth_intr_show_attr_##_name);
1244 1244
1245 static ssize_t target_stat_scsi_auth_intr_show_attr_inst( 1245 static ssize_t target_stat_scsi_auth_intr_show_attr_inst(
1246 struct se_ml_stat_grps *lgrps, char *page) 1246 struct se_ml_stat_grps *lgrps, char *page)
1247 { 1247 {
1248 struct se_lun_acl *lacl = container_of(lgrps, 1248 struct se_lun_acl *lacl = container_of(lgrps,
1249 struct se_lun_acl, ml_stat_grps); 1249 struct se_lun_acl, ml_stat_grps);
1250 struct se_node_acl *nacl = lacl->se_lun_nacl; 1250 struct se_node_acl *nacl = lacl->se_lun_nacl;
1251 struct se_dev_entry *deve; 1251 struct se_dev_entry *deve;
1252 struct se_portal_group *tpg; 1252 struct se_portal_group *tpg;
1253 ssize_t ret; 1253 ssize_t ret;
1254 1254
1255 spin_lock_irq(&nacl->device_list_lock); 1255 spin_lock_irq(&nacl->device_list_lock);
1256 deve = &nacl->device_list[lacl->mapped_lun]; 1256 deve = &nacl->device_list[lacl->mapped_lun];
1257 if (!deve->se_lun || !deve->se_lun_acl) { 1257 if (!deve->se_lun || !deve->se_lun_acl) {
1258 spin_unlock_irq(&nacl->device_list_lock); 1258 spin_unlock_irq(&nacl->device_list_lock);
1259 return -ENODEV; 1259 return -ENODEV;
1260 } 1260 }
1261 tpg = nacl->se_tpg; 1261 tpg = nacl->se_tpg;
1262 /* scsiInstIndex */ 1262 /* scsiInstIndex */
1263 ret = snprintf(page, PAGE_SIZE, "%u\n", 1263 ret = snprintf(page, PAGE_SIZE, "%u\n",
1264 tpg->se_tpg_tfo->tpg_get_inst_index(tpg)); 1264 tpg->se_tpg_tfo->tpg_get_inst_index(tpg));
1265 spin_unlock_irq(&nacl->device_list_lock); 1265 spin_unlock_irq(&nacl->device_list_lock);
1266 return ret; 1266 return ret;
1267 } 1267 }
1268 DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(inst); 1268 DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(inst);
1269 1269
1270 static ssize_t target_stat_scsi_auth_intr_show_attr_dev( 1270 static ssize_t target_stat_scsi_auth_intr_show_attr_dev(
1271 struct se_ml_stat_grps *lgrps, char *page) 1271 struct se_ml_stat_grps *lgrps, char *page)
1272 { 1272 {
1273 struct se_lun_acl *lacl = container_of(lgrps, 1273 struct se_lun_acl *lacl = container_of(lgrps,
1274 struct se_lun_acl, ml_stat_grps); 1274 struct se_lun_acl, ml_stat_grps);
1275 struct se_node_acl *nacl = lacl->se_lun_nacl; 1275 struct se_node_acl *nacl = lacl->se_lun_nacl;
1276 struct se_dev_entry *deve; 1276 struct se_dev_entry *deve;
1277 struct se_lun *lun; 1277 struct se_lun *lun;
1278 struct se_portal_group *tpg; 1278 struct se_portal_group *tpg;
1279 ssize_t ret; 1279 ssize_t ret;
1280 1280
1281 spin_lock_irq(&nacl->device_list_lock); 1281 spin_lock_irq(&nacl->device_list_lock);
1282 deve = &nacl->device_list[lacl->mapped_lun]; 1282 deve = &nacl->device_list[lacl->mapped_lun];
1283 if (!deve->se_lun || !deve->se_lun_acl) { 1283 if (!deve->se_lun || !deve->se_lun_acl) {
1284 spin_unlock_irq(&nacl->device_list_lock); 1284 spin_unlock_irq(&nacl->device_list_lock);
1285 return -ENODEV; 1285 return -ENODEV;
1286 } 1286 }
1287 tpg = nacl->se_tpg; 1287 tpg = nacl->se_tpg;
1288 lun = deve->se_lun; 1288 lun = deve->se_lun;
1289 /* scsiDeviceIndex */ 1289 /* scsiDeviceIndex */
1290 ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_se_dev->dev_index); 1290 ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_se_dev->dev_index);
1291 spin_unlock_irq(&nacl->device_list_lock); 1291 spin_unlock_irq(&nacl->device_list_lock);
1292 return ret; 1292 return ret;
1293 } 1293 }
1294 DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(dev); 1294 DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(dev);
1295 1295
1296 static ssize_t target_stat_scsi_auth_intr_show_attr_port( 1296 static ssize_t target_stat_scsi_auth_intr_show_attr_port(
1297 struct se_ml_stat_grps *lgrps, char *page) 1297 struct se_ml_stat_grps *lgrps, char *page)
1298 { 1298 {
1299 struct se_lun_acl *lacl = container_of(lgrps, 1299 struct se_lun_acl *lacl = container_of(lgrps,
1300 struct se_lun_acl, ml_stat_grps); 1300 struct se_lun_acl, ml_stat_grps);
1301 struct se_node_acl *nacl = lacl->se_lun_nacl; 1301 struct se_node_acl *nacl = lacl->se_lun_nacl;
1302 struct se_dev_entry *deve; 1302 struct se_dev_entry *deve;
1303 struct se_portal_group *tpg; 1303 struct se_portal_group *tpg;
1304 ssize_t ret; 1304 ssize_t ret;
1305 1305
1306 spin_lock_irq(&nacl->device_list_lock); 1306 spin_lock_irq(&nacl->device_list_lock);
1307 deve = &nacl->device_list[lacl->mapped_lun]; 1307 deve = &nacl->device_list[lacl->mapped_lun];
1308 if (!deve->se_lun || !deve->se_lun_acl) { 1308 if (!deve->se_lun || !deve->se_lun_acl) {
1309 spin_unlock_irq(&nacl->device_list_lock); 1309 spin_unlock_irq(&nacl->device_list_lock);
1310 return -ENODEV; 1310 return -ENODEV;
1311 } 1311 }
1312 tpg = nacl->se_tpg; 1312 tpg = nacl->se_tpg;
1313 /* scsiAuthIntrTgtPortIndex */ 1313 /* scsiAuthIntrTgtPortIndex */
1314 ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->se_tpg_tfo->tpg_get_tag(tpg)); 1314 ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->se_tpg_tfo->tpg_get_tag(tpg));
1315 spin_unlock_irq(&nacl->device_list_lock); 1315 spin_unlock_irq(&nacl->device_list_lock);
1316 return ret; 1316 return ret;
1317 } 1317 }
1318 DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(port); 1318 DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(port);
1319 1319
1320 static ssize_t target_stat_scsi_auth_intr_show_attr_indx( 1320 static ssize_t target_stat_scsi_auth_intr_show_attr_indx(
1321 struct se_ml_stat_grps *lgrps, char *page) 1321 struct se_ml_stat_grps *lgrps, char *page)
1322 { 1322 {
1323 struct se_lun_acl *lacl = container_of(lgrps, 1323 struct se_lun_acl *lacl = container_of(lgrps,
1324 struct se_lun_acl, ml_stat_grps); 1324 struct se_lun_acl, ml_stat_grps);
1325 struct se_node_acl *nacl = lacl->se_lun_nacl; 1325 struct se_node_acl *nacl = lacl->se_lun_nacl;
1326 struct se_dev_entry *deve; 1326 struct se_dev_entry *deve;
1327 ssize_t ret; 1327 ssize_t ret;
1328 1328
1329 spin_lock_irq(&nacl->device_list_lock); 1329 spin_lock_irq(&nacl->device_list_lock);
1330 deve = &nacl->device_list[lacl->mapped_lun]; 1330 deve = &nacl->device_list[lacl->mapped_lun];
1331 if (!deve->se_lun || !deve->se_lun_acl) { 1331 if (!deve->se_lun || !deve->se_lun_acl) {
1332 spin_unlock_irq(&nacl->device_list_lock); 1332 spin_unlock_irq(&nacl->device_list_lock);
1333 return -ENODEV; 1333 return -ENODEV;
1334 } 1334 }
1335 /* scsiAuthIntrIndex */ 1335 /* scsiAuthIntrIndex */
1336 ret = snprintf(page, PAGE_SIZE, "%u\n", nacl->acl_index); 1336 ret = snprintf(page, PAGE_SIZE, "%u\n", nacl->acl_index);
1337 spin_unlock_irq(&nacl->device_list_lock); 1337 spin_unlock_irq(&nacl->device_list_lock);
1338 return ret; 1338 return ret;
1339 } 1339 }
1340 DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(indx); 1340 DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(indx);
1341 1341
1342 static ssize_t target_stat_scsi_auth_intr_show_attr_dev_or_port( 1342 static ssize_t target_stat_scsi_auth_intr_show_attr_dev_or_port(
1343 struct se_ml_stat_grps *lgrps, char *page) 1343 struct se_ml_stat_grps *lgrps, char *page)
1344 { 1344 {
1345 struct se_lun_acl *lacl = container_of(lgrps, 1345 struct se_lun_acl *lacl = container_of(lgrps,
1346 struct se_lun_acl, ml_stat_grps); 1346 struct se_lun_acl, ml_stat_grps);
1347 struct se_node_acl *nacl = lacl->se_lun_nacl; 1347 struct se_node_acl *nacl = lacl->se_lun_nacl;
1348 struct se_dev_entry *deve; 1348 struct se_dev_entry *deve;
1349 ssize_t ret; 1349 ssize_t ret;
1350 1350
1351 spin_lock_irq(&nacl->device_list_lock); 1351 spin_lock_irq(&nacl->device_list_lock);
1352 deve = &nacl->device_list[lacl->mapped_lun]; 1352 deve = &nacl->device_list[lacl->mapped_lun];
1353 if (!deve->se_lun || !deve->se_lun_acl) { 1353 if (!deve->se_lun || !deve->se_lun_acl) {
1354 spin_unlock_irq(&nacl->device_list_lock); 1354 spin_unlock_irq(&nacl->device_list_lock);
1355 return -ENODEV; 1355 return -ENODEV;
1356 } 1356 }
1357 /* scsiAuthIntrDevOrPort */ 1357 /* scsiAuthIntrDevOrPort */
1358 ret = snprintf(page, PAGE_SIZE, "%u\n", 1); 1358 ret = snprintf(page, PAGE_SIZE, "%u\n", 1);
1359 spin_unlock_irq(&nacl->device_list_lock); 1359 spin_unlock_irq(&nacl->device_list_lock);
1360 return ret; 1360 return ret;
1361 } 1361 }
1362 DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(dev_or_port); 1362 DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(dev_or_port);
1363 1363
1364 static ssize_t target_stat_scsi_auth_intr_show_attr_intr_name( 1364 static ssize_t target_stat_scsi_auth_intr_show_attr_intr_name(
1365 struct se_ml_stat_grps *lgrps, char *page) 1365 struct se_ml_stat_grps *lgrps, char *page)
1366 { 1366 {
1367 struct se_lun_acl *lacl = container_of(lgrps, 1367 struct se_lun_acl *lacl = container_of(lgrps,
1368 struct se_lun_acl, ml_stat_grps); 1368 struct se_lun_acl, ml_stat_grps);
1369 struct se_node_acl *nacl = lacl->se_lun_nacl; 1369 struct se_node_acl *nacl = lacl->se_lun_nacl;
1370 struct se_dev_entry *deve; 1370 struct se_dev_entry *deve;
1371 ssize_t ret; 1371 ssize_t ret;
1372 1372
1373 spin_lock_irq(&nacl->device_list_lock); 1373 spin_lock_irq(&nacl->device_list_lock);
1374 deve = &nacl->device_list[lacl->mapped_lun]; 1374 deve = &nacl->device_list[lacl->mapped_lun];
1375 if (!deve->se_lun || !deve->se_lun_acl) { 1375 if (!deve->se_lun || !deve->se_lun_acl) {
1376 spin_unlock_irq(&nacl->device_list_lock); 1376 spin_unlock_irq(&nacl->device_list_lock);
1377 return -ENODEV; 1377 return -ENODEV;
1378 } 1378 }
1379 /* scsiAuthIntrName */ 1379 /* scsiAuthIntrName */
1380 ret = snprintf(page, PAGE_SIZE, "%s\n", nacl->initiatorname); 1380 ret = snprintf(page, PAGE_SIZE, "%s\n", nacl->initiatorname);
1381 spin_unlock_irq(&nacl->device_list_lock); 1381 spin_unlock_irq(&nacl->device_list_lock);
1382 return ret; 1382 return ret;
1383 } 1383 }
1384 DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(intr_name); 1384 DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(intr_name);
1385 1385
1386 static ssize_t target_stat_scsi_auth_intr_show_attr_map_indx( 1386 static ssize_t target_stat_scsi_auth_intr_show_attr_map_indx(
1387 struct se_ml_stat_grps *lgrps, char *page) 1387 struct se_ml_stat_grps *lgrps, char *page)
1388 { 1388 {
1389 struct se_lun_acl *lacl = container_of(lgrps, 1389 struct se_lun_acl *lacl = container_of(lgrps,
1390 struct se_lun_acl, ml_stat_grps); 1390 struct se_lun_acl, ml_stat_grps);
1391 struct se_node_acl *nacl = lacl->se_lun_nacl; 1391 struct se_node_acl *nacl = lacl->se_lun_nacl;
1392 struct se_dev_entry *deve; 1392 struct se_dev_entry *deve;
1393 ssize_t ret; 1393 ssize_t ret;
1394 1394
1395 spin_lock_irq(&nacl->device_list_lock); 1395 spin_lock_irq(&nacl->device_list_lock);
1396 deve = &nacl->device_list[lacl->mapped_lun]; 1396 deve = &nacl->device_list[lacl->mapped_lun];
1397 if (!deve->se_lun || !deve->se_lun_acl) { 1397 if (!deve->se_lun || !deve->se_lun_acl) {
1398 spin_unlock_irq(&nacl->device_list_lock); 1398 spin_unlock_irq(&nacl->device_list_lock);
1399 return -ENODEV; 1399 return -ENODEV;
1400 } 1400 }
1401 /* FIXME: scsiAuthIntrLunMapIndex */ 1401 /* FIXME: scsiAuthIntrLunMapIndex */
1402 ret = snprintf(page, PAGE_SIZE, "%u\n", 0); 1402 ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
1403 spin_unlock_irq(&nacl->device_list_lock); 1403 spin_unlock_irq(&nacl->device_list_lock);
1404 return ret; 1404 return ret;
1405 } 1405 }
1406 DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(map_indx); 1406 DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(map_indx);
1407 1407
1408 static ssize_t target_stat_scsi_auth_intr_show_attr_att_count( 1408 static ssize_t target_stat_scsi_auth_intr_show_attr_att_count(
1409 struct se_ml_stat_grps *lgrps, char *page) 1409 struct se_ml_stat_grps *lgrps, char *page)
1410 { 1410 {
1411 struct se_lun_acl *lacl = container_of(lgrps, 1411 struct se_lun_acl *lacl = container_of(lgrps,
1412 struct se_lun_acl, ml_stat_grps); 1412 struct se_lun_acl, ml_stat_grps);
1413 struct se_node_acl *nacl = lacl->se_lun_nacl; 1413 struct se_node_acl *nacl = lacl->se_lun_nacl;
1414 struct se_dev_entry *deve; 1414 struct se_dev_entry *deve;
1415 ssize_t ret; 1415 ssize_t ret;
1416 1416
1417 spin_lock_irq(&nacl->device_list_lock); 1417 spin_lock_irq(&nacl->device_list_lock);
1418 deve = &nacl->device_list[lacl->mapped_lun]; 1418 deve = &nacl->device_list[lacl->mapped_lun];
1419 if (!deve->se_lun || !deve->se_lun_acl) { 1419 if (!deve->se_lun || !deve->se_lun_acl) {
1420 spin_unlock_irq(&nacl->device_list_lock); 1420 spin_unlock_irq(&nacl->device_list_lock);
1421 return -ENODEV; 1421 return -ENODEV;
1422 } 1422 }
1423 /* scsiAuthIntrAttachedTimes */ 1423 /* scsiAuthIntrAttachedTimes */
1424 ret = snprintf(page, PAGE_SIZE, "%u\n", deve->attach_count); 1424 ret = snprintf(page, PAGE_SIZE, "%u\n", deve->attach_count);
1425 spin_unlock_irq(&nacl->device_list_lock); 1425 spin_unlock_irq(&nacl->device_list_lock);
1426 return ret; 1426 return ret;
1427 } 1427 }
1428 DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(att_count); 1428 DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(att_count);
1429 1429
1430 static ssize_t target_stat_scsi_auth_intr_show_attr_num_cmds( 1430 static ssize_t target_stat_scsi_auth_intr_show_attr_num_cmds(
1431 struct se_ml_stat_grps *lgrps, char *page) 1431 struct se_ml_stat_grps *lgrps, char *page)
1432 { 1432 {
1433 struct se_lun_acl *lacl = container_of(lgrps, 1433 struct se_lun_acl *lacl = container_of(lgrps,
1434 struct se_lun_acl, ml_stat_grps); 1434 struct se_lun_acl, ml_stat_grps);
1435 struct se_node_acl *nacl = lacl->se_lun_nacl; 1435 struct se_node_acl *nacl = lacl->se_lun_nacl;
1436 struct se_dev_entry *deve; 1436 struct se_dev_entry *deve;
1437 ssize_t ret; 1437 ssize_t ret;
1438 1438
1439 spin_lock_irq(&nacl->device_list_lock); 1439 spin_lock_irq(&nacl->device_list_lock);
1440 deve = &nacl->device_list[lacl->mapped_lun]; 1440 deve = &nacl->device_list[lacl->mapped_lun];
1441 if (!deve->se_lun || !deve->se_lun_acl) { 1441 if (!deve->se_lun || !deve->se_lun_acl) {
1442 spin_unlock_irq(&nacl->device_list_lock); 1442 spin_unlock_irq(&nacl->device_list_lock);
1443 return -ENODEV; 1443 return -ENODEV;
1444 } 1444 }
1445 /* scsiAuthIntrOutCommands */ 1445 /* scsiAuthIntrOutCommands */
1446 ret = snprintf(page, PAGE_SIZE, "%u\n", deve->total_cmds); 1446 ret = snprintf(page, PAGE_SIZE, "%u\n", deve->total_cmds);
1447 spin_unlock_irq(&nacl->device_list_lock); 1447 spin_unlock_irq(&nacl->device_list_lock);
1448 return ret; 1448 return ret;
1449 } 1449 }
1450 DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(num_cmds); 1450 DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(num_cmds);
1451 1451
1452 static ssize_t target_stat_scsi_auth_intr_show_attr_read_mbytes( 1452 static ssize_t target_stat_scsi_auth_intr_show_attr_read_mbytes(
1453 struct se_ml_stat_grps *lgrps, char *page) 1453 struct se_ml_stat_grps *lgrps, char *page)
1454 { 1454 {
1455 struct se_lun_acl *lacl = container_of(lgrps, 1455 struct se_lun_acl *lacl = container_of(lgrps,
1456 struct se_lun_acl, ml_stat_grps); 1456 struct se_lun_acl, ml_stat_grps);
1457 struct se_node_acl *nacl = lacl->se_lun_nacl; 1457 struct se_node_acl *nacl = lacl->se_lun_nacl;
1458 struct se_dev_entry *deve; 1458 struct se_dev_entry *deve;
1459 ssize_t ret; 1459 ssize_t ret;
1460 1460
1461 spin_lock_irq(&nacl->device_list_lock); 1461 spin_lock_irq(&nacl->device_list_lock);
1462 deve = &nacl->device_list[lacl->mapped_lun]; 1462 deve = &nacl->device_list[lacl->mapped_lun];
1463 if (!deve->se_lun || !deve->se_lun_acl) { 1463 if (!deve->se_lun || !deve->se_lun_acl) {
1464 spin_unlock_irq(&nacl->device_list_lock); 1464 spin_unlock_irq(&nacl->device_list_lock);
1465 return -ENODEV; 1465 return -ENODEV;
1466 } 1466 }
1467 /* scsiAuthIntrReadMegaBytes */ 1467 /* scsiAuthIntrReadMegaBytes */
1468 ret = snprintf(page, PAGE_SIZE, "%u\n", (u32)(deve->read_bytes >> 20)); 1468 ret = snprintf(page, PAGE_SIZE, "%u\n", (u32)(deve->read_bytes >> 20));
1469 spin_unlock_irq(&nacl->device_list_lock); 1469 spin_unlock_irq(&nacl->device_list_lock);
1470 return ret; 1470 return ret;
1471 } 1471 }
1472 DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(read_mbytes); 1472 DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(read_mbytes);
1473 1473
1474 static ssize_t target_stat_scsi_auth_intr_show_attr_write_mbytes( 1474 static ssize_t target_stat_scsi_auth_intr_show_attr_write_mbytes(
1475 struct se_ml_stat_grps *lgrps, char *page) 1475 struct se_ml_stat_grps *lgrps, char *page)
1476 { 1476 {
1477 struct se_lun_acl *lacl = container_of(lgrps, 1477 struct se_lun_acl *lacl = container_of(lgrps,
1478 struct se_lun_acl, ml_stat_grps); 1478 struct se_lun_acl, ml_stat_grps);
1479 struct se_node_acl *nacl = lacl->se_lun_nacl; 1479 struct se_node_acl *nacl = lacl->se_lun_nacl;
1480 struct se_dev_entry *deve; 1480 struct se_dev_entry *deve;
1481 ssize_t ret; 1481 ssize_t ret;
1482 1482
1483 spin_lock_irq(&nacl->device_list_lock); 1483 spin_lock_irq(&nacl->device_list_lock);
1484 deve = &nacl->device_list[lacl->mapped_lun]; 1484 deve = &nacl->device_list[lacl->mapped_lun];
1485 if (!deve->se_lun || !deve->se_lun_acl) { 1485 if (!deve->se_lun || !deve->se_lun_acl) {
1486 spin_unlock_irq(&nacl->device_list_lock); 1486 spin_unlock_irq(&nacl->device_list_lock);
1487 return -ENODEV; 1487 return -ENODEV;
1488 } 1488 }
1489 /* scsiAuthIntrWrittenMegaBytes */ 1489 /* scsiAuthIntrWrittenMegaBytes */
1490 ret = snprintf(page, PAGE_SIZE, "%u\n", (u32)(deve->write_bytes >> 20)); 1490 ret = snprintf(page, PAGE_SIZE, "%u\n", (u32)(deve->write_bytes >> 20));
1491 spin_unlock_irq(&nacl->device_list_lock); 1491 spin_unlock_irq(&nacl->device_list_lock);
1492 return ret; 1492 return ret;
1493 } 1493 }
1494 DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(write_mbytes); 1494 DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(write_mbytes);
1495 1495
1496 static ssize_t target_stat_scsi_auth_intr_show_attr_hs_num_cmds( 1496 static ssize_t target_stat_scsi_auth_intr_show_attr_hs_num_cmds(
1497 struct se_ml_stat_grps *lgrps, char *page) 1497 struct se_ml_stat_grps *lgrps, char *page)
1498 { 1498 {
1499 struct se_lun_acl *lacl = container_of(lgrps, 1499 struct se_lun_acl *lacl = container_of(lgrps,
1500 struct se_lun_acl, ml_stat_grps); 1500 struct se_lun_acl, ml_stat_grps);
1501 struct se_node_acl *nacl = lacl->se_lun_nacl; 1501 struct se_node_acl *nacl = lacl->se_lun_nacl;
1502 struct se_dev_entry *deve; 1502 struct se_dev_entry *deve;
1503 ssize_t ret; 1503 ssize_t ret;
1504 1504
1505 spin_lock_irq(&nacl->device_list_lock); 1505 spin_lock_irq(&nacl->device_list_lock);
1506 deve = &nacl->device_list[lacl->mapped_lun]; 1506 deve = &nacl->device_list[lacl->mapped_lun];
1507 if (!deve->se_lun || !deve->se_lun_acl) { 1507 if (!deve->se_lun || !deve->se_lun_acl) {
1508 spin_unlock_irq(&nacl->device_list_lock); 1508 spin_unlock_irq(&nacl->device_list_lock);
1509 return -ENODEV; 1509 return -ENODEV;
1510 } 1510 }
1511 /* FIXME: scsiAuthIntrHSOutCommands */ 1511 /* FIXME: scsiAuthIntrHSOutCommands */
1512 ret = snprintf(page, PAGE_SIZE, "%u\n", 0); 1512 ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
1513 spin_unlock_irq(&nacl->device_list_lock); 1513 spin_unlock_irq(&nacl->device_list_lock);
1514 return ret; 1514 return ret;
1515 } 1515 }
1516 DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(hs_num_cmds); 1516 DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(hs_num_cmds);
1517 1517
1518 static ssize_t target_stat_scsi_auth_intr_show_attr_creation_time( 1518 static ssize_t target_stat_scsi_auth_intr_show_attr_creation_time(
1519 struct se_ml_stat_grps *lgrps, char *page) 1519 struct se_ml_stat_grps *lgrps, char *page)
1520 { 1520 {
1521 struct se_lun_acl *lacl = container_of(lgrps, 1521 struct se_lun_acl *lacl = container_of(lgrps,
1522 struct se_lun_acl, ml_stat_grps); 1522 struct se_lun_acl, ml_stat_grps);
1523 struct se_node_acl *nacl = lacl->se_lun_nacl; 1523 struct se_node_acl *nacl = lacl->se_lun_nacl;
1524 struct se_dev_entry *deve; 1524 struct se_dev_entry *deve;
1525 ssize_t ret; 1525 ssize_t ret;
1526 1526
1527 spin_lock_irq(&nacl->device_list_lock); 1527 spin_lock_irq(&nacl->device_list_lock);
1528 deve = &nacl->device_list[lacl->mapped_lun]; 1528 deve = &nacl->device_list[lacl->mapped_lun];
1529 if (!deve->se_lun || !deve->se_lun_acl) { 1529 if (!deve->se_lun || !deve->se_lun_acl) {
1530 spin_unlock_irq(&nacl->device_list_lock); 1530 spin_unlock_irq(&nacl->device_list_lock);
1531 return -ENODEV; 1531 return -ENODEV;
1532 } 1532 }
1533 /* scsiAuthIntrLastCreation */ 1533 /* scsiAuthIntrLastCreation */
1534 ret = snprintf(page, PAGE_SIZE, "%u\n", (u32)(((u32)deve->creation_time - 1534 ret = snprintf(page, PAGE_SIZE, "%u\n", (u32)(((u32)deve->creation_time -
1535 INITIAL_JIFFIES) * 100 / HZ)); 1535 INITIAL_JIFFIES) * 100 / HZ));
1536 spin_unlock_irq(&nacl->device_list_lock); 1536 spin_unlock_irq(&nacl->device_list_lock);
1537 return ret; 1537 return ret;
1538 } 1538 }
1539 DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(creation_time); 1539 DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(creation_time);
1540 1540
1541 static ssize_t target_stat_scsi_auth_intr_show_attr_row_status( 1541 static ssize_t target_stat_scsi_auth_intr_show_attr_row_status(
1542 struct se_ml_stat_grps *lgrps, char *page) 1542 struct se_ml_stat_grps *lgrps, char *page)
1543 { 1543 {
1544 struct se_lun_acl *lacl = container_of(lgrps, 1544 struct se_lun_acl *lacl = container_of(lgrps,
1545 struct se_lun_acl, ml_stat_grps); 1545 struct se_lun_acl, ml_stat_grps);
1546 struct se_node_acl *nacl = lacl->se_lun_nacl; 1546 struct se_node_acl *nacl = lacl->se_lun_nacl;
1547 struct se_dev_entry *deve; 1547 struct se_dev_entry *deve;
1548 ssize_t ret; 1548 ssize_t ret;
1549 1549
1550 spin_lock_irq(&nacl->device_list_lock); 1550 spin_lock_irq(&nacl->device_list_lock);
1551 deve = &nacl->device_list[lacl->mapped_lun]; 1551 deve = &nacl->device_list[lacl->mapped_lun];
1552 if (!deve->se_lun || !deve->se_lun_acl) { 1552 if (!deve->se_lun || !deve->se_lun_acl) {
1553 spin_unlock_irq(&nacl->device_list_lock); 1553 spin_unlock_irq(&nacl->device_list_lock);
1554 return -ENODEV; 1554 return -ENODEV;
1555 } 1555 }
1556 /* FIXME: scsiAuthIntrRowStatus */ 1556 /* FIXME: scsiAuthIntrRowStatus */
1557 ret = snprintf(page, PAGE_SIZE, "Ready\n"); 1557 ret = snprintf(page, PAGE_SIZE, "Ready\n");
1558 spin_unlock_irq(&nacl->device_list_lock); 1558 spin_unlock_irq(&nacl->device_list_lock);
1559 return ret; 1559 return ret;
1560 } 1560 }
1561 DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(row_status); 1561 DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(row_status);
1562 1562
1563 CONFIGFS_EATTR_OPS(target_stat_scsi_auth_intr, se_ml_stat_grps, 1563 CONFIGFS_EATTR_OPS(target_stat_scsi_auth_intr, se_ml_stat_grps,
1564 scsi_auth_intr_group); 1564 scsi_auth_intr_group);
1565 1565
1566 static struct configfs_attribute *target_stat_scsi_auth_intr_attrs[] = { 1566 static struct configfs_attribute *target_stat_scsi_auth_intr_attrs[] = {
1567 &target_stat_scsi_auth_intr_inst.attr, 1567 &target_stat_scsi_auth_intr_inst.attr,
1568 &target_stat_scsi_auth_intr_dev.attr, 1568 &target_stat_scsi_auth_intr_dev.attr,
1569 &target_stat_scsi_auth_intr_port.attr, 1569 &target_stat_scsi_auth_intr_port.attr,
1570 &target_stat_scsi_auth_intr_indx.attr, 1570 &target_stat_scsi_auth_intr_indx.attr,
1571 &target_stat_scsi_auth_intr_dev_or_port.attr, 1571 &target_stat_scsi_auth_intr_dev_or_port.attr,
1572 &target_stat_scsi_auth_intr_intr_name.attr, 1572 &target_stat_scsi_auth_intr_intr_name.attr,
1573 &target_stat_scsi_auth_intr_map_indx.attr, 1573 &target_stat_scsi_auth_intr_map_indx.attr,
1574 &target_stat_scsi_auth_intr_att_count.attr, 1574 &target_stat_scsi_auth_intr_att_count.attr,
1575 &target_stat_scsi_auth_intr_num_cmds.attr, 1575 &target_stat_scsi_auth_intr_num_cmds.attr,
1576 &target_stat_scsi_auth_intr_read_mbytes.attr, 1576 &target_stat_scsi_auth_intr_read_mbytes.attr,
1577 &target_stat_scsi_auth_intr_write_mbytes.attr, 1577 &target_stat_scsi_auth_intr_write_mbytes.attr,
1578 &target_stat_scsi_auth_intr_hs_num_cmds.attr, 1578 &target_stat_scsi_auth_intr_hs_num_cmds.attr,
1579 &target_stat_scsi_auth_intr_creation_time.attr, 1579 &target_stat_scsi_auth_intr_creation_time.attr,
1580 &target_stat_scsi_auth_intr_row_status.attr, 1580 &target_stat_scsi_auth_intr_row_status.attr,
1581 NULL, 1581 NULL,
1582 }; 1582 };
1583 1583
1584 static struct configfs_item_operations target_stat_scsi_auth_intr_attrib_ops = { 1584 static struct configfs_item_operations target_stat_scsi_auth_intr_attrib_ops = {
1585 .show_attribute = target_stat_scsi_auth_intr_attr_show, 1585 .show_attribute = target_stat_scsi_auth_intr_attr_show,
1586 .store_attribute = target_stat_scsi_auth_intr_attr_store, 1586 .store_attribute = target_stat_scsi_auth_intr_attr_store,
1587 }; 1587 };
1588 1588
1589 static struct config_item_type target_stat_scsi_auth_intr_cit = { 1589 static struct config_item_type target_stat_scsi_auth_intr_cit = {
1590 .ct_item_ops = &target_stat_scsi_auth_intr_attrib_ops, 1590 .ct_item_ops = &target_stat_scsi_auth_intr_attrib_ops,
1591 .ct_attrs = target_stat_scsi_auth_intr_attrs, 1591 .ct_attrs = target_stat_scsi_auth_intr_attrs,
1592 .ct_owner = THIS_MODULE, 1592 .ct_owner = THIS_MODULE,
1593 }; 1593 };
1594 1594
1595 /* 1595 /*
1596 * SCSI Attached Initiator Port Table 1596 * SCSI Attached Initiator Port Table
1597 */ 1597 */
1598 1598
1599 CONFIGFS_EATTR_STRUCT(target_stat_scsi_att_intr_port, se_ml_stat_grps); 1599 CONFIGFS_EATTR_STRUCT(target_stat_scsi_att_intr_port, se_ml_stat_grps);
1600 #define DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR(_name, _mode) \ 1600 #define DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR(_name, _mode) \
1601 static struct target_stat_scsi_att_intr_port_attribute \ 1601 static struct target_stat_scsi_att_intr_port_attribute \
1602 target_stat_scsi_att_intr_port_##_name = \ 1602 target_stat_scsi_att_intr_port_##_name = \
1603 __CONFIGFS_EATTR(_name, _mode, \ 1603 __CONFIGFS_EATTR(_name, _mode, \
1604 target_stat_scsi_att_intr_port_show_attr_##_name, \ 1604 target_stat_scsi_att_intr_port_show_attr_##_name, \
1605 target_stat_scsi_att_intr_port_store_attr_##_name); 1605 target_stat_scsi_att_intr_port_store_attr_##_name);
1606 1606
1607 #define DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(_name) \ 1607 #define DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(_name) \
1608 static struct target_stat_scsi_att_intr_port_attribute \ 1608 static struct target_stat_scsi_att_intr_port_attribute \
1609 target_stat_scsi_att_intr_port_##_name = \ 1609 target_stat_scsi_att_intr_port_##_name = \
1610 __CONFIGFS_EATTR_RO(_name, \ 1610 __CONFIGFS_EATTR_RO(_name, \
1611 target_stat_scsi_att_intr_port_show_attr_##_name); 1611 target_stat_scsi_att_intr_port_show_attr_##_name);
1612 1612
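Only the read-only variant of this macro pair is used in this file. For reference, a read-write attribute would supply both a show and a store handler plus a writable mode; a minimal sketch, assuming a hypothetical "example" attribute (the handler names and the store signature follow the configfs extended-attribute convention and are not part of this commit):

static ssize_t target_stat_scsi_att_intr_port_show_attr_example(
		struct se_ml_stat_grps *lgrps, char *page);
static ssize_t target_stat_scsi_att_intr_port_store_attr_example(
		struct se_ml_stat_grps *lgrps, const char *page, size_t count);

/* Expands to a target_stat_scsi_att_intr_port_attribute wired to both handlers. */
DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR(example, S_IRUGO | S_IWUSR);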
1613 static ssize_t target_stat_scsi_att_intr_port_show_attr_inst( 1613 static ssize_t target_stat_scsi_att_intr_port_show_attr_inst(
1614 struct se_ml_stat_grps *lgrps, char *page) 1614 struct se_ml_stat_grps *lgrps, char *page)
1615 { 1615 {
1616 struct se_lun_acl *lacl = container_of(lgrps, 1616 struct se_lun_acl *lacl = container_of(lgrps,
1617 struct se_lun_acl, ml_stat_grps); 1617 struct se_lun_acl, ml_stat_grps);
1618 struct se_node_acl *nacl = lacl->se_lun_nacl; 1618 struct se_node_acl *nacl = lacl->se_lun_nacl;
1619 struct se_dev_entry *deve; 1619 struct se_dev_entry *deve;
1620 struct se_portal_group *tpg; 1620 struct se_portal_group *tpg;
1621 ssize_t ret; 1621 ssize_t ret;
1622 1622
1623 spin_lock_irq(&nacl->device_list_lock); 1623 spin_lock_irq(&nacl->device_list_lock);
1624 deve = &nacl->device_list[lacl->mapped_lun]; 1624 deve = &nacl->device_list[lacl->mapped_lun];
1625 if (!deve->se_lun || !deve->se_lun_acl) { 1625 if (!deve->se_lun || !deve->se_lun_acl) {
1626 spin_unlock_irq(&nacl->device_list_lock); 1626 spin_unlock_irq(&nacl->device_list_lock);
1627 return -ENODEV; 1627 return -ENODEV;
1628 } 1628 }
1629 tpg = nacl->se_tpg; 1629 tpg = nacl->se_tpg;
1630 /* scsiInstIndex */ 1630 /* scsiInstIndex */
1631 ret = snprintf(page, PAGE_SIZE, "%u\n", 1631 ret = snprintf(page, PAGE_SIZE, "%u\n",
1632 tpg->se_tpg_tfo->tpg_get_inst_index(tpg)); 1632 tpg->se_tpg_tfo->tpg_get_inst_index(tpg));
1633 spin_unlock_irq(&nacl->device_list_lock); 1633 spin_unlock_irq(&nacl->device_list_lock);
1634 return ret; 1634 return ret;
1635 } 1635 }
1636 DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(inst); 1636 DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(inst);
1637 1637
1638 static ssize_t target_stat_scsi_att_intr_port_show_attr_dev( 1638 static ssize_t target_stat_scsi_att_intr_port_show_attr_dev(
1639 struct se_ml_stat_grps *lgrps, char *page) 1639 struct se_ml_stat_grps *lgrps, char *page)
1640 { 1640 {
1641 struct se_lun_acl *lacl = container_of(lgrps, 1641 struct se_lun_acl *lacl = container_of(lgrps,
1642 struct se_lun_acl, ml_stat_grps); 1642 struct se_lun_acl, ml_stat_grps);
1643 struct se_node_acl *nacl = lacl->se_lun_nacl; 1643 struct se_node_acl *nacl = lacl->se_lun_nacl;
1644 struct se_dev_entry *deve; 1644 struct se_dev_entry *deve;
1645 struct se_lun *lun; 1645 struct se_lun *lun;
1646 struct se_portal_group *tpg; 1646 struct se_portal_group *tpg;
1647 ssize_t ret; 1647 ssize_t ret;
1648 1648
1649 spin_lock_irq(&nacl->device_list_lock); 1649 spin_lock_irq(&nacl->device_list_lock);
1650 deve = &nacl->device_list[lacl->mapped_lun]; 1650 deve = &nacl->device_list[lacl->mapped_lun];
1651 if (!deve->se_lun || !deve->se_lun_acl) { 1651 if (!deve->se_lun || !deve->se_lun_acl) {
1652 spin_unlock_irq(&nacl->device_list_lock); 1652 spin_unlock_irq(&nacl->device_list_lock);
1653 return -ENODEV; 1653 return -ENODEV;
1654 } 1654 }
1655 tpg = nacl->se_tpg; 1655 tpg = nacl->se_tpg;
1656 lun = deve->se_lun; 1656 lun = deve->se_lun;
1657 /* scsiDeviceIndex */ 1657 /* scsiDeviceIndex */
1658 ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_se_dev->dev_index); 1658 ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_se_dev->dev_index);
1659 spin_unlock_irq(&nacl->device_list_lock); 1659 spin_unlock_irq(&nacl->device_list_lock);
1660 return ret; 1660 return ret;
1661 } 1661 }
1662 DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(dev); 1662 DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(dev);
1663 1663
1664 static ssize_t target_stat_scsi_att_intr_port_show_attr_port( 1664 static ssize_t target_stat_scsi_att_intr_port_show_attr_port(
1665 struct se_ml_stat_grps *lgrps, char *page) 1665 struct se_ml_stat_grps *lgrps, char *page)
1666 { 1666 {
1667 struct se_lun_acl *lacl = container_of(lgrps, 1667 struct se_lun_acl *lacl = container_of(lgrps,
1668 struct se_lun_acl, ml_stat_grps); 1668 struct se_lun_acl, ml_stat_grps);
1669 struct se_node_acl *nacl = lacl->se_lun_nacl; 1669 struct se_node_acl *nacl = lacl->se_lun_nacl;
1670 struct se_dev_entry *deve; 1670 struct se_dev_entry *deve;
1671 struct se_portal_group *tpg; 1671 struct se_portal_group *tpg;
1672 ssize_t ret; 1672 ssize_t ret;
1673 1673
1674 spin_lock_irq(&nacl->device_list_lock); 1674 spin_lock_irq(&nacl->device_list_lock);
1675 deve = &nacl->device_list[lacl->mapped_lun]; 1675 deve = &nacl->device_list[lacl->mapped_lun];
1676 if (!deve->se_lun || !deve->se_lun_acl) { 1676 if (!deve->se_lun || !deve->se_lun_acl) {
1677 spin_unlock_irq(&nacl->device_list_lock); 1677 spin_unlock_irq(&nacl->device_list_lock);
1678 return -ENODEV; 1678 return -ENODEV;
1679 } 1679 }
1680 tpg = nacl->se_tpg; 1680 tpg = nacl->se_tpg;
1681 /* scsiPortIndex */ 1681 /* scsiPortIndex */
1682 ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->se_tpg_tfo->tpg_get_tag(tpg)); 1682 ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->se_tpg_tfo->tpg_get_tag(tpg));
1683 spin_unlock_irq(&nacl->device_list_lock); 1683 spin_unlock_irq(&nacl->device_list_lock);
1684 return ret; 1684 return ret;
1685 } 1685 }
1686 DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(port); 1686 DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(port);
1687 1687
1688 static ssize_t target_stat_scsi_att_intr_port_show_attr_indx( 1688 static ssize_t target_stat_scsi_att_intr_port_show_attr_indx(
1689 struct se_ml_stat_grps *lgrps, char *page) 1689 struct se_ml_stat_grps *lgrps, char *page)
1690 { 1690 {
1691 struct se_lun_acl *lacl = container_of(lgrps, 1691 struct se_lun_acl *lacl = container_of(lgrps,
1692 struct se_lun_acl, ml_stat_grps); 1692 struct se_lun_acl, ml_stat_grps);
1693 struct se_node_acl *nacl = lacl->se_lun_nacl; 1693 struct se_node_acl *nacl = lacl->se_lun_nacl;
1694 struct se_session *se_sess; 1694 struct se_session *se_sess;
1695 struct se_portal_group *tpg; 1695 struct se_portal_group *tpg;
1696 ssize_t ret; 1696 ssize_t ret;
1697 1697
1698 spin_lock_irq(&nacl->nacl_sess_lock); 1698 spin_lock_irq(&nacl->nacl_sess_lock);
1699 se_sess = nacl->nacl_sess; 1699 se_sess = nacl->nacl_sess;
1700 if (!se_sess) { 1700 if (!se_sess) {
1701 spin_unlock_irq(&nacl->nacl_sess_lock); 1701 spin_unlock_irq(&nacl->nacl_sess_lock);
1702 return -ENODEV; 1702 return -ENODEV;
1703 } 1703 }
1704 1704
1705 tpg = nacl->se_tpg; 1705 tpg = nacl->se_tpg;
1706 /* scsiAttIntrPortIndex */ 1706 /* scsiAttIntrPortIndex */
1707 ret = snprintf(page, PAGE_SIZE, "%u\n", 1707 ret = snprintf(page, PAGE_SIZE, "%u\n",
1708 tpg->se_tpg_tfo->sess_get_index(se_sess)); 1708 tpg->se_tpg_tfo->sess_get_index(se_sess));
1709 spin_unlock_irq(&nacl->nacl_sess_lock); 1709 spin_unlock_irq(&nacl->nacl_sess_lock);
1710 return ret; 1710 return ret;
1711 } 1711 }
1712 DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(indx); 1712 DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(indx);
1713 1713
1714 static ssize_t target_stat_scsi_att_intr_port_show_attr_port_auth_indx( 1714 static ssize_t target_stat_scsi_att_intr_port_show_attr_port_auth_indx(
1715 struct se_ml_stat_grps *lgrps, char *page) 1715 struct se_ml_stat_grps *lgrps, char *page)
1716 { 1716 {
1717 struct se_lun_acl *lacl = container_of(lgrps, 1717 struct se_lun_acl *lacl = container_of(lgrps,
1718 struct se_lun_acl, ml_stat_grps); 1718 struct se_lun_acl, ml_stat_grps);
1719 struct se_node_acl *nacl = lacl->se_lun_nacl; 1719 struct se_node_acl *nacl = lacl->se_lun_nacl;
1720 struct se_dev_entry *deve; 1720 struct se_dev_entry *deve;
1721 ssize_t ret; 1721 ssize_t ret;
1722 1722
1723 spin_lock_irq(&nacl->device_list_lock); 1723 spin_lock_irq(&nacl->device_list_lock);
1724 deve = &nacl->device_list[lacl->mapped_lun]; 1724 deve = &nacl->device_list[lacl->mapped_lun];
1725 if (!deve->se_lun || !deve->se_lun_acl) { 1725 if (!deve->se_lun || !deve->se_lun_acl) {
1726 spin_unlock_irq(&nacl->device_list_lock); 1726 spin_unlock_irq(&nacl->device_list_lock);
1727 return -ENODEV; 1727 return -ENODEV;
1728 } 1728 }
1729 /* scsiAttIntrPortAuthIntrIdx */ 1729 /* scsiAttIntrPortAuthIntrIdx */
1730 ret = snprintf(page, PAGE_SIZE, "%u\n", nacl->acl_index); 1730 ret = snprintf(page, PAGE_SIZE, "%u\n", nacl->acl_index);
1731 spin_unlock_irq(&nacl->device_list_lock); 1731 spin_unlock_irq(&nacl->device_list_lock);
1732 return ret; 1732 return ret;
1733 } 1733 }
1734 DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(port_auth_indx); 1734 DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(port_auth_indx);
1735 1735
1736 static ssize_t target_stat_scsi_att_intr_port_show_attr_port_ident( 1736 static ssize_t target_stat_scsi_att_intr_port_show_attr_port_ident(
1737 struct se_ml_stat_grps *lgrps, char *page) 1737 struct se_ml_stat_grps *lgrps, char *page)
1738 { 1738 {
1739 struct se_lun_acl *lacl = container_of(lgrps, 1739 struct se_lun_acl *lacl = container_of(lgrps,
1740 struct se_lun_acl, ml_stat_grps); 1740 struct se_lun_acl, ml_stat_grps);
1741 struct se_node_acl *nacl = lacl->se_lun_nacl; 1741 struct se_node_acl *nacl = lacl->se_lun_nacl;
1742 struct se_session *se_sess; 1742 struct se_session *se_sess;
1743 struct se_portal_group *tpg; 1743 struct se_portal_group *tpg;
1744 ssize_t ret; 1744 ssize_t ret;
1745 unsigned char buf[64]; 1745 unsigned char buf[64];
1746 1746
1747 spin_lock_irq(&nacl->nacl_sess_lock); 1747 spin_lock_irq(&nacl->nacl_sess_lock);
1748 se_sess = nacl->nacl_sess; 1748 se_sess = nacl->nacl_sess;
1749 if (!se_sess) { 1749 if (!se_sess) {
1750 spin_unlock_irq(&nacl->nacl_sess_lock); 1750 spin_unlock_irq(&nacl->nacl_sess_lock);
1751 return -ENODEV; 1751 return -ENODEV;
1752 } 1752 }
1753 1753
1754 tpg = nacl->se_tpg; 1754 tpg = nacl->se_tpg;
1755 /* scsiAttIntrPortName+scsiAttIntrPortIdentifier */ 1755 /* scsiAttIntrPortName+scsiAttIntrPortIdentifier */
1756 memset(buf, 0, 64); 1756 memset(buf, 0, 64);
1757 if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) 1757 if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL)
1758 tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, 1758 tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
1759 (unsigned char *)&buf[0], 64); 1759 (unsigned char *)&buf[0], 64);
1760 1760
1761 ret = snprintf(page, PAGE_SIZE, "%s+i+%s\n", nacl->initiatorname, buf); 1761 ret = snprintf(page, PAGE_SIZE, "%s+i+%s\n", nacl->initiatorname, buf);
1762 spin_unlock_irq(&nacl->nacl_sess_lock); 1762 spin_unlock_irq(&nacl->nacl_sess_lock);
1763 return ret; 1763 return ret;
1764 } 1764 }
1765 DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(port_ident); 1765 DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(port_ident);
1766 1766
1767 CONFIGFS_EATTR_OPS(target_stat_scsi_att_intr_port, se_ml_stat_grps, 1767 CONFIGFS_EATTR_OPS(target_stat_scsi_att_intr_port, se_ml_stat_grps,
1768 scsi_att_intr_port_group); 1768 scsi_att_intr_port_group);
1769 1769
1770 static struct configfs_attribute *target_stat_scsi_att_intr_port_attrs[] = { 1770 static struct configfs_attribute *target_stat_scsi_att_intr_port_attrs[] = {
1771 &target_stat_scsi_att_intr_port_inst.attr, 1771 &target_stat_scsi_att_intr_port_inst.attr,
1772 &target_stat_scsi_att_intr_port_dev.attr, 1772 &target_stat_scsi_att_intr_port_dev.attr,
1773 &target_stat_scsi_att_intr_port_port.attr, 1773 &target_stat_scsi_att_intr_port_port.attr,
1774 &target_stat_scsi_att_intr_port_indx.attr, 1774 &target_stat_scsi_att_intr_port_indx.attr,
1775 &target_stat_scsi_att_intr_port_port_auth_indx.attr, 1775 &target_stat_scsi_att_intr_port_port_auth_indx.attr,
1776 &target_stat_scsi_att_intr_port_port_ident.attr, 1776 &target_stat_scsi_att_intr_port_port_ident.attr,
1777 NULL, 1777 NULL,
1778 }; 1778 };
1779 1779
1780 static struct configfs_item_operations target_stat_scsi_att_intr_port_attrib_ops = { 1780 static struct configfs_item_operations target_stat_scsi_att_intr_port_attrib_ops = {
1781 .show_attribute = target_stat_scsi_att_intr_port_attr_show, 1781 .show_attribute = target_stat_scsi_att_intr_port_attr_show,
1782 .store_attribute = target_stat_scsi_att_intr_port_attr_store, 1782 .store_attribute = target_stat_scsi_att_intr_port_attr_store,
1783 }; 1783 };
1784 1784
1785 static struct config_item_type target_stat_scsi_att_intr_port_cit = { 1785 static struct config_item_type target_stat_scsi_att_intr_port_cit = {
1786 .ct_item_ops = &target_stat_scsi_att_intr_port_attrib_ops, 1786 .ct_item_ops = &target_stat_scsi_att_intr_port_attrib_ops,
1787 .ct_attrs = target_stat_scsi_att_intr_port_attrs, 1787 .ct_attrs = target_stat_scsi_att_intr_port_attrs,
1788 .ct_owner = THIS_MODULE, 1788 .ct_owner = THIS_MODULE,
1789 }; 1789 };
1790 1790
1791 /* 1791 /*
1792 * Called from target_core_fabric_configfs.c:target_fabric_make_mappedlun() to set up 1792 * Called from target_core_fabric_configfs.c:target_fabric_make_mappedlun() to set up
1793 * the target MappedLUN statistics groups + configfs CITs located in target_core_stat.c 1793 * the target MappedLUN statistics groups + configfs CITs located in target_core_stat.c
1794 */ 1794 */
1795 void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *lacl) 1795 void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *lacl)
1796 { 1796 {
1797 struct config_group *ml_stat_grp = &lacl->ml_stat_grps.stat_group; 1797 struct config_group *ml_stat_grp = &lacl->ml_stat_grps.stat_group;
1798 1798
1799 config_group_init_type_name(&lacl->ml_stat_grps.scsi_auth_intr_group, 1799 config_group_init_type_name(&lacl->ml_stat_grps.scsi_auth_intr_group,
1800 "scsi_auth_intr", &target_stat_scsi_auth_intr_cit); 1800 "scsi_auth_intr", &target_stat_scsi_auth_intr_cit);
1801 config_group_init_type_name(&lacl->ml_stat_grps.scsi_att_intr_port_group, 1801 config_group_init_type_name(&lacl->ml_stat_grps.scsi_att_intr_port_group,
1802 "scsi_att_intr_port", &target_stat_scsi_att_intr_port_cit); 1802 "scsi_att_intr_port", &target_stat_scsi_att_intr_port_cit);
1803 1803
1804 ml_stat_grp->default_groups[0] = &lacl->ml_stat_grps.scsi_auth_intr_group; 1804 ml_stat_grp->default_groups[0] = &lacl->ml_stat_grps.scsi_auth_intr_group;
1805 ml_stat_grp->default_groups[1] = &lacl->ml_stat_grps.scsi_att_intr_port_group; 1805 ml_stat_grp->default_groups[1] = &lacl->ml_stat_grps.scsi_att_intr_port_group;
1806 ml_stat_grp->default_groups[2] = NULL; 1806 ml_stat_grp->default_groups[2] = NULL;
1807 } 1807 }
1808 1808
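As the comment above notes, this helper is driven from target_fabric_make_mappedlun(); a minimal sketch of the expected call sequence, assuming a parent group named "statistics" and an ml_stat_cit argument (both illustrative here; the real CIT lives in target_core_fabric_configfs.c):

static void example_mappedlun_stat_init(struct se_lun_acl *lacl,
					struct config_item_type *ml_stat_cit)
{
	/* Initialize the parent "statistics" group for this mapped LUN... */
	config_group_init_type_name(&lacl->ml_stat_grps.stat_group,
			"statistics", ml_stat_cit);
	/* ...then populate its two default children, scsi_auth_intr and
	 * scsi_att_intr_port, exactly as the function above does. */
	target_stat_setup_mappedlun_default_groups(lacl);
}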
drivers/target/target_core_tmr.c
1 /******************************************************************************* 1 /*******************************************************************************
2 * Filename: target_core_tmr.c 2 * Filename: target_core_tmr.c
3 * 3 *
4 * This file contains SPC-3 task management infrastructure 4 * This file contains SPC-3 task management infrastructure
5 * 5 *
6 * Copyright (c) 2009,2010 Rising Tide Systems 6 * Copyright (c) 2009,2010 Rising Tide Systems
7 * Copyright (c) 2009,2010 Linux-iSCSI.org 7 * Copyright (c) 2009,2010 Linux-iSCSI.org
8 * 8 *
9 * Nicholas A. Bellinger <nab@kernel.org> 9 * Nicholas A. Bellinger <nab@kernel.org>
10 * 10 *
11 * This program is free software; you can redistribute it and/or modify 11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by 12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or 13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version. 14 * (at your option) any later version.
15 * 15 *
16 * This program is distributed in the hope that it will be useful, 16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details. 19 * GNU General Public License for more details.
20 * 20 *
21 * You should have received a copy of the GNU General Public License 21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software 22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 23 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 * 24 *
25 ******************************************************************************/ 25 ******************************************************************************/
26 26
27 #include <linux/slab.h> 27 #include <linux/slab.h>
28 #include <linux/spinlock.h> 28 #include <linux/spinlock.h>
29 #include <linux/list.h> 29 #include <linux/list.h>
30 #include <linux/export.h> 30 #include <linux/export.h>
31 #include <scsi/scsi.h> 31 #include <scsi/scsi.h>
32 #include <scsi/scsi_cmnd.h> 32 #include <scsi/scsi_cmnd.h>
33 33
34 #include <target/target_core_base.h> 34 #include <target/target_core_base.h>
35 #include <target/target_core_device.h> 35 #include <target/target_core_backend.h>
36 #include <target/target_core_tmr.h> 36 #include <target/target_core_fabric.h>
37 #include <target/target_core_transport.h>
38 #include <target/target_core_fabric_ops.h>
39 #include <target/target_core_configfs.h> 37 #include <target/target_core_configfs.h>
40 38
41 #include "target_core_internal.h" 39 #include "target_core_internal.h"
42 #include "target_core_alua.h" 40 #include "target_core_alua.h"
43 #include "target_core_pr.h" 41 #include "target_core_pr.h"
44 42
45 struct se_tmr_req *core_tmr_alloc_req( 43 struct se_tmr_req *core_tmr_alloc_req(
46 struct se_cmd *se_cmd, 44 struct se_cmd *se_cmd,
47 void *fabric_tmr_ptr, 45 void *fabric_tmr_ptr,
48 u8 function, 46 u8 function,
49 gfp_t gfp_flags) 47 gfp_t gfp_flags)
50 { 48 {
51 struct se_tmr_req *tmr; 49 struct se_tmr_req *tmr;
52 50
53 tmr = kmem_cache_zalloc(se_tmr_req_cache, gfp_flags); 51 tmr = kmem_cache_zalloc(se_tmr_req_cache, gfp_flags);
54 if (!tmr) { 52 if (!tmr) {
55 pr_err("Unable to allocate struct se_tmr_req\n"); 53 pr_err("Unable to allocate struct se_tmr_req\n");
56 return ERR_PTR(-ENOMEM); 54 return ERR_PTR(-ENOMEM);
57 } 55 }
58 tmr->task_cmd = se_cmd; 56 tmr->task_cmd = se_cmd;
59 tmr->fabric_tmr_ptr = fabric_tmr_ptr; 57 tmr->fabric_tmr_ptr = fabric_tmr_ptr;
60 tmr->function = function; 58 tmr->function = function;
61 INIT_LIST_HEAD(&tmr->tmr_list); 59 INIT_LIST_HEAD(&tmr->tmr_list);
62 60
63 return tmr; 61 return tmr;
64 } 62 }
65 EXPORT_SYMBOL(core_tmr_alloc_req); 63 EXPORT_SYMBOL(core_tmr_alloc_req);
66 64
67 void core_tmr_release_req( 65 void core_tmr_release_req(
68 struct se_tmr_req *tmr) 66 struct se_tmr_req *tmr)
69 { 67 {
70 struct se_device *dev = tmr->tmr_dev; 68 struct se_device *dev = tmr->tmr_dev;
71 unsigned long flags; 69 unsigned long flags;
72 70
73 if (!dev) { 71 if (!dev) {
74 kmem_cache_free(se_tmr_req_cache, tmr); 72 kmem_cache_free(se_tmr_req_cache, tmr);
75 return; 73 return;
76 } 74 }
77 75
78 spin_lock_irqsave(&dev->se_tmr_lock, flags); 76 spin_lock_irqsave(&dev->se_tmr_lock, flags);
79 list_del(&tmr->tmr_list); 77 list_del(&tmr->tmr_list);
80 spin_unlock_irqrestore(&dev->se_tmr_lock, flags); 78 spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
81 79
82 kmem_cache_free(se_tmr_req_cache, tmr); 80 kmem_cache_free(se_tmr_req_cache, tmr);
83 } 81 }
84 82
85 static void core_tmr_handle_tas_abort( 83 static void core_tmr_handle_tas_abort(
86 struct se_node_acl *tmr_nacl, 84 struct se_node_acl *tmr_nacl,
87 struct se_cmd *cmd, 85 struct se_cmd *cmd,
88 int tas, 86 int tas,
89 int fe_count) 87 int fe_count)
90 { 88 {
91 if (!fe_count) { 89 if (!fe_count) {
92 transport_cmd_finish_abort(cmd, 1); 90 transport_cmd_finish_abort(cmd, 1);
93 return; 91 return;
94 } 92 }
95 /* 93 /*
96 * TASK ABORTED status (TAS) bit support 94 * TASK ABORTED status (TAS) bit support
97 */ 95 */
98 if ((tmr_nacl && 96 if ((tmr_nacl &&
99 (tmr_nacl == cmd->se_sess->se_node_acl)) || tas) 97 (tmr_nacl == cmd->se_sess->se_node_acl)) || tas)
100 transport_send_task_abort(cmd); 98 transport_send_task_abort(cmd);
101 99
102 transport_cmd_finish_abort(cmd, 0); 100 transport_cmd_finish_abort(cmd, 0);
103 } 101 }
104 102
105 static void core_tmr_drain_tmr_list( 103 static void core_tmr_drain_tmr_list(
106 struct se_device *dev, 104 struct se_device *dev,
107 struct se_tmr_req *tmr, 105 struct se_tmr_req *tmr,
108 struct list_head *preempt_and_abort_list) 106 struct list_head *preempt_and_abort_list)
109 { 107 {
110 LIST_HEAD(drain_tmr_list); 108 LIST_HEAD(drain_tmr_list);
111 struct se_tmr_req *tmr_p, *tmr_pp; 109 struct se_tmr_req *tmr_p, *tmr_pp;
112 struct se_cmd *cmd; 110 struct se_cmd *cmd;
113 unsigned long flags; 111 unsigned long flags;
114 /* 112 /*
115 * Release all pending and outgoing TMRs aside from the received 113 * Release all pending and outgoing TMRs aside from the received
116 * LUN_RESET tmr.. 114 * LUN_RESET tmr..
117 */ 115 */
118 spin_lock_irqsave(&dev->se_tmr_lock, flags); 116 spin_lock_irqsave(&dev->se_tmr_lock, flags);
119 list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) { 117 list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
120 /* 118 /*
121 * Allow the received TMR to return with FUNCTION_COMPLETE. 119 * Allow the received TMR to return with FUNCTION_COMPLETE.
122 */ 120 */
123 if (tmr_p == tmr) 121 if (tmr_p == tmr)
124 continue; 122 continue;
125 123
126 cmd = tmr_p->task_cmd; 124 cmd = tmr_p->task_cmd;
127 if (!cmd) { 125 if (!cmd) {
128 pr_err("Unable to locate struct se_cmd for TMR\n"); 126 pr_err("Unable to locate struct se_cmd for TMR\n");
129 continue; 127 continue;
130 } 128 }
131 /* 129 /*
132 * If this function was called with a valid pr_res_key 130 * If this function was called with a valid pr_res_key
133 * parameter (e.g., for a PROUT PREEMPT_AND_ABORT service action), 131 * parameter (e.g., for a PROUT PREEMPT_AND_ABORT service action),
134 * skip TMRs that do not match a registration key. 132 * skip TMRs that do not match a registration key.
135 */ 133 */
136 if (preempt_and_abort_list && 134 if (preempt_and_abort_list &&
137 (core_scsi3_check_cdb_abort_and_preempt( 135 (core_scsi3_check_cdb_abort_and_preempt(
138 preempt_and_abort_list, cmd) != 0)) 136 preempt_and_abort_list, cmd) != 0))
139 continue; 137 continue;
140 138
141 spin_lock(&cmd->t_state_lock); 139 spin_lock(&cmd->t_state_lock);
142 if (!atomic_read(&cmd->t_transport_active)) { 140 if (!atomic_read(&cmd->t_transport_active)) {
143 spin_unlock(&cmd->t_state_lock); 141 spin_unlock(&cmd->t_state_lock);
144 continue; 142 continue;
145 } 143 }
146 if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) { 144 if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
147 spin_unlock(&cmd->t_state_lock); 145 spin_unlock(&cmd->t_state_lock);
148 continue; 146 continue;
149 } 147 }
150 spin_unlock(&cmd->t_state_lock); 148 spin_unlock(&cmd->t_state_lock);
151 149
152 list_move_tail(&tmr_p->tmr_list, &drain_tmr_list); 150 list_move_tail(&tmr_p->tmr_list, &drain_tmr_list);
153 } 151 }
154 spin_unlock_irqrestore(&dev->se_tmr_lock, flags); 152 spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
155 153
156 list_for_each_entry_safe(tmr_p, tmr_pp, &drain_tmr_list, tmr_list) { 154 list_for_each_entry_safe(tmr_p, tmr_pp, &drain_tmr_list, tmr_list) {
157 list_del_init(&tmr_p->tmr_list); 155 list_del_init(&tmr_p->tmr_list);
158 cmd = tmr_p->task_cmd; 156 cmd = tmr_p->task_cmd;
159 157
160 pr_debug("LUN_RESET: %s releasing TMR %p Function: 0x%02x," 158 pr_debug("LUN_RESET: %s releasing TMR %p Function: 0x%02x,"
161 " Response: 0x%02x, t_state: %d\n", 159 " Response: 0x%02x, t_state: %d\n",
162 (preempt_and_abort_list) ? "Preempt" : "", tmr_p, 160 (preempt_and_abort_list) ? "Preempt" : "", tmr_p,
163 tmr_p->function, tmr_p->response, cmd->t_state); 161 tmr_p->function, tmr_p->response, cmd->t_state);
164 162
165 transport_cmd_finish_abort(cmd, 1); 163 transport_cmd_finish_abort(cmd, 1);
166 } 164 }
167 } 165 }
168 166
169 static void core_tmr_drain_task_list( 167 static void core_tmr_drain_task_list(
170 struct se_device *dev, 168 struct se_device *dev,
171 struct se_cmd *prout_cmd, 169 struct se_cmd *prout_cmd,
172 struct se_node_acl *tmr_nacl, 170 struct se_node_acl *tmr_nacl,
173 int tas, 171 int tas,
174 struct list_head *preempt_and_abort_list) 172 struct list_head *preempt_and_abort_list)
175 { 173 {
176 LIST_HEAD(drain_task_list); 174 LIST_HEAD(drain_task_list);
177 struct se_cmd *cmd; 175 struct se_cmd *cmd;
178 struct se_task *task, *task_tmp; 176 struct se_task *task, *task_tmp;
179 unsigned long flags; 177 unsigned long flags;
180 int fe_count; 178 int fe_count;
181 /* 179 /*
182 * Complete outstanding struct se_task CDBs with TASK_ABORTED SAM status. 180 * Complete outstanding struct se_task CDBs with TASK_ABORTED SAM status.
183 * This is following sam4r17, section 5.6 Aborting commands, Table 38 181 * This is following sam4r17, section 5.6 Aborting commands, Table 38
184 * for TMR LUN_RESET: 182 * for TMR LUN_RESET:
185 * 183 *
186 * a) "Yes" indicates that each command that is aborted on an I_T nexus 184 * a) "Yes" indicates that each command that is aborted on an I_T nexus
187 * other than the one that caused the SCSI device condition is 185 * other than the one that caused the SCSI device condition is
188 * completed with TASK ABORTED status, if the TAS bit is set to one in 186 * completed with TASK ABORTED status, if the TAS bit is set to one in
189 * the Control mode page (see SPC-4). "No" indicates that no status is 187 * the Control mode page (see SPC-4). "No" indicates that no status is
190 * returned for aborted commands. 188 * returned for aborted commands.
191 * 189 *
192 * d) If the logical unit reset is caused by a particular I_T nexus 190 * d) If the logical unit reset is caused by a particular I_T nexus
193 * (e.g., by a LOGICAL UNIT RESET task management function), then "yes" 191 * (e.g., by a LOGICAL UNIT RESET task management function), then "yes"
194 * (TASK_ABORTED status) applies. 192 * (TASK_ABORTED status) applies.
195 * 193 *
196 * Otherwise (e.g., if triggered by a hard reset), "no" 194 * Otherwise (e.g., if triggered by a hard reset), "no"
197 * (no TASK_ABORTED SAM status) applies. 195 * (no TASK_ABORTED SAM status) applies.
198 * 196 *
199 * Note that this seems to be independent of TAS (Task Aborted Status) 197 * Note that this seems to be independent of TAS (Task Aborted Status)
200 * in the Control Mode Page. 198 * in the Control Mode Page.
201 */ 199 */
202 spin_lock_irqsave(&dev->execute_task_lock, flags); 200 spin_lock_irqsave(&dev->execute_task_lock, flags);
203 list_for_each_entry_safe(task, task_tmp, &dev->state_task_list, 201 list_for_each_entry_safe(task, task_tmp, &dev->state_task_list,
204 t_state_list) { 202 t_state_list) {
205 if (!task->task_se_cmd) { 203 if (!task->task_se_cmd) {
206 pr_err("task->task_se_cmd is NULL!\n"); 204 pr_err("task->task_se_cmd is NULL!\n");
207 continue; 205 continue;
208 } 206 }
209 cmd = task->task_se_cmd; 207 cmd = task->task_se_cmd;
210 208
211 /* 209 /*
212 * For PREEMPT_AND_ABORT usage, only process commands 210 * For PREEMPT_AND_ABORT usage, only process commands
213 * with a matching reservation key. 211 * with a matching reservation key.
214 */ 212 */
215 if (preempt_and_abort_list && 213 if (preempt_and_abort_list &&
216 (core_scsi3_check_cdb_abort_and_preempt( 214 (core_scsi3_check_cdb_abort_and_preempt(
217 preempt_and_abort_list, cmd) != 0)) 215 preempt_and_abort_list, cmd) != 0))
218 continue; 216 continue;
219 /* 217 /*
220 * Not aborting PROUT PREEMPT_AND_ABORT CDB.. 218 * Not aborting PROUT PREEMPT_AND_ABORT CDB..
221 */ 219 */
222 if (prout_cmd == cmd) 220 if (prout_cmd == cmd)
223 continue; 221 continue;
224 222
225 list_move_tail(&task->t_state_list, &drain_task_list); 223 list_move_tail(&task->t_state_list, &drain_task_list);
226 atomic_set(&task->task_state_active, 0); 224 atomic_set(&task->task_state_active, 0);
227 /* 225 /*
228 * Remove from task execute list before processing drain_task_list 226 * Remove from task execute list before processing drain_task_list
229 */ 227 */
230 if (!list_empty(&task->t_execute_list)) 228 if (!list_empty(&task->t_execute_list))
231 __transport_remove_task_from_execute_queue(task, dev); 229 __transport_remove_task_from_execute_queue(task, dev);
232 } 230 }
233 spin_unlock_irqrestore(&dev->execute_task_lock, flags); 231 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
234 232
235 while (!list_empty(&drain_task_list)) { 233 while (!list_empty(&drain_task_list)) {
236 task = list_entry(drain_task_list.next, struct se_task, t_state_list); 234 task = list_entry(drain_task_list.next, struct se_task, t_state_list);
237 list_del(&task->t_state_list); 235 list_del(&task->t_state_list);
238 cmd = task->task_se_cmd; 236 cmd = task->task_se_cmd;
239 237
240 pr_debug("LUN_RESET: %s cmd: %p task: %p" 238 pr_debug("LUN_RESET: %s cmd: %p task: %p"
241 " ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state: %d" 239 " ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state: %d"
242 " cdb: 0x%02x\n", 240 " cdb: 0x%02x\n",
243 (preempt_and_abort_list) ? "Preempt" : "", cmd, task, 241 (preempt_and_abort_list) ? "Preempt" : "", cmd, task,
244 cmd->se_tfo->get_task_tag(cmd), 0, 242 cmd->se_tfo->get_task_tag(cmd), 0,
245 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state, 243 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
246 cmd->t_task_cdb[0]); 244 cmd->t_task_cdb[0]);
247 pr_debug("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx" 245 pr_debug("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx"
248 " t_task_cdbs: %d t_task_cdbs_left: %d" 246 " t_task_cdbs: %d t_task_cdbs_left: %d"
249 " t_task_cdbs_sent: %d -- t_transport_active: %d" 247 " t_task_cdbs_sent: %d -- t_transport_active: %d"
250 " t_transport_stop: %d t_transport_sent: %d\n", 248 " t_transport_stop: %d t_transport_sent: %d\n",
251 cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key, 249 cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
252 cmd->t_task_list_num, 250 cmd->t_task_list_num,
253 atomic_read(&cmd->t_task_cdbs_left), 251 atomic_read(&cmd->t_task_cdbs_left),
254 atomic_read(&cmd->t_task_cdbs_sent), 252 atomic_read(&cmd->t_task_cdbs_sent),
255 atomic_read(&cmd->t_transport_active), 253 atomic_read(&cmd->t_transport_active),
256 atomic_read(&cmd->t_transport_stop), 254 atomic_read(&cmd->t_transport_stop),
257 atomic_read(&cmd->t_transport_sent)); 255 atomic_read(&cmd->t_transport_sent));
258 256
259 /* 257 /*
260 * If the command may be queued onto a workqueue cancel it now. 258 * If the command may be queued onto a workqueue cancel it now.
261 * 259 *
262 * This is equivalent to removal from the execute queue in the 260 * This is equivalent to removal from the execute queue in the
263 * loop above, but we do it down here given that 261 * loop above, but we do it down here given that
264 * cancel_work_sync may block. 262 * cancel_work_sync may block.
265 */ 263 */
266 if (cmd->t_state == TRANSPORT_COMPLETE) 264 if (cmd->t_state == TRANSPORT_COMPLETE)
267 cancel_work_sync(&cmd->work); 265 cancel_work_sync(&cmd->work);
268 266
269 spin_lock_irqsave(&cmd->t_state_lock, flags); 267 spin_lock_irqsave(&cmd->t_state_lock, flags);
270 target_stop_task(task, &flags); 268 target_stop_task(task, &flags);
271 269
272 if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) { 270 if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) {
273 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 271 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
274 pr_debug("LUN_RESET: Skipping task: %p, dev: %p for" 272 pr_debug("LUN_RESET: Skipping task: %p, dev: %p for"
275 " t_task_cdbs_ex_left: %d\n", task, dev, 273 " t_task_cdbs_ex_left: %d\n", task, dev,
276 atomic_read(&cmd->t_task_cdbs_ex_left)); 274 atomic_read(&cmd->t_task_cdbs_ex_left));
277 continue; 275 continue;
278 } 276 }
279 fe_count = atomic_read(&cmd->t_fe_count); 277 fe_count = atomic_read(&cmd->t_fe_count);
280 278
281 if (atomic_read(&cmd->t_transport_active)) { 279 if (atomic_read(&cmd->t_transport_active)) {
282 pr_debug("LUN_RESET: got t_transport_active = 1 for" 280 pr_debug("LUN_RESET: got t_transport_active = 1 for"
283 " task: %p, t_fe_count: %d dev: %p\n", task, 281 " task: %p, t_fe_count: %d dev: %p\n", task,
284 fe_count, dev); 282 fe_count, dev);
285 atomic_set(&cmd->t_transport_aborted, 1); 283 atomic_set(&cmd->t_transport_aborted, 1);
286 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 284 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
287 285
288 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); 286 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
289 continue; 287 continue;
290 } 288 }
291 pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p," 289 pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
292 " t_fe_count: %d dev: %p\n", task, fe_count, dev); 290 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
293 atomic_set(&cmd->t_transport_aborted, 1); 291 atomic_set(&cmd->t_transport_aborted, 1);
294 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 292 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
295 293
296 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); 294 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
297 } 295 }
298 } 296 }
299 297
300 static void core_tmr_drain_cmd_list( 298 static void core_tmr_drain_cmd_list(
301 struct se_device *dev, 299 struct se_device *dev,
302 struct se_cmd *prout_cmd, 300 struct se_cmd *prout_cmd,
303 struct se_node_acl *tmr_nacl, 301 struct se_node_acl *tmr_nacl,
304 int tas, 302 int tas,
305 struct list_head *preempt_and_abort_list) 303 struct list_head *preempt_and_abort_list)
306 { 304 {
307 LIST_HEAD(drain_cmd_list); 305 LIST_HEAD(drain_cmd_list);
308 struct se_queue_obj *qobj = &dev->dev_queue_obj; 306 struct se_queue_obj *qobj = &dev->dev_queue_obj;
309 struct se_cmd *cmd, *tcmd; 307 struct se_cmd *cmd, *tcmd;
310 unsigned long flags; 308 unsigned long flags;
311 /* 309 /*
312 * Release all commands remaining in the struct se_device cmd queue. 310 * Release all commands remaining in the struct se_device cmd queue.
313 * 311 *
314 * This follows the same logic as above for the struct se_device 312 * This follows the same logic as above for the struct se_device
315 * struct se_task state list, where commands are returned with 313 * struct se_task state list, where commands are returned with
316 * TASK_ABORTED status if there is an outstanding $FABRIC_MOD 314 * TASK_ABORTED status if there is an outstanding $FABRIC_MOD
317 * reference; otherwise the struct se_cmd is released. 315 * reference; otherwise the struct se_cmd is released.
318 */ 316 */
319 spin_lock_irqsave(&qobj->cmd_queue_lock, flags); 317 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
320 list_for_each_entry_safe(cmd, tcmd, &qobj->qobj_list, se_queue_node) { 318 list_for_each_entry_safe(cmd, tcmd, &qobj->qobj_list, se_queue_node) {
321 /* 319 /*
322 * For PREEMPT_AND_ABORT usage, only process commands 320 * For PREEMPT_AND_ABORT usage, only process commands
323 * with a matching reservation key. 321 * with a matching reservation key.
324 */ 322 */
325 if (preempt_and_abort_list && 323 if (preempt_and_abort_list &&
326 (core_scsi3_check_cdb_abort_and_preempt( 324 (core_scsi3_check_cdb_abort_and_preempt(
327 preempt_and_abort_list, cmd) != 0)) 325 preempt_and_abort_list, cmd) != 0))
328 continue; 326 continue;
329 /* 327 /*
330 * Not aborting PROUT PREEMPT_AND_ABORT CDB.. 328 * Not aborting PROUT PREEMPT_AND_ABORT CDB..
331 */ 329 */
332 if (prout_cmd == cmd) 330 if (prout_cmd == cmd)
333 continue; 331 continue;
334 332
335 atomic_set(&cmd->t_transport_queue_active, 0); 333 atomic_set(&cmd->t_transport_queue_active, 0);
336 atomic_dec(&qobj->queue_cnt); 334 atomic_dec(&qobj->queue_cnt);
337 list_move_tail(&cmd->se_queue_node, &drain_cmd_list); 335 list_move_tail(&cmd->se_queue_node, &drain_cmd_list);
338 } 336 }
339 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); 337 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
340 338
341 while (!list_empty(&drain_cmd_list)) { 339 while (!list_empty(&drain_cmd_list)) {
342 cmd = list_entry(drain_cmd_list.next, struct se_cmd, se_queue_node); 340 cmd = list_entry(drain_cmd_list.next, struct se_cmd, se_queue_node);
343 list_del_init(&cmd->se_queue_node); 341 list_del_init(&cmd->se_queue_node);
344 342
345 pr_debug("LUN_RESET: %s from Device Queue: cmd: %p t_state:" 343 pr_debug("LUN_RESET: %s from Device Queue: cmd: %p t_state:"
346 " %d t_fe_count: %d\n", (preempt_and_abort_list) ? 344 " %d t_fe_count: %d\n", (preempt_and_abort_list) ?
347 "Preempt" : "", cmd, cmd->t_state, 345 "Preempt" : "", cmd, cmd->t_state,
348 atomic_read(&cmd->t_fe_count)); 346 atomic_read(&cmd->t_fe_count));
349 347
350 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, 348 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas,
351 atomic_read(&cmd->t_fe_count)); 349 atomic_read(&cmd->t_fe_count));
352 } 350 }
353 } 351 }
354 352
355 int core_tmr_lun_reset( 353 int core_tmr_lun_reset(
356 struct se_device *dev, 354 struct se_device *dev,
357 struct se_tmr_req *tmr, 355 struct se_tmr_req *tmr,
358 struct list_head *preempt_and_abort_list, 356 struct list_head *preempt_and_abort_list,
359 struct se_cmd *prout_cmd) 357 struct se_cmd *prout_cmd)
360 { 358 {
361 struct se_node_acl *tmr_nacl = NULL; 359 struct se_node_acl *tmr_nacl = NULL;
362 struct se_portal_group *tmr_tpg = NULL; 360 struct se_portal_group *tmr_tpg = NULL;
363 int tas; 361 int tas;
364 /* 362 /*
363 * The TASK_ABORTED status bit is configurable via ConfigFS 361 * The TASK_ABORTED status bit is configurable via ConfigFS
366 * struct se_device attributes. spc4r17 section 7.4.6 Control mode page 364 * struct se_device attributes. spc4r17 section 7.4.6 Control mode page
367 * 365 *
368 * A task aborted status (TAS) bit set to zero specifies that aborted 366 * A task aborted status (TAS) bit set to zero specifies that aborted
369 * tasks shall be terminated by the device server without any response 367 * tasks shall be terminated by the device server without any response
370 * to the application client. A TAS bit set to one specifies that tasks 368 * to the application client. A TAS bit set to one specifies that tasks
371 * aborted by the actions of an I_T nexus other than the I_T nexus on 369 * aborted by the actions of an I_T nexus other than the I_T nexus on
372 * which the command was received shall be completed with TASK ABORTED 370 * which the command was received shall be completed with TASK ABORTED
373 * status (see SAM-4). 371 * status (see SAM-4).
374 */ 372 */
375 tas = dev->se_sub_dev->se_dev_attrib.emulate_tas; 373 tas = dev->se_sub_dev->se_dev_attrib.emulate_tas;
376 /* 374 /*
377 * Determine if this se_tmr is coming from a $FABRIC_MOD 375 * Determine if this se_tmr is coming from a $FABRIC_MOD
378 * or struct se_device passthrough.. 376 * or struct se_device passthrough..
379 */ 377 */
380 if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) { 378 if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
381 tmr_nacl = tmr->task_cmd->se_sess->se_node_acl; 379 tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
382 tmr_tpg = tmr->task_cmd->se_sess->se_tpg; 380 tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
383 if (tmr_nacl && tmr_tpg) { 381 if (tmr_nacl && tmr_tpg) {
384 pr_debug("LUN_RESET: TMR caller fabric: %s" 382 pr_debug("LUN_RESET: TMR caller fabric: %s"
385 " initiator port %s\n", 383 " initiator port %s\n",
386 tmr_tpg->se_tpg_tfo->get_fabric_name(), 384 tmr_tpg->se_tpg_tfo->get_fabric_name(),
387 tmr_nacl->initiatorname); 385 tmr_nacl->initiatorname);
388 } 386 }
389 } 387 }
390 pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n", 388 pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n",
391 (preempt_and_abort_list) ? "Preempt" : "TMR", 389 (preempt_and_abort_list) ? "Preempt" : "TMR",
392 dev->transport->name, tas); 390 dev->transport->name, tas);
393 391
394 core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list); 392 core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list);
395 core_tmr_drain_task_list(dev, prout_cmd, tmr_nacl, tas, 393 core_tmr_drain_task_list(dev, prout_cmd, tmr_nacl, tas,
396 preempt_and_abort_list); 394 preempt_and_abort_list);
397 core_tmr_drain_cmd_list(dev, prout_cmd, tmr_nacl, tas, 395 core_tmr_drain_cmd_list(dev, prout_cmd, tmr_nacl, tas,
398 preempt_and_abort_list); 396 preempt_and_abort_list);
399 /* 397 /*
400 * Clear any legacy SPC-2 reservation when called during 398 * Clear any legacy SPC-2 reservation when called during
401 * LOGICAL UNIT RESET 399 * LOGICAL UNIT RESET
402 */ 400 */
403 if (!preempt_and_abort_list && 401 if (!preempt_and_abort_list &&
404 (dev->dev_flags & DF_SPC2_RESERVATIONS)) { 402 (dev->dev_flags & DF_SPC2_RESERVATIONS)) {
405 spin_lock(&dev->dev_reservation_lock); 403 spin_lock(&dev->dev_reservation_lock);
406 dev->dev_reserved_node_acl = NULL; 404 dev->dev_reserved_node_acl = NULL;
407 dev->dev_flags &= ~DF_SPC2_RESERVATIONS; 405 dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
408 spin_unlock(&dev->dev_reservation_lock); 406 spin_unlock(&dev->dev_reservation_lock);
409 pr_debug("LUN_RESET: SCSI-2 Released reservation\n"); 407 pr_debug("LUN_RESET: SCSI-2 Released reservation\n");
410 } 408 }
411 409
412 spin_lock_irq(&dev->stats_lock); 410 spin_lock_irq(&dev->stats_lock);
413 dev->num_resets++; 411 dev->num_resets++;
414 spin_unlock_irq(&dev->stats_lock); 412 spin_unlock_irq(&dev->stats_lock);
415 413
416 pr_debug("LUN_RESET: %s for [%s] Complete\n", 414 pr_debug("LUN_RESET: %s for [%s] Complete\n",
417 (preempt_and_abort_list) ? "Preempt" : "TMR", 415 (preempt_and_abort_list) ? "Preempt" : "TMR",
418 dev->transport->name); 416 dev->transport->name);
419 return 0; 417 return 0;
420 } 418 }
421 419
422 420
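
The TAS handling above implements the SPC-4/SAM-4 rule quoted in the comment. As a minimal illustration of that rule in isolation, the sketch below maps the two TAS settings to the two possible abort dispositions; all names here are hypothetical, since the target core folds this decision into the drain helpers rather than a standalone function:

    #include <stdbool.h>

    enum abort_disposition {
            ABORT_SILENT,           /* TAS == 0: terminate with no response */
            ABORT_TASK_ABORTED      /* TAS == 1: complete with TASK ABORTED */
    };

    /* Hypothetical helper: decide how an aborted command is completed. */
    static enum abort_disposition tas_disposition(bool tas,
                                                  int aborting_nexus,
                                                  int receiving_nexus)
    {
            /*
             * TASK ABORTED status is reported only when TAS is set and
             * the abort came from an I_T nexus other than the one the
             * command was received on (SAM-4).
             */
            if (tas && aborting_nexus != receiving_nexus)
                    return ABORT_TASK_ABORTED;
            return ABORT_SILENT;
    }
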
drivers/target/target_core_tpg.c
1 /******************************************************************************* 1 /*******************************************************************************
2 * Filename: target_core_tpg.c 2 * Filename: target_core_tpg.c
3 * 3 *
4 * This file contains generic Target Portal Group related functions. 4 * This file contains generic Target Portal Group related functions.
5 * 5 *
6 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc. 6 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
7 * Copyright (c) 2005, 2006, 2007 SBE, Inc. 7 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
8 * Copyright (c) 2007-2010 Rising Tide Systems 8 * Copyright (c) 2007-2010 Rising Tide Systems
9 * Copyright (c) 2008-2010 Linux-iSCSI.org 9 * Copyright (c) 2008-2010 Linux-iSCSI.org
10 * 10 *
11 * Nicholas A. Bellinger <nab@kernel.org> 11 * Nicholas A. Bellinger <nab@kernel.org>
12 * 12 *
13 * This program is free software; you can redistribute it and/or modify 13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by 14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or 15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version. 16 * (at your option) any later version.
17 * 17 *
18 * This program is distributed in the hope that it will be useful, 18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of 19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details. 21 * GNU General Public License for more details.
22 * 22 *
23 * You should have received a copy of the GNU General Public License 23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software 24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 25 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
26 * 26 *
27 ******************************************************************************/ 27 ******************************************************************************/
28 28
29 #include <linux/net.h> 29 #include <linux/net.h>
30 #include <linux/string.h> 30 #include <linux/string.h>
31 #include <linux/timer.h> 31 #include <linux/timer.h>
32 #include <linux/slab.h> 32 #include <linux/slab.h>
33 #include <linux/spinlock.h> 33 #include <linux/spinlock.h>
34 #include <linux/in.h> 34 #include <linux/in.h>
35 #include <linux/export.h> 35 #include <linux/export.h>
36 #include <net/sock.h> 36 #include <net/sock.h>
37 #include <net/tcp.h> 37 #include <net/tcp.h>
38 #include <scsi/scsi.h> 38 #include <scsi/scsi.h>
39 #include <scsi/scsi_cmnd.h> 39 #include <scsi/scsi_cmnd.h>
40 40
41 #include <target/target_core_base.h> 41 #include <target/target_core_base.h>
42 #include <target/target_core_device.h> 42 #include <target/target_core_backend.h>
43 #include <target/target_core_tpg.h> 43 #include <target/target_core_fabric.h>
44 #include <target/target_core_transport.h>
45 #include <target/target_core_fabric_ops.h>
46 44
47 #include "target_core_internal.h" 45 #include "target_core_internal.h"
48 46
49 extern struct se_device *g_lun0_dev; 47 extern struct se_device *g_lun0_dev;
50 48
51 static DEFINE_SPINLOCK(tpg_lock); 49 static DEFINE_SPINLOCK(tpg_lock);
52 static LIST_HEAD(tpg_list); 50 static LIST_HEAD(tpg_list);
53 51
54 /* core_clear_initiator_node_from_tpg(): 52 /* core_clear_initiator_node_from_tpg():
55 * 53 *
56 * 54 *
57 */ 55 */
58 static void core_clear_initiator_node_from_tpg( 56 static void core_clear_initiator_node_from_tpg(
59 struct se_node_acl *nacl, 57 struct se_node_acl *nacl,
60 struct se_portal_group *tpg) 58 struct se_portal_group *tpg)
61 { 59 {
62 int i; 60 int i;
63 struct se_dev_entry *deve; 61 struct se_dev_entry *deve;
64 struct se_lun *lun; 62 struct se_lun *lun;
65 struct se_lun_acl *acl, *acl_tmp; 63 struct se_lun_acl *acl, *acl_tmp;
66 64
67 spin_lock_irq(&nacl->device_list_lock); 65 spin_lock_irq(&nacl->device_list_lock);
68 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { 66 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
69 deve = &nacl->device_list[i]; 67 deve = &nacl->device_list[i];
70 68
71 if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) 69 if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
72 continue; 70 continue;
73 71
74 if (!deve->se_lun) { 72 if (!deve->se_lun) {
75 pr_err("%s device entries device pointer is" 73 pr_err("%s device entries device pointer is"
76 " NULL, but Initiator has access.\n", 74 " NULL, but Initiator has access.\n",
77 tpg->se_tpg_tfo->get_fabric_name()); 75 tpg->se_tpg_tfo->get_fabric_name());
78 continue; 76 continue;
79 } 77 }
80 78
81 lun = deve->se_lun; 79 lun = deve->se_lun;
82 spin_unlock_irq(&nacl->device_list_lock); 80 spin_unlock_irq(&nacl->device_list_lock);
83 core_update_device_list_for_node(lun, NULL, deve->mapped_lun, 81 core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
84 TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0); 82 TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
85 83
86 spin_lock(&lun->lun_acl_lock); 84 spin_lock(&lun->lun_acl_lock);
87 list_for_each_entry_safe(acl, acl_tmp, 85 list_for_each_entry_safe(acl, acl_tmp,
88 &lun->lun_acl_list, lacl_list) { 86 &lun->lun_acl_list, lacl_list) {
89 if (!strcmp(acl->initiatorname, nacl->initiatorname) && 87 if (!strcmp(acl->initiatorname, nacl->initiatorname) &&
90 (acl->mapped_lun == deve->mapped_lun)) 88 (acl->mapped_lun == deve->mapped_lun))
91 break; 89 break;
92 } 90 }
93 91
94 if (!acl) { 92 if (!acl) {
95 pr_err("Unable to locate struct se_lun_acl for %s," 93 pr_err("Unable to locate struct se_lun_acl for %s,"
96 " mapped_lun: %u\n", nacl->initiatorname, 94 " mapped_lun: %u\n", nacl->initiatorname,
97 deve->mapped_lun); 95 deve->mapped_lun);
98 spin_unlock(&lun->lun_acl_lock); 96 spin_unlock(&lun->lun_acl_lock);
99 spin_lock_irq(&nacl->device_list_lock); 97 spin_lock_irq(&nacl->device_list_lock);
100 continue; 98 continue;
101 } 99 }
102 100
103 list_del(&acl->lacl_list); 101 list_del(&acl->lacl_list);
104 spin_unlock(&lun->lun_acl_lock); 102 spin_unlock(&lun->lun_acl_lock);
105 103
106 spin_lock_irq(&nacl->device_list_lock); 104 spin_lock_irq(&nacl->device_list_lock);
107 kfree(acl); 105 kfree(acl);
108 } 106 }
109 spin_unlock_irq(&nacl->device_list_lock); 107 spin_unlock_irq(&nacl->device_list_lock);
110 } 108 }
111 109
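
core_clear_initiator_node_from_tpg() above walks the per-ACL device list by index so that it can legally drop device_list_lock before taking the per-LUN lun_acl_lock and re-take it afterwards; list-pointer iteration would not survive the unlocked window. Reduced to a skeleton (the per-entry work is elided, as a sketch of the pattern only):

    spin_lock_irq(&nacl->device_list_lock);
    for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
            /* read what is needed from nacl->device_list[i] */
            spin_unlock_irq(&nacl->device_list_lock);
            /* per-entry work that takes lun->lun_acl_lock */
            spin_lock_irq(&nacl->device_list_lock);
    }
    spin_unlock_irq(&nacl->device_list_lock);
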
112 /* __core_tpg_get_initiator_node_acl(): 110 /* __core_tpg_get_initiator_node_acl():
113 * 111 *
114 * spin_lock_bh(&tpg->acl_node_lock); must be held when calling 112 * spin_lock_bh(&tpg->acl_node_lock); must be held when calling
115 */ 113 */
116 struct se_node_acl *__core_tpg_get_initiator_node_acl( 114 struct se_node_acl *__core_tpg_get_initiator_node_acl(
117 struct se_portal_group *tpg, 115 struct se_portal_group *tpg,
118 const char *initiatorname) 116 const char *initiatorname)
119 { 117 {
120 struct se_node_acl *acl; 118 struct se_node_acl *acl;
121 119
122 list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { 120 list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
123 if (!strcmp(acl->initiatorname, initiatorname)) 121 if (!strcmp(acl->initiatorname, initiatorname))
124 return acl; 122 return acl;
125 } 123 }
126 124
127 return NULL; 125 return NULL;
128 } 126 }
129 127
130 /* core_tpg_get_initiator_node_acl(): 128 /* core_tpg_get_initiator_node_acl():
131 * 129 *
132 * 130 *
133 */ 131 */
134 struct se_node_acl *core_tpg_get_initiator_node_acl( 132 struct se_node_acl *core_tpg_get_initiator_node_acl(
135 struct se_portal_group *tpg, 133 struct se_portal_group *tpg,
136 unsigned char *initiatorname) 134 unsigned char *initiatorname)
137 { 135 {
138 struct se_node_acl *acl; 136 struct se_node_acl *acl;
139 137
140 spin_lock_irq(&tpg->acl_node_lock); 138 spin_lock_irq(&tpg->acl_node_lock);
141 list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { 139 list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
142 if (!strcmp(acl->initiatorname, initiatorname) && 140 if (!strcmp(acl->initiatorname, initiatorname) &&
143 !acl->dynamic_node_acl) { 141 !acl->dynamic_node_acl) {
144 spin_unlock_irq(&tpg->acl_node_lock); 142 spin_unlock_irq(&tpg->acl_node_lock);
145 return acl; 143 return acl;
146 } 144 }
147 } 145 }
148 spin_unlock_irq(&tpg->acl_node_lock); 146 spin_unlock_irq(&tpg->acl_node_lock);
149 147
150 return NULL; 148 return NULL;
151 } 149 }
152 150
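
The two lookups above follow the usual locked/unlocked naming convention: the double-underscore variant requires the caller to hold tpg->acl_node_lock, while core_tpg_get_initiator_node_acl() takes the lock itself. A caller that must act on the result under the same lock, as core_tpg_add_initiator_node_acl() does later in this file, uses the __ variant; a minimal fragment, assuming tpg, acl and initiatorname are in scope:

    spin_lock_irq(&tpg->acl_node_lock);
    acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
    if (acl) {
            /* inspect or update acl while the list cannot change */
    }
    spin_unlock_irq(&tpg->acl_node_lock);
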
153 /* core_tpg_add_node_to_devs(): 151 /* core_tpg_add_node_to_devs():
154 * 152 *
155 * 153 *
156 */ 154 */
157 void core_tpg_add_node_to_devs( 155 void core_tpg_add_node_to_devs(
158 struct se_node_acl *acl, 156 struct se_node_acl *acl,
159 struct se_portal_group *tpg) 157 struct se_portal_group *tpg)
160 { 158 {
161 int i = 0; 159 int i = 0;
162 u32 lun_access = 0; 160 u32 lun_access = 0;
163 struct se_lun *lun; 161 struct se_lun *lun;
164 struct se_device *dev; 162 struct se_device *dev;
165 163
166 spin_lock(&tpg->tpg_lun_lock); 164 spin_lock(&tpg->tpg_lun_lock);
167 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { 165 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
168 lun = &tpg->tpg_lun_list[i]; 166 lun = &tpg->tpg_lun_list[i];
169 if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) 167 if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
170 continue; 168 continue;
171 169
172 spin_unlock(&tpg->tpg_lun_lock); 170 spin_unlock(&tpg->tpg_lun_lock);
173 171
174 dev = lun->lun_se_dev; 172 dev = lun->lun_se_dev;
175 /* 173 /*
176 * By default in LIO-Target $FABRIC_MOD, 174 * By default in LIO-Target $FABRIC_MOD,
177 * demo_mode_write_protect is ON, or READ_ONLY; 175 * demo_mode_write_protect is ON, or READ_ONLY;
178 */ 176 */
179 if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) { 177 if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
180 if (dev->dev_flags & DF_READ_ONLY) 178 if (dev->dev_flags & DF_READ_ONLY)
181 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; 179 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
182 else 180 else
183 lun_access = TRANSPORT_LUNFLAGS_READ_WRITE; 181 lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
184 } else { 182 } else {
185 /* 183 /*
186 * Allow only optical drives to issue R/W in default RO 184 * Allow only optical drives to issue R/W in default RO
187 * demo mode. 185 * demo mode.
188 */ 186 */
189 if (dev->transport->get_device_type(dev) == TYPE_DISK) 187 if (dev->transport->get_device_type(dev) == TYPE_DISK)
190 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; 188 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
191 else 189 else
192 lun_access = TRANSPORT_LUNFLAGS_READ_WRITE; 190 lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
193 } 191 }
194 192
195 pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s" 193 pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
196 " access for LUN in Demo Mode\n", 194 " access for LUN in Demo Mode\n",
197 tpg->se_tpg_tfo->get_fabric_name(), 195 tpg->se_tpg_tfo->get_fabric_name(),
198 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, 196 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
199 (lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ? 197 (lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
200 "READ-WRITE" : "READ-ONLY"); 198 "READ-WRITE" : "READ-ONLY");
201 199
202 core_update_device_list_for_node(lun, NULL, lun->unpacked_lun, 200 core_update_device_list_for_node(lun, NULL, lun->unpacked_lun,
203 lun_access, acl, tpg, 1); 201 lun_access, acl, tpg, 1);
204 spin_lock(&tpg->tpg_lun_lock); 202 spin_lock(&tpg->tpg_lun_lock);
205 } 203 }
206 spin_unlock(&tpg->tpg_lun_lock); 204 spin_unlock(&tpg->tpg_lun_lock);
207 } 205 }
208 206
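
core_tpg_add_node_to_devs() asks the fabric, via tpg_check_demo_mode_write_protect(), whether demo-mode mappings should default to read-only. A fabric module's implementation can be as small as reporting a per-TPG flag; struct my_tpg and its demo_write_protect field below are hypothetical stand-ins for the fabric's own TPG structure:

    /*
     * Hypothetical fabric callback: a non-zero return keeps demo-mode
     * TYPE_DISK LUNs READ-ONLY, as in the default path above.
     */
    static int my_tpg_check_demo_mode_write_protect(
            struct se_portal_group *se_tpg)
    {
            struct my_tpg *tpg = container_of(se_tpg, struct my_tpg, se_tpg);

            return tpg->demo_write_protect;
    }
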
209 /* core_set_queue_depth_for_node(): 207 /* core_set_queue_depth_for_node():
210 * 208 *
211 * 209 *
212 */ 210 */
213 static int core_set_queue_depth_for_node( 211 static int core_set_queue_depth_for_node(
214 struct se_portal_group *tpg, 212 struct se_portal_group *tpg,
215 struct se_node_acl *acl) 213 struct se_node_acl *acl)
216 { 214 {
217 if (!acl->queue_depth) { 215 if (!acl->queue_depth) {
218 pr_err("Queue depth for %s Initiator Node: %s is 0," 216 pr_err("Queue depth for %s Initiator Node: %s is 0,"
219 "defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(), 217 "defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
220 acl->initiatorname); 218 acl->initiatorname);
221 acl->queue_depth = 1; 219 acl->queue_depth = 1;
222 } 220 }
223 221
224 return 0; 222 return 0;
225 } 223 }
226 224
227 /* core_create_device_list_for_node(): 225 /* core_create_device_list_for_node():
228 * 226 *
229 * 227 *
230 */ 228 */
231 static int core_create_device_list_for_node(struct se_node_acl *nacl) 229 static int core_create_device_list_for_node(struct se_node_acl *nacl)
232 { 230 {
233 struct se_dev_entry *deve; 231 struct se_dev_entry *deve;
234 int i; 232 int i;
235 233
236 nacl->device_list = kzalloc(sizeof(struct se_dev_entry) * 234 nacl->device_list = kzalloc(sizeof(struct se_dev_entry) *
237 TRANSPORT_MAX_LUNS_PER_TPG, GFP_KERNEL); 235 TRANSPORT_MAX_LUNS_PER_TPG, GFP_KERNEL);
238 if (!nacl->device_list) { 236 if (!nacl->device_list) {
239 pr_err("Unable to allocate memory for" 237 pr_err("Unable to allocate memory for"
240 " struct se_node_acl->device_list\n"); 238 " struct se_node_acl->device_list\n");
241 return -ENOMEM; 239 return -ENOMEM;
242 } 240 }
243 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { 241 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
244 deve = &nacl->device_list[i]; 242 deve = &nacl->device_list[i];
245 243
246 atomic_set(&deve->ua_count, 0); 244 atomic_set(&deve->ua_count, 0);
247 atomic_set(&deve->pr_ref_count, 0); 245 atomic_set(&deve->pr_ref_count, 0);
248 spin_lock_init(&deve->ua_lock); 246 spin_lock_init(&deve->ua_lock);
249 INIT_LIST_HEAD(&deve->alua_port_list); 247 INIT_LIST_HEAD(&deve->alua_port_list);
250 INIT_LIST_HEAD(&deve->ua_list); 248 INIT_LIST_HEAD(&deve->ua_list);
251 } 249 }
252 250
253 return 0; 251 return 0;
254 } 252 }
255 253
256 /* core_tpg_check_initiator_node_acl() 254 /* core_tpg_check_initiator_node_acl()
257 * 255 *
258 * 256 *
259 */ 257 */
260 struct se_node_acl *core_tpg_check_initiator_node_acl( 258 struct se_node_acl *core_tpg_check_initiator_node_acl(
261 struct se_portal_group *tpg, 259 struct se_portal_group *tpg,
262 unsigned char *initiatorname) 260 unsigned char *initiatorname)
263 { 261 {
264 struct se_node_acl *acl; 262 struct se_node_acl *acl;
265 263
266 acl = core_tpg_get_initiator_node_acl(tpg, initiatorname); 264 acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
267 if (acl) 265 if (acl)
268 return acl; 266 return acl;
269 267
270 if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) 268 if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
271 return NULL; 269 return NULL;
272 270
273 acl = tpg->se_tpg_tfo->tpg_alloc_fabric_acl(tpg); 271 acl = tpg->se_tpg_tfo->tpg_alloc_fabric_acl(tpg);
274 if (!acl) 272 if (!acl)
275 return NULL; 273 return NULL;
276 274
277 INIT_LIST_HEAD(&acl->acl_list); 275 INIT_LIST_HEAD(&acl->acl_list);
278 INIT_LIST_HEAD(&acl->acl_sess_list); 276 INIT_LIST_HEAD(&acl->acl_sess_list);
279 spin_lock_init(&acl->device_list_lock); 277 spin_lock_init(&acl->device_list_lock);
280 spin_lock_init(&acl->nacl_sess_lock); 278 spin_lock_init(&acl->nacl_sess_lock);
281 atomic_set(&acl->acl_pr_ref_count, 0); 279 atomic_set(&acl->acl_pr_ref_count, 0);
282 acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg); 280 acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
283 snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); 281 snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
284 acl->se_tpg = tpg; 282 acl->se_tpg = tpg;
285 acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX); 283 acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
286 spin_lock_init(&acl->stats_lock); 284 spin_lock_init(&acl->stats_lock);
287 acl->dynamic_node_acl = 1; 285 acl->dynamic_node_acl = 1;
288 286
289 tpg->se_tpg_tfo->set_default_node_attributes(acl); 287 tpg->se_tpg_tfo->set_default_node_attributes(acl);
290 288
291 if (core_create_device_list_for_node(acl) < 0) { 289 if (core_create_device_list_for_node(acl) < 0) {
292 tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl); 290 tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
293 return NULL; 291 return NULL;
294 } 292 }
295 293
296 if (core_set_queue_depth_for_node(tpg, acl) < 0) { 294 if (core_set_queue_depth_for_node(tpg, acl) < 0) {
297 core_free_device_list_for_node(acl, tpg); 295 core_free_device_list_for_node(acl, tpg);
298 tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl); 296 tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
299 return NULL; 297 return NULL;
300 } 298 }
301 /* 299 /*
302 * Here we only create demo-mode MappedLUNs from the active 300 * Here we only create demo-mode MappedLUNs from the active
303 * TPG LUNs if the fabric is not explicitly asking for 301 * TPG LUNs if the fabric is not explicitly asking for
304 * tpg_check_demo_mode_login_only() == 1. 302 * tpg_check_demo_mode_login_only() == 1.
305 */ 303 */
306 if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only != NULL) && 304 if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only != NULL) &&
307 (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) == 1)) 305 (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) == 1))
308 do { ; } while (0); 306 do { ; } while (0);
309 else 307 else
310 core_tpg_add_node_to_devs(acl, tpg); 308 core_tpg_add_node_to_devs(acl, tpg);
311 309
312 spin_lock_irq(&tpg->acl_node_lock); 310 spin_lock_irq(&tpg->acl_node_lock);
313 list_add_tail(&acl->acl_list, &tpg->acl_node_list); 311 list_add_tail(&acl->acl_list, &tpg->acl_node_list);
314 tpg->num_node_acls++; 312 tpg->num_node_acls++;
315 spin_unlock_irq(&tpg->acl_node_lock); 313 spin_unlock_irq(&tpg->acl_node_lock);
316 314
317 pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s" 315 pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
318 " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(), 316 " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
319 tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth, 317 tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
320 tpg->se_tpg_tfo->get_fabric_name(), initiatorname); 318 tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
321 319
322 return acl; 320 return acl;
323 } 321 }
324 EXPORT_SYMBOL(core_tpg_check_initiator_node_acl); 322 EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
325 323
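
core_tpg_check_initiator_node_acl() is the session-establishment entry point for fabric modules: it returns an existing explicit ACL, builds a dynamic one when demo mode is enabled, and returns NULL otherwise, in which case the login should be rejected. A hedged sketch of a fabric login path follows; my_fabric_login and the choice of error code are illustrative only:

    static int my_fabric_login(struct se_portal_group *se_tpg,
                               unsigned char *initiatorname,
                               struct se_session *se_sess)
    {
            struct se_node_acl *acl;

            acl = core_tpg_check_initiator_node_acl(se_tpg, initiatorname);
            if (!acl)
                    return -EACCES; /* no ACL and demo mode disabled */

            se_sess->se_node_acl = acl;
            return 0;
    }
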
326 void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl) 324 void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
327 { 325 {
328 while (atomic_read(&nacl->acl_pr_ref_count) != 0) 326 while (atomic_read(&nacl->acl_pr_ref_count) != 0)
329 cpu_relax(); 327 cpu_relax();
330 } 328 }
331 329
332 void core_tpg_clear_object_luns(struct se_portal_group *tpg) 330 void core_tpg_clear_object_luns(struct se_portal_group *tpg)
333 { 331 {
334 int i, ret; 332 int i, ret;
335 struct se_lun *lun; 333 struct se_lun *lun;
336 334
337 spin_lock(&tpg->tpg_lun_lock); 335 spin_lock(&tpg->tpg_lun_lock);
338 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { 336 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
339 lun = &tpg->tpg_lun_list[i]; 337 lun = &tpg->tpg_lun_list[i];
340 338
341 if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) || 339 if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
342 (lun->lun_se_dev == NULL)) 340 (lun->lun_se_dev == NULL))
343 continue; 341 continue;
344 342
345 spin_unlock(&tpg->tpg_lun_lock); 343 spin_unlock(&tpg->tpg_lun_lock);
346 ret = core_dev_del_lun(tpg, lun->unpacked_lun); 344 ret = core_dev_del_lun(tpg, lun->unpacked_lun);
347 spin_lock(&tpg->tpg_lun_lock); 345 spin_lock(&tpg->tpg_lun_lock);
348 } 346 }
349 spin_unlock(&tpg->tpg_lun_lock); 347 spin_unlock(&tpg->tpg_lun_lock);
350 } 348 }
351 EXPORT_SYMBOL(core_tpg_clear_object_luns); 349 EXPORT_SYMBOL(core_tpg_clear_object_luns);
352 350
353 /* core_tpg_add_initiator_node_acl(): 351 /* core_tpg_add_initiator_node_acl():
354 * 352 *
355 * 353 *
356 */ 354 */
357 struct se_node_acl *core_tpg_add_initiator_node_acl( 355 struct se_node_acl *core_tpg_add_initiator_node_acl(
358 struct se_portal_group *tpg, 356 struct se_portal_group *tpg,
359 struct se_node_acl *se_nacl, 357 struct se_node_acl *se_nacl,
360 const char *initiatorname, 358 const char *initiatorname,
361 u32 queue_depth) 359 u32 queue_depth)
362 { 360 {
363 struct se_node_acl *acl = NULL; 361 struct se_node_acl *acl = NULL;
364 362
365 spin_lock_irq(&tpg->acl_node_lock); 363 spin_lock_irq(&tpg->acl_node_lock);
366 acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); 364 acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
367 if (acl) { 365 if (acl) {
368 if (acl->dynamic_node_acl) { 366 if (acl->dynamic_node_acl) {
369 acl->dynamic_node_acl = 0; 367 acl->dynamic_node_acl = 0;
370 pr_debug("%s_TPG[%u] - Replacing dynamic ACL" 368 pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
371 " for %s\n", tpg->se_tpg_tfo->get_fabric_name(), 369 " for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
372 tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname); 370 tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
373 spin_unlock_irq(&tpg->acl_node_lock); 371 spin_unlock_irq(&tpg->acl_node_lock);
374 /* 372 /*
375 * Release the locally allocated struct se_node_acl 373 * Release the locally allocated struct se_node_acl
376 * because core_tpg_add_initiator_node_acl() returned 374 * because core_tpg_add_initiator_node_acl() returned
377 * a pointer to an existing demo mode node ACL. 375 * a pointer to an existing demo mode node ACL.
378 */ 376 */
379 if (se_nacl) 377 if (se_nacl)
380 tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, 378 tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg,
381 se_nacl); 379 se_nacl);
382 goto done; 380 goto done;
383 } 381 }
384 382
385 pr_err("ACL entry for %s Initiator" 383 pr_err("ACL entry for %s Initiator"
386 " Node %s already exists for TPG %u, ignoring" 384 " Node %s already exists for TPG %u, ignoring"
387 " request.\n", tpg->se_tpg_tfo->get_fabric_name(), 385 " request.\n", tpg->se_tpg_tfo->get_fabric_name(),
388 initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); 386 initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
389 spin_unlock_irq(&tpg->acl_node_lock); 387 spin_unlock_irq(&tpg->acl_node_lock);
390 return ERR_PTR(-EEXIST); 388 return ERR_PTR(-EEXIST);
391 } 389 }
392 spin_unlock_irq(&tpg->acl_node_lock); 390 spin_unlock_irq(&tpg->acl_node_lock);
393 391
394 if (!se_nacl) { 392 if (!se_nacl) {
395 pr_err("struct se_node_acl pointer is NULL\n"); 393 pr_err("struct se_node_acl pointer is NULL\n");
396 return ERR_PTR(-EINVAL); 394 return ERR_PTR(-EINVAL);
397 } 395 }
398 /* 396 /*
399 * For v4.x logic the se_node_acl_s is hanging off a fabric 397 * For v4.x logic the se_node_acl_s is hanging off a fabric
400 * dependent structure allocated via 398 * dependent structure allocated via
401 * struct target_core_fabric_ops->fabric_make_nodeacl() 399 * struct target_core_fabric_ops->fabric_make_nodeacl()
402 */ 400 */
403 acl = se_nacl; 401 acl = se_nacl;
404 402
405 INIT_LIST_HEAD(&acl->acl_list); 403 INIT_LIST_HEAD(&acl->acl_list);
406 INIT_LIST_HEAD(&acl->acl_sess_list); 404 INIT_LIST_HEAD(&acl->acl_sess_list);
407 spin_lock_init(&acl->device_list_lock); 405 spin_lock_init(&acl->device_list_lock);
408 spin_lock_init(&acl->nacl_sess_lock); 406 spin_lock_init(&acl->nacl_sess_lock);
409 atomic_set(&acl->acl_pr_ref_count, 0); 407 atomic_set(&acl->acl_pr_ref_count, 0);
410 acl->queue_depth = queue_depth; 408 acl->queue_depth = queue_depth;
411 snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); 409 snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
412 acl->se_tpg = tpg; 410 acl->se_tpg = tpg;
413 acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX); 411 acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
414 spin_lock_init(&acl->stats_lock); 412 spin_lock_init(&acl->stats_lock);
415 413
416 tpg->se_tpg_tfo->set_default_node_attributes(acl); 414 tpg->se_tpg_tfo->set_default_node_attributes(acl);
417 415
418 if (core_create_device_list_for_node(acl) < 0) { 416 if (core_create_device_list_for_node(acl) < 0) {
419 tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl); 417 tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
420 return ERR_PTR(-ENOMEM); 418 return ERR_PTR(-ENOMEM);
421 } 419 }
422 420
423 if (core_set_queue_depth_for_node(tpg, acl) < 0) { 421 if (core_set_queue_depth_for_node(tpg, acl) < 0) {
424 core_free_device_list_for_node(acl, tpg); 422 core_free_device_list_for_node(acl, tpg);
425 tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl); 423 tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
426 return ERR_PTR(-EINVAL); 424 return ERR_PTR(-EINVAL);
427 } 425 }
428 426
429 spin_lock_irq(&tpg->acl_node_lock); 427 spin_lock_irq(&tpg->acl_node_lock);
430 list_add_tail(&acl->acl_list, &tpg->acl_node_list); 428 list_add_tail(&acl->acl_list, &tpg->acl_node_list);
431 tpg->num_node_acls++; 429 tpg->num_node_acls++;
432 spin_unlock_irq(&tpg->acl_node_lock); 430 spin_unlock_irq(&tpg->acl_node_lock);
433 431
434 done: 432 done:
435 pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s" 433 pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
436 " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(), 434 " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
437 tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth, 435 tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
438 tpg->se_tpg_tfo->get_fabric_name(), initiatorname); 436 tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
439 437
440 return acl; 438 return acl;
441 } 439 }
442 EXPORT_SYMBOL(core_tpg_add_initiator_node_acl); 440 EXPORT_SYMBOL(core_tpg_add_initiator_node_acl);
443 441
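
Unlike the demo-mode path, core_tpg_add_initiator_node_acl() reports failure through ERR_PTR() values (-EEXIST for a duplicate explicit ACL, -EINVAL or -ENOMEM for setup failures), so configfs callers check with IS_ERR(). A minimal hedged fragment, assuming se_nacl came from the fabric's fabric_make_nodeacl() allocation and name/queue_depth are in scope:

    acl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl,
                                          name, queue_depth);
    if (IS_ERR(acl))
            return PTR_ERR(acl);    /* -EEXIST, -EINVAL or -ENOMEM */
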
444 /* core_tpg_del_initiator_node_acl(): 442 /* core_tpg_del_initiator_node_acl():
445 * 443 *
446 * 444 *
447 */ 445 */
448 int core_tpg_del_initiator_node_acl( 446 int core_tpg_del_initiator_node_acl(
449 struct se_portal_group *tpg, 447 struct se_portal_group *tpg,
450 struct se_node_acl *acl, 448 struct se_node_acl *acl,
451 int force) 449 int force)
452 { 450 {
453 struct se_session *sess, *sess_tmp; 451 struct se_session *sess, *sess_tmp;
454 int dynamic_acl = 0; 452 int dynamic_acl = 0;
455 453
456 spin_lock_irq(&tpg->acl_node_lock); 454 spin_lock_irq(&tpg->acl_node_lock);
457 if (acl->dynamic_node_acl) { 455 if (acl->dynamic_node_acl) {
458 acl->dynamic_node_acl = 0; 456 acl->dynamic_node_acl = 0;
459 dynamic_acl = 1; 457 dynamic_acl = 1;
460 } 458 }
461 list_del(&acl->acl_list); 459 list_del(&acl->acl_list);
462 tpg->num_node_acls--; 460 tpg->num_node_acls--;
463 spin_unlock_irq(&tpg->acl_node_lock); 461 spin_unlock_irq(&tpg->acl_node_lock);
464 462
465 spin_lock_bh(&tpg->session_lock); 463 spin_lock_bh(&tpg->session_lock);
466 list_for_each_entry_safe(sess, sess_tmp, 464 list_for_each_entry_safe(sess, sess_tmp,
467 &tpg->tpg_sess_list, sess_list) { 465 &tpg->tpg_sess_list, sess_list) {
468 if (sess->se_node_acl != acl) 466 if (sess->se_node_acl != acl)
469 continue; 467 continue;
470 /* 468 /*
471 * Determine if the session needs to be closed by our context. 469 * Determine if the session needs to be closed by our context.
472 */ 470 */
473 if (!tpg->se_tpg_tfo->shutdown_session(sess)) 471 if (!tpg->se_tpg_tfo->shutdown_session(sess))
474 continue; 472 continue;
475 473
476 spin_unlock_bh(&tpg->session_lock); 474 spin_unlock_bh(&tpg->session_lock);
477 /* 475 /*
478 * If the $FABRIC_MOD session for the Initiator Node ACL exists, 476 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
479 * forcefully shutdown the $FABRIC_MOD session/nexus. 477 * forcefully shutdown the $FABRIC_MOD session/nexus.
480 */ 478 */
481 tpg->se_tpg_tfo->close_session(sess); 479 tpg->se_tpg_tfo->close_session(sess);
482 480
483 spin_lock_bh(&tpg->session_lock); 481 spin_lock_bh(&tpg->session_lock);
484 } 482 }
485 spin_unlock_bh(&tpg->session_lock); 483 spin_unlock_bh(&tpg->session_lock);
486 484
487 core_tpg_wait_for_nacl_pr_ref(acl); 485 core_tpg_wait_for_nacl_pr_ref(acl);
488 core_clear_initiator_node_from_tpg(acl, tpg); 486 core_clear_initiator_node_from_tpg(acl, tpg);
489 core_free_device_list_for_node(acl, tpg); 487 core_free_device_list_for_node(acl, tpg);
490 488
491 pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s" 489 pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
492 " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(), 490 " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
493 tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth, 491 tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
494 tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname); 492 tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);
495 493
496 return 0; 494 return 0;
497 } 495 }
498 EXPORT_SYMBOL(core_tpg_del_initiator_node_acl); 496 EXPORT_SYMBOL(core_tpg_del_initiator_node_acl);
499 497
500 /* core_tpg_set_initiator_node_queue_depth(): 498 /* core_tpg_set_initiator_node_queue_depth():
501 * 499 *
502 * 500 *
503 */ 501 */
504 int core_tpg_set_initiator_node_queue_depth( 502 int core_tpg_set_initiator_node_queue_depth(
505 struct se_portal_group *tpg, 503 struct se_portal_group *tpg,
506 unsigned char *initiatorname, 504 unsigned char *initiatorname,
507 u32 queue_depth, 505 u32 queue_depth,
508 int force) 506 int force)
509 { 507 {
510 struct se_session *sess, *init_sess = NULL; 508 struct se_session *sess, *init_sess = NULL;
511 struct se_node_acl *acl; 509 struct se_node_acl *acl;
512 int dynamic_acl = 0; 510 int dynamic_acl = 0;
513 511
514 spin_lock_irq(&tpg->acl_node_lock); 512 spin_lock_irq(&tpg->acl_node_lock);
515 acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); 513 acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
516 if (!acl) { 514 if (!acl) {
517 pr_err("Access Control List entry for %s Initiator" 515 pr_err("Access Control List entry for %s Initiator"
518 " Node %s does not exists for TPG %hu, ignoring" 516 " Node %s does not exists for TPG %hu, ignoring"
519 " request.\n", tpg->se_tpg_tfo->get_fabric_name(), 517 " request.\n", tpg->se_tpg_tfo->get_fabric_name(),
520 initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); 518 initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
521 spin_unlock_irq(&tpg->acl_node_lock); 519 spin_unlock_irq(&tpg->acl_node_lock);
522 return -ENODEV; 520 return -ENODEV;
523 } 521 }
524 if (acl->dynamic_node_acl) { 522 if (acl->dynamic_node_acl) {
525 acl->dynamic_node_acl = 0; 523 acl->dynamic_node_acl = 0;
526 dynamic_acl = 1; 524 dynamic_acl = 1;
527 } 525 }
528 spin_unlock_irq(&tpg->acl_node_lock); 526 spin_unlock_irq(&tpg->acl_node_lock);
529 527
530 spin_lock_bh(&tpg->session_lock); 528 spin_lock_bh(&tpg->session_lock);
531 list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) { 529 list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
532 if (sess->se_node_acl != acl) 530 if (sess->se_node_acl != acl)
533 continue; 531 continue;
534 532
535 if (!force) { 533 if (!force) {
536 pr_err("Unable to change queue depth for %s" 534 pr_err("Unable to change queue depth for %s"
537 " Initiator Node: %s while session is" 535 " Initiator Node: %s while session is"
538 " operational. To forcefully change the queue" 536 " operational. To forcefully change the queue"
539 " depth and force session reinstatement" 537 " depth and force session reinstatement"
540 " use the \"force=1\" parameter.\n", 538 " use the \"force=1\" parameter.\n",
541 tpg->se_tpg_tfo->get_fabric_name(), initiatorname); 539 tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
542 spin_unlock_bh(&tpg->session_lock); 540 spin_unlock_bh(&tpg->session_lock);
543 541
544 spin_lock_irq(&tpg->acl_node_lock); 542 spin_lock_irq(&tpg->acl_node_lock);
545 if (dynamic_acl) 543 if (dynamic_acl)
546 acl->dynamic_node_acl = 1; 544 acl->dynamic_node_acl = 1;
547 spin_unlock_irq(&tpg->acl_node_lock); 545 spin_unlock_irq(&tpg->acl_node_lock);
548 return -EEXIST; 546 return -EEXIST;
549 } 547 }
550 /* 548 /*
551 * Determine if the session needs to be closed by our context. 549 * Determine if the session needs to be closed by our context.
552 */ 550 */
553 if (!tpg->se_tpg_tfo->shutdown_session(sess)) 551 if (!tpg->se_tpg_tfo->shutdown_session(sess))
554 continue; 552 continue;
555 553
556 init_sess = sess; 554 init_sess = sess;
557 break; 555 break;
558 } 556 }
559 557
560 /* 558 /*
561 * User has requested to change the queue depth for an Initiator Node. 559 * User has requested to change the queue depth for an Initiator Node.
562 * Change the value in the Node's struct se_node_acl, and call 560 * Change the value in the Node's struct se_node_acl, and call
563 * core_set_queue_depth_for_node() to add the requested queue depth. 561 * core_set_queue_depth_for_node() to add the requested queue depth.
564 * 562 *
565 * Finally call tpg->se_tpg_tfo->close_session() to force session 563 * Finally call tpg->se_tpg_tfo->close_session() to force session
566 * reinstatement to occur if there is an active session for the 564 * reinstatement to occur if there is an active session for the
567 * $FABRIC_MOD Initiator Node in question. 565 * $FABRIC_MOD Initiator Node in question.
568 */ 566 */
569 acl->queue_depth = queue_depth; 567 acl->queue_depth = queue_depth;
570 568
571 if (core_set_queue_depth_for_node(tpg, acl) < 0) { 569 if (core_set_queue_depth_for_node(tpg, acl) < 0) {
572 spin_unlock_bh(&tpg->session_lock); 570 spin_unlock_bh(&tpg->session_lock);
573 /* 571 /*
574 * Force session reinstatement if 572 * Force session reinstatement if
575 * core_set_queue_depth_for_node() failed, because we assume 573 * core_set_queue_depth_for_node() failed, because we assume
576 * the $FABRIC_MOD has already set the session reinstatement 574 * the $FABRIC_MOD has already set the session reinstatement
577 * bit from tpg->se_tpg_tfo->shutdown_session() called above. 575 * bit from tpg->se_tpg_tfo->shutdown_session() called above.
578 */ 576 */
579 if (init_sess) 577 if (init_sess)
580 tpg->se_tpg_tfo->close_session(init_sess); 578 tpg->se_tpg_tfo->close_session(init_sess);
581 579
582 spin_lock_irq(&tpg->acl_node_lock); 580 spin_lock_irq(&tpg->acl_node_lock);
583 if (dynamic_acl) 581 if (dynamic_acl)
584 acl->dynamic_node_acl = 1; 582 acl->dynamic_node_acl = 1;
585 spin_unlock_irq(&tpg->acl_node_lock); 583 spin_unlock_irq(&tpg->acl_node_lock);
586 return -EINVAL; 584 return -EINVAL;
587 } 585 }
588 spin_unlock_bh(&tpg->session_lock); 586 spin_unlock_bh(&tpg->session_lock);
589 /* 587 /*
590 * If the $FABRIC_MOD session for the Initiator Node ACL exists, 588 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
591 * forcefully shutdown the $FABRIC_MOD session/nexus. 589 * forcefully shutdown the $FABRIC_MOD session/nexus.
592 */ 590 */
593 if (init_sess) 591 if (init_sess)
594 tpg->se_tpg_tfo->close_session(init_sess); 592 tpg->se_tpg_tfo->close_session(init_sess);
595 593
596 pr_debug("Successfully changed queue depth to: %d for Initiator" 594 pr_debug("Successfully changed queue depth to: %d for Initiator"
597 " Node: %s on %s Target Portal Group: %u\n", queue_depth, 595 " Node: %s on %s Target Portal Group: %u\n", queue_depth,
598 initiatorname, tpg->se_tpg_tfo->get_fabric_name(), 596 initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
599 tpg->se_tpg_tfo->tpg_get_tag(tpg)); 597 tpg->se_tpg_tfo->tpg_get_tag(tpg));
600 598
601 spin_lock_irq(&tpg->acl_node_lock); 599 spin_lock_irq(&tpg->acl_node_lock);
602 if (dynamic_acl) 600 if (dynamic_acl)
603 acl->dynamic_node_acl = 1; 601 acl->dynamic_node_acl = 1;
604 spin_unlock_irq(&tpg->acl_node_lock); 602 spin_unlock_irq(&tpg->acl_node_lock);
605 603
606 return 0; 604 return 0;
607 } 605 }
608 EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth); 606 EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
609 607
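
Note the -EEXIST above: without force, the depth change is refused while a session is active. A caller that wants the documented force semantics can retry, as in this hedged fragment (new_depth and the retry policy are illustrative):

    ret = core_tpg_set_initiator_node_queue_depth(tpg, initiatorname,
                                                  new_depth, 0);
    if (ret == -EEXIST)
            /* active session: force reinstatement and retry */
            ret = core_tpg_set_initiator_node_queue_depth(tpg, initiatorname,
                                                          new_depth, 1);
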
610 static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg) 608 static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
611 { 609 {
612 /* Set in core_dev_setup_virtual_lun0() */ 610 /* Set in core_dev_setup_virtual_lun0() */
613 struct se_device *dev = g_lun0_dev; 611 struct se_device *dev = g_lun0_dev;
614 struct se_lun *lun = &se_tpg->tpg_virt_lun0; 612 struct se_lun *lun = &se_tpg->tpg_virt_lun0;
615 u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; 613 u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
616 int ret; 614 int ret;
617 615
618 lun->unpacked_lun = 0; 616 lun->unpacked_lun = 0;
619 lun->lun_status = TRANSPORT_LUN_STATUS_FREE; 617 lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
620 atomic_set(&lun->lun_acl_count, 0); 618 atomic_set(&lun->lun_acl_count, 0);
621 init_completion(&lun->lun_shutdown_comp); 619 init_completion(&lun->lun_shutdown_comp);
622 INIT_LIST_HEAD(&lun->lun_acl_list); 620 INIT_LIST_HEAD(&lun->lun_acl_list);
623 INIT_LIST_HEAD(&lun->lun_cmd_list); 621 INIT_LIST_HEAD(&lun->lun_cmd_list);
624 spin_lock_init(&lun->lun_acl_lock); 622 spin_lock_init(&lun->lun_acl_lock);
625 spin_lock_init(&lun->lun_cmd_lock); 623 spin_lock_init(&lun->lun_cmd_lock);
626 spin_lock_init(&lun->lun_sep_lock); 624 spin_lock_init(&lun->lun_sep_lock);
627 625
628 ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev); 626 ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
629 if (ret < 0) 627 if (ret < 0)
630 return ret; 628 return ret;
631 629
632 return 0; 630 return 0;
633 } 631 }
634 632
635 static void core_tpg_release_virtual_lun0(struct se_portal_group *se_tpg) 633 static void core_tpg_release_virtual_lun0(struct se_portal_group *se_tpg)
636 { 634 {
637 struct se_lun *lun = &se_tpg->tpg_virt_lun0; 635 struct se_lun *lun = &se_tpg->tpg_virt_lun0;
638 636
639 core_tpg_post_dellun(se_tpg, lun); 637 core_tpg_post_dellun(se_tpg, lun);
640 } 638 }
641 639
642 int core_tpg_register( 640 int core_tpg_register(
643 struct target_core_fabric_ops *tfo, 641 struct target_core_fabric_ops *tfo,
644 struct se_wwn *se_wwn, 642 struct se_wwn *se_wwn,
645 struct se_portal_group *se_tpg, 643 struct se_portal_group *se_tpg,
646 void *tpg_fabric_ptr, 644 void *tpg_fabric_ptr,
647 int se_tpg_type) 645 int se_tpg_type)
648 { 646 {
649 struct se_lun *lun; 647 struct se_lun *lun;
650 u32 i; 648 u32 i;
651 649
652 se_tpg->tpg_lun_list = kzalloc((sizeof(struct se_lun) * 650 se_tpg->tpg_lun_list = kzalloc((sizeof(struct se_lun) *
653 TRANSPORT_MAX_LUNS_PER_TPG), GFP_KERNEL); 651 TRANSPORT_MAX_LUNS_PER_TPG), GFP_KERNEL);
654 if (!se_tpg->tpg_lun_list) { 652 if (!se_tpg->tpg_lun_list) {
655 pr_err("Unable to allocate struct se_portal_group->" 653 pr_err("Unable to allocate struct se_portal_group->"
656 "tpg_lun_list\n"); 654 "tpg_lun_list\n");
657 return -ENOMEM; 655 return -ENOMEM;
658 } 656 }
659 657
660 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { 658 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
661 lun = &se_tpg->tpg_lun_list[i]; 659 lun = &se_tpg->tpg_lun_list[i];
662 lun->unpacked_lun = i; 660 lun->unpacked_lun = i;
663 lun->lun_status = TRANSPORT_LUN_STATUS_FREE; 661 lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
664 atomic_set(&lun->lun_acl_count, 0); 662 atomic_set(&lun->lun_acl_count, 0);
665 init_completion(&lun->lun_shutdown_comp); 663 init_completion(&lun->lun_shutdown_comp);
666 INIT_LIST_HEAD(&lun->lun_acl_list); 664 INIT_LIST_HEAD(&lun->lun_acl_list);
667 INIT_LIST_HEAD(&lun->lun_cmd_list); 665 INIT_LIST_HEAD(&lun->lun_cmd_list);
668 spin_lock_init(&lun->lun_acl_lock); 666 spin_lock_init(&lun->lun_acl_lock);
669 spin_lock_init(&lun->lun_cmd_lock); 667 spin_lock_init(&lun->lun_cmd_lock);
670 spin_lock_init(&lun->lun_sep_lock); 668 spin_lock_init(&lun->lun_sep_lock);
671 } 669 }
672 670
673 se_tpg->se_tpg_type = se_tpg_type; 671 se_tpg->se_tpg_type = se_tpg_type;
674 se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr; 672 se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr;
675 se_tpg->se_tpg_tfo = tfo; 673 se_tpg->se_tpg_tfo = tfo;
676 se_tpg->se_tpg_wwn = se_wwn; 674 se_tpg->se_tpg_wwn = se_wwn;
677 atomic_set(&se_tpg->tpg_pr_ref_count, 0); 675 atomic_set(&se_tpg->tpg_pr_ref_count, 0);
678 INIT_LIST_HEAD(&se_tpg->acl_node_list); 676 INIT_LIST_HEAD(&se_tpg->acl_node_list);
679 INIT_LIST_HEAD(&se_tpg->se_tpg_node); 677 INIT_LIST_HEAD(&se_tpg->se_tpg_node);
680 INIT_LIST_HEAD(&se_tpg->tpg_sess_list); 678 INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
681 spin_lock_init(&se_tpg->acl_node_lock); 679 spin_lock_init(&se_tpg->acl_node_lock);
682 spin_lock_init(&se_tpg->session_lock); 680 spin_lock_init(&se_tpg->session_lock);
683 spin_lock_init(&se_tpg->tpg_lun_lock); 681 spin_lock_init(&se_tpg->tpg_lun_lock);
684 682
685 if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) { 683 if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
686 if (core_tpg_setup_virtual_lun0(se_tpg) < 0) { 684 if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
687 kfree(se_tpg); 685 kfree(se_tpg);
688 return -ENOMEM; 686 return -ENOMEM;
689 } 687 }
690 } 688 }
691 689
692 spin_lock_bh(&tpg_lock); 690 spin_lock_bh(&tpg_lock);
693 list_add_tail(&se_tpg->se_tpg_node, &tpg_list); 691 list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
694 spin_unlock_bh(&tpg_lock); 692 spin_unlock_bh(&tpg_lock);
695 693
696 pr_debug("TARGET_CORE[%s]: Allocated %s struct se_portal_group for" 694 pr_debug("TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
697 " endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(), 695 " endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
698 (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ? 696 (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
699 "Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ? 697 "Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
700 "None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg)); 698 "None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg));
701 699
702 return 0; 700 return 0;
703 } 701 }
704 EXPORT_SYMBOL(core_tpg_register); 702 EXPORT_SYMBOL(core_tpg_register);
705 703
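
core_tpg_register() is what a fabric module calls from its configfs make_tpg callback once its own TPG structure exists; only TRANSPORT_TPG_TYPE_NORMAL TPGs get the virtual LUN 0 setup above. A hedged fragment with hypothetical my_fabric names, assuming wwn and a freshly allocated struct my_tpg are in scope:

    ret = core_tpg_register(&my_fabric_ops, wwn, &tpg->se_tpg,
                            tpg, TRANSPORT_TPG_TYPE_NORMAL);
    if (ret < 0) {
            kfree(tpg);
            return ERR_PTR(ret);
    }
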
706 int core_tpg_deregister(struct se_portal_group *se_tpg) 704 int core_tpg_deregister(struct se_portal_group *se_tpg)
707 { 705 {
708 struct se_node_acl *nacl, *nacl_tmp; 706 struct se_node_acl *nacl, *nacl_tmp;
709 707
710 pr_debug("TARGET_CORE[%s]: Deallocating %s struct se_portal_group" 708 pr_debug("TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
711 " for endpoint: %s Portal Tag %u\n", 709 " for endpoint: %s Portal Tag %u\n",
712 (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ? 710 (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
713 "Normal" : "Discovery", se_tpg->se_tpg_tfo->get_fabric_name(), 711 "Normal" : "Discovery", se_tpg->se_tpg_tfo->get_fabric_name(),
714 se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg), 712 se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
715 se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg)); 713 se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
716 714
717 spin_lock_bh(&tpg_lock); 715 spin_lock_bh(&tpg_lock);
718 list_del(&se_tpg->se_tpg_node); 716 list_del(&se_tpg->se_tpg_node);
719 spin_unlock_bh(&tpg_lock); 717 spin_unlock_bh(&tpg_lock);
720 718
721 while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0) 719 while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
722 cpu_relax(); 720 cpu_relax();
723 /* 721 /*
724 * Release any remaining demo-mode generated se_node_acl that have 722 * Release any remaining demo-mode generated se_node_acl that have
725 * not been released because of TFO->tpg_check_demo_mode_cache() == 1 723 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
726 * in transport_deregister_session(). 724 * in transport_deregister_session().
727 */ 725 */
728 spin_lock_irq(&se_tpg->acl_node_lock); 726 spin_lock_irq(&se_tpg->acl_node_lock);
729 list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list, 727 list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
730 acl_list) { 728 acl_list) {
731 list_del(&nacl->acl_list); 729 list_del(&nacl->acl_list);
732 se_tpg->num_node_acls--; 730 se_tpg->num_node_acls--;
733 spin_unlock_irq(&se_tpg->acl_node_lock); 731 spin_unlock_irq(&se_tpg->acl_node_lock);
734 732
735 core_tpg_wait_for_nacl_pr_ref(nacl); 733 core_tpg_wait_for_nacl_pr_ref(nacl);
736 core_free_device_list_for_node(nacl, se_tpg); 734 core_free_device_list_for_node(nacl, se_tpg);
737 se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl); 735 se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl);
738 736
739 spin_lock_irq(&se_tpg->acl_node_lock); 737 spin_lock_irq(&se_tpg->acl_node_lock);
740 } 738 }
741 spin_unlock_irq(&se_tpg->acl_node_lock); 739 spin_unlock_irq(&se_tpg->acl_node_lock);
742 740
743 if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) 741 if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
744 core_tpg_release_virtual_lun0(se_tpg); 742 core_tpg_release_virtual_lun0(se_tpg);
745 743
746 se_tpg->se_tpg_fabric_ptr = NULL; 744 se_tpg->se_tpg_fabric_ptr = NULL;
747 kfree(se_tpg->tpg_lun_list); 745 kfree(se_tpg->tpg_lun_list);
748 return 0; 746 return 0;
749 } 747 }
750 EXPORT_SYMBOL(core_tpg_deregister); 748 EXPORT_SYMBOL(core_tpg_deregister);
751 749
752 struct se_lun *core_tpg_pre_addlun( 750 struct se_lun *core_tpg_pre_addlun(
753 struct se_portal_group *tpg, 751 struct se_portal_group *tpg,
754 u32 unpacked_lun) 752 u32 unpacked_lun)
755 { 753 {
756 struct se_lun *lun; 754 struct se_lun *lun;
757 755
758 if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { 756 if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
759 pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG" 757 pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
760 "-1: %u for Target Portal Group: %u\n", 758 "-1: %u for Target Portal Group: %u\n",
761 tpg->se_tpg_tfo->get_fabric_name(), 759 tpg->se_tpg_tfo->get_fabric_name(),
762 unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1, 760 unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
763 tpg->se_tpg_tfo->tpg_get_tag(tpg)); 761 tpg->se_tpg_tfo->tpg_get_tag(tpg));
764 return ERR_PTR(-EOVERFLOW); 762 return ERR_PTR(-EOVERFLOW);
765 } 763 }
766 764
767 spin_lock(&tpg->tpg_lun_lock); 765 spin_lock(&tpg->tpg_lun_lock);
768 lun = &tpg->tpg_lun_list[unpacked_lun]; 766 lun = &tpg->tpg_lun_list[unpacked_lun];
769 if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) { 767 if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
770 pr_err("TPG Logical Unit Number: %u is already active" 768 pr_err("TPG Logical Unit Number: %u is already active"
771 " on %s Target Portal Group: %u, ignoring request.\n", 769 " on %s Target Portal Group: %u, ignoring request.\n",
772 unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(), 770 unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(),
773 tpg->se_tpg_tfo->tpg_get_tag(tpg)); 771 tpg->se_tpg_tfo->tpg_get_tag(tpg));
774 spin_unlock(&tpg->tpg_lun_lock); 772 spin_unlock(&tpg->tpg_lun_lock);
775 return ERR_PTR(-EINVAL); 773 return ERR_PTR(-EINVAL);
776 } 774 }
777 spin_unlock(&tpg->tpg_lun_lock); 775 spin_unlock(&tpg->tpg_lun_lock);
778 776
779 return lun; 777 return lun;
780 } 778 }
781 779
782 int core_tpg_post_addlun( 780 int core_tpg_post_addlun(
783 struct se_portal_group *tpg, 781 struct se_portal_group *tpg,
784 struct se_lun *lun, 782 struct se_lun *lun,
785 u32 lun_access, 783 u32 lun_access,
786 void *lun_ptr) 784 void *lun_ptr)
787 { 785 {
788 int ret; 786 int ret;
789 787
790 ret = core_dev_export(lun_ptr, tpg, lun); 788 ret = core_dev_export(lun_ptr, tpg, lun);
791 if (ret < 0) 789 if (ret < 0)
792 return ret; 790 return ret;
793 791
794 spin_lock(&tpg->tpg_lun_lock); 792 spin_lock(&tpg->tpg_lun_lock);
795 lun->lun_access = lun_access; 793 lun->lun_access = lun_access;
796 lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE; 794 lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
797 spin_unlock(&tpg->tpg_lun_lock); 795 spin_unlock(&tpg->tpg_lun_lock);
798 796
799 return 0; 797 return 0;
800 } 798 }
801 799
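
LUN setup is deliberately two-phase: core_tpg_pre_addlun() only validates the index and that the slot is free, while core_tpg_post_addlun() exports the backend device and flips lun_status to ACTIVE, so the caller can do fabric-specific work in between. A hedged sketch of the calling sequence, assuming dev and unpacked_lun are in scope:

    lun = core_tpg_pre_addlun(tpg, unpacked_lun);
    if (IS_ERR(lun))
            return PTR_ERR(lun);

    /* fabric-specific setup may go here, before the LUN goes live */

    ret = core_tpg_post_addlun(tpg, lun, TRANSPORT_LUNFLAGS_READ_WRITE, dev);
    if (ret < 0)
            return ret;
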
802 static void core_tpg_shutdown_lun( 800 static void core_tpg_shutdown_lun(
803 struct se_portal_group *tpg, 801 struct se_portal_group *tpg,
804 struct se_lun *lun) 802 struct se_lun *lun)
805 { 803 {
806 core_clear_lun_from_tpg(lun, tpg); 804 core_clear_lun_from_tpg(lun, tpg);
807 transport_clear_lun_from_sessions(lun); 805 transport_clear_lun_from_sessions(lun);
808 } 806 }
809 807
810 struct se_lun *core_tpg_pre_dellun( 808 struct se_lun *core_tpg_pre_dellun(
811 struct se_portal_group *tpg, 809 struct se_portal_group *tpg,
812 u32 unpacked_lun, 810 u32 unpacked_lun,
813 int *ret) 811 int *ret)
814 { 812 {
815 struct se_lun *lun; 813 struct se_lun *lun;
816 814
817 if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { 815 if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
818 pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG" 816 pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
819 "-1: %u for Target Portal Group: %u\n", 817 "-1: %u for Target Portal Group: %u\n",
820 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, 818 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
821 TRANSPORT_MAX_LUNS_PER_TPG-1, 819 TRANSPORT_MAX_LUNS_PER_TPG-1,
822 tpg->se_tpg_tfo->tpg_get_tag(tpg)); 820 tpg->se_tpg_tfo->tpg_get_tag(tpg));
823 return ERR_PTR(-EOVERFLOW); 821 return ERR_PTR(-EOVERFLOW);
824 } 822 }
825 823
826 spin_lock(&tpg->tpg_lun_lock); 824 spin_lock(&tpg->tpg_lun_lock);
827 lun = &tpg->tpg_lun_list[unpacked_lun]; 825 lun = &tpg->tpg_lun_list[unpacked_lun];
828 if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) { 826 if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
829 pr_err("%s Logical Unit Number: %u is not active on" 827 pr_err("%s Logical Unit Number: %u is not active on"
830 " Target Portal Group: %u, ignoring request.\n", 828 " Target Portal Group: %u, ignoring request.\n",
831 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, 829 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
832 tpg->se_tpg_tfo->tpg_get_tag(tpg)); 830 tpg->se_tpg_tfo->tpg_get_tag(tpg));
833 spin_unlock(&tpg->tpg_lun_lock); 831 spin_unlock(&tpg->tpg_lun_lock);
834 return ERR_PTR(-ENODEV); 832 return ERR_PTR(-ENODEV);
835 } 833 }
836 spin_unlock(&tpg->tpg_lun_lock); 834 spin_unlock(&tpg->tpg_lun_lock);
837 835
838 return lun; 836 return lun;
839 } 837 }
840 838
841 int core_tpg_post_dellun( 839 int core_tpg_post_dellun(
842 struct se_portal_group *tpg, 840 struct se_portal_group *tpg,
843 struct se_lun *lun) 841 struct se_lun *lun)
844 { 842 {
845 core_tpg_shutdown_lun(tpg, lun); 843 core_tpg_shutdown_lun(tpg, lun);
846 844
847 core_dev_unexport(lun->lun_se_dev, tpg, lun); 845 core_dev_unexport(lun->lun_se_dev, tpg, lun);
848 846
849 spin_lock(&tpg->tpg_lun_lock); 847 spin_lock(&tpg->tpg_lun_lock);
850 lun->lun_status = TRANSPORT_LUN_STATUS_FREE; 848 lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
851 spin_unlock(&tpg->tpg_lun_lock); 849 spin_unlock(&tpg->tpg_lun_lock);
852 850
853 return 0; 851 return 0;
854 } 852 }
855 853
drivers/target/target_core_transport.c
1 /******************************************************************************* 1 /*******************************************************************************
2 * Filename: target_core_transport.c 2 * Filename: target_core_transport.c
3 * 3 *
4 * This file contains the Generic Target Engine Core. 4 * This file contains the Generic Target Engine Core.
5 * 5 *
6 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc. 6 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
7 * Copyright (c) 2005, 2006, 2007 SBE, Inc. 7 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
8 * Copyright (c) 2007-2010 Rising Tide Systems 8 * Copyright (c) 2007-2010 Rising Tide Systems
9 * Copyright (c) 2008-2010 Linux-iSCSI.org 9 * Copyright (c) 2008-2010 Linux-iSCSI.org
10 * 10 *
11 * Nicholas A. Bellinger <nab@kernel.org> 11 * Nicholas A. Bellinger <nab@kernel.org>
12 * 12 *
13 * This program is free software; you can redistribute it and/or modify 13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by 14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or 15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version. 16 * (at your option) any later version.
17 * 17 *
18 * This program is distributed in the hope that it will be useful, 18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of 19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details. 21 * GNU General Public License for more details.
22 * 22 *
23 * You should have received a copy of the GNU General Public License 23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software 24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 25 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
26 * 26 *
27 ******************************************************************************/ 27 ******************************************************************************/
28 28
29 #include <linux/net.h> 29 #include <linux/net.h>
30 #include <linux/delay.h> 30 #include <linux/delay.h>
31 #include <linux/string.h> 31 #include <linux/string.h>
32 #include <linux/timer.h> 32 #include <linux/timer.h>
33 #include <linux/slab.h> 33 #include <linux/slab.h>
34 #include <linux/blkdev.h> 34 #include <linux/blkdev.h>
35 #include <linux/spinlock.h> 35 #include <linux/spinlock.h>
36 #include <linux/kthread.h> 36 #include <linux/kthread.h>
37 #include <linux/in.h> 37 #include <linux/in.h>
38 #include <linux/cdrom.h> 38 #include <linux/cdrom.h>
39 #include <linux/module.h> 39 #include <linux/module.h>
40 #include <asm/unaligned.h> 40 #include <asm/unaligned.h>
41 #include <net/sock.h> 41 #include <net/sock.h>
42 #include <net/tcp.h> 42 #include <net/tcp.h>
43 #include <scsi/scsi.h> 43 #include <scsi/scsi.h>
44 #include <scsi/scsi_cmnd.h> 44 #include <scsi/scsi_cmnd.h>
45 #include <scsi/scsi_tcq.h> 45 #include <scsi/scsi_tcq.h>
46 46
47 #include <target/target_core_base.h> 47 #include <target/target_core_base.h>
48 #include <target/target_core_device.h> 48 #include <target/target_core_backend.h>
49 #include <target/target_core_tmr.h> 49 #include <target/target_core_fabric.h>
50 #include <target/target_core_tpg.h>
51 #include <target/target_core_transport.h>
52 #include <target/target_core_fabric_ops.h>
53 #include <target/target_core_configfs.h> 50 #include <target/target_core_configfs.h>
54 51
55 #include "target_core_internal.h" 52 #include "target_core_internal.h"
56 #include "target_core_alua.h" 53 #include "target_core_alua.h"
57 #include "target_core_pr.h" 54 #include "target_core_pr.h"
58 #include "target_core_ua.h" 55 #include "target_core_ua.h"
59 56
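
The include hunks in this file and in target_core_tpg.c above show the point of the reshuffle: the five per-topic headers collapse into two role-based ones. After this commit a fabric module and an I/O backend need only the following (backend names such as IBLOCK and FILEIO are given for orientation):

    /* fabric module (iSCSI, FC, ...): target-wide types + fabric API */
    #include <target/target_core_base.h>
    #include <target/target_core_fabric.h>

    /* I/O backend (IBLOCK, FILEIO, pSCSI, ...): target-wide types + backend API */
    #include <target/target_core_base.h>
    #include <target/target_core_backend.h>
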
60 static int sub_api_initialized; 57 static int sub_api_initialized;
61 58
62 static struct workqueue_struct *target_completion_wq; 59 static struct workqueue_struct *target_completion_wq;
63 static struct kmem_cache *se_sess_cache; 60 static struct kmem_cache *se_sess_cache;
64 struct kmem_cache *se_tmr_req_cache; 61 struct kmem_cache *se_tmr_req_cache;
65 struct kmem_cache *se_ua_cache; 62 struct kmem_cache *se_ua_cache;
66 struct kmem_cache *t10_pr_reg_cache; 63 struct kmem_cache *t10_pr_reg_cache;
67 struct kmem_cache *t10_alua_lu_gp_cache; 64 struct kmem_cache *t10_alua_lu_gp_cache;
68 struct kmem_cache *t10_alua_lu_gp_mem_cache; 65 struct kmem_cache *t10_alua_lu_gp_mem_cache;
69 struct kmem_cache *t10_alua_tg_pt_gp_cache; 66 struct kmem_cache *t10_alua_tg_pt_gp_cache;
70 struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; 67 struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
71 68
72 static int transport_generic_write_pending(struct se_cmd *); 69 static int transport_generic_write_pending(struct se_cmd *);
73 static int transport_processing_thread(void *param); 70 static int transport_processing_thread(void *param);
74 static int __transport_execute_tasks(struct se_device *dev); 71 static int __transport_execute_tasks(struct se_device *dev);
75 static void transport_complete_task_attr(struct se_cmd *cmd); 72 static void transport_complete_task_attr(struct se_cmd *cmd);
76 static void transport_handle_queue_full(struct se_cmd *cmd, 73 static void transport_handle_queue_full(struct se_cmd *cmd,
77 struct se_device *dev); 74 struct se_device *dev);
78 static void transport_free_dev_tasks(struct se_cmd *cmd); 75 static void transport_free_dev_tasks(struct se_cmd *cmd);
79 static int transport_generic_get_mem(struct se_cmd *cmd); 76 static int transport_generic_get_mem(struct se_cmd *cmd);
80 static void transport_put_cmd(struct se_cmd *cmd); 77 static void transport_put_cmd(struct se_cmd *cmd);
81 static void transport_remove_cmd_from_queue(struct se_cmd *cmd); 78 static void transport_remove_cmd_from_queue(struct se_cmd *cmd);
82 static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq); 79 static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
83 static void transport_generic_request_failure(struct se_cmd *); 80 static void transport_generic_request_failure(struct se_cmd *);
84 static void target_complete_ok_work(struct work_struct *work); 81 static void target_complete_ok_work(struct work_struct *work);
85 82
86 int init_se_kmem_caches(void) 83 int init_se_kmem_caches(void)
87 { 84 {
88 se_tmr_req_cache = kmem_cache_create("se_tmr_cache", 85 se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
89 sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req), 86 sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
90 0, NULL); 87 0, NULL);
91 if (!se_tmr_req_cache) { 88 if (!se_tmr_req_cache) {
92 pr_err("kmem_cache_create() for struct se_tmr_req" 89 pr_err("kmem_cache_create() for struct se_tmr_req"
93 " failed\n"); 90 " failed\n");
94 goto out; 91 goto out;
95 } 92 }
96 se_sess_cache = kmem_cache_create("se_sess_cache", 93 se_sess_cache = kmem_cache_create("se_sess_cache",
97 sizeof(struct se_session), __alignof__(struct se_session), 94 sizeof(struct se_session), __alignof__(struct se_session),
98 0, NULL); 95 0, NULL);
99 if (!se_sess_cache) { 96 if (!se_sess_cache) {
100 pr_err("kmem_cache_create() for struct se_session" 97 pr_err("kmem_cache_create() for struct se_session"
101 " failed\n"); 98 " failed\n");
102 goto out_free_tmr_req_cache; 99 goto out_free_tmr_req_cache;
103 } 100 }
104 se_ua_cache = kmem_cache_create("se_ua_cache", 101 se_ua_cache = kmem_cache_create("se_ua_cache",
105 sizeof(struct se_ua), __alignof__(struct se_ua), 102 sizeof(struct se_ua), __alignof__(struct se_ua),
106 0, NULL); 103 0, NULL);
107 if (!se_ua_cache) { 104 if (!se_ua_cache) {
108 pr_err("kmem_cache_create() for struct se_ua failed\n"); 105 pr_err("kmem_cache_create() for struct se_ua failed\n");
109 goto out_free_sess_cache; 106 goto out_free_sess_cache;
110 } 107 }
111 t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache", 108 t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
112 sizeof(struct t10_pr_registration), 109 sizeof(struct t10_pr_registration),
113 __alignof__(struct t10_pr_registration), 0, NULL); 110 __alignof__(struct t10_pr_registration), 0, NULL);
114 if (!t10_pr_reg_cache) { 111 if (!t10_pr_reg_cache) {
115 pr_err("kmem_cache_create() for struct t10_pr_registration" 112 pr_err("kmem_cache_create() for struct t10_pr_registration"
116 " failed\n"); 113 " failed\n");
117 goto out_free_ua_cache; 114 goto out_free_ua_cache;
118 } 115 }
119 t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache", 116 t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
120 sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp), 117 sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
121 0, NULL); 118 0, NULL);
122 if (!t10_alua_lu_gp_cache) { 119 if (!t10_alua_lu_gp_cache) {
123 pr_err("kmem_cache_create() for t10_alua_lu_gp_cache" 120 pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
124 " failed\n"); 121 " failed\n");
125 goto out_free_pr_reg_cache; 122 goto out_free_pr_reg_cache;
126 } 123 }
127 t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache", 124 t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
128 sizeof(struct t10_alua_lu_gp_member), 125 sizeof(struct t10_alua_lu_gp_member),
129 __alignof__(struct t10_alua_lu_gp_member), 0, NULL); 126 __alignof__(struct t10_alua_lu_gp_member), 0, NULL);
130 if (!t10_alua_lu_gp_mem_cache) { 127 if (!t10_alua_lu_gp_mem_cache) {
131 pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_" 128 pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
132 "cache failed\n"); 129 "cache failed\n");
133 goto out_free_lu_gp_cache; 130 goto out_free_lu_gp_cache;
134 } 131 }
135 t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache", 132 t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
136 sizeof(struct t10_alua_tg_pt_gp), 133 sizeof(struct t10_alua_tg_pt_gp),
137 __alignof__(struct t10_alua_tg_pt_gp), 0, NULL); 134 __alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
138 if (!t10_alua_tg_pt_gp_cache) { 135 if (!t10_alua_tg_pt_gp_cache) {
139 pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" 136 pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
140 "cache failed\n"); 137 "cache failed\n");
141 goto out_free_lu_gp_mem_cache; 138 goto out_free_lu_gp_mem_cache;
142 } 139 }
143 t10_alua_tg_pt_gp_mem_cache = kmem_cache_create( 140 t10_alua_tg_pt_gp_mem_cache = kmem_cache_create(
144 "t10_alua_tg_pt_gp_mem_cache", 141 "t10_alua_tg_pt_gp_mem_cache",
145 sizeof(struct t10_alua_tg_pt_gp_member), 142 sizeof(struct t10_alua_tg_pt_gp_member),
146 __alignof__(struct t10_alua_tg_pt_gp_member), 143 __alignof__(struct t10_alua_tg_pt_gp_member),
147 0, NULL); 144 0, NULL);
148 if (!t10_alua_tg_pt_gp_mem_cache) { 145 if (!t10_alua_tg_pt_gp_mem_cache) {
149 pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" 146 pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
150 "mem_t failed\n"); 147 "mem_t failed\n");
151 goto out_free_tg_pt_gp_cache; 148 goto out_free_tg_pt_gp_cache;
152 } 149 }
153 150
154 target_completion_wq = alloc_workqueue("target_completion", 151 target_completion_wq = alloc_workqueue("target_completion",
155 WQ_MEM_RECLAIM, 0); 152 WQ_MEM_RECLAIM, 0);
156 if (!target_completion_wq) 153 if (!target_completion_wq)
157 goto out_free_tg_pt_gp_mem_cache; 154 goto out_free_tg_pt_gp_mem_cache;
158 155
159 return 0; 156 return 0;
160 157
161 out_free_tg_pt_gp_mem_cache: 158 out_free_tg_pt_gp_mem_cache:
162 kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); 159 kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
163 out_free_tg_pt_gp_cache: 160 out_free_tg_pt_gp_cache:
164 kmem_cache_destroy(t10_alua_tg_pt_gp_cache); 161 kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
165 out_free_lu_gp_mem_cache: 162 out_free_lu_gp_mem_cache:
166 kmem_cache_destroy(t10_alua_lu_gp_mem_cache); 163 kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
167 out_free_lu_gp_cache: 164 out_free_lu_gp_cache:
168 kmem_cache_destroy(t10_alua_lu_gp_cache); 165 kmem_cache_destroy(t10_alua_lu_gp_cache);
169 out_free_pr_reg_cache: 166 out_free_pr_reg_cache:
170 kmem_cache_destroy(t10_pr_reg_cache); 167 kmem_cache_destroy(t10_pr_reg_cache);
171 out_free_ua_cache: 168 out_free_ua_cache:
172 kmem_cache_destroy(se_ua_cache); 169 kmem_cache_destroy(se_ua_cache);
173 out_free_sess_cache: 170 out_free_sess_cache:
174 kmem_cache_destroy(se_sess_cache); 171 kmem_cache_destroy(se_sess_cache);
175 out_free_tmr_req_cache: 172 out_free_tmr_req_cache:
176 kmem_cache_destroy(se_tmr_req_cache); 173 kmem_cache_destroy(se_tmr_req_cache);
177 out: 174 out:
178 return -ENOMEM; 175 return -ENOMEM;
179 } 176 }
180 177
181 void release_se_kmem_caches(void) 178 void release_se_kmem_caches(void)
182 { 179 {
183 destroy_workqueue(target_completion_wq); 180 destroy_workqueue(target_completion_wq);
184 kmem_cache_destroy(se_tmr_req_cache); 181 kmem_cache_destroy(se_tmr_req_cache);
185 kmem_cache_destroy(se_sess_cache); 182 kmem_cache_destroy(se_sess_cache);
186 kmem_cache_destroy(se_ua_cache); 183 kmem_cache_destroy(se_ua_cache);
187 kmem_cache_destroy(t10_pr_reg_cache); 184 kmem_cache_destroy(t10_pr_reg_cache);
188 kmem_cache_destroy(t10_alua_lu_gp_cache); 185 kmem_cache_destroy(t10_alua_lu_gp_cache);
189 kmem_cache_destroy(t10_alua_lu_gp_mem_cache); 186 kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
190 kmem_cache_destroy(t10_alua_tg_pt_gp_cache); 187 kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
191 kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); 188 kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
192 } 189 }
193 190
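init_se_kmem_caches() builds each cache with kmem_cache_create() and, on failure, unwinds every cache created so far in reverse order before returning -ENOMEM; release_se_kmem_caches() is the unconditional teardown mirror. A minimal sketch of the same create-then-unwind pattern for a hypothetical pair of caches (the demo_* names are illustrative, not part of this file):

static struct kmem_cache *demo_a_cache;
static struct kmem_cache *demo_b_cache;

static int demo_init_caches(void)
{
	demo_a_cache = kmem_cache_create("demo_a_cache",
			sizeof(struct se_ua), __alignof__(struct se_ua), 0, NULL);
	if (!demo_a_cache)
		return -ENOMEM;
	demo_b_cache = kmem_cache_create("demo_b_cache",
			sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
			0, NULL);
	if (!demo_b_cache) {
		/* unwind in reverse creation order, as init_se_kmem_caches() does */
		kmem_cache_destroy(demo_a_cache);
		return -ENOMEM;
	}
	return 0;
}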
194 /* This code ensures unique MIB indexes are handed out. */ 191 /* This code ensures unique MIB indexes are handed out. */
195 static DEFINE_SPINLOCK(scsi_mib_index_lock); 192 static DEFINE_SPINLOCK(scsi_mib_index_lock);
196 static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX]; 193 static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];
197 194
198 /* 195 /*
199 * Allocate a new row index for the entry type specified 196 * Allocate a new row index for the entry type specified
200 */ 197 */
201 u32 scsi_get_new_index(scsi_index_t type) 198 u32 scsi_get_new_index(scsi_index_t type)
202 { 199 {
203 u32 new_index; 200 u32 new_index;
204 201
205 BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)); 202 BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));
206 203
207 spin_lock(&scsi_mib_index_lock); 204 spin_lock(&scsi_mib_index_lock);
208 new_index = ++scsi_mib_index[type]; 205 new_index = ++scsi_mib_index[type];
209 spin_unlock(&scsi_mib_index_lock); 206 spin_unlock(&scsi_mib_index_lock);
210 207
211 return new_index; 208 return new_index;
212 } 209 }
213 210
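scsi_get_new_index() serializes on scsi_mib_index_lock and keeps one monotonically increasing counter per scsi_index_t, so every caller gets a unique index for that type. A hedged usage sketch; it assumes SCSI_INST_INDEX is one of the scsi_index_t values defined in target_core_base.h:

static u32 demo_alloc_instance_index(void)
{
	/* each index type has its own counter under scsi_mib_index_lock */
	return scsi_get_new_index(SCSI_INST_INDEX);
}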
214 static void transport_init_queue_obj(struct se_queue_obj *qobj) 211 static void transport_init_queue_obj(struct se_queue_obj *qobj)
215 { 212 {
216 atomic_set(&qobj->queue_cnt, 0); 213 atomic_set(&qobj->queue_cnt, 0);
217 INIT_LIST_HEAD(&qobj->qobj_list); 214 INIT_LIST_HEAD(&qobj->qobj_list);
218 init_waitqueue_head(&qobj->thread_wq); 215 init_waitqueue_head(&qobj->thread_wq);
219 spin_lock_init(&qobj->cmd_queue_lock); 216 spin_lock_init(&qobj->cmd_queue_lock);
220 } 217 }
221 218
222 void transport_subsystem_check_init(void) 219 void transport_subsystem_check_init(void)
223 { 220 {
224 int ret; 221 int ret;
225 222
226 if (sub_api_initialized) 223 if (sub_api_initialized)
227 return; 224 return;
228 225
229 ret = request_module("target_core_iblock"); 226 ret = request_module("target_core_iblock");
230 if (ret != 0) 227 if (ret != 0)
231 pr_err("Unable to load target_core_iblock\n"); 228 pr_err("Unable to load target_core_iblock\n");
232 229
233 ret = request_module("target_core_file"); 230 ret = request_module("target_core_file");
234 if (ret != 0) 231 if (ret != 0)
235 pr_err("Unable to load target_core_file\n"); 232 pr_err("Unable to load target_core_file\n");
236 233
237 ret = request_module("target_core_pscsi"); 234 ret = request_module("target_core_pscsi");
238 if (ret != 0) 235 if (ret != 0)
239 pr_err("Unable to load target_core_pscsi\n"); 236 pr_err("Unable to load target_core_pscsi\n");
240 237
241 ret = request_module("target_core_stgt"); 238 ret = request_module("target_core_stgt");
242 if (ret != 0) 239 if (ret != 0)
243 pr_err("Unable to load target_core_stgt\n"); 240 pr_err("Unable to load target_core_stgt\n");
244 241
245 sub_api_initialized = 1; 242 sub_api_initialized = 1;
246 return; 243 return;
247 } 244 }
248 245
249 struct se_session *transport_init_session(void) 246 struct se_session *transport_init_session(void)
250 { 247 {
251 struct se_session *se_sess; 248 struct se_session *se_sess;
252 249
253 se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL); 250 se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
254 if (!se_sess) { 251 if (!se_sess) {
255 pr_err("Unable to allocate struct se_session from" 252 pr_err("Unable to allocate struct se_session from"
256 " se_sess_cache\n"); 253 " se_sess_cache\n");
257 return ERR_PTR(-ENOMEM); 254 return ERR_PTR(-ENOMEM);
258 } 255 }
259 INIT_LIST_HEAD(&se_sess->sess_list); 256 INIT_LIST_HEAD(&se_sess->sess_list);
260 INIT_LIST_HEAD(&se_sess->sess_acl_list); 257 INIT_LIST_HEAD(&se_sess->sess_acl_list);
261 INIT_LIST_HEAD(&se_sess->sess_cmd_list); 258 INIT_LIST_HEAD(&se_sess->sess_cmd_list);
262 INIT_LIST_HEAD(&se_sess->sess_wait_list); 259 INIT_LIST_HEAD(&se_sess->sess_wait_list);
263 spin_lock_init(&se_sess->sess_cmd_lock); 260 spin_lock_init(&se_sess->sess_cmd_lock);
264 261
265 return se_sess; 262 return se_sess;
266 } 263 }
267 EXPORT_SYMBOL(transport_init_session); 264 EXPORT_SYMBOL(transport_init_session);
268 265
269 /* 266 /*
270 * Called with spin_lock_bh(&struct se_portal_group->session_lock) held. 267 * Called with spin_lock_bh(&struct se_portal_group->session_lock) held.
271 */ 268 */
272 void __transport_register_session( 269 void __transport_register_session(
273 struct se_portal_group *se_tpg, 270 struct se_portal_group *se_tpg,
274 struct se_node_acl *se_nacl, 271 struct se_node_acl *se_nacl,
275 struct se_session *se_sess, 272 struct se_session *se_sess,
276 void *fabric_sess_ptr) 273 void *fabric_sess_ptr)
277 { 274 {
278 unsigned char buf[PR_REG_ISID_LEN]; 275 unsigned char buf[PR_REG_ISID_LEN];
279 276
280 se_sess->se_tpg = se_tpg; 277 se_sess->se_tpg = se_tpg;
281 se_sess->fabric_sess_ptr = fabric_sess_ptr; 278 se_sess->fabric_sess_ptr = fabric_sess_ptr;
282 /* 279 /*
283 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session 280 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
284 * 281 *
285 * Only set for struct se_session's that will actually be moving I/O. 282 * Only set for struct se_session's that will actually be moving I/O.
286 * e.g. *NOT* discovery sessions. 283 * e.g. *NOT* discovery sessions.
287 */ 284 */
288 if (se_nacl) { 285 if (se_nacl) {
289 /* 286 /*
290 * If the fabric module supports an ISID based TransportID, 287 * If the fabric module supports an ISID based TransportID,
291 * save this value in binary from the fabric I_T Nexus now. 288 * save this value in binary from the fabric I_T Nexus now.
292 */ 289 */
293 if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) { 290 if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
294 memset(&buf[0], 0, PR_REG_ISID_LEN); 291 memset(&buf[0], 0, PR_REG_ISID_LEN);
295 se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, 292 se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
296 &buf[0], PR_REG_ISID_LEN); 293 &buf[0], PR_REG_ISID_LEN);
297 se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]); 294 se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
298 } 295 }
299 spin_lock_irq(&se_nacl->nacl_sess_lock); 296 spin_lock_irq(&se_nacl->nacl_sess_lock);
300 /* 297 /*
301 * The se_nacl->nacl_sess pointer will be set to the 298 * The se_nacl->nacl_sess pointer will be set to the
302 * last active I_T Nexus for each struct se_node_acl. 299 * last active I_T Nexus for each struct se_node_acl.
303 */ 300 */
304 se_nacl->nacl_sess = se_sess; 301 se_nacl->nacl_sess = se_sess;
305 302
306 list_add_tail(&se_sess->sess_acl_list, 303 list_add_tail(&se_sess->sess_acl_list,
307 &se_nacl->acl_sess_list); 304 &se_nacl->acl_sess_list);
308 spin_unlock_irq(&se_nacl->nacl_sess_lock); 305 spin_unlock_irq(&se_nacl->nacl_sess_lock);
309 } 306 }
310 list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list); 307 list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);
311 308
312 pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n", 309 pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
313 se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr); 310 se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
314 } 311 }
315 EXPORT_SYMBOL(__transport_register_session); 312 EXPORT_SYMBOL(__transport_register_session);
316 313
317 void transport_register_session( 314 void transport_register_session(
318 struct se_portal_group *se_tpg, 315 struct se_portal_group *se_tpg,
319 struct se_node_acl *se_nacl, 316 struct se_node_acl *se_nacl,
320 struct se_session *se_sess, 317 struct se_session *se_sess,
321 void *fabric_sess_ptr) 318 void *fabric_sess_ptr)
322 { 319 {
323 spin_lock_bh(&se_tpg->session_lock); 320 spin_lock_bh(&se_tpg->session_lock);
324 __transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr); 321 __transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
325 spin_unlock_bh(&se_tpg->session_lock); 322 spin_unlock_bh(&se_tpg->session_lock);
326 } 323 }
327 EXPORT_SYMBOL(transport_register_session); 324 EXPORT_SYMBOL(transport_register_session);
328 325
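transport_init_session() and transport_register_session() form the session bring-up pair for a fabric module: the first allocates and initializes the struct se_session, the second publishes it on the TPG (and on the se_node_acl when one is passed) under session_lock. A hedged sketch of a fabric login path wiring the two together; the function name is hypothetical and error handling is trimmed:

static struct se_session *demo_make_session(struct se_portal_group *se_tpg,
					    struct se_node_acl *se_nacl,
					    void *fabric_priv)
{
	struct se_session *se_sess;

	se_sess = transport_init_session();
	if (IS_ERR(se_sess))
		return se_sess;	/* ERR_PTR(-ENOMEM) on allocation failure */

	/* takes se_tpg->session_lock and links se_sess onto tpg_sess_list */
	transport_register_session(se_tpg, se_nacl, se_sess, fabric_priv);
	return se_sess;
}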
329 void transport_deregister_session_configfs(struct se_session *se_sess) 326 void transport_deregister_session_configfs(struct se_session *se_sess)
330 { 327 {
331 struct se_node_acl *se_nacl; 328 struct se_node_acl *se_nacl;
332 unsigned long flags; 329 unsigned long flags;
333 /* 330 /*
334 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session 331 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
335 */ 332 */
336 se_nacl = se_sess->se_node_acl; 333 se_nacl = se_sess->se_node_acl;
337 if (se_nacl) { 334 if (se_nacl) {
338 spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); 335 spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
339 list_del(&se_sess->sess_acl_list); 336 list_del(&se_sess->sess_acl_list);
340 /* 337 /*
341 * If the session list is empty, then clear the pointer. 338 * If the session list is empty, then clear the pointer.
342 * Otherwise, set the struct se_session pointer from the tail 339 * Otherwise, set the struct se_session pointer from the tail
343 * element of the per struct se_node_acl active session list. 340 * element of the per struct se_node_acl active session list.
344 */ 341 */
345 if (list_empty(&se_nacl->acl_sess_list)) 342 if (list_empty(&se_nacl->acl_sess_list))
346 se_nacl->nacl_sess = NULL; 343 se_nacl->nacl_sess = NULL;
347 else { 344 else {
348 se_nacl->nacl_sess = container_of( 345 se_nacl->nacl_sess = container_of(
349 se_nacl->acl_sess_list.prev, 346 se_nacl->acl_sess_list.prev,
350 struct se_session, sess_acl_list); 347 struct se_session, sess_acl_list);
351 } 348 }
352 spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); 349 spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
353 } 350 }
354 } 351 }
355 EXPORT_SYMBOL(transport_deregister_session_configfs); 352 EXPORT_SYMBOL(transport_deregister_session_configfs);
356 353
357 void transport_free_session(struct se_session *se_sess) 354 void transport_free_session(struct se_session *se_sess)
358 { 355 {
359 kmem_cache_free(se_sess_cache, se_sess); 356 kmem_cache_free(se_sess_cache, se_sess);
360 } 357 }
361 EXPORT_SYMBOL(transport_free_session); 358 EXPORT_SYMBOL(transport_free_session);
362 359
363 void transport_deregister_session(struct se_session *se_sess) 360 void transport_deregister_session(struct se_session *se_sess)
364 { 361 {
365 struct se_portal_group *se_tpg = se_sess->se_tpg; 362 struct se_portal_group *se_tpg = se_sess->se_tpg;
366 struct se_node_acl *se_nacl; 363 struct se_node_acl *se_nacl;
367 unsigned long flags; 364 unsigned long flags;
368 365
369 if (!se_tpg) { 366 if (!se_tpg) {
370 transport_free_session(se_sess); 367 transport_free_session(se_sess);
371 return; 368 return;
372 } 369 }
373 370
374 spin_lock_irqsave(&se_tpg->session_lock, flags); 371 spin_lock_irqsave(&se_tpg->session_lock, flags);
375 list_del(&se_sess->sess_list); 372 list_del(&se_sess->sess_list);
376 se_sess->se_tpg = NULL; 373 se_sess->se_tpg = NULL;
377 se_sess->fabric_sess_ptr = NULL; 374 se_sess->fabric_sess_ptr = NULL;
378 spin_unlock_irqrestore(&se_tpg->session_lock, flags); 375 spin_unlock_irqrestore(&se_tpg->session_lock, flags);
379 376
380 /* 377 /*
381 * Determine if we need to do extra work for this initiator node's 378 * Determine if we need to do extra work for this initiator node's
382 * struct se_node_acl if it had been previously dynamically generated. 379 * struct se_node_acl if it had been previously dynamically generated.
383 */ 380 */
384 se_nacl = se_sess->se_node_acl; 381 se_nacl = se_sess->se_node_acl;
385 if (se_nacl) { 382 if (se_nacl) {
386 spin_lock_irqsave(&se_tpg->acl_node_lock, flags); 383 spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
387 if (se_nacl->dynamic_node_acl) { 384 if (se_nacl->dynamic_node_acl) {
388 if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache( 385 if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache(
389 se_tpg)) { 386 se_tpg)) {
390 list_del(&se_nacl->acl_list); 387 list_del(&se_nacl->acl_list);
391 se_tpg->num_node_acls--; 388 se_tpg->num_node_acls--;
392 spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags); 389 spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
393 390
394 core_tpg_wait_for_nacl_pr_ref(se_nacl); 391 core_tpg_wait_for_nacl_pr_ref(se_nacl);
395 core_free_device_list_for_node(se_nacl, se_tpg); 392 core_free_device_list_for_node(se_nacl, se_tpg);
396 se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, 393 se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg,
397 se_nacl); 394 se_nacl);
398 spin_lock_irqsave(&se_tpg->acl_node_lock, flags); 395 spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
399 } 396 }
400 } 397 }
401 spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags); 398 spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
402 } 399 }
403 400
404 transport_free_session(se_sess); 401 transport_free_session(se_sess);
405 402
406 pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n", 403 pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
407 se_tpg->se_tpg_tfo->get_fabric_name()); 404 se_tpg->se_tpg_tfo->get_fabric_name());
408 } 405 }
409 EXPORT_SYMBOL(transport_deregister_session); 406 EXPORT_SYMBOL(transport_deregister_session);
410 407
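Teardown runs in the opposite order: transport_deregister_session_configfs() unhooks the session from its se_node_acl, then transport_deregister_session() removes it from the TPG, reaps a dynamically generated ACL when the fabric does not cache demo-mode ACLs, and frees the session via transport_free_session(). A hedged sketch of a fabric close-session path (name hypothetical):

static void demo_close_session(struct se_session *se_sess)
{
	/* detach from the per-ACL session list first... */
	transport_deregister_session_configfs(se_sess);
	/* ...then drop it from the TPG and free it */
	transport_deregister_session(se_sess);
}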
411 /* 408 /*
412 * Called with cmd->t_state_lock held. 409 * Called with cmd->t_state_lock held.
413 */ 410 */
414 static void transport_all_task_dev_remove_state(struct se_cmd *cmd) 411 static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
415 { 412 {
416 struct se_device *dev = cmd->se_dev; 413 struct se_device *dev = cmd->se_dev;
417 struct se_task *task; 414 struct se_task *task;
418 unsigned long flags; 415 unsigned long flags;
419 416
420 if (!dev) 417 if (!dev)
421 return; 418 return;
422 419
423 list_for_each_entry(task, &cmd->t_task_list, t_list) { 420 list_for_each_entry(task, &cmd->t_task_list, t_list) {
424 if (task->task_flags & TF_ACTIVE) 421 if (task->task_flags & TF_ACTIVE)
425 continue; 422 continue;
426 423
427 if (!atomic_read(&task->task_state_active)) 424 if (!atomic_read(&task->task_state_active))
428 continue; 425 continue;
429 426
430 spin_lock_irqsave(&dev->execute_task_lock, flags); 427 spin_lock_irqsave(&dev->execute_task_lock, flags);
431 list_del(&task->t_state_list); 428 list_del(&task->t_state_list);
432 pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n", 429 pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n",
433 cmd->se_tfo->get_task_tag(cmd), dev, task); 430 cmd->se_tfo->get_task_tag(cmd), dev, task);
434 spin_unlock_irqrestore(&dev->execute_task_lock, flags); 431 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
435 432
436 atomic_set(&task->task_state_active, 0); 433 atomic_set(&task->task_state_active, 0);
437 atomic_dec(&cmd->t_task_cdbs_ex_left); 434 atomic_dec(&cmd->t_task_cdbs_ex_left);
438 } 435 }
439 } 436 }
440 437
441 /* transport_cmd_check_stop(): 438 /* transport_cmd_check_stop():
442 * 439 *
443 * 'transport_off = 1' determines if t_transport_active should be cleared. 440 * 'transport_off = 1' determines if t_transport_active should be cleared.
444 * 'transport_off = 2' determines if task_dev_state should be removed. 441 * 'transport_off = 2' determines if task_dev_state should be removed.
445 * 442 *
446 * A non-zero u8 t_state sets cmd->t_state. 443 * A non-zero u8 t_state sets cmd->t_state.
447 * Returns 1 when command is stopped, else 0. 444 * Returns 1 when command is stopped, else 0.
448 */ 445 */
449 static int transport_cmd_check_stop( 446 static int transport_cmd_check_stop(
450 struct se_cmd *cmd, 447 struct se_cmd *cmd,
451 int transport_off, 448 int transport_off,
452 u8 t_state) 449 u8 t_state)
453 { 450 {
454 unsigned long flags; 451 unsigned long flags;
455 452
456 spin_lock_irqsave(&cmd->t_state_lock, flags); 453 spin_lock_irqsave(&cmd->t_state_lock, flags);
457 /* 454 /*
458 * Determine if IOCTL context caller is requesting the stopping of this 455 * Determine if IOCTL context caller is requesting the stopping of this
459 * command for LUN shutdown purposes. 456 * command for LUN shutdown purposes.
460 */ 457 */
461 if (atomic_read(&cmd->transport_lun_stop)) { 458 if (atomic_read(&cmd->transport_lun_stop)) {
462 pr_debug("%s:%d atomic_read(&cmd->transport_lun_stop)" 459 pr_debug("%s:%d atomic_read(&cmd->transport_lun_stop)"
463 " == TRUE for ITT: 0x%08x\n", __func__, __LINE__, 460 " == TRUE for ITT: 0x%08x\n", __func__, __LINE__,
464 cmd->se_tfo->get_task_tag(cmd)); 461 cmd->se_tfo->get_task_tag(cmd));
465 462
466 atomic_set(&cmd->t_transport_active, 0); 463 atomic_set(&cmd->t_transport_active, 0);
467 if (transport_off == 2) 464 if (transport_off == 2)
468 transport_all_task_dev_remove_state(cmd); 465 transport_all_task_dev_remove_state(cmd);
469 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 466 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
470 467
471 complete(&cmd->transport_lun_stop_comp); 468 complete(&cmd->transport_lun_stop_comp);
472 return 1; 469 return 1;
473 } 470 }
474 /* 471 /*
475 * Determine if frontend context caller is requesting the stopping of 472 * Determine if frontend context caller is requesting the stopping of
476 * this command for frontend exceptions. 473 * this command for frontend exceptions.
477 */ 474 */
478 if (atomic_read(&cmd->t_transport_stop)) { 475 if (atomic_read(&cmd->t_transport_stop)) {
479 pr_debug("%s:%d atomic_read(&cmd->t_transport_stop) ==" 476 pr_debug("%s:%d atomic_read(&cmd->t_transport_stop) =="
480 " TRUE for ITT: 0x%08x\n", __func__, __LINE__, 477 " TRUE for ITT: 0x%08x\n", __func__, __LINE__,
481 cmd->se_tfo->get_task_tag(cmd)); 478 cmd->se_tfo->get_task_tag(cmd));
482 479
483 if (transport_off == 2) 480 if (transport_off == 2)
484 transport_all_task_dev_remove_state(cmd); 481 transport_all_task_dev_remove_state(cmd);
485 482
486 /* 483 /*
487 * Clear struct se_cmd->se_lun before the transport_off == 2 handoff 484 * Clear struct se_cmd->se_lun before the transport_off == 2 handoff
488 * to FE. 485 * to FE.
489 */ 486 */
490 if (transport_off == 2) 487 if (transport_off == 2)
491 cmd->se_lun = NULL; 488 cmd->se_lun = NULL;
492 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 489 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
493 490
494 complete(&cmd->t_transport_stop_comp); 491 complete(&cmd->t_transport_stop_comp);
495 return 1; 492 return 1;
496 } 493 }
497 if (transport_off) { 494 if (transport_off) {
498 atomic_set(&cmd->t_transport_active, 0); 495 atomic_set(&cmd->t_transport_active, 0);
499 if (transport_off == 2) { 496 if (transport_off == 2) {
500 transport_all_task_dev_remove_state(cmd); 497 transport_all_task_dev_remove_state(cmd);
501 /* 498 /*
502 * Clear struct se_cmd->se_lun before the transport_off == 2 499 * Clear struct se_cmd->se_lun before the transport_off == 2
503 * handoff to fabric module. 500 * handoff to fabric module.
504 */ 501 */
505 cmd->se_lun = NULL; 502 cmd->se_lun = NULL;
506 /* 503 /*
507 * Some fabric modules like tcm_loop can release 504 * Some fabric modules like tcm_loop can release
508 * their internally allocated I/O reference and 505 * their internally allocated I/O reference and
509 * struct se_cmd now. 506 * struct se_cmd now.
510 * 507 *
511 * Fabric modules are expected to return '1' here if the 508 * Fabric modules are expected to return '1' here if the
512 * se_cmd being passed is released at this point, 509 * se_cmd being passed is released at this point,
513 * or zero if not being released. 510 * or zero if not being released.
514 */ 511 */
515 if (cmd->se_tfo->check_stop_free != NULL) { 512 if (cmd->se_tfo->check_stop_free != NULL) {
516 spin_unlock_irqrestore( 513 spin_unlock_irqrestore(
517 &cmd->t_state_lock, flags); 514 &cmd->t_state_lock, flags);
518 515
519 return cmd->se_tfo->check_stop_free(cmd); 516 return cmd->se_tfo->check_stop_free(cmd);
520 } 517 }
521 } 518 }
522 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 519 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
523 520
524 return 0; 521 return 0;
525 } else if (t_state) 522 } else if (t_state)
526 cmd->t_state = t_state; 523 cmd->t_state = t_state;
527 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 524 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
528 525
529 return 0; 526 return 0;
530 } 527 }
531 528
532 static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) 529 static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
533 { 530 {
534 return transport_cmd_check_stop(cmd, 2, 0); 531 return transport_cmd_check_stop(cmd, 2, 0);
535 } 532 }
536 533
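transport_cmd_check_stop() folds three behaviours into transport_off: 0 only honours pending LUN/frontend stop requests, 1 also clears t_transport_active, and 2 additionally drops device task state and se_lun before the fabric handoff, which is exactly what the _to_fabric wrapper above selects. A hedged sketch of the transport_off == 1 flavour; the wrapper name is hypothetical:

static int demo_cmd_check_stop_transport(struct se_cmd *cmd)
{
	/* clear t_transport_active but leave device task state intact */
	return transport_cmd_check_stop(cmd, 1, 0);
}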
537 static void transport_lun_remove_cmd(struct se_cmd *cmd) 534 static void transport_lun_remove_cmd(struct se_cmd *cmd)
538 { 535 {
539 struct se_lun *lun = cmd->se_lun; 536 struct se_lun *lun = cmd->se_lun;
540 unsigned long flags; 537 unsigned long flags;
541 538
542 if (!lun) 539 if (!lun)
543 return; 540 return;
544 541
545 spin_lock_irqsave(&cmd->t_state_lock, flags); 542 spin_lock_irqsave(&cmd->t_state_lock, flags);
546 if (!atomic_read(&cmd->transport_dev_active)) { 543 if (!atomic_read(&cmd->transport_dev_active)) {
547 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 544 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
548 goto check_lun; 545 goto check_lun;
549 } 546 }
550 atomic_set(&cmd->transport_dev_active, 0); 547 atomic_set(&cmd->transport_dev_active, 0);
551 transport_all_task_dev_remove_state(cmd); 548 transport_all_task_dev_remove_state(cmd);
552 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 549 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
553 550
554 551
555 check_lun: 552 check_lun:
556 spin_lock_irqsave(&lun->lun_cmd_lock, flags); 553 spin_lock_irqsave(&lun->lun_cmd_lock, flags);
557 if (atomic_read(&cmd->transport_lun_active)) { 554 if (atomic_read(&cmd->transport_lun_active)) {
558 list_del(&cmd->se_lun_node); 555 list_del(&cmd->se_lun_node);
559 atomic_set(&cmd->transport_lun_active, 0); 556 atomic_set(&cmd->transport_lun_active, 0);
560 #if 0 557 #if 0
561 pr_debug("Removed ITT: 0x%08x from LUN LIST[%d]\n" 558 pr_debug("Removed ITT: 0x%08x from LUN LIST[%d]\n"
562 cmd->se_tfo->get_task_tag(cmd), lun->unpacked_lun); 559 cmd->se_tfo->get_task_tag(cmd), lun->unpacked_lun);
563 #endif 560 #endif
564 } 561 }
565 spin_unlock_irqrestore(&lun->lun_cmd_lock, flags); 562 spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
566 } 563 }
567 564
568 void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) 565 void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
569 { 566 {
570 if (!cmd->se_tmr_req) 567 if (!cmd->se_tmr_req)
571 transport_lun_remove_cmd(cmd); 568 transport_lun_remove_cmd(cmd);
572 569
573 if (transport_cmd_check_stop_to_fabric(cmd)) 570 if (transport_cmd_check_stop_to_fabric(cmd))
574 return; 571 return;
575 if (remove) { 572 if (remove) {
576 transport_remove_cmd_from_queue(cmd); 573 transport_remove_cmd_from_queue(cmd);
577 transport_put_cmd(cmd); 574 transport_put_cmd(cmd);
578 } 575 }
579 } 576 }
580 577
581 static void transport_add_cmd_to_queue(struct se_cmd *cmd, int t_state, 578 static void transport_add_cmd_to_queue(struct se_cmd *cmd, int t_state,
582 bool at_head) 579 bool at_head)
583 { 580 {
584 struct se_device *dev = cmd->se_dev; 581 struct se_device *dev = cmd->se_dev;
585 struct se_queue_obj *qobj = &dev->dev_queue_obj; 582 struct se_queue_obj *qobj = &dev->dev_queue_obj;
586 unsigned long flags; 583 unsigned long flags;
587 584
588 if (t_state) { 585 if (t_state) {
589 spin_lock_irqsave(&cmd->t_state_lock, flags); 586 spin_lock_irqsave(&cmd->t_state_lock, flags);
590 cmd->t_state = t_state; 587 cmd->t_state = t_state;
591 atomic_set(&cmd->t_transport_active, 1); 588 atomic_set(&cmd->t_transport_active, 1);
592 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 589 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
593 } 590 }
594 591
595 spin_lock_irqsave(&qobj->cmd_queue_lock, flags); 592 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
596 593
597 /* If the cmd is already on the list, remove it before we add it */ 594 /* If the cmd is already on the list, remove it before we add it */
598 if (!list_empty(&cmd->se_queue_node)) 595 if (!list_empty(&cmd->se_queue_node))
599 list_del(&cmd->se_queue_node); 596 list_del(&cmd->se_queue_node);
600 else 597 else
601 atomic_inc(&qobj->queue_cnt); 598 atomic_inc(&qobj->queue_cnt);
602 599
603 if (at_head) 600 if (at_head)
604 list_add(&cmd->se_queue_node, &qobj->qobj_list); 601 list_add(&cmd->se_queue_node, &qobj->qobj_list);
605 else 602 else
606 list_add_tail(&cmd->se_queue_node, &qobj->qobj_list); 603 list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
607 atomic_set(&cmd->t_transport_queue_active, 1); 604 atomic_set(&cmd->t_transport_queue_active, 1);
608 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); 605 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
609 606
610 wake_up_interruptible(&qobj->thread_wq); 607 wake_up_interruptible(&qobj->thread_wq);
611 } 608 }
612 609
613 static struct se_cmd * 610 static struct se_cmd *
614 transport_get_cmd_from_queue(struct se_queue_obj *qobj) 611 transport_get_cmd_from_queue(struct se_queue_obj *qobj)
615 { 612 {
616 struct se_cmd *cmd; 613 struct se_cmd *cmd;
617 unsigned long flags; 614 unsigned long flags;
618 615
619 spin_lock_irqsave(&qobj->cmd_queue_lock, flags); 616 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
620 if (list_empty(&qobj->qobj_list)) { 617 if (list_empty(&qobj->qobj_list)) {
621 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); 618 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
622 return NULL; 619 return NULL;
623 } 620 }
624 cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node); 621 cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node);
625 622
626 atomic_set(&cmd->t_transport_queue_active, 0); 623 atomic_set(&cmd->t_transport_queue_active, 0);
627 624
628 list_del_init(&cmd->se_queue_node); 625 list_del_init(&cmd->se_queue_node);
629 atomic_dec(&qobj->queue_cnt); 626 atomic_dec(&qobj->queue_cnt);
630 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); 627 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
631 628
632 return cmd; 629 return cmd;
633 } 630 }
634 631
635 static void transport_remove_cmd_from_queue(struct se_cmd *cmd) 632 static void transport_remove_cmd_from_queue(struct se_cmd *cmd)
636 { 633 {
637 struct se_queue_obj *qobj = &cmd->se_dev->dev_queue_obj; 634 struct se_queue_obj *qobj = &cmd->se_dev->dev_queue_obj;
638 unsigned long flags; 635 unsigned long flags;
639 636
640 spin_lock_irqsave(&qobj->cmd_queue_lock, flags); 637 spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
641 if (!atomic_read(&cmd->t_transport_queue_active)) { 638 if (!atomic_read(&cmd->t_transport_queue_active)) {
642 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); 639 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
643 return; 640 return;
644 } 641 }
645 atomic_set(&cmd->t_transport_queue_active, 0); 642 atomic_set(&cmd->t_transport_queue_active, 0);
646 atomic_dec(&qobj->queue_cnt); 643 atomic_dec(&qobj->queue_cnt);
647 list_del_init(&cmd->se_queue_node); 644 list_del_init(&cmd->se_queue_node);
648 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); 645 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
649 646
650 if (atomic_read(&cmd->t_transport_queue_active)) { 647 if (atomic_read(&cmd->t_transport_queue_active)) {
651 pr_err("ITT: 0x%08x t_transport_queue_active: %d\n", 648 pr_err("ITT: 0x%08x t_transport_queue_active: %d\n",
652 cmd->se_tfo->get_task_tag(cmd), 649 cmd->se_tfo->get_task_tag(cmd),
653 atomic_read(&cmd->t_transport_queue_active)); 650 atomic_read(&cmd->t_transport_queue_active));
654 } 651 }
655 } 652 }
656 653
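transport_add_cmd_to_queue() and transport_get_cmd_from_queue() are a small producer/consumer pair around struct se_queue_obj: producers bump queue_cnt and wake thread_wq, and the device processing thread pops commands off qobj_list until it is empty. A minimal sketch of the consumer side, assuming it runs in kthread context like transport_processing_thread() (the function name is illustrative):

static void demo_drain_queue(struct se_queue_obj *qobj)
{
	struct se_cmd *cmd;

	/* returns NULL once qobj_list is empty */
	while ((cmd = transport_get_cmd_from_queue(qobj)) != NULL)
		pr_debug("dequeued cmd: %p t_state: %d\n", cmd, cmd->t_state);
}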
657 /* 654 /*
658 * Completion function used by TCM subsystem plugins (such as FILEIO) 655 * Completion function used by TCM subsystem plugins (such as FILEIO)
659 * for queueing up response from struct se_subsystem_api->do_task() 656 * for queueing up response from struct se_subsystem_api->do_task()
660 */ 657 */
661 void transport_complete_sync_cache(struct se_cmd *cmd, int good) 658 void transport_complete_sync_cache(struct se_cmd *cmd, int good)
662 { 659 {
663 struct se_task *task = list_entry(cmd->t_task_list.next, 660 struct se_task *task = list_entry(cmd->t_task_list.next,
664 struct se_task, t_list); 661 struct se_task, t_list);
665 662
666 if (good) { 663 if (good) {
667 cmd->scsi_status = SAM_STAT_GOOD; 664 cmd->scsi_status = SAM_STAT_GOOD;
668 task->task_scsi_status = GOOD; 665 task->task_scsi_status = GOOD;
669 } else { 666 } else {
670 task->task_scsi_status = SAM_STAT_CHECK_CONDITION; 667 task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
671 task->task_se_cmd->scsi_sense_reason = 668 task->task_se_cmd->scsi_sense_reason =
672 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 669 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
673 670
674 } 671 }
675 672
676 transport_complete_task(task, good); 673 transport_complete_task(task, good);
677 } 674 }
678 EXPORT_SYMBOL(transport_complete_sync_cache); 675 EXPORT_SYMBOL(transport_complete_sync_cache);
679 676
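transport_complete_sync_cache() lets a backend report SYNCHRONIZE_CACHE status with a single good/bad flag; the bad path is mapped to CHECK_CONDITION with TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE before the generic transport_complete_task() handoff. A hedged sketch of how a FILEIO-style plugin's completion path might call it (the function name and error parameter are illustrative):

static void demo_sync_cache_done(struct se_cmd *cmd, int error)
{
	/* any non-zero error collapses into CHECK_CONDITION above */
	transport_complete_sync_cache(cmd, error == 0);
}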
680 static void target_complete_failure_work(struct work_struct *work) 677 static void target_complete_failure_work(struct work_struct *work)
681 { 678 {
682 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 679 struct se_cmd *cmd = container_of(work, struct se_cmd, work);
683 680
684 transport_generic_request_failure(cmd); 681 transport_generic_request_failure(cmd);
685 } 682 }
686 683
687 /* transport_complete_task(): 684 /* transport_complete_task():
688 * 685 *
689 * Called from interrupt and non-interrupt context depending 686 * Called from interrupt and non-interrupt context depending
690 * on the transport plugin. 687 * on the transport plugin.
691 */ 688 */
692 void transport_complete_task(struct se_task *task, int success) 689 void transport_complete_task(struct se_task *task, int success)
693 { 690 {
694 struct se_cmd *cmd = task->task_se_cmd; 691 struct se_cmd *cmd = task->task_se_cmd;
695 struct se_device *dev = cmd->se_dev; 692 struct se_device *dev = cmd->se_dev;
696 unsigned long flags; 693 unsigned long flags;
697 #if 0 694 #if 0
698 pr_debug("task: %p CDB: 0x%02x obj_ptr: %p\n", task, 695 pr_debug("task: %p CDB: 0x%02x obj_ptr: %p\n", task,
699 cmd->t_task_cdb[0], dev); 696 cmd->t_task_cdb[0], dev);
700 #endif 697 #endif
701 if (dev) 698 if (dev)
702 atomic_inc(&dev->depth_left); 699 atomic_inc(&dev->depth_left);
703 700
704 spin_lock_irqsave(&cmd->t_state_lock, flags); 701 spin_lock_irqsave(&cmd->t_state_lock, flags);
705 task->task_flags &= ~TF_ACTIVE; 702 task->task_flags &= ~TF_ACTIVE;
706 703
707 /* 704 /*
708 * See if any sense data exists, if so set the TASK_SENSE flag. 705 * See if any sense data exists, if so set the TASK_SENSE flag.
709 * Also check for any other post completion work that needs to be 706 * Also check for any other post completion work that needs to be
710 * done by the plugins. 707 * done by the plugins.
711 */ 708 */
712 if (dev && dev->transport->transport_complete) { 709 if (dev && dev->transport->transport_complete) {
713 if (dev->transport->transport_complete(task) != 0) { 710 if (dev->transport->transport_complete(task) != 0) {
714 cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE; 711 cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
715 task->task_sense = 1; 712 task->task_sense = 1;
716 success = 1; 713 success = 1;
717 } 714 }
718 } 715 }
719 716
720 /* 717 /*
721 * See if we are waiting for outstanding struct se_task 718 * See if we are waiting for outstanding struct se_task
722 * to complete for an exception condition 719 * to complete for an exception condition
723 */ 720 */
724 if (task->task_flags & TF_REQUEST_STOP) { 721 if (task->task_flags & TF_REQUEST_STOP) {
725 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 722 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
726 complete(&task->task_stop_comp); 723 complete(&task->task_stop_comp);
727 return; 724 return;
728 } 725 }
729 726
730 if (!success) 727 if (!success)
731 cmd->t_tasks_failed = 1; 728 cmd->t_tasks_failed = 1;
732 729
733 /* 730 /*
734 * Decrement the outstanding t_task_cdbs_left count. The last 731 * Decrement the outstanding t_task_cdbs_left count. The last
735 * struct se_task from struct se_cmd will complete itself into the 732 * struct se_task from struct se_cmd will complete itself into the
736 * device queue depending upon int success. 733 * device queue depending upon int success.
737 */ 734 */
738 if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) { 735 if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
739 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 736 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
740 return; 737 return;
741 } 738 }
742 739
743 if (cmd->t_tasks_failed) { 740 if (cmd->t_tasks_failed) {
744 if (!task->task_error_status) { 741 if (!task->task_error_status) {
745 task->task_error_status = 742 task->task_error_status =
746 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 743 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
747 cmd->scsi_sense_reason = 744 cmd->scsi_sense_reason =
748 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 745 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
749 } 746 }
750 747
751 INIT_WORK(&cmd->work, target_complete_failure_work); 748 INIT_WORK(&cmd->work, target_complete_failure_work);
752 } else { 749 } else {
753 atomic_set(&cmd->t_transport_complete, 1); 750 atomic_set(&cmd->t_transport_complete, 1);
754 INIT_WORK(&cmd->work, target_complete_ok_work); 751 INIT_WORK(&cmd->work, target_complete_ok_work);
755 } 752 }
756 753
757 cmd->t_state = TRANSPORT_COMPLETE; 754 cmd->t_state = TRANSPORT_COMPLETE;
758 atomic_set(&cmd->t_transport_active, 1); 755 atomic_set(&cmd->t_transport_active, 1);
759 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 756 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
760 757
761 queue_work(target_completion_wq, &cmd->work); 758 queue_work(target_completion_wq, &cmd->work);
762 } 759 }
763 EXPORT_SYMBOL(transport_complete_task); 760 EXPORT_SYMBOL(transport_complete_task);
764 761
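transport_complete_task() is the generic completion entry point for backend plugins and, per the comment above, may run in interrupt or process context. A hedged sketch of a trivial, synchronous do_task()-style backend hook; the name is hypothetical and the struct se_task parameter is assumed from the completion helpers in this file:

static int demo_do_task(struct se_task *task)
{
	/* pretend the I/O already finished and complete in-line */
	task->task_scsi_status = GOOD;
	transport_complete_task(task, 1);
	return 0;
}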
765 /* 762 /*
766 * Called by transport_add_tasks_from_cmd() once a struct se_cmd's 763 * Called by transport_add_tasks_from_cmd() once a struct se_cmd's
767 * struct se_task list is ready to be added to the active execution list 764 * struct se_task list is ready to be added to the active execution list
768 * of the struct se_device. 765 * of the struct se_device.
769 * 766 *
770 * Called with se_dev_t->execute_task_lock held. 767 * Called with se_dev_t->execute_task_lock held.
771 */ 768 */
772 static inline int transport_add_task_check_sam_attr( 769 static inline int transport_add_task_check_sam_attr(
773 struct se_task *task, 770 struct se_task *task,
774 struct se_task *task_prev, 771 struct se_task *task_prev,
775 struct se_device *dev) 772 struct se_device *dev)
776 { 773 {
777 /* 774 /*
778 * No SAM Task attribute emulation enabled, add to tail of 775 * No SAM Task attribute emulation enabled, add to tail of
779 * execution queue 776 * execution queue
780 */ 777 */
781 if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) { 778 if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) {
782 list_add_tail(&task->t_execute_list, &dev->execute_task_list); 779 list_add_tail(&task->t_execute_list, &dev->execute_task_list);
783 return 0; 780 return 0;
784 } 781 }
785 /* 782 /*
786 * HEAD_OF_QUEUE attribute for received CDB, which means 783 * HEAD_OF_QUEUE attribute for received CDB, which means
787 * the first task that is associated with a struct se_cmd goes to 784 * the first task that is associated with a struct se_cmd goes to
788 * head of the struct se_device->execute_task_list, and task_prev 785 * head of the struct se_device->execute_task_list, and task_prev
789 * after that for each subsequent task 786 * after that for each subsequent task
790 */ 787 */
791 if (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG) { 788 if (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG) {
792 list_add(&task->t_execute_list, 789 list_add(&task->t_execute_list,
793 (task_prev != NULL) ? 790 (task_prev != NULL) ?
794 &task_prev->t_execute_list : 791 &task_prev->t_execute_list :
795 &dev->execute_task_list); 792 &dev->execute_task_list);
796 793
797 pr_debug("Set HEAD_OF_QUEUE for task CDB: 0x%02x" 794 pr_debug("Set HEAD_OF_QUEUE for task CDB: 0x%02x"
798 " in execution queue\n", 795 " in execution queue\n",
799 task->task_se_cmd->t_task_cdb[0]); 796 task->task_se_cmd->t_task_cdb[0]);
800 return 1; 797 return 1;
801 } 798 }
802 /* 799 /*
803 * For ORDERED, SIMPLE or UNTAGGED attribute tasks that have 800 * For ORDERED, SIMPLE or UNTAGGED attribute tasks that have
804 * transitioned from Dormant -> Active state, add to the end 801 * transitioned from Dormant -> Active state, add to the end
805 * of the struct se_device->execute_task_list 802 * of the struct se_device->execute_task_list
806 */ 803 */
807 list_add_tail(&task->t_execute_list, &dev->execute_task_list); 804 list_add_tail(&task->t_execute_list, &dev->execute_task_list);
808 return 0; 805 return 0;
809 } 806 }
810 807
811 /* __transport_add_task_to_execute_queue(): 808 /* __transport_add_task_to_execute_queue():
812 * 809 *
813 * Called with se_dev_t->execute_task_lock held. 810 * Called with se_dev_t->execute_task_lock held.
814 */ 811 */
815 static void __transport_add_task_to_execute_queue( 812 static void __transport_add_task_to_execute_queue(
816 struct se_task *task, 813 struct se_task *task,
817 struct se_task *task_prev, 814 struct se_task *task_prev,
818 struct se_device *dev) 815 struct se_device *dev)
819 { 816 {
820 int head_of_queue; 817 int head_of_queue;
821 818
822 head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev); 819 head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev);
823 atomic_inc(&dev->execute_tasks); 820 atomic_inc(&dev->execute_tasks);
824 821
825 if (atomic_read(&task->task_state_active)) 822 if (atomic_read(&task->task_state_active))
826 return; 823 return;
827 /* 824 /*
828 * Determine if this task needs to go to HEAD_OF_QUEUE for the 825 * Determine if this task needs to go to HEAD_OF_QUEUE for the
829 * state list as well. Running without SAM Task Attribute emulation 826 * state list as well. Running without SAM Task Attribute emulation
830 * will always return head_of_queue == 0 here 827 * will always return head_of_queue == 0 here
831 */ 828 */
832 if (head_of_queue) 829 if (head_of_queue)
833 list_add(&task->t_state_list, (task_prev) ? 830 list_add(&task->t_state_list, (task_prev) ?
834 &task_prev->t_state_list : 831 &task_prev->t_state_list :
835 &dev->state_task_list); 832 &dev->state_task_list);
836 else 833 else
837 list_add_tail(&task->t_state_list, &dev->state_task_list); 834 list_add_tail(&task->t_state_list, &dev->state_task_list);
838 835
839 atomic_set(&task->task_state_active, 1); 836 atomic_set(&task->task_state_active, 1);
840 837
841 pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n", 838 pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
842 task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd), 839 task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd),
843 task, dev); 840 task, dev);
844 } 841 }
845 842
846 static void transport_add_tasks_to_state_queue(struct se_cmd *cmd) 843 static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
847 { 844 {
848 struct se_device *dev = cmd->se_dev; 845 struct se_device *dev = cmd->se_dev;
849 struct se_task *task; 846 struct se_task *task;
850 unsigned long flags; 847 unsigned long flags;
851 848
852 spin_lock_irqsave(&cmd->t_state_lock, flags); 849 spin_lock_irqsave(&cmd->t_state_lock, flags);
853 list_for_each_entry(task, &cmd->t_task_list, t_list) { 850 list_for_each_entry(task, &cmd->t_task_list, t_list) {
854 if (atomic_read(&task->task_state_active)) 851 if (atomic_read(&task->task_state_active))
855 continue; 852 continue;
856 853
857 spin_lock(&dev->execute_task_lock); 854 spin_lock(&dev->execute_task_lock);
858 list_add_tail(&task->t_state_list, &dev->state_task_list); 855 list_add_tail(&task->t_state_list, &dev->state_task_list);
859 atomic_set(&task->task_state_active, 1); 856 atomic_set(&task->task_state_active, 1);
860 857
861 pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n", 858 pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
862 task->task_se_cmd->se_tfo->get_task_tag( 859 task->task_se_cmd->se_tfo->get_task_tag(
863 task->task_se_cmd), task, dev); 860 task->task_se_cmd), task, dev);
864 861
865 spin_unlock(&dev->execute_task_lock); 862 spin_unlock(&dev->execute_task_lock);
866 } 863 }
867 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 864 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
868 } 865 }
869 866
870 static void transport_add_tasks_from_cmd(struct se_cmd *cmd) 867 static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
871 { 868 {
872 struct se_device *dev = cmd->se_dev; 869 struct se_device *dev = cmd->se_dev;
873 struct se_task *task, *task_prev = NULL; 870 struct se_task *task, *task_prev = NULL;
874 unsigned long flags; 871 unsigned long flags;
875 872
876 spin_lock_irqsave(&dev->execute_task_lock, flags); 873 spin_lock_irqsave(&dev->execute_task_lock, flags);
877 list_for_each_entry(task, &cmd->t_task_list, t_list) { 874 list_for_each_entry(task, &cmd->t_task_list, t_list) {
878 if (!list_empty(&task->t_execute_list)) 875 if (!list_empty(&task->t_execute_list))
879 continue; 876 continue;
880 /* 877 /*
881 * __transport_add_task_to_execute_queue() handles the 878 * __transport_add_task_to_execute_queue() handles the
882 * SAM Task Attribute emulation if enabled 879 * SAM Task Attribute emulation if enabled
883 */ 880 */
884 __transport_add_task_to_execute_queue(task, task_prev, dev); 881 __transport_add_task_to_execute_queue(task, task_prev, dev);
885 task_prev = task; 882 task_prev = task;
886 } 883 }
887 spin_unlock_irqrestore(&dev->execute_task_lock, flags); 884 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
888 } 885 }
889 886
890 void __transport_remove_task_from_execute_queue(struct se_task *task, 887 void __transport_remove_task_from_execute_queue(struct se_task *task,
891 struct se_device *dev) 888 struct se_device *dev)
892 { 889 {
893 list_del_init(&task->t_execute_list); 890 list_del_init(&task->t_execute_list);
894 atomic_dec(&dev->execute_tasks); 891 atomic_dec(&dev->execute_tasks);
895 } 892 }
896 893
897 static void transport_remove_task_from_execute_queue( 894 static void transport_remove_task_from_execute_queue(
898 struct se_task *task, 895 struct se_task *task,
899 struct se_device *dev) 896 struct se_device *dev)
900 { 897 {
901 unsigned long flags; 898 unsigned long flags;
902 899
903 if (WARN_ON(list_empty(&task->t_execute_list))) 900 if (WARN_ON(list_empty(&task->t_execute_list)))
904 return; 901 return;
905 902
906 spin_lock_irqsave(&dev->execute_task_lock, flags); 903 spin_lock_irqsave(&dev->execute_task_lock, flags);
907 __transport_remove_task_from_execute_queue(task, dev); 904 __transport_remove_task_from_execute_queue(task, dev);
908 spin_unlock_irqrestore(&dev->execute_task_lock, flags); 905 spin_unlock_irqrestore(&dev->execute_task_lock, flags);
909 } 906 }
910 907
911 /* 908 /*
912 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status 909 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
913 */ 910 */
914 911
915 static void target_qf_do_work(struct work_struct *work) 912 static void target_qf_do_work(struct work_struct *work)
916 { 913 {
917 struct se_device *dev = container_of(work, struct se_device, 914 struct se_device *dev = container_of(work, struct se_device,
918 qf_work_queue); 915 qf_work_queue);
919 LIST_HEAD(qf_cmd_list); 916 LIST_HEAD(qf_cmd_list);
920 struct se_cmd *cmd, *cmd_tmp; 917 struct se_cmd *cmd, *cmd_tmp;
921 918
922 spin_lock_irq(&dev->qf_cmd_lock); 919 spin_lock_irq(&dev->qf_cmd_lock);
923 list_splice_init(&dev->qf_cmd_list, &qf_cmd_list); 920 list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
924 spin_unlock_irq(&dev->qf_cmd_lock); 921 spin_unlock_irq(&dev->qf_cmd_lock);
925 922
926 list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) { 923 list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
927 list_del(&cmd->se_qf_node); 924 list_del(&cmd->se_qf_node);
928 atomic_dec(&dev->dev_qf_count); 925 atomic_dec(&dev->dev_qf_count);
929 smp_mb__after_atomic_dec(); 926 smp_mb__after_atomic_dec();
930 927
931 pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" 928 pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
932 " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, 929 " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
933 (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" : 930 (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
934 (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING" 931 (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
935 : "UNKNOWN"); 932 : "UNKNOWN");
936 933
937 transport_add_cmd_to_queue(cmd, cmd->t_state, true); 934 transport_add_cmd_to_queue(cmd, cmd->t_state, true);
938 } 935 }
939 } 936 }
940 937
941 unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd) 938 unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
942 { 939 {
943 switch (cmd->data_direction) { 940 switch (cmd->data_direction) {
944 case DMA_NONE: 941 case DMA_NONE:
945 return "NONE"; 942 return "NONE";
946 case DMA_FROM_DEVICE: 943 case DMA_FROM_DEVICE:
947 return "READ"; 944 return "READ";
948 case DMA_TO_DEVICE: 945 case DMA_TO_DEVICE:
949 return "WRITE"; 946 return "WRITE";
950 case DMA_BIDIRECTIONAL: 947 case DMA_BIDIRECTIONAL:
951 return "BIDI"; 948 return "BIDI";
952 default: 949 default:
953 break; 950 break;
954 } 951 }
955 952
956 return "UNKNOWN"; 953 return "UNKNOWN";
957 } 954 }
958 955
959 void transport_dump_dev_state( 956 void transport_dump_dev_state(
960 struct se_device *dev, 957 struct se_device *dev,
961 char *b, 958 char *b,
962 int *bl) 959 int *bl)
963 { 960 {
964 *bl += sprintf(b + *bl, "Status: "); 961 *bl += sprintf(b + *bl, "Status: ");
965 switch (dev->dev_status) { 962 switch (dev->dev_status) {
966 case TRANSPORT_DEVICE_ACTIVATED: 963 case TRANSPORT_DEVICE_ACTIVATED:
967 *bl += sprintf(b + *bl, "ACTIVATED"); 964 *bl += sprintf(b + *bl, "ACTIVATED");
968 break; 965 break;
969 case TRANSPORT_DEVICE_DEACTIVATED: 966 case TRANSPORT_DEVICE_DEACTIVATED:
970 *bl += sprintf(b + *bl, "DEACTIVATED"); 967 *bl += sprintf(b + *bl, "DEACTIVATED");
971 break; 968 break;
972 case TRANSPORT_DEVICE_SHUTDOWN: 969 case TRANSPORT_DEVICE_SHUTDOWN:
973 *bl += sprintf(b + *bl, "SHUTDOWN"); 970 *bl += sprintf(b + *bl, "SHUTDOWN");
974 break; 971 break;
975 case TRANSPORT_DEVICE_OFFLINE_ACTIVATED: 972 case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
976 case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED: 973 case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
977 *bl += sprintf(b + *bl, "OFFLINE"); 974 *bl += sprintf(b + *bl, "OFFLINE");
978 break; 975 break;
979 default: 976 default:
980 *bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status); 977 *bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status);
981 break; 978 break;
982 } 979 }
983 980
984 *bl += sprintf(b + *bl, " Execute/Left/Max Queue Depth: %d/%d/%d", 981 *bl += sprintf(b + *bl, " Execute/Left/Max Queue Depth: %d/%d/%d",
985 atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left), 982 atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left),
986 dev->queue_depth); 983 dev->queue_depth);
987 *bl += sprintf(b + *bl, " SectorSize: %u MaxSectors: %u\n", 984 *bl += sprintf(b + *bl, " SectorSize: %u MaxSectors: %u\n",
988 dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors); 985 dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors);
989 *bl += sprintf(b + *bl, " "); 986 *bl += sprintf(b + *bl, " ");
990 } 987 }
991 988
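transport_dump_dev_state() appends into a caller-supplied buffer and advances *bl by the bytes written, so several dump helpers can be chained into one buffer. A hedged usage sketch; the buffer size is an assumption and the function name is illustrative:

static void demo_log_dev_state(struct se_device *dev)
{
	char buf[256];
	int bl = 0;

	transport_dump_dev_state(dev, buf, &bl);	/* bl == strlen(buf) */
	pr_debug("%s\n", buf);
}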
992 void transport_dump_vpd_proto_id( 989 void transport_dump_vpd_proto_id(
993 struct t10_vpd *vpd, 990 struct t10_vpd *vpd,
994 unsigned char *p_buf, 991 unsigned char *p_buf,
995 int p_buf_len) 992 int p_buf_len)
996 { 993 {
997 unsigned char buf[VPD_TMP_BUF_SIZE]; 994 unsigned char buf[VPD_TMP_BUF_SIZE];
998 int len; 995 int len;
999 996
1000 memset(buf, 0, VPD_TMP_BUF_SIZE); 997 memset(buf, 0, VPD_TMP_BUF_SIZE);
1001 len = sprintf(buf, "T10 VPD Protocol Identifier: "); 998 len = sprintf(buf, "T10 VPD Protocol Identifier: ");
1002 999
1003 switch (vpd->protocol_identifier) { 1000 switch (vpd->protocol_identifier) {
1004 case 0x00: 1001 case 0x00:
1005 sprintf(buf+len, "Fibre Channel\n"); 1002 sprintf(buf+len, "Fibre Channel\n");
1006 break; 1003 break;
1007 case 0x10: 1004 case 0x10:
1008 sprintf(buf+len, "Parallel SCSI\n"); 1005 sprintf(buf+len, "Parallel SCSI\n");
1009 break; 1006 break;
1010 case 0x20: 1007 case 0x20:
1011 sprintf(buf+len, "SSA\n"); 1008 sprintf(buf+len, "SSA\n");
1012 break; 1009 break;
1013 case 0x30: 1010 case 0x30:
1014 sprintf(buf+len, "IEEE 1394\n"); 1011 sprintf(buf+len, "IEEE 1394\n");
1015 break; 1012 break;
1016 case 0x40: 1013 case 0x40:
1017 sprintf(buf+len, "SCSI Remote Direct Memory Access" 1014 sprintf(buf+len, "SCSI Remote Direct Memory Access"
1018 " Protocol\n"); 1015 " Protocol\n");
1019 break; 1016 break;
1020 case 0x50: 1017 case 0x50:
1021 sprintf(buf+len, "Internet SCSI (iSCSI)\n"); 1018 sprintf(buf+len, "Internet SCSI (iSCSI)\n");
1022 break; 1019 break;
1023 case 0x60: 1020 case 0x60:
1024 sprintf(buf+len, "SAS Serial SCSI Protocol\n"); 1021 sprintf(buf+len, "SAS Serial SCSI Protocol\n");
1025 break; 1022 break;
1026 case 0x70: 1023 case 0x70:
1027 sprintf(buf+len, "Automation/Drive Interface Transport" 1024 sprintf(buf+len, "Automation/Drive Interface Transport"
1028 " Protocol\n"); 1025 " Protocol\n");
1029 break; 1026 break;
1030 case 0x80: 1027 case 0x80:
1031 sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n"); 1028 sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
1032 break; 1029 break;
1033 default: 1030 default:
1034 sprintf(buf+len, "Unknown 0x%02x\n", 1031 sprintf(buf+len, "Unknown 0x%02x\n",
1035 vpd->protocol_identifier); 1032 vpd->protocol_identifier);
1036 break; 1033 break;
1037 } 1034 }
1038 1035
1039 if (p_buf) 1036 if (p_buf)
1040 strncpy(p_buf, buf, p_buf_len); 1037 strncpy(p_buf, buf, p_buf_len);
1041 else 1038 else
1042 pr_debug("%s", buf); 1039 pr_debug("%s", buf);
1043 } 1040 }
1044 1041
1045 void 1042 void
1046 transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83) 1043 transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
1047 { 1044 {
1048 /* 1045 /*
1049 * Check if the Protocol Identifier Valid (PIV) bit is set.. 1046 * Check if the Protocol Identifier Valid (PIV) bit is set..
1050 * 1047 *
1051 * from spc3r23.pdf section 7.5.1 1048 * from spc3r23.pdf section 7.5.1
1052 */ 1049 */
1053 if (page_83[1] & 0x80) { 1050 if (page_83[1] & 0x80) {
1054 vpd->protocol_identifier = (page_83[0] & 0xf0); 1051 vpd->protocol_identifier = (page_83[0] & 0xf0);
1055 vpd->protocol_identifier_set = 1; 1052 vpd->protocol_identifier_set = 1;
1056 transport_dump_vpd_proto_id(vpd, NULL, 0); 1053 transport_dump_vpd_proto_id(vpd, NULL, 0);
1057 } 1054 }
1058 } 1055 }
1059 EXPORT_SYMBOL(transport_set_vpd_proto_id); 1056 EXPORT_SYMBOL(transport_set_vpd_proto_id);
1060 1057
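For illustration, a minimal user-space sketch of the PIV/protocol-identifier decode performed above; the sample page_83 bytes are hypothetical, not taken from a real device:

    #include <stdio.h>

    int main(void)
    {
    	/* Hypothetical start of an INQUIRY VPD page 0x83 designation
    	 * descriptor: protocol identifier in bits 7-4 of byte 0,
    	 * PIV bit in bit 7 of byte 1 (spc3r23 section 7.5.1).
    	 */
    	unsigned char page_83[2] = { 0x61, 0x93 };

    	if (page_83[1] & 0x80)	/* PIV set, identifier is valid */
    		printf("protocol identifier: 0x%02x\n", page_83[0] & 0xf0);
    	return 0;
    }
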
1061 int transport_dump_vpd_assoc( 1058 int transport_dump_vpd_assoc(
1062 struct t10_vpd *vpd, 1059 struct t10_vpd *vpd,
1063 unsigned char *p_buf, 1060 unsigned char *p_buf,
1064 int p_buf_len) 1061 int p_buf_len)
1065 { 1062 {
1066 unsigned char buf[VPD_TMP_BUF_SIZE]; 1063 unsigned char buf[VPD_TMP_BUF_SIZE];
1067 int ret = 0; 1064 int ret = 0;
1068 int len; 1065 int len;
1069 1066
1070 memset(buf, 0, VPD_TMP_BUF_SIZE); 1067 memset(buf, 0, VPD_TMP_BUF_SIZE);
1071 len = sprintf(buf, "T10 VPD Identifier Association: "); 1068 len = sprintf(buf, "T10 VPD Identifier Association: ");
1072 1069
1073 switch (vpd->association) { 1070 switch (vpd->association) {
1074 case 0x00: 1071 case 0x00:
1075 sprintf(buf+len, "addressed logical unit\n"); 1072 sprintf(buf+len, "addressed logical unit\n");
1076 break; 1073 break;
1077 case 0x10: 1074 case 0x10:
1078 sprintf(buf+len, "target port\n"); 1075 sprintf(buf+len, "target port\n");
1079 break; 1076 break;
1080 case 0x20: 1077 case 0x20:
1081 sprintf(buf+len, "SCSI target device\n"); 1078 sprintf(buf+len, "SCSI target device\n");
1082 break; 1079 break;
1083 default: 1080 default:
1084 sprintf(buf+len, "Unknown 0x%02x\n", vpd->association); 1081 sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
1085 ret = -EINVAL; 1082 ret = -EINVAL;
1086 break; 1083 break;
1087 } 1084 }
1088 1085
1089 if (p_buf) 1086 if (p_buf)
1090 strncpy(p_buf, buf, p_buf_len); 1087 strncpy(p_buf, buf, p_buf_len);
1091 else 1088 else
1092 pr_debug("%s", buf); 1089 pr_debug("%s", buf);
1093 1090
1094 return ret; 1091 return ret;
1095 } 1092 }
1096 1093
1097 int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83) 1094 int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
1098 { 1095 {
1099 /* 1096 /*
1100 * The VPD identification association.. 1097 * The VPD identification association..
1101 * 1098 *
1102 * from spc3r23.pdf Section 7.6.3.1 Table 297 1099 * from spc3r23.pdf Section 7.6.3.1 Table 297
1103 */ 1100 */
1104 vpd->association = (page_83[1] & 0x30); 1101 vpd->association = (page_83[1] & 0x30);
1105 return transport_dump_vpd_assoc(vpd, NULL, 0); 1102 return transport_dump_vpd_assoc(vpd, NULL, 0);
1106 } 1103 }
1107 EXPORT_SYMBOL(transport_set_vpd_assoc); 1104 EXPORT_SYMBOL(transport_set_vpd_assoc);
1108 1105
1109 int transport_dump_vpd_ident_type( 1106 int transport_dump_vpd_ident_type(
1110 struct t10_vpd *vpd, 1107 struct t10_vpd *vpd,
1111 unsigned char *p_buf, 1108 unsigned char *p_buf,
1112 int p_buf_len) 1109 int p_buf_len)
1113 { 1110 {
1114 unsigned char buf[VPD_TMP_BUF_SIZE]; 1111 unsigned char buf[VPD_TMP_BUF_SIZE];
1115 int ret = 0; 1112 int ret = 0;
1116 int len; 1113 int len;
1117 1114
1118 memset(buf, 0, VPD_TMP_BUF_SIZE); 1115 memset(buf, 0, VPD_TMP_BUF_SIZE);
1119 len = sprintf(buf, "T10 VPD Identifier Type: "); 1116 len = sprintf(buf, "T10 VPD Identifier Type: ");
1120 1117
1121 switch (vpd->device_identifier_type) { 1118 switch (vpd->device_identifier_type) {
1122 case 0x00: 1119 case 0x00:
1123 sprintf(buf+len, "Vendor specific\n"); 1120 sprintf(buf+len, "Vendor specific\n");
1124 break; 1121 break;
1125 case 0x01: 1122 case 0x01:
1126 sprintf(buf+len, "T10 Vendor ID based\n"); 1123 sprintf(buf+len, "T10 Vendor ID based\n");
1127 break; 1124 break;
1128 case 0x02: 1125 case 0x02:
1129 sprintf(buf+len, "EUI-64 based\n"); 1126 sprintf(buf+len, "EUI-64 based\n");
1130 break; 1127 break;
1131 case 0x03: 1128 case 0x03:
1132 sprintf(buf+len, "NAA\n"); 1129 sprintf(buf+len, "NAA\n");
1133 break; 1130 break;
1134 case 0x04: 1131 case 0x04:
1135 sprintf(buf+len, "Relative target port identifier\n"); 1132 sprintf(buf+len, "Relative target port identifier\n");
1136 break; 1133 break;
1137 case 0x08: 1134 case 0x08:
1138 sprintf(buf+len, "SCSI name string\n"); 1135 sprintf(buf+len, "SCSI name string\n");
1139 break; 1136 break;
1140 default: 1137 default:
1141 sprintf(buf+len, "Unsupported: 0x%02x\n", 1138 sprintf(buf+len, "Unsupported: 0x%02x\n",
1142 vpd->device_identifier_type); 1139 vpd->device_identifier_type);
1143 ret = -EINVAL; 1140 ret = -EINVAL;
1144 break; 1141 break;
1145 } 1142 }
1146 1143
1147 if (p_buf) { 1144 if (p_buf) {
1148 if (p_buf_len < strlen(buf)+1) 1145 if (p_buf_len < strlen(buf)+1)
1149 return -EINVAL; 1146 return -EINVAL;
1150 strncpy(p_buf, buf, p_buf_len); 1147 strncpy(p_buf, buf, p_buf_len);
1151 } else { 1148 } else {
1152 pr_debug("%s", buf); 1149 pr_debug("%s", buf);
1153 } 1150 }
1154 1151
1155 return ret; 1152 return ret;
1156 } 1153 }
1157 1154
1158 int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83) 1155 int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
1159 { 1156 {
1160 /* 1157 /*
1161 * The VPD identifier type.. 1158 * The VPD identifier type..
1162 * 1159 *
1163 * from spc3r23.pdf Section 7.6.3.1 Table 298 1160 * from spc3r23.pdf Section 7.6.3.1 Table 298
1164 */ 1161 */
1165 vpd->device_identifier_type = (page_83[1] & 0x0f); 1162 vpd->device_identifier_type = (page_83[1] & 0x0f);
1166 return transport_dump_vpd_ident_type(vpd, NULL, 0); 1163 return transport_dump_vpd_ident_type(vpd, NULL, 0);
1167 } 1164 }
1168 EXPORT_SYMBOL(transport_set_vpd_ident_type); 1165 EXPORT_SYMBOL(transport_set_vpd_ident_type);
1169 1166
1170 int transport_dump_vpd_ident( 1167 int transport_dump_vpd_ident(
1171 struct t10_vpd *vpd, 1168 struct t10_vpd *vpd,
1172 unsigned char *p_buf, 1169 unsigned char *p_buf,
1173 int p_buf_len) 1170 int p_buf_len)
1174 { 1171 {
1175 unsigned char buf[VPD_TMP_BUF_SIZE]; 1172 unsigned char buf[VPD_TMP_BUF_SIZE];
1176 int ret = 0; 1173 int ret = 0;
1177 1174
1178 memset(buf, 0, VPD_TMP_BUF_SIZE); 1175 memset(buf, 0, VPD_TMP_BUF_SIZE);
1179 1176
1180 switch (vpd->device_identifier_code_set) { 1177 switch (vpd->device_identifier_code_set) {
1181 case 0x01: /* Binary */ 1178 case 0x01: /* Binary */
1182 sprintf(buf, "T10 VPD Binary Device Identifier: %s\n", 1179 sprintf(buf, "T10 VPD Binary Device Identifier: %s\n",
1183 &vpd->device_identifier[0]); 1180 &vpd->device_identifier[0]);
1184 break; 1181 break;
1185 case 0x02: /* ASCII */ 1182 case 0x02: /* ASCII */
1186 sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n", 1183 sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n",
1187 &vpd->device_identifier[0]); 1184 &vpd->device_identifier[0]);
1188 break; 1185 break;
1189 case 0x03: /* UTF-8 */ 1186 case 0x03: /* UTF-8 */
1190 sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n", 1187 sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n",
1191 &vpd->device_identifier[0]); 1188 &vpd->device_identifier[0]);
1192 break; 1189 break;
1193 default: 1190 default:
1194 sprintf(buf, "T10 VPD Device Identifier encoding unsupported:" 1191 sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
1195 " 0x%02x", vpd->device_identifier_code_set); 1192 " 0x%02x", vpd->device_identifier_code_set);
1196 ret = -EINVAL; 1193 ret = -EINVAL;
1197 break; 1194 break;
1198 } 1195 }
1199 1196
1200 if (p_buf) 1197 if (p_buf)
1201 strncpy(p_buf, buf, p_buf_len); 1198 strncpy(p_buf, buf, p_buf_len);
1202 else 1199 else
1203 pr_debug("%s", buf); 1200 pr_debug("%s", buf);
1204 1201
1205 return ret; 1202 return ret;
1206 } 1203 }
1207 1204
1208 int 1205 int
1209 transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83) 1206 transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
1210 { 1207 {
1211 static const char hex_str[] = "0123456789abcdef"; 1208 static const char hex_str[] = "0123456789abcdef";
1212 	static const char hex_str[] = "0123456789abcdef";	int j = 0, i = 4; /* offset to start of the identifier */ 1209 	int j = 0, i = 4; /* offset to start of the identifier */
1213 1210
1214 /* 1211 /*
1215 * The VPD Code Set (encoding) 1212 * The VPD Code Set (encoding)
1216 * 1213 *
1217 * from spc3r23.pdf Section 7.6.3.1 Table 296 1214 * from spc3r23.pdf Section 7.6.3.1 Table 296
1218 */ 1215 */
1219 vpd->device_identifier_code_set = (page_83[0] & 0x0f); 1216 vpd->device_identifier_code_set = (page_83[0] & 0x0f);
1220 switch (vpd->device_identifier_code_set) { 1217 switch (vpd->device_identifier_code_set) {
1221 case 0x01: /* Binary */ 1218 case 0x01: /* Binary */
1222 vpd->device_identifier[j++] = 1219 vpd->device_identifier[j++] =
1223 hex_str[vpd->device_identifier_type]; 1220 hex_str[vpd->device_identifier_type];
1224 while (i < (4 + page_83[3])) { 1221 while (i < (4 + page_83[3])) {
1225 vpd->device_identifier[j++] = 1222 vpd->device_identifier[j++] =
1226 hex_str[(page_83[i] & 0xf0) >> 4]; 1223 hex_str[(page_83[i] & 0xf0) >> 4];
1227 vpd->device_identifier[j++] = 1224 vpd->device_identifier[j++] =
1228 hex_str[page_83[i] & 0x0f]; 1225 hex_str[page_83[i] & 0x0f];
1229 i++; 1226 i++;
1230 } 1227 }
1231 break; 1228 break;
1232 case 0x02: /* ASCII */ 1229 case 0x02: /* ASCII */
1233 case 0x03: /* UTF-8 */ 1230 case 0x03: /* UTF-8 */
1234 while (i < (4 + page_83[3])) 1231 while (i < (4 + page_83[3]))
1235 vpd->device_identifier[j++] = page_83[i++]; 1232 vpd->device_identifier[j++] = page_83[i++];
1236 break; 1233 break;
1237 default: 1234 default:
1238 break; 1235 break;
1239 } 1236 }
1240 1237
1241 return transport_dump_vpd_ident(vpd, NULL, 0); 1238 return transport_dump_vpd_ident(vpd, NULL, 0);
1242 } 1239 }
1243 EXPORT_SYMBOL(transport_set_vpd_ident); 1240 EXPORT_SYMBOL(transport_set_vpd_ident);
1244 1241
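The binary code set branch above nibble-encodes the identifier into ASCII hex; a self-contained sketch of that loop, using a hypothetical 4-byte designator:

    #include <stdio.h>

    int main(void)
    {
    	static const char hex_str[] = "0123456789abcdef";
    	/* Hypothetical binary designator bytes, i.e. page_83[4..7] */
    	unsigned char id[4] = { 0xde, 0xad, 0xbe, 0xef };
    	char out[2 * sizeof(id) + 1];
    	int i, j = 0;

    	for (i = 0; i < (int)sizeof(id); i++) {
    		out[j++] = hex_str[(id[i] & 0xf0) >> 4];	/* high nibble */
    		out[j++] = hex_str[id[i] & 0x0f];		/* low nibble */
    	}
    	out[j] = '\0';
    	printf("%s\n", out);	/* deadbeef */
    	return 0;
    }
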
1245 static void core_setup_task_attr_emulation(struct se_device *dev) 1242 static void core_setup_task_attr_emulation(struct se_device *dev)
1246 { 1243 {
1247 /* 1244 /*
1248 * If this device is from Target_Core_Mod/pSCSI, disable the 1245 * If this device is from Target_Core_Mod/pSCSI, disable the
1249 * SAM Task Attribute emulation. 1246 * SAM Task Attribute emulation.
1250 * 1247 *
1251 	 * This is currently not available in upstream Linux/SCSI Target 1248 	 * This is currently not available in upstream Linux/SCSI Target
1252 * mode code, and is assumed to be disabled while using TCM/pSCSI. 1249 * mode code, and is assumed to be disabled while using TCM/pSCSI.
1253 */ 1250 */
1254 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { 1251 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1255 dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH; 1252 dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
1256 return; 1253 return;
1257 } 1254 }
1258 1255
1259 dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED; 1256 dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
1260 pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x" 1257 pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
1261 " device\n", dev->transport->name, 1258 " device\n", dev->transport->name,
1262 dev->transport->get_device_rev(dev)); 1259 dev->transport->get_device_rev(dev));
1263 } 1260 }
1264 1261
1265 static void scsi_dump_inquiry(struct se_device *dev) 1262 static void scsi_dump_inquiry(struct se_device *dev)
1266 { 1263 {
1267 struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn; 1264 struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
1268 int i, device_type; 1265 int i, device_type;
1269 /* 1266 /*
1270 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer 1267 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
1271 */ 1268 */
1272 pr_debug(" Vendor: "); 1269 pr_debug(" Vendor: ");
1273 for (i = 0; i < 8; i++) 1270 for (i = 0; i < 8; i++)
1274 if (wwn->vendor[i] >= 0x20) 1271 if (wwn->vendor[i] >= 0x20)
1275 pr_debug("%c", wwn->vendor[i]); 1272 pr_debug("%c", wwn->vendor[i]);
1276 else 1273 else
1277 pr_debug(" "); 1274 pr_debug(" ");
1278 1275
1279 pr_debug(" Model: "); 1276 pr_debug(" Model: ");
1280 for (i = 0; i < 16; i++) 1277 for (i = 0; i < 16; i++)
1281 if (wwn->model[i] >= 0x20) 1278 if (wwn->model[i] >= 0x20)
1282 pr_debug("%c", wwn->model[i]); 1279 pr_debug("%c", wwn->model[i]);
1283 else 1280 else
1284 pr_debug(" "); 1281 pr_debug(" ");
1285 1282
1286 pr_debug(" Revision: "); 1283 pr_debug(" Revision: ");
1287 for (i = 0; i < 4; i++) 1284 for (i = 0; i < 4; i++)
1288 if (wwn->revision[i] >= 0x20) 1285 if (wwn->revision[i] >= 0x20)
1289 pr_debug("%c", wwn->revision[i]); 1286 pr_debug("%c", wwn->revision[i]);
1290 else 1287 else
1291 pr_debug(" "); 1288 pr_debug(" ");
1292 1289
1293 pr_debug("\n"); 1290 pr_debug("\n");
1294 1291
1295 device_type = dev->transport->get_device_type(dev); 1292 device_type = dev->transport->get_device_type(dev);
1296 pr_debug(" Type: %s ", scsi_device_type(device_type)); 1293 pr_debug(" Type: %s ", scsi_device_type(device_type));
1297 pr_debug(" ANSI SCSI revision: %02x\n", 1294 pr_debug(" ANSI SCSI revision: %02x\n",
1298 dev->transport->get_device_rev(dev)); 1295 dev->transport->get_device_rev(dev));
1299 } 1296 }
1300 1297
1301 struct se_device *transport_add_device_to_core_hba( 1298 struct se_device *transport_add_device_to_core_hba(
1302 struct se_hba *hba, 1299 struct se_hba *hba,
1303 struct se_subsystem_api *transport, 1300 struct se_subsystem_api *transport,
1304 struct se_subsystem_dev *se_dev, 1301 struct se_subsystem_dev *se_dev,
1305 u32 device_flags, 1302 u32 device_flags,
1306 void *transport_dev, 1303 void *transport_dev,
1307 struct se_dev_limits *dev_limits, 1304 struct se_dev_limits *dev_limits,
1308 const char *inquiry_prod, 1305 const char *inquiry_prod,
1309 const char *inquiry_rev) 1306 const char *inquiry_rev)
1310 { 1307 {
1311 int force_pt; 1308 int force_pt;
1312 struct se_device *dev; 1309 struct se_device *dev;
1313 1310
1314 dev = kzalloc(sizeof(struct se_device), GFP_KERNEL); 1311 dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
1315 if (!dev) { 1312 if (!dev) {
1316 pr_err("Unable to allocate memory for se_dev_t\n"); 1313 pr_err("Unable to allocate memory for se_dev_t\n");
1317 return NULL; 1314 return NULL;
1318 } 1315 }
1319 1316
1320 transport_init_queue_obj(&dev->dev_queue_obj); 1317 transport_init_queue_obj(&dev->dev_queue_obj);
1321 dev->dev_flags = device_flags; 1318 dev->dev_flags = device_flags;
1322 dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED; 1319 dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
1323 dev->dev_ptr = transport_dev; 1320 dev->dev_ptr = transport_dev;
1324 dev->se_hba = hba; 1321 dev->se_hba = hba;
1325 dev->se_sub_dev = se_dev; 1322 dev->se_sub_dev = se_dev;
1326 dev->transport = transport; 1323 dev->transport = transport;
1327 INIT_LIST_HEAD(&dev->dev_list); 1324 INIT_LIST_HEAD(&dev->dev_list);
1328 INIT_LIST_HEAD(&dev->dev_sep_list); 1325 INIT_LIST_HEAD(&dev->dev_sep_list);
1329 INIT_LIST_HEAD(&dev->dev_tmr_list); 1326 INIT_LIST_HEAD(&dev->dev_tmr_list);
1330 INIT_LIST_HEAD(&dev->execute_task_list); 1327 INIT_LIST_HEAD(&dev->execute_task_list);
1331 INIT_LIST_HEAD(&dev->delayed_cmd_list); 1328 INIT_LIST_HEAD(&dev->delayed_cmd_list);
1332 INIT_LIST_HEAD(&dev->state_task_list); 1329 INIT_LIST_HEAD(&dev->state_task_list);
1333 INIT_LIST_HEAD(&dev->qf_cmd_list); 1330 INIT_LIST_HEAD(&dev->qf_cmd_list);
1334 spin_lock_init(&dev->execute_task_lock); 1331 spin_lock_init(&dev->execute_task_lock);
1335 spin_lock_init(&dev->delayed_cmd_lock); 1332 spin_lock_init(&dev->delayed_cmd_lock);
1336 spin_lock_init(&dev->dev_reservation_lock); 1333 spin_lock_init(&dev->dev_reservation_lock);
1337 spin_lock_init(&dev->dev_status_lock); 1334 spin_lock_init(&dev->dev_status_lock);
1338 spin_lock_init(&dev->se_port_lock); 1335 spin_lock_init(&dev->se_port_lock);
1339 spin_lock_init(&dev->se_tmr_lock); 1336 spin_lock_init(&dev->se_tmr_lock);
1340 spin_lock_init(&dev->qf_cmd_lock); 1337 spin_lock_init(&dev->qf_cmd_lock);
1341 1338
1342 dev->queue_depth = dev_limits->queue_depth; 1339 dev->queue_depth = dev_limits->queue_depth;
1343 atomic_set(&dev->depth_left, dev->queue_depth); 1340 atomic_set(&dev->depth_left, dev->queue_depth);
1344 atomic_set(&dev->dev_ordered_id, 0); 1341 atomic_set(&dev->dev_ordered_id, 0);
1345 1342
1346 se_dev_set_default_attribs(dev, dev_limits); 1343 se_dev_set_default_attribs(dev, dev_limits);
1347 1344
1348 dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX); 1345 dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
1349 dev->creation_time = get_jiffies_64(); 1346 dev->creation_time = get_jiffies_64();
1350 spin_lock_init(&dev->stats_lock); 1347 spin_lock_init(&dev->stats_lock);
1351 1348
1352 spin_lock(&hba->device_lock); 1349 spin_lock(&hba->device_lock);
1353 list_add_tail(&dev->dev_list, &hba->hba_dev_list); 1350 list_add_tail(&dev->dev_list, &hba->hba_dev_list);
1354 hba->dev_count++; 1351 hba->dev_count++;
1355 spin_unlock(&hba->device_lock); 1352 spin_unlock(&hba->device_lock);
1356 /* 1353 /*
1357 * Setup the SAM Task Attribute emulation for struct se_device 1354 * Setup the SAM Task Attribute emulation for struct se_device
1358 */ 1355 */
1359 core_setup_task_attr_emulation(dev); 1356 core_setup_task_attr_emulation(dev);
1360 /* 1357 /*
1361 * Force PR and ALUA passthrough emulation with internal object use. 1358 * Force PR and ALUA passthrough emulation with internal object use.
1362 */ 1359 */
1363 force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE); 1360 force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE);
1364 /* 1361 /*
1365 * Setup the Reservations infrastructure for struct se_device 1362 * Setup the Reservations infrastructure for struct se_device
1366 */ 1363 */
1367 core_setup_reservations(dev, force_pt); 1364 core_setup_reservations(dev, force_pt);
1368 /* 1365 /*
1369 * Setup the Asymmetric Logical Unit Assignment for struct se_device 1366 * Setup the Asymmetric Logical Unit Assignment for struct se_device
1370 */ 1367 */
1371 if (core_setup_alua(dev, force_pt) < 0) 1368 if (core_setup_alua(dev, force_pt) < 0)
1372 goto out; 1369 goto out;
1373 1370
1374 /* 1371 /*
1375 * Startup the struct se_device processing thread 1372 * Startup the struct se_device processing thread
1376 */ 1373 */
1377 dev->process_thread = kthread_run(transport_processing_thread, dev, 1374 dev->process_thread = kthread_run(transport_processing_thread, dev,
1378 "LIO_%s", dev->transport->name); 1375 "LIO_%s", dev->transport->name);
1379 if (IS_ERR(dev->process_thread)) { 1376 if (IS_ERR(dev->process_thread)) {
1380 pr_err("Unable to create kthread: LIO_%s\n", 1377 pr_err("Unable to create kthread: LIO_%s\n",
1381 dev->transport->name); 1378 dev->transport->name);
1382 goto out; 1379 goto out;
1383 } 1380 }
1384 /* 1381 /*
1385 * Setup work_queue for QUEUE_FULL 1382 * Setup work_queue for QUEUE_FULL
1386 */ 1383 */
1387 INIT_WORK(&dev->qf_work_queue, target_qf_do_work); 1384 INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
1388 /* 1385 /*
1389 * Preload the initial INQUIRY const values if we are doing 1386 * Preload the initial INQUIRY const values if we are doing
1390 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI 1387 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
1391 * passthrough because this is being provided by the backend LLD. 1388 * passthrough because this is being provided by the backend LLD.
1392 * This is required so that transport_get_inquiry() copies these 1389 * This is required so that transport_get_inquiry() copies these
1393 * originals once back into DEV_T10_WWN(dev) for the virtual device 1390 * originals once back into DEV_T10_WWN(dev) for the virtual device
1394 * setup. 1391 * setup.
1395 */ 1392 */
1396 if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) { 1393 if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
1397 if (!inquiry_prod || !inquiry_rev) { 1394 if (!inquiry_prod || !inquiry_rev) {
1398 pr_err("All non TCM/pSCSI plugins require" 1395 pr_err("All non TCM/pSCSI plugins require"
1399 " INQUIRY consts\n"); 1396 " INQUIRY consts\n");
1400 goto out; 1397 goto out;
1401 } 1398 }
1402 1399
1403 strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8); 1400 strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8);
1404 strncpy(&dev->se_sub_dev->t10_wwn.model[0], inquiry_prod, 16); 1401 strncpy(&dev->se_sub_dev->t10_wwn.model[0], inquiry_prod, 16);
1405 strncpy(&dev->se_sub_dev->t10_wwn.revision[0], inquiry_rev, 4); 1402 strncpy(&dev->se_sub_dev->t10_wwn.revision[0], inquiry_rev, 4);
1406 } 1403 }
1407 scsi_dump_inquiry(dev); 1404 scsi_dump_inquiry(dev);
1408 1405
1409 return dev; 1406 return dev;
1410 out: 1407 out:
1411 kthread_stop(dev->process_thread); 1408 kthread_stop(dev->process_thread);
1412 1409
1413 spin_lock(&hba->device_lock); 1410 spin_lock(&hba->device_lock);
1414 list_del(&dev->dev_list); 1411 list_del(&dev->dev_list);
1415 hba->dev_count--; 1412 hba->dev_count--;
1416 spin_unlock(&hba->device_lock); 1413 spin_unlock(&hba->device_lock);
1417 1414
1418 se_release_vpd_for_dev(dev); 1415 se_release_vpd_for_dev(dev);
1419 1416
1420 kfree(dev); 1417 kfree(dev);
1421 1418
1422 return NULL; 1419 return NULL;
1423 } 1420 }
1424 EXPORT_SYMBOL(transport_add_device_to_core_hba); 1421 EXPORT_SYMBOL(transport_add_device_to_core_hba);
1425 1422
1426 /* transport_generic_prepare_cdb(): 1423 /* transport_generic_prepare_cdb():
1427 * 1424 *
1428 * Since the Initiator sees iSCSI devices as LUNs, the SCSI CDB will 1425 * Since the Initiator sees iSCSI devices as LUNs, the SCSI CDB will
1429 * contain the iSCSI LUN in bits 7-5 of byte 1 as per SAM-2. 1426 * contain the iSCSI LUN in bits 7-5 of byte 1 as per SAM-2.
1430 	 * The point of this is that, since we are mapping iSCSI LUNs to 1427 	 * The point of this is that, since we are mapping iSCSI LUNs to
1431 	 * SCSI Target IDs, a non-zero LUN in the CDB will throw the 1428 	 * SCSI Target IDs, a non-zero LUN in the CDB will throw the
1432 	 * devices and HBAs for a loop. 1429 	 * devices and HBAs for a loop.
1433 */ 1430 */
1434 static inline void transport_generic_prepare_cdb( 1431 static inline void transport_generic_prepare_cdb(
1435 unsigned char *cdb) 1432 unsigned char *cdb)
1436 { 1433 {
1437 switch (cdb[0]) { 1434 switch (cdb[0]) {
1438 case READ_10: /* SBC - RDProtect */ 1435 case READ_10: /* SBC - RDProtect */
1439 case READ_12: /* SBC - RDProtect */ 1436 case READ_12: /* SBC - RDProtect */
1440 case READ_16: /* SBC - RDProtect */ 1437 case READ_16: /* SBC - RDProtect */
1441 case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */ 1438 case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
1442 case VERIFY: /* SBC - VRProtect */ 1439 case VERIFY: /* SBC - VRProtect */
1443 case VERIFY_16: /* SBC - VRProtect */ 1440 case VERIFY_16: /* SBC - VRProtect */
1444 case WRITE_VERIFY: /* SBC - VRProtect */ 1441 case WRITE_VERIFY: /* SBC - VRProtect */
1445 case WRITE_VERIFY_12: /* SBC - VRProtect */ 1442 case WRITE_VERIFY_12: /* SBC - VRProtect */
1446 break; 1443 break;
1447 default: 1444 default:
1448 cdb[1] &= 0x1f; /* clear logical unit number */ 1445 cdb[1] &= 0x1f; /* clear logical unit number */
1449 break; 1446 break;
1450 } 1447 }
1451 } 1448 }
1452 1449
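A small sketch of the masking in the default case above, using a hypothetical TEST UNIT READY CDB that carries a legacy LUN value of 2 in bits 7-5 of byte 1:

    #include <stdio.h>

    int main(void)
    {
    	unsigned char cdb[6] = { 0x00, 0x40, 0x00, 0x00, 0x00, 0x00 };

    	cdb[1] &= 0x1f;	/* clear the SAM-2 LUN field in byte 1 */
    	printf("byte 1 after masking: 0x%02x\n", cdb[1]);	/* 0x00 */
    	return 0;
    }
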
1453 static struct se_task * 1450 static struct se_task *
1454 transport_generic_get_task(struct se_cmd *cmd, 1451 transport_generic_get_task(struct se_cmd *cmd,
1455 enum dma_data_direction data_direction) 1452 enum dma_data_direction data_direction)
1456 { 1453 {
1457 struct se_task *task; 1454 struct se_task *task;
1458 struct se_device *dev = cmd->se_dev; 1455 struct se_device *dev = cmd->se_dev;
1459 1456
1460 task = dev->transport->alloc_task(cmd->t_task_cdb); 1457 task = dev->transport->alloc_task(cmd->t_task_cdb);
1461 if (!task) { 1458 if (!task) {
1462 pr_err("Unable to allocate struct se_task\n"); 1459 pr_err("Unable to allocate struct se_task\n");
1463 return NULL; 1460 return NULL;
1464 } 1461 }
1465 1462
1466 INIT_LIST_HEAD(&task->t_list); 1463 INIT_LIST_HEAD(&task->t_list);
1467 INIT_LIST_HEAD(&task->t_execute_list); 1464 INIT_LIST_HEAD(&task->t_execute_list);
1468 INIT_LIST_HEAD(&task->t_state_list); 1465 INIT_LIST_HEAD(&task->t_state_list);
1469 init_completion(&task->task_stop_comp); 1466 init_completion(&task->task_stop_comp);
1470 task->task_se_cmd = cmd; 1467 task->task_se_cmd = cmd;
1471 task->task_data_direction = data_direction; 1468 task->task_data_direction = data_direction;
1472 1469
1473 return task; 1470 return task;
1474 } 1471 }
1475 1472
1476 static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *); 1473 static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *);
1477 1474
1478 /* 1475 /*
1479 * Used by fabric modules containing a local struct se_cmd within their 1476 * Used by fabric modules containing a local struct se_cmd within their
1480 * fabric dependent per I/O descriptor. 1477 * fabric dependent per I/O descriptor.
1481 */ 1478 */
1482 void transport_init_se_cmd( 1479 void transport_init_se_cmd(
1483 struct se_cmd *cmd, 1480 struct se_cmd *cmd,
1484 struct target_core_fabric_ops *tfo, 1481 struct target_core_fabric_ops *tfo,
1485 struct se_session *se_sess, 1482 struct se_session *se_sess,
1486 u32 data_length, 1483 u32 data_length,
1487 int data_direction, 1484 int data_direction,
1488 int task_attr, 1485 int task_attr,
1489 unsigned char *sense_buffer) 1486 unsigned char *sense_buffer)
1490 { 1487 {
1491 INIT_LIST_HEAD(&cmd->se_lun_node); 1488 INIT_LIST_HEAD(&cmd->se_lun_node);
1492 INIT_LIST_HEAD(&cmd->se_delayed_node); 1489 INIT_LIST_HEAD(&cmd->se_delayed_node);
1493 INIT_LIST_HEAD(&cmd->se_qf_node); 1490 INIT_LIST_HEAD(&cmd->se_qf_node);
1494 INIT_LIST_HEAD(&cmd->se_queue_node); 1491 INIT_LIST_HEAD(&cmd->se_queue_node);
1495 INIT_LIST_HEAD(&cmd->se_cmd_list); 1492 INIT_LIST_HEAD(&cmd->se_cmd_list);
1496 INIT_LIST_HEAD(&cmd->t_task_list); 1493 INIT_LIST_HEAD(&cmd->t_task_list);
1497 init_completion(&cmd->transport_lun_fe_stop_comp); 1494 init_completion(&cmd->transport_lun_fe_stop_comp);
1498 init_completion(&cmd->transport_lun_stop_comp); 1495 init_completion(&cmd->transport_lun_stop_comp);
1499 init_completion(&cmd->t_transport_stop_comp); 1496 init_completion(&cmd->t_transport_stop_comp);
1500 init_completion(&cmd->cmd_wait_comp); 1497 init_completion(&cmd->cmd_wait_comp);
1501 spin_lock_init(&cmd->t_state_lock); 1498 spin_lock_init(&cmd->t_state_lock);
1502 atomic_set(&cmd->transport_dev_active, 1); 1499 atomic_set(&cmd->transport_dev_active, 1);
1503 1500
1504 cmd->se_tfo = tfo; 1501 cmd->se_tfo = tfo;
1505 cmd->se_sess = se_sess; 1502 cmd->se_sess = se_sess;
1506 cmd->data_length = data_length; 1503 cmd->data_length = data_length;
1507 cmd->data_direction = data_direction; 1504 cmd->data_direction = data_direction;
1508 cmd->sam_task_attr = task_attr; 1505 cmd->sam_task_attr = task_attr;
1509 cmd->sense_buffer = sense_buffer; 1506 cmd->sense_buffer = sense_buffer;
1510 } 1507 }
1511 EXPORT_SYMBOL(transport_init_se_cmd); 1508 EXPORT_SYMBOL(transport_init_se_cmd);
1512 1509
1513 static int transport_check_alloc_task_attr(struct se_cmd *cmd) 1510 static int transport_check_alloc_task_attr(struct se_cmd *cmd)
1514 { 1511 {
1515 /* 1512 /*
1516 * Check if SAM Task Attribute emulation is enabled for this 1513 * Check if SAM Task Attribute emulation is enabled for this
1517 * struct se_device storage object 1514 * struct se_device storage object
1518 */ 1515 */
1519 if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) 1516 if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
1520 return 0; 1517 return 0;
1521 1518
1522 if (cmd->sam_task_attr == MSG_ACA_TAG) { 1519 if (cmd->sam_task_attr == MSG_ACA_TAG) {
1523 pr_debug("SAM Task Attribute ACA" 1520 pr_debug("SAM Task Attribute ACA"
1524 " emulation is not supported\n"); 1521 " emulation is not supported\n");
1525 return -EINVAL; 1522 return -EINVAL;
1526 } 1523 }
1527 /* 1524 /*
1528 * Used to determine when ORDERED commands should go from 1525 * Used to determine when ORDERED commands should go from
1529 * Dormant to Active status. 1526 * Dormant to Active status.
1530 */ 1527 */
1531 cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id); 1528 cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
1532 smp_mb__after_atomic_inc(); 1529 smp_mb__after_atomic_inc();
1533 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n", 1530 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
1534 cmd->se_ordered_id, cmd->sam_task_attr, 1531 cmd->se_ordered_id, cmd->sam_task_attr,
1535 cmd->se_dev->transport->name); 1532 cmd->se_dev->transport->name);
1536 return 0; 1533 return 0;
1537 } 1534 }
1538 1535
1539 /* transport_generic_allocate_tasks(): 1536 /* transport_generic_allocate_tasks():
1540 * 1537 *
1541 * Called from fabric RX Thread. 1538 * Called from fabric RX Thread.
1542 */ 1539 */
1543 int transport_generic_allocate_tasks( 1540 int transport_generic_allocate_tasks(
1544 struct se_cmd *cmd, 1541 struct se_cmd *cmd,
1545 unsigned char *cdb) 1542 unsigned char *cdb)
1546 { 1543 {
1547 int ret; 1544 int ret;
1548 1545
1549 transport_generic_prepare_cdb(cdb); 1546 transport_generic_prepare_cdb(cdb);
1550 /* 1547 /*
1551 * Ensure that the received CDB is less than the max (252 + 8) bytes 1548 * Ensure that the received CDB is less than the max (252 + 8) bytes
1552 * for VARIABLE_LENGTH_CMD 1549 * for VARIABLE_LENGTH_CMD
1553 */ 1550 */
1554 if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) { 1551 if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
1555 pr_err("Received SCSI CDB with command_size: %d that" 1552 pr_err("Received SCSI CDB with command_size: %d that"
1556 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", 1553 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
1557 scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE); 1554 scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
1558 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 1555 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1559 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; 1556 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
1560 return -EINVAL; 1557 return -EINVAL;
1561 } 1558 }
1562 /* 1559 /*
1563 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE, 1560 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
1564 * allocate the additional extended CDB buffer now.. Otherwise 1561 * allocate the additional extended CDB buffer now.. Otherwise
1565 	 * set up the pointer from __t_task_cdb to t_task_cdb. 1562 	 * set up the pointer from __t_task_cdb to t_task_cdb.
1566 */ 1563 */
1567 if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) { 1564 if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
1568 cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), 1565 cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
1569 GFP_KERNEL); 1566 GFP_KERNEL);
1570 if (!cmd->t_task_cdb) { 1567 if (!cmd->t_task_cdb) {
1571 pr_err("Unable to allocate cmd->t_task_cdb" 1568 pr_err("Unable to allocate cmd->t_task_cdb"
1572 " %u > sizeof(cmd->__t_task_cdb): %lu ops\n", 1569 " %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
1573 scsi_command_size(cdb), 1570 scsi_command_size(cdb),
1574 (unsigned long)sizeof(cmd->__t_task_cdb)); 1571 (unsigned long)sizeof(cmd->__t_task_cdb));
1575 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 1572 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1576 cmd->scsi_sense_reason = 1573 cmd->scsi_sense_reason =
1577 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1574 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1578 return -ENOMEM; 1575 return -ENOMEM;
1579 } 1576 }
1580 } else 1577 } else
1581 cmd->t_task_cdb = &cmd->__t_task_cdb[0]; 1578 cmd->t_task_cdb = &cmd->__t_task_cdb[0];
1582 /* 1579 /*
1583 * Copy the original CDB into cmd-> 1580 * Copy the original CDB into cmd->
1584 */ 1581 */
1585 memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb)); 1582 memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
1586 /* 1583 /*
1587 * Setup the received CDB based on SCSI defined opcodes and 1584 * Setup the received CDB based on SCSI defined opcodes and
1588 * perform unit attention, persistent reservations and ALUA 1585 * perform unit attention, persistent reservations and ALUA
1589 * checks for virtual device backends. The cmd->t_task_cdb 1586 * checks for virtual device backends. The cmd->t_task_cdb
1590 	 * pointer is expected to be set up before we reach this point. 1587 	 * pointer is expected to be set up before we reach this point.
1591 */ 1588 */
1592 ret = transport_generic_cmd_sequencer(cmd, cdb); 1589 ret = transport_generic_cmd_sequencer(cmd, cdb);
1593 if (ret < 0) 1590 if (ret < 0)
1594 return ret; 1591 return ret;
1595 /* 1592 /*
1596 * Check for SAM Task Attribute Emulation 1593 * Check for SAM Task Attribute Emulation
1597 */ 1594 */
1598 if (transport_check_alloc_task_attr(cmd) < 0) { 1595 if (transport_check_alloc_task_attr(cmd) < 0) {
1599 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 1596 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
1600 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; 1597 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
1601 return -EINVAL; 1598 return -EINVAL;
1602 } 1599 }
1603 spin_lock(&cmd->se_lun->lun_sep_lock); 1600 spin_lock(&cmd->se_lun->lun_sep_lock);
1604 if (cmd->se_lun->lun_sep) 1601 if (cmd->se_lun->lun_sep)
1605 cmd->se_lun->lun_sep->sep_stats.cmd_pdus++; 1602 cmd->se_lun->lun_sep->sep_stats.cmd_pdus++;
1606 spin_unlock(&cmd->se_lun->lun_sep_lock); 1603 spin_unlock(&cmd->se_lun->lun_sep_lock);
1607 return 0; 1604 return 0;
1608 } 1605 }
1609 EXPORT_SYMBOL(transport_generic_allocate_tasks); 1606 EXPORT_SYMBOL(transport_generic_allocate_tasks);
1610 1607
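The size check above reuses the inline __t_task_cdb storage for ordinary CDBs and allocates only for oversized variable-length ones. A hedged user-space sketch of the same decision; INLINE_CDB_SIZE and cdb_buf() are made-up stand-ins, not kernel symbols:

    #include <stdio.h>
    #include <stdlib.h>

    #define INLINE_CDB_SIZE 32	/* stand-in for sizeof(__t_task_cdb) */

    /* Return storage for a CDB of cdb_len bytes, reusing the caller's
     * inline buffer whenever it is large enough.
     */
    static unsigned char *cdb_buf(unsigned char *inline_buf, size_t cdb_len)
    {
    	if (cdb_len > INLINE_CDB_SIZE)
    		return calloc(1, cdb_len);
    	return inline_buf;
    }

    int main(void)
    {
    	unsigned char inline_cdb[INLINE_CDB_SIZE];
    	unsigned char *p = cdb_buf(inline_cdb, 16);

    	printf("inline buffer reused: %s\n", p == inline_cdb ? "yes" : "no");
    	return 0;
    }
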
1611 /* 1608 /*
1612 * Used by fabric module frontends to queue tasks directly. 1609 * Used by fabric module frontends to queue tasks directly.
1613 	 * May only be used from process context. 1610 	 * May only be used from process context.
1614 */ 1611 */
1615 int transport_handle_cdb_direct( 1612 int transport_handle_cdb_direct(
1616 struct se_cmd *cmd) 1613 struct se_cmd *cmd)
1617 { 1614 {
1618 int ret; 1615 int ret;
1619 1616
1620 if (!cmd->se_lun) { 1617 if (!cmd->se_lun) {
1621 dump_stack(); 1618 dump_stack();
1622 pr_err("cmd->se_lun is NULL\n"); 1619 pr_err("cmd->se_lun is NULL\n");
1623 return -EINVAL; 1620 return -EINVAL;
1624 } 1621 }
1625 if (in_interrupt()) { 1622 if (in_interrupt()) {
1626 dump_stack(); 1623 dump_stack();
1627 pr_err("transport_generic_handle_cdb cannot be called" 1624 pr_err("transport_generic_handle_cdb cannot be called"
1628 " from interrupt context\n"); 1625 " from interrupt context\n");
1629 return -EINVAL; 1626 return -EINVAL;
1630 } 1627 }
1631 /* 1628 /*
1632 * Set TRANSPORT_NEW_CMD state and cmd->t_transport_active=1 following 1629 * Set TRANSPORT_NEW_CMD state and cmd->t_transport_active=1 following
1633 * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue() 1630 * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue()
1634 * in existing usage to ensure that outstanding descriptors are handled 1631 * in existing usage to ensure that outstanding descriptors are handled
1635 * correctly during shutdown via transport_wait_for_tasks() 1632 * correctly during shutdown via transport_wait_for_tasks()
1636 * 1633 *
1637 * Also, we don't take cmd->t_state_lock here as we only expect 1634 * Also, we don't take cmd->t_state_lock here as we only expect
1638 * this to be called for initial descriptor submission. 1635 * this to be called for initial descriptor submission.
1639 */ 1636 */
1640 cmd->t_state = TRANSPORT_NEW_CMD; 1637 cmd->t_state = TRANSPORT_NEW_CMD;
1641 atomic_set(&cmd->t_transport_active, 1); 1638 atomic_set(&cmd->t_transport_active, 1);
1642 /* 1639 /*
1643 * transport_generic_new_cmd() is already handling QUEUE_FULL, 1640 * transport_generic_new_cmd() is already handling QUEUE_FULL,
1644 * so follow TRANSPORT_NEW_CMD processing thread context usage 1641 * so follow TRANSPORT_NEW_CMD processing thread context usage
1645 * and call transport_generic_request_failure() if necessary.. 1642 * and call transport_generic_request_failure() if necessary..
1646 */ 1643 */
1647 ret = transport_generic_new_cmd(cmd); 1644 ret = transport_generic_new_cmd(cmd);
1648 if (ret < 0) 1645 if (ret < 0)
1649 transport_generic_request_failure(cmd); 1646 transport_generic_request_failure(cmd);
1650 1647
1651 return 0; 1648 return 0;
1652 } 1649 }
1653 EXPORT_SYMBOL(transport_handle_cdb_direct); 1650 EXPORT_SYMBOL(transport_handle_cdb_direct);
1654 1651
1655 /* 1652 /*
1656 * Used by fabric module frontends defining a TFO->new_cmd_map() caller 1653 * Used by fabric module frontends defining a TFO->new_cmd_map() caller
1657 * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to 1654 * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to
1658 * complete setup in TCM process context w/ TFO->new_cmd_map(). 1655 * complete setup in TCM process context w/ TFO->new_cmd_map().
1659 */ 1656 */
1660 int transport_generic_handle_cdb_map( 1657 int transport_generic_handle_cdb_map(
1661 struct se_cmd *cmd) 1658 struct se_cmd *cmd)
1662 { 1659 {
1663 if (!cmd->se_lun) { 1660 if (!cmd->se_lun) {
1664 dump_stack(); 1661 dump_stack();
1665 pr_err("cmd->se_lun is NULL\n"); 1662 pr_err("cmd->se_lun is NULL\n");
1666 return -EINVAL; 1663 return -EINVAL;
1667 } 1664 }
1668 1665
1669 transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP, false); 1666 transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP, false);
1670 return 0; 1667 return 0;
1671 } 1668 }
1672 EXPORT_SYMBOL(transport_generic_handle_cdb_map); 1669 EXPORT_SYMBOL(transport_generic_handle_cdb_map);
1673 1670
1674 /* transport_generic_handle_data(): 1671 /* transport_generic_handle_data():
1675 * 1672 *
1676 * 1673 *
1677 */ 1674 */
1678 int transport_generic_handle_data( 1675 int transport_generic_handle_data(
1679 struct se_cmd *cmd) 1676 struct se_cmd *cmd)
1680 { 1677 {
1681 /* 1678 /*
1682 	 * For the software fabric case, we assume the nexus is being 1679 	 * For the software fabric case, we assume the nexus is being
1683 * failed/shutdown when signals are pending from the kthread context 1680 * failed/shutdown when signals are pending from the kthread context
1684 * caller, so we return a failure. For the HW target mode case running 1681 * caller, so we return a failure. For the HW target mode case running
1685 * in interrupt code, the signal_pending() check is skipped. 1682 * in interrupt code, the signal_pending() check is skipped.
1686 */ 1683 */
1687 if (!in_interrupt() && signal_pending(current)) 1684 if (!in_interrupt() && signal_pending(current))
1688 return -EPERM; 1685 return -EPERM;
1689 /* 1686 /*
1690 	 * If the received CDB has already been ABORTED by the generic 1687 	 * If the received CDB has already been ABORTED by the generic
1691 * target engine, we now call transport_check_aborted_status() 1688 * target engine, we now call transport_check_aborted_status()
1692 	 * to queue any delayed TASK_ABORTED status for the received CDB to the 1689 	 * to queue any delayed TASK_ABORTED status for the received CDB to the
1693 * fabric module as we are expecting no further incoming DATA OUT 1690 * fabric module as we are expecting no further incoming DATA OUT
1694 * sequences at this point. 1691 * sequences at this point.
1695 */ 1692 */
1696 if (transport_check_aborted_status(cmd, 1) != 0) 1693 if (transport_check_aborted_status(cmd, 1) != 0)
1697 return 0; 1694 return 0;
1698 1695
1699 transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE, false); 1696 transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE, false);
1700 return 0; 1697 return 0;
1701 } 1698 }
1702 EXPORT_SYMBOL(transport_generic_handle_data); 1699 EXPORT_SYMBOL(transport_generic_handle_data);
1703 1700
1704 /* transport_generic_handle_tmr(): 1701 /* transport_generic_handle_tmr():
1705 * 1702 *
1706 * 1703 *
1707 */ 1704 */
1708 int transport_generic_handle_tmr( 1705 int transport_generic_handle_tmr(
1709 struct se_cmd *cmd) 1706 struct se_cmd *cmd)
1710 { 1707 {
1711 transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR, false); 1708 transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR, false);
1712 return 0; 1709 return 0;
1713 } 1710 }
1714 EXPORT_SYMBOL(transport_generic_handle_tmr); 1711 EXPORT_SYMBOL(transport_generic_handle_tmr);
1715 1712
1716 /* 1713 /*
1717 * If the task is active, request it to be stopped and sleep until it 1714 * If the task is active, request it to be stopped and sleep until it
1718 * has completed. 1715 * has completed.
1719 */ 1716 */
1720 bool target_stop_task(struct se_task *task, unsigned long *flags) 1717 bool target_stop_task(struct se_task *task, unsigned long *flags)
1721 { 1718 {
1722 struct se_cmd *cmd = task->task_se_cmd; 1719 struct se_cmd *cmd = task->task_se_cmd;
1723 bool was_active = false; 1720 bool was_active = false;
1724 1721
1725 if (task->task_flags & TF_ACTIVE) { 1722 if (task->task_flags & TF_ACTIVE) {
1726 task->task_flags |= TF_REQUEST_STOP; 1723 task->task_flags |= TF_REQUEST_STOP;
1727 spin_unlock_irqrestore(&cmd->t_state_lock, *flags); 1724 spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
1728 1725
1729 pr_debug("Task %p waiting to complete\n", task); 1726 pr_debug("Task %p waiting to complete\n", task);
1730 wait_for_completion(&task->task_stop_comp); 1727 wait_for_completion(&task->task_stop_comp);
1731 pr_debug("Task %p stopped successfully\n", task); 1728 pr_debug("Task %p stopped successfully\n", task);
1732 1729
1733 spin_lock_irqsave(&cmd->t_state_lock, *flags); 1730 spin_lock_irqsave(&cmd->t_state_lock, *flags);
1734 atomic_dec(&cmd->t_task_cdbs_left); 1731 atomic_dec(&cmd->t_task_cdbs_left);
1735 task->task_flags &= ~(TF_ACTIVE | TF_REQUEST_STOP); 1732 task->task_flags &= ~(TF_ACTIVE | TF_REQUEST_STOP);
1736 was_active = true; 1733 was_active = true;
1737 } 1734 }
1738 1735
1739 return was_active; 1736 return was_active;
1740 } 1737 }
1741 1738
1742 static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) 1739 static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
1743 { 1740 {
1744 struct se_task *task, *task_tmp; 1741 struct se_task *task, *task_tmp;
1745 unsigned long flags; 1742 unsigned long flags;
1746 int ret = 0; 1743 int ret = 0;
1747 1744
1748 pr_debug("ITT[0x%08x] - Stopping tasks\n", 1745 pr_debug("ITT[0x%08x] - Stopping tasks\n",
1749 cmd->se_tfo->get_task_tag(cmd)); 1746 cmd->se_tfo->get_task_tag(cmd));
1750 1747
1751 /* 1748 /*
1752 * No tasks remain in the execution queue 1749 * No tasks remain in the execution queue
1753 */ 1750 */
1754 spin_lock_irqsave(&cmd->t_state_lock, flags); 1751 spin_lock_irqsave(&cmd->t_state_lock, flags);
1755 list_for_each_entry_safe(task, task_tmp, 1752 list_for_each_entry_safe(task, task_tmp,
1756 &cmd->t_task_list, t_list) { 1753 &cmd->t_task_list, t_list) {
1757 pr_debug("Processing task %p\n", task); 1754 pr_debug("Processing task %p\n", task);
1758 /* 1755 /*
1759 * If the struct se_task has not been sent and is not active, 1756 * If the struct se_task has not been sent and is not active,
1760 * remove the struct se_task from the execution queue. 1757 * remove the struct se_task from the execution queue.
1761 */ 1758 */
1762 if (!(task->task_flags & (TF_ACTIVE | TF_SENT))) { 1759 if (!(task->task_flags & (TF_ACTIVE | TF_SENT))) {
1763 spin_unlock_irqrestore(&cmd->t_state_lock, 1760 spin_unlock_irqrestore(&cmd->t_state_lock,
1764 flags); 1761 flags);
1765 transport_remove_task_from_execute_queue(task, 1762 transport_remove_task_from_execute_queue(task,
1766 cmd->se_dev); 1763 cmd->se_dev);
1767 1764
1768 pr_debug("Task %p removed from execute queue\n", task); 1765 pr_debug("Task %p removed from execute queue\n", task);
1769 spin_lock_irqsave(&cmd->t_state_lock, flags); 1766 spin_lock_irqsave(&cmd->t_state_lock, flags);
1770 continue; 1767 continue;
1771 } 1768 }
1772 1769
1773 if (!target_stop_task(task, &flags)) { 1770 if (!target_stop_task(task, &flags)) {
1774 pr_debug("Task %p - did nothing\n", task); 1771 pr_debug("Task %p - did nothing\n", task);
1775 ret++; 1772 ret++;
1776 } 1773 }
1777 } 1774 }
1778 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 1775 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
1779 1776
1780 return ret; 1777 return ret;
1781 } 1778 }
1782 1779
1783 /* 1780 /*
1784 * Handle SAM-esque emulation for generic transport request failures. 1781 * Handle SAM-esque emulation for generic transport request failures.
1785 */ 1782 */
1786 static void transport_generic_request_failure(struct se_cmd *cmd) 1783 static void transport_generic_request_failure(struct se_cmd *cmd)
1787 { 1784 {
1788 int ret = 0; 1785 int ret = 0;
1789 1786
1790 pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x" 1787 pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
1791 " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd), 1788 " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
1792 cmd->t_task_cdb[0]); 1789 cmd->t_task_cdb[0]);
1793 pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n", 1790 pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n",
1794 cmd->se_tfo->get_cmd_state(cmd), 1791 cmd->se_tfo->get_cmd_state(cmd),
1795 cmd->t_state, cmd->scsi_sense_reason); 1792 cmd->t_state, cmd->scsi_sense_reason);
1796 pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d" 1793 pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d"
1797 " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --" 1794 " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
1798 " t_transport_active: %d t_transport_stop: %d" 1795 " t_transport_active: %d t_transport_stop: %d"
1799 " t_transport_sent: %d\n", cmd->t_task_list_num, 1796 " t_transport_sent: %d\n", cmd->t_task_list_num,
1800 atomic_read(&cmd->t_task_cdbs_left), 1797 atomic_read(&cmd->t_task_cdbs_left),
1801 atomic_read(&cmd->t_task_cdbs_sent), 1798 atomic_read(&cmd->t_task_cdbs_sent),
1802 atomic_read(&cmd->t_task_cdbs_ex_left), 1799 atomic_read(&cmd->t_task_cdbs_ex_left),
1803 atomic_read(&cmd->t_transport_active), 1800 atomic_read(&cmd->t_transport_active),
1804 atomic_read(&cmd->t_transport_stop), 1801 atomic_read(&cmd->t_transport_stop),
1805 atomic_read(&cmd->t_transport_sent)); 1802 atomic_read(&cmd->t_transport_sent));
1806 1803
1807 /* 1804 /*
1808 * For SAM Task Attribute emulation for failed struct se_cmd 1805 * For SAM Task Attribute emulation for failed struct se_cmd
1809 */ 1806 */
1810 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) 1807 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
1811 transport_complete_task_attr(cmd); 1808 transport_complete_task_attr(cmd);
1812 1809
1813 switch (cmd->scsi_sense_reason) { 1810 switch (cmd->scsi_sense_reason) {
1814 case TCM_NON_EXISTENT_LUN: 1811 case TCM_NON_EXISTENT_LUN:
1815 case TCM_UNSUPPORTED_SCSI_OPCODE: 1812 case TCM_UNSUPPORTED_SCSI_OPCODE:
1816 case TCM_INVALID_CDB_FIELD: 1813 case TCM_INVALID_CDB_FIELD:
1817 case TCM_INVALID_PARAMETER_LIST: 1814 case TCM_INVALID_PARAMETER_LIST:
1818 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: 1815 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
1819 case TCM_UNKNOWN_MODE_PAGE: 1816 case TCM_UNKNOWN_MODE_PAGE:
1820 case TCM_WRITE_PROTECTED: 1817 case TCM_WRITE_PROTECTED:
1821 case TCM_CHECK_CONDITION_ABORT_CMD: 1818 case TCM_CHECK_CONDITION_ABORT_CMD:
1822 case TCM_CHECK_CONDITION_UNIT_ATTENTION: 1819 case TCM_CHECK_CONDITION_UNIT_ATTENTION:
1823 case TCM_CHECK_CONDITION_NOT_READY: 1820 case TCM_CHECK_CONDITION_NOT_READY:
1824 break; 1821 break;
1825 case TCM_RESERVATION_CONFLICT: 1822 case TCM_RESERVATION_CONFLICT:
1826 /* 1823 /*
1827 * No SENSE Data payload for this case, set SCSI Status 1824 * No SENSE Data payload for this case, set SCSI Status
1828 * and queue the response to $FABRIC_MOD. 1825 * and queue the response to $FABRIC_MOD.
1829 * 1826 *
1830 * Uses linux/include/scsi/scsi.h SAM status codes defs 1827 * Uses linux/include/scsi/scsi.h SAM status codes defs
1831 */ 1828 */
1832 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; 1829 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
1833 /* 1830 /*
1834 * For UA Interlock Code 11b, a RESERVATION CONFLICT will 1831 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
1835 * establish a UNIT ATTENTION with PREVIOUS RESERVATION 1832 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
1836 * CONFLICT STATUS. 1833 * CONFLICT STATUS.
1837 * 1834 *
1838 * See spc4r17, section 7.4.6 Control Mode Page, Table 349 1835 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
1839 */ 1836 */
1840 if (cmd->se_sess && 1837 if (cmd->se_sess &&
1841 cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2) 1838 cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
1842 core_scsi3_ua_allocate(cmd->se_sess->se_node_acl, 1839 core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
1843 cmd->orig_fe_lun, 0x2C, 1840 cmd->orig_fe_lun, 0x2C,
1844 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); 1841 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
1845 1842
1846 ret = cmd->se_tfo->queue_status(cmd); 1843 ret = cmd->se_tfo->queue_status(cmd);
1847 if (ret == -EAGAIN || ret == -ENOMEM) 1844 if (ret == -EAGAIN || ret == -ENOMEM)
1848 goto queue_full; 1845 goto queue_full;
1849 goto check_stop; 1846 goto check_stop;
1850 default: 1847 default:
1851 pr_err("Unknown transport error for CDB 0x%02x: %d\n", 1848 pr_err("Unknown transport error for CDB 0x%02x: %d\n",
1852 cmd->t_task_cdb[0], cmd->scsi_sense_reason); 1849 cmd->t_task_cdb[0], cmd->scsi_sense_reason);
1853 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; 1850 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
1854 break; 1851 break;
1855 } 1852 }
1856 /* 1853 /*
1857 * If a fabric does not define a cmd->se_tfo->new_cmd_map caller, 1854 * If a fabric does not define a cmd->se_tfo->new_cmd_map caller,
1858 * make the call to transport_send_check_condition_and_sense() 1855 * make the call to transport_send_check_condition_and_sense()
1859 * directly. Otherwise expect the fabric to make the call to 1856 * directly. Otherwise expect the fabric to make the call to
1860 * transport_send_check_condition_and_sense() after handling 1857 * transport_send_check_condition_and_sense() after handling
1861 	 * possible unsolicited write data payloads. 1858 	 * possible unsolicited write data payloads.
1862 */ 1859 */
1863 ret = transport_send_check_condition_and_sense(cmd, 1860 ret = transport_send_check_condition_and_sense(cmd,
1864 cmd->scsi_sense_reason, 0); 1861 cmd->scsi_sense_reason, 0);
1865 if (ret == -EAGAIN || ret == -ENOMEM) 1862 if (ret == -EAGAIN || ret == -ENOMEM)
1866 goto queue_full; 1863 goto queue_full;
1867 1864
1868 check_stop: 1865 check_stop:
1869 transport_lun_remove_cmd(cmd); 1866 transport_lun_remove_cmd(cmd);
1870 if (!transport_cmd_check_stop_to_fabric(cmd)) 1867 if (!transport_cmd_check_stop_to_fabric(cmd))
1871 ; 1868 ;
1872 return; 1869 return;
1873 1870
1874 queue_full: 1871 queue_full:
1875 cmd->t_state = TRANSPORT_COMPLETE_QF_OK; 1872 cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
1876 transport_handle_queue_full(cmd, cmd->se_dev); 1873 transport_handle_queue_full(cmd, cmd->se_dev);
1877 } 1874 }
1878 1875
1879 static inline u32 transport_lba_21(unsigned char *cdb) 1876 static inline u32 transport_lba_21(unsigned char *cdb)
1880 { 1877 {
1881 return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3]; 1878 return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
1882 } 1879 }
1883 1880
1884 static inline u32 transport_lba_32(unsigned char *cdb) 1881 static inline u32 transport_lba_32(unsigned char *cdb)
1885 { 1882 {
1886 return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; 1883 return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
1887 } 1884 }
1888 1885
1889 static inline unsigned long long transport_lba_64(unsigned char *cdb) 1886 static inline unsigned long long transport_lba_64(unsigned char *cdb)
1890 { 1887 {
1891 unsigned int __v1, __v2; 1888 unsigned int __v1, __v2;
1892 1889
1893 __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; 1890 __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
1894 __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; 1891 __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
1895 1892
1896 return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; 1893 return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
1897 } 1894 }
1898 1895
1899 /* 1896 /*
1900 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs 1897 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
1901 */ 1898 */
1902 static inline unsigned long long transport_lba_64_ext(unsigned char *cdb) 1899 static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
1903 { 1900 {
1904 unsigned int __v1, __v2; 1901 unsigned int __v1, __v2;
1905 1902
1906 __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15]; 1903 __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
1907 __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19]; 1904 __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];
1908 1905
1909 return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; 1906 return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
1910 } 1907 }
1911 1908
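A worked example of the big-endian byte assembly in the helpers above: for a hypothetical READ(10) CDB addressing LBA 0x00010203, transport_lba_32() shifts the four address bytes back together. The sketch below replays the same expression in user space:

    #include <stdio.h>

    static unsigned int lba_32(unsigned char *cdb)
    {
    	return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
    }

    int main(void)
    {
    	/* Hypothetical READ(10): opcode 0x28, LBA 0x00010203, one block */
    	unsigned char cdb[10] = { 0x28, 0x00, 0x00, 0x01, 0x02, 0x03,
    				  0x00, 0x00, 0x01, 0x00 };

    	printf("LBA: 0x%08x\n", lba_32(cdb));	/* 0x00010203 */
    	return 0;
    }
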
1912 static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd) 1909 static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
1913 { 1910 {
1914 unsigned long flags; 1911 unsigned long flags;
1915 1912
1916 spin_lock_irqsave(&se_cmd->t_state_lock, flags); 1913 spin_lock_irqsave(&se_cmd->t_state_lock, flags);
1917 se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; 1914 se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
1918 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); 1915 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
1919 } 1916 }
1920 1917
1921 static inline int transport_tcq_window_closed(struct se_device *dev) 1918 static inline int transport_tcq_window_closed(struct se_device *dev)
1922 { 1919 {
1923 if (dev->dev_tcq_window_closed++ < 1920 if (dev->dev_tcq_window_closed++ <
1924 PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD) { 1921 PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD) {
1925 msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT); 1922 msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT);
1926 } else 1923 } else
1927 msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG); 1924 msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG);
1928 1925
1929 wake_up_interruptible(&dev->dev_queue_obj.thread_wq); 1926 wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
1930 return 0; 1927 return 0;
1931 } 1928 }
1932 1929
1933 /* 1930 /*
1934 * Called from Fabric Module context from transport_execute_tasks() 1931 * Called from Fabric Module context from transport_execute_tasks()
1935 * 1932 *
1936 * The return of this function determins if the tasks from struct se_cmd 1933 * The return of this function determins if the tasks from struct se_cmd
1937 * get added to the execution queue in transport_execute_tasks(), 1934 * get added to the execution queue in transport_execute_tasks(),
1938 * or are added to the delayed or ordered lists here. 1935 * or are added to the delayed or ordered lists here.
1939 */ 1936 */
1940 static inline int transport_execute_task_attr(struct se_cmd *cmd) 1937 static inline int transport_execute_task_attr(struct se_cmd *cmd)
1941 { 1938 {
1942 if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) 1939 if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
1943 return 1; 1940 return 1;
1944 /* 1941 /*
1945 * Check for the existence of HEAD_OF_QUEUE, and if true return 1 1942 * Check for the existence of HEAD_OF_QUEUE, and if true return 1
1946 * to allow the passed struct se_cmd list of tasks to the front of the list. 1943 * to allow the passed struct se_cmd list of tasks to the front of the list.
1947 */ 1944 */
1948 if (cmd->sam_task_attr == MSG_HEAD_TAG) { 1945 if (cmd->sam_task_attr == MSG_HEAD_TAG) {
1949 pr_debug("Added HEAD_OF_QUEUE for CDB:" 1946 pr_debug("Added HEAD_OF_QUEUE for CDB:"
1950 " 0x%02x, se_ordered_id: %u\n", 1947 " 0x%02x, se_ordered_id: %u\n",
1951 cmd->t_task_cdb[0], 1948 cmd->t_task_cdb[0],
1952 cmd->se_ordered_id); 1949 cmd->se_ordered_id);
1953 return 1; 1950 return 1;
1954 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { 1951 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
1955 atomic_inc(&cmd->se_dev->dev_ordered_sync); 1952 atomic_inc(&cmd->se_dev->dev_ordered_sync);
1956 smp_mb__after_atomic_inc(); 1953 smp_mb__after_atomic_inc();
1957 1954
1958 pr_debug("Added ORDERED for CDB: 0x%02x to ordered" 1955 pr_debug("Added ORDERED for CDB: 0x%02x to ordered"
1959 " list, se_ordered_id: %u\n", 1956 " list, se_ordered_id: %u\n",
1960 cmd->t_task_cdb[0], 1957 cmd->t_task_cdb[0],
1961 cmd->se_ordered_id); 1958 cmd->se_ordered_id);
1962 /* 1959 /*
1963 * Add ORDERED command to tail of execution queue if 1960 * Add ORDERED command to tail of execution queue if
1964 * no other older commands exist that need to be 1961 * no other older commands exist that need to be
1965 * completed first. 1962 * completed first.
1966 */ 1963 */
1967 if (!atomic_read(&cmd->se_dev->simple_cmds)) 1964 if (!atomic_read(&cmd->se_dev->simple_cmds))
1968 return 1; 1965 return 1;
1969 } else { 1966 } else {
1970 /* 1967 /*
1971 * For SIMPLE and UNTAGGED Task Attribute commands 1968 * For SIMPLE and UNTAGGED Task Attribute commands
1972 */ 1969 */
1973 atomic_inc(&cmd->se_dev->simple_cmds); 1970 atomic_inc(&cmd->se_dev->simple_cmds);
1974 smp_mb__after_atomic_inc(); 1971 smp_mb__after_atomic_inc();
1975 } 1972 }
1976 /* 1973 /*
1977 * Otherwise if one or more outstanding ORDERED task attribute exist, 1974 * Otherwise if one or more outstanding ORDERED task attribute exist,
1978 * add the dormant task(s) built for the passed struct se_cmd to the 1975 * add the dormant task(s) built for the passed struct se_cmd to the
1979 * execution queue and become in Active state for this struct se_device. 1976 * execution queue and become in Active state for this struct se_device.
1980 */ 1977 */
1981 if (atomic_read(&cmd->se_dev->dev_ordered_sync) != 0) { 1978 if (atomic_read(&cmd->se_dev->dev_ordered_sync) != 0) {
1982 /* 1979 /*
1983 * Otherwise, add cmd w/ tasks to delayed cmd queue that 1980 * Otherwise, add cmd w/ tasks to delayed cmd queue that
1984 * will be drained upon completion of HEAD_OF_QUEUE task. 1981 * will be drained upon completion of HEAD_OF_QUEUE task.
1985 */ 1982 */
1986 spin_lock(&cmd->se_dev->delayed_cmd_lock); 1983 spin_lock(&cmd->se_dev->delayed_cmd_lock);
1987 cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR; 1984 cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR;
1988 list_add_tail(&cmd->se_delayed_node, 1985 list_add_tail(&cmd->se_delayed_node,
1989 &cmd->se_dev->delayed_cmd_list); 1986 &cmd->se_dev->delayed_cmd_list);
1990 spin_unlock(&cmd->se_dev->delayed_cmd_lock); 1987 spin_unlock(&cmd->se_dev->delayed_cmd_lock);
1991 1988
1992 pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to" 1989 pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
1993 " delayed CMD list, se_ordered_id: %u\n", 1990 " delayed CMD list, se_ordered_id: %u\n",
1994 cmd->t_task_cdb[0], cmd->sam_task_attr, 1991 cmd->t_task_cdb[0], cmd->sam_task_attr,
1995 cmd->se_ordered_id); 1992 cmd->se_ordered_id);
1996 /* 1993 /*
1997 * Return zero to let transport_execute_tasks() know 1994 * Return zero to let transport_execute_tasks() know
1998 * not to add the delayed tasks to the execution list. 1995 * not to add the delayed tasks to the execution list.
1999 */ 1996 */
2000 return 0; 1997 return 0;
2001 } 1998 }
2002 /* 1999 /*
2003 * Otherwise, no ORDERED task attributes exist.. 2000 * Otherwise, no ORDERED task attributes exist..
2004 */ 2001 */
2005 return 1; 2002 return 1;
2006 } 2003 }
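/*
 * Illustrative sketch: the gating above reduces to three rules.  A toy
 * userspace model; may_execute() and its parameters are illustrative
 * stand-ins for the atomics on struct se_device, not real TCM API.
 */
#if 0
#include <stdio.h>

/* Returns 1 to execute now, 0 to park the command on the delayed list. */
static int may_execute(int is_head, int is_ordered,
                       int simple_cmds, int ordered_sync)
{
        if (is_head)
                return 1;                 /* HEAD_OF_QUEUE always runs */
        if (is_ordered)
                return simple_cmds == 0;  /* ORDERED waits for older SIMPLE */
        return ordered_sync == 0;         /* SIMPLE waits behind ORDERED */
}

int main(void)
{
        printf("%d\n", may_execute(0, 1, 3, 1));  /* 0: ORDERED behind SIMPLE */
        printf("%d\n", may_execute(0, 0, 0, 1));  /* 0: SIMPLE behind ORDERED */
        printf("%d\n", may_execute(1, 0, 3, 1));  /* 1: HEAD_OF_QUEUE */
        return 0;
}
#endif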

/*
 * Called from fabric module context in transport_generic_new_cmd() and
 * transport_generic_process_write()
 */
static int transport_execute_tasks(struct se_cmd *cmd)
{
        int add_tasks;

        if (se_dev_check_online(cmd->se_dev) != 0) {
                cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                transport_generic_request_failure(cmd);
                return 0;
        }

        /*
         * Call transport_cmd_check_stop() to see if a fabric exception
         * has occurred that prevents execution.
         */
        if (!transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING)) {
                /*
                 * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE
                 * attribute for the tasks of the received struct se_cmd CDB
                 */
                add_tasks = transport_execute_task_attr(cmd);
                if (!add_tasks)
                        goto execute_tasks;
                /*
                 * This calls transport_add_tasks_from_cmd() to handle
                 * HEAD_OF_QUEUE ordering for SAM Task Attribute emulation
                 * (if enabled) in __transport_add_task_to_execute_queue() and
                 * transport_add_task_check_sam_attr().
                 */
                transport_add_tasks_from_cmd(cmd);
        }
        /*
         * Kick the execution queue for the cmd associated struct se_device
         * storage object.
         */
execute_tasks:
        __transport_execute_tasks(cmd->se_dev);
        return 0;
}

/*
 * Called to check the struct se_device tcq depth window, and once open
 * pull struct se_task entries from struct se_device->execute_task_list
 * and send them to the backend.
 *
 * Called from transport_processing_thread()
 */
static int __transport_execute_tasks(struct se_device *dev)
{
        int error;
        struct se_cmd *cmd = NULL;
        struct se_task *task = NULL;
        unsigned long flags;

        /*
         * Check if there is enough room in the device and HBA queue to send
         * struct se_tasks to the selected transport.
         */
check_depth:
        if (!atomic_read(&dev->depth_left))
                return transport_tcq_window_closed(dev);

        dev->dev_tcq_window_closed = 0;

        spin_lock_irq(&dev->execute_task_lock);
        if (list_empty(&dev->execute_task_list)) {
                spin_unlock_irq(&dev->execute_task_lock);
                return 0;
        }
        task = list_first_entry(&dev->execute_task_list,
                                struct se_task, t_execute_list);
        __transport_remove_task_from_execute_queue(task, dev);
        spin_unlock_irq(&dev->execute_task_lock);

        atomic_dec(&dev->depth_left);

        cmd = task->task_se_cmd;

        spin_lock_irqsave(&cmd->t_state_lock, flags);
        task->task_flags |= (TF_ACTIVE | TF_SENT);
        atomic_inc(&cmd->t_task_cdbs_sent);

        if (atomic_read(&cmd->t_task_cdbs_sent) ==
            cmd->t_task_list_num)
                atomic_set(&cmd->t_transport_sent, 1);

        spin_unlock_irqrestore(&cmd->t_state_lock, flags);

        if (cmd->execute_task)
                error = cmd->execute_task(task);
        else
                error = dev->transport->do_task(task);
        if (error != 0) {
                spin_lock_irqsave(&cmd->t_state_lock, flags);
                task->task_flags &= ~TF_ACTIVE;
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                atomic_set(&cmd->t_transport_sent, 0);
                transport_stop_tasks_for_cmd(cmd);
                atomic_inc(&dev->depth_left);
                transport_generic_request_failure(cmd);
        }

        goto check_depth;

        return 0;
}

static inline u32 transport_get_sectors_6(
        unsigned char *cdb,
        struct se_cmd *cmd,
        int *ret)
{
        struct se_device *dev = cmd->se_dev;

        /*
         * Assume TYPE_DISK for non struct se_device objects.
         * Use 8-bit sector value.
         */
        if (!dev)
                goto type_disk;

        /*
         * Use 24-bit allocation length for TYPE_TAPE.
         */
        if (dev->transport->get_device_type(dev) == TYPE_TAPE)
                return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4];

        /*
         * Everything else assumes TYPE_DISK Sector CDB location.
         * Use 8-bit sector value.  SBC-3 says:
         *
         *   A TRANSFER LENGTH field set to zero specifies that 256
         *   logical blocks shall be written.  Any other value
         *   specifies the number of logical blocks that shall be
         *   written.
         */
type_disk:
        return cdb[4] ? : 256;
}

static inline u32 transport_get_sectors_10(
        unsigned char *cdb,
        struct se_cmd *cmd,
        int *ret)
{
        struct se_device *dev = cmd->se_dev;

        /*
         * Assume TYPE_DISK for non struct se_device objects.
         * Use 16-bit sector value.
         */
        if (!dev)
                goto type_disk;

        /*
         * XXX_10 is not defined in SSC, throw an exception
         */
        if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
                *ret = -EINVAL;
                return 0;
        }

        /*
         * Everything else assumes TYPE_DISK Sector CDB location.
         * Use 16-bit sector value.
         */
type_disk:
        return (u32)(cdb[7] << 8) + cdb[8];
}

static inline u32 transport_get_sectors_12(
        unsigned char *cdb,
        struct se_cmd *cmd,
        int *ret)
{
        struct se_device *dev = cmd->se_dev;

        /*
         * Assume TYPE_DISK for non struct se_device objects.
         * Use 32-bit sector value.
         */
        if (!dev)
                goto type_disk;

        /*
         * XXX_12 is not defined in SSC, throw an exception
         */
        if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
                *ret = -EINVAL;
                return 0;
        }

        /*
         * Everything else assumes TYPE_DISK Sector CDB location.
         * Use 32-bit sector value.
         */
type_disk:
        return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
}

static inline u32 transport_get_sectors_16(
        unsigned char *cdb,
        struct se_cmd *cmd,
        int *ret)
{
        struct se_device *dev = cmd->se_dev;

        /*
         * Assume TYPE_DISK for non struct se_device objects.
         * Use 32-bit sector value.
         */
        if (!dev)
                goto type_disk;

        /*
         * Use 24-bit allocation length for TYPE_TAPE.
         */
        if (dev->transport->get_device_type(dev) == TYPE_TAPE)
                return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14];

type_disk:
        return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
                (cdb[12] << 8) + cdb[13];
}

/*
 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
 */
static inline u32 transport_get_sectors_32(
        unsigned char *cdb,
        struct se_cmd *cmd,
        int *ret)
{
        /*
         * Assume TYPE_DISK for non struct se_device objects.
         * Use 32-bit sector value.
         */
        return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
                (cdb[30] << 8) + cdb[31];

}

static inline u32 transport_get_size(
        u32 sectors,
        unsigned char *cdb,
        struct se_cmd *cmd)
{
        struct se_device *dev = cmd->se_dev;

        if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
                if (cdb[1] & 1) { /* sectors */
                        return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
                } else /* bytes */
                        return sectors;
        }
#if 0
        pr_debug("Returning block_size: %u, sectors: %u == %u for"
                " %s object\n", dev->se_sub_dev->se_dev_attrib.block_size, sectors,
                dev->se_sub_dev->se_dev_attrib.block_size * sectors,
                dev->transport->name);
#endif
        return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
}
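/*
 * Illustrative sketch: for a TYPE_DISK READ_10, the transfer length comes
 * from CDB bytes 7-8 and the byte count is block_size * sectors, as in
 * transport_get_sectors_10() and transport_get_size() above.  Standalone
 * demo with an assumed 512-byte block size:
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* READ_10 with TRANSFER LENGTH 0x0008 in bytes 7-8 */
        unsigned char cdb[10] = { 0x28, 0, 0, 0, 0, 0x10, 0, 0x00, 0x08, 0 };
        uint32_t sectors = (cdb[7] << 8) + cdb[8];
        uint32_t block_size = 512;  /* stand-in for se_dev_attrib.block_size */

        printf("sectors=%u bytes=%u\n", sectors, block_size * sectors);
        /* prints sectors=8 bytes=4096 */
        return 0;
}
#endif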

static void transport_xor_callback(struct se_cmd *cmd)
{
        unsigned char *buf, *addr;
        struct scatterlist *sg;
        unsigned int offset;
        int i;
        int count;
        /*
         * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
         *
         * 1) read the specified logical block(s);
         * 2) transfer logical blocks from the data-out buffer;
         * 3) XOR the logical blocks transferred from the data-out buffer with
         *    the logical blocks read, storing the resulting XOR data in a buffer;
         * 4) if the DISABLE WRITE bit is set to zero, then write the logical
         *    blocks transferred from the data-out buffer; and
         * 5) transfer the resulting XOR data to the data-in buffer.
         */
        buf = kmalloc(cmd->data_length, GFP_KERNEL);
        if (!buf) {
                pr_err("Unable to allocate xor_callback buf\n");
                return;
        }
        /*
         * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
         * into the locally allocated *buf
         */
        sg_copy_to_buffer(cmd->t_data_sg,
                          cmd->t_data_nents,
                          buf,
                          cmd->data_length);

        /*
         * Now perform the XOR against the BIDI read memory located at
         * cmd->t_bidi_data_sg
         */

        offset = 0;
        for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
                addr = kmap_atomic(sg_page(sg), KM_USER0);
                if (!addr)
                        goto out;

                for (i = 0; i < sg->length; i++)
                        *(addr + sg->offset + i) ^= *(buf + offset + i);

                offset += sg->length;
                kunmap_atomic(addr, KM_USER0);
        }

out:
        kfree(buf);
}
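/*
 * Illustrative sketch: step 3 of the XDWRITEREAD sequence above is a plain
 * byte-wise XOR of the data-out buffer into the blocks that were read.
 * Minimal flat-buffer demo (the kernel code walks scatterlists instead):
 */
#if 0
#include <stdio.h>

int main(void)
{
        unsigned char read_buf[4]  = { 0xff, 0x0f, 0xf0, 0x00 }; /* blocks read */
        unsigned char write_buf[4] = { 0x0f, 0x0f, 0x0f, 0x0f }; /* data-out */
        int i;

        for (i = 0; i < 4; i++)
                read_buf[i] ^= write_buf[i];  /* XOR result -> data-in buffer */

        for (i = 0; i < 4; i++)
                printf("%02x ", read_buf[i]); /* prints: f0 00 ff 0f */
        printf("\n");
        return 0;
}
#endif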

/*
 * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd
 */
static int transport_get_sense_data(struct se_cmd *cmd)
{
        unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL;
        struct se_device *dev = cmd->se_dev;
        struct se_task *task = NULL, *task_tmp;
        unsigned long flags;
        u32 offset = 0;

        WARN_ON(!cmd->se_lun);

        if (!dev)
                return 0;

        spin_lock_irqsave(&cmd->t_state_lock, flags);
        if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                return 0;
        }

        list_for_each_entry_safe(task, task_tmp,
                                &cmd->t_task_list, t_list) {
                if (!task->task_sense)
                        continue;

                if (!dev->transport->get_sense_buffer) {
                        pr_err("dev->transport->get_sense_buffer"
                                " is NULL\n");
                        continue;
                }

                sense_buffer = dev->transport->get_sense_buffer(task);
                if (!sense_buffer) {
                        pr_err("ITT[0x%08x]_TASK[%p]: Unable to locate"
                                " sense buffer for task with sense\n",
                                cmd->se_tfo->get_task_tag(cmd), task);
                        continue;
                }
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);

                offset = cmd->se_tfo->set_fabric_sense_len(cmd,
                                TRANSPORT_SENSE_BUFFER);

                memcpy(&buffer[offset], sense_buffer,
                                TRANSPORT_SENSE_BUFFER);
                cmd->scsi_status = task->task_scsi_status;
                /* Automatically padded */
                cmd->scsi_sense_length =
                                (TRANSPORT_SENSE_BUFFER + offset);

                pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x"
                        " and sense\n",
                        dev->se_hba->hba_id, dev->transport->name,
                        cmd->scsi_status);
                return 0;
        }
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);

        return -1;
}
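/*
 * Illustrative sketch: the fabric may reserve a header in front of the
 * sense bytes, so the payload is copied at the offset returned by
 * set_fabric_sense_len().  Toy demo; DEMO_SENSE_LEN and the 2-byte offset
 * are assumptions for illustration, not values from the TCM code.
 */
#if 0
#include <stdio.h>
#include <string.h>

#define DEMO_SENSE_LEN 18  /* stand-in for TRANSPORT_SENSE_BUFFER */

int main(void)
{
        unsigned char fabric_buf[2 + DEMO_SENSE_LEN];
        unsigned char sense[DEMO_SENSE_LEN] = { 0x70, 0, 0x05 }; /* ILLEGAL REQUEST */
        unsigned int offset = 2;  /* what set_fabric_sense_len() might return */

        memcpy(&fabric_buf[offset], sense, DEMO_SENSE_LEN);
        printf("scsi_sense_length=%u key=0x%x\n",
               DEMO_SENSE_LEN + offset, fabric_buf[offset + 2] & 0x0f);
        /* prints scsi_sense_length=20 key=0x5 */
        return 0;
}
#endif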

static inline long long transport_dev_end_lba(struct se_device *dev)
{
        return dev->transport->get_blocks(dev) + 1;
}

static int transport_cmd_get_valid_sectors(struct se_cmd *cmd)
{
        struct se_device *dev = cmd->se_dev;
        u32 sectors;

        if (dev->transport->get_device_type(dev) != TYPE_DISK)
                return 0;

        sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size);

        if ((cmd->t_task_lba + sectors) > transport_dev_end_lba(dev)) {
                pr_err("LBA: %llu Sectors: %u exceeds"
                        " transport_dev_end_lba(): %llu\n",
                        cmd->t_task_lba, sectors,
                        transport_dev_end_lba(dev));
                return -EINVAL;
        }

        return 0;
}
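/*
 * Illustrative sketch: the check above rejects any I/O whose last block
 * would land past the end of the device.  Standalone demo with assumed
 * numbers (2048-block device, 16-sector I/O starting at LBA 2040):
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t end_lba = 2048 + 1;  /* transport_dev_end_lba() equivalent */
        uint64_t lba = 2040;
        uint32_t sectors = 16;

        if (lba + sectors > end_lba)
                printf("LBA: %llu Sectors: %u exceeds end_lba: %llu\n",
                       (unsigned long long)lba, sectors,
                       (unsigned long long)end_lba);
        return 0;
}
#endif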

static int target_check_write_same_discard(unsigned char *flags, struct se_device *dev)
{
        /*
         * Determine if the received WRITE_SAME is used for direct
         * passthrough into Linux/SCSI with struct request via TCM/pSCSI,
         * or if we are signaling the use of internal WRITE_SAME + UNMAP=1
         * emulation for Linux/BLOCK discard with TCM/IBLOCK code.
         */
        int passthrough = (dev->transport->transport_type ==
                                TRANSPORT_PLUGIN_PHBA_PDEV);

        if (!passthrough) {
                if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
                        pr_err("WRITE_SAME PBDATA and LBDATA"
                                " bits not supported for Block Discard"
                                " Emulation\n");
                        return -ENOSYS;
                }
                /*
                 * Currently for the emulated case we only accept
                 * WRITE_SAME with the UNMAP=1 bit set.
                 */
                if (!(flags[0] & 0x08)) {
                        pr_err("WRITE_SAME w/o UNMAP bit not"
                                " supported for Block Discard Emulation\n");
                        return -ENOSYS;
                }
        }

        return 0;
}
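/*
 * Illustrative sketch: the flags byte checked above carries UNMAP (0x08),
 * PBDATA (0x04) and LBDATA (0x02).  Truth-table demo of the emulated
 * (non-passthrough) policy; ws_discard_verdict() is illustrative only:
 */
#if 0
#include <stdio.h>

static const char *ws_discard_verdict(unsigned char flags)
{
        if ((flags & 0x04) || (flags & 0x02))
                return "reject: PBDATA/LBDATA not supported";
        if (!(flags & 0x08))
                return "reject: UNMAP=0 not supported";
        return "ok: emulate as discard";
}

int main(void)
{
        printf("0x08 -> %s\n", ws_discard_verdict(0x08));  /* ok */
        printf("0x00 -> %s\n", ws_discard_verdict(0x00));  /* UNMAP=0 */
        printf("0x0c -> %s\n", ws_discard_verdict(0x0c));  /* PBDATA set */
        return 0;
}
#endif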
2448 2445
2449 /* transport_generic_cmd_sequencer(): 2446 /* transport_generic_cmd_sequencer():
2450 * 2447 *
2451 * Generic Command Sequencer that should work for most DAS transport 2448 * Generic Command Sequencer that should work for most DAS transport
2452 * drivers. 2449 * drivers.
2453 * 2450 *
2454 * Called from transport_generic_allocate_tasks() in the $FABRIC_MOD 2451 * Called from transport_generic_allocate_tasks() in the $FABRIC_MOD
2455 * RX Thread. 2452 * RX Thread.
2456 * 2453 *
2457 * FIXME: Need to support other SCSI OPCODES where as well. 2454 * FIXME: Need to support other SCSI OPCODES where as well.
2458 */ 2455 */
2459 static int transport_generic_cmd_sequencer( 2456 static int transport_generic_cmd_sequencer(
2460 struct se_cmd *cmd, 2457 struct se_cmd *cmd,
2461 unsigned char *cdb) 2458 unsigned char *cdb)
2462 { 2459 {
2463 struct se_device *dev = cmd->se_dev; 2460 struct se_device *dev = cmd->se_dev;
2464 struct se_subsystem_dev *su_dev = dev->se_sub_dev; 2461 struct se_subsystem_dev *su_dev = dev->se_sub_dev;
2465 int ret = 0, sector_ret = 0, passthrough; 2462 int ret = 0, sector_ret = 0, passthrough;
2466 u32 sectors = 0, size = 0, pr_reg_type = 0; 2463 u32 sectors = 0, size = 0, pr_reg_type = 0;
2467 u16 service_action; 2464 u16 service_action;
2468 u8 alua_ascq = 0; 2465 u8 alua_ascq = 0;
2469 /* 2466 /*
2470 * Check for an existing UNIT ATTENTION condition 2467 * Check for an existing UNIT ATTENTION condition
2471 */ 2468 */
2472 if (core_scsi3_ua_check(cmd, cdb) < 0) { 2469 if (core_scsi3_ua_check(cmd, cdb) < 0) {
2473 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 2470 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2474 cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION; 2471 cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
2475 return -EINVAL; 2472 return -EINVAL;
2476 } 2473 }
2477 /* 2474 /*
2478 * Check status of Asymmetric Logical Unit Assignment port 2475 * Check status of Asymmetric Logical Unit Assignment port
2479 */ 2476 */
2480 ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq); 2477 ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq);
2481 if (ret != 0) { 2478 if (ret != 0) {
2482 /* 2479 /*
2483 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible'; 2480 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
2484 * The ALUA additional sense code qualifier (ASCQ) is determined 2481 * The ALUA additional sense code qualifier (ASCQ) is determined
2485 * by the ALUA primary or secondary access state.. 2482 * by the ALUA primary or secondary access state..
2486 */ 2483 */
2487 if (ret > 0) { 2484 if (ret > 0) {
2488 #if 0 2485 #if 0
2489 pr_debug("[%s]: ALUA TG Port not available," 2486 pr_debug("[%s]: ALUA TG Port not available,"
2490 " SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n", 2487 " SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n",
2491 cmd->se_tfo->get_fabric_name(), alua_ascq); 2488 cmd->se_tfo->get_fabric_name(), alua_ascq);
2492 #endif 2489 #endif
2493 transport_set_sense_codes(cmd, 0x04, alua_ascq); 2490 transport_set_sense_codes(cmd, 0x04, alua_ascq);
2494 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 2491 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2495 cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY; 2492 cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
2496 return -EINVAL; 2493 return -EINVAL;
2497 } 2494 }
2498 goto out_invalid_cdb_field; 2495 goto out_invalid_cdb_field;
2499 } 2496 }
2500 /* 2497 /*
2501 * Check status for SPC-3 Persistent Reservations 2498 * Check status for SPC-3 Persistent Reservations
2502 */ 2499 */
2503 if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) { 2500 if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) {
2504 if (su_dev->t10_pr.pr_ops.t10_seq_non_holder( 2501 if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
2505 cmd, cdb, pr_reg_type) != 0) { 2502 cmd, cdb, pr_reg_type) != 0) {
2506 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 2503 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
2507 cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT; 2504 cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
2508 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; 2505 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
2509 return -EBUSY; 2506 return -EBUSY;
2510 } 2507 }
2511 /* 2508 /*
2512 * This means the CDB is allowed for the SCSI Initiator port 2509 * This means the CDB is allowed for the SCSI Initiator port
2513 * when said port is *NOT* holding the legacy SPC-2 or 2510 * when said port is *NOT* holding the legacy SPC-2 or
2514 * SPC-3 Persistent Reservation. 2511 * SPC-3 Persistent Reservation.
2515 */ 2512 */
2516 } 2513 }
2517 2514
2518 /* 2515 /*
2519 * If we operate in passthrough mode we skip most CDB emulation and 2516 * If we operate in passthrough mode we skip most CDB emulation and
2520 * instead hand the commands down to the physical SCSI device. 2517 * instead hand the commands down to the physical SCSI device.
2521 */ 2518 */
2522 passthrough = 2519 passthrough =
2523 (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV); 2520 (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV);
2524 2521
2525 switch (cdb[0]) { 2522 switch (cdb[0]) {
2526 case READ_6: 2523 case READ_6:
2527 sectors = transport_get_sectors_6(cdb, cmd, &sector_ret); 2524 sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
2528 if (sector_ret) 2525 if (sector_ret)
2529 goto out_unsupported_cdb; 2526 goto out_unsupported_cdb;
2530 size = transport_get_size(sectors, cdb, cmd); 2527 size = transport_get_size(sectors, cdb, cmd);
2531 cmd->t_task_lba = transport_lba_21(cdb); 2528 cmd->t_task_lba = transport_lba_21(cdb);
2532 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; 2529 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2533 break; 2530 break;
2534 case READ_10: 2531 case READ_10:
2535 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); 2532 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2536 if (sector_ret) 2533 if (sector_ret)
2537 goto out_unsupported_cdb; 2534 goto out_unsupported_cdb;
2538 size = transport_get_size(sectors, cdb, cmd); 2535 size = transport_get_size(sectors, cdb, cmd);
2539 cmd->t_task_lba = transport_lba_32(cdb); 2536 cmd->t_task_lba = transport_lba_32(cdb);
2540 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; 2537 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2541 break; 2538 break;
2542 case READ_12: 2539 case READ_12:
2543 sectors = transport_get_sectors_12(cdb, cmd, &sector_ret); 2540 sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
2544 if (sector_ret) 2541 if (sector_ret)
2545 goto out_unsupported_cdb; 2542 goto out_unsupported_cdb;
2546 size = transport_get_size(sectors, cdb, cmd); 2543 size = transport_get_size(sectors, cdb, cmd);
2547 cmd->t_task_lba = transport_lba_32(cdb); 2544 cmd->t_task_lba = transport_lba_32(cdb);
2548 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; 2545 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2549 break; 2546 break;
2550 case READ_16: 2547 case READ_16:
2551 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); 2548 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
2552 if (sector_ret) 2549 if (sector_ret)
2553 goto out_unsupported_cdb; 2550 goto out_unsupported_cdb;
2554 size = transport_get_size(sectors, cdb, cmd); 2551 size = transport_get_size(sectors, cdb, cmd);
2555 cmd->t_task_lba = transport_lba_64(cdb); 2552 cmd->t_task_lba = transport_lba_64(cdb);
2556 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; 2553 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2557 break; 2554 break;
2558 case WRITE_6: 2555 case WRITE_6:
2559 sectors = transport_get_sectors_6(cdb, cmd, &sector_ret); 2556 sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
2560 if (sector_ret) 2557 if (sector_ret)
2561 goto out_unsupported_cdb; 2558 goto out_unsupported_cdb;
2562 size = transport_get_size(sectors, cdb, cmd); 2559 size = transport_get_size(sectors, cdb, cmd);
2563 cmd->t_task_lba = transport_lba_21(cdb); 2560 cmd->t_task_lba = transport_lba_21(cdb);
2564 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; 2561 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2565 break; 2562 break;
2566 case WRITE_10: 2563 case WRITE_10:
2567 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); 2564 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2568 if (sector_ret) 2565 if (sector_ret)
2569 goto out_unsupported_cdb; 2566 goto out_unsupported_cdb;
2570 size = transport_get_size(sectors, cdb, cmd); 2567 size = transport_get_size(sectors, cdb, cmd);
2571 cmd->t_task_lba = transport_lba_32(cdb); 2568 cmd->t_task_lba = transport_lba_32(cdb);
2572 if (cdb[1] & 0x8) 2569 if (cdb[1] & 0x8)
2573 cmd->se_cmd_flags |= SCF_FUA; 2570 cmd->se_cmd_flags |= SCF_FUA;
2574 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; 2571 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2575 break; 2572 break;
2576 case WRITE_12: 2573 case WRITE_12:
2577 sectors = transport_get_sectors_12(cdb, cmd, &sector_ret); 2574 sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
2578 if (sector_ret) 2575 if (sector_ret)
2579 goto out_unsupported_cdb; 2576 goto out_unsupported_cdb;
2580 size = transport_get_size(sectors, cdb, cmd); 2577 size = transport_get_size(sectors, cdb, cmd);
2581 cmd->t_task_lba = transport_lba_32(cdb); 2578 cmd->t_task_lba = transport_lba_32(cdb);
2582 if (cdb[1] & 0x8) 2579 if (cdb[1] & 0x8)
2583 cmd->se_cmd_flags |= SCF_FUA; 2580 cmd->se_cmd_flags |= SCF_FUA;
2584 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; 2581 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2585 break; 2582 break;
2586 case WRITE_16: 2583 case WRITE_16:
2587 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); 2584 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
2588 if (sector_ret) 2585 if (sector_ret)
2589 goto out_unsupported_cdb; 2586 goto out_unsupported_cdb;
2590 size = transport_get_size(sectors, cdb, cmd); 2587 size = transport_get_size(sectors, cdb, cmd);
2591 cmd->t_task_lba = transport_lba_64(cdb); 2588 cmd->t_task_lba = transport_lba_64(cdb);
2592 if (cdb[1] & 0x8) 2589 if (cdb[1] & 0x8)
2593 cmd->se_cmd_flags |= SCF_FUA; 2590 cmd->se_cmd_flags |= SCF_FUA;
2594 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; 2591 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2595 break; 2592 break;
2596 case XDWRITEREAD_10: 2593 case XDWRITEREAD_10:
2597 if ((cmd->data_direction != DMA_TO_DEVICE) || 2594 if ((cmd->data_direction != DMA_TO_DEVICE) ||
2598 !(cmd->se_cmd_flags & SCF_BIDI)) 2595 !(cmd->se_cmd_flags & SCF_BIDI))
2599 goto out_invalid_cdb_field; 2596 goto out_invalid_cdb_field;
2600 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); 2597 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2601 if (sector_ret) 2598 if (sector_ret)
2602 goto out_unsupported_cdb; 2599 goto out_unsupported_cdb;
2603 size = transport_get_size(sectors, cdb, cmd); 2600 size = transport_get_size(sectors, cdb, cmd);
2604 cmd->t_task_lba = transport_lba_32(cdb); 2601 cmd->t_task_lba = transport_lba_32(cdb);
2605 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; 2602 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2606 2603
2607 /* 2604 /*
2608 * Do now allow BIDI commands for passthrough mode. 2605 * Do now allow BIDI commands for passthrough mode.
2609 */ 2606 */
2610 if (passthrough) 2607 if (passthrough)
2611 goto out_unsupported_cdb; 2608 goto out_unsupported_cdb;
2612 2609
2613 /* 2610 /*
2614 * Setup BIDI XOR callback to be run after I/O completion. 2611 * Setup BIDI XOR callback to be run after I/O completion.
2615 */ 2612 */
2616 cmd->transport_complete_callback = &transport_xor_callback; 2613 cmd->transport_complete_callback = &transport_xor_callback;
2617 if (cdb[1] & 0x8) 2614 if (cdb[1] & 0x8)
2618 cmd->se_cmd_flags |= SCF_FUA; 2615 cmd->se_cmd_flags |= SCF_FUA;
2619 break; 2616 break;
2620 case VARIABLE_LENGTH_CMD: 2617 case VARIABLE_LENGTH_CMD:
2621 service_action = get_unaligned_be16(&cdb[8]); 2618 service_action = get_unaligned_be16(&cdb[8]);
2622 switch (service_action) { 2619 switch (service_action) {
2623 case XDWRITEREAD_32: 2620 case XDWRITEREAD_32:
2624 sectors = transport_get_sectors_32(cdb, cmd, &sector_ret); 2621 sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
2625 if (sector_ret) 2622 if (sector_ret)
2626 goto out_unsupported_cdb; 2623 goto out_unsupported_cdb;
2627 size = transport_get_size(sectors, cdb, cmd); 2624 size = transport_get_size(sectors, cdb, cmd);
2628 /* 2625 /*
2629 * Use WRITE_32 and READ_32 opcodes for the emulated 2626 * Use WRITE_32 and READ_32 opcodes for the emulated
2630 * XDWRITE_READ_32 logic. 2627 * XDWRITE_READ_32 logic.
2631 */ 2628 */
2632 cmd->t_task_lba = transport_lba_64_ext(cdb); 2629 cmd->t_task_lba = transport_lba_64_ext(cdb);
2633 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; 2630 cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
2634 2631
2635 /* 2632 /*
2636 * Do now allow BIDI commands for passthrough mode. 2633 * Do now allow BIDI commands for passthrough mode.
2637 */ 2634 */
2638 if (passthrough) 2635 if (passthrough)
2639 goto out_unsupported_cdb; 2636 goto out_unsupported_cdb;
2640 2637
2641 /* 2638 /*
2642 * Setup BIDI XOR callback to be run during after I/O 2639 * Setup BIDI XOR callback to be run during after I/O
2643 * completion. 2640 * completion.
2644 */ 2641 */
2645 cmd->transport_complete_callback = &transport_xor_callback; 2642 cmd->transport_complete_callback = &transport_xor_callback;
2646 if (cdb[1] & 0x8) 2643 if (cdb[1] & 0x8)
2647 cmd->se_cmd_flags |= SCF_FUA; 2644 cmd->se_cmd_flags |= SCF_FUA;
2648 break; 2645 break;
2649 case WRITE_SAME_32: 2646 case WRITE_SAME_32:
2650 sectors = transport_get_sectors_32(cdb, cmd, &sector_ret); 2647 sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
2651 if (sector_ret) 2648 if (sector_ret)
2652 goto out_unsupported_cdb; 2649 goto out_unsupported_cdb;
2653 2650
2654 if (sectors) 2651 if (sectors)
2655 size = transport_get_size(1, cdb, cmd); 2652 size = transport_get_size(1, cdb, cmd);
2656 else { 2653 else {
2657 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not" 2654 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
2658 " supported\n"); 2655 " supported\n");
2659 goto out_invalid_cdb_field; 2656 goto out_invalid_cdb_field;
2660 } 2657 }
2661 2658
2662 cmd->t_task_lba = get_unaligned_be64(&cdb[12]); 2659 cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
2663 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2660 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2664 2661
2665 if (target_check_write_same_discard(&cdb[10], dev) < 0) 2662 if (target_check_write_same_discard(&cdb[10], dev) < 0)
2666 goto out_invalid_cdb_field; 2663 goto out_invalid_cdb_field;
2667 if (!passthrough) 2664 if (!passthrough)
2668 cmd->execute_task = target_emulate_write_same; 2665 cmd->execute_task = target_emulate_write_same;
2669 break; 2666 break;
2670 default: 2667 default:
2671 pr_err("VARIABLE_LENGTH_CMD service action" 2668 pr_err("VARIABLE_LENGTH_CMD service action"
2672 " 0x%04x not supported\n", service_action); 2669 " 0x%04x not supported\n", service_action);
2673 goto out_unsupported_cdb; 2670 goto out_unsupported_cdb;
2674 } 2671 }
2675 break; 2672 break;
2676 case MAINTENANCE_IN: 2673 case MAINTENANCE_IN:
2677 if (dev->transport->get_device_type(dev) != TYPE_ROM) { 2674 if (dev->transport->get_device_type(dev) != TYPE_ROM) {
2678 /* MAINTENANCE_IN from SCC-2 */ 2675 /* MAINTENANCE_IN from SCC-2 */
2679 /* 2676 /*
2680 * Check for emulated MI_REPORT_TARGET_PGS. 2677 * Check for emulated MI_REPORT_TARGET_PGS.
2681 */ 2678 */
2682 if (cdb[1] == MI_REPORT_TARGET_PGS && 2679 if (cdb[1] == MI_REPORT_TARGET_PGS &&
2683 su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) { 2680 su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
2684 cmd->execute_task = 2681 cmd->execute_task =
2685 target_emulate_report_target_port_groups; 2682 target_emulate_report_target_port_groups;
2686 } 2683 }
2687 size = (cdb[6] << 24) | (cdb[7] << 16) | 2684 size = (cdb[6] << 24) | (cdb[7] << 16) |
2688 (cdb[8] << 8) | cdb[9]; 2685 (cdb[8] << 8) | cdb[9];
2689 } else { 2686 } else {
2690 /* GPCMD_SEND_KEY from multi media commands */ 2687 /* GPCMD_SEND_KEY from multi media commands */
2691 size = (cdb[8] << 8) + cdb[9]; 2688 size = (cdb[8] << 8) + cdb[9];
2692 } 2689 }
2693 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2690 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2694 break; 2691 break;
2695 case MODE_SELECT: 2692 case MODE_SELECT:
2696 size = cdb[4]; 2693 size = cdb[4];
2697 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2694 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2698 break; 2695 break;
2699 case MODE_SELECT_10: 2696 case MODE_SELECT_10:
2700 size = (cdb[7] << 8) + cdb[8]; 2697 size = (cdb[7] << 8) + cdb[8];
2701 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2698 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2702 break; 2699 break;
2703 case MODE_SENSE: 2700 case MODE_SENSE:
2704 size = cdb[4]; 2701 size = cdb[4];
2705 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2702 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2706 if (!passthrough) 2703 if (!passthrough)
2707 cmd->execute_task = target_emulate_modesense; 2704 cmd->execute_task = target_emulate_modesense;
2708 break; 2705 break;
2709 case MODE_SENSE_10: 2706 case MODE_SENSE_10:
2710 size = (cdb[7] << 8) + cdb[8]; 2707 size = (cdb[7] << 8) + cdb[8];
2711 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2708 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2712 if (!passthrough) 2709 if (!passthrough)
2713 cmd->execute_task = target_emulate_modesense; 2710 cmd->execute_task = target_emulate_modesense;
2714 break; 2711 break;
2715 case GPCMD_READ_BUFFER_CAPACITY: 2712 case GPCMD_READ_BUFFER_CAPACITY:
2716 case GPCMD_SEND_OPC: 2713 case GPCMD_SEND_OPC:
2717 case LOG_SELECT: 2714 case LOG_SELECT:
2718 case LOG_SENSE: 2715 case LOG_SENSE:
2719 size = (cdb[7] << 8) + cdb[8]; 2716 size = (cdb[7] << 8) + cdb[8];
2720 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2717 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2721 break; 2718 break;
2722 case READ_BLOCK_LIMITS: 2719 case READ_BLOCK_LIMITS:
2723 size = READ_BLOCK_LEN; 2720 size = READ_BLOCK_LEN;
2724 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2721 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2725 break; 2722 break;
2726 case GPCMD_GET_CONFIGURATION: 2723 case GPCMD_GET_CONFIGURATION:
2727 case GPCMD_READ_FORMAT_CAPACITIES: 2724 case GPCMD_READ_FORMAT_CAPACITIES:
2728 case GPCMD_READ_DISC_INFO: 2725 case GPCMD_READ_DISC_INFO:
2729 case GPCMD_READ_TRACK_RZONE_INFO: 2726 case GPCMD_READ_TRACK_RZONE_INFO:
2730 size = (cdb[7] << 8) + cdb[8]; 2727 size = (cdb[7] << 8) + cdb[8];
2731 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2728 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2732 break; 2729 break;
2733 case PERSISTENT_RESERVE_IN: 2730 case PERSISTENT_RESERVE_IN:
2734 if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS) 2731 if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
2735 cmd->execute_task = target_scsi3_emulate_pr_in; 2732 cmd->execute_task = target_scsi3_emulate_pr_in;
2736 size = (cdb[7] << 8) + cdb[8]; 2733 size = (cdb[7] << 8) + cdb[8];
2737 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2734 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2738 break; 2735 break;
2739 case PERSISTENT_RESERVE_OUT: 2736 case PERSISTENT_RESERVE_OUT:
2740 if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS) 2737 if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
2741 cmd->execute_task = target_scsi3_emulate_pr_out; 2738 cmd->execute_task = target_scsi3_emulate_pr_out;
2742 size = (cdb[7] << 8) + cdb[8]; 2739 size = (cdb[7] << 8) + cdb[8];
2743 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2740 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2744 break; 2741 break;
2745 case GPCMD_MECHANISM_STATUS: 2742 case GPCMD_MECHANISM_STATUS:
2746 case GPCMD_READ_DVD_STRUCTURE: 2743 case GPCMD_READ_DVD_STRUCTURE:
2747 size = (cdb[8] << 8) + cdb[9]; 2744 size = (cdb[8] << 8) + cdb[9];
2748 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2745 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2749 break; 2746 break;
2750 case READ_POSITION: 2747 case READ_POSITION:
2751 size = READ_POSITION_LEN; 2748 size = READ_POSITION_LEN;
2752 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2749 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2753 break; 2750 break;
2754 case MAINTENANCE_OUT: 2751 case MAINTENANCE_OUT:
2755 if (dev->transport->get_device_type(dev) != TYPE_ROM) { 2752 if (dev->transport->get_device_type(dev) != TYPE_ROM) {
2756 /* MAINTENANCE_OUT from SCC-2 2753 /* MAINTENANCE_OUT from SCC-2
2757 * 2754 *
2758 * Check for emulated MO_SET_TARGET_PGS. 2755 * Check for emulated MO_SET_TARGET_PGS.
2759 */ 2756 */
2760 if (cdb[1] == MO_SET_TARGET_PGS && 2757 if (cdb[1] == MO_SET_TARGET_PGS &&
2761 su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) { 2758 su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
2762 cmd->execute_task = 2759 cmd->execute_task =
2763 target_emulate_set_target_port_groups; 2760 target_emulate_set_target_port_groups;
2764 } 2761 }
2765 2762
2766 size = (cdb[6] << 24) | (cdb[7] << 16) | 2763 size = (cdb[6] << 24) | (cdb[7] << 16) |
2767 (cdb[8] << 8) | cdb[9]; 2764 (cdb[8] << 8) | cdb[9];
2768 } else { 2765 } else {
2769 /* GPCMD_REPORT_KEY from multi media commands */ 2766 /* GPCMD_REPORT_KEY from multi media commands */
2770 size = (cdb[8] << 8) + cdb[9]; 2767 size = (cdb[8] << 8) + cdb[9];
2771 } 2768 }
2772 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2769 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2773 break; 2770 break;
2774 case INQUIRY: 2771 case INQUIRY:
2775 size = (cdb[3] << 8) + cdb[4]; 2772 size = (cdb[3] << 8) + cdb[4];
2776 /* 2773 /*
2777 * Do implict HEAD_OF_QUEUE processing for INQUIRY. 2774 * Do implict HEAD_OF_QUEUE processing for INQUIRY.
2778 * See spc4r17 section 5.3 2775 * See spc4r17 section 5.3
2779 */ 2776 */
2780 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) 2777 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
2781 cmd->sam_task_attr = MSG_HEAD_TAG; 2778 cmd->sam_task_attr = MSG_HEAD_TAG;
2782 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2779 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2783 if (!passthrough) 2780 if (!passthrough)
2784 cmd->execute_task = target_emulate_inquiry; 2781 cmd->execute_task = target_emulate_inquiry;
2785 break; 2782 break;
2786 case READ_BUFFER: 2783 case READ_BUFFER:
2787 size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; 2784 size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
2788 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2785 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2789 break; 2786 break;
2790 case READ_CAPACITY: 2787 case READ_CAPACITY:
2791 size = READ_CAP_LEN; 2788 size = READ_CAP_LEN;
2792 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2789 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2793 if (!passthrough) 2790 if (!passthrough)
2794 cmd->execute_task = target_emulate_readcapacity; 2791 cmd->execute_task = target_emulate_readcapacity;
2795 break; 2792 break;
2796 case READ_MEDIA_SERIAL_NUMBER: 2793 case READ_MEDIA_SERIAL_NUMBER:
2797 case SECURITY_PROTOCOL_IN: 2794 case SECURITY_PROTOCOL_IN:
2798 case SECURITY_PROTOCOL_OUT: 2795 case SECURITY_PROTOCOL_OUT:
2799 size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; 2796 size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
2800 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2797 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2801 break; 2798 break;
2802 case SERVICE_ACTION_IN: 2799 case SERVICE_ACTION_IN:
2803 switch (cmd->t_task_cdb[1] & 0x1f) { 2800 switch (cmd->t_task_cdb[1] & 0x1f) {
2804 case SAI_READ_CAPACITY_16: 2801 case SAI_READ_CAPACITY_16:
2805 if (!passthrough) 2802 if (!passthrough)
2806 cmd->execute_task = 2803 cmd->execute_task =
2807 target_emulate_readcapacity_16; 2804 target_emulate_readcapacity_16;
2808 break; 2805 break;
2809 default: 2806 default:
2810 if (passthrough) 2807 if (passthrough)
2811 break; 2808 break;
2812 2809
2813 pr_err("Unsupported SA: 0x%02x\n", 2810 pr_err("Unsupported SA: 0x%02x\n",
2814 cmd->t_task_cdb[1] & 0x1f); 2811 cmd->t_task_cdb[1] & 0x1f);
2815 goto out_unsupported_cdb; 2812 goto out_unsupported_cdb;
2816 } 2813 }
2817 /*FALLTHROUGH*/ 2814 /*FALLTHROUGH*/
2818 case ACCESS_CONTROL_IN: 2815 case ACCESS_CONTROL_IN:
2819 case ACCESS_CONTROL_OUT: 2816 case ACCESS_CONTROL_OUT:
2820 case EXTENDED_COPY: 2817 case EXTENDED_COPY:
2821 case READ_ATTRIBUTE: 2818 case READ_ATTRIBUTE:
2822 case RECEIVE_COPY_RESULTS: 2819 case RECEIVE_COPY_RESULTS:
2823 case WRITE_ATTRIBUTE: 2820 case WRITE_ATTRIBUTE:
2824 size = (cdb[10] << 24) | (cdb[11] << 16) | 2821 size = (cdb[10] << 24) | (cdb[11] << 16) |
2825 (cdb[12] << 8) | cdb[13]; 2822 (cdb[12] << 8) | cdb[13];
2826 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2823 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2827 break; 2824 break;
2828 case RECEIVE_DIAGNOSTIC: 2825 case RECEIVE_DIAGNOSTIC:
2829 case SEND_DIAGNOSTIC: 2826 case SEND_DIAGNOSTIC:
2830 size = (cdb[3] << 8) | cdb[4]; 2827 size = (cdb[3] << 8) | cdb[4];
2831 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2828 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2832 break; 2829 break;
2833 /* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */ 2830 /* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */
2834 #if 0 2831 #if 0
2835 case GPCMD_READ_CD: 2832 case GPCMD_READ_CD:
2836 sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; 2833 sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
2837 size = (2336 * sectors); 2834 size = (2336 * sectors);
2838 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2835 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2839 break; 2836 break;
2840 #endif 2837 #endif
2841 case READ_TOC: 2838 case READ_TOC:
2842 size = cdb[8]; 2839 size = cdb[8];
2843 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2840 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2844 break; 2841 break;
2845 case REQUEST_SENSE: 2842 case REQUEST_SENSE:
2846 size = cdb[4]; 2843 size = cdb[4];
2847 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2844 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2848 if (!passthrough) 2845 if (!passthrough)
2849 cmd->execute_task = target_emulate_request_sense; 2846 cmd->execute_task = target_emulate_request_sense;
2850 break; 2847 break;
2851 case READ_ELEMENT_STATUS: 2848 case READ_ELEMENT_STATUS:
2852 size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9]; 2849 size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9];
2853 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2850 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2854 break; 2851 break;
2855 case WRITE_BUFFER: 2852 case WRITE_BUFFER:
2856 size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; 2853 size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
2857 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2854 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2858 break; 2855 break;
2859 case RESERVE: 2856 case RESERVE:
2860 case RESERVE_10: 2857 case RESERVE_10:
2861 /* 2858 /*
2862 * The SPC-2 RESERVE does not contain a size in the SCSI CDB. 2859 * The SPC-2 RESERVE does not contain a size in the SCSI CDB.
2863 * Assume the passthrough or $FABRIC_MOD will tell us about it. 2860 * Assume the passthrough or $FABRIC_MOD will tell us about it.
2864 */ 2861 */
2865 if (cdb[0] == RESERVE_10) 2862 if (cdb[0] == RESERVE_10)
2866 size = (cdb[7] << 8) | cdb[8]; 2863 size = (cdb[7] << 8) | cdb[8];
2867 else 2864 else
2868 size = cmd->data_length; 2865 size = cmd->data_length;
2869 2866
2870 /* 2867 /*
2871 * Setup the legacy emulated handler for SPC-2 and 2868 * Setup the legacy emulated handler for SPC-2 and
2872 * >= SPC-3 compatible reservation handling (CRH=1) 2869 * >= SPC-3 compatible reservation handling (CRH=1)
2873 * Otherwise, we assume the underlying SCSI logic is 2870 * Otherwise, we assume the underlying SCSI logic is
2874 * running in SPC_PASSTHROUGH, and wants reservations 2871 * running in SPC_PASSTHROUGH, and wants reservations
2875 * emulation disabled. 2872 * emulation disabled.
2876 */ 2873 */
2877 if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH) 2874 if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
2878 cmd->execute_task = target_scsi2_reservation_reserve; 2875 cmd->execute_task = target_scsi2_reservation_reserve;
2879 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; 2876 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
2880 break; 2877 break;
2881 case RELEASE: 2878 case RELEASE:
2882 case RELEASE_10: 2879 case RELEASE_10:
2883 /* 2880 /*
2884 * The SPC-2 RELEASE does not contain a size in the SCSI CDB. 2881 * The SPC-2 RELEASE does not contain a size in the SCSI CDB.
2885 * Assume the passthrough or $FABRIC_MOD will tell us about it. 2882 * Assume the passthrough or $FABRIC_MOD will tell us about it.
2886 */ 2883 */
2887 if (cdb[0] == RELEASE_10) 2884 if (cdb[0] == RELEASE_10)
2888 size = (cdb[7] << 8) | cdb[8]; 2885 size = (cdb[7] << 8) | cdb[8];
2889 else 2886 else
2890 size = cmd->data_length; 2887 size = cmd->data_length;
2891 2888
2892 if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH) 2889 if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
2893 cmd->execute_task = target_scsi2_reservation_release; 2890 cmd->execute_task = target_scsi2_reservation_release;
2894 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; 2891 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
2895 break; 2892 break;
2896 case SYNCHRONIZE_CACHE: 2893 case SYNCHRONIZE_CACHE:
2897 case 0x91: /* SYNCHRONIZE_CACHE_16: */ 2894 case 0x91: /* SYNCHRONIZE_CACHE_16: */
2898 /* 2895 /*
2899 * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE 2896 * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
2900 */ 2897 */
2901 if (cdb[0] == SYNCHRONIZE_CACHE) { 2898 if (cdb[0] == SYNCHRONIZE_CACHE) {
2902 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); 2899 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2903 cmd->t_task_lba = transport_lba_32(cdb); 2900 cmd->t_task_lba = transport_lba_32(cdb);
2904 } else { 2901 } else {
2905 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); 2902 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
2906 cmd->t_task_lba = transport_lba_64(cdb); 2903 cmd->t_task_lba = transport_lba_64(cdb);
2907 } 2904 }
2908 if (sector_ret) 2905 if (sector_ret)
2909 goto out_unsupported_cdb; 2906 goto out_unsupported_cdb;
2910 2907
2911 size = transport_get_size(sectors, cdb, cmd); 2908 size = transport_get_size(sectors, cdb, cmd);
2912 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; 2909 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
2913 2910
2914 if (passthrough) 2911 if (passthrough)
2915 break; 2912 break;
2916 2913
2917 /* 2914 /*
2918 * Check to ensure that LBA + Range does not extend past the end of 2915 * Check to ensure that LBA + Range does not extend past the end of
2919 * device for IBLOCK and FILEIO ->do_sync_cache() backend calls 2916 * device for IBLOCK and FILEIO ->do_sync_cache() backend calls
2920 */ 2917 */
2921 if ((cmd->t_task_lba != 0) || (sectors != 0)) { 2918 if ((cmd->t_task_lba != 0) || (sectors != 0)) {
2922 if (transport_cmd_get_valid_sectors(cmd) < 0) 2919 if (transport_cmd_get_valid_sectors(cmd) < 0)
2923 goto out_invalid_cdb_field; 2920 goto out_invalid_cdb_field;
2924 } 2921 }
2925 cmd->execute_task = target_emulate_synchronize_cache; 2922 cmd->execute_task = target_emulate_synchronize_cache;
2926 break; 2923 break;
2927 case UNMAP: 2924 case UNMAP:
2928 size = get_unaligned_be16(&cdb[7]); 2925 size = get_unaligned_be16(&cdb[7]);
2929 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2926 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2930 if (!passthrough) 2927 if (!passthrough)
2931 cmd->execute_task = target_emulate_unmap; 2928 cmd->execute_task = target_emulate_unmap;
2932 break; 2929 break;
2933 case WRITE_SAME_16: 2930 case WRITE_SAME_16:
2934 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); 2931 sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
2935 if (sector_ret) 2932 if (sector_ret)
2936 goto out_unsupported_cdb; 2933 goto out_unsupported_cdb;
2937 2934
2938 if (sectors) 2935 if (sectors)
2939 size = transport_get_size(1, cdb, cmd); 2936 size = transport_get_size(1, cdb, cmd);
2940 else { 2937 else {
2941 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); 2938 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
2942 goto out_invalid_cdb_field; 2939 goto out_invalid_cdb_field;
2943 } 2940 }
2944 2941
2945 cmd->t_task_lba = get_unaligned_be64(&cdb[2]); 2942 cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
2946 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2943 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2947 2944
2948 if (target_check_write_same_discard(&cdb[1], dev) < 0) 2945 if (target_check_write_same_discard(&cdb[1], dev) < 0)
2949 goto out_invalid_cdb_field; 2946 goto out_invalid_cdb_field;
2950 if (!passthrough) 2947 if (!passthrough)
2951 cmd->execute_task = target_emulate_write_same; 2948 cmd->execute_task = target_emulate_write_same;
2952 break; 2949 break;
2953 case WRITE_SAME: 2950 case WRITE_SAME:
2954 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); 2951 sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
2955 if (sector_ret) 2952 if (sector_ret)
2956 goto out_unsupported_cdb; 2953 goto out_unsupported_cdb;
2957 2954
2958 if (sectors) 2955 if (sectors)
2959 size = transport_get_size(1, cdb, cmd); 2956 size = transport_get_size(1, cdb, cmd);
2960 else { 2957 else {
2961 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); 2958 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
2962 goto out_invalid_cdb_field; 2959 goto out_invalid_cdb_field;
2963 } 2960 }
2964 2961
2965 cmd->t_task_lba = get_unaligned_be32(&cdb[2]); 2962 cmd->t_task_lba = get_unaligned_be32(&cdb[2]);
2966 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2963 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
2967 /* 2964 /*
2968 * Follow sbcr26 with WRITE_SAME (10) and check for the existence 2965 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
2969 * of byte 1 bit 3 UNMAP instead of the original reserved field 2966 * of byte 1 bit 3 UNMAP instead of the original reserved field
2970 */ 2967 */
2971 if (target_check_write_same_discard(&cdb[1], dev) < 0) 2968 if (target_check_write_same_discard(&cdb[1], dev) < 0)
2972 goto out_invalid_cdb_field; 2969 goto out_invalid_cdb_field;
2973 if (!passthrough) 2970 if (!passthrough)
2974 cmd->execute_task = target_emulate_write_same; 2971 cmd->execute_task = target_emulate_write_same;
2975 break; 2972 break;
2976 case ALLOW_MEDIUM_REMOVAL: 2973 case ALLOW_MEDIUM_REMOVAL:
2977 case ERASE: 2974 case ERASE:
2978 case REZERO_UNIT: 2975 case REZERO_UNIT:
2979 case SEEK_10: 2976 case SEEK_10:
2980 case SPACE: 2977 case SPACE:
2981 case START_STOP: 2978 case START_STOP:
2982 case TEST_UNIT_READY: 2979 case TEST_UNIT_READY:
2983 case VERIFY: 2980 case VERIFY:
2984 case WRITE_FILEMARKS: 2981 case WRITE_FILEMARKS:
2985 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; 2982 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
2986 if (!passthrough) 2983 if (!passthrough)
2987 cmd->execute_task = target_emulate_noop; 2984 cmd->execute_task = target_emulate_noop;
2988 break; 2985 break;
2989 case GPCMD_CLOSE_TRACK: 2986 case GPCMD_CLOSE_TRACK:
2990 case INITIALIZE_ELEMENT_STATUS: 2987 case INITIALIZE_ELEMENT_STATUS:
2991 case GPCMD_LOAD_UNLOAD: 2988 case GPCMD_LOAD_UNLOAD:
2992 case GPCMD_SET_SPEED: 2989 case GPCMD_SET_SPEED:
2993 case MOVE_MEDIUM: 2990 case MOVE_MEDIUM:
2994 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; 2991 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
2995 break; 2992 break;
2996 case REPORT_LUNS: 2993 case REPORT_LUNS:
2997 cmd->execute_task = target_report_luns; 2994 cmd->execute_task = target_report_luns;
2998 size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; 2995 size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
2999 /* 2996 /*
3000 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS 2997 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
3001 * See spc4r17 section 5.3 2998 * See spc4r17 section 5.3
3002 */ 2999 */
3003 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) 3000 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
3004 cmd->sam_task_attr = MSG_HEAD_TAG; 3001 cmd->sam_task_attr = MSG_HEAD_TAG;
3005 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 3002 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
3006 break; 3003 break;
3007 default: 3004 default:
3008 pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode" 3005 pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
3009 " 0x%02x, sending CHECK_CONDITION.\n", 3006 " 0x%02x, sending CHECK_CONDITION.\n",
3010 cmd->se_tfo->get_fabric_name(), cdb[0]); 3007 cmd->se_tfo->get_fabric_name(), cdb[0]);
3011 goto out_unsupported_cdb; 3008 goto out_unsupported_cdb;
3012 } 3009 }
3013 3010
3014 if (size != cmd->data_length) { 3011 if (size != cmd->data_length) {
3015 pr_warn("TARGET_CORE[%s]: Expected Transfer Length:" 3012 pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
3016 " %u does not match SCSI CDB Length: %u for SAM Opcode:" 3013 " %u does not match SCSI CDB Length: %u for SAM Opcode:"
3017 " 0x%02x\n", cmd->se_tfo->get_fabric_name(), 3014 " 0x%02x\n", cmd->se_tfo->get_fabric_name(),
3018 cmd->data_length, size, cdb[0]); 3015 cmd->data_length, size, cdb[0]);
3019 3016
3020 cmd->cmd_spdtl = size; 3017 cmd->cmd_spdtl = size;
3021 3018
3022 if (cmd->data_direction == DMA_TO_DEVICE) { 3019 if (cmd->data_direction == DMA_TO_DEVICE) {
3023 pr_err("Rejecting underflow/overflow" 3020 pr_err("Rejecting underflow/overflow"
3024 " WRITE data\n"); 3021 " WRITE data\n");
3025 goto out_invalid_cdb_field; 3022 goto out_invalid_cdb_field;
3026 } 3023 }
3027 /* 3024 /*
3028 * Reject READ_* or WRITE_* with overflow/underflow for 3025 * Reject READ_* or WRITE_* with overflow/underflow for
3029 * type SCF_SCSI_DATA_SG_IO_CDB. 3026 * type SCF_SCSI_DATA_SG_IO_CDB.
3030 */ 3027 */
3031 if (!ret && (dev->se_sub_dev->se_dev_attrib.block_size != 512)) { 3028 if (!ret && (dev->se_sub_dev->se_dev_attrib.block_size != 512)) {
3032 pr_err("Failing OVERFLOW/UNDERFLOW for LBA op" 3029 pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
3033 " CDB on non 512-byte sector setup subsystem" 3030 " CDB on non 512-byte sector setup subsystem"
3034 " plugin: %s\n", dev->transport->name); 3031 " plugin: %s\n", dev->transport->name);
3035 /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */ 3032 /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
3036 goto out_invalid_cdb_field; 3033 goto out_invalid_cdb_field;
3037 } 3034 }
3038 3035
3039 if (size > cmd->data_length) { 3036 if (size > cmd->data_length) {
3040 cmd->se_cmd_flags |= SCF_OVERFLOW_BIT; 3037 cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
3041 cmd->residual_count = (size - cmd->data_length); 3038 cmd->residual_count = (size - cmd->data_length);
3042 } else { 3039 } else {
3043 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; 3040 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
3044 cmd->residual_count = (cmd->data_length - size); 3041 cmd->residual_count = (cmd->data_length - size);
3045 } 3042 }
3046 cmd->data_length = size; 3043 cmd->data_length = size;
3047 } 3044 }
3048 3045
3049 /* reject any command that we don't have a handler for */ 3046 /* reject any command that we don't have a handler for */
3050 if (!(passthrough || cmd->execute_task || 3047 if (!(passthrough || cmd->execute_task ||
3051 (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB))) 3048 (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
3052 goto out_unsupported_cdb; 3049 goto out_unsupported_cdb;
3053 3050
3054 /* Let's limit control cdbs to a page, for simplicity's sake. */ 3051 /* Let's limit control cdbs to a page, for simplicity's sake. */
3055 if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) && 3052 if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
3056 size > PAGE_SIZE) 3053 size > PAGE_SIZE)
3057 goto out_invalid_cdb_field; 3054 goto out_invalid_cdb_field;
3058 3055
3059 transport_set_supported_SAM_opcode(cmd); 3056 transport_set_supported_SAM_opcode(cmd);
3060 return ret; 3057 return ret;
3061 3058
3062 out_unsupported_cdb: 3059 out_unsupported_cdb:
3063 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 3060 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3064 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; 3061 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
3065 return -EINVAL; 3062 return -EINVAL;
3066 out_invalid_cdb_field: 3063 out_invalid_cdb_field:
3067 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 3064 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3068 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; 3065 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
3069 return -EINVAL; 3066 return -EINVAL;
3070 } 3067 }
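
The multi-byte transfer lengths in the sequencer above are plain big-endian fields pulled straight out of the CDB. As a sanity check, here is a minimal user-space sketch (illustrative only, not kernel code) of the same extraction the manual shifts perform on cdb[6..9] for cases like REPORT_LUNS and SECURITY_PROTOCOL_IN; the get_unaligned_be16()/be64() helpers used elsewhere in this function do the equivalent for 16- and 64-bit fields:

    #include <stdint.h>
    #include <stdio.h>

    /* Same arithmetic as (cdb[6] << 24) | (cdb[7] << 16) | ... above */
    static uint32_t be32_from(const uint8_t *p)
    {
    	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
    	       ((uint32_t)p[2] << 8) | p[3];
    }

    int main(void)
    {
    	/* REPORT LUNS CDB with ALLOCATION LENGTH 0x00000400 in bytes 6..9 */
    	uint8_t cdb[12] = { 0xa0, 0, 0, 0, 0, 0,
    			    0x00, 0x00, 0x04, 0x00, 0, 0 };

    	printf("allocation length: %u\n", (unsigned)be32_from(&cdb[6]));
    	return 0;	/* prints 1024 */
    }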
3071 3068
3072 /* 3069 /*
3073 * Called from I/O completion to determine which dormant/delayed 3070 * Called from I/O completion to determine which dormant/delayed
3074 * and ordered cmds need to have their tasks added to the execution queue. 3071 * and ordered cmds need to have their tasks added to the execution queue.
3075 */ 3072 */
3076 static void transport_complete_task_attr(struct se_cmd *cmd) 3073 static void transport_complete_task_attr(struct se_cmd *cmd)
3077 { 3074 {
3078 struct se_device *dev = cmd->se_dev; 3075 struct se_device *dev = cmd->se_dev;
3079 struct se_cmd *cmd_p, *cmd_tmp; 3076 struct se_cmd *cmd_p, *cmd_tmp;
3080 int new_active_tasks = 0; 3077 int new_active_tasks = 0;
3081 3078
3082 if (cmd->sam_task_attr == MSG_SIMPLE_TAG) { 3079 if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
3083 atomic_dec(&dev->simple_cmds); 3080 atomic_dec(&dev->simple_cmds);
3084 smp_mb__after_atomic_dec(); 3081 smp_mb__after_atomic_dec();
3085 dev->dev_cur_ordered_id++; 3082 dev->dev_cur_ordered_id++;
3086 pr_debug("Incremented dev->dev_cur_ordered_id: %u for" 3083 pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
3087 " SIMPLE: %u\n", dev->dev_cur_ordered_id, 3084 " SIMPLE: %u\n", dev->dev_cur_ordered_id,
3088 cmd->se_ordered_id); 3085 cmd->se_ordered_id);
3089 } else if (cmd->sam_task_attr == MSG_HEAD_TAG) { 3086 } else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
3090 dev->dev_cur_ordered_id++; 3087 dev->dev_cur_ordered_id++;
3091 pr_debug("Incremented dev_cur_ordered_id: %u for" 3088 pr_debug("Incremented dev_cur_ordered_id: %u for"
3092 " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id, 3089 " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
3093 cmd->se_ordered_id); 3090 cmd->se_ordered_id);
3094 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { 3091 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
3095 atomic_dec(&dev->dev_ordered_sync); 3092 atomic_dec(&dev->dev_ordered_sync);
3096 smp_mb__after_atomic_dec(); 3093 smp_mb__after_atomic_dec();
3097 3094
3098 dev->dev_cur_ordered_id++; 3095 dev->dev_cur_ordered_id++;
3099 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:" 3096 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
3100 " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id); 3097 " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
3101 } 3098 }
3102 /* 3099 /*
3103 * Process all commands up to the last received 3100 * Process all commands up to the last received
3104 * ORDERED task attribute, which requires another blocking 3101 * ORDERED task attribute, which requires another blocking
3105 * boundary 3102 * boundary
3106 */ 3103 */
3107 spin_lock(&dev->delayed_cmd_lock); 3104 spin_lock(&dev->delayed_cmd_lock);
3108 list_for_each_entry_safe(cmd_p, cmd_tmp, 3105 list_for_each_entry_safe(cmd_p, cmd_tmp,
3109 &dev->delayed_cmd_list, se_delayed_node) { 3106 &dev->delayed_cmd_list, se_delayed_node) {
3110 3107
3111 list_del(&cmd_p->se_delayed_node); 3108 list_del(&cmd_p->se_delayed_node);
3112 spin_unlock(&dev->delayed_cmd_lock); 3109 spin_unlock(&dev->delayed_cmd_lock);
3113 3110
3114 pr_debug("Calling add_tasks() for" 3111 pr_debug("Calling add_tasks() for"
3115 " cmd_p: 0x%02x Task Attr: 0x%02x" 3112 " cmd_p: 0x%02x Task Attr: 0x%02x"
3116 " Dormant -> Active, se_ordered_id: %u\n", 3113 " Dormant -> Active, se_ordered_id: %u\n",
3117 cmd_p->t_task_cdb[0], 3114 cmd_p->t_task_cdb[0],
3118 cmd_p->sam_task_attr, cmd_p->se_ordered_id); 3115 cmd_p->sam_task_attr, cmd_p->se_ordered_id);
3119 3116
3120 transport_add_tasks_from_cmd(cmd_p); 3117 transport_add_tasks_from_cmd(cmd_p);
3121 new_active_tasks++; 3118 new_active_tasks++;
3122 3119
3123 spin_lock(&dev->delayed_cmd_lock); 3120 spin_lock(&dev->delayed_cmd_lock);
3124 if (cmd_p->sam_task_attr == MSG_ORDERED_TAG) 3121 if (cmd_p->sam_task_attr == MSG_ORDERED_TAG)
3125 break; 3122 break;
3126 } 3123 }
3127 spin_unlock(&dev->delayed_cmd_lock); 3124 spin_unlock(&dev->delayed_cmd_lock);
3128 /* 3125 /*
3129 * If new tasks have become active, wake up the transport thread 3126 * If new tasks have become active, wake up the transport thread
3130 * to do the processing of the Active tasks. 3127 * to do the processing of the Active tasks.
3131 */ 3128 */
3132 if (new_active_tasks != 0) 3129 if (new_active_tasks != 0)
3133 wake_up_interruptible(&dev->dev_queue_obj.thread_wq); 3130 wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
3134 } 3131 }
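
To make the drain semantics concrete, a toy user-space model of the loop above (illustrative only, hypothetical queue contents): delayed commands are re-activated in order up to and including the next ORDERED command, which forms the next blocking boundary.

    #include <stdio.h>

    enum attr { SIMPLE, ORDERED };

    int main(void)
    {
    	/* Hypothetical delayed list: two SIMPLE cmds queued behind an
    	 * ORDERED cmd, with one more SIMPLE cmd behind that. */
    	enum attr delayed[] = { SIMPLE, SIMPLE, ORDERED, SIMPLE };
    	int i;

    	for (i = 0; i < 4; i++) {
    		printf("re-activating delayed cmd %d\n", i);
    		if (delayed[i] == ORDERED)
    			break;	/* next blocking boundary reached */
    	}
    	/* cmd 3 stays dormant until the ORDERED cmd itself completes */
    	return 0;
    }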
3135 3132
3136 static void transport_complete_qf(struct se_cmd *cmd) 3133 static void transport_complete_qf(struct se_cmd *cmd)
3137 { 3134 {
3138 int ret = 0; 3135 int ret = 0;
3139 3136
3140 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) 3137 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
3141 transport_complete_task_attr(cmd); 3138 transport_complete_task_attr(cmd);
3142 3139
3143 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { 3140 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
3144 ret = cmd->se_tfo->queue_status(cmd); 3141 ret = cmd->se_tfo->queue_status(cmd);
3145 if (ret) 3142 if (ret)
3146 goto out; 3143 goto out;
3147 } 3144 }
3148 3145
3149 switch (cmd->data_direction) { 3146 switch (cmd->data_direction) {
3150 case DMA_FROM_DEVICE: 3147 case DMA_FROM_DEVICE:
3151 ret = cmd->se_tfo->queue_data_in(cmd); 3148 ret = cmd->se_tfo->queue_data_in(cmd);
3152 break; 3149 break;
3153 case DMA_TO_DEVICE: 3150 case DMA_TO_DEVICE:
3154 if (cmd->t_bidi_data_sg) { 3151 if (cmd->t_bidi_data_sg) {
3155 ret = cmd->se_tfo->queue_data_in(cmd); 3152 ret = cmd->se_tfo->queue_data_in(cmd);
3156 if (ret < 0) 3153 if (ret < 0)
3157 break; 3154 break;
3158 } 3155 }
3159 /* Fall through for DMA_TO_DEVICE */ 3156 /* Fall through for DMA_TO_DEVICE */
3160 case DMA_NONE: 3157 case DMA_NONE:
3161 ret = cmd->se_tfo->queue_status(cmd); 3158 ret = cmd->se_tfo->queue_status(cmd);
3162 break; 3159 break;
3163 default: 3160 default:
3164 break; 3161 break;
3165 } 3162 }
3166 3163
3167 out: 3164 out:
3168 if (ret < 0) { 3165 if (ret < 0) {
3169 transport_handle_queue_full(cmd, cmd->se_dev); 3166 transport_handle_queue_full(cmd, cmd->se_dev);
3170 return; 3167 return;
3171 } 3168 }
3172 transport_lun_remove_cmd(cmd); 3169 transport_lun_remove_cmd(cmd);
3173 transport_cmd_check_stop_to_fabric(cmd); 3170 transport_cmd_check_stop_to_fabric(cmd);
3174 } 3171 }
3175 3172
3176 static void transport_handle_queue_full( 3173 static void transport_handle_queue_full(
3177 struct se_cmd *cmd, 3174 struct se_cmd *cmd,
3178 struct se_device *dev) 3175 struct se_device *dev)
3179 { 3176 {
3180 spin_lock_irq(&dev->qf_cmd_lock); 3177 spin_lock_irq(&dev->qf_cmd_lock);
3181 list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); 3178 list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
3182 atomic_inc(&dev->dev_qf_count); 3179 atomic_inc(&dev->dev_qf_count);
3183 smp_mb__after_atomic_inc(); 3180 smp_mb__after_atomic_inc();
3184 spin_unlock_irq(&cmd->se_dev->qf_cmd_lock); 3181 spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);
3185 3182
3186 schedule_work(&cmd->se_dev->qf_work_queue); 3183 schedule_work(&cmd->se_dev->qf_work_queue);
3187 } 3184 }
3188 3185
3189 static void target_complete_ok_work(struct work_struct *work) 3186 static void target_complete_ok_work(struct work_struct *work)
3190 { 3187 {
3191 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 3188 struct se_cmd *cmd = container_of(work, struct se_cmd, work);
3192 int reason = 0, ret; 3189 int reason = 0, ret;
3193 3190
3194 /* 3191 /*
3195 * Check if we need to move delayed/dormant tasks from cmds on the 3192 * Check if we need to move delayed/dormant tasks from cmds on the
3196 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task 3193 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
3197 * Attribute. 3194 * Attribute.
3198 */ 3195 */
3199 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) 3196 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
3200 transport_complete_task_attr(cmd); 3197 transport_complete_task_attr(cmd);
3201 /* 3198 /*
3202 * Check to schedule QUEUE_FULL work, or execute an existing 3199 * Check to schedule QUEUE_FULL work, or execute an existing
3203 * cmd->transport_qf_callback() 3200 * cmd->transport_qf_callback()
3204 */ 3201 */
3205 if (atomic_read(&cmd->se_dev->dev_qf_count) != 0) 3202 if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
3206 schedule_work(&cmd->se_dev->qf_work_queue); 3203 schedule_work(&cmd->se_dev->qf_work_queue);
3207 3204
3208 /* 3205 /*
3209 * Check if we need to retrieve a sense buffer from 3206 * Check if we need to retrieve a sense buffer from
3210 * the struct se_cmd in question. 3207 * the struct se_cmd in question.
3211 */ 3208 */
3212 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { 3209 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
3213 if (transport_get_sense_data(cmd) < 0) 3210 if (transport_get_sense_data(cmd) < 0)
3214 reason = TCM_NON_EXISTENT_LUN; 3211 reason = TCM_NON_EXISTENT_LUN;
3215 3212
3216 /* 3213 /*
3217 * Only set when a struct se_task->task_scsi_status returned 3214 * Only set when a struct se_task->task_scsi_status returned
3218 * a non-GOOD status. 3215 * a non-GOOD status.
3219 */ 3216 */
3220 if (cmd->scsi_status) { 3217 if (cmd->scsi_status) {
3221 ret = transport_send_check_condition_and_sense( 3218 ret = transport_send_check_condition_and_sense(
3222 cmd, reason, 1); 3219 cmd, reason, 1);
3223 if (ret == -EAGAIN || ret == -ENOMEM) 3220 if (ret == -EAGAIN || ret == -ENOMEM)
3224 goto queue_full; 3221 goto queue_full;
3225 3222
3226 transport_lun_remove_cmd(cmd); 3223 transport_lun_remove_cmd(cmd);
3227 transport_cmd_check_stop_to_fabric(cmd); 3224 transport_cmd_check_stop_to_fabric(cmd);
3228 return; 3225 return;
3229 } 3226 }
3230 } 3227 }
3231 /* 3228 /*
3232 * Check for a callback, used amongst other things by 3229 * Check for a callback, used amongst other things by
3233 * XDWRITE_READ_10 emulation. 3230 * XDWRITE_READ_10 emulation.
3234 */ 3231 */
3235 if (cmd->transport_complete_callback) 3232 if (cmd->transport_complete_callback)
3236 cmd->transport_complete_callback(cmd); 3233 cmd->transport_complete_callback(cmd);
3237 3234
3238 switch (cmd->data_direction) { 3235 switch (cmd->data_direction) {
3239 case DMA_FROM_DEVICE: 3236 case DMA_FROM_DEVICE:
3240 spin_lock(&cmd->se_lun->lun_sep_lock); 3237 spin_lock(&cmd->se_lun->lun_sep_lock);
3241 if (cmd->se_lun->lun_sep) { 3238 if (cmd->se_lun->lun_sep) {
3242 cmd->se_lun->lun_sep->sep_stats.tx_data_octets += 3239 cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
3243 cmd->data_length; 3240 cmd->data_length;
3244 } 3241 }
3245 spin_unlock(&cmd->se_lun->lun_sep_lock); 3242 spin_unlock(&cmd->se_lun->lun_sep_lock);
3246 3243
3247 ret = cmd->se_tfo->queue_data_in(cmd); 3244 ret = cmd->se_tfo->queue_data_in(cmd);
3248 if (ret == -EAGAIN || ret == -ENOMEM) 3245 if (ret == -EAGAIN || ret == -ENOMEM)
3249 goto queue_full; 3246 goto queue_full;
3250 break; 3247 break;
3251 case DMA_TO_DEVICE: 3248 case DMA_TO_DEVICE:
3252 spin_lock(&cmd->se_lun->lun_sep_lock); 3249 spin_lock(&cmd->se_lun->lun_sep_lock);
3253 if (cmd->se_lun->lun_sep) { 3250 if (cmd->se_lun->lun_sep) {
3254 cmd->se_lun->lun_sep->sep_stats.rx_data_octets += 3251 cmd->se_lun->lun_sep->sep_stats.rx_data_octets +=
3255 cmd->data_length; 3252 cmd->data_length;
3256 } 3253 }
3257 spin_unlock(&cmd->se_lun->lun_sep_lock); 3254 spin_unlock(&cmd->se_lun->lun_sep_lock);
3258 /* 3255 /*
3259 * Check if we need to send READ payload for BIDI-COMMAND 3256 * Check if we need to send READ payload for BIDI-COMMAND
3260 */ 3257 */
3261 if (cmd->t_bidi_data_sg) { 3258 if (cmd->t_bidi_data_sg) {
3262 spin_lock(&cmd->se_lun->lun_sep_lock); 3259 spin_lock(&cmd->se_lun->lun_sep_lock);
3263 if (cmd->se_lun->lun_sep) { 3260 if (cmd->se_lun->lun_sep) {
3264 cmd->se_lun->lun_sep->sep_stats.tx_data_octets += 3261 cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
3265 cmd->data_length; 3262 cmd->data_length;
3266 } 3263 }
3267 spin_unlock(&cmd->se_lun->lun_sep_lock); 3264 spin_unlock(&cmd->se_lun->lun_sep_lock);
3268 ret = cmd->se_tfo->queue_data_in(cmd); 3265 ret = cmd->se_tfo->queue_data_in(cmd);
3269 if (ret == -EAGAIN || ret == -ENOMEM) 3266 if (ret == -EAGAIN || ret == -ENOMEM)
3270 goto queue_full; 3267 goto queue_full;
3271 break; 3268 break;
3272 } 3269 }
3273 /* Fall through for DMA_TO_DEVICE */ 3270 /* Fall through for DMA_TO_DEVICE */
3274 case DMA_NONE: 3271 case DMA_NONE:
3275 ret = cmd->se_tfo->queue_status(cmd); 3272 ret = cmd->se_tfo->queue_status(cmd);
3276 if (ret == -EAGAIN || ret == -ENOMEM) 3273 if (ret == -EAGAIN || ret == -ENOMEM)
3277 goto queue_full; 3274 goto queue_full;
3278 break; 3275 break;
3279 default: 3276 default:
3280 break; 3277 break;
3281 } 3278 }
3282 3279
3283 transport_lun_remove_cmd(cmd); 3280 transport_lun_remove_cmd(cmd);
3284 transport_cmd_check_stop_to_fabric(cmd); 3281 transport_cmd_check_stop_to_fabric(cmd);
3285 return; 3282 return;
3286 3283
3287 queue_full: 3284 queue_full:
3288 pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p," 3285 pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
3289 " data_direction: %d\n", cmd, cmd->data_direction); 3286 " data_direction: %d\n", cmd, cmd->data_direction);
3290 cmd->t_state = TRANSPORT_COMPLETE_QF_OK; 3287 cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
3291 transport_handle_queue_full(cmd, cmd->se_dev); 3288 transport_handle_queue_full(cmd, cmd->se_dev);
3292 } 3289 }
3293 3290
3294 static void transport_free_dev_tasks(struct se_cmd *cmd) 3291 static void transport_free_dev_tasks(struct se_cmd *cmd)
3295 { 3292 {
3296 struct se_task *task, *task_tmp; 3293 struct se_task *task, *task_tmp;
3297 unsigned long flags; 3294 unsigned long flags;
3298 LIST_HEAD(dispose_list); 3295 LIST_HEAD(dispose_list);
3299 3296
3300 spin_lock_irqsave(&cmd->t_state_lock, flags); 3297 spin_lock_irqsave(&cmd->t_state_lock, flags);
3301 list_for_each_entry_safe(task, task_tmp, 3298 list_for_each_entry_safe(task, task_tmp,
3302 &cmd->t_task_list, t_list) { 3299 &cmd->t_task_list, t_list) {
3303 if (!(task->task_flags & TF_ACTIVE)) 3300 if (!(task->task_flags & TF_ACTIVE))
3304 list_move_tail(&task->t_list, &dispose_list); 3301 list_move_tail(&task->t_list, &dispose_list);
3305 } 3302 }
3306 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3303 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3307 3304
3308 while (!list_empty(&dispose_list)) { 3305 while (!list_empty(&dispose_list)) {
3309 task = list_first_entry(&dispose_list, struct se_task, t_list); 3306 task = list_first_entry(&dispose_list, struct se_task, t_list);
3310 3307
3311 if (task->task_sg != cmd->t_data_sg && 3308 if (task->task_sg != cmd->t_data_sg &&
3312 task->task_sg != cmd->t_bidi_data_sg) 3309 task->task_sg != cmd->t_bidi_data_sg)
3313 kfree(task->task_sg); 3310 kfree(task->task_sg);
3314 3311
3315 list_del(&task->t_list); 3312 list_del(&task->t_list);
3316 3313
3317 cmd->se_dev->transport->free_task(task); 3314 cmd->se_dev->transport->free_task(task);
3318 } 3315 }
3319 } 3316 }
3320 3317
3321 static inline void transport_free_sgl(struct scatterlist *sgl, int nents) 3318 static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
3322 { 3319 {
3323 struct scatterlist *sg; 3320 struct scatterlist *sg;
3324 int count; 3321 int count;
3325 3322
3326 for_each_sg(sgl, sg, nents, count) 3323 for_each_sg(sgl, sg, nents, count)
3327 __free_page(sg_page(sg)); 3324 __free_page(sg_page(sg));
3328 3325
3329 kfree(sgl); 3326 kfree(sgl);
3330 } 3327 }
3331 3328
3332 static inline void transport_free_pages(struct se_cmd *cmd) 3329 static inline void transport_free_pages(struct se_cmd *cmd)
3333 { 3330 {
3334 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) 3331 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)
3335 return; 3332 return;
3336 3333
3337 transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents); 3334 transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
3338 cmd->t_data_sg = NULL; 3335 cmd->t_data_sg = NULL;
3339 cmd->t_data_nents = 0; 3336 cmd->t_data_nents = 0;
3340 3337
3341 transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents); 3338 transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
3342 cmd->t_bidi_data_sg = NULL; 3339 cmd->t_bidi_data_sg = NULL;
3343 cmd->t_bidi_data_nents = 0; 3340 cmd->t_bidi_data_nents = 0;
3344 } 3341 }
3345 3342
3346 /** 3343 /**
3347 * transport_release_cmd - free a command 3344 * transport_release_cmd - free a command
3348 * @cmd: command to free 3345 * @cmd: command to free
3349 * 3346 *
3350 * This routine unconditionally frees a command, and reference counting 3347 * This routine unconditionally frees a command, and reference counting
3351 * or list removal must be done in the caller. 3348 * or list removal must be done in the caller.
3352 */ 3349 */
3353 static void transport_release_cmd(struct se_cmd *cmd) 3350 static void transport_release_cmd(struct se_cmd *cmd)
3354 { 3351 {
3355 BUG_ON(!cmd->se_tfo); 3352 BUG_ON(!cmd->se_tfo);
3356 3353
3357 if (cmd->se_tmr_req) 3354 if (cmd->se_tmr_req)
3358 core_tmr_release_req(cmd->se_tmr_req); 3355 core_tmr_release_req(cmd->se_tmr_req);
3359 if (cmd->t_task_cdb != cmd->__t_task_cdb) 3356 if (cmd->t_task_cdb != cmd->__t_task_cdb)
3360 kfree(cmd->t_task_cdb); 3357 kfree(cmd->t_task_cdb);
3361 /* 3358 /*
3362 * Check if target_wait_for_sess_cmds() is expecting to 3359 * Check if target_wait_for_sess_cmds() is expecting to
3363 * release se_cmd directly here.. 3360 * release se_cmd directly here..
3364 */ 3361 */
3365 if (cmd->check_release != 0 && cmd->se_tfo->check_release_cmd) 3362 if (cmd->check_release != 0 && cmd->se_tfo->check_release_cmd)
3366 if (cmd->se_tfo->check_release_cmd(cmd) != 0) 3363 if (cmd->se_tfo->check_release_cmd(cmd) != 0)
3367 return; 3364 return;
3368 3365
3369 cmd->se_tfo->release_cmd(cmd); 3366 cmd->se_tfo->release_cmd(cmd);
3370 } 3367 }
3371 3368
3372 /** 3369 /**
3373 * transport_put_cmd - release a reference to a command 3370 * transport_put_cmd - release a reference to a command
3374 * @cmd: command to release 3371 * @cmd: command to release
3375 * 3372 *
3376 * This routine releases our reference to the command and frees it if possible. 3373 * This routine releases our reference to the command and frees it if possible.
3377 */ 3374 */
3378 static void transport_put_cmd(struct se_cmd *cmd) 3375 static void transport_put_cmd(struct se_cmd *cmd)
3379 { 3376 {
3380 unsigned long flags; 3377 unsigned long flags;
3381 int free_tasks = 0; 3378 int free_tasks = 0;
3382 3379
3383 spin_lock_irqsave(&cmd->t_state_lock, flags); 3380 spin_lock_irqsave(&cmd->t_state_lock, flags);
3384 if (atomic_read(&cmd->t_fe_count)) { 3381 if (atomic_read(&cmd->t_fe_count)) {
3385 if (!atomic_dec_and_test(&cmd->t_fe_count)) 3382 if (!atomic_dec_and_test(&cmd->t_fe_count))
3386 goto out_busy; 3383 goto out_busy;
3387 } 3384 }
3388 3385
3389 if (atomic_read(&cmd->t_se_count)) { 3386 if (atomic_read(&cmd->t_se_count)) {
3390 if (!atomic_dec_and_test(&cmd->t_se_count)) 3387 if (!atomic_dec_and_test(&cmd->t_se_count))
3391 goto out_busy; 3388 goto out_busy;
3392 } 3389 }
3393 3390
3394 if (atomic_read(&cmd->transport_dev_active)) { 3391 if (atomic_read(&cmd->transport_dev_active)) {
3395 atomic_set(&cmd->transport_dev_active, 0); 3392 atomic_set(&cmd->transport_dev_active, 0);
3396 transport_all_task_dev_remove_state(cmd); 3393 transport_all_task_dev_remove_state(cmd);
3397 free_tasks = 1; 3394 free_tasks = 1;
3398 } 3395 }
3399 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3396 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3400 3397
3401 if (free_tasks != 0) 3398 if (free_tasks != 0)
3402 transport_free_dev_tasks(cmd); 3399 transport_free_dev_tasks(cmd);
3403 3400
3404 transport_free_pages(cmd); 3401 transport_free_pages(cmd);
3405 transport_release_cmd(cmd); 3402 transport_release_cmd(cmd);
3406 return; 3403 return;
3407 out_busy: 3404 out_busy:
3408 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3405 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3409 } 3406 }
3410 3407
3411 /* 3408 /*
3412 * transport_generic_map_mem_to_cmd - Use fabric-alloced pages instead of 3409 * transport_generic_map_mem_to_cmd - Use fabric-alloced pages instead of
3413 * allocating in the core. 3410 * allocating in the core.
3414 * @cmd: Associated se_cmd descriptor 3411 * @cmd: Associated se_cmd descriptor
3415 * @sgl: SGL style memory for TCM WRITE / READ 3412 * @sgl: SGL style memory for TCM WRITE / READ
3416 * @sgl_count: Number of SGL elements 3413 * @sgl_count: Number of SGL elements
3417 * @sgl_bidi: SGL style memory for TCM BIDI READ 3414 * @sgl_bidi: SGL style memory for TCM BIDI READ
3418 * @sgl_bidi_count: Number of BIDI READ SGL elements 3415 * @sgl_bidi_count: Number of BIDI READ SGL elements
3419 * 3416 *
3420 * Return: nonzero if the cmd was rejected for -ENOMEM or improper usage 3417 * Return: nonzero if the cmd was rejected for -ENOMEM or improper usage
3421 * of the parameters. 3418 * of the parameters.
3422 */ 3419 */
3423 int transport_generic_map_mem_to_cmd( 3420 int transport_generic_map_mem_to_cmd(
3424 struct se_cmd *cmd, 3421 struct se_cmd *cmd,
3425 struct scatterlist *sgl, 3422 struct scatterlist *sgl,
3426 u32 sgl_count, 3423 u32 sgl_count,
3427 struct scatterlist *sgl_bidi, 3424 struct scatterlist *sgl_bidi,
3428 u32 sgl_bidi_count) 3425 u32 sgl_bidi_count)
3429 { 3426 {
3430 if (!sgl || !sgl_count) 3427 if (!sgl || !sgl_count)
3431 return 0; 3428 return 0;
3432 3429
3433 if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) || 3430 if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
3434 (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) { 3431 (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
3435 /* 3432 /*
3436 * Reject SCSI data overflow with map_mem_to_cmd() as incoming 3433 * Reject SCSI data overflow with map_mem_to_cmd() as incoming
3437 * scatterlists have already been set up to follow what the fabric 3434 * scatterlists have already been set up to follow what the fabric
3438 * passes for the original expected data transfer length. 3435 * passes for the original expected data transfer length.
3439 */ 3436 */
3440 if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { 3437 if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
3441 pr_warn("Rejecting SCSI DATA overflow for fabric using" 3438 pr_warn("Rejecting SCSI DATA overflow for fabric using"
3442 " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n"); 3439 " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
3443 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 3440 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3444 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; 3441 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
3445 return -EINVAL; 3442 return -EINVAL;
3446 } 3443 }
3447 3444
3448 cmd->t_data_sg = sgl; 3445 cmd->t_data_sg = sgl;
3449 cmd->t_data_nents = sgl_count; 3446 cmd->t_data_nents = sgl_count;
3450 3447
3451 if (sgl_bidi && sgl_bidi_count) { 3448 if (sgl_bidi && sgl_bidi_count) {
3452 cmd->t_bidi_data_sg = sgl_bidi; 3449 cmd->t_bidi_data_sg = sgl_bidi;
3453 cmd->t_bidi_data_nents = sgl_bidi_count; 3450 cmd->t_bidi_data_nents = sgl_bidi_count;
3454 } 3451 }
3455 cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; 3452 cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
3456 } 3453 }
3457 3454
3458 return 0; 3455 return 0;
3459 } 3456 }
3460 EXPORT_SYMBOL(transport_generic_map_mem_to_cmd); 3457 EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
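
A sketch of the fabric-side caller (hypothetical fabric function name, error handling trimmed; illustrative only): a fabric that owns its scatterlists hands them to the core before submission, so transport_generic_new_cmd() later skips its own page allocation.

    /* Hypothetical fabric code, for illustration only. */
    static int my_fabric_map_data(struct se_cmd *se_cmd,
    			      struct scatterlist *sgl, u32 sgl_count)
    {
    	int ret;

    	/* No BIDI payload in this example, hence NULL/0 */
    	ret = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
    					       NULL, 0);
    	if (ret < 0)
    		return ret;

    	/* ... continue with the fabric's normal submission path ... */
    	return 0;
    }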
3461 3458
3462 void *transport_kmap_first_data_page(struct se_cmd *cmd) 3459 void *transport_kmap_first_data_page(struct se_cmd *cmd)
3463 { 3460 {
3464 struct scatterlist *sg = cmd->t_data_sg; 3461 struct scatterlist *sg = cmd->t_data_sg;
3465 3462
3466 BUG_ON(!sg); 3463 BUG_ON(!sg);
3467 /* 3464 /*
3468 * We need to take into account a possible offset here for fabrics like 3465 * We need to take into account a possible offset here for fabrics like
3469 * tcm_loop, which may be using a contig buffer from the SCSI midlayer for 3466 * tcm_loop, which may be using a contig buffer from the SCSI midlayer for
3470 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd() 3467 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
3471 */ 3468 */
3472 return kmap(sg_page(sg)) + sg->offset; 3469 return kmap(sg_page(sg)) + sg->offset;
3473 } 3470 }
3474 EXPORT_SYMBOL(transport_kmap_first_data_page); 3471 EXPORT_SYMBOL(transport_kmap_first_data_page);
3475 3472
3476 void transport_kunmap_first_data_page(struct se_cmd *cmd) 3473 void transport_kunmap_first_data_page(struct se_cmd *cmd)
3477 { 3474 {
3478 kunmap(sg_page(cmd->t_data_sg)); 3475 kunmap(sg_page(cmd->t_data_sg));
3479 } 3476 }
3480 EXPORT_SYMBOL(transport_kunmap_first_data_page); 3477 EXPORT_SYMBOL(transport_kunmap_first_data_page);
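
The kmap/kunmap pair above is meant to bracket short accesses to the start of the data buffer. A hedged sketch of a consumer (hypothetical helper name; assumes len does not cross the first page, including sg->offset):

    /* Hypothetical helper, for illustration only. */
    static void my_peek_payload(struct se_cmd *cmd, unsigned char *dst, u32 len)
    {
    	unsigned char *buf = transport_kmap_first_data_page(cmd);

    	memcpy(dst, buf, len);	/* len must fit within the first page */
    	transport_kunmap_first_data_page(cmd);
    }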
3481 3478
3482 static int 3479 static int
3483 transport_generic_get_mem(struct se_cmd *cmd) 3480 transport_generic_get_mem(struct se_cmd *cmd)
3484 { 3481 {
3485 u32 length = cmd->data_length; 3482 u32 length = cmd->data_length;
3486 unsigned int nents; 3483 unsigned int nents;
3487 struct page *page; 3484 struct page *page;
3488 int i = 0; 3485 int i = 0;
3489 3486
3490 nents = DIV_ROUND_UP(length, PAGE_SIZE); 3487 nents = DIV_ROUND_UP(length, PAGE_SIZE);
3491 cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL); 3488 cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL);
3492 if (!cmd->t_data_sg) 3489 if (!cmd->t_data_sg)
3493 return -ENOMEM; 3490 return -ENOMEM;
3494 3491
3495 cmd->t_data_nents = nents; 3492 cmd->t_data_nents = nents;
3496 sg_init_table(cmd->t_data_sg, nents); 3493 sg_init_table(cmd->t_data_sg, nents);
3497 3494
3498 while (length) { 3495 while (length) {
3499 u32 page_len = min_t(u32, length, PAGE_SIZE); 3496 u32 page_len = min_t(u32, length, PAGE_SIZE);
3500 page = alloc_page(GFP_KERNEL | __GFP_ZERO); 3497 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
3501 if (!page) 3498 if (!page)
3502 goto out; 3499 goto out;
3503 3500
3504 sg_set_page(&cmd->t_data_sg[i], page, page_len, 0); 3501 sg_set_page(&cmd->t_data_sg[i], page, page_len, 0);
3505 length -= page_len; 3502 length -= page_len;
3506 i++; 3503 i++;
3507 } 3504 }
3508 return 0; 3505 return 0;
3509 3506
3510 out: 3507 out:
3511 while (i >= 0) { 3508 while (i >= 0) {
3512 __free_page(sg_page(&cmd->t_data_sg[i])); 3509 __free_page(sg_page(&cmd->t_data_sg[i]));
3513 i--; 3510 i--;
3514 } 3511 }
3515 kfree(cmd->t_data_sg); 3512 kfree(cmd->t_data_sg);
3516 cmd->t_data_sg = NULL; 3513 cmd->t_data_sg = NULL;
3517 return -ENOMEM; 3514 return -ENOMEM;
3518 } 3515 }
3519 3516
3520 /* Reduce sectors if they are too long for the device */ 3517 /* Reduce sectors if they are too long for the device */
3521 static inline sector_t transport_limit_task_sectors( 3518 static inline sector_t transport_limit_task_sectors(
3522 struct se_device *dev, 3519 struct se_device *dev,
3523 unsigned long long lba, 3520 unsigned long long lba,
3524 sector_t sectors) 3521 sector_t sectors)
3525 { 3522 {
3526 sectors = min_t(sector_t, sectors, dev->se_sub_dev->se_dev_attrib.max_sectors); 3523 sectors = min_t(sector_t, sectors, dev->se_sub_dev->se_dev_attrib.max_sectors);
3527 3524
3528 if (dev->transport->get_device_type(dev) == TYPE_DISK) 3525 if (dev->transport->get_device_type(dev) == TYPE_DISK)
3529 if ((lba + sectors) > transport_dev_end_lba(dev)) 3526 if ((lba + sectors) > transport_dev_end_lba(dev))
3530 sectors = ((transport_dev_end_lba(dev) - lba) + 1); 3527 sectors = ((transport_dev_end_lba(dev) - lba) + 1);
3531 3528
3532 return sectors; 3529 return sectors;
3533 } 3530 }
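
A quick numeric check of the end-of-device trim above, assuming a hypothetical disk whose last addressable LBA is 1000000 and sectors already clamped to max_sectors (user-space demo, illustrative only):

    #include <stdio.h>

    int main(void)
    {
    	unsigned long long end_lba = 1000000;	/* last addressable LBA */
    	unsigned long long lba = 999997;
    	unsigned long long sectors = 1024;	/* already <= max_sectors */

    	if (lba + sectors > end_lba)
    		sectors = (end_lba - lba) + 1;	/* trim to device end */

    	printf("sectors=%llu\n", sectors);	/* 4: LBAs 999997..1000000 */
    	return 0;
    }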
3534 3531
3535 3532
3536 /* 3533 /*
3537 * This function can be used by HW target mode drivers to create a linked 3534 * This function can be used by HW target mode drivers to create a linked
3538 * scatterlist from all contiguously allocated struct se_task->task_sg[]. 3535 * scatterlist from all contiguously allocated struct se_task->task_sg[].
3539 * This is intended to be called during the completion path by TCM Core 3536 * This is intended to be called during the completion path by TCM Core
3540 * when struct target_core_fabric_ops->check_task_sg_chaining is enabled. 3537 * when struct target_core_fabric_ops->check_task_sg_chaining is enabled.
3541 */ 3538 */
3542 void transport_do_task_sg_chain(struct se_cmd *cmd) 3539 void transport_do_task_sg_chain(struct se_cmd *cmd)
3543 { 3540 {
3544 struct scatterlist *sg_first = NULL; 3541 struct scatterlist *sg_first = NULL;
3545 struct scatterlist *sg_prev = NULL; 3542 struct scatterlist *sg_prev = NULL;
3546 int sg_prev_nents = 0; 3543 int sg_prev_nents = 0;
3547 struct scatterlist *sg; 3544 struct scatterlist *sg;
3548 struct se_task *task; 3545 struct se_task *task;
3549 u32 chained_nents = 0; 3546 u32 chained_nents = 0;
3550 int i; 3547 int i;
3551 3548
3552 BUG_ON(!cmd->se_tfo->task_sg_chaining); 3549 BUG_ON(!cmd->se_tfo->task_sg_chaining);
3553 3550
3554 /* 3551 /*
3555 * Walk the struct se_task list and setup scatterlist chains 3552 * Walk the struct se_task list and setup scatterlist chains
3556 * for each contiguously allocated struct se_task->task_sg[]. 3553 * for each contiguously allocated struct se_task->task_sg[].
3557 */ 3554 */
3558 list_for_each_entry(task, &cmd->t_task_list, t_list) { 3555 list_for_each_entry(task, &cmd->t_task_list, t_list) {
3559 if (!task->task_sg) 3556 if (!task->task_sg)
3560 continue; 3557 continue;
3561 3558
3562 if (!sg_first) { 3559 if (!sg_first) {
3563 sg_first = task->task_sg; 3560 sg_first = task->task_sg;
3564 chained_nents = task->task_sg_nents; 3561 chained_nents = task->task_sg_nents;
3565 } else { 3562 } else {
3566 sg_chain(sg_prev, sg_prev_nents, task->task_sg); 3563 sg_chain(sg_prev, sg_prev_nents, task->task_sg);
3567 chained_nents += task->task_sg_nents; 3564 chained_nents += task->task_sg_nents;
3568 } 3565 }
3569 /* 3566 /*
3570 * For the padded tasks, use the extra SGL vector allocated 3567 * For the padded tasks, use the extra SGL vector allocated
3571 * in transport_allocate_data_tasks() for the sg_prev_nents 3568 * in transport_allocate_data_tasks() for the sg_prev_nents
3572 * offset into sg_chain() above. 3569 * offset into sg_chain() above.
3573 * 3570 *
3574 * We do not need the padding for the last task (or a single 3571 * We do not need the padding for the last task (or a single
3575 * task), but in that case we will never use the sg_prev_nents 3572 * task), but in that case we will never use the sg_prev_nents
3576 * value below which would be incorrect. 3573 * value below which would be incorrect.
3577 */ 3574 */
3578 sg_prev_nents = (task->task_sg_nents + 1); 3575 sg_prev_nents = (task->task_sg_nents + 1);
3579 sg_prev = task->task_sg; 3576 sg_prev = task->task_sg;
3580 } 3577 }
3581 /* 3578 /*
3582 * Setup the starting pointer and total t_tasks_sg_linked_no including 3579 * Setup the starting pointer and total t_tasks_sg_linked_no including
3583 * padding SGs for linking and to mark the end. 3580 * padding SGs for linking and to mark the end.
3584 */ 3581 */
3585 cmd->t_tasks_sg_chained = sg_first; 3582 cmd->t_tasks_sg_chained = sg_first;
3586 cmd->t_tasks_sg_chained_no = chained_nents; 3583 cmd->t_tasks_sg_chained_no = chained_nents;
3587 3584
3588 pr_debug("Setup cmd: %p cmd->t_tasks_sg_chained: %p and" 3585 pr_debug("Setup cmd: %p cmd->t_tasks_sg_chained: %p and"
3589 " t_tasks_sg_chained_no: %u\n", cmd, cmd->t_tasks_sg_chained, 3586 " t_tasks_sg_chained_no: %u\n", cmd, cmd->t_tasks_sg_chained,
3590 cmd->t_tasks_sg_chained_no); 3587 cmd->t_tasks_sg_chained_no);
3591 3588
3592 for_each_sg(cmd->t_tasks_sg_chained, sg, 3589 for_each_sg(cmd->t_tasks_sg_chained, sg,
3593 cmd->t_tasks_sg_chained_no, i) { 3590 cmd->t_tasks_sg_chained_no, i) {
3594 3591
3595 pr_debug("SG[%d]: %p page: %p length: %d offset: %d\n", 3592 pr_debug("SG[%d]: %p page: %p length: %d offset: %d\n",
3596 i, sg, sg_page(sg), sg->length, sg->offset); 3593 i, sg, sg_page(sg), sg->length, sg->offset);
3597 if (sg_is_chain(sg)) 3594 if (sg_is_chain(sg))
3598 pr_debug("SG: %p sg_is_chain=1\n", sg); 3595 pr_debug("SG: %p sg_is_chain=1\n", sg);
3599 if (sg_is_last(sg)) 3596 if (sg_is_last(sg))
3600 pr_debug("SG: %p sg_is_last=1\n", sg); 3597 pr_debug("SG: %p sg_is_last=1\n", sg);
3601 } 3598 }
3602 } 3599 }
3603 EXPORT_SYMBOL(transport_do_task_sg_chain); 3600 EXPORT_SYMBOL(transport_do_task_sg_chain);
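
On the consumer side, a HW fabric that enabled task_sg_chaining would typically walk the chained list with for_each_sg(), which transparently follows the chain entries inserted above (hypothetical fabric function, illustrative only):

    /* Hypothetical consumer, for illustration only. */
    static void my_fabric_program_sgl(struct se_cmd *cmd)
    {
    	struct scatterlist *sg;
    	int i;

    	for_each_sg(cmd->t_tasks_sg_chained, sg,
    		    cmd->t_tasks_sg_chained_no, i) {
    		/* hand sg_page(sg), sg->length and sg->offset to the HW */
    	}
    }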
3604 3601
3605 /* 3602 /*
3606 * Break up cmd into chunks transport can handle 3603 * Break up cmd into chunks transport can handle
3607 */ 3604 */
3608 static int 3605 static int
3609 transport_allocate_data_tasks(struct se_cmd *cmd, 3606 transport_allocate_data_tasks(struct se_cmd *cmd,
3610 enum dma_data_direction data_direction, 3607 enum dma_data_direction data_direction,
3611 struct scatterlist *cmd_sg, unsigned int sgl_nents) 3608 struct scatterlist *cmd_sg, unsigned int sgl_nents)
3612 { 3609 {
3613 struct se_device *dev = cmd->se_dev; 3610 struct se_device *dev = cmd->se_dev;
3614 int task_count, i; 3611 int task_count, i;
3615 unsigned long long lba; 3612 unsigned long long lba;
3616 sector_t sectors, dev_max_sectors; 3613 sector_t sectors, dev_max_sectors;
3617 u32 sector_size; 3614 u32 sector_size;
3618 3615
3619 if (transport_cmd_get_valid_sectors(cmd) < 0) 3616 if (transport_cmd_get_valid_sectors(cmd) < 0)
3620 return -EINVAL; 3617 return -EINVAL;
3621 3618
3622 dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors; 3619 dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors;
3623 sector_size = dev->se_sub_dev->se_dev_attrib.block_size; 3620 sector_size = dev->se_sub_dev->se_dev_attrib.block_size;
3624 3621
3625 WARN_ON(cmd->data_length % sector_size); 3622 WARN_ON(cmd->data_length % sector_size);
3626 3623
3627 lba = cmd->t_task_lba; 3624 lba = cmd->t_task_lba;
3628 sectors = DIV_ROUND_UP(cmd->data_length, sector_size); 3625 sectors = DIV_ROUND_UP(cmd->data_length, sector_size);
3629 task_count = DIV_ROUND_UP_SECTOR_T(sectors, dev_max_sectors); 3626 task_count = DIV_ROUND_UP_SECTOR_T(sectors, dev_max_sectors);
3630 3627
3631 /* 3628 /*
3632 * If we need just a single task, reuse the SG list in the command 3629 * If we need just a single task, reuse the SG list in the command
3633 * and avoid a lot of work. 3630 * and avoid a lot of work.
3634 */ 3631 */
3635 if (task_count == 1) { 3632 if (task_count == 1) {
3636 struct se_task *task; 3633 struct se_task *task;
3637 unsigned long flags; 3634 unsigned long flags;
3638 3635
3639 task = transport_generic_get_task(cmd, data_direction); 3636 task = transport_generic_get_task(cmd, data_direction);
3640 if (!task) 3637 if (!task)
3641 return -ENOMEM; 3638 return -ENOMEM;
3642 3639
3643 task->task_sg = cmd_sg; 3640 task->task_sg = cmd_sg;
3644 task->task_sg_nents = sgl_nents; 3641 task->task_sg_nents = sgl_nents;
3645 3642
3646 task->task_lba = lba; 3643 task->task_lba = lba;
3647 task->task_sectors = sectors; 3644 task->task_sectors = sectors;
3648 task->task_size = task->task_sectors * sector_size; 3645 task->task_size = task->task_sectors * sector_size;
3649 3646
3650 spin_lock_irqsave(&cmd->t_state_lock, flags); 3647 spin_lock_irqsave(&cmd->t_state_lock, flags);
3651 list_add_tail(&task->t_list, &cmd->t_task_list); 3648 list_add_tail(&task->t_list, &cmd->t_task_list);
3652 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3649 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3653 3650
3654 return task_count; 3651 return task_count;
3655 } 3652 }
3656 3653
3657 for (i = 0; i < task_count; i++) { 3654 for (i = 0; i < task_count; i++) {
3658 struct se_task *task; 3655 struct se_task *task;
3659 unsigned int task_size, task_sg_nents_padded; 3656 unsigned int task_size, task_sg_nents_padded;
3660 struct scatterlist *sg; 3657 struct scatterlist *sg;
3661 unsigned long flags; 3658 unsigned long flags;
3662 int count; 3659 int count;
3663 3660
3664 task = transport_generic_get_task(cmd, data_direction); 3661 task = transport_generic_get_task(cmd, data_direction);
3665 if (!task) 3662 if (!task)
3666 return -ENOMEM; 3663 return -ENOMEM;
3667 3664
3668 task->task_lba = lba; 3665 task->task_lba = lba;
3669 task->task_sectors = min(sectors, dev_max_sectors); 3666 task->task_sectors = min(sectors, dev_max_sectors);
3670 task->task_size = task->task_sectors * sector_size; 3667 task->task_size = task->task_sectors * sector_size;
3671 3668
3672 /* 3669 /*
3673 * This now assumes that the passed sg_ents are in PAGE_SIZE chunks 3670 * This now assumes that the passed sg_ents are in PAGE_SIZE chunks
3674 * in order to calculate the number of per-task SGL entries 3671 * in order to calculate the number of per-task SGL entries
3675 */ 3672 */
3676 task->task_sg_nents = DIV_ROUND_UP(task->task_size, PAGE_SIZE); 3673 task->task_sg_nents = DIV_ROUND_UP(task->task_size, PAGE_SIZE);
3677 /* 3674 /*
3678 * Check if the fabric module driver is requesting that all 3675 * Check if the fabric module driver is requesting that all
3679 * struct se_task->task_sg[] be chained together.  If so, 3676 * struct se_task->task_sg[] be chained together.  If so,
3680 * allocate an extra padding SG entry for linking and for 3677 * allocate an extra padding SG entry for linking and for
3681 * marking the end of the chained SGL for every task except 3678 * marking the end of the chained SGL for every task except
3682 * the last one in a (task_count > 1) operation; skip the 3679 * the last one in a (task_count > 1) operation; skip the
3683 * extra padding in the (task_count == 1) case. 3680 * extra padding in the (task_count == 1) case.
3684 */ 3681 */
3685 if (cmd->se_tfo->task_sg_chaining && (i < (task_count - 1))) { 3682 if (cmd->se_tfo->task_sg_chaining && (i < (task_count - 1))) {
3686 task_sg_nents_padded = (task->task_sg_nents + 1); 3683 task_sg_nents_padded = (task->task_sg_nents + 1);
3687 } else 3684 } else
3688 task_sg_nents_padded = task->task_sg_nents; 3685 task_sg_nents_padded = task->task_sg_nents;
3689 3686
3690 task->task_sg = kmalloc(sizeof(struct scatterlist) * 3687 task->task_sg = kmalloc(sizeof(struct scatterlist) *
3691 task_sg_nents_padded, GFP_KERNEL); 3688 task_sg_nents_padded, GFP_KERNEL);
3692 if (!task->task_sg) { 3689 if (!task->task_sg) {
3693 cmd->se_dev->transport->free_task(task); 3690 cmd->se_dev->transport->free_task(task);
3694 return -ENOMEM; 3691 return -ENOMEM;
3695 } 3692 }
3696 3693
3697 sg_init_table(task->task_sg, task_sg_nents_padded); 3694 sg_init_table(task->task_sg, task_sg_nents_padded);
3698 3695
3699 task_size = task->task_size; 3696 task_size = task->task_size;
3700 3697
3701 /* Build new sgl, only up to task_size */ 3698 /* Build new sgl, only up to task_size */
3702 for_each_sg(task->task_sg, sg, task->task_sg_nents, count) { 3699 for_each_sg(task->task_sg, sg, task->task_sg_nents, count) {
3703 if (cmd_sg->length > task_size) 3700 if (cmd_sg->length > task_size)
3704 break; 3701 break;
3705 3702
3706 *sg = *cmd_sg; 3703 *sg = *cmd_sg;
3707 task_size -= cmd_sg->length; 3704 task_size -= cmd_sg->length;
3708 cmd_sg = sg_next(cmd_sg); 3705 cmd_sg = sg_next(cmd_sg);
3709 } 3706 }
3710 3707
3711 lba += task->task_sectors; 3708 lba += task->task_sectors;
3712 sectors -= task->task_sectors; 3709 sectors -= task->task_sectors;
3713 3710
3714 spin_lock_irqsave(&cmd->t_state_lock, flags); 3711 spin_lock_irqsave(&cmd->t_state_lock, flags);
3715 list_add_tail(&task->t_list, &cmd->t_task_list); 3712 list_add_tail(&task->t_list, &cmd->t_task_list);
3716 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3713 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3717 } 3714 }
3718 3715
3719 return task_count; 3716 return task_count;
3720 } 3717 }
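
To make the split arithmetic concrete, assume block_size = 512, max_sectors = 1024 and a 1 MiB transfer (user-space demo of the same DIV_ROUND_UP math, illustrative only):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
    	unsigned int block_size = 512, max_sectors = 1024;
    	unsigned int data_length = 1024 * 1024;	/* 1 MiB transfer */
    	unsigned int sectors = DIV_ROUND_UP(data_length, block_size);
    	unsigned int task_count = DIV_ROUND_UP(sectors, max_sectors);

    	/* 2048 sectors split into 2 tasks of 1024 sectors (512 KiB) each */
    	printf("sectors=%u task_count=%u\n", sectors, task_count);
    	return 0;
    }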
3721 3718
3722 static int 3719 static int
3723 transport_allocate_control_task(struct se_cmd *cmd) 3720 transport_allocate_control_task(struct se_cmd *cmd)
3724 { 3721 {
3725 struct se_task *task; 3722 struct se_task *task;
3726 unsigned long flags; 3723 unsigned long flags;
3727 3724
3728 task = transport_generic_get_task(cmd, cmd->data_direction); 3725 task = transport_generic_get_task(cmd, cmd->data_direction);
3729 if (!task) 3726 if (!task)
3730 return -ENOMEM; 3727 return -ENOMEM;
3731 3728
3732 task->task_sg = cmd->t_data_sg; 3729 task->task_sg = cmd->t_data_sg;
3733 task->task_size = cmd->data_length; 3730 task->task_size = cmd->data_length;
3734 task->task_sg_nents = cmd->t_data_nents; 3731 task->task_sg_nents = cmd->t_data_nents;
3735 3732
3736 spin_lock_irqsave(&cmd->t_state_lock, flags); 3733 spin_lock_irqsave(&cmd->t_state_lock, flags);
3737 list_add_tail(&task->t_list, &cmd->t_task_list); 3734 list_add_tail(&task->t_list, &cmd->t_task_list);
3738 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3735 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3739 3736
3740 /* Success! Return number of tasks allocated */ 3737 /* Success! Return number of tasks allocated */
3741 return 1; 3738 return 1;
3742 } 3739 }
3743 3740
3744 /* 3741 /*
3745 * Allocate any required resources to execute the command, and either place 3742 * Allocate any required resources to execute the command, and either place
3746 * it on the execution queue if possible. For writes we might not have the 3743 * it on the execution queue if possible. For writes we might not have the
3747 * payload yet, thus notify the fabric via a call to ->write_pending instead. 3744 * payload yet, thus notify the fabric via a call to ->write_pending instead.
3748 */ 3745 */
3749 int transport_generic_new_cmd(struct se_cmd *cmd) 3746 int transport_generic_new_cmd(struct se_cmd *cmd)
3750 { 3747 {
3751 struct se_device *dev = cmd->se_dev; 3748 struct se_device *dev = cmd->se_dev;
3752 int task_cdbs, task_cdbs_bidi = 0; 3749 int task_cdbs, task_cdbs_bidi = 0;
3753 int set_counts = 1; 3750 int set_counts = 1;
3754 int ret = 0; 3751 int ret = 0;
3755 3752
3756 /* 3753 /*
3757 * Determine if the TCM fabric module has already allocated physical 3754 * Determine if the TCM fabric module has already allocated physical
3758 * memory, and is directly calling transport_generic_map_mem_to_cmd() 3755 * memory, and is directly calling transport_generic_map_mem_to_cmd()
3759 * beforehand. 3756 * beforehand.
3760 */ 3757 */
3761 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) && 3758 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
3762 cmd->data_length) { 3759 cmd->data_length) {
3763 ret = transport_generic_get_mem(cmd); 3760 ret = transport_generic_get_mem(cmd);
3764 if (ret < 0) 3761 if (ret < 0)
3765 goto out_fail; 3762 goto out_fail;
3766 } 3763 }
3767 3764
3768 /* 3765 /*
3769 * For BIDI command set up the read tasks first. 3766 * For BIDI command set up the read tasks first.
3770 */ 3767 */
3771 if (cmd->t_bidi_data_sg && 3768 if (cmd->t_bidi_data_sg &&
3772 dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) { 3769 dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
3773 BUG_ON(!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)); 3770 BUG_ON(!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB));
3774 3771
3775 task_cdbs_bidi = transport_allocate_data_tasks(cmd, 3772 task_cdbs_bidi = transport_allocate_data_tasks(cmd,
3776 DMA_FROM_DEVICE, cmd->t_bidi_data_sg, 3773 DMA_FROM_DEVICE, cmd->t_bidi_data_sg,
3777 cmd->t_bidi_data_nents); 3774 cmd->t_bidi_data_nents);
3778 if (task_cdbs_bidi <= 0) 3775 if (task_cdbs_bidi <= 0)
3779 goto out_fail; 3776 goto out_fail;
3780 3777
3781 atomic_inc(&cmd->t_fe_count); 3778 atomic_inc(&cmd->t_fe_count);
3782 atomic_inc(&cmd->t_se_count); 3779 atomic_inc(&cmd->t_se_count);
3783 set_counts = 0; 3780 set_counts = 0;
3784 } 3781 }
3785 3782
3786 if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { 3783 if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
3787 task_cdbs = transport_allocate_data_tasks(cmd, 3784 task_cdbs = transport_allocate_data_tasks(cmd,
3788 cmd->data_direction, cmd->t_data_sg, 3785 cmd->data_direction, cmd->t_data_sg,
3789 cmd->t_data_nents); 3786 cmd->t_data_nents);
3790 } else { 3787 } else {
3791 task_cdbs = transport_allocate_control_task(cmd); 3788 task_cdbs = transport_allocate_control_task(cmd);
3792 } 3789 }
3793 3790
3794 if (task_cdbs < 0) 3791 if (task_cdbs < 0)
3795 goto out_fail; 3792 goto out_fail;
3796 else if (!task_cdbs && (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) { 3793 else if (!task_cdbs && (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
3797 cmd->t_state = TRANSPORT_COMPLETE; 3794 cmd->t_state = TRANSPORT_COMPLETE;
3798 atomic_set(&cmd->t_transport_active, 1); 3795 atomic_set(&cmd->t_transport_active, 1);
3799 INIT_WORK(&cmd->work, target_complete_ok_work); 3796 INIT_WORK(&cmd->work, target_complete_ok_work);
3800 queue_work(target_completion_wq, &cmd->work); 3797 queue_work(target_completion_wq, &cmd->work);
3801 return 0; 3798 return 0;
3802 } 3799 }
3803 3800
3804 if (set_counts) { 3801 if (set_counts) {
3805 atomic_inc(&cmd->t_fe_count); 3802 atomic_inc(&cmd->t_fe_count);
3806 atomic_inc(&cmd->t_se_count); 3803 atomic_inc(&cmd->t_se_count);
3807 } 3804 }
3808 3805
3809 cmd->t_task_list_num = (task_cdbs + task_cdbs_bidi); 3806 cmd->t_task_list_num = (task_cdbs + task_cdbs_bidi);
3810 atomic_set(&cmd->t_task_cdbs_left, cmd->t_task_list_num); 3807 atomic_set(&cmd->t_task_cdbs_left, cmd->t_task_list_num);
3811 atomic_set(&cmd->t_task_cdbs_ex_left, cmd->t_task_list_num); 3808 atomic_set(&cmd->t_task_cdbs_ex_left, cmd->t_task_list_num);
3812 3809
3813 /* 3810 /*
3814 * For WRITEs, let the fabric know its buffer is ready. 3811 * For WRITEs, let the fabric know its buffer is ready.
3815 * This WRITE struct se_cmd (and all of its associated struct se_task's) 3812 * This WRITE struct se_cmd (and all of its associated struct se_task's)
3816 * will be added to the struct se_device execution queue after its WRITE 3813 * will be added to the struct se_device execution queue after its WRITE
3817 * data has arrived. (ie: It gets handled by the transport processing 3814 * data has arrived. (ie: It gets handled by the transport processing
3818 * thread a second time) 3815 * thread a second time)
3819 */ 3816 */
3820 if (cmd->data_direction == DMA_TO_DEVICE) { 3817 if (cmd->data_direction == DMA_TO_DEVICE) {
3821 transport_add_tasks_to_state_queue(cmd); 3818 transport_add_tasks_to_state_queue(cmd);
3822 return transport_generic_write_pending(cmd); 3819 return transport_generic_write_pending(cmd);
3823 } 3820 }
3824 /* 3821 /*
3825 * Everything else but a WRITE, add the struct se_cmd's struct se_task's 3822 * Everything else but a WRITE, add the struct se_cmd's struct se_task's
3826 * to the execution queue. 3823 * to the execution queue.
3827 */ 3824 */
3828 transport_execute_tasks(cmd); 3825 transport_execute_tasks(cmd);
3829 return 0; 3826 return 0;
3830 3827
3831 out_fail: 3828 out_fail:
3832 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 3829 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3833 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 3830 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3834 return -EINVAL; 3831 return -EINVAL;
3835 } 3832 }
3836 EXPORT_SYMBOL(transport_generic_new_cmd); 3833 EXPORT_SYMBOL(transport_generic_new_cmd);
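For context, a fabric module drives this entry point once the CDB has been mapped. A minimal, hypothetical caller might look like the sketch below; example_fabric_submit and its surrounding driver are assumptions, only transport_generic_new_cmd() and the target_core_fabric.h include introduced by this commit come from the tree:

#include <target/target_core_fabric.h>

/*
 * Hypothetical fabric-side submission sketch.  A negative return means
 * the core flagged SCF_SCSI_CDB_EXCEPTION and will send sense data; a
 * DMA_TO_DEVICE command is parked until ->write_pending() supplies the
 * payload and transport_generic_process_write() resumes it.
 */
static int example_fabric_submit(struct se_cmd *se_cmd)
{
	int ret;

	ret = transport_generic_new_cmd(se_cmd);
	if (ret < 0)
		return ret;	/* CHECK_CONDITION path already queued */
	return 0;
}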
3837 3834
3838 /* transport_generic_process_write(): 3835 /* transport_generic_process_write():
3839 * 3836 *
3840 * 3837 *
3841 */ 3838 */
3842 void transport_generic_process_write(struct se_cmd *cmd) 3839 void transport_generic_process_write(struct se_cmd *cmd)
3843 { 3840 {
3844 transport_execute_tasks(cmd); 3841 transport_execute_tasks(cmd);
3845 } 3842 }
3846 EXPORT_SYMBOL(transport_generic_process_write); 3843 EXPORT_SYMBOL(transport_generic_process_write);
3847 3844
3848 static void transport_write_pending_qf(struct se_cmd *cmd) 3845 static void transport_write_pending_qf(struct se_cmd *cmd)
3849 { 3846 {
3850 int ret; 3847 int ret;
3851 3848
3852 ret = cmd->se_tfo->write_pending(cmd); 3849 ret = cmd->se_tfo->write_pending(cmd);
3853 if (ret == -EAGAIN || ret == -ENOMEM) { 3850 if (ret == -EAGAIN || ret == -ENOMEM) {
3854 pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", 3851 pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n",
3855 cmd); 3852 cmd);
3856 transport_handle_queue_full(cmd, cmd->se_dev); 3853 transport_handle_queue_full(cmd, cmd->se_dev);
3857 } 3854 }
3858 } 3855 }
3859 3856
3860 static int transport_generic_write_pending(struct se_cmd *cmd) 3857 static int transport_generic_write_pending(struct se_cmd *cmd)
3861 { 3858 {
3862 unsigned long flags; 3859 unsigned long flags;
3863 int ret; 3860 int ret;
3864 3861
3865 spin_lock_irqsave(&cmd->t_state_lock, flags); 3862 spin_lock_irqsave(&cmd->t_state_lock, flags);
3866 cmd->t_state = TRANSPORT_WRITE_PENDING; 3863 cmd->t_state = TRANSPORT_WRITE_PENDING;
3867 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3864 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3868 3865
3869 /* 3866 /*
3870 * Clear the se_cmd for WRITE_PENDING status in order to set 3867 * Clear the se_cmd for WRITE_PENDING status in order to set
3871 * cmd->t_transport_active=0 so that transport_generic_handle_data 3868 * cmd->t_transport_active=0 so that transport_generic_handle_data
3872 * can be called from HW target mode interrupt code. It is safe 3869 * can be called from HW target mode interrupt code. It is safe
3873 * to call this with transport_off=1 before cmd->se_tfo->write_pending() 3870 * to call this with transport_off=1 before cmd->se_tfo->write_pending()
3874 * because the se_cmd->se_lun pointer is not being cleared. 3871 * because the se_cmd->se_lun pointer is not being cleared.
3875 */ 3872 */
3876 transport_cmd_check_stop(cmd, 1, 0); 3873 transport_cmd_check_stop(cmd, 1, 0);
3877 3874
3878 /* 3875 /*
3879 * Call the fabric write_pending function here to let the 3876 * Call the fabric write_pending function here to let the
3880 * frontend know that WRITE buffers are ready. 3877 * frontend know that WRITE buffers are ready.
3881 */ 3878 */
3882 ret = cmd->se_tfo->write_pending(cmd); 3879 ret = cmd->se_tfo->write_pending(cmd);
3883 if (ret == -EAGAIN || ret == -ENOMEM) 3880 if (ret == -EAGAIN || ret == -ENOMEM)
3884 goto queue_full; 3881 goto queue_full;
3885 else if (ret < 0) 3882 else if (ret < 0)
3886 return ret; 3883 return ret;
3887 3884
3888 return 1; 3885 return 1;
3889 3886
3890 queue_full: 3887 queue_full:
3891 pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd); 3888 pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
3892 cmd->t_state = TRANSPORT_COMPLETE_QF_WP; 3889 cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
3893 transport_handle_queue_full(cmd, cmd->se_dev); 3890 transport_handle_queue_full(cmd, cmd->se_dev);
3894 return 0; 3891 return 0;
3895 } 3892 }
3896 3893
3897 void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) 3894 void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
3898 { 3895 {
3899 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) { 3896 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
3900 if (wait_for_tasks && cmd->se_tmr_req) 3897 if (wait_for_tasks && cmd->se_tmr_req)
3901 transport_wait_for_tasks(cmd); 3898 transport_wait_for_tasks(cmd);
3902 3899
3903 transport_release_cmd(cmd); 3900 transport_release_cmd(cmd);
3904 } else { 3901 } else {
3905 if (wait_for_tasks) 3902 if (wait_for_tasks)
3906 transport_wait_for_tasks(cmd); 3903 transport_wait_for_tasks(cmd);
3907 3904
3908 core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd); 3905 core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);
3909 3906
3910 if (cmd->se_lun) 3907 if (cmd->se_lun)
3911 transport_lun_remove_cmd(cmd); 3908 transport_lun_remove_cmd(cmd);
3912 3909
3913 transport_free_dev_tasks(cmd); 3910 transport_free_dev_tasks(cmd);
3914 3911
3915 transport_put_cmd(cmd); 3912 transport_put_cmd(cmd);
3916 } 3913 }
3917 } 3914 }
3918 EXPORT_SYMBOL(transport_generic_free_cmd); 3915 EXPORT_SYMBOL(transport_generic_free_cmd);
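The wait_for_tasks argument makes the release synchronous with respect to any in-flight tasks, so a fabric's release path can plausibly be as small as this sketch (the wrapper name is an assumption):

/* Hypothetical fabric release: drain, then drop the core's references. */
static void example_fabric_free_cmd(struct se_cmd *se_cmd)
{
	transport_generic_free_cmd(se_cmd, 1 /* wait_for_tasks */);
}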
3919 3916
3920 /* target_get_sess_cmd - Add command to active ->sess_cmd_list 3917 /* target_get_sess_cmd - Add command to active ->sess_cmd_list
3921 * @se_sess: session to reference 3918 * @se_sess: session to reference
3922 * @se_cmd: command descriptor to add 3919 * @se_cmd: command descriptor to add
3923 */ 3920 */
3924 void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd) 3921 void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
3925 { 3922 {
3926 unsigned long flags; 3923 unsigned long flags;
3927 3924
3928 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 3925 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
3929 list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); 3926 list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
3930 se_cmd->check_release = 1; 3927 se_cmd->check_release = 1;
3931 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 3928 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
3932 } 3929 }
3933 EXPORT_SYMBOL(target_get_sess_cmd); 3930 EXPORT_SYMBOL(target_get_sess_cmd);
3934 3931
3935 /* target_put_sess_cmd - Check for active I/O shutdown or list delete 3932 /* target_put_sess_cmd - Check for active I/O shutdown or list delete
3936 * @se_sess: session to reference 3933 * @se_sess: session to reference
3937 * @se_cmd: command descriptor to drop 3934 * @se_cmd: command descriptor to drop
3938 */ 3935 */
3939 int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd) 3936 int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
3940 { 3937 {
3941 unsigned long flags; 3938 unsigned long flags;
3942 3939
3943 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 3940 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
3944 if (list_empty(&se_cmd->se_cmd_list)) { 3941 if (list_empty(&se_cmd->se_cmd_list)) {
3945 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 3942 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
3946 WARN_ON(1); 3943 WARN_ON(1);
3947 return 0; 3944 return 0;
3948 } 3945 }
3949 3946
3950 if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) { 3947 if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
3951 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 3948 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
3952 complete(&se_cmd->cmd_wait_comp); 3949 complete(&se_cmd->cmd_wait_comp);
3953 return 1; 3950 return 1;
3954 } 3951 }
3955 list_del(&se_cmd->se_cmd_list); 3952 list_del(&se_cmd->se_cmd_list);
3956 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 3953 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
3957 3954
3958 return 0; 3955 return 0;
3959 } 3956 }
3960 EXPORT_SYMBOL(target_put_sess_cmd); 3957 EXPORT_SYMBOL(target_put_sess_cmd);
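These two helpers are meant to bracket a descriptor's lifetime on the session list. A hedged sketch of the expected pairing from a fabric module follows; the callback names are assumptions, not part of this tree:

/* Hypothetical fabric usage: register the descriptor on submission... */
static void example_fabric_queue_cmd(struct se_session *se_sess,
				     struct se_cmd *se_cmd)
{
	target_get_sess_cmd(se_sess, se_cmd);
	/* ... hand se_cmd to the target core ... */
}

/* ... and drop it when the fabric releases the command. */
static void example_fabric_release_cmd(struct se_cmd *se_cmd)
{
	/*
	 * A return of 1 means session teardown is in flight and the
	 * waiter in target_wait_for_sess_cmds() now owns the release.
	 */
	if (target_put_sess_cmd(se_cmd->se_sess, se_cmd))
		return;
	/* ... free the fabric's per-command state here ... */
}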
3961 3958
3962 /* target_splice_sess_cmd_list - Split active cmds into sess_wait_list 3959 /* target_splice_sess_cmd_list - Split active cmds into sess_wait_list
3963 * @se_sess: session to split 3960 * @se_sess: session to split
3964 */ 3961 */
3965 void target_splice_sess_cmd_list(struct se_session *se_sess) 3962 void target_splice_sess_cmd_list(struct se_session *se_sess)
3966 { 3963 {
3967 struct se_cmd *se_cmd; 3964 struct se_cmd *se_cmd;
3968 unsigned long flags; 3965 unsigned long flags;
3969 3966
3970 WARN_ON(!list_empty(&se_sess->sess_wait_list)); 3967 WARN_ON(!list_empty(&se_sess->sess_wait_list));
3971 INIT_LIST_HEAD(&se_sess->sess_wait_list); 3968 INIT_LIST_HEAD(&se_sess->sess_wait_list);
3972 3969
3973 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 3970 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
3974 se_sess->sess_tearing_down = 1; 3971 se_sess->sess_tearing_down = 1;
3975 3972
3976 list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list); 3973 list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
3977 3974
3978 list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) 3975 list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
3979 se_cmd->cmd_wait_set = 1; 3976 se_cmd->cmd_wait_set = 1;
3980 3977
3981 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 3978 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
3982 } 3979 }
3983 EXPORT_SYMBOL(target_splice_sess_cmd_list); 3980 EXPORT_SYMBOL(target_splice_sess_cmd_list);
3984 3981
3985 /* target_wait_for_sess_cmds - Wait for outstanding descriptors 3982 /* target_wait_for_sess_cmds - Wait for outstanding descriptors
3986 * @se_sess: session to wait for active I/O 3983 * @se_sess: session to wait for active I/O
3987 * @wait_for_tasks: Make extra transport_wait_for_tasks call 3984 * @wait_for_tasks: Make extra transport_wait_for_tasks call
3988 */ 3985 */
3989 void target_wait_for_sess_cmds( 3986 void target_wait_for_sess_cmds(
3990 struct se_session *se_sess, 3987 struct se_session *se_sess,
3991 int wait_for_tasks) 3988 int wait_for_tasks)
3992 { 3989 {
3993 struct se_cmd *se_cmd, *tmp_cmd; 3990 struct se_cmd *se_cmd, *tmp_cmd;
3994 bool rc = false; 3991 bool rc = false;
3995 3992
3996 list_for_each_entry_safe(se_cmd, tmp_cmd, 3993 list_for_each_entry_safe(se_cmd, tmp_cmd,
3997 &se_sess->sess_wait_list, se_cmd_list) { 3994 &se_sess->sess_wait_list, se_cmd_list) {
3998 list_del(&se_cmd->se_cmd_list); 3995 list_del(&se_cmd->se_cmd_list);
3999 3996
4000 pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:" 3997 pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
4001 " %d\n", se_cmd, se_cmd->t_state, 3998 " %d\n", se_cmd, se_cmd->t_state,
4002 se_cmd->se_tfo->get_cmd_state(se_cmd)); 3999 se_cmd->se_tfo->get_cmd_state(se_cmd));
4003 4000
4004 if (wait_for_tasks) { 4001 if (wait_for_tasks) {
4005 pr_debug("Calling transport_wait_for_tasks se_cmd: %p t_state: %d," 4002 pr_debug("Calling transport_wait_for_tasks se_cmd: %p t_state: %d,"
4006 " fabric state: %d\n", se_cmd, se_cmd->t_state, 4003 " fabric state: %d\n", se_cmd, se_cmd->t_state,
4007 se_cmd->se_tfo->get_cmd_state(se_cmd)); 4004 se_cmd->se_tfo->get_cmd_state(se_cmd));
4008 4005
4009 rc = transport_wait_for_tasks(se_cmd); 4006 rc = transport_wait_for_tasks(se_cmd);
4010 4007
4011 pr_debug("After transport_wait_for_tasks se_cmd: %p t_state: %d," 4008 pr_debug("After transport_wait_for_tasks se_cmd: %p t_state: %d,"
4012 " fabric state: %d\n", se_cmd, se_cmd->t_state, 4009 " fabric state: %d\n", se_cmd, se_cmd->t_state,
4013 se_cmd->se_tfo->get_cmd_state(se_cmd)); 4010 se_cmd->se_tfo->get_cmd_state(se_cmd));
4014 } 4011 }
4015 4012
4016 if (!rc) { 4013 if (!rc) {
4017 wait_for_completion(&se_cmd->cmd_wait_comp); 4014 wait_for_completion(&se_cmd->cmd_wait_comp);
4018 pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d" 4015 pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
4019 " fabric state: %d\n", se_cmd, se_cmd->t_state, 4016 " fabric state: %d\n", se_cmd, se_cmd->t_state,
4020 se_cmd->se_tfo->get_cmd_state(se_cmd)); 4017 se_cmd->se_tfo->get_cmd_state(se_cmd));
4021 } 4018 }
4022 4019
4023 se_cmd->se_tfo->release_cmd(se_cmd); 4020 se_cmd->se_tfo->release_cmd(se_cmd);
4024 } 4021 }
4025 } 4022 }
4026 EXPORT_SYMBOL(target_wait_for_sess_cmds); 4023 EXPORT_SYMBOL(target_wait_for_sess_cmds);
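Together with target_splice_sess_cmd_list() above, this forms the session-shutdown handshake. A sketch of how a fabric module would plausibly sequence the two calls, assuming a hypothetical example_fabric_close_session():

static void example_fabric_close_session(struct se_session *se_sess)
{
	/*
	 * Mark the session as tearing down and move all active
	 * descriptors onto sess_wait_list...
	 */
	target_splice_sess_cmd_list(se_sess);
	/*
	 * ...then block until every outstanding se_cmd completes,
	 * here without the extra transport_wait_for_tasks() call.
	 */
	target_wait_for_sess_cmds(se_sess, 0);
	/* Now it is safe to deregister the session. */
}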
4027 4024
4028 /* transport_lun_wait_for_tasks(): 4025 /* transport_lun_wait_for_tasks():
4029 * 4026 *
4030 * Called from ConfigFS context to stop the passed struct se_cmd to allow 4027 * Called from ConfigFS context to stop the passed struct se_cmd to allow
4031 * a struct se_lun to be successfully shut down. 4028 * a struct se_lun to be successfully shut down.
4032 */ 4029 */
4033 static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun) 4030 static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
4034 { 4031 {
4035 unsigned long flags; 4032 unsigned long flags;
4036 int ret; 4033 int ret;
4037 /* 4034 /*
4038 * If the frontend has already requested this struct se_cmd to 4035 * If the frontend has already requested this struct se_cmd to
4039 * be stopped, we can safely ignore this struct se_cmd. 4036 * be stopped, we can safely ignore this struct se_cmd.
4040 */ 4037 */
4041 spin_lock_irqsave(&cmd->t_state_lock, flags); 4038 spin_lock_irqsave(&cmd->t_state_lock, flags);
4042 if (atomic_read(&cmd->t_transport_stop)) { 4039 if (atomic_read(&cmd->t_transport_stop)) {
4043 atomic_set(&cmd->transport_lun_stop, 0); 4040 atomic_set(&cmd->transport_lun_stop, 0);
4044 pr_debug("ConfigFS ITT[0x%08x] - t_transport_stop ==" 4041 pr_debug("ConfigFS ITT[0x%08x] - t_transport_stop =="
4045 " TRUE, skipping\n", cmd->se_tfo->get_task_tag(cmd)); 4042 " TRUE, skipping\n", cmd->se_tfo->get_task_tag(cmd));
4046 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 4043 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4047 transport_cmd_check_stop(cmd, 1, 0); 4044 transport_cmd_check_stop(cmd, 1, 0);
4048 return -EPERM; 4045 return -EPERM;
4049 } 4046 }
4050 atomic_set(&cmd->transport_lun_fe_stop, 1); 4047 atomic_set(&cmd->transport_lun_fe_stop, 1);
4051 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 4048 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4052 4049
4053 wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq); 4050 wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
4054 4051
4055 ret = transport_stop_tasks_for_cmd(cmd); 4052 ret = transport_stop_tasks_for_cmd(cmd);
4056 4053
4057 pr_debug("ConfigFS: cmd: %p t_tasks: %d stop tasks ret:" 4054 pr_debug("ConfigFS: cmd: %p t_tasks: %d stop tasks ret:"
4058 " %d\n", cmd, cmd->t_task_list_num, ret); 4055 " %d\n", cmd, cmd->t_task_list_num, ret);
4059 if (!ret) { 4056 if (!ret) {
4060 pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n", 4057 pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
4061 cmd->se_tfo->get_task_tag(cmd)); 4058 cmd->se_tfo->get_task_tag(cmd));
4062 wait_for_completion(&cmd->transport_lun_stop_comp); 4059 wait_for_completion(&cmd->transport_lun_stop_comp);
4063 pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n", 4060 pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
4064 cmd->se_tfo->get_task_tag(cmd)); 4061 cmd->se_tfo->get_task_tag(cmd));
4065 } 4062 }
4066 transport_remove_cmd_from_queue(cmd); 4063 transport_remove_cmd_from_queue(cmd);
4067 4064
4068 return 0; 4065 return 0;
4069 } 4066 }
4070 4067
4071 static void __transport_clear_lun_from_sessions(struct se_lun *lun) 4068 static void __transport_clear_lun_from_sessions(struct se_lun *lun)
4072 { 4069 {
4073 struct se_cmd *cmd = NULL; 4070 struct se_cmd *cmd = NULL;
4074 unsigned long lun_flags, cmd_flags; 4071 unsigned long lun_flags, cmd_flags;
4075 /* 4072 /*
4076 * Do exception processing and return CHECK_CONDITION status to the 4073 * Do exception processing and return CHECK_CONDITION status to the
4077 * Initiator Port. 4074 * Initiator Port.
4078 */ 4075 */
4079 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); 4076 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
4080 while (!list_empty(&lun->lun_cmd_list)) { 4077 while (!list_empty(&lun->lun_cmd_list)) {
4081 cmd = list_first_entry(&lun->lun_cmd_list, 4078 cmd = list_first_entry(&lun->lun_cmd_list,
4082 struct se_cmd, se_lun_node); 4079 struct se_cmd, se_lun_node);
4083 list_del(&cmd->se_lun_node); 4080 list_del(&cmd->se_lun_node);
4084 4081
4085 atomic_set(&cmd->transport_lun_active, 0); 4082 atomic_set(&cmd->transport_lun_active, 0);
4086 /* 4083 /*
4087 * This will notify target_core_transport.c: 4084 * This will notify target_core_transport.c:
4088 * transport_cmd_check_stop() that a LUN shutdown is in 4085 * transport_cmd_check_stop() that a LUN shutdown is in
4089 * progress for the iscsi_cmd_t. 4086 * progress for the iscsi_cmd_t.
4090 */ 4087 */
4091 spin_lock(&cmd->t_state_lock); 4088 spin_lock(&cmd->t_state_lock);
4092 pr_debug("SE_LUN[%d] - Setting cmd->transport" 4089 pr_debug("SE_LUN[%d] - Setting cmd->transport"
4093 "_lun_stop for ITT: 0x%08x\n", 4090 "_lun_stop for ITT: 0x%08x\n",
4094 cmd->se_lun->unpacked_lun, 4091 cmd->se_lun->unpacked_lun,
4095 cmd->se_tfo->get_task_tag(cmd)); 4092 cmd->se_tfo->get_task_tag(cmd));
4096 atomic_set(&cmd->transport_lun_stop, 1); 4093 atomic_set(&cmd->transport_lun_stop, 1);
4097 spin_unlock(&cmd->t_state_lock); 4094 spin_unlock(&cmd->t_state_lock);
4098 4095
4099 spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); 4096 spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
4100 4097
4101 if (!cmd->se_lun) { 4098 if (!cmd->se_lun) {
4102 pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n", 4099 pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n",
4103 cmd->se_tfo->get_task_tag(cmd), 4100 cmd->se_tfo->get_task_tag(cmd),
4104 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); 4101 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
4105 BUG(); 4102 BUG();
4106 } 4103 }
4107 /* 4104 /*
4108 * If the Storage engine still owns the iscsi_cmd_t, determine 4105 * If the Storage engine still owns the iscsi_cmd_t, determine
4109 * and/or stop its context. 4106 * and/or stop its context.
4110 */ 4107 */
4111 pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport" 4108 pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport"
4112 "_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun, 4109 "_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun,
4113 cmd->se_tfo->get_task_tag(cmd)); 4110 cmd->se_tfo->get_task_tag(cmd));
4114 4111
4115 if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) { 4112 if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) {
4116 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); 4113 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
4117 continue; 4114 continue;
4118 } 4115 }
4119 4116
4120 pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun" 4117 pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
4121 "_wait_for_tasks(): SUCCESS\n", 4118 "_wait_for_tasks(): SUCCESS\n",
4122 cmd->se_lun->unpacked_lun, 4119 cmd->se_lun->unpacked_lun,
4123 cmd->se_tfo->get_task_tag(cmd)); 4120 cmd->se_tfo->get_task_tag(cmd));
4124 4121
4125 spin_lock_irqsave(&cmd->t_state_lock, cmd_flags); 4122 spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
4126 if (!atomic_read(&cmd->transport_dev_active)) { 4123 if (!atomic_read(&cmd->transport_dev_active)) {
4127 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); 4124 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
4128 goto check_cond; 4125 goto check_cond;
4129 } 4126 }
4130 atomic_set(&cmd->transport_dev_active, 0); 4127 atomic_set(&cmd->transport_dev_active, 0);
4131 transport_all_task_dev_remove_state(cmd); 4128 transport_all_task_dev_remove_state(cmd);
4132 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); 4129 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
4133 4130
4134 transport_free_dev_tasks(cmd); 4131 transport_free_dev_tasks(cmd);
4135 /* 4132 /*
4136 * The Storage engine stopped this struct se_cmd before it was 4133 * The Storage engine stopped this struct se_cmd before it was
4137 * sent to the fabric frontend for delivery back to the 4134 * sent to the fabric frontend for delivery back to the
4138 * Initiator Node. Return this SCSI CDB back with a 4135 * Initiator Node. Return this SCSI CDB back with a
4139 * CHECK_CONDITION status. 4136 * CHECK_CONDITION status.
4140 */ 4137 */
4141 check_cond: 4138 check_cond:
4142 transport_send_check_condition_and_sense(cmd, 4139 transport_send_check_condition_and_sense(cmd,
4143 TCM_NON_EXISTENT_LUN, 0); 4140 TCM_NON_EXISTENT_LUN, 0);
4144 /* 4141 /*
4145 * If the fabric frontend is waiting for this iscsi_cmd_t to 4142 * If the fabric frontend is waiting for this iscsi_cmd_t to
4146 * be released, notify the waiting thread now that LU has 4143 * be released, notify the waiting thread now that LU has
4147 * finished accessing it. 4144 * finished accessing it.
4148 */ 4145 */
4149 spin_lock_irqsave(&cmd->t_state_lock, cmd_flags); 4146 spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
4150 if (atomic_read(&cmd->transport_lun_fe_stop)) { 4147 if (atomic_read(&cmd->transport_lun_fe_stop)) {
4151 pr_debug("SE_LUN[%d] - Detected FE stop for" 4148 pr_debug("SE_LUN[%d] - Detected FE stop for"
4152 " struct se_cmd: %p ITT: 0x%08x\n", 4149 " struct se_cmd: %p ITT: 0x%08x\n",
4153 lun->unpacked_lun, 4150 lun->unpacked_lun,
4154 cmd, cmd->se_tfo->get_task_tag(cmd)); 4151 cmd, cmd->se_tfo->get_task_tag(cmd));
4155 4152
4156 spin_unlock_irqrestore(&cmd->t_state_lock, 4153 spin_unlock_irqrestore(&cmd->t_state_lock,
4157 cmd_flags); 4154 cmd_flags);
4158 transport_cmd_check_stop(cmd, 1, 0); 4155 transport_cmd_check_stop(cmd, 1, 0);
4159 complete(&cmd->transport_lun_fe_stop_comp); 4156 complete(&cmd->transport_lun_fe_stop_comp);
4160 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); 4157 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
4161 continue; 4158 continue;
4162 } 4159 }
4163 pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n", 4160 pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
4164 lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd)); 4161 lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd));
4165 4162
4166 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); 4163 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
4167 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); 4164 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
4168 } 4165 }
4169 spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); 4166 spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
4170 } 4167 }
4171 4168
4172 static int transport_clear_lun_thread(void *p) 4169 static int transport_clear_lun_thread(void *p)
4173 { 4170 {
4174 struct se_lun *lun = (struct se_lun *)p; 4171 struct se_lun *lun = (struct se_lun *)p;
4175 4172
4176 __transport_clear_lun_from_sessions(lun); 4173 __transport_clear_lun_from_sessions(lun);
4177 complete(&lun->lun_shutdown_comp); 4174 complete(&lun->lun_shutdown_comp);
4178 4175
4179 return 0; 4176 return 0;
4180 } 4177 }
4181 4178
4182 int transport_clear_lun_from_sessions(struct se_lun *lun) 4179 int transport_clear_lun_from_sessions(struct se_lun *lun)
4183 { 4180 {
4184 struct task_struct *kt; 4181 struct task_struct *kt;
4185 4182
4186 kt = kthread_run(transport_clear_lun_thread, lun, 4183 kt = kthread_run(transport_clear_lun_thread, lun,
4187 "tcm_cl_%u", lun->unpacked_lun); 4184 "tcm_cl_%u", lun->unpacked_lun);
4188 if (IS_ERR(kt)) { 4185 if (IS_ERR(kt)) {
4189 pr_err("Unable to start clear_lun thread\n"); 4186 pr_err("Unable to start clear_lun thread\n");
4190 return PTR_ERR(kt); 4187 return PTR_ERR(kt);
4191 } 4188 }
4192 wait_for_completion(&lun->lun_shutdown_comp); 4189 wait_for_completion(&lun->lun_shutdown_comp);
4193 4190
4194 return 0; 4191 return 0;
4195 } 4192 }
4196 4193
4197 /** 4194 /**
4198 * transport_wait_for_tasks - wait for completion to occur 4195 * transport_wait_for_tasks - wait for completion to occur
4199 * @cmd: command to wait on 4196 * @cmd: command to wait on
4200 * 4197 *
4201 * Called from frontend fabric context to wait for storage engine 4198 * Called from frontend fabric context to wait for storage engine
4202 * to pause and/or release frontend generated struct se_cmd. 4199 * to pause and/or release frontend generated struct se_cmd.
4203 */ 4200 */
4204 bool transport_wait_for_tasks(struct se_cmd *cmd) 4201 bool transport_wait_for_tasks(struct se_cmd *cmd)
4205 { 4202 {
4206 unsigned long flags; 4203 unsigned long flags;
4207 4204
4208 spin_lock_irqsave(&cmd->t_state_lock, flags); 4205 spin_lock_irqsave(&cmd->t_state_lock, flags);
4209 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) { 4206 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) {
4210 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 4207 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4211 return false; 4208 return false;
4212 } 4209 }
4213 /* 4210 /*
4214 * Only perform a possible wait_for_tasks if SCF_SUPPORTED_SAM_OPCODE 4211 * Only perform a possible wait_for_tasks if SCF_SUPPORTED_SAM_OPCODE
4215 * has been set in transport_set_supported_SAM_opcode(). 4212 * has been set in transport_set_supported_SAM_opcode().
4216 */ 4213 */
4217 if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && !cmd->se_tmr_req) { 4214 if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && !cmd->se_tmr_req) {
4218 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 4215 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4219 return false; 4216 return false;
4220 } 4217 }
4221 /* 4218 /*
4222 * If we are already stopped due to an external event (ie: LUN shutdown) 4219 * If we are already stopped due to an external event (ie: LUN shutdown)
4223 * sleep until the connection can have the passed struct se_cmd back. 4220 * sleep until the connection can have the passed struct se_cmd back.
4224 * The cmd->transport_lun_fe_stop_comp will be completed by 4221 * The cmd->transport_lun_fe_stop_comp will be completed by
4225 * transport_clear_lun_from_sessions() once the ConfigFS context caller 4222 * transport_clear_lun_from_sessions() once the ConfigFS context caller
4226 * has completed its operation on the struct se_cmd. 4223 * has completed its operation on the struct se_cmd.
4227 */ 4224 */
4228 if (atomic_read(&cmd->transport_lun_stop)) { 4225 if (atomic_read(&cmd->transport_lun_stop)) {
4229 4226
4230 pr_debug("wait_for_tasks: Stopping" 4227 pr_debug("wait_for_tasks: Stopping"
4231 " wait_for_completion(&cmd->t_tasktransport_lun_fe" 4228 " wait_for_completion(&cmd->t_tasktransport_lun_fe"
4232 "_stop_comp); for ITT: 0x%08x\n", 4229 "_stop_comp); for ITT: 0x%08x\n",
4233 cmd->se_tfo->get_task_tag(cmd)); 4230 cmd->se_tfo->get_task_tag(cmd));
4234 /* 4231 /*
4235 * There is a special case for WRITES where a FE exception + 4232 * There is a special case for WRITES where a FE exception +
4236 * LUN shutdown means ConfigFS context is still sleeping on 4233 * LUN shutdown means ConfigFS context is still sleeping on
4237 * transport_lun_stop_comp in transport_lun_wait_for_tasks(). 4234 * transport_lun_stop_comp in transport_lun_wait_for_tasks().
4238 * We go ahead and up transport_lun_stop_comp just to be sure 4235 * We go ahead and up transport_lun_stop_comp just to be sure
4239 * here. 4236 * here.
4240 */ 4237 */
4241 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 4238 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4242 complete(&cmd->transport_lun_stop_comp); 4239 complete(&cmd->transport_lun_stop_comp);
4243 wait_for_completion(&cmd->transport_lun_fe_stop_comp); 4240 wait_for_completion(&cmd->transport_lun_fe_stop_comp);
4244 spin_lock_irqsave(&cmd->t_state_lock, flags); 4241 spin_lock_irqsave(&cmd->t_state_lock, flags);
4245 4242
4246 transport_all_task_dev_remove_state(cmd); 4243 transport_all_task_dev_remove_state(cmd);
4247 /* 4244 /*
4248 * At this point, the frontend who was the originator of this 4245 * At this point, the frontend who was the originator of this
4249 * struct se_cmd, now owns the structure and can be released through 4246 * struct se_cmd, now owns the structure and can be released through
4250 * normal means below. 4247 * normal means below.
4251 */ 4248 */
4252 pr_debug("wait_for_tasks: Stopped" 4249 pr_debug("wait_for_tasks: Stopped"
4253 " wait_for_completion(&cmd->t_tasktransport_lun_fe_" 4250 " wait_for_completion(&cmd->t_tasktransport_lun_fe_"
4254 "stop_comp); for ITT: 0x%08x\n", 4251 "stop_comp); for ITT: 0x%08x\n",
4255 cmd->se_tfo->get_task_tag(cmd)); 4252 cmd->se_tfo->get_task_tag(cmd));
4256 4253
4257 atomic_set(&cmd->transport_lun_stop, 0); 4254 atomic_set(&cmd->transport_lun_stop, 0);
4258 } 4255 }
4259 if (!atomic_read(&cmd->t_transport_active) || 4256 if (!atomic_read(&cmd->t_transport_active) ||
4260 atomic_read(&cmd->t_transport_aborted)) { 4257 atomic_read(&cmd->t_transport_aborted)) {
4261 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 4258 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4262 return false; 4259 return false;
4263 } 4260 }
4264 4261
4265 atomic_set(&cmd->t_transport_stop, 1); 4262 atomic_set(&cmd->t_transport_stop, 1);
4266 4263
4267 pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x" 4264 pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x"
4268 " i_state: %d, t_state: %d, t_transport_stop = TRUE\n", 4265 " i_state: %d, t_state: %d, t_transport_stop = TRUE\n",
4269 cmd, cmd->se_tfo->get_task_tag(cmd), 4266 cmd, cmd->se_tfo->get_task_tag(cmd),
4270 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); 4267 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
4271 4268
4272 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 4269 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4273 4270
4274 wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq); 4271 wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
4275 4272
4276 wait_for_completion(&cmd->t_transport_stop_comp); 4273 wait_for_completion(&cmd->t_transport_stop_comp);
4277 4274
4278 spin_lock_irqsave(&cmd->t_state_lock, flags); 4275 spin_lock_irqsave(&cmd->t_state_lock, flags);
4279 atomic_set(&cmd->t_transport_active, 0); 4276 atomic_set(&cmd->t_transport_active, 0);
4280 atomic_set(&cmd->t_transport_stop, 0); 4277 atomic_set(&cmd->t_transport_stop, 0);
4281 4278
4282 pr_debug("wait_for_tasks: Stopped wait_for_compltion(" 4279 pr_debug("wait_for_tasks: Stopped wait_for_compltion("
4283 "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n", 4280 "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
4284 cmd->se_tfo->get_task_tag(cmd)); 4281 cmd->se_tfo->get_task_tag(cmd));
4285 4282
4286 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 4283 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4287 4284
4288 return true; 4285 return true;
4289 } 4286 }
4290 EXPORT_SYMBOL(transport_wait_for_tasks); 4287 EXPORT_SYMBOL(transport_wait_for_tasks);
4291 4288
4292 static int transport_get_sense_codes( 4289 static int transport_get_sense_codes(
4293 struct se_cmd *cmd, 4290 struct se_cmd *cmd,
4294 u8 *asc, 4291 u8 *asc,
4295 u8 *ascq) 4292 u8 *ascq)
4296 { 4293 {
4297 *asc = cmd->scsi_asc; 4294 *asc = cmd->scsi_asc;
4298 *ascq = cmd->scsi_ascq; 4295 *ascq = cmd->scsi_ascq;
4299 4296
4300 return 0; 4297 return 0;
4301 } 4298 }
4302 4299
4303 static int transport_set_sense_codes( 4300 static int transport_set_sense_codes(
4304 struct se_cmd *cmd, 4301 struct se_cmd *cmd,
4305 u8 asc, 4302 u8 asc,
4306 u8 ascq) 4303 u8 ascq)
4307 { 4304 {
4308 cmd->scsi_asc = asc; 4305 cmd->scsi_asc = asc;
4309 cmd->scsi_ascq = ascq; 4306 cmd->scsi_ascq = ascq;
4310 4307
4311 return 0; 4308 return 0;
4312 } 4309 }
4313 4310
4314 int transport_send_check_condition_and_sense( 4311 int transport_send_check_condition_and_sense(
4315 struct se_cmd *cmd, 4312 struct se_cmd *cmd,
4316 u8 reason, 4313 u8 reason,
4317 int from_transport) 4314 int from_transport)
4318 { 4315 {
4319 unsigned char *buffer = cmd->sense_buffer; 4316 unsigned char *buffer = cmd->sense_buffer;
4320 unsigned long flags; 4317 unsigned long flags;
4321 int offset; 4318 int offset;
4322 u8 asc = 0, ascq = 0; 4319 u8 asc = 0, ascq = 0;
4323 4320
4324 spin_lock_irqsave(&cmd->t_state_lock, flags); 4321 spin_lock_irqsave(&cmd->t_state_lock, flags);
4325 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { 4322 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
4326 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 4323 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4327 return 0; 4324 return 0;
4328 } 4325 }
4329 cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION; 4326 cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
4330 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 4327 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4331 4328
4332 if (!reason && from_transport) 4329 if (!reason && from_transport)
4333 goto after_reason; 4330 goto after_reason;
4334 4331
4335 if (!from_transport) 4332 if (!from_transport)
4336 cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE; 4333 cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
4337 /* 4334 /*
4338 * Data Segment and SenseLength of the fabric response PDU. 4335 * Data Segment and SenseLength of the fabric response PDU.
4339 * 4336 *
4340 * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE 4337 * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE
4341 * from include/scsi/scsi_cmnd.h 4338 * from include/scsi/scsi_cmnd.h
4342 */ 4339 */
4343 offset = cmd->se_tfo->set_fabric_sense_len(cmd, 4340 offset = cmd->se_tfo->set_fabric_sense_len(cmd,
4344 TRANSPORT_SENSE_BUFFER); 4341 TRANSPORT_SENSE_BUFFER);
4345 /* 4342 /*
4346 * Actual SENSE DATA, see SPC-3 7.23.2. SPC_SENSE_KEY_OFFSET uses 4343 * Actual SENSE DATA, see SPC-3 7.23.2. SPC_SENSE_KEY_OFFSET uses
4347 * SENSE KEY values from include/scsi/scsi.h 4344 * SENSE KEY values from include/scsi/scsi.h
4348 */ 4345 */
4349 switch (reason) { 4346 switch (reason) {
4350 case TCM_NON_EXISTENT_LUN: 4347 case TCM_NON_EXISTENT_LUN:
4351 /* CURRENT ERROR */ 4348 /* CURRENT ERROR */
4352 buffer[offset] = 0x70; 4349 buffer[offset] = 0x70;
4353 /* ILLEGAL REQUEST */ 4350 /* ILLEGAL REQUEST */
4354 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 4351 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4355 /* LOGICAL UNIT NOT SUPPORTED */ 4352 /* LOGICAL UNIT NOT SUPPORTED */
4356 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x25; 4353 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x25;
4357 break; 4354 break;
4358 case TCM_UNSUPPORTED_SCSI_OPCODE: 4355 case TCM_UNSUPPORTED_SCSI_OPCODE:
4359 case TCM_SECTOR_COUNT_TOO_MANY: 4356 case TCM_SECTOR_COUNT_TOO_MANY:
4360 /* CURRENT ERROR */ 4357 /* CURRENT ERROR */
4361 buffer[offset] = 0x70; 4358 buffer[offset] = 0x70;
4362 /* ILLEGAL REQUEST */ 4359 /* ILLEGAL REQUEST */
4363 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 4360 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4364 /* INVALID COMMAND OPERATION CODE */ 4361 /* INVALID COMMAND OPERATION CODE */
4365 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x20; 4362 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x20;
4366 break; 4363 break;
4367 case TCM_UNKNOWN_MODE_PAGE: 4364 case TCM_UNKNOWN_MODE_PAGE:
4368 /* CURRENT ERROR */ 4365 /* CURRENT ERROR */
4369 buffer[offset] = 0x70; 4366 buffer[offset] = 0x70;
4370 /* ILLEGAL REQUEST */ 4367 /* ILLEGAL REQUEST */
4371 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 4368 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4372 /* INVALID FIELD IN CDB */ 4369 /* INVALID FIELD IN CDB */
4373 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24; 4370 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
4374 break; 4371 break;
4375 case TCM_CHECK_CONDITION_ABORT_CMD: 4372 case TCM_CHECK_CONDITION_ABORT_CMD:
4376 /* CURRENT ERROR */ 4373 /* CURRENT ERROR */
4377 buffer[offset] = 0x70; 4374 buffer[offset] = 0x70;
4378 /* ABORTED COMMAND */ 4375 /* ABORTED COMMAND */
4379 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 4376 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4380 /* BUS DEVICE RESET FUNCTION OCCURRED */ 4377 /* BUS DEVICE RESET FUNCTION OCCURRED */
4381 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x29; 4378 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x29;
4382 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x03; 4379 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x03;
4383 break; 4380 break;
4384 case TCM_INCORRECT_AMOUNT_OF_DATA: 4381 case TCM_INCORRECT_AMOUNT_OF_DATA:
4385 /* CURRENT ERROR */ 4382 /* CURRENT ERROR */
4386 buffer[offset] = 0x70; 4383 buffer[offset] = 0x70;
4387 /* ABORTED COMMAND */ 4384 /* ABORTED COMMAND */
4388 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 4385 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4389 /* WRITE ERROR */ 4386 /* WRITE ERROR */
4390 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c; 4387 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
4391 /* NOT ENOUGH UNSOLICITED DATA */ 4388 /* NOT ENOUGH UNSOLICITED DATA */
4392 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0d; 4389 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0d;
4393 break; 4390 break;
4394 case TCM_INVALID_CDB_FIELD: 4391 case TCM_INVALID_CDB_FIELD:
4395 /* CURRENT ERROR */ 4392 /* CURRENT ERROR */
4396 buffer[offset] = 0x70; 4393 buffer[offset] = 0x70;
4397 /* ABORTED COMMAND */ 4394 /* ABORTED COMMAND */
4398 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 4395 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4399 /* INVALID FIELD IN CDB */ 4396 /* INVALID FIELD IN CDB */
4400 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24; 4397 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
4401 break; 4398 break;
4402 case TCM_INVALID_PARAMETER_LIST: 4399 case TCM_INVALID_PARAMETER_LIST:
4403 /* CURRENT ERROR */ 4400 /* CURRENT ERROR */
4404 buffer[offset] = 0x70; 4401 buffer[offset] = 0x70;
4405 /* ABORTED COMMAND */ 4402 /* ABORTED COMMAND */
4406 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 4403 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4407 /* INVALID FIELD IN PARAMETER LIST */ 4404 /* INVALID FIELD IN PARAMETER LIST */
4408 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26; 4405 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26;
4409 break; 4406 break;
4410 case TCM_UNEXPECTED_UNSOLICITED_DATA: 4407 case TCM_UNEXPECTED_UNSOLICITED_DATA:
4411 /* CURRENT ERROR */ 4408 /* CURRENT ERROR */
4412 buffer[offset] = 0x70; 4409 buffer[offset] = 0x70;
4413 /* ABORTED COMMAND */ 4410 /* ABORTED COMMAND */
4414 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 4411 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4415 /* WRITE ERROR */ 4412 /* WRITE ERROR */
4416 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c; 4413 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
4417 /* UNEXPECTED_UNSOLICITED_DATA */ 4414 /* UNEXPECTED_UNSOLICITED_DATA */
4418 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0c; 4415 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0c;
4419 break; 4416 break;
4420 case TCM_SERVICE_CRC_ERROR: 4417 case TCM_SERVICE_CRC_ERROR:
4421 /* CURRENT ERROR */ 4418 /* CURRENT ERROR */
4422 buffer[offset] = 0x70; 4419 buffer[offset] = 0x70;
4423 /* ABORTED COMMAND */ 4420 /* ABORTED COMMAND */
4424 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 4421 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4425 /* PROTOCOL SERVICE CRC ERROR */ 4422 /* PROTOCOL SERVICE CRC ERROR */
4426 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x47; 4423 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x47;
4427 /* N/A */ 4424 /* N/A */
4428 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x05; 4425 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x05;
4429 break; 4426 break;
4430 case TCM_SNACK_REJECTED: 4427 case TCM_SNACK_REJECTED:
4431 /* CURRENT ERROR */ 4428 /* CURRENT ERROR */
4432 buffer[offset] = 0x70; 4429 buffer[offset] = 0x70;
4433 /* ABORTED COMMAND */ 4430 /* ABORTED COMMAND */
4434 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 4431 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
4435 /* READ ERROR */ 4432 /* READ ERROR */
4436 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x11; 4433 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x11;
4437 /* FAILED RETRANSMISSION REQUEST */ 4434 /* FAILED RETRANSMISSION REQUEST */
4438 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x13; 4435 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x13;
4439 break; 4436 break;
4440 case TCM_WRITE_PROTECTED: 4437 case TCM_WRITE_PROTECTED:
4441 /* CURRENT ERROR */ 4438 /* CURRENT ERROR */
4442 buffer[offset] = 0x70; 4439 buffer[offset] = 0x70;
4443 /* DATA PROTECT */ 4440 /* DATA PROTECT */
4444 buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT; 4441 buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
4445 /* WRITE PROTECTED */ 4442 /* WRITE PROTECTED */
4446 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27; 4443 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27;
4447 break; 4444 break;
4448 case TCM_CHECK_CONDITION_UNIT_ATTENTION: 4445 case TCM_CHECK_CONDITION_UNIT_ATTENTION:
4449 /* CURRENT ERROR */ 4446 /* CURRENT ERROR */
4450 buffer[offset] = 0x70; 4447 buffer[offset] = 0x70;
4451 /* UNIT ATTENTION */ 4448 /* UNIT ATTENTION */
4452 buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION; 4449 buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
4453 core_scsi3_ua_for_check_condition(cmd, &asc, &ascq); 4450 core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
4454 buffer[offset+SPC_ASC_KEY_OFFSET] = asc; 4451 buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
4455 buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq; 4452 buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
4456 break; 4453 break;
4457 case TCM_CHECK_CONDITION_NOT_READY: 4454 case TCM_CHECK_CONDITION_NOT_READY:
4458 /* CURRENT ERROR */ 4455 /* CURRENT ERROR */
4459 buffer[offset] = 0x70; 4456 buffer[offset] = 0x70;
4460 /* Not Ready */ 4457 /* Not Ready */
4461 buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY; 4458 buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
4462 transport_get_sense_codes(cmd, &asc, &ascq); 4459 transport_get_sense_codes(cmd, &asc, &ascq);
4463 buffer[offset+SPC_ASC_KEY_OFFSET] = asc; 4460 buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
4464 buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq; 4461 buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
4465 break; 4462 break;
4466 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: 4463 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
4467 default: 4464 default:
4468 /* CURRENT ERROR */ 4465 /* CURRENT ERROR */
4469 buffer[offset] = 0x70; 4466 buffer[offset] = 0x70;
4470 /* ILLEGAL REQUEST */ 4467 /* ILLEGAL REQUEST */
4471 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 4468 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
4472 /* LOGICAL UNIT COMMUNICATION FAILURE */ 4469 /* LOGICAL UNIT COMMUNICATION FAILURE */
4473 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x80; 4470 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x80;
4474 break; 4471 break;
4475 } 4472 }
4476 /* 4473 /*
4477 * This code uses linux/include/scsi/scsi.h SAM status codes! 4474 * This code uses linux/include/scsi/scsi.h SAM status codes!
4478 */ 4475 */
4479 cmd->scsi_status = SAM_STAT_CHECK_CONDITION; 4476 cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
4480 /* 4477 /*
4481 * Automatically padded, this value is encoded in the fabric's 4478 * Automatically padded, this value is encoded in the fabric's
4482 * data_length response PDU containing the SCSI defined sense data. 4479 * data_length response PDU containing the SCSI defined sense data.
4483 */ 4480 */
4484 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset; 4481 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset;
4485 4482
4486 after_reason: 4483 after_reason:
4487 return cmd->se_tfo->queue_status(cmd); 4484 return cmd->se_tfo->queue_status(cmd);
4488 } 4485 }
4489 EXPORT_SYMBOL(transport_send_check_condition_and_sense); 4486 EXPORT_SYMBOL(transport_send_check_condition_and_sense);
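For reference, the sense bytes assembled above follow the SPC-3 fixed format. A sketch of how the TCM_NON_EXISTENT_LUN case decodes, assuming the usual offset values SPC_SENSE_KEY_OFFSET == 2 and SPC_ASC_KEY_OFFSET == 12 from target_core_base.h:

/* Hypothetical decode of the buffer built for TCM_NON_EXISTENT_LUN. */
u8 response_code = buffer[offset];		/* 0x70: current error, fixed format */
u8 sense_key = buffer[offset + 2] & 0x0f;	/* ILLEGAL_REQUEST (0x05) */
u8 asc = buffer[offset + 12];			/* 0x25: LOGICAL UNIT NOT SUPPORTED */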
4490 4487
4491 int transport_check_aborted_status(struct se_cmd *cmd, int send_status) 4488 int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
4492 { 4489 {
4493 int ret = 0; 4490 int ret = 0;
4494 4491
4495 if (atomic_read(&cmd->t_transport_aborted) != 0) { 4492 if (atomic_read(&cmd->t_transport_aborted) != 0) {
4496 if (!send_status || 4493 if (!send_status ||
4497 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS)) 4494 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
4498 return 1; 4495 return 1;
4499 #if 0 4496 #if 0
4500 pr_debug("Sending delayed SAM_STAT_TASK_ABORTED" 4497 pr_debug("Sending delayed SAM_STAT_TASK_ABORTED"
4501 " status for CDB: 0x%02x ITT: 0x%08x\n", 4498 " status for CDB: 0x%02x ITT: 0x%08x\n",
4502 cmd->t_task_cdb[0], 4499 cmd->t_task_cdb[0],
4503 cmd->se_tfo->get_task_tag(cmd)); 4500 cmd->se_tfo->get_task_tag(cmd));
4504 #endif 4501 #endif
4505 cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS; 4502 cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
4506 cmd->se_tfo->queue_status(cmd); 4503 cmd->se_tfo->queue_status(cmd);
4507 ret = 1; 4504 ret = 1;
4508 } 4505 }
4509 return ret; 4506 return ret;
4510 } 4507 }
4511 EXPORT_SYMBOL(transport_check_aborted_status); 4508 EXPORT_SYMBOL(transport_check_aborted_status);
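A fabric response path would typically consult this helper before queueing normal status, so that an aborted command does not get two responses. A minimal sketch, with the surrounding function name assumed:

static int example_fabric_queue_response(struct se_cmd *se_cmd)
{
	/*
	 * A nonzero return means a (possibly delayed) TASK_ABORTED
	 * response has been handled; skip the normal status in that case.
	 */
	if (transport_check_aborted_status(se_cmd, 1))
		return 0;
	/* ... build and send the normal status PDU ... */
	return 0;
}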
4512 4509
4513 void transport_send_task_abort(struct se_cmd *cmd) 4510 void transport_send_task_abort(struct se_cmd *cmd)
4514 { 4511 {
4515 unsigned long flags; 4512 unsigned long flags;
4516 4513
4517 spin_lock_irqsave(&cmd->t_state_lock, flags); 4514 spin_lock_irqsave(&cmd->t_state_lock, flags);
4518 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { 4515 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
4519 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 4516 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4520 return; 4517 return;
4521 } 4518 }
4522 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 4519 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4523 4520
4524 /* 4521 /*
4525 * If there are still expected incoming fabric WRITEs, we wait 4522 * If there are still expected incoming fabric WRITEs, we wait
4526 * until they have completed before sending a TASK_ABORTED 4523 * until they have completed before sending a TASK_ABORTED
4527 * response. This response with TASK_ABORTED status will be 4524 * response. This response with TASK_ABORTED status will be
4528 * queued back to fabric module by transport_check_aborted_status(). 4525 * queued back to fabric module by transport_check_aborted_status().
4529 */ 4526 */
4530 if (cmd->data_direction == DMA_TO_DEVICE) { 4527 if (cmd->data_direction == DMA_TO_DEVICE) {
4531 if (cmd->se_tfo->write_pending_status(cmd) != 0) { 4528 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
4532 atomic_inc(&cmd->t_transport_aborted); 4529 atomic_inc(&cmd->t_transport_aborted);
4533 smp_mb__after_atomic_inc(); 4530 smp_mb__after_atomic_inc();
4534 } 4531 }
4535 } 4532 }
4536 cmd->scsi_status = SAM_STAT_TASK_ABORTED; 4533 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
4537 #if 0 4534 #if 0
4538 pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x," 4535 pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
4539 " ITT: 0x%08x\n", cmd->t_task_cdb[0], 4536 " ITT: 0x%08x\n", cmd->t_task_cdb[0],
4540 cmd->se_tfo->get_task_tag(cmd)); 4537 cmd->se_tfo->get_task_tag(cmd));
4541 #endif 4538 #endif
4542 cmd->se_tfo->queue_status(cmd); 4539 cmd->se_tfo->queue_status(cmd);
4543 } 4540 }
4544 4541
4545 static int transport_generic_do_tmr(struct se_cmd *cmd) 4542 static int transport_generic_do_tmr(struct se_cmd *cmd)
4546 { 4543 {
4547 struct se_device *dev = cmd->se_dev; 4544 struct se_device *dev = cmd->se_dev;
4548 struct se_tmr_req *tmr = cmd->se_tmr_req; 4545 struct se_tmr_req *tmr = cmd->se_tmr_req;
4549 int ret; 4546 int ret;
4550 4547
4551 switch (tmr->function) { 4548 switch (tmr->function) {
4552 case TMR_ABORT_TASK: 4549 case TMR_ABORT_TASK:
4553 tmr->response = TMR_FUNCTION_REJECTED; 4550 tmr->response = TMR_FUNCTION_REJECTED;
4554 break; 4551 break;
4555 case TMR_ABORT_TASK_SET: 4552 case TMR_ABORT_TASK_SET:
4556 case TMR_CLEAR_ACA: 4553 case TMR_CLEAR_ACA:
4557 case TMR_CLEAR_TASK_SET: 4554 case TMR_CLEAR_TASK_SET:
4558 tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED; 4555 tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
4559 break; 4556 break;
4560 case TMR_LUN_RESET: 4557 case TMR_LUN_RESET:
4561 ret = core_tmr_lun_reset(dev, tmr, NULL, NULL); 4558 ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
4562 tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE : 4559 tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
4563 TMR_FUNCTION_REJECTED; 4560 TMR_FUNCTION_REJECTED;
4564 break; 4561 break;
4565 case TMR_TARGET_WARM_RESET: 4562 case TMR_TARGET_WARM_RESET:
4566 tmr->response = TMR_FUNCTION_REJECTED; 4563 tmr->response = TMR_FUNCTION_REJECTED;
4567 break; 4564 break;
4568 case TMR_TARGET_COLD_RESET: 4565 case TMR_TARGET_COLD_RESET:
4569 tmr->response = TMR_FUNCTION_REJECTED; 4566 tmr->response = TMR_FUNCTION_REJECTED;
4570 break; 4567 break;
4571 default: 4568 default:
4572 pr_err("Uknown TMR function: 0x%02x.\n", 4569 pr_err("Uknown TMR function: 0x%02x.\n",
4573 tmr->function); 4570 tmr->function);
4574 tmr->response = TMR_FUNCTION_REJECTED; 4571 tmr->response = TMR_FUNCTION_REJECTED;
4575 break; 4572 break;
4576 } 4573 }
4577 4574
4578 cmd->t_state = TRANSPORT_ISTATE_PROCESSING; 4575 cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
4579 cmd->se_tfo->queue_tm_rsp(cmd); 4576 cmd->se_tfo->queue_tm_rsp(cmd);
4580 4577
4581 transport_cmd_check_stop_to_fabric(cmd); 4578 transport_cmd_check_stop_to_fabric(cmd);
4582 return 0; 4579 return 0;
4583 } 4580 }
4584 4581
4585 /* transport_processing_thread(): 4582 /* transport_processing_thread():
4586 * 4583 *
4587 * 4584 *
4588 */ 4585 */
4589 static int transport_processing_thread(void *param) 4586 static int transport_processing_thread(void *param)
4590 { 4587 {
4591 int ret; 4588 int ret;
4592 struct se_cmd *cmd; 4589 struct se_cmd *cmd;
4593 struct se_device *dev = (struct se_device *) param; 4590 struct se_device *dev = (struct se_device *) param;
4594 4591
4595 while (!kthread_should_stop()) { 4592 while (!kthread_should_stop()) {
4596 ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq, 4593 ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
4597 atomic_read(&dev->dev_queue_obj.queue_cnt) || 4594 atomic_read(&dev->dev_queue_obj.queue_cnt) ||
4598 kthread_should_stop()); 4595 kthread_should_stop());
4599 if (ret < 0) 4596 if (ret < 0)
4600 goto out; 4597 goto out;
4601 4598
4602 get_cmd: 4599 get_cmd:
4603 __transport_execute_tasks(dev); 4600 __transport_execute_tasks(dev);
4604 4601
4605 cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj); 4602 cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj);
4606 if (!cmd) 4603 if (!cmd)
4607 continue; 4604 continue;
4608 4605
4609 switch (cmd->t_state) { 4606 switch (cmd->t_state) {
4610 case TRANSPORT_NEW_CMD: 4607 case TRANSPORT_NEW_CMD:
4611 BUG(); 4608 BUG();
4612 break; 4609 break;
4613 case TRANSPORT_NEW_CMD_MAP: 4610 case TRANSPORT_NEW_CMD_MAP:
4614 if (!cmd->se_tfo->new_cmd_map) { 4611 if (!cmd->se_tfo->new_cmd_map) {
4615 pr_err("cmd->se_tfo->new_cmd_map is" 4612 pr_err("cmd->se_tfo->new_cmd_map is"
4616 " NULL for TRANSPORT_NEW_CMD_MAP\n"); 4613 " NULL for TRANSPORT_NEW_CMD_MAP\n");
4617 BUG(); 4614 BUG();
4618 } 4615 }
4619 ret = cmd->se_tfo->new_cmd_map(cmd); 4616 ret = cmd->se_tfo->new_cmd_map(cmd);
4620 if (ret < 0) { 4617 if (ret < 0) {
4621 transport_generic_request_failure(cmd); 4618 transport_generic_request_failure(cmd);
4622 break; 4619 break;
4623 } 4620 }
4624 ret = transport_generic_new_cmd(cmd); 4621 ret = transport_generic_new_cmd(cmd);
4625 if (ret < 0) { 4622 if (ret < 0) {
4626 transport_generic_request_failure(cmd); 4623 transport_generic_request_failure(cmd);
4627 break; 4624 break;
4628 } 4625 }
4629 break; 4626 break;
4630 case TRANSPORT_PROCESS_WRITE: 4627 case TRANSPORT_PROCESS_WRITE:
4631 transport_generic_process_write(cmd); 4628 transport_generic_process_write(cmd);
4632 break; 4629 break;
4633 case TRANSPORT_PROCESS_TMR: 4630 case TRANSPORT_PROCESS_TMR:
4634 transport_generic_do_tmr(cmd); 4631 transport_generic_do_tmr(cmd);
4635 break; 4632 break;
4636 case TRANSPORT_COMPLETE_QF_WP: 4633 case TRANSPORT_COMPLETE_QF_WP:
4637 transport_write_pending_qf(cmd); 4634 transport_write_pending_qf(cmd);
4638 break; 4635 break;
4639 case TRANSPORT_COMPLETE_QF_OK: 4636 case TRANSPORT_COMPLETE_QF_OK:
4640 transport_complete_qf(cmd); 4637 transport_complete_qf(cmd);
4641 break; 4638 break;
4642 default: 4639 default:
4643 pr_err("Unknown t_state: %d for ITT: 0x%08x " 4640 pr_err("Unknown t_state: %d for ITT: 0x%08x "
4644 "i_state: %d on SE LUN: %u\n", 4641 "i_state: %d on SE LUN: %u\n",
4645 cmd->t_state, 4642 cmd->t_state,
4646 cmd->se_tfo->get_task_tag(cmd), 4643 cmd->se_tfo->get_task_tag(cmd),
4647 cmd->se_tfo->get_cmd_state(cmd), 4644 cmd->se_tfo->get_cmd_state(cmd),
4648 cmd->se_lun->unpacked_lun); 4645 cmd->se_lun->unpacked_lun);
4649 BUG(); 4646 BUG();
4650 } 4647 }
4651 4648
4652 goto get_cmd; 4649 goto get_cmd;
4653 } 4650 }
4654 4651
4655 out: 4652 out:
4656 WARN_ON(!list_empty(&dev->state_task_list)); 4653 WARN_ON(!list_empty(&dev->state_task_list));
4657 WARN_ON(!list_empty(&dev->dev_queue_obj.qobj_list)); 4654 WARN_ON(!list_empty(&dev->dev_queue_obj.qobj_list));
4658 dev->process_thread = NULL; 4655 dev->process_thread = NULL;
4659 return 0; 4656 return 0;
4660 } 4657 }
4661 4658
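The loop that ends above is the standard kernel pattern for a per-device worker thread: sleep on a waitqueue until either work is queued or kthread_stop() is requested, then drain the queue completely before sleeping again. Below is a minimal, self-contained sketch of that pattern; the demo_-prefixed names are hypothetical stand-ins and not part of the target core API.

	#include <linux/kthread.h>
	#include <linux/wait.h>
	#include <linux/atomic.h>

	/* Hypothetical stand-in for the relevant fields of se_dev_queue_obj. */
	struct demo_queue_obj {
		wait_queue_head_t thread_wq;
		atomic_t queue_cnt;
	};

	static int demo_processing_thread(void *param)
	{
		struct demo_queue_obj *qobj = param;
		int ret;

		while (!kthread_should_stop()) {
			/* Sleep until work arrives or the thread is asked to stop. */
			ret = wait_event_interruptible(qobj->thread_wq,
					atomic_read(&qobj->queue_cnt) ||
					kthread_should_stop());
			if (ret < 0)
				break;

			/* Drain every queued item before going back to sleep. */
			while (atomic_read(&qobj->queue_cnt))
				atomic_dec(&qobj->queue_cnt); /* stand-in for dispatching one cmd */
		}
		return 0;
	}

The wake-up condition re-checks kthread_should_stop() inside wait_event_interruptible(), so a stop request issued while the thread is asleep is never missed.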
drivers/target/target_core_ua.c
1 /******************************************************************************* 1 /*******************************************************************************
2 * Filename: target_core_ua.c 2 * Filename: target_core_ua.c
3 * 3 *
4 * This file contains logic for SPC-3 Unit Attention emulation 4 * This file contains logic for SPC-3 Unit Attention emulation
5 * 5 *
6 * Copyright (c) 2009,2010 Rising Tide Systems 6 * Copyright (c) 2009,2010 Rising Tide Systems
7 * Copyright (c) 2009,2010 Linux-iSCSI.org 7 * Copyright (c) 2009,2010 Linux-iSCSI.org
8 * 8 *
9 * Nicholas A. Bellinger <nab@kernel.org> 9 * Nicholas A. Bellinger <nab@kernel.org>
10 * 10 *
11 * This program is free software; you can redistribute it and/or modify 11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by 12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or 13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version. 14 * (at your option) any later version.
15 * 15 *
16 * This program is distributed in the hope that it will be useful, 16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details. 19 * GNU General Public License for more details.
20 * 20 *
21 * You should have received a copy of the GNU General Public License 21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software 22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 23 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 * 24 *
25 ******************************************************************************/ 25 ******************************************************************************/
26 26
27 #include <linux/slab.h> 27 #include <linux/slab.h>
28 #include <linux/spinlock.h> 28 #include <linux/spinlock.h>
29 #include <scsi/scsi.h> 29 #include <scsi/scsi.h>
30 #include <scsi/scsi_cmnd.h> 30 #include <scsi/scsi_cmnd.h>
31 31
32 #include <target/target_core_base.h> 32 #include <target/target_core_base.h>
33 #include <target/target_core_device.h> 33 #include <target/target_core_fabric.h>
34 #include <target/target_core_transport.h>
35 #include <target/target_core_fabric_ops.h>
36 #include <target/target_core_configfs.h> 34 #include <target/target_core_configfs.h>
37 35
38 #include "target_core_internal.h" 36 #include "target_core_internal.h"
39 #include "target_core_alua.h" 37 #include "target_core_alua.h"
40 #include "target_core_pr.h" 38 #include "target_core_pr.h"
41 #include "target_core_ua.h" 39 #include "target_core_ua.h"
42 40
43 int core_scsi3_ua_check( 41 int core_scsi3_ua_check(
44 struct se_cmd *cmd, 42 struct se_cmd *cmd,
45 unsigned char *cdb) 43 unsigned char *cdb)
46 { 44 {
47 struct se_dev_entry *deve; 45 struct se_dev_entry *deve;
48 struct se_session *sess = cmd->se_sess; 46 struct se_session *sess = cmd->se_sess;
49 struct se_node_acl *nacl; 47 struct se_node_acl *nacl;
50 48
51 if (!sess) 49 if (!sess)
52 return 0; 50 return 0;
53 51
54 nacl = sess->se_node_acl; 52 nacl = sess->se_node_acl;
55 if (!nacl) 53 if (!nacl)
56 return 0; 54 return 0;
57 55
58 deve = &nacl->device_list[cmd->orig_fe_lun]; 56 deve = &nacl->device_list[cmd->orig_fe_lun];
59 if (!atomic_read(&deve->ua_count)) 57 if (!atomic_read(&deve->ua_count))
60 return 0; 58 return 0;
61 /* 59 /*
62 * From sam4r14, section 5.14 Unit attention condition: 60 * From sam4r14, section 5.14 Unit attention condition:
63 * 61 *
64 * a) if an INQUIRY command enters the enabled command state, the 62 * a) if an INQUIRY command enters the enabled command state, the
65 * device server shall process the INQUIRY command and shall neither 63 * device server shall process the INQUIRY command and shall neither
66 * report nor clear any unit attention condition; 64 * report nor clear any unit attention condition;
67 * b) if a REPORT LUNS command enters the enabled command state, the 65 * b) if a REPORT LUNS command enters the enabled command state, the
68 * device server shall process the REPORT LUNS command and shall not 66 * device server shall process the REPORT LUNS command and shall not
69 * report any unit attention condition; 67 * report any unit attention condition;
70 * e) if a REQUEST SENSE command enters the enabled command state while 68 * e) if a REQUEST SENSE command enters the enabled command state while
71 * a unit attention condition exists for the SCSI initiator port 69 * a unit attention condition exists for the SCSI initiator port
72 * associated with the I_T nexus on which the REQUEST SENSE command 70 * associated with the I_T nexus on which the REQUEST SENSE command
73 * was received, then the device server shall process the command 71 * was received, then the device server shall process the command
74 * and either: 72 * and either:
75 */ 73 */
76 switch (cdb[0]) { 74 switch (cdb[0]) {
77 case INQUIRY: 75 case INQUIRY:
78 case REPORT_LUNS: 76 case REPORT_LUNS:
79 case REQUEST_SENSE: 77 case REQUEST_SENSE:
80 return 0; 78 return 0;
81 default: 79 default:
82 return -EINVAL; 80 return -EINVAL;
83 } 81 }
84 82
85 return -EINVAL; 83 return -EINVAL;
86 } 84 }
87 85
88 int core_scsi3_ua_allocate( 86 int core_scsi3_ua_allocate(
89 struct se_node_acl *nacl, 87 struct se_node_acl *nacl,
90 u32 unpacked_lun, 88 u32 unpacked_lun,
91 u8 asc, 89 u8 asc,
92 u8 ascq) 90 u8 ascq)
93 { 91 {
94 struct se_dev_entry *deve; 92 struct se_dev_entry *deve;
95 struct se_ua *ua, *ua_p, *ua_tmp; 93 struct se_ua *ua, *ua_p, *ua_tmp;
96 /* 94 /*
97 * PASSTHROUGH OPS 95 * PASSTHROUGH OPS
98 */ 96 */
99 if (!nacl) 97 if (!nacl)
100 return -EINVAL; 98 return -EINVAL;
101 99
102 ua = kmem_cache_zalloc(se_ua_cache, GFP_ATOMIC); 100 ua = kmem_cache_zalloc(se_ua_cache, GFP_ATOMIC);
103 if (!ua) { 101 if (!ua) {
104 pr_err("Unable to allocate struct se_ua\n"); 102 pr_err("Unable to allocate struct se_ua\n");
105 return -ENOMEM; 103 return -ENOMEM;
106 } 104 }
107 INIT_LIST_HEAD(&ua->ua_dev_list); 105 INIT_LIST_HEAD(&ua->ua_dev_list);
108 INIT_LIST_HEAD(&ua->ua_nacl_list); 106 INIT_LIST_HEAD(&ua->ua_nacl_list);
109 107
110 ua->ua_nacl = nacl; 108 ua->ua_nacl = nacl;
111 ua->ua_asc = asc; 109 ua->ua_asc = asc;
112 ua->ua_ascq = ascq; 110 ua->ua_ascq = ascq;
113 111
114 spin_lock_irq(&nacl->device_list_lock); 112 spin_lock_irq(&nacl->device_list_lock);
115 deve = &nacl->device_list[unpacked_lun]; 113 deve = &nacl->device_list[unpacked_lun];
116 114
117 spin_lock(&deve->ua_lock); 115 spin_lock(&deve->ua_lock);
118 list_for_each_entry_safe(ua_p, ua_tmp, &deve->ua_list, ua_nacl_list) { 116 list_for_each_entry_safe(ua_p, ua_tmp, &deve->ua_list, ua_nacl_list) {
119 /* 117 /*
120 * Do not report the same UNIT ATTENTION twice. 118 * Do not report the same UNIT ATTENTION twice.
121 */ 119 */
122 if ((ua_p->ua_asc == asc) && (ua_p->ua_ascq == ascq)) { 120 if ((ua_p->ua_asc == asc) && (ua_p->ua_ascq == ascq)) {
123 spin_unlock(&deve->ua_lock); 121 spin_unlock(&deve->ua_lock);
124 spin_unlock_irq(&nacl->device_list_lock); 122 spin_unlock_irq(&nacl->device_list_lock);
125 kmem_cache_free(se_ua_cache, ua); 123 kmem_cache_free(se_ua_cache, ua);
126 return 0; 124 return 0;
127 } 125 }
128 /* 126 /*
129 * Attach the highest priority Unit Attention to 127 * Attach the highest priority Unit Attention to
130 * the head of the list following sam4r14, 128 * the head of the list following sam4r14,
131 * Section 5.14 Unit Attention Condition: 129 * Section 5.14 Unit Attention Condition:
132 * 130 *
133 * POWER ON, RESET, OR BUS DEVICE RESET OCCURRED highest 131 * POWER ON, RESET, OR BUS DEVICE RESET OCCURRED highest
134 * POWER ON OCCURRED or 132 * POWER ON OCCURRED or
135 * DEVICE INTERNAL RESET 133 * DEVICE INTERNAL RESET
136 * SCSI BUS RESET OCCURRED or 134 * SCSI BUS RESET OCCURRED or
137 * MICROCODE HAS BEEN CHANGED or 135 * MICROCODE HAS BEEN CHANGED or
138 * protocol specific 136 * protocol specific
139 * BUS DEVICE RESET FUNCTION OCCURRED 137 * BUS DEVICE RESET FUNCTION OCCURRED
140 * I_T NEXUS LOSS OCCURRED 138 * I_T NEXUS LOSS OCCURRED
141 * COMMANDS CLEARED BY POWER LOSS NOTIFICATION 139 * COMMANDS CLEARED BY POWER LOSS NOTIFICATION
142 * all others Lowest 140 * all others Lowest
143 * 141 *
144 * Each of the ASCQ codes listed above are defined in 142 * Each of the ASCQ codes listed above are defined in
145 * the 29h ASC family, see spc4r17 Table D.1 143 * the 29h ASC family, see spc4r17 Table D.1
146 */ 144 */
147 if (ua_p->ua_asc == 0x29) { 145 if (ua_p->ua_asc == 0x29) {
148 if ((asc == 0x29) && (ascq > ua_p->ua_ascq)) 146 if ((asc == 0x29) && (ascq > ua_p->ua_ascq))
149 list_add(&ua->ua_nacl_list, 147 list_add(&ua->ua_nacl_list,
150 &deve->ua_list); 148 &deve->ua_list);
151 else 149 else
152 list_add_tail(&ua->ua_nacl_list, 150 list_add_tail(&ua->ua_nacl_list,
153 &deve->ua_list); 151 &deve->ua_list);
154 } else if (ua_p->ua_asc == 0x2a) { 152 } else if (ua_p->ua_asc == 0x2a) {
155 /* 153 /*
156 * Incoming Family 29h ASCQ codes will override 154 * Incoming Family 29h ASCQ codes will override
157 * Family 2Ah ASCQ codes for Unit Attention condition. 155 * Family 2Ah ASCQ codes for Unit Attention condition.
158 */ 156 */
159 if ((asc == 0x29) || (ascq > ua_p->ua_asc)) 157 if ((asc == 0x29) || (ascq > ua_p->ua_asc))
160 list_add(&ua->ua_nacl_list, 158 list_add(&ua->ua_nacl_list,
161 &deve->ua_list); 159 &deve->ua_list);
162 else 160 else
163 list_add_tail(&ua->ua_nacl_list, 161 list_add_tail(&ua->ua_nacl_list,
164 &deve->ua_list); 162 &deve->ua_list);
165 } else 163 } else
166 list_add_tail(&ua->ua_nacl_list, 164 list_add_tail(&ua->ua_nacl_list,
167 &deve->ua_list); 165 &deve->ua_list);
168 spin_unlock(&deve->ua_lock); 166 spin_unlock(&deve->ua_lock);
169 spin_unlock_irq(&nacl->device_list_lock); 167 spin_unlock_irq(&nacl->device_list_lock);
170 168
171 atomic_inc(&deve->ua_count); 169 atomic_inc(&deve->ua_count);
172 smp_mb__after_atomic_inc(); 170 smp_mb__after_atomic_inc();
173 return 0; 171 return 0;
174 } 172 }
175 list_add_tail(&ua->ua_nacl_list, &deve->ua_list); 173 list_add_tail(&ua->ua_nacl_list, &deve->ua_list);
176 spin_unlock(&deve->ua_lock); 174 spin_unlock(&deve->ua_lock);
177 spin_unlock_irq(&nacl->device_list_lock); 175 spin_unlock_irq(&nacl->device_list_lock);
178 176
179 pr_debug("[%s]: Allocated UNIT ATTENTION, mapped LUN: %u, ASC:" 177 pr_debug("[%s]: Allocated UNIT ATTENTION, mapped LUN: %u, ASC:"
180 " 0x%02x, ASCQ: 0x%02x\n", 178 " 0x%02x, ASCQ: 0x%02x\n",
181 nacl->se_tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, 179 nacl->se_tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
182 asc, ascq); 180 asc, ascq);
183 181
184 atomic_inc(&deve->ua_count); 182 atomic_inc(&deve->ua_count);
185 smp_mb__after_atomic_inc(); 183 smp_mb__after_atomic_inc();
186 return 0; 184 return 0;
187 } 185 }
188 186
189 void core_scsi3_ua_release_all( 187 void core_scsi3_ua_release_all(
190 struct se_dev_entry *deve) 188 struct se_dev_entry *deve)
191 { 189 {
192 struct se_ua *ua, *ua_p; 190 struct se_ua *ua, *ua_p;
193 191
194 spin_lock(&deve->ua_lock); 192 spin_lock(&deve->ua_lock);
195 list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) { 193 list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) {
196 list_del(&ua->ua_nacl_list); 194 list_del(&ua->ua_nacl_list);
197 kmem_cache_free(se_ua_cache, ua); 195 kmem_cache_free(se_ua_cache, ua);
198 196
199 atomic_dec(&deve->ua_count); 197 atomic_dec(&deve->ua_count);
200 smp_mb__after_atomic_dec(); 198 smp_mb__after_atomic_dec();
201 } 199 }
202 spin_unlock(&deve->ua_lock); 200 spin_unlock(&deve->ua_lock);
203 } 201 }
204 202
205 void core_scsi3_ua_for_check_condition( 203 void core_scsi3_ua_for_check_condition(
206 struct se_cmd *cmd, 204 struct se_cmd *cmd,
207 u8 *asc, 205 u8 *asc,
208 u8 *ascq) 206 u8 *ascq)
209 { 207 {
210 struct se_device *dev = cmd->se_dev; 208 struct se_device *dev = cmd->se_dev;
211 struct se_dev_entry *deve; 209 struct se_dev_entry *deve;
212 struct se_session *sess = cmd->se_sess; 210 struct se_session *sess = cmd->se_sess;
213 struct se_node_acl *nacl; 211 struct se_node_acl *nacl;
214 struct se_ua *ua = NULL, *ua_p; 212 struct se_ua *ua = NULL, *ua_p;
215 int head = 1; 213 int head = 1;
216 214
217 if (!sess) 215 if (!sess)
218 return; 216 return;
219 217
220 nacl = sess->se_node_acl; 218 nacl = sess->se_node_acl;
221 if (!nacl) 219 if (!nacl)
222 return; 220 return;
223 221
224 spin_lock_irq(&nacl->device_list_lock); 222 spin_lock_irq(&nacl->device_list_lock);
225 deve = &nacl->device_list[cmd->orig_fe_lun]; 223 deve = &nacl->device_list[cmd->orig_fe_lun];
226 if (!atomic_read(&deve->ua_count)) { 224 if (!atomic_read(&deve->ua_count)) {
227 spin_unlock_irq(&nacl->device_list_lock); 225 spin_unlock_irq(&nacl->device_list_lock);
228 return; 226 return;
229 } 227 }
230 /* 228 /*
231 * The highest priority Unit Attentions are placed at the head of the 229 * The highest priority Unit Attentions are placed at the head of the
232 * struct se_dev_entry->ua_list, and will be returned in CHECK_CONDITION + 230 * struct se_dev_entry->ua_list, and will be returned in CHECK_CONDITION +
233 * sense data for the received CDB. 231 * sense data for the received CDB.
234 */ 232 */
235 spin_lock(&deve->ua_lock); 233 spin_lock(&deve->ua_lock);
236 list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) { 234 list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) {
237 /* 235 /*
238 * For ua_intlck_ctrl code not equal to 00b, only report the 236 * For ua_intlck_ctrl code not equal to 00b, only report the
239 * highest priority UNIT_ATTENTION and ASC/ASCQ without 237 * highest priority UNIT_ATTENTION and ASC/ASCQ without
240 * clearing it. 238 * clearing it.
241 */ 239 */
242 if (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl != 0) { 240 if (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl != 0) {
243 *asc = ua->ua_asc; 241 *asc = ua->ua_asc;
244 *ascq = ua->ua_ascq; 242 *ascq = ua->ua_ascq;
245 break; 243 break;
246 } 244 }
247 /* 245 /*
248 * Otherwise for the default 00b, release the UNIT ATTENTION 246 * Otherwise for the default 00b, release the UNIT ATTENTION
249 * condition. Return the ASC/ASCQ of the highest priority UA 247 * condition. Return the ASC/ASCQ of the highest priority UA
250 * (head of the list) in the outgoing CHECK_CONDITION + sense. 248 * (head of the list) in the outgoing CHECK_CONDITION + sense.
251 */ 249 */
252 if (head) { 250 if (head) {
253 *asc = ua->ua_asc; 251 *asc = ua->ua_asc;
254 *ascq = ua->ua_ascq; 252 *ascq = ua->ua_ascq;
255 head = 0; 253 head = 0;
256 } 254 }
257 list_del(&ua->ua_nacl_list); 255 list_del(&ua->ua_nacl_list);
258 kmem_cache_free(se_ua_cache, ua); 256 kmem_cache_free(se_ua_cache, ua);
259 257
260 atomic_dec(&deve->ua_count); 258 atomic_dec(&deve->ua_count);
261 smp_mb__after_atomic_dec(); 259 smp_mb__after_atomic_dec();
262 } 260 }
263 spin_unlock(&deve->ua_lock); 261 spin_unlock(&deve->ua_lock);
264 spin_unlock_irq(&nacl->device_list_lock); 262 spin_unlock_irq(&nacl->device_list_lock);
265 263
266 pr_debug("[%s]: %s UNIT ATTENTION condition with" 264 pr_debug("[%s]: %s UNIT ATTENTION condition with"
267 " INTLCK_CTRL: %d, mapped LUN: %u, got CDB: 0x%02x" 265 " INTLCK_CTRL: %d, mapped LUN: %u, got CDB: 0x%02x"
268 " reported ASC: 0x%02x, ASCQ: 0x%02x\n", 266 " reported ASC: 0x%02x, ASCQ: 0x%02x\n",
269 nacl->se_tpg->se_tpg_tfo->get_fabric_name(), 267 nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
270 (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" : 268 (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" :
271 "Releasing", dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl, 269 "Releasing", dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl,
272 cmd->orig_fe_lun, cmd->t_task_cdb[0], *asc, *ascq); 270 cmd->orig_fe_lun, cmd->t_task_cdb[0], *asc, *ascq);
273 } 271 }
274 272
275 int core_scsi3_ua_clear_for_request_sense( 273 int core_scsi3_ua_clear_for_request_sense(
276 struct se_cmd *cmd, 274 struct se_cmd *cmd,
277 u8 *asc, 275 u8 *asc,
278 u8 *ascq) 276 u8 *ascq)
279 { 277 {
280 struct se_dev_entry *deve; 278 struct se_dev_entry *deve;
281 struct se_session *sess = cmd->se_sess; 279 struct se_session *sess = cmd->se_sess;
282 struct se_node_acl *nacl; 280 struct se_node_acl *nacl;
283 struct se_ua *ua = NULL, *ua_p; 281 struct se_ua *ua = NULL, *ua_p;
284 int head = 1; 282 int head = 1;
285 283
286 if (!sess) 284 if (!sess)
287 return -EINVAL; 285 return -EINVAL;
288 286
289 nacl = sess->se_node_acl; 287 nacl = sess->se_node_acl;
290 if (!nacl) 288 if (!nacl)
291 return -EINVAL; 289 return -EINVAL;
292 290
293 spin_lock_irq(&nacl->device_list_lock); 291 spin_lock_irq(&nacl->device_list_lock);
294 deve = &nacl->device_list[cmd->orig_fe_lun]; 292 deve = &nacl->device_list[cmd->orig_fe_lun];
295 if (!atomic_read(&deve->ua_count)) { 293 if (!atomic_read(&deve->ua_count)) {
296 spin_unlock_irq(&nacl->device_list_lock); 294 spin_unlock_irq(&nacl->device_list_lock);
297 return -EPERM; 295 return -EPERM;
298 } 296 }
299 /* 297 /*
300 * The highest priority Unit Attentions are placed at the head of the 298 * The highest priority Unit Attentions are placed at the head of the
301 * struct se_dev_entry->ua_list. The First (and hence highest priority) 299 * struct se_dev_entry->ua_list. The First (and hence highest priority)
302 * ASC/ASCQ will be returned in REQUEST_SENSE payload data for the 300 * ASC/ASCQ will be returned in REQUEST_SENSE payload data for the
303 * matching struct se_lun. 301 * matching struct se_lun.
304 * 302 *
305 * Once the returning ASC/ASCQ values are set, we go ahead and 303 * Once the returning ASC/ASCQ values are set, we go ahead and
306 * release all of the Unit Attention conditions for the associated 304 * release all of the Unit Attention conditions for the associated
307 * struct se_lun. 305 * struct se_lun.
308 */ 306 */
309 spin_lock(&deve->ua_lock); 307 spin_lock(&deve->ua_lock);
310 list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) { 308 list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) {
311 if (head) { 309 if (head) {
312 *asc = ua->ua_asc; 310 *asc = ua->ua_asc;
313 *ascq = ua->ua_ascq; 311 *ascq = ua->ua_ascq;
314 head = 0; 312 head = 0;
315 } 313 }
316 list_del(&ua->ua_nacl_list); 314 list_del(&ua->ua_nacl_list);
317 kmem_cache_free(se_ua_cache, ua); 315 kmem_cache_free(se_ua_cache, ua);
318 316
319 atomic_dec(&deve->ua_count); 317 atomic_dec(&deve->ua_count);
320 smp_mb__after_atomic_dec(); 318 smp_mb__after_atomic_dec();
321 } 319 }
322 spin_unlock(&deve->ua_lock); 320 spin_unlock(&deve->ua_lock);
323 spin_unlock_irq(&nacl->device_list_lock); 321 spin_unlock_irq(&nacl->device_list_lock);
324 322
325 pr_debug("[%s]: Released UNIT ATTENTION condition, mapped" 323 pr_debug("[%s]: Released UNIT ATTENTION condition, mapped"
326 " LUN: %u, got REQUEST_SENSE reported ASC: 0x%02x," 324 " LUN: %u, got REQUEST_SENSE reported ASC: 0x%02x,"
327 " ASCQ: 0x%02x\n", nacl->se_tpg->se_tpg_tfo->get_fabric_name(), 325 " ASCQ: 0x%02x\n", nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
328 cmd->orig_fe_lun, *asc, *ascq); 326 cmd->orig_fe_lun, *asc, *ascq);
329 327
330 return (head) ? -EPERM : 0; 328 return (head) ? -EPERM : 0;
331 } 329 }
332 330
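core_scsi3_ua_allocate() above implements the sam4r14 section 5.14 ordering rule: unit attentions from the 29h (reset) ASC family are queued at the head of deve->ua_list so they are reported first, while all others go to the tail. Here is a hedged sketch of just that ordering decision, using hypothetical demo_ types rather than the real se_ua/se_dev_entry structures.

	#include <linux/list.h>
	#include <linux/types.h>

	struct demo_ua {
		struct list_head list;
		u8 asc, ascq;
	};

	/* Queue a UA so the highest-priority condition stays at the head. */
	static void demo_queue_ua(struct list_head *ua_list, struct demo_ua *ua)
	{
		if (ua->asc == 0x29)
			list_add(&ua->list, ua_list);      /* reset family: head */
		else
			list_add_tail(&ua->list, ua_list); /* all others: tail */
	}

Because the head always holds the highest-priority condition, core_scsi3_ua_for_check_condition() can simply report the first entry's ASC/ASCQ in the outgoing sense data.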
drivers/target/tcm_fc/tfc_cmd.c
1 /* 1 /*
2 * Copyright (c) 2010 Cisco Systems, Inc. 2 * Copyright (c) 2010 Cisco Systems, Inc.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify it 4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License, 5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation. 6 * version 2, as published by the Free Software Foundation.
7 * 7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details. 11 * more details.
12 * 12 *
13 * You should have received a copy of the GNU General Public License along with 13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 */ 16 */
17 17
18 /* XXX TBD some includes may be extraneous */ 18 /* XXX TBD some includes may be extraneous */
19 19
20 #include <linux/module.h> 20 #include <linux/module.h>
21 #include <linux/moduleparam.h> 21 #include <linux/moduleparam.h>
22 #include <generated/utsrelease.h> 22 #include <generated/utsrelease.h>
23 #include <linux/utsname.h> 23 #include <linux/utsname.h>
24 #include <linux/init.h> 24 #include <linux/init.h>
25 #include <linux/slab.h> 25 #include <linux/slab.h>
26 #include <linux/kthread.h> 26 #include <linux/kthread.h>
27 #include <linux/types.h> 27 #include <linux/types.h>
28 #include <linux/string.h> 28 #include <linux/string.h>
29 #include <linux/configfs.h> 29 #include <linux/configfs.h>
30 #include <linux/ctype.h> 30 #include <linux/ctype.h>
31 #include <linux/hash.h> 31 #include <linux/hash.h>
32 #include <asm/unaligned.h> 32 #include <asm/unaligned.h>
33 #include <scsi/scsi.h> 33 #include <scsi/scsi.h>
34 #include <scsi/scsi_host.h> 34 #include <scsi/scsi_host.h>
35 #include <scsi/scsi_device.h> 35 #include <scsi/scsi_device.h>
36 #include <scsi/scsi_cmnd.h> 36 #include <scsi/scsi_cmnd.h>
37 #include <scsi/scsi_tcq.h> 37 #include <scsi/scsi_tcq.h>
38 #include <scsi/libfc.h> 38 #include <scsi/libfc.h>
39 #include <scsi/fc_encode.h> 39 #include <scsi/fc_encode.h>
40 40
41 #include <target/target_core_base.h> 41 #include <target/target_core_base.h>
42 #include <target/target_core_transport.h> 42 #include <target/target_core_fabric.h>
43 #include <target/target_core_fabric_ops.h>
44 #include <target/target_core_device.h>
45 #include <target/target_core_tpg.h>
46 #include <target/target_core_configfs.h> 43 #include <target/target_core_configfs.h>
47 #include <target/target_core_tmr.h>
48 #include <target/configfs_macros.h> 44 #include <target/configfs_macros.h>
49 45
50 #include "tcm_fc.h" 46 #include "tcm_fc.h"
51 47
52 /* 48 /*
53 * Dump cmd state for debugging. 49 * Dump cmd state for debugging.
54 */ 50 */
55 void ft_dump_cmd(struct ft_cmd *cmd, const char *caller) 51 void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
56 { 52 {
57 struct fc_exch *ep; 53 struct fc_exch *ep;
58 struct fc_seq *sp; 54 struct fc_seq *sp;
59 struct se_cmd *se_cmd; 55 struct se_cmd *se_cmd;
60 struct scatterlist *sg; 56 struct scatterlist *sg;
61 int count; 57 int count;
62 58
63 se_cmd = &cmd->se_cmd; 59 se_cmd = &cmd->se_cmd;
64 pr_debug("%s: cmd %p sess %p seq %p se_cmd %p\n", 60 pr_debug("%s: cmd %p sess %p seq %p se_cmd %p\n",
65 caller, cmd, cmd->sess, cmd->seq, se_cmd); 61 caller, cmd, cmd->sess, cmd->seq, se_cmd);
66 pr_debug("%s: cmd %p cdb %p\n", 62 pr_debug("%s: cmd %p cdb %p\n",
67 caller, cmd, cmd->cdb); 63 caller, cmd, cmd->cdb);
68 pr_debug("%s: cmd %p lun %d\n", caller, cmd, cmd->lun); 64 pr_debug("%s: cmd %p lun %d\n", caller, cmd, cmd->lun);
69 65
70 pr_debug("%s: cmd %p data_nents %u len %u se_cmd_flags <0x%x>\n", 66 pr_debug("%s: cmd %p data_nents %u len %u se_cmd_flags <0x%x>\n",
71 caller, cmd, se_cmd->t_data_nents, 67 caller, cmd, se_cmd->t_data_nents,
72 se_cmd->data_length, se_cmd->se_cmd_flags); 68 se_cmd->data_length, se_cmd->se_cmd_flags);
73 69
74 for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, count) 70 for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, count)
75 pr_debug("%s: cmd %p sg %p page %p " 71 pr_debug("%s: cmd %p sg %p page %p "
76 "len 0x%x off 0x%x\n", 72 "len 0x%x off 0x%x\n",
77 caller, cmd, sg, 73 caller, cmd, sg,
78 sg_page(sg), sg->length, sg->offset); 74 sg_page(sg), sg->length, sg->offset);
79 75
80 sp = cmd->seq; 76 sp = cmd->seq;
81 if (sp) { 77 if (sp) {
82 ep = fc_seq_exch(sp); 78 ep = fc_seq_exch(sp);
83 pr_debug("%s: cmd %p sid %x did %x " 79 pr_debug("%s: cmd %p sid %x did %x "
84 "ox_id %x rx_id %x seq_id %x e_stat %x\n", 80 "ox_id %x rx_id %x seq_id %x e_stat %x\n",
85 caller, cmd, ep->sid, ep->did, ep->oxid, ep->rxid, 81 caller, cmd, ep->sid, ep->did, ep->oxid, ep->rxid,
86 sp->id, ep->esb_stat); 82 sp->id, ep->esb_stat);
87 } 83 }
88 print_hex_dump(KERN_INFO, "ft_dump_cmd ", DUMP_PREFIX_NONE, 84 print_hex_dump(KERN_INFO, "ft_dump_cmd ", DUMP_PREFIX_NONE,
89 16, 4, cmd->cdb, MAX_COMMAND_SIZE, 0); 85 16, 4, cmd->cdb, MAX_COMMAND_SIZE, 0);
90 } 86 }
91 87
92 static void ft_free_cmd(struct ft_cmd *cmd) 88 static void ft_free_cmd(struct ft_cmd *cmd)
93 { 89 {
94 struct fc_frame *fp; 90 struct fc_frame *fp;
95 struct fc_lport *lport; 91 struct fc_lport *lport;
96 92
97 if (!cmd) 93 if (!cmd)
98 return; 94 return;
99 fp = cmd->req_frame; 95 fp = cmd->req_frame;
100 lport = fr_dev(fp); 96 lport = fr_dev(fp);
101 if (fr_seq(fp)) 97 if (fr_seq(fp))
102 lport->tt.seq_release(fr_seq(fp)); 98 lport->tt.seq_release(fr_seq(fp));
103 fc_frame_free(fp); 99 fc_frame_free(fp);
104 ft_sess_put(cmd->sess); /* undo get from lookup at recv */ 100 ft_sess_put(cmd->sess); /* undo get from lookup at recv */
105 kfree(cmd); 101 kfree(cmd);
106 } 102 }
107 103
108 void ft_release_cmd(struct se_cmd *se_cmd) 104 void ft_release_cmd(struct se_cmd *se_cmd)
109 { 105 {
110 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd); 106 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
111 107
112 ft_free_cmd(cmd); 108 ft_free_cmd(cmd);
113 } 109 }
114 110
115 int ft_check_stop_free(struct se_cmd *se_cmd) 111 int ft_check_stop_free(struct se_cmd *se_cmd)
116 { 112 {
117 transport_generic_free_cmd(se_cmd, 0); 113 transport_generic_free_cmd(se_cmd, 0);
118 return 1; 114 return 1;
119 } 115 }
120 116
121 /* 117 /*
122 * Send response. 118 * Send response.
123 */ 119 */
124 int ft_queue_status(struct se_cmd *se_cmd) 120 int ft_queue_status(struct se_cmd *se_cmd)
125 { 121 {
126 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd); 122 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
127 struct fc_frame *fp; 123 struct fc_frame *fp;
128 struct fcp_resp_with_ext *fcp; 124 struct fcp_resp_with_ext *fcp;
129 struct fc_lport *lport; 125 struct fc_lport *lport;
130 struct fc_exch *ep; 126 struct fc_exch *ep;
131 size_t len; 127 size_t len;
132 128
133 ft_dump_cmd(cmd, __func__); 129 ft_dump_cmd(cmd, __func__);
134 ep = fc_seq_exch(cmd->seq); 130 ep = fc_seq_exch(cmd->seq);
135 lport = ep->lp; 131 lport = ep->lp;
136 len = sizeof(*fcp) + se_cmd->scsi_sense_length; 132 len = sizeof(*fcp) + se_cmd->scsi_sense_length;
137 fp = fc_frame_alloc(lport, len); 133 fp = fc_frame_alloc(lport, len);
138 if (!fp) { 134 if (!fp) {
139 /* XXX shouldn't just drop it - requeue and retry? */ 135 /* XXX shouldn't just drop it - requeue and retry? */
140 return 0; 136 return 0;
141 } 137 }
142 fcp = fc_frame_payload_get(fp, len); 138 fcp = fc_frame_payload_get(fp, len);
143 memset(fcp, 0, len); 139 memset(fcp, 0, len);
144 fcp->resp.fr_status = se_cmd->scsi_status; 140 fcp->resp.fr_status = se_cmd->scsi_status;
145 141
146 len = se_cmd->scsi_sense_length; 142 len = se_cmd->scsi_sense_length;
147 if (len) { 143 if (len) {
148 fcp->resp.fr_flags |= FCP_SNS_LEN_VAL; 144 fcp->resp.fr_flags |= FCP_SNS_LEN_VAL;
149 fcp->ext.fr_sns_len = htonl(len); 145 fcp->ext.fr_sns_len = htonl(len);
150 memcpy((fcp + 1), se_cmd->sense_buffer, len); 146 memcpy((fcp + 1), se_cmd->sense_buffer, len);
151 } 147 }
152 148
153 /* 149 /*
154 * Test underflow and overflow with one mask. Usually both are off. 150 * Test underflow and overflow with one mask. Usually both are off.
155 * Bidirectional commands are not handled yet. 151 * Bidirectional commands are not handled yet.
156 */ 152 */
157 if (se_cmd->se_cmd_flags & (SCF_OVERFLOW_BIT | SCF_UNDERFLOW_BIT)) { 153 if (se_cmd->se_cmd_flags & (SCF_OVERFLOW_BIT | SCF_UNDERFLOW_BIT)) {
158 if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) 154 if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT)
159 fcp->resp.fr_flags |= FCP_RESID_OVER; 155 fcp->resp.fr_flags |= FCP_RESID_OVER;
160 else 156 else
161 fcp->resp.fr_flags |= FCP_RESID_UNDER; 157 fcp->resp.fr_flags |= FCP_RESID_UNDER;
162 fcp->ext.fr_resid = cpu_to_be32(se_cmd->residual_count); 158 fcp->ext.fr_resid = cpu_to_be32(se_cmd->residual_count);
163 } 159 }
164 160
165 /* 161 /*
166 * Send response. 162 * Send response.
167 */ 163 */
168 cmd->seq = lport->tt.seq_start_next(cmd->seq); 164 cmd->seq = lport->tt.seq_start_next(cmd->seq);
169 fc_fill_fc_hdr(fp, FC_RCTL_DD_CMD_STATUS, ep->did, ep->sid, FC_TYPE_FCP, 165 fc_fill_fc_hdr(fp, FC_RCTL_DD_CMD_STATUS, ep->did, ep->sid, FC_TYPE_FCP,
170 FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ, 0); 166 FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ, 0);
171 167
172 lport->tt.seq_send(lport, cmd->seq, fp); 168 lport->tt.seq_send(lport, cmd->seq, fp);
173 lport->tt.exch_done(cmd->seq); 169 lport->tt.exch_done(cmd->seq);
174 return 0; 170 return 0;
175 } 171 }
176 172
177 int ft_write_pending_status(struct se_cmd *se_cmd) 173 int ft_write_pending_status(struct se_cmd *se_cmd)
178 { 174 {
179 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd); 175 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
180 176
181 return cmd->write_data_len != se_cmd->data_length; 177 return cmd->write_data_len != se_cmd->data_length;
182 } 178 }
183 179
184 /* 180 /*
185 * Send TX_RDY (transfer ready). 181 * Send TX_RDY (transfer ready).
186 */ 182 */
187 int ft_write_pending(struct se_cmd *se_cmd) 183 int ft_write_pending(struct se_cmd *se_cmd)
188 { 184 {
189 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd); 185 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
190 struct fc_frame *fp; 186 struct fc_frame *fp;
191 struct fcp_txrdy *txrdy; 187 struct fcp_txrdy *txrdy;
192 struct fc_lport *lport; 188 struct fc_lport *lport;
193 struct fc_exch *ep; 189 struct fc_exch *ep;
194 struct fc_frame_header *fh; 190 struct fc_frame_header *fh;
195 u32 f_ctl; 191 u32 f_ctl;
196 192
197 ft_dump_cmd(cmd, __func__); 193 ft_dump_cmd(cmd, __func__);
198 194
199 ep = fc_seq_exch(cmd->seq); 195 ep = fc_seq_exch(cmd->seq);
200 lport = ep->lp; 196 lport = ep->lp;
201 fp = fc_frame_alloc(lport, sizeof(*txrdy)); 197 fp = fc_frame_alloc(lport, sizeof(*txrdy));
202 if (!fp) 198 if (!fp)
203 return -ENOMEM; /* Signal QUEUE_FULL */ 199 return -ENOMEM; /* Signal QUEUE_FULL */
204 200
205 txrdy = fc_frame_payload_get(fp, sizeof(*txrdy)); 201 txrdy = fc_frame_payload_get(fp, sizeof(*txrdy));
206 memset(txrdy, 0, sizeof(*txrdy)); 202 memset(txrdy, 0, sizeof(*txrdy));
207 txrdy->ft_burst_len = htonl(se_cmd->data_length); 203 txrdy->ft_burst_len = htonl(se_cmd->data_length);
208 204
209 cmd->seq = lport->tt.seq_start_next(cmd->seq); 205 cmd->seq = lport->tt.seq_start_next(cmd->seq);
210 fc_fill_fc_hdr(fp, FC_RCTL_DD_DATA_DESC, ep->did, ep->sid, FC_TYPE_FCP, 206 fc_fill_fc_hdr(fp, FC_RCTL_DD_DATA_DESC, ep->did, ep->sid, FC_TYPE_FCP,
211 FC_FC_EX_CTX | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); 207 FC_FC_EX_CTX | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
212 208
213 fh = fc_frame_header_get(fp); 209 fh = fc_frame_header_get(fp);
214 f_ctl = ntoh24(fh->fh_f_ctl); 210 f_ctl = ntoh24(fh->fh_f_ctl);
215 211
216 /* Only if it is 'Exchange Responder' */ 212 /* Only if it is 'Exchange Responder' */
217 if (f_ctl & FC_FC_EX_CTX) { 213 if (f_ctl & FC_FC_EX_CTX) {
218 /* Target is 'exchange responder' and sending XFER_READY 214 /* Target is 'exchange responder' and sending XFER_READY
219 * to the 'exchange initiator' 215 * to the 'exchange initiator'
220 */ 216 */
221 if ((ep->xid <= lport->lro_xid) && 217 if ((ep->xid <= lport->lro_xid) &&
222 (fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) { 218 (fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) {
223 if (se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { 219 if (se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
224 /* 220 /*
225 * cmd may have been broken up into multiple 221 * cmd may have been broken up into multiple
226 * tasks. Link their sgs together so we can 222 * tasks. Link their sgs together so we can
227 * operate on them all at once. 223 * operate on them all at once.
228 */ 224 */
229 transport_do_task_sg_chain(se_cmd); 225 transport_do_task_sg_chain(se_cmd);
230 cmd->sg = se_cmd->t_tasks_sg_chained; 226 cmd->sg = se_cmd->t_tasks_sg_chained;
231 cmd->sg_cnt = 227 cmd->sg_cnt =
232 se_cmd->t_tasks_sg_chained_no; 228 se_cmd->t_tasks_sg_chained_no;
233 } 229 }
234 if (cmd->sg && lport->tt.ddp_target(lport, ep->xid, 230 if (cmd->sg && lport->tt.ddp_target(lport, ep->xid,
235 cmd->sg, 231 cmd->sg,
236 cmd->sg_cnt)) 232 cmd->sg_cnt))
237 cmd->was_ddp_setup = 1; 233 cmd->was_ddp_setup = 1;
238 } 234 }
239 } 235 }
240 lport->tt.seq_send(lport, cmd->seq, fp); 236 lport->tt.seq_send(lport, cmd->seq, fp);
241 return 0; 237 return 0;
242 } 238 }
243 239
244 u32 ft_get_task_tag(struct se_cmd *se_cmd) 240 u32 ft_get_task_tag(struct se_cmd *se_cmd)
245 { 241 {
246 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd); 242 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
247 243
248 return fc_seq_exch(cmd->seq)->rxid; 244 return fc_seq_exch(cmd->seq)->rxid;
249 } 245 }
250 246
251 int ft_get_cmd_state(struct se_cmd *se_cmd) 247 int ft_get_cmd_state(struct se_cmd *se_cmd)
252 { 248 {
253 return 0; 249 return 0;
254 } 250 }
255 251
256 int ft_is_state_remove(struct se_cmd *se_cmd) 252 int ft_is_state_remove(struct se_cmd *se_cmd)
257 { 253 {
258 return 0; /* XXX TBD */ 254 return 0; /* XXX TBD */
259 } 255 }
260 256
261 /* 257 /*
262 * FC sequence response handler for follow-on sequences (data) and aborts. 258 * FC sequence response handler for follow-on sequences (data) and aborts.
263 */ 259 */
264 static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg) 260 static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg)
265 { 261 {
266 struct ft_cmd *cmd = arg; 262 struct ft_cmd *cmd = arg;
267 struct fc_frame_header *fh; 263 struct fc_frame_header *fh;
268 264
269 if (IS_ERR(fp)) { 265 if (IS_ERR(fp)) {
270 /* XXX need to find cmd if queued */ 266 /* XXX need to find cmd if queued */
271 cmd->seq = NULL; 267 cmd->seq = NULL;
272 transport_generic_free_cmd(&cmd->se_cmd, 0); 268 transport_generic_free_cmd(&cmd->se_cmd, 0);
273 return; 269 return;
274 } 270 }
275 271
276 fh = fc_frame_header_get(fp); 272 fh = fc_frame_header_get(fp);
277 273
278 switch (fh->fh_r_ctl) { 274 switch (fh->fh_r_ctl) {
279 case FC_RCTL_DD_SOL_DATA: /* write data */ 275 case FC_RCTL_DD_SOL_DATA: /* write data */
280 ft_recv_write_data(cmd, fp); 276 ft_recv_write_data(cmd, fp);
281 break; 277 break;
282 case FC_RCTL_DD_UNSOL_CTL: /* command */ 278 case FC_RCTL_DD_UNSOL_CTL: /* command */
283 case FC_RCTL_DD_SOL_CTL: /* transfer ready */ 279 case FC_RCTL_DD_SOL_CTL: /* transfer ready */
284 case FC_RCTL_DD_DATA_DESC: /* transfer ready */ 280 case FC_RCTL_DD_DATA_DESC: /* transfer ready */
285 default: 281 default:
286 pr_debug("%s: unhandled frame r_ctl %x\n", 282 pr_debug("%s: unhandled frame r_ctl %x\n",
287 __func__, fh->fh_r_ctl); 283 __func__, fh->fh_r_ctl);
288 ft_invl_hw_context(cmd); 284 ft_invl_hw_context(cmd);
289 fc_frame_free(fp); 285 fc_frame_free(fp);
290 transport_generic_free_cmd(&cmd->se_cmd, 0); 286 transport_generic_free_cmd(&cmd->se_cmd, 0);
291 break; 287 break;
292 } 288 }
293 } 289 }
294 290
295 /* 291 /*
296 * Send a FCP response including SCSI status and optional FCP rsp_code. 292 * Send a FCP response including SCSI status and optional FCP rsp_code.
297 * status is SAM_STAT_GOOD (zero) iff code is valid. 293 * status is SAM_STAT_GOOD (zero) iff code is valid.
298 * This is used in error cases, such as allocation failures. 294 * This is used in error cases, such as allocation failures.
299 */ 295 */
300 static void ft_send_resp_status(struct fc_lport *lport, 296 static void ft_send_resp_status(struct fc_lport *lport,
301 const struct fc_frame *rx_fp, 297 const struct fc_frame *rx_fp,
302 u32 status, enum fcp_resp_rsp_codes code) 298 u32 status, enum fcp_resp_rsp_codes code)
303 { 299 {
304 struct fc_frame *fp; 300 struct fc_frame *fp;
305 struct fc_seq *sp; 301 struct fc_seq *sp;
306 const struct fc_frame_header *fh; 302 const struct fc_frame_header *fh;
307 size_t len; 303 size_t len;
308 struct fcp_resp_with_ext *fcp; 304 struct fcp_resp_with_ext *fcp;
309 struct fcp_resp_rsp_info *info; 305 struct fcp_resp_rsp_info *info;
310 306
311 fh = fc_frame_header_get(rx_fp); 307 fh = fc_frame_header_get(rx_fp);
312 pr_debug("FCP error response: did %x oxid %x status %x code %x\n", 308 pr_debug("FCP error response: did %x oxid %x status %x code %x\n",
313 ntoh24(fh->fh_s_id), ntohs(fh->fh_ox_id), status, code); 309 ntoh24(fh->fh_s_id), ntohs(fh->fh_ox_id), status, code);
314 len = sizeof(*fcp); 310 len = sizeof(*fcp);
315 if (status == SAM_STAT_GOOD) 311 if (status == SAM_STAT_GOOD)
316 len += sizeof(*info); 312 len += sizeof(*info);
317 fp = fc_frame_alloc(lport, len); 313 fp = fc_frame_alloc(lport, len);
318 if (!fp) 314 if (!fp)
319 return; 315 return;
320 fcp = fc_frame_payload_get(fp, len); 316 fcp = fc_frame_payload_get(fp, len);
321 memset(fcp, 0, len); 317 memset(fcp, 0, len);
322 fcp->resp.fr_status = status; 318 fcp->resp.fr_status = status;
323 if (status == SAM_STAT_GOOD) { 319 if (status == SAM_STAT_GOOD) {
324 fcp->ext.fr_rsp_len = htonl(sizeof(*info)); 320 fcp->ext.fr_rsp_len = htonl(sizeof(*info));
325 fcp->resp.fr_flags |= FCP_RSP_LEN_VAL; 321 fcp->resp.fr_flags |= FCP_RSP_LEN_VAL;
326 info = (struct fcp_resp_rsp_info *)(fcp + 1); 322 info = (struct fcp_resp_rsp_info *)(fcp + 1);
327 info->rsp_code = code; 323 info->rsp_code = code;
328 } 324 }
329 325
330 fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_DD_CMD_STATUS, 0); 326 fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_DD_CMD_STATUS, 0);
331 sp = fr_seq(fp); 327 sp = fr_seq(fp);
332 if (sp) 328 if (sp)
333 lport->tt.seq_send(lport, sp, fp); 329 lport->tt.seq_send(lport, sp, fp);
334 else 330 else
335 lport->tt.frame_send(lport, fp); 331 lport->tt.frame_send(lport, fp);
336 } 332 }
337 333
338 /* 334 /*
339 * Send error or task management response. 335 * Send error or task management response.
340 */ 336 */
341 static void ft_send_resp_code(struct ft_cmd *cmd, 337 static void ft_send_resp_code(struct ft_cmd *cmd,
342 enum fcp_resp_rsp_codes code) 338 enum fcp_resp_rsp_codes code)
343 { 339 {
344 ft_send_resp_status(cmd->sess->tport->lport, 340 ft_send_resp_status(cmd->sess->tport->lport,
345 cmd->req_frame, SAM_STAT_GOOD, code); 341 cmd->req_frame, SAM_STAT_GOOD, code);
346 } 342 }
347 343
348 344
349 /* 345 /*
350 * Send error or task management response. 346 * Send error or task management response.
351 * Always frees the cmd and associated state. 347 * Always frees the cmd and associated state.
352 */ 348 */
353 static void ft_send_resp_code_and_free(struct ft_cmd *cmd, 349 static void ft_send_resp_code_and_free(struct ft_cmd *cmd,
354 enum fcp_resp_rsp_codes code) 350 enum fcp_resp_rsp_codes code)
355 { 351 {
356 ft_send_resp_code(cmd, code); 352 ft_send_resp_code(cmd, code);
357 ft_free_cmd(cmd); 353 ft_free_cmd(cmd);
358 } 354 }
359 355
360 /* 356 /*
361 * Handle Task Management Request. 357 * Handle Task Management Request.
362 */ 358 */
363 static void ft_send_tm(struct ft_cmd *cmd) 359 static void ft_send_tm(struct ft_cmd *cmd)
364 { 360 {
365 struct se_tmr_req *tmr; 361 struct se_tmr_req *tmr;
366 struct fcp_cmnd *fcp; 362 struct fcp_cmnd *fcp;
367 struct ft_sess *sess; 363 struct ft_sess *sess;
368 u8 tm_func; 364 u8 tm_func;
369 365
370 fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp)); 366 fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));
371 367
372 switch (fcp->fc_tm_flags) { 368 switch (fcp->fc_tm_flags) {
373 case FCP_TMF_LUN_RESET: 369 case FCP_TMF_LUN_RESET:
374 tm_func = TMR_LUN_RESET; 370 tm_func = TMR_LUN_RESET;
375 break; 371 break;
376 case FCP_TMF_TGT_RESET: 372 case FCP_TMF_TGT_RESET:
377 tm_func = TMR_TARGET_WARM_RESET; 373 tm_func = TMR_TARGET_WARM_RESET;
378 break; 374 break;
379 case FCP_TMF_CLR_TASK_SET: 375 case FCP_TMF_CLR_TASK_SET:
380 tm_func = TMR_CLEAR_TASK_SET; 376 tm_func = TMR_CLEAR_TASK_SET;
381 break; 377 break;
382 case FCP_TMF_ABT_TASK_SET: 378 case FCP_TMF_ABT_TASK_SET:
383 tm_func = TMR_ABORT_TASK_SET; 379 tm_func = TMR_ABORT_TASK_SET;
384 break; 380 break;
385 case FCP_TMF_CLR_ACA: 381 case FCP_TMF_CLR_ACA:
386 tm_func = TMR_CLEAR_ACA; 382 tm_func = TMR_CLEAR_ACA;
387 break; 383 break;
388 default: 384 default:
389 /* 385 /*
390 * FCP4r01 indicates having a combination of 386 * FCP4r01 indicates having a combination of
391 * tm_flags set is invalid. 387 * tm_flags set is invalid.
392 */ 388 */
393 pr_debug("invalid FCP tm_flags %x\n", fcp->fc_tm_flags); 389 pr_debug("invalid FCP tm_flags %x\n", fcp->fc_tm_flags);
394 ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID); 390 ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID);
395 return; 391 return;
396 } 392 }
397 393
398 pr_debug("alloc tm cmd fn %d\n", tm_func); 394 pr_debug("alloc tm cmd fn %d\n", tm_func);
399 tmr = core_tmr_alloc_req(&cmd->se_cmd, cmd, tm_func, GFP_KERNEL); 395 tmr = core_tmr_alloc_req(&cmd->se_cmd, cmd, tm_func, GFP_KERNEL);
400 if (!tmr) { 396 if (!tmr) {
401 pr_debug("alloc failed\n"); 397 pr_debug("alloc failed\n");
402 ft_send_resp_code_and_free(cmd, FCP_TMF_FAILED); 398 ft_send_resp_code_and_free(cmd, FCP_TMF_FAILED);
403 return; 399 return;
404 } 400 }
405 cmd->se_cmd.se_tmr_req = tmr; 401 cmd->se_cmd.se_tmr_req = tmr;
406 402
407 switch (fcp->fc_tm_flags) { 403 switch (fcp->fc_tm_flags) {
408 case FCP_TMF_LUN_RESET: 404 case FCP_TMF_LUN_RESET:
409 cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun); 405 cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun);
410 if (transport_lookup_tmr_lun(&cmd->se_cmd, cmd->lun) < 0) { 406 if (transport_lookup_tmr_lun(&cmd->se_cmd, cmd->lun) < 0) {
411 /* 407 /*
412 * Make sure to clean up newly allocated TMR request 408 * Make sure to clean up newly allocated TMR request
413 * since "unable to handle TMR request because failed 409 * since "unable to handle TMR request because failed
414 * to get to LUN" 410 * to get to LUN"
415 */ 411 */
416 pr_debug("Failed to get LUN for TMR func %d, " 412 pr_debug("Failed to get LUN for TMR func %d, "
417 "se_cmd %p, unpacked_lun %d\n", 413 "se_cmd %p, unpacked_lun %d\n",
418 tm_func, &cmd->se_cmd, cmd->lun); 414 tm_func, &cmd->se_cmd, cmd->lun);
419 ft_dump_cmd(cmd, __func__); 415 ft_dump_cmd(cmd, __func__);
420 sess = cmd->sess; 416 sess = cmd->sess;
421 transport_send_check_condition_and_sense(&cmd->se_cmd, 417 transport_send_check_condition_and_sense(&cmd->se_cmd,
422 cmd->se_cmd.scsi_sense_reason, 0); 418 cmd->se_cmd.scsi_sense_reason, 0);
423 transport_generic_free_cmd(&cmd->se_cmd, 0); 419 transport_generic_free_cmd(&cmd->se_cmd, 0);
424 ft_sess_put(sess); 420 ft_sess_put(sess);
425 return; 421 return;
426 } 422 }
427 break; 423 break;
428 case FCP_TMF_TGT_RESET: 424 case FCP_TMF_TGT_RESET:
429 case FCP_TMF_CLR_TASK_SET: 425 case FCP_TMF_CLR_TASK_SET:
430 case FCP_TMF_ABT_TASK_SET: 426 case FCP_TMF_ABT_TASK_SET:
431 case FCP_TMF_CLR_ACA: 427 case FCP_TMF_CLR_ACA:
432 break; 428 break;
433 default: 429 default:
434 return; 430 return;
435 } 431 }
436 transport_generic_handle_tmr(&cmd->se_cmd); 432 transport_generic_handle_tmr(&cmd->se_cmd);
437 } 433 }
438 434
439 /* 435 /*
440 * Send status from completed task management request. 436 * Send status from completed task management request.
441 */ 437 */
442 int ft_queue_tm_resp(struct se_cmd *se_cmd) 438 int ft_queue_tm_resp(struct se_cmd *se_cmd)
443 { 439 {
444 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd); 440 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
445 struct se_tmr_req *tmr = se_cmd->se_tmr_req; 441 struct se_tmr_req *tmr = se_cmd->se_tmr_req;
446 enum fcp_resp_rsp_codes code; 442 enum fcp_resp_rsp_codes code;
447 443
448 switch (tmr->response) { 444 switch (tmr->response) {
449 case TMR_FUNCTION_COMPLETE: 445 case TMR_FUNCTION_COMPLETE:
450 code = FCP_TMF_CMPL; 446 code = FCP_TMF_CMPL;
451 break; 447 break;
452 case TMR_LUN_DOES_NOT_EXIST: 448 case TMR_LUN_DOES_NOT_EXIST:
453 code = FCP_TMF_INVALID_LUN; 449 code = FCP_TMF_INVALID_LUN;
454 break; 450 break;
455 case TMR_FUNCTION_REJECTED: 451 case TMR_FUNCTION_REJECTED:
456 code = FCP_TMF_REJECTED; 452 code = FCP_TMF_REJECTED;
457 break; 453 break;
458 case TMR_TASK_DOES_NOT_EXIST: 454 case TMR_TASK_DOES_NOT_EXIST:
459 case TMR_TASK_STILL_ALLEGIANT: 455 case TMR_TASK_STILL_ALLEGIANT:
460 case TMR_TASK_FAILOVER_NOT_SUPPORTED: 456 case TMR_TASK_FAILOVER_NOT_SUPPORTED:
461 case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED: 457 case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
462 case TMR_FUNCTION_AUTHORIZATION_FAILED: 458 case TMR_FUNCTION_AUTHORIZATION_FAILED:
463 default: 459 default:
464 code = FCP_TMF_FAILED; 460 code = FCP_TMF_FAILED;
465 break; 461 break;
466 } 462 }
467 pr_debug("tmr fn %d resp %d fcp code %d\n", 463 pr_debug("tmr fn %d resp %d fcp code %d\n",
468 tmr->function, tmr->response, code); 464 tmr->function, tmr->response, code);
469 ft_send_resp_code(cmd, code); 465 ft_send_resp_code(cmd, code);
470 return 0; 466 return 0;
471 } 467 }
472 468
473 static void ft_send_work(struct work_struct *work); 469 static void ft_send_work(struct work_struct *work);
474 470
475 /* 471 /*
476 * Handle incoming FCP command. 472 * Handle incoming FCP command.
477 */ 473 */
478 static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp) 474 static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
479 { 475 {
480 struct ft_cmd *cmd; 476 struct ft_cmd *cmd;
481 struct fc_lport *lport = sess->tport->lport; 477 struct fc_lport *lport = sess->tport->lport;
482 478
483 cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); 479 cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
484 if (!cmd) 480 if (!cmd)
485 goto busy; 481 goto busy;
486 cmd->sess = sess; 482 cmd->sess = sess;
487 cmd->seq = lport->tt.seq_assign(lport, fp); 483 cmd->seq = lport->tt.seq_assign(lport, fp);
488 if (!cmd->seq) { 484 if (!cmd->seq) {
489 kfree(cmd); 485 kfree(cmd);
490 goto busy; 486 goto busy;
491 } 487 }
492 cmd->req_frame = fp; /* hold frame during cmd */ 488 cmd->req_frame = fp; /* hold frame during cmd */
493 489
494 INIT_WORK(&cmd->work, ft_send_work); 490 INIT_WORK(&cmd->work, ft_send_work);
495 queue_work(sess->tport->tpg->workqueue, &cmd->work); 491 queue_work(sess->tport->tpg->workqueue, &cmd->work);
496 return; 492 return;
497 493
498 busy: 494 busy:
499 pr_debug("cmd or seq allocation failure - sending BUSY\n"); 495 pr_debug("cmd or seq allocation failure - sending BUSY\n");
500 ft_send_resp_status(lport, fp, SAM_STAT_BUSY, 0); 496 ft_send_resp_status(lport, fp, SAM_STAT_BUSY, 0);
501 fc_frame_free(fp); 497 fc_frame_free(fp);
502 ft_sess_put(sess); /* undo get from lookup */ 498 ft_sess_put(sess); /* undo get from lookup */
503 } 499 }
504 500
505 501
506 /* 502 /*
507 * Handle incoming FCP frame. 503 * Handle incoming FCP frame.
508 * Caller has verified that the frame is type FCP. 504 * Caller has verified that the frame is type FCP.
509 */ 505 */
510 void ft_recv_req(struct ft_sess *sess, struct fc_frame *fp) 506 void ft_recv_req(struct ft_sess *sess, struct fc_frame *fp)
511 { 507 {
512 struct fc_frame_header *fh = fc_frame_header_get(fp); 508 struct fc_frame_header *fh = fc_frame_header_get(fp);
513 509
514 switch (fh->fh_r_ctl) { 510 switch (fh->fh_r_ctl) {
515 case FC_RCTL_DD_UNSOL_CMD: /* command */ 511 case FC_RCTL_DD_UNSOL_CMD: /* command */
516 ft_recv_cmd(sess, fp); 512 ft_recv_cmd(sess, fp);
517 break; 513 break;
518 case FC_RCTL_DD_SOL_DATA: /* write data */ 514 case FC_RCTL_DD_SOL_DATA: /* write data */
519 case FC_RCTL_DD_UNSOL_CTL: 515 case FC_RCTL_DD_UNSOL_CTL:
520 case FC_RCTL_DD_SOL_CTL: 516 case FC_RCTL_DD_SOL_CTL:
521 case FC_RCTL_DD_DATA_DESC: /* transfer ready */ 517 case FC_RCTL_DD_DATA_DESC: /* transfer ready */
522 case FC_RCTL_ELS4_REQ: /* SRR, perhaps */ 518 case FC_RCTL_ELS4_REQ: /* SRR, perhaps */
523 default: 519 default:
524 pr_debug("%s: unhandled frame r_ctl %x\n", 520 pr_debug("%s: unhandled frame r_ctl %x\n",
525 __func__, fh->fh_r_ctl); 521 __func__, fh->fh_r_ctl);
526 fc_frame_free(fp); 522 fc_frame_free(fp);
527 ft_sess_put(sess); /* undo get from lookup */ 523 ft_sess_put(sess); /* undo get from lookup */
528 break; 524 break;
529 } 525 }
530 } 526 }
531 527
532 /* 528 /*
533 * Send new command to target. 529 * Send new command to target.
534 */ 530 */
535 static void ft_send_work(struct work_struct *work) 531 static void ft_send_work(struct work_struct *work)
536 { 532 {
537 struct ft_cmd *cmd = container_of(work, struct ft_cmd, work); 533 struct ft_cmd *cmd = container_of(work, struct ft_cmd, work);
538 struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame); 534 struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame);
539 struct se_cmd *se_cmd; 535 struct se_cmd *se_cmd;
540 struct fcp_cmnd *fcp; 536 struct fcp_cmnd *fcp;
541 int data_dir = 0; 537 int data_dir = 0;
542 u32 data_len; 538 u32 data_len;
543 int task_attr; 539 int task_attr;
544 int ret; 540 int ret;
545 541
546 fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp)); 542 fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));
547 if (!fcp) 543 if (!fcp)
548 goto err; 544 goto err;
549 545
550 if (fcp->fc_flags & FCP_CFL_LEN_MASK) 546 if (fcp->fc_flags & FCP_CFL_LEN_MASK)
551 goto err; /* not handling longer CDBs yet */ 547 goto err; /* not handling longer CDBs yet */
552 548
553 if (fcp->fc_tm_flags) { 549 if (fcp->fc_tm_flags) {
554 task_attr = FCP_PTA_SIMPLE; 550 task_attr = FCP_PTA_SIMPLE;
555 data_dir = DMA_NONE; 551 data_dir = DMA_NONE;
556 data_len = 0; 552 data_len = 0;
557 } else { 553 } else {
558 switch (fcp->fc_flags & (FCP_CFL_RDDATA | FCP_CFL_WRDATA)) { 554 switch (fcp->fc_flags & (FCP_CFL_RDDATA | FCP_CFL_WRDATA)) {
559 case 0: 555 case 0:
560 data_dir = DMA_NONE; 556 data_dir = DMA_NONE;
561 break; 557 break;
562 case FCP_CFL_RDDATA: 558 case FCP_CFL_RDDATA:
563 data_dir = DMA_FROM_DEVICE; 559 data_dir = DMA_FROM_DEVICE;
564 break; 560 break;
565 case FCP_CFL_WRDATA: 561 case FCP_CFL_WRDATA:
566 data_dir = DMA_TO_DEVICE; 562 data_dir = DMA_TO_DEVICE;
567 break; 563 break;
568 case FCP_CFL_WRDATA | FCP_CFL_RDDATA: 564 case FCP_CFL_WRDATA | FCP_CFL_RDDATA:
569 goto err; /* TBD not supported by tcm_fc yet */ 565 goto err; /* TBD not supported by tcm_fc yet */
570 } 566 }
571 /* 567 /*
572 * Locate the SAM Task Attr from fc_pri_ta 568 * Locate the SAM Task Attr from fc_pri_ta
573 */ 569 */
574 switch (fcp->fc_pri_ta & FCP_PTA_MASK) { 570 switch (fcp->fc_pri_ta & FCP_PTA_MASK) {
575 case FCP_PTA_HEADQ: 571 case FCP_PTA_HEADQ:
576 task_attr = MSG_HEAD_TAG; 572 task_attr = MSG_HEAD_TAG;
577 break; 573 break;
578 case FCP_PTA_ORDERED: 574 case FCP_PTA_ORDERED:
579 task_attr = MSG_ORDERED_TAG; 575 task_attr = MSG_ORDERED_TAG;
580 break; 576 break;
581 case FCP_PTA_ACA: 577 case FCP_PTA_ACA:
582 task_attr = MSG_ACA_TAG; 578 task_attr = MSG_ACA_TAG;
583 break; 579 break;
584 case FCP_PTA_SIMPLE: /* Fallthrough */ 580 case FCP_PTA_SIMPLE: /* Fallthrough */
585 default: 581 default:
586 task_attr = MSG_SIMPLE_TAG; 582 task_attr = MSG_SIMPLE_TAG;
587 } 583 }
588 584
589 585
590 task_attr = fcp->fc_pri_ta & FCP_PTA_MASK; 586 task_attr = fcp->fc_pri_ta & FCP_PTA_MASK;
591 data_len = ntohl(fcp->fc_dl); 587 data_len = ntohl(fcp->fc_dl);
592 cmd->cdb = fcp->fc_cdb; 588 cmd->cdb = fcp->fc_cdb;
593 } 589 }
594 590
595 se_cmd = &cmd->se_cmd; 591 se_cmd = &cmd->se_cmd;
596 /* 592 /*
597 * Initialize struct se_cmd descriptor from target_core_mod 593 * Initialize struct se_cmd descriptor from target_core_mod
598 * infrastructure 594 * infrastructure
599 */ 595 */
600 transport_init_se_cmd(se_cmd, &ft_configfs->tf_ops, cmd->sess->se_sess, 596 transport_init_se_cmd(se_cmd, &ft_configfs->tf_ops, cmd->sess->se_sess,
601 data_len, data_dir, task_attr, 597 data_len, data_dir, task_attr,
602 &cmd->ft_sense_buffer[0]); 598 &cmd->ft_sense_buffer[0]);
603 /* 599 /*
604 * Check for FCP task management flags 600 * Check for FCP task management flags
605 */ 601 */
606 if (fcp->fc_tm_flags) { 602 if (fcp->fc_tm_flags) {
607 ft_send_tm(cmd); 603 ft_send_tm(cmd);
608 return; 604 return;
609 } 605 }
610 606
611 fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd); 607 fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);
612 608
613 cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun); 609 cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun);
614 ret = transport_lookup_cmd_lun(&cmd->se_cmd, cmd->lun); 610 ret = transport_lookup_cmd_lun(&cmd->se_cmd, cmd->lun);
615 if (ret < 0) { 611 if (ret < 0) {
616 ft_dump_cmd(cmd, __func__); 612 ft_dump_cmd(cmd, __func__);
617 transport_send_check_condition_and_sense(&cmd->se_cmd, 613 transport_send_check_condition_and_sense(&cmd->se_cmd,
618 cmd->se_cmd.scsi_sense_reason, 0); 614 cmd->se_cmd.scsi_sense_reason, 0);
619 return; 615 return;
620 } 616 }
621 617
622 ret = transport_generic_allocate_tasks(se_cmd, cmd->cdb); 618 ret = transport_generic_allocate_tasks(se_cmd, cmd->cdb);
623 619
624 pr_debug("r_ctl %x alloc task ret %d\n", fh->fh_r_ctl, ret); 620 pr_debug("r_ctl %x alloc task ret %d\n", fh->fh_r_ctl, ret);
625 ft_dump_cmd(cmd, __func__); 621 ft_dump_cmd(cmd, __func__);
626 622
627 if (ret == -ENOMEM) { 623 if (ret == -ENOMEM) {
628 transport_send_check_condition_and_sense(se_cmd, 624 transport_send_check_condition_and_sense(se_cmd,
629 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); 625 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
630 transport_generic_free_cmd(se_cmd, 0); 626 transport_generic_free_cmd(se_cmd, 0);
631 return; 627 return;
632 } 628 }
633 if (ret == -EINVAL) { 629 if (ret == -EINVAL) {
634 if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT) 630 if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
635 ft_queue_status(se_cmd); 631 ft_queue_status(se_cmd);
636 else 632 else
637 transport_send_check_condition_and_sense(se_cmd, 633 transport_send_check_condition_and_sense(se_cmd,
638 se_cmd->scsi_sense_reason, 0); 634 se_cmd->scsi_sense_reason, 0);
639 transport_generic_free_cmd(se_cmd, 0); 635 transport_generic_free_cmd(se_cmd, 0);
640 return; 636 return;
641 } 637 }
642 transport_handle_cdb_direct(se_cmd); 638 transport_handle_cdb_direct(se_cmd);
643 return; 639 return;
644 640
645 err: 641 err:
646 ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID); 642 ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID);
647 } 643 }
648 644
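ft_send_work() above decodes the FCP priority/task-attribute byte into a SAM task attribute before initializing the se_cmd (note that, as committed, the switch result is immediately overwritten by the raw fcp->fc_pri_ta & FCP_PTA_MASK assignment). A minimal sketch of the intended mapping, assuming only the FCP_PTA_* constants from <scsi/fc/fc_fcp.h> and the MSG_*_TAG codes from <scsi/scsi_tcq.h>:

	#include <linux/types.h>
	#include <scsi/fc/fc_fcp.h>
	#include <scsi/scsi_tcq.h>

	/* Map the FCP CMND priority/task-attribute field to a SAM task attribute. */
	static int demo_decode_task_attr(u8 fc_pri_ta)
	{
		switch (fc_pri_ta & FCP_PTA_MASK) {
		case FCP_PTA_HEADQ:
			return MSG_HEAD_TAG;
		case FCP_PTA_ORDERED:
			return MSG_ORDERED_TAG;
		case FCP_PTA_ACA:
			return MSG_ACA_TAG;
		case FCP_PTA_SIMPLE:	/* fall through */
		default:
			return MSG_SIMPLE_TAG;
		}
	}

demo_decode_task_attr() is illustrative only; in the real code the decoded attribute is passed to transport_init_se_cmd().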
drivers/target/tcm_fc/tfc_conf.c
1 /******************************************************************************* 1 /*******************************************************************************
2 * Filename: tcm_fc.c 2 * Filename: tcm_fc.c
3 * 3 *
4 * This file contains the configfs implementation for TCM_fc fabric node. 4 * This file contains the configfs implementation for TCM_fc fabric node.
5 * Based on tcm_loop_configfs.c 5 * Based on tcm_loop_configfs.c
6 * 6 *
7 * Copyright (c) 2010 Cisco Systems, Inc. 7 * Copyright (c) 2010 Cisco Systems, Inc.
8 * Copyright (c) 2009,2010 Rising Tide, Inc. 8 * Copyright (c) 2009,2010 Rising Tide, Inc.
9 * Copyright (c) 2009,2010 Linux-iSCSI.org 9 * Copyright (c) 2009,2010 Linux-iSCSI.org
10 * 10 *
11 * Copyright (c) 2009,2010 Nicholas A. Bellinger <nab@linux-iscsi.org> 11 * Copyright (c) 2009,2010 Nicholas A. Bellinger <nab@linux-iscsi.org>
12 * 12 *
13 * This program is free software; you can redistribute it and/or modify 13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by 14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or 15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version. 16 * (at your option) any later version.
17 * 17 *
18 * This program is distributed in the hope that it will be useful, 18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of 19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details. 21 * GNU General Public License for more details.
22 ****************************************************************************/ 22 ****************************************************************************/
23 23
24 #include <linux/module.h> 24 #include <linux/module.h>
25 #include <linux/moduleparam.h> 25 #include <linux/moduleparam.h>
26 #include <generated/utsrelease.h> 26 #include <generated/utsrelease.h>
27 #include <linux/utsname.h> 27 #include <linux/utsname.h>
28 #include <linux/init.h> 28 #include <linux/init.h>
29 #include <linux/slab.h> 29 #include <linux/slab.h>
30 #include <linux/kthread.h> 30 #include <linux/kthread.h>
31 #include <linux/types.h> 31 #include <linux/types.h>
32 #include <linux/string.h> 32 #include <linux/string.h>
33 #include <linux/configfs.h> 33 #include <linux/configfs.h>
34 #include <linux/kernel.h> 34 #include <linux/kernel.h>
35 #include <linux/ctype.h> 35 #include <linux/ctype.h>
36 #include <asm/unaligned.h> 36 #include <asm/unaligned.h>
37 #include <scsi/scsi.h> 37 #include <scsi/scsi.h>
38 #include <scsi/scsi_host.h> 38 #include <scsi/scsi_host.h>
39 #include <scsi/scsi_device.h> 39 #include <scsi/scsi_device.h>
40 #include <scsi/scsi_cmnd.h> 40 #include <scsi/scsi_cmnd.h>
41 #include <scsi/libfc.h> 41 #include <scsi/libfc.h>
42 42
43 #include <target/target_core_base.h> 43 #include <target/target_core_base.h>
44 #include <target/target_core_transport.h> 44 #include <target/target_core_fabric.h>
45 #include <target/target_core_fabric_ops.h>
46 #include <target/target_core_fabric_configfs.h> 45 #include <target/target_core_fabric_configfs.h>
47 #include <target/target_core_fabric_lib.h>
48 #include <target/target_core_device.h>
49 #include <target/target_core_tpg.h>
50 #include <target/target_core_configfs.h> 46 #include <target/target_core_configfs.h>
51 #include <target/configfs_macros.h> 47 #include <target/configfs_macros.h>
52 48
53 #include "tcm_fc.h" 49 #include "tcm_fc.h"
54 50
55 struct target_fabric_configfs *ft_configfs; 51 struct target_fabric_configfs *ft_configfs;
56 52
57 LIST_HEAD(ft_lport_list); 53 LIST_HEAD(ft_lport_list);
58 DEFINE_MUTEX(ft_lport_lock); 54 DEFINE_MUTEX(ft_lport_lock);
59 55
60 unsigned int ft_debug_logging; 56 unsigned int ft_debug_logging;
61 module_param_named(debug_logging, ft_debug_logging, int, S_IRUGO|S_IWUSR); 57 module_param_named(debug_logging, ft_debug_logging, int, S_IRUGO|S_IWUSR);
62 MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels"); 58 MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
63 59
64 /* 60 /*
65 * Parse WWN. 61 * Parse WWN.
66 * If strict, we require lower-case hex and colon separators to be sure 62 * If strict, we require lower-case hex and colon separators to be sure
67 * the name is the same as what would be generated by ft_format_wwn() 63 * the name is the same as what would be generated by ft_format_wwn()
68 * so the name and wwn are mapped one-to-one. 64 * so the name and wwn are mapped one-to-one.
69 */ 65 */
70 static ssize_t ft_parse_wwn(const char *name, u64 *wwn, int strict) 66 static ssize_t ft_parse_wwn(const char *name, u64 *wwn, int strict)
71 { 67 {
72 const char *cp; 68 const char *cp;
73 char c; 69 char c;
74 u32 byte = 0; 70 u32 byte = 0;
75 u32 pos = 0; 71 u32 pos = 0;
76 u32 err; 72 u32 err;
77 int val; 73 int val;
78 74
79 *wwn = 0; 75 *wwn = 0;
80 for (cp = name; cp < &name[FT_NAMELEN - 1]; cp++) { 76 for (cp = name; cp < &name[FT_NAMELEN - 1]; cp++) {
81 c = *cp; 77 c = *cp;
82 if (c == '\n' && cp[1] == '\0') 78 if (c == '\n' && cp[1] == '\0')
83 continue; 79 continue;
84 if (strict && pos++ == 2 && byte++ < 7) { 80 if (strict && pos++ == 2 && byte++ < 7) {
85 pos = 0; 81 pos = 0;
86 if (c == ':') 82 if (c == ':')
87 continue; 83 continue;
88 err = 1; 84 err = 1;
89 goto fail; 85 goto fail;
90 } 86 }
91 if (c == '\0') { 87 if (c == '\0') {
92 err = 2; 88 err = 2;
93 if (strict && byte != 8) 89 if (strict && byte != 8)
94 goto fail; 90 goto fail;
95 return cp - name; 91 return cp - name;
96 } 92 }
97 err = 3; 93 err = 3;
98 val = hex_to_bin(c); 94 val = hex_to_bin(c);
99 if (val < 0 || (strict && isupper(c))) 95 if (val < 0 || (strict && isupper(c)))
100 goto fail; 96 goto fail;
101 *wwn = (*wwn << 4) | val; 97 *wwn = (*wwn << 4) | val;
102 } 98 }
103 err = 4; 99 err = 4;
104 fail: 100 fail:
105 pr_debug("err %u len %zu pos %u byte %u\n", 101 pr_debug("err %u len %zu pos %u byte %u\n",
106 err, cp - name, pos, byte); 102 err, cp - name, pos, byte);
107 return -1; 103 return -1;
108 } 104 }
109 105
110 ssize_t ft_format_wwn(char *buf, size_t len, u64 wwn) 106 ssize_t ft_format_wwn(char *buf, size_t len, u64 wwn)
111 { 107 {
112 u8 b[8]; 108 u8 b[8];
113 109
114 put_unaligned_be64(wwn, b); 110 put_unaligned_be64(wwn, b);
115 return snprintf(buf, len, 111 return snprintf(buf, len,
116 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x", 112 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
117 b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]); 113 b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
118 } 114 }
119 115
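The strict parse above exists so that configfs names stay canonical: a strict name is accepted only if ft_format_wwn() would regenerate it byte for byte (lower-case hex, colon separators, exactly eight bytes). A self-contained userspace sketch of that canonical form follows; format_wwn() here is a hypothetical stand-in, not the kernel function:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-in for ft_format_wwn(): emit the canonical form
 * that ft_parse_wwn(..., strict=1) accepts. */
static void format_wwn(char *buf, size_t len, uint64_t wwn)
{
	snprintf(buf, len, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
		 (unsigned)(wwn >> 56) & 0xff, (unsigned)(wwn >> 48) & 0xff,
		 (unsigned)(wwn >> 40) & 0xff, (unsigned)(wwn >> 32) & 0xff,
		 (unsigned)(wwn >> 24) & 0xff, (unsigned)(wwn >> 16) & 0xff,
		 (unsigned)(wwn >> 8) & 0xff, (unsigned)wwn & 0xff);
}

int main(void)
{
	char name[24];	/* 8 bytes * 2 hex digits + 7 colons + NUL */

	format_wwn(name, sizeof(name), 0x1000000000000001ULL);
	printf("%s\n", name);	/* prints 10:00:00:00:00:00:00:01 */
	return 0;
}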
120 static ssize_t ft_wwn_show(void *arg, char *buf) 116 static ssize_t ft_wwn_show(void *arg, char *buf)
121 { 117 {
122 u64 *wwn = arg; 118 u64 *wwn = arg;
123 ssize_t len; 119 ssize_t len;
124 120
125 len = ft_format_wwn(buf, PAGE_SIZE - 2, *wwn); 121 len = ft_format_wwn(buf, PAGE_SIZE - 2, *wwn);
126 buf[len++] = '\n'; 122 buf[len++] = '\n';
127 return len; 123 return len;
128 } 124 }
129 125
130 static ssize_t ft_wwn_store(void *arg, const char *buf, size_t len) 126 static ssize_t ft_wwn_store(void *arg, const char *buf, size_t len)
131 { 127 {
132 ssize_t ret; 128 ssize_t ret;
133 u64 wwn; 129 u64 wwn;
134 130
135 ret = ft_parse_wwn(buf, &wwn, 0); 131 ret = ft_parse_wwn(buf, &wwn, 0);
136 if (ret > 0) 132 if (ret > 0)
137 *(u64 *)arg = wwn; 133 *(u64 *)arg = wwn;
138 return ret; 134 return ret;
139 } 135 }
140 136
141 /* 137 /*
142 * ACL auth ops. 138 * ACL auth ops.
143 */ 139 */
144 140
145 static ssize_t ft_nacl_show_port_name( 141 static ssize_t ft_nacl_show_port_name(
146 struct se_node_acl *se_nacl, 142 struct se_node_acl *se_nacl,
147 char *page) 143 char *page)
148 { 144 {
149 struct ft_node_acl *acl = container_of(se_nacl, 145 struct ft_node_acl *acl = container_of(se_nacl,
150 struct ft_node_acl, se_node_acl); 146 struct ft_node_acl, se_node_acl);
151 147
152 return ft_wwn_show(&acl->node_auth.port_name, page); 148 return ft_wwn_show(&acl->node_auth.port_name, page);
153 } 149 }
154 150
155 static ssize_t ft_nacl_store_port_name( 151 static ssize_t ft_nacl_store_port_name(
156 struct se_node_acl *se_nacl, 152 struct se_node_acl *se_nacl,
157 const char *page, 153 const char *page,
158 size_t count) 154 size_t count)
159 { 155 {
160 struct ft_node_acl *acl = container_of(se_nacl, 156 struct ft_node_acl *acl = container_of(se_nacl,
161 struct ft_node_acl, se_node_acl); 157 struct ft_node_acl, se_node_acl);
162 158
163 return ft_wwn_store(&acl->node_auth.port_name, page, count); 159 return ft_wwn_store(&acl->node_auth.port_name, page, count);
164 } 160 }
165 161
166 TF_NACL_BASE_ATTR(ft, port_name, S_IRUGO | S_IWUSR); 162 TF_NACL_BASE_ATTR(ft, port_name, S_IRUGO | S_IWUSR);
167 163
168 static ssize_t ft_nacl_show_node_name( 164 static ssize_t ft_nacl_show_node_name(
169 struct se_node_acl *se_nacl, 165 struct se_node_acl *se_nacl,
170 char *page) 166 char *page)
171 { 167 {
172 struct ft_node_acl *acl = container_of(se_nacl, 168 struct ft_node_acl *acl = container_of(se_nacl,
173 struct ft_node_acl, se_node_acl); 169 struct ft_node_acl, se_node_acl);
174 170
175 return ft_wwn_show(&acl->node_auth.node_name, page); 171 return ft_wwn_show(&acl->node_auth.node_name, page);
176 } 172 }
177 173
178 static ssize_t ft_nacl_store_node_name( 174 static ssize_t ft_nacl_store_node_name(
179 struct se_node_acl *se_nacl, 175 struct se_node_acl *se_nacl,
180 const char *page, 176 const char *page,
181 size_t count) 177 size_t count)
182 { 178 {
183 struct ft_node_acl *acl = container_of(se_nacl, 179 struct ft_node_acl *acl = container_of(se_nacl,
184 struct ft_node_acl, se_node_acl); 180 struct ft_node_acl, se_node_acl);
185 181
186 return ft_wwn_store(&acl->node_auth.node_name, page, count); 182 return ft_wwn_store(&acl->node_auth.node_name, page, count);
187 } 183 }
188 184
189 TF_NACL_BASE_ATTR(ft, node_name, S_IRUGO | S_IWUSR); 185 TF_NACL_BASE_ATTR(ft, node_name, S_IRUGO | S_IWUSR);
190 186
191 static struct configfs_attribute *ft_nacl_base_attrs[] = { 187 static struct configfs_attribute *ft_nacl_base_attrs[] = {
192 &ft_nacl_port_name.attr, 188 &ft_nacl_port_name.attr,
193 &ft_nacl_node_name.attr, 189 &ft_nacl_node_name.attr,
194 NULL, 190 NULL,
195 }; 191 };
196 192
197 /* 193 /*
198 * ACL ops. 194 * ACL ops.
199 */ 195 */
200 196
201 /* 197 /*
202 * Add ACL for an initiator. The ACL is named arbitrarily. 198 * Add ACL for an initiator. The ACL is named arbitrarily.
203 * The port_name and/or node_name are attributes. 199 * The port_name and/or node_name are attributes.
204 */ 200 */
205 static struct se_node_acl *ft_add_acl( 201 static struct se_node_acl *ft_add_acl(
206 struct se_portal_group *se_tpg, 202 struct se_portal_group *se_tpg,
207 struct config_group *group, 203 struct config_group *group,
208 const char *name) 204 const char *name)
209 { 205 {
210 struct ft_node_acl *acl; 206 struct ft_node_acl *acl;
211 struct ft_tpg *tpg; 207 struct ft_tpg *tpg;
212 u64 wwpn; 208 u64 wwpn;
213 u32 q_depth; 209 u32 q_depth;
214 210
215 pr_debug("add acl %s\n", name); 211 pr_debug("add acl %s\n", name);
216 tpg = container_of(se_tpg, struct ft_tpg, se_tpg); 212 tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
217 213
218 if (ft_parse_wwn(name, &wwpn, 1) < 0) 214 if (ft_parse_wwn(name, &wwpn, 1) < 0)
219 return ERR_PTR(-EINVAL); 215 return ERR_PTR(-EINVAL);
220 216
221 acl = kzalloc(sizeof(struct ft_node_acl), GFP_KERNEL); 217 acl = kzalloc(sizeof(struct ft_node_acl), GFP_KERNEL);
222 if (!acl) 218 if (!acl)
223 return ERR_PTR(-ENOMEM); 219 return ERR_PTR(-ENOMEM);
224 acl->node_auth.port_name = wwpn; 220 acl->node_auth.port_name = wwpn;
225 221
226 q_depth = 32; /* XXX bogus default - get from tpg? */ 222 q_depth = 32; /* XXX bogus default - get from tpg? */
227 return core_tpg_add_initiator_node_acl(&tpg->se_tpg, 223 return core_tpg_add_initiator_node_acl(&tpg->se_tpg,
228 &acl->se_node_acl, name, q_depth); 224 &acl->se_node_acl, name, q_depth);
229 } 225 }
230 226
231 static void ft_del_acl(struct se_node_acl *se_acl) 227 static void ft_del_acl(struct se_node_acl *se_acl)
232 { 228 {
233 struct se_portal_group *se_tpg = se_acl->se_tpg; 229 struct se_portal_group *se_tpg = se_acl->se_tpg;
234 struct ft_tpg *tpg; 230 struct ft_tpg *tpg;
235 struct ft_node_acl *acl = container_of(se_acl, 231 struct ft_node_acl *acl = container_of(se_acl,
236 struct ft_node_acl, se_node_acl); 232 struct ft_node_acl, se_node_acl);
237 233
238 pr_debug("del acl %s\n", 234 pr_debug("del acl %s\n",
239 config_item_name(&se_acl->acl_group.cg_item)); 235 config_item_name(&se_acl->acl_group.cg_item));
240 236
241 tpg = container_of(se_tpg, struct ft_tpg, se_tpg); 237 tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
242 pr_debug("del acl %p se_acl %p tpg %p se_tpg %p\n", 238 pr_debug("del acl %p se_acl %p tpg %p se_tpg %p\n",
243 acl, se_acl, tpg, &tpg->se_tpg); 239 acl, se_acl, tpg, &tpg->se_tpg);
244 240
245 core_tpg_del_initiator_node_acl(&tpg->se_tpg, se_acl, 1); 241 core_tpg_del_initiator_node_acl(&tpg->se_tpg, se_acl, 1);
246 kfree(acl); 242 kfree(acl);
247 } 243 }
248 244
249 struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata) 245 struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata)
250 { 246 {
251 struct ft_node_acl *found = NULL; 247 struct ft_node_acl *found = NULL;
252 struct ft_node_acl *acl; 248 struct ft_node_acl *acl;
253 struct se_portal_group *se_tpg = &tpg->se_tpg; 249 struct se_portal_group *se_tpg = &tpg->se_tpg;
254 struct se_node_acl *se_acl; 250 struct se_node_acl *se_acl;
255 251
256 spin_lock_irq(&se_tpg->acl_node_lock); 252 spin_lock_irq(&se_tpg->acl_node_lock);
257 list_for_each_entry(se_acl, &se_tpg->acl_node_list, acl_list) { 253 list_for_each_entry(se_acl, &se_tpg->acl_node_list, acl_list) {
258 acl = container_of(se_acl, struct ft_node_acl, se_node_acl); 254 acl = container_of(se_acl, struct ft_node_acl, se_node_acl);
259 pr_debug("acl %p port_name %llx\n", 255 pr_debug("acl %p port_name %llx\n",
260 acl, (unsigned long long)acl->node_auth.port_name); 256 acl, (unsigned long long)acl->node_auth.port_name);
261 if (acl->node_auth.port_name == rdata->ids.port_name || 257 if (acl->node_auth.port_name == rdata->ids.port_name ||
262 acl->node_auth.node_name == rdata->ids.node_name) { 258 acl->node_auth.node_name == rdata->ids.node_name) {
263 pr_debug("acl %p port_name %llx matched\n", acl, 259 pr_debug("acl %p port_name %llx matched\n", acl,
264 (unsigned long long)rdata->ids.port_name); 260 (unsigned long long)rdata->ids.port_name);
265 found = acl; 261 found = acl;
266 /* XXX need to hold onto ACL */ 262 /* XXX need to hold onto ACL */
267 break; 263 break;
268 } 264 }
269 } 265 }
270 spin_unlock_irq(&se_tpg->acl_node_lock); 266 spin_unlock_irq(&se_tpg->acl_node_lock);
271 return found; 267 return found;
272 } 268 }
273 269
274 struct se_node_acl *ft_tpg_alloc_fabric_acl(struct se_portal_group *se_tpg) 270 struct se_node_acl *ft_tpg_alloc_fabric_acl(struct se_portal_group *se_tpg)
275 { 271 {
276 struct ft_node_acl *acl; 272 struct ft_node_acl *acl;
277 273
278 acl = kzalloc(sizeof(*acl), GFP_KERNEL); 274 acl = kzalloc(sizeof(*acl), GFP_KERNEL);
279 if (!acl) { 275 if (!acl) {
280 pr_err("Unable to allocate struct ft_node_acl\n"); 276 pr_err("Unable to allocate struct ft_node_acl\n");
281 return NULL; 277 return NULL;
282 } 278 }
283 pr_debug("acl %p\n", acl); 279 pr_debug("acl %p\n", acl);
284 return &acl->se_node_acl; 280 return &acl->se_node_acl;
285 } 281 }
286 282
287 static void ft_tpg_release_fabric_acl(struct se_portal_group *se_tpg, 283 static void ft_tpg_release_fabric_acl(struct se_portal_group *se_tpg,
288 struct se_node_acl *se_acl) 284 struct se_node_acl *se_acl)
289 { 285 {
290 struct ft_node_acl *acl = container_of(se_acl, 286 struct ft_node_acl *acl = container_of(se_acl,
291 struct ft_node_acl, se_node_acl); 287 struct ft_node_acl, se_node_acl);
292 288
293 pr_debug("acl %p\n", acl); 289 pr_debug("acl %p\n", acl);
294 kfree(acl); 290 kfree(acl);
295 } 291 }
296 292
297 /* 293 /*
298 * local_port port_group (tpg) ops. 294 * local_port port_group (tpg) ops.
299 */ 295 */
300 static struct se_portal_group *ft_add_tpg( 296 static struct se_portal_group *ft_add_tpg(
301 struct se_wwn *wwn, 297 struct se_wwn *wwn,
302 struct config_group *group, 298 struct config_group *group,
303 const char *name) 299 const char *name)
304 { 300 {
305 struct ft_lport_acl *lacl; 301 struct ft_lport_acl *lacl;
306 struct ft_tpg *tpg; 302 struct ft_tpg *tpg;
307 unsigned long index; 303 unsigned long index;
308 int ret; 304 int ret;
309 305
310 pr_debug("tcm_fc: add tpg %s\n", name); 306 pr_debug("tcm_fc: add tpg %s\n", name);
311 307
312 /* 308 /*
313 * Name must be "tpgt_" followed by the index. 309 * Name must be "tpgt_" followed by the index.
314 */ 310 */
315 if (strstr(name, "tpgt_") != name) 311 if (strstr(name, "tpgt_") != name)
316 return NULL; 312 return NULL;
317 if (strict_strtoul(name + 5, 10, &index) || index > UINT_MAX) 313 if (strict_strtoul(name + 5, 10, &index) || index > UINT_MAX)
318 return NULL; 314 return NULL;
319 315
320 lacl = container_of(wwn, struct ft_lport_acl, fc_lport_wwn); 316 lacl = container_of(wwn, struct ft_lport_acl, fc_lport_wwn);
321 tpg = kzalloc(sizeof(*tpg), GFP_KERNEL); 317 tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
322 if (!tpg) 318 if (!tpg)
323 return NULL; 319 return NULL;
324 tpg->index = index; 320 tpg->index = index;
325 tpg->lport_acl = lacl; 321 tpg->lport_acl = lacl;
326 INIT_LIST_HEAD(&tpg->lun_list); 322 INIT_LIST_HEAD(&tpg->lun_list);
327 323
328 ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg, 324 ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg,
329 tpg, TRANSPORT_TPG_TYPE_NORMAL); 325 tpg, TRANSPORT_TPG_TYPE_NORMAL);
330 if (ret < 0) { 326 if (ret < 0) {
331 kfree(tpg); 327 kfree(tpg);
332 return NULL; 328 return NULL;
333 } 329 }
334 330
335 tpg->workqueue = alloc_workqueue("tcm_fc", 0, 1); 331 tpg->workqueue = alloc_workqueue("tcm_fc", 0, 1);
336 if (!tpg->workqueue) { 332 if (!tpg->workqueue) {
337 kfree(tpg); 333 kfree(tpg);
338 return NULL; 334 return NULL;
339 } 335 }
340 336
341 mutex_lock(&ft_lport_lock); 337 mutex_lock(&ft_lport_lock);
342 list_add_tail(&tpg->list, &lacl->tpg_list); 338 list_add_tail(&tpg->list, &lacl->tpg_list);
343 mutex_unlock(&ft_lport_lock); 339 mutex_unlock(&ft_lport_lock);
344 340
345 return &tpg->se_tpg; 341 return &tpg->se_tpg;
346 } 342 }
347 343
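A TPG is thus created by making a directory named "tpgt_" plus a decimal index (e.g. tpgt_1) under the lport's WWN directory. A minimal userspace sketch of the same name check, mirroring the strstr()/strict_strtoul() logic above with a hypothetical helper:

#include <limits.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical mirror of ft_add_tpg()'s name validation. */
static int parse_tpgt(const char *name, unsigned long *index)
{
	char *end;

	if (strncmp(name, "tpgt_", 5))
		return -1;			/* must start with "tpgt_" */
	*index = strtoul(name + 5, &end, 10);
	if (end == name + 5 || *end || *index > UINT_MAX)
		return -1;			/* not a plain decimal index */
	return 0;
}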
348 static void ft_del_tpg(struct se_portal_group *se_tpg) 344 static void ft_del_tpg(struct se_portal_group *se_tpg)
349 { 345 {
350 struct ft_tpg *tpg = container_of(se_tpg, struct ft_tpg, se_tpg); 346 struct ft_tpg *tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
351 347
352 pr_debug("del tpg %s\n", 348 pr_debug("del tpg %s\n",
353 config_item_name(&tpg->se_tpg.tpg_group.cg_item)); 349 config_item_name(&tpg->se_tpg.tpg_group.cg_item));
354 350
355 destroy_workqueue(tpg->workqueue); 351 destroy_workqueue(tpg->workqueue);
356 352
357 /* Wait for sessions to be freed thru RCU, for BUG_ON below */ 353 /* Wait for sessions to be freed thru RCU, for BUG_ON below */
358 synchronize_rcu(); 354 synchronize_rcu();
359 355
360 mutex_lock(&ft_lport_lock); 356 mutex_lock(&ft_lport_lock);
361 list_del(&tpg->list); 357 list_del(&tpg->list);
362 if (tpg->tport) { 358 if (tpg->tport) {
363 tpg->tport->tpg = NULL; 359 tpg->tport->tpg = NULL;
364 tpg->tport = NULL; 360 tpg->tport = NULL;
365 } 361 }
366 mutex_unlock(&ft_lport_lock); 362 mutex_unlock(&ft_lport_lock);
367 363
368 core_tpg_deregister(se_tpg); 364 core_tpg_deregister(se_tpg);
369 kfree(tpg); 365 kfree(tpg);
370 } 366 }
371 367
372 /* 368 /*
373 * Verify that an lport is configured to use the tcm_fc module, and return 369 * Verify that an lport is configured to use the tcm_fc module, and return
374 * the target port group that should be used. 370 * the target port group that should be used.
375 * 371 *
376 * The caller holds ft_lport_lock. 372 * The caller holds ft_lport_lock.
377 */ 373 */
378 struct ft_tpg *ft_lport_find_tpg(struct fc_lport *lport) 374 struct ft_tpg *ft_lport_find_tpg(struct fc_lport *lport)
379 { 375 {
380 struct ft_lport_acl *lacl; 376 struct ft_lport_acl *lacl;
381 struct ft_tpg *tpg; 377 struct ft_tpg *tpg;
382 378
383 list_for_each_entry(lacl, &ft_lport_list, list) { 379 list_for_each_entry(lacl, &ft_lport_list, list) {
384 if (lacl->wwpn == lport->wwpn) { 380 if (lacl->wwpn == lport->wwpn) {
385 list_for_each_entry(tpg, &lacl->tpg_list, list) 381 list_for_each_entry(tpg, &lacl->tpg_list, list)
386 return tpg; /* XXX for now return first entry */ 382 return tpg; /* XXX for now return first entry */
387 return NULL; 383 return NULL;
388 } 384 }
389 } 385 }
390 return NULL; 386 return NULL;
391 } 387 }
392 388
393 /* 389 /*
394 * target config instance ops. 390 * target config instance ops.
395 */ 391 */
396 392
397 /* 393 /*
398 * Add lport to allowed config. 394 * Add lport to allowed config.
399 * The name is the WWPN in lower-case ASCII, colon-separated bytes. 395 * The name is the WWPN in lower-case ASCII, colon-separated bytes.
400 */ 396 */
401 static struct se_wwn *ft_add_lport( 397 static struct se_wwn *ft_add_lport(
402 struct target_fabric_configfs *tf, 398 struct target_fabric_configfs *tf,
403 struct config_group *group, 399 struct config_group *group,
404 const char *name) 400 const char *name)
405 { 401 {
406 struct ft_lport_acl *lacl; 402 struct ft_lport_acl *lacl;
407 struct ft_lport_acl *old_lacl; 403 struct ft_lport_acl *old_lacl;
408 u64 wwpn; 404 u64 wwpn;
409 405
410 pr_debug("add lport %s\n", name); 406 pr_debug("add lport %s\n", name);
411 if (ft_parse_wwn(name, &wwpn, 1) < 0) 407 if (ft_parse_wwn(name, &wwpn, 1) < 0)
412 return NULL; 408 return NULL;
413 lacl = kzalloc(sizeof(*lacl), GFP_KERNEL); 409 lacl = kzalloc(sizeof(*lacl), GFP_KERNEL);
414 if (!lacl) 410 if (!lacl)
415 return NULL; 411 return NULL;
416 lacl->wwpn = wwpn; 412 lacl->wwpn = wwpn;
417 INIT_LIST_HEAD(&lacl->tpg_list); 413 INIT_LIST_HEAD(&lacl->tpg_list);
418 414
419 mutex_lock(&ft_lport_lock); 415 mutex_lock(&ft_lport_lock);
420 list_for_each_entry(old_lacl, &ft_lport_list, list) { 416 list_for_each_entry(old_lacl, &ft_lport_list, list) {
421 if (old_lacl->wwpn == wwpn) { 417 if (old_lacl->wwpn == wwpn) {
422 mutex_unlock(&ft_lport_lock); 418 mutex_unlock(&ft_lport_lock);
423 kfree(lacl); 419 kfree(lacl);
424 return NULL; 420 return NULL;
425 } 421 }
426 } 422 }
427 list_add_tail(&lacl->list, &ft_lport_list); 423 list_add_tail(&lacl->list, &ft_lport_list);
428 ft_format_wwn(lacl->name, sizeof(lacl->name), wwpn); 424 ft_format_wwn(lacl->name, sizeof(lacl->name), wwpn);
429 mutex_unlock(&ft_lport_lock); 425 mutex_unlock(&ft_lport_lock);
430 426
431 return &lacl->fc_lport_wwn; 427 return &lacl->fc_lport_wwn;
432 } 428 }
433 429
434 static void ft_del_lport(struct se_wwn *wwn) 430 static void ft_del_lport(struct se_wwn *wwn)
435 { 431 {
436 struct ft_lport_acl *lacl = container_of(wwn, 432 struct ft_lport_acl *lacl = container_of(wwn,
437 struct ft_lport_acl, fc_lport_wwn); 433 struct ft_lport_acl, fc_lport_wwn);
438 434
439 pr_debug("del lport %s\n", lacl->name); 435 pr_debug("del lport %s\n", lacl->name);
440 mutex_lock(&ft_lport_lock); 436 mutex_lock(&ft_lport_lock);
441 list_del(&lacl->list); 437 list_del(&lacl->list);
442 mutex_unlock(&ft_lport_lock); 438 mutex_unlock(&ft_lport_lock);
443 439
444 kfree(lacl); 440 kfree(lacl);
445 } 441 }
446 442
447 static ssize_t ft_wwn_show_attr_version( 443 static ssize_t ft_wwn_show_attr_version(
448 struct target_fabric_configfs *tf, 444 struct target_fabric_configfs *tf,
449 char *page) 445 char *page)
450 { 446 {
451 return sprintf(page, "TCM FC " FT_VERSION " on %s/%s on " 447 return sprintf(page, "TCM FC " FT_VERSION " on %s/%s on "
452 ""UTS_RELEASE"\n", utsname()->sysname, utsname()->machine); 448 ""UTS_RELEASE"\n", utsname()->sysname, utsname()->machine);
453 } 449 }
454 450
455 TF_WWN_ATTR_RO(ft, version); 451 TF_WWN_ATTR_RO(ft, version);
456 452
457 static struct configfs_attribute *ft_wwn_attrs[] = { 453 static struct configfs_attribute *ft_wwn_attrs[] = {
458 &ft_wwn_version.attr, 454 &ft_wwn_version.attr,
459 NULL, 455 NULL,
460 }; 456 };
461 457
462 static char *ft_get_fabric_name(void) 458 static char *ft_get_fabric_name(void)
463 { 459 {
464 return "fc"; 460 return "fc";
465 } 461 }
466 462
467 static char *ft_get_fabric_wwn(struct se_portal_group *se_tpg) 463 static char *ft_get_fabric_wwn(struct se_portal_group *se_tpg)
468 { 464 {
469 struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr; 465 struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr;
470 466
471 return tpg->lport_acl->name; 467 return tpg->lport_acl->name;
472 } 468 }
473 469
474 static u16 ft_get_tag(struct se_portal_group *se_tpg) 470 static u16 ft_get_tag(struct se_portal_group *se_tpg)
475 { 471 {
476 struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr; 472 struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr;
477 473
478 /* 474 /*
479 * This tag is used when forming the SCSI Name identifier in the 475 * This tag is used when forming the SCSI Name identifier in the
480 * INQUIRY EVPD=1 page 0x83 to represent the SCSI Target Port. 476 * INQUIRY EVPD=1 page 0x83 to represent the SCSI Target Port.
481 */ 477 */
482 return tpg->index; 478 return tpg->index;
483 } 479 }
484 480
485 static u32 ft_get_default_depth(struct se_portal_group *se_tpg) 481 static u32 ft_get_default_depth(struct se_portal_group *se_tpg)
486 { 482 {
487 return 1; 483 return 1;
488 } 484 }
489 485
490 static int ft_check_false(struct se_portal_group *se_tpg) 486 static int ft_check_false(struct se_portal_group *se_tpg)
491 { 487 {
492 return 0; 488 return 0;
493 } 489 }
494 490
495 static void ft_set_default_node_attr(struct se_node_acl *se_nacl) 491 static void ft_set_default_node_attr(struct se_node_acl *se_nacl)
496 { 492 {
497 } 493 }
498 494
499 static u16 ft_get_fabric_sense_len(void) 495 static u16 ft_get_fabric_sense_len(void)
500 { 496 {
501 return 0; 497 return 0;
502 } 498 }
503 499
504 static u16 ft_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_len) 500 static u16 ft_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_len)
505 { 501 {
506 return 0; 502 return 0;
507 } 503 }
508 504
509 static u32 ft_tpg_get_inst_index(struct se_portal_group *se_tpg) 505 static u32 ft_tpg_get_inst_index(struct se_portal_group *se_tpg)
510 { 506 {
511 struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr; 507 struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr;
512 508
513 return tpg->index; 509 return tpg->index;
514 } 510 }
515 511
516 static struct target_core_fabric_ops ft_fabric_ops = { 512 static struct target_core_fabric_ops ft_fabric_ops = {
517 .get_fabric_name = ft_get_fabric_name, 513 .get_fabric_name = ft_get_fabric_name,
518 .get_fabric_proto_ident = fc_get_fabric_proto_ident, 514 .get_fabric_proto_ident = fc_get_fabric_proto_ident,
519 .tpg_get_wwn = ft_get_fabric_wwn, 515 .tpg_get_wwn = ft_get_fabric_wwn,
520 .tpg_get_tag = ft_get_tag, 516 .tpg_get_tag = ft_get_tag,
521 .tpg_get_default_depth = ft_get_default_depth, 517 .tpg_get_default_depth = ft_get_default_depth,
522 .tpg_get_pr_transport_id = fc_get_pr_transport_id, 518 .tpg_get_pr_transport_id = fc_get_pr_transport_id,
523 .tpg_get_pr_transport_id_len = fc_get_pr_transport_id_len, 519 .tpg_get_pr_transport_id_len = fc_get_pr_transport_id_len,
524 .tpg_parse_pr_out_transport_id = fc_parse_pr_out_transport_id, 520 .tpg_parse_pr_out_transport_id = fc_parse_pr_out_transport_id,
525 .tpg_check_demo_mode = ft_check_false, 521 .tpg_check_demo_mode = ft_check_false,
526 .tpg_check_demo_mode_cache = ft_check_false, 522 .tpg_check_demo_mode_cache = ft_check_false,
527 .tpg_check_demo_mode_write_protect = ft_check_false, 523 .tpg_check_demo_mode_write_protect = ft_check_false,
528 .tpg_check_prod_mode_write_protect = ft_check_false, 524 .tpg_check_prod_mode_write_protect = ft_check_false,
529 .tpg_alloc_fabric_acl = ft_tpg_alloc_fabric_acl, 525 .tpg_alloc_fabric_acl = ft_tpg_alloc_fabric_acl,
530 .tpg_release_fabric_acl = ft_tpg_release_fabric_acl, 526 .tpg_release_fabric_acl = ft_tpg_release_fabric_acl,
531 .tpg_get_inst_index = ft_tpg_get_inst_index, 527 .tpg_get_inst_index = ft_tpg_get_inst_index,
532 .check_stop_free = ft_check_stop_free, 528 .check_stop_free = ft_check_stop_free,
533 .release_cmd = ft_release_cmd, 529 .release_cmd = ft_release_cmd,
534 .shutdown_session = ft_sess_shutdown, 530 .shutdown_session = ft_sess_shutdown,
535 .close_session = ft_sess_close, 531 .close_session = ft_sess_close,
536 .stop_session = ft_sess_stop, 532 .stop_session = ft_sess_stop,
537 .fall_back_to_erl0 = ft_sess_set_erl0, 533 .fall_back_to_erl0 = ft_sess_set_erl0,
538 .sess_logged_in = ft_sess_logged_in, 534 .sess_logged_in = ft_sess_logged_in,
539 .sess_get_index = ft_sess_get_index, 535 .sess_get_index = ft_sess_get_index,
540 .sess_get_initiator_sid = NULL, 536 .sess_get_initiator_sid = NULL,
541 .write_pending = ft_write_pending, 537 .write_pending = ft_write_pending,
542 .write_pending_status = ft_write_pending_status, 538 .write_pending_status = ft_write_pending_status,
543 .set_default_node_attributes = ft_set_default_node_attr, 539 .set_default_node_attributes = ft_set_default_node_attr,
544 .get_task_tag = ft_get_task_tag, 540 .get_task_tag = ft_get_task_tag,
545 .get_cmd_state = ft_get_cmd_state, 541 .get_cmd_state = ft_get_cmd_state,
546 .queue_data_in = ft_queue_data_in, 542 .queue_data_in = ft_queue_data_in,
547 .queue_status = ft_queue_status, 543 .queue_status = ft_queue_status,
548 .queue_tm_rsp = ft_queue_tm_resp, 544 .queue_tm_rsp = ft_queue_tm_resp,
549 .get_fabric_sense_len = ft_get_fabric_sense_len, 545 .get_fabric_sense_len = ft_get_fabric_sense_len,
550 .set_fabric_sense_len = ft_set_fabric_sense_len, 546 .set_fabric_sense_len = ft_set_fabric_sense_len,
551 .is_state_remove = ft_is_state_remove, 547 .is_state_remove = ft_is_state_remove,
552 /* 548 /*
553 * Setup function pointers for generic logic in 549 * Setup function pointers for generic logic in
554 * target_core_fabric_configfs.c 550 * target_core_fabric_configfs.c
555 */ 551 */
556 .fabric_make_wwn = &ft_add_lport, 552 .fabric_make_wwn = &ft_add_lport,
557 .fabric_drop_wwn = &ft_del_lport, 553 .fabric_drop_wwn = &ft_del_lport,
558 .fabric_make_tpg = &ft_add_tpg, 554 .fabric_make_tpg = &ft_add_tpg,
559 .fabric_drop_tpg = &ft_del_tpg, 555 .fabric_drop_tpg = &ft_del_tpg,
560 .fabric_post_link = NULL, 556 .fabric_post_link = NULL,
561 .fabric_pre_unlink = NULL, 557 .fabric_pre_unlink = NULL,
562 .fabric_make_np = NULL, 558 .fabric_make_np = NULL,
563 .fabric_drop_np = NULL, 559 .fabric_drop_np = NULL,
564 .fabric_make_nodeacl = &ft_add_acl, 560 .fabric_make_nodeacl = &ft_add_acl,
565 .fabric_drop_nodeacl = &ft_del_acl, 561 .fabric_drop_nodeacl = &ft_del_acl,
566 }; 562 };
567 563
568 int ft_register_configfs(void) 564 int ft_register_configfs(void)
569 { 565 {
570 struct target_fabric_configfs *fabric; 566 struct target_fabric_configfs *fabric;
571 int ret; 567 int ret;
572 568
573 /* 569 /*
574 * Register the top level struct config_item_type with TCM core 570 * Register the top level struct config_item_type with TCM core
575 */ 571 */
576 fabric = target_fabric_configfs_init(THIS_MODULE, "fc"); 572 fabric = target_fabric_configfs_init(THIS_MODULE, "fc");
577 if (IS_ERR(fabric)) { 573 if (IS_ERR(fabric)) {
578 pr_err("%s: target_fabric_configfs_init() failed!\n", 574 pr_err("%s: target_fabric_configfs_init() failed!\n",
579 __func__); 575 __func__);
580 return PTR_ERR(fabric); 576 return PTR_ERR(fabric);
581 } 577 }
582 fabric->tf_ops = ft_fabric_ops; 578 fabric->tf_ops = ft_fabric_ops;
583 579
584 /* Allowing support for task_sg_chaining */ 580 /* Allowing support for task_sg_chaining */
585 fabric->tf_ops.task_sg_chaining = 1; 581 fabric->tf_ops.task_sg_chaining = 1;
586 582
587 /* 583 /*
588 * Setup default attribute lists for various fabric->tf_cit_tmpl 584 * Setup default attribute lists for various fabric->tf_cit_tmpl
589 */ 585 */
590 TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = ft_wwn_attrs; 586 TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = ft_wwn_attrs;
591 TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL; 587 TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;
592 TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL; 588 TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
593 TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL; 589 TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
594 TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; 590 TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
595 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = 591 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs =
596 ft_nacl_base_attrs; 592 ft_nacl_base_attrs;
597 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; 593 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
598 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL; 594 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
599 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL; 595 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
600 /* 596 /*
601 * register the fabric for use within TCM 597 * register the fabric for use within TCM
602 */ 598 */
603 ret = target_fabric_configfs_register(fabric); 599 ret = target_fabric_configfs_register(fabric);
604 if (ret < 0) { 600 if (ret < 0) {
605 pr_debug("target_fabric_configfs_register() for" 601 pr_debug("target_fabric_configfs_register() for"
606 " FC Target failed!\n"); 602 " FC Target failed!\n");
607 target_fabric_configfs_free(fabric); 603 target_fabric_configfs_free(fabric);
608 return -1; 604 return -1;
609 } 605 }
610 606
611 /* 607 /*
612 * Setup our local pointer to *fabric. 608 * Setup our local pointer to *fabric.
613 */ 609 */
614 ft_configfs = fabric; 610 ft_configfs = fabric;
615 return 0; 611 return 0;
616 } 612 }
617 613
618 void ft_deregister_configfs(void) 614 void ft_deregister_configfs(void)
619 { 615 {
620 if (!ft_configfs) 616 if (!ft_configfs)
621 return; 617 return;
622 target_fabric_configfs_deregister(ft_configfs); 618 target_fabric_configfs_deregister(ft_configfs);
623 ft_configfs = NULL; 619 ft_configfs = NULL;
624 } 620 }
625 621
626 static struct notifier_block ft_notifier = { 622 static struct notifier_block ft_notifier = {
627 .notifier_call = ft_lport_notify 623 .notifier_call = ft_lport_notify
628 }; 624 };
629 625
630 static int __init ft_init(void) 626 static int __init ft_init(void)
631 { 627 {
632 if (ft_register_configfs()) 628 if (ft_register_configfs())
633 return -1; 629 return -1;
634 if (fc_fc4_register_provider(FC_TYPE_FCP, &ft_prov)) { 630 if (fc_fc4_register_provider(FC_TYPE_FCP, &ft_prov)) {
635 ft_deregister_configfs(); 631 ft_deregister_configfs();
636 return -1; 632 return -1;
637 } 633 }
638 blocking_notifier_chain_register(&fc_lport_notifier_head, &ft_notifier); 634 blocking_notifier_chain_register(&fc_lport_notifier_head, &ft_notifier);
639 fc_lport_iterate(ft_lport_add, NULL); 635 fc_lport_iterate(ft_lport_add, NULL);
640 return 0; 636 return 0;
641 } 637 }
642 638
643 static void __exit ft_exit(void) 639 static void __exit ft_exit(void)
644 { 640 {
645 blocking_notifier_chain_unregister(&fc_lport_notifier_head, 641 blocking_notifier_chain_unregister(&fc_lport_notifier_head,
646 &ft_notifier); 642 &ft_notifier);
647 fc_fc4_deregister_provider(FC_TYPE_FCP, &ft_prov); 643 fc_fc4_deregister_provider(FC_TYPE_FCP, &ft_prov);
648 fc_lport_iterate(ft_lport_del, NULL); 644 fc_lport_iterate(ft_lport_del, NULL);
649 ft_deregister_configfs(); 645 ft_deregister_configfs();
650 synchronize_rcu(); 646 synchronize_rcu();
651 } 647 }
652 648
653 MODULE_DESCRIPTION("FC TCM fabric driver " FT_VERSION); 649 MODULE_DESCRIPTION("FC TCM fabric driver " FT_VERSION);
654 MODULE_LICENSE("GPL"); 650 MODULE_LICENSE("GPL");
655 module_init(ft_init); 651 module_init(ft_init);
656 module_exit(ft_exit); 652 module_exit(ft_exit);
657 653
drivers/target/tcm_fc/tfc_io.c
1 /* 1 /*
2 * Copyright (c) 2010 Cisco Systems, Inc. 2 * Copyright (c) 2010 Cisco Systems, Inc.
3 * 3 *
4 * Portions based on tcm_loop_fabric_scsi.c and libfc/fc_fcp.c 4 * Portions based on tcm_loop_fabric_scsi.c and libfc/fc_fcp.c
5 * 5 *
6 * Copyright (c) 2007 Intel Corporation. All rights reserved. 6 * Copyright (c) 2007 Intel Corporation. All rights reserved.
7 * Copyright (c) 2008 Red Hat, Inc. All rights reserved. 7 * Copyright (c) 2008 Red Hat, Inc. All rights reserved.
8 * Copyright (c) 2008 Mike Christie 8 * Copyright (c) 2008 Mike Christie
9 * Copyright (c) 2009 Rising Tide, Inc. 9 * Copyright (c) 2009 Rising Tide, Inc.
10 * Copyright (c) 2009 Linux-iSCSI.org 10 * Copyright (c) 2009 Linux-iSCSI.org
11 * Copyright (c) 2009 Nicholas A. Bellinger <nab@linux-iscsi.org> 11 * Copyright (c) 2009 Nicholas A. Bellinger <nab@linux-iscsi.org>
12 * 12 *
13 * This program is free software; you can redistribute it and/or modify it 13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms and conditions of the GNU General Public License, 14 * under the terms and conditions of the GNU General Public License,
15 * version 2, as published by the Free Software Foundation. 15 * version 2, as published by the Free Software Foundation.
16 * 16 *
17 * This program is distributed in the hope it will be useful, but WITHOUT 17 * This program is distributed in the hope it will be useful, but WITHOUT
18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
20 * more details. 20 * more details.
21 * 21 *
22 * You should have received a copy of the GNU General Public License along with 22 * You should have received a copy of the GNU General Public License along with
23 * this program; if not, write to the Free Software Foundation, Inc., 23 * this program; if not, write to the Free Software Foundation, Inc.,
24 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 24 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
25 */ 25 */
26 26
27 /* XXX TBD some includes may be extraneous */ 27 /* XXX TBD some includes may be extraneous */
28 28
29 #include <linux/module.h> 29 #include <linux/module.h>
30 #include <linux/moduleparam.h> 30 #include <linux/moduleparam.h>
31 #include <generated/utsrelease.h> 31 #include <generated/utsrelease.h>
32 #include <linux/utsname.h> 32 #include <linux/utsname.h>
33 #include <linux/init.h> 33 #include <linux/init.h>
34 #include <linux/slab.h> 34 #include <linux/slab.h>
35 #include <linux/kthread.h> 35 #include <linux/kthread.h>
36 #include <linux/types.h> 36 #include <linux/types.h>
37 #include <linux/string.h> 37 #include <linux/string.h>
38 #include <linux/configfs.h> 38 #include <linux/configfs.h>
39 #include <linux/ctype.h> 39 #include <linux/ctype.h>
40 #include <linux/hash.h> 40 #include <linux/hash.h>
41 #include <linux/ratelimit.h> 41 #include <linux/ratelimit.h>
42 #include <asm/unaligned.h> 42 #include <asm/unaligned.h>
43 #include <scsi/scsi.h> 43 #include <scsi/scsi.h>
44 #include <scsi/scsi_host.h> 44 #include <scsi/scsi_host.h>
45 #include <scsi/scsi_device.h> 45 #include <scsi/scsi_device.h>
46 #include <scsi/scsi_cmnd.h> 46 #include <scsi/scsi_cmnd.h>
47 #include <scsi/libfc.h> 47 #include <scsi/libfc.h>
48 #include <scsi/fc_encode.h> 48 #include <scsi/fc_encode.h>
49 49
50 #include <target/target_core_base.h> 50 #include <target/target_core_base.h>
51 #include <target/target_core_transport.h> 51 #include <target/target_core_fabric.h>
52 #include <target/target_core_fabric_ops.h>
53 #include <target/target_core_device.h>
54 #include <target/target_core_tpg.h>
55 #include <target/target_core_configfs.h> 52 #include <target/target_core_configfs.h>
56 #include <target/configfs_macros.h> 53 #include <target/configfs_macros.h>
57 54
58 #include "tcm_fc.h" 55 #include "tcm_fc.h"
59 56
60 /* 57 /*
61 * Deliver read data back to initiator. 58 * Deliver read data back to initiator.
62 * XXX TBD handle resource problems later. 59 * XXX TBD handle resource problems later.
63 */ 60 */
64 int ft_queue_data_in(struct se_cmd *se_cmd) 61 int ft_queue_data_in(struct se_cmd *se_cmd)
65 { 62 {
66 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd); 63 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
67 struct fc_frame *fp = NULL; 64 struct fc_frame *fp = NULL;
68 struct fc_exch *ep; 65 struct fc_exch *ep;
69 struct fc_lport *lport; 66 struct fc_lport *lport;
70 struct scatterlist *sg = NULL; 67 struct scatterlist *sg = NULL;
71 size_t remaining; 68 size_t remaining;
72 u32 f_ctl = FC_FC_EX_CTX | FC_FC_REL_OFF; 69 u32 f_ctl = FC_FC_EX_CTX | FC_FC_REL_OFF;
73 u32 mem_off = 0; 70 u32 mem_off = 0;
74 u32 fh_off = 0; 71 u32 fh_off = 0;
75 u32 frame_off = 0; 72 u32 frame_off = 0;
76 size_t frame_len = 0; 73 size_t frame_len = 0;
77 size_t mem_len = 0; 74 size_t mem_len = 0;
78 size_t tlen; 75 size_t tlen;
79 size_t off_in_page; 76 size_t off_in_page;
80 struct page *page = NULL; 77 struct page *page = NULL;
81 int use_sg; 78 int use_sg;
82 int error; 79 int error;
83 void *page_addr; 80 void *page_addr;
84 void *from; 81 void *from;
85 void *to = NULL; 82 void *to = NULL;
86 83
87 ep = fc_seq_exch(cmd->seq); 84 ep = fc_seq_exch(cmd->seq);
88 lport = ep->lp; 85 lport = ep->lp;
89 cmd->seq = lport->tt.seq_start_next(cmd->seq); 86 cmd->seq = lport->tt.seq_start_next(cmd->seq);
90 87
91 remaining = se_cmd->data_length; 88 remaining = se_cmd->data_length;
92 89
93 /* 90 /*
94 * Setup to use first mem list entry, unless no data. 91 * Setup to use first mem list entry, unless no data.
95 */ 92 */
96 BUG_ON(remaining && !se_cmd->t_data_sg); 93 BUG_ON(remaining && !se_cmd->t_data_sg);
97 if (remaining) { 94 if (remaining) {
98 sg = se_cmd->t_data_sg; 95 sg = se_cmd->t_data_sg;
99 mem_len = sg->length; 96 mem_len = sg->length;
100 mem_off = sg->offset; 97 mem_off = sg->offset;
101 page = sg_page(sg); 98 page = sg_page(sg);
102 } 99 }
103 100
104 /* no scatter/gather in skb for odd word length due to fc_seq_send() */ 101 /* no scatter/gather in skb for odd word length due to fc_seq_send() */
105 use_sg = !(remaining % 4); 102 use_sg = !(remaining % 4);
106 103
107 while (remaining) { 104 while (remaining) {
108 if (!mem_len) { 105 if (!mem_len) {
109 sg = sg_next(sg); 106 sg = sg_next(sg);
110 mem_len = min((size_t)sg->length, remaining); 107 mem_len = min((size_t)sg->length, remaining);
111 mem_off = sg->offset; 108 mem_off = sg->offset;
112 page = sg_page(sg); 109 page = sg_page(sg);
113 } 110 }
114 if (!frame_len) { 111 if (!frame_len) {
115 /* 112 /*
116 * If the lport has the Large Send Offload (LSO) 113 * If the lport has the Large Send Offload (LSO)
117 * capability, allow 'frame_len' to be as big as 'lso_max' 114 * capability, allow 'frame_len' to be as big as 'lso_max'
118 * when the indicated transfer length is >= lport->lso_max 115 * when the indicated transfer length is >= lport->lso_max
119 */ 116 */
120 frame_len = (lport->seq_offload) ? lport->lso_max : 117 frame_len = (lport->seq_offload) ? lport->lso_max :
121 cmd->sess->max_frame; 118 cmd->sess->max_frame;
122 frame_len = min(frame_len, remaining); 119 frame_len = min(frame_len, remaining);
123 fp = fc_frame_alloc(lport, use_sg ? 0 : frame_len); 120 fp = fc_frame_alloc(lport, use_sg ? 0 : frame_len);
124 if (!fp) 121 if (!fp)
125 return -ENOMEM; 122 return -ENOMEM;
126 to = fc_frame_payload_get(fp, 0); 123 to = fc_frame_payload_get(fp, 0);
127 fh_off = frame_off; 124 fh_off = frame_off;
128 frame_off += frame_len; 125 frame_off += frame_len;
129 /* 126 /*
130 * Set up the frame's max payload, which the base 127 * Set up the frame's max payload, which the base
131 * driver uses to tell the HW the max frame size, so 128 * driver uses to tell the HW the max frame size, so
132 * that the HW can fragment appropriately based on the 129 * that the HW can fragment appropriately based on the
133 * "gso_max_size" of the underlying netdev. 130 * "gso_max_size" of the underlying netdev.
134 */ 131 */
135 fr_max_payload(fp) = cmd->sess->max_frame; 132 fr_max_payload(fp) = cmd->sess->max_frame;
136 } 133 }
137 tlen = min(mem_len, frame_len); 134 tlen = min(mem_len, frame_len);
138 135
139 if (use_sg) { 136 if (use_sg) {
140 off_in_page = mem_off; 137 off_in_page = mem_off;
141 BUG_ON(!page); 138 BUG_ON(!page);
142 get_page(page); 139 get_page(page);
143 skb_fill_page_desc(fp_skb(fp), 140 skb_fill_page_desc(fp_skb(fp),
144 skb_shinfo(fp_skb(fp))->nr_frags, 141 skb_shinfo(fp_skb(fp))->nr_frags,
145 page, off_in_page, tlen); 142 page, off_in_page, tlen);
146 fr_len(fp) += tlen; 143 fr_len(fp) += tlen;
147 fp_skb(fp)->data_len += tlen; 144 fp_skb(fp)->data_len += tlen;
148 fp_skb(fp)->truesize += 145 fp_skb(fp)->truesize +=
149 PAGE_SIZE << compound_order(page); 146 PAGE_SIZE << compound_order(page);
150 } else { 147 } else {
151 BUG_ON(!page); 148 BUG_ON(!page);
152 from = kmap_atomic(page + (mem_off >> PAGE_SHIFT), 149 from = kmap_atomic(page + (mem_off >> PAGE_SHIFT),
153 KM_SOFTIRQ0); 150 KM_SOFTIRQ0);
154 page_addr = from; 151 page_addr = from;
155 from += mem_off & ~PAGE_MASK; 152 from += mem_off & ~PAGE_MASK;
156 tlen = min(tlen, (size_t)(PAGE_SIZE - 153 tlen = min(tlen, (size_t)(PAGE_SIZE -
157 (mem_off & ~PAGE_MASK))); 154 (mem_off & ~PAGE_MASK)));
158 memcpy(to, from, tlen); 155 memcpy(to, from, tlen);
159 kunmap_atomic(page_addr, KM_SOFTIRQ0); 156 kunmap_atomic(page_addr, KM_SOFTIRQ0);
160 to += tlen; 157 to += tlen;
161 } 158 }
162 159
163 mem_off += tlen; 160 mem_off += tlen;
164 mem_len -= tlen; 161 mem_len -= tlen;
165 frame_len -= tlen; 162 frame_len -= tlen;
166 remaining -= tlen; 163 remaining -= tlen;
167 164
168 if (frame_len && 165 if (frame_len &&
169 (skb_shinfo(fp_skb(fp))->nr_frags < FC_FRAME_SG_LEN)) 166 (skb_shinfo(fp_skb(fp))->nr_frags < FC_FRAME_SG_LEN))
170 continue; 167 continue;
171 if (!remaining) 168 if (!remaining)
172 f_ctl |= FC_FC_END_SEQ; 169 f_ctl |= FC_FC_END_SEQ;
173 fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid, 170 fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
174 FC_TYPE_FCP, f_ctl, fh_off); 171 FC_TYPE_FCP, f_ctl, fh_off);
175 error = lport->tt.seq_send(lport, cmd->seq, fp); 172 error = lport->tt.seq_send(lport, cmd->seq, fp);
176 if (error) { 173 if (error) {
177 /* XXX For now, initiator will retry */ 174 /* XXX For now, initiator will retry */
178 pr_err_ratelimited("%s: Failed to send frame %p, " 175 pr_err_ratelimited("%s: Failed to send frame %p, "
179 "xid <0x%x>, remaining %zu, " 176 "xid <0x%x>, remaining %zu, "
180 "lso_max <0x%x>\n", 177 "lso_max <0x%x>\n",
181 __func__, fp, ep->xid, 178 __func__, fp, ep->xid,
182 remaining, lport->lso_max); 179 remaining, lport->lso_max);
183 } 180 }
184 } 181 }
185 return ft_queue_status(se_cmd); 182 return ft_queue_status(se_cmd);
186 } 183 }
187 184
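The sizing rule in the loop above reduces to: a frame may be as large as the LSO limit when the lport supports sequence offload, otherwise the session's negotiated max frame, and in either case no larger than the data remaining. Isolated as a sketch with hypothetical parameter names:

#include <stddef.h>

/* Hypothetical distillation of ft_queue_data_in()'s frame sizing:
 * seq_offload selects lso_max over the per-session max_frame. */
static size_t pick_frame_len(int seq_offload, size_t lso_max,
			     size_t max_frame, size_t remaining)
{
	size_t frame_len = seq_offload ? lso_max : max_frame;

	return frame_len < remaining ? frame_len : remaining;
}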
188 /* 185 /*
189 * Receive write data frame. 186 * Receive write data frame.
190 */ 187 */
191 void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp) 188 void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
192 { 189 {
193 struct se_cmd *se_cmd = &cmd->se_cmd; 190 struct se_cmd *se_cmd = &cmd->se_cmd;
194 struct fc_seq *seq = cmd->seq; 191 struct fc_seq *seq = cmd->seq;
195 struct fc_exch *ep; 192 struct fc_exch *ep;
196 struct fc_lport *lport; 193 struct fc_lport *lport;
197 struct fc_frame_header *fh; 194 struct fc_frame_header *fh;
198 struct scatterlist *sg = NULL; 195 struct scatterlist *sg = NULL;
199 u32 mem_off = 0; 196 u32 mem_off = 0;
200 u32 rel_off; 197 u32 rel_off;
201 size_t frame_len; 198 size_t frame_len;
202 size_t mem_len = 0; 199 size_t mem_len = 0;
203 size_t tlen; 200 size_t tlen;
204 struct page *page = NULL; 201 struct page *page = NULL;
205 void *page_addr; 202 void *page_addr;
206 void *from; 203 void *from;
207 void *to; 204 void *to;
208 u32 f_ctl; 205 u32 f_ctl;
209 void *buf; 206 void *buf;
210 207
211 fh = fc_frame_header_get(fp); 208 fh = fc_frame_header_get(fp);
212 if (!(ntoh24(fh->fh_f_ctl) & FC_FC_REL_OFF)) 209 if (!(ntoh24(fh->fh_f_ctl) & FC_FC_REL_OFF))
213 goto drop; 210 goto drop;
214 211
215 f_ctl = ntoh24(fh->fh_f_ctl); 212 f_ctl = ntoh24(fh->fh_f_ctl);
216 ep = fc_seq_exch(seq); 213 ep = fc_seq_exch(seq);
217 lport = ep->lp; 214 lport = ep->lp;
218 if (cmd->was_ddp_setup) { 215 if (cmd->was_ddp_setup) {
219 BUG_ON(!ep); 216 BUG_ON(!ep);
220 BUG_ON(!lport); 217 BUG_ON(!lport);
221 /* 218 /*
222 * Since DDP (Large Rx offload) was set up for this request, the 219 * Since DDP (Large Rx offload) was set up for this request, the
223 * payload is expected to be copied directly to user buffers. 220 * payload is expected to be copied directly to user buffers.
224 */ 221 */
225 buf = fc_frame_payload_get(fp, 1); 222 buf = fc_frame_payload_get(fp, 1);
226 if (buf) 223 if (buf)
227 pr_err("%s: xid 0x%x, f_ctl 0x%x, cmd->sg %p, " 224 pr_err("%s: xid 0x%x, f_ctl 0x%x, cmd->sg %p, "
228 "cmd->sg_cnt 0x%x. DDP was setup" 225 "cmd->sg_cnt 0x%x. DDP was setup"
229 " hence not expected to receive frame with " 226 " hence not expected to receive frame with "
230 "payload, Frame will be dropped if" 227 "payload, Frame will be dropped if"
231 "'Sequence Initiative' bit in f_ctl is" 228 "'Sequence Initiative' bit in f_ctl is"
232 "not set\n", __func__, ep->xid, f_ctl, 229 "not set\n", __func__, ep->xid, f_ctl,
233 cmd->sg, cmd->sg_cnt); 230 cmd->sg, cmd->sg_cnt);
234 /* 231 /*
235 * Invalidate the HW DDP context if it was set up for the 232 * Invalidate the HW DDP context if it was set up for the
236 * respective command. Invalidation of the HW DDP context is 233 * respective command. Invalidation of the HW DDP context is
237 * required in both situations (success and error). 234 * required in both situations (success and error).
238 */ 235 */
239 ft_invl_hw_context(cmd); 236 ft_invl_hw_context(cmd);
240 237
241 /* 238 /*
242 * If "Sequence Initiative (TSI)" bit set in f_ctl, means last 239 * If "Sequence Initiative (TSI)" bit set in f_ctl, means last
243 * write data frame is received successfully where payload is 240 * write data frame is received successfully where payload is
244 * posted directly to user buffer and only the last frame's 241 * posted directly to user buffer and only the last frame's
245 * header is posted in receive queue. 242 * header is posted in receive queue.
246 * 243 *
247 * If "Sequence Initiative (TSI)" bit is not set, means error 244 * If "Sequence Initiative (TSI)" bit is not set, means error
248 * condition w.r.t. DDP, hence drop the packet and let explict 245 * condition w.r.t. DDP, hence drop the packet and let explict
249 * ABORTS from other end of exchange timer trigger the recovery. 246 * ABORTS from other end of exchange timer trigger the recovery.
250 */ 247 */
251 if (f_ctl & FC_FC_SEQ_INIT) 248 if (f_ctl & FC_FC_SEQ_INIT)
252 goto last_frame; 249 goto last_frame;
253 else 250 else
254 goto drop; 251 goto drop;
255 } 252 }
256 253
257 rel_off = ntohl(fh->fh_parm_offset); 254 rel_off = ntohl(fh->fh_parm_offset);
258 frame_len = fr_len(fp); 255 frame_len = fr_len(fp);
259 if (frame_len <= sizeof(*fh)) 256 if (frame_len <= sizeof(*fh))
260 goto drop; 257 goto drop;
261 frame_len -= sizeof(*fh); 258 frame_len -= sizeof(*fh);
262 from = fc_frame_payload_get(fp, 0); 259 from = fc_frame_payload_get(fp, 0);
263 if (rel_off >= se_cmd->data_length) 260 if (rel_off >= se_cmd->data_length)
264 goto drop; 261 goto drop;
265 if (frame_len + rel_off > se_cmd->data_length) 262 if (frame_len + rel_off > se_cmd->data_length)
266 frame_len = se_cmd->data_length - rel_off; 263 frame_len = se_cmd->data_length - rel_off;
267 264
268 /* 265 /*
269 * Setup to use first mem list entry, unless no data. 266 * Setup to use first mem list entry, unless no data.
270 */ 267 */
271 BUG_ON(frame_len && !se_cmd->t_data_sg); 268 BUG_ON(frame_len && !se_cmd->t_data_sg);
272 if (frame_len) { 269 if (frame_len) {
273 sg = se_cmd->t_data_sg; 270 sg = se_cmd->t_data_sg;
274 mem_len = sg->length; 271 mem_len = sg->length;
275 mem_off = sg->offset; 272 mem_off = sg->offset;
276 page = sg_page(sg); 273 page = sg_page(sg);
277 } 274 }
278 275
279 while (frame_len) { 276 while (frame_len) {
280 if (!mem_len) { 277 if (!mem_len) {
281 sg = sg_next(sg); 278 sg = sg_next(sg);
282 mem_len = sg->length; 279 mem_len = sg->length;
283 mem_off = sg->offset; 280 mem_off = sg->offset;
284 page = sg_page(sg); 281 page = sg_page(sg);
285 } 282 }
286 if (rel_off >= mem_len) { 283 if (rel_off >= mem_len) {
287 rel_off -= mem_len; 284 rel_off -= mem_len;
288 mem_len = 0; 285 mem_len = 0;
289 continue; 286 continue;
290 } 287 }
291 mem_off += rel_off; 288 mem_off += rel_off;
292 mem_len -= rel_off; 289 mem_len -= rel_off;
293 rel_off = 0; 290 rel_off = 0;
294 291
295 tlen = min(mem_len, frame_len); 292 tlen = min(mem_len, frame_len);
296 293
297 to = kmap_atomic(page + (mem_off >> PAGE_SHIFT), 294 to = kmap_atomic(page + (mem_off >> PAGE_SHIFT),
298 KM_SOFTIRQ0); 295 KM_SOFTIRQ0);
299 page_addr = to; 296 page_addr = to;
300 to += mem_off & ~PAGE_MASK; 297 to += mem_off & ~PAGE_MASK;
301 tlen = min(tlen, (size_t)(PAGE_SIZE - 298 tlen = min(tlen, (size_t)(PAGE_SIZE -
302 (mem_off & ~PAGE_MASK))); 299 (mem_off & ~PAGE_MASK)));
303 memcpy(to, from, tlen); 300 memcpy(to, from, tlen);
304 kunmap_atomic(page_addr, KM_SOFTIRQ0); 301 kunmap_atomic(page_addr, KM_SOFTIRQ0);
305 302
306 from += tlen; 303 from += tlen;
307 frame_len -= tlen; 304 frame_len -= tlen;
308 mem_off += tlen; 305 mem_off += tlen;
309 mem_len -= tlen; 306 mem_len -= tlen;
310 cmd->write_data_len += tlen; 307 cmd->write_data_len += tlen;
311 } 308 }
312 last_frame: 309 last_frame:
313 if (cmd->write_data_len == se_cmd->data_length) 310 if (cmd->write_data_len == se_cmd->data_length)
314 transport_generic_handle_data(se_cmd); 311 transport_generic_handle_data(se_cmd);
315 drop: 312 drop:
316 fc_frame_free(fp); 313 fc_frame_free(fp);
317 } 314 }
318 315
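Before any copying, the function above clamps the frame into the command's data window: a frame starting past the end is dropped, and one that overruns the end is truncated. The same arithmetic in isolation, as a hypothetical helper:

#include <stddef.h>

/* Hypothetical restatement of the rel_off/frame_len clamping in
 * ft_recv_write_data(); returns the number of payload bytes to copy. */
static size_t clamp_frame(size_t rel_off, size_t frame_len,
			  size_t data_length)
{
	if (rel_off >= data_length)
		return 0;				/* starts past the buffer: drop */
	if (frame_len + rel_off > data_length)
		frame_len = data_length - rel_off;	/* truncate overrun */
	return frame_len;
}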
319 /* 316 /*
320 * Handle and clean up any HW-specific resources on 317 * Handle and clean up any HW-specific resources on
321 * received ABORTS, errors, or timeouts. 318 * received ABORTS, errors, or timeouts.
322 */ 319 */
323 void ft_invl_hw_context(struct ft_cmd *cmd) 320 void ft_invl_hw_context(struct ft_cmd *cmd)
324 { 321 {
325 struct fc_seq *seq = cmd->seq; 322 struct fc_seq *seq = cmd->seq;
326 struct fc_exch *ep = NULL; 323 struct fc_exch *ep = NULL;
327 struct fc_lport *lport = NULL; 324 struct fc_lport *lport = NULL;
328 325
329 BUG_ON(!cmd); 326 BUG_ON(!cmd);
330 327
331 /* Cleanup the DDP context in HW if DDP was setup */ 328 /* Cleanup the DDP context in HW if DDP was setup */
332 if (cmd->was_ddp_setup && seq) { 329 if (cmd->was_ddp_setup && seq) {
333 ep = fc_seq_exch(seq); 330 ep = fc_seq_exch(seq);
334 if (ep) { 331 if (ep) {
335 lport = ep->lp; 332 lport = ep->lp;
336 if (lport && (ep->xid <= lport->lro_xid)) 333 if (lport && (ep->xid <= lport->lro_xid))
337 /* 334 /*
338 * "ddp_done" trigger invalidation of HW 335 * "ddp_done" trigger invalidation of HW
339 * specific DDP context 336 * specific DDP context
340 */ 337 */
341 cmd->write_data_len = lport->tt.ddp_done(lport, 338 cmd->write_data_len = lport->tt.ddp_done(lport,
342 ep->xid); 339 ep->xid);
343 340
344 /* 341 /*
345 * Reset the same variable to indicate that the 342 * Reset the same variable to indicate that the
346 * HW's DDP context has been invalidated, to avoid 343 * HW's DDP context has been invalidated, to avoid
347 * re-invalidation of the same context (the context 344 * re-invalidation of the same context (the context
348 * is identified using ep->xid) 345 * is identified using ep->xid)
349 */ 346 */
350 cmd->was_ddp_setup = 0; 347 cmd->was_ddp_setup = 0;
351 } 348 }
352 } 349 }
353 } 350 }
354 351
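Clearing was_ddp_setup after the ddp_done() call makes the invalidation one-shot: abort, error, and timeout paths may all reach this function, but only the first caller tears down the HW context for a given xid. The same guard as a general pattern, with hypothetical types and names:

/* Hypothetical one-shot teardown guard, as used via was_ddp_setup. */
struct ddp_ctx {
	int	active;	/* set when the HW DDP context was programmed */
	unsigned xid;	/* key identifying the HW context */
};

static void ddp_invalidate_once(struct ddp_ctx *ctx)
{
	if (!ctx->active)
		return;		/* already invalidated for this xid */
	/* ... tell the HW to release state keyed by ctx->xid ... */
	ctx->active = 0;	/* prevent re-invalidation */
}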
drivers/target/tcm_fc/tfc_sess.c
1 /* 1 /*
2 * Copyright (c) 2010 Cisco Systems, Inc. 2 * Copyright (c) 2010 Cisco Systems, Inc.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify it 4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License, 5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation. 6 * version 2, as published by the Free Software Foundation.
7 * 7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details. 11 * more details.
12 * 12 *
13 * You should have received a copy of the GNU General Public License along with 13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 */ 16 */
17 17
18 /* XXX TBD some includes may be extraneous */ 18 /* XXX TBD some includes may be extraneous */
19 19
20 #include <linux/module.h> 20 #include <linux/module.h>
21 #include <linux/moduleparam.h> 21 #include <linux/moduleparam.h>
22 #include <generated/utsrelease.h> 22 #include <generated/utsrelease.h>
23 #include <linux/utsname.h> 23 #include <linux/utsname.h>
24 #include <linux/init.h> 24 #include <linux/init.h>
25 #include <linux/slab.h> 25 #include <linux/slab.h>
26 #include <linux/kthread.h> 26 #include <linux/kthread.h>
27 #include <linux/types.h> 27 #include <linux/types.h>
28 #include <linux/string.h> 28 #include <linux/string.h>
29 #include <linux/configfs.h> 29 #include <linux/configfs.h>
30 #include <linux/ctype.h> 30 #include <linux/ctype.h>
31 #include <linux/hash.h> 31 #include <linux/hash.h>
32 #include <linux/rcupdate.h> 32 #include <linux/rcupdate.h>
33 #include <linux/rculist.h> 33 #include <linux/rculist.h>
34 #include <linux/kref.h> 34 #include <linux/kref.h>
35 #include <asm/unaligned.h> 35 #include <asm/unaligned.h>
36 #include <scsi/scsi.h> 36 #include <scsi/scsi.h>
37 #include <scsi/scsi_host.h> 37 #include <scsi/scsi_host.h>
38 #include <scsi/scsi_device.h> 38 #include <scsi/scsi_device.h>
39 #include <scsi/scsi_cmnd.h> 39 #include <scsi/scsi_cmnd.h>
40 #include <scsi/libfc.h> 40 #include <scsi/libfc.h>
41 41
42 #include <target/target_core_base.h> 42 #include <target/target_core_base.h>
43 #include <target/target_core_transport.h> 43 #include <target/target_core_fabric.h>
44 #include <target/target_core_fabric_ops.h>
45 #include <target/target_core_device.h>
46 #include <target/target_core_tpg.h>
47 #include <target/target_core_configfs.h> 44 #include <target/target_core_configfs.h>
48 #include <target/configfs_macros.h> 45 #include <target/configfs_macros.h>
49 46
50 #include "tcm_fc.h" 47 #include "tcm_fc.h"
51 48
52 static void ft_sess_delete_all(struct ft_tport *); 49 static void ft_sess_delete_all(struct ft_tport *);
53 50
54 /* 51 /*
55 * Look up or allocate the target local port. 52 * Look up or allocate the target local port.
56 * Caller holds ft_lport_lock. 53 * Caller holds ft_lport_lock.
57 */ 54 */
58 static struct ft_tport *ft_tport_create(struct fc_lport *lport) 55 static struct ft_tport *ft_tport_create(struct fc_lport *lport)
59 { 56 {
60 struct ft_tpg *tpg; 57 struct ft_tpg *tpg;
61 struct ft_tport *tport; 58 struct ft_tport *tport;
62 int i; 59 int i;
63 60
64 tport = rcu_dereference(lport->prov[FC_TYPE_FCP]); 61 tport = rcu_dereference(lport->prov[FC_TYPE_FCP]);
65 if (tport && tport->tpg) 62 if (tport && tport->tpg)
66 return tport; 63 return tport;
67 64
68 tpg = ft_lport_find_tpg(lport); 65 tpg = ft_lport_find_tpg(lport);
69 if (!tpg) 66 if (!tpg)
70 return NULL; 67 return NULL;
71 68
72 if (tport) { 69 if (tport) {
73 tport->tpg = tpg; 70 tport->tpg = tpg;
74 return tport; 71 return tport;
75 } 72 }
76 73
77 tport = kzalloc(sizeof(*tport), GFP_KERNEL); 74 tport = kzalloc(sizeof(*tport), GFP_KERNEL);
78 if (!tport) 75 if (!tport)
79 return NULL; 76 return NULL;
80 77
81 tport->lport = lport; 78 tport->lport = lport;
82 tport->tpg = tpg; 79 tport->tpg = tpg;
83 tpg->tport = tport; 80 tpg->tport = tport;
84 for (i = 0; i < FT_SESS_HASH_SIZE; i++) 81 for (i = 0; i < FT_SESS_HASH_SIZE; i++)
85 INIT_HLIST_HEAD(&tport->hash[i]); 82 INIT_HLIST_HEAD(&tport->hash[i]);
86 83
87 rcu_assign_pointer(lport->prov[FC_TYPE_FCP], tport); 84 rcu_assign_pointer(lport->prov[FC_TYPE_FCP], tport);
88 return tport; 85 return tport;
89 } 86 }
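ft_tport_create() is a lookup-or-allocate: the fast path returns an existing provider entry, and allocation only happens under ft_lport_lock, so concurrent callers cannot race to create two tports. A portable C sketch of that shape, with a pthread mutex standing in for the kernel mutex and a single global slot standing in for lport->prov[FC_TYPE_FCP] (both simplifications):

    #include <pthread.h>
    #include <stdlib.h>

    struct tport { int sess_count; };

    static pthread_mutex_t lport_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct tport *prov_slot;    /* stands in for lport->prov[FC_TYPE_FCP] */

    /* Lookup-or-allocate under the lock: concurrent callers share one object. */
    static struct tport *tport_get_or_create(void)
    {
        struct tport *tport;

        pthread_mutex_lock(&lport_lock);
        tport = prov_slot;                     /* fast path: already created */
        if (!tport) {
            tport = calloc(1, sizeof(*tport));
            if (tport)
                prov_slot = tport;             /* publish under the lock */
        }
        pthread_mutex_unlock(&lport_lock);
        return tport;
    }

    int main(void)
    {
        return tport_get_or_create() ? 0 : 1;
    }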
90 87
91 /* 88 /*
92 * Free tport via RCU. 89 * Free tport via RCU.
93 */ 90 */
94 static void ft_tport_rcu_free(struct rcu_head *rcu) 91 static void ft_tport_rcu_free(struct rcu_head *rcu)
95 { 92 {
96 struct ft_tport *tport = container_of(rcu, struct ft_tport, rcu); 93 struct ft_tport *tport = container_of(rcu, struct ft_tport, rcu);
97 94
98 kfree(tport); 95 kfree(tport);
99 } 96 }
100 97
101 /* 98 /*
102 * Delete a target local port. 99 * Delete a target local port.
103 * Caller holds ft_lport_lock. 100 * Caller holds ft_lport_lock.
104 */ 101 */
105 static void ft_tport_delete(struct ft_tport *tport) 102 static void ft_tport_delete(struct ft_tport *tport)
106 { 103 {
107 struct fc_lport *lport; 104 struct fc_lport *lport;
108 struct ft_tpg *tpg; 105 struct ft_tpg *tpg;
109 106
110 ft_sess_delete_all(tport); 107 ft_sess_delete_all(tport);
111 lport = tport->lport; 108 lport = tport->lport;
112 BUG_ON(tport != lport->prov[FC_TYPE_FCP]); 109 BUG_ON(tport != lport->prov[FC_TYPE_FCP]);
113 rcu_assign_pointer(lport->prov[FC_TYPE_FCP], NULL); 110 rcu_assign_pointer(lport->prov[FC_TYPE_FCP], NULL);
114 111
115 tpg = tport->tpg; 112 tpg = tport->tpg;
116 if (tpg) { 113 if (tpg) {
117 tpg->tport = NULL; 114 tpg->tport = NULL;
118 tport->tpg = NULL; 115 tport->tpg = NULL;
119 } 116 }
120 call_rcu(&tport->rcu, ft_tport_rcu_free); 117 call_rcu(&tport->rcu, ft_tport_rcu_free);
121 } 118 }
122 119
123 /* 120 /*
124 * Add local port. 121 * Add local port.
125 * Called through fc_lport_iterate(). 122 * Called through fc_lport_iterate().
126 */ 123 */
127 void ft_lport_add(struct fc_lport *lport, void *arg) 124 void ft_lport_add(struct fc_lport *lport, void *arg)
128 { 125 {
129 mutex_lock(&ft_lport_lock); 126 mutex_lock(&ft_lport_lock);
130 ft_tport_create(lport); 127 ft_tport_create(lport);
131 mutex_unlock(&ft_lport_lock); 128 mutex_unlock(&ft_lport_lock);
132 } 129 }
133 130
134 /* 131 /*
135 * Delete local port. 132 * Delete local port.
136 * Called through fc_lport_iterate(). 133 * Called through fc_lport_iterate().
137 */ 134 */
138 void ft_lport_del(struct fc_lport *lport, void *arg) 135 void ft_lport_del(struct fc_lport *lport, void *arg)
139 { 136 {
140 struct ft_tport *tport; 137 struct ft_tport *tport;
141 138
142 mutex_lock(&ft_lport_lock); 139 mutex_lock(&ft_lport_lock);
143 tport = lport->prov[FC_TYPE_FCP]; 140 tport = lport->prov[FC_TYPE_FCP];
144 if (tport) 141 if (tport)
145 ft_tport_delete(tport); 142 ft_tport_delete(tport);
146 mutex_unlock(&ft_lport_lock); 143 mutex_unlock(&ft_lport_lock);
147 } 144 }
148 145
149 /* 146 /*
150 * Notification of local port change from libfc. 147 * Notification of local port change from libfc.
151 * Create or delete local port and associated tport. 148 * Create or delete local port and associated tport.
152 */ 149 */
153 int ft_lport_notify(struct notifier_block *nb, unsigned long event, void *arg) 150 int ft_lport_notify(struct notifier_block *nb, unsigned long event, void *arg)
154 { 151 {
155 struct fc_lport *lport = arg; 152 struct fc_lport *lport = arg;
156 153
157 switch (event) { 154 switch (event) {
158 case FC_LPORT_EV_ADD: 155 case FC_LPORT_EV_ADD:
159 ft_lport_add(lport, NULL); 156 ft_lport_add(lport, NULL);
160 break; 157 break;
161 case FC_LPORT_EV_DEL: 158 case FC_LPORT_EV_DEL:
162 ft_lport_del(lport, NULL); 159 ft_lport_del(lport, NULL);
163 break; 160 break;
164 } 161 }
165 return NOTIFY_DONE; 162 return NOTIFY_DONE;
166 } 163 }
167 164
168 /* 165 /*
169 * Hash function for FC_IDs. 166 * Hash function for FC_IDs.
170 */ 167 */
171 static u32 ft_sess_hash(u32 port_id) 168 static u32 ft_sess_hash(u32 port_id)
172 { 169 {
173 return hash_32(port_id, FT_SESS_HASH_BITS); 170 return hash_32(port_id, FT_SESS_HASH_BITS);
174 } 171 }
175 172
176 /* 173 /*
177 * Find session in local port. 174 * Find session in local port.
178 * Sessions and hash lists are RCU-protected. 175 * Sessions and hash lists are RCU-protected.
179 * A reference is taken which must eventually be released. 176 * A reference is taken which must eventually be released.
180 */ 177 */
181 static struct ft_sess *ft_sess_get(struct fc_lport *lport, u32 port_id) 178 static struct ft_sess *ft_sess_get(struct fc_lport *lport, u32 port_id)
182 { 179 {
183 struct ft_tport *tport; 180 struct ft_tport *tport;
184 struct hlist_head *head; 181 struct hlist_head *head;
185 struct hlist_node *pos; 182 struct hlist_node *pos;
186 struct ft_sess *sess; 183 struct ft_sess *sess;
187 184
188 rcu_read_lock(); 185 rcu_read_lock();
189 tport = rcu_dereference(lport->prov[FC_TYPE_FCP]); 186 tport = rcu_dereference(lport->prov[FC_TYPE_FCP]);
190 if (!tport) 187 if (!tport)
191 goto out; 188 goto out;
192 189
193 head = &tport->hash[ft_sess_hash(port_id)]; 190 head = &tport->hash[ft_sess_hash(port_id)];
194 hlist_for_each_entry_rcu(sess, pos, head, hash) { 191 hlist_for_each_entry_rcu(sess, pos, head, hash) {
195 if (sess->port_id == port_id) { 192 if (sess->port_id == port_id) {
196 kref_get(&sess->kref); 193 kref_get(&sess->kref);
197 rcu_read_unlock(); 194 rcu_read_unlock();
198 pr_debug("port_id %x found %p\n", port_id, sess); 195 pr_debug("port_id %x found %p\n", port_id, sess);
199 return sess; 196 return sess;
200 } 197 }
201 } 198 }
202 out: 199 out:
203 rcu_read_unlock(); 200 rcu_read_unlock();
204 pr_debug("port_id %x not found\n", port_id); 201 pr_debug("port_id %x not found\n", port_id);
205 return NULL; 202 return NULL;
206 } 203 }
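The important invariant in ft_sess_get() is that the reference is taken before the RCU read-side critical section ends, so the session cannot be freed between lookup and use. A simplified user-space sketch of that discipline, substituting a rwlock for RCU and a plain atomic counter for struct kref (both assumptions of the sketch, not the kernel API):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stddef.h>

    struct sess {
        unsigned int port_id;
        atomic_int refcnt;             /* stands in for struct kref */
        struct sess *next;             /* stands in for the hlist chain */
    };

    static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;
    static struct sess *bucket;        /* single hash bucket for brevity */

    /*
     * Take the reference *before* dropping the read-side lock, so the
     * entry cannot be freed between lookup and use by the caller.
     */
    static struct sess *sess_get(unsigned int port_id)
    {
        struct sess *sess;

        pthread_rwlock_rdlock(&table_lock);
        for (sess = bucket; sess; sess = sess->next) {
            if (sess->port_id == port_id) {
                atomic_fetch_add(&sess->refcnt, 1);    /* like kref_get() */
                break;
            }
        }
        pthread_rwlock_unlock(&table_lock);
        return sess;
    }

    int main(void)
    {
        return sess_get(0x010203) == NULL ? 0 : 1;     /* empty table: miss */
    }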
207 204
208 /* 205 /*
209 * Allocate session and enter it in the hash for the local port. 206 * Allocate session and enter it in the hash for the local port.
210 * Caller holds ft_lport_lock. 207 * Caller holds ft_lport_lock.
211 */ 208 */
212 static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id, 209 static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
213 struct ft_node_acl *acl) 210 struct ft_node_acl *acl)
214 { 211 {
215 struct ft_sess *sess; 212 struct ft_sess *sess;
216 struct hlist_head *head; 213 struct hlist_head *head;
217 struct hlist_node *pos; 214 struct hlist_node *pos;
218 215
219 head = &tport->hash[ft_sess_hash(port_id)]; 216 head = &tport->hash[ft_sess_hash(port_id)];
220 hlist_for_each_entry_rcu(sess, pos, head, hash) 217 hlist_for_each_entry_rcu(sess, pos, head, hash)
221 if (sess->port_id == port_id) 218 if (sess->port_id == port_id)
222 return sess; 219 return sess;
223 220
224 sess = kzalloc(sizeof(*sess), GFP_KERNEL); 221 sess = kzalloc(sizeof(*sess), GFP_KERNEL);
225 if (!sess) 222 if (!sess)
226 return NULL; 223 return NULL;
227 224
228 sess->se_sess = transport_init_session(); 225 sess->se_sess = transport_init_session();
229 if (IS_ERR(sess->se_sess)) { 226 if (IS_ERR(sess->se_sess)) {
230 kfree(sess); 227 kfree(sess);
231 return NULL; 228 return NULL;
232 } 229 }
233 sess->se_sess->se_node_acl = &acl->se_node_acl; 230 sess->se_sess->se_node_acl = &acl->se_node_acl;
234 sess->tport = tport; 231 sess->tport = tport;
235 sess->port_id = port_id; 232 sess->port_id = port_id;
236 kref_init(&sess->kref); /* ref for table entry */ 233 kref_init(&sess->kref); /* ref for table entry */
237 hlist_add_head_rcu(&sess->hash, head); 234 hlist_add_head_rcu(&sess->hash, head);
238 tport->sess_count++; 235 tport->sess_count++;
239 236
240 pr_debug("port_id %x sess %p\n", port_id, sess); 237 pr_debug("port_id %x sess %p\n", port_id, sess);
241 238
242 transport_register_session(&tport->tpg->se_tpg, &acl->se_node_acl, 239 transport_register_session(&tport->tpg->se_tpg, &acl->se_node_acl,
243 sess->se_sess, sess); 240 sess->se_sess, sess);
244 return sess; 241 return sess;
245 } 242 }
246 243
247 /* 244 /*
248 * Unhash the session. 245 * Unhash the session.
249 * Caller holds ft_lport_lock. 246 * Caller holds ft_lport_lock.
250 */ 247 */
251 static void ft_sess_unhash(struct ft_sess *sess) 248 static void ft_sess_unhash(struct ft_sess *sess)
252 { 249 {
253 struct ft_tport *tport = sess->tport; 250 struct ft_tport *tport = sess->tport;
254 251
255 hlist_del_rcu(&sess->hash); 252 hlist_del_rcu(&sess->hash);
256 BUG_ON(!tport->sess_count); 253 BUG_ON(!tport->sess_count);
257 tport->sess_count--; 254 tport->sess_count--;
258 sess->port_id = -1; 255 sess->port_id = -1;
259 sess->params = 0; 256 sess->params = 0;
260 } 257 }
261 258
262 /* 259 /*
263 * Delete session from hash. 260 * Delete session from hash.
264 * Caller holds ft_lport_lock. 261 * Caller holds ft_lport_lock.
265 */ 262 */
266 static struct ft_sess *ft_sess_delete(struct ft_tport *tport, u32 port_id) 263 static struct ft_sess *ft_sess_delete(struct ft_tport *tport, u32 port_id)
267 { 264 {
268 struct hlist_head *head; 265 struct hlist_head *head;
269 struct hlist_node *pos; 266 struct hlist_node *pos;
270 struct ft_sess *sess; 267 struct ft_sess *sess;
271 268
272 head = &tport->hash[ft_sess_hash(port_id)]; 269 head = &tport->hash[ft_sess_hash(port_id)];
273 hlist_for_each_entry_rcu(sess, pos, head, hash) { 270 hlist_for_each_entry_rcu(sess, pos, head, hash) {
274 if (sess->port_id == port_id) { 271 if (sess->port_id == port_id) {
275 ft_sess_unhash(sess); 272 ft_sess_unhash(sess);
276 return sess; 273 return sess;
277 } 274 }
278 } 275 }
279 return NULL; 276 return NULL;
280 } 277 }
281 278
282 /* 279 /*
283 * Delete all sessions from tport. 280 * Delete all sessions from tport.
284 * Caller holds ft_lport_lock. 281 * Caller holds ft_lport_lock.
285 */ 282 */
286 static void ft_sess_delete_all(struct ft_tport *tport) 283 static void ft_sess_delete_all(struct ft_tport *tport)
287 { 284 {
288 struct hlist_head *head; 285 struct hlist_head *head;
289 struct hlist_node *pos; 286 struct hlist_node *pos;
290 struct ft_sess *sess; 287 struct ft_sess *sess;
291 288
292 for (head = tport->hash; 289 for (head = tport->hash;
293 head < &tport->hash[FT_SESS_HASH_SIZE]; head++) { 290 head < &tport->hash[FT_SESS_HASH_SIZE]; head++) {
294 hlist_for_each_entry_rcu(sess, pos, head, hash) { 291 hlist_for_each_entry_rcu(sess, pos, head, hash) {
295 ft_sess_unhash(sess); 292 ft_sess_unhash(sess);
296 transport_deregister_session_configfs(sess->se_sess); 293 transport_deregister_session_configfs(sess->se_sess);
297 ft_sess_put(sess); /* release from table */ 294 ft_sess_put(sess); /* release from table */
298 } 295 }
299 } 296 }
300 } 297 }
301 298
302 /* 299 /*
303 * TCM ops for sessions. 300 * TCM ops for sessions.
304 */ 301 */
305 302
306 /* 303 /*
307 * Determine whether the session is allowed to be shut down in the current context. 304 * Determine whether the session is allowed to be shut down in the current context.
308 * Returns non-zero if the session should be shut down. 305 * Returns non-zero if the session should be shut down.
309 */ 306 */
310 int ft_sess_shutdown(struct se_session *se_sess) 307 int ft_sess_shutdown(struct se_session *se_sess)
311 { 308 {
312 struct ft_sess *sess = se_sess->fabric_sess_ptr; 309 struct ft_sess *sess = se_sess->fabric_sess_ptr;
313 310
314 pr_debug("port_id %x\n", sess->port_id); 311 pr_debug("port_id %x\n", sess->port_id);
315 return 1; 312 return 1;
316 } 313 }
317 314
318 /* 315 /*
319 * Remove session and send PRLO. 316 * Remove session and send PRLO.
320 * This is called when the ACL is being deleted or queue depth is changing. 317 * This is called when the ACL is being deleted or queue depth is changing.
321 */ 318 */
322 void ft_sess_close(struct se_session *se_sess) 319 void ft_sess_close(struct se_session *se_sess)
323 { 320 {
324 struct ft_sess *sess = se_sess->fabric_sess_ptr; 321 struct ft_sess *sess = se_sess->fabric_sess_ptr;
325 struct fc_lport *lport; 322 struct fc_lport *lport;
326 u32 port_id; 323 u32 port_id;
327 324
328 mutex_lock(&ft_lport_lock); 325 mutex_lock(&ft_lport_lock);
329 lport = sess->tport->lport; 326 lport = sess->tport->lport;
330 port_id = sess->port_id; 327 port_id = sess->port_id;
331 if (port_id == -1) { 328 if (port_id == -1) {
332 mutex_unlock(&ft_lport_lock); 329 mutex_unlock(&ft_lport_lock);
333 return; 330 return;
334 } 331 }
335 pr_debug("port_id %x\n", port_id); 332 pr_debug("port_id %x\n", port_id);
336 ft_sess_unhash(sess); 333 ft_sess_unhash(sess);
337 mutex_unlock(&ft_lport_lock); 334 mutex_unlock(&ft_lport_lock);
338 transport_deregister_session_configfs(se_sess); 335 transport_deregister_session_configfs(se_sess);
339 ft_sess_put(sess); 336 ft_sess_put(sess);
340 /* XXX Send LOGO or PRLO */ 337 /* XXX Send LOGO or PRLO */
341 synchronize_rcu(); /* let transport deregister happen */ 338 synchronize_rcu(); /* let transport deregister happen */
342 } 339 }
343 340
344 void ft_sess_stop(struct se_session *se_sess, int sess_sleep, int conn_sleep) 341 void ft_sess_stop(struct se_session *se_sess, int sess_sleep, int conn_sleep)
345 { 342 {
346 struct ft_sess *sess = se_sess->fabric_sess_ptr; 343 struct ft_sess *sess = se_sess->fabric_sess_ptr;
347 344
348 pr_debug("port_id %x\n", sess->port_id); 345 pr_debug("port_id %x\n", sess->port_id);
349 } 346 }
350 347
351 int ft_sess_logged_in(struct se_session *se_sess) 348 int ft_sess_logged_in(struct se_session *se_sess)
352 { 349 {
353 struct ft_sess *sess = se_sess->fabric_sess_ptr; 350 struct ft_sess *sess = se_sess->fabric_sess_ptr;
354 351
355 return sess->port_id != -1; 352 return sess->port_id != -1;
356 } 353 }
357 354
358 u32 ft_sess_get_index(struct se_session *se_sess) 355 u32 ft_sess_get_index(struct se_session *se_sess)
359 { 356 {
360 struct ft_sess *sess = se_sess->fabric_sess_ptr; 357 struct ft_sess *sess = se_sess->fabric_sess_ptr;
361 358
362 return sess->port_id; /* XXX TBD probably not what is needed */ 359 return sess->port_id; /* XXX TBD probably not what is needed */
363 } 360 }
364 361
365 u32 ft_sess_get_port_name(struct se_session *se_sess, 362 u32 ft_sess_get_port_name(struct se_session *se_sess,
366 unsigned char *buf, u32 len) 363 unsigned char *buf, u32 len)
367 { 364 {
368 struct ft_sess *sess = se_sess->fabric_sess_ptr; 365 struct ft_sess *sess = se_sess->fabric_sess_ptr;
369 366
370 return ft_format_wwn(buf, len, sess->port_name); 367 return ft_format_wwn(buf, len, sess->port_name);
371 } 368 }
372 369
373 void ft_sess_set_erl0(struct se_session *se_sess) 370 void ft_sess_set_erl0(struct se_session *se_sess)
374 { 371 {
375 /* XXX TBD called when out of memory */ 372 /* XXX TBD called when out of memory */
376 } 373 }
377 374
378 /* 375 /*
379 * libfc ops involving sessions. 376 * libfc ops involving sessions.
380 */ 377 */
381 378
382 static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len, 379 static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len,
383 const struct fc_els_spp *rspp, struct fc_els_spp *spp) 380 const struct fc_els_spp *rspp, struct fc_els_spp *spp)
384 { 381 {
385 struct ft_tport *tport; 382 struct ft_tport *tport;
386 struct ft_sess *sess; 383 struct ft_sess *sess;
387 struct ft_node_acl *acl; 384 struct ft_node_acl *acl;
388 u32 fcp_parm; 385 u32 fcp_parm;
389 386
390 tport = ft_tport_create(rdata->local_port); 387 tport = ft_tport_create(rdata->local_port);
391 if (!tport) 388 if (!tport)
392 return 0; /* not a target for this local port */ 389 return 0; /* not a target for this local port */
393 390
394 acl = ft_acl_get(tport->tpg, rdata); 391 acl = ft_acl_get(tport->tpg, rdata);
395 if (!acl) 392 if (!acl)
396 return 0; 393 return 0;
397 394
398 if (!rspp) 395 if (!rspp)
399 goto fill; 396 goto fill;
400 397
401 if (rspp->spp_flags & (FC_SPP_OPA_VAL | FC_SPP_RPA_VAL)) 398 if (rspp->spp_flags & (FC_SPP_OPA_VAL | FC_SPP_RPA_VAL))
402 return FC_SPP_RESP_NO_PA; 399 return FC_SPP_RESP_NO_PA;
403 400
404 /* 401 /*
405 * If both target and initiator bits are off, the SPP is invalid. 402 * If both target and initiator bits are off, the SPP is invalid.
406 */ 403 */
407 fcp_parm = ntohl(rspp->spp_params); 404 fcp_parm = ntohl(rspp->spp_params);
408 if (!(fcp_parm & (FCP_SPPF_INIT_FCN | FCP_SPPF_TARG_FCN))) 405 if (!(fcp_parm & (FCP_SPPF_INIT_FCN | FCP_SPPF_TARG_FCN)))
409 return FC_SPP_RESP_INVL; 406 return FC_SPP_RESP_INVL;
410 407
411 /* 408 /*
412 * Create session (image pair) only if requested by 409 * Create session (image pair) only if requested by
413 * EST_IMG_PAIR flag and if the requestor is an initiator. 410 * EST_IMG_PAIR flag and if the requestor is an initiator.
414 */ 411 */
415 if (rspp->spp_flags & FC_SPP_EST_IMG_PAIR) { 412 if (rspp->spp_flags & FC_SPP_EST_IMG_PAIR) {
416 spp->spp_flags |= FC_SPP_EST_IMG_PAIR; 413 spp->spp_flags |= FC_SPP_EST_IMG_PAIR;
417 if (!(fcp_parm & FCP_SPPF_INIT_FCN)) 414 if (!(fcp_parm & FCP_SPPF_INIT_FCN))
418 return FC_SPP_RESP_CONF; 415 return FC_SPP_RESP_CONF;
419 sess = ft_sess_create(tport, rdata->ids.port_id, acl); 416 sess = ft_sess_create(tport, rdata->ids.port_id, acl);
420 if (!sess) 417 if (!sess)
421 return FC_SPP_RESP_RES; 418 return FC_SPP_RESP_RES;
422 if (!sess->params) 419 if (!sess->params)
423 rdata->prli_count++; 420 rdata->prli_count++;
424 sess->params = fcp_parm; 421 sess->params = fcp_parm;
425 sess->port_name = rdata->ids.port_name; 422 sess->port_name = rdata->ids.port_name;
426 sess->max_frame = rdata->maxframe_size; 423 sess->max_frame = rdata->maxframe_size;
427 424
428 /* XXX TBD - clearing actions. unit attn, see 4.10 */ 425 /* XXX TBD - clearing actions. unit attn, see 4.10 */
429 } 426 }
430 427
431 /* 428 /*
432 * OR in our service parameters with those of the other provider (initiator), if any. 429 * OR in our service parameters with those of the other provider (initiator), if any.
433 * TBD XXX - indicate RETRY capability? 430 * TBD XXX - indicate RETRY capability?
434 */ 431 */
435 fill: 432 fill:
436 fcp_parm = ntohl(spp->spp_params); 433 fcp_parm = ntohl(spp->spp_params);
437 spp->spp_params = htonl(fcp_parm | FCP_SPPF_TARG_FCN); 434 spp->spp_params = htonl(fcp_parm | FCP_SPPF_TARG_FCN);
438 return FC_SPP_RESP_ACK; 435 return FC_SPP_RESP_ACK;
439 } 436 }
440 437
441 /** 438 /**
442 * ft_prli() - Handle incoming or outgoing PRLI for the FCP target 439 * ft_prli() - Handle incoming or outgoing PRLI for the FCP target
443 * @rdata: remote port private 440 * @rdata: remote port private
444 * @spp_len: service parameter page length 441 * @spp_len: service parameter page length
445 * @rspp: received service parameter page (NULL for outgoing PRLI) 442 * @rspp: received service parameter page (NULL for outgoing PRLI)
446 * @spp: response service parameter page 443 * @spp: response service parameter page
447 * 444 *
448 * Returns the SPP response code. 445 * Returns the SPP response code.
449 */ 446 */
450 static int ft_prli(struct fc_rport_priv *rdata, u32 spp_len, 447 static int ft_prli(struct fc_rport_priv *rdata, u32 spp_len,
451 const struct fc_els_spp *rspp, struct fc_els_spp *spp) 448 const struct fc_els_spp *rspp, struct fc_els_spp *spp)
452 { 449 {
453 int ret; 450 int ret;
454 451
455 mutex_lock(&ft_lport_lock); 452 mutex_lock(&ft_lport_lock);
456 ret = ft_prli_locked(rdata, spp_len, rspp, spp); 453 ret = ft_prli_locked(rdata, spp_len, rspp, spp);
457 mutex_unlock(&ft_lport_lock); 454 mutex_unlock(&ft_lport_lock);
458 pr_debug("port_id %x flags %x ret %x\n", 455 pr_debug("port_id %x flags %x ret %x\n",
459 rdata->ids.port_id, rspp ? rspp->spp_flags : 0, ret); 456 rdata->ids.port_id, rspp ? rspp->spp_flags : 0, ret);
460 return ret; 457 return ret;
461 } 458 }
462 459
463 static void ft_sess_rcu_free(struct rcu_head *rcu) 460 static void ft_sess_rcu_free(struct rcu_head *rcu)
464 { 461 {
465 struct ft_sess *sess = container_of(rcu, struct ft_sess, rcu); 462 struct ft_sess *sess = container_of(rcu, struct ft_sess, rcu);
466 463
467 transport_deregister_session(sess->se_sess); 464 transport_deregister_session(sess->se_sess);
468 kfree(sess); 465 kfree(sess);
469 } 466 }
470 467
471 static void ft_sess_free(struct kref *kref) 468 static void ft_sess_free(struct kref *kref)
472 { 469 {
473 struct ft_sess *sess = container_of(kref, struct ft_sess, kref); 470 struct ft_sess *sess = container_of(kref, struct ft_sess, kref);
474 471
475 call_rcu(&sess->rcu, ft_sess_rcu_free); 472 call_rcu(&sess->rcu, ft_sess_rcu_free);
476 } 473 }
477 474
478 void ft_sess_put(struct ft_sess *sess) 475 void ft_sess_put(struct ft_sess *sess)
479 { 476 {
480 int sess_held = atomic_read(&sess->kref.refcount); 477 int sess_held = atomic_read(&sess->kref.refcount);
481 478
482 BUG_ON(!sess_held); 479 BUG_ON(!sess_held);
483 kref_put(&sess->kref, ft_sess_free); 480 kref_put(&sess->kref, ft_sess_free);
484 } 481 }
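ft_sess_put() follows the standard kref discipline: whoever drops the last reference triggers the release function. A portable C11 sketch of that rule; note the real code above defers the final free through call_rcu() so concurrent RCU readers drain first, which this simplified version deliberately does not model:

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdlib.h>

    struct sess {
        atomic_int refcnt;
    };

    /* Dropping the last reference runs the release path, like kref_put(). */
    static void sess_put(struct sess *sess)
    {
        int old = atomic_fetch_sub(&sess->refcnt, 1);
        assert(old > 0);               /* mirrors the BUG_ON(!sess_held) above */
        if (old == 1)
            free(sess);                /* release; the kernel defers via call_rcu() */
    }

    int main(void)
    {
        struct sess *sess = calloc(1, sizeof(*sess));
        if (!sess)
            return 1;
        atomic_store(&sess->refcnt, 2);    /* table reference + caller reference */
        sess_put(sess);                    /* drops caller ref, object lives on */
        sess_put(sess);                    /* drops table ref, object is freed */
        return 0;
    }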
485 482
486 static void ft_prlo(struct fc_rport_priv *rdata) 483 static void ft_prlo(struct fc_rport_priv *rdata)
487 { 484 {
488 struct ft_sess *sess; 485 struct ft_sess *sess;
489 struct ft_tport *tport; 486 struct ft_tport *tport;
490 487
491 mutex_lock(&ft_lport_lock); 488 mutex_lock(&ft_lport_lock);
492 tport = rcu_dereference(rdata->local_port->prov[FC_TYPE_FCP]); 489 tport = rcu_dereference(rdata->local_port->prov[FC_TYPE_FCP]);
493 if (!tport) { 490 if (!tport) {
494 mutex_unlock(&ft_lport_lock); 491 mutex_unlock(&ft_lport_lock);
495 return; 492 return;
496 } 493 }
497 sess = ft_sess_delete(tport, rdata->ids.port_id); 494 sess = ft_sess_delete(tport, rdata->ids.port_id);
498 if (!sess) { 495 if (!sess) {
499 mutex_unlock(&ft_lport_lock); 496 mutex_unlock(&ft_lport_lock);
500 return; 497 return;
501 } 498 }
502 mutex_unlock(&ft_lport_lock); 499 mutex_unlock(&ft_lport_lock);
503 transport_deregister_session_configfs(sess->se_sess); 500 transport_deregister_session_configfs(sess->se_sess);
504 ft_sess_put(sess); /* release from table */ 501 ft_sess_put(sess); /* release from table */
505 rdata->prli_count--; 502 rdata->prli_count--;
506 /* XXX TBD - clearing actions. unit attn, see 4.10 */ 503 /* XXX TBD - clearing actions. unit attn, see 4.10 */
507 } 504 }
508 505
509 /* 506 /*
510 * Handle incoming FCP request. 507 * Handle incoming FCP request.
511 * Caller has verified that the frame is type FCP. 508 * Caller has verified that the frame is type FCP.
512 */ 509 */
513 static void ft_recv(struct fc_lport *lport, struct fc_frame *fp) 510 static void ft_recv(struct fc_lport *lport, struct fc_frame *fp)
514 { 511 {
515 struct ft_sess *sess; 512 struct ft_sess *sess;
516 u32 sid = fc_frame_sid(fp); 513 u32 sid = fc_frame_sid(fp);
517 514
518 pr_debug("sid %x\n", sid); 515 pr_debug("sid %x\n", sid);
519 516
520 sess = ft_sess_get(lport, sid); 517 sess = ft_sess_get(lport, sid);
521 if (!sess) { 518 if (!sess) {
522 pr_debug("sid %x sess lookup failed\n", sid); 519 pr_debug("sid %x sess lookup failed\n", sid);
523 /* TBD XXX - if FCP_CMND, send PRLO */ 520 /* TBD XXX - if FCP_CMND, send PRLO */
524 fc_frame_free(fp); 521 fc_frame_free(fp);
525 return; 522 return;
526 } 523 }
527 ft_recv_req(sess, fp); /* must do ft_sess_put() */ 524 ft_recv_req(sess, fp); /* must do ft_sess_put() */
528 } 525 }
529 526
530 /* 527 /*
531 * Provider ops for libfc. 528 * Provider ops for libfc.
532 */ 529 */
533 struct fc4_prov ft_prov = { 530 struct fc4_prov ft_prov = {
534 .prli = ft_prli, 531 .prli = ft_prli,
535 .prlo = ft_prlo, 532 .prlo = ft_prlo,
536 .recv = ft_recv, 533 .recv = ft_recv,
537 .module = THIS_MODULE, 534 .module = THIS_MODULE,
538 }; 535 };
539 536
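The fc4_prov table is how the FCP target plugs into libfc: a structure of function pointers that the transport dispatches through when PRLI/PRLO/FCP frames arrive. A small self-contained C sketch of the ops-table pattern; prov_ops, my_prli, and my_prlo are illustrative names, not the libfc interface:

    #include <stdio.h>

    /* Function-pointer provider table, like struct fc4_prov above. */
    struct prov_ops {
        int  (*prli)(unsigned int port_id);
        void (*prlo)(unsigned int port_id);
    };

    static int my_prli(unsigned int port_id)
    {
        printf("PRLI from %#x\n", port_id);
        return 0;
    }

    static void my_prlo(unsigned int port_id)
    {
        printf("PRLO from %#x\n", port_id);
    }

    static const struct prov_ops my_prov = {
        .prli = my_prli,
        .prlo = my_prlo,
    };

    int main(void)
    {
        /* the transport core would dispatch through the registered table */
        my_prov.prli(0x010203);
        my_prov.prlo(0x010203);
        return 0;
    }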
include/target/target_core_backend.h
File was created 1 #ifndef TARGET_CORE_BACKEND_H
2 #define TARGET_CORE_BACKEND_H
3
4 #define TRANSPORT_PLUGIN_PHBA_PDEV 1
5 #define TRANSPORT_PLUGIN_VHBA_PDEV 2
6 #define TRANSPORT_PLUGIN_VHBA_VDEV 3
7
8 struct se_subsystem_api {
9 struct list_head sub_api_list;
10
11 char name[16];
12 struct module *owner;
13
14 u8 transport_type;
15
16 unsigned int fua_write_emulated : 1;
17 unsigned int write_cache_emulated : 1;
18
19 int (*attach_hba)(struct se_hba *, u32);
20 void (*detach_hba)(struct se_hba *);
21 int (*pmode_enable_hba)(struct se_hba *, unsigned long);
22 void *(*allocate_virtdevice)(struct se_hba *, const char *);
23 struct se_device *(*create_virtdevice)(struct se_hba *,
24 struct se_subsystem_dev *, void *);
25 void (*free_device)(void *);
26 int (*transport_complete)(struct se_task *task);
27 struct se_task *(*alloc_task)(unsigned char *cdb);
28 int (*do_task)(struct se_task *);
29 int (*do_discard)(struct se_device *, sector_t, u32);
30 void (*do_sync_cache)(struct se_task *);
31 void (*free_task)(struct se_task *);
32 ssize_t (*check_configfs_dev_params)(struct se_hba *,
33 struct se_subsystem_dev *);
34 ssize_t (*set_configfs_dev_params)(struct se_hba *,
35 struct se_subsystem_dev *, const char *, ssize_t);
36 ssize_t (*show_configfs_dev_params)(struct se_hba *,
37 struct se_subsystem_dev *, char *);
38 u32 (*get_device_rev)(struct se_device *);
39 u32 (*get_device_type)(struct se_device *);
40 sector_t (*get_blocks)(struct se_device *);
41 unsigned char *(*get_sense_buffer)(struct se_task *);
42 };
43
44 int transport_subsystem_register(struct se_subsystem_api *);
45 void transport_subsystem_release(struct se_subsystem_api *);
46
47 struct se_device *transport_add_device_to_core_hba(struct se_hba *,
48 struct se_subsystem_api *, struct se_subsystem_dev *, u32,
49 void *, struct se_dev_limits *, const char *, const char *);
50
51 void transport_complete_sync_cache(struct se_cmd *, int);
52 void transport_complete_task(struct se_task *, int);
53
54 void target_get_task_cdb(struct se_task *, unsigned char *);
55
56 void transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *);
57 int transport_set_vpd_assoc(struct t10_vpd *, unsigned char *);
58 int transport_set_vpd_ident_type(struct t10_vpd *, unsigned char *);
59 int transport_set_vpd_ident(struct t10_vpd *, unsigned char *);
60
61 /* core helpers also used by command snooping in pscsi */
62 void *transport_kmap_first_data_page(struct se_cmd *);
63 void transport_kunmap_first_data_page(struct se_cmd *);
64
65 #endif /* TARGET_CORE_BACKEND_H */
66
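With this commit, target_core_backend.h becomes the entire contract an I/O backend compiles against. For orientation, a minimal, non-functional skeleton of what a backend registration could look like against this interface; the two HBA callbacks are stubs, and a real backend must also provide the device and task ops omitted here:

    #include <linux/module.h>
    #include <target/target_core_base.h>
    #include <target/target_core_backend.h>

    static int demo_attach_hba(struct se_hba *hba, u32 host_id)
    {
        return 0;       /* a real backend would allocate per-HBA state here */
    }

    static void demo_detach_hba(struct se_hba *hba)
    {
    }

    static struct se_subsystem_api demo_template = {
        .name           = "demo",
        .owner          = THIS_MODULE,
        .transport_type = TRANSPORT_PLUGIN_VHBA_VDEV,
        .attach_hba     = demo_attach_hba,
        .detach_hba     = demo_detach_hba,
        /* create_virtdevice/alloc_task/do_task/etc. omitted for brevity */
    };

    static int __init demo_init(void)
    {
        return transport_subsystem_register(&demo_template);
    }

    static void __exit demo_exit(void)
    {
        transport_subsystem_release(&demo_template);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");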
include/target/target_core_base.h
1 #ifndef TARGET_CORE_BASE_H 1 #ifndef TARGET_CORE_BASE_H
2 #define TARGET_CORE_BASE_H 2 #define TARGET_CORE_BASE_H
3 3
4 #include <linux/in.h> 4 #include <linux/in.h>
5 #include <linux/configfs.h> 5 #include <linux/configfs.h>
6 #include <linux/dma-mapping.h> 6 #include <linux/dma-mapping.h>
7 #include <linux/blkdev.h> 7 #include <linux/blkdev.h>
8 #include <scsi/scsi_cmnd.h> 8 #include <scsi/scsi_cmnd.h>
9 #include <net/sock.h> 9 #include <net/sock.h>
10 #include <net/tcp.h> 10 #include <net/tcp.h>
11 11
12 #define TARGET_CORE_MOD_VERSION "v4.1.0-rc1-ml" 12 #define TARGET_CORE_MOD_VERSION "v4.1.0-rc1-ml"
13 #define TARGET_CORE_VERSION TARGET_CORE_MOD_VERSION
13 14
14 /* Maximum Number of LUNs per Target Portal Group */ 15 /* Maximum Number of LUNs per Target Portal Group */
15 /* Don't raise above 511 or REPORT_LUNS needs to handle >1 page */ 16 /* Don't raise above 511 or REPORT_LUNS needs to handle >1 page */
16 #define TRANSPORT_MAX_LUNS_PER_TPG 256 17 #define TRANSPORT_MAX_LUNS_PER_TPG 256
17 /* 18 /*
18 * By default we use 32-byte CDBs in TCM Core and subsystem plugin code. 19 * By default we use 32-byte CDBs in TCM Core and subsystem plugin code.
19 * 20 *
20 * Note that both include/scsi/scsi_cmnd.h:MAX_COMMAND_SIZE and 21 * Note that both include/scsi/scsi_cmnd.h:MAX_COMMAND_SIZE and
21 * include/linux/blkdev.h:BLOCK_MAX_CDB as of v2.6.36-rc4 still use 22 * include/linux/blkdev.h:BLOCK_MAX_CDB as of v2.6.36-rc4 still use
22 * 16-byte CDBs by default and require an extra allocation for 23 * 16-byte CDBs by default and require an extra allocation for
23 * 32-byte CDBs because of legacy issues. 24 * 32-byte CDBs because of legacy issues.
24 * 25 *
25 * Within TCM Core there are no such legacy limitations, so we go ahead and 26 * Within TCM Core there are no such legacy limitations, so we go ahead and
26 * use 32-byte CDBs by default and use include/scsi/scsi.h:scsi_command_size() 27 * use 32-byte CDBs by default and use include/scsi/scsi.h:scsi_command_size()
27 * within all TCM Core and subsystem plugin code. 28 * within all TCM Core and subsystem plugin code.
28 */ 29 */
29 #define TCM_MAX_COMMAND_SIZE 32 30 #define TCM_MAX_COMMAND_SIZE 32
30 /* 31 /*
31 * From include/scsi/scsi_cmnd.h:SCSI_SENSE_BUFFERSIZE, currently 32 * From include/scsi/scsi_cmnd.h:SCSI_SENSE_BUFFERSIZE, currently
32 * defined as 96, but the real limit is 252 (or 260 including the header) 33 * defined as 96, but the real limit is 252 (or 260 including the header)
33 */ 34 */
34 #define TRANSPORT_SENSE_BUFFER SCSI_SENSE_BUFFERSIZE 35 #define TRANSPORT_SENSE_BUFFER SCSI_SENSE_BUFFERSIZE
35 /* Used by transport_send_check_condition_and_sense() */ 36 /* Used by transport_send_check_condition_and_sense() */
36 #define SPC_SENSE_KEY_OFFSET 2 37 #define SPC_SENSE_KEY_OFFSET 2
37 #define SPC_ASC_KEY_OFFSET 12 38 #define SPC_ASC_KEY_OFFSET 12
38 #define SPC_ASCQ_KEY_OFFSET 13 39 #define SPC_ASCQ_KEY_OFFSET 13
39 #define TRANSPORT_IQN_LEN 224 40 #define TRANSPORT_IQN_LEN 224
40 /* Used by target_core_store_alua_lu_gp() and target_core_alua_lu_gp_show_attr_members() */ 41 /* Used by target_core_store_alua_lu_gp() and target_core_alua_lu_gp_show_attr_members() */
41 #define LU_GROUP_NAME_BUF 256 42 #define LU_GROUP_NAME_BUF 256
42 /* Used by core_alua_store_tg_pt_gp_info() and target_core_alua_tg_pt_gp_show_attr_members() */ 43 /* Used by core_alua_store_tg_pt_gp_info() and target_core_alua_tg_pt_gp_show_attr_members() */
43 #define TG_PT_GROUP_NAME_BUF 256 44 #define TG_PT_GROUP_NAME_BUF 256
44 /* Used to parse VPD into struct t10_vpd */ 45 /* Used to parse VPD into struct t10_vpd */
45 #define VPD_TMP_BUF_SIZE 128 46 #define VPD_TMP_BUF_SIZE 128
46 /* Used by transport_generic_cmd_sequencer() */ 47 /* Used by transport_generic_cmd_sequencer() */
47 #define READ_BLOCK_LEN 6 48 #define READ_BLOCK_LEN 6
48 #define READ_CAP_LEN 8 49 #define READ_CAP_LEN 8
49 #define READ_POSITION_LEN 20 50 #define READ_POSITION_LEN 20
50 #define INQUIRY_LEN 36 51 #define INQUIRY_LEN 36
51 /* Used by transport_get_inquiry_vpd_serial() */ 52 /* Used by transport_get_inquiry_vpd_serial() */
52 #define INQUIRY_VPD_SERIAL_LEN 254 53 #define INQUIRY_VPD_SERIAL_LEN 254
53 /* Used by transport_get_inquiry_vpd_device_ident() */ 54 /* Used by transport_get_inquiry_vpd_device_ident() */
54 #define INQUIRY_VPD_DEVICE_IDENTIFIER_LEN 254 55 #define INQUIRY_VPD_DEVICE_IDENTIFIER_LEN 254
55 56
57 /* Attempts before moving from SHORT to LONG */
58 #define PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD 3
59 #define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT 3 /* In milliseconds */
60 #define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG 10 /* In milliseconds */
61
62 #define PYX_TRANSPORT_STATUS_INTERVAL 5 /* In seconds */
63
64 /*
65 * struct se_subsystem_dev->su_dev_flags
66 */
67 #define SDF_FIRMWARE_VPD_UNIT_SERIAL 0x00000001
68 #define SDF_EMULATED_VPD_UNIT_SERIAL 0x00000002
69 #define SDF_USING_UDEV_PATH 0x00000004
70 #define SDF_USING_ALIAS 0x00000008
71
72 /*
73 * struct se_device->dev_flags
74 */
75 #define DF_READ_ONLY 0x00000001
76 #define DF_SPC2_RESERVATIONS 0x00000002
77 #define DF_SPC2_RESERVATIONS_WITH_ISID 0x00000004
78
79 /* struct se_dev_attrib sanity values */
80 /* Default max_unmap_lba_count */
81 #define DA_MAX_UNMAP_LBA_COUNT 0
82 /* Default max_unmap_block_desc_count */
83 #define DA_MAX_UNMAP_BLOCK_DESC_COUNT 0
84 /* Default unmap_granularity */
85 #define DA_UNMAP_GRANULARITY_DEFAULT 0
86 /* Default unmap_granularity_alignment */
87 #define DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT 0
88 /* Emulation for Disable Page Out (DPO) */
89 #define DA_EMULATE_DPO 0
90 /* Emulation for Forced Unit Access WRITEs */
91 #define DA_EMULATE_FUA_WRITE 1
92 /* Emulation for Forced Unit Access READs */
93 #define DA_EMULATE_FUA_READ 0
94 /* Emulation for WriteCache and SYNCHRONIZE_CACHE */
95 #define DA_EMULATE_WRITE_CACHE 0
96 /* Emulation for UNIT ATTENTION Interlock Control */
97 #define DA_EMULATE_UA_INTLLCK_CTRL 0
98 /* Emulation for TASK_ABORTED status (TAS) by default */
99 #define DA_EMULATE_TAS 1
100 /* Emulation for Thin Provisioning UNMAP using block/blk-lib.c:blkdev_issue_discard() */
101 #define DA_EMULATE_TPU 0
102 /*
103 * Emulation for Thin Provisioning WRITE_SAME w/ UNMAP=1 bit using
104 * block/blk-lib.c:blkdev_issue_discard()
105 */
106 #define DA_EMULATE_TPWS 0
107 /* No Emulation for PSCSI by default */
108 #define DA_EMULATE_RESERVATIONS 0
109 /* No Emulation for PSCSI by default */
110 #define DA_EMULATE_ALUA 0
111 /* Enforce SCSI Initiator Port TransportID with 'ISID' for PR */
112 #define DA_ENFORCE_PR_ISIDS 1
113 #define DA_STATUS_MAX_SECTORS_MIN 16
114 #define DA_STATUS_MAX_SECTORS_MAX 8192
115 /* By default don't report non-rotating (solid state) medium */
116 #define DA_IS_NONROT 0
117 /* Queue Algorithm Modifier default for restricted reordering in control mode page */
118 #define DA_EMULATE_REST_REORD 0
119
120 #define SE_MODE_PAGE_BUF 512
121
122
56 /* struct se_hba->hba_flags */ 123 /* struct se_hba->hba_flags */
57 enum hba_flags_table { 124 enum hba_flags_table {
58 HBA_FLAGS_INTERNAL_USE = 0x01, 125 HBA_FLAGS_INTERNAL_USE = 0x01,
59 HBA_FLAGS_PSCSI_MODE = 0x02, 126 HBA_FLAGS_PSCSI_MODE = 0x02,
60 }; 127 };
61 128
62 /* struct se_lun->lun_status */ 129 /* struct se_lun->lun_status */
63 enum transport_lun_status_table { 130 enum transport_lun_status_table {
64 TRANSPORT_LUN_STATUS_FREE = 0, 131 TRANSPORT_LUN_STATUS_FREE = 0,
65 TRANSPORT_LUN_STATUS_ACTIVE = 1, 132 TRANSPORT_LUN_STATUS_ACTIVE = 1,
66 }; 133 };
67 134
68 /* struct se_portal_group->se_tpg_type */ 135 /* struct se_portal_group->se_tpg_type */
69 enum transport_tpg_type_table { 136 enum transport_tpg_type_table {
70 TRANSPORT_TPG_TYPE_NORMAL = 0, 137 TRANSPORT_TPG_TYPE_NORMAL = 0,
71 TRANSPORT_TPG_TYPE_DISCOVERY = 1, 138 TRANSPORT_TPG_TYPE_DISCOVERY = 1,
72 }; 139 };
73 140
74 /* Used for struct se_task->task_flags */ 141 /* Used for struct se_task->task_flags */
75 enum se_task_flags { 142 enum se_task_flags {
76 TF_ACTIVE = (1 << 0), 143 TF_ACTIVE = (1 << 0),
77 TF_SENT = (1 << 1), 144 TF_SENT = (1 << 1),
78 TF_REQUEST_STOP = (1 << 2), 145 TF_REQUEST_STOP = (1 << 2),
79 }; 146 };
80 147
81 /* Special transport-agnostic struct se_cmd->t_state values */ 148 /* Special transport-agnostic struct se_cmd->t_state values */
82 enum transport_state_table { 149 enum transport_state_table {
83 TRANSPORT_NO_STATE = 0, 150 TRANSPORT_NO_STATE = 0,
84 TRANSPORT_NEW_CMD = 1, 151 TRANSPORT_NEW_CMD = 1,
85 TRANSPORT_WRITE_PENDING = 3, 152 TRANSPORT_WRITE_PENDING = 3,
86 TRANSPORT_PROCESS_WRITE = 4, 153 TRANSPORT_PROCESS_WRITE = 4,
87 TRANSPORT_PROCESSING = 5, 154 TRANSPORT_PROCESSING = 5,
88 TRANSPORT_COMPLETE = 6, 155 TRANSPORT_COMPLETE = 6,
89 TRANSPORT_PROCESS_TMR = 9, 156 TRANSPORT_PROCESS_TMR = 9,
90 TRANSPORT_ISTATE_PROCESSING = 11, 157 TRANSPORT_ISTATE_PROCESSING = 11,
91 TRANSPORT_NEW_CMD_MAP = 16, 158 TRANSPORT_NEW_CMD_MAP = 16,
92 TRANSPORT_COMPLETE_QF_WP = 18, 159 TRANSPORT_COMPLETE_QF_WP = 18,
93 TRANSPORT_COMPLETE_QF_OK = 19, 160 TRANSPORT_COMPLETE_QF_OK = 19,
94 }; 161 };
95 162
96 /* Used for struct se_cmd->se_cmd_flags */ 163 /* Used for struct se_cmd->se_cmd_flags */
97 enum se_cmd_flags_table { 164 enum se_cmd_flags_table {
98 SCF_SUPPORTED_SAM_OPCODE = 0x00000001, 165 SCF_SUPPORTED_SAM_OPCODE = 0x00000001,
99 SCF_TRANSPORT_TASK_SENSE = 0x00000002, 166 SCF_TRANSPORT_TASK_SENSE = 0x00000002,
100 SCF_EMULATED_TASK_SENSE = 0x00000004, 167 SCF_EMULATED_TASK_SENSE = 0x00000004,
101 SCF_SCSI_DATA_SG_IO_CDB = 0x00000008, 168 SCF_SCSI_DATA_SG_IO_CDB = 0x00000008,
102 SCF_SCSI_CONTROL_SG_IO_CDB = 0x00000010, 169 SCF_SCSI_CONTROL_SG_IO_CDB = 0x00000010,
103 SCF_SCSI_NON_DATA_CDB = 0x00000040, 170 SCF_SCSI_NON_DATA_CDB = 0x00000040,
104 SCF_SCSI_CDB_EXCEPTION = 0x00000080, 171 SCF_SCSI_CDB_EXCEPTION = 0x00000080,
105 SCF_SCSI_RESERVATION_CONFLICT = 0x00000100, 172 SCF_SCSI_RESERVATION_CONFLICT = 0x00000100,
106 SCF_FUA = 0x00000200, 173 SCF_FUA = 0x00000200,
107 SCF_SE_LUN_CMD = 0x00000800, 174 SCF_SE_LUN_CMD = 0x00000800,
108 SCF_SE_ALLOW_EOO = 0x00001000, 175 SCF_SE_ALLOW_EOO = 0x00001000,
109 SCF_BIDI = 0x00002000, 176 SCF_BIDI = 0x00002000,
110 SCF_SENT_CHECK_CONDITION = 0x00004000, 177 SCF_SENT_CHECK_CONDITION = 0x00004000,
111 SCF_OVERFLOW_BIT = 0x00008000, 178 SCF_OVERFLOW_BIT = 0x00008000,
112 SCF_UNDERFLOW_BIT = 0x00010000, 179 SCF_UNDERFLOW_BIT = 0x00010000,
113 SCF_SENT_DELAYED_TAS = 0x00020000, 180 SCF_SENT_DELAYED_TAS = 0x00020000,
114 SCF_ALUA_NON_OPTIMIZED = 0x00040000, 181 SCF_ALUA_NON_OPTIMIZED = 0x00040000,
115 SCF_DELAYED_CMD_FROM_SAM_ATTR = 0x00080000, 182 SCF_DELAYED_CMD_FROM_SAM_ATTR = 0x00080000,
116 SCF_UNUSED = 0x00100000, 183 SCF_UNUSED = 0x00100000,
117 SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00400000, 184 SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00400000,
118 }; 185 };
119 186
120 /* struct se_dev_entry->lun_flags and struct se_lun->lun_access */ 187 /* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
121 enum transport_lunflags_table { 188 enum transport_lunflags_table {
122 TRANSPORT_LUNFLAGS_NO_ACCESS = 0x00, 189 TRANSPORT_LUNFLAGS_NO_ACCESS = 0x00,
123 TRANSPORT_LUNFLAGS_INITIATOR_ACCESS = 0x01, 190 TRANSPORT_LUNFLAGS_INITIATOR_ACCESS = 0x01,
124 TRANSPORT_LUNFLAGS_READ_ONLY = 0x02, 191 TRANSPORT_LUNFLAGS_READ_ONLY = 0x02,
125 TRANSPORT_LUNFLAGS_READ_WRITE = 0x04, 192 TRANSPORT_LUNFLAGS_READ_WRITE = 0x04,
126 }; 193 };
127 194
128 /* struct se_device->dev_status */ 195 /* struct se_device->dev_status */
129 enum transport_device_status_table { 196 enum transport_device_status_table {
130 TRANSPORT_DEVICE_ACTIVATED = 0x01, 197 TRANSPORT_DEVICE_ACTIVATED = 0x01,
131 TRANSPORT_DEVICE_DEACTIVATED = 0x02, 198 TRANSPORT_DEVICE_DEACTIVATED = 0x02,
132 TRANSPORT_DEVICE_QUEUE_FULL = 0x04, 199 TRANSPORT_DEVICE_QUEUE_FULL = 0x04,
133 TRANSPORT_DEVICE_SHUTDOWN = 0x08, 200 TRANSPORT_DEVICE_SHUTDOWN = 0x08,
134 TRANSPORT_DEVICE_OFFLINE_ACTIVATED = 0x10, 201 TRANSPORT_DEVICE_OFFLINE_ACTIVATED = 0x10,
135 TRANSPORT_DEVICE_OFFLINE_DEACTIVATED = 0x20, 202 TRANSPORT_DEVICE_OFFLINE_DEACTIVATED = 0x20,
136 }; 203 };
137 204
138 /* 205 /*
139 * Used by transport_send_check_condition_and_sense() and se_cmd->scsi_sense_reason 206 * Used by transport_send_check_condition_and_sense() and se_cmd->scsi_sense_reason
140 * to signal which ASC/ASCQ sense payload should be built. 207 * to signal which ASC/ASCQ sense payload should be built.
141 */ 208 */
142 enum tcm_sense_reason_table { 209 enum tcm_sense_reason_table {
143 TCM_NON_EXISTENT_LUN = 0x01, 210 TCM_NON_EXISTENT_LUN = 0x01,
144 TCM_UNSUPPORTED_SCSI_OPCODE = 0x02, 211 TCM_UNSUPPORTED_SCSI_OPCODE = 0x02,
145 TCM_INCORRECT_AMOUNT_OF_DATA = 0x03, 212 TCM_INCORRECT_AMOUNT_OF_DATA = 0x03,
146 TCM_UNEXPECTED_UNSOLICITED_DATA = 0x04, 213 TCM_UNEXPECTED_UNSOLICITED_DATA = 0x04,
147 TCM_SERVICE_CRC_ERROR = 0x05, 214 TCM_SERVICE_CRC_ERROR = 0x05,
148 TCM_SNACK_REJECTED = 0x06, 215 TCM_SNACK_REJECTED = 0x06,
149 TCM_SECTOR_COUNT_TOO_MANY = 0x07, 216 TCM_SECTOR_COUNT_TOO_MANY = 0x07,
150 TCM_INVALID_CDB_FIELD = 0x08, 217 TCM_INVALID_CDB_FIELD = 0x08,
151 TCM_INVALID_PARAMETER_LIST = 0x09, 218 TCM_INVALID_PARAMETER_LIST = 0x09,
152 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE = 0x0a, 219 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE = 0x0a,
153 TCM_UNKNOWN_MODE_PAGE = 0x0b, 220 TCM_UNKNOWN_MODE_PAGE = 0x0b,
154 TCM_WRITE_PROTECTED = 0x0c, 221 TCM_WRITE_PROTECTED = 0x0c,
155 TCM_CHECK_CONDITION_ABORT_CMD = 0x0d, 222 TCM_CHECK_CONDITION_ABORT_CMD = 0x0d,
156 TCM_CHECK_CONDITION_UNIT_ATTENTION = 0x0e, 223 TCM_CHECK_CONDITION_UNIT_ATTENTION = 0x0e,
157 TCM_CHECK_CONDITION_NOT_READY = 0x0f, 224 TCM_CHECK_CONDITION_NOT_READY = 0x0f,
158 TCM_RESERVATION_CONFLICT = 0x10, 225 TCM_RESERVATION_CONFLICT = 0x10,
226 };
227
228 /* fabric independent task management function values */
229 enum tcm_tmreq_table {
230 TMR_ABORT_TASK = 1,
231 TMR_ABORT_TASK_SET = 2,
232 TMR_CLEAR_ACA = 3,
233 TMR_CLEAR_TASK_SET = 4,
234 TMR_LUN_RESET = 5,
235 TMR_TARGET_WARM_RESET = 6,
236 TMR_TARGET_COLD_RESET = 7,
237 TMR_FABRIC_TMR = 255,
238 };
239
240 /* fabric independent task management response values */
241 enum tcm_tmrsp_table {
242 TMR_FUNCTION_COMPLETE = 0,
243 TMR_TASK_DOES_NOT_EXIST = 1,
244 TMR_LUN_DOES_NOT_EXIST = 2,
245 TMR_TASK_STILL_ALLEGIANT = 3,
246 TMR_TASK_FAILOVER_NOT_SUPPORTED = 4,
247 TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED = 5,
248 TMR_FUNCTION_AUTHORIZATION_FAILED = 6,
249 TMR_FUNCTION_REJECTED = 255,
159 }; 250 };
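These TMR codes give fabric modules a wire-format-independent vocabulary: each fabric translates its own task-management function codes into tcm_tmreq_table values before handing the request to the core. A sketch of such a translation, where the FOO_TMF_* wire codes are hypothetical and only the TMR_* values come from this header:

    #include <target/target_core_base.h>

    /* FOO_TMF_* are made-up wire codes; only the mapping pattern is real. */
    enum foo_wire_tmf {
        FOO_TMF_ABORT_TASK = 0x01,
        FOO_TMF_LUN_RESET  = 0x05,
    };

    static int foo_tmf_to_tcm(enum foo_wire_tmf fn)
    {
        switch (fn) {
        case FOO_TMF_ABORT_TASK:
            return TMR_ABORT_TASK;
        case FOO_TMF_LUN_RESET:
            return TMR_LUN_RESET;
        default:
            return TMR_FABRIC_TMR;     /* fabric-specific, not translated */
        }
    }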
160 251
161 struct se_obj { 252 struct se_obj {
162 atomic_t obj_access_count; 253 atomic_t obj_access_count;
163 } ____cacheline_aligned; 254 } ____cacheline_aligned;
164 255
165 /* 256 /*
166 * Used by TCM Core internally to signal if ALUA emulation is enabled or 257 * Used by TCM Core internally to signal if ALUA emulation is enabled or
167 * disabled, or running in TCM/pSCSI passthrough mode 258 * disabled, or running in TCM/pSCSI passthrough mode
168 */ 259 */
169 typedef enum { 260 typedef enum {
170 SPC_ALUA_PASSTHROUGH, 261 SPC_ALUA_PASSTHROUGH,
171 SPC2_ALUA_DISABLED, 262 SPC2_ALUA_DISABLED,
172 SPC3_ALUA_EMULATED 263 SPC3_ALUA_EMULATED
173 } t10_alua_index_t; 264 } t10_alua_index_t;
174 265
175 /* 266 /*
176 * Used by TCM Core internally to signal if SAM Task Attribute emulation 267 * Used by TCM Core internally to signal if SAM Task Attribute emulation
177 * is enabled or disabled, or running in TCM/pSCSI passthrough mode 268 * is enabled or disabled, or running in TCM/pSCSI passthrough mode
178 */ 269 */
179 typedef enum { 270 typedef enum {
180 SAM_TASK_ATTR_PASSTHROUGH, 271 SAM_TASK_ATTR_PASSTHROUGH,
181 SAM_TASK_ATTR_UNTAGGED, 272 SAM_TASK_ATTR_UNTAGGED,
182 SAM_TASK_ATTR_EMULATED 273 SAM_TASK_ATTR_EMULATED
183 } t10_task_attr_index_t; 274 } t10_task_attr_index_t;
184 275
185 /* 276 /*
186 * Used for target SCSI statistics 277 * Used for target SCSI statistics
187 */ 278 */
188 typedef enum { 279 typedef enum {
189 SCSI_INST_INDEX, 280 SCSI_INST_INDEX,
190 SCSI_DEVICE_INDEX, 281 SCSI_DEVICE_INDEX,
191 SCSI_AUTH_INTR_INDEX, 282 SCSI_AUTH_INTR_INDEX,
192 SCSI_INDEX_TYPE_MAX 283 SCSI_INDEX_TYPE_MAX
193 } scsi_index_t; 284 } scsi_index_t;
194 285
195 struct se_cmd; 286 struct se_cmd;
196 287
197 struct t10_alua { 288 struct t10_alua {
198 t10_alua_index_t alua_type; 289 t10_alua_index_t alua_type;
199 /* ALUA Target Port Group ID */ 290 /* ALUA Target Port Group ID */
200 u16 alua_tg_pt_gps_counter; 291 u16 alua_tg_pt_gps_counter;
201 u32 alua_tg_pt_gps_count; 292 u32 alua_tg_pt_gps_count;
202 spinlock_t tg_pt_gps_lock; 293 spinlock_t tg_pt_gps_lock;
203 struct se_subsystem_dev *t10_sub_dev; 294 struct se_subsystem_dev *t10_sub_dev;
204 /* Used for default ALUA Target Port Group */ 295 /* Used for default ALUA Target Port Group */
205 struct t10_alua_tg_pt_gp *default_tg_pt_gp; 296 struct t10_alua_tg_pt_gp *default_tg_pt_gp;
206 /* Used for default ALUA Target Port Group ConfigFS group */ 297 /* Used for default ALUA Target Port Group ConfigFS group */
207 struct config_group alua_tg_pt_gps_group; 298 struct config_group alua_tg_pt_gps_group;
208 int (*alua_state_check)(struct se_cmd *, unsigned char *, u8 *); 299 int (*alua_state_check)(struct se_cmd *, unsigned char *, u8 *);
209 struct list_head tg_pt_gps_list; 300 struct list_head tg_pt_gps_list;
210 } ____cacheline_aligned; 301 } ____cacheline_aligned;
211 302
212 struct t10_alua_lu_gp { 303 struct t10_alua_lu_gp {
213 u16 lu_gp_id; 304 u16 lu_gp_id;
214 int lu_gp_valid_id; 305 int lu_gp_valid_id;
215 u32 lu_gp_members; 306 u32 lu_gp_members;
216 atomic_t lu_gp_ref_cnt; 307 atomic_t lu_gp_ref_cnt;
217 spinlock_t lu_gp_lock; 308 spinlock_t lu_gp_lock;
218 struct config_group lu_gp_group; 309 struct config_group lu_gp_group;
219 struct list_head lu_gp_node; 310 struct list_head lu_gp_node;
220 struct list_head lu_gp_mem_list; 311 struct list_head lu_gp_mem_list;
221 } ____cacheline_aligned; 312 } ____cacheline_aligned;
222 313
223 struct t10_alua_lu_gp_member { 314 struct t10_alua_lu_gp_member {
224 bool lu_gp_assoc; 315 bool lu_gp_assoc;
225 atomic_t lu_gp_mem_ref_cnt; 316 atomic_t lu_gp_mem_ref_cnt;
226 spinlock_t lu_gp_mem_lock; 317 spinlock_t lu_gp_mem_lock;
227 struct t10_alua_lu_gp *lu_gp; 318 struct t10_alua_lu_gp *lu_gp;
228 struct se_device *lu_gp_mem_dev; 319 struct se_device *lu_gp_mem_dev;
229 struct list_head lu_gp_mem_list; 320 struct list_head lu_gp_mem_list;
230 } ____cacheline_aligned; 321 } ____cacheline_aligned;
231 322
232 struct t10_alua_tg_pt_gp { 323 struct t10_alua_tg_pt_gp {
233 u16 tg_pt_gp_id; 324 u16 tg_pt_gp_id;
234 int tg_pt_gp_valid_id; 325 int tg_pt_gp_valid_id;
235 int tg_pt_gp_alua_access_status; 326 int tg_pt_gp_alua_access_status;
236 int tg_pt_gp_alua_access_type; 327 int tg_pt_gp_alua_access_type;
237 int tg_pt_gp_nonop_delay_msecs; 328 int tg_pt_gp_nonop_delay_msecs;
238 int tg_pt_gp_trans_delay_msecs; 329 int tg_pt_gp_trans_delay_msecs;
239 int tg_pt_gp_pref; 330 int tg_pt_gp_pref;
240 int tg_pt_gp_write_metadata; 331 int tg_pt_gp_write_metadata;
241 /* Used by struct t10_alua_tg_pt_gp->tg_pt_gp_md_buf_len */ 332 /* Used by struct t10_alua_tg_pt_gp->tg_pt_gp_md_buf_len */
242 #define ALUA_MD_BUF_LEN 1024 333 #define ALUA_MD_BUF_LEN 1024
243 u32 tg_pt_gp_md_buf_len; 334 u32 tg_pt_gp_md_buf_len;
244 u32 tg_pt_gp_members; 335 u32 tg_pt_gp_members;
245 atomic_t tg_pt_gp_alua_access_state; 336 atomic_t tg_pt_gp_alua_access_state;
246 atomic_t tg_pt_gp_ref_cnt; 337 atomic_t tg_pt_gp_ref_cnt;
247 spinlock_t tg_pt_gp_lock; 338 spinlock_t tg_pt_gp_lock;
248 struct mutex tg_pt_gp_md_mutex; 339 struct mutex tg_pt_gp_md_mutex;
249 struct se_subsystem_dev *tg_pt_gp_su_dev; 340 struct se_subsystem_dev *tg_pt_gp_su_dev;
250 struct config_group tg_pt_gp_group; 341 struct config_group tg_pt_gp_group;
251 struct list_head tg_pt_gp_list; 342 struct list_head tg_pt_gp_list;
252 struct list_head tg_pt_gp_mem_list; 343 struct list_head tg_pt_gp_mem_list;
253 } ____cacheline_aligned; 344 } ____cacheline_aligned;
254 345
255 struct t10_alua_tg_pt_gp_member { 346 struct t10_alua_tg_pt_gp_member {
256 bool tg_pt_gp_assoc; 347 bool tg_pt_gp_assoc;
257 atomic_t tg_pt_gp_mem_ref_cnt; 348 atomic_t tg_pt_gp_mem_ref_cnt;
258 spinlock_t tg_pt_gp_mem_lock; 349 spinlock_t tg_pt_gp_mem_lock;
259 struct t10_alua_tg_pt_gp *tg_pt_gp; 350 struct t10_alua_tg_pt_gp *tg_pt_gp;
260 struct se_port *tg_pt; 351 struct se_port *tg_pt;
261 struct list_head tg_pt_gp_mem_list; 352 struct list_head tg_pt_gp_mem_list;
262 } ____cacheline_aligned; 353 } ____cacheline_aligned;
263 354
264 struct t10_vpd { 355 struct t10_vpd {
265 unsigned char device_identifier[INQUIRY_VPD_DEVICE_IDENTIFIER_LEN]; 356 unsigned char device_identifier[INQUIRY_VPD_DEVICE_IDENTIFIER_LEN];
266 int protocol_identifier_set; 357 int protocol_identifier_set;
267 u32 protocol_identifier; 358 u32 protocol_identifier;
268 u32 device_identifier_code_set; 359 u32 device_identifier_code_set;
269 u32 association; 360 u32 association;
270 u32 device_identifier_type; 361 u32 device_identifier_type;
271 struct list_head vpd_list; 362 struct list_head vpd_list;
272 } ____cacheline_aligned; 363 } ____cacheline_aligned;
273 364
274 struct t10_wwn { 365 struct t10_wwn {
275 char vendor[8]; 366 char vendor[8];
276 char model[16]; 367 char model[16];
277 char revision[4]; 368 char revision[4];
278 char unit_serial[INQUIRY_VPD_SERIAL_LEN]; 369 char unit_serial[INQUIRY_VPD_SERIAL_LEN];
279 spinlock_t t10_vpd_lock; 370 spinlock_t t10_vpd_lock;
280 struct se_subsystem_dev *t10_sub_dev; 371 struct se_subsystem_dev *t10_sub_dev;
281 struct config_group t10_wwn_group; 372 struct config_group t10_wwn_group;
282 struct list_head t10_vpd_list; 373 struct list_head t10_vpd_list;
283 } ____cacheline_aligned; 374 } ____cacheline_aligned;
284 375
285 376
286 /* 377 /*
287 * Used by TCM Core internally to signal if >= SPC-3 persistent reservations 378 * Used by TCM Core internally to signal if >= SPC-3 persistent reservations
288 * emulation is enabled or disabled, or running in TCM/pSCSI passthrough 379 * emulation is enabled or disabled, or running in TCM/pSCSI passthrough
289 * mode 380 * mode
290 */ 381 */
291 typedef enum { 382 typedef enum {
292 SPC_PASSTHROUGH, 383 SPC_PASSTHROUGH,
293 SPC2_RESERVATIONS, 384 SPC2_RESERVATIONS,
294 SPC3_PERSISTENT_RESERVATIONS 385 SPC3_PERSISTENT_RESERVATIONS
295 } t10_reservations_index_t; 386 } t10_reservations_index_t;
296 387
297 struct t10_pr_registration { 388 struct t10_pr_registration {
298 /* Used for fabrics that contain WWN+ISID */ 389 /* Used for fabrics that contain WWN+ISID */
299 #define PR_REG_ISID_LEN 16 390 #define PR_REG_ISID_LEN 16
300 /* PR_REG_ISID_LEN + ',i,0x' */ 391 /* PR_REG_ISID_LEN + ',i,0x' */
301 #define PR_REG_ISID_ID_LEN (PR_REG_ISID_LEN + 5) 392 #define PR_REG_ISID_ID_LEN (PR_REG_ISID_LEN + 5)
302 char pr_reg_isid[PR_REG_ISID_LEN]; 393 char pr_reg_isid[PR_REG_ISID_LEN];
303 /* Used during APTPL metadata reading */ 394 /* Used during APTPL metadata reading */
304 #define PR_APTPL_MAX_IPORT_LEN 256 395 #define PR_APTPL_MAX_IPORT_LEN 256
305 unsigned char pr_iport[PR_APTPL_MAX_IPORT_LEN]; 396 unsigned char pr_iport[PR_APTPL_MAX_IPORT_LEN];
306 /* Used during APTPL metadata reading */ 397 /* Used during APTPL metadata reading */
307 #define PR_APTPL_MAX_TPORT_LEN 256 398 #define PR_APTPL_MAX_TPORT_LEN 256
308 unsigned char pr_tport[PR_APTPL_MAX_TPORT_LEN]; 399 unsigned char pr_tport[PR_APTPL_MAX_TPORT_LEN];
309 /* For writing out live metadata */ 400 /* For writing out live metadata */
310 unsigned char *pr_aptpl_buf; 401 unsigned char *pr_aptpl_buf;
311 u16 pr_aptpl_rpti; 402 u16 pr_aptpl_rpti;
312 u16 pr_reg_tpgt; 403 u16 pr_reg_tpgt;
313 /* Reservation affects all target ports */ 404 /* Reservation affects all target ports */
314 int pr_reg_all_tg_pt; 405 int pr_reg_all_tg_pt;
315 /* Activate Persistence across Target Power Loss */ 406 /* Activate Persistence across Target Power Loss */
316 int pr_reg_aptpl; 407 int pr_reg_aptpl;
317 int pr_res_holder; 408 int pr_res_holder;
318 int pr_res_type; 409 int pr_res_type;
319 int pr_res_scope; 410 int pr_res_scope;
320 /* Used for fabric initiator WWPNs using an ISID */ 411 /* Used for fabric initiator WWPNs using an ISID */
321 bool isid_present_at_reg; 412 bool isid_present_at_reg;
322 u32 pr_res_mapped_lun; 413 u32 pr_res_mapped_lun;
323 u32 pr_aptpl_target_lun; 414 u32 pr_aptpl_target_lun;
324 u32 pr_res_generation; 415 u32 pr_res_generation;
325 u64 pr_reg_bin_isid; 416 u64 pr_reg_bin_isid;
326 u64 pr_res_key; 417 u64 pr_res_key;
327 atomic_t pr_res_holders; 418 atomic_t pr_res_holders;
328 struct se_node_acl *pr_reg_nacl; 419 struct se_node_acl *pr_reg_nacl;
329 struct se_dev_entry *pr_reg_deve; 420 struct se_dev_entry *pr_reg_deve;
330 struct se_lun *pr_reg_tg_pt_lun; 421 struct se_lun *pr_reg_tg_pt_lun;
331 struct list_head pr_reg_list; 422 struct list_head pr_reg_list;
332 struct list_head pr_reg_abort_list; 423 struct list_head pr_reg_abort_list;
333 struct list_head pr_reg_aptpl_list; 424 struct list_head pr_reg_aptpl_list;
334 struct list_head pr_reg_atp_list; 425 struct list_head pr_reg_atp_list;
335 struct list_head pr_reg_atp_mem_list; 426 struct list_head pr_reg_atp_mem_list;
336 } ____cacheline_aligned; 427 } ____cacheline_aligned;
337 428
338 /* 429 /*
339 * This set of function pointer ops is chosen based upon SPC3_PERSISTENT_RESERVATIONS, 430 * This set of function pointer ops is chosen based upon SPC3_PERSISTENT_RESERVATIONS,
340 * SPC2_RESERVATIONS or SPC_PASSTHROUGH in drivers/target/target_core_pr.c: 431 * SPC2_RESERVATIONS or SPC_PASSTHROUGH in drivers/target/target_core_pr.c:
341 * core_setup_reservations() 432 * core_setup_reservations()
342 */ 433 */
343 struct t10_reservation_ops { 434 struct t10_reservation_ops {
344 int (*t10_reservation_check)(struct se_cmd *, u32 *); 435 int (*t10_reservation_check)(struct se_cmd *, u32 *);
345 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32); 436 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
346 int (*t10_pr_register)(struct se_cmd *); 437 int (*t10_pr_register)(struct se_cmd *);
347 int (*t10_pr_clear)(struct se_cmd *); 438 int (*t10_pr_clear)(struct se_cmd *);
348 }; 439 };
349 440
350 struct t10_reservation { 441 struct t10_reservation {
351 /* Reservation affects all target ports */ 442 /* Reservation affects all target ports */
352 int pr_all_tg_pt; 443 int pr_all_tg_pt;
353 /* Activate Persistence across Target Power Loss enabled 444 /* Activate Persistence across Target Power Loss enabled
354 * for SCSI device */ 445 * for SCSI device */
355 int pr_aptpl_active; 446 int pr_aptpl_active;
356 /* Used by struct t10_reservation->pr_aptpl_buf_len */ 447 /* Used by struct t10_reservation->pr_aptpl_buf_len */
357 #define PR_APTPL_BUF_LEN 8192 448 #define PR_APTPL_BUF_LEN 8192
358 u32 pr_aptpl_buf_len; 449 u32 pr_aptpl_buf_len;
359 u32 pr_generation; 450 u32 pr_generation;
360 t10_reservations_index_t res_type; 451 t10_reservations_index_t res_type;
361 spinlock_t registration_lock; 452 spinlock_t registration_lock;
362 spinlock_t aptpl_reg_lock; 453 spinlock_t aptpl_reg_lock;
363 /* 454 /*
364 * This will always be set by one individual I_T Nexus. 455 * This will always be set by one individual I_T Nexus.
365 * However with all_tg_pt=1, other I_T Nexus from the 456 * However with all_tg_pt=1, other I_T Nexus from the
366 * same initiator can access PR reg/res info on a different 457 * same initiator can access PR reg/res info on a different
367 * target port. 458 * target port.
368 * 459 *
369 * There is also the 'All Registrants' case, where there is 460 * There is also the 'All Registrants' case, where there is
370 * a single *pr_res_holder of the reservation, but all 461 * a single *pr_res_holder of the reservation, but all
371 * registrations are considered reservation holders. 462 * registrations are considered reservation holders.
372 */ 463 */
373 struct se_node_acl *pr_res_holder; 464 struct se_node_acl *pr_res_holder;
374 struct list_head registration_list; 465 struct list_head registration_list;
375 struct list_head aptpl_reg_list; 466 struct list_head aptpl_reg_list;
376 struct t10_reservation_ops pr_ops; 467 struct t10_reservation_ops pr_ops;
377 } ____cacheline_aligned; 468 } ____cacheline_aligned;
378 469
379 struct se_queue_req { 470 struct se_queue_req {
380 int state; 471 int state;
381 struct se_cmd *cmd; 472 struct se_cmd *cmd;
382 struct list_head qr_list; 473 struct list_head qr_list;
383 } ____cacheline_aligned; 474 } ____cacheline_aligned;
384 475
385 struct se_queue_obj { 476 struct se_queue_obj {
386 atomic_t queue_cnt; 477 atomic_t queue_cnt;
387 spinlock_t cmd_queue_lock; 478 spinlock_t cmd_queue_lock;
388 struct list_head qobj_list; 479 struct list_head qobj_list;
389 wait_queue_head_t thread_wq; 480 wait_queue_head_t thread_wq;
390 } ____cacheline_aligned; 481 } ____cacheline_aligned;
391 482
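The queue object pairs a locked request list with a wait queue for the device processing thread. A minimal enqueue-side sketch (mirroring, not quoting, the core's own usage of these fields):

    static void queue_req_sketch(struct se_queue_obj *qobj,
                                 struct se_queue_req *qr)
    {
            unsigned long flags;

            /* link the request and bump the count under the queue lock */
            spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
            list_add_tail(&qr->qr_list, &qobj->qobj_list);
            atomic_inc(&qobj->queue_cnt);
            spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

            /* wake the processing thread sleeping on thread_wq */
            wake_up_interruptible(&qobj->thread_wq);
    }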
392 struct se_task { 483 struct se_task {
393 unsigned long long task_lba; 484 unsigned long long task_lba;
394 u32 task_sectors; 485 u32 task_sectors;
395 u32 task_size; 486 u32 task_size;
396 struct se_cmd *task_se_cmd; 487 struct se_cmd *task_se_cmd;
397 struct scatterlist *task_sg; 488 struct scatterlist *task_sg;
398 u32 task_sg_nents; 489 u32 task_sg_nents;
399 u16 task_flags; 490 u16 task_flags;
400 u8 task_sense; 491 u8 task_sense;
401 u8 task_scsi_status; 492 u8 task_scsi_status;
402 int task_error_status; 493 int task_error_status;
403 enum dma_data_direction task_data_direction; 494 enum dma_data_direction task_data_direction;
404 atomic_t task_state_active; 495 atomic_t task_state_active;
405 struct list_head t_list; 496 struct list_head t_list;
406 struct list_head t_execute_list; 497 struct list_head t_execute_list;
407 struct list_head t_state_list; 498 struct list_head t_state_list;
408 struct completion task_stop_comp; 499 struct completion task_stop_comp;
409 } ____cacheline_aligned; 500 } ____cacheline_aligned;
410 501
411 struct se_cmd { 502 struct se_cmd {
412 /* SAM response code being sent to initiator */ 503 /* SAM response code being sent to initiator */
413 u8 scsi_status; 504 u8 scsi_status;
414 u8 scsi_asc; 505 u8 scsi_asc;
415 u8 scsi_ascq; 506 u8 scsi_ascq;
416 u8 scsi_sense_reason; 507 u8 scsi_sense_reason;
417 u16 scsi_sense_length; 508 u16 scsi_sense_length;
418 /* Delay for ALUA Active/NonOptimized state access in milliseconds */ 509 /* Delay for ALUA Active/NonOptimized state access in milliseconds */
419 int alua_nonop_delay; 510 int alua_nonop_delay;
420 /* See include/linux/dma-mapping.h */ 511 /* See include/linux/dma-mapping.h */
421 enum dma_data_direction data_direction; 512 enum dma_data_direction data_direction;
422 /* For SAM Task Attribute */ 513 /* For SAM Task Attribute */
423 int sam_task_attr; 514 int sam_task_attr;
424 /* Transport protocol dependent state, see transport_state_table */ 515 /* Transport protocol dependent state, see transport_state_table */
425 enum transport_state_table t_state; 516 enum transport_state_table t_state;
426 /* Used to signal cmd->se_tfo->check_release_cmd() usage per cmd */ 517 /* Used to signal cmd->se_tfo->check_release_cmd() usage per cmd */
427 unsigned check_release:1; 518 unsigned check_release:1;
428 unsigned cmd_wait_set:1; 519 unsigned cmd_wait_set:1;
429 /* See se_cmd_flags_table */ 520 /* See se_cmd_flags_table */
430 u32 se_cmd_flags; 521 u32 se_cmd_flags;
431 u32 se_ordered_id; 522 u32 se_ordered_id;
432 /* Total size in bytes associated with command */ 523 /* Total size in bytes associated with command */
433 u32 data_length; 524 u32 data_length;
434 /* SCSI Presented Data Transfer Length */ 525 /* SCSI Presented Data Transfer Length */
435 u32 cmd_spdtl; 526 u32 cmd_spdtl;
436 u32 residual_count; 527 u32 residual_count;
437 u32 orig_fe_lun; 528 u32 orig_fe_lun;
438 /* Persistent Reservation key */ 529 /* Persistent Reservation key */
439 u64 pr_res_key; 530 u64 pr_res_key;
440 /* Used for sense data */ 531 /* Used for sense data */
441 void *sense_buffer; 532 void *sense_buffer;
442 struct list_head se_delayed_node; 533 struct list_head se_delayed_node;
443 struct list_head se_lun_node; 534 struct list_head se_lun_node;
444 struct list_head se_qf_node; 535 struct list_head se_qf_node;
445 struct se_device *se_dev; 536 struct se_device *se_dev;
446 struct se_dev_entry *se_deve; 537 struct se_dev_entry *se_deve;
447 struct se_lun *se_lun; 538 struct se_lun *se_lun;
448 /* Only used for internal passthrough and legacy TCM fabric modules */ 539 /* Only used for internal passthrough and legacy TCM fabric modules */
449 struct se_session *se_sess; 540 struct se_session *se_sess;
450 struct se_tmr_req *se_tmr_req; 541 struct se_tmr_req *se_tmr_req;
451 struct list_head se_queue_node; 542 struct list_head se_queue_node;
452 struct list_head se_cmd_list; 543 struct list_head se_cmd_list;
453 struct completion cmd_wait_comp; 544 struct completion cmd_wait_comp;
454 struct target_core_fabric_ops *se_tfo; 545 struct target_core_fabric_ops *se_tfo;
455 int (*execute_task)(struct se_task *); 546 int (*execute_task)(struct se_task *);
456 void (*transport_complete_callback)(struct se_cmd *); 547 void (*transport_complete_callback)(struct se_cmd *);
457 548
458 unsigned char *t_task_cdb; 549 unsigned char *t_task_cdb;
459 unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE]; 550 unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE];
460 unsigned long long t_task_lba; 551 unsigned long long t_task_lba;
461 int t_tasks_failed; 552 int t_tasks_failed;
462 u32 t_tasks_sg_chained_no; 553 u32 t_tasks_sg_chained_no;
463 atomic_t t_fe_count; 554 atomic_t t_fe_count;
464 atomic_t t_se_count; 555 atomic_t t_se_count;
465 atomic_t t_task_cdbs_left; 556 atomic_t t_task_cdbs_left;
466 atomic_t t_task_cdbs_ex_left; 557 atomic_t t_task_cdbs_ex_left;
467 atomic_t t_task_cdbs_sent; 558 atomic_t t_task_cdbs_sent;
468 atomic_t t_transport_aborted; 559 atomic_t t_transport_aborted;
469 atomic_t t_transport_active; 560 atomic_t t_transport_active;
470 atomic_t t_transport_complete; 561 atomic_t t_transport_complete;
471 atomic_t t_transport_queue_active; 562 atomic_t t_transport_queue_active;
472 atomic_t t_transport_sent; 563 atomic_t t_transport_sent;
473 atomic_t t_transport_stop; 564 atomic_t t_transport_stop;
474 atomic_t transport_dev_active; 565 atomic_t transport_dev_active;
475 atomic_t transport_lun_active; 566 atomic_t transport_lun_active;
476 atomic_t transport_lun_fe_stop; 567 atomic_t transport_lun_fe_stop;
477 atomic_t transport_lun_stop; 568 atomic_t transport_lun_stop;
478 spinlock_t t_state_lock; 569 spinlock_t t_state_lock;
479 struct completion t_transport_stop_comp; 570 struct completion t_transport_stop_comp;
480 struct completion transport_lun_fe_stop_comp; 571 struct completion transport_lun_fe_stop_comp;
481 struct completion transport_lun_stop_comp; 572 struct completion transport_lun_stop_comp;
482 struct scatterlist *t_tasks_sg_chained; 573 struct scatterlist *t_tasks_sg_chained;
483 574
484 struct work_struct work; 575 struct work_struct work;
485 576
486 struct scatterlist *t_data_sg; 577 struct scatterlist *t_data_sg;
487 unsigned int t_data_nents; 578 unsigned int t_data_nents;
488 struct scatterlist *t_bidi_data_sg; 579 struct scatterlist *t_bidi_data_sg;
489 unsigned int t_bidi_data_nents; 580 unsigned int t_bidi_data_nents;
490 581
491 /* Used for BIDI READ */ 582 /* Used for BIDI READ */
492 struct list_head t_task_list; 583 struct list_head t_task_list;
493 u32 t_task_list_num; 584 u32 t_task_list_num;
494 585
495 } ____cacheline_aligned; 586 } ____cacheline_aligned;
496 587
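Fabric modules normally embed struct se_cmd inside a fabric-private command descriptor and recover the container in their callbacks; struct my_cmd and my_write_pending below are hypothetical stand-ins for that pattern:

    #include <linux/kernel.h>               /* container_of() */
    #include <target/target_core_base.h>

    struct my_cmd {
            u32             my_tag;         /* fabric-private state */
            struct se_cmd   se_cmd;         /* embedded TCM descriptor */
    };

    static int my_write_pending(struct se_cmd *se_cmd)
    {
            struct my_cmd *cmd = container_of(se_cmd, struct my_cmd, se_cmd);

            /* kick off fabric-specific data-out transfer for cmd->my_tag */
            return 0;
    }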
497 struct se_tmr_req { 588 struct se_tmr_req {
498 /* Task Management function to be performed */ 589 /* Task Management function to be performed */
499 u8 function; 590 u8 function;
500 /* Task Management response to send */ 591 /* Task Management response to send */
501 u8 response; 592 u8 response;
502 int call_transport; 593 int call_transport;
503 /* Reference to ITT that Task Mgmt should be performed */ 594 /* Reference to ITT that Task Mgmt should be performed */
504 u32 ref_task_tag; 595 u32 ref_task_tag;
505 /* 64-bit encoded SAM LUN from $FABRIC_MOD TMR header */ 596 /* 64-bit encoded SAM LUN from $FABRIC_MOD TMR header */
506 u64 ref_task_lun; 597 u64 ref_task_lun;
507 void *fabric_tmr_ptr; 598 void *fabric_tmr_ptr;
508 struct se_cmd *task_cmd; 599 struct se_cmd *task_cmd;
509 struct se_cmd *ref_cmd; 600 struct se_cmd *ref_cmd;
510 struct se_device *tmr_dev; 601 struct se_device *tmr_dev;
511 struct se_lun *tmr_lun; 602 struct se_lun *tmr_lun;
512 struct list_head tmr_list; 603 struct list_head tmr_list;
513 } ____cacheline_aligned; 604 } ____cacheline_aligned;
514 605
515 struct se_ua { 606 struct se_ua {
516 u8 ua_asc; 607 u8 ua_asc;
517 u8 ua_ascq; 608 u8 ua_ascq;
518 struct se_node_acl *ua_nacl; 609 struct se_node_acl *ua_nacl;
519 struct list_head ua_dev_list; 610 struct list_head ua_dev_list;
520 struct list_head ua_nacl_list; 611 struct list_head ua_nacl_list;
521 } ____cacheline_aligned; 612 } ____cacheline_aligned;
522 613
523 struct se_node_acl { 614 struct se_node_acl {
524 char initiatorname[TRANSPORT_IQN_LEN]; 615 char initiatorname[TRANSPORT_IQN_LEN];
525 /* Used to signal demo mode created ACL, disabled by default */ 616 /* Used to signal demo mode created ACL, disabled by default */
526 bool dynamic_node_acl; 617 bool dynamic_node_acl;
527 u32 queue_depth; 618 u32 queue_depth;
528 u32 acl_index; 619 u32 acl_index;
529 u64 num_cmds; 620 u64 num_cmds;
530 u64 read_bytes; 621 u64 read_bytes;
531 u64 write_bytes; 622 u64 write_bytes;
532 spinlock_t stats_lock; 623 spinlock_t stats_lock;
533 /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ 624 /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
534 atomic_t acl_pr_ref_count; 625 atomic_t acl_pr_ref_count;
535 struct se_dev_entry *device_list; 626 struct se_dev_entry *device_list;
536 struct se_session *nacl_sess; 627 struct se_session *nacl_sess;
537 struct se_portal_group *se_tpg; 628 struct se_portal_group *se_tpg;
538 spinlock_t device_list_lock; 629 spinlock_t device_list_lock;
539 spinlock_t nacl_sess_lock; 630 spinlock_t nacl_sess_lock;
540 struct config_group acl_group; 631 struct config_group acl_group;
541 struct config_group acl_attrib_group; 632 struct config_group acl_attrib_group;
542 struct config_group acl_auth_group; 633 struct config_group acl_auth_group;
543 struct config_group acl_param_group; 634 struct config_group acl_param_group;
544 struct config_group acl_fabric_stat_group; 635 struct config_group acl_fabric_stat_group;
545 struct config_group *acl_default_groups[5]; 636 struct config_group *acl_default_groups[5];
546 struct list_head acl_list; 637 struct list_head acl_list;
547 struct list_head acl_sess_list; 638 struct list_head acl_sess_list;
548 } ____cacheline_aligned; 639 } ____cacheline_aligned;
549 640
550 struct se_session { 641 struct se_session {
551 unsigned sess_tearing_down:1; 642 unsigned sess_tearing_down:1;
552 u64 sess_bin_isid; 643 u64 sess_bin_isid;
553 struct se_node_acl *se_node_acl; 644 struct se_node_acl *se_node_acl;
554 struct se_portal_group *se_tpg; 645 struct se_portal_group *se_tpg;
555 void *fabric_sess_ptr; 646 void *fabric_sess_ptr;
556 struct list_head sess_list; 647 struct list_head sess_list;
557 struct list_head sess_acl_list; 648 struct list_head sess_acl_list;
558 struct list_head sess_cmd_list; 649 struct list_head sess_cmd_list;
559 struct list_head sess_wait_list; 650 struct list_head sess_wait_list;
560 spinlock_t sess_cmd_lock; 651 spinlock_t sess_cmd_lock;
561 } ____cacheline_aligned; 652 } ____cacheline_aligned;
562 653
563 struct se_device; 654 struct se_device;
564 struct se_transform_info; 655 struct se_transform_info;
565 struct scatterlist; 656 struct scatterlist;
566 657
567 struct se_ml_stat_grps { 658 struct se_ml_stat_grps {
568 struct config_group stat_group; 659 struct config_group stat_group;
569 struct config_group scsi_auth_intr_group; 660 struct config_group scsi_auth_intr_group;
570 struct config_group scsi_att_intr_port_group; 661 struct config_group scsi_att_intr_port_group;
571 }; 662 };
572 663
573 struct se_lun_acl { 664 struct se_lun_acl {
574 char initiatorname[TRANSPORT_IQN_LEN]; 665 char initiatorname[TRANSPORT_IQN_LEN];
575 u32 mapped_lun; 666 u32 mapped_lun;
576 struct se_node_acl *se_lun_nacl; 667 struct se_node_acl *se_lun_nacl;
577 struct se_lun *se_lun; 668 struct se_lun *se_lun;
578 struct list_head lacl_list; 669 struct list_head lacl_list;
579 struct config_group se_lun_group; 670 struct config_group se_lun_group;
580 struct se_ml_stat_grps ml_stat_grps; 671 struct se_ml_stat_grps ml_stat_grps;
581 } ____cacheline_aligned; 672 } ____cacheline_aligned;
582 673
583 struct se_dev_entry { 674 struct se_dev_entry {
584 bool def_pr_registered; 675 bool def_pr_registered;
585 /* See transport_lunflags_table */ 676 /* See transport_lunflags_table */
586 u32 lun_flags; 677 u32 lun_flags;
587 u32 deve_cmds; 678 u32 deve_cmds;
588 u32 mapped_lun; 679 u32 mapped_lun;
589 u32 average_bytes; 680 u32 average_bytes;
590 u32 last_byte_count; 681 u32 last_byte_count;
591 u32 total_cmds; 682 u32 total_cmds;
592 u32 total_bytes; 683 u32 total_bytes;
593 u64 pr_res_key; 684 u64 pr_res_key;
594 u64 creation_time; 685 u64 creation_time;
595 u32 attach_count; 686 u32 attach_count;
596 u64 read_bytes; 687 u64 read_bytes;
597 u64 write_bytes; 688 u64 write_bytes;
598 atomic_t ua_count; 689 atomic_t ua_count;
599 /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ 690 /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
600 atomic_t pr_ref_count; 691 atomic_t pr_ref_count;
601 struct se_lun_acl *se_lun_acl; 692 struct se_lun_acl *se_lun_acl;
602 spinlock_t ua_lock; 693 spinlock_t ua_lock;
603 struct se_lun *se_lun; 694 struct se_lun *se_lun;
604 struct list_head alua_port_list; 695 struct list_head alua_port_list;
605 struct list_head ua_list; 696 struct list_head ua_list;
606 } ____cacheline_aligned; 697 } ____cacheline_aligned;
607 698
608 struct se_dev_limits { 699 struct se_dev_limits {
609 /* Max supported HW queue depth */ 700 /* Max supported HW queue depth */
610 u32 hw_queue_depth; 701 u32 hw_queue_depth;
611 /* Max supported virtual queue depth */ 702 /* Max supported virtual queue depth */
612 u32 queue_depth; 703 u32 queue_depth;
613 /* From include/linux/blkdev.h for the other HW/SW limits. */ 704 /* From include/linux/blkdev.h for the other HW/SW limits. */
614 struct queue_limits limits; 705 struct queue_limits limits;
615 } ____cacheline_aligned; 706 } ____cacheline_aligned;
616 707
617 struct se_dev_attrib { 708 struct se_dev_attrib {
618 int emulate_dpo; 709 int emulate_dpo;
619 int emulate_fua_write; 710 int emulate_fua_write;
620 int emulate_fua_read; 711 int emulate_fua_read;
621 int emulate_write_cache; 712 int emulate_write_cache;
622 int emulate_ua_intlck_ctrl; 713 int emulate_ua_intlck_ctrl;
623 int emulate_tas; 714 int emulate_tas;
624 int emulate_tpu; 715 int emulate_tpu;
625 int emulate_tpws; 716 int emulate_tpws;
626 int emulate_reservations; 717 int emulate_reservations;
627 int emulate_alua; 718 int emulate_alua;
628 int enforce_pr_isids; 719 int enforce_pr_isids;
629 int is_nonrot; 720 int is_nonrot;
630 int emulate_rest_reord; 721 int emulate_rest_reord;
631 u32 hw_block_size; 722 u32 hw_block_size;
632 u32 block_size; 723 u32 block_size;
633 u32 hw_max_sectors; 724 u32 hw_max_sectors;
634 u32 max_sectors; 725 u32 max_sectors;
635 u32 optimal_sectors; 726 u32 optimal_sectors;
636 u32 hw_queue_depth; 727 u32 hw_queue_depth;
637 u32 queue_depth; 728 u32 queue_depth;
638 u32 max_unmap_lba_count; 729 u32 max_unmap_lba_count;
639 u32 max_unmap_block_desc_count; 730 u32 max_unmap_block_desc_count;
640 u32 unmap_granularity; 731 u32 unmap_granularity;
641 u32 unmap_granularity_alignment; 732 u32 unmap_granularity_alignment;
642 struct se_subsystem_dev *da_sub_dev; 733 struct se_subsystem_dev *da_sub_dev;
643 struct config_group da_group; 734 struct config_group da_group;
644 } ____cacheline_aligned; 735 } ____cacheline_aligned;
645 736
646 struct se_dev_stat_grps { 737 struct se_dev_stat_grps {
647 struct config_group stat_group; 738 struct config_group stat_group;
648 struct config_group scsi_dev_group; 739 struct config_group scsi_dev_group;
649 struct config_group scsi_tgt_dev_group; 740 struct config_group scsi_tgt_dev_group;
650 struct config_group scsi_lu_group; 741 struct config_group scsi_lu_group;
651 }; 742 };
652 743
653 struct se_subsystem_dev { 744 struct se_subsystem_dev {
654 /* Used for struct se_subsystem_dev->se_dev_alias, must be less than PAGE_SIZE */ 745 /* Used for struct se_subsystem_dev->se_dev_alias, must be less than PAGE_SIZE */
655 #define SE_DEV_ALIAS_LEN 512 746 #define SE_DEV_ALIAS_LEN 512
656 unsigned char se_dev_alias[SE_DEV_ALIAS_LEN]; 747 unsigned char se_dev_alias[SE_DEV_ALIAS_LEN];
657 /* Used for struct se_subsystem_dev->se_dev_udev_path[], must be less than PAGE_SIZE */ 748 /* Used for struct se_subsystem_dev->se_dev_udev_path[], must be less than PAGE_SIZE */
658 #define SE_UDEV_PATH_LEN 512 749 #define SE_UDEV_PATH_LEN 512
659 unsigned char se_dev_udev_path[SE_UDEV_PATH_LEN]; 750 unsigned char se_dev_udev_path[SE_UDEV_PATH_LEN];
660 u32 su_dev_flags; 751 u32 su_dev_flags;
661 struct se_hba *se_dev_hba; 752 struct se_hba *se_dev_hba;
662 struct se_device *se_dev_ptr; 753 struct se_device *se_dev_ptr;
663 struct se_dev_attrib se_dev_attrib; 754 struct se_dev_attrib se_dev_attrib;
664 /* T10 Asymmetric Logical Unit Assignment for Target Ports */ 755 /* T10 Asymmetric Logical Unit Assignment for Target Ports */
665 struct t10_alua t10_alua; 756 struct t10_alua t10_alua;
666 /* T10 Inquiry and VPD WWN Information */ 757 /* T10 Inquiry and VPD WWN Information */
667 struct t10_wwn t10_wwn; 758 struct t10_wwn t10_wwn;
668 /* T10 SPC-2 + SPC-3 Reservations */ 759 /* T10 SPC-2 + SPC-3 Reservations */
669 struct t10_reservation t10_pr; 760 struct t10_reservation t10_pr;
670 spinlock_t se_dev_lock; 761 spinlock_t se_dev_lock;
671 void *se_dev_su_ptr; 762 void *se_dev_su_ptr;
672 struct config_group se_dev_group; 763 struct config_group se_dev_group;
673 /* For T10 Reservations */ 764 /* For T10 Reservations */
674 struct config_group se_dev_pr_group; 765 struct config_group se_dev_pr_group;
675 /* For target_core_stat.c groups */ 766 /* For target_core_stat.c groups */
676 struct se_dev_stat_grps dev_stat_grps; 767 struct se_dev_stat_grps dev_stat_grps;
677 } ____cacheline_aligned; 768 } ____cacheline_aligned;
678 769
679 struct se_device { 770 struct se_device {
680 /* RELATIVE TARGET PORT IDENTIFIER Counter */ 771 /* RELATIVE TARGET PORT IDENTIFIER Counter */
681 u16 dev_rpti_counter; 772 u16 dev_rpti_counter;
682 /* Used for SAM Task Attribute ordering */ 773 /* Used for SAM Task Attribute ordering */
683 u32 dev_cur_ordered_id; 774 u32 dev_cur_ordered_id;
684 u32 dev_flags; 775 u32 dev_flags;
685 u32 dev_port_count; 776 u32 dev_port_count;
686 /* See transport_device_status_table */ 777 /* See transport_device_status_table */
687 u32 dev_status; 778 u32 dev_status;
688 u32 dev_tcq_window_closed; 779 u32 dev_tcq_window_closed;
689 /* Physical device queue depth */ 780 /* Physical device queue depth */
690 u32 queue_depth; 781 u32 queue_depth;
691 /* Used for SPC-2 reservations enforcement of ISIDs */ 782 /* Used for SPC-2 reservations enforcement of ISIDs */
692 u64 dev_res_bin_isid; 783 u64 dev_res_bin_isid;
693 t10_task_attr_index_t dev_task_attr_type; 784 t10_task_attr_index_t dev_task_attr_type;
694 /* Pointer to transport specific device structure */ 785 /* Pointer to transport specific device structure */
695 void *dev_ptr; 786 void *dev_ptr;
696 u32 dev_index; 787 u32 dev_index;
697 u64 creation_time; 788 u64 creation_time;
698 u32 num_resets; 789 u32 num_resets;
699 u64 num_cmds; 790 u64 num_cmds;
700 u64 read_bytes; 791 u64 read_bytes;
701 u64 write_bytes; 792 u64 write_bytes;
702 spinlock_t stats_lock; 793 spinlock_t stats_lock;
703 /* Active commands on this virtual SE device */ 794 /* Active commands on this virtual SE device */
704 atomic_t simple_cmds; 795 atomic_t simple_cmds;
705 atomic_t depth_left; 796 atomic_t depth_left;
706 atomic_t dev_ordered_id; 797 atomic_t dev_ordered_id;
707 atomic_t execute_tasks; 798 atomic_t execute_tasks;
708 atomic_t dev_ordered_sync; 799 atomic_t dev_ordered_sync;
709 atomic_t dev_qf_count; 800 atomic_t dev_qf_count;
710 struct se_obj dev_obj; 801 struct se_obj dev_obj;
711 struct se_obj dev_access_obj; 802 struct se_obj dev_access_obj;
712 struct se_obj dev_export_obj; 803 struct se_obj dev_export_obj;
713 struct se_queue_obj dev_queue_obj; 804 struct se_queue_obj dev_queue_obj;
714 spinlock_t delayed_cmd_lock; 805 spinlock_t delayed_cmd_lock;
715 spinlock_t execute_task_lock; 806 spinlock_t execute_task_lock;
716 spinlock_t dev_reservation_lock; 807 spinlock_t dev_reservation_lock;
717 spinlock_t dev_status_lock; 808 spinlock_t dev_status_lock;
718 spinlock_t se_port_lock; 809 spinlock_t se_port_lock;
719 spinlock_t se_tmr_lock; 810 spinlock_t se_tmr_lock;
720 spinlock_t qf_cmd_lock; 811 spinlock_t qf_cmd_lock;
721 /* Used for legacy SPC-2 reservations */ 812 /* Used for legacy SPC-2 reservations */
722 struct se_node_acl *dev_reserved_node_acl; 813 struct se_node_acl *dev_reserved_node_acl;
723 /* Used for ALUA Logical Unit Group membership */ 814 /* Used for ALUA Logical Unit Group membership */
724 struct t10_alua_lu_gp_member *dev_alua_lu_gp_mem; 815 struct t10_alua_lu_gp_member *dev_alua_lu_gp_mem;
725 /* Used for SPC-3 Persistent Reservations */ 816 /* Used for SPC-3 Persistent Reservations */
726 struct t10_pr_registration *dev_pr_res_holder; 817 struct t10_pr_registration *dev_pr_res_holder;
727 struct list_head dev_sep_list; 818 struct list_head dev_sep_list;
728 struct list_head dev_tmr_list; 819 struct list_head dev_tmr_list;
729 /* Pointer to descriptor for processing thread */ 820 /* Pointer to descriptor for processing thread */
730 struct task_struct *process_thread; 821 struct task_struct *process_thread;
731 struct work_struct qf_work_queue; 822 struct work_struct qf_work_queue;
732 struct list_head delayed_cmd_list; 823 struct list_head delayed_cmd_list;
733 struct list_head execute_task_list; 824 struct list_head execute_task_list;
734 struct list_head state_task_list; 825 struct list_head state_task_list;
735 struct list_head qf_cmd_list; 826 struct list_head qf_cmd_list;
736 /* Pointer to associated SE HBA */ 827 /* Pointer to associated SE HBA */
737 struct se_hba *se_hba; 828 struct se_hba *se_hba;
738 struct se_subsystem_dev *se_sub_dev; 829 struct se_subsystem_dev *se_sub_dev;
739 /* Pointer to template of function pointers for transport */ 830 /* Pointer to template of function pointers for transport */
740 struct se_subsystem_api *transport; 831 struct se_subsystem_api *transport;
741 /* Linked list for struct se_hba struct se_device list */ 832 /* Linked list for struct se_hba struct se_device list */
742 struct list_head dev_list; 833 struct list_head dev_list;
743 } ____cacheline_aligned; 834 } ____cacheline_aligned;
744 835
745 struct se_hba { 836 struct se_hba {
746 u16 hba_tpgt; 837 u16 hba_tpgt;
747 u32 hba_id; 838 u32 hba_id;
748 /* See hba_flags_table */ 839 /* See hba_flags_table */
749 u32 hba_flags; 840 u32 hba_flags;
750 /* Virtual iSCSI devices attached. */ 841 /* Virtual iSCSI devices attached. */
751 u32 dev_count; 842 u32 dev_count;
752 u32 hba_index; 843 u32 hba_index;
753 /* Pointer to transport specific host structure. */ 844 /* Pointer to transport specific host structure. */
754 void *hba_ptr; 845 void *hba_ptr;
755 /* Linked list for struct se_device */ 846 /* Linked list for struct se_device */
756 struct list_head hba_dev_list; 847 struct list_head hba_dev_list;
757 struct list_head hba_node; 848 struct list_head hba_node;
758 spinlock_t device_lock; 849 spinlock_t device_lock;
759 struct config_group hba_group; 850 struct config_group hba_group;
760 struct mutex hba_access_mutex; 851 struct mutex hba_access_mutex;
761 struct se_subsystem_api *transport; 852 struct se_subsystem_api *transport;
762 } ____cacheline_aligned; 853 } ____cacheline_aligned;
763 854
764 struct se_port_stat_grps { 855 struct se_port_stat_grps {
765 struct config_group stat_group; 856 struct config_group stat_group;
766 struct config_group scsi_port_group; 857 struct config_group scsi_port_group;
767 struct config_group scsi_tgt_port_group; 858 struct config_group scsi_tgt_port_group;
768 struct config_group scsi_transport_group; 859 struct config_group scsi_transport_group;
769 }; 860 };
770 861
771 struct se_lun { 862 struct se_lun {
772 /* See transport_lun_status_table */ 863 /* See transport_lun_status_table */
773 enum transport_lun_status_table lun_status; 864 enum transport_lun_status_table lun_status;
774 u32 lun_access; 865 u32 lun_access;
775 u32 lun_flags; 866 u32 lun_flags;
776 u32 unpacked_lun; 867 u32 unpacked_lun;
777 atomic_t lun_acl_count; 868 atomic_t lun_acl_count;
778 spinlock_t lun_acl_lock; 869 spinlock_t lun_acl_lock;
779 spinlock_t lun_cmd_lock; 870 spinlock_t lun_cmd_lock;
780 spinlock_t lun_sep_lock; 871 spinlock_t lun_sep_lock;
781 struct completion lun_shutdown_comp; 872 struct completion lun_shutdown_comp;
782 struct list_head lun_cmd_list; 873 struct list_head lun_cmd_list;
783 struct list_head lun_acl_list; 874 struct list_head lun_acl_list;
784 struct se_device *lun_se_dev; 875 struct se_device *lun_se_dev;
785 struct se_port *lun_sep; 876 struct se_port *lun_sep;
786 struct config_group lun_group; 877 struct config_group lun_group;
787 struct se_port_stat_grps port_stat_grps; 878 struct se_port_stat_grps port_stat_grps;
788 } ____cacheline_aligned; 879 } ____cacheline_aligned;
789 880
790 struct scsi_port_stats { 881 struct scsi_port_stats {
791 u64 cmd_pdus; 882 u64 cmd_pdus;
792 u64 tx_data_octets; 883 u64 tx_data_octets;
793 u64 rx_data_octets; 884 u64 rx_data_octets;
794 } ____cacheline_aligned; 885 } ____cacheline_aligned;
795 886
796 struct se_port { 887 struct se_port {
797 /* RELATIVE TARGET PORT IDENTIFIER */ 888 /* RELATIVE TARGET PORT IDENTIFIER */
798 u16 sep_rtpi; 889 u16 sep_rtpi;
799 int sep_tg_pt_secondary_stat; 890 int sep_tg_pt_secondary_stat;
800 int sep_tg_pt_secondary_write_md; 891 int sep_tg_pt_secondary_write_md;
801 u32 sep_index; 892 u32 sep_index;
802 struct scsi_port_stats sep_stats; 893 struct scsi_port_stats sep_stats;
803 /* Used for ALUA Target Port Groups membership */ 894 /* Used for ALUA Target Port Groups membership */
804 atomic_t sep_tg_pt_secondary_offline; 895 atomic_t sep_tg_pt_secondary_offline;
805 /* Used for PR ALL_TG_PT=1 */ 896 /* Used for PR ALL_TG_PT=1 */
806 atomic_t sep_tg_pt_ref_cnt; 897 atomic_t sep_tg_pt_ref_cnt;
807 spinlock_t sep_alua_lock; 898 spinlock_t sep_alua_lock;
808 struct mutex sep_tg_pt_md_mutex; 899 struct mutex sep_tg_pt_md_mutex;
809 struct t10_alua_tg_pt_gp_member *sep_alua_tg_pt_gp_mem; 900 struct t10_alua_tg_pt_gp_member *sep_alua_tg_pt_gp_mem;
810 struct se_lun *sep_lun; 901 struct se_lun *sep_lun;
811 struct se_portal_group *sep_tpg; 902 struct se_portal_group *sep_tpg;
812 struct list_head sep_alua_list; 903 struct list_head sep_alua_list;
813 struct list_head sep_list; 904 struct list_head sep_list;
814 } ____cacheline_aligned; 905 } ____cacheline_aligned;
815 906
816 struct se_tpg_np { 907 struct se_tpg_np {
817 struct se_portal_group *tpg_np_parent; 908 struct se_portal_group *tpg_np_parent;
818 struct config_group tpg_np_group; 909 struct config_group tpg_np_group;
819 } ____cacheline_aligned; 910 } ____cacheline_aligned;
820 911
821 struct se_portal_group { 912 struct se_portal_group {
822 /* Type of target portal group, see transport_tpg_type_table */ 913 /* Type of target portal group, see transport_tpg_type_table */
823 enum transport_tpg_type_table se_tpg_type; 914 enum transport_tpg_type_table se_tpg_type;
824 /* Number of ACLed Initiator Nodes for this TPG */ 915 /* Number of ACLed Initiator Nodes for this TPG */
825 u32 num_node_acls; 916 u32 num_node_acls;
826 /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ 917 /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
827 atomic_t tpg_pr_ref_count; 918 atomic_t tpg_pr_ref_count;
828 /* Spinlock for adding/removing ACLed Nodes */ 919 /* Spinlock for adding/removing ACLed Nodes */
829 spinlock_t acl_node_lock; 920 spinlock_t acl_node_lock;
830 /* Spinlock for adding/removing sessions */ 921 /* Spinlock for adding/removing sessions */
831 spinlock_t session_lock; 922 spinlock_t session_lock;
832 spinlock_t tpg_lun_lock; 923 spinlock_t tpg_lun_lock;
833 /* Pointer to $FABRIC_MOD portal group */ 924 /* Pointer to $FABRIC_MOD portal group */
834 void *se_tpg_fabric_ptr; 925 void *se_tpg_fabric_ptr;
835 struct list_head se_tpg_node; 926 struct list_head se_tpg_node;
836 /* linked list for initiator ACL list */ 927 /* linked list for initiator ACL list */
837 struct list_head acl_node_list; 928 struct list_head acl_node_list;
838 struct se_lun *tpg_lun_list; 929 struct se_lun *tpg_lun_list;
839 struct se_lun tpg_virt_lun0; 930 struct se_lun tpg_virt_lun0;
840 /* List of TCM sessions associated with this TPG */ 931 /* List of TCM sessions associated with this TPG */
841 struct list_head tpg_sess_list; 932 struct list_head tpg_sess_list;
842 /* Pointer to $FABRIC_MOD dependent code */ 933 /* Pointer to $FABRIC_MOD dependent code */
843 struct target_core_fabric_ops *se_tpg_tfo; 934 struct target_core_fabric_ops *se_tpg_tfo;
844 struct se_wwn *se_tpg_wwn; 935 struct se_wwn *se_tpg_wwn;
845 struct config_group tpg_group; 936 struct config_group tpg_group;
846 struct config_group *tpg_default_groups[6]; 937 struct config_group *tpg_default_groups[6];
847 struct config_group tpg_lun_group; 938 struct config_group tpg_lun_group;
848 struct config_group tpg_np_group; 939 struct config_group tpg_np_group;
849 struct config_group tpg_acl_group; 940 struct config_group tpg_acl_group;
850 struct config_group tpg_attrib_group; 941 struct config_group tpg_attrib_group;
851 struct config_group tpg_param_group; 942 struct config_group tpg_param_group;
852 } ____cacheline_aligned; 943 } ____cacheline_aligned;
853 944
854 struct se_wwn { 945 struct se_wwn {
855 struct target_fabric_configfs *wwn_tf; 946 struct target_fabric_configfs *wwn_tf;
856 struct config_group wwn_group; 947 struct config_group wwn_group;
857 struct config_group *wwn_default_groups[2]; 948 struct config_group *wwn_default_groups[2];
858 struct config_group fabric_stat_group; 949 struct config_group fabric_stat_group;
859 } ____cacheline_aligned; 950 } ____cacheline_aligned;
860 951
861 #endif /* TARGET_CORE_BASE_H */ 952 #endif /* TARGET_CORE_BASE_H */
862 953
include/target/target_core_device.h
1 #ifndef TARGET_CORE_DEVICE_H File was deleted
2 #define TARGET_CORE_DEVICE_H
3
4
5 // external
6 extern int transport_lookup_cmd_lun(struct se_cmd *, u32);
7 extern int transport_lookup_tmr_lun(struct se_cmd *, u32);
8
9 #endif /* TARGET_CORE_DEVICE_H */
10
include/target/target_core_fabric.h
File was created 1 #ifndef TARGET_CORE_FABRIC_H
2 #define TARGET_CORE_FABRIC_H
3
4 struct target_core_fabric_ops {
5 struct configfs_subsystem *tf_subsys;
6 /*
7 * Optional to signal struct se_task->task_sg[] padding entries
8 * for scatterlist chaining using transport_do_task_sg_chain(),
9 * disabled by default
10 */
11 bool task_sg_chaining;
12 char *(*get_fabric_name)(void);
13 u8 (*get_fabric_proto_ident)(struct se_portal_group *);
14 char *(*tpg_get_wwn)(struct se_portal_group *);
15 u16 (*tpg_get_tag)(struct se_portal_group *);
16 u32 (*tpg_get_default_depth)(struct se_portal_group *);
17 u32 (*tpg_get_pr_transport_id)(struct se_portal_group *,
18 struct se_node_acl *,
19 struct t10_pr_registration *, int *,
20 unsigned char *);
21 u32 (*tpg_get_pr_transport_id_len)(struct se_portal_group *,
22 struct se_node_acl *,
23 struct t10_pr_registration *, int *);
24 char *(*tpg_parse_pr_out_transport_id)(struct se_portal_group *,
25 const char *, u32 *, char **);
26 int (*tpg_check_demo_mode)(struct se_portal_group *);
27 int (*tpg_check_demo_mode_cache)(struct se_portal_group *);
28 int (*tpg_check_demo_mode_write_protect)(struct se_portal_group *);
29 int (*tpg_check_prod_mode_write_protect)(struct se_portal_group *);
30 /*
31 * Optionally used by fabrics to allow demo-mode login, but not
32 * expose any TPG LUNs, and return 'not connected' in standard
33 * inquiry response
34 */
35 int (*tpg_check_demo_mode_login_only)(struct se_portal_group *);
36 struct se_node_acl *(*tpg_alloc_fabric_acl)(
37 struct se_portal_group *);
38 void (*tpg_release_fabric_acl)(struct se_portal_group *,
39 struct se_node_acl *);
40 u32 (*tpg_get_inst_index)(struct se_portal_group *);
41 /*
42 * Optional function pointer for TCM to perform command map
43 * from TCM processing thread context, for those struct se_cmd
44 * initially allocated in interrupt context.
45 */
46 int (*new_cmd_map)(struct se_cmd *);
47 /*
48 * Optional to release struct se_cmd and fabric dependent allocated
49 * I/O descriptor in transport_cmd_check_stop().
50 *
51 * Returning 1 will signal a descriptor has been released.
52 * Returning 0 will signal a descriptor has not been released.
53 */
54 int (*check_stop_free)(struct se_cmd *);
55 /*
56 * Optional check for active I/O shutdown
57 */
58 int (*check_release_cmd)(struct se_cmd *);
59 void (*release_cmd)(struct se_cmd *);
60 /*
61 * Called with spin_lock_bh(struct se_portal_group->session_lock) held.
62 */
63 int (*shutdown_session)(struct se_session *);
64 void (*close_session)(struct se_session *);
65 void (*stop_session)(struct se_session *, int, int);
66 void (*fall_back_to_erl0)(struct se_session *);
67 int (*sess_logged_in)(struct se_session *);
68 u32 (*sess_get_index)(struct se_session *);
69 /*
70 * Used only for SCSI fabrics that contain multi-value TransportIDs
71 * (like iSCSI). All other SCSI fabrics should set this to NULL.
72 */
73 u32 (*sess_get_initiator_sid)(struct se_session *,
74 unsigned char *, u32);
75 int (*write_pending)(struct se_cmd *);
76 int (*write_pending_status)(struct se_cmd *);
77 void (*set_default_node_attributes)(struct se_node_acl *);
78 u32 (*get_task_tag)(struct se_cmd *);
79 int (*get_cmd_state)(struct se_cmd *);
80 int (*queue_data_in)(struct se_cmd *);
81 int (*queue_status)(struct se_cmd *);
82 int (*queue_tm_rsp)(struct se_cmd *);
83 u16 (*set_fabric_sense_len)(struct se_cmd *, u32);
84 u16 (*get_fabric_sense_len)(void);
85 int (*is_state_remove)(struct se_cmd *);
86 /*
87 * fabric module calls for target_core_fabric_configfs.c
88 */
89 struct se_wwn *(*fabric_make_wwn)(struct target_fabric_configfs *,
90 struct config_group *, const char *);
91 void (*fabric_drop_wwn)(struct se_wwn *);
92 struct se_portal_group *(*fabric_make_tpg)(struct se_wwn *,
93 struct config_group *, const char *);
94 void (*fabric_drop_tpg)(struct se_portal_group *);
95 int (*fabric_post_link)(struct se_portal_group *,
96 struct se_lun *);
97 void (*fabric_pre_unlink)(struct se_portal_group *,
98 struct se_lun *);
99 struct se_tpg_np *(*fabric_make_np)(struct se_portal_group *,
100 struct config_group *, const char *);
101 void (*fabric_drop_np)(struct se_tpg_np *);
102 struct se_node_acl *(*fabric_make_nodeacl)(struct se_portal_group *,
103 struct config_group *, const char *);
104 void (*fabric_drop_nodeacl)(struct se_node_acl *);
105 };
106
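A fabric module fills one of these tables with its callbacks and leaves the truly optional hooks NULL. A partial, hypothetical sketch; every my_* symbol is a stand-in and most mandatory callbacks are elided:

    static char *my_get_fabric_name(void)
    {
            return "myfabric";
    }

    static struct target_core_fabric_ops my_fabric_ops = {
            .get_fabric_name        = my_get_fabric_name,
            .tpg_get_wwn            = my_tpg_get_wwn,
            .tpg_get_tag            = my_tpg_get_tag,
            .release_cmd            = my_release_cmd,
            .write_pending          = my_write_pending,
            .queue_data_in          = my_queue_data_in,
            .queue_status           = my_queue_status,
            .queue_tm_rsp           = my_queue_tm_rsp,
            /*
             * new_cmd_map, check_stop_free, sess_get_initiator_sid, etc.
             * are optional and may remain NULL
             */
    };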
107 struct se_session *transport_init_session(void);
108 void __transport_register_session(struct se_portal_group *,
109 struct se_node_acl *, struct se_session *, void *);
110 void transport_register_session(struct se_portal_group *,
111 struct se_node_acl *, struct se_session *, void *);
112 void transport_free_session(struct se_session *);
113 void transport_deregister_session_configfs(struct se_session *);
114 void transport_deregister_session(struct se_session *);
115
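A typical, hypothetical login path built from the session API above; error handling is abbreviated, and transport_init_session() is assumed here to return an ERR_PTR on failure:

    static int my_fabric_login_sketch(struct se_portal_group *se_tpg,
                                      unsigned char *initiatorname,
                                      void *fabric_sess_ptr)
    {
            struct se_node_acl *acl;
            struct se_session *sess;

            sess = transport_init_session();
            if (IS_ERR(sess))
                    return PTR_ERR(sess);

            /* locate (or demo-mode create) the ACL for this initiator */
            acl = core_tpg_check_initiator_node_acl(se_tpg, initiatorname);
            if (!acl) {
                    transport_free_session(sess);
                    return -EACCES;
            }

            transport_register_session(se_tpg, acl, sess, fabric_sess_ptr);
            return 0;
    }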
116
117 void transport_init_se_cmd(struct se_cmd *, struct target_core_fabric_ops *,
118 struct se_session *, u32, int, int, unsigned char *);
119 int transport_lookup_cmd_lun(struct se_cmd *, u32);
120 int transport_generic_allocate_tasks(struct se_cmd *, unsigned char *);
121 int transport_handle_cdb_direct(struct se_cmd *);
122 int transport_generic_handle_cdb_map(struct se_cmd *);
123 int transport_generic_handle_data(struct se_cmd *);
124 int transport_generic_map_mem_to_cmd(struct se_cmd *cmd,
125 struct scatterlist *, u32, struct scatterlist *, u32);
126 void transport_do_task_sg_chain(struct se_cmd *);
127 int transport_generic_new_cmd(struct se_cmd *);
128
129 void transport_generic_process_write(struct se_cmd *);
130
131 void transport_generic_free_cmd(struct se_cmd *, int);
132
133 bool transport_wait_for_tasks(struct se_cmd *);
134 int transport_check_aborted_status(struct se_cmd *, int);
135 int transport_send_check_condition_and_sense(struct se_cmd *, u8, int);
136
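Chaining the I/O-path calls above, a hedged sketch of single-command submission from a fabric receive path; my_fabric_ops is the hypothetical table sketched earlier, the direction and task attribute are hard-coded for brevity, and the failed LUN lookup is assumed to have set scsi_sense_reason:

    static void my_submit_cmd_sketch(struct se_session *sess,
                                     struct se_cmd *se_cmd,
                                     unsigned char *cdb, u32 data_length,
                                     u32 unpacked_lun,
                                     unsigned char *sense_buf)
    {
            /* initialize the descriptor against this fabric and session */
            transport_init_se_cmd(se_cmd, &my_fabric_ops, sess, data_length,
                                  DMA_FROM_DEVICE, MSG_SIMPLE_TAG, sense_buf);

            /* resolve the LUN; on failure return CHECK CONDITION and bail */
            if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0) {
                    transport_send_check_condition_and_sense(se_cmd,
                                    se_cmd->scsi_sense_reason, 0);
                    transport_generic_free_cmd(se_cmd, 0);
                    return;
            }

            /* parse the CDB and build backend tasks */
            if (transport_generic_allocate_tasks(se_cmd, cdb) < 0) {
                    transport_generic_free_cmd(se_cmd, 0);
                    return;
            }

            /* hand the command to the core from process context */
            transport_handle_cdb_direct(se_cmd);
    }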
137 void target_get_sess_cmd(struct se_session *, struct se_cmd *);
138 int target_put_sess_cmd(struct se_session *, struct se_cmd *);
139 void target_splice_sess_cmd_list(struct se_session *);
140 void target_wait_for_sess_cmds(struct se_session *, int);
141
142 int core_alua_check_nonop_delay(struct se_cmd *);
143
144 struct se_tmr_req *core_tmr_alloc_req(struct se_cmd *, void *, u8, gfp_t);
145 void core_tmr_release_req(struct se_tmr_req *);
146 int transport_generic_handle_tmr(struct se_cmd *);
147 int transport_lookup_tmr_lun(struct se_cmd *, u32);
148
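The matching TMR path, sketched assuming core_tmr_alloc_req() returns an ERR_PTR on allocation failure; TMR_LUN_RESET is one of the fabric-independent values from the (now removed) target_core_tmr.h shown further down, and cleanup is elided:

    static int my_handle_lun_reset_sketch(struct se_cmd *se_cmd,
                                          void *fabric_tmr_ptr,
                                          u32 unpacked_lun)
    {
            struct se_tmr_req *tmr;

            tmr = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, TMR_LUN_RESET,
                                     GFP_KERNEL);
            if (IS_ERR(tmr))
                    return PTR_ERR(tmr);
            se_cmd->se_tmr_req = tmr;

            if (transport_lookup_tmr_lun(se_cmd, unpacked_lun) < 0) {
                    /* respond with LUN does not exist; cleanup elided */
                    return -ENODEV;
            }

            return transport_generic_handle_tmr(se_cmd);
    }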
149 struct se_node_acl *core_tpg_check_initiator_node_acl(struct se_portal_group *,
150 unsigned char *);
151 void core_tpg_clear_object_luns(struct se_portal_group *);
152 struct se_node_acl *core_tpg_add_initiator_node_acl(struct se_portal_group *,
153 struct se_node_acl *, const char *, u32);
154 int core_tpg_del_initiator_node_acl(struct se_portal_group *,
155 struct se_node_acl *, int);
156 int core_tpg_set_initiator_node_queue_depth(struct se_portal_group *,
157 unsigned char *, u32, int);
158 int core_tpg_register(struct target_core_fabric_ops *, struct se_wwn *,
159 struct se_portal_group *, void *, int);
160 int core_tpg_deregister(struct se_portal_group *);
161
162 /* SAS helpers */
163 u8 sas_get_fabric_proto_ident(struct se_portal_group *);
164 u32 sas_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
165 struct t10_pr_registration *, int *, unsigned char *);
166 u32 sas_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
167 struct t10_pr_registration *, int *);
168 char *sas_parse_pr_out_transport_id(struct se_portal_group *, const char *,
169 u32 *, char **);
170
171 /* FC helpers */
172 u8 fc_get_fabric_proto_ident(struct se_portal_group *);
173 u32 fc_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
174 struct t10_pr_registration *, int *, unsigned char *);
175 u32 fc_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
176 struct t10_pr_registration *, int *);
177 char *fc_parse_pr_out_transport_id(struct se_portal_group *, const char *,
178 u32 *, char **);
179
180 /* iSCSI helpers */
181 u8 iscsi_get_fabric_proto_ident(struct se_portal_group *);
182 u32 iscsi_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
183 struct t10_pr_registration *, int *, unsigned char *);
184 u32 iscsi_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
185 struct t10_pr_registration *, int *);
186 char *iscsi_parse_pr_out_transport_id(struct se_portal_group *, const char *,
187 u32 *, char **);
188
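These helpers are shaped to drop straight into the ops table above; a hypothetical SAS fabric would wire them up like this fragment:

    static struct target_core_fabric_ops my_sas_ops = {
            /* ... other callbacks ... */
            .get_fabric_proto_ident         = sas_get_fabric_proto_ident,
            .tpg_get_pr_transport_id        = sas_get_pr_transport_id,
            .tpg_get_pr_transport_id_len    = sas_get_pr_transport_id_len,
            .tpg_parse_pr_out_transport_id  = sas_parse_pr_out_transport_id,
    };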
189 #endif /* TARGET_CORE_FABRIC_H */
190
include/target/target_core_fabric_lib.h
1 #ifndef TARGET_CORE_FABRIC_LIB_H File was deleted
2 #define TARGET_CORE_FABRIC_LIB_H
3
4 extern u8 sas_get_fabric_proto_ident(struct se_portal_group *);
5 extern u32 sas_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
6 struct t10_pr_registration *, int *, unsigned char *);
7 extern u32 sas_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
8 struct t10_pr_registration *, int *);
9 extern char *sas_parse_pr_out_transport_id(struct se_portal_group *,
10 const char *, u32 *, char **);
11
12 extern u8 fc_get_fabric_proto_ident(struct se_portal_group *);
13 extern u32 fc_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
14 struct t10_pr_registration *, int *, unsigned char *);
15 extern u32 fc_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
16 struct t10_pr_registration *, int *);
17 extern char *fc_parse_pr_out_transport_id(struct se_portal_group *,
18 const char *, u32 *, char **);
19
20 extern u8 iscsi_get_fabric_proto_ident(struct se_portal_group *);
21 extern u32 iscsi_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
22 struct t10_pr_registration *, int *, unsigned char *);
23 extern u32 iscsi_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
24 struct t10_pr_registration *, int *);
25 extern char *iscsi_parse_pr_out_transport_id(struct se_portal_group *,
26 const char *, u32 *, char **);
27
28 #endif /* TARGET_CORE_FABRIC_LIB_H */
29
include/target/target_core_fabric_ops.h
1 /* Defined in target_core_configfs.h */ File was deleted
2 struct target_fabric_configfs;
3
4 struct target_core_fabric_ops {
5 struct configfs_subsystem *tf_subsys;
6 /*
7 * Optional to signal struct se_task->task_sg[] padding entries
8 * for scatterlist chaining using transport_do_task_sg_chain(),
9 * disabled by default
10 */
11 bool task_sg_chaining;
12 char *(*get_fabric_name)(void);
13 u8 (*get_fabric_proto_ident)(struct se_portal_group *);
14 char *(*tpg_get_wwn)(struct se_portal_group *);
15 u16 (*tpg_get_tag)(struct se_portal_group *);
16 u32 (*tpg_get_default_depth)(struct se_portal_group *);
17 u32 (*tpg_get_pr_transport_id)(struct se_portal_group *,
18 struct se_node_acl *,
19 struct t10_pr_registration *, int *,
20 unsigned char *);
21 u32 (*tpg_get_pr_transport_id_len)(struct se_portal_group *,
22 struct se_node_acl *,
23 struct t10_pr_registration *, int *);
24 char *(*tpg_parse_pr_out_transport_id)(struct se_portal_group *,
25 const char *, u32 *, char **);
26 int (*tpg_check_demo_mode)(struct se_portal_group *);
27 int (*tpg_check_demo_mode_cache)(struct se_portal_group *);
28 int (*tpg_check_demo_mode_write_protect)(struct se_portal_group *);
29 int (*tpg_check_prod_mode_write_protect)(struct se_portal_group *);
30 /*
31 * Optionally used by fabrics to allow demo-mode login, but not
32 * expose any TPG LUNs, and return 'not connected' in standard
33 * inquiry response
34 */
35 int (*tpg_check_demo_mode_login_only)(struct se_portal_group *);
36 struct se_node_acl *(*tpg_alloc_fabric_acl)(
37 struct se_portal_group *);
38 void (*tpg_release_fabric_acl)(struct se_portal_group *,
39 struct se_node_acl *);
40 u32 (*tpg_get_inst_index)(struct se_portal_group *);
41 /*
42 * Optional function pointer for TCM to perform command map
43 * from TCM processing thread context, for those struct se_cmd
44 * initially allocated in interrupt context.
45 */
46 int (*new_cmd_map)(struct se_cmd *);
47 /*
48 * Optional to release struct se_cmd and fabric dependent allocated
49 * I/O descriptor in transport_cmd_check_stop().
50 *
51 * Returning 1 will signal a descriptor has been released.
52 * Returning 0 will signal a descriptor has not been released.
53 */
54 int (*check_stop_free)(struct se_cmd *);
55 /*
56 * Optional check for active I/O shutdown
57 */
58 int (*check_release_cmd)(struct se_cmd *);
59 void (*release_cmd)(struct se_cmd *);
60 /*
61 * Called with spin_lock_bh(struct se_portal_group->session_lock) held.
62 */
63 int (*shutdown_session)(struct se_session *);
64 void (*close_session)(struct se_session *);
65 void (*stop_session)(struct se_session *, int, int);
66 void (*fall_back_to_erl0)(struct se_session *);
67 int (*sess_logged_in)(struct se_session *);
68 u32 (*sess_get_index)(struct se_session *);
69 /*
70 * Used only for SCSI fabrics that contain multi-value TransportIDs
71 * (like iSCSI). All other SCSI fabrics should set this to NULL.
72 */
73 u32 (*sess_get_initiator_sid)(struct se_session *,
74 unsigned char *, u32);
75 int (*write_pending)(struct se_cmd *);
76 int (*write_pending_status)(struct se_cmd *);
77 void (*set_default_node_attributes)(struct se_node_acl *);
78 u32 (*get_task_tag)(struct se_cmd *);
79 int (*get_cmd_state)(struct se_cmd *);
80 int (*queue_data_in)(struct se_cmd *);
81 int (*queue_status)(struct se_cmd *);
82 int (*queue_tm_rsp)(struct se_cmd *);
83 u16 (*set_fabric_sense_len)(struct se_cmd *, u32);
84 u16 (*get_fabric_sense_len)(void);
85 int (*is_state_remove)(struct se_cmd *);
86 /*
87 * fabric module calls for target_core_fabric_configfs.c
88 */
89 struct se_wwn *(*fabric_make_wwn)(struct target_fabric_configfs *,
90 struct config_group *, const char *);
91 void (*fabric_drop_wwn)(struct se_wwn *);
92 struct se_portal_group *(*fabric_make_tpg)(struct se_wwn *,
93 struct config_group *, const char *);
94 void (*fabric_drop_tpg)(struct se_portal_group *);
95 int (*fabric_post_link)(struct se_portal_group *,
96 struct se_lun *);
97 void (*fabric_pre_unlink)(struct se_portal_group *,
98 struct se_lun *);
99 struct se_tpg_np *(*fabric_make_np)(struct se_portal_group *,
100 struct config_group *, const char *);
101 void (*fabric_drop_np)(struct se_tpg_np *);
102 struct se_node_acl *(*fabric_make_nodeacl)(struct se_portal_group *,
103 struct config_group *, const char *);
104 void (*fabric_drop_nodeacl)(struct se_node_acl *);
105 };
106
include/target/target_core_tmr.h
1 #ifndef TARGET_CORE_TMR_H File was deleted
2 #define TARGET_CORE_TMR_H
3
4 /* fabric independent task management function values */
5 enum tcm_tmreq_table {
6 TMR_ABORT_TASK = 1,
7 TMR_ABORT_TASK_SET = 2,
8 TMR_CLEAR_ACA = 3,
9 TMR_CLEAR_TASK_SET = 4,
10 TMR_LUN_RESET = 5,
11 TMR_TARGET_WARM_RESET = 6,
12 TMR_TARGET_COLD_RESET = 7,
13 TMR_FABRIC_TMR = 255,
14 };
15
16 /* fabric independent task management response values */
17 enum tcm_tmrsp_table {
18 TMR_FUNCTION_COMPLETE = 0,
19 TMR_TASK_DOES_NOT_EXIST = 1,
20 TMR_LUN_DOES_NOT_EXIST = 2,
21 TMR_TASK_STILL_ALLEGIANT = 3,
22 TMR_TASK_FAILOVER_NOT_SUPPORTED = 4,
23 TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED = 5,
24 TMR_FUNCTION_AUTHORIZATION_FAILED = 6,
25 TMR_FUNCTION_REJECTED = 255,
26 };
27
28 extern struct se_tmr_req *core_tmr_alloc_req(struct se_cmd *, void *, u8, gfp_t);
29 extern void core_tmr_release_req(struct se_tmr_req *);
30
31 #endif /* TARGET_CORE_TMR_H */
32
include/target/target_core_tpg.h
1 #ifndef TARGET_CORE_TPG_H File was deleted
2 #define TARGET_CORE_TPG_H
3
4 extern struct se_node_acl *core_tpg_check_initiator_node_acl(
5 struct se_portal_group *,
6 unsigned char *);
7 extern void core_tpg_clear_object_luns(struct se_portal_group *);
8 extern struct se_node_acl *core_tpg_add_initiator_node_acl(
9 struct se_portal_group *,
10 struct se_node_acl *,
11 const char *, u32);
12 extern int core_tpg_del_initiator_node_acl(struct se_portal_group *,
13 struct se_node_acl *, int);
14 extern int core_tpg_set_initiator_node_queue_depth(struct se_portal_group *,
15 unsigned char *, u32, int);
16 extern int core_tpg_register(struct target_core_fabric_ops *,
17 struct se_wwn *,
18 struct se_portal_group *, void *,
19 int);
20 extern int core_tpg_deregister(struct se_portal_group *);
21
22 #endif /* TARGET_CORE_TPG_H */
23
include/target/target_core_transport.h
1 #ifndef TARGET_CORE_TRANSPORT_H File was deleted
2 #define TARGET_CORE_TRANSPORT_H
3
4 #define TARGET_CORE_VERSION TARGET_CORE_MOD_VERSION
5
6 /* Attempts before moving from SHORT to LONG */
7 #define PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD 3
8 #define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT 3 /* In milliseconds */
9 #define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG 10 /* In milliseconds */
10
11 #define PYX_TRANSPORT_STATUS_INTERVAL 5 /* In seconds */
12
13 #define TRANSPORT_PLUGIN_PHBA_PDEV 1
14 #define TRANSPORT_PLUGIN_VHBA_PDEV 2
15 #define TRANSPORT_PLUGIN_VHBA_VDEV 3
16
17 /*
18 * struct se_subsystem_dev->su_dev_flags
19 */
20 #define SDF_FIRMWARE_VPD_UNIT_SERIAL 0x00000001
21 #define SDF_EMULATED_VPD_UNIT_SERIAL 0x00000002
22 #define SDF_USING_UDEV_PATH 0x00000004
23 #define SDF_USING_ALIAS 0x00000008
24
25 /*
26 * struct se_device->dev_flags
27 */
28 #define DF_READ_ONLY 0x00000001
29 #define DF_SPC2_RESERVATIONS 0x00000002
30 #define DF_SPC2_RESERVATIONS_WITH_ISID 0x00000004
31
32 /* struct se_dev_attrib sanity values */
33 /* Default max_unmap_lba_count */
34 #define DA_MAX_UNMAP_LBA_COUNT 0
35 /* Default max_unmap_block_desc_count */
36 #define DA_MAX_UNMAP_BLOCK_DESC_COUNT 0
37 /* Default unmap_granularity */
38 #define DA_UNMAP_GRANULARITY_DEFAULT 0
39 /* Default unmap_granularity_alignment */
40 #define DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT 0
41 /* Emulation for Direct Page Out */
42 #define DA_EMULATE_DPO 0
43 /* Emulation for Forced Unit Access WRITEs */
44 #define DA_EMULATE_FUA_WRITE 1
45 /* Emulation for Forced Unit Access READs */
46 #define DA_EMULATE_FUA_READ 0
47 /* Emulation for WriteCache and SYNCHRONIZE_CACHE */
48 #define DA_EMULATE_WRITE_CACHE 0
49 /* Emulation for UNIT ATTENTION Interlock Control */
50 #define DA_EMULATE_UA_INTLLCK_CTRL 0
51 /* Emulation for TASK_ABORTED status (TAS) by default */
52 #define DA_EMULATE_TAS 1
53 /* Emulation for Thin Provisioning UNMAP using block/blk-lib.c:blkdev_issue_discard() */
54 #define DA_EMULATE_TPU 0
55 /*
56 * Emulation for Thin Provisioning WRITE_SAME w/ UNMAP=1 bit using
57 * block/blk-lib.c:blkdev_issue_discard()
58 */
59 #define DA_EMULATE_TPWS 0
60 /* No Emulation for PSCSI by default */
61 #define DA_EMULATE_RESERVATIONS 0
62 /* No Emulation for PSCSI by default */
63 #define DA_EMULATE_ALUA 0
64 /* Enforce SCSI Initiator Port TransportID with 'ISID' for PR */
65 #define DA_ENFORCE_PR_ISIDS 1
66 #define DA_STATUS_MAX_SECTORS_MIN 16
67 #define DA_STATUS_MAX_SECTORS_MAX 8192
68 /* By default don't report non-rotating (solid state) medium */
69 #define DA_IS_NONROT 0
70 /* Queue Algorithm Modifier default for restricted reordering in control mode page */
71 #define DA_EMULATE_REST_REORD 0
72
73 #define SE_MODE_PAGE_BUF 512
74
75 #define MOD_MAX_SECTORS(ms, bs) (ms % (PAGE_SIZE / bs))
76
77 struct se_subsystem_api;
78
79 extern int transport_subsystem_register(struct se_subsystem_api *);
80 extern void transport_subsystem_release(struct se_subsystem_api *);
81 extern struct se_session *transport_init_session(void);
82 extern void __transport_register_session(struct se_portal_group *,
83 struct se_node_acl *,
84 struct se_session *, void *);
85 extern void transport_register_session(struct se_portal_group *,
86 struct se_node_acl *,
87 struct se_session *, void *);
88 extern void transport_free_session(struct se_session *);
89 extern void transport_deregister_session_configfs(struct se_session *);
90 extern void transport_deregister_session(struct se_session *);
91 extern void transport_complete_sync_cache(struct se_cmd *, int);
92 extern void transport_complete_task(struct se_task *, int);
93
94 extern void transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *);
95 extern int transport_set_vpd_assoc(struct t10_vpd *, unsigned char *);
96 extern int transport_set_vpd_ident_type(struct t10_vpd *, unsigned char *);
97 extern int transport_set_vpd_ident(struct t10_vpd *, unsigned char *);
98
99 extern struct se_device *transport_add_device_to_core_hba(struct se_hba *,
100 struct se_subsystem_api *,
101 struct se_subsystem_dev *, u32,
102 void *, struct se_dev_limits *,
103 const char *, const char *);
104 extern void transport_init_se_cmd(struct se_cmd *,
105 struct target_core_fabric_ops *,
106 struct se_session *, u32, int, int,
107 unsigned char *);
108 void *transport_kmap_first_data_page(struct se_cmd *cmd);
109 void transport_kunmap_first_data_page(struct se_cmd *cmd);
110 extern int transport_generic_allocate_tasks(struct se_cmd *, unsigned char *);
111 extern int transport_handle_cdb_direct(struct se_cmd *);
112 extern int transport_generic_handle_cdb_map(struct se_cmd *);
113 extern int transport_generic_handle_data(struct se_cmd *);
114 extern int transport_generic_handle_tmr(struct se_cmd *);
115 extern int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32,
116 struct scatterlist *, u32);
117 extern bool transport_wait_for_tasks(struct se_cmd *);
118 extern int transport_check_aborted_status(struct se_cmd *, int);
119 extern int transport_send_check_condition_and_sense(struct se_cmd *, u8, int);
120 extern void transport_generic_free_cmd(struct se_cmd *, int);
121 extern void target_get_sess_cmd(struct se_session *, struct se_cmd *);
122 extern int target_put_sess_cmd(struct se_session *, struct se_cmd *);
123 extern void target_splice_sess_cmd_list(struct se_session *);
124 extern void target_wait_for_sess_cmds(struct se_session *, int);
125 extern void transport_do_task_sg_chain(struct se_cmd *);
126 extern void transport_generic_process_write(struct se_cmd *);
127 extern int transport_generic_new_cmd(struct se_cmd *);
128 /* From target_core_alua.c */
129 extern int core_alua_check_nonop_delay(struct se_cmd *);
130 /* From target_core_cdb.c */
131 extern void target_get_task_cdb(struct se_task *task, unsigned char *cdb);
132
133 /*
134 * Each se_transport_task_t can have N possible struct se_task's
135 * for the storage transport(s) to possibly execute.
136 * Used primarily for splitting up CDBs that exceed the physical storage
137 * HBA's maximum sector count per task.
138 */
139 struct se_mem {
140 struct page *se_page;
141 u32 se_len;
142 u32 se_off;
143 struct list_head se_list;
144 } ____cacheline_aligned;
145
146 /*
147 * Each type of disk transport supported MUST have a template defined
148 * within its .h file.
149 */
150 struct se_subsystem_api {
151 /*
152 * The Name. :-)
153 */
154 char name[16];
155 /*
156 * Transport Type.
157 */
158 u8 transport_type;
159
160 unsigned int fua_write_emulated : 1;
161 unsigned int write_cache_emulated : 1;
162
163 /*
164 * struct module for struct se_hba references
165 */
166 struct module *owner;
167 /*
168 * Used for global se_subsystem_api list_head
169 */
170 struct list_head sub_api_list;
171 /*
172 * attach_hba():
173 */
174 int (*attach_hba)(struct se_hba *, u32);
175 /*
176 * detach_hba():
177 */
178 void (*detach_hba)(struct se_hba *);
179 /*
180 * pmode_hba(): Used for TCM/pSCSI subsystem plugin HBA ->
181 * Linux/SCSI struct Scsi_Host passthrough
182 */
183 int (*pmode_enable_hba)(struct se_hba *, unsigned long);
184 /*
185 * allocate_virtdevice():
186 */
187 void *(*allocate_virtdevice)(struct se_hba *, const char *);
188 /*
189 * create_virtdevice(): Only for Virtual HBAs
190 */
191 struct se_device *(*create_virtdevice)(struct se_hba *,
192 struct se_subsystem_dev *, void *);
193 /*
194 * free_device():
195 */
196 void (*free_device)(void *);
197
198 /*
199 * transport_complete():
200 *
201 * Use transport_generic_complete() for the majority of DAS transport
202 * drivers. Provided as a convenience.
203 */
204 int (*transport_complete)(struct se_task *task);
205 struct se_task *(*alloc_task)(unsigned char *cdb);
206 /*
207 * do_task():
208 */
209 int (*do_task)(struct se_task *);
210 /*
211 * Used by virtual subsystem plugins IBLOCK and FILEIO to emulate
212 * UNMAP and WRITE_SAME_* w/ UNMAP=1 <-> Linux/Block Discard
213 */
214 int (*do_discard)(struct se_device *, sector_t, u32);
215 /*
216 * Used by virtual subsystem plugins IBLOCK and FILEIO to emulate
217 * SYNCHRONIZE_CACHE_* <-> Linux/Block blkdev_issue_flush()
218 */
219 void (*do_sync_cache)(struct se_task *);
220 /*
221 * free_task():
222 */
223 void (*free_task)(struct se_task *);
224 /*
225 * check_configfs_dev_params():
226 */
227 ssize_t (*check_configfs_dev_params)(struct se_hba *, struct se_subsystem_dev *);
228 /*
229 * set_configfs_dev_params():
230 */
231 ssize_t (*set_configfs_dev_params)(struct se_hba *, struct se_subsystem_dev *,
232 const char *, ssize_t);
233 /*
234 * show_configfs_dev_params():
235 */
236 ssize_t (*show_configfs_dev_params)(struct se_hba *, struct se_subsystem_dev *,
237 char *);
238 /*
239 * get_device_rev():
240 */
241 u32 (*get_device_rev)(struct se_device *);
242 /*
243 * get_device_type():
244 */
245 u32 (*get_device_type)(struct se_device *);
246 /*
247 * Get the sector_t from a subsystem backstore.
248 */
249 sector_t (*get_blocks)(struct se_device *);
250 /*
251 * get_sense_buffer():
252 */
253 unsigned char *(*get_sense_buffer)(struct se_task *);
254 } ____cacheline_aligned;
255
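For comparison with the fabric side, a hedged sketch of a backend registering this template with the core (the interface this commit moves into target_core_backend.h); every my_* handler is a hypothetical stand-in:

    static struct se_subsystem_api my_backend_template = {
            .name                   = "mybackend",
            .owner                  = THIS_MODULE,
            .transport_type         = TRANSPORT_PLUGIN_VHBA_VDEV,
            .attach_hba             = my_attach_hba,
            .detach_hba             = my_detach_hba,
            .allocate_virtdevice    = my_allocate_virtdevice,
            .create_virtdevice      = my_create_virtdevice,
            .free_device            = my_free_device,
            .alloc_task             = my_alloc_task,
            .do_task                = my_do_task,
            .free_task              = my_free_task,
            .get_device_rev         = my_get_device_rev,
            .get_device_type        = my_get_device_type,
            .get_blocks             = my_get_blocks,
    };

    static int __init my_backend_init(void)
    {
            return transport_subsystem_register(&my_backend_template);
    }

    static void __exit my_backend_exit(void)
    {
            transport_subsystem_release(&my_backend_template);
    }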
256 #endif /* TARGET_CORE_TRANSPORT_H */
257