Commit 82bc9d04f4281276b8941b09a9306e15d4dc53f6

Authored by Nicholas Bellinger
Committed by Greg Kroah-Hartman
1 parent e7b2033159

target: Drop arbitrary maximum I/O size limit

commit 046ba64285a4389ae5e9a7dfa253c6bff3d7c341 upstream.

This patch drops the arbitrary maximum I/O size limit in sbc_parse_cdb(),
which is currently hardcoded to 8192 sectors for fabric_max_sectors (4 MB
for 512-byte sector devices), and is a backend-driver-dependent value for
hw_max_sectors.
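
For a quick sanity check on that 4 MB figure, the limit is just sectors
times block size; a standalone sketch (illustration only, not part of the
patch):

  #include <stdio.h>

  int main(void)
  {
          unsigned int fabric_max_sectors = 8192; /* the hardcoded limit being dropped */
          unsigned int block_size = 512;          /* 512-byte sector device */
          unsigned int bytes = fabric_max_sectors * block_size;

          /* 8192 * 512 = 4194304 bytes = 4 MB per command */
          printf("max I/O = %u bytes (%u MB)\n", bytes, bytes / (1024 * 1024));
          return 0;
  }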

This limit is problematic because Linux initiators have only recently
started to honor the Block Limits VPD MAXIMUM TRANSFER LENGTH, and other
non-Linux based initiators (e.g. MSFT Fibre Channel) can also generate I/Os
larger than 4 MB in size.

Currently, when this happens, the following message appears on the target
and the I/O is returned with a non-recoverable status:

  SCSI OP 28h with too big sectors 16384 exceeds fabric_max_sectors: 8192

Instead, drop both the [fabric,hw]_max_sectors checks in sbc_parse_cdb(),
and convert the existing hw_max_sectors into a purely informational
attribute used to represent the granularity at which the backend driver
and/or subsystem code splits I/Os.
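
For context, the check being dropped from sbc_parse_cdb() has roughly the
shape sketched below; this is a paraphrased, standalone illustration, not
text copied from the diff:

  #include <stdbool.h>
  #include <stdio.h>

  /* Pre-patch behaviour: fail any command whose sector count exceeds the
   * fabric_max_sectors limit.  Post-patch, no such check is made and the
   * backend splits large I/Os at its own granularity instead. */
  static bool sectors_exceed_limit(unsigned int sectors, unsigned int max_sectors)
  {
          if (sectors > max_sectors) {
                  fprintf(stderr, "SCSI OP with too big sectors %u exceeds"
                          " fabric_max_sectors: %u\n", sectors, max_sectors);
                  return true;  /* command returned with non-recoverable status */
          }
          return false;
  }

  int main(void)
  {
          /* A 16384-sector (8 MB) read against the old 8192-sector limit. */
          return sectors_exceed_limit(16384, 8192) ? 1 : 0;
  }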

Also, update FILEIO with an explicit FD_MAX_BYTES check in fd_execute_rw()
to deal with the one special iovec limitation case.
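
A minimal sketch of the kind of guard described above; the attribute names,
message text, and returned sense code here are assumptions based on the
commit message rather than text quoted from the diff:

  /* In fd_execute_rw(): refuse requests larger than FD_MAX_BYTES so the
   * per-request iovec allocation stays within FILEIO's limit, now that
   * sbc_parse_cdb() no longer caps the I/O size up front. */
  if (cmd->data_length > FD_MAX_BYTES) {
          pr_err("FILEIO: Unable to process I/O of %u bytes, exceeding"
                 " FD_MAX_BYTES iovec limitation: %u\n",
                 cmd->data_length, FD_MAX_BYTES);
          return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
  }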

v2 changes:
  - Drop hw_max_sectors check in sbc_parse_cdb()

Reported-by: Lance Gropper <lance.gropper@qosserver.com>
Reported-by: Stefan Priebe <s.priebe@profihost.ag>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Martin K. Petersen <martin.petersen@oracle.com>
Cc: Roland Dreier <roland@purestorage.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

Showing 5 changed files with 16 additions and 25 deletions

drivers/target/target_core_device.c
/*******************************************************************************
 * Filename: target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

DEFINE_MUTEX(g_device_mutex);
LIST_HEAD(g_device_list);

static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;

sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
        struct se_lun *se_lun = NULL;
        struct se_session *se_sess = se_cmd->se_sess;
        struct se_device *dev;
        unsigned long flags;

        if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
                return TCM_NON_EXISTENT_LUN;

        spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
        se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
        if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
                struct se_dev_entry *deve = se_cmd->se_deve;

                deve->total_cmds++;

                if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
                    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
                        pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
                                " Access for 0x%08x\n",
                                se_cmd->se_tfo->get_fabric_name(),
                                unpacked_lun);
                        spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
                        return TCM_WRITE_PROTECTED;
                }

                if (se_cmd->data_direction == DMA_TO_DEVICE)
                        deve->write_bytes += se_cmd->data_length;
                else if (se_cmd->data_direction == DMA_FROM_DEVICE)
                        deve->read_bytes += se_cmd->data_length;

                se_lun = deve->se_lun;
                se_cmd->se_lun = deve->se_lun;
                se_cmd->pr_res_key = deve->pr_res_key;
                se_cmd->orig_fe_lun = unpacked_lun;
                se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

                percpu_ref_get(&se_lun->lun_ref);
                se_cmd->lun_ref_active = true;
        }
        spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);

        if (!se_lun) {
                /*
                 * Use the se_portal_group->tpg_virt_lun0 to allow for
                 * REPORT_LUNS, et al to be returned when no active
                 * MappedLUN=0 exists for this Initiator Port.
                 */
                if (unpacked_lun != 0) {
                        pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
                                " Access for 0x%08x\n",
                                se_cmd->se_tfo->get_fabric_name(),
                                unpacked_lun);
                        return TCM_NON_EXISTENT_LUN;
                }
                /*
                 * Force WRITE PROTECT for virtual LUN 0
                 */
                if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
                    (se_cmd->data_direction != DMA_NONE))
                        return TCM_WRITE_PROTECTED;

                se_lun = &se_sess->se_tpg->tpg_virt_lun0;
                se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
                se_cmd->orig_fe_lun = 0;
                se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

                percpu_ref_get(&se_lun->lun_ref);
                se_cmd->lun_ref_active = true;
        }

        /* Directly associate cmd with se_dev */
        se_cmd->se_dev = se_lun->lun_se_dev;

        dev = se_lun->lun_se_dev;
        atomic_long_inc(&dev->num_cmds);
        if (se_cmd->data_direction == DMA_TO_DEVICE)
                atomic_long_add(se_cmd->data_length, &dev->write_bytes);
        else if (se_cmd->data_direction == DMA_FROM_DEVICE)
                atomic_long_add(se_cmd->data_length, &dev->read_bytes);

        return 0;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);

int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
        struct se_dev_entry *deve;
        struct se_lun *se_lun = NULL;
        struct se_session *se_sess = se_cmd->se_sess;
        struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
        unsigned long flags;

        if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
                return -ENODEV;

        spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
        se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
        deve = se_cmd->se_deve;

        if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
                se_tmr->tmr_lun = deve->se_lun;
                se_cmd->se_lun = deve->se_lun;
                se_lun = deve->se_lun;
                se_cmd->pr_res_key = deve->pr_res_key;
                se_cmd->orig_fe_lun = unpacked_lun;
        }
        spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);

        if (!se_lun) {
                pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
                        " Access for 0x%08x\n",
                        se_cmd->se_tfo->get_fabric_name(),
                        unpacked_lun);
                return -ENODEV;
        }

        /* Directly associate cmd with se_dev */
        se_cmd->se_dev = se_lun->lun_se_dev;
        se_tmr->tmr_dev = se_lun->lun_se_dev;

        spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
        list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
        spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

        return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);

/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
        struct se_node_acl *nacl,
        u16 rtpi)
{
        struct se_dev_entry *deve;
        struct se_lun *lun;
        struct se_port *port;
        struct se_portal_group *tpg = nacl->se_tpg;
        u32 i;

        spin_lock_irq(&nacl->device_list_lock);
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                deve = nacl->device_list[i];

                if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
                        continue;

                lun = deve->se_lun;
                if (!lun) {
                        pr_err("%s device entries device pointer is"
                                " NULL, but Initiator has access.\n",
                                tpg->se_tpg_tfo->get_fabric_name());
                        continue;
                }
                port = lun->lun_sep;
                if (!port) {
                        pr_err("%s device entries device pointer is"
                                " NULL, but Initiator has access.\n",
                                tpg->se_tpg_tfo->get_fabric_name());
                        continue;
                }
                if (port->sep_rtpi != rtpi)
                        continue;

                atomic_inc_mb(&deve->pr_ref_count);
                spin_unlock_irq(&nacl->device_list_lock);

                return deve;
        }
        spin_unlock_irq(&nacl->device_list_lock);

        return NULL;
}

int core_free_device_list_for_node(
        struct se_node_acl *nacl,
        struct se_portal_group *tpg)
{
        struct se_dev_entry *deve;
        struct se_lun *lun;
        u32 i;

        if (!nacl->device_list)
                return 0;

        spin_lock_irq(&nacl->device_list_lock);
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                deve = nacl->device_list[i];

                if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
                        continue;

                if (!deve->se_lun) {
                        pr_err("%s device entries device pointer is"
                                " NULL, but Initiator has access.\n",
                                tpg->se_tpg_tfo->get_fabric_name());
                        continue;
                }
                lun = deve->se_lun;

                spin_unlock_irq(&nacl->device_list_lock);
                core_disable_device_list_for_node(lun, NULL, deve->mapped_lun,
                        TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);
                spin_lock_irq(&nacl->device_list_lock);
        }
        spin_unlock_irq(&nacl->device_list_lock);

        array_free(nacl->device_list, TRANSPORT_MAX_LUNS_PER_TPG);
        nacl->device_list = NULL;

        return 0;
}

void core_update_device_list_access(
        u32 mapped_lun,
        u32 lun_access,
        struct se_node_acl *nacl)
{
        struct se_dev_entry *deve;

        spin_lock_irq(&nacl->device_list_lock);
        deve = nacl->device_list[mapped_lun];
        if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
                deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
                deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
        } else {
                deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
                deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
        }
        spin_unlock_irq(&nacl->device_list_lock);
}

/* core_enable_device_list_for_node():
 *
 *
 */
int core_enable_device_list_for_node(
        struct se_lun *lun,
        struct se_lun_acl *lun_acl,
        u32 mapped_lun,
        u32 lun_access,
        struct se_node_acl *nacl,
        struct se_portal_group *tpg)
{
        struct se_port *port = lun->lun_sep;
        struct se_dev_entry *deve;

        spin_lock_irq(&nacl->device_list_lock);

        deve = nacl->device_list[mapped_lun];

        /*
         * Check if the call is handling demo mode -> explicit LUN ACL
         * transition. This transition must be for the same struct se_lun
         * + mapped_lun that was setup in demo mode..
         */
        if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
                if (deve->se_lun_acl != NULL) {
                        pr_err("struct se_dev_entry->se_lun_acl"
                                " already set for demo mode -> explicit"
                                " LUN ACL transition\n");
                        spin_unlock_irq(&nacl->device_list_lock);
                        return -EINVAL;
                }
                if (deve->se_lun != lun) {
                        pr_err("struct se_dev_entry->se_lun does"
                                " match passed struct se_lun for demo mode"
                                " -> explicit LUN ACL transition\n");
                        spin_unlock_irq(&nacl->device_list_lock);
                        return -EINVAL;
                }
                deve->se_lun_acl = lun_acl;

                if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
                        deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
                        deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
                } else {
                        deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
                        deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
                }

                spin_unlock_irq(&nacl->device_list_lock);
                return 0;
        }

        deve->se_lun = lun;
        deve->se_lun_acl = lun_acl;
        deve->mapped_lun = mapped_lun;
        deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;

        if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
                deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
                deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
        } else {
                deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
                deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
        }

        deve->creation_time = get_jiffies_64();
        deve->attach_count++;
        spin_unlock_irq(&nacl->device_list_lock);

        spin_lock_bh(&port->sep_alua_lock);
        list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
        spin_unlock_bh(&port->sep_alua_lock);

        return 0;
}

/* core_disable_device_list_for_node():
 *
 *
 */
int core_disable_device_list_for_node(
        struct se_lun *lun,
        struct se_lun_acl *lun_acl,
        u32 mapped_lun,
        u32 lun_access,
        struct se_node_acl *nacl,
        struct se_portal_group *tpg)
{
        struct se_port *port = lun->lun_sep;
        struct se_dev_entry *deve = nacl->device_list[mapped_lun];

        /*
         * If the MappedLUN entry is being disabled, the entry in
         * port->sep_alua_list must be removed now before clearing the
         * struct se_dev_entry pointers below as logic in
         * core_alua_do_transition_tg_pt() depends on these being present.
         *
         * deve->se_lun_acl will be NULL for demo-mode created LUNs
         * that have not been explicitly converted to MappedLUNs ->
         * struct se_lun_acl, but we remove deve->alua_port_list from
         * port->sep_alua_list. This also means that active UAs and
         * NodeACL context specific PR metadata for demo-mode
         * MappedLUN *deve will be released below..
         */
        spin_lock_bh(&port->sep_alua_lock);
        list_del(&deve->alua_port_list);
        spin_unlock_bh(&port->sep_alua_lock);
        /*
         * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE
         * PR operation to complete.
         */
        while (atomic_read(&deve->pr_ref_count) != 0)
                cpu_relax();

        spin_lock_irq(&nacl->device_list_lock);
        /*
         * Disable struct se_dev_entry LUN ACL mapping
         */
        core_scsi3_ua_release_all(deve);
        deve->se_lun = NULL;
        deve->se_lun_acl = NULL;
        deve->lun_flags = 0;
        deve->creation_time = 0;
        deve->attach_count--;
        spin_unlock_irq(&nacl->device_list_lock);

        core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
        return 0;
}

/* core_clear_lun_from_tpg():
 *
 *
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
        struct se_node_acl *nacl;
        struct se_dev_entry *deve;
        u32 i;

        spin_lock_irq(&tpg->acl_node_lock);
        list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
                spin_unlock_irq(&tpg->acl_node_lock);

                spin_lock_irq(&nacl->device_list_lock);
                for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                        deve = nacl->device_list[i];
                        if (lun != deve->se_lun)
                                continue;
                        spin_unlock_irq(&nacl->device_list_lock);

                        core_disable_device_list_for_node(lun, NULL,
                                deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
                                nacl, tpg);

                        spin_lock_irq(&nacl->device_list_lock);
                }
                spin_unlock_irq(&nacl->device_list_lock);

                spin_lock_irq(&tpg->acl_node_lock);
        }
        spin_unlock_irq(&tpg->acl_node_lock);
}

static struct se_port *core_alloc_port(struct se_device *dev)
{
        struct se_port *port, *port_tmp;

        port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
        if (!port) {
                pr_err("Unable to allocate struct se_port\n");
                return ERR_PTR(-ENOMEM);
        }
        INIT_LIST_HEAD(&port->sep_alua_list);
        INIT_LIST_HEAD(&port->sep_list);
        atomic_set(&port->sep_tg_pt_secondary_offline, 0);
        spin_lock_init(&port->sep_alua_lock);
        mutex_init(&port->sep_tg_pt_md_mutex);

        spin_lock(&dev->se_port_lock);
        if (dev->dev_port_count == 0x0000ffff) {
                pr_warn("Reached dev->dev_port_count =="
                        " 0x0000ffff\n");
                spin_unlock(&dev->se_port_lock);
                return ERR_PTR(-ENOSPC);
        }
again:
        /*
         * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
         * Here is the table from spc4r17 section 7.7.3.8.
         *
         * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
         *
         * Code          Description
         * 0h            Reserved
         * 1h            Relative port 1, historically known as port A
         * 2h            Relative port 2, historically known as port B
         * 3h to FFFFh   Relative port 3 through 65 535
         */
        port->sep_rtpi = dev->dev_rpti_counter++;
        if (!port->sep_rtpi)
                goto again;

        list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
                /*
                 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
                 * for 16-bit wrap..
                 */
                if (port->sep_rtpi == port_tmp->sep_rtpi)
                        goto again;
        }
        spin_unlock(&dev->se_port_lock);

        return port;
}

static void core_export_port(
        struct se_device *dev,
        struct se_portal_group *tpg,
        struct se_port *port,
        struct se_lun *lun)
{
        struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;

        spin_lock(&dev->se_port_lock);
        spin_lock(&lun->lun_sep_lock);
        port->sep_tpg = tpg;
        port->sep_lun = lun;
        lun->lun_sep = port;
        spin_unlock(&lun->lun_sep_lock);

        list_add_tail(&port->sep_list, &dev->dev_sep_list);
        spin_unlock(&dev->se_port_lock);

        if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV &&
            !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
                tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
                if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
                        pr_err("Unable to allocate t10_alua_tg_pt"
                                "_gp_member_t\n");
                        return;
                }
                spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
                __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
                        dev->t10_alua.default_tg_pt_gp);
                spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
                pr_debug("%s/%s: Adding to default ALUA Target Port"
                        " Group: alua/default_tg_pt_gp\n",
                        dev->transport->name, tpg->se_tpg_tfo->get_fabric_name());
        }

        dev->dev_port_count++;
        port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
}

/*
 * Called with struct se_device->se_port_lock spinlock held.
 */
static void core_release_port(struct se_device *dev, struct se_port *port)
        __releases(&dev->se_port_lock) __acquires(&dev->se_port_lock)
{
        /*
         * Wait for any port reference for PR ALL_TG_PT=1 operation
         * to complete in __core_scsi3_alloc_registration()
         */
        spin_unlock(&dev->se_port_lock);
        if (atomic_read(&port->sep_tg_pt_ref_cnt))
                cpu_relax();
        spin_lock(&dev->se_port_lock);

        core_alua_free_tg_pt_gp_mem(port);

        list_del(&port->sep_list);
        dev->dev_port_count--;
        kfree(port);
}

int core_dev_export(
        struct se_device *dev,
        struct se_portal_group *tpg,
        struct se_lun *lun)
{
        struct se_hba *hba = dev->se_hba;
        struct se_port *port;

        port = core_alloc_port(dev);
        if (IS_ERR(port))
                return PTR_ERR(port);

        lun->lun_se_dev = dev;

        spin_lock(&hba->device_lock);
        dev->export_count++;
        spin_unlock(&hba->device_lock);

        core_export_port(dev, tpg, port, lun);
        return 0;
}

void core_dev_unexport(
        struct se_device *dev,
        struct se_portal_group *tpg,
        struct se_lun *lun)
{
        struct se_hba *hba = dev->se_hba;
        struct se_port *port = lun->lun_sep;

        spin_lock(&lun->lun_sep_lock);
        if (lun->lun_se_dev == NULL) {
                spin_unlock(&lun->lun_sep_lock);
                return;
        }
        spin_unlock(&lun->lun_sep_lock);

        spin_lock(&dev->se_port_lock);
        core_release_port(dev, port);
        spin_unlock(&dev->se_port_lock);

        spin_lock(&hba->device_lock);
        dev->export_count--;
        spin_unlock(&hba->device_lock);

        lun->lun_sep = NULL;
        lun->lun_se_dev = NULL;
}

static void se_release_vpd_for_dev(struct se_device *dev)
{
        struct t10_vpd *vpd, *vpd_tmp;

        spin_lock(&dev->t10_wwn.t10_vpd_lock);
        list_for_each_entry_safe(vpd, vpd_tmp,
                        &dev->t10_wwn.t10_vpd_list, vpd_list) {
                list_del(&vpd->vpd_list);
                kfree(vpd);
        }
        spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}

static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
        u32 aligned_max_sectors;
        u32 alignment;
        /*
         * Limit max_sectors to a PAGE_SIZE aligned value for modern
         * transport_allocate_data_tasks() operation.
         */
        alignment = max(1ul, PAGE_SIZE / block_size);
        aligned_max_sectors = rounddown(max_sectors, alignment);

        if (max_sectors != aligned_max_sectors)
                pr_info("Rounding down aligned max_sectors from %u to %u\n",
                        max_sectors, aligned_max_sectors);

        return aligned_max_sectors;
}

int se_dev_set_max_unmap_lba_count(
        struct se_device *dev,
        u32 max_unmap_lba_count)
{
        dev->dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
        pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n",
                        dev, dev->dev_attrib.max_unmap_lba_count);
        return 0;
}

int se_dev_set_max_unmap_block_desc_count(
        struct se_device *dev,
        u32 max_unmap_block_desc_count)
{
        dev->dev_attrib.max_unmap_block_desc_count =
                max_unmap_block_desc_count;
        pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n",
                        dev, dev->dev_attrib.max_unmap_block_desc_count);
        return 0;
}

int se_dev_set_unmap_granularity(
        struct se_device *dev,
        u32 unmap_granularity)
{
        dev->dev_attrib.unmap_granularity = unmap_granularity;
        pr_debug("dev[%p]: Set unmap_granularity: %u\n",
                        dev, dev->dev_attrib.unmap_granularity);
        return 0;
}

int se_dev_set_unmap_granularity_alignment(
        struct se_device *dev,
        u32 unmap_granularity_alignment)
{
        dev->dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
        pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n",
                        dev, dev->dev_attrib.unmap_granularity_alignment);
        return 0;
}

int se_dev_set_max_write_same_len(
        struct se_device *dev,
        u32 max_write_same_len)
{
        dev->dev_attrib.max_write_same_len = max_write_same_len;
        pr_debug("dev[%p]: Set max_write_same_len: %u\n",
                        dev, dev->dev_attrib.max_write_same_len);
        return 0;
}

static void dev_set_t10_wwn_model_alias(struct se_device *dev)
{
        const char *configname;

        configname = config_item_name(&dev->dev_group.cg_item);
        if (strlen(configname) >= 16) {
                pr_warn("dev[%p]: Backstore name '%s' is too long for "
                        "INQUIRY_MODEL, truncating to 16 bytes\n", dev,
                        configname);
        }
        snprintf(&dev->t10_wwn.model[0], 16, "%s", configname);
}

int se_dev_set_emulate_model_alias(struct se_device *dev, int flag)
{
        if (dev->export_count) {
                pr_err("dev[%p]: Unable to change model alias"
                        " while export_count is %d\n",
                        dev, dev->export_count);
                return -EINVAL;
        }

        if (flag != 0 && flag != 1) {
                pr_err("Illegal value %d\n", flag);
                return -EINVAL;
        }

        if (flag) {
                dev_set_t10_wwn_model_alias(dev);
        } else {
                strncpy(&dev->t10_wwn.model[0],
                        dev->transport->inquiry_prod, 16);
        }
        dev->dev_attrib.emulate_model_alias = flag;

        return 0;
}

int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
{
        if (flag != 0 && flag != 1) {
                pr_err("Illegal value %d\n", flag);
                return -EINVAL;
        }

        if (flag) {
                pr_err("dpo_emulated not supported\n");
                return -EINVAL;
        }

        return 0;
}

int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
{
        if (flag != 0 && flag != 1) {
                pr_err("Illegal value %d\n", flag);
                return -EINVAL;
        }

        if (flag &&
            dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
                pr_err("emulate_fua_write not supported for pSCSI\n");
                return -EINVAL;
        }
        dev->dev_attrib.emulate_fua_write = flag;
        pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
                        dev, dev->dev_attrib.emulate_fua_write);
        return 0;
}

int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
{
        if (flag != 0 && flag != 1) {
                pr_err("Illegal value %d\n", flag);
                return -EINVAL;
        }

        if (flag) {
                pr_err("ua read emulated not supported\n");
                return -EINVAL;
        }

        return 0;
}

int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
{
        if (flag != 0 && flag != 1) {
                pr_err("Illegal value %d\n", flag);
                return -EINVAL;
        }
        if (flag &&
            dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
                pr_err("emulate_write_cache not supported for pSCSI\n");
                return -EINVAL;
        }
        if (flag &&
            dev->transport->get_write_cache) {
                pr_err("emulate_write_cache not supported for this device\n");
                return -EINVAL;
        }

        dev->dev_attrib.emulate_write_cache = flag;
        pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
                        dev, dev->dev_attrib.emulate_write_cache);
        return 0;
}

int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
{
        if ((flag != 0) && (flag != 1) && (flag != 2)) {
                pr_err("Illegal value %d\n", flag);
                return -EINVAL;
        }

        if (dev->export_count) {
                pr_err("dev[%p]: Unable to change SE Device"
                        " UA_INTRLCK_CTRL while export_count is %d\n",
                        dev, dev->export_count);
                return -EINVAL;
        }
        dev->dev_attrib.emulate_ua_intlck_ctrl = flag;
        pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
                dev, dev->dev_attrib.emulate_ua_intlck_ctrl);

        return 0;
}

int se_dev_set_emulate_tas(struct se_device *dev, int flag)
{
        if ((flag != 0) && (flag != 1)) {
                pr_err("Illegal value %d\n", flag);
                return -EINVAL;
        }

        if (dev->export_count) {
                pr_err("dev[%p]: Unable to change SE Device TAS while"
                        " export_count is %d\n",
                        dev, dev->export_count);
                return -EINVAL;
        }
        dev->dev_attrib.emulate_tas = flag;
        pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
                dev, (dev->dev_attrib.emulate_tas) ? "Enabled" : "Disabled");

        return 0;
}

int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
{
        if ((flag != 0) && (flag != 1)) {
                pr_err("Illegal value %d\n", flag);
                return -EINVAL;
        }
        /*
         * We expect this value to be non-zero when generic Block Layer
         * Discard supported is detected iblock_create_virtdevice().
         */
        if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
                pr_err("Generic Block Discard not supported\n");
                return -ENOSYS;
        }

        dev->dev_attrib.emulate_tpu = flag;
        pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
                                dev, flag);
        return 0;
}

int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
{
        if ((flag != 0) && (flag != 1)) {
                pr_err("Illegal value %d\n", flag);
                return -EINVAL;
        }
        /*
         * We expect this value to be non-zero when generic Block Layer
         * Discard supported is detected iblock_create_virtdevice().
         */
        if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
                pr_err("Generic Block Discard not supported\n");
                return -ENOSYS;
        }

        dev->dev_attrib.emulate_tpws = flag;
        pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
                                dev, flag);
        return 0;
}

int se_dev_set_emulate_caw(struct se_device *dev, int flag)
{
        if (flag != 0 && flag != 1) {
                pr_err("Illegal value %d\n", flag);
                return -EINVAL;
        }
        dev->dev_attrib.emulate_caw = flag;
        pr_debug("dev[%p]: SE Device CompareAndWrite (AtomicTestandSet): %d\n",
                 dev, flag);

        return 0;
}

int se_dev_set_emulate_3pc(struct se_device *dev, int flag)
{
        if (flag != 0 && flag != 1) {
                pr_err("Illegal value %d\n", flag);
                return -EINVAL;
        }
        dev->dev_attrib.emulate_3pc = flag;
        pr_debug("dev[%p]: SE Device 3rd Party Copy (EXTENDED_COPY): %d\n",
                dev, flag);

        return 0;
}

int se_dev_set_pi_prot_type(struct se_device *dev, int flag)
{
        int rc, old_prot = dev->dev_attrib.pi_prot_type;

        if (flag != 0 && flag != 1 && flag != 2 && flag != 3) {
                pr_err("Illegal value %d for pi_prot_type\n", flag);
                return -EINVAL;
        }
        if (flag == 2) {
                pr_err("DIF TYPE2 protection currently not supported\n");
                return -ENOSYS;
        }
        if (dev->dev_attrib.hw_pi_prot_type) {
                pr_warn("DIF protection enabled on underlying hardware,"
                        " ignoring\n");
                return 0;
        }
        if (!dev->transport->init_prot || !dev->transport->free_prot) {
                /* 0 is only allowed value for non-supporting backends */
                if (flag == 0)
                        return 0;

                pr_err("DIF protection not supported by backend: %s\n",
                       dev->transport->name);
                return -ENOSYS;
        }
        if (!(dev->dev_flags & DF_CONFIGURED)) {
                pr_err("DIF protection requires device to be configured\n");
                return -ENODEV;
        }
        if (dev->export_count) {
                pr_err("dev[%p]: Unable to change SE Device PROT type while"
                       " export_count is %d\n", dev, dev->export_count);
                return -EINVAL;
        }

        dev->dev_attrib.pi_prot_type = flag;

        if (flag && !old_prot) {
                rc = dev->transport->init_prot(dev);
                if (rc) {
                        dev->dev_attrib.pi_prot_type = old_prot;
                        return rc;
                }

        } else if (!flag && old_prot) {
                dev->transport->free_prot(dev);
        }
        pr_debug("dev[%p]: SE Device Protection Type: %d\n", dev, flag);

        return 0;
}

int se_dev_set_pi_prot_format(struct se_device *dev, int flag)
{
        int rc;

        if (!flag)
                return 0;

        if (flag != 1) {
                pr_err("Illegal value %d for pi_prot_format\n", flag);
                return -EINVAL;
        }
        if (!dev->transport->format_prot) {
                pr_err("DIF protection format not supported by backend %s\n",
                       dev->transport->name);
                return -ENOSYS;
        }
        if (!(dev->dev_flags & DF_CONFIGURED)) {
                pr_err("DIF protection format requires device to be configured\n");
                return -ENODEV;
        }
        if (dev->export_count) {
                pr_err("dev[%p]: Unable to format SE Device PROT type while"
                       " export_count is %d\n", dev, dev->export_count);
                return -EINVAL;
        }

        rc = dev->transport->format_prot(dev);
        if (rc)
                return rc;

        pr_debug("dev[%p]: SE Device Protection Format complete\n", dev);

        return 0;
}

int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
1010 { 1010 {
1011 if ((flag != 0) && (flag != 1)) { 1011 if ((flag != 0) && (flag != 1)) {
1012 pr_err("Illegal value %d\n", flag); 1012 pr_err("Illegal value %d\n", flag);
1013 return -EINVAL; 1013 return -EINVAL;
1014 } 1014 }
1015 dev->dev_attrib.enforce_pr_isids = flag; 1015 dev->dev_attrib.enforce_pr_isids = flag;
1016 pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev, 1016 pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
1017 (dev->dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled"); 1017 (dev->dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
1018 return 0; 1018 return 0;
1019 } 1019 }
1020 1020
1021 int se_dev_set_force_pr_aptpl(struct se_device *dev, int flag) 1021 int se_dev_set_force_pr_aptpl(struct se_device *dev, int flag)
1022 { 1022 {
1023 if ((flag != 0) && (flag != 1)) { 1023 if ((flag != 0) && (flag != 1)) {
1024 printk(KERN_ERR "Illegal value %d\n", flag); 1024 printk(KERN_ERR "Illegal value %d\n", flag);
1025 return -EINVAL; 1025 return -EINVAL;
1026 } 1026 }
1027 if (dev->export_count) { 1027 if (dev->export_count) {
1028 pr_err("dev[%p]: Unable to set force_pr_aptpl while" 1028 pr_err("dev[%p]: Unable to set force_pr_aptpl while"
1029 " export_count is %d\n", dev, dev->export_count); 1029 " export_count is %d\n", dev, dev->export_count);
1030 return -EINVAL; 1030 return -EINVAL;
1031 } 1031 }
1032 1032
1033 dev->dev_attrib.force_pr_aptpl = flag; 1033 dev->dev_attrib.force_pr_aptpl = flag;
1034 pr_debug("dev[%p]: SE Device force_pr_aptpl: %d\n", dev, flag); 1034 pr_debug("dev[%p]: SE Device force_pr_aptpl: %d\n", dev, flag);
1035 return 0; 1035 return 0;
1036 } 1036 }
1037 1037
1038 int se_dev_set_is_nonrot(struct se_device *dev, int flag) 1038 int se_dev_set_is_nonrot(struct se_device *dev, int flag)
1039 { 1039 {
1040 if ((flag != 0) && (flag != 1)) { 1040 if ((flag != 0) && (flag != 1)) {
1041 printk(KERN_ERR "Illegal value %d\n", flag); 1041 printk(KERN_ERR "Illegal value %d\n", flag);
1042 return -EINVAL; 1042 return -EINVAL;
1043 } 1043 }
1044 dev->dev_attrib.is_nonrot = flag; 1044 dev->dev_attrib.is_nonrot = flag;
1045 pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n", 1045 pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n",
1046 dev, flag); 1046 dev, flag);
1047 return 0; 1047 return 0;
1048 } 1048 }
1049 1049
1050 int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag) 1050 int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
1051 { 1051 {
1052 if (flag != 0) { 1052 if (flag != 0) {
1053 printk(KERN_ERR "dev[%p]: SE Device emulation of restricted" 1053 printk(KERN_ERR "dev[%p]: SE Device emulation of restricted"
1054 " reordering not implemented\n", dev); 1054 " reordering not implemented\n", dev);
1055 return -ENOSYS; 1055 return -ENOSYS;
1056 } 1056 }
1057 dev->dev_attrib.emulate_rest_reord = flag; 1057 dev->dev_attrib.emulate_rest_reord = flag;
1058 pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag); 1058 pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);
1059 return 0; 1059 return 0;
1060 } 1060 }
1061 1061
1062 /* 1062 /*
1063 * Note, this can only be called on unexported SE Device Object. 1063 * Note, this can only be called on unexported SE Device Object.
1064 */ 1064 */
1065 int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth) 1065 int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
1066 { 1066 {
1067 if (dev->export_count) { 1067 if (dev->export_count) {
1068 pr_err("dev[%p]: Unable to change SE Device TCQ while" 1068 pr_err("dev[%p]: Unable to change SE Device TCQ while"
1069 " export_count is %d\n", 1069 " export_count is %d\n",
1070 dev, dev->export_count); 1070 dev, dev->export_count);
1071 return -EINVAL; 1071 return -EINVAL;
1072 } 1072 }
1073 if (!queue_depth) { 1073 if (!queue_depth) {
1074 pr_err("dev[%p]: Illegal ZERO value for queue" 1074 pr_err("dev[%p]: Illegal ZERO value for queue"
1075 "_depth\n", dev); 1075 "_depth\n", dev);
1076 return -EINVAL; 1076 return -EINVAL;
1077 } 1077 }
1078 1078
1079 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { 1079 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1080 if (queue_depth > dev->dev_attrib.hw_queue_depth) { 1080 if (queue_depth > dev->dev_attrib.hw_queue_depth) {
1081 pr_err("dev[%p]: Passed queue_depth: %u" 1081 pr_err("dev[%p]: Passed queue_depth: %u"
1082 " exceeds TCM/SE_Device TCQ: %u\n", 1082 " exceeds TCM/SE_Device TCQ: %u\n",
1083 dev, queue_depth, 1083 dev, queue_depth,
1084 dev->dev_attrib.hw_queue_depth); 1084 dev->dev_attrib.hw_queue_depth);
1085 return -EINVAL; 1085 return -EINVAL;
1086 } 1086 }
1087 } else { 1087 } else {
1088 if (queue_depth > dev->dev_attrib.queue_depth) { 1088 if (queue_depth > dev->dev_attrib.queue_depth) {
1089 if (queue_depth > dev->dev_attrib.hw_queue_depth) { 1089 if (queue_depth > dev->dev_attrib.hw_queue_depth) {
1090 pr_err("dev[%p]: Passed queue_depth:" 1090 pr_err("dev[%p]: Passed queue_depth:"
1091 " %u exceeds TCM/SE_Device MAX" 1091 " %u exceeds TCM/SE_Device MAX"
1092 " TCQ: %u\n", dev, queue_depth, 1092 " TCQ: %u\n", dev, queue_depth,
1093 dev->dev_attrib.hw_queue_depth); 1093 dev->dev_attrib.hw_queue_depth);
1094 return -EINVAL; 1094 return -EINVAL;
1095 } 1095 }
1096 } 1096 }
1097 } 1097 }
1098 1098
1099 dev->dev_attrib.queue_depth = dev->queue_depth = queue_depth; 1099 dev->dev_attrib.queue_depth = dev->queue_depth = queue_depth;
1100 pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n", 1100 pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
1101 dev, queue_depth); 1101 dev, queue_depth);
1102 return 0; 1102 return 0;
1103 } 1103 }
1104 1104
1105 int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors) 1105 int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
1106 { 1106 {
1107 int block_size = dev->dev_attrib.block_size; 1107 int block_size = dev->dev_attrib.block_size;
1108 1108
1109 if (dev->export_count) { 1109 if (dev->export_count) {
1110 pr_err("dev[%p]: Unable to change SE Device" 1110 pr_err("dev[%p]: Unable to change SE Device"
1111 " fabric_max_sectors while export_count is %d\n", 1111 " fabric_max_sectors while export_count is %d\n",
1112 dev, dev->export_count); 1112 dev, dev->export_count);
1113 return -EINVAL; 1113 return -EINVAL;
1114 } 1114 }
1115 if (!fabric_max_sectors) { 1115 if (!fabric_max_sectors) {
1116 pr_err("dev[%p]: Illegal ZERO value for" 1116 pr_err("dev[%p]: Illegal ZERO value for"
1117 " fabric_max_sectors\n", dev); 1117 " fabric_max_sectors\n", dev);
1118 return -EINVAL; 1118 return -EINVAL;
1119 } 1119 }
1120 if (fabric_max_sectors < DA_STATUS_MAX_SECTORS_MIN) { 1120 if (fabric_max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
1121 pr_err("dev[%p]: Passed fabric_max_sectors: %u less than" 1121 pr_err("dev[%p]: Passed fabric_max_sectors: %u less than"
1122 " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, fabric_max_sectors, 1122 " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, fabric_max_sectors,
1123 DA_STATUS_MAX_SECTORS_MIN); 1123 DA_STATUS_MAX_SECTORS_MIN);
1124 return -EINVAL; 1124 return -EINVAL;
1125 } 1125 }
1126 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { 1126 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1127 if (fabric_max_sectors > dev->dev_attrib.hw_max_sectors) { 1127 if (fabric_max_sectors > dev->dev_attrib.hw_max_sectors) {
1128 pr_err("dev[%p]: Passed fabric_max_sectors: %u" 1128 pr_err("dev[%p]: Passed fabric_max_sectors: %u"
1129 " greater than TCM/SE_Device max_sectors:" 1129 " greater than TCM/SE_Device max_sectors:"
1130 " %u\n", dev, fabric_max_sectors, 1130 " %u\n", dev, fabric_max_sectors,
1131 dev->dev_attrib.hw_max_sectors); 1131 dev->dev_attrib.hw_max_sectors);
1132 return -EINVAL; 1132 return -EINVAL;
1133 } 1133 }
1134 } else { 1134 } else {
1135 if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) { 1135 if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
1136 pr_err("dev[%p]: Passed fabric_max_sectors: %u" 1136 pr_err("dev[%p]: Passed fabric_max_sectors: %u"
1137 " greater than DA_STATUS_MAX_SECTORS_MAX:" 1137 " greater than DA_STATUS_MAX_SECTORS_MAX:"
1138 " %u\n", dev, fabric_max_sectors, 1138 " %u\n", dev, fabric_max_sectors,
1139 DA_STATUS_MAX_SECTORS_MAX); 1139 DA_STATUS_MAX_SECTORS_MAX);
1140 return -EINVAL; 1140 return -EINVAL;
1141 } 1141 }
1142 } 1142 }
1143 /* 1143 /*
1144 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks() 1144 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
1145 */ 1145 */
1146 if (!block_size) { 1146 if (!block_size) {
1147 block_size = 512; 1147 block_size = 512;
1148 pr_warn("Defaulting to 512 for zero block_size\n"); 1148 pr_warn("Defaulting to 512 for zero block_size\n");
1149 } 1149 }
1150 fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors, 1150 fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors,
1151 block_size); 1151 block_size);
1152 1152
1153 dev->dev_attrib.fabric_max_sectors = fabric_max_sectors; 1153 dev->dev_attrib.fabric_max_sectors = fabric_max_sectors;
1154 pr_debug("dev[%p]: SE Device max_sectors changed to %u\n", 1154 pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
1155 dev, fabric_max_sectors); 1155 dev, fabric_max_sectors);
1156 return 0; 1156 return 0;
1157 } 1157 }
1158 1158
1159 int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors) 1159 int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
1160 { 1160 {
1161 if (dev->export_count) { 1161 if (dev->export_count) {
1162 pr_err("dev[%p]: Unable to change SE Device" 1162 pr_err("dev[%p]: Unable to change SE Device"
1163 " optimal_sectors while export_count is %d\n", 1163 " optimal_sectors while export_count is %d\n",
1164 dev, dev->export_count); 1164 dev, dev->export_count);
1165 return -EINVAL; 1165 return -EINVAL;
1166 } 1166 }
1167 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { 1167 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1168 pr_err("dev[%p]: Passed optimal_sectors cannot be" 1168 pr_err("dev[%p]: Passed optimal_sectors cannot be"
1169 " changed for TCM/pSCSI\n", dev); 1169 " changed for TCM/pSCSI\n", dev);
1170 return -EINVAL; 1170 return -EINVAL;
1171 } 1171 }
1172 if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) { 1172 if (optimal_sectors > dev->dev_attrib.hw_max_sectors) {
1173 pr_err("dev[%p]: Passed optimal_sectors %u cannot be" 1173 pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
1174 " greater than fabric_max_sectors: %u\n", dev, 1174 " greater than hw_max_sectors: %u\n", dev,
1175 optimal_sectors, dev->dev_attrib.fabric_max_sectors); 1175 optimal_sectors, dev->dev_attrib.hw_max_sectors);
1176 return -EINVAL; 1176 return -EINVAL;
1177 } 1177 }
1178 1178
1179 dev->dev_attrib.optimal_sectors = optimal_sectors; 1179 dev->dev_attrib.optimal_sectors = optimal_sectors;
1180 pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n", 1180 pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
1181 dev, optimal_sectors); 1181 dev, optimal_sectors);
1182 return 0; 1182 return 0;
1183 } 1183 }
1184 1184
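The hunk above is the user-visible side of this change: optimal_sectors is now validated against hw_max_sectors instead of fabric_max_sectors, since hw_max_sectors is the only remaining upper bound that still carries meaning for the backend. A minimal, self-contained sketch of the resulting check follows; the struct and function names are illustrative only and are not part of the patch:

	#include <stdio.h>

	/* Illustrative model of the two attributes involved in the check above. */
	struct attrib_model {
		unsigned int hw_max_sectors;	/* informational backend/subsystem granularity */
		unsigned int optimal_sectors;	/* reported as OPTIMAL TRANSFER LENGTH */
	};

	/* Mirrors the new validation: reject values above hw_max_sectors. */
	static int set_optimal_sectors(struct attrib_model *a, unsigned int wanted)
	{
		if (wanted > a->hw_max_sectors)
			return -1;		/* -EINVAL in the kernel code */
		a->optimal_sectors = wanted;
		return 0;
	}

	int main(void)
	{
		struct attrib_model a = { .hw_max_sectors = 16384 };

		printf("%d\n", set_optimal_sectors(&a, 8192));	/* accepted: prints 0  */
		printf("%d\n", set_optimal_sectors(&a, 32768));	/* rejected: prints -1 */
		return 0;
	}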
1185 int se_dev_set_block_size(struct se_device *dev, u32 block_size) 1185 int se_dev_set_block_size(struct se_device *dev, u32 block_size)
1186 { 1186 {
1187 if (dev->export_count) { 1187 if (dev->export_count) {
1188 pr_err("dev[%p]: Unable to change SE Device block_size" 1188 pr_err("dev[%p]: Unable to change SE Device block_size"
1189 " while export_count is %d\n", 1189 " while export_count is %d\n",
1190 dev, dev->export_count); 1190 dev, dev->export_count);
1191 return -EINVAL; 1191 return -EINVAL;
1192 } 1192 }
1193 1193
1194 if ((block_size != 512) && 1194 if ((block_size != 512) &&
1195 (block_size != 1024) && 1195 (block_size != 1024) &&
1196 (block_size != 2048) && 1196 (block_size != 2048) &&
1197 (block_size != 4096)) { 1197 (block_size != 4096)) {
1198 pr_err("dev[%p]: Illegal value for block_device: %u" 1198 pr_err("dev[%p]: Illegal value for block_device: %u"
1199 " for SE device, must be 512, 1024, 2048 or 4096\n", 1199 " for SE device, must be 512, 1024, 2048 or 4096\n",
1200 dev, block_size); 1200 dev, block_size);
1201 return -EINVAL; 1201 return -EINVAL;
1202 } 1202 }
1203 1203
1204 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { 1204 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1205 pr_err("dev[%p]: Not allowed to change block_size for" 1205 pr_err("dev[%p]: Not allowed to change block_size for"
1206 " Physical Device, use for Linux/SCSI to change" 1206 " Physical Device, use for Linux/SCSI to change"
1207 " block_size for underlying hardware\n", dev); 1207 " block_size for underlying hardware\n", dev);
1208 return -EINVAL; 1208 return -EINVAL;
1209 } 1209 }
1210 1210
1211 dev->dev_attrib.block_size = block_size; 1211 dev->dev_attrib.block_size = block_size;
1212 pr_debug("dev[%p]: SE Device block_size changed to %u\n", 1212 pr_debug("dev[%p]: SE Device block_size changed to %u\n",
1213 dev, block_size); 1213 dev, block_size);
1214 1214
1215 if (dev->dev_attrib.max_bytes_per_io) 1215 if (dev->dev_attrib.max_bytes_per_io)
1216 dev->dev_attrib.hw_max_sectors = 1216 dev->dev_attrib.hw_max_sectors =
1217 dev->dev_attrib.max_bytes_per_io / block_size; 1217 dev->dev_attrib.max_bytes_per_io / block_size;
1218 1218
1219 return 0; 1219 return 0;
1220 } 1220 }
1221 1221
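The max_bytes_per_io handling at the end of se_dev_set_block_size() matters for backends such as FILEIO that advertise a per-I/O byte cap: when block_size changes, hw_max_sectors is rescaled so the cap stays constant in bytes. A quick illustration of that arithmetic; the 8 MiB figure is only an example, not a value taken from this patch:

	#include <stdio.h>

	int main(void)
	{
		unsigned int max_bytes_per_io = 8u * 1024 * 1024;	/* example per-I/O cap in bytes */

		/* hw_max_sectors tracks block_size so the byte cap is preserved */
		printf("%u\n", max_bytes_per_io / 512);		/* 16384 sectors at 512-byte blocks  */
		printf("%u\n", max_bytes_per_io / 4096);	/* 2048 sectors at 4096-byte blocks  */
		return 0;
	}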
1222 struct se_lun *core_dev_add_lun( 1222 struct se_lun *core_dev_add_lun(
1223 struct se_portal_group *tpg, 1223 struct se_portal_group *tpg,
1224 struct se_device *dev, 1224 struct se_device *dev,
1225 u32 unpacked_lun) 1225 u32 unpacked_lun)
1226 { 1226 {
1227 struct se_lun *lun; 1227 struct se_lun *lun;
1228 int rc; 1228 int rc;
1229 1229
1230 lun = core_tpg_alloc_lun(tpg, unpacked_lun); 1230 lun = core_tpg_alloc_lun(tpg, unpacked_lun);
1231 if (IS_ERR(lun)) 1231 if (IS_ERR(lun))
1232 return lun; 1232 return lun;
1233 1233
1234 rc = core_tpg_add_lun(tpg, lun, 1234 rc = core_tpg_add_lun(tpg, lun,
1235 TRANSPORT_LUNFLAGS_READ_WRITE, dev); 1235 TRANSPORT_LUNFLAGS_READ_WRITE, dev);
1236 if (rc < 0) 1236 if (rc < 0)
1237 return ERR_PTR(rc); 1237 return ERR_PTR(rc);
1238 1238
1239 pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from" 1239 pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
1240 " CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(), 1240 " CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
1241 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, 1241 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
1242 tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id); 1242 tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
1243 /* 1243 /*
1244 * Update LUN maps for dynamically added initiators when 1244 * Update LUN maps for dynamically added initiators when
1245 * generate_node_acl is enabled. 1245 * generate_node_acl is enabled.
1246 */ 1246 */
1247 if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) { 1247 if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
1248 struct se_node_acl *acl; 1248 struct se_node_acl *acl;
1249 spin_lock_irq(&tpg->acl_node_lock); 1249 spin_lock_irq(&tpg->acl_node_lock);
1250 list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { 1250 list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
1251 if (acl->dynamic_node_acl && 1251 if (acl->dynamic_node_acl &&
1252 (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only || 1252 (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
1253 !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) { 1253 !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
1254 spin_unlock_irq(&tpg->acl_node_lock); 1254 spin_unlock_irq(&tpg->acl_node_lock);
1255 core_tpg_add_node_to_devs(acl, tpg); 1255 core_tpg_add_node_to_devs(acl, tpg);
1256 spin_lock_irq(&tpg->acl_node_lock); 1256 spin_lock_irq(&tpg->acl_node_lock);
1257 } 1257 }
1258 } 1258 }
1259 spin_unlock_irq(&tpg->acl_node_lock); 1259 spin_unlock_irq(&tpg->acl_node_lock);
1260 } 1260 }
1261 1261
1262 return lun; 1262 return lun;
1263 } 1263 }
1264 1264
1265 /* core_dev_del_lun(): 1265 /* core_dev_del_lun():
1266 * 1266 *
1267 * 1267 *
1268 */ 1268 */
1269 void core_dev_del_lun( 1269 void core_dev_del_lun(
1270 struct se_portal_group *tpg, 1270 struct se_portal_group *tpg,
1271 struct se_lun *lun) 1271 struct se_lun *lun)
1272 { 1272 {
1273 pr_debug("%s_TPG[%u]_LUN[%u] - Deactivating %s Logical Unit from" 1273 pr_debug("%s_TPG[%u]_LUN[%u] - Deactivating %s Logical Unit from"
1274 " device object\n", tpg->se_tpg_tfo->get_fabric_name(), 1274 " device object\n", tpg->se_tpg_tfo->get_fabric_name(),
1275 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, 1275 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
1276 tpg->se_tpg_tfo->get_fabric_name()); 1276 tpg->se_tpg_tfo->get_fabric_name());
1277 1277
1278 core_tpg_remove_lun(tpg, lun); 1278 core_tpg_remove_lun(tpg, lun);
1279 } 1279 }
1280 1280
1281 struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun) 1281 struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
1282 { 1282 {
1283 struct se_lun *lun; 1283 struct se_lun *lun;
1284 1284
1285 spin_lock(&tpg->tpg_lun_lock); 1285 spin_lock(&tpg->tpg_lun_lock);
1286 if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { 1286 if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
1287 pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS" 1287 pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
1288 "_PER_TPG-1: %u for Target Portal Group: %hu\n", 1288 "_PER_TPG-1: %u for Target Portal Group: %hu\n",
1289 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, 1289 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
1290 TRANSPORT_MAX_LUNS_PER_TPG-1, 1290 TRANSPORT_MAX_LUNS_PER_TPG-1,
1291 tpg->se_tpg_tfo->tpg_get_tag(tpg)); 1291 tpg->se_tpg_tfo->tpg_get_tag(tpg));
1292 spin_unlock(&tpg->tpg_lun_lock); 1292 spin_unlock(&tpg->tpg_lun_lock);
1293 return NULL; 1293 return NULL;
1294 } 1294 }
1295 lun = tpg->tpg_lun_list[unpacked_lun]; 1295 lun = tpg->tpg_lun_list[unpacked_lun];
1296 1296
1297 if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) { 1297 if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
1298 pr_err("%s Logical Unit Number: %u is not free on" 1298 pr_err("%s Logical Unit Number: %u is not free on"
1299 " Target Portal Group: %hu, ignoring request.\n", 1299 " Target Portal Group: %hu, ignoring request.\n",
1300 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, 1300 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
1301 tpg->se_tpg_tfo->tpg_get_tag(tpg)); 1301 tpg->se_tpg_tfo->tpg_get_tag(tpg));
1302 spin_unlock(&tpg->tpg_lun_lock); 1302 spin_unlock(&tpg->tpg_lun_lock);
1303 return NULL; 1303 return NULL;
1304 } 1304 }
1305 spin_unlock(&tpg->tpg_lun_lock); 1305 spin_unlock(&tpg->tpg_lun_lock);
1306 1306
1307 return lun; 1307 return lun;
1308 } 1308 }
1309 1309
1310 /* core_dev_get_lun(): 1310 /* core_dev_get_lun():
1311 * 1311 *
1312 * 1312 *
1313 */ 1313 */
1314 static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun) 1314 static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun)
1315 { 1315 {
1316 struct se_lun *lun; 1316 struct se_lun *lun;
1317 1317
1318 spin_lock(&tpg->tpg_lun_lock); 1318 spin_lock(&tpg->tpg_lun_lock);
1319 if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { 1319 if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
1320 pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER" 1320 pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
1321 "_TPG-1: %u for Target Portal Group: %hu\n", 1321 "_TPG-1: %u for Target Portal Group: %hu\n",
1322 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, 1322 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
1323 TRANSPORT_MAX_LUNS_PER_TPG-1, 1323 TRANSPORT_MAX_LUNS_PER_TPG-1,
1324 tpg->se_tpg_tfo->tpg_get_tag(tpg)); 1324 tpg->se_tpg_tfo->tpg_get_tag(tpg));
1325 spin_unlock(&tpg->tpg_lun_lock); 1325 spin_unlock(&tpg->tpg_lun_lock);
1326 return NULL; 1326 return NULL;
1327 } 1327 }
1328 lun = tpg->tpg_lun_list[unpacked_lun]; 1328 lun = tpg->tpg_lun_list[unpacked_lun];
1329 1329
1330 if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) { 1330 if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
1331 pr_err("%s Logical Unit Number: %u is not active on" 1331 pr_err("%s Logical Unit Number: %u is not active on"
1332 " Target Portal Group: %hu, ignoring request.\n", 1332 " Target Portal Group: %hu, ignoring request.\n",
1333 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, 1333 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
1334 tpg->se_tpg_tfo->tpg_get_tag(tpg)); 1334 tpg->se_tpg_tfo->tpg_get_tag(tpg));
1335 spin_unlock(&tpg->tpg_lun_lock); 1335 spin_unlock(&tpg->tpg_lun_lock);
1336 return NULL; 1336 return NULL;
1337 } 1337 }
1338 spin_unlock(&tpg->tpg_lun_lock); 1338 spin_unlock(&tpg->tpg_lun_lock);
1339 1339
1340 return lun; 1340 return lun;
1341 } 1341 }
1342 1342
1343 struct se_lun_acl *core_dev_init_initiator_node_lun_acl( 1343 struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
1344 struct se_portal_group *tpg, 1344 struct se_portal_group *tpg,
1345 struct se_node_acl *nacl, 1345 struct se_node_acl *nacl,
1346 u32 mapped_lun, 1346 u32 mapped_lun,
1347 int *ret) 1347 int *ret)
1348 { 1348 {
1349 struct se_lun_acl *lacl; 1349 struct se_lun_acl *lacl;
1350 1350
1351 if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) { 1351 if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
1352 pr_err("%s InitiatorName exceeds maximum size.\n", 1352 pr_err("%s InitiatorName exceeds maximum size.\n",
1353 tpg->se_tpg_tfo->get_fabric_name()); 1353 tpg->se_tpg_tfo->get_fabric_name());
1354 *ret = -EOVERFLOW; 1354 *ret = -EOVERFLOW;
1355 return NULL; 1355 return NULL;
1356 } 1356 }
1357 lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL); 1357 lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
1358 if (!lacl) { 1358 if (!lacl) {
1359 pr_err("Unable to allocate memory for struct se_lun_acl.\n"); 1359 pr_err("Unable to allocate memory for struct se_lun_acl.\n");
1360 *ret = -ENOMEM; 1360 *ret = -ENOMEM;
1361 return NULL; 1361 return NULL;
1362 } 1362 }
1363 1363
1364 INIT_LIST_HEAD(&lacl->lacl_list); 1364 INIT_LIST_HEAD(&lacl->lacl_list);
1365 lacl->mapped_lun = mapped_lun; 1365 lacl->mapped_lun = mapped_lun;
1366 lacl->se_lun_nacl = nacl; 1366 lacl->se_lun_nacl = nacl;
1367 snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", 1367 snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s",
1368 nacl->initiatorname); 1368 nacl->initiatorname);
1369 1369
1370 return lacl; 1370 return lacl;
1371 } 1371 }
1372 1372
1373 int core_dev_add_initiator_node_lun_acl( 1373 int core_dev_add_initiator_node_lun_acl(
1374 struct se_portal_group *tpg, 1374 struct se_portal_group *tpg,
1375 struct se_lun_acl *lacl, 1375 struct se_lun_acl *lacl,
1376 u32 unpacked_lun, 1376 u32 unpacked_lun,
1377 u32 lun_access) 1377 u32 lun_access)
1378 { 1378 {
1379 struct se_lun *lun; 1379 struct se_lun *lun;
1380 struct se_node_acl *nacl; 1380 struct se_node_acl *nacl;
1381 1381
1382 lun = core_dev_get_lun(tpg, unpacked_lun); 1382 lun = core_dev_get_lun(tpg, unpacked_lun);
1383 if (!lun) { 1383 if (!lun) {
1384 pr_err("%s Logical Unit Number: %u is not active on" 1384 pr_err("%s Logical Unit Number: %u is not active on"
1385 " Target Portal Group: %hu, ignoring request.\n", 1385 " Target Portal Group: %hu, ignoring request.\n",
1386 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, 1386 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
1387 tpg->se_tpg_tfo->tpg_get_tag(tpg)); 1387 tpg->se_tpg_tfo->tpg_get_tag(tpg));
1388 return -EINVAL; 1388 return -EINVAL;
1389 } 1389 }
1390 1390
1391 nacl = lacl->se_lun_nacl; 1391 nacl = lacl->se_lun_nacl;
1392 if (!nacl) 1392 if (!nacl)
1393 return -EINVAL; 1393 return -EINVAL;
1394 1394
1395 if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) && 1395 if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
1396 (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE)) 1396 (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
1397 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; 1397 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
1398 1398
1399 lacl->se_lun = lun; 1399 lacl->se_lun = lun;
1400 1400
1401 if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun, 1401 if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
1402 lun_access, nacl, tpg) < 0) 1402 lun_access, nacl, tpg) < 0)
1403 return -EINVAL; 1403 return -EINVAL;
1404 1404
1405 spin_lock(&lun->lun_acl_lock); 1405 spin_lock(&lun->lun_acl_lock);
1406 list_add_tail(&lacl->lacl_list, &lun->lun_acl_list); 1406 list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
1407 atomic_inc_mb(&lun->lun_acl_count); 1407 atomic_inc_mb(&lun->lun_acl_count);
1408 spin_unlock(&lun->lun_acl_lock); 1408 spin_unlock(&lun->lun_acl_lock);
1409 1409
1410 pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for " 1410 pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for "
1411 " InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(), 1411 " InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
1412 tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun, 1412 tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
1413 (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO", 1413 (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
1414 lacl->initiatorname); 1414 lacl->initiatorname);
1415 /* 1415 /*
1416 * Check to see if there are any existing persistent reservation APTPL 1416 * Check to see if there are any existing persistent reservation APTPL
1417 * pre-registrations that need to be enabled for this LUN ACL.. 1417 * pre-registrations that need to be enabled for this LUN ACL..
1418 */ 1418 */
1419 core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, nacl, 1419 core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, nacl,
1420 lacl->mapped_lun); 1420 lacl->mapped_lun);
1421 return 0; 1421 return 0;
1422 } 1422 }
1423 1423
1424 /* core_dev_del_initiator_node_lun_acl(): 1424 /* core_dev_del_initiator_node_lun_acl():
1425 * 1425 *
1426 * 1426 *
1427 */ 1427 */
1428 int core_dev_del_initiator_node_lun_acl( 1428 int core_dev_del_initiator_node_lun_acl(
1429 struct se_portal_group *tpg, 1429 struct se_portal_group *tpg,
1430 struct se_lun *lun, 1430 struct se_lun *lun,
1431 struct se_lun_acl *lacl) 1431 struct se_lun_acl *lacl)
1432 { 1432 {
1433 struct se_node_acl *nacl; 1433 struct se_node_acl *nacl;
1434 1434
1435 nacl = lacl->se_lun_nacl; 1435 nacl = lacl->se_lun_nacl;
1436 if (!nacl) 1436 if (!nacl)
1437 return -EINVAL; 1437 return -EINVAL;
1438 1438
1439 spin_lock(&lun->lun_acl_lock); 1439 spin_lock(&lun->lun_acl_lock);
1440 list_del(&lacl->lacl_list); 1440 list_del(&lacl->lacl_list);
1441 atomic_dec_mb(&lun->lun_acl_count); 1441 atomic_dec_mb(&lun->lun_acl_count);
1442 spin_unlock(&lun->lun_acl_lock); 1442 spin_unlock(&lun->lun_acl_lock);
1443 1443
1444 core_disable_device_list_for_node(lun, NULL, lacl->mapped_lun, 1444 core_disable_device_list_for_node(lun, NULL, lacl->mapped_lun,
1445 TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg); 1445 TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);
1446 1446
1447 lacl->se_lun = NULL; 1447 lacl->se_lun = NULL;
1448 1448
1449 pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for" 1449 pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for"
1450 " InitiatorNode: %s Mapped LUN: %u\n", 1450 " InitiatorNode: %s Mapped LUN: %u\n",
1451 tpg->se_tpg_tfo->get_fabric_name(), 1451 tpg->se_tpg_tfo->get_fabric_name(),
1452 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, 1452 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
1453 lacl->initiatorname, lacl->mapped_lun); 1453 lacl->initiatorname, lacl->mapped_lun);
1454 1454
1455 return 0; 1455 return 0;
1456 } 1456 }
1457 1457
1458 void core_dev_free_initiator_node_lun_acl( 1458 void core_dev_free_initiator_node_lun_acl(
1459 struct se_portal_group *tpg, 1459 struct se_portal_group *tpg,
1460 struct se_lun_acl *lacl) 1460 struct se_lun_acl *lacl)
1461 { 1461 {
1462 pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s" 1462 pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
1463 " Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(), 1463 " Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
1464 tpg->se_tpg_tfo->tpg_get_tag(tpg), 1464 tpg->se_tpg_tfo->tpg_get_tag(tpg),
1465 tpg->se_tpg_tfo->get_fabric_name(), 1465 tpg->se_tpg_tfo->get_fabric_name(),
1466 lacl->initiatorname, lacl->mapped_lun); 1466 lacl->initiatorname, lacl->mapped_lun);
1467 1467
1468 kfree(lacl); 1468 kfree(lacl);
1469 } 1469 }
1470 1470
1471 static void scsi_dump_inquiry(struct se_device *dev) 1471 static void scsi_dump_inquiry(struct se_device *dev)
1472 { 1472 {
1473 struct t10_wwn *wwn = &dev->t10_wwn; 1473 struct t10_wwn *wwn = &dev->t10_wwn;
1474 char buf[17]; 1474 char buf[17];
1475 int i, device_type; 1475 int i, device_type;
1476 /* 1476 /*
1477 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer 1477 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
1478 */ 1478 */
1479 for (i = 0; i < 8; i++) 1479 for (i = 0; i < 8; i++)
1480 if (wwn->vendor[i] >= 0x20) 1480 if (wwn->vendor[i] >= 0x20)
1481 buf[i] = wwn->vendor[i]; 1481 buf[i] = wwn->vendor[i];
1482 else 1482 else
1483 buf[i] = ' '; 1483 buf[i] = ' ';
1484 buf[i] = '\0'; 1484 buf[i] = '\0';
1485 pr_debug(" Vendor: %s\n", buf); 1485 pr_debug(" Vendor: %s\n", buf);
1486 1486
1487 for (i = 0; i < 16; i++) 1487 for (i = 0; i < 16; i++)
1488 if (wwn->model[i] >= 0x20) 1488 if (wwn->model[i] >= 0x20)
1489 buf[i] = wwn->model[i]; 1489 buf[i] = wwn->model[i];
1490 else 1490 else
1491 buf[i] = ' '; 1491 buf[i] = ' ';
1492 buf[i] = '\0'; 1492 buf[i] = '\0';
1493 pr_debug(" Model: %s\n", buf); 1493 pr_debug(" Model: %s\n", buf);
1494 1494
1495 for (i = 0; i < 4; i++) 1495 for (i = 0; i < 4; i++)
1496 if (wwn->revision[i] >= 0x20) 1496 if (wwn->revision[i] >= 0x20)
1497 buf[i] = wwn->revision[i]; 1497 buf[i] = wwn->revision[i];
1498 else 1498 else
1499 buf[i] = ' '; 1499 buf[i] = ' ';
1500 buf[i] = '\0'; 1500 buf[i] = '\0';
1501 pr_debug(" Revision: %s\n", buf); 1501 pr_debug(" Revision: %s\n", buf);
1502 1502
1503 device_type = dev->transport->get_device_type(dev); 1503 device_type = dev->transport->get_device_type(dev);
1504 pr_debug(" Type: %s ", scsi_device_type(device_type)); 1504 pr_debug(" Type: %s ", scsi_device_type(device_type));
1505 } 1505 }
1506 1506
1507 struct se_device *target_alloc_device(struct se_hba *hba, const char *name) 1507 struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
1508 { 1508 {
1509 struct se_device *dev; 1509 struct se_device *dev;
1510 struct se_lun *xcopy_lun; 1510 struct se_lun *xcopy_lun;
1511 1511
1512 dev = hba->transport->alloc_device(hba, name); 1512 dev = hba->transport->alloc_device(hba, name);
1513 if (!dev) 1513 if (!dev)
1514 return NULL; 1514 return NULL;
1515 1515
1516 dev->dev_link_magic = SE_DEV_LINK_MAGIC; 1516 dev->dev_link_magic = SE_DEV_LINK_MAGIC;
1517 dev->se_hba = hba; 1517 dev->se_hba = hba;
1518 dev->transport = hba->transport; 1518 dev->transport = hba->transport;
1519 dev->prot_length = sizeof(struct se_dif_v1_tuple); 1519 dev->prot_length = sizeof(struct se_dif_v1_tuple);
1520 1520
1521 INIT_LIST_HEAD(&dev->dev_list); 1521 INIT_LIST_HEAD(&dev->dev_list);
1522 INIT_LIST_HEAD(&dev->dev_sep_list); 1522 INIT_LIST_HEAD(&dev->dev_sep_list);
1523 INIT_LIST_HEAD(&dev->dev_tmr_list); 1523 INIT_LIST_HEAD(&dev->dev_tmr_list);
1524 INIT_LIST_HEAD(&dev->delayed_cmd_list); 1524 INIT_LIST_HEAD(&dev->delayed_cmd_list);
1525 INIT_LIST_HEAD(&dev->state_list); 1525 INIT_LIST_HEAD(&dev->state_list);
1526 INIT_LIST_HEAD(&dev->qf_cmd_list); 1526 INIT_LIST_HEAD(&dev->qf_cmd_list);
1527 INIT_LIST_HEAD(&dev->g_dev_node); 1527 INIT_LIST_HEAD(&dev->g_dev_node);
1528 spin_lock_init(&dev->execute_task_lock); 1528 spin_lock_init(&dev->execute_task_lock);
1529 spin_lock_init(&dev->delayed_cmd_lock); 1529 spin_lock_init(&dev->delayed_cmd_lock);
1530 spin_lock_init(&dev->dev_reservation_lock); 1530 spin_lock_init(&dev->dev_reservation_lock);
1531 spin_lock_init(&dev->se_port_lock); 1531 spin_lock_init(&dev->se_port_lock);
1532 spin_lock_init(&dev->se_tmr_lock); 1532 spin_lock_init(&dev->se_tmr_lock);
1533 spin_lock_init(&dev->qf_cmd_lock); 1533 spin_lock_init(&dev->qf_cmd_lock);
1534 sema_init(&dev->caw_sem, 1); 1534 sema_init(&dev->caw_sem, 1);
1535 atomic_set(&dev->dev_ordered_id, 0); 1535 atomic_set(&dev->dev_ordered_id, 0);
1536 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list); 1536 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
1537 spin_lock_init(&dev->t10_wwn.t10_vpd_lock); 1537 spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
1538 INIT_LIST_HEAD(&dev->t10_pr.registration_list); 1538 INIT_LIST_HEAD(&dev->t10_pr.registration_list);
1539 INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list); 1539 INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
1540 spin_lock_init(&dev->t10_pr.registration_lock); 1540 spin_lock_init(&dev->t10_pr.registration_lock);
1541 spin_lock_init(&dev->t10_pr.aptpl_reg_lock); 1541 spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
1542 INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list); 1542 INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
1543 spin_lock_init(&dev->t10_alua.tg_pt_gps_lock); 1543 spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
1544 INIT_LIST_HEAD(&dev->t10_alua.lba_map_list); 1544 INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
1545 spin_lock_init(&dev->t10_alua.lba_map_lock); 1545 spin_lock_init(&dev->t10_alua.lba_map_lock);
1546 1546
1547 dev->t10_wwn.t10_dev = dev; 1547 dev->t10_wwn.t10_dev = dev;
1548 dev->t10_alua.t10_dev = dev; 1548 dev->t10_alua.t10_dev = dev;
1549 1549
1550 dev->dev_attrib.da_dev = dev; 1550 dev->dev_attrib.da_dev = dev;
1551 dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS; 1551 dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
1552 dev->dev_attrib.emulate_dpo = DA_EMULATE_DPO; 1552 dev->dev_attrib.emulate_dpo = DA_EMULATE_DPO;
1553 dev->dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE; 1553 dev->dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
1554 dev->dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ; 1554 dev->dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
1555 dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE; 1555 dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
1556 dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL; 1556 dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
1557 dev->dev_attrib.emulate_tas = DA_EMULATE_TAS; 1557 dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
1558 dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU; 1558 dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
1559 dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS; 1559 dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
1560 dev->dev_attrib.emulate_caw = DA_EMULATE_CAW; 1560 dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
1561 dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC; 1561 dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
1562 dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT; 1562 dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
1563 dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS; 1563 dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
1564 dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL; 1564 dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
1565 dev->dev_attrib.is_nonrot = DA_IS_NONROT; 1565 dev->dev_attrib.is_nonrot = DA_IS_NONROT;
1566 dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD; 1566 dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
1567 dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT; 1567 dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
1568 dev->dev_attrib.max_unmap_block_desc_count = 1568 dev->dev_attrib.max_unmap_block_desc_count =
1569 DA_MAX_UNMAP_BLOCK_DESC_COUNT; 1569 DA_MAX_UNMAP_BLOCK_DESC_COUNT;
1570 dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT; 1570 dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
1571 dev->dev_attrib.unmap_granularity_alignment = 1571 dev->dev_attrib.unmap_granularity_alignment =
1572 DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT; 1572 DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
1573 dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN; 1573 dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
1574 dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS; 1574 dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
1575 dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;
1576 1575
1577 xcopy_lun = &dev->xcopy_lun; 1576 xcopy_lun = &dev->xcopy_lun;
1578 xcopy_lun->lun_se_dev = dev; 1577 xcopy_lun->lun_se_dev = dev;
1579 init_completion(&xcopy_lun->lun_shutdown_comp); 1578 init_completion(&xcopy_lun->lun_shutdown_comp);
1580 INIT_LIST_HEAD(&xcopy_lun->lun_acl_list); 1579 INIT_LIST_HEAD(&xcopy_lun->lun_acl_list);
1581 spin_lock_init(&xcopy_lun->lun_acl_lock); 1580 spin_lock_init(&xcopy_lun->lun_acl_lock);
1582 spin_lock_init(&xcopy_lun->lun_sep_lock); 1581 spin_lock_init(&xcopy_lun->lun_sep_lock);
1583 init_completion(&xcopy_lun->lun_ref_comp); 1582 init_completion(&xcopy_lun->lun_ref_comp);
1584 1583
1585 return dev; 1584 return dev;
1586 } 1585 }
1587 1586
1588 int target_configure_device(struct se_device *dev) 1587 int target_configure_device(struct se_device *dev)
1589 { 1588 {
1590 struct se_hba *hba = dev->se_hba; 1589 struct se_hba *hba = dev->se_hba;
1591 int ret; 1590 int ret;
1592 1591
1593 if (dev->dev_flags & DF_CONFIGURED) { 1592 if (dev->dev_flags & DF_CONFIGURED) {
1594 pr_err("se_dev->se_dev_ptr already set for storage" 1593 pr_err("se_dev->se_dev_ptr already set for storage"
1595 " object\n"); 1594 " object\n");
1596 return -EEXIST; 1595 return -EEXIST;
1597 } 1596 }
1598 1597
1599 ret = dev->transport->configure_device(dev); 1598 ret = dev->transport->configure_device(dev);
1600 if (ret) 1599 if (ret)
1601 goto out; 1600 goto out;
1602 dev->dev_flags |= DF_CONFIGURED; 1601 dev->dev_flags |= DF_CONFIGURED;
1603 1602
1604 /* 1603 /*
1605 * XXX: there is not much point to have two different values here.. 1604 * XXX: there is not much point to have two different values here..
1606 */ 1605 */
1607 dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size; 1606 dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
1608 dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth; 1607 dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;
1609 1608
1610 /* 1609 /*
1611 * Align max_hw_sectors down to PAGE_SIZE I/O transfers 1610 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
1612 */ 1611 */
1613 dev->dev_attrib.hw_max_sectors = 1612 dev->dev_attrib.hw_max_sectors =
1614 se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors, 1613 se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
1615 dev->dev_attrib.hw_block_size); 1614 dev->dev_attrib.hw_block_size);
1615 dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;
1616 1616
1617 dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX); 1617 dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
1618 dev->creation_time = get_jiffies_64(); 1618 dev->creation_time = get_jiffies_64();
1619 1619
1620 ret = core_setup_alua(dev); 1620 ret = core_setup_alua(dev);
1621 if (ret) 1621 if (ret)
1622 goto out; 1622 goto out;
1623 1623
1624 /* 1624 /*
1625 * Startup the struct se_device processing thread 1625 * Startup the struct se_device processing thread
1626 */ 1626 */
1627 dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1, 1627 dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
1628 dev->transport->name); 1628 dev->transport->name);
1629 if (!dev->tmr_wq) { 1629 if (!dev->tmr_wq) {
1630 pr_err("Unable to create tmr workqueue for %s\n", 1630 pr_err("Unable to create tmr workqueue for %s\n",
1631 dev->transport->name); 1631 dev->transport->name);
1632 ret = -ENOMEM; 1632 ret = -ENOMEM;
1633 goto out_free_alua; 1633 goto out_free_alua;
1634 } 1634 }
1635 1635
1636 /* 1636 /*
1637 * Setup work_queue for QUEUE_FULL 1637 * Setup work_queue for QUEUE_FULL
1638 */ 1638 */
1639 INIT_WORK(&dev->qf_work_queue, target_qf_do_work); 1639 INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
1640 1640
1641 /* 1641 /*
1642 * Preload the initial INQUIRY const values if we are doing 1642 * Preload the initial INQUIRY const values if we are doing
1643 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI 1643 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
1644 * passthrough because this is being provided by the backend LLD. 1644 * passthrough because this is being provided by the backend LLD.
1645 */ 1645 */
1646 if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) { 1646 if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
1647 strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8); 1647 strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8);
1648 strncpy(&dev->t10_wwn.model[0], 1648 strncpy(&dev->t10_wwn.model[0],
1649 dev->transport->inquiry_prod, 16); 1649 dev->transport->inquiry_prod, 16);
1650 strncpy(&dev->t10_wwn.revision[0], 1650 strncpy(&dev->t10_wwn.revision[0],
1651 dev->transport->inquiry_rev, 4); 1651 dev->transport->inquiry_rev, 4);
1652 } 1652 }
1653 1653
1654 scsi_dump_inquiry(dev); 1654 scsi_dump_inquiry(dev);
1655 1655
1656 spin_lock(&hba->device_lock); 1656 spin_lock(&hba->device_lock);
1657 hba->dev_count++; 1657 hba->dev_count++;
1658 spin_unlock(&hba->device_lock); 1658 spin_unlock(&hba->device_lock);
1659 1659
1660 mutex_lock(&g_device_mutex); 1660 mutex_lock(&g_device_mutex);
1661 list_add_tail(&dev->g_dev_node, &g_device_list); 1661 list_add_tail(&dev->g_dev_node, &g_device_list);
1662 mutex_unlock(&g_device_mutex); 1662 mutex_unlock(&g_device_mutex);
1663 1663
1664 return 0; 1664 return 0;
1665 1665
1666 out_free_alua: 1666 out_free_alua:
1667 core_alua_free_lu_gp_mem(dev); 1667 core_alua_free_lu_gp_mem(dev);
1668 out: 1668 out:
1669 se_release_vpd_for_dev(dev); 1669 se_release_vpd_for_dev(dev);
1670 return ret; 1670 return ret;
1671 } 1671 }
1672 1672
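This is also where the optimal_sectors default now lives: the old DA_FABRIC_MAX_SECTORS assignment was dropped from target_alloc_device() above, because hw_max_sectors is only known once the backend's configure_device() has run and the value has been aligned down for PAGE_SIZE-sized transfers. The sketch below shows what that alignment step amounts to; the helper is a stand-in written for illustration, and the in-tree se_dev_align_max_sectors() may differ in detail:

	#include <stdio.h>

	#define EXAMPLE_PAGE_SIZE 4096u

	/* Round max_sectors down so a maximal I/O is a whole number of pages. */
	static unsigned int align_max_sectors(unsigned int max_sectors, unsigned int block_size)
	{
		unsigned int sectors_per_page = EXAMPLE_PAGE_SIZE / block_size;

		if (sectors_per_page == 0)	/* block_size >= page size: nothing to trim */
			return max_sectors;
		return max_sectors - (max_sectors % sectors_per_page);
	}

	int main(void)
	{
		/* e.g. an odd hardware limit of 2047 sectors at 512-byte blocks */
		printf("%u\n", align_max_sectors(2047, 512));	/* prints 2040, a page multiple */
		return 0;
	}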
1673 void target_free_device(struct se_device *dev) 1673 void target_free_device(struct se_device *dev)
1674 { 1674 {
1675 struct se_hba *hba = dev->se_hba; 1675 struct se_hba *hba = dev->se_hba;
1676 1676
1677 WARN_ON(!list_empty(&dev->dev_sep_list)); 1677 WARN_ON(!list_empty(&dev->dev_sep_list));
1678 1678
1679 if (dev->dev_flags & DF_CONFIGURED) { 1679 if (dev->dev_flags & DF_CONFIGURED) {
1680 destroy_workqueue(dev->tmr_wq); 1680 destroy_workqueue(dev->tmr_wq);
1681 1681
1682 mutex_lock(&g_device_mutex); 1682 mutex_lock(&g_device_mutex);
1683 list_del(&dev->g_dev_node); 1683 list_del(&dev->g_dev_node);
1684 mutex_unlock(&g_device_mutex); 1684 mutex_unlock(&g_device_mutex);
1685 1685
1686 spin_lock(&hba->device_lock); 1686 spin_lock(&hba->device_lock);
1687 hba->dev_count--; 1687 hba->dev_count--;
1688 spin_unlock(&hba->device_lock); 1688 spin_unlock(&hba->device_lock);
1689 } 1689 }
1690 1690
1691 core_alua_free_lu_gp_mem(dev); 1691 core_alua_free_lu_gp_mem(dev);
1692 core_alua_set_lba_map(dev, NULL, 0, 0); 1692 core_alua_set_lba_map(dev, NULL, 0, 0);
1693 core_scsi3_free_all_registrations(dev); 1693 core_scsi3_free_all_registrations(dev);
1694 se_release_vpd_for_dev(dev); 1694 se_release_vpd_for_dev(dev);
1695 1695
1696 if (dev->transport->free_prot) 1696 if (dev->transport->free_prot)
1697 dev->transport->free_prot(dev); 1697 dev->transport->free_prot(dev);
1698 1698
1699 dev->transport->free_device(dev); 1699 dev->transport->free_device(dev);
1700 } 1700 }
1701 1701
1702 int core_dev_setup_virtual_lun0(void) 1702 int core_dev_setup_virtual_lun0(void)
1703 { 1703 {
1704 struct se_hba *hba; 1704 struct se_hba *hba;
1705 struct se_device *dev; 1705 struct se_device *dev;
1706 char buf[] = "rd_pages=8,rd_nullio=1"; 1706 char buf[] = "rd_pages=8,rd_nullio=1";
1707 int ret; 1707 int ret;
1708 1708
1709 hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE); 1709 hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
1710 if (IS_ERR(hba)) 1710 if (IS_ERR(hba))
1711 return PTR_ERR(hba); 1711 return PTR_ERR(hba);
1712 1712
1713 dev = target_alloc_device(hba, "virt_lun0"); 1713 dev = target_alloc_device(hba, "virt_lun0");
1714 if (!dev) { 1714 if (!dev) {
1715 ret = -ENOMEM; 1715 ret = -ENOMEM;
1716 goto out_free_hba; 1716 goto out_free_hba;
1717 } 1717 }
1718 1718
1719 hba->transport->set_configfs_dev_params(dev, buf, sizeof(buf)); 1719 hba->transport->set_configfs_dev_params(dev, buf, sizeof(buf));
1720 1720
1721 ret = target_configure_device(dev); 1721 ret = target_configure_device(dev);
1722 if (ret) 1722 if (ret)
1723 goto out_free_se_dev; 1723 goto out_free_se_dev;
1724 1724
1725 lun0_hba = hba; 1725 lun0_hba = hba;
1726 g_lun0_dev = dev; 1726 g_lun0_dev = dev;
1727 return 0; 1727 return 0;
1728 1728
1729 out_free_se_dev: 1729 out_free_se_dev:
1730 target_free_device(dev); 1730 target_free_device(dev);
1731 out_free_hba: 1731 out_free_hba:
1732 core_delete_hba(hba); 1732 core_delete_hba(hba);
1733 return ret; 1733 return ret;
1734 } 1734 }
1735 1735
1736 1736
1737 void core_dev_release_virtual_lun0(void) 1737 void core_dev_release_virtual_lun0(void)
1738 { 1738 {
1739 struct se_hba *hba = lun0_hba; 1739 struct se_hba *hba = lun0_hba;
1740 1740
1741 if (!hba) 1741 if (!hba)
1742 return; 1742 return;
1743 1743
1744 if (g_lun0_dev) 1744 if (g_lun0_dev)
1745 target_free_device(g_lun0_dev); 1745 target_free_device(g_lun0_dev);
1746 core_delete_hba(hba); 1746 core_delete_hba(hba);
1747 } 1747 }
drivers/target/target_core_file.c
1 /******************************************************************************* 1 /*******************************************************************************
2 * Filename: target_core_file.c 2 * Filename: target_core_file.c
3 * 3 *
4 * This file contains the Storage Engine <-> FILEIO transport specific functions 4 * This file contains the Storage Engine <-> FILEIO transport specific functions
5 * 5 *
6 * (c) Copyright 2005-2013 Datera, Inc. 6 * (c) Copyright 2005-2013 Datera, Inc.
7 * 7 *
8 * Nicholas A. Bellinger <nab@kernel.org> 8 * Nicholas A. Bellinger <nab@kernel.org>
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by 11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or 12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version. 13 * (at your option) any later version.
14 * 14 *
15 * This program is distributed in the hope that it will be useful, 15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details. 18 * GNU General Public License for more details.
19 * 19 *
20 * You should have received a copy of the GNU General Public License 20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software 21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 22 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23 * 23 *
24 ******************************************************************************/ 24 ******************************************************************************/
25 25
26 #include <linux/string.h> 26 #include <linux/string.h>
27 #include <linux/parser.h> 27 #include <linux/parser.h>
28 #include <linux/timer.h> 28 #include <linux/timer.h>
29 #include <linux/blkdev.h> 29 #include <linux/blkdev.h>
30 #include <linux/slab.h> 30 #include <linux/slab.h>
31 #include <linux/spinlock.h> 31 #include <linux/spinlock.h>
32 #include <linux/module.h> 32 #include <linux/module.h>
33 #include <linux/falloc.h> 33 #include <linux/falloc.h>
34 #include <scsi/scsi.h> 34 #include <scsi/scsi.h>
35 #include <scsi/scsi_host.h> 35 #include <scsi/scsi_host.h>
36 #include <asm/unaligned.h> 36 #include <asm/unaligned.h>
37 37
38 #include <target/target_core_base.h> 38 #include <target/target_core_base.h>
39 #include <target/target_core_backend.h> 39 #include <target/target_core_backend.h>
40 40
41 #include "target_core_file.h" 41 #include "target_core_file.h"
42 42
43 static inline struct fd_dev *FD_DEV(struct se_device *dev) 43 static inline struct fd_dev *FD_DEV(struct se_device *dev)
44 { 44 {
45 return container_of(dev, struct fd_dev, dev); 45 return container_of(dev, struct fd_dev, dev);
46 } 46 }
47 47
48 /* fd_attach_hba(): (Part of se_subsystem_api_t template) 48 /* fd_attach_hba(): (Part of se_subsystem_api_t template)
49 * 49 *
50 * 50 *
51 */ 51 */
52 static int fd_attach_hba(struct se_hba *hba, u32 host_id) 52 static int fd_attach_hba(struct se_hba *hba, u32 host_id)
53 { 53 {
54 struct fd_host *fd_host; 54 struct fd_host *fd_host;
55 55
56 fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL); 56 fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL);
57 if (!fd_host) { 57 if (!fd_host) {
58 pr_err("Unable to allocate memory for struct fd_host\n"); 58 pr_err("Unable to allocate memory for struct fd_host\n");
59 return -ENOMEM; 59 return -ENOMEM;
60 } 60 }
61 61
62 fd_host->fd_host_id = host_id; 62 fd_host->fd_host_id = host_id;
63 63
64 hba->hba_ptr = fd_host; 64 hba->hba_ptr = fd_host;
65 65
66 pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic" 66 pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
67 " Target Core Stack %s\n", hba->hba_id, FD_VERSION, 67 " Target Core Stack %s\n", hba->hba_id, FD_VERSION,
68 TARGET_CORE_MOD_VERSION); 68 TARGET_CORE_MOD_VERSION);
69 pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic\n", 69 pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic\n",
70 hba->hba_id, fd_host->fd_host_id); 70 hba->hba_id, fd_host->fd_host_id);
71 71
72 return 0; 72 return 0;
73 } 73 }
74 74
75 static void fd_detach_hba(struct se_hba *hba) 75 static void fd_detach_hba(struct se_hba *hba)
76 { 76 {
77 struct fd_host *fd_host = hba->hba_ptr; 77 struct fd_host *fd_host = hba->hba_ptr;
78 78
79 pr_debug("CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic" 79 pr_debug("CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic"
80 " Target Core\n", hba->hba_id, fd_host->fd_host_id); 80 " Target Core\n", hba->hba_id, fd_host->fd_host_id);
81 81
82 kfree(fd_host); 82 kfree(fd_host);
83 hba->hba_ptr = NULL; 83 hba->hba_ptr = NULL;
84 } 84 }
85 85
86 static struct se_device *fd_alloc_device(struct se_hba *hba, const char *name) 86 static struct se_device *fd_alloc_device(struct se_hba *hba, const char *name)
87 { 87 {
88 struct fd_dev *fd_dev; 88 struct fd_dev *fd_dev;
89 struct fd_host *fd_host = hba->hba_ptr; 89 struct fd_host *fd_host = hba->hba_ptr;
90 90
91 fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL); 91 fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL);
92 if (!fd_dev) { 92 if (!fd_dev) {
93 pr_err("Unable to allocate memory for struct fd_dev\n"); 93 pr_err("Unable to allocate memory for struct fd_dev\n");
94 return NULL; 94 return NULL;
95 } 95 }
96 96
97 fd_dev->fd_host = fd_host; 97 fd_dev->fd_host = fd_host;
98 98
99 pr_debug("FILEIO: Allocated fd_dev for %p\n", name); 99 pr_debug("FILEIO: Allocated fd_dev for %p\n", name);
100 100
101 return &fd_dev->dev; 101 return &fd_dev->dev;
102 } 102 }
103 103
104 static int fd_configure_device(struct se_device *dev) 104 static int fd_configure_device(struct se_device *dev)
105 { 105 {
106 struct fd_dev *fd_dev = FD_DEV(dev); 106 struct fd_dev *fd_dev = FD_DEV(dev);
107 struct fd_host *fd_host = dev->se_hba->hba_ptr; 107 struct fd_host *fd_host = dev->se_hba->hba_ptr;
108 struct file *file; 108 struct file *file;
109 struct inode *inode = NULL; 109 struct inode *inode = NULL;
110 int flags, ret = -EINVAL; 110 int flags, ret = -EINVAL;
111 111
112 if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) { 112 if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
113 pr_err("Missing fd_dev_name=\n"); 113 pr_err("Missing fd_dev_name=\n");
114 return -EINVAL; 114 return -EINVAL;
115 } 115 }
116 116
117 /* 117 /*
118 * Use O_DSYNC by default instead of O_SYNC to forgo syncing 118 * Use O_DSYNC by default instead of O_SYNC to forgo syncing
119 * of pure timestamp updates. 119 * of pure timestamp updates.
120 */ 120 */
121 flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC; 121 flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
122 122
123 /* 123 /*
124 * Optionally allow fd_buffered_io=1 to be enabled for people 124 * Optionally allow fd_buffered_io=1 to be enabled for people
125 * who want to use the fs buffer cache as a WriteCache mechanism. 125 * who want to use the fs buffer cache as a WriteCache mechanism.
126 * 126 *
127 * This means that in event of a hard failure, there is a risk 127 * This means that in event of a hard failure, there is a risk
128 * of silent data-loss if the SCSI client has *not* performed a 128 * of silent data-loss if the SCSI client has *not* performed a
129 * forced unit access (FUA) write, or issued SYNCHRONIZE_CACHE 129 * forced unit access (FUA) write, or issued SYNCHRONIZE_CACHE
130 * to write-out the entire device cache. 130 * to write-out the entire device cache.
131 */ 131 */
132 if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) { 132 if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
133 pr_debug("FILEIO: Disabling O_DSYNC, using buffered FILEIO\n"); 133 pr_debug("FILEIO: Disabling O_DSYNC, using buffered FILEIO\n");
134 flags &= ~O_DSYNC; 134 flags &= ~O_DSYNC;
135 } 135 }
136 136
137 file = filp_open(fd_dev->fd_dev_name, flags, 0600); 137 file = filp_open(fd_dev->fd_dev_name, flags, 0600);
138 if (IS_ERR(file)) { 138 if (IS_ERR(file)) {
139 pr_err("filp_open(%s) failed\n", fd_dev->fd_dev_name); 139 pr_err("filp_open(%s) failed\n", fd_dev->fd_dev_name);
140 ret = PTR_ERR(file); 140 ret = PTR_ERR(file);
141 goto fail; 141 goto fail;
142 } 142 }
143 fd_dev->fd_file = file; 143 fd_dev->fd_file = file;
144 /* 144 /*
145 * If using a block backend with this struct file, we extract 145 * If using a block backend with this struct file, we extract
146 * fd_dev->fd_[block,dev]_size from struct block_device. 146 * fd_dev->fd_[block,dev]_size from struct block_device.
147 * 147 *
148 * Otherwise, we use the passed fd_size= from configfs 148 * Otherwise, we use the passed fd_size= from configfs
149 */ 149 */
150 inode = file->f_mapping->host; 150 inode = file->f_mapping->host;
151 if (S_ISBLK(inode->i_mode)) { 151 if (S_ISBLK(inode->i_mode)) {
152 struct request_queue *q = bdev_get_queue(inode->i_bdev); 152 struct request_queue *q = bdev_get_queue(inode->i_bdev);
153 unsigned long long dev_size; 153 unsigned long long dev_size;
154 154
155 fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev); 155 fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
156 /* 156 /*
157 * Determine the number of bytes from i_size_read() minus 157 * Determine the number of bytes from i_size_read() minus
158 * one (1) logical sector from underlying struct block_device 158 * one (1) logical sector from underlying struct block_device
159 */ 159 */
160 dev_size = (i_size_read(file->f_mapping->host) - 160 dev_size = (i_size_read(file->f_mapping->host) -
161 fd_dev->fd_block_size); 161 fd_dev->fd_block_size);
162 162
163 pr_debug("FILEIO: Using size: %llu bytes from struct" 163 pr_debug("FILEIO: Using size: %llu bytes from struct"
164 " block_device blocks: %llu logical_block_size: %d\n", 164 " block_device blocks: %llu logical_block_size: %d\n",
165 dev_size, div_u64(dev_size, fd_dev->fd_block_size), 165 dev_size, div_u64(dev_size, fd_dev->fd_block_size),
166 fd_dev->fd_block_size); 166 fd_dev->fd_block_size);
167 /* 167 /*
168 * Check if the underlying struct block_device request_queue supports 168 * Check if the underlying struct block_device request_queue supports
169 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM 169 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
170 * in ATA and we need to set TPE=1 170 * in ATA and we need to set TPE=1
171 */ 171 */
172 if (blk_queue_discard(q)) { 172 if (blk_queue_discard(q)) {
173 dev->dev_attrib.max_unmap_lba_count = 173 dev->dev_attrib.max_unmap_lba_count =
174 q->limits.max_discard_sectors; 174 q->limits.max_discard_sectors;
175 /* 175 /*
176 * Currently hardcoded to 1 in Linux/SCSI code.. 176 * Currently hardcoded to 1 in Linux/SCSI code..
177 */ 177 */
178 dev->dev_attrib.max_unmap_block_desc_count = 1; 178 dev->dev_attrib.max_unmap_block_desc_count = 1;
179 dev->dev_attrib.unmap_granularity = 179 dev->dev_attrib.unmap_granularity =
180 q->limits.discard_granularity >> 9; 180 q->limits.discard_granularity >> 9;
181 dev->dev_attrib.unmap_granularity_alignment = 181 dev->dev_attrib.unmap_granularity_alignment =
182 q->limits.discard_alignment; 182 q->limits.discard_alignment;
183 pr_debug("IFILE: BLOCK Discard support available," 183 pr_debug("IFILE: BLOCK Discard support available,"
184 " disabled by default\n"); 184 " disabled by default\n");
185 } 185 }
186 /* 186 /*
187 * Enable write same emulation for IBLOCK and use 0xFFFF as 187 * Enable write same emulation for IBLOCK and use 0xFFFF as
188 * the smaller WRITE_SAME(10) only has a two-byte block count. 188 * the smaller WRITE_SAME(10) only has a two-byte block count.
189 */ 189 */
190 dev->dev_attrib.max_write_same_len = 0xFFFF; 190 dev->dev_attrib.max_write_same_len = 0xFFFF;
191 191
192 if (blk_queue_nonrot(q)) 192 if (blk_queue_nonrot(q))
193 dev->dev_attrib.is_nonrot = 1; 193 dev->dev_attrib.is_nonrot = 1;
194 } else { 194 } else {
195 if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) { 195 if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
196 pr_err("FILEIO: Missing fd_dev_size=" 196 pr_err("FILEIO: Missing fd_dev_size="
197 " parameter, and no backing struct" 197 " parameter, and no backing struct"
198 " block_device\n"); 198 " block_device\n");
199 goto fail; 199 goto fail;
200 } 200 }
201 201
202 fd_dev->fd_block_size = FD_BLOCKSIZE; 202 fd_dev->fd_block_size = FD_BLOCKSIZE;
203 /* 203 /*
204 * Limit UNMAP emulation to 8k Number of LBAs (NoLB) 204 * Limit UNMAP emulation to 8k Number of LBAs (NoLB)
205 */ 205 */
206 dev->dev_attrib.max_unmap_lba_count = 0x2000; 206 dev->dev_attrib.max_unmap_lba_count = 0x2000;
207 /* 207 /*
208 * Currently hardcoded to 1 in Linux/SCSI code.. 208 * Currently hardcoded to 1 in Linux/SCSI code..
209 */ 209 */
210 dev->dev_attrib.max_unmap_block_desc_count = 1; 210 dev->dev_attrib.max_unmap_block_desc_count = 1;
211 dev->dev_attrib.unmap_granularity = 1; 211 dev->dev_attrib.unmap_granularity = 1;
212 dev->dev_attrib.unmap_granularity_alignment = 0; 212 dev->dev_attrib.unmap_granularity_alignment = 0;
213 213
214 /* 214 /*
215 * Limit WRITE_SAME w/ UNMAP=0 emulation to 8k Number of LBAs (NoLB) 215 * Limit WRITE_SAME w/ UNMAP=0 emulation to 8k Number of LBAs (NoLB)
216 * based upon struct iovec limit for vfs_writev() 216 * based upon struct iovec limit for vfs_writev()
217 */ 217 */
218 dev->dev_attrib.max_write_same_len = 0x1000; 218 dev->dev_attrib.max_write_same_len = 0x1000;
219 } 219 }
220 220
221 dev->dev_attrib.hw_block_size = fd_dev->fd_block_size; 221 dev->dev_attrib.hw_block_size = fd_dev->fd_block_size;
222 dev->dev_attrib.max_bytes_per_io = FD_MAX_BYTES; 222 dev->dev_attrib.max_bytes_per_io = FD_MAX_BYTES;
223 dev->dev_attrib.hw_max_sectors = FD_MAX_BYTES / fd_dev->fd_block_size; 223 dev->dev_attrib.hw_max_sectors = FD_MAX_BYTES / fd_dev->fd_block_size;
224 dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH; 224 dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
225 225
226 if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) { 226 if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
227 pr_debug("FILEIO: Forcing setting of emulate_write_cache=1" 227 pr_debug("FILEIO: Forcing setting of emulate_write_cache=1"
228 " with FDBD_HAS_BUFFERED_IO_WCE\n"); 228 " with FDBD_HAS_BUFFERED_IO_WCE\n");
229 dev->dev_attrib.emulate_write_cache = 1; 229 dev->dev_attrib.emulate_write_cache = 1;
230 } 230 }
231 231
232 fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++; 232 fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
233 fd_dev->fd_queue_depth = dev->queue_depth; 233 fd_dev->fd_queue_depth = dev->queue_depth;
234 234
235 pr_debug("CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s," 235 pr_debug("CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
236 " %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id, 236 " %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
237 fd_dev->fd_dev_name, fd_dev->fd_dev_size); 237 fd_dev->fd_dev_name, fd_dev->fd_dev_size);
238 238
239 return 0; 239 return 0;
240 fail: 240 fail:
241 if (fd_dev->fd_file) { 241 if (fd_dev->fd_file) {
242 filp_close(fd_dev->fd_file, NULL); 242 filp_close(fd_dev->fd_file, NULL);
243 fd_dev->fd_file = NULL; 243 fd_dev->fd_file = NULL;
244 } 244 }
245 return ret; 245 return ret;
246 } 246 }
247 247
248 static void fd_free_device(struct se_device *dev) 248 static void fd_free_device(struct se_device *dev)
249 { 249 {
250 struct fd_dev *fd_dev = FD_DEV(dev); 250 struct fd_dev *fd_dev = FD_DEV(dev);
251 251
252 if (fd_dev->fd_file) { 252 if (fd_dev->fd_file) {
253 filp_close(fd_dev->fd_file, NULL); 253 filp_close(fd_dev->fd_file, NULL);
254 fd_dev->fd_file = NULL; 254 fd_dev->fd_file = NULL;
255 } 255 }
256 256
257 kfree(fd_dev); 257 kfree(fd_dev);
258 } 258 }
259 259
260 static int fd_do_prot_rw(struct se_cmd *cmd, struct fd_prot *fd_prot, 260 static int fd_do_prot_rw(struct se_cmd *cmd, struct fd_prot *fd_prot,
261 int is_write) 261 int is_write)
262 { 262 {
263 struct se_device *se_dev = cmd->se_dev; 263 struct se_device *se_dev = cmd->se_dev;
264 struct fd_dev *dev = FD_DEV(se_dev); 264 struct fd_dev *dev = FD_DEV(se_dev);
265 struct file *prot_fd = dev->fd_prot_file; 265 struct file *prot_fd = dev->fd_prot_file;
266 struct scatterlist *sg; 266 struct scatterlist *sg;
267 loff_t pos = (cmd->t_task_lba * se_dev->prot_length); 267 loff_t pos = (cmd->t_task_lba * se_dev->prot_length);
268 unsigned char *buf; 268 unsigned char *buf;
269 u32 prot_size, len, size; 269 u32 prot_size, len, size;
270 int rc, ret = 1, i; 270 int rc, ret = 1, i;
271 271
272 prot_size = (cmd->data_length / se_dev->dev_attrib.block_size) * 272 prot_size = (cmd->data_length / se_dev->dev_attrib.block_size) *
273 se_dev->prot_length; 273 se_dev->prot_length;
274 274
275 if (!is_write) { 275 if (!is_write) {
276 fd_prot->prot_buf = vzalloc(prot_size); 276 fd_prot->prot_buf = vzalloc(prot_size);
277 if (!fd_prot->prot_buf) { 277 if (!fd_prot->prot_buf) {
278 pr_err("Unable to allocate fd_prot->prot_buf\n"); 278 pr_err("Unable to allocate fd_prot->prot_buf\n");
279 return -ENOMEM; 279 return -ENOMEM;
280 } 280 }
281 buf = fd_prot->prot_buf; 281 buf = fd_prot->prot_buf;
282 282
283 fd_prot->prot_sg_nents = cmd->t_prot_nents; 283 fd_prot->prot_sg_nents = cmd->t_prot_nents;
284 fd_prot->prot_sg = kzalloc(sizeof(struct scatterlist) * 284 fd_prot->prot_sg = kzalloc(sizeof(struct scatterlist) *
285 fd_prot->prot_sg_nents, GFP_KERNEL); 285 fd_prot->prot_sg_nents, GFP_KERNEL);
286 if (!fd_prot->prot_sg) { 286 if (!fd_prot->prot_sg) {
287 pr_err("Unable to allocate fd_prot->prot_sg\n"); 287 pr_err("Unable to allocate fd_prot->prot_sg\n");
288 vfree(fd_prot->prot_buf); 288 vfree(fd_prot->prot_buf);
289 return -ENOMEM; 289 return -ENOMEM;
290 } 290 }
291 size = prot_size; 291 size = prot_size;
292 292
293 for_each_sg(fd_prot->prot_sg, sg, fd_prot->prot_sg_nents, i) { 293 for_each_sg(fd_prot->prot_sg, sg, fd_prot->prot_sg_nents, i) {
294 294
295 len = min_t(u32, PAGE_SIZE, size); 295 len = min_t(u32, PAGE_SIZE, size);
296 sg_set_buf(sg, buf, len); 296 sg_set_buf(sg, buf, len);
297 size -= len; 297 size -= len;
298 buf += len; 298 buf += len;
299 } 299 }
300 } 300 }
301 301
302 if (is_write) { 302 if (is_write) {
303 rc = kernel_write(prot_fd, fd_prot->prot_buf, prot_size, pos); 303 rc = kernel_write(prot_fd, fd_prot->prot_buf, prot_size, pos);
304 if (rc < 0 || prot_size != rc) { 304 if (rc < 0 || prot_size != rc) {
305 pr_err("kernel_write() for fd_do_prot_rw failed:" 305 pr_err("kernel_write() for fd_do_prot_rw failed:"
306 " %d\n", rc); 306 " %d\n", rc);
307 ret = -EINVAL; 307 ret = -EINVAL;
308 } 308 }
309 } else { 309 } else {
310 rc = kernel_read(prot_fd, pos, fd_prot->prot_buf, prot_size); 310 rc = kernel_read(prot_fd, pos, fd_prot->prot_buf, prot_size);
311 if (rc < 0) { 311 if (rc < 0) {
312 pr_err("kernel_read() for fd_do_prot_rw failed:" 312 pr_err("kernel_read() for fd_do_prot_rw failed:"
313 " %d\n", rc); 313 " %d\n", rc);
314 ret = -EINVAL; 314 ret = -EINVAL;
315 } 315 }
316 } 316 }
317 317
318 if (is_write || ret < 0) { 318 if (is_write || ret < 0) {
319 kfree(fd_prot->prot_sg); 319 kfree(fd_prot->prot_sg);
320 vfree(fd_prot->prot_buf); 320 vfree(fd_prot->prot_buf);
321 } 321 }
322 322
323 return ret; 323 return ret;
324 } 324 }
325 325
326 static int fd_do_rw(struct se_cmd *cmd, struct scatterlist *sgl, 326 static int fd_do_rw(struct se_cmd *cmd, struct scatterlist *sgl,
327 u32 sgl_nents, int is_write) 327 u32 sgl_nents, int is_write)
328 { 328 {
329 struct se_device *se_dev = cmd->se_dev; 329 struct se_device *se_dev = cmd->se_dev;
330 struct fd_dev *dev = FD_DEV(se_dev); 330 struct fd_dev *dev = FD_DEV(se_dev);
331 struct file *fd = dev->fd_file; 331 struct file *fd = dev->fd_file;
332 struct scatterlist *sg; 332 struct scatterlist *sg;
333 struct iovec *iov; 333 struct iovec *iov;
334 mm_segment_t old_fs; 334 mm_segment_t old_fs;
335 loff_t pos = (cmd->t_task_lba * se_dev->dev_attrib.block_size); 335 loff_t pos = (cmd->t_task_lba * se_dev->dev_attrib.block_size);
336 int ret = 0, i; 336 int ret = 0, i;
337 337
338 iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL); 338 iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL);
339 if (!iov) { 339 if (!iov) {
340 pr_err("Unable to allocate fd_do_readv iov[]\n"); 340 pr_err("Unable to allocate fd_do_readv iov[]\n");
341 return -ENOMEM; 341 return -ENOMEM;
342 } 342 }
343 343
344 for_each_sg(sgl, sg, sgl_nents, i) { 344 for_each_sg(sgl, sg, sgl_nents, i) {
345 iov[i].iov_len = sg->length; 345 iov[i].iov_len = sg->length;
346 iov[i].iov_base = kmap(sg_page(sg)) + sg->offset; 346 iov[i].iov_base = kmap(sg_page(sg)) + sg->offset;
347 } 347 }
348 348
349 old_fs = get_fs(); 349 old_fs = get_fs();
350 set_fs(get_ds()); 350 set_fs(get_ds());
351 351
352 if (is_write) 352 if (is_write)
353 ret = vfs_writev(fd, &iov[0], sgl_nents, &pos); 353 ret = vfs_writev(fd, &iov[0], sgl_nents, &pos);
354 else 354 else
355 ret = vfs_readv(fd, &iov[0], sgl_nents, &pos); 355 ret = vfs_readv(fd, &iov[0], sgl_nents, &pos);
356 356
357 set_fs(old_fs); 357 set_fs(old_fs);
358 358
359 for_each_sg(sgl, sg, sgl_nents, i) 359 for_each_sg(sgl, sg, sgl_nents, i)
360 kunmap(sg_page(sg)); 360 kunmap(sg_page(sg));
361 361
362 kfree(iov); 362 kfree(iov);
363 363
364 if (is_write) { 364 if (is_write) {
365 if (ret < 0 || ret != cmd->data_length) { 365 if (ret < 0 || ret != cmd->data_length) {
366 pr_err("%s() write returned %d\n", __func__, ret); 366 pr_err("%s() write returned %d\n", __func__, ret);
367 return (ret < 0 ? ret : -EINVAL); 367 return (ret < 0 ? ret : -EINVAL);
368 } 368 }
369 } else { 369 } else {
370 /* 370 /*
371 * Return zeros and GOOD status even if the READ did not return 371 * Return zeros and GOOD status even if the READ did not return
372 * the expected virt_size for struct file w/o a backing struct 372 * the expected virt_size for struct file w/o a backing struct
373 * block_device. 373 * block_device.
374 */ 374 */
375 if (S_ISBLK(file_inode(fd)->i_mode)) { 375 if (S_ISBLK(file_inode(fd)->i_mode)) {
376 if (ret < 0 || ret != cmd->data_length) { 376 if (ret < 0 || ret != cmd->data_length) {
377 pr_err("%s() returned %d, expecting %u for " 377 pr_err("%s() returned %d, expecting %u for "
378 "S_ISBLK\n", __func__, ret, 378 "S_ISBLK\n", __func__, ret,
379 cmd->data_length); 379 cmd->data_length);
380 return (ret < 0 ? ret : -EINVAL); 380 return (ret < 0 ? ret : -EINVAL);
381 } 381 }
382 } else { 382 } else {
383 if (ret < 0) { 383 if (ret < 0) {
384 pr_err("%s() returned %d for non S_ISBLK\n", 384 pr_err("%s() returned %d for non S_ISBLK\n",
385 __func__, ret); 385 __func__, ret);
386 return ret; 386 return ret;
387 } 387 }
388 } 388 }
389 } 389 }
390 return 1; 390 return 1;
391 } 391 }
392 392
393 static sense_reason_t 393 static sense_reason_t
394 fd_execute_sync_cache(struct se_cmd *cmd) 394 fd_execute_sync_cache(struct se_cmd *cmd)
395 { 395 {
396 struct se_device *dev = cmd->se_dev; 396 struct se_device *dev = cmd->se_dev;
397 struct fd_dev *fd_dev = FD_DEV(dev); 397 struct fd_dev *fd_dev = FD_DEV(dev);
398 int immed = (cmd->t_task_cdb[1] & 0x2); 398 int immed = (cmd->t_task_cdb[1] & 0x2);
399 loff_t start, end; 399 loff_t start, end;
400 int ret; 400 int ret;
401 401
402 /* 402 /*
403 * If the Immediate bit is set, queue up the GOOD response 403 * If the Immediate bit is set, queue up the GOOD response
404 * for this SYNCHRONIZE_CACHE op 404 * for this SYNCHRONIZE_CACHE op
405 */ 405 */
406 if (immed) 406 if (immed)
407 target_complete_cmd(cmd, SAM_STAT_GOOD); 407 target_complete_cmd(cmd, SAM_STAT_GOOD);
408 408
409 /* 409 /*
410 * Determine if we will be flushing the entire device. 410 * Determine if we will be flushing the entire device.
411 */ 411 */
412 if (cmd->t_task_lba == 0 && cmd->data_length == 0) { 412 if (cmd->t_task_lba == 0 && cmd->data_length == 0) {
413 start = 0; 413 start = 0;
414 end = LLONG_MAX; 414 end = LLONG_MAX;
415 } else { 415 } else {
416 start = cmd->t_task_lba * dev->dev_attrib.block_size; 416 start = cmd->t_task_lba * dev->dev_attrib.block_size;
417 if (cmd->data_length) 417 if (cmd->data_length)
418 end = start + cmd->data_length - 1; 418 end = start + cmd->data_length - 1;
419 else 419 else
420 end = LLONG_MAX; 420 end = LLONG_MAX;
421 } 421 }
422 422
423 ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1); 423 ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
424 if (ret != 0) 424 if (ret != 0)
425 pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret); 425 pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);
426 426
427 if (immed) 427 if (immed)
428 return 0; 428 return 0;
429 429
430 if (ret) 430 if (ret)
431 target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION); 431 target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
432 else 432 else
433 target_complete_cmd(cmd, SAM_STAT_GOOD); 433 target_complete_cmd(cmd, SAM_STAT_GOOD);
434 434
435 return 0; 435 return 0;
436 } 436 }
437 437
438 static unsigned char * 438 static unsigned char *
439 fd_setup_write_same_buf(struct se_cmd *cmd, struct scatterlist *sg, 439 fd_setup_write_same_buf(struct se_cmd *cmd, struct scatterlist *sg,
440 unsigned int len) 440 unsigned int len)
441 { 441 {
442 struct se_device *se_dev = cmd->se_dev; 442 struct se_device *se_dev = cmd->se_dev;
443 unsigned int block_size = se_dev->dev_attrib.block_size; 443 unsigned int block_size = se_dev->dev_attrib.block_size;
444 unsigned int i = 0, end; 444 unsigned int i = 0, end;
445 unsigned char *buf, *p, *kmap_buf; 445 unsigned char *buf, *p, *kmap_buf;
446 446
447 buf = kzalloc(min_t(unsigned int, len, PAGE_SIZE), GFP_KERNEL); 447 buf = kzalloc(min_t(unsigned int, len, PAGE_SIZE), GFP_KERNEL);
448 if (!buf) { 448 if (!buf) {
449 pr_err("Unable to allocate fd_execute_write_same buf\n"); 449 pr_err("Unable to allocate fd_execute_write_same buf\n");
450 return NULL; 450 return NULL;
451 } 451 }
452 452
453 kmap_buf = kmap(sg_page(sg)) + sg->offset; 453 kmap_buf = kmap(sg_page(sg)) + sg->offset;
454 if (!kmap_buf) { 454 if (!kmap_buf) {
455 pr_err("kmap() failed in fd_setup_write_same\n"); 455 pr_err("kmap() failed in fd_setup_write_same\n");
456 kfree(buf); 456 kfree(buf);
457 return NULL; 457 return NULL;
458 } 458 }
459 /* 459 /*
460 * Fill local *buf to contain multiple WRITE_SAME blocks up to 460 * Fill local *buf to contain multiple WRITE_SAME blocks up to
461 * min(len, PAGE_SIZE) 461 * min(len, PAGE_SIZE)
462 */ 462 */
463 p = buf; 463 p = buf;
464 end = min_t(unsigned int, len, PAGE_SIZE); 464 end = min_t(unsigned int, len, PAGE_SIZE);
465 465
466 while (i < end) { 466 while (i < end) {
467 memcpy(p, kmap_buf, block_size); 467 memcpy(p, kmap_buf, block_size);
468 468
469 i += block_size; 469 i += block_size;
470 p += block_size; 470 p += block_size;
471 } 471 }
472 kunmap(sg_page(sg)); 472 kunmap(sg_page(sg));
473 473
474 return buf; 474 return buf;
475 } 475 }
476 476
477 static sense_reason_t 477 static sense_reason_t
478 fd_execute_write_same(struct se_cmd *cmd) 478 fd_execute_write_same(struct se_cmd *cmd)
479 { 479 {
480 struct se_device *se_dev = cmd->se_dev; 480 struct se_device *se_dev = cmd->se_dev;
481 struct fd_dev *fd_dev = FD_DEV(se_dev); 481 struct fd_dev *fd_dev = FD_DEV(se_dev);
482 struct file *f = fd_dev->fd_file; 482 struct file *f = fd_dev->fd_file;
483 struct scatterlist *sg; 483 struct scatterlist *sg;
484 struct iovec *iov; 484 struct iovec *iov;
485 mm_segment_t old_fs; 485 mm_segment_t old_fs;
486 sector_t nolb = sbc_get_write_same_sectors(cmd); 486 sector_t nolb = sbc_get_write_same_sectors(cmd);
487 loff_t pos = cmd->t_task_lba * se_dev->dev_attrib.block_size; 487 loff_t pos = cmd->t_task_lba * se_dev->dev_attrib.block_size;
488 unsigned int len, len_tmp, iov_num; 488 unsigned int len, len_tmp, iov_num;
489 int i, rc; 489 int i, rc;
490 unsigned char *buf; 490 unsigned char *buf;
491 491
492 if (!nolb) { 492 if (!nolb) {
493 target_complete_cmd(cmd, SAM_STAT_GOOD); 493 target_complete_cmd(cmd, SAM_STAT_GOOD);
494 return 0; 494 return 0;
495 } 495 }
496 sg = &cmd->t_data_sg[0]; 496 sg = &cmd->t_data_sg[0];
497 497
498 if (cmd->t_data_nents > 1 || 498 if (cmd->t_data_nents > 1 ||
499 sg->length != cmd->se_dev->dev_attrib.block_size) { 499 sg->length != cmd->se_dev->dev_attrib.block_size) {
500 pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u" 500 pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
501 " block_size: %u\n", cmd->t_data_nents, sg->length, 501 " block_size: %u\n", cmd->t_data_nents, sg->length,
502 cmd->se_dev->dev_attrib.block_size); 502 cmd->se_dev->dev_attrib.block_size);
503 return TCM_INVALID_CDB_FIELD; 503 return TCM_INVALID_CDB_FIELD;
504 } 504 }
505 505
506 len = len_tmp = nolb * se_dev->dev_attrib.block_size; 506 len = len_tmp = nolb * se_dev->dev_attrib.block_size;
507 iov_num = DIV_ROUND_UP(len, PAGE_SIZE); 507 iov_num = DIV_ROUND_UP(len, PAGE_SIZE);
508 508
509 buf = fd_setup_write_same_buf(cmd, sg, len); 509 buf = fd_setup_write_same_buf(cmd, sg, len);
510 if (!buf) 510 if (!buf)
511 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 511 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
512 512
513 iov = vzalloc(sizeof(struct iovec) * iov_num); 513 iov = vzalloc(sizeof(struct iovec) * iov_num);
514 if (!iov) { 514 if (!iov) {
515 pr_err("Unable to allocate fd_execute_write_same iovecs\n"); 515 pr_err("Unable to allocate fd_execute_write_same iovecs\n");
516 kfree(buf); 516 kfree(buf);
517 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 517 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
518 } 518 }
519 /* 519 /*
520 * Map the single fabric received scatterlist block now populated 520 * Map the single fabric received scatterlist block now populated
521 * in *buf into each iovec for I/O submission. 521 * in *buf into each iovec for I/O submission.
522 */ 522 */
523 for (i = 0; i < iov_num; i++) { 523 for (i = 0; i < iov_num; i++) {
524 iov[i].iov_base = buf; 524 iov[i].iov_base = buf;
525 iov[i].iov_len = min_t(unsigned int, len_tmp, PAGE_SIZE); 525 iov[i].iov_len = min_t(unsigned int, len_tmp, PAGE_SIZE);
526 len_tmp -= iov[i].iov_len; 526 len_tmp -= iov[i].iov_len;
527 } 527 }
528 528
529 old_fs = get_fs(); 529 old_fs = get_fs();
530 set_fs(get_ds()); 530 set_fs(get_ds());
531 rc = vfs_writev(f, &iov[0], iov_num, &pos); 531 rc = vfs_writev(f, &iov[0], iov_num, &pos);
532 set_fs(old_fs); 532 set_fs(old_fs);
533 533
534 vfree(iov); 534 vfree(iov);
535 kfree(buf); 535 kfree(buf);
536 536
537 if (rc < 0 || rc != len) { 537 if (rc < 0 || rc != len) {
538 pr_err("vfs_writev() returned %d for write same\n", rc); 538 pr_err("vfs_writev() returned %d for write same\n", rc);
539 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 539 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
540 } 540 }
541 541
542 target_complete_cmd(cmd, SAM_STAT_GOOD); 542 target_complete_cmd(cmd, SAM_STAT_GOOD);
543 return 0; 543 return 0;
544 } 544 }
545 545
546 static sense_reason_t 546 static sense_reason_t
547 fd_do_unmap(struct se_cmd *cmd, void *priv, sector_t lba, sector_t nolb) 547 fd_do_unmap(struct se_cmd *cmd, void *priv, sector_t lba, sector_t nolb)
548 { 548 {
549 struct file *file = priv; 549 struct file *file = priv;
550 struct inode *inode = file->f_mapping->host; 550 struct inode *inode = file->f_mapping->host;
551 int ret; 551 int ret;
552 552
553 if (S_ISBLK(inode->i_mode)) { 553 if (S_ISBLK(inode->i_mode)) {
554 /* The backend is block device, use discard */ 554 /* The backend is block device, use discard */
555 struct block_device *bdev = inode->i_bdev; 555 struct block_device *bdev = inode->i_bdev;
556 556
557 ret = blkdev_issue_discard(bdev, lba, 557 ret = blkdev_issue_discard(bdev, lba,
558 nolb, GFP_KERNEL, 0); 558 nolb, GFP_KERNEL, 0);
559 if (ret < 0) { 559 if (ret < 0) {
560 pr_warn("FILEIO: blkdev_issue_discard() failed: %d\n", 560 pr_warn("FILEIO: blkdev_issue_discard() failed: %d\n",
561 ret); 561 ret);
562 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 562 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
563 } 563 }
564 } else { 564 } else {
565 /* The backend is normal file, use fallocate */ 565 /* The backend is normal file, use fallocate */
566 struct se_device *se_dev = cmd->se_dev; 566 struct se_device *se_dev = cmd->se_dev;
567 loff_t pos = lba * se_dev->dev_attrib.block_size; 567 loff_t pos = lba * se_dev->dev_attrib.block_size;
568 unsigned int len = nolb * se_dev->dev_attrib.block_size; 568 unsigned int len = nolb * se_dev->dev_attrib.block_size;
569 int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE; 569 int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
570 570
571 if (!file->f_op->fallocate) 571 if (!file->f_op->fallocate)
572 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 572 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
573 573
574 ret = file->f_op->fallocate(file, mode, pos, len); 574 ret = file->f_op->fallocate(file, mode, pos, len);
575 if (ret < 0) { 575 if (ret < 0) {
576 pr_warn("FILEIO: fallocate() failed: %d\n", ret); 576 pr_warn("FILEIO: fallocate() failed: %d\n", ret);
577 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 577 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
578 } 578 }
579 } 579 }
580 580
581 return 0; 581 return 0;
582 } 582 }
583 583
584 static sense_reason_t 584 static sense_reason_t
585 fd_execute_write_same_unmap(struct se_cmd *cmd) 585 fd_execute_write_same_unmap(struct se_cmd *cmd)
586 { 586 {
587 struct se_device *se_dev = cmd->se_dev; 587 struct se_device *se_dev = cmd->se_dev;
588 struct fd_dev *fd_dev = FD_DEV(se_dev); 588 struct fd_dev *fd_dev = FD_DEV(se_dev);
589 struct file *file = fd_dev->fd_file; 589 struct file *file = fd_dev->fd_file;
590 sector_t lba = cmd->t_task_lba; 590 sector_t lba = cmd->t_task_lba;
591 sector_t nolb = sbc_get_write_same_sectors(cmd); 591 sector_t nolb = sbc_get_write_same_sectors(cmd);
592 int ret; 592 int ret;
593 593
594 if (!nolb) { 594 if (!nolb) {
595 target_complete_cmd(cmd, SAM_STAT_GOOD); 595 target_complete_cmd(cmd, SAM_STAT_GOOD);
596 return 0; 596 return 0;
597 } 597 }
598 598
599 ret = fd_do_unmap(cmd, file, lba, nolb); 599 ret = fd_do_unmap(cmd, file, lba, nolb);
600 if (ret) 600 if (ret)
601 return ret; 601 return ret;
602 602
603 target_complete_cmd(cmd, GOOD); 603 target_complete_cmd(cmd, GOOD);
604 return 0; 604 return 0;
605 } 605 }
606 606
607 static sense_reason_t 607 static sense_reason_t
608 fd_execute_unmap(struct se_cmd *cmd) 608 fd_execute_unmap(struct se_cmd *cmd)
609 { 609 {
610 struct file *file = FD_DEV(cmd->se_dev)->fd_file; 610 struct file *file = FD_DEV(cmd->se_dev)->fd_file;
611 611
612 return sbc_execute_unmap(cmd, fd_do_unmap, file); 612 return sbc_execute_unmap(cmd, fd_do_unmap, file);
613 } 613 }
614 614
615 static sense_reason_t 615 static sense_reason_t
616 fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, 616 fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
617 enum dma_data_direction data_direction) 617 enum dma_data_direction data_direction)
618 { 618 {
619 struct se_device *dev = cmd->se_dev; 619 struct se_device *dev = cmd->se_dev;
620 struct fd_prot fd_prot; 620 struct fd_prot fd_prot;
621 sense_reason_t rc; 621 sense_reason_t rc;
622 int ret = 0; 622 int ret = 0;
623 623 /*
624 * We are currently limited by the number of iovecs (2048) per
625 * single vfs_[writev,readv] call.
626 */
627 if (cmd->data_length > FD_MAX_BYTES) {
628 pr_err("FILEIO: Not able to process I/O of %u bytes due to"
629 "FD_MAX_BYTES: %u iovec count limitiation\n",
630 cmd->data_length, FD_MAX_BYTES);
631 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
632 }
624 /* 633 /*
625 * Call vectorized fileio functions to map struct scatterlist 634 * Call vectorized fileio functions to map struct scatterlist
626 * physical memory addresses to struct iovec virtual memory. 635 * physical memory addresses to struct iovec virtual memory.
627 */ 636 */
628 if (data_direction == DMA_FROM_DEVICE) { 637 if (data_direction == DMA_FROM_DEVICE) {
629 memset(&fd_prot, 0, sizeof(struct fd_prot)); 638 memset(&fd_prot, 0, sizeof(struct fd_prot));
630 639
631 if (cmd->prot_type) { 640 if (cmd->prot_type) {
632 ret = fd_do_prot_rw(cmd, &fd_prot, false); 641 ret = fd_do_prot_rw(cmd, &fd_prot, false);
633 if (ret < 0) 642 if (ret < 0)
634 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 643 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
635 } 644 }
636 645
637 ret = fd_do_rw(cmd, sgl, sgl_nents, 0); 646 ret = fd_do_rw(cmd, sgl, sgl_nents, 0);
638 647
639 if (ret > 0 && cmd->prot_type) { 648 if (ret > 0 && cmd->prot_type) {
640 u32 sectors = cmd->data_length / dev->dev_attrib.block_size; 649 u32 sectors = cmd->data_length / dev->dev_attrib.block_size;
641 650
642 rc = sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 651 rc = sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors,
643 0, fd_prot.prot_sg, 0); 652 0, fd_prot.prot_sg, 0);
644 if (rc) { 653 if (rc) {
645 kfree(fd_prot.prot_sg); 654 kfree(fd_prot.prot_sg);
646 vfree(fd_prot.prot_buf); 655 vfree(fd_prot.prot_buf);
647 return rc; 656 return rc;
648 } 657 }
649 kfree(fd_prot.prot_sg); 658 kfree(fd_prot.prot_sg);
650 vfree(fd_prot.prot_buf); 659 vfree(fd_prot.prot_buf);
651 } 660 }
652 } else { 661 } else {
653 memset(&fd_prot, 0, sizeof(struct fd_prot)); 662 memset(&fd_prot, 0, sizeof(struct fd_prot));
654 663
655 if (cmd->prot_type) { 664 if (cmd->prot_type) {
656 u32 sectors = cmd->data_length / dev->dev_attrib.block_size; 665 u32 sectors = cmd->data_length / dev->dev_attrib.block_size;
657 666
658 ret = fd_do_prot_rw(cmd, &fd_prot, false); 667 ret = fd_do_prot_rw(cmd, &fd_prot, false);
659 if (ret < 0) 668 if (ret < 0)
660 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 669 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
661 670
662 rc = sbc_dif_verify_write(cmd, cmd->t_task_lba, sectors, 671 rc = sbc_dif_verify_write(cmd, cmd->t_task_lba, sectors,
663 0, fd_prot.prot_sg, 0); 672 0, fd_prot.prot_sg, 0);
664 if (rc) { 673 if (rc) {
665 kfree(fd_prot.prot_sg); 674 kfree(fd_prot.prot_sg);
666 vfree(fd_prot.prot_buf); 675 vfree(fd_prot.prot_buf);
667 return rc; 676 return rc;
668 } 677 }
669 } 678 }
670 679
671 ret = fd_do_rw(cmd, sgl, sgl_nents, 1); 680 ret = fd_do_rw(cmd, sgl, sgl_nents, 1);
672 /* 681 /*
673 * Perform implicit vfs_fsync_range() for fd_do_writev() ops 682 * Perform implicit vfs_fsync_range() for fd_do_writev() ops
674 * for SCSI WRITEs with Forced Unit Access (FUA) set. 683 * for SCSI WRITEs with Forced Unit Access (FUA) set.
675 * Allow this to happen independent of WCE=0 setting. 684 * Allow this to happen independent of WCE=0 setting.
676 */ 685 */
677 if (ret > 0 && 686 if (ret > 0 &&
678 dev->dev_attrib.emulate_fua_write > 0 && 687 dev->dev_attrib.emulate_fua_write > 0 &&
679 (cmd->se_cmd_flags & SCF_FUA)) { 688 (cmd->se_cmd_flags & SCF_FUA)) {
680 struct fd_dev *fd_dev = FD_DEV(dev); 689 struct fd_dev *fd_dev = FD_DEV(dev);
681 loff_t start = cmd->t_task_lba * 690 loff_t start = cmd->t_task_lba *
682 dev->dev_attrib.block_size; 691 dev->dev_attrib.block_size;
683 loff_t end; 692 loff_t end;
684 693
685 if (cmd->data_length) 694 if (cmd->data_length)
686 end = start + cmd->data_length - 1; 695 end = start + cmd->data_length - 1;
687 else 696 else
688 end = LLONG_MAX; 697 end = LLONG_MAX;
689 698
690 vfs_fsync_range(fd_dev->fd_file, start, end, 1); 699 vfs_fsync_range(fd_dev->fd_file, start, end, 1);
691 } 700 }
692 701
693 if (ret > 0 && cmd->prot_type) { 702 if (ret > 0 && cmd->prot_type) {
694 ret = fd_do_prot_rw(cmd, &fd_prot, true); 703 ret = fd_do_prot_rw(cmd, &fd_prot, true);
695 if (ret < 0) 704 if (ret < 0)
696 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 705 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
697 } 706 }
698 } 707 }
699 708
700 if (ret < 0) { 709 if (ret < 0) {
701 kfree(fd_prot.prot_sg); 710 kfree(fd_prot.prot_sg);
702 vfree(fd_prot.prot_buf); 711 vfree(fd_prot.prot_buf);
703 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 712 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
704 } 713 }
705 714
706 if (ret) 715 if (ret)
707 target_complete_cmd(cmd, SAM_STAT_GOOD); 716 target_complete_cmd(cmd, SAM_STAT_GOOD);
708 return 0; 717 return 0;
709 } 718 }
710 719
711 enum { 720 enum {
712 Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io, Opt_err 721 Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io, Opt_err
713 }; 722 };
714 723
715 static match_table_t tokens = { 724 static match_table_t tokens = {
716 {Opt_fd_dev_name, "fd_dev_name=%s"}, 725 {Opt_fd_dev_name, "fd_dev_name=%s"},
717 {Opt_fd_dev_size, "fd_dev_size=%s"}, 726 {Opt_fd_dev_size, "fd_dev_size=%s"},
718 {Opt_fd_buffered_io, "fd_buffered_io=%d"}, 727 {Opt_fd_buffered_io, "fd_buffered_io=%d"},
719 {Opt_err, NULL} 728 {Opt_err, NULL}
720 }; 729 };
721 730
722 static ssize_t fd_set_configfs_dev_params(struct se_device *dev, 731 static ssize_t fd_set_configfs_dev_params(struct se_device *dev,
723 const char *page, ssize_t count) 732 const char *page, ssize_t count)
724 { 733 {
725 struct fd_dev *fd_dev = FD_DEV(dev); 734 struct fd_dev *fd_dev = FD_DEV(dev);
726 char *orig, *ptr, *arg_p, *opts; 735 char *orig, *ptr, *arg_p, *opts;
727 substring_t args[MAX_OPT_ARGS]; 736 substring_t args[MAX_OPT_ARGS];
728 int ret = 0, arg, token; 737 int ret = 0, arg, token;
729 738
730 opts = kstrdup(page, GFP_KERNEL); 739 opts = kstrdup(page, GFP_KERNEL);
731 if (!opts) 740 if (!opts)
732 return -ENOMEM; 741 return -ENOMEM;
733 742
734 orig = opts; 743 orig = opts;
735 744
736 while ((ptr = strsep(&opts, ",\n")) != NULL) { 745 while ((ptr = strsep(&opts, ",\n")) != NULL) {
737 if (!*ptr) 746 if (!*ptr)
738 continue; 747 continue;
739 748
740 token = match_token(ptr, tokens, args); 749 token = match_token(ptr, tokens, args);
741 switch (token) { 750 switch (token) {
742 case Opt_fd_dev_name: 751 case Opt_fd_dev_name:
743 if (match_strlcpy(fd_dev->fd_dev_name, &args[0], 752 if (match_strlcpy(fd_dev->fd_dev_name, &args[0],
744 FD_MAX_DEV_NAME) == 0) { 753 FD_MAX_DEV_NAME) == 0) {
745 ret = -EINVAL; 754 ret = -EINVAL;
746 break; 755 break;
747 } 756 }
748 pr_debug("FILEIO: Referencing Path: %s\n", 757 pr_debug("FILEIO: Referencing Path: %s\n",
749 fd_dev->fd_dev_name); 758 fd_dev->fd_dev_name);
750 fd_dev->fbd_flags |= FBDF_HAS_PATH; 759 fd_dev->fbd_flags |= FBDF_HAS_PATH;
751 break; 760 break;
752 case Opt_fd_dev_size: 761 case Opt_fd_dev_size:
753 arg_p = match_strdup(&args[0]); 762 arg_p = match_strdup(&args[0]);
754 if (!arg_p) { 763 if (!arg_p) {
755 ret = -ENOMEM; 764 ret = -ENOMEM;
756 break; 765 break;
757 } 766 }
758 ret = kstrtoull(arg_p, 0, &fd_dev->fd_dev_size); 767 ret = kstrtoull(arg_p, 0, &fd_dev->fd_dev_size);
759 kfree(arg_p); 768 kfree(arg_p);
760 if (ret < 0) { 769 if (ret < 0) {
761 pr_err("kstrtoull() failed for" 770 pr_err("kstrtoull() failed for"
762 " fd_dev_size=\n"); 771 " fd_dev_size=\n");
763 goto out; 772 goto out;
764 } 773 }
765 pr_debug("FILEIO: Referencing Size: %llu" 774 pr_debug("FILEIO: Referencing Size: %llu"
766 " bytes\n", fd_dev->fd_dev_size); 775 " bytes\n", fd_dev->fd_dev_size);
767 fd_dev->fbd_flags |= FBDF_HAS_SIZE; 776 fd_dev->fbd_flags |= FBDF_HAS_SIZE;
768 break; 777 break;
769 case Opt_fd_buffered_io: 778 case Opt_fd_buffered_io:
770 ret = match_int(args, &arg); 779 ret = match_int(args, &arg);
771 if (ret) 780 if (ret)
772 goto out; 781 goto out;
773 if (arg != 1) { 782 if (arg != 1) {
774 pr_err("bogus fd_buffered_io=%d value\n", arg); 783 pr_err("bogus fd_buffered_io=%d value\n", arg);
775 ret = -EINVAL; 784 ret = -EINVAL;
776 goto out; 785 goto out;
777 } 786 }
778 787
779 pr_debug("FILEIO: Using buffered I/O" 788 pr_debug("FILEIO: Using buffered I/O"
780 " operations for struct fd_dev\n"); 789 " operations for struct fd_dev\n");
781 790
782 fd_dev->fbd_flags |= FDBD_HAS_BUFFERED_IO_WCE; 791 fd_dev->fbd_flags |= FDBD_HAS_BUFFERED_IO_WCE;
783 break; 792 break;
784 default: 793 default:
785 break; 794 break;
786 } 795 }
787 } 796 }
788 797
789 out: 798 out:
790 kfree(orig); 799 kfree(orig);
791 return (!ret) ? count : ret; 800 return (!ret) ? count : ret;
792 } 801 }
793 802
794 static ssize_t fd_show_configfs_dev_params(struct se_device *dev, char *b) 803 static ssize_t fd_show_configfs_dev_params(struct se_device *dev, char *b)
795 { 804 {
796 struct fd_dev *fd_dev = FD_DEV(dev); 805 struct fd_dev *fd_dev = FD_DEV(dev);
797 ssize_t bl = 0; 806 ssize_t bl = 0;
798 807
799 bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id); 808 bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
800 bl += sprintf(b + bl, " File: %s Size: %llu Mode: %s\n", 809 bl += sprintf(b + bl, " File: %s Size: %llu Mode: %s\n",
801 fd_dev->fd_dev_name, fd_dev->fd_dev_size, 810 fd_dev->fd_dev_name, fd_dev->fd_dev_size,
802 (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) ? 811 (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) ?
803 "Buffered-WCE" : "O_DSYNC"); 812 "Buffered-WCE" : "O_DSYNC");
804 return bl; 813 return bl;
805 } 814 }
806 815
807 static sector_t fd_get_blocks(struct se_device *dev) 816 static sector_t fd_get_blocks(struct se_device *dev)
808 { 817 {
809 struct fd_dev *fd_dev = FD_DEV(dev); 818 struct fd_dev *fd_dev = FD_DEV(dev);
810 struct file *f = fd_dev->fd_file; 819 struct file *f = fd_dev->fd_file;
811 struct inode *i = f->f_mapping->host; 820 struct inode *i = f->f_mapping->host;
812 unsigned long long dev_size; 821 unsigned long long dev_size;
813 /* 822 /*
814 * When using a file that references an underlying struct block_device, 823 * When using a file that references an underlying struct block_device,
815 * ensure dev_size is always based on the current inode size in order 824 * ensure dev_size is always based on the current inode size in order
816 * to handle underlying block_device resize operations. 825 * to handle underlying block_device resize operations.
817 */ 826 */
818 if (S_ISBLK(i->i_mode)) 827 if (S_ISBLK(i->i_mode))
819 dev_size = i_size_read(i); 828 dev_size = i_size_read(i);
820 else 829 else
821 dev_size = fd_dev->fd_dev_size; 830 dev_size = fd_dev->fd_dev_size;
822 831
823 return div_u64(dev_size - dev->dev_attrib.block_size, 832 return div_u64(dev_size - dev->dev_attrib.block_size,
824 dev->dev_attrib.block_size); 833 dev->dev_attrib.block_size);
825 } 834 }
826 835
827 static int fd_init_prot(struct se_device *dev) 836 static int fd_init_prot(struct se_device *dev)
828 { 837 {
829 struct fd_dev *fd_dev = FD_DEV(dev); 838 struct fd_dev *fd_dev = FD_DEV(dev);
830 struct file *prot_file, *file = fd_dev->fd_file; 839 struct file *prot_file, *file = fd_dev->fd_file;
831 struct inode *inode; 840 struct inode *inode;
832 int ret, flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC; 841 int ret, flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
833 char buf[FD_MAX_DEV_PROT_NAME]; 842 char buf[FD_MAX_DEV_PROT_NAME];
834 843
835 if (!file) { 844 if (!file) {
836 pr_err("Unable to locate fd_dev->fd_file\n"); 845 pr_err("Unable to locate fd_dev->fd_file\n");
837 return -ENODEV; 846 return -ENODEV;
838 } 847 }
839 848
840 inode = file->f_mapping->host; 849 inode = file->f_mapping->host;
841 if (S_ISBLK(inode->i_mode)) { 850 if (S_ISBLK(inode->i_mode)) {
842 pr_err("FILEIO Protection emulation only supported on" 851 pr_err("FILEIO Protection emulation only supported on"
843 " !S_ISBLK\n"); 852 " !S_ISBLK\n");
844 return -ENOSYS; 853 return -ENOSYS;
845 } 854 }
846 855
847 if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) 856 if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE)
848 flags &= ~O_DSYNC; 857 flags &= ~O_DSYNC;
849 858
850 snprintf(buf, FD_MAX_DEV_PROT_NAME, "%s.protection", 859 snprintf(buf, FD_MAX_DEV_PROT_NAME, "%s.protection",
851 fd_dev->fd_dev_name); 860 fd_dev->fd_dev_name);
852 861
853 prot_file = filp_open(buf, flags, 0600); 862 prot_file = filp_open(buf, flags, 0600);
854 if (IS_ERR(prot_file)) { 863 if (IS_ERR(prot_file)) {
855 pr_err("filp_open(%s) failed\n", buf); 864 pr_err("filp_open(%s) failed\n", buf);
856 ret = PTR_ERR(prot_file); 865 ret = PTR_ERR(prot_file);
857 return ret; 866 return ret;
858 } 867 }
859 fd_dev->fd_prot_file = prot_file; 868 fd_dev->fd_prot_file = prot_file;
860 869
861 return 0; 870 return 0;
862 } 871 }
863 872
864 static int fd_format_prot(struct se_device *dev) 873 static int fd_format_prot(struct se_device *dev)
865 { 874 {
866 struct fd_dev *fd_dev = FD_DEV(dev); 875 struct fd_dev *fd_dev = FD_DEV(dev);
867 struct file *prot_fd = fd_dev->fd_prot_file; 876 struct file *prot_fd = fd_dev->fd_prot_file;
868 sector_t prot_length, prot; 877 sector_t prot_length, prot;
869 unsigned char *buf; 878 unsigned char *buf;
870 loff_t pos = 0; 879 loff_t pos = 0;
871 int unit_size = FDBD_FORMAT_UNIT_SIZE * dev->dev_attrib.block_size; 880 int unit_size = FDBD_FORMAT_UNIT_SIZE * dev->dev_attrib.block_size;
872 int rc, ret = 0, size, len; 881 int rc, ret = 0, size, len;
873 882
874 if (!dev->dev_attrib.pi_prot_type) { 883 if (!dev->dev_attrib.pi_prot_type) {
875 pr_err("Unable to format_prot while pi_prot_type == 0\n"); 884 pr_err("Unable to format_prot while pi_prot_type == 0\n");
876 return -ENODEV; 885 return -ENODEV;
877 } 886 }
878 if (!prot_fd) { 887 if (!prot_fd) {
879 pr_err("Unable to locate fd_dev->fd_prot_file\n"); 888 pr_err("Unable to locate fd_dev->fd_prot_file\n");
880 return -ENODEV; 889 return -ENODEV;
881 } 890 }
882 891
883 buf = vzalloc(unit_size); 892 buf = vzalloc(unit_size);
884 if (!buf) { 893 if (!buf) {
885 pr_err("Unable to allocate FILEIO prot buf\n"); 894 pr_err("Unable to allocate FILEIO prot buf\n");
886 return -ENOMEM; 895 return -ENOMEM;
887 } 896 }
888 prot_length = (dev->transport->get_blocks(dev) + 1) * dev->prot_length; 897 prot_length = (dev->transport->get_blocks(dev) + 1) * dev->prot_length;
889 size = prot_length; 898 size = prot_length;
890 899
891 pr_debug("Using FILEIO prot_length: %llu\n", 900 pr_debug("Using FILEIO prot_length: %llu\n",
892 (unsigned long long)prot_length); 901 (unsigned long long)prot_length);
893 902
894 memset(buf, 0xff, unit_size); 903 memset(buf, 0xff, unit_size);
895 for (prot = 0; prot < prot_length; prot += unit_size) { 904 for (prot = 0; prot < prot_length; prot += unit_size) {
896 len = min(unit_size, size); 905 len = min(unit_size, size);
897 rc = kernel_write(prot_fd, buf, len, pos); 906 rc = kernel_write(prot_fd, buf, len, pos);
898 if (rc != len) { 907 if (rc != len) {
899 pr_err("vfs_write to prot file failed: %d\n", rc); 908 pr_err("vfs_write to prot file failed: %d\n", rc);
900 ret = -ENODEV; 909 ret = -ENODEV;
901 goto out; 910 goto out;
902 } 911 }
903 pos += len; 912 pos += len;
904 size -= len; 913 size -= len;
905 } 914 }
906 915
907 out: 916 out:
908 vfree(buf); 917 vfree(buf);
909 return ret; 918 return ret;
910 } 919 }
911 920
912 static void fd_free_prot(struct se_device *dev) 921 static void fd_free_prot(struct se_device *dev)
913 { 922 {
914 struct fd_dev *fd_dev = FD_DEV(dev); 923 struct fd_dev *fd_dev = FD_DEV(dev);
915 924
916 if (!fd_dev->fd_prot_file) 925 if (!fd_dev->fd_prot_file)
917 return; 926 return;
918 927
919 filp_close(fd_dev->fd_prot_file, NULL); 928 filp_close(fd_dev->fd_prot_file, NULL);
920 fd_dev->fd_prot_file = NULL; 929 fd_dev->fd_prot_file = NULL;
921 } 930 }
922 931
923 static struct sbc_ops fd_sbc_ops = { 932 static struct sbc_ops fd_sbc_ops = {
924 .execute_rw = fd_execute_rw, 933 .execute_rw = fd_execute_rw,
925 .execute_sync_cache = fd_execute_sync_cache, 934 .execute_sync_cache = fd_execute_sync_cache,
926 .execute_write_same = fd_execute_write_same, 935 .execute_write_same = fd_execute_write_same,
927 .execute_write_same_unmap = fd_execute_write_same_unmap, 936 .execute_write_same_unmap = fd_execute_write_same_unmap,
928 .execute_unmap = fd_execute_unmap, 937 .execute_unmap = fd_execute_unmap,
929 }; 938 };
930 939
931 static sense_reason_t 940 static sense_reason_t
932 fd_parse_cdb(struct se_cmd *cmd) 941 fd_parse_cdb(struct se_cmd *cmd)
933 { 942 {
934 return sbc_parse_cdb(cmd, &fd_sbc_ops); 943 return sbc_parse_cdb(cmd, &fd_sbc_ops);
935 } 944 }
936 945
937 static struct se_subsystem_api fileio_template = { 946 static struct se_subsystem_api fileio_template = {
938 .name = "fileio", 947 .name = "fileio",
939 .inquiry_prod = "FILEIO", 948 .inquiry_prod = "FILEIO",
940 .inquiry_rev = FD_VERSION, 949 .inquiry_rev = FD_VERSION,
941 .owner = THIS_MODULE, 950 .owner = THIS_MODULE,
942 .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV, 951 .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV,
943 .attach_hba = fd_attach_hba, 952 .attach_hba = fd_attach_hba,
944 .detach_hba = fd_detach_hba, 953 .detach_hba = fd_detach_hba,
945 .alloc_device = fd_alloc_device, 954 .alloc_device = fd_alloc_device,
946 .configure_device = fd_configure_device, 955 .configure_device = fd_configure_device,
947 .free_device = fd_free_device, 956 .free_device = fd_free_device,
948 .parse_cdb = fd_parse_cdb, 957 .parse_cdb = fd_parse_cdb,
949 .set_configfs_dev_params = fd_set_configfs_dev_params, 958 .set_configfs_dev_params = fd_set_configfs_dev_params,
950 .show_configfs_dev_params = fd_show_configfs_dev_params, 959 .show_configfs_dev_params = fd_show_configfs_dev_params,
951 .get_device_type = sbc_get_device_type, 960 .get_device_type = sbc_get_device_type,
952 .get_blocks = fd_get_blocks, 961 .get_blocks = fd_get_blocks,
953 .init_prot = fd_init_prot, 962 .init_prot = fd_init_prot,
954 .format_prot = fd_format_prot, 963 .format_prot = fd_format_prot,
955 .free_prot = fd_free_prot, 964 .free_prot = fd_free_prot,
956 }; 965 };
957 966
958 static int __init fileio_module_init(void) 967 static int __init fileio_module_init(void)
959 { 968 {
960 return transport_subsystem_register(&fileio_template); 969 return transport_subsystem_register(&fileio_template);
961 } 970 }
962 971
963 static void __exit fileio_module_exit(void) 972 static void __exit fileio_module_exit(void)
964 { 973 {
965 transport_subsystem_release(&fileio_template); 974 transport_subsystem_release(&fileio_template);
966 } 975 }
967 976
968 MODULE_DESCRIPTION("TCM FILEIO subsystem plugin"); 977 MODULE_DESCRIPTION("TCM FILEIO subsystem plugin");
969 MODULE_AUTHOR("nab@Linux-iSCSI.org"); 978 MODULE_AUTHOR("nab@Linux-iSCSI.org");
970 MODULE_LICENSE("GPL"); 979 MODULE_LICENSE("GPL");
971 980
972 module_init(fileio_module_init); 981 module_init(fileio_module_init);
973 module_exit(fileio_module_exit); 982 module_exit(fileio_module_exit);
974 983
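Note on the fd_execute_rw() hunk above: the per-command byte cap exists because a single vfs_[writev,readv] call accepts only a limited number of iovecs, and FILEIO builds one iovec per scatterlist entry. A minimal standalone sketch of that relationship, assuming the 2048-iovec ceiling named in the comment and 4 KB pages; the names below are illustrative only (the real constant is FD_MAX_BYTES in target_core_file.h):

	/* Illustrative only: the byte cap is just (iovec ceiling * page size). */
	#define EXAMPLE_MAX_IOVECS	2048	/* assumed per-vfs_[writev,readv] limit */
	#define EXAMPLE_PAGE_SIZE	4096	/* assumed 4 KB pages */
	#define EXAMPLE_MAX_BYTES	(EXAMPLE_MAX_IOVECS * EXAMPLE_PAGE_SIZE)	/* 8 MB */

	/* Mirrors the check added to fd_execute_rw(): reject oversized I/O, never truncate. */
	static inline int example_cmd_fits(unsigned int data_length)
	{
		return data_length <= EXAMPLE_MAX_BYTES;
	}
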
drivers/target/target_core_iblock.c
1 /******************************************************************************* 1 /*******************************************************************************
2 * Filename: target_core_iblock.c 2 * Filename: target_core_iblock.c
3 * 3 *
4 * This file contains the Storage Engine <-> Linux BlockIO transport 4 * This file contains the Storage Engine <-> Linux BlockIO transport
5 * specific functions. 5 * specific functions.
6 * 6 *
7 * (c) Copyright 2003-2013 Datera, Inc. 7 * (c) Copyright 2003-2013 Datera, Inc.
8 * 8 *
9 * Nicholas A. Bellinger <nab@kernel.org> 9 * Nicholas A. Bellinger <nab@kernel.org>
10 * 10 *
11 * This program is free software; you can redistribute it and/or modify 11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by 12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or 13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version. 14 * (at your option) any later version.
15 * 15 *
16 * This program is distributed in the hope that it will be useful, 16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details. 19 * GNU General Public License for more details.
20 * 20 *
21 * You should have received a copy of the GNU General Public License 21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software 22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 23 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 * 24 *
25 ******************************************************************************/ 25 ******************************************************************************/
26 26
27 #include <linux/string.h> 27 #include <linux/string.h>
28 #include <linux/parser.h> 28 #include <linux/parser.h>
29 #include <linux/timer.h> 29 #include <linux/timer.h>
30 #include <linux/fs.h> 30 #include <linux/fs.h>
31 #include <linux/blkdev.h> 31 #include <linux/blkdev.h>
32 #include <linux/slab.h> 32 #include <linux/slab.h>
33 #include <linux/spinlock.h> 33 #include <linux/spinlock.h>
34 #include <linux/bio.h> 34 #include <linux/bio.h>
35 #include <linux/genhd.h> 35 #include <linux/genhd.h>
36 #include <linux/file.h> 36 #include <linux/file.h>
37 #include <linux/module.h> 37 #include <linux/module.h>
38 #include <scsi/scsi.h> 38 #include <scsi/scsi.h>
39 #include <scsi/scsi_host.h> 39 #include <scsi/scsi_host.h>
40 #include <asm/unaligned.h> 40 #include <asm/unaligned.h>
41 41
42 #include <target/target_core_base.h> 42 #include <target/target_core_base.h>
43 #include <target/target_core_backend.h> 43 #include <target/target_core_backend.h>
44 44
45 #include "target_core_iblock.h" 45 #include "target_core_iblock.h"
46 46
47 #define IBLOCK_MAX_BIO_PER_TASK 32 /* max # of bios to submit at a time */ 47 #define IBLOCK_MAX_BIO_PER_TASK 32 /* max # of bios to submit at a time */
48 #define IBLOCK_BIO_POOL_SIZE 128 48 #define IBLOCK_BIO_POOL_SIZE 128
49 49
50 static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev) 50 static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
51 { 51 {
52 return container_of(dev, struct iblock_dev, dev); 52 return container_of(dev, struct iblock_dev, dev);
53 } 53 }
54 54
55 55
56 static struct se_subsystem_api iblock_template; 56 static struct se_subsystem_api iblock_template;
57 57
58 /* iblock_attach_hba(): (Part of se_subsystem_api_t template) 58 /* iblock_attach_hba(): (Part of se_subsystem_api_t template)
59 * 59 *
60 * 60 *
61 */ 61 */
62 static int iblock_attach_hba(struct se_hba *hba, u32 host_id) 62 static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
63 { 63 {
64 pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on" 64 pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
65 " Generic Target Core Stack %s\n", hba->hba_id, 65 " Generic Target Core Stack %s\n", hba->hba_id,
66 IBLOCK_VERSION, TARGET_CORE_MOD_VERSION); 66 IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);
67 return 0; 67 return 0;
68 } 68 }
69 69
70 static void iblock_detach_hba(struct se_hba *hba) 70 static void iblock_detach_hba(struct se_hba *hba)
71 { 71 {
72 } 72 }
73 73
74 static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *name) 74 static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *name)
75 { 75 {
76 struct iblock_dev *ib_dev = NULL; 76 struct iblock_dev *ib_dev = NULL;
77 77
78 ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL); 78 ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
79 if (!ib_dev) { 79 if (!ib_dev) {
80 pr_err("Unable to allocate struct iblock_dev\n"); 80 pr_err("Unable to allocate struct iblock_dev\n");
81 return NULL; 81 return NULL;
82 } 82 }
83 83
84 pr_debug( "IBLOCK: Allocated ib_dev for %s\n", name); 84 pr_debug( "IBLOCK: Allocated ib_dev for %s\n", name);
85 85
86 return &ib_dev->dev; 86 return &ib_dev->dev;
87 } 87 }
88 88
89 static int iblock_configure_device(struct se_device *dev) 89 static int iblock_configure_device(struct se_device *dev)
90 { 90 {
91 struct iblock_dev *ib_dev = IBLOCK_DEV(dev); 91 struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
92 struct request_queue *q; 92 struct request_queue *q;
93 struct block_device *bd = NULL; 93 struct block_device *bd = NULL;
94 struct blk_integrity *bi; 94 struct blk_integrity *bi;
95 fmode_t mode; 95 fmode_t mode;
96 int ret = -ENOMEM; 96 int ret = -ENOMEM;
97 97
98 if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) { 98 if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
99 pr_err("Missing udev_path= parameters for IBLOCK\n"); 99 pr_err("Missing udev_path= parameters for IBLOCK\n");
100 return -EINVAL; 100 return -EINVAL;
101 } 101 }
102 102
103 ib_dev->ibd_bio_set = bioset_create(IBLOCK_BIO_POOL_SIZE, 0); 103 ib_dev->ibd_bio_set = bioset_create(IBLOCK_BIO_POOL_SIZE, 0);
104 if (!ib_dev->ibd_bio_set) { 104 if (!ib_dev->ibd_bio_set) {
105 pr_err("IBLOCK: Unable to create bioset\n"); 105 pr_err("IBLOCK: Unable to create bioset\n");
106 goto out; 106 goto out;
107 } 107 }
108 108
109 pr_debug( "IBLOCK: Claiming struct block_device: %s\n", 109 pr_debug( "IBLOCK: Claiming struct block_device: %s\n",
110 ib_dev->ibd_udev_path); 110 ib_dev->ibd_udev_path);
111 111
112 mode = FMODE_READ|FMODE_EXCL; 112 mode = FMODE_READ|FMODE_EXCL;
113 if (!ib_dev->ibd_readonly) 113 if (!ib_dev->ibd_readonly)
114 mode |= FMODE_WRITE; 114 mode |= FMODE_WRITE;
115 115
116 bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev); 116 bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
117 if (IS_ERR(bd)) { 117 if (IS_ERR(bd)) {
118 ret = PTR_ERR(bd); 118 ret = PTR_ERR(bd);
119 goto out_free_bioset; 119 goto out_free_bioset;
120 } 120 }
121 ib_dev->ibd_bd = bd; 121 ib_dev->ibd_bd = bd;
122 122
123 q = bdev_get_queue(bd); 123 q = bdev_get_queue(bd);
124 124
125 dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd); 125 dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
126 dev->dev_attrib.hw_max_sectors = UINT_MAX; 126 dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
127 dev->dev_attrib.hw_queue_depth = q->nr_requests; 127 dev->dev_attrib.hw_queue_depth = q->nr_requests;
128 128
129 /* 129 /*
130 * Check if the underlying struct block_device request_queue supports 130 * Check if the underlying struct block_device request_queue supports
131 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM 131 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
132 * in ATA and we need to set TPE=1 132 * in ATA and we need to set TPE=1
133 */ 133 */
134 if (blk_queue_discard(q)) { 134 if (blk_queue_discard(q)) {
135 dev->dev_attrib.max_unmap_lba_count = 135 dev->dev_attrib.max_unmap_lba_count =
136 q->limits.max_discard_sectors; 136 q->limits.max_discard_sectors;
137 137
138 /* 138 /*
139 * Currently hardcoded to 1 in Linux/SCSI code.. 139 * Currently hardcoded to 1 in Linux/SCSI code..
140 */ 140 */
141 dev->dev_attrib.max_unmap_block_desc_count = 1; 141 dev->dev_attrib.max_unmap_block_desc_count = 1;
142 dev->dev_attrib.unmap_granularity = 142 dev->dev_attrib.unmap_granularity =
143 q->limits.discard_granularity >> 9; 143 q->limits.discard_granularity >> 9;
144 dev->dev_attrib.unmap_granularity_alignment = 144 dev->dev_attrib.unmap_granularity_alignment =
145 q->limits.discard_alignment; 145 q->limits.discard_alignment;
146 146
147 pr_debug("IBLOCK: BLOCK Discard support available," 147 pr_debug("IBLOCK: BLOCK Discard support available,"
148 " disabled by default\n"); 148 " disabled by default\n");
149 } 149 }
150 /* 150 /*
151 * Enable write same emulation for IBLOCK and use 0xFFFF as 151 * Enable write same emulation for IBLOCK and use 0xFFFF as
152 * the smaller WRITE_SAME(10) only has a two-byte block count. 152 * the smaller WRITE_SAME(10) only has a two-byte block count.
153 */ 153 */
154 dev->dev_attrib.max_write_same_len = 0xFFFF; 154 dev->dev_attrib.max_write_same_len = 0xFFFF;
155 155
156 if (blk_queue_nonrot(q)) 156 if (blk_queue_nonrot(q))
157 dev->dev_attrib.is_nonrot = 1; 157 dev->dev_attrib.is_nonrot = 1;
158 158
159 bi = bdev_get_integrity(bd); 159 bi = bdev_get_integrity(bd);
160 if (bi) { 160 if (bi) {
161 struct bio_set *bs = ib_dev->ibd_bio_set; 161 struct bio_set *bs = ib_dev->ibd_bio_set;
162 162
163 if (!strcmp(bi->name, "T10-DIF-TYPE3-IP") || 163 if (!strcmp(bi->name, "T10-DIF-TYPE3-IP") ||
164 !strcmp(bi->name, "T10-DIF-TYPE1-IP")) { 164 !strcmp(bi->name, "T10-DIF-TYPE1-IP")) {
165 pr_err("IBLOCK export of blk_integrity: %s not" 165 pr_err("IBLOCK export of blk_integrity: %s not"
166 " supported\n", bi->name); 166 " supported\n", bi->name);
167 ret = -ENOSYS; 167 ret = -ENOSYS;
168 goto out_blkdev_put; 168 goto out_blkdev_put;
169 } 169 }
170 170
171 if (!strcmp(bi->name, "T10-DIF-TYPE3-CRC")) { 171 if (!strcmp(bi->name, "T10-DIF-TYPE3-CRC")) {
172 dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT; 172 dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT;
173 } else if (!strcmp(bi->name, "T10-DIF-TYPE1-CRC")) { 173 } else if (!strcmp(bi->name, "T10-DIF-TYPE1-CRC")) {
174 dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT; 174 dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT;
175 } 175 }
176 176
177 if (dev->dev_attrib.pi_prot_type) { 177 if (dev->dev_attrib.pi_prot_type) {
178 if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) { 178 if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) {
179 pr_err("Unable to allocate bioset for PI\n"); 179 pr_err("Unable to allocate bioset for PI\n");
180 ret = -ENOMEM; 180 ret = -ENOMEM;
181 goto out_blkdev_put; 181 goto out_blkdev_put;
182 } 182 }
183 pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n", 183 pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n",
184 bs->bio_integrity_pool); 184 bs->bio_integrity_pool);
185 } 185 }
186 dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type; 186 dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type;
187 } 187 }
188 188
189 return 0; 189 return 0;
190 190
191 out_blkdev_put: 191 out_blkdev_put:
192 blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); 192 blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
193 out_free_bioset: 193 out_free_bioset:
194 bioset_free(ib_dev->ibd_bio_set); 194 bioset_free(ib_dev->ibd_bio_set);
195 ib_dev->ibd_bio_set = NULL; 195 ib_dev->ibd_bio_set = NULL;
196 out: 196 out:
197 return ret; 197 return ret;
198 } 198 }
199 199
200 static void iblock_free_device(struct se_device *dev) 200 static void iblock_free_device(struct se_device *dev)
201 { 201 {
202 struct iblock_dev *ib_dev = IBLOCK_DEV(dev); 202 struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
203 203
204 if (ib_dev->ibd_bd != NULL) 204 if (ib_dev->ibd_bd != NULL)
205 blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); 205 blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
206 if (ib_dev->ibd_bio_set != NULL) 206 if (ib_dev->ibd_bio_set != NULL)
207 bioset_free(ib_dev->ibd_bio_set); 207 bioset_free(ib_dev->ibd_bio_set);
208 208
209 kfree(ib_dev); 209 kfree(ib_dev);
210 } 210 }
211 211
212 static unsigned long long iblock_emulate_read_cap_with_block_size( 212 static unsigned long long iblock_emulate_read_cap_with_block_size(
213 struct se_device *dev, 213 struct se_device *dev,
214 struct block_device *bd, 214 struct block_device *bd,
215 struct request_queue *q) 215 struct request_queue *q)
216 { 216 {
217 unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode), 217 unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
218 bdev_logical_block_size(bd)) - 1); 218 bdev_logical_block_size(bd)) - 1);
219 u32 block_size = bdev_logical_block_size(bd); 219 u32 block_size = bdev_logical_block_size(bd);
220 220
221 if (block_size == dev->dev_attrib.block_size) 221 if (block_size == dev->dev_attrib.block_size)
222 return blocks_long; 222 return blocks_long;
223 223
224 switch (block_size) { 224 switch (block_size) {
225 case 4096: 225 case 4096:
226 switch (dev->dev_attrib.block_size) { 226 switch (dev->dev_attrib.block_size) {
227 case 2048: 227 case 2048:
228 blocks_long <<= 1; 228 blocks_long <<= 1;
229 break; 229 break;
230 case 1024: 230 case 1024:
231 blocks_long <<= 2; 231 blocks_long <<= 2;
232 break; 232 break;
233 case 512: 233 case 512:
234 blocks_long <<= 3; 234 blocks_long <<= 3;
235 default: 235 default:
236 break; 236 break;
237 } 237 }
238 break; 238 break;
239 case 2048: 239 case 2048:
240 switch (dev->dev_attrib.block_size) { 240 switch (dev->dev_attrib.block_size) {
241 case 4096: 241 case 4096:
242 blocks_long >>= 1; 242 blocks_long >>= 1;
243 break; 243 break;
244 case 1024: 244 case 1024:
245 blocks_long <<= 1; 245 blocks_long <<= 1;
246 break; 246 break;
247 case 512: 247 case 512:
248 blocks_long <<= 2; 248 blocks_long <<= 2;
249 break; 249 break;
250 default: 250 default:
251 break; 251 break;
252 } 252 }
253 break; 253 break;
254 case 1024: 254 case 1024:
255 switch (dev->dev_attrib.block_size) { 255 switch (dev->dev_attrib.block_size) {
256 case 4096: 256 case 4096:
257 blocks_long >>= 2; 257 blocks_long >>= 2;
258 break; 258 break;
259 case 2048: 259 case 2048:
260 blocks_long >>= 1; 260 blocks_long >>= 1;
261 break; 261 break;
262 case 512: 262 case 512:
263 blocks_long <<= 1; 263 blocks_long <<= 1;
264 break; 264 break;
265 default: 265 default:
266 break; 266 break;
267 } 267 }
268 break; 268 break;
269 case 512: 269 case 512:
270 switch (dev->dev_attrib.block_size) { 270 switch (dev->dev_attrib.block_size) {
271 case 4096: 271 case 4096:
272 blocks_long >>= 3; 272 blocks_long >>= 3;
273 break; 273 break;
274 case 2048: 274 case 2048:
275 blocks_long >>= 2; 275 blocks_long >>= 2;
276 break; 276 break;
277 case 1024: 277 case 1024:
278 blocks_long >>= 1; 278 blocks_long >>= 1;
279 break; 279 break;
280 default: 280 default:
281 break; 281 break;
282 } 282 }
283 break; 283 break;
284 default: 284 default:
285 break; 285 break;
286 } 286 }
287 287
288 return blocks_long; 288 return blocks_long;
289 } 289 }
290 290
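The nested switch above amounts to a power-of-two rescaling: the backend block count is shifted left or right by the difference between the log2 of the backend and exported block sizes. A minimal userspace sketch of that equivalence (illustrative only; the helper name and the sample values are hypothetical and not part of this diff):

        /*
         * Sketch of the block-count rescaling performed by the switch
         * ladder above, using ctz as log2 of the power-of-two sizes.
         */
        #include <stdio.h>

        static unsigned long long scale_blocks(unsigned long long blocks,
                                               unsigned int backend_bs,
                                               unsigned int exported_bs)
        {
                int shift = __builtin_ctz(backend_bs) - __builtin_ctz(exported_bs);

                return shift >= 0 ? blocks << shift : blocks >> -shift;
        }

        int main(void)
        {
                /* 4096-byte backend exported as 512-byte blocks: <<= 3 */
                printf("%llu\n", scale_blocks(1000, 4096, 512));  /* 8000 */
                /* 512-byte backend exported as 4096-byte blocks: >>= 3 */
                printf("%llu\n", scale_blocks(8000, 512, 4096));  /* 1000 */
                return 0;
        }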
291 static void iblock_complete_cmd(struct se_cmd *cmd) 291 static void iblock_complete_cmd(struct se_cmd *cmd)
292 { 292 {
293 struct iblock_req *ibr = cmd->priv; 293 struct iblock_req *ibr = cmd->priv;
294 u8 status; 294 u8 status;
295 295
296 if (!atomic_dec_and_test(&ibr->pending)) 296 if (!atomic_dec_and_test(&ibr->pending))
297 return; 297 return;
298 298
299 if (atomic_read(&ibr->ib_bio_err_cnt)) 299 if (atomic_read(&ibr->ib_bio_err_cnt))
300 status = SAM_STAT_CHECK_CONDITION; 300 status = SAM_STAT_CHECK_CONDITION;
301 else 301 else
302 status = SAM_STAT_GOOD; 302 status = SAM_STAT_GOOD;
303 303
304 target_complete_cmd(cmd, status); 304 target_complete_cmd(cmd, status);
305 kfree(ibr); 305 kfree(ibr);
306 } 306 }
307 307
308 static void iblock_bio_done(struct bio *bio, int err) 308 static void iblock_bio_done(struct bio *bio, int err)
309 { 309 {
310 struct se_cmd *cmd = bio->bi_private; 310 struct se_cmd *cmd = bio->bi_private;
311 struct iblock_req *ibr = cmd->priv; 311 struct iblock_req *ibr = cmd->priv;
312 312
313 /* 313 /*
314 * Set -EIO if !BIO_UPTODATE and the passed err is still 0 314 * Set -EIO if !BIO_UPTODATE and the passed err is still 0
315 */ 315 */
316 if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err) 316 if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
317 err = -EIO; 317 err = -EIO;
318 318
319 if (err != 0) { 319 if (err != 0) {
320 pr_err("test_bit(BIO_UPTODATE) failed for bio: %p," 320 pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
321 " err: %d\n", bio, err); 321 " err: %d\n", bio, err);
322 /* 322 /*
323 * Bump the ib_bio_err_cnt and release bio. 323 * Bump the ib_bio_err_cnt and release bio.
324 */ 324 */
325 atomic_inc(&ibr->ib_bio_err_cnt); 325 atomic_inc(&ibr->ib_bio_err_cnt);
326 smp_mb__after_atomic(); 326 smp_mb__after_atomic();
327 } 327 }
328 328
329 bio_put(bio); 329 bio_put(bio);
330 330
331 iblock_complete_cmd(cmd); 331 iblock_complete_cmd(cmd);
332 } 332 }
333 333
334 static struct bio * 334 static struct bio *
335 iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num) 335 iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
336 { 336 {
337 struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev); 337 struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
338 struct bio *bio; 338 struct bio *bio;
339 339
340 /* 340 /*
341 * Only allocate as many vector entries as the bio code allows us to, 341 * Only allocate as many vector entries as the bio code allows us to,
342 * we'll loop later on until we have handled the whole request. 342 * we'll loop later on until we have handled the whole request.
343 */ 343 */
344 if (sg_num > BIO_MAX_PAGES) 344 if (sg_num > BIO_MAX_PAGES)
345 sg_num = BIO_MAX_PAGES; 345 sg_num = BIO_MAX_PAGES;
346 346
347 bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set); 347 bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
348 if (!bio) { 348 if (!bio) {
349 pr_err("Unable to allocate memory for bio\n"); 349 pr_err("Unable to allocate memory for bio\n");
350 return NULL; 350 return NULL;
351 } 351 }
352 352
353 bio->bi_bdev = ib_dev->ibd_bd; 353 bio->bi_bdev = ib_dev->ibd_bd;
354 bio->bi_private = cmd; 354 bio->bi_private = cmd;
355 bio->bi_end_io = &iblock_bio_done; 355 bio->bi_end_io = &iblock_bio_done;
356 bio->bi_iter.bi_sector = lba; 356 bio->bi_iter.bi_sector = lba;
357 357
358 return bio; 358 return bio;
359 } 359 }
360 360
361 static void iblock_submit_bios(struct bio_list *list, int rw) 361 static void iblock_submit_bios(struct bio_list *list, int rw)
362 { 362 {
363 struct blk_plug plug; 363 struct blk_plug plug;
364 struct bio *bio; 364 struct bio *bio;
365 365
366 blk_start_plug(&plug); 366 blk_start_plug(&plug);
367 while ((bio = bio_list_pop(list))) 367 while ((bio = bio_list_pop(list)))
368 submit_bio(rw, bio); 368 submit_bio(rw, bio);
369 blk_finish_plug(&plug); 369 blk_finish_plug(&plug);
370 } 370 }
371 371
372 static void iblock_end_io_flush(struct bio *bio, int err) 372 static void iblock_end_io_flush(struct bio *bio, int err)
373 { 373 {
374 struct se_cmd *cmd = bio->bi_private; 374 struct se_cmd *cmd = bio->bi_private;
375 375
376 if (err) 376 if (err)
377 pr_err("IBLOCK: cache flush failed: %d\n", err); 377 pr_err("IBLOCK: cache flush failed: %d\n", err);
378 378
379 if (cmd) { 379 if (cmd) {
380 if (err) 380 if (err)
381 target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION); 381 target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
382 else 382 else
383 target_complete_cmd(cmd, SAM_STAT_GOOD); 383 target_complete_cmd(cmd, SAM_STAT_GOOD);
384 } 384 }
385 385
386 bio_put(bio); 386 bio_put(bio);
387 } 387 }
388 388
389 /* 389 /*
390 * Implement SYNCHRONIZE CACHE. Note that we can't handle lba ranges and must 390 * Implement SYNCHRONIZE CACHE. Note that we can't handle lba ranges and must
391 * always flush the whole cache. 391 * always flush the whole cache.
392 */ 392 */
393 static sense_reason_t 393 static sense_reason_t
394 iblock_execute_sync_cache(struct se_cmd *cmd) 394 iblock_execute_sync_cache(struct se_cmd *cmd)
395 { 395 {
396 struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev); 396 struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
397 int immed = (cmd->t_task_cdb[1] & 0x2); 397 int immed = (cmd->t_task_cdb[1] & 0x2);
398 struct bio *bio; 398 struct bio *bio;
399 399
400 /* 400 /*
401 * If the Immediate bit is set, queue up the GOOD response 401 * If the Immediate bit is set, queue up the GOOD response
402 * for this SYNCHRONIZE_CACHE op. 402 * for this SYNCHRONIZE_CACHE op.
403 */ 403 */
404 if (immed) 404 if (immed)
405 target_complete_cmd(cmd, SAM_STAT_GOOD); 405 target_complete_cmd(cmd, SAM_STAT_GOOD);
406 406
407 bio = bio_alloc(GFP_KERNEL, 0); 407 bio = bio_alloc(GFP_KERNEL, 0);
408 bio->bi_end_io = iblock_end_io_flush; 408 bio->bi_end_io = iblock_end_io_flush;
409 bio->bi_bdev = ib_dev->ibd_bd; 409 bio->bi_bdev = ib_dev->ibd_bd;
410 if (!immed) 410 if (!immed)
411 bio->bi_private = cmd; 411 bio->bi_private = cmd;
412 submit_bio(WRITE_FLUSH, bio); 412 submit_bio(WRITE_FLUSH, bio);
413 return 0; 413 return 0;
414 } 414 }
415 415
416 static sense_reason_t 416 static sense_reason_t
417 iblock_do_unmap(struct se_cmd *cmd, void *priv, 417 iblock_do_unmap(struct se_cmd *cmd, void *priv,
418 sector_t lba, sector_t nolb) 418 sector_t lba, sector_t nolb)
419 { 419 {
420 struct block_device *bdev = priv; 420 struct block_device *bdev = priv;
421 int ret; 421 int ret;
422 422
423 ret = blkdev_issue_discard(bdev, lba, nolb, GFP_KERNEL, 0); 423 ret = blkdev_issue_discard(bdev, lba, nolb, GFP_KERNEL, 0);
424 if (ret < 0) { 424 if (ret < 0) {
425 pr_err("blkdev_issue_discard() failed: %d\n", ret); 425 pr_err("blkdev_issue_discard() failed: %d\n", ret);
426 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 426 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
427 } 427 }
428 428
429 return 0; 429 return 0;
430 } 430 }
431 431
432 static sense_reason_t 432 static sense_reason_t
433 iblock_execute_unmap(struct se_cmd *cmd) 433 iblock_execute_unmap(struct se_cmd *cmd)
434 { 434 {
435 struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd; 435 struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
436 436
437 return sbc_execute_unmap(cmd, iblock_do_unmap, bdev); 437 return sbc_execute_unmap(cmd, iblock_do_unmap, bdev);
438 } 438 }
439 439
440 static sense_reason_t 440 static sense_reason_t
441 iblock_execute_write_same_unmap(struct se_cmd *cmd) 441 iblock_execute_write_same_unmap(struct se_cmd *cmd)
442 { 442 {
443 struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd; 443 struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
444 sector_t lba = cmd->t_task_lba; 444 sector_t lba = cmd->t_task_lba;
445 sector_t nolb = sbc_get_write_same_sectors(cmd); 445 sector_t nolb = sbc_get_write_same_sectors(cmd);
446 int ret; 446 int ret;
447 447
448 ret = iblock_do_unmap(cmd, bdev, lba, nolb); 448 ret = iblock_do_unmap(cmd, bdev, lba, nolb);
449 if (ret) 449 if (ret)
450 return ret; 450 return ret;
451 451
452 target_complete_cmd(cmd, GOOD); 452 target_complete_cmd(cmd, GOOD);
453 return 0; 453 return 0;
454 } 454 }
455 455
456 static sense_reason_t 456 static sense_reason_t
457 iblock_execute_write_same(struct se_cmd *cmd) 457 iblock_execute_write_same(struct se_cmd *cmd)
458 { 458 {
459 struct iblock_req *ibr; 459 struct iblock_req *ibr;
460 struct scatterlist *sg; 460 struct scatterlist *sg;
461 struct bio *bio; 461 struct bio *bio;
462 struct bio_list list; 462 struct bio_list list;
463 sector_t block_lba = cmd->t_task_lba; 463 sector_t block_lba = cmd->t_task_lba;
464 sector_t sectors = sbc_get_write_same_sectors(cmd); 464 sector_t sectors = sbc_get_write_same_sectors(cmd);
465 465
466 sg = &cmd->t_data_sg[0]; 466 sg = &cmd->t_data_sg[0];
467 467
468 if (cmd->t_data_nents > 1 || 468 if (cmd->t_data_nents > 1 ||
469 sg->length != cmd->se_dev->dev_attrib.block_size) { 469 sg->length != cmd->se_dev->dev_attrib.block_size) {
470 pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u" 470 pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
471 " block_size: %u\n", cmd->t_data_nents, sg->length, 471 " block_size: %u\n", cmd->t_data_nents, sg->length,
472 cmd->se_dev->dev_attrib.block_size); 472 cmd->se_dev->dev_attrib.block_size);
473 return TCM_INVALID_CDB_FIELD; 473 return TCM_INVALID_CDB_FIELD;
474 } 474 }
475 475
476 ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL); 476 ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
477 if (!ibr) 477 if (!ibr)
478 goto fail; 478 goto fail;
479 cmd->priv = ibr; 479 cmd->priv = ibr;
480 480
481 bio = iblock_get_bio(cmd, block_lba, 1); 481 bio = iblock_get_bio(cmd, block_lba, 1);
482 if (!bio) 482 if (!bio)
483 goto fail_free_ibr; 483 goto fail_free_ibr;
484 484
485 bio_list_init(&list); 485 bio_list_init(&list);
486 bio_list_add(&list, bio); 486 bio_list_add(&list, bio);
487 487
488 atomic_set(&ibr->pending, 1); 488 atomic_set(&ibr->pending, 1);
489 489
490 while (sectors) { 490 while (sectors) {
491 while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset) 491 while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
492 != sg->length) { 492 != sg->length) {
493 493
494 bio = iblock_get_bio(cmd, block_lba, 1); 494 bio = iblock_get_bio(cmd, block_lba, 1);
495 if (!bio) 495 if (!bio)
496 goto fail_put_bios; 496 goto fail_put_bios;
497 497
498 atomic_inc(&ibr->pending); 498 atomic_inc(&ibr->pending);
499 bio_list_add(&list, bio); 499 bio_list_add(&list, bio);
500 } 500 }
501 501
502 /* Always in 512 byte units for Linux/Block */ 502 /* Always in 512 byte units for Linux/Block */
503 block_lba += sg->length >> IBLOCK_LBA_SHIFT; 503 block_lba += sg->length >> IBLOCK_LBA_SHIFT;
504 sectors -= 1; 504 sectors -= 1;
505 } 505 }
506 506
507 iblock_submit_bios(&list, WRITE); 507 iblock_submit_bios(&list, WRITE);
508 return 0; 508 return 0;
509 509
510 fail_put_bios: 510 fail_put_bios:
511 while ((bio = bio_list_pop(&list))) 511 while ((bio = bio_list_pop(&list)))
512 bio_put(bio); 512 bio_put(bio);
513 fail_free_ibr: 513 fail_free_ibr:
514 kfree(ibr); 514 kfree(ibr);
515 fail: 515 fail:
516 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 516 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
517 } 517 }
518 518
519 enum { 519 enum {
520 Opt_udev_path, Opt_readonly, Opt_force, Opt_err 520 Opt_udev_path, Opt_readonly, Opt_force, Opt_err
521 }; 521 };
522 522
523 static match_table_t tokens = { 523 static match_table_t tokens = {
524 {Opt_udev_path, "udev_path=%s"}, 524 {Opt_udev_path, "udev_path=%s"},
525 {Opt_readonly, "readonly=%d"}, 525 {Opt_readonly, "readonly=%d"},
526 {Opt_force, "force=%d"}, 526 {Opt_force, "force=%d"},
527 {Opt_err, NULL} 527 {Opt_err, NULL}
528 }; 528 };
529 529
530 static ssize_t iblock_set_configfs_dev_params(struct se_device *dev, 530 static ssize_t iblock_set_configfs_dev_params(struct se_device *dev,
531 const char *page, ssize_t count) 531 const char *page, ssize_t count)
532 { 532 {
533 struct iblock_dev *ib_dev = IBLOCK_DEV(dev); 533 struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
534 char *orig, *ptr, *arg_p, *opts; 534 char *orig, *ptr, *arg_p, *opts;
535 substring_t args[MAX_OPT_ARGS]; 535 substring_t args[MAX_OPT_ARGS];
536 int ret = 0, token; 536 int ret = 0, token;
537 unsigned long tmp_readonly; 537 unsigned long tmp_readonly;
538 538
539 opts = kstrdup(page, GFP_KERNEL); 539 opts = kstrdup(page, GFP_KERNEL);
540 if (!opts) 540 if (!opts)
541 return -ENOMEM; 541 return -ENOMEM;
542 542
543 orig = opts; 543 orig = opts;
544 544
545 while ((ptr = strsep(&opts, ",\n")) != NULL) { 545 while ((ptr = strsep(&opts, ",\n")) != NULL) {
546 if (!*ptr) 546 if (!*ptr)
547 continue; 547 continue;
548 548
549 token = match_token(ptr, tokens, args); 549 token = match_token(ptr, tokens, args);
550 switch (token) { 550 switch (token) {
551 case Opt_udev_path: 551 case Opt_udev_path:
552 if (ib_dev->ibd_bd) { 552 if (ib_dev->ibd_bd) {
553 pr_err("Unable to set udev_path= while" 553 pr_err("Unable to set udev_path= while"
554 " ib_dev->ibd_bd exists\n"); 554 " ib_dev->ibd_bd exists\n");
555 ret = -EEXIST; 555 ret = -EEXIST;
556 goto out; 556 goto out;
557 } 557 }
558 if (match_strlcpy(ib_dev->ibd_udev_path, &args[0], 558 if (match_strlcpy(ib_dev->ibd_udev_path, &args[0],
559 SE_UDEV_PATH_LEN) == 0) { 559 SE_UDEV_PATH_LEN) == 0) {
560 ret = -EINVAL; 560 ret = -EINVAL;
561 break; 561 break;
562 } 562 }
563 pr_debug("IBLOCK: Referencing UDEV path: %s\n", 563 pr_debug("IBLOCK: Referencing UDEV path: %s\n",
564 ib_dev->ibd_udev_path); 564 ib_dev->ibd_udev_path);
565 ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH; 565 ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
566 break; 566 break;
567 case Opt_readonly: 567 case Opt_readonly:
568 arg_p = match_strdup(&args[0]); 568 arg_p = match_strdup(&args[0]);
569 if (!arg_p) { 569 if (!arg_p) {
570 ret = -ENOMEM; 570 ret = -ENOMEM;
571 break; 571 break;
572 } 572 }
573 ret = kstrtoul(arg_p, 0, &tmp_readonly); 573 ret = kstrtoul(arg_p, 0, &tmp_readonly);
574 kfree(arg_p); 574 kfree(arg_p);
575 if (ret < 0) { 575 if (ret < 0) {
576 pr_err("kstrtoul() failed for" 576 pr_err("kstrtoul() failed for"
577 " readonly=\n"); 577 " readonly=\n");
578 goto out; 578 goto out;
579 } 579 }
580 ib_dev->ibd_readonly = tmp_readonly; 580 ib_dev->ibd_readonly = tmp_readonly;
581 pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly); 581 pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly);
582 break; 582 break;
583 case Opt_force: 583 case Opt_force:
584 break; 584 break;
585 default: 585 default:
586 break; 586 break;
587 } 587 }
588 } 588 }
589 589
590 out: 590 out:
591 kfree(orig); 591 kfree(orig);
592 return (!ret) ? count : ret; 592 return (!ret) ? count : ret;
593 } 593 }
594 594
595 static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b) 595 static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
596 { 596 {
597 struct iblock_dev *ib_dev = IBLOCK_DEV(dev); 597 struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
598 struct block_device *bd = ib_dev->ibd_bd; 598 struct block_device *bd = ib_dev->ibd_bd;
599 char buf[BDEVNAME_SIZE]; 599 char buf[BDEVNAME_SIZE];
600 ssize_t bl = 0; 600 ssize_t bl = 0;
601 601
602 if (bd) 602 if (bd)
603 bl += sprintf(b + bl, "iBlock device: %s", 603 bl += sprintf(b + bl, "iBlock device: %s",
604 bdevname(bd, buf)); 604 bdevname(bd, buf));
605 if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH) 605 if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)
606 bl += sprintf(b + bl, " UDEV PATH: %s", 606 bl += sprintf(b + bl, " UDEV PATH: %s",
607 ib_dev->ibd_udev_path); 607 ib_dev->ibd_udev_path);
608 bl += sprintf(b + bl, " readonly: %d\n", ib_dev->ibd_readonly); 608 bl += sprintf(b + bl, " readonly: %d\n", ib_dev->ibd_readonly);
609 609
610 bl += sprintf(b + bl, " "); 610 bl += sprintf(b + bl, " ");
611 if (bd) { 611 if (bd) {
612 bl += sprintf(b + bl, "Major: %d Minor: %d %s\n", 612 bl += sprintf(b + bl, "Major: %d Minor: %d %s\n",
613 MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ? 613 MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
614 "" : (bd->bd_holder == ib_dev) ? 614 "" : (bd->bd_holder == ib_dev) ?
615 "CLAIMED: IBLOCK" : "CLAIMED: OS"); 615 "CLAIMED: IBLOCK" : "CLAIMED: OS");
616 } else { 616 } else {
617 bl += sprintf(b + bl, "Major: 0 Minor: 0\n"); 617 bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
618 } 618 }
619 619
620 return bl; 620 return bl;
621 } 621 }
622 622
623 static int 623 static int
624 iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio) 624 iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio)
625 { 625 {
626 struct se_device *dev = cmd->se_dev; 626 struct se_device *dev = cmd->se_dev;
627 struct blk_integrity *bi; 627 struct blk_integrity *bi;
628 struct bio_integrity_payload *bip; 628 struct bio_integrity_payload *bip;
629 struct iblock_dev *ib_dev = IBLOCK_DEV(dev); 629 struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
630 struct scatterlist *sg; 630 struct scatterlist *sg;
631 int i, rc; 631 int i, rc;
632 632
633 bi = bdev_get_integrity(ib_dev->ibd_bd); 633 bi = bdev_get_integrity(ib_dev->ibd_bd);
634 if (!bi) { 634 if (!bi) {
635 pr_err("Unable to locate bio_integrity\n"); 635 pr_err("Unable to locate bio_integrity\n");
636 return -ENODEV; 636 return -ENODEV;
637 } 637 }
638 638
639 bip = bio_integrity_alloc(bio, GFP_NOIO, cmd->t_prot_nents); 639 bip = bio_integrity_alloc(bio, GFP_NOIO, cmd->t_prot_nents);
640 if (!bip) { 640 if (!bip) {
641 pr_err("Unable to allocate bio_integrity_payload\n"); 641 pr_err("Unable to allocate bio_integrity_payload\n");
642 return -ENOMEM; 642 return -ENOMEM;
643 } 643 }
644 644
645 bip->bip_iter.bi_size = (cmd->data_length / dev->dev_attrib.block_size) * 645 bip->bip_iter.bi_size = (cmd->data_length / dev->dev_attrib.block_size) *
646 dev->prot_length; 646 dev->prot_length;
647 bip->bip_iter.bi_sector = bio->bi_iter.bi_sector; 647 bip->bip_iter.bi_sector = bio->bi_iter.bi_sector;
648 648
649 pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size, 649 pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
650 (unsigned long long)bip->bip_iter.bi_sector); 650 (unsigned long long)bip->bip_iter.bi_sector);
651 651
652 for_each_sg(cmd->t_prot_sg, sg, cmd->t_prot_nents, i) { 652 for_each_sg(cmd->t_prot_sg, sg, cmd->t_prot_nents, i) {
653 653
654 rc = bio_integrity_add_page(bio, sg_page(sg), sg->length, 654 rc = bio_integrity_add_page(bio, sg_page(sg), sg->length,
655 sg->offset); 655 sg->offset);
656 if (rc != sg->length) { 656 if (rc != sg->length) {
657 pr_err("bio_integrity_add_page() failed; %d\n", rc); 657 pr_err("bio_integrity_add_page() failed; %d\n", rc);
658 return -ENOMEM; 658 return -ENOMEM;
659 } 659 }
660 660
661 pr_debug("Added bio integrity page: %p length: %d offset; %d\n", 661 pr_debug("Added bio integrity page: %p length: %d offset; %d\n",
662 sg_page(sg), sg->length, sg->offset); 662 sg_page(sg), sg->length, sg->offset);
663 } 663 }
664 664
665 return 0; 665 return 0;
666 } 666 }
667 667
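The bip_iter.bi_size computation above allocates one protection-information tuple per logical block of the data transfer. A minimal sketch of that arithmetic, assuming 512-byte blocks and 8 bytes of T10 PI per block (the values are illustrative, not taken from this diff):

        /* One PI tuple per logical block: (data_length / block_size) * prot_length */
        #include <stdio.h>

        int main(void)
        {
                unsigned int data_length = 65536;  /* bytes of data in the command */
                unsigned int block_size  = 512;    /* exported logical block size */
                unsigned int prot_length = 8;      /* bytes of T10 PI per block */

                unsigned int pi_bytes = (data_length / block_size) * prot_length;

                printf("PI payload: %u bytes for %u blocks\n",
                       pi_bytes, data_length / block_size);  /* 1024 bytes, 128 blocks */
                return 0;
        }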
668 static sense_reason_t 668 static sense_reason_t
669 iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, 669 iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
670 enum dma_data_direction data_direction) 670 enum dma_data_direction data_direction)
671 { 671 {
672 struct se_device *dev = cmd->se_dev; 672 struct se_device *dev = cmd->se_dev;
673 struct iblock_req *ibr; 673 struct iblock_req *ibr;
674 struct bio *bio, *bio_start; 674 struct bio *bio, *bio_start;
675 struct bio_list list; 675 struct bio_list list;
676 struct scatterlist *sg; 676 struct scatterlist *sg;
677 u32 sg_num = sgl_nents; 677 u32 sg_num = sgl_nents;
678 sector_t block_lba; 678 sector_t block_lba;
679 unsigned bio_cnt; 679 unsigned bio_cnt;
680 int rw = 0; 680 int rw = 0;
681 int i; 681 int i;
682 682
683 if (data_direction == DMA_TO_DEVICE) { 683 if (data_direction == DMA_TO_DEVICE) {
684 struct iblock_dev *ib_dev = IBLOCK_DEV(dev); 684 struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
685 struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd); 685 struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd);
686 /* 686 /*
687 * Force writethrough using WRITE_FUA if a volatile write cache 687 * Force writethrough using WRITE_FUA if a volatile write cache
688 * is not enabled, or if the initiator set the Force Unit Access bit. 688 * is not enabled, or if the initiator set the Force Unit Access bit.
689 */ 689 */
690 if (q->flush_flags & REQ_FUA) { 690 if (q->flush_flags & REQ_FUA) {
691 if (cmd->se_cmd_flags & SCF_FUA) 691 if (cmd->se_cmd_flags & SCF_FUA)
692 rw = WRITE_FUA; 692 rw = WRITE_FUA;
693 else if (!(q->flush_flags & REQ_FLUSH)) 693 else if (!(q->flush_flags & REQ_FLUSH))
694 rw = WRITE_FUA; 694 rw = WRITE_FUA;
695 else 695 else
696 rw = WRITE; 696 rw = WRITE;
697 } else { 697 } else {
698 rw = WRITE; 698 rw = WRITE;
699 } 699 }
700 } else { 700 } else {
701 rw = READ; 701 rw = READ;
702 } 702 }
703 703
704 /* 704 /*
705 * Convert the blocksize advertised to the initiator to the 512 byte 705 * Convert the blocksize advertised to the initiator to the 512 byte
706 * units unconditionally used by the Linux block layer. 706 * units unconditionally used by the Linux block layer.
707 */ 707 */
708 if (dev->dev_attrib.block_size == 4096) 708 if (dev->dev_attrib.block_size == 4096)
709 block_lba = (cmd->t_task_lba << 3); 709 block_lba = (cmd->t_task_lba << 3);
710 else if (dev->dev_attrib.block_size == 2048) 710 else if (dev->dev_attrib.block_size == 2048)
711 block_lba = (cmd->t_task_lba << 2); 711 block_lba = (cmd->t_task_lba << 2);
712 else if (dev->dev_attrib.block_size == 1024) 712 else if (dev->dev_attrib.block_size == 1024)
713 block_lba = (cmd->t_task_lba << 1); 713 block_lba = (cmd->t_task_lba << 1);
714 else if (dev->dev_attrib.block_size == 512) 714 else if (dev->dev_attrib.block_size == 512)
715 block_lba = cmd->t_task_lba; 715 block_lba = cmd->t_task_lba;
716 else { 716 else {
717 pr_err("Unsupported SCSI -> BLOCK LBA conversion:" 717 pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
718 " %u\n", dev->dev_attrib.block_size); 718 " %u\n", dev->dev_attrib.block_size);
719 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 719 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
720 } 720 }
721 721
722 ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL); 722 ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
723 if (!ibr) 723 if (!ibr)
724 goto fail; 724 goto fail;
725 cmd->priv = ibr; 725 cmd->priv = ibr;
726 726
727 if (!sgl_nents) { 727 if (!sgl_nents) {
728 atomic_set(&ibr->pending, 1); 728 atomic_set(&ibr->pending, 1);
729 iblock_complete_cmd(cmd); 729 iblock_complete_cmd(cmd);
730 return 0; 730 return 0;
731 } 731 }
732 732
733 bio = iblock_get_bio(cmd, block_lba, sgl_nents); 733 bio = iblock_get_bio(cmd, block_lba, sgl_nents);
734 if (!bio) 734 if (!bio)
735 goto fail_free_ibr; 735 goto fail_free_ibr;
736 736
737 bio_start = bio; 737 bio_start = bio;
738 bio_list_init(&list); 738 bio_list_init(&list);
739 bio_list_add(&list, bio); 739 bio_list_add(&list, bio);
740 740
741 atomic_set(&ibr->pending, 2); 741 atomic_set(&ibr->pending, 2);
742 bio_cnt = 1; 742 bio_cnt = 1;
743 743
744 for_each_sg(sgl, sg, sgl_nents, i) { 744 for_each_sg(sgl, sg, sgl_nents, i) {
745 /* 745 /*
746 * XXX: if the length the device accepts is shorter than the 746 * XXX: if the length the device accepts is shorter than the
747 * length of the S/G list entry, this will cause an 747 * length of the S/G list entry, this will cause an
748 * endless loop. Better hope no driver uses huge pages. 748 * endless loop. Better hope no driver uses huge pages.
749 */ 749 */
750 while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset) 750 while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
751 != sg->length) { 751 != sg->length) {
752 if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) { 752 if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
753 iblock_submit_bios(&list, rw); 753 iblock_submit_bios(&list, rw);
754 bio_cnt = 0; 754 bio_cnt = 0;
755 } 755 }
756 756
757 bio = iblock_get_bio(cmd, block_lba, sg_num); 757 bio = iblock_get_bio(cmd, block_lba, sg_num);
758 if (!bio) 758 if (!bio)
759 goto fail_put_bios; 759 goto fail_put_bios;
760 760
761 atomic_inc(&ibr->pending); 761 atomic_inc(&ibr->pending);
762 bio_list_add(&list, bio); 762 bio_list_add(&list, bio);
763 bio_cnt++; 763 bio_cnt++;
764 } 764 }
765 765
766 /* Always in 512 byte units for Linux/Block */ 766 /* Always in 512 byte units for Linux/Block */
767 block_lba += sg->length >> IBLOCK_LBA_SHIFT; 767 block_lba += sg->length >> IBLOCK_LBA_SHIFT;
768 sg_num--; 768 sg_num--;
769 } 769 }
770 770
771 if (cmd->prot_type) { 771 if (cmd->prot_type) {
772 int rc = iblock_alloc_bip(cmd, bio_start); 772 int rc = iblock_alloc_bip(cmd, bio_start);
773 if (rc) 773 if (rc)
774 goto fail_put_bios; 774 goto fail_put_bios;
775 } 775 }
776 776
777 iblock_submit_bios(&list, rw); 777 iblock_submit_bios(&list, rw);
778 iblock_complete_cmd(cmd); 778 iblock_complete_cmd(cmd);
779 return 0; 779 return 0;
780 780
781 fail_put_bios: 781 fail_put_bios:
782 while ((bio = bio_list_pop(&list))) 782 while ((bio = bio_list_pop(&list)))
783 bio_put(bio); 783 bio_put(bio);
784 fail_free_ibr: 784 fail_free_ibr:
785 kfree(ibr); 785 kfree(ibr);
786 fail: 786 fail:
787 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 787 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
788 } 788 }
789 789
790 static sector_t iblock_get_blocks(struct se_device *dev) 790 static sector_t iblock_get_blocks(struct se_device *dev)
791 { 791 {
792 struct iblock_dev *ib_dev = IBLOCK_DEV(dev); 792 struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
793 struct block_device *bd = ib_dev->ibd_bd; 793 struct block_device *bd = ib_dev->ibd_bd;
794 struct request_queue *q = bdev_get_queue(bd); 794 struct request_queue *q = bdev_get_queue(bd);
795 795
796 return iblock_emulate_read_cap_with_block_size(dev, bd, q); 796 return iblock_emulate_read_cap_with_block_size(dev, bd, q);
797 } 797 }
798 798
799 static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev) 799 static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev)
800 { 800 {
801 struct iblock_dev *ib_dev = IBLOCK_DEV(dev); 801 struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
802 struct block_device *bd = ib_dev->ibd_bd; 802 struct block_device *bd = ib_dev->ibd_bd;
803 int ret; 803 int ret;
804 804
805 ret = bdev_alignment_offset(bd); 805 ret = bdev_alignment_offset(bd);
806 if (ret == -1) 806 if (ret == -1)
807 return 0; 807 return 0;
808 808
809 /* convert offset-bytes to offset-lbas */ 809 /* convert offset-bytes to offset-lbas */
810 return ret / bdev_logical_block_size(bd); 810 return ret / bdev_logical_block_size(bd);
811 } 811 }
812 812
813 static unsigned int iblock_get_lbppbe(struct se_device *dev) 813 static unsigned int iblock_get_lbppbe(struct se_device *dev)
814 { 814 {
815 struct iblock_dev *ib_dev = IBLOCK_DEV(dev); 815 struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
816 struct block_device *bd = ib_dev->ibd_bd; 816 struct block_device *bd = ib_dev->ibd_bd;
817 int logs_per_phys = bdev_physical_block_size(bd) / bdev_logical_block_size(bd); 817 int logs_per_phys = bdev_physical_block_size(bd) / bdev_logical_block_size(bd);
818 818
819 return ilog2(logs_per_phys); 819 return ilog2(logs_per_phys);
820 } 820 }
821 821
822 static unsigned int iblock_get_io_min(struct se_device *dev) 822 static unsigned int iblock_get_io_min(struct se_device *dev)
823 { 823 {
824 struct iblock_dev *ib_dev = IBLOCK_DEV(dev); 824 struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
825 struct block_device *bd = ib_dev->ibd_bd; 825 struct block_device *bd = ib_dev->ibd_bd;
826 826
827 return bdev_io_min(bd); 827 return bdev_io_min(bd);
828 } 828 }
829 829
830 static unsigned int iblock_get_io_opt(struct se_device *dev) 830 static unsigned int iblock_get_io_opt(struct se_device *dev)
831 { 831 {
832 struct iblock_dev *ib_dev = IBLOCK_DEV(dev); 832 struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
833 struct block_device *bd = ib_dev->ibd_bd; 833 struct block_device *bd = ib_dev->ibd_bd;
834 834
835 return bdev_io_opt(bd); 835 return bdev_io_opt(bd);
836 } 836 }
837 837
838 static struct sbc_ops iblock_sbc_ops = { 838 static struct sbc_ops iblock_sbc_ops = {
839 .execute_rw = iblock_execute_rw, 839 .execute_rw = iblock_execute_rw,
840 .execute_sync_cache = iblock_execute_sync_cache, 840 .execute_sync_cache = iblock_execute_sync_cache,
841 .execute_write_same = iblock_execute_write_same, 841 .execute_write_same = iblock_execute_write_same,
842 .execute_write_same_unmap = iblock_execute_write_same_unmap, 842 .execute_write_same_unmap = iblock_execute_write_same_unmap,
843 .execute_unmap = iblock_execute_unmap, 843 .execute_unmap = iblock_execute_unmap,
844 }; 844 };
845 845
846 static sense_reason_t 846 static sense_reason_t
847 iblock_parse_cdb(struct se_cmd *cmd) 847 iblock_parse_cdb(struct se_cmd *cmd)
848 { 848 {
849 return sbc_parse_cdb(cmd, &iblock_sbc_ops); 849 return sbc_parse_cdb(cmd, &iblock_sbc_ops);
850 } 850 }
851 851
852 static bool iblock_get_write_cache(struct se_device *dev) 852 static bool iblock_get_write_cache(struct se_device *dev)
853 { 853 {
854 struct iblock_dev *ib_dev = IBLOCK_DEV(dev); 854 struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
855 struct block_device *bd = ib_dev->ibd_bd; 855 struct block_device *bd = ib_dev->ibd_bd;
856 struct request_queue *q = bdev_get_queue(bd); 856 struct request_queue *q = bdev_get_queue(bd);
857 857
858 return q->flush_flags & REQ_FLUSH; 858 return q->flush_flags & REQ_FLUSH;
859 } 859 }
860 860
861 static struct se_subsystem_api iblock_template = { 861 static struct se_subsystem_api iblock_template = {
862 .name = "iblock", 862 .name = "iblock",
863 .inquiry_prod = "IBLOCK", 863 .inquiry_prod = "IBLOCK",
864 .inquiry_rev = IBLOCK_VERSION, 864 .inquiry_rev = IBLOCK_VERSION,
865 .owner = THIS_MODULE, 865 .owner = THIS_MODULE,
866 .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV, 866 .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV,
867 .attach_hba = iblock_attach_hba, 867 .attach_hba = iblock_attach_hba,
868 .detach_hba = iblock_detach_hba, 868 .detach_hba = iblock_detach_hba,
869 .alloc_device = iblock_alloc_device, 869 .alloc_device = iblock_alloc_device,
870 .configure_device = iblock_configure_device, 870 .configure_device = iblock_configure_device,
871 .free_device = iblock_free_device, 871 .free_device = iblock_free_device,
872 .parse_cdb = iblock_parse_cdb, 872 .parse_cdb = iblock_parse_cdb,
873 .set_configfs_dev_params = iblock_set_configfs_dev_params, 873 .set_configfs_dev_params = iblock_set_configfs_dev_params,
874 .show_configfs_dev_params = iblock_show_configfs_dev_params, 874 .show_configfs_dev_params = iblock_show_configfs_dev_params,
875 .get_device_type = sbc_get_device_type, 875 .get_device_type = sbc_get_device_type,
876 .get_blocks = iblock_get_blocks, 876 .get_blocks = iblock_get_blocks,
877 .get_alignment_offset_lbas = iblock_get_alignment_offset_lbas, 877 .get_alignment_offset_lbas = iblock_get_alignment_offset_lbas,
878 .get_lbppbe = iblock_get_lbppbe, 878 .get_lbppbe = iblock_get_lbppbe,
879 .get_io_min = iblock_get_io_min, 879 .get_io_min = iblock_get_io_min,
880 .get_io_opt = iblock_get_io_opt, 880 .get_io_opt = iblock_get_io_opt,
881 .get_write_cache = iblock_get_write_cache, 881 .get_write_cache = iblock_get_write_cache,
882 }; 882 };
883 883
884 static int __init iblock_module_init(void) 884 static int __init iblock_module_init(void)
885 { 885 {
886 return transport_subsystem_register(&iblock_template); 886 return transport_subsystem_register(&iblock_template);
887 } 887 }
888 888
889 static void __exit iblock_module_exit(void) 889 static void __exit iblock_module_exit(void)
890 { 890 {
891 transport_subsystem_release(&iblock_template); 891 transport_subsystem_release(&iblock_template);
892 } 892 }
893 893
894 MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin"); 894 MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
895 MODULE_AUTHOR("nab@Linux-iSCSI.org"); 895 MODULE_AUTHOR("nab@Linux-iSCSI.org");
896 MODULE_LICENSE("GPL"); 896 MODULE_LICENSE("GPL");
897 897
898 module_init(iblock_module_init); 898 module_init(iblock_module_init);
899 module_exit(iblock_module_exit); 899 module_exit(iblock_module_exit);
900 900
drivers/target/target_core_sbc.c
1 /* 1 /*
2 * SCSI Block Commands (SBC) parsing and emulation. 2 * SCSI Block Commands (SBC) parsing and emulation.
3 * 3 *
4 * (c) Copyright 2002-2013 Datera, Inc. 4 * (c) Copyright 2002-2013 Datera, Inc.
5 * 5 *
6 * Nicholas A. Bellinger <nab@kernel.org> 6 * Nicholas A. Bellinger <nab@kernel.org>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or 10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version. 11 * (at your option) any later version.
12 * 12 *
13 * This program is distributed in the hope that it will be useful, 13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details. 16 * GNU General Public License for more details.
17 * 17 *
18 * You should have received a copy of the GNU General Public License 18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software 19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 20 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 */ 21 */
22 22
23 #include <linux/kernel.h> 23 #include <linux/kernel.h>
24 #include <linux/module.h> 24 #include <linux/module.h>
25 #include <linux/ratelimit.h> 25 #include <linux/ratelimit.h>
26 #include <linux/crc-t10dif.h> 26 #include <linux/crc-t10dif.h>
27 #include <asm/unaligned.h> 27 #include <asm/unaligned.h>
28 #include <scsi/scsi.h> 28 #include <scsi/scsi.h>
29 #include <scsi/scsi_tcq.h> 29 #include <scsi/scsi_tcq.h>
30 30
31 #include <target/target_core_base.h> 31 #include <target/target_core_base.h>
32 #include <target/target_core_backend.h> 32 #include <target/target_core_backend.h>
33 #include <target/target_core_fabric.h> 33 #include <target/target_core_fabric.h>
34 34
35 #include "target_core_internal.h" 35 #include "target_core_internal.h"
36 #include "target_core_ua.h" 36 #include "target_core_ua.h"
37 #include "target_core_alua.h" 37 #include "target_core_alua.h"
38 38
39 static sense_reason_t 39 static sense_reason_t
40 sbc_emulate_readcapacity(struct se_cmd *cmd) 40 sbc_emulate_readcapacity(struct se_cmd *cmd)
41 { 41 {
42 struct se_device *dev = cmd->se_dev; 42 struct se_device *dev = cmd->se_dev;
43 unsigned char *cdb = cmd->t_task_cdb; 43 unsigned char *cdb = cmd->t_task_cdb;
44 unsigned long long blocks_long = dev->transport->get_blocks(dev); 44 unsigned long long blocks_long = dev->transport->get_blocks(dev);
45 unsigned char *rbuf; 45 unsigned char *rbuf;
46 unsigned char buf[8]; 46 unsigned char buf[8];
47 u32 blocks; 47 u32 blocks;
48 48
49 /* 49 /*
50 * SBC-2 says: 50 * SBC-2 says:
51 * If the PMI bit is set to zero and the LOGICAL BLOCK 51 * If the PMI bit is set to zero and the LOGICAL BLOCK
52 * ADDRESS field is not set to zero, the device server shall 52 * ADDRESS field is not set to zero, the device server shall
53 * terminate the command with CHECK CONDITION status with 53 * terminate the command with CHECK CONDITION status with
54 * the sense key set to ILLEGAL REQUEST and the additional 54 * the sense key set to ILLEGAL REQUEST and the additional
55 * sense code set to INVALID FIELD IN CDB. 55 * sense code set to INVALID FIELD IN CDB.
56 * 56 *
57 * In SBC-3, these fields are obsolete, but some SCSI 57 * In SBC-3, these fields are obsolete, but some SCSI
58 * compliance tests actually check this, so we might as well 58 * compliance tests actually check this, so we might as well
59 * follow SBC-2. 59 * follow SBC-2.
60 */ 60 */
61 if (!(cdb[8] & 1) && !!(cdb[2] | cdb[3] | cdb[4] | cdb[5])) 61 if (!(cdb[8] & 1) && !!(cdb[2] | cdb[3] | cdb[4] | cdb[5]))
62 return TCM_INVALID_CDB_FIELD; 62 return TCM_INVALID_CDB_FIELD;
63 63
64 if (blocks_long >= 0x00000000ffffffff) 64 if (blocks_long >= 0x00000000ffffffff)
65 blocks = 0xffffffff; 65 blocks = 0xffffffff;
66 else 66 else
67 blocks = (u32)blocks_long; 67 blocks = (u32)blocks_long;
68 68
69 buf[0] = (blocks >> 24) & 0xff; 69 buf[0] = (blocks >> 24) & 0xff;
70 buf[1] = (blocks >> 16) & 0xff; 70 buf[1] = (blocks >> 16) & 0xff;
71 buf[2] = (blocks >> 8) & 0xff; 71 buf[2] = (blocks >> 8) & 0xff;
72 buf[3] = blocks & 0xff; 72 buf[3] = blocks & 0xff;
73 buf[4] = (dev->dev_attrib.block_size >> 24) & 0xff; 73 buf[4] = (dev->dev_attrib.block_size >> 24) & 0xff;
74 buf[5] = (dev->dev_attrib.block_size >> 16) & 0xff; 74 buf[5] = (dev->dev_attrib.block_size >> 16) & 0xff;
75 buf[6] = (dev->dev_attrib.block_size >> 8) & 0xff; 75 buf[6] = (dev->dev_attrib.block_size >> 8) & 0xff;
76 buf[7] = dev->dev_attrib.block_size & 0xff; 76 buf[7] = dev->dev_attrib.block_size & 0xff;
77 77
78 rbuf = transport_kmap_data_sg(cmd); 78 rbuf = transport_kmap_data_sg(cmd);
79 if (rbuf) { 79 if (rbuf) {
80 memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); 80 memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
81 transport_kunmap_data_sg(cmd); 81 transport_kunmap_data_sg(cmd);
82 } 82 }
83 83
84 target_complete_cmd_with_length(cmd, GOOD, 8); 84 target_complete_cmd_with_length(cmd, GOOD, 8);
85 return 0; 85 return 0;
86 } 86 }
87 87
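The packing above writes the last LBA and the block size big-endian into the 8-byte READ CAPACITY(10) payload, saturating the LBA field at 0xffffffff so that larger devices steer the initiator toward READ CAPACITY(16). A minimal userspace sketch of that saturation and packing (hypothetical values; not part of this diff):

        #include <stdio.h>

        int main(void)
        {
                unsigned long long blocks_long = 0x180000000ULL; /* last LBA > 32 bits */
                unsigned int block_size = 512;
                unsigned int blocks;
                unsigned char buf[8];

                /* Saturate the 32-bit RETURNED LOGICAL BLOCK ADDRESS field */
                blocks = (blocks_long >= 0xffffffffULL) ? 0xffffffff
                                                        : (unsigned int)blocks_long;

                buf[0] = (blocks >> 24) & 0xff;
                buf[1] = (blocks >> 16) & 0xff;
                buf[2] = (blocks >> 8) & 0xff;
                buf[3] = blocks & 0xff;
                buf[4] = (block_size >> 24) & 0xff;
                buf[5] = (block_size >> 16) & 0xff;
                buf[6] = (block_size >> 8) & 0xff;
                buf[7] = block_size & 0xff;

                printf("LBA field: %02x%02x%02x%02x\n",
                       buf[0], buf[1], buf[2], buf[3]);          /* ffffffff */
                printf("block size field: %02x%02x%02x%02x\n",
                       buf[4], buf[5], buf[6], buf[7]);          /* 00000200 */
                return 0;
        }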
88 static sense_reason_t 88 static sense_reason_t
89 sbc_emulate_readcapacity_16(struct se_cmd *cmd) 89 sbc_emulate_readcapacity_16(struct se_cmd *cmd)
90 { 90 {
91 struct se_device *dev = cmd->se_dev; 91 struct se_device *dev = cmd->se_dev;
92 struct se_session *sess = cmd->se_sess; 92 struct se_session *sess = cmd->se_sess;
93 unsigned char *rbuf; 93 unsigned char *rbuf;
94 unsigned char buf[32]; 94 unsigned char buf[32];
95 unsigned long long blocks = dev->transport->get_blocks(dev); 95 unsigned long long blocks = dev->transport->get_blocks(dev);
96 96
97 memset(buf, 0, sizeof(buf)); 97 memset(buf, 0, sizeof(buf));
98 buf[0] = (blocks >> 56) & 0xff; 98 buf[0] = (blocks >> 56) & 0xff;
99 buf[1] = (blocks >> 48) & 0xff; 99 buf[1] = (blocks >> 48) & 0xff;
100 buf[2] = (blocks >> 40) & 0xff; 100 buf[2] = (blocks >> 40) & 0xff;
101 buf[3] = (blocks >> 32) & 0xff; 101 buf[3] = (blocks >> 32) & 0xff;
102 buf[4] = (blocks >> 24) & 0xff; 102 buf[4] = (blocks >> 24) & 0xff;
103 buf[5] = (blocks >> 16) & 0xff; 103 buf[5] = (blocks >> 16) & 0xff;
104 buf[6] = (blocks >> 8) & 0xff; 104 buf[6] = (blocks >> 8) & 0xff;
105 buf[7] = blocks & 0xff; 105 buf[7] = blocks & 0xff;
106 buf[8] = (dev->dev_attrib.block_size >> 24) & 0xff; 106 buf[8] = (dev->dev_attrib.block_size >> 24) & 0xff;
107 buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff; 107 buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff;
108 buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff; 108 buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff;
109 buf[11] = dev->dev_attrib.block_size & 0xff; 109 buf[11] = dev->dev_attrib.block_size & 0xff;
110 /* 110 /*
111 * Set P_TYPE and PROT_EN bits for DIF support 111 * Set P_TYPE and PROT_EN bits for DIF support
112 */ 112 */
113 if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) { 113 if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
114 if (dev->dev_attrib.pi_prot_type) 114 if (dev->dev_attrib.pi_prot_type)
115 buf[12] = (dev->dev_attrib.pi_prot_type - 1) << 1 | 0x1; 115 buf[12] = (dev->dev_attrib.pi_prot_type - 1) << 1 | 0x1;
116 } 116 }
117 117
118 if (dev->transport->get_lbppbe) 118 if (dev->transport->get_lbppbe)
119 buf[13] = dev->transport->get_lbppbe(dev) & 0x0f; 119 buf[13] = dev->transport->get_lbppbe(dev) & 0x0f;
120 120
121 if (dev->transport->get_alignment_offset_lbas) { 121 if (dev->transport->get_alignment_offset_lbas) {
122 u16 lalba = dev->transport->get_alignment_offset_lbas(dev); 122 u16 lalba = dev->transport->get_alignment_offset_lbas(dev);
123 buf[14] = (lalba >> 8) & 0x3f; 123 buf[14] = (lalba >> 8) & 0x3f;
124 buf[15] = lalba & 0xff; 124 buf[15] = lalba & 0xff;
125 } 125 }
126 126
127 /* 127 /*
128 * Set Thin Provisioning Enable bit following sbc3r22 in section 128 * Set Thin Provisioning Enable bit following sbc3r22 in section
129 * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled. 129 * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
130 */ 130 */
131 if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws) 131 if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
132 buf[14] |= 0x80; 132 buf[14] |= 0x80;
133 133
134 rbuf = transport_kmap_data_sg(cmd); 134 rbuf = transport_kmap_data_sg(cmd);
135 if (rbuf) { 135 if (rbuf) {
136 memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); 136 memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
137 transport_kunmap_data_sg(cmd); 137 transport_kunmap_data_sg(cmd);
138 } 138 }
139 139
140 target_complete_cmd_with_length(cmd, GOOD, 32); 140 target_complete_cmd_with_length(cmd, GOOD, 32);
141 return 0; 141 return 0;
142 } 142 }
143 143
144 sector_t sbc_get_write_same_sectors(struct se_cmd *cmd) 144 sector_t sbc_get_write_same_sectors(struct se_cmd *cmd)
145 { 145 {
146 u32 num_blocks; 146 u32 num_blocks;
147 147
148 if (cmd->t_task_cdb[0] == WRITE_SAME) 148 if (cmd->t_task_cdb[0] == WRITE_SAME)
149 num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]); 149 num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]);
150 else if (cmd->t_task_cdb[0] == WRITE_SAME_16) 150 else if (cmd->t_task_cdb[0] == WRITE_SAME_16)
151 num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]); 151 num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
152 else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */ 152 else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */
153 num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]); 153 num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);
154 154
155 /* 155 /*
156 * Use the explicit range when non-zero is supplied, otherwise calculate 156 * Use the explicit range when non-zero is supplied, otherwise calculate
157 * the remaining range based on ->get_blocks() - starting LBA. 157 * the remaining range based on ->get_blocks() - starting LBA.
158 */ 158 */
159 if (num_blocks) 159 if (num_blocks)
160 return num_blocks; 160 return num_blocks;
161 161
162 return cmd->se_dev->transport->get_blocks(cmd->se_dev) - 162 return cmd->se_dev->transport->get_blocks(cmd->se_dev) -
163 cmd->t_task_lba + 1; 163 cmd->t_task_lba + 1;
164 } 164 }
165 EXPORT_SYMBOL(sbc_get_write_same_sectors); 165 EXPORT_SYMBOL(sbc_get_write_same_sectors);
166 166
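A NUMBER OF LOGICAL BLOCKS field of zero is treated above as "from the starting LBA to the end of the medium", with get_blocks() read as the last addressable LBA. A small sketch of that arithmetic (hypothetical values; not part of this diff):

        #include <stdio.h>

        int main(void)
        {
                unsigned long long last_lba  = 1999999; /* transport->get_blocks() */
                unsigned long long start_lba = 1999000; /* cmd->t_task_lba */
                unsigned int num_blocks = 0;            /* count taken from the CDB */

                unsigned long long sectors = num_blocks ? num_blocks
                                                        : last_lba - start_lba + 1;

                printf("WRITE SAME covers %llu blocks\n", sectors); /* 1000 */
                return 0;
        }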
167 static sense_reason_t 167 static sense_reason_t
168 sbc_emulate_noop(struct se_cmd *cmd) 168 sbc_emulate_noop(struct se_cmd *cmd)
169 { 169 {
170 target_complete_cmd(cmd, GOOD); 170 target_complete_cmd(cmd, GOOD);
171 return 0; 171 return 0;
172 } 172 }
173 173
174 static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors) 174 static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
175 { 175 {
176 return cmd->se_dev->dev_attrib.block_size * sectors; 176 return cmd->se_dev->dev_attrib.block_size * sectors;
177 } 177 }
178 178
179 static inline u32 transport_get_sectors_6(unsigned char *cdb) 179 static inline u32 transport_get_sectors_6(unsigned char *cdb)
180 { 180 {
181 /* 181 /*
182 * Use 8-bit sector value. SBC-3 says: 182 * Use 8-bit sector value. SBC-3 says:
183 * 183 *
184 * A TRANSFER LENGTH field set to zero specifies that 256 184 * A TRANSFER LENGTH field set to zero specifies that 256
185 * logical blocks shall be written. Any other value 185 * logical blocks shall be written. Any other value
186 * specifies the number of logical blocks that shall be 186 * specifies the number of logical blocks that shall be
187 * written. 187 * written.
188 */ 188 */
189 return cdb[4] ? : 256; 189 return cdb[4] ? : 256;
190 } 190 }
191 191
192 static inline u32 transport_get_sectors_10(unsigned char *cdb) 192 static inline u32 transport_get_sectors_10(unsigned char *cdb)
193 { 193 {
194 return (u32)(cdb[7] << 8) + cdb[8]; 194 return (u32)(cdb[7] << 8) + cdb[8];
195 } 195 }
196 196
197 static inline u32 transport_get_sectors_12(unsigned char *cdb) 197 static inline u32 transport_get_sectors_12(unsigned char *cdb)
198 { 198 {
199 return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9]; 199 return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
200 } 200 }
201 201
202 static inline u32 transport_get_sectors_16(unsigned char *cdb) 202 static inline u32 transport_get_sectors_16(unsigned char *cdb)
203 { 203 {
204 return (u32)(cdb[10] << 24) + (cdb[11] << 16) + 204 return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
205 (cdb[12] << 8) + cdb[13]; 205 (cdb[12] << 8) + cdb[13];
206 } 206 }
207 207
208 /* 208 /*
209 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants 209 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
210 */ 210 */
211 static inline u32 transport_get_sectors_32(unsigned char *cdb) 211 static inline u32 transport_get_sectors_32(unsigned char *cdb)
212 { 212 {
213 return (u32)(cdb[28] << 24) + (cdb[29] << 16) + 213 return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
214 (cdb[30] << 8) + cdb[31]; 214 (cdb[30] << 8) + cdb[31];
215 215
216 } 216 }
217 217
218 static inline u32 transport_lba_21(unsigned char *cdb) 218 static inline u32 transport_lba_21(unsigned char *cdb)
219 { 219 {
220 return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3]; 220 return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
221 } 221 }
222 222
223 static inline u32 transport_lba_32(unsigned char *cdb) 223 static inline u32 transport_lba_32(unsigned char *cdb)
224 { 224 {
225 return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; 225 return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
226 } 226 }
227 227
228 static inline unsigned long long transport_lba_64(unsigned char *cdb) 228 static inline unsigned long long transport_lba_64(unsigned char *cdb)
229 { 229 {
230 unsigned int __v1, __v2; 230 unsigned int __v1, __v2;
231 231
232 __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; 232 __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
233 __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; 233 __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
234 234
235 return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; 235 return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
236 } 236 }
237 237
238 /* 238 /*
239 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs 239 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
240 */ 240 */
241 static inline unsigned long long transport_lba_64_ext(unsigned char *cdb) 241 static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
242 { 242 {
243 unsigned int __v1, __v2; 243 unsigned int __v1, __v2;
244 244
245 __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15]; 245 __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
246 __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19]; 246 __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];
247 247
248 return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; 248 return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
249 } 249 }
250 250
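The helpers above all assemble big-endian CDB fields byte by byte; transport_lba_64(), for example, reconstructs a 64-bit LBA from bytes 2..9 of the CDB. A minimal userspace sketch of the same extraction (the CDB contents are hypothetical; not part of this diff):

        #include <stdio.h>

        static unsigned long long be64_from(const unsigned char *p)
        {
                unsigned long long hi = ((unsigned int)p[0] << 24) | (p[1] << 16) |
                                        (p[2] << 8) | p[3];
                unsigned long long lo = ((unsigned int)p[4] << 24) | (p[5] << 16) |
                                        (p[6] << 8) | p[7];

                return (hi << 32) | lo;
        }

        int main(void)
        {
                /* READ(16)-style CDB carrying LBA 0x0000000100000200 at bytes 2..9 */
                unsigned char cdb[16] = { 0x88, 0x00,
                                          0x00, 0x00, 0x00, 0x01,
                                          0x00, 0x00, 0x02, 0x00 };

                printf("LBA: 0x%llx\n", be64_from(&cdb[2]));  /* 0x100000200 */
                return 0;
        }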
251 static sense_reason_t 251 static sense_reason_t
252 sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops) 252 sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
253 { 253 {
254 unsigned int sectors = sbc_get_write_same_sectors(cmd); 254 unsigned int sectors = sbc_get_write_same_sectors(cmd);
255 255
256 if ((flags[0] & 0x04) || (flags[0] & 0x02)) { 256 if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
257 pr_err("WRITE_SAME PBDATA and LBDATA" 257 pr_err("WRITE_SAME PBDATA and LBDATA"
258 " bits not supported for Block Discard" 258 " bits not supported for Block Discard"
259 " Emulation\n"); 259 " Emulation\n");
260 return TCM_UNSUPPORTED_SCSI_OPCODE; 260 return TCM_UNSUPPORTED_SCSI_OPCODE;
261 } 261 }
262 if (sectors > cmd->se_dev->dev_attrib.max_write_same_len) { 262 if (sectors > cmd->se_dev->dev_attrib.max_write_same_len) {
263 pr_warn("WRITE_SAME sectors: %u exceeds max_write_same_len: %u\n", 263 pr_warn("WRITE_SAME sectors: %u exceeds max_write_same_len: %u\n",
264 sectors, cmd->se_dev->dev_attrib.max_write_same_len); 264 sectors, cmd->se_dev->dev_attrib.max_write_same_len);
265 return TCM_INVALID_CDB_FIELD; 265 return TCM_INVALID_CDB_FIELD;
266 } 266 }
267 /* We always have ANC_SUP == 0 so setting ANCHOR is always an error */ 267 /* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
268 if (flags[0] & 0x10) { 268 if (flags[0] & 0x10) {
269 pr_warn("WRITE SAME with ANCHOR not supported\n"); 269 pr_warn("WRITE SAME with ANCHOR not supported\n");
270 return TCM_INVALID_CDB_FIELD; 270 return TCM_INVALID_CDB_FIELD;
271 } 271 }
272 /* 272 /*
273 * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting 273 * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
274 * translated into block discard requests within backend code. 274 * translated into block discard requests within backend code.
275 */ 275 */
276 if (flags[0] & 0x08) { 276 if (flags[0] & 0x08) {
277 if (!ops->execute_write_same_unmap) 277 if (!ops->execute_write_same_unmap)
278 return TCM_UNSUPPORTED_SCSI_OPCODE; 278 return TCM_UNSUPPORTED_SCSI_OPCODE;
279 279
280 cmd->execute_cmd = ops->execute_write_same_unmap; 280 cmd->execute_cmd = ops->execute_write_same_unmap;
281 return 0; 281 return 0;
282 } 282 }
283 if (!ops->execute_write_same) 283 if (!ops->execute_write_same)
284 return TCM_UNSUPPORTED_SCSI_OPCODE; 284 return TCM_UNSUPPORTED_SCSI_OPCODE;
285 285
286 cmd->execute_cmd = ops->execute_write_same; 286 cmd->execute_cmd = ops->execute_write_same;
287 return 0; 287 return 0;
288 } 288 }
289 289
290 static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd) 290 static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd)
291 { 291 {
292 unsigned char *buf, *addr; 292 unsigned char *buf, *addr;
293 struct scatterlist *sg; 293 struct scatterlist *sg;
294 unsigned int offset; 294 unsigned int offset;
295 sense_reason_t ret = TCM_NO_SENSE; 295 sense_reason_t ret = TCM_NO_SENSE;
296 int i, count; 296 int i, count;
297 /* 297 /*
298 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command 298 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
299 * 299 *
300 * 1) read the specified logical block(s); 300 * 1) read the specified logical block(s);
301 * 2) transfer logical blocks from the data-out buffer; 301 * 2) transfer logical blocks from the data-out buffer;
302 * 3) XOR the logical blocks transferred from the data-out buffer with 302 * 3) XOR the logical blocks transferred from the data-out buffer with
303 * the logical blocks read, storing the resulting XOR data in a buffer; 303 * the logical blocks read, storing the resulting XOR data in a buffer;
304 * 4) if the DISABLE WRITE bit is set to zero, then write the logical 304 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
305 * blocks transferred from the data-out buffer; and 305 * blocks transferred from the data-out buffer; and
306 * 5) transfer the resulting XOR data to the data-in buffer. 306 * 5) transfer the resulting XOR data to the data-in buffer.
307 */ 307 */
308 buf = kmalloc(cmd->data_length, GFP_KERNEL); 308 buf = kmalloc(cmd->data_length, GFP_KERNEL);
309 if (!buf) { 309 if (!buf) {
310 pr_err("Unable to allocate xor_callback buf\n"); 310 pr_err("Unable to allocate xor_callback buf\n");
311 return TCM_OUT_OF_RESOURCES; 311 return TCM_OUT_OF_RESOURCES;
312 } 312 }
313 /* 313 /*
314 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg 314 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
315 * into the locally allocated *buf 315 * into the locally allocated *buf
316 */ 316 */
317 sg_copy_to_buffer(cmd->t_data_sg, 317 sg_copy_to_buffer(cmd->t_data_sg,
318 cmd->t_data_nents, 318 cmd->t_data_nents,
319 buf, 319 buf,
320 cmd->data_length); 320 cmd->data_length);
321 321
322 /* 322 /*
323 * Now perform the XOR against the BIDI read memory located at 323 * Now perform the XOR against the BIDI read memory located at
324 * cmd->t_mem_bidi_list 324 * cmd->t_mem_bidi_list
325 */ 325 */
326 326
327 offset = 0; 327 offset = 0;
328 for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) { 328 for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
329 addr = kmap_atomic(sg_page(sg)); 329 addr = kmap_atomic(sg_page(sg));
330 if (!addr) { 330 if (!addr) {
331 ret = TCM_OUT_OF_RESOURCES; 331 ret = TCM_OUT_OF_RESOURCES;
332 goto out; 332 goto out;
333 } 333 }
334 334
335 for (i = 0; i < sg->length; i++) 335 for (i = 0; i < sg->length; i++)
336 *(addr + sg->offset + i) ^= *(buf + offset + i); 336 *(addr + sg->offset + i) ^= *(buf + offset + i);
337 337
338 offset += sg->length; 338 offset += sg->length;
339 kunmap_atomic(addr); 339 kunmap_atomic(addr);
340 } 340 }
341 341
342 out: 342 out:
343 kfree(buf); 343 kfree(buf);
344 return ret; 344 return ret;
345 } 345 }
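
The callback above implements step 3 of the XDWRITEREAD sequence: the data-out payload is XORed into the blocks that were just read, and the result is returned in the data-in buffer. A minimal flat-buffer sketch of that merge (illustrative only; the real code walks the BIDI scatterlist):

	/* Illustrative only: XOR the data-out payload into the blocks just read,
	 * producing the XOR data returned in the data-in buffer.
	 */
	#include <stddef.h>

	static void xor_merge(unsigned char *read_buf, const unsigned char *write_buf,
			      size_t len)
	{
		size_t i;

		for (i = 0; i < len; i++)
			read_buf[i] ^= write_buf[i];
	}
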
346 346
347 static sense_reason_t 347 static sense_reason_t
348 sbc_execute_rw(struct se_cmd *cmd) 348 sbc_execute_rw(struct se_cmd *cmd)
349 { 349 {
350 return cmd->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents, 350 return cmd->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents,
351 cmd->data_direction); 351 cmd->data_direction);
352 } 352 }
353 353
354 static sense_reason_t compare_and_write_post(struct se_cmd *cmd) 354 static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
355 { 355 {
356 struct se_device *dev = cmd->se_dev; 356 struct se_device *dev = cmd->se_dev;
357 357
358 /* 358 /*
359 * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through 359 * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through
360 * within target_complete_ok_work() if the command was successfully 360 * within target_complete_ok_work() if the command was successfully
361 * sent to the backend driver. 361 * sent to the backend driver.
362 */ 362 */
363 spin_lock_irq(&cmd->t_state_lock); 363 spin_lock_irq(&cmd->t_state_lock);
364 if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) 364 if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status)
365 cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST; 365 cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
366 spin_unlock_irq(&cmd->t_state_lock); 366 spin_unlock_irq(&cmd->t_state_lock);
367 367
368 /* 368 /*
369 * Unlock ->caw_sem originally obtained during sbc_compare_and_write() 369 * Unlock ->caw_sem originally obtained during sbc_compare_and_write()
370 * before the original READ I/O submission. 370 * before the original READ I/O submission.
371 */ 371 */
372 up(&dev->caw_sem); 372 up(&dev->caw_sem);
373 373
374 return TCM_NO_SENSE; 374 return TCM_NO_SENSE;
375 } 375 }
376 376
377 static sense_reason_t compare_and_write_callback(struct se_cmd *cmd) 377 static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
378 { 378 {
379 struct se_device *dev = cmd->se_dev; 379 struct se_device *dev = cmd->se_dev;
380 struct scatterlist *write_sg = NULL, *sg; 380 struct scatterlist *write_sg = NULL, *sg;
381 unsigned char *buf = NULL, *addr; 381 unsigned char *buf = NULL, *addr;
382 struct sg_mapping_iter m; 382 struct sg_mapping_iter m;
383 unsigned int offset = 0, len; 383 unsigned int offset = 0, len;
384 unsigned int nlbas = cmd->t_task_nolb; 384 unsigned int nlbas = cmd->t_task_nolb;
385 unsigned int block_size = dev->dev_attrib.block_size; 385 unsigned int block_size = dev->dev_attrib.block_size;
386 unsigned int compare_len = (nlbas * block_size); 386 unsigned int compare_len = (nlbas * block_size);
387 sense_reason_t ret = TCM_NO_SENSE; 387 sense_reason_t ret = TCM_NO_SENSE;
388 int rc, i; 388 int rc, i;
389 389
390 /* 390 /*
391 * Handle early failure in transport_generic_request_failure(), 391 * Handle early failure in transport_generic_request_failure(),
392 * which will not have taken ->caw_sem yet.. 392 * which will not have taken ->caw_sem yet..
393 */ 393 */
394 if (!cmd->t_data_sg || !cmd->t_bidi_data_sg) 394 if (!cmd->t_data_sg || !cmd->t_bidi_data_sg)
395 return TCM_NO_SENSE; 395 return TCM_NO_SENSE;
396 /* 396 /*
397 * Immediately exit + release dev->caw_sem if command has already 397 * Immediately exit + release dev->caw_sem if command has already
398 * been failed with a non-zero SCSI status. 398 * been failed with a non-zero SCSI status.
399 */ 399 */
400 if (cmd->scsi_status) { 400 if (cmd->scsi_status) {
401 pr_err("compare_and_write_callback: non zero scsi_status:" 401 pr_err("compare_and_write_callback: non zero scsi_status:"
402 " 0x%02x\n", cmd->scsi_status); 402 " 0x%02x\n", cmd->scsi_status);
403 goto out; 403 goto out;
404 } 404 }
405 405
406 buf = kzalloc(cmd->data_length, GFP_KERNEL); 406 buf = kzalloc(cmd->data_length, GFP_KERNEL);
407 if (!buf) { 407 if (!buf) {
408 pr_err("Unable to allocate compare_and_write buf\n"); 408 pr_err("Unable to allocate compare_and_write buf\n");
409 ret = TCM_OUT_OF_RESOURCES; 409 ret = TCM_OUT_OF_RESOURCES;
410 goto out; 410 goto out;
411 } 411 }
412 412
413 write_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents, 413 write_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
414 GFP_KERNEL); 414 GFP_KERNEL);
415 if (!write_sg) { 415 if (!write_sg) {
416 pr_err("Unable to allocate compare_and_write sg\n"); 416 pr_err("Unable to allocate compare_and_write sg\n");
417 ret = TCM_OUT_OF_RESOURCES; 417 ret = TCM_OUT_OF_RESOURCES;
418 goto out; 418 goto out;
419 } 419 }
420 sg_init_table(write_sg, cmd->t_data_nents); 420 sg_init_table(write_sg, cmd->t_data_nents);
421 /* 421 /*
422 * Setup verify and write data payloads from total NumberLBAs. 422 * Setup verify and write data payloads from total NumberLBAs.
423 */ 423 */
424 rc = sg_copy_to_buffer(cmd->t_data_sg, cmd->t_data_nents, buf, 424 rc = sg_copy_to_buffer(cmd->t_data_sg, cmd->t_data_nents, buf,
425 cmd->data_length); 425 cmd->data_length);
426 if (!rc) { 426 if (!rc) {
427 pr_err("sg_copy_to_buffer() failed for compare_and_write\n"); 427 pr_err("sg_copy_to_buffer() failed for compare_and_write\n");
428 ret = TCM_OUT_OF_RESOURCES; 428 ret = TCM_OUT_OF_RESOURCES;
429 goto out; 429 goto out;
430 } 430 }
431 /* 431 /*
432 * Compare the SCSI READ payload against the verify payload 432 * Compare the SCSI READ payload against the verify payload
433 */ 433 */
434 for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, i) { 434 for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, i) {
435 addr = (unsigned char *)kmap_atomic(sg_page(sg)); 435 addr = (unsigned char *)kmap_atomic(sg_page(sg));
436 if (!addr) { 436 if (!addr) {
437 ret = TCM_OUT_OF_RESOURCES; 437 ret = TCM_OUT_OF_RESOURCES;
438 goto out; 438 goto out;
439 } 439 }
440 440
441 len = min(sg->length, compare_len); 441 len = min(sg->length, compare_len);
442 442
443 if (memcmp(addr, buf + offset, len)) { 443 if (memcmp(addr, buf + offset, len)) {
444 pr_warn("Detected MISCOMPARE for addr: %p buf: %p\n", 444 pr_warn("Detected MISCOMPARE for addr: %p buf: %p\n",
445 addr, buf + offset); 445 addr, buf + offset);
446 kunmap_atomic(addr); 446 kunmap_atomic(addr);
447 goto miscompare; 447 goto miscompare;
448 } 448 }
449 kunmap_atomic(addr); 449 kunmap_atomic(addr);
450 450
451 offset += len; 451 offset += len;
452 compare_len -= len; 452 compare_len -= len;
453 if (!compare_len) 453 if (!compare_len)
454 break; 454 break;
455 } 455 }
456 456
457 i = 0; 457 i = 0;
458 len = cmd->t_task_nolb * block_size; 458 len = cmd->t_task_nolb * block_size;
459 sg_miter_start(&m, cmd->t_data_sg, cmd->t_data_nents, SG_MITER_TO_SG); 459 sg_miter_start(&m, cmd->t_data_sg, cmd->t_data_nents, SG_MITER_TO_SG);
460 /* 460 /*
461 * Currently assumes NoLB=1 and SGLs are PAGE_SIZE.. 461 * Currently assumes NoLB=1 and SGLs are PAGE_SIZE..
462 */ 462 */
463 while (len) { 463 while (len) {
464 sg_miter_next(&m); 464 sg_miter_next(&m);
465 465
466 if (block_size < PAGE_SIZE) { 466 if (block_size < PAGE_SIZE) {
467 sg_set_page(&write_sg[i], m.page, block_size, 467 sg_set_page(&write_sg[i], m.page, block_size,
468 block_size); 468 block_size);
469 } else { 469 } else {
470 sg_miter_next(&m); 470 sg_miter_next(&m);
471 sg_set_page(&write_sg[i], m.page, block_size, 471 sg_set_page(&write_sg[i], m.page, block_size,
472 0); 472 0);
473 } 473 }
474 len -= block_size; 474 len -= block_size;
475 i++; 475 i++;
476 } 476 }
477 sg_miter_stop(&m); 477 sg_miter_stop(&m);
478 /* 478 /*
479 * Save the original SGL + nents values before updating to new 479 * Save the original SGL + nents values before updating to new
480 * assignments, to be released in transport_free_pages() -> 480 * assignments, to be released in transport_free_pages() ->
481 * transport_reset_sgl_orig() 481 * transport_reset_sgl_orig()
482 */ 482 */
483 cmd->t_data_sg_orig = cmd->t_data_sg; 483 cmd->t_data_sg_orig = cmd->t_data_sg;
484 cmd->t_data_sg = write_sg; 484 cmd->t_data_sg = write_sg;
485 cmd->t_data_nents_orig = cmd->t_data_nents; 485 cmd->t_data_nents_orig = cmd->t_data_nents;
486 cmd->t_data_nents = 1; 486 cmd->t_data_nents = 1;
487 487
488 cmd->sam_task_attr = MSG_HEAD_TAG; 488 cmd->sam_task_attr = MSG_HEAD_TAG;
489 cmd->transport_complete_callback = compare_and_write_post; 489 cmd->transport_complete_callback = compare_and_write_post;
490 /* 490 /*
491 * Now reset ->execute_cmd() to the normal sbc_execute_rw() handler 491 * Now reset ->execute_cmd() to the normal sbc_execute_rw() handler
492 * for submitting the adjusted SGL to write instance user-data. 492 * for submitting the adjusted SGL to write instance user-data.
493 */ 493 */
494 cmd->execute_cmd = sbc_execute_rw; 494 cmd->execute_cmd = sbc_execute_rw;
495 495
496 spin_lock_irq(&cmd->t_state_lock); 496 spin_lock_irq(&cmd->t_state_lock);
497 cmd->t_state = TRANSPORT_PROCESSING; 497 cmd->t_state = TRANSPORT_PROCESSING;
498 cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT; 498 cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
499 spin_unlock_irq(&cmd->t_state_lock); 499 spin_unlock_irq(&cmd->t_state_lock);
500 500
501 __target_execute_cmd(cmd); 501 __target_execute_cmd(cmd);
502 502
503 kfree(buf); 503 kfree(buf);
504 return ret; 504 return ret;
505 505
506 miscompare: 506 miscompare:
507 pr_warn("Target/%s: Send MISCOMPARE check condition and sense\n", 507 pr_warn("Target/%s: Send MISCOMPARE check condition and sense\n",
508 dev->transport->name); 508 dev->transport->name);
509 ret = TCM_MISCOMPARE_VERIFY; 509 ret = TCM_MISCOMPARE_VERIFY;
510 out: 510 out:
511 /* 511 /*
512 * In the MISCOMPARE or failure case, unlock ->caw_sem obtained in 512 * In the MISCOMPARE or failure case, unlock ->caw_sem obtained in
513 * sbc_compare_and_write() before the original READ I/O submission. 513 * sbc_compare_and_write() before the original READ I/O submission.
514 */ 514 */
515 up(&dev->caw_sem); 515 up(&dev->caw_sem);
516 kfree(write_sg); 516 kfree(write_sg);
517 kfree(buf); 517 kfree(buf);
518 return ret; 518 return ret;
519 } 519 }
520 520
521 static sense_reason_t 521 static sense_reason_t
522 sbc_compare_and_write(struct se_cmd *cmd) 522 sbc_compare_and_write(struct se_cmd *cmd)
523 { 523 {
524 struct se_device *dev = cmd->se_dev; 524 struct se_device *dev = cmd->se_dev;
525 sense_reason_t ret; 525 sense_reason_t ret;
526 int rc; 526 int rc;
527 /* 527 /*
528 * Submit the READ first for COMPARE_AND_WRITE to perform the 528 * Submit the READ first for COMPARE_AND_WRITE to perform the
529 * comparison using SGLs at cmd->t_bidi_data_sg.. 529 * comparison using SGLs at cmd->t_bidi_data_sg..
530 */ 530 */
531 rc = down_interruptible(&dev->caw_sem); 531 rc = down_interruptible(&dev->caw_sem);
532 if ((rc != 0) || signal_pending(current)) { 532 if ((rc != 0) || signal_pending(current)) {
533 cmd->transport_complete_callback = NULL; 533 cmd->transport_complete_callback = NULL;
534 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 534 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
535 } 535 }
536 /* 536 /*
537 * Reset cmd->data_length to individual block_size in order to not 537 * Reset cmd->data_length to individual block_size in order to not
538 * confuse backend drivers that depend on this value matching the 538 * confuse backend drivers that depend on this value matching the
539 * size of the I/O being submitted. 539 * size of the I/O being submitted.
540 */ 540 */
541 cmd->data_length = cmd->t_task_nolb * dev->dev_attrib.block_size; 541 cmd->data_length = cmd->t_task_nolb * dev->dev_attrib.block_size;
542 542
543 ret = cmd->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents, 543 ret = cmd->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents,
544 DMA_FROM_DEVICE); 544 DMA_FROM_DEVICE);
545 if (ret) { 545 if (ret) {
546 cmd->transport_complete_callback = NULL; 546 cmd->transport_complete_callback = NULL;
547 up(&dev->caw_sem); 547 up(&dev->caw_sem);
548 return ret; 548 return ret;
549 } 549 }
550 /* 550 /*
551 * Unlock of dev->caw_sem to occur in compare_and_write_callback() 551 * Unlock of dev->caw_sem to occur in compare_and_write_callback()
552 * upon MISCOMPARE, or in compare_and_write_done() upon completion 552 * upon MISCOMPARE, or in compare_and_write_done() upon completion
553 * of WRITE instance user-data. 553 * of WRITE instance user-data.
554 */ 554 */
555 return TCM_NO_SENSE; 555 return TCM_NO_SENSE;
556 } 556 }
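
Together, sbc_compare_and_write() and compare_and_write_callback() above implement a two-phase flow: take dev->caw_sem, read the target block into the BIDI scatterlist, compare it against the verify payload, and only submit the write if the comparison succeeds. A flat-buffer sketch of the verify step (hypothetical helper, not the kernel API):

	/* Illustrative only: the verify phase of COMPARE AND WRITE for one block. */
	#include <string.h>
	#include <stdbool.h>

	static bool caw_verify_block(const unsigned char *disk_block,
				     const unsigned char *verify_payload,
				     size_t block_size)
	{
		/* MISCOMPARE if the on-disk data differs from the verify payload. */
		return memcmp(disk_block, verify_payload, block_size) == 0;
	}
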
557 557
558 static int 558 static int
559 sbc_set_prot_op_checks(u8 protect, enum target_prot_type prot_type, 559 sbc_set_prot_op_checks(u8 protect, enum target_prot_type prot_type,
560 bool is_write, struct se_cmd *cmd) 560 bool is_write, struct se_cmd *cmd)
561 { 561 {
562 if (is_write) { 562 if (is_write) {
563 cmd->prot_op = protect ? TARGET_PROT_DOUT_PASS : 563 cmd->prot_op = protect ? TARGET_PROT_DOUT_PASS :
564 TARGET_PROT_DOUT_INSERT; 564 TARGET_PROT_DOUT_INSERT;
565 switch (protect) { 565 switch (protect) {
566 case 0x0: 566 case 0x0:
567 case 0x3: 567 case 0x3:
568 cmd->prot_checks = 0; 568 cmd->prot_checks = 0;
569 break; 569 break;
570 case 0x1: 570 case 0x1:
571 case 0x5: 571 case 0x5:
572 cmd->prot_checks = TARGET_DIF_CHECK_GUARD; 572 cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
573 if (prot_type == TARGET_DIF_TYPE1_PROT) 573 if (prot_type == TARGET_DIF_TYPE1_PROT)
574 cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG; 574 cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
575 break; 575 break;
576 case 0x2: 576 case 0x2:
577 if (prot_type == TARGET_DIF_TYPE1_PROT) 577 if (prot_type == TARGET_DIF_TYPE1_PROT)
578 cmd->prot_checks = TARGET_DIF_CHECK_REFTAG; 578 cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
579 break; 579 break;
580 case 0x4: 580 case 0x4:
581 cmd->prot_checks = TARGET_DIF_CHECK_GUARD; 581 cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
582 break; 582 break;
583 default: 583 default:
584 pr_err("Unsupported protect field %d\n", protect); 584 pr_err("Unsupported protect field %d\n", protect);
585 return -EINVAL; 585 return -EINVAL;
586 } 586 }
587 } else { 587 } else {
588 cmd->prot_op = protect ? TARGET_PROT_DIN_PASS : 588 cmd->prot_op = protect ? TARGET_PROT_DIN_PASS :
589 TARGET_PROT_DIN_STRIP; 589 TARGET_PROT_DIN_STRIP;
590 switch (protect) { 590 switch (protect) {
591 case 0x0: 591 case 0x0:
592 case 0x1: 592 case 0x1:
593 case 0x5: 593 case 0x5:
594 cmd->prot_checks = TARGET_DIF_CHECK_GUARD; 594 cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
595 if (prot_type == TARGET_DIF_TYPE1_PROT) 595 if (prot_type == TARGET_DIF_TYPE1_PROT)
596 cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG; 596 cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
597 break; 597 break;
598 case 0x2: 598 case 0x2:
599 if (prot_type == TARGET_DIF_TYPE1_PROT) 599 if (prot_type == TARGET_DIF_TYPE1_PROT)
600 cmd->prot_checks = TARGET_DIF_CHECK_REFTAG; 600 cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
601 break; 601 break;
602 case 0x3: 602 case 0x3:
603 cmd->prot_checks = 0; 603 cmd->prot_checks = 0;
604 break; 604 break;
605 case 0x4: 605 case 0x4:
606 cmd->prot_checks = TARGET_DIF_CHECK_GUARD; 606 cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
607 break; 607 break;
608 default: 608 default:
609 pr_err("Unsupported protect field %d\n", protect); 609 pr_err("Unsupported protect field %d\n", protect);
610 return -EINVAL; 610 return -EINVAL;
611 } 611 }
612 } 612 }
613 613
614 return 0; 614 return 0;
615 } 615 }
616 616
617 static bool 617 static bool
618 sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb, 618 sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
619 u32 sectors, bool is_write) 619 u32 sectors, bool is_write)
620 { 620 {
621 u8 protect = cdb[1] >> 5; 621 u8 protect = cdb[1] >> 5;
622 622
623 if ((!cmd->t_prot_sg || !cmd->t_prot_nents) && cmd->prot_pto) 623 if ((!cmd->t_prot_sg || !cmd->t_prot_nents) && cmd->prot_pto)
624 return true; 624 return true;
625 625
626 switch (dev->dev_attrib.pi_prot_type) { 626 switch (dev->dev_attrib.pi_prot_type) {
627 case TARGET_DIF_TYPE3_PROT: 627 case TARGET_DIF_TYPE3_PROT:
628 cmd->reftag_seed = 0xffffffff; 628 cmd->reftag_seed = 0xffffffff;
629 break; 629 break;
630 case TARGET_DIF_TYPE2_PROT: 630 case TARGET_DIF_TYPE2_PROT:
631 if (protect) 631 if (protect)
632 return false; 632 return false;
633 633
634 cmd->reftag_seed = cmd->t_task_lba; 634 cmd->reftag_seed = cmd->t_task_lba;
635 break; 635 break;
636 case TARGET_DIF_TYPE1_PROT: 636 case TARGET_DIF_TYPE1_PROT:
637 cmd->reftag_seed = cmd->t_task_lba; 637 cmd->reftag_seed = cmd->t_task_lba;
638 break; 638 break;
639 case TARGET_DIF_TYPE0_PROT: 639 case TARGET_DIF_TYPE0_PROT:
640 default: 640 default:
641 return true; 641 return true;
642 } 642 }
643 643
644 if (sbc_set_prot_op_checks(protect, dev->dev_attrib.pi_prot_type, 644 if (sbc_set_prot_op_checks(protect, dev->dev_attrib.pi_prot_type,
645 is_write, cmd)) 645 is_write, cmd))
646 return false; 646 return false;
647 647
648 cmd->prot_type = dev->dev_attrib.pi_prot_type; 648 cmd->prot_type = dev->dev_attrib.pi_prot_type;
649 cmd->prot_length = dev->prot_length * sectors; 649 cmd->prot_length = dev->prot_length * sectors;
650 650
651 /** 651 /**
652 * In case protection information exists over the wire 652 * In case protection information exists over the wire
653 * we modify command data length to describe pure data. 653 * we modify command data length to describe pure data.
654 * The actual transfer length is data length + protection 654 * The actual transfer length is data length + protection
655 * length 655 * length
656 **/ 656 **/
657 if (protect) 657 if (protect)
658 cmd->data_length = sectors * dev->dev_attrib.block_size; 658 cmd->data_length = sectors * dev->dev_attrib.block_size;
659 659
660 pr_debug("%s: prot_type=%d, data_length=%d, prot_length=%d " 660 pr_debug("%s: prot_type=%d, data_length=%d, prot_length=%d "
661 "prot_op=%d prot_checks=%d\n", 661 "prot_op=%d prot_checks=%d\n",
662 __func__, cmd->prot_type, cmd->data_length, cmd->prot_length, 662 __func__, cmd->prot_type, cmd->data_length, cmd->prot_length,
663 cmd->prot_op, cmd->prot_checks); 663 cmd->prot_op, cmd->prot_checks);
664 664
665 return true; 665 return true;
666 } 666 }
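
When protection information travels with the data (protect != 0), the comment above notes that cmd->data_length is trimmed to pure user data while the wire transfer also carries one protection tuple per sector. A hedged sketch of that sizing arithmetic (names are illustrative):

	/* Illustrative only: wire-transfer sizing when PI travels with the data. */
	#include <stdint.h>

	static uint32_t example_transfer_len(uint32_t sectors, uint32_t block_size,
					     uint32_t prot_tuple_size)
	{
		uint32_t data_len = sectors * block_size;        /* pure user data          */
		uint32_t prot_len = sectors * prot_tuple_size;   /* DIF tuples (8B for v1)  */

		return data_len + prot_len;                      /* bytes on the wire       */
	}
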
667 667
668 sense_reason_t 668 sense_reason_t
669 sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) 669 sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
670 { 670 {
671 struct se_device *dev = cmd->se_dev; 671 struct se_device *dev = cmd->se_dev;
672 unsigned char *cdb = cmd->t_task_cdb; 672 unsigned char *cdb = cmd->t_task_cdb;
673 unsigned int size; 673 unsigned int size;
674 u32 sectors = 0; 674 u32 sectors = 0;
675 sense_reason_t ret; 675 sense_reason_t ret;
676 676
677 switch (cdb[0]) { 677 switch (cdb[0]) {
678 case READ_6: 678 case READ_6:
679 sectors = transport_get_sectors_6(cdb); 679 sectors = transport_get_sectors_6(cdb);
680 cmd->t_task_lba = transport_lba_21(cdb); 680 cmd->t_task_lba = transport_lba_21(cdb);
681 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 681 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
682 cmd->execute_rw = ops->execute_rw; 682 cmd->execute_rw = ops->execute_rw;
683 cmd->execute_cmd = sbc_execute_rw; 683 cmd->execute_cmd = sbc_execute_rw;
684 break; 684 break;
685 case READ_10: 685 case READ_10:
686 sectors = transport_get_sectors_10(cdb); 686 sectors = transport_get_sectors_10(cdb);
687 cmd->t_task_lba = transport_lba_32(cdb); 687 cmd->t_task_lba = transport_lba_32(cdb);
688 688
689 if (!sbc_check_prot(dev, cmd, cdb, sectors, false)) 689 if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
690 return TCM_UNSUPPORTED_SCSI_OPCODE; 690 return TCM_UNSUPPORTED_SCSI_OPCODE;
691 691
692 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 692 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
693 cmd->execute_rw = ops->execute_rw; 693 cmd->execute_rw = ops->execute_rw;
694 cmd->execute_cmd = sbc_execute_rw; 694 cmd->execute_cmd = sbc_execute_rw;
695 break; 695 break;
696 case READ_12: 696 case READ_12:
697 sectors = transport_get_sectors_12(cdb); 697 sectors = transport_get_sectors_12(cdb);
698 cmd->t_task_lba = transport_lba_32(cdb); 698 cmd->t_task_lba = transport_lba_32(cdb);
699 699
700 if (!sbc_check_prot(dev, cmd, cdb, sectors, false)) 700 if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
701 return TCM_UNSUPPORTED_SCSI_OPCODE; 701 return TCM_UNSUPPORTED_SCSI_OPCODE;
702 702
703 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 703 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
704 cmd->execute_rw = ops->execute_rw; 704 cmd->execute_rw = ops->execute_rw;
705 cmd->execute_cmd = sbc_execute_rw; 705 cmd->execute_cmd = sbc_execute_rw;
706 break; 706 break;
707 case READ_16: 707 case READ_16:
708 sectors = transport_get_sectors_16(cdb); 708 sectors = transport_get_sectors_16(cdb);
709 cmd->t_task_lba = transport_lba_64(cdb); 709 cmd->t_task_lba = transport_lba_64(cdb);
710 710
711 if (!sbc_check_prot(dev, cmd, cdb, sectors, false)) 711 if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
712 return TCM_UNSUPPORTED_SCSI_OPCODE; 712 return TCM_UNSUPPORTED_SCSI_OPCODE;
713 713
714 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 714 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
715 cmd->execute_rw = ops->execute_rw; 715 cmd->execute_rw = ops->execute_rw;
716 cmd->execute_cmd = sbc_execute_rw; 716 cmd->execute_cmd = sbc_execute_rw;
717 break; 717 break;
718 case WRITE_6: 718 case WRITE_6:
719 sectors = transport_get_sectors_6(cdb); 719 sectors = transport_get_sectors_6(cdb);
720 cmd->t_task_lba = transport_lba_21(cdb); 720 cmd->t_task_lba = transport_lba_21(cdb);
721 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 721 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
722 cmd->execute_rw = ops->execute_rw; 722 cmd->execute_rw = ops->execute_rw;
723 cmd->execute_cmd = sbc_execute_rw; 723 cmd->execute_cmd = sbc_execute_rw;
724 break; 724 break;
725 case WRITE_10: 725 case WRITE_10:
726 case WRITE_VERIFY: 726 case WRITE_VERIFY:
727 sectors = transport_get_sectors_10(cdb); 727 sectors = transport_get_sectors_10(cdb);
728 cmd->t_task_lba = transport_lba_32(cdb); 728 cmd->t_task_lba = transport_lba_32(cdb);
729 729
730 if (!sbc_check_prot(dev, cmd, cdb, sectors, true)) 730 if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
731 return TCM_UNSUPPORTED_SCSI_OPCODE; 731 return TCM_UNSUPPORTED_SCSI_OPCODE;
732 732
733 if (cdb[1] & 0x8) 733 if (cdb[1] & 0x8)
734 cmd->se_cmd_flags |= SCF_FUA; 734 cmd->se_cmd_flags |= SCF_FUA;
735 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 735 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
736 cmd->execute_rw = ops->execute_rw; 736 cmd->execute_rw = ops->execute_rw;
737 cmd->execute_cmd = sbc_execute_rw; 737 cmd->execute_cmd = sbc_execute_rw;
738 break; 738 break;
739 case WRITE_12: 739 case WRITE_12:
740 sectors = transport_get_sectors_12(cdb); 740 sectors = transport_get_sectors_12(cdb);
741 cmd->t_task_lba = transport_lba_32(cdb); 741 cmd->t_task_lba = transport_lba_32(cdb);
742 742
743 if (!sbc_check_prot(dev, cmd, cdb, sectors, true)) 743 if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
744 return TCM_UNSUPPORTED_SCSI_OPCODE; 744 return TCM_UNSUPPORTED_SCSI_OPCODE;
745 745
746 if (cdb[1] & 0x8) 746 if (cdb[1] & 0x8)
747 cmd->se_cmd_flags |= SCF_FUA; 747 cmd->se_cmd_flags |= SCF_FUA;
748 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 748 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
749 cmd->execute_rw = ops->execute_rw; 749 cmd->execute_rw = ops->execute_rw;
750 cmd->execute_cmd = sbc_execute_rw; 750 cmd->execute_cmd = sbc_execute_rw;
751 break; 751 break;
752 case WRITE_16: 752 case WRITE_16:
753 sectors = transport_get_sectors_16(cdb); 753 sectors = transport_get_sectors_16(cdb);
754 cmd->t_task_lba = transport_lba_64(cdb); 754 cmd->t_task_lba = transport_lba_64(cdb);
755 755
756 if (!sbc_check_prot(dev, cmd, cdb, sectors, true)) 756 if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
757 return TCM_UNSUPPORTED_SCSI_OPCODE; 757 return TCM_UNSUPPORTED_SCSI_OPCODE;
758 758
759 if (cdb[1] & 0x8) 759 if (cdb[1] & 0x8)
760 cmd->se_cmd_flags |= SCF_FUA; 760 cmd->se_cmd_flags |= SCF_FUA;
761 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 761 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
762 cmd->execute_rw = ops->execute_rw; 762 cmd->execute_rw = ops->execute_rw;
763 cmd->execute_cmd = sbc_execute_rw; 763 cmd->execute_cmd = sbc_execute_rw;
764 break; 764 break;
765 case XDWRITEREAD_10: 765 case XDWRITEREAD_10:
766 if (cmd->data_direction != DMA_TO_DEVICE || 766 if (cmd->data_direction != DMA_TO_DEVICE ||
767 !(cmd->se_cmd_flags & SCF_BIDI)) 767 !(cmd->se_cmd_flags & SCF_BIDI))
768 return TCM_INVALID_CDB_FIELD; 768 return TCM_INVALID_CDB_FIELD;
769 sectors = transport_get_sectors_10(cdb); 769 sectors = transport_get_sectors_10(cdb);
770 770
771 cmd->t_task_lba = transport_lba_32(cdb); 771 cmd->t_task_lba = transport_lba_32(cdb);
772 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 772 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
773 773
774 /* 774 /*
775 * Setup BIDI XOR callback to be run after I/O completion. 775 * Setup BIDI XOR callback to be run after I/O completion.
776 */ 776 */
777 cmd->execute_rw = ops->execute_rw; 777 cmd->execute_rw = ops->execute_rw;
778 cmd->execute_cmd = sbc_execute_rw; 778 cmd->execute_cmd = sbc_execute_rw;
779 cmd->transport_complete_callback = &xdreadwrite_callback; 779 cmd->transport_complete_callback = &xdreadwrite_callback;
780 if (cdb[1] & 0x8) 780 if (cdb[1] & 0x8)
781 cmd->se_cmd_flags |= SCF_FUA; 781 cmd->se_cmd_flags |= SCF_FUA;
782 break; 782 break;
783 case VARIABLE_LENGTH_CMD: 783 case VARIABLE_LENGTH_CMD:
784 { 784 {
785 u16 service_action = get_unaligned_be16(&cdb[8]); 785 u16 service_action = get_unaligned_be16(&cdb[8]);
786 switch (service_action) { 786 switch (service_action) {
787 case XDWRITEREAD_32: 787 case XDWRITEREAD_32:
788 sectors = transport_get_sectors_32(cdb); 788 sectors = transport_get_sectors_32(cdb);
789 789
790 /* 790 /*
791 * Use WRITE_32 and READ_32 opcodes for the emulated 791 * Use WRITE_32 and READ_32 opcodes for the emulated
792 * XDWRITE_READ_32 logic. 792 * XDWRITE_READ_32 logic.
793 */ 793 */
794 cmd->t_task_lba = transport_lba_64_ext(cdb); 794 cmd->t_task_lba = transport_lba_64_ext(cdb);
795 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; 795 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
796 796
797 /* 797 /*
798 * Setup BIDI XOR callback to be run after I/O 798 * Setup BIDI XOR callback to be run after I/O
799 * completion. 799 * completion.
800 */ 800 */
801 cmd->execute_rw = ops->execute_rw; 801 cmd->execute_rw = ops->execute_rw;
802 cmd->execute_cmd = sbc_execute_rw; 802 cmd->execute_cmd = sbc_execute_rw;
803 cmd->transport_complete_callback = &xdreadwrite_callback; 803 cmd->transport_complete_callback = &xdreadwrite_callback;
804 if (cdb[1] & 0x8) 804 if (cdb[1] & 0x8)
805 cmd->se_cmd_flags |= SCF_FUA; 805 cmd->se_cmd_flags |= SCF_FUA;
806 break; 806 break;
807 case WRITE_SAME_32: 807 case WRITE_SAME_32:
808 sectors = transport_get_sectors_32(cdb); 808 sectors = transport_get_sectors_32(cdb);
809 if (!sectors) { 809 if (!sectors) {
810 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not" 810 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
811 " supported\n"); 811 " supported\n");
812 return TCM_INVALID_CDB_FIELD; 812 return TCM_INVALID_CDB_FIELD;
813 } 813 }
814 814
815 size = sbc_get_size(cmd, 1); 815 size = sbc_get_size(cmd, 1);
816 cmd->t_task_lba = get_unaligned_be64(&cdb[12]); 816 cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
817 817
818 ret = sbc_setup_write_same(cmd, &cdb[10], ops); 818 ret = sbc_setup_write_same(cmd, &cdb[10], ops);
819 if (ret) 819 if (ret)
820 return ret; 820 return ret;
821 break; 821 break;
822 default: 822 default:
823 pr_err("VARIABLE_LENGTH_CMD service action" 823 pr_err("VARIABLE_LENGTH_CMD service action"
824 " 0x%04x not supported\n", service_action); 824 " 0x%04x not supported\n", service_action);
825 return TCM_UNSUPPORTED_SCSI_OPCODE; 825 return TCM_UNSUPPORTED_SCSI_OPCODE;
826 } 826 }
827 break; 827 break;
828 } 828 }
829 case COMPARE_AND_WRITE: 829 case COMPARE_AND_WRITE:
830 sectors = cdb[13]; 830 sectors = cdb[13];
831 /* 831 /*
832 * Currently enforce COMPARE_AND_WRITE for a single sector 832 * Currently enforce COMPARE_AND_WRITE for a single sector
833 */ 833 */
834 if (sectors > 1) { 834 if (sectors > 1) {
835 pr_err("COMPARE_AND_WRITE contains NoLB: %u greater" 835 pr_err("COMPARE_AND_WRITE contains NoLB: %u greater"
836 " than 1\n", sectors); 836 " than 1\n", sectors);
837 return TCM_INVALID_CDB_FIELD; 837 return TCM_INVALID_CDB_FIELD;
838 } 838 }
839 /* 839 /*
840 * Double size because we have two buffers, note that 840 * Double size because we have two buffers, note that
841 * zero is not an error.. 841 * zero is not an error..
842 */ 842 */
843 size = 2 * sbc_get_size(cmd, sectors); 843 size = 2 * sbc_get_size(cmd, sectors);
844 cmd->t_task_lba = get_unaligned_be64(&cdb[2]); 844 cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
845 cmd->t_task_nolb = sectors; 845 cmd->t_task_nolb = sectors;
846 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB | SCF_COMPARE_AND_WRITE; 846 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB | SCF_COMPARE_AND_WRITE;
847 cmd->execute_rw = ops->execute_rw; 847 cmd->execute_rw = ops->execute_rw;
848 cmd->execute_cmd = sbc_compare_and_write; 848 cmd->execute_cmd = sbc_compare_and_write;
849 cmd->transport_complete_callback = compare_and_write_callback; 849 cmd->transport_complete_callback = compare_and_write_callback;
850 break; 850 break;
851 case READ_CAPACITY: 851 case READ_CAPACITY:
852 size = READ_CAP_LEN; 852 size = READ_CAP_LEN;
853 cmd->execute_cmd = sbc_emulate_readcapacity; 853 cmd->execute_cmd = sbc_emulate_readcapacity;
854 break; 854 break;
855 case SERVICE_ACTION_IN: 855 case SERVICE_ACTION_IN:
856 switch (cmd->t_task_cdb[1] & 0x1f) { 856 switch (cmd->t_task_cdb[1] & 0x1f) {
857 case SAI_READ_CAPACITY_16: 857 case SAI_READ_CAPACITY_16:
858 cmd->execute_cmd = sbc_emulate_readcapacity_16; 858 cmd->execute_cmd = sbc_emulate_readcapacity_16;
859 break; 859 break;
860 case SAI_REPORT_REFERRALS: 860 case SAI_REPORT_REFERRALS:
861 cmd->execute_cmd = target_emulate_report_referrals; 861 cmd->execute_cmd = target_emulate_report_referrals;
862 break; 862 break;
863 default: 863 default:
864 pr_err("Unsupported SA: 0x%02x\n", 864 pr_err("Unsupported SA: 0x%02x\n",
865 cmd->t_task_cdb[1] & 0x1f); 865 cmd->t_task_cdb[1] & 0x1f);
866 return TCM_INVALID_CDB_FIELD; 866 return TCM_INVALID_CDB_FIELD;
867 } 867 }
868 size = (cdb[10] << 24) | (cdb[11] << 16) | 868 size = (cdb[10] << 24) | (cdb[11] << 16) |
869 (cdb[12] << 8) | cdb[13]; 869 (cdb[12] << 8) | cdb[13];
870 break; 870 break;
871 case SYNCHRONIZE_CACHE: 871 case SYNCHRONIZE_CACHE:
872 case SYNCHRONIZE_CACHE_16: 872 case SYNCHRONIZE_CACHE_16:
873 if (cdb[0] == SYNCHRONIZE_CACHE) { 873 if (cdb[0] == SYNCHRONIZE_CACHE) {
874 sectors = transport_get_sectors_10(cdb); 874 sectors = transport_get_sectors_10(cdb);
875 cmd->t_task_lba = transport_lba_32(cdb); 875 cmd->t_task_lba = transport_lba_32(cdb);
876 } else { 876 } else {
877 sectors = transport_get_sectors_16(cdb); 877 sectors = transport_get_sectors_16(cdb);
878 cmd->t_task_lba = transport_lba_64(cdb); 878 cmd->t_task_lba = transport_lba_64(cdb);
879 } 879 }
880 if (ops->execute_sync_cache) { 880 if (ops->execute_sync_cache) {
881 cmd->execute_cmd = ops->execute_sync_cache; 881 cmd->execute_cmd = ops->execute_sync_cache;
882 goto check_lba; 882 goto check_lba;
883 } 883 }
884 size = 0; 884 size = 0;
885 cmd->execute_cmd = sbc_emulate_noop; 885 cmd->execute_cmd = sbc_emulate_noop;
886 break; 886 break;
887 case UNMAP: 887 case UNMAP:
888 if (!ops->execute_unmap) 888 if (!ops->execute_unmap)
889 return TCM_UNSUPPORTED_SCSI_OPCODE; 889 return TCM_UNSUPPORTED_SCSI_OPCODE;
890 890
891 size = get_unaligned_be16(&cdb[7]); 891 size = get_unaligned_be16(&cdb[7]);
892 cmd->execute_cmd = ops->execute_unmap; 892 cmd->execute_cmd = ops->execute_unmap;
893 break; 893 break;
894 case WRITE_SAME_16: 894 case WRITE_SAME_16:
895 sectors = transport_get_sectors_16(cdb); 895 sectors = transport_get_sectors_16(cdb);
896 if (!sectors) { 896 if (!sectors) {
897 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); 897 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
898 return TCM_INVALID_CDB_FIELD; 898 return TCM_INVALID_CDB_FIELD;
899 } 899 }
900 900
901 size = sbc_get_size(cmd, 1); 901 size = sbc_get_size(cmd, 1);
902 cmd->t_task_lba = get_unaligned_be64(&cdb[2]); 902 cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
903 903
904 ret = sbc_setup_write_same(cmd, &cdb[1], ops); 904 ret = sbc_setup_write_same(cmd, &cdb[1], ops);
905 if (ret) 905 if (ret)
906 return ret; 906 return ret;
907 break; 907 break;
908 case WRITE_SAME: 908 case WRITE_SAME:
909 sectors = transport_get_sectors_10(cdb); 909 sectors = transport_get_sectors_10(cdb);
910 if (!sectors) { 910 if (!sectors) {
911 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); 911 pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
912 return TCM_INVALID_CDB_FIELD; 912 return TCM_INVALID_CDB_FIELD;
913 } 913 }
914 914
915 size = sbc_get_size(cmd, 1); 915 size = sbc_get_size(cmd, 1);
916 cmd->t_task_lba = get_unaligned_be32(&cdb[2]); 916 cmd->t_task_lba = get_unaligned_be32(&cdb[2]);
917 917
918 /* 918 /*
919 * Follow sbcr26 with WRITE_SAME (10) and check for the existence 919 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
920 * of byte 1 bit 3 UNMAP instead of original reserved field 920 * of byte 1 bit 3 UNMAP instead of original reserved field
921 */ 921 */
922 ret = sbc_setup_write_same(cmd, &cdb[1], ops); 922 ret = sbc_setup_write_same(cmd, &cdb[1], ops);
923 if (ret) 923 if (ret)
924 return ret; 924 return ret;
925 break; 925 break;
926 case VERIFY: 926 case VERIFY:
927 size = 0; 927 size = 0;
928 sectors = transport_get_sectors_10(cdb); 928 sectors = transport_get_sectors_10(cdb);
929 cmd->t_task_lba = transport_lba_32(cdb); 929 cmd->t_task_lba = transport_lba_32(cdb);
930 cmd->execute_cmd = sbc_emulate_noop; 930 cmd->execute_cmd = sbc_emulate_noop;
931 goto check_lba; 931 goto check_lba;
932 case REZERO_UNIT: 932 case REZERO_UNIT:
933 case SEEK_6: 933 case SEEK_6:
934 case SEEK_10: 934 case SEEK_10:
935 /* 935 /*
936 * There are still clients out there which use these old SCSI-2 936 * There are still clients out there which use these old SCSI-2
937 * commands. This mainly happens when running VMs with legacy 937 * commands. This mainly happens when running VMs with legacy
938 * guest systems, connected via SCSI command pass-through to 938 * guest systems, connected via SCSI command pass-through to
939 * iSCSI targets. Make them happy and return status GOOD. 939 * iSCSI targets. Make them happy and return status GOOD.
940 */ 940 */
941 size = 0; 941 size = 0;
942 cmd->execute_cmd = sbc_emulate_noop; 942 cmd->execute_cmd = sbc_emulate_noop;
943 break; 943 break;
944 default: 944 default:
945 ret = spc_parse_cdb(cmd, &size); 945 ret = spc_parse_cdb(cmd, &size);
946 if (ret) 946 if (ret)
947 return ret; 947 return ret;
948 } 948 }
949 949
950 /* reject any command that we don't have a handler for */ 950 /* reject any command that we don't have a handler for */
951 if (!cmd->execute_cmd) 951 if (!cmd->execute_cmd)
952 return TCM_UNSUPPORTED_SCSI_OPCODE; 952 return TCM_UNSUPPORTED_SCSI_OPCODE;
953 953
954 if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { 954 if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
955 unsigned long long end_lba; 955 unsigned long long end_lba;
956
957 if (sectors > dev->dev_attrib.fabric_max_sectors) {
958 printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
959 " big sectors %u exceeds fabric_max_sectors:"
960 " %u\n", cdb[0], sectors,
961 dev->dev_attrib.fabric_max_sectors);
962 return TCM_INVALID_CDB_FIELD;
963 }
964 if (sectors > dev->dev_attrib.hw_max_sectors) {
965 printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
966 " big sectors %u exceeds backend hw_max_sectors:"
967 " %u\n", cdb[0], sectors,
968 dev->dev_attrib.hw_max_sectors);
969 return TCM_INVALID_CDB_FIELD;
970 }
971 check_lba: 956 check_lba:
972 end_lba = dev->transport->get_blocks(dev) + 1; 957 end_lba = dev->transport->get_blocks(dev) + 1;
973 if (cmd->t_task_lba + sectors > end_lba) { 958 if (cmd->t_task_lba + sectors > end_lba) {
974 pr_err("cmd exceeds last lba %llu " 959 pr_err("cmd exceeds last lba %llu "
975 "(lba %llu, sectors %u)\n", 960 "(lba %llu, sectors %u)\n",
976 end_lba, cmd->t_task_lba, sectors); 961 end_lba, cmd->t_task_lba, sectors);
977 return TCM_ADDRESS_OUT_OF_RANGE; 962 return TCM_ADDRESS_OUT_OF_RANGE;
978 } 963 }
979 964
980 if (!(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) 965 if (!(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE))
981 size = sbc_get_size(cmd, sectors); 966 size = sbc_get_size(cmd, sectors);
982 } 967 }
983 968
984 return target_cmd_size_check(cmd, size); 969 return target_cmd_size_check(cmd, size);
985 } 970 }
986 EXPORT_SYMBOL(sbc_parse_cdb); 971 EXPORT_SYMBOL(sbc_parse_cdb);
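
For data CDBs, sbc_parse_cdb() finishes by rejecting any transfer that would run past the end of the device: the last addressable LBA is dev->transport->get_blocks(dev), so the exclusive upper bound is get_blocks() + 1. A minimal sketch of that range check (illustrative only, not the kernel code):

	/* Illustrative only: the end-of-device check done at the tail of sbc_parse_cdb(). */
	#include <stdbool.h>
	#include <stdint.h>

	static bool lba_range_ok(uint64_t lba, uint32_t sectors, uint64_t last_lba)
	{
		uint64_t end_lba = last_lba + 1;   /* exclusive upper bound */

		return lba + sectors <= end_lba;
	}
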
987 972
988 u32 sbc_get_device_type(struct se_device *dev) 973 u32 sbc_get_device_type(struct se_device *dev)
989 { 974 {
990 return TYPE_DISK; 975 return TYPE_DISK;
991 } 976 }
992 EXPORT_SYMBOL(sbc_get_device_type); 977 EXPORT_SYMBOL(sbc_get_device_type);
993 978
994 sense_reason_t 979 sense_reason_t
995 sbc_execute_unmap(struct se_cmd *cmd, 980 sbc_execute_unmap(struct se_cmd *cmd,
996 sense_reason_t (*do_unmap_fn)(struct se_cmd *, void *, 981 sense_reason_t (*do_unmap_fn)(struct se_cmd *, void *,
997 sector_t, sector_t), 982 sector_t, sector_t),
998 void *priv) 983 void *priv)
999 { 984 {
1000 struct se_device *dev = cmd->se_dev; 985 struct se_device *dev = cmd->se_dev;
1001 unsigned char *buf, *ptr = NULL; 986 unsigned char *buf, *ptr = NULL;
1002 sector_t lba; 987 sector_t lba;
1003 int size; 988 int size;
1004 u32 range; 989 u32 range;
1005 sense_reason_t ret = 0; 990 sense_reason_t ret = 0;
1006 int dl, bd_dl; 991 int dl, bd_dl;
1007 992
1008 /* We never set ANC_SUP */ 993 /* We never set ANC_SUP */
1009 if (cmd->t_task_cdb[1]) 994 if (cmd->t_task_cdb[1])
1010 return TCM_INVALID_CDB_FIELD; 995 return TCM_INVALID_CDB_FIELD;
1011 996
1012 if (cmd->data_length == 0) { 997 if (cmd->data_length == 0) {
1013 target_complete_cmd(cmd, SAM_STAT_GOOD); 998 target_complete_cmd(cmd, SAM_STAT_GOOD);
1014 return 0; 999 return 0;
1015 } 1000 }
1016 1001
1017 if (cmd->data_length < 8) { 1002 if (cmd->data_length < 8) {
1018 pr_warn("UNMAP parameter list length %u too small\n", 1003 pr_warn("UNMAP parameter list length %u too small\n",
1019 cmd->data_length); 1004 cmd->data_length);
1020 return TCM_PARAMETER_LIST_LENGTH_ERROR; 1005 return TCM_PARAMETER_LIST_LENGTH_ERROR;
1021 } 1006 }
1022 1007
1023 buf = transport_kmap_data_sg(cmd); 1008 buf = transport_kmap_data_sg(cmd);
1024 if (!buf) 1009 if (!buf)
1025 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1010 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1026 1011
1027 dl = get_unaligned_be16(&buf[0]); 1012 dl = get_unaligned_be16(&buf[0]);
1028 bd_dl = get_unaligned_be16(&buf[2]); 1013 bd_dl = get_unaligned_be16(&buf[2]);
1029 1014
1030 size = cmd->data_length - 8; 1015 size = cmd->data_length - 8;
1031 if (bd_dl > size) 1016 if (bd_dl > size)
1032 pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n", 1017 pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n",
1033 cmd->data_length, bd_dl); 1018 cmd->data_length, bd_dl);
1034 else 1019 else
1035 size = bd_dl; 1020 size = bd_dl;
1036 1021
1037 if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) { 1022 if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) {
1038 ret = TCM_INVALID_PARAMETER_LIST; 1023 ret = TCM_INVALID_PARAMETER_LIST;
1039 goto err; 1024 goto err;
1040 } 1025 }
1041 1026
1042 /* First UNMAP block descriptor starts at 8 byte offset */ 1027 /* First UNMAP block descriptor starts at 8 byte offset */
1043 ptr = &buf[8]; 1028 ptr = &buf[8];
1044 pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u" 1029 pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
1045 " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr); 1030 " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);
1046 1031
1047 while (size >= 16) { 1032 while (size >= 16) {
1048 lba = get_unaligned_be64(&ptr[0]); 1033 lba = get_unaligned_be64(&ptr[0]);
1049 range = get_unaligned_be32(&ptr[8]); 1034 range = get_unaligned_be32(&ptr[8]);
1050 pr_debug("UNMAP: Using lba: %llu and range: %u\n", 1035 pr_debug("UNMAP: Using lba: %llu and range: %u\n",
1051 (unsigned long long)lba, range); 1036 (unsigned long long)lba, range);
1052 1037
1053 if (range > dev->dev_attrib.max_unmap_lba_count) { 1038 if (range > dev->dev_attrib.max_unmap_lba_count) {
1054 ret = TCM_INVALID_PARAMETER_LIST; 1039 ret = TCM_INVALID_PARAMETER_LIST;
1055 goto err; 1040 goto err;
1056 } 1041 }
1057 1042
1058 if (lba + range > dev->transport->get_blocks(dev) + 1) { 1043 if (lba + range > dev->transport->get_blocks(dev) + 1) {
1059 ret = TCM_ADDRESS_OUT_OF_RANGE; 1044 ret = TCM_ADDRESS_OUT_OF_RANGE;
1060 goto err; 1045 goto err;
1061 } 1046 }
1062 1047
1063 ret = do_unmap_fn(cmd, priv, lba, range); 1048 ret = do_unmap_fn(cmd, priv, lba, range);
1064 if (ret) 1049 if (ret)
1065 goto err; 1050 goto err;
1066 1051
1067 ptr += 16; 1052 ptr += 16;
1068 size -= 16; 1053 size -= 16;
1069 } 1054 }
1070 1055
1071 err: 1056 err:
1072 transport_kunmap_data_sg(cmd); 1057 transport_kunmap_data_sg(cmd);
1073 if (!ret) 1058 if (!ret)
1074 target_complete_cmd(cmd, GOOD); 1059 target_complete_cmd(cmd, GOOD);
1075 return ret; 1060 return ret;
1076 } 1061 }
1077 EXPORT_SYMBOL(sbc_execute_unmap); 1062 EXPORT_SYMBOL(sbc_execute_unmap);
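
sbc_execute_unmap() above walks the UNMAP parameter list: an 8-byte header carrying the data length and block-descriptor data length, followed by 16-byte descriptors made up of an 8-byte LBA, a 4-byte block count, and 4 reserved bytes. A stand-alone sketch of decoding one descriptor (illustrative only; the kernel uses get_unaligned_be64/be32 instead):

	/* Illustrative only: one 16-byte UNMAP block descriptor, as walked above. */
	#include <stdint.h>

	struct unmap_desc_example {
		uint64_t lba;
		uint32_t nr_blocks;
	};

	static struct unmap_desc_example unmap_parse_desc(const unsigned char *p)
	{
		struct unmap_desc_example d;
		int i;

		d.lba = 0;
		for (i = 0; i < 8; i++)
			d.lba = (d.lba << 8) | p[i];

		d.nr_blocks = 0;
		for (i = 8; i < 12; i++)
			d.nr_blocks = (d.nr_blocks << 8) | p[i];

		return d;
	}
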
1078 1063
1079 void 1064 void
1080 sbc_dif_generate(struct se_cmd *cmd) 1065 sbc_dif_generate(struct se_cmd *cmd)
1081 { 1066 {
1082 struct se_device *dev = cmd->se_dev; 1067 struct se_device *dev = cmd->se_dev;
1083 struct se_dif_v1_tuple *sdt; 1068 struct se_dif_v1_tuple *sdt;
1084 struct scatterlist *dsg, *psg = cmd->t_prot_sg; 1069 struct scatterlist *dsg, *psg = cmd->t_prot_sg;
1085 sector_t sector = cmd->t_task_lba; 1070 sector_t sector = cmd->t_task_lba;
1086 void *daddr, *paddr; 1071 void *daddr, *paddr;
1087 int i, j, offset = 0; 1072 int i, j, offset = 0;
1088 1073
1089 for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) { 1074 for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
1090 daddr = kmap_atomic(sg_page(dsg)) + dsg->offset; 1075 daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
1091 paddr = kmap_atomic(sg_page(psg)) + psg->offset; 1076 paddr = kmap_atomic(sg_page(psg)) + psg->offset;
1092 1077
1093 for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) { 1078 for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {
1094 1079
1095 if (offset >= psg->length) { 1080 if (offset >= psg->length) {
1096 kunmap_atomic(paddr); 1081 kunmap_atomic(paddr);
1097 psg = sg_next(psg); 1082 psg = sg_next(psg);
1098 paddr = kmap_atomic(sg_page(psg)) + psg->offset; 1083 paddr = kmap_atomic(sg_page(psg)) + psg->offset;
1099 offset = 0; 1084 offset = 0;
1100 } 1085 }
1101 1086
1102 sdt = paddr + offset; 1087 sdt = paddr + offset;
1103 sdt->guard_tag = cpu_to_be16(crc_t10dif(daddr + j, 1088 sdt->guard_tag = cpu_to_be16(crc_t10dif(daddr + j,
1104 dev->dev_attrib.block_size)); 1089 dev->dev_attrib.block_size));
1105 if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT) 1090 if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT)
1106 sdt->ref_tag = cpu_to_be32(sector & 0xffffffff); 1091 sdt->ref_tag = cpu_to_be32(sector & 0xffffffff);
1107 sdt->app_tag = 0; 1092 sdt->app_tag = 0;
1108 1093
1109 pr_debug("DIF WRITE INSERT sector: %llu guard_tag: 0x%04x" 1094 pr_debug("DIF WRITE INSERT sector: %llu guard_tag: 0x%04x"
1110 " app_tag: 0x%04x ref_tag: %u\n", 1095 " app_tag: 0x%04x ref_tag: %u\n",
1111 (unsigned long long)sector, sdt->guard_tag, 1096 (unsigned long long)sector, sdt->guard_tag,
1112 sdt->app_tag, be32_to_cpu(sdt->ref_tag)); 1097 sdt->app_tag, be32_to_cpu(sdt->ref_tag));
1113 1098
1114 sector++; 1099 sector++;
1115 offset += sizeof(struct se_dif_v1_tuple); 1100 offset += sizeof(struct se_dif_v1_tuple);
1116 } 1101 }
1117 1102
1118 kunmap_atomic(paddr); 1103 kunmap_atomic(paddr);
1119 kunmap_atomic(daddr); 1104 kunmap_atomic(daddr);
1120 } 1105 }
1121 } 1106 }
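
sbc_dif_generate() above emits one 8-byte DIF v1 tuple per logical block: a CRC16 guard tag over the block data, an application tag, and (for Type 1) a reference tag holding the low 32 bits of the LBA. A hedged sketch of that tuple layout (field names are illustrative; the kernel's own definition is struct se_dif_v1_tuple):

	/* Illustrative only: the 8-byte DIF v1 tuple appended per logical block,
	 * matching the fields generated and verified in the code above.
	 */
	#include <stdint.h>

	struct dif_v1_tuple_example {
		uint16_t guard_tag;   /* big-endian T10 CRC16 of the data block       */
		uint16_t app_tag;     /* application tag; 0xffff means "do not check" */
		uint32_t ref_tag;     /* Type 1: low 32 bits of the LBA               */
	};
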
1122 1107
1123 static sense_reason_t 1108 static sense_reason_t
1124 sbc_dif_v1_verify(struct se_device *dev, struct se_dif_v1_tuple *sdt, 1109 sbc_dif_v1_verify(struct se_device *dev, struct se_dif_v1_tuple *sdt,
1125 const void *p, sector_t sector, unsigned int ei_lba) 1110 const void *p, sector_t sector, unsigned int ei_lba)
1126 { 1111 {
1127 int block_size = dev->dev_attrib.block_size; 1112 int block_size = dev->dev_attrib.block_size;
1128 __be16 csum; 1113 __be16 csum;
1129 1114
1130 csum = cpu_to_be16(crc_t10dif(p, block_size)); 1115 csum = cpu_to_be16(crc_t10dif(p, block_size));
1131 1116
1132 if (sdt->guard_tag != csum) { 1117 if (sdt->guard_tag != csum) {
1133 pr_err("DIFv1 checksum failed on sector %llu guard tag 0x%04x" 1118 pr_err("DIFv1 checksum failed on sector %llu guard tag 0x%04x"
1134 " csum 0x%04x\n", (unsigned long long)sector, 1119 " csum 0x%04x\n", (unsigned long long)sector,
1135 be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum)); 1120 be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
1136 return TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED; 1121 return TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
1137 } 1122 }
1138 1123
1139 if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT && 1124 if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT &&
1140 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) { 1125 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
1141 pr_err("DIFv1 Type 1 reference failed on sector: %llu tag: 0x%08x" 1126 pr_err("DIFv1 Type 1 reference failed on sector: %llu tag: 0x%08x"
1142 " sector MSB: 0x%08x\n", (unsigned long long)sector, 1127 " sector MSB: 0x%08x\n", (unsigned long long)sector,
1143 be32_to_cpu(sdt->ref_tag), (u32)(sector & 0xffffffff)); 1128 be32_to_cpu(sdt->ref_tag), (u32)(sector & 0xffffffff));
1144 return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; 1129 return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
1145 } 1130 }
1146 1131
1147 if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE2_PROT && 1132 if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE2_PROT &&
1148 be32_to_cpu(sdt->ref_tag) != ei_lba) { 1133 be32_to_cpu(sdt->ref_tag) != ei_lba) {
1149 pr_err("DIFv1 Type 2 reference failed on sector: %llu tag: 0x%08x" 1134 pr_err("DIFv1 Type 2 reference failed on sector: %llu tag: 0x%08x"
1150 " ei_lba: 0x%08x\n", (unsigned long long)sector, 1135 " ei_lba: 0x%08x\n", (unsigned long long)sector,
1151 be32_to_cpu(sdt->ref_tag), ei_lba); 1136 be32_to_cpu(sdt->ref_tag), ei_lba);
1152 return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; 1137 return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
1153 } 1138 }
1154 1139
1155 return 0; 1140 return 0;
1156 } 1141 }
1157 1142
1158 static void 1143 static void
1159 sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read, 1144 sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
1160 struct scatterlist *sg, int sg_off) 1145 struct scatterlist *sg, int sg_off)
1161 { 1146 {
1162 struct se_device *dev = cmd->se_dev; 1147 struct se_device *dev = cmd->se_dev;
1163 struct scatterlist *psg; 1148 struct scatterlist *psg;
1164 void *paddr, *addr; 1149 void *paddr, *addr;
1165 unsigned int i, len, left; 1150 unsigned int i, len, left;
1166 unsigned int offset = sg_off; 1151 unsigned int offset = sg_off;
1167 1152
1168 left = sectors * dev->prot_length; 1153 left = sectors * dev->prot_length;
1169 1154
1170 for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) { 1155 for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
1171 unsigned int psg_len, copied = 0; 1156 unsigned int psg_len, copied = 0;
1172 1157
1173 paddr = kmap_atomic(sg_page(psg)) + psg->offset; 1158 paddr = kmap_atomic(sg_page(psg)) + psg->offset;
1174 psg_len = min(left, psg->length); 1159 psg_len = min(left, psg->length);
1175 while (psg_len) { 1160 while (psg_len) {
1176 len = min(psg_len, sg->length - offset); 1161 len = min(psg_len, sg->length - offset);
1177 addr = kmap_atomic(sg_page(sg)) + sg->offset + offset; 1162 addr = kmap_atomic(sg_page(sg)) + sg->offset + offset;
1178 1163
1179 if (read) 1164 if (read)
1180 memcpy(paddr + copied, addr, len); 1165 memcpy(paddr + copied, addr, len);
1181 else 1166 else
1182 memcpy(addr, paddr + copied, len); 1167 memcpy(addr, paddr + copied, len);
1183 1168
1184 left -= len; 1169 left -= len;
1185 offset += len; 1170 offset += len;
1186 copied += len; 1171 copied += len;
1187 psg_len -= len; 1172 psg_len -= len;
1188 1173
1189 if (offset >= sg->length) { 1174 if (offset >= sg->length) {
1190 sg = sg_next(sg); 1175 sg = sg_next(sg);
1191 offset = 0; 1176 offset = 0;
1192 } 1177 }
1193 kunmap_atomic(addr); 1178 kunmap_atomic(addr);
1194 } 1179 }
1195 kunmap_atomic(paddr); 1180 kunmap_atomic(paddr);
1196 } 1181 }
1197 } 1182 }
1198 1183
1199 sense_reason_t 1184 sense_reason_t
1200 sbc_dif_verify_write(struct se_cmd *cmd, sector_t start, unsigned int sectors, 1185 sbc_dif_verify_write(struct se_cmd *cmd, sector_t start, unsigned int sectors,
1201 unsigned int ei_lba, struct scatterlist *sg, int sg_off) 1186 unsigned int ei_lba, struct scatterlist *sg, int sg_off)
1202 { 1187 {
1203 struct se_device *dev = cmd->se_dev; 1188 struct se_device *dev = cmd->se_dev;
1204 struct se_dif_v1_tuple *sdt; 1189 struct se_dif_v1_tuple *sdt;
1205 struct scatterlist *dsg, *psg = cmd->t_prot_sg; 1190 struct scatterlist *dsg, *psg = cmd->t_prot_sg;
1206 sector_t sector = start; 1191 sector_t sector = start;
1207 void *daddr, *paddr; 1192 void *daddr, *paddr;
1208 int i, j, offset = 0; 1193 int i, j, offset = 0;
1209 sense_reason_t rc; 1194 sense_reason_t rc;
1210 1195
1211 for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) { 1196 for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
1212 daddr = kmap_atomic(sg_page(dsg)) + dsg->offset; 1197 daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
1213 paddr = kmap_atomic(sg_page(psg)) + psg->offset; 1198 paddr = kmap_atomic(sg_page(psg)) + psg->offset;
1214 1199
1215 for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) { 1200 for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {
1216 1201
1217 if (offset >= psg->length) { 1202 if (offset >= psg->length) {
1218 kunmap_atomic(paddr); 1203 kunmap_atomic(paddr);
1219 psg = sg_next(psg); 1204 psg = sg_next(psg);
1220 paddr = kmap_atomic(sg_page(psg)) + psg->offset; 1205 paddr = kmap_atomic(sg_page(psg)) + psg->offset;
1221 offset = 0; 1206 offset = 0;
1222 } 1207 }
1223 1208
1224 sdt = paddr + offset; 1209 sdt = paddr + offset;
1225 1210
1226 pr_debug("DIF WRITE sector: %llu guard_tag: 0x%04x" 1211 pr_debug("DIF WRITE sector: %llu guard_tag: 0x%04x"
1227 " app_tag: 0x%04x ref_tag: %u\n", 1212 " app_tag: 0x%04x ref_tag: %u\n",
1228 (unsigned long long)sector, sdt->guard_tag, 1213 (unsigned long long)sector, sdt->guard_tag,
1229 sdt->app_tag, be32_to_cpu(sdt->ref_tag)); 1214 sdt->app_tag, be32_to_cpu(sdt->ref_tag));
1230 1215
1231 rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector, 1216 rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector,
1232 ei_lba); 1217 ei_lba);
1233 if (rc) { 1218 if (rc) {
1234 kunmap_atomic(paddr); 1219 kunmap_atomic(paddr);
1235 kunmap_atomic(daddr); 1220 kunmap_atomic(daddr);
1236 cmd->bad_sector = sector; 1221 cmd->bad_sector = sector;
1237 return rc; 1222 return rc;
1238 } 1223 }
1239 1224
1240 sector++; 1225 sector++;
1241 ei_lba++; 1226 ei_lba++;
1242 offset += sizeof(struct se_dif_v1_tuple); 1227 offset += sizeof(struct se_dif_v1_tuple);
1243 } 1228 }
1244 1229
1245 kunmap_atomic(paddr); 1230 kunmap_atomic(paddr);
1246 kunmap_atomic(daddr); 1231 kunmap_atomic(daddr);
1247 } 1232 }
1248 sbc_dif_copy_prot(cmd, sectors, false, sg, sg_off); 1233 sbc_dif_copy_prot(cmd, sectors, false, sg, sg_off);
1249 1234
1250 return 0; 1235 return 0;
1251 } 1236 }
1252 EXPORT_SYMBOL(sbc_dif_verify_write); 1237 EXPORT_SYMBOL(sbc_dif_verify_write);
1253 1238
1254 static sense_reason_t 1239 static sense_reason_t
1255 __sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors, 1240 __sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
1256 unsigned int ei_lba, struct scatterlist *sg, int sg_off) 1241 unsigned int ei_lba, struct scatterlist *sg, int sg_off)
1257 { 1242 {
1258 struct se_device *dev = cmd->se_dev; 1243 struct se_device *dev = cmd->se_dev;
1259 struct se_dif_v1_tuple *sdt; 1244 struct se_dif_v1_tuple *sdt;
1260 struct scatterlist *dsg, *psg = sg; 1245 struct scatterlist *dsg, *psg = sg;
1261 sector_t sector = start; 1246 sector_t sector = start;
1262 void *daddr, *paddr; 1247 void *daddr, *paddr;
1263 int i, j, offset = sg_off; 1248 int i, j, offset = sg_off;
1264 sense_reason_t rc; 1249 sense_reason_t rc;
1265 1250
1266 for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) { 1251 for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
1267 daddr = kmap_atomic(sg_page(dsg)) + dsg->offset; 1252 daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
1268 paddr = kmap_atomic(sg_page(psg)) + sg->offset; 1253 paddr = kmap_atomic(sg_page(psg)) + sg->offset;
1269 1254
1270 for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) { 1255 for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {
1271 1256
1272 if (offset >= psg->length) { 1257 if (offset >= psg->length) {
1273 kunmap_atomic(paddr); 1258 kunmap_atomic(paddr);
1274 psg = sg_next(psg); 1259 psg = sg_next(psg);
1275 paddr = kmap_atomic(sg_page(psg)) + psg->offset; 1260 paddr = kmap_atomic(sg_page(psg)) + psg->offset;
1276 offset = 0; 1261 offset = 0;
1277 } 1262 }
1278 1263
1279 sdt = paddr + offset; 1264 sdt = paddr + offset;
1280 1265
1281 pr_debug("DIF READ sector: %llu guard_tag: 0x%04x" 1266 pr_debug("DIF READ sector: %llu guard_tag: 0x%04x"
1282 " app_tag: 0x%04x ref_tag: %u\n", 1267 " app_tag: 0x%04x ref_tag: %u\n",
1283 (unsigned long long)sector, sdt->guard_tag, 1268 (unsigned long long)sector, sdt->guard_tag,
1284 sdt->app_tag, be32_to_cpu(sdt->ref_tag)); 1269 sdt->app_tag, be32_to_cpu(sdt->ref_tag));
1285 1270
1286 if (sdt->app_tag == cpu_to_be16(0xffff)) { 1271 if (sdt->app_tag == cpu_to_be16(0xffff)) {
1287 sector++; 1272 sector++;
1288 offset += sizeof(struct se_dif_v1_tuple); 1273 offset += sizeof(struct se_dif_v1_tuple);
1289 continue; 1274 continue;
1290 } 1275 }
1291 1276
1292 rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector, 1277 rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector,
1293 ei_lba); 1278 ei_lba);
1294 if (rc) { 1279 if (rc) {
1295 kunmap_atomic(paddr); 1280 kunmap_atomic(paddr);
1296 kunmap_atomic(daddr); 1281 kunmap_atomic(daddr);
1297 cmd->bad_sector = sector; 1282 cmd->bad_sector = sector;
1298 return rc; 1283 return rc;
1299 } 1284 }
1300 1285
1301 sector++; 1286 sector++;
1302 ei_lba++; 1287 ei_lba++;
1303 offset += sizeof(struct se_dif_v1_tuple); 1288 offset += sizeof(struct se_dif_v1_tuple);
1304 } 1289 }
1305 1290
1306 kunmap_atomic(paddr); 1291 kunmap_atomic(paddr);
1307 kunmap_atomic(daddr); 1292 kunmap_atomic(daddr);
1308 } 1293 }
1309 1294
1310 return 0; 1295 return 0;
1311 } 1296 }
1312 1297
1313 sense_reason_t 1298 sense_reason_t
1314 sbc_dif_read_strip(struct se_cmd *cmd) 1299 sbc_dif_read_strip(struct se_cmd *cmd)
1315 { 1300 {
1316 struct se_device *dev = cmd->se_dev; 1301 struct se_device *dev = cmd->se_dev;
1317 u32 sectors = cmd->prot_length / dev->prot_length; 1302 u32 sectors = cmd->prot_length / dev->prot_length;
1318 1303
1319 return __sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 0, 1304 return __sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 0,
1320 cmd->t_prot_sg, 0); 1305 cmd->t_prot_sg, 0);
1321 } 1306 }
1322 1307
1323 sense_reason_t 1308 sense_reason_t
1324 sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors, 1309 sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
1325 unsigned int ei_lba, struct scatterlist *sg, int sg_off) 1310 unsigned int ei_lba, struct scatterlist *sg, int sg_off)
1326 { 1311 {
1327 sense_reason_t rc; 1312 sense_reason_t rc;
1328 1313
1329 rc = __sbc_dif_verify_read(cmd, start, sectors, ei_lba, sg, sg_off); 1314 rc = __sbc_dif_verify_read(cmd, start, sectors, ei_lba, sg, sg_off);
1330 if (rc) 1315 if (rc)
1331 return rc; 1316 return rc;
1332 1317
1333 sbc_dif_copy_prot(cmd, sectors, true, sg, sg_off); 1318 sbc_dif_copy_prot(cmd, sectors, true, sg, sg_off);
1334 return 0; 1319 return 0;
1335 } 1320 }
1336 EXPORT_SYMBOL(sbc_dif_verify_read); 1321 EXPORT_SYMBOL(sbc_dif_verify_read);
1337 1322
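The read and write verify paths above both funnel into sbc_dif_v1_verify(), which checks the 8-byte protection tuple stored alongside each logical block: a CRC16-T10 guard over the block data, an application tag where 0xffff means "skip all checks", and (for Type 1) a reference tag carrying the low 32 bits of the LBA. The following is a rough user-space sketch of that check, not the kernel code itself; the struct, helper names and sample values are stand-ins.

/* Illustrative user-space sketch of a T10 DIF Type 1 tuple check. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>   /* htons/htonl/ntohs/ntohl for the big-endian fields */

struct dif_v1_tuple {            /* mirrors the on-disk 8-byte layout */
        uint16_t guard_tag;      /* CRC16-T10 of the data block (big-endian) */
        uint16_t app_tag;        /* 0xffff == escape, skip all checks */
        uint32_t ref_tag;        /* low 32 bits of the LBA (big-endian) */
};

/* CRC16-T10-DIF: polynomial 0x8bb7, MSB first, initial value 0. */
static uint16_t crc_t10dif(const uint8_t *buf, size_t len)
{
        uint16_t crc = 0;
        for (size_t i = 0; i < len; i++) {
                crc ^= (uint16_t)buf[i] << 8;
                for (int bit = 0; bit < 8; bit++)
                        crc = (crc & 0x8000) ? (uint16_t)((crc << 1) ^ 0x8bb7)
                                             : (uint16_t)(crc << 1);
        }
        return crc;
}

static int dif_v1_verify(const struct dif_v1_tuple *sdt, const uint8_t *data,
                         size_t block_size, uint64_t sector)
{
        if (sdt->app_tag == htons(0xffff))
                return 0;                         /* escape: nothing to check */
        if (ntohs(sdt->guard_tag) != crc_t10dif(data, block_size))
                return -1;                        /* guard tag mismatch */
        if (ntohl(sdt->ref_tag) != (uint32_t)(sector & 0xffffffff))
                return -2;                        /* reference tag mismatch */
        return 0;
}

int main(void)
{
        uint8_t block[512];
        memset(block, 0xab, sizeof(block));

        struct dif_v1_tuple sdt = {
                .guard_tag = htons(crc_t10dif(block, sizeof(block))),
                .app_tag   = htons(0x0000),
                .ref_tag   = htonl(1234),
        };
        printf("verify: %d\n", dif_v1_verify(&sdt, block, sizeof(block), 1234));
        return 0;
}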
drivers/target/target_core_spc.c
1 /* 1 /*
2 * SCSI Primary Commands (SPC) parsing and emulation. 2 * SCSI Primary Commands (SPC) parsing and emulation.
3 * 3 *
4 * (c) Copyright 2002-2013 Datera, Inc. 4 * (c) Copyright 2002-2013 Datera, Inc.
5 * 5 *
6 * Nicholas A. Bellinger <nab@kernel.org> 6 * Nicholas A. Bellinger <nab@kernel.org>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or 10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version. 11 * (at your option) any later version.
12 * 12 *
13 * This program is distributed in the hope that it will be useful, 13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details. 16 * GNU General Public License for more details.
17 * 17 *
18 * You should have received a copy of the GNU General Public License 18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software 19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 20 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 */ 21 */
22 22
23 #include <linux/kernel.h> 23 #include <linux/kernel.h>
24 #include <linux/module.h> 24 #include <linux/module.h>
25 #include <asm/unaligned.h> 25 #include <asm/unaligned.h>
26 26
27 #include <scsi/scsi.h> 27 #include <scsi/scsi.h>
28 #include <scsi/scsi_tcq.h> 28 #include <scsi/scsi_tcq.h>
29 29
30 #include <target/target_core_base.h> 30 #include <target/target_core_base.h>
31 #include <target/target_core_backend.h> 31 #include <target/target_core_backend.h>
32 #include <target/target_core_fabric.h> 32 #include <target/target_core_fabric.h>
33 33
34 #include "target_core_internal.h" 34 #include "target_core_internal.h"
35 #include "target_core_alua.h" 35 #include "target_core_alua.h"
36 #include "target_core_pr.h" 36 #include "target_core_pr.h"
37 #include "target_core_ua.h" 37 #include "target_core_ua.h"
38 #include "target_core_xcopy.h" 38 #include "target_core_xcopy.h"
39 39
40 static void spc_fill_alua_data(struct se_port *port, unsigned char *buf) 40 static void spc_fill_alua_data(struct se_port *port, unsigned char *buf)
41 { 41 {
42 struct t10_alua_tg_pt_gp *tg_pt_gp; 42 struct t10_alua_tg_pt_gp *tg_pt_gp;
43 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; 43 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
44 44
45 /* 45 /*
46 * Set SCCS for MAINTENANCE_IN + REPORT_TARGET_PORT_GROUPS. 46 * Set SCCS for MAINTENANCE_IN + REPORT_TARGET_PORT_GROUPS.
47 */ 47 */
48 buf[5] = 0x80; 48 buf[5] = 0x80;
49 49
50 /* 50 /*
51 * Set TPGS field for explicit and/or implicit ALUA access type 51 * Set TPGS field for explicit and/or implicit ALUA access type
52 * and operation. 52 * and operation.
53 * 53 *
54 * See spc4r17 section 6.4.2 Table 135 54 * See spc4r17 section 6.4.2 Table 135
55 */ 55 */
56 if (!port) 56 if (!port)
57 return; 57 return;
58 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; 58 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
59 if (!tg_pt_gp_mem) 59 if (!tg_pt_gp_mem)
60 return; 60 return;
61 61
62 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 62 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
63 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; 63 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
64 if (tg_pt_gp) 64 if (tg_pt_gp)
65 buf[5] |= tg_pt_gp->tg_pt_gp_alua_access_type; 65 buf[5] |= tg_pt_gp->tg_pt_gp_alua_access_type;
66 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 66 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
67 } 67 }
68 68
69 sense_reason_t 69 sense_reason_t
70 spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf) 70 spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
71 { 71 {
72 struct se_lun *lun = cmd->se_lun; 72 struct se_lun *lun = cmd->se_lun;
73 struct se_device *dev = cmd->se_dev; 73 struct se_device *dev = cmd->se_dev;
74 struct se_session *sess = cmd->se_sess; 74 struct se_session *sess = cmd->se_sess;
75 75
76 /* Set RMB (removable media) for tape devices */ 76 /* Set RMB (removable media) for tape devices */
77 if (dev->transport->get_device_type(dev) == TYPE_TAPE) 77 if (dev->transport->get_device_type(dev) == TYPE_TAPE)
78 buf[1] = 0x80; 78 buf[1] = 0x80;
79 79
80 buf[2] = 0x05; /* SPC-3 */ 80 buf[2] = 0x05; /* SPC-3 */
81 81
82 /* 82 /*
83 * NORMACA and HISUP = 0, RESPONSE DATA FORMAT = 2 83 * NORMACA and HISUP = 0, RESPONSE DATA FORMAT = 2
84 * 84 *
85 * SPC4 says: 85 * SPC4 says:
86 * A RESPONSE DATA FORMAT field set to 2h indicates that the 86 * A RESPONSE DATA FORMAT field set to 2h indicates that the
87 * standard INQUIRY data is in the format defined in this 87 * standard INQUIRY data is in the format defined in this
88 * standard. Response data format values less than 2h are 88 * standard. Response data format values less than 2h are
89 * obsolete. Response data format values greater than 2h are 89 * obsolete. Response data format values greater than 2h are
90 * reserved. 90 * reserved.
91 */ 91 */
92 buf[3] = 2; 92 buf[3] = 2;
93 93
94 /* 94 /*
95 * Enable SCCS and TPGS fields for Emulated ALUA 95 * Enable SCCS and TPGS fields for Emulated ALUA
96 */ 96 */
97 spc_fill_alua_data(lun->lun_sep, buf); 97 spc_fill_alua_data(lun->lun_sep, buf);
98 98
99 /* 99 /*
100 * Set Third-Party Copy (3PC) bit to indicate support for EXTENDED_COPY 100 * Set Third-Party Copy (3PC) bit to indicate support for EXTENDED_COPY
101 */ 101 */
102 if (dev->dev_attrib.emulate_3pc) 102 if (dev->dev_attrib.emulate_3pc)
103 buf[5] |= 0x8; 103 buf[5] |= 0x8;
104 /* 104 /*
105 * Set Protection (PROTECT) bit when DIF has been enabled on the 105 * Set Protection (PROTECT) bit when DIF has been enabled on the
106 * device, and the transport supports VERIFY + PASS. 106 * device, and the transport supports VERIFY + PASS.
107 */ 107 */
108 if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) { 108 if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
109 if (dev->dev_attrib.pi_prot_type) 109 if (dev->dev_attrib.pi_prot_type)
110 buf[5] |= 0x1; 110 buf[5] |= 0x1;
111 } 111 }
112 112
113 buf[7] = 0x2; /* CmdQue=1 */ 113 buf[7] = 0x2; /* CmdQue=1 */
114 114
115 memcpy(&buf[8], "LIO-ORG ", 8); 115 memcpy(&buf[8], "LIO-ORG ", 8);
116 memset(&buf[16], 0x20, 16); 116 memset(&buf[16], 0x20, 16);
117 memcpy(&buf[16], dev->t10_wwn.model, 117 memcpy(&buf[16], dev->t10_wwn.model,
118 min_t(size_t, strlen(dev->t10_wwn.model), 16)); 118 min_t(size_t, strlen(dev->t10_wwn.model), 16));
119 memcpy(&buf[32], dev->t10_wwn.revision, 119 memcpy(&buf[32], dev->t10_wwn.revision,
120 min_t(size_t, strlen(dev->t10_wwn.revision), 4)); 120 min_t(size_t, strlen(dev->t10_wwn.revision), 4));
121 buf[4] = 31; /* Set additional length to 31 */ 121 buf[4] = 31; /* Set additional length to 31 */
122 122
123 return 0; 123 return 0;
124 } 124 }
125 EXPORT_SYMBOL(spc_emulate_inquiry_std); 125 EXPORT_SYMBOL(spc_emulate_inquiry_std);
126 126
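The standard INQUIRY payload built above has a fixed layout: peripheral device type in byte 0, version in byte 2, additional length in byte 4 (31, so 36 bytes total), T10 vendor at bytes 8-15, product identification at bytes 16-31 and product revision at bytes 32-35. A minimal initiator-side decoder for such a 36-byte response might look like the sketch below; the field offsets come from SPC, while the sample vendor/model/revision strings are made up for illustration.

#include <stdio.h>
#include <string.h>

/* Decode the fixed part of a standard INQUIRY response (SPC byte offsets). */
static void dump_std_inquiry(const unsigned char *buf)
{
        char vendor[9], model[17], rev[5];

        memcpy(vendor, &buf[8], 8);   vendor[8] = '\0';
        memcpy(model,  &buf[16], 16); model[16] = '\0';
        memcpy(rev,    &buf[32], 4);  rev[4]    = '\0';

        printf("type 0x%02x version 0x%02x total length %u\n",
               buf[0] & 0x1f, buf[2], buf[4] + 5);
        printf("vendor '%s' model '%s' rev '%s'\n", vendor, model, rev);
}

int main(void)
{
        unsigned char buf[36] = { 0 };

        buf[2] = 0x05;                  /* SPC-3 */
        buf[4] = 31;                    /* additional length */
        memcpy(&buf[8],  "LIO-ORG ", 8);
        memcpy(&buf[16], "FILEIO          ", 16);  /* made-up model string */
        memcpy(&buf[32], "4.0 ", 4);
        dump_std_inquiry(buf);
        return 0;
}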
127 /* unit serial number */ 127 /* unit serial number */
128 static sense_reason_t 128 static sense_reason_t
129 spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf) 129 spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
130 { 130 {
131 struct se_device *dev = cmd->se_dev; 131 struct se_device *dev = cmd->se_dev;
132 u16 len; 132 u16 len;
133 133
134 if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) { 134 if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
135 len = sprintf(&buf[4], "%s", dev->t10_wwn.unit_serial); 135 len = sprintf(&buf[4], "%s", dev->t10_wwn.unit_serial);
136 len++; /* Extra Byte for NULL Terminator */ 136 len++; /* Extra Byte for NULL Terminator */
137 buf[3] = len; 137 buf[3] = len;
138 } 138 }
139 return 0; 139 return 0;
140 } 140 }
141 141
142 void spc_parse_naa_6h_vendor_specific(struct se_device *dev, 142 void spc_parse_naa_6h_vendor_specific(struct se_device *dev,
143 unsigned char *buf) 143 unsigned char *buf)
144 { 144 {
145 unsigned char *p = &dev->t10_wwn.unit_serial[0]; 145 unsigned char *p = &dev->t10_wwn.unit_serial[0];
146 int cnt; 146 int cnt;
147 bool next = true; 147 bool next = true;
148 148
149 /* 149 /*
150 * Generate up to 36 bits of VENDOR SPECIFIC IDENTIFIER starting on 150 * Generate up to 36 bits of VENDOR SPECIFIC IDENTIFIER starting on
151 * byte 3 bit 3-0 for NAA IEEE Registered Extended DESIGNATOR field 151 * byte 3 bit 3-0 for NAA IEEE Registered Extended DESIGNATOR field
152 * format, followed by 64 bits of VENDOR SPECIFIC IDENTIFIER EXTENSION 152 * format, followed by 64 bits of VENDOR SPECIFIC IDENTIFIER EXTENSION
153 * to complete the payload. These are based from VPD=0x80 PRODUCT SERIAL 153 * to complete the payload. These are based from VPD=0x80 PRODUCT SERIAL
154 * NUMBER set via vpd_unit_serial in target_core_configfs.c to ensure 154 * NUMBER set via vpd_unit_serial in target_core_configfs.c to ensure
155 * per device uniqueness. 155 * per device uniqueness.
156 */ 156 */
157 for (cnt = 0; *p && cnt < 13; p++) { 157 for (cnt = 0; *p && cnt < 13; p++) {
158 int val = hex_to_bin(*p); 158 int val = hex_to_bin(*p);
159 159
160 if (val < 0) 160 if (val < 0)
161 continue; 161 continue;
162 162
163 if (next) { 163 if (next) {
164 next = false; 164 next = false;
165 buf[cnt++] |= val; 165 buf[cnt++] |= val;
166 } else { 166 } else {
167 next = true; 167 next = true;
168 buf[cnt] = val << 4; 168 buf[cnt] = val << 4;
169 } 169 }
170 } 170 }
171 } 171 }
172 172
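The loop above packs up to 25 hex digits of the unit serial into the tail of the NAA 6h designator, two digits per byte, skipping non-hex characters such as dashes; the first digit lands in the low nibble of the byte that already holds the last nibble of the IEEE company ID. A standalone sketch of the same packing follows; the function names and the serial string are made-up stand-ins for the configured vpd_unit_serial.

#include <stdio.h>
#include <ctype.h>

static int hex_to_bin(char ch)           /* same contract as the kernel helper */
{
        if (ch >= '0' && ch <= '9') return ch - '0';
        ch = tolower((unsigned char)ch);
        if (ch >= 'a' && ch <= 'f') return ch - 'a' + 10;
        return -1;
}

/* Pack hex digits of a serial string into the NAA 6h vendor-specific area. */
static void pack_naa_6h(const char *serial, unsigned char *buf)
{
        int cnt = 0;
        int next = 1;

        for (const char *p = serial; *p && cnt < 13; p++) {
                int val = hex_to_bin(*p);

                if (val < 0)
                        continue;                /* skip '-' and other non-hex */
                if (next) {
                        next = 0;
                        buf[cnt++] |= val;       /* low nibble of current byte */
                } else {
                        next = 1;
                        buf[cnt] = val << 4;     /* high nibble of next byte */
                }
        }
}

int main(void)
{
        unsigned char naa[13] = { 0x50 };         /* 0x5 company-ID nibble set */
        pack_naa_6h("f1e2-d3c4-b5a6-9788", naa);  /* made-up unit serial */

        for (int i = 0; i < 13; i++)
                printf("%02x", naa[i]);
        printf("\n");
        return 0;
}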
173 /* 173 /*
174 * Device identification VPD, for a complete list of 174 * Device identification VPD, for a complete list of
175 * DESIGNATOR TYPEs see spc4r17 Table 459. 175 * DESIGNATOR TYPEs see spc4r17 Table 459.
176 */ 176 */
177 sense_reason_t 177 sense_reason_t
178 spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf) 178 spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
179 { 179 {
180 struct se_device *dev = cmd->se_dev; 180 struct se_device *dev = cmd->se_dev;
181 struct se_lun *lun = cmd->se_lun; 181 struct se_lun *lun = cmd->se_lun;
182 struct se_port *port = NULL; 182 struct se_port *port = NULL;
183 struct se_portal_group *tpg = NULL; 183 struct se_portal_group *tpg = NULL;
184 struct t10_alua_lu_gp_member *lu_gp_mem; 184 struct t10_alua_lu_gp_member *lu_gp_mem;
185 struct t10_alua_tg_pt_gp *tg_pt_gp; 185 struct t10_alua_tg_pt_gp *tg_pt_gp;
186 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; 186 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
187 unsigned char *prod = &dev->t10_wwn.model[0]; 187 unsigned char *prod = &dev->t10_wwn.model[0];
188 u32 prod_len; 188 u32 prod_len;
189 u32 unit_serial_len, off = 0; 189 u32 unit_serial_len, off = 0;
190 u16 len = 0, id_len; 190 u16 len = 0, id_len;
191 191
192 off = 4; 192 off = 4;
193 193
194 /* 194 /*
195 * NAA IEEE Registered Extended Assigned designator format, see 195 * NAA IEEE Registered Extended Assigned designator format, see
196 * spc4r17 section 7.7.3.6.5 196 * spc4r17 section 7.7.3.6.5
197 * 197 *
198 * We depend upon a target_core_mod/ConfigFS provided 198 * We depend upon a target_core_mod/ConfigFS provided
199 * /sys/kernel/config/target/core/$HBA/$DEV/wwn/vpd_unit_serial 199 * /sys/kernel/config/target/core/$HBA/$DEV/wwn/vpd_unit_serial
200 * value in order to return the NAA id. 200 * value in order to return the NAA id.
201 */ 201 */
202 if (!(dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL)) 202 if (!(dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL))
203 goto check_t10_vend_desc; 203 goto check_t10_vend_desc;
204 204
205 /* CODE SET == Binary */ 205 /* CODE SET == Binary */
206 buf[off++] = 0x1; 206 buf[off++] = 0x1;
207 207
208 /* Set ASSOCIATION == addressed logical unit: 00b */ 208 /* Set ASSOCIATION == addressed logical unit: 00b */
209 buf[off] = 0x00; 209 buf[off] = 0x00;
210 210
211 /* Identifier/Designator type == NAA identifier */ 211 /* Identifier/Designator type == NAA identifier */
212 buf[off++] |= 0x3; 212 buf[off++] |= 0x3;
213 off++; 213 off++;
214 214
215 /* Identifier/Designator length */ 215 /* Identifier/Designator length */
216 buf[off++] = 0x10; 216 buf[off++] = 0x10;
217 217
218 /* 218 /*
219 * Start NAA IEEE Registered Extended Identifier/Designator 219 * Start NAA IEEE Registered Extended Identifier/Designator
220 */ 220 */
221 buf[off++] = (0x6 << 4); 221 buf[off++] = (0x6 << 4);
222 222
223 /* 223 /*
224 * Use OpenFabrics IEEE Company ID: 00 14 05 224 * Use OpenFabrics IEEE Company ID: 00 14 05
225 */ 225 */
226 buf[off++] = 0x01; 226 buf[off++] = 0x01;
227 buf[off++] = 0x40; 227 buf[off++] = 0x40;
228 buf[off] = (0x5 << 4); 228 buf[off] = (0x5 << 4);
229 229
230 /* 230 /*
231 * Return ConfigFS Unit Serial Number information for 231 * Return ConfigFS Unit Serial Number information for
232 * VENDOR_SPECIFIC_IDENTIFIER and 232 * VENDOR_SPECIFIC_IDENTIFIER and
233 * VENDOR_SPECIFIC_IDENTIFIER_EXTENSION 233 * VENDOR_SPECIFIC_IDENTIFIER_EXTENSION
234 */ 234 */
235 spc_parse_naa_6h_vendor_specific(dev, &buf[off]); 235 spc_parse_naa_6h_vendor_specific(dev, &buf[off]);
236 236
237 len = 20; 237 len = 20;
238 off = (len + 4); 238 off = (len + 4);
239 239
240 check_t10_vend_desc: 240 check_t10_vend_desc:
241 /* 241 /*
242 * T10 Vendor Identifier Page, see spc4r17 section 7.7.3.4 242 * T10 Vendor Identifier Page, see spc4r17 section 7.7.3.4
243 */ 243 */
244 id_len = 8; /* For Vendor field */ 244 id_len = 8; /* For Vendor field */
245 prod_len = 4; /* For VPD Header */ 245 prod_len = 4; /* For VPD Header */
246 prod_len += 8; /* For Vendor field */ 246 prod_len += 8; /* For Vendor field */
247 prod_len += strlen(prod); 247 prod_len += strlen(prod);
248 prod_len++; /* For : */ 248 prod_len++; /* For : */
249 249
250 if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) { 250 if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
251 unit_serial_len = strlen(&dev->t10_wwn.unit_serial[0]); 251 unit_serial_len = strlen(&dev->t10_wwn.unit_serial[0]);
252 unit_serial_len++; /* For NULL Terminator */ 252 unit_serial_len++; /* For NULL Terminator */
253 253
254 id_len += sprintf(&buf[off+12], "%s:%s", prod, 254 id_len += sprintf(&buf[off+12], "%s:%s", prod,
255 &dev->t10_wwn.unit_serial[0]); 255 &dev->t10_wwn.unit_serial[0]);
256 } 256 }
257 buf[off] = 0x2; /* ASCII */ 257 buf[off] = 0x2; /* ASCII */
258 buf[off+1] = 0x1; /* T10 Vendor ID */ 258 buf[off+1] = 0x1; /* T10 Vendor ID */
259 buf[off+2] = 0x0; 259 buf[off+2] = 0x0;
260 memcpy(&buf[off+4], "LIO-ORG", 8); 260 memcpy(&buf[off+4], "LIO-ORG", 8);
261 /* Extra Byte for NULL Terminator */ 261 /* Extra Byte for NULL Terminator */
262 id_len++; 262 id_len++;
263 /* Identifier Length */ 263 /* Identifier Length */
264 buf[off+3] = id_len; 264 buf[off+3] = id_len;
265 /* Header size for Designation descriptor */ 265 /* Header size for Designation descriptor */
266 len += (id_len + 4); 266 len += (id_len + 4);
267 off += (id_len + 4); 267 off += (id_len + 4);
268 /* 268 /*
269 * struct se_port is only set for INQUIRY VPD=1 through $FABRIC_MOD 269 * struct se_port is only set for INQUIRY VPD=1 through $FABRIC_MOD
270 */ 270 */
271 port = lun->lun_sep; 271 port = lun->lun_sep;
272 if (port) { 272 if (port) {
273 struct t10_alua_lu_gp *lu_gp; 273 struct t10_alua_lu_gp *lu_gp;
274 u32 padding, scsi_name_len, scsi_target_len; 274 u32 padding, scsi_name_len, scsi_target_len;
275 u16 lu_gp_id = 0; 275 u16 lu_gp_id = 0;
276 u16 tg_pt_gp_id = 0; 276 u16 tg_pt_gp_id = 0;
277 u16 tpgt; 277 u16 tpgt;
278 278
279 tpg = port->sep_tpg; 279 tpg = port->sep_tpg;
280 /* 280 /*
281 * Relative target port identifier, see spc4r17 281 * Relative target port identifier, see spc4r17
282 * section 7.7.3.7 282 * section 7.7.3.7
283 * 283 *
284 * Get the PROTOCOL IDENTIFIER as defined by spc4r17 284 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
285 * section 7.5.1 Table 362 285 * section 7.5.1 Table 362
286 */ 286 */
287 buf[off] = 287 buf[off] =
288 (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4); 288 (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
289 buf[off++] |= 0x1; /* CODE SET == Binary */ 289 buf[off++] |= 0x1; /* CODE SET == Binary */
290 buf[off] = 0x80; /* Set PIV=1 */ 290 buf[off] = 0x80; /* Set PIV=1 */
291 /* Set ASSOCIATION == target port: 01b */ 291 /* Set ASSOCIATION == target port: 01b */
292 buf[off] |= 0x10; 292 buf[off] |= 0x10;
293 /* DESIGNATOR TYPE == Relative target port identifier */ 293 /* DESIGNATOR TYPE == Relative target port identifier */
294 buf[off++] |= 0x4; 294 buf[off++] |= 0x4;
295 off++; /* Skip over Reserved */ 295 off++; /* Skip over Reserved */
296 buf[off++] = 4; /* DESIGNATOR LENGTH */ 296 buf[off++] = 4; /* DESIGNATOR LENGTH */
297 /* Skip over Obsolete field in RTPI payload 297 /* Skip over Obsolete field in RTPI payload
298 * in Table 472 */ 298 * in Table 472 */
299 off += 2; 299 off += 2;
300 buf[off++] = ((port->sep_rtpi >> 8) & 0xff); 300 buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
301 buf[off++] = (port->sep_rtpi & 0xff); 301 buf[off++] = (port->sep_rtpi & 0xff);
302 len += 8; /* Header size + Designation descriptor */ 302 len += 8; /* Header size + Designation descriptor */
303 /* 303 /*
304 * Target port group identifier, see spc4r17 304 * Target port group identifier, see spc4r17
305 * section 7.7.3.8 305 * section 7.7.3.8
306 * 306 *
307 * Get the PROTOCOL IDENTIFIER as defined by spc4r17 307 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
308 * section 7.5.1 Table 362 308 * section 7.5.1 Table 362
309 */ 309 */
310 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; 310 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
311 if (!tg_pt_gp_mem) 311 if (!tg_pt_gp_mem)
312 goto check_lu_gp; 312 goto check_lu_gp;
313 313
314 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 314 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
315 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; 315 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
316 if (!tg_pt_gp) { 316 if (!tg_pt_gp) {
317 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 317 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
318 goto check_lu_gp; 318 goto check_lu_gp;
319 } 319 }
320 tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id; 320 tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id;
321 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 321 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
322 322
323 buf[off] = 323 buf[off] =
324 (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4); 324 (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
325 buf[off++] |= 0x1; /* CODE SET == Binary */ 325 buf[off++] |= 0x1; /* CODE SET == Binary */
326 buf[off] = 0x80; /* Set PIV=1 */ 326 buf[off] = 0x80; /* Set PIV=1 */
327 /* Set ASSOCIATION == target port: 01b */ 327 /* Set ASSOCIATION == target port: 01b */
328 buf[off] |= 0x10; 328 buf[off] |= 0x10;
329 /* DESIGNATOR TYPE == Target port group identifier */ 329 /* DESIGNATOR TYPE == Target port group identifier */
330 buf[off++] |= 0x5; 330 buf[off++] |= 0x5;
331 off++; /* Skip over Reserved */ 331 off++; /* Skip over Reserved */
332 buf[off++] = 4; /* DESIGNATOR LENGTH */ 332 buf[off++] = 4; /* DESIGNATOR LENGTH */
333 off += 2; /* Skip over Reserved Field */ 333 off += 2; /* Skip over Reserved Field */
334 buf[off++] = ((tg_pt_gp_id >> 8) & 0xff); 334 buf[off++] = ((tg_pt_gp_id >> 8) & 0xff);
335 buf[off++] = (tg_pt_gp_id & 0xff); 335 buf[off++] = (tg_pt_gp_id & 0xff);
336 len += 8; /* Header size + Designation descriptor */ 336 len += 8; /* Header size + Designation descriptor */
337 /* 337 /*
338 * Logical Unit Group identifier, see spc4r17 338 * Logical Unit Group identifier, see spc4r17
339 * section 7.7.3.8 339 * section 7.7.3.8
340 */ 340 */
341 check_lu_gp: 341 check_lu_gp:
342 lu_gp_mem = dev->dev_alua_lu_gp_mem; 342 lu_gp_mem = dev->dev_alua_lu_gp_mem;
343 if (!lu_gp_mem) 343 if (!lu_gp_mem)
344 goto check_scsi_name; 344 goto check_scsi_name;
345 345
346 spin_lock(&lu_gp_mem->lu_gp_mem_lock); 346 spin_lock(&lu_gp_mem->lu_gp_mem_lock);
347 lu_gp = lu_gp_mem->lu_gp; 347 lu_gp = lu_gp_mem->lu_gp;
348 if (!lu_gp) { 348 if (!lu_gp) {
349 spin_unlock(&lu_gp_mem->lu_gp_mem_lock); 349 spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
350 goto check_scsi_name; 350 goto check_scsi_name;
351 } 351 }
352 lu_gp_id = lu_gp->lu_gp_id; 352 lu_gp_id = lu_gp->lu_gp_id;
353 spin_unlock(&lu_gp_mem->lu_gp_mem_lock); 353 spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
354 354
355 buf[off++] |= 0x1; /* CODE SET == Binary */ 355 buf[off++] |= 0x1; /* CODE SET == Binary */
356 /* DESIGNATOR TYPE == Logical Unit Group identifier */ 356 /* DESIGNATOR TYPE == Logical Unit Group identifier */
357 buf[off++] |= 0x6; 357 buf[off++] |= 0x6;
358 off++; /* Skip over Reserved */ 358 off++; /* Skip over Reserved */
359 buf[off++] = 4; /* DESIGNATOR LENGTH */ 359 buf[off++] = 4; /* DESIGNATOR LENGTH */
360 off += 2; /* Skip over Reserved Field */ 360 off += 2; /* Skip over Reserved Field */
361 buf[off++] = ((lu_gp_id >> 8) & 0xff); 361 buf[off++] = ((lu_gp_id >> 8) & 0xff);
362 buf[off++] = (lu_gp_id & 0xff); 362 buf[off++] = (lu_gp_id & 0xff);
363 len += 8; /* Header size + Designation descriptor */ 363 len += 8; /* Header size + Designation descriptor */
364 /* 364 /*
365 * SCSI name string designator, see spc4r17 365 * SCSI name string designator, see spc4r17
366 * section 7.7.3.11 366 * section 7.7.3.11
367 * 367 *
368 * Get the PROTOCOL IDENTIFIER as defined by spc4r17 368 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
369 * section 7.5.1 Table 362 369 * section 7.5.1 Table 362
370 */ 370 */
371 check_scsi_name: 371 check_scsi_name:
372 buf[off] = 372 buf[off] =
373 (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4); 373 (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
374 buf[off++] |= 0x3; /* CODE SET == UTF-8 */ 374 buf[off++] |= 0x3; /* CODE SET == UTF-8 */
375 buf[off] = 0x80; /* Set PIV=1 */ 375 buf[off] = 0x80; /* Set PIV=1 */
376 /* Set ASSOCIATION == target port: 01b */ 376 /* Set ASSOCIATION == target port: 01b */
377 buf[off] |= 0x10; 377 buf[off] |= 0x10;
378 /* DESIGNATOR TYPE == SCSI name string */ 378 /* DESIGNATOR TYPE == SCSI name string */
379 buf[off++] |= 0x8; 379 buf[off++] |= 0x8;
380 off += 2; /* Skip over Reserved and length */ 380 off += 2; /* Skip over Reserved and length */
381 /* 381 /*
382 * SCSI name string identifier containing $FABRIC_MOD 382 * SCSI name string identifier containing $FABRIC_MOD
383 * dependent information. For LIO-Target and iSCSI 383 * dependent information. For LIO-Target and iSCSI
384 * Target Port, this means "<iSCSI name>,t,0x<TPGT>" in 384 * Target Port, this means "<iSCSI name>,t,0x<TPGT>" in
385 * UTF-8 encoding. 385 * UTF-8 encoding.
386 */ 386 */
387 tpgt = tpg->se_tpg_tfo->tpg_get_tag(tpg); 387 tpgt = tpg->se_tpg_tfo->tpg_get_tag(tpg);
388 scsi_name_len = sprintf(&buf[off], "%s,t,0x%04x", 388 scsi_name_len = sprintf(&buf[off], "%s,t,0x%04x",
389 tpg->se_tpg_tfo->tpg_get_wwn(tpg), tpgt); 389 tpg->se_tpg_tfo->tpg_get_wwn(tpg), tpgt);
390 scsi_name_len += 1 /* Include NULL terminator */; 390 scsi_name_len += 1 /* Include NULL terminator */;
391 /* 391 /*
392 * The null-terminated, null-padded (see 4.4.2) SCSI 392 * The null-terminated, null-padded (see 4.4.2) SCSI
393 * NAME STRING field contains a UTF-8 format string. 393 * NAME STRING field contains a UTF-8 format string.
394 * The number of bytes in the SCSI NAME STRING field 394 * The number of bytes in the SCSI NAME STRING field
395 * (i.e., the value in the DESIGNATOR LENGTH field) 395 * (i.e., the value in the DESIGNATOR LENGTH field)
396 * shall be no larger than 256 and shall be a multiple 396 * shall be no larger than 256 and shall be a multiple
397 * of four. 397 * of four.
398 */ 398 */
399 padding = ((-scsi_name_len) & 3); 399 padding = ((-scsi_name_len) & 3);
400 if (padding) 400 if (padding)
401 scsi_name_len += padding; 401 scsi_name_len += padding;
402 if (scsi_name_len > 256) 402 if (scsi_name_len > 256)
403 scsi_name_len = 256; 403 scsi_name_len = 256;
404 404
405 buf[off-1] = scsi_name_len; 405 buf[off-1] = scsi_name_len;
406 off += scsi_name_len; 406 off += scsi_name_len;
407 /* Header size + Designation descriptor */ 407 /* Header size + Designation descriptor */
408 len += (scsi_name_len + 4); 408 len += (scsi_name_len + 4);
409 409
410 /* 410 /*
411 * Target device designator 411 * Target device designator
412 */ 412 */
413 buf[off] = 413 buf[off] =
414 (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4); 414 (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
415 buf[off++] |= 0x3; /* CODE SET == UTF-8 */ 415 buf[off++] |= 0x3; /* CODE SET == UTF-8 */
416 buf[off] = 0x80; /* Set PIV=1 */ 416 buf[off] = 0x80; /* Set PIV=1 */
417 /* Set ASSOCIATION == target device: 10b */ 417 /* Set ASSOCIATION == target device: 10b */
418 buf[off] |= 0x20; 418 buf[off] |= 0x20;
419 /* DESIGNATOR TYPE == SCSI name string */ 419 /* DESIGNATOR TYPE == SCSI name string */
420 buf[off++] |= 0x8; 420 buf[off++] |= 0x8;
421 off += 2; /* Skip over Reserved and length */ 421 off += 2; /* Skip over Reserved and length */
422 /* 422 /*
423 * SCSI name string identifier containing $FABRIC_MOD 423 * SCSI name string identifier containing $FABRIC_MOD
424 * dependent information. For LIO-Target and iSCSI 424 * dependent information. For LIO-Target and iSCSI
425 * Target Port, this means "<iSCSI name>" in 425 * Target Port, this means "<iSCSI name>" in
426 * UTF-8 encoding. 426 * UTF-8 encoding.
427 */ 427 */
428 scsi_target_len = sprintf(&buf[off], "%s", 428 scsi_target_len = sprintf(&buf[off], "%s",
429 tpg->se_tpg_tfo->tpg_get_wwn(tpg)); 429 tpg->se_tpg_tfo->tpg_get_wwn(tpg));
430 scsi_target_len += 1 /* Include NULL terminator */; 430 scsi_target_len += 1 /* Include NULL terminator */;
431 /* 431 /*
432 * The null-terminated, null-padded (see 4.4.2) SCSI 432 * The null-terminated, null-padded (see 4.4.2) SCSI
433 * NAME STRING field contains a UTF-8 format string. 433 * NAME STRING field contains a UTF-8 format string.
434 * The number of bytes in the SCSI NAME STRING field 434 * The number of bytes in the SCSI NAME STRING field
435 * (i.e., the value in the DESIGNATOR LENGTH field) 435 * (i.e., the value in the DESIGNATOR LENGTH field)
436 * shall be no larger than 256 and shall be a multiple 436 * shall be no larger than 256 and shall be a multiple
437 * of four. 437 * of four.
438 */ 438 */
439 padding = ((-scsi_target_len) & 3); 439 padding = ((-scsi_target_len) & 3);
440 if (padding) 440 if (padding)
441 scsi_target_len += padding; 441 scsi_target_len += padding;
442 if (scsi_target_len > 256) 442 if (scsi_target_len > 256)
443 scsi_target_len = 256; 443 scsi_target_len = 256;
444 444
445 buf[off-1] = scsi_target_len; 445 buf[off-1] = scsi_target_len;
446 off += scsi_target_len; 446 off += scsi_target_len;
447 447
448 /* Header size + Designation descriptor */ 448 /* Header size + Designation descriptor */
449 len += (scsi_target_len + 4); 449 len += (scsi_target_len + 4);
450 } 450 }
451 buf[2] = ((len >> 8) & 0xff); 451 buf[2] = ((len >> 8) & 0xff);
452 buf[3] = (len & 0xff); /* Page Length for VPD 0x83 */ 452 buf[3] = (len & 0xff); /* Page Length for VPD 0x83 */
453 return 0; 453 return 0;
454 } 454 }
455 EXPORT_SYMBOL(spc_emulate_evpd_83); 455 EXPORT_SYMBOL(spc_emulate_evpd_83);
456 456
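Both SCSI name string descriptors above round their length up to a multiple of four with padding = ((-len) & 3), which works because in two's complement (-len) modulo 4 is exactly the distance to the next multiple of four. A quick standalone check of that identity, for illustration only:

#include <stdio.h>

int main(void)
{
        /* ((-len) & 3) is the number of pad bytes to the next multiple of 4. */
        for (int len = 1; len <= 8; len++) {
                int padding = (-len) & 3;
                printf("len %d -> padding %d -> padded %d\n",
                       len, padding, len + padding);
        }
        return 0;
}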
457 static bool 457 static bool
458 spc_check_dev_wce(struct se_device *dev) 458 spc_check_dev_wce(struct se_device *dev)
459 { 459 {
460 bool wce = false; 460 bool wce = false;
461 461
462 if (dev->transport->get_write_cache) 462 if (dev->transport->get_write_cache)
463 wce = dev->transport->get_write_cache(dev); 463 wce = dev->transport->get_write_cache(dev);
464 else if (dev->dev_attrib.emulate_write_cache > 0) 464 else if (dev->dev_attrib.emulate_write_cache > 0)
465 wce = true; 465 wce = true;
466 466
467 return wce; 467 return wce;
468 } 468 }
469 469
470 /* Extended INQUIRY Data VPD Page */ 470 /* Extended INQUIRY Data VPD Page */
471 static sense_reason_t 471 static sense_reason_t
472 spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf) 472 spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
473 { 473 {
474 struct se_device *dev = cmd->se_dev; 474 struct se_device *dev = cmd->se_dev;
475 struct se_session *sess = cmd->se_sess; 475 struct se_session *sess = cmd->se_sess;
476 476
477 buf[3] = 0x3c; 477 buf[3] = 0x3c;
478 /* 478 /*
479 * Set GRD_CHK + REF_CHK for TYPE1 protection, or GRD_CHK 479 * Set GRD_CHK + REF_CHK for TYPE1 protection, or GRD_CHK
480 * only for TYPE3 protection. 480 * only for TYPE3 protection.
481 */ 481 */
482 if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) { 482 if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
483 if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT) 483 if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT)
484 buf[4] = 0x5; 484 buf[4] = 0x5;
485 else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT) 485 else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT)
486 buf[4] = 0x4; 486 buf[4] = 0x4;
487 } 487 }
488 488
489 /* Set HEADSUP, ORDSUP, SIMPSUP */ 489 /* Set HEADSUP, ORDSUP, SIMPSUP */
490 buf[5] = 0x07; 490 buf[5] = 0x07;
491 491
492 /* If WriteCache emulation is enabled, set V_SUP */ 492 /* If WriteCache emulation is enabled, set V_SUP */
493 if (spc_check_dev_wce(dev)) 493 if (spc_check_dev_wce(dev))
494 buf[6] = 0x01; 494 buf[6] = 0x01;
495 /* If an LBA map is present set R_SUP */ 495 /* If an LBA map is present set R_SUP */
496 spin_lock(&cmd->se_dev->t10_alua.lba_map_lock); 496 spin_lock(&cmd->se_dev->t10_alua.lba_map_lock);
497 if (!list_empty(&dev->t10_alua.lba_map_list)) 497 if (!list_empty(&dev->t10_alua.lba_map_list))
498 buf[8] = 0x10; 498 buf[8] = 0x10;
499 spin_unlock(&cmd->se_dev->t10_alua.lba_map_lock); 499 spin_unlock(&cmd->se_dev->t10_alua.lba_map_lock);
500 return 0; 500 return 0;
501 } 501 }
502 502
503 /* Block Limits VPD page */ 503 /* Block Limits VPD page */
504 static sense_reason_t 504 static sense_reason_t
505 spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) 505 spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
506 { 506 {
507 struct se_device *dev = cmd->se_dev; 507 struct se_device *dev = cmd->se_dev;
508 u32 max_sectors;
509 int have_tp = 0; 508 int have_tp = 0;
510 int opt, min; 509 int opt, min;
511 510
512 /* 511 /*
513 * Following spc3r22 section 6.5.3 Block Limits VPD page, when 512 * Following spc3r22 section 6.5.3 Block Limits VPD page, when
514 * emulate_tpu=1 or emulate_tpws=1 we will expect a 513 * emulate_tpu=1 or emulate_tpws=1 we will expect a
515 * different page length for Thin Provisioning. 514 * different page length for Thin Provisioning.
516 */ 515 */
517 if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws) 516 if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
518 have_tp = 1; 517 have_tp = 1;
519 518
520 buf[0] = dev->transport->get_device_type(dev); 519 buf[0] = dev->transport->get_device_type(dev);
521 buf[3] = have_tp ? 0x3c : 0x10; 520 buf[3] = have_tp ? 0x3c : 0x10;
522 521
523 /* Set WSNZ to 1 */ 522 /* Set WSNZ to 1 */
524 buf[4] = 0x01; 523 buf[4] = 0x01;
525 /* 524 /*
526 * Set MAXIMUM COMPARE AND WRITE LENGTH 525 * Set MAXIMUM COMPARE AND WRITE LENGTH
527 */ 526 */
528 if (dev->dev_attrib.emulate_caw) 527 if (dev->dev_attrib.emulate_caw)
529 buf[5] = 0x01; 528 buf[5] = 0x01;
530 529
531 /* 530 /*
532 * Set OPTIMAL TRANSFER LENGTH GRANULARITY 531 * Set OPTIMAL TRANSFER LENGTH GRANULARITY
533 */ 532 */
534 if (dev->transport->get_io_min && (min = dev->transport->get_io_min(dev))) 533 if (dev->transport->get_io_min && (min = dev->transport->get_io_min(dev)))
535 put_unaligned_be16(min / dev->dev_attrib.block_size, &buf[6]); 534 put_unaligned_be16(min / dev->dev_attrib.block_size, &buf[6]);
536 else 535 else
537 put_unaligned_be16(1, &buf[6]); 536 put_unaligned_be16(1, &buf[6]);
538 537
539 /* 538 /*
540 * Set MAXIMUM TRANSFER LENGTH 539 * Set MAXIMUM TRANSFER LENGTH
541 */ 540 */
542 max_sectors = min(dev->dev_attrib.fabric_max_sectors, 541 put_unaligned_be32(dev->dev_attrib.hw_max_sectors, &buf[8]);
543 dev->dev_attrib.hw_max_sectors);
544 put_unaligned_be32(max_sectors, &buf[8]);
545 542
546 /* 543 /*
547 * Set OPTIMAL TRANSFER LENGTH 544 * Set OPTIMAL TRANSFER LENGTH
548 */ 545 */
549 if (dev->transport->get_io_opt && (opt = dev->transport->get_io_opt(dev))) 546 if (dev->transport->get_io_opt && (opt = dev->transport->get_io_opt(dev)))
550 put_unaligned_be32(opt / dev->dev_attrib.block_size, &buf[12]); 547 put_unaligned_be32(opt / dev->dev_attrib.block_size, &buf[12]);
551 else 548 else
552 put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]); 549 put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]);
553 550
554 /* 551 /*
555 * Exit now if we don't support TP. 552 * Exit now if we don't support TP.
556 */ 553 */
557 if (!have_tp) 554 if (!have_tp)
558 goto max_write_same; 555 goto max_write_same;
559 556
560 /* 557 /*
561 * Set MAXIMUM UNMAP LBA COUNT 558 * Set MAXIMUM UNMAP LBA COUNT
562 */ 559 */
563 put_unaligned_be32(dev->dev_attrib.max_unmap_lba_count, &buf[20]); 560 put_unaligned_be32(dev->dev_attrib.max_unmap_lba_count, &buf[20]);
564 561
565 /* 562 /*
566 * Set MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT 563 * Set MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT
567 */ 564 */
568 put_unaligned_be32(dev->dev_attrib.max_unmap_block_desc_count, 565 put_unaligned_be32(dev->dev_attrib.max_unmap_block_desc_count,
569 &buf[24]); 566 &buf[24]);
570 567
571 /* 568 /*
572 * Set OPTIMAL UNMAP GRANULARITY 569 * Set OPTIMAL UNMAP GRANULARITY
573 */ 570 */
574 put_unaligned_be32(dev->dev_attrib.unmap_granularity, &buf[28]); 571 put_unaligned_be32(dev->dev_attrib.unmap_granularity, &buf[28]);
575 572
576 /* 573 /*
577 * UNMAP GRANULARITY ALIGNMENT 574 * UNMAP GRANULARITY ALIGNMENT
578 */ 575 */
579 put_unaligned_be32(dev->dev_attrib.unmap_granularity_alignment, 576 put_unaligned_be32(dev->dev_attrib.unmap_granularity_alignment,
580 &buf[32]); 577 &buf[32]);
581 if (dev->dev_attrib.unmap_granularity_alignment != 0) 578 if (dev->dev_attrib.unmap_granularity_alignment != 0)
582 buf[32] |= 0x80; /* Set the UGAVALID bit */ 579 buf[32] |= 0x80; /* Set the UGAVALID bit */
583 580
584 /* 581 /*
585 * MAXIMUM WRITE SAME LENGTH 582 * MAXIMUM WRITE SAME LENGTH
586 */ 583 */
587 max_write_same: 584 max_write_same:
588 put_unaligned_be64(dev->dev_attrib.max_write_same_len, &buf[36]); 585 put_unaligned_be64(dev->dev_attrib.max_write_same_len, &buf[36]);
589 586
590 return 0; 587 return 0;
591 } 588 }
592 589
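With this change the MAXIMUM TRANSFER LENGTH at bytes 8-11 of the Block Limits page reports hw_max_sectors alone, so the cap an initiator derives from it reflects only the backend's reported granularity rather than the old 8192-block (4 MB at 512-byte blocks) fabric limit. The sketch below shows how an initiator side might turn that field into a byte limit; the helper name and the 32768-block sample value are illustrative, not taken from any particular backend.

#include <stdint.h>
#include <stdio.h>

/* Read a big-endian 32-bit field out of a VPD page buffer. */
static uint32_t get_be32(const unsigned char *p)
{
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

int main(void)
{
        unsigned char b0[64] = { 0 };   /* pretend Block Limits VPD payload */
        uint32_t block_size = 512;

        /* The target now stores hw_max_sectors here, e.g. 32768 blocks. */
        b0[8]  = 0x00; b0[9]  = 0x00; b0[10] = 0x80; b0[11] = 0x00;

        uint32_t max_xfer_blocks = get_be32(&b0[8]);
        uint64_t max_xfer_bytes  = (uint64_t)max_xfer_blocks * block_size;

        printf("MAXIMUM TRANSFER LENGTH: %u blocks (%llu bytes)\n",
               max_xfer_blocks, (unsigned long long)max_xfer_bytes);
        return 0;
}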
593 /* Block Device Characteristics VPD page */ 590 /* Block Device Characteristics VPD page */
594 static sense_reason_t 591 static sense_reason_t
595 spc_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf) 592 spc_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf)
596 { 593 {
597 struct se_device *dev = cmd->se_dev; 594 struct se_device *dev = cmd->se_dev;
598 595
599 buf[0] = dev->transport->get_device_type(dev); 596 buf[0] = dev->transport->get_device_type(dev);
600 buf[3] = 0x3c; 597 buf[3] = 0x3c;
601 buf[5] = dev->dev_attrib.is_nonrot ? 1 : 0; 598 buf[5] = dev->dev_attrib.is_nonrot ? 1 : 0;
602 599
603 return 0; 600 return 0;
604 } 601 }
605 602
606 /* Thin Provisioning VPD */ 603 /* Thin Provisioning VPD */
607 static sense_reason_t 604 static sense_reason_t
608 spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf) 605 spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
609 { 606 {
610 struct se_device *dev = cmd->se_dev; 607 struct se_device *dev = cmd->se_dev;
611 608
612 /* 609 /*
613 * From spc3r22 section 6.5.4 Thin Provisioning VPD page: 610 * From spc3r22 section 6.5.4 Thin Provisioning VPD page:
614 * 611 *
615 * The PAGE LENGTH field is defined in SPC-4. If the DP bit is set to 612 * The PAGE LENGTH field is defined in SPC-4. If the DP bit is set to
616 * zero, then the page length shall be set to 0004h. If the DP bit 613 * zero, then the page length shall be set to 0004h. If the DP bit
617 * is set to one, then the page length shall be set to the value 614 * is set to one, then the page length shall be set to the value
618 * defined in table 162. 615 * defined in table 162.
619 */ 616 */
620 buf[0] = dev->transport->get_device_type(dev); 617 buf[0] = dev->transport->get_device_type(dev);
621 618
622 /* 619 /*
623 * Set Hardcoded length mentioned above for DP=0 620 * Set Hardcoded length mentioned above for DP=0
624 */ 621 */
625 put_unaligned_be16(0x0004, &buf[2]); 622 put_unaligned_be16(0x0004, &buf[2]);
626 623
627 /* 624 /*
628 * The THRESHOLD EXPONENT field indicates the threshold set size in 625 * The THRESHOLD EXPONENT field indicates the threshold set size in
629 * LBAs as a power of 2 (i.e., the threshold set size is equal to 626 * LBAs as a power of 2 (i.e., the threshold set size is equal to
630 * 2(threshold exponent)). 627 * 2(threshold exponent)).
631 * 628 *
632 * Note that this is currently set to 0x00 as mkp says it will be 629 * Note that this is currently set to 0x00 as mkp says it will be
633 * changing again. We can enable this once it has settled in T10 630 * changing again. We can enable this once it has settled in T10
634 * and is actually used by Linux/SCSI ML code. 631 * and is actually used by Linux/SCSI ML code.
635 */ 632 */
636 buf[4] = 0x00; 633 buf[4] = 0x00;
637 634
638 /* 635 /*
639 * A TPU bit set to one indicates that the device server supports 636 * A TPU bit set to one indicates that the device server supports
640 * the UNMAP command (see 5.25). A TPU bit set to zero indicates 637 * the UNMAP command (see 5.25). A TPU bit set to zero indicates
641 * that the device server does not support the UNMAP command. 638 * that the device server does not support the UNMAP command.
642 */ 639 */
643 if (dev->dev_attrib.emulate_tpu != 0) 640 if (dev->dev_attrib.emulate_tpu != 0)
644 buf[5] = 0x80; 641 buf[5] = 0x80;
645 642
646 /* 643 /*
647 * A TPWS bit set to one indicates that the device server supports 644 * A TPWS bit set to one indicates that the device server supports
648 * the use of the WRITE SAME (16) command (see 5.42) to unmap LBAs. 645 * the use of the WRITE SAME (16) command (see 5.42) to unmap LBAs.
649 * A TPWS bit set to zero indicates that the device server does not 646 * A TPWS bit set to zero indicates that the device server does not
650 * support the use of the WRITE SAME (16) command to unmap LBAs. 647 * support the use of the WRITE SAME (16) command to unmap LBAs.
651 */ 648 */
652 if (dev->dev_attrib.emulate_tpws != 0) 649 if (dev->dev_attrib.emulate_tpws != 0)
653 buf[5] |= 0x40; 650 buf[5] |= 0x40;
654 651
655 return 0; 652 return 0;
656 } 653 }
657 654
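The THRESHOLD EXPONENT at byte 4 of the Thin Provisioning page (left at zero above) expresses the threshold set size as a power of two in LBAs, i.e. size = 2^exponent. A tiny illustration with arbitrary exponent values:

#include <stdio.h>

int main(void)
{
        /* Threshold set size in LBAs is 2^(THRESHOLD EXPONENT). */
        for (unsigned exp = 8; exp <= 12; exp++)
                printf("exponent %u -> %llu LBAs\n", exp, 1ULL << exp);
        return 0;
}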
658 /* Referrals VPD page */ 655 /* Referrals VPD page */
659 static sense_reason_t 656 static sense_reason_t
660 spc_emulate_evpd_b3(struct se_cmd *cmd, unsigned char *buf) 657 spc_emulate_evpd_b3(struct se_cmd *cmd, unsigned char *buf)
661 { 658 {
662 struct se_device *dev = cmd->se_dev; 659 struct se_device *dev = cmd->se_dev;
663 660
664 buf[0] = dev->transport->get_device_type(dev); 661 buf[0] = dev->transport->get_device_type(dev);
665 buf[3] = 0x0c; 662 buf[3] = 0x0c;
666 put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[8]); 663 put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[8]);
667 put_unaligned_be32(dev->t10_alua.lba_map_segment_multiplier, &buf[12]); 664 put_unaligned_be32(dev->t10_alua.lba_map_segment_multiplier, &buf[12]);
668 665
669 return 0; 666 return 0;
670 } 667 }
671 668
672 static sense_reason_t 669 static sense_reason_t
673 spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf); 670 spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf);
674 671
675 static struct { 672 static struct {
676 uint8_t page; 673 uint8_t page;
677 sense_reason_t (*emulate)(struct se_cmd *, unsigned char *); 674 sense_reason_t (*emulate)(struct se_cmd *, unsigned char *);
678 } evpd_handlers[] = { 675 } evpd_handlers[] = {
679 { .page = 0x00, .emulate = spc_emulate_evpd_00 }, 676 { .page = 0x00, .emulate = spc_emulate_evpd_00 },
680 { .page = 0x80, .emulate = spc_emulate_evpd_80 }, 677 { .page = 0x80, .emulate = spc_emulate_evpd_80 },
681 { .page = 0x83, .emulate = spc_emulate_evpd_83 }, 678 { .page = 0x83, .emulate = spc_emulate_evpd_83 },
682 { .page = 0x86, .emulate = spc_emulate_evpd_86 }, 679 { .page = 0x86, .emulate = spc_emulate_evpd_86 },
683 { .page = 0xb0, .emulate = spc_emulate_evpd_b0 }, 680 { .page = 0xb0, .emulate = spc_emulate_evpd_b0 },
684 { .page = 0xb1, .emulate = spc_emulate_evpd_b1 }, 681 { .page = 0xb1, .emulate = spc_emulate_evpd_b1 },
685 { .page = 0xb2, .emulate = spc_emulate_evpd_b2 }, 682 { .page = 0xb2, .emulate = spc_emulate_evpd_b2 },
686 { .page = 0xb3, .emulate = spc_emulate_evpd_b3 }, 683 { .page = 0xb3, .emulate = spc_emulate_evpd_b3 },
687 }; 684 };
688 685
689 /* supported vital product data pages */ 686 /* supported vital product data pages */
690 static sense_reason_t 687 static sense_reason_t
691 spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf) 688 spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
692 { 689 {
693 int p; 690 int p;
694 691
695 /* 692 /*
696 * Only report the INQUIRY EVPD=1 pages after a valid NAA 693 * Only report the INQUIRY EVPD=1 pages after a valid NAA
697 * Registered Extended LUN WWN has been set via ConfigFS 694 * Registered Extended LUN WWN has been set via ConfigFS
698 * during device creation/restart. 695 * during device creation/restart.
699 */ 696 */
700 if (cmd->se_dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) { 697 if (cmd->se_dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
701 buf[3] = ARRAY_SIZE(evpd_handlers); 698 buf[3] = ARRAY_SIZE(evpd_handlers);
702 for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p) 699 for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p)
703 buf[p + 4] = evpd_handlers[p].page; 700 buf[p + 4] = evpd_handlers[p].page;
704 } 701 }
705 702
706 return 0; 703 return 0;
707 } 704 }
708 705
709 static sense_reason_t 706 static sense_reason_t
710 spc_emulate_inquiry(struct se_cmd *cmd) 707 spc_emulate_inquiry(struct se_cmd *cmd)
711 { 708 {
712 struct se_device *dev = cmd->se_dev; 709 struct se_device *dev = cmd->se_dev;
713 struct se_portal_group *tpg = cmd->se_lun->lun_sep->sep_tpg; 710 struct se_portal_group *tpg = cmd->se_lun->lun_sep->sep_tpg;
714 unsigned char *rbuf; 711 unsigned char *rbuf;
715 unsigned char *cdb = cmd->t_task_cdb; 712 unsigned char *cdb = cmd->t_task_cdb;
716 unsigned char *buf; 713 unsigned char *buf;
717 sense_reason_t ret; 714 sense_reason_t ret;
718 int p; 715 int p;
719 int len = 0; 716 int len = 0;
720 717
721 buf = kzalloc(SE_INQUIRY_BUF, GFP_KERNEL); 718 buf = kzalloc(SE_INQUIRY_BUF, GFP_KERNEL);
722 if (!buf) { 719 if (!buf) {
723 pr_err("Unable to allocate response buffer for INQUIRY\n"); 720 pr_err("Unable to allocate response buffer for INQUIRY\n");
724 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 721 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
725 } 722 }
726 723
727 if (dev == tpg->tpg_virt_lun0.lun_se_dev) 724 if (dev == tpg->tpg_virt_lun0.lun_se_dev)
728 buf[0] = 0x3f; /* Not connected */ 725 buf[0] = 0x3f; /* Not connected */
729 else 726 else
730 buf[0] = dev->transport->get_device_type(dev); 727 buf[0] = dev->transport->get_device_type(dev);
731 728
732 if (!(cdb[1] & 0x1)) { 729 if (!(cdb[1] & 0x1)) {
733 if (cdb[2]) { 730 if (cdb[2]) {
734 pr_err("INQUIRY with EVPD==0 but PAGE CODE=%02x\n", 731 pr_err("INQUIRY with EVPD==0 but PAGE CODE=%02x\n",
735 cdb[2]); 732 cdb[2]);
736 ret = TCM_INVALID_CDB_FIELD; 733 ret = TCM_INVALID_CDB_FIELD;
737 goto out; 734 goto out;
738 } 735 }
739 736
740 ret = spc_emulate_inquiry_std(cmd, buf); 737 ret = spc_emulate_inquiry_std(cmd, buf);
741 len = buf[4] + 5; 738 len = buf[4] + 5;
742 goto out; 739 goto out;
743 } 740 }
744 741
745 for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p) { 742 for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p) {
746 if (cdb[2] == evpd_handlers[p].page) { 743 if (cdb[2] == evpd_handlers[p].page) {
747 buf[1] = cdb[2]; 744 buf[1] = cdb[2];
748 ret = evpd_handlers[p].emulate(cmd, buf); 745 ret = evpd_handlers[p].emulate(cmd, buf);
749 len = get_unaligned_be16(&buf[2]) + 4; 746 len = get_unaligned_be16(&buf[2]) + 4;
750 goto out; 747 goto out;
751 } 748 }
752 } 749 }
753 750
754 pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]); 751 pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
755 ret = TCM_INVALID_CDB_FIELD; 752 ret = TCM_INVALID_CDB_FIELD;
756 753
757 out: 754 out:
758 rbuf = transport_kmap_data_sg(cmd); 755 rbuf = transport_kmap_data_sg(cmd);
759 if (rbuf) { 756 if (rbuf) {
760 memcpy(rbuf, buf, min_t(u32, SE_INQUIRY_BUF, cmd->data_length)); 757 memcpy(rbuf, buf, min_t(u32, SE_INQUIRY_BUF, cmd->data_length));
761 transport_kunmap_data_sg(cmd); 758 transport_kunmap_data_sg(cmd);
762 } 759 }
763 kfree(buf); 760 kfree(buf);
764 761
765 if (!ret) 762 if (!ret)
766 target_complete_cmd_with_length(cmd, GOOD, len); 763 target_complete_cmd_with_length(cmd, GOOD, len);
767 return ret; 764 return ret;
768 } 765 }
769 766
770 static int spc_modesense_rwrecovery(struct se_cmd *cmd, u8 pc, u8 *p) 767 static int spc_modesense_rwrecovery(struct se_cmd *cmd, u8 pc, u8 *p)
771 { 768 {
772 p[0] = 0x01; 769 p[0] = 0x01;
773 p[1] = 0x0a; 770 p[1] = 0x0a;
774 771
775 /* No changeable values for now */ 772 /* No changeable values for now */
776 if (pc == 1) 773 if (pc == 1)
777 goto out; 774 goto out;
778 775
779 out: 776 out:
780 return 12; 777 return 12;
781 } 778 }
782 779
783 static int spc_modesense_control(struct se_cmd *cmd, u8 pc, u8 *p) 780 static int spc_modesense_control(struct se_cmd *cmd, u8 pc, u8 *p)
784 { 781 {
785 struct se_device *dev = cmd->se_dev; 782 struct se_device *dev = cmd->se_dev;
786 struct se_session *sess = cmd->se_sess; 783 struct se_session *sess = cmd->se_sess;
787 784
788 p[0] = 0x0a; 785 p[0] = 0x0a;
789 p[1] = 0x0a; 786 p[1] = 0x0a;
790 787
791 /* No changeable values for now */ 788 /* No changeable values for now */
792 if (pc == 1) 789 if (pc == 1)
793 goto out; 790 goto out;
794 791
795 p[2] = 2; 792 p[2] = 2;
796 /* 793 /*
797 * From spc4r23, 7.4.7 Control mode page 794 * From spc4r23, 7.4.7 Control mode page
798 * 795 *
799 * The QUEUE ALGORITHM MODIFIER field (see table 368) specifies 796 * The QUEUE ALGORITHM MODIFIER field (see table 368) specifies
800 * restrictions on the algorithm used for reordering commands 797 * restrictions on the algorithm used for reordering commands
801 * having the SIMPLE task attribute (see SAM-4). 798 * having the SIMPLE task attribute (see SAM-4).
802 * 799 *
803 * Table 368 -- QUEUE ALGORITHM MODIFIER field 800 * Table 368 -- QUEUE ALGORITHM MODIFIER field
804 * Code Description 801 * Code Description
805 * 0h Restricted reordering 802 * 0h Restricted reordering
806 * 1h Unrestricted reordering allowed 803 * 1h Unrestricted reordering allowed
807 * 2h to 7h Reserved 804 * 2h to 7h Reserved
808 * 8h to Fh Vendor specific 805 * 8h to Fh Vendor specific
809 * 806 *
810 * A value of zero in the QUEUE ALGORITHM MODIFIER field specifies that 807 * A value of zero in the QUEUE ALGORITHM MODIFIER field specifies that
811 * the device server shall order the processing sequence of commands 808 * the device server shall order the processing sequence of commands
812 * having the SIMPLE task attribute such that data integrity is maintained 809 * having the SIMPLE task attribute such that data integrity is maintained
813 * for that I_T nexus (i.e., if the transmission of new SCSI transport protocol 810 * for that I_T nexus (i.e., if the transmission of new SCSI transport protocol
814 * requests is halted at any time, the final value of all data observable 811 * requests is halted at any time, the final value of all data observable
815 * on the medium shall be the same as if all the commands had been processed 812 * on the medium shall be the same as if all the commands had been processed
816 * with the ORDERED task attribute). 813 * with the ORDERED task attribute).
817 * 814 *
818 * A value of one in the QUEUE ALGORITHM MODIFIER field specifies that the 815 * A value of one in the QUEUE ALGORITHM MODIFIER field specifies that the
819 * device server may reorder the processing sequence of commands having the 816 * device server may reorder the processing sequence of commands having the
820 * SIMPLE task attribute in any manner. Any data integrity exposures related to 817 * SIMPLE task attribute in any manner. Any data integrity exposures related to
821 * command sequence order shall be explicitly handled by the application client 818 * command sequence order shall be explicitly handled by the application client
822 * through the selection of appropriate commands and task attributes. 819 * through the selection of appropriate commands and task attributes.
823 */ 820 */
824 p[3] = (dev->dev_attrib.emulate_rest_reord == 1) ? 0x00 : 0x10; 821 p[3] = (dev->dev_attrib.emulate_rest_reord == 1) ? 0x00 : 0x10;
825 /* 822 /*
826 * From spc4r17, section 7.4.6 Control mode Page 823 * From spc4r17, section 7.4.6 Control mode Page
827 * 824 *
828 * Unit Attention interlocks control (UN_INTLCK_CTRL) to code 00b 825 * Unit Attention interlocks control (UN_INTLCK_CTRL) to code 00b
829 * 826 *
830 * 00b: The logical unit shall clear any unit attention condition 827 * 00b: The logical unit shall clear any unit attention condition
831 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION 828 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
832 * status and shall not establish a unit attention condition when a com- 829 * status and shall not establish a unit attention condition when a com-
833 * mand is completed with BUSY, TASK SET FULL, or RESERVATION CONFLICT 830 * mand is completed with BUSY, TASK SET FULL, or RESERVATION CONFLICT
834 * status. 831 * status.
835 * 832 *
836 * 10b: The logical unit shall not clear any unit attention condition 833 * 10b: The logical unit shall not clear any unit attention condition
837 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION 834 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
838 * status and shall not establish a unit attention condition when 835 * status and shall not establish a unit attention condition when
839 * a command is completed with BUSY, TASK SET FULL, or RESERVATION 836 * a command is completed with BUSY, TASK SET FULL, or RESERVATION
840 * CONFLICT status. 837 * CONFLICT status.
841 * 838 *
842 * 11b a The logical unit shall not clear any unit attention condition 839 * 11b a The logical unit shall not clear any unit attention condition
843 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION 840 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
844 * status and shall establish a unit attention condition for the 841 * status and shall establish a unit attention condition for the
845 * initiator port associated with the I_T nexus on which the BUSY, 842 * initiator port associated with the I_T nexus on which the BUSY,
846 * TASK SET FULL, or RESERVATION CONFLICT status is being returned. 843 * TASK SET FULL, or RESERVATION CONFLICT status is being returned.
847 * Depending on the status, the additional sense code shall be set to 844 * Depending on the status, the additional sense code shall be set to
848 * PREVIOUS BUSY STATUS, PREVIOUS TASK SET FULL STATUS, or PREVIOUS 845 * PREVIOUS BUSY STATUS, PREVIOUS TASK SET FULL STATUS, or PREVIOUS
849 * RESERVATION CONFLICT STATUS. Until it is cleared by a REQUEST SENSE 846 * RESERVATION CONFLICT STATUS. Until it is cleared by a REQUEST SENSE
850 * command, a unit attention condition shall be established only once 847 * command, a unit attention condition shall be established only once
851 * for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless 848 * for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless
852 * to the number of commands completed with one of those status codes. 849 * to the number of commands completed with one of those status codes.
853 */ 850 */
854 p[4] = (dev->dev_attrib.emulate_ua_intlck_ctrl == 2) ? 0x30 : 851 p[4] = (dev->dev_attrib.emulate_ua_intlck_ctrl == 2) ? 0x30 :
855 (dev->dev_attrib.emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00; 852 (dev->dev_attrib.emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00;
856 /* 853 /*
857 * From spc4r17, section 7.4.6 Control mode Page 854 * From spc4r17, section 7.4.6 Control mode Page
858 * 855 *
859 * Task Aborted Status (TAS) bit set to zero. 856 * Task Aborted Status (TAS) bit set to zero.
860 * 857 *
861 * A task aborted status (TAS) bit set to zero specifies that aborted 858 * A task aborted status (TAS) bit set to zero specifies that aborted
862 * tasks shall be terminated by the device server without any response 859 * tasks shall be terminated by the device server without any response
863 * to the application client. A TAS bit set to one specifies that tasks 860 * to the application client. A TAS bit set to one specifies that tasks
864 * aborted by the actions of an I_T nexus other than the I_T nexus on 861 * aborted by the actions of an I_T nexus other than the I_T nexus on
865 * which the command was received shall be completed with TASK ABORTED 862 * which the command was received shall be completed with TASK ABORTED
866 * status (see SAM-4). 863 * status (see SAM-4).
867 */ 864 */
868 p[5] = (dev->dev_attrib.emulate_tas) ? 0x40 : 0x00; 865 p[5] = (dev->dev_attrib.emulate_tas) ? 0x40 : 0x00;
869 /* 866 /*
870 * From spc4r30, section 7.5.7 Control mode page 867 * From spc4r30, section 7.5.7 Control mode page
871 * 868 *
872 * Application Tag Owner (ATO) bit set to one. 869 * Application Tag Owner (ATO) bit set to one.
873 * 870 *
874 * If the ATO bit is set to one the device server shall not modify the 871 * If the ATO bit is set to one the device server shall not modify the
875 * LOGICAL BLOCK APPLICATION TAG field and, depending on the protection 872 * LOGICAL BLOCK APPLICATION TAG field and, depending on the protection
876 * type, shall not modify the contents of the LOGICAL BLOCK REFERENCE 873 * type, shall not modify the contents of the LOGICAL BLOCK REFERENCE
877 * TAG field. 874 * TAG field.
878 */ 875 */
879 if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) { 876 if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
880 if (dev->dev_attrib.pi_prot_type) 877 if (dev->dev_attrib.pi_prot_type)
881 p[5] |= 0x80; 878 p[5] |= 0x80;
882 } 879 }
883 880
884 p[8] = 0xff; 881 p[8] = 0xff;
885 p[9] = 0xff; 882 p[9] = 0xff;
886 p[11] = 30; 883 p[11] = 30;
887 884
888 out: 885 out:
889 return 12; 886 return 12;
890 } 887 }
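
For reference alongside the control mode page handler above, here is a minimal userspace sketch (illustrative only, not part of the kernel change) of how bytes 4 and 5 fall out of the emulation attributes: UA_INTLCK_CTRL sits in bits 5:4 of byte 4, while TAS is bit 6 and ATO bit 7 of byte 5. Plain ints stand in for the dev->dev_attrib fields, and the session protection-ops check that gates ATO is omitted.

#include <stdio.h>

/* UA_INTLCK_CTRL occupies bits 5:4 of control mode page byte 4. */
static unsigned char control_byte4(int emulate_ua_intlck_ctrl)
{
	if (emulate_ua_intlck_ctrl == 2)
		return 0x30;	/* UA_INTLCK_CTRL = 11b */
	if (emulate_ua_intlck_ctrl == 1)
		return 0x20;	/* UA_INTLCK_CTRL = 10b */
	return 0x00;		/* UA_INTLCK_CTRL = 00b */
}

/* TAS is bit 6 and ATO bit 7 of control mode page byte 5. */
static unsigned char control_byte5(int emulate_tas, int pi_prot_type)
{
	unsigned char b = emulate_tas ? 0x40 : 0x00;	/* TAS */

	if (pi_prot_type)
		b |= 0x80;				/* ATO */
	return b;
}

int main(void)
{
	printf("byte4=0x%02x byte5=0x%02x\n",
	       control_byte4(2), control_byte5(1, 1));	/* 0x30 0xc0 */
	return 0;
}
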
891 888
892 static int spc_modesense_caching(struct se_cmd *cmd, u8 pc, u8 *p) 889 static int spc_modesense_caching(struct se_cmd *cmd, u8 pc, u8 *p)
893 { 890 {
894 struct se_device *dev = cmd->se_dev; 891 struct se_device *dev = cmd->se_dev;
895 892
896 p[0] = 0x08; 893 p[0] = 0x08;
897 p[1] = 0x12; 894 p[1] = 0x12;
898 895
899 /* No changeable values for now */ 896 /* No changeable values for now */
900 if (pc == 1) 897 if (pc == 1)
901 goto out; 898 goto out;
902 899
903 if (spc_check_dev_wce(dev)) 900 if (spc_check_dev_wce(dev))
904 p[2] = 0x04; /* Write Cache Enable */ 901 p[2] = 0x04; /* Write Cache Enable */
905 p[12] = 0x20; /* Disabled Read Ahead */ 902 p[12] = 0x20; /* Disabled Read Ahead */
906 903
907 out: 904 out:
908 return 20; 905 return 20;
909 } 906 }
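
As a byte-level reminder of the caching page built above (page code 08h, page length 12h, i.e. 18 parameter bytes and 20 bytes in total): WCE is bit 2 of byte 2 and DRA bit 5 of byte 12. A minimal sketch, assuming a caller-supplied 20-byte buffer and a plain int in place of the write-cache attribute check:

#include <stdio.h>
#include <string.h>

/* Illustrative sketch of the caching mode page layout used above. */
static int fill_caching_page(unsigned char *p, int write_cache_enabled)
{
	memset(p, 0, 20);
	p[0] = 0x08;			/* page code */
	p[1] = 0x12;			/* page length: 18 parameter bytes */
	if (write_cache_enabled)
		p[2] = 0x04;		/* WCE */
	p[12] = 0x20;			/* DRA: read-ahead disabled */
	return 20;			/* bytes consumed, as returned above */
}

int main(void)
{
	unsigned char page[20];

	fill_caching_page(page, 1);
	printf("page %02x len %02x wce %02x dra %02x\n",
	       page[0], page[1], page[2], page[12]);
	return 0;
}
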
910 907
911 static int spc_modesense_informational_exceptions(struct se_cmd *cmd, u8 pc, unsigned char *p) 908 static int spc_modesense_informational_exceptions(struct se_cmd *cmd, u8 pc, unsigned char *p)
912 { 909 {
913 p[0] = 0x1c; 910 p[0] = 0x1c;
914 p[1] = 0x0a; 911 p[1] = 0x0a;
915 912
916 /* No changeable values for now */ 913 /* No changeable values for now */
917 if (pc == 1) 914 if (pc == 1)
918 goto out; 915 goto out;
919 916
920 out: 917 out:
921 return 12; 918 return 12;
922 } 919 }
923 920
924 static struct { 921 static struct {
925 uint8_t page; 922 uint8_t page;
926 uint8_t subpage; 923 uint8_t subpage;
927 int (*emulate)(struct se_cmd *, u8, unsigned char *); 924 int (*emulate)(struct se_cmd *, u8, unsigned char *);
928 } modesense_handlers[] = { 925 } modesense_handlers[] = {
929 { .page = 0x01, .subpage = 0x00, .emulate = spc_modesense_rwrecovery }, 926 { .page = 0x01, .subpage = 0x00, .emulate = spc_modesense_rwrecovery },
930 { .page = 0x08, .subpage = 0x00, .emulate = spc_modesense_caching }, 927 { .page = 0x08, .subpage = 0x00, .emulate = spc_modesense_caching },
931 { .page = 0x0a, .subpage = 0x00, .emulate = spc_modesense_control }, 928 { .page = 0x0a, .subpage = 0x00, .emulate = spc_modesense_control },
932 { .page = 0x1c, .subpage = 0x00, .emulate = spc_modesense_informational_exceptions }, 929 { .page = 0x1c, .subpage = 0x00, .emulate = spc_modesense_informational_exceptions },
933 }; 930 };
934 931
935 static void spc_modesense_write_protect(unsigned char *buf, int type) 932 static void spc_modesense_write_protect(unsigned char *buf, int type)
936 { 933 {
937 /* 934 /*
938 * I believe that the WP bit (bit 7) in the mode header is the same for 935 * I believe that the WP bit (bit 7) in the mode header is the same for
939 * all device types.. 936 * all device types..
940 */ 937 */
941 switch (type) { 938 switch (type) {
942 case TYPE_DISK: 939 case TYPE_DISK:
943 case TYPE_TAPE: 940 case TYPE_TAPE:
944 default: 941 default:
945 buf[0] |= 0x80; /* WP bit */ 942 buf[0] |= 0x80; /* WP bit */
946 break; 943 break;
947 } 944 }
948 } 945 }
949 946
950 static void spc_modesense_dpofua(unsigned char *buf, int type) 947 static void spc_modesense_dpofua(unsigned char *buf, int type)
951 { 948 {
952 switch (type) { 949 switch (type) {
953 case TYPE_DISK: 950 case TYPE_DISK:
954 buf[0] |= 0x10; /* DPOFUA bit */ 951 buf[0] |= 0x10; /* DPOFUA bit */
955 break; 952 break;
956 default: 953 default:
957 break; 954 break;
958 } 955 }
959 } 956 }
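
Both helpers above poke the DEVICE-SPECIFIC PARAMETER byte of the mode parameter header rather than a mode page: for disks, WP is bit 7 and DPOFUA bit 4. A minimal sketch of the combined effect (illustrative only; read_only and fua_capable are hypothetical stand-ins for the LUN access flags and WCE/FUA attribute checks made in spc_emulate_modesense() below):

#include <stdio.h>

/* Illustrative: device-specific parameter byte for a TYPE_DISK LUN. */
static unsigned char device_specific_parameter(int read_only, int fua_capable)
{
	unsigned char b = 0;

	if (read_only)
		b |= 0x80;	/* WP */
	if (fua_capable)
		b |= 0x10;	/* DPOFUA */
	return b;
}

int main(void)
{
	printf("0x%02x\n", device_specific_parameter(1, 1));	/* 0x90 */
	return 0;
}
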
960 957
961 static int spc_modesense_blockdesc(unsigned char *buf, u64 blocks, u32 block_size) 958 static int spc_modesense_blockdesc(unsigned char *buf, u64 blocks, u32 block_size)
962 { 959 {
963 *buf++ = 8; 960 *buf++ = 8;
964 put_unaligned_be32(min(blocks, 0xffffffffull), buf); 961 put_unaligned_be32(min(blocks, 0xffffffffull), buf);
965 buf += 4; 962 buf += 4;
966 put_unaligned_be32(block_size, buf); 963 put_unaligned_be32(block_size, buf);
967 return 9; 964 return 9;
968 } 965 }
969 966
970 static int spc_modesense_long_blockdesc(unsigned char *buf, u64 blocks, u32 block_size) 967 static int spc_modesense_long_blockdesc(unsigned char *buf, u64 blocks, u32 block_size)
971 { 968 {
972 if (blocks <= 0xffffffff) 969 if (blocks <= 0xffffffff)
973 return spc_modesense_blockdesc(buf + 3, blocks, block_size) + 3; 970 return spc_modesense_blockdesc(buf + 3, blocks, block_size) + 3;
974 971
975 *buf++ = 1; /* LONGLBA */ 972 *buf++ = 1; /* LONGLBA */
976 buf += 2; 973 buf += 2;
977 *buf++ = 16; 974 *buf++ = 16;
978 put_unaligned_be64(blocks, buf); 975 put_unaligned_be64(blocks, buf);
979 buf += 12; 976 buf += 12;
980 put_unaligned_be32(block_size, buf); 977 put_unaligned_be32(block_size, buf);
981 978
982 return 17; 979 return 17;
983 } 980 }
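
The builders above emit the SBC short (8-byte) and LONGLBA (16-byte) block descriptors; the short builder also fills the one-byte BLOCK DESCRIPTOR LENGTH field just before the descriptor, which is why it returns 9. Below is a minimal userspace sketch (illustrative only) of the short descriptor itself, with explicit shifts: the block count is capped at 0xffffffff, and the 24-bit block-length field is wide enough for any real sector size (the code above gets the same result by writing the block size as a full big-endian 32-bit value starting at offset 4, which leaves the density-code byte zero).

#include <stdint.h>
#include <stdio.h>

/* Illustrative sketch of the 8-byte short block descriptor. */
static void short_block_descriptor(unsigned char *d, uint64_t blocks,
				   uint32_t block_size)
{
	uint32_t n = blocks > 0xffffffffull ? 0xffffffff : (uint32_t)blocks;

	d[0] = n >> 24; d[1] = n >> 16; d[2] = n >> 8; d[3] = n;
	d[4] = 0;			/* density code */
	d[5] = block_size >> 16;	/* block length, 24 bits in practice */
	d[6] = block_size >> 8;
	d[7] = block_size;
}

int main(void)
{
	unsigned char d[8];
	int i;

	short_block_descriptor(d, 0x200000000ull, 512);	/* count gets capped */
	for (i = 0; i < 8; i++)
		printf("%02x ", d[i]);
	printf("\n");
	return 0;
}
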
984 981
985 static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd) 982 static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
986 { 983 {
987 struct se_device *dev = cmd->se_dev; 984 struct se_device *dev = cmd->se_dev;
988 char *cdb = cmd->t_task_cdb; 985 char *cdb = cmd->t_task_cdb;
989 unsigned char buf[SE_MODE_PAGE_BUF], *rbuf; 986 unsigned char buf[SE_MODE_PAGE_BUF], *rbuf;
990 int type = dev->transport->get_device_type(dev); 987 int type = dev->transport->get_device_type(dev);
991 int ten = (cmd->t_task_cdb[0] == MODE_SENSE_10); 988 int ten = (cmd->t_task_cdb[0] == MODE_SENSE_10);
992 bool dbd = !!(cdb[1] & 0x08); 989 bool dbd = !!(cdb[1] & 0x08);
993 bool llba = ten ? !!(cdb[1] & 0x10) : false; 990 bool llba = ten ? !!(cdb[1] & 0x10) : false;
994 u8 pc = cdb[2] >> 6; 991 u8 pc = cdb[2] >> 6;
995 u8 page = cdb[2] & 0x3f; 992 u8 page = cdb[2] & 0x3f;
996 u8 subpage = cdb[3]; 993 u8 subpage = cdb[3];
997 int length = 0; 994 int length = 0;
998 int ret; 995 int ret;
999 int i; 996 int i;
1000 997
1001 memset(buf, 0, SE_MODE_PAGE_BUF); 998 memset(buf, 0, SE_MODE_PAGE_BUF);
1002 999
1003 /* 1000 /*
1004 * Skip over MODE DATA LENGTH + MEDIUM TYPE fields to byte 3 for 1001 * Skip over MODE DATA LENGTH + MEDIUM TYPE fields to byte 3 for
1005 * MODE_SENSE_10 and byte 2 for MODE_SENSE (6). 1002 * MODE_SENSE_10 and byte 2 for MODE_SENSE (6).
1006 */ 1003 */
1007 length = ten ? 3 : 2; 1004 length = ten ? 3 : 2;
1008 1005
1009 /* DEVICE-SPECIFIC PARAMETER */ 1006 /* DEVICE-SPECIFIC PARAMETER */
1010 if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) || 1007 if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
1011 (cmd->se_deve && 1008 (cmd->se_deve &&
1012 (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY))) 1009 (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
1013 spc_modesense_write_protect(&buf[length], type); 1010 spc_modesense_write_protect(&buf[length], type);
1014 1011
1015 if ((spc_check_dev_wce(dev)) && 1012 if ((spc_check_dev_wce(dev)) &&
1016 (dev->dev_attrib.emulate_fua_write > 0)) 1013 (dev->dev_attrib.emulate_fua_write > 0))
1017 spc_modesense_dpofua(&buf[length], type); 1014 spc_modesense_dpofua(&buf[length], type);
1018 1015
1019 ++length; 1016 ++length;
1020 1017
1021 /* BLOCK DESCRIPTOR */ 1018 /* BLOCK DESCRIPTOR */
1022 1019
1023 /* 1020 /*
1024 * For now we only include a block descriptor for disk (SBC) 1021 * For now we only include a block descriptor for disk (SBC)
1025 * devices; other command sets use a slightly different format. 1022 * devices; other command sets use a slightly different format.
1026 */ 1023 */
1027 if (!dbd && type == TYPE_DISK) { 1024 if (!dbd && type == TYPE_DISK) {
1028 u64 blocks = dev->transport->get_blocks(dev); 1025 u64 blocks = dev->transport->get_blocks(dev);
1029 u32 block_size = dev->dev_attrib.block_size; 1026 u32 block_size = dev->dev_attrib.block_size;
1030 1027
1031 if (ten) { 1028 if (ten) {
1032 if (llba) { 1029 if (llba) {
1033 length += spc_modesense_long_blockdesc(&buf[length], 1030 length += spc_modesense_long_blockdesc(&buf[length],
1034 blocks, block_size); 1031 blocks, block_size);
1035 } else { 1032 } else {
1036 length += 3; 1033 length += 3;
1037 length += spc_modesense_blockdesc(&buf[length], 1034 length += spc_modesense_blockdesc(&buf[length],
1038 blocks, block_size); 1035 blocks, block_size);
1039 } 1036 }
1040 } else { 1037 } else {
1041 length += spc_modesense_blockdesc(&buf[length], blocks, 1038 length += spc_modesense_blockdesc(&buf[length], blocks,
1042 block_size); 1039 block_size);
1043 } 1040 }
1044 } else { 1041 } else {
1045 if (ten) 1042 if (ten)
1046 length += 4; 1043 length += 4;
1047 else 1044 else
1048 length += 1; 1045 length += 1;
1049 } 1046 }
1050 1047
1051 if (page == 0x3f) { 1048 if (page == 0x3f) {
1052 if (subpage != 0x00 && subpage != 0xff) { 1049 if (subpage != 0x00 && subpage != 0xff) {
1053 pr_warn("MODE_SENSE: Invalid subpage code: 0x%02x\n", subpage); 1050 pr_warn("MODE_SENSE: Invalid subpage code: 0x%02x\n", subpage);
1054 return TCM_INVALID_CDB_FIELD; 1051 return TCM_INVALID_CDB_FIELD;
1055 } 1052 }
1056 1053
1057 for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i) { 1054 for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i) {
1058 /* 1055 /*
1059 * Tricky way to say all subpage 00h for 1056 * Tricky way to say all subpage 00h for
1060 * subpage==0, all subpages for subpage==0xff 1057 * subpage==0, all subpages for subpage==0xff
1061 * (and we just checked above that those are 1058 * (and we just checked above that those are
1062 * the only two possibilities). 1059 * the only two possibilities).
1063 */ 1060 */
1064 if ((modesense_handlers[i].subpage & ~subpage) == 0) { 1061 if ((modesense_handlers[i].subpage & ~subpage) == 0) {
1065 ret = modesense_handlers[i].emulate(cmd, pc, &buf[length]); 1062 ret = modesense_handlers[i].emulate(cmd, pc, &buf[length]);
1066 if (!ten && length + ret >= 255) 1063 if (!ten && length + ret >= 255)
1067 break; 1064 break;
1068 length += ret; 1065 length += ret;
1069 } 1066 }
1070 } 1067 }
1071 1068
1072 goto set_length; 1069 goto set_length;
1073 } 1070 }
1074 1071
1075 for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i) 1072 for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i)
1076 if (modesense_handlers[i].page == page && 1073 if (modesense_handlers[i].page == page &&
1077 modesense_handlers[i].subpage == subpage) { 1074 modesense_handlers[i].subpage == subpage) {
1078 length += modesense_handlers[i].emulate(cmd, pc, &buf[length]); 1075 length += modesense_handlers[i].emulate(cmd, pc, &buf[length]);
1079 goto set_length; 1076 goto set_length;
1080 } 1077 }
1081 1078
1082 /* 1079 /*
1083 * We don't intend to implement: 1080 * We don't intend to implement:
1084 * - obsolete page 03h "format parameters" (checked by Solaris) 1081 * - obsolete page 03h "format parameters" (checked by Solaris)
1085 */ 1082 */
1086 if (page != 0x03) 1083 if (page != 0x03)
1087 pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n", 1084 pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
1088 page, subpage); 1085 page, subpage);
1089 1086
1090 return TCM_UNKNOWN_MODE_PAGE; 1087 return TCM_UNKNOWN_MODE_PAGE;
1091 1088
1092 set_length: 1089 set_length:
1093 if (ten) 1090 if (ten)
1094 put_unaligned_be16(length - 2, buf); 1091 put_unaligned_be16(length - 2, buf);
1095 else 1092 else
1096 buf[0] = length - 1; 1093 buf[0] = length - 1;
1097 1094
1098 rbuf = transport_kmap_data_sg(cmd); 1095 rbuf = transport_kmap_data_sg(cmd);
1099 if (rbuf) { 1096 if (rbuf) {
1100 memcpy(rbuf, buf, min_t(u32, SE_MODE_PAGE_BUF, cmd->data_length)); 1097 memcpy(rbuf, buf, min_t(u32, SE_MODE_PAGE_BUF, cmd->data_length));
1101 transport_kunmap_data_sg(cmd); 1098 transport_kunmap_data_sg(cmd);
1102 } 1099 }
1103 1100
1104 target_complete_cmd_with_length(cmd, GOOD, length); 1101 target_complete_cmd_with_length(cmd, GOOD, length);
1105 return 0; 1102 return 0;
1106 } 1103 }
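
One detail in the set_length path above: the MODE DATA LENGTH field does not count its own bytes, so MODE SENSE(6) stores length - 1 in its single-byte field while MODE SENSE(10) stores length - 2 in its two-byte big-endian field. A small worked sketch (illustrative only):

#include <stdio.h>

/* Write the MODE DATA LENGTH field for a response of 'length' bytes. */
static void set_mode_data_length(unsigned char *buf, int length, int ten)
{
	if (ten) {
		buf[0] = (length - 2) >> 8;	/* big-endian 16-bit field */
		buf[1] = (length - 2) & 0xff;
	} else {
		buf[0] = length - 1;		/* single-byte field */
	}
}

int main(void)
{
	unsigned char hdr[2] = { 0, 0 };

	set_mode_data_length(hdr, 36, 1);
	printf("MODE SENSE(10) header: %02x %02x\n", hdr[0], hdr[1]);	/* 00 22 */
	return 0;
}
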
1107 1104
1108 static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd) 1105 static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd)
1109 { 1106 {
1110 char *cdb = cmd->t_task_cdb; 1107 char *cdb = cmd->t_task_cdb;
1111 bool ten = cdb[0] == MODE_SELECT_10; 1108 bool ten = cdb[0] == MODE_SELECT_10;
1112 int off = ten ? 8 : 4; 1109 int off = ten ? 8 : 4;
1113 bool pf = !!(cdb[1] & 0x10); 1110 bool pf = !!(cdb[1] & 0x10);
1114 u8 page, subpage; 1111 u8 page, subpage;
1115 unsigned char *buf; 1112 unsigned char *buf;
1116 unsigned char tbuf[SE_MODE_PAGE_BUF]; 1113 unsigned char tbuf[SE_MODE_PAGE_BUF];
1117 int length; 1114 int length;
1118 int ret = 0; 1115 int ret = 0;
1119 int i; 1116 int i;
1120 1117
1121 if (!cmd->data_length) { 1118 if (!cmd->data_length) {
1122 target_complete_cmd(cmd, GOOD); 1119 target_complete_cmd(cmd, GOOD);
1123 return 0; 1120 return 0;
1124 } 1121 }
1125 1122
1126 if (cmd->data_length < off + 2) 1123 if (cmd->data_length < off + 2)
1127 return TCM_PARAMETER_LIST_LENGTH_ERROR; 1124 return TCM_PARAMETER_LIST_LENGTH_ERROR;
1128 1125
1129 buf = transport_kmap_data_sg(cmd); 1126 buf = transport_kmap_data_sg(cmd);
1130 if (!buf) 1127 if (!buf)
1131 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1128 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1132 1129
1133 if (!pf) { 1130 if (!pf) {
1134 ret = TCM_INVALID_CDB_FIELD; 1131 ret = TCM_INVALID_CDB_FIELD;
1135 goto out; 1132 goto out;
1136 } 1133 }
1137 1134
1138 page = buf[off] & 0x3f; 1135 page = buf[off] & 0x3f;
1139 subpage = buf[off] & 0x40 ? buf[off + 1] : 0; 1136 subpage = buf[off] & 0x40 ? buf[off + 1] : 0;
1140 1137
1141 for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i) 1138 for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i)
1142 if (modesense_handlers[i].page == page && 1139 if (modesense_handlers[i].page == page &&
1143 modesense_handlers[i].subpage == subpage) { 1140 modesense_handlers[i].subpage == subpage) {
1144 memset(tbuf, 0, SE_MODE_PAGE_BUF); 1141 memset(tbuf, 0, SE_MODE_PAGE_BUF);
1145 length = modesense_handlers[i].emulate(cmd, 0, tbuf); 1142 length = modesense_handlers[i].emulate(cmd, 0, tbuf);
1146 goto check_contents; 1143 goto check_contents;
1147 } 1144 }
1148 1145
1149 ret = TCM_UNKNOWN_MODE_PAGE; 1146 ret = TCM_UNKNOWN_MODE_PAGE;
1150 goto out; 1147 goto out;
1151 1148
1152 check_contents: 1149 check_contents:
1153 if (cmd->data_length < off + length) { 1150 if (cmd->data_length < off + length) {
1154 ret = TCM_PARAMETER_LIST_LENGTH_ERROR; 1151 ret = TCM_PARAMETER_LIST_LENGTH_ERROR;
1155 goto out; 1152 goto out;
1156 } 1153 }
1157 1154
1158 if (memcmp(buf + off, tbuf, length)) 1155 if (memcmp(buf + off, tbuf, length))
1159 ret = TCM_INVALID_PARAMETER_LIST; 1156 ret = TCM_INVALID_PARAMETER_LIST;
1160 1157
1161 out: 1158 out:
1162 transport_kunmap_data_sg(cmd); 1159 transport_kunmap_data_sg(cmd);
1163 1160
1164 if (!ret) 1161 if (!ret)
1165 target_complete_cmd(cmd, GOOD); 1162 target_complete_cmd(cmd, GOOD);
1166 return ret; 1163 return ret;
1167 } 1164 }
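
Because the pc == 1 branches in the page handlers above never fill in changeable values, the MODE SELECT handler can only accept data that exactly matches what MODE SENSE would report; anything else is rejected with TCM_INVALID_PARAMETER_LIST. A one-function sketch of that acceptance rule (illustrative only):

#include <string.h>

/*
 * Illustrative only: the rule applied at check_contents above.
 * 'incoming' is the page as sent by the initiator, 'emulated' the same
 * page as the mode sense emulation would report it; with no changeable
 * parameters, anything other than an exact match is rejected.
 */
static int mode_select_acceptable(const unsigned char *incoming,
				  const unsigned char *emulated, int length)
{
	return memcmp(incoming, emulated, length) == 0;
}

In the code above this corresponds to the memcmp(buf + off, tbuf, length) check.
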
1168 1165
1169 static sense_reason_t spc_emulate_request_sense(struct se_cmd *cmd) 1166 static sense_reason_t spc_emulate_request_sense(struct se_cmd *cmd)
1170 { 1167 {
1171 unsigned char *cdb = cmd->t_task_cdb; 1168 unsigned char *cdb = cmd->t_task_cdb;
1172 unsigned char *rbuf; 1169 unsigned char *rbuf;
1173 u8 ua_asc = 0, ua_ascq = 0; 1170 u8 ua_asc = 0, ua_ascq = 0;
1174 unsigned char buf[SE_SENSE_BUF]; 1171 unsigned char buf[SE_SENSE_BUF];
1175 1172
1176 memset(buf, 0, SE_SENSE_BUF); 1173 memset(buf, 0, SE_SENSE_BUF);
1177 1174
1178 if (cdb[1] & 0x01) { 1175 if (cdb[1] & 0x01) {
1179 pr_err("REQUEST_SENSE description emulation not" 1176 pr_err("REQUEST_SENSE description emulation not"
1180 " supported\n"); 1177 " supported\n");
1181 return TCM_INVALID_CDB_FIELD; 1178 return TCM_INVALID_CDB_FIELD;
1182 } 1179 }
1183 1180
1184 rbuf = transport_kmap_data_sg(cmd); 1181 rbuf = transport_kmap_data_sg(cmd);
1185 if (!rbuf) 1182 if (!rbuf)
1186 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1183 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1187 1184
1188 if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) { 1185 if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) {
1189 /* 1186 /*
1190 * CURRENT ERROR, UNIT ATTENTION 1187 * CURRENT ERROR, UNIT ATTENTION
1191 */ 1188 */
1192 buf[0] = 0x70; 1189 buf[0] = 0x70;
1193 buf[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION; 1190 buf[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
1194 1191
1195 /* 1192 /*
1196 * The Additional Sense Code (ASC) from the UNIT ATTENTION 1193 * The Additional Sense Code (ASC) from the UNIT ATTENTION
1197 */ 1194 */
1198 buf[SPC_ASC_KEY_OFFSET] = ua_asc; 1195 buf[SPC_ASC_KEY_OFFSET] = ua_asc;
1199 buf[SPC_ASCQ_KEY_OFFSET] = ua_ascq; 1196 buf[SPC_ASCQ_KEY_OFFSET] = ua_ascq;
1200 buf[7] = 0x0A; 1197 buf[7] = 0x0A;
1201 } else { 1198 } else {
1202 /* 1199 /*
1203 * CURRENT ERROR, NO SENSE 1200 * CURRENT ERROR, NO SENSE
1204 */ 1201 */
1205 buf[0] = 0x70; 1202 buf[0] = 0x70;
1206 buf[SPC_SENSE_KEY_OFFSET] = NO_SENSE; 1203 buf[SPC_SENSE_KEY_OFFSET] = NO_SENSE;
1207 1204
1208 /* 1205 /*
1209 * NO ADDITIONAL SENSE INFORMATION 1206 * NO ADDITIONAL SENSE INFORMATION
1210 */ 1207 */
1211 buf[SPC_ASC_KEY_OFFSET] = 0x00; 1208 buf[SPC_ASC_KEY_OFFSET] = 0x00;
1212 buf[7] = 0x0A; 1209 buf[7] = 0x0A;
1213 } 1210 }
1214 1211
1215 memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); 1212 memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
1216 transport_kunmap_data_sg(cmd); 1213 transport_kunmap_data_sg(cmd);
1217 1214
1218 target_complete_cmd(cmd, GOOD); 1215 target_complete_cmd(cmd, GOOD);
1219 return 0; 1216 return 0;
1220 } 1217 }
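
The buffer built above follows the fixed-format sense layout from SPC: response code 70h (current error) in byte 0, the sense key in byte 2, ADDITIONAL SENSE LENGTH 0Ah in byte 7, and the ASC/ASCQ pair in bytes 12 and 13, which is where the SPC_ASC_KEY_OFFSET and SPC_ASCQ_KEY_OFFSET constants point. A minimal userspace sketch of the unit-attention variant (illustrative only; the ASC/ASCQ pair is just an example):

#include <stdio.h>
#include <string.h>

#define UNIT_ATTENTION	0x06	/* sense key, per SPC */

/* Illustrative: fixed-format sense data reporting a unit attention. */
static void build_ua_sense(unsigned char *buf, unsigned char asc,
			   unsigned char ascq)
{
	memset(buf, 0, 18);
	buf[0] = 0x70;			/* current error, fixed format */
	buf[2] = UNIT_ATTENTION;	/* sense key */
	buf[7] = 0x0a;			/* additional sense length */
	buf[12] = asc;
	buf[13] = ascq;
}

int main(void)
{
	unsigned char sense[18];

	build_ua_sense(sense, 0x29, 0x00);	/* POWER ON, RESET, ... OCCURRED */
	printf("key=%#x asc=%#x ascq=%#x\n", sense[2], sense[12], sense[13]);
	return 0;
}
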
1221 1218
1222 sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd) 1219 sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
1223 { 1220 {
1224 struct se_dev_entry *deve; 1221 struct se_dev_entry *deve;
1225 struct se_session *sess = cmd->se_sess; 1222 struct se_session *sess = cmd->se_sess;
1226 unsigned char *buf; 1223 unsigned char *buf;
1227 u32 lun_count = 0, offset = 8, i; 1224 u32 lun_count = 0, offset = 8, i;
1228 1225
1229 if (cmd->data_length < 16) { 1226 if (cmd->data_length < 16) {
1230 pr_warn("REPORT LUNS allocation length %u too small\n", 1227 pr_warn("REPORT LUNS allocation length %u too small\n",
1231 cmd->data_length); 1228 cmd->data_length);
1232 return TCM_INVALID_CDB_FIELD; 1229 return TCM_INVALID_CDB_FIELD;
1233 } 1230 }
1234 1231
1235 buf = transport_kmap_data_sg(cmd); 1232 buf = transport_kmap_data_sg(cmd);
1236 if (!buf) 1233 if (!buf)
1237 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1234 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1238 1235
1239 /* 1236 /*
1240 * If no struct se_session pointer is present, this struct se_cmd is 1237 * If no struct se_session pointer is present, this struct se_cmd is
1241 * coming via a target_core_mod PASSTHROUGH op, and not through 1238 * coming via a target_core_mod PASSTHROUGH op, and not through
1242 * a $FABRIC_MOD. In that case, report LUN=0 only. 1239 * a $FABRIC_MOD. In that case, report LUN=0 only.
1243 */ 1240 */
1244 if (!sess) { 1241 if (!sess) {
1245 int_to_scsilun(0, (struct scsi_lun *)&buf[offset]); 1242 int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
1246 lun_count = 1; 1243 lun_count = 1;
1247 goto done; 1244 goto done;
1248 } 1245 }
1249 1246
1250 spin_lock_irq(&sess->se_node_acl->device_list_lock); 1247 spin_lock_irq(&sess->se_node_acl->device_list_lock);
1251 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { 1248 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
1252 deve = sess->se_node_acl->device_list[i]; 1249 deve = sess->se_node_acl->device_list[i];
1253 if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) 1250 if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
1254 continue; 1251 continue;
1255 /* 1252 /*
1256 * We determine the correct LUN LIST LENGTH even once we 1253 * We determine the correct LUN LIST LENGTH even once we
1257 * have reached the initial allocation length. 1254 * have reached the initial allocation length.
1258 * See SPC2-R20 7.19. 1255 * See SPC2-R20 7.19.
1259 */ 1256 */
1260 lun_count++; 1257 lun_count++;
1261 if ((offset + 8) > cmd->data_length) 1258 if ((offset + 8) > cmd->data_length)
1262 continue; 1259 continue;
1263 1260
1264 int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]); 1261 int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
1265 offset += 8; 1262 offset += 8;
1266 } 1263 }
1267 spin_unlock_irq(&sess->se_node_acl->device_list_lock); 1264 spin_unlock_irq(&sess->se_node_acl->device_list_lock);
1268 1265
1269 /* 1266 /*
1270 * See SPC3 r07, page 159. 1267 * See SPC3 r07, page 159.
1271 */ 1268 */
1272 done: 1269 done:
1273 lun_count *= 8; 1270 lun_count *= 8;
1274 buf[0] = ((lun_count >> 24) & 0xff); 1271 buf[0] = ((lun_count >> 24) & 0xff);
1275 buf[1] = ((lun_count >> 16) & 0xff); 1272 buf[1] = ((lun_count >> 16) & 0xff);
1276 buf[2] = ((lun_count >> 8) & 0xff); 1273 buf[2] = ((lun_count >> 8) & 0xff);
1277 buf[3] = (lun_count & 0xff); 1274 buf[3] = (lun_count & 0xff);
1278 transport_kunmap_data_sg(cmd); 1275 transport_kunmap_data_sg(cmd);
1279 1276
1280 target_complete_cmd_with_length(cmd, GOOD, 8 + lun_count * 8); 1277 target_complete_cmd_with_length(cmd, GOOD, 8 + lun_count * 8);
1281 return 0; 1278 return 0;
1282 } 1279 }
1283 EXPORT_SYMBOL(spc_emulate_report_luns); 1280 EXPORT_SYMBOL(spc_emulate_report_luns);
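
For reference, the REPORT LUNS parameter data built above starts with an 8-byte header whose first four bytes hold LUN LIST LENGTH, i.e. the number of accessible LUNs times 8; 8-byte LUN entries follow. Entries beyond the initiator's allocation length are skipped, but the reported list length still covers every accessible LUN, as SPC requires. A minimal sketch of the header (illustrative only):

#include <stdio.h>
#include <string.h>

/* Illustrative: REPORT LUNS header for 'lun_count' accessible LUNs. */
static int report_luns_header(unsigned char *buf, unsigned int lun_count)
{
	unsigned int list_len = lun_count * 8;

	memset(buf, 0, 8);
	buf[0] = list_len >> 24;
	buf[1] = list_len >> 16;
	buf[2] = list_len >> 8;
	buf[3] = list_len;
	return 8 + list_len;		/* full parameter data length */
}

int main(void)
{
	unsigned char hdr[8];

	printf("parameter data length = %d\n", report_luns_header(hdr, 4));	/* 40 */
	return 0;
}
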
1284 1281
1285 static sense_reason_t 1282 static sense_reason_t
1286 spc_emulate_testunitready(struct se_cmd *cmd) 1283 spc_emulate_testunitready(struct se_cmd *cmd)
1287 { 1284 {
1288 target_complete_cmd(cmd, GOOD); 1285 target_complete_cmd(cmd, GOOD);
1289 return 0; 1286 return 0;
1290 } 1287 }
1291 1288
1292 sense_reason_t 1289 sense_reason_t
1293 spc_parse_cdb(struct se_cmd *cmd, unsigned int *size) 1290 spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
1294 { 1291 {
1295 struct se_device *dev = cmd->se_dev; 1292 struct se_device *dev = cmd->se_dev;
1296 unsigned char *cdb = cmd->t_task_cdb; 1293 unsigned char *cdb = cmd->t_task_cdb;
1297 1294
1298 switch (cdb[0]) { 1295 switch (cdb[0]) {
1299 case MODE_SELECT: 1296 case MODE_SELECT:
1300 *size = cdb[4]; 1297 *size = cdb[4];
1301 cmd->execute_cmd = spc_emulate_modeselect; 1298 cmd->execute_cmd = spc_emulate_modeselect;
1302 break; 1299 break;
1303 case MODE_SELECT_10: 1300 case MODE_SELECT_10:
1304 *size = (cdb[7] << 8) + cdb[8]; 1301 *size = (cdb[7] << 8) + cdb[8];
1305 cmd->execute_cmd = spc_emulate_modeselect; 1302 cmd->execute_cmd = spc_emulate_modeselect;
1306 break; 1303 break;
1307 case MODE_SENSE: 1304 case MODE_SENSE:
1308 *size = cdb[4]; 1305 *size = cdb[4];
1309 cmd->execute_cmd = spc_emulate_modesense; 1306 cmd->execute_cmd = spc_emulate_modesense;
1310 break; 1307 break;
1311 case MODE_SENSE_10: 1308 case MODE_SENSE_10:
1312 *size = (cdb[7] << 8) + cdb[8]; 1309 *size = (cdb[7] << 8) + cdb[8];
1313 cmd->execute_cmd = spc_emulate_modesense; 1310 cmd->execute_cmd = spc_emulate_modesense;
1314 break; 1311 break;
1315 case LOG_SELECT: 1312 case LOG_SELECT:
1316 case LOG_SENSE: 1313 case LOG_SENSE:
1317 *size = (cdb[7] << 8) + cdb[8]; 1314 *size = (cdb[7] << 8) + cdb[8];
1318 break; 1315 break;
1319 case PERSISTENT_RESERVE_IN: 1316 case PERSISTENT_RESERVE_IN:
1320 *size = (cdb[7] << 8) + cdb[8]; 1317 *size = (cdb[7] << 8) + cdb[8];
1321 cmd->execute_cmd = target_scsi3_emulate_pr_in; 1318 cmd->execute_cmd = target_scsi3_emulate_pr_in;
1322 break; 1319 break;
1323 case PERSISTENT_RESERVE_OUT: 1320 case PERSISTENT_RESERVE_OUT:
1324 *size = (cdb[7] << 8) + cdb[8]; 1321 *size = (cdb[7] << 8) + cdb[8];
1325 cmd->execute_cmd = target_scsi3_emulate_pr_out; 1322 cmd->execute_cmd = target_scsi3_emulate_pr_out;
1326 break; 1323 break;
1327 case RELEASE: 1324 case RELEASE:
1328 case RELEASE_10: 1325 case RELEASE_10:
1329 if (cdb[0] == RELEASE_10) 1326 if (cdb[0] == RELEASE_10)
1330 *size = (cdb[7] << 8) | cdb[8]; 1327 *size = (cdb[7] << 8) | cdb[8];
1331 else 1328 else
1332 *size = cmd->data_length; 1329 *size = cmd->data_length;
1333 1330
1334 cmd->execute_cmd = target_scsi2_reservation_release; 1331 cmd->execute_cmd = target_scsi2_reservation_release;
1335 break; 1332 break;
1336 case RESERVE: 1333 case RESERVE:
1337 case RESERVE_10: 1334 case RESERVE_10:
1338 /* 1335 /*
1339 * The SPC-2 RESERVE does not contain a size in the SCSI CDB. 1336 * The SPC-2 RESERVE does not contain a size in the SCSI CDB.
1340 * Assume the passthrough or $FABRIC_MOD will tell us about it. 1337 * Assume the passthrough or $FABRIC_MOD will tell us about it.
1341 */ 1338 */
1342 if (cdb[0] == RESERVE_10) 1339 if (cdb[0] == RESERVE_10)
1343 *size = (cdb[7] << 8) | cdb[8]; 1340 *size = (cdb[7] << 8) | cdb[8];
1344 else 1341 else
1345 *size = cmd->data_length; 1342 *size = cmd->data_length;
1346 1343
1347 cmd->execute_cmd = target_scsi2_reservation_reserve; 1344 cmd->execute_cmd = target_scsi2_reservation_reserve;
1348 break; 1345 break;
1349 case REQUEST_SENSE: 1346 case REQUEST_SENSE:
1350 *size = cdb[4]; 1347 *size = cdb[4];
1351 cmd->execute_cmd = spc_emulate_request_sense; 1348 cmd->execute_cmd = spc_emulate_request_sense;
1352 break; 1349 break;
1353 case INQUIRY: 1350 case INQUIRY:
1354 *size = (cdb[3] << 8) + cdb[4]; 1351 *size = (cdb[3] << 8) + cdb[4];
1355 1352
1356 /* 1353 /*
1357 * Do implicit HEAD_OF_QUEUE processing for INQUIRY. 1354 * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
1358 * See spc4r17 section 5.3 1355 * See spc4r17 section 5.3
1359 */ 1356 */
1360 cmd->sam_task_attr = MSG_HEAD_TAG; 1357 cmd->sam_task_attr = MSG_HEAD_TAG;
1361 cmd->execute_cmd = spc_emulate_inquiry; 1358 cmd->execute_cmd = spc_emulate_inquiry;
1362 break; 1359 break;
1363 case SECURITY_PROTOCOL_IN: 1360 case SECURITY_PROTOCOL_IN:
1364 case SECURITY_PROTOCOL_OUT: 1361 case SECURITY_PROTOCOL_OUT:
1365 *size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; 1362 *size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
1366 break; 1363 break;
1367 case EXTENDED_COPY: 1364 case EXTENDED_COPY:
1368 *size = get_unaligned_be32(&cdb[10]); 1365 *size = get_unaligned_be32(&cdb[10]);
1369 cmd->execute_cmd = target_do_xcopy; 1366 cmd->execute_cmd = target_do_xcopy;
1370 break; 1367 break;
1371 case RECEIVE_COPY_RESULTS: 1368 case RECEIVE_COPY_RESULTS:
1372 *size = get_unaligned_be32(&cdb[10]); 1369 *size = get_unaligned_be32(&cdb[10]);
1373 cmd->execute_cmd = target_do_receive_copy_results; 1370 cmd->execute_cmd = target_do_receive_copy_results;
1374 break; 1371 break;
1375 case READ_ATTRIBUTE: 1372 case READ_ATTRIBUTE:
1376 case WRITE_ATTRIBUTE: 1373 case WRITE_ATTRIBUTE:
1377 *size = (cdb[10] << 24) | (cdb[11] << 16) | 1374 *size = (cdb[10] << 24) | (cdb[11] << 16) |
1378 (cdb[12] << 8) | cdb[13]; 1375 (cdb[12] << 8) | cdb[13];
1379 break; 1376 break;
1380 case RECEIVE_DIAGNOSTIC: 1377 case RECEIVE_DIAGNOSTIC:
1381 case SEND_DIAGNOSTIC: 1378 case SEND_DIAGNOSTIC:
1382 *size = (cdb[3] << 8) | cdb[4]; 1379 *size = (cdb[3] << 8) | cdb[4];
1383 break; 1380 break;
1384 case WRITE_BUFFER: 1381 case WRITE_BUFFER:
1385 *size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; 1382 *size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
1386 break; 1383 break;
1387 case REPORT_LUNS: 1384 case REPORT_LUNS:
1388 cmd->execute_cmd = spc_emulate_report_luns; 1385 cmd->execute_cmd = spc_emulate_report_luns;
1389 *size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; 1386 *size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
1390 /* 1387 /*
1391 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS 1388 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
1392 * See spc4r17 section 5.3 1389 * See spc4r17 section 5.3
1393 */ 1390 */
1394 cmd->sam_task_attr = MSG_HEAD_TAG; 1391 cmd->sam_task_attr = MSG_HEAD_TAG;
1395 break; 1392 break;
1396 case TEST_UNIT_READY: 1393 case TEST_UNIT_READY:
1397 cmd->execute_cmd = spc_emulate_testunitready; 1394 cmd->execute_cmd = spc_emulate_testunitready;
1398 *size = 0; 1395 *size = 0;
1399 break; 1396 break;
1400 case MAINTENANCE_IN: 1397 case MAINTENANCE_IN:
1401 if (dev->transport->get_device_type(dev) != TYPE_ROM) { 1398 if (dev->transport->get_device_type(dev) != TYPE_ROM) {
1402 /* 1399 /*
1403 * MAINTENANCE_IN from SCC-2 1400 * MAINTENANCE_IN from SCC-2
1404 * Check for emulated MI_REPORT_TARGET_PGS 1401 * Check for emulated MI_REPORT_TARGET_PGS
1405 */ 1402 */
1406 if ((cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS) { 1403 if ((cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS) {
1407 cmd->execute_cmd = 1404 cmd->execute_cmd =
1408 target_emulate_report_target_port_groups; 1405 target_emulate_report_target_port_groups;
1409 } 1406 }
1410 *size = get_unaligned_be32(&cdb[6]); 1407 *size = get_unaligned_be32(&cdb[6]);
1411 } else { 1408 } else {
1412 /* 1409 /*
1413 * GPCMD_SEND_KEY from multi media commands 1410 * GPCMD_SEND_KEY from multi media commands
1414 */ 1411 */
1415 *size = get_unaligned_be16(&cdb[8]); 1412 *size = get_unaligned_be16(&cdb[8]);
1416 } 1413 }
1417 break; 1414 break;
1418 case MAINTENANCE_OUT: 1415 case MAINTENANCE_OUT:
1419 if (dev->transport->get_device_type(dev) != TYPE_ROM) { 1416 if (dev->transport->get_device_type(dev) != TYPE_ROM) {
1420 /* 1417 /*
1421 * MAINTENANCE_OUT from SCC-2 1418 * MAINTENANCE_OUT from SCC-2
1422 * Check for emulated MO_SET_TARGET_PGS. 1419 * Check for emulated MO_SET_TARGET_PGS.
1423 */ 1420 */
1424 if (cdb[1] == MO_SET_TARGET_PGS) { 1421 if (cdb[1] == MO_SET_TARGET_PGS) {
1425 cmd->execute_cmd = 1422 cmd->execute_cmd =
1426 target_emulate_set_target_port_groups; 1423 target_emulate_set_target_port_groups;
1427 } 1424 }
1428 *size = get_unaligned_be32(&cdb[6]); 1425 *size = get_unaligned_be32(&cdb[6]);
1429 } else { 1426 } else {
1430 /* 1427 /*
1431 * GPCMD_SEND_KEY from multi media commands 1428 * GPCMD_SEND_KEY from multi media commands
1432 */ 1429 */
1433 *size = get_unaligned_be16(&cdb[8]); 1430 *size = get_unaligned_be16(&cdb[8]);
1434 } 1431 }
1435 break; 1432 break;
1436 default: 1433 default:
1437 pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode" 1434 pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
1438 " 0x%02x, sending CHECK_CONDITION.\n", 1435 " 0x%02x, sending CHECK_CONDITION.\n",
1439 cmd->se_tfo->get_fabric_name(), cdb[0]); 1436 cmd->se_tfo->get_fabric_name(), cdb[0]);
1440 return TCM_UNSUPPORTED_SCSI_OPCODE; 1437 return TCM_UNSUPPORTED_SCSI_OPCODE;
1441 } 1438 }
1442 1439
1443 return 0; 1440 return 0;
1444 } 1441 }
1445 EXPORT_SYMBOL(spc_parse_cdb); 1442 EXPORT_SYMBOL(spc_parse_cdb);
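
Most of the switch above differs only in where each opcode keeps its ALLOCATION LENGTH or parameter list length: a single byte at CDB[4] for 6-byte CDBs, a big-endian 16-bit value at CDB[7..8] for 10-byte CDBs, and a big-endian 32-bit value (at CDB[6] or CDB[10], depending on the opcode) for the longer CDBs. A standalone sketch of those three extractions (illustrative only; the kernel code uses get_unaligned_be16/be32 helpers for some of the multi-byte cases):

#include <stdint.h>
#include <stdio.h>

static uint32_t alloc_len_6(const unsigned char *cdb)
{
	return cdb[4];				/* e.g. REQUEST SENSE */
}

static uint32_t alloc_len_10(const unsigned char *cdb)
{
	return (cdb[7] << 8) | cdb[8];		/* e.g. MODE SENSE(10) */
}

static uint32_t alloc_len_be32(const unsigned char *cdb, int off)
{
	return ((uint32_t)cdb[off] << 24) | (cdb[off + 1] << 16) |
	       (cdb[off + 2] << 8) | cdb[off + 3];
}

int main(void)
{
	unsigned char rs[6]    = { 0x03, 0, 0, 0, 0xfc, 0 };
	unsigned char ms10[10] = { 0x5a, 0, 0x3f, 0, 0, 0, 0, 0, 0xc8, 0 };
	unsigned char rl[12]   = { 0xa0, 0, 0, 0, 0, 0, 0, 0, 0x10, 0, 0, 0 };

	printf("%u %u %u\n", alloc_len_6(rs), alloc_len_10(ms10),
	       alloc_len_be32(rl, 6));		/* 252 200 4096 */
	return 0;
}
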
1446 1443