Commit 306c11b28d7bb85a7adda741798a2b6b60dd305a
Committed by: Nicholas Bellinger
1 parent: d5829eac5f
Exists in: smarc-l5.0.0_1.0.0-ga and 5 other branches
target: go through normal processing for zero-length PSCSI commands
Right now, commands with a zero-size payload are skipped completely. This is wrong; such commands should be passed down to the device and processed normally. For physical backends, this means commands such as START STOP UNIT are ignored entirely. For virtual backends, we have a hack in place to clear a unit attention state on a zero-size REQUEST SENSE, but we still do not report errors properly on zero-length commands: out-of-bounds zero-block reads and writes, too-small parameter list lengths, and so on.

This patch fixes this for PSCSI. Uses of transport_kmap_data_sg() are guarded with a check for a non-zero cmd->data_length; for all other commands, a zero length is handled properly in pscsi_execute_cmd(). The sole exception, for now, is REPORT LUNS, which is handled through the normal SPC emulation.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
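For context, here is a minimal sketch (not the patch itself) of the guard pattern the diff below applies in pscsi_transport_complete(). The helper name pscsi_force_wp_bit() and the NULL check on the mapped buffer are illustrative additions; cmd->data_length, cmd->se_deve, transport_kmap_data_sg()/transport_kunmap_data_sg() and TRANSPORT_LUNFLAGS_READ_ONLY are taken from the code below. The point is that transport_kmap_data_sg() maps the command's data scatterlist, which is empty for a zero-length command, so the caller must bail out early on a zero data_length instead of skipping the command altogether:

    /* Illustrative sketch only; simplified from pscsi_transport_complete(). */
    static void pscsi_force_wp_bit(struct se_cmd *cmd, unsigned char *cdb)
    {
            unsigned char *buf;

            /* Zero-length payload: nothing is mapped, nothing to patch up. */
            if (!cmd->se_deve || !cmd->data_length)
                    return;

            if (cdb[0] != MODE_SENSE && cdb[0] != MODE_SENSE_10)
                    return;

            if (!(cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY))
                    return;

            buf = transport_kmap_data_sg(cmd);
            if (!buf)
                    return;

            /* Set the WP bit in the mode parameter header for R/O LUNs. */
            buf[cdb[0] == MODE_SENSE_10 ? 3 : 2] |= 0x80;

            transport_kunmap_data_sg(cmd);
    }

Note that the zero-length command itself still reaches the device through pscsi_execute_cmd(), so commands such as START STOP UNIT are executed and their status is reported normally; only the buffer-touching fixups are skipped.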
Showing 2 changed files with 7 additions and 5 deletions
drivers/target/target_core_pscsi.c
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | * Filename: target_core_pscsi.c | 2 | * Filename: target_core_pscsi.c |
3 | * | 3 | * |
4 | * This file contains the generic target mode <-> Linux SCSI subsystem plugin. | 4 | * This file contains the generic target mode <-> Linux SCSI subsystem plugin. |
5 | * | 5 | * |
6 | * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc. | 6 | * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc. |
7 | * Copyright (c) 2005, 2006, 2007 SBE, Inc. | 7 | * Copyright (c) 2005, 2006, 2007 SBE, Inc. |
8 | * Copyright (c) 2007-2010 Rising Tide Systems | 8 | * Copyright (c) 2007-2010 Rising Tide Systems |
9 | * Copyright (c) 2008-2010 Linux-iSCSI.org | 9 | * Copyright (c) 2008-2010 Linux-iSCSI.org |
10 | * | 10 | * |
11 | * Nicholas A. Bellinger <nab@kernel.org> | 11 | * Nicholas A. Bellinger <nab@kernel.org> |
12 | * | 12 | * |
13 | * This program is free software; you can redistribute it and/or modify | 13 | * This program is free software; you can redistribute it and/or modify |
14 | * it under the terms of the GNU General Public License as published by | 14 | * it under the terms of the GNU General Public License as published by |
15 | * the Free Software Foundation; either version 2 of the License, or | 15 | * the Free Software Foundation; either version 2 of the License, or |
16 | * (at your option) any later version. | 16 | * (at your option) any later version. |
17 | * | 17 | * |
18 | * This program is distributed in the hope that it will be useful, | 18 | * This program is distributed in the hope that it will be useful, |
19 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 19 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
20 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 20 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
21 | * GNU General Public License for more details. | 21 | * GNU General Public License for more details. |
22 | * | 22 | * |
23 | * You should have received a copy of the GNU General Public License | 23 | * You should have received a copy of the GNU General Public License |
24 | * along with this program; if not, write to the Free Software | 24 | * along with this program; if not, write to the Free Software |
25 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | 25 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. |
26 | * | 26 | * |
27 | ******************************************************************************/ | 27 | ******************************************************************************/ |
28 | 28 | ||
29 | #include <linux/string.h> | 29 | #include <linux/string.h> |
30 | #include <linux/parser.h> | 30 | #include <linux/parser.h> |
31 | #include <linux/timer.h> | 31 | #include <linux/timer.h> |
32 | #include <linux/blkdev.h> | 32 | #include <linux/blkdev.h> |
33 | #include <linux/blk_types.h> | 33 | #include <linux/blk_types.h> |
34 | #include <linux/slab.h> | 34 | #include <linux/slab.h> |
35 | #include <linux/spinlock.h> | 35 | #include <linux/spinlock.h> |
36 | #include <linux/genhd.h> | 36 | #include <linux/genhd.h> |
37 | #include <linux/cdrom.h> | 37 | #include <linux/cdrom.h> |
38 | #include <linux/ratelimit.h> | 38 | #include <linux/ratelimit.h> |
39 | #include <linux/module.h> | 39 | #include <linux/module.h> |
40 | #include <asm/unaligned.h> | 40 | #include <asm/unaligned.h> |
41 | 41 | ||
42 | #include <scsi/scsi.h> | 42 | #include <scsi/scsi.h> |
43 | #include <scsi/scsi_device.h> | 43 | #include <scsi/scsi_device.h> |
44 | #include <scsi/scsi_cmnd.h> | 44 | #include <scsi/scsi_cmnd.h> |
45 | #include <scsi/scsi_host.h> | 45 | #include <scsi/scsi_host.h> |
46 | #include <scsi/scsi_tcq.h> | 46 | #include <scsi/scsi_tcq.h> |
47 | 47 | ||
48 | #include <target/target_core_base.h> | 48 | #include <target/target_core_base.h> |
49 | #include <target/target_core_backend.h> | 49 | #include <target/target_core_backend.h> |
50 | 50 | ||
51 | #include "target_core_alua.h" | 51 | #include "target_core_alua.h" |
52 | #include "target_core_pscsi.h" | 52 | #include "target_core_pscsi.h" |
53 | 53 | ||
54 | #define ISPRINT(a) ((a >= ' ') && (a <= '~')) | 54 | #define ISPRINT(a) ((a >= ' ') && (a <= '~')) |
55 | 55 | ||
56 | static struct se_subsystem_api pscsi_template; | 56 | static struct se_subsystem_api pscsi_template; |
57 | 57 | ||
58 | static int pscsi_execute_cmd(struct se_cmd *cmd); | 58 | static int pscsi_execute_cmd(struct se_cmd *cmd); |
59 | static void pscsi_req_done(struct request *, int); | 59 | static void pscsi_req_done(struct request *, int); |
60 | 60 | ||
61 | /* pscsi_attach_hba(): | 61 | /* pscsi_attach_hba(): |
62 | * | 62 | * |
63 | * pscsi_get_sh() uses scsi_host_lookup() to locate the struct Scsi_Host | 63 | * pscsi_get_sh() uses scsi_host_lookup() to locate the struct Scsi_Host |
64 | * from the passed SCSI Host ID. | 64 | * from the passed SCSI Host ID. |
65 | */ | 65 | */ |
66 | static int pscsi_attach_hba(struct se_hba *hba, u32 host_id) | 66 | static int pscsi_attach_hba(struct se_hba *hba, u32 host_id) |
67 | { | 67 | { |
68 | struct pscsi_hba_virt *phv; | 68 | struct pscsi_hba_virt *phv; |
69 | 69 | ||
70 | phv = kzalloc(sizeof(struct pscsi_hba_virt), GFP_KERNEL); | 70 | phv = kzalloc(sizeof(struct pscsi_hba_virt), GFP_KERNEL); |
71 | if (!phv) { | 71 | if (!phv) { |
72 | pr_err("Unable to allocate struct pscsi_hba_virt\n"); | 72 | pr_err("Unable to allocate struct pscsi_hba_virt\n"); |
73 | return -ENOMEM; | 73 | return -ENOMEM; |
74 | } | 74 | } |
75 | phv->phv_host_id = host_id; | 75 | phv->phv_host_id = host_id; |
76 | phv->phv_mode = PHV_VIRTUAL_HOST_ID; | 76 | phv->phv_mode = PHV_VIRTUAL_HOST_ID; |
77 | 77 | ||
78 | hba->hba_ptr = phv; | 78 | hba->hba_ptr = phv; |
79 | 79 | ||
80 | pr_debug("CORE_HBA[%d] - TCM SCSI HBA Driver %s on" | 80 | pr_debug("CORE_HBA[%d] - TCM SCSI HBA Driver %s on" |
81 | " Generic Target Core Stack %s\n", hba->hba_id, | 81 | " Generic Target Core Stack %s\n", hba->hba_id, |
82 | PSCSI_VERSION, TARGET_CORE_MOD_VERSION); | 82 | PSCSI_VERSION, TARGET_CORE_MOD_VERSION); |
83 | pr_debug("CORE_HBA[%d] - Attached SCSI HBA to Generic\n", | 83 | pr_debug("CORE_HBA[%d] - Attached SCSI HBA to Generic\n", |
84 | hba->hba_id); | 84 | hba->hba_id); |
85 | 85 | ||
86 | return 0; | 86 | return 0; |
87 | } | 87 | } |
88 | 88 | ||
89 | static void pscsi_detach_hba(struct se_hba *hba) | 89 | static void pscsi_detach_hba(struct se_hba *hba) |
90 | { | 90 | { |
91 | struct pscsi_hba_virt *phv = hba->hba_ptr; | 91 | struct pscsi_hba_virt *phv = hba->hba_ptr; |
92 | struct Scsi_Host *scsi_host = phv->phv_lld_host; | 92 | struct Scsi_Host *scsi_host = phv->phv_lld_host; |
93 | 93 | ||
94 | if (scsi_host) { | 94 | if (scsi_host) { |
95 | scsi_host_put(scsi_host); | 95 | scsi_host_put(scsi_host); |
96 | 96 | ||
97 | pr_debug("CORE_HBA[%d] - Detached SCSI HBA: %s from" | 97 | pr_debug("CORE_HBA[%d] - Detached SCSI HBA: %s from" |
98 | " Generic Target Core\n", hba->hba_id, | 98 | " Generic Target Core\n", hba->hba_id, |
99 | (scsi_host->hostt->name) ? (scsi_host->hostt->name) : | 99 | (scsi_host->hostt->name) ? (scsi_host->hostt->name) : |
100 | "Unknown"); | 100 | "Unknown"); |
101 | } else | 101 | } else |
102 | pr_debug("CORE_HBA[%d] - Detached Virtual SCSI HBA" | 102 | pr_debug("CORE_HBA[%d] - Detached Virtual SCSI HBA" |
103 | " from Generic Target Core\n", hba->hba_id); | 103 | " from Generic Target Core\n", hba->hba_id); |
104 | 104 | ||
105 | kfree(phv); | 105 | kfree(phv); |
106 | hba->hba_ptr = NULL; | 106 | hba->hba_ptr = NULL; |
107 | } | 107 | } |
108 | 108 | ||
109 | static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag) | 109 | static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag) |
110 | { | 110 | { |
111 | struct pscsi_hba_virt *phv = hba->hba_ptr; | 111 | struct pscsi_hba_virt *phv = hba->hba_ptr; |
112 | struct Scsi_Host *sh = phv->phv_lld_host; | 112 | struct Scsi_Host *sh = phv->phv_lld_host; |
113 | /* | 113 | /* |
114 | * Release the struct Scsi_Host | 114 | * Release the struct Scsi_Host |
115 | */ | 115 | */ |
116 | if (!mode_flag) { | 116 | if (!mode_flag) { |
117 | if (!sh) | 117 | if (!sh) |
118 | return 0; | 118 | return 0; |
119 | 119 | ||
120 | phv->phv_lld_host = NULL; | 120 | phv->phv_lld_host = NULL; |
121 | phv->phv_mode = PHV_VIRTUAL_HOST_ID; | 121 | phv->phv_mode = PHV_VIRTUAL_HOST_ID; |
122 | 122 | ||
123 | pr_debug("CORE_HBA[%d] - Disabled pSCSI HBA Passthrough" | 123 | pr_debug("CORE_HBA[%d] - Disabled pSCSI HBA Passthrough" |
124 | " %s\n", hba->hba_id, (sh->hostt->name) ? | 124 | " %s\n", hba->hba_id, (sh->hostt->name) ? |
125 | (sh->hostt->name) : "Unknown"); | 125 | (sh->hostt->name) : "Unknown"); |
126 | 126 | ||
127 | scsi_host_put(sh); | 127 | scsi_host_put(sh); |
128 | return 0; | 128 | return 0; |
129 | } | 129 | } |
130 | /* | 130 | /* |
131 | * Otherwise, locate struct Scsi_Host from the original passed | 131 | * Otherwise, locate struct Scsi_Host from the original passed |
132 | * pSCSI Host ID and enable for phba mode | 132 | * pSCSI Host ID and enable for phba mode |
133 | */ | 133 | */ |
134 | sh = scsi_host_lookup(phv->phv_host_id); | 134 | sh = scsi_host_lookup(phv->phv_host_id); |
135 | if (IS_ERR(sh)) { | 135 | if (IS_ERR(sh)) { |
136 | pr_err("pSCSI: Unable to locate SCSI Host for" | 136 | pr_err("pSCSI: Unable to locate SCSI Host for" |
137 | " phv_host_id: %d\n", phv->phv_host_id); | 137 | " phv_host_id: %d\n", phv->phv_host_id); |
138 | return PTR_ERR(sh); | 138 | return PTR_ERR(sh); |
139 | } | 139 | } |
140 | 140 | ||
141 | phv->phv_lld_host = sh; | 141 | phv->phv_lld_host = sh; |
142 | phv->phv_mode = PHV_LLD_SCSI_HOST_NO; | 142 | phv->phv_mode = PHV_LLD_SCSI_HOST_NO; |
143 | 143 | ||
144 | pr_debug("CORE_HBA[%d] - Enabled pSCSI HBA Passthrough %s\n", | 144 | pr_debug("CORE_HBA[%d] - Enabled pSCSI HBA Passthrough %s\n", |
145 | hba->hba_id, (sh->hostt->name) ? (sh->hostt->name) : "Unknown"); | 145 | hba->hba_id, (sh->hostt->name) ? (sh->hostt->name) : "Unknown"); |
146 | 146 | ||
147 | return 1; | 147 | return 1; |
148 | } | 148 | } |
149 | 149 | ||
150 | static void pscsi_tape_read_blocksize(struct se_device *dev, | 150 | static void pscsi_tape_read_blocksize(struct se_device *dev, |
151 | struct scsi_device *sdev) | 151 | struct scsi_device *sdev) |
152 | { | 152 | { |
153 | unsigned char cdb[MAX_COMMAND_SIZE], *buf; | 153 | unsigned char cdb[MAX_COMMAND_SIZE], *buf; |
154 | int ret; | 154 | int ret; |
155 | 155 | ||
156 | buf = kzalloc(12, GFP_KERNEL); | 156 | buf = kzalloc(12, GFP_KERNEL); |
157 | if (!buf) | 157 | if (!buf) |
158 | return; | 158 | return; |
159 | 159 | ||
160 | memset(cdb, 0, MAX_COMMAND_SIZE); | 160 | memset(cdb, 0, MAX_COMMAND_SIZE); |
161 | cdb[0] = MODE_SENSE; | 161 | cdb[0] = MODE_SENSE; |
162 | cdb[4] = 0x0c; /* 12 bytes */ | 162 | cdb[4] = 0x0c; /* 12 bytes */ |
163 | 163 | ||
164 | ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf, 12, NULL, | 164 | ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf, 12, NULL, |
165 | HZ, 1, NULL); | 165 | HZ, 1, NULL); |
166 | if (ret) | 166 | if (ret) |
167 | goto out_free; | 167 | goto out_free; |
168 | 168 | ||
169 | /* | 169 | /* |
170 | * If MODE_SENSE still returns zero, set the default value to 1024. | 170 | * If MODE_SENSE still returns zero, set the default value to 1024. |
171 | */ | 171 | */ |
172 | sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]); | 172 | sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]); |
173 | if (!sdev->sector_size) | 173 | if (!sdev->sector_size) |
174 | sdev->sector_size = 1024; | 174 | sdev->sector_size = 1024; |
175 | out_free: | 175 | out_free: |
176 | kfree(buf); | 176 | kfree(buf); |
177 | } | 177 | } |
178 | 178 | ||
179 | static void | 179 | static void |
180 | pscsi_set_inquiry_info(struct scsi_device *sdev, struct t10_wwn *wwn) | 180 | pscsi_set_inquiry_info(struct scsi_device *sdev, struct t10_wwn *wwn) |
181 | { | 181 | { |
182 | unsigned char *buf; | 182 | unsigned char *buf; |
183 | 183 | ||
184 | if (sdev->inquiry_len < INQUIRY_LEN) | 184 | if (sdev->inquiry_len < INQUIRY_LEN) |
185 | return; | 185 | return; |
186 | 186 | ||
187 | buf = sdev->inquiry; | 187 | buf = sdev->inquiry; |
188 | if (!buf) | 188 | if (!buf) |
189 | return; | 189 | return; |
190 | /* | 190 | /* |
191 | * Use sdev->inquiry from drivers/scsi/scsi_scan.c:scsi_alloc_sdev() | 191 | * Use sdev->inquiry from drivers/scsi/scsi_scan.c:scsi_alloc_sdev() |
192 | */ | 192 | */ |
193 | memcpy(&wwn->vendor[0], &buf[8], sizeof(wwn->vendor)); | 193 | memcpy(&wwn->vendor[0], &buf[8], sizeof(wwn->vendor)); |
194 | memcpy(&wwn->model[0], &buf[16], sizeof(wwn->model)); | 194 | memcpy(&wwn->model[0], &buf[16], sizeof(wwn->model)); |
195 | memcpy(&wwn->revision[0], &buf[32], sizeof(wwn->revision)); | 195 | memcpy(&wwn->revision[0], &buf[32], sizeof(wwn->revision)); |
196 | } | 196 | } |
197 | 197 | ||
198 | static int | 198 | static int |
199 | pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn) | 199 | pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn) |
200 | { | 200 | { |
201 | unsigned char cdb[MAX_COMMAND_SIZE], *buf; | 201 | unsigned char cdb[MAX_COMMAND_SIZE], *buf; |
202 | int ret; | 202 | int ret; |
203 | 203 | ||
204 | buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL); | 204 | buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL); |
205 | if (!buf) | 205 | if (!buf) |
206 | return -ENOMEM; | 206 | return -ENOMEM; |
207 | 207 | ||
208 | memset(cdb, 0, MAX_COMMAND_SIZE); | 208 | memset(cdb, 0, MAX_COMMAND_SIZE); |
209 | cdb[0] = INQUIRY; | 209 | cdb[0] = INQUIRY; |
210 | cdb[1] = 0x01; /* Query VPD */ | 210 | cdb[1] = 0x01; /* Query VPD */ |
211 | cdb[2] = 0x80; /* Unit Serial Number */ | 211 | cdb[2] = 0x80; /* Unit Serial Number */ |
212 | cdb[3] = (INQUIRY_VPD_SERIAL_LEN >> 8) & 0xff; | 212 | cdb[3] = (INQUIRY_VPD_SERIAL_LEN >> 8) & 0xff; |
213 | cdb[4] = (INQUIRY_VPD_SERIAL_LEN & 0xff); | 213 | cdb[4] = (INQUIRY_VPD_SERIAL_LEN & 0xff); |
214 | 214 | ||
215 | ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf, | 215 | ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf, |
216 | INQUIRY_VPD_SERIAL_LEN, NULL, HZ, 1, NULL); | 216 | INQUIRY_VPD_SERIAL_LEN, NULL, HZ, 1, NULL); |
217 | if (ret) | 217 | if (ret) |
218 | goto out_free; | 218 | goto out_free; |
219 | 219 | ||
220 | snprintf(&wwn->unit_serial[0], INQUIRY_VPD_SERIAL_LEN, "%s", &buf[4]); | 220 | snprintf(&wwn->unit_serial[0], INQUIRY_VPD_SERIAL_LEN, "%s", &buf[4]); |
221 | 221 | ||
222 | wwn->t10_sub_dev->su_dev_flags |= SDF_FIRMWARE_VPD_UNIT_SERIAL; | 222 | wwn->t10_sub_dev->su_dev_flags |= SDF_FIRMWARE_VPD_UNIT_SERIAL; |
223 | 223 | ||
224 | kfree(buf); | 224 | kfree(buf); |
225 | return 0; | 225 | return 0; |
226 | 226 | ||
227 | out_free: | 227 | out_free: |
228 | kfree(buf); | 228 | kfree(buf); |
229 | return -EPERM; | 229 | return -EPERM; |
230 | } | 230 | } |
231 | 231 | ||
232 | static void | 232 | static void |
233 | pscsi_get_inquiry_vpd_device_ident(struct scsi_device *sdev, | 233 | pscsi_get_inquiry_vpd_device_ident(struct scsi_device *sdev, |
234 | struct t10_wwn *wwn) | 234 | struct t10_wwn *wwn) |
235 | { | 235 | { |
236 | unsigned char cdb[MAX_COMMAND_SIZE], *buf, *page_83; | 236 | unsigned char cdb[MAX_COMMAND_SIZE], *buf, *page_83; |
237 | int ident_len, page_len, off = 4, ret; | 237 | int ident_len, page_len, off = 4, ret; |
238 | struct t10_vpd *vpd; | 238 | struct t10_vpd *vpd; |
239 | 239 | ||
240 | buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL); | 240 | buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL); |
241 | if (!buf) | 241 | if (!buf) |
242 | return; | 242 | return; |
243 | 243 | ||
244 | memset(cdb, 0, MAX_COMMAND_SIZE); | 244 | memset(cdb, 0, MAX_COMMAND_SIZE); |
245 | cdb[0] = INQUIRY; | 245 | cdb[0] = INQUIRY; |
246 | cdb[1] = 0x01; /* Query VPD */ | 246 | cdb[1] = 0x01; /* Query VPD */ |
247 | cdb[2] = 0x83; /* Device Identifier */ | 247 | cdb[2] = 0x83; /* Device Identifier */ |
248 | cdb[3] = (INQUIRY_VPD_DEVICE_IDENTIFIER_LEN >> 8) & 0xff; | 248 | cdb[3] = (INQUIRY_VPD_DEVICE_IDENTIFIER_LEN >> 8) & 0xff; |
249 | cdb[4] = (INQUIRY_VPD_DEVICE_IDENTIFIER_LEN & 0xff); | 249 | cdb[4] = (INQUIRY_VPD_DEVICE_IDENTIFIER_LEN & 0xff); |
250 | 250 | ||
251 | ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf, | 251 | ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf, |
252 | INQUIRY_VPD_DEVICE_IDENTIFIER_LEN, | 252 | INQUIRY_VPD_DEVICE_IDENTIFIER_LEN, |
253 | NULL, HZ, 1, NULL); | 253 | NULL, HZ, 1, NULL); |
254 | if (ret) | 254 | if (ret) |
255 | goto out; | 255 | goto out; |
256 | 256 | ||
257 | page_len = (buf[2] << 8) | buf[3]; | 257 | page_len = (buf[2] << 8) | buf[3]; |
258 | while (page_len > 0) { | 258 | while (page_len > 0) { |
259 | /* Grab a pointer to the Identification descriptor */ | 259 | /* Grab a pointer to the Identification descriptor */ |
260 | page_83 = &buf[off]; | 260 | page_83 = &buf[off]; |
261 | ident_len = page_83[3]; | 261 | ident_len = page_83[3]; |
262 | if (!ident_len) { | 262 | if (!ident_len) { |
263 | pr_err("page_83[3]: identifier" | 263 | pr_err("page_83[3]: identifier" |
264 | " length zero!\n"); | 264 | " length zero!\n"); |
265 | break; | 265 | break; |
266 | } | 266 | } |
267 | pr_debug("T10 VPD Identifier Length: %d\n", ident_len); | 267 | pr_debug("T10 VPD Identifier Length: %d\n", ident_len); |
268 | 268 | ||
269 | vpd = kzalloc(sizeof(struct t10_vpd), GFP_KERNEL); | 269 | vpd = kzalloc(sizeof(struct t10_vpd), GFP_KERNEL); |
270 | if (!vpd) { | 270 | if (!vpd) { |
271 | pr_err("Unable to allocate memory for" | 271 | pr_err("Unable to allocate memory for" |
272 | " struct t10_vpd\n"); | 272 | " struct t10_vpd\n"); |
273 | goto out; | 273 | goto out; |
274 | } | 274 | } |
275 | INIT_LIST_HEAD(&vpd->vpd_list); | 275 | INIT_LIST_HEAD(&vpd->vpd_list); |
276 | 276 | ||
277 | transport_set_vpd_proto_id(vpd, page_83); | 277 | transport_set_vpd_proto_id(vpd, page_83); |
278 | transport_set_vpd_assoc(vpd, page_83); | 278 | transport_set_vpd_assoc(vpd, page_83); |
279 | 279 | ||
280 | if (transport_set_vpd_ident_type(vpd, page_83) < 0) { | 280 | if (transport_set_vpd_ident_type(vpd, page_83) < 0) { |
281 | off += (ident_len + 4); | 281 | off += (ident_len + 4); |
282 | page_len -= (ident_len + 4); | 282 | page_len -= (ident_len + 4); |
283 | kfree(vpd); | 283 | kfree(vpd); |
284 | continue; | 284 | continue; |
285 | } | 285 | } |
286 | if (transport_set_vpd_ident(vpd, page_83) < 0) { | 286 | if (transport_set_vpd_ident(vpd, page_83) < 0) { |
287 | off += (ident_len + 4); | 287 | off += (ident_len + 4); |
288 | page_len -= (ident_len + 4); | 288 | page_len -= (ident_len + 4); |
289 | kfree(vpd); | 289 | kfree(vpd); |
290 | continue; | 290 | continue; |
291 | } | 291 | } |
292 | 292 | ||
293 | list_add_tail(&vpd->vpd_list, &wwn->t10_vpd_list); | 293 | list_add_tail(&vpd->vpd_list, &wwn->t10_vpd_list); |
294 | off += (ident_len + 4); | 294 | off += (ident_len + 4); |
295 | page_len -= (ident_len + 4); | 295 | page_len -= (ident_len + 4); |
296 | } | 296 | } |
297 | 297 | ||
298 | out: | 298 | out: |
299 | kfree(buf); | 299 | kfree(buf); |
300 | } | 300 | } |
301 | 301 | ||
302 | /* pscsi_add_device_to_list(): | 302 | /* pscsi_add_device_to_list(): |
303 | * | 303 | * |
304 | * | 304 | * |
305 | */ | 305 | */ |
306 | static struct se_device *pscsi_add_device_to_list( | 306 | static struct se_device *pscsi_add_device_to_list( |
307 | struct se_hba *hba, | 307 | struct se_hba *hba, |
308 | struct se_subsystem_dev *se_dev, | 308 | struct se_subsystem_dev *se_dev, |
309 | struct pscsi_dev_virt *pdv, | 309 | struct pscsi_dev_virt *pdv, |
310 | struct scsi_device *sd, | 310 | struct scsi_device *sd, |
311 | int dev_flags) | 311 | int dev_flags) |
312 | { | 312 | { |
313 | struct se_device *dev; | 313 | struct se_device *dev; |
314 | struct se_dev_limits dev_limits; | 314 | struct se_dev_limits dev_limits; |
315 | struct request_queue *q; | 315 | struct request_queue *q; |
316 | struct queue_limits *limits; | 316 | struct queue_limits *limits; |
317 | 317 | ||
318 | memset(&dev_limits, 0, sizeof(struct se_dev_limits)); | 318 | memset(&dev_limits, 0, sizeof(struct se_dev_limits)); |
319 | 319 | ||
320 | if (!sd->queue_depth) { | 320 | if (!sd->queue_depth) { |
321 | sd->queue_depth = PSCSI_DEFAULT_QUEUEDEPTH; | 321 | sd->queue_depth = PSCSI_DEFAULT_QUEUEDEPTH; |
322 | 322 | ||
323 | pr_err("Set broken SCSI Device %d:%d:%d" | 323 | pr_err("Set broken SCSI Device %d:%d:%d" |
324 | " queue_depth to %d\n", sd->channel, sd->id, | 324 | " queue_depth to %d\n", sd->channel, sd->id, |
325 | sd->lun, sd->queue_depth); | 325 | sd->lun, sd->queue_depth); |
326 | } | 326 | } |
327 | /* | 327 | /* |
328 | * Setup the local scope queue_limits from struct request_queue->limits | 328 | * Setup the local scope queue_limits from struct request_queue->limits |
329 | * to pass into transport_add_device_to_core_hba() as struct se_dev_limits. | 329 | * to pass into transport_add_device_to_core_hba() as struct se_dev_limits. |
330 | */ | 330 | */ |
331 | q = sd->request_queue; | 331 | q = sd->request_queue; |
332 | limits = &dev_limits.limits; | 332 | limits = &dev_limits.limits; |
333 | limits->logical_block_size = sd->sector_size; | 333 | limits->logical_block_size = sd->sector_size; |
334 | limits->max_hw_sectors = min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q)); | 334 | limits->max_hw_sectors = min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q)); |
335 | limits->max_sectors = min_t(int, sd->host->max_sectors, queue_max_sectors(q)); | 335 | limits->max_sectors = min_t(int, sd->host->max_sectors, queue_max_sectors(q)); |
336 | dev_limits.hw_queue_depth = sd->queue_depth; | 336 | dev_limits.hw_queue_depth = sd->queue_depth; |
337 | dev_limits.queue_depth = sd->queue_depth; | 337 | dev_limits.queue_depth = sd->queue_depth; |
338 | /* | 338 | /* |
339 | * Setup our standard INQUIRY info into se_dev->t10_wwn | 339 | * Setup our standard INQUIRY info into se_dev->t10_wwn |
340 | */ | 340 | */ |
341 | pscsi_set_inquiry_info(sd, &se_dev->t10_wwn); | 341 | pscsi_set_inquiry_info(sd, &se_dev->t10_wwn); |
342 | 342 | ||
343 | /* | 343 | /* |
344 | * Set the pointer pdv->pdv_sd from the passed struct scsi_device, | 344 | * Set the pointer pdv->pdv_sd from the passed struct scsi_device, |
345 | * which has already been referenced with Linux SCSI code with | 345 | * which has already been referenced with Linux SCSI code with |
346 | * scsi_device_get() in this file's pscsi_create_virtdevice(). | 346 | * scsi_device_get() in this file's pscsi_create_virtdevice(). |
347 | * | 347 | * |
348 | * The passthrough operations called by the transport_add_device_* | 348 | * The passthrough operations called by the transport_add_device_* |
349 | * function below will require this pointer to be set for passthrough | 349 | * function below will require this pointer to be set for passthrough |
350 | * ops. | 350 | * ops. |
351 | * | 351 | * |
352 | * For the shutdown case in pscsi_free_device(), this struct | 352 | * For the shutdown case in pscsi_free_device(), this struct |
353 | * scsi_device reference is released with Linux SCSI code | 353 | * scsi_device reference is released with Linux SCSI code |
354 | * scsi_device_put() and the pdv->pdv_sd cleared. | 354 | * scsi_device_put() and the pdv->pdv_sd cleared. |
355 | */ | 355 | */ |
356 | pdv->pdv_sd = sd; | 356 | pdv->pdv_sd = sd; |
357 | dev = transport_add_device_to_core_hba(hba, &pscsi_template, | 357 | dev = transport_add_device_to_core_hba(hba, &pscsi_template, |
358 | se_dev, dev_flags, pdv, | 358 | se_dev, dev_flags, pdv, |
359 | &dev_limits, NULL, NULL); | 359 | &dev_limits, NULL, NULL); |
360 | if (!dev) { | 360 | if (!dev) { |
361 | pdv->pdv_sd = NULL; | 361 | pdv->pdv_sd = NULL; |
362 | return NULL; | 362 | return NULL; |
363 | } | 363 | } |
364 | 364 | ||
365 | /* | 365 | /* |
366 | * Locate VPD WWN Information used for various purposes within | 366 | * Locate VPD WWN Information used for various purposes within |
367 | * the Storage Engine. | 367 | * the Storage Engine. |
368 | */ | 368 | */ |
369 | if (!pscsi_get_inquiry_vpd_serial(sd, &se_dev->t10_wwn)) { | 369 | if (!pscsi_get_inquiry_vpd_serial(sd, &se_dev->t10_wwn)) { |
370 | /* | 370 | /* |
371 | * If VPD Unit Serial returned GOOD status, try | 371 | * If VPD Unit Serial returned GOOD status, try |
372 | * VPD Device Identification page (0x83). | 372 | * VPD Device Identification page (0x83). |
373 | */ | 373 | */ |
374 | pscsi_get_inquiry_vpd_device_ident(sd, &se_dev->t10_wwn); | 374 | pscsi_get_inquiry_vpd_device_ident(sd, &se_dev->t10_wwn); |
375 | } | 375 | } |
376 | 376 | ||
377 | /* | 377 | /* |
378 | * For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE. | 378 | * For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE. |
379 | */ | 379 | */ |
380 | if (sd->type == TYPE_TAPE) | 380 | if (sd->type == TYPE_TAPE) |
381 | pscsi_tape_read_blocksize(dev, sd); | 381 | pscsi_tape_read_blocksize(dev, sd); |
382 | return dev; | 382 | return dev; |
383 | } | 383 | } |
384 | 384 | ||
385 | static void *pscsi_allocate_virtdevice(struct se_hba *hba, const char *name) | 385 | static void *pscsi_allocate_virtdevice(struct se_hba *hba, const char *name) |
386 | { | 386 | { |
387 | struct pscsi_dev_virt *pdv; | 387 | struct pscsi_dev_virt *pdv; |
388 | 388 | ||
389 | pdv = kzalloc(sizeof(struct pscsi_dev_virt), GFP_KERNEL); | 389 | pdv = kzalloc(sizeof(struct pscsi_dev_virt), GFP_KERNEL); |
390 | if (!pdv) { | 390 | if (!pdv) { |
391 | pr_err("Unable to allocate memory for struct pscsi_dev_virt\n"); | 391 | pr_err("Unable to allocate memory for struct pscsi_dev_virt\n"); |
392 | return NULL; | 392 | return NULL; |
393 | } | 393 | } |
394 | pdv->pdv_se_hba = hba; | 394 | pdv->pdv_se_hba = hba; |
395 | 395 | ||
396 | pr_debug("PSCSI: Allocated pdv: %p for %s\n", pdv, name); | 396 | pr_debug("PSCSI: Allocated pdv: %p for %s\n", pdv, name); |
397 | return pdv; | 397 | return pdv; |
398 | } | 398 | } |
399 | 399 | ||
400 | /* | 400 | /* |
401 | * Called with struct Scsi_Host->host_lock held. | 401 | * Called with struct Scsi_Host->host_lock held. |
402 | */ | 402 | */ |
403 | static struct se_device *pscsi_create_type_disk( | 403 | static struct se_device *pscsi_create_type_disk( |
404 | struct scsi_device *sd, | 404 | struct scsi_device *sd, |
405 | struct pscsi_dev_virt *pdv, | 405 | struct pscsi_dev_virt *pdv, |
406 | struct se_subsystem_dev *se_dev, | 406 | struct se_subsystem_dev *se_dev, |
407 | struct se_hba *hba) | 407 | struct se_hba *hba) |
408 | __releases(sh->host_lock) | 408 | __releases(sh->host_lock) |
409 | { | 409 | { |
410 | struct se_device *dev; | 410 | struct se_device *dev; |
411 | struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr; | 411 | struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr; |
412 | struct Scsi_Host *sh = sd->host; | 412 | struct Scsi_Host *sh = sd->host; |
413 | struct block_device *bd; | 413 | struct block_device *bd; |
414 | u32 dev_flags = 0; | 414 | u32 dev_flags = 0; |
415 | 415 | ||
416 | if (scsi_device_get(sd)) { | 416 | if (scsi_device_get(sd)) { |
417 | pr_err("scsi_device_get() failed for %d:%d:%d:%d\n", | 417 | pr_err("scsi_device_get() failed for %d:%d:%d:%d\n", |
418 | sh->host_no, sd->channel, sd->id, sd->lun); | 418 | sh->host_no, sd->channel, sd->id, sd->lun); |
419 | spin_unlock_irq(sh->host_lock); | 419 | spin_unlock_irq(sh->host_lock); |
420 | return NULL; | 420 | return NULL; |
421 | } | 421 | } |
422 | spin_unlock_irq(sh->host_lock); | 422 | spin_unlock_irq(sh->host_lock); |
423 | /* | 423 | /* |
424 | * Claim exclusive struct block_device access to struct scsi_device | 424 | * Claim exclusive struct block_device access to struct scsi_device |
425 | * for TYPE_DISK using supplied udev_path | 425 | * for TYPE_DISK using supplied udev_path |
426 | */ | 426 | */ |
427 | bd = blkdev_get_by_path(se_dev->se_dev_udev_path, | 427 | bd = blkdev_get_by_path(se_dev->se_dev_udev_path, |
428 | FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv); | 428 | FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv); |
429 | if (IS_ERR(bd)) { | 429 | if (IS_ERR(bd)) { |
430 | pr_err("pSCSI: blkdev_get_by_path() failed\n"); | 430 | pr_err("pSCSI: blkdev_get_by_path() failed\n"); |
431 | scsi_device_put(sd); | 431 | scsi_device_put(sd); |
432 | return NULL; | 432 | return NULL; |
433 | } | 433 | } |
434 | pdv->pdv_bd = bd; | 434 | pdv->pdv_bd = bd; |
435 | 435 | ||
436 | dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags); | 436 | dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags); |
437 | if (!dev) { | 437 | if (!dev) { |
438 | blkdev_put(pdv->pdv_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); | 438 | blkdev_put(pdv->pdv_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); |
439 | scsi_device_put(sd); | 439 | scsi_device_put(sd); |
440 | return NULL; | 440 | return NULL; |
441 | } | 441 | } |
442 | pr_debug("CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%d\n", | 442 | pr_debug("CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%d\n", |
443 | phv->phv_host_id, sh->host_no, sd->channel, sd->id, sd->lun); | 443 | phv->phv_host_id, sh->host_no, sd->channel, sd->id, sd->lun); |
444 | 444 | ||
445 | return dev; | 445 | return dev; |
446 | } | 446 | } |
447 | 447 | ||
448 | /* | 448 | /* |
449 | * Called with struct Scsi_Host->host_lock held. | 449 | * Called with struct Scsi_Host->host_lock held. |
450 | */ | 450 | */ |
451 | static struct se_device *pscsi_create_type_rom( | 451 | static struct se_device *pscsi_create_type_rom( |
452 | struct scsi_device *sd, | 452 | struct scsi_device *sd, |
453 | struct pscsi_dev_virt *pdv, | 453 | struct pscsi_dev_virt *pdv, |
454 | struct se_subsystem_dev *se_dev, | 454 | struct se_subsystem_dev *se_dev, |
455 | struct se_hba *hba) | 455 | struct se_hba *hba) |
456 | __releases(sh->host_lock) | 456 | __releases(sh->host_lock) |
457 | { | 457 | { |
458 | struct se_device *dev; | 458 | struct se_device *dev; |
459 | struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr; | 459 | struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr; |
460 | struct Scsi_Host *sh = sd->host; | 460 | struct Scsi_Host *sh = sd->host; |
461 | u32 dev_flags = 0; | 461 | u32 dev_flags = 0; |
462 | 462 | ||
463 | if (scsi_device_get(sd)) { | 463 | if (scsi_device_get(sd)) { |
464 | pr_err("scsi_device_get() failed for %d:%d:%d:%d\n", | 464 | pr_err("scsi_device_get() failed for %d:%d:%d:%d\n", |
465 | sh->host_no, sd->channel, sd->id, sd->lun); | 465 | sh->host_no, sd->channel, sd->id, sd->lun); |
466 | spin_unlock_irq(sh->host_lock); | 466 | spin_unlock_irq(sh->host_lock); |
467 | return NULL; | 467 | return NULL; |
468 | } | 468 | } |
469 | spin_unlock_irq(sh->host_lock); | 469 | spin_unlock_irq(sh->host_lock); |
470 | 470 | ||
471 | dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags); | 471 | dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags); |
472 | if (!dev) { | 472 | if (!dev) { |
473 | scsi_device_put(sd); | 473 | scsi_device_put(sd); |
474 | return NULL; | 474 | return NULL; |
475 | } | 475 | } |
476 | pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n", | 476 | pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n", |
477 | phv->phv_host_id, scsi_device_type(sd->type), sh->host_no, | 477 | phv->phv_host_id, scsi_device_type(sd->type), sh->host_no, |
478 | sd->channel, sd->id, sd->lun); | 478 | sd->channel, sd->id, sd->lun); |
479 | 479 | ||
480 | return dev; | 480 | return dev; |
481 | } | 481 | } |
482 | 482 | ||
483 | /* | 483 | /* |
484 | * Called with struct Scsi_Host->host_lock held. | 484 | * Called with struct Scsi_Host->host_lock held. |
485 | */ | 485 | */ |
486 | static struct se_device *pscsi_create_type_other( | 486 | static struct se_device *pscsi_create_type_other( |
487 | struct scsi_device *sd, | 487 | struct scsi_device *sd, |
488 | struct pscsi_dev_virt *pdv, | 488 | struct pscsi_dev_virt *pdv, |
489 | struct se_subsystem_dev *se_dev, | 489 | struct se_subsystem_dev *se_dev, |
490 | struct se_hba *hba) | 490 | struct se_hba *hba) |
491 | __releases(sh->host_lock) | 491 | __releases(sh->host_lock) |
492 | { | 492 | { |
493 | struct se_device *dev; | 493 | struct se_device *dev; |
494 | struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr; | 494 | struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr; |
495 | struct Scsi_Host *sh = sd->host; | 495 | struct Scsi_Host *sh = sd->host; |
496 | u32 dev_flags = 0; | 496 | u32 dev_flags = 0; |
497 | 497 | ||
498 | spin_unlock_irq(sh->host_lock); | 498 | spin_unlock_irq(sh->host_lock); |
499 | dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags); | 499 | dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags); |
500 | if (!dev) | 500 | if (!dev) |
501 | return NULL; | 501 | return NULL; |
502 | 502 | ||
503 | pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n", | 503 | pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n", |
504 | phv->phv_host_id, scsi_device_type(sd->type), sh->host_no, | 504 | phv->phv_host_id, scsi_device_type(sd->type), sh->host_no, |
505 | sd->channel, sd->id, sd->lun); | 505 | sd->channel, sd->id, sd->lun); |
506 | 506 | ||
507 | return dev; | 507 | return dev; |
508 | } | 508 | } |
509 | 509 | ||
510 | static struct se_device *pscsi_create_virtdevice( | 510 | static struct se_device *pscsi_create_virtdevice( |
511 | struct se_hba *hba, | 511 | struct se_hba *hba, |
512 | struct se_subsystem_dev *se_dev, | 512 | struct se_subsystem_dev *se_dev, |
513 | void *p) | 513 | void *p) |
514 | { | 514 | { |
515 | struct pscsi_dev_virt *pdv = p; | 515 | struct pscsi_dev_virt *pdv = p; |
516 | struct se_device *dev; | 516 | struct se_device *dev; |
517 | struct scsi_device *sd; | 517 | struct scsi_device *sd; |
518 | struct pscsi_hba_virt *phv = hba->hba_ptr; | 518 | struct pscsi_hba_virt *phv = hba->hba_ptr; |
519 | struct Scsi_Host *sh = phv->phv_lld_host; | 519 | struct Scsi_Host *sh = phv->phv_lld_host; |
520 | int legacy_mode_enable = 0; | 520 | int legacy_mode_enable = 0; |
521 | 521 | ||
522 | if (!pdv) { | 522 | if (!pdv) { |
523 | pr_err("Unable to locate struct pscsi_dev_virt" | 523 | pr_err("Unable to locate struct pscsi_dev_virt" |
524 | " parameter\n"); | 524 | " parameter\n"); |
525 | return ERR_PTR(-EINVAL); | 525 | return ERR_PTR(-EINVAL); |
526 | } | 526 | } |
527 | /* | 527 | /* |
528 | * If not running in PHV_LLD_SCSI_HOST_NO mode, locate the | 528 | * If not running in PHV_LLD_SCSI_HOST_NO mode, locate the |
529 | * struct Scsi_Host we will need to bring the TCM/pSCSI object online | 529 | * struct Scsi_Host we will need to bring the TCM/pSCSI object online |
530 | */ | 530 | */ |
531 | if (!sh) { | 531 | if (!sh) { |
532 | if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) { | 532 | if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) { |
533 | pr_err("pSCSI: Unable to locate struct" | 533 | pr_err("pSCSI: Unable to locate struct" |
534 | " Scsi_Host for PHV_LLD_SCSI_HOST_NO\n"); | 534 | " Scsi_Host for PHV_LLD_SCSI_HOST_NO\n"); |
535 | return ERR_PTR(-ENODEV); | 535 | return ERR_PTR(-ENODEV); |
536 | } | 536 | } |
537 | /* | 537 | /* |
538 | * For the newer PHV_VIRTUAL_HOST_ID struct scsi_device | 538 | * For the newer PHV_VIRTUAL_HOST_ID struct scsi_device |
539 | * reference, we enforce that udev_path has been set | 539 | * reference, we enforce that udev_path has been set |
540 | */ | 540 | */ |
541 | if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) { | 541 | if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) { |
542 | pr_err("pSCSI: udev_path attribute has not" | 542 | pr_err("pSCSI: udev_path attribute has not" |
543 | " been set before ENABLE=1\n"); | 543 | " been set before ENABLE=1\n"); |
544 | return ERR_PTR(-EINVAL); | 544 | return ERR_PTR(-EINVAL); |
545 | } | 545 | } |
546 | /* | 546 | /* |
547 | * If no scsi_host_id= was passed for PHV_VIRTUAL_HOST_ID, | 547 | * If no scsi_host_id= was passed for PHV_VIRTUAL_HOST_ID, |
548 | * use the original TCM hba ID to reference Linux/SCSI Host No | 548 | * use the original TCM hba ID to reference Linux/SCSI Host No |
549 | * and enable for PHV_LLD_SCSI_HOST_NO mode. | 549 | * and enable for PHV_LLD_SCSI_HOST_NO mode. |
550 | */ | 550 | */ |
551 | if (!(pdv->pdv_flags & PDF_HAS_VIRT_HOST_ID)) { | 551 | if (!(pdv->pdv_flags & PDF_HAS_VIRT_HOST_ID)) { |
552 | spin_lock(&hba->device_lock); | 552 | spin_lock(&hba->device_lock); |
553 | if (!list_empty(&hba->hba_dev_list)) { | 553 | if (!list_empty(&hba->hba_dev_list)) { |
554 | pr_err("pSCSI: Unable to set hba_mode" | 554 | pr_err("pSCSI: Unable to set hba_mode" |
555 | " with active devices\n"); | 555 | " with active devices\n"); |
556 | spin_unlock(&hba->device_lock); | 556 | spin_unlock(&hba->device_lock); |
557 | return ERR_PTR(-EEXIST); | 557 | return ERR_PTR(-EEXIST); |
558 | } | 558 | } |
559 | spin_unlock(&hba->device_lock); | 559 | spin_unlock(&hba->device_lock); |
560 | 560 | ||
561 | if (pscsi_pmode_enable_hba(hba, 1) != 1) | 561 | if (pscsi_pmode_enable_hba(hba, 1) != 1) |
562 | return ERR_PTR(-ENODEV); | 562 | return ERR_PTR(-ENODEV); |
563 | 563 | ||
564 | legacy_mode_enable = 1; | 564 | legacy_mode_enable = 1; |
565 | hba->hba_flags |= HBA_FLAGS_PSCSI_MODE; | 565 | hba->hba_flags |= HBA_FLAGS_PSCSI_MODE; |
566 | sh = phv->phv_lld_host; | 566 | sh = phv->phv_lld_host; |
567 | } else { | 567 | } else { |
568 | sh = scsi_host_lookup(pdv->pdv_host_id); | 568 | sh = scsi_host_lookup(pdv->pdv_host_id); |
569 | if (IS_ERR(sh)) { | 569 | if (IS_ERR(sh)) { |
570 | pr_err("pSCSI: Unable to locate" | 570 | pr_err("pSCSI: Unable to locate" |
571 | " pdv_host_id: %d\n", pdv->pdv_host_id); | 571 | " pdv_host_id: %d\n", pdv->pdv_host_id); |
572 | return ERR_CAST(sh); | 572 | return ERR_CAST(sh); |
573 | } | 573 | } |
574 | } | 574 | } |
575 | } else { | 575 | } else { |
576 | if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) { | 576 | if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) { |
577 | pr_err("pSCSI: PHV_VIRTUAL_HOST_ID set while" | 577 | pr_err("pSCSI: PHV_VIRTUAL_HOST_ID set while" |
578 | " struct Scsi_Host exists\n"); | 578 | " struct Scsi_Host exists\n"); |
579 | return ERR_PTR(-EEXIST); | 579 | return ERR_PTR(-EEXIST); |
580 | } | 580 | } |
581 | } | 581 | } |
582 | 582 | ||
583 | spin_lock_irq(sh->host_lock); | 583 | spin_lock_irq(sh->host_lock); |
584 | list_for_each_entry(sd, &sh->__devices, siblings) { | 584 | list_for_each_entry(sd, &sh->__devices, siblings) { |
585 | if ((pdv->pdv_channel_id != sd->channel) || | 585 | if ((pdv->pdv_channel_id != sd->channel) || |
586 | (pdv->pdv_target_id != sd->id) || | 586 | (pdv->pdv_target_id != sd->id) || |
587 | (pdv->pdv_lun_id != sd->lun)) | 587 | (pdv->pdv_lun_id != sd->lun)) |
588 | continue; | 588 | continue; |
589 | /* | 589 | /* |
590 | * Functions will release the held struct scsi_host->host_lock | 590 | * Functions will release the held struct scsi_host->host_lock |
591 | * before calling pscsi_add_device_to_list() to register | 591 | * before calling pscsi_add_device_to_list() to register |
592 | * struct scsi_device with target_core_mod. | 592 | * struct scsi_device with target_core_mod. |
593 | */ | 593 | */ |
594 | switch (sd->type) { | 594 | switch (sd->type) { |
595 | case TYPE_DISK: | 595 | case TYPE_DISK: |
596 | dev = pscsi_create_type_disk(sd, pdv, se_dev, hba); | 596 | dev = pscsi_create_type_disk(sd, pdv, se_dev, hba); |
597 | break; | 597 | break; |
598 | case TYPE_ROM: | 598 | case TYPE_ROM: |
599 | dev = pscsi_create_type_rom(sd, pdv, se_dev, hba); | 599 | dev = pscsi_create_type_rom(sd, pdv, se_dev, hba); |
600 | break; | 600 | break; |
601 | default: | 601 | default: |
602 | dev = pscsi_create_type_other(sd, pdv, se_dev, hba); | 602 | dev = pscsi_create_type_other(sd, pdv, se_dev, hba); |
603 | break; | 603 | break; |
604 | } | 604 | } |
605 | 605 | ||
606 | if (!dev) { | 606 | if (!dev) { |
607 | if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) | 607 | if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) |
608 | scsi_host_put(sh); | 608 | scsi_host_put(sh); |
609 | else if (legacy_mode_enable) { | 609 | else if (legacy_mode_enable) { |
610 | pscsi_pmode_enable_hba(hba, 0); | 610 | pscsi_pmode_enable_hba(hba, 0); |
611 | hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE; | 611 | hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE; |
612 | } | 612 | } |
613 | pdv->pdv_sd = NULL; | 613 | pdv->pdv_sd = NULL; |
614 | return ERR_PTR(-ENODEV); | 614 | return ERR_PTR(-ENODEV); |
615 | } | 615 | } |
616 | return dev; | 616 | return dev; |
617 | } | 617 | } |
618 | spin_unlock_irq(sh->host_lock); | 618 | spin_unlock_irq(sh->host_lock); |
619 | 619 | ||
620 | pr_err("pSCSI: Unable to locate %d:%d:%d:%d\n", sh->host_no, | 620 | pr_err("pSCSI: Unable to locate %d:%d:%d:%d\n", sh->host_no, |
621 | pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id); | 621 | pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id); |
622 | 622 | ||
623 | if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) | 623 | if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) |
624 | scsi_host_put(sh); | 624 | scsi_host_put(sh); |
625 | else if (legacy_mode_enable) { | 625 | else if (legacy_mode_enable) { |
626 | pscsi_pmode_enable_hba(hba, 0); | 626 | pscsi_pmode_enable_hba(hba, 0); |
627 | hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE; | 627 | hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE; |
628 | } | 628 | } |
629 | 629 | ||
630 | return ERR_PTR(-ENODEV); | 630 | return ERR_PTR(-ENODEV); |
631 | } | 631 | } |
632 | 632 | ||
633 | /* pscsi_free_device(): (Part of se_subsystem_api_t template) | 633 | /* pscsi_free_device(): (Part of se_subsystem_api_t template) |
634 | * | 634 | * |
635 | * | 635 | * |
636 | */ | 636 | */ |
637 | static void pscsi_free_device(void *p) | 637 | static void pscsi_free_device(void *p) |
638 | { | 638 | { |
639 | struct pscsi_dev_virt *pdv = p; | 639 | struct pscsi_dev_virt *pdv = p; |
640 | struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr; | 640 | struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr; |
641 | struct scsi_device *sd = pdv->pdv_sd; | 641 | struct scsi_device *sd = pdv->pdv_sd; |
642 | 642 | ||
643 | if (sd) { | 643 | if (sd) { |
644 | /* | 644 | /* |
645 | * Release exclusive pSCSI internal struct block_device claim for | 645 | * Release exclusive pSCSI internal struct block_device claim for |
646 | * struct scsi_device with TYPE_DISK from pscsi_create_type_disk() | 646 | * struct scsi_device with TYPE_DISK from pscsi_create_type_disk() |
647 | */ | 647 | */ |
648 | if ((sd->type == TYPE_DISK) && pdv->pdv_bd) { | 648 | if ((sd->type == TYPE_DISK) && pdv->pdv_bd) { |
649 | blkdev_put(pdv->pdv_bd, | 649 | blkdev_put(pdv->pdv_bd, |
650 | FMODE_WRITE|FMODE_READ|FMODE_EXCL); | 650 | FMODE_WRITE|FMODE_READ|FMODE_EXCL); |
651 | pdv->pdv_bd = NULL; | 651 | pdv->pdv_bd = NULL; |
652 | } | 652 | } |
653 | /* | 653 | /* |
654 | * For HBA mode PHV_LLD_SCSI_HOST_NO, release the reference | 654 | * For HBA mode PHV_LLD_SCSI_HOST_NO, release the reference |
655 | * to struct Scsi_Host now. | 655 | * to struct Scsi_Host now. |
656 | */ | 656 | */ |
657 | if ((phv->phv_mode == PHV_LLD_SCSI_HOST_NO) && | 657 | if ((phv->phv_mode == PHV_LLD_SCSI_HOST_NO) && |
658 | (phv->phv_lld_host != NULL)) | 658 | (phv->phv_lld_host != NULL)) |
659 | scsi_host_put(phv->phv_lld_host); | 659 | scsi_host_put(phv->phv_lld_host); |
660 | 660 | ||
661 | if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM)) | 661 | if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM)) |
662 | scsi_device_put(sd); | 662 | scsi_device_put(sd); |
663 | 663 | ||
664 | pdv->pdv_sd = NULL; | 664 | pdv->pdv_sd = NULL; |
665 | } | 665 | } |
666 | 666 | ||
667 | kfree(pdv); | 667 | kfree(pdv); |
668 | } | 668 | } |
669 | 669 | ||
670 | static void pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg, | 670 | static void pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg, |
671 | unsigned char *sense_buffer) | 671 | unsigned char *sense_buffer) |
672 | { | 672 | { |
673 | struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr; | 673 | struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr; |
674 | struct scsi_device *sd = pdv->pdv_sd; | 674 | struct scsi_device *sd = pdv->pdv_sd; |
675 | int result; | 675 | int result; |
676 | struct pscsi_plugin_task *pt = cmd->priv; | 676 | struct pscsi_plugin_task *pt = cmd->priv; |
677 | unsigned char *cdb; | 677 | unsigned char *cdb; |
678 | /* | 678 | /* |
679 | * Special case for REPORT_LUNs handling where pscsi_plugin_task has | 679 | * Special case for REPORT_LUNs handling where pscsi_plugin_task has |
680 | * not been allocated because TCM is handling the emulation directly. | 680 | * not been allocated because TCM is handling the emulation directly. |
681 | */ | 681 | */ |
682 | if (!pt) | 682 | if (!pt) |
683 | return; | 683 | return; |
684 | 684 | ||
685 | cdb = &pt->pscsi_cdb[0]; | 685 | cdb = &pt->pscsi_cdb[0]; |
686 | result = pt->pscsi_result; | 686 | result = pt->pscsi_result; |
687 | /* | 687 | /* |
688 | * Hack to make sure that Write-Protect modepage is set if R/O mode is | 688 | * Hack to make sure that Write-Protect modepage is set if R/O mode is |
689 | * forced. | 689 | * forced. |
690 | */ | 690 | */ |
691 | if (!cmd->se_deve || !cmd->data_length) | ||
692 | goto after_mode_sense; | ||
693 | |||
691 | if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) && | 694 | if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) && |
692 | (status_byte(result) << 1) == SAM_STAT_GOOD) { | 695 | (status_byte(result) << 1) == SAM_STAT_GOOD) { |
693 | if (!cmd->se_deve) | ||
694 | goto after_mode_sense; | ||
695 | |||
696 | if (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) { | 696 | if (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) { |
697 | unsigned char *buf = transport_kmap_data_sg(cmd); | 697 | unsigned char *buf = transport_kmap_data_sg(cmd); |
698 | 698 | ||
699 | if (cdb[0] == MODE_SENSE_10) { | 699 | if (cdb[0] == MODE_SENSE_10) { |
700 | if (!(buf[3] & 0x80)) | 700 | if (!(buf[3] & 0x80)) |
701 | buf[3] |= 0x80; | 701 | buf[3] |= 0x80; |
702 | } else { | 702 | } else { |
703 | if (!(buf[2] & 0x80)) | 703 | if (!(buf[2] & 0x80)) |
704 | buf[2] |= 0x80; | 704 | buf[2] |= 0x80; |
705 | } | 705 | } |
706 | 706 | ||
707 | transport_kunmap_data_sg(cmd); | 707 | transport_kunmap_data_sg(cmd); |
708 | } | 708 | } |
709 | } | 709 | } |
710 | after_mode_sense: | 710 | after_mode_sense: |
711 | 711 | ||
712 | if (sd->type != TYPE_TAPE) | 712 | if (sd->type != TYPE_TAPE || !cmd->data_length) |
713 | goto after_mode_select; | 713 | goto after_mode_select; |
714 | 714 | ||
715 | /* | 715 | /* |
716 | * Hack to correctly obtain the initiator requested blocksize for | 716 | * Hack to correctly obtain the initiator requested blocksize for |
717 | * TYPE_TAPE. Since this value is dependent upon each tape media, | 717 | * TYPE_TAPE. Since this value is dependent upon each tape media, |
718 | * struct scsi_device->sector_size will not contain the correct value | 718 | * struct scsi_device->sector_size will not contain the correct value |
719 | * by default, so we go ahead and set it so | 719 | * by default, so we go ahead and set it so |
720 | * TRANSPORT(dev)->get_blockdev() returns the correct value to the | 720 | * TRANSPORT(dev)->get_blockdev() returns the correct value to the |
721 | * storage engine. | 721 | * storage engine. |
722 | */ | 722 | */ |
723 | if (((cdb[0] == MODE_SELECT) || (cdb[0] == MODE_SELECT_10)) && | 723 | if (((cdb[0] == MODE_SELECT) || (cdb[0] == MODE_SELECT_10)) && |
724 | (status_byte(result) << 1) == SAM_STAT_GOOD) { | 724 | (status_byte(result) << 1) == SAM_STAT_GOOD) { |
725 | unsigned char *buf; | 725 | unsigned char *buf; |
726 | u16 bdl; | 726 | u16 bdl; |
727 | u32 blocksize; | 727 | u32 blocksize; |
728 | 728 | ||
729 | buf = sg_virt(&sg[0]); | 729 | buf = sg_virt(&sg[0]); |
730 | if (!buf) { | 730 | if (!buf) { |
731 | pr_err("Unable to get buf for scatterlist\n"); | 731 | pr_err("Unable to get buf for scatterlist\n"); |
732 | goto after_mode_select; | 732 | goto after_mode_select; |
733 | } | 733 | } |
734 | 734 | ||
735 | if (cdb[0] == MODE_SELECT) | 735 | if (cdb[0] == MODE_SELECT) |
736 | bdl = (buf[3]); | 736 | bdl = (buf[3]); |
737 | else | 737 | else |
738 | bdl = (buf[6] << 8) | (buf[7]); | 738 | bdl = (buf[6] << 8) | (buf[7]); |
739 | 739 | ||
740 | if (!bdl) | 740 | if (!bdl) |
741 | goto after_mode_select; | 741 | goto after_mode_select; |
742 | 742 | ||
743 | if (cdb[0] == MODE_SELECT) | 743 | if (cdb[0] == MODE_SELECT) |
744 | blocksize = (buf[9] << 16) | (buf[10] << 8) | | 744 | blocksize = (buf[9] << 16) | (buf[10] << 8) | |
745 | (buf[11]); | 745 | (buf[11]); |
746 | else | 746 | else |
747 | blocksize = (buf[13] << 16) | (buf[14] << 8) | | 747 | blocksize = (buf[13] << 16) | (buf[14] << 8) | |
748 | (buf[15]); | 748 | (buf[15]); |
749 | 749 | ||
750 | sd->sector_size = blocksize; | 750 | sd->sector_size = blocksize; |
751 | } | 751 | } |
752 | after_mode_select: | 752 | after_mode_select: |
753 | 753 | ||
754 | if (sense_buffer && (status_byte(result) & CHECK_CONDITION)) { | 754 | if (sense_buffer && (status_byte(result) & CHECK_CONDITION)) { |
755 | memcpy(sense_buffer, pt->pscsi_sense, TRANSPORT_SENSE_BUFFER); | 755 | memcpy(sense_buffer, pt->pscsi_sense, TRANSPORT_SENSE_BUFFER); |
756 | cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE; | 756 | cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE; |
757 | } | 757 | } |
758 | } | 758 | } |
759 | 759 | ||
760 | enum { | 760 | enum { |
761 | Opt_scsi_host_id, Opt_scsi_channel_id, Opt_scsi_target_id, | 761 | Opt_scsi_host_id, Opt_scsi_channel_id, Opt_scsi_target_id, |
762 | Opt_scsi_lun_id, Opt_err | 762 | Opt_scsi_lun_id, Opt_err |
763 | }; | 763 | }; |
764 | 764 | ||
765 | static match_table_t tokens = { | 765 | static match_table_t tokens = { |
766 | {Opt_scsi_host_id, "scsi_host_id=%d"}, | 766 | {Opt_scsi_host_id, "scsi_host_id=%d"}, |
767 | {Opt_scsi_channel_id, "scsi_channel_id=%d"}, | 767 | {Opt_scsi_channel_id, "scsi_channel_id=%d"}, |
768 | {Opt_scsi_target_id, "scsi_target_id=%d"}, | 768 | {Opt_scsi_target_id, "scsi_target_id=%d"}, |
769 | {Opt_scsi_lun_id, "scsi_lun_id=%d"}, | 769 | {Opt_scsi_lun_id, "scsi_lun_id=%d"}, |
770 | {Opt_err, NULL} | 770 | {Opt_err, NULL} |
771 | }; | 771 | }; |
772 | 772 | ||
773 | static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba, | 773 | static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba, |
774 | struct se_subsystem_dev *se_dev, | 774 | struct se_subsystem_dev *se_dev, |
775 | const char *page, | 775 | const char *page, |
776 | ssize_t count) | 776 | ssize_t count) |
777 | { | 777 | { |
778 | struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr; | 778 | struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr; |
779 | struct pscsi_hba_virt *phv = hba->hba_ptr; | 779 | struct pscsi_hba_virt *phv = hba->hba_ptr; |
780 | char *orig, *ptr, *opts; | 780 | char *orig, *ptr, *opts; |
781 | substring_t args[MAX_OPT_ARGS]; | 781 | substring_t args[MAX_OPT_ARGS]; |
782 | int ret = 0, arg, token; | 782 | int ret = 0, arg, token; |
783 | 783 | ||
784 | opts = kstrdup(page, GFP_KERNEL); | 784 | opts = kstrdup(page, GFP_KERNEL); |
785 | if (!opts) | 785 | if (!opts) |
786 | return -ENOMEM; | 786 | return -ENOMEM; |
787 | 787 | ||
788 | orig = opts; | 788 | orig = opts; |
789 | 789 | ||
790 | while ((ptr = strsep(&opts, ",\n")) != NULL) { | 790 | while ((ptr = strsep(&opts, ",\n")) != NULL) { |
791 | if (!*ptr) | 791 | if (!*ptr) |
792 | continue; | 792 | continue; |
793 | 793 | ||
794 | token = match_token(ptr, tokens, args); | 794 | token = match_token(ptr, tokens, args); |
795 | switch (token) { | 795 | switch (token) { |
796 | case Opt_scsi_host_id: | 796 | case Opt_scsi_host_id: |
797 | if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) { | 797 | if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) { |
798 | pr_err("PSCSI[%d]: Unable to accept" | 798 | pr_err("PSCSI[%d]: Unable to accept" |
799 | " scsi_host_id while phv_mode ==" | 799 | " scsi_host_id while phv_mode ==" |
800 | " PHV_LLD_SCSI_HOST_NO\n", | 800 | " PHV_LLD_SCSI_HOST_NO\n", |
801 | phv->phv_host_id); | 801 | phv->phv_host_id); |
802 | ret = -EINVAL; | 802 | ret = -EINVAL; |
803 | goto out; | 803 | goto out; |
804 | } | 804 | } |
805 | match_int(args, &arg); | 805 | match_int(args, &arg); |
806 | pdv->pdv_host_id = arg; | 806 | pdv->pdv_host_id = arg; |
807 | pr_debug("PSCSI[%d]: Referencing SCSI Host ID:" | 807 | pr_debug("PSCSI[%d]: Referencing SCSI Host ID:" |
808 | " %d\n", phv->phv_host_id, pdv->pdv_host_id); | 808 | " %d\n", phv->phv_host_id, pdv->pdv_host_id); |
809 | pdv->pdv_flags |= PDF_HAS_VIRT_HOST_ID; | 809 | pdv->pdv_flags |= PDF_HAS_VIRT_HOST_ID; |
810 | break; | 810 | break; |
811 | case Opt_scsi_channel_id: | 811 | case Opt_scsi_channel_id: |
812 | match_int(args, &arg); | 812 | match_int(args, &arg); |
813 | pdv->pdv_channel_id = arg; | 813 | pdv->pdv_channel_id = arg; |
814 | pr_debug("PSCSI[%d]: Referencing SCSI Channel" | 814 | pr_debug("PSCSI[%d]: Referencing SCSI Channel" |
815 | " ID: %d\n", phv->phv_host_id, | 815 | " ID: %d\n", phv->phv_host_id, |
816 | pdv->pdv_channel_id); | 816 | pdv->pdv_channel_id); |
817 | pdv->pdv_flags |= PDF_HAS_CHANNEL_ID; | 817 | pdv->pdv_flags |= PDF_HAS_CHANNEL_ID; |
818 | break; | 818 | break; |
819 | case Opt_scsi_target_id: | 819 | case Opt_scsi_target_id: |
820 | match_int(args, &arg); | 820 | match_int(args, &arg); |
821 | pdv->pdv_target_id = arg; | 821 | pdv->pdv_target_id = arg; |
822 | pr_debug("PSCSI[%d]: Referencing SCSI Target" | 822 | pr_debug("PSCSI[%d]: Referencing SCSI Target" |
823 | " ID: %d\n", phv->phv_host_id, | 823 | " ID: %d\n", phv->phv_host_id, |
824 | pdv->pdv_target_id); | 824 | pdv->pdv_target_id); |
825 | pdv->pdv_flags |= PDF_HAS_TARGET_ID; | 825 | pdv->pdv_flags |= PDF_HAS_TARGET_ID; |
826 | break; | 826 | break; |
827 | case Opt_scsi_lun_id: | 827 | case Opt_scsi_lun_id: |
828 | match_int(args, &arg); | 828 | match_int(args, &arg); |
829 | pdv->pdv_lun_id = arg; | 829 | pdv->pdv_lun_id = arg; |
830 | pr_debug("PSCSI[%d]: Referencing SCSI LUN ID:" | 830 | pr_debug("PSCSI[%d]: Referencing SCSI LUN ID:" |
831 | " %d\n", phv->phv_host_id, pdv->pdv_lun_id); | 831 | " %d\n", phv->phv_host_id, pdv->pdv_lun_id); |
832 | pdv->pdv_flags |= PDF_HAS_LUN_ID; | 832 | pdv->pdv_flags |= PDF_HAS_LUN_ID; |
833 | break; | 833 | break; |
834 | default: | 834 | default: |
835 | break; | 835 | break; |
836 | } | 836 | } |
837 | } | 837 | } |
838 | 838 | ||
839 | out: | 839 | out: |
840 | kfree(orig); | 840 | kfree(orig); |
841 | return (!ret) ? count : ret; | 841 | return (!ret) ? count : ret; |
842 | } | 842 | } |
843 | 843 | ||
844 | static ssize_t pscsi_check_configfs_dev_params( | 844 | static ssize_t pscsi_check_configfs_dev_params( |
845 | struct se_hba *hba, | 845 | struct se_hba *hba, |
846 | struct se_subsystem_dev *se_dev) | 846 | struct se_subsystem_dev *se_dev) |
847 | { | 847 | { |
848 | struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr; | 848 | struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr; |
849 | 849 | ||
850 | if (!(pdv->pdv_flags & PDF_HAS_CHANNEL_ID) || | 850 | if (!(pdv->pdv_flags & PDF_HAS_CHANNEL_ID) || |
851 | !(pdv->pdv_flags & PDF_HAS_TARGET_ID) || | 851 | !(pdv->pdv_flags & PDF_HAS_TARGET_ID) || |
852 | !(pdv->pdv_flags & PDF_HAS_LUN_ID)) { | 852 | !(pdv->pdv_flags & PDF_HAS_LUN_ID)) { |
853 | pr_err("Missing scsi_channel_id=, scsi_target_id= and" | 853 | pr_err("Missing scsi_channel_id=, scsi_target_id= and" |
854 | " scsi_lun_id= parameters\n"); | 854 | " scsi_lun_id= parameters\n"); |
855 | return -EINVAL; | 855 | return -EINVAL; |
856 | } | 856 | } |
857 | 857 | ||
858 | return 0; | 858 | return 0; |
859 | } | 859 | } |
860 | 860 | ||
861 | static ssize_t pscsi_show_configfs_dev_params(struct se_hba *hba, | 861 | static ssize_t pscsi_show_configfs_dev_params(struct se_hba *hba, |
862 | struct se_subsystem_dev *se_dev, | 862 | struct se_subsystem_dev *se_dev, |
863 | char *b) | 863 | char *b) |
864 | { | 864 | { |
865 | struct pscsi_hba_virt *phv = hba->hba_ptr; | 865 | struct pscsi_hba_virt *phv = hba->hba_ptr; |
866 | struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr; | 866 | struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr; |
867 | struct scsi_device *sd = pdv->pdv_sd; | 867 | struct scsi_device *sd = pdv->pdv_sd; |
868 | unsigned char host_id[16]; | 868 | unsigned char host_id[16]; |
869 | ssize_t bl; | 869 | ssize_t bl; |
870 | int i; | 870 | int i; |
871 | 871 | ||
872 | if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) | 872 | if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) |
873 | snprintf(host_id, 16, "%d", pdv->pdv_host_id); | 873 | snprintf(host_id, 16, "%d", pdv->pdv_host_id); |
874 | else | 874 | else |
875 | snprintf(host_id, 16, "PHBA Mode"); | 875 | snprintf(host_id, 16, "PHBA Mode"); |
876 | 876 | ||
877 | bl = sprintf(b, "SCSI Device Bus Location:" | 877 | bl = sprintf(b, "SCSI Device Bus Location:" |
878 | " Channel ID: %d Target ID: %d LUN: %d Host ID: %s\n", | 878 | " Channel ID: %d Target ID: %d LUN: %d Host ID: %s\n", |
879 | pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id, | 879 | pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id, |
880 | host_id); | 880 | host_id); |
881 | 881 | ||
882 | if (sd) { | 882 | if (sd) { |
883 | bl += sprintf(b + bl, " "); | 883 | bl += sprintf(b + bl, " "); |
884 | bl += sprintf(b + bl, "Vendor: "); | 884 | bl += sprintf(b + bl, "Vendor: "); |
885 | for (i = 0; i < 8; i++) { | 885 | for (i = 0; i < 8; i++) { |
886 | if (ISPRINT(sd->vendor[i])) /* printable character? */ | 886 | if (ISPRINT(sd->vendor[i])) /* printable character? */ |
887 | bl += sprintf(b + bl, "%c", sd->vendor[i]); | 887 | bl += sprintf(b + bl, "%c", sd->vendor[i]); |
888 | else | 888 | else |
889 | bl += sprintf(b + bl, " "); | 889 | bl += sprintf(b + bl, " "); |
890 | } | 890 | } |
891 | bl += sprintf(b + bl, " Model: "); | 891 | bl += sprintf(b + bl, " Model: "); |
892 | for (i = 0; i < 16; i++) { | 892 | for (i = 0; i < 16; i++) { |
893 | if (ISPRINT(sd->model[i])) /* printable character? */ | 893 | if (ISPRINT(sd->model[i])) /* printable character? */ |
894 | bl += sprintf(b + bl, "%c", sd->model[i]); | 894 | bl += sprintf(b + bl, "%c", sd->model[i]); |
895 | else | 895 | else |
896 | bl += sprintf(b + bl, " "); | 896 | bl += sprintf(b + bl, " "); |
897 | } | 897 | } |
898 | bl += sprintf(b + bl, " Rev: "); | 898 | bl += sprintf(b + bl, " Rev: "); |
899 | for (i = 0; i < 4; i++) { | 899 | for (i = 0; i < 4; i++) { |
900 | if (ISPRINT(sd->rev[i])) /* printable character? */ | 900 | if (ISPRINT(sd->rev[i])) /* printable character? */ |
901 | bl += sprintf(b + bl, "%c", sd->rev[i]); | 901 | bl += sprintf(b + bl, "%c", sd->rev[i]); |
902 | else | 902 | else |
903 | bl += sprintf(b + bl, " "); | 903 | bl += sprintf(b + bl, " "); |
904 | } | 904 | } |
905 | bl += sprintf(b + bl, "\n"); | 905 | bl += sprintf(b + bl, "\n"); |
906 | } | 906 | } |
907 | return bl; | 907 | return bl; |
908 | } | 908 | } |
909 | 909 | ||
910 | static void pscsi_bi_endio(struct bio *bio, int error) | 910 | static void pscsi_bi_endio(struct bio *bio, int error) |
911 | { | 911 | { |
912 | bio_put(bio); | 912 | bio_put(bio); |
913 | } | 913 | } |
914 | 914 | ||
915 | static inline struct bio *pscsi_get_bio(int sg_num) | 915 | static inline struct bio *pscsi_get_bio(int sg_num) |
916 | { | 916 | { |
917 | struct bio *bio; | 917 | struct bio *bio; |
918 | /* | 918 | /* |
919 | * Use bio_kmalloc() following the comment for bio -> struct request | 919 | * Use bio_kmalloc() following the comment for bio -> struct request |
920 | * in block/blk-core.c:blk_make_request() | 920 | * in block/blk-core.c:blk_make_request() |
921 | */ | 921 | */ |
922 | bio = bio_kmalloc(GFP_KERNEL, sg_num); | 922 | bio = bio_kmalloc(GFP_KERNEL, sg_num); |
923 | if (!bio) { | 923 | if (!bio) { |
924 | pr_err("PSCSI: bio_kmalloc() failed\n"); | 924 | pr_err("PSCSI: bio_kmalloc() failed\n"); |
925 | return NULL; | 925 | return NULL; |
926 | } | 926 | } |
927 | bio->bi_end_io = pscsi_bi_endio; | 927 | bio->bi_end_io = pscsi_bi_endio; |
928 | 928 | ||
929 | return bio; | 929 | return bio; |
930 | } | 930 | } |
931 | 931 | ||
932 | static int pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, | 932 | static int pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, |
933 | u32 sgl_nents, enum dma_data_direction data_direction, | 933 | u32 sgl_nents, enum dma_data_direction data_direction, |
934 | struct bio **hbio) | 934 | struct bio **hbio) |
935 | { | 935 | { |
936 | struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr; | 936 | struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr; |
937 | struct bio *bio = NULL, *tbio = NULL; | 937 | struct bio *bio = NULL, *tbio = NULL; |
938 | struct page *page; | 938 | struct page *page; |
939 | struct scatterlist *sg; | 939 | struct scatterlist *sg; |
940 | u32 data_len = cmd->data_length, i, len, bytes, off; | 940 | u32 data_len = cmd->data_length, i, len, bytes, off; |
941 | int nr_pages = (cmd->data_length + sgl[0].offset + | 941 | int nr_pages = (cmd->data_length + sgl[0].offset + |
942 | PAGE_SIZE - 1) >> PAGE_SHIFT; | 942 | PAGE_SIZE - 1) >> PAGE_SHIFT; |
943 | int nr_vecs = 0, rc; | 943 | int nr_vecs = 0, rc; |
944 | int rw = (data_direction == DMA_TO_DEVICE); | 944 | int rw = (data_direction == DMA_TO_DEVICE); |
945 | 945 | ||
946 | *hbio = NULL; | 946 | *hbio = NULL; |
947 | 947 | ||
948 | pr_debug("PSCSI: nr_pages: %d\n", nr_pages); | 948 | pr_debug("PSCSI: nr_pages: %d\n", nr_pages); |
949 | 949 | ||
950 | for_each_sg(sgl, sg, sgl_nents, i) { | 950 | for_each_sg(sgl, sg, sgl_nents, i) { |
951 | page = sg_page(sg); | 951 | page = sg_page(sg); |
952 | off = sg->offset; | 952 | off = sg->offset; |
953 | len = sg->length; | 953 | len = sg->length; |
954 | 954 | ||
955 | pr_debug("PSCSI: i: %d page: %p len: %d off: %d\n", i, | 955 | pr_debug("PSCSI: i: %d page: %p len: %d off: %d\n", i, |
956 | page, len, off); | 956 | page, len, off); |
957 | 957 | ||
958 | while (len > 0 && data_len > 0) { | 958 | while (len > 0 && data_len > 0) { |
959 | bytes = min_t(unsigned int, len, PAGE_SIZE - off); | 959 | bytes = min_t(unsigned int, len, PAGE_SIZE - off); |
960 | bytes = min(bytes, data_len); | 960 | bytes = min(bytes, data_len); |
961 | 961 | ||
962 | if (!bio) { | 962 | if (!bio) { |
963 | nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages); | 963 | nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages); |
964 | nr_pages -= nr_vecs; | 964 | nr_pages -= nr_vecs; |
965 | /* | 965 | /* |
966 | * Calls bio_kmalloc() and sets bio->bi_end_io() | 966 | * Calls bio_kmalloc() and sets bio->bi_end_io() |
967 | */ | 967 | */ |
968 | bio = pscsi_get_bio(nr_vecs); | 968 | bio = pscsi_get_bio(nr_vecs); |
969 | if (!bio) | 969 | if (!bio) |
970 | goto fail; | 970 | goto fail; |
971 | 971 | ||
972 | if (rw) | 972 | if (rw) |
973 | bio->bi_rw |= REQ_WRITE; | 973 | bio->bi_rw |= REQ_WRITE; |
974 | 974 | ||
975 | pr_debug("PSCSI: Allocated bio: %p," | 975 | pr_debug("PSCSI: Allocated bio: %p," |
976 | " dir: %s nr_vecs: %d\n", bio, | 976 | " dir: %s nr_vecs: %d\n", bio, |
977 | (rw) ? "rw" : "r", nr_vecs); | 977 | (rw) ? "rw" : "r", nr_vecs); |
978 | /* | 978 | /* |
979 | * Set *hbio pointer to handle the case: | 979 | * Set *hbio pointer to handle the case: |
980 | * nr_pages > BIO_MAX_PAGES, where additional | 980 | * nr_pages > BIO_MAX_PAGES, where additional |
981 | * bios need to be added to complete a given | 981 | * bios need to be added to complete a given |
982 | * command. | 982 | * command. |
983 | */ | 983 | */ |
984 | if (!*hbio) | 984 | if (!*hbio) |
985 | *hbio = tbio = bio; | 985 | *hbio = tbio = bio; |
986 | else | 986 | else |
987 | tbio = tbio->bi_next = bio; | 987 | tbio = tbio->bi_next = bio; |
988 | } | 988 | } |
989 | 989 | ||
990 | pr_debug("PSCSI: Calling bio_add_pc_page() i: %d" | 990 | pr_debug("PSCSI: Calling bio_add_pc_page() i: %d" |
991 | " bio: %p page: %p len: %d off: %d\n", i, bio, | 991 | " bio: %p page: %p len: %d off: %d\n", i, bio, |
992 | page, len, off); | 992 | page, len, off); |
993 | 993 | ||
994 | rc = bio_add_pc_page(pdv->pdv_sd->request_queue, | 994 | rc = bio_add_pc_page(pdv->pdv_sd->request_queue, |
995 | bio, page, bytes, off); | 995 | bio, page, bytes, off); |
996 | if (rc != bytes) | 996 | if (rc != bytes) |
997 | goto fail; | 997 | goto fail; |
998 | 998 | ||
999 | pr_debug("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n", | 999 | pr_debug("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n", |
1000 | bio->bi_vcnt, nr_vecs); | 1000 | bio->bi_vcnt, nr_vecs); |
1001 | 1001 | ||
1002 | if (bio->bi_vcnt > nr_vecs) { | 1002 | if (bio->bi_vcnt > nr_vecs) { |
1003 | pr_debug("PSCSI: Reached bio->bi_vcnt max:" | 1003 | pr_debug("PSCSI: Reached bio->bi_vcnt max:" |
1004 | " %d i: %d bio: %p, allocating another" | 1004 | " %d i: %d bio: %p, allocating another" |
1005 | " bio\n", bio->bi_vcnt, i, bio); | 1005 | " bio\n", bio->bi_vcnt, i, bio); |
1006 | /* | 1006 | /* |
1007 | * Clear the pointer so that another bio will | 1007 | * Clear the pointer so that another bio will |
1008 | * be allocated with pscsi_get_bio() above, the | 1008 | * be allocated with pscsi_get_bio() above, the |
1009 | * current bio has already been set *tbio and | 1009 | * current bio has already been set *tbio and |
1010 | * bio->bi_next. | 1010 | * bio->bi_next. |
1011 | */ | 1011 | */ |
1012 | bio = NULL; | 1012 | bio = NULL; |
1013 | } | 1013 | } |
1014 | 1014 | ||
1015 | page++; | 1015 | page++; |
1016 | len -= bytes; | 1016 | len -= bytes; |
1017 | data_len -= bytes; | 1017 | data_len -= bytes; |
1018 | off = 0; | 1018 | off = 0; |
1019 | } | 1019 | } |
1020 | } | 1020 | } |
1021 | 1021 | ||
1022 | return sgl_nents; | 1022 | return sgl_nents; |
1023 | fail: | 1023 | fail: |
1024 | while (*hbio) { | 1024 | while (*hbio) { |
1025 | bio = *hbio; | 1025 | bio = *hbio; |
1026 | *hbio = (*hbio)->bi_next; | 1026 | *hbio = (*hbio)->bi_next; |
1027 | bio->bi_next = NULL; | 1027 | bio->bi_next = NULL; |
1028 | bio_endio(bio, 0); /* XXX: should be error */ | 1028 | bio_endio(bio, 0); /* XXX: should be error */ |
1029 | } | 1029 | } |
1030 | cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 1030 | cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
1031 | return -ENOMEM; | 1031 | return -ENOMEM; |
1032 | } | 1032 | } |
1033 | 1033 | ||
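The bio sizing above hinges on one line of arithmetic: nr_pages counts how many pages the payload touches, including the partial first page implied by sgl[0].offset. A minimal userspace sketch of that rounding, assuming 4 KiB pages (nr_pages_for is a stand-in helper, not kernel API):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static int nr_pages_for(unsigned int data_length, unsigned int first_off)
{
	/* Round up to whole pages, counting the partial first page. */
	return (data_length + first_off + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

int main(void)
{
	/* An 8 KiB payload starting 512 bytes into its first page spans 3 pages. */
	printf("%d\n", nr_pages_for(8192, 512));
	return 0;
}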
1034 | /* | 1034 | /* |
1035 | * Clear a LUN set in the CDB if the initiator talking to us spoke | 1035 | * Clear a LUN set in the CDB if the initiator talking to us spoke |
1036 | * an old standards version, as we can't assume the underlying device | 1036 | * an old standards version, as we can't assume the underlying device |
1037 | * won't choke on it. | 1037 | * won't choke on it. |
1038 | */ | 1038 | */ |
1039 | static inline void pscsi_clear_cdb_lun(unsigned char *cdb) | 1039 | static inline void pscsi_clear_cdb_lun(unsigned char *cdb) |
1040 | { | 1040 | { |
1041 | switch (cdb[0]) { | 1041 | switch (cdb[0]) { |
1042 | case READ_10: /* SBC - RDProtect */ | 1042 | case READ_10: /* SBC - RDProtect */ |
1043 | case READ_12: /* SBC - RDProtect */ | 1043 | case READ_12: /* SBC - RDProtect */ |
1044 | case READ_16: /* SBC - RDProtect */ | 1044 | case READ_16: /* SBC - RDProtect */ |
1045 | case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */ | 1045 | case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */ |
1046 | case VERIFY: /* SBC - VRProtect */ | 1046 | case VERIFY: /* SBC - VRProtect */ |
1047 | case VERIFY_16: /* SBC - VRProtect */ | 1047 | case VERIFY_16: /* SBC - VRProtect */ |
1048 | case WRITE_VERIFY: /* SBC - VRProtect */ | 1048 | case WRITE_VERIFY: /* SBC - VRProtect */ |
1049 | case WRITE_VERIFY_12: /* SBC - VRProtect */ | 1049 | case WRITE_VERIFY_12: /* SBC - VRProtect */ |
1050 | case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */ | 1050 | case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */ |
1051 | break; | 1051 | break; |
1052 | default: | 1052 | default: |
1053 | cdb[1] &= 0x1f; /* clear logical unit number */ | 1053 | cdb[1] &= 0x1f; /* clear logical unit number */ |
1054 | break; | 1054 | break; |
1055 | } | 1055 | } |
1056 | } | 1056 | } |
1057 | 1057 | ||
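For context: SCSI-2 initiators could encode the LUN in the top three bits of CDB byte 1, which modern devices may reject. The default arm keeps only the low five bits; the listed opcodes are exempt because their byte 1 carries RDProtect/VRProtect/self-test fields instead. A small userspace illustration of the mask, with a made-up byte value:

#include <stdio.h>

int main(void)
{
	unsigned char b1 = 0xE3;	/* legacy LUN 7 in bits 7:5, low bits in use */

	b1 &= 0x1f;			/* drop the legacy LUN field */
	printf("0x%02x\n", b1);		/* prints 0x03 */
	return 0;
}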
1058 | static int pscsi_parse_cdb(struct se_cmd *cmd) | 1058 | static int pscsi_parse_cdb(struct se_cmd *cmd) |
1059 | { | 1059 | { |
1060 | unsigned char *cdb = cmd->t_task_cdb; | 1060 | unsigned char *cdb = cmd->t_task_cdb; |
1061 | unsigned int dummy_size; | 1061 | unsigned int dummy_size; |
1062 | int ret; | 1062 | int ret; |
1063 | 1063 | ||
1064 | if (cmd->se_cmd_flags & SCF_BIDI) { | 1064 | if (cmd->se_cmd_flags & SCF_BIDI) { |
1065 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 1065 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
1066 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; | 1066 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; |
1067 | return -EINVAL; | 1067 | return -EINVAL; |
1068 | } | 1068 | } |
1069 | 1069 | ||
1070 | pscsi_clear_cdb_lun(cdb); | 1070 | pscsi_clear_cdb_lun(cdb); |
1071 | 1071 | ||
1072 | /* | 1072 | /* |
1073 | * For REPORT LUNS we always need to emulate the response; for everything | 1073 | * For REPORT LUNS we always need to emulate the response; for everything |
1074 | * else the default for pSCSI is to pass the command to the underlying | 1074 | * else the default for pSCSI is to pass the command to the underlying |
1075 | * LLD / physical hardware. | 1075 | * LLD / physical hardware. |
1076 | */ | 1076 | */ |
1077 | switch (cdb[0]) { | 1077 | switch (cdb[0]) { |
1078 | case REPORT_LUNS: | 1078 | case REPORT_LUNS: |
1079 | ret = spc_parse_cdb(cmd, &dummy_size); | 1079 | ret = spc_parse_cdb(cmd, &dummy_size); |
1080 | if (ret) | 1080 | if (ret) |
1081 | return ret; | 1081 | return ret; |
1082 | break; | 1082 | break; |
1083 | case READ_6: | 1083 | case READ_6: |
1084 | case READ_10: | 1084 | case READ_10: |
1085 | case READ_12: | 1085 | case READ_12: |
1086 | case READ_16: | 1086 | case READ_16: |
1087 | case WRITE_6: | 1087 | case WRITE_6: |
1088 | case WRITE_10: | 1088 | case WRITE_10: |
1089 | case WRITE_12: | 1089 | case WRITE_12: |
1090 | case WRITE_16: | 1090 | case WRITE_16: |
1091 | case WRITE_VERIFY: | 1091 | case WRITE_VERIFY: |
1092 | cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; | 1092 | cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; |
1093 | /* FALLTHROUGH */ | 1093 | /* FALLTHROUGH */ |
1094 | default: | 1094 | default: |
1095 | cmd->execute_cmd = pscsi_execute_cmd; | 1095 | cmd->execute_cmd = pscsi_execute_cmd; |
1096 | break; | 1096 | break; |
1097 | } | 1097 | } |
1098 | 1098 | ||
1099 | return 0; | 1099 | return 0; |
1100 | } | 1100 | } |
1101 | 1101 | ||
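This dispatch is the heart of the patch: only REPORT LUNS stays in SPC emulation, while every other CDB, including zero-length ones such as TEST UNIT READY or START STOP UNIT, is handed to pscsi_execute_cmd. A hedged userspace sketch of the rule (opcode values per SPC/SBC; the routing strings are purely illustrative):

#include <stdio.h>

#define REPORT_LUNS	0xa0
#define START_STOP	0x1b

static const char *pscsi_route(unsigned char opcode)
{
	/* Only REPORT LUNS stays in SPC emulation; all else passes through. */
	return opcode == REPORT_LUNS ? "spc_parse_cdb" : "pscsi_execute_cmd";
}

int main(void)
{
	printf("%s\n", pscsi_route(START_STOP));	/* pscsi_execute_cmd */
	printf("%s\n", pscsi_route(REPORT_LUNS));	/* spc_parse_cdb */
	return 0;
}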
1102 | static int pscsi_execute_cmd(struct se_cmd *cmd) | 1102 | static int pscsi_execute_cmd(struct se_cmd *cmd) |
1103 | { | 1103 | { |
1104 | struct scatterlist *sgl = cmd->t_data_sg; | 1104 | struct scatterlist *sgl = cmd->t_data_sg; |
1105 | u32 sgl_nents = cmd->t_data_nents; | 1105 | u32 sgl_nents = cmd->t_data_nents; |
1106 | enum dma_data_direction data_direction = cmd->data_direction; | 1106 | enum dma_data_direction data_direction = cmd->data_direction; |
1107 | struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr; | 1107 | struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr; |
1108 | struct pscsi_plugin_task *pt; | 1108 | struct pscsi_plugin_task *pt; |
1109 | struct request *req; | 1109 | struct request *req; |
1110 | struct bio *hbio; | 1110 | struct bio *hbio; |
1111 | int ret; | 1111 | int ret; |
1112 | 1112 | ||
1113 | /* | 1113 | /* |
1114 | * Dynamically alloc cdb space, since it may be larger than | 1114 | * Dynamically alloc cdb space, since it may be larger than |
1115 | * TCM_MAX_COMMAND_SIZE | 1115 | * TCM_MAX_COMMAND_SIZE |
1116 | */ | 1116 | */ |
1117 | pt = kzalloc(sizeof(*pt) + scsi_command_size(cmd->t_task_cdb), GFP_KERNEL); | 1117 | pt = kzalloc(sizeof(*pt) + scsi_command_size(cmd->t_task_cdb), GFP_KERNEL); |
1118 | if (!pt) { | 1118 | if (!pt) { |
1119 | cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 1119 | cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
1120 | return -ENOMEM; | 1120 | return -ENOMEM; |
1121 | } | 1121 | } |
1122 | cmd->priv = pt; | 1122 | cmd->priv = pt; |
1123 | 1123 | ||
1124 | memcpy(pt->pscsi_cdb, cmd->t_task_cdb, | 1124 | memcpy(pt->pscsi_cdb, cmd->t_task_cdb, |
1125 | scsi_command_size(cmd->t_task_cdb)); | 1125 | scsi_command_size(cmd->t_task_cdb)); |
1126 | 1126 | ||
1127 | if (!sgl) { | 1127 | if (!sgl) { |
1128 | req = blk_get_request(pdv->pdv_sd->request_queue, | 1128 | req = blk_get_request(pdv->pdv_sd->request_queue, |
1129 | (data_direction == DMA_TO_DEVICE), | 1129 | (data_direction == DMA_TO_DEVICE), |
1130 | GFP_KERNEL); | 1130 | GFP_KERNEL); |
1131 | if (!req || IS_ERR(req)) { | 1131 | if (!req || IS_ERR(req)) { |
1132 | pr_err("PSCSI: blk_get_request() failed: %ld\n", | 1132 | pr_err("PSCSI: blk_get_request() failed: %ld\n", |
1133 | req ? PTR_ERR(req) : -ENOMEM); | 1133 | req ? PTR_ERR(req) : -ENOMEM); |
1134 | cmd->scsi_sense_reason = | 1134 | cmd->scsi_sense_reason = |
1135 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 1135 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
1136 | goto fail; | 1136 | goto fail; |
1137 | } | 1137 | } |
1138 | } else { | 1138 | } else { |
1139 | BUG_ON(!cmd->data_length); | 1139 | BUG_ON(!cmd->data_length); |
1140 | 1140 | ||
1141 | ret = pscsi_map_sg(cmd, sgl, sgl_nents, data_direction, &hbio); | 1141 | ret = pscsi_map_sg(cmd, sgl, sgl_nents, data_direction, &hbio); |
1142 | if (ret < 0) { | 1142 | if (ret < 0) { |
1143 | cmd->scsi_sense_reason = | 1143 | cmd->scsi_sense_reason = |
1144 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 1144 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
1145 | goto fail; | 1145 | goto fail; |
1146 | } | 1146 | } |
1147 | 1147 | ||
1148 | req = blk_make_request(pdv->pdv_sd->request_queue, hbio, | 1148 | req = blk_make_request(pdv->pdv_sd->request_queue, hbio, |
1149 | GFP_KERNEL); | 1149 | GFP_KERNEL); |
1150 | if (IS_ERR(req)) { | 1150 | if (IS_ERR(req)) { |
1151 | pr_err("pSCSI: blk_make_request() failed\n"); | 1151 | pr_err("pSCSI: blk_make_request() failed\n"); |
1152 | goto fail_free_bio; | 1152 | goto fail_free_bio; |
1153 | } | 1153 | } |
1154 | } | 1154 | } |
1155 | 1155 | ||
1156 | req->cmd_type = REQ_TYPE_BLOCK_PC; | 1156 | req->cmd_type = REQ_TYPE_BLOCK_PC; |
1157 | req->end_io = pscsi_req_done; | 1157 | req->end_io = pscsi_req_done; |
1158 | req->end_io_data = cmd; | 1158 | req->end_io_data = cmd; |
1159 | req->cmd_len = scsi_command_size(pt->pscsi_cdb); | 1159 | req->cmd_len = scsi_command_size(pt->pscsi_cdb); |
1160 | req->cmd = &pt->pscsi_cdb[0]; | 1160 | req->cmd = &pt->pscsi_cdb[0]; |
1161 | req->sense = &pt->pscsi_sense[0]; | 1161 | req->sense = &pt->pscsi_sense[0]; |
1162 | req->sense_len = 0; | 1162 | req->sense_len = 0; |
1163 | if (pdv->pdv_sd->type == TYPE_DISK) | 1163 | if (pdv->pdv_sd->type == TYPE_DISK) |
1164 | req->timeout = PS_TIMEOUT_DISK; | 1164 | req->timeout = PS_TIMEOUT_DISK; |
1165 | else | 1165 | else |
1166 | req->timeout = PS_TIMEOUT_OTHER; | 1166 | req->timeout = PS_TIMEOUT_OTHER; |
1167 | req->retries = PS_RETRY; | 1167 | req->retries = PS_RETRY; |
1168 | 1168 | ||
1169 | blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, req, | 1169 | blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, req, |
1170 | (cmd->sam_task_attr == MSG_HEAD_TAG), | 1170 | (cmd->sam_task_attr == MSG_HEAD_TAG), |
1171 | pscsi_req_done); | 1171 | pscsi_req_done); |
1172 | 1172 | ||
1173 | return 0; | 1173 | return 0; |
1174 | 1174 | ||
1175 | fail_free_bio: | 1175 | fail_free_bio: |
1176 | while (hbio) { | 1176 | while (hbio) { |
1177 | struct bio *bio = hbio; | 1177 | struct bio *bio = hbio; |
1178 | hbio = hbio->bi_next; | 1178 | hbio = hbio->bi_next; |
1179 | bio->bi_next = NULL; | 1179 | bio->bi_next = NULL; |
1180 | bio_endio(bio, 0); /* XXX: should be error */ | 1180 | bio_endio(bio, 0); /* XXX: should be error */ |
1181 | } | 1181 | } |
1182 | cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 1182 | cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
1183 | fail: | 1183 | fail: |
1184 | kfree(pt); | 1184 | kfree(pt); |
1185 | return -ENOMEM; | 1185 | return -ENOMEM; |
1186 | } | 1186 | } |
1187 | 1187 | ||
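Note how the zero-length case is carried through here: when cmd->t_data_sg is NULL the command still reaches the device as an empty BLOCK_PC request via blk_get_request(), instead of being completed early, while commands that do carry data go through pscsi_map_sg() and blk_make_request(). A stubbed sketch of that branch shape (not kernel API, purely illustrative):

#include <stdbool.h>
#include <stdio.h>

static void submit(bool has_data)
{
	if (!has_data)
		puts("blk_get_request(): empty BLOCK_PC request");
	else
		puts("pscsi_map_sg() + blk_make_request(): request with bios");
}

int main(void)
{
	submit(false);	/* e.g. START STOP UNIT, zero data_length */
	submit(true);	/* e.g. READ_10 */
	return 0;
}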
1188 | /* pscsi_get_device_rev(): | 1188 | /* pscsi_get_device_rev(): |
1189 | * | 1189 | * |
1190 | * | 1190 | * |
1191 | */ | 1191 | */ |
1192 | static u32 pscsi_get_device_rev(struct se_device *dev) | 1192 | static u32 pscsi_get_device_rev(struct se_device *dev) |
1193 | { | 1193 | { |
1194 | struct pscsi_dev_virt *pdv = dev->dev_ptr; | 1194 | struct pscsi_dev_virt *pdv = dev->dev_ptr; |
1195 | struct scsi_device *sd = pdv->pdv_sd; | 1195 | struct scsi_device *sd = pdv->pdv_sd; |
1196 | 1196 | ||
1197 | return (sd->scsi_level - 1) ? sd->scsi_level - 1 : 1; | 1197 | return (sd->scsi_level - 1) ? sd->scsi_level - 1 : 1; |
1198 | } | 1198 | } |
1199 | 1199 | ||
1200 | /* pscsi_get_device_type(): | 1200 | /* pscsi_get_device_type(): |
1201 | * | 1201 | * |
1202 | * | 1202 | * |
1203 | */ | 1203 | */ |
1204 | static u32 pscsi_get_device_type(struct se_device *dev) | 1204 | static u32 pscsi_get_device_type(struct se_device *dev) |
1205 | { | 1205 | { |
1206 | struct pscsi_dev_virt *pdv = dev->dev_ptr; | 1206 | struct pscsi_dev_virt *pdv = dev->dev_ptr; |
1207 | struct scsi_device *sd = pdv->pdv_sd; | 1207 | struct scsi_device *sd = pdv->pdv_sd; |
1208 | 1208 | ||
1209 | return sd->type; | 1209 | return sd->type; |
1210 | } | 1210 | } |
1211 | 1211 | ||
1212 | static sector_t pscsi_get_blocks(struct se_device *dev) | 1212 | static sector_t pscsi_get_blocks(struct se_device *dev) |
1213 | { | 1213 | { |
1214 | struct pscsi_dev_virt *pdv = dev->dev_ptr; | 1214 | struct pscsi_dev_virt *pdv = dev->dev_ptr; |
1215 | 1215 | ||
1216 | if (pdv->pdv_bd && pdv->pdv_bd->bd_part) | 1216 | if (pdv->pdv_bd && pdv->pdv_bd->bd_part) |
1217 | return pdv->pdv_bd->bd_part->nr_sects; | 1217 | return pdv->pdv_bd->bd_part->nr_sects; |
1218 | 1218 | ||
1219 | dump_stack(); | 1219 | dump_stack(); |
1220 | return 0; | 1220 | return 0; |
1221 | } | 1221 | } |
1222 | 1222 | ||
1223 | static void pscsi_req_done(struct request *req, int uptodate) | 1223 | static void pscsi_req_done(struct request *req, int uptodate) |
1224 | { | 1224 | { |
1225 | struct se_cmd *cmd = req->end_io_data; | 1225 | struct se_cmd *cmd = req->end_io_data; |
1226 | struct pscsi_plugin_task *pt = cmd->priv; | 1226 | struct pscsi_plugin_task *pt = cmd->priv; |
1227 | 1227 | ||
1228 | pt->pscsi_result = req->errors; | 1228 | pt->pscsi_result = req->errors; |
1229 | pt->pscsi_resid = req->resid_len; | 1229 | pt->pscsi_resid = req->resid_len; |
1230 | 1230 | ||
1231 | cmd->scsi_status = status_byte(pt->pscsi_result) << 1; | 1231 | cmd->scsi_status = status_byte(pt->pscsi_result) << 1; |
1232 | if (cmd->scsi_status) { | 1232 | if (cmd->scsi_status) { |
1233 | pr_debug("PSCSI Status Byte exception at cmd: %p CDB:" | 1233 | pr_debug("PSCSI Status Byte exception at cmd: %p CDB:" |
1234 | " 0x%02x Result: 0x%08x\n", cmd, pt->pscsi_cdb[0], | 1234 | " 0x%02x Result: 0x%08x\n", cmd, pt->pscsi_cdb[0], |
1235 | pt->pscsi_result); | 1235 | pt->pscsi_result); |
1236 | } | 1236 | } |
1237 | 1237 | ||
1238 | switch (host_byte(pt->pscsi_result)) { | 1238 | switch (host_byte(pt->pscsi_result)) { |
1239 | case DID_OK: | 1239 | case DID_OK: |
1240 | target_complete_cmd(cmd, cmd->scsi_status); | 1240 | target_complete_cmd(cmd, cmd->scsi_status); |
1241 | break; | 1241 | break; |
1242 | default: | 1242 | default: |
1243 | pr_debug("PSCSI Host Byte exception at cmd: %p CDB:" | 1243 | pr_debug("PSCSI Host Byte exception at cmd: %p CDB:" |
1244 | " 0x%02x Result: 0x%08x\n", cmd, pt->pscsi_cdb[0], | 1244 | " 0x%02x Result: 0x%08x\n", cmd, pt->pscsi_cdb[0], |
1245 | pt->pscsi_result); | 1245 | pt->pscsi_result); |
1246 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; | 1246 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; |
1247 | target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION); | 1247 | target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION); |
1248 | break; | 1248 | break; |
1249 | } | 1249 | } |
1250 | 1250 | ||
1251 | __blk_put_request(req->q, req); | 1251 | __blk_put_request(req->q, req); |
1252 | kfree(pt); | 1252 | kfree(pt); |
1253 | } | 1253 | } |
1254 | 1254 | ||
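The completion path decodes req->errors using the classic SCSI result-word layout (driver, host, msg, status bytes); status_byte() yields the halved SAM status, so shifting left by one restores it. A userspace demo of the decoding, with macros mirroring <scsi/scsi.h> and an illustrative result value:

#include <stdio.h>

#define status_byte(result)	(((result) >> 1) & 0x7f)
#define host_byte(result)	(((result) >> 16) & 0xff)

int main(void)
{
	int result = (0x00 << 16) | 0x02;	/* DID_OK, SAM CHECK CONDITION */

	printf("host=0x%02x sam_status=0x%02x\n",
	       host_byte(result), status_byte(result) << 1);
	return 0;
}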
1255 | static struct se_subsystem_api pscsi_template = { | 1255 | static struct se_subsystem_api pscsi_template = { |
1256 | .name = "pscsi", | 1256 | .name = "pscsi", |
1257 | .owner = THIS_MODULE, | 1257 | .owner = THIS_MODULE, |
1258 | .transport_type = TRANSPORT_PLUGIN_PHBA_PDEV, | 1258 | .transport_type = TRANSPORT_PLUGIN_PHBA_PDEV, |
1259 | .attach_hba = pscsi_attach_hba, | 1259 | .attach_hba = pscsi_attach_hba, |
1260 | .detach_hba = pscsi_detach_hba, | 1260 | .detach_hba = pscsi_detach_hba, |
1261 | .pmode_enable_hba = pscsi_pmode_enable_hba, | 1261 | .pmode_enable_hba = pscsi_pmode_enable_hba, |
1262 | .allocate_virtdevice = pscsi_allocate_virtdevice, | 1262 | .allocate_virtdevice = pscsi_allocate_virtdevice, |
1263 | .create_virtdevice = pscsi_create_virtdevice, | 1263 | .create_virtdevice = pscsi_create_virtdevice, |
1264 | .free_device = pscsi_free_device, | 1264 | .free_device = pscsi_free_device, |
1265 | .transport_complete = pscsi_transport_complete, | 1265 | .transport_complete = pscsi_transport_complete, |
1266 | .parse_cdb = pscsi_parse_cdb, | 1266 | .parse_cdb = pscsi_parse_cdb, |
1267 | .check_configfs_dev_params = pscsi_check_configfs_dev_params, | 1267 | .check_configfs_dev_params = pscsi_check_configfs_dev_params, |
1268 | .set_configfs_dev_params = pscsi_set_configfs_dev_params, | 1268 | .set_configfs_dev_params = pscsi_set_configfs_dev_params, |
1269 | .show_configfs_dev_params = pscsi_show_configfs_dev_params, | 1269 | .show_configfs_dev_params = pscsi_show_configfs_dev_params, |
1270 | .get_device_rev = pscsi_get_device_rev, | 1270 | .get_device_rev = pscsi_get_device_rev, |
1271 | .get_device_type = pscsi_get_device_type, | 1271 | .get_device_type = pscsi_get_device_type, |
1272 | .get_blocks = pscsi_get_blocks, | 1272 | .get_blocks = pscsi_get_blocks, |
1273 | }; | 1273 | }; |
1274 | 1274 | ||
1275 | static int __init pscsi_module_init(void) | 1275 | static int __init pscsi_module_init(void) |
1276 | { | 1276 | { |
1277 | return transport_subsystem_register(&pscsi_template); | 1277 | return transport_subsystem_register(&pscsi_template); |
1278 | } | 1278 | } |
1279 | 1279 | ||
1280 | static void pscsi_module_exit(void) | 1280 | static void pscsi_module_exit(void) |
1281 | { | 1281 | { |
1282 | transport_subsystem_release(&pscsi_template); | 1282 | transport_subsystem_release(&pscsi_template); |
1283 | } | 1283 | } |
1284 | 1284 | ||
1285 | MODULE_DESCRIPTION("TCM PSCSI subsystem plugin"); | 1285 | MODULE_DESCRIPTION("TCM PSCSI subsystem plugin"); |
1286 | MODULE_AUTHOR("nab@Linux-iSCSI.org"); | 1286 | MODULE_AUTHOR("nab@Linux-iSCSI.org"); |
1287 | MODULE_LICENSE("GPL"); | 1287 | MODULE_LICENSE("GPL"); |
1288 | 1288 |
drivers/target/target_core_transport.c
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | * Filename: target_core_transport.c | 2 | * Filename: target_core_transport.c |
3 | * | 3 | * |
4 | * This file contains the Generic Target Engine Core. | 4 | * This file contains the Generic Target Engine Core. |
5 | * | 5 | * |
6 | * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc. | 6 | * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc. |
7 | * Copyright (c) 2005, 2006, 2007 SBE, Inc. | 7 | * Copyright (c) 2005, 2006, 2007 SBE, Inc. |
8 | * Copyright (c) 2007-2010 Rising Tide Systems | 8 | * Copyright (c) 2007-2010 Rising Tide Systems |
9 | * Copyright (c) 2008-2010 Linux-iSCSI.org | 9 | * Copyright (c) 2008-2010 Linux-iSCSI.org |
10 | * | 10 | * |
11 | * Nicholas A. Bellinger <nab@kernel.org> | 11 | * Nicholas A. Bellinger <nab@kernel.org> |
12 | * | 12 | * |
13 | * This program is free software; you can redistribute it and/or modify | 13 | * This program is free software; you can redistribute it and/or modify |
14 | * it under the terms of the GNU General Public License as published by | 14 | * it under the terms of the GNU General Public License as published by |
15 | * the Free Software Foundation; either version 2 of the License, or | 15 | * the Free Software Foundation; either version 2 of the License, or |
16 | * (at your option) any later version. | 16 | * (at your option) any later version. |
17 | * | 17 | * |
18 | * This program is distributed in the hope that it will be useful, | 18 | * This program is distributed in the hope that it will be useful, |
19 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 19 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
20 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 20 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
21 | * GNU General Public License for more details. | 21 | * GNU General Public License for more details. |
22 | * | 22 | * |
23 | * You should have received a copy of the GNU General Public License | 23 | * You should have received a copy of the GNU General Public License |
24 | * along with this program; if not, write to the Free Software | 24 | * along with this program; if not, write to the Free Software |
25 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | 25 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. |
26 | * | 26 | * |
27 | ******************************************************************************/ | 27 | ******************************************************************************/ |
28 | 28 | ||
29 | #include <linux/net.h> | 29 | #include <linux/net.h> |
30 | #include <linux/delay.h> | 30 | #include <linux/delay.h> |
31 | #include <linux/string.h> | 31 | #include <linux/string.h> |
32 | #include <linux/timer.h> | 32 | #include <linux/timer.h> |
33 | #include <linux/slab.h> | 33 | #include <linux/slab.h> |
34 | #include <linux/blkdev.h> | 34 | #include <linux/blkdev.h> |
35 | #include <linux/spinlock.h> | 35 | #include <linux/spinlock.h> |
36 | #include <linux/kthread.h> | 36 | #include <linux/kthread.h> |
37 | #include <linux/in.h> | 37 | #include <linux/in.h> |
38 | #include <linux/cdrom.h> | 38 | #include <linux/cdrom.h> |
39 | #include <linux/module.h> | 39 | #include <linux/module.h> |
40 | #include <linux/ratelimit.h> | 40 | #include <linux/ratelimit.h> |
41 | #include <asm/unaligned.h> | 41 | #include <asm/unaligned.h> |
42 | #include <net/sock.h> | 42 | #include <net/sock.h> |
43 | #include <net/tcp.h> | 43 | #include <net/tcp.h> |
44 | #include <scsi/scsi.h> | 44 | #include <scsi/scsi.h> |
45 | #include <scsi/scsi_cmnd.h> | 45 | #include <scsi/scsi_cmnd.h> |
46 | #include <scsi/scsi_tcq.h> | 46 | #include <scsi/scsi_tcq.h> |
47 | 47 | ||
48 | #include <target/target_core_base.h> | 48 | #include <target/target_core_base.h> |
49 | #include <target/target_core_backend.h> | 49 | #include <target/target_core_backend.h> |
50 | #include <target/target_core_fabric.h> | 50 | #include <target/target_core_fabric.h> |
51 | #include <target/target_core_configfs.h> | 51 | #include <target/target_core_configfs.h> |
52 | 52 | ||
53 | #include "target_core_internal.h" | 53 | #include "target_core_internal.h" |
54 | #include "target_core_alua.h" | 54 | #include "target_core_alua.h" |
55 | #include "target_core_pr.h" | 55 | #include "target_core_pr.h" |
56 | #include "target_core_ua.h" | 56 | #include "target_core_ua.h" |
57 | 57 | ||
58 | static int sub_api_initialized; | 58 | static int sub_api_initialized; |
59 | 59 | ||
60 | static struct workqueue_struct *target_completion_wq; | 60 | static struct workqueue_struct *target_completion_wq; |
61 | static struct kmem_cache *se_sess_cache; | 61 | static struct kmem_cache *se_sess_cache; |
62 | struct kmem_cache *se_ua_cache; | 62 | struct kmem_cache *se_ua_cache; |
63 | struct kmem_cache *t10_pr_reg_cache; | 63 | struct kmem_cache *t10_pr_reg_cache; |
64 | struct kmem_cache *t10_alua_lu_gp_cache; | 64 | struct kmem_cache *t10_alua_lu_gp_cache; |
65 | struct kmem_cache *t10_alua_lu_gp_mem_cache; | 65 | struct kmem_cache *t10_alua_lu_gp_mem_cache; |
66 | struct kmem_cache *t10_alua_tg_pt_gp_cache; | 66 | struct kmem_cache *t10_alua_tg_pt_gp_cache; |
67 | struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; | 67 | struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; |
68 | 68 | ||
69 | static void transport_complete_task_attr(struct se_cmd *cmd); | 69 | static void transport_complete_task_attr(struct se_cmd *cmd); |
70 | static void transport_handle_queue_full(struct se_cmd *cmd, | 70 | static void transport_handle_queue_full(struct se_cmd *cmd, |
71 | struct se_device *dev); | 71 | struct se_device *dev); |
72 | static int transport_generic_get_mem(struct se_cmd *cmd); | 72 | static int transport_generic_get_mem(struct se_cmd *cmd); |
73 | static int target_get_sess_cmd(struct se_session *, struct se_cmd *, bool); | 73 | static int target_get_sess_cmd(struct se_session *, struct se_cmd *, bool); |
74 | static void transport_put_cmd(struct se_cmd *cmd); | 74 | static void transport_put_cmd(struct se_cmd *cmd); |
75 | static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq); | 75 | static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq); |
76 | static void target_complete_ok_work(struct work_struct *work); | 76 | static void target_complete_ok_work(struct work_struct *work); |
77 | 77 | ||
78 | int init_se_kmem_caches(void) | 78 | int init_se_kmem_caches(void) |
79 | { | 79 | { |
80 | se_sess_cache = kmem_cache_create("se_sess_cache", | 80 | se_sess_cache = kmem_cache_create("se_sess_cache", |
81 | sizeof(struct se_session), __alignof__(struct se_session), | 81 | sizeof(struct se_session), __alignof__(struct se_session), |
82 | 0, NULL); | 82 | 0, NULL); |
83 | if (!se_sess_cache) { | 83 | if (!se_sess_cache) { |
84 | pr_err("kmem_cache_create() for struct se_session" | 84 | pr_err("kmem_cache_create() for struct se_session" |
85 | " failed\n"); | 85 | " failed\n"); |
86 | goto out; | 86 | goto out; |
87 | } | 87 | } |
88 | se_ua_cache = kmem_cache_create("se_ua_cache", | 88 | se_ua_cache = kmem_cache_create("se_ua_cache", |
89 | sizeof(struct se_ua), __alignof__(struct se_ua), | 89 | sizeof(struct se_ua), __alignof__(struct se_ua), |
90 | 0, NULL); | 90 | 0, NULL); |
91 | if (!se_ua_cache) { | 91 | if (!se_ua_cache) { |
92 | pr_err("kmem_cache_create() for struct se_ua failed\n"); | 92 | pr_err("kmem_cache_create() for struct se_ua failed\n"); |
93 | goto out_free_sess_cache; | 93 | goto out_free_sess_cache; |
94 | } | 94 | } |
95 | t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache", | 95 | t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache", |
96 | sizeof(struct t10_pr_registration), | 96 | sizeof(struct t10_pr_registration), |
97 | __alignof__(struct t10_pr_registration), 0, NULL); | 97 | __alignof__(struct t10_pr_registration), 0, NULL); |
98 | if (!t10_pr_reg_cache) { | 98 | if (!t10_pr_reg_cache) { |
99 | pr_err("kmem_cache_create() for struct t10_pr_registration" | 99 | pr_err("kmem_cache_create() for struct t10_pr_registration" |
100 | " failed\n"); | 100 | " failed\n"); |
101 | goto out_free_ua_cache; | 101 | goto out_free_ua_cache; |
102 | } | 102 | } |
103 | t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache", | 103 | t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache", |
104 | sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp), | 104 | sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp), |
105 | 0, NULL); | 105 | 0, NULL); |
106 | if (!t10_alua_lu_gp_cache) { | 106 | if (!t10_alua_lu_gp_cache) { |
107 | pr_err("kmem_cache_create() for t10_alua_lu_gp_cache" | 107 | pr_err("kmem_cache_create() for t10_alua_lu_gp_cache" |
108 | " failed\n"); | 108 | " failed\n"); |
109 | goto out_free_pr_reg_cache; | 109 | goto out_free_pr_reg_cache; |
110 | } | 110 | } |
111 | t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache", | 111 | t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache", |
112 | sizeof(struct t10_alua_lu_gp_member), | 112 | sizeof(struct t10_alua_lu_gp_member), |
113 | __alignof__(struct t10_alua_lu_gp_member), 0, NULL); | 113 | __alignof__(struct t10_alua_lu_gp_member), 0, NULL); |
114 | if (!t10_alua_lu_gp_mem_cache) { | 114 | if (!t10_alua_lu_gp_mem_cache) { |
115 | pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_" | 115 | pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_" |
116 | "cache failed\n"); | 116 | "cache failed\n"); |
117 | goto out_free_lu_gp_cache; | 117 | goto out_free_lu_gp_cache; |
118 | } | 118 | } |
119 | t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache", | 119 | t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache", |
120 | sizeof(struct t10_alua_tg_pt_gp), | 120 | sizeof(struct t10_alua_tg_pt_gp), |
121 | __alignof__(struct t10_alua_tg_pt_gp), 0, NULL); | 121 | __alignof__(struct t10_alua_tg_pt_gp), 0, NULL); |
122 | if (!t10_alua_tg_pt_gp_cache) { | 122 | if (!t10_alua_tg_pt_gp_cache) { |
123 | pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" | 123 | pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" |
124 | "cache failed\n"); | 124 | "cache failed\n"); |
125 | goto out_free_lu_gp_mem_cache; | 125 | goto out_free_lu_gp_mem_cache; |
126 | } | 126 | } |
127 | t10_alua_tg_pt_gp_mem_cache = kmem_cache_create( | 127 | t10_alua_tg_pt_gp_mem_cache = kmem_cache_create( |
128 | "t10_alua_tg_pt_gp_mem_cache", | 128 | "t10_alua_tg_pt_gp_mem_cache", |
129 | sizeof(struct t10_alua_tg_pt_gp_member), | 129 | sizeof(struct t10_alua_tg_pt_gp_member), |
130 | __alignof__(struct t10_alua_tg_pt_gp_member), | 130 | __alignof__(struct t10_alua_tg_pt_gp_member), |
131 | 0, NULL); | 131 | 0, NULL); |
132 | if (!t10_alua_tg_pt_gp_mem_cache) { | 132 | if (!t10_alua_tg_pt_gp_mem_cache) { |
133 | pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" | 133 | pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" |
134 | "mem_t failed\n"); | 134 | "mem_t failed\n"); |
135 | goto out_free_tg_pt_gp_cache; | 135 | goto out_free_tg_pt_gp_cache; |
136 | } | 136 | } |
137 | 137 | ||
138 | target_completion_wq = alloc_workqueue("target_completion", | 138 | target_completion_wq = alloc_workqueue("target_completion", |
139 | WQ_MEM_RECLAIM, 0); | 139 | WQ_MEM_RECLAIM, 0); |
140 | if (!target_completion_wq) | 140 | if (!target_completion_wq) |
141 | goto out_free_tg_pt_gp_mem_cache; | 141 | goto out_free_tg_pt_gp_mem_cache; |
142 | 142 | ||
143 | return 0; | 143 | return 0; |
144 | 144 | ||
145 | out_free_tg_pt_gp_mem_cache: | 145 | out_free_tg_pt_gp_mem_cache: |
146 | kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); | 146 | kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); |
147 | out_free_tg_pt_gp_cache: | 147 | out_free_tg_pt_gp_cache: |
148 | kmem_cache_destroy(t10_alua_tg_pt_gp_cache); | 148 | kmem_cache_destroy(t10_alua_tg_pt_gp_cache); |
149 | out_free_lu_gp_mem_cache: | 149 | out_free_lu_gp_mem_cache: |
150 | kmem_cache_destroy(t10_alua_lu_gp_mem_cache); | 150 | kmem_cache_destroy(t10_alua_lu_gp_mem_cache); |
151 | out_free_lu_gp_cache: | 151 | out_free_lu_gp_cache: |
152 | kmem_cache_destroy(t10_alua_lu_gp_cache); | 152 | kmem_cache_destroy(t10_alua_lu_gp_cache); |
153 | out_free_pr_reg_cache: | 153 | out_free_pr_reg_cache: |
154 | kmem_cache_destroy(t10_pr_reg_cache); | 154 | kmem_cache_destroy(t10_pr_reg_cache); |
155 | out_free_ua_cache: | 155 | out_free_ua_cache: |
156 | kmem_cache_destroy(se_ua_cache); | 156 | kmem_cache_destroy(se_ua_cache); |
157 | out_free_sess_cache: | 157 | out_free_sess_cache: |
158 | kmem_cache_destroy(se_sess_cache); | 158 | kmem_cache_destroy(se_sess_cache); |
159 | out: | 159 | out: |
160 | return -ENOMEM; | 160 | return -ENOMEM; |
161 | } | 161 | } |
162 | 162 | ||
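The cache setup above follows the kernel's goto-unwind idiom: each failed allocation jumps to a label that releases everything set up before it, in reverse order, so there is exactly one teardown sequence. A minimal userspace sketch of the pattern, using malloc as a stand-in:

#include <stdlib.h>

int init_three(void)
{
	void *a, *b, *c;

	a = malloc(16);
	if (!a)
		goto out;
	b = malloc(16);
	if (!b)
		goto out_free_a;
	c = malloc(16);
	if (!c)
		goto out_free_b;
	free(c); free(b); free(a);	/* success path for the demo */
	return 0;

out_free_b:
	free(b);
out_free_a:
	free(a);
out:
	return -1;
}

int main(void) { return init_three() ? 1 : 0; }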
163 | void release_se_kmem_caches(void) | 163 | void release_se_kmem_caches(void) |
164 | { | 164 | { |
165 | destroy_workqueue(target_completion_wq); | 165 | destroy_workqueue(target_completion_wq); |
166 | kmem_cache_destroy(se_sess_cache); | 166 | kmem_cache_destroy(se_sess_cache); |
167 | kmem_cache_destroy(se_ua_cache); | 167 | kmem_cache_destroy(se_ua_cache); |
168 | kmem_cache_destroy(t10_pr_reg_cache); | 168 | kmem_cache_destroy(t10_pr_reg_cache); |
169 | kmem_cache_destroy(t10_alua_lu_gp_cache); | 169 | kmem_cache_destroy(t10_alua_lu_gp_cache); |
170 | kmem_cache_destroy(t10_alua_lu_gp_mem_cache); | 170 | kmem_cache_destroy(t10_alua_lu_gp_mem_cache); |
171 | kmem_cache_destroy(t10_alua_tg_pt_gp_cache); | 171 | kmem_cache_destroy(t10_alua_tg_pt_gp_cache); |
172 | kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); | 172 | kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); |
173 | } | 173 | } |
174 | 174 | ||
175 | /* This code ensures unique mib indexes are handed out. */ | 175 | /* This code ensures unique mib indexes are handed out. */ |
176 | static DEFINE_SPINLOCK(scsi_mib_index_lock); | 176 | static DEFINE_SPINLOCK(scsi_mib_index_lock); |
177 | static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX]; | 177 | static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX]; |
178 | 178 | ||
179 | /* | 179 | /* |
180 | * Allocate a new row index for the entry type specified | 180 | * Allocate a new row index for the entry type specified |
181 | */ | 181 | */ |
182 | u32 scsi_get_new_index(scsi_index_t type) | 182 | u32 scsi_get_new_index(scsi_index_t type) |
183 | { | 183 | { |
184 | u32 new_index; | 184 | u32 new_index; |
185 | 185 | ||
186 | BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)); | 186 | BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)); |
187 | 187 | ||
188 | spin_lock(&scsi_mib_index_lock); | 188 | spin_lock(&scsi_mib_index_lock); |
189 | new_index = ++scsi_mib_index[type]; | 189 | new_index = ++scsi_mib_index[type]; |
190 | spin_unlock(&scsi_mib_index_lock); | 190 | spin_unlock(&scsi_mib_index_lock); |
191 | 191 | ||
192 | return new_index; | 192 | return new_index; |
193 | } | 193 | } |
194 | 194 | ||
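scsi_get_new_index() is simply a pre-incremented counter under a spinlock, so each index type hands out unique, non-zero, monotonically increasing values. A userspace analogue, with a pthread mutex standing in for the kernel spinlock and stand-in names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t idx_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int idx;

static unsigned int get_new_index(void)
{
	unsigned int v;

	pthread_mutex_lock(&idx_lock);
	v = ++idx;			/* first caller gets 1, never 0 */
	pthread_mutex_unlock(&idx_lock);
	return v;
}

int main(void)
{
	printf("%u %u\n", get_new_index(), get_new_index());	/* 1 2 or 2 1 */
	return 0;
}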
195 | void transport_subsystem_check_init(void) | 195 | void transport_subsystem_check_init(void) |
196 | { | 196 | { |
197 | int ret; | 197 | int ret; |
198 | 198 | ||
199 | if (sub_api_initialized) | 199 | if (sub_api_initialized) |
200 | return; | 200 | return; |
201 | 201 | ||
202 | ret = request_module("target_core_iblock"); | 202 | ret = request_module("target_core_iblock"); |
203 | if (ret != 0) | 203 | if (ret != 0) |
204 | pr_err("Unable to load target_core_iblock\n"); | 204 | pr_err("Unable to load target_core_iblock\n"); |
205 | 205 | ||
206 | ret = request_module("target_core_file"); | 206 | ret = request_module("target_core_file"); |
207 | if (ret != 0) | 207 | if (ret != 0) |
208 | pr_err("Unable to load target_core_file\n"); | 208 | pr_err("Unable to load target_core_file\n"); |
209 | 209 | ||
210 | ret = request_module("target_core_pscsi"); | 210 | ret = request_module("target_core_pscsi"); |
211 | if (ret != 0) | 211 | if (ret != 0) |
212 | pr_err("Unable to load target_core_pscsi\n"); | 212 | pr_err("Unable to load target_core_pscsi\n"); |
213 | 213 | ||
214 | ret = request_module("target_core_stgt"); | 214 | ret = request_module("target_core_stgt"); |
215 | if (ret != 0) | 215 | if (ret != 0) |
216 | pr_err("Unable to load target_core_stgt\n"); | 216 | pr_err("Unable to load target_core_stgt\n"); |
217 | 217 | ||
218 | sub_api_initialized = 1; | 218 | sub_api_initialized = 1; |
219 | return; | 219 | return; |
220 | } | 220 | } |
221 | 221 | ||
222 | struct se_session *transport_init_session(void) | 222 | struct se_session *transport_init_session(void) |
223 | { | 223 | { |
224 | struct se_session *se_sess; | 224 | struct se_session *se_sess; |
225 | 225 | ||
226 | se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL); | 226 | se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL); |
227 | if (!se_sess) { | 227 | if (!se_sess) { |
228 | pr_err("Unable to allocate struct se_session from" | 228 | pr_err("Unable to allocate struct se_session from" |
229 | " se_sess_cache\n"); | 229 | " se_sess_cache\n"); |
230 | return ERR_PTR(-ENOMEM); | 230 | return ERR_PTR(-ENOMEM); |
231 | } | 231 | } |
232 | INIT_LIST_HEAD(&se_sess->sess_list); | 232 | INIT_LIST_HEAD(&se_sess->sess_list); |
233 | INIT_LIST_HEAD(&se_sess->sess_acl_list); | 233 | INIT_LIST_HEAD(&se_sess->sess_acl_list); |
234 | INIT_LIST_HEAD(&se_sess->sess_cmd_list); | 234 | INIT_LIST_HEAD(&se_sess->sess_cmd_list); |
235 | spin_lock_init(&se_sess->sess_cmd_lock); | 235 | spin_lock_init(&se_sess->sess_cmd_lock); |
236 | kref_init(&se_sess->sess_kref); | 236 | kref_init(&se_sess->sess_kref); |
237 | 237 | ||
238 | return se_sess; | 238 | return se_sess; |
239 | } | 239 | } |
240 | EXPORT_SYMBOL(transport_init_session); | 240 | EXPORT_SYMBOL(transport_init_session); |
241 | 241 | ||
242 | /* | 242 | /* |
243 | * Called with spin_lock_irqsave(&struct se_portal_group->session_lock) held. | 243 | * Called with spin_lock_irqsave(&struct se_portal_group->session_lock) held. |
244 | */ | 244 | */ |
245 | void __transport_register_session( | 245 | void __transport_register_session( |
246 | struct se_portal_group *se_tpg, | 246 | struct se_portal_group *se_tpg, |
247 | struct se_node_acl *se_nacl, | 247 | struct se_node_acl *se_nacl, |
248 | struct se_session *se_sess, | 248 | struct se_session *se_sess, |
249 | void *fabric_sess_ptr) | 249 | void *fabric_sess_ptr) |
250 | { | 250 | { |
251 | unsigned char buf[PR_REG_ISID_LEN]; | 251 | unsigned char buf[PR_REG_ISID_LEN]; |
252 | 252 | ||
253 | se_sess->se_tpg = se_tpg; | 253 | se_sess->se_tpg = se_tpg; |
254 | se_sess->fabric_sess_ptr = fabric_sess_ptr; | 254 | se_sess->fabric_sess_ptr = fabric_sess_ptr; |
255 | /* | 255 | /* |
256 | * Used by struct se_node_acl's under ConfigFS to locate active struct se_session | 256 | * Used by struct se_node_acl's under ConfigFS to locate active struct se_session |
257 | * | 257 | * |
258 | * Only set for struct se_session's that will actually be moving I/O. | 258 | * Only set for struct se_session's that will actually be moving I/O. |
259 | * eg: *NOT* discovery sessions. | 259 | * eg: *NOT* discovery sessions. |
260 | */ | 260 | */ |
261 | if (se_nacl) { | 261 | if (se_nacl) { |
262 | /* | 262 | /* |
263 | * If the fabric module supports an ISID based TransportID, | 263 | * If the fabric module supports an ISID based TransportID, |
264 | * save this value in binary from the fabric I_T Nexus now. | 264 | * save this value in binary from the fabric I_T Nexus now. |
265 | */ | 265 | */ |
266 | if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) { | 266 | if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) { |
267 | memset(&buf[0], 0, PR_REG_ISID_LEN); | 267 | memset(&buf[0], 0, PR_REG_ISID_LEN); |
268 | se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, | 268 | se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, |
269 | &buf[0], PR_REG_ISID_LEN); | 269 | &buf[0], PR_REG_ISID_LEN); |
270 | se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]); | 270 | se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]); |
271 | } | 271 | } |
272 | kref_get(&se_nacl->acl_kref); | 272 | kref_get(&se_nacl->acl_kref); |
273 | 273 | ||
274 | spin_lock_irq(&se_nacl->nacl_sess_lock); | 274 | spin_lock_irq(&se_nacl->nacl_sess_lock); |
275 | /* | 275 | /* |
276 | * The se_nacl->nacl_sess pointer will be set to the | 276 | * The se_nacl->nacl_sess pointer will be set to the |
277 | * last active I_T Nexus for each struct se_node_acl. | 277 | * last active I_T Nexus for each struct se_node_acl. |
278 | */ | 278 | */ |
279 | se_nacl->nacl_sess = se_sess; | 279 | se_nacl->nacl_sess = se_sess; |
280 | 280 | ||
281 | list_add_tail(&se_sess->sess_acl_list, | 281 | list_add_tail(&se_sess->sess_acl_list, |
282 | &se_nacl->acl_sess_list); | 282 | &se_nacl->acl_sess_list); |
283 | spin_unlock_irq(&se_nacl->nacl_sess_lock); | 283 | spin_unlock_irq(&se_nacl->nacl_sess_lock); |
284 | } | 284 | } |
285 | list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list); | 285 | list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list); |
286 | 286 | ||
287 | pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n", | 287 | pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n", |
288 | se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr); | 288 | se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr); |
289 | } | 289 | } |
290 | EXPORT_SYMBOL(__transport_register_session); | 290 | EXPORT_SYMBOL(__transport_register_session); |
291 | 291 | ||
292 | void transport_register_session( | 292 | void transport_register_session( |
293 | struct se_portal_group *se_tpg, | 293 | struct se_portal_group *se_tpg, |
294 | struct se_node_acl *se_nacl, | 294 | struct se_node_acl *se_nacl, |
295 | struct se_session *se_sess, | 295 | struct se_session *se_sess, |
296 | void *fabric_sess_ptr) | 296 | void *fabric_sess_ptr) |
297 | { | 297 | { |
298 | unsigned long flags; | 298 | unsigned long flags; |
299 | 299 | ||
300 | spin_lock_irqsave(&se_tpg->session_lock, flags); | 300 | spin_lock_irqsave(&se_tpg->session_lock, flags); |
301 | __transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr); | 301 | __transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr); |
302 | spin_unlock_irqrestore(&se_tpg->session_lock, flags); | 302 | spin_unlock_irqrestore(&se_tpg->session_lock, flags); |
303 | } | 303 | } |
304 | EXPORT_SYMBOL(transport_register_session); | 304 | EXPORT_SYMBOL(transport_register_session); |
305 | 305 | ||
306 | void target_release_session(struct kref *kref) | 306 | void target_release_session(struct kref *kref) |
307 | { | 307 | { |
308 | struct se_session *se_sess = container_of(kref, | 308 | struct se_session *se_sess = container_of(kref, |
309 | struct se_session, sess_kref); | 309 | struct se_session, sess_kref); |
310 | struct se_portal_group *se_tpg = se_sess->se_tpg; | 310 | struct se_portal_group *se_tpg = se_sess->se_tpg; |
311 | 311 | ||
312 | se_tpg->se_tpg_tfo->close_session(se_sess); | 312 | se_tpg->se_tpg_tfo->close_session(se_sess); |
313 | } | 313 | } |
314 | 314 | ||
315 | void target_get_session(struct se_session *se_sess) | 315 | void target_get_session(struct se_session *se_sess) |
316 | { | 316 | { |
317 | kref_get(&se_sess->sess_kref); | 317 | kref_get(&se_sess->sess_kref); |
318 | } | 318 | } |
319 | EXPORT_SYMBOL(target_get_session); | 319 | EXPORT_SYMBOL(target_get_session); |
320 | 320 | ||
321 | void target_put_session(struct se_session *se_sess) | 321 | void target_put_session(struct se_session *se_sess) |
322 | { | 322 | { |
323 | struct se_portal_group *tpg = se_sess->se_tpg; | 323 | struct se_portal_group *tpg = se_sess->se_tpg; |
324 | 324 | ||
325 | if (tpg->se_tpg_tfo->put_session != NULL) { | 325 | if (tpg->se_tpg_tfo->put_session != NULL) { |
326 | tpg->se_tpg_tfo->put_session(se_sess); | 326 | tpg->se_tpg_tfo->put_session(se_sess); |
327 | return; | 327 | return; |
328 | } | 328 | } |
329 | kref_put(&se_sess->sess_kref, target_release_session); | 329 | kref_put(&se_sess->sess_kref, target_release_session); |
330 | } | 330 | } |
331 | EXPORT_SYMBOL(target_put_session); | 331 | EXPORT_SYMBOL(target_put_session); |
332 | 332 | ||
333 | static void target_complete_nacl(struct kref *kref) | 333 | static void target_complete_nacl(struct kref *kref) |
334 | { | 334 | { |
335 | struct se_node_acl *nacl = container_of(kref, | 335 | struct se_node_acl *nacl = container_of(kref, |
336 | struct se_node_acl, acl_kref); | 336 | struct se_node_acl, acl_kref); |
337 | 337 | ||
338 | complete(&nacl->acl_free_comp); | 338 | complete(&nacl->acl_free_comp); |
339 | } | 339 | } |
340 | 340 | ||
341 | void target_put_nacl(struct se_node_acl *nacl) | 341 | void target_put_nacl(struct se_node_acl *nacl) |
342 | { | 342 | { |
343 | kref_put(&nacl->acl_kref, target_complete_nacl); | 343 | kref_put(&nacl->acl_kref, target_complete_nacl); |
344 | } | 344 | } |
345 | 345 | ||
346 | void transport_deregister_session_configfs(struct se_session *se_sess) | 346 | void transport_deregister_session_configfs(struct se_session *se_sess) |
347 | { | 347 | { |
348 | struct se_node_acl *se_nacl; | 348 | struct se_node_acl *se_nacl; |
349 | unsigned long flags; | 349 | unsigned long flags; |
350 | /* | 350 | /* |
351 | * Used by struct se_node_acl's under ConfigFS to locate active struct se_session | 351 | * Used by struct se_node_acl's under ConfigFS to locate active struct se_session |
352 | */ | 352 | */ |
353 | se_nacl = se_sess->se_node_acl; | 353 | se_nacl = se_sess->se_node_acl; |
354 | if (se_nacl) { | 354 | if (se_nacl) { |
355 | spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); | 355 | spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); |
356 | if (se_nacl->acl_stop == 0) | 356 | if (se_nacl->acl_stop == 0) |
357 | list_del(&se_sess->sess_acl_list); | 357 | list_del(&se_sess->sess_acl_list); |
358 | /* | 358 | /* |
359 | * If the session list is empty, then clear the pointer. | 359 | * If the session list is empty, then clear the pointer. |
360 | * Otherwise, set the struct se_session pointer from the tail | 360 | * Otherwise, set the struct se_session pointer from the tail |
361 | * element of the per struct se_node_acl active session list. | 361 | * element of the per struct se_node_acl active session list. |
362 | */ | 362 | */ |
363 | if (list_empty(&se_nacl->acl_sess_list)) | 363 | if (list_empty(&se_nacl->acl_sess_list)) |
364 | se_nacl->nacl_sess = NULL; | 364 | se_nacl->nacl_sess = NULL; |
365 | else { | 365 | else { |
366 | se_nacl->nacl_sess = container_of( | 366 | se_nacl->nacl_sess = container_of( |
367 | se_nacl->acl_sess_list.prev, | 367 | se_nacl->acl_sess_list.prev, |
368 | struct se_session, sess_acl_list); | 368 | struct se_session, sess_acl_list); |
369 | } | 369 | } |
370 | spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); | 370 | spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); |
371 | } | 371 | } |
372 | } | 372 | } |
373 | EXPORT_SYMBOL(transport_deregister_session_configfs); | 373 | EXPORT_SYMBOL(transport_deregister_session_configfs); |
374 | 374 | ||
375 | void transport_free_session(struct se_session *se_sess) | 375 | void transport_free_session(struct se_session *se_sess) |
376 | { | 376 | { |
377 | kmem_cache_free(se_sess_cache, se_sess); | 377 | kmem_cache_free(se_sess_cache, se_sess); |
378 | } | 378 | } |
379 | EXPORT_SYMBOL(transport_free_session); | 379 | EXPORT_SYMBOL(transport_free_session); |
380 | 380 | ||
381 | void transport_deregister_session(struct se_session *se_sess) | 381 | void transport_deregister_session(struct se_session *se_sess) |
382 | { | 382 | { |
383 | struct se_portal_group *se_tpg = se_sess->se_tpg; | 383 | struct se_portal_group *se_tpg = se_sess->se_tpg; |
384 | struct target_core_fabric_ops *se_tfo; | 384 | struct target_core_fabric_ops *se_tfo; |
385 | struct se_node_acl *se_nacl; | 385 | struct se_node_acl *se_nacl; |
386 | unsigned long flags; | 386 | unsigned long flags; |
387 | bool comp_nacl = true; | 387 | bool comp_nacl = true; |
388 | 388 | ||
389 | if (!se_tpg) { | 389 | if (!se_tpg) { |
390 | transport_free_session(se_sess); | 390 | transport_free_session(se_sess); |
391 | return; | 391 | return; |
392 | } | 392 | } |
393 | se_tfo = se_tpg->se_tpg_tfo; | 393 | se_tfo = se_tpg->se_tpg_tfo; |
394 | 394 | ||
395 | spin_lock_irqsave(&se_tpg->session_lock, flags); | 395 | spin_lock_irqsave(&se_tpg->session_lock, flags); |
396 | list_del(&se_sess->sess_list); | 396 | list_del(&se_sess->sess_list); |
397 | se_sess->se_tpg = NULL; | 397 | se_sess->se_tpg = NULL; |
398 | se_sess->fabric_sess_ptr = NULL; | 398 | se_sess->fabric_sess_ptr = NULL; |
399 | spin_unlock_irqrestore(&se_tpg->session_lock, flags); | 399 | spin_unlock_irqrestore(&se_tpg->session_lock, flags); |
400 | 400 | ||
401 | /* | 401 | /* |
402 | * Determine if we need to do extra work for this initiator node's | 402 | * Determine if we need to do extra work for this initiator node's |
403 | * struct se_node_acl if it had been previously dynamically generated. | 403 | * struct se_node_acl if it had been previously dynamically generated. |
404 | */ | 404 | */ |
405 | se_nacl = se_sess->se_node_acl; | 405 | se_nacl = se_sess->se_node_acl; |
406 | 406 | ||
407 | spin_lock_irqsave(&se_tpg->acl_node_lock, flags); | 407 | spin_lock_irqsave(&se_tpg->acl_node_lock, flags); |
408 | if (se_nacl && se_nacl->dynamic_node_acl) { | 408 | if (se_nacl && se_nacl->dynamic_node_acl) { |
409 | if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) { | 409 | if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) { |
410 | list_del(&se_nacl->acl_list); | 410 | list_del(&se_nacl->acl_list); |
411 | se_tpg->num_node_acls--; | 411 | se_tpg->num_node_acls--; |
412 | spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags); | 412 | spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags); |
413 | core_tpg_wait_for_nacl_pr_ref(se_nacl); | 413 | core_tpg_wait_for_nacl_pr_ref(se_nacl); |
414 | core_free_device_list_for_node(se_nacl, se_tpg); | 414 | core_free_device_list_for_node(se_nacl, se_tpg); |
415 | se_tfo->tpg_release_fabric_acl(se_tpg, se_nacl); | 415 | se_tfo->tpg_release_fabric_acl(se_tpg, se_nacl); |
416 | 416 | ||
417 | comp_nacl = false; | 417 | comp_nacl = false; |
418 | spin_lock_irqsave(&se_tpg->acl_node_lock, flags); | 418 | spin_lock_irqsave(&se_tpg->acl_node_lock, flags); |
419 | } | 419 | } |
420 | } | 420 | } |
421 | spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags); | 421 | spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags); |
422 | 422 | ||
423 | pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n", | 423 | pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n", |
424 | se_tpg->se_tpg_tfo->get_fabric_name()); | 424 | se_tpg->se_tpg_tfo->get_fabric_name()); |
425 | /* | 425 | /* |
426 | * If the last kref is dropping now for an explicit NodeACL, wake the | 426 | * If the last kref is dropping now for an explicit NodeACL, wake the |
427 | * sleeping ->acl_free_comp caller in the configfs se_node_acl->acl_group | 427 | * sleeping ->acl_free_comp caller in the configfs se_node_acl->acl_group |
428 | * removal context. | 428 | * removal context. |
429 | */ | 429 | */ |
430 | if (se_nacl && comp_nacl == true) | 430 | if (se_nacl && comp_nacl == true) |
431 | target_put_nacl(se_nacl); | 431 | target_put_nacl(se_nacl); |
432 | 432 | ||
433 | transport_free_session(se_sess); | 433 | transport_free_session(se_sess); |
434 | } | 434 | } |
435 | EXPORT_SYMBOL(transport_deregister_session); | 435 | EXPORT_SYMBOL(transport_deregister_session); |
436 | 436 | ||
437 | /* | 437 | /* |
438 | * Called with cmd->t_state_lock held. | 438 | * Called with cmd->t_state_lock held. |
439 | */ | 439 | */ |
440 | static void target_remove_from_state_list(struct se_cmd *cmd) | 440 | static void target_remove_from_state_list(struct se_cmd *cmd) |
441 | { | 441 | { |
442 | struct se_device *dev = cmd->se_dev; | 442 | struct se_device *dev = cmd->se_dev; |
443 | unsigned long flags; | 443 | unsigned long flags; |
444 | 444 | ||
445 | if (!dev) | 445 | if (!dev) |
446 | return; | 446 | return; |
447 | 447 | ||
448 | if (cmd->transport_state & CMD_T_BUSY) | 448 | if (cmd->transport_state & CMD_T_BUSY) |
449 | return; | 449 | return; |
450 | 450 | ||
451 | spin_lock_irqsave(&dev->execute_task_lock, flags); | 451 | spin_lock_irqsave(&dev->execute_task_lock, flags); |
452 | if (cmd->state_active) { | 452 | if (cmd->state_active) { |
453 | list_del(&cmd->state_list); | 453 | list_del(&cmd->state_list); |
454 | cmd->state_active = false; | 454 | cmd->state_active = false; |
455 | } | 455 | } |
456 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); | 456 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); |
457 | } | 457 | } |
458 | 458 | ||
459 | static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists) | 459 | static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists) |
460 | { | 460 | { |
461 | unsigned long flags; | 461 | unsigned long flags; |
462 | 462 | ||
463 | spin_lock_irqsave(&cmd->t_state_lock, flags); | 463 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
464 | /* | 464 | /* |
465 | * Determine if an IOCTL context caller is requesting the stopping of this | 465 | * Determine if an IOCTL context caller is requesting the stopping of this |
466 | * command for LUN shutdown purposes. | 466 | * command for LUN shutdown purposes. |
467 | */ | 467 | */ |
468 | if (cmd->transport_state & CMD_T_LUN_STOP) { | 468 | if (cmd->transport_state & CMD_T_LUN_STOP) { |
469 | pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n", | 469 | pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n", |
470 | __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd)); | 470 | __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd)); |
471 | 471 | ||
472 | cmd->transport_state &= ~CMD_T_ACTIVE; | 472 | cmd->transport_state &= ~CMD_T_ACTIVE; |
473 | if (remove_from_lists) | 473 | if (remove_from_lists) |
474 | target_remove_from_state_list(cmd); | 474 | target_remove_from_state_list(cmd); |
475 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 475 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
476 | 476 | ||
477 | complete(&cmd->transport_lun_stop_comp); | 477 | complete(&cmd->transport_lun_stop_comp); |
478 | return 1; | 478 | return 1; |
479 | } | 479 | } |
480 | 480 | ||
481 | if (remove_from_lists) { | 481 | if (remove_from_lists) { |
482 | target_remove_from_state_list(cmd); | 482 | target_remove_from_state_list(cmd); |
483 | 483 | ||
484 | /* | 484 | /* |
485 | * Clear struct se_cmd->se_lun before the handoff to FE. | 485 | * Clear struct se_cmd->se_lun before the handoff to FE. |
486 | */ | 486 | */ |
487 | cmd->se_lun = NULL; | 487 | cmd->se_lun = NULL; |
488 | } | 488 | } |
489 | 489 | ||
490 | /* | 490 | /* |
491 | * Determine if frontend context caller is requesting the stopping of | 491 | * Determine if frontend context caller is requesting the stopping of |
492 | * this command for frontend exceptions. | 492 | * this command for frontend exceptions. |
493 | */ | 493 | */ |
494 | if (cmd->transport_state & CMD_T_STOP) { | 494 | if (cmd->transport_state & CMD_T_STOP) { |
495 | pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n", | 495 | pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n", |
496 | __func__, __LINE__, | 496 | __func__, __LINE__, |
497 | cmd->se_tfo->get_task_tag(cmd)); | 497 | cmd->se_tfo->get_task_tag(cmd)); |
498 | 498 | ||
499 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 499 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
500 | 500 | ||
501 | complete(&cmd->t_transport_stop_comp); | 501 | complete(&cmd->t_transport_stop_comp); |
502 | return 1; | 502 | return 1; |
503 | } | 503 | } |
504 | 504 | ||
505 | cmd->transport_state &= ~CMD_T_ACTIVE; | 505 | cmd->transport_state &= ~CMD_T_ACTIVE; |
506 | if (remove_from_lists) { | 506 | if (remove_from_lists) { |
507 | /* | 507 | /* |
508 | * Some fabric modules like tcm_loop can now release | 508 | * Some fabric modules like tcm_loop can now release |
509 | * their internally allocated I/O reference and | 509 | * their internally allocated I/O reference and |
510 | * struct se_cmd. | 510 | * struct se_cmd. |
511 | * | 511 | * |
512 | * Fabric modules are expected to return '1' here if the | 512 | * Fabric modules are expected to return '1' here if the |
513 | * se_cmd being passed is released at this point, | 513 | * se_cmd being passed is released at this point, |
514 | * or zero if not being released. | 514 | * or zero if not being released. |
515 | */ | 515 | */ |
516 | if (cmd->se_tfo->check_stop_free != NULL) { | 516 | if (cmd->se_tfo->check_stop_free != NULL) { |
517 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 517 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
518 | return cmd->se_tfo->check_stop_free(cmd); | 518 | return cmd->se_tfo->check_stop_free(cmd); |
519 | } | 519 | } |
520 | } | 520 | } |
521 | 521 | ||
522 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 522 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
523 | return 0; | 523 | return 0; |
524 | } | 524 | } |
525 | 525 | ||
526 | static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) | 526 | static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) |
527 | { | 527 | { |
528 | return transport_cmd_check_stop(cmd, true); | 528 | return transport_cmd_check_stop(cmd, true); |
529 | } | 529 | } |
530 | 530 | ||
531 | static void transport_lun_remove_cmd(struct se_cmd *cmd) | 531 | static void transport_lun_remove_cmd(struct se_cmd *cmd) |
532 | { | 532 | { |
533 | struct se_lun *lun = cmd->se_lun; | 533 | struct se_lun *lun = cmd->se_lun; |
534 | unsigned long flags; | 534 | unsigned long flags; |
535 | 535 | ||
536 | if (!lun) | 536 | if (!lun) |
537 | return; | 537 | return; |
538 | 538 | ||
539 | spin_lock_irqsave(&cmd->t_state_lock, flags); | 539 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
540 | if (cmd->transport_state & CMD_T_DEV_ACTIVE) { | 540 | if (cmd->transport_state & CMD_T_DEV_ACTIVE) { |
541 | cmd->transport_state &= ~CMD_T_DEV_ACTIVE; | 541 | cmd->transport_state &= ~CMD_T_DEV_ACTIVE; |
542 | target_remove_from_state_list(cmd); | 542 | target_remove_from_state_list(cmd); |
543 | } | 543 | } |
544 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 544 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
545 | 545 | ||
546 | spin_lock_irqsave(&lun->lun_cmd_lock, flags); | 546 | spin_lock_irqsave(&lun->lun_cmd_lock, flags); |
547 | if (!list_empty(&cmd->se_lun_node)) | 547 | if (!list_empty(&cmd->se_lun_node)) |
548 | list_del_init(&cmd->se_lun_node); | 548 | list_del_init(&cmd->se_lun_node); |
549 | spin_unlock_irqrestore(&lun->lun_cmd_lock, flags); | 549 | spin_unlock_irqrestore(&lun->lun_cmd_lock, flags); |
550 | } | 550 | } |
551 | 551 | ||
552 | void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) | 552 | void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) |
553 | { | 553 | { |
554 | if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) | 554 | if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) |
555 | transport_lun_remove_cmd(cmd); | 555 | transport_lun_remove_cmd(cmd); |
556 | 556 | ||
557 | if (transport_cmd_check_stop_to_fabric(cmd)) | 557 | if (transport_cmd_check_stop_to_fabric(cmd)) |
558 | return; | 558 | return; |
559 | if (remove) | 559 | if (remove) |
560 | transport_put_cmd(cmd); | 560 | transport_put_cmd(cmd); |
561 | } | 561 | } |
562 | 562 | ||
563 | static void target_complete_failure_work(struct work_struct *work) | 563 | static void target_complete_failure_work(struct work_struct *work) |
564 | { | 564 | { |
565 | struct se_cmd *cmd = container_of(work, struct se_cmd, work); | 565 | struct se_cmd *cmd = container_of(work, struct se_cmd, work); |
566 | 566 | ||
567 | transport_generic_request_failure(cmd); | 567 | transport_generic_request_failure(cmd); |
568 | } | 568 | } |
569 | 569 | ||
570 | /* | 570 | /* |
571 | * Used when asking transport to copy Sense Data from the underlying | 571 | * Used when asking transport to copy Sense Data from the underlying |
572 | * Linux/SCSI struct scsi_cmnd | 572 | * Linux/SCSI struct scsi_cmnd |
573 | */ | 573 | */ |
574 | static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd) | 574 | static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd) |
575 | { | 575 | { |
576 | unsigned char *buffer = cmd->sense_buffer; | 576 | unsigned char *buffer = cmd->sense_buffer; |
577 | struct se_device *dev = cmd->se_dev; | 577 | struct se_device *dev = cmd->se_dev; |
578 | u32 offset = 0; | 578 | u32 offset = 0; |
579 | 579 | ||
580 | WARN_ON(!cmd->se_lun); | 580 | WARN_ON(!cmd->se_lun); |
581 | 581 | ||
582 | if (!dev) | 582 | if (!dev) |
583 | return NULL; | 583 | return NULL; |
584 | 584 | ||
585 | if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) | 585 | if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) |
586 | return NULL; | 586 | return NULL; |
587 | 587 | ||
588 | offset = cmd->se_tfo->set_fabric_sense_len(cmd, TRANSPORT_SENSE_BUFFER); | 588 | offset = cmd->se_tfo->set_fabric_sense_len(cmd, TRANSPORT_SENSE_BUFFER); |
589 | 589 | ||
590 | /* Automatically padded */ | 590 | /* Automatically padded */ |
591 | cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset; | 591 | cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset; |
592 | 592 | ||
593 | pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n", | 593 | pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n", |
594 | dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status); | 594 | dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status); |
595 | return &buffer[offset]; | 595 | return &buffer[offset]; |
596 | } | 596 | } |
597 | 597 | ||
598 | void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) | 598 | void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) |
599 | { | 599 | { |
600 | struct se_device *dev = cmd->se_dev; | 600 | struct se_device *dev = cmd->se_dev; |
601 | int success = scsi_status == GOOD; | 601 | int success = scsi_status == GOOD; |
602 | unsigned long flags; | 602 | unsigned long flags; |
603 | 603 | ||
604 | cmd->scsi_status = scsi_status; | 604 | cmd->scsi_status = scsi_status; |
605 | 605 | ||
606 | 606 | ||
607 | spin_lock_irqsave(&cmd->t_state_lock, flags); | 607 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
608 | cmd->transport_state &= ~CMD_T_BUSY; | 608 | cmd->transport_state &= ~CMD_T_BUSY; |
609 | 609 | ||
610 | if (dev && dev->transport->transport_complete) { | 610 | if (dev && dev->transport->transport_complete) { |
611 | dev->transport->transport_complete(cmd, | 611 | dev->transport->transport_complete(cmd, |
612 | cmd->t_data_sg, | 612 | cmd->t_data_sg, |
613 | transport_get_sense_buffer(cmd)); | 613 | transport_get_sense_buffer(cmd)); |
614 | if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) | 614 | if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) |
615 | success = 1; | 615 | success = 1; |
616 | } | 616 | } |
617 | 617 | ||
618 | /* | 618 | /* |
619 | * See if we are waiting to complete for an exception condition. | 619 | * See if we are waiting to complete for an exception condition. |
620 | */ | 620 | */ |
621 | if (cmd->transport_state & CMD_T_REQUEST_STOP) { | 621 | if (cmd->transport_state & CMD_T_REQUEST_STOP) { |
622 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 622 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
623 | complete(&cmd->task_stop_comp); | 623 | complete(&cmd->task_stop_comp); |
624 | return; | 624 | return; |
625 | } | 625 | } |
626 | 626 | ||
627 | if (!success) | 627 | if (!success) |
628 | cmd->transport_state |= CMD_T_FAILED; | 628 | cmd->transport_state |= CMD_T_FAILED; |
629 | 629 | ||
630 | /* | 630 | /* |
631 | * Check for the case where an explicit ABORT_TASK has been received | 631 | * Check for the case where an explicit ABORT_TASK has been received |
632 | * and transport_wait_for_tasks() will be waiting for completion. | 632 | * and transport_wait_for_tasks() will be waiting for completion. |
633 | */ | 633 | */ |
634 | if (cmd->transport_state & CMD_T_ABORTED && | 634 | if (cmd->transport_state & CMD_T_ABORTED && |
635 | cmd->transport_state & CMD_T_STOP) { | 635 | cmd->transport_state & CMD_T_STOP) { |
636 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 636 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
637 | complete(&cmd->t_transport_stop_comp); | 637 | complete(&cmd->t_transport_stop_comp); |
638 | return; | 638 | return; |
639 | } else if (cmd->transport_state & CMD_T_FAILED) { | 639 | } else if (cmd->transport_state & CMD_T_FAILED) { |
640 | cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 640 | cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
641 | INIT_WORK(&cmd->work, target_complete_failure_work); | 641 | INIT_WORK(&cmd->work, target_complete_failure_work); |
642 | } else { | 642 | } else { |
643 | INIT_WORK(&cmd->work, target_complete_ok_work); | 643 | INIT_WORK(&cmd->work, target_complete_ok_work); |
644 | } | 644 | } |
645 | 645 | ||
646 | cmd->t_state = TRANSPORT_COMPLETE; | 646 | cmd->t_state = TRANSPORT_COMPLETE; |
647 | cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE); | 647 | cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE); |
648 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 648 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
649 | 649 | ||
650 | queue_work(target_completion_wq, &cmd->work); | 650 | queue_work(target_completion_wq, &cmd->work); |
651 | } | 651 | } |
652 | EXPORT_SYMBOL(target_complete_cmd); | 652 | EXPORT_SYMBOL(target_complete_cmd); |
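For reference, a backend driver is expected to call target_complete_cmd() exactly once per command with the SAM status byte; GOOD queues target_complete_ok_work, while any other status normally routes through the failure work. Below is a minimal, hedged sketch of such a caller, assuming kernel context; my_backend_io and its se_cmd back-pointer are hypothetical names, not part of this file.

	/* Hedged sketch: my_backend_io and its fields are hypothetical;
	 * target_complete_cmd() is the symbol exported above. */
	static void my_backend_io_done(struct my_backend_io *io, int error)
	{
		struct se_cmd *cmd = io->se_cmd;	/* assumed back-pointer */

		/* GOOD schedules the ok work; any other status normally
		 * marks CMD_T_FAILED and routes through the failure work. */
		target_complete_cmd(cmd, error ? SAM_STAT_CHECK_CONDITION : GOOD);
	}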
653 | 653 | ||
654 | static void target_add_to_state_list(struct se_cmd *cmd) | 654 | static void target_add_to_state_list(struct se_cmd *cmd) |
655 | { | 655 | { |
656 | struct se_device *dev = cmd->se_dev; | 656 | struct se_device *dev = cmd->se_dev; |
657 | unsigned long flags; | 657 | unsigned long flags; |
658 | 658 | ||
659 | spin_lock_irqsave(&dev->execute_task_lock, flags); | 659 | spin_lock_irqsave(&dev->execute_task_lock, flags); |
660 | if (!cmd->state_active) { | 660 | if (!cmd->state_active) { |
661 | list_add_tail(&cmd->state_list, &dev->state_list); | 661 | list_add_tail(&cmd->state_list, &dev->state_list); |
662 | cmd->state_active = true; | 662 | cmd->state_active = true; |
663 | } | 663 | } |
664 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); | 664 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); |
665 | } | 665 | } |
666 | 666 | ||
667 | /* | 667 | /* |
668 | * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status | 668 | * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status |
669 | */ | 669 | */ |
670 | static void transport_write_pending_qf(struct se_cmd *cmd); | 670 | static void transport_write_pending_qf(struct se_cmd *cmd); |
671 | static void transport_complete_qf(struct se_cmd *cmd); | 671 | static void transport_complete_qf(struct se_cmd *cmd); |
672 | 672 | ||
673 | static void target_qf_do_work(struct work_struct *work) | 673 | static void target_qf_do_work(struct work_struct *work) |
674 | { | 674 | { |
675 | struct se_device *dev = container_of(work, struct se_device, | 675 | struct se_device *dev = container_of(work, struct se_device, |
676 | qf_work_queue); | 676 | qf_work_queue); |
677 | LIST_HEAD(qf_cmd_list); | 677 | LIST_HEAD(qf_cmd_list); |
678 | struct se_cmd *cmd, *cmd_tmp; | 678 | struct se_cmd *cmd, *cmd_tmp; |
679 | 679 | ||
680 | spin_lock_irq(&dev->qf_cmd_lock); | 680 | spin_lock_irq(&dev->qf_cmd_lock); |
681 | list_splice_init(&dev->qf_cmd_list, &qf_cmd_list); | 681 | list_splice_init(&dev->qf_cmd_list, &qf_cmd_list); |
682 | spin_unlock_irq(&dev->qf_cmd_lock); | 682 | spin_unlock_irq(&dev->qf_cmd_lock); |
683 | 683 | ||
684 | list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) { | 684 | list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) { |
685 | list_del(&cmd->se_qf_node); | 685 | list_del(&cmd->se_qf_node); |
686 | atomic_dec(&dev->dev_qf_count); | 686 | atomic_dec(&dev->dev_qf_count); |
687 | smp_mb__after_atomic_dec(); | 687 | smp_mb__after_atomic_dec(); |
688 | 688 | ||
689 | pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" | 689 | pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" |
690 | " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, | 690 | " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, |
691 | (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" : | 691 | (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" : |
692 | (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING" | 692 | (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING" |
693 | : "UNKNOWN"); | 693 | : "UNKNOWN"); |
694 | 694 | ||
695 | if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) | 695 | if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) |
696 | transport_write_pending_qf(cmd); | 696 | transport_write_pending_qf(cmd); |
697 | else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) | 697 | else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) |
698 | transport_complete_qf(cmd); | 698 | transport_complete_qf(cmd); |
699 | } | 699 | } |
700 | } | 700 | } |
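target_qf_do_work() uses the splice-then-walk idiom: steal the whole shared list onto a private head while holding the lock, then process entries with the lock dropped. A minimal user-space sketch of the same idiom, using a pthread mutex in place of qf_cmd_lock (all names are illustrative):

	#include <pthread.h>
	#include <stdio.h>

	struct node { int id; struct node *next; };

	static pthread_mutex_t qlock = PTHREAD_MUTEX_INITIALIZER;
	static struct node *queue;		/* shared, lock-protected */

	static void drain(void)
	{
		struct node *local, *n;

		pthread_mutex_lock(&qlock);
		local = queue;			/* splice: steal the whole list */
		queue = NULL;
		pthread_mutex_unlock(&qlock);

		for (n = local; n; n = n->next)	/* process without the lock held */
			printf("processing %d\n", n->id);
	}

	int main(void)
	{
		struct node b = { 2, NULL }, a = { 1, &b };

		queue = &a;
		drain();
		return 0;
	}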
701 | 701 | ||
702 | unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd) | 702 | unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd) |
703 | { | 703 | { |
704 | switch (cmd->data_direction) { | 704 | switch (cmd->data_direction) { |
705 | case DMA_NONE: | 705 | case DMA_NONE: |
706 | return "NONE"; | 706 | return "NONE"; |
707 | case DMA_FROM_DEVICE: | 707 | case DMA_FROM_DEVICE: |
708 | return "READ"; | 708 | return "READ"; |
709 | case DMA_TO_DEVICE: | 709 | case DMA_TO_DEVICE: |
710 | return "WRITE"; | 710 | return "WRITE"; |
711 | case DMA_BIDIRECTIONAL: | 711 | case DMA_BIDIRECTIONAL: |
712 | return "BIDI"; | 712 | return "BIDI"; |
713 | default: | 713 | default: |
714 | break; | 714 | break; |
715 | } | 715 | } |
716 | 716 | ||
717 | return "UNKNOWN"; | 717 | return "UNKNOWN"; |
718 | } | 718 | } |
719 | 719 | ||
720 | void transport_dump_dev_state( | 720 | void transport_dump_dev_state( |
721 | struct se_device *dev, | 721 | struct se_device *dev, |
722 | char *b, | 722 | char *b, |
723 | int *bl) | 723 | int *bl) |
724 | { | 724 | { |
725 | *bl += sprintf(b + *bl, "Status: "); | 725 | *bl += sprintf(b + *bl, "Status: "); |
726 | switch (dev->dev_status) { | 726 | switch (dev->dev_status) { |
727 | case TRANSPORT_DEVICE_ACTIVATED: | 727 | case TRANSPORT_DEVICE_ACTIVATED: |
728 | *bl += sprintf(b + *bl, "ACTIVATED"); | 728 | *bl += sprintf(b + *bl, "ACTIVATED"); |
729 | break; | 729 | break; |
730 | case TRANSPORT_DEVICE_DEACTIVATED: | 730 | case TRANSPORT_DEVICE_DEACTIVATED: |
731 | *bl += sprintf(b + *bl, "DEACTIVATED"); | 731 | *bl += sprintf(b + *bl, "DEACTIVATED"); |
732 | break; | 732 | break; |
733 | case TRANSPORT_DEVICE_SHUTDOWN: | 733 | case TRANSPORT_DEVICE_SHUTDOWN: |
734 | *bl += sprintf(b + *bl, "SHUTDOWN"); | 734 | *bl += sprintf(b + *bl, "SHUTDOWN"); |
735 | break; | 735 | break; |
736 | case TRANSPORT_DEVICE_OFFLINE_ACTIVATED: | 736 | case TRANSPORT_DEVICE_OFFLINE_ACTIVATED: |
737 | case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED: | 737 | case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED: |
738 | *bl += sprintf(b + *bl, "OFFLINE"); | 738 | *bl += sprintf(b + *bl, "OFFLINE"); |
739 | break; | 739 | break; |
740 | default: | 740 | default: |
741 | *bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status); | 741 | *bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status); |
742 | break; | 742 | break; |
743 | } | 743 | } |
744 | 744 | ||
745 | *bl += sprintf(b + *bl, " Max Queue Depth: %d", dev->queue_depth); | 745 | *bl += sprintf(b + *bl, " Max Queue Depth: %d", dev->queue_depth); |
746 | *bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n", | 746 | *bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n", |
747 | dev->se_sub_dev->se_dev_attrib.block_size, | 747 | dev->se_sub_dev->se_dev_attrib.block_size, |
748 | dev->se_sub_dev->se_dev_attrib.hw_max_sectors); | 748 | dev->se_sub_dev->se_dev_attrib.hw_max_sectors); |
749 | *bl += sprintf(b + *bl, " "); | 749 | *bl += sprintf(b + *bl, " "); |
750 | } | 750 | } |
751 | 751 | ||
752 | void transport_dump_vpd_proto_id( | 752 | void transport_dump_vpd_proto_id( |
753 | struct t10_vpd *vpd, | 753 | struct t10_vpd *vpd, |
754 | unsigned char *p_buf, | 754 | unsigned char *p_buf, |
755 | int p_buf_len) | 755 | int p_buf_len) |
756 | { | 756 | { |
757 | unsigned char buf[VPD_TMP_BUF_SIZE]; | 757 | unsigned char buf[VPD_TMP_BUF_SIZE]; |
758 | int len; | 758 | int len; |
759 | 759 | ||
760 | memset(buf, 0, VPD_TMP_BUF_SIZE); | 760 | memset(buf, 0, VPD_TMP_BUF_SIZE); |
761 | len = sprintf(buf, "T10 VPD Protocol Identifier: "); | 761 | len = sprintf(buf, "T10 VPD Protocol Identifier: "); |
762 | 762 | ||
763 | switch (vpd->protocol_identifier) { | 763 | switch (vpd->protocol_identifier) { |
764 | case 0x00: | 764 | case 0x00: |
765 | sprintf(buf+len, "Fibre Channel\n"); | 765 | sprintf(buf+len, "Fibre Channel\n"); |
766 | break; | 766 | break; |
767 | case 0x10: | 767 | case 0x10: |
768 | sprintf(buf+len, "Parallel SCSI\n"); | 768 | sprintf(buf+len, "Parallel SCSI\n"); |
769 | break; | 769 | break; |
770 | case 0x20: | 770 | case 0x20: |
771 | sprintf(buf+len, "SSA\n"); | 771 | sprintf(buf+len, "SSA\n"); |
772 | break; | 772 | break; |
773 | case 0x30: | 773 | case 0x30: |
774 | sprintf(buf+len, "IEEE 1394\n"); | 774 | sprintf(buf+len, "IEEE 1394\n"); |
775 | break; | 775 | break; |
776 | case 0x40: | 776 | case 0x40: |
777 | sprintf(buf+len, "SCSI Remote Direct Memory Access" | 777 | sprintf(buf+len, "SCSI Remote Direct Memory Access" |
778 | " Protocol\n"); | 778 | " Protocol\n"); |
779 | break; | 779 | break; |
780 | case 0x50: | 780 | case 0x50: |
781 | sprintf(buf+len, "Internet SCSI (iSCSI)\n"); | 781 | sprintf(buf+len, "Internet SCSI (iSCSI)\n"); |
782 | break; | 782 | break; |
783 | case 0x60: | 783 | case 0x60: |
784 | sprintf(buf+len, "SAS Serial SCSI Protocol\n"); | 784 | sprintf(buf+len, "SAS Serial SCSI Protocol\n"); |
785 | break; | 785 | break; |
786 | case 0x70: | 786 | case 0x70: |
787 | sprintf(buf+len, "Automation/Drive Interface Transport" | 787 | sprintf(buf+len, "Automation/Drive Interface Transport" |
788 | " Protocol\n"); | 788 | " Protocol\n"); |
789 | break; | 789 | break; |
790 | case 0x80: | 790 | case 0x80: |
791 | sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n"); | 791 | sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n"); |
792 | break; | 792 | break; |
793 | default: | 793 | default: |
794 | sprintf(buf+len, "Unknown 0x%02x\n", | 794 | sprintf(buf+len, "Unknown 0x%02x\n", |
795 | vpd->protocol_identifier); | 795 | vpd->protocol_identifier); |
796 | break; | 796 | break; |
797 | } | 797 | } |
798 | 798 | ||
799 | if (p_buf) | 799 | if (p_buf) |
800 | strncpy(p_buf, buf, p_buf_len); | 800 | strncpy(p_buf, buf, p_buf_len); |
801 | else | 801 | else |
802 | pr_debug("%s", buf); | 802 | pr_debug("%s", buf); |
803 | } | 803 | } |
804 | 804 | ||
805 | void | 805 | void |
806 | transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83) | 806 | transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83) |
807 | { | 807 | { |
808 | /* | 808 | /* |
809 | * Check if the Protocol Identifier Valid (PIV) bit is set. | 809 | * Check if the Protocol Identifier Valid (PIV) bit is set. |
810 | * | 810 | * |
811 | * from spc3r23.pdf section 7.5.1 | 811 | * from spc3r23.pdf section 7.5.1 |
812 | */ | 812 | */ |
813 | if (page_83[1] & 0x80) { | 813 | if (page_83[1] & 0x80) { |
814 | vpd->protocol_identifier = (page_83[0] & 0xf0); | 814 | vpd->protocol_identifier = (page_83[0] & 0xf0); |
815 | vpd->protocol_identifier_set = 1; | 815 | vpd->protocol_identifier_set = 1; |
816 | transport_dump_vpd_proto_id(vpd, NULL, 0); | 816 | transport_dump_vpd_proto_id(vpd, NULL, 0); |
817 | } | 817 | } |
818 | } | 818 | } |
819 | EXPORT_SYMBOL(transport_set_vpd_proto_id); | 819 | EXPORT_SYMBOL(transport_set_vpd_proto_id); |
820 | 820 | ||
821 | int transport_dump_vpd_assoc( | 821 | int transport_dump_vpd_assoc( |
822 | struct t10_vpd *vpd, | 822 | struct t10_vpd *vpd, |
823 | unsigned char *p_buf, | 823 | unsigned char *p_buf, |
824 | int p_buf_len) | 824 | int p_buf_len) |
825 | { | 825 | { |
826 | unsigned char buf[VPD_TMP_BUF_SIZE]; | 826 | unsigned char buf[VPD_TMP_BUF_SIZE]; |
827 | int ret = 0; | 827 | int ret = 0; |
828 | int len; | 828 | int len; |
829 | 829 | ||
830 | memset(buf, 0, VPD_TMP_BUF_SIZE); | 830 | memset(buf, 0, VPD_TMP_BUF_SIZE); |
831 | len = sprintf(buf, "T10 VPD Identifier Association: "); | 831 | len = sprintf(buf, "T10 VPD Identifier Association: "); |
832 | 832 | ||
833 | switch (vpd->association) { | 833 | switch (vpd->association) { |
834 | case 0x00: | 834 | case 0x00: |
835 | sprintf(buf+len, "addressed logical unit\n"); | 835 | sprintf(buf+len, "addressed logical unit\n"); |
836 | break; | 836 | break; |
837 | case 0x10: | 837 | case 0x10: |
838 | sprintf(buf+len, "target port\n"); | 838 | sprintf(buf+len, "target port\n"); |
839 | break; | 839 | break; |
840 | case 0x20: | 840 | case 0x20: |
841 | sprintf(buf+len, "SCSI target device\n"); | 841 | sprintf(buf+len, "SCSI target device\n"); |
842 | break; | 842 | break; |
843 | default: | 843 | default: |
844 | sprintf(buf+len, "Unknown 0x%02x\n", vpd->association); | 844 | sprintf(buf+len, "Unknown 0x%02x\n", vpd->association); |
845 | ret = -EINVAL; | 845 | ret = -EINVAL; |
846 | break; | 846 | break; |
847 | } | 847 | } |
848 | 848 | ||
849 | if (p_buf) | 849 | if (p_buf) |
850 | strncpy(p_buf, buf, p_buf_len); | 850 | strncpy(p_buf, buf, p_buf_len); |
851 | else | 851 | else |
852 | pr_debug("%s", buf); | 852 | pr_debug("%s", buf); |
853 | 853 | ||
854 | return ret; | 854 | return ret; |
855 | } | 855 | } |
856 | 856 | ||
857 | int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83) | 857 | int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83) |
858 | { | 858 | { |
859 | /* | 859 | /* |
860 | * The VPD identification association. | 860 | * The VPD identification association. |
861 | * | 861 | * |
862 | * from spc3r23.pdf Section 7.6.3.1 Table 297 | 862 | * from spc3r23.pdf Section 7.6.3.1 Table 297 |
863 | */ | 863 | */ |
864 | vpd->association = (page_83[1] & 0x30); | 864 | vpd->association = (page_83[1] & 0x30); |
865 | return transport_dump_vpd_assoc(vpd, NULL, 0); | 865 | return transport_dump_vpd_assoc(vpd, NULL, 0); |
866 | } | 866 | } |
867 | EXPORT_SYMBOL(transport_set_vpd_assoc); | 867 | EXPORT_SYMBOL(transport_set_vpd_assoc); |
868 | 868 | ||
869 | int transport_dump_vpd_ident_type( | 869 | int transport_dump_vpd_ident_type( |
870 | struct t10_vpd *vpd, | 870 | struct t10_vpd *vpd, |
871 | unsigned char *p_buf, | 871 | unsigned char *p_buf, |
872 | int p_buf_len) | 872 | int p_buf_len) |
873 | { | 873 | { |
874 | unsigned char buf[VPD_TMP_BUF_SIZE]; | 874 | unsigned char buf[VPD_TMP_BUF_SIZE]; |
875 | int ret = 0; | 875 | int ret = 0; |
876 | int len; | 876 | int len; |
877 | 877 | ||
878 | memset(buf, 0, VPD_TMP_BUF_SIZE); | 878 | memset(buf, 0, VPD_TMP_BUF_SIZE); |
879 | len = sprintf(buf, "T10 VPD Identifier Type: "); | 879 | len = sprintf(buf, "T10 VPD Identifier Type: "); |
880 | 880 | ||
881 | switch (vpd->device_identifier_type) { | 881 | switch (vpd->device_identifier_type) { |
882 | case 0x00: | 882 | case 0x00: |
883 | sprintf(buf+len, "Vendor specific\n"); | 883 | sprintf(buf+len, "Vendor specific\n"); |
884 | break; | 884 | break; |
885 | case 0x01: | 885 | case 0x01: |
886 | sprintf(buf+len, "T10 Vendor ID based\n"); | 886 | sprintf(buf+len, "T10 Vendor ID based\n"); |
887 | break; | 887 | break; |
888 | case 0x02: | 888 | case 0x02: |
889 | sprintf(buf+len, "EUI-64 based\n"); | 889 | sprintf(buf+len, "EUI-64 based\n"); |
890 | break; | 890 | break; |
891 | case 0x03: | 891 | case 0x03: |
892 | sprintf(buf+len, "NAA\n"); | 892 | sprintf(buf+len, "NAA\n"); |
893 | break; | 893 | break; |
894 | case 0x04: | 894 | case 0x04: |
895 | sprintf(buf+len, "Relative target port identifier\n"); | 895 | sprintf(buf+len, "Relative target port identifier\n"); |
896 | break; | 896 | break; |
897 | case 0x08: | 897 | case 0x08: |
898 | sprintf(buf+len, "SCSI name string\n"); | 898 | sprintf(buf+len, "SCSI name string\n"); |
899 | break; | 899 | break; |
900 | default: | 900 | default: |
901 | sprintf(buf+len, "Unsupported: 0x%02x\n", | 901 | sprintf(buf+len, "Unsupported: 0x%02x\n", |
902 | vpd->device_identifier_type); | 902 | vpd->device_identifier_type); |
903 | ret = -EINVAL; | 903 | ret = -EINVAL; |
904 | break; | 904 | break; |
905 | } | 905 | } |
906 | 906 | ||
907 | if (p_buf) { | 907 | if (p_buf) { |
908 | if (p_buf_len < strlen(buf)+1) | 908 | if (p_buf_len < strlen(buf)+1) |
909 | return -EINVAL; | 909 | return -EINVAL; |
910 | strncpy(p_buf, buf, p_buf_len); | 910 | strncpy(p_buf, buf, p_buf_len); |
911 | } else { | 911 | } else { |
912 | pr_debug("%s", buf); | 912 | pr_debug("%s", buf); |
913 | } | 913 | } |
914 | 914 | ||
915 | return ret; | 915 | return ret; |
916 | } | 916 | } |
917 | 917 | ||
918 | int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83) | 918 | int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83) |
919 | { | 919 | { |
920 | /* | 920 | /* |
921 | * The VPD identifier type. | 921 | * The VPD identifier type. |
922 | * | 922 | * |
923 | * from spc3r23.pdf Section 7.6.3.1 Table 298 | 923 | * from spc3r23.pdf Section 7.6.3.1 Table 298 |
924 | */ | 924 | */ |
925 | vpd->device_identifier_type = (page_83[1] & 0x0f); | 925 | vpd->device_identifier_type = (page_83[1] & 0x0f); |
926 | return transport_dump_vpd_ident_type(vpd, NULL, 0); | 926 | return transport_dump_vpd_ident_type(vpd, NULL, 0); |
927 | } | 927 | } |
928 | EXPORT_SYMBOL(transport_set_vpd_ident_type); | 928 | EXPORT_SYMBOL(transport_set_vpd_ident_type); |
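The three setters above all decode fields from the first two bytes of an INQUIRY EVPD 0x83 designation descriptor, keeping the masked values unshifted so they match the switch cases in the dump helpers. A small stand-alone decoder using the same masks, runnable in user space, is sketched below; the sample bytes are made up for illustration.

	#include <stdio.h>

	/* Mirrors the masks used by transport_set_vpd_{proto_id,assoc,ident_type}:
	 * values are kept unshifted, matching the switch cases above. */
	int main(void)
	{
		/* Example descriptor header: iSCSI protocol, binary code set,
		 * PIV set, "target port" association, NAA designator. */
		unsigned char page_83[2] = { 0x51, 0x93 };

		if (page_83[1] & 0x80)	/* PIV bit */
			printf("protocol identifier: 0x%02x\n", page_83[0] & 0xf0);
		printf("code set:    0x%02x\n", page_83[0] & 0x0f);
		printf("association: 0x%02x\n", page_83[1] & 0x30);
		printf("type:        0x%02x\n", page_83[1] & 0x0f);
		return 0;
	}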
929 | 929 | ||
930 | int transport_dump_vpd_ident( | 930 | int transport_dump_vpd_ident( |
931 | struct t10_vpd *vpd, | 931 | struct t10_vpd *vpd, |
932 | unsigned char *p_buf, | 932 | unsigned char *p_buf, |
933 | int p_buf_len) | 933 | int p_buf_len) |
934 | { | 934 | { |
935 | unsigned char buf[VPD_TMP_BUF_SIZE]; | 935 | unsigned char buf[VPD_TMP_BUF_SIZE]; |
936 | int ret = 0; | 936 | int ret = 0; |
937 | 937 | ||
938 | memset(buf, 0, VPD_TMP_BUF_SIZE); | 938 | memset(buf, 0, VPD_TMP_BUF_SIZE); |
939 | 939 | ||
940 | switch (vpd->device_identifier_code_set) { | 940 | switch (vpd->device_identifier_code_set) { |
941 | case 0x01: /* Binary */ | 941 | case 0x01: /* Binary */ |
942 | sprintf(buf, "T10 VPD Binary Device Identifier: %s\n", | 942 | sprintf(buf, "T10 VPD Binary Device Identifier: %s\n", |
943 | &vpd->device_identifier[0]); | 943 | &vpd->device_identifier[0]); |
944 | break; | 944 | break; |
945 | case 0x02: /* ASCII */ | 945 | case 0x02: /* ASCII */ |
946 | sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n", | 946 | sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n", |
947 | &vpd->device_identifier[0]); | 947 | &vpd->device_identifier[0]); |
948 | break; | 948 | break; |
949 | case 0x03: /* UTF-8 */ | 949 | case 0x03: /* UTF-8 */ |
950 | sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n", | 950 | sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n", |
951 | &vpd->device_identifier[0]); | 951 | &vpd->device_identifier[0]); |
952 | break; | 952 | break; |
953 | default: | 953 | default: |
954 | sprintf(buf, "T10 VPD Device Identifier encoding unsupported:" | 954 | sprintf(buf, "T10 VPD Device Identifier encoding unsupported:" |
955 | " 0x%02x", vpd->device_identifier_code_set); | 955 | " 0x%02x", vpd->device_identifier_code_set); |
956 | ret = -EINVAL; | 956 | ret = -EINVAL; |
957 | break; | 957 | break; |
958 | } | 958 | } |
959 | 959 | ||
960 | if (p_buf) | 960 | if (p_buf) |
961 | strncpy(p_buf, buf, p_buf_len); | 961 | strncpy(p_buf, buf, p_buf_len); |
962 | else | 962 | else |
963 | pr_debug("%s", buf); | 963 | pr_debug("%s", buf); |
964 | 964 | ||
965 | return ret; | 965 | return ret; |
966 | } | 966 | } |
967 | 967 | ||
968 | int | 968 | int |
969 | transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83) | 969 | transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83) |
970 | { | 970 | { |
971 | static const char hex_str[] = "0123456789abcdef"; | 971 | static const char hex_str[] = "0123456789abcdef"; |
972 | int j = 0, i = 4; /* offset to start of the identifier */ | 972 | int j = 0, i = 4; /* offset to start of the identifier */ |
973 | 973 | ||
974 | /* | 974 | /* |
975 | * The VPD Code Set (encoding) | 975 | * The VPD Code Set (encoding) |
976 | * | 976 | * |
977 | * from spc3r23.pdf Section 7.6.3.1 Table 296 | 977 | * from spc3r23.pdf Section 7.6.3.1 Table 296 |
978 | */ | 978 | */ |
979 | vpd->device_identifier_code_set = (page_83[0] & 0x0f); | 979 | vpd->device_identifier_code_set = (page_83[0] & 0x0f); |
980 | switch (vpd->device_identifier_code_set) { | 980 | switch (vpd->device_identifier_code_set) { |
981 | case 0x01: /* Binary */ | 981 | case 0x01: /* Binary */ |
982 | vpd->device_identifier[j++] = | 982 | vpd->device_identifier[j++] = |
983 | hex_str[vpd->device_identifier_type]; | 983 | hex_str[vpd->device_identifier_type]; |
984 | while (i < (4 + page_83[3])) { | 984 | while (i < (4 + page_83[3])) { |
985 | vpd->device_identifier[j++] = | 985 | vpd->device_identifier[j++] = |
986 | hex_str[(page_83[i] & 0xf0) >> 4]; | 986 | hex_str[(page_83[i] & 0xf0) >> 4]; |
987 | vpd->device_identifier[j++] = | 987 | vpd->device_identifier[j++] = |
988 | hex_str[page_83[i] & 0x0f]; | 988 | hex_str[page_83[i] & 0x0f]; |
989 | i++; | 989 | i++; |
990 | } | 990 | } |
991 | break; | 991 | break; |
992 | case 0x02: /* ASCII */ | 992 | case 0x02: /* ASCII */ |
993 | case 0x03: /* UTF-8 */ | 993 | case 0x03: /* UTF-8 */ |
994 | while (i < (4 + page_83[3])) | 994 | while (i < (4 + page_83[3])) |
995 | vpd->device_identifier[j++] = page_83[i++]; | 995 | vpd->device_identifier[j++] = page_83[i++]; |
996 | break; | 996 | break; |
997 | default: | 997 | default: |
998 | break; | 998 | break; |
999 | } | 999 | } |
1000 | 1000 | ||
1001 | return transport_dump_vpd_ident(vpd, NULL, 0); | 1001 | return transport_dump_vpd_ident(vpd, NULL, 0); |
1002 | } | 1002 | } |
1003 | EXPORT_SYMBOL(transport_set_vpd_ident); | 1003 | EXPORT_SYMBOL(transport_set_vpd_ident); |
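For the binary code set, the loop above nibble-encodes page_83[3] payload bytes (starting at offset 4) into lowercase ASCII hex. The same transformation, extracted into a runnable user-space sketch with a made-up two-byte payload:

	#include <stdio.h>

	int main(void)
	{
		static const char hex_str[] = "0123456789abcdef";
		/* 4-byte descriptor header (length in byte 3) + 2 payload bytes */
		unsigned char page_83[] = { 0x01, 0x03, 0x00, 0x02, 0xde, 0xad };
		char ident[16];
		int j = 0, i = 4;	/* offset to start of the identifier */

		while (i < 4 + page_83[3]) {
			ident[j++] = hex_str[(page_83[i] & 0xf0) >> 4];
			ident[j++] = hex_str[page_83[i] & 0x0f];
			i++;
		}
		ident[j] = '\0';
		printf("%s\n", ident);	/* prints "dead" */
		return 0;
	}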
1004 | 1004 | ||
1005 | static void core_setup_task_attr_emulation(struct se_device *dev) | 1005 | static void core_setup_task_attr_emulation(struct se_device *dev) |
1006 | { | 1006 | { |
1007 | /* | 1007 | /* |
1008 | * If this device is from Target_Core_Mod/pSCSI, disable the | 1008 | * If this device is from Target_Core_Mod/pSCSI, disable the |
1009 | * SAM Task Attribute emulation. | 1009 | * SAM Task Attribute emulation. |
1010 | * | 1010 | * |
1011 | * This is currently not available in upstream Linux/SCSI Target | 1011 | * This is currently not available in upstream Linux/SCSI Target |
1012 | * mode code, and is assumed to be disabled while using TCM/pSCSI. | 1012 | * mode code, and is assumed to be disabled while using TCM/pSCSI. |
1013 | */ | 1013 | */ |
1014 | if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { | 1014 | if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { |
1015 | dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH; | 1015 | dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH; |
1016 | return; | 1016 | return; |
1017 | } | 1017 | } |
1018 | 1018 | ||
1019 | dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED; | 1019 | dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED; |
1020 | pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x" | 1020 | pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x" |
1021 | " device\n", dev->transport->name, | 1021 | " device\n", dev->transport->name, |
1022 | dev->transport->get_device_rev(dev)); | 1022 | dev->transport->get_device_rev(dev)); |
1023 | } | 1023 | } |
1024 | 1024 | ||
1025 | static void scsi_dump_inquiry(struct se_device *dev) | 1025 | static void scsi_dump_inquiry(struct se_device *dev) |
1026 | { | 1026 | { |
1027 | struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn; | 1027 | struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn; |
1028 | char buf[17]; | 1028 | char buf[17]; |
1029 | int i, device_type; | 1029 | int i, device_type; |
1030 | /* | 1030 | /* |
1031 | * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer | 1031 | * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer |
1032 | */ | 1032 | */ |
1033 | for (i = 0; i < 8; i++) | 1033 | for (i = 0; i < 8; i++) |
1034 | if (wwn->vendor[i] >= 0x20) | 1034 | if (wwn->vendor[i] >= 0x20) |
1035 | buf[i] = wwn->vendor[i]; | 1035 | buf[i] = wwn->vendor[i]; |
1036 | else | 1036 | else |
1037 | buf[i] = ' '; | 1037 | buf[i] = ' '; |
1038 | buf[i] = '\0'; | 1038 | buf[i] = '\0'; |
1039 | pr_debug(" Vendor: %s\n", buf); | 1039 | pr_debug(" Vendor: %s\n", buf); |
1040 | 1040 | ||
1041 | for (i = 0; i < 16; i++) | 1041 | for (i = 0; i < 16; i++) |
1042 | if (wwn->model[i] >= 0x20) | 1042 | if (wwn->model[i] >= 0x20) |
1043 | buf[i] = wwn->model[i]; | 1043 | buf[i] = wwn->model[i]; |
1044 | else | 1044 | else |
1045 | buf[i] = ' '; | 1045 | buf[i] = ' '; |
1046 | buf[i] = '\0'; | 1046 | buf[i] = '\0'; |
1047 | pr_debug(" Model: %s\n", buf); | 1047 | pr_debug(" Model: %s\n", buf); |
1048 | 1048 | ||
1049 | for (i = 0; i < 4; i++) | 1049 | for (i = 0; i < 4; i++) |
1050 | if (wwn->revision[i] >= 0x20) | 1050 | if (wwn->revision[i] >= 0x20) |
1051 | buf[i] = wwn->revision[i]; | 1051 | buf[i] = wwn->revision[i]; |
1052 | else | 1052 | else |
1053 | buf[i] = ' '; | 1053 | buf[i] = ' '; |
1054 | buf[i] = '\0'; | 1054 | buf[i] = '\0'; |
1055 | pr_debug(" Revision: %s\n", buf); | 1055 | pr_debug(" Revision: %s\n", buf); |
1056 | 1056 | ||
1057 | device_type = dev->transport->get_device_type(dev); | 1057 | device_type = dev->transport->get_device_type(dev); |
1058 | pr_debug(" Type: %s ", scsi_device_type(device_type)); | 1058 | pr_debug(" Type: %s ", scsi_device_type(device_type)); |
1059 | pr_debug(" ANSI SCSI revision: %02x\n", | 1059 | pr_debug(" ANSI SCSI revision: %02x\n", |
1060 | dev->transport->get_device_rev(dev)); | 1060 | dev->transport->get_device_rev(dev)); |
1061 | } | 1061 | } |
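Each loop above copies a fixed-width INQUIRY field, replacing any byte below 0x20 with a space before NUL-terminating, so control bytes never reach the log. A minimal sketch of that sanitize-and-terminate idiom (the sample vendor string is illustrative):

	#include <stdio.h>

	/* Copy a fixed-width field, blanking control bytes, as the
	 * vendor/model/revision loops above do. */
	static void copy_field(char *dst, const unsigned char *src, int width)
	{
		int i;

		for (i = 0; i < width; i++)
			dst[i] = (src[i] >= 0x20) ? src[i] : ' ';
		dst[width] = '\0';
	}

	int main(void)
	{
		unsigned char vendor[8] = "LIO\0ORG";	/* embedded NUL */
		char buf[9];

		copy_field(buf, vendor, 8);
		printf("Vendor: '%s'\n", buf);	/* NUL bytes shown as spaces */
		return 0;
	}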
1062 | 1062 | ||
1063 | struct se_device *transport_add_device_to_core_hba( | 1063 | struct se_device *transport_add_device_to_core_hba( |
1064 | struct se_hba *hba, | 1064 | struct se_hba *hba, |
1065 | struct se_subsystem_api *transport, | 1065 | struct se_subsystem_api *transport, |
1066 | struct se_subsystem_dev *se_dev, | 1066 | struct se_subsystem_dev *se_dev, |
1067 | u32 device_flags, | 1067 | u32 device_flags, |
1068 | void *transport_dev, | 1068 | void *transport_dev, |
1069 | struct se_dev_limits *dev_limits, | 1069 | struct se_dev_limits *dev_limits, |
1070 | const char *inquiry_prod, | 1070 | const char *inquiry_prod, |
1071 | const char *inquiry_rev) | 1071 | const char *inquiry_rev) |
1072 | { | 1072 | { |
1073 | int force_pt; | 1073 | int force_pt; |
1074 | struct se_device *dev; | 1074 | struct se_device *dev; |
1075 | 1075 | ||
1076 | dev = kzalloc(sizeof(struct se_device), GFP_KERNEL); | 1076 | dev = kzalloc(sizeof(struct se_device), GFP_KERNEL); |
1077 | if (!dev) { | 1077 | if (!dev) { |
1078 | pr_err("Unable to allocate memory for se_dev_t\n"); | 1078 | pr_err("Unable to allocate memory for se_dev_t\n"); |
1079 | return NULL; | 1079 | return NULL; |
1080 | } | 1080 | } |
1081 | 1081 | ||
1082 | dev->dev_flags = device_flags; | 1082 | dev->dev_flags = device_flags; |
1083 | dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED; | 1083 | dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED; |
1084 | dev->dev_ptr = transport_dev; | 1084 | dev->dev_ptr = transport_dev; |
1085 | dev->se_hba = hba; | 1085 | dev->se_hba = hba; |
1086 | dev->se_sub_dev = se_dev; | 1086 | dev->se_sub_dev = se_dev; |
1087 | dev->transport = transport; | 1087 | dev->transport = transport; |
1088 | INIT_LIST_HEAD(&dev->dev_list); | 1088 | INIT_LIST_HEAD(&dev->dev_list); |
1089 | INIT_LIST_HEAD(&dev->dev_sep_list); | 1089 | INIT_LIST_HEAD(&dev->dev_sep_list); |
1090 | INIT_LIST_HEAD(&dev->dev_tmr_list); | 1090 | INIT_LIST_HEAD(&dev->dev_tmr_list); |
1091 | INIT_LIST_HEAD(&dev->delayed_cmd_list); | 1091 | INIT_LIST_HEAD(&dev->delayed_cmd_list); |
1092 | INIT_LIST_HEAD(&dev->state_list); | 1092 | INIT_LIST_HEAD(&dev->state_list); |
1093 | INIT_LIST_HEAD(&dev->qf_cmd_list); | 1093 | INIT_LIST_HEAD(&dev->qf_cmd_list); |
1094 | spin_lock_init(&dev->execute_task_lock); | 1094 | spin_lock_init(&dev->execute_task_lock); |
1095 | spin_lock_init(&dev->delayed_cmd_lock); | 1095 | spin_lock_init(&dev->delayed_cmd_lock); |
1096 | spin_lock_init(&dev->dev_reservation_lock); | 1096 | spin_lock_init(&dev->dev_reservation_lock); |
1097 | spin_lock_init(&dev->dev_status_lock); | 1097 | spin_lock_init(&dev->dev_status_lock); |
1098 | spin_lock_init(&dev->se_port_lock); | 1098 | spin_lock_init(&dev->se_port_lock); |
1099 | spin_lock_init(&dev->se_tmr_lock); | 1099 | spin_lock_init(&dev->se_tmr_lock); |
1100 | spin_lock_init(&dev->qf_cmd_lock); | 1100 | spin_lock_init(&dev->qf_cmd_lock); |
1101 | atomic_set(&dev->dev_ordered_id, 0); | 1101 | atomic_set(&dev->dev_ordered_id, 0); |
1102 | 1102 | ||
1103 | se_dev_set_default_attribs(dev, dev_limits); | 1103 | se_dev_set_default_attribs(dev, dev_limits); |
1104 | 1104 | ||
1105 | dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX); | 1105 | dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX); |
1106 | dev->creation_time = get_jiffies_64(); | 1106 | dev->creation_time = get_jiffies_64(); |
1107 | spin_lock_init(&dev->stats_lock); | 1107 | spin_lock_init(&dev->stats_lock); |
1108 | 1108 | ||
1109 | spin_lock(&hba->device_lock); | 1109 | spin_lock(&hba->device_lock); |
1110 | list_add_tail(&dev->dev_list, &hba->hba_dev_list); | 1110 | list_add_tail(&dev->dev_list, &hba->hba_dev_list); |
1111 | hba->dev_count++; | 1111 | hba->dev_count++; |
1112 | spin_unlock(&hba->device_lock); | 1112 | spin_unlock(&hba->device_lock); |
1113 | /* | 1113 | /* |
1114 | * Setup the SAM Task Attribute emulation for struct se_device | 1114 | * Setup the SAM Task Attribute emulation for struct se_device |
1115 | */ | 1115 | */ |
1116 | core_setup_task_attr_emulation(dev); | 1116 | core_setup_task_attr_emulation(dev); |
1117 | /* | 1117 | /* |
1118 | * Force PR and ALUA passthrough emulation with internal object use. | 1118 | * Force PR and ALUA passthrough emulation with internal object use. |
1119 | */ | 1119 | */ |
1120 | force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE); | 1120 | force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE); |
1121 | /* | 1121 | /* |
1122 | * Setup the Reservations infrastructure for struct se_device | 1122 | * Setup the Reservations infrastructure for struct se_device |
1123 | */ | 1123 | */ |
1124 | core_setup_reservations(dev, force_pt); | 1124 | core_setup_reservations(dev, force_pt); |
1125 | /* | 1125 | /* |
1126 | * Setup the Asymmetric Logical Unit Assignment for struct se_device | 1126 | * Setup the Asymmetric Logical Unit Assignment for struct se_device |
1127 | */ | 1127 | */ |
1128 | if (core_setup_alua(dev, force_pt) < 0) | 1128 | if (core_setup_alua(dev, force_pt) < 0) |
1129 | goto err_dev_list; | 1129 | goto err_dev_list; |
1130 | 1130 | ||
1131 | /* | 1131 | /* |
1132 | * Startup the struct se_device processing thread | 1132 | * Startup the struct se_device processing thread |
1133 | */ | 1133 | */ |
1134 | dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1, | 1134 | dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1, |
1135 | dev->transport->name); | 1135 | dev->transport->name); |
1136 | if (!dev->tmr_wq) { | 1136 | if (!dev->tmr_wq) { |
1137 | pr_err("Unable to create tmr workqueue for %s\n", | 1137 | pr_err("Unable to create tmr workqueue for %s\n", |
1138 | dev->transport->name); | 1138 | dev->transport->name); |
1139 | goto err_dev_list; | 1139 | goto err_dev_list; |
1140 | } | 1140 | } |
1141 | /* | 1141 | /* |
1142 | * Setup work_queue for QUEUE_FULL | 1142 | * Setup work_queue for QUEUE_FULL |
1143 | */ | 1143 | */ |
1144 | INIT_WORK(&dev->qf_work_queue, target_qf_do_work); | 1144 | INIT_WORK(&dev->qf_work_queue, target_qf_do_work); |
1145 | /* | 1145 | /* |
1146 | * Preload the initial INQUIRY const values if we are doing | 1146 | * Preload the initial INQUIRY const values if we are doing |
1147 | * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI | 1147 | * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI |
1148 | * passthrough because this is being provided by the backend LLD. | 1148 | * passthrough because this is being provided by the backend LLD. |
1149 | * This is required so that transport_get_inquiry() copies these | 1149 | * This is required so that transport_get_inquiry() copies these |
1150 | * originals once back into DEV_T10_WWN(dev) for the virtual device | 1150 | * originals once back into DEV_T10_WWN(dev) for the virtual device |
1151 | * setup. | 1151 | * setup. |
1152 | */ | 1152 | */ |
1153 | if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) { | 1153 | if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) { |
1154 | if (!inquiry_prod || !inquiry_rev) { | 1154 | if (!inquiry_prod || !inquiry_rev) { |
1155 | pr_err("All non TCM/pSCSI plugins require" | 1155 | pr_err("All non TCM/pSCSI plugins require" |
1156 | " INQUIRY consts\n"); | 1156 | " INQUIRY consts\n"); |
1157 | goto err_wq; | 1157 | goto err_wq; |
1158 | } | 1158 | } |
1159 | 1159 | ||
1160 | strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8); | 1160 | strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8); |
1161 | strncpy(&dev->se_sub_dev->t10_wwn.model[0], inquiry_prod, 16); | 1161 | strncpy(&dev->se_sub_dev->t10_wwn.model[0], inquiry_prod, 16); |
1162 | strncpy(&dev->se_sub_dev->t10_wwn.revision[0], inquiry_rev, 4); | 1162 | strncpy(&dev->se_sub_dev->t10_wwn.revision[0], inquiry_rev, 4); |
1163 | } | 1163 | } |
1164 | scsi_dump_inquiry(dev); | 1164 | scsi_dump_inquiry(dev); |
1165 | 1165 | ||
1166 | return dev; | 1166 | return dev; |
1167 | 1167 | ||
1168 | err_wq: | 1168 | err_wq: |
1169 | destroy_workqueue(dev->tmr_wq); | 1169 | destroy_workqueue(dev->tmr_wq); |
1170 | err_dev_list: | 1170 | err_dev_list: |
1171 | spin_lock(&hba->device_lock); | 1171 | spin_lock(&hba->device_lock); |
1172 | list_del(&dev->dev_list); | 1172 | list_del(&dev->dev_list); |
1173 | hba->dev_count--; | 1173 | hba->dev_count--; |
1174 | spin_unlock(&hba->device_lock); | 1174 | spin_unlock(&hba->device_lock); |
1175 | 1175 | ||
1176 | se_release_vpd_for_dev(dev); | 1176 | se_release_vpd_for_dev(dev); |
1177 | 1177 | ||
1178 | kfree(dev); | 1178 | kfree(dev); |
1179 | 1179 | ||
1180 | return NULL; | 1180 | return NULL; |
1181 | } | 1181 | } |
1182 | EXPORT_SYMBOL(transport_add_device_to_core_hba); | 1182 | EXPORT_SYMBOL(transport_add_device_to_core_hba); |
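The err_wq and err_dev_list labels unwind in reverse order of setup: a failure after the tmr workqueue exists destroys it first, then control falls through to unlink the device from the HBA list and free it. A generic, runnable sketch of this reverse-order goto cleanup idiom:

	#include <stdio.h>
	#include <stdlib.h>

	/* Reverse-order unwind, as in transport_add_device_to_core_hba():
	 * each failure path releases only what was acquired before it. */
	static int setup(void)
	{
		char *a, *b;

		a = malloc(16);
		if (!a)
			goto err;
		b = malloc(16);
		if (!b)
			goto err_a;

		puts("fully set up");
		free(b);
		free(a);
		return 0;

	err_a:
		free(a);	/* undo the first step only */
	err:
		return -1;
	}

	int main(void)
	{
		return setup() ? EXIT_FAILURE : EXIT_SUCCESS;
	}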
1183 | 1183 | ||
1184 | int target_cmd_size_check(struct se_cmd *cmd, unsigned int size) | 1184 | int target_cmd_size_check(struct se_cmd *cmd, unsigned int size) |
1185 | { | 1185 | { |
1186 | struct se_device *dev = cmd->se_dev; | 1186 | struct se_device *dev = cmd->se_dev; |
1187 | 1187 | ||
1188 | if (cmd->unknown_data_length) { | 1188 | if (cmd->unknown_data_length) { |
1189 | cmd->data_length = size; | 1189 | cmd->data_length = size; |
1190 | } else if (size != cmd->data_length) { | 1190 | } else if (size != cmd->data_length) { |
1191 | pr_warn("TARGET_CORE[%s]: Expected Transfer Length:" | 1191 | pr_warn("TARGET_CORE[%s]: Expected Transfer Length:" |
1192 | " %u does not match SCSI CDB Length: %u for SAM Opcode:" | 1192 | " %u does not match SCSI CDB Length: %u for SAM Opcode:" |
1193 | " 0x%02x\n", cmd->se_tfo->get_fabric_name(), | 1193 | " 0x%02x\n", cmd->se_tfo->get_fabric_name(), |
1194 | cmd->data_length, size, cmd->t_task_cdb[0]); | 1194 | cmd->data_length, size, cmd->t_task_cdb[0]); |
1195 | 1195 | ||
1196 | if (cmd->data_direction == DMA_TO_DEVICE) { | 1196 | if (cmd->data_direction == DMA_TO_DEVICE) { |
1197 | pr_err("Rejecting underflow/overflow" | 1197 | pr_err("Rejecting underflow/overflow" |
1198 | " WRITE data\n"); | 1198 | " WRITE data\n"); |
1199 | goto out_invalid_cdb_field; | 1199 | goto out_invalid_cdb_field; |
1200 | } | 1200 | } |
1201 | /* | 1201 | /* |
1202 | * Reject READ_* or WRITE_* with overflow/underflow for | 1202 | * Reject READ_* or WRITE_* with overflow/underflow for |
1203 | * type SCF_SCSI_DATA_CDB. | 1203 | * type SCF_SCSI_DATA_CDB. |
1204 | */ | 1204 | */ |
1205 | if (dev->se_sub_dev->se_dev_attrib.block_size != 512) { | 1205 | if (dev->se_sub_dev->se_dev_attrib.block_size != 512) { |
1206 | pr_err("Failing OVERFLOW/UNDERFLOW for LBA op" | 1206 | pr_err("Failing OVERFLOW/UNDERFLOW for LBA op" |
1207 | " CDB on non 512-byte sector setup subsystem" | 1207 | " CDB on non 512-byte sector setup subsystem" |
1208 | " plugin: %s\n", dev->transport->name); | 1208 | " plugin: %s\n", dev->transport->name); |
1209 | /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */ | 1209 | /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */ |
1210 | goto out_invalid_cdb_field; | 1210 | goto out_invalid_cdb_field; |
1211 | } | 1211 | } |
1212 | /* | 1212 | /* |
1213 | * For the overflow case keep the existing fabric provided | 1213 | * For the overflow case keep the existing fabric provided |
1214 | * ->data_length. Otherwise for the underflow case, reset | 1214 | * ->data_length. Otherwise for the underflow case, reset |
1215 | * ->data_length to the smaller SCSI expected data transfer | 1215 | * ->data_length to the smaller SCSI expected data transfer |
1216 | * length. | 1216 | * length. |
1217 | */ | 1217 | */ |
1218 | if (size > cmd->data_length) { | 1218 | if (size > cmd->data_length) { |
1219 | cmd->se_cmd_flags |= SCF_OVERFLOW_BIT; | 1219 | cmd->se_cmd_flags |= SCF_OVERFLOW_BIT; |
1220 | cmd->residual_count = (size - cmd->data_length); | 1220 | cmd->residual_count = (size - cmd->data_length); |
1221 | } else { | 1221 | } else { |
1222 | cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; | 1222 | cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; |
1223 | cmd->residual_count = (cmd->data_length - size); | 1223 | cmd->residual_count = (cmd->data_length - size); |
1224 | cmd->data_length = size; | 1224 | cmd->data_length = size; |
1225 | } | 1225 | } |
1226 | } | 1226 | } |
1227 | 1227 | ||
1228 | return 0; | 1228 | return 0; |
1229 | 1229 | ||
1230 | out_invalid_cdb_field: | 1230 | out_invalid_cdb_field: |
1231 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 1231 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
1232 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; | 1232 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; |
1233 | return -EINVAL; | 1233 | return -EINVAL; |
1234 | } | 1234 | } |
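Concretely: with a fabric-provided data_length of 8192 and a CDB that encodes 4096, the command is flagged UNDERFLOW, residual_count becomes 4096, and data_length shrinks to 4096; with the lengths swapped it is flagged OVERFLOW and the fabric length is kept. A runnable sketch of that residual computation, using local stand-ins for the SCF_* flag bits:

	#include <stdio.h>

	#define OVERFLOW_BIT  (1 << 0)	/* stand-ins for SCF_{OVER,UNDER}FLOW_BIT */
	#define UNDERFLOW_BIT (1 << 1)

	static void size_check(unsigned int size, unsigned int data_length)
	{
		unsigned int flags = 0, residual, final_len = data_length;

		if (size > data_length) {	/* overflow: keep fabric length */
			flags |= OVERFLOW_BIT;
			residual = size - data_length;
		} else {			/* underflow: shrink to CDB size */
			flags |= UNDERFLOW_BIT;
			residual = data_length - size;
			final_len = size;
		}
		printf("flags=0x%x residual=%u data_length=%u\n",
		       flags, residual, final_len);
	}

	int main(void)
	{
		size_check(4096, 8192);	/* underflow: residual 4096, len 4096 */
		size_check(8192, 4096);	/* overflow:  residual 4096, len 4096 */
		return 0;
	}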
1235 | 1235 | ||
1236 | /* | 1236 | /* |
1237 | * Used by fabric modules containing a local struct se_cmd within their | 1237 | * Used by fabric modules containing a local struct se_cmd within their |
1238 | * fabric dependent per I/O descriptor. | 1238 | * fabric dependent per I/O descriptor. |
1239 | */ | 1239 | */ |
1240 | void transport_init_se_cmd( | 1240 | void transport_init_se_cmd( |
1241 | struct se_cmd *cmd, | 1241 | struct se_cmd *cmd, |
1242 | struct target_core_fabric_ops *tfo, | 1242 | struct target_core_fabric_ops *tfo, |
1243 | struct se_session *se_sess, | 1243 | struct se_session *se_sess, |
1244 | u32 data_length, | 1244 | u32 data_length, |
1245 | int data_direction, | 1245 | int data_direction, |
1246 | int task_attr, | 1246 | int task_attr, |
1247 | unsigned char *sense_buffer) | 1247 | unsigned char *sense_buffer) |
1248 | { | 1248 | { |
1249 | INIT_LIST_HEAD(&cmd->se_lun_node); | 1249 | INIT_LIST_HEAD(&cmd->se_lun_node); |
1250 | INIT_LIST_HEAD(&cmd->se_delayed_node); | 1250 | INIT_LIST_HEAD(&cmd->se_delayed_node); |
1251 | INIT_LIST_HEAD(&cmd->se_qf_node); | 1251 | INIT_LIST_HEAD(&cmd->se_qf_node); |
1252 | INIT_LIST_HEAD(&cmd->se_cmd_list); | 1252 | INIT_LIST_HEAD(&cmd->se_cmd_list); |
1253 | INIT_LIST_HEAD(&cmd->state_list); | 1253 | INIT_LIST_HEAD(&cmd->state_list); |
1254 | init_completion(&cmd->transport_lun_fe_stop_comp); | 1254 | init_completion(&cmd->transport_lun_fe_stop_comp); |
1255 | init_completion(&cmd->transport_lun_stop_comp); | 1255 | init_completion(&cmd->transport_lun_stop_comp); |
1256 | init_completion(&cmd->t_transport_stop_comp); | 1256 | init_completion(&cmd->t_transport_stop_comp); |
1257 | init_completion(&cmd->cmd_wait_comp); | 1257 | init_completion(&cmd->cmd_wait_comp); |
1258 | init_completion(&cmd->task_stop_comp); | 1258 | init_completion(&cmd->task_stop_comp); |
1259 | spin_lock_init(&cmd->t_state_lock); | 1259 | spin_lock_init(&cmd->t_state_lock); |
1260 | cmd->transport_state = CMD_T_DEV_ACTIVE; | 1260 | cmd->transport_state = CMD_T_DEV_ACTIVE; |
1261 | 1261 | ||
1262 | cmd->se_tfo = tfo; | 1262 | cmd->se_tfo = tfo; |
1263 | cmd->se_sess = se_sess; | 1263 | cmd->se_sess = se_sess; |
1264 | cmd->data_length = data_length; | 1264 | cmd->data_length = data_length; |
1265 | cmd->data_direction = data_direction; | 1265 | cmd->data_direction = data_direction; |
1266 | cmd->sam_task_attr = task_attr; | 1266 | cmd->sam_task_attr = task_attr; |
1267 | cmd->sense_buffer = sense_buffer; | 1267 | cmd->sense_buffer = sense_buffer; |
1268 | 1268 | ||
1269 | cmd->state_active = false; | 1269 | cmd->state_active = false; |
1270 | } | 1270 | } |
1271 | EXPORT_SYMBOL(transport_init_se_cmd); | 1271 | EXPORT_SYMBOL(transport_init_se_cmd); |
1272 | 1272 | ||
1273 | static int transport_check_alloc_task_attr(struct se_cmd *cmd) | 1273 | static int transport_check_alloc_task_attr(struct se_cmd *cmd) |
1274 | { | 1274 | { |
1275 | /* | 1275 | /* |
1276 | * Check if SAM Task Attribute emulation is enabled for this | 1276 | * Check if SAM Task Attribute emulation is enabled for this |
1277 | * struct se_device storage object | 1277 | * struct se_device storage object |
1278 | */ | 1278 | */ |
1279 | if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) | 1279 | if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) |
1280 | return 0; | 1280 | return 0; |
1281 | 1281 | ||
1282 | if (cmd->sam_task_attr == MSG_ACA_TAG) { | 1282 | if (cmd->sam_task_attr == MSG_ACA_TAG) { |
1283 | pr_debug("SAM Task Attribute ACA" | 1283 | pr_debug("SAM Task Attribute ACA" |
1284 | " emulation is not supported\n"); | 1284 | " emulation is not supported\n"); |
1285 | return -EINVAL; | 1285 | return -EINVAL; |
1286 | } | 1286 | } |
1287 | /* | 1287 | /* |
1288 | * Used to determine when ORDERED commands should go from | 1288 | * Used to determine when ORDERED commands should go from |
1289 | * Dormant to Active status. | 1289 | * Dormant to Active status. |
1290 | */ | 1290 | */ |
1291 | cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id); | 1291 | cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id); |
1292 | smp_mb__after_atomic_inc(); | 1292 | smp_mb__after_atomic_inc(); |
1293 | pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n", | 1293 | pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n", |
1294 | cmd->se_ordered_id, cmd->sam_task_attr, | 1294 | cmd->se_ordered_id, cmd->sam_task_attr, |
1295 | cmd->se_dev->transport->name); | 1295 | cmd->se_dev->transport->name); |
1296 | return 0; | 1296 | return 0; |
1297 | } | 1297 | } |
1298 | 1298 | ||
1299 | /* target_setup_cmd_from_cdb(): | 1299 | /* target_setup_cmd_from_cdb(): |
1300 | * | 1300 | * |
1301 | * Called from fabric RX Thread. | 1301 | * Called from fabric RX Thread. |
1302 | */ | 1302 | */ |
1303 | int target_setup_cmd_from_cdb( | 1303 | int target_setup_cmd_from_cdb( |
1304 | struct se_cmd *cmd, | 1304 | struct se_cmd *cmd, |
1305 | unsigned char *cdb) | 1305 | unsigned char *cdb) |
1306 | { | 1306 | { |
1307 | struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev; | 1307 | struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev; |
1308 | u32 pr_reg_type = 0; | 1308 | u32 pr_reg_type = 0; |
1309 | u8 alua_ascq = 0; | 1309 | u8 alua_ascq = 0; |
1310 | unsigned long flags; | 1310 | unsigned long flags; |
1311 | int ret; | 1311 | int ret; |
1312 | 1312 | ||
1313 | /* | 1313 | /* |
1314 | * Ensure that the received CDB is less than the max (252 + 8) bytes | 1314 | * Ensure that the received CDB is less than the max (252 + 8) bytes |
1315 | * for VARIABLE_LENGTH_CMD | 1315 | * for VARIABLE_LENGTH_CMD |
1316 | */ | 1316 | */ |
1317 | if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) { | 1317 | if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) { |
1318 | pr_err("Received SCSI CDB with command_size: %d that" | 1318 | pr_err("Received SCSI CDB with command_size: %d that" |
1319 | " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", | 1319 | " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", |
1320 | scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE); | 1320 | scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE); |
1321 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 1321 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
1322 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; | 1322 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; |
1323 | return -EINVAL; | 1323 | return -EINVAL; |
1324 | } | 1324 | } |
1325 | /* | 1325 | /* |
1326 | * If the received CDB is larger than TCM_MAX_COMMAND_SIZE, | 1326 | * If the received CDB is larger than TCM_MAX_COMMAND_SIZE, |
1327 | * allocate the additional extended CDB buffer now.. Otherwise | 1327 | * allocate the additional extended CDB buffer now.. Otherwise |
1328 | * setup the pointer from __t_task_cdb to t_task_cdb. | 1328 | * setup the pointer from __t_task_cdb to t_task_cdb. |
1329 | */ | 1329 | */ |
1330 | if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) { | 1330 | if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) { |
1331 | cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), | 1331 | cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), |
1332 | GFP_KERNEL); | 1332 | GFP_KERNEL); |
1333 | if (!cmd->t_task_cdb) { | 1333 | if (!cmd->t_task_cdb) { |
1334 | pr_err("Unable to allocate cmd->t_task_cdb" | 1334 | pr_err("Unable to allocate cmd->t_task_cdb" |
1335 | " %u > sizeof(cmd->__t_task_cdb): %lu ops\n", | 1335 | " %u > sizeof(cmd->__t_task_cdb): %lu ops\n", |
1336 | scsi_command_size(cdb), | 1336 | scsi_command_size(cdb), |
1337 | (unsigned long)sizeof(cmd->__t_task_cdb)); | 1337 | (unsigned long)sizeof(cmd->__t_task_cdb)); |
1338 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 1338 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
1339 | cmd->scsi_sense_reason = | 1339 | cmd->scsi_sense_reason = |
1340 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 1340 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
1341 | return -ENOMEM; | 1341 | return -ENOMEM; |
1342 | } | 1342 | } |
1343 | } else | 1343 | } else |
1344 | cmd->t_task_cdb = &cmd->__t_task_cdb[0]; | 1344 | cmd->t_task_cdb = &cmd->__t_task_cdb[0]; |
1345 | /* | 1345 | /* |
1346 | * Copy the original CDB into cmd->t_task_cdb. | 1346 | * Copy the original CDB into cmd->t_task_cdb. |
1347 | */ | 1347 | */ |
1348 | memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb)); | 1348 | memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb)); |
1349 | 1349 | ||
1350 | /* | 1350 | /* |
1351 | * Check for an existing UNIT ATTENTION condition | 1351 | * Check for an existing UNIT ATTENTION condition |
1352 | */ | 1352 | */ |
1353 | if (core_scsi3_ua_check(cmd, cdb) < 0) { | 1353 | if (core_scsi3_ua_check(cmd, cdb) < 0) { |
1354 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 1354 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
1355 | cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION; | 1355 | cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION; |
1356 | return -EINVAL; | 1356 | return -EINVAL; |
1357 | } | 1357 | } |
1358 | 1358 | ||
1359 | ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq); | 1359 | ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq); |
1360 | if (ret != 0) { | 1360 | if (ret != 0) { |
1361 | /* | 1361 | /* |
1362 | * Set SCSI additional sense code (ASC) to 'LUN Not Accessible'; | 1362 | * Set SCSI additional sense code (ASC) to 'LUN Not Accessible'; |
1363 | * The ALUA additional sense code qualifier (ASCQ) is determined | 1363 | * The ALUA additional sense code qualifier (ASCQ) is determined |
1364 | * by the ALUA primary or secondary access state.. | 1364 | * by the ALUA primary or secondary access state.. |
1365 | */ | 1365 | */ |
1366 | if (ret > 0) { | 1366 | if (ret > 0) { |
1367 | pr_debug("[%s]: ALUA TG Port not available, " | 1367 | pr_debug("[%s]: ALUA TG Port not available, " |
1368 | "SenseKey: NOT_READY, ASC/ASCQ: " | 1368 | "SenseKey: NOT_READY, ASC/ASCQ: " |
1369 | "0x04/0x%02x\n", | 1369 | "0x04/0x%02x\n", |
1370 | cmd->se_tfo->get_fabric_name(), alua_ascq); | 1370 | cmd->se_tfo->get_fabric_name(), alua_ascq); |
1371 | 1371 | ||
1372 | transport_set_sense_codes(cmd, 0x04, alua_ascq); | 1372 | transport_set_sense_codes(cmd, 0x04, alua_ascq); |
1373 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 1373 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
1374 | cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY; | 1374 | cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY; |
1375 | return -EINVAL; | 1375 | return -EINVAL; |
1376 | } | 1376 | } |
1377 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 1377 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
1378 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; | 1378 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; |
1379 | return -EINVAL; | 1379 | return -EINVAL; |
1380 | } | 1380 | } |
1381 | 1381 | ||
1382 | /* | 1382 | /* |
1383 | * Check status for SPC-3 Persistent Reservations | 1383 | * Check status for SPC-3 Persistent Reservations |
1384 | */ | 1384 | */ |
1385 | if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type)) { | 1385 | if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type)) { |
1386 | if (su_dev->t10_pr.pr_ops.t10_seq_non_holder( | 1386 | if (su_dev->t10_pr.pr_ops.t10_seq_non_holder( |
1387 | cmd, cdb, pr_reg_type) != 0) { | 1387 | cmd, cdb, pr_reg_type) != 0) { |
1388 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 1388 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
1389 | cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT; | 1389 | cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT; |
1390 | cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; | 1390 | cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; |
1391 | cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; | 1391 | cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; |
1392 | return -EBUSY; | 1392 | return -EBUSY; |
1393 | } | 1393 | } |
1394 | /* | 1394 | /* |
1395 | * This means the CDB is allowed for the SCSI Initiator port | 1395 | * This means the CDB is allowed for the SCSI Initiator port |
1396 | * when said port is *NOT* holding the legacy SPC-2 or | 1396 | * when said port is *NOT* holding the legacy SPC-2 or |
1397 | * SPC-3 Persistent Reservation. | 1397 | * SPC-3 Persistent Reservation. |
1398 | */ | 1398 | */ |
1399 | } | 1399 | } |
1400 | 1400 | ||
1401 | ret = cmd->se_dev->transport->parse_cdb(cmd); | 1401 | ret = cmd->se_dev->transport->parse_cdb(cmd); |
1402 | if (ret < 0) | 1402 | if (ret < 0) |
1403 | return ret; | 1403 | return ret; |
1404 | 1404 | ||
1405 | spin_lock_irqsave(&cmd->t_state_lock, flags); | 1405 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
1406 | cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; | 1406 | cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; |
1407 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 1407 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
1408 | 1408 | ||
1409 | /* | 1409 | /* |
1410 | * Check for SAM Task Attribute Emulation | 1410 | * Check for SAM Task Attribute Emulation |
1411 | */ | 1411 | */ |
1412 | if (transport_check_alloc_task_attr(cmd) < 0) { | 1412 | if (transport_check_alloc_task_attr(cmd) < 0) { |
1413 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 1413 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
1414 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; | 1414 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; |
1415 | return -EINVAL; | 1415 | return -EINVAL; |
1416 | } | 1416 | } |
1417 | spin_lock(&cmd->se_lun->lun_sep_lock); | 1417 | spin_lock(&cmd->se_lun->lun_sep_lock); |
1418 | if (cmd->se_lun->lun_sep) | 1418 | if (cmd->se_lun->lun_sep) |
1419 | cmd->se_lun->lun_sep->sep_stats.cmd_pdus++; | 1419 | cmd->se_lun->lun_sep->sep_stats.cmd_pdus++; |
1420 | spin_unlock(&cmd->se_lun->lun_sep_lock); | 1420 | spin_unlock(&cmd->se_lun->lun_sep_lock); |
1421 | return 0; | 1421 | return 0; |
1422 | } | 1422 | } |
1423 | EXPORT_SYMBOL(target_setup_cmd_from_cdb); | 1423 | EXPORT_SYMBOL(target_setup_cmd_from_cdb); |
1424 | 1424 | ||
1425 | /* | 1425 | /* |
1426 | * Used by fabric module frontends to queue tasks directly. | 1426 | * Used by fabric module frontends to queue tasks directly. |
1427 | * May only be used from process context | 1427 | * May only be used from process context |
1428 | */ | 1428 | */ |
1429 | int transport_handle_cdb_direct( | 1429 | int transport_handle_cdb_direct( |
1430 | struct se_cmd *cmd) | 1430 | struct se_cmd *cmd) |
1431 | { | 1431 | { |
1432 | int ret; | 1432 | int ret; |
1433 | 1433 | ||
1434 | if (!cmd->se_lun) { | 1434 | if (!cmd->se_lun) { |
1435 | dump_stack(); | 1435 | dump_stack(); |
1436 | pr_err("cmd->se_lun is NULL\n"); | 1436 | pr_err("cmd->se_lun is NULL\n"); |
1437 | return -EINVAL; | 1437 | return -EINVAL; |
1438 | } | 1438 | } |
1439 | if (in_interrupt()) { | 1439 | if (in_interrupt()) { |
1440 | dump_stack(); | 1440 | dump_stack(); |
1441 | pr_err("transport_generic_handle_cdb cannot be called" | 1441 | pr_err("transport_generic_handle_cdb cannot be called" |
1442 | " from interrupt context\n"); | 1442 | " from interrupt context\n"); |
1443 | return -EINVAL; | 1443 | return -EINVAL; |
1444 | } | 1444 | } |
1445 | /* | 1445 | /* |
1446 | * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that | 1446 | * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that |
1447 | * outstanding descriptors are handled correctly during shutdown via | 1447 | * outstanding descriptors are handled correctly during shutdown via |
1448 | * transport_wait_for_tasks() | 1448 | * transport_wait_for_tasks() |
1449 | * | 1449 | * |
1450 | * Also, we don't take cmd->t_state_lock here as we only expect | 1450 | * Also, we don't take cmd->t_state_lock here as we only expect |
1451 | * this to be called for initial descriptor submission. | 1451 | * this to be called for initial descriptor submission. |
1452 | */ | 1452 | */ |
1453 | cmd->t_state = TRANSPORT_NEW_CMD; | 1453 | cmd->t_state = TRANSPORT_NEW_CMD; |
1454 | cmd->transport_state |= CMD_T_ACTIVE; | 1454 | cmd->transport_state |= CMD_T_ACTIVE; |
1455 | 1455 | ||
1456 | /* | 1456 | /* |
1457 | * transport_generic_new_cmd() is already handling QUEUE_FULL, | 1457 | * transport_generic_new_cmd() is already handling QUEUE_FULL, |
1458 | * so follow TRANSPORT_NEW_CMD processing thread context usage | 1458 | * so follow TRANSPORT_NEW_CMD processing thread context usage |
1459 | * and call transport_generic_request_failure() if necessary.. | 1459 | * and call transport_generic_request_failure() if necessary.. |
1460 | */ | 1460 | */ |
1461 | ret = transport_generic_new_cmd(cmd); | 1461 | ret = transport_generic_new_cmd(cmd); |
1462 | if (ret < 0) | 1462 | if (ret < 0) |
1463 | transport_generic_request_failure(cmd); | 1463 | transport_generic_request_failure(cmd); |
1464 | 1464 | ||
1465 | return 0; | 1465 | return 0; |
1466 | } | 1466 | } |
1467 | EXPORT_SYMBOL(transport_handle_cdb_direct); | 1467 | EXPORT_SYMBOL(transport_handle_cdb_direct); |
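For fabrics that drive submission step by step instead of using target_submit_cmd() below, the expected order is the one target_submit_cmd() itself follows: attach the LUN, parse the CDB, then queue execution. A condensed, process-context-only sketch (error handling mirrors the code below; my_fabric_submit is a hypothetical name):

static void my_fabric_submit(struct se_cmd *se_cmd, unsigned char *cdb,
			     u32 unpacked_lun)
{
	if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0) {
		/* LUN lookup already set se_cmd->scsi_sense_reason */
		transport_send_check_condition_and_sense(se_cmd,
				se_cmd->scsi_sense_reason, 0);
		return;
	}
	if (target_setup_cmd_from_cdb(se_cmd, cdb) != 0) {
		/* CDB rejected: answer with CHECK_CONDITION status */
		transport_generic_request_failure(se_cmd);
		return;
	}
	/* Marks TRANSPORT_NEW_CMD + CMD_T_ACTIVE and kicks off
	 * transport_generic_new_cmd(); process context only. */
	transport_handle_cdb_direct(se_cmd);
}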
1468 | 1468 | ||
1469 | /** | 1469 | /** |
1470 | * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd | 1470 | * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd |
1471 | * | 1471 | * |
1472 | * @se_cmd: command descriptor to submit | 1472 | * @se_cmd: command descriptor to submit |
1473 | * @se_sess: associated se_sess for endpoint | 1473 | * @se_sess: associated se_sess for endpoint |
1474 | * @cdb: pointer to SCSI CDB | 1474 | * @cdb: pointer to SCSI CDB |
1475 | * @sense: pointer to SCSI sense buffer | 1475 | * @sense: pointer to SCSI sense buffer |
1476 | * @unpacked_lun: unpacked LUN to reference for struct se_lun | 1476 | * @unpacked_lun: unpacked LUN to reference for struct se_lun |
1477 | * @data_length: fabric expected data transfer length | 1477 | * @data_length: fabric expected data transfer length |
1478 | * @task_attr: SAM task attribute | 1478 | * @task_attr: SAM task attribute |
1479 | * @data_dir: DMA data direction | 1479 | * @data_dir: DMA data direction |
1480 | * @flags: flags for command submission from target_sc_flags_tables | 1480 | * @flags: flags for command submission from target_sc_flags_tables |
1481 | * | 1481 | * |
1482 | * Returns non-zero to signal active I/O shutdown failure. All other | 1482 | * Returns non-zero to signal active I/O shutdown failure. All other |
1483 | * setup exceptions will be returned as a SCSI CHECK_CONDITION response, | 1483 | * setup exceptions will be returned as a SCSI CHECK_CONDITION response, |
1484 | * but still return zero here. | 1484 | * but still return zero here. |
1485 | * | 1485 | * |
1486 | * This may only be called from process context, and also currently | 1486 | * This may only be called from process context, and also currently |
1487 | * assumes internal allocation of fabric payload buffer by target-core. | 1487 | * assumes internal allocation of fabric payload buffer by target-core. |
1488 | **/ | 1488 | **/ |
1489 | int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, | 1489 | int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, |
1490 | unsigned char *cdb, unsigned char *sense, u32 unpacked_lun, | 1490 | unsigned char *cdb, unsigned char *sense, u32 unpacked_lun, |
1491 | u32 data_length, int task_attr, int data_dir, int flags) | 1491 | u32 data_length, int task_attr, int data_dir, int flags) |
1492 | { | 1492 | { |
1493 | struct se_portal_group *se_tpg; | 1493 | struct se_portal_group *se_tpg; |
1494 | int rc; | 1494 | int rc; |
1495 | 1495 | ||
1496 | se_tpg = se_sess->se_tpg; | 1496 | se_tpg = se_sess->se_tpg; |
1497 | BUG_ON(!se_tpg); | 1497 | BUG_ON(!se_tpg); |
1498 | BUG_ON(se_cmd->se_tfo || se_cmd->se_sess); | 1498 | BUG_ON(se_cmd->se_tfo || se_cmd->se_sess); |
1499 | BUG_ON(in_interrupt()); | 1499 | BUG_ON(in_interrupt()); |
1500 | /* | 1500 | /* |
1501 | * Initialize se_cmd for target operation. From this point | 1501 | * Initialize se_cmd for target operation. From this point |
1502 | * exceptions are handled by sending exception status via | 1502 | * exceptions are handled by sending exception status via |
1503 | * target_core_fabric_ops->queue_status() callback | 1503 | * target_core_fabric_ops->queue_status() callback |
1504 | */ | 1504 | */ |
1505 | transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, | 1505 | transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, |
1506 | data_length, data_dir, task_attr, sense); | 1506 | data_length, data_dir, task_attr, sense); |
1507 | if (flags & TARGET_SCF_UNKNOWN_SIZE) | 1507 | if (flags & TARGET_SCF_UNKNOWN_SIZE) |
1508 | se_cmd->unknown_data_length = 1; | 1508 | se_cmd->unknown_data_length = 1; |
1509 | /* | 1509 | /* |
1510 | * Obtain struct se_cmd->cmd_kref reference and add new cmd to | 1510 | * Obtain struct se_cmd->cmd_kref reference and add new cmd to |
1511 | * se_sess->sess_cmd_list. A second kref_get here is necessary | 1511 | * se_sess->sess_cmd_list. A second kref_get here is necessary |
1512 | * for fabrics using TARGET_SCF_ACK_KREF that expect a second | 1512 | * for fabrics using TARGET_SCF_ACK_KREF that expect a second |
1513 | * kref_put() to happen during fabric packet acknowledgement. | 1513 | * kref_put() to happen during fabric packet acknowledgement. |
1514 | */ | 1514 | */ |
1515 | rc = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF)); | 1515 | rc = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF)); |
1516 | if (rc) | 1516 | if (rc) |
1517 | return rc; | 1517 | return rc; |
1518 | /* | 1518 | /* |
1519 | * Signal bidirectional data payloads to target-core | 1519 | * Signal bidirectional data payloads to target-core |
1520 | */ | 1520 | */ |
1521 | if (flags & TARGET_SCF_BIDI_OP) | 1521 | if (flags & TARGET_SCF_BIDI_OP) |
1522 | se_cmd->se_cmd_flags |= SCF_BIDI; | 1522 | se_cmd->se_cmd_flags |= SCF_BIDI; |
1523 | /* | 1523 | /* |
1524 | * Locate se_lun pointer and attach it to struct se_cmd | 1524 | * Locate se_lun pointer and attach it to struct se_cmd |
1525 | */ | 1525 | */ |
1526 | if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0) { | 1526 | if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0) { |
1527 | transport_send_check_condition_and_sense(se_cmd, | 1527 | transport_send_check_condition_and_sense(se_cmd, |
1528 | se_cmd->scsi_sense_reason, 0); | 1528 | se_cmd->scsi_sense_reason, 0); |
1529 | target_put_sess_cmd(se_sess, se_cmd); | 1529 | target_put_sess_cmd(se_sess, se_cmd); |
1530 | return 0; | 1530 | return 0; |
1531 | } | 1531 | } |
1532 | 1532 | ||
1533 | rc = target_setup_cmd_from_cdb(se_cmd, cdb); | 1533 | rc = target_setup_cmd_from_cdb(se_cmd, cdb); |
1534 | if (rc != 0) { | 1534 | if (rc != 0) { |
1535 | transport_generic_request_failure(se_cmd); | 1535 | transport_generic_request_failure(se_cmd); |
1536 | return 0; | 1536 | return 0; |
1537 | } | 1537 | } |
1538 | 1538 | ||
1539 | /* | 1539 | /* |
1540 | * Check if we need to delay processing because of ALUA | 1540 | * Check if we need to delay processing because of ALUA |
1541 | * Active/NonOptimized primary access state.. | 1541 | * Active/NonOptimized primary access state.. |
1542 | */ | 1542 | */ |
1543 | core_alua_check_nonop_delay(se_cmd); | 1543 | core_alua_check_nonop_delay(se_cmd); |
1544 | 1544 | ||
1545 | transport_handle_cdb_direct(se_cmd); | 1545 | transport_handle_cdb_direct(se_cmd); |
1546 | return 0; | 1546 | return 0; |
1547 | } | 1547 | } |
1548 | EXPORT_SYMBOL(target_submit_cmd); | 1548 | EXPORT_SYMBOL(target_submit_cmd); |
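A hedged usage sketch for the wrapper above, reusing the hypothetical my_fabric_io descriptor from earlier — a receive handler dispatching a newly arrived SCSI command:

static void my_fabric_handle_scsi_cmd(struct my_fabric_io *io,
				      struct se_session *se_sess,
				      unsigned char *cdb, u32 unpacked_lun,
				      u32 data_length, int data_dir)
{
	/* io->se_cmd must still be zeroed: target_submit_cmd() BUG()s on
	 * a non-NULL se_tfo/se_sess.  A zero return covers both success
	 * and setup errors already answered via CHECK_CONDITION; only
	 * active I/O shutdown returns non-zero. */
	if (target_submit_cmd(&io->se_cmd, se_sess, cdb, io->sense,
			      unpacked_lun, data_length, MSG_SIMPLE_TAG,
			      data_dir, 0) != 0)
		my_fabric_free_io(io);		/* hypothetical cleanup */
}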
1549 | 1549 | ||
1550 | static void target_complete_tmr_failure(struct work_struct *work) | 1550 | static void target_complete_tmr_failure(struct work_struct *work) |
1551 | { | 1551 | { |
1552 | struct se_cmd *se_cmd = container_of(work, struct se_cmd, work); | 1552 | struct se_cmd *se_cmd = container_of(work, struct se_cmd, work); |
1553 | 1553 | ||
1554 | se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST; | 1554 | se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST; |
1555 | se_cmd->se_tfo->queue_tm_rsp(se_cmd); | 1555 | se_cmd->se_tfo->queue_tm_rsp(se_cmd); |
1556 | transport_generic_free_cmd(se_cmd, 0); | 1556 | transport_generic_free_cmd(se_cmd, 0); |
1557 | } | 1557 | } |
1558 | 1558 | ||
1559 | /** | 1559 | /** |
1560 | * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd | 1560 | * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd |
1561 | * for TMR CDBs | 1561 | * for TMR CDBs |
1562 | * | 1562 | * |
1563 | * @se_cmd: command descriptor to submit | 1563 | * @se_cmd: command descriptor to submit |
1564 | * @se_sess: associated se_sess for endpoint | 1564 | * @se_sess: associated se_sess for endpoint |
1565 | * @sense: pointer to SCSI sense buffer | 1565 | * @sense: pointer to SCSI sense buffer |
1566 | * @unpacked_lun: unpacked LUN to reference for struct se_lun | 1566 | * @unpacked_lun: unpacked LUN to reference for struct se_lun |
1567 | * @fabric_context: fabric context for TMR req | 1567 | * @fabric_context: fabric context for TMR req |
1568 | * @tm_type: Type of TM request | 1568 | * @tm_type: Type of TM request |
1569 | * @gfp: gfp type for caller | 1569 | * @gfp: gfp type for caller |
1570 | * @tag: referenced task tag for TMR_ABORT_TASK | 1570 | * @tag: referenced task tag for TMR_ABORT_TASK |
1571 | * @flags: submit cmd flags | 1571 | * @flags: submit cmd flags |
1572 | * | 1572 | * |
1573 | * Callable from all contexts. | 1573 | * Callable from all contexts. |
1574 | **/ | 1574 | **/ |
1575 | 1575 | ||
1576 | int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, | 1576 | int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, |
1577 | unsigned char *sense, u32 unpacked_lun, | 1577 | unsigned char *sense, u32 unpacked_lun, |
1578 | void *fabric_tmr_ptr, unsigned char tm_type, | 1578 | void *fabric_tmr_ptr, unsigned char tm_type, |
1579 | gfp_t gfp, unsigned int tag, int flags) | 1579 | gfp_t gfp, unsigned int tag, int flags) |
1580 | { | 1580 | { |
1581 | struct se_portal_group *se_tpg; | 1581 | struct se_portal_group *se_tpg; |
1582 | int ret; | 1582 | int ret; |
1583 | 1583 | ||
1584 | se_tpg = se_sess->se_tpg; | 1584 | se_tpg = se_sess->se_tpg; |
1585 | BUG_ON(!se_tpg); | 1585 | BUG_ON(!se_tpg); |
1586 | 1586 | ||
1587 | transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, | 1587 | transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, |
1588 | 0, DMA_NONE, MSG_SIMPLE_TAG, sense); | 1588 | 0, DMA_NONE, MSG_SIMPLE_TAG, sense); |
1589 | /* | 1589 | /* |
1590 | * FIXME: Currently expect caller to handle se_cmd->se_tmr_req | 1590 | * FIXME: Currently expect caller to handle se_cmd->se_tmr_req |
1591 | * allocation failure. | 1591 | * allocation failure. |
1592 | */ | 1592 | */ |
1593 | ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp); | 1593 | ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp); |
1594 | if (ret < 0) | 1594 | if (ret < 0) |
1595 | return -ENOMEM; | 1595 | return -ENOMEM; |
1596 | 1596 | ||
1597 | if (tm_type == TMR_ABORT_TASK) | 1597 | if (tm_type == TMR_ABORT_TASK) |
1598 | se_cmd->se_tmr_req->ref_task_tag = tag; | 1598 | se_cmd->se_tmr_req->ref_task_tag = tag; |
1599 | 1599 | ||
1600 | /* See target_submit_cmd for commentary */ | 1600 | /* See target_submit_cmd for commentary */ |
1601 | ret = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF)); | 1601 | ret = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF)); |
1602 | if (ret) { | 1602 | if (ret) { |
1603 | core_tmr_release_req(se_cmd->se_tmr_req); | 1603 | core_tmr_release_req(se_cmd->se_tmr_req); |
1604 | return ret; | 1604 | return ret; |
1605 | } | 1605 | } |
1606 | 1606 | ||
1607 | ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun); | 1607 | ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun); |
1608 | if (ret) { | 1608 | if (ret) { |
1609 | /* | 1609 | /* |
1610 | * For callback during failure handling, push this work off | 1610 | * For callback during failure handling, push this work off |
1611 | * to process context with TMR_LUN_DOES_NOT_EXIST status. | 1611 | * to process context with TMR_LUN_DOES_NOT_EXIST status. |
1612 | */ | 1612 | */ |
1613 | INIT_WORK(&se_cmd->work, target_complete_tmr_failure); | 1613 | INIT_WORK(&se_cmd->work, target_complete_tmr_failure); |
1614 | schedule_work(&se_cmd->work); | 1614 | schedule_work(&se_cmd->work); |
1615 | return 0; | 1615 | return 0; |
1616 | } | 1616 | } |
1617 | transport_generic_handle_tmr(se_cmd); | 1617 | transport_generic_handle_tmr(se_cmd); |
1618 | return 0; | 1618 | return 0; |
1619 | } | 1619 | } |
1620 | EXPORT_SYMBOL(target_submit_tmr); | 1620 | EXPORT_SYMBOL(target_submit_tmr); |
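A minimal sketch of submitting an ABORT_TASK TMR through the interface above; the my_fabric_abort_task wrapper and its arguments are assumptions, while target_submit_tmr(), TMR_ABORT_TASK and GFP_KERNEL are real symbols:

static int my_fabric_abort_task(struct se_cmd *tmr_cmd,
				struct se_session *se_sess,
				unsigned char *sense, u32 unpacked_lun,
				unsigned int tag)
{
	/* The TMR response arrives asynchronously via ->queue_tm_rsp();
	 * a failed LUN lookup is reported the same way, with
	 * TMR_LUN_DOES_NOT_EXIST, from workqueue context. */
	return target_submit_tmr(tmr_cmd, se_sess, sense, unpacked_lun,
				 NULL, TMR_ABORT_TASK, GFP_KERNEL, tag, 0);
}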
1621 | 1621 | ||
1622 | /* | 1622 | /* |
1623 | * If the cmd is active, request it to be stopped and sleep until it | 1623 | * If the cmd is active, request it to be stopped and sleep until it |
1624 | * has completed. | 1624 | * has completed. |
1625 | */ | 1625 | */ |
1626 | bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags) | 1626 | bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags) |
1627 | { | 1627 | { |
1628 | bool was_active = false; | 1628 | bool was_active = false; |
1629 | 1629 | ||
1630 | if (cmd->transport_state & CMD_T_BUSY) { | 1630 | if (cmd->transport_state & CMD_T_BUSY) { |
1631 | cmd->transport_state |= CMD_T_REQUEST_STOP; | 1631 | cmd->transport_state |= CMD_T_REQUEST_STOP; |
1632 | spin_unlock_irqrestore(&cmd->t_state_lock, *flags); | 1632 | spin_unlock_irqrestore(&cmd->t_state_lock, *flags); |
1633 | 1633 | ||
1634 | pr_debug("cmd %p waiting to complete\n", cmd); | 1634 | pr_debug("cmd %p waiting to complete\n", cmd); |
1635 | wait_for_completion(&cmd->task_stop_comp); | 1635 | wait_for_completion(&cmd->task_stop_comp); |
1636 | pr_debug("cmd %p stopped successfully\n", cmd); | 1636 | pr_debug("cmd %p stopped successfully\n", cmd); |
1637 | 1637 | ||
1638 | spin_lock_irqsave(&cmd->t_state_lock, *flags); | 1638 | spin_lock_irqsave(&cmd->t_state_lock, *flags); |
1639 | cmd->transport_state &= ~CMD_T_REQUEST_STOP; | 1639 | cmd->transport_state &= ~CMD_T_REQUEST_STOP; |
1640 | cmd->transport_state &= ~CMD_T_BUSY; | 1640 | cmd->transport_state &= ~CMD_T_BUSY; |
1641 | was_active = true; | 1641 | was_active = true; |
1642 | } | 1642 | } |
1643 | 1643 | ||
1644 | return was_active; | 1644 | return was_active; |
1645 | } | 1645 | } |
1646 | 1646 | ||
1647 | /* | 1647 | /* |
1648 | * Handle SAM-esque emulation for generic transport request failures. | 1648 | * Handle SAM-esque emulation for generic transport request failures. |
1649 | */ | 1649 | */ |
1650 | void transport_generic_request_failure(struct se_cmd *cmd) | 1650 | void transport_generic_request_failure(struct se_cmd *cmd) |
1651 | { | 1651 | { |
1652 | int ret = 0; | 1652 | int ret = 0; |
1653 | 1653 | ||
1654 | pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x" | 1654 | pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x" |
1655 | " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd), | 1655 | " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd), |
1656 | cmd->t_task_cdb[0]); | 1656 | cmd->t_task_cdb[0]); |
1657 | pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n", | 1657 | pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n", |
1658 | cmd->se_tfo->get_cmd_state(cmd), | 1658 | cmd->se_tfo->get_cmd_state(cmd), |
1659 | cmd->t_state, cmd->scsi_sense_reason); | 1659 | cmd->t_state, cmd->scsi_sense_reason); |
1660 | pr_debug("-----[ CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n", | 1660 | pr_debug("-----[ CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n", |
1661 | (cmd->transport_state & CMD_T_ACTIVE) != 0, | 1661 | (cmd->transport_state & CMD_T_ACTIVE) != 0, |
1662 | (cmd->transport_state & CMD_T_STOP) != 0, | 1662 | (cmd->transport_state & CMD_T_STOP) != 0, |
1663 | (cmd->transport_state & CMD_T_SENT) != 0); | 1663 | (cmd->transport_state & CMD_T_SENT) != 0); |
1664 | 1664 | ||
1665 | /* | 1665 | /* |
1666 | * For SAM Task Attribute emulation for failed struct se_cmd | 1666 | * For SAM Task Attribute emulation for failed struct se_cmd |
1667 | */ | 1667 | */ |
1668 | if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) | 1668 | if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) |
1669 | transport_complete_task_attr(cmd); | 1669 | transport_complete_task_attr(cmd); |
1670 | 1670 | ||
1671 | switch (cmd->scsi_sense_reason) { | 1671 | switch (cmd->scsi_sense_reason) { |
1672 | case TCM_NON_EXISTENT_LUN: | 1672 | case TCM_NON_EXISTENT_LUN: |
1673 | case TCM_UNSUPPORTED_SCSI_OPCODE: | 1673 | case TCM_UNSUPPORTED_SCSI_OPCODE: |
1674 | case TCM_INVALID_CDB_FIELD: | 1674 | case TCM_INVALID_CDB_FIELD: |
1675 | case TCM_INVALID_PARAMETER_LIST: | 1675 | case TCM_INVALID_PARAMETER_LIST: |
1676 | case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: | 1676 | case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: |
1677 | case TCM_UNKNOWN_MODE_PAGE: | 1677 | case TCM_UNKNOWN_MODE_PAGE: |
1678 | case TCM_WRITE_PROTECTED: | 1678 | case TCM_WRITE_PROTECTED: |
1679 | case TCM_ADDRESS_OUT_OF_RANGE: | 1679 | case TCM_ADDRESS_OUT_OF_RANGE: |
1680 | case TCM_CHECK_CONDITION_ABORT_CMD: | 1680 | case TCM_CHECK_CONDITION_ABORT_CMD: |
1681 | case TCM_CHECK_CONDITION_UNIT_ATTENTION: | 1681 | case TCM_CHECK_CONDITION_UNIT_ATTENTION: |
1682 | case TCM_CHECK_CONDITION_NOT_READY: | 1682 | case TCM_CHECK_CONDITION_NOT_READY: |
1683 | break; | 1683 | break; |
1684 | case TCM_RESERVATION_CONFLICT: | 1684 | case TCM_RESERVATION_CONFLICT: |
1685 | /* | 1685 | /* |
1686 | * No SENSE Data payload for this case, set SCSI Status | 1686 | * No SENSE Data payload for this case, set SCSI Status |
1687 | * and queue the response to $FABRIC_MOD. | 1687 | * and queue the response to $FABRIC_MOD. |
1688 | * | 1688 | * |
1689 | * Uses linux/include/scsi/scsi.h SAM status codes defs | 1689 | * Uses linux/include/scsi/scsi.h SAM status codes defs |
1690 | */ | 1690 | */ |
1691 | cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; | 1691 | cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; |
1692 | /* | 1692 | /* |
1693 | * For UA Interlock Code 11b, a RESERVATION CONFLICT will | 1693 | * For UA Interlock Code 11b, a RESERVATION CONFLICT will |
1694 | * establish a UNIT ATTENTION with PREVIOUS RESERVATION | 1694 | * establish a UNIT ATTENTION with PREVIOUS RESERVATION |
1695 | * CONFLICT STATUS. | 1695 | * CONFLICT STATUS. |
1696 | * | 1696 | * |
1697 | * See spc4r17, section 7.4.6 Control Mode Page, Table 349 | 1697 | * See spc4r17, section 7.4.6 Control Mode Page, Table 349 |
1698 | */ | 1698 | */ |
1699 | if (cmd->se_sess && | 1699 | if (cmd->se_sess && |
1700 | cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2) | 1700 | cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2) |
1701 | core_scsi3_ua_allocate(cmd->se_sess->se_node_acl, | 1701 | core_scsi3_ua_allocate(cmd->se_sess->se_node_acl, |
1702 | cmd->orig_fe_lun, 0x2C, | 1702 | cmd->orig_fe_lun, 0x2C, |
1703 | ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); | 1703 | ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); |
1704 | 1704 | ||
1705 | ret = cmd->se_tfo->queue_status(cmd); | 1705 | ret = cmd->se_tfo->queue_status(cmd); |
1706 | if (ret == -EAGAIN || ret == -ENOMEM) | 1706 | if (ret == -EAGAIN || ret == -ENOMEM) |
1707 | goto queue_full; | 1707 | goto queue_full; |
1708 | goto check_stop; | 1708 | goto check_stop; |
1709 | default: | 1709 | default: |
1710 | pr_err("Unknown transport error for CDB 0x%02x: %d\n", | 1710 | pr_err("Unknown transport error for CDB 0x%02x: %d\n", |
1711 | cmd->t_task_cdb[0], cmd->scsi_sense_reason); | 1711 | cmd->t_task_cdb[0], cmd->scsi_sense_reason); |
1712 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; | 1712 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; |
1713 | break; | 1713 | break; |
1714 | } | 1714 | } |
1715 | 1715 | ||
1716 | ret = transport_send_check_condition_and_sense(cmd, | 1716 | ret = transport_send_check_condition_and_sense(cmd, |
1717 | cmd->scsi_sense_reason, 0); | 1717 | cmd->scsi_sense_reason, 0); |
1718 | if (ret == -EAGAIN || ret == -ENOMEM) | 1718 | if (ret == -EAGAIN || ret == -ENOMEM) |
1719 | goto queue_full; | 1719 | goto queue_full; |
1720 | 1720 | ||
1721 | check_stop: | 1721 | check_stop: |
1722 | transport_lun_remove_cmd(cmd); | 1722 | transport_lun_remove_cmd(cmd); |
1723 | transport_cmd_check_stop_to_fabric(cmd); | 1723 | transport_cmd_check_stop_to_fabric(cmd); |
1725 | return; | 1725 | return; |
1726 | 1726 | ||
1727 | queue_full: | 1727 | queue_full: |
1728 | cmd->t_state = TRANSPORT_COMPLETE_QF_OK; | 1728 | cmd->t_state = TRANSPORT_COMPLETE_QF_OK; |
1729 | transport_handle_queue_full(cmd, cmd->se_dev); | 1729 | transport_handle_queue_full(cmd, cmd->se_dev); |
1730 | } | 1730 | } |
1731 | EXPORT_SYMBOL(transport_generic_request_failure); | 1731 | EXPORT_SYMBOL(transport_generic_request_failure); |
1732 | 1732 | ||
1733 | static void __target_execute_cmd(struct se_cmd *cmd) | 1733 | static void __target_execute_cmd(struct se_cmd *cmd) |
1734 | { | 1734 | { |
1735 | int error = 0; | 1735 | int error = 0; |
1736 | 1736 | ||
1737 | spin_lock_irq(&cmd->t_state_lock); | 1737 | spin_lock_irq(&cmd->t_state_lock); |
1738 | cmd->transport_state |= (CMD_T_BUSY|CMD_T_SENT); | 1738 | cmd->transport_state |= (CMD_T_BUSY|CMD_T_SENT); |
1739 | spin_unlock_irq(&cmd->t_state_lock); | 1739 | spin_unlock_irq(&cmd->t_state_lock); |
1740 | 1740 | ||
1741 | if (cmd->execute_cmd) | 1741 | if (cmd->execute_cmd) |
1742 | error = cmd->execute_cmd(cmd); | 1742 | error = cmd->execute_cmd(cmd); |
1743 | 1743 | ||
1744 | if (error) { | 1744 | if (error) { |
1745 | spin_lock_irq(&cmd->t_state_lock); | 1745 | spin_lock_irq(&cmd->t_state_lock); |
1746 | cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT); | 1746 | cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT); |
1747 | spin_unlock_irq(&cmd->t_state_lock); | 1747 | spin_unlock_irq(&cmd->t_state_lock); |
1748 | 1748 | ||
1749 | transport_generic_request_failure(cmd); | 1749 | transport_generic_request_failure(cmd); |
1750 | } | 1750 | } |
1751 | } | 1751 | } |
1752 | 1752 | ||
1753 | void target_execute_cmd(struct se_cmd *cmd) | 1753 | void target_execute_cmd(struct se_cmd *cmd) |
1754 | { | 1754 | { |
1755 | struct se_device *dev = cmd->se_dev; | 1755 | struct se_device *dev = cmd->se_dev; |
1756 | 1756 | ||
1757 | /* | 1757 | /* |
1758 | * If the received CDB has already been aborted, stop processing it here. | 1758 | * If the received CDB has already been aborted, stop processing it here. |
1759 | */ | 1759 | */ |
1760 | if (transport_check_aborted_status(cmd, 1)) | 1760 | if (transport_check_aborted_status(cmd, 1)) |
1761 | return; | 1761 | return; |
1762 | 1762 | ||
1763 | /* | 1763 | /* |
1764 | * Determine if an IOCTL context caller is requesting the stopping of this | 1764 | * Determine if an IOCTL context caller is requesting the stopping of this |
1765 | * command for LUN shutdown purposes. | 1765 | * command for LUN shutdown purposes. |
1766 | */ | 1766 | */ |
1767 | spin_lock_irq(&cmd->t_state_lock); | 1767 | spin_lock_irq(&cmd->t_state_lock); |
1768 | if (cmd->transport_state & CMD_T_LUN_STOP) { | 1768 | if (cmd->transport_state & CMD_T_LUN_STOP) { |
1769 | pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n", | 1769 | pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n", |
1770 | __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd)); | 1770 | __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd)); |
1771 | 1771 | ||
1772 | cmd->transport_state &= ~CMD_T_ACTIVE; | 1772 | cmd->transport_state &= ~CMD_T_ACTIVE; |
1773 | spin_unlock_irq(&cmd->t_state_lock); | 1773 | spin_unlock_irq(&cmd->t_state_lock); |
1774 | complete(&cmd->transport_lun_stop_comp); | 1774 | complete(&cmd->transport_lun_stop_comp); |
1775 | return; | 1775 | return; |
1776 | } | 1776 | } |
1777 | /* | 1777 | /* |
1778 | * Determine if frontend context caller is requesting the stopping of | 1778 | * Determine if frontend context caller is requesting the stopping of |
1779 | * this command for frontend exceptions. | 1779 | * this command for frontend exceptions. |
1780 | */ | 1780 | */ |
1781 | if (cmd->transport_state & CMD_T_STOP) { | 1781 | if (cmd->transport_state & CMD_T_STOP) { |
1782 | pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n", | 1782 | pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n", |
1783 | __func__, __LINE__, | 1783 | __func__, __LINE__, |
1784 | cmd->se_tfo->get_task_tag(cmd)); | 1784 | cmd->se_tfo->get_task_tag(cmd)); |
1785 | 1785 | ||
1786 | spin_unlock_irq(&cmd->t_state_lock); | 1786 | spin_unlock_irq(&cmd->t_state_lock); |
1787 | complete(&cmd->t_transport_stop_comp); | 1787 | complete(&cmd->t_transport_stop_comp); |
1788 | return; | 1788 | return; |
1789 | } | 1789 | } |
1790 | 1790 | ||
1791 | cmd->t_state = TRANSPORT_PROCESSING; | 1791 | cmd->t_state = TRANSPORT_PROCESSING; |
1792 | spin_unlock_irq(&cmd->t_state_lock); | 1792 | spin_unlock_irq(&cmd->t_state_lock); |
1793 | 1793 | ||
1794 | if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) | 1794 | if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) |
1795 | goto execute; | 1795 | goto execute; |
1796 | 1796 | ||
1797 | /* | 1797 | /* |
1798 | * Check for the existence of HEAD_OF_QUEUE, and if true, allow the | 1798 | * Check for the existence of HEAD_OF_QUEUE, and if true, allow the |
1799 | * passed struct se_cmd to be moved to the front of the queue. | 1799 | * passed struct se_cmd to be moved to the front of the queue. |
1800 | */ | 1800 | */ |
1801 | switch (cmd->sam_task_attr) { | 1801 | switch (cmd->sam_task_attr) { |
1802 | case MSG_HEAD_TAG: | 1802 | case MSG_HEAD_TAG: |
1803 | pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x, " | 1803 | pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x, " |
1804 | "se_ordered_id: %u\n", | 1804 | "se_ordered_id: %u\n", |
1805 | cmd->t_task_cdb[0], cmd->se_ordered_id); | 1805 | cmd->t_task_cdb[0], cmd->se_ordered_id); |
1806 | goto execute; | 1806 | goto execute; |
1807 | case MSG_ORDERED_TAG: | 1807 | case MSG_ORDERED_TAG: |
1808 | atomic_inc(&dev->dev_ordered_sync); | 1808 | atomic_inc(&dev->dev_ordered_sync); |
1809 | smp_mb__after_atomic_inc(); | 1809 | smp_mb__after_atomic_inc(); |
1810 | 1810 | ||
1811 | pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, " | 1811 | pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, " |
1812 | " se_ordered_id: %u\n", | 1812 | " se_ordered_id: %u\n", |
1813 | cmd->t_task_cdb[0], cmd->se_ordered_id); | 1813 | cmd->t_task_cdb[0], cmd->se_ordered_id); |
1814 | 1814 | ||
1815 | /* | 1815 | /* |
1816 | * Execute an ORDERED command if no other older commands | 1816 | * Execute an ORDERED command if no other older commands |
1817 | * exist that need to be completed first. | 1817 | * exist that need to be completed first. |
1818 | */ | 1818 | */ |
1819 | if (!atomic_read(&dev->simple_cmds)) | 1819 | if (!atomic_read(&dev->simple_cmds)) |
1820 | goto execute; | 1820 | goto execute; |
1821 | break; | 1821 | break; |
1822 | default: | 1822 | default: |
1823 | /* | 1823 | /* |
1824 | * For SIMPLE and UNTAGGED Task Attribute commands | 1824 | * For SIMPLE and UNTAGGED Task Attribute commands |
1825 | */ | 1825 | */ |
1826 | atomic_inc(&dev->simple_cmds); | 1826 | atomic_inc(&dev->simple_cmds); |
1827 | smp_mb__after_atomic_inc(); | 1827 | smp_mb__after_atomic_inc(); |
1828 | break; | 1828 | break; |
1829 | } | 1829 | } |
1830 | 1830 | ||
1831 | if (atomic_read(&dev->dev_ordered_sync) != 0) { | 1831 | if (atomic_read(&dev->dev_ordered_sync) != 0) { |
1832 | spin_lock(&dev->delayed_cmd_lock); | 1832 | spin_lock(&dev->delayed_cmd_lock); |
1833 | list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list); | 1833 | list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list); |
1834 | spin_unlock(&dev->delayed_cmd_lock); | 1834 | spin_unlock(&dev->delayed_cmd_lock); |
1835 | 1835 | ||
1836 | pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to" | 1836 | pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to" |
1837 | " delayed CMD list, se_ordered_id: %u\n", | 1837 | " delayed CMD list, se_ordered_id: %u\n", |
1838 | cmd->t_task_cdb[0], cmd->sam_task_attr, | 1838 | cmd->t_task_cdb[0], cmd->sam_task_attr, |
1839 | cmd->se_ordered_id); | 1839 | cmd->se_ordered_id); |
1840 | return; | 1840 | return; |
1841 | } | 1841 | } |
1842 | 1842 | ||
1843 | execute: | 1843 | execute: |
1844 | /* | 1844 | /* |
1845 | * Otherwise, no ORDERED task attributes exist.. | 1845 | * Otherwise, no ORDERED task attributes exist.. |
1846 | */ | 1846 | */ |
1847 | __target_execute_cmd(cmd); | 1847 | __target_execute_cmd(cmd); |
1848 | } | 1848 | } |
1849 | EXPORT_SYMBOL(target_execute_cmd); | 1849 | EXPORT_SYMBOL(target_execute_cmd); |
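target_execute_cmd() is exported so fabric code can start (or resume) execution itself — typically from a Data-Out completion once all WRITE payload has arrived. A hedged sketch; the handler name is hypothetical:

/* All WRITE data for se_cmd has been received from the initiator. */
static void my_fabric_dataout_done(struct se_cmd *se_cmd)
{
	/* Honors abort/LUN-stop/frontend-stop requests and the SAM task
	 * attribute ordering above before reaching ->execute_cmd(). */
	target_execute_cmd(se_cmd);
}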
1850 | 1850 | ||
1851 | /* | 1851 | /* |
1852 | * Process all commands up to the last received ORDERED task attribute, | 1852 | * Process all commands up to the last received ORDERED task attribute, |
1853 | * which requires another blocking boundary. | 1853 | * which requires another blocking boundary. |
1854 | */ | 1854 | */ |
1855 | static void target_restart_delayed_cmds(struct se_device *dev) | 1855 | static void target_restart_delayed_cmds(struct se_device *dev) |
1856 | { | 1856 | { |
1857 | for (;;) { | 1857 | for (;;) { |
1858 | struct se_cmd *cmd; | 1858 | struct se_cmd *cmd; |
1859 | 1859 | ||
1860 | spin_lock(&dev->delayed_cmd_lock); | 1860 | spin_lock(&dev->delayed_cmd_lock); |
1861 | if (list_empty(&dev->delayed_cmd_list)) { | 1861 | if (list_empty(&dev->delayed_cmd_list)) { |
1862 | spin_unlock(&dev->delayed_cmd_lock); | 1862 | spin_unlock(&dev->delayed_cmd_lock); |
1863 | break; | 1863 | break; |
1864 | } | 1864 | } |
1865 | 1865 | ||
1866 | cmd = list_entry(dev->delayed_cmd_list.next, | 1866 | cmd = list_entry(dev->delayed_cmd_list.next, |
1867 | struct se_cmd, se_delayed_node); | 1867 | struct se_cmd, se_delayed_node); |
1868 | list_del(&cmd->se_delayed_node); | 1868 | list_del(&cmd->se_delayed_node); |
1869 | spin_unlock(&dev->delayed_cmd_lock); | 1869 | spin_unlock(&dev->delayed_cmd_lock); |
1870 | 1870 | ||
1871 | __target_execute_cmd(cmd); | 1871 | __target_execute_cmd(cmd); |
1872 | 1872 | ||
1873 | if (cmd->sam_task_attr == MSG_ORDERED_TAG) | 1873 | if (cmd->sam_task_attr == MSG_ORDERED_TAG) |
1874 | break; | 1874 | break; |
1875 | } | 1875 | } |
1876 | } | 1876 | } |
1877 | 1877 | ||
1878 | /* | 1878 | /* |
1879 | * Called from I/O completion to determine which dormant/delayed | 1879 | * Called from I/O completion to determine which dormant/delayed |
1880 | * and ordered cmds need to have their tasks added to the execution queue. | 1880 | * and ordered cmds need to have their tasks added to the execution queue. |
1881 | */ | 1881 | */ |
1882 | static void transport_complete_task_attr(struct se_cmd *cmd) | 1882 | static void transport_complete_task_attr(struct se_cmd *cmd) |
1883 | { | 1883 | { |
1884 | struct se_device *dev = cmd->se_dev; | 1884 | struct se_device *dev = cmd->se_dev; |
1885 | 1885 | ||
1886 | if (cmd->sam_task_attr == MSG_SIMPLE_TAG) { | 1886 | if (cmd->sam_task_attr == MSG_SIMPLE_TAG) { |
1887 | atomic_dec(&dev->simple_cmds); | 1887 | atomic_dec(&dev->simple_cmds); |
1888 | smp_mb__after_atomic_dec(); | 1888 | smp_mb__after_atomic_dec(); |
1889 | dev->dev_cur_ordered_id++; | 1889 | dev->dev_cur_ordered_id++; |
1890 | pr_debug("Incremented dev->dev_cur_ordered_id: %u for" | 1890 | pr_debug("Incremented dev->dev_cur_ordered_id: %u for" |
1891 | " SIMPLE: %u\n", dev->dev_cur_ordered_id, | 1891 | " SIMPLE: %u\n", dev->dev_cur_ordered_id, |
1892 | cmd->se_ordered_id); | 1892 | cmd->se_ordered_id); |
1893 | } else if (cmd->sam_task_attr == MSG_HEAD_TAG) { | 1893 | } else if (cmd->sam_task_attr == MSG_HEAD_TAG) { |
1894 | dev->dev_cur_ordered_id++; | 1894 | dev->dev_cur_ordered_id++; |
1895 | pr_debug("Incremented dev_cur_ordered_id: %u for" | 1895 | pr_debug("Incremented dev_cur_ordered_id: %u for" |
1896 | " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id, | 1896 | " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id, |
1897 | cmd->se_ordered_id); | 1897 | cmd->se_ordered_id); |
1898 | } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { | 1898 | } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { |
1899 | atomic_dec(&dev->dev_ordered_sync); | 1899 | atomic_dec(&dev->dev_ordered_sync); |
1900 | smp_mb__after_atomic_dec(); | 1900 | smp_mb__after_atomic_dec(); |
1901 | 1901 | ||
1902 | dev->dev_cur_ordered_id++; | 1902 | dev->dev_cur_ordered_id++; |
1903 | pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:" | 1903 | pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:" |
1904 | " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id); | 1904 | " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id); |
1905 | } | 1905 | } |
1906 | 1906 | ||
1907 | target_restart_delayed_cmds(dev); | 1907 | target_restart_delayed_cmds(dev); |
1908 | } | 1908 | } |
1909 | 1909 | ||
1910 | static void transport_complete_qf(struct se_cmd *cmd) | 1910 | static void transport_complete_qf(struct se_cmd *cmd) |
1911 | { | 1911 | { |
1912 | int ret = 0; | 1912 | int ret = 0; |
1913 | 1913 | ||
1914 | if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) | 1914 | if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) |
1915 | transport_complete_task_attr(cmd); | 1915 | transport_complete_task_attr(cmd); |
1916 | 1916 | ||
1917 | if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { | 1917 | if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { |
1918 | ret = cmd->se_tfo->queue_status(cmd); | 1918 | ret = cmd->se_tfo->queue_status(cmd); |
1919 | if (ret) | 1919 | if (ret) |
1920 | goto out; | 1920 | goto out; |
1921 | } | 1921 | } |
1922 | 1922 | ||
1923 | switch (cmd->data_direction) { | 1923 | switch (cmd->data_direction) { |
1924 | case DMA_FROM_DEVICE: | 1924 | case DMA_FROM_DEVICE: |
1925 | ret = cmd->se_tfo->queue_data_in(cmd); | 1925 | ret = cmd->se_tfo->queue_data_in(cmd); |
1926 | break; | 1926 | break; |
1927 | case DMA_TO_DEVICE: | 1927 | case DMA_TO_DEVICE: |
1928 | if (cmd->t_bidi_data_sg) { | 1928 | if (cmd->t_bidi_data_sg) { |
1929 | ret = cmd->se_tfo->queue_data_in(cmd); | 1929 | ret = cmd->se_tfo->queue_data_in(cmd); |
1930 | if (ret < 0) | 1930 | if (ret < 0) |
1931 | break; | 1931 | break; |
1932 | } | 1932 | } |
1933 | /* Fall through for DMA_TO_DEVICE */ | 1933 | /* Fall through for DMA_TO_DEVICE */ |
1934 | case DMA_NONE: | 1934 | case DMA_NONE: |
1935 | ret = cmd->se_tfo->queue_status(cmd); | 1935 | ret = cmd->se_tfo->queue_status(cmd); |
1936 | break; | 1936 | break; |
1937 | default: | 1937 | default: |
1938 | break; | 1938 | break; |
1939 | } | 1939 | } |
1940 | 1940 | ||
1941 | out: | 1941 | out: |
1942 | if (ret < 0) { | 1942 | if (ret < 0) { |
1943 | transport_handle_queue_full(cmd, cmd->se_dev); | 1943 | transport_handle_queue_full(cmd, cmd->se_dev); |
1944 | return; | 1944 | return; |
1945 | } | 1945 | } |
1946 | transport_lun_remove_cmd(cmd); | 1946 | transport_lun_remove_cmd(cmd); |
1947 | transport_cmd_check_stop_to_fabric(cmd); | 1947 | transport_cmd_check_stop_to_fabric(cmd); |
1948 | } | 1948 | } |
1949 | 1949 | ||
1950 | static void transport_handle_queue_full( | 1950 | static void transport_handle_queue_full( |
1951 | struct se_cmd *cmd, | 1951 | struct se_cmd *cmd, |
1952 | struct se_device *dev) | 1952 | struct se_device *dev) |
1953 | { | 1953 | { |
1954 | spin_lock_irq(&dev->qf_cmd_lock); | 1954 | spin_lock_irq(&dev->qf_cmd_lock); |
1955 | list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); | 1955 | list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); |
1956 | atomic_inc(&dev->dev_qf_count); | 1956 | atomic_inc(&dev->dev_qf_count); |
1957 | smp_mb__after_atomic_inc(); | 1957 | smp_mb__after_atomic_inc(); |
1958 | spin_unlock_irq(&cmd->se_dev->qf_cmd_lock); | 1958 | spin_unlock_irq(&cmd->se_dev->qf_cmd_lock); |
1959 | 1959 | ||
1960 | schedule_work(&cmd->se_dev->qf_work_queue); | 1960 | schedule_work(&cmd->se_dev->qf_work_queue); |
1961 | } | 1961 | } |
1962 | 1962 | ||
1963 | static void target_complete_ok_work(struct work_struct *work) | 1963 | static void target_complete_ok_work(struct work_struct *work) |
1964 | { | 1964 | { |
1965 | struct se_cmd *cmd = container_of(work, struct se_cmd, work); | 1965 | struct se_cmd *cmd = container_of(work, struct se_cmd, work); |
1966 | int ret; | 1966 | int ret; |
1967 | 1967 | ||
1968 | /* | 1968 | /* |
1969 | * Check if we need to move delayed/dormant tasks from cmds on the | 1969 | * Check if we need to move delayed/dormant tasks from cmds on the |
1970 | * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task | 1970 | * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task |
1971 | * Attribute. | 1971 | * Attribute. |
1972 | */ | 1972 | */ |
1973 | if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) | 1973 | if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) |
1974 | transport_complete_task_attr(cmd); | 1974 | transport_complete_task_attr(cmd); |
1975 | /* | 1975 | /* |
1976 | * Check to schedule QUEUE_FULL work, or execute an existing | 1976 | * Check to schedule QUEUE_FULL work, or execute an existing |
1977 | * cmd->transport_qf_callback() | 1977 | * cmd->transport_qf_callback() |
1978 | */ | 1978 | */ |
1979 | if (atomic_read(&cmd->se_dev->dev_qf_count) != 0) | 1979 | if (atomic_read(&cmd->se_dev->dev_qf_count) != 0) |
1980 | schedule_work(&cmd->se_dev->qf_work_queue); | 1980 | schedule_work(&cmd->se_dev->qf_work_queue); |
1981 | 1981 | ||
1982 | /* | 1982 | /* |
1983 | * Check if we need to send a sense buffer from | 1983 | * Check if we need to send a sense buffer from |
1984 | * the struct se_cmd in question. | 1984 | * the struct se_cmd in question. |
1985 | */ | 1985 | */ |
1986 | if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { | 1986 | if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { |
1987 | WARN_ON(!cmd->scsi_status); | 1987 | WARN_ON(!cmd->scsi_status); |
1988 | ret = transport_send_check_condition_and_sense( | 1988 | ret = transport_send_check_condition_and_sense( |
1989 | cmd, 0, 1); | 1989 | cmd, 0, 1); |
1990 | if (ret == -EAGAIN || ret == -ENOMEM) | 1990 | if (ret == -EAGAIN || ret == -ENOMEM) |
1991 | goto queue_full; | 1991 | goto queue_full; |
1992 | 1992 | ||
1993 | transport_lun_remove_cmd(cmd); | 1993 | transport_lun_remove_cmd(cmd); |
1994 | transport_cmd_check_stop_to_fabric(cmd); | 1994 | transport_cmd_check_stop_to_fabric(cmd); |
1995 | return; | 1995 | return; |
1996 | } | 1996 | } |
1997 | /* | 1997 | /* |
1998 | * Check for a callback, used by, amongst other things, | 1998 | * Check for a callback, used by, amongst other things, |
1999 | * XDWRITE_READ_10 emulation. | 1999 | * XDWRITE_READ_10 emulation. |
2000 | */ | 2000 | */ |
2001 | if (cmd->transport_complete_callback) | 2001 | if (cmd->transport_complete_callback) |
2002 | cmd->transport_complete_callback(cmd); | 2002 | cmd->transport_complete_callback(cmd); |
2003 | 2003 | ||
2004 | switch (cmd->data_direction) { | 2004 | switch (cmd->data_direction) { |
2005 | case DMA_FROM_DEVICE: | 2005 | case DMA_FROM_DEVICE: |
2006 | spin_lock(&cmd->se_lun->lun_sep_lock); | 2006 | spin_lock(&cmd->se_lun->lun_sep_lock); |
2007 | if (cmd->se_lun->lun_sep) { | 2007 | if (cmd->se_lun->lun_sep) { |
2008 | cmd->se_lun->lun_sep->sep_stats.tx_data_octets += | 2008 | cmd->se_lun->lun_sep->sep_stats.tx_data_octets += |
2009 | cmd->data_length; | 2009 | cmd->data_length; |
2010 | } | 2010 | } |
2011 | spin_unlock(&cmd->se_lun->lun_sep_lock); | 2011 | spin_unlock(&cmd->se_lun->lun_sep_lock); |
2012 | 2012 | ||
2013 | ret = cmd->se_tfo->queue_data_in(cmd); | 2013 | ret = cmd->se_tfo->queue_data_in(cmd); |
2014 | if (ret == -EAGAIN || ret == -ENOMEM) | 2014 | if (ret == -EAGAIN || ret == -ENOMEM) |
2015 | goto queue_full; | 2015 | goto queue_full; |
2016 | break; | 2016 | break; |
2017 | case DMA_TO_DEVICE: | 2017 | case DMA_TO_DEVICE: |
2018 | spin_lock(&cmd->se_lun->lun_sep_lock); | 2018 | spin_lock(&cmd->se_lun->lun_sep_lock); |
2019 | if (cmd->se_lun->lun_sep) { | 2019 | if (cmd->se_lun->lun_sep) { |
2020 | cmd->se_lun->lun_sep->sep_stats.rx_data_octets += | 2020 | cmd->se_lun->lun_sep->sep_stats.rx_data_octets += |
2021 | cmd->data_length; | 2021 | cmd->data_length; |
2022 | } | 2022 | } |
2023 | spin_unlock(&cmd->se_lun->lun_sep_lock); | 2023 | spin_unlock(&cmd->se_lun->lun_sep_lock); |
2024 | /* | 2024 | /* |
2025 | * Check if we need to send READ payload for BIDI-COMMAND | 2025 | * Check if we need to send READ payload for BIDI-COMMAND |
2026 | */ | 2026 | */ |
2027 | if (cmd->t_bidi_data_sg) { | 2027 | if (cmd->t_bidi_data_sg) { |
2028 | spin_lock(&cmd->se_lun->lun_sep_lock); | 2028 | spin_lock(&cmd->se_lun->lun_sep_lock); |
2029 | if (cmd->se_lun->lun_sep) { | 2029 | if (cmd->se_lun->lun_sep) { |
2030 | cmd->se_lun->lun_sep->sep_stats.tx_data_octets += | 2030 | cmd->se_lun->lun_sep->sep_stats.tx_data_octets += |
2031 | cmd->data_length; | 2031 | cmd->data_length; |
2032 | } | 2032 | } |
2033 | spin_unlock(&cmd->se_lun->lun_sep_lock); | 2033 | spin_unlock(&cmd->se_lun->lun_sep_lock); |
2034 | ret = cmd->se_tfo->queue_data_in(cmd); | 2034 | ret = cmd->se_tfo->queue_data_in(cmd); |
2035 | if (ret == -EAGAIN || ret == -ENOMEM) | 2035 | if (ret == -EAGAIN || ret == -ENOMEM) |
2036 | goto queue_full; | 2036 | goto queue_full; |
2037 | break; | 2037 | break; |
2038 | } | 2038 | } |
2039 | /* Fall through for DMA_TO_DEVICE */ | 2039 | /* Fall through for DMA_TO_DEVICE */ |
2040 | case DMA_NONE: | 2040 | case DMA_NONE: |
2041 | ret = cmd->se_tfo->queue_status(cmd); | 2041 | ret = cmd->se_tfo->queue_status(cmd); |
2042 | if (ret == -EAGAIN || ret == -ENOMEM) | 2042 | if (ret == -EAGAIN || ret == -ENOMEM) |
2043 | goto queue_full; | 2043 | goto queue_full; |
2044 | break; | 2044 | break; |
2045 | default: | 2045 | default: |
2046 | break; | 2046 | break; |
2047 | } | 2047 | } |
2048 | 2048 | ||
2049 | transport_lun_remove_cmd(cmd); | 2049 | transport_lun_remove_cmd(cmd); |
2050 | transport_cmd_check_stop_to_fabric(cmd); | 2050 | transport_cmd_check_stop_to_fabric(cmd); |
2051 | return; | 2051 | return; |
2052 | 2052 | ||
2053 | queue_full: | 2053 | queue_full: |
2054 | pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p," | 2054 | pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p," |
2055 | " data_direction: %d\n", cmd, cmd->data_direction); | 2055 | " data_direction: %d\n", cmd, cmd->data_direction); |
2056 | cmd->t_state = TRANSPORT_COMPLETE_QF_OK; | 2056 | cmd->t_state = TRANSPORT_COMPLETE_QF_OK; |
2057 | transport_handle_queue_full(cmd, cmd->se_dev); | 2057 | transport_handle_queue_full(cmd, cmd->se_dev); |
2058 | } | 2058 | } |
2059 | 2059 | ||
2060 | static inline void transport_free_sgl(struct scatterlist *sgl, int nents) | 2060 | static inline void transport_free_sgl(struct scatterlist *sgl, int nents) |
2061 | { | 2061 | { |
2062 | struct scatterlist *sg; | 2062 | struct scatterlist *sg; |
2063 | int count; | 2063 | int count; |
2064 | 2064 | ||
2065 | for_each_sg(sgl, sg, nents, count) | 2065 | for_each_sg(sgl, sg, nents, count) |
2066 | __free_page(sg_page(sg)); | 2066 | __free_page(sg_page(sg)); |
2067 | 2067 | ||
2068 | kfree(sgl); | 2068 | kfree(sgl); |
2069 | } | 2069 | } |
2070 | 2070 | ||
2071 | static inline void transport_free_pages(struct se_cmd *cmd) | 2071 | static inline void transport_free_pages(struct se_cmd *cmd) |
2072 | { | 2072 | { |
2073 | if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) | 2073 | if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) |
2074 | return; | 2074 | return; |
2075 | 2075 | ||
2076 | transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents); | 2076 | transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents); |
2077 | cmd->t_data_sg = NULL; | 2077 | cmd->t_data_sg = NULL; |
2078 | cmd->t_data_nents = 0; | 2078 | cmd->t_data_nents = 0; |
2079 | 2079 | ||
2080 | transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents); | 2080 | transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents); |
2081 | cmd->t_bidi_data_sg = NULL; | 2081 | cmd->t_bidi_data_sg = NULL; |
2082 | cmd->t_bidi_data_nents = 0; | 2082 | cmd->t_bidi_data_nents = 0; |
2083 | } | 2083 | } |
2084 | 2084 | ||
2085 | /** | 2085 | /** |
2086 | * transport_release_cmd - free a command | 2086 | * transport_release_cmd - free a command |
2087 | * @cmd: command to free | 2087 | * @cmd: command to free |
2088 | * | 2088 | * |
2089 | * This routine unconditionally frees a command, and reference counting | 2089 | * This routine unconditionally frees a command, and reference counting |
2090 | * or list removal must be done in the caller. | 2090 | * or list removal must be done in the caller. |
2091 | */ | 2091 | */ |
2092 | static void transport_release_cmd(struct se_cmd *cmd) | 2092 | static void transport_release_cmd(struct se_cmd *cmd) |
2093 | { | 2093 | { |
2094 | BUG_ON(!cmd->se_tfo); | 2094 | BUG_ON(!cmd->se_tfo); |
2095 | 2095 | ||
2096 | if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) | 2096 | if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) |
2097 | core_tmr_release_req(cmd->se_tmr_req); | 2097 | core_tmr_release_req(cmd->se_tmr_req); |
2098 | if (cmd->t_task_cdb != cmd->__t_task_cdb) | 2098 | if (cmd->t_task_cdb != cmd->__t_task_cdb) |
2099 | kfree(cmd->t_task_cdb); | 2099 | kfree(cmd->t_task_cdb); |
2100 | /* | 2100 | /* |
2101 | * If this cmd has been setup with target_get_sess_cmd(), drop | 2101 | * If this cmd has been setup with target_get_sess_cmd(), drop |
2102 | * the kref and call ->release_cmd() in kref callback. | 2102 | * the kref and call ->release_cmd() in kref callback. |
2103 | */ | 2103 | */ |
2104 | if (cmd->check_release != 0) { | 2104 | if (cmd->check_release != 0) { |
2105 | target_put_sess_cmd(cmd->se_sess, cmd); | 2105 | target_put_sess_cmd(cmd->se_sess, cmd); |
2106 | return; | 2106 | return; |
2107 | } | 2107 | } |
2108 | cmd->se_tfo->release_cmd(cmd); | 2108 | cmd->se_tfo->release_cmd(cmd); |
2109 | } | 2109 | } |
2110 | 2110 | ||
2111 | /** | 2111 | /** |
2112 | * transport_put_cmd - release a reference to a command | 2112 | * transport_put_cmd - release a reference to a command |
2113 | * @cmd: command to release | 2113 | * @cmd: command to release |
2114 | * | 2114 | * |
2115 | * This routine releases our reference to the command and frees it if possible. | 2115 | * This routine releases our reference to the command and frees it if possible. |
2116 | */ | 2116 | */ |
2117 | static void transport_put_cmd(struct se_cmd *cmd) | 2117 | static void transport_put_cmd(struct se_cmd *cmd) |
2118 | { | 2118 | { |
2119 | unsigned long flags; | 2119 | unsigned long flags; |
2120 | 2120 | ||
2121 | spin_lock_irqsave(&cmd->t_state_lock, flags); | 2121 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
2122 | if (atomic_read(&cmd->t_fe_count)) { | 2122 | if (atomic_read(&cmd->t_fe_count)) { |
2123 | if (!atomic_dec_and_test(&cmd->t_fe_count)) | 2123 | if (!atomic_dec_and_test(&cmd->t_fe_count)) |
2124 | goto out_busy; | 2124 | goto out_busy; |
2125 | } | 2125 | } |
2126 | 2126 | ||
2127 | if (cmd->transport_state & CMD_T_DEV_ACTIVE) { | 2127 | if (cmd->transport_state & CMD_T_DEV_ACTIVE) { |
2128 | cmd->transport_state &= ~CMD_T_DEV_ACTIVE; | 2128 | cmd->transport_state &= ~CMD_T_DEV_ACTIVE; |
2129 | target_remove_from_state_list(cmd); | 2129 | target_remove_from_state_list(cmd); |
2130 | } | 2130 | } |
2131 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 2131 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
2132 | 2132 | ||
2133 | transport_free_pages(cmd); | 2133 | transport_free_pages(cmd); |
2134 | transport_release_cmd(cmd); | 2134 | transport_release_cmd(cmd); |
2135 | return; | 2135 | return; |
2136 | out_busy: | 2136 | out_busy: |
2137 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 2137 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
2138 | } | 2138 | } |
2139 | 2139 | ||
2140 | /* | 2140 | /* |
2141 | * transport_generic_map_mem_to_cmd - Use fabric-alloced pages instead of | 2141 | * transport_generic_map_mem_to_cmd - Use fabric-alloced pages instead of |
2142 | * allocating in the core. | 2142 | * allocating in the core. |
2143 | * @cmd: Associated se_cmd descriptor | 2143 | * @cmd: Associated se_cmd descriptor |
2144 | * @sgl: SGL style memory for TCM WRITE / READ | 2144 | * @sgl: SGL style memory for TCM WRITE / READ |
2145 | * @sgl_count: Number of SGL elements | 2145 | * @sgl_count: Number of SGL elements |
2146 | * @sgl_bidi: SGL style memory for TCM BIDI READ | 2146 | * @sgl_bidi: SGL style memory for TCM BIDI READ |
2147 | * @sgl_bidi_count: Number of BIDI READ SGL elements | 2147 | * @sgl_bidi_count: Number of BIDI READ SGL elements |
2148 | * | 2148 | * |
2149 | * Return: nonzero if cmd was rejected for -ENOMEM or improper usage | 2149 | * Return: nonzero if cmd was rejected for -ENOMEM or improper usage |
2150 | * of parameters. | 2150 | * of parameters. |
2151 | */ | 2151 | */ |
2152 | int transport_generic_map_mem_to_cmd( | 2152 | int transport_generic_map_mem_to_cmd( |
2153 | struct se_cmd *cmd, | 2153 | struct se_cmd *cmd, |
2154 | struct scatterlist *sgl, | 2154 | struct scatterlist *sgl, |
2155 | u32 sgl_count, | 2155 | u32 sgl_count, |
2156 | struct scatterlist *sgl_bidi, | 2156 | struct scatterlist *sgl_bidi, |
2157 | u32 sgl_bidi_count) | 2157 | u32 sgl_bidi_count) |
2158 | { | 2158 | { |
2159 | if (!sgl || !sgl_count) | 2159 | if (!sgl || !sgl_count) |
2160 | return 0; | 2160 | return 0; |
2161 | 2161 | ||
2162 | /* | 2162 | /* |
2163 | * Reject SCSI data overflow with map_mem_to_cmd() as incoming | 2163 | * Reject SCSI data overflow with map_mem_to_cmd() as incoming |
2164 | * scatterlists already have been set to follow what the fabric | 2164 | * scatterlists already have been set to follow what the fabric |
2165 | * passes for the original expected data transfer length. | 2165 | * passes for the original expected data transfer length. |
2166 | */ | 2166 | */ |
2167 | if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { | 2167 | if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { |
2168 | pr_warn("Rejecting SCSI DATA overflow for fabric using" | 2168 | pr_warn("Rejecting SCSI DATA overflow for fabric using" |
2169 | " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n"); | 2169 | " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n"); |
2170 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 2170 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
2171 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; | 2171 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; |
2172 | return -EINVAL; | 2172 | return -EINVAL; |
2173 | } | 2173 | } |
2174 | 2174 | ||
2175 | cmd->t_data_sg = sgl; | 2175 | cmd->t_data_sg = sgl; |
2176 | cmd->t_data_nents = sgl_count; | 2176 | cmd->t_data_nents = sgl_count; |
2177 | 2177 | ||
2178 | if (sgl_bidi && sgl_bidi_count) { | 2178 | if (sgl_bidi && sgl_bidi_count) { |
2179 | cmd->t_bidi_data_sg = sgl_bidi; | 2179 | cmd->t_bidi_data_sg = sgl_bidi; |
2180 | cmd->t_bidi_data_nents = sgl_bidi_count; | 2180 | cmd->t_bidi_data_nents = sgl_bidi_count; |
2181 | } | 2181 | } |
2182 | cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; | 2182 | cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; |
2183 | return 0; | 2183 | return 0; |
2184 | } | 2184 | } |
2185 | EXPORT_SYMBOL(transport_generic_map_mem_to_cmd); | 2185 | EXPORT_SYMBOL(transport_generic_map_mem_to_cmd); |
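For context, a fabric that already owns scatterlists (tcm_loop-style) hands them to the core roughly as follows. This is a hedged sketch with error handling trimmed; my_fabric_submit() is an illustrative name, not code from this commit:

    static int my_fabric_submit(struct se_cmd *se_cmd,
                                struct scatterlist *sgl, u32 sgl_count)
    {
            int ret;

            /* The core takes over the mapping and sets
             * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC, so transport_free_pages()
             * will leave these fabric-owned pages alone. */
            ret = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
                                                   NULL, 0);
            if (ret < 0)
                    return ret;

            return transport_generic_new_cmd(se_cmd);
    }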
2186 | 2186 | ||
2187 | void *transport_kmap_data_sg(struct se_cmd *cmd) | 2187 | void *transport_kmap_data_sg(struct se_cmd *cmd) |
2188 | { | 2188 | { |
2189 | struct scatterlist *sg = cmd->t_data_sg; | 2189 | struct scatterlist *sg = cmd->t_data_sg; |
2190 | struct page **pages; | 2190 | struct page **pages; |
2191 | int i; | 2191 | int i; |
2192 | 2192 | ||
2193 | BUG_ON(!sg); | 2193 | BUG_ON(!sg); |
2194 | /* | 2194 | /* |
2195 | * We need to take into account a possible offset here for fabrics like | 2195 | * We need to take into account a possible offset here for fabrics like |
2196 | * tcm_loop that may be using a contig buffer from the SCSI midlayer for | 2196 | * tcm_loop that may be using a contig buffer from the SCSI midlayer for |
2197 | * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd() | 2197 | * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd() |
2198 | */ | 2198 | */ |
2199 | if (!cmd->t_data_nents) | 2199 | if (!cmd->t_data_nents) |
2200 | return NULL; | 2200 | return NULL; |
2201 | else if (cmd->t_data_nents == 1) | 2201 | else if (cmd->t_data_nents == 1) |
2202 | return kmap(sg_page(sg)) + sg->offset; | 2202 | return kmap(sg_page(sg)) + sg->offset; |
2203 | 2203 | ||
2204 | /* >1 page. use vmap */ | 2204 | /* >1 page. use vmap */ |
2205 | pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL); | 2205 | pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL); |
2206 | if (!pages) | 2206 | if (!pages) |
2207 | return NULL; | 2207 | return NULL; |
2208 | 2208 | ||
2209 | /* convert sg[] to pages[] */ | 2209 | /* convert sg[] to pages[] */ |
2210 | for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) { | 2210 | for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) { |
2211 | pages[i] = sg_page(sg); | 2211 | pages[i] = sg_page(sg); |
2212 | } | 2212 | } |
2213 | 2213 | ||
2214 | cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL); | 2214 | cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL); |
2215 | kfree(pages); | 2215 | kfree(pages); |
2216 | if (!cmd->t_data_vmap) | 2216 | if (!cmd->t_data_vmap) |
2217 | return NULL; | 2217 | return NULL; |
2218 | 2218 | ||
2219 | return cmd->t_data_vmap + cmd->t_data_sg[0].offset; | 2219 | return cmd->t_data_vmap + cmd->t_data_sg[0].offset; |
2220 | } | 2220 | } |
2221 | EXPORT_SYMBOL(transport_kmap_data_sg); | 2221 | EXPORT_SYMBOL(transport_kmap_data_sg); |
2222 | 2222 | ||
2223 | void transport_kunmap_data_sg(struct se_cmd *cmd) | 2223 | void transport_kunmap_data_sg(struct se_cmd *cmd) |
2224 | { | 2224 | { |
2225 | if (!cmd->t_data_nents) { | 2225 | if (!cmd->t_data_nents) { |
2226 | return; | 2226 | return; |
2227 | } else if (cmd->t_data_nents == 1) { | 2227 | } else if (cmd->t_data_nents == 1) { |
2228 | kunmap(sg_page(cmd->t_data_sg)); | 2228 | kunmap(sg_page(cmd->t_data_sg)); |
2229 | return; | 2229 | return; |
2230 | } | 2230 | } |
2231 | 2231 | ||
2232 | vunmap(cmd->t_data_vmap); | 2232 | vunmap(cmd->t_data_vmap); |
2233 | cmd->t_data_vmap = NULL; | 2233 | cmd->t_data_vmap = NULL; |
2234 | } | 2234 | } |
2235 | EXPORT_SYMBOL(transport_kunmap_data_sg); | 2235 | EXPORT_SYMBOL(transport_kunmap_data_sg); |
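This pair is where the commit's guard matters: transport_kmap_data_sg() returns NULL when t_data_nents is zero, so backend code must check cmd->data_length before mapping. A minimal sketch of the pattern, with pscsi_inspect_payload() as an illustrative name:

    static int pscsi_inspect_payload(struct se_cmd *cmd)
    {
            unsigned char *buf;

            if (!cmd->data_length)
                    return 0;       /* zero-length command: nothing to map */

            buf = transport_kmap_data_sg(cmd);
            if (!buf)
                    return -ENOMEM;

            /* ... read or fill the payload through buf ... */

            transport_kunmap_data_sg(cmd);
            return 0;
    }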
2236 | 2236 | ||
2237 | static int | 2237 | static int |
2238 | transport_generic_get_mem(struct se_cmd *cmd) | 2238 | transport_generic_get_mem(struct se_cmd *cmd) |
2239 | { | 2239 | { |
2240 | u32 length = cmd->data_length; | 2240 | u32 length = cmd->data_length; |
2241 | unsigned int nents; | 2241 | unsigned int nents; |
2242 | struct page *page; | 2242 | struct page *page; |
2243 | gfp_t zero_flag; | 2243 | gfp_t zero_flag; |
2244 | int i = 0; | 2244 | int i = 0; |
2245 | 2245 | ||
2246 | nents = DIV_ROUND_UP(length, PAGE_SIZE); | 2246 | nents = DIV_ROUND_UP(length, PAGE_SIZE); |
2247 | cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL); | 2247 | cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL); |
2248 | if (!cmd->t_data_sg) | 2248 | if (!cmd->t_data_sg) |
2249 | return -ENOMEM; | 2249 | return -ENOMEM; |
2250 | 2250 | ||
2251 | cmd->t_data_nents = nents; | 2251 | cmd->t_data_nents = nents; |
2252 | sg_init_table(cmd->t_data_sg, nents); | 2252 | sg_init_table(cmd->t_data_sg, nents); |
2253 | 2253 | ||
2254 | zero_flag = cmd->se_cmd_flags & SCF_SCSI_DATA_CDB ? 0 : __GFP_ZERO; | 2254 | zero_flag = cmd->se_cmd_flags & SCF_SCSI_DATA_CDB ? 0 : __GFP_ZERO; |
2255 | 2255 | ||
2256 | while (length) { | 2256 | while (length) { |
2257 | u32 page_len = min_t(u32, length, PAGE_SIZE); | 2257 | u32 page_len = min_t(u32, length, PAGE_SIZE); |
2258 | page = alloc_page(GFP_KERNEL | zero_flag); | 2258 | page = alloc_page(GFP_KERNEL | zero_flag); |
2259 | if (!page) | 2259 | if (!page) |
2260 | goto out; | 2260 | goto out; |
2261 | 2261 | ||
2262 | sg_set_page(&cmd->t_data_sg[i], page, page_len, 0); | 2262 | sg_set_page(&cmd->t_data_sg[i], page, page_len, 0); |
2263 | length -= page_len; | 2263 | length -= page_len; |
2264 | i++; | 2264 | i++; |
2265 | } | 2265 | } |
2266 | return 0; | 2266 | return 0; |
2267 | 2267 | ||
2268 | out: | 2268 | out: |
2269 | while (i > 0) { | 2269 | while (i > 0) { |
2270 | i--; | 2270 | i--; |
2271 | __free_page(sg_page(&cmd->t_data_sg[i])); | 2271 | __free_page(sg_page(&cmd->t_data_sg[i])); |
2272 | } | 2272 | } |
2273 | kfree(cmd->t_data_sg); | 2273 | kfree(cmd->t_data_sg); |
2274 | cmd->t_data_sg = NULL; | 2274 | cmd->t_data_sg = NULL; |
2275 | return -ENOMEM; | 2275 | return -ENOMEM; |
2276 | } | 2276 | } |
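Worked example for the allocation above, assuming PAGE_SIZE == 4096: data_length = 9000 gives nents = DIV_ROUND_UP(9000, 4096) = 3, and the loop fills the table with page lengths 4096, 4096 and 808. __GFP_ZERO is applied only for non-SCF_SCSI_DATA_CDB commands, so parameter-list buffers start zeroed while regular read/write payloads skip the memset cost.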
2277 | 2277 | ||
2278 | /* | 2278 | /* |
2279 | * Allocate any required resources to execute the command. For writes we | 2279 | * Allocate any required resources to execute the command. For writes we |
2280 | * might not have the payload yet, so notify the fabric via a call to | 2280 | * might not have the payload yet, so notify the fabric via a call to |
2281 | * ->write_pending instead. Otherwise place it on the execution queue. | 2281 | * ->write_pending instead. Otherwise place it on the execution queue. |
2282 | */ | 2282 | */ |
2283 | int transport_generic_new_cmd(struct se_cmd *cmd) | 2283 | int transport_generic_new_cmd(struct se_cmd *cmd) |
2284 | { | 2284 | { |
2285 | int ret = 0; | 2285 | int ret = 0; |
2286 | 2286 | ||
2287 | /* | 2287 | /* |
2288 | * Determine if the TCM fabric module has already allocated physical | 2288 | * Determine if the TCM fabric module has already allocated physical |
2289 | * memory, and is directly calling transport_generic_map_mem_to_cmd() | 2289 | * memory, and is directly calling transport_generic_map_mem_to_cmd() |
2290 | * beforehand. | 2290 | * beforehand. |
2291 | */ | 2291 | */ |
2292 | if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) && | 2292 | if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) && |
2293 | cmd->data_length) { | 2293 | cmd->data_length) { |
2294 | ret = transport_generic_get_mem(cmd); | 2294 | ret = transport_generic_get_mem(cmd); |
2295 | if (ret < 0) | 2295 | if (ret < 0) |
2296 | goto out_fail; | 2296 | goto out_fail; |
2297 | } | 2297 | } |
2298 | /* | 2298 | /* |
2299 | * If this command doesn't have any payload and we don't have to call | 2299 | * If this command doesn't have any payload and we don't have to call |
2300 | * into the fabric for data transfers, go ahead and complete it right | 2300 | * into the fabric for data transfers, go ahead and complete it right |
2301 | * away. | 2301 | * away. |
2302 | */ | 2302 | */ |
2303 | if (!cmd->data_length) { | 2303 | if (!cmd->data_length && |
| | 2304 | (cmd->se_dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV || |
| | 2305 | cmd->t_task_cdb[0] == REPORT_LUNS)) { |
2304 | spin_lock_irq(&cmd->t_state_lock); | 2306 | spin_lock_irq(&cmd->t_state_lock); |
2305 | cmd->t_state = TRANSPORT_COMPLETE; | 2307 | cmd->t_state = TRANSPORT_COMPLETE; |
2306 | cmd->transport_state |= CMD_T_ACTIVE; | 2308 | cmd->transport_state |= CMD_T_ACTIVE; |
2307 | spin_unlock_irq(&cmd->t_state_lock); | 2309 | spin_unlock_irq(&cmd->t_state_lock); |
2308 | 2310 | ||
2309 | if (cmd->t_task_cdb[0] == REQUEST_SENSE) { | 2311 | if (cmd->t_task_cdb[0] == REQUEST_SENSE) { |
2310 | u8 ua_asc = 0, ua_ascq = 0; | 2312 | u8 ua_asc = 0, ua_ascq = 0; |
2311 | 2313 | ||
2312 | core_scsi3_ua_clear_for_request_sense(cmd, | 2314 | core_scsi3_ua_clear_for_request_sense(cmd, |
2313 | &ua_asc, &ua_ascq); | 2315 | &ua_asc, &ua_ascq); |
2314 | } | 2316 | } |
2315 | 2317 | ||
2316 | INIT_WORK(&cmd->work, target_complete_ok_work); | 2318 | INIT_WORK(&cmd->work, target_complete_ok_work); |
2317 | queue_work(target_completion_wq, &cmd->work); | 2319 | queue_work(target_completion_wq, &cmd->work); |
2318 | return 0; | 2320 | return 0; |
2319 | } | 2321 | } |
2320 | 2322 | ||
2321 | atomic_inc(&cmd->t_fe_count); | 2323 | atomic_inc(&cmd->t_fe_count); |
2322 | 2324 | ||
2323 | /* | 2325 | /* |
2324 | * If this command is not a write we can execute it right here, | 2326 | * If this command is not a write we can execute it right here, |
2325 | * for write buffers we need to notify the fabric driver first | 2327 | * for write buffers we need to notify the fabric driver first |
2326 | * and let it call back once the write buffers are ready. | 2328 | * and let it call back once the write buffers are ready. |
2327 | */ | 2329 | */ |
2328 | target_add_to_state_list(cmd); | 2330 | target_add_to_state_list(cmd); |
2329 | if (cmd->data_direction != DMA_TO_DEVICE) { | 2331 | if (cmd->data_direction != DMA_TO_DEVICE) { |
2330 | target_execute_cmd(cmd); | 2332 | target_execute_cmd(cmd); |
2331 | return 0; | 2333 | return 0; |
2332 | } | 2334 | } |
2333 | 2335 | ||
2334 | spin_lock_irq(&cmd->t_state_lock); | 2336 | spin_lock_irq(&cmd->t_state_lock); |
2335 | cmd->t_state = TRANSPORT_WRITE_PENDING; | 2337 | cmd->t_state = TRANSPORT_WRITE_PENDING; |
2336 | spin_unlock_irq(&cmd->t_state_lock); | 2338 | spin_unlock_irq(&cmd->t_state_lock); |
2337 | 2339 | ||
2338 | transport_cmd_check_stop(cmd, false); | 2340 | transport_cmd_check_stop(cmd, false); |
2339 | 2341 | ||
2340 | ret = cmd->se_tfo->write_pending(cmd); | 2342 | ret = cmd->se_tfo->write_pending(cmd); |
2341 | if (ret == -EAGAIN || ret == -ENOMEM) | 2343 | if (ret == -EAGAIN || ret == -ENOMEM) |
2342 | goto queue_full; | 2344 | goto queue_full; |
2343 | 2345 | ||
2344 | if (ret < 0) | 2346 | if (ret < 0) |
2345 | return ret; | 2347 | return ret; |
2346 | return 1; | 2348 | return 1; |
2347 | 2349 | ||
2348 | out_fail: | 2350 | out_fail: |
2349 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 2351 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
2350 | cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 2352 | cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
2351 | return -EINVAL; | 2353 | return -EINVAL; |
2352 | queue_full: | 2354 | queue_full: |
2353 | pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd); | 2355 | pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd); |
2354 | cmd->t_state = TRANSPORT_COMPLETE_QF_WP; | 2356 | cmd->t_state = TRANSPORT_COMPLETE_QF_WP; |
2355 | transport_handle_queue_full(cmd, cmd->se_dev); | 2357 | transport_handle_queue_full(cmd, cmd->se_dev); |
2356 | return 0; | 2358 | return 0; |
2357 | } | 2359 | } |
2358 | EXPORT_SYMBOL(transport_generic_new_cmd); | 2360 | EXPORT_SYMBOL(transport_generic_new_cmd); |
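Restated as a predicate, the policy this hunk introduces is: complete a zero-length command immediately only when the backend is not PSCSI, or when the CDB is REPORT LUNS (which stays in the normal SPC emulation); otherwise the zero-length command travels the full execution path down to the device. The helper below is an illustrative rewrite of the condition, not code from this commit:

    static inline bool tcm_complete_zero_len_early(struct se_cmd *cmd)
    {
            return !cmd->data_length &&
                   (cmd->se_dev->transport->transport_type !=
                                TRANSPORT_PLUGIN_PHBA_PDEV ||
                    cmd->t_task_cdb[0] == REPORT_LUNS);
    }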
2359 | 2361 | ||
2360 | static void transport_write_pending_qf(struct se_cmd *cmd) | 2362 | static void transport_write_pending_qf(struct se_cmd *cmd) |
2361 | { | 2363 | { |
2362 | int ret; | 2364 | int ret; |
2363 | 2365 | ||
2364 | ret = cmd->se_tfo->write_pending(cmd); | 2366 | ret = cmd->se_tfo->write_pending(cmd); |
2365 | if (ret == -EAGAIN || ret == -ENOMEM) { | 2367 | if (ret == -EAGAIN || ret == -ENOMEM) { |
2366 | pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", | 2368 | pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", |
2367 | cmd); | 2369 | cmd); |
2368 | transport_handle_queue_full(cmd, cmd->se_dev); | 2370 | transport_handle_queue_full(cmd, cmd->se_dev); |
2369 | } | 2371 | } |
2370 | } | 2372 | } |
2371 | 2373 | ||
2372 | void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) | 2374 | void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) |
2373 | { | 2375 | { |
2374 | if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) { | 2376 | if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) { |
2375 | if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) | 2377 | if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) |
2376 | transport_wait_for_tasks(cmd); | 2378 | transport_wait_for_tasks(cmd); |
2377 | 2379 | ||
2378 | transport_release_cmd(cmd); | 2380 | transport_release_cmd(cmd); |
2379 | } else { | 2381 | } else { |
2380 | if (wait_for_tasks) | 2382 | if (wait_for_tasks) |
2381 | transport_wait_for_tasks(cmd); | 2383 | transport_wait_for_tasks(cmd); |
2382 | 2384 | ||
2383 | core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd); | 2385 | core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd); |
2384 | 2386 | ||
2385 | if (cmd->se_lun) | 2387 | if (cmd->se_lun) |
2386 | transport_lun_remove_cmd(cmd); | 2388 | transport_lun_remove_cmd(cmd); |
2387 | 2389 | ||
2388 | transport_put_cmd(cmd); | 2390 | transport_put_cmd(cmd); |
2389 | } | 2391 | } |
2390 | } | 2392 | } |
2391 | EXPORT_SYMBOL(transport_generic_free_cmd); | 2393 | EXPORT_SYMBOL(transport_generic_free_cmd); |
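As a usage note: a fabric typically ends its response path with transport_generic_free_cmd(cmd, 0) once the response has been acknowledged; passing wait_for_tasks = 1 additionally blocks via transport_wait_for_tasks() until the backend has quiesced the command, which is the variant normally used on exception paths.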
2392 | 2394 | ||
2393 | /* target_get_sess_cmd - Add command to active ->sess_cmd_list | 2395 | /* target_get_sess_cmd - Add command to active ->sess_cmd_list |
2394 | * @se_sess: session to reference | 2396 | * @se_sess: session to reference |
2395 | * @se_cmd: command descriptor to add | 2397 | * @se_cmd: command descriptor to add |
2396 | * @ack_kref: Signal that the fabric will perform an extra target_put_sess_cmd() as its ack | 2398 | * @ack_kref: Signal that the fabric will perform an extra target_put_sess_cmd() as its ack |
2397 | */ | 2399 | */ |
2398 | static int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd, | 2400 | static int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd, |
2399 | bool ack_kref) | 2401 | bool ack_kref) |
2400 | { | 2402 | { |
2401 | unsigned long flags; | 2403 | unsigned long flags; |
2402 | int ret = 0; | 2404 | int ret = 0; |
2403 | 2405 | ||
2404 | kref_init(&se_cmd->cmd_kref); | 2406 | kref_init(&se_cmd->cmd_kref); |
2405 | /* | 2407 | /* |
2406 | * Add a second kref if the fabric caller is expecting to handle | 2408 | * Add a second kref if the fabric caller is expecting to handle |
2407 | * fabric acknowledgement that requires two target_put_sess_cmd() | 2409 | * fabric acknowledgement that requires two target_put_sess_cmd() |
2408 | * invocations before se_cmd descriptor release. | 2410 | * invocations before se_cmd descriptor release. |
2409 | */ | 2411 | */ |
2410 | if (ack_kref == true) { | 2412 | if (ack_kref == true) { |
2411 | kref_get(&se_cmd->cmd_kref); | 2413 | kref_get(&se_cmd->cmd_kref); |
2412 | se_cmd->se_cmd_flags |= SCF_ACK_KREF; | 2414 | se_cmd->se_cmd_flags |= SCF_ACK_KREF; |
2413 | } | 2415 | } |
2414 | 2416 | ||
2415 | spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); | 2417 | spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); |
2416 | if (se_sess->sess_tearing_down) { | 2418 | if (se_sess->sess_tearing_down) { |
2417 | ret = -ESHUTDOWN; | 2419 | ret = -ESHUTDOWN; |
2418 | goto out; | 2420 | goto out; |
2419 | } | 2421 | } |
2420 | list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); | 2422 | list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); |
2421 | se_cmd->check_release = 1; | 2423 | se_cmd->check_release = 1; |
2422 | 2424 | ||
2423 | out: | 2425 | out: |
2424 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); | 2426 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); |
2425 | return ret; | 2427 | return ret; |
2426 | } | 2428 | } |
2427 | 2429 | ||
2428 | static void target_release_cmd_kref(struct kref *kref) | 2430 | static void target_release_cmd_kref(struct kref *kref) |
2429 | { | 2431 | { |
2430 | struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); | 2432 | struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); |
2431 | struct se_session *se_sess = se_cmd->se_sess; | 2433 | struct se_session *se_sess = se_cmd->se_sess; |
2432 | unsigned long flags; | 2434 | unsigned long flags; |
2433 | 2435 | ||
2434 | spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); | 2436 | spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); |
2435 | if (list_empty(&se_cmd->se_cmd_list)) { | 2437 | if (list_empty(&se_cmd->se_cmd_list)) { |
2436 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); | 2438 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); |
2437 | se_cmd->se_tfo->release_cmd(se_cmd); | 2439 | se_cmd->se_tfo->release_cmd(se_cmd); |
2438 | return; | 2440 | return; |
2439 | } | 2441 | } |
2440 | if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) { | 2442 | if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) { |
2441 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); | 2443 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); |
2442 | complete(&se_cmd->cmd_wait_comp); | 2444 | complete(&se_cmd->cmd_wait_comp); |
2443 | return; | 2445 | return; |
2444 | } | 2446 | } |
2445 | list_del(&se_cmd->se_cmd_list); | 2447 | list_del(&se_cmd->se_cmd_list); |
2446 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); | 2448 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); |
2447 | 2449 | ||
2448 | se_cmd->se_tfo->release_cmd(se_cmd); | 2450 | se_cmd->se_tfo->release_cmd(se_cmd); |
2449 | } | 2451 | } |
2450 | 2452 | ||
2451 | /* target_put_sess_cmd - Check for active I/O shutdown via kref_put | 2453 | /* target_put_sess_cmd - Check for active I/O shutdown via kref_put |
2452 | * @se_sess: session to reference | 2454 | * @se_sess: session to reference |
2453 | * @se_cmd: command descriptor to drop | 2455 | * @se_cmd: command descriptor to drop |
2454 | */ | 2456 | */ |
2455 | int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd) | 2457 | int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd) |
2456 | { | 2458 | { |
2457 | return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref); | 2459 | return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref); |
2458 | } | 2460 | } |
2459 | EXPORT_SYMBOL(target_put_sess_cmd); | 2461 | EXPORT_SYMBOL(target_put_sess_cmd); |
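The kref choreography when ack_kref is true, sketched as comments (the call sites are illustrative; in this tree target_get_sess_cmd() is static and runs on the fabric's behalf during command submission):

    /* target_get_sess_cmd(sess, cmd, true):  kref_init -> 1, kref_get -> 2  */
    /* completion path:  target_put_sess_cmd(sess, cmd)   2 -> 1             */
    /* fabric ack path:  target_put_sess_cmd(sess, cmd)   1 -> 0, and        */
    /*   target_release_cmd_kref() frees the descriptor via ->release_cmd()  */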
2460 | 2462 | ||
2461 | /* target_sess_cmd_list_set_waiting - Flag all commands in | 2463 | /* target_sess_cmd_list_set_waiting - Flag all commands in |
2462 | * sess_cmd_list to complete cmd_wait_comp. Set | 2464 | * sess_cmd_list to complete cmd_wait_comp. Set |
2463 | * sess_tearing_down so no more commands are queued. | 2465 | * sess_tearing_down so no more commands are queued. |
2464 | * @se_sess: session to flag | 2466 | * @se_sess: session to flag |
2465 | */ | 2467 | */ |
2466 | void target_sess_cmd_list_set_waiting(struct se_session *se_sess) | 2468 | void target_sess_cmd_list_set_waiting(struct se_session *se_sess) |
2467 | { | 2469 | { |
2468 | struct se_cmd *se_cmd; | 2470 | struct se_cmd *se_cmd; |
2469 | unsigned long flags; | 2471 | unsigned long flags; |
2470 | 2472 | ||
2471 | spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); | 2473 | spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); |
2472 | 2474 | ||
2473 | WARN_ON(se_sess->sess_tearing_down); | 2475 | WARN_ON(se_sess->sess_tearing_down); |
2474 | se_sess->sess_tearing_down = 1; | 2476 | se_sess->sess_tearing_down = 1; |
2475 | 2477 | ||
2476 | list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) | 2478 | list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) |
2477 | se_cmd->cmd_wait_set = 1; | 2479 | se_cmd->cmd_wait_set = 1; |
2478 | 2480 | ||
2479 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); | 2481 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); |
2480 | } | 2482 | } |
2481 | EXPORT_SYMBOL(target_sess_cmd_list_set_waiting); | 2483 | EXPORT_SYMBOL(target_sess_cmd_list_set_waiting); |
2482 | 2484 | ||
2483 | /* target_wait_for_sess_cmds - Wait for outstanding descriptors | 2485 | /* target_wait_for_sess_cmds - Wait for outstanding descriptors |
2484 | * @se_sess: session to wait for active I/O | 2486 | * @se_sess: session to wait for active I/O |
2485 | * @wait_for_tasks: Make extra transport_wait_for_tasks call | 2487 | * @wait_for_tasks: Make extra transport_wait_for_tasks call |
2486 | */ | 2488 | */ |
2487 | void target_wait_for_sess_cmds( | 2489 | void target_wait_for_sess_cmds( |
2488 | struct se_session *se_sess, | 2490 | struct se_session *se_sess, |
2489 | int wait_for_tasks) | 2491 | int wait_for_tasks) |
2490 | { | 2492 | { |
2491 | struct se_cmd *se_cmd, *tmp_cmd; | 2493 | struct se_cmd *se_cmd, *tmp_cmd; |
2492 | bool rc = false; | 2494 | bool rc = false; |
2493 | 2495 | ||
2494 | list_for_each_entry_safe(se_cmd, tmp_cmd, | 2496 | list_for_each_entry_safe(se_cmd, tmp_cmd, |
2495 | &se_sess->sess_cmd_list, se_cmd_list) { | 2497 | &se_sess->sess_cmd_list, se_cmd_list) { |
2496 | list_del(&se_cmd->se_cmd_list); | 2498 | list_del(&se_cmd->se_cmd_list); |
2497 | 2499 | ||
2498 | pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:" | 2500 | pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:" |
2499 | " %d\n", se_cmd, se_cmd->t_state, | 2501 | " %d\n", se_cmd, se_cmd->t_state, |
2500 | se_cmd->se_tfo->get_cmd_state(se_cmd)); | 2502 | se_cmd->se_tfo->get_cmd_state(se_cmd)); |
2501 | 2503 | ||
2502 | if (wait_for_tasks) { | 2504 | if (wait_for_tasks) { |
2503 | pr_debug("Calling transport_wait_for_tasks se_cmd: %p t_state: %d," | 2505 | pr_debug("Calling transport_wait_for_tasks se_cmd: %p t_state: %d," |
2504 | " fabric state: %d\n", se_cmd, se_cmd->t_state, | 2506 | " fabric state: %d\n", se_cmd, se_cmd->t_state, |
2505 | se_cmd->se_tfo->get_cmd_state(se_cmd)); | 2507 | se_cmd->se_tfo->get_cmd_state(se_cmd)); |
2506 | 2508 | ||
2507 | rc = transport_wait_for_tasks(se_cmd); | 2509 | rc = transport_wait_for_tasks(se_cmd); |
2508 | 2510 | ||
2509 | pr_debug("After transport_wait_for_tasks se_cmd: %p t_state: %d," | 2511 | pr_debug("After transport_wait_for_tasks se_cmd: %p t_state: %d," |
2510 | " fabric state: %d\n", se_cmd, se_cmd->t_state, | 2512 | " fabric state: %d\n", se_cmd, se_cmd->t_state, |
2511 | se_cmd->se_tfo->get_cmd_state(se_cmd)); | 2513 | se_cmd->se_tfo->get_cmd_state(se_cmd)); |
2512 | } | 2514 | } |
2513 | 2515 | ||
2514 | if (!rc) { | 2516 | if (!rc) { |
2515 | wait_for_completion(&se_cmd->cmd_wait_comp); | 2517 | wait_for_completion(&se_cmd->cmd_wait_comp); |
2516 | pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d" | 2518 | pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d" |
2517 | " fabric state: %d\n", se_cmd, se_cmd->t_state, | 2519 | " fabric state: %d\n", se_cmd, se_cmd->t_state, |
2518 | se_cmd->se_tfo->get_cmd_state(se_cmd)); | 2520 | se_cmd->se_tfo->get_cmd_state(se_cmd)); |
2519 | } | 2521 | } |
2520 | 2522 | ||
2521 | se_cmd->se_tfo->release_cmd(se_cmd); | 2523 | se_cmd->se_tfo->release_cmd(se_cmd); |
2522 | } | 2524 | } |
2523 | } | 2525 | } |
2524 | EXPORT_SYMBOL(target_wait_for_sess_cmds); | 2526 | EXPORT_SYMBOL(target_wait_for_sess_cmds); |
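Taken together with target_sess_cmd_list_set_waiting(), the intended shutdown ordering for a fabric looks roughly like the sketch below (hedged; transport_deregister_session() is the usual final step but is not shown in this hunk):

    target_sess_cmd_list_set_waiting(se_sess);  /* stop new additions        */
    target_wait_for_sess_cmds(se_sess, 0);      /* drain outstanding cmds    */
    transport_deregister_session(se_sess);      /* then tear the session down */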
2525 | 2527 | ||
2526 | /* transport_lun_wait_for_tasks(): | 2528 | /* transport_lun_wait_for_tasks(): |
2527 | * | 2529 | * |
2528 | * Called from ConfigFS context to stop the passed struct se_cmd to allow | 2530 | * Called from ConfigFS context to stop the passed struct se_cmd to allow |
2529 | * a struct se_lun to be successfully shut down. | 2531 | * a struct se_lun to be successfully shut down. |
2530 | */ | 2532 | */ |
2531 | static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun) | 2533 | static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun) |
2532 | { | 2534 | { |
2533 | unsigned long flags; | 2535 | unsigned long flags; |
2534 | int ret = 0; | 2536 | int ret = 0; |
2535 | 2537 | ||
2536 | /* | 2538 | /* |
2537 | * If the frontend has already requested this struct se_cmd to | 2539 | * If the frontend has already requested this struct se_cmd to |
2538 | * be stopped, we can safely ignore this struct se_cmd. | 2540 | * be stopped, we can safely ignore this struct se_cmd. |
2539 | */ | 2541 | */ |
2540 | spin_lock_irqsave(&cmd->t_state_lock, flags); | 2542 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
2541 | if (cmd->transport_state & CMD_T_STOP) { | 2543 | if (cmd->transport_state & CMD_T_STOP) { |
2542 | cmd->transport_state &= ~CMD_T_LUN_STOP; | 2544 | cmd->transport_state &= ~CMD_T_LUN_STOP; |
2543 | 2545 | ||
2544 | pr_debug("ConfigFS ITT[0x%08x] - CMD_T_STOP, skipping\n", | 2546 | pr_debug("ConfigFS ITT[0x%08x] - CMD_T_STOP, skipping\n", |
2545 | cmd->se_tfo->get_task_tag(cmd)); | 2547 | cmd->se_tfo->get_task_tag(cmd)); |
2546 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 2548 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
2547 | transport_cmd_check_stop(cmd, false); | 2549 | transport_cmd_check_stop(cmd, false); |
2548 | return -EPERM; | 2550 | return -EPERM; |
2549 | } | 2551 | } |
2550 | cmd->transport_state |= CMD_T_LUN_FE_STOP; | 2552 | cmd->transport_state |= CMD_T_LUN_FE_STOP; |
2551 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 2553 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
2552 | 2554 | ||
2553 | // XXX: audit task_flags checks. | 2555 | // XXX: audit task_flags checks. |
2554 | spin_lock_irqsave(&cmd->t_state_lock, flags); | 2556 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
2555 | if ((cmd->transport_state & CMD_T_BUSY) && | 2557 | if ((cmd->transport_state & CMD_T_BUSY) && |
2556 | (cmd->transport_state & CMD_T_SENT)) { | 2558 | (cmd->transport_state & CMD_T_SENT)) { |
2557 | if (!target_stop_cmd(cmd, &flags)) | 2559 | if (!target_stop_cmd(cmd, &flags)) |
2558 | ret++; | 2560 | ret++; |
2559 | } | 2561 | } |
2560 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 2562 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
2561 | 2563 | ||
2562 | pr_debug("ConfigFS: cmd: %p stop tasks ret:" | 2564 | pr_debug("ConfigFS: cmd: %p stop tasks ret:" |
2563 | " %d\n", cmd, ret); | 2565 | " %d\n", cmd, ret); |
2564 | if (!ret) { | 2566 | if (!ret) { |
2565 | pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n", | 2567 | pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n", |
2566 | cmd->se_tfo->get_task_tag(cmd)); | 2568 | cmd->se_tfo->get_task_tag(cmd)); |
2567 | wait_for_completion(&cmd->transport_lun_stop_comp); | 2569 | wait_for_completion(&cmd->transport_lun_stop_comp); |
2568 | pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n", | 2570 | pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n", |
2569 | cmd->se_tfo->get_task_tag(cmd)); | 2571 | cmd->se_tfo->get_task_tag(cmd)); |
2570 | } | 2572 | } |
2571 | 2573 | ||
2572 | return 0; | 2574 | return 0; |
2573 | } | 2575 | } |
2574 | 2576 | ||
2575 | static void __transport_clear_lun_from_sessions(struct se_lun *lun) | 2577 | static void __transport_clear_lun_from_sessions(struct se_lun *lun) |
2576 | { | 2578 | { |
2577 | struct se_cmd *cmd = NULL; | 2579 | struct se_cmd *cmd = NULL; |
2578 | unsigned long lun_flags, cmd_flags; | 2580 | unsigned long lun_flags, cmd_flags; |
2579 | /* | 2581 | /* |
2580 | * Do exception processing and return CHECK_CONDITION status to the | 2582 | * Do exception processing and return CHECK_CONDITION status to the |
2581 | * Initiator Port. | 2583 | * Initiator Port. |
2582 | */ | 2584 | */ |
2583 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); | 2585 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); |
2584 | while (!list_empty(&lun->lun_cmd_list)) { | 2586 | while (!list_empty(&lun->lun_cmd_list)) { |
2585 | cmd = list_first_entry(&lun->lun_cmd_list, | 2587 | cmd = list_first_entry(&lun->lun_cmd_list, |
2586 | struct se_cmd, se_lun_node); | 2588 | struct se_cmd, se_lun_node); |
2587 | list_del_init(&cmd->se_lun_node); | 2589 | list_del_init(&cmd->se_lun_node); |
2588 | 2590 | ||
2589 | spin_lock(&cmd->t_state_lock); | 2591 | spin_lock(&cmd->t_state_lock); |
2590 | pr_debug("SE_LUN[%d] - Setting cmd->transport" | 2592 | pr_debug("SE_LUN[%d] - Setting cmd->transport" |
2591 | "_lun_stop for ITT: 0x%08x\n", | 2593 | "_lun_stop for ITT: 0x%08x\n", |
2592 | cmd->se_lun->unpacked_lun, | 2594 | cmd->se_lun->unpacked_lun, |
2593 | cmd->se_tfo->get_task_tag(cmd)); | 2595 | cmd->se_tfo->get_task_tag(cmd)); |
2594 | cmd->transport_state |= CMD_T_LUN_STOP; | 2596 | cmd->transport_state |= CMD_T_LUN_STOP; |
2595 | spin_unlock(&cmd->t_state_lock); | 2597 | spin_unlock(&cmd->t_state_lock); |
2596 | 2598 | ||
2597 | spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); | 2599 | spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); |
2598 | 2600 | ||
2599 | if (!cmd->se_lun) { | 2601 | if (!cmd->se_lun) { |
2600 | pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n", | 2602 | pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n", |
2601 | cmd->se_tfo->get_task_tag(cmd), | 2603 | cmd->se_tfo->get_task_tag(cmd), |
2602 | cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); | 2604 | cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); |
2603 | BUG(); | 2605 | BUG(); |
2604 | } | 2606 | } |
2605 | /* | 2607 | /* |
2606 | * If the Storage engine still owns the iscsi_cmd_t, determine | 2608 | * If the Storage engine still owns the iscsi_cmd_t, determine |
2607 | * and/or stop its context. | 2609 | * and/or stop its context. |
2608 | */ | 2610 | */ |
2609 | pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport" | 2611 | pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport" |
2610 | "_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun, | 2612 | "_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun, |
2611 | cmd->se_tfo->get_task_tag(cmd)); | 2613 | cmd->se_tfo->get_task_tag(cmd)); |
2612 | 2614 | ||
2613 | if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) { | 2615 | if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) { |
2614 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); | 2616 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); |
2615 | continue; | 2617 | continue; |
2616 | } | 2618 | } |
2617 | 2619 | ||
2618 | pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun" | 2620 | pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun" |
2619 | "_wait_for_tasks(): SUCCESS\n", | 2621 | "_wait_for_tasks(): SUCCESS\n", |
2620 | cmd->se_lun->unpacked_lun, | 2622 | cmd->se_lun->unpacked_lun, |
2621 | cmd->se_tfo->get_task_tag(cmd)); | 2623 | cmd->se_tfo->get_task_tag(cmd)); |
2622 | 2624 | ||
2623 | spin_lock_irqsave(&cmd->t_state_lock, cmd_flags); | 2625 | spin_lock_irqsave(&cmd->t_state_lock, cmd_flags); |
2624 | if (!(cmd->transport_state & CMD_T_DEV_ACTIVE)) { | 2626 | if (!(cmd->transport_state & CMD_T_DEV_ACTIVE)) { |
2625 | spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); | 2627 | spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); |
2626 | goto check_cond; | 2628 | goto check_cond; |
2627 | } | 2629 | } |
2628 | cmd->transport_state &= ~CMD_T_DEV_ACTIVE; | 2630 | cmd->transport_state &= ~CMD_T_DEV_ACTIVE; |
2629 | target_remove_from_state_list(cmd); | 2631 | target_remove_from_state_list(cmd); |
2630 | spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); | 2632 | spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); |
2631 | 2633 | ||
2632 | /* | 2634 | /* |
2633 | * The Storage engine stopped this struct se_cmd before it was | 2635 | * The Storage engine stopped this struct se_cmd before it was |
2634 | * sent to the fabric frontend for delivery back to the | 2636 | * sent to the fabric frontend for delivery back to the |
2635 | * Initiator Node. Return this SCSI CDB back with a | 2637 | * Initiator Node. Return this SCSI CDB back with a |
2636 | * CHECK_CONDITION status. | 2638 | * CHECK_CONDITION status. |
2637 | */ | 2639 | */ |
2638 | check_cond: | 2640 | check_cond: |
2639 | transport_send_check_condition_and_sense(cmd, | 2641 | transport_send_check_condition_and_sense(cmd, |
2640 | TCM_NON_EXISTENT_LUN, 0); | 2642 | TCM_NON_EXISTENT_LUN, 0); |
2641 | /* | 2643 | /* |
2642 | * If the fabric frontend is waiting for this iscsi_cmd_t to | 2644 | * If the fabric frontend is waiting for this iscsi_cmd_t to |
2643 | * be released, notify the waiting thread now that LU has | 2645 | * be released, notify the waiting thread now that LU has |
2644 | * finished accessing it. | 2646 | * finished accessing it. |
2645 | */ | 2647 | */ |
2646 | spin_lock_irqsave(&cmd->t_state_lock, cmd_flags); | 2648 | spin_lock_irqsave(&cmd->t_state_lock, cmd_flags); |
2647 | if (cmd->transport_state & CMD_T_LUN_FE_STOP) { | 2649 | if (cmd->transport_state & CMD_T_LUN_FE_STOP) { |
2648 | pr_debug("SE_LUN[%d] - Detected FE stop for" | 2650 | pr_debug("SE_LUN[%d] - Detected FE stop for" |
2649 | " struct se_cmd: %p ITT: 0x%08x\n", | 2651 | " struct se_cmd: %p ITT: 0x%08x\n", |
2650 | lun->unpacked_lun, | 2652 | lun->unpacked_lun, |
2651 | cmd, cmd->se_tfo->get_task_tag(cmd)); | 2653 | cmd, cmd->se_tfo->get_task_tag(cmd)); |
2652 | 2654 | ||
2653 | spin_unlock_irqrestore(&cmd->t_state_lock, | 2655 | spin_unlock_irqrestore(&cmd->t_state_lock, |
2654 | cmd_flags); | 2656 | cmd_flags); |
2655 | transport_cmd_check_stop(cmd, false); | 2657 | transport_cmd_check_stop(cmd, false); |
2656 | complete(&cmd->transport_lun_fe_stop_comp); | 2658 | complete(&cmd->transport_lun_fe_stop_comp); |
2657 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); | 2659 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); |
2658 | continue; | 2660 | continue; |
2659 | } | 2661 | } |
2660 | pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n", | 2662 | pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n", |
2661 | lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd)); | 2663 | lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd)); |
2662 | 2664 | ||
2663 | spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); | 2665 | spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); |
2664 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); | 2666 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); |
2665 | } | 2667 | } |
2666 | spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); | 2668 | spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); |
2667 | } | 2669 | } |
2668 | 2670 | ||
2669 | static int transport_clear_lun_thread(void *p) | 2671 | static int transport_clear_lun_thread(void *p) |
2670 | { | 2672 | { |
2671 | struct se_lun *lun = p; | 2673 | struct se_lun *lun = p; |
2672 | 2674 | ||
2673 | __transport_clear_lun_from_sessions(lun); | 2675 | __transport_clear_lun_from_sessions(lun); |
2674 | complete(&lun->lun_shutdown_comp); | 2676 | complete(&lun->lun_shutdown_comp); |
2675 | 2677 | ||
2676 | return 0; | 2678 | return 0; |
2677 | } | 2679 | } |
2678 | 2680 | ||
2679 | int transport_clear_lun_from_sessions(struct se_lun *lun) | 2681 | int transport_clear_lun_from_sessions(struct se_lun *lun) |
2680 | { | 2682 | { |
2681 | struct task_struct *kt; | 2683 | struct task_struct *kt; |
2682 | 2684 | ||
2683 | kt = kthread_run(transport_clear_lun_thread, lun, | 2685 | kt = kthread_run(transport_clear_lun_thread, lun, |
2684 | "tcm_cl_%u", lun->unpacked_lun); | 2686 | "tcm_cl_%u", lun->unpacked_lun); |
2685 | if (IS_ERR(kt)) { | 2687 | if (IS_ERR(kt)) { |
2686 | pr_err("Unable to start clear_lun thread\n"); | 2688 | pr_err("Unable to start clear_lun thread\n"); |
2687 | return PTR_ERR(kt); | 2689 | return PTR_ERR(kt); |
2688 | } | 2690 | } |
2689 | wait_for_completion(&lun->lun_shutdown_comp); | 2691 | wait_for_completion(&lun->lun_shutdown_comp); |
2690 | 2692 | ||
2691 | return 0; | 2693 | return 0; |
2692 | } | 2694 | } |
2693 | 2695 | ||
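Note that transport_clear_lun_from_sessions() is synchronous despite spawning a thread: kthread_run() starts the tcm_cl_%u worker, which performs __transport_clear_lun_from_sessions() and signals lun_shutdown_comp, while the caller sleeps in wait_for_completion() until the LUN's command list has been fully drained.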
2694 | /** | 2696 | /** |
2695 | * transport_wait_for_tasks - wait for completion to occur | 2697 | * transport_wait_for_tasks - wait for completion to occur |
2696 | * @cmd: command to wait | 2698 | * @cmd: command to wait |
2697 | * | 2699 | * |
2698 | * Called from frontend fabric context to wait for storage engine | 2700 | * Called from frontend fabric context to wait for storage engine |
2699 | * to pause and/or release frontend generated struct se_cmd. | 2701 | * to pause and/or release frontend generated struct se_cmd. |
2700 | */ | 2702 | */ |
2701 | bool transport_wait_for_tasks(struct se_cmd *cmd) | 2703 | bool transport_wait_for_tasks(struct se_cmd *cmd) |
2702 | { | 2704 | { |
2703 | unsigned long flags; | 2705 | unsigned long flags; |
2704 | 2706 | ||
2705 | spin_lock_irqsave(&cmd->t_state_lock, flags); | 2707 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
2706 | if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && | 2708 | if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && |
2707 | !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { | 2709 | !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { |
2708 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 2710 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
2709 | return false; | 2711 | return false; |
2710 | } | 2712 | } |
2711 | 2713 | ||
2712 | if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && | 2714 | if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && |
2713 | !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { | 2715 | !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { |
2714 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 2716 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
2715 | return false; | 2717 | return false; |
2716 | } | 2718 | } |
2717 | /* | 2719 | /* |
2718 | * If we are already stopped due to an external event (ie: LUN shutdown) | 2720 | * If we are already stopped due to an external event (ie: LUN shutdown) |
2719 | * sleep until the connection can have the passed struct se_cmd back. | 2721 | * sleep until the connection can have the passed struct se_cmd back. |
2720 | * The cmd->transport_lun_fe_stop_comp will be completed by | 2722 | * The cmd->transport_lun_fe_stop_comp will be completed by |
2721 | * transport_clear_lun_from_sessions() once the ConfigFS context caller | 2723 | * transport_clear_lun_from_sessions() once the ConfigFS context caller |
2722 | * has completed its operation on the struct se_cmd. | 2724 | * has completed its operation on the struct se_cmd. |
2723 | */ | 2725 | */ |
2724 | if (cmd->transport_state & CMD_T_LUN_STOP) { | 2726 | if (cmd->transport_state & CMD_T_LUN_STOP) { |
2725 | pr_debug("wait_for_tasks: Stopping" | 2727 | pr_debug("wait_for_tasks: Stopping" |
2726 | " wait_for_completion(&cmd->t_tasktransport_lun_fe" | 2728 | " wait_for_completion(&cmd->t_tasktransport_lun_fe" |
2727 | "_stop_comp); for ITT: 0x%08x\n", | 2729 | "_stop_comp); for ITT: 0x%08x\n", |
2728 | cmd->se_tfo->get_task_tag(cmd)); | 2730 | cmd->se_tfo->get_task_tag(cmd)); |
2729 | /* | 2731 | /* |
2730 | * There is a special case for WRITES where a FE exception + | 2732 | * There is a special case for WRITES where a FE exception + |
2731 | * LUN shutdown means ConfigFS context is still sleeping on | 2733 | * LUN shutdown means ConfigFS context is still sleeping on |
2732 | * transport_lun_stop_comp in transport_lun_wait_for_tasks(). | 2734 | * transport_lun_stop_comp in transport_lun_wait_for_tasks(). |
2733 | * We go ahead and up transport_lun_stop_comp just to be sure | 2735 | * We go ahead and up transport_lun_stop_comp just to be sure |
2734 | * here. | 2736 | * here. |
2735 | */ | 2737 | */ |
2736 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 2738 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
2737 | complete(&cmd->transport_lun_stop_comp); | 2739 | complete(&cmd->transport_lun_stop_comp); |
2738 | wait_for_completion(&cmd->transport_lun_fe_stop_comp); | 2740 | wait_for_completion(&cmd->transport_lun_fe_stop_comp); |
2739 | spin_lock_irqsave(&cmd->t_state_lock, flags); | 2741 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
2740 | 2742 | ||
2741 | target_remove_from_state_list(cmd); | 2743 | target_remove_from_state_list(cmd); |
2742 | /* | 2744 | /* |
2743 | * At this point, the frontend that originated this | 2745 | * At this point, the frontend that originated this |
2744 | * struct se_cmd owns the structure, which can be released through | 2746 | * struct se_cmd owns the structure, which can be released through |
2745 | * normal means below. | 2747 | * normal means below. |
2746 | */ | 2748 | */ |
2747 | pr_debug("wait_for_tasks: Stopped" | 2749 | pr_debug("wait_for_tasks: Stopped" |
2748 | " wait_for_completion(&cmd->t_tasktransport_lun_fe_" | 2750 | " wait_for_completion(&cmd->t_tasktransport_lun_fe_" |
2749 | "stop_comp); for ITT: 0x%08x\n", | 2751 | "stop_comp); for ITT: 0x%08x\n", |
2750 | cmd->se_tfo->get_task_tag(cmd)); | 2752 | cmd->se_tfo->get_task_tag(cmd)); |
2751 | 2753 | ||
2752 | cmd->transport_state &= ~CMD_T_LUN_STOP; | 2754 | cmd->transport_state &= ~CMD_T_LUN_STOP; |
2753 | } | 2755 | } |
2754 | 2756 | ||
2755 | if (!(cmd->transport_state & CMD_T_ACTIVE)) { | 2757 | if (!(cmd->transport_state & CMD_T_ACTIVE)) { |
2756 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 2758 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
2757 | return false; | 2759 | return false; |
2758 | } | 2760 | } |
2759 | 2761 | ||
2760 | cmd->transport_state |= CMD_T_STOP; | 2762 | cmd->transport_state |= CMD_T_STOP; |
2761 | 2763 | ||
2762 | pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x" | 2764 | pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x" |
2763 | " i_state: %d, t_state: %d, CMD_T_STOP\n", | 2765 | " i_state: %d, t_state: %d, CMD_T_STOP\n", |
2764 | cmd, cmd->se_tfo->get_task_tag(cmd), | 2766 | cmd, cmd->se_tfo->get_task_tag(cmd), |
2765 | cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); | 2767 | cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); |
2766 | 2768 | ||
2767 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 2769 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
2768 | 2770 | ||
2769 | wait_for_completion(&cmd->t_transport_stop_comp); | 2771 | wait_for_completion(&cmd->t_transport_stop_comp); |
2770 | 2772 | ||
2771 | spin_lock_irqsave(&cmd->t_state_lock, flags); | 2773 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
2772 | cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP); | 2774 | cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP); |
2773 | 2775 | ||
2774 | pr_debug("wait_for_tasks: Stopped wait_for_compltion(" | 2776 | pr_debug("wait_for_tasks: Stopped wait_for_compltion(" |
2775 | "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n", | 2777 | "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n", |
2776 | cmd->se_tfo->get_task_tag(cmd)); | 2778 | cmd->se_tfo->get_task_tag(cmd)); |
2777 | 2779 | ||
2778 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 2780 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
2779 | 2781 | ||
2780 | return true; | 2782 | return true; |
2781 | } | 2783 | } |
2782 | EXPORT_SYMBOL(transport_wait_for_tasks); | 2784 | EXPORT_SYMBOL(transport_wait_for_tasks); |
2783 | 2785 | ||
2784 | static int transport_get_sense_codes( | 2786 | static int transport_get_sense_codes( |
2785 | struct se_cmd *cmd, | 2787 | struct se_cmd *cmd, |
2786 | u8 *asc, | 2788 | u8 *asc, |
2787 | u8 *ascq) | 2789 | u8 *ascq) |
2788 | { | 2790 | { |
2789 | *asc = cmd->scsi_asc; | 2791 | *asc = cmd->scsi_asc; |
2790 | *ascq = cmd->scsi_ascq; | 2792 | *ascq = cmd->scsi_ascq; |
2791 | 2793 | ||
2792 | return 0; | 2794 | return 0; |
2793 | } | 2795 | } |
2794 | 2796 | ||
2795 | static int transport_set_sense_codes( | 2797 | static int transport_set_sense_codes( |
2796 | struct se_cmd *cmd, | 2798 | struct se_cmd *cmd, |
2797 | u8 asc, | 2799 | u8 asc, |
2798 | u8 ascq) | 2800 | u8 ascq) |
2799 | { | 2801 | { |
2800 | cmd->scsi_asc = asc; | 2802 | cmd->scsi_asc = asc; |
2801 | cmd->scsi_ascq = ascq; | 2803 | cmd->scsi_ascq = ascq; |
2802 | 2804 | ||
2803 | return 0; | 2805 | return 0; |
2804 | } | 2806 | } |
2805 | 2807 | ||
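For the switch below, the fixed-format sense bytes land at well-known SPC-3 offsets relative to the fabric-reported offset: byte 0 = 0x70 (current error), byte 2 (SPC_SENSE_KEY_OFFSET) = sense key, byte 7 (SPC_ADD_SENSE_LEN_OFFSET) = 10 additional bytes, bytes 12/13 (SPC_ASC_KEY_OFFSET/SPC_ASCQ_KEY_OFFSET) = ASC/ASCQ. Worked example: TCM_NON_EXISTENT_LUN yields sense key ILLEGAL_REQUEST (0x05) with ASC 0x25, ASCQ 0x00 — LOGICAL UNIT NOT SUPPORTED.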
2806 | int transport_send_check_condition_and_sense( | 2808 | int transport_send_check_condition_and_sense( |
2807 | struct se_cmd *cmd, | 2809 | struct se_cmd *cmd, |
2808 | u8 reason, | 2810 | u8 reason, |
2809 | int from_transport) | 2811 | int from_transport) |
2810 | { | 2812 | { |
2811 | unsigned char *buffer = cmd->sense_buffer; | 2813 | unsigned char *buffer = cmd->sense_buffer; |
2812 | unsigned long flags; | 2814 | unsigned long flags; |
2813 | int offset; | 2815 | int offset; |
2814 | u8 asc = 0, ascq = 0; | 2816 | u8 asc = 0, ascq = 0; |
2815 | 2817 | ||
2816 | spin_lock_irqsave(&cmd->t_state_lock, flags); | 2818 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
2817 | if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { | 2819 | if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { |
2818 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 2820 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
2819 | return 0; | 2821 | return 0; |
2820 | } | 2822 | } |
2821 | cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION; | 2823 | cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION; |
2822 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 2824 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
2823 | 2825 | ||
2824 | if (!reason && from_transport) | 2826 | if (!reason && from_transport) |
2825 | goto after_reason; | 2827 | goto after_reason; |
2826 | 2828 | ||
2827 | if (!from_transport) | 2829 | if (!from_transport) |
2828 | cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE; | 2830 | cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE; |
2829 | /* | 2831 | /* |
2830 | * Data Segment and SenseLength of the fabric response PDU. | 2832 | * Data Segment and SenseLength of the fabric response PDU. |
2831 | * | 2833 | * |
2832 | * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE | 2834 | * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE |
2833 | * from include/scsi/scsi_cmnd.h | 2835 | * from include/scsi/scsi_cmnd.h |
2834 | */ | 2836 | */ |
2835 | offset = cmd->se_tfo->set_fabric_sense_len(cmd, | 2837 | offset = cmd->se_tfo->set_fabric_sense_len(cmd, |
2836 | TRANSPORT_SENSE_BUFFER); | 2838 | TRANSPORT_SENSE_BUFFER); |
2837 | /* | 2839 | /* |
2838 | * Actual SENSE DATA, see SPC-3 7.23.2 SPC_SENSE_KEY_OFFSET uses | 2840 | * Actual SENSE DATA, see SPC-3 7.23.2 SPC_SENSE_KEY_OFFSET uses |
2839 | * SENSE KEY values from include/scsi/scsi.h | 2841 | * SENSE KEY values from include/scsi/scsi.h |
2840 | */ | 2842 | */ |
2841 | switch (reason) { | 2843 | switch (reason) { |
2842 | case TCM_NON_EXISTENT_LUN: | 2844 | case TCM_NON_EXISTENT_LUN: |
2843 | /* CURRENT ERROR */ | 2845 | /* CURRENT ERROR */ |
2844 | buffer[offset] = 0x70; | 2846 | buffer[offset] = 0x70; |
2845 | buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; | 2847 | buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; |
2846 | /* ILLEGAL REQUEST */ | 2848 | /* ILLEGAL REQUEST */ |
2847 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; | 2849 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; |
2848 | /* LOGICAL UNIT NOT SUPPORTED */ | 2850 | /* LOGICAL UNIT NOT SUPPORTED */ |
2849 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x25; | 2851 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x25; |
2850 | break; | 2852 | break; |
2851 | case TCM_UNSUPPORTED_SCSI_OPCODE: | 2853 | case TCM_UNSUPPORTED_SCSI_OPCODE: |
2852 | case TCM_SECTOR_COUNT_TOO_MANY: | 2854 | case TCM_SECTOR_COUNT_TOO_MANY: |
2853 | /* CURRENT ERROR */ | 2855 | /* CURRENT ERROR */ |
2854 | buffer[offset] = 0x70; | 2856 | buffer[offset] = 0x70; |
2855 | buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; | 2857 | buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; |
2856 | /* ILLEGAL REQUEST */ | 2858 | /* ILLEGAL REQUEST */ |
2857 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; | 2859 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; |
2858 | /* INVALID COMMAND OPERATION CODE */ | 2860 | /* INVALID COMMAND OPERATION CODE */ |
2859 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x20; | 2861 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x20; |
2860 | break; | 2862 | break; |
2861 | case TCM_UNKNOWN_MODE_PAGE: | 2863 | case TCM_UNKNOWN_MODE_PAGE: |
2862 | /* CURRENT ERROR */ | 2864 | /* CURRENT ERROR */ |
2863 | buffer[offset] = 0x70; | 2865 | buffer[offset] = 0x70; |
2864 | buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; | 2866 | buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; |
2865 | /* ILLEGAL REQUEST */ | 2867 | /* ILLEGAL REQUEST */ |
2866 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; | 2868 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; |
2867 | /* INVALID FIELD IN CDB */ | 2869 | /* INVALID FIELD IN CDB */ |
2868 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24; | 2870 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24; |
2869 | break; | 2871 | break; |
2870 | case TCM_CHECK_CONDITION_ABORT_CMD: | 2872 | case TCM_CHECK_CONDITION_ABORT_CMD: |
2871 | /* CURRENT ERROR */ | 2873 | /* CURRENT ERROR */ |
2872 | buffer[offset] = 0x70; | 2874 | buffer[offset] = 0x70; |
2873 | buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; | 2875 | buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; |
2874 | /* ABORTED COMMAND */ | 2876 | /* ABORTED COMMAND */ |
2875 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | 2877 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; |
2876 | /* BUS DEVICE RESET FUNCTION OCCURRED */ | 2878 | /* BUS DEVICE RESET FUNCTION OCCURRED */ |
2877 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x29; | 2879 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x29; |
2878 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x03; | 2880 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x03; |
2879 | break; | 2881 | break; |
2880 | case TCM_INCORRECT_AMOUNT_OF_DATA: | 2882 | case TCM_INCORRECT_AMOUNT_OF_DATA: |
2881 | /* CURRENT ERROR */ | 2883 | /* CURRENT ERROR */ |
2882 | buffer[offset] = 0x70; | 2884 | buffer[offset] = 0x70; |
2883 | buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; | 2885 | buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; |
2884 | /* ABORTED COMMAND */ | 2886 | /* ABORTED COMMAND */ |
2885 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | 2887 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; |
2886 | /* WRITE ERROR */ | 2888 | /* WRITE ERROR */ |
2887 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c; | 2889 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c; |
2888 | /* NOT ENOUGH UNSOLICITED DATA */ | 2890 | /* NOT ENOUGH UNSOLICITED DATA */ |
2889 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0d; | 2891 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0d; |
2890 | break; | 2892 | break; |
2891 | case TCM_INVALID_CDB_FIELD: | 2893 | case TCM_INVALID_CDB_FIELD: |
2892 | /* CURRENT ERROR */ | 2894 | /* CURRENT ERROR */ |
2893 | buffer[offset] = 0x70; | 2895 | buffer[offset] = 0x70; |
2894 | buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; | 2896 | buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; |
2895 | /* ILLEGAL REQUEST */ | 2897 | /* ILLEGAL REQUEST */ |
2896 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; | 2898 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; |
2897 | /* INVALID FIELD IN CDB */ | 2899 | /* INVALID FIELD IN CDB */ |
2898 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24; | 2900 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24; |
2899 | break; | 2901 | break; |
2900 | case TCM_INVALID_PARAMETER_LIST: | 2902 | case TCM_INVALID_PARAMETER_LIST: |
2901 | /* CURRENT ERROR */ | 2903 | /* CURRENT ERROR */ |
2902 | buffer[offset] = 0x70; | 2904 | buffer[offset] = 0x70; |
2903 | buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; | 2905 | buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; |
2904 | /* ILLEGAL REQUEST */ | 2906 | /* ILLEGAL REQUEST */ |
2905 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; | 2907 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; |
2906 | /* INVALID FIELD IN PARAMETER LIST */ | 2908 | /* INVALID FIELD IN PARAMETER LIST */ |
2907 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26; | 2909 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26; |
2908 | break; | 2910 | break; |
2909 | case TCM_UNEXPECTED_UNSOLICITED_DATA: | 2911 | case TCM_UNEXPECTED_UNSOLICITED_DATA: |
2910 | /* CURRENT ERROR */ | 2912 | /* CURRENT ERROR */ |
2911 | buffer[offset] = 0x70; | 2913 | buffer[offset] = 0x70; |
2912 | buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; | 2914 | buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; |
2913 | /* ABORTED COMMAND */ | 2915 | /* ABORTED COMMAND */ |
2914 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | 2916 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; |
2915 | /* WRITE ERROR */ | 2917 | /* WRITE ERROR */ |
2916 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c; | 2918 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c; |
2917 | /* UNEXPECTED UNSOLICITED DATA */ | 2919 | /* UNEXPECTED UNSOLICITED DATA */ |
2918 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0c; | 2920 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0c; |
2919 | break; | 2921 | break; |
2920 | case TCM_SERVICE_CRC_ERROR: | 2922 | case TCM_SERVICE_CRC_ERROR: |
2921 | /* CURRENT ERROR */ | 2923 | /* CURRENT ERROR */ |
2922 | buffer[offset] = 0x70; | 2924 | buffer[offset] = 0x70; |
2923 | buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; | 2925 | buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; |
2924 | /* ABORTED COMMAND */ | 2926 | /* ABORTED COMMAND */ |
2925 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | 2927 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; |
2926 | /* PROTOCOL SERVICE CRC ERROR */ | 2928 | /* PROTOCOL SERVICE CRC ERROR */ |
2927 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x47; | 2929 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x47; |
2928 | /* N/A */ | 2930 | /* N/A */ |
2929 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x05; | 2931 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x05; |
2930 | break; | 2932 | break; |
2931 | case TCM_SNACK_REJECTED: | 2933 | case TCM_SNACK_REJECTED: |
2932 | /* CURRENT ERROR */ | 2934 | /* CURRENT ERROR */ |
2933 | buffer[offset] = 0x70; | 2935 | buffer[offset] = 0x70; |
2934 | buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; | 2936 | buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; |
2935 | /* ABORTED COMMAND */ | 2937 | /* ABORTED COMMAND */ |
2936 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | 2938 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; |
2937 | /* READ ERROR */ | 2939 | /* READ ERROR */ |
2938 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x11; | 2940 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x11; |
2939 | /* FAILED RETRANSMISSION REQUEST */ | 2941 | /* FAILED RETRANSMISSION REQUEST */ |
2940 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x13; | 2942 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x13; |
2941 | break; | 2943 | break; |
2942 | case TCM_WRITE_PROTECTED: | 2944 | case TCM_WRITE_PROTECTED: |
2943 | /* CURRENT ERROR */ | 2945 | /* CURRENT ERROR */ |
2944 | buffer[offset] = 0x70; | 2946 | buffer[offset] = 0x70; |
2945 | buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; | 2947 | buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; |
2946 | /* DATA PROTECT */ | 2948 | /* DATA PROTECT */ |
2947 | buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT; | 2949 | buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT; |
2948 | /* WRITE PROTECTED */ | 2950 | /* WRITE PROTECTED */ |
2949 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27; | 2951 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27; |
2950 | break; | 2952 | break; |
2951 | case TCM_ADDRESS_OUT_OF_RANGE: | 2953 | case TCM_ADDRESS_OUT_OF_RANGE: |
2952 | /* CURRENT ERROR */ | 2954 | /* CURRENT ERROR */ |
2953 | buffer[offset] = 0x70; | 2955 | buffer[offset] = 0x70; |
2954 | buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; | 2956 | buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; |
2955 | /* ILLEGAL REQUEST */ | 2957 | /* ILLEGAL REQUEST */ |
2956 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; | 2958 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; |
2957 | /* LOGICAL BLOCK ADDRESS OUT OF RANGE */ | 2959 | /* LOGICAL BLOCK ADDRESS OUT OF RANGE */ |
2958 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x21; | 2960 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x21; |
2959 | break; | 2961 | break; |
2960 | case TCM_CHECK_CONDITION_UNIT_ATTENTION: | 2962 | case TCM_CHECK_CONDITION_UNIT_ATTENTION: |
2961 | /* CURRENT ERROR */ | 2963 | /* CURRENT ERROR */ |
2962 | buffer[offset] = 0x70; | 2964 | buffer[offset] = 0x70; |
2963 | buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; | 2965 | buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; |
2964 | /* UNIT ATTENTION */ | 2966 | /* UNIT ATTENTION */ |
2965 | buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION; | 2967 | buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION; |
2966 | core_scsi3_ua_for_check_condition(cmd, &asc, &ascq); | 2968 | core_scsi3_ua_for_check_condition(cmd, &asc, &ascq); |
2967 | buffer[offset+SPC_ASC_KEY_OFFSET] = asc; | 2969 | buffer[offset+SPC_ASC_KEY_OFFSET] = asc; |
2968 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq; | 2970 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq; |
2969 | break; | 2971 | break; |
2970 | case TCM_CHECK_CONDITION_NOT_READY: | 2972 | case TCM_CHECK_CONDITION_NOT_READY: |
2971 | /* CURRENT ERROR */ | 2973 | /* CURRENT ERROR */ |
2972 | buffer[offset] = 0x70; | 2974 | buffer[offset] = 0x70; |
2973 | buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; | 2975 | buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; |
2974 | /* Not Ready */ | 2976 | /* Not Ready */ |
2975 | buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY; | 2977 | buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY; |
2976 | transport_get_sense_codes(cmd, &asc, &ascq); | 2978 | transport_get_sense_codes(cmd, &asc, &ascq); |
2977 | buffer[offset+SPC_ASC_KEY_OFFSET] = asc; | 2979 | buffer[offset+SPC_ASC_KEY_OFFSET] = asc; |
2978 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq; | 2980 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq; |
2979 | break; | 2981 | break; |
2980 | case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: | 2982 | case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: |
2981 | default: | 2983 | default: |
2982 | /* CURRENT ERROR */ | 2984 | /* CURRENT ERROR */ |
2983 | buffer[offset] = 0x70; | 2985 | buffer[offset] = 0x70; |
2984 | buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; | 2986 | buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; |
2985 | /* ILLEGAL REQUEST */ | 2987 | /* ILLEGAL REQUEST */ |
2986 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; | 2988 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; |
2987 | /* LOGICAL UNIT COMMUNICATION FAILURE */ | 2989 | /* LOGICAL UNIT COMMUNICATION FAILURE */ |
2988 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x80; | 2990 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x80; |
2989 | break; | 2991 | break; |
2990 | } | 2992 | } |
2991 | /* | 2993 | /* |
2992 | * This code uses linux/include/scsi/scsi.h SAM status codes! | 2994 | * This code uses linux/include/scsi/scsi.h SAM status codes! |
2993 | */ | 2995 | */ |
2994 | cmd->scsi_status = SAM_STAT_CHECK_CONDITION; | 2996 | cmd->scsi_status = SAM_STAT_CHECK_CONDITION; |
2995 | /* | 2997 | /* |
2996 | * Automatically padded, this value is encoded in the fabric's | 2998 | * Automatically padded, this value is encoded in the fabric's |
2997 | * data_length response PDU containing the SCSI defined sense data. | 2999 | * data_length response PDU containing the SCSI defined sense data. |
2998 | */ | 3000 | */ |
2999 | cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset; | 3001 | cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset; |
3000 | 3002 | ||
3001 | after_reason: | 3003 | after_reason: |
3002 | return cmd->se_tfo->queue_status(cmd); | 3004 | return cmd->se_tfo->queue_status(cmd); |
3003 | } | 3005 | } |
3004 | EXPORT_SYMBOL(transport_send_check_condition_and_sense); | 3006 | EXPORT_SYMBOL(transport_send_check_condition_and_sense); |
3005 | 3007 | ||
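For context, the switch above fills a fixed-format sense buffer as defined by SPC: byte 0 carries the response code (0x70, current error), byte 2 the sense key, byte 7 the additional sense length, and bytes 12/13 the ASC/ASCQ pair. Below is a minimal userspace sketch of how an initiator might decode such a buffer; demo_decode_fixed_sense is a hypothetical helper, not part of the target core.

    #include <stdint.h>
    #include <stdio.h>

    /* Decode SPC fixed-format sense data (response codes 0x70/0x71). */
    static void demo_decode_fixed_sense(const uint8_t *buf)
    {
            uint8_t rc = buf[0] & 0x7f;

            if (rc != 0x70 && rc != 0x71)
                    return; /* descriptor format not handled here */

            printf("key 0x%x asc 0x%02x ascq 0x%02x add_len %d\n",
                   buf[2] & 0x0f, buf[12], buf[13], buf[7]);
    }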
3006 | int transport_check_aborted_status(struct se_cmd *cmd, int send_status) | 3008 | int transport_check_aborted_status(struct se_cmd *cmd, int send_status) |
3007 | { | 3009 | { |
3008 | int ret = 0; | 3010 | int ret = 0; |
3009 | 3011 | ||
3010 | if (cmd->transport_state & CMD_T_ABORTED) { | 3012 | if (cmd->transport_state & CMD_T_ABORTED) { |
3011 | if (!send_status || | 3013 | if (!send_status || |
3012 | (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS)) | 3014 | (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS)) |
3013 | return 1; | 3015 | return 1; |
3014 | 3016 | ||
3015 | pr_debug("Sending delayed SAM_STAT_TASK_ABORTED" | 3017 | pr_debug("Sending delayed SAM_STAT_TASK_ABORTED" |
3016 | " status for CDB: 0x%02x ITT: 0x%08x\n", | 3018 | " status for CDB: 0x%02x ITT: 0x%08x\n", |
3017 | cmd->t_task_cdb[0], | 3019 | cmd->t_task_cdb[0], |
3018 | cmd->se_tfo->get_task_tag(cmd)); | 3020 | cmd->se_tfo->get_task_tag(cmd)); |
3019 | 3021 | ||
3020 | cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS; | 3022 | cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS; |
3021 | cmd->se_tfo->queue_status(cmd); | 3023 | cmd->se_tfo->queue_status(cmd); |
3022 | ret = 1; | 3024 | ret = 1; |
3023 | } | 3025 | } |
3024 | return ret; | 3026 | return ret; |
3025 | } | 3027 | } |
3026 | EXPORT_SYMBOL(transport_check_aborted_status); | 3028 | EXPORT_SYMBOL(transport_check_aborted_status); |
3027 | 3029 | ||
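transport_check_aborted_status() returns nonzero once CMD_T_ABORTED is set, and with send_status it also queues the delayed TASK_ABORTED response exactly once (guarded by SCF_SENT_DELAYED_TAS). A hedged sketch of the call from a fabric completion path follows; the demo_fabric_* names are hypothetical, not part of any fabric module.

    #include <target/target_core_base.h>
    #include <target/target_core_fabric.h>

    static int demo_fabric_send_data(struct se_cmd *cmd); /* hypothetical */

    static int demo_fabric_queue_data_in(struct se_cmd *cmd)
    {
            /*
             * Nonzero means the command was aborted; with send_status=1
             * the delayed SAM_STAT_TASK_ABORTED status has already been
             * queued, so skip the normal data/status path entirely.
             */
            if (transport_check_aborted_status(cmd, 1))
                    return 0;

            return demo_fabric_send_data(cmd);
    }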
3028 | void transport_send_task_abort(struct se_cmd *cmd) | 3030 | void transport_send_task_abort(struct se_cmd *cmd) |
3029 | { | 3031 | { |
3030 | unsigned long flags; | 3032 | unsigned long flags; |
3031 | 3033 | ||
3032 | spin_lock_irqsave(&cmd->t_state_lock, flags); | 3034 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
3033 | if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { | 3035 | if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { |
3034 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 3036 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
3035 | return; | 3037 | return; |
3036 | } | 3038 | } |
3037 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 3039 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
3038 | 3040 | ||
3039 | /* | 3041 | /* |
3040 | * If there are still expected incoming fabric WRITEs, we wait | 3042 | * If there are still expected incoming fabric WRITEs, we wait |
3041 | * until they have completed before sending a TASK_ABORTED | 3043 | * until they have completed before sending a TASK_ABORTED |
3042 | * response. This response with TASK_ABORTED status will be | 3044 | * response. This response with TASK_ABORTED status will be |
3043 | * queued back to fabric module by transport_check_aborted_status(). | 3045 | * queued back to fabric module by transport_check_aborted_status(). |
3044 | */ | 3046 | */ |
3045 | if (cmd->data_direction == DMA_TO_DEVICE) { | 3047 | if (cmd->data_direction == DMA_TO_DEVICE) { |
3046 | if (cmd->se_tfo->write_pending_status(cmd) != 0) { | 3048 | if (cmd->se_tfo->write_pending_status(cmd) != 0) { |
3047 | cmd->transport_state |= CMD_T_ABORTED; | 3049 | cmd->transport_state |= CMD_T_ABORTED; |
3048 | smp_mb__after_atomic_inc(); | 3050 | smp_mb__after_atomic_inc(); |
3049 | } | 3051 | } |
3050 | } | 3052 | } |
3051 | cmd->scsi_status = SAM_STAT_TASK_ABORTED; | 3053 | cmd->scsi_status = SAM_STAT_TASK_ABORTED; |
3052 | 3054 | ||
3053 | pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x," | 3055 | pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x," |
3054 | " ITT: 0x%08x\n", cmd->t_task_cdb[0], | 3056 | " ITT: 0x%08x\n", cmd->t_task_cdb[0], |
3055 | cmd->se_tfo->get_task_tag(cmd)); | 3057 | cmd->se_tfo->get_task_tag(cmd)); |
3056 | 3058 | ||
3057 | cmd->se_tfo->queue_status(cmd); | 3059 | cmd->se_tfo->queue_status(cmd); |
3058 | } | 3060 | } |
3059 | 3061 | ||
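transport_send_task_abort() defers the TASK_ABORTED response while the fabric still expects solicited WRITE data, which it learns from the fabric's write_pending_status() callback. A minimal sketch of such a callback, assuming a hypothetical per-command flag:

    #include <target/target_core_base.h>

    struct demo_fabric_cmd {
            struct se_cmd se_cmd;
            bool data_out_expected;         /* hypothetical per-cmd state */
    };

    /*
     * Return nonzero while solicited WRITE data is still outstanding,
     * so the TASK_ABORTED status is queued later via
     * transport_check_aborted_status() instead of immediately.
     */
    static int demo_fabric_write_pending_status(struct se_cmd *se_cmd)
    {
            struct demo_fabric_cmd *cmd = container_of(se_cmd,
                            struct demo_fabric_cmd, se_cmd);

            return cmd->data_out_expected;
    }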
3060 | static void target_tmr_work(struct work_struct *work) | 3062 | static void target_tmr_work(struct work_struct *work) |
3061 | { | 3063 | { |
3062 | struct se_cmd *cmd = container_of(work, struct se_cmd, work); | 3064 | struct se_cmd *cmd = container_of(work, struct se_cmd, work); |
3063 | struct se_device *dev = cmd->se_dev; | 3065 | struct se_device *dev = cmd->se_dev; |
3064 | struct se_tmr_req *tmr = cmd->se_tmr_req; | 3066 | struct se_tmr_req *tmr = cmd->se_tmr_req; |
3065 | int ret; | 3067 | int ret; |
3066 | 3068 | ||
3067 | switch (tmr->function) { | 3069 | switch (tmr->function) { |
3068 | case TMR_ABORT_TASK: | 3070 | case TMR_ABORT_TASK: |
3069 | core_tmr_abort_task(dev, tmr, cmd->se_sess); | 3071 | core_tmr_abort_task(dev, tmr, cmd->se_sess); |
3070 | break; | 3072 | break; |
3071 | case TMR_ABORT_TASK_SET: | 3073 | case TMR_ABORT_TASK_SET: |
3072 | case TMR_CLEAR_ACA: | 3074 | case TMR_CLEAR_ACA: |
3073 | case TMR_CLEAR_TASK_SET: | 3075 | case TMR_CLEAR_TASK_SET: |
3074 | tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED; | 3076 | tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED; |
3075 | break; | 3077 | break; |
3076 | case TMR_LUN_RESET: | 3078 | case TMR_LUN_RESET: |
3077 | ret = core_tmr_lun_reset(dev, tmr, NULL, NULL); | 3079 | ret = core_tmr_lun_reset(dev, tmr, NULL, NULL); |
3078 | tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE : | 3080 | tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE : |
3079 | TMR_FUNCTION_REJECTED; | 3081 | TMR_FUNCTION_REJECTED; |
3080 | break; | 3082 | break; |
3081 | case TMR_TARGET_WARM_RESET: | 3083 | case TMR_TARGET_WARM_RESET: |
3082 | tmr->response = TMR_FUNCTION_REJECTED; | 3084 | tmr->response = TMR_FUNCTION_REJECTED; |
3083 | break; | 3085 | break; |
3084 | case TMR_TARGET_COLD_RESET: | 3086 | case TMR_TARGET_COLD_RESET: |
3085 | tmr->response = TMR_FUNCTION_REJECTED; | 3087 | tmr->response = TMR_FUNCTION_REJECTED; |
3086 | break; | 3088 | break; |
3087 | default: | 3089 | default: |
3088 | pr_err("Unknown TMR function: 0x%02x.\n", | 3090 | pr_err("Unknown TMR function: 0x%02x.\n", |
3089 | tmr->function); | 3091 | tmr->function); |
3090 | tmr->response = TMR_FUNCTION_REJECTED; | 3092 | tmr->response = TMR_FUNCTION_REJECTED; |
3091 | break; | 3093 | break; |
3092 | } | 3094 | } |
3093 | 3095 | ||
3094 | cmd->t_state = TRANSPORT_ISTATE_PROCESSING; | 3096 | cmd->t_state = TRANSPORT_ISTATE_PROCESSING; |
3095 | cmd->se_tfo->queue_tm_rsp(cmd); | 3097 | cmd->se_tfo->queue_tm_rsp(cmd); |
3096 | 3098 | ||
3097 | transport_cmd_check_stop_to_fabric(cmd); | 3099 | transport_cmd_check_stop_to_fabric(cmd); |
3098 | } | 3100 | } |
3099 | 3101 | ||
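After target_tmr_work() fills in tmr->response, the fabric's queue_tm_rsp() callback translates it onto the wire. A hedged sketch of such a callback, using the response constants seen above; the wire codes and demo_fabric_send_tm_response() are invented for illustration.

    #include <target/target_core_base.h>
    #include <target/target_core_fabric.h>

    static int demo_fabric_send_tm_response(struct se_cmd *se_cmd,
                                            u8 wire); /* hypothetical */

    static int demo_fabric_queue_tm_rsp(struct se_cmd *se_cmd)
    {
            struct se_tmr_req *tmr = se_cmd->se_tmr_req;
            u8 wire;

            switch (tmr->response) {
            case TMR_FUNCTION_COMPLETE:
                    wire = 0x00;    /* invented wire-level code */
                    break;
            case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
                    wire = 0x05;    /* invented wire-level code */
                    break;
            default:
                    wire = 0x0f;    /* invented: rejected/failed */
                    break;
            }
            return demo_fabric_send_tm_response(se_cmd, wire);
    }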
3100 | int transport_generic_handle_tmr( | 3102 | int transport_generic_handle_tmr( |
3101 | struct se_cmd *cmd) | 3103 | struct se_cmd *cmd) |
3102 | { | 3104 | { |
3103 | INIT_WORK(&cmd->work, target_tmr_work); | 3105 | INIT_WORK(&cmd->work, target_tmr_work); |
3104 | queue_work(cmd->se_dev->tmr_wq, &cmd->work); | 3106 | queue_work(cmd->se_dev->tmr_wq, &cmd->work); |
3105 | return 0; | 3107 | return 0; |
3106 | } | 3108 | } |
3107 | EXPORT_SYMBOL(transport_generic_handle_tmr); | 3109 | EXPORT_SYMBOL(transport_generic_handle_tmr); |
3108 | 3110 |
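transport_generic_handle_tmr() simply defers the dispatch above to the device's tmr_wq workqueue. A hedged sketch of a fabric submitting a LUN reset through this path, eliding session setup and the LUN lookup that populates cmd->se_dev; demo_fabric_lun_reset is hypothetical.

    #include <target/target_core_base.h>
    #include <target/target_core_fabric.h>

    static int demo_fabric_lun_reset(struct se_cmd *se_cmd)
    {
            int ret;

            /* Attach the se_tmr_req that target_tmr_work() dispatches on. */
            ret = core_tmr_alloc_req(se_cmd, NULL, TMR_LUN_RESET, GFP_KERNEL);
            if (ret < 0)
                    return ret;

            /* Queues target_tmr_work() on se_dev->tmr_wq. */
            return transport_generic_handle_tmr(se_cmd);
    }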