Commit 5dd7ed2e811d5cd12f31fb7f0c5ad0107d494a12
Committed by: James Bottomley
1 parent: 05aea6e7e4
Exists in: master and 39 other branches
[SCSI] target: Minor sparse warning fixes and annotations
This patch addresses the majority of sparse warnings and adds proper locking
annotations. It also fixes the dubious one-bit signed bitfield: a signed
one-bit field can only hold 0 or -1, which can cause a problem if someone
ever checks if (foo->lu_gp_assoc == 1). The current code is fine because
everyone just checks zero vs. non-zero, but sparse complains about it, so
let's change it. The warnings look like this:

include/target/target_core_base.h:228:26: error: dubious one-bit signed bitfield

Signed-off-by: Dan Carpenter <error27@gmail.com>
Signed-off-by: Fubo Chen <fubo.chen@gmail.com>
Signed-off-by: Nicholas A. Bellinger <nab@linux-iscsi.org>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
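To make the bitfield complaint concrete, here is a minimal userspace sketch
(not part of the patch; the struct names are hypothetical, and only the field
name lu_gp_assoc comes from the commit). Whether a plain "int x:1;" bitfield
is signed is implementation-defined in C, but GCC and Clang treat it as
signed, so storing 1 truncates to -1 with those compilers:

#include <stdio.h>

struct before {                         /* what sparse complains about */
        signed int lu_gp_assoc:1;       /* representable values: 0 or -1 */
};

struct after {                          /* what the patch changes it to */
        unsigned int lu_gp_assoc:1;     /* representable values: 0 or 1 */
};

int main(void)
{
        struct before b = { .lu_gp_assoc = 1 }; /* truncates to -1 on GCC/Clang */
        struct after  a = { .lu_gp_assoc = 1 };

        /* The "== 1" comparison silently fails for the signed field. */
        printf("signed bitfield:   stored %d, (x == 1) -> %d\n",
               (int)b.lu_gp_assoc, b.lu_gp_assoc == 1);
        printf("unsigned bitfield: stored %d, (x == 1) -> %d\n",
               (int)a.lu_gp_assoc, a.lu_gp_assoc == 1);
        return 0;
}

Zero-vs-nonzero checks behave identically for both structs, which is why the
existing code works; only an explicit "== 1" would break with the signed field.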
Showing 8 changed files with 18 additions and 13 deletions (inline diff)
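The one hunk shown below adds a sparse locking annotation to
core_release_port(), which is entered with dev->se_port_lock held, drops it
while waiting for port references to drain, and re-takes it before returning.
As a rough sketch of what those annotations mean (the macros live in
<linux/compiler.h>; the expansion shown matches kernels of this era, and the
drop_and_retake() function is a hypothetical illustration, not kernel code):

#ifdef __CHECKER__
# define __releases(x)  __attribute__((context(x, 1, 0)))
# define __acquires(x)  __attribute__((context(x, 0, 1)))
#else
# define __releases(x)
# define __acquires(x)
#endif

/* Tells sparse the caller holds the lock on entry, the function drops it
 * internally, and re-acquires it before returning -- the same pattern
 * core_release_port() follows in the diff below. */
static void drop_and_retake(spinlock_t *lock)
        __releases(lock) __acquires(lock)
{
        spin_unlock(lock);
        /* ... wait for outstanding references to drain ... */
        spin_lock(lock);
}

Without the annotation, sparse flags the unbalanced unlock/lock pair as a
"context imbalance" warning.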
drivers/target/target_core_device.c
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | * Filename: target_core_device.c (based on iscsi_target_device.c) | 2 | * Filename: target_core_device.c (based on iscsi_target_device.c) |
3 | * | 3 | * |
4 | * This file contains the iSCSI Virtual Device and Disk Transport | 4 | * This file contains the iSCSI Virtual Device and Disk Transport |
5 | * agnostic related functions. | 5 | * agnostic related functions. |
6 | * | 6 | * |
7 | * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc. | 7 | * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc. |
8 | * Copyright (c) 2005-2006 SBE, Inc. All Rights Reserved. | 8 | * Copyright (c) 2005-2006 SBE, Inc. All Rights Reserved. |
9 | * Copyright (c) 2007-2010 Rising Tide Systems | 9 | * Copyright (c) 2007-2010 Rising Tide Systems |
10 | * Copyright (c) 2008-2010 Linux-iSCSI.org | 10 | * Copyright (c) 2008-2010 Linux-iSCSI.org |
11 | * | 11 | * |
12 | * Nicholas A. Bellinger <nab@kernel.org> | 12 | * Nicholas A. Bellinger <nab@kernel.org> |
13 | * | 13 | * |
14 | * This program is free software; you can redistribute it and/or modify | 14 | * This program is free software; you can redistribute it and/or modify |
15 | * it under the terms of the GNU General Public License as published by | 15 | * it under the terms of the GNU General Public License as published by |
16 | * the Free Software Foundation; either version 2 of the License, or | 16 | * the Free Software Foundation; either version 2 of the License, or |
17 | * (at your option) any later version. | 17 | * (at your option) any later version. |
18 | * | 18 | * |
19 | * This program is distributed in the hope that it will be useful, | 19 | * This program is distributed in the hope that it will be useful, |
20 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 20 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
21 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 21 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
22 | * GNU General Public License for more details. | 22 | * GNU General Public License for more details. |
23 | * | 23 | * |
24 | * You should have received a copy of the GNU General Public License | 24 | * You should have received a copy of the GNU General Public License |
25 | * along with this program; if not, write to the Free Software | 25 | * along with this program; if not, write to the Free Software |
26 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | 26 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. |
27 | * | 27 | * |
28 | ******************************************************************************/ | 28 | ******************************************************************************/ |
29 | 29 | ||
30 | #include <linux/net.h> | 30 | #include <linux/net.h> |
31 | #include <linux/string.h> | 31 | #include <linux/string.h> |
32 | #include <linux/delay.h> | 32 | #include <linux/delay.h> |
33 | #include <linux/timer.h> | 33 | #include <linux/timer.h> |
34 | #include <linux/slab.h> | 34 | #include <linux/slab.h> |
35 | #include <linux/spinlock.h> | 35 | #include <linux/spinlock.h> |
36 | #include <linux/kthread.h> | 36 | #include <linux/kthread.h> |
37 | #include <linux/in.h> | 37 | #include <linux/in.h> |
38 | #include <net/sock.h> | 38 | #include <net/sock.h> |
39 | #include <net/tcp.h> | 39 | #include <net/tcp.h> |
40 | #include <scsi/scsi.h> | 40 | #include <scsi/scsi.h> |
41 | 41 | ||
42 | #include <target/target_core_base.h> | 42 | #include <target/target_core_base.h> |
43 | #include <target/target_core_device.h> | 43 | #include <target/target_core_device.h> |
44 | #include <target/target_core_tpg.h> | 44 | #include <target/target_core_tpg.h> |
45 | #include <target/target_core_transport.h> | 45 | #include <target/target_core_transport.h> |
46 | #include <target/target_core_fabric_ops.h> | 46 | #include <target/target_core_fabric_ops.h> |
47 | 47 | ||
48 | #include "target_core_alua.h" | 48 | #include "target_core_alua.h" |
49 | #include "target_core_hba.h" | 49 | #include "target_core_hba.h" |
50 | #include "target_core_pr.h" | 50 | #include "target_core_pr.h" |
51 | #include "target_core_ua.h" | 51 | #include "target_core_ua.h" |
52 | 52 | ||
53 | static void se_dev_start(struct se_device *dev); | 53 | static void se_dev_start(struct se_device *dev); |
54 | static void se_dev_stop(struct se_device *dev); | 54 | static void se_dev_stop(struct se_device *dev); |
55 | 55 | ||
56 | int transport_get_lun_for_cmd( | 56 | int transport_get_lun_for_cmd( |
57 | struct se_cmd *se_cmd, | 57 | struct se_cmd *se_cmd, |
58 | unsigned char *cdb, | 58 | unsigned char *cdb, |
59 | u32 unpacked_lun) | 59 | u32 unpacked_lun) |
60 | { | 60 | { |
61 | struct se_dev_entry *deve; | 61 | struct se_dev_entry *deve; |
62 | struct se_lun *se_lun = NULL; | 62 | struct se_lun *se_lun = NULL; |
63 | struct se_session *se_sess = SE_SESS(se_cmd); | 63 | struct se_session *se_sess = SE_SESS(se_cmd); |
64 | unsigned long flags; | 64 | unsigned long flags; |
65 | int read_only = 0; | 65 | int read_only = 0; |
66 | 66 | ||
67 | spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); | 67 | spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); |
68 | deve = se_cmd->se_deve = | 68 | deve = se_cmd->se_deve = |
69 | &SE_NODE_ACL(se_sess)->device_list[unpacked_lun]; | 69 | &SE_NODE_ACL(se_sess)->device_list[unpacked_lun]; |
70 | if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { | 70 | if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { |
71 | if (se_cmd) { | 71 | if (se_cmd) { |
72 | deve->total_cmds++; | 72 | deve->total_cmds++; |
73 | deve->total_bytes += se_cmd->data_length; | 73 | deve->total_bytes += se_cmd->data_length; |
74 | 74 | ||
75 | if (se_cmd->data_direction == DMA_TO_DEVICE) { | 75 | if (se_cmd->data_direction == DMA_TO_DEVICE) { |
76 | if (deve->lun_flags & | 76 | if (deve->lun_flags & |
77 | TRANSPORT_LUNFLAGS_READ_ONLY) { | 77 | TRANSPORT_LUNFLAGS_READ_ONLY) { |
78 | read_only = 1; | 78 | read_only = 1; |
79 | goto out; | 79 | goto out; |
80 | } | 80 | } |
81 | deve->write_bytes += se_cmd->data_length; | 81 | deve->write_bytes += se_cmd->data_length; |
82 | } else if (se_cmd->data_direction == | 82 | } else if (se_cmd->data_direction == |
83 | DMA_FROM_DEVICE) { | 83 | DMA_FROM_DEVICE) { |
84 | deve->read_bytes += se_cmd->data_length; | 84 | deve->read_bytes += se_cmd->data_length; |
85 | } | 85 | } |
86 | } | 86 | } |
87 | deve->deve_cmds++; | 87 | deve->deve_cmds++; |
88 | 88 | ||
89 | se_lun = se_cmd->se_lun = deve->se_lun; | 89 | se_lun = se_cmd->se_lun = deve->se_lun; |
90 | se_cmd->pr_res_key = deve->pr_res_key; | 90 | se_cmd->pr_res_key = deve->pr_res_key; |
91 | se_cmd->orig_fe_lun = unpacked_lun; | 91 | se_cmd->orig_fe_lun = unpacked_lun; |
92 | se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev; | 92 | se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev; |
93 | se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; | 93 | se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; |
94 | } | 94 | } |
95 | out: | 95 | out: |
96 | spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); | 96 | spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); |
97 | 97 | ||
98 | if (!se_lun) { | 98 | if (!se_lun) { |
99 | if (read_only) { | 99 | if (read_only) { |
100 | se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; | 100 | se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; |
101 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 101 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
102 | printk("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN" | 102 | printk("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN" |
103 | " Access for 0x%08x\n", | 103 | " Access for 0x%08x\n", |
104 | CMD_TFO(se_cmd)->get_fabric_name(), | 104 | CMD_TFO(se_cmd)->get_fabric_name(), |
105 | unpacked_lun); | 105 | unpacked_lun); |
106 | return -1; | 106 | return -1; |
107 | } else { | 107 | } else { |
108 | /* | 108 | /* |
109 | * Use the se_portal_group->tpg_virt_lun0 to allow for | 109 | * Use the se_portal_group->tpg_virt_lun0 to allow for |
110 | * REPORT_LUNS, et al to be returned when no active | 110 | * REPORT_LUNS, et al to be returned when no active |
111 | * MappedLUN=0 exists for this Initiator Port. | 111 | * MappedLUN=0 exists for this Initiator Port. |
112 | */ | 112 | */ |
113 | if (unpacked_lun != 0) { | 113 | if (unpacked_lun != 0) { |
114 | se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; | 114 | se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; |
115 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 115 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
116 | printk("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" | 116 | printk("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" |
117 | " Access for 0x%08x\n", | 117 | " Access for 0x%08x\n", |
118 | CMD_TFO(se_cmd)->get_fabric_name(), | 118 | CMD_TFO(se_cmd)->get_fabric_name(), |
119 | unpacked_lun); | 119 | unpacked_lun); |
120 | return -1; | 120 | return -1; |
121 | } | 121 | } |
122 | /* | 122 | /* |
123 | * Force WRITE PROTECT for virtual LUN 0 | 123 | * Force WRITE PROTECT for virtual LUN 0 |
124 | */ | 124 | */ |
125 | if ((se_cmd->data_direction != DMA_FROM_DEVICE) && | 125 | if ((se_cmd->data_direction != DMA_FROM_DEVICE) && |
126 | (se_cmd->data_direction != DMA_NONE)) { | 126 | (se_cmd->data_direction != DMA_NONE)) { |
127 | se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; | 127 | se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; |
128 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 128 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
129 | return -1; | 129 | return -1; |
130 | } | 130 | } |
131 | #if 0 | 131 | #if 0 |
132 | printk("TARGET_CORE[%s]: Using virtual LUN0! :-)\n", | 132 | printk("TARGET_CORE[%s]: Using virtual LUN0! :-)\n", |
133 | CMD_TFO(se_cmd)->get_fabric_name()); | 133 | CMD_TFO(se_cmd)->get_fabric_name()); |
134 | #endif | 134 | #endif |
135 | se_lun = se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0; | 135 | se_lun = se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0; |
136 | se_cmd->orig_fe_lun = 0; | 136 | se_cmd->orig_fe_lun = 0; |
137 | se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev; | 137 | se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev; |
138 | se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; | 138 | se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; |
139 | } | 139 | } |
140 | } | 140 | } |
141 | /* | 141 | /* |
142 | * Determine if the struct se_lun is online. | 142 | * Determine if the struct se_lun is online. |
143 | */ | 143 | */ |
144 | /* #warning FIXME: Check for LUN_RESET + UNIT Attention */ | 144 | /* #warning FIXME: Check for LUN_RESET + UNIT Attention */ |
145 | if (se_dev_check_online(se_lun->lun_se_dev) != 0) { | 145 | if (se_dev_check_online(se_lun->lun_se_dev) != 0) { |
146 | se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; | 146 | se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; |
147 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 147 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
148 | return -1; | 148 | return -1; |
149 | } | 149 | } |
150 | 150 | ||
151 | { | 151 | { |
152 | struct se_device *dev = se_lun->lun_se_dev; | 152 | struct se_device *dev = se_lun->lun_se_dev; |
153 | spin_lock(&dev->stats_lock); | 153 | spin_lock(&dev->stats_lock); |
154 | dev->num_cmds++; | 154 | dev->num_cmds++; |
155 | if (se_cmd->data_direction == DMA_TO_DEVICE) | 155 | if (se_cmd->data_direction == DMA_TO_DEVICE) |
156 | dev->write_bytes += se_cmd->data_length; | 156 | dev->write_bytes += se_cmd->data_length; |
157 | else if (se_cmd->data_direction == DMA_FROM_DEVICE) | 157 | else if (se_cmd->data_direction == DMA_FROM_DEVICE) |
158 | dev->read_bytes += se_cmd->data_length; | 158 | dev->read_bytes += se_cmd->data_length; |
159 | spin_unlock(&dev->stats_lock); | 159 | spin_unlock(&dev->stats_lock); |
160 | } | 160 | } |
161 | 161 | ||
162 | /* | 162 | /* |
163 | * Add the iscsi_cmd_t to the struct se_lun's cmd list. This list is used | 163 | * Add the iscsi_cmd_t to the struct se_lun's cmd list. This list is used |
164 | * for tracking state of struct se_cmds during LUN shutdown events. | 164 | * for tracking state of struct se_cmds during LUN shutdown events. |
165 | */ | 165 | */ |
166 | spin_lock_irqsave(&se_lun->lun_cmd_lock, flags); | 166 | spin_lock_irqsave(&se_lun->lun_cmd_lock, flags); |
167 | list_add_tail(&se_cmd->se_lun_list, &se_lun->lun_cmd_list); | 167 | list_add_tail(&se_cmd->se_lun_list, &se_lun->lun_cmd_list); |
168 | atomic_set(&T_TASK(se_cmd)->transport_lun_active, 1); | 168 | atomic_set(&T_TASK(se_cmd)->transport_lun_active, 1); |
169 | #if 0 | 169 | #if 0 |
170 | printk(KERN_INFO "Adding ITT: 0x%08x to LUN LIST[%d]\n", | 170 | printk(KERN_INFO "Adding ITT: 0x%08x to LUN LIST[%d]\n", |
171 | CMD_TFO(se_cmd)->get_task_tag(se_cmd), se_lun->unpacked_lun); | 171 | CMD_TFO(se_cmd)->get_task_tag(se_cmd), se_lun->unpacked_lun); |
172 | #endif | 172 | #endif |
173 | spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags); | 173 | spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags); |
174 | 174 | ||
175 | return 0; | 175 | return 0; |
176 | } | 176 | } |
177 | EXPORT_SYMBOL(transport_get_lun_for_cmd); | 177 | EXPORT_SYMBOL(transport_get_lun_for_cmd); |
178 | 178 | ||
179 | int transport_get_lun_for_tmr( | 179 | int transport_get_lun_for_tmr( |
180 | struct se_cmd *se_cmd, | 180 | struct se_cmd *se_cmd, |
181 | u32 unpacked_lun) | 181 | u32 unpacked_lun) |
182 | { | 182 | { |
183 | struct se_device *dev = NULL; | 183 | struct se_device *dev = NULL; |
184 | struct se_dev_entry *deve; | 184 | struct se_dev_entry *deve; |
185 | struct se_lun *se_lun = NULL; | 185 | struct se_lun *se_lun = NULL; |
186 | struct se_session *se_sess = SE_SESS(se_cmd); | 186 | struct se_session *se_sess = SE_SESS(se_cmd); |
187 | struct se_tmr_req *se_tmr = se_cmd->se_tmr_req; | 187 | struct se_tmr_req *se_tmr = se_cmd->se_tmr_req; |
188 | 188 | ||
189 | spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); | 189 | spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); |
190 | deve = se_cmd->se_deve = | 190 | deve = se_cmd->se_deve = |
191 | &SE_NODE_ACL(se_sess)->device_list[unpacked_lun]; | 191 | &SE_NODE_ACL(se_sess)->device_list[unpacked_lun]; |
192 | if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { | 192 | if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { |
193 | se_lun = se_cmd->se_lun = se_tmr->tmr_lun = deve->se_lun; | 193 | se_lun = se_cmd->se_lun = se_tmr->tmr_lun = deve->se_lun; |
194 | dev = se_tmr->tmr_dev = se_lun->lun_se_dev; | 194 | dev = se_tmr->tmr_dev = se_lun->lun_se_dev; |
195 | se_cmd->pr_res_key = deve->pr_res_key; | 195 | se_cmd->pr_res_key = deve->pr_res_key; |
196 | se_cmd->orig_fe_lun = unpacked_lun; | 196 | se_cmd->orig_fe_lun = unpacked_lun; |
197 | se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev; | 197 | se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev; |
198 | /* se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; */ | 198 | /* se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; */ |
199 | } | 199 | } |
200 | spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); | 200 | spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); |
201 | 201 | ||
202 | if (!se_lun) { | 202 | if (!se_lun) { |
203 | printk(KERN_INFO "TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" | 203 | printk(KERN_INFO "TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" |
204 | " Access for 0x%08x\n", | 204 | " Access for 0x%08x\n", |
205 | CMD_TFO(se_cmd)->get_fabric_name(), | 205 | CMD_TFO(se_cmd)->get_fabric_name(), |
206 | unpacked_lun); | 206 | unpacked_lun); |
207 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 207 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
208 | return -1; | 208 | return -1; |
209 | } | 209 | } |
210 | /* | 210 | /* |
211 | * Determine if the struct se_lun is online. | 211 | * Determine if the struct se_lun is online. |
212 | */ | 212 | */ |
213 | /* #warning FIXME: Check for LUN_RESET + UNIT Attention */ | 213 | /* #warning FIXME: Check for LUN_RESET + UNIT Attention */ |
214 | if (se_dev_check_online(se_lun->lun_se_dev) != 0) { | 214 | if (se_dev_check_online(se_lun->lun_se_dev) != 0) { |
215 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 215 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
216 | return -1; | 216 | return -1; |
217 | } | 217 | } |
218 | 218 | ||
219 | spin_lock(&dev->se_tmr_lock); | 219 | spin_lock(&dev->se_tmr_lock); |
220 | list_add_tail(&se_tmr->tmr_list, &dev->dev_tmr_list); | 220 | list_add_tail(&se_tmr->tmr_list, &dev->dev_tmr_list); |
221 | spin_unlock(&dev->se_tmr_lock); | 221 | spin_unlock(&dev->se_tmr_lock); |
222 | 222 | ||
223 | return 0; | 223 | return 0; |
224 | } | 224 | } |
225 | EXPORT_SYMBOL(transport_get_lun_for_tmr); | 225 | EXPORT_SYMBOL(transport_get_lun_for_tmr); |
226 | 226 | ||
227 | /* | 227 | /* |
228 | * This function is called from core_scsi3_emulate_pro_register_and_move() | 228 | * This function is called from core_scsi3_emulate_pro_register_and_move() |
229 | * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count | 229 | * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count |
230 | * when a matching rtpi is found. | 230 | * when a matching rtpi is found. |
231 | */ | 231 | */ |
232 | struct se_dev_entry *core_get_se_deve_from_rtpi( | 232 | struct se_dev_entry *core_get_se_deve_from_rtpi( |
233 | struct se_node_acl *nacl, | 233 | struct se_node_acl *nacl, |
234 | u16 rtpi) | 234 | u16 rtpi) |
235 | { | 235 | { |
236 | struct se_dev_entry *deve; | 236 | struct se_dev_entry *deve; |
237 | struct se_lun *lun; | 237 | struct se_lun *lun; |
238 | struct se_port *port; | 238 | struct se_port *port; |
239 | struct se_portal_group *tpg = nacl->se_tpg; | 239 | struct se_portal_group *tpg = nacl->se_tpg; |
240 | u32 i; | 240 | u32 i; |
241 | 241 | ||
242 | spin_lock_irq(&nacl->device_list_lock); | 242 | spin_lock_irq(&nacl->device_list_lock); |
243 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { | 243 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { |
244 | deve = &nacl->device_list[i]; | 244 | deve = &nacl->device_list[i]; |
245 | 245 | ||
246 | if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) | 246 | if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) |
247 | continue; | 247 | continue; |
248 | 248 | ||
249 | lun = deve->se_lun; | 249 | lun = deve->se_lun; |
250 | if (!(lun)) { | 250 | if (!(lun)) { |
251 | printk(KERN_ERR "%s device entries device pointer is" | 251 | printk(KERN_ERR "%s device entries device pointer is" |
252 | " NULL, but Initiator has access.\n", | 252 | " NULL, but Initiator has access.\n", |
253 | TPG_TFO(tpg)->get_fabric_name()); | 253 | TPG_TFO(tpg)->get_fabric_name()); |
254 | continue; | 254 | continue; |
255 | } | 255 | } |
256 | port = lun->lun_sep; | 256 | port = lun->lun_sep; |
257 | if (!(port)) { | 257 | if (!(port)) { |
258 | printk(KERN_ERR "%s device entries device pointer is" | 258 | printk(KERN_ERR "%s device entries device pointer is" |
259 | " NULL, but Initiator has access.\n", | 259 | " NULL, but Initiator has access.\n", |
260 | TPG_TFO(tpg)->get_fabric_name()); | 260 | TPG_TFO(tpg)->get_fabric_name()); |
261 | continue; | 261 | continue; |
262 | } | 262 | } |
263 | if (port->sep_rtpi != rtpi) | 263 | if (port->sep_rtpi != rtpi) |
264 | continue; | 264 | continue; |
265 | 265 | ||
266 | atomic_inc(&deve->pr_ref_count); | 266 | atomic_inc(&deve->pr_ref_count); |
267 | smp_mb__after_atomic_inc(); | 267 | smp_mb__after_atomic_inc(); |
268 | spin_unlock_irq(&nacl->device_list_lock); | 268 | spin_unlock_irq(&nacl->device_list_lock); |
269 | 269 | ||
270 | return deve; | 270 | return deve; |
271 | } | 271 | } |
272 | spin_unlock_irq(&nacl->device_list_lock); | 272 | spin_unlock_irq(&nacl->device_list_lock); |
273 | 273 | ||
274 | return NULL; | 274 | return NULL; |
275 | } | 275 | } |
276 | 276 | ||
277 | int core_free_device_list_for_node( | 277 | int core_free_device_list_for_node( |
278 | struct se_node_acl *nacl, | 278 | struct se_node_acl *nacl, |
279 | struct se_portal_group *tpg) | 279 | struct se_portal_group *tpg) |
280 | { | 280 | { |
281 | struct se_dev_entry *deve; | 281 | struct se_dev_entry *deve; |
282 | struct se_lun *lun; | 282 | struct se_lun *lun; |
283 | u32 i; | 283 | u32 i; |
284 | 284 | ||
285 | if (!nacl->device_list) | 285 | if (!nacl->device_list) |
286 | return 0; | 286 | return 0; |
287 | 287 | ||
288 | spin_lock_irq(&nacl->device_list_lock); | 288 | spin_lock_irq(&nacl->device_list_lock); |
289 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { | 289 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { |
290 | deve = &nacl->device_list[i]; | 290 | deve = &nacl->device_list[i]; |
291 | 291 | ||
292 | if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) | 292 | if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) |
293 | continue; | 293 | continue; |
294 | 294 | ||
295 | if (!deve->se_lun) { | 295 | if (!deve->se_lun) { |
296 | printk(KERN_ERR "%s device entries device pointer is" | 296 | printk(KERN_ERR "%s device entries device pointer is" |
297 | " NULL, but Initiator has access.\n", | 297 | " NULL, but Initiator has access.\n", |
298 | TPG_TFO(tpg)->get_fabric_name()); | 298 | TPG_TFO(tpg)->get_fabric_name()); |
299 | continue; | 299 | continue; |
300 | } | 300 | } |
301 | lun = deve->se_lun; | 301 | lun = deve->se_lun; |
302 | 302 | ||
303 | spin_unlock_irq(&nacl->device_list_lock); | 303 | spin_unlock_irq(&nacl->device_list_lock); |
304 | core_update_device_list_for_node(lun, NULL, deve->mapped_lun, | 304 | core_update_device_list_for_node(lun, NULL, deve->mapped_lun, |
305 | TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0); | 305 | TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0); |
306 | spin_lock_irq(&nacl->device_list_lock); | 306 | spin_lock_irq(&nacl->device_list_lock); |
307 | } | 307 | } |
308 | spin_unlock_irq(&nacl->device_list_lock); | 308 | spin_unlock_irq(&nacl->device_list_lock); |
309 | 309 | ||
310 | kfree(nacl->device_list); | 310 | kfree(nacl->device_list); |
311 | nacl->device_list = NULL; | 311 | nacl->device_list = NULL; |
312 | 312 | ||
313 | return 0; | 313 | return 0; |
314 | } | 314 | } |
315 | 315 | ||
316 | void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd) | 316 | void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd) |
317 | { | 317 | { |
318 | struct se_dev_entry *deve; | 318 | struct se_dev_entry *deve; |
319 | 319 | ||
320 | spin_lock_irq(&se_nacl->device_list_lock); | 320 | spin_lock_irq(&se_nacl->device_list_lock); |
321 | deve = &se_nacl->device_list[se_cmd->orig_fe_lun]; | 321 | deve = &se_nacl->device_list[se_cmd->orig_fe_lun]; |
322 | deve->deve_cmds--; | 322 | deve->deve_cmds--; |
323 | spin_unlock_irq(&se_nacl->device_list_lock); | 323 | spin_unlock_irq(&se_nacl->device_list_lock); |
324 | 324 | ||
325 | return; | 325 | return; |
326 | } | 326 | } |
327 | 327 | ||
328 | void core_update_device_list_access( | 328 | void core_update_device_list_access( |
329 | u32 mapped_lun, | 329 | u32 mapped_lun, |
330 | u32 lun_access, | 330 | u32 lun_access, |
331 | struct se_node_acl *nacl) | 331 | struct se_node_acl *nacl) |
332 | { | 332 | { |
333 | struct se_dev_entry *deve; | 333 | struct se_dev_entry *deve; |
334 | 334 | ||
335 | spin_lock_irq(&nacl->device_list_lock); | 335 | spin_lock_irq(&nacl->device_list_lock); |
336 | deve = &nacl->device_list[mapped_lun]; | 336 | deve = &nacl->device_list[mapped_lun]; |
337 | if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) { | 337 | if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) { |
338 | deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY; | 338 | deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY; |
339 | deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE; | 339 | deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE; |
340 | } else { | 340 | } else { |
341 | deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE; | 341 | deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE; |
342 | deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY; | 342 | deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY; |
343 | } | 343 | } |
344 | spin_unlock_irq(&nacl->device_list_lock); | 344 | spin_unlock_irq(&nacl->device_list_lock); |
345 | 345 | ||
346 | return; | 346 | return; |
347 | } | 347 | } |
348 | 348 | ||
349 | /* core_update_device_list_for_node(): | 349 | /* core_update_device_list_for_node(): |
350 | * | 350 | * |
351 | * | 351 | * |
352 | */ | 352 | */ |
353 | int core_update_device_list_for_node( | 353 | int core_update_device_list_for_node( |
354 | struct se_lun *lun, | 354 | struct se_lun *lun, |
355 | struct se_lun_acl *lun_acl, | 355 | struct se_lun_acl *lun_acl, |
356 | u32 mapped_lun, | 356 | u32 mapped_lun, |
357 | u32 lun_access, | 357 | u32 lun_access, |
358 | struct se_node_acl *nacl, | 358 | struct se_node_acl *nacl, |
359 | struct se_portal_group *tpg, | 359 | struct se_portal_group *tpg, |
360 | int enable) | 360 | int enable) |
361 | { | 361 | { |
362 | struct se_port *port = lun->lun_sep; | 362 | struct se_port *port = lun->lun_sep; |
363 | struct se_dev_entry *deve = &nacl->device_list[mapped_lun]; | 363 | struct se_dev_entry *deve = &nacl->device_list[mapped_lun]; |
364 | int trans = 0; | 364 | int trans = 0; |
365 | /* | 365 | /* |
366 | * If the MappedLUN entry is being disabled, the entry in | 366 | * If the MappedLUN entry is being disabled, the entry in |
367 | * port->sep_alua_list must be removed now before clearing the | 367 | * port->sep_alua_list must be removed now before clearing the |
368 | * struct se_dev_entry pointers below as logic in | 368 | * struct se_dev_entry pointers below as logic in |
369 | * core_alua_do_transition_tg_pt() depends on these being present. | 369 | * core_alua_do_transition_tg_pt() depends on these being present. |
370 | */ | 370 | */ |
371 | if (!(enable)) { | 371 | if (!(enable)) { |
372 | /* | 372 | /* |
373 | * deve->se_lun_acl will be NULL for demo-mode created LUNs | 373 | * deve->se_lun_acl will be NULL for demo-mode created LUNs |
374 | * that have not been explictly concerted to MappedLUNs -> | 374 | * that have not been explictly concerted to MappedLUNs -> |
375 | * struct se_lun_acl, but we remove deve->alua_port_list from | 375 | * struct se_lun_acl, but we remove deve->alua_port_list from |
376 | * port->sep_alua_list. This also means that active UAs and | 376 | * port->sep_alua_list. This also means that active UAs and |
377 | * NodeACL context specific PR metadata for demo-mode | 377 | * NodeACL context specific PR metadata for demo-mode |
378 | * MappedLUN *deve will be released below.. | 378 | * MappedLUN *deve will be released below.. |
379 | */ | 379 | */ |
380 | spin_lock_bh(&port->sep_alua_lock); | 380 | spin_lock_bh(&port->sep_alua_lock); |
381 | list_del(&deve->alua_port_list); | 381 | list_del(&deve->alua_port_list); |
382 | spin_unlock_bh(&port->sep_alua_lock); | 382 | spin_unlock_bh(&port->sep_alua_lock); |
383 | } | 383 | } |
384 | 384 | ||
385 | spin_lock_irq(&nacl->device_list_lock); | 385 | spin_lock_irq(&nacl->device_list_lock); |
386 | if (enable) { | 386 | if (enable) { |
387 | /* | 387 | /* |
388 | * Check if the call is handling demo mode -> explict LUN ACL | 388 | * Check if the call is handling demo mode -> explict LUN ACL |
389 | * transition. This transition must be for the same struct se_lun | 389 | * transition. This transition must be for the same struct se_lun |
390 | * + mapped_lun that was setup in demo mode.. | 390 | * + mapped_lun that was setup in demo mode.. |
391 | */ | 391 | */ |
392 | if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { | 392 | if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { |
393 | if (deve->se_lun_acl != NULL) { | 393 | if (deve->se_lun_acl != NULL) { |
394 | printk(KERN_ERR "struct se_dev_entry->se_lun_acl" | 394 | printk(KERN_ERR "struct se_dev_entry->se_lun_acl" |
395 | " already set for demo mode -> explict" | 395 | " already set for demo mode -> explict" |
396 | " LUN ACL transition\n"); | 396 | " LUN ACL transition\n"); |
397 | spin_unlock_irq(&nacl->device_list_lock); | 397 | spin_unlock_irq(&nacl->device_list_lock); |
398 | return -1; | 398 | return -1; |
399 | } | 399 | } |
400 | if (deve->se_lun != lun) { | 400 | if (deve->se_lun != lun) { |
401 | printk(KERN_ERR "struct se_dev_entry->se_lun does" | 401 | printk(KERN_ERR "struct se_dev_entry->se_lun does" |
402 | " match passed struct se_lun for demo mode" | 402 | " match passed struct se_lun for demo mode" |
403 | " -> explict LUN ACL transition\n"); | 403 | " -> explict LUN ACL transition\n"); |
404 | spin_unlock_irq(&nacl->device_list_lock); | 404 | spin_unlock_irq(&nacl->device_list_lock); |
405 | return -1; | 405 | return -1; |
406 | } | 406 | } |
407 | deve->se_lun_acl = lun_acl; | 407 | deve->se_lun_acl = lun_acl; |
408 | trans = 1; | 408 | trans = 1; |
409 | } else { | 409 | } else { |
410 | deve->se_lun = lun; | 410 | deve->se_lun = lun; |
411 | deve->se_lun_acl = lun_acl; | 411 | deve->se_lun_acl = lun_acl; |
412 | deve->mapped_lun = mapped_lun; | 412 | deve->mapped_lun = mapped_lun; |
413 | deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS; | 413 | deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS; |
414 | } | 414 | } |
415 | 415 | ||
416 | if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) { | 416 | if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) { |
417 | deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY; | 417 | deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY; |
418 | deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE; | 418 | deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE; |
419 | } else { | 419 | } else { |
420 | deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE; | 420 | deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE; |
421 | deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY; | 421 | deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY; |
422 | } | 422 | } |
423 | 423 | ||
424 | if (trans) { | 424 | if (trans) { |
425 | spin_unlock_irq(&nacl->device_list_lock); | 425 | spin_unlock_irq(&nacl->device_list_lock); |
426 | return 0; | 426 | return 0; |
427 | } | 427 | } |
428 | deve->creation_time = get_jiffies_64(); | 428 | deve->creation_time = get_jiffies_64(); |
429 | deve->attach_count++; | 429 | deve->attach_count++; |
430 | spin_unlock_irq(&nacl->device_list_lock); | 430 | spin_unlock_irq(&nacl->device_list_lock); |
431 | 431 | ||
432 | spin_lock_bh(&port->sep_alua_lock); | 432 | spin_lock_bh(&port->sep_alua_lock); |
433 | list_add_tail(&deve->alua_port_list, &port->sep_alua_list); | 433 | list_add_tail(&deve->alua_port_list, &port->sep_alua_list); |
434 | spin_unlock_bh(&port->sep_alua_lock); | 434 | spin_unlock_bh(&port->sep_alua_lock); |
435 | 435 | ||
436 | return 0; | 436 | return 0; |
437 | } | 437 | } |
438 | /* | 438 | /* |
439 | * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE | 439 | * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE |
440 | * PR operation to complete. | 440 | * PR operation to complete. |
441 | */ | 441 | */ |
442 | spin_unlock_irq(&nacl->device_list_lock); | 442 | spin_unlock_irq(&nacl->device_list_lock); |
443 | while (atomic_read(&deve->pr_ref_count) != 0) | 443 | while (atomic_read(&deve->pr_ref_count) != 0) |
444 | cpu_relax(); | 444 | cpu_relax(); |
445 | spin_lock_irq(&nacl->device_list_lock); | 445 | spin_lock_irq(&nacl->device_list_lock); |
446 | /* | 446 | /* |
447 | * Disable struct se_dev_entry LUN ACL mapping | 447 | * Disable struct se_dev_entry LUN ACL mapping |
448 | */ | 448 | */ |
449 | core_scsi3_ua_release_all(deve); | 449 | core_scsi3_ua_release_all(deve); |
450 | deve->se_lun = NULL; | 450 | deve->se_lun = NULL; |
451 | deve->se_lun_acl = NULL; | 451 | deve->se_lun_acl = NULL; |
452 | deve->lun_flags = 0; | 452 | deve->lun_flags = 0; |
453 | deve->creation_time = 0; | 453 | deve->creation_time = 0; |
454 | deve->attach_count--; | 454 | deve->attach_count--; |
455 | spin_unlock_irq(&nacl->device_list_lock); | 455 | spin_unlock_irq(&nacl->device_list_lock); |
456 | 456 | ||
457 | core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl); | 457 | core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl); |
458 | return 0; | 458 | return 0; |
459 | } | 459 | } |
460 | 460 | ||
461 | /* core_clear_lun_from_tpg(): | 461 | /* core_clear_lun_from_tpg(): |
462 | * | 462 | * |
463 | * | 463 | * |
464 | */ | 464 | */ |
465 | void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg) | 465 | void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg) |
466 | { | 466 | { |
467 | struct se_node_acl *nacl; | 467 | struct se_node_acl *nacl; |
468 | struct se_dev_entry *deve; | 468 | struct se_dev_entry *deve; |
469 | u32 i; | 469 | u32 i; |
470 | 470 | ||
471 | spin_lock_bh(&tpg->acl_node_lock); | 471 | spin_lock_bh(&tpg->acl_node_lock); |
472 | list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) { | 472 | list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) { |
473 | spin_unlock_bh(&tpg->acl_node_lock); | 473 | spin_unlock_bh(&tpg->acl_node_lock); |
474 | 474 | ||
475 | spin_lock_irq(&nacl->device_list_lock); | 475 | spin_lock_irq(&nacl->device_list_lock); |
476 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { | 476 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { |
477 | deve = &nacl->device_list[i]; | 477 | deve = &nacl->device_list[i]; |
478 | if (lun != deve->se_lun) | 478 | if (lun != deve->se_lun) |
479 | continue; | 479 | continue; |
480 | spin_unlock_irq(&nacl->device_list_lock); | 480 | spin_unlock_irq(&nacl->device_list_lock); |
481 | 481 | ||
482 | core_update_device_list_for_node(lun, NULL, | 482 | core_update_device_list_for_node(lun, NULL, |
483 | deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS, | 483 | deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS, |
484 | nacl, tpg, 0); | 484 | nacl, tpg, 0); |
485 | 485 | ||
486 | spin_lock_irq(&nacl->device_list_lock); | 486 | spin_lock_irq(&nacl->device_list_lock); |
487 | } | 487 | } |
488 | spin_unlock_irq(&nacl->device_list_lock); | 488 | spin_unlock_irq(&nacl->device_list_lock); |
489 | 489 | ||
490 | spin_lock_bh(&tpg->acl_node_lock); | 490 | spin_lock_bh(&tpg->acl_node_lock); |
491 | } | 491 | } |
492 | spin_unlock_bh(&tpg->acl_node_lock); | 492 | spin_unlock_bh(&tpg->acl_node_lock); |
493 | 493 | ||
494 | return; | 494 | return; |
495 | } | 495 | } |
496 | 496 | ||
497 | static struct se_port *core_alloc_port(struct se_device *dev) | 497 | static struct se_port *core_alloc_port(struct se_device *dev) |
498 | { | 498 | { |
499 | struct se_port *port, *port_tmp; | 499 | struct se_port *port, *port_tmp; |
500 | 500 | ||
501 | port = kzalloc(sizeof(struct se_port), GFP_KERNEL); | 501 | port = kzalloc(sizeof(struct se_port), GFP_KERNEL); |
502 | if (!(port)) { | 502 | if (!(port)) { |
503 | printk(KERN_ERR "Unable to allocate struct se_port\n"); | 503 | printk(KERN_ERR "Unable to allocate struct se_port\n"); |
504 | return NULL; | 504 | return NULL; |
505 | } | 505 | } |
506 | INIT_LIST_HEAD(&port->sep_alua_list); | 506 | INIT_LIST_HEAD(&port->sep_alua_list); |
507 | INIT_LIST_HEAD(&port->sep_list); | 507 | INIT_LIST_HEAD(&port->sep_list); |
508 | atomic_set(&port->sep_tg_pt_secondary_offline, 0); | 508 | atomic_set(&port->sep_tg_pt_secondary_offline, 0); |
509 | spin_lock_init(&port->sep_alua_lock); | 509 | spin_lock_init(&port->sep_alua_lock); |
510 | mutex_init(&port->sep_tg_pt_md_mutex); | 510 | mutex_init(&port->sep_tg_pt_md_mutex); |
511 | 511 | ||
512 | spin_lock(&dev->se_port_lock); | 512 | spin_lock(&dev->se_port_lock); |
513 | if (dev->dev_port_count == 0x0000ffff) { | 513 | if (dev->dev_port_count == 0x0000ffff) { |
514 | printk(KERN_WARNING "Reached dev->dev_port_count ==" | 514 | printk(KERN_WARNING "Reached dev->dev_port_count ==" |
515 | " 0x0000ffff\n"); | 515 | " 0x0000ffff\n"); |
516 | spin_unlock(&dev->se_port_lock); | 516 | spin_unlock(&dev->se_port_lock); |
517 | return NULL; | 517 | return NULL; |
518 | } | 518 | } |
519 | again: | 519 | again: |
520 | /* | 520 | /* |
521 | * Allocate the next RELATIVE TARGET PORT IDENTIFER for this struct se_device | 521 | * Allocate the next RELATIVE TARGET PORT IDENTIFER for this struct se_device |
522 | * Here is the table from spc4r17 section 7.7.3.8. | 522 | * Here is the table from spc4r17 section 7.7.3.8. |
523 | * | 523 | * |
524 | * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field | 524 | * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field |
525 | * | 525 | * |
526 | * Code Description | 526 | * Code Description |
527 | * 0h Reserved | 527 | * 0h Reserved |
528 | * 1h Relative port 1, historically known as port A | 528 | * 1h Relative port 1, historically known as port A |
529 | * 2h Relative port 2, historically known as port B | 529 | * 2h Relative port 2, historically known as port B |
530 | * 3h to FFFFh Relative port 3 through 65 535 | 530 | * 3h to FFFFh Relative port 3 through 65 535 |
531 | */ | 531 | */ |
532 | port->sep_rtpi = dev->dev_rpti_counter++; | 532 | port->sep_rtpi = dev->dev_rpti_counter++; |
533 | if (!(port->sep_rtpi)) | 533 | if (!(port->sep_rtpi)) |
534 | goto again; | 534 | goto again; |
535 | 535 | ||
536 | list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) { | 536 | list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) { |
537 | /* | 537 | /* |
538 | * Make sure RELATIVE TARGET PORT IDENTIFER is unique | 538 | * Make sure RELATIVE TARGET PORT IDENTIFER is unique |
539 | * for 16-bit wrap.. | 539 | * for 16-bit wrap.. |
540 | */ | 540 | */ |
541 | if (port->sep_rtpi == port_tmp->sep_rtpi) | 541 | if (port->sep_rtpi == port_tmp->sep_rtpi) |
542 | goto again; | 542 | goto again; |
543 | } | 543 | } |
544 | spin_unlock(&dev->se_port_lock); | 544 | spin_unlock(&dev->se_port_lock); |
545 | 545 | ||
546 | return port; | 546 | return port; |
547 | } | 547 | } |
548 | 548 | ||
549 | static void core_export_port( | 549 | static void core_export_port( |
550 | struct se_device *dev, | 550 | struct se_device *dev, |
551 | struct se_portal_group *tpg, | 551 | struct se_portal_group *tpg, |
552 | struct se_port *port, | 552 | struct se_port *port, |
553 | struct se_lun *lun) | 553 | struct se_lun *lun) |
554 | { | 554 | { |
555 | struct se_subsystem_dev *su_dev = SU_DEV(dev); | 555 | struct se_subsystem_dev *su_dev = SU_DEV(dev); |
556 | struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL; | 556 | struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL; |
557 | 557 | ||
558 | spin_lock(&dev->se_port_lock); | 558 | spin_lock(&dev->se_port_lock); |
559 | spin_lock(&lun->lun_sep_lock); | 559 | spin_lock(&lun->lun_sep_lock); |
560 | port->sep_tpg = tpg; | 560 | port->sep_tpg = tpg; |
561 | port->sep_lun = lun; | 561 | port->sep_lun = lun; |
562 | lun->lun_sep = port; | 562 | lun->lun_sep = port; |
563 | spin_unlock(&lun->lun_sep_lock); | 563 | spin_unlock(&lun->lun_sep_lock); |
564 | 564 | ||
565 | list_add_tail(&port->sep_list, &dev->dev_sep_list); | 565 | list_add_tail(&port->sep_list, &dev->dev_sep_list); |
566 | spin_unlock(&dev->se_port_lock); | 566 | spin_unlock(&dev->se_port_lock); |
567 | 567 | ||
568 | if (T10_ALUA(su_dev)->alua_type == SPC3_ALUA_EMULATED) { | 568 | if (T10_ALUA(su_dev)->alua_type == SPC3_ALUA_EMULATED) { |
569 | tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port); | 569 | tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port); |
570 | if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) { | 570 | if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) { |
571 | printk(KERN_ERR "Unable to allocate t10_alua_tg_pt" | 571 | printk(KERN_ERR "Unable to allocate t10_alua_tg_pt" |
572 | "_gp_member_t\n"); | 572 | "_gp_member_t\n"); |
573 | return; | 573 | return; |
574 | } | 574 | } |
575 | spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | 575 | spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); |
576 | __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, | 576 | __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, |
577 | T10_ALUA(su_dev)->default_tg_pt_gp); | 577 | T10_ALUA(su_dev)->default_tg_pt_gp); |
578 | spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | 578 | spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); |
579 | printk(KERN_INFO "%s/%s: Adding to default ALUA Target Port" | 579 | printk(KERN_INFO "%s/%s: Adding to default ALUA Target Port" |
580 | " Group: alua/default_tg_pt_gp\n", | 580 | " Group: alua/default_tg_pt_gp\n", |
581 | TRANSPORT(dev)->name, TPG_TFO(tpg)->get_fabric_name()); | 581 | TRANSPORT(dev)->name, TPG_TFO(tpg)->get_fabric_name()); |
582 | } | 582 | } |
583 | 583 | ||
584 | dev->dev_port_count++; | 584 | dev->dev_port_count++; |
585 | port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFER */ | 585 | port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFER */ |
586 | } | 586 | } |
587 | 587 | ||
588 | /* | 588 | /* |
589 | * Called with struct se_device->se_port_lock spinlock held. | 589 | * Called with struct se_device->se_port_lock spinlock held. |
590 | */ | 590 | */ |
591 | static void core_release_port(struct se_device *dev, struct se_port *port) | 591 | static void core_release_port(struct se_device *dev, struct se_port *port) |
592 | __releases(&dev->se_port_lock) __acquires(&dev->se_port_lock) | ||
592 | { | 593 | { |
593 | /* | 594 | /* |
594 | * Wait for any port reference for PR ALL_TG_PT=1 operation | 595 | * Wait for any port reference for PR ALL_TG_PT=1 operation |
595 | * to complete in __core_scsi3_alloc_registration() | 596 | * to complete in __core_scsi3_alloc_registration() |
596 | */ | 597 | */ |
597 | spin_unlock(&dev->se_port_lock); | 598 | spin_unlock(&dev->se_port_lock); |
598 | if (atomic_read(&port->sep_tg_pt_ref_cnt)) | 599 | if (atomic_read(&port->sep_tg_pt_ref_cnt)) |
599 | cpu_relax(); | 600 | cpu_relax(); |
600 | spin_lock(&dev->se_port_lock); | 601 | spin_lock(&dev->se_port_lock); |
601 | 602 | ||
602 | core_alua_free_tg_pt_gp_mem(port); | 603 | core_alua_free_tg_pt_gp_mem(port); |
603 | 604 | ||
604 | list_del(&port->sep_list); | 605 | list_del(&port->sep_list); |
605 | dev->dev_port_count--; | 606 | dev->dev_port_count--; |
606 | kfree(port); | 607 | kfree(port); |
607 | 608 | ||
608 | return; | 609 | return; |
609 | } | 610 | } |
610 | 611 | ||
611 | int core_dev_export( | 612 | int core_dev_export( |
612 | struct se_device *dev, | 613 | struct se_device *dev, |
613 | struct se_portal_group *tpg, | 614 | struct se_portal_group *tpg, |
614 | struct se_lun *lun) | 615 | struct se_lun *lun) |
615 | { | 616 | { |
616 | struct se_port *port; | 617 | struct se_port *port; |
617 | 618 | ||
618 | port = core_alloc_port(dev); | 619 | port = core_alloc_port(dev); |
619 | if (!(port)) | 620 | if (!(port)) |
620 | return -1; | 621 | return -1; |
621 | 622 | ||
622 | lun->lun_se_dev = dev; | 623 | lun->lun_se_dev = dev; |
623 | se_dev_start(dev); | 624 | se_dev_start(dev); |
624 | 625 | ||
625 | atomic_inc(&dev->dev_export_obj.obj_access_count); | 626 | atomic_inc(&dev->dev_export_obj.obj_access_count); |
626 | core_export_port(dev, tpg, port, lun); | 627 | core_export_port(dev, tpg, port, lun); |
627 | return 0; | 628 | return 0; |
628 | } | 629 | } |
629 | 630 | ||
630 | void core_dev_unexport( | 631 | void core_dev_unexport( |
631 | struct se_device *dev, | 632 | struct se_device *dev, |
632 | struct se_portal_group *tpg, | 633 | struct se_portal_group *tpg, |
633 | struct se_lun *lun) | 634 | struct se_lun *lun) |
634 | { | 635 | { |
635 | struct se_port *port = lun->lun_sep; | 636 | struct se_port *port = lun->lun_sep; |
636 | 637 | ||
637 | spin_lock(&lun->lun_sep_lock); | 638 | spin_lock(&lun->lun_sep_lock); |
638 | if (lun->lun_se_dev == NULL) { | 639 | if (lun->lun_se_dev == NULL) { |
639 | spin_unlock(&lun->lun_sep_lock); | 640 | spin_unlock(&lun->lun_sep_lock); |
640 | return; | 641 | return; |
641 | } | 642 | } |
642 | spin_unlock(&lun->lun_sep_lock); | 643 | spin_unlock(&lun->lun_sep_lock); |
643 | 644 | ||
644 | spin_lock(&dev->se_port_lock); | 645 | spin_lock(&dev->se_port_lock); |
645 | atomic_dec(&dev->dev_export_obj.obj_access_count); | 646 | atomic_dec(&dev->dev_export_obj.obj_access_count); |
646 | core_release_port(dev, port); | 647 | core_release_port(dev, port); |
647 | spin_unlock(&dev->se_port_lock); | 648 | spin_unlock(&dev->se_port_lock); |
648 | 649 | ||
649 | se_dev_stop(dev); | 650 | se_dev_stop(dev); |
650 | lun->lun_se_dev = NULL; | 651 | lun->lun_se_dev = NULL; |
651 | } | 652 | } |
652 | 653 | ||
653 | int transport_core_report_lun_response(struct se_cmd *se_cmd) | 654 | int transport_core_report_lun_response(struct se_cmd *se_cmd) |
654 | { | 655 | { |
655 | struct se_dev_entry *deve; | 656 | struct se_dev_entry *deve; |
656 | struct se_lun *se_lun; | 657 | struct se_lun *se_lun; |
657 | struct se_session *se_sess = SE_SESS(se_cmd); | 658 | struct se_session *se_sess = SE_SESS(se_cmd); |
658 | struct se_task *se_task; | 659 | struct se_task *se_task; |
659 | unsigned char *buf = (unsigned char *)T_TASK(se_cmd)->t_task_buf; | 660 | unsigned char *buf = (unsigned char *)T_TASK(se_cmd)->t_task_buf; |
660 | u32 cdb_offset = 0, lun_count = 0, offset = 8; | 661 | u32 cdb_offset = 0, lun_count = 0, offset = 8; |
661 | u64 i, lun; | 662 | u64 i, lun; |
662 | 663 | ||
663 | list_for_each_entry(se_task, &T_TASK(se_cmd)->t_task_list, t_list) | 664 | list_for_each_entry(se_task, &T_TASK(se_cmd)->t_task_list, t_list) |
664 | break; | 665 | break; |
665 | 666 | ||
666 | if (!(se_task)) { | 667 | if (!(se_task)) { |
667 | printk(KERN_ERR "Unable to locate struct se_task for struct se_cmd\n"); | 668 | printk(KERN_ERR "Unable to locate struct se_task for struct se_cmd\n"); |
668 | return PYX_TRANSPORT_LU_COMM_FAILURE; | 669 | return PYX_TRANSPORT_LU_COMM_FAILURE; |
669 | } | 670 | } |
670 | 671 | ||
671 | /* | 672 | /* |
672 | * If no struct se_session pointer is present, this struct se_cmd is | 673 | * If no struct se_session pointer is present, this struct se_cmd is |
673 | * coming via a target_core_mod PASSTHROUGH op, and not through | 674 | * coming via a target_core_mod PASSTHROUGH op, and not through |
674 | * a $FABRIC_MOD. In that case, report LUN=0 only. | 675 | * a $FABRIC_MOD. In that case, report LUN=0 only. |
675 | */ | 676 | */ |
676 | if (!(se_sess)) { | 677 | if (!(se_sess)) { |
677 | lun = 0; | 678 | lun = 0; |
678 | buf[offset++] = ((lun >> 56) & 0xff); | 679 | buf[offset++] = ((lun >> 56) & 0xff); |
679 | buf[offset++] = ((lun >> 48) & 0xff); | 680 | buf[offset++] = ((lun >> 48) & 0xff); |
680 | buf[offset++] = ((lun >> 40) & 0xff); | 681 | buf[offset++] = ((lun >> 40) & 0xff); |
681 | buf[offset++] = ((lun >> 32) & 0xff); | 682 | buf[offset++] = ((lun >> 32) & 0xff); |
682 | buf[offset++] = ((lun >> 24) & 0xff); | 683 | buf[offset++] = ((lun >> 24) & 0xff); |
683 | buf[offset++] = ((lun >> 16) & 0xff); | 684 | buf[offset++] = ((lun >> 16) & 0xff); |
684 | buf[offset++] = ((lun >> 8) & 0xff); | 685 | buf[offset++] = ((lun >> 8) & 0xff); |
685 | buf[offset++] = (lun & 0xff); | 686 | buf[offset++] = (lun & 0xff); |
686 | lun_count = 1; | 687 | lun_count = 1; |
687 | goto done; | 688 | goto done; |
688 | } | 689 | } |
689 | 690 | ||
690 | spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); | 691 | spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); |
691 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { | 692 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { |
692 | deve = &SE_NODE_ACL(se_sess)->device_list[i]; | 693 | deve = &SE_NODE_ACL(se_sess)->device_list[i]; |
693 | if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) | 694 | if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) |
694 | continue; | 695 | continue; |
695 | se_lun = deve->se_lun; | 696 | se_lun = deve->se_lun; |
696 | /* | 697 | /* |
697 | * We determine the correct LUN LIST LENGTH even once we | 698 | * We determine the correct LUN LIST LENGTH even once we |
698 | * have reached the initial allocation length. | 699 | * have reached the initial allocation length. |
699 | * See SPC2-R20 7.19. | 700 | * See SPC2-R20 7.19. |
700 | */ | 701 | */ |
701 | lun_count++; | 702 | lun_count++; |
702 | if ((cdb_offset + 8) >= se_cmd->data_length) | 703 | if ((cdb_offset + 8) >= se_cmd->data_length) |
703 | continue; | 704 | continue; |
704 | 705 | ||
705 | lun = cpu_to_be64(CMD_TFO(se_cmd)->pack_lun(deve->mapped_lun)); | 706 | lun = cpu_to_be64(CMD_TFO(se_cmd)->pack_lun(deve->mapped_lun)); |
706 | buf[offset++] = ((lun >> 56) & 0xff); | 707 | buf[offset++] = ((lun >> 56) & 0xff); |
707 | buf[offset++] = ((lun >> 48) & 0xff); | 708 | buf[offset++] = ((lun >> 48) & 0xff); |
708 | buf[offset++] = ((lun >> 40) & 0xff); | 709 | buf[offset++] = ((lun >> 40) & 0xff); |
709 | buf[offset++] = ((lun >> 32) & 0xff); | 710 | buf[offset++] = ((lun >> 32) & 0xff); |
710 | buf[offset++] = ((lun >> 24) & 0xff); | 711 | buf[offset++] = ((lun >> 24) & 0xff); |
711 | buf[offset++] = ((lun >> 16) & 0xff); | 712 | buf[offset++] = ((lun >> 16) & 0xff); |
712 | buf[offset++] = ((lun >> 8) & 0xff); | 713 | buf[offset++] = ((lun >> 8) & 0xff); |
713 | buf[offset++] = (lun & 0xff); | 714 | buf[offset++] = (lun & 0xff); |
714 | cdb_offset += 8; | 715 | cdb_offset += 8; |
715 | } | 716 | } |
716 | spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); | 717 | spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); |
717 | 718 | ||
718 | /* | 719 | /* |
719 | * See SPC3 r07, page 159. | 720 | * See SPC3 r07, page 159. |
720 | */ | 721 | */ |
721 | done: | 722 | done: |
722 | lun_count *= 8; | 723 | lun_count *= 8; |
723 | buf[0] = ((lun_count >> 24) & 0xff); | 724 | buf[0] = ((lun_count >> 24) & 0xff); |
724 | buf[1] = ((lun_count >> 16) & 0xff); | 725 | buf[1] = ((lun_count >> 16) & 0xff); |
725 | buf[2] = ((lun_count >> 8) & 0xff); | 726 | buf[2] = ((lun_count >> 8) & 0xff); |
726 | buf[3] = (lun_count & 0xff); | 727 | buf[3] = (lun_count & 0xff); |
727 | 728 | ||
728 | return PYX_TRANSPORT_SENT_TO_TRANSPORT; | 729 | return PYX_TRANSPORT_SENT_TO_TRANSPORT; |
729 | } | 730 | } |
730 | 731 | ||
731 | /* se_release_device_for_hba(): | 732 | /* se_release_device_for_hba(): |
732 | * | 733 | * |
733 | * | 734 | * |
734 | */ | 735 | */ |
735 | void se_release_device_for_hba(struct se_device *dev) | 736 | void se_release_device_for_hba(struct se_device *dev) |
736 | { | 737 | { |
737 | struct se_hba *hba = dev->se_hba; | 738 | struct se_hba *hba = dev->se_hba; |
738 | 739 | ||
739 | if ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) || | 740 | if ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) || |
740 | (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) || | 741 | (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) || |
741 | (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) || | 742 | (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) || |
742 | (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_ACTIVATED) || | 743 | (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_ACTIVATED) || |
743 | (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_DEACTIVATED)) | 744 | (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_DEACTIVATED)) |
744 | se_dev_stop(dev); | 745 | se_dev_stop(dev); |
745 | 746 | ||
746 | if (dev->dev_ptr) { | 747 | if (dev->dev_ptr) { |
747 | kthread_stop(dev->process_thread); | 748 | kthread_stop(dev->process_thread); |
748 | if (dev->transport->free_device) | 749 | if (dev->transport->free_device) |
749 | dev->transport->free_device(dev->dev_ptr); | 750 | dev->transport->free_device(dev->dev_ptr); |
750 | } | 751 | } |
751 | 752 | ||
752 | spin_lock(&hba->device_lock); | 753 | spin_lock(&hba->device_lock); |
753 | list_del(&dev->dev_list); | 754 | list_del(&dev->dev_list); |
754 | hba->dev_count--; | 755 | hba->dev_count--; |
755 | spin_unlock(&hba->device_lock); | 756 | spin_unlock(&hba->device_lock); |
756 | 757 | ||
757 | core_scsi3_free_all_registrations(dev); | 758 | core_scsi3_free_all_registrations(dev); |
758 | se_release_vpd_for_dev(dev); | 759 | se_release_vpd_for_dev(dev); |
759 | 760 | ||
760 | kfree(dev->dev_status_queue_obj); | 761 | kfree(dev->dev_status_queue_obj); |
761 | kfree(dev->dev_queue_obj); | 762 | kfree(dev->dev_queue_obj); |
762 | kfree(dev); | 763 | kfree(dev); |
763 | 764 | ||
764 | return; | 765 | return; |
765 | } | 766 | } |
766 | 767 | ||
767 | void se_release_vpd_for_dev(struct se_device *dev) | 768 | void se_release_vpd_for_dev(struct se_device *dev) |
768 | { | 769 | { |
769 | struct t10_vpd *vpd, *vpd_tmp; | 770 | struct t10_vpd *vpd, *vpd_tmp; |
770 | 771 | ||
771 | spin_lock(&DEV_T10_WWN(dev)->t10_vpd_lock); | 772 | spin_lock(&DEV_T10_WWN(dev)->t10_vpd_lock); |
772 | list_for_each_entry_safe(vpd, vpd_tmp, | 773 | list_for_each_entry_safe(vpd, vpd_tmp, |
773 | &DEV_T10_WWN(dev)->t10_vpd_list, vpd_list) { | 774 | &DEV_T10_WWN(dev)->t10_vpd_list, vpd_list) { |
774 | list_del(&vpd->vpd_list); | 775 | list_del(&vpd->vpd_list); |
775 | kfree(vpd); | 776 | kfree(vpd); |
776 | } | 777 | } |
777 | spin_unlock(&DEV_T10_WWN(dev)->t10_vpd_lock); | 778 | spin_unlock(&DEV_T10_WWN(dev)->t10_vpd_lock); |
778 | 779 | ||
779 | return; | 780 | return; |
780 | } | 781 | } |
781 | 782 | ||
782 | /* se_free_virtual_device(): | 783 | /* se_free_virtual_device(): |
783 | * | 784 | * |
784 | * Used for IBLOCK, RAMDISK, and FILEIO Transport Drivers. | 785 | * Used for IBLOCK, RAMDISK, and FILEIO Transport Drivers. |
785 | */ | 786 | */ |
786 | int se_free_virtual_device(struct se_device *dev, struct se_hba *hba) | 787 | int se_free_virtual_device(struct se_device *dev, struct se_hba *hba) |
787 | { | 788 | { |
788 | if (!list_empty(&dev->dev_sep_list)) | 789 | if (!list_empty(&dev->dev_sep_list)) |
789 | dump_stack(); | 790 | dump_stack(); |
790 | 791 | ||
791 | core_alua_free_lu_gp_mem(dev); | 792 | core_alua_free_lu_gp_mem(dev); |
792 | se_release_device_for_hba(dev); | 793 | se_release_device_for_hba(dev); |
793 | 794 | ||
794 | return 0; | 795 | return 0; |
795 | } | 796 | } |
796 | 797 | ||
797 | static void se_dev_start(struct se_device *dev) | 798 | static void se_dev_start(struct se_device *dev) |
798 | { | 799 | { |
799 | struct se_hba *hba = dev->se_hba; | 800 | struct se_hba *hba = dev->se_hba; |
800 | 801 | ||
801 | spin_lock(&hba->device_lock); | 802 | spin_lock(&hba->device_lock); |
802 | atomic_inc(&dev->dev_obj.obj_access_count); | 803 | atomic_inc(&dev->dev_obj.obj_access_count); |
803 | if (atomic_read(&dev->dev_obj.obj_access_count) == 1) { | 804 | if (atomic_read(&dev->dev_obj.obj_access_count) == 1) { |
804 | if (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) { | 805 | if (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) { |
805 | dev->dev_status &= ~TRANSPORT_DEVICE_DEACTIVATED; | 806 | dev->dev_status &= ~TRANSPORT_DEVICE_DEACTIVATED; |
806 | dev->dev_status |= TRANSPORT_DEVICE_ACTIVATED; | 807 | dev->dev_status |= TRANSPORT_DEVICE_ACTIVATED; |
807 | } else if (dev->dev_status & | 808 | } else if (dev->dev_status & |
808 | TRANSPORT_DEVICE_OFFLINE_DEACTIVATED) { | 809 | TRANSPORT_DEVICE_OFFLINE_DEACTIVATED) { |
809 | dev->dev_status &= | 810 | dev->dev_status &= |
810 | ~TRANSPORT_DEVICE_OFFLINE_DEACTIVATED; | 811 | ~TRANSPORT_DEVICE_OFFLINE_DEACTIVATED; |
811 | dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_ACTIVATED; | 812 | dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_ACTIVATED; |
812 | } | 813 | } |
813 | } | 814 | } |
814 | spin_unlock(&hba->device_lock); | 815 | spin_unlock(&hba->device_lock); |
815 | } | 816 | } |
816 | 817 | ||
817 | static void se_dev_stop(struct se_device *dev) | 818 | static void se_dev_stop(struct se_device *dev) |
818 | { | 819 | { |
819 | struct se_hba *hba = dev->se_hba; | 820 | struct se_hba *hba = dev->se_hba; |
820 | 821 | ||
821 | spin_lock(&hba->device_lock); | 822 | spin_lock(&hba->device_lock); |
822 | atomic_dec(&dev->dev_obj.obj_access_count); | 823 | atomic_dec(&dev->dev_obj.obj_access_count); |
823 | if (atomic_read(&dev->dev_obj.obj_access_count) == 0) { | 824 | if (atomic_read(&dev->dev_obj.obj_access_count) == 0) { |
824 | if (dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) { | 825 | if (dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) { |
825 | dev->dev_status &= ~TRANSPORT_DEVICE_ACTIVATED; | 826 | dev->dev_status &= ~TRANSPORT_DEVICE_ACTIVATED; |
826 | dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED; | 827 | dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED; |
827 | } else if (dev->dev_status & | 828 | } else if (dev->dev_status & |
828 | TRANSPORT_DEVICE_OFFLINE_ACTIVATED) { | 829 | TRANSPORT_DEVICE_OFFLINE_ACTIVATED) { |
829 | dev->dev_status &= ~TRANSPORT_DEVICE_OFFLINE_ACTIVATED; | 830 | dev->dev_status &= ~TRANSPORT_DEVICE_OFFLINE_ACTIVATED; |
830 | dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_DEACTIVATED; | 831 | dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_DEACTIVATED; |
831 | } | 832 | } |
832 | } | 833 | } |
833 | spin_unlock(&hba->device_lock); | 834 | spin_unlock(&hba->device_lock); |
834 | } | 835 | } |
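Taken together, se_dev_start() and se_dev_stop() implement a reference-counted state machine under hba->device_lock: the first claimer flips a *_DEACTIVATED status to its *_ACTIVATED counterpart, and the last release flips it back. A minimal sketch of the intended pairing (example_claim_and_release() is a hypothetical caller, not part of this file):

static void example_claim_and_release(struct se_device *dev)
{
	se_dev_start(dev);	/* obj_access_count 0 -> 1 activates dev */
	/* ... issue commands against dev ... */
	se_dev_stop(dev);	/* obj_access_count 1 -> 0 deactivates dev */
}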
835 | 836 | ||
836 | int se_dev_check_online(struct se_device *dev) | 837 | int se_dev_check_online(struct se_device *dev) |
837 | { | 838 | { |
838 | int ret; | 839 | int ret; |
839 | 840 | ||
840 | spin_lock_irq(&dev->dev_status_lock); | 841 | spin_lock_irq(&dev->dev_status_lock); |
841 | ret = ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) || | 842 | ret = ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) || |
842 | (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED)) ? 0 : 1; | 843 | (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED)) ? 0 : 1; |
843 | spin_unlock_irq(&dev->dev_status_lock); | 844 | spin_unlock_irq(&dev->dev_status_lock); |
844 | 845 | ||
845 | return ret; | 846 | return ret; |
846 | } | 847 | } |
847 | 848 | ||
848 | int se_dev_check_shutdown(struct se_device *dev) | 849 | int se_dev_check_shutdown(struct se_device *dev) |
849 | { | 850 | { |
850 | int ret; | 851 | int ret; |
851 | 852 | ||
852 | spin_lock_irq(&dev->dev_status_lock); | 853 | spin_lock_irq(&dev->dev_status_lock); |
853 | ret = (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN); | 854 | ret = (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN); |
854 | spin_unlock_irq(&dev->dev_status_lock); | 855 | spin_unlock_irq(&dev->dev_status_lock); |
855 | 856 | ||
856 | return ret; | 857 | return ret; |
857 | } | 858 | } |
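Note the inverted convention here: se_dev_check_online() returns 0 when the device is in an online state (ACTIVATED or DEACTIVATED) and 1 otherwise, while se_dev_check_shutdown() returns non-zero when TRANSPORT_DEVICE_SHUTDOWN is set. A hypothetical caller sketch (not from this file) gating command dispatch on both:

static int example_queue_cmd(struct se_device *dev)
{
	if (se_dev_check_online(dev) != 0)
		return -ENODEV;		/* non-zero means not online */
	if (se_dev_check_shutdown(dev))
		return -ESHUTDOWN;	/* device is being torn down */
	/* ... safe to dispatch ... */
	return 0;
}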
858 | 859 | ||
859 | void se_dev_set_default_attribs( | 860 | void se_dev_set_default_attribs( |
860 | struct se_device *dev, | 861 | struct se_device *dev, |
861 | struct se_dev_limits *dev_limits) | 862 | struct se_dev_limits *dev_limits) |
862 | { | 863 | { |
863 | struct queue_limits *limits = &dev_limits->limits; | 864 | struct queue_limits *limits = &dev_limits->limits; |
864 | 865 | ||
865 | DEV_ATTRIB(dev)->emulate_dpo = DA_EMULATE_DPO; | 866 | DEV_ATTRIB(dev)->emulate_dpo = DA_EMULATE_DPO; |
866 | DEV_ATTRIB(dev)->emulate_fua_write = DA_EMULATE_FUA_WRITE; | 867 | DEV_ATTRIB(dev)->emulate_fua_write = DA_EMULATE_FUA_WRITE; |
867 | DEV_ATTRIB(dev)->emulate_fua_read = DA_EMULATE_FUA_READ; | 868 | DEV_ATTRIB(dev)->emulate_fua_read = DA_EMULATE_FUA_READ; |
868 | DEV_ATTRIB(dev)->emulate_write_cache = DA_EMULATE_WRITE_CACHE; | 869 | DEV_ATTRIB(dev)->emulate_write_cache = DA_EMULATE_WRITE_CACHE; |
869 | DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL; | 870 | DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL; |
870 | DEV_ATTRIB(dev)->emulate_tas = DA_EMULATE_TAS; | 871 | DEV_ATTRIB(dev)->emulate_tas = DA_EMULATE_TAS; |
871 | DEV_ATTRIB(dev)->emulate_tpu = DA_EMULATE_TPU; | 872 | DEV_ATTRIB(dev)->emulate_tpu = DA_EMULATE_TPU; |
872 | DEV_ATTRIB(dev)->emulate_tpws = DA_EMULATE_TPWS; | 873 | DEV_ATTRIB(dev)->emulate_tpws = DA_EMULATE_TPWS; |
873 | DEV_ATTRIB(dev)->emulate_reservations = DA_EMULATE_RESERVATIONS; | 874 | DEV_ATTRIB(dev)->emulate_reservations = DA_EMULATE_RESERVATIONS; |
874 | DEV_ATTRIB(dev)->emulate_alua = DA_EMULATE_ALUA; | 875 | DEV_ATTRIB(dev)->emulate_alua = DA_EMULATE_ALUA; |
875 | DEV_ATTRIB(dev)->enforce_pr_isids = DA_ENFORCE_PR_ISIDS; | 876 | DEV_ATTRIB(dev)->enforce_pr_isids = DA_ENFORCE_PR_ISIDS; |
876 | /* | 877 | /* |
877 | * The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK | 878 | * The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK |
878 | * iblock_create_virtdevice() from struct queue_limits values | 879 | * iblock_create_virtdevice() from struct queue_limits values |
879 | * if blk_queue_discard()==1 | 880 | * if blk_queue_discard()==1 |
880 | */ | 881 | */ |
881 | DEV_ATTRIB(dev)->max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT; | 882 | DEV_ATTRIB(dev)->max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT; |
882 | DEV_ATTRIB(dev)->max_unmap_block_desc_count = | 883 | DEV_ATTRIB(dev)->max_unmap_block_desc_count = |
883 | DA_MAX_UNMAP_BLOCK_DESC_COUNT; | 884 | DA_MAX_UNMAP_BLOCK_DESC_COUNT; |
884 | DEV_ATTRIB(dev)->unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT; | 885 | DEV_ATTRIB(dev)->unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT; |
885 | DEV_ATTRIB(dev)->unmap_granularity_alignment = | 886 | DEV_ATTRIB(dev)->unmap_granularity_alignment = |
886 | DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT; | 887 | DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT; |
887 | /* | 888 | /* |
888 | * block_size is based on subsystem plugin dependent requirements. | 889 | * block_size is based on subsystem plugin dependent requirements. |
889 | */ | 890 | */ |
890 | DEV_ATTRIB(dev)->hw_block_size = limits->logical_block_size; | 891 | DEV_ATTRIB(dev)->hw_block_size = limits->logical_block_size; |
891 | DEV_ATTRIB(dev)->block_size = limits->logical_block_size; | 892 | DEV_ATTRIB(dev)->block_size = limits->logical_block_size; |
892 | /* | 893 | /* |
893 | * max_sectors is based on subsystem plugin dependent requirements. | 894 | * max_sectors is based on subsystem plugin dependent requirements. |
894 | */ | 895 | */ |
895 | DEV_ATTRIB(dev)->hw_max_sectors = limits->max_hw_sectors; | 896 | DEV_ATTRIB(dev)->hw_max_sectors = limits->max_hw_sectors; |
896 | DEV_ATTRIB(dev)->max_sectors = limits->max_sectors; | 897 | DEV_ATTRIB(dev)->max_sectors = limits->max_sectors; |
897 | /* | 898 | /* |
898 | * Set optimal_sectors from max_sectors, which can be lowered via | 899 | * Set optimal_sectors from max_sectors, which can be lowered via |
899 | * configfs. | 900 | * configfs. |
900 | */ | 901 | */ |
901 | DEV_ATTRIB(dev)->optimal_sectors = limits->max_sectors; | 902 | DEV_ATTRIB(dev)->optimal_sectors = limits->max_sectors; |
902 | /* | 903 | /* |
903 | * queue_depth is based on subsystem plugin dependent requirements. | 904 | * queue_depth is based on subsystem plugin dependent requirements. |
904 | */ | 905 | */ |
905 | DEV_ATTRIB(dev)->hw_queue_depth = dev_limits->hw_queue_depth; | 906 | DEV_ATTRIB(dev)->hw_queue_depth = dev_limits->hw_queue_depth; |
906 | DEV_ATTRIB(dev)->queue_depth = dev_limits->queue_depth; | 907 | DEV_ATTRIB(dev)->queue_depth = dev_limits->queue_depth; |
907 | } | 908 | } |
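se_dev_set_default_attribs() expects the subsystem plugin to have seeded struct se_dev_limits first. A minimal sketch of that hand-off, assuming a backend with a struct request_queue (the helper name and the 64/32 depths are illustrative assumptions, not taken from this commit):

static void example_seed_limits(struct se_device *dev, struct request_queue *q)
{
	struct se_dev_limits dev_limits;

	memset(&dev_limits, 0, sizeof(dev_limits));
	dev_limits.limits.logical_block_size = queue_logical_block_size(q);
	dev_limits.limits.max_hw_sectors = queue_max_hw_sectors(q);
	dev_limits.limits.max_sectors = queue_max_sectors(q);
	dev_limits.hw_queue_depth = 64;	/* assumed plugin maximum */
	dev_limits.queue_depth = 32;	/* assumed plugin default */

	se_dev_set_default_attribs(dev, &dev_limits);
}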
908 | 909 | ||
909 | int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout) | 910 | int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout) |
910 | { | 911 | { |
911 | if (task_timeout > DA_TASK_TIMEOUT_MAX) { | 912 | if (task_timeout > DA_TASK_TIMEOUT_MAX) { |
912 | printk(KERN_ERR "dev[%p]: Passed task_timeout: %u larger than" | 913 | printk(KERN_ERR "dev[%p]: Passed task_timeout: %u larger than" |
913 | " DA_TASK_TIMEOUT_MAX\n", dev, task_timeout); | 914 | " DA_TASK_TIMEOUT_MAX\n", dev, task_timeout); |
914 | return -1; | 915 | return -1; |
915 | } else { | 916 | } else { |
916 | DEV_ATTRIB(dev)->task_timeout = task_timeout; | 917 | DEV_ATTRIB(dev)->task_timeout = task_timeout; |
917 | printk(KERN_INFO "dev[%p]: Set SE Device task_timeout: %u\n", | 918 | printk(KERN_INFO "dev[%p]: Set SE Device task_timeout: %u\n", |
918 | dev, task_timeout); | 919 | dev, task_timeout); |
919 | } | 920 | } |
920 | 921 | ||
921 | return 0; | 922 | return 0; |
922 | } | 923 | } |
923 | 924 | ||
924 | int se_dev_set_max_unmap_lba_count( | 925 | int se_dev_set_max_unmap_lba_count( |
925 | struct se_device *dev, | 926 | struct se_device *dev, |
926 | u32 max_unmap_lba_count) | 927 | u32 max_unmap_lba_count) |
927 | { | 928 | { |
928 | DEV_ATTRIB(dev)->max_unmap_lba_count = max_unmap_lba_count; | 929 | DEV_ATTRIB(dev)->max_unmap_lba_count = max_unmap_lba_count; |
929 | printk(KERN_INFO "dev[%p]: Set max_unmap_lba_count: %u\n", | 930 | printk(KERN_INFO "dev[%p]: Set max_unmap_lba_count: %u\n", |
930 | dev, DEV_ATTRIB(dev)->max_unmap_lba_count); | 931 | dev, DEV_ATTRIB(dev)->max_unmap_lba_count); |
931 | return 0; | 932 | return 0; |
932 | } | 933 | } |
933 | 934 | ||
934 | int se_dev_set_max_unmap_block_desc_count( | 935 | int se_dev_set_max_unmap_block_desc_count( |
935 | struct se_device *dev, | 936 | struct se_device *dev, |
936 | u32 max_unmap_block_desc_count) | 937 | u32 max_unmap_block_desc_count) |
937 | { | 938 | { |
938 | DEV_ATTRIB(dev)->max_unmap_block_desc_count = max_unmap_block_desc_count; | 939 | DEV_ATTRIB(dev)->max_unmap_block_desc_count = max_unmap_block_desc_count; |
939 | printk(KERN_INFO "dev[%p]: Set max_unmap_block_desc_count: %u\n", | 940 | printk(KERN_INFO "dev[%p]: Set max_unmap_block_desc_count: %u\n", |
940 | dev, DEV_ATTRIB(dev)->max_unmap_block_desc_count); | 941 | dev, DEV_ATTRIB(dev)->max_unmap_block_desc_count); |
941 | return 0; | 942 | return 0; |
942 | } | 943 | } |
943 | 944 | ||
944 | int se_dev_set_unmap_granularity( | 945 | int se_dev_set_unmap_granularity( |
945 | struct se_device *dev, | 946 | struct se_device *dev, |
946 | u32 unmap_granularity) | 947 | u32 unmap_granularity) |
947 | { | 948 | { |
948 | DEV_ATTRIB(dev)->unmap_granularity = unmap_granularity; | 949 | DEV_ATTRIB(dev)->unmap_granularity = unmap_granularity; |
949 | printk(KERN_INFO "dev[%p]: Set unmap_granularity: %u\n", | 950 | printk(KERN_INFO "dev[%p]: Set unmap_granularity: %u\n", |
950 | dev, DEV_ATTRIB(dev)->unmap_granularity); | 951 | dev, DEV_ATTRIB(dev)->unmap_granularity); |
951 | return 0; | 952 | return 0; |
952 | } | 953 | } |
953 | 954 | ||
954 | int se_dev_set_unmap_granularity_alignment( | 955 | int se_dev_set_unmap_granularity_alignment( |
955 | struct se_device *dev, | 956 | struct se_device *dev, |
956 | u32 unmap_granularity_alignment) | 957 | u32 unmap_granularity_alignment) |
957 | { | 958 | { |
958 | DEV_ATTRIB(dev)->unmap_granularity_alignment = unmap_granularity_alignment; | 959 | DEV_ATTRIB(dev)->unmap_granularity_alignment = unmap_granularity_alignment; |
959 | printk(KERN_INFO "dev[%p]: Set unmap_granularity_alignment: %u\n", | 960 | printk(KERN_INFO "dev[%p]: Set unmap_granularity_alignment: %u\n", |
960 | dev, DEV_ATTRIB(dev)->unmap_granularity_alignment); | 961 | dev, DEV_ATTRIB(dev)->unmap_granularity_alignment); |
961 | return 0; | 962 | return 0; |
962 | } | 963 | } |
963 | 964 | ||
964 | int se_dev_set_emulate_dpo(struct se_device *dev, int flag) | 965 | int se_dev_set_emulate_dpo(struct se_device *dev, int flag) |
965 | { | 966 | { |
966 | if ((flag != 0) && (flag != 1)) { | 967 | if ((flag != 0) && (flag != 1)) { |
967 | printk(KERN_ERR "Illegal value %d\n", flag); | 968 | printk(KERN_ERR "Illegal value %d\n", flag); |
968 | return -1; | 969 | return -1; |
969 | } | 970 | } |
970 | if (TRANSPORT(dev)->dpo_emulated == NULL) { | 971 | if (TRANSPORT(dev)->dpo_emulated == NULL) { |
971 | printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated is NULL\n"); | 972 | printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated is NULL\n"); |
972 | return -1; | 973 | return -1; |
973 | } | 974 | } |
974 | if (TRANSPORT(dev)->dpo_emulated(dev) == 0) { | 975 | if (TRANSPORT(dev)->dpo_emulated(dev) == 0) { |
975 | printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated not supported\n"); | 976 | printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated not supported\n"); |
976 | return -1; | 977 | return -1; |
977 | } | 978 | } |
978 | DEV_ATTRIB(dev)->emulate_dpo = flag; | 979 | DEV_ATTRIB(dev)->emulate_dpo = flag; |
979 | printk(KERN_INFO "dev[%p]: SE Device Page Out (DPO) Emulation" | 980 | printk(KERN_INFO "dev[%p]: SE Device Page Out (DPO) Emulation" |
980 | " bit: %d\n", dev, DEV_ATTRIB(dev)->emulate_dpo); | 981 | " bit: %d\n", dev, DEV_ATTRIB(dev)->emulate_dpo); |
981 | return 0; | 982 | return 0; |
982 | } | 983 | } |
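The fua_write, fua_read and write_cache setters below repeat the same three checks as se_dev_set_emulate_dpo(): boolean flag, non-NULL transport callback, and the callback reporting support. A hedged refactor sketch, not part of this commit, factoring the shared validation into one hypothetical helper (returning errno-style codes rather than the file's bare -1):

static int example_check_bool_emulation(struct se_device *dev, int flag,
					int (*emulated)(struct se_device *))
{
	if (flag != 0 && flag != 1) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -EINVAL;
	}
	if (!emulated) {
		printk(KERN_ERR "Transport does not define this emulation\n");
		return -EINVAL;
	}
	if (emulated(dev) == 0) {
		printk(KERN_ERR "Emulation not supported by transport\n");
		return -ENOSYS;
	}
	return 0;
}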
983 | 984 | ||
984 | int se_dev_set_emulate_fua_write(struct se_device *dev, int flag) | 985 | int se_dev_set_emulate_fua_write(struct se_device *dev, int flag) |
985 | { | 986 | { |
986 | if ((flag != 0) && (flag != 1)) { | 987 | if ((flag != 0) && (flag != 1)) { |
987 | printk(KERN_ERR "Illegal value %d\n", flag); | 988 | printk(KERN_ERR "Illegal value %d\n", flag); |
988 | return -1; | 989 | return -1; |
989 | } | 990 | } |
990 | if (TRANSPORT(dev)->fua_write_emulated == NULL) { | 991 | if (TRANSPORT(dev)->fua_write_emulated == NULL) { |
991 | printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated is NULL\n"); | 992 | printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated is NULL\n"); |
992 | return -1; | 993 | return -1; |
993 | } | 994 | } |
994 | if (TRANSPORT(dev)->fua_write_emulated(dev) == 0) { | 995 | if (TRANSPORT(dev)->fua_write_emulated(dev) == 0) { |
995 | printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated not supported\n"); | 996 | printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated not supported\n"); |
996 | return -1; | 997 | return -1; |
997 | } | 998 | } |
998 | DEV_ATTRIB(dev)->emulate_fua_write = flag; | 999 | DEV_ATTRIB(dev)->emulate_fua_write = flag; |
999 | printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access WRITEs: %d\n", | 1000 | printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access WRITEs: %d\n", |
1000 | dev, DEV_ATTRIB(dev)->emulate_fua_write); | 1001 | dev, DEV_ATTRIB(dev)->emulate_fua_write); |
1001 | return 0; | 1002 | return 0; |
1002 | } | 1003 | } |
1003 | 1004 | ||
1004 | int se_dev_set_emulate_fua_read(struct se_device *dev, int flag) | 1005 | int se_dev_set_emulate_fua_read(struct se_device *dev, int flag) |
1005 | { | 1006 | { |
1006 | if ((flag != 0) && (flag != 1)) { | 1007 | if ((flag != 0) && (flag != 1)) { |
1007 | printk(KERN_ERR "Illegal value %d\n", flag); | 1008 | printk(KERN_ERR "Illegal value %d\n", flag); |
1008 | return -1; | 1009 | return -1; |
1009 | } | 1010 | } |
1010 | if (TRANSPORT(dev)->fua_read_emulated == NULL) { | 1011 | if (TRANSPORT(dev)->fua_read_emulated == NULL) { |
1011 | printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated is NULL\n"); | 1012 | printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated is NULL\n"); |
1012 | return -1; | 1013 | return -1; |
1013 | } | 1014 | } |
1014 | if (TRANSPORT(dev)->fua_read_emulated(dev) == 0) { | 1015 | if (TRANSPORT(dev)->fua_read_emulated(dev) == 0) { |
1015 | printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated not supported\n"); | 1016 | printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated not supported\n"); |
1016 | return -1; | 1017 | return -1; |
1017 | } | 1018 | } |
1018 | DEV_ATTRIB(dev)->emulate_fua_read = flag; | 1019 | DEV_ATTRIB(dev)->emulate_fua_read = flag; |
1019 | printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access READs: %d\n", | 1020 | printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access READs: %d\n", |
1020 | dev, DEV_ATTRIB(dev)->emulate_fua_read); | 1021 | dev, DEV_ATTRIB(dev)->emulate_fua_read); |
1021 | return 0; | 1022 | return 0; |
1022 | } | 1023 | } |
1023 | 1024 | ||
1024 | int se_dev_set_emulate_write_cache(struct se_device *dev, int flag) | 1025 | int se_dev_set_emulate_write_cache(struct se_device *dev, int flag) |
1025 | { | 1026 | { |
1026 | if ((flag != 0) && (flag != 1)) { | 1027 | if ((flag != 0) && (flag != 1)) { |
1027 | printk(KERN_ERR "Illegal value %d\n", flag); | 1028 | printk(KERN_ERR "Illegal value %d\n", flag); |
1028 | return -1; | 1029 | return -1; |
1029 | } | 1030 | } |
1030 | if (TRANSPORT(dev)->write_cache_emulated == NULL) { | 1031 | if (TRANSPORT(dev)->write_cache_emulated == NULL) { |
1031 | printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated is NULL\n"); | 1032 | printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated is NULL\n"); |
1032 | return -1; | 1033 | return -1; |
1033 | } | 1034 | } |
1034 | if (TRANSPORT(dev)->write_cache_emulated(dev) == 0) { | 1035 | if (TRANSPORT(dev)->write_cache_emulated(dev) == 0) { |
1035 | printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated not supported\n"); | 1036 | printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated not supported\n"); |
1036 | return -1; | 1037 | return -1; |
1037 | } | 1038 | } |
1038 | DEV_ATTRIB(dev)->emulate_write_cache = flag; | 1039 | DEV_ATTRIB(dev)->emulate_write_cache = flag; |
1039 | printk(KERN_INFO "dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n", | 1040 | printk(KERN_INFO "dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n", |
1040 | dev, DEV_ATTRIB(dev)->emulate_write_cache); | 1041 | dev, DEV_ATTRIB(dev)->emulate_write_cache); |
1041 | return 0; | 1042 | return 0; |
1042 | } | 1043 | } |
1043 | 1044 | ||
1044 | int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag) | 1045 | int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag) |
1045 | { | 1046 | { |
1046 | if ((flag != 0) && (flag != 1) && (flag != 2)) { | 1047 | if ((flag != 0) && (flag != 1) && (flag != 2)) { |
1047 | printk(KERN_ERR "Illegal value %d\n", flag); | 1048 | printk(KERN_ERR "Illegal value %d\n", flag); |
1048 | return -1; | 1049 | return -1; |
1049 | } | 1050 | } |
1050 | 1051 | ||
1051 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { | 1052 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { |
1052 | printk(KERN_ERR "dev[%p]: Unable to change SE Device" | 1053 | printk(KERN_ERR "dev[%p]: Unable to change SE Device" |
1053 | " UA_INTRLCK_CTRL while dev_export_obj: %d count" | 1054 | " UA_INTRLCK_CTRL while dev_export_obj: %d count" |
1054 | " exists\n", dev, | 1055 | " exists\n", dev, |
1055 | atomic_read(&dev->dev_export_obj.obj_access_count)); | 1056 | atomic_read(&dev->dev_export_obj.obj_access_count)); |
1056 | return -1; | 1057 | return -1; |
1057 | } | 1058 | } |
1058 | DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = flag; | 1059 | DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = flag; |
1059 | printk(KERN_INFO "dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n", | 1060 | printk(KERN_INFO "dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n", |
1060 | dev, DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl); | 1061 | dev, DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl); |
1061 | 1062 | ||
1062 | return 0; | 1063 | return 0; |
1063 | } | 1064 | } |
1064 | 1065 | ||
1065 | int se_dev_set_emulate_tas(struct se_device *dev, int flag) | 1066 | int se_dev_set_emulate_tas(struct se_device *dev, int flag) |
1066 | { | 1067 | { |
1067 | if ((flag != 0) && (flag != 1)) { | 1068 | if ((flag != 0) && (flag != 1)) { |
1068 | printk(KERN_ERR "Illegal value %d\n", flag); | 1069 | printk(KERN_ERR "Illegal value %d\n", flag); |
1069 | return -1; | 1070 | return -1; |
1070 | } | 1071 | } |
1071 | 1072 | ||
1072 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { | 1073 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { |
1073 | printk(KERN_ERR "dev[%p]: Unable to change SE Device TAS while" | 1074 | printk(KERN_ERR "dev[%p]: Unable to change SE Device TAS while" |
1074 | " dev_export_obj: %d count exists\n", dev, | 1075 | " dev_export_obj: %d count exists\n", dev, |
1075 | atomic_read(&dev->dev_export_obj.obj_access_count)); | 1076 | atomic_read(&dev->dev_export_obj.obj_access_count)); |
1076 | return -1; | 1077 | return -1; |
1077 | } | 1078 | } |
1078 | DEV_ATTRIB(dev)->emulate_tas = flag; | 1079 | DEV_ATTRIB(dev)->emulate_tas = flag; |
1079 | printk(KERN_INFO "dev[%p]: SE Device TASK_ABORTED status bit: %s\n", | 1080 | printk(KERN_INFO "dev[%p]: SE Device TASK_ABORTED status bit: %s\n", |
1080 | dev, (DEV_ATTRIB(dev)->emulate_tas) ? "Enabled" : "Disabled"); | 1081 | dev, (DEV_ATTRIB(dev)->emulate_tas) ? "Enabled" : "Disabled"); |
1081 | 1082 | ||
1082 | return 0; | 1083 | return 0; |
1083 | } | 1084 | } |
1084 | 1085 | ||
1085 | int se_dev_set_emulate_tpu(struct se_device *dev, int flag) | 1086 | int se_dev_set_emulate_tpu(struct se_device *dev, int flag) |
1086 | { | 1087 | { |
1087 | if ((flag != 0) && (flag != 1)) { | 1088 | if ((flag != 0) && (flag != 1)) { |
1088 | printk(KERN_ERR "Illegal value %d\n", flag); | 1089 | printk(KERN_ERR "Illegal value %d\n", flag); |
1089 | return -1; | 1090 | return -1; |
1090 | } | 1091 | } |
1091 | /* | 1092 | /* |
1092 | * We expect this value to be non-zero when generic Block Layer | 1093 | * We expect this value to be non-zero when generic Block Layer |
1093 | * Discard support is detected in iblock_create_virtdevice(). | 1094 | * Discard support is detected in iblock_create_virtdevice(). |
1094 | */ | 1095 | */ |
1095 | if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) { | 1096 | if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) { |
1096 | printk(KERN_ERR "Generic Block Discard not supported\n"); | 1097 | printk(KERN_ERR "Generic Block Discard not supported\n"); |
1097 | return -ENOSYS; | 1098 | return -ENOSYS; |
1098 | } | 1099 | } |
1099 | 1100 | ||
1100 | DEV_ATTRIB(dev)->emulate_tpu = flag; | 1101 | DEV_ATTRIB(dev)->emulate_tpu = flag; |
1101 | printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n", | 1102 | printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n", |
1102 | dev, flag); | 1103 | dev, flag); |
1103 | return 0; | 1104 | return 0; |
1104 | } | 1105 | } |
1105 | 1106 | ||
1106 | int se_dev_set_emulate_tpws(struct se_device *dev, int flag) | 1107 | int se_dev_set_emulate_tpws(struct se_device *dev, int flag) |
1107 | { | 1108 | { |
1108 | if ((flag != 0) && (flag != 1)) { | 1109 | if ((flag != 0) && (flag != 1)) { |
1109 | printk(KERN_ERR "Illegal value %d\n", flag); | 1110 | printk(KERN_ERR "Illegal value %d\n", flag); |
1110 | return -1; | 1111 | return -1; |
1111 | } | 1112 | } |
1112 | /* | 1113 | /* |
1113 | * We expect this value to be non-zero when generic Block Layer | 1114 | * We expect this value to be non-zero when generic Block Layer |
1114 | * Discard support is detected in iblock_create_virtdevice(). | 1115 | * Discard support is detected in iblock_create_virtdevice(). |
1115 | */ | 1116 | */ |
1116 | if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) { | 1117 | if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) { |
1117 | printk(KERN_ERR "Generic Block Discard not supported\n"); | 1118 | printk(KERN_ERR "Generic Block Discard not supported\n"); |
1118 | return -ENOSYS; | 1119 | return -ENOSYS; |
1119 | } | 1120 | } |
1120 | 1121 | ||
1121 | DEV_ATTRIB(dev)->emulate_tpws = flag; | 1122 | DEV_ATTRIB(dev)->emulate_tpws = flag; |
1122 | printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n", | 1123 | printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n", |
1123 | dev, flag); | 1124 | dev, flag); |
1124 | return 0; | 1125 | return 0; |
1125 | } | 1126 | } |
1126 | 1127 | ||
1127 | int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag) | 1128 | int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag) |
1128 | { | 1129 | { |
1129 | if ((flag != 0) && (flag != 1)) { | 1130 | if ((flag != 0) && (flag != 1)) { |
1130 | printk(KERN_ERR "Illegal value %d\n", flag); | 1131 | printk(KERN_ERR "Illegal value %d\n", flag); |
1131 | return -1; | 1132 | return -1; |
1132 | } | 1133 | } |
1133 | DEV_ATTRIB(dev)->enforce_pr_isids = flag; | 1134 | DEV_ATTRIB(dev)->enforce_pr_isids = flag; |
1134 | printk(KERN_INFO "dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev, | 1135 | printk(KERN_INFO "dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev, |
1135 | (DEV_ATTRIB(dev)->enforce_pr_isids) ? "Enabled" : "Disabled"); | 1136 | (DEV_ATTRIB(dev)->enforce_pr_isids) ? "Enabled" : "Disabled"); |
1136 | return 0; | 1137 | return 0; |
1137 | } | 1138 | } |
1138 | 1139 | ||
1139 | /* | 1140 | /* |
1140 | * Note, this can only be called on unexported SE Device Object. | 1141 | * Note, this can only be called on unexported SE Device Object. |
1141 | */ | 1142 | */ |
1142 | int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth) | 1143 | int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth) |
1143 | { | 1144 | { |
1144 | u32 orig_queue_depth = dev->queue_depth; | 1145 | u32 orig_queue_depth = dev->queue_depth; |
1145 | 1146 | ||
1146 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { | 1147 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { |
1147 | printk(KERN_ERR "dev[%p]: Unable to change SE Device TCQ while" | 1148 | printk(KERN_ERR "dev[%p]: Unable to change SE Device TCQ while" |
1148 | " dev_export_obj: %d count exists\n", dev, | 1149 | " dev_export_obj: %d count exists\n", dev, |
1149 | atomic_read(&dev->dev_export_obj.obj_access_count)); | 1150 | atomic_read(&dev->dev_export_obj.obj_access_count)); |
1150 | return -1; | 1151 | return -1; |
1151 | } | 1152 | } |
1152 | if (!(queue_depth)) { | 1153 | if (!(queue_depth)) { |
1153 | printk(KERN_ERR "dev[%p]: Illegal ZERO value for queue" | 1154 | printk(KERN_ERR "dev[%p]: Illegal ZERO value for queue" |
1154 | "_depth\n", dev); | 1155 | "_depth\n", dev); |
1155 | return -1; | 1156 | return -1; |
1156 | } | 1157 | } |
1157 | 1158 | ||
1158 | if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { | 1159 | if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { |
1159 | if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) { | 1160 | if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) { |
1160 | printk(KERN_ERR "dev[%p]: Passed queue_depth: %u" | 1161 | printk(KERN_ERR "dev[%p]: Passed queue_depth: %u" |
1161 | " exceeds TCM/SE_Device TCQ: %u\n", | 1162 | " exceeds TCM/SE_Device TCQ: %u\n", |
1162 | dev, queue_depth, | 1163 | dev, queue_depth, |
1163 | DEV_ATTRIB(dev)->hw_queue_depth); | 1164 | DEV_ATTRIB(dev)->hw_queue_depth); |
1164 | return -1; | 1165 | return -1; |
1165 | } | 1166 | } |
1166 | } else { | 1167 | } else { |
1167 | if (queue_depth > DEV_ATTRIB(dev)->queue_depth) { | 1168 | if (queue_depth > DEV_ATTRIB(dev)->queue_depth) { |
1168 | if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) { | 1169 | if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) { |
1169 | printk(KERN_ERR "dev[%p]: Passed queue_depth:" | 1170 | printk(KERN_ERR "dev[%p]: Passed queue_depth:" |
1170 | " %u exceeds TCM/SE_Device MAX" | 1171 | " %u exceeds TCM/SE_Device MAX" |
1171 | " TCQ: %u\n", dev, queue_depth, | 1172 | " TCQ: %u\n", dev, queue_depth, |
1172 | DEV_ATTRIB(dev)->hw_queue_depth); | 1173 | DEV_ATTRIB(dev)->hw_queue_depth); |
1173 | return -1; | 1174 | return -1; |
1174 | } | 1175 | } |
1175 | } | 1176 | } |
1176 | } | 1177 | } |
1177 | 1178 | ||
1178 | DEV_ATTRIB(dev)->queue_depth = dev->queue_depth = queue_depth; | 1179 | DEV_ATTRIB(dev)->queue_depth = dev->queue_depth = queue_depth; |
1179 | if (queue_depth > orig_queue_depth) | 1180 | if (queue_depth > orig_queue_depth) |
1180 | atomic_add(queue_depth - orig_queue_depth, &dev->depth_left); | 1181 | atomic_add(queue_depth - orig_queue_depth, &dev->depth_left); |
1181 | else if (queue_depth < orig_queue_depth) | 1182 | else if (queue_depth < orig_queue_depth) |
1182 | atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left); | 1183 | atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left); |
1183 | 1184 | ||
1184 | printk(KERN_INFO "dev[%p]: SE Device TCQ Depth changed to: %u\n", | 1185 | printk(KERN_INFO "dev[%p]: SE Device TCQ Depth changed to: %u\n", |
1185 | dev, queue_depth); | 1186 | dev, queue_depth); |
1186 | return 0; | 1187 | return 0; |
1187 | } | 1188 | } |
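A worked example of the depth_left adjustment above (illustrative numbers): raising queue_depth from 32 to 64 executes atomic_add(32, &dev->depth_left), granting 32 additional outstanding-command credits; lowering it from 64 to 16 executes atomic_sub(48, &dev->depth_left), which can drive depth_left temporarily negative until in-flight commands complete and return their credits.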
1188 | 1189 | ||
1189 | int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors) | 1190 | int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors) |
1190 | { | 1191 | { |
1191 | int force = 0; /* Force setting for VDEVS */ | 1192 | int force = 0; /* Force setting for VDEVS */ |
1192 | 1193 | ||
1193 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { | 1194 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { |
1194 | printk(KERN_ERR "dev[%p]: Unable to change SE Device" | 1195 | printk(KERN_ERR "dev[%p]: Unable to change SE Device" |
1195 | " max_sectors while dev_export_obj: %d count exists\n", | 1196 | " max_sectors while dev_export_obj: %d count exists\n", |
1196 | dev, atomic_read(&dev->dev_export_obj.obj_access_count)); | 1197 | dev, atomic_read(&dev->dev_export_obj.obj_access_count)); |
1197 | return -1; | 1198 | return -1; |
1198 | } | 1199 | } |
1199 | if (!(max_sectors)) { | 1200 | if (!(max_sectors)) { |
1200 | printk(KERN_ERR "dev[%p]: Illegal ZERO value for" | 1201 | printk(KERN_ERR "dev[%p]: Illegal ZERO value for" |
1201 | " max_sectors\n", dev); | 1202 | " max_sectors\n", dev); |
1202 | return -1; | 1203 | return -1; |
1203 | } | 1204 | } |
1204 | if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) { | 1205 | if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) { |
1205 | printk(KERN_ERR "dev[%p]: Passed max_sectors: %u less than" | 1206 | printk(KERN_ERR "dev[%p]: Passed max_sectors: %u less than" |
1206 | " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors, | 1207 | " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors, |
1207 | DA_STATUS_MAX_SECTORS_MIN); | 1208 | DA_STATUS_MAX_SECTORS_MIN); |
1208 | return -1; | 1209 | return -1; |
1209 | } | 1210 | } |
1210 | if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { | 1211 | if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { |
1211 | if (max_sectors > DEV_ATTRIB(dev)->hw_max_sectors) { | 1212 | if (max_sectors > DEV_ATTRIB(dev)->hw_max_sectors) { |
1212 | printk(KERN_ERR "dev[%p]: Passed max_sectors: %u" | 1213 | printk(KERN_ERR "dev[%p]: Passed max_sectors: %u" |
1213 | " greater than TCM/SE_Device max_sectors:" | 1214 | " greater than TCM/SE_Device max_sectors:" |
1214 | " %u\n", dev, max_sectors, | 1215 | " %u\n", dev, max_sectors, |
1215 | DEV_ATTRIB(dev)->hw_max_sectors); | 1216 | DEV_ATTRIB(dev)->hw_max_sectors); |
1216 | return -1; | 1217 | return -1; |
1217 | } | 1218 | } |
1218 | } else { | 1219 | } else { |
1219 | if (!(force) && (max_sectors > | 1220 | if (!(force) && (max_sectors > |
1220 | DEV_ATTRIB(dev)->hw_max_sectors)) { | 1221 | DEV_ATTRIB(dev)->hw_max_sectors)) { |
1221 | printk(KERN_ERR "dev[%p]: Passed max_sectors: %u" | 1222 | printk(KERN_ERR "dev[%p]: Passed max_sectors: %u" |
1222 | " greater than TCM/SE_Device max_sectors" | 1223 | " greater than TCM/SE_Device max_sectors" |
1223 | ": %u, use force=1 to override.\n", dev, | 1224 | ": %u, use force=1 to override.\n", dev, |
1224 | max_sectors, DEV_ATTRIB(dev)->hw_max_sectors); | 1225 | max_sectors, DEV_ATTRIB(dev)->hw_max_sectors); |
1225 | return -1; | 1226 | return -1; |
1226 | } | 1227 | } |
1227 | if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) { | 1228 | if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) { |
1228 | printk(KERN_ERR "dev[%p]: Passed max_sectors: %u" | 1229 | printk(KERN_ERR "dev[%p]: Passed max_sectors: %u" |
1229 | " greater than DA_STATUS_MAX_SECTORS_MAX:" | 1230 | " greater than DA_STATUS_MAX_SECTORS_MAX:" |
1230 | " %u\n", dev, max_sectors, | 1231 | " %u\n", dev, max_sectors, |
1231 | DA_STATUS_MAX_SECTORS_MAX); | 1232 | DA_STATUS_MAX_SECTORS_MAX); |
1232 | return -1; | 1233 | return -1; |
1233 | } | 1234 | } |
1234 | } | 1235 | } |
1235 | 1236 | ||
1236 | DEV_ATTRIB(dev)->max_sectors = max_sectors; | 1237 | DEV_ATTRIB(dev)->max_sectors = max_sectors; |
1237 | printk("dev[%p]: SE Device max_sectors changed to %u\n", | 1238 | printk("dev[%p]: SE Device max_sectors changed to %u\n", |
1238 | dev, max_sectors); | 1239 | dev, max_sectors); |
1239 | return 0; | 1240 | return 0; |
1240 | } | 1241 | } |
1241 | 1242 | ||
1242 | int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors) | 1243 | int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors) |
1243 | { | 1244 | { |
1244 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { | 1245 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { |
1245 | printk(KERN_ERR "dev[%p]: Unable to change SE Device" | 1246 | printk(KERN_ERR "dev[%p]: Unable to change SE Device" |
1246 | " optimal_sectors while dev_export_obj: %d count exists\n", | 1247 | " optimal_sectors while dev_export_obj: %d count exists\n", |
1247 | dev, atomic_read(&dev->dev_export_obj.obj_access_count)); | 1248 | dev, atomic_read(&dev->dev_export_obj.obj_access_count)); |
1248 | return -EINVAL; | 1249 | return -EINVAL; |
1249 | } | 1250 | } |
1250 | if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { | 1251 | if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { |
1251 | printk(KERN_ERR "dev[%p]: Passed optimal_sectors cannot be" | 1252 | printk(KERN_ERR "dev[%p]: Passed optimal_sectors cannot be" |
1252 | " changed for TCM/pSCSI\n", dev); | 1253 | " changed for TCM/pSCSI\n", dev); |
1253 | return -EINVAL; | 1254 | return -EINVAL; |
1254 | } | 1255 | } |
1255 | if (optimal_sectors > DEV_ATTRIB(dev)->max_sectors) { | 1256 | if (optimal_sectors > DEV_ATTRIB(dev)->max_sectors) { |
1256 | printk(KERN_ERR "dev[%p]: Passed optimal_sectors %u cannot be" | 1257 | printk(KERN_ERR "dev[%p]: Passed optimal_sectors %u cannot be" |
1257 | " greater than max_sectors: %u\n", dev, | 1258 | " greater than max_sectors: %u\n", dev, |
1258 | optimal_sectors, DEV_ATTRIB(dev)->max_sectors); | 1259 | optimal_sectors, DEV_ATTRIB(dev)->max_sectors); |
1259 | return -EINVAL; | 1260 | return -EINVAL; |
1260 | } | 1261 | } |
1261 | 1262 | ||
1262 | DEV_ATTRIB(dev)->optimal_sectors = optimal_sectors; | 1263 | DEV_ATTRIB(dev)->optimal_sectors = optimal_sectors; |
1263 | printk(KERN_INFO "dev[%p]: SE Device optimal_sectors changed to %u\n", | 1264 | printk(KERN_INFO "dev[%p]: SE Device optimal_sectors changed to %u\n", |
1264 | dev, optimal_sectors); | 1265 | dev, optimal_sectors); |
1265 | return 0; | 1266 | return 0; |
1266 | } | 1267 | } |
1267 | 1268 | ||
1268 | int se_dev_set_block_size(struct se_device *dev, u32 block_size) | 1269 | int se_dev_set_block_size(struct se_device *dev, u32 block_size) |
1269 | { | 1270 | { |
1270 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { | 1271 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { |
1271 | printk(KERN_ERR "dev[%p]: Unable to change SE Device block_size" | 1272 | printk(KERN_ERR "dev[%p]: Unable to change SE Device block_size" |
1272 | " while dev_export_obj: %d count exists\n", dev, | 1273 | " while dev_export_obj: %d count exists\n", dev, |
1273 | atomic_read(&dev->dev_export_obj.obj_access_count)); | 1274 | atomic_read(&dev->dev_export_obj.obj_access_count)); |
1274 | return -1; | 1275 | return -1; |
1275 | } | 1276 | } |
1276 | 1277 | ||
1277 | if ((block_size != 512) && | 1278 | if ((block_size != 512) && |
1278 | (block_size != 1024) && | 1279 | (block_size != 1024) && |
1279 | (block_size != 2048) && | 1280 | (block_size != 2048) && |
1280 | (block_size != 4096)) { | 1281 | (block_size != 4096)) { |
1281 | printk(KERN_ERR "dev[%p]: Illegal value for block_size: %u" | 1282 | printk(KERN_ERR "dev[%p]: Illegal value for block_size: %u" |
1282 | " for SE device, must be 512, 1024, 2048 or 4096\n", | 1283 | " for SE device, must be 512, 1024, 2048 or 4096\n", |
1283 | dev, block_size); | 1284 | dev, block_size); |
1284 | return -1; | 1285 | return -1; |
1285 | } | 1286 | } |
1286 | 1287 | ||
1287 | if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { | 1288 | if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { |
1288 | printk(KERN_ERR "dev[%p]: Not allowed to change block_size for" | 1289 | printk(KERN_ERR "dev[%p]: Not allowed to change block_size for" |
1289 | " Physical Device, use for Linux/SCSI to change" | 1290 | " Physical Device, use for Linux/SCSI to change" |
1290 | " block_size for underlying hardware\n", dev); | 1291 | " block_size for underlying hardware\n", dev); |
1291 | return -1; | 1292 | return -1; |
1292 | } | 1293 | } |
1293 | 1294 | ||
1294 | DEV_ATTRIB(dev)->block_size = block_size; | 1295 | DEV_ATTRIB(dev)->block_size = block_size; |
1295 | printk(KERN_INFO "dev[%p]: SE Device block_size changed to %u\n", | 1296 | printk(KERN_INFO "dev[%p]: SE Device block_size changed to %u\n", |
1296 | dev, block_size); | 1297 | dev, block_size); |
1297 | return 0; | 1298 | return 0; |
1298 | } | 1299 | } |
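The explicit 512/1024/2048/4096 comparison above is equivalent to a power-of-two test bounded to that range. A compact form using is_power_of_2() from linux/log2.h, sketched for clarity rather than proposed for this commit:

static bool example_valid_block_size(u32 block_size)
{
	return block_size >= 512 && block_size <= 4096 &&
	       is_power_of_2(block_size);
}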
1299 | 1300 | ||
1300 | struct se_lun *core_dev_add_lun( | 1301 | struct se_lun *core_dev_add_lun( |
1301 | struct se_portal_group *tpg, | 1302 | struct se_portal_group *tpg, |
1302 | struct se_hba *hba, | 1303 | struct se_hba *hba, |
1303 | struct se_device *dev, | 1304 | struct se_device *dev, |
1304 | u32 lun) | 1305 | u32 lun) |
1305 | { | 1306 | { |
1306 | struct se_lun *lun_p; | 1307 | struct se_lun *lun_p; |
1307 | u32 lun_access = 0; | 1308 | u32 lun_access = 0; |
1308 | 1309 | ||
1309 | if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) { | 1310 | if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) { |
1310 | printk(KERN_ERR "Unable to export struct se_device while dev_access_obj: %d\n", | 1311 | printk(KERN_ERR "Unable to export struct se_device while dev_access_obj: %d\n", |
1311 | atomic_read(&dev->dev_access_obj.obj_access_count)); | 1312 | atomic_read(&dev->dev_access_obj.obj_access_count)); |
1312 | return NULL; | 1313 | return NULL; |
1313 | } | 1314 | } |
1314 | 1315 | ||
1315 | lun_p = core_tpg_pre_addlun(tpg, lun); | 1316 | lun_p = core_tpg_pre_addlun(tpg, lun); |
1316 | if ((IS_ERR(lun_p)) || !(lun_p)) | 1317 | if ((IS_ERR(lun_p)) || !(lun_p)) |
1317 | return NULL; | 1318 | return NULL; |
1318 | 1319 | ||
1319 | if (dev->dev_flags & DF_READ_ONLY) | 1320 | if (dev->dev_flags & DF_READ_ONLY) |
1320 | lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; | 1321 | lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; |
1321 | else | 1322 | else |
1322 | lun_access = TRANSPORT_LUNFLAGS_READ_WRITE; | 1323 | lun_access = TRANSPORT_LUNFLAGS_READ_WRITE; |
1323 | 1324 | ||
1324 | if (core_tpg_post_addlun(tpg, lun_p, lun_access, dev) < 0) | 1325 | if (core_tpg_post_addlun(tpg, lun_p, lun_access, dev) < 0) |
1325 | return NULL; | 1326 | return NULL; |
1326 | 1327 | ||
1327 | printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from" | 1328 | printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from" |
1328 | " CORE HBA: %u\n", TPG_TFO(tpg)->get_fabric_name(), | 1329 | " CORE HBA: %u\n", TPG_TFO(tpg)->get_fabric_name(), |
1329 | TPG_TFO(tpg)->tpg_get_tag(tpg), lun_p->unpacked_lun, | 1330 | TPG_TFO(tpg)->tpg_get_tag(tpg), lun_p->unpacked_lun, |
1330 | TPG_TFO(tpg)->get_fabric_name(), hba->hba_id); | 1331 | TPG_TFO(tpg)->get_fabric_name(), hba->hba_id); |
1331 | /* | 1332 | /* |
1332 | * Update LUN maps for dynamically added initiators when | 1333 | * Update LUN maps for dynamically added initiators when |
1333 | * generate_node_acl is enabled. | 1334 | * generate_node_acl is enabled. |
1334 | */ | 1335 | */ |
1335 | if (TPG_TFO(tpg)->tpg_check_demo_mode(tpg)) { | 1336 | if (TPG_TFO(tpg)->tpg_check_demo_mode(tpg)) { |
1336 | struct se_node_acl *acl; | 1337 | struct se_node_acl *acl; |
1337 | spin_lock_bh(&tpg->acl_node_lock); | 1338 | spin_lock_bh(&tpg->acl_node_lock); |
1338 | list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { | 1339 | list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { |
1339 | if (acl->dynamic_node_acl) { | 1340 | if (acl->dynamic_node_acl) { |
1340 | spin_unlock_bh(&tpg->acl_node_lock); | 1341 | spin_unlock_bh(&tpg->acl_node_lock); |
1341 | core_tpg_add_node_to_devs(acl, tpg); | 1342 | core_tpg_add_node_to_devs(acl, tpg); |
1342 | spin_lock_bh(&tpg->acl_node_lock); | 1343 | spin_lock_bh(&tpg->acl_node_lock); |
1343 | } | 1344 | } |
1344 | } | 1345 | } |
1345 | spin_unlock_bh(&tpg->acl_node_lock); | 1346 | spin_unlock_bh(&tpg->acl_node_lock); |
1346 | } | 1347 | } |
1347 | 1348 | ||
1348 | return lun_p; | 1349 | return lun_p; |
1349 | } | 1350 | } |
1350 | 1351 | ||
1351 | /* core_dev_del_lun(): | 1352 | /* core_dev_del_lun(): |
1352 | * | 1353 | * |
1353 | * | 1354 | * |
1354 | */ | 1355 | */ |
1355 | int core_dev_del_lun( | 1356 | int core_dev_del_lun( |
1356 | struct se_portal_group *tpg, | 1357 | struct se_portal_group *tpg, |
1357 | u32 unpacked_lun) | 1358 | u32 unpacked_lun) |
1358 | { | 1359 | { |
1359 | struct se_lun *lun; | 1360 | struct se_lun *lun; |
1360 | int ret = 0; | 1361 | int ret = 0; |
1361 | 1362 | ||
1362 | lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret); | 1363 | lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret); |
1363 | if (!(lun)) | 1364 | if (!(lun)) |
1364 | return ret; | 1365 | return ret; |
1365 | 1366 | ||
1366 | core_tpg_post_dellun(tpg, lun); | 1367 | core_tpg_post_dellun(tpg, lun); |
1367 | 1368 | ||
1368 | printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from" | 1369 | printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from" |
1369 | " device object\n", TPG_TFO(tpg)->get_fabric_name(), | 1370 | " device object\n", TPG_TFO(tpg)->get_fabric_name(), |
1370 | TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun, | 1371 | TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun, |
1371 | TPG_TFO(tpg)->get_fabric_name()); | 1372 | TPG_TFO(tpg)->get_fabric_name()); |
1372 | 1373 | ||
1373 | return 0; | 1374 | return 0; |
1374 | } | 1375 | } |
1375 | 1376 | ||
1376 | struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun) | 1377 | struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun) |
1377 | { | 1378 | { |
1378 | struct se_lun *lun; | 1379 | struct se_lun *lun; |
1379 | 1380 | ||
1380 | spin_lock(&tpg->tpg_lun_lock); | 1381 | spin_lock(&tpg->tpg_lun_lock); |
1381 | if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { | 1382 | if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { |
1382 | printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS" | 1383 | printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS" |
1383 | "_PER_TPG-1: %u for Target Portal Group: %hu\n", | 1384 | "_PER_TPG-1: %u for Target Portal Group: %hu\n", |
1384 | TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, | 1385 | TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, |
1385 | TRANSPORT_MAX_LUNS_PER_TPG-1, | 1386 | TRANSPORT_MAX_LUNS_PER_TPG-1, |
1386 | TPG_TFO(tpg)->tpg_get_tag(tpg)); | 1387 | TPG_TFO(tpg)->tpg_get_tag(tpg)); |
1387 | spin_unlock(&tpg->tpg_lun_lock); | 1388 | spin_unlock(&tpg->tpg_lun_lock); |
1388 | return NULL; | 1389 | return NULL; |
1389 | } | 1390 | } |
1390 | lun = &tpg->tpg_lun_list[unpacked_lun]; | 1391 | lun = &tpg->tpg_lun_list[unpacked_lun]; |
1391 | 1392 | ||
1392 | if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) { | 1393 | if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) { |
1393 | printk(KERN_ERR "%s Logical Unit Number: %u is not free on" | 1394 | printk(KERN_ERR "%s Logical Unit Number: %u is not free on" |
1394 | " Target Portal Group: %hu, ignoring request.\n", | 1395 | " Target Portal Group: %hu, ignoring request.\n", |
1395 | TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, | 1396 | TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, |
1396 | TPG_TFO(tpg)->tpg_get_tag(tpg)); | 1397 | TPG_TFO(tpg)->tpg_get_tag(tpg)); |
1397 | spin_unlock(&tpg->tpg_lun_lock); | 1398 | spin_unlock(&tpg->tpg_lun_lock); |
1398 | return NULL; | 1399 | return NULL; |
1399 | } | 1400 | } |
1400 | spin_unlock(&tpg->tpg_lun_lock); | 1401 | spin_unlock(&tpg->tpg_lun_lock); |
1401 | 1402 | ||
1402 | return lun; | 1403 | return lun; |
1403 | } | 1404 | } |
1404 | 1405 | ||
1405 | /* core_dev_get_lun(): | 1406 | /* core_dev_get_lun(): |
1406 | * | 1407 | * |
1407 | * | 1408 | * |
1408 | */ | 1409 | */ |
1409 | static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun) | 1410 | static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun) |
1410 | { | 1411 | { |
1411 | struct se_lun *lun; | 1412 | struct se_lun *lun; |
1412 | 1413 | ||
1413 | spin_lock(&tpg->tpg_lun_lock); | 1414 | spin_lock(&tpg->tpg_lun_lock); |
1414 | if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { | 1415 | if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { |
1415 | printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER" | 1416 | printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER" |
1416 | "_TPG-1: %u for Target Portal Group: %hu\n", | 1417 | "_TPG-1: %u for Target Portal Group: %hu\n", |
1417 | TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, | 1418 | TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, |
1418 | TRANSPORT_MAX_LUNS_PER_TPG-1, | 1419 | TRANSPORT_MAX_LUNS_PER_TPG-1, |
1419 | TPG_TFO(tpg)->tpg_get_tag(tpg)); | 1420 | TPG_TFO(tpg)->tpg_get_tag(tpg)); |
1420 | spin_unlock(&tpg->tpg_lun_lock); | 1421 | spin_unlock(&tpg->tpg_lun_lock); |
1421 | return NULL; | 1422 | return NULL; |
1422 | } | 1423 | } |
1423 | lun = &tpg->tpg_lun_list[unpacked_lun]; | 1424 | lun = &tpg->tpg_lun_list[unpacked_lun]; |
1424 | 1425 | ||
1425 | if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) { | 1426 | if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) { |
1426 | printk(KERN_ERR "%s Logical Unit Number: %u is not active on" | 1427 | printk(KERN_ERR "%s Logical Unit Number: %u is not active on" |
1427 | " Target Portal Group: %hu, ignoring request.\n", | 1428 | " Target Portal Group: %hu, ignoring request.\n", |
1428 | TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, | 1429 | TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, |
1429 | TPG_TFO(tpg)->tpg_get_tag(tpg)); | 1430 | TPG_TFO(tpg)->tpg_get_tag(tpg)); |
1430 | spin_unlock(&tpg->tpg_lun_lock); | 1431 | spin_unlock(&tpg->tpg_lun_lock); |
1431 | return NULL; | 1432 | return NULL; |
1432 | } | 1433 | } |
1433 | spin_unlock(&tpg->tpg_lun_lock); | 1434 | spin_unlock(&tpg->tpg_lun_lock); |
1434 | 1435 | ||
1435 | return lun; | 1436 | return lun; |
1436 | } | 1437 | } |
1437 | 1438 | ||
1438 | struct se_lun_acl *core_dev_init_initiator_node_lun_acl( | 1439 | struct se_lun_acl *core_dev_init_initiator_node_lun_acl( |
1439 | struct se_portal_group *tpg, | 1440 | struct se_portal_group *tpg, |
1440 | u32 mapped_lun, | 1441 | u32 mapped_lun, |
1441 | char *initiatorname, | 1442 | char *initiatorname, |
1442 | int *ret) | 1443 | int *ret) |
1443 | { | 1444 | { |
1444 | struct se_lun_acl *lacl; | 1445 | struct se_lun_acl *lacl; |
1445 | struct se_node_acl *nacl; | 1446 | struct se_node_acl *nacl; |
1446 | 1447 | ||
1447 | if (strlen(initiatorname) > TRANSPORT_IQN_LEN) { | 1448 | if (strlen(initiatorname) > TRANSPORT_IQN_LEN) { |
1448 | printk(KERN_ERR "%s InitiatorName exceeds maximum size.\n", | 1449 | printk(KERN_ERR "%s InitiatorName exceeds maximum size.\n", |
1449 | TPG_TFO(tpg)->get_fabric_name()); | 1450 | TPG_TFO(tpg)->get_fabric_name()); |
1450 | *ret = -EOVERFLOW; | 1451 | *ret = -EOVERFLOW; |
1451 | return NULL; | 1452 | return NULL; |
1452 | } | 1453 | } |
1453 | nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname); | 1454 | nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname); |
1454 | if (!(nacl)) { | 1455 | if (!(nacl)) { |
1455 | *ret = -EINVAL; | 1456 | *ret = -EINVAL; |
1456 | return NULL; | 1457 | return NULL; |
1457 | } | 1458 | } |
1458 | lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL); | 1459 | lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL); |
1459 | if (!(lacl)) { | 1460 | if (!(lacl)) { |
1460 | printk(KERN_ERR "Unable to allocate memory for struct se_lun_acl.\n"); | 1461 | printk(KERN_ERR "Unable to allocate memory for struct se_lun_acl.\n"); |
1461 | *ret = -ENOMEM; | 1462 | *ret = -ENOMEM; |
1462 | return NULL; | 1463 | return NULL; |
1463 | } | 1464 | } |
1464 | 1465 | ||
1465 | INIT_LIST_HEAD(&lacl->lacl_list); | 1466 | INIT_LIST_HEAD(&lacl->lacl_list); |
1466 | lacl->mapped_lun = mapped_lun; | 1467 | lacl->mapped_lun = mapped_lun; |
1467 | lacl->se_lun_nacl = nacl; | 1468 | lacl->se_lun_nacl = nacl; |
1468 | snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); | 1469 | snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); |
1469 | 1470 | ||
1470 | return lacl; | 1471 | return lacl; |
1471 | } | 1472 | } |
1472 | 1473 | ||
1473 | int core_dev_add_initiator_node_lun_acl( | 1474 | int core_dev_add_initiator_node_lun_acl( |
1474 | struct se_portal_group *tpg, | 1475 | struct se_portal_group *tpg, |
1475 | struct se_lun_acl *lacl, | 1476 | struct se_lun_acl *lacl, |
1476 | u32 unpacked_lun, | 1477 | u32 unpacked_lun, |
1477 | u32 lun_access) | 1478 | u32 lun_access) |
1478 | { | 1479 | { |
1479 | struct se_lun *lun; | 1480 | struct se_lun *lun; |
1480 | struct se_node_acl *nacl; | 1481 | struct se_node_acl *nacl; |
1481 | 1482 | ||
1482 | lun = core_dev_get_lun(tpg, unpacked_lun); | 1483 | lun = core_dev_get_lun(tpg, unpacked_lun); |
1483 | if (!(lun)) { | 1484 | if (!(lun)) { |
1484 | printk(KERN_ERR "%s Logical Unit Number: %u is not active on" | 1485 | printk(KERN_ERR "%s Logical Unit Number: %u is not active on" |
1485 | " Target Portal Group: %hu, ignoring request.\n", | 1486 | " Target Portal Group: %hu, ignoring request.\n", |
1486 | TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, | 1487 | TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, |
1487 | TPG_TFO(tpg)->tpg_get_tag(tpg)); | 1488 | TPG_TFO(tpg)->tpg_get_tag(tpg)); |
1488 | return -EINVAL; | 1489 | return -EINVAL; |
1489 | } | 1490 | } |
1490 | 1491 | ||
1491 | nacl = lacl->se_lun_nacl; | 1492 | nacl = lacl->se_lun_nacl; |
1492 | if (!(nacl)) | 1493 | if (!(nacl)) |
1493 | return -EINVAL; | 1494 | return -EINVAL; |
1494 | 1495 | ||
1495 | if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) && | 1496 | if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) && |
1496 | (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE)) | 1497 | (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE)) |
1497 | lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; | 1498 | lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; |
1498 | 1499 | ||
1499 | lacl->se_lun = lun; | 1500 | lacl->se_lun = lun; |
1500 | 1501 | ||
1501 | if (core_update_device_list_for_node(lun, lacl, lacl->mapped_lun, | 1502 | if (core_update_device_list_for_node(lun, lacl, lacl->mapped_lun, |
1502 | lun_access, nacl, tpg, 1) < 0) | 1503 | lun_access, nacl, tpg, 1) < 0) |
1503 | return -EINVAL; | 1504 | return -EINVAL; |
1504 | 1505 | ||
1505 | spin_lock(&lun->lun_acl_lock); | 1506 | spin_lock(&lun->lun_acl_lock); |
1506 | list_add_tail(&lacl->lacl_list, &lun->lun_acl_list); | 1507 | list_add_tail(&lacl->lacl_list, &lun->lun_acl_list); |
1507 | atomic_inc(&lun->lun_acl_count); | 1508 | atomic_inc(&lun->lun_acl_count); |
1508 | smp_mb__after_atomic_inc(); | 1509 | smp_mb__after_atomic_inc(); |
1509 | spin_unlock(&lun->lun_acl_lock); | 1510 | spin_unlock(&lun->lun_acl_lock); |
1510 | 1511 | ||
1511 | printk(KERN_INFO "%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for " | 1512 | printk(KERN_INFO "%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for " |
1512 | " InitiatorNode: %s\n", TPG_TFO(tpg)->get_fabric_name(), | 1513 | " InitiatorNode: %s\n", TPG_TFO(tpg)->get_fabric_name(), |
1513 | TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun, | 1514 | TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun, |
1514 | (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO", | 1515 | (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO", |
1515 | lacl->initiatorname); | 1516 | lacl->initiatorname); |
1516 | /* | 1517 | /* |
1517 | * Check to see if there are any existing persistent reservation APTPL | 1518 | * Check to see if there are any existing persistent reservation APTPL |
1518 | * pre-registrations that need to be enabled for this LUN ACL. | 1519 | * pre-registrations that need to be enabled for this LUN ACL. |
1519 | */ | 1520 | */ |
1520 | core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl); | 1521 | core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl); |
1521 | return 0; | 1522 | return 0; |
1522 | } | 1523 | } |
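core_dev_init_initiator_node_lun_acl() and core_dev_add_initiator_node_lun_acl() are designed to be called back-to-back by fabric configfs code. A hypothetical caller sketch (the helper name and the READ_WRITE default are assumptions, not from this file):

static int example_map_initiator_lun(struct se_portal_group *tpg,
				     u32 mapped_lun, u32 unpacked_lun,
				     char *initiatorname)
{
	struct se_lun_acl *lacl;
	int ret = 0;

	lacl = core_dev_init_initiator_node_lun_acl(tpg, mapped_lun,
						    initiatorname, &ret);
	if (!lacl)
		return ret;

	return core_dev_add_initiator_node_lun_acl(tpg, lacl, unpacked_lun,
					TRANSPORT_LUNFLAGS_READ_WRITE);
}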
1523 | 1524 | ||
1524 | /* core_dev_del_initiator_node_lun_acl(): | 1525 | /* core_dev_del_initiator_node_lun_acl(): |
1525 | * | 1526 | * |
1526 | * | 1527 | * |
1527 | */ | 1528 | */ |
1528 | int core_dev_del_initiator_node_lun_acl( | 1529 | int core_dev_del_initiator_node_lun_acl( |
1529 | struct se_portal_group *tpg, | 1530 | struct se_portal_group *tpg, |
1530 | struct se_lun *lun, | 1531 | struct se_lun *lun, |
1531 | struct se_lun_acl *lacl) | 1532 | struct se_lun_acl *lacl) |
1532 | { | 1533 | { |
1533 | struct se_node_acl *nacl; | 1534 | struct se_node_acl *nacl; |
1534 | 1535 | ||
1535 | nacl = lacl->se_lun_nacl; | 1536 | nacl = lacl->se_lun_nacl; |
1536 | if (!(nacl)) | 1537 | if (!(nacl)) |
1537 | return -EINVAL; | 1538 | return -EINVAL; |
1538 | 1539 | ||
1539 | spin_lock(&lun->lun_acl_lock); | 1540 | spin_lock(&lun->lun_acl_lock); |
1540 | list_del(&lacl->lacl_list); | 1541 | list_del(&lacl->lacl_list); |
1541 | atomic_dec(&lun->lun_acl_count); | 1542 | atomic_dec(&lun->lun_acl_count); |
1542 | smp_mb__after_atomic_dec(); | 1543 | smp_mb__after_atomic_dec(); |
1543 | spin_unlock(&lun->lun_acl_lock); | 1544 | spin_unlock(&lun->lun_acl_lock); |
1544 | 1545 | ||
1545 | core_update_device_list_for_node(lun, NULL, lacl->mapped_lun, | 1546 | core_update_device_list_for_node(lun, NULL, lacl->mapped_lun, |
1546 | TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0); | 1547 | TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0); |
1547 | 1548 | ||
1548 | lacl->se_lun = NULL; | 1549 | lacl->se_lun = NULL; |
1549 | 1550 | ||
1550 | printk(KERN_INFO "%s_TPG[%hu]_LUN[%u] - Removed ACL for" | 1551 | printk(KERN_INFO "%s_TPG[%hu]_LUN[%u] - Removed ACL for" |
1551 | " InitiatorNode: %s Mapped LUN: %u\n", | 1552 | " InitiatorNode: %s Mapped LUN: %u\n", |
1552 | TPG_TFO(tpg)->get_fabric_name(), | 1553 | TPG_TFO(tpg)->get_fabric_name(), |
1553 | TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun, | 1554 | TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun, |
1554 | lacl->initiatorname, lacl->mapped_lun); | 1555 | lacl->initiatorname, lacl->mapped_lun); |
1555 | 1556 | ||
1556 | return 0; | 1557 | return 0; |
1557 | } | 1558 | } |
1558 | 1559 | ||
1559 | void core_dev_free_initiator_node_lun_acl( | 1560 | void core_dev_free_initiator_node_lun_acl( |
1560 | struct se_portal_group *tpg, | 1561 | struct se_portal_group *tpg, |
1561 | struct se_lun_acl *lacl) | 1562 | struct se_lun_acl *lacl) |
1562 | { | 1563 | { |
1563 | printk("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s" | 1564 | printk("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s" |
1564 | " Mapped LUN: %u\n", TPG_TFO(tpg)->get_fabric_name(), | 1565 | " Mapped LUN: %u\n", TPG_TFO(tpg)->get_fabric_name(), |
1565 | TPG_TFO(tpg)->tpg_get_tag(tpg), | 1566 | TPG_TFO(tpg)->tpg_get_tag(tpg), |
1566 | TPG_TFO(tpg)->get_fabric_name(), | 1567 | TPG_TFO(tpg)->get_fabric_name(), |
1567 | lacl->initiatorname, lacl->mapped_lun); | 1568 | lacl->initiatorname, lacl->mapped_lun); |
1568 | 1569 | ||
1569 | kfree(lacl); | 1570 | kfree(lacl); |
1570 | } | 1571 | } |
1571 | 1572 | ||
1572 | int core_dev_setup_virtual_lun0(void) | 1573 | int core_dev_setup_virtual_lun0(void) |
1573 | { | 1574 | { |
1574 | struct se_hba *hba; | 1575 | struct se_hba *hba; |
1575 | struct se_device *dev; | 1576 | struct se_device *dev; |
1576 | struct se_subsystem_dev *se_dev = NULL; | 1577 | struct se_subsystem_dev *se_dev = NULL; |
1577 | struct se_subsystem_api *t; | 1578 | struct se_subsystem_api *t; |
1578 | char buf[16]; | 1579 | char buf[16]; |
1579 | int ret; | 1580 | int ret; |
1580 | 1581 | ||
1581 | hba = core_alloc_hba("rd_dr", 0, HBA_FLAGS_INTERNAL_USE); | 1582 | hba = core_alloc_hba("rd_dr", 0, HBA_FLAGS_INTERNAL_USE); |
1582 | if (IS_ERR(hba)) | 1583 | if (IS_ERR(hba)) |
1583 | return PTR_ERR(hba); | 1584 | return PTR_ERR(hba); |
1584 | 1585 | ||
1585 | se_global->g_lun0_hba = hba; | 1586 | se_global->g_lun0_hba = hba; |
1586 | t = hba->transport; | 1587 | t = hba->transport; |
1587 | 1588 | ||
1588 | se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL); | 1589 | se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL); |
1589 | if (!(se_dev)) { | 1590 | if (!(se_dev)) { |
1590 | printk(KERN_ERR "Unable to allocate memory for" | 1591 | printk(KERN_ERR "Unable to allocate memory for" |
1591 | " struct se_subsystem_dev\n"); | 1592 | " struct se_subsystem_dev\n"); |
1592 | ret = -ENOMEM; | 1593 | ret = -ENOMEM; |
1593 | goto out; | 1594 | goto out; |
1594 | } | 1595 | } |
1595 | INIT_LIST_HEAD(&se_dev->g_se_dev_list); | 1596 | INIT_LIST_HEAD(&se_dev->g_se_dev_list); |
1596 | INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list); | 1597 | INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list); |
1597 | spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock); | 1598 | spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock); |
1598 | INIT_LIST_HEAD(&se_dev->t10_reservation.registration_list); | 1599 | INIT_LIST_HEAD(&se_dev->t10_reservation.registration_list); |
1599 | INIT_LIST_HEAD(&se_dev->t10_reservation.aptpl_reg_list); | 1600 | INIT_LIST_HEAD(&se_dev->t10_reservation.aptpl_reg_list); |
1600 | spin_lock_init(&se_dev->t10_reservation.registration_lock); | 1601 | spin_lock_init(&se_dev->t10_reservation.registration_lock); |
1601 | spin_lock_init(&se_dev->t10_reservation.aptpl_reg_lock); | 1602 | spin_lock_init(&se_dev->t10_reservation.aptpl_reg_lock); |
1602 | INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list); | 1603 | INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list); |
1603 | spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock); | 1604 | spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock); |
1604 | spin_lock_init(&se_dev->se_dev_lock); | 1605 | spin_lock_init(&se_dev->se_dev_lock); |
1605 | se_dev->t10_reservation.pr_aptpl_buf_len = PR_APTPL_BUF_LEN; | 1606 | se_dev->t10_reservation.pr_aptpl_buf_len = PR_APTPL_BUF_LEN; |
1606 | se_dev->t10_wwn.t10_sub_dev = se_dev; | 1607 | se_dev->t10_wwn.t10_sub_dev = se_dev; |
1607 | se_dev->t10_alua.t10_sub_dev = se_dev; | 1608 | se_dev->t10_alua.t10_sub_dev = se_dev; |
1608 | se_dev->se_dev_attrib.da_sub_dev = se_dev; | 1609 | se_dev->se_dev_attrib.da_sub_dev = se_dev; |
1609 | se_dev->se_dev_hba = hba; | 1610 | se_dev->se_dev_hba = hba; |
1610 | 1611 | ||
1611 | se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0"); | 1612 | se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0"); |
1612 | if (!(se_dev->se_dev_su_ptr)) { | 1613 | if (!(se_dev->se_dev_su_ptr)) { |
1613 | printk(KERN_ERR "Unable to locate subsystem dependent pointer" | 1614 | printk(KERN_ERR "Unable to locate subsystem dependent pointer" |
1614 | " from allocate_virtdevice()\n"); | 1615 | " from allocate_virtdevice()\n"); |
1615 | ret = -ENOMEM; | 1616 | ret = -ENOMEM; |
1616 | goto out; | 1617 | goto out; |
1617 | } | 1618 | } |
1618 | se_global->g_lun0_su_dev = se_dev; | 1619 | se_global->g_lun0_su_dev = se_dev; |
1619 | 1620 | ||
1620 | memset(buf, 0, 16); | 1621 | memset(buf, 0, 16); |
1621 | sprintf(buf, "rd_pages=8"); | 1622 | sprintf(buf, "rd_pages=8"); |
1622 | t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf)); | 1623 | t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf)); |
1623 | 1624 | ||
1624 | dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr); | 1625 | dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr); |
1625 | if (!(dev) || IS_ERR(dev)) { | 1626 | if (!(dev) || IS_ERR(dev)) { |
1626 | ret = -ENOMEM; | 1627 | ret = -ENOMEM; |
1627 | goto out; | 1628 | goto out; |
1628 | } | 1629 | } |
1629 | se_dev->se_dev_ptr = dev; | 1630 | se_dev->se_dev_ptr = dev; |
1630 | se_global->g_lun0_dev = dev; | 1631 | se_global->g_lun0_dev = dev; |
1631 | 1632 | ||
1632 | return 0; | 1633 | return 0; |
1633 | out: | 1634 | out: |
1634 | se_global->g_lun0_su_dev = NULL; | 1635 | se_global->g_lun0_su_dev = NULL; |
1635 | kfree(se_dev); | 1636 | kfree(se_dev); |
1636 | if (se_global->g_lun0_hba) { | 1637 | if (se_global->g_lun0_hba) { |
1637 | core_delete_hba(se_global->g_lun0_hba); | 1638 | core_delete_hba(se_global->g_lun0_hba); |
1638 | se_global->g_lun0_hba = NULL; | 1639 | se_global->g_lun0_hba = NULL; |
1639 | } | 1640 | } |
1640 | return ret; | 1641 | return ret; |
1641 | } | 1642 | } |
1642 | 1643 | ||
1643 | 1644 | ||
1644 | void core_dev_release_virtual_lun0(void) | 1645 | void core_dev_release_virtual_lun0(void) |
1645 | { | 1646 | { |
1646 | struct se_hba *hba = se_global->g_lun0_hba; | 1647 | struct se_hba *hba = se_global->g_lun0_hba; |
1647 | struct se_subsystem_dev *su_dev = se_global->g_lun0_su_dev; | 1648 | struct se_subsystem_dev *su_dev = se_global->g_lun0_su_dev; |
1648 | 1649 | ||
1649 | if (!(hba)) | 1650 | if (!(hba)) |
1650 | return; | 1651 | return; |
1651 | 1652 | ||
1652 | if (se_global->g_lun0_dev) | 1653 | if (se_global->g_lun0_dev) |
1653 | se_free_virtual_device(se_global->g_lun0_dev, hba); | 1654 | se_free_virtual_device(se_global->g_lun0_dev, hba); |
1654 | 1655 | ||
1655 | kfree(su_dev); | 1656 | kfree(su_dev); |
1656 | core_delete_hba(hba); | 1657 | core_delete_hba(hba); |
1657 | } | 1658 | } |
1658 | 1659 |
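One pattern worth calling out in core_dev_setup_virtual_lun0() above: core_alloc_hba() reports failure through the kernel's error-pointer convention (ERR_PTR()/IS_ERR()/PTR_ERR() from <linux/err.h>), while the create_virtdevice() return is checked for both NULL and IS_ERR(), presumably because backends differ in which convention they use. A minimal user-space sketch of the convention; err_ptr/is_err/ptr_err are illustrative stand-ins, not the kernel macros:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR():
     * small negative errno values are encoded into the top of the pointer
     * range, so one return value can carry a pointer or an error code. */
    #define MAX_ERRNO 4095

    static void *err_ptr(long error) { return (void *)error; }
    static int is_err(const void *ptr)
    {
        return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
    }
    static long ptr_err(const void *ptr) { return (long)ptr; }

    static void *alloc_hba_demo(int fail)
    {
        static int dummy_hba;                /* placeholder object */
        return fail ? err_ptr(-ENOMEM) : &dummy_hba;
    }

    int main(void)
    {
        void *hba = alloc_hba_demo(1);
        if (is_err(hba))                     /* mirrors if (IS_ERR(hba)) */
            printf("alloc failed: %ld\n", ptr_err(hba));
        return 0;
    }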
drivers/target/target_core_fabric_lib.c
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | * Filename: target_core_fabric_lib.c | 2 | * Filename: target_core_fabric_lib.c |
3 | * | 3 | * |
4 | * This file contains generic high level protocol identifier and PR | 4 | * This file contains generic high level protocol identifier and PR |
5 | * handlers for TCM fabric modules | 5 | * handlers for TCM fabric modules |
6 | * | 6 | * |
7 | * Copyright (c) 2010 Rising Tide Systems, Inc. | 7 | * Copyright (c) 2010 Rising Tide Systems, Inc. |
8 | * Copyright (c) 2010 Linux-iSCSI.org | 8 | * Copyright (c) 2010 Linux-iSCSI.org |
9 | * | 9 | * |
10 | * Nicholas A. Bellinger <nab@linux-iscsi.org> | 10 | * Nicholas A. Bellinger <nab@linux-iscsi.org> |
11 | * | 11 | * |
12 | * This program is free software; you can redistribute it and/or modify | 12 | * This program is free software; you can redistribute it and/or modify |
13 | * it under the terms of the GNU General Public License as published by | 13 | * it under the terms of the GNU General Public License as published by |
14 | * the Free Software Foundation; either version 2 of the License, or | 14 | * the Free Software Foundation; either version 2 of the License, or |
15 | * (at your option) any later version. | 15 | * (at your option) any later version. |
16 | * | 16 | * |
17 | * This program is distributed in the hope that it will be useful, | 17 | * This program is distributed in the hope that it will be useful, |
18 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 18 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
19 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 19 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
20 | * GNU General Public License for more details. | 20 | * GNU General Public License for more details. |
21 | * | 21 | * |
22 | * You should have received a copy of the GNU General Public License | 22 | * You should have received a copy of the GNU General Public License |
23 | * along with this program; if not, write to the Free Software | 23 | * along with this program; if not, write to the Free Software |
24 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | 24 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. |
25 | * | 25 | * |
26 | ******************************************************************************/ | 26 | ******************************************************************************/ |
27 | 27 | ||
28 | #include <linux/string.h> | 28 | #include <linux/string.h> |
29 | #include <linux/ctype.h> | 29 | #include <linux/ctype.h> |
30 | #include <linux/spinlock.h> | 30 | #include <linux/spinlock.h> |
31 | #include <scsi/scsi.h> | 31 | #include <scsi/scsi.h> |
32 | #include <scsi/scsi_cmnd.h> | 32 | #include <scsi/scsi_cmnd.h> |
33 | 33 | ||
34 | #include <target/target_core_base.h> | 34 | #include <target/target_core_base.h> |
35 | #include <target/target_core_device.h> | 35 | #include <target/target_core_device.h> |
36 | #include <target/target_core_transport.h> | 36 | #include <target/target_core_transport.h> |
37 | #include <target/target_core_fabric_lib.h> | ||
37 | #include <target/target_core_fabric_ops.h> | 38 | #include <target/target_core_fabric_ops.h> |
38 | #include <target/target_core_configfs.h> | 39 | #include <target/target_core_configfs.h> |
39 | 40 | ||
40 | #include "target_core_hba.h" | 41 | #include "target_core_hba.h" |
41 | #include "target_core_pr.h" | 42 | #include "target_core_pr.h" |
42 | 43 | ||
43 | /* | 44 | /* |
44 | * Handlers for Serial Attached SCSI (SAS) | 45 | * Handlers for Serial Attached SCSI (SAS) |
45 | */ | 46 | */ |
46 | u8 sas_get_fabric_proto_ident(struct se_portal_group *se_tpg) | 47 | u8 sas_get_fabric_proto_ident(struct se_portal_group *se_tpg) |
47 | { | 48 | { |
48 | /* | 49 | /* |
49 | * Return a SAS Serial SCSI Protocol identifier for loopback operations | 50 | * Return a SAS Serial SCSI Protocol identifier for loopback operations |
50 | * This is defined in section 7.5.1 Table 362 in spc4r17 | 51 | * This is defined in section 7.5.1 Table 362 in spc4r17 |
51 | */ | 52 | */ |
52 | return 0x6; | 53 | return 0x6; |
53 | } | 54 | } |
54 | EXPORT_SYMBOL(sas_get_fabric_proto_ident); | 55 | EXPORT_SYMBOL(sas_get_fabric_proto_ident); |
55 | 56 | ||
56 | u32 sas_get_pr_transport_id( | 57 | u32 sas_get_pr_transport_id( |
57 | struct se_portal_group *se_tpg, | 58 | struct se_portal_group *se_tpg, |
58 | struct se_node_acl *se_nacl, | 59 | struct se_node_acl *se_nacl, |
59 | struct t10_pr_registration *pr_reg, | 60 | struct t10_pr_registration *pr_reg, |
60 | int *format_code, | 61 | int *format_code, |
61 | unsigned char *buf) | 62 | unsigned char *buf) |
62 | { | 63 | { |
63 | unsigned char binary, *ptr; | 64 | unsigned char binary, *ptr; |
64 | int i; | 65 | int i; |
65 | u32 off = 4; | 66 | u32 off = 4; |
66 | /* | 67 | /* |
67 | * Set PROTOCOL IDENTIFIER to 6h for SAS | 68 | * Set PROTOCOL IDENTIFIER to 6h for SAS |
68 | */ | 69 | */ |
69 | buf[0] = 0x06; | 70 | buf[0] = 0x06; |
70 | /* | 71 | /* |
71 | * From spc4r17, 7.5.4.7 TransportID for initiator ports using SCSI | 72 | * From spc4r17, 7.5.4.7 TransportID for initiator ports using SCSI |
72 | * over SAS Serial SCSI Protocol | 73 | * over SAS Serial SCSI Protocol |
73 | */ | 74 | */ |
74 | ptr = &se_nacl->initiatorname[4]; /* Skip over 'naa.' prefix */ | 75 | ptr = &se_nacl->initiatorname[4]; /* Skip over 'naa.' prefix */ |
75 | 76 | ||
76 | for (i = 0; i < 16; i += 2) { | 77 | for (i = 0; i < 16; i += 2) { |
77 | binary = transport_asciihex_to_binaryhex(&ptr[i]); | 78 | binary = transport_asciihex_to_binaryhex(&ptr[i]); |
78 | buf[off++] = binary; | 79 | buf[off++] = binary; |
79 | } | 80 | } |
80 | /* | 81 | /* |
81 | * The SAS Transport ID is a hardcoded 24-byte length | 82 | * The SAS Transport ID is a hardcoded 24-byte length |
82 | */ | 83 | */ |
83 | return 24; | 84 | return 24; |
84 | } | 85 | } |
85 | EXPORT_SYMBOL(sas_get_pr_transport_id); | 86 | EXPORT_SYMBOL(sas_get_pr_transport_id); |
86 | 87 | ||
87 | u32 sas_get_pr_transport_id_len( | 88 | u32 sas_get_pr_transport_id_len( |
88 | struct se_portal_group *se_tpg, | 89 | struct se_portal_group *se_tpg, |
89 | struct se_node_acl *se_nacl, | 90 | struct se_node_acl *se_nacl, |
90 | struct t10_pr_registration *pr_reg, | 91 | struct t10_pr_registration *pr_reg, |
91 | int *format_code) | 92 | int *format_code) |
92 | { | 93 | { |
93 | *format_code = 0; | 94 | *format_code = 0; |
94 | /* | 95 | /* |
95 | * From spc4r17, 7.5.4.7 TransportID for initiator ports using SCSI | 96 | * From spc4r17, 7.5.4.7 TransportID for initiator ports using SCSI |
96 | * over SAS Serial SCSI Protocol | 97 | * over SAS Serial SCSI Protocol |
97 | * | 98 | * |
98 | * The SAS Transport ID is a hardcoded 24-byte length | 99 | * The SAS Transport ID is a hardcoded 24-byte length |
99 | */ | 100 | */ |
100 | return 24; | 101 | return 24; |
101 | } | 102 | } |
102 | EXPORT_SYMBOL(sas_get_pr_transport_id_len); | 103 | EXPORT_SYMBOL(sas_get_pr_transport_id_len); |
103 | 104 | ||
104 | /* | 105 | /* |
105 | * Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above | 106 | * Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above |
106 | * Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations. | 107 | * Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations. |
107 | */ | 108 | */ |
108 | char *sas_parse_pr_out_transport_id( | 109 | char *sas_parse_pr_out_transport_id( |
109 | struct se_portal_group *se_tpg, | 110 | struct se_portal_group *se_tpg, |
110 | const char *buf, | 111 | const char *buf, |
111 | u32 *out_tid_len, | 112 | u32 *out_tid_len, |
112 | char **port_nexus_ptr) | 113 | char **port_nexus_ptr) |
113 | { | 114 | { |
114 | /* | 115 | /* |
115 | * Assume the FORMAT CODE 00b from spc4r17, 7.5.4.7 TransportID | 116 | * Assume the FORMAT CODE 00b from spc4r17, 7.5.4.7 TransportID |
116 | * for initiator ports using SCSI over SAS Serial SCSI Protocol | 117 | * for initiator ports using SCSI over SAS Serial SCSI Protocol |
117 | * | 118 | * |
118 | * The TransportID for a SAS Initiator Port is of fixed size of | 119 | * The TransportID for a SAS Initiator Port is of fixed size of |
119 | * 24 bytes, and SAS does not contain an I_T nexus identifier, | 120 | * 24 bytes, and SAS does not contain an I_T nexus identifier, |
120 | * so we return the **port_nexus_ptr set to NULL. | 121 | * so we return the **port_nexus_ptr set to NULL. |
121 | */ | 122 | */ |
122 | *port_nexus_ptr = NULL; | 123 | *port_nexus_ptr = NULL; |
123 | *out_tid_len = 24; | 124 | *out_tid_len = 24; |
124 | 125 | ||
125 | return (char *)&buf[4]; | 126 | return (char *)&buf[4]; |
126 | } | 127 | } |
127 | EXPORT_SYMBOL(sas_parse_pr_out_transport_id); | 128 | EXPORT_SYMBOL(sas_parse_pr_out_transport_id); |
128 | 129 | ||
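The three SAS handlers above share one fixed 24-byte TransportID layout: byte 0 carries PROTOCOL IDENTIFIER 6h and bytes 4-11 the 8-byte binary port name converted from the ASCII 'naa.' WWN. A user-space sketch of the conversion loop in sas_get_pr_transport_id(); asciihex_to_byte() is an illustrative stand-in for transport_asciihex_to_binaryhex(), whose body is not shown in this diff:

    #include <stdio.h>

    /* Convert two ASCII hex digits into one byte, as the loop in
     * sas_get_pr_transport_id() expects from its helper. Sketch only. */
    static unsigned char asciihex_to_byte(const char *p)
    {
        unsigned int v = 0;
        sscanf(p, "%2x", &v);
        return (unsigned char)v;
    }

    int main(void)
    {
        const char *initiatorname = "naa.5001405e4d3f2a10"; /* invented WWN */
        unsigned char buf[24] = { 0 };
        const char *ptr = &initiatorname[4];  /* skip the "naa." prefix */
        unsigned int off = 4, i;

        buf[0] = 0x06;                        /* PROTOCOL IDENTIFIER: SAS */
        for (i = 0; i < 16; i += 2)           /* 16 hex chars -> 8 bytes */
            buf[off++] = asciihex_to_byte(&ptr[i]);

        for (i = 0; i < 12; i++)
            printf("%02x ", buf[i]);
        printf("\n");                         /* 06 00 00 00 50 01 40 5e ... */
        return 0;
    }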
129 | /* | 130 | /* |
130 | * Handlers for Fibre Channel Protocol (FCP) | 131 | * Handlers for Fibre Channel Protocol (FCP) |
131 | */ | 132 | */ |
132 | u8 fc_get_fabric_proto_ident(struct se_portal_group *se_tpg) | 133 | u8 fc_get_fabric_proto_ident(struct se_portal_group *se_tpg) |
133 | { | 134 | { |
134 | return 0x0; /* 0 = fcp-2 per SPC4 section 7.5.1 */ | 135 | return 0x0; /* 0 = fcp-2 per SPC4 section 7.5.1 */ |
135 | } | 136 | } |
136 | EXPORT_SYMBOL(fc_get_fabric_proto_ident); | 137 | EXPORT_SYMBOL(fc_get_fabric_proto_ident); |
137 | 138 | ||
138 | u32 fc_get_pr_transport_id_len( | 139 | u32 fc_get_pr_transport_id_len( |
139 | struct se_portal_group *se_tpg, | 140 | struct se_portal_group *se_tpg, |
140 | struct se_node_acl *se_nacl, | 141 | struct se_node_acl *se_nacl, |
141 | struct t10_pr_registration *pr_reg, | 142 | struct t10_pr_registration *pr_reg, |
142 | int *format_code) | 143 | int *format_code) |
143 | { | 144 | { |
144 | *format_code = 0; | 145 | *format_code = 0; |
145 | /* | 146 | /* |
146 | * The FC Transport ID is a hardcoded 24-byte length | 147 | * The FC Transport ID is a hardcoded 24-byte length |
147 | */ | 148 | */ |
148 | return 24; | 149 | return 24; |
149 | } | 150 | } |
150 | EXPORT_SYMBOL(fc_get_pr_transport_id_len); | 151 | EXPORT_SYMBOL(fc_get_pr_transport_id_len); |
151 | 152 | ||
152 | u32 fc_get_pr_transport_id( | 153 | u32 fc_get_pr_transport_id( |
153 | struct se_portal_group *se_tpg, | 154 | struct se_portal_group *se_tpg, |
154 | struct se_node_acl *se_nacl, | 155 | struct se_node_acl *se_nacl, |
155 | struct t10_pr_registration *pr_reg, | 156 | struct t10_pr_registration *pr_reg, |
156 | int *format_code, | 157 | int *format_code, |
157 | unsigned char *buf) | 158 | unsigned char *buf) |
158 | { | 159 | { |
159 | unsigned char binary, *ptr; | 160 | unsigned char binary, *ptr; |
160 | int i; | 161 | int i; |
161 | u32 off = 8; | 162 | u32 off = 8; |
162 | /* | 163 | /* |
163 | * PROTOCOL IDENTIFIER is 0h for FCP-2 | 164 | * PROTOCOL IDENTIFIER is 0h for FCP-2 |
164 | * | 165 | * |
165 | * From spc4r17, 7.5.4.2 TransportID for initiator ports using | 166 | * From spc4r17, 7.5.4.2 TransportID for initiator ports using |
166 | * SCSI over Fibre Channel | 167 | * SCSI over Fibre Channel |
167 | * | 168 | * |
168 | * We convert the ASCII formatted N Port name into a binary | 169 | * We convert the ASCII formatted N Port name into a binary |
169 | * encoded TransportID. | 170 | * encoded TransportID. |
170 | */ | 171 | */ |
171 | ptr = &se_nacl->initiatorname[0]; | 172 | ptr = &se_nacl->initiatorname[0]; |
172 | 173 | ||
173 | for (i = 0; i < 24; ) { | 174 | for (i = 0; i < 24; ) { |
174 | if (!(strncmp(&ptr[i], ":", 1))) { | 175 | if (!(strncmp(&ptr[i], ":", 1))) { |
175 | i++; | 176 | i++; |
176 | continue; | 177 | continue; |
177 | } | 178 | } |
178 | binary = transport_asciihex_to_binaryhex(&ptr[i]); | 179 | binary = transport_asciihex_to_binaryhex(&ptr[i]); |
179 | buf[off++] = binary; | 180 | buf[off++] = binary; |
180 | i += 2; | 181 | i += 2; |
181 | } | 182 | } |
182 | /* | 183 | /* |
183 | * The FC Transport ID is a hardcoded 24-byte length | 184 | * The FC Transport ID is a hardcoded 24-byte length |
184 | */ | 185 | */ |
185 | return 24; | 186 | return 24; |
186 | } | 187 | } |
187 | EXPORT_SYMBOL(fc_get_pr_transport_id); | 188 | EXPORT_SYMBOL(fc_get_pr_transport_id); |
188 | 189 | ||
189 | char *fc_parse_pr_out_transport_id( | 190 | char *fc_parse_pr_out_transport_id( |
190 | struct se_portal_group *se_tpg, | 191 | struct se_portal_group *se_tpg, |
191 | const char *buf, | 192 | const char *buf, |
192 | u32 *out_tid_len, | 193 | u32 *out_tid_len, |
193 | char **port_nexus_ptr) | 194 | char **port_nexus_ptr) |
194 | { | 195 | { |
195 | /* | 196 | /* |
196 | * The TransportID for a FC N Port is of fixed size of | 197 | * The TransportID for a FC N Port is of fixed size of |
197 | * 24 bytes, and FC does not contain an I_T nexus identifier, | 198 | * 24 bytes, and FC does not contain an I_T nexus identifier, |
198 | * so we return the **port_nexus_ptr set to NULL. | 199 | * so we return the **port_nexus_ptr set to NULL. |
199 | */ | 200 | */ |
200 | *port_nexus_ptr = NULL; | 201 | *port_nexus_ptr = NULL; |
201 | *out_tid_len = 24; | 202 | *out_tid_len = 24; |
202 | 203 | ||
203 | return (char *)&buf[8]; | 204 | return (char *)&buf[8]; |
204 | } | 205 | } |
205 | EXPORT_SYMBOL(fc_parse_pr_out_transport_id); | 206 | EXPORT_SYMBOL(fc_parse_pr_out_transport_id); |
206 | 207 | ||
207 | /* | 208 | /* |
208 | * Handlers for Internet Small Computer Systems Interface (iSCSI) | 209 | * Handlers for Internet Small Computer Systems Interface (iSCSI) |
209 | */ | 210 | */ |
210 | 211 | ||
211 | u8 iscsi_get_fabric_proto_ident(struct se_portal_group *se_tpg) | 212 | u8 iscsi_get_fabric_proto_ident(struct se_portal_group *se_tpg) |
212 | { | 213 | { |
213 | /* | 214 | /* |
214 | * This value is defined for "Internet SCSI (iSCSI)" | 215 | * This value is defined for "Internet SCSI (iSCSI)" |
215 | * in spc4r17 section 7.5.1 Table 362 | 216 | * in spc4r17 section 7.5.1 Table 362 |
216 | */ | 217 | */ |
217 | return 0x5; | 218 | return 0x5; |
218 | } | 219 | } |
219 | EXPORT_SYMBOL(iscsi_get_fabric_proto_ident); | 220 | EXPORT_SYMBOL(iscsi_get_fabric_proto_ident); |
220 | 221 | ||
221 | u32 iscsi_get_pr_transport_id( | 222 | u32 iscsi_get_pr_transport_id( |
222 | struct se_portal_group *se_tpg, | 223 | struct se_portal_group *se_tpg, |
223 | struct se_node_acl *se_nacl, | 224 | struct se_node_acl *se_nacl, |
224 | struct t10_pr_registration *pr_reg, | 225 | struct t10_pr_registration *pr_reg, |
225 | int *format_code, | 226 | int *format_code, |
226 | unsigned char *buf) | 227 | unsigned char *buf) |
227 | { | 228 | { |
228 | u32 off = 4, padding = 0; | 229 | u32 off = 4, padding = 0; |
229 | u16 len = 0; | 230 | u16 len = 0; |
230 | 231 | ||
231 | spin_lock_irq(&se_nacl->nacl_sess_lock); | 232 | spin_lock_irq(&se_nacl->nacl_sess_lock); |
232 | /* | 233 | /* |
233 | * Set PROTOCOL IDENTIFIER to 5h for iSCSI | 234 | * Set PROTOCOL IDENTIFIER to 5h for iSCSI |
234 | */ | 235 | */ |
235 | buf[0] = 0x05; | 236 | buf[0] = 0x05; |
236 | /* | 237 | /* |
237 | * From spc4r17 Section 7.5.4.6: TransportID for initiator | 238 | * From spc4r17 Section 7.5.4.6: TransportID for initiator |
238 | * ports using SCSI over iSCSI. | 239 | * ports using SCSI over iSCSI. |
239 | * | 240 | * |
240 | * The null-terminated, null-padded (see 4.4.2) ISCSI NAME field | 241 | * The null-terminated, null-padded (see 4.4.2) ISCSI NAME field |
241 | * shall contain the iSCSI name of an iSCSI initiator node (see | 242 | * shall contain the iSCSI name of an iSCSI initiator node (see |
242 | * RFC 3720). The first ISCSI NAME field byte containing an ASCII | 243 | * RFC 3720). The first ISCSI NAME field byte containing an ASCII |
243 | * null character terminates the ISCSI NAME field without regard for | 244 | * null character terminates the ISCSI NAME field without regard for |
244 | * the specified length of the iSCSI TransportID or the contents of | 245 | * the specified length of the iSCSI TransportID or the contents of |
245 | * the ADDITIONAL LENGTH field. | 246 | * the ADDITIONAL LENGTH field. |
246 | */ | 247 | */ |
247 | len = sprintf(&buf[off], "%s", se_nacl->initiatorname); | 248 | len = sprintf(&buf[off], "%s", se_nacl->initiatorname); |
248 | /* | 249 | /* |
249 | * Add Extra byte for NULL terminator | 250 | * Add Extra byte for NULL terminator |
250 | */ | 251 | */ |
251 | len++; | 252 | len++; |
252 | /* | 253 | /* |
253 | * If there is an ISID present with the registration and *format_code == | 254 | * If there is an ISID present with the registration and *format_code == |
254 | * 1, use the iSCSI Initiator port TransportID format. | 255 | * 1, use the iSCSI Initiator port TransportID format. |
255 | * | 256 | * |
256 | * Otherwise use iSCSI Initiator device TransportID format that | 257 | * Otherwise use iSCSI Initiator device TransportID format that |
257 | * does not contain the ASCII-encoded iSCSI Initiator ISID value | 258 | * does not contain the ASCII-encoded iSCSI Initiator ISID value |
258 | * provided by the iSCSI Initiator during the iSCSI login process. | 259 | * provided by the iSCSI Initiator during the iSCSI login process. |
259 | */ | 260 | */ |
260 | if ((*format_code == 1) && (pr_reg->isid_present_at_reg)) { | 261 | if ((*format_code == 1) && (pr_reg->isid_present_at_reg)) { |
261 | /* | 262 | /* |
262 | * Set FORMAT CODE 01b for iSCSI Initiator port TransportID | 263 | * Set FORMAT CODE 01b for iSCSI Initiator port TransportID |
263 | * format. | 264 | * format. |
264 | */ | 265 | */ |
265 | buf[0] |= 0x40; | 266 | buf[0] |= 0x40; |
266 | /* | 267 | /* |
267 | * From spc4r17 Section 7.5.4.6: TransportID for initiator | 268 | * From spc4r17 Section 7.5.4.6: TransportID for initiator |
268 | * ports using SCSI over iSCSI. Table 390 | 269 | * ports using SCSI over iSCSI. Table 390 |
269 | * | 270 | * |
270 | * The SEPARATOR field shall contain the five ASCII | 271 | * The SEPARATOR field shall contain the five ASCII |
271 | * characters ",i,0x". | 272 | * characters ",i,0x". |
272 | * | 273 | * |
273 | * The null-terminated, null-padded ISCSI INITIATOR SESSION ID | 274 | * The null-terminated, null-padded ISCSI INITIATOR SESSION ID |
274 | * field shall contain the iSCSI initiator session identifier | 275 | * field shall contain the iSCSI initiator session identifier |
275 | * (see RFC 3720) in the form of ASCII characters that are the | 276 | * (see RFC 3720) in the form of ASCII characters that are the |
276 | * hexadecimal digits converted from the binary iSCSI initiator | 277 | * hexadecimal digits converted from the binary iSCSI initiator |
277 | * session identifier value. The first ISCSI INITIATOR SESSION | 278 | * session identifier value. The first ISCSI INITIATOR SESSION |
278 | * ID field byte containing an ASCII null character | 279 | * ID field byte containing an ASCII null character |
279 | */ | 280 | */ |
280 | buf[off+len] = 0x2c; off++; /* ASCII Character: "," */ | 281 | buf[off+len] = 0x2c; off++; /* ASCII Character: "," */ |
281 | buf[off+len] = 0x69; off++; /* ASCII Character: "i" */ | 282 | buf[off+len] = 0x69; off++; /* ASCII Character: "i" */ |
282 | buf[off+len] = 0x2c; off++; /* ASCII Character: "," */ | 283 | buf[off+len] = 0x2c; off++; /* ASCII Character: "," */ |
283 | buf[off+len] = 0x30; off++; /* ASCII Character: "0" */ | 284 | buf[off+len] = 0x30; off++; /* ASCII Character: "0" */ |
284 | buf[off+len] = 0x78; off++; /* ASCII Character: "x" */ | 285 | buf[off+len] = 0x78; off++; /* ASCII Character: "x" */ |
285 | len += 5; | 286 | len += 5; |
286 | buf[off+len] = pr_reg->pr_reg_isid[0]; off++; | 287 | buf[off+len] = pr_reg->pr_reg_isid[0]; off++; |
287 | buf[off+len] = pr_reg->pr_reg_isid[1]; off++; | 288 | buf[off+len] = pr_reg->pr_reg_isid[1]; off++; |
288 | buf[off+len] = pr_reg->pr_reg_isid[2]; off++; | 289 | buf[off+len] = pr_reg->pr_reg_isid[2]; off++; |
289 | buf[off+len] = pr_reg->pr_reg_isid[3]; off++; | 290 | buf[off+len] = pr_reg->pr_reg_isid[3]; off++; |
290 | buf[off+len] = pr_reg->pr_reg_isid[4]; off++; | 291 | buf[off+len] = pr_reg->pr_reg_isid[4]; off++; |
291 | buf[off+len] = pr_reg->pr_reg_isid[5]; off++; | 292 | buf[off+len] = pr_reg->pr_reg_isid[5]; off++; |
292 | buf[off+len] = '\0'; off++; | 293 | buf[off+len] = '\0'; off++; |
293 | len += 7; | 294 | len += 7; |
294 | } | 295 | } |
295 | spin_unlock_irq(&se_nacl->nacl_sess_lock); | 296 | spin_unlock_irq(&se_nacl->nacl_sess_lock); |
296 | /* | 297 | /* |
297 | * The ADDITIONAL LENGTH field specifies the number of bytes that follow | 298 | * The ADDITIONAL LENGTH field specifies the number of bytes that follow |
298 | * in the TransportID. The additional length shall be at least 20 and | 299 | * in the TransportID. The additional length shall be at least 20 and |
299 | * shall be a multiple of four. | 300 | * shall be a multiple of four. |
300 | */ | 301 | */ |
301 | padding = ((-len) & 3); | 302 | padding = ((-len) & 3); |
302 | if (padding != 0) | 303 | if (padding != 0) |
303 | len += padding; | 304 | len += padding; |
304 | 305 | ||
305 | buf[2] = ((len >> 8) & 0xff); | 306 | buf[2] = ((len >> 8) & 0xff); |
306 | buf[3] = (len & 0xff); | 307 | buf[3] = (len & 0xff); |
307 | /* | 308 | /* |
308 | * Increment value for total payload + header length for | 309 | * Increment value for total payload + header length for |
309 | * full status descriptor | 310 | * full status descriptor |
310 | */ | 311 | */ |
311 | len += 4; | 312 | len += 4; |
312 | 313 | ||
313 | return len; | 314 | return len; |
314 | } | 315 | } |
315 | EXPORT_SYMBOL(iscsi_get_pr_transport_id); | 316 | EXPORT_SYMBOL(iscsi_get_pr_transport_id); |
316 | 317 | ||
317 | u32 iscsi_get_pr_transport_id_len( | 318 | u32 iscsi_get_pr_transport_id_len( |
318 | struct se_portal_group *se_tpg, | 319 | struct se_portal_group *se_tpg, |
319 | struct se_node_acl *se_nacl, | 320 | struct se_node_acl *se_nacl, |
320 | struct t10_pr_registration *pr_reg, | 321 | struct t10_pr_registration *pr_reg, |
321 | int *format_code) | 322 | int *format_code) |
322 | { | 323 | { |
323 | u32 len = 0, padding = 0; | 324 | u32 len = 0, padding = 0; |
324 | 325 | ||
325 | spin_lock_irq(&se_nacl->nacl_sess_lock); | 326 | spin_lock_irq(&se_nacl->nacl_sess_lock); |
326 | len = strlen(se_nacl->initiatorname); | 327 | len = strlen(se_nacl->initiatorname); |
327 | /* | 328 | /* |
328 | * Add extra byte for NULL terminator | 329 | * Add extra byte for NULL terminator |
329 | */ | 330 | */ |
330 | len++; | 331 | len++; |
331 | /* | 332 | /* |
332 | * If there is ISID present with the registration, use format code: | 333 | * If there is ISID present with the registration, use format code: |
333 | * 01b: iSCSI Initiator port TransportID format | 334 | * 01b: iSCSI Initiator port TransportID format |
334 | * | 335 | * |
335 | * If there is not an active iSCSI session, use format code: | 336 | * If there is not an active iSCSI session, use format code: |
336 | * 00b: iSCSI Initiator device TransportID format | 337 | * 00b: iSCSI Initiator device TransportID format |
337 | */ | 338 | */ |
338 | if (pr_reg->isid_present_at_reg) { | 339 | if (pr_reg->isid_present_at_reg) { |
339 | len += 5; /* For ",i,0x" ASCII separator */ | 340 | len += 5; /* For ",i,0x" ASCII separator */ |
340 | len += 7; /* For iSCSI Initiator Session ID + Null terminator */ | 341 | len += 7; /* For iSCSI Initiator Session ID + Null terminator */ |
341 | *format_code = 1; | 342 | *format_code = 1; |
342 | } else | 343 | } else |
343 | *format_code = 0; | 344 | *format_code = 0; |
344 | spin_unlock_irq(&se_nacl->nacl_sess_lock); | 345 | spin_unlock_irq(&se_nacl->nacl_sess_lock); |
345 | /* | 346 | /* |
346 | * The ADDITIONAL LENGTH field specifies the number of bytes that follow | 347 | * The ADDITIONAL LENGTH field specifies the number of bytes that follow |
347 | * in the TransportID. The additional length shall be at least 20 and | 348 | * in the TransportID. The additional length shall be at least 20 and |
348 | * shall be a multiple of four. | 349 | * shall be a multiple of four. |
349 | */ | 350 | */ |
350 | padding = ((-len) & 3); | 351 | padding = ((-len) & 3); |
351 | if (padding != 0) | 352 | if (padding != 0) |
352 | len += padding; | 353 | len += padding; |
353 | /* | 354 | /* |
354 | * Increment value for total payload + header length for | 355 | * Increment value for total payload + header length for |
355 | * full status descriptor | 356 | * full status descriptor |
356 | */ | 357 | */ |
357 | len += 4; | 358 | len += 4; |
358 | 359 | ||
359 | return len; | 360 | return len; |
360 | } | 361 | } |
361 | EXPORT_SYMBOL(iscsi_get_pr_transport_id_len); | 362 | EXPORT_SYMBOL(iscsi_get_pr_transport_id_len); |
362 | 363 | ||
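Both iSCSI helpers above round the payload length up to a multiple of four with padding = ((-len) & 3), which yields exactly the pad count the quoted spc4r17 ADDITIONAL LENGTH rule requires. A standalone check of the arithmetic:

    #include <stdio.h>

    int main(void)
    {
        /* ((-len) & 3) == pad bytes needed to reach a multiple of four */
        unsigned int len;

        for (len = 20; len <= 24; len++)
            printf("len=%u padding=%u padded=%u\n",
                   len, (-len) & 3, len + ((-len) & 3));
        /* len=21 -> padding=3 -> padded=24; len=24 -> padding=0 */
        return 0;
    }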
363 | char *iscsi_parse_pr_out_transport_id( | 364 | char *iscsi_parse_pr_out_transport_id( |
364 | struct se_portal_group *se_tpg, | 365 | struct se_portal_group *se_tpg, |
365 | const char *buf, | 366 | const char *buf, |
366 | u32 *out_tid_len, | 367 | u32 *out_tid_len, |
367 | char **port_nexus_ptr) | 368 | char **port_nexus_ptr) |
368 | { | 369 | { |
369 | char *p; | 370 | char *p; |
370 | u32 tid_len, padding; | 371 | u32 tid_len, padding; |
371 | int i; | 372 | int i; |
372 | u16 add_len; | 373 | u16 add_len; |
373 | u8 format_code = (buf[0] & 0xc0); | 374 | u8 format_code = (buf[0] & 0xc0); |
374 | /* | 375 | /* |
375 | * Check for FORMAT CODE 00b or 01b from spc4r17, section 7.5.4.6: | 376 | * Check for FORMAT CODE 00b or 01b from spc4r17, section 7.5.4.6: |
376 | * | 377 | * |
377 | * TransportID for initiator ports using SCSI over iSCSI, | 378 | * TransportID for initiator ports using SCSI over iSCSI, |
378 | * from Table 388 -- iSCSI TransportID formats. | 379 | * from Table 388 -- iSCSI TransportID formats. |
379 | * | 380 | * |
380 | * 00b Initiator port is identified using the world wide unique | 381 | * 00b Initiator port is identified using the world wide unique |
381 | * SCSI device name of the iSCSI initiator | 382 | * SCSI device name of the iSCSI initiator |
382 | * device containing the initiator port (see table 389). | 383 | * device containing the initiator port (see table 389). |
383 | * 01b Initiator port is identified using the world wide unique | 384 | * 01b Initiator port is identified using the world wide unique |
384 | * initiator port identifier (see table 390). | 385 | * initiator port identifier (see table 390). |
385 | * 10b to 11b Reserved | 386 | * 10b to 11b Reserved |
386 | */ | 387 | */ |
387 | if ((format_code != 0x00) && (format_code != 0x40)) { | 388 | if ((format_code != 0x00) && (format_code != 0x40)) { |
388 | printk(KERN_ERR "Illegal format code: 0x%02x for iSCSI" | 389 | printk(KERN_ERR "Illegal format code: 0x%02x for iSCSI" |
389 | " Initiator Transport ID\n", format_code); | 390 | " Initiator Transport ID\n", format_code); |
390 | return NULL; | 391 | return NULL; |
391 | } | 392 | } |
392 | /* | 393 | /* |
393 | * If the caller wants the TransportID Length, we set that value for the | 394 | * If the caller wants the TransportID Length, we set that value for the |
394 | * entire iSCSI Transport ID now. | 395 | * entire iSCSI Transport ID now. |
395 | */ | 396 | */ |
396 | if (out_tid_len != NULL) { | 397 | if (out_tid_len != NULL) { |
397 | add_len = ((buf[2] & 0xff) << 8); | 398 | add_len = ((buf[2] & 0xff) << 8); |
398 | add_len |= (buf[3] & 0xff); | 399 | add_len |= (buf[3] & 0xff); |
399 | 400 | ||
400 | tid_len = strlen((char *)&buf[4]); | 401 | tid_len = strlen((char *)&buf[4]); |
401 | tid_len += 4; /* Add four bytes for iSCSI Transport ID header */ | 402 | tid_len += 4; /* Add four bytes for iSCSI Transport ID header */ |
402 | tid_len += 1; /* Add one byte for NULL terminator */ | 403 | tid_len += 1; /* Add one byte for NULL terminator */ |
403 | padding = ((-tid_len) & 3); | 404 | padding = ((-tid_len) & 3); |
404 | if (padding != 0) | 405 | if (padding != 0) |
405 | tid_len += padding; | 406 | tid_len += padding; |
406 | 407 | ||
407 | if ((add_len + 4) != tid_len) { | 408 | if ((add_len + 4) != tid_len) { |
408 | printk(KERN_INFO "LIO-Target Extracted add_len: %hu " | 409 | printk(KERN_INFO "LIO-Target Extracted add_len: %hu " |
409 | "does not match calculated tid_len: %u," | 410 | "does not match calculated tid_len: %u," |
410 | " using tid_len instead\n", add_len+4, tid_len); | 411 | " using tid_len instead\n", add_len+4, tid_len); |
411 | *out_tid_len = tid_len; | 412 | *out_tid_len = tid_len; |
412 | } else | 413 | } else |
413 | *out_tid_len = (add_len + 4); | 414 | *out_tid_len = (add_len + 4); |
414 | } | 415 | } |
415 | /* | 416 | /* |
416 | * Check for ',i,0x' separator between iSCSI Name and iSCSI Initiator | 417 | * Check for ',i,0x' separator between iSCSI Name and iSCSI Initiator |
417 | * Session ID as defined in Table 390 - iSCSI initiator port TransportID | 418 | * Session ID as defined in Table 390 - iSCSI initiator port TransportID |
418 | * format. | 419 | * format. |
419 | */ | 420 | */ |
420 | if (format_code == 0x40) { | 421 | if (format_code == 0x40) { |
421 | p = strstr((char *)&buf[4], ",i,0x"); | 422 | p = strstr((char *)&buf[4], ",i,0x"); |
422 | if (!(p)) { | 423 | if (!(p)) { |
423 | printk(KERN_ERR "Unable to locate \",i,0x\" separator" | 424 | printk(KERN_ERR "Unable to locate \",i,0x\" separator" |
424 | " for Initiator port identifier: %s\n", | 425 | " for Initiator port identifier: %s\n", |
425 | (char *)&buf[4]); | 426 | (char *)&buf[4]); |
426 | return NULL; | 427 | return NULL; |
427 | } | 428 | } |
428 | *p = '\0'; /* Terminate iSCSI Name */ | 429 | *p = '\0'; /* Terminate iSCSI Name */ |
429 | p += 5; /* Skip over ",i,0x" separator */ | 430 | p += 5; /* Skip over ",i,0x" separator */ |
430 | 431 | ||
431 | *port_nexus_ptr = p; | 432 | *port_nexus_ptr = p; |
432 | /* | 433 | /* |
433 | * Go ahead and do the lower case conversion of the received | 434 | * Go ahead and do the lower case conversion of the received |
434 | * 12 ASCII characters representing the ISID in the TransportID | 435 | * 12 ASCII characters representing the ISID in the TransportID |
435 | * for comparison against the running iSCSI session's ISID from | 436 | * for comparison against the running iSCSI session's ISID from |
436 | * iscsi_target.c:lio_sess_get_initiator_sid() | 437 | * iscsi_target.c:lio_sess_get_initiator_sid() |
437 | */ | 438 | */ |
438 | for (i = 0; i < 12; i++) { | 439 | for (i = 0; i < 12; i++) { |
439 | if (isdigit(*p)) { | 440 | if (isdigit(*p)) { |
440 | p++; | 441 | p++; |
441 | continue; | 442 | continue; |
442 | } | 443 | } |
443 | *p = tolower(*p); | 444 | *p = tolower(*p); |
444 | p++; | 445 | p++; |
445 | } | 446 | } |
446 | } | 447 | } |
447 | 448 | ||
448 | return (char *)&buf[4]; | 449 | return (char *)&buf[4]; |
449 | } | 450 | } |
450 | EXPORT_SYMBOL(iscsi_parse_pr_out_transport_id); | 451 | EXPORT_SYMBOL(iscsi_parse_pr_out_transport_id); |
451 | 452 |
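For FORMAT CODE 01b, the parser above splits a payload of the form iqn...,i,0x<12 hex digits> at the ',i,0x' separator and lower-cases the ISID characters in place. A user-space sketch of the same steps; the iSCSI name and ISID below are invented:

    #include <ctype.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* Payload found at &buf[4] of an iSCSI initiator port TransportID */
        char id[] = "iqn.1993-08.org.debian:01:abcdef,i,0x12345678ABCD";
        char *p = strstr(id, ",i,0x");
        int i;

        if (!p)
            return 1;
        *p = '\0';                       /* terminate the iSCSI name */
        p += 5;                          /* skip the ",i,0x" separator */

        /* lower-case the 12 ASCII ISID characters, as the loop above does */
        for (i = 0; i < 12; i++, p++)
            if (!isdigit((unsigned char)*p))
                *p = (char)tolower((unsigned char)*p);

        printf("name=%s isid=%s\n", id, p - 12);
        return 0;
    }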
drivers/target/target_core_pscsi.c
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | * Filename: target_core_pscsi.c | 2 | * Filename: target_core_pscsi.c |
3 | * | 3 | * |
4 | * This file contains the generic target mode <-> Linux SCSI subsystem plugin. | 4 | * This file contains the generic target mode <-> Linux SCSI subsystem plugin. |
5 | * | 5 | * |
6 | * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc. | 6 | * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc. |
7 | * Copyright (c) 2005, 2006, 2007 SBE, Inc. | 7 | * Copyright (c) 2005, 2006, 2007 SBE, Inc. |
8 | * Copyright (c) 2007-2010 Rising Tide Systems | 8 | * Copyright (c) 2007-2010 Rising Tide Systems |
9 | * Copyright (c) 2008-2010 Linux-iSCSI.org | 9 | * Copyright (c) 2008-2010 Linux-iSCSI.org |
10 | * | 10 | * |
11 | * Nicholas A. Bellinger <nab@kernel.org> | 11 | * Nicholas A. Bellinger <nab@kernel.org> |
12 | * | 12 | * |
13 | * This program is free software; you can redistribute it and/or modify | 13 | * This program is free software; you can redistribute it and/or modify |
14 | * it under the terms of the GNU General Public License as published by | 14 | * it under the terms of the GNU General Public License as published by |
15 | * the Free Software Foundation; either version 2 of the License, or | 15 | * the Free Software Foundation; either version 2 of the License, or |
16 | * (at your option) any later version. | 16 | * (at your option) any later version. |
17 | * | 17 | * |
18 | * This program is distributed in the hope that it will be useful, | 18 | * This program is distributed in the hope that it will be useful, |
19 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 19 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
20 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 20 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
21 | * GNU General Public License for more details. | 21 | * GNU General Public License for more details. |
22 | * | 22 | * |
23 | * You should have received a copy of the GNU General Public License | 23 | * You should have received a copy of the GNU General Public License |
24 | * along with this program; if not, write to the Free Software | 24 | * along with this program; if not, write to the Free Software |
25 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | 25 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. |
26 | * | 26 | * |
27 | ******************************************************************************/ | 27 | ******************************************************************************/ |
28 | 28 | ||
29 | #include <linux/version.h> | 29 | #include <linux/version.h> |
30 | #include <linux/string.h> | 30 | #include <linux/string.h> |
31 | #include <linux/parser.h> | 31 | #include <linux/parser.h> |
32 | #include <linux/timer.h> | 32 | #include <linux/timer.h> |
33 | #include <linux/blkdev.h> | 33 | #include <linux/blkdev.h> |
34 | #include <linux/blk_types.h> | 34 | #include <linux/blk_types.h> |
35 | #include <linux/slab.h> | 35 | #include <linux/slab.h> |
36 | #include <linux/spinlock.h> | 36 | #include <linux/spinlock.h> |
37 | #include <linux/genhd.h> | 37 | #include <linux/genhd.h> |
38 | #include <linux/cdrom.h> | 38 | #include <linux/cdrom.h> |
39 | #include <linux/file.h> | 39 | #include <linux/file.h> |
40 | #include <scsi/scsi.h> | 40 | #include <scsi/scsi.h> |
41 | #include <scsi/scsi_device.h> | 41 | #include <scsi/scsi_device.h> |
42 | #include <scsi/scsi_cmnd.h> | 42 | #include <scsi/scsi_cmnd.h> |
43 | #include <scsi/scsi_host.h> | 43 | #include <scsi/scsi_host.h> |
44 | #include <scsi/libsas.h> /* For TASK_ATTR_* */ | 44 | #include <scsi/libsas.h> /* For TASK_ATTR_* */ |
45 | 45 | ||
46 | #include <target/target_core_base.h> | 46 | #include <target/target_core_base.h> |
47 | #include <target/target_core_device.h> | 47 | #include <target/target_core_device.h> |
48 | #include <target/target_core_transport.h> | 48 | #include <target/target_core_transport.h> |
49 | 49 | ||
50 | #include "target_core_pscsi.h" | 50 | #include "target_core_pscsi.h" |
51 | 51 | ||
52 | #define ISPRINT(a) ((a >= ' ') && (a <= '~')) | 52 | #define ISPRINT(a) ((a >= ' ') && (a <= '~')) |
53 | 53 | ||
54 | static struct se_subsystem_api pscsi_template; | 54 | static struct se_subsystem_api pscsi_template; |
55 | 55 | ||
56 | static void pscsi_req_done(struct request *, int); | 56 | static void pscsi_req_done(struct request *, int); |
57 | 57 | ||
58 | /* pscsi_get_sh(): | 58 | /* pscsi_get_sh(): |
59 | * | 59 | * |
60 | * | 60 | * |
61 | */ | 61 | */ |
62 | static struct Scsi_Host *pscsi_get_sh(u32 host_no) | 62 | static struct Scsi_Host *pscsi_get_sh(u32 host_no) |
63 | { | 63 | { |
64 | struct Scsi_Host *sh = NULL; | 64 | struct Scsi_Host *sh = NULL; |
65 | 65 | ||
66 | sh = scsi_host_lookup(host_no); | 66 | sh = scsi_host_lookup(host_no); |
67 | if (IS_ERR(sh)) { | 67 | if (IS_ERR(sh)) { |
68 | printk(KERN_ERR "Unable to locate SCSI HBA with Host ID:" | 68 | printk(KERN_ERR "Unable to locate SCSI HBA with Host ID:" |
69 | " %u\n", host_no); | 69 | " %u\n", host_no); |
70 | return NULL; | 70 | return NULL; |
71 | } | 71 | } |
72 | 72 | ||
73 | return sh; | 73 | return sh; |
74 | } | 74 | } |
75 | 75 | ||
76 | /* pscsi_attach_hba(): | 76 | /* pscsi_attach_hba(): |
77 | * | 77 | * |
78 | * pscsi_get_sh() uses scsi_host_lookup() to locate struct Scsi_Host | 78 | * pscsi_get_sh() uses scsi_host_lookup() to locate struct Scsi_Host |
79 | * from the passed SCSI Host ID. | 79 | * from the passed SCSI Host ID. |
80 | */ | 80 | */ |
81 | static int pscsi_attach_hba(struct se_hba *hba, u32 host_id) | 81 | static int pscsi_attach_hba(struct se_hba *hba, u32 host_id) |
82 | { | 82 | { |
83 | int hba_depth; | 83 | int hba_depth; |
84 | struct pscsi_hba_virt *phv; | 84 | struct pscsi_hba_virt *phv; |
85 | 85 | ||
86 | phv = kzalloc(sizeof(struct pscsi_hba_virt), GFP_KERNEL); | 86 | phv = kzalloc(sizeof(struct pscsi_hba_virt), GFP_KERNEL); |
87 | if (!(phv)) { | 87 | if (!(phv)) { |
88 | printk(KERN_ERR "Unable to allocate struct pscsi_hba_virt\n"); | 88 | printk(KERN_ERR "Unable to allocate struct pscsi_hba_virt\n"); |
89 | return -1; | 89 | return -1; |
90 | } | 90 | } |
91 | phv->phv_host_id = host_id; | 91 | phv->phv_host_id = host_id; |
92 | phv->phv_mode = PHV_VIRUTAL_HOST_ID; | 92 | phv->phv_mode = PHV_VIRUTAL_HOST_ID; |
93 | hba_depth = PSCSI_VIRTUAL_HBA_DEPTH; | 93 | hba_depth = PSCSI_VIRTUAL_HBA_DEPTH; |
94 | atomic_set(&hba->left_queue_depth, hba_depth); | 94 | atomic_set(&hba->left_queue_depth, hba_depth); |
95 | atomic_set(&hba->max_queue_depth, hba_depth); | 95 | atomic_set(&hba->max_queue_depth, hba_depth); |
96 | 96 | ||
97 | hba->hba_ptr = (void *)phv; | 97 | hba->hba_ptr = (void *)phv; |
98 | 98 | ||
99 | printk(KERN_INFO "CORE_HBA[%d] - TCM SCSI HBA Driver %s on" | 99 | printk(KERN_INFO "CORE_HBA[%d] - TCM SCSI HBA Driver %s on" |
100 | " Generic Target Core Stack %s\n", hba->hba_id, | 100 | " Generic Target Core Stack %s\n", hba->hba_id, |
101 | PSCSI_VERSION, TARGET_CORE_MOD_VERSION); | 101 | PSCSI_VERSION, TARGET_CORE_MOD_VERSION); |
102 | printk(KERN_INFO "CORE_HBA[%d] - Attached SCSI HBA to Generic" | 102 | printk(KERN_INFO "CORE_HBA[%d] - Attached SCSI HBA to Generic" |
103 | " Target Core with TCQ Depth: %d\n", hba->hba_id, | 103 | " Target Core with TCQ Depth: %d\n", hba->hba_id, |
104 | atomic_read(&hba->max_queue_depth)); | 104 | atomic_read(&hba->max_queue_depth)); |
105 | 105 | ||
106 | return 0; | 106 | return 0; |
107 | } | 107 | } |
108 | 108 | ||
109 | static void pscsi_detach_hba(struct se_hba *hba) | 109 | static void pscsi_detach_hba(struct se_hba *hba) |
110 | { | 110 | { |
111 | struct pscsi_hba_virt *phv = hba->hba_ptr; | 111 | struct pscsi_hba_virt *phv = hba->hba_ptr; |
112 | struct Scsi_Host *scsi_host = phv->phv_lld_host; | 112 | struct Scsi_Host *scsi_host = phv->phv_lld_host; |
113 | 113 | ||
114 | if (scsi_host) { | 114 | if (scsi_host) { |
115 | scsi_host_put(scsi_host); | 115 | scsi_host_put(scsi_host); |
116 | 116 | ||
117 | printk(KERN_INFO "CORE_HBA[%d] - Detached SCSI HBA: %s from" | 117 | printk(KERN_INFO "CORE_HBA[%d] - Detached SCSI HBA: %s from" |
118 | " Generic Target Core\n", hba->hba_id, | 118 | " Generic Target Core\n", hba->hba_id, |
119 | (scsi_host->hostt->name) ? (scsi_host->hostt->name) : | 119 | (scsi_host->hostt->name) ? (scsi_host->hostt->name) : |
120 | "Unknown"); | 120 | "Unknown"); |
121 | } else | 121 | } else |
122 | printk(KERN_INFO "CORE_HBA[%d] - Detached Virtual SCSI HBA" | 122 | printk(KERN_INFO "CORE_HBA[%d] - Detached Virtual SCSI HBA" |
123 | " from Generic Target Core\n", hba->hba_id); | 123 | " from Generic Target Core\n", hba->hba_id); |
124 | 124 | ||
125 | kfree(phv); | 125 | kfree(phv); |
126 | hba->hba_ptr = NULL; | 126 | hba->hba_ptr = NULL; |
127 | } | 127 | } |
128 | 128 | ||
129 | static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag) | 129 | static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag) |
130 | { | 130 | { |
131 | struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr; | 131 | struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr; |
132 | struct Scsi_Host *sh = phv->phv_lld_host; | 132 | struct Scsi_Host *sh = phv->phv_lld_host; |
133 | int hba_depth = PSCSI_VIRTUAL_HBA_DEPTH; | 133 | int hba_depth = PSCSI_VIRTUAL_HBA_DEPTH; |
134 | /* | 134 | /* |
135 | * Release the struct Scsi_Host | 135 | * Release the struct Scsi_Host |
136 | */ | 136 | */ |
137 | if (!(mode_flag)) { | 137 | if (!(mode_flag)) { |
138 | if (!(sh)) | 138 | if (!(sh)) |
139 | return 0; | 139 | return 0; |
140 | 140 | ||
141 | phv->phv_lld_host = NULL; | 141 | phv->phv_lld_host = NULL; |
142 | phv->phv_mode = PHV_VIRUTAL_HOST_ID; | 142 | phv->phv_mode = PHV_VIRUTAL_HOST_ID; |
143 | atomic_set(&hba->left_queue_depth, hba_depth); | 143 | atomic_set(&hba->left_queue_depth, hba_depth); |
144 | atomic_set(&hba->max_queue_depth, hba_depth); | 144 | atomic_set(&hba->max_queue_depth, hba_depth); |
145 | 145 | ||
146 | printk(KERN_INFO "CORE_HBA[%d] - Disabled pSCSI HBA Passthrough" | 146 | printk(KERN_INFO "CORE_HBA[%d] - Disabled pSCSI HBA Passthrough" |
147 | " %s\n", hba->hba_id, (sh->hostt->name) ? | 147 | " %s\n", hba->hba_id, (sh->hostt->name) ? |
148 | (sh->hostt->name) : "Unknown"); | 148 | (sh->hostt->name) : "Unknown"); |
149 | 149 | ||
150 | scsi_host_put(sh); | 150 | scsi_host_put(sh); |
151 | return 0; | 151 | return 0; |
152 | } | 152 | } |
153 | /* | 153 | /* |
154 | * Otherwise, locate struct Scsi_Host from the original passed | 154 | * Otherwise, locate struct Scsi_Host from the original passed |
155 | * pSCSI Host ID and enable for phba mode | 155 | * pSCSI Host ID and enable for phba mode |
156 | */ | 156 | */ |
157 | sh = pscsi_get_sh(phv->phv_host_id); | 157 | sh = pscsi_get_sh(phv->phv_host_id); |
158 | if (!(sh)) { | 158 | if (!(sh)) { |
159 | printk(KERN_ERR "pSCSI: Unable to locate SCSI Host for" | 159 | printk(KERN_ERR "pSCSI: Unable to locate SCSI Host for" |
160 | " phv_host_id: %d\n", phv->phv_host_id); | 160 | " phv_host_id: %d\n", phv->phv_host_id); |
161 | return -1; | 161 | return -1; |
162 | } | 162 | } |
163 | /* | 163 | /* |
164 | * Usually the SCSI LLD will use the hostt->can_queue value to define | 164 | * Usually the SCSI LLD will use the hostt->can_queue value to define |
165 | * its HBA TCQ depth. Some other drivers (like 2.6 megaraid) don't set | 165 | * its HBA TCQ depth. Some other drivers (like 2.6 megaraid) don't set |
166 | * this at all and set sh->can_queue at runtime. | 166 | * this at all and set sh->can_queue at runtime. |
167 | */ | 167 | */ |
168 | hba_depth = (sh->hostt->can_queue > sh->can_queue) ? | 168 | hba_depth = (sh->hostt->can_queue > sh->can_queue) ? |
169 | sh->hostt->can_queue : sh->can_queue; | 169 | sh->hostt->can_queue : sh->can_queue; |
170 | 170 | ||
171 | atomic_set(&hba->left_queue_depth, hba_depth); | 171 | atomic_set(&hba->left_queue_depth, hba_depth); |
172 | atomic_set(&hba->max_queue_depth, hba_depth); | 172 | atomic_set(&hba->max_queue_depth, hba_depth); |
173 | 173 | ||
174 | phv->phv_lld_host = sh; | 174 | phv->phv_lld_host = sh; |
175 | phv->phv_mode = PHV_LLD_SCSI_HOST_NO; | 175 | phv->phv_mode = PHV_LLD_SCSI_HOST_NO; |
176 | 176 | ||
177 | printk(KERN_INFO "CORE_HBA[%d] - Enabled pSCSI HBA Passthrough %s\n", | 177 | printk(KERN_INFO "CORE_HBA[%d] - Enabled pSCSI HBA Passthrough %s\n", |
178 | hba->hba_id, (sh->hostt->name) ? (sh->hostt->name) : "Unknown"); | 178 | hba->hba_id, (sh->hostt->name) ? (sh->hostt->name) : "Unknown"); |
179 | 179 | ||
180 | return 1; | 180 | return 1; |
181 | } | 181 | } |
182 | 182 | ||
183 | static void pscsi_tape_read_blocksize(struct se_device *dev, | 183 | static void pscsi_tape_read_blocksize(struct se_device *dev, |
184 | struct scsi_device *sdev) | 184 | struct scsi_device *sdev) |
185 | { | 185 | { |
186 | unsigned char cdb[MAX_COMMAND_SIZE], *buf; | 186 | unsigned char cdb[MAX_COMMAND_SIZE], *buf; |
187 | int ret; | 187 | int ret; |
188 | 188 | ||
189 | buf = kzalloc(12, GFP_KERNEL); | 189 | buf = kzalloc(12, GFP_KERNEL); |
190 | if (!buf) | 190 | if (!buf) |
191 | return; | 191 | return; |
192 | 192 | ||
193 | memset(cdb, 0, MAX_COMMAND_SIZE); | 193 | memset(cdb, 0, MAX_COMMAND_SIZE); |
194 | cdb[0] = MODE_SENSE; | 194 | cdb[0] = MODE_SENSE; |
195 | cdb[4] = 0x0c; /* 12 bytes */ | 195 | cdb[4] = 0x0c; /* 12 bytes */ |
196 | 196 | ||
197 | ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf, 12, NULL, | 197 | ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf, 12, NULL, |
198 | HZ, 1, NULL); | 198 | HZ, 1, NULL); |
199 | if (ret) | 199 | if (ret) |
200 | goto out_free; | 200 | goto out_free; |
201 | 201 | ||
202 | /* | 202 | /* |
203 | * If MODE_SENSE still returns zero, set the default value to 1024. | 203 | * If MODE_SENSE still returns zero, set the default value to 1024. |
204 | */ | 204 | */ |
205 | sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]); | 205 | sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]); |
206 | if (!sdev->sector_size) | 206 | if (!sdev->sector_size) |
207 | sdev->sector_size = 1024; | 207 | sdev->sector_size = 1024; |
208 | out_free: | 208 | out_free: |
209 | kfree(buf); | 209 | kfree(buf); |
210 | } | 210 | } |
211 | 211 | ||
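pscsi_tape_read_blocksize() reads the block length out of the MODE SENSE(6) block descriptor, whose last three bytes (offsets 9-11 of the 12-byte response requested above) hold a 24-bit big-endian value. A quick check of that decode against a canned buffer:

    #include <stdio.h>

    int main(void)
    {
        /* 12-byte MODE SENSE(6) response: 4-byte header plus 8-byte block
         * descriptor; BLOCK LENGTH is the descriptor's last three bytes. */
        unsigned char buf[12] = { 0 };
        unsigned int sector_size;

        buf[9]  = 0x00;
        buf[10] = 0x02;
        buf[11] = 0x00;                  /* 0x000200 == 512 */

        sector_size = (buf[9] << 16) | (buf[10] << 8) | buf[11];
        if (!sector_size)
            sector_size = 1024;          /* same fallback as above */
        printf("sector_size=%u\n", sector_size);
        return 0;
    }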
212 | static void | 212 | static void |
213 | pscsi_set_inquiry_info(struct scsi_device *sdev, struct t10_wwn *wwn) | 213 | pscsi_set_inquiry_info(struct scsi_device *sdev, struct t10_wwn *wwn) |
214 | { | 214 | { |
215 | unsigned char *buf; | 215 | unsigned char *buf; |
216 | 216 | ||
217 | if (sdev->inquiry_len < INQUIRY_LEN) | 217 | if (sdev->inquiry_len < INQUIRY_LEN) |
218 | return; | 218 | return; |
219 | 219 | ||
220 | buf = sdev->inquiry; | 220 | buf = sdev->inquiry; |
221 | if (!buf) | 221 | if (!buf) |
222 | return; | 222 | return; |
223 | /* | 223 | /* |
224 | * Use sdev->inquiry from drivers/scsi/scsi_scan.c:scsi_alloc_sdev() | 224 | * Use sdev->inquiry from drivers/scsi/scsi_scan.c:scsi_alloc_sdev() |
225 | */ | 225 | */ |
226 | memcpy(&wwn->vendor[0], &buf[8], sizeof(wwn->vendor)); | 226 | memcpy(&wwn->vendor[0], &buf[8], sizeof(wwn->vendor)); |
227 | memcpy(&wwn->model[0], &buf[16], sizeof(wwn->model)); | 227 | memcpy(&wwn->model[0], &buf[16], sizeof(wwn->model)); |
228 | memcpy(&wwn->revision[0], &buf[32], sizeof(wwn->revision)); | 228 | memcpy(&wwn->revision[0], &buf[32], sizeof(wwn->revision)); |
229 | } | 229 | } |
230 | 230 | ||
231 | static int | 231 | static int |
232 | pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn) | 232 | pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn) |
233 | { | 233 | { |
234 | unsigned char cdb[MAX_COMMAND_SIZE], *buf; | 234 | unsigned char cdb[MAX_COMMAND_SIZE], *buf; |
235 | int ret; | 235 | int ret; |
236 | 236 | ||
237 | buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL); | 237 | buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL); |
238 | if (!buf) | 238 | if (!buf) |
239 | return -1; | 239 | return -1; |
240 | 240 | ||
241 | memset(cdb, 0, MAX_COMMAND_SIZE); | 241 | memset(cdb, 0, MAX_COMMAND_SIZE); |
242 | cdb[0] = INQUIRY; | 242 | cdb[0] = INQUIRY; |
243 | cdb[1] = 0x01; /* Query VPD */ | 243 | cdb[1] = 0x01; /* Query VPD */ |
244 | cdb[2] = 0x80; /* Unit Serial Number */ | 244 | cdb[2] = 0x80; /* Unit Serial Number */ |
245 | cdb[3] = (INQUIRY_VPD_SERIAL_LEN >> 8) & 0xff; | 245 | cdb[3] = (INQUIRY_VPD_SERIAL_LEN >> 8) & 0xff; |
246 | cdb[4] = (INQUIRY_VPD_SERIAL_LEN & 0xff); | 246 | cdb[4] = (INQUIRY_VPD_SERIAL_LEN & 0xff); |
247 | 247 | ||
248 | ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf, | 248 | ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf, |
249 | INQUIRY_VPD_SERIAL_LEN, NULL, HZ, 1, NULL); | 249 | INQUIRY_VPD_SERIAL_LEN, NULL, HZ, 1, NULL); |
250 | if (ret) | 250 | if (ret) |
251 | goto out_free; | 251 | goto out_free; |
252 | 252 | ||
253 | snprintf(&wwn->unit_serial[0], INQUIRY_VPD_SERIAL_LEN, "%s", &buf[4]); | 253 | snprintf(&wwn->unit_serial[0], INQUIRY_VPD_SERIAL_LEN, "%s", &buf[4]); |
254 | 254 | ||
255 | wwn->t10_sub_dev->su_dev_flags |= SDF_FIRMWARE_VPD_UNIT_SERIAL; | 255 | wwn->t10_sub_dev->su_dev_flags |= SDF_FIRMWARE_VPD_UNIT_SERIAL; |
256 | 256 | ||
257 | kfree(buf); | 257 | kfree(buf); |
258 | return 0; | 258 | return 0; |
259 | 259 | ||
260 | out_free: | 260 | out_free: |
261 | kfree(buf); | 261 | kfree(buf); |
262 | return -1; | 262 | return -1; |
263 | } | 263 | } |
264 | 264 | ||
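pscsi_get_inquiry_vpd_serial() relies on the SPC layout of the Unit Serial Number VPD page: byte 1 is the page code (0x80), bytes 2-3 the big-endian page length, and the serial number starts at byte 4, hence the snprintf() from &buf[4]. A sketch against a canned response:

    #include <stdio.h>

    int main(void)
    {
        /* Canned INQUIRY EVPD 0x80 response: 4-byte header + "SN12345" */
        unsigned char buf[] = { 0x00, 0x80, 0x00, 0x07,
                                'S', 'N', '1', '2', '3', '4', '5' };
        int page_len = (buf[2] << 8) | buf[3];

        if (buf[1] == 0x80)              /* Unit Serial Number page */
            printf("serial (%d bytes): %.*s\n",
                   page_len, page_len, (char *)&buf[4]);
        return 0;
    }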
265 | static void | 265 | static void |
266 | pscsi_get_inquiry_vpd_device_ident(struct scsi_device *sdev, | 266 | pscsi_get_inquiry_vpd_device_ident(struct scsi_device *sdev, |
267 | struct t10_wwn *wwn) | 267 | struct t10_wwn *wwn) |
268 | { | 268 | { |
269 | unsigned char cdb[MAX_COMMAND_SIZE], *buf, *page_83; | 269 | unsigned char cdb[MAX_COMMAND_SIZE], *buf, *page_83; |
270 | int ident_len, page_len, off = 4, ret; | 270 | int ident_len, page_len, off = 4, ret; |
271 | struct t10_vpd *vpd; | 271 | struct t10_vpd *vpd; |
272 | 272 | ||
273 | buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL); | 273 | buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL); |
274 | if (!buf) | 274 | if (!buf) |
275 | return; | 275 | return; |
276 | 276 | ||
277 | memset(cdb, 0, MAX_COMMAND_SIZE); | 277 | memset(cdb, 0, MAX_COMMAND_SIZE); |
278 | cdb[0] = INQUIRY; | 278 | cdb[0] = INQUIRY; |
279 | cdb[1] = 0x01; /* Query VPD */ | 279 | cdb[1] = 0x01; /* Query VPD */ |
280 | cdb[2] = 0x83; /* Device Identifier */ | 280 | cdb[2] = 0x83; /* Device Identifier */ |
281 | cdb[3] = (INQUIRY_VPD_DEVICE_IDENTIFIER_LEN >> 8) & 0xff; | 281 | cdb[3] = (INQUIRY_VPD_DEVICE_IDENTIFIER_LEN >> 8) & 0xff; |
282 | cdb[4] = (INQUIRY_VPD_DEVICE_IDENTIFIER_LEN & 0xff); | 282 | cdb[4] = (INQUIRY_VPD_DEVICE_IDENTIFIER_LEN & 0xff); |
283 | 283 | ||
284 | ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf, | 284 | ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf, |
285 | INQUIRY_VPD_DEVICE_IDENTIFIER_LEN, | 285 | INQUIRY_VPD_DEVICE_IDENTIFIER_LEN, |
286 | NULL, HZ, 1, NULL); | 286 | NULL, HZ, 1, NULL); |
287 | if (ret) | 287 | if (ret) |
288 | goto out; | 288 | goto out; |
289 | 289 | ||
290 | page_len = (buf[2] << 8) | buf[3]; | 290 | page_len = (buf[2] << 8) | buf[3]; |
291 | while (page_len > 0) { | 291 | while (page_len > 0) { |
292 | /* Grab a pointer to the Identification descriptor */ | 292 | /* Grab a pointer to the Identification descriptor */ |
293 | page_83 = &buf[off]; | 293 | page_83 = &buf[off]; |
294 | ident_len = page_83[3]; | 294 | ident_len = page_83[3]; |
295 | if (!ident_len) { | 295 | if (!ident_len) { |
296 | printk(KERN_ERR "page_83[3]: identifier" | 296 | printk(KERN_ERR "page_83[3]: identifier" |
297 | " length zero!\n"); | 297 | " length zero!\n"); |
298 | break; | 298 | break; |
299 | } | 299 | } |
300 | printk(KERN_INFO "T10 VPD Identifier Length: %d\n", ident_len); | 300 | printk(KERN_INFO "T10 VPD Identifier Length: %d\n", ident_len); |
301 | 301 | ||
302 | vpd = kzalloc(sizeof(struct t10_vpd), GFP_KERNEL); | 302 | vpd = kzalloc(sizeof(struct t10_vpd), GFP_KERNEL); |
303 | if (!vpd) { | 303 | if (!vpd) { |
304 | printk(KERN_ERR "Unable to allocate memory for" | 304 | printk(KERN_ERR "Unable to allocate memory for" |
305 | " struct t10_vpd\n"); | 305 | " struct t10_vpd\n"); |
306 | goto out; | 306 | goto out; |
307 | } | 307 | } |
308 | INIT_LIST_HEAD(&vpd->vpd_list); | 308 | INIT_LIST_HEAD(&vpd->vpd_list); |
309 | 309 | ||
310 | transport_set_vpd_proto_id(vpd, page_83); | 310 | transport_set_vpd_proto_id(vpd, page_83); |
311 | transport_set_vpd_assoc(vpd, page_83); | 311 | transport_set_vpd_assoc(vpd, page_83); |
312 | 312 | ||
313 | if (transport_set_vpd_ident_type(vpd, page_83) < 0) { | 313 | if (transport_set_vpd_ident_type(vpd, page_83) < 0) { |
314 | off += (ident_len + 4); | 314 | off += (ident_len + 4); |
315 | page_len -= (ident_len + 4); | 315 | page_len -= (ident_len + 4); |
316 | kfree(vpd); | 316 | kfree(vpd); |
317 | continue; | 317 | continue; |
318 | } | 318 | } |
319 | if (transport_set_vpd_ident(vpd, page_83) < 0) { | 319 | if (transport_set_vpd_ident(vpd, page_83) < 0) { |
320 | off += (ident_len + 4); | 320 | off += (ident_len + 4); |
321 | page_len -= (ident_len + 4); | 321 | page_len -= (ident_len + 4); |
322 | kfree(vpd); | 322 | kfree(vpd); |
323 | continue; | 323 | continue; |
324 | } | 324 | } |
325 | 325 | ||
326 | list_add_tail(&vpd->vpd_list, &wwn->t10_vpd_list); | 326 | list_add_tail(&vpd->vpd_list, &wwn->t10_vpd_list); |
327 | off += (ident_len + 4); | 327 | off += (ident_len + 4); |
328 | page_len -= (ident_len + 4); | 328 | page_len -= (ident_len + 4); |
329 | } | 329 | } |
330 | 330 | ||
331 | out: | 331 | out: |
332 | kfree(buf); | 332 | kfree(buf); |
333 | } | 333 | } |
334 | 334 | ||
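The descriptor walk above works because each designation descriptor in the Device Identification page carries a 4-byte header with the designator length in byte 3, so the cursor advances by ident_len + 4 each pass. A user-space sketch over a canned two-descriptor page:

    #include <stdio.h>

    int main(void)
    {
        /* Canned VPD 0x83 page: 4-byte page header followed by two
         * designation descriptors with designator lengths 8 and 4. */
        unsigned char buf[] = {
            0x00, 0x83, 0x00, 0x14,                 /* page length 20 */
            0x01, 0x03, 0x00, 0x08, 1, 2, 3, 4, 5, 6, 7, 8,
            0x02, 0x01, 0x00, 0x04, 9, 10, 11, 12,
        };
        int page_len = (buf[2] << 8) | buf[3];
        int off = 4;

        while (page_len > 0) {
            unsigned char *desc = &buf[off];
            int ident_len = desc[3];

            if (!ident_len)
                break;                              /* malformed descriptor */
            printf("designator at offset %d, length %d\n", off, ident_len);
            off += ident_len + 4;                   /* 4-byte header + body */
            page_len -= ident_len + 4;
        }
        return 0;
    }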
335 | /* pscsi_add_device_to_list(): | 335 | /* pscsi_add_device_to_list(): |
336 | * | 336 | * |
337 | * | 337 | * |
338 | */ | 338 | */ |
339 | static struct se_device *pscsi_add_device_to_list( | 339 | static struct se_device *pscsi_add_device_to_list( |
340 | struct se_hba *hba, | 340 | struct se_hba *hba, |
341 | struct se_subsystem_dev *se_dev, | 341 | struct se_subsystem_dev *se_dev, |
342 | struct pscsi_dev_virt *pdv, | 342 | struct pscsi_dev_virt *pdv, |
343 | struct scsi_device *sd, | 343 | struct scsi_device *sd, |
344 | int dev_flags) | 344 | int dev_flags) |
345 | { | 345 | { |
346 | struct se_device *dev; | 346 | struct se_device *dev; |
347 | struct se_dev_limits dev_limits; | 347 | struct se_dev_limits dev_limits; |
348 | struct request_queue *q; | 348 | struct request_queue *q; |
349 | struct queue_limits *limits; | 349 | struct queue_limits *limits; |
350 | 350 | ||
351 | memset(&dev_limits, 0, sizeof(struct se_dev_limits)); | 351 | memset(&dev_limits, 0, sizeof(struct se_dev_limits)); |
352 | 352 | ||
353 | if (!sd->queue_depth) { | 353 | if (!sd->queue_depth) { |
354 | sd->queue_depth = PSCSI_DEFAULT_QUEUEDEPTH; | 354 | sd->queue_depth = PSCSI_DEFAULT_QUEUEDEPTH; |
355 | 355 | ||
356 | printk(KERN_ERR "Set broken SCSI Device %d:%d:%d" | 356 | printk(KERN_ERR "Set broken SCSI Device %d:%d:%d" |
357 | " queue_depth to %d\n", sd->channel, sd->id, | 357 | " queue_depth to %d\n", sd->channel, sd->id, |
358 | sd->lun, sd->queue_depth); | 358 | sd->lun, sd->queue_depth); |
359 | } | 359 | } |
360 | /* | 360 | /* |
361 | * Setup the local scope queue_limits from struct request_queue->limits | 361 | * Setup the local scope queue_limits from struct request_queue->limits |
362 | * to pass into transport_add_device_to_core_hba() as struct se_dev_limits. | 362 | * to pass into transport_add_device_to_core_hba() as struct se_dev_limits. |
363 | */ | 363 | */ |
364 | q = sd->request_queue; | 364 | q = sd->request_queue; |
365 | limits = &dev_limits.limits; | 365 | limits = &dev_limits.limits; |
366 | limits->logical_block_size = sd->sector_size; | 366 | limits->logical_block_size = sd->sector_size; |
367 | limits->max_hw_sectors = (sd->host->max_sectors > queue_max_hw_sectors(q)) ? | 367 | limits->max_hw_sectors = (sd->host->max_sectors > queue_max_hw_sectors(q)) ? |
368 | queue_max_hw_sectors(q) : sd->host->max_sectors; | 368 | queue_max_hw_sectors(q) : sd->host->max_sectors; |
369 | limits->max_sectors = (sd->host->max_sectors > queue_max_sectors(q)) ? | 369 | limits->max_sectors = (sd->host->max_sectors > queue_max_sectors(q)) ? |
370 | queue_max_sectors(q) : sd->host->max_sectors; | 370 | queue_max_sectors(q) : sd->host->max_sectors; |
371 | dev_limits.hw_queue_depth = sd->queue_depth; | 371 | dev_limits.hw_queue_depth = sd->queue_depth; |
372 | dev_limits.queue_depth = sd->queue_depth; | 372 | dev_limits.queue_depth = sd->queue_depth; |
373 | /* | 373 | /* |
374 | * Setup our standard INQUIRY info into se_dev->t10_wwn | 374 | * Setup our standard INQUIRY info into se_dev->t10_wwn |
375 | */ | 375 | */ |
376 | pscsi_set_inquiry_info(sd, &se_dev->t10_wwn); | 376 | pscsi_set_inquiry_info(sd, &se_dev->t10_wwn); |
377 | 377 | ||
378 | /* | 378 | /* |
379 | * Set the pointer pdv->pdv_sd from the passed struct scsi_device, | 379 | * Set the pointer pdv->pdv_sd from the passed struct scsi_device, |
380 | * which has already been referenced by the Linux SCSI code with | 380 | * which has already been referenced by the Linux SCSI code with |
381 | * scsi_device_get() in this file's pscsi_create_virtdevice(). | 381 | * scsi_device_get() in this file's pscsi_create_virtdevice(). |
382 | * | 382 | * |
383 | * The passthrough operations called by the transport_add_device_* | 383 | * The passthrough operations called by the transport_add_device_* |
384 | * function below will require this pointer to be set for passthrough | 384 | * function below will require this pointer to be set for passthrough |
385 | * ops. | 385 | * ops. |
386 | * | 386 | * |
387 | * For the shutdown case in pscsi_free_device(), this struct | 387 | * For the shutdown case in pscsi_free_device(), this struct |
388 | * scsi_device reference is released with Linux SCSI code | 388 | * scsi_device reference is released with Linux SCSI code |
389 | * scsi_device_put() and the pdv->pdv_sd cleared. | 389 | * scsi_device_put() and the pdv->pdv_sd cleared. |
390 | */ | 390 | */ |
391 | pdv->pdv_sd = sd; | 391 | pdv->pdv_sd = sd; |
392 | 392 | ||
393 | dev = transport_add_device_to_core_hba(hba, &pscsi_template, | 393 | dev = transport_add_device_to_core_hba(hba, &pscsi_template, |
394 | se_dev, dev_flags, (void *)pdv, | 394 | se_dev, dev_flags, (void *)pdv, |
395 | &dev_limits, NULL, NULL); | 395 | &dev_limits, NULL, NULL); |
396 | if (!(dev)) { | 396 | if (!(dev)) { |
397 | pdv->pdv_sd = NULL; | 397 | pdv->pdv_sd = NULL; |
398 | return NULL; | 398 | return NULL; |
399 | } | 399 | } |
400 | 400 | ||
401 | /* | 401 | /* |
402 | * Locate VPD WWN Information used for various purposes within | 402 | * Locate VPD WWN Information used for various purposes within |
403 | * the Storage Engine. | 403 | * the Storage Engine. |
404 | */ | 404 | */ |
405 | if (!pscsi_get_inquiry_vpd_serial(sd, &se_dev->t10_wwn)) { | 405 | if (!pscsi_get_inquiry_vpd_serial(sd, &se_dev->t10_wwn)) { |
406 | /* | 406 | /* |
407 | * If VPD Unit Serial returned GOOD status, try | 407 | * If VPD Unit Serial returned GOOD status, try |
408 | * VPD Device Identification page (0x83). | 408 | * VPD Device Identification page (0x83). |
409 | */ | 409 | */ |
410 | pscsi_get_inquiry_vpd_device_ident(sd, &se_dev->t10_wwn); | 410 | pscsi_get_inquiry_vpd_device_ident(sd, &se_dev->t10_wwn); |
411 | } | 411 | } |
412 | 412 | ||
413 | /* | 413 | /* |
414 | * For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE. | 414 | * For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE. |
415 | */ | 415 | */ |
416 | if (sd->type == TYPE_TAPE) | 416 | if (sd->type == TYPE_TAPE) |
417 | pscsi_tape_read_blocksize(dev, sd); | 417 | pscsi_tape_read_blocksize(dev, sd); |
418 | return dev; | 418 | return dev; |
419 | } | 419 | } |
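
The two ternaries in pscsi_add_device_to_list() simply clamp each limit to the smaller of the SCSI host template value and the request queue value. Restated as a tiny standalone program, where min_of() is a hypothetical stand-in for the kernel's min_t() and the numbers are made up:

#include <stdio.h>

static unsigned int min_of(unsigned int a, unsigned int b)
{
	return (a < b) ? a : b;		/* same effect as the ternaries above */
}

int main(void)
{
	unsigned int host_max_sectors = 1024;	/* sd->host->max_sectors (made up) */
	unsigned int queue_hw_max = 2048;	/* queue_max_hw_sectors(q) (made up) */
	unsigned int queue_max = 512;		/* queue_max_sectors(q) (made up) */

	printf("max_hw_sectors = %u\n", min_of(host_max_sectors, queue_hw_max));
	printf("max_sectors    = %u\n", min_of(host_max_sectors, queue_max));
	return 0;
}
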
420 | 420 | ||
421 | static void *pscsi_allocate_virtdevice(struct se_hba *hba, const char *name) | 421 | static void *pscsi_allocate_virtdevice(struct se_hba *hba, const char *name) |
422 | { | 422 | { |
423 | struct pscsi_dev_virt *pdv; | 423 | struct pscsi_dev_virt *pdv; |
424 | 424 | ||
425 | pdv = kzalloc(sizeof(struct pscsi_dev_virt), GFP_KERNEL); | 425 | pdv = kzalloc(sizeof(struct pscsi_dev_virt), GFP_KERNEL); |
426 | if (!(pdv)) { | 426 | if (!(pdv)) { |
427 | printk(KERN_ERR "Unable to allocate memory for struct pscsi_dev_virt\n"); | 427 | printk(KERN_ERR "Unable to allocate memory for struct pscsi_dev_virt\n"); |
428 | return NULL; | 428 | return NULL; |
429 | } | 429 | } |
430 | pdv->pdv_se_hba = hba; | 430 | pdv->pdv_se_hba = hba; |
431 | 431 | ||
432 | printk(KERN_INFO "PSCSI: Allocated pdv: %p for %s\n", pdv, name); | 432 | printk(KERN_INFO "PSCSI: Allocated pdv: %p for %s\n", pdv, name); |
433 | return (void *)pdv; | 433 | return (void *)pdv; |
434 | } | 434 | } |
435 | 435 | ||
436 | /* | 436 | /* |
437 | * Called with struct Scsi_Host->host_lock held. | 437 | * Called with struct Scsi_Host->host_lock held. |
438 | */ | 438 | */ |
439 | static struct se_device *pscsi_create_type_disk( | 439 | static struct se_device *pscsi_create_type_disk( |
440 | struct scsi_device *sd, | 440 | struct scsi_device *sd, |
441 | struct pscsi_dev_virt *pdv, | 441 | struct pscsi_dev_virt *pdv, |
442 | struct se_subsystem_dev *se_dev, | 442 | struct se_subsystem_dev *se_dev, |
443 | struct se_hba *hba) | 443 | struct se_hba *hba) |
444 | __releases(sh->host_lock) | ||
444 | { | 445 | { |
445 | struct se_device *dev; | 446 | struct se_device *dev; |
446 | struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr; | 447 | struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr; |
447 | struct Scsi_Host *sh = sd->host; | 448 | struct Scsi_Host *sh = sd->host; |
448 | struct block_device *bd; | 449 | struct block_device *bd; |
449 | u32 dev_flags = 0; | 450 | u32 dev_flags = 0; |
450 | 451 | ||
451 | if (scsi_device_get(sd)) { | 452 | if (scsi_device_get(sd)) { |
452 | printk(KERN_ERR "scsi_device_get() failed for %d:%d:%d:%d\n", | 453 | printk(KERN_ERR "scsi_device_get() failed for %d:%d:%d:%d\n", |
453 | sh->host_no, sd->channel, sd->id, sd->lun); | 454 | sh->host_no, sd->channel, sd->id, sd->lun); |
454 | spin_unlock_irq(sh->host_lock); | 455 | spin_unlock_irq(sh->host_lock); |
455 | return NULL; | 456 | return NULL; |
456 | } | 457 | } |
457 | spin_unlock_irq(sh->host_lock); | 458 | spin_unlock_irq(sh->host_lock); |
458 | /* | 459 | /* |
459 | * Claim exclusive struct block_device access to struct scsi_device | 460 | * Claim exclusive struct block_device access to struct scsi_device |
460 | * for TYPE_DISK using supplied udev_path | 461 | * for TYPE_DISK using supplied udev_path |
461 | */ | 462 | */ |
462 | bd = blkdev_get_by_path(se_dev->se_dev_udev_path, | 463 | bd = blkdev_get_by_path(se_dev->se_dev_udev_path, |
463 | FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv); | 464 | FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv); |
464 | if (IS_ERR(bd)) { | 465 | if (IS_ERR(bd)) { |
465 | printk(KERN_ERR "pSCSI: blkdev_get_by_path() failed\n"); | 466 | printk(KERN_ERR "pSCSI: blkdev_get_by_path() failed\n"); |
466 | scsi_device_put(sd); | 467 | scsi_device_put(sd); |
467 | return NULL; | 468 | return NULL; |
468 | } | 469 | } |
469 | pdv->pdv_bd = bd; | 470 | pdv->pdv_bd = bd; |
470 | 471 | ||
471 | dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags); | 472 | dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags); |
472 | if (!(dev)) { | 473 | if (!(dev)) { |
473 | blkdev_put(pdv->pdv_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); | 474 | blkdev_put(pdv->pdv_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); |
474 | scsi_device_put(sd); | 475 | scsi_device_put(sd); |
475 | return NULL; | 476 | return NULL; |
476 | } | 477 | } |
477 | printk(KERN_INFO "CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%d\n", | 478 | printk(KERN_INFO "CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%d\n", |
478 | phv->phv_host_id, sh->host_no, sd->channel, sd->id, sd->lun); | 479 | phv->phv_host_id, sh->host_no, sd->channel, sd->id, sd->lun); |
479 | 480 | ||
480 | return dev; | 481 | return dev; |
481 | } | 482 | } |
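
The `__releases(sh->host_lock)` line added above (and on the two functions that follow) is the sparse locking annotation this commit introduces: it declares that the function is entered with the lock held and returns with it dropped, so sparse's context tracking no longer warns about the otherwise unbalanced spin_unlock_irq(). A minimal standalone illustration of the mechanism, loosely following the include/linux/compiler.h pattern; the helper names below are examples, not driver code:

#include <stdio.h>

/* Under sparse (__CHECKER__ defined) the annotations expand to a
 * context attribute sparse checks; in a normal gcc build they
 * expand to nothing. */
#ifdef __CHECKER__
# define __acquires(x)	__attribute__((context(x, 0, 1)))
# define __releases(x)	__attribute__((context(x, 1, 0)))
#else
# define __acquires(x)
# define __releases(x)
#endif

static int lock;		/* stand-in for sh->host_lock (illustrative) */

/* Documented contract: entered with 'lock' held, returns with it
 * released -- the same shape the pscsi_create_type_*() functions
 * now declare. */
static void finish_and_unlock(void) __releases(lock)
{
	printf("work done under lock\n");
	lock = 0;		/* pretend unlock */
}

int main(void)
{
	lock = 1;		/* pretend lock */
	finish_and_unlock();
	return 0;
}
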
482 | 483 | ||
483 | /* | 484 | /* |
484 | * Called with struct Scsi_Host->host_lock held. | 485 | * Called with struct Scsi_Host->host_lock held. |
485 | */ | 486 | */ |
486 | static struct se_device *pscsi_create_type_rom( | 487 | static struct se_device *pscsi_create_type_rom( |
487 | struct scsi_device *sd, | 488 | struct scsi_device *sd, |
488 | struct pscsi_dev_virt *pdv, | 489 | struct pscsi_dev_virt *pdv, |
489 | struct se_subsystem_dev *se_dev, | 490 | struct se_subsystem_dev *se_dev, |
490 | struct se_hba *hba) | 491 | struct se_hba *hba) |
492 | __releases(sh->host_lock) | ||
491 | { | 493 | { |
492 | struct se_device *dev; | 494 | struct se_device *dev; |
493 | struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr; | 495 | struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr; |
494 | struct Scsi_Host *sh = sd->host; | 496 | struct Scsi_Host *sh = sd->host; |
495 | u32 dev_flags = 0; | 497 | u32 dev_flags = 0; |
496 | 498 | ||
497 | if (scsi_device_get(sd)) { | 499 | if (scsi_device_get(sd)) { |
498 | printk(KERN_ERR "scsi_device_get() failed for %d:%d:%d:%d\n", | 500 | printk(KERN_ERR "scsi_device_get() failed for %d:%d:%d:%d\n", |
499 | sh->host_no, sd->channel, sd->id, sd->lun); | 501 | sh->host_no, sd->channel, sd->id, sd->lun); |
500 | spin_unlock_irq(sh->host_lock); | 502 | spin_unlock_irq(sh->host_lock); |
501 | return NULL; | 503 | return NULL; |
502 | } | 504 | } |
503 | spin_unlock_irq(sh->host_lock); | 505 | spin_unlock_irq(sh->host_lock); |
504 | 506 | ||
505 | dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags); | 507 | dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags); |
506 | if (!(dev)) { | 508 | if (!(dev)) { |
507 | scsi_device_put(sd); | 509 | scsi_device_put(sd); |
508 | return NULL; | 510 | return NULL; |
509 | } | 511 | } |
510 | printk(KERN_INFO "CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n", | 512 | printk(KERN_INFO "CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n", |
511 | phv->phv_host_id, scsi_device_type(sd->type), sh->host_no, | 513 | phv->phv_host_id, scsi_device_type(sd->type), sh->host_no, |
512 | sd->channel, sd->id, sd->lun); | 514 | sd->channel, sd->id, sd->lun); |
513 | 515 | ||
514 | return dev; | 516 | return dev; |
515 | } | 517 | } |
516 | 518 | ||
517 | /* | 519 | /* |
518 | * Called with struct Scsi_Host->host_lock held. | 520 | * Called with struct Scsi_Host->host_lock held. |
519 | */ | 521 | */ |
520 | static struct se_device *pscsi_create_type_other( | 522 | static struct se_device *pscsi_create_type_other( |
521 | struct scsi_device *sd, | 523 | struct scsi_device *sd, |
522 | struct pscsi_dev_virt *pdv, | 524 | struct pscsi_dev_virt *pdv, |
523 | struct se_subsystem_dev *se_dev, | 525 | struct se_subsystem_dev *se_dev, |
524 | struct se_hba *hba) | 526 | struct se_hba *hba) |
527 | __releases(sh->host_lock) | ||
525 | { | 528 | { |
526 | struct se_device *dev; | 529 | struct se_device *dev; |
527 | struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr; | 530 | struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr; |
528 | struct Scsi_Host *sh = sd->host; | 531 | struct Scsi_Host *sh = sd->host; |
529 | u32 dev_flags = 0; | 532 | u32 dev_flags = 0; |
530 | 533 | ||
531 | spin_unlock_irq(sh->host_lock); | 534 | spin_unlock_irq(sh->host_lock); |
532 | dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags); | 535 | dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags); |
533 | if (!(dev)) | 536 | if (!(dev)) |
534 | return NULL; | 537 | return NULL; |
535 | 538 | ||
536 | printk(KERN_INFO "CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n", | 539 | printk(KERN_INFO "CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n", |
537 | phv->phv_host_id, scsi_device_type(sd->type), sh->host_no, | 540 | phv->phv_host_id, scsi_device_type(sd->type), sh->host_no, |
538 | sd->channel, sd->id, sd->lun); | 541 | sd->channel, sd->id, sd->lun); |
539 | 542 | ||
540 | return dev; | 543 | return dev; |
541 | } | 544 | } |
542 | 545 | ||
543 | static struct se_device *pscsi_create_virtdevice( | 546 | static struct se_device *pscsi_create_virtdevice( |
544 | struct se_hba *hba, | 547 | struct se_hba *hba, |
545 | struct se_subsystem_dev *se_dev, | 548 | struct se_subsystem_dev *se_dev, |
546 | void *p) | 549 | void *p) |
547 | { | 550 | { |
548 | struct pscsi_dev_virt *pdv = (struct pscsi_dev_virt *)p; | 551 | struct pscsi_dev_virt *pdv = (struct pscsi_dev_virt *)p; |
549 | struct se_device *dev; | 552 | struct se_device *dev; |
550 | struct scsi_device *sd; | 553 | struct scsi_device *sd; |
551 | struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr; | 554 | struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr; |
552 | struct Scsi_Host *sh = phv->phv_lld_host; | 555 | struct Scsi_Host *sh = phv->phv_lld_host; |
553 | int legacy_mode_enable = 0; | 556 | int legacy_mode_enable = 0; |
554 | 557 | ||
555 | if (!(pdv)) { | 558 | if (!(pdv)) { |
556 | printk(KERN_ERR "Unable to locate struct pscsi_dev_virt" | 559 | printk(KERN_ERR "Unable to locate struct pscsi_dev_virt" |
557 | " parameter\n"); | 560 | " parameter\n"); |
558 | return ERR_PTR(-EINVAL); | 561 | return ERR_PTR(-EINVAL); |
559 | } | 562 | } |
560 | /* | 563 | /* |
561 | * If not running in PHV_LLD_SCSI_HOST_NO mode, locate the | 564 | * If not running in PHV_LLD_SCSI_HOST_NO mode, locate the |
562 | * struct Scsi_Host we will need to bring the TCM/pSCSI object online | 565 | * struct Scsi_Host we will need to bring the TCM/pSCSI object online |
563 | */ | 566 | */ |
564 | if (!(sh)) { | 567 | if (!(sh)) { |
565 | if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) { | 568 | if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) { |
566 | printk(KERN_ERR "pSCSI: Unable to locate struct" | 569 | printk(KERN_ERR "pSCSI: Unable to locate struct" |
567 | " Scsi_Host for PHV_LLD_SCSI_HOST_NO\n"); | 570 | " Scsi_Host for PHV_LLD_SCSI_HOST_NO\n"); |
568 | return ERR_PTR(-ENODEV); | 571 | return ERR_PTR(-ENODEV); |
569 | } | 572 | } |
570 | /* | 573 | /* |
571 | * For the newer PHV_VIRUTAL_HOST_ID struct scsi_device | 574 | * For the newer PHV_VIRUTAL_HOST_ID struct scsi_device |
572 | * reference, we enforce that udev_path has been set | 575 | * reference, we enforce that udev_path has been set |
573 | */ | 576 | */ |
574 | if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) { | 577 | if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) { |
575 | printk(KERN_ERR "pSCSI: udev_path attribute has not" | 578 | printk(KERN_ERR "pSCSI: udev_path attribute has not" |
576 | " been set before ENABLE=1\n"); | 579 | " been set before ENABLE=1\n"); |
577 | return ERR_PTR(-EINVAL); | 580 | return ERR_PTR(-EINVAL); |
578 | } | 581 | } |
579 | /* | 582 | /* |
580 | * If no scsi_host_id= was passed for PHV_VIRUTAL_HOST_ID, | 583 | * If no scsi_host_id= was passed for PHV_VIRUTAL_HOST_ID, |
581 | * use the original TCM hba ID to reference Linux/SCSI Host No | 584 | * use the original TCM hba ID to reference Linux/SCSI Host No |
582 | * and enable for PHV_LLD_SCSI_HOST_NO mode. | 585 | * and enable for PHV_LLD_SCSI_HOST_NO mode. |
583 | */ | 586 | */ |
584 | if (!(pdv->pdv_flags & PDF_HAS_VIRT_HOST_ID)) { | 587 | if (!(pdv->pdv_flags & PDF_HAS_VIRT_HOST_ID)) { |
585 | spin_lock(&hba->device_lock); | 588 | spin_lock(&hba->device_lock); |
586 | if (!(list_empty(&hba->hba_dev_list))) { | 589 | if (!(list_empty(&hba->hba_dev_list))) { |
587 | printk(KERN_ERR "pSCSI: Unable to set hba_mode" | 590 | printk(KERN_ERR "pSCSI: Unable to set hba_mode" |
588 | " with active devices\n"); | 591 | " with active devices\n"); |
589 | spin_unlock(&hba->device_lock); | 592 | spin_unlock(&hba->device_lock); |
590 | return ERR_PTR(-EEXIST); | 593 | return ERR_PTR(-EEXIST); |
591 | } | 594 | } |
592 | spin_unlock(&hba->device_lock); | 595 | spin_unlock(&hba->device_lock); |
593 | 596 | ||
594 | if (pscsi_pmode_enable_hba(hba, 1) != 1) | 597 | if (pscsi_pmode_enable_hba(hba, 1) != 1) |
595 | return ERR_PTR(-ENODEV); | 598 | return ERR_PTR(-ENODEV); |
596 | 599 | ||
597 | legacy_mode_enable = 1; | 600 | legacy_mode_enable = 1; |
598 | hba->hba_flags |= HBA_FLAGS_PSCSI_MODE; | 601 | hba->hba_flags |= HBA_FLAGS_PSCSI_MODE; |
599 | sh = phv->phv_lld_host; | 602 | sh = phv->phv_lld_host; |
600 | } else { | 603 | } else { |
601 | sh = pscsi_get_sh(pdv->pdv_host_id); | 604 | sh = pscsi_get_sh(pdv->pdv_host_id); |
602 | if (!(sh)) { | 605 | if (!(sh)) { |
603 | printk(KERN_ERR "pSCSI: Unable to locate" | 606 | printk(KERN_ERR "pSCSI: Unable to locate" |
604 | " pdv_host_id: %d\n", pdv->pdv_host_id); | 607 | " pdv_host_id: %d\n", pdv->pdv_host_id); |
605 | return ERR_PTR(-ENODEV); | 608 | return ERR_PTR(-ENODEV); |
606 | } | 609 | } |
607 | } | 610 | } |
608 | } else { | 611 | } else { |
609 | if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) { | 612 | if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) { |
610 | printk(KERN_ERR "pSCSI: PHV_VIRUTAL_HOST_ID set while" | 613 | printk(KERN_ERR "pSCSI: PHV_VIRUTAL_HOST_ID set while" |
611 | " struct Scsi_Host exists\n"); | 614 | " struct Scsi_Host exists\n"); |
612 | return ERR_PTR(-EEXIST); | 615 | return ERR_PTR(-EEXIST); |
613 | } | 616 | } |
614 | } | 617 | } |
615 | 618 | ||
616 | spin_lock_irq(sh->host_lock); | 619 | spin_lock_irq(sh->host_lock); |
617 | list_for_each_entry(sd, &sh->__devices, siblings) { | 620 | list_for_each_entry(sd, &sh->__devices, siblings) { |
618 | if ((pdv->pdv_channel_id != sd->channel) || | 621 | if ((pdv->pdv_channel_id != sd->channel) || |
619 | (pdv->pdv_target_id != sd->id) || | 622 | (pdv->pdv_target_id != sd->id) || |
620 | (pdv->pdv_lun_id != sd->lun)) | 623 | (pdv->pdv_lun_id != sd->lun)) |
621 | continue; | 624 | continue; |
622 | /* | 625 | /* |
623 | * Functions will release the held struct Scsi_Host->host_lock | 626 | * Functions will release the held struct Scsi_Host->host_lock |
624 | * before calling pscsi_add_device_to_list() to register | 627 | * before calling pscsi_add_device_to_list() to register |
625 | * struct scsi_device with target_core_mod. | 628 | * struct scsi_device with target_core_mod. |
626 | */ | 629 | */ |
627 | switch (sd->type) { | 630 | switch (sd->type) { |
628 | case TYPE_DISK: | 631 | case TYPE_DISK: |
629 | dev = pscsi_create_type_disk(sd, pdv, se_dev, hba); | 632 | dev = pscsi_create_type_disk(sd, pdv, se_dev, hba); |
630 | break; | 633 | break; |
631 | case TYPE_ROM: | 634 | case TYPE_ROM: |
632 | dev = pscsi_create_type_rom(sd, pdv, se_dev, hba); | 635 | dev = pscsi_create_type_rom(sd, pdv, se_dev, hba); |
633 | break; | 636 | break; |
634 | default: | 637 | default: |
635 | dev = pscsi_create_type_other(sd, pdv, se_dev, hba); | 638 | dev = pscsi_create_type_other(sd, pdv, se_dev, hba); |
636 | break; | 639 | break; |
637 | } | 640 | } |
638 | 641 | ||
639 | if (!(dev)) { | 642 | if (!(dev)) { |
640 | if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) | 643 | if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) |
641 | scsi_host_put(sh); | 644 | scsi_host_put(sh); |
642 | else if (legacy_mode_enable) { | 645 | else if (legacy_mode_enable) { |
643 | pscsi_pmode_enable_hba(hba, 0); | 646 | pscsi_pmode_enable_hba(hba, 0); |
644 | hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE; | 647 | hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE; |
645 | } | 648 | } |
646 | pdv->pdv_sd = NULL; | 649 | pdv->pdv_sd = NULL; |
647 | return ERR_PTR(-ENODEV); | 650 | return ERR_PTR(-ENODEV); |
648 | } | 651 | } |
649 | return dev; | 652 | return dev; |
650 | } | 653 | } |
651 | spin_unlock_irq(sh->host_lock); | 654 | spin_unlock_irq(sh->host_lock); |
652 | 655 | ||
653 | printk(KERN_ERR "pSCSI: Unable to locate %d:%d:%d:%d\n", sh->host_no, | 656 | printk(KERN_ERR "pSCSI: Unable to locate %d:%d:%d:%d\n", sh->host_no, |
654 | pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id); | 657 | pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id); |
655 | 658 | ||
656 | if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) | 659 | if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) |
657 | scsi_host_put(sh); | 660 | scsi_host_put(sh); |
658 | else if (legacy_mode_enable) { | 661 | else if (legacy_mode_enable) { |
659 | pscsi_pmode_enable_hba(hba, 0); | 662 | pscsi_pmode_enable_hba(hba, 0); |
660 | hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE; | 663 | hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE; |
661 | } | 664 | } |
662 | 665 | ||
663 | return ERR_PTR(-ENODEV); | 666 | return ERR_PTR(-ENODEV); |
664 | } | 667 | } |
665 | 668 | ||
666 | /* pscsi_free_device(): (Part of se_subsystem_api_t template) | 669 | /* pscsi_free_device(): (Part of se_subsystem_api_t template) |
667 | * | 670 | * |
668 | * | 671 | * |
669 | */ | 672 | */ |
670 | static void pscsi_free_device(void *p) | 673 | static void pscsi_free_device(void *p) |
671 | { | 674 | { |
672 | struct pscsi_dev_virt *pdv = p; | 675 | struct pscsi_dev_virt *pdv = p; |
673 | struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr; | 676 | struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr; |
674 | struct scsi_device *sd = pdv->pdv_sd; | 677 | struct scsi_device *sd = pdv->pdv_sd; |
675 | 678 | ||
676 | if (sd) { | 679 | if (sd) { |
677 | /* | 680 | /* |
678 | * Release exclusive pSCSI internal struct block_device claim for | 681 | * Release exclusive pSCSI internal struct block_device claim for |
679 | * struct scsi_device with TYPE_DISK from pscsi_create_type_disk() | 682 | * struct scsi_device with TYPE_DISK from pscsi_create_type_disk() |
680 | */ | 683 | */ |
681 | if ((sd->type == TYPE_DISK) && pdv->pdv_bd) { | 684 | if ((sd->type == TYPE_DISK) && pdv->pdv_bd) { |
682 | blkdev_put(pdv->pdv_bd, | 685 | blkdev_put(pdv->pdv_bd, |
683 | FMODE_WRITE|FMODE_READ|FMODE_EXCL); | 686 | FMODE_WRITE|FMODE_READ|FMODE_EXCL); |
684 | pdv->pdv_bd = NULL; | 687 | pdv->pdv_bd = NULL; |
685 | } | 688 | } |
686 | /* | 689 | /* |
687 | * For HBA mode PHV_LLD_SCSI_HOST_NO, release the reference | 690 | * For HBA mode PHV_LLD_SCSI_HOST_NO, release the reference |
688 | * to struct Scsi_Host now. | 691 | * to struct Scsi_Host now. |
689 | */ | 692 | */ |
690 | if ((phv->phv_mode == PHV_LLD_SCSI_HOST_NO) && | 693 | if ((phv->phv_mode == PHV_LLD_SCSI_HOST_NO) && |
691 | (phv->phv_lld_host != NULL)) | 694 | (phv->phv_lld_host != NULL)) |
692 | scsi_host_put(phv->phv_lld_host); | 695 | scsi_host_put(phv->phv_lld_host); |
693 | 696 | ||
694 | if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM)) | 697 | if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM)) |
695 | scsi_device_put(sd); | 698 | scsi_device_put(sd); |
696 | 699 | ||
697 | pdv->pdv_sd = NULL; | 700 | pdv->pdv_sd = NULL; |
698 | } | 701 | } |
699 | 702 | ||
700 | kfree(pdv); | 703 | kfree(pdv); |
701 | } | 704 | } |
702 | 705 | ||
703 | static inline struct pscsi_plugin_task *PSCSI_TASK(struct se_task *task) | 706 | static inline struct pscsi_plugin_task *PSCSI_TASK(struct se_task *task) |
704 | { | 707 | { |
705 | return container_of(task, struct pscsi_plugin_task, pscsi_task); | 708 | return container_of(task, struct pscsi_plugin_task, pscsi_task); |
706 | } | 709 | } |
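
PSCSI_TASK() is the usual container_of() accessor: given a pointer to the embedded se_task member, it recovers the enclosing pscsi_plugin_task. A self-contained userspace sketch of the same pattern, with illustrative struct names:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct inner { int id; };		/* plays the role of se_task */

struct outer {				/* plays the role of pscsi_plugin_task */
	char tag;
	struct inner task;		/* embedded member, like pscsi_task */
};

int main(void)
{
	struct outer o = { .tag = 'x', .task = { .id = 7 } };
	struct inner *ip = &o.task;

	/* Recover the enclosing struct from a pointer to its member. */
	struct outer *op = container_of(ip, struct outer, task);

	printf("tag=%c id=%d\n", op->tag, op->task.id);
	return 0;
}
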
707 | 710 | ||
708 | 711 | ||
709 | /* pscsi_transport_complete(): | 712 | /* pscsi_transport_complete(): |
710 | * | 713 | * |
711 | * | 714 | * |
712 | */ | 715 | */ |
713 | static int pscsi_transport_complete(struct se_task *task) | 716 | static int pscsi_transport_complete(struct se_task *task) |
714 | { | 717 | { |
715 | struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr; | 718 | struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr; |
716 | struct scsi_device *sd = pdv->pdv_sd; | 719 | struct scsi_device *sd = pdv->pdv_sd; |
717 | int result; | 720 | int result; |
718 | struct pscsi_plugin_task *pt = PSCSI_TASK(task); | 721 | struct pscsi_plugin_task *pt = PSCSI_TASK(task); |
719 | unsigned char *cdb = &pt->pscsi_cdb[0]; | 722 | unsigned char *cdb = &pt->pscsi_cdb[0]; |
720 | 723 | ||
721 | result = pt->pscsi_result; | 724 | result = pt->pscsi_result; |
722 | /* | 725 | /* |
723 | * Hack to make sure that Write-Protect modepage is set if R/O mode is | 726 | * Hack to make sure that Write-Protect modepage is set if R/O mode is |
724 | * forced. | 727 | * forced. |
725 | */ | 728 | */ |
726 | if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) && | 729 | if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) && |
727 | (status_byte(result) << 1) == SAM_STAT_GOOD) { | 730 | (status_byte(result) << 1) == SAM_STAT_GOOD) { |
728 | if (!TASK_CMD(task)->se_deve) | 731 | if (!TASK_CMD(task)->se_deve) |
729 | goto after_mode_sense; | 732 | goto after_mode_sense; |
730 | 733 | ||
731 | if (TASK_CMD(task)->se_deve->lun_flags & | 734 | if (TASK_CMD(task)->se_deve->lun_flags & |
732 | TRANSPORT_LUNFLAGS_READ_ONLY) { | 735 | TRANSPORT_LUNFLAGS_READ_ONLY) { |
733 | unsigned char *buf = (unsigned char *) | 736 | unsigned char *buf = (unsigned char *) |
734 | T_TASK(task->task_se_cmd)->t_task_buf; | 737 | T_TASK(task->task_se_cmd)->t_task_buf; |
735 | 738 | ||
736 | if (cdb[0] == MODE_SENSE_10) { | 739 | if (cdb[0] == MODE_SENSE_10) { |
737 | if (!(buf[3] & 0x80)) | 740 | if (!(buf[3] & 0x80)) |
738 | buf[3] |= 0x80; | 741 | buf[3] |= 0x80; |
739 | } else { | 742 | } else { |
740 | if (!(buf[2] & 0x80)) | 743 | if (!(buf[2] & 0x80)) |
741 | buf[2] |= 0x80; | 744 | buf[2] |= 0x80; |
742 | } | 745 | } |
743 | } | 746 | } |
744 | } | 747 | } |
745 | after_mode_sense: | 748 | after_mode_sense: |
746 | 749 | ||
747 | if (sd->type != TYPE_TAPE) | 750 | if (sd->type != TYPE_TAPE) |
748 | goto after_mode_select; | 751 | goto after_mode_select; |
749 | 752 | ||
750 | /* | 753 | /* |
751 | * Hack to correctly obtain the initiator requested blocksize for | 754 | * Hack to correctly obtain the initiator requested blocksize for |
752 | * TYPE_TAPE. Since this value is dependent upon each tape media, | 755 | * TYPE_TAPE. Since this value is dependent upon each tape media, |
753 | * struct scsi_device->sector_size will not contain the correct value | 756 | * struct scsi_device->sector_size will not contain the correct value |
754 | * by default, so we go ahead and set it so | 757 | * by default, so we go ahead and set it so |
755 | * TRANSPORT(dev)->get_blockdev() returns the correct value to the | 758 | * TRANSPORT(dev)->get_blockdev() returns the correct value to the |
756 | * storage engine. | 759 | * storage engine. |
757 | */ | 760 | */ |
758 | if (((cdb[0] == MODE_SELECT) || (cdb[0] == MODE_SELECT_10)) && | 761 | if (((cdb[0] == MODE_SELECT) || (cdb[0] == MODE_SELECT_10)) && |
759 | (status_byte(result) << 1) == SAM_STAT_GOOD) { | 762 | (status_byte(result) << 1) == SAM_STAT_GOOD) { |
760 | unsigned char *buf; | 763 | unsigned char *buf; |
761 | struct scatterlist *sg = task->task_sg; | 764 | struct scatterlist *sg = task->task_sg; |
762 | u16 bdl; | 765 | u16 bdl; |
763 | u32 blocksize; | 766 | u32 blocksize; |
764 | 767 | ||
765 | buf = sg_virt(&sg[0]); | 768 | buf = sg_virt(&sg[0]); |
766 | if (!(buf)) { | 769 | if (!(buf)) { |
767 | printk(KERN_ERR "Unable to get buf for scatterlist\n"); | 770 | printk(KERN_ERR "Unable to get buf for scatterlist\n"); |
768 | goto after_mode_select; | 771 | goto after_mode_select; |
769 | } | 772 | } |
770 | 773 | ||
771 | if (cdb[0] == MODE_SELECT) | 774 | if (cdb[0] == MODE_SELECT) |
772 | bdl = (buf[3]); | 775 | bdl = (buf[3]); |
773 | else | 776 | else |
774 | bdl = (buf[6] << 8) | (buf[7]); | 777 | bdl = (buf[6] << 8) | (buf[7]); |
775 | 778 | ||
776 | if (!bdl) | 779 | if (!bdl) |
777 | goto after_mode_select; | 780 | goto after_mode_select; |
778 | 781 | ||
779 | if (cdb[0] == MODE_SELECT) | 782 | if (cdb[0] == MODE_SELECT) |
780 | blocksize = (buf[9] << 16) | (buf[10] << 8) | | 783 | blocksize = (buf[9] << 16) | (buf[10] << 8) | |
781 | (buf[11]); | 784 | (buf[11]); |
782 | else | 785 | else |
783 | blocksize = (buf[13] << 16) | (buf[14] << 8) | | 786 | blocksize = (buf[13] << 16) | (buf[14] << 8) | |
784 | (buf[15]); | 787 | (buf[15]); |
785 | 788 | ||
786 | sd->sector_size = blocksize; | 789 | sd->sector_size = blocksize; |
787 | } | 790 | } |
788 | after_mode_select: | 791 | after_mode_select: |
789 | 792 | ||
790 | if (status_byte(result) & CHECK_CONDITION) | 793 | if (status_byte(result) & CHECK_CONDITION) |
791 | return 1; | 794 | return 1; |
792 | 795 | ||
793 | return 0; | 796 | return 0; |
794 | } | 797 | } |
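
The TYPE_TAPE branch above digs the 3-byte, big-endian block length out of the first mode parameter block descriptor: bytes 9-11 of a MODE SELECT(6) payload (4-byte header plus 8-byte descriptor), or bytes 13-15 of a MODE SELECT(10) payload (8-byte header). A standalone decode of an illustrative MODE SELECT(6) buffer:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Illustrative MODE SELECT(6) payload: 4-byte mode parameter
	 * header followed by one 8-byte block descriptor. */
	uint8_t buf[12] = { 0 };
	uint16_t bdl;

	buf[3]  = 8;		/* block descriptor length, as read above */
	buf[9]  = 0x00;		/* block length, big-endian ...           */
	buf[10] = 0x02;
	buf[11] = 0x00;		/* ... 0x000200 = 512                     */

	bdl = buf[3];
	if (bdl) {
		uint32_t blocksize = (buf[9] << 16) | (buf[10] << 8) | buf[11];
		printf("blocksize = %u\n", blocksize);	/* prints 512 */
	}
	return 0;
}
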
795 | 798 | ||
796 | static struct se_task * | 799 | static struct se_task * |
797 | pscsi_alloc_task(struct se_cmd *cmd) | 800 | pscsi_alloc_task(struct se_cmd *cmd) |
798 | { | 801 | { |
799 | struct pscsi_plugin_task *pt; | 802 | struct pscsi_plugin_task *pt; |
800 | unsigned char *cdb = T_TASK(cmd)->t_task_cdb; | 803 | unsigned char *cdb = T_TASK(cmd)->t_task_cdb; |
801 | 804 | ||
802 | pt = kzalloc(sizeof(struct pscsi_plugin_task), GFP_KERNEL); | 805 | pt = kzalloc(sizeof(struct pscsi_plugin_task), GFP_KERNEL); |
803 | if (!pt) { | 806 | if (!pt) { |
804 | printk(KERN_ERR "Unable to allocate struct pscsi_plugin_task\n"); | 807 | printk(KERN_ERR "Unable to allocate struct pscsi_plugin_task\n"); |
805 | return NULL; | 808 | return NULL; |
806 | } | 809 | } |
807 | 810 | ||
808 | /* | 811 | /* |
809 | * If TCM Core is signaling a > TCM_MAX_COMMAND_SIZE allocation, | 812 | * If TCM Core is signaling a > TCM_MAX_COMMAND_SIZE allocation, |
810 | * allocate the extended CDB buffer for per struct se_task context | 813 | * allocate the extended CDB buffer for per struct se_task context |
811 | * pt->pscsi_cdb now. | 814 | * pt->pscsi_cdb now. |
812 | */ | 815 | */ |
813 | if (T_TASK(cmd)->t_task_cdb != T_TASK(cmd)->__t_task_cdb) { | 816 | if (T_TASK(cmd)->t_task_cdb != T_TASK(cmd)->__t_task_cdb) { |
814 | 817 | ||
815 | pt->pscsi_cdb = kzalloc(scsi_command_size(cdb), GFP_KERNEL); | 818 | pt->pscsi_cdb = kzalloc(scsi_command_size(cdb), GFP_KERNEL); |
816 | if (!(pt->pscsi_cdb)) { | 819 | if (!(pt->pscsi_cdb)) { |
817 | printk(KERN_ERR "pSCSI: Unable to allocate extended" | 820 | printk(KERN_ERR "pSCSI: Unable to allocate extended" |
818 | " pt->pscsi_cdb\n"); | 821 | " pt->pscsi_cdb\n"); |
819 | kfree(pt); | 822 | kfree(pt); |
820 | return NULL; | 823 | return NULL; |
821 | } | 824 | } |
822 | } else | 825 | } else |
823 | pt->pscsi_cdb = &pt->__pscsi_cdb[0]; | 826 | pt->pscsi_cdb = &pt->__pscsi_cdb[0]; |
824 | 827 | ||
825 | return &pt->pscsi_task; | 828 | return &pt->pscsi_task; |
826 | } | 829 | } |
827 | 830 | ||
828 | static inline void pscsi_blk_init_request( | 831 | static inline void pscsi_blk_init_request( |
829 | struct se_task *task, | 832 | struct se_task *task, |
830 | struct pscsi_plugin_task *pt, | 833 | struct pscsi_plugin_task *pt, |
831 | struct request *req, | 834 | struct request *req, |
832 | int bidi_read) | 835 | int bidi_read) |
833 | { | 836 | { |
834 | /* | 837 | /* |
835 | * Defined as "scsi command" in include/linux/blkdev.h. | 838 | * Defined as "scsi command" in include/linux/blkdev.h. |
836 | */ | 839 | */ |
837 | req->cmd_type = REQ_TYPE_BLOCK_PC; | 840 | req->cmd_type = REQ_TYPE_BLOCK_PC; |
838 | /* | 841 | /* |
839 | * For the extra BIDI-COMMAND READ struct request we do not | 842 | * For the extra BIDI-COMMAND READ struct request we do not |
840 | * need to setup the remaining structure members | 843 | * need to setup the remaining structure members |
841 | */ | 844 | */ |
842 | if (bidi_read) | 845 | if (bidi_read) |
843 | return; | 846 | return; |
844 | /* | 847 | /* |
845 | * Setup the done function pointer for struct request, | 848 | * Setup the done function pointer for struct request, |
846 | * also set the end_io_data pointer to struct se_task. | 849 | * also set the end_io_data pointer to struct se_task. |
847 | */ | 850 | */ |
848 | req->end_io = pscsi_req_done; | 851 | req->end_io = pscsi_req_done; |
849 | req->end_io_data = (void *)task; | 852 | req->end_io_data = (void *)task; |
850 | /* | 853 | /* |
851 | * Load the referenced struct se_task's SCSI CDB into | 854 | * Load the referenced struct se_task's SCSI CDB into |
852 | * include/linux/blkdev.h:struct request->cmd | 855 | * include/linux/blkdev.h:struct request->cmd |
853 | */ | 856 | */ |
854 | req->cmd_len = scsi_command_size(pt->pscsi_cdb); | 857 | req->cmd_len = scsi_command_size(pt->pscsi_cdb); |
855 | req->cmd = &pt->pscsi_cdb[0]; | 858 | req->cmd = &pt->pscsi_cdb[0]; |
856 | /* | 859 | /* |
857 | * Setup pointer for outgoing sense data. | 860 | * Setup pointer for outgoing sense data. |
858 | */ | 861 | */ |
859 | req->sense = (void *)&pt->pscsi_sense[0]; | 862 | req->sense = (void *)&pt->pscsi_sense[0]; |
860 | req->sense_len = 0; | 863 | req->sense_len = 0; |
861 | } | 864 | } |
862 | 865 | ||
863 | /* | 866 | /* |
864 | * Used for pSCSI data payloads for all *NON* SCF_SCSI_DATA_SG_IO_CDB | 867 | * Used for pSCSI data payloads for all *NON* SCF_SCSI_DATA_SG_IO_CDB |
865 | */ | 868 | */ |
866 | static int pscsi_blk_get_request(struct se_task *task) | 869 | static int pscsi_blk_get_request(struct se_task *task) |
867 | { | 870 | { |
868 | struct pscsi_plugin_task *pt = PSCSI_TASK(task); | 871 | struct pscsi_plugin_task *pt = PSCSI_TASK(task); |
869 | struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr; | 872 | struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr; |
870 | 873 | ||
871 | pt->pscsi_req = blk_get_request(pdv->pdv_sd->request_queue, | 874 | pt->pscsi_req = blk_get_request(pdv->pdv_sd->request_queue, |
872 | (task->task_data_direction == DMA_TO_DEVICE), | 875 | (task->task_data_direction == DMA_TO_DEVICE), |
873 | GFP_KERNEL); | 876 | GFP_KERNEL); |
874 | if (!(pt->pscsi_req) || IS_ERR(pt->pscsi_req)) { | 877 | if (!(pt->pscsi_req) || IS_ERR(pt->pscsi_req)) { |
875 | printk(KERN_ERR "PSCSI: blk_get_request() failed: %ld\n", | 878 | printk(KERN_ERR "PSCSI: blk_get_request() failed: %ld\n", |
876 | IS_ERR(pt->pscsi_req)); | 879 | IS_ERR(pt->pscsi_req)); |
877 | return PYX_TRANSPORT_LU_COMM_FAILURE; | 880 | return PYX_TRANSPORT_LU_COMM_FAILURE; |
878 | } | 881 | } |
879 | /* | 882 | /* |
880 | * Setup the newly allocated struct request for REQ_TYPE_BLOCK_PC, | 883 | * Setup the newly allocated struct request for REQ_TYPE_BLOCK_PC, |
881 | * and setup rq callback, CDB and sense. | 884 | * and setup rq callback, CDB and sense. |
882 | */ | 885 | */ |
883 | pscsi_blk_init_request(task, pt, pt->pscsi_req, 0); | 886 | pscsi_blk_init_request(task, pt, pt->pscsi_req, 0); |
884 | return 0; | 887 | return 0; |
885 | } | 888 | } |
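
The failure check above combines a NULL test with the kernel's pointer-encoded-error idiom, in which small negative errno values live in the last page of the address space so a single return value can carry either a valid pointer or an error code. A simplified userspace restatement of that idiom, condensed from the include/linux/err.h pattern and not the driver's code:

#include <stdio.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long error)	{ return (void *)error; }
static long PTR_ERR(const void *ptr)	{ return (long)ptr; }
static int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *req = ERR_PTR(-19);	/* -ENODEV, as an example */

	if (!req || IS_ERR(req))	/* same shape as the check above */
		printf("failed: %ld\n", IS_ERR(req) ? PTR_ERR(req) : 0L);
	return 0;
}
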
886 | 889 | ||
887 | /* pscsi_do_task(): (Part of se_subsystem_api_t template) | 890 | /* pscsi_do_task(): (Part of se_subsystem_api_t template) |
888 | * | 891 | * |
889 | * | 892 | * |
890 | */ | 893 | */ |
891 | static int pscsi_do_task(struct se_task *task) | 894 | static int pscsi_do_task(struct se_task *task) |
892 | { | 895 | { |
893 | struct pscsi_plugin_task *pt = PSCSI_TASK(task); | 896 | struct pscsi_plugin_task *pt = PSCSI_TASK(task); |
894 | struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr; | 897 | struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr; |
895 | /* | 898 | /* |
896 | * Set the struct request->timeout value based on peripheral | 899 | * Set the struct request->timeout value based on peripheral |
897 | * device type from SCSI. | 900 | * device type from SCSI. |
898 | */ | 901 | */ |
899 | if (pdv->pdv_sd->type == TYPE_DISK) | 902 | if (pdv->pdv_sd->type == TYPE_DISK) |
900 | pt->pscsi_req->timeout = PS_TIMEOUT_DISK; | 903 | pt->pscsi_req->timeout = PS_TIMEOUT_DISK; |
901 | else | 904 | else |
902 | pt->pscsi_req->timeout = PS_TIMEOUT_OTHER; | 905 | pt->pscsi_req->timeout = PS_TIMEOUT_OTHER; |
903 | 906 | ||
904 | pt->pscsi_req->retries = PS_RETRY; | 907 | pt->pscsi_req->retries = PS_RETRY; |
905 | /* | 908 | /* |
906 | * Queue the struct request into the struct scsi_device->request_queue. | 909 | * Queue the struct request into the struct scsi_device->request_queue. |
907 | * Also check for HEAD_OF_QUEUE SAM TASK attr from received se_cmd | 910 | * Also check for HEAD_OF_QUEUE SAM TASK attr from received se_cmd |
908 | * descriptor | 911 | * descriptor |
909 | */ | 912 | */ |
910 | blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, pt->pscsi_req, | 913 | blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, pt->pscsi_req, |
911 | (task->task_se_cmd->sam_task_attr == TASK_ATTR_HOQ), | 914 | (task->task_se_cmd->sam_task_attr == TASK_ATTR_HOQ), |
912 | pscsi_req_done); | 915 | pscsi_req_done); |
913 | 916 | ||
914 | return PYX_TRANSPORT_SENT_TO_TRANSPORT; | 917 | return PYX_TRANSPORT_SENT_TO_TRANSPORT; |
915 | } | 918 | } |
916 | 919 | ||
917 | static void pscsi_free_task(struct se_task *task) | 920 | static void pscsi_free_task(struct se_task *task) |
918 | { | 921 | { |
919 | struct pscsi_plugin_task *pt = PSCSI_TASK(task); | 922 | struct pscsi_plugin_task *pt = PSCSI_TASK(task); |
920 | struct se_cmd *cmd = task->task_se_cmd; | 923 | struct se_cmd *cmd = task->task_se_cmd; |
921 | 924 | ||
922 | /* | 925 | /* |
923 | * Release the extended CDB allocation from pscsi_alloc_task() | 926 | * Release the extended CDB allocation from pscsi_alloc_task() |
924 | * if one exists. | 927 | * if one exists. |
925 | */ | 928 | */ |
926 | if (T_TASK(cmd)->t_task_cdb != T_TASK(cmd)->__t_task_cdb) | 929 | if (T_TASK(cmd)->t_task_cdb != T_TASK(cmd)->__t_task_cdb) |
927 | kfree(pt->pscsi_cdb); | 930 | kfree(pt->pscsi_cdb); |
928 | /* | 931 | /* |
929 | * We do not release the bio(s) associated with this task here, as | 932 | * We do not release the bio(s) associated with this task here, as |
930 | * this is handled by bio_put() and pscsi_bi_endio(). | 933 | * this is handled by bio_put() and pscsi_bi_endio(). |
931 | */ | 934 | */ |
932 | kfree(pt); | 935 | kfree(pt); |
933 | } | 936 | } |
934 | 937 | ||
935 | enum { | 938 | enum { |
936 | Opt_scsi_host_id, Opt_scsi_channel_id, Opt_scsi_target_id, | 939 | Opt_scsi_host_id, Opt_scsi_channel_id, Opt_scsi_target_id, |
937 | Opt_scsi_lun_id, Opt_err | 940 | Opt_scsi_lun_id, Opt_err |
938 | }; | 941 | }; |
939 | 942 | ||
940 | static match_table_t tokens = { | 943 | static match_table_t tokens = { |
941 | {Opt_scsi_host_id, "scsi_host_id=%d"}, | 944 | {Opt_scsi_host_id, "scsi_host_id=%d"}, |
942 | {Opt_scsi_channel_id, "scsi_channel_id=%d"}, | 945 | {Opt_scsi_channel_id, "scsi_channel_id=%d"}, |
943 | {Opt_scsi_target_id, "scsi_target_id=%d"}, | 946 | {Opt_scsi_target_id, "scsi_target_id=%d"}, |
944 | {Opt_scsi_lun_id, "scsi_lun_id=%d"}, | 947 | {Opt_scsi_lun_id, "scsi_lun_id=%d"}, |
945 | {Opt_err, NULL} | 948 | {Opt_err, NULL} |
946 | }; | 949 | }; |
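
pscsi_set_configfs_dev_params() below feeds this table to match_token() while strsep() splits the comma-separated configfs write, e.g. "scsi_host_id=2,scsi_channel_id=0". A rough userspace equivalent using only libc, since match_token() is kernel-internal; the option string is an example:

#define _DEFAULT_SOURCE		/* for strsep() on glibc */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char page[] = "scsi_host_id=2,scsi_channel_id=0,scsi_lun_id=3";
	char *opts = strdup(page), *orig = opts, *ptr;
	int arg;

	while ((ptr = strsep(&opts, ",")) != NULL) {
		if (!*ptr)
			continue;
		if (sscanf(ptr, "scsi_host_id=%d", &arg) == 1)
			printf("host id: %d\n", arg);
		else if (sscanf(ptr, "scsi_channel_id=%d", &arg) == 1)
			printf("channel id: %d\n", arg);
		else if (sscanf(ptr, "scsi_lun_id=%d", &arg) == 1)
			printf("lun id: %d\n", arg);
	}
	free(orig);
	return 0;
}
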
947 | 950 | ||
948 | static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba, | 951 | static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba, |
949 | struct se_subsystem_dev *se_dev, | 952 | struct se_subsystem_dev *se_dev, |
950 | const char *page, | 953 | const char *page, |
951 | ssize_t count) | 954 | ssize_t count) |
952 | { | 955 | { |
953 | struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr; | 956 | struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr; |
954 | struct pscsi_hba_virt *phv = hba->hba_ptr; | 957 | struct pscsi_hba_virt *phv = hba->hba_ptr; |
955 | char *orig, *ptr, *opts; | 958 | char *orig, *ptr, *opts; |
956 | substring_t args[MAX_OPT_ARGS]; | 959 | substring_t args[MAX_OPT_ARGS]; |
957 | int ret = 0, arg, token; | 960 | int ret = 0, arg, token; |
958 | 961 | ||
959 | opts = kstrdup(page, GFP_KERNEL); | 962 | opts = kstrdup(page, GFP_KERNEL); |
960 | if (!opts) | 963 | if (!opts) |
961 | return -ENOMEM; | 964 | return -ENOMEM; |
962 | 965 | ||
963 | orig = opts; | 966 | orig = opts; |
964 | 967 | ||
965 | while ((ptr = strsep(&opts, ",")) != NULL) { | 968 | while ((ptr = strsep(&opts, ",")) != NULL) { |
966 | if (!*ptr) | 969 | if (!*ptr) |
967 | continue; | 970 | continue; |
968 | 971 | ||
969 | token = match_token(ptr, tokens, args); | 972 | token = match_token(ptr, tokens, args); |
970 | switch (token) { | 973 | switch (token) { |
971 | case Opt_scsi_host_id: | 974 | case Opt_scsi_host_id: |
972 | if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) { | 975 | if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) { |
973 | printk(KERN_ERR "PSCSI[%d]: Unable to accept" | 976 | printk(KERN_ERR "PSCSI[%d]: Unable to accept" |
974 | " scsi_host_id while phv_mode ==" | 977 | " scsi_host_id while phv_mode ==" |
975 | " PHV_LLD_SCSI_HOST_NO\n", | 978 | " PHV_LLD_SCSI_HOST_NO\n", |
976 | phv->phv_host_id); | 979 | phv->phv_host_id); |
977 | ret = -EINVAL; | 980 | ret = -EINVAL; |
978 | goto out; | 981 | goto out; |
979 | } | 982 | } |
980 | match_int(args, &arg); | 983 | match_int(args, &arg); |
981 | pdv->pdv_host_id = arg; | 984 | pdv->pdv_host_id = arg; |
982 | printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Host ID:" | 985 | printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Host ID:" |
983 | " %d\n", phv->phv_host_id, pdv->pdv_host_id); | 986 | " %d\n", phv->phv_host_id, pdv->pdv_host_id); |
984 | pdv->pdv_flags |= PDF_HAS_VIRT_HOST_ID; | 987 | pdv->pdv_flags |= PDF_HAS_VIRT_HOST_ID; |
985 | break; | 988 | break; |
986 | case Opt_scsi_channel_id: | 989 | case Opt_scsi_channel_id: |
987 | match_int(args, &arg); | 990 | match_int(args, &arg); |
988 | pdv->pdv_channel_id = arg; | 991 | pdv->pdv_channel_id = arg; |
989 | printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Channel" | 992 | printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Channel" |
990 | " ID: %d\n", phv->phv_host_id, | 993 | " ID: %d\n", phv->phv_host_id, |
991 | pdv->pdv_channel_id); | 994 | pdv->pdv_channel_id); |
992 | pdv->pdv_flags |= PDF_HAS_CHANNEL_ID; | 995 | pdv->pdv_flags |= PDF_HAS_CHANNEL_ID; |
993 | break; | 996 | break; |
994 | case Opt_scsi_target_id: | 997 | case Opt_scsi_target_id: |
995 | match_int(args, &arg); | 998 | match_int(args, &arg); |
996 | pdv->pdv_target_id = arg; | 999 | pdv->pdv_target_id = arg; |
997 | printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Target" | 1000 | printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Target" |
998 | " ID: %d\n", phv->phv_host_id, | 1001 | " ID: %d\n", phv->phv_host_id, |
999 | pdv->pdv_target_id); | 1002 | pdv->pdv_target_id); |
1000 | pdv->pdv_flags |= PDF_HAS_TARGET_ID; | 1003 | pdv->pdv_flags |= PDF_HAS_TARGET_ID; |
1001 | break; | 1004 | break; |
1002 | case Opt_scsi_lun_id: | 1005 | case Opt_scsi_lun_id: |
1003 | match_int(args, &arg); | 1006 | match_int(args, &arg); |
1004 | pdv->pdv_lun_id = arg; | 1007 | pdv->pdv_lun_id = arg; |
1005 | printk(KERN_INFO "PSCSI[%d]: Referencing SCSI LUN ID:" | 1008 | printk(KERN_INFO "PSCSI[%d]: Referencing SCSI LUN ID:" |
1006 | " %d\n", phv->phv_host_id, pdv->pdv_lun_id); | 1009 | " %d\n", phv->phv_host_id, pdv->pdv_lun_id); |
1007 | pdv->pdv_flags |= PDF_HAS_LUN_ID; | 1010 | pdv->pdv_flags |= PDF_HAS_LUN_ID; |
1008 | break; | 1011 | break; |
1009 | default: | 1012 | default: |
1010 | break; | 1013 | break; |
1011 | } | 1014 | } |
1012 | } | 1015 | } |
1013 | 1016 | ||
1014 | out: | 1017 | out: |
1015 | kfree(orig); | 1018 | kfree(orig); |
1016 | return (!ret) ? count : ret; | 1019 | return (!ret) ? count : ret; |
1017 | } | 1020 | } |
1018 | 1021 | ||
1019 | static ssize_t pscsi_check_configfs_dev_params( | 1022 | static ssize_t pscsi_check_configfs_dev_params( |
1020 | struct se_hba *hba, | 1023 | struct se_hba *hba, |
1021 | struct se_subsystem_dev *se_dev) | 1024 | struct se_subsystem_dev *se_dev) |
1022 | { | 1025 | { |
1023 | struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr; | 1026 | struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr; |
1024 | 1027 | ||
1025 | if (!(pdv->pdv_flags & PDF_HAS_CHANNEL_ID) || | 1028 | if (!(pdv->pdv_flags & PDF_HAS_CHANNEL_ID) || |
1026 | !(pdv->pdv_flags & PDF_HAS_TARGET_ID) || | 1029 | !(pdv->pdv_flags & PDF_HAS_TARGET_ID) || |
1027 | !(pdv->pdv_flags & PDF_HAS_LUN_ID)) { | 1030 | !(pdv->pdv_flags & PDF_HAS_LUN_ID)) { |
1028 | printk(KERN_ERR "Missing scsi_channel_id=, scsi_target_id= and" | 1031 | printk(KERN_ERR "Missing scsi_channel_id=, scsi_target_id= and" |
1029 | " scsi_lun_id= parameters\n"); | 1032 | " scsi_lun_id= parameters\n"); |
1030 | return -1; | 1033 | return -1; |
1031 | } | 1034 | } |
1032 | 1035 | ||
1033 | return 0; | 1036 | return 0; |
1034 | } | 1037 | } |
1035 | 1038 | ||
1036 | static ssize_t pscsi_show_configfs_dev_params(struct se_hba *hba, | 1039 | static ssize_t pscsi_show_configfs_dev_params(struct se_hba *hba, |
1037 | struct se_subsystem_dev *se_dev, | 1040 | struct se_subsystem_dev *se_dev, |
1038 | char *b) | 1041 | char *b) |
1039 | { | 1042 | { |
1040 | struct pscsi_hba_virt *phv = hba->hba_ptr; | 1043 | struct pscsi_hba_virt *phv = hba->hba_ptr; |
1041 | struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr; | 1044 | struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr; |
1042 | struct scsi_device *sd = pdv->pdv_sd; | 1045 | struct scsi_device *sd = pdv->pdv_sd; |
1043 | unsigned char host_id[16]; | 1046 | unsigned char host_id[16]; |
1044 | ssize_t bl; | 1047 | ssize_t bl; |
1045 | int i; | 1048 | int i; |
1046 | 1049 | ||
1047 | if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) | 1050 | if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) |
1048 | snprintf(host_id, 16, "%d", pdv->pdv_host_id); | 1051 | snprintf(host_id, 16, "%d", pdv->pdv_host_id); |
1049 | else | 1052 | else |
1050 | snprintf(host_id, 16, "PHBA Mode"); | 1053 | snprintf(host_id, 16, "PHBA Mode"); |
1051 | 1054 | ||
1052 | bl = sprintf(b, "SCSI Device Bus Location:" | 1055 | bl = sprintf(b, "SCSI Device Bus Location:" |
1053 | " Channel ID: %d Target ID: %d LUN: %d Host ID: %s\n", | 1056 | " Channel ID: %d Target ID: %d LUN: %d Host ID: %s\n", |
1054 | pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id, | 1057 | pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id, |
1055 | host_id); | 1058 | host_id); |
1056 | 1059 | ||
1057 | if (sd) { | 1060 | if (sd) { |
1058 | bl += sprintf(b + bl, " "); | 1061 | bl += sprintf(b + bl, " "); |
1059 | bl += sprintf(b + bl, "Vendor: "); | 1062 | bl += sprintf(b + bl, "Vendor: "); |
1060 | for (i = 0; i < 8; i++) { | 1063 | for (i = 0; i < 8; i++) { |
1061 | if (ISPRINT(sd->vendor[i])) /* printable character? */ | 1064 | if (ISPRINT(sd->vendor[i])) /* printable character? */ |
1062 | bl += sprintf(b + bl, "%c", sd->vendor[i]); | 1065 | bl += sprintf(b + bl, "%c", sd->vendor[i]); |
1063 | else | 1066 | else |
1064 | bl += sprintf(b + bl, " "); | 1067 | bl += sprintf(b + bl, " "); |
1065 | } | 1068 | } |
1066 | bl += sprintf(b + bl, " Model: "); | 1069 | bl += sprintf(b + bl, " Model: "); |
1067 | for (i = 0; i < 16; i++) { | 1070 | for (i = 0; i < 16; i++) { |
1068 | if (ISPRINT(sd->model[i])) /* printable character ? */ | 1071 | if (ISPRINT(sd->model[i])) /* printable character ? */ |
1069 | bl += sprintf(b + bl, "%c", sd->model[i]); | 1072 | bl += sprintf(b + bl, "%c", sd->model[i]); |
1070 | else | 1073 | else |
1071 | bl += sprintf(b + bl, " "); | 1074 | bl += sprintf(b + bl, " "); |
1072 | } | 1075 | } |
1073 | bl += sprintf(b + bl, " Rev: "); | 1076 | bl += sprintf(b + bl, " Rev: "); |
1074 | for (i = 0; i < 4; i++) { | 1077 | for (i = 0; i < 4; i++) { |
1075 | if (ISPRINT(sd->rev[i])) /* printable character ? */ | 1078 | if (ISPRINT(sd->rev[i])) /* printable character ? */ |
1076 | bl += sprintf(b + bl, "%c", sd->rev[i]); | 1079 | bl += sprintf(b + bl, "%c", sd->rev[i]); |
1077 | else | 1080 | else |
1078 | bl += sprintf(b + bl, " "); | 1081 | bl += sprintf(b + bl, " "); |
1079 | } | 1082 | } |
1080 | bl += sprintf(b + bl, "\n"); | 1083 | bl += sprintf(b + bl, "\n"); |
1081 | } | 1084 | } |
1082 | return bl; | 1085 | return bl; |
1083 | } | 1086 | } |
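
The Vendor/Model/Rev loops above go byte by byte because INQUIRY strings are fixed-width, space-padded, and not NUL-terminated; anything unprintable is replaced with a space. The same idea as a compact userspace helper, with made-up sample bytes:

#include <stdio.h>
#include <ctype.h>

/* Emit exactly 'len' bytes, substituting spaces for unprintables. */
static void put_fixed(const char *p, int len)
{
	int i;

	for (i = 0; i < len; i++)
		putchar(isprint((unsigned char)p[i]) ? p[i] : ' ');
}

int main(void)
{
	char vendor[8] = { 'L', 'I', 'O', '-', 'O', 'R', 'G', 0x01 };

	fputs("Vendor: ", stdout);
	put_fixed(vendor, 8);	/* the 0x01 byte prints as a space */
	putchar('\n');
	return 0;
}
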
1084 | 1087 | ||
1085 | static void pscsi_bi_endio(struct bio *bio, int error) | 1088 | static void pscsi_bi_endio(struct bio *bio, int error) |
1086 | { | 1089 | { |
1087 | bio_put(bio); | 1090 | bio_put(bio); |
1088 | } | 1091 | } |
1089 | 1092 | ||
1090 | static inline struct bio *pscsi_get_bio(struct pscsi_dev_virt *pdv, int sg_num) | 1093 | static inline struct bio *pscsi_get_bio(struct pscsi_dev_virt *pdv, int sg_num) |
1091 | { | 1094 | { |
1092 | struct bio *bio; | 1095 | struct bio *bio; |
1093 | /* | 1096 | /* |
1094 | * Use bio_kmalloc() following the comment for bio -> struct request | 1097 | * Use bio_kmalloc() following the comment for bio -> struct request |
1095 | * in block/blk-core.c:blk_make_request() | 1098 | * in block/blk-core.c:blk_make_request() |
1096 | */ | 1099 | */ |
1097 | bio = bio_kmalloc(GFP_KERNEL, sg_num); | 1100 | bio = bio_kmalloc(GFP_KERNEL, sg_num); |
1098 | if (!(bio)) { | 1101 | if (!(bio)) { |
1099 | printk(KERN_ERR "PSCSI: bio_kmalloc() failed\n"); | 1102 | printk(KERN_ERR "PSCSI: bio_kmalloc() failed\n"); |
1100 | return NULL; | 1103 | return NULL; |
1101 | } | 1104 | } |
1102 | bio->bi_end_io = pscsi_bi_endio; | 1105 | bio->bi_end_io = pscsi_bi_endio; |
1103 | 1106 | ||
1104 | return bio; | 1107 | return bio; |
1105 | } | 1108 | } |
1106 | 1109 | ||
1107 | #if 0 | 1110 | #if 0 |
1108 | #define DEBUG_PSCSI(x...) printk(x) | 1111 | #define DEBUG_PSCSI(x...) printk(x) |
1109 | #else | 1112 | #else |
1110 | #define DEBUG_PSCSI(x...) | 1113 | #define DEBUG_PSCSI(x...) |
1111 | #endif | 1114 | #endif |
1112 | 1115 | ||
1113 | static int __pscsi_map_task_SG( | 1116 | static int __pscsi_map_task_SG( |
1114 | struct se_task *task, | 1117 | struct se_task *task, |
1115 | struct scatterlist *task_sg, | 1118 | struct scatterlist *task_sg, |
1116 | u32 task_sg_num, | 1119 | u32 task_sg_num, |
1117 | int bidi_read) | 1120 | int bidi_read) |
1118 | { | 1121 | { |
1119 | struct pscsi_plugin_task *pt = PSCSI_TASK(task); | 1122 | struct pscsi_plugin_task *pt = PSCSI_TASK(task); |
1120 | struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr; | 1123 | struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr; |
1121 | struct bio *bio = NULL, *hbio = NULL, *tbio = NULL; | 1124 | struct bio *bio = NULL, *hbio = NULL, *tbio = NULL; |
1122 | struct page *page; | 1125 | struct page *page; |
1123 | struct scatterlist *sg; | 1126 | struct scatterlist *sg; |
1124 | u32 data_len = task->task_size, i, len, bytes, off; | 1127 | u32 data_len = task->task_size, i, len, bytes, off; |
1125 | int nr_pages = (task->task_size + task_sg[0].offset + | 1128 | int nr_pages = (task->task_size + task_sg[0].offset + |
1126 | PAGE_SIZE - 1) >> PAGE_SHIFT; | 1129 | PAGE_SIZE - 1) >> PAGE_SHIFT; |
1127 | int nr_vecs = 0, rc, ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; | 1130 | int nr_vecs = 0, rc, ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; |
1128 | int rw = (task->task_data_direction == DMA_TO_DEVICE); | 1131 | int rw = (task->task_data_direction == DMA_TO_DEVICE); |
1129 | 1132 | ||
1130 | if (!task->task_size) | 1133 | if (!task->task_size) |
1131 | return 0; | 1134 | return 0; |
1132 | /* | 1135 | /* |
1133 | * For SCF_SCSI_DATA_SG_IO_CDB, Use fs/bio.c:bio_add_page() to setup | 1136 | * For SCF_SCSI_DATA_SG_IO_CDB, Use fs/bio.c:bio_add_page() to setup |
1134 | * the bio_vec maplist from TCM struct se_mem -> task->task_sg -> | 1137 | * the bio_vec maplist from TCM struct se_mem -> task->task_sg -> |
1135 | * struct scatterlist memory. The struct se_task->task_sg[] currently needs | 1138 | * struct scatterlist memory. The struct se_task->task_sg[] currently needs |
1136 | * to be attached to struct bios for submission to Linux/SCSI using | 1139 | * to be attached to struct bios for submission to Linux/SCSI using |
1137 | * struct request to struct scsi_device->request_queue. | 1140 | * struct request to struct scsi_device->request_queue. |
1138 | * | 1141 | * |
1139 | * Note that this will be changing post v2.6.28 as Target_Core_Mod/pSCSI | 1142 | * Note that this will be changing post v2.6.28 as Target_Core_Mod/pSCSI |
1140 | * is ported to upstream SCSI passthrough functionality that accepts | 1143 | * is ported to upstream SCSI passthrough functionality that accepts |
1141 | * struct scatterlist->page_link or struct page as a parameter. | 1144 | * struct scatterlist->page_link or struct page as a parameter. |
1142 | */ | 1145 | */ |
1143 | DEBUG_PSCSI("PSCSI: nr_pages: %d\n", nr_pages); | 1146 | DEBUG_PSCSI("PSCSI: nr_pages: %d\n", nr_pages); |
1144 | 1147 | ||
1145 | for_each_sg(task_sg, sg, task_sg_num, i) { | 1148 | for_each_sg(task_sg, sg, task_sg_num, i) { |
1146 | page = sg_page(sg); | 1149 | page = sg_page(sg); |
1147 | off = sg->offset; | 1150 | off = sg->offset; |
1148 | len = sg->length; | 1151 | len = sg->length; |
1149 | 1152 | ||
1150 | DEBUG_PSCSI("PSCSI: i: %d page: %p len: %d off: %d\n", i, | 1153 | DEBUG_PSCSI("PSCSI: i: %d page: %p len: %d off: %d\n", i, |
1151 | page, len, off); | 1154 | page, len, off); |
1152 | 1155 | ||
1153 | while (len > 0 && data_len > 0) { | 1156 | while (len > 0 && data_len > 0) { |
1154 | bytes = min_t(unsigned int, len, PAGE_SIZE - off); | 1157 | bytes = min_t(unsigned int, len, PAGE_SIZE - off); |
1155 | bytes = min(bytes, data_len); | 1158 | bytes = min(bytes, data_len); |
1156 | 1159 | ||
1157 | if (!(bio)) { | 1160 | if (!(bio)) { |
1158 | nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages); | 1161 | nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages); |
1159 | nr_pages -= nr_vecs; | 1162 | nr_pages -= nr_vecs; |
1160 | /* | 1163 | /* |
1161 | * Calls bio_kmalloc() and sets bio->bi_end_io() | 1164 | * Calls bio_kmalloc() and sets bio->bi_end_io() |
1162 | */ | 1165 | */ |
1163 | bio = pscsi_get_bio(pdv, nr_vecs); | 1166 | bio = pscsi_get_bio(pdv, nr_vecs); |
1164 | if (!(bio)) | 1167 | if (!(bio)) |
1165 | goto fail; | 1168 | goto fail; |
1166 | 1169 | ||
1167 | if (rw) | 1170 | if (rw) |
1168 | bio->bi_rw |= REQ_WRITE; | 1171 | bio->bi_rw |= REQ_WRITE; |
1169 | 1172 | ||
1170 | DEBUG_PSCSI("PSCSI: Allocated bio: %p," | 1173 | DEBUG_PSCSI("PSCSI: Allocated bio: %p," |
1171 | " dir: %s nr_vecs: %d\n", bio, | 1174 | " dir: %s nr_vecs: %d\n", bio, |
1172 | (rw) ? "rw" : "r", nr_vecs); | 1175 | (rw) ? "rw" : "r", nr_vecs); |
1173 | /* | 1176 | /* |
1174 | * Set *hbio pointer to handle the case: | 1177 | * Set *hbio pointer to handle the case: |
1175 | * nr_pages > BIO_MAX_PAGES, where additional | 1178 | * nr_pages > BIO_MAX_PAGES, where additional |
1176 | * bios need to be added to complete a given | 1179 | * bios need to be added to complete a given |
1177 | * struct se_task | 1180 | * struct se_task |
1178 | */ | 1181 | */ |
1179 | if (!hbio) | 1182 | if (!hbio) |
1180 | hbio = tbio = bio; | 1183 | hbio = tbio = bio; |
1181 | else | 1184 | else |
1182 | tbio = tbio->bi_next = bio; | 1185 | tbio = tbio->bi_next = bio; |
1183 | } | 1186 | } |
1184 | 1187 | ||
1185 | DEBUG_PSCSI("PSCSI: Calling bio_add_pc_page() i: %d" | 1188 | DEBUG_PSCSI("PSCSI: Calling bio_add_pc_page() i: %d" |
1186 | " bio: %p page: %p len: %d off: %d\n", i, bio, | 1189 | " bio: %p page: %p len: %d off: %d\n", i, bio, |
1187 | page, len, off); | 1190 | page, len, off); |
1188 | 1191 | ||
1189 | rc = bio_add_pc_page(pdv->pdv_sd->request_queue, | 1192 | rc = bio_add_pc_page(pdv->pdv_sd->request_queue, |
1190 | bio, page, bytes, off); | 1193 | bio, page, bytes, off); |
1191 | if (rc != bytes) | 1194 | if (rc != bytes) |
1192 | goto fail; | 1195 | goto fail; |
1193 | 1196 | ||
1194 | DEBUG_PSCSI("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n", | 1197 | DEBUG_PSCSI("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n", |
1195 | bio->bi_vcnt, nr_vecs); | 1198 | bio->bi_vcnt, nr_vecs); |
1196 | 1199 | ||
1197 | if (bio->bi_vcnt > nr_vecs) { | 1200 | if (bio->bi_vcnt > nr_vecs) { |
1198 | DEBUG_PSCSI("PSCSI: Reached bio->bi_vcnt max:" | 1201 | DEBUG_PSCSI("PSCSI: Reached bio->bi_vcnt max:" |
1199 | " %d i: %d bio: %p, allocating another" | 1202 | " %d i: %d bio: %p, allocating another" |
1200 | " bio\n", bio->bi_vcnt, i, bio); | 1203 | " bio\n", bio->bi_vcnt, i, bio); |
1201 | /* | 1204 | /* |
1202 | * Clear the pointer so that another bio will | 1205 | * Clear the pointer so that another bio will |
1203 | * be allocated with pscsi_get_bio() above, the | 1206 | * be allocated with pscsi_get_bio() above, the |
1204 | * current bio has already been saved in *tbio and | 1207 | * current bio has already been saved in *tbio and |
1205 | * linked via bio->bi_next. | 1208 | * linked via bio->bi_next. |
1206 | */ | 1209 | */ |
1207 | bio = NULL; | 1210 | bio = NULL; |
1208 | } | 1211 | } |
1209 | 1212 | ||
1210 | page++; | 1213 | page++; |
1211 | len -= bytes; | 1214 | len -= bytes; |
1212 | data_len -= bytes; | 1215 | data_len -= bytes; |
1213 | off = 0; | 1216 | off = 0; |
1214 | } | 1217 | } |
1215 | } | 1218 | } |
1216 | /* | 1219 | /* |
1217 | * Setup the primary pt->pscsi_req used for the non-BIDI and BIDI-COMMAND | 1220 | * Setup the primary pt->pscsi_req used for the non-BIDI and BIDI-COMMAND |
1218 | * primary SCSI WRITE payload mapped for struct se_task->task_sg[] | 1221 | * primary SCSI WRITE payload mapped for struct se_task->task_sg[] |
1219 | */ | 1222 | */ |
1220 | if (!(bidi_read)) { | 1223 | if (!(bidi_read)) { |
1221 | /* | 1224 | /* |
1222 | * Starting with v2.6.31, call blk_make_request() passing in *hbio to | 1225 | * Starting with v2.6.31, call blk_make_request() passing in *hbio to |
1223 | * allocate the pSCSI task a struct request. | 1226 | * allocate the pSCSI task a struct request. |
1224 | */ | 1227 | */ |
1225 | pt->pscsi_req = blk_make_request(pdv->pdv_sd->request_queue, | 1228 | pt->pscsi_req = blk_make_request(pdv->pdv_sd->request_queue, |
1226 | hbio, GFP_KERNEL); | 1229 | hbio, GFP_KERNEL); |
1227 | if (!(pt->pscsi_req)) { | 1230 | if (!(pt->pscsi_req)) { |
1228 | printk(KERN_ERR "pSCSI: blk_make_request() failed\n"); | 1231 | printk(KERN_ERR "pSCSI: blk_make_request() failed\n"); |
1229 | goto fail; | 1232 | goto fail; |
1230 | } | 1233 | } |
1231 | /* | 1234 | /* |
1232 | * Setup the newly allocated struct request for REQ_TYPE_BLOCK_PC, | 1235 | * Setup the newly allocated struct request for REQ_TYPE_BLOCK_PC, |
1233 | * and setup rq callback, CDB and sense. | 1236 | * and setup rq callback, CDB and sense. |
1234 | */ | 1237 | */ |
1235 | pscsi_blk_init_request(task, pt, pt->pscsi_req, 0); | 1238 | pscsi_blk_init_request(task, pt, pt->pscsi_req, 0); |
1236 | 1239 | ||
1237 | return task->task_sg_num; | 1240 | return task->task_sg_num; |
1238 | } | 1241 | } |
1239 | /* | 1242 | /* |
1240 | * Setup the secondary pt->pscsi_req->next_rq used for the extra BIDI-COMMAND | 1243 | * Setup the secondary pt->pscsi_req->next_rq used for the extra BIDI-COMMAND |
1241 | * SCSI READ payload mapped for struct se_task->task_sg_bidi[] | 1244 | * SCSI READ payload mapped for struct se_task->task_sg_bidi[] |
1242 | */ | 1245 | */ |
1243 | pt->pscsi_req->next_rq = blk_make_request(pdv->pdv_sd->request_queue, | 1246 | pt->pscsi_req->next_rq = blk_make_request(pdv->pdv_sd->request_queue, |
1244 | hbio, GFP_KERNEL); | 1247 | hbio, GFP_KERNEL); |
1245 | if (!(pt->pscsi_req->next_rq)) { | 1248 | if (!(pt->pscsi_req->next_rq)) { |
1246 | printk(KERN_ERR "pSCSI: blk_make_request() failed for BIDI\n"); | 1249 | printk(KERN_ERR "pSCSI: blk_make_request() failed for BIDI\n"); |
1247 | goto fail; | 1250 | goto fail; |
1248 | } | 1251 | } |
1249 | pscsi_blk_init_request(task, pt, pt->pscsi_req->next_rq, 1); | 1252 | pscsi_blk_init_request(task, pt, pt->pscsi_req->next_rq, 1); |
1250 | 1253 | ||
1251 | return task->task_sg_num; | 1254 | return task->task_sg_num; |
1252 | fail: | 1255 | fail: |
1253 | while (hbio) { | 1256 | while (hbio) { |
1254 | bio = hbio; | 1257 | bio = hbio; |
1255 | hbio = hbio->bi_next; | 1258 | hbio = hbio->bi_next; |
1256 | bio->bi_next = NULL; | 1259 | bio->bi_next = NULL; |
1257 | bio_endio(bio, 0); | 1260 | bio_endio(bio, 0); |
1258 | } | 1261 | } |
1259 | return ret; | 1262 | return ret; |
1260 | } | 1263 | } |
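
The inner loop above carves each scatterlist entry into per-page chunks: an entry may start mid-page and span several pages, so every bio_add_pc_page() call is capped both at the remainder of the current page and at the bytes still owed to the command. A minimal sketch of that computation (the helper name is hypothetical, not code from this commit):

	/*
	 * Mirror of the chunk math above. Example with PAGE_SIZE == 4096:
	 * off = 512, len = 6000 maps as 3584 bytes from the first page,
	 * then 2416 bytes from the second.
	 */
	static unsigned int pscsi_chunk_len(unsigned int len, unsigned int off,
					    u32 data_len)
	{
		unsigned int bytes = min_t(unsigned int, len, PAGE_SIZE - off);

		return min_t(unsigned int, bytes, data_len);
	}
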
1261 | 1264 | ||
1262 | static int pscsi_map_task_SG(struct se_task *task) | 1265 | static int pscsi_map_task_SG(struct se_task *task) |
1263 | { | 1266 | { |
1264 | int ret; | 1267 | int ret; |
1265 | 1268 | ||
1266 | /* | 1269 | /* |
1267 | * Setup the main struct request for the task->task_sg[] payload | 1270 | * Setup the main struct request for the task->task_sg[] payload |
1268 | */ | 1271 | */ |
1269 | 1272 | ||
1270 | ret = __pscsi_map_task_SG(task, task->task_sg, task->task_sg_num, 0); | 1273 | ret = __pscsi_map_task_SG(task, task->task_sg, task->task_sg_num, 0); |
1271 | if (ret >= 0 && task->task_sg_bidi) { | 1274 | if (ret >= 0 && task->task_sg_bidi) { |
1272 | /* | 1275 | /* |
1273 | * If present, set up the extra BIDI-COMMAND SCSI READ | 1276 | * If present, set up the extra BIDI-COMMAND SCSI READ |
1274 | * struct request and payload. | 1277 | * struct request and payload. |
1275 | */ | 1278 | */ |
1276 | ret = __pscsi_map_task_SG(task, task->task_sg_bidi, | 1279 | ret = __pscsi_map_task_SG(task, task->task_sg_bidi, |
1277 | task->task_sg_num, 1); | 1280 | task->task_sg_num, 1); |
1278 | } | 1281 | } |
1279 | 1282 | ||
1280 | if (ret < 0) | 1283 | if (ret < 0) |
1281 | return PYX_TRANSPORT_LU_COMM_FAILURE; | 1284 | return PYX_TRANSPORT_LU_COMM_FAILURE; |
1282 | return 0; | 1285 | return 0; |
1283 | } | 1286 | } |
1284 | 1287 | ||
1285 | /* pscsi_map_task_non_SG(): | 1288 | /* pscsi_map_task_non_SG(): |
1286 | * | 1289 | * |
1287 | * | 1290 | * |
1288 | */ | 1291 | */ |
1289 | static int pscsi_map_task_non_SG(struct se_task *task) | 1292 | static int pscsi_map_task_non_SG(struct se_task *task) |
1290 | { | 1293 | { |
1291 | struct se_cmd *cmd = TASK_CMD(task); | 1294 | struct se_cmd *cmd = TASK_CMD(task); |
1292 | struct pscsi_plugin_task *pt = PSCSI_TASK(task); | 1295 | struct pscsi_plugin_task *pt = PSCSI_TASK(task); |
1293 | struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr; | 1296 | struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr; |
1294 | int ret = 0; | 1297 | int ret = 0; |
1295 | 1298 | ||
1296 | if (pscsi_blk_get_request(task) < 0) | 1299 | if (pscsi_blk_get_request(task) < 0) |
1297 | return PYX_TRANSPORT_LU_COMM_FAILURE; | 1300 | return PYX_TRANSPORT_LU_COMM_FAILURE; |
1298 | 1301 | ||
1299 | if (!task->task_size) | 1302 | if (!task->task_size) |
1300 | return 0; | 1303 | return 0; |
1301 | 1304 | ||
1302 | ret = blk_rq_map_kern(pdv->pdv_sd->request_queue, | 1305 | ret = blk_rq_map_kern(pdv->pdv_sd->request_queue, |
1303 | pt->pscsi_req, T_TASK(cmd)->t_task_buf, | 1306 | pt->pscsi_req, T_TASK(cmd)->t_task_buf, |
1304 | task->task_size, GFP_KERNEL); | 1307 | task->task_size, GFP_KERNEL); |
1305 | if (ret < 0) { | 1308 | if (ret < 0) { |
1306 | printk(KERN_ERR "PSCSI: blk_rq_map_kern() failed: %d\n", ret); | 1309 | printk(KERN_ERR "PSCSI: blk_rq_map_kern() failed: %d\n", ret); |
1307 | return PYX_TRANSPORT_LU_COMM_FAILURE; | 1310 | return PYX_TRANSPORT_LU_COMM_FAILURE; |
1308 | } | 1311 | } |
1309 | return 0; | 1312 | return 0; |
1310 | } | 1313 | } |
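
blk_rq_map_kern() maps (or bounce-buffers) a single contiguous kernel buffer into the BLOCK_PC request, so no scatterlist is needed on the non-SG path. A minimal usage sketch, with q, req, buf and len as caller-supplied placeholders:

	/* Map a contiguous kernel buffer; a negative return means the
	 * buffer could not be attached to the request. */
	int ret = blk_rq_map_kern(q, req, buf, len, GFP_KERNEL);
	if (ret < 0)
		return ret;
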
1311 | 1314 | ||
1312 | static int pscsi_CDB_none(struct se_task *task) | 1315 | static int pscsi_CDB_none(struct se_task *task) |
1313 | { | 1316 | { |
1314 | return pscsi_blk_get_request(task); | 1317 | return pscsi_blk_get_request(task); |
1315 | } | 1318 | } |
1316 | 1319 | ||
1317 | /* pscsi_get_cdb(): | 1320 | /* pscsi_get_cdb(): |
1318 | * | 1321 | * |
1319 | * | 1322 | * |
1320 | */ | 1323 | */ |
1321 | static unsigned char *pscsi_get_cdb(struct se_task *task) | 1324 | static unsigned char *pscsi_get_cdb(struct se_task *task) |
1322 | { | 1325 | { |
1323 | struct pscsi_plugin_task *pt = PSCSI_TASK(task); | 1326 | struct pscsi_plugin_task *pt = PSCSI_TASK(task); |
1324 | 1327 | ||
1325 | return pt->pscsi_cdb; | 1328 | return pt->pscsi_cdb; |
1326 | } | 1329 | } |
1327 | 1330 | ||
1328 | /* pscsi_get_sense_buffer(): | 1331 | /* pscsi_get_sense_buffer(): |
1329 | * | 1332 | * |
1330 | * | 1333 | * |
1331 | */ | 1334 | */ |
1332 | static unsigned char *pscsi_get_sense_buffer(struct se_task *task) | 1335 | static unsigned char *pscsi_get_sense_buffer(struct se_task *task) |
1333 | { | 1336 | { |
1334 | struct pscsi_plugin_task *pt = PSCSI_TASK(task); | 1337 | struct pscsi_plugin_task *pt = PSCSI_TASK(task); |
1335 | 1338 | ||
1336 | return (unsigned char *)&pt->pscsi_sense[0]; | 1339 | return (unsigned char *)&pt->pscsi_sense[0]; |
1337 | } | 1340 | } |
1338 | 1341 | ||
1339 | /* pscsi_get_device_rev(): | 1342 | /* pscsi_get_device_rev(): |
1340 | * | 1343 | * |
1341 | * | 1344 | * |
1342 | */ | 1345 | */ |
1343 | static u32 pscsi_get_device_rev(struct se_device *dev) | 1346 | static u32 pscsi_get_device_rev(struct se_device *dev) |
1344 | { | 1347 | { |
1345 | struct pscsi_dev_virt *pdv = dev->dev_ptr; | 1348 | struct pscsi_dev_virt *pdv = dev->dev_ptr; |
1346 | struct scsi_device *sd = pdv->pdv_sd; | 1349 | struct scsi_device *sd = pdv->pdv_sd; |
1347 | 1350 | ||
1348 | return (sd->scsi_level - 1) ? sd->scsi_level - 1 : 1; | 1351 | return (sd->scsi_level - 1) ? sd->scsi_level - 1 : 1; |
1349 | } | 1352 | } |
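
The SCSI midlayer stores the INQUIRY ANSI version plus one in scsi_device->scsi_level (so SCSI_2 == 3), which is why subtracting one recovers the reported revision; the ternary only clamps legacy devices that would otherwise compute zero. A worked example under that assumption:

	/* scsi_level == SCSI_2 (3)   ->  returns 2
	 * scsi_level == SCSI_3 (4)   ->  returns 3
	 * scsi_level == 1  (SCSI-1)  ->  (1 - 1) == 0, clamped to 1 */
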
1350 | 1353 | ||
1351 | /* pscsi_get_device_type(): | 1354 | /* pscsi_get_device_type(): |
1352 | * | 1355 | * |
1353 | * | 1356 | * |
1354 | */ | 1357 | */ |
1355 | static u32 pscsi_get_device_type(struct se_device *dev) | 1358 | static u32 pscsi_get_device_type(struct se_device *dev) |
1356 | { | 1359 | { |
1357 | struct pscsi_dev_virt *pdv = dev->dev_ptr; | 1360 | struct pscsi_dev_virt *pdv = dev->dev_ptr; |
1358 | struct scsi_device *sd = pdv->pdv_sd; | 1361 | struct scsi_device *sd = pdv->pdv_sd; |
1359 | 1362 | ||
1360 | return sd->type; | 1363 | return sd->type; |
1361 | } | 1364 | } |
1362 | 1365 | ||
1363 | static sector_t pscsi_get_blocks(struct se_device *dev) | 1366 | static sector_t pscsi_get_blocks(struct se_device *dev) |
1364 | { | 1367 | { |
1365 | struct pscsi_dev_virt *pdv = dev->dev_ptr; | 1368 | struct pscsi_dev_virt *pdv = dev->dev_ptr; |
1366 | 1369 | ||
1367 | if (pdv->pdv_bd && pdv->pdv_bd->bd_part) | 1370 | if (pdv->pdv_bd && pdv->pdv_bd->bd_part) |
1368 | return pdv->pdv_bd->bd_part->nr_sects; | 1371 | return pdv->pdv_bd->bd_part->nr_sects; |
1369 | 1372 | ||
1370 | dump_stack(); | 1373 | dump_stack(); |
1371 | return 0; | 1374 | return 0; |
1372 | } | 1375 | } |
1373 | 1376 | ||
1374 | /* pscsi_handle_SAM_STATUS_failures(): | 1377 | /* pscsi_handle_SAM_STATUS_failures(): |
1375 | * | 1378 | * |
1376 | * | 1379 | * |
1377 | */ | 1380 | */ |
1378 | static inline void pscsi_process_SAM_status( | 1381 | static inline void pscsi_process_SAM_status( |
1379 | struct se_task *task, | 1382 | struct se_task *task, |
1380 | struct pscsi_plugin_task *pt) | 1383 | struct pscsi_plugin_task *pt) |
1381 | { | 1384 | { |
1382 | task->task_scsi_status = status_byte(pt->pscsi_result); | 1385 | task->task_scsi_status = status_byte(pt->pscsi_result); |
1383 | if ((task->task_scsi_status)) { | 1386 | if ((task->task_scsi_status)) { |
1384 | task->task_scsi_status <<= 1; | 1387 | task->task_scsi_status <<= 1; |
1385 | printk(KERN_INFO "PSCSI Status Byte exception at task: %p CDB:" | 1388 | printk(KERN_INFO "PSCSI Status Byte exception at task: %p CDB:" |
1386 | " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0], | 1389 | " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0], |
1387 | pt->pscsi_result); | 1390 | pt->pscsi_result); |
1388 | } | 1391 | } |
1389 | 1392 | ||
1390 | switch (host_byte(pt->pscsi_result)) { | 1393 | switch (host_byte(pt->pscsi_result)) { |
1391 | case DID_OK: | 1394 | case DID_OK: |
1392 | transport_complete_task(task, (!task->task_scsi_status)); | 1395 | transport_complete_task(task, (!task->task_scsi_status)); |
1393 | break; | 1396 | break; |
1394 | default: | 1397 | default: |
1395 | printk(KERN_INFO "PSCSI Host Byte exception at task: %p CDB:" | 1398 | printk(KERN_INFO "PSCSI Host Byte exception at task: %p CDB:" |
1396 | " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0], | 1399 | " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0], |
1397 | pt->pscsi_result); | 1400 | pt->pscsi_result); |
1398 | task->task_scsi_status = SAM_STAT_CHECK_CONDITION; | 1401 | task->task_scsi_status = SAM_STAT_CHECK_CONDITION; |
1399 | task->task_error_status = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; | 1402 | task->task_error_status = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; |
1400 | TASK_CMD(task)->transport_error_status = | 1403 | TASK_CMD(task)->transport_error_status = |
1401 | PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; | 1404 | PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; |
1402 | transport_complete_task(task, 0); | 1405 | transport_complete_task(task, 0); |
1403 | break; | 1406 | break; |
1404 | } | 1407 | } |
1405 | 1408 | ||
1406 | return; | 1409 | return; |
1407 | } | 1410 | } |
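
The midlayer's status_byte() is ((result) >> 1) & 0x7f, i.e. the SAM status value halved, so the left shift above restores the SAM encoding before the status is reported back to the target core. A sketch of the round trip (hypothetical helper name):

	/* result 0x02 -> status_byte() 0x01 -> << 1 -> 0x02,
	 * which is SAM_STAT_CHECK_CONDITION again. */
	static inline u8 pscsi_sam_status(int result)
	{
		return status_byte(result) << 1;
	}
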
1408 | 1411 | ||
1409 | static void pscsi_req_done(struct request *req, int uptodate) | 1412 | static void pscsi_req_done(struct request *req, int uptodate) |
1410 | { | 1413 | { |
1411 | struct se_task *task = req->end_io_data; | 1414 | struct se_task *task = req->end_io_data; |
1412 | struct pscsi_plugin_task *pt = PSCSI_TASK(task); | 1415 | struct pscsi_plugin_task *pt = PSCSI_TASK(task); |
1413 | 1416 | ||
1414 | pt->pscsi_result = req->errors; | 1417 | pt->pscsi_result = req->errors; |
1415 | pt->pscsi_resid = req->resid_len; | 1418 | pt->pscsi_resid = req->resid_len; |
1416 | 1419 | ||
1417 | pscsi_process_SAM_status(task, pt); | 1420 | pscsi_process_SAM_status(task, pt); |
1418 | /* | 1421 | /* |
1419 | * Release BIDI-READ if present | 1422 | * Release BIDI-READ if present |
1420 | */ | 1423 | */ |
1421 | if (req->next_rq != NULL) | 1424 | if (req->next_rq != NULL) |
1422 | __blk_put_request(req->q, req->next_rq); | 1425 | __blk_put_request(req->q, req->next_rq); |
1423 | 1426 | ||
1424 | __blk_put_request(req->q, req); | 1427 | __blk_put_request(req->q, req); |
1425 | pt->pscsi_req = NULL; | 1428 | pt->pscsi_req = NULL; |
1426 | } | 1429 | } |
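
pscsi_req_done() runs as the request's end_io callback, which the block layer invokes with the queue lock held; that is why the lock-already-held __blk_put_request() variants are used. A sketch of how the submit side would wire this up, assuming pt->pscsi_req was built as above (the submit path itself is not part of this hunk):

	pt->pscsi_req->end_io_data = task;
	blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL,
			pt->pscsi_req, 1, pscsi_req_done);
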
1427 | 1430 | ||
1428 | static struct se_subsystem_api pscsi_template = { | 1431 | static struct se_subsystem_api pscsi_template = { |
1429 | .name = "pscsi", | 1432 | .name = "pscsi", |
1430 | .owner = THIS_MODULE, | 1433 | .owner = THIS_MODULE, |
1431 | .transport_type = TRANSPORT_PLUGIN_PHBA_PDEV, | 1434 | .transport_type = TRANSPORT_PLUGIN_PHBA_PDEV, |
1432 | .cdb_none = pscsi_CDB_none, | 1435 | .cdb_none = pscsi_CDB_none, |
1433 | .map_task_non_SG = pscsi_map_task_non_SG, | 1436 | .map_task_non_SG = pscsi_map_task_non_SG, |
1434 | .map_task_SG = pscsi_map_task_SG, | 1437 | .map_task_SG = pscsi_map_task_SG, |
1435 | .attach_hba = pscsi_attach_hba, | 1438 | .attach_hba = pscsi_attach_hba, |
1436 | .detach_hba = pscsi_detach_hba, | 1439 | .detach_hba = pscsi_detach_hba, |
1437 | .pmode_enable_hba = pscsi_pmode_enable_hba, | 1440 | .pmode_enable_hba = pscsi_pmode_enable_hba, |
1438 | .allocate_virtdevice = pscsi_allocate_virtdevice, | 1441 | .allocate_virtdevice = pscsi_allocate_virtdevice, |
1439 | .create_virtdevice = pscsi_create_virtdevice, | 1442 | .create_virtdevice = pscsi_create_virtdevice, |
1440 | .free_device = pscsi_free_device, | 1443 | .free_device = pscsi_free_device, |
1441 | .transport_complete = pscsi_transport_complete, | 1444 | .transport_complete = pscsi_transport_complete, |
1442 | .alloc_task = pscsi_alloc_task, | 1445 | .alloc_task = pscsi_alloc_task, |
1443 | .do_task = pscsi_do_task, | 1446 | .do_task = pscsi_do_task, |
1444 | .free_task = pscsi_free_task, | 1447 | .free_task = pscsi_free_task, |
1445 | .check_configfs_dev_params = pscsi_check_configfs_dev_params, | 1448 | .check_configfs_dev_params = pscsi_check_configfs_dev_params, |
1446 | .set_configfs_dev_params = pscsi_set_configfs_dev_params, | 1449 | .set_configfs_dev_params = pscsi_set_configfs_dev_params, |
1447 | .show_configfs_dev_params = pscsi_show_configfs_dev_params, | 1450 | .show_configfs_dev_params = pscsi_show_configfs_dev_params, |
1448 | .get_cdb = pscsi_get_cdb, | 1451 | .get_cdb = pscsi_get_cdb, |
1449 | .get_sense_buffer = pscsi_get_sense_buffer, | 1452 | .get_sense_buffer = pscsi_get_sense_buffer, |
1450 | .get_device_rev = pscsi_get_device_rev, | 1453 | .get_device_rev = pscsi_get_device_rev, |
1451 | .get_device_type = pscsi_get_device_type, | 1454 | .get_device_type = pscsi_get_device_type, |
1452 | .get_blocks = pscsi_get_blocks, | 1455 | .get_blocks = pscsi_get_blocks, |
1453 | }; | 1456 | }; |
1454 | 1457 | ||
1455 | static int __init pscsi_module_init(void) | 1458 | static int __init pscsi_module_init(void) |
1456 | { | 1459 | { |
1457 | return transport_subsystem_register(&pscsi_template); | 1460 | return transport_subsystem_register(&pscsi_template); |
1458 | } | 1461 | } |
1459 | 1462 | ||
1460 | static void pscsi_module_exit(void) | 1463 | static void pscsi_module_exit(void) |
1461 | { | 1464 | { |
1462 | transport_subsystem_release(&pscsi_template); | 1465 | transport_subsystem_release(&pscsi_template); |
1463 | } | 1466 | } |
1464 | 1467 | ||
1465 | MODULE_DESCRIPTION("TCM PSCSI subsystem plugin"); | 1468 | MODULE_DESCRIPTION("TCM PSCSI subsystem plugin"); |
1466 | MODULE_AUTHOR("nab@Linux-iSCSI.org"); | 1469 | MODULE_AUTHOR("nab@Linux-iSCSI.org"); |
1467 | MODULE_LICENSE("GPL"); | 1470 | MODULE_LICENSE("GPL"); |
1468 | 1471 | ||
1469 | module_init(pscsi_module_init); | 1472 | module_init(pscsi_module_init); |
1470 | module_exit(pscsi_module_exit); | 1473 | module_exit(pscsi_module_exit); |
1471 | 1474 |
drivers/target/target_core_rd.h
1 | #ifndef TARGET_CORE_RD_H | 1 | #ifndef TARGET_CORE_RD_H |
2 | #define TARGET_CORE_RD_H | 2 | #define TARGET_CORE_RD_H |
3 | 3 | ||
4 | #define RD_HBA_VERSION "v4.0" | 4 | #define RD_HBA_VERSION "v4.0" |
5 | #define RD_DR_VERSION "4.0" | 5 | #define RD_DR_VERSION "4.0" |
6 | #define RD_MCP_VERSION "4.0" | 6 | #define RD_MCP_VERSION "4.0" |
7 | 7 | ||
8 | /* Largest piece of memory kmalloc can allocate */ | 8 | /* Largest piece of memory kmalloc can allocate */ |
9 | #define RD_MAX_ALLOCATION_SIZE 65536 | 9 | #define RD_MAX_ALLOCATION_SIZE 65536 |
10 | /* Maximum queue depth for the Ramdisk HBA */ | 10 | /* Maximum queue depth for the Ramdisk HBA */ |
11 | #define RD_HBA_QUEUE_DEPTH 256 | 11 | #define RD_HBA_QUEUE_DEPTH 256 |
12 | #define RD_DEVICE_QUEUE_DEPTH 32 | 12 | #define RD_DEVICE_QUEUE_DEPTH 32 |
13 | #define RD_MAX_DEVICE_QUEUE_DEPTH 128 | 13 | #define RD_MAX_DEVICE_QUEUE_DEPTH 128 |
14 | #define RD_BLOCKSIZE 512 | 14 | #define RD_BLOCKSIZE 512 |
15 | #define RD_MAX_SECTORS 1024 | 15 | #define RD_MAX_SECTORS 1024 |
16 | 16 | ||
17 | extern struct kmem_cache *se_mem_cache; | ||
18 | |||
19 | /* Used in target_core_init_configfs() for virtual LUN 0 access */ | 17 | /* Used in target_core_init_configfs() for virtual LUN 0 access */ |
20 | int __init rd_module_init(void); | 18 | int __init rd_module_init(void); |
21 | void rd_module_exit(void); | 19 | void rd_module_exit(void); |
22 | 20 | ||
23 | #define RRF_EMULATE_CDB 0x01 | 21 | #define RRF_EMULATE_CDB 0x01 |
24 | #define RRF_GOT_LBA 0x02 | 22 | #define RRF_GOT_LBA 0x02 |
25 | 23 | ||
26 | struct rd_request { | 24 | struct rd_request { |
27 | struct se_task rd_task; | 25 | struct se_task rd_task; |
28 | 26 | ||
29 | /* SCSI CDB from iSCSI Command PDU */ | 27 | /* SCSI CDB from iSCSI Command PDU */ |
30 | unsigned char rd_scsi_cdb[TCM_MAX_COMMAND_SIZE]; | 28 | unsigned char rd_scsi_cdb[TCM_MAX_COMMAND_SIZE]; |
31 | /* Offset from start of page */ | 29 | /* Offset from start of page */ |
32 | u32 rd_offset; | 30 | u32 rd_offset; |
33 | /* Starting page in Ramdisk for request */ | 31 | /* Starting page in Ramdisk for request */ |
34 | u32 rd_page; | 32 | u32 rd_page; |
35 | /* Total number of pages needed for request */ | 33 | /* Total number of pages needed for request */ |
36 | u32 rd_page_count; | 34 | u32 rd_page_count; |
37 | /* Scatterlist count */ | 35 | /* Scatterlist count */ |
38 | u32 rd_size; | 36 | u32 rd_size; |
39 | /* Ramdisk device */ | 37 | /* Ramdisk device */ |
40 | struct rd_dev *rd_dev; | 38 | struct rd_dev *rd_dev; |
41 | } ____cacheline_aligned; | 39 | } ____cacheline_aligned; |
42 | 40 | ||
43 | struct rd_dev_sg_table { | 41 | struct rd_dev_sg_table { |
44 | u32 page_start_offset; | 42 | u32 page_start_offset; |
45 | u32 page_end_offset; | 43 | u32 page_end_offset; |
46 | u32 rd_sg_count; | 44 | u32 rd_sg_count; |
47 | struct scatterlist *sg_table; | 45 | struct scatterlist *sg_table; |
48 | } ____cacheline_aligned; | 46 | } ____cacheline_aligned; |
49 | 47 | ||
50 | #define RDF_HAS_PAGE_COUNT 0x01 | 48 | #define RDF_HAS_PAGE_COUNT 0x01 |
51 | 49 | ||
52 | struct rd_dev { | 50 | struct rd_dev { |
53 | int rd_direct; | 51 | int rd_direct; |
54 | u32 rd_flags; | 52 | u32 rd_flags; |
55 | /* Unique Ramdisk Device ID in Ramdisk HBA */ | 53 | /* Unique Ramdisk Device ID in Ramdisk HBA */ |
56 | u32 rd_dev_id; | 54 | u32 rd_dev_id; |
57 | /* Total page count for ramdisk device */ | 55 | /* Total page count for ramdisk device */ |
58 | u32 rd_page_count; | 56 | u32 rd_page_count; |
59 | /* Number of SG tables in sg_table_array */ | 57 | /* Number of SG tables in sg_table_array */ |
60 | u32 sg_table_count; | 58 | u32 sg_table_count; |
61 | u32 rd_queue_depth; | 59 | u32 rd_queue_depth; |
62 | /* Array of rd_dev_sg_table_t containing scatterlists */ | 60 | /* Array of rd_dev_sg_table_t containing scatterlists */ |
63 | struct rd_dev_sg_table *sg_table_array; | 61 | struct rd_dev_sg_table *sg_table_array; |
64 | /* Ramdisk HBA device is connected to */ | 62 | /* Ramdisk HBA device is connected to */ |
65 | struct rd_host *rd_host; | 63 | struct rd_host *rd_host; |
66 | } ____cacheline_aligned; | 64 | } ____cacheline_aligned; |
67 | 65 | ||
68 | struct rd_host { | 66 | struct rd_host { |
69 | u32 rd_host_dev_id_count; | 67 | u32 rd_host_dev_id_count; |
70 | u32 rd_host_id; /* Unique Ramdisk Host ID */ | 68 | u32 rd_host_id; /* Unique Ramdisk Host ID */ |
71 | } ____cacheline_aligned; | 69 | } ____cacheline_aligned; |
72 | 70 | ||
73 | #endif /* TARGET_CORE_RD_H */ | 71 | #endif /* TARGET_CORE_RD_H */ |
74 | 72 |
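
RD_MAX_ALLOCATION_SIZE caps each scatterlist table at what a single kmalloc() can reliably return, which in turn bounds the entries per rd_dev_sg_table. A sketch of that derivation (the macro name is hypothetical; the ramdisk backend computes this at device setup):

	/* Entries per table, e.g. 65536 / 32 == 2048 if a
	 * struct scatterlist is 32 bytes on the build. */
	#define RD_MAX_SG_PER_TABLE \
		(RD_MAX_ALLOCATION_SIZE / sizeof(struct scatterlist))
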
drivers/target/target_core_transport.c
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | * Filename: target_core_transport.c | 2 | * Filename: target_core_transport.c |
3 | * | 3 | * |
4 | * This file contains the Generic Target Engine Core. | 4 | * This file contains the Generic Target Engine Core. |
5 | * | 5 | * |
6 | * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc. | 6 | * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc. |
7 | * Copyright (c) 2005, 2006, 2007 SBE, Inc. | 7 | * Copyright (c) 2005, 2006, 2007 SBE, Inc. |
8 | * Copyright (c) 2007-2010 Rising Tide Systems | 8 | * Copyright (c) 2007-2010 Rising Tide Systems |
9 | * Copyright (c) 2008-2010 Linux-iSCSI.org | 9 | * Copyright (c) 2008-2010 Linux-iSCSI.org |
10 | * | 10 | * |
11 | * Nicholas A. Bellinger <nab@kernel.org> | 11 | * Nicholas A. Bellinger <nab@kernel.org> |
12 | * | 12 | * |
13 | * This program is free software; you can redistribute it and/or modify | 13 | * This program is free software; you can redistribute it and/or modify |
14 | * it under the terms of the GNU General Public License as published by | 14 | * it under the terms of the GNU General Public License as published by |
15 | * the Free Software Foundation; either version 2 of the License, or | 15 | * the Free Software Foundation; either version 2 of the License, or |
16 | * (at your option) any later version. | 16 | * (at your option) any later version. |
17 | * | 17 | * |
18 | * This program is distributed in the hope that it will be useful, | 18 | * This program is distributed in the hope that it will be useful, |
19 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 19 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
20 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 20 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
21 | * GNU General Public License for more details. | 21 | * GNU General Public License for more details. |
22 | * | 22 | * |
23 | * You should have received a copy of the GNU General Public License | 23 | * You should have received a copy of the GNU General Public License |
24 | * along with this program; if not, write to the Free Software | 24 | * along with this program; if not, write to the Free Software |
25 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | 25 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. |
26 | * | 26 | * |
27 | ******************************************************************************/ | 27 | ******************************************************************************/ |
28 | 28 | ||
29 | #include <linux/version.h> | 29 | #include <linux/version.h> |
30 | #include <linux/net.h> | 30 | #include <linux/net.h> |
31 | #include <linux/delay.h> | 31 | #include <linux/delay.h> |
32 | #include <linux/string.h> | 32 | #include <linux/string.h> |
33 | #include <linux/timer.h> | 33 | #include <linux/timer.h> |
34 | #include <linux/slab.h> | 34 | #include <linux/slab.h> |
35 | #include <linux/blkdev.h> | 35 | #include <linux/blkdev.h> |
36 | #include <linux/spinlock.h> | 36 | #include <linux/spinlock.h> |
37 | #include <linux/kthread.h> | 37 | #include <linux/kthread.h> |
38 | #include <linux/in.h> | 38 | #include <linux/in.h> |
39 | #include <linux/cdrom.h> | 39 | #include <linux/cdrom.h> |
40 | #include <asm/unaligned.h> | 40 | #include <asm/unaligned.h> |
41 | #include <net/sock.h> | 41 | #include <net/sock.h> |
42 | #include <net/tcp.h> | 42 | #include <net/tcp.h> |
43 | #include <scsi/scsi.h> | 43 | #include <scsi/scsi.h> |
44 | #include <scsi/scsi_cmnd.h> | 44 | #include <scsi/scsi_cmnd.h> |
45 | #include <scsi/libsas.h> /* For TASK_ATTR_* */ | 45 | #include <scsi/libsas.h> /* For TASK_ATTR_* */ |
46 | 46 | ||
47 | #include <target/target_core_base.h> | 47 | #include <target/target_core_base.h> |
48 | #include <target/target_core_device.h> | 48 | #include <target/target_core_device.h> |
49 | #include <target/target_core_tmr.h> | 49 | #include <target/target_core_tmr.h> |
50 | #include <target/target_core_tpg.h> | 50 | #include <target/target_core_tpg.h> |
51 | #include <target/target_core_transport.h> | 51 | #include <target/target_core_transport.h> |
52 | #include <target/target_core_fabric_ops.h> | 52 | #include <target/target_core_fabric_ops.h> |
53 | #include <target/target_core_configfs.h> | 53 | #include <target/target_core_configfs.h> |
54 | 54 | ||
55 | #include "target_core_alua.h" | 55 | #include "target_core_alua.h" |
56 | #include "target_core_hba.h" | 56 | #include "target_core_hba.h" |
57 | #include "target_core_pr.h" | 57 | #include "target_core_pr.h" |
58 | #include "target_core_scdb.h" | 58 | #include "target_core_scdb.h" |
59 | #include "target_core_ua.h" | 59 | #include "target_core_ua.h" |
60 | 60 | ||
61 | /* #define DEBUG_CDB_HANDLER */ | 61 | /* #define DEBUG_CDB_HANDLER */ |
62 | #ifdef DEBUG_CDB_HANDLER | 62 | #ifdef DEBUG_CDB_HANDLER |
63 | #define DEBUG_CDB_H(x...) printk(KERN_INFO x) | 63 | #define DEBUG_CDB_H(x...) printk(KERN_INFO x) |
64 | #else | 64 | #else |
65 | #define DEBUG_CDB_H(x...) | 65 | #define DEBUG_CDB_H(x...) |
66 | #endif | 66 | #endif |
67 | 67 | ||
68 | /* #define DEBUG_CMD_MAP */ | 68 | /* #define DEBUG_CMD_MAP */ |
69 | #ifdef DEBUG_CMD_MAP | 69 | #ifdef DEBUG_CMD_MAP |
70 | #define DEBUG_CMD_M(x...) printk(KERN_INFO x) | 70 | #define DEBUG_CMD_M(x...) printk(KERN_INFO x) |
71 | #else | 71 | #else |
72 | #define DEBUG_CMD_M(x...) | 72 | #define DEBUG_CMD_M(x...) |
73 | #endif | 73 | #endif |
74 | 74 | ||
75 | /* #define DEBUG_MEM_ALLOC */ | 75 | /* #define DEBUG_MEM_ALLOC */ |
76 | #ifdef DEBUG_MEM_ALLOC | 76 | #ifdef DEBUG_MEM_ALLOC |
77 | #define DEBUG_MEM(x...) printk(KERN_INFO x) | 77 | #define DEBUG_MEM(x...) printk(KERN_INFO x) |
78 | #else | 78 | #else |
79 | #define DEBUG_MEM(x...) | 79 | #define DEBUG_MEM(x...) |
80 | #endif | 80 | #endif |
81 | 81 | ||
82 | /* #define DEBUG_MEM2_ALLOC */ | 82 | /* #define DEBUG_MEM2_ALLOC */ |
83 | #ifdef DEBUG_MEM2_ALLOC | 83 | #ifdef DEBUG_MEM2_ALLOC |
84 | #define DEBUG_MEM2(x...) printk(KERN_INFO x) | 84 | #define DEBUG_MEM2(x...) printk(KERN_INFO x) |
85 | #else | 85 | #else |
86 | #define DEBUG_MEM2(x...) | 86 | #define DEBUG_MEM2(x...) |
87 | #endif | 87 | #endif |
88 | 88 | ||
89 | /* #define DEBUG_SG_CALC */ | 89 | /* #define DEBUG_SG_CALC */ |
90 | #ifdef DEBUG_SG_CALC | 90 | #ifdef DEBUG_SG_CALC |
91 | #define DEBUG_SC(x...) printk(KERN_INFO x) | 91 | #define DEBUG_SC(x...) printk(KERN_INFO x) |
92 | #else | 92 | #else |
93 | #define DEBUG_SC(x...) | 93 | #define DEBUG_SC(x...) |
94 | #endif | 94 | #endif |
95 | 95 | ||
96 | /* #define DEBUG_SE_OBJ */ | 96 | /* #define DEBUG_SE_OBJ */ |
97 | #ifdef DEBUG_SE_OBJ | 97 | #ifdef DEBUG_SE_OBJ |
98 | #define DEBUG_SO(x...) printk(KERN_INFO x) | 98 | #define DEBUG_SO(x...) printk(KERN_INFO x) |
99 | #else | 99 | #else |
100 | #define DEBUG_SO(x...) | 100 | #define DEBUG_SO(x...) |
101 | #endif | 101 | #endif |
102 | 102 | ||
103 | /* #define DEBUG_CMD_VOL */ | 103 | /* #define DEBUG_CMD_VOL */ |
104 | #ifdef DEBUG_CMD_VOL | 104 | #ifdef DEBUG_CMD_VOL |
105 | #define DEBUG_VOL(x...) printk(KERN_INFO x) | 105 | #define DEBUG_VOL(x...) printk(KERN_INFO x) |
106 | #else | 106 | #else |
107 | #define DEBUG_VOL(x...) | 107 | #define DEBUG_VOL(x...) |
108 | #endif | 108 | #endif |
109 | 109 | ||
110 | /* #define DEBUG_CMD_STOP */ | 110 | /* #define DEBUG_CMD_STOP */ |
111 | #ifdef DEBUG_CMD_STOP | 111 | #ifdef DEBUG_CMD_STOP |
112 | #define DEBUG_CS(x...) printk(KERN_INFO x) | 112 | #define DEBUG_CS(x...) printk(KERN_INFO x) |
113 | #else | 113 | #else |
114 | #define DEBUG_CS(x...) | 114 | #define DEBUG_CS(x...) |
115 | #endif | 115 | #endif |
116 | 116 | ||
117 | /* #define DEBUG_PASSTHROUGH */ | 117 | /* #define DEBUG_PASSTHROUGH */ |
118 | #ifdef DEBUG_PASSTHROUGH | 118 | #ifdef DEBUG_PASSTHROUGH |
119 | #define DEBUG_PT(x...) printk(KERN_INFO x) | 119 | #define DEBUG_PT(x...) printk(KERN_INFO x) |
120 | #else | 120 | #else |
121 | #define DEBUG_PT(x...) | 121 | #define DEBUG_PT(x...) |
122 | #endif | 122 | #endif |
123 | 123 | ||
124 | /* #define DEBUG_TASK_STOP */ | 124 | /* #define DEBUG_TASK_STOP */ |
125 | #ifdef DEBUG_TASK_STOP | 125 | #ifdef DEBUG_TASK_STOP |
126 | #define DEBUG_TS(x...) printk(KERN_INFO x) | 126 | #define DEBUG_TS(x...) printk(KERN_INFO x) |
127 | #else | 127 | #else |
128 | #define DEBUG_TS(x...) | 128 | #define DEBUG_TS(x...) |
129 | #endif | 129 | #endif |
130 | 130 | ||
131 | /* #define DEBUG_TRANSPORT_STOP */ | 131 | /* #define DEBUG_TRANSPORT_STOP */ |
132 | #ifdef DEBUG_TRANSPORT_STOP | 132 | #ifdef DEBUG_TRANSPORT_STOP |
133 | #define DEBUG_TRANSPORT_S(x...) printk(KERN_INFO x) | 133 | #define DEBUG_TRANSPORT_S(x...) printk(KERN_INFO x) |
134 | #else | 134 | #else |
135 | #define DEBUG_TRANSPORT_S(x...) | 135 | #define DEBUG_TRANSPORT_S(x...) |
136 | #endif | 136 | #endif |
137 | 137 | ||
138 | /* #define DEBUG_TASK_FAILURE */ | 138 | /* #define DEBUG_TASK_FAILURE */ |
139 | #ifdef DEBUG_TASK_FAILURE | 139 | #ifdef DEBUG_TASK_FAILURE |
140 | #define DEBUG_TF(x...) printk(KERN_INFO x) | 140 | #define DEBUG_TF(x...) printk(KERN_INFO x) |
141 | #else | 141 | #else |
142 | #define DEBUG_TF(x...) | 142 | #define DEBUG_TF(x...) |
143 | #endif | 143 | #endif |
144 | 144 | ||
145 | /* #define DEBUG_DEV_OFFLINE */ | 145 | /* #define DEBUG_DEV_OFFLINE */ |
146 | #ifdef DEBUG_DEV_OFFLINE | 146 | #ifdef DEBUG_DEV_OFFLINE |
147 | #define DEBUG_DO(x...) printk(KERN_INFO x) | 147 | #define DEBUG_DO(x...) printk(KERN_INFO x) |
148 | #else | 148 | #else |
149 | #define DEBUG_DO(x...) | 149 | #define DEBUG_DO(x...) |
150 | #endif | 150 | #endif |
151 | 151 | ||
152 | /* #define DEBUG_TASK_STATE */ | 152 | /* #define DEBUG_TASK_STATE */ |
153 | #ifdef DEBUG_TASK_STATE | 153 | #ifdef DEBUG_TASK_STATE |
154 | #define DEBUG_TSTATE(x...) printk(KERN_INFO x) | 154 | #define DEBUG_TSTATE(x...) printk(KERN_INFO x) |
155 | #else | 155 | #else |
156 | #define DEBUG_TSTATE(x...) | 156 | #define DEBUG_TSTATE(x...) |
157 | #endif | 157 | #endif |
158 | 158 | ||
159 | /* #define DEBUG_STATUS_THR */ | 159 | /* #define DEBUG_STATUS_THR */ |
160 | #ifdef DEBUG_STATUS_THR | 160 | #ifdef DEBUG_STATUS_THR |
161 | #define DEBUG_ST(x...) printk(KERN_INFO x) | 161 | #define DEBUG_ST(x...) printk(KERN_INFO x) |
162 | #else | 162 | #else |
163 | #define DEBUG_ST(x...) | 163 | #define DEBUG_ST(x...) |
164 | #endif | 164 | #endif |
165 | 165 | ||
166 | /* #define DEBUG_TASK_TIMEOUT */ | 166 | /* #define DEBUG_TASK_TIMEOUT */ |
167 | #ifdef DEBUG_TASK_TIMEOUT | 167 | #ifdef DEBUG_TASK_TIMEOUT |
168 | #define DEBUG_TT(x...) printk(KERN_INFO x) | 168 | #define DEBUG_TT(x...) printk(KERN_INFO x) |
169 | #else | 169 | #else |
170 | #define DEBUG_TT(x...) | 170 | #define DEBUG_TT(x...) |
171 | #endif | 171 | #endif |
172 | 172 | ||
173 | /* #define DEBUG_GENERIC_REQUEST_FAILURE */ | 173 | /* #define DEBUG_GENERIC_REQUEST_FAILURE */ |
174 | #ifdef DEBUG_GENERIC_REQUEST_FAILURE | 174 | #ifdef DEBUG_GENERIC_REQUEST_FAILURE |
175 | #define DEBUG_GRF(x...) printk(KERN_INFO x) | 175 | #define DEBUG_GRF(x...) printk(KERN_INFO x) |
176 | #else | 176 | #else |
177 | #define DEBUG_GRF(x...) | 177 | #define DEBUG_GRF(x...) |
178 | #endif | 178 | #endif |
179 | 179 | ||
180 | /* #define DEBUG_SAM_TASK_ATTRS */ | 180 | /* #define DEBUG_SAM_TASK_ATTRS */ |
181 | #ifdef DEBUG_SAM_TASK_ATTRS | 181 | #ifdef DEBUG_SAM_TASK_ATTRS |
182 | #define DEBUG_STA(x...) printk(KERN_INFO x) | 182 | #define DEBUG_STA(x...) printk(KERN_INFO x) |
183 | #else | 183 | #else |
184 | #define DEBUG_STA(x...) | 184 | #define DEBUG_STA(x...) |
185 | #endif | 185 | #endif |
186 | 186 | ||
187 | struct se_global *se_global; | 187 | struct se_global *se_global; |
188 | 188 | ||
189 | static struct kmem_cache *se_cmd_cache; | 189 | static struct kmem_cache *se_cmd_cache; |
190 | static struct kmem_cache *se_sess_cache; | 190 | static struct kmem_cache *se_sess_cache; |
191 | struct kmem_cache *se_tmr_req_cache; | 191 | struct kmem_cache *se_tmr_req_cache; |
192 | struct kmem_cache *se_ua_cache; | 192 | struct kmem_cache *se_ua_cache; |
193 | struct kmem_cache *se_mem_cache; | 193 | struct kmem_cache *se_mem_cache; |
194 | struct kmem_cache *t10_pr_reg_cache; | 194 | struct kmem_cache *t10_pr_reg_cache; |
195 | struct kmem_cache *t10_alua_lu_gp_cache; | 195 | struct kmem_cache *t10_alua_lu_gp_cache; |
196 | struct kmem_cache *t10_alua_lu_gp_mem_cache; | 196 | struct kmem_cache *t10_alua_lu_gp_mem_cache; |
197 | struct kmem_cache *t10_alua_tg_pt_gp_cache; | 197 | struct kmem_cache *t10_alua_tg_pt_gp_cache; |
198 | struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; | 198 | struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; |
199 | 199 | ||
200 | /* Used for transport_dev_get_map_*() */ | 200 | /* Used for transport_dev_get_map_*() */ |
201 | typedef int (*map_func_t)(struct se_task *, u32); | 201 | typedef int (*map_func_t)(struct se_task *, u32); |
202 | 202 | ||
203 | static int transport_generic_write_pending(struct se_cmd *); | 203 | static int transport_generic_write_pending(struct se_cmd *); |
204 | static int transport_processing_thread(void *); | 204 | static int transport_processing_thread(void *); |
205 | static int __transport_execute_tasks(struct se_device *dev); | 205 | static int __transport_execute_tasks(struct se_device *dev); |
206 | static void transport_complete_task_attr(struct se_cmd *cmd); | 206 | static void transport_complete_task_attr(struct se_cmd *cmd); |
207 | static void transport_direct_request_timeout(struct se_cmd *cmd); | 207 | static void transport_direct_request_timeout(struct se_cmd *cmd); |
208 | static void transport_free_dev_tasks(struct se_cmd *cmd); | 208 | static void transport_free_dev_tasks(struct se_cmd *cmd); |
209 | static u32 transport_generic_get_cdb_count(struct se_cmd *cmd, | 209 | static u32 transport_generic_get_cdb_count(struct se_cmd *cmd, |
210 | unsigned long long starting_lba, u32 sectors, | 210 | unsigned long long starting_lba, u32 sectors, |
211 | enum dma_data_direction data_direction, | 211 | enum dma_data_direction data_direction, |
212 | struct list_head *mem_list, int set_counts); | 212 | struct list_head *mem_list, int set_counts); |
213 | static int transport_generic_get_mem(struct se_cmd *cmd, u32 length, | 213 | static int transport_generic_get_mem(struct se_cmd *cmd, u32 length, |
214 | u32 dma_size); | 214 | u32 dma_size); |
215 | static int transport_generic_remove(struct se_cmd *cmd, | 215 | static int transport_generic_remove(struct se_cmd *cmd, |
216 | int release_to_pool, int session_reinstatement); | 216 | int release_to_pool, int session_reinstatement); |
217 | static int transport_get_sectors(struct se_cmd *cmd); | 217 | static int transport_get_sectors(struct se_cmd *cmd); |
218 | static struct list_head *transport_init_se_mem_list(void); | 218 | static struct list_head *transport_init_se_mem_list(void); |
219 | static int transport_map_sg_to_mem(struct se_cmd *cmd, | 219 | static int transport_map_sg_to_mem(struct se_cmd *cmd, |
220 | struct list_head *se_mem_list, void *in_mem, | 220 | struct list_head *se_mem_list, void *in_mem, |
221 | u32 *se_mem_cnt); | 221 | u32 *se_mem_cnt); |
222 | static void transport_memcpy_se_mem_read_contig(struct se_cmd *cmd, | 222 | static void transport_memcpy_se_mem_read_contig(struct se_cmd *cmd, |
223 | unsigned char *dst, struct list_head *se_mem_list); | 223 | unsigned char *dst, struct list_head *se_mem_list); |
224 | static void transport_release_fe_cmd(struct se_cmd *cmd); | 224 | static void transport_release_fe_cmd(struct se_cmd *cmd); |
225 | static void transport_remove_cmd_from_queue(struct se_cmd *cmd, | 225 | static void transport_remove_cmd_from_queue(struct se_cmd *cmd, |
226 | struct se_queue_obj *qobj); | 226 | struct se_queue_obj *qobj); |
227 | static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq); | 227 | static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq); |
228 | static void transport_stop_all_task_timers(struct se_cmd *cmd); | 228 | static void transport_stop_all_task_timers(struct se_cmd *cmd); |
229 | 229 | ||
230 | int transport_emulate_control_cdb(struct se_task *task); | ||
231 | |||
232 | int init_se_global(void) | 230 | int init_se_global(void) |
233 | { | 231 | { |
234 | struct se_global *global; | 232 | struct se_global *global; |
235 | 233 | ||
236 | global = kzalloc(sizeof(struct se_global), GFP_KERNEL); | 234 | global = kzalloc(sizeof(struct se_global), GFP_KERNEL); |
237 | if (!(global)) { | 235 | if (!(global)) { |
238 | printk(KERN_ERR "Unable to allocate memory for struct se_global\n"); | 236 | printk(KERN_ERR "Unable to allocate memory for struct se_global\n"); |
239 | return -1; | 237 | return -1; |
240 | } | 238 | } |
241 | 239 | ||
242 | INIT_LIST_HEAD(&global->g_lu_gps_list); | 240 | INIT_LIST_HEAD(&global->g_lu_gps_list); |
243 | INIT_LIST_HEAD(&global->g_se_tpg_list); | 241 | INIT_LIST_HEAD(&global->g_se_tpg_list); |
244 | INIT_LIST_HEAD(&global->g_hba_list); | 242 | INIT_LIST_HEAD(&global->g_hba_list); |
245 | INIT_LIST_HEAD(&global->g_se_dev_list); | 243 | INIT_LIST_HEAD(&global->g_se_dev_list); |
246 | spin_lock_init(&global->g_device_lock); | 244 | spin_lock_init(&global->g_device_lock); |
247 | spin_lock_init(&global->hba_lock); | 245 | spin_lock_init(&global->hba_lock); |
248 | spin_lock_init(&global->se_tpg_lock); | 246 | spin_lock_init(&global->se_tpg_lock); |
249 | spin_lock_init(&global->lu_gps_lock); | 247 | spin_lock_init(&global->lu_gps_lock); |
250 | spin_lock_init(&global->plugin_class_lock); | 248 | spin_lock_init(&global->plugin_class_lock); |
251 | 249 | ||
252 | se_cmd_cache = kmem_cache_create("se_cmd_cache", | 250 | se_cmd_cache = kmem_cache_create("se_cmd_cache", |
253 | sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL); | 251 | sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL); |
254 | if (!(se_cmd_cache)) { | 252 | if (!(se_cmd_cache)) { |
255 | printk(KERN_ERR "kmem_cache_create for struct se_cmd failed\n"); | 253 | printk(KERN_ERR "kmem_cache_create for struct se_cmd failed\n"); |
256 | goto out; | 254 | goto out; |
257 | } | 255 | } |
258 | se_tmr_req_cache = kmem_cache_create("se_tmr_cache", | 256 | se_tmr_req_cache = kmem_cache_create("se_tmr_cache", |
259 | sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req), | 257 | sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req), |
260 | 0, NULL); | 258 | 0, NULL); |
261 | if (!(se_tmr_req_cache)) { | 259 | if (!(se_tmr_req_cache)) { |
262 | printk(KERN_ERR "kmem_cache_create() for struct se_tmr_req" | 260 | printk(KERN_ERR "kmem_cache_create() for struct se_tmr_req" |
263 | " failed\n"); | 261 | " failed\n"); |
264 | goto out; | 262 | goto out; |
265 | } | 263 | } |
266 | se_sess_cache = kmem_cache_create("se_sess_cache", | 264 | se_sess_cache = kmem_cache_create("se_sess_cache", |
267 | sizeof(struct se_session), __alignof__(struct se_session), | 265 | sizeof(struct se_session), __alignof__(struct se_session), |
268 | 0, NULL); | 266 | 0, NULL); |
269 | if (!(se_sess_cache)) { | 267 | if (!(se_sess_cache)) { |
270 | printk(KERN_ERR "kmem_cache_create() for struct se_session" | 268 | printk(KERN_ERR "kmem_cache_create() for struct se_session" |
271 | " failed\n"); | 269 | " failed\n"); |
272 | goto out; | 270 | goto out; |
273 | } | 271 | } |
274 | se_ua_cache = kmem_cache_create("se_ua_cache", | 272 | se_ua_cache = kmem_cache_create("se_ua_cache", |
275 | sizeof(struct se_ua), __alignof__(struct se_ua), | 273 | sizeof(struct se_ua), __alignof__(struct se_ua), |
276 | 0, NULL); | 274 | 0, NULL); |
277 | if (!(se_ua_cache)) { | 275 | if (!(se_ua_cache)) { |
278 | printk(KERN_ERR "kmem_cache_create() for struct se_ua failed\n"); | 276 | printk(KERN_ERR "kmem_cache_create() for struct se_ua failed\n"); |
279 | goto out; | 277 | goto out; |
280 | } | 278 | } |
281 | se_mem_cache = kmem_cache_create("se_mem_cache", | 279 | se_mem_cache = kmem_cache_create("se_mem_cache", |
282 | sizeof(struct se_mem), __alignof__(struct se_mem), 0, NULL); | 280 | sizeof(struct se_mem), __alignof__(struct se_mem), 0, NULL); |
283 | if (!(se_mem_cache)) { | 281 | if (!(se_mem_cache)) { |
284 | printk(KERN_ERR "kmem_cache_create() for struct se_mem failed\n"); | 282 | printk(KERN_ERR "kmem_cache_create() for struct se_mem failed\n"); |
285 | goto out; | 283 | goto out; |
286 | } | 284 | } |
287 | t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache", | 285 | t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache", |
288 | sizeof(struct t10_pr_registration), | 286 | sizeof(struct t10_pr_registration), |
289 | __alignof__(struct t10_pr_registration), 0, NULL); | 287 | __alignof__(struct t10_pr_registration), 0, NULL); |
290 | if (!(t10_pr_reg_cache)) { | 288 | if (!(t10_pr_reg_cache)) { |
291 | printk(KERN_ERR "kmem_cache_create() for struct t10_pr_registration" | 289 | printk(KERN_ERR "kmem_cache_create() for struct t10_pr_registration" |
292 | " failed\n"); | 290 | " failed\n"); |
293 | goto out; | 291 | goto out; |
294 | } | 292 | } |
295 | t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache", | 293 | t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache", |
296 | sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp), | 294 | sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp), |
297 | 0, NULL); | 295 | 0, NULL); |
298 | if (!(t10_alua_lu_gp_cache)) { | 296 | if (!(t10_alua_lu_gp_cache)) { |
299 | printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_cache" | 297 | printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_cache" |
300 | " failed\n"); | 298 | " failed\n"); |
301 | goto out; | 299 | goto out; |
302 | } | 300 | } |
303 | t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache", | 301 | t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache", |
304 | sizeof(struct t10_alua_lu_gp_member), | 302 | sizeof(struct t10_alua_lu_gp_member), |
305 | __alignof__(struct t10_alua_lu_gp_member), 0, NULL); | 303 | __alignof__(struct t10_alua_lu_gp_member), 0, NULL); |
306 | if (!(t10_alua_lu_gp_mem_cache)) { | 304 | if (!(t10_alua_lu_gp_mem_cache)) { |
307 | printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_mem_" | 305 | printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_mem_" |
308 | "cache failed\n"); | 306 | "cache failed\n"); |
309 | goto out; | 307 | goto out; |
310 | } | 308 | } |
311 | t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache", | 309 | t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache", |
312 | sizeof(struct t10_alua_tg_pt_gp), | 310 | sizeof(struct t10_alua_tg_pt_gp), |
313 | __alignof__(struct t10_alua_tg_pt_gp), 0, NULL); | 311 | __alignof__(struct t10_alua_tg_pt_gp), 0, NULL); |
314 | if (!(t10_alua_tg_pt_gp_cache)) { | 312 | if (!(t10_alua_tg_pt_gp_cache)) { |
315 | printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_" | 313 | printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_" |
316 | "cache failed\n"); | 314 | "cache failed\n"); |
317 | goto out; | 315 | goto out; |
318 | } | 316 | } |
319 | t10_alua_tg_pt_gp_mem_cache = kmem_cache_create( | 317 | t10_alua_tg_pt_gp_mem_cache = kmem_cache_create( |
320 | "t10_alua_tg_pt_gp_mem_cache", | 318 | "t10_alua_tg_pt_gp_mem_cache", |
321 | sizeof(struct t10_alua_tg_pt_gp_member), | 319 | sizeof(struct t10_alua_tg_pt_gp_member), |
322 | __alignof__(struct t10_alua_tg_pt_gp_member), | 320 | __alignof__(struct t10_alua_tg_pt_gp_member), |
323 | 0, NULL); | 321 | 0, NULL); |
324 | if (!(t10_alua_tg_pt_gp_mem_cache)) { | 322 | if (!(t10_alua_tg_pt_gp_mem_cache)) { |
325 | printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_" | 323 | printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_" |
326 | "mem_t failed\n"); | 324 | "mem_t failed\n"); |
327 | goto out; | 325 | goto out; |
328 | } | 326 | } |
329 | 327 | ||
330 | se_global = global; | 328 | se_global = global; |
331 | 329 | ||
332 | return 0; | 330 | return 0; |
333 | out: | 331 | out: |
334 | if (se_cmd_cache) | 332 | if (se_cmd_cache) |
335 | kmem_cache_destroy(se_cmd_cache); | 333 | kmem_cache_destroy(se_cmd_cache); |
336 | if (se_tmr_req_cache) | 334 | if (se_tmr_req_cache) |
337 | kmem_cache_destroy(se_tmr_req_cache); | 335 | kmem_cache_destroy(se_tmr_req_cache); |
338 | if (se_sess_cache) | 336 | if (se_sess_cache) |
339 | kmem_cache_destroy(se_sess_cache); | 337 | kmem_cache_destroy(se_sess_cache); |
340 | if (se_ua_cache) | 338 | if (se_ua_cache) |
341 | kmem_cache_destroy(se_ua_cache); | 339 | kmem_cache_destroy(se_ua_cache); |
342 | if (se_mem_cache) | 340 | if (se_mem_cache) |
343 | kmem_cache_destroy(se_mem_cache); | 341 | kmem_cache_destroy(se_mem_cache); |
344 | if (t10_pr_reg_cache) | 342 | if (t10_pr_reg_cache) |
345 | kmem_cache_destroy(t10_pr_reg_cache); | 343 | kmem_cache_destroy(t10_pr_reg_cache); |
346 | if (t10_alua_lu_gp_cache) | 344 | if (t10_alua_lu_gp_cache) |
347 | kmem_cache_destroy(t10_alua_lu_gp_cache); | 345 | kmem_cache_destroy(t10_alua_lu_gp_cache); |
348 | if (t10_alua_lu_gp_mem_cache) | 346 | if (t10_alua_lu_gp_mem_cache) |
349 | kmem_cache_destroy(t10_alua_lu_gp_mem_cache); | 347 | kmem_cache_destroy(t10_alua_lu_gp_mem_cache); |
350 | if (t10_alua_tg_pt_gp_cache) | 348 | if (t10_alua_tg_pt_gp_cache) |
351 | kmem_cache_destroy(t10_alua_tg_pt_gp_cache); | 349 | kmem_cache_destroy(t10_alua_tg_pt_gp_cache); |
352 | if (t10_alua_tg_pt_gp_mem_cache) | 350 | if (t10_alua_tg_pt_gp_mem_cache) |
353 | kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); | 351 | kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); |
354 | kfree(global); | 352 | kfree(global); |
355 | return -1; | 353 | return -1; |
356 | } | 354 | } |
357 | 355 | ||
358 | void release_se_global(void) | 356 | void release_se_global(void) |
359 | { | 357 | { |
360 | struct se_global *global; | 358 | struct se_global *global; |
361 | 359 | ||
362 | global = se_global; | 360 | global = se_global; |
363 | if (!(global)) | 361 | if (!(global)) |
364 | return; | 362 | return; |
365 | 363 | ||
366 | kmem_cache_destroy(se_cmd_cache); | 364 | kmem_cache_destroy(se_cmd_cache); |
367 | kmem_cache_destroy(se_tmr_req_cache); | 365 | kmem_cache_destroy(se_tmr_req_cache); |
368 | kmem_cache_destroy(se_sess_cache); | 366 | kmem_cache_destroy(se_sess_cache); |
369 | kmem_cache_destroy(se_ua_cache); | 367 | kmem_cache_destroy(se_ua_cache); |
370 | kmem_cache_destroy(se_mem_cache); | 368 | kmem_cache_destroy(se_mem_cache); |
371 | kmem_cache_destroy(t10_pr_reg_cache); | 369 | kmem_cache_destroy(t10_pr_reg_cache); |
372 | kmem_cache_destroy(t10_alua_lu_gp_cache); | 370 | kmem_cache_destroy(t10_alua_lu_gp_cache); |
373 | kmem_cache_destroy(t10_alua_lu_gp_mem_cache); | 371 | kmem_cache_destroy(t10_alua_lu_gp_mem_cache); |
374 | kmem_cache_destroy(t10_alua_tg_pt_gp_cache); | 372 | kmem_cache_destroy(t10_alua_tg_pt_gp_cache); |
375 | kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); | 373 | kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); |
376 | kfree(global); | 374 | kfree(global); |
377 | 375 | ||
378 | se_global = NULL; | 376 | se_global = NULL; |
379 | } | 377 | } |
380 | 378 | ||
381 | /* SCSI statistics table index */ | 379 | /* SCSI statistics table index */ |
382 | static struct scsi_index_table scsi_index_table; | 380 | static struct scsi_index_table scsi_index_table; |
383 | 381 | ||
384 | /* | 382 | /* |
385 | * Initialize the index table for allocating unique row indexes to various mib | 383 | * Initialize the index table for allocating unique row indexes to various mib |
386 | * tables. | 384 | * tables. |
387 | */ | 385 | */ |
388 | void init_scsi_index_table(void) | 386 | void init_scsi_index_table(void) |
389 | { | 387 | { |
390 | memset(&scsi_index_table, 0, sizeof(struct scsi_index_table)); | 388 | memset(&scsi_index_table, 0, sizeof(struct scsi_index_table)); |
391 | spin_lock_init(&scsi_index_table.lock); | 389 | spin_lock_init(&scsi_index_table.lock); |
392 | } | 390 | } |
393 | 391 | ||
394 | /* | 392 | /* |
395 | * Allocate a new row index for the entry type specified | 393 | * Allocate a new row index for the entry type specified |
396 | */ | 394 | */ |
397 | u32 scsi_get_new_index(scsi_index_t type) | 395 | u32 scsi_get_new_index(scsi_index_t type) |
398 | { | 396 | { |
399 | u32 new_index; | 397 | u32 new_index; |
400 | 398 | ||
401 | if ((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)) { | 399 | if ((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)) { |
402 | printk(KERN_ERR "Invalid index type %d\n", type); | 400 | printk(KERN_ERR "Invalid index type %d\n", type); |
403 | return -EINVAL; | 401 | return -EINVAL; |
404 | } | 402 | } |
405 | 403 | ||
406 | spin_lock(&scsi_index_table.lock); | 404 | spin_lock(&scsi_index_table.lock); |
407 | new_index = ++scsi_index_table.scsi_mib_index[type]; | 405 | new_index = ++scsi_index_table.scsi_mib_index[type]; |
408 | if (new_index == 0) | 406 | if (new_index == 0) |
409 | new_index = ++scsi_index_table.scsi_mib_index[type]; | 407 | new_index = ++scsi_index_table.scsi_mib_index[type]; |
410 | spin_unlock(&scsi_index_table.lock); | 408 | spin_unlock(&scsi_index_table.lock); |
411 | 409 | ||
412 | return new_index; | 410 | return new_index; |
413 | } | 411 | } |
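
The counter is advanced under the table lock and bumped a second time on wrap, so index 0 is never handed out and can safely mean "unassigned" in the MIB tables. A usage sketch, assuming the scsi_index_t values defined in target_core_base.h:

	/* Allocate a unique device row index for statistics. */
	u32 dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
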
414 | 412 | ||
415 | void transport_init_queue_obj(struct se_queue_obj *qobj) | 413 | void transport_init_queue_obj(struct se_queue_obj *qobj) |
416 | { | 414 | { |
417 | atomic_set(&qobj->queue_cnt, 0); | 415 | atomic_set(&qobj->queue_cnt, 0); |
418 | INIT_LIST_HEAD(&qobj->qobj_list); | 416 | INIT_LIST_HEAD(&qobj->qobj_list); |
419 | init_waitqueue_head(&qobj->thread_wq); | 417 | init_waitqueue_head(&qobj->thread_wq); |
420 | spin_lock_init(&qobj->cmd_queue_lock); | 418 | spin_lock_init(&qobj->cmd_queue_lock); |
421 | } | 419 | } |
422 | EXPORT_SYMBOL(transport_init_queue_obj); | 420 | EXPORT_SYMBOL(transport_init_queue_obj); |
423 | 421 | ||
424 | static int transport_subsystem_reqmods(void) | 422 | static int transport_subsystem_reqmods(void) |
425 | { | 423 | { |
426 | int ret; | 424 | int ret; |
427 | 425 | ||
428 | ret = request_module("target_core_iblock"); | 426 | ret = request_module("target_core_iblock"); |
429 | if (ret != 0) | 427 | if (ret != 0) |
430 | printk(KERN_ERR "Unable to load target_core_iblock\n"); | 428 | printk(KERN_ERR "Unable to load target_core_iblock\n"); |
431 | 429 | ||
432 | ret = request_module("target_core_file"); | 430 | ret = request_module("target_core_file"); |
433 | if (ret != 0) | 431 | if (ret != 0) |
434 | printk(KERN_ERR "Unable to load target_core_file\n"); | 432 | printk(KERN_ERR "Unable to load target_core_file\n"); |
435 | 433 | ||
436 | ret = request_module("target_core_pscsi"); | 434 | ret = request_module("target_core_pscsi"); |
437 | if (ret != 0) | 435 | if (ret != 0) |
438 | printk(KERN_ERR "Unable to load target_core_pscsi\n"); | 436 | printk(KERN_ERR "Unable to load target_core_pscsi\n"); |
439 | 437 | ||
440 | ret = request_module("target_core_stgt"); | 438 | ret = request_module("target_core_stgt"); |
441 | if (ret != 0) | 439 | if (ret != 0) |
442 | printk(KERN_ERR "Unable to load target_core_stgt\n"); | 440 | printk(KERN_ERR "Unable to load target_core_stgt\n"); |
443 | 441 | ||
444 | return 0; | 442 | return 0; |
445 | } | 443 | } |
446 | 444 | ||
447 | int transport_subsystem_check_init(void) | 445 | int transport_subsystem_check_init(void) |
448 | { | 446 | { |
449 | if (se_global->g_sub_api_initialized) | 447 | if (se_global->g_sub_api_initialized) |
450 | return 0; | 448 | return 0; |
451 | /* | 449 | /* |
452 | * Request the loading of known TCM subsystem plugins. | 450 | * Request the loading of known TCM subsystem plugins. |
453 | */ | 451 | */ |
454 | if (transport_subsystem_reqmods() < 0) | 452 | if (transport_subsystem_reqmods() < 0) |
455 | return -1; | 453 | return -1; |
456 | 454 | ||
457 | se_global->g_sub_api_initialized = 1; | 455 | se_global->g_sub_api_initialized = 1; |
458 | return 0; | 456 | return 0; |
459 | } | 457 | } |
460 | 458 | ||
461 | struct se_session *transport_init_session(void) | 459 | struct se_session *transport_init_session(void) |
462 | { | 460 | { |
463 | struct se_session *se_sess; | 461 | struct se_session *se_sess; |
464 | 462 | ||
465 | se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL); | 463 | se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL); |
466 | if (!(se_sess)) { | 464 | if (!(se_sess)) { |
467 | printk(KERN_ERR "Unable to allocate struct se_session from" | 465 | printk(KERN_ERR "Unable to allocate struct se_session from" |
468 | " se_sess_cache\n"); | 466 | " se_sess_cache\n"); |
469 | return ERR_PTR(-ENOMEM); | 467 | return ERR_PTR(-ENOMEM); |
470 | } | 468 | } |
471 | INIT_LIST_HEAD(&se_sess->sess_list); | 469 | INIT_LIST_HEAD(&se_sess->sess_list); |
472 | INIT_LIST_HEAD(&se_sess->sess_acl_list); | 470 | INIT_LIST_HEAD(&se_sess->sess_acl_list); |
473 | 471 | ||
474 | return se_sess; | 472 | return se_sess; |
475 | } | 473 | } |
476 | EXPORT_SYMBOL(transport_init_session); | 474 | EXPORT_SYMBOL(transport_init_session); |
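
Note the ERR_PTR(-ENOMEM) return: fabric modules must test the result with IS_ERR() rather than comparing against NULL. A minimal caller sketch:

	struct se_session *sess = transport_init_session();
	if (IS_ERR(sess))
		return PTR_ERR(sess);
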
477 | 475 | ||
478 | /* | 476 | /* |
479 | * Called with spin_lock_bh(&struct se_portal_group->session_lock) held. | 477 | * Called with spin_lock_bh(&struct se_portal_group->session_lock) held. |
480 | */ | 478 | */ |
481 | void __transport_register_session( | 479 | void __transport_register_session( |
482 | struct se_portal_group *se_tpg, | 480 | struct se_portal_group *se_tpg, |
483 | struct se_node_acl *se_nacl, | 481 | struct se_node_acl *se_nacl, |
484 | struct se_session *se_sess, | 482 | struct se_session *se_sess, |
485 | void *fabric_sess_ptr) | 483 | void *fabric_sess_ptr) |
486 | { | 484 | { |
487 | unsigned char buf[PR_REG_ISID_LEN]; | 485 | unsigned char buf[PR_REG_ISID_LEN]; |
488 | 486 | ||
489 | se_sess->se_tpg = se_tpg; | 487 | se_sess->se_tpg = se_tpg; |
490 | se_sess->fabric_sess_ptr = fabric_sess_ptr; | 488 | se_sess->fabric_sess_ptr = fabric_sess_ptr; |
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
	 *
	 * Only set for struct se_session's that will actually be moving I/O.
	 * eg: *NOT* discovery sessions.
	 */
	if (se_nacl) {
		/*
		 * If the fabric module supports an ISID based TransportID,
		 * save this value in binary from the fabric I_T Nexus now.
		 */
		if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL) {
			memset(&buf[0], 0, PR_REG_ISID_LEN);
			TPG_TFO(se_tpg)->sess_get_initiator_sid(se_sess,
					&buf[0], PR_REG_ISID_LEN);
			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
		}
		spin_lock_irq(&se_nacl->nacl_sess_lock);
		/*
		 * The se_nacl->nacl_sess pointer will be set to the
		 * last active I_T Nexus for each struct se_node_acl.
		 */
		se_nacl->nacl_sess = se_sess;

		list_add_tail(&se_sess->sess_acl_list,
			      &se_nacl->acl_sess_list);
		spin_unlock_irq(&se_nacl->nacl_sess_lock);
	}
	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);

	printk(KERN_INFO "TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
		TPG_TFO(se_tpg)->get_fabric_name(), se_sess->fabric_sess_ptr);
}
EXPORT_SYMBOL(__transport_register_session);

void transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	spin_lock_bh(&se_tpg->session_lock);
	__transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
	spin_unlock_bh(&se_tpg->session_lock);
}
EXPORT_SYMBOL(transport_register_session);

void transport_deregister_session_configfs(struct se_session *se_sess)
{
	struct se_node_acl *se_nacl;

	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
	 */
	se_nacl = se_sess->se_node_acl;
	if ((se_nacl)) {
		spin_lock_irq(&se_nacl->nacl_sess_lock);
		list_del(&se_sess->sess_acl_list);
		/*
		 * If the session list is empty, then clear the pointer.
		 * Otherwise, set the struct se_session pointer from the tail
		 * element of the per struct se_node_acl active session list.
		 */
		if (list_empty(&se_nacl->acl_sess_list))
			se_nacl->nacl_sess = NULL;
		else {
			se_nacl->nacl_sess = container_of(
					se_nacl->acl_sess_list.prev,
					struct se_session, sess_acl_list);
		}
		spin_unlock_irq(&se_nacl->nacl_sess_lock);
	}
}
EXPORT_SYMBOL(transport_deregister_session_configfs);

void transport_free_session(struct se_session *se_sess)
{
	kmem_cache_free(se_sess_cache, se_sess);
}
EXPORT_SYMBOL(transport_free_session);

void transport_deregister_session(struct se_session *se_sess)
{
	struct se_portal_group *se_tpg = se_sess->se_tpg;
	struct se_node_acl *se_nacl;

	if (!(se_tpg)) {
		transport_free_session(se_sess);
		return;
	}

	spin_lock_bh(&se_tpg->session_lock);
	list_del(&se_sess->sess_list);
	se_sess->se_tpg = NULL;
	se_sess->fabric_sess_ptr = NULL;
	spin_unlock_bh(&se_tpg->session_lock);

	/*
	 * Determine if we need to do extra work for this initiator node's
	 * struct se_node_acl if it had been previously dynamically generated.
	 */
	se_nacl = se_sess->se_node_acl;
	if ((se_nacl)) {
		spin_lock_bh(&se_tpg->acl_node_lock);
		if (se_nacl->dynamic_node_acl) {
			if (!(TPG_TFO(se_tpg)->tpg_check_demo_mode_cache(
					se_tpg))) {
				list_del(&se_nacl->acl_list);
				se_tpg->num_node_acls--;
				spin_unlock_bh(&se_tpg->acl_node_lock);

				core_tpg_wait_for_nacl_pr_ref(se_nacl);
				core_free_device_list_for_node(se_nacl, se_tpg);
				TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg,
						se_nacl);
				spin_lock_bh(&se_tpg->acl_node_lock);
			}
		}
		spin_unlock_bh(&se_tpg->acl_node_lock);
	}

	transport_free_session(se_sess);

	printk(KERN_INFO "TARGET_CORE[%s]: Deregistered fabric_sess\n",
		TPG_TFO(se_tpg)->get_fabric_name());
}
EXPORT_SYMBOL(transport_deregister_session);
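
/*
 * Editor's note -- a minimal sketch, not part of this commit: how a
 * hypothetical fabric module might drive the exported session lifecycle
 * above.  The my_fabric_* names and the se_node_acl assignment are
 * assumptions for illustration; only the transport_*_session() calls
 * are defined in this file.
 */
static int my_fabric_make_nexus(struct se_portal_group *se_tpg,
				struct se_node_acl *se_nacl,
				void *fabric_sess_ptr,
				struct se_session **out)
{
	struct se_session *se_sess = transport_init_session();

	if (IS_ERR(se_sess))
		return PTR_ERR(se_sess);
	/* Fabric modules of this era assign se_node_acl themselves */
	se_sess->se_node_acl = se_nacl;
	/* Publishes the I_T Nexus; takes se_tpg->session_lock internally */
	transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
	*out = se_sess;
	return 0;
}

static void my_fabric_drop_nexus(struct se_session *se_sess)
{
	/*
	 * Unlink from the se_node_acl first, then from the TPG; the
	 * latter also calls transport_free_session() on se_sess.
	 */
	transport_deregister_session_configfs(se_sess);
	transport_deregister_session(se_sess);
}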

/*
 * Called with T_TASK(cmd)->t_state_lock held.
 */
static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
{
	struct se_device *dev;
	struct se_task *task;
	unsigned long flags;

	if (!T_TASK(cmd))
		return;

	list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
		dev = task->se_dev;
		if (!(dev))
			continue;

		if (atomic_read(&task->task_active))
			continue;

		if (!(atomic_read(&task->task_state_active)))
			continue;

		spin_lock_irqsave(&dev->execute_task_lock, flags);
		list_del(&task->t_state_list);
		DEBUG_TSTATE("Removed ITT: 0x%08x dev: %p task[%p]\n",
			CMD_TFO(cmd)->tfo_get_task_tag(cmd), dev, task);
		spin_unlock_irqrestore(&dev->execute_task_lock, flags);

		atomic_set(&task->task_state_active, 0);
		atomic_dec(&T_TASK(cmd)->t_task_cdbs_ex_left);
	}
}

/* transport_cmd_check_stop():
 *
 * 'transport_off = 1' determines if t_transport_active should be cleared.
 * 'transport_off = 2' determines if task_dev_state should be removed.
 *
 * A non-zero u8 t_state sets cmd->t_state.
 * Returns 1 when command is stopped, else 0.
 */
static int transport_cmd_check_stop(
	struct se_cmd *cmd,
	int transport_off,
	u8 t_state)
{
	unsigned long flags;

	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	/*
	 * Determine if the IOCTL context caller is requesting the stopping of
	 * this command for LUN shutdown purposes.
	 */
	if (atomic_read(&T_TASK(cmd)->transport_lun_stop)) {
		DEBUG_CS("%s:%d atomic_read(&T_TASK(cmd)->transport_lun_stop)"
			" == TRUE for ITT: 0x%08x\n", __func__, __LINE__,
			CMD_TFO(cmd)->get_task_tag(cmd));

		cmd->deferred_t_state = cmd->t_state;
		cmd->t_state = TRANSPORT_DEFERRED_CMD;
		atomic_set(&T_TASK(cmd)->t_transport_active, 0);
		if (transport_off == 2)
			transport_all_task_dev_remove_state(cmd);
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

		complete(&T_TASK(cmd)->transport_lun_stop_comp);
		return 1;
	}
	/*
	 * Determine if frontend context caller is requesting the stopping of
	 * this command for frontend exceptions.
	 */
	if (atomic_read(&T_TASK(cmd)->t_transport_stop)) {
		DEBUG_CS("%s:%d atomic_read(&T_TASK(cmd)->t_transport_stop) =="
			" TRUE for ITT: 0x%08x\n", __func__, __LINE__,
			CMD_TFO(cmd)->get_task_tag(cmd));

		cmd->deferred_t_state = cmd->t_state;
		cmd->t_state = TRANSPORT_DEFERRED_CMD;
		if (transport_off == 2)
			transport_all_task_dev_remove_state(cmd);

		/*
		 * Clear struct se_cmd->se_lun before the transport_off == 2 handoff
		 * to FE.
		 */
		if (transport_off == 2)
			cmd->se_lun = NULL;
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

		complete(&T_TASK(cmd)->t_transport_stop_comp);
		return 1;
	}
	if (transport_off) {
		atomic_set(&T_TASK(cmd)->t_transport_active, 0);
		if (transport_off == 2) {
			transport_all_task_dev_remove_state(cmd);
			/*
			 * Clear struct se_cmd->se_lun before the transport_off == 2
			 * handoff to fabric module.
			 */
			cmd->se_lun = NULL;
			/*
			 * Some fabric modules like tcm_loop can release
			 * their internally allocated I/O reference and
			 * struct se_cmd now.
			 */
			if (CMD_TFO(cmd)->check_stop_free != NULL) {
				spin_unlock_irqrestore(
					&T_TASK(cmd)->t_state_lock, flags);

				CMD_TFO(cmd)->check_stop_free(cmd);
				return 1;
			}
		}
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

		return 0;
	} else if (t_state)
		cmd->t_state = t_state;
	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

	return 0;
}

static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{
	return transport_cmd_check_stop(cmd, 2, 0);
}

static void transport_lun_remove_cmd(struct se_cmd *cmd)
{
	struct se_lun *lun = SE_LUN(cmd);
	unsigned long flags;

	if (!lun)
		return;

	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
		goto check_lun;
	}
	atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
	transport_all_task_dev_remove_state(cmd);
	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

	transport_free_dev_tasks(cmd);

check_lun:
	spin_lock_irqsave(&lun->lun_cmd_lock, flags);
	if (atomic_read(&T_TASK(cmd)->transport_lun_active)) {
		list_del(&cmd->se_lun_list);
		atomic_set(&T_TASK(cmd)->transport_lun_active, 0);
#if 0
		printk(KERN_INFO "Removed ITT: 0x%08x from LUN LIST[%d]\n",
			CMD_TFO(cmd)->get_task_tag(cmd), lun->unpacked_lun);
#endif
	}
	spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
}

void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
{
	transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj);
	transport_lun_remove_cmd(cmd);

	if (transport_cmd_check_stop_to_fabric(cmd))
		return;
	if (remove)
		transport_generic_remove(cmd, 0, 0);
}

void transport_cmd_finish_abort_tmr(struct se_cmd *cmd)
{
	transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj);

	if (transport_cmd_check_stop_to_fabric(cmd))
		return;

	transport_generic_remove(cmd, 0, 0);
}

static int transport_add_cmd_to_queue(
	struct se_cmd *cmd,
	int t_state)
{
	struct se_device *dev = cmd->se_dev;
	struct se_queue_obj *qobj = dev->dev_queue_obj;
	struct se_queue_req *qr;
	unsigned long flags;

	qr = kzalloc(sizeof(struct se_queue_req), GFP_ATOMIC);
	if (!(qr)) {
		printk(KERN_ERR "Unable to allocate memory for"
				" struct se_queue_req\n");
		return -1;
	}
	INIT_LIST_HEAD(&qr->qr_list);

	qr->cmd = (void *)cmd;
	qr->state = t_state;

	if (t_state) {
		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
		cmd->t_state = t_state;
		atomic_set(&T_TASK(cmd)->t_transport_active, 1);
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
	}

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	list_add_tail(&qr->qr_list, &qobj->qobj_list);
	atomic_inc(&T_TASK(cmd)->t_transport_queue_active);
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

	atomic_inc(&qobj->queue_cnt);
	wake_up_interruptible(&qobj->thread_wq);
	return 0;
}

/*
 * Called with struct se_queue_obj->cmd_queue_lock held.
 */
static struct se_queue_req *
__transport_get_qr_from_queue(struct se_queue_obj *qobj)
{
	struct se_cmd *cmd;
	struct se_queue_req *qr = NULL;

	if (list_empty(&qobj->qobj_list))
		return NULL;

	list_for_each_entry(qr, &qobj->qobj_list, qr_list)
		break;

	if (qr->cmd) {
		cmd = (struct se_cmd *)qr->cmd;
		atomic_dec(&T_TASK(cmd)->t_transport_queue_active);
	}
	list_del(&qr->qr_list);
	atomic_dec(&qobj->queue_cnt);

	return qr;
}

static struct se_queue_req *
transport_get_qr_from_queue(struct se_queue_obj *qobj)
{
	struct se_cmd *cmd;
	struct se_queue_req *qr;
	unsigned long flags;

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	if (list_empty(&qobj->qobj_list)) {
		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
		return NULL;
	}

	list_for_each_entry(qr, &qobj->qobj_list, qr_list)
		break;

	if (qr->cmd) {
		cmd = (struct se_cmd *)qr->cmd;
		atomic_dec(&T_TASK(cmd)->t_transport_queue_active);
	}
	list_del(&qr->qr_list);
	atomic_dec(&qobj->queue_cnt);
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

	return qr;
}

static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
		struct se_queue_obj *qobj)
{
	struct se_cmd *q_cmd;
	struct se_queue_req *qr = NULL, *qr_p = NULL;
	unsigned long flags;

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	if (!(atomic_read(&T_TASK(cmd)->t_transport_queue_active))) {
		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
		return;
	}

	list_for_each_entry_safe(qr, qr_p, &qobj->qobj_list, qr_list) {
		q_cmd = (struct se_cmd *)qr->cmd;
		if (q_cmd != cmd)
			continue;

		atomic_dec(&T_TASK(q_cmd)->t_transport_queue_active);
		atomic_dec(&qobj->queue_cnt);
		list_del(&qr->qr_list);
		kfree(qr);
	}
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

	if (atomic_read(&T_TASK(cmd)->t_transport_queue_active)) {
		printk(KERN_ERR "ITT: 0x%08x t_transport_queue_active: %d\n",
			CMD_TFO(cmd)->get_task_tag(cmd),
			atomic_read(&T_TASK(cmd)->t_transport_queue_active));
	}
}

/*
 * Completion function used by TCM subsystem plugins (such as FILEIO)
 * for queueing up response from struct se_subsystem_api->do_task()
 */
void transport_complete_sync_cache(struct se_cmd *cmd, int good)
{
	struct se_task *task = list_entry(T_TASK(cmd)->t_task_list.next,
				struct se_task, t_list);

	if (good) {
		cmd->scsi_status = SAM_STAT_GOOD;
		task->task_scsi_status = GOOD;
	} else {
		task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
		task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST;
		TASK_CMD(task)->transport_error_status =
					PYX_TRANSPORT_ILLEGAL_REQUEST;
	}

	transport_complete_task(task, good);
}
EXPORT_SYMBOL(transport_complete_sync_cache);
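
/*
 * Editor's note -- a minimal sketch, not in this diff: a backend
 * plugin's SYNCHRONIZE_CACHE emulation would report its result back
 * through transport_complete_sync_cache().  my_plugin_emulate_sync_cache()
 * and my_flush_backing_store() are hypothetical names.
 */
static void my_plugin_emulate_sync_cache(struct se_cmd *cmd)
{
	int err = my_flush_backing_store(cmd);	/* hypothetical helper */

	/*
	 * good != 0 reports SAM_STAT_GOOD; good == 0 raises the
	 * ILLEGAL_REQUEST check condition, as implemented above.
	 */
	transport_complete_sync_cache(cmd, err == 0);
}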

/* transport_complete_task():
 *
 * Called from interrupt and non-interrupt context depending
 * on the transport plugin.
 */
void transport_complete_task(struct se_task *task, int success)
{
	struct se_cmd *cmd = TASK_CMD(task);
	struct se_device *dev = task->se_dev;
	int t_state;
	unsigned long flags;
#if 0
	printk(KERN_INFO "task: %p CDB: 0x%02x obj_ptr: %p\n", task,
		T_TASK(cmd)->t_task_cdb[0], dev);
#endif
	if (dev) {
		spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags);
		atomic_inc(&dev->depth_left);
		atomic_inc(&SE_HBA(dev)->left_queue_depth);
		spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
	}

	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	atomic_set(&task->task_active, 0);

	/*
	 * See if any sense data exists, if so set the TASK_SENSE flag.
	 * Also check for any other post completion work that needs to be
	 * done by the plugins.
	 */
	if (dev && dev->transport->transport_complete) {
		if (dev->transport->transport_complete(task) != 0) {
			cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
			task->task_sense = 1;
			success = 1;
		}
	}

	/*
	 * See if we are waiting for outstanding struct se_task
	 * to complete for an exception condition
	 */
	if (atomic_read(&task->task_stop)) {
		/*
		 * Decrement T_TASK(cmd)->t_se_count if this task had
		 * previously thrown its timeout exception handler.
		 */
		if (atomic_read(&task->task_timeout)) {
			atomic_dec(&T_TASK(cmd)->t_se_count);
			atomic_set(&task->task_timeout, 0);
		}
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

		complete(&task->task_stop_comp);
		return;
	}
	/*
	 * If the task's timeout handler has fired, use the t_task_cdbs_timeout
	 * left counter to determine when the struct se_cmd is ready to be queued to
	 * the processing thread.
	 */
	if (atomic_read(&task->task_timeout)) {
		if (!(atomic_dec_and_test(
				&T_TASK(cmd)->t_task_cdbs_timeout_left))) {
			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
				flags);
			return;
		}
		t_state = TRANSPORT_COMPLETE_TIMEOUT;
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

		transport_add_cmd_to_queue(cmd, t_state);
		return;
	}
	atomic_dec(&T_TASK(cmd)->t_task_cdbs_timeout_left);

	/*
	 * Decrement the outstanding t_task_cdbs_left count.  The last
	 * struct se_task from struct se_cmd will complete itself into the
	 * device queue depending upon int success.
	 */
	if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_left))) {
		if (!success)
			T_TASK(cmd)->t_tasks_failed = 1;

		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
		return;
	}

	if (!success || T_TASK(cmd)->t_tasks_failed) {
		t_state = TRANSPORT_COMPLETE_FAILURE;
		if (!task->task_error_status) {
			task->task_error_status =
				PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
			cmd->transport_error_status =
				PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
		}
	} else {
		atomic_set(&T_TASK(cmd)->t_transport_complete, 1);
		t_state = TRANSPORT_COMPLETE_OK;
	}
	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

	transport_add_cmd_to_queue(cmd, t_state);
}
EXPORT_SYMBOL(transport_complete_task);

/*
 * Called by transport_add_tasks_from_cmd() once a struct se_cmd's
 * struct se_task list is ready to be added to the active execution list
 * of the struct se_device.
 *
 * Called with se_dev_t->execute_task_lock held.
 */
static inline int transport_add_task_check_sam_attr(
	struct se_task *task,
	struct se_task *task_prev,
	struct se_device *dev)
{
	/*
	 * No SAM Task attribute emulation enabled, add to tail of
	 * execution queue
	 */
	if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) {
		list_add_tail(&task->t_execute_list, &dev->execute_task_list);
		return 0;
	}
	/*
	 * HEAD_OF_QUEUE attribute for received CDB, which means
	 * the first task that is associated with a struct se_cmd goes to
	 * head of the struct se_device->execute_task_list, and task_prev
	 * after that for each subsequent task
	 */
	if (task->task_se_cmd->sam_task_attr == TASK_ATTR_HOQ) {
		list_add(&task->t_execute_list,
				(task_prev != NULL) ?
				&task_prev->t_execute_list :
				&dev->execute_task_list);

		DEBUG_STA("Set HEAD_OF_QUEUE for task CDB: 0x%02x"
				" in execution queue\n",
				T_TASK(task->task_se_cmd)->t_task_cdb[0]);
		return 1;
	}
	/*
	 * ORDERED, SIMPLE or UNTAGGED attribute tasks that have been
	 * transitioned from Dormant -> Active state are added to the end
	 * of the struct se_device->execute_task_list
	 */
	list_add_tail(&task->t_execute_list, &dev->execute_task_list);
	return 0;
}

/* __transport_add_task_to_execute_queue():
 *
 * Called with se_dev_t->execute_task_lock held.
 */
static void __transport_add_task_to_execute_queue(
	struct se_task *task,
	struct se_task *task_prev,
	struct se_device *dev)
{
	int head_of_queue;

	head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev);
	atomic_inc(&dev->execute_tasks);

	if (atomic_read(&task->task_state_active))
		return;
	/*
	 * Determine if this task needs to go to HEAD_OF_QUEUE for the
	 * state list as well.  Running with SAM Task Attribute emulation
	 * will always return head_of_queue == 0 here
	 */
	if (head_of_queue)
		list_add(&task->t_state_list, (task_prev) ?
				&task_prev->t_state_list :
				&dev->state_task_list);
	else
		list_add_tail(&task->t_state_list, &dev->state_task_list);

	atomic_set(&task->task_state_active, 1);

	DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n",
		CMD_TFO(task->task_se_cmd)->get_task_tag(task->task_se_cmd),
		task, dev);
}

static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
{
	struct se_device *dev;
	struct se_task *task;
	unsigned long flags;

	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
		dev = task->se_dev;

		if (atomic_read(&task->task_state_active))
			continue;

		spin_lock(&dev->execute_task_lock);
		list_add_tail(&task->t_state_list, &dev->state_task_list);
		atomic_set(&task->task_state_active, 1);

		DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n",
			CMD_TFO(task->task_se_cmd)->get_task_tag(
			task->task_se_cmd), task, dev);

		spin_unlock(&dev->execute_task_lock);
	}
	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
}

static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
{
	struct se_device *dev = SE_DEV(cmd);
	struct se_task *task, *task_prev = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
		if (atomic_read(&task->task_execute_queue))
			continue;
		/*
		 * __transport_add_task_to_execute_queue() handles the
		 * SAM Task Attribute emulation if enabled
		 */
		__transport_add_task_to_execute_queue(task, task_prev, dev);
		atomic_set(&task->task_execute_queue, 1);
		task_prev = task;
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);

	return;
}

/* transport_get_task_from_execute_queue():
 *
 * Called with dev->execute_task_lock held.
 */
static struct se_task *
transport_get_task_from_execute_queue(struct se_device *dev)
{
	struct se_task *task;

	if (list_empty(&dev->execute_task_list))
		return NULL;

	list_for_each_entry(task, &dev->execute_task_list, t_execute_list)
		break;

	list_del(&task->t_execute_list);
	atomic_dec(&dev->execute_tasks);

	return task;
}

/* transport_remove_task_from_execute_queue():
 *
 *
 */
void transport_remove_task_from_execute_queue(
	struct se_task *task,
	struct se_device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	list_del(&task->t_execute_list);
	atomic_dec(&dev->execute_tasks);
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
{
	switch (cmd->data_direction) {
	case DMA_NONE:
		return "NONE";
	case DMA_FROM_DEVICE:
		return "READ";
	case DMA_TO_DEVICE:
		return "WRITE";
	case DMA_BIDIRECTIONAL:
		return "BIDI";
	default:
		break;
	}

	return "UNKNOWN";
}

void transport_dump_dev_state(
	struct se_device *dev,
	char *b,
	int *bl)
{
	*bl += sprintf(b + *bl, "Status: ");
	switch (dev->dev_status) {
	case TRANSPORT_DEVICE_ACTIVATED:
		*bl += sprintf(b + *bl, "ACTIVATED");
		break;
	case TRANSPORT_DEVICE_DEACTIVATED:
		*bl += sprintf(b + *bl, "DEACTIVATED");
		break;
	case TRANSPORT_DEVICE_SHUTDOWN:
		*bl += sprintf(b + *bl, "SHUTDOWN");
		break;
	case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
	case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
		*bl += sprintf(b + *bl, "OFFLINE");
		break;
	default:
		*bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status);
		break;
	}

	*bl += sprintf(b + *bl, " Execute/Left/Max Queue Depth: %d/%d/%d",
		atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left),
		dev->queue_depth);
	*bl += sprintf(b + *bl, " SectorSize: %u MaxSectors: %u\n",
		DEV_ATTRIB(dev)->block_size, DEV_ATTRIB(dev)->max_sectors);
	*bl += sprintf(b + *bl, " ");
}
1271 | 1269 | ||
1272 | /* transport_release_all_cmds(): | 1270 | /* transport_release_all_cmds(): |
1273 | * | 1271 | * |
1274 | * | 1272 | * |
1275 | */ | 1273 | */ |
1276 | static void transport_release_all_cmds(struct se_device *dev) | 1274 | static void transport_release_all_cmds(struct se_device *dev) |
1277 | { | 1275 | { |
1278 | struct se_cmd *cmd = NULL; | 1276 | struct se_cmd *cmd = NULL; |
1279 | struct se_queue_req *qr = NULL, *qr_p = NULL; | 1277 | struct se_queue_req *qr = NULL, *qr_p = NULL; |
1280 | int bug_out = 0, t_state; | 1278 | int bug_out = 0, t_state; |
1281 | unsigned long flags; | 1279 | unsigned long flags; |
1282 | 1280 | ||
1283 | spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags); | 1281 | spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags); |
1284 | list_for_each_entry_safe(qr, qr_p, &dev->dev_queue_obj->qobj_list, | 1282 | list_for_each_entry_safe(qr, qr_p, &dev->dev_queue_obj->qobj_list, |
1285 | qr_list) { | 1283 | qr_list) { |
1286 | 1284 | ||
1287 | cmd = (struct se_cmd *)qr->cmd; | 1285 | cmd = (struct se_cmd *)qr->cmd; |
1288 | t_state = qr->state; | 1286 | t_state = qr->state; |
1289 | list_del(&qr->qr_list); | 1287 | list_del(&qr->qr_list); |
1290 | kfree(qr); | 1288 | kfree(qr); |
1291 | spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, | 1289 | spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, |
1292 | flags); | 1290 | flags); |
1293 | 1291 | ||
1294 | printk(KERN_ERR "Releasing ITT: 0x%08x, i_state: %u," | 1292 | printk(KERN_ERR "Releasing ITT: 0x%08x, i_state: %u," |
1295 | " t_state: %u directly\n", | 1293 | " t_state: %u directly\n", |
1296 | CMD_TFO(cmd)->get_task_tag(cmd), | 1294 | CMD_TFO(cmd)->get_task_tag(cmd), |
1297 | CMD_TFO(cmd)->get_cmd_state(cmd), t_state); | 1295 | CMD_TFO(cmd)->get_cmd_state(cmd), t_state); |
1298 | 1296 | ||
1299 | transport_release_fe_cmd(cmd); | 1297 | transport_release_fe_cmd(cmd); |
1300 | bug_out = 1; | 1298 | bug_out = 1; |
1301 | 1299 | ||
1302 | spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags); | 1300 | spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags); |
1303 | } | 1301 | } |
1304 | spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, flags); | 1302 | spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, flags); |
1305 | #if 0 | 1303 | #if 0 |
1306 | if (bug_out) | 1304 | if (bug_out) |
1307 | BUG(); | 1305 | BUG(); |
1308 | #endif | 1306 | #endif |
1309 | } | 1307 | } |
1310 | 1308 | ||
1311 | void transport_dump_vpd_proto_id( | 1309 | void transport_dump_vpd_proto_id( |
1312 | struct t10_vpd *vpd, | 1310 | struct t10_vpd *vpd, |
1313 | unsigned char *p_buf, | 1311 | unsigned char *p_buf, |
1314 | int p_buf_len) | 1312 | int p_buf_len) |
1315 | { | 1313 | { |
1316 | unsigned char buf[VPD_TMP_BUF_SIZE]; | 1314 | unsigned char buf[VPD_TMP_BUF_SIZE]; |
1317 | int len; | 1315 | int len; |
1318 | 1316 | ||
1319 | memset(buf, 0, VPD_TMP_BUF_SIZE); | 1317 | memset(buf, 0, VPD_TMP_BUF_SIZE); |
1320 | len = sprintf(buf, "T10 VPD Protocol Identifier: "); | 1318 | len = sprintf(buf, "T10 VPD Protocol Identifier: "); |
1321 | 1319 | ||
1322 | switch (vpd->protocol_identifier) { | 1320 | switch (vpd->protocol_identifier) { |
1323 | case 0x00: | 1321 | case 0x00: |
1324 | sprintf(buf+len, "Fibre Channel\n"); | 1322 | sprintf(buf+len, "Fibre Channel\n"); |
1325 | break; | 1323 | break; |
1326 | case 0x10: | 1324 | case 0x10: |
1327 | sprintf(buf+len, "Parallel SCSI\n"); | 1325 | sprintf(buf+len, "Parallel SCSI\n"); |
1328 | break; | 1326 | break; |
1329 | case 0x20: | 1327 | case 0x20: |
1330 | sprintf(buf+len, "SSA\n"); | 1328 | sprintf(buf+len, "SSA\n"); |
1331 | break; | 1329 | break; |
1332 | case 0x30: | 1330 | case 0x30: |
1333 | sprintf(buf+len, "IEEE 1394\n"); | 1331 | sprintf(buf+len, "IEEE 1394\n"); |
1334 | break; | 1332 | break; |
1335 | case 0x40: | 1333 | case 0x40: |
1336 | sprintf(buf+len, "SCSI Remote Direct Memory Access" | 1334 | sprintf(buf+len, "SCSI Remote Direct Memory Access" |
1337 | " Protocol\n"); | 1335 | " Protocol\n"); |
1338 | break; | 1336 | break; |
1339 | case 0x50: | 1337 | case 0x50: |
1340 | sprintf(buf+len, "Internet SCSI (iSCSI)\n"); | 1338 | sprintf(buf+len, "Internet SCSI (iSCSI)\n"); |
1341 | break; | 1339 | break; |
1342 | case 0x60: | 1340 | case 0x60: |
1343 | sprintf(buf+len, "SAS Serial SCSI Protocol\n"); | 1341 | sprintf(buf+len, "SAS Serial SCSI Protocol\n"); |
1344 | break; | 1342 | break; |
1345 | case 0x70: | 1343 | case 0x70: |
1346 | sprintf(buf+len, "Automation/Drive Interface Transport" | 1344 | sprintf(buf+len, "Automation/Drive Interface Transport" |
1347 | " Protocol\n"); | 1345 | " Protocol\n"); |
1348 | break; | 1346 | break; |
1349 | case 0x80: | 1347 | case 0x80: |
1350 | sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n"); | 1348 | sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n"); |
1351 | break; | 1349 | break; |
1352 | default: | 1350 | default: |
1353 | sprintf(buf+len, "Unknown 0x%02x\n", | 1351 | sprintf(buf+len, "Unknown 0x%02x\n", |
1354 | vpd->protocol_identifier); | 1352 | vpd->protocol_identifier); |
1355 | break; | 1353 | break; |
1356 | } | 1354 | } |
1357 | 1355 | ||
1358 | if (p_buf) | 1356 | if (p_buf) |
1359 | strncpy(p_buf, buf, p_buf_len); | 1357 | strncpy(p_buf, buf, p_buf_len); |
1360 | else | 1358 | else |
1361 | printk(KERN_INFO "%s", buf); | 1359 | printk(KERN_INFO "%s", buf); |
1362 | } | 1360 | } |
1363 | 1361 | ||
1364 | void | 1362 | void |
1365 | transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83) | 1363 | transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83) |
1366 | { | 1364 | { |
1367 | /* | 1365 | /* |
1368 | * Check if the Protocol Identifier Valid (PIV) bit is set.. | 1366 | * Check if the Protocol Identifier Valid (PIV) bit is set.. |
1369 | * | 1367 | * |
1370 | * from spc3r23.pdf section 7.5.1 | 1368 | * from spc3r23.pdf section 7.5.1 |
1371 | */ | 1369 | */ |
1372 | if (page_83[1] & 0x80) { | 1370 | if (page_83[1] & 0x80) { |
1373 | vpd->protocol_identifier = (page_83[0] & 0xf0); | 1371 | vpd->protocol_identifier = (page_83[0] & 0xf0); |
1374 | vpd->protocol_identifier_set = 1; | 1372 | vpd->protocol_identifier_set = 1; |
1375 | transport_dump_vpd_proto_id(vpd, NULL, 0); | 1373 | transport_dump_vpd_proto_id(vpd, NULL, 0); |
1376 | } | 1374 | } |
1377 | } | 1375 | } |
1378 | EXPORT_SYMBOL(transport_set_vpd_proto_id); | 1376 | EXPORT_SYMBOL(transport_set_vpd_proto_id); |
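A small userspace sketch of the designator-header decode above: bit 7 of byte 1 is the PIV flag, and the upper nibble of byte 0 carries the protocol identifier when PIV is set. The sample bytes are invented for illustration, not taken from a real device:

#include <stdio.h>

/* Decode the protocol identifier from a page 0x83 designator header. */
static void decode_proto_id(const unsigned char *page_83)
{
	if (!(page_83[1] & 0x80)) {
		printf("PIV clear: protocol identifier not valid\n");
		return;
	}
	printf("protocol identifier: 0x%02x%s\n", page_83[0] & 0xf0,
	       ((page_83[0] & 0xf0) == 0x50) ? " (iSCSI)" : "");
}

int main(void)
{
	/* hypothetical designator: code set 0x2, PIV=1 */
	unsigned char desc[] = { 0x52, 0x83, 0x00, 0x08 };

	decode_proto_id(desc);	/* prints 0x50 (iSCSI) */
	return 0;
}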
1379 | 1377 | ||
1380 | int transport_dump_vpd_assoc( | 1378 | int transport_dump_vpd_assoc( |
1381 | struct t10_vpd *vpd, | 1379 | struct t10_vpd *vpd, |
1382 | unsigned char *p_buf, | 1380 | unsigned char *p_buf, |
1383 | int p_buf_len) | 1381 | int p_buf_len) |
1384 | { | 1382 | { |
1385 | unsigned char buf[VPD_TMP_BUF_SIZE]; | 1383 | unsigned char buf[VPD_TMP_BUF_SIZE]; |
1386 | int ret = 0, len; | 1384 | int ret = 0, len; |
1387 | 1385 | ||
1388 | memset(buf, 0, VPD_TMP_BUF_SIZE); | 1386 | memset(buf, 0, VPD_TMP_BUF_SIZE); |
1389 | len = sprintf(buf, "T10 VPD Identifier Association: "); | 1387 | len = sprintf(buf, "T10 VPD Identifier Association: "); |
1390 | 1388 | ||
1391 | switch (vpd->association) { | 1389 | switch (vpd->association) { |
1392 | case 0x00: | 1390 | case 0x00: |
1393 | sprintf(buf+len, "addressed logical unit\n"); | 1391 | sprintf(buf+len, "addressed logical unit\n"); |
1394 | break; | 1392 | break; |
1395 | case 0x10: | 1393 | case 0x10: |
1396 | sprintf(buf+len, "target port\n"); | 1394 | sprintf(buf+len, "target port\n"); |
1397 | break; | 1395 | break; |
1398 | case 0x20: | 1396 | case 0x20: |
1399 | sprintf(buf+len, "SCSI target device\n"); | 1397 | sprintf(buf+len, "SCSI target device\n"); |
1400 | break; | 1398 | break; |
1401 | default: | 1399 | default: |
1402 | sprintf(buf+len, "Unknown 0x%02x\n", vpd->association); | 1400 | sprintf(buf+len, "Unknown 0x%02x\n", vpd->association); |
1403 | ret = -1; | 1401 | ret = -1; |
1404 | break; | 1402 | break; |
1405 | } | 1403 | } |
1406 | 1404 | ||
1407 | if (p_buf) | 1405 | if (p_buf) |
1408 | strncpy(p_buf, buf, p_buf_len); | 1406 | strncpy(p_buf, buf, p_buf_len); |
1409 | else | 1407 | else |
1410 | printk(KERN_INFO "%s", buf); | 1408 | printk(KERN_INFO "%s", buf); |
1411 | 1409 | ||
1412 | return ret; | 1410 | return ret; |
1413 | } | 1411 | } |
1414 | 1412 | ||
1415 | int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83) | 1413 | int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83) |
1416 | { | 1414 | { |
1417 | /* | 1415 | /* |
1418 | * The VPD identification association.. | 1416 | * The VPD identification association.. |
1419 | * | 1417 | * |
1420 | * from spc3r23.pdf Section 7.6.3.1 Table 297 | 1418 | * from spc3r23.pdf Section 7.6.3.1 Table 297 |
1421 | */ | 1419 | */ |
1422 | vpd->association = (page_83[1] & 0x30); | 1420 | vpd->association = (page_83[1] & 0x30); |
1423 | return transport_dump_vpd_assoc(vpd, NULL, 0); | 1421 | return transport_dump_vpd_assoc(vpd, NULL, 0); |
1424 | } | 1422 | } |
1425 | EXPORT_SYMBOL(transport_set_vpd_assoc); | 1423 | EXPORT_SYMBOL(transport_set_vpd_assoc); |
1426 | 1424 | ||
1427 | int transport_dump_vpd_ident_type( | 1425 | int transport_dump_vpd_ident_type( |
1428 | struct t10_vpd *vpd, | 1426 | struct t10_vpd *vpd, |
1429 | unsigned char *p_buf, | 1427 | unsigned char *p_buf, |
1430 | int p_buf_len) | 1428 | int p_buf_len) |
1431 | { | 1429 | { |
1432 | unsigned char buf[VPD_TMP_BUF_SIZE]; | 1430 | unsigned char buf[VPD_TMP_BUF_SIZE]; |
1433 | int ret = 0, len; | 1431 | int ret = 0, len; |
1434 | 1432 | ||
1435 | memset(buf, 0, VPD_TMP_BUF_SIZE); | 1433 | memset(buf, 0, VPD_TMP_BUF_SIZE); |
1436 | len = sprintf(buf, "T10 VPD Identifier Type: "); | 1434 | len = sprintf(buf, "T10 VPD Identifier Type: "); |
1437 | 1435 | ||
1438 | switch (vpd->device_identifier_type) { | 1436 | switch (vpd->device_identifier_type) { |
1439 | case 0x00: | 1437 | case 0x00: |
1440 | sprintf(buf+len, "Vendor specific\n"); | 1438 | sprintf(buf+len, "Vendor specific\n"); |
1441 | break; | 1439 | break; |
1442 | case 0x01: | 1440 | case 0x01: |
1443 | sprintf(buf+len, "T10 Vendor ID based\n"); | 1441 | sprintf(buf+len, "T10 Vendor ID based\n"); |
1444 | break; | 1442 | break; |
1445 | case 0x02: | 1443 | case 0x02: |
1446 | sprintf(buf+len, "EUI-64 based\n"); | 1444 | sprintf(buf+len, "EUI-64 based\n"); |
1447 | break; | 1445 | break; |
1448 | case 0x03: | 1446 | case 0x03: |
1449 | sprintf(buf+len, "NAA\n"); | 1447 | sprintf(buf+len, "NAA\n"); |
1450 | break; | 1448 | break; |
1451 | case 0x04: | 1449 | case 0x04: |
1452 | sprintf(buf+len, "Relative target port identifier\n"); | 1450 | sprintf(buf+len, "Relative target port identifier\n"); |
1453 | break; | 1451 | break; |
1454 | case 0x08: | 1452 | case 0x08: |
1455 | sprintf(buf+len, "SCSI name string\n"); | 1453 | sprintf(buf+len, "SCSI name string\n"); |
1456 | break; | 1454 | break; |
1457 | default: | 1455 | default: |
1458 | sprintf(buf+len, "Unsupported: 0x%02x\n", | 1456 | sprintf(buf+len, "Unsupported: 0x%02x\n", |
1459 | vpd->device_identifier_type); | 1457 | vpd->device_identifier_type); |
1460 | ret = -1; | 1458 | ret = -1; |
1461 | break; | 1459 | break; |
1462 | } | 1460 | } |
1463 | 1461 | ||
1464 | if (p_buf) | 1462 | if (p_buf) |
1465 | strncpy(p_buf, buf, p_buf_len); | 1463 | strncpy(p_buf, buf, p_buf_len); |
1466 | else | 1464 | else |
1467 | printk(KERN_INFO "%s", buf); | 1465 | printk(KERN_INFO "%s", buf); |
1468 | 1466 | ||
1469 | return ret; | 1467 | return ret; |
1470 | } | 1468 | } |
1471 | 1469 | ||
1472 | int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83) | 1470 | int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83) |
1473 | { | 1471 | { |
1474 | /* | 1472 | /* |
1475 | * The VPD identifier type.. | 1473 | * The VPD identifier type.. |
1476 | * | 1474 | * |
1477 | * from spc3r23.pdf Section 7.6.3.1 Table 298 | 1475 | * from spc3r23.pdf Section 7.6.3.1 Table 298 |
1478 | */ | 1476 | */ |
1479 | vpd->device_identifier_type = (page_83[1] & 0x0f); | 1477 | vpd->device_identifier_type = (page_83[1] & 0x0f); |
1480 | return transport_dump_vpd_ident_type(vpd, NULL, 0); | 1478 | return transport_dump_vpd_ident_type(vpd, NULL, 0); |
1481 | } | 1479 | } |
1482 | EXPORT_SYMBOL(transport_set_vpd_ident_type); | 1480 | EXPORT_SYMBOL(transport_set_vpd_ident_type); |
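The two setters above read adjacent fields of the same designator byte: ASSOCIATION in bits 5-4 (mask 0x30) and DESIGNATOR TYPE in bits 3-0 (mask 0x0f). A tiny sketch of those masks (the byte value is hypothetical):

#include <stdio.h>

int main(void)
{
	unsigned char byte1 = 0x23;	/* made up: assoc=0x20, type=0x03 */

	printf("association: 0x%02x\n", byte1 & 0x30);		/* 0x20: SCSI target device */
	printf("designator type: 0x%02x\n", byte1 & 0x0f);	/* 0x03: NAA */
	return 0;
}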
1483 | 1481 | ||
1484 | int transport_dump_vpd_ident( | 1482 | int transport_dump_vpd_ident( |
1485 | struct t10_vpd *vpd, | 1483 | struct t10_vpd *vpd, |
1486 | unsigned char *p_buf, | 1484 | unsigned char *p_buf, |
1487 | int p_buf_len) | 1485 | int p_buf_len) |
1488 | { | 1486 | { |
1489 | unsigned char buf[VPD_TMP_BUF_SIZE]; | 1487 | unsigned char buf[VPD_TMP_BUF_SIZE]; |
1490 | int ret = 0; | 1488 | int ret = 0; |
1491 | 1489 | ||
1492 | memset(buf, 0, VPD_TMP_BUF_SIZE); | 1490 | memset(buf, 0, VPD_TMP_BUF_SIZE); |
1493 | 1491 | ||
1494 | switch (vpd->device_identifier_code_set) { | 1492 | switch (vpd->device_identifier_code_set) { |
1495 | case 0x01: /* Binary */ | 1493 | case 0x01: /* Binary */ |
1496 | sprintf(buf, "T10 VPD Binary Device Identifier: %s\n", | 1494 | sprintf(buf, "T10 VPD Binary Device Identifier: %s\n", |
1497 | &vpd->device_identifier[0]); | 1495 | &vpd->device_identifier[0]); |
1498 | break; | 1496 | break; |
1499 | case 0x02: /* ASCII */ | 1497 | case 0x02: /* ASCII */ |
1500 | sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n", | 1498 | sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n", |
1501 | &vpd->device_identifier[0]); | 1499 | &vpd->device_identifier[0]); |
1502 | break; | 1500 | break; |
1503 | case 0x03: /* UTF-8 */ | 1501 | case 0x03: /* UTF-8 */ |
1504 | sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n", | 1502 | sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n", |
1505 | &vpd->device_identifier[0]); | 1503 | &vpd->device_identifier[0]); |
1506 | break; | 1504 | break; |
1507 | default: | 1505 | default: |
1508 | sprintf(buf, "T10 VPD Device Identifier encoding unsupported:" | 1506 | sprintf(buf, "T10 VPD Device Identifier encoding unsupported:" |
1509 | " 0x%02x", vpd->device_identifier_code_set); | 1507 | " 0x%02x", vpd->device_identifier_code_set); |
1510 | ret = -1; | 1508 | ret = -1; |
1511 | break; | 1509 | break; |
1512 | } | 1510 | } |
1513 | 1511 | ||
1514 | if (p_buf) | 1512 | if (p_buf) |
1515 | strncpy(p_buf, buf, p_buf_len); | 1513 | strncpy(p_buf, buf, p_buf_len); |
1516 | else | 1514 | else |
1517 | printk(KERN_INFO "%s", buf); | 1515 | printk(KERN_INFO "%s", buf); |
1518 | 1516 | ||
1519 | return ret; | 1517 | return ret; |
1520 | } | 1518 | } |
1521 | 1519 | ||
1522 | int | 1520 | int |
1523 | transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83) | 1521 | transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83) |
1524 | { | 1522 | { |
1525 | static const char hex_str[] = "0123456789abcdef"; | 1523 | static const char hex_str[] = "0123456789abcdef"; |
1526 | int j = 0, i = 4; /* offset to start of the identifier */ | 1524 | int j = 0, i = 4; /* offset to start of the identifier */ |
1527 | 1525 | ||
1528 | /* | 1526 | /* |
1529 | * The VPD Code Set (encoding) | 1527 | * The VPD Code Set (encoding) |
1530 | * | 1528 | * |
1531 | * from spc3r23.pdf Section 7.6.3.1 Table 296 | 1529 | * from spc3r23.pdf Section 7.6.3.1 Table 296 |
1532 | */ | 1530 | */ |
1533 | vpd->device_identifier_code_set = (page_83[0] & 0x0f); | 1531 | vpd->device_identifier_code_set = (page_83[0] & 0x0f); |
1534 | switch (vpd->device_identifier_code_set) { | 1532 | switch (vpd->device_identifier_code_set) { |
1535 | case 0x01: /* Binary */ | 1533 | case 0x01: /* Binary */ |
1536 | vpd->device_identifier[j++] = | 1534 | vpd->device_identifier[j++] = |
1537 | hex_str[vpd->device_identifier_type]; | 1535 | hex_str[vpd->device_identifier_type]; |
1538 | while (i < (4 + page_83[3])) { | 1536 | while (i < (4 + page_83[3])) { |
1539 | vpd->device_identifier[j++] = | 1537 | vpd->device_identifier[j++] = |
1540 | hex_str[(page_83[i] & 0xf0) >> 4]; | 1538 | hex_str[(page_83[i] & 0xf0) >> 4]; |
1541 | vpd->device_identifier[j++] = | 1539 | vpd->device_identifier[j++] = |
1542 | hex_str[page_83[i] & 0x0f]; | 1540 | hex_str[page_83[i] & 0x0f]; |
1543 | i++; | 1541 | i++; |
1544 | } | 1542 | } |
1545 | break; | 1543 | break; |
1546 | case 0x02: /* ASCII */ | 1544 | case 0x02: /* ASCII */ |
1547 | case 0x03: /* UTF-8 */ | 1545 | case 0x03: /* UTF-8 */ |
1548 | while (i < (4 + page_83[3])) | 1546 | while (i < (4 + page_83[3])) |
1549 | vpd->device_identifier[j++] = page_83[i++]; | 1547 | vpd->device_identifier[j++] = page_83[i++]; |
1550 | break; | 1548 | break; |
1551 | default: | 1549 | default: |
1552 | break; | 1550 | break; |
1553 | } | 1551 | } |
1554 | 1552 | ||
1555 | return transport_dump_vpd_ident(vpd, NULL, 0); | 1553 | return transport_dump_vpd_ident(vpd, NULL, 0); |
1556 | } | 1554 | } |
1557 | EXPORT_SYMBOL(transport_set_vpd_ident); | 1555 | EXPORT_SYMBOL(transport_set_vpd_ident); |
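For the binary code set, the loop above expands each identifier byte into two lowercase hex digits via the nibble lookup table. A self-contained sketch of that encoding, with made-up NAA bytes:

#include <stdio.h>

static const char hex_str[] = "0123456789abcdef";

int main(void)
{
	unsigned char naa[] = { 0x60, 0x01, 0x4f, 0xde };	/* illustrative only */
	char out[2 * sizeof(naa) + 1];
	int i, j = 0;

	for (i = 0; i < (int)sizeof(naa); i++) {
		out[j++] = hex_str[(naa[i] & 0xf0) >> 4];	/* high nibble first */
		out[j++] = hex_str[naa[i] & 0x0f];
	}
	out[j] = '\0';
	printf("identifier: %s\n", out);	/* 60014fde */
	return 0;
}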
1558 | 1556 | ||
1559 | static void core_setup_task_attr_emulation(struct se_device *dev) | 1557 | static void core_setup_task_attr_emulation(struct se_device *dev) |
1560 | { | 1558 | { |
1561 | /* | 1559 | /* |
1562 | * If this device is from Target_Core_Mod/pSCSI, disable the | 1560 | * If this device is from Target_Core_Mod/pSCSI, disable the |
1563 | * SAM Task Attribute emulation. | 1561 | * SAM Task Attribute emulation. |
1564 | * | 1562 | * |
1565 | * This is currently not available in upstream Linux/SCSI Target | 1563 | * This is currently not available in upstream Linux/SCSI Target |
1566 | * mode code, and is assumed to be disabled while using TCM/pSCSI. | 1564 | * mode code, and is assumed to be disabled while using TCM/pSCSI. |
1567 | */ | 1565 | */ |
1568 | if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { | 1566 | if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { |
1569 | dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH; | 1567 | dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH; |
1570 | return; | 1568 | return; |
1571 | } | 1569 | } |
1572 | 1570 | ||
1573 | dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED; | 1571 | dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED; |
1574 | DEBUG_STA("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x" | 1572 | DEBUG_STA("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x" |
1575 | " device\n", TRANSPORT(dev)->name, | 1573 | " device\n", TRANSPORT(dev)->name, |
1576 | TRANSPORT(dev)->get_device_rev(dev)); | 1574 | TRANSPORT(dev)->get_device_rev(dev)); |
1577 | } | 1575 | } |
1578 | 1576 | ||
1579 | static void scsi_dump_inquiry(struct se_device *dev) | 1577 | static void scsi_dump_inquiry(struct se_device *dev) |
1580 | { | 1578 | { |
1581 | struct t10_wwn *wwn = DEV_T10_WWN(dev); | 1579 | struct t10_wwn *wwn = DEV_T10_WWN(dev); |
1582 | int i, device_type; | 1580 | int i, device_type; |
1583 | /* | 1581 | /* |
1584 | * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer | 1582 | * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer |
1585 | */ | 1583 | */ |
1586 | printk(" Vendor: "); | 1584 | printk(" Vendor: "); |
1587 | for (i = 0; i < 8; i++) | 1585 | for (i = 0; i < 8; i++) |
1588 | if (wwn->vendor[i] >= 0x20) | 1586 | if (wwn->vendor[i] >= 0x20) |
1589 | printk("%c", wwn->vendor[i]); | 1587 | printk("%c", wwn->vendor[i]); |
1590 | else | 1588 | else |
1591 | printk(" "); | 1589 | printk(" "); |
1592 | 1590 | ||
1593 | printk(" Model: "); | 1591 | printk(" Model: "); |
1594 | for (i = 0; i < 16; i++) | 1592 | for (i = 0; i < 16; i++) |
1595 | if (wwn->model[i] >= 0x20) | 1593 | if (wwn->model[i] >= 0x20) |
1596 | printk("%c", wwn->model[i]); | 1594 | printk("%c", wwn->model[i]); |
1597 | else | 1595 | else |
1598 | printk(" "); | 1596 | printk(" "); |
1599 | 1597 | ||
1600 | printk(" Revision: "); | 1598 | printk(" Revision: "); |
1601 | for (i = 0; i < 4; i++) | 1599 | for (i = 0; i < 4; i++) |
1602 | if (wwn->revision[i] >= 0x20) | 1600 | if (wwn->revision[i] >= 0x20) |
1603 | printk("%c", wwn->revision[i]); | 1601 | printk("%c", wwn->revision[i]); |
1604 | else | 1602 | else |
1605 | printk(" "); | 1603 | printk(" "); |
1606 | 1604 | ||
1607 | printk("\n"); | 1605 | printk("\n"); |
1608 | 1606 | ||
1609 | device_type = TRANSPORT(dev)->get_device_type(dev); | 1607 | device_type = TRANSPORT(dev)->get_device_type(dev); |
1610 | printk(" Type: %s ", scsi_device_type(device_type)); | 1608 | printk(" Type: %s ", scsi_device_type(device_type)); |
1611 | printk(" ANSI SCSI revision: %02x\n", | 1609 | printk(" ANSI SCSI revision: %02x\n", |
1612 | TRANSPORT(dev)->get_device_rev(dev)); | 1610 | TRANSPORT(dev)->get_device_rev(dev)); |
1613 | } | 1611 | } |
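The INQUIRY fields dumped above are fixed-width and may contain padding or non-printable bytes, so anything below 0x20 is printed as a space to keep the columns aligned. A userspace sketch of that filter (the vendor string is a stand-in, not real WWN data):

#include <stdio.h>

static void dump_field(const char *label, const unsigned char *p, int len)
{
	int i;

	printf("%s", label);
	for (i = 0; i < len; i++)
		putchar(p[i] >= 0x20 ? p[i] : ' ');	/* space out non-printables */
	putchar('\n');
}

int main(void)
{
	unsigned char vendor[8] = "LIO-ORG";	/* trailing byte is 0x00 */

	dump_field("  Vendor: ", vendor, 8);
	return 0;
}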
1614 | 1612 | ||
1615 | struct se_device *transport_add_device_to_core_hba( | 1613 | struct se_device *transport_add_device_to_core_hba( |
1616 | struct se_hba *hba, | 1614 | struct se_hba *hba, |
1617 | struct se_subsystem_api *transport, | 1615 | struct se_subsystem_api *transport, |
1618 | struct se_subsystem_dev *se_dev, | 1616 | struct se_subsystem_dev *se_dev, |
1619 | u32 device_flags, | 1617 | u32 device_flags, |
1620 | void *transport_dev, | 1618 | void *transport_dev, |
1621 | struct se_dev_limits *dev_limits, | 1619 | struct se_dev_limits *dev_limits, |
1622 | const char *inquiry_prod, | 1620 | const char *inquiry_prod, |
1623 | const char *inquiry_rev) | 1621 | const char *inquiry_rev) |
1624 | { | 1622 | { |
1625 | int force_pt; | 1623 | int force_pt; |
1626 | struct se_device *dev; | 1624 | struct se_device *dev; |
1627 | 1625 | ||
1628 | dev = kzalloc(sizeof(struct se_device), GFP_KERNEL); | 1626 | dev = kzalloc(sizeof(struct se_device), GFP_KERNEL); |
1629 | if (!(dev)) { | 1627 | if (!(dev)) { |
1630 | printk(KERN_ERR "Unable to allocate memory for struct se_device\n"); | 1628 | printk(KERN_ERR "Unable to allocate memory for struct se_device\n"); |
1631 | return NULL; | 1629 | return NULL; |
1632 | } | 1630 | } |
1633 | dev->dev_queue_obj = kzalloc(sizeof(struct se_queue_obj), GFP_KERNEL); | 1631 | dev->dev_queue_obj = kzalloc(sizeof(struct se_queue_obj), GFP_KERNEL); |
1634 | if (!(dev->dev_queue_obj)) { | 1632 | if (!(dev->dev_queue_obj)) { |
1635 | printk(KERN_ERR "Unable to allocate memory for" | 1633 | printk(KERN_ERR "Unable to allocate memory for" |
1636 | " dev->dev_queue_obj\n"); | 1634 | " dev->dev_queue_obj\n"); |
1637 | kfree(dev); | 1635 | kfree(dev); |
1638 | return NULL; | 1636 | return NULL; |
1639 | } | 1637 | } |
1640 | transport_init_queue_obj(dev->dev_queue_obj); | 1638 | transport_init_queue_obj(dev->dev_queue_obj); |
1641 | 1639 | ||
1642 | dev->dev_status_queue_obj = kzalloc(sizeof(struct se_queue_obj), | 1640 | dev->dev_status_queue_obj = kzalloc(sizeof(struct se_queue_obj), |
1643 | GFP_KERNEL); | 1641 | GFP_KERNEL); |
1644 | if (!(dev->dev_status_queue_obj)) { | 1642 | if (!(dev->dev_status_queue_obj)) { |
1645 | printk(KERN_ERR "Unable to allocate memory for" | 1643 | printk(KERN_ERR "Unable to allocate memory for" |
1646 | " dev->dev_status_queue_obj\n"); | 1644 | " dev->dev_status_queue_obj\n"); |
1647 | kfree(dev->dev_queue_obj); | 1645 | kfree(dev->dev_queue_obj); |
1648 | kfree(dev); | 1646 | kfree(dev); |
1649 | return NULL; | 1647 | return NULL; |
1650 | } | 1648 | } |
1651 | transport_init_queue_obj(dev->dev_status_queue_obj); | 1649 | transport_init_queue_obj(dev->dev_status_queue_obj); |
1652 | 1650 | ||
1653 | dev->dev_flags = device_flags; | 1651 | dev->dev_flags = device_flags; |
1654 | dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED; | 1652 | dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED; |
1655 | dev->dev_ptr = (void *) transport_dev; | 1653 | dev->dev_ptr = (void *) transport_dev; |
1656 | dev->se_hba = hba; | 1654 | dev->se_hba = hba; |
1657 | dev->se_sub_dev = se_dev; | 1655 | dev->se_sub_dev = se_dev; |
1658 | dev->transport = transport; | 1656 | dev->transport = transport; |
1659 | atomic_set(&dev->active_cmds, 0); | 1657 | atomic_set(&dev->active_cmds, 0); |
1660 | INIT_LIST_HEAD(&dev->dev_list); | 1658 | INIT_LIST_HEAD(&dev->dev_list); |
1661 | INIT_LIST_HEAD(&dev->dev_sep_list); | 1659 | INIT_LIST_HEAD(&dev->dev_sep_list); |
1662 | INIT_LIST_HEAD(&dev->dev_tmr_list); | 1660 | INIT_LIST_HEAD(&dev->dev_tmr_list); |
1663 | INIT_LIST_HEAD(&dev->execute_task_list); | 1661 | INIT_LIST_HEAD(&dev->execute_task_list); |
1664 | INIT_LIST_HEAD(&dev->delayed_cmd_list); | 1662 | INIT_LIST_HEAD(&dev->delayed_cmd_list); |
1665 | INIT_LIST_HEAD(&dev->ordered_cmd_list); | 1663 | INIT_LIST_HEAD(&dev->ordered_cmd_list); |
1666 | INIT_LIST_HEAD(&dev->state_task_list); | 1664 | INIT_LIST_HEAD(&dev->state_task_list); |
1667 | spin_lock_init(&dev->execute_task_lock); | 1665 | spin_lock_init(&dev->execute_task_lock); |
1668 | spin_lock_init(&dev->delayed_cmd_lock); | 1666 | spin_lock_init(&dev->delayed_cmd_lock); |
1669 | spin_lock_init(&dev->ordered_cmd_lock); | 1667 | spin_lock_init(&dev->ordered_cmd_lock); |
1670 | spin_lock_init(&dev->state_task_lock); | 1668 | spin_lock_init(&dev->state_task_lock); |
1671 | spin_lock_init(&dev->dev_alua_lock); | 1669 | spin_lock_init(&dev->dev_alua_lock); |
1672 | spin_lock_init(&dev->dev_reservation_lock); | 1670 | spin_lock_init(&dev->dev_reservation_lock); |
1673 | spin_lock_init(&dev->dev_status_lock); | 1671 | spin_lock_init(&dev->dev_status_lock); |
1674 | spin_lock_init(&dev->dev_status_thr_lock); | 1672 | spin_lock_init(&dev->dev_status_thr_lock); |
1675 | spin_lock_init(&dev->se_port_lock); | 1673 | spin_lock_init(&dev->se_port_lock); |
1676 | spin_lock_init(&dev->se_tmr_lock); | 1674 | spin_lock_init(&dev->se_tmr_lock); |
1677 | 1675 | ||
1678 | dev->queue_depth = dev_limits->queue_depth; | 1676 | dev->queue_depth = dev_limits->queue_depth; |
1679 | atomic_set(&dev->depth_left, dev->queue_depth); | 1677 | atomic_set(&dev->depth_left, dev->queue_depth); |
1680 | atomic_set(&dev->dev_ordered_id, 0); | 1678 | atomic_set(&dev->dev_ordered_id, 0); |
1681 | 1679 | ||
1682 | se_dev_set_default_attribs(dev, dev_limits); | 1680 | se_dev_set_default_attribs(dev, dev_limits); |
1683 | 1681 | ||
1684 | dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX); | 1682 | dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX); |
1685 | dev->creation_time = get_jiffies_64(); | 1683 | dev->creation_time = get_jiffies_64(); |
1686 | spin_lock_init(&dev->stats_lock); | 1684 | spin_lock_init(&dev->stats_lock); |
1687 | 1685 | ||
1688 | spin_lock(&hba->device_lock); | 1686 | spin_lock(&hba->device_lock); |
1689 | list_add_tail(&dev->dev_list, &hba->hba_dev_list); | 1687 | list_add_tail(&dev->dev_list, &hba->hba_dev_list); |
1690 | hba->dev_count++; | 1688 | hba->dev_count++; |
1691 | spin_unlock(&hba->device_lock); | 1689 | spin_unlock(&hba->device_lock); |
1692 | /* | 1690 | /* |
1693 | * Setup the SAM Task Attribute emulation for struct se_device | 1691 | * Setup the SAM Task Attribute emulation for struct se_device |
1694 | */ | 1692 | */ |
1695 | core_setup_task_attr_emulation(dev); | 1693 | core_setup_task_attr_emulation(dev); |
1696 | /* | 1694 | /* |
1697 | * Force PR and ALUA passthrough emulation with internal object use. | 1695 | * Force PR and ALUA passthrough emulation with internal object use. |
1698 | */ | 1696 | */ |
1699 | force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE); | 1697 | force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE); |
1700 | /* | 1698 | /* |
1701 | * Setup the Reservations infrastructure for struct se_device | 1699 | * Setup the Reservations infrastructure for struct se_device |
1702 | */ | 1700 | */ |
1703 | core_setup_reservations(dev, force_pt); | 1701 | core_setup_reservations(dev, force_pt); |
1704 | /* | 1702 | /* |
1705 | * Setup the Asymmetric Logical Unit Assignment for struct se_device | 1703 | * Setup the Asymmetric Logical Unit Assignment for struct se_device |
1706 | */ | 1704 | */ |
1707 | if (core_setup_alua(dev, force_pt) < 0) | 1705 | if (core_setup_alua(dev, force_pt) < 0) |
1708 | goto out; | 1706 | goto out; |
1709 | 1707 | ||
1710 | /* | 1708 | /* |
1711 | * Startup the struct se_device processing thread | 1709 | * Startup the struct se_device processing thread |
1712 | */ | 1710 | */ |
1713 | dev->process_thread = kthread_run(transport_processing_thread, dev, | 1711 | dev->process_thread = kthread_run(transport_processing_thread, dev, |
1714 | "LIO_%s", TRANSPORT(dev)->name); | 1712 | "LIO_%s", TRANSPORT(dev)->name); |
1715 | if (IS_ERR(dev->process_thread)) { | 1713 | if (IS_ERR(dev->process_thread)) { |
1716 | printk(KERN_ERR "Unable to create kthread: LIO_%s\n", | 1714 | printk(KERN_ERR "Unable to create kthread: LIO_%s\n", |
1717 | TRANSPORT(dev)->name); | 1715 | TRANSPORT(dev)->name); |
1718 | goto out; | 1716 | goto out; |
1719 | } | 1717 | } |
1720 | 1718 | ||
1721 | /* | 1719 | /* |
1722 | * Preload the initial INQUIRY const values if we are doing | 1720 | * Preload the initial INQUIRY const values if we are doing |
1723 | * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI | 1721 | * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI |
1724 | * passthrough because this is being provided by the backend LLD. | 1722 | * passthrough because this is being provided by the backend LLD. |
1725 | * This is required so that transport_get_inquiry() copies these | 1723 | * This is required so that transport_get_inquiry() copies these |
1726 | * originals once back into DEV_T10_WWN(dev) for the virtual device | 1724 | * originals once back into DEV_T10_WWN(dev) for the virtual device |
1727 | * setup. | 1725 | * setup. |
1728 | */ | 1726 | */ |
1729 | if (TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) { | 1727 | if (TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) { |
1730 | if (!(inquiry_prod) || !(inquiry_rev)) { | 1728 | if (!(inquiry_prod) || !(inquiry_rev)) { |
1731 | printk(KERN_ERR "All non TCM/pSCSI plugins require" | 1729 | printk(KERN_ERR "All non TCM/pSCSI plugins require" |
1732 | " INQUIRY consts\n"); | 1730 | " INQUIRY consts\n"); |
1733 | goto out; | 1731 | goto out; |
1734 | } | 1732 | } |
1735 | 1733 | ||
1736 | strncpy(&DEV_T10_WWN(dev)->vendor[0], "LIO-ORG", 8); | 1734 | strncpy(&DEV_T10_WWN(dev)->vendor[0], "LIO-ORG", 8); |
1737 | strncpy(&DEV_T10_WWN(dev)->model[0], inquiry_prod, 16); | 1735 | strncpy(&DEV_T10_WWN(dev)->model[0], inquiry_prod, 16); |
1738 | strncpy(&DEV_T10_WWN(dev)->revision[0], inquiry_rev, 4); | 1736 | strncpy(&DEV_T10_WWN(dev)->revision[0], inquiry_rev, 4); |
1739 | } | 1737 | } |
1740 | scsi_dump_inquiry(dev); | 1738 | scsi_dump_inquiry(dev); |
1741 | 1739 | ||
1742 | return dev; | 1740 | return dev; |
1743 | out: | 1741 | out: |
1744 | if (!IS_ERR_OR_NULL(dev->process_thread)) | 1742 | if (!IS_ERR_OR_NULL(dev->process_thread)) |
1745 | kthread_stop(dev->process_thread); | 1743 | kthread_stop(dev->process_thread); |
1746 | 1744 | ||
1747 | spin_lock(&hba->device_lock); | 1745 | spin_lock(&hba->device_lock); |
1748 | list_del(&dev->dev_list); | 1746 | list_del(&dev->dev_list); |
1749 | hba->dev_count--; | 1747 | hba->dev_count--; |
1750 | spin_unlock(&hba->device_lock); | 1748 | spin_unlock(&hba->device_lock); |
1751 | 1749 | ||
1752 | se_release_vpd_for_dev(dev); | 1750 | se_release_vpd_for_dev(dev); |
1753 | 1751 | ||
1754 | kfree(dev->dev_status_queue_obj); | 1752 | kfree(dev->dev_status_queue_obj); |
1755 | kfree(dev->dev_queue_obj); | 1753 | kfree(dev->dev_queue_obj); |
1756 | kfree(dev); | 1754 | kfree(dev); |
1757 | 1755 | ||
1758 | return NULL; | 1756 | return NULL; |
1759 | } | 1757 | } |
1760 | EXPORT_SYMBOL(transport_add_device_to_core_hba); | 1758 | EXPORT_SYMBOL(transport_add_device_to_core_hba); |
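transport_add_device_to_core_hba() builds the device in stages and, on a later failure, unwinds exactly the stages that already succeeded. A compact userspace sketch of that staged-setup/unwind pattern; the names here are purely illustrative and not part of the target core API:

#include <stdio.h>
#include <stdlib.h>

struct demo_dev {
	void *queue_obj;
	void *status_queue_obj;
};

static struct demo_dev *demo_dev_alloc(void)
{
	struct demo_dev *dev;

	dev = calloc(1, sizeof(*dev));
	if (!dev)
		return NULL;
	dev->queue_obj = calloc(1, 64);
	if (!dev->queue_obj)
		goto free_dev;			/* undo only the first step */
	dev->status_queue_obj = calloc(1, 64);
	if (!dev->status_queue_obj)
		goto free_queue;		/* undo the first two steps */

	return dev;

free_queue:
	free(dev->queue_obj);
free_dev:
	free(dev);
	return NULL;
}

int main(void)
{
	struct demo_dev *dev = demo_dev_alloc();

	if (!dev)
		return 1;
	printf("setup succeeded\n");
	free(dev->status_queue_obj);
	free(dev->queue_obj);
	free(dev);
	return 0;
}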
1761 | 1759 | ||
1762 | /* transport_generic_prepare_cdb(): | 1760 | /* transport_generic_prepare_cdb(): |
1763 | * | 1761 | * |
1764 | * Since the Initiator sees iSCSI devices as LUNs, the SCSI CDB will | 1762 | * Since the Initiator sees iSCSI devices as LUNs, the SCSI CDB will |
1765 | * contain the iSCSI LUN in bits 7-5 of byte 1 as per SAM-2. | 1763 | * contain the iSCSI LUN in bits 7-5 of byte 1 as per SAM-2. |
1766 | * The point of this is that since we are mapping iSCSI LUNs to | 1764 | * The point of this is that since we are mapping iSCSI LUNs to |
1767 | * SCSI Target IDs, having a non-zero LUN in the CDB will throw the | 1765 | * SCSI Target IDs, having a non-zero LUN in the CDB will throw the |
1768 | * devices and HBAs for a loop. | 1766 | * devices and HBAs for a loop. |
1769 | */ | 1767 | */ |
1770 | static inline void transport_generic_prepare_cdb( | 1768 | static inline void transport_generic_prepare_cdb( |
1771 | unsigned char *cdb) | 1769 | unsigned char *cdb) |
1772 | { | 1770 | { |
1773 | switch (cdb[0]) { | 1771 | switch (cdb[0]) { |
1774 | case READ_10: /* SBC - RDProtect */ | 1772 | case READ_10: /* SBC - RDProtect */ |
1775 | case READ_12: /* SBC - RDProtect */ | 1773 | case READ_12: /* SBC - RDProtect */ |
1776 | case READ_16: /* SBC - RDProtect */ | 1774 | case READ_16: /* SBC - RDProtect */ |
1777 | case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */ | 1775 | case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */ |
1778 | case VERIFY: /* SBC - VRProtect */ | 1776 | case VERIFY: /* SBC - VRProtect */ |
1779 | case VERIFY_16: /* SBC - VRProtect */ | 1777 | case VERIFY_16: /* SBC - VRProtect */ |
1780 | case WRITE_VERIFY: /* SBC - VRProtect */ | 1778 | case WRITE_VERIFY: /* SBC - VRProtect */ |
1781 | case WRITE_VERIFY_12: /* SBC - VRProtect */ | 1779 | case WRITE_VERIFY_12: /* SBC - VRProtect */ |
1782 | break; | 1780 | break; |
1783 | default: | 1781 | default: |
1784 | cdb[1] &= 0x1f; /* clear logical unit number */ | 1782 | cdb[1] &= 0x1f; /* clear logical unit number */ |
1785 | break; | 1783 | break; |
1786 | } | 1784 | } |
1787 | } | 1785 | } |
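A worked example of the cdb[1] &= 0x1f masking above: bits 7-5 of CDB byte 1 carried the LUN under SAM-2, and clearing them leaves only the low five bits. The CDB bytes are invented just for the printout:

#include <stdio.h>

int main(void)
{
	unsigned char cdb[6] = { 0x08, 0xa3, 0x00, 0x10, 0x08, 0x00 };

	printf("byte 1 before: 0x%02x\n", cdb[1]);	/* 0xa3: LUN 5 in bits 7-5 */
	cdb[1] &= 0x1f;					/* clear logical unit number */
	printf("byte 1 after:  0x%02x\n", cdb[1]);	/* 0x03 */
	return 0;
}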
1788 | 1786 | ||
1789 | static struct se_task * | 1787 | static struct se_task * |
1790 | transport_generic_get_task(struct se_cmd *cmd, | 1788 | transport_generic_get_task(struct se_cmd *cmd, |
1791 | enum dma_data_direction data_direction) | 1789 | enum dma_data_direction data_direction) |
1792 | { | 1790 | { |
1793 | struct se_task *task; | 1791 | struct se_task *task; |
1794 | struct se_device *dev = SE_DEV(cmd); | 1792 | struct se_device *dev = SE_DEV(cmd); |
1795 | unsigned long flags; | 1793 | unsigned long flags; |
1796 | 1794 | ||
1797 | task = dev->transport->alloc_task(cmd); | 1795 | task = dev->transport->alloc_task(cmd); |
1798 | if (!task) { | 1796 | if (!task) { |
1799 | printk(KERN_ERR "Unable to allocate struct se_task\n"); | 1797 | printk(KERN_ERR "Unable to allocate struct se_task\n"); |
1800 | return NULL; | 1798 | return NULL; |
1801 | } | 1799 | } |
1802 | 1800 | ||
1803 | INIT_LIST_HEAD(&task->t_list); | 1801 | INIT_LIST_HEAD(&task->t_list); |
1804 | INIT_LIST_HEAD(&task->t_execute_list); | 1802 | INIT_LIST_HEAD(&task->t_execute_list); |
1805 | INIT_LIST_HEAD(&task->t_state_list); | 1803 | INIT_LIST_HEAD(&task->t_state_list); |
1806 | init_completion(&task->task_stop_comp); | 1804 | init_completion(&task->task_stop_comp); |
1807 | task->task_no = T_TASK(cmd)->t_tasks_no++; | 1805 | task->task_no = T_TASK(cmd)->t_tasks_no++; |
1808 | task->task_se_cmd = cmd; | 1806 | task->task_se_cmd = cmd; |
1809 | task->se_dev = dev; | 1807 | task->se_dev = dev; |
1810 | task->task_data_direction = data_direction; | 1808 | task->task_data_direction = data_direction; |
1811 | 1809 | ||
1812 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 1810 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); |
1813 | list_add_tail(&task->t_list, &T_TASK(cmd)->t_task_list); | 1811 | list_add_tail(&task->t_list, &T_TASK(cmd)->t_task_list); |
1814 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 1812 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); |
1815 | 1813 | ||
1816 | return task; | 1814 | return task; |
1817 | } | 1815 | } |
1818 | 1816 | ||
1819 | static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *); | 1817 | static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *); |
1820 | 1818 | ||
1821 | void transport_device_setup_cmd(struct se_cmd *cmd) | 1819 | void transport_device_setup_cmd(struct se_cmd *cmd) |
1822 | { | 1820 | { |
1823 | cmd->se_dev = SE_LUN(cmd)->lun_se_dev; | 1821 | cmd->se_dev = SE_LUN(cmd)->lun_se_dev; |
1824 | } | 1822 | } |
1825 | EXPORT_SYMBOL(transport_device_setup_cmd); | 1823 | EXPORT_SYMBOL(transport_device_setup_cmd); |
1826 | 1824 | ||
1827 | /* | 1825 | /* |
1828 | * Used by fabric modules containing a local struct se_cmd within their | 1826 | * Used by fabric modules containing a local struct se_cmd within their |
1829 | * fabric dependent per I/O descriptor. | 1827 | * fabric dependent per I/O descriptor. |
1830 | */ | 1828 | */ |
1831 | void transport_init_se_cmd( | 1829 | void transport_init_se_cmd( |
1832 | struct se_cmd *cmd, | 1830 | struct se_cmd *cmd, |
1833 | struct target_core_fabric_ops *tfo, | 1831 | struct target_core_fabric_ops *tfo, |
1834 | struct se_session *se_sess, | 1832 | struct se_session *se_sess, |
1835 | u32 data_length, | 1833 | u32 data_length, |
1836 | int data_direction, | 1834 | int data_direction, |
1837 | int task_attr, | 1835 | int task_attr, |
1838 | unsigned char *sense_buffer) | 1836 | unsigned char *sense_buffer) |
1839 | { | 1837 | { |
1840 | INIT_LIST_HEAD(&cmd->se_lun_list); | 1838 | INIT_LIST_HEAD(&cmd->se_lun_list); |
1841 | INIT_LIST_HEAD(&cmd->se_delayed_list); | 1839 | INIT_LIST_HEAD(&cmd->se_delayed_list); |
1842 | INIT_LIST_HEAD(&cmd->se_ordered_list); | 1840 | INIT_LIST_HEAD(&cmd->se_ordered_list); |
1843 | /* | 1841 | /* |
1844 | * Setup t_task pointer to t_task_backstore | 1842 | * Setup t_task pointer to t_task_backstore |
1845 | */ | 1843 | */ |
1846 | cmd->t_task = &cmd->t_task_backstore; | 1844 | cmd->t_task = &cmd->t_task_backstore; |
1847 | 1845 | ||
1848 | INIT_LIST_HEAD(&T_TASK(cmd)->t_task_list); | 1846 | INIT_LIST_HEAD(&T_TASK(cmd)->t_task_list); |
1849 | init_completion(&T_TASK(cmd)->transport_lun_fe_stop_comp); | 1847 | init_completion(&T_TASK(cmd)->transport_lun_fe_stop_comp); |
1850 | init_completion(&T_TASK(cmd)->transport_lun_stop_comp); | 1848 | init_completion(&T_TASK(cmd)->transport_lun_stop_comp); |
1851 | init_completion(&T_TASK(cmd)->t_transport_stop_comp); | 1849 | init_completion(&T_TASK(cmd)->t_transport_stop_comp); |
1852 | spin_lock_init(&T_TASK(cmd)->t_state_lock); | 1850 | spin_lock_init(&T_TASK(cmd)->t_state_lock); |
1853 | atomic_set(&T_TASK(cmd)->transport_dev_active, 1); | 1851 | atomic_set(&T_TASK(cmd)->transport_dev_active, 1); |
1854 | 1852 | ||
1855 | cmd->se_tfo = tfo; | 1853 | cmd->se_tfo = tfo; |
1856 | cmd->se_sess = se_sess; | 1854 | cmd->se_sess = se_sess; |
1857 | cmd->data_length = data_length; | 1855 | cmd->data_length = data_length; |
1858 | cmd->data_direction = data_direction; | 1856 | cmd->data_direction = data_direction; |
1859 | cmd->sam_task_attr = task_attr; | 1857 | cmd->sam_task_attr = task_attr; |
1860 | cmd->sense_buffer = sense_buffer; | 1858 | cmd->sense_buffer = sense_buffer; |
1861 | } | 1859 | } |
1862 | EXPORT_SYMBOL(transport_init_se_cmd); | 1860 | EXPORT_SYMBOL(transport_init_se_cmd); |
1863 | 1861 | ||
1864 | static int transport_check_alloc_task_attr(struct se_cmd *cmd) | 1862 | static int transport_check_alloc_task_attr(struct se_cmd *cmd) |
1865 | { | 1863 | { |
1866 | /* | 1864 | /* |
1867 | * Check if SAM Task Attribute emulation is enabled for this | 1865 | * Check if SAM Task Attribute emulation is enabled for this |
1868 | * struct se_device storage object | 1866 | * struct se_device storage object |
1869 | */ | 1867 | */ |
1870 | if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) | 1868 | if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) |
1871 | return 0; | 1869 | return 0; |
1872 | 1870 | ||
1873 | if (cmd->sam_task_attr == TASK_ATTR_ACA) { | 1871 | if (cmd->sam_task_attr == TASK_ATTR_ACA) { |
1874 | DEBUG_STA("SAM Task Attribute ACA" | 1872 | DEBUG_STA("SAM Task Attribute ACA" |
1875 | " emulation is not supported\n"); | 1873 | " emulation is not supported\n"); |
1876 | return -1; | 1874 | return -1; |
1877 | } | 1875 | } |
1878 | /* | 1876 | /* |
1879 | * Used to determine when ORDERED commands should go from | 1877 | * Used to determine when ORDERED commands should go from |
1880 | * Dormant to Active status. | 1878 | * Dormant to Active status. |
1881 | */ | 1879 | */ |
1882 | cmd->se_ordered_id = atomic_inc_return(&SE_DEV(cmd)->dev_ordered_id); | 1880 | cmd->se_ordered_id = atomic_inc_return(&SE_DEV(cmd)->dev_ordered_id); |
1883 | smp_mb__after_atomic_inc(); | 1881 | smp_mb__after_atomic_inc(); |
1884 | DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n", | 1882 | DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n", |
1885 | cmd->se_ordered_id, cmd->sam_task_attr, | 1883 | cmd->se_ordered_id, cmd->sam_task_attr, |
1886 | TRANSPORT(cmd->se_dev)->name); | 1884 | TRANSPORT(cmd->se_dev)->name); |
1887 | return 0; | 1885 | return 0; |
1888 | } | 1886 | } |
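se_ordered_id comes from atomic_inc_return(), so every command gets a unique, monotonically increasing id without taking a lock. A minimal sketch of the same idea using C11 atomics as a stand-in for the kernel atomic_t:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int dev_ordered_id;

static int alloc_ordered_id(void)
{
	/* fetch-add returns the old value, so add 1 for inc-and-return */
	return atomic_fetch_add(&dev_ordered_id, 1) + 1;
}

int main(void)
{
	int a = alloc_ordered_id();
	int b = alloc_ordered_id();

	printf("ids: %d %d\n", a, b);	/* 1 2 */
	return 0;
}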
1889 | 1887 | ||
1890 | void transport_free_se_cmd( | 1888 | void transport_free_se_cmd( |
1891 | struct se_cmd *se_cmd) | 1889 | struct se_cmd *se_cmd) |
1892 | { | 1890 | { |
1893 | if (se_cmd->se_tmr_req) | 1891 | if (se_cmd->se_tmr_req) |
1894 | core_tmr_release_req(se_cmd->se_tmr_req); | 1892 | core_tmr_release_req(se_cmd->se_tmr_req); |
1895 | /* | 1893 | /* |
1896 | * Check and free any extended CDB buffer that was allocated | 1894 | * Check and free any extended CDB buffer that was allocated |
1897 | */ | 1895 | */ |
1898 | if (T_TASK(se_cmd)->t_task_cdb != T_TASK(se_cmd)->__t_task_cdb) | 1896 | if (T_TASK(se_cmd)->t_task_cdb != T_TASK(se_cmd)->__t_task_cdb) |
1899 | kfree(T_TASK(se_cmd)->t_task_cdb); | 1897 | kfree(T_TASK(se_cmd)->t_task_cdb); |
1900 | } | 1898 | } |
1901 | EXPORT_SYMBOL(transport_free_se_cmd); | 1899 | EXPORT_SYMBOL(transport_free_se_cmd); |
1902 | 1900 | ||
1903 | static void transport_generic_wait_for_tasks(struct se_cmd *, int, int); | 1901 | static void transport_generic_wait_for_tasks(struct se_cmd *, int, int); |
1904 | 1902 | ||
1905 | /* transport_generic_allocate_tasks(): | 1903 | /* transport_generic_allocate_tasks(): |
1906 | * | 1904 | * |
1907 | * Called from fabric RX Thread. | 1905 | * Called from fabric RX Thread. |
1908 | */ | 1906 | */ |
1909 | int transport_generic_allocate_tasks( | 1907 | int transport_generic_allocate_tasks( |
1910 | struct se_cmd *cmd, | 1908 | struct se_cmd *cmd, |
1911 | unsigned char *cdb) | 1909 | unsigned char *cdb) |
1912 | { | 1910 | { |
1913 | int ret; | 1911 | int ret; |
1914 | 1912 | ||
1915 | transport_generic_prepare_cdb(cdb); | 1913 | transport_generic_prepare_cdb(cdb); |
1916 | 1914 | ||
1917 | /* | 1915 | /* |
1918 | * This is needed for early exceptions. | 1916 | * This is needed for early exceptions. |
1919 | */ | 1917 | */ |
1920 | cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks; | 1918 | cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks; |
1921 | 1919 | ||
1922 | transport_device_setup_cmd(cmd); | 1920 | transport_device_setup_cmd(cmd); |
1923 | /* | 1921 | /* |
1924 | * Ensure that the received CDB does not exceed the max (252 + 8) bytes | 1922 | * Ensure that the received CDB does not exceed the max (252 + 8) bytes |
1925 | * for VARIABLE_LENGTH_CMD | 1923 | * for VARIABLE_LENGTH_CMD |
1926 | */ | 1924 | */ |
1927 | if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) { | 1925 | if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) { |
1928 | printk(KERN_ERR "Received SCSI CDB with command_size: %d that" | 1926 | printk(KERN_ERR "Received SCSI CDB with command_size: %d that" |
1929 | " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", | 1927 | " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", |
1930 | scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE); | 1928 | scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE); |
1931 | return -1; | 1929 | return -1; |
1932 | } | 1930 | } |
1933 | /* | 1931 | /* |
1934 | * If the received CDB is larger than TCM_MAX_COMMAND_SIZE, | 1932 | * If the received CDB is larger than TCM_MAX_COMMAND_SIZE, |
1935 | * allocate the additional extended CDB buffer now.. Otherwise | 1933 | * allocate the additional extended CDB buffer now.. Otherwise |
1936 | * setup the pointer from __t_task_cdb to t_task_cdb. | 1934 | * setup the pointer from __t_task_cdb to t_task_cdb. |
1937 | */ | 1935 | */ |
1938 | if (scsi_command_size(cdb) > sizeof(T_TASK(cmd)->__t_task_cdb)) { | 1936 | if (scsi_command_size(cdb) > sizeof(T_TASK(cmd)->__t_task_cdb)) { |
1939 | T_TASK(cmd)->t_task_cdb = kzalloc(scsi_command_size(cdb), | 1937 | T_TASK(cmd)->t_task_cdb = kzalloc(scsi_command_size(cdb), |
1940 | GFP_KERNEL); | 1938 | GFP_KERNEL); |
1941 | if (!(T_TASK(cmd)->t_task_cdb)) { | 1939 | if (!(T_TASK(cmd)->t_task_cdb)) { |
1942 | printk(KERN_ERR "Unable to allocate T_TASK(cmd)->t_task_cdb" | 1940 | printk(KERN_ERR "Unable to allocate T_TASK(cmd)->t_task_cdb" |
1943 | " %u > sizeof(T_TASK(cmd)->__t_task_cdb): %lu ops\n", | 1941 | " %u > sizeof(T_TASK(cmd)->__t_task_cdb): %lu ops\n", |
1944 | scsi_command_size(cdb), | 1942 | scsi_command_size(cdb), |
1945 | (unsigned long)sizeof(T_TASK(cmd)->__t_task_cdb)); | 1943 | (unsigned long)sizeof(T_TASK(cmd)->__t_task_cdb)); |
1946 | return -1; | 1944 | return -1; |
1947 | } | 1945 | } |
1948 | } else | 1946 | } else |
1949 | T_TASK(cmd)->t_task_cdb = &T_TASK(cmd)->__t_task_cdb[0]; | 1947 | T_TASK(cmd)->t_task_cdb = &T_TASK(cmd)->__t_task_cdb[0]; |
1950 | /* | 1948 | /* |
1951 | * Copy the original CDB into T_TASK(cmd). | 1949 | * Copy the original CDB into T_TASK(cmd). |
1952 | */ | 1950 | */ |
1953 | memcpy(T_TASK(cmd)->t_task_cdb, cdb, scsi_command_size(cdb)); | 1951 | memcpy(T_TASK(cmd)->t_task_cdb, cdb, scsi_command_size(cdb)); |
1954 | /* | 1952 | /* |
1955 | * Setup the received CDB based on SCSI defined opcodes and | 1953 | * Setup the received CDB based on SCSI defined opcodes and |
1956 | * perform unit attention, persistent reservations and ALUA | 1954 | * perform unit attention, persistent reservations and ALUA |
1957 | * checks for virtual device backends. The T_TASK(cmd)->t_task_cdb | 1955 | * checks for virtual device backends. The T_TASK(cmd)->t_task_cdb |
1958 | * pointer is expected to be setup before we reach this point. | 1956 | * pointer is expected to be setup before we reach this point. |
1959 | */ | 1957 | */ |
1960 | ret = transport_generic_cmd_sequencer(cmd, cdb); | 1958 | ret = transport_generic_cmd_sequencer(cmd, cdb); |
1961 | if (ret < 0) | 1959 | if (ret < 0) |
1962 | return ret; | 1960 | return ret; |
1963 | /* | 1961 | /* |
1964 | * Check for SAM Task Attribute Emulation | 1962 | * Check for SAM Task Attribute Emulation |
1965 | */ | 1963 | */ |
1966 | if (transport_check_alloc_task_attr(cmd) < 0) { | 1964 | if (transport_check_alloc_task_attr(cmd) < 0) { |
1967 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 1965 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
1968 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; | 1966 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; |
1969 | return -2; | 1967 | return -2; |
1970 | } | 1968 | } |
1971 | spin_lock(&cmd->se_lun->lun_sep_lock); | 1969 | spin_lock(&cmd->se_lun->lun_sep_lock); |
1972 | if (cmd->se_lun->lun_sep) | 1970 | if (cmd->se_lun->lun_sep) |
1973 | cmd->se_lun->lun_sep->sep_stats.cmd_pdus++; | 1971 | cmd->se_lun->lun_sep->sep_stats.cmd_pdus++; |
1974 | spin_unlock(&cmd->se_lun->lun_sep_lock); | 1972 | spin_unlock(&cmd->se_lun->lun_sep_lock); |
1975 | return 0; | 1973 | return 0; |
1976 | } | 1974 | } |
1977 | EXPORT_SYMBOL(transport_generic_allocate_tasks); | 1975 | EXPORT_SYMBOL(transport_generic_allocate_tasks); |
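The t_task_cdb handling above is the classic small-buffer optimization: typical CDBs land in a fixed inline array, and only oversized variable-length CDBs cost a heap allocation, which transport_free_se_cmd() later detects by comparing pointers. A userspace sketch under assumed sizes and hypothetical names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define INLINE_CDB_SIZE 32			/* illustrative, not TCM's value */

struct demo_cmd {
	unsigned char *cdb;			/* points at one of the two below */
	unsigned char __cdb[INLINE_CDB_SIZE];	/* inline fast path */
};

static int demo_cmd_set_cdb(struct demo_cmd *cmd, const unsigned char *cdb,
			    size_t len)
{
	if (len > sizeof(cmd->__cdb)) {
		cmd->cdb = malloc(len);		/* extended CDB: heap */
		if (!cmd->cdb)
			return -1;
	} else {
		cmd->cdb = cmd->__cdb;		/* common case: inline */
	}
	memcpy(cmd->cdb, cdb, len);
	return 0;
}

static void demo_cmd_free_cdb(struct demo_cmd *cmd)
{
	/* mirrors the pointer-compare in transport_free_se_cmd() */
	if (cmd->cdb != cmd->__cdb)
		free(cmd->cdb);
}

int main(void)
{
	struct demo_cmd cmd = { 0 };
	unsigned char read10[10] = { 0x28 };

	if (demo_cmd_set_cdb(&cmd, read10, sizeof(read10)))
		return 1;
	printf("inline buffer used: %s\n",
	       cmd.cdb == cmd.__cdb ? "yes" : "no");
	demo_cmd_free_cdb(&cmd);
	return 0;
}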
1978 | 1976 | ||
1979 | /* | 1977 | /* |
1980 | * Used by fabric module frontends not defining a TFO->new_cmd_map() | 1978 | * Used by fabric module frontends not defining a TFO->new_cmd_map() |
1981 | * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD status | 1979 | * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD status |
1982 | */ | 1980 | */ |
1983 | int transport_generic_handle_cdb( | 1981 | int transport_generic_handle_cdb( |
1984 | struct se_cmd *cmd) | 1982 | struct se_cmd *cmd) |
1985 | { | 1983 | { |
1986 | if (!SE_LUN(cmd)) { | 1984 | if (!SE_LUN(cmd)) { |
1987 | dump_stack(); | 1985 | dump_stack(); |
1988 | printk(KERN_ERR "SE_LUN(cmd) is NULL\n"); | 1986 | printk(KERN_ERR "SE_LUN(cmd) is NULL\n"); |
1989 | return -1; | 1987 | return -1; |
1990 | } | 1988 | } |
1991 | 1989 | ||
1992 | transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD); | 1990 | transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD); |
1993 | return 0; | 1991 | return 0; |
1994 | } | 1992 | } |
1995 | EXPORT_SYMBOL(transport_generic_handle_cdb); | 1993 | EXPORT_SYMBOL(transport_generic_handle_cdb); |
1996 | 1994 | ||
1997 | /* | 1995 | /* |
1998 | * Used by fabric module frontends defining a TFO->new_cmd_map() caller | 1996 | * Used by fabric module frontends defining a TFO->new_cmd_map() caller |
1999 | * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to | 1997 | * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to |
2000 | * complete setup in TCM process context w/ TFO->new_cmd_map(). | 1998 | * complete setup in TCM process context w/ TFO->new_cmd_map(). |
2001 | */ | 1999 | */ |
2002 | int transport_generic_handle_cdb_map( | 2000 | int transport_generic_handle_cdb_map( |
2003 | struct se_cmd *cmd) | 2001 | struct se_cmd *cmd) |
2004 | { | 2002 | { |
2005 | if (!SE_LUN(cmd)) { | 2003 | if (!SE_LUN(cmd)) { |
2006 | dump_stack(); | 2004 | dump_stack(); |
2007 | printk(KERN_ERR "SE_LUN(cmd) is NULL\n"); | 2005 | printk(KERN_ERR "SE_LUN(cmd) is NULL\n"); |
2008 | return -1; | 2006 | return -1; |
2009 | } | 2007 | } |
2010 | 2008 | ||
2011 | transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP); | 2009 | transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP); |
2012 | return 0; | 2010 | return 0; |
2013 | } | 2011 | } |
2014 | EXPORT_SYMBOL(transport_generic_handle_cdb_map); | 2012 | EXPORT_SYMBOL(transport_generic_handle_cdb_map); |
2015 | 2013 | ||
2016 | /* transport_generic_handle_data(): | 2014 | /* transport_generic_handle_data(): |
2017 | * | 2015 | * |
2018 | * | 2016 | * |
2019 | */ | 2017 | */ |
2020 | int transport_generic_handle_data( | 2018 | int transport_generic_handle_data( |
2021 | struct se_cmd *cmd) | 2019 | struct se_cmd *cmd) |
2022 | { | 2020 | { |
2023 | /* | 2021 | /* |
2024 | * In the software fabric case, we assume the nexus is being | 2022 | * In the software fabric case, we assume the nexus is being |
2025 | * failed/shutdown when signals are pending from the kthread context | 2023 | * failed/shutdown when signals are pending from the kthread context |
2026 | * caller, so we return a failure. For the HW target mode case running | 2024 | * caller, so we return a failure. For the HW target mode case running |
2027 | * in interrupt code, the signal_pending() check is skipped. | 2025 | * in interrupt code, the signal_pending() check is skipped. |
2028 | */ | 2026 | */ |
2029 | if (!in_interrupt() && signal_pending(current)) | 2027 | if (!in_interrupt() && signal_pending(current)) |
2030 | return -1; | 2028 | return -1; |
2031 | /* | 2029 | /* |
2032 | * If the received CDB has already been ABORTED by the generic | 2030 | * If the received CDB has already been ABORTED by the generic |
2033 | * target engine, we now call transport_check_aborted_status() | 2031 | * target engine, we now call transport_check_aborted_status() |
2034 | * to queue any delayed TASK_ABORTED status for the received CDB to the | 2032 | * to queue any delayed TASK_ABORTED status for the received CDB to the |
2035 | * fabric module as we are expecting no further incoming DATA OUT | 2033 | * fabric module as we are expecting no further incoming DATA OUT |
2036 | * sequences at this point. | 2034 | * sequences at this point. |
2037 | */ | 2035 | */ |
2038 | if (transport_check_aborted_status(cmd, 1) != 0) | 2036 | if (transport_check_aborted_status(cmd, 1) != 0) |
2039 | return 0; | 2037 | return 0; |
2040 | 2038 | ||
2041 | transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE); | 2039 | transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE); |
2042 | return 0; | 2040 | return 0; |
2043 | } | 2041 | } |
2044 | EXPORT_SYMBOL(transport_generic_handle_data); | 2042 | EXPORT_SYMBOL(transport_generic_handle_data); |
2045 | 2043 | ||
2046 | /* transport_generic_handle_tmr(): | 2044 | /* transport_generic_handle_tmr(): |
2047 | * | 2045 | * |
2048 | * | 2046 | * |
2049 | */ | 2047 | */ |
2050 | int transport_generic_handle_tmr( | 2048 | int transport_generic_handle_tmr( |
2051 | struct se_cmd *cmd) | 2049 | struct se_cmd *cmd) |
2052 | { | 2050 | { |
2053 | /* | 2051 | /* |
2054 | * This is needed for early exceptions. | 2052 | * This is needed for early exceptions. |
2055 | */ | 2053 | */ |
2056 | cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks; | 2054 | cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks; |
2057 | transport_device_setup_cmd(cmd); | 2055 | transport_device_setup_cmd(cmd); |
2058 | 2056 | ||
2059 | transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR); | 2057 | transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR); |
2060 | return 0; | 2058 | return 0; |
2061 | } | 2059 | } |
2062 | EXPORT_SYMBOL(transport_generic_handle_tmr); | 2060 | EXPORT_SYMBOL(transport_generic_handle_tmr); |
2063 | 2061 | ||
2064 | static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) | 2062 | static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) |
2065 | { | 2063 | { |
2066 | struct se_task *task, *task_tmp; | 2064 | struct se_task *task, *task_tmp; |
2067 | unsigned long flags; | 2065 | unsigned long flags; |
2068 | int ret = 0; | 2066 | int ret = 0; |
2069 | 2067 | ||
2070 | DEBUG_TS("ITT[0x%08x] - Stopping tasks\n", | 2068 | DEBUG_TS("ITT[0x%08x] - Stopping tasks\n", |
2071 | CMD_TFO(cmd)->get_task_tag(cmd)); | 2069 | CMD_TFO(cmd)->get_task_tag(cmd)); |
2072 | 2070 | ||
2073 | /* | 2071 | /* |
2074 | * No tasks remain in the execution queue | 2072 | * No tasks remain in the execution queue |
2075 | */ | 2073 | */ |
2076 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 2074 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); |
2077 | list_for_each_entry_safe(task, task_tmp, | 2075 | list_for_each_entry_safe(task, task_tmp, |
2078 | &T_TASK(cmd)->t_task_list, t_list) { | 2076 | &T_TASK(cmd)->t_task_list, t_list) { |
2079 | DEBUG_TS("task_no[%d] - Processing task %p\n", | 2077 | DEBUG_TS("task_no[%d] - Processing task %p\n", |
2080 | task->task_no, task); | 2078 | task->task_no, task); |
2081 | /* | 2079 | /* |
2082 | * If the struct se_task has not been sent and is not active, | 2080 | * If the struct se_task has not been sent and is not active, |
2083 | * remove the struct se_task from the execution queue. | 2081 | * remove the struct se_task from the execution queue. |
2084 | */ | 2082 | */ |
2085 | if (!atomic_read(&task->task_sent) && | 2083 | if (!atomic_read(&task->task_sent) && |
2086 | !atomic_read(&task->task_active)) { | 2084 | !atomic_read(&task->task_active)) { |
2087 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, | 2085 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, |
2088 | flags); | 2086 | flags); |
2089 | transport_remove_task_from_execute_queue(task, | 2087 | transport_remove_task_from_execute_queue(task, |
2090 | task->se_dev); | 2088 | task->se_dev); |
2091 | 2089 | ||
2092 | DEBUG_TS("task_no[%d] - Removed from execute queue\n", | 2090 | DEBUG_TS("task_no[%d] - Removed from execute queue\n", |
2093 | task->task_no); | 2091 | task->task_no); |
2094 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 2092 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); |
2095 | continue; | 2093 | continue; |
2096 | } | 2094 | } |
2097 | 2095 | ||
2098 | /* | 2096 | /* |
2099 | * If the struct se_task is active, sleep until it is returned | 2097 | * If the struct se_task is active, sleep until it is returned |
2100 | * from the plugin. | 2098 | * from the plugin. |
2101 | */ | 2099 | */ |
2102 | if (atomic_read(&task->task_active)) { | 2100 | if (atomic_read(&task->task_active)) { |
2103 | atomic_set(&task->task_stop, 1); | 2101 | atomic_set(&task->task_stop, 1); |
2104 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, | 2102 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, |
2105 | flags); | 2103 | flags); |
2106 | 2104 | ||
2107 | DEBUG_TS("task_no[%d] - Waiting to complete\n", | 2105 | DEBUG_TS("task_no[%d] - Waiting to complete\n", |
2108 | task->task_no); | 2106 | task->task_no); |
2109 | wait_for_completion(&task->task_stop_comp); | 2107 | wait_for_completion(&task->task_stop_comp); |
2110 | DEBUG_TS("task_no[%d] - Stopped successfully\n", | 2108 | DEBUG_TS("task_no[%d] - Stopped successfully\n", |
2111 | task->task_no); | 2109 | task->task_no); |
2112 | 2110 | ||
2113 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 2111 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); |
2114 | atomic_dec(&T_TASK(cmd)->t_task_cdbs_left); | 2112 | atomic_dec(&T_TASK(cmd)->t_task_cdbs_left); |
2115 | 2113 | ||
2116 | atomic_set(&task->task_active, 0); | 2114 | atomic_set(&task->task_active, 0); |
2117 | atomic_set(&task->task_stop, 0); | 2115 | atomic_set(&task->task_stop, 0); |
2118 | } else { | 2116 | } else { |
2119 | DEBUG_TS("task_no[%d] - Did nothing\n", task->task_no); | 2117 | DEBUG_TS("task_no[%d] - Did nothing\n", task->task_no); |
2120 | ret++; | 2118 | ret++; |
2121 | } | 2119 | } |
2122 | 2120 | ||
2123 | __transport_stop_task_timer(task, &flags); | 2121 | __transport_stop_task_timer(task, &flags); |
2124 | } | 2122 | } |
2125 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 2123 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); |
2126 | 2124 | ||
2127 | return ret; | 2125 | return ret; |
2128 | } | 2126 | } |
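When a task is still active, the stop path above sets task_stop and blocks in wait_for_completion() until the backend signals task_stop_comp. A userspace analogue of that handshake built from a pthread condition variable; this is illustrative only, not the kernel completion API:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done_cond = PTHREAD_COND_INITIALIZER;
static int task_done;

static void *worker(void *arg)
{
	(void)arg;
	/* ... the in-flight task runs here ... */
	pthread_mutex_lock(&lock);
	task_done = 1;			/* like complete(&task->task_stop_comp) */
	pthread_cond_signal(&done_cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, worker, NULL);

	pthread_mutex_lock(&lock);	/* like wait_for_completion(...) */
	while (!task_done)
		pthread_cond_wait(&done_cond, &lock);
	pthread_mutex_unlock(&lock);

	printf("task stopped successfully\n");
	pthread_join(tid, NULL);
	return 0;
}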
2129 | 2127 | ||
2130 | static void transport_failure_reset_queue_depth(struct se_device *dev) | 2128 | static void transport_failure_reset_queue_depth(struct se_device *dev) |
2131 | { | 2129 | { |
2132 | unsigned long flags; | 2130 | unsigned long flags; |
2133 | 2131 | ||
2134 | spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags); | 2132 | spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags); |
2135 | atomic_inc(&dev->depth_left); | 2133 | atomic_inc(&dev->depth_left); |
2136 | atomic_inc(&SE_HBA(dev)->left_queue_depth); | 2134 | atomic_inc(&SE_HBA(dev)->left_queue_depth); |
2137 | spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags); | 2135 | spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags); |
2138 | } | 2136 | } |
2139 | 2137 | ||
2140 | /* | 2138 | /* |
2141 | * Handle SAM-esque emulation for generic transport request failures. | 2139 | * Handle SAM-esque emulation for generic transport request failures. |
2142 | */ | 2140 | */ |
2143 | static void transport_generic_request_failure( | 2141 | static void transport_generic_request_failure( |
2144 | struct se_cmd *cmd, | 2142 | struct se_cmd *cmd, |
2145 | struct se_device *dev, | 2143 | struct se_device *dev, |
2146 | int complete, | 2144 | int complete, |
2147 | int sc) | 2145 | int sc) |
2148 | { | 2146 | { |
2149 | DEBUG_GRF("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x" | 2147 | DEBUG_GRF("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x" |
2150 | " CDB: 0x%02x\n", cmd, CMD_TFO(cmd)->get_task_tag(cmd), | 2148 | " CDB: 0x%02x\n", cmd, CMD_TFO(cmd)->get_task_tag(cmd), |
2151 | T_TASK(cmd)->t_task_cdb[0]); | 2149 | T_TASK(cmd)->t_task_cdb[0]); |
2152 | DEBUG_GRF("-----[ i_state: %d t_state/def_t_state:" | 2150 | DEBUG_GRF("-----[ i_state: %d t_state/def_t_state:" |
2153 | " %d/%d transport_error_status: %d\n", | 2151 | " %d/%d transport_error_status: %d\n", |
2154 | CMD_TFO(cmd)->get_cmd_state(cmd), | 2152 | CMD_TFO(cmd)->get_cmd_state(cmd), |
2155 | cmd->t_state, cmd->deferred_t_state, | 2153 | cmd->t_state, cmd->deferred_t_state, |
2156 | cmd->transport_error_status); | 2154 | cmd->transport_error_status); |
2157 | DEBUG_GRF("-----[ t_task_cdbs: %d t_task_cdbs_left: %d" | 2155 | DEBUG_GRF("-----[ t_task_cdbs: %d t_task_cdbs_left: %d" |
2158 | " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --" | 2156 | " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --" |
2159 | " t_transport_active: %d t_transport_stop: %d" | 2157 | " t_transport_active: %d t_transport_stop: %d" |
2160 | " t_transport_sent: %d\n", T_TASK(cmd)->t_task_cdbs, | 2158 | " t_transport_sent: %d\n", T_TASK(cmd)->t_task_cdbs, |
2161 | atomic_read(&T_TASK(cmd)->t_task_cdbs_left), | 2159 | atomic_read(&T_TASK(cmd)->t_task_cdbs_left), |
2162 | atomic_read(&T_TASK(cmd)->t_task_cdbs_sent), | 2160 | atomic_read(&T_TASK(cmd)->t_task_cdbs_sent), |
2163 | atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left), | 2161 | atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left), |
2164 | atomic_read(&T_TASK(cmd)->t_transport_active), | 2162 | atomic_read(&T_TASK(cmd)->t_transport_active), |
2165 | atomic_read(&T_TASK(cmd)->t_transport_stop), | 2163 | atomic_read(&T_TASK(cmd)->t_transport_stop), |
2166 | atomic_read(&T_TASK(cmd)->t_transport_sent)); | 2164 | atomic_read(&T_TASK(cmd)->t_transport_sent)); |
2167 | 2165 | ||
2168 | transport_stop_all_task_timers(cmd); | 2166 | transport_stop_all_task_timers(cmd); |
2169 | 2167 | ||
2170 | if (dev) | 2168 | if (dev) |
2171 | transport_failure_reset_queue_depth(dev); | 2169 | transport_failure_reset_queue_depth(dev); |
2172 | /* | 2170 | /* |
2173 | * For SAM Task Attribute emulation for failed struct se_cmd | 2171 | * For SAM Task Attribute emulation for failed struct se_cmd |
2174 | */ | 2172 | */ |
2175 | if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) | 2173 | if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) |
2176 | transport_complete_task_attr(cmd); | 2174 | transport_complete_task_attr(cmd); |
2177 | 2175 | ||
2178 | if (complete) { | 2176 | if (complete) { |
2179 | transport_direct_request_timeout(cmd); | 2177 | transport_direct_request_timeout(cmd); |
2180 | cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE; | 2178 | cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE; |
2181 | } | 2179 | } |
2182 | 2180 | ||
2183 | switch (cmd->transport_error_status) { | 2181 | switch (cmd->transport_error_status) { |
2184 | case PYX_TRANSPORT_UNKNOWN_SAM_OPCODE: | 2182 | case PYX_TRANSPORT_UNKNOWN_SAM_OPCODE: |
2185 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; | 2183 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; |
2186 | break; | 2184 | break; |
2187 | case PYX_TRANSPORT_REQ_TOO_MANY_SECTORS: | 2185 | case PYX_TRANSPORT_REQ_TOO_MANY_SECTORS: |
2188 | cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY; | 2186 | cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY; |
2189 | break; | 2187 | break; |
2190 | case PYX_TRANSPORT_INVALID_CDB_FIELD: | 2188 | case PYX_TRANSPORT_INVALID_CDB_FIELD: |
2191 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; | 2189 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; |
2192 | break; | 2190 | break; |
2193 | case PYX_TRANSPORT_INVALID_PARAMETER_LIST: | 2191 | case PYX_TRANSPORT_INVALID_PARAMETER_LIST: |
2194 | cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; | 2192 | cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; |
2195 | break; | 2193 | break; |
2196 | case PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES: | 2194 | case PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES: |
2197 | if (!sc) | 2195 | if (!sc) |
2198 | transport_new_cmd_failure(cmd); | 2196 | transport_new_cmd_failure(cmd); |
2199 | /* | 2197 | /* |
2200 | * Currently for PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES, | 2198 | * Currently for PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES, |
2201 | * we force this session to fall back to session | 2199 | * we force this session to fall back to session |
2202 | * recovery. | 2200 | * recovery. |
2203 | */ | 2201 | */ |
2204 | CMD_TFO(cmd)->fall_back_to_erl0(cmd->se_sess); | 2202 | CMD_TFO(cmd)->fall_back_to_erl0(cmd->se_sess); |
2205 | CMD_TFO(cmd)->stop_session(cmd->se_sess, 0, 0); | 2203 | CMD_TFO(cmd)->stop_session(cmd->se_sess, 0, 0); |
2206 | 2204 | ||
2207 | goto check_stop; | 2205 | goto check_stop; |
2208 | case PYX_TRANSPORT_LU_COMM_FAILURE: | 2206 | case PYX_TRANSPORT_LU_COMM_FAILURE: |
2209 | case PYX_TRANSPORT_ILLEGAL_REQUEST: | 2207 | case PYX_TRANSPORT_ILLEGAL_REQUEST: |
2210 | cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 2208 | cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
2211 | break; | 2209 | break; |
2212 | case PYX_TRANSPORT_UNKNOWN_MODE_PAGE: | 2210 | case PYX_TRANSPORT_UNKNOWN_MODE_PAGE: |
2213 | cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE; | 2211 | cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE; |
2214 | break; | 2212 | break; |
2215 | case PYX_TRANSPORT_WRITE_PROTECTED: | 2213 | case PYX_TRANSPORT_WRITE_PROTECTED: |
2216 | cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; | 2214 | cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; |
2217 | break; | 2215 | break; |
2218 | case PYX_TRANSPORT_RESERVATION_CONFLICT: | 2216 | case PYX_TRANSPORT_RESERVATION_CONFLICT: |
2219 | /* | 2217 | /* |
2220 | * No SENSE Data payload for this case, set SCSI Status | 2218 | * No SENSE Data payload for this case, set SCSI Status |
2221 | * and queue the response to $FABRIC_MOD. | 2219 | * and queue the response to $FABRIC_MOD. |
2222 | * | 2220 | * |
2223 | * Uses linux/include/scsi/scsi.h SAM status codes defs | 2221 | * Uses linux/include/scsi/scsi.h SAM status codes defs |
2224 | */ | 2222 | */ |
2225 | cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; | 2223 | cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; |
2226 | /* | 2224 | /* |
2227 | * For UA Interlock Code 11b, a RESERVATION CONFLICT will | 2225 | * For UA Interlock Code 11b, a RESERVATION CONFLICT will |
2228 | * establish a UNIT ATTENTION with PREVIOUS RESERVATION | 2226 | * establish a UNIT ATTENTION with PREVIOUS RESERVATION |
2229 | * CONFLICT STATUS. | 2227 | * CONFLICT STATUS. |
2230 | * | 2228 | * |
2231 | * See spc4r17, section 7.4.6 Control Mode Page, Table 349 | 2229 | * See spc4r17, section 7.4.6 Control Mode Page, Table 349 |
2232 | */ | 2230 | */ |
2233 | if (SE_SESS(cmd) && | 2231 | if (SE_SESS(cmd) && |
2234 | DEV_ATTRIB(cmd->se_dev)->emulate_ua_intlck_ctrl == 2) | 2232 | DEV_ATTRIB(cmd->se_dev)->emulate_ua_intlck_ctrl == 2) |
2235 | core_scsi3_ua_allocate(SE_SESS(cmd)->se_node_acl, | 2233 | core_scsi3_ua_allocate(SE_SESS(cmd)->se_node_acl, |
2236 | cmd->orig_fe_lun, 0x2C, | 2234 | cmd->orig_fe_lun, 0x2C, |
2237 | ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); | 2235 | ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); |
2238 | 2236 | ||
2239 | CMD_TFO(cmd)->queue_status(cmd); | 2237 | CMD_TFO(cmd)->queue_status(cmd); |
2240 | goto check_stop; | 2238 | goto check_stop; |
2241 | case PYX_TRANSPORT_USE_SENSE_REASON: | 2239 | case PYX_TRANSPORT_USE_SENSE_REASON: |
2242 | /* | 2240 | /* |
2243 | * struct se_cmd->scsi_sense_reason already set | 2241 | * struct se_cmd->scsi_sense_reason already set |
2244 | */ | 2242 | */ |
2245 | break; | 2243 | break; |
2246 | default: | 2244 | default: |
2247 | printk(KERN_ERR "Unknown transport error for CDB 0x%02x: %d\n", | 2245 | printk(KERN_ERR "Unknown transport error for CDB 0x%02x: %d\n", |
2248 | T_TASK(cmd)->t_task_cdb[0], | 2246 | T_TASK(cmd)->t_task_cdb[0], |
2249 | cmd->transport_error_status); | 2247 | cmd->transport_error_status); |
2250 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; | 2248 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; |
2251 | break; | 2249 | break; |
2252 | } | 2250 | } |
2253 | 2251 | ||
2254 | if (!sc) | 2252 | if (!sc) |
2255 | transport_new_cmd_failure(cmd); | 2253 | transport_new_cmd_failure(cmd); |
2256 | else | 2254 | else |
2257 | transport_send_check_condition_and_sense(cmd, | 2255 | transport_send_check_condition_and_sense(cmd, |
2258 | cmd->scsi_sense_reason, 0); | 2256 | cmd->scsi_sense_reason, 0); |
2259 | check_stop: | 2257 | check_stop: |
2260 | transport_lun_remove_cmd(cmd); | 2258 | transport_lun_remove_cmd(cmd); |
2261 | transport_cmd_check_stop_to_fabric(cmd); | 2259 | transport_cmd_check_stop_to_fabric(cmd); |
2262 | 2260 | ||
2263 | } | 2261 | } |
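The switch above is the whole of the SAM-esque failure emulation: each internal PYX_TRANSPORT_* status is translated into a TCM sense reason, which transport_send_check_condition_and_sense() later turns into CHECK CONDITION sense data. A minimal sketch of that translation in isolation (illustrative only, not part of this commit; the default case mirrors the default: branch above):

	static int pyx_status_to_sense_reason(int status)
	{
		switch (status) {
		case PYX_TRANSPORT_UNKNOWN_SAM_OPCODE:
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		case PYX_TRANSPORT_INVALID_CDB_FIELD:
			return TCM_INVALID_CDB_FIELD;
		case PYX_TRANSPORT_WRITE_PROTECTED:
			return TCM_WRITE_PROTECTED;
		default:
			/* Unknown codes degrade to unsupported opcode. */
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}
	}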
2264 | 2262 | ||
2265 | static void transport_direct_request_timeout(struct se_cmd *cmd) | 2263 | static void transport_direct_request_timeout(struct se_cmd *cmd) |
2266 | { | 2264 | { |
2267 | unsigned long flags; | 2265 | unsigned long flags; |
2268 | 2266 | ||
2269 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 2267 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); |
2270 | if (!(atomic_read(&T_TASK(cmd)->t_transport_timeout))) { | 2268 | if (!(atomic_read(&T_TASK(cmd)->t_transport_timeout))) { |
2271 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 2269 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); |
2272 | return; | 2270 | return; |
2273 | } | 2271 | } |
2274 | if (atomic_read(&T_TASK(cmd)->t_task_cdbs_timeout_left)) { | 2272 | if (atomic_read(&T_TASK(cmd)->t_task_cdbs_timeout_left)) { |
2275 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 2273 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); |
2276 | return; | 2274 | return; |
2277 | } | 2275 | } |
2278 | 2276 | ||
2279 | atomic_sub(atomic_read(&T_TASK(cmd)->t_transport_timeout), | 2277 | atomic_sub(atomic_read(&T_TASK(cmd)->t_transport_timeout), |
2280 | &T_TASK(cmd)->t_se_count); | 2278 | &T_TASK(cmd)->t_se_count); |
2281 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 2279 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); |
2282 | } | 2280 | } |
2283 | 2281 | ||
2284 | static void transport_generic_request_timeout(struct se_cmd *cmd) | 2282 | static void transport_generic_request_timeout(struct se_cmd *cmd) |
2285 | { | 2283 | { |
2286 | unsigned long flags; | 2284 | unsigned long flags; |
2287 | 2285 | ||
2288 | /* | 2286 | /* |
2289 | * Reset T_TASK(cmd)->t_se_count to allow transport_generic_remove() | 2287 | * Reset T_TASK(cmd)->t_se_count to allow transport_generic_remove() |
2290 | * to allow last call to free memory resources. | 2288 | * to allow last call to free memory resources. |
2291 | */ | 2289 | */ |
2292 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 2290 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); |
2293 | if (atomic_read(&T_TASK(cmd)->t_transport_timeout) > 1) { | 2291 | if (atomic_read(&T_TASK(cmd)->t_transport_timeout) > 1) { |
2294 | int tmp = (atomic_read(&T_TASK(cmd)->t_transport_timeout) - 1); | 2292 | int tmp = (atomic_read(&T_TASK(cmd)->t_transport_timeout) - 1); |
2295 | 2293 | ||
2296 | atomic_sub(tmp, &T_TASK(cmd)->t_se_count); | 2294 | atomic_sub(tmp, &T_TASK(cmd)->t_se_count); |
2297 | } | 2295 | } |
2298 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 2296 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); |
2299 | 2297 | ||
2300 | transport_generic_remove(cmd, 0, 0); | 2298 | transport_generic_remove(cmd, 0, 0); |
2301 | } | 2299 | } |
2302 | 2300 | ||
2303 | static int | 2301 | static int |
2304 | transport_generic_allocate_buf(struct se_cmd *cmd, u32 data_length) | 2302 | transport_generic_allocate_buf(struct se_cmd *cmd, u32 data_length) |
2305 | { | 2303 | { |
2306 | unsigned char *buf; | 2304 | unsigned char *buf; |
2307 | 2305 | ||
2308 | buf = kzalloc(data_length, GFP_KERNEL); | 2306 | buf = kzalloc(data_length, GFP_KERNEL); |
2309 | if (!(buf)) { | 2307 | if (!(buf)) { |
2310 | printk(KERN_ERR "Unable to allocate memory for buffer\n"); | 2308 | printk(KERN_ERR "Unable to allocate memory for buffer\n"); |
2311 | return -1; | 2309 | return -1; |
2312 | } | 2310 | } |
2313 | 2311 | ||
2314 | T_TASK(cmd)->t_tasks_se_num = 0; | 2312 | T_TASK(cmd)->t_tasks_se_num = 0; |
2315 | T_TASK(cmd)->t_task_buf = buf; | 2313 | T_TASK(cmd)->t_task_buf = buf; |
2316 | 2314 | ||
2317 | return 0; | 2315 | return 0; |
2318 | } | 2316 | } |
2319 | 2317 | ||
2320 | static inline u32 transport_lba_21(unsigned char *cdb) | 2318 | static inline u32 transport_lba_21(unsigned char *cdb) |
2321 | { | 2319 | { |
2322 | return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3]; | 2320 | return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3]; |
2323 | } | 2321 | } |
2324 | 2322 | ||
2325 | static inline u32 transport_lba_32(unsigned char *cdb) | 2323 | static inline u32 transport_lba_32(unsigned char *cdb) |
2326 | { | 2324 | { |
2327 | return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; | 2325 | return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; |
2328 | } | 2326 | } |
2329 | 2327 | ||
2330 | static inline unsigned long long transport_lba_64(unsigned char *cdb) | 2328 | static inline unsigned long long transport_lba_64(unsigned char *cdb) |
2331 | { | 2329 | { |
2332 | unsigned int __v1, __v2; | 2330 | unsigned int __v1, __v2; |
2333 | 2331 | ||
2334 | __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; | 2332 | __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; |
2335 | __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; | 2333 | __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; |
2336 | 2334 | ||
2337 | return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; | 2335 | return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; |
2338 | } | 2336 | } |
2339 | 2337 | ||
2340 | /* | 2338 | /* |
2341 | * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs | 2339 | * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs |
2342 | */ | 2340 | */ |
2343 | static inline unsigned long long transport_lba_64_ext(unsigned char *cdb) | 2341 | static inline unsigned long long transport_lba_64_ext(unsigned char *cdb) |
2344 | { | 2342 | { |
2345 | unsigned int __v1, __v2; | 2343 | unsigned int __v1, __v2; |
2346 | 2344 | ||
2347 | __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15]; | 2345 | __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15]; |
2348 | __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19]; | 2346 | __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19]; |
2349 | 2347 | ||
2350 | return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; | 2348 | return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; |
2351 | } | 2349 | } |
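These helpers assemble big-endian LBA fields from the CDB one byte at a time. On kernels providing the unaligned big-endian accessors in <asm/unaligned.h>, the same extraction can be written more compactly; a sketch of that alternative form (not what this file does):

	#include <asm/unaligned.h>

	/* READ_16/WRITE_16 carry a 64-bit big-endian LBA in CDB bytes 2..9. */
	static inline unsigned long long lba_64_alt(unsigned char *cdb)
	{
		return get_unaligned_be64(&cdb[2]);
	}

	/* 10- and 12-byte CDBs carry a 32-bit big-endian LBA in bytes 2..5. */
	static inline u32 lba_32_alt(unsigned char *cdb)
	{
		return get_unaligned_be32(&cdb[2]);
	}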
2352 | 2350 | ||
2353 | static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd) | 2351 | static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd) |
2354 | { | 2352 | { |
2355 | unsigned long flags; | 2353 | unsigned long flags; |
2356 | 2354 | ||
2357 | spin_lock_irqsave(&T_TASK(se_cmd)->t_state_lock, flags); | 2355 | spin_lock_irqsave(&T_TASK(se_cmd)->t_state_lock, flags); |
2358 | se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; | 2356 | se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; |
2359 | spin_unlock_irqrestore(&T_TASK(se_cmd)->t_state_lock, flags); | 2357 | spin_unlock_irqrestore(&T_TASK(se_cmd)->t_state_lock, flags); |
2360 | } | 2358 | } |
2361 | 2359 | ||
2362 | /* | 2360 | /* |
2363 | * Called from interrupt context. | 2361 | * Called from interrupt context. |
2364 | */ | 2362 | */ |
2365 | static void transport_task_timeout_handler(unsigned long data) | 2363 | static void transport_task_timeout_handler(unsigned long data) |
2366 | { | 2364 | { |
2367 | struct se_task *task = (struct se_task *)data; | 2365 | struct se_task *task = (struct se_task *)data; |
2368 | struct se_cmd *cmd = TASK_CMD(task); | 2366 | struct se_cmd *cmd = TASK_CMD(task); |
2369 | unsigned long flags; | 2367 | unsigned long flags; |
2370 | 2368 | ||
2371 | DEBUG_TT("transport task timeout fired! task: %p cmd: %p\n", task, cmd); | 2369 | DEBUG_TT("transport task timeout fired! task: %p cmd: %p\n", task, cmd); |
2372 | 2370 | ||
2373 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 2371 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); |
2374 | if (task->task_flags & TF_STOP) { | 2372 | if (task->task_flags & TF_STOP) { |
2375 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 2373 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); |
2376 | return; | 2374 | return; |
2377 | } | 2375 | } |
2378 | task->task_flags &= ~TF_RUNNING; | 2376 | task->task_flags &= ~TF_RUNNING; |
2379 | 2377 | ||
2380 | /* | 2378 | /* |
2381 | * Determine if transport_complete_task() has already been called. | 2379 | * Determine if transport_complete_task() has already been called. |
2382 | */ | 2380 | */ |
2383 | if (!(atomic_read(&task->task_active))) { | 2381 | if (!(atomic_read(&task->task_active))) { |
2384 | DEBUG_TT("transport task: %p cmd: %p timeout task_active" | 2382 | DEBUG_TT("transport task: %p cmd: %p timeout task_active" |
2385 | " == 0\n", task, cmd); | 2383 | " == 0\n", task, cmd); |
2386 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 2384 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); |
2387 | return; | 2385 | return; |
2388 | } | 2386 | } |
2389 | 2387 | ||
2390 | atomic_inc(&T_TASK(cmd)->t_se_count); | 2388 | atomic_inc(&T_TASK(cmd)->t_se_count); |
2391 | atomic_inc(&T_TASK(cmd)->t_transport_timeout); | 2389 | atomic_inc(&T_TASK(cmd)->t_transport_timeout); |
2392 | T_TASK(cmd)->t_tasks_failed = 1; | 2390 | T_TASK(cmd)->t_tasks_failed = 1; |
2393 | 2391 | ||
2394 | atomic_set(&task->task_timeout, 1); | 2392 | atomic_set(&task->task_timeout, 1); |
2395 | task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT; | 2393 | task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT; |
2396 | task->task_scsi_status = 1; | 2394 | task->task_scsi_status = 1; |
2397 | 2395 | ||
2398 | if (atomic_read(&task->task_stop)) { | 2396 | if (atomic_read(&task->task_stop)) { |
2399 | DEBUG_TT("transport task: %p cmd: %p timeout task_stop" | 2397 | DEBUG_TT("transport task: %p cmd: %p timeout task_stop" |
2400 | " == 1\n", task, cmd); | 2398 | " == 1\n", task, cmd); |
2401 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 2399 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); |
2402 | complete(&task->task_stop_comp); | 2400 | complete(&task->task_stop_comp); |
2403 | return; | 2401 | return; |
2404 | } | 2402 | } |
2405 | 2403 | ||
2406 | if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_left))) { | 2404 | if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_left))) { |
2407 | DEBUG_TT("transport task: %p cmd: %p timeout non zero" | 2405 | DEBUG_TT("transport task: %p cmd: %p timeout non zero" |
2408 | " t_task_cdbs_left\n", task, cmd); | 2406 | " t_task_cdbs_left\n", task, cmd); |
2409 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 2407 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); |
2410 | return; | 2408 | return; |
2411 | } | 2409 | } |
2412 | DEBUG_TT("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n", | 2410 | DEBUG_TT("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n", |
2413 | task, cmd); | 2411 | task, cmd); |
2414 | 2412 | ||
2415 | cmd->t_state = TRANSPORT_COMPLETE_FAILURE; | 2413 | cmd->t_state = TRANSPORT_COMPLETE_FAILURE; |
2416 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 2414 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); |
2417 | 2415 | ||
2418 | transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE); | 2416 | transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE); |
2419 | } | 2417 | } |
2420 | 2418 | ||
2421 | /* | 2419 | /* |
2422 | * Called with T_TASK(cmd)->t_state_lock held. | 2420 | * Called with T_TASK(cmd)->t_state_lock held. |
2423 | */ | 2421 | */ |
2424 | static void transport_start_task_timer(struct se_task *task) | 2422 | static void transport_start_task_timer(struct se_task *task) |
2425 | { | 2423 | { |
2426 | struct se_device *dev = task->se_dev; | 2424 | struct se_device *dev = task->se_dev; |
2427 | int timeout; | 2425 | int timeout; |
2428 | 2426 | ||
2429 | if (task->task_flags & TF_RUNNING) | 2427 | if (task->task_flags & TF_RUNNING) |
2430 | return; | 2428 | return; |
2431 | /* | 2429 | /* |
2432 | * If the task_timeout is disabled, exit now. | 2430 | * If the task_timeout is disabled, exit now. |
2433 | */ | 2431 | */ |
2434 | timeout = DEV_ATTRIB(dev)->task_timeout; | 2432 | timeout = DEV_ATTRIB(dev)->task_timeout; |
2435 | if (!(timeout)) | 2433 | if (!(timeout)) |
2436 | return; | 2434 | return; |
2437 | 2435 | ||
2438 | init_timer(&task->task_timer); | 2436 | init_timer(&task->task_timer); |
2439 | task->task_timer.expires = (get_jiffies_64() + timeout * HZ); | 2437 | task->task_timer.expires = (get_jiffies_64() + timeout * HZ); |
2440 | task->task_timer.data = (unsigned long) task; | 2438 | task->task_timer.data = (unsigned long) task; |
2441 | task->task_timer.function = transport_task_timeout_handler; | 2439 | task->task_timer.function = transport_task_timeout_handler; |
2442 | 2440 | ||
2443 | task->task_flags |= TF_RUNNING; | 2441 | task->task_flags |= TF_RUNNING; |
2444 | add_timer(&task->task_timer); | 2442 | add_timer(&task->task_timer); |
2445 | #if 0 | 2443 | #if 0 |
2446 | printk(KERN_INFO "Starting task timer for cmd: %p task: %p seconds:" | 2444 | printk(KERN_INFO "Starting task timer for cmd: %p task: %p seconds:" |
2447 | " %d\n", task->task_se_cmd, task, timeout); | 2445 | " %d\n", task->task_se_cmd, task, timeout); |
2448 | #endif | 2446 | #endif |
2449 | } | 2447 | } |
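init_timer() with hand-assigned .expires/.data/.function is the classic timer API of this era. For context, the post-4.15 equivalent would use timer_setup() and from_timer(); a hedged sketch, assuming task_timer stays embedded in struct se_task (not part of this commit):

	/* Modern-API sketch only; the code above predates timer_setup(). */
	static void task_timeout_fn(struct timer_list *t)
	{
		struct se_task *task = from_timer(task, t, task_timer);
		/* ... same logic as transport_task_timeout_handler() ... */
	}

	timer_setup(&task->task_timer, task_timeout_fn, 0);
	mod_timer(&task->task_timer, jiffies + timeout * HZ);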
2450 | 2448 | ||
2451 | /* | 2449 | /* |
2452 | * Called with spin_lock_irq(&T_TASK(cmd)->t_state_lock) held. | 2450 | * Called with spin_lock_irq(&T_TASK(cmd)->t_state_lock) held. |
2453 | */ | 2451 | */ |
2454 | void __transport_stop_task_timer(struct se_task *task, unsigned long *flags) | 2452 | void __transport_stop_task_timer(struct se_task *task, unsigned long *flags) |
2455 | { | 2453 | { |
2456 | struct se_cmd *cmd = TASK_CMD(task); | 2454 | struct se_cmd *cmd = TASK_CMD(task); |
2457 | 2455 | ||
2458 | if (!(task->task_flags & TF_RUNNING)) | 2456 | if (!(task->task_flags & TF_RUNNING)) |
2459 | return; | 2457 | return; |
2460 | 2458 | ||
2461 | task->task_flags |= TF_STOP; | 2459 | task->task_flags |= TF_STOP; |
2462 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, *flags); | 2460 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, *flags); |
2463 | 2461 | ||
2464 | del_timer_sync(&task->task_timer); | 2462 | del_timer_sync(&task->task_timer); |
2465 | 2463 | ||
2466 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, *flags); | 2464 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, *flags); |
2467 | task->task_flags &= ~TF_RUNNING; | 2465 | task->task_flags &= ~TF_RUNNING; |
2468 | task->task_flags &= ~TF_STOP; | 2466 | task->task_flags &= ~TF_STOP; |
2469 | } | 2467 | } |
2470 | 2468 | ||
2471 | static void transport_stop_all_task_timers(struct se_cmd *cmd) | 2469 | static void transport_stop_all_task_timers(struct se_cmd *cmd) |
2472 | { | 2470 | { |
2473 | struct se_task *task = NULL, *task_tmp; | 2471 | struct se_task *task = NULL, *task_tmp; |
2474 | unsigned long flags; | 2472 | unsigned long flags; |
2475 | 2473 | ||
2476 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 2474 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); |
2477 | list_for_each_entry_safe(task, task_tmp, | 2475 | list_for_each_entry_safe(task, task_tmp, |
2478 | &T_TASK(cmd)->t_task_list, t_list) | 2476 | &T_TASK(cmd)->t_task_list, t_list) |
2479 | __transport_stop_task_timer(task, &flags); | 2477 | __transport_stop_task_timer(task, &flags); |
2480 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 2478 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); |
2481 | } | 2479 | } |
2482 | 2480 | ||
2483 | static inline int transport_tcq_window_closed(struct se_device *dev) | 2481 | static inline int transport_tcq_window_closed(struct se_device *dev) |
2484 | { | 2482 | { |
2485 | if (dev->dev_tcq_window_closed++ < | 2483 | if (dev->dev_tcq_window_closed++ < |
2486 | PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD) { | 2484 | PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD) { |
2487 | msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT); | 2485 | msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT); |
2488 | } else | 2486 | } else |
2489 | msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG); | 2487 | msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG); |
2490 | 2488 | ||
2491 | wake_up_interruptible(&dev->dev_queue_obj->thread_wq); | 2489 | wake_up_interruptible(&dev->dev_queue_obj->thread_wq); |
2492 | return 0; | 2490 | return 0; |
2493 | } | 2491 | } |
2494 | 2492 | ||
2495 | /* | 2493 | /* |
2496 | * Called from Fabric Module context from transport_execute_tasks() | 2494 | * Called from Fabric Module context from transport_execute_tasks() |
2497 | * | 2495 | * |
2498 | * The return of this function determines whether the tasks from struct se_cmd | 2496 | * The return of this function determines whether the tasks from struct se_cmd |
2499 | * get added to the execution queue in transport_execute_tasks(), | 2497 | * get added to the execution queue in transport_execute_tasks(), |
2500 | * or are added to the delayed or ordered lists here. | 2498 | * or are added to the delayed or ordered lists here. |
2501 | */ | 2499 | */ |
2502 | static inline int transport_execute_task_attr(struct se_cmd *cmd) | 2500 | static inline int transport_execute_task_attr(struct se_cmd *cmd) |
2503 | { | 2501 | { |
2504 | if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) | 2502 | if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) |
2505 | return 1; | 2503 | return 1; |
2506 | /* | 2504 | /* |
2507 | * Check for the existence of HEAD_OF_QUEUE, and if true return 1 | 2505 | * Check for the existence of HEAD_OF_QUEUE, and if true return 1 |
2508 | * to allow the passed struct se_cmd's list of tasks to be added to the front of the list. | 2506 | * to allow the passed struct se_cmd's list of tasks to be added to the front of the list. |
2509 | */ | 2507 | */ |
2510 | if (cmd->sam_task_attr == TASK_ATTR_HOQ) { | 2508 | if (cmd->sam_task_attr == TASK_ATTR_HOQ) { |
2511 | atomic_inc(&SE_DEV(cmd)->dev_hoq_count); | 2509 | atomic_inc(&SE_DEV(cmd)->dev_hoq_count); |
2512 | smp_mb__after_atomic_inc(); | 2510 | smp_mb__after_atomic_inc(); |
2513 | DEBUG_STA("Added HEAD_OF_QUEUE for CDB:" | 2511 | DEBUG_STA("Added HEAD_OF_QUEUE for CDB:" |
2514 | " 0x%02x, se_ordered_id: %u\n", | 2512 | " 0x%02x, se_ordered_id: %u\n", |
2515 | T_TASK(cmd)->t_task_cdb[0], | 2513 | T_TASK(cmd)->t_task_cdb[0], |
2516 | cmd->se_ordered_id); | 2514 | cmd->se_ordered_id); |
2517 | return 1; | 2515 | return 1; |
2518 | } else if (cmd->sam_task_attr == TASK_ATTR_ORDERED) { | 2516 | } else if (cmd->sam_task_attr == TASK_ATTR_ORDERED) { |
2519 | spin_lock(&SE_DEV(cmd)->ordered_cmd_lock); | 2517 | spin_lock(&SE_DEV(cmd)->ordered_cmd_lock); |
2520 | list_add_tail(&cmd->se_ordered_list, | 2518 | list_add_tail(&cmd->se_ordered_list, |
2521 | &SE_DEV(cmd)->ordered_cmd_list); | 2519 | &SE_DEV(cmd)->ordered_cmd_list); |
2522 | spin_unlock(&SE_DEV(cmd)->ordered_cmd_lock); | 2520 | spin_unlock(&SE_DEV(cmd)->ordered_cmd_lock); |
2523 | 2521 | ||
2524 | atomic_inc(&SE_DEV(cmd)->dev_ordered_sync); | 2522 | atomic_inc(&SE_DEV(cmd)->dev_ordered_sync); |
2525 | smp_mb__after_atomic_inc(); | 2523 | smp_mb__after_atomic_inc(); |
2526 | 2524 | ||
2527 | DEBUG_STA("Added ORDERED for CDB: 0x%02x to ordered" | 2525 | DEBUG_STA("Added ORDERED for CDB: 0x%02x to ordered" |
2528 | " list, se_ordered_id: %u\n", | 2526 | " list, se_ordered_id: %u\n", |
2529 | T_TASK(cmd)->t_task_cdb[0], | 2527 | T_TASK(cmd)->t_task_cdb[0], |
2530 | cmd->se_ordered_id); | 2528 | cmd->se_ordered_id); |
2531 | /* | 2529 | /* |
2532 | * Add ORDERED command to tail of execution queue if | 2530 | * Add ORDERED command to tail of execution queue if |
2533 | * no other older commands exist that need to be | 2531 | * no other older commands exist that need to be |
2534 | * completed first. | 2532 | * completed first. |
2535 | */ | 2533 | */ |
2536 | if (!(atomic_read(&SE_DEV(cmd)->simple_cmds))) | 2534 | if (!(atomic_read(&SE_DEV(cmd)->simple_cmds))) |
2537 | return 1; | 2535 | return 1; |
2538 | } else { | 2536 | } else { |
2539 | /* | 2537 | /* |
2540 | * For SIMPLE and UNTAGGED Task Attribute commands | 2538 | * For SIMPLE and UNTAGGED Task Attribute commands |
2541 | */ | 2539 | */ |
2542 | atomic_inc(&SE_DEV(cmd)->simple_cmds); | 2540 | atomic_inc(&SE_DEV(cmd)->simple_cmds); |
2543 | smp_mb__after_atomic_inc(); | 2541 | smp_mb__after_atomic_inc(); |
2544 | } | 2542 | } |
2545 | /* | 2543 | /* |
2546 | * Otherwise, if one or more outstanding ORDERED task attributes exist, | 2544 | * Otherwise, if one or more outstanding ORDERED task attributes exist, |
2547 | * add the dormant task(s) built for the passed struct se_cmd to the | 2545 | * add the dormant task(s) built for the passed struct se_cmd to the |
2548 | * execution queue and move to the Active state for this struct se_device. | 2546 | * execution queue and move to the Active state for this struct se_device. |
2549 | */ | 2547 | */ |
2550 | if (atomic_read(&SE_DEV(cmd)->dev_ordered_sync) != 0) { | 2548 | if (atomic_read(&SE_DEV(cmd)->dev_ordered_sync) != 0) { |
2551 | /* | 2549 | /* |
2552 | * Otherwise, add cmd w/ tasks to delayed cmd queue that | 2550 | * Otherwise, add cmd w/ tasks to delayed cmd queue that |
2553 | * will be drained upon completion of the HEAD_OF_QUEUE task. | 2551 | * will be drained upon completion of the HEAD_OF_QUEUE task. |
2554 | */ | 2552 | */ |
2555 | spin_lock(&SE_DEV(cmd)->delayed_cmd_lock); | 2553 | spin_lock(&SE_DEV(cmd)->delayed_cmd_lock); |
2556 | cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR; | 2554 | cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR; |
2557 | list_add_tail(&cmd->se_delayed_list, | 2555 | list_add_tail(&cmd->se_delayed_list, |
2558 | &SE_DEV(cmd)->delayed_cmd_list); | 2556 | &SE_DEV(cmd)->delayed_cmd_list); |
2559 | spin_unlock(&SE_DEV(cmd)->delayed_cmd_lock); | 2557 | spin_unlock(&SE_DEV(cmd)->delayed_cmd_lock); |
2560 | 2558 | ||
2561 | DEBUG_STA("Added CDB: 0x%02x Task Attr: 0x%02x to" | 2559 | DEBUG_STA("Added CDB: 0x%02x Task Attr: 0x%02x to" |
2562 | " delayed CMD list, se_ordered_id: %u\n", | 2560 | " delayed CMD list, se_ordered_id: %u\n", |
2563 | T_TASK(cmd)->t_task_cdb[0], cmd->sam_task_attr, | 2561 | T_TASK(cmd)->t_task_cdb[0], cmd->sam_task_attr, |
2564 | cmd->se_ordered_id); | 2562 | cmd->se_ordered_id); |
2565 | /* | 2563 | /* |
2566 | * Return zero to let transport_execute_tasks() know | 2564 | * Return zero to let transport_execute_tasks() know |
2567 | * not to add the delayed tasks to the execution list. | 2565 | * not to add the delayed tasks to the execution list. |
2568 | */ | 2566 | */ |
2569 | return 0; | 2567 | return 0; |
2570 | } | 2568 | } |
2571 | /* | 2569 | /* |
2572 | * Otherwise, no ORDERED task attributes exist. | 2570 | * Otherwise, no ORDERED task attributes exist. |
2573 | */ | 2571 | */ |
2574 | return 1; | 2572 | return 1; |
2575 | } | 2573 | } |
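Condensed, the decision above is: HEAD_OF_QUEUE always executes immediately, ORDERED executes only when no SIMPLE commands are in flight, and SIMPLE commands are delayed while an ORDERED command is outstanding. A standalone sketch with hypothetical names (illustrative only; 1 = add to the execution queue now, 0 = delay):

	static int task_attr_may_execute(int attr, int simple_cmds, int ordered_sync)
	{
		if (attr == TASK_ATTR_HOQ)
			return 1;		/* jumps ahead of the queue */
		if (attr == TASK_ATTR_ORDERED)
			return !simple_cmds;	/* waits for older SIMPLE commands */
		return !ordered_sync;		/* SIMPLE held behind ORDERED */
	}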
2576 | 2574 | ||
2577 | /* | 2575 | /* |
2578 | * Called from fabric module context in transport_generic_new_cmd() and | 2576 | * Called from fabric module context in transport_generic_new_cmd() and |
2579 | * transport_generic_process_write() | 2577 | * transport_generic_process_write() |
2580 | */ | 2578 | */ |
2581 | static int transport_execute_tasks(struct se_cmd *cmd) | 2579 | static int transport_execute_tasks(struct se_cmd *cmd) |
2582 | { | 2580 | { |
2583 | int add_tasks; | 2581 | int add_tasks; |
2584 | 2582 | ||
2585 | if (!(cmd->se_cmd_flags & SCF_SE_DISABLE_ONLINE_CHECK)) { | 2583 | if (!(cmd->se_cmd_flags & SCF_SE_DISABLE_ONLINE_CHECK)) { |
2586 | if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) { | 2584 | if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) { |
2587 | cmd->transport_error_status = | 2585 | cmd->transport_error_status = |
2588 | PYX_TRANSPORT_LU_COMM_FAILURE; | 2586 | PYX_TRANSPORT_LU_COMM_FAILURE; |
2589 | transport_generic_request_failure(cmd, NULL, 0, 1); | 2587 | transport_generic_request_failure(cmd, NULL, 0, 1); |
2590 | return 0; | 2588 | return 0; |
2591 | } | 2589 | } |
2592 | } | 2590 | } |
2593 | /* | 2591 | /* |
2594 | * Call transport_cmd_check_stop() to see if a fabric exception | 2592 | * Call transport_cmd_check_stop() to see if a fabric exception |
2595 | * has occurred that prevents execution. | 2593 | * has occurred that prevents execution. |
2596 | */ | 2594 | */ |
2597 | if (!(transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING))) { | 2595 | if (!(transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING))) { |
2598 | /* | 2596 | /* |
2599 | * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE | 2597 | * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE |
2600 | * attribute for the tasks of the received struct se_cmd CDB | 2598 | * attribute for the tasks of the received struct se_cmd CDB |
2601 | */ | 2599 | */ |
2602 | add_tasks = transport_execute_task_attr(cmd); | 2600 | add_tasks = transport_execute_task_attr(cmd); |
2603 | if (add_tasks == 0) | 2601 | if (add_tasks == 0) |
2604 | goto execute_tasks; | 2602 | goto execute_tasks; |
2605 | /* | 2603 | /* |
2606 | * This calls transport_add_tasks_from_cmd() to handle | 2604 | * This calls transport_add_tasks_from_cmd() to handle |
2607 | * HEAD_OF_QUEUE ordering for SAM Task Attribute emulation | 2605 | * HEAD_OF_QUEUE ordering for SAM Task Attribute emulation |
2608 | * (if enabled) in __transport_add_task_to_execute_queue() and | 2606 | * (if enabled) in __transport_add_task_to_execute_queue() and |
2609 | * transport_add_task_check_sam_attr(). | 2607 | * transport_add_task_check_sam_attr(). |
2610 | */ | 2608 | */ |
2611 | transport_add_tasks_from_cmd(cmd); | 2609 | transport_add_tasks_from_cmd(cmd); |
2612 | } | 2610 | } |
2613 | /* | 2611 | /* |
2614 | * Kick the execution queue for the cmd associated struct se_device | 2612 | * Kick the execution queue for the cmd associated struct se_device |
2615 | * storage object. | 2613 | * storage object. |
2616 | */ | 2614 | */ |
2617 | execute_tasks: | 2615 | execute_tasks: |
2618 | __transport_execute_tasks(SE_DEV(cmd)); | 2616 | __transport_execute_tasks(SE_DEV(cmd)); |
2619 | return 0; | 2617 | return 0; |
2620 | } | 2618 | } |
2621 | 2619 | ||
2622 | /* | 2620 | /* |
2623 | * Called to check the struct se_device TCQ depth window, and once open pull a struct se_task | 2621 | * Called to check the struct se_device TCQ depth window, and once open pull a struct se_task |
2624 | * from struct se_device->execute_task_list and send it to the device transport. | 2622 | * from struct se_device->execute_task_list and send it to the device transport. |
2625 | * | 2623 | * |
2626 | * Called from transport_processing_thread() | 2624 | * Called from transport_processing_thread() |
2627 | */ | 2625 | */ |
2628 | static int __transport_execute_tasks(struct se_device *dev) | 2626 | static int __transport_execute_tasks(struct se_device *dev) |
2629 | { | 2627 | { |
2630 | int error; | 2628 | int error; |
2631 | struct se_cmd *cmd = NULL; | 2629 | struct se_cmd *cmd = NULL; |
2632 | struct se_task *task; | 2630 | struct se_task *task; |
2633 | unsigned long flags; | 2631 | unsigned long flags; |
2634 | 2632 | ||
2635 | /* | 2633 | /* |
2636 | * Check if there is enough room in the device and HBA queue to send | 2634 | * Check if there is enough room in the device and HBA queue to send |
2637 | * struct se_transport_task's to the selected transport. | 2635 | * struct se_transport_task's to the selected transport. |
2638 | */ | 2636 | */ |
2639 | check_depth: | 2637 | check_depth: |
2640 | spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags); | 2638 | spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags); |
2641 | if (!(atomic_read(&dev->depth_left)) || | 2639 | if (!(atomic_read(&dev->depth_left)) || |
2642 | !(atomic_read(&SE_HBA(dev)->left_queue_depth))) { | 2640 | !(atomic_read(&SE_HBA(dev)->left_queue_depth))) { |
2643 | spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags); | 2641 | spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags); |
2644 | return transport_tcq_window_closed(dev); | 2642 | return transport_tcq_window_closed(dev); |
2645 | } | 2643 | } |
2646 | dev->dev_tcq_window_closed = 0; | 2644 | dev->dev_tcq_window_closed = 0; |
2647 | 2645 | ||
2648 | spin_lock(&dev->execute_task_lock); | 2646 | spin_lock(&dev->execute_task_lock); |
2649 | task = transport_get_task_from_execute_queue(dev); | 2647 | task = transport_get_task_from_execute_queue(dev); |
2650 | spin_unlock(&dev->execute_task_lock); | 2648 | spin_unlock(&dev->execute_task_lock); |
2651 | 2649 | ||
2652 | if (!task) { | 2650 | if (!task) { |
2653 | spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags); | 2651 | spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags); |
2654 | return 0; | 2652 | return 0; |
2655 | } | 2653 | } |
2656 | 2654 | ||
2657 | atomic_dec(&dev->depth_left); | 2655 | atomic_dec(&dev->depth_left); |
2658 | atomic_dec(&SE_HBA(dev)->left_queue_depth); | 2656 | atomic_dec(&SE_HBA(dev)->left_queue_depth); |
2659 | spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags); | 2657 | spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags); |
2660 | 2658 | ||
2661 | cmd = TASK_CMD(task); | 2659 | cmd = TASK_CMD(task); |
2662 | 2660 | ||
2663 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 2661 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); |
2664 | atomic_set(&task->task_active, 1); | 2662 | atomic_set(&task->task_active, 1); |
2665 | atomic_set(&task->task_sent, 1); | 2663 | atomic_set(&task->task_sent, 1); |
2666 | atomic_inc(&T_TASK(cmd)->t_task_cdbs_sent); | 2664 | atomic_inc(&T_TASK(cmd)->t_task_cdbs_sent); |
2667 | 2665 | ||
2668 | if (atomic_read(&T_TASK(cmd)->t_task_cdbs_sent) == | 2666 | if (atomic_read(&T_TASK(cmd)->t_task_cdbs_sent) == |
2669 | T_TASK(cmd)->t_task_cdbs) | 2667 | T_TASK(cmd)->t_task_cdbs) |
2670 | atomic_set(&cmd->transport_sent, 1); | 2668 | atomic_set(&cmd->transport_sent, 1); |
2671 | 2669 | ||
2672 | transport_start_task_timer(task); | 2670 | transport_start_task_timer(task); |
2673 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 2671 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); |
2674 | /* | 2672 | /* |
2675 | * The struct se_cmd->transport_emulate_cdb() function pointer is used | 2673 | * The struct se_cmd->transport_emulate_cdb() function pointer is used |
2676 | * to grab REPORT_LUNS CDBs before they hit the | 2674 | * to grab REPORT_LUNS CDBs before they hit the |
2677 | * struct se_subsystem_api->do_task() caller below. | 2675 | * struct se_subsystem_api->do_task() caller below. |
2678 | */ | 2676 | */ |
2679 | if (cmd->transport_emulate_cdb) { | 2677 | if (cmd->transport_emulate_cdb) { |
2680 | error = cmd->transport_emulate_cdb(cmd); | 2678 | error = cmd->transport_emulate_cdb(cmd); |
2681 | if (error != 0) { | 2679 | if (error != 0) { |
2682 | cmd->transport_error_status = error; | 2680 | cmd->transport_error_status = error; |
2683 | atomic_set(&task->task_active, 0); | 2681 | atomic_set(&task->task_active, 0); |
2684 | atomic_set(&cmd->transport_sent, 0); | 2682 | atomic_set(&cmd->transport_sent, 0); |
2685 | transport_stop_tasks_for_cmd(cmd); | 2683 | transport_stop_tasks_for_cmd(cmd); |
2686 | transport_generic_request_failure(cmd, dev, 0, 1); | 2684 | transport_generic_request_failure(cmd, dev, 0, 1); |
2687 | goto check_depth; | 2685 | goto check_depth; |
2688 | } | 2686 | } |
2689 | /* | 2687 | /* |
2690 | * Handle the successful completion for transport_emulate_cdb() | 2688 | * Handle the successful completion for transport_emulate_cdb() |
2691 | * for synchronous operation, following SCF_EMULATE_CDB_ASYNC | 2689 | * for synchronous operation, following SCF_EMULATE_CDB_ASYNC |
2692 | * Otherwise the caller is expected to complete the task with | 2690 | * Otherwise the caller is expected to complete the task with |
2693 | * proper status. | 2691 | * proper status. |
2694 | */ | 2692 | */ |
2695 | if (!(cmd->se_cmd_flags & SCF_EMULATE_CDB_ASYNC)) { | 2693 | if (!(cmd->se_cmd_flags & SCF_EMULATE_CDB_ASYNC)) { |
2696 | cmd->scsi_status = SAM_STAT_GOOD; | 2694 | cmd->scsi_status = SAM_STAT_GOOD; |
2697 | task->task_scsi_status = GOOD; | 2695 | task->task_scsi_status = GOOD; |
2698 | transport_complete_task(task, 1); | 2696 | transport_complete_task(task, 1); |
2699 | } | 2697 | } |
2700 | } else { | 2698 | } else { |
2701 | /* | 2699 | /* |
2702 | * Currently for all virtual TCM plugins including IBLOCK, FILEIO and | 2700 | * Currently for all virtual TCM plugins including IBLOCK, FILEIO and |
2703 | * RAMDISK we use the internal transport_emulate_control_cdb() logic | 2701 | * RAMDISK we use the internal transport_emulate_control_cdb() logic |
2704 | * with struct se_subsystem_api callers for the primary SPC-3 TYPE_DISK | 2702 | * with struct se_subsystem_api callers for the primary SPC-3 TYPE_DISK |
2705 | * LUN emulation code. | 2703 | * LUN emulation code. |
2706 | * | 2704 | * |
2707 | * For TCM/pSCSI and all other SCF_SCSI_DATA_SG_IO_CDB I/O tasks we | 2705 | * For TCM/pSCSI and all other SCF_SCSI_DATA_SG_IO_CDB I/O tasks we |
2708 | * call ->do_task() directly and let the underlying TCM subsystem plugin | 2706 | * call ->do_task() directly and let the underlying TCM subsystem plugin |
2709 | * code handle the CDB emulation. | 2707 | * code handle the CDB emulation. |
2710 | */ | 2708 | */ |
2711 | if ((TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) && | 2709 | if ((TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) && |
2712 | (!(TASK_CMD(task)->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB))) | 2710 | (!(TASK_CMD(task)->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB))) |
2713 | error = transport_emulate_control_cdb(task); | 2711 | error = transport_emulate_control_cdb(task); |
2714 | else | 2712 | else |
2715 | error = TRANSPORT(dev)->do_task(task); | 2713 | error = TRANSPORT(dev)->do_task(task); |
2716 | 2714 | ||
2717 | if (error != 0) { | 2715 | if (error != 0) { |
2718 | cmd->transport_error_status = error; | 2716 | cmd->transport_error_status = error; |
2719 | atomic_set(&task->task_active, 0); | 2717 | atomic_set(&task->task_active, 0); |
2720 | atomic_set(&cmd->transport_sent, 0); | 2718 | atomic_set(&cmd->transport_sent, 0); |
2721 | transport_stop_tasks_for_cmd(cmd); | 2719 | transport_stop_tasks_for_cmd(cmd); |
2722 | transport_generic_request_failure(cmd, dev, 0, 1); | 2720 | transport_generic_request_failure(cmd, dev, 0, 1); |
2723 | } | 2721 | } |
2724 | } | 2722 | } |
2725 | 2723 | ||
2726 | goto check_depth; | 2724 | goto check_depth; |
2727 | 2725 | ||
2728 | return 0; | 2726 | return 0; |
2729 | } | 2727 | } |
2730 | 2728 | ||
2731 | void transport_new_cmd_failure(struct se_cmd *se_cmd) | 2729 | void transport_new_cmd_failure(struct se_cmd *se_cmd) |
2732 | { | 2730 | { |
2733 | unsigned long flags; | 2731 | unsigned long flags; |
2734 | /* | 2732 | /* |
2735 | * Any unsolicited data will get dumped for failed command inside of | 2733 | * Any unsolicited data will get dumped for failed command inside of |
2736 | * the fabric plugin | 2734 | * the fabric plugin |
2737 | */ | 2735 | */ |
2738 | spin_lock_irqsave(&T_TASK(se_cmd)->t_state_lock, flags); | 2736 | spin_lock_irqsave(&T_TASK(se_cmd)->t_state_lock, flags); |
2739 | se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED; | 2737 | se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED; |
2740 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 2738 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
2741 | spin_unlock_irqrestore(&T_TASK(se_cmd)->t_state_lock, flags); | 2739 | spin_unlock_irqrestore(&T_TASK(se_cmd)->t_state_lock, flags); |
2742 | 2740 | ||
2743 | CMD_TFO(se_cmd)->new_cmd_failure(se_cmd); | 2741 | CMD_TFO(se_cmd)->new_cmd_failure(se_cmd); |
2744 | } | 2742 | } |
2745 | 2743 | ||
2746 | static void transport_nop_wait_for_tasks(struct se_cmd *, int, int); | 2744 | static void transport_nop_wait_for_tasks(struct se_cmd *, int, int); |
2747 | 2745 | ||
2748 | static inline u32 transport_get_sectors_6( | 2746 | static inline u32 transport_get_sectors_6( |
2749 | unsigned char *cdb, | 2747 | unsigned char *cdb, |
2750 | struct se_cmd *cmd, | 2748 | struct se_cmd *cmd, |
2751 | int *ret) | 2749 | int *ret) |
2752 | { | 2750 | { |
2753 | struct se_device *dev = SE_LUN(cmd)->lun_se_dev; | 2751 | struct se_device *dev = SE_LUN(cmd)->lun_se_dev; |
2754 | 2752 | ||
2755 | /* | 2753 | /* |
2756 | * Assume TYPE_DISK for non-struct se_device objects. | 2754 | * Assume TYPE_DISK for non-struct se_device objects. |
2757 | * Use 8-bit sector value. | 2755 | * Use 8-bit sector value. |
2758 | */ | 2756 | */ |
2759 | if (!dev) | 2757 | if (!dev) |
2760 | goto type_disk; | 2758 | goto type_disk; |
2761 | 2759 | ||
2762 | /* | 2760 | /* |
2763 | * Use 24-bit allocation length for TYPE_TAPE. | 2761 | * Use 24-bit allocation length for TYPE_TAPE. |
2764 | */ | 2762 | */ |
2765 | if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) | 2763 | if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) |
2766 | return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4]; | 2764 | return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4]; |
2767 | 2765 | ||
2768 | /* | 2766 | /* |
2769 | * Everything else assumes the TYPE_DISK sector CDB location. | 2767 | * Everything else assumes the TYPE_DISK sector CDB location. |
2770 | * Use 8-bit sector value. | 2768 | * Use 8-bit sector value. |
2771 | */ | 2769 | */ |
2772 | type_disk: | 2770 | type_disk: |
2773 | return (u32)cdb[4]; | 2771 | return (u32)cdb[4]; |
2774 | } | 2772 | } |
2775 | 2773 | ||
2776 | static inline u32 transport_get_sectors_10( | 2774 | static inline u32 transport_get_sectors_10( |
2777 | unsigned char *cdb, | 2775 | unsigned char *cdb, |
2778 | struct se_cmd *cmd, | 2776 | struct se_cmd *cmd, |
2779 | int *ret) | 2777 | int *ret) |
2780 | { | 2778 | { |
2781 | struct se_device *dev = SE_LUN(cmd)->lun_se_dev; | 2779 | struct se_device *dev = SE_LUN(cmd)->lun_se_dev; |
2782 | 2780 | ||
2783 | /* | 2781 | /* |
2784 | * Assume TYPE_DISK for non-struct se_device objects. | 2782 | * Assume TYPE_DISK for non-struct se_device objects. |
2785 | * Use 16-bit sector value. | 2783 | * Use 16-bit sector value. |
2786 | */ | 2784 | */ |
2787 | if (!dev) | 2785 | if (!dev) |
2788 | goto type_disk; | 2786 | goto type_disk; |
2789 | 2787 | ||
2790 | /* | 2788 | /* |
2791 | * XXX_10 is not defined in SSC, throw an exception | 2789 | * XXX_10 is not defined in SSC, throw an exception |
2792 | */ | 2790 | */ |
2793 | if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) { | 2791 | if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) { |
2794 | *ret = -1; | 2792 | *ret = -1; |
2795 | return 0; | 2793 | return 0; |
2796 | } | 2794 | } |
2797 | 2795 | ||
2798 | /* | 2796 | /* |
2799 | * Everything else assumes the TYPE_DISK sector CDB location. | 2797 | * Everything else assumes the TYPE_DISK sector CDB location. |
2800 | * Use 16-bit sector value. | 2798 | * Use 16-bit sector value. |
2801 | */ | 2799 | */ |
2802 | type_disk: | 2800 | type_disk: |
2803 | return (u32)(cdb[7] << 8) + cdb[8]; | 2801 | return (u32)(cdb[7] << 8) + cdb[8]; |
2804 | } | 2802 | } |
2805 | 2803 | ||
2806 | static inline u32 transport_get_sectors_12( | 2804 | static inline u32 transport_get_sectors_12( |
2807 | unsigned char *cdb, | 2805 | unsigned char *cdb, |
2808 | struct se_cmd *cmd, | 2806 | struct se_cmd *cmd, |
2809 | int *ret) | 2807 | int *ret) |
2810 | { | 2808 | { |
2811 | struct se_device *dev = SE_LUN(cmd)->lun_se_dev; | 2809 | struct se_device *dev = SE_LUN(cmd)->lun_se_dev; |
2812 | 2810 | ||
2813 | /* | 2811 | /* |
2814 | * Assume TYPE_DISK for non-struct se_device objects. | 2812 | * Assume TYPE_DISK for non-struct se_device objects. |
2815 | * Use 32-bit sector value. | 2813 | * Use 32-bit sector value. |
2816 | */ | 2814 | */ |
2817 | if (!dev) | 2815 | if (!dev) |
2818 | goto type_disk; | 2816 | goto type_disk; |
2819 | 2817 | ||
2820 | /* | 2818 | /* |
2821 | * XXX_12 is not defined in SSC, throw an exception | 2819 | * XXX_12 is not defined in SSC, throw an exception |
2822 | */ | 2820 | */ |
2823 | if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) { | 2821 | if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) { |
2824 | *ret = -1; | 2822 | *ret = -1; |
2825 | return 0; | 2823 | return 0; |
2826 | } | 2824 | } |
2827 | 2825 | ||
2828 | /* | 2826 | /* |
2829 | * Everything else assumes the TYPE_DISK sector CDB location. | 2827 | * Everything else assumes the TYPE_DISK sector CDB location. |
2830 | * Use 32-bit sector value. | 2828 | * Use 32-bit sector value. |
2831 | */ | 2829 | */ |
2832 | type_disk: | 2830 | type_disk: |
2833 | return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9]; | 2831 | return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9]; |
2834 | } | 2832 | } |
2835 | 2833 | ||
2836 | static inline u32 transport_get_sectors_16( | 2834 | static inline u32 transport_get_sectors_16( |
2837 | unsigned char *cdb, | 2835 | unsigned char *cdb, |
2838 | struct se_cmd *cmd, | 2836 | struct se_cmd *cmd, |
2839 | int *ret) | 2837 | int *ret) |
2840 | { | 2838 | { |
2841 | struct se_device *dev = SE_LUN(cmd)->lun_se_dev; | 2839 | struct se_device *dev = SE_LUN(cmd)->lun_se_dev; |
2842 | 2840 | ||
2843 | /* | 2841 | /* |
2844 | * Assume TYPE_DISK for non-struct se_device objects. | 2842 | * Assume TYPE_DISK for non-struct se_device objects. |
2845 | * Use 32-bit sector value. | 2843 | * Use 32-bit sector value. |
2846 | */ | 2844 | */ |
2847 | if (!dev) | 2845 | if (!dev) |
2848 | goto type_disk; | 2846 | goto type_disk; |
2849 | 2847 | ||
2850 | /* | 2848 | /* |
2851 | * Use 24-bit allocation length for TYPE_TAPE. | 2849 | * Use 24-bit allocation length for TYPE_TAPE. |
2852 | */ | 2850 | */ |
2853 | if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) | 2851 | if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) |
2854 | return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14]; | 2852 | return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14]; |
2855 | 2853 | ||
2856 | type_disk: | 2854 | type_disk: |
2857 | return (u32)(cdb[10] << 24) + (cdb[11] << 16) + | 2855 | return (u32)(cdb[10] << 24) + (cdb[11] << 16) + |
2858 | (cdb[12] << 8) + cdb[13]; | 2856 | (cdb[12] << 8) + cdb[13]; |
2859 | } | 2857 | } |
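As with the LBA helpers, the TRANSFER LENGTH fields here are plain big-endian integers at fixed CDB offsets. A sketch of the TYPE_DISK cases using the unaligned accessors (the same <asm/unaligned.h> assumption as above; an alternative form, not part of this commit):

	/* TYPE_DISK transfer lengths: bytes 7..8 for 10-byte CDBs,
	 * bytes 10..13 for 16-byte CDBs. */
	static inline u32 sectors_10_alt(unsigned char *cdb)
	{
		return get_unaligned_be16(&cdb[7]);
	}

	static inline u32 sectors_16_alt(unsigned char *cdb)
	{
		return get_unaligned_be32(&cdb[10]);
	}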
2860 | 2858 | ||
2861 | /* | 2859 | /* |
2862 | * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants | 2860 | * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants |
2863 | */ | 2861 | */ |
2864 | static inline u32 transport_get_sectors_32( | 2862 | static inline u32 transport_get_sectors_32( |
2865 | unsigned char *cdb, | 2863 | unsigned char *cdb, |
2866 | struct se_cmd *cmd, | 2864 | struct se_cmd *cmd, |
2867 | int *ret) | 2865 | int *ret) |
2868 | { | 2866 | { |
2869 | /* | 2867 | /* |
2870 | * Assume TYPE_DISK for non-struct se_device objects. | 2868 | * Assume TYPE_DISK for non-struct se_device objects. |
2871 | * Use 32-bit sector value. | 2869 | * Use 32-bit sector value. |
2872 | */ | 2870 | */ |
2873 | return (u32)(cdb[28] << 24) + (cdb[29] << 16) + | 2871 | return (u32)(cdb[28] << 24) + (cdb[29] << 16) + |
2874 | (cdb[30] << 8) + cdb[31]; | 2872 | (cdb[30] << 8) + cdb[31]; |
2875 | 2873 | ||
2876 | } | 2874 | } |
2877 | 2875 | ||
2878 | static inline u32 transport_get_size( | 2876 | static inline u32 transport_get_size( |
2879 | u32 sectors, | 2877 | u32 sectors, |
2880 | unsigned char *cdb, | 2878 | unsigned char *cdb, |
2881 | struct se_cmd *cmd) | 2879 | struct se_cmd *cmd) |
2882 | { | 2880 | { |
2883 | struct se_device *dev = SE_DEV(cmd); | 2881 | struct se_device *dev = SE_DEV(cmd); |
2884 | 2882 | ||
2885 | if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) { | 2883 | if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) { |
2886 | if (cdb[1] & 1) /* sectors */ | 2884 | if (cdb[1] & 1) /* sectors */ |
2887 | return DEV_ATTRIB(dev)->block_size * sectors; | 2885 | return DEV_ATTRIB(dev)->block_size * sectors; |
2888 | else /* bytes */ | 2886 | else /* bytes */ |
2889 | return sectors; | 2887 | return sectors; |
2890 | } | 2888 | } |
2891 | #if 0 | 2889 | #if 0 |
2892 | printk(KERN_INFO "Returning block_size: %u, sectors: %u == %u for" | 2890 | printk(KERN_INFO "Returning block_size: %u, sectors: %u == %u for" |
2893 | " %s object\n", DEV_ATTRIB(dev)->block_size, sectors, | 2891 | " %s object\n", DEV_ATTRIB(dev)->block_size, sectors, |
2894 | DEV_ATTRIB(dev)->block_size * sectors, | 2892 | DEV_ATTRIB(dev)->block_size * sectors, |
2895 | TRANSPORT(dev)->name); | 2893 | TRANSPORT(dev)->name); |
2896 | #endif | 2894 | #endif |
2897 | return DEV_ATTRIB(dev)->block_size * sectors; | 2895 | return DEV_ATTRIB(dev)->block_size * sectors; |
2898 | } | 2896 | } |
2899 | 2897 | ||
2900 | unsigned char transport_asciihex_to_binaryhex(unsigned char val[2]) | 2898 | unsigned char transport_asciihex_to_binaryhex(unsigned char val[2]) |
2901 | { | 2899 | { |
2902 | unsigned char result = 0; | 2900 | unsigned char result = 0; |
2903 | /* | 2901 | /* |
2904 | * MSB | 2902 | * MSB |
2905 | */ | 2903 | */ |
2906 | if ((val[0] >= 'a') && (val[0] <= 'f')) | 2904 | if ((val[0] >= 'a') && (val[0] <= 'f')) |
2907 | result = ((val[0] - 'a' + 10) & 0xf) << 4; | 2905 | result = ((val[0] - 'a' + 10) & 0xf) << 4; |
2908 | else | 2906 | else |
2909 | if ((val[0] >= 'A') && (val[0] <= 'F')) | 2907 | if ((val[0] >= 'A') && (val[0] <= 'F')) |
2910 | result = ((val[0] - 'A' + 10) & 0xf) << 4; | 2908 | result = ((val[0] - 'A' + 10) & 0xf) << 4; |
2911 | else /* digit */ | 2909 | else /* digit */ |
2912 | result = ((val[0] - '0') & 0xf) << 4; | 2910 | result = ((val[0] - '0') & 0xf) << 4; |
2913 | /* | 2911 | /* |
2914 | * LSB | 2912 | * LSB |
2915 | */ | 2913 | */ |
2916 | if ((val[1] >= 'a') && (val[1] <= 'f')) | 2914 | if ((val[1] >= 'a') && (val[1] <= 'f')) |
2917 | result |= ((val[1] - 'a' + 10) & 0xf); | 2915 | result |= ((val[1] - 'a' + 10) & 0xf); |
2918 | else | 2916 | else |
2919 | if ((val[1] >= 'A') && (val[1] <= 'F')) | 2917 | if ((val[1] >= 'A') && (val[1] <= 'F')) |
2920 | result |= ((val[1] - 'A' + 10) & 0xf); | 2918 | result |= ((val[1] - 'A' + 10) & 0xf); |
2921 | else /* digit */ | 2919 | else /* digit */ |
2922 | result |= ((val[1] - '0') & 0xf); | 2920 | result |= ((val[1] - '0') & 0xf); |
2923 | 2921 | ||
2924 | return result; | 2922 | return result; |
2925 | } | 2923 | } |
2926 | EXPORT_SYMBOL(transport_asciihex_to_binaryhex); | 2924 | EXPORT_SYMBOL(transport_asciihex_to_binaryhex); |
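transport_asciihex_to_binaryhex() open-codes a two-nibble ASCII-hex conversion. The kernel's lib/hexdump.c provides hex_to_bin() for the per-nibble step; an equivalent sketch (illustrative only; like the open-coded version above it does not reject non-hex input, for which hex_to_bin() returns -1):

	/* Sketch using hex_to_bin() from lib/hexdump.c. */
	static unsigned char asciihex_to_byte(const unsigned char val[2])
	{
		return (hex_to_bin(val[0]) << 4) | hex_to_bin(val[1]);
	}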
2927 | 2925 | ||
2928 | static void transport_xor_callback(struct se_cmd *cmd) | 2926 | static void transport_xor_callback(struct se_cmd *cmd) |
2929 | { | 2927 | { |
2930 | unsigned char *buf, *addr; | 2928 | unsigned char *buf, *addr; |
2931 | struct se_mem *se_mem; | 2929 | struct se_mem *se_mem; |
2932 | unsigned int offset; | 2930 | unsigned int offset; |
2933 | int i; | 2931 | int i; |
2934 | /* | 2932 | /* |
2935 | * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command | 2933 | * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command |
2936 | * | 2934 | * |
2937 | * 1) read the specified logical block(s); | 2935 | * 1) read the specified logical block(s); |
2938 | * 2) transfer logical blocks from the data-out buffer; | 2936 | * 2) transfer logical blocks from the data-out buffer; |
2939 | * 3) XOR the logical blocks transferred from the data-out buffer with | 2937 | * 3) XOR the logical blocks transferred from the data-out buffer with |
2940 | * the logical blocks read, storing the resulting XOR data in a buffer; | 2938 | * the logical blocks read, storing the resulting XOR data in a buffer; |
2941 | * 4) if the DISABLE WRITE bit is set to zero, then write the logical | 2939 | * 4) if the DISABLE WRITE bit is set to zero, then write the logical |
2942 | * blocks transferred from the data-out buffer; and | 2940 | * blocks transferred from the data-out buffer; and |
2943 | * 5) transfer the resulting XOR data to the data-in buffer. | 2941 | * 5) transfer the resulting XOR data to the data-in buffer. |
2944 | */ | 2942 | */ |
2945 | buf = kmalloc(cmd->data_length, GFP_KERNEL); | 2943 | buf = kmalloc(cmd->data_length, GFP_KERNEL); |
2946 | if (!(buf)) { | 2944 | if (!(buf)) { |
2947 | printk(KERN_ERR "Unable to allocate xor_callback buf\n"); | 2945 | printk(KERN_ERR "Unable to allocate xor_callback buf\n"); |
2948 | return; | 2946 | return; |
2949 | } | 2947 | } |
2950 | /* | 2948 | /* |
2951 | * Copy the scatterlist WRITE buffer located at T_TASK(cmd)->t_mem_list | 2949 | * Copy the scatterlist WRITE buffer located at T_TASK(cmd)->t_mem_list |
2952 | * into the locally allocated *buf | 2950 | * into the locally allocated *buf |
2953 | */ | 2951 | */ |
2954 | transport_memcpy_se_mem_read_contig(cmd, buf, T_TASK(cmd)->t_mem_list); | 2952 | transport_memcpy_se_mem_read_contig(cmd, buf, T_TASK(cmd)->t_mem_list); |
2955 | /* | 2953 | /* |
2956 | * Now perform the XOR against the BIDI read memory located at | 2954 | * Now perform the XOR against the BIDI read memory located at |
2957 | * T_TASK(cmd)->t_mem_bidi_list | 2955 | * T_TASK(cmd)->t_mem_bidi_list |
2958 | */ | 2956 | */ |
2959 | 2957 | ||
2960 | offset = 0; | 2958 | offset = 0; |
2961 | list_for_each_entry(se_mem, T_TASK(cmd)->t_mem_bidi_list, se_list) { | 2959 | list_for_each_entry(se_mem, T_TASK(cmd)->t_mem_bidi_list, se_list) { |
2962 | addr = (unsigned char *)kmap_atomic(se_mem->se_page, KM_USER0); | 2960 | addr = (unsigned char *)kmap_atomic(se_mem->se_page, KM_USER0); |
2963 | if (!(addr)) | 2961 | if (!(addr)) |
2964 | goto out; | 2962 | goto out; |
2965 | 2963 | ||
2966 | for (i = 0; i < se_mem->se_len; i++) | 2964 | for (i = 0; i < se_mem->se_len; i++) |
2967 | *(addr + se_mem->se_off + i) ^= *(buf + offset + i); | 2965 | *(addr + se_mem->se_off + i) ^= *(buf + offset + i); |
2968 | 2966 | ||
2969 | offset += se_mem->se_len; | 2967 | offset += se_mem->se_len; |
2970 | kunmap_atomic(addr, KM_USER0); | 2968 | kunmap_atomic(addr, KM_USER0); |
2971 | } | 2969 | } |
2972 | out: | 2970 | out: |
2973 | kfree(buf); | 2971 | kfree(buf); |
2974 | } | 2972 | } |
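Stripped of the se_mem walking, the loop above is a plain byte-wise XOR of the data-out payload into the blocks read back, which is what step 3 of the sbc3r22 description asks for. A minimal stand-alone sketch of that core transform (illustrative only; the function and buffer names are hypothetical, not part of the patch):

#include <stddef.h>

/*
 * Hypothetical helper mirroring the XDWRITEREAD XOR step: fold the
 * transferred (write) data into the data read from the medium, in place.
 */
static void xdwriteread_xor(unsigned char *read_buf,
                            const unsigned char *write_buf, size_t len)
{
        size_t i;

        for (i = 0; i < len; i++)
                read_buf[i] ^= write_buf[i];
}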
2975 | 2973 | ||
2976 | /* | 2974 | /* |
2977 | * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd | 2975 | * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd |
2978 | */ | 2976 | */ |
2979 | static int transport_get_sense_data(struct se_cmd *cmd) | 2977 | static int transport_get_sense_data(struct se_cmd *cmd) |
2980 | { | 2978 | { |
2981 | unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL; | 2979 | unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL; |
2982 | struct se_device *dev; | 2980 | struct se_device *dev; |
2983 | struct se_task *task = NULL, *task_tmp; | 2981 | struct se_task *task = NULL, *task_tmp; |
2984 | unsigned long flags; | 2982 | unsigned long flags; |
2985 | u32 offset = 0; | 2983 | u32 offset = 0; |
2986 | 2984 | ||
2987 | if (!SE_LUN(cmd)) { | 2985 | if (!SE_LUN(cmd)) { |
2988 | printk(KERN_ERR "SE_LUN(cmd) is NULL\n"); | 2986 | printk(KERN_ERR "SE_LUN(cmd) is NULL\n"); |
2989 | return -1; | 2987 | return -1; |
2990 | } | 2988 | } |
2991 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 2989 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); |
2992 | if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { | 2990 | if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { |
2993 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 2991 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); |
2994 | return 0; | 2992 | return 0; |
2995 | } | 2993 | } |
2996 | 2994 | ||
2997 | list_for_each_entry_safe(task, task_tmp, | 2995 | list_for_each_entry_safe(task, task_tmp, |
2998 | &T_TASK(cmd)->t_task_list, t_list) { | 2996 | &T_TASK(cmd)->t_task_list, t_list) { |
2999 | 2997 | ||
3000 | if (!task->task_sense) | 2998 | if (!task->task_sense) |
3001 | continue; | 2999 | continue; |
3002 | 3000 | ||
3003 | dev = task->se_dev; | 3001 | dev = task->se_dev; |
3004 | if (!(dev)) | 3002 | if (!(dev)) |
3005 | continue; | 3003 | continue; |
3006 | 3004 | ||
3007 | if (!TRANSPORT(dev)->get_sense_buffer) { | 3005 | if (!TRANSPORT(dev)->get_sense_buffer) { |
3008 | printk(KERN_ERR "TRANSPORT(dev)->get_sense_buffer" | 3006 | printk(KERN_ERR "TRANSPORT(dev)->get_sense_buffer" |
3009 | " is NULL\n"); | 3007 | " is NULL\n"); |
3010 | continue; | 3008 | continue; |
3011 | } | 3009 | } |
3012 | 3010 | ||
3013 | sense_buffer = TRANSPORT(dev)->get_sense_buffer(task); | 3011 | sense_buffer = TRANSPORT(dev)->get_sense_buffer(task); |
3014 | if (!(sense_buffer)) { | 3012 | if (!(sense_buffer)) { |
3015 | printk(KERN_ERR "ITT[0x%08x]_TASK[%d]: Unable to locate" | 3013 | printk(KERN_ERR "ITT[0x%08x]_TASK[%d]: Unable to locate" |
3016 | " sense buffer for task with sense\n", | 3014 | " sense buffer for task with sense\n", |
3017 | CMD_TFO(cmd)->get_task_tag(cmd), task->task_no); | 3015 | CMD_TFO(cmd)->get_task_tag(cmd), task->task_no); |
3018 | continue; | 3016 | continue; |
3019 | } | 3017 | } |
3020 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 3018 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); |
3021 | 3019 | ||
3022 | offset = CMD_TFO(cmd)->set_fabric_sense_len(cmd, | 3020 | offset = CMD_TFO(cmd)->set_fabric_sense_len(cmd, |
3023 | TRANSPORT_SENSE_BUFFER); | 3021 | TRANSPORT_SENSE_BUFFER); |
3024 | 3022 | ||
3025 | memcpy((void *)&buffer[offset], (void *)sense_buffer, | 3023 | memcpy((void *)&buffer[offset], (void *)sense_buffer, |
3026 | TRANSPORT_SENSE_BUFFER); | 3024 | TRANSPORT_SENSE_BUFFER); |
3027 | cmd->scsi_status = task->task_scsi_status; | 3025 | cmd->scsi_status = task->task_scsi_status; |
3028 | /* Automatically padded */ | 3026 | /* Automatically padded */ |
3029 | cmd->scsi_sense_length = | 3027 | cmd->scsi_sense_length = |
3030 | (TRANSPORT_SENSE_BUFFER + offset); | 3028 | (TRANSPORT_SENSE_BUFFER + offset); |
3031 | 3029 | ||
3032 | printk(KERN_INFO "HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x" | 3030 | printk(KERN_INFO "HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x" |
3033 | " and sense\n", | 3031 | " and sense\n", |
3034 | dev->se_hba->hba_id, TRANSPORT(dev)->name, | 3032 | dev->se_hba->hba_id, TRANSPORT(dev)->name, |
3035 | cmd->scsi_status); | 3033 | cmd->scsi_status); |
3036 | return 0; | 3034 | return 0; |
3037 | } | 3035 | } |
3038 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 3036 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); |
3039 | 3037 | ||
3040 | return -1; | 3038 | return -1; |
3041 | } | 3039 | } |
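One detail worth calling out in the loop above: set_fabric_sense_len() lets the fabric module reserve a header of 'offset' bytes at the start of cmd->sense_buffer, the backend's raw sense data is copied in after it, and the reported scsi_sense_length covers both. A hedged sketch of that layout (the buffer size and names are assumptions for illustration, not the driver's API):

#include <string.h>
#include <stddef.h>

#define SENSE_LEN 96    /* assumed TRANSPORT_SENSE_BUFFER-like size */

/* Copy raw sense after a fabric-reserved header; return the total length. */
static size_t copy_sense_after_header(unsigned char *dst,
                                      const unsigned char *sense,
                                      size_t fabric_hdr_len)
{
        memcpy(dst + fabric_hdr_len, sense, SENSE_LEN);
        return SENSE_LEN + fabric_hdr_len;      /* -> scsi_sense_length */
}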
3042 | 3040 | ||
3043 | static int transport_allocate_resources(struct se_cmd *cmd) | 3041 | static int transport_allocate_resources(struct se_cmd *cmd) |
3044 | { | 3042 | { |
3045 | u32 length = cmd->data_length; | 3043 | u32 length = cmd->data_length; |
3046 | 3044 | ||
3047 | if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) || | 3045 | if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) || |
3048 | (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) | 3046 | (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) |
3049 | return transport_generic_get_mem(cmd, length, PAGE_SIZE); | 3047 | return transport_generic_get_mem(cmd, length, PAGE_SIZE); |
3050 | else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) | 3048 | else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) |
3051 | return transport_generic_allocate_buf(cmd, length); | 3049 | return transport_generic_allocate_buf(cmd, length); |
3052 | else | 3050 | else |
3053 | return 0; | 3051 | return 0; |
3054 | } | 3052 | } |
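The helper above picks the backing store purely from the command flags: scatterlist pages for SG_IO CDBs, one linear buffer for non-SG control CDBs, and nothing for commands that move no data. A usage-level sketch of the same three-way split (enum and function names are hypothetical):

enum payload_kind { PAYLOAD_SGLIST, PAYLOAD_LINEAR, PAYLOAD_NONE };

/* Mirror of the flag test in transport_allocate_resources(). */
static enum payload_kind pick_payload_kind(int is_sg_io, int is_nonsg_io)
{
        if (is_sg_io)
                return PAYLOAD_SGLIST;  /* page-based scatterlist */
        if (is_nonsg_io)
                return PAYLOAD_LINEAR;  /* single contiguous buffer */
        return PAYLOAD_NONE;            /* non-data CDB */
}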
3055 | 3053 | ||
3056 | static int | 3054 | static int |
3057 | transport_handle_reservation_conflict(struct se_cmd *cmd) | 3055 | transport_handle_reservation_conflict(struct se_cmd *cmd) |
3058 | { | 3056 | { |
3059 | cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks; | 3057 | cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks; |
3060 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 3058 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
3061 | cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT; | 3059 | cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT; |
3062 | cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; | 3060 | cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; |
3063 | /* | 3061 | /* |
3064 | * For UA Interlock Code 11b, a RESERVATION CONFLICT will | 3062 | * For UA Interlock Code 11b, a RESERVATION CONFLICT will |
3065 | * establish a UNIT ATTENTION with PREVIOUS RESERVATION | 3063 | * establish a UNIT ATTENTION with PREVIOUS RESERVATION |
3066 | * CONFLICT STATUS. | 3064 | * CONFLICT STATUS. |
3067 | * | 3065 | * |
3068 | * See spc4r17, section 7.4.6 Control Mode Page, Table 349 | 3066 | * See spc4r17, section 7.4.6 Control Mode Page, Table 349 |
3069 | */ | 3067 | */ |
3070 | if (SE_SESS(cmd) && | 3068 | if (SE_SESS(cmd) && |
3071 | DEV_ATTRIB(cmd->se_dev)->emulate_ua_intlck_ctrl == 2) | 3069 | DEV_ATTRIB(cmd->se_dev)->emulate_ua_intlck_ctrl == 2) |
3072 | core_scsi3_ua_allocate(SE_SESS(cmd)->se_node_acl, | 3070 | core_scsi3_ua_allocate(SE_SESS(cmd)->se_node_acl, |
3073 | cmd->orig_fe_lun, 0x2C, | 3071 | cmd->orig_fe_lun, 0x2C, |
3074 | ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); | 3072 | ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); |
3075 | return -2; | 3073 | return -2; |
3076 | } | 3074 | } |
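For readers tracing where the 0x2C ASC queued above ends up: a unit attention is ultimately reported through an SPC fixed-format sense buffer with well-defined byte offsets. A minimal sketch of that encoding (stand-alone illustration, not code from this driver):

/* Fill an SPC fixed-format sense buffer with key/ASC/ASCQ. */
static void build_fixed_sense(unsigned char *sense, unsigned char key,
                              unsigned char asc, unsigned char ascq)
{
        sense[0] = 0x70;        /* response code: current, fixed format */
        sense[2] = key;         /* e.g. UNIT ATTENTION (0x06) */
        sense[7] = 10;          /* additional sense length */
        sense[12] = asc;        /* e.g. 0x2C as allocated above */
        sense[13] = ascq;
}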
3077 | 3075 | ||
3078 | /* transport_generic_cmd_sequencer(): | 3076 | /* transport_generic_cmd_sequencer(): |
3079 | * | 3077 | * |
3080 | * Generic Command Sequencer that should work for most DAS transport | 3078 | * Generic Command Sequencer that should work for most DAS transport |
3081 | * drivers. | 3079 | * drivers. |
3082 | * | 3080 | * |
3083 | * Called from transport_generic_allocate_tasks() in the $FABRIC_MOD | 3081 | * Called from transport_generic_allocate_tasks() in the $FABRIC_MOD |
3084 | * RX Thread. | 3082 | * RX Thread. |
3085 | * | 3083 | * |
3086 | * FIXME: Need to support other SCSI OPCODES as well. | 3084 | * FIXME: Need to support other SCSI OPCODES as well. |
3087 | */ | 3085 | */ |
3088 | static int transport_generic_cmd_sequencer( | 3086 | static int transport_generic_cmd_sequencer( |
3089 | struct se_cmd *cmd, | 3087 | struct se_cmd *cmd, |
3090 | unsigned char *cdb) | 3088 | unsigned char *cdb) |
3091 | { | 3089 | { |
3092 | struct se_device *dev = SE_DEV(cmd); | 3090 | struct se_device *dev = SE_DEV(cmd); |
3093 | struct se_subsystem_dev *su_dev = dev->se_sub_dev; | 3091 | struct se_subsystem_dev *su_dev = dev->se_sub_dev; |
3094 | int ret = 0, sector_ret = 0, passthrough; | 3092 | int ret = 0, sector_ret = 0, passthrough; |
3095 | u32 sectors = 0, size = 0, pr_reg_type = 0; | 3093 | u32 sectors = 0, size = 0, pr_reg_type = 0; |
3096 | u16 service_action; | 3094 | u16 service_action; |
3097 | u8 alua_ascq = 0; | 3095 | u8 alua_ascq = 0; |
3098 | /* | 3096 | /* |
3099 | * Check for an existing UNIT ATTENTION condition | 3097 | * Check for an existing UNIT ATTENTION condition |
3100 | */ | 3098 | */ |
3101 | if (core_scsi3_ua_check(cmd, cdb) < 0) { | 3099 | if (core_scsi3_ua_check(cmd, cdb) < 0) { |
3102 | cmd->transport_wait_for_tasks = | 3100 | cmd->transport_wait_for_tasks = |
3103 | &transport_nop_wait_for_tasks; | 3101 | &transport_nop_wait_for_tasks; |
3104 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 3102 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
3105 | cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION; | 3103 | cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION; |
3106 | return -2; | 3104 | return -2; |
3107 | } | 3105 | } |
3108 | /* | 3106 | /* |
3109 | * Check status of Asymmetric Logical Unit Assignment port | 3107 | * Check status of Asymmetric Logical Unit Assignment port |
3110 | */ | 3108 | */ |
3111 | ret = T10_ALUA(su_dev)->alua_state_check(cmd, cdb, &alua_ascq); | 3109 | ret = T10_ALUA(su_dev)->alua_state_check(cmd, cdb, &alua_ascq); |
3112 | if (ret != 0) { | 3110 | if (ret != 0) { |
3113 | cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks; | 3111 | cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks; |
3114 | /* | 3112 | /* |
3115 | * Set SCSI additional sense code (ASC) to 'LUN Not Accessible'; | 3113 | * Set SCSI additional sense code (ASC) to 'LUN Not Accessible'; |
3116 | * The ALUA additional sense code qualifier (ASCQ) is determined | 3114 | * The ALUA additional sense code qualifier (ASCQ) is determined |
3117 | * by the ALUA primary or secondary access state. | 3115 | * by the ALUA primary or secondary access state. |
3118 | */ | 3116 | */ |
3119 | if (ret > 0) { | 3117 | if (ret > 0) { |
3120 | #if 0 | 3118 | #if 0 |
3121 | printk(KERN_INFO "[%s]: ALUA TG Port not available," | 3119 | printk(KERN_INFO "[%s]: ALUA TG Port not available," |
3122 | " SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n", | 3120 | " SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n", |
3123 | CMD_TFO(cmd)->get_fabric_name(), alua_ascq); | 3121 | CMD_TFO(cmd)->get_fabric_name(), alua_ascq); |
3124 | #endif | 3122 | #endif |
3125 | transport_set_sense_codes(cmd, 0x04, alua_ascq); | 3123 | transport_set_sense_codes(cmd, 0x04, alua_ascq); |
3126 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 3124 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
3127 | cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY; | 3125 | cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY; |
3128 | return -2; | 3126 | return -2; |
3129 | } | 3127 | } |
3130 | goto out_invalid_cdb_field; | 3128 | goto out_invalid_cdb_field; |
3131 | } | 3129 | } |
3132 | /* | 3130 | /* |
3133 | * Check status for SPC-3 Persistent Reservations | 3131 | * Check status for SPC-3 Persistent Reservations |
3134 | */ | 3132 | */ |
3135 | if (T10_PR_OPS(su_dev)->t10_reservation_check(cmd, &pr_reg_type) != 0) { | 3133 | if (T10_PR_OPS(su_dev)->t10_reservation_check(cmd, &pr_reg_type) != 0) { |
3136 | if (T10_PR_OPS(su_dev)->t10_seq_non_holder( | 3134 | if (T10_PR_OPS(su_dev)->t10_seq_non_holder( |
3137 | cmd, cdb, pr_reg_type) != 0) | 3135 | cmd, cdb, pr_reg_type) != 0) |
3138 | return transport_handle_reservation_conflict(cmd); | 3136 | return transport_handle_reservation_conflict(cmd); |
3139 | /* | 3137 | /* |
3140 | * This means the CDB is allowed for the SCSI Initiator port | 3138 | * This means the CDB is allowed for the SCSI Initiator port |
3141 | * when said port is *NOT* holding the legacy SPC-2 or | 3139 | * when said port is *NOT* holding the legacy SPC-2 or |
3142 | * SPC-3 Persistent Reservation. | 3140 | * SPC-3 Persistent Reservation. |
3143 | */ | 3141 | */ |
3144 | } | 3142 | } |
3145 | 3143 | ||
3146 | switch (cdb[0]) { | 3144 | switch (cdb[0]) { |
3147 | case READ_6: | 3145 | case READ_6: |
3148 | sectors = transport_get_sectors_6(cdb, cmd, &sector_ret); | 3146 | sectors = transport_get_sectors_6(cdb, cmd, &sector_ret); |
3149 | if (sector_ret) | 3147 | if (sector_ret) |
3150 | goto out_unsupported_cdb; | 3148 | goto out_unsupported_cdb; |
3151 | size = transport_get_size(sectors, cdb, cmd); | 3149 | size = transport_get_size(sectors, cdb, cmd); |
3152 | cmd->transport_split_cdb = &split_cdb_XX_6; | 3150 | cmd->transport_split_cdb = &split_cdb_XX_6; |
3153 | T_TASK(cmd)->t_task_lba = transport_lba_21(cdb); | 3151 | T_TASK(cmd)->t_task_lba = transport_lba_21(cdb); |
3154 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | 3152 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
3155 | break; | 3153 | break; |
3156 | case READ_10: | 3154 | case READ_10: |
3157 | sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); | 3155 | sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); |
3158 | if (sector_ret) | 3156 | if (sector_ret) |
3159 | goto out_unsupported_cdb; | 3157 | goto out_unsupported_cdb; |
3160 | size = transport_get_size(sectors, cdb, cmd); | 3158 | size = transport_get_size(sectors, cdb, cmd); |
3161 | cmd->transport_split_cdb = &split_cdb_XX_10; | 3159 | cmd->transport_split_cdb = &split_cdb_XX_10; |
3162 | T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); | 3160 | T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); |
3163 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | 3161 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
3164 | break; | 3162 | break; |
3165 | case READ_12: | 3163 | case READ_12: |
3166 | sectors = transport_get_sectors_12(cdb, cmd, &sector_ret); | 3164 | sectors = transport_get_sectors_12(cdb, cmd, &sector_ret); |
3167 | if (sector_ret) | 3165 | if (sector_ret) |
3168 | goto out_unsupported_cdb; | 3166 | goto out_unsupported_cdb; |
3169 | size = transport_get_size(sectors, cdb, cmd); | 3167 | size = transport_get_size(sectors, cdb, cmd); |
3170 | cmd->transport_split_cdb = &split_cdb_XX_12; | 3168 | cmd->transport_split_cdb = &split_cdb_XX_12; |
3171 | T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); | 3169 | T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); |
3172 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | 3170 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
3173 | break; | 3171 | break; |
3174 | case READ_16: | 3172 | case READ_16: |
3175 | sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); | 3173 | sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); |
3176 | if (sector_ret) | 3174 | if (sector_ret) |
3177 | goto out_unsupported_cdb; | 3175 | goto out_unsupported_cdb; |
3178 | size = transport_get_size(sectors, cdb, cmd); | 3176 | size = transport_get_size(sectors, cdb, cmd); |
3179 | cmd->transport_split_cdb = &split_cdb_XX_16; | 3177 | cmd->transport_split_cdb = &split_cdb_XX_16; |
3180 | T_TASK(cmd)->t_task_lba = transport_lba_64(cdb); | 3178 | T_TASK(cmd)->t_task_lba = transport_lba_64(cdb); |
3181 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | 3179 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
3182 | break; | 3180 | break; |
3183 | case WRITE_6: | 3181 | case WRITE_6: |
3184 | sectors = transport_get_sectors_6(cdb, cmd, &sector_ret); | 3182 | sectors = transport_get_sectors_6(cdb, cmd, &sector_ret); |
3185 | if (sector_ret) | 3183 | if (sector_ret) |
3186 | goto out_unsupported_cdb; | 3184 | goto out_unsupported_cdb; |
3187 | size = transport_get_size(sectors, cdb, cmd); | 3185 | size = transport_get_size(sectors, cdb, cmd); |
3188 | cmd->transport_split_cdb = &split_cdb_XX_6; | 3186 | cmd->transport_split_cdb = &split_cdb_XX_6; |
3189 | T_TASK(cmd)->t_task_lba = transport_lba_21(cdb); | 3187 | T_TASK(cmd)->t_task_lba = transport_lba_21(cdb); |
3190 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | 3188 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
3191 | break; | 3189 | break; |
3192 | case WRITE_10: | 3190 | case WRITE_10: |
3193 | sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); | 3191 | sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); |
3194 | if (sector_ret) | 3192 | if (sector_ret) |
3195 | goto out_unsupported_cdb; | 3193 | goto out_unsupported_cdb; |
3196 | size = transport_get_size(sectors, cdb, cmd); | 3194 | size = transport_get_size(sectors, cdb, cmd); |
3197 | cmd->transport_split_cdb = &split_cdb_XX_10; | 3195 | cmd->transport_split_cdb = &split_cdb_XX_10; |
3198 | T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); | 3196 | T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); |
3199 | T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8); | 3197 | T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8); |
3200 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | 3198 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
3201 | break; | 3199 | break; |
3202 | case WRITE_12: | 3200 | case WRITE_12: |
3203 | sectors = transport_get_sectors_12(cdb, cmd, &sector_ret); | 3201 | sectors = transport_get_sectors_12(cdb, cmd, &sector_ret); |
3204 | if (sector_ret) | 3202 | if (sector_ret) |
3205 | goto out_unsupported_cdb; | 3203 | goto out_unsupported_cdb; |
3206 | size = transport_get_size(sectors, cdb, cmd); | 3204 | size = transport_get_size(sectors, cdb, cmd); |
3207 | cmd->transport_split_cdb = &split_cdb_XX_12; | 3205 | cmd->transport_split_cdb = &split_cdb_XX_12; |
3208 | T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); | 3206 | T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); |
3209 | T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8); | 3207 | T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8); |
3210 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | 3208 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
3211 | break; | 3209 | break; |
3212 | case WRITE_16: | 3210 | case WRITE_16: |
3213 | sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); | 3211 | sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); |
3214 | if (sector_ret) | 3212 | if (sector_ret) |
3215 | goto out_unsupported_cdb; | 3213 | goto out_unsupported_cdb; |
3216 | size = transport_get_size(sectors, cdb, cmd); | 3214 | size = transport_get_size(sectors, cdb, cmd); |
3217 | cmd->transport_split_cdb = &split_cdb_XX_16; | 3215 | cmd->transport_split_cdb = &split_cdb_XX_16; |
3218 | T_TASK(cmd)->t_task_lba = transport_lba_64(cdb); | 3216 | T_TASK(cmd)->t_task_lba = transport_lba_64(cdb); |
3219 | T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8); | 3217 | T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8); |
3220 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | 3218 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
3221 | break; | 3219 | break; |
3222 | case XDWRITEREAD_10: | 3220 | case XDWRITEREAD_10: |
3223 | if ((cmd->data_direction != DMA_TO_DEVICE) || | 3221 | if ((cmd->data_direction != DMA_TO_DEVICE) || |
3224 | !(T_TASK(cmd)->t_tasks_bidi)) | 3222 | !(T_TASK(cmd)->t_tasks_bidi)) |
3225 | goto out_invalid_cdb_field; | 3223 | goto out_invalid_cdb_field; |
3226 | sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); | 3224 | sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); |
3227 | if (sector_ret) | 3225 | if (sector_ret) |
3228 | goto out_unsupported_cdb; | 3226 | goto out_unsupported_cdb; |
3229 | size = transport_get_size(sectors, cdb, cmd); | 3227 | size = transport_get_size(sectors, cdb, cmd); |
3230 | cmd->transport_split_cdb = &split_cdb_XX_10; | 3228 | cmd->transport_split_cdb = &split_cdb_XX_10; |
3231 | T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); | 3229 | T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); |
3232 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | 3230 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
3233 | passthrough = (TRANSPORT(dev)->transport_type == | 3231 | passthrough = (TRANSPORT(dev)->transport_type == |
3234 | TRANSPORT_PLUGIN_PHBA_PDEV); | 3232 | TRANSPORT_PLUGIN_PHBA_PDEV); |
3235 | /* | 3233 | /* |
3236 | * Skip the remaining assignments for TCM/PSCSI passthrough | 3234 | * Skip the remaining assignments for TCM/PSCSI passthrough |
3237 | */ | 3235 | */ |
3238 | if (passthrough) | 3236 | if (passthrough) |
3239 | break; | 3237 | break; |
3240 | /* | 3238 | /* |
3241 | * Setup BIDI XOR callback to be run during transport_generic_complete_ok() | 3239 | * Setup BIDI XOR callback to be run during transport_generic_complete_ok() |
3242 | */ | 3240 | */ |
3243 | cmd->transport_complete_callback = &transport_xor_callback; | 3241 | cmd->transport_complete_callback = &transport_xor_callback; |
3244 | T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8); | 3242 | T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8); |
3245 | break; | 3243 | break; |
3246 | case VARIABLE_LENGTH_CMD: | 3244 | case VARIABLE_LENGTH_CMD: |
3247 | service_action = get_unaligned_be16(&cdb[8]); | 3245 | service_action = get_unaligned_be16(&cdb[8]); |
3248 | /* | 3246 | /* |
3249 | * Determine if this is TCM/PSCSI device and we should disable | 3247 | * Determine if this is TCM/PSCSI device and we should disable |
3250 | * internal emulation for this CDB. | 3248 | * internal emulation for this CDB. |
3251 | */ | 3249 | */ |
3252 | passthrough = (TRANSPORT(dev)->transport_type == | 3250 | passthrough = (TRANSPORT(dev)->transport_type == |
3253 | TRANSPORT_PLUGIN_PHBA_PDEV); | 3251 | TRANSPORT_PLUGIN_PHBA_PDEV); |
3254 | 3252 | ||
3255 | switch (service_action) { | 3253 | switch (service_action) { |
3256 | case XDWRITEREAD_32: | 3254 | case XDWRITEREAD_32: |
3257 | sectors = transport_get_sectors_32(cdb, cmd, &sector_ret); | 3255 | sectors = transport_get_sectors_32(cdb, cmd, &sector_ret); |
3258 | if (sector_ret) | 3256 | if (sector_ret) |
3259 | goto out_unsupported_cdb; | 3257 | goto out_unsupported_cdb; |
3260 | size = transport_get_size(sectors, cdb, cmd); | 3258 | size = transport_get_size(sectors, cdb, cmd); |
3261 | /* | 3259 | /* |
3262 | * Use WRITE_32 and READ_32 opcodes for the emulated | 3260 | * Use WRITE_32 and READ_32 opcodes for the emulated |
3263 | * XDWRITE_READ_32 logic. | 3261 | * XDWRITE_READ_32 logic. |
3264 | */ | 3262 | */ |
3265 | cmd->transport_split_cdb = &split_cdb_XX_32; | 3263 | cmd->transport_split_cdb = &split_cdb_XX_32; |
3266 | T_TASK(cmd)->t_task_lba = transport_lba_64_ext(cdb); | 3264 | T_TASK(cmd)->t_task_lba = transport_lba_64_ext(cdb); |
3267 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; | 3265 | cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; |
3268 | 3266 | ||
3269 | /* | 3267 | /* |
3270 | * Skip the remaining assignments for TCM/PSCSI passthrough | 3268 | * Skip the remaining assignments for TCM/PSCSI passthrough |
3271 | */ | 3269 | */ |
3272 | if (passthrough) | 3270 | if (passthrough) |
3273 | break; | 3271 | break; |
3274 | 3272 | ||
3275 | /* | 3273 | /* |
3276 | * Setup BIDI XOR callback to be run during | 3274 | * Setup BIDI XOR callback to be run during |
3277 | * transport_generic_complete_ok() | 3275 | * transport_generic_complete_ok() |
3278 | */ | 3276 | */ |
3279 | cmd->transport_complete_callback = &transport_xor_callback; | 3277 | cmd->transport_complete_callback = &transport_xor_callback; |
3280 | T_TASK(cmd)->t_tasks_fua = (cdb[10] & 0x8); | 3278 | T_TASK(cmd)->t_tasks_fua = (cdb[10] & 0x8); |
3281 | break; | 3279 | break; |
3282 | case WRITE_SAME_32: | 3280 | case WRITE_SAME_32: |
3283 | sectors = transport_get_sectors_32(cdb, cmd, &sector_ret); | 3281 | sectors = transport_get_sectors_32(cdb, cmd, &sector_ret); |
3284 | if (sector_ret) | 3282 | if (sector_ret) |
3285 | goto out_unsupported_cdb; | 3283 | goto out_unsupported_cdb; |
3286 | size = transport_get_size(sectors, cdb, cmd); | 3284 | size = transport_get_size(sectors, cdb, cmd); |
3287 | T_TASK(cmd)->t_task_lba = get_unaligned_be64(&cdb[12]); | 3285 | T_TASK(cmd)->t_task_lba = get_unaligned_be64(&cdb[12]); |
3288 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | 3286 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
3289 | 3287 | ||
3290 | /* | 3288 | /* |
3291 | * Skip the remaining assignments for TCM/PSCSI passthrough | 3289 | * Skip the remaining assignments for TCM/PSCSI passthrough |
3292 | */ | 3290 | */ |
3293 | if (passthrough) | 3291 | if (passthrough) |
3294 | break; | 3292 | break; |
3295 | 3293 | ||
3296 | if ((cdb[10] & 0x04) || (cdb[10] & 0x02)) { | 3294 | if ((cdb[10] & 0x04) || (cdb[10] & 0x02)) { |
3297 | printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA" | 3295 | printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA" |
3298 | " bits not supported for Block Discard" | 3296 | " bits not supported for Block Discard" |
3299 | " Emulation\n"); | 3297 | " Emulation\n"); |
3300 | goto out_invalid_cdb_field; | 3298 | goto out_invalid_cdb_field; |
3301 | } | 3299 | } |
3302 | /* | 3300 | /* |
3303 | * Currently for the emulated case we only accept | 3301 | * Currently for the emulated case we only accept |
3304 | * WRITE_SAME (TPWS) with the UNMAP=1 bit set. | 3302 | * WRITE_SAME (TPWS) with the UNMAP=1 bit set. |
3305 | */ | 3303 | */ |
3306 | if (!(cdb[10] & 0x08)) { | 3304 | if (!(cdb[10] & 0x08)) { |
3307 | printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not" | 3305 | printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not" |
3308 | " supported for Block Discard Emulation\n"); | 3306 | " supported for Block Discard Emulation\n"); |
3309 | goto out_invalid_cdb_field; | 3307 | goto out_invalid_cdb_field; |
3310 | } | 3308 | } |
3311 | break; | 3309 | break; |
3312 | default: | 3310 | default: |
3313 | printk(KERN_ERR "VARIABLE_LENGTH_CMD service action" | 3311 | printk(KERN_ERR "VARIABLE_LENGTH_CMD service action" |
3314 | " 0x%04x not supported\n", service_action); | 3312 | " 0x%04x not supported\n", service_action); |
3315 | goto out_unsupported_cdb; | 3313 | goto out_unsupported_cdb; |
3316 | } | 3314 | } |
3317 | break; | 3315 | break; |
3318 | case 0xa3: | 3316 | case 0xa3: |
3319 | if (TRANSPORT(dev)->get_device_type(dev) != TYPE_ROM) { | 3317 | if (TRANSPORT(dev)->get_device_type(dev) != TYPE_ROM) { |
3320 | /* MAINTENANCE_IN from SCC-2 */ | 3318 | /* MAINTENANCE_IN from SCC-2 */ |
3321 | /* | 3319 | /* |
3322 | * Check for emulated MI_REPORT_TARGET_PGS. | 3320 | * Check for emulated MI_REPORT_TARGET_PGS. |
3323 | */ | 3321 | */ |
3324 | if (cdb[1] == MI_REPORT_TARGET_PGS) { | 3322 | if (cdb[1] == MI_REPORT_TARGET_PGS) { |
3325 | cmd->transport_emulate_cdb = | 3323 | cmd->transport_emulate_cdb = |
3326 | (T10_ALUA(su_dev)->alua_type == | 3324 | (T10_ALUA(su_dev)->alua_type == |
3327 | SPC3_ALUA_EMULATED) ? | 3325 | SPC3_ALUA_EMULATED) ? |
3328 | &core_emulate_report_target_port_groups : | 3326 | &core_emulate_report_target_port_groups : |
3329 | NULL; | 3327 | NULL; |
3330 | } | 3328 | } |
3331 | size = (cdb[6] << 24) | (cdb[7] << 16) | | 3329 | size = (cdb[6] << 24) | (cdb[7] << 16) | |
3332 | (cdb[8] << 8) | cdb[9]; | 3330 | (cdb[8] << 8) | cdb[9]; |
3333 | } else { | 3331 | } else { |
3334 | /* GPCMD_SEND_KEY from multi media commands */ | 3332 | /* GPCMD_SEND_KEY from multi media commands */ |
3335 | size = (cdb[8] << 8) + cdb[9]; | 3333 | size = (cdb[8] << 8) + cdb[9]; |
3336 | } | 3334 | } |
3337 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | 3335 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; |
3338 | break; | 3336 | break; |
3339 | case MODE_SELECT: | 3337 | case MODE_SELECT: |
3340 | size = cdb[4]; | 3338 | size = cdb[4]; |
3341 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | 3339 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
3342 | break; | 3340 | break; |
3343 | case MODE_SELECT_10: | 3341 | case MODE_SELECT_10: |
3344 | size = (cdb[7] << 8) + cdb[8]; | 3342 | size = (cdb[7] << 8) + cdb[8]; |
3345 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | 3343 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
3346 | break; | 3344 | break; |
3347 | case MODE_SENSE: | 3345 | case MODE_SENSE: |
3348 | size = cdb[4]; | 3346 | size = cdb[4]; |
3349 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | 3347 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; |
3350 | break; | 3348 | break; |
3351 | case MODE_SENSE_10: | 3349 | case MODE_SENSE_10: |
3352 | case GPCMD_READ_BUFFER_CAPACITY: | 3350 | case GPCMD_READ_BUFFER_CAPACITY: |
3353 | case GPCMD_SEND_OPC: | 3351 | case GPCMD_SEND_OPC: |
3354 | case LOG_SELECT: | 3352 | case LOG_SELECT: |
3355 | case LOG_SENSE: | 3353 | case LOG_SENSE: |
3356 | size = (cdb[7] << 8) + cdb[8]; | 3354 | size = (cdb[7] << 8) + cdb[8]; |
3357 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | 3355 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; |
3358 | break; | 3356 | break; |
3359 | case READ_BLOCK_LIMITS: | 3357 | case READ_BLOCK_LIMITS: |
3360 | size = READ_BLOCK_LEN; | 3358 | size = READ_BLOCK_LEN; |
3361 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | 3359 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; |
3362 | break; | 3360 | break; |
3363 | case GPCMD_GET_CONFIGURATION: | 3361 | case GPCMD_GET_CONFIGURATION: |
3364 | case GPCMD_READ_FORMAT_CAPACITIES: | 3362 | case GPCMD_READ_FORMAT_CAPACITIES: |
3365 | case GPCMD_READ_DISC_INFO: | 3363 | case GPCMD_READ_DISC_INFO: |
3366 | case GPCMD_READ_TRACK_RZONE_INFO: | 3364 | case GPCMD_READ_TRACK_RZONE_INFO: |
3367 | size = (cdb[7] << 8) + cdb[8]; | 3365 | size = (cdb[7] << 8) + cdb[8]; |
3368 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | 3366 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
3369 | break; | 3367 | break; |
3370 | case PERSISTENT_RESERVE_IN: | 3368 | case PERSISTENT_RESERVE_IN: |
3371 | case PERSISTENT_RESERVE_OUT: | 3369 | case PERSISTENT_RESERVE_OUT: |
3372 | cmd->transport_emulate_cdb = | 3370 | cmd->transport_emulate_cdb = |
3373 | (T10_RES(su_dev)->res_type == | 3371 | (T10_RES(su_dev)->res_type == |
3374 | SPC3_PERSISTENT_RESERVATIONS) ? | 3372 | SPC3_PERSISTENT_RESERVATIONS) ? |
3375 | &core_scsi3_emulate_pr : NULL; | 3373 | &core_scsi3_emulate_pr : NULL; |
3376 | size = (cdb[7] << 8) + cdb[8]; | 3374 | size = (cdb[7] << 8) + cdb[8]; |
3377 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | 3375 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; |
3378 | break; | 3376 | break; |
3379 | case GPCMD_MECHANISM_STATUS: | 3377 | case GPCMD_MECHANISM_STATUS: |
3380 | case GPCMD_READ_DVD_STRUCTURE: | 3378 | case GPCMD_READ_DVD_STRUCTURE: |
3381 | size = (cdb[8] << 8) + cdb[9]; | 3379 | size = (cdb[8] << 8) + cdb[9]; |
3382 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | 3380 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
3383 | break; | 3381 | break; |
3384 | case READ_POSITION: | 3382 | case READ_POSITION: |
3385 | size = READ_POSITION_LEN; | 3383 | size = READ_POSITION_LEN; |
3386 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | 3384 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; |
3387 | break; | 3385 | break; |
3388 | case 0xa4: | 3386 | case 0xa4: |
3389 | if (TRANSPORT(dev)->get_device_type(dev) != TYPE_ROM) { | 3387 | if (TRANSPORT(dev)->get_device_type(dev) != TYPE_ROM) { |
3390 | /* MAINTENANCE_OUT from SCC-2 | 3388 | /* MAINTENANCE_OUT from SCC-2 |
3391 | * | 3389 | * |
3392 | * Check for emulated MO_SET_TARGET_PGS. | 3390 | * Check for emulated MO_SET_TARGET_PGS. |
3393 | */ | 3391 | */ |
3394 | if (cdb[1] == MO_SET_TARGET_PGS) { | 3392 | if (cdb[1] == MO_SET_TARGET_PGS) { |
3395 | cmd->transport_emulate_cdb = | 3393 | cmd->transport_emulate_cdb = |
3396 | (T10_ALUA(su_dev)->alua_type == | 3394 | (T10_ALUA(su_dev)->alua_type == |
3397 | SPC3_ALUA_EMULATED) ? | 3395 | SPC3_ALUA_EMULATED) ? |
3398 | &core_emulate_set_target_port_groups : | 3396 | &core_emulate_set_target_port_groups : |
3399 | NULL; | 3397 | NULL; |
3400 | } | 3398 | } |
3401 | 3399 | ||
3402 | size = (cdb[6] << 24) | (cdb[7] << 16) | | 3400 | size = (cdb[6] << 24) | (cdb[7] << 16) | |
3403 | (cdb[8] << 8) | cdb[9]; | 3401 | (cdb[8] << 8) | cdb[9]; |
3404 | } else { | 3402 | } else { |
3405 | /* GPCMD_REPORT_KEY from multi media commands */ | 3403 | /* GPCMD_REPORT_KEY from multi media commands */ |
3406 | size = (cdb[8] << 8) + cdb[9]; | 3404 | size = (cdb[8] << 8) + cdb[9]; |
3407 | } | 3405 | } |
3408 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | 3406 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; |
3409 | break; | 3407 | break; |
3410 | case INQUIRY: | 3408 | case INQUIRY: |
3411 | size = (cdb[3] << 8) + cdb[4]; | 3409 | size = (cdb[3] << 8) + cdb[4]; |
3412 | /* | 3410 | /* |
3413 | * Do implicit HEAD_OF_QUEUE processing for INQUIRY. | 3411 | * Do implicit HEAD_OF_QUEUE processing for INQUIRY. |
3414 | * See spc4r17 section 5.3 | 3412 | * See spc4r17 section 5.3 |
3415 | */ | 3413 | */ |
3416 | if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) | 3414 | if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) |
3417 | cmd->sam_task_attr = TASK_ATTR_HOQ; | 3415 | cmd->sam_task_attr = TASK_ATTR_HOQ; |
3418 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | 3416 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; |
3419 | break; | 3417 | break; |
3420 | case READ_BUFFER: | 3418 | case READ_BUFFER: |
3421 | size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; | 3419 | size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; |
3422 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | 3420 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; |
3423 | break; | 3421 | break; |
3424 | case READ_CAPACITY: | 3422 | case READ_CAPACITY: |
3425 | size = READ_CAP_LEN; | 3423 | size = READ_CAP_LEN; |
3426 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | 3424 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; |
3427 | break; | 3425 | break; |
3428 | case READ_MEDIA_SERIAL_NUMBER: | 3426 | case READ_MEDIA_SERIAL_NUMBER: |
3429 | case SECURITY_PROTOCOL_IN: | 3427 | case SECURITY_PROTOCOL_IN: |
3430 | case SECURITY_PROTOCOL_OUT: | 3428 | case SECURITY_PROTOCOL_OUT: |
3431 | size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; | 3429 | size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; |
3432 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | 3430 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; |
3433 | break; | 3431 | break; |
3434 | case SERVICE_ACTION_IN: | 3432 | case SERVICE_ACTION_IN: |
3435 | case ACCESS_CONTROL_IN: | 3433 | case ACCESS_CONTROL_IN: |
3436 | case ACCESS_CONTROL_OUT: | 3434 | case ACCESS_CONTROL_OUT: |
3437 | case EXTENDED_COPY: | 3435 | case EXTENDED_COPY: |
3438 | case READ_ATTRIBUTE: | 3436 | case READ_ATTRIBUTE: |
3439 | case RECEIVE_COPY_RESULTS: | 3437 | case RECEIVE_COPY_RESULTS: |
3440 | case WRITE_ATTRIBUTE: | 3438 | case WRITE_ATTRIBUTE: |
3441 | size = (cdb[10] << 24) | (cdb[11] << 16) | | 3439 | size = (cdb[10] << 24) | (cdb[11] << 16) | |
3442 | (cdb[12] << 8) | cdb[13]; | 3440 | (cdb[12] << 8) | cdb[13]; |
3443 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | 3441 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; |
3444 | break; | 3442 | break; |
3445 | case RECEIVE_DIAGNOSTIC: | 3443 | case RECEIVE_DIAGNOSTIC: |
3446 | case SEND_DIAGNOSTIC: | 3444 | case SEND_DIAGNOSTIC: |
3447 | size = (cdb[3] << 8) | cdb[4]; | 3445 | size = (cdb[3] << 8) | cdb[4]; |
3448 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | 3446 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; |
3449 | break; | 3447 | break; |
3450 | /* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */ | 3448 | /* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */ |
3451 | #if 0 | 3449 | #if 0 |
3452 | case GPCMD_READ_CD: | 3450 | case GPCMD_READ_CD: |
3453 | sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; | 3451 | sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; |
3454 | size = (2336 * sectors); | 3452 | size = (2336 * sectors); |
3455 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | 3453 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; |
3456 | break; | 3454 | break; |
3457 | #endif | 3455 | #endif |
3458 | case READ_TOC: | 3456 | case READ_TOC: |
3459 | size = cdb[8]; | 3457 | size = cdb[8]; |
3460 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | 3458 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; |
3461 | break; | 3459 | break; |
3462 | case REQUEST_SENSE: | 3460 | case REQUEST_SENSE: |
3463 | size = cdb[4]; | 3461 | size = cdb[4]; |
3464 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | 3462 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; |
3465 | break; | 3463 | break; |
3466 | case READ_ELEMENT_STATUS: | 3464 | case READ_ELEMENT_STATUS: |
3467 | size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9]; | 3465 | size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9]; |
3468 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | 3466 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; |
3469 | break; | 3467 | break; |
3470 | case WRITE_BUFFER: | 3468 | case WRITE_BUFFER: |
3471 | size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; | 3469 | size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; |
3472 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | 3470 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; |
3473 | break; | 3471 | break; |
3474 | case RESERVE: | 3472 | case RESERVE: |
3475 | case RESERVE_10: | 3473 | case RESERVE_10: |
3476 | /* | 3474 | /* |
3477 | * The SPC-2 RESERVE does not contain a size in the SCSI CDB. | 3475 | * The SPC-2 RESERVE does not contain a size in the SCSI CDB. |
3478 | * Assume the passthrough or $FABRIC_MOD will tell us about it. | 3476 | * Assume the passthrough or $FABRIC_MOD will tell us about it. |
3479 | */ | 3477 | */ |
3480 | if (cdb[0] == RESERVE_10) | 3478 | if (cdb[0] == RESERVE_10) |
3481 | size = (cdb[7] << 8) | cdb[8]; | 3479 | size = (cdb[7] << 8) | cdb[8]; |
3482 | else | 3480 | else |
3483 | size = cmd->data_length; | 3481 | size = cmd->data_length; |
3484 | 3482 | ||
3485 | /* | 3483 | /* |
3486 | * Setup the legacy emulated handler for SPC-2 and | 3484 | * Setup the legacy emulated handler for SPC-2 and |
3487 | * >= SPC-3 compatible reservation handling (CRH=1) | 3485 | * >= SPC-3 compatible reservation handling (CRH=1) |
3488 | * Otherwise, we assume the underlying SCSI logic is | 3486 | * Otherwise, we assume the underlying SCSI logic is |
3489 | * running in SPC_PASSTHROUGH, and wants reservations | 3487 | * running in SPC_PASSTHROUGH, and wants reservations |
3490 | * emulation disabled. | 3488 | * emulation disabled. |
3491 | */ | 3489 | */ |
3492 | cmd->transport_emulate_cdb = | 3490 | cmd->transport_emulate_cdb = |
3493 | (T10_RES(su_dev)->res_type != | 3491 | (T10_RES(su_dev)->res_type != |
3494 | SPC_PASSTHROUGH) ? | 3492 | SPC_PASSTHROUGH) ? |
3495 | &core_scsi2_emulate_crh : NULL; | 3493 | &core_scsi2_emulate_crh : NULL; |
3496 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; | 3494 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; |
3497 | break; | 3495 | break; |
3498 | case RELEASE: | 3496 | case RELEASE: |
3499 | case RELEASE_10: | 3497 | case RELEASE_10: |
3500 | /* | 3498 | /* |
3501 | * The SPC-2 RELEASE does not contain a size in the SCSI CDB. | 3499 | * The SPC-2 RELEASE does not contain a size in the SCSI CDB. |
3502 | * Assume the passthrough or $FABRIC_MOD will tell us about it. | 3500 | * Assume the passthrough or $FABRIC_MOD will tell us about it. |
3503 | */ | 3501 | */ |
3504 | if (cdb[0] == RELEASE_10) | 3502 | if (cdb[0] == RELEASE_10) |
3505 | size = (cdb[7] << 8) | cdb[8]; | 3503 | size = (cdb[7] << 8) | cdb[8]; |
3506 | else | 3504 | else |
3507 | size = cmd->data_length; | 3505 | size = cmd->data_length; |
3508 | 3506 | ||
3509 | cmd->transport_emulate_cdb = | 3507 | cmd->transport_emulate_cdb = |
3510 | (T10_RES(su_dev)->res_type != | 3508 | (T10_RES(su_dev)->res_type != |
3511 | SPC_PASSTHROUGH) ? | 3509 | SPC_PASSTHROUGH) ? |
3512 | &core_scsi2_emulate_crh : NULL; | 3510 | &core_scsi2_emulate_crh : NULL; |
3513 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; | 3511 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; |
3514 | break; | 3512 | break; |
3515 | case SYNCHRONIZE_CACHE: | 3513 | case SYNCHRONIZE_CACHE: |
3516 | case 0x91: /* SYNCHRONIZE_CACHE_16: */ | 3514 | case 0x91: /* SYNCHRONIZE_CACHE_16: */ |
3517 | /* | 3515 | /* |
3518 | * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE | 3516 | * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE |
3519 | */ | 3517 | */ |
3520 | if (cdb[0] == SYNCHRONIZE_CACHE) { | 3518 | if (cdb[0] == SYNCHRONIZE_CACHE) { |
3521 | sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); | 3519 | sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); |
3522 | T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); | 3520 | T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); |
3523 | } else { | 3521 | } else { |
3524 | sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); | 3522 | sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); |
3525 | T_TASK(cmd)->t_task_lba = transport_lba_64(cdb); | 3523 | T_TASK(cmd)->t_task_lba = transport_lba_64(cdb); |
3526 | } | 3524 | } |
3527 | if (sector_ret) | 3525 | if (sector_ret) |
3528 | goto out_unsupported_cdb; | 3526 | goto out_unsupported_cdb; |
3529 | 3527 | ||
3530 | size = transport_get_size(sectors, cdb, cmd); | 3528 | size = transport_get_size(sectors, cdb, cmd); |
3531 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; | 3529 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; |
3532 | 3530 | ||
3533 | /* | 3531 | /* |
3534 | * For TCM/pSCSI passthrough, skip cmd->transport_emulate_cdb() | 3532 | * For TCM/pSCSI passthrough, skip cmd->transport_emulate_cdb() |
3535 | */ | 3533 | */ |
3536 | if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) | 3534 | if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) |
3537 | break; | 3535 | break; |
3538 | /* | 3536 | /* |
3539 | * Set SCF_EMULATE_CDB_ASYNC to ensure asynchronous operation | 3537 | * Set SCF_EMULATE_CDB_ASYNC to ensure asynchronous operation |
3540 | * for SYNCHRONIZE_CACHE* Immed=1 case in __transport_execute_tasks() | 3538 | * for SYNCHRONIZE_CACHE* Immed=1 case in __transport_execute_tasks() |
3541 | */ | 3539 | */ |
3542 | cmd->se_cmd_flags |= SCF_EMULATE_CDB_ASYNC; | 3540 | cmd->se_cmd_flags |= SCF_EMULATE_CDB_ASYNC; |
3543 | /* | 3541 | /* |
3544 | * Check to ensure that LBA + Range does not extend past the end of the | 3542 | * Check to ensure that LBA + Range does not extend past the end of the |
3545 | * device. | 3543 | * device. |
3546 | */ | 3544 | */ |
3547 | if (transport_get_sectors(cmd) < 0) | 3545 | if (transport_get_sectors(cmd) < 0) |
3548 | goto out_invalid_cdb_field; | 3546 | goto out_invalid_cdb_field; |
3549 | break; | 3547 | break; |
3550 | case UNMAP: | 3548 | case UNMAP: |
3551 | size = get_unaligned_be16(&cdb[7]); | 3549 | size = get_unaligned_be16(&cdb[7]); |
3552 | passthrough = (TRANSPORT(dev)->transport_type == | 3550 | passthrough = (TRANSPORT(dev)->transport_type == |
3553 | TRANSPORT_PLUGIN_PHBA_PDEV); | 3551 | TRANSPORT_PLUGIN_PHBA_PDEV); |
3554 | /* | 3552 | /* |
3555 | * Determine if the received UNMAP is used for direct passthrough | 3553 | * Determine if the received UNMAP is used for direct passthrough |
3556 | * into Linux/SCSI with struct request via TCM/pSCSI or we are | 3554 | * into Linux/SCSI with struct request via TCM/pSCSI or we are |
3557 | * signaling the use of internal transport_generic_unmap() emulation | 3555 | * signaling the use of internal transport_generic_unmap() emulation |
3558 | * for UNMAP -> Linux/BLOCK discard with TCM/IBLOCK and TCM/FILEIO | 3556 | * for UNMAP -> Linux/BLOCK discard with TCM/IBLOCK and TCM/FILEIO |
3559 | * subsystem plugin backstores. | 3557 | * subsystem plugin backstores. |
3560 | */ | 3558 | */ |
3561 | if (!(passthrough)) | 3559 | if (!(passthrough)) |
3562 | cmd->se_cmd_flags |= SCF_EMULATE_SYNC_UNMAP; | 3560 | cmd->se_cmd_flags |= SCF_EMULATE_SYNC_UNMAP; |
3563 | 3561 | ||
3564 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | 3562 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; |
3565 | break; | 3563 | break; |
3566 | case WRITE_SAME_16: | 3564 | case WRITE_SAME_16: |
3567 | sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); | 3565 | sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); |
3568 | if (sector_ret) | 3566 | if (sector_ret) |
3569 | goto out_unsupported_cdb; | 3567 | goto out_unsupported_cdb; |
3570 | size = transport_get_size(sectors, cdb, cmd); | 3568 | size = transport_get_size(sectors, cdb, cmd); |
3571 | T_TASK(cmd)->t_task_lba = get_unaligned_be64(&cdb[2]); | 3569 | T_TASK(cmd)->t_task_lba = get_unaligned_be64(&cdb[2]); |
3572 | passthrough = (TRANSPORT(dev)->transport_type == | 3570 | passthrough = (TRANSPORT(dev)->transport_type == |
3573 | TRANSPORT_PLUGIN_PHBA_PDEV); | 3571 | TRANSPORT_PLUGIN_PHBA_PDEV); |
3574 | /* | 3572 | /* |
3575 | * Determine if the received WRITE_SAME_16 is used for direct | 3573 | * Determine if the received WRITE_SAME_16 is used for direct |
3576 | * passthrough into Linux/SCSI with struct request via TCM/pSCSI | 3574 | * passthrough into Linux/SCSI with struct request via TCM/pSCSI |
3577 | * or we are signaling the use of internal WRITE_SAME + UNMAP=1 | 3575 | * or we are signaling the use of internal WRITE_SAME + UNMAP=1 |
3578 | * emulation for -> Linux/BLOCK discard with TCM/IBLOCK and | 3576 | * emulation for -> Linux/BLOCK discard with TCM/IBLOCK and |
3579 | * TCM/FILEIO subsystem plugin backstores. | 3577 | * TCM/FILEIO subsystem plugin backstores. |
3580 | */ | 3578 | */ |
3581 | if (!(passthrough)) { | 3579 | if (!(passthrough)) { |
3582 | if ((cdb[1] & 0x04) || (cdb[1] & 0x02)) { | 3580 | if ((cdb[1] & 0x04) || (cdb[1] & 0x02)) { |
3583 | printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA" | 3581 | printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA" |
3584 | " bits not supported for Block Discard" | 3582 | " bits not supported for Block Discard" |
3585 | " Emulation\n"); | 3583 | " Emulation\n"); |
3586 | goto out_invalid_cdb_field; | 3584 | goto out_invalid_cdb_field; |
3587 | } | 3585 | } |
3588 | /* | 3586 | /* |
3589 | * Currently for the emulated case we only accept | 3587 | * Currently for the emulated case we only accept |
3590 | * WRITE_SAME (TPWS) with the UNMAP=1 bit set. | 3588 | * WRITE_SAME (TPWS) with the UNMAP=1 bit set. |
3591 | */ | 3589 | */ |
3592 | if (!(cdb[1] & 0x08)) { | 3590 | if (!(cdb[1] & 0x08)) { |
3593 | printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not" | 3591 | printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not" |
3594 | " supported for Block Discard Emulation\n"); | 3592 | " supported for Block Discard Emulation\n"); |
3595 | goto out_invalid_cdb_field; | 3593 | goto out_invalid_cdb_field; |
3596 | } | 3594 | } |
3597 | } | 3595 | } |
3598 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | 3596 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
3599 | break; | 3597 | break; |
3600 | case ALLOW_MEDIUM_REMOVAL: | 3598 | case ALLOW_MEDIUM_REMOVAL: |
3601 | case GPCMD_CLOSE_TRACK: | 3599 | case GPCMD_CLOSE_TRACK: |
3602 | case ERASE: | 3600 | case ERASE: |
3603 | case INITIALIZE_ELEMENT_STATUS: | 3601 | case INITIALIZE_ELEMENT_STATUS: |
3604 | case GPCMD_LOAD_UNLOAD: | 3602 | case GPCMD_LOAD_UNLOAD: |
3605 | case REZERO_UNIT: | 3603 | case REZERO_UNIT: |
3606 | case SEEK_10: | 3604 | case SEEK_10: |
3607 | case GPCMD_SET_SPEED: | 3605 | case GPCMD_SET_SPEED: |
3608 | case SPACE: | 3606 | case SPACE: |
3609 | case START_STOP: | 3607 | case START_STOP: |
3610 | case TEST_UNIT_READY: | 3608 | case TEST_UNIT_READY: |
3611 | case VERIFY: | 3609 | case VERIFY: |
3612 | case WRITE_FILEMARKS: | 3610 | case WRITE_FILEMARKS: |
3613 | case MOVE_MEDIUM: | 3611 | case MOVE_MEDIUM: |
3614 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; | 3612 | cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; |
3615 | break; | 3613 | break; |
3616 | case REPORT_LUNS: | 3614 | case REPORT_LUNS: |
3617 | cmd->transport_emulate_cdb = | 3615 | cmd->transport_emulate_cdb = |
3618 | &transport_core_report_lun_response; | 3616 | &transport_core_report_lun_response; |
3619 | size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; | 3617 | size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; |
3620 | /* | 3618 | /* |
3621 | * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS | 3619 | * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS |
3622 | * See spc4r17 section 5.3 | 3620 | * See spc4r17 section 5.3 |
3623 | */ | 3621 | */ |
3624 | if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) | 3622 | if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) |
3625 | cmd->sam_task_attr = TASK_ATTR_HOQ; | 3623 | cmd->sam_task_attr = TASK_ATTR_HOQ; |
3626 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; | 3624 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; |
3627 | break; | 3625 | break; |
3628 | default: | 3626 | default: |
3629 | printk(KERN_WARNING "TARGET_CORE[%s]: Unsupported SCSI Opcode" | 3627 | printk(KERN_WARNING "TARGET_CORE[%s]: Unsupported SCSI Opcode" |
3630 | " 0x%02x, sending CHECK_CONDITION.\n", | 3628 | " 0x%02x, sending CHECK_CONDITION.\n", |
3631 | CMD_TFO(cmd)->get_fabric_name(), cdb[0]); | 3629 | CMD_TFO(cmd)->get_fabric_name(), cdb[0]); |
3632 | cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks; | 3630 | cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks; |
3633 | goto out_unsupported_cdb; | 3631 | goto out_unsupported_cdb; |
3634 | } | 3632 | } |
3635 | 3633 | ||
3636 | if (size != cmd->data_length) { | 3634 | if (size != cmd->data_length) { |
3637 | printk(KERN_WARNING "TARGET_CORE[%s]: Expected Transfer Length:" | 3635 | printk(KERN_WARNING "TARGET_CORE[%s]: Expected Transfer Length:" |
3638 | " %u does not match SCSI CDB Length: %u for SAM Opcode:" | 3636 | " %u does not match SCSI CDB Length: %u for SAM Opcode:" |
3639 | " 0x%02x\n", CMD_TFO(cmd)->get_fabric_name(), | 3637 | " 0x%02x\n", CMD_TFO(cmd)->get_fabric_name(), |
3640 | cmd->data_length, size, cdb[0]); | 3638 | cmd->data_length, size, cdb[0]); |
3641 | 3639 | ||
3642 | cmd->cmd_spdtl = size; | 3640 | cmd->cmd_spdtl = size; |
3643 | 3641 | ||
3644 | if (cmd->data_direction == DMA_TO_DEVICE) { | 3642 | if (cmd->data_direction == DMA_TO_DEVICE) { |
3645 | printk(KERN_ERR "Rejecting underflow/overflow" | 3643 | printk(KERN_ERR "Rejecting underflow/overflow" |
3646 | " WRITE data\n"); | 3644 | " WRITE data\n"); |
3647 | goto out_invalid_cdb_field; | 3645 | goto out_invalid_cdb_field; |
3648 | } | 3646 | } |
3649 | /* | 3647 | /* |
3650 | * Reject READ_* or WRITE_* with overflow/underflow for | 3648 | * Reject READ_* or WRITE_* with overflow/underflow for |
3651 | * type SCF_SCSI_DATA_SG_IO_CDB. | 3649 | * type SCF_SCSI_DATA_SG_IO_CDB. |
3652 | */ | 3650 | */ |
3653 | if (!(ret) && (DEV_ATTRIB(dev)->block_size != 512)) { | 3651 | if (!(ret) && (DEV_ATTRIB(dev)->block_size != 512)) { |
3654 | printk(KERN_ERR "Failing OVERFLOW/UNDERFLOW for LBA op" | 3652 | printk(KERN_ERR "Failing OVERFLOW/UNDERFLOW for LBA op" |
3655 | " CDB on non 512-byte sector setup subsystem" | 3653 | " CDB on non 512-byte sector setup subsystem" |
3656 | " plugin: %s\n", TRANSPORT(dev)->name); | 3654 | " plugin: %s\n", TRANSPORT(dev)->name); |
3657 | /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */ | 3655 | /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */ |
3658 | goto out_invalid_cdb_field; | 3656 | goto out_invalid_cdb_field; |
3659 | } | 3657 | } |
3660 | 3658 | ||
3661 | if (size > cmd->data_length) { | 3659 | if (size > cmd->data_length) { |
3662 | cmd->se_cmd_flags |= SCF_OVERFLOW_BIT; | 3660 | cmd->se_cmd_flags |= SCF_OVERFLOW_BIT; |
3663 | cmd->residual_count = (size - cmd->data_length); | 3661 | cmd->residual_count = (size - cmd->data_length); |
3664 | } else { | 3662 | } else { |
3665 | cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; | 3663 | cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; |
3666 | cmd->residual_count = (cmd->data_length - size); | 3664 | cmd->residual_count = (cmd->data_length - size); |
3667 | } | 3665 | } |
3668 | cmd->data_length = size; | 3666 | cmd->data_length = size; |
3669 | } | 3667 | } |
3670 | 3668 | ||
3671 | transport_set_supported_SAM_opcode(cmd); | 3669 | transport_set_supported_SAM_opcode(cmd); |
3672 | return ret; | 3670 | return ret; |
3673 | 3671 | ||
3674 | out_unsupported_cdb: | 3672 | out_unsupported_cdb: |
3675 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 3673 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
3676 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; | 3674 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; |
3677 | return -2; | 3675 | return -2; |
3678 | out_invalid_cdb_field: | 3676 | out_invalid_cdb_field: |
3679 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 3677 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
3680 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; | 3678 | cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; |
3681 | return -2; | 3679 | return -2; |
3682 | } | 3680 | } |
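Nearly every case in the sequencer above decodes an allocation length or LBA from big-endian CDB bytes with shift-and-OR expressions such as (cdb[7] << 8) + cdb[8]. The pattern generalizes as below (stand-alone sketch; the kernel itself uses the get_unaligned_be*() helpers seen in a few of the cases):

#include <stdint.h>

/* 16-bit big-endian CDB field, e.g. bytes 7-8 of a 10-byte CDB. */
static inline uint32_t cdb_be16(const uint8_t *cdb, int off)
{
        return ((uint32_t)cdb[off] << 8) | cdb[off + 1];
}

/* 32-bit big-endian CDB field, e.g. bytes 6-9 of a 12-byte CDB. */
static inline uint32_t cdb_be32(const uint8_t *cdb, int off)
{
        return ((uint32_t)cdb[off] << 24) | ((uint32_t)cdb[off + 1] << 16) |
               ((uint32_t)cdb[off + 2] << 8) | (uint32_t)cdb[off + 3];
}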
3683 | 3681 | ||
3684 | static inline void transport_release_tasks(struct se_cmd *); | 3682 | static inline void transport_release_tasks(struct se_cmd *); |
3685 | 3683 | ||
3686 | /* | 3684 | /* |
3687 | * This function will copy a contiguous *src buffer into a destination | 3685 | * This function will copy a contiguous *src buffer into a destination |
3688 | * struct scatterlist array. | 3686 | * struct scatterlist array. |
3689 | */ | 3687 | */ |
3690 | static void transport_memcpy_write_contig( | 3688 | static void transport_memcpy_write_contig( |
3691 | struct se_cmd *cmd, | 3689 | struct se_cmd *cmd, |
3692 | struct scatterlist *sg_d, | 3690 | struct scatterlist *sg_d, |
3693 | unsigned char *src) | 3691 | unsigned char *src) |
3694 | { | 3692 | { |
3695 | u32 i = 0, length = 0, total_length = cmd->data_length; | 3693 | u32 i = 0, length = 0, total_length = cmd->data_length; |
3696 | void *dst; | 3694 | void *dst; |
3697 | 3695 | ||
3698 | while (total_length) { | 3696 | while (total_length) { |
3699 | length = sg_d[i].length; | 3697 | length = sg_d[i].length; |
3700 | 3698 | ||
3701 | if (length > total_length) | 3699 | if (length > total_length) |
3702 | length = total_length; | 3700 | length = total_length; |
3703 | 3701 | ||
3704 | dst = sg_virt(&sg_d[i]); | 3702 | dst = sg_virt(&sg_d[i]); |
3705 | 3703 | ||
3706 | memcpy(dst, src, length); | 3704 | memcpy(dst, src, length); |
3707 | 3705 | ||
3708 | if (!(total_length -= length)) | 3706 | if (!(total_length -= length)) |
3709 | return; | 3707 | return; |
3710 | 3708 | ||
3711 | src += length; | 3709 | src += length; |
3712 | i++; | 3710 | i++; |
3713 | } | 3711 | } |
3714 | } | 3712 | } |
3715 | 3713 | ||
3716 | /* | 3714 | /* |
3717 | * This function will copy a struct scatterlist array *sg_s into a destination | 3715 | * This function will copy a struct scatterlist array *sg_s into a destination |
3718 | * contiguous *dst buffer. | 3716 | * contiguous *dst buffer. |
3719 | */ | 3717 | */ |
3720 | static void transport_memcpy_read_contig( | 3718 | static void transport_memcpy_read_contig( |
3721 | struct se_cmd *cmd, | 3719 | struct se_cmd *cmd, |
3722 | unsigned char *dst, | 3720 | unsigned char *dst, |
3723 | struct scatterlist *sg_s) | 3721 | struct scatterlist *sg_s) |
3724 | { | 3722 | { |
3725 | u32 i = 0, length = 0, total_length = cmd->data_length; | 3723 | u32 i = 0, length = 0, total_length = cmd->data_length; |
3726 | void *src; | 3724 | void *src; |
3727 | 3725 | ||
3728 | while (total_length) { | 3726 | while (total_length) { |
3729 | length = sg_s[i].length; | 3727 | length = sg_s[i].length; |
3730 | 3728 | ||
3731 | if (length > total_length) | 3729 | if (length > total_length) |
3732 | length = total_length; | 3730 | length = total_length; |
3733 | 3731 | ||
3734 | src = sg_virt(&sg_s[i]); | 3732 | src = sg_virt(&sg_s[i]); |
3735 | 3733 | ||
3736 | memcpy(dst, src, length); | 3734 | memcpy(dst, src, length); |
3737 | 3735 | ||
3738 | if (!(total_length -= length)) | 3736 | if (!(total_length -= length)) |
3739 | return; | 3737 | return; |
3740 | 3738 | ||
3741 | dst += length; | 3739 | dst += length; |
3742 | i++; | 3740 | i++; |
3743 | } | 3741 | } |
3744 | } | 3742 | } |
3745 | 3743 | ||
3746 | static void transport_memcpy_se_mem_read_contig( | 3744 | static void transport_memcpy_se_mem_read_contig( |
3747 | struct se_cmd *cmd, | 3745 | struct se_cmd *cmd, |
3748 | unsigned char *dst, | 3746 | unsigned char *dst, |
3749 | struct list_head *se_mem_list) | 3747 | struct list_head *se_mem_list) |
3750 | { | 3748 | { |
3751 | struct se_mem *se_mem; | 3749 | struct se_mem *se_mem; |
3752 | void *src; | 3750 | void *src; |
3753 | u32 length = 0, total_length = cmd->data_length; | 3751 | u32 length = 0, total_length = cmd->data_length; |
3754 | 3752 | ||
3755 | list_for_each_entry(se_mem, se_mem_list, se_list) { | 3753 | list_for_each_entry(se_mem, se_mem_list, se_list) { |
3756 | length = se_mem->se_len; | 3754 | length = se_mem->se_len; |
3757 | 3755 | ||
3758 | if (length > total_length) | 3756 | if (length > total_length) |
3759 | length = total_length; | 3757 | length = total_length; |
3760 | 3758 | ||
3761 | src = page_address(se_mem->se_page) + se_mem->se_off; | 3759 | src = page_address(se_mem->se_page) + se_mem->se_off; |
3762 | 3760 | ||
3763 | memcpy(dst, src, length); | 3761 | memcpy(dst, src, length); |
3764 | 3762 | ||
3765 | if (!(total_length -= length)) | 3763 | if (!(total_length -= length)) |
3766 | return; | 3764 | return; |
3767 | 3765 | ||
3768 | dst += length; | 3766 | dst += length; |
3769 | } | 3767 | } |
3770 | } | 3768 | } |
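To make the list walk concrete, a hypothetical 6 KB payload split across two se_mem elements would be consumed as follows (values invented for illustration):

    /* cmd->data_length = 6144, se_mem list = { se_len 4096, se_len 2048 }:
     *   pass 1: copy 4096 bytes from page_address(page0) + se_off, dst += 4096
     *   pass 2: length (2048) consumes the remaining total_length -> copy, return
     */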
3771 | 3769 | ||
3772 | /* | 3770 | /* |
3773 | * Called from transport_generic_complete_ok() and | 3771 | * Called from transport_generic_complete_ok() and |
3774 | * transport_generic_request_failure() to determine which dormant/delayed | 3772 | * transport_generic_request_failure() to determine which dormant/delayed |
3775 | * and ordered cmds need to have their tasks added to the execution queue. | 3773 | * and ordered cmds need to have their tasks added to the execution queue. |
3776 | */ | 3774 | */ |
3777 | static void transport_complete_task_attr(struct se_cmd *cmd) | 3775 | static void transport_complete_task_attr(struct se_cmd *cmd) |
3778 | { | 3776 | { |
3779 | struct se_device *dev = SE_DEV(cmd); | 3777 | struct se_device *dev = SE_DEV(cmd); |
3780 | struct se_cmd *cmd_p, *cmd_tmp; | 3778 | struct se_cmd *cmd_p, *cmd_tmp; |
3781 | int new_active_tasks = 0; | 3779 | int new_active_tasks = 0; |
3782 | 3780 | ||
3783 | if (cmd->sam_task_attr == TASK_ATTR_SIMPLE) { | 3781 | if (cmd->sam_task_attr == TASK_ATTR_SIMPLE) { |
3784 | atomic_dec(&dev->simple_cmds); | 3782 | atomic_dec(&dev->simple_cmds); |
3785 | smp_mb__after_atomic_dec(); | 3783 | smp_mb__after_atomic_dec(); |
3786 | dev->dev_cur_ordered_id++; | 3784 | dev->dev_cur_ordered_id++; |
3787 | DEBUG_STA("Incremented dev->dev_cur_ordered_id: %u for" | 3785 | DEBUG_STA("Incremented dev->dev_cur_ordered_id: %u for" |
3788 | " SIMPLE: %u\n", dev->dev_cur_ordered_id, | 3786 | " SIMPLE: %u\n", dev->dev_cur_ordered_id, |
3789 | cmd->se_ordered_id); | 3787 | cmd->se_ordered_id); |
3790 | } else if (cmd->sam_task_attr == TASK_ATTR_HOQ) { | 3788 | } else if (cmd->sam_task_attr == TASK_ATTR_HOQ) { |
3791 | atomic_dec(&dev->dev_hoq_count); | 3789 | atomic_dec(&dev->dev_hoq_count); |
3792 | smp_mb__after_atomic_dec(); | 3790 | smp_mb__after_atomic_dec(); |
3793 | dev->dev_cur_ordered_id++; | 3791 | dev->dev_cur_ordered_id++; |
3794 | DEBUG_STA("Incremented dev_cur_ordered_id: %u for" | 3792 | DEBUG_STA("Incremented dev_cur_ordered_id: %u for" |
3795 | " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id, | 3793 | " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id, |
3796 | cmd->se_ordered_id); | 3794 | cmd->se_ordered_id); |
3797 | } else if (cmd->sam_task_attr == TASK_ATTR_ORDERED) { | 3795 | } else if (cmd->sam_task_attr == TASK_ATTR_ORDERED) { |
3798 | spin_lock(&dev->ordered_cmd_lock); | 3796 | spin_lock(&dev->ordered_cmd_lock); |
3799 | list_del(&cmd->se_ordered_list); | 3797 | list_del(&cmd->se_ordered_list); |
3800 | atomic_dec(&dev->dev_ordered_sync); | 3798 | atomic_dec(&dev->dev_ordered_sync); |
3801 | smp_mb__after_atomic_dec(); | 3799 | smp_mb__after_atomic_dec(); |
3802 | spin_unlock(&dev->ordered_cmd_lock); | 3800 | spin_unlock(&dev->ordered_cmd_lock); |
3803 | 3801 | ||
3804 | dev->dev_cur_ordered_id++; | 3802 | dev->dev_cur_ordered_id++; |
3805 | DEBUG_STA("Incremented dev_cur_ordered_id: %u for ORDERED:" | 3803 | DEBUG_STA("Incremented dev_cur_ordered_id: %u for ORDERED:" |
3806 | " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id); | 3804 | " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id); |
3807 | } | 3805 | } |
3808 | /* | 3806 | /* |
3809 | * Process all commands up to the last received | 3807 | * Process all commands up to the last received |
3810 | * ORDERED task attribute which requires another blocking | 3808 | * ORDERED task attribute which requires another blocking |
3811 | * boundary. | 3809 | * boundary. |
3812 | */ | 3810 | */ |
3813 | spin_lock(&dev->delayed_cmd_lock); | 3811 | spin_lock(&dev->delayed_cmd_lock); |
3814 | list_for_each_entry_safe(cmd_p, cmd_tmp, | 3812 | list_for_each_entry_safe(cmd_p, cmd_tmp, |
3815 | &dev->delayed_cmd_list, se_delayed_list) { | 3813 | &dev->delayed_cmd_list, se_delayed_list) { |
3816 | 3814 | ||
3817 | list_del(&cmd_p->se_delayed_list); | 3815 | list_del(&cmd_p->se_delayed_list); |
3818 | spin_unlock(&dev->delayed_cmd_lock); | 3816 | spin_unlock(&dev->delayed_cmd_lock); |
3819 | 3817 | ||
3820 | DEBUG_STA("Calling add_tasks() for" | 3818 | DEBUG_STA("Calling add_tasks() for" |
3821 | " cmd_p: 0x%02x Task Attr: 0x%02x" | 3819 | " cmd_p: 0x%02x Task Attr: 0x%02x" |
3822 | " Dormant -> Active, se_ordered_id: %u\n", | 3820 | " Dormant -> Active, se_ordered_id: %u\n", |
3823 | T_TASK(cmd_p)->t_task_cdb[0], | 3821 | T_TASK(cmd_p)->t_task_cdb[0], |
3824 | cmd_p->sam_task_attr, cmd_p->se_ordered_id); | 3822 | cmd_p->sam_task_attr, cmd_p->se_ordered_id); |
3825 | 3823 | ||
3826 | transport_add_tasks_from_cmd(cmd_p); | 3824 | transport_add_tasks_from_cmd(cmd_p); |
3827 | new_active_tasks++; | 3825 | new_active_tasks++; |
3828 | 3826 | ||
3829 | spin_lock(&dev->delayed_cmd_lock); | 3827 | spin_lock(&dev->delayed_cmd_lock); |
3830 | if (cmd_p->sam_task_attr == TASK_ATTR_ORDERED) | 3828 | if (cmd_p->sam_task_attr == TASK_ATTR_ORDERED) |
3831 | break; | 3829 | break; |
3832 | } | 3830 | } |
3833 | spin_unlock(&dev->delayed_cmd_lock); | 3831 | spin_unlock(&dev->delayed_cmd_lock); |
3834 | /* | 3832 | /* |
3835 | * If new tasks have become active, wake up the transport thread | 3833 | * If new tasks have become active, wake up the transport thread |
3836 | * to do the processing of the Active tasks. | 3834 | * to do the processing of the Active tasks. |
3837 | */ | 3835 | */ |
3838 | if (new_active_tasks != 0) | 3836 | if (new_active_tasks != 0) |
3839 | wake_up_interruptible(&dev->dev_queue_obj->thread_wq); | 3837 | wake_up_interruptible(&dev->dev_queue_obj->thread_wq); |
3840 | } | 3838 | } |
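To make the drain loop concrete, consider a hypothetical arrival order under emulated SAM task attributes; only commands up to and including the next ORDERED barrier are re-added to the execution queue:

    /* SIMPLE A, SIMPLE B  - execute concurrently
     * ORDERED C           - waits for A and B, then acts as a barrier
     * SIMPLE D, SIMPLE E  - parked on dev->delayed_cmd_list behind C
     *
     * When C completes, the loop above calls transport_add_tasks_from_cmd()
     * for D and E, stopping early if another ORDERED command is hit first.
     */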
3841 | 3839 | ||
3842 | static void transport_generic_complete_ok(struct se_cmd *cmd) | 3840 | static void transport_generic_complete_ok(struct se_cmd *cmd) |
3843 | { | 3841 | { |
3844 | int reason = 0; | 3842 | int reason = 0; |
3845 | /* | 3843 | /* |
3846 | * Check if we need to move delayed/dormant tasks from cmds on the | 3844 | * Check if we need to move delayed/dormant tasks from cmds on the |
3847 | * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task | 3845 | * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task |
3848 | * Attribute. | 3846 | * Attribute. |
3849 | */ | 3847 | */ |
3850 | if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) | 3848 | if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) |
3851 | transport_complete_task_attr(cmd); | 3849 | transport_complete_task_attr(cmd); |
3852 | /* | 3850 | /* |
3853 | * Check if we need to retrieve a sense buffer from | 3851 | * Check if we need to retrieve a sense buffer from |
3854 | * the struct se_cmd in question. | 3852 | * the struct se_cmd in question. |
3855 | */ | 3853 | */ |
3856 | if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { | 3854 | if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { |
3857 | if (transport_get_sense_data(cmd) < 0) | 3855 | if (transport_get_sense_data(cmd) < 0) |
3858 | reason = TCM_NON_EXISTENT_LUN; | 3856 | reason = TCM_NON_EXISTENT_LUN; |
3859 | 3857 | ||
3860 | /* | 3858 | /* |
3861 | * Only set when a struct se_task->task_scsi_status returned | 3859 | * Only set when a struct se_task->task_scsi_status returned |
3862 | * a non-GOOD status. | 3860 | * a non-GOOD status. |
3863 | */ | 3861 | */ |
3864 | if (cmd->scsi_status) { | 3862 | if (cmd->scsi_status) { |
3865 | transport_send_check_condition_and_sense( | 3863 | transport_send_check_condition_and_sense( |
3866 | cmd, reason, 1); | 3864 | cmd, reason, 1); |
3867 | transport_lun_remove_cmd(cmd); | 3865 | transport_lun_remove_cmd(cmd); |
3868 | transport_cmd_check_stop_to_fabric(cmd); | 3866 | transport_cmd_check_stop_to_fabric(cmd); |
3869 | return; | 3867 | return; |
3870 | } | 3868 | } |
3871 | } | 3869 | } |
3872 | /* | 3870 | /* |
3873 | * Check for a callback, used amongst other things by | 3871 | * Check for a callback, used amongst other things by |
3874 | * XDWRITE_READ_10 emulation. | 3872 | * XDWRITE_READ_10 emulation. |
3875 | */ | 3873 | */ |
3876 | if (cmd->transport_complete_callback) | 3874 | if (cmd->transport_complete_callback) |
3877 | cmd->transport_complete_callback(cmd); | 3875 | cmd->transport_complete_callback(cmd); |
3878 | 3876 | ||
3879 | switch (cmd->data_direction) { | 3877 | switch (cmd->data_direction) { |
3880 | case DMA_FROM_DEVICE: | 3878 | case DMA_FROM_DEVICE: |
3881 | spin_lock(&cmd->se_lun->lun_sep_lock); | 3879 | spin_lock(&cmd->se_lun->lun_sep_lock); |
3882 | if (SE_LUN(cmd)->lun_sep) { | 3880 | if (SE_LUN(cmd)->lun_sep) { |
3883 | SE_LUN(cmd)->lun_sep->sep_stats.tx_data_octets += | 3881 | SE_LUN(cmd)->lun_sep->sep_stats.tx_data_octets += |
3884 | cmd->data_length; | 3882 | cmd->data_length; |
3885 | } | 3883 | } |
3886 | spin_unlock(&cmd->se_lun->lun_sep_lock); | 3884 | spin_unlock(&cmd->se_lun->lun_sep_lock); |
3887 | /* | 3885 | /* |
3888 | * If enabled by TCM fabric module pre-registered SGL | 3886 | * If enabled by TCM fabric module pre-registered SGL |
3889 | * memory, perform the memcpy() from the TCM internal | 3887 | * memory, perform the memcpy() from the TCM internal |
3890 | * contiguous buffer back to the original SGL. | 3888 | * contiguous buffer back to the original SGL. |
3891 | */ | 3889 | */ |
3892 | if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG) | 3890 | if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG) |
3893 | transport_memcpy_write_contig(cmd, | 3891 | transport_memcpy_write_contig(cmd, |
3894 | T_TASK(cmd)->t_task_pt_sgl, | 3892 | T_TASK(cmd)->t_task_pt_sgl, |
3895 | T_TASK(cmd)->t_task_buf); | 3893 | T_TASK(cmd)->t_task_buf); |
3896 | 3894 | ||
3897 | CMD_TFO(cmd)->queue_data_in(cmd); | 3895 | CMD_TFO(cmd)->queue_data_in(cmd); |
3898 | break; | 3896 | break; |
3899 | case DMA_TO_DEVICE: | 3897 | case DMA_TO_DEVICE: |
3900 | spin_lock(&cmd->se_lun->lun_sep_lock); | 3898 | spin_lock(&cmd->se_lun->lun_sep_lock); |
3901 | if (SE_LUN(cmd)->lun_sep) { | 3899 | if (SE_LUN(cmd)->lun_sep) { |
3902 | SE_LUN(cmd)->lun_sep->sep_stats.rx_data_octets += | 3900 | SE_LUN(cmd)->lun_sep->sep_stats.rx_data_octets += |
3903 | cmd->data_length; | 3901 | cmd->data_length; |
3904 | } | 3902 | } |
3905 | spin_unlock(&cmd->se_lun->lun_sep_lock); | 3903 | spin_unlock(&cmd->se_lun->lun_sep_lock); |
3906 | /* | 3904 | /* |
3907 | * Check if we need to send the READ payload for a BIDI-COMMAND. | 3905 | * Check if we need to send the READ payload for a BIDI-COMMAND. |
3908 | */ | 3906 | */ |
3909 | if (T_TASK(cmd)->t_mem_bidi_list != NULL) { | 3907 | if (T_TASK(cmd)->t_mem_bidi_list != NULL) { |
3910 | spin_lock(&cmd->se_lun->lun_sep_lock); | 3908 | spin_lock(&cmd->se_lun->lun_sep_lock); |
3911 | if (SE_LUN(cmd)->lun_sep) { | 3909 | if (SE_LUN(cmd)->lun_sep) { |
3912 | SE_LUN(cmd)->lun_sep->sep_stats.tx_data_octets += | 3910 | SE_LUN(cmd)->lun_sep->sep_stats.tx_data_octets += |
3913 | cmd->data_length; | 3911 | cmd->data_length; |
3914 | } | 3912 | } |
3915 | spin_unlock(&cmd->se_lun->lun_sep_lock); | 3913 | spin_unlock(&cmd->se_lun->lun_sep_lock); |
3916 | CMD_TFO(cmd)->queue_data_in(cmd); | 3914 | CMD_TFO(cmd)->queue_data_in(cmd); |
3917 | break; | 3915 | break; |
3918 | } | 3916 | } |
3919 | /* Fall through for DMA_TO_DEVICE */ | 3917 | /* Fall through for DMA_TO_DEVICE */ |
3920 | case DMA_NONE: | 3918 | case DMA_NONE: |
3921 | CMD_TFO(cmd)->queue_status(cmd); | 3919 | CMD_TFO(cmd)->queue_status(cmd); |
3922 | break; | 3920 | break; |
3923 | default: | 3921 | default: |
3924 | break; | 3922 | break; |
3925 | } | 3923 | } |
3926 | 3924 | ||
3927 | transport_lun_remove_cmd(cmd); | 3925 | transport_lun_remove_cmd(cmd); |
3928 | transport_cmd_check_stop_to_fabric(cmd); | 3926 | transport_cmd_check_stop_to_fabric(cmd); |
3929 | } | 3927 | } |
3930 | 3928 | ||
3931 | static void transport_free_dev_tasks(struct se_cmd *cmd) | 3929 | static void transport_free_dev_tasks(struct se_cmd *cmd) |
3932 | { | 3930 | { |
3933 | struct se_task *task, *task_tmp; | 3931 | struct se_task *task, *task_tmp; |
3934 | unsigned long flags; | 3932 | unsigned long flags; |
3935 | 3933 | ||
3936 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 3934 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); |
3937 | list_for_each_entry_safe(task, task_tmp, | 3935 | list_for_each_entry_safe(task, task_tmp, |
3938 | &T_TASK(cmd)->t_task_list, t_list) { | 3936 | &T_TASK(cmd)->t_task_list, t_list) { |
3939 | if (atomic_read(&task->task_active)) | 3937 | if (atomic_read(&task->task_active)) |
3940 | continue; | 3938 | continue; |
3941 | 3939 | ||
3942 | kfree(task->task_sg_bidi); | 3940 | kfree(task->task_sg_bidi); |
3943 | kfree(task->task_sg); | 3941 | kfree(task->task_sg); |
3944 | 3942 | ||
3945 | list_del(&task->t_list); | 3943 | list_del(&task->t_list); |
3946 | 3944 | ||
3947 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 3945 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); |
3948 | if (task->se_dev) | 3946 | if (task->se_dev) |
3949 | TRANSPORT(task->se_dev)->free_task(task); | 3947 | TRANSPORT(task->se_dev)->free_task(task); |
3950 | else | 3948 | else |
3951 | printk(KERN_ERR "task[%u] - task->se_dev is NULL\n", | 3949 | printk(KERN_ERR "task[%u] - task->se_dev is NULL\n", |
3952 | task->task_no); | 3950 | task->task_no); |
3953 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 3951 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); |
3954 | } | 3952 | } |
3955 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 3953 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); |
3956 | } | 3954 | } |
3957 | 3955 | ||
3958 | static inline void transport_free_pages(struct se_cmd *cmd) | 3956 | static inline void transport_free_pages(struct se_cmd *cmd) |
3959 | { | 3957 | { |
3960 | struct se_mem *se_mem, *se_mem_tmp; | 3958 | struct se_mem *se_mem, *se_mem_tmp; |
3961 | int free_page = 1; | 3959 | int free_page = 1; |
3962 | 3960 | ||
3963 | if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) | 3961 | if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) |
3964 | free_page = 0; | 3962 | free_page = 0; |
3965 | if (cmd->se_dev->transport->do_se_mem_map) | 3963 | if (cmd->se_dev->transport->do_se_mem_map) |
3966 | free_page = 0; | 3964 | free_page = 0; |
3967 | 3965 | ||
3968 | if (T_TASK(cmd)->t_task_buf) { | 3966 | if (T_TASK(cmd)->t_task_buf) { |
3969 | kfree(T_TASK(cmd)->t_task_buf); | 3967 | kfree(T_TASK(cmd)->t_task_buf); |
3970 | T_TASK(cmd)->t_task_buf = NULL; | 3968 | T_TASK(cmd)->t_task_buf = NULL; |
3971 | return; | 3969 | return; |
3972 | } | 3970 | } |
3973 | 3971 | ||
3974 | /* | 3972 | /* |
3975 | * Caller will handle releasing of struct se_mem. | 3973 | * Caller will handle releasing of struct se_mem. |
3976 | */ | 3974 | */ |
3977 | if (cmd->se_cmd_flags & SCF_CMD_PASSTHROUGH_NOALLOC) | 3975 | if (cmd->se_cmd_flags & SCF_CMD_PASSTHROUGH_NOALLOC) |
3978 | return; | 3976 | return; |
3979 | 3977 | ||
3980 | if (!(T_TASK(cmd)->t_tasks_se_num)) | 3978 | if (!(T_TASK(cmd)->t_tasks_se_num)) |
3981 | return; | 3979 | return; |
3982 | 3980 | ||
3983 | list_for_each_entry_safe(se_mem, se_mem_tmp, | 3981 | list_for_each_entry_safe(se_mem, se_mem_tmp, |
3984 | T_TASK(cmd)->t_mem_list, se_list) { | 3982 | T_TASK(cmd)->t_mem_list, se_list) { |
3985 | /* | 3983 | /* |
3986 | * We only call __free_page(struct se_mem->se_page) when | 3984 | * We only call __free_page(struct se_mem->se_page) when |
3987 | * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use. | 3985 | * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use. |
3988 | */ | 3986 | */ |
3989 | if (free_page) | 3987 | if (free_page) |
3990 | __free_page(se_mem->se_page); | 3988 | __free_page(se_mem->se_page); |
3991 | 3989 | ||
3992 | list_del(&se_mem->se_list); | 3990 | list_del(&se_mem->se_list); |
3993 | kmem_cache_free(se_mem_cache, se_mem); | 3991 | kmem_cache_free(se_mem_cache, se_mem); |
3994 | } | 3992 | } |
3995 | 3993 | ||
3996 | if (T_TASK(cmd)->t_mem_bidi_list && T_TASK(cmd)->t_tasks_se_bidi_num) { | 3994 | if (T_TASK(cmd)->t_mem_bidi_list && T_TASK(cmd)->t_tasks_se_bidi_num) { |
3997 | list_for_each_entry_safe(se_mem, se_mem_tmp, | 3995 | list_for_each_entry_safe(se_mem, se_mem_tmp, |
3998 | T_TASK(cmd)->t_mem_bidi_list, se_list) { | 3996 | T_TASK(cmd)->t_mem_bidi_list, se_list) { |
3999 | /* | 3997 | /* |
4000 | * We only call __free_page(struct se_mem->se_page) when | 3998 | * We only call __free_page(struct se_mem->se_page) when |
4001 | * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use. | 3999 | * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use. |
4002 | */ | 4000 | */ |
4003 | if (free_page) | 4001 | if (free_page) |
4004 | __free_page(se_mem->se_page); | 4002 | __free_page(se_mem->se_page); |
4005 | 4003 | ||
4006 | list_del(&se_mem->se_list); | 4004 | list_del(&se_mem->se_list); |
4007 | kmem_cache_free(se_mem_cache, se_mem); | 4005 | kmem_cache_free(se_mem_cache, se_mem); |
4008 | } | 4006 | } |
4009 | } | 4007 | } |
4010 | 4008 | ||
4011 | kfree(T_TASK(cmd)->t_mem_bidi_list); | 4009 | kfree(T_TASK(cmd)->t_mem_bidi_list); |
4012 | T_TASK(cmd)->t_mem_bidi_list = NULL; | 4010 | T_TASK(cmd)->t_mem_bidi_list = NULL; |
4013 | kfree(T_TASK(cmd)->t_mem_list); | 4011 | kfree(T_TASK(cmd)->t_mem_list); |
4014 | T_TASK(cmd)->t_mem_list = NULL; | 4012 | T_TASK(cmd)->t_mem_list = NULL; |
4015 | T_TASK(cmd)->t_tasks_se_num = 0; | 4013 | T_TASK(cmd)->t_tasks_se_num = 0; |
4016 | } | 4014 | } |
4017 | 4015 | ||
4018 | static inline void transport_release_tasks(struct se_cmd *cmd) | 4016 | static inline void transport_release_tasks(struct se_cmd *cmd) |
4019 | { | 4017 | { |
4020 | transport_free_dev_tasks(cmd); | 4018 | transport_free_dev_tasks(cmd); |
4021 | } | 4019 | } |
4022 | 4020 | ||
4023 | static inline int transport_dec_and_check(struct se_cmd *cmd) | 4021 | static inline int transport_dec_and_check(struct se_cmd *cmd) |
4024 | { | 4022 | { |
4025 | unsigned long flags; | 4023 | unsigned long flags; |
4026 | 4024 | ||
4027 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 4025 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); |
4028 | if (atomic_read(&T_TASK(cmd)->t_fe_count)) { | 4026 | if (atomic_read(&T_TASK(cmd)->t_fe_count)) { |
4029 | if (!(atomic_dec_and_test(&T_TASK(cmd)->t_fe_count))) { | 4027 | if (!(atomic_dec_and_test(&T_TASK(cmd)->t_fe_count))) { |
4030 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, | 4028 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, |
4031 | flags); | 4029 | flags); |
4032 | return 1; | 4030 | return 1; |
4033 | } | 4031 | } |
4034 | } | 4032 | } |
4035 | 4033 | ||
4036 | if (atomic_read(&T_TASK(cmd)->t_se_count)) { | 4034 | if (atomic_read(&T_TASK(cmd)->t_se_count)) { |
4037 | if (!(atomic_dec_and_test(&T_TASK(cmd)->t_se_count))) { | 4035 | if (!(atomic_dec_and_test(&T_TASK(cmd)->t_se_count))) { |
4038 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, | 4036 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, |
4039 | flags); | 4037 | flags); |
4040 | return 1; | 4038 | return 1; |
4041 | } | 4039 | } |
4042 | } | 4040 | } |
4043 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 4041 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); |
4044 | 4042 | ||
4045 | return 0; | 4043 | return 0; |
4046 | } | 4044 | } |
4047 | 4045 | ||
4048 | static void transport_release_fe_cmd(struct se_cmd *cmd) | 4046 | static void transport_release_fe_cmd(struct se_cmd *cmd) |
4049 | { | 4047 | { |
4050 | unsigned long flags; | 4048 | unsigned long flags; |
4051 | 4049 | ||
4052 | if (transport_dec_and_check(cmd)) | 4050 | if (transport_dec_and_check(cmd)) |
4053 | return; | 4051 | return; |
4054 | 4052 | ||
4055 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 4053 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); |
4056 | if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) { | 4054 | if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) { |
4057 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 4055 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); |
4058 | goto free_pages; | 4056 | goto free_pages; |
4059 | } | 4057 | } |
4060 | atomic_set(&T_TASK(cmd)->transport_dev_active, 0); | 4058 | atomic_set(&T_TASK(cmd)->transport_dev_active, 0); |
4061 | transport_all_task_dev_remove_state(cmd); | 4059 | transport_all_task_dev_remove_state(cmd); |
4062 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 4060 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); |
4063 | 4061 | ||
4064 | transport_release_tasks(cmd); | 4062 | transport_release_tasks(cmd); |
4065 | free_pages: | 4063 | free_pages: |
4066 | transport_free_pages(cmd); | 4064 | transport_free_pages(cmd); |
4067 | transport_free_se_cmd(cmd); | 4065 | transport_free_se_cmd(cmd); |
4068 | CMD_TFO(cmd)->release_cmd_direct(cmd); | 4066 | CMD_TFO(cmd)->release_cmd_direct(cmd); |
4069 | } | 4067 | } |
4070 | 4068 | ||
4071 | static int transport_generic_remove( | 4069 | static int transport_generic_remove( |
4072 | struct se_cmd *cmd, | 4070 | struct se_cmd *cmd, |
4073 | int release_to_pool, | 4071 | int release_to_pool, |
4074 | int session_reinstatement) | 4072 | int session_reinstatement) |
4075 | { | 4073 | { |
4076 | unsigned long flags; | 4074 | unsigned long flags; |
4077 | 4075 | ||
4078 | if (!(T_TASK(cmd))) | 4076 | if (!(T_TASK(cmd))) |
4079 | goto release_cmd; | 4077 | goto release_cmd; |
4080 | 4078 | ||
4081 | if (transport_dec_and_check(cmd)) { | 4079 | if (transport_dec_and_check(cmd)) { |
4082 | if (session_reinstatement) { | 4080 | if (session_reinstatement) { |
4083 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 4081 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); |
4084 | transport_all_task_dev_remove_state(cmd); | 4082 | transport_all_task_dev_remove_state(cmd); |
4085 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, | 4083 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, |
4086 | flags); | 4084 | flags); |
4087 | } | 4085 | } |
4088 | return 1; | 4086 | return 1; |
4089 | } | 4087 | } |
4090 | 4088 | ||
4091 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 4089 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); |
4092 | if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) { | 4090 | if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) { |
4093 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 4091 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); |
4094 | goto free_pages; | 4092 | goto free_pages; |
4095 | } | 4093 | } |
4096 | atomic_set(&T_TASK(cmd)->transport_dev_active, 0); | 4094 | atomic_set(&T_TASK(cmd)->transport_dev_active, 0); |
4097 | transport_all_task_dev_remove_state(cmd); | 4095 | transport_all_task_dev_remove_state(cmd); |
4098 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 4096 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); |
4099 | 4097 | ||
4100 | transport_release_tasks(cmd); | 4098 | transport_release_tasks(cmd); |
4101 | free_pages: | 4099 | free_pages: |
4102 | transport_free_pages(cmd); | 4100 | transport_free_pages(cmd); |
4103 | 4101 | ||
4104 | release_cmd: | 4102 | release_cmd: |
4105 | if (release_to_pool) { | 4103 | if (release_to_pool) { |
4106 | transport_release_cmd_to_pool(cmd); | 4104 | transport_release_cmd_to_pool(cmd); |
4107 | } else { | 4105 | } else { |
4108 | transport_free_se_cmd(cmd); | 4106 | transport_free_se_cmd(cmd); |
4109 | CMD_TFO(cmd)->release_cmd_direct(cmd); | 4107 | CMD_TFO(cmd)->release_cmd_direct(cmd); |
4110 | } | 4108 | } |
4111 | 4109 | ||
4112 | return 0; | 4110 | return 0; |
4113 | } | 4111 | } |
4114 | 4112 | ||
4115 | /* | 4113 | /* |
4116 | * transport_generic_map_mem_to_cmd - Perform SGL -> struct se_mem map | 4114 | * transport_generic_map_mem_to_cmd - Perform SGL -> struct se_mem map |
4117 | * @cmd: Associated se_cmd descriptor | 4115 | * @cmd: Associated se_cmd descriptor |
4118 | * @mem: SGL style memory for TCM WRITE / READ | 4116 | * @mem: SGL style memory for TCM WRITE / READ |
4119 | * @sg_mem_num: Number of SGL elements | 4117 | * @sg_mem_num: Number of SGL elements |
4120 | * @mem_bidi_in: SGL style memory for TCM BIDI READ | 4118 | * @mem_bidi_in: SGL style memory for TCM BIDI READ |
4121 | * @sg_mem_bidi_num: Number of BIDI READ SGL elements | 4119 | * @sg_mem_bidi_num: Number of BIDI READ SGL elements |
4122 | * | 4120 | * |
4123 | * Return: nonzero if cmd was rejected for -ENOMEM or improper usage | 4121 | * Return: nonzero if cmd was rejected for -ENOMEM or improper usage |
4124 | * of parameters. | 4122 | * of parameters. |
4125 | */ | 4123 | */ |
4126 | int transport_generic_map_mem_to_cmd( | 4124 | int transport_generic_map_mem_to_cmd( |
4127 | struct se_cmd *cmd, | 4125 | struct se_cmd *cmd, |
4128 | struct scatterlist *mem, | 4126 | struct scatterlist *mem, |
4129 | u32 sg_mem_num, | 4127 | u32 sg_mem_num, |
4130 | struct scatterlist *mem_bidi_in, | 4128 | struct scatterlist *mem_bidi_in, |
4131 | u32 sg_mem_bidi_num) | 4129 | u32 sg_mem_bidi_num) |
4132 | { | 4130 | { |
4133 | u32 se_mem_cnt_out = 0; | 4131 | u32 se_mem_cnt_out = 0; |
4134 | int ret; | 4132 | int ret; |
4135 | 4133 | ||
4136 | if (!(mem) || !(sg_mem_num)) | 4134 | if (!(mem) || !(sg_mem_num)) |
4137 | return 0; | 4135 | return 0; |
4138 | /* | 4136 | /* |
4139 | * Passed *mem will contain a list_head containing preformatted | 4137 | * Passed *mem will contain a list_head containing preformatted |
4140 | * struct se_mem elements... | 4138 | * struct se_mem elements... |
4141 | */ | 4139 | */ |
4142 | if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM)) { | 4140 | if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM)) { |
4143 | if ((mem_bidi_in) || (sg_mem_bidi_num)) { | 4141 | if ((mem_bidi_in) || (sg_mem_bidi_num)) { |
4144 | printk(KERN_ERR "SCF_CMD_PASSTHROUGH_NOALLOC not supported" | 4142 | printk(KERN_ERR "SCF_CMD_PASSTHROUGH_NOALLOC not supported" |
4145 | " with BIDI-COMMAND\n"); | 4143 | " with BIDI-COMMAND\n"); |
4146 | return -ENOSYS; | 4144 | return -ENOSYS; |
4147 | } | 4145 | } |
4148 | 4146 | ||
4149 | T_TASK(cmd)->t_mem_list = (struct list_head *)mem; | 4147 | T_TASK(cmd)->t_mem_list = (struct list_head *)mem; |
4150 | T_TASK(cmd)->t_tasks_se_num = sg_mem_num; | 4148 | T_TASK(cmd)->t_tasks_se_num = sg_mem_num; |
4151 | cmd->se_cmd_flags |= SCF_CMD_PASSTHROUGH_NOALLOC; | 4149 | cmd->se_cmd_flags |= SCF_CMD_PASSTHROUGH_NOALLOC; |
4152 | return 0; | 4150 | return 0; |
4153 | } | 4151 | } |
4154 | /* | 4152 | /* |
4155 | * Otherwise, assume the caller is passing a struct scatterlist | 4153 | * Otherwise, assume the caller is passing a struct scatterlist |
4156 | * array from include/linux/scatterlist.h | 4154 | * array from include/linux/scatterlist.h |
4157 | */ | 4155 | */ |
4158 | if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) || | 4156 | if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) || |
4159 | (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) { | 4157 | (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) { |
4160 | /* | 4158 | /* |
4161 | * For CDBs using TCM struct se_mem linked list scatterlist memory | 4159 | * For CDBs using TCM struct se_mem linked list scatterlist memory |
4162 | * processed into a TCM struct se_subsystem_dev, we do the mapping | 4160 | * processed into a TCM struct se_subsystem_dev, we do the mapping |
4163 | * from the passed physical memory to struct se_mem->se_page here. | 4161 | * from the passed physical memory to struct se_mem->se_page here. |
4164 | */ | 4162 | */ |
4165 | T_TASK(cmd)->t_mem_list = transport_init_se_mem_list(); | 4163 | T_TASK(cmd)->t_mem_list = transport_init_se_mem_list(); |
4166 | if (!(T_TASK(cmd)->t_mem_list)) | 4164 | if (!(T_TASK(cmd)->t_mem_list)) |
4167 | return -ENOMEM; | 4165 | return -ENOMEM; |
4168 | 4166 | ||
4169 | ret = transport_map_sg_to_mem(cmd, | 4167 | ret = transport_map_sg_to_mem(cmd, |
4170 | T_TASK(cmd)->t_mem_list, mem, &se_mem_cnt_out); | 4168 | T_TASK(cmd)->t_mem_list, mem, &se_mem_cnt_out); |
4171 | if (ret < 0) | 4169 | if (ret < 0) |
4172 | return -ENOMEM; | 4170 | return -ENOMEM; |
4173 | 4171 | ||
4174 | T_TASK(cmd)->t_tasks_se_num = se_mem_cnt_out; | 4172 | T_TASK(cmd)->t_tasks_se_num = se_mem_cnt_out; |
4175 | /* | 4173 | /* |
4176 | * Setup BIDI READ list of struct se_mem elements | 4174 | * Setup BIDI READ list of struct se_mem elements |
4177 | */ | 4175 | */ |
4178 | if ((mem_bidi_in) && (sg_mem_bidi_num)) { | 4176 | if ((mem_bidi_in) && (sg_mem_bidi_num)) { |
4179 | T_TASK(cmd)->t_mem_bidi_list = transport_init_se_mem_list(); | 4177 | T_TASK(cmd)->t_mem_bidi_list = transport_init_se_mem_list(); |
4180 | if (!(T_TASK(cmd)->t_mem_bidi_list)) { | 4178 | if (!(T_TASK(cmd)->t_mem_bidi_list)) { |
4181 | kfree(T_TASK(cmd)->t_mem_list); | 4179 | kfree(T_TASK(cmd)->t_mem_list); |
4182 | return -ENOMEM; | 4180 | return -ENOMEM; |
4183 | } | 4181 | } |
4184 | se_mem_cnt_out = 0; | 4182 | se_mem_cnt_out = 0; |
4185 | 4183 | ||
4186 | ret = transport_map_sg_to_mem(cmd, | 4184 | ret = transport_map_sg_to_mem(cmd, |
4187 | T_TASK(cmd)->t_mem_bidi_list, mem_bidi_in, | 4185 | T_TASK(cmd)->t_mem_bidi_list, mem_bidi_in, |
4188 | &se_mem_cnt_out); | 4186 | &se_mem_cnt_out); |
4189 | if (ret < 0) { | 4187 | if (ret < 0) { |
4190 | kfree(T_TASK(cmd)->t_mem_list); | 4188 | kfree(T_TASK(cmd)->t_mem_list); |
4191 | return -ENOMEM; | 4189 | return -ENOMEM; |
4192 | } | 4190 | } |
4193 | 4191 | ||
4194 | T_TASK(cmd)->t_tasks_se_bidi_num = se_mem_cnt_out; | 4192 | T_TASK(cmd)->t_tasks_se_bidi_num = se_mem_cnt_out; |
4195 | } | 4193 | } |
4196 | cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; | 4194 | cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; |
4197 | 4195 | ||
4198 | } else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) { | 4196 | } else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) { |
4199 | if (mem_bidi_in || sg_mem_bidi_num) { | 4197 | if (mem_bidi_in || sg_mem_bidi_num) { |
4200 | printk(KERN_ERR "BIDI-Commands not supported using " | 4198 | printk(KERN_ERR "BIDI-Commands not supported using " |
4201 | "SCF_SCSI_CONTROL_NONSG_IO_CDB\n"); | 4199 | "SCF_SCSI_CONTROL_NONSG_IO_CDB\n"); |
4202 | return -ENOSYS; | 4200 | return -ENOSYS; |
4203 | } | 4201 | } |
4204 | /* | 4202 | /* |
4205 | * For incoming CDBs using a contiguous buffer internally with TCM, | 4203 | * For incoming CDBs using a contiguous buffer internally with TCM, |
4206 | * save the passed struct scatterlist memory. After TCM storage object | 4204 | * save the passed struct scatterlist memory. After TCM storage object |
4207 | * processing has completed for this struct se_cmd, TCM core will call | 4205 | * processing has completed for this struct se_cmd, TCM core will call |
4208 | * transport_memcpy_[write,read]_contig() as necessary from | 4206 | * transport_memcpy_[write,read]_contig() as necessary from |
4209 | * transport_generic_complete_ok() and transport_write_pending() in order | 4207 | * transport_generic_complete_ok() and transport_write_pending() in order |
4210 | * to copy the TCM buffer to/from the original passed *mem in SGL -> | 4208 | * to copy the TCM buffer to/from the original passed *mem in SGL -> |
4211 | * struct scatterlist format. | 4209 | * struct scatterlist format. |
4212 | */ | 4210 | */ |
4213 | cmd->se_cmd_flags |= SCF_PASSTHROUGH_CONTIG_TO_SG; | 4211 | cmd->se_cmd_flags |= SCF_PASSTHROUGH_CONTIG_TO_SG; |
4214 | T_TASK(cmd)->t_task_pt_sgl = mem; | 4212 | T_TASK(cmd)->t_task_pt_sgl = mem; |
4215 | } | 4213 | } |
4216 | 4214 | ||
4217 | return 0; | 4215 | return 0; |
4218 | } | 4216 | } |
4219 | EXPORT_SYMBOL(transport_generic_map_mem_to_cmd); | 4217 | EXPORT_SYMBOL(transport_generic_map_mem_to_cmd); |
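Since the symbol is exported for fabric modules, a call site would look roughly like the sketch below. The function name and surrounding error handling are invented for illustration; only the transport_generic_map_mem_to_cmd() call itself comes from this file:

    /* Hypothetical fabric-module call site: hand TCM the initiator's
     * pre-registered SGL for a non-BIDI command.
     */
    static int example_fabric_map_sgl(struct se_cmd *se_cmd,
                                      struct scatterlist *sgl, u32 sgl_count)
    {
            int ret;

            ret = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
                                                   NULL, 0);
            if (ret < 0)
                    return ret;     /* rejected: -ENOMEM or improper usage */

            return 0;
    }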
4220 | 4218 | ||
4221 | 4219 | ||
4222 | static inline long long transport_dev_end_lba(struct se_device *dev) | 4220 | static inline long long transport_dev_end_lba(struct se_device *dev) |
4223 | { | 4221 | { |
4224 | return dev->transport->get_blocks(dev) + 1; | 4222 | return dev->transport->get_blocks(dev) + 1; |
4225 | } | 4223 | } |
4226 | 4224 | ||
4227 | static int transport_get_sectors(struct se_cmd *cmd) | 4225 | static int transport_get_sectors(struct se_cmd *cmd) |
4228 | { | 4226 | { |
4229 | struct se_device *dev = SE_DEV(cmd); | 4227 | struct se_device *dev = SE_DEV(cmd); |
4230 | 4228 | ||
4231 | T_TASK(cmd)->t_tasks_sectors = | 4229 | T_TASK(cmd)->t_tasks_sectors = |
4232 | (cmd->data_length / DEV_ATTRIB(dev)->block_size); | 4230 | (cmd->data_length / DEV_ATTRIB(dev)->block_size); |
4233 | if (!(T_TASK(cmd)->t_tasks_sectors)) | 4231 | if (!(T_TASK(cmd)->t_tasks_sectors)) |
4234 | T_TASK(cmd)->t_tasks_sectors = 1; | 4232 | T_TASK(cmd)->t_tasks_sectors = 1; |
4235 | 4233 | ||
4236 | if (TRANSPORT(dev)->get_device_type(dev) != TYPE_DISK) | 4234 | if (TRANSPORT(dev)->get_device_type(dev) != TYPE_DISK) |
4237 | return 0; | 4235 | return 0; |
4238 | 4236 | ||
4239 | if ((T_TASK(cmd)->t_task_lba + T_TASK(cmd)->t_tasks_sectors) > | 4237 | if ((T_TASK(cmd)->t_task_lba + T_TASK(cmd)->t_tasks_sectors) > |
4240 | transport_dev_end_lba(dev)) { | 4238 | transport_dev_end_lba(dev)) { |
4241 | printk(KERN_ERR "LBA: %llu Sectors: %u exceeds" | 4239 | printk(KERN_ERR "LBA: %llu Sectors: %u exceeds" |
4242 | " transport_dev_end_lba(): %llu\n", | 4240 | " transport_dev_end_lba(): %llu\n", |
4243 | T_TASK(cmd)->t_task_lba, T_TASK(cmd)->t_tasks_sectors, | 4241 | T_TASK(cmd)->t_task_lba, T_TASK(cmd)->t_tasks_sectors, |
4244 | transport_dev_end_lba(dev)); | 4242 | transport_dev_end_lba(dev)); |
4245 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 4243 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
4246 | cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY; | 4244 | cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY; |
4247 | return PYX_TRANSPORT_REQ_TOO_MANY_SECTORS; | 4245 | return PYX_TRANSPORT_REQ_TOO_MANY_SECTORS; |
4248 | } | 4246 | } |
4249 | 4247 | ||
4250 | return 0; | 4248 | return 0; |
4251 | } | 4249 | } |
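A quick worked example of the sector math above (numbers invented):

    /* DEV_ATTRIB(dev)->block_size = 512, cmd->data_length = 8192:
     *   t_tasks_sectors = 8192 / 512 = 16
     * For a TYPE_DISK device whose get_blocks() returns 2047 (last LBA),
     * transport_dev_end_lba() = 2048, so t_task_lba + 16 > 2048 fails
     * the command with TCM_SECTOR_COUNT_TOO_MANY.
     */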
4252 | 4250 | ||
4253 | static int transport_new_cmd_obj(struct se_cmd *cmd) | 4251 | static int transport_new_cmd_obj(struct se_cmd *cmd) |
4254 | { | 4252 | { |
4255 | struct se_device *dev = SE_DEV(cmd); | 4253 | struct se_device *dev = SE_DEV(cmd); |
4256 | u32 task_cdbs = 0, rc; | 4254 | u32 task_cdbs = 0, rc; |
4257 | 4255 | ||
4258 | if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) { | 4256 | if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) { |
4259 | task_cdbs++; | 4257 | task_cdbs++; |
4260 | T_TASK(cmd)->t_task_cdbs++; | 4258 | T_TASK(cmd)->t_task_cdbs++; |
4261 | } else { | 4259 | } else { |
4262 | int set_counts = 1; | 4260 | int set_counts = 1; |
4263 | 4261 | ||
4264 | /* | 4262 | /* |
4265 | * Setup any BIDI READ tasks and memory from | 4263 | * Setup any BIDI READ tasks and memory from |
4266 | * T_TASK(cmd)->t_mem_bidi_list so the READ struct se_tasks | 4264 | * T_TASK(cmd)->t_mem_bidi_list so the READ struct se_tasks |
4267 | * are queued first for the non-pSCSI passthrough case. | 4265 | * are queued first for the non-pSCSI passthrough case. |
4268 | */ | 4266 | */ |
4269 | if ((T_TASK(cmd)->t_mem_bidi_list != NULL) && | 4267 | if ((T_TASK(cmd)->t_mem_bidi_list != NULL) && |
4270 | (TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) { | 4268 | (TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) { |
4271 | rc = transport_generic_get_cdb_count(cmd, | 4269 | rc = transport_generic_get_cdb_count(cmd, |
4272 | T_TASK(cmd)->t_task_lba, | 4270 | T_TASK(cmd)->t_task_lba, |
4273 | T_TASK(cmd)->t_tasks_sectors, | 4271 | T_TASK(cmd)->t_tasks_sectors, |
4274 | DMA_FROM_DEVICE, T_TASK(cmd)->t_mem_bidi_list, | 4272 | DMA_FROM_DEVICE, T_TASK(cmd)->t_mem_bidi_list, |
4275 | set_counts); | 4273 | set_counts); |
4276 | if (!(rc)) { | 4274 | if (!(rc)) { |
4277 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 4275 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
4278 | cmd->scsi_sense_reason = | 4276 | cmd->scsi_sense_reason = |
4279 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 4277 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
4280 | return PYX_TRANSPORT_LU_COMM_FAILURE; | 4278 | return PYX_TRANSPORT_LU_COMM_FAILURE; |
4281 | } | 4279 | } |
4282 | set_counts = 0; | 4280 | set_counts = 0; |
4283 | } | 4281 | } |
4284 | /* | 4282 | /* |
4285 | * Setup the tasks and memory from T_TASK(cmd)->t_mem_list | 4283 | * Setup the tasks and memory from T_TASK(cmd)->t_mem_list |
4286 | * Note for BIDI transfers this will contain the WRITE payload | 4284 | * Note for BIDI transfers this will contain the WRITE payload |
4287 | */ | 4285 | */ |
4288 | task_cdbs = transport_generic_get_cdb_count(cmd, | 4286 | task_cdbs = transport_generic_get_cdb_count(cmd, |
4289 | T_TASK(cmd)->t_task_lba, | 4287 | T_TASK(cmd)->t_task_lba, |
4290 | T_TASK(cmd)->t_tasks_sectors, | 4288 | T_TASK(cmd)->t_tasks_sectors, |
4291 | cmd->data_direction, T_TASK(cmd)->t_mem_list, | 4289 | cmd->data_direction, T_TASK(cmd)->t_mem_list, |
4292 | set_counts); | 4290 | set_counts); |
4293 | if (!(task_cdbs)) { | 4291 | if (!(task_cdbs)) { |
4294 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 4292 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
4295 | cmd->scsi_sense_reason = | 4293 | cmd->scsi_sense_reason = |
4296 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 4294 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
4297 | return PYX_TRANSPORT_LU_COMM_FAILURE; | 4295 | return PYX_TRANSPORT_LU_COMM_FAILURE; |
4298 | } | 4296 | } |
4299 | T_TASK(cmd)->t_task_cdbs += task_cdbs; | 4297 | T_TASK(cmd)->t_task_cdbs += task_cdbs; |
4300 | 4298 | ||
4301 | #if 0 | 4299 | #if 0 |
4302 | printk(KERN_INFO "data_length: %u, LBA: %llu t_tasks_sectors:" | 4300 | printk(KERN_INFO "data_length: %u, LBA: %llu t_tasks_sectors:" |
4303 | " %u, t_task_cdbs: %u\n", obj_ptr, cmd->data_length, | 4301 | " %u, t_task_cdbs: %u\n", obj_ptr, cmd->data_length, |
4304 | T_TASK(cmd)->t_task_lba, T_TASK(cmd)->t_tasks_sectors, | 4302 | T_TASK(cmd)->t_task_lba, T_TASK(cmd)->t_tasks_sectors, |
4305 | T_TASK(cmd)->t_task_cdbs); | 4303 | T_TASK(cmd)->t_task_cdbs); |
4306 | #endif | 4304 | #endif |
4307 | } | 4305 | } |
4308 | 4306 | ||
4309 | atomic_set(&T_TASK(cmd)->t_task_cdbs_left, task_cdbs); | 4307 | atomic_set(&T_TASK(cmd)->t_task_cdbs_left, task_cdbs); |
4310 | atomic_set(&T_TASK(cmd)->t_task_cdbs_ex_left, task_cdbs); | 4308 | atomic_set(&T_TASK(cmd)->t_task_cdbs_ex_left, task_cdbs); |
4311 | atomic_set(&T_TASK(cmd)->t_task_cdbs_timeout_left, task_cdbs); | 4309 | atomic_set(&T_TASK(cmd)->t_task_cdbs_timeout_left, task_cdbs); |
4312 | return 0; | 4310 | return 0; |
4313 | } | 4311 | } |
4314 | 4312 | ||
4315 | static struct list_head *transport_init_se_mem_list(void) | 4313 | static struct list_head *transport_init_se_mem_list(void) |
4316 | { | 4314 | { |
4317 | struct list_head *se_mem_list; | 4315 | struct list_head *se_mem_list; |
4318 | 4316 | ||
4319 | se_mem_list = kzalloc(sizeof(struct list_head), GFP_KERNEL); | 4317 | se_mem_list = kzalloc(sizeof(struct list_head), GFP_KERNEL); |
4320 | if (!(se_mem_list)) { | 4318 | if (!(se_mem_list)) { |
4321 | printk(KERN_ERR "Unable to allocate memory for se_mem_list\n"); | 4319 | printk(KERN_ERR "Unable to allocate memory for se_mem_list\n"); |
4322 | return NULL; | 4320 | return NULL; |
4323 | } | 4321 | } |
4324 | INIT_LIST_HEAD(se_mem_list); | 4322 | INIT_LIST_HEAD(se_mem_list); |
4325 | 4323 | ||
4326 | return se_mem_list; | 4324 | return se_mem_list; |
4327 | } | 4325 | } |
4328 | 4326 | ||
4329 | static int | 4327 | static int |
4330 | transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size) | 4328 | transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size) |
4331 | { | 4329 | { |
4332 | unsigned char *buf; | 4330 | unsigned char *buf; |
4333 | struct se_mem *se_mem; | 4331 | struct se_mem *se_mem; |
4334 | 4332 | ||
4335 | T_TASK(cmd)->t_mem_list = transport_init_se_mem_list(); | 4333 | T_TASK(cmd)->t_mem_list = transport_init_se_mem_list(); |
4336 | if (!(T_TASK(cmd)->t_mem_list)) | 4334 | if (!(T_TASK(cmd)->t_mem_list)) |
4337 | return -ENOMEM; | 4335 | return -ENOMEM; |
4338 | 4336 | ||
4339 | /* | 4337 | /* |
4340 | * If the device uses memory mapping, this is enough. | 4338 | * If the device uses memory mapping, this is enough. |
4341 | */ | 4339 | */ |
4342 | if (cmd->se_dev->transport->do_se_mem_map) | 4340 | if (cmd->se_dev->transport->do_se_mem_map) |
4343 | return 0; | 4341 | return 0; |
4344 | 4342 | ||
4345 | /* | 4343 | /* |
4346 | * Setup BIDI-COMMAND READ list of struct se_mem elements | 4344 | * Setup BIDI-COMMAND READ list of struct se_mem elements |
4347 | */ | 4345 | */ |
4348 | if (T_TASK(cmd)->t_tasks_bidi) { | 4346 | if (T_TASK(cmd)->t_tasks_bidi) { |
4349 | T_TASK(cmd)->t_mem_bidi_list = transport_init_se_mem_list(); | 4347 | T_TASK(cmd)->t_mem_bidi_list = transport_init_se_mem_list(); |
4350 | if (!(T_TASK(cmd)->t_mem_bidi_list)) { | 4348 | if (!(T_TASK(cmd)->t_mem_bidi_list)) { |
4351 | kfree(T_TASK(cmd)->t_mem_list); | 4349 | kfree(T_TASK(cmd)->t_mem_list); |
4352 | return -ENOMEM; | 4350 | return -ENOMEM; |
4353 | } | 4351 | } |
4354 | } | 4352 | } |
4355 | 4353 | ||
4356 | while (length) { | 4354 | while (length) { |
4357 | se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL); | 4355 | se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL); |
4358 | if (!(se_mem)) { | 4356 | if (!(se_mem)) { |
4359 | printk(KERN_ERR "Unable to allocate struct se_mem\n"); | 4357 | printk(KERN_ERR "Unable to allocate struct se_mem\n"); |
4360 | goto out; | 4358 | goto out; |
4361 | } | 4359 | } |
4362 | INIT_LIST_HEAD(&se_mem->se_list); | 4360 | INIT_LIST_HEAD(&se_mem->se_list); |
4363 | se_mem->se_len = (length > dma_size) ? dma_size : length; | 4361 | se_mem->se_len = (length > dma_size) ? dma_size : length; |
4364 | 4362 | ||
4365 | /* #warning FIXME Allocate contiguous pages for struct se_mem elements */ | 4363 | /* #warning FIXME Allocate contiguous pages for struct se_mem elements */ |
4366 | se_mem->se_page = (struct page *) alloc_pages(GFP_KERNEL, 0); | 4364 | se_mem->se_page = (struct page *) alloc_pages(GFP_KERNEL, 0); |
4367 | if (!(se_mem->se_page)) { | 4365 | if (!(se_mem->se_page)) { |
4368 | printk(KERN_ERR "alloc_pages() failed\n"); | 4366 | printk(KERN_ERR "alloc_pages() failed\n"); |
4369 | goto out; | 4367 | goto out; |
4370 | } | 4368 | } |
4371 | 4369 | ||
4372 | buf = kmap_atomic(se_mem->se_page, KM_IRQ0); | 4370 | buf = kmap_atomic(se_mem->se_page, KM_IRQ0); |
4373 | if (!(buf)) { | 4371 | if (!(buf)) { |
4374 | printk(KERN_ERR "kmap_atomic() failed\n"); | 4372 | printk(KERN_ERR "kmap_atomic() failed\n"); |
4375 | goto out; | 4373 | goto out; |
4376 | } | 4374 | } |
4377 | memset(buf, 0, se_mem->se_len); | 4375 | memset(buf, 0, se_mem->se_len); |
4378 | kunmap_atomic(buf, KM_IRQ0); | 4376 | kunmap_atomic(buf, KM_IRQ0); |
4379 | 4377 | ||
4380 | list_add_tail(&se_mem->se_list, T_TASK(cmd)->t_mem_list); | 4378 | list_add_tail(&se_mem->se_list, T_TASK(cmd)->t_mem_list); |
4381 | T_TASK(cmd)->t_tasks_se_num++; | 4379 | T_TASK(cmd)->t_tasks_se_num++; |
4382 | 4380 | ||
4383 | DEBUG_MEM("Allocated struct se_mem page(%p) Length(%u)" | 4381 | DEBUG_MEM("Allocated struct se_mem page(%p) Length(%u)" |
4384 | " Offset(%u)\n", se_mem->se_page, se_mem->se_len, | 4382 | " Offset(%u)\n", se_mem->se_page, se_mem->se_len, |
4385 | se_mem->se_off); | 4383 | se_mem->se_off); |
4386 | 4384 | ||
4387 | length -= se_mem->se_len; | 4385 | length -= se_mem->se_len; |
4388 | } | 4386 | } |
4389 | 4387 | ||
4390 | DEBUG_MEM("Allocated total struct se_mem elements(%u)\n", | 4388 | DEBUG_MEM("Allocated total struct se_mem elements(%u)\n", |
4391 | T_TASK(cmd)->t_tasks_se_num); | 4389 | T_TASK(cmd)->t_tasks_se_num); |
4392 | 4390 | ||
4393 | return 0; | 4391 | return 0; |
4394 | out: | 4392 | out: |
4395 | return -1; | 4393 | return -1; |
4396 | } | 4394 | } |
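For example (values invented), a length of 10000 bytes with dma_size = 4096 is satisfied by the allocation loop above as:

    /* pass 1: se_len = 4096 (order-0 page), remaining = 5904
     * pass 2: se_len = 4096,                remaining = 1808
     * pass 3: se_len = 1808,                remaining = 0
     * -> three zeroed pages queued on t_mem_list, t_tasks_se_num = 3
     */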
4397 | 4395 | ||
4398 | extern u32 transport_calc_sg_num( | 4396 | u32 transport_calc_sg_num( |
4399 | struct se_task *task, | 4397 | struct se_task *task, |
4400 | struct se_mem *in_se_mem, | 4398 | struct se_mem *in_se_mem, |
4401 | u32 task_offset) | 4399 | u32 task_offset) |
4402 | { | 4400 | { |
4403 | struct se_cmd *se_cmd = task->task_se_cmd; | 4401 | struct se_cmd *se_cmd = task->task_se_cmd; |
4404 | struct se_device *se_dev = SE_DEV(se_cmd); | 4402 | struct se_device *se_dev = SE_DEV(se_cmd); |
4405 | struct se_mem *se_mem = in_se_mem; | 4403 | struct se_mem *se_mem = in_se_mem; |
4406 | struct target_core_fabric_ops *tfo = CMD_TFO(se_cmd); | 4404 | struct target_core_fabric_ops *tfo = CMD_TFO(se_cmd); |
4407 | u32 sg_length, task_size = task->task_size, task_sg_num_padded; | 4405 | u32 sg_length, task_size = task->task_size, task_sg_num_padded; |
4408 | 4406 | ||
4409 | while (task_size != 0) { | 4407 | while (task_size != 0) { |
4410 | DEBUG_SC("se_mem->se_page(%p) se_mem->se_len(%u)" | 4408 | DEBUG_SC("se_mem->se_page(%p) se_mem->se_len(%u)" |
4411 | " se_mem->se_off(%u) task_offset(%u)\n", | 4409 | " se_mem->se_off(%u) task_offset(%u)\n", |
4412 | se_mem->se_page, se_mem->se_len, | 4410 | se_mem->se_page, se_mem->se_len, |
4413 | se_mem->se_off, task_offset); | 4411 | se_mem->se_off, task_offset); |
4414 | 4412 | ||
4415 | if (task_offset == 0) { | 4413 | if (task_offset == 0) { |
4416 | if (task_size >= se_mem->se_len) { | 4414 | if (task_size >= se_mem->se_len) { |
4417 | sg_length = se_mem->se_len; | 4415 | sg_length = se_mem->se_len; |
4418 | 4416 | ||
4419 | if (!(list_is_last(&se_mem->se_list, | 4417 | if (!(list_is_last(&se_mem->se_list, |
4420 | T_TASK(se_cmd)->t_mem_list))) | 4418 | T_TASK(se_cmd)->t_mem_list))) |
4421 | se_mem = list_entry(se_mem->se_list.next, | 4419 | se_mem = list_entry(se_mem->se_list.next, |
4422 | struct se_mem, se_list); | 4420 | struct se_mem, se_list); |
4423 | } else { | 4421 | } else { |
4424 | sg_length = task_size; | 4422 | sg_length = task_size; |
4425 | task_size -= sg_length; | 4423 | task_size -= sg_length; |
4426 | goto next; | 4424 | goto next; |
4427 | } | 4425 | } |
4428 | 4426 | ||
4429 | DEBUG_SC("sg_length(%u) task_size(%u)\n", | 4427 | DEBUG_SC("sg_length(%u) task_size(%u)\n", |
4430 | sg_length, task_size); | 4428 | sg_length, task_size); |
4431 | } else { | 4429 | } else { |
4432 | if ((se_mem->se_len - task_offset) > task_size) { | 4430 | if ((se_mem->se_len - task_offset) > task_size) { |
4433 | sg_length = task_size; | 4431 | sg_length = task_size; |
4434 | task_size -= sg_length; | 4432 | task_size -= sg_length; |
4435 | goto next; | 4433 | goto next; |
4436 | } else { | 4434 | } else { |
4437 | sg_length = (se_mem->se_len - task_offset); | 4435 | sg_length = (se_mem->se_len - task_offset); |
4438 | 4436 | ||
4439 | if (!(list_is_last(&se_mem->se_list, | 4437 | if (!(list_is_last(&se_mem->se_list, |
4440 | T_TASK(se_cmd)->t_mem_list))) | 4438 | T_TASK(se_cmd)->t_mem_list))) |
4441 | se_mem = list_entry(se_mem->se_list.next, | 4439 | se_mem = list_entry(se_mem->se_list.next, |
4442 | struct se_mem, se_list); | 4440 | struct se_mem, se_list); |
4443 | } | 4441 | } |
4444 | 4442 | ||
4445 | DEBUG_SC("sg_length(%u) task_size(%u)\n", | 4443 | DEBUG_SC("sg_length(%u) task_size(%u)\n", |
4446 | sg_length, task_size); | 4444 | sg_length, task_size); |
4447 | 4445 | ||
4448 | task_offset = 0; | 4446 | task_offset = 0; |
4449 | } | 4447 | } |
4450 | task_size -= sg_length; | 4448 | task_size -= sg_length; |
4451 | next: | 4449 | next: |
4452 | DEBUG_SC("task[%u] - Reducing task_size to(%u)\n", | 4450 | DEBUG_SC("task[%u] - Reducing task_size to(%u)\n", |
4453 | task->task_no, task_size); | 4451 | task->task_no, task_size); |
4454 | 4452 | ||
4455 | task->task_sg_num++; | 4453 | task->task_sg_num++; |
4456 | } | 4454 | } |
4457 | /* | 4455 | /* |
4458 | * Check if the fabric module driver is requesting that all | 4456 | * Check if the fabric module driver is requesting that all |
4459 | * struct se_task->task_sg[] be chained together. If so, | 4457 | * struct se_task->task_sg[] be chained together. If so, |
4460 | * then allocate an extra padding SG entry for linking and | 4458 | * then allocate an extra padding SG entry for linking and |
4461 | * marking the end of the chained SGL. | 4459 | * marking the end of the chained SGL. |
4462 | */ | 4460 | */ |
4463 | if (tfo->task_sg_chaining) { | 4461 | if (tfo->task_sg_chaining) { |
4464 | task_sg_num_padded = (task->task_sg_num + 1); | 4462 | task_sg_num_padded = (task->task_sg_num + 1); |
4465 | task->task_padded_sg = 1; | 4463 | task->task_padded_sg = 1; |
4466 | } else | 4464 | } else |
4467 | task_sg_num_padded = task->task_sg_num; | 4465 | task_sg_num_padded = task->task_sg_num; |
4468 | 4466 | ||
4469 | task->task_sg = kzalloc(task_sg_num_padded * | 4467 | task->task_sg = kzalloc(task_sg_num_padded * |
4470 | sizeof(struct scatterlist), GFP_KERNEL); | 4468 | sizeof(struct scatterlist), GFP_KERNEL); |
4471 | if (!(task->task_sg)) { | 4469 | if (!(task->task_sg)) { |
4472 | printk(KERN_ERR "Unable to allocate memory for" | 4470 | printk(KERN_ERR "Unable to allocate memory for" |
4473 | " task->task_sg\n"); | 4471 | " task->task_sg\n"); |
4474 | return 0; | 4472 | return 0; |
4475 | } | 4473 | } |
4476 | sg_init_table(&task->task_sg[0], task_sg_num_padded); | 4474 | sg_init_table(&task->task_sg[0], task_sg_num_padded); |
4477 | /* | 4475 | /* |
4478 | * Setup task->task_sg_bidi for SCSI READ payload for | 4476 | * Setup task->task_sg_bidi for SCSI READ payload for |
4479 | * TCM/pSCSI passthrough if present for BIDI-COMMAND | 4477 | * TCM/pSCSI passthrough if present for BIDI-COMMAND |
4480 | */ | 4478 | */ |
4481 | if ((T_TASK(se_cmd)->t_mem_bidi_list != NULL) && | 4479 | if ((T_TASK(se_cmd)->t_mem_bidi_list != NULL) && |
4482 | (TRANSPORT(se_dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) { | 4480 | (TRANSPORT(se_dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) { |
4483 | task->task_sg_bidi = kzalloc(task_sg_num_padded * | 4481 | task->task_sg_bidi = kzalloc(task_sg_num_padded * |
4484 | sizeof(struct scatterlist), GFP_KERNEL); | 4482 | sizeof(struct scatterlist), GFP_KERNEL); |
4485 | if (!(task->task_sg_bidi)) { | 4483 | if (!(task->task_sg_bidi)) { |
4486 | printk(KERN_ERR "Unable to allocate memory for" | 4484 | printk(KERN_ERR "Unable to allocate memory for" |
4487 | " task->task_sg_bidi\n"); | 4485 | " task->task_sg_bidi\n"); |
4488 | return 0; | 4486 | return 0; |
4489 | } | 4487 | } |
4490 | sg_init_table(&task->task_sg_bidi[0], task_sg_num_padded); | 4488 | sg_init_table(&task->task_sg_bidi[0], task_sg_num_padded); |
4491 | } | 4489 | } |
4492 | /* | 4490 | /* |
4493 | * For the chaining case, setup the proper end of SGL for the | 4491 | * For the chaining case, setup the proper end of SGL for the |
4494 | * initial submission struct task into struct se_subsystem_api. | 4492 | * initial submission struct task into struct se_subsystem_api. |
4495 | * This will be cleared later by transport_do_task_sg_chain() | 4493 | * This will be cleared later by transport_do_task_sg_chain() |
4496 | */ | 4494 | */ |
4497 | if (task->task_padded_sg) { | 4495 | if (task->task_padded_sg) { |
4498 | sg_mark_end(&task->task_sg[task->task_sg_num - 1]); | 4496 | sg_mark_end(&task->task_sg[task->task_sg_num - 1]); |
4499 | /* | 4497 | /* |
4500 | * Added the 'if' check before marking the end of the bi-directional | 4498 | * Added the 'if' check before marking the end of the bi-directional |
4501 | * scatterlist, which gets created only for bi-directional | 4499 | * scatterlist, which gets created only for bi-directional |
4502 | * requests (RD + WR). | 4500 | * requests (RD + WR). |
4503 | */ | 4501 | */ |
4504 | if (task->task_sg_bidi) | 4502 | if (task->task_sg_bidi) |
4505 | sg_mark_end(&task->task_sg_bidi[task->task_sg_num - 1]); | 4503 | sg_mark_end(&task->task_sg_bidi[task->task_sg_num - 1]); |
4506 | } | 4504 | } |
4507 | 4505 | ||
4508 | DEBUG_SC("Successfully allocated task->task_sg_num(%u)," | 4506 | DEBUG_SC("Successfully allocated task->task_sg_num(%u)," |
4509 | " task_sg_num_padded(%u)\n", task->task_sg_num, | 4507 | " task_sg_num_padded(%u)\n", task->task_sg_num, |
4510 | task_sg_num_padded); | 4508 | task_sg_num_padded); |
4511 | 4509 | ||
4512 | return task->task_sg_num; | 4510 | return task->task_sg_num; |
4513 | } | 4511 | } |
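With SG chaining requested by the fabric (tfo->task_sg_chaining), a task spanning four se_mem segments would be sized as follows:

    /* task->task_sg_num  = 4  (counted by the se_mem walk above)
     * task_sg_num_padded = 5  (one extra entry reserved for chain linking)
     * sg_mark_end() lands on task_sg[3] for the initial submission and is
     * cleared later by transport_do_task_sg_chain().
     */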
4514 | 4512 | ||
4515 | static inline int transport_set_tasks_sectors_disk( | 4513 | static inline int transport_set_tasks_sectors_disk( |
4516 | struct se_task *task, | 4514 | struct se_task *task, |
4517 | struct se_device *dev, | 4515 | struct se_device *dev, |
4518 | unsigned long long lba, | 4516 | unsigned long long lba, |
4519 | u32 sectors, | 4517 | u32 sectors, |
4520 | int *max_sectors_set) | 4518 | int *max_sectors_set) |
4521 | { | 4519 | { |
4522 | if ((lba + sectors) > transport_dev_end_lba(dev)) { | 4520 | if ((lba + sectors) > transport_dev_end_lba(dev)) { |
4523 | task->task_sectors = ((transport_dev_end_lba(dev) - lba) + 1); | 4521 | task->task_sectors = ((transport_dev_end_lba(dev) - lba) + 1); |
4524 | 4522 | ||
4525 | if (task->task_sectors > DEV_ATTRIB(dev)->max_sectors) { | 4523 | if (task->task_sectors > DEV_ATTRIB(dev)->max_sectors) { |
4526 | task->task_sectors = DEV_ATTRIB(dev)->max_sectors; | 4524 | task->task_sectors = DEV_ATTRIB(dev)->max_sectors; |
4527 | *max_sectors_set = 1; | 4525 | *max_sectors_set = 1; |
4528 | } | 4526 | } |
4529 | } else { | 4527 | } else { |
4530 | if (sectors > DEV_ATTRIB(dev)->max_sectors) { | 4528 | if (sectors > DEV_ATTRIB(dev)->max_sectors) { |
4531 | task->task_sectors = DEV_ATTRIB(dev)->max_sectors; | 4529 | task->task_sectors = DEV_ATTRIB(dev)->max_sectors; |
4532 | *max_sectors_set = 1; | 4530 | *max_sectors_set = 1; |
4533 | } else | 4531 | } else |
4534 | task->task_sectors = sectors; | 4532 | task->task_sectors = sectors; |
4535 | } | 4533 | } |
4536 | 4534 | ||
4537 | return 0; | 4535 | return 0; |
4538 | } | 4536 | } |
4539 | 4537 | ||
4540 | static inline int transport_set_tasks_sectors_non_disk( | 4538 | static inline int transport_set_tasks_sectors_non_disk( |
4541 | struct se_task *task, | 4539 | struct se_task *task, |
4542 | struct se_device *dev, | 4540 | struct se_device *dev, |
4543 | unsigned long long lba, | 4541 | unsigned long long lba, |
4544 | u32 sectors, | 4542 | u32 sectors, |
4545 | int *max_sectors_set) | 4543 | int *max_sectors_set) |
4546 | { | 4544 | { |
4547 | if (sectors > DEV_ATTRIB(dev)->max_sectors) { | 4545 | if (sectors > DEV_ATTRIB(dev)->max_sectors) { |
4548 | task->task_sectors = DEV_ATTRIB(dev)->max_sectors; | 4546 | task->task_sectors = DEV_ATTRIB(dev)->max_sectors; |
4549 | *max_sectors_set = 1; | 4547 | *max_sectors_set = 1; |
4550 | } else | 4548 | } else |
4551 | task->task_sectors = sectors; | 4549 | task->task_sectors = sectors; |
4552 | 4550 | ||
4553 | return 0; | 4551 | return 0; |
4554 | } | 4552 | } |
4555 | 4553 | ||
4556 | static inline int transport_set_tasks_sectors( | 4554 | static inline int transport_set_tasks_sectors( |
4557 | struct se_task *task, | 4555 | struct se_task *task, |
4558 | struct se_device *dev, | 4556 | struct se_device *dev, |
4559 | unsigned long long lba, | 4557 | unsigned long long lba, |
4560 | u32 sectors, | 4558 | u32 sectors, |
4561 | int *max_sectors_set) | 4559 | int *max_sectors_set) |
4562 | { | 4560 | { |
4563 | return (TRANSPORT(dev)->get_device_type(dev) == TYPE_DISK) ? | 4561 | return (TRANSPORT(dev)->get_device_type(dev) == TYPE_DISK) ? |
4564 | transport_set_tasks_sectors_disk(task, dev, lba, sectors, | 4562 | transport_set_tasks_sectors_disk(task, dev, lba, sectors, |
4565 | max_sectors_set) : | 4563 | max_sectors_set) : |
4566 | transport_set_tasks_sectors_non_disk(task, dev, lba, sectors, | 4564 | transport_set_tasks_sectors_non_disk(task, dev, lba, sectors, |
4567 | max_sectors_set); | 4565 | max_sectors_set); |
4568 | } | 4566 | } |
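Illustrating the clamp with invented values, DEV_ATTRIB(dev)->max_sectors = 256 and a 1024-sector request give:

    /* task->task_sectors = 256 and *max_sectors_set = 1, telling the
     * caller the request was truncated and further tasks are needed
     * for the remaining 768 sectors.
     */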
4569 | 4567 | ||
4570 | static int transport_map_sg_to_mem( | 4568 | static int transport_map_sg_to_mem( |
4571 | struct se_cmd *cmd, | 4569 | struct se_cmd *cmd, |
4572 | struct list_head *se_mem_list, | 4570 | struct list_head *se_mem_list, |
4573 | void *in_mem, | 4571 | void *in_mem, |
4574 | u32 *se_mem_cnt) | 4572 | u32 *se_mem_cnt) |
4575 | { | 4573 | { |
4576 | struct se_mem *se_mem; | 4574 | struct se_mem *se_mem; |
4577 | struct scatterlist *sg; | 4575 | struct scatterlist *sg; |
4578 | u32 sg_count = 1, cmd_size = cmd->data_length; | 4576 | u32 sg_count = 1, cmd_size = cmd->data_length; |
4579 | 4577 | ||
4580 | if (!in_mem) { | 4578 | if (!in_mem) { |
4581 | printk(KERN_ERR "No source scatterlist\n"); | 4579 | printk(KERN_ERR "No source scatterlist\n"); |
4582 | return -1; | 4580 | return -1; |
4583 | } | 4581 | } |
4584 | sg = (struct scatterlist *)in_mem; | 4582 | sg = (struct scatterlist *)in_mem; |
4585 | 4583 | ||
4586 | while (cmd_size) { | 4584 | while (cmd_size) { |
4587 | se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL); | 4585 | se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL); |
4588 | if (!(se_mem)) { | 4586 | if (!(se_mem)) { |
4589 | printk(KERN_ERR "Unable to allocate struct se_mem\n"); | 4587 | printk(KERN_ERR "Unable to allocate struct se_mem\n"); |
4590 | return -1; | 4588 | return -1; |
4591 | } | 4589 | } |
4592 | INIT_LIST_HEAD(&se_mem->se_list); | 4590 | INIT_LIST_HEAD(&se_mem->se_list); |
4593 | DEBUG_MEM("sg_to_mem: Starting loop with cmd_size: %u" | 4591 | DEBUG_MEM("sg_to_mem: Starting loop with cmd_size: %u" |
4594 | " sg_page: %p offset: %d length: %d\n", cmd_size, | 4592 | " sg_page: %p offset: %d length: %d\n", cmd_size, |
4595 | sg_page(sg), sg->offset, sg->length); | 4593 | sg_page(sg), sg->offset, sg->length); |
4596 | 4594 | ||
4597 | se_mem->se_page = sg_page(sg); | 4595 | se_mem->se_page = sg_page(sg); |
4598 | se_mem->se_off = sg->offset; | 4596 | se_mem->se_off = sg->offset; |
4599 | 4597 | ||
4600 | if (cmd_size > sg->length) { | 4598 | if (cmd_size > sg->length) { |
4601 | se_mem->se_len = sg->length; | 4599 | se_mem->se_len = sg->length; |
4602 | sg = sg_next(sg); | 4600 | sg = sg_next(sg); |
4603 | sg_count++; | 4601 | sg_count++; |
4604 | } else | 4602 | } else |
4605 | se_mem->se_len = cmd_size; | 4603 | se_mem->se_len = cmd_size; |
4606 | 4604 | ||
4607 | cmd_size -= se_mem->se_len; | 4605 | cmd_size -= se_mem->se_len; |
4608 | 4606 | ||
4609 | DEBUG_MEM("sg_to_mem: *se_mem_cnt: %u cmd_size: %u\n", | 4607 | DEBUG_MEM("sg_to_mem: *se_mem_cnt: %u cmd_size: %u\n", |
4610 | *se_mem_cnt, cmd_size); | 4608 | *se_mem_cnt, cmd_size); |
4611 | DEBUG_MEM("sg_to_mem: Final se_page: %p se_off: %d se_len: %d\n", | 4609 | DEBUG_MEM("sg_to_mem: Final se_page: %p se_off: %d se_len: %d\n", |
4612 | se_mem->se_page, se_mem->se_off, se_mem->se_len); | 4610 | se_mem->se_page, se_mem->se_off, se_mem->se_len); |
4613 | 4611 | ||
4614 | list_add_tail(&se_mem->se_list, se_mem_list); | 4612 | list_add_tail(&se_mem->se_list, se_mem_list); |
4615 | (*se_mem_cnt)++; | 4613 | (*se_mem_cnt)++; |
4616 | } | 4614 | } |
4617 | 4615 | ||
4618 | DEBUG_MEM("task[0] - Mapped(%u) struct scatterlist segments to(%u)" | 4616 | DEBUG_MEM("task[0] - Mapped(%u) struct scatterlist segments to(%u)" |
4619 | " struct se_mem\n", sg_count, *se_mem_cnt); | 4617 | " struct se_mem\n", sg_count, *se_mem_cnt); |
4620 | 4618 | ||
4621 | if (sg_count != *se_mem_cnt) | 4619 | if (sg_count != *se_mem_cnt) |
4622 | BUG(); | 4620 | BUG(); |
4623 | 4621 | ||
4624 | return 0; | 4622 | return 0; |
4625 | } | 4623 | } |
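
Editor's note: a standalone sketch of the walk above, with a plain array standing in for struct scatterlist and a counter standing in for the struct se_mem list; every name here is illustrative. Each source segment is consumed in full until the final, possibly partial, segment covers the remaining cmd_size bytes.

	#include <stdio.h>

	struct segment { unsigned int len; };	/* stand-in for scatterlist */

	static unsigned int map_segments(const struct segment *sg,
					 unsigned int nsg, unsigned int cmd_size)
	{
		unsigned int i = 0, mapped = 0;

		while (cmd_size && i < nsg) {
			/* Whole segment, or only the remaining bytes. */
			unsigned int take =
				cmd_size > sg[i].len ? sg[i].len : cmd_size;

			cmd_size -= take;
			mapped++;	/* one se_mem per source segment */
			i++;
		}
		return mapped;
	}

	int main(void)
	{
		struct segment sg[] = { {4096}, {4096}, {4096} };

		/* 10000 bytes spans three segments: 4096 + 4096 + 1808 */
		printf("mapped %u segments\n", map_segments(sg, 3, 10000));
		return 0;
	}
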
4626 | 4624 | ||
4627 | /* transport_map_mem_to_sg(): | 4625 | /* transport_map_mem_to_sg(): |
4628 | * | 4626 | * |
4629 | * | 4627 | * |
4630 | */ | 4628 | */ |
4631 | int transport_map_mem_to_sg( | 4629 | int transport_map_mem_to_sg( |
4632 | struct se_task *task, | 4630 | struct se_task *task, |
4633 | struct list_head *se_mem_list, | 4631 | struct list_head *se_mem_list, |
4634 | void *in_mem, | 4632 | void *in_mem, |
4635 | struct se_mem *in_se_mem, | 4633 | struct se_mem *in_se_mem, |
4636 | struct se_mem **out_se_mem, | 4634 | struct se_mem **out_se_mem, |
4637 | u32 *se_mem_cnt, | 4635 | u32 *se_mem_cnt, |
4638 | u32 *task_offset) | 4636 | u32 *task_offset) |
4639 | { | 4637 | { |
4640 | struct se_cmd *se_cmd = task->task_se_cmd; | 4638 | struct se_cmd *se_cmd = task->task_se_cmd; |
4641 | struct se_mem *se_mem = in_se_mem; | 4639 | struct se_mem *se_mem = in_se_mem; |
4642 | struct scatterlist *sg = (struct scatterlist *)in_mem; | 4640 | struct scatterlist *sg = (struct scatterlist *)in_mem; |
4643 | u32 task_size = task->task_size, sg_no = 0; | 4641 | u32 task_size = task->task_size, sg_no = 0; |
4644 | 4642 | ||
4645 | if (!sg) { | 4643 | if (!sg) { |
4646 | printk(KERN_ERR "Unable to locate valid struct" | 4644 | printk(KERN_ERR "Unable to locate valid struct" |
4647 | " scatterlist pointer\n"); | 4645 | " scatterlist pointer\n"); |
4648 | return -1; | 4646 | return -1; |
4649 | } | 4647 | } |
4650 | 4648 | ||
4651 | while (task_size != 0) { | 4649 | while (task_size != 0) { |
4652 | /* | 4650 | /* |
4653 | * Setup the contiguous array of scatterlists for | 4651 | * Setup the contiguous array of scatterlists for |
4654 | * this struct se_task. | 4652 | * this struct se_task. |
4655 | */ | 4653 | */ |
4656 | sg_assign_page(sg, se_mem->se_page); | 4654 | sg_assign_page(sg, se_mem->se_page); |
4657 | 4655 | ||
4658 | if (*task_offset == 0) { | 4656 | if (*task_offset == 0) { |
4659 | sg->offset = se_mem->se_off; | 4657 | sg->offset = se_mem->se_off; |
4660 | 4658 | ||
4661 | if (task_size >= se_mem->se_len) { | 4659 | if (task_size >= se_mem->se_len) { |
4662 | sg->length = se_mem->se_len; | 4660 | sg->length = se_mem->se_len; |
4663 | 4661 | ||
4664 | if (!(list_is_last(&se_mem->se_list, | 4662 | if (!(list_is_last(&se_mem->se_list, |
4665 | T_TASK(se_cmd)->t_mem_list))) { | 4663 | T_TASK(se_cmd)->t_mem_list))) { |
4666 | se_mem = list_entry(se_mem->se_list.next, | 4664 | se_mem = list_entry(se_mem->se_list.next, |
4667 | struct se_mem, se_list); | 4665 | struct se_mem, se_list); |
4668 | (*se_mem_cnt)++; | 4666 | (*se_mem_cnt)++; |
4669 | } | 4667 | } |
4670 | } else { | 4668 | } else { |
4671 | sg->length = task_size; | 4669 | sg->length = task_size; |
4672 | /* | 4670 | /* |
4673 | * Determine if we need to calculate an offset | 4671 | * Determine if we need to calculate an offset |
4674 | * into the struct se_mem on the next go around.. | 4672 | * into the struct se_mem on the next go around.. |
4675 | */ | 4673 | */ |
4676 | task_size -= sg->length; | 4674 | task_size -= sg->length; |
4677 | if (!(task_size)) | 4675 | if (!(task_size)) |
4678 | *task_offset = sg->length; | 4676 | *task_offset = sg->length; |
4679 | 4677 | ||
4680 | goto next; | 4678 | goto next; |
4681 | } | 4679 | } |
4682 | 4680 | ||
4683 | } else { | 4681 | } else { |
4684 | sg->offset = (*task_offset + se_mem->se_off); | 4682 | sg->offset = (*task_offset + se_mem->se_off); |
4685 | 4683 | ||
4686 | if ((se_mem->se_len - *task_offset) > task_size) { | 4684 | if ((se_mem->se_len - *task_offset) > task_size) { |
4687 | sg->length = task_size; | 4685 | sg->length = task_size; |
4688 | /* | 4686 | /* |
4689 | * Determine if we need to calculate an offset | 4687 | * Determine if we need to calculate an offset |
4690 | * into the struct se_mem on the next go around.. | 4688 | * into the struct se_mem on the next go around.. |
4691 | */ | 4689 | */ |
4692 | task_size -= sg->length; | 4690 | task_size -= sg->length; |
4693 | if (!(task_size)) | 4691 | if (!(task_size)) |
4694 | *task_offset += sg->length; | 4692 | *task_offset += sg->length; |
4695 | 4693 | ||
4696 | goto next; | 4694 | goto next; |
4697 | } else { | 4695 | } else { |
4698 | sg->length = (se_mem->se_len - *task_offset); | 4696 | sg->length = (se_mem->se_len - *task_offset); |
4699 | 4697 | ||
4700 | if (!(list_is_last(&se_mem->se_list, | 4698 | if (!(list_is_last(&se_mem->se_list, |
4701 | T_TASK(se_cmd)->t_mem_list))) { | 4699 | T_TASK(se_cmd)->t_mem_list))) { |
4702 | se_mem = list_entry(se_mem->se_list.next, | 4700 | se_mem = list_entry(se_mem->se_list.next, |
4703 | struct se_mem, se_list); | 4701 | struct se_mem, se_list); |
4704 | (*se_mem_cnt)++; | 4702 | (*se_mem_cnt)++; |
4705 | } | 4703 | } |
4706 | } | 4704 | } |
4707 | 4705 | ||
4708 | *task_offset = 0; | 4706 | *task_offset = 0; |
4709 | } | 4707 | } |
4710 | task_size -= sg->length; | 4708 | task_size -= sg->length; |
4711 | next: | 4709 | next: |
4712 | DEBUG_MEM("task[%u] mem_to_sg - sg[%u](%p)(%u)(%u) - Reducing" | 4710 | DEBUG_MEM("task[%u] mem_to_sg - sg[%u](%p)(%u)(%u) - Reducing" |
4713 | " task_size to(%u), task_offset: %u\n", task->task_no, sg_no, | 4711 | " task_size to(%u), task_offset: %u\n", task->task_no, sg_no, |
4714 | sg_page(sg), sg->length, sg->offset, task_size, *task_offset); | 4712 | sg_page(sg), sg->length, sg->offset, task_size, *task_offset); |
4715 | 4713 | ||
4716 | sg_no++; | 4714 | sg_no++; |
4717 | if (!(task_size)) | 4715 | if (!(task_size)) |
4718 | break; | 4716 | break; |
4719 | 4717 | ||
4720 | sg = sg_next(sg); | 4718 | sg = sg_next(sg); |
4721 | 4719 | ||
4722 | if (task_size > se_cmd->data_length) | 4720 | if (task_size > se_cmd->data_length) |
4723 | BUG(); | 4721 | BUG(); |
4724 | } | 4722 | } |
4725 | *out_se_mem = se_mem; | 4723 | *out_se_mem = se_mem; |
4726 | 4724 | ||
4727 | DEBUG_MEM("task[%u] - Mapped(%u) struct se_mem segments to total(%u)" | 4725 | DEBUG_MEM("task[%u] - Mapped(%u) struct se_mem segments to total(%u)" |
4728 | " SGs\n", task->task_no, *se_mem_cnt, sg_no); | 4726 | " SGs\n", task->task_no, *se_mem_cnt, sg_no); |
4729 | 4727 | ||
4730 | return 0; | 4728 | return 0; |
4731 | } | 4729 | } |
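
Editor's note: the *task_offset bookkeeping is the subtle part of the function above, since it lets a single struct se_mem be split across two tasks' scatterlists. Below, a userspace model of that hand-off under the same assumptions; fill_task and struct seg are illustrative stand-ins for the kernel structures.

	#include <stdio.h>

	struct seg { unsigned int len; };

	static unsigned int fill_task(const struct seg *mem, unsigned int nmem,
				      unsigned int *idx, unsigned int *task_offset,
				      unsigned int task_size)
	{
		unsigned int sgs = 0;

		while (task_size && *idx < nmem) {
			unsigned int avail = mem[*idx].len - *task_offset;
			unsigned int take = task_size < avail ? task_size : avail;

			task_size -= take;
			sgs++;
			if (take == avail) {		/* segment exhausted */
				(*idx)++;
				*task_offset = 0;
			} else {			/* partial: carry offset */
				*task_offset += take;
			}
		}
		return sgs;
	}

	int main(void)
	{
		struct seg mem[] = { {4096}, {4096} };
		unsigned int idx = 0, off = 0;

		/* Two 3072-byte tasks over two 4096-byte segments: task1
		 * starts 3072 bytes into segment 0 and spills into segment 1. */
		printf("task0 sgs=%u\n", fill_task(mem, 2, &idx, &off, 3072));
		printf("task1 sgs=%u (offset carried=%u)\n",
		       fill_task(mem, 2, &idx, &off, 3072), off);
		return 0;
	}
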
4732 | 4730 | ||
4733 | /* | 4731 | /* |
4734 | * This function can be used by HW target mode drivers to create a linked | 4732 | * This function can be used by HW target mode drivers to create a linked |
4735 | * scatterlist from all contiguously allocated struct se_task->task_sg[]. | 4733 | * scatterlist from all contiguously allocated struct se_task->task_sg[]. |
4736 | * This is intended to be called during the completion path by TCM Core | 4734 | * This is intended to be called during the completion path by TCM Core |
4737 | * when struct target_core_fabric_ops->check_task_sg_chaining is enabled. | 4735 | * when struct target_core_fabric_ops->check_task_sg_chaining is enabled. |
4738 | */ | 4736 | */ |
4739 | void transport_do_task_sg_chain(struct se_cmd *cmd) | 4737 | void transport_do_task_sg_chain(struct se_cmd *cmd) |
4740 | { | 4738 | { |
4741 | struct scatterlist *sg_head = NULL, *sg_link = NULL, *sg_first = NULL; | 4739 | struct scatterlist *sg_head = NULL, *sg_link = NULL, *sg_first = NULL; |
4742 | struct scatterlist *sg_head_cur = NULL, *sg_link_cur = NULL; | 4740 | struct scatterlist *sg_head_cur = NULL, *sg_link_cur = NULL; |
4743 | struct scatterlist *sg, *sg_end = NULL, *sg_end_cur = NULL; | 4741 | struct scatterlist *sg, *sg_end = NULL, *sg_end_cur = NULL; |
4744 | struct se_task *task; | 4742 | struct se_task *task; |
4745 | struct target_core_fabric_ops *tfo = CMD_TFO(cmd); | 4743 | struct target_core_fabric_ops *tfo = CMD_TFO(cmd); |
4746 | u32 task_sg_num = 0, sg_count = 0; | 4744 | u32 task_sg_num = 0, sg_count = 0; |
4747 | int i; | 4745 | int i; |
4748 | 4746 | ||
4749 | if (tfo->task_sg_chaining == 0) { | 4747 | if (tfo->task_sg_chaining == 0) { |
4750 | printk(KERN_ERR "task_sg_chaining is diabled for fabric module:" | 4748 | printk(KERN_ERR "task_sg_chaining is diabled for fabric module:" |
4751 | " %s\n", tfo->get_fabric_name()); | 4749 | " %s\n", tfo->get_fabric_name()); |
4752 | dump_stack(); | 4750 | dump_stack(); |
4753 | return; | 4751 | return; |
4754 | } | 4752 | } |
4755 | /* | 4753 | /* |
4756 | * Walk the struct se_task list and setup scatterlist chains | 4754 | * Walk the struct se_task list and setup scatterlist chains |
4757 | * for each contiguously allocated struct se_task->task_sg[]. | 4755 | * for each contiguously allocated struct se_task->task_sg[]. |
4758 | */ | 4756 | */ |
4759 | list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) { | 4757 | list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) { |
4760 | if (!(task->task_sg) || !(task->task_padded_sg)) | 4758 | if (!(task->task_sg) || !(task->task_padded_sg)) |
4761 | continue; | 4759 | continue; |
4762 | 4760 | ||
4763 | if (sg_head && sg_link) { | 4761 | if (sg_head && sg_link) { |
4764 | sg_head_cur = &task->task_sg[0]; | 4762 | sg_head_cur = &task->task_sg[0]; |
4765 | sg_link_cur = &task->task_sg[task->task_sg_num]; | 4763 | sg_link_cur = &task->task_sg[task->task_sg_num]; |
4766 | /* | 4764 | /* |
4767 | * Either add chain or mark end of scatterlist | 4765 | * Either add chain or mark end of scatterlist |
4768 | */ | 4766 | */ |
4769 | if (!(list_is_last(&task->t_list, | 4767 | if (!(list_is_last(&task->t_list, |
4770 | &T_TASK(cmd)->t_task_list))) { | 4768 | &T_TASK(cmd)->t_task_list))) { |
4771 | /* | 4769 | /* |
4772 | * Clear existing SGL termination bit set in | 4770 | * Clear existing SGL termination bit set in |
4773 | * transport_calc_sg_num(), see sg_mark_end() | 4771 | * transport_calc_sg_num(), see sg_mark_end() |
4774 | */ | 4772 | */ |
4775 | sg_end_cur = &task->task_sg[task->task_sg_num - 1]; | 4773 | sg_end_cur = &task->task_sg[task->task_sg_num - 1]; |
4776 | sg_end_cur->page_link &= ~0x02; | 4774 | sg_end_cur->page_link &= ~0x02; |
4777 | 4775 | ||
4778 | sg_chain(sg_head, task_sg_num, sg_head_cur); | 4776 | sg_chain(sg_head, task_sg_num, sg_head_cur); |
4779 | sg_count += (task->task_sg_num + 1); | 4777 | sg_count += (task->task_sg_num + 1); |
4780 | } else | 4778 | } else |
4781 | sg_count += task->task_sg_num; | 4779 | sg_count += task->task_sg_num; |
4782 | 4780 | ||
4783 | sg_head = sg_head_cur; | 4781 | sg_head = sg_head_cur; |
4784 | sg_link = sg_link_cur; | 4782 | sg_link = sg_link_cur; |
4785 | task_sg_num = task->task_sg_num; | 4783 | task_sg_num = task->task_sg_num; |
4786 | continue; | 4784 | continue; |
4787 | } | 4785 | } |
4788 | sg_head = sg_first = &task->task_sg[0]; | 4786 | sg_head = sg_first = &task->task_sg[0]; |
4789 | sg_link = &task->task_sg[task->task_sg_num]; | 4787 | sg_link = &task->task_sg[task->task_sg_num]; |
4790 | task_sg_num = task->task_sg_num; | 4788 | task_sg_num = task->task_sg_num; |
4791 | /* | 4789 | /* |
4792 | * Check for single task.. | 4790 | * Check for single task.. |
4793 | */ | 4791 | */ |
4794 | if (!(list_is_last(&task->t_list, &T_TASK(cmd)->t_task_list))) { | 4792 | if (!(list_is_last(&task->t_list, &T_TASK(cmd)->t_task_list))) { |
4795 | /* | 4793 | /* |
4796 | * Clear existing SGL termination bit set in | 4794 | * Clear existing SGL termination bit set in |
4797 | * transport_calc_sg_num(), see sg_mark_end() | 4795 | * transport_calc_sg_num(), see sg_mark_end() |
4798 | */ | 4796 | */ |
4799 | sg_end = &task->task_sg[task->task_sg_num - 1]; | 4797 | sg_end = &task->task_sg[task->task_sg_num - 1]; |
4800 | sg_end->page_link &= ~0x02; | 4798 | sg_end->page_link &= ~0x02; |
4801 | sg_count += (task->task_sg_num + 1); | 4799 | sg_count += (task->task_sg_num + 1); |
4802 | } else | 4800 | } else |
4803 | sg_count += task->task_sg_num; | 4801 | sg_count += task->task_sg_num; |
4804 | } | 4802 | } |
4805 | /* | 4803 | /* |
4806 | * Setup the starting pointer and total t_tasks_sg_chained_no including | 4804 | * Setup the starting pointer and total t_tasks_sg_chained_no including |
4807 | * padding SGs for linking and to mark the end. | 4805 | * padding SGs for linking and to mark the end. |
4808 | */ | 4806 | */ |
4809 | T_TASK(cmd)->t_tasks_sg_chained = sg_first; | 4807 | T_TASK(cmd)->t_tasks_sg_chained = sg_first; |
4810 | T_TASK(cmd)->t_tasks_sg_chained_no = sg_count; | 4808 | T_TASK(cmd)->t_tasks_sg_chained_no = sg_count; |
4811 | 4809 | ||
4812 | DEBUG_CMD_M("Setup T_TASK(cmd)->t_tasks_sg_chained: %p and" | 4810 | DEBUG_CMD_M("Setup T_TASK(cmd)->t_tasks_sg_chained: %p and" |
4813 | " t_tasks_sg_chained_no: %u\n", T_TASK(cmd)->t_tasks_sg_chained, | 4811 | " t_tasks_sg_chained_no: %u\n", T_TASK(cmd)->t_tasks_sg_chained, |
4814 | T_TASK(cmd)->t_tasks_sg_chained_no); | 4812 | T_TASK(cmd)->t_tasks_sg_chained_no); |
4815 | 4813 | ||
4816 | for_each_sg(T_TASK(cmd)->t_tasks_sg_chained, sg, | 4814 | for_each_sg(T_TASK(cmd)->t_tasks_sg_chained, sg, |
4817 | T_TASK(cmd)->t_tasks_sg_chained_no, i) { | 4815 | T_TASK(cmd)->t_tasks_sg_chained_no, i) { |
4818 | 4816 | ||
4819 | DEBUG_CMD_M("SG: %p page: %p length: %d offset: %d\n", | 4817 | DEBUG_CMD_M("SG: %p page: %p length: %d offset: %d\n", |
4820 | sg, sg_page(sg), sg->length, sg->offset); | 4818 | sg, sg_page(sg), sg->length, sg->offset); |
4821 | if (sg_is_chain(sg)) | 4819 | if (sg_is_chain(sg)) |
4822 | DEBUG_CMD_M("SG: %p sg_is_chain=1\n", sg); | 4820 | DEBUG_CMD_M("SG: %p sg_is_chain=1\n", sg); |
4823 | if (sg_is_last(sg)) | 4821 | if (sg_is_last(sg)) |
4824 | DEBUG_CMD_M("SG: %p sg_is_last=1\n", sg); | 4822 | DEBUG_CMD_M("SG: %p sg_is_last=1\n", sg); |
4825 | } | 4823 | } |
4826 | 4824 | ||
4827 | } | 4825 | } |
4828 | EXPORT_SYMBOL(transport_do_task_sg_chain); | 4826 | EXPORT_SYMBOL(transport_do_task_sg_chain); |
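
Editor's note: a hedged sketch, not from the patch, of how a HW target fabric module with task_sg_chaining enabled might consume the chained SGL after this call. my_fabric_queue_chained_sgl() and my_hw_queue_segment() are hypothetical; for_each_sg(), sg_is_chain() and sg_page() are the same helpers the debug loop above already uses.

	static void my_fabric_queue_chained_sgl(struct se_cmd *cmd)
	{
		struct scatterlist *sg;
		int i;

		for_each_sg(T_TASK(cmd)->t_tasks_sg_chained, sg,
				T_TASK(cmd)->t_tasks_sg_chained_no, i) {
			/* Defensive: skip link entries, which carry no payload */
			if (sg_is_chain(sg))
				continue;

			/* Hypothetical hand-off of one segment to the HW */
			my_hw_queue_segment(sg_page(sg), sg->offset, sg->length);
		}
	}
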
4829 | 4827 | ||
4830 | static int transport_do_se_mem_map( | 4828 | static int transport_do_se_mem_map( |
4831 | struct se_device *dev, | 4829 | struct se_device *dev, |
4832 | struct se_task *task, | 4830 | struct se_task *task, |
4833 | struct list_head *se_mem_list, | 4831 | struct list_head *se_mem_list, |
4834 | void *in_mem, | 4832 | void *in_mem, |
4835 | struct se_mem *in_se_mem, | 4833 | struct se_mem *in_se_mem, |
4836 | struct se_mem **out_se_mem, | 4834 | struct se_mem **out_se_mem, |
4837 | u32 *se_mem_cnt, | 4835 | u32 *se_mem_cnt, |
4838 | u32 *task_offset_in) | 4836 | u32 *task_offset_in) |
4839 | { | 4837 | { |
4840 | u32 task_offset = *task_offset_in; | 4838 | u32 task_offset = *task_offset_in; |
4841 | int ret = 0; | 4839 | int ret = 0; |
4842 | /* | 4840 | /* |
4843 | * se_subsystem_api_t->do_se_mem_map is used when internal allocation | 4841 | * se_subsystem_api_t->do_se_mem_map is used when internal allocation |
4844 | * has been done by the transport plugin. | 4842 | * has been done by the transport plugin. |
4845 | */ | 4843 | */ |
4846 | if (TRANSPORT(dev)->do_se_mem_map) { | 4844 | if (TRANSPORT(dev)->do_se_mem_map) { |
4847 | ret = TRANSPORT(dev)->do_se_mem_map(task, se_mem_list, | 4845 | ret = TRANSPORT(dev)->do_se_mem_map(task, se_mem_list, |
4848 | in_mem, in_se_mem, out_se_mem, se_mem_cnt, | 4846 | in_mem, in_se_mem, out_se_mem, se_mem_cnt, |
4849 | task_offset_in); | 4847 | task_offset_in); |
4850 | if (ret == 0) | 4848 | if (ret == 0) |
4851 | T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt; | 4849 | T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt; |
4852 | 4850 | ||
4853 | return ret; | 4851 | return ret; |
4854 | } | 4852 | } |
4855 | 4853 | ||
4856 | BUG_ON(list_empty(se_mem_list)); | 4854 | BUG_ON(list_empty(se_mem_list)); |
4857 | /* | 4855 | /* |
4858 | * This is the normal path for all non-BIDI and BIDI-COMMAND | 4856 | * This is the normal path for all non-BIDI and BIDI-COMMAND |
4859 | * WRITE payloads.. If we need to do BIDI READ passthrough for | 4857 | * WRITE payloads.. If we need to do BIDI READ passthrough for |
4860 | * TCM/pSCSI the first call to transport_do_se_mem_map -> | 4858 | * TCM/pSCSI the first call to transport_do_se_mem_map -> |
4861 | * transport_calc_sg_num() -> transport_map_mem_to_sg() will do the | 4859 | * transport_calc_sg_num() -> transport_map_mem_to_sg() will do the |
4862 | * allocation for task->task_sg_bidi, and the subsequent call to | 4860 | * allocation for task->task_sg_bidi, and the subsequent call to |
4863 | * transport_do_se_mem_map() from transport_generic_get_cdb_count() will do the mapping. | 4861 | * transport_do_se_mem_map() from transport_generic_get_cdb_count() will do the mapping. |
4864 | */ | 4862 | */ |
4865 | if (!(task->task_sg_bidi)) { | 4863 | if (!(task->task_sg_bidi)) { |
4866 | /* | 4864 | /* |
4867 | * Assume default that transport plugin speaks preallocated | 4865 | * Assume default that transport plugin speaks preallocated |
4868 | * scatterlists. | 4866 | * scatterlists. |
4869 | */ | 4867 | */ |
4870 | if (!(transport_calc_sg_num(task, in_se_mem, task_offset))) | 4868 | if (!(transport_calc_sg_num(task, in_se_mem, task_offset))) |
4871 | return -1; | 4869 | return -1; |
4872 | /* | 4870 | /* |
4873 | * struct se_task->task_sg now contains the struct scatterlist array. | 4871 | * struct se_task->task_sg now contains the struct scatterlist array. |
4874 | */ | 4872 | */ |
4875 | return transport_map_mem_to_sg(task, se_mem_list, task->task_sg, | 4873 | return transport_map_mem_to_sg(task, se_mem_list, task->task_sg, |
4876 | in_se_mem, out_se_mem, se_mem_cnt, | 4874 | in_se_mem, out_se_mem, se_mem_cnt, |
4877 | task_offset_in); | 4875 | task_offset_in); |
4878 | } | 4876 | } |
4879 | /* | 4877 | /* |
4880 | * Handle the se_mem_list -> struct task->task_sg_bidi | 4878 | * Handle the se_mem_list -> struct task->task_sg_bidi |
4881 | * memory map for the extra BIDI READ payload | 4879 | * memory map for the extra BIDI READ payload |
4882 | */ | 4880 | */ |
4883 | return transport_map_mem_to_sg(task, se_mem_list, task->task_sg_bidi, | 4881 | return transport_map_mem_to_sg(task, se_mem_list, task->task_sg_bidi, |
4884 | in_se_mem, out_se_mem, se_mem_cnt, | 4882 | in_se_mem, out_se_mem, se_mem_cnt, |
4885 | task_offset_in); | 4883 | task_offset_in); |
4886 | } | 4884 | } |
4887 | 4885 | ||
4888 | static u32 transport_generic_get_cdb_count( | 4886 | static u32 transport_generic_get_cdb_count( |
4889 | struct se_cmd *cmd, | 4887 | struct se_cmd *cmd, |
4890 | unsigned long long lba, | 4888 | unsigned long long lba, |
4891 | u32 sectors, | 4889 | u32 sectors, |
4892 | enum dma_data_direction data_direction, | 4890 | enum dma_data_direction data_direction, |
4893 | struct list_head *mem_list, | 4891 | struct list_head *mem_list, |
4894 | int set_counts) | 4892 | int set_counts) |
4895 | { | 4893 | { |
4896 | unsigned char *cdb = NULL; | 4894 | unsigned char *cdb = NULL; |
4897 | struct se_task *task; | 4895 | struct se_task *task; |
4898 | struct se_mem *se_mem = NULL, *se_mem_lout = NULL; | 4896 | struct se_mem *se_mem = NULL, *se_mem_lout = NULL; |
4899 | struct se_mem *se_mem_bidi = NULL, *se_mem_bidi_lout = NULL; | 4897 | struct se_mem *se_mem_bidi = NULL, *se_mem_bidi_lout = NULL; |
4900 | struct se_device *dev = SE_DEV(cmd); | 4898 | struct se_device *dev = SE_DEV(cmd); |
4901 | int max_sectors_set = 0, ret; | 4899 | int max_sectors_set = 0, ret; |
4902 | u32 task_offset_in = 0, se_mem_cnt = 0, se_mem_bidi_cnt = 0, task_cdbs = 0; | 4900 | u32 task_offset_in = 0, se_mem_cnt = 0, se_mem_bidi_cnt = 0, task_cdbs = 0; |
4903 | 4901 | ||
4904 | if (!mem_list) { | 4902 | if (!mem_list) { |
4905 | printk(KERN_ERR "mem_list is NULL in transport_generic_get" | 4903 | printk(KERN_ERR "mem_list is NULL in transport_generic_get" |
4906 | "_cdb_count()\n"); | 4904 | "_cdb_count()\n"); |
4907 | return 0; | 4905 | return 0; |
4908 | } | 4906 | } |
4909 | /* | 4907 | /* |
4910 | * Using RAMDISK_DR backstores is the only case where | 4908 | * Using RAMDISK_DR backstores is the only case where |
4911 | * mem_list will ever be empty at this point. | 4909 | * mem_list will ever be empty at this point. |
4912 | */ | 4910 | */ |
4913 | if (!(list_empty(mem_list))) | 4911 | if (!(list_empty(mem_list))) |
4914 | se_mem = list_entry(mem_list->next, struct se_mem, se_list); | 4912 | se_mem = list_entry(mem_list->next, struct se_mem, se_list); |
4915 | /* | 4913 | /* |
4916 | * Check for extra se_mem_bidi mapping for BIDI-COMMANDs to | 4914 | * Check for extra se_mem_bidi mapping for BIDI-COMMANDs to |
4917 | * struct se_task->task_sg_bidi for TCM/pSCSI passthrough operation | 4915 | * struct se_task->task_sg_bidi for TCM/pSCSI passthrough operation |
4918 | */ | 4916 | */ |
4919 | if ((T_TASK(cmd)->t_mem_bidi_list != NULL) && | 4917 | if ((T_TASK(cmd)->t_mem_bidi_list != NULL) && |
4920 | !(list_empty(T_TASK(cmd)->t_mem_bidi_list)) && | 4918 | !(list_empty(T_TASK(cmd)->t_mem_bidi_list)) && |
4921 | (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) | 4919 | (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) |
4922 | se_mem_bidi = list_entry(T_TASK(cmd)->t_mem_bidi_list->next, | 4920 | se_mem_bidi = list_entry(T_TASK(cmd)->t_mem_bidi_list->next, |
4923 | struct se_mem, se_list); | 4921 | struct se_mem, se_list); |
4924 | 4922 | ||
4925 | while (sectors) { | 4923 | while (sectors) { |
4926 | DEBUG_VOL("ITT[0x%08x] LBA(%llu) SectorsLeft(%u) EOBJ(%llu)\n", | 4924 | DEBUG_VOL("ITT[0x%08x] LBA(%llu) SectorsLeft(%u) EOBJ(%llu)\n", |
4927 | CMD_TFO(cmd)->get_task_tag(cmd), lba, sectors, | 4925 | CMD_TFO(cmd)->get_task_tag(cmd), lba, sectors, |
4928 | transport_dev_end_lba(dev)); | 4926 | transport_dev_end_lba(dev)); |
4929 | 4927 | ||
4930 | task = transport_generic_get_task(cmd, data_direction); | 4928 | task = transport_generic_get_task(cmd, data_direction); |
4931 | if (!(task)) | 4929 | if (!(task)) |
4932 | goto out; | 4930 | goto out; |
4933 | 4931 | ||
4934 | transport_set_tasks_sectors(task, dev, lba, sectors, | 4932 | transport_set_tasks_sectors(task, dev, lba, sectors, |
4935 | &max_sectors_set); | 4933 | &max_sectors_set); |
4936 | 4934 | ||
4937 | task->task_lba = lba; | 4935 | task->task_lba = lba; |
4938 | lba += task->task_sectors; | 4936 | lba += task->task_sectors; |
4939 | sectors -= task->task_sectors; | 4937 | sectors -= task->task_sectors; |
4940 | task->task_size = (task->task_sectors * | 4938 | task->task_size = (task->task_sectors * |
4941 | DEV_ATTRIB(dev)->block_size); | 4939 | DEV_ATTRIB(dev)->block_size); |
4942 | 4940 | ||
4943 | cdb = TRANSPORT(dev)->get_cdb(task); | 4941 | cdb = TRANSPORT(dev)->get_cdb(task); |
4944 | if ((cdb)) { | 4942 | if ((cdb)) { |
4945 | memcpy(cdb, T_TASK(cmd)->t_task_cdb, | 4943 | memcpy(cdb, T_TASK(cmd)->t_task_cdb, |
4946 | scsi_command_size(T_TASK(cmd)->t_task_cdb)); | 4944 | scsi_command_size(T_TASK(cmd)->t_task_cdb)); |
4947 | cmd->transport_split_cdb(task->task_lba, | 4945 | cmd->transport_split_cdb(task->task_lba, |
4948 | &task->task_sectors, cdb); | 4946 | &task->task_sectors, cdb); |
4949 | } | 4947 | } |
4950 | 4948 | ||
4951 | /* | 4949 | /* |
4952 | * Perform the SE OBJ plugin and/or Transport plugin specific | 4950 | * Perform the SE OBJ plugin and/or Transport plugin specific |
4953 | * mapping for T_TASK(cmd)->t_mem_list. And setup the | 4951 | * mapping for T_TASK(cmd)->t_mem_list. And setup the |
4954 | * task->task_sg and if necessary task->task_sg_bidi | 4952 | * task->task_sg and if necessary task->task_sg_bidi |
4955 | */ | 4953 | */ |
4956 | ret = transport_do_se_mem_map(dev, task, mem_list, | 4954 | ret = transport_do_se_mem_map(dev, task, mem_list, |
4957 | NULL, se_mem, &se_mem_lout, &se_mem_cnt, | 4955 | NULL, se_mem, &se_mem_lout, &se_mem_cnt, |
4958 | &task_offset_in); | 4956 | &task_offset_in); |
4959 | if (ret < 0) | 4957 | if (ret < 0) |
4960 | goto out; | 4958 | goto out; |
4961 | 4959 | ||
4962 | se_mem = se_mem_lout; | 4960 | se_mem = se_mem_lout; |
4963 | /* | 4961 | /* |
4964 | * Setup the T_TASK(cmd)->t_mem_bidi_list -> task->task_sg_bidi | 4962 | * Setup the T_TASK(cmd)->t_mem_bidi_list -> task->task_sg_bidi |
4965 | * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI | 4963 | * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI |
4966 | * | 4964 | * |
4967 | * Note that the first call to transport_do_se_mem_map() above will | 4965 | * Note that the first call to transport_do_se_mem_map() above will |
4968 | * allocate struct se_task->task_sg_bidi in transport_do_se_mem_map() | 4966 | * allocate struct se_task->task_sg_bidi in transport_do_se_mem_map() |
4969 | * -> transport_calc_sg_num(), and the second here will do the | 4967 | * -> transport_calc_sg_num(), and the second here will do the |
4970 | * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI. | 4968 | * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI. |
4971 | */ | 4969 | */ |
4972 | if (task->task_sg_bidi != NULL) { | 4970 | if (task->task_sg_bidi != NULL) { |
4973 | ret = transport_do_se_mem_map(dev, task, | 4971 | ret = transport_do_se_mem_map(dev, task, |
4974 | T_TASK(cmd)->t_mem_bidi_list, NULL, | 4972 | T_TASK(cmd)->t_mem_bidi_list, NULL, |
4975 | se_mem_bidi, &se_mem_bidi_lout, &se_mem_bidi_cnt, | 4973 | se_mem_bidi, &se_mem_bidi_lout, &se_mem_bidi_cnt, |
4976 | &task_offset_in); | 4974 | &task_offset_in); |
4977 | if (ret < 0) | 4975 | if (ret < 0) |
4978 | goto out; | 4976 | goto out; |
4979 | 4977 | ||
4980 | se_mem_bidi = se_mem_bidi_lout; | 4978 | se_mem_bidi = se_mem_bidi_lout; |
4981 | } | 4979 | } |
4982 | task_cdbs++; | 4980 | task_cdbs++; |
4983 | 4981 | ||
4984 | DEBUG_VOL("Incremented task_cdbs(%u) task->task_sg_num(%u)\n", | 4982 | DEBUG_VOL("Incremented task_cdbs(%u) task->task_sg_num(%u)\n", |
4985 | task_cdbs, task->task_sg_num); | 4983 | task_cdbs, task->task_sg_num); |
4986 | 4984 | ||
4987 | if (max_sectors_set) { | 4985 | if (max_sectors_set) { |
4988 | max_sectors_set = 0; | 4986 | max_sectors_set = 0; |
4989 | continue; | 4987 | continue; |
4990 | } | 4988 | } |
4991 | 4989 | ||
4992 | if (!sectors) | 4990 | if (!sectors) |
4993 | break; | 4991 | break; |
4994 | } | 4992 | } |
4995 | 4993 | ||
4996 | if (set_counts) { | 4994 | if (set_counts) { |
4997 | atomic_inc(&T_TASK(cmd)->t_fe_count); | 4995 | atomic_inc(&T_TASK(cmd)->t_fe_count); |
4998 | atomic_inc(&T_TASK(cmd)->t_se_count); | 4996 | atomic_inc(&T_TASK(cmd)->t_se_count); |
4999 | } | 4997 | } |
5000 | 4998 | ||
5001 | DEBUG_VOL("ITT[0x%08x] total %s cdbs(%u)\n", | 4999 | DEBUG_VOL("ITT[0x%08x] total %s cdbs(%u)\n", |
5002 | CMD_TFO(cmd)->get_task_tag(cmd), (data_direction == DMA_TO_DEVICE) | 5000 | CMD_TFO(cmd)->get_task_tag(cmd), (data_direction == DMA_TO_DEVICE) |
5003 | ? "DMA_TO_DEVICE" : "DMA_FROM_DEVICE", task_cdbs); | 5001 | ? "DMA_TO_DEVICE" : "DMA_FROM_DEVICE", task_cdbs); |
5004 | 5002 | ||
5005 | return task_cdbs; | 5003 | return task_cdbs; |
5006 | out: | 5004 | out: |
5007 | return 0; | 5005 | return 0; |
5008 | } | 5006 | } |
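
Editor's note: a standalone model of the splitting loop above, showing how one (lba, sectors) request becomes several task CDBs once max_sectors caps each chunk. The values are illustrative and the end-of-device truncation and memory mapping are omitted.

	#include <stdio.h>

	int main(void)
	{
		unsigned long long lba = 100;
		unsigned int sectors = 700, max_sectors = 256, task_cdbs = 0;

		while (sectors) {
			unsigned int task_sectors =
				sectors > max_sectors ? max_sectors : sectors;

			printf("task %u: lba=%llu sectors=%u\n",
			       task_cdbs, lba, task_sectors);
			lba += task_sectors;
			sectors -= task_sectors;
			task_cdbs++;
		}
		/* 700 sectors at max_sectors=256 -> 3 tasks: 256 + 256 + 188 */
		printf("total task_cdbs=%u\n", task_cdbs);
		return 0;
	}
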
5009 | 5007 | ||
5010 | static int | 5008 | static int |
5011 | transport_map_control_cmd_to_task(struct se_cmd *cmd) | 5009 | transport_map_control_cmd_to_task(struct se_cmd *cmd) |
5012 | { | 5010 | { |
5013 | struct se_device *dev = SE_DEV(cmd); | 5011 | struct se_device *dev = SE_DEV(cmd); |
5014 | unsigned char *cdb; | 5012 | unsigned char *cdb; |
5015 | struct se_task *task; | 5013 | struct se_task *task; |
5016 | int ret; | 5014 | int ret; |
5017 | 5015 | ||
5018 | task = transport_generic_get_task(cmd, cmd->data_direction); | 5016 | task = transport_generic_get_task(cmd, cmd->data_direction); |
5019 | if (!task) | 5017 | if (!task) |
5020 | return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; | 5018 | return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; |
5021 | 5019 | ||
5022 | cdb = TRANSPORT(dev)->get_cdb(task); | 5020 | cdb = TRANSPORT(dev)->get_cdb(task); |
5023 | if (cdb) | 5021 | if (cdb) |
5024 | memcpy(cdb, cmd->t_task->t_task_cdb, | 5022 | memcpy(cdb, cmd->t_task->t_task_cdb, |
5025 | scsi_command_size(cmd->t_task->t_task_cdb)); | 5023 | scsi_command_size(cmd->t_task->t_task_cdb)); |
5026 | 5024 | ||
5027 | task->task_size = cmd->data_length; | 5025 | task->task_size = cmd->data_length; |
5028 | task->task_sg_num = | 5026 | task->task_sg_num = |
5029 | (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) ? 1 : 0; | 5027 | (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) ? 1 : 0; |
5030 | 5028 | ||
5031 | atomic_inc(&cmd->t_task->t_fe_count); | 5029 | atomic_inc(&cmd->t_task->t_fe_count); |
5032 | atomic_inc(&cmd->t_task->t_se_count); | 5030 | atomic_inc(&cmd->t_task->t_se_count); |
5033 | 5031 | ||
5034 | if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) { | 5032 | if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) { |
5035 | struct se_mem *se_mem = NULL, *se_mem_lout = NULL; | 5033 | struct se_mem *se_mem = NULL, *se_mem_lout = NULL; |
5036 | u32 se_mem_cnt = 0, task_offset = 0; | 5034 | u32 se_mem_cnt = 0, task_offset = 0; |
5037 | 5035 | ||
5038 | if (!list_empty(T_TASK(cmd)->t_mem_list)) | 5036 | if (!list_empty(T_TASK(cmd)->t_mem_list)) |
5039 | se_mem = list_entry(T_TASK(cmd)->t_mem_list->next, | 5037 | se_mem = list_entry(T_TASK(cmd)->t_mem_list->next, |
5040 | struct se_mem, se_list); | 5038 | struct se_mem, se_list); |
5041 | 5039 | ||
5042 | ret = transport_do_se_mem_map(dev, task, | 5040 | ret = transport_do_se_mem_map(dev, task, |
5043 | cmd->t_task->t_mem_list, NULL, se_mem, | 5041 | cmd->t_task->t_mem_list, NULL, se_mem, |
5044 | &se_mem_lout, &se_mem_cnt, &task_offset); | 5042 | &se_mem_lout, &se_mem_cnt, &task_offset); |
5045 | if (ret < 0) | 5043 | if (ret < 0) |
5046 | return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; | 5044 | return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; |
5047 | 5045 | ||
5048 | if (dev->transport->map_task_SG) | 5046 | if (dev->transport->map_task_SG) |
5049 | return dev->transport->map_task_SG(task); | 5047 | return dev->transport->map_task_SG(task); |
5050 | return 0; | 5048 | return 0; |
5051 | } else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) { | 5049 | } else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) { |
5052 | if (dev->transport->map_task_non_SG) | 5050 | if (dev->transport->map_task_non_SG) |
5053 | return dev->transport->map_task_non_SG(task); | 5051 | return dev->transport->map_task_non_SG(task); |
5054 | return 0; | 5052 | return 0; |
5055 | } else if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) { | 5053 | } else if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) { |
5056 | if (dev->transport->cdb_none) | 5054 | if (dev->transport->cdb_none) |
5057 | return dev->transport->cdb_none(task); | 5055 | return dev->transport->cdb_none(task); |
5058 | return 0; | 5056 | return 0; |
5059 | } else { | 5057 | } else { |
5060 | BUG(); | 5058 | BUG(); |
5061 | return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; | 5059 | return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; |
5062 | } | 5060 | } |
5063 | } | 5061 | } |
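
Editor's note: a minimal model of the one-of-three dispatch above; exactly one of the SG, non-SG, or non-data branches runs, and the fall-through mirrors the BUG() case. The flag bits and printed hook names are stand-ins, not the kernel's definitions.

	#include <stdio.h>

	#define SG_IO		(1u << 0)	/* models SCF_SCSI_CONTROL_SG_IO_CDB */
	#define NONSG_IO	(1u << 1)	/* models SCF_SCSI_CONTROL_NONSG_IO_CDB */
	#define NON_DATA	(1u << 2)	/* models SCF_SCSI_NON_DATA_CDB */

	static int map_control(unsigned int flags)
	{
		if (flags & SG_IO) {
			printf("-> map_task_SG()\n");
			return 0;
		}
		if (flags & NONSG_IO) {
			printf("-> map_task_non_SG()\n");
			return 0;
		}
		if (flags & NON_DATA) {
			printf("-> cdb_none()\n");
			return 0;
		}
		return -1;		/* mirrors the BUG() fall-through */
	}

	int main(void)
	{
		map_control(SG_IO);
		map_control(NON_DATA);
		return 0;
	}
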
5064 | 5062 | ||
5065 | /* transport_generic_new_cmd(): Called from transport_processing_thread() | 5063 | /* transport_generic_new_cmd(): Called from transport_processing_thread() |
5066 | * | 5064 | * |
5067 | * Allocate storage transport resources from a set of values predefined | 5065 | * Allocate storage transport resources from a set of values predefined |
5068 | * by transport_generic_cmd_sequencer() from the iSCSI Target RX process. | 5066 | * by transport_generic_cmd_sequencer() from the iSCSI Target RX process. |
5069 | * Any non-zero return here is treated as an "out of resource" op. | 5067 | * Any non-zero return here is treated as an "out of resource" op. |
5070 | */ | 5068 | */ |
5071 | /* | 5069 | /* |
5072 | * Generate struct se_task(s) and/or their payloads for this CDB. | 5070 | * Generate struct se_task(s) and/or their payloads for this CDB. |
5073 | */ | 5071 | */ |
5074 | static int transport_generic_new_cmd(struct se_cmd *cmd) | 5072 | static int transport_generic_new_cmd(struct se_cmd *cmd) |
5075 | { | 5073 | { |
5076 | struct se_portal_group *se_tpg; | 5074 | struct se_portal_group *se_tpg; |
5077 | struct se_task *task; | 5075 | struct se_task *task; |
5078 | struct se_device *dev = SE_DEV(cmd); | 5076 | struct se_device *dev = SE_DEV(cmd); |
5079 | int ret = 0; | 5077 | int ret = 0; |
5080 | 5078 | ||
5081 | /* | 5079 | /* |
5082 | * Determine if the TCM fabric module has already allocated physical | 5080 | * Determine if the TCM fabric module has already allocated physical |
5083 | * memory, and is directly calling transport_generic_map_mem_to_cmd() | 5081 | * memory, and is directly calling transport_generic_map_mem_to_cmd() |
5084 | * to setup beforehand the linked list of physical memory at | 5082 | * to setup beforehand the linked list of physical memory at |
5085 | * T_TASK(cmd)->t_mem_list of struct se_mem->se_page | 5083 | * T_TASK(cmd)->t_mem_list of struct se_mem->se_page |
5086 | */ | 5084 | */ |
5087 | if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) { | 5085 | if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) { |
5088 | ret = transport_allocate_resources(cmd); | 5086 | ret = transport_allocate_resources(cmd); |
5089 | if (ret < 0) | 5087 | if (ret < 0) |
5090 | return ret; | 5088 | return ret; |
5091 | } | 5089 | } |
5092 | 5090 | ||
5093 | ret = transport_get_sectors(cmd); | 5091 | ret = transport_get_sectors(cmd); |
5094 | if (ret < 0) | 5092 | if (ret < 0) |
5095 | return ret; | 5093 | return ret; |
5096 | 5094 | ||
5097 | ret = transport_new_cmd_obj(cmd); | 5095 | ret = transport_new_cmd_obj(cmd); |
5098 | if (ret < 0) | 5096 | if (ret < 0) |
5099 | return ret; | 5097 | return ret; |
5100 | 5098 | ||
5101 | /* | 5099 | /* |
5102 | * Determine if the calling TCM fabric module is talking to | 5100 | * Determine if the calling TCM fabric module is talking to |
5103 | * Linux/NET via kernel sockets and needs to allocate a | 5101 | * Linux/NET via kernel sockets and needs to allocate a |
5104 | * struct iovec array to complete the struct se_cmd | 5102 | * struct iovec array to complete the struct se_cmd |
5105 | */ | 5103 | */ |
5106 | se_tpg = SE_LUN(cmd)->lun_sep->sep_tpg; | 5104 | se_tpg = SE_LUN(cmd)->lun_sep->sep_tpg; |
5107 | if (TPG_TFO(se_tpg)->alloc_cmd_iovecs != NULL) { | 5105 | if (TPG_TFO(se_tpg)->alloc_cmd_iovecs != NULL) { |
5108 | ret = TPG_TFO(se_tpg)->alloc_cmd_iovecs(cmd); | 5106 | ret = TPG_TFO(se_tpg)->alloc_cmd_iovecs(cmd); |
5109 | if (ret < 0) | 5107 | if (ret < 0) |
5110 | return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; | 5108 | return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; |
5111 | } | 5109 | } |
5112 | 5110 | ||
5113 | if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { | 5111 | if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { |
5114 | list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) { | 5112 | list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) { |
5115 | if (atomic_read(&task->task_sent)) | 5113 | if (atomic_read(&task->task_sent)) |
5116 | continue; | 5114 | continue; |
5117 | if (!dev->transport->map_task_SG) | 5115 | if (!dev->transport->map_task_SG) |
5118 | continue; | 5116 | continue; |
5119 | 5117 | ||
5120 | ret = dev->transport->map_task_SG(task); | 5118 | ret = dev->transport->map_task_SG(task); |
5121 | if (ret < 0) | 5119 | if (ret < 0) |
5122 | return ret; | 5120 | return ret; |
5123 | } | 5121 | } |
5124 | } else { | 5122 | } else { |
5125 | ret = transport_map_control_cmd_to_task(cmd); | 5123 | ret = transport_map_control_cmd_to_task(cmd); |
5126 | if (ret < 0) | 5124 | if (ret < 0) |
5127 | return ret; | 5125 | return ret; |
5128 | } | 5126 | } |
5129 | 5127 | ||
5130 | /* | 5128 | /* |
5131 | * For WRITEs, let the iSCSI Target RX Thread know its buffer is ready.. | 5129 | * For WRITEs, let the iSCSI Target RX Thread know its buffer is ready.. |
5132 | * This WRITE struct se_cmd (and all of its associated struct se_task's) | 5130 | * This WRITE struct se_cmd (and all of its associated struct se_task's) |
5133 | * will be added to the struct se_device execution queue after its WRITE | 5131 | * will be added to the struct se_device execution queue after its WRITE |
5134 | * data has arrived. (i.e. it gets handled by the transport processing | 5132 | * data has arrived. (i.e. it gets handled by the transport processing |
5135 | * thread a second time) | 5133 | * thread a second time) |
5136 | */ | 5134 | */ |
5137 | if (cmd->data_direction == DMA_TO_DEVICE) { | 5135 | if (cmd->data_direction == DMA_TO_DEVICE) { |
5138 | transport_add_tasks_to_state_queue(cmd); | 5136 | transport_add_tasks_to_state_queue(cmd); |
5139 | return transport_generic_write_pending(cmd); | 5137 | return transport_generic_write_pending(cmd); |
5140 | } | 5138 | } |
5141 | /* | 5139 | /* |
5142 | * Everything else but a WRITE, add the struct se_cmd's struct se_task's | 5140 | * Everything else but a WRITE, add the struct se_cmd's struct se_task's |
5143 | * to the execution queue. | 5141 | * to the execution queue. |
5144 | */ | 5142 | */ |
5145 | transport_execute_tasks(cmd); | 5143 | transport_execute_tasks(cmd); |
5146 | return 0; | 5144 | return 0; |
5147 | } | 5145 | } |
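
Editor's note: the tail of transport_generic_new_cmd() carries the key control-flow decision, so a tiny model of just that branch follows; the enum stands in for enum dma_data_direction and the return values for the PYX_TRANSPORT codes.

	#include <stdio.h>

	enum dir { TO_DEVICE, FROM_DEVICE, NONE };

	static int new_cmd_tail(enum dir d)
	{
		if (d == TO_DEVICE) {
			/* WRITE: signal the frontend, wait for data. */
			printf("-> write_pending\n");
			return 1;	/* models PYX_TRANSPORT_WRITE_PENDING */
		}
		/* READ / no data: straight to the execution queue. */
		printf("-> execute_tasks\n");
		return 0;
	}

	int main(void)
	{
		new_cmd_tail(TO_DEVICE);
		new_cmd_tail(FROM_DEVICE);
		return 0;
	}
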
5148 | 5146 | ||
5149 | /* transport_generic_process_write(): | 5147 | /* transport_generic_process_write(): |
5150 | * | 5148 | * |
5151 | * | 5149 | * |
5152 | */ | 5150 | */ |
5153 | void transport_generic_process_write(struct se_cmd *cmd) | 5151 | void transport_generic_process_write(struct se_cmd *cmd) |
5154 | { | 5152 | { |
5155 | #if 0 | 5153 | #if 0 |
5156 | /* | 5154 | /* |
5157 | * Copy SCSI Presented DTL sector(s) from received buffers allocated to | 5155 | * Copy SCSI Presented DTL sector(s) from received buffers allocated to |
5158 | * original EDTL | 5156 | * original EDTL |
5159 | */ | 5157 | */ |
5160 | if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { | 5158 | if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { |
5161 | if (!T_TASK(cmd)->t_tasks_se_num) { | 5159 | if (!T_TASK(cmd)->t_tasks_se_num) { |
5162 | unsigned char *dst, *buf = | 5160 | unsigned char *dst, *buf = |
5163 | (unsigned char *)T_TASK(cmd)->t_task_buf; | 5161 | (unsigned char *)T_TASK(cmd)->t_task_buf; |
5164 | 5162 | ||
5165 | dst = kzalloc(cmd->cmd_spdtl, GFP_KERNEL); | 5163 | dst = kzalloc(cmd->cmd_spdtl, GFP_KERNEL); |
5166 | if (!(dst)) { | 5164 | if (!(dst)) { |
5167 | printk(KERN_ERR "Unable to allocate memory for" | 5165 | printk(KERN_ERR "Unable to allocate memory for" |
5168 | " WRITE underflow\n"); | 5166 | " WRITE underflow\n"); |
5169 | transport_generic_request_failure(cmd, NULL, | 5167 | transport_generic_request_failure(cmd, NULL, |
5170 | PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1); | 5168 | PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1); |
5171 | return; | 5169 | return; |
5172 | } | 5170 | } |
5173 | memcpy(dst, buf, cmd->cmd_spdtl); | 5171 | memcpy(dst, buf, cmd->cmd_spdtl); |
5174 | 5172 | ||
5175 | kfree(T_TASK(cmd)->t_task_buf); | 5173 | kfree(T_TASK(cmd)->t_task_buf); |
5176 | T_TASK(cmd)->t_task_buf = dst; | 5174 | T_TASK(cmd)->t_task_buf = dst; |
5177 | } else { | 5175 | } else { |
5178 | struct scatterlist *sg = | 5176 | struct scatterlist *sg = |
5179 | (struct scatterlist *)T_TASK(cmd)->t_task_buf; | 5177 | (struct scatterlist *)T_TASK(cmd)->t_task_buf; |
5180 | struct scatterlist *orig_sg; | 5178 | struct scatterlist *orig_sg; |
5181 | 5179 | ||
5182 | orig_sg = kzalloc(sizeof(struct scatterlist) * | 5180 | orig_sg = kzalloc(sizeof(struct scatterlist) * |
5183 | T_TASK(cmd)->t_tasks_se_num, | 5181 | T_TASK(cmd)->t_tasks_se_num, |
5184 | GFP_KERNEL); | 5182 | GFP_KERNEL); |
5185 | if (!(orig_sg)) { | 5183 | if (!(orig_sg)) { |
5186 | printk(KERN_ERR "Unable to allocate memory" | 5184 | printk(KERN_ERR "Unable to allocate memory" |
5187 | " for WRITE underflow\n"); | 5185 | " for WRITE underflow\n"); |
5188 | transport_generic_request_failure(cmd, NULL, | 5186 | transport_generic_request_failure(cmd, NULL, |
5189 | PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1); | 5187 | PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1); |
5190 | return; | 5188 | return; |
5191 | } | 5189 | } |
5192 | 5190 | ||
5193 | memcpy(orig_sg, T_TASK(cmd)->t_task_buf, | 5191 | memcpy(orig_sg, T_TASK(cmd)->t_task_buf, |
5194 | sizeof(struct scatterlist) * | 5192 | sizeof(struct scatterlist) * |
5195 | T_TASK(cmd)->t_tasks_se_num); | 5193 | T_TASK(cmd)->t_tasks_se_num); |
5196 | 5194 | ||
5197 | cmd->data_length = cmd->cmd_spdtl; | 5195 | cmd->data_length = cmd->cmd_spdtl; |
5198 | /* | 5196 | /* |
5199 | * FIXME, clear out original struct se_task and state | 5197 | * FIXME, clear out original struct se_task and state |
5200 | * information. | 5198 | * information. |
5201 | */ | 5199 | */ |
5202 | if (transport_generic_new_cmd(cmd) < 0) { | 5200 | if (transport_generic_new_cmd(cmd) < 0) { |
5203 | transport_generic_request_failure(cmd, NULL, | 5201 | transport_generic_request_failure(cmd, NULL, |
5204 | PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1); | 5202 | PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1); |
5205 | kfree(orig_sg); | 5203 | kfree(orig_sg); |
5206 | return; | 5204 | return; |
5207 | } | 5205 | } |
5208 | 5206 | ||
5209 | transport_memcpy_write_sg(cmd, orig_sg); | 5207 | transport_memcpy_write_sg(cmd, orig_sg); |
5210 | } | 5208 | } |
5211 | } | 5209 | } |
5212 | #endif | 5210 | #endif |
5213 | transport_execute_tasks(cmd); | 5211 | transport_execute_tasks(cmd); |
5214 | } | 5212 | } |
5215 | EXPORT_SYMBOL(transport_generic_process_write); | 5213 | EXPORT_SYMBOL(transport_generic_process_write); |
5216 | 5214 | ||
5217 | /* transport_generic_write_pending(): | 5215 | /* transport_generic_write_pending(): |
5218 | * | 5216 | * |
5219 | * | 5217 | * |
5220 | */ | 5218 | */ |
5221 | static int transport_generic_write_pending(struct se_cmd *cmd) | 5219 | static int transport_generic_write_pending(struct se_cmd *cmd) |
5222 | { | 5220 | { |
5223 | unsigned long flags; | 5221 | unsigned long flags; |
5224 | int ret; | 5222 | int ret; |
5225 | 5223 | ||
5226 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 5224 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); |
5227 | cmd->t_state = TRANSPORT_WRITE_PENDING; | 5225 | cmd->t_state = TRANSPORT_WRITE_PENDING; |
5228 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 5226 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); |
5229 | /* | 5227 | /* |
5230 | * For the TCM control CDBs using a contiguous buffer, do the memcpy | 5228 | * For the TCM control CDBs using a contiguous buffer, do the memcpy |
5231 | * from the passed Linux/SCSI struct scatterlist located at | 5229 | * from the passed Linux/SCSI struct scatterlist located at |
5232 | * T_TASK(se_cmd)->t_task_pt_buf to the contiguous buffer at | 5230 | * T_TASK(se_cmd)->t_task_pt_buf to the contiguous buffer at |
5233 | * T_TASK(se_cmd)->t_task_buf. | 5231 | * T_TASK(se_cmd)->t_task_buf. |
5234 | */ | 5232 | */ |
5235 | if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG) | 5233 | if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG) |
5236 | transport_memcpy_read_contig(cmd, | 5234 | transport_memcpy_read_contig(cmd, |
5237 | T_TASK(cmd)->t_task_buf, | 5235 | T_TASK(cmd)->t_task_buf, |
5238 | T_TASK(cmd)->t_task_pt_sgl); | 5236 | T_TASK(cmd)->t_task_pt_sgl); |
5239 | /* | 5237 | /* |
5240 | * Clear the se_cmd for WRITE_PENDING status in order to set | 5238 | * Clear the se_cmd for WRITE_PENDING status in order to set |
5241 | * T_TASK(cmd)->t_transport_active=0 so that transport_generic_handle_data | 5239 | * T_TASK(cmd)->t_transport_active=0 so that transport_generic_handle_data |
5242 | * can be called from HW target mode interrupt code. This is safe | 5240 | * can be called from HW target mode interrupt code. This is safe |
5243 | * to be called with transport_off=1 before the CMD_TFO(cmd)->write_pending | 5241 | * to be called with transport_off=1 before the CMD_TFO(cmd)->write_pending |
5244 | * because the se_cmd->se_lun pointer is not being cleared. | 5242 | * because the se_cmd->se_lun pointer is not being cleared. |
5245 | */ | 5243 | */ |
5246 | transport_cmd_check_stop(cmd, 1, 0); | 5244 | transport_cmd_check_stop(cmd, 1, 0); |
5247 | 5245 | ||
5248 | /* | 5246 | /* |
5249 | * Call the fabric write_pending function here to let the | 5247 | * Call the fabric write_pending function here to let the |
5250 | * frontend know that WRITE buffers are ready. | 5248 | * frontend know that WRITE buffers are ready. |
5251 | */ | 5249 | */ |
5252 | ret = CMD_TFO(cmd)->write_pending(cmd); | 5250 | ret = CMD_TFO(cmd)->write_pending(cmd); |
5253 | if (ret < 0) | 5251 | if (ret < 0) |
5254 | return ret; | 5252 | return ret; |
5255 | 5253 | ||
5256 | return PYX_TRANSPORT_WRITE_PENDING; | 5254 | return PYX_TRANSPORT_WRITE_PENDING; |
5257 | } | 5255 | } |
5258 | 5256 | ||
5259 | /* transport_release_cmd_to_pool(): | 5257 | /* transport_release_cmd_to_pool(): |
5260 | * | 5258 | * |
5261 | * | 5259 | * |
5262 | */ | 5260 | */ |
5263 | void transport_release_cmd_to_pool(struct se_cmd *cmd) | 5261 | void transport_release_cmd_to_pool(struct se_cmd *cmd) |
5264 | { | 5262 | { |
5265 | BUG_ON(!T_TASK(cmd)); | 5263 | BUG_ON(!T_TASK(cmd)); |
5266 | BUG_ON(!CMD_TFO(cmd)); | 5264 | BUG_ON(!CMD_TFO(cmd)); |
5267 | 5265 | ||
5268 | transport_free_se_cmd(cmd); | 5266 | transport_free_se_cmd(cmd); |
5269 | CMD_TFO(cmd)->release_cmd_to_pool(cmd); | 5267 | CMD_TFO(cmd)->release_cmd_to_pool(cmd); |
5270 | } | 5268 | } |
5271 | EXPORT_SYMBOL(transport_release_cmd_to_pool); | 5269 | EXPORT_SYMBOL(transport_release_cmd_to_pool); |
5272 | 5270 | ||
5273 | /* transport_generic_free_cmd(): | 5271 | /* transport_generic_free_cmd(): |
5274 | * | 5272 | * |
5275 | * Called from processing frontend to release storage engine resources | 5273 | * Called from processing frontend to release storage engine resources |
5276 | */ | 5274 | */ |
5277 | void transport_generic_free_cmd( | 5275 | void transport_generic_free_cmd( |
5278 | struct se_cmd *cmd, | 5276 | struct se_cmd *cmd, |
5279 | int wait_for_tasks, | 5277 | int wait_for_tasks, |
5280 | int release_to_pool, | 5278 | int release_to_pool, |
5281 | int session_reinstatement) | 5279 | int session_reinstatement) |
5282 | { | 5280 | { |
5283 | if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) || !T_TASK(cmd)) | 5281 | if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) || !T_TASK(cmd)) |
5284 | transport_release_cmd_to_pool(cmd); | 5282 | transport_release_cmd_to_pool(cmd); |
5285 | else { | 5283 | else { |
5286 | core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd); | 5284 | core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd); |
5287 | 5285 | ||
5288 | if (SE_LUN(cmd)) { | 5286 | if (SE_LUN(cmd)) { |
5289 | #if 0 | 5287 | #if 0 |
5290 | printk(KERN_INFO "cmd: %p ITT: 0x%08x contains" | 5288 | printk(KERN_INFO "cmd: %p ITT: 0x%08x contains" |
5291 | " SE_LUN(cmd)\n", cmd, | 5289 | " SE_LUN(cmd)\n", cmd, |
5292 | CMD_TFO(cmd)->get_task_tag(cmd)); | 5290 | CMD_TFO(cmd)->get_task_tag(cmd)); |
5293 | #endif | 5291 | #endif |
5294 | transport_lun_remove_cmd(cmd); | 5292 | transport_lun_remove_cmd(cmd); |
5295 | } | 5293 | } |
5296 | 5294 | ||
5297 | if (wait_for_tasks && cmd->transport_wait_for_tasks) | 5295 | if (wait_for_tasks && cmd->transport_wait_for_tasks) |
5298 | cmd->transport_wait_for_tasks(cmd, 0, 0); | 5296 | cmd->transport_wait_for_tasks(cmd, 0, 0); |
5299 | 5297 | ||
5300 | transport_generic_remove(cmd, release_to_pool, | 5298 | transport_generic_remove(cmd, release_to_pool, |
5301 | session_reinstatement); | 5299 | session_reinstatement); |
5302 | } | 5300 | } |
5303 | } | 5301 | } |
5304 | EXPORT_SYMBOL(transport_generic_free_cmd); | 5302 | EXPORT_SYMBOL(transport_generic_free_cmd); |
5305 | 5303 | ||
5306 | static void transport_nop_wait_for_tasks( | 5304 | static void transport_nop_wait_for_tasks( |
5307 | struct se_cmd *cmd, | 5305 | struct se_cmd *cmd, |
5308 | int remove_cmd, | 5306 | int remove_cmd, |
5309 | int session_reinstatement) | 5307 | int session_reinstatement) |
5310 | { | 5308 | { |
5311 | return; | 5309 | return; |
5312 | } | 5310 | } |
5313 | 5311 | ||
5314 | /* transport_lun_wait_for_tasks(): | 5312 | /* transport_lun_wait_for_tasks(): |
5315 | * | 5313 | * |
5316 | * Called from ConfigFS context to stop the passed struct se_cmd to allow | 5314 | * Called from ConfigFS context to stop the passed struct se_cmd to allow |
5317 | * an struct se_lun to be successfully shutdown. | 5315 | * an struct se_lun to be successfully shutdown. |
5318 | */ | 5316 | */ |
5319 | static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun) | 5317 | static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun) |
5320 | { | 5318 | { |
5321 | unsigned long flags; | 5319 | unsigned long flags; |
5322 | int ret; | 5320 | int ret; |
5323 | /* | 5321 | /* |
5324 | * If the frontend has already requested this struct se_cmd to | 5322 | * If the frontend has already requested this struct se_cmd to |
5325 | * be stopped, we can safely ignore this struct se_cmd. | 5323 | * be stopped, we can safely ignore this struct se_cmd. |
5326 | */ | 5324 | */ |
5327 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 5325 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); |
5328 | if (atomic_read(&T_TASK(cmd)->t_transport_stop)) { | 5326 | if (atomic_read(&T_TASK(cmd)->t_transport_stop)) { |
5329 | atomic_set(&T_TASK(cmd)->transport_lun_stop, 0); | 5327 | atomic_set(&T_TASK(cmd)->transport_lun_stop, 0); |
5330 | DEBUG_TRANSPORT_S("ConfigFS ITT[0x%08x] - t_transport_stop ==" | 5328 | DEBUG_TRANSPORT_S("ConfigFS ITT[0x%08x] - t_transport_stop ==" |
5331 | " TRUE, skipping\n", CMD_TFO(cmd)->get_task_tag(cmd)); | 5329 | " TRUE, skipping\n", CMD_TFO(cmd)->get_task_tag(cmd)); |
5332 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 5330 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); |
5333 | transport_cmd_check_stop(cmd, 1, 0); | 5331 | transport_cmd_check_stop(cmd, 1, 0); |
5334 | return -1; | 5332 | return -1; |
5335 | } | 5333 | } |
5336 | atomic_set(&T_TASK(cmd)->transport_lun_fe_stop, 1); | 5334 | atomic_set(&T_TASK(cmd)->transport_lun_fe_stop, 1); |
5337 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 5335 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); |
5338 | 5336 | ||
5339 | wake_up_interruptible(&SE_DEV(cmd)->dev_queue_obj->thread_wq); | 5337 | wake_up_interruptible(&SE_DEV(cmd)->dev_queue_obj->thread_wq); |
5340 | 5338 | ||
5341 | ret = transport_stop_tasks_for_cmd(cmd); | 5339 | ret = transport_stop_tasks_for_cmd(cmd); |
5342 | 5340 | ||
5343 | DEBUG_TRANSPORT_S("ConfigFS: cmd: %p t_task_cdbs: %d stop tasks ret:" | 5341 | DEBUG_TRANSPORT_S("ConfigFS: cmd: %p t_task_cdbs: %d stop tasks ret:" |
5344 | " %d\n", cmd, T_TASK(cmd)->t_task_cdbs, ret); | 5342 | " %d\n", cmd, T_TASK(cmd)->t_task_cdbs, ret); |
5345 | if (!ret) { | 5343 | if (!ret) { |
5346 | DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopping cmd....\n", | 5344 | DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopping cmd....\n", |
5347 | CMD_TFO(cmd)->get_task_tag(cmd)); | 5345 | CMD_TFO(cmd)->get_task_tag(cmd)); |
5348 | wait_for_completion(&T_TASK(cmd)->transport_lun_stop_comp); | 5346 | wait_for_completion(&T_TASK(cmd)->transport_lun_stop_comp); |
5349 | DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopped cmd....\n", | 5347 | DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopped cmd....\n", |
5350 | CMD_TFO(cmd)->get_task_tag(cmd)); | 5348 | CMD_TFO(cmd)->get_task_tag(cmd)); |
5351 | } | 5349 | } |
5352 | transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj); | 5350 | transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj); |
5353 | 5351 | ||
5354 | return 0; | 5352 | return 0; |
5355 | } | 5353 | } |
5356 | 5354 | ||
5357 | /* #define DEBUG_CLEAR_LUN */ | 5355 | /* #define DEBUG_CLEAR_LUN */ |
5358 | #ifdef DEBUG_CLEAR_LUN | 5356 | #ifdef DEBUG_CLEAR_LUN |
5359 | #define DEBUG_CLEAR_L(x...) printk(KERN_INFO x) | 5357 | #define DEBUG_CLEAR_L(x...) printk(KERN_INFO x) |
5360 | #else | 5358 | #else |
5361 | #define DEBUG_CLEAR_L(x...) | 5359 | #define DEBUG_CLEAR_L(x...) |
5362 | #endif | 5360 | #endif |
5363 | 5361 | ||
5364 | static void __transport_clear_lun_from_sessions(struct se_lun *lun) | 5362 | static void __transport_clear_lun_from_sessions(struct se_lun *lun) |
5365 | { | 5363 | { |
5366 | struct se_cmd *cmd = NULL; | 5364 | struct se_cmd *cmd = NULL; |
5367 | unsigned long lun_flags, cmd_flags; | 5365 | unsigned long lun_flags, cmd_flags; |
5368 | /* | 5366 | /* |
5369 | * Do exception processing and return CHECK_CONDITION status to the | 5367 | * Do exception processing and return CHECK_CONDITION status to the |
5370 | * Initiator Port. | 5368 | * Initiator Port. |
5371 | */ | 5369 | */ |
5372 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); | 5370 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); |
5373 | while (!list_empty_careful(&lun->lun_cmd_list)) { | 5371 | while (!list_empty_careful(&lun->lun_cmd_list)) { |
5374 | cmd = list_entry(lun->lun_cmd_list.next, | 5372 | cmd = list_entry(lun->lun_cmd_list.next, |
5375 | struct se_cmd, se_lun_list); | 5373 | struct se_cmd, se_lun_list); |
5376 | list_del(&cmd->se_lun_list); | 5374 | list_del(&cmd->se_lun_list); |
5377 | 5375 | ||
5378 | if (!(T_TASK(cmd))) { | 5376 | if (!(T_TASK(cmd))) { |
5379 | printk(KERN_ERR "ITT: 0x%08x, T_TASK(cmd) = NULL" | 5377 | printk(KERN_ERR "ITT: 0x%08x, T_TASK(cmd) = NULL" |
5380 | "[i,t]_state: %u/%u\n", | 5378 | "[i,t]_state: %u/%u\n", |
5381 | CMD_TFO(cmd)->get_task_tag(cmd), | 5379 | CMD_TFO(cmd)->get_task_tag(cmd), |
5382 | CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state); | 5380 | CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state); |
5383 | BUG(); | 5381 | BUG(); |
5384 | } | 5382 | } |
5385 | atomic_set(&T_TASK(cmd)->transport_lun_active, 0); | 5383 | atomic_set(&T_TASK(cmd)->transport_lun_active, 0); |
5386 | /* | 5384 | /* |
5387 | * This will notify iscsi_target_transport.c: | 5385 | * This will notify iscsi_target_transport.c: |
5388 | * transport_cmd_check_stop() that a LUN shutdown is in | 5386 | * transport_cmd_check_stop() that a LUN shutdown is in |
5389 | * progress for the iscsi_cmd_t. | 5387 | * progress for the iscsi_cmd_t. |
5390 | */ | 5388 | */ |
5391 | spin_lock(&T_TASK(cmd)->t_state_lock); | 5389 | spin_lock(&T_TASK(cmd)->t_state_lock); |
5392 | DEBUG_CLEAR_L("SE_LUN[%d] - Setting T_TASK(cmd)->transport" | 5390 | DEBUG_CLEAR_L("SE_LUN[%d] - Setting T_TASK(cmd)->transport" |
5393 | "_lun_stop for ITT: 0x%08x\n", | 5391 | "_lun_stop for ITT: 0x%08x\n", |
5394 | SE_LUN(cmd)->unpacked_lun, | 5392 | SE_LUN(cmd)->unpacked_lun, |
5395 | CMD_TFO(cmd)->get_task_tag(cmd)); | 5393 | CMD_TFO(cmd)->get_task_tag(cmd)); |
5396 | atomic_set(&T_TASK(cmd)->transport_lun_stop, 1); | 5394 | atomic_set(&T_TASK(cmd)->transport_lun_stop, 1); |
5397 | spin_unlock(&T_TASK(cmd)->t_state_lock); | 5395 | spin_unlock(&T_TASK(cmd)->t_state_lock); |
5398 | 5396 | ||
5399 | spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); | 5397 | spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); |
5400 | 5398 | ||
5401 | if (!(SE_LUN(cmd))) { | 5399 | if (!(SE_LUN(cmd))) { |
5402 | printk(KERN_ERR "ITT: 0x%08x, [i,t]_state: %u/%u\n", | 5400 | printk(KERN_ERR "ITT: 0x%08x, [i,t]_state: %u/%u\n", |
5403 | CMD_TFO(cmd)->get_task_tag(cmd), | 5401 | CMD_TFO(cmd)->get_task_tag(cmd), |
5404 | CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state); | 5402 | CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state); |
5405 | BUG(); | 5403 | BUG(); |
5406 | } | 5404 | } |
5407 | /* | 5405 | /* |
5408 | * If the Storage engine still owns the iscsi_cmd_t, determine | 5406 | * If the Storage engine still owns the iscsi_cmd_t, determine |
5409 | * and/or stop its context. | 5407 | * and/or stop its context. |
5410 | */ | 5408 | */ |
5411 | DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x before transport" | 5409 | DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x before transport" |
5412 | "_lun_wait_for_tasks()\n", SE_LUN(cmd)->unpacked_lun, | 5410 | "_lun_wait_for_tasks()\n", SE_LUN(cmd)->unpacked_lun, |
5413 | CMD_TFO(cmd)->get_task_tag(cmd)); | 5411 | CMD_TFO(cmd)->get_task_tag(cmd)); |
5414 | 5412 | ||
5415 | if (transport_lun_wait_for_tasks(cmd, SE_LUN(cmd)) < 0) { | 5413 | if (transport_lun_wait_for_tasks(cmd, SE_LUN(cmd)) < 0) { |
5416 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); | 5414 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); |
5417 | continue; | 5415 | continue; |
5418 | } | 5416 | } |
5419 | 5417 | ||
5420 | DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x after transport_lun" | 5418 | DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x after transport_lun" |
5421 | "_wait_for_tasks(): SUCCESS\n", | 5419 | "_wait_for_tasks(): SUCCESS\n", |
5422 | SE_LUN(cmd)->unpacked_lun, | 5420 | SE_LUN(cmd)->unpacked_lun, |
5423 | CMD_TFO(cmd)->get_task_tag(cmd)); | 5421 | CMD_TFO(cmd)->get_task_tag(cmd)); |
5424 | 5422 | ||
5425 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, cmd_flags); | 5423 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, cmd_flags); |
5426 | if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) { | 5424 | if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) { |
5427 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags); | 5425 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags); |
5428 | goto check_cond; | 5426 | goto check_cond; |
5429 | } | 5427 | } |
5430 | atomic_set(&T_TASK(cmd)->transport_dev_active, 0); | 5428 | atomic_set(&T_TASK(cmd)->transport_dev_active, 0); |
5431 | transport_all_task_dev_remove_state(cmd); | 5429 | transport_all_task_dev_remove_state(cmd); |
5432 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags); | 5430 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags); |
5433 | 5431 | ||
5434 | transport_free_dev_tasks(cmd); | 5432 | transport_free_dev_tasks(cmd); |
5435 | /* | 5433 | /* |
5436 | * The Storage engine stopped this struct se_cmd before it was | 5434 | * The Storage engine stopped this struct se_cmd before it was |
5437 | * sent to the fabric frontend for delivery back to the | 5435 | * sent to the fabric frontend for delivery back to the |
5438 | * Initiator Node. Return this SCSI CDB back with a | 5436 | * Initiator Node. Return this SCSI CDB back with a |
5439 | * CHECK_CONDITION status. | 5437 | * CHECK_CONDITION status. |
5440 | */ | 5438 | */ |
5441 | check_cond: | 5439 | check_cond: |
5442 | transport_send_check_condition_and_sense(cmd, | 5440 | transport_send_check_condition_and_sense(cmd, |
5443 | TCM_NON_EXISTENT_LUN, 0); | 5441 | TCM_NON_EXISTENT_LUN, 0); |
5444 | /* | 5442 | /* |
5445 | * If the fabric frontend is waiting for this iscsi_cmd_t to | 5443 | * If the fabric frontend is waiting for this iscsi_cmd_t to |
5446 | * be released, notify the waiting thread now that the LU has | 5444 | * be released, notify the waiting thread now that the LU has |
5447 | * finished accessing it. | 5445 | * finished accessing it. |
5448 | */ | 5446 | */ |
5449 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, cmd_flags); | 5447 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, cmd_flags); |
5450 | if (atomic_read(&T_TASK(cmd)->transport_lun_fe_stop)) { | 5448 | if (atomic_read(&T_TASK(cmd)->transport_lun_fe_stop)) { |
5451 | DEBUG_CLEAR_L("SE_LUN[%d] - Detected FE stop for" | 5449 | DEBUG_CLEAR_L("SE_LUN[%d] - Detected FE stop for" |
5452 | " struct se_cmd: %p ITT: 0x%08x\n", | 5450 | " struct se_cmd: %p ITT: 0x%08x\n", |
5453 | lun->unpacked_lun, | 5451 | lun->unpacked_lun, |
5454 | cmd, CMD_TFO(cmd)->get_task_tag(cmd)); | 5452 | cmd, CMD_TFO(cmd)->get_task_tag(cmd)); |
5455 | 5453 | ||
5456 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, | 5454 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, |
5457 | cmd_flags); | 5455 | cmd_flags); |
5458 | transport_cmd_check_stop(cmd, 1, 0); | 5456 | transport_cmd_check_stop(cmd, 1, 0); |
5459 | complete(&T_TASK(cmd)->transport_lun_fe_stop_comp); | 5457 | complete(&T_TASK(cmd)->transport_lun_fe_stop_comp); |
5460 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); | 5458 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); |
5461 | continue; | 5459 | continue; |
5462 | } | 5460 | } |
5463 | DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x finished processing\n", | 5461 | DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x finished processing\n", |
5464 | lun->unpacked_lun, CMD_TFO(cmd)->get_task_tag(cmd)); | 5462 | lun->unpacked_lun, CMD_TFO(cmd)->get_task_tag(cmd)); |
5465 | 5463 | ||
5466 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags); | 5464 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags); |
5467 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); | 5465 | spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); |
5468 | } | 5466 | } |
5469 | spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); | 5467 | spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); |
5470 | } | 5468 | } |
5471 | 5469 | ||
5472 | static int transport_clear_lun_thread(void *p) | 5470 | static int transport_clear_lun_thread(void *p) |
5473 | { | 5471 | { |
5474 | struct se_lun *lun = (struct se_lun *)p; | 5472 | struct se_lun *lun = (struct se_lun *)p; |
5475 | 5473 | ||
5476 | __transport_clear_lun_from_sessions(lun); | 5474 | __transport_clear_lun_from_sessions(lun); |
5477 | complete(&lun->lun_shutdown_comp); | 5475 | complete(&lun->lun_shutdown_comp); |
5478 | 5476 | ||
5479 | return 0; | 5477 | return 0; |
5480 | } | 5478 | } |
5481 | 5479 | ||
5482 | int transport_clear_lun_from_sessions(struct se_lun *lun) | 5480 | int transport_clear_lun_from_sessions(struct se_lun *lun) |
5483 | { | 5481 | { |
5484 | struct task_struct *kt; | 5482 | struct task_struct *kt; |
5485 | 5483 | ||
5486 | kt = kthread_run(transport_clear_lun_thread, (void *)lun, | 5484 | kt = kthread_run(transport_clear_lun_thread, (void *)lun, |
5487 | "tcm_cl_%u", lun->unpacked_lun); | 5485 | "tcm_cl_%u", lun->unpacked_lun); |
5488 | if (IS_ERR(kt)) { | 5486 | if (IS_ERR(kt)) { |
5489 | printk(KERN_ERR "Unable to start clear_lun thread\n"); | 5487 | printk(KERN_ERR "Unable to start clear_lun thread\n"); |
5490 | return -1; | 5488 | return -1; |
5491 | } | 5489 | } |
5492 | wait_for_completion(&lun->lun_shutdown_comp); | 5490 | wait_for_completion(&lun->lun_shutdown_comp); |
5493 | 5491 | ||
5494 | return 0; | 5492 | return 0; |
5495 | } | 5493 | } |
5496 | 5494 | ||
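transport_clear_lun_from_sessions() above is a small instance of the run-a-thread-and-wait idiom: spawn a short-lived kthread and block on a completion until it signals that the work is done. A minimal sketch of that pattern, using hypothetical demo_* names rather than anything from this driver:

    #include <linux/kthread.h>
    #include <linux/completion.h>
    #include <linux/err.h>

    struct demo_ctx {
            struct completion done;
    };

    static int demo_thread(void *p)
    {
            struct demo_ctx *ctx = p;

            /* ... do the actual shutdown work here ... */
            complete(&ctx->done);           /* wake the caller blocked below */
            return 0;
    }

    static int demo_run_and_wait(struct demo_ctx *ctx)
    {
            struct task_struct *kt;

            init_completion(&ctx->done);
            kt = kthread_run(demo_thread, ctx, "demo_worker");
            if (IS_ERR(kt))
                    return PTR_ERR(kt);
            /* block until demo_thread() has called complete() */
            wait_for_completion(&ctx->done);
            return 0;
    }

Returning PTR_ERR() on kthread_run() failure would also propagate the real error code, whereas the function above simply returns -1.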
5497 | /* transport_generic_wait_for_tasks(): | 5495 | /* transport_generic_wait_for_tasks(): |
5498 | * | 5496 | * |
5499 | * Called from frontend or passthrough context to wait for storage engine | 5497 | * Called from frontend or passthrough context to wait for storage engine |
5500 | * to pause and/or release frontend generated struct se_cmd. | 5498 | * to pause and/or release frontend generated struct se_cmd. |
5501 | */ | 5499 | */ |
5502 | static void transport_generic_wait_for_tasks( | 5500 | static void transport_generic_wait_for_tasks( |
5503 | struct se_cmd *cmd, | 5501 | struct se_cmd *cmd, |
5504 | int remove_cmd, | 5502 | int remove_cmd, |
5505 | int session_reinstatement) | 5503 | int session_reinstatement) |
5506 | { | 5504 | { |
5507 | unsigned long flags; | 5505 | unsigned long flags; |
5508 | 5506 | ||
5509 | if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) | 5507 | if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) |
5510 | return; | 5508 | return; |
5511 | 5509 | ||
5512 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 5510 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); |
5513 | /* | 5511 | /* |
5514 | * If we are already stopped due to an external event (i.e. LUN shutdown) | 5512 | * If we are already stopped due to an external event (i.e. LUN shutdown) |
5515 | * sleep until the connection can have the passed struct se_cmd back. | 5513 | * sleep until the connection can have the passed struct se_cmd back. |
5516 | * The T_TASK(cmd)->transport_lun_fe_stop_comp will be completed by | 5514 | * The T_TASK(cmd)->transport_lun_fe_stop_comp will be completed by |
5517 | * transport_clear_lun_from_sessions() once the ConfigFS context caller | 5515 | * transport_clear_lun_from_sessions() once the ConfigFS context caller |
5518 | * has completed its operation on the struct se_cmd. | 5516 | * has completed its operation on the struct se_cmd. |
5519 | */ | 5517 | */ |
5520 | if (atomic_read(&T_TASK(cmd)->transport_lun_stop)) { | 5518 | if (atomic_read(&T_TASK(cmd)->transport_lun_stop)) { |
5521 | 5519 | ||
5522 | DEBUG_TRANSPORT_S("wait_for_tasks: Stopping" | 5520 | DEBUG_TRANSPORT_S("wait_for_tasks: Stopping" |
5523 | " wait_for_completion(&T_TASK(cmd)->transport_lun_fe" | 5521 | " wait_for_completion(&T_TASK(cmd)->transport_lun_fe" |
5524 | "_stop_comp); for ITT: 0x%08x\n", | 5522 | "_stop_comp); for ITT: 0x%08x\n", |
5525 | CMD_TFO(cmd)->get_task_tag(cmd)); | 5523 | CMD_TFO(cmd)->get_task_tag(cmd)); |
5526 | /* | 5524 | /* |
5527 | * There is a special case for WRITES where a FE exception + | 5525 | * There is a special case for WRITES where a FE exception + |
5528 | * LUN shutdown means the ConfigFS context is still sleeping on | 5526 | * LUN shutdown means the ConfigFS context is still sleeping on |
5529 | * transport_lun_stop_comp in transport_lun_wait_for_tasks(). | 5527 | * transport_lun_stop_comp in transport_lun_wait_for_tasks(). |
5530 | * We go ahead and up transport_lun_stop_comp just to be sure | 5528 | * We go ahead and up transport_lun_stop_comp just to be sure |
5531 | * here. | 5529 | * here. |
5532 | */ | 5530 | */ |
5533 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 5531 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); |
5534 | complete(&T_TASK(cmd)->transport_lun_stop_comp); | 5532 | complete(&T_TASK(cmd)->transport_lun_stop_comp); |
5535 | wait_for_completion(&T_TASK(cmd)->transport_lun_fe_stop_comp); | 5533 | wait_for_completion(&T_TASK(cmd)->transport_lun_fe_stop_comp); |
5536 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 5534 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); |
5537 | 5535 | ||
5538 | transport_all_task_dev_remove_state(cmd); | 5536 | transport_all_task_dev_remove_state(cmd); |
5539 | /* | 5537 | /* |
5540 | * At this point, the frontend that originated this struct se_cmd | 5538 | * At this point, the frontend that originated this struct se_cmd |
5541 | * owns the structure again, and it can be released through the | 5539 | * owns the structure again, and it can be released through the |
5542 | * normal means below. | 5540 | * normal means below. |
5543 | */ | 5541 | */ |
5544 | DEBUG_TRANSPORT_S("wait_for_tasks: Stopped" | 5542 | DEBUG_TRANSPORT_S("wait_for_tasks: Stopped" |
5545 | " wait_for_completion(&T_TASK(cmd)->transport_lun_fe_" | 5543 | " wait_for_completion(&T_TASK(cmd)->transport_lun_fe_" |
5546 | "stop_comp); for ITT: 0x%08x\n", | 5544 | "stop_comp); for ITT: 0x%08x\n", |
5547 | CMD_TFO(cmd)->get_task_tag(cmd)); | 5545 | CMD_TFO(cmd)->get_task_tag(cmd)); |
5548 | 5546 | ||
5549 | atomic_set(&T_TASK(cmd)->transport_lun_stop, 0); | 5547 | atomic_set(&T_TASK(cmd)->transport_lun_stop, 0); |
5550 | } | 5548 | } |
5551 | if (!atomic_read(&T_TASK(cmd)->t_transport_active) || | 5549 | if (!atomic_read(&T_TASK(cmd)->t_transport_active) || |
5552 | atomic_read(&T_TASK(cmd)->t_transport_aborted)) | 5550 | atomic_read(&T_TASK(cmd)->t_transport_aborted)) |
5553 | goto remove; | 5551 | goto remove; |
5554 | 5552 | ||
5555 | atomic_set(&T_TASK(cmd)->t_transport_stop, 1); | 5553 | atomic_set(&T_TASK(cmd)->t_transport_stop, 1); |
5556 | 5554 | ||
5557 | DEBUG_TRANSPORT_S("wait_for_tasks: Stopping %p ITT: 0x%08x" | 5555 | DEBUG_TRANSPORT_S("wait_for_tasks: Stopping %p ITT: 0x%08x" |
5558 | " i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop" | 5556 | " i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop" |
5559 | " = TRUE\n", cmd, CMD_TFO(cmd)->get_task_tag(cmd), | 5557 | " = TRUE\n", cmd, CMD_TFO(cmd)->get_task_tag(cmd), |
5560 | CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state, | 5558 | CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state, |
5561 | cmd->deferred_t_state); | 5559 | cmd->deferred_t_state); |
5562 | 5560 | ||
5563 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 5561 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); |
5564 | 5562 | ||
5565 | wake_up_interruptible(&SE_DEV(cmd)->dev_queue_obj->thread_wq); | 5563 | wake_up_interruptible(&SE_DEV(cmd)->dev_queue_obj->thread_wq); |
5566 | 5564 | ||
5567 | wait_for_completion(&T_TASK(cmd)->t_transport_stop_comp); | 5565 | wait_for_completion(&T_TASK(cmd)->t_transport_stop_comp); |
5568 | 5566 | ||
5569 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 5567 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); |
5570 | atomic_set(&T_TASK(cmd)->t_transport_active, 0); | 5568 | atomic_set(&T_TASK(cmd)->t_transport_active, 0); |
5571 | atomic_set(&T_TASK(cmd)->t_transport_stop, 0); | 5569 | atomic_set(&T_TASK(cmd)->t_transport_stop, 0); |
5572 | 5570 | ||
5573 | DEBUG_TRANSPORT_S("wait_for_tasks: Stopped wait_for_completion(" | 5571 | DEBUG_TRANSPORT_S("wait_for_tasks: Stopped wait_for_completion(" |
5574 | "&T_TASK(cmd)->t_transport_stop_comp) for ITT: 0x%08x\n", | 5572 | "&T_TASK(cmd)->t_transport_stop_comp) for ITT: 0x%08x\n", |
5575 | CMD_TFO(cmd)->get_task_tag(cmd)); | 5573 | CMD_TFO(cmd)->get_task_tag(cmd)); |
5576 | remove: | 5574 | remove: |
5577 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 5575 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); |
5578 | if (!remove_cmd) | 5576 | if (!remove_cmd) |
5579 | return; | 5577 | return; |
5580 | 5578 | ||
5581 | transport_generic_free_cmd(cmd, 0, 0, session_reinstatement); | 5579 | transport_generic_free_cmd(cmd, 0, 0, session_reinstatement); |
5582 | } | 5580 | } |
5583 | 5581 | ||
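The body above follows a stop handshake that recurs throughout this file: raise a stop flag under t_state_lock, wake the processing thread, sleep on a completion that the worker fires once it has quiesced, then clear the flags. A hedged sketch of the two halves, with illustrative demo_* names:

    #include <linux/spinlock.h>
    #include <linux/completion.h>
    #include <linux/wait.h>
    #include <linux/atomic.h>

    struct demo_state {
            spinlock_t lock;
            atomic_t stop;
            wait_queue_head_t wq;
            struct completion stop_comp;
    };

    /* requester side: raise the stop flag, kick the worker, wait for the ack */
    static void demo_request_stop(struct demo_state *s)
    {
            unsigned long flags;

            spin_lock_irqsave(&s->lock, flags);
            atomic_set(&s->stop, 1);                /* worker polls this flag */
            spin_unlock_irqrestore(&s->lock, flags);

            wake_up_interruptible(&s->wq);          /* wake the processing thread */
            wait_for_completion(&s->stop_comp);     /* sleep until it acknowledges */

            atomic_set(&s->stop, 0);                /* re-arm for the next caller */
    }

    /* worker side: acknowledge a pending stop request when it is observed */
    static void demo_ack_stop(struct demo_state *s)
    {
            if (atomic_read(&s->stop))
                    complete(&s->stop_comp);
    }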
5584 | static int transport_get_sense_codes( | 5582 | static int transport_get_sense_codes( |
5585 | struct se_cmd *cmd, | 5583 | struct se_cmd *cmd, |
5586 | u8 *asc, | 5584 | u8 *asc, |
5587 | u8 *ascq) | 5585 | u8 *ascq) |
5588 | { | 5586 | { |
5589 | *asc = cmd->scsi_asc; | 5587 | *asc = cmd->scsi_asc; |
5590 | *ascq = cmd->scsi_ascq; | 5588 | *ascq = cmd->scsi_ascq; |
5591 | 5589 | ||
5592 | return 0; | 5590 | return 0; |
5593 | } | 5591 | } |
5594 | 5592 | ||
5595 | static int transport_set_sense_codes( | 5593 | static int transport_set_sense_codes( |
5596 | struct se_cmd *cmd, | 5594 | struct se_cmd *cmd, |
5597 | u8 asc, | 5595 | u8 asc, |
5598 | u8 ascq) | 5596 | u8 ascq) |
5599 | { | 5597 | { |
5600 | cmd->scsi_asc = asc; | 5598 | cmd->scsi_asc = asc; |
5601 | cmd->scsi_ascq = ascq; | 5599 | cmd->scsi_ascq = ascq; |
5602 | 5600 | ||
5603 | return 0; | 5601 | return 0; |
5604 | } | 5602 | } |
5605 | 5603 | ||
5606 | int transport_send_check_condition_and_sense( | 5604 | int transport_send_check_condition_and_sense( |
5607 | struct se_cmd *cmd, | 5605 | struct se_cmd *cmd, |
5608 | u8 reason, | 5606 | u8 reason, |
5609 | int from_transport) | 5607 | int from_transport) |
5610 | { | 5608 | { |
5611 | unsigned char *buffer = cmd->sense_buffer; | 5609 | unsigned char *buffer = cmd->sense_buffer; |
5612 | unsigned long flags; | 5610 | unsigned long flags; |
5613 | int offset; | 5611 | int offset; |
5614 | u8 asc = 0, ascq = 0; | 5612 | u8 asc = 0, ascq = 0; |
5615 | 5613 | ||
5616 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 5614 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); |
5617 | if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { | 5615 | if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { |
5618 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 5616 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); |
5619 | return 0; | 5617 | return 0; |
5620 | } | 5618 | } |
5621 | cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION; | 5619 | cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION; |
5622 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 5620 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); |
5623 | 5621 | ||
5624 | if (!reason && from_transport) | 5622 | if (!reason && from_transport) |
5625 | goto after_reason; | 5623 | goto after_reason; |
5626 | 5624 | ||
5627 | if (!from_transport) | 5625 | if (!from_transport) |
5628 | cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE; | 5626 | cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE; |
5629 | /* | 5627 | /* |
5630 | * Data Segment and SenseLength of the fabric response PDU. | 5628 | * Data Segment and SenseLength of the fabric response PDU. |
5631 | * | 5629 | * |
5632 | * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE | 5630 | * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE |
5633 | * from include/scsi/scsi_cmnd.h | 5631 | * from include/scsi/scsi_cmnd.h |
5634 | */ | 5632 | */ |
5635 | offset = CMD_TFO(cmd)->set_fabric_sense_len(cmd, | 5633 | offset = CMD_TFO(cmd)->set_fabric_sense_len(cmd, |
5636 | TRANSPORT_SENSE_BUFFER); | 5634 | TRANSPORT_SENSE_BUFFER); |
5637 | /* | 5635 | /* |
5638 | * Actual SENSE DATA, see SPC-3 7.23.2 SPC_SENSE_KEY_OFFSET uses | 5636 | * Actual SENSE DATA, see SPC-3 7.23.2 SPC_SENSE_KEY_OFFSET uses |
5639 | * SENSE KEY values from include/scsi/scsi.h | 5637 | * SENSE KEY values from include/scsi/scsi.h |
5640 | */ | 5638 | */ |
5641 | switch (reason) { | 5639 | switch (reason) { |
5642 | case TCM_NON_EXISTENT_LUN: | 5640 | case TCM_NON_EXISTENT_LUN: |
5643 | case TCM_UNSUPPORTED_SCSI_OPCODE: | 5641 | case TCM_UNSUPPORTED_SCSI_OPCODE: |
5644 | case TCM_SECTOR_COUNT_TOO_MANY: | 5642 | case TCM_SECTOR_COUNT_TOO_MANY: |
5645 | /* CURRENT ERROR */ | 5643 | /* CURRENT ERROR */ |
5646 | buffer[offset] = 0x70; | 5644 | buffer[offset] = 0x70; |
5647 | /* ILLEGAL REQUEST */ | 5645 | /* ILLEGAL REQUEST */ |
5648 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; | 5646 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; |
5649 | /* INVALID COMMAND OPERATION CODE */ | 5647 | /* INVALID COMMAND OPERATION CODE */ |
5650 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x20; | 5648 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x20; |
5651 | break; | 5649 | break; |
5652 | case TCM_UNKNOWN_MODE_PAGE: | 5650 | case TCM_UNKNOWN_MODE_PAGE: |
5653 | /* CURRENT ERROR */ | 5651 | /* CURRENT ERROR */ |
5654 | buffer[offset] = 0x70; | 5652 | buffer[offset] = 0x70; |
5655 | /* ILLEGAL REQUEST */ | 5653 | /* ILLEGAL REQUEST */ |
5656 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; | 5654 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; |
5657 | /* INVALID FIELD IN CDB */ | 5655 | /* INVALID FIELD IN CDB */ |
5658 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24; | 5656 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24; |
5659 | break; | 5657 | break; |
5660 | case TCM_CHECK_CONDITION_ABORT_CMD: | 5658 | case TCM_CHECK_CONDITION_ABORT_CMD: |
5661 | /* CURRENT ERROR */ | 5659 | /* CURRENT ERROR */ |
5662 | buffer[offset] = 0x70; | 5660 | buffer[offset] = 0x70; |
5663 | /* ABORTED COMMAND */ | 5661 | /* ABORTED COMMAND */ |
5664 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | 5662 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; |
5665 | /* BUS DEVICE RESET FUNCTION OCCURRED */ | 5663 | /* BUS DEVICE RESET FUNCTION OCCURRED */ |
5666 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x29; | 5664 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x29; |
5667 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x03; | 5665 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x03; |
5668 | break; | 5666 | break; |
5669 | case TCM_INCORRECT_AMOUNT_OF_DATA: | 5667 | case TCM_INCORRECT_AMOUNT_OF_DATA: |
5670 | /* CURRENT ERROR */ | 5668 | /* CURRENT ERROR */ |
5671 | buffer[offset] = 0x70; | 5669 | buffer[offset] = 0x70; |
5672 | /* ABORTED COMMAND */ | 5670 | /* ABORTED COMMAND */ |
5673 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | 5671 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; |
5674 | /* WRITE ERROR */ | 5672 | /* WRITE ERROR */ |
5675 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c; | 5673 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c; |
5676 | /* NOT ENOUGH UNSOLICITED DATA */ | 5674 | /* NOT ENOUGH UNSOLICITED DATA */ |
5677 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0d; | 5675 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0d; |
5678 | break; | 5676 | break; |
5679 | case TCM_INVALID_CDB_FIELD: | 5677 | case TCM_INVALID_CDB_FIELD: |
5680 | /* CURRENT ERROR */ | 5678 | /* CURRENT ERROR */ |
5681 | buffer[offset] = 0x70; | 5679 | buffer[offset] = 0x70; |
5682 | /* ABORTED COMMAND */ | 5680 | /* ABORTED COMMAND */ |
5683 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | 5681 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; |
5684 | /* INVALID FIELD IN CDB */ | 5682 | /* INVALID FIELD IN CDB */ |
5685 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24; | 5683 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24; |
5686 | break; | 5684 | break; |
5687 | case TCM_INVALID_PARAMETER_LIST: | 5685 | case TCM_INVALID_PARAMETER_LIST: |
5688 | /* CURRENT ERROR */ | 5686 | /* CURRENT ERROR */ |
5689 | buffer[offset] = 0x70; | 5687 | buffer[offset] = 0x70; |
5690 | /* ABORTED COMMAND */ | 5688 | /* ABORTED COMMAND */ |
5691 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | 5689 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; |
5692 | /* INVALID FIELD IN PARAMETER LIST */ | 5690 | /* INVALID FIELD IN PARAMETER LIST */ |
5693 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26; | 5691 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26; |
5694 | break; | 5692 | break; |
5695 | case TCM_UNEXPECTED_UNSOLICITED_DATA: | 5693 | case TCM_UNEXPECTED_UNSOLICITED_DATA: |
5696 | /* CURRENT ERROR */ | 5694 | /* CURRENT ERROR */ |
5697 | buffer[offset] = 0x70; | 5695 | buffer[offset] = 0x70; |
5698 | /* ABORTED COMMAND */ | 5696 | /* ABORTED COMMAND */ |
5699 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | 5697 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; |
5700 | /* WRITE ERROR */ | 5698 | /* WRITE ERROR */ |
5701 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c; | 5699 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c; |
5702 | /* UNEXPECTED_UNSOLICITED_DATA */ | 5700 | /* UNEXPECTED_UNSOLICITED_DATA */ |
5703 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0c; | 5701 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0c; |
5704 | break; | 5702 | break; |
5705 | case TCM_SERVICE_CRC_ERROR: | 5703 | case TCM_SERVICE_CRC_ERROR: |
5706 | /* CURRENT ERROR */ | 5704 | /* CURRENT ERROR */ |
5707 | buffer[offset] = 0x70; | 5705 | buffer[offset] = 0x70; |
5708 | /* ABORTED COMMAND */ | 5706 | /* ABORTED COMMAND */ |
5709 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | 5707 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; |
5710 | /* PROTOCOL SERVICE CRC ERROR */ | 5708 | /* PROTOCOL SERVICE CRC ERROR */ |
5711 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x47; | 5709 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x47; |
5712 | /* N/A */ | 5710 | /* N/A */ |
5713 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x05; | 5711 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x05; |
5714 | break; | 5712 | break; |
5715 | case TCM_SNACK_REJECTED: | 5713 | case TCM_SNACK_REJECTED: |
5716 | /* CURRENT ERROR */ | 5714 | /* CURRENT ERROR */ |
5717 | buffer[offset] = 0x70; | 5715 | buffer[offset] = 0x70; |
5718 | /* ABORTED COMMAND */ | 5716 | /* ABORTED COMMAND */ |
5719 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; | 5717 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; |
5720 | /* READ ERROR */ | 5718 | /* READ ERROR */ |
5721 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x11; | 5719 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x11; |
5722 | /* FAILED RETRANSMISSION REQUEST */ | 5720 | /* FAILED RETRANSMISSION REQUEST */ |
5723 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x13; | 5721 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x13; |
5724 | break; | 5722 | break; |
5725 | case TCM_WRITE_PROTECTED: | 5723 | case TCM_WRITE_PROTECTED: |
5726 | /* CURRENT ERROR */ | 5724 | /* CURRENT ERROR */ |
5727 | buffer[offset] = 0x70; | 5725 | buffer[offset] = 0x70; |
5728 | /* DATA PROTECT */ | 5726 | /* DATA PROTECT */ |
5729 | buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT; | 5727 | buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT; |
5730 | /* WRITE PROTECTED */ | 5728 | /* WRITE PROTECTED */ |
5731 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27; | 5729 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27; |
5732 | break; | 5730 | break; |
5733 | case TCM_CHECK_CONDITION_UNIT_ATTENTION: | 5731 | case TCM_CHECK_CONDITION_UNIT_ATTENTION: |
5734 | /* CURRENT ERROR */ | 5732 | /* CURRENT ERROR */ |
5735 | buffer[offset] = 0x70; | 5733 | buffer[offset] = 0x70; |
5736 | /* UNIT ATTENTION */ | 5734 | /* UNIT ATTENTION */ |
5737 | buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION; | 5735 | buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION; |
5738 | core_scsi3_ua_for_check_condition(cmd, &asc, &ascq); | 5736 | core_scsi3_ua_for_check_condition(cmd, &asc, &ascq); |
5739 | buffer[offset+SPC_ASC_KEY_OFFSET] = asc; | 5737 | buffer[offset+SPC_ASC_KEY_OFFSET] = asc; |
5740 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq; | 5738 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq; |
5741 | break; | 5739 | break; |
5742 | case TCM_CHECK_CONDITION_NOT_READY: | 5740 | case TCM_CHECK_CONDITION_NOT_READY: |
5743 | /* CURRENT ERROR */ | 5741 | /* CURRENT ERROR */ |
5744 | buffer[offset] = 0x70; | 5742 | buffer[offset] = 0x70; |
5745 | /* Not Ready */ | 5743 | /* Not Ready */ |
5746 | buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY; | 5744 | buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY; |
5747 | transport_get_sense_codes(cmd, &asc, &ascq); | 5745 | transport_get_sense_codes(cmd, &asc, &ascq); |
5748 | buffer[offset+SPC_ASC_KEY_OFFSET] = asc; | 5746 | buffer[offset+SPC_ASC_KEY_OFFSET] = asc; |
5749 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq; | 5747 | buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq; |
5750 | break; | 5748 | break; |
5751 | case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: | 5749 | case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: |
5752 | default: | 5750 | default: |
5753 | /* CURRENT ERROR */ | 5751 | /* CURRENT ERROR */ |
5754 | buffer[offset] = 0x70; | 5752 | buffer[offset] = 0x70; |
5755 | /* ILLEGAL REQUEST */ | 5753 | /* ILLEGAL REQUEST */ |
5756 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; | 5754 | buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; |
5757 | /* LOGICAL UNIT COMMUNICATION FAILURE */ | 5755 | /* LOGICAL UNIT COMMUNICATION FAILURE */ |
5758 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x80; | 5756 | buffer[offset+SPC_ASC_KEY_OFFSET] = 0x80; |
5759 | break; | 5757 | break; |
5760 | } | 5758 | } |
5761 | /* | 5759 | /* |
5762 | * This code uses linux/include/scsi/scsi.h SAM status codes! | 5760 | * This code uses linux/include/scsi/scsi.h SAM status codes! |
5763 | */ | 5761 | */ |
5764 | cmd->scsi_status = SAM_STAT_CHECK_CONDITION; | 5762 | cmd->scsi_status = SAM_STAT_CHECK_CONDITION; |
5765 | /* | 5763 | /* |
5766 | * Automatically padded, this value is encoded in the fabric's | 5764 | * Automatically padded, this value is encoded in the fabric's |
5767 | * data_length response PDU containing the SCSI-defined sense data. | 5765 | * data_length response PDU containing the SCSI-defined sense data. |
5768 | */ | 5766 | */ |
5769 | cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset; | 5767 | cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset; |
5770 | 5768 | ||
5771 | after_reason: | 5769 | after_reason: |
5772 | CMD_TFO(cmd)->queue_status(cmd); | 5770 | CMD_TFO(cmd)->queue_status(cmd); |
5773 | return 0; | 5771 | return 0; |
5774 | } | 5772 | } |
5775 | EXPORT_SYMBOL(transport_send_check_condition_and_sense); | 5773 | EXPORT_SYMBOL(transport_send_check_condition_and_sense); |
5776 | 5774 | ||
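Every arm of the switch above builds fixed-format sense data as laid out in SPC-3: byte 0 carries the response code (0x70, CURRENT ERROR), byte 2 the sense key, and bytes 12/13 the ASC/ASCQ pair, which is exactly what the SPC_*_OFFSET constants encode. A minimal, hedged helper showing that layout (the demo_* names are not part of the driver):

    #include <linux/types.h>
    #include <linux/string.h>

    #define DEMO_SENSE_KEY_OFFSET   2       /* matches SPC_SENSE_KEY_OFFSET */
    #define DEMO_ASC_OFFSET         12      /* matches SPC_ASC_KEY_OFFSET */
    #define DEMO_ASCQ_OFFSET        13      /* matches SPC_ASCQ_KEY_OFFSET */

    /* fill @buf with fixed-format sense data: CURRENT ERROR + key/asc/ascq */
    static void demo_fill_sense(u8 *buf, size_t len, u8 key, u8 asc, u8 ascq)
    {
            memset(buf, 0, len);
            buf[0] = 0x70;                          /* CURRENT ERROR, fixed format */
            buf[DEMO_SENSE_KEY_OFFSET] = key;       /* e.g. ILLEGAL_REQUEST */
            buf[DEMO_ASC_OFFSET] = asc;             /* additional sense code */
            buf[DEMO_ASCQ_OFFSET] = ascq;           /* additional sense code qualifier */
    }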
5777 | int transport_check_aborted_status(struct se_cmd *cmd, int send_status) | 5775 | int transport_check_aborted_status(struct se_cmd *cmd, int send_status) |
5778 | { | 5776 | { |
5779 | int ret = 0; | 5777 | int ret = 0; |
5780 | 5778 | ||
5781 | if (atomic_read(&T_TASK(cmd)->t_transport_aborted) != 0) { | 5779 | if (atomic_read(&T_TASK(cmd)->t_transport_aborted) != 0) { |
5782 | if (!(send_status) || | 5780 | if (!(send_status) || |
5783 | (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS)) | 5781 | (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS)) |
5784 | return 1; | 5782 | return 1; |
5785 | #if 0 | 5783 | #if 0 |
5786 | printk(KERN_INFO "Sending delayed SAM_STAT_TASK_ABORTED" | 5784 | printk(KERN_INFO "Sending delayed SAM_STAT_TASK_ABORTED" |
5787 | " status for CDB: 0x%02x ITT: 0x%08x\n", | 5785 | " status for CDB: 0x%02x ITT: 0x%08x\n", |
5788 | T_TASK(cmd)->t_task_cdb[0], | 5786 | T_TASK(cmd)->t_task_cdb[0], |
5789 | CMD_TFO(cmd)->get_task_tag(cmd)); | 5787 | CMD_TFO(cmd)->get_task_tag(cmd)); |
5790 | #endif | 5788 | #endif |
5791 | cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS; | 5789 | cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS; |
5792 | CMD_TFO(cmd)->queue_status(cmd); | 5790 | CMD_TFO(cmd)->queue_status(cmd); |
5793 | ret = 1; | 5791 | ret = 1; |
5794 | } | 5792 | } |
5795 | return ret; | 5793 | return ret; |
5796 | } | 5794 | } |
5797 | EXPORT_SYMBOL(transport_check_aborted_status); | 5795 | EXPORT_SYMBOL(transport_check_aborted_status); |
5798 | 5796 | ||
5799 | void transport_send_task_abort(struct se_cmd *cmd) | 5797 | void transport_send_task_abort(struct se_cmd *cmd) |
5800 | { | 5798 | { |
5801 | /* | 5799 | /* |
5802 | * If there are still expected incoming fabric WRITEs, we wait | 5800 | * If there are still expected incoming fabric WRITEs, we wait |
5803 | * until they have completed before sending a TASK_ABORTED | 5801 | * until they have completed before sending a TASK_ABORTED |
5804 | * response. This response with TASK_ABORTED status will be | 5802 | * response. This response with TASK_ABORTED status will be |
5805 | * queued back to fabric module by transport_check_aborted_status(). | 5803 | * queued back to fabric module by transport_check_aborted_status(). |
5806 | */ | 5804 | */ |
5807 | if (cmd->data_direction == DMA_TO_DEVICE) { | 5805 | if (cmd->data_direction == DMA_TO_DEVICE) { |
5808 | if (CMD_TFO(cmd)->write_pending_status(cmd) != 0) { | 5806 | if (CMD_TFO(cmd)->write_pending_status(cmd) != 0) { |
5809 | atomic_inc(&T_TASK(cmd)->t_transport_aborted); | 5807 | atomic_inc(&T_TASK(cmd)->t_transport_aborted); |
5810 | smp_mb__after_atomic_inc(); | 5808 | smp_mb__after_atomic_inc(); |
5811 | cmd->scsi_status = SAM_STAT_TASK_ABORTED; | 5809 | cmd->scsi_status = SAM_STAT_TASK_ABORTED; |
5812 | transport_new_cmd_failure(cmd); | 5810 | transport_new_cmd_failure(cmd); |
5813 | return; | 5811 | return; |
5814 | } | 5812 | } |
5815 | } | 5813 | } |
5816 | cmd->scsi_status = SAM_STAT_TASK_ABORTED; | 5814 | cmd->scsi_status = SAM_STAT_TASK_ABORTED; |
5817 | #if 0 | 5815 | #if 0 |
5818 | printk(KERN_INFO "Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x," | 5816 | printk(KERN_INFO "Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x," |
5819 | " ITT: 0x%08x\n", T_TASK(cmd)->t_task_cdb[0], | 5817 | " ITT: 0x%08x\n", T_TASK(cmd)->t_task_cdb[0], |
5820 | CMD_TFO(cmd)->get_task_tag(cmd)); | 5818 | CMD_TFO(cmd)->get_task_tag(cmd)); |
5821 | #endif | 5819 | #endif |
5822 | CMD_TFO(cmd)->queue_status(cmd); | 5820 | CMD_TFO(cmd)->queue_status(cmd); |
5823 | } | 5821 | } |
5824 | 5822 | ||
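The atomic_inc()/smp_mb__after_atomic_inc() pairing above exists because atomic_inc() does not imply a memory barrier on all architectures; the explicit barrier makes the incremented t_transport_aborted count visible before anything that follows. A small illustrative sketch (newer kernels spell the barrier smp_mb__after_atomic()):

    #include <linux/atomic.h>

    /* writer: publish the aborted flag before anything that follows */
    static void demo_mark_aborted(atomic_t *aborted)
    {
            atomic_inc(aborted);
            smp_mb__after_atomic_inc();     /* atomic_inc() alone is not a barrier */
    }

    /* reader: a non-zero count means the abort has been published */
    static int demo_is_aborted(atomic_t *aborted)
    {
            return atomic_read(aborted) != 0;
    }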
5825 | /* transport_generic_do_tmr(): | 5823 | /* transport_generic_do_tmr(): |
5826 | * | 5824 | * |
5827 | * | 5825 | * |
5828 | */ | 5826 | */ |
5829 | int transport_generic_do_tmr(struct se_cmd *cmd) | 5827 | int transport_generic_do_tmr(struct se_cmd *cmd) |
5830 | { | 5828 | { |
5831 | struct se_cmd *ref_cmd; | 5829 | struct se_cmd *ref_cmd; |
5832 | struct se_device *dev = SE_DEV(cmd); | 5830 | struct se_device *dev = SE_DEV(cmd); |
5833 | struct se_tmr_req *tmr = cmd->se_tmr_req; | 5831 | struct se_tmr_req *tmr = cmd->se_tmr_req; |
5834 | int ret; | 5832 | int ret; |
5835 | 5833 | ||
5836 | switch (tmr->function) { | 5834 | switch (tmr->function) { |
5837 | case ABORT_TASK: | 5835 | case ABORT_TASK: |
5838 | ref_cmd = tmr->ref_cmd; | 5836 | ref_cmd = tmr->ref_cmd; |
5839 | tmr->response = TMR_FUNCTION_REJECTED; | 5837 | tmr->response = TMR_FUNCTION_REJECTED; |
5840 | break; | 5838 | break; |
5841 | case ABORT_TASK_SET: | 5839 | case ABORT_TASK_SET: |
5842 | case CLEAR_ACA: | 5840 | case CLEAR_ACA: |
5843 | case CLEAR_TASK_SET: | 5841 | case CLEAR_TASK_SET: |
5844 | tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED; | 5842 | tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED; |
5845 | break; | 5843 | break; |
5846 | case LUN_RESET: | 5844 | case LUN_RESET: |
5847 | ret = core_tmr_lun_reset(dev, tmr, NULL, NULL); | 5845 | ret = core_tmr_lun_reset(dev, tmr, NULL, NULL); |
5848 | tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE : | 5846 | tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE : |
5849 | TMR_FUNCTION_REJECTED; | 5847 | TMR_FUNCTION_REJECTED; |
5850 | break; | 5848 | break; |
5851 | #if 0 | 5849 | #if 0 |
5852 | case TARGET_WARM_RESET: | 5850 | case TARGET_WARM_RESET: |
5853 | transport_generic_host_reset(dev->se_hba); | 5851 | transport_generic_host_reset(dev->se_hba); |
5854 | tmr->response = TMR_FUNCTION_REJECTED; | 5852 | tmr->response = TMR_FUNCTION_REJECTED; |
5855 | break; | 5853 | break; |
5856 | case TARGET_COLD_RESET: | 5854 | case TARGET_COLD_RESET: |
5857 | transport_generic_host_reset(dev->se_hba); | 5855 | transport_generic_host_reset(dev->se_hba); |
5858 | transport_generic_cold_reset(dev->se_hba); | 5856 | transport_generic_cold_reset(dev->se_hba); |
5859 | tmr->response = TMR_FUNCTION_REJECTED; | 5857 | tmr->response = TMR_FUNCTION_REJECTED; |
5860 | break; | 5858 | break; |
5861 | #endif | 5859 | #endif |
5862 | default: | 5860 | default: |
5863 | printk(KERN_ERR "Unknown TMR function: 0x%02x.\n", | 5861 | printk(KERN_ERR "Unknown TMR function: 0x%02x.\n", |
5864 | tmr->function); | 5862 | tmr->function); |
5865 | tmr->response = TMR_FUNCTION_REJECTED; | 5863 | tmr->response = TMR_FUNCTION_REJECTED; |
5866 | break; | 5864 | break; |
5867 | } | 5865 | } |
5868 | 5866 | ||
5869 | cmd->t_state = TRANSPORT_ISTATE_PROCESSING; | 5867 | cmd->t_state = TRANSPORT_ISTATE_PROCESSING; |
5870 | CMD_TFO(cmd)->queue_tm_rsp(cmd); | 5868 | CMD_TFO(cmd)->queue_tm_rsp(cmd); |
5871 | 5869 | ||
5872 | transport_cmd_check_stop(cmd, 2, 0); | 5870 | transport_cmd_check_stop(cmd, 2, 0); |
5873 | return 0; | 5871 | return 0; |
5874 | } | 5872 | } |
5875 | 5873 | ||
5876 | /* | 5874 | /* |
5877 | * Called with spin_lock_irq(&dev->execute_task_lock) held. | 5875 | * Called with spin_lock_irq(&dev->execute_task_lock) held. |
5878 | * | 5876 | * |
5879 | */ | 5877 | */ |
5880 | static struct se_task * | 5878 | static struct se_task * |
5881 | transport_get_task_from_state_list(struct se_device *dev) | 5879 | transport_get_task_from_state_list(struct se_device *dev) |
5882 | { | 5880 | { |
5883 | struct se_task *task; | 5881 | struct se_task *task; |
5884 | 5882 | ||
5885 | if (list_empty(&dev->state_task_list)) | 5883 | if (list_empty(&dev->state_task_list)) |
5886 | return NULL; | 5884 | return NULL; |
5887 | 5885 | ||
5888 | list_for_each_entry(task, &dev->state_task_list, t_state_list) | 5886 | list_for_each_entry(task, &dev->state_task_list, t_state_list) |
5889 | break; | 5887 | break; |
5890 | 5888 | ||
5891 | list_del(&task->t_state_list); | 5889 | list_del(&task->t_state_list); |
5892 | atomic_set(&task->task_state_active, 0); | 5890 | atomic_set(&task->task_state_active, 0); |
5893 | 5891 | ||
5894 | return task; | 5892 | return task; |
5895 | } | 5893 | } |
5896 | 5894 | ||
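The list_for_each_entry()/break pair above is just a way of taking the first element of the state list; list_first_entry() expresses the same thing directly. A hedged equivalent with an illustrative demo_task type:

    #include <linux/list.h>

    struct demo_task {
            struct list_head node;
    };

    /* pop the first demo_task off @head, or return NULL if the list is empty */
    static struct demo_task *demo_pop_first(struct list_head *head)
    {
            struct demo_task *t;

            if (list_empty(head))
                    return NULL;

            t = list_first_entry(head, struct demo_task, node);
            list_del(&t->node);     /* detach it, as the function above does */
            return t;
    }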
5897 | static void transport_processing_shutdown(struct se_device *dev) | 5895 | static void transport_processing_shutdown(struct se_device *dev) |
5898 | { | 5896 | { |
5899 | struct se_cmd *cmd; | 5897 | struct se_cmd *cmd; |
5900 | struct se_queue_req *qr; | 5898 | struct se_queue_req *qr; |
5901 | struct se_task *task; | 5899 | struct se_task *task; |
5902 | u8 state; | 5900 | u8 state; |
5903 | unsigned long flags; | 5901 | unsigned long flags; |
5904 | /* | 5902 | /* |
5905 | * Empty the struct se_device's struct se_task state list. | 5903 | * Empty the struct se_device's struct se_task state list. |
5906 | */ | 5904 | */ |
5907 | spin_lock_irqsave(&dev->execute_task_lock, flags); | 5905 | spin_lock_irqsave(&dev->execute_task_lock, flags); |
5908 | while ((task = transport_get_task_from_state_list(dev))) { | 5906 | while ((task = transport_get_task_from_state_list(dev))) { |
5909 | if (!(TASK_CMD(task))) { | 5907 | if (!(TASK_CMD(task))) { |
5910 | printk(KERN_ERR "TASK_CMD(task) is NULL!\n"); | 5908 | printk(KERN_ERR "TASK_CMD(task) is NULL!\n"); |
5911 | continue; | 5909 | continue; |
5912 | } | 5910 | } |
5913 | cmd = TASK_CMD(task); | 5911 | cmd = TASK_CMD(task); |
5914 | 5912 | ||
5915 | if (!T_TASK(cmd)) { | 5913 | if (!T_TASK(cmd)) { |
5916 | printk(KERN_ERR "T_TASK(cmd) is NULL for task: %p cmd:" | 5914 | printk(KERN_ERR "T_TASK(cmd) is NULL for task: %p cmd:" |
5917 | " %p ITT: 0x%08x\n", task, cmd, | 5915 | " %p ITT: 0x%08x\n", task, cmd, |
5918 | CMD_TFO(cmd)->get_task_tag(cmd)); | 5916 | CMD_TFO(cmd)->get_task_tag(cmd)); |
5919 | continue; | 5917 | continue; |
5920 | } | 5918 | } |
5921 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); | 5919 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); |
5922 | 5920 | ||
5923 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 5921 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); |
5924 | 5922 | ||
5925 | DEBUG_DO("PT: cmd: %p task: %p ITT/CmdSN: 0x%08x/0x%08x," | 5923 | DEBUG_DO("PT: cmd: %p task: %p ITT/CmdSN: 0x%08x/0x%08x," |
5926 | " i_state/def_i_state: %d/%d, t_state/def_t_state:" | 5924 | " i_state/def_i_state: %d/%d, t_state/def_t_state:" |
5927 | " %d/%d cdb: 0x%02x\n", cmd, task, | 5925 | " %d/%d cdb: 0x%02x\n", cmd, task, |
5928 | CMD_TFO(cmd)->get_task_tag(cmd), cmd->cmd_sn, | 5926 | CMD_TFO(cmd)->get_task_tag(cmd), cmd->cmd_sn, |
5929 | CMD_TFO(cmd)->get_cmd_state(cmd), cmd->deferred_i_state, | 5927 | CMD_TFO(cmd)->get_cmd_state(cmd), cmd->deferred_i_state, |
5930 | cmd->t_state, cmd->deferred_t_state, | 5928 | cmd->t_state, cmd->deferred_t_state, |
5931 | T_TASK(cmd)->t_task_cdb[0]); | 5929 | T_TASK(cmd)->t_task_cdb[0]); |
5932 | DEBUG_DO("PT: ITT[0x%08x] - t_task_cdbs: %d t_task_cdbs_left:" | 5930 | DEBUG_DO("PT: ITT[0x%08x] - t_task_cdbs: %d t_task_cdbs_left:" |
5933 | " %d t_task_cdbs_sent: %d -- t_transport_active: %d" | 5931 | " %d t_task_cdbs_sent: %d -- t_transport_active: %d" |
5934 | " t_transport_stop: %d t_transport_sent: %d\n", | 5932 | " t_transport_stop: %d t_transport_sent: %d\n", |
5935 | CMD_TFO(cmd)->get_task_tag(cmd), | 5933 | CMD_TFO(cmd)->get_task_tag(cmd), |
5936 | T_TASK(cmd)->t_task_cdbs, | 5934 | T_TASK(cmd)->t_task_cdbs, |
5937 | atomic_read(&T_TASK(cmd)->t_task_cdbs_left), | 5935 | atomic_read(&T_TASK(cmd)->t_task_cdbs_left), |
5938 | atomic_read(&T_TASK(cmd)->t_task_cdbs_sent), | 5936 | atomic_read(&T_TASK(cmd)->t_task_cdbs_sent), |
5939 | atomic_read(&T_TASK(cmd)->t_transport_active), | 5937 | atomic_read(&T_TASK(cmd)->t_transport_active), |
5940 | atomic_read(&T_TASK(cmd)->t_transport_stop), | 5938 | atomic_read(&T_TASK(cmd)->t_transport_stop), |
5941 | atomic_read(&T_TASK(cmd)->t_transport_sent)); | 5939 | atomic_read(&T_TASK(cmd)->t_transport_sent)); |
5942 | 5940 | ||
5943 | if (atomic_read(&task->task_active)) { | 5941 | if (atomic_read(&task->task_active)) { |
5944 | atomic_set(&task->task_stop, 1); | 5942 | atomic_set(&task->task_stop, 1); |
5945 | spin_unlock_irqrestore( | 5943 | spin_unlock_irqrestore( |
5946 | &T_TASK(cmd)->t_state_lock, flags); | 5944 | &T_TASK(cmd)->t_state_lock, flags); |
5947 | 5945 | ||
5948 | DEBUG_DO("Waiting for task: %p to shutdown for dev:" | 5946 | DEBUG_DO("Waiting for task: %p to shutdown for dev:" |
5949 | " %p\n", task, dev); | 5947 | " %p\n", task, dev); |
5950 | wait_for_completion(&task->task_stop_comp); | 5948 | wait_for_completion(&task->task_stop_comp); |
5951 | DEBUG_DO("Completed task: %p shutdown for dev: %p\n", | 5949 | DEBUG_DO("Completed task: %p shutdown for dev: %p\n", |
5952 | task, dev); | 5950 | task, dev); |
5953 | 5951 | ||
5954 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); | 5952 | spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); |
5955 | atomic_dec(&T_TASK(cmd)->t_task_cdbs_left); | 5953 | atomic_dec(&T_TASK(cmd)->t_task_cdbs_left); |
5956 | 5954 | ||
5957 | atomic_set(&task->task_active, 0); | 5955 | atomic_set(&task->task_active, 0); |
5958 | atomic_set(&task->task_stop, 0); | 5956 | atomic_set(&task->task_stop, 0); |
5959 | } else { | 5957 | } else { |
5960 | if (atomic_read(&task->task_execute_queue) != 0) | 5958 | if (atomic_read(&task->task_execute_queue) != 0) |
5961 | transport_remove_task_from_execute_queue(task, dev); | 5959 | transport_remove_task_from_execute_queue(task, dev); |
5962 | } | 5960 | } |
5963 | __transport_stop_task_timer(task, &flags); | 5961 | __transport_stop_task_timer(task, &flags); |
5964 | 5962 | ||
5965 | if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_ex_left))) { | 5963 | if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_ex_left))) { |
5966 | spin_unlock_irqrestore( | 5964 | spin_unlock_irqrestore( |
5967 | &T_TASK(cmd)->t_state_lock, flags); | 5965 | &T_TASK(cmd)->t_state_lock, flags); |
5968 | 5966 | ||
5969 | DEBUG_DO("Skipping task: %p, dev: %p for" | 5967 | DEBUG_DO("Skipping task: %p, dev: %p for" |
5970 | " t_task_cdbs_ex_left: %d\n", task, dev, | 5968 | " t_task_cdbs_ex_left: %d\n", task, dev, |
5971 | atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left)); | 5969 | atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left)); |
5972 | 5970 | ||
5973 | spin_lock_irqsave(&dev->execute_task_lock, flags); | 5971 | spin_lock_irqsave(&dev->execute_task_lock, flags); |
5974 | continue; | 5972 | continue; |
5975 | } | 5973 | } |
5976 | 5974 | ||
5977 | if (atomic_read(&T_TASK(cmd)->t_transport_active)) { | 5975 | if (atomic_read(&T_TASK(cmd)->t_transport_active)) { |
5978 | DEBUG_DO("got t_transport_active = 1 for task: %p, dev:" | 5976 | DEBUG_DO("got t_transport_active = 1 for task: %p, dev:" |
5979 | " %p\n", task, dev); | 5977 | " %p\n", task, dev); |
5980 | 5978 | ||
5981 | if (atomic_read(&T_TASK(cmd)->t_fe_count)) { | 5979 | if (atomic_read(&T_TASK(cmd)->t_fe_count)) { |
5982 | spin_unlock_irqrestore( | 5980 | spin_unlock_irqrestore( |
5983 | &T_TASK(cmd)->t_state_lock, flags); | 5981 | &T_TASK(cmd)->t_state_lock, flags); |
5984 | transport_send_check_condition_and_sense( | 5982 | transport_send_check_condition_and_sense( |
5985 | cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, | 5983 | cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, |
5986 | 0); | 5984 | 0); |
5987 | transport_remove_cmd_from_queue(cmd, | 5985 | transport_remove_cmd_from_queue(cmd, |
5988 | SE_DEV(cmd)->dev_queue_obj); | 5986 | SE_DEV(cmd)->dev_queue_obj); |
5989 | 5987 | ||
5990 | transport_lun_remove_cmd(cmd); | 5988 | transport_lun_remove_cmd(cmd); |
5991 | transport_cmd_check_stop(cmd, 1, 0); | 5989 | transport_cmd_check_stop(cmd, 1, 0); |
5992 | } else { | 5990 | } else { |
5993 | spin_unlock_irqrestore( | 5991 | spin_unlock_irqrestore( |
5994 | &T_TASK(cmd)->t_state_lock, flags); | 5992 | &T_TASK(cmd)->t_state_lock, flags); |
5995 | 5993 | ||
5996 | transport_remove_cmd_from_queue(cmd, | 5994 | transport_remove_cmd_from_queue(cmd, |
5997 | SE_DEV(cmd)->dev_queue_obj); | 5995 | SE_DEV(cmd)->dev_queue_obj); |
5998 | 5996 | ||
5999 | transport_lun_remove_cmd(cmd); | 5997 | transport_lun_remove_cmd(cmd); |
6000 | 5998 | ||
6001 | if (transport_cmd_check_stop(cmd, 1, 0)) | 5999 | if (transport_cmd_check_stop(cmd, 1, 0)) |
6002 | transport_generic_remove(cmd, 0, 0); | 6000 | transport_generic_remove(cmd, 0, 0); |
6003 | } | 6001 | } |
6004 | 6002 | ||
6005 | spin_lock_irqsave(&dev->execute_task_lock, flags); | 6003 | spin_lock_irqsave(&dev->execute_task_lock, flags); |
6006 | continue; | 6004 | continue; |
6007 | } | 6005 | } |
6008 | DEBUG_DO("Got t_transport_active = 0 for task: %p, dev: %p\n", | 6006 | DEBUG_DO("Got t_transport_active = 0 for task: %p, dev: %p\n", |
6009 | task, dev); | 6007 | task, dev); |
6010 | 6008 | ||
6011 | if (atomic_read(&T_TASK(cmd)->t_fe_count)) { | 6009 | if (atomic_read(&T_TASK(cmd)->t_fe_count)) { |
6012 | spin_unlock_irqrestore( | 6010 | spin_unlock_irqrestore( |
6013 | &T_TASK(cmd)->t_state_lock, flags); | 6011 | &T_TASK(cmd)->t_state_lock, flags); |
6014 | transport_send_check_condition_and_sense(cmd, | 6012 | transport_send_check_condition_and_sense(cmd, |
6015 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); | 6013 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); |
6016 | transport_remove_cmd_from_queue(cmd, | 6014 | transport_remove_cmd_from_queue(cmd, |
6017 | SE_DEV(cmd)->dev_queue_obj); | 6015 | SE_DEV(cmd)->dev_queue_obj); |
6018 | 6016 | ||
6019 | transport_lun_remove_cmd(cmd); | 6017 | transport_lun_remove_cmd(cmd); |
6020 | transport_cmd_check_stop(cmd, 1, 0); | 6018 | transport_cmd_check_stop(cmd, 1, 0); |
6021 | } else { | 6019 | } else { |
6022 | spin_unlock_irqrestore( | 6020 | spin_unlock_irqrestore( |
6023 | &T_TASK(cmd)->t_state_lock, flags); | 6021 | &T_TASK(cmd)->t_state_lock, flags); |
6024 | 6022 | ||
6025 | transport_remove_cmd_from_queue(cmd, | 6023 | transport_remove_cmd_from_queue(cmd, |
6026 | SE_DEV(cmd)->dev_queue_obj); | 6024 | SE_DEV(cmd)->dev_queue_obj); |
6027 | transport_lun_remove_cmd(cmd); | 6025 | transport_lun_remove_cmd(cmd); |
6028 | 6026 | ||
6029 | if (transport_cmd_check_stop(cmd, 1, 0)) | 6027 | if (transport_cmd_check_stop(cmd, 1, 0)) |
6030 | transport_generic_remove(cmd, 0, 0); | 6028 | transport_generic_remove(cmd, 0, 0); |
6031 | } | 6029 | } |
6032 | 6030 | ||
6033 | spin_lock_irqsave(&dev->execute_task_lock, flags); | 6031 | spin_lock_irqsave(&dev->execute_task_lock, flags); |
6034 | } | 6032 | } |
6035 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); | 6033 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); |
6036 | /* | 6034 | /* |
6037 | * Empty the struct se_device's struct se_cmd list. | 6035 | * Empty the struct se_device's struct se_cmd list. |
6038 | */ | 6036 | */ |
6039 | spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags); | 6037 | spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags); |
6040 | while ((qr = __transport_get_qr_from_queue(dev->dev_queue_obj))) { | 6038 | while ((qr = __transport_get_qr_from_queue(dev->dev_queue_obj))) { |
6041 | spin_unlock_irqrestore( | 6039 | spin_unlock_irqrestore( |
6042 | &dev->dev_queue_obj->cmd_queue_lock, flags); | 6040 | &dev->dev_queue_obj->cmd_queue_lock, flags); |
6043 | cmd = (struct se_cmd *)qr->cmd; | 6041 | cmd = (struct se_cmd *)qr->cmd; |
6044 | state = qr->state; | 6042 | state = qr->state; |
6045 | kfree(qr); | 6043 | kfree(qr); |
6046 | 6044 | ||
6047 | DEBUG_DO("From Device Queue: cmd: %p t_state: %d\n", | 6045 | DEBUG_DO("From Device Queue: cmd: %p t_state: %d\n", |
6048 | cmd, state); | 6046 | cmd, state); |
6049 | 6047 | ||
6050 | if (atomic_read(&T_TASK(cmd)->t_fe_count)) { | 6048 | if (atomic_read(&T_TASK(cmd)->t_fe_count)) { |
6051 | transport_send_check_condition_and_sense(cmd, | 6049 | transport_send_check_condition_and_sense(cmd, |
6052 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); | 6050 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); |
6053 | 6051 | ||
6054 | transport_lun_remove_cmd(cmd); | 6052 | transport_lun_remove_cmd(cmd); |
6055 | transport_cmd_check_stop(cmd, 1, 0); | 6053 | transport_cmd_check_stop(cmd, 1, 0); |
6056 | } else { | 6054 | } else { |
6057 | transport_lun_remove_cmd(cmd); | 6055 | transport_lun_remove_cmd(cmd); |
6058 | if (transport_cmd_check_stop(cmd, 1, 0)) | 6056 | if (transport_cmd_check_stop(cmd, 1, 0)) |
6059 | transport_generic_remove(cmd, 0, 0); | 6057 | transport_generic_remove(cmd, 0, 0); |
6060 | } | 6058 | } |
6061 | spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags); | 6059 | spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags); |
6062 | } | 6060 | } |
6063 | spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, flags); | 6061 | spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, flags); |
6064 | } | 6062 | } |
6065 | 6063 | ||
6066 | /* transport_processing_thread(): | 6064 | /* transport_processing_thread(): |
6067 | * | 6065 | * |
6068 | * | 6066 | * |
6069 | */ | 6067 | */ |
6070 | static int transport_processing_thread(void *param) | 6068 | static int transport_processing_thread(void *param) |
6071 | { | 6069 | { |
6072 | int ret, t_state; | 6070 | int ret, t_state; |
6073 | struct se_cmd *cmd; | 6071 | struct se_cmd *cmd; |
6074 | struct se_device *dev = (struct se_device *) param; | 6072 | struct se_device *dev = (struct se_device *) param; |
6075 | struct se_queue_req *qr; | 6073 | struct se_queue_req *qr; |
6076 | 6074 | ||
6077 | set_user_nice(current, -20); | 6075 | set_user_nice(current, -20); |
6078 | 6076 | ||
6079 | while (!kthread_should_stop()) { | 6077 | while (!kthread_should_stop()) { |
6080 | ret = wait_event_interruptible(dev->dev_queue_obj->thread_wq, | 6078 | ret = wait_event_interruptible(dev->dev_queue_obj->thread_wq, |
6081 | atomic_read(&dev->dev_queue_obj->queue_cnt) || | 6079 | atomic_read(&dev->dev_queue_obj->queue_cnt) || |
6082 | kthread_should_stop()); | 6080 | kthread_should_stop()); |
6083 | if (ret < 0) | 6081 | if (ret < 0) |
6084 | goto out; | 6082 | goto out; |
6085 | 6083 | ||
6086 | spin_lock_irq(&dev->dev_status_lock); | 6084 | spin_lock_irq(&dev->dev_status_lock); |
6087 | if (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) { | 6085 | if (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) { |
6088 | spin_unlock_irq(&dev->dev_status_lock); | 6086 | spin_unlock_irq(&dev->dev_status_lock); |
6089 | transport_processing_shutdown(dev); | 6087 | transport_processing_shutdown(dev); |
6090 | continue; | 6088 | continue; |
6091 | } | 6089 | } |
6092 | spin_unlock_irq(&dev->dev_status_lock); | 6090 | spin_unlock_irq(&dev->dev_status_lock); |
6093 | 6091 | ||
6094 | get_cmd: | 6092 | get_cmd: |
6095 | __transport_execute_tasks(dev); | 6093 | __transport_execute_tasks(dev); |
6096 | 6094 | ||
6097 | qr = transport_get_qr_from_queue(dev->dev_queue_obj); | 6095 | qr = transport_get_qr_from_queue(dev->dev_queue_obj); |
6098 | if (!(qr)) | 6096 | if (!(qr)) |
6099 | continue; | 6097 | continue; |
6100 | 6098 | ||
6101 | cmd = (struct se_cmd *)qr->cmd; | 6099 | cmd = (struct se_cmd *)qr->cmd; |
6102 | t_state = qr->state; | 6100 | t_state = qr->state; |
6103 | kfree(qr); | 6101 | kfree(qr); |
6104 | 6102 | ||
6105 | switch (t_state) { | 6103 | switch (t_state) { |
6106 | case TRANSPORT_NEW_CMD_MAP: | 6104 | case TRANSPORT_NEW_CMD_MAP: |
6107 | if (!(CMD_TFO(cmd)->new_cmd_map)) { | 6105 | if (!(CMD_TFO(cmd)->new_cmd_map)) { |
6108 | printk(KERN_ERR "CMD_TFO(cmd)->new_cmd_map is" | 6106 | printk(KERN_ERR "CMD_TFO(cmd)->new_cmd_map is" |
6109 | " NULL for TRANSPORT_NEW_CMD_MAP\n"); | 6107 | " NULL for TRANSPORT_NEW_CMD_MAP\n"); |
6110 | BUG(); | 6108 | BUG(); |
6111 | } | 6109 | } |
6112 | ret = CMD_TFO(cmd)->new_cmd_map(cmd); | 6110 | ret = CMD_TFO(cmd)->new_cmd_map(cmd); |
6113 | if (ret < 0) { | 6111 | if (ret < 0) { |
6114 | cmd->transport_error_status = ret; | 6112 | cmd->transport_error_status = ret; |
6115 | transport_generic_request_failure(cmd, NULL, | 6113 | transport_generic_request_failure(cmd, NULL, |
6116 | 0, (cmd->data_direction != | 6114 | 0, (cmd->data_direction != |
6117 | DMA_TO_DEVICE)); | 6115 | DMA_TO_DEVICE)); |
6118 | break; | 6116 | break; |
6119 | } | 6117 | } |
6120 | /* Fall through */ | 6118 | /* Fall through */ |
6121 | case TRANSPORT_NEW_CMD: | 6119 | case TRANSPORT_NEW_CMD: |
6122 | ret = transport_generic_new_cmd(cmd); | 6120 | ret = transport_generic_new_cmd(cmd); |
6123 | if (ret < 0) { | 6121 | if (ret < 0) { |
6124 | cmd->transport_error_status = ret; | 6122 | cmd->transport_error_status = ret; |
6125 | transport_generic_request_failure(cmd, NULL, | 6123 | transport_generic_request_failure(cmd, NULL, |
6126 | 0, (cmd->data_direction != | 6124 | 0, (cmd->data_direction != |
6127 | DMA_TO_DEVICE)); | 6125 | DMA_TO_DEVICE)); |
6128 | } | 6126 | } |
6129 | break; | 6127 | break; |
6130 | case TRANSPORT_PROCESS_WRITE: | 6128 | case TRANSPORT_PROCESS_WRITE: |
6131 | transport_generic_process_write(cmd); | 6129 | transport_generic_process_write(cmd); |
6132 | break; | 6130 | break; |
6133 | case TRANSPORT_COMPLETE_OK: | 6131 | case TRANSPORT_COMPLETE_OK: |
6134 | transport_stop_all_task_timers(cmd); | 6132 | transport_stop_all_task_timers(cmd); |
6135 | transport_generic_complete_ok(cmd); | 6133 | transport_generic_complete_ok(cmd); |
6136 | break; | 6134 | break; |
6137 | case TRANSPORT_REMOVE: | 6135 | case TRANSPORT_REMOVE: |
6138 | transport_generic_remove(cmd, 1, 0); | 6136 | transport_generic_remove(cmd, 1, 0); |
6139 | break; | 6137 | break; |
6140 | case TRANSPORT_PROCESS_TMR: | 6138 | case TRANSPORT_PROCESS_TMR: |
6141 | transport_generic_do_tmr(cmd); | 6139 | transport_generic_do_tmr(cmd); |
6142 | break; | 6140 | break; |
6143 | case TRANSPORT_COMPLETE_FAILURE: | 6141 | case TRANSPORT_COMPLETE_FAILURE: |
6144 | transport_generic_request_failure(cmd, NULL, 1, 1); | 6142 | transport_generic_request_failure(cmd, NULL, 1, 1); |
6145 | break; | 6143 | break; |
6146 | case TRANSPORT_COMPLETE_TIMEOUT: | 6144 | case TRANSPORT_COMPLETE_TIMEOUT: |
6147 | transport_stop_all_task_timers(cmd); | 6145 | transport_stop_all_task_timers(cmd); |
6148 | transport_generic_request_timeout(cmd); | 6146 | transport_generic_request_timeout(cmd); |
6149 | break; | 6147 | break; |
6150 | default: | 6148 | default: |
6151 | printk(KERN_ERR "Unknown t_state: %d deferred_t_state:" | 6149 | printk(KERN_ERR "Unknown t_state: %d deferred_t_state:" |
6152 | " %d for ITT: 0x%08x i_state: %d on SE LUN:" | 6150 | " %d for ITT: 0x%08x i_state: %d on SE LUN:" |
6153 | " %u\n", t_state, cmd->deferred_t_state, | 6151 | " %u\n", t_state, cmd->deferred_t_state, |
6154 | CMD_TFO(cmd)->get_task_tag(cmd), | 6152 | CMD_TFO(cmd)->get_task_tag(cmd), |
6155 | CMD_TFO(cmd)->get_cmd_state(cmd), | 6153 | CMD_TFO(cmd)->get_cmd_state(cmd), |
6156 | SE_LUN(cmd)->unpacked_lun); | 6154 | SE_LUN(cmd)->unpacked_lun); |
6157 | BUG(); | 6155 | BUG(); |
6158 | } | 6156 | } |
6159 | 6157 | ||
6160 | goto get_cmd; | 6158 | goto get_cmd; |
6161 | } | 6159 | } |
6162 | 6160 | ||
6163 | out: | 6161 | out: |
6164 | transport_release_all_cmds(dev); | 6162 | transport_release_all_cmds(dev); |
6165 | dev->process_thread = NULL; | 6163 | dev->process_thread = NULL; |
6166 | return 0; | 6164 | return 0; |
6167 | } | 6165 | } |
6168 | 6166 |
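transport_processing_thread() is a standard kthread event loop: sleep on the device queue's waitqueue until work arrives or kthread_should_stop() turns true, then drain the queue and dispatch on each request's state. A stripped-down sketch of the same shape, with hypothetical demo_* names:

    #include <linux/kthread.h>
    #include <linux/wait.h>
    #include <linux/atomic.h>

    struct demo_queue {
            wait_queue_head_t wq;
            atomic_t cnt;
    };

    static int demo_processing_thread(void *param)
    {
            struct demo_queue *q = param;
            int ret;

            while (!kthread_should_stop()) {
                    /* sleep until work is queued or we are asked to stop */
                    ret = wait_event_interruptible(q->wq,
                                    atomic_read(&q->cnt) ||
                                    kthread_should_stop());
                    if (ret < 0)
                            break;  /* interrupted by a signal */

                    while (atomic_read(&q->cnt)) {
                            /* ... dequeue one request, switch on its state ... */
                            atomic_dec(&q->cnt);
                    }
            }
            return 0;
    }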
include/target/target_core_base.h
1 | #ifndef TARGET_CORE_BASE_H | 1 | #ifndef TARGET_CORE_BASE_H |
2 | #define TARGET_CORE_BASE_H | 2 | #define TARGET_CORE_BASE_H |
3 | 3 | ||
4 | #include <linux/in.h> | 4 | #include <linux/in.h> |
5 | #include <linux/configfs.h> | 5 | #include <linux/configfs.h> |
6 | #include <linux/dma-mapping.h> | 6 | #include <linux/dma-mapping.h> |
7 | #include <linux/blkdev.h> | 7 | #include <linux/blkdev.h> |
8 | #include <scsi/scsi_cmnd.h> | 8 | #include <scsi/scsi_cmnd.h> |
9 | #include <net/sock.h> | 9 | #include <net/sock.h> |
10 | #include <net/tcp.h> | 10 | #include <net/tcp.h> |
11 | 11 | ||
12 | #define TARGET_CORE_MOD_VERSION "v4.0.0-rc6" | 12 | #define TARGET_CORE_MOD_VERSION "v4.0.0-rc6" |
13 | #define SHUTDOWN_SIGS (sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGABRT)) | 13 | #define SHUTDOWN_SIGS (sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGABRT)) |
14 | 14 | ||
15 | /* Used by transport_generic_allocate_iovecs() */ | 15 | /* Used by transport_generic_allocate_iovecs() */ |
16 | #define TRANSPORT_IOV_DATA_BUFFER 5 | 16 | #define TRANSPORT_IOV_DATA_BUFFER 5 |
17 | /* Maximum Number of LUNs per Target Portal Group */ | 17 | /* Maximum Number of LUNs per Target Portal Group */ |
18 | #define TRANSPORT_MAX_LUNS_PER_TPG 256 | 18 | #define TRANSPORT_MAX_LUNS_PER_TPG 256 |
19 | /* | 19 | /* |
20 | * By default we use 32-byte CDBs in TCM Core and subsystem plugin code. | 20 | * By default we use 32-byte CDBs in TCM Core and subsystem plugin code. |
21 | * | 21 | * |
22 | * Note that both include/scsi/scsi_cmnd.h:MAX_COMMAND_SIZE and | 22 | * Note that both include/scsi/scsi_cmnd.h:MAX_COMMAND_SIZE and |
23 | * include/linux/blkdev.h:BLOCK_MAX_CDB as of v2.6.36-rc4 still use | 23 | * include/linux/blkdev.h:BLOCK_MAX_CDB as of v2.6.36-rc4 still use |
24 | * 16-byte CDBs by default and require an extra allocation for | 24 | * 16-byte CDBs by default and require an extra allocation for |
25 | * 32-byte CDBs because of legacy issues. | 25 | * 32-byte CDBs because of legacy issues. |
26 | * | 26 | * |
27 | * Within TCM Core there are no such legacy limitations, so we go ahead and | 27 | * Within TCM Core there are no such legacy limitations, so we go ahead and |
28 | * use 32-byte CDBs by default, calling include/scsi/scsi.h:scsi_command_size() | 28 | * use 32-byte CDBs by default, calling include/scsi/scsi.h:scsi_command_size() |
29 | * within all TCM Core and subsystem plugin code. | 29 | * within all TCM Core and subsystem plugin code. |
30 | */ | 30 | */ |
31 | #define TCM_MAX_COMMAND_SIZE 32 | 31 | #define TCM_MAX_COMMAND_SIZE 32 |
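The comment above leans on include/scsi/scsi.h:scsi_command_size() to size CDBs. For fixed-length CDBs that boils down to indexing a small table by the opcode's group code, the top three bits of byte 0. A self-contained sketch follows; the table values are the commonly tabulated ones for the SCSI group encoding, not copied from any specific kernel version, so verify against your kernel's scsi.h before relying on them.

#include <stdio.h>

/* Fixed-length CDB size by opcode group (assumed values, see above). */
static const unsigned char cdb_group_len[8] = { 6, 10, 10, 12, 16, 12, 10, 10 };

static unsigned int cdb_len(unsigned char opcode)
{
	return cdb_group_len[opcode >> 5];	/* group code = top 3 bits */
}

int main(void)
{
	printf("INQUIRY  (0x12): %u bytes\n", cdb_len(0x12));	/* group 0 */
	printf("READ(10) (0x28): %u bytes\n", cdb_len(0x28));	/* group 1 */
	printf("READ(16) (0x88): %u bytes\n", cdb_len(0x88));	/* group 4 */
	return 0;
}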
32 | /* | 32 | /* |
33 | * From include/scsi/scsi_cmnd.h:SCSI_SENSE_BUFFERSIZE, currently | 33 | * From include/scsi/scsi_cmnd.h:SCSI_SENSE_BUFFERSIZE, currently |
34 | * defined 96, but the real limit is 252 (or 260 including the header) | 34 | * defined 96, but the real limit is 252 (or 260 including the header) |
35 | */ | 35 | */ |
36 | #define TRANSPORT_SENSE_BUFFER SCSI_SENSE_BUFFERSIZE | 36 | #define TRANSPORT_SENSE_BUFFER SCSI_SENSE_BUFFERSIZE |
37 | /* Used by transport_send_check_condition_and_sense() */ | 37 | /* Used by transport_send_check_condition_and_sense() */ |
38 | #define SPC_SENSE_KEY_OFFSET 2 | 38 | #define SPC_SENSE_KEY_OFFSET 2 |
39 | #define SPC_ASC_KEY_OFFSET 12 | 39 | #define SPC_ASC_KEY_OFFSET 12 |
40 | #define SPC_ASCQ_KEY_OFFSET 13 | 40 | #define SPC_ASCQ_KEY_OFFSET 13 |
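These offsets index into a fixed-format sense buffer: sense key at byte 2, ASC at byte 12, ASCQ at byte 13. Here is a minimal sketch of how transport_send_check_condition_and_sense()-style code might populate one, assuming the standard SPC fixed-format layout; the local buffer, macros, and example key/ASC values are illustrative stand-ins, not the kernel's.

#include <stdio.h>
#include <string.h>

#define SPC_SENSE_KEY_OFFSET	2
#define SPC_ASC_KEY_OFFSET	12
#define SPC_ASCQ_KEY_OFFSET	13

int main(void)
{
	unsigned char sense[18];

	memset(sense, 0, sizeof(sense));
	sense[0] = 0x70;			/* current error, fixed format */
	sense[SPC_SENSE_KEY_OFFSET] = 0x05;	/* ILLEGAL REQUEST */
	sense[7] = sizeof(sense) - 8;		/* additional sense length */
	sense[SPC_ASC_KEY_OFFSET]  = 0x24;	/* INVALID FIELD IN CDB */
	sense[SPC_ASCQ_KEY_OFFSET] = 0x00;

	printf("key=%#x asc=%#x ascq=%#x\n",
	       sense[SPC_SENSE_KEY_OFFSET],
	       sense[SPC_ASC_KEY_OFFSET],
	       sense[SPC_ASCQ_KEY_OFFSET]);
	return 0;
}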
41 | #define TRANSPORT_IQN_LEN 224 | 41 | #define TRANSPORT_IQN_LEN 224 |
42 | /* Used by target_core_store_alua_lu_gp() and target_core_alua_lu_gp_show_attr_members() */ | 42 | /* Used by target_core_store_alua_lu_gp() and target_core_alua_lu_gp_show_attr_members() */ |
43 | #define LU_GROUP_NAME_BUF 256 | 43 | #define LU_GROUP_NAME_BUF 256 |
44 | /* Used by core_alua_store_tg_pt_gp_info() and target_core_alua_tg_pt_gp_show_attr_members() */ | 44 | /* Used by core_alua_store_tg_pt_gp_info() and target_core_alua_tg_pt_gp_show_attr_members() */ |
45 | #define TG_PT_GROUP_NAME_BUF 256 | 45 | #define TG_PT_GROUP_NAME_BUF 256 |
46 | /* Used to parse VPD into struct t10_vpd */ | 46 | /* Used to parse VPD into struct t10_vpd */ |
47 | #define VPD_TMP_BUF_SIZE 128 | 47 | #define VPD_TMP_BUF_SIZE 128 |
48 | /* Used by transport_generic_cmd_sequencer() */ | 48 | /* Used by transport_generic_cmd_sequencer() */ |
49 | #define READ_BLOCK_LEN 6 | 49 | #define READ_BLOCK_LEN 6 |
50 | #define READ_CAP_LEN 8 | 50 | #define READ_CAP_LEN 8 |
51 | #define READ_POSITION_LEN 20 | 51 | #define READ_POSITION_LEN 20 |
52 | #define INQUIRY_LEN 36 | 52 | #define INQUIRY_LEN 36 |
53 | /* Used by transport_get_inquiry_vpd_serial() */ | 53 | /* Used by transport_get_inquiry_vpd_serial() */ |
54 | #define INQUIRY_VPD_SERIAL_LEN 254 | 54 | #define INQUIRY_VPD_SERIAL_LEN 254 |
55 | /* Used by transport_get_inquiry_vpd_device_ident() */ | 55 | /* Used by transport_get_inquiry_vpd_device_ident() */ |
56 | #define INQUIRY_VPD_DEVICE_IDENTIFIER_LEN 254 | 56 | #define INQUIRY_VPD_DEVICE_IDENTIFIER_LEN 254 |
57 | 57 | ||
58 | /* struct se_hba->hba_flags */ | 58 | /* struct se_hba->hba_flags */ |
59 | enum hba_flags_table { | 59 | enum hba_flags_table { |
60 | HBA_FLAGS_INTERNAL_USE = 0x01, | 60 | HBA_FLAGS_INTERNAL_USE = 0x01, |
61 | HBA_FLAGS_PSCSI_MODE = 0x02, | 61 | HBA_FLAGS_PSCSI_MODE = 0x02, |
62 | }; | 62 | }; |
63 | 63 | ||
64 | /* struct se_lun->lun_status */ | 64 | /* struct se_lun->lun_status */ |
65 | enum transport_lun_status_table { | 65 | enum transport_lun_status_table { |
66 | TRANSPORT_LUN_STATUS_FREE = 0, | 66 | TRANSPORT_LUN_STATUS_FREE = 0, |
67 | TRANSPORT_LUN_STATUS_ACTIVE = 1, | 67 | TRANSPORT_LUN_STATUS_ACTIVE = 1, |
68 | }; | 68 | }; |
69 | 69 | ||
70 | /* struct se_portal_group->se_tpg_type */ | 70 | /* struct se_portal_group->se_tpg_type */ |
71 | enum transport_tpg_type_table { | 71 | enum transport_tpg_type_table { |
72 | TRANSPORT_TPG_TYPE_NORMAL = 0, | 72 | TRANSPORT_TPG_TYPE_NORMAL = 0, |
73 | TRANSPORT_TPG_TYPE_DISCOVERY = 1, | 73 | TRANSPORT_TPG_TYPE_DISCOVERY = 1, |
74 | }; | 74 | }; |
75 | 75 | ||
76 | /* Used for generic timer flags */ | 76 | /* Used for generic timer flags */ |
77 | enum timer_flags_table { | 77 | enum timer_flags_table { |
78 | TF_RUNNING = 0x01, | 78 | TF_RUNNING = 0x01, |
79 | TF_STOP = 0x02, | 79 | TF_STOP = 0x02, |
80 | }; | 80 | }; |
81 | 81 | ||
82 | /* Special transport agnostic struct se_cmd->t_states */ | 82 | /* Special transport agnostic struct se_cmd->t_states */ |
83 | enum transport_state_table { | 83 | enum transport_state_table { |
84 | TRANSPORT_NO_STATE = 0, | 84 | TRANSPORT_NO_STATE = 0, |
85 | TRANSPORT_NEW_CMD = 1, | 85 | TRANSPORT_NEW_CMD = 1, |
86 | TRANSPORT_DEFERRED_CMD = 2, | 86 | TRANSPORT_DEFERRED_CMD = 2, |
87 | TRANSPORT_WRITE_PENDING = 3, | 87 | TRANSPORT_WRITE_PENDING = 3, |
88 | TRANSPORT_PROCESS_WRITE = 4, | 88 | TRANSPORT_PROCESS_WRITE = 4, |
89 | TRANSPORT_PROCESSING = 5, | 89 | TRANSPORT_PROCESSING = 5, |
90 | TRANSPORT_COMPLETE_OK = 6, | 90 | TRANSPORT_COMPLETE_OK = 6, |
91 | TRANSPORT_COMPLETE_FAILURE = 7, | 91 | TRANSPORT_COMPLETE_FAILURE = 7, |
92 | TRANSPORT_COMPLETE_TIMEOUT = 8, | 92 | TRANSPORT_COMPLETE_TIMEOUT = 8, |
93 | TRANSPORT_PROCESS_TMR = 9, | 93 | TRANSPORT_PROCESS_TMR = 9, |
94 | TRANSPORT_TMR_COMPLETE = 10, | 94 | TRANSPORT_TMR_COMPLETE = 10, |
95 | TRANSPORT_ISTATE_PROCESSING = 11, | 95 | TRANSPORT_ISTATE_PROCESSING = 11, |
96 | TRANSPORT_ISTATE_PROCESSED = 12, | 96 | TRANSPORT_ISTATE_PROCESSED = 12, |
97 | TRANSPORT_KILL = 13, | 97 | TRANSPORT_KILL = 13, |
98 | TRANSPORT_REMOVE = 14, | 98 | TRANSPORT_REMOVE = 14, |
99 | TRANSPORT_FREE = 15, | 99 | TRANSPORT_FREE = 15, |
100 | TRANSPORT_NEW_CMD_MAP = 16, | 100 | TRANSPORT_NEW_CMD_MAP = 16, |
101 | }; | 101 | }; |
102 | 102 | ||
103 | /* Used for struct se_cmd->se_cmd_flags */ | 103 | /* Used for struct se_cmd->se_cmd_flags */ |
104 | enum se_cmd_flags_table { | 104 | enum se_cmd_flags_table { |
105 | SCF_SUPPORTED_SAM_OPCODE = 0x00000001, | 105 | SCF_SUPPORTED_SAM_OPCODE = 0x00000001, |
106 | SCF_TRANSPORT_TASK_SENSE = 0x00000002, | 106 | SCF_TRANSPORT_TASK_SENSE = 0x00000002, |
107 | SCF_EMULATED_TASK_SENSE = 0x00000004, | 107 | SCF_EMULATED_TASK_SENSE = 0x00000004, |
108 | SCF_SCSI_DATA_SG_IO_CDB = 0x00000008, | 108 | SCF_SCSI_DATA_SG_IO_CDB = 0x00000008, |
109 | SCF_SCSI_CONTROL_SG_IO_CDB = 0x00000010, | 109 | SCF_SCSI_CONTROL_SG_IO_CDB = 0x00000010, |
110 | SCF_SCSI_CONTROL_NONSG_IO_CDB = 0x00000020, | 110 | SCF_SCSI_CONTROL_NONSG_IO_CDB = 0x00000020, |
111 | SCF_SCSI_NON_DATA_CDB = 0x00000040, | 111 | SCF_SCSI_NON_DATA_CDB = 0x00000040, |
112 | SCF_SCSI_CDB_EXCEPTION = 0x00000080, | 112 | SCF_SCSI_CDB_EXCEPTION = 0x00000080, |
113 | SCF_SCSI_RESERVATION_CONFLICT = 0x00000100, | 113 | SCF_SCSI_RESERVATION_CONFLICT = 0x00000100, |
114 | SCF_CMD_PASSTHROUGH_NOALLOC = 0x00000200, | 114 | SCF_CMD_PASSTHROUGH_NOALLOC = 0x00000200, |
115 | SCF_SE_CMD_FAILED = 0x00000400, | 115 | SCF_SE_CMD_FAILED = 0x00000400, |
116 | SCF_SE_LUN_CMD = 0x00000800, | 116 | SCF_SE_LUN_CMD = 0x00000800, |
117 | SCF_SE_ALLOW_EOO = 0x00001000, | 117 | SCF_SE_ALLOW_EOO = 0x00001000, |
118 | SCF_SE_DISABLE_ONLINE_CHECK = 0x00002000, | 118 | SCF_SE_DISABLE_ONLINE_CHECK = 0x00002000, |
119 | SCF_SENT_CHECK_CONDITION = 0x00004000, | 119 | SCF_SENT_CHECK_CONDITION = 0x00004000, |
120 | SCF_OVERFLOW_BIT = 0x00008000, | 120 | SCF_OVERFLOW_BIT = 0x00008000, |
121 | SCF_UNDERFLOW_BIT = 0x00010000, | 121 | SCF_UNDERFLOW_BIT = 0x00010000, |
122 | SCF_SENT_DELAYED_TAS = 0x00020000, | 122 | SCF_SENT_DELAYED_TAS = 0x00020000, |
123 | SCF_ALUA_NON_OPTIMIZED = 0x00040000, | 123 | SCF_ALUA_NON_OPTIMIZED = 0x00040000, |
124 | SCF_DELAYED_CMD_FROM_SAM_ATTR = 0x00080000, | 124 | SCF_DELAYED_CMD_FROM_SAM_ATTR = 0x00080000, |
125 | SCF_PASSTHROUGH_SG_TO_MEM = 0x00100000, | 125 | SCF_PASSTHROUGH_SG_TO_MEM = 0x00100000, |
126 | SCF_PASSTHROUGH_CONTIG_TO_SG = 0x00200000, | 126 | SCF_PASSTHROUGH_CONTIG_TO_SG = 0x00200000, |
127 | SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00400000, | 127 | SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00400000, |
128 | SCF_EMULATE_SYNC_CACHE = 0x00800000, | 128 | SCF_EMULATE_SYNC_CACHE = 0x00800000, |
129 | SCF_EMULATE_CDB_ASYNC = 0x01000000, | 129 | SCF_EMULATE_CDB_ASYNC = 0x01000000, |
130 | SCF_EMULATE_SYNC_UNMAP = 0x02000000 | 130 | SCF_EMULATE_SYNC_UNMAP = 0x02000000 |
131 | }; | 131 | }; |
132 | 132 | ||
133 | /* struct se_dev_entry->lun_flags and struct se_lun->lun_access */ | 133 | /* struct se_dev_entry->lun_flags and struct se_lun->lun_access */ |
134 | enum transport_lunflags_table { | 134 | enum transport_lunflags_table { |
135 | TRANSPORT_LUNFLAGS_NO_ACCESS = 0x00, | 135 | TRANSPORT_LUNFLAGS_NO_ACCESS = 0x00, |
136 | TRANSPORT_LUNFLAGS_INITIATOR_ACCESS = 0x01, | 136 | TRANSPORT_LUNFLAGS_INITIATOR_ACCESS = 0x01, |
137 | TRANSPORT_LUNFLAGS_READ_ONLY = 0x02, | 137 | TRANSPORT_LUNFLAGS_READ_ONLY = 0x02, |
138 | TRANSPORT_LUNFLAGS_READ_WRITE = 0x04, | 138 | TRANSPORT_LUNFLAGS_READ_WRITE = 0x04, |
139 | }; | 139 | }; |
140 | 140 | ||
141 | /* struct se_device->dev_status */ | 141 | /* struct se_device->dev_status */ |
142 | enum transport_device_status_table { | 142 | enum transport_device_status_table { |
143 | TRANSPORT_DEVICE_ACTIVATED = 0x01, | 143 | TRANSPORT_DEVICE_ACTIVATED = 0x01, |
144 | TRANSPORT_DEVICE_DEACTIVATED = 0x02, | 144 | TRANSPORT_DEVICE_DEACTIVATED = 0x02, |
145 | TRANSPORT_DEVICE_QUEUE_FULL = 0x04, | 145 | TRANSPORT_DEVICE_QUEUE_FULL = 0x04, |
146 | TRANSPORT_DEVICE_SHUTDOWN = 0x08, | 146 | TRANSPORT_DEVICE_SHUTDOWN = 0x08, |
147 | TRANSPORT_DEVICE_OFFLINE_ACTIVATED = 0x10, | 147 | TRANSPORT_DEVICE_OFFLINE_ACTIVATED = 0x10, |
148 | TRANSPORT_DEVICE_OFFLINE_DEACTIVATED = 0x20, | 148 | TRANSPORT_DEVICE_OFFLINE_DEACTIVATED = 0x20, |
149 | }; | 149 | }; |
150 | 150 | ||
151 | /* | 151 | /* |
152 | * Used by transport_send_check_condition_and_sense() and se_cmd->scsi_sense_reason | 152 | * Used by transport_send_check_condition_and_sense() and se_cmd->scsi_sense_reason |
153 | * to signal which ASC/ASCQ sense payload should be built. | 153 | * to signal which ASC/ASCQ sense payload should be built. |
154 | */ | 154 | */ |
155 | enum tcm_sense_reason_table { | 155 | enum tcm_sense_reason_table { |
156 | TCM_NON_EXISTENT_LUN = 0x01, | 156 | TCM_NON_EXISTENT_LUN = 0x01, |
157 | TCM_UNSUPPORTED_SCSI_OPCODE = 0x02, | 157 | TCM_UNSUPPORTED_SCSI_OPCODE = 0x02, |
158 | TCM_INCORRECT_AMOUNT_OF_DATA = 0x03, | 158 | TCM_INCORRECT_AMOUNT_OF_DATA = 0x03, |
159 | TCM_UNEXPECTED_UNSOLICITED_DATA = 0x04, | 159 | TCM_UNEXPECTED_UNSOLICITED_DATA = 0x04, |
160 | TCM_SERVICE_CRC_ERROR = 0x05, | 160 | TCM_SERVICE_CRC_ERROR = 0x05, |
161 | TCM_SNACK_REJECTED = 0x06, | 161 | TCM_SNACK_REJECTED = 0x06, |
162 | TCM_SECTOR_COUNT_TOO_MANY = 0x07, | 162 | TCM_SECTOR_COUNT_TOO_MANY = 0x07, |
163 | TCM_INVALID_CDB_FIELD = 0x08, | 163 | TCM_INVALID_CDB_FIELD = 0x08, |
164 | TCM_INVALID_PARAMETER_LIST = 0x09, | 164 | TCM_INVALID_PARAMETER_LIST = 0x09, |
165 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE = 0x0a, | 165 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE = 0x0a, |
166 | TCM_UNKNOWN_MODE_PAGE = 0x0b, | 166 | TCM_UNKNOWN_MODE_PAGE = 0x0b, |
167 | TCM_WRITE_PROTECTED = 0x0c, | 167 | TCM_WRITE_PROTECTED = 0x0c, |
168 | TCM_CHECK_CONDITION_ABORT_CMD = 0x0d, | 168 | TCM_CHECK_CONDITION_ABORT_CMD = 0x0d, |
169 | TCM_CHECK_CONDITION_UNIT_ATTENTION = 0x0e, | 169 | TCM_CHECK_CONDITION_UNIT_ATTENTION = 0x0e, |
170 | TCM_CHECK_CONDITION_NOT_READY = 0x0f, | 170 | TCM_CHECK_CONDITION_NOT_READY = 0x0f, |
171 | }; | 171 | }; |
172 | 172 | ||
173 | struct se_obj { | 173 | struct se_obj { |
174 | atomic_t obj_access_count; | 174 | atomic_t obj_access_count; |
175 | } ____cacheline_aligned; | 175 | } ____cacheline_aligned; |
176 | 176 | ||
177 | /* | 177 | /* |
178 | * Used by TCM Core internally to signal if ALUA emulation is enabled or | 178 | * Used by TCM Core internally to signal if ALUA emulation is enabled or |
179 | * disabled, or running in TCM/pSCSI passthrough mode | 179 | * disabled, or running in TCM/pSCSI passthrough mode |
180 | */ | 180 | */ |
181 | typedef enum { | 181 | typedef enum { |
182 | SPC_ALUA_PASSTHROUGH, | 182 | SPC_ALUA_PASSTHROUGH, |
183 | SPC2_ALUA_DISABLED, | 183 | SPC2_ALUA_DISABLED, |
184 | SPC3_ALUA_EMULATED | 184 | SPC3_ALUA_EMULATED |
185 | } t10_alua_index_t; | 185 | } t10_alua_index_t; |
186 | 186 | ||
187 | /* | 187 | /* |
188 | * Used by TCM Core internally to signal if SAM Task Attribute emulation | 188 | * Used by TCM Core internally to signal if SAM Task Attribute emulation |
189 | * is enabled or disabled, or running in TCM/pSCSI passthrough mode | 189 | * is enabled or disabled, or running in TCM/pSCSI passthrough mode |
190 | */ | 190 | */ |
191 | typedef enum { | 191 | typedef enum { |
192 | SAM_TASK_ATTR_PASSTHROUGH, | 192 | SAM_TASK_ATTR_PASSTHROUGH, |
193 | SAM_TASK_ATTR_UNTAGGED, | 193 | SAM_TASK_ATTR_UNTAGGED, |
194 | SAM_TASK_ATTR_EMULATED | 194 | SAM_TASK_ATTR_EMULATED |
195 | } t10_task_attr_index_t; | 195 | } t10_task_attr_index_t; |
196 | 196 | ||
197 | /* | 197 | /* |
198 | * Used for target SCSI statistics | 198 | * Used for target SCSI statistics |
199 | */ | 199 | */ |
200 | typedef enum { | 200 | typedef enum { |
201 | SCSI_INST_INDEX, | 201 | SCSI_INST_INDEX, |
202 | SCSI_DEVICE_INDEX, | 202 | SCSI_DEVICE_INDEX, |
203 | SCSI_AUTH_INTR_INDEX, | 203 | SCSI_AUTH_INTR_INDEX, |
204 | SCSI_INDEX_TYPE_MAX | 204 | SCSI_INDEX_TYPE_MAX |
205 | } scsi_index_t; | 205 | } scsi_index_t; |
206 | 206 | ||
207 | struct scsi_index_table { | 207 | struct scsi_index_table { |
208 | spinlock_t lock; | 208 | spinlock_t lock; |
209 | u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX]; | 209 | u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX]; |
210 | } ____cacheline_aligned; | 210 | } ____cacheline_aligned; |
211 | 211 | ||
212 | struct se_cmd; | 212 | struct se_cmd; |
213 | 213 | ||
214 | struct t10_alua { | 214 | struct t10_alua { |
215 | t10_alua_index_t alua_type; | 215 | t10_alua_index_t alua_type; |
216 | /* ALUA Target Port Group ID */ | 216 | /* ALUA Target Port Group ID */ |
217 | u16 alua_tg_pt_gps_counter; | 217 | u16 alua_tg_pt_gps_counter; |
218 | u32 alua_tg_pt_gps_count; | 218 | u32 alua_tg_pt_gps_count; |
219 | spinlock_t tg_pt_gps_lock; | 219 | spinlock_t tg_pt_gps_lock; |
220 | struct se_subsystem_dev *t10_sub_dev; | 220 | struct se_subsystem_dev *t10_sub_dev; |
221 | /* Used for default ALUA Target Port Group */ | 221 | /* Used for default ALUA Target Port Group */ |
222 | struct t10_alua_tg_pt_gp *default_tg_pt_gp; | 222 | struct t10_alua_tg_pt_gp *default_tg_pt_gp; |
223 | /* Used for default ALUA Target Port Group ConfigFS group */ | 223 | /* Used for default ALUA Target Port Group ConfigFS group */ |
224 | struct config_group alua_tg_pt_gps_group; | 224 | struct config_group alua_tg_pt_gps_group; |
225 | int (*alua_state_check)(struct se_cmd *, unsigned char *, u8 *); | 225 | int (*alua_state_check)(struct se_cmd *, unsigned char *, u8 *); |
226 | struct list_head tg_pt_gps_list; | 226 | struct list_head tg_pt_gps_list; |
227 | } ____cacheline_aligned; | 227 | } ____cacheline_aligned; |
228 | 228 | ||
229 | struct t10_alua_lu_gp { | 229 | struct t10_alua_lu_gp { |
230 | u16 lu_gp_id; | 230 | u16 lu_gp_id; |
231 | int lu_gp_valid_id; | 231 | int lu_gp_valid_id; |
232 | u32 lu_gp_members; | 232 | u32 lu_gp_members; |
233 | atomic_t lu_gp_shutdown; | 233 | atomic_t lu_gp_shutdown; |
234 | atomic_t lu_gp_ref_cnt; | 234 | atomic_t lu_gp_ref_cnt; |
235 | spinlock_t lu_gp_lock; | 235 | spinlock_t lu_gp_lock; |
236 | struct config_group lu_gp_group; | 236 | struct config_group lu_gp_group; |
237 | struct list_head lu_gp_list; | 237 | struct list_head lu_gp_list; |
238 | struct list_head lu_gp_mem_list; | 238 | struct list_head lu_gp_mem_list; |
239 | } ____cacheline_aligned; | 239 | } ____cacheline_aligned; |
240 | 240 | ||
241 | struct t10_alua_lu_gp_member { | 241 | struct t10_alua_lu_gp_member { |
242 | int lu_gp_assoc:1; | 242 | bool lu_gp_assoc; |
243 | atomic_t lu_gp_mem_ref_cnt; | 243 | atomic_t lu_gp_mem_ref_cnt; |
244 | spinlock_t lu_gp_mem_lock; | 244 | spinlock_t lu_gp_mem_lock; |
245 | struct t10_alua_lu_gp *lu_gp; | 245 | struct t10_alua_lu_gp *lu_gp; |
246 | struct se_device *lu_gp_mem_dev; | 246 | struct se_device *lu_gp_mem_dev; |
247 | struct list_head lu_gp_mem_list; | 247 | struct list_head lu_gp_mem_list; |
248 | } ____cacheline_aligned; | 248 | } ____cacheline_aligned; |
249 | 249 | ||
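The hunk above converts int lu_gp_assoc:1 to bool. On the usual two's-complement ABIs (where gcc's plain-int bitfields are signed) a one-bit signed bitfield can only hold 0 and -1, so storing 1 reads back as -1 and an equality test against 1 silently fails even though plain truthiness tests still work; with bool the stored value is always 0 or 1 and both styles of test agree. A stand-alone demonstration, with illustrative struct names and the caveat that the out-of-range store is implementation-defined:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative only; not the kernel structs. */
struct with_bitfield {
	int assoc:1;	/* sparse: "dubious one-bit signed bitfield" */
};

struct with_bool {
	bool assoc;
};

int main(void)
{
	struct with_bitfield s = { 0 };
	struct with_bool b = { false };
	int one = 1;

	s.assoc = one;	/* only the sign bit fits: reads back as -1 with gcc */
	b.assoc = true;

	printf("bitfield: value %d, (== 1) is %s\n",
	       s.assoc, s.assoc == 1 ? "true" : "false");
	printf("bool:     value %d, (== 1) is %s\n",
	       b.assoc, b.assoc == 1 ? "true" : "false");
	return 0;
}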
250 | struct t10_alua_tg_pt_gp { | 250 | struct t10_alua_tg_pt_gp { |
251 | u16 tg_pt_gp_id; | 251 | u16 tg_pt_gp_id; |
252 | int tg_pt_gp_valid_id; | 252 | int tg_pt_gp_valid_id; |
253 | int tg_pt_gp_alua_access_status; | 253 | int tg_pt_gp_alua_access_status; |
254 | int tg_pt_gp_alua_access_type; | 254 | int tg_pt_gp_alua_access_type; |
255 | int tg_pt_gp_nonop_delay_msecs; | 255 | int tg_pt_gp_nonop_delay_msecs; |
256 | int tg_pt_gp_trans_delay_msecs; | 256 | int tg_pt_gp_trans_delay_msecs; |
257 | int tg_pt_gp_pref; | 257 | int tg_pt_gp_pref; |
258 | int tg_pt_gp_write_metadata; | 258 | int tg_pt_gp_write_metadata; |
259 | /* Used by struct t10_alua_tg_pt_gp->tg_pt_gp_md_buf_len */ | 259 | /* Used by struct t10_alua_tg_pt_gp->tg_pt_gp_md_buf_len */ |
260 | #define ALUA_MD_BUF_LEN 1024 | 260 | #define ALUA_MD_BUF_LEN 1024 |
261 | u32 tg_pt_gp_md_buf_len; | 261 | u32 tg_pt_gp_md_buf_len; |
262 | u32 tg_pt_gp_members; | 262 | u32 tg_pt_gp_members; |
263 | atomic_t tg_pt_gp_alua_access_state; | 263 | atomic_t tg_pt_gp_alua_access_state; |
264 | atomic_t tg_pt_gp_ref_cnt; | 264 | atomic_t tg_pt_gp_ref_cnt; |
265 | spinlock_t tg_pt_gp_lock; | 265 | spinlock_t tg_pt_gp_lock; |
266 | struct mutex tg_pt_gp_md_mutex; | 266 | struct mutex tg_pt_gp_md_mutex; |
267 | struct se_subsystem_dev *tg_pt_gp_su_dev; | 267 | struct se_subsystem_dev *tg_pt_gp_su_dev; |
268 | struct config_group tg_pt_gp_group; | 268 | struct config_group tg_pt_gp_group; |
269 | struct list_head tg_pt_gp_list; | 269 | struct list_head tg_pt_gp_list; |
270 | struct list_head tg_pt_gp_mem_list; | 270 | struct list_head tg_pt_gp_mem_list; |
271 | } ____cacheline_aligned; | 271 | } ____cacheline_aligned; |
272 | 272 | ||
273 | struct t10_alua_tg_pt_gp_member { | 273 | struct t10_alua_tg_pt_gp_member { |
274 | int tg_pt_gp_assoc:1; | 274 | bool tg_pt_gp_assoc; |
275 | atomic_t tg_pt_gp_mem_ref_cnt; | 275 | atomic_t tg_pt_gp_mem_ref_cnt; |
276 | spinlock_t tg_pt_gp_mem_lock; | 276 | spinlock_t tg_pt_gp_mem_lock; |
277 | struct t10_alua_tg_pt_gp *tg_pt_gp; | 277 | struct t10_alua_tg_pt_gp *tg_pt_gp; |
278 | struct se_port *tg_pt; | 278 | struct se_port *tg_pt; |
279 | struct list_head tg_pt_gp_mem_list; | 279 | struct list_head tg_pt_gp_mem_list; |
280 | } ____cacheline_aligned; | 280 | } ____cacheline_aligned; |
281 | 281 | ||
282 | struct t10_vpd { | 282 | struct t10_vpd { |
283 | unsigned char device_identifier[INQUIRY_VPD_DEVICE_IDENTIFIER_LEN]; | 283 | unsigned char device_identifier[INQUIRY_VPD_DEVICE_IDENTIFIER_LEN]; |
284 | int protocol_identifier_set; | 284 | int protocol_identifier_set; |
285 | u32 protocol_identifier; | 285 | u32 protocol_identifier; |
286 | u32 device_identifier_code_set; | 286 | u32 device_identifier_code_set; |
287 | u32 association; | 287 | u32 association; |
288 | u32 device_identifier_type; | 288 | u32 device_identifier_type; |
289 | struct list_head vpd_list; | 289 | struct list_head vpd_list; |
290 | } ____cacheline_aligned; | 290 | } ____cacheline_aligned; |
291 | 291 | ||
292 | struct t10_wwn { | 292 | struct t10_wwn { |
293 | unsigned char vendor[8]; | 293 | unsigned char vendor[8]; |
294 | unsigned char model[16]; | 294 | unsigned char model[16]; |
295 | unsigned char revision[4]; | 295 | unsigned char revision[4]; |
296 | unsigned char unit_serial[INQUIRY_VPD_SERIAL_LEN]; | 296 | unsigned char unit_serial[INQUIRY_VPD_SERIAL_LEN]; |
297 | spinlock_t t10_vpd_lock; | 297 | spinlock_t t10_vpd_lock; |
298 | struct se_subsystem_dev *t10_sub_dev; | 298 | struct se_subsystem_dev *t10_sub_dev; |
299 | struct config_group t10_wwn_group; | 299 | struct config_group t10_wwn_group; |
300 | struct list_head t10_vpd_list; | 300 | struct list_head t10_vpd_list; |
301 | } ____cacheline_aligned; | 301 | } ____cacheline_aligned; |
302 | 302 | ||
303 | 303 | ||
304 | /* | 304 | /* |
305 | * Used by TCM Core internally to signal if >= SPC-3 persistent reservations | 305 | * Used by TCM Core internally to signal if >= SPC-3 persistent reservations |
306 | * emulation is enabled or disabled, or running in TCM/pSCSI passthrough | 306 | * emulation is enabled or disabled, or running in TCM/pSCSI passthrough |
307 | * mode | 307 | * mode |
308 | */ | 308 | */ |
309 | typedef enum { | 309 | typedef enum { |
310 | SPC_PASSTHROUGH, | 310 | SPC_PASSTHROUGH, |
311 | SPC2_RESERVATIONS, | 311 | SPC2_RESERVATIONS, |
312 | SPC3_PERSISTENT_RESERVATIONS | 312 | SPC3_PERSISTENT_RESERVATIONS |
313 | } t10_reservations_index_t; | 313 | } t10_reservations_index_t; |
314 | 314 | ||
315 | struct t10_pr_registration { | 315 | struct t10_pr_registration { |
316 | /* Used for fabrics that contain WWN+ISID */ | 316 | /* Used for fabrics that contain WWN+ISID */ |
317 | #define PR_REG_ISID_LEN 16 | 317 | #define PR_REG_ISID_LEN 16 |
318 | /* PR_REG_ISID_LEN + ',i,0x' */ | 318 | /* PR_REG_ISID_LEN + ',i,0x' */ |
319 | #define PR_REG_ISID_ID_LEN (PR_REG_ISID_LEN + 5) | 319 | #define PR_REG_ISID_ID_LEN (PR_REG_ISID_LEN + 5) |
320 | char pr_reg_isid[PR_REG_ISID_LEN]; | 320 | char pr_reg_isid[PR_REG_ISID_LEN]; |
321 | /* Used during APTPL metadata reading */ | 321 | /* Used during APTPL metadata reading */ |
322 | #define PR_APTPL_MAX_IPORT_LEN 256 | 322 | #define PR_APTPL_MAX_IPORT_LEN 256 |
323 | unsigned char pr_iport[PR_APTPL_MAX_IPORT_LEN]; | 323 | unsigned char pr_iport[PR_APTPL_MAX_IPORT_LEN]; |
324 | /* Used during APTPL metadata reading */ | 324 | /* Used during APTPL metadata reading */ |
325 | #define PR_APTPL_MAX_TPORT_LEN 256 | 325 | #define PR_APTPL_MAX_TPORT_LEN 256 |
326 | unsigned char pr_tport[PR_APTPL_MAX_TPORT_LEN]; | 326 | unsigned char pr_tport[PR_APTPL_MAX_TPORT_LEN]; |
327 | /* For writing out live metadata */ | 327 | /* For writing out live metadata */ |
328 | unsigned char *pr_aptpl_buf; | 328 | unsigned char *pr_aptpl_buf; |
329 | u16 pr_aptpl_rpti; | 329 | u16 pr_aptpl_rpti; |
330 | u16 pr_reg_tpgt; | 330 | u16 pr_reg_tpgt; |
331 | /* Reservation affects all target ports */ | 331 | /* Reservation affects all target ports */ |
332 | int pr_reg_all_tg_pt; | 332 | int pr_reg_all_tg_pt; |
333 | /* Activate Persistence across Target Power Loss */ | 333 | /* Activate Persistence across Target Power Loss */ |
334 | int pr_reg_aptpl; | 334 | int pr_reg_aptpl; |
335 | int pr_res_holder; | 335 | int pr_res_holder; |
336 | int pr_res_type; | 336 | int pr_res_type; |
337 | int pr_res_scope; | 337 | int pr_res_scope; |
338 | /* Used for fabric initiator WWPNs using an ISID */ | 338 | /* Used for fabric initiator WWPNs using an ISID */ |
339 | int isid_present_at_reg:1; | 339 | bool isid_present_at_reg; |
340 | u32 pr_res_mapped_lun; | 340 | u32 pr_res_mapped_lun; |
341 | u32 pr_aptpl_target_lun; | 341 | u32 pr_aptpl_target_lun; |
342 | u32 pr_res_generation; | 342 | u32 pr_res_generation; |
343 | u64 pr_reg_bin_isid; | 343 | u64 pr_reg_bin_isid; |
344 | u64 pr_res_key; | 344 | u64 pr_res_key; |
345 | atomic_t pr_res_holders; | 345 | atomic_t pr_res_holders; |
346 | struct se_node_acl *pr_reg_nacl; | 346 | struct se_node_acl *pr_reg_nacl; |
347 | struct se_dev_entry *pr_reg_deve; | 347 | struct se_dev_entry *pr_reg_deve; |
348 | struct se_lun *pr_reg_tg_pt_lun; | 348 | struct se_lun *pr_reg_tg_pt_lun; |
349 | struct list_head pr_reg_list; | 349 | struct list_head pr_reg_list; |
350 | struct list_head pr_reg_abort_list; | 350 | struct list_head pr_reg_abort_list; |
351 | struct list_head pr_reg_aptpl_list; | 351 | struct list_head pr_reg_aptpl_list; |
352 | struct list_head pr_reg_atp_list; | 352 | struct list_head pr_reg_atp_list; |
353 | struct list_head pr_reg_atp_mem_list; | 353 | struct list_head pr_reg_atp_mem_list; |
354 | } ____cacheline_aligned; | 354 | } ____cacheline_aligned; |
355 | 355 | ||
356 | /* | 356 | /* |
357 | * This set of function pointer ops is set based upon SPC3_PERSISTENT_RESERVATIONS, | 357 | * This set of function pointer ops is set based upon SPC3_PERSISTENT_RESERVATIONS, |
358 | * SPC2_RESERVATIONS or SPC_PASSTHROUGH in drivers/target/target_core_pr.c: | 358 | * SPC2_RESERVATIONS or SPC_PASSTHROUGH in drivers/target/target_core_pr.c: |
359 | * core_setup_reservations() | 359 | * core_setup_reservations() |
360 | */ | 360 | */ |
361 | struct t10_reservation_ops { | 361 | struct t10_reservation_ops { |
362 | int (*t10_reservation_check)(struct se_cmd *, u32 *); | 362 | int (*t10_reservation_check)(struct se_cmd *, u32 *); |
363 | int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32); | 363 | int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32); |
364 | int (*t10_pr_register)(struct se_cmd *); | 364 | int (*t10_pr_register)(struct se_cmd *); |
365 | int (*t10_pr_clear)(struct se_cmd *); | 365 | int (*t10_pr_clear)(struct se_cmd *); |
366 | }; | 366 | }; |
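Per the comment above, core_setup_reservations() selects one implementation for these function pointers up front, based on the reservation mode, so callers never branch on the mode at each call site. A hedged sketch of that ops-table selection pattern; the stub names and enum below are invented, not the kernel symbols:

#include <stdio.h>

struct res_ops {
	int (*check)(void);
};

static int spc3_check(void) { puts("SPC-3 PR check"); return 0; }
static int spc2_check(void) { puts("SPC-2 reservation check"); return 0; }
static int pt_check(void)   { puts("passthrough: no emulation"); return 0; }

static const struct res_ops spc3_ops = { .check = spc3_check };
static const struct res_ops spc2_ops = { .check = spc2_check };
static const struct res_ops pt_ops   = { .check = pt_check };

enum res_mode { MODE_PASSTHROUGH, MODE_SPC2, MODE_SPC3 };

/* Pick the ops table once, at device setup time. */
static const struct res_ops *setup_reservations(enum res_mode mode)
{
	switch (mode) {
	case MODE_SPC3: return &spc3_ops;
	case MODE_SPC2: return &spc2_ops;
	default:        return &pt_ops;
	}
}

int main(void)
{
	const struct res_ops *ops = setup_reservations(MODE_SPC2);
	return ops->check();
}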
367 | 367 | ||
368 | struct t10_reservation_template { | 368 | struct t10_reservation_template { |
369 | /* Reservation affects all target ports */ | 369 | /* Reservation affects all target ports */ |
370 | int pr_all_tg_pt; | 370 | int pr_all_tg_pt; |
371 | /* Activate Persistence across Target Power Loss enabled | 371 | /* Activate Persistence across Target Power Loss enabled |
372 | * for SCSI device */ | 372 | * for SCSI device */ |
373 | int pr_aptpl_active; | 373 | int pr_aptpl_active; |
374 | /* Used by struct t10_reservation_template->pr_aptpl_buf_len */ | 374 | /* Used by struct t10_reservation_template->pr_aptpl_buf_len */ |
375 | #define PR_APTPL_BUF_LEN 8192 | 375 | #define PR_APTPL_BUF_LEN 8192 |
376 | u32 pr_aptpl_buf_len; | 376 | u32 pr_aptpl_buf_len; |
377 | u32 pr_generation; | 377 | u32 pr_generation; |
378 | t10_reservations_index_t res_type; | 378 | t10_reservations_index_t res_type; |
379 | spinlock_t registration_lock; | 379 | spinlock_t registration_lock; |
380 | spinlock_t aptpl_reg_lock; | 380 | spinlock_t aptpl_reg_lock; |
381 | /* | 381 | /* |
382 | * This will always be set by one individual I_T Nexus. | 382 | * This will always be set by one individual I_T Nexus. |
383 | * However with all_tg_pt=1, other I_T Nexus from the | 383 | * However with all_tg_pt=1, other I_T Nexus from the |
384 | * same initiator can access PR reg/res info on a different | 384 | * same initiator can access PR reg/res info on a different |
385 | * target port. | 385 | * target port. |
386 | * | 386 | * |
387 | * There is also the 'All Registrants' case, where there is | 387 | * There is also the 'All Registrants' case, where there is |
388 | * a single *pr_res_holder of the reservation, but all | 388 | * a single *pr_res_holder of the reservation, but all |
389 | * registrations are considered reservation holders. | 389 | * registrations are considered reservation holders. |
390 | */ | 390 | */ |
391 | struct se_node_acl *pr_res_holder; | 391 | struct se_node_acl *pr_res_holder; |
392 | struct list_head registration_list; | 392 | struct list_head registration_list; |
393 | struct list_head aptpl_reg_list; | 393 | struct list_head aptpl_reg_list; |
394 | struct t10_reservation_ops pr_ops; | 394 | struct t10_reservation_ops pr_ops; |
395 | } ____cacheline_aligned; | 395 | } ____cacheline_aligned; |
396 | 396 | ||
397 | struct se_queue_req { | 397 | struct se_queue_req { |
398 | int state; | 398 | int state; |
399 | void *cmd; | 399 | void *cmd; |
400 | struct list_head qr_list; | 400 | struct list_head qr_list; |
401 | } ____cacheline_aligned; | 401 | } ____cacheline_aligned; |
402 | 402 | ||
403 | struct se_queue_obj { | 403 | struct se_queue_obj { |
404 | atomic_t queue_cnt; | 404 | atomic_t queue_cnt; |
405 | spinlock_t cmd_queue_lock; | 405 | spinlock_t cmd_queue_lock; |
406 | struct list_head qobj_list; | 406 | struct list_head qobj_list; |
407 | wait_queue_head_t thread_wq; | 407 | wait_queue_head_t thread_wq; |
408 | } ____cacheline_aligned; | 408 | } ____cacheline_aligned; |
409 | 409 | ||
410 | /* | 410 | /* |
411 | * Used one per struct se_cmd to hold all extra struct se_task | 411 | * Used one per struct se_cmd to hold all extra struct se_task |
412 | * metadata. This structure is set up and allocated in | 412 | * metadata. This structure is set up and allocated in |
413 | * drivers/target/target_core_transport.c:__transport_alloc_se_cmd() | 413 | * drivers/target/target_core_transport.c:__transport_alloc_se_cmd() |
414 | */ | 414 | */ |
415 | struct se_transport_task { | 415 | struct se_transport_task { |
416 | unsigned char *t_task_cdb; | 416 | unsigned char *t_task_cdb; |
417 | unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE]; | 417 | unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE]; |
418 | unsigned long long t_task_lba; | 418 | unsigned long long t_task_lba; |
419 | int t_tasks_failed; | 419 | int t_tasks_failed; |
420 | int t_tasks_fua; | 420 | int t_tasks_fua; |
421 | int t_tasks_bidi:1; | 421 | bool t_tasks_bidi; |
422 | u32 t_task_cdbs; | 422 | u32 t_task_cdbs; |
423 | u32 t_tasks_check; | 423 | u32 t_tasks_check; |
424 | u32 t_tasks_no; | 424 | u32 t_tasks_no; |
425 | u32 t_tasks_sectors; | 425 | u32 t_tasks_sectors; |
426 | u32 t_tasks_se_num; | 426 | u32 t_tasks_se_num; |
427 | u32 t_tasks_se_bidi_num; | 427 | u32 t_tasks_se_bidi_num; |
428 | u32 t_tasks_sg_chained_no; | 428 | u32 t_tasks_sg_chained_no; |
429 | atomic_t t_fe_count; | 429 | atomic_t t_fe_count; |
430 | atomic_t t_se_count; | 430 | atomic_t t_se_count; |
431 | atomic_t t_task_cdbs_left; | 431 | atomic_t t_task_cdbs_left; |
432 | atomic_t t_task_cdbs_ex_left; | 432 | atomic_t t_task_cdbs_ex_left; |
433 | atomic_t t_task_cdbs_timeout_left; | 433 | atomic_t t_task_cdbs_timeout_left; |
434 | atomic_t t_task_cdbs_sent; | 434 | atomic_t t_task_cdbs_sent; |
435 | atomic_t t_transport_aborted; | 435 | atomic_t t_transport_aborted; |
436 | atomic_t t_transport_active; | 436 | atomic_t t_transport_active; |
437 | atomic_t t_transport_complete; | 437 | atomic_t t_transport_complete; |
438 | atomic_t t_transport_queue_active; | 438 | atomic_t t_transport_queue_active; |
439 | atomic_t t_transport_sent; | 439 | atomic_t t_transport_sent; |
440 | atomic_t t_transport_stop; | 440 | atomic_t t_transport_stop; |
441 | atomic_t t_transport_timeout; | 441 | atomic_t t_transport_timeout; |
442 | atomic_t transport_dev_active; | 442 | atomic_t transport_dev_active; |
443 | atomic_t transport_lun_active; | 443 | atomic_t transport_lun_active; |
444 | atomic_t transport_lun_fe_stop; | 444 | atomic_t transport_lun_fe_stop; |
445 | atomic_t transport_lun_stop; | 445 | atomic_t transport_lun_stop; |
446 | spinlock_t t_state_lock; | 446 | spinlock_t t_state_lock; |
447 | struct completion t_transport_stop_comp; | 447 | struct completion t_transport_stop_comp; |
448 | struct completion transport_lun_fe_stop_comp; | 448 | struct completion transport_lun_fe_stop_comp; |
449 | struct completion transport_lun_stop_comp; | 449 | struct completion transport_lun_stop_comp; |
450 | struct scatterlist *t_tasks_sg_chained; | 450 | struct scatterlist *t_tasks_sg_chained; |
451 | struct scatterlist t_tasks_sg_bounce; | 451 | struct scatterlist t_tasks_sg_bounce; |
452 | void *t_task_buf; | 452 | void *t_task_buf; |
453 | /* | 453 | /* |
454 | * Used for pre-registered fabric SGL passthrough WRITE and READ | 454 | * Used for pre-registered fabric SGL passthrough WRITE and READ |
455 | * with the special SCF_PASSTHROUGH_CONTIG_TO_SG case for TCM_Loop | 455 | * with the special SCF_PASSTHROUGH_CONTIG_TO_SG case for TCM_Loop |
456 | * and other HW target mode fabric modules. | 456 | * and other HW target mode fabric modules. |
457 | */ | 457 | */ |
458 | struct scatterlist *t_task_pt_sgl; | 458 | struct scatterlist *t_task_pt_sgl; |
459 | struct list_head *t_mem_list; | 459 | struct list_head *t_mem_list; |
460 | /* Used for BIDI READ */ | 460 | /* Used for BIDI READ */ |
461 | struct list_head *t_mem_bidi_list; | 461 | struct list_head *t_mem_bidi_list; |
462 | struct list_head t_task_list; | 462 | struct list_head t_task_list; |
463 | } ____cacheline_aligned; | 463 | } ____cacheline_aligned; |
464 | 464 | ||
465 | struct se_task { | 465 | struct se_task { |
466 | unsigned char task_sense; | 466 | unsigned char task_sense; |
467 | struct scatterlist *task_sg; | 467 | struct scatterlist *task_sg; |
468 | struct scatterlist *task_sg_bidi; | 468 | struct scatterlist *task_sg_bidi; |
469 | u8 task_scsi_status; | 469 | u8 task_scsi_status; |
470 | u8 task_flags; | 470 | u8 task_flags; |
471 | int task_error_status; | 471 | int task_error_status; |
472 | int task_state_flags; | 472 | int task_state_flags; |
473 | int task_padded_sg:1; | 473 | bool task_padded_sg; |
474 | unsigned long long task_lba; | 474 | unsigned long long task_lba; |
475 | u32 task_no; | 475 | u32 task_no; |
476 | u32 task_sectors; | 476 | u32 task_sectors; |
477 | u32 task_size; | 477 | u32 task_size; |
478 | u32 task_sg_num; | 478 | u32 task_sg_num; |
479 | u32 task_sg_offset; | 479 | u32 task_sg_offset; |
480 | enum dma_data_direction task_data_direction; | 480 | enum dma_data_direction task_data_direction; |
481 | struct se_cmd *task_se_cmd; | 481 | struct se_cmd *task_se_cmd; |
482 | struct se_device *se_dev; | 482 | struct se_device *se_dev; |
483 | struct completion task_stop_comp; | 483 | struct completion task_stop_comp; |
484 | atomic_t task_active; | 484 | atomic_t task_active; |
485 | atomic_t task_execute_queue; | 485 | atomic_t task_execute_queue; |
486 | atomic_t task_timeout; | 486 | atomic_t task_timeout; |
487 | atomic_t task_sent; | 487 | atomic_t task_sent; |
488 | atomic_t task_stop; | 488 | atomic_t task_stop; |
489 | atomic_t task_state_active; | 489 | atomic_t task_state_active; |
490 | struct timer_list task_timer; | 490 | struct timer_list task_timer; |
491 | struct se_device *se_obj_ptr; | 491 | struct se_device *se_obj_ptr; |
492 | struct list_head t_list; | 492 | struct list_head t_list; |
493 | struct list_head t_execute_list; | 493 | struct list_head t_execute_list; |
494 | struct list_head t_state_list; | 494 | struct list_head t_state_list; |
495 | } ____cacheline_aligned; | 495 | } ____cacheline_aligned; |
496 | 496 | ||
497 | #define TASK_CMD(task) ((struct se_cmd *)task->task_se_cmd) | 497 | #define TASK_CMD(task) ((struct se_cmd *)task->task_se_cmd) |
498 | #define TASK_DEV(task) ((struct se_device *)task->se_dev) | 498 | #define TASK_DEV(task) ((struct se_device *)task->se_dev) |
499 | 499 | ||
500 | struct se_cmd { | 500 | struct se_cmd { |
501 | /* SAM response code being sent to initiator */ | 501 | /* SAM response code being sent to initiator */ |
502 | u8 scsi_status; | 502 | u8 scsi_status; |
503 | u8 scsi_asc; | 503 | u8 scsi_asc; |
504 | u8 scsi_ascq; | 504 | u8 scsi_ascq; |
505 | u8 scsi_sense_reason; | 505 | u8 scsi_sense_reason; |
506 | u16 scsi_sense_length; | 506 | u16 scsi_sense_length; |
507 | /* Delay for ALUA Active/NonOptimized state access in milliseconds */ | 507 | /* Delay for ALUA Active/NonOptimized state access in milliseconds */ |
508 | int alua_nonop_delay; | 508 | int alua_nonop_delay; |
509 | /* See include/linux/dma-mapping.h */ | 509 | /* See include/linux/dma-mapping.h */ |
510 | enum dma_data_direction data_direction; | 510 | enum dma_data_direction data_direction; |
511 | /* For SAM Task Attribute */ | 511 | /* For SAM Task Attribute */ |
512 | int sam_task_attr; | 512 | int sam_task_attr; |
513 | /* Transport protocol dependent state, see transport_state_table */ | 513 | /* Transport protocol dependent state, see transport_state_table */ |
514 | enum transport_state_table t_state; | 514 | enum transport_state_table t_state; |
515 | /* Transport protocol dependent state for out of order CmdSNs */ | 515 | /* Transport protocol dependent state for out of order CmdSNs */ |
516 | int deferred_t_state; | 516 | int deferred_t_state; |
517 | /* Transport specific error status */ | 517 | /* Transport specific error status */ |
518 | int transport_error_status; | 518 | int transport_error_status; |
519 | /* See se_cmd_flags_table */ | 519 | /* See se_cmd_flags_table */ |
520 | u32 se_cmd_flags; | 520 | u32 se_cmd_flags; |
521 | u32 se_ordered_id; | 521 | u32 se_ordered_id; |
522 | /* Total size in bytes associated with command */ | 522 | /* Total size in bytes associated with command */ |
523 | u32 data_length; | 523 | u32 data_length; |
524 | /* SCSI Presented Data Transfer Length */ | 524 | /* SCSI Presented Data Transfer Length */ |
525 | u32 cmd_spdtl; | 525 | u32 cmd_spdtl; |
526 | u32 residual_count; | 526 | u32 residual_count; |
527 | u32 orig_fe_lun; | 527 | u32 orig_fe_lun; |
528 | /* Persistent Reservation key */ | 528 | /* Persistent Reservation key */ |
529 | u64 pr_res_key; | 529 | u64 pr_res_key; |
530 | atomic_t transport_sent; | 530 | atomic_t transport_sent; |
531 | /* Used for sense data */ | 531 | /* Used for sense data */ |
532 | void *sense_buffer; | 532 | void *sense_buffer; |
533 | struct list_head se_delayed_list; | 533 | struct list_head se_delayed_list; |
534 | struct list_head se_ordered_list; | 534 | struct list_head se_ordered_list; |
535 | struct list_head se_lun_list; | 535 | struct list_head se_lun_list; |
536 | struct se_device *se_dev; | 536 | struct se_device *se_dev; |
537 | struct se_dev_entry *se_deve; | 537 | struct se_dev_entry *se_deve; |
538 | struct se_device *se_obj_ptr; | 538 | struct se_device *se_obj_ptr; |
539 | struct se_device *se_orig_obj_ptr; | 539 | struct se_device *se_orig_obj_ptr; |
540 | struct se_lun *se_lun; | 540 | struct se_lun *se_lun; |
541 | /* Only used for internal passthrough and legacy TCM fabric modules */ | 541 | /* Only used for internal passthrough and legacy TCM fabric modules */ |
542 | struct se_session *se_sess; | 542 | struct se_session *se_sess; |
543 | struct se_tmr_req *se_tmr_req; | 543 | struct se_tmr_req *se_tmr_req; |
544 | /* t_task is setup to t_task_backstore in transport_init_se_cmd() */ | 544 | /* t_task is setup to t_task_backstore in transport_init_se_cmd() */ |
545 | struct se_transport_task *t_task; | 545 | struct se_transport_task *t_task; |
546 | struct se_transport_task t_task_backstore; | 546 | struct se_transport_task t_task_backstore; |
547 | struct target_core_fabric_ops *se_tfo; | 547 | struct target_core_fabric_ops *se_tfo; |
548 | int (*transport_emulate_cdb)(struct se_cmd *); | 548 | int (*transport_emulate_cdb)(struct se_cmd *); |
549 | void (*transport_split_cdb)(unsigned long long, u32 *, unsigned char *); | 549 | void (*transport_split_cdb)(unsigned long long, u32 *, unsigned char *); |
550 | void (*transport_wait_for_tasks)(struct se_cmd *, int, int); | 550 | void (*transport_wait_for_tasks)(struct se_cmd *, int, int); |
551 | void (*transport_complete_callback)(struct se_cmd *); | 551 | void (*transport_complete_callback)(struct se_cmd *); |
552 | } ____cacheline_aligned; | 552 | } ____cacheline_aligned; |
553 | 553 | ||
554 | #define T_TASK(cmd) ((struct se_transport_task *)(cmd->t_task)) | 554 | #define T_TASK(cmd) ((struct se_transport_task *)(cmd->t_task)) |
555 | #define CMD_TFO(cmd) ((struct target_core_fabric_ops *)cmd->se_tfo) | 555 | #define CMD_TFO(cmd) ((struct target_core_fabric_ops *)cmd->se_tfo) |
556 | 556 | ||
557 | struct se_tmr_req { | 557 | struct se_tmr_req { |
558 | /* Task Management function to be performed */ | 558 | /* Task Management function to be performed */ |
559 | u8 function; | 559 | u8 function; |
560 | /* Task Management response to send */ | 560 | /* Task Management response to send */ |
561 | u8 response; | 561 | u8 response; |
562 | int call_transport; | 562 | int call_transport; |
563 | /* Reference to the ITT that Task Mgmt should be performed on */ | 563 | /* Reference to the ITT that Task Mgmt should be performed on */ |
564 | u32 ref_task_tag; | 564 | u32 ref_task_tag; |
565 | /* 64-bit encoded SAM LUN from $FABRIC_MOD TMR header */ | 565 | /* 64-bit encoded SAM LUN from $FABRIC_MOD TMR header */ |
566 | u64 ref_task_lun; | 566 | u64 ref_task_lun; |
567 | void *fabric_tmr_ptr; | 567 | void *fabric_tmr_ptr; |
568 | struct se_cmd *task_cmd; | 568 | struct se_cmd *task_cmd; |
569 | struct se_cmd *ref_cmd; | 569 | struct se_cmd *ref_cmd; |
570 | struct se_device *tmr_dev; | 570 | struct se_device *tmr_dev; |
571 | struct se_lun *tmr_lun; | 571 | struct se_lun *tmr_lun; |
572 | struct list_head tmr_list; | 572 | struct list_head tmr_list; |
573 | } ____cacheline_aligned; | 573 | } ____cacheline_aligned; |
574 | 574 | ||
575 | struct se_ua { | 575 | struct se_ua { |
576 | u8 ua_asc; | 576 | u8 ua_asc; |
577 | u8 ua_ascq; | 577 | u8 ua_ascq; |
578 | struct se_node_acl *ua_nacl; | 578 | struct se_node_acl *ua_nacl; |
579 | struct list_head ua_dev_list; | 579 | struct list_head ua_dev_list; |
580 | struct list_head ua_nacl_list; | 580 | struct list_head ua_nacl_list; |
581 | } ____cacheline_aligned; | 581 | } ____cacheline_aligned; |
582 | 582 | ||
583 | struct se_node_acl { | 583 | struct se_node_acl { |
584 | char initiatorname[TRANSPORT_IQN_LEN]; | 584 | char initiatorname[TRANSPORT_IQN_LEN]; |
585 | /* Used to signal a demo-mode-created ACL, disabled by default */ | 585 | /* Used to signal a demo-mode-created ACL, disabled by default */ |
586 | int dynamic_node_acl:1; | 586 | bool dynamic_node_acl; |
587 | u32 queue_depth; | 587 | u32 queue_depth; |
588 | u32 acl_index; | 588 | u32 acl_index; |
589 | u64 num_cmds; | 589 | u64 num_cmds; |
590 | u64 read_bytes; | 590 | u64 read_bytes; |
591 | u64 write_bytes; | 591 | u64 write_bytes; |
592 | spinlock_t stats_lock; | 592 | spinlock_t stats_lock; |
593 | /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ | 593 | /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ |
594 | atomic_t acl_pr_ref_count; | 594 | atomic_t acl_pr_ref_count; |
595 | struct se_dev_entry *device_list; | 595 | struct se_dev_entry *device_list; |
596 | struct se_session *nacl_sess; | 596 | struct se_session *nacl_sess; |
597 | struct se_portal_group *se_tpg; | 597 | struct se_portal_group *se_tpg; |
598 | spinlock_t device_list_lock; | 598 | spinlock_t device_list_lock; |
599 | spinlock_t nacl_sess_lock; | 599 | spinlock_t nacl_sess_lock; |
600 | struct config_group acl_group; | 600 | struct config_group acl_group; |
601 | struct config_group acl_attrib_group; | 601 | struct config_group acl_attrib_group; |
602 | struct config_group acl_auth_group; | 602 | struct config_group acl_auth_group; |
603 | struct config_group acl_param_group; | 603 | struct config_group acl_param_group; |
604 | struct config_group *acl_default_groups[4]; | 604 | struct config_group *acl_default_groups[4]; |
605 | struct list_head acl_list; | 605 | struct list_head acl_list; |
606 | struct list_head acl_sess_list; | 606 | struct list_head acl_sess_list; |
607 | } ____cacheline_aligned; | 607 | } ____cacheline_aligned; |
608 | 608 | ||
609 | struct se_session { | 609 | struct se_session { |
610 | u64 sess_bin_isid; | 610 | u64 sess_bin_isid; |
611 | struct se_node_acl *se_node_acl; | 611 | struct se_node_acl *se_node_acl; |
612 | struct se_portal_group *se_tpg; | 612 | struct se_portal_group *se_tpg; |
613 | void *fabric_sess_ptr; | 613 | void *fabric_sess_ptr; |
614 | struct list_head sess_list; | 614 | struct list_head sess_list; |
615 | struct list_head sess_acl_list; | 615 | struct list_head sess_acl_list; |
616 | } ____cacheline_aligned; | 616 | } ____cacheline_aligned; |
617 | 617 | ||
618 | #define SE_SESS(cmd) ((struct se_session *)(cmd)->se_sess) | 618 | #define SE_SESS(cmd) ((struct se_session *)(cmd)->se_sess) |
619 | #define SE_NODE_ACL(sess) ((struct se_node_acl *)(sess)->se_node_acl) | 619 | #define SE_NODE_ACL(sess) ((struct se_node_acl *)(sess)->se_node_acl) |
620 | 620 | ||
621 | struct se_device; | 621 | struct se_device; |
622 | struct se_transform_info; | 622 | struct se_transform_info; |
623 | struct scatterlist; | 623 | struct scatterlist; |
624 | 624 | ||
625 | struct se_lun_acl { | 625 | struct se_lun_acl { |
626 | char initiatorname[TRANSPORT_IQN_LEN]; | 626 | char initiatorname[TRANSPORT_IQN_LEN]; |
627 | u32 mapped_lun; | 627 | u32 mapped_lun; |
628 | struct se_node_acl *se_lun_nacl; | 628 | struct se_node_acl *se_lun_nacl; |
629 | struct se_lun *se_lun; | 629 | struct se_lun *se_lun; |
630 | struct list_head lacl_list; | 630 | struct list_head lacl_list; |
631 | struct config_group se_lun_group; | 631 | struct config_group se_lun_group; |
632 | } ____cacheline_aligned; | 632 | } ____cacheline_aligned; |
633 | 633 | ||
634 | struct se_dev_entry { | 634 | struct se_dev_entry { |
635 | int def_pr_registered:1; | 635 | bool def_pr_registered; |
636 | /* See transport_lunflags_table */ | 636 | /* See transport_lunflags_table */ |
637 | u32 lun_flags; | 637 | u32 lun_flags; |
638 | u32 deve_cmds; | 638 | u32 deve_cmds; |
639 | u32 mapped_lun; | 639 | u32 mapped_lun; |
640 | u32 average_bytes; | 640 | u32 average_bytes; |
641 | u32 last_byte_count; | 641 | u32 last_byte_count; |
642 | u32 total_cmds; | 642 | u32 total_cmds; |
643 | u32 total_bytes; | 643 | u32 total_bytes; |
644 | u64 pr_res_key; | 644 | u64 pr_res_key; |
645 | u64 creation_time; | 645 | u64 creation_time; |
646 | u32 attach_count; | 646 | u32 attach_count; |
647 | u64 read_bytes; | 647 | u64 read_bytes; |
648 | u64 write_bytes; | 648 | u64 write_bytes; |
649 | atomic_t ua_count; | 649 | atomic_t ua_count; |
650 | /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ | 650 | /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ |
651 | atomic_t pr_ref_count; | 651 | atomic_t pr_ref_count; |
652 | struct se_lun_acl *se_lun_acl; | 652 | struct se_lun_acl *se_lun_acl; |
653 | spinlock_t ua_lock; | 653 | spinlock_t ua_lock; |
654 | struct se_lun *se_lun; | 654 | struct se_lun *se_lun; |
655 | struct list_head alua_port_list; | 655 | struct list_head alua_port_list; |
656 | struct list_head ua_list; | 656 | struct list_head ua_list; |
657 | } ____cacheline_aligned; | 657 | } ____cacheline_aligned; |
658 | 658 | ||
659 | struct se_dev_limits { | 659 | struct se_dev_limits { |
660 | /* Max supported HW queue depth */ | 660 | /* Max supported HW queue depth */ |
661 | u32 hw_queue_depth; | 661 | u32 hw_queue_depth; |
662 | /* Max supported virtual queue depth */ | 662 | /* Max supported virtual queue depth */ |
663 | u32 queue_depth; | 663 | u32 queue_depth; |
664 | /* From include/linux/blkdev.h for the other HW/SW limits. */ | 664 | /* From include/linux/blkdev.h for the other HW/SW limits. */ |
665 | struct queue_limits limits; | 665 | struct queue_limits limits; |
666 | } ____cacheline_aligned; | 666 | } ____cacheline_aligned; |
667 | 667 | ||
668 | struct se_dev_attrib { | 668 | struct se_dev_attrib { |
669 | int emulate_dpo; | 669 | int emulate_dpo; |
670 | int emulate_fua_write; | 670 | int emulate_fua_write; |
671 | int emulate_fua_read; | 671 | int emulate_fua_read; |
672 | int emulate_write_cache; | 672 | int emulate_write_cache; |
673 | int emulate_ua_intlck_ctrl; | 673 | int emulate_ua_intlck_ctrl; |
674 | int emulate_tas; | 674 | int emulate_tas; |
675 | int emulate_tpu; | 675 | int emulate_tpu; |
676 | int emulate_tpws; | 676 | int emulate_tpws; |
677 | int emulate_reservations; | 677 | int emulate_reservations; |
678 | int emulate_alua; | 678 | int emulate_alua; |
679 | int enforce_pr_isids; | 679 | int enforce_pr_isids; |
680 | u32 hw_block_size; | 680 | u32 hw_block_size; |
681 | u32 block_size; | 681 | u32 block_size; |
682 | u32 hw_max_sectors; | 682 | u32 hw_max_sectors; |
683 | u32 max_sectors; | 683 | u32 max_sectors; |
684 | u32 optimal_sectors; | 684 | u32 optimal_sectors; |
685 | u32 hw_queue_depth; | 685 | u32 hw_queue_depth; |
686 | u32 queue_depth; | 686 | u32 queue_depth; |
687 | u32 task_timeout; | 687 | u32 task_timeout; |
688 | u32 max_unmap_lba_count; | 688 | u32 max_unmap_lba_count; |
689 | u32 max_unmap_block_desc_count; | 689 | u32 max_unmap_block_desc_count; |
690 | u32 unmap_granularity; | 690 | u32 unmap_granularity; |
691 | u32 unmap_granularity_alignment; | 691 | u32 unmap_granularity_alignment; |
692 | struct se_subsystem_dev *da_sub_dev; | 692 | struct se_subsystem_dev *da_sub_dev; |
693 | struct config_group da_group; | 693 | struct config_group da_group; |
694 | } ____cacheline_aligned; | 694 | } ____cacheline_aligned; |
695 | 695 | ||
696 | struct se_subsystem_dev { | 696 | struct se_subsystem_dev { |
697 | /* Used for struct se_subsystem_dev->se_dev_alias, must be less than PAGE_SIZE */ | 697 | /* Used for struct se_subsystem_dev->se_dev_alias, must be less than PAGE_SIZE */ |
698 | #define SE_DEV_ALIAS_LEN 512 | 698 | #define SE_DEV_ALIAS_LEN 512 |
699 | unsigned char se_dev_alias[SE_DEV_ALIAS_LEN]; | 699 | unsigned char se_dev_alias[SE_DEV_ALIAS_LEN]; |
700 | /* Used for struct se_subsystem_dev->se_dev_udev_path[], must be less than PAGE_SIZE */ | 700 | /* Used for struct se_subsystem_dev->se_dev_udev_path[], must be less than PAGE_SIZE */ |
701 | #define SE_UDEV_PATH_LEN 512 | 701 | #define SE_UDEV_PATH_LEN 512 |
702 | unsigned char se_dev_udev_path[SE_UDEV_PATH_LEN]; | 702 | unsigned char se_dev_udev_path[SE_UDEV_PATH_LEN]; |
703 | u32 su_dev_flags; | 703 | u32 su_dev_flags; |
704 | struct se_hba *se_dev_hba; | 704 | struct se_hba *se_dev_hba; |
705 | struct se_device *se_dev_ptr; | 705 | struct se_device *se_dev_ptr; |
706 | struct se_dev_attrib se_dev_attrib; | 706 | struct se_dev_attrib se_dev_attrib; |
707 | /* T10 Asymmetric Logical Unit Assignment for Target Ports */ | 707 | /* T10 Asymmetric Logical Unit Assignment for Target Ports */ |
708 | struct t10_alua t10_alua; | 708 | struct t10_alua t10_alua; |
709 | /* T10 Inquiry and VPD WWN Information */ | 709 | /* T10 Inquiry and VPD WWN Information */ |
710 | struct t10_wwn t10_wwn; | 710 | struct t10_wwn t10_wwn; |
711 | /* T10 SPC-2 + SPC-3 Reservations */ | 711 | /* T10 SPC-2 + SPC-3 Reservations */ |
712 | struct t10_reservation_template t10_reservation; | 712 | struct t10_reservation_template t10_reservation; |
713 | spinlock_t se_dev_lock; | 713 | spinlock_t se_dev_lock; |
714 | void *se_dev_su_ptr; | 714 | void *se_dev_su_ptr; |
715 | struct list_head g_se_dev_list; | 715 | struct list_head g_se_dev_list; |
716 | struct config_group se_dev_group; | 716 | struct config_group se_dev_group; |
717 | /* For T10 Reservations */ | 717 | /* For T10 Reservations */ |
718 | struct config_group se_dev_pr_group; | 718 | struct config_group se_dev_pr_group; |
719 | } ____cacheline_aligned; | 719 | } ____cacheline_aligned; |
720 | 720 | ||
721 | #define T10_ALUA(su_dev) (&(su_dev)->t10_alua) | 721 | #define T10_ALUA(su_dev) (&(su_dev)->t10_alua) |
722 | #define T10_RES(su_dev) (&(su_dev)->t10_reservation) | 722 | #define T10_RES(su_dev) (&(su_dev)->t10_reservation) |
723 | #define T10_PR_OPS(su_dev) (&(su_dev)->t10_reservation.pr_ops) | 723 | #define T10_PR_OPS(su_dev) (&(su_dev)->t10_reservation.pr_ops) |
724 | 724 | ||
725 | struct se_device { | 725 | struct se_device { |
726 | /* Set to 1 if thread is NOT sleeping on thread_sem */ | 726 | /* Set to 1 if thread is NOT sleeping on thread_sem */ |
727 | u8 thread_active; | 727 | u8 thread_active; |
728 | u8 dev_status_timer_flags; | 728 | u8 dev_status_timer_flags; |
729 | /* RELATIVE TARGET PORT IDENTIFIER Counter */ | 729 | /* RELATIVE TARGET PORT IDENTIFIER Counter */ |
730 | u16 dev_rpti_counter; | 730 | u16 dev_rpti_counter; |
731 | /* Used for SAM Task Attribute ordering */ | 731 | /* Used for SAM Task Attribute ordering */ |
732 | u32 dev_cur_ordered_id; | 732 | u32 dev_cur_ordered_id; |
733 | u32 dev_flags; | 733 | u32 dev_flags; |
734 | u32 dev_port_count; | 734 | u32 dev_port_count; |
735 | /* See transport_device_status_table */ | 735 | /* See transport_device_status_table */ |
736 | u32 dev_status; | 736 | u32 dev_status; |
737 | u32 dev_tcq_window_closed; | 737 | u32 dev_tcq_window_closed; |
738 | /* Physical device queue depth */ | 738 | /* Physical device queue depth */ |
739 | u32 queue_depth; | 739 | u32 queue_depth; |
740 | /* Used for SPC-2 reservations enforcement of ISIDs */ | 740 | /* Used for SPC-2 reservations enforcement of ISIDs */ |
741 | u64 dev_res_bin_isid; | 741 | u64 dev_res_bin_isid; |
742 | t10_task_attr_index_t dev_task_attr_type; | 742 | t10_task_attr_index_t dev_task_attr_type; |
743 | /* Pointer to transport specific device structure */ | 743 | /* Pointer to transport specific device structure */ |
744 | void *dev_ptr; | 744 | void *dev_ptr; |
745 | u32 dev_index; | 745 | u32 dev_index; |
746 | u64 creation_time; | 746 | u64 creation_time; |
747 | u32 num_resets; | 747 | u32 num_resets; |
748 | u64 num_cmds; | 748 | u64 num_cmds; |
749 | u64 read_bytes; | 749 | u64 read_bytes; |
750 | u64 write_bytes; | 750 | u64 write_bytes; |
751 | spinlock_t stats_lock; | 751 | spinlock_t stats_lock; |
752 | /* Active commands on this virtual SE device */ | 752 | /* Active commands on this virtual SE device */ |
753 | atomic_t active_cmds; | 753 | atomic_t active_cmds; |
754 | atomic_t simple_cmds; | 754 | atomic_t simple_cmds; |
755 | atomic_t depth_left; | 755 | atomic_t depth_left; |
756 | atomic_t dev_ordered_id; | 756 | atomic_t dev_ordered_id; |
757 | atomic_t dev_tur_active; | 757 | atomic_t dev_tur_active; |
758 | atomic_t execute_tasks; | 758 | atomic_t execute_tasks; |
759 | atomic_t dev_status_thr_count; | 759 | atomic_t dev_status_thr_count; |
760 | atomic_t dev_hoq_count; | 760 | atomic_t dev_hoq_count; |
761 | atomic_t dev_ordered_sync; | 761 | atomic_t dev_ordered_sync; |
762 | struct se_obj dev_obj; | 762 | struct se_obj dev_obj; |
763 | struct se_obj dev_access_obj; | 763 | struct se_obj dev_access_obj; |
764 | struct se_obj dev_export_obj; | 764 | struct se_obj dev_export_obj; |
765 | struct se_queue_obj *dev_queue_obj; | 765 | struct se_queue_obj *dev_queue_obj; |
766 | struct se_queue_obj *dev_status_queue_obj; | 766 | struct se_queue_obj *dev_status_queue_obj; |
767 | spinlock_t delayed_cmd_lock; | 767 | spinlock_t delayed_cmd_lock; |
768 | spinlock_t ordered_cmd_lock; | 768 | spinlock_t ordered_cmd_lock; |
769 | spinlock_t execute_task_lock; | 769 | spinlock_t execute_task_lock; |
770 | spinlock_t state_task_lock; | 770 | spinlock_t state_task_lock; |
771 | spinlock_t dev_alua_lock; | 771 | spinlock_t dev_alua_lock; |
772 | spinlock_t dev_reservation_lock; | 772 | spinlock_t dev_reservation_lock; |
773 | spinlock_t dev_state_lock; | 773 | spinlock_t dev_state_lock; |
774 | spinlock_t dev_status_lock; | 774 | spinlock_t dev_status_lock; |
775 | spinlock_t dev_status_thr_lock; | 775 | spinlock_t dev_status_thr_lock; |
776 | spinlock_t se_port_lock; | 776 | spinlock_t se_port_lock; |
777 | spinlock_t se_tmr_lock; | 777 | spinlock_t se_tmr_lock; |
778 | /* Used for legacy SPC-2 reservations */ | 778 | /* Used for legacy SPC-2 reservations */ |
779 | struct se_node_acl *dev_reserved_node_acl; | 779 | struct se_node_acl *dev_reserved_node_acl; |
780 | /* Used for ALUA Logical Unit Group membership */ | 780 | /* Used for ALUA Logical Unit Group membership */ |
781 | struct t10_alua_lu_gp_member *dev_alua_lu_gp_mem; | 781 | struct t10_alua_lu_gp_member *dev_alua_lu_gp_mem; |
782 | /* Used for SPC-3 Persistent Reservations */ | 782 | /* Used for SPC-3 Persistent Reservations */ |
783 | struct t10_pr_registration *dev_pr_res_holder; | 783 | struct t10_pr_registration *dev_pr_res_holder; |
784 | struct list_head dev_sep_list; | 784 | struct list_head dev_sep_list; |
785 | struct list_head dev_tmr_list; | 785 | struct list_head dev_tmr_list; |
786 | struct timer_list dev_status_timer; | 786 | struct timer_list dev_status_timer; |
787 | /* Pointer to descriptor for processing thread */ | 787 | /* Pointer to descriptor for processing thread */ |
788 | struct task_struct *process_thread; | 788 | struct task_struct *process_thread; |
789 | pid_t process_thread_pid; | 789 | pid_t process_thread_pid; |
790 | struct task_struct *dev_mgmt_thread; | 790 | struct task_struct *dev_mgmt_thread; |
791 | struct list_head delayed_cmd_list; | 791 | struct list_head delayed_cmd_list; |
792 | struct list_head ordered_cmd_list; | 792 | struct list_head ordered_cmd_list; |
793 | struct list_head execute_task_list; | 793 | struct list_head execute_task_list; |
794 | struct list_head state_task_list; | 794 | struct list_head state_task_list; |
795 | /* Pointer to associated SE HBA */ | 795 | /* Pointer to associated SE HBA */ |
796 | struct se_hba *se_hba; | 796 | struct se_hba *se_hba; |
797 | struct se_subsystem_dev *se_sub_dev; | 797 | struct se_subsystem_dev *se_sub_dev; |
798 | /* Pointer to template of function pointers for transport */ | 798 | /* Pointer to template of function pointers for transport */ |
799 | struct se_subsystem_api *transport; | 799 | struct se_subsystem_api *transport; |
800 | /* Linked list for struct se_hba struct se_device list */ | 800 | /* Linked list for struct se_hba struct se_device list */ |
801 | struct list_head dev_list; | 801 | struct list_head dev_list; |
802 | /* Linked list for struct se_global->g_se_dev_list */ | 802 | /* Linked list for struct se_global->g_se_dev_list */ |
803 | struct list_head g_se_dev_list; | 803 | struct list_head g_se_dev_list; |
804 | } ____cacheline_aligned; | 804 | } ____cacheline_aligned; |
805 | 805 | ||
806 | #define SE_DEV(cmd) ((struct se_device *)(cmd)->se_lun->lun_se_dev) | 806 | #define SE_DEV(cmd) ((struct se_device *)(cmd)->se_lun->lun_se_dev) |
807 | #define SU_DEV(dev) ((struct se_subsystem_dev *)(dev)->se_sub_dev) | 807 | #define SU_DEV(dev) ((struct se_subsystem_dev *)(dev)->se_sub_dev) |
808 | #define DEV_ATTRIB(dev) (&(dev)->se_sub_dev->se_dev_attrib) | 808 | #define DEV_ATTRIB(dev) (&(dev)->se_sub_dev->se_dev_attrib) |
809 | #define DEV_T10_WWN(dev) (&(dev)->se_sub_dev->t10_wwn) | 809 | #define DEV_T10_WWN(dev) (&(dev)->se_sub_dev->t10_wwn) |
810 | 810 | ||
811 | struct se_hba { | 811 | struct se_hba { |
812 | u16 hba_tpgt; | 812 | u16 hba_tpgt; |
813 | u32 hba_id; | 813 | u32 hba_id; |
814 | /* See hba_flags_table */ | 814 | /* See hba_flags_table */ |
815 | u32 hba_flags; | 815 | u32 hba_flags; |
816 | /* Virtual iSCSI devices attached. */ | 816 | /* Virtual iSCSI devices attached. */ |
817 | u32 dev_count; | 817 | u32 dev_count; |
818 | u32 hba_index; | 818 | u32 hba_index; |
819 | atomic_t load_balance_queue; | 819 | atomic_t load_balance_queue; |
820 | atomic_t left_queue_depth; | 820 | atomic_t left_queue_depth; |
821 | /* Maximum queue depth the HBA can handle. */ | 821 | /* Maximum queue depth the HBA can handle. */ |
822 | atomic_t max_queue_depth; | 822 | atomic_t max_queue_depth; |
823 | /* Pointer to transport specific host structure. */ | 823 | /* Pointer to transport specific host structure. */ |
824 | void *hba_ptr; | 824 | void *hba_ptr; |
825 | /* Linked list for struct se_device */ | 825 | /* Linked list for struct se_device */ |
826 | struct list_head hba_dev_list; | 826 | struct list_head hba_dev_list; |
827 | struct list_head hba_list; | 827 | struct list_head hba_list; |
828 | spinlock_t device_lock; | 828 | spinlock_t device_lock; |
829 | spinlock_t hba_queue_lock; | 829 | spinlock_t hba_queue_lock; |
830 | struct config_group hba_group; | 830 | struct config_group hba_group; |
831 | struct mutex hba_access_mutex; | 831 | struct mutex hba_access_mutex; |
832 | struct se_subsystem_api *transport; | 832 | struct se_subsystem_api *transport; |
833 | } ____cacheline_aligned; | 833 | } ____cacheline_aligned; |
834 | 834 | ||
835 | #define SE_HBA(d) ((struct se_hba *)(d)->se_hba) | 835 | #define SE_HBA(d) ((struct se_hba *)(d)->se_hba) |
836 | 836 | ||
837 | struct se_lun { | 837 | struct se_lun { |
838 | /* See transport_lun_status_table */ | 838 | /* See transport_lun_status_table */ |
839 | enum transport_lun_status_table lun_status; | 839 | enum transport_lun_status_table lun_status; |
840 | u32 lun_access; | 840 | u32 lun_access; |
841 | u32 lun_flags; | 841 | u32 lun_flags; |
842 | u32 unpacked_lun; | 842 | u32 unpacked_lun; |
843 | atomic_t lun_acl_count; | 843 | atomic_t lun_acl_count; |
844 | spinlock_t lun_acl_lock; | 844 | spinlock_t lun_acl_lock; |
845 | spinlock_t lun_cmd_lock; | 845 | spinlock_t lun_cmd_lock; |
846 | spinlock_t lun_sep_lock; | 846 | spinlock_t lun_sep_lock; |
847 | struct completion lun_shutdown_comp; | 847 | struct completion lun_shutdown_comp; |
848 | struct list_head lun_cmd_list; | 848 | struct list_head lun_cmd_list; |
849 | struct list_head lun_acl_list; | 849 | struct list_head lun_acl_list; |
850 | struct se_device *lun_se_dev; | 850 | struct se_device *lun_se_dev; |
851 | struct config_group lun_group; | 851 | struct config_group lun_group; |
852 | struct se_port *lun_sep; | 852 | struct se_port *lun_sep; |
853 | } ____cacheline_aligned; | 853 | } ____cacheline_aligned; |
854 | 854 | ||
855 | #define SE_LUN(c) ((struct se_lun *)(c)->se_lun) | 855 | #define SE_LUN(c) ((struct se_lun *)(c)->se_lun) |
856 | 856 | ||
857 | struct scsi_port_stats { | 857 | struct scsi_port_stats { |
858 | u64 cmd_pdus; | 858 | u64 cmd_pdus; |
859 | u64 tx_data_octets; | 859 | u64 tx_data_octets; |
860 | u64 rx_data_octets; | 860 | u64 rx_data_octets; |
861 | } ____cacheline_aligned; | 861 | } ____cacheline_aligned; |
862 | 862 | ||
863 | struct se_port { | 863 | struct se_port { |
864 | /* RELATIVE TARGET PORT IDENTIFIER */ | 864 | /* RELATIVE TARGET PORT IDENTIFIER */ |
865 | u16 sep_rtpi; | 865 | u16 sep_rtpi; |
866 | int sep_tg_pt_secondary_stat; | 866 | int sep_tg_pt_secondary_stat; |
867 | int sep_tg_pt_secondary_write_md; | 867 | int sep_tg_pt_secondary_write_md; |
868 | u32 sep_index; | 868 | u32 sep_index; |
869 | struct scsi_port_stats sep_stats; | 869 | struct scsi_port_stats sep_stats; |
870 | /* Used for ALUA Target Port Groups membership */ | 870 | /* Used for ALUA Target Port Groups membership */ |
871 | atomic_t sep_tg_pt_gp_active; | 871 | atomic_t sep_tg_pt_gp_active; |
872 | atomic_t sep_tg_pt_secondary_offline; | 872 | atomic_t sep_tg_pt_secondary_offline; |
873 | /* Used for PR ALL_TG_PT=1 */ | 873 | /* Used for PR ALL_TG_PT=1 */ |
874 | atomic_t sep_tg_pt_ref_cnt; | 874 | atomic_t sep_tg_pt_ref_cnt; |
875 | spinlock_t sep_alua_lock; | 875 | spinlock_t sep_alua_lock; |
876 | struct mutex sep_tg_pt_md_mutex; | 876 | struct mutex sep_tg_pt_md_mutex; |
877 | struct t10_alua_tg_pt_gp_member *sep_alua_tg_pt_gp_mem; | 877 | struct t10_alua_tg_pt_gp_member *sep_alua_tg_pt_gp_mem; |
878 | struct se_lun *sep_lun; | 878 | struct se_lun *sep_lun; |
879 | struct se_portal_group *sep_tpg; | 879 | struct se_portal_group *sep_tpg; |
880 | struct list_head sep_alua_list; | 880 | struct list_head sep_alua_list; |
881 | struct list_head sep_list; | 881 | struct list_head sep_list; |
882 | } ____cacheline_aligned; | 882 | } ____cacheline_aligned; |
883 | 883 | ||
884 | struct se_tpg_np { | 884 | struct se_tpg_np { |
885 | struct se_portal_group *tpg_np_parent; | 885 | struct se_portal_group *tpg_np_parent; |
886 | struct config_group tpg_np_group; | 886 | struct config_group tpg_np_group; |
887 | } ____cacheline_aligned; | 887 | } ____cacheline_aligned; |
888 | 888 | ||
889 | struct se_portal_group { | 889 | struct se_portal_group { |
890 | /* Type of target portal group, see transport_tpg_type_table */ | 890 | /* Type of target portal group, see transport_tpg_type_table */ |
891 | enum transport_tpg_type_table se_tpg_type; | 891 | enum transport_tpg_type_table se_tpg_type; |
892 | /* Number of ACLed Initiator Nodes for this TPG */ | 892 | /* Number of ACLed Initiator Nodes for this TPG */ |
893 | u32 num_node_acls; | 893 | u32 num_node_acls; |
894 | /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ | 894 | /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ |
895 | atomic_t tpg_pr_ref_count; | 895 | atomic_t tpg_pr_ref_count; |
896 | /* Spinlock for adding/removing ACLed Nodes */ | 896 | /* Spinlock for adding/removing ACLed Nodes */ |
897 | spinlock_t acl_node_lock; | 897 | spinlock_t acl_node_lock; |
898 | /* Spinlock for adding/removing sessions */ | 898 | /* Spinlock for adding/removing sessions */ |
899 | spinlock_t session_lock; | 899 | spinlock_t session_lock; |
900 | spinlock_t tpg_lun_lock; | 900 | spinlock_t tpg_lun_lock; |
901 | /* Pointer to $FABRIC_MOD portal group */ | 901 | /* Pointer to $FABRIC_MOD portal group */ |
902 | void *se_tpg_fabric_ptr; | 902 | void *se_tpg_fabric_ptr; |
903 | struct list_head se_tpg_list; | 903 | struct list_head se_tpg_list; |
904 | /* linked list for initiator ACL list */ | 904 | /* linked list for initiator ACL list */ |
905 | struct list_head acl_node_list; | 905 | struct list_head acl_node_list; |
906 | struct se_lun *tpg_lun_list; | 906 | struct se_lun *tpg_lun_list; |
907 | struct se_lun tpg_virt_lun0; | 907 | struct se_lun tpg_virt_lun0; |
908 | /* List of TCM sessions associated with this TPG */ | 908 | /* List of TCM sessions associated with this TPG */ |
909 | struct list_head tpg_sess_list; | 909 | struct list_head tpg_sess_list; |
910 | /* Pointer to $FABRIC_MOD dependent code */ | 910 | /* Pointer to $FABRIC_MOD dependent code */ |
911 | struct target_core_fabric_ops *se_tpg_tfo; | 911 | struct target_core_fabric_ops *se_tpg_tfo; |
912 | struct se_wwn *se_tpg_wwn; | 912 | struct se_wwn *se_tpg_wwn; |
913 | struct config_group tpg_group; | 913 | struct config_group tpg_group; |
914 | struct config_group *tpg_default_groups[6]; | 914 | struct config_group *tpg_default_groups[6]; |
915 | struct config_group tpg_lun_group; | 915 | struct config_group tpg_lun_group; |
916 | struct config_group tpg_np_group; | 916 | struct config_group tpg_np_group; |
917 | struct config_group tpg_acl_group; | 917 | struct config_group tpg_acl_group; |
918 | struct config_group tpg_attrib_group; | 918 | struct config_group tpg_attrib_group; |
919 | struct config_group tpg_param_group; | 919 | struct config_group tpg_param_group; |
920 | } ____cacheline_aligned; | 920 | } ____cacheline_aligned; |
921 | 921 | ||
922 | #define TPG_TFO(se_tpg) ((struct target_core_fabric_ops *)(se_tpg)->se_tpg_tfo) | 922 | #define TPG_TFO(se_tpg) ((struct target_core_fabric_ops *)(se_tpg)->se_tpg_tfo) |
923 | 923 | ||
924 | struct se_wwn { | 924 | struct se_wwn { |
925 | struct target_fabric_configfs *wwn_tf; | 925 | struct target_fabric_configfs *wwn_tf; |
926 | struct config_group wwn_group; | 926 | struct config_group wwn_group; |
927 | } ____cacheline_aligned; | 927 | } ____cacheline_aligned; |
928 | 928 | ||
929 | struct se_global { | 929 | struct se_global { |
930 | u16 alua_lu_gps_counter; | 930 | u16 alua_lu_gps_counter; |
931 | int g_sub_api_initialized; | 931 | int g_sub_api_initialized; |
932 | u32 in_shutdown; | 932 | u32 in_shutdown; |
933 | u32 alua_lu_gps_count; | 933 | u32 alua_lu_gps_count; |
934 | u32 g_hba_id_counter; | 934 | u32 g_hba_id_counter; |
935 | struct config_group target_core_hbagroup; | 935 | struct config_group target_core_hbagroup; |
936 | struct config_group alua_group; | 936 | struct config_group alua_group; |
937 | struct config_group alua_lu_gps_group; | 937 | struct config_group alua_lu_gps_group; |
938 | struct list_head g_lu_gps_list; | 938 | struct list_head g_lu_gps_list; |
939 | struct list_head g_se_tpg_list; | 939 | struct list_head g_se_tpg_list; |
940 | struct list_head g_hba_list; | 940 | struct list_head g_hba_list; |
941 | struct list_head g_se_dev_list; | 941 | struct list_head g_se_dev_list; |
942 | struct se_hba *g_lun0_hba; | 942 | struct se_hba *g_lun0_hba; |
943 | struct se_subsystem_dev *g_lun0_su_dev; | 943 | struct se_subsystem_dev *g_lun0_su_dev; |
944 | struct se_device *g_lun0_dev; | 944 | struct se_device *g_lun0_dev; |
945 | struct t10_alua_lu_gp *default_lu_gp; | 945 | struct t10_alua_lu_gp *default_lu_gp; |
946 | spinlock_t g_device_lock; | 946 | spinlock_t g_device_lock; |
947 | spinlock_t hba_lock; | 947 | spinlock_t hba_lock; |
948 | spinlock_t se_tpg_lock; | 948 | spinlock_t se_tpg_lock; |
949 | spinlock_t lu_gps_lock; | 949 | spinlock_t lu_gps_lock; |
950 | spinlock_t plugin_class_lock; | 950 | spinlock_t plugin_class_lock; |
951 | } ____cacheline_aligned; | 951 | } ____cacheline_aligned; |
952 | 952 | ||
953 | #endif /* TARGET_CORE_BASE_H */ | 953 | #endif /* TARGET_CORE_BASE_H */ |
954 | 954 |
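
The SE_DEV()/SU_DEV()/DEV_ATTRIB()/DEV_T10_WWN() accessors above chain through cmd->se_lun->lun_se_dev and dev->se_sub_dev. As a minimal usage sketch (illustrative only; example_depth_available() is a hypothetical helper, not part of this patch), a caller can combine SE_DEV() with the atomic depth_left counter declared in struct se_device:

static bool example_depth_available(struct se_cmd *cmd)
{
	struct se_device *dev = SE_DEV(cmd);

	/* depth_left is the remaining device queue depth (atomic_t above) */
	return atomic_read(&dev->depth_left) > 0;
}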
include/target/target_core_fabric_ops.h
1 | /* Defined in target_core_configfs.h */ | 1 | /* Defined in target_core_configfs.h */ |
2 | struct target_fabric_configfs; | 2 | struct target_fabric_configfs; |
3 | 3 | ||
4 | struct target_core_fabric_ops { | 4 | struct target_core_fabric_ops { |
5 | struct configfs_subsystem *tf_subsys; | 5 | struct configfs_subsystem *tf_subsys; |
6 | /* | 6 | /* |
7 | * Optional to signal struct se_task->task_sg[] padding entries | 7 | * Optional to signal struct se_task->task_sg[] padding entries |
8 | * for scatterlist chaining using transport_do_task_sg_link(), | 8 | * for scatterlist chaining using transport_do_task_sg_link(), |
9 | * disabled by default | 9 | * disabled by default |
10 | */ | 10 | */ |
11 | int task_sg_chaining:1; | 11 | bool task_sg_chaining; |
12 | char *(*get_fabric_name)(void); | 12 | char *(*get_fabric_name)(void); |
13 | u8 (*get_fabric_proto_ident)(struct se_portal_group *); | 13 | u8 (*get_fabric_proto_ident)(struct se_portal_group *); |
14 | char *(*tpg_get_wwn)(struct se_portal_group *); | 14 | char *(*tpg_get_wwn)(struct se_portal_group *); |
15 | u16 (*tpg_get_tag)(struct se_portal_group *); | 15 | u16 (*tpg_get_tag)(struct se_portal_group *); |
16 | u32 (*tpg_get_default_depth)(struct se_portal_group *); | 16 | u32 (*tpg_get_default_depth)(struct se_portal_group *); |
17 | u32 (*tpg_get_pr_transport_id)(struct se_portal_group *, | 17 | u32 (*tpg_get_pr_transport_id)(struct se_portal_group *, |
18 | struct se_node_acl *, | 18 | struct se_node_acl *, |
19 | struct t10_pr_registration *, int *, | 19 | struct t10_pr_registration *, int *, |
20 | unsigned char *); | 20 | unsigned char *); |
21 | u32 (*tpg_get_pr_transport_id_len)(struct se_portal_group *, | 21 | u32 (*tpg_get_pr_transport_id_len)(struct se_portal_group *, |
22 | struct se_node_acl *, | 22 | struct se_node_acl *, |
23 | struct t10_pr_registration *, int *); | 23 | struct t10_pr_registration *, int *); |
24 | char *(*tpg_parse_pr_out_transport_id)(struct se_portal_group *, | 24 | char *(*tpg_parse_pr_out_transport_id)(struct se_portal_group *, |
25 | const char *, u32 *, char **); | 25 | const char *, u32 *, char **); |
26 | int (*tpg_check_demo_mode)(struct se_portal_group *); | 26 | int (*tpg_check_demo_mode)(struct se_portal_group *); |
27 | int (*tpg_check_demo_mode_cache)(struct se_portal_group *); | 27 | int (*tpg_check_demo_mode_cache)(struct se_portal_group *); |
28 | int (*tpg_check_demo_mode_write_protect)(struct se_portal_group *); | 28 | int (*tpg_check_demo_mode_write_protect)(struct se_portal_group *); |
29 | int (*tpg_check_prod_mode_write_protect)(struct se_portal_group *); | 29 | int (*tpg_check_prod_mode_write_protect)(struct se_portal_group *); |
30 | struct se_node_acl *(*tpg_alloc_fabric_acl)( | 30 | struct se_node_acl *(*tpg_alloc_fabric_acl)( |
31 | struct se_portal_group *); | 31 | struct se_portal_group *); |
32 | void (*tpg_release_fabric_acl)(struct se_portal_group *, | 32 | void (*tpg_release_fabric_acl)(struct se_portal_group *, |
33 | struct se_node_acl *); | 33 | struct se_node_acl *); |
34 | u32 (*tpg_get_inst_index)(struct se_portal_group *); | 34 | u32 (*tpg_get_inst_index)(struct se_portal_group *); |
35 | /* | 35 | /* |
36 | * Optional function pointer for TCM to perform command map | 36 | * Optional function pointer for TCM to perform command map |
37 | * from TCM processing thread context, for those struct se_cmd | 37 | * from TCM processing thread context, for those struct se_cmd |
38 | * initially allocated in interrupt context. | 38 | * initially allocated in interrupt context. |
39 | */ | 39 | */ |
40 | int (*new_cmd_map)(struct se_cmd *); | 40 | int (*new_cmd_map)(struct se_cmd *); |
41 | /* | 41 | /* |
42 | * Optional function pointer for TCM fabric modules that use | 42 | * Optional function pointer for TCM fabric modules that use |
43 | * Linux/NET sockets to allocate a struct iovec array for a struct se_cmd | 43 | * Linux/NET sockets to allocate a struct iovec array for a struct se_cmd |
44 | */ | 44 | */ |
45 | int (*alloc_cmd_iovecs)(struct se_cmd *); | 45 | int (*alloc_cmd_iovecs)(struct se_cmd *); |
46 | /* | 46 | /* |
47 | * Optional to release struct se_cmd and fabric dependent allocated | 47 | * Optional to release struct se_cmd and fabric dependent allocated |
48 | * I/O descriptor in transport_cmd_check_stop() | 48 | * I/O descriptor in transport_cmd_check_stop() |
49 | */ | 49 | */ |
50 | void (*check_stop_free)(struct se_cmd *); | 50 | void (*check_stop_free)(struct se_cmd *); |
51 | void (*release_cmd_to_pool)(struct se_cmd *); | 51 | void (*release_cmd_to_pool)(struct se_cmd *); |
52 | void (*release_cmd_direct)(struct se_cmd *); | 52 | void (*release_cmd_direct)(struct se_cmd *); |
53 | /* | 53 | /* |
54 | * Called with spin_lock_bh(struct se_portal_group->session_lock) held. | 54 | * Called with spin_lock_bh(struct se_portal_group->session_lock) held. |
55 | */ | 55 | */ |
56 | int (*shutdown_session)(struct se_session *); | 56 | int (*shutdown_session)(struct se_session *); |
57 | void (*close_session)(struct se_session *); | 57 | void (*close_session)(struct se_session *); |
58 | void (*stop_session)(struct se_session *, int, int); | 58 | void (*stop_session)(struct se_session *, int, int); |
59 | void (*fall_back_to_erl0)(struct se_session *); | 59 | void (*fall_back_to_erl0)(struct se_session *); |
60 | int (*sess_logged_in)(struct se_session *); | 60 | int (*sess_logged_in)(struct se_session *); |
61 | u32 (*sess_get_index)(struct se_session *); | 61 | u32 (*sess_get_index)(struct se_session *); |
62 | /* | 62 | /* |
63 | * Used only for SCSI fabrics that contain multi-value TransportIDs | 63 | * Used only for SCSI fabrics that contain multi-value TransportIDs |
64 | * (like iSCSI). All other SCSI fabrics should set this to NULL. | 64 | * (like iSCSI). All other SCSI fabrics should set this to NULL. |
65 | */ | 65 | */ |
66 | u32 (*sess_get_initiator_sid)(struct se_session *, | 66 | u32 (*sess_get_initiator_sid)(struct se_session *, |
67 | unsigned char *, u32); | 67 | unsigned char *, u32); |
68 | int (*write_pending)(struct se_cmd *); | 68 | int (*write_pending)(struct se_cmd *); |
69 | int (*write_pending_status)(struct se_cmd *); | 69 | int (*write_pending_status)(struct se_cmd *); |
70 | void (*set_default_node_attributes)(struct se_node_acl *); | 70 | void (*set_default_node_attributes)(struct se_node_acl *); |
71 | u32 (*get_task_tag)(struct se_cmd *); | 71 | u32 (*get_task_tag)(struct se_cmd *); |
72 | int (*get_cmd_state)(struct se_cmd *); | 72 | int (*get_cmd_state)(struct se_cmd *); |
73 | void (*new_cmd_failure)(struct se_cmd *); | 73 | void (*new_cmd_failure)(struct se_cmd *); |
74 | int (*queue_data_in)(struct se_cmd *); | 74 | int (*queue_data_in)(struct se_cmd *); |
75 | int (*queue_status)(struct se_cmd *); | 75 | int (*queue_status)(struct se_cmd *); |
76 | int (*queue_tm_rsp)(struct se_cmd *); | 76 | int (*queue_tm_rsp)(struct se_cmd *); |
77 | u16 (*set_fabric_sense_len)(struct se_cmd *, u32); | 77 | u16 (*set_fabric_sense_len)(struct se_cmd *, u32); |
78 | u16 (*get_fabric_sense_len)(void); | 78 | u16 (*get_fabric_sense_len)(void); |
79 | int (*is_state_remove)(struct se_cmd *); | 79 | int (*is_state_remove)(struct se_cmd *); |
80 | u64 (*pack_lun)(unsigned int); | 80 | u64 (*pack_lun)(unsigned int); |
81 | /* | 81 | /* |
82 | * fabric module calls for target_core_fabric_configfs.c | 82 | * fabric module calls for target_core_fabric_configfs.c |
83 | */ | 83 | */ |
84 | struct se_wwn *(*fabric_make_wwn)(struct target_fabric_configfs *, | 84 | struct se_wwn *(*fabric_make_wwn)(struct target_fabric_configfs *, |
85 | struct config_group *, const char *); | 85 | struct config_group *, const char *); |
86 | void (*fabric_drop_wwn)(struct se_wwn *); | 86 | void (*fabric_drop_wwn)(struct se_wwn *); |
87 | struct se_portal_group *(*fabric_make_tpg)(struct se_wwn *, | 87 | struct se_portal_group *(*fabric_make_tpg)(struct se_wwn *, |
88 | struct config_group *, const char *); | 88 | struct config_group *, const char *); |
89 | void (*fabric_drop_tpg)(struct se_portal_group *); | 89 | void (*fabric_drop_tpg)(struct se_portal_group *); |
90 | int (*fabric_post_link)(struct se_portal_group *, | 90 | int (*fabric_post_link)(struct se_portal_group *, |
91 | struct se_lun *); | 91 | struct se_lun *); |
92 | void (*fabric_pre_unlink)(struct se_portal_group *, | 92 | void (*fabric_pre_unlink)(struct se_portal_group *, |
93 | struct se_lun *); | 93 | struct se_lun *); |
94 | struct se_tpg_np *(*fabric_make_np)(struct se_portal_group *, | 94 | struct se_tpg_np *(*fabric_make_np)(struct se_portal_group *, |
95 | struct config_group *, const char *); | 95 | struct config_group *, const char *); |
96 | void (*fabric_drop_np)(struct se_tpg_np *); | 96 | void (*fabric_drop_np)(struct se_tpg_np *); |
97 | struct se_node_acl *(*fabric_make_nodeacl)(struct se_portal_group *, | 97 | struct se_node_acl *(*fabric_make_nodeacl)(struct se_portal_group *, |
98 | struct config_group *, const char *); | 98 | struct config_group *, const char *); |
99 | void (*fabric_drop_nodeacl)(struct se_node_acl *); | 99 | void (*fabric_drop_nodeacl)(struct se_node_acl *); |
100 | }; | 100 | }; |
101 | 101 |
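
The one functional change in this file is at line 11, where int task_sg_chaining:1 becomes bool task_sg_chaining. A standalone sketch (illustrative only, with made-up struct names) of the "dubious one-bit signed bitfield" pattern sparse rejects: a signed one-bit field can represent only 0 and -1, so on typical compilers assigning 1 reads back as -1 and an equality test against 1 never matches, while zero vs. non-zero tests still behave:

struct old_flags { int  sg_chaining:1; };	/* representable values: 0, -1 */
struct new_flags { bool sg_chaining; };		/* representable values: false, true */

static int example_check(struct old_flags *f)
{
	f->sg_chaining = 1;		/* reads back as -1 */

	if (f->sg_chaining == 1)	/* never true for the signed bitfield */
		return 1;

	return f->sg_chaining != 0;	/* zero vs. non-zero comparison is safe */
}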
include/target/target_core_transport.h
1 | #ifndef TARGET_CORE_TRANSPORT_H | 1 | #ifndef TARGET_CORE_TRANSPORT_H |
2 | #define TARGET_CORE_TRANSPORT_H | 2 | #define TARGET_CORE_TRANSPORT_H |
3 | 3 | ||
4 | #define TARGET_CORE_VERSION TARGET_CORE_MOD_VERSION | 4 | #define TARGET_CORE_VERSION TARGET_CORE_MOD_VERSION |
5 | 5 | ||
6 | /* Attempts before moving from SHORT to LONG */ | 6 | /* Attempts before moving from SHORT to LONG */ |
7 | #define PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD 3 | 7 | #define PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD 3 |
8 | #define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT 3 /* In milliseconds */ | 8 | #define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT 3 /* In milliseconds */ |
9 | #define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG 10 /* In milliseconds */ | 9 | #define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG 10 /* In milliseconds */ |
10 | 10 | ||
11 | #define PYX_TRANSPORT_STATUS_INTERVAL 5 /* In seconds */ | 11 | #define PYX_TRANSPORT_STATUS_INTERVAL 5 /* In seconds */ |
12 | 12 | ||
13 | #define PYX_TRANSPORT_SENT_TO_TRANSPORT 0 | 13 | #define PYX_TRANSPORT_SENT_TO_TRANSPORT 0 |
14 | #define PYX_TRANSPORT_WRITE_PENDING 1 | 14 | #define PYX_TRANSPORT_WRITE_PENDING 1 |
15 | 15 | ||
16 | #define PYX_TRANSPORT_UNKNOWN_SAM_OPCODE -1 | 16 | #define PYX_TRANSPORT_UNKNOWN_SAM_OPCODE -1 |
17 | #define PYX_TRANSPORT_HBA_QUEUE_FULL -2 | 17 | #define PYX_TRANSPORT_HBA_QUEUE_FULL -2 |
18 | #define PYX_TRANSPORT_REQ_TOO_MANY_SECTORS -3 | 18 | #define PYX_TRANSPORT_REQ_TOO_MANY_SECTORS -3 |
19 | #define PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES -4 | 19 | #define PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES -4 |
20 | #define PYX_TRANSPORT_INVALID_CDB_FIELD -5 | 20 | #define PYX_TRANSPORT_INVALID_CDB_FIELD -5 |
21 | #define PYX_TRANSPORT_INVALID_PARAMETER_LIST -6 | 21 | #define PYX_TRANSPORT_INVALID_PARAMETER_LIST -6 |
22 | #define PYX_TRANSPORT_LU_COMM_FAILURE -7 | 22 | #define PYX_TRANSPORT_LU_COMM_FAILURE -7 |
23 | #define PYX_TRANSPORT_UNKNOWN_MODE_PAGE -8 | 23 | #define PYX_TRANSPORT_UNKNOWN_MODE_PAGE -8 |
24 | #define PYX_TRANSPORT_WRITE_PROTECTED -9 | 24 | #define PYX_TRANSPORT_WRITE_PROTECTED -9 |
25 | #define PYX_TRANSPORT_TASK_TIMEOUT -10 | 25 | #define PYX_TRANSPORT_TASK_TIMEOUT -10 |
26 | #define PYX_TRANSPORT_RESERVATION_CONFLICT -11 | 26 | #define PYX_TRANSPORT_RESERVATION_CONFLICT -11 |
27 | #define PYX_TRANSPORT_ILLEGAL_REQUEST -12 | 27 | #define PYX_TRANSPORT_ILLEGAL_REQUEST -12 |
28 | #define PYX_TRANSPORT_USE_SENSE_REASON -13 | 28 | #define PYX_TRANSPORT_USE_SENSE_REASON -13 |
29 | 29 | ||
30 | #ifndef SAM_STAT_RESERVATION_CONFLICT | 30 | #ifndef SAM_STAT_RESERVATION_CONFLICT |
31 | #define SAM_STAT_RESERVATION_CONFLICT 0x18 | 31 | #define SAM_STAT_RESERVATION_CONFLICT 0x18 |
32 | #endif | 32 | #endif |
33 | 33 | ||
34 | #define TRANSPORT_PLUGIN_FREE 0 | 34 | #define TRANSPORT_PLUGIN_FREE 0 |
35 | #define TRANSPORT_PLUGIN_REGISTERED 1 | 35 | #define TRANSPORT_PLUGIN_REGISTERED 1 |
36 | 36 | ||
37 | #define TRANSPORT_PLUGIN_PHBA_PDEV 1 | 37 | #define TRANSPORT_PLUGIN_PHBA_PDEV 1 |
38 | #define TRANSPORT_PLUGIN_VHBA_PDEV 2 | 38 | #define TRANSPORT_PLUGIN_VHBA_PDEV 2 |
39 | #define TRANSPORT_PLUGIN_VHBA_VDEV 3 | 39 | #define TRANSPORT_PLUGIN_VHBA_VDEV 3 |
40 | 40 | ||
41 | /* For SE OBJ Plugins, in seconds */ | 41 | /* For SE OBJ Plugins, in seconds */ |
42 | #define TRANSPORT_TIMEOUT_TUR 10 | 42 | #define TRANSPORT_TIMEOUT_TUR 10 |
43 | #define TRANSPORT_TIMEOUT_TYPE_DISK 60 | 43 | #define TRANSPORT_TIMEOUT_TYPE_DISK 60 |
44 | #define TRANSPORT_TIMEOUT_TYPE_ROM 120 | 44 | #define TRANSPORT_TIMEOUT_TYPE_ROM 120 |
45 | #define TRANSPORT_TIMEOUT_TYPE_TAPE 600 | 45 | #define TRANSPORT_TIMEOUT_TYPE_TAPE 600 |
46 | #define TRANSPORT_TIMEOUT_TYPE_OTHER 300 | 46 | #define TRANSPORT_TIMEOUT_TYPE_OTHER 300 |
47 | 47 | ||
48 | /* For se_task->task_state_flags */ | 48 | /* For se_task->task_state_flags */ |
49 | #define TSF_EXCEPTION_CLEARED 0x01 | 49 | #define TSF_EXCEPTION_CLEARED 0x01 |
50 | 50 | ||
51 | /* | 51 | /* |
52 | * struct se_subsystem_dev->su_dev_flags | 52 | * struct se_subsystem_dev->su_dev_flags |
53 | */ | 53 | */ |
54 | #define SDF_FIRMWARE_VPD_UNIT_SERIAL 0x00000001 | 54 | #define SDF_FIRMWARE_VPD_UNIT_SERIAL 0x00000001 |
55 | #define SDF_EMULATED_VPD_UNIT_SERIAL 0x00000002 | 55 | #define SDF_EMULATED_VPD_UNIT_SERIAL 0x00000002 |
56 | #define SDF_USING_UDEV_PATH 0x00000004 | 56 | #define SDF_USING_UDEV_PATH 0x00000004 |
57 | #define SDF_USING_ALIAS 0x00000008 | 57 | #define SDF_USING_ALIAS 0x00000008 |
58 | 58 | ||
59 | /* | 59 | /* |
60 | * struct se_device->dev_flags | 60 | * struct se_device->dev_flags |
61 | */ | 61 | */ |
62 | #define DF_READ_ONLY 0x00000001 | 62 | #define DF_READ_ONLY 0x00000001 |
63 | #define DF_SPC2_RESERVATIONS 0x00000002 | 63 | #define DF_SPC2_RESERVATIONS 0x00000002 |
64 | #define DF_SPC2_RESERVATIONS_WITH_ISID 0x00000004 | 64 | #define DF_SPC2_RESERVATIONS_WITH_ISID 0x00000004 |
65 | 65 | ||
66 | /* struct se_dev_attrib sanity values */ | 66 | /* struct se_dev_attrib sanity values */ |
67 | /* 10 Minutes */ | 67 | /* 10 Minutes */ |
68 | #define DA_TASK_TIMEOUT_MAX 600 | 68 | #define DA_TASK_TIMEOUT_MAX 600 |
69 | /* Default max_unmap_lba_count */ | 69 | /* Default max_unmap_lba_count */ |
70 | #define DA_MAX_UNMAP_LBA_COUNT 0 | 70 | #define DA_MAX_UNMAP_LBA_COUNT 0 |
71 | /* Default max_unmap_block_desc_count */ | 71 | /* Default max_unmap_block_desc_count */ |
72 | #define DA_MAX_UNMAP_BLOCK_DESC_COUNT 0 | 72 | #define DA_MAX_UNMAP_BLOCK_DESC_COUNT 0 |
73 | /* Default unmap_granularity */ | 73 | /* Default unmap_granularity */ |
74 | #define DA_UNMAP_GRANULARITY_DEFAULT 0 | 74 | #define DA_UNMAP_GRANULARITY_DEFAULT 0 |
75 | /* Default unmap_granularity_alignment */ | 75 | /* Default unmap_granularity_alignment */ |
76 | #define DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT 0 | 76 | #define DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT 0 |
77 | /* Emulation for Direct Page Out */ | 77 | /* Emulation for Direct Page Out */ |
78 | #define DA_EMULATE_DPO 0 | 78 | #define DA_EMULATE_DPO 0 |
79 | /* Emulation for Forced Unit Access WRITEs */ | 79 | /* Emulation for Forced Unit Access WRITEs */ |
80 | #define DA_EMULATE_FUA_WRITE 1 | 80 | #define DA_EMULATE_FUA_WRITE 1 |
81 | /* Emulation for Forced Unit Access READs */ | 81 | /* Emulation for Forced Unit Access READs */ |
82 | #define DA_EMULATE_FUA_READ 0 | 82 | #define DA_EMULATE_FUA_READ 0 |
83 | /* Emulation for WriteCache and SYNCHRONIZE_CACHE */ | 83 | /* Emulation for WriteCache and SYNCHRONIZE_CACHE */ |
84 | #define DA_EMULATE_WRITE_CACHE 0 | 84 | #define DA_EMULATE_WRITE_CACHE 0 |
85 | /* Emulation for UNIT ATTENTION Interlock Control */ | 85 | /* Emulation for UNIT ATTENTION Interlock Control */ |
86 | #define DA_EMULATE_UA_INTLLCK_CTRL 0 | 86 | #define DA_EMULATE_UA_INTLLCK_CTRL 0 |
87 | /* Emulation for TASK_ABORTED status (TAS) by default */ | 87 | /* Emulation for TASK_ABORTED status (TAS) by default */ |
88 | #define DA_EMULATE_TAS 1 | 88 | #define DA_EMULATE_TAS 1 |
89 | /* Emulation for Thin Provisioning UNMAP using block/blk-lib.c:blkdev_issue_discard() */ | 89 | /* Emulation for Thin Provisioning UNMAP using block/blk-lib.c:blkdev_issue_discard() */ |
90 | #define DA_EMULATE_TPU 0 | 90 | #define DA_EMULATE_TPU 0 |
91 | /* | 91 | /* |
92 | * Emulation for Thin Provisioning WRITE_SAME w/ UNMAP=1 bit using | 92 | * Emulation for Thin Provisioning WRITE_SAME w/ UNMAP=1 bit using |
93 | * block/blk-lib.c:blkdev_issue_discard() | 93 | * block/blk-lib.c:blkdev_issue_discard() |
94 | */ | 94 | */ |
95 | #define DA_EMULATE_TPWS 0 | 95 | #define DA_EMULATE_TPWS 0 |
96 | /* No Emulation for PSCSI by default */ | 96 | /* No Emulation for PSCSI by default */ |
97 | #define DA_EMULATE_RESERVATIONS 0 | 97 | #define DA_EMULATE_RESERVATIONS 0 |
98 | /* No Emulation for PSCSI by default */ | 98 | /* No Emulation for PSCSI by default */ |
99 | #define DA_EMULATE_ALUA 0 | 99 | #define DA_EMULATE_ALUA 0 |
100 | /* Enforce SCSI Initiator Port TransportID with 'ISID' for PR */ | 100 | /* Enforce SCSI Initiator Port TransportID with 'ISID' for PR */ |
101 | #define DA_ENFORCE_PR_ISIDS 1 | 101 | #define DA_ENFORCE_PR_ISIDS 1 |
102 | #define DA_STATUS_MAX_SECTORS_MIN 16 | 102 | #define DA_STATUS_MAX_SECTORS_MIN 16 |
103 | #define DA_STATUS_MAX_SECTORS_MAX 8192 | 103 | #define DA_STATUS_MAX_SECTORS_MAX 8192 |
104 | 104 | ||
105 | #define SE_MODE_PAGE_BUF 512 | 105 | #define SE_MODE_PAGE_BUF 512 |
106 | 106 | ||
107 | #define MOD_MAX_SECTORS(ms, bs) (ms % (PAGE_SIZE / bs)) | 107 | #define MOD_MAX_SECTORS(ms, bs) (ms % (PAGE_SIZE / bs)) |
108 | 108 | ||
109 | struct se_mem; | 109 | struct se_mem; |
110 | struct se_subsystem_api; | 110 | struct se_subsystem_api; |
111 | 111 | ||
112 | extern struct kmem_cache *se_mem_cache; | ||
113 | |||
112 | extern int init_se_global(void); | 114 | extern int init_se_global(void); |
113 | extern void release_se_global(void); | 115 | extern void release_se_global(void); |
114 | extern void init_scsi_index_table(void); | 116 | extern void init_scsi_index_table(void); |
115 | extern u32 scsi_get_new_index(scsi_index_t); | 117 | extern u32 scsi_get_new_index(scsi_index_t); |
116 | extern void transport_init_queue_obj(struct se_queue_obj *); | 118 | extern void transport_init_queue_obj(struct se_queue_obj *); |
117 | extern int transport_subsystem_check_init(void); | 119 | extern int transport_subsystem_check_init(void); |
118 | extern int transport_subsystem_register(struct se_subsystem_api *); | 120 | extern int transport_subsystem_register(struct se_subsystem_api *); |
119 | extern void transport_subsystem_release(struct se_subsystem_api *); | 121 | extern void transport_subsystem_release(struct se_subsystem_api *); |
120 | extern void transport_load_plugins(void); | 122 | extern void transport_load_plugins(void); |
121 | extern struct se_session *transport_init_session(void); | 123 | extern struct se_session *transport_init_session(void); |
122 | extern void __transport_register_session(struct se_portal_group *, | 124 | extern void __transport_register_session(struct se_portal_group *, |
123 | struct se_node_acl *, | 125 | struct se_node_acl *, |
124 | struct se_session *, void *); | 126 | struct se_session *, void *); |
125 | extern void transport_register_session(struct se_portal_group *, | 127 | extern void transport_register_session(struct se_portal_group *, |
126 | struct se_node_acl *, | 128 | struct se_node_acl *, |
127 | struct se_session *, void *); | 129 | struct se_session *, void *); |
128 | extern void transport_free_session(struct se_session *); | 130 | extern void transport_free_session(struct se_session *); |
129 | extern void transport_deregister_session_configfs(struct se_session *); | 131 | extern void transport_deregister_session_configfs(struct se_session *); |
130 | extern void transport_deregister_session(struct se_session *); | 132 | extern void transport_deregister_session(struct se_session *); |
131 | extern void transport_cmd_finish_abort(struct se_cmd *, int); | 133 | extern void transport_cmd_finish_abort(struct se_cmd *, int); |
132 | extern void transport_cmd_finish_abort_tmr(struct se_cmd *); | 134 | extern void transport_cmd_finish_abort_tmr(struct se_cmd *); |
133 | extern void transport_complete_sync_cache(struct se_cmd *, int); | 135 | extern void transport_complete_sync_cache(struct se_cmd *, int); |
134 | extern void transport_complete_task(struct se_task *, int); | 136 | extern void transport_complete_task(struct se_task *, int); |
135 | extern void transport_add_task_to_execute_queue(struct se_task *, | 137 | extern void transport_add_task_to_execute_queue(struct se_task *, |
136 | struct se_task *, | 138 | struct se_task *, |
137 | struct se_device *); | 139 | struct se_device *); |
138 | extern void transport_remove_task_from_execute_queue(struct se_task *, | 140 | extern void transport_remove_task_from_execute_queue(struct se_task *, |
139 | struct se_device *); | 141 | struct se_device *); |
140 | unsigned char *transport_dump_cmd_direction(struct se_cmd *); | 142 | unsigned char *transport_dump_cmd_direction(struct se_cmd *); |
141 | extern void transport_dump_dev_state(struct se_device *, char *, int *); | 143 | extern void transport_dump_dev_state(struct se_device *, char *, int *); |
142 | extern void transport_dump_dev_info(struct se_device *, struct se_lun *, | 144 | extern void transport_dump_dev_info(struct se_device *, struct se_lun *, |
143 | unsigned long long, char *, int *); | 145 | unsigned long long, char *, int *); |
144 | extern void transport_dump_vpd_proto_id(struct t10_vpd *, | 146 | extern void transport_dump_vpd_proto_id(struct t10_vpd *, |
145 | unsigned char *, int); | 147 | unsigned char *, int); |
146 | extern void transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *); | 148 | extern void transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *); |
147 | extern int transport_dump_vpd_assoc(struct t10_vpd *, | 149 | extern int transport_dump_vpd_assoc(struct t10_vpd *, |
148 | unsigned char *, int); | 150 | unsigned char *, int); |
149 | extern int transport_set_vpd_assoc(struct t10_vpd *, unsigned char *); | 151 | extern int transport_set_vpd_assoc(struct t10_vpd *, unsigned char *); |
150 | extern int transport_dump_vpd_ident_type(struct t10_vpd *, | 152 | extern int transport_dump_vpd_ident_type(struct t10_vpd *, |
151 | unsigned char *, int); | 153 | unsigned char *, int); |
152 | extern int transport_set_vpd_ident_type(struct t10_vpd *, unsigned char *); | 154 | extern int transport_set_vpd_ident_type(struct t10_vpd *, unsigned char *); |
153 | extern int transport_dump_vpd_ident(struct t10_vpd *, | 155 | extern int transport_dump_vpd_ident(struct t10_vpd *, |
154 | unsigned char *, int); | 156 | unsigned char *, int); |
155 | extern int transport_set_vpd_ident(struct t10_vpd *, unsigned char *); | 157 | extern int transport_set_vpd_ident(struct t10_vpd *, unsigned char *); |
156 | extern struct se_device *transport_add_device_to_core_hba(struct se_hba *, | 158 | extern struct se_device *transport_add_device_to_core_hba(struct se_hba *, |
157 | struct se_subsystem_api *, | 159 | struct se_subsystem_api *, |
158 | struct se_subsystem_dev *, u32, | 160 | struct se_subsystem_dev *, u32, |
159 | void *, struct se_dev_limits *, | 161 | void *, struct se_dev_limits *, |
160 | const char *, const char *); | 162 | const char *, const char *); |
161 | extern void transport_device_setup_cmd(struct se_cmd *); | 163 | extern void transport_device_setup_cmd(struct se_cmd *); |
162 | extern void transport_init_se_cmd(struct se_cmd *, | 164 | extern void transport_init_se_cmd(struct se_cmd *, |
163 | struct target_core_fabric_ops *, | 165 | struct target_core_fabric_ops *, |
164 | struct se_session *, u32, int, int, | 166 | struct se_session *, u32, int, int, |
165 | unsigned char *); | 167 | unsigned char *); |
166 | extern void transport_free_se_cmd(struct se_cmd *); | 168 | extern void transport_free_se_cmd(struct se_cmd *); |
167 | extern int transport_generic_allocate_tasks(struct se_cmd *, unsigned char *); | 169 | extern int transport_generic_allocate_tasks(struct se_cmd *, unsigned char *); |
168 | extern int transport_generic_handle_cdb(struct se_cmd *); | 170 | extern int transport_generic_handle_cdb(struct se_cmd *); |
169 | extern int transport_generic_handle_cdb_map(struct se_cmd *); | 171 | extern int transport_generic_handle_cdb_map(struct se_cmd *); |
170 | extern int transport_generic_handle_data(struct se_cmd *); | 172 | extern int transport_generic_handle_data(struct se_cmd *); |
171 | extern void transport_new_cmd_failure(struct se_cmd *); | 173 | extern void transport_new_cmd_failure(struct se_cmd *); |
172 | extern int transport_generic_handle_tmr(struct se_cmd *); | 174 | extern int transport_generic_handle_tmr(struct se_cmd *); |
173 | extern void __transport_stop_task_timer(struct se_task *, unsigned long *); | 175 | extern void __transport_stop_task_timer(struct se_task *, unsigned long *); |
174 | extern unsigned char transport_asciihex_to_binaryhex(unsigned char val[2]); | 176 | extern unsigned char transport_asciihex_to_binaryhex(unsigned char val[2]); |
175 | extern int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32, | 177 | extern int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32, |
176 | struct scatterlist *, u32); | 178 | struct scatterlist *, u32); |
177 | extern int transport_clear_lun_from_sessions(struct se_lun *); | 179 | extern int transport_clear_lun_from_sessions(struct se_lun *); |
178 | extern int transport_check_aborted_status(struct se_cmd *, int); | 180 | extern int transport_check_aborted_status(struct se_cmd *, int); |
179 | extern int transport_send_check_condition_and_sense(struct se_cmd *, u8, int); | 181 | extern int transport_send_check_condition_and_sense(struct se_cmd *, u8, int); |
180 | extern void transport_send_task_abort(struct se_cmd *); | 182 | extern void transport_send_task_abort(struct se_cmd *); |
181 | extern void transport_release_cmd_to_pool(struct se_cmd *); | 183 | extern void transport_release_cmd_to_pool(struct se_cmd *); |
182 | extern void transport_generic_free_cmd(struct se_cmd *, int, int, int); | 184 | extern void transport_generic_free_cmd(struct se_cmd *, int, int, int); |
183 | extern void transport_generic_wait_for_cmds(struct se_cmd *, int); | 185 | extern void transport_generic_wait_for_cmds(struct se_cmd *, int); |
184 | extern u32 transport_calc_sg_num(struct se_task *, struct se_mem *, u32); | 186 | extern u32 transport_calc_sg_num(struct se_task *, struct se_mem *, u32); |
185 | extern int transport_map_mem_to_sg(struct se_task *, struct list_head *, | 187 | extern int transport_map_mem_to_sg(struct se_task *, struct list_head *, |
186 | void *, struct se_mem *, | 188 | void *, struct se_mem *, |
187 | struct se_mem **, u32 *, u32 *); | 189 | struct se_mem **, u32 *, u32 *); |
188 | extern void transport_do_task_sg_chain(struct se_cmd *); | 190 | extern void transport_do_task_sg_chain(struct se_cmd *); |
189 | extern void transport_generic_process_write(struct se_cmd *); | 191 | extern void transport_generic_process_write(struct se_cmd *); |
190 | extern int transport_generic_do_tmr(struct se_cmd *); | 192 | extern int transport_generic_do_tmr(struct se_cmd *); |
191 | /* From target_core_alua.c */ | 193 | /* From target_core_alua.c */ |
192 | extern int core_alua_check_nonop_delay(struct se_cmd *); | 194 | extern int core_alua_check_nonop_delay(struct se_cmd *); |
195 | /* From target_core_cdb.c */ | ||
196 | extern int transport_emulate_control_cdb(struct se_task *); | ||
193 | 197 | ||
194 | /* | 198 | /* |
195 | * Each se_transport_task_t can have N possible struct se_task's | 199 | * Each se_transport_task_t can have N possible struct se_task's |
196 | * for the storage transport(s) to possibly execute. | 200 | * for the storage transport(s) to possibly execute. |
197 | * Used primarily for splitting up CDBs that exceed the physical storage | 201 | * Used primarily for splitting up CDBs that exceed the physical storage |
198 | * HBA's maximum sector count per task. | 202 | * HBA's maximum sector count per task. |
199 | */ | 203 | */ |
200 | struct se_mem { | 204 | struct se_mem { |
201 | struct page *se_page; | 205 | struct page *se_page; |
202 | u32 se_len; | 206 | u32 se_len; |
203 | u32 se_off; | 207 | u32 se_off; |
204 | struct list_head se_list; | 208 | struct list_head se_list; |
205 | } ____cacheline_aligned; | 209 | } ____cacheline_aligned; |
206 | 210 | ||
207 | /* | 211 | /* |
208 | * Each type of disk transport supported MUST have a template defined | 212 | * Each type of disk transport supported MUST have a template defined |
209 | * within its .h file. | 213 | * within its .h file. |
210 | */ | 214 | */ |
211 | struct se_subsystem_api { | 215 | struct se_subsystem_api { |
212 | /* | 216 | /* |
213 | * The Name. :-) | 217 | * The Name. :-) |
214 | */ | 218 | */ |
215 | char name[16]; | 219 | char name[16]; |
216 | /* | 220 | /* |
217 | * Transport Type. | 221 | * Transport Type. |
218 | */ | 222 | */ |
219 | u8 transport_type; | 223 | u8 transport_type; |
220 | /* | 224 | /* |
221 | * struct module for struct se_hba references | 225 | * struct module for struct se_hba references |
222 | */ | 226 | */ |
223 | struct module *owner; | 227 | struct module *owner; |
224 | /* | 228 | /* |
225 | * Used for global se_subsystem_api list_head | 229 | * Used for global se_subsystem_api list_head |
226 | */ | 230 | */ |
227 | struct list_head sub_api_list; | 231 | struct list_head sub_api_list; |
228 | /* | 232 | /* |
229 | * For SCF_SCSI_NON_DATA_CDB | 233 | * For SCF_SCSI_NON_DATA_CDB |
230 | */ | 234 | */ |
231 | int (*cdb_none)(struct se_task *); | 235 | int (*cdb_none)(struct se_task *); |
232 | /* | 236 | /* |
233 | * For SCF_SCSI_CONTROL_NONSG_IO_CDB | 237 | * For SCF_SCSI_CONTROL_NONSG_IO_CDB |
234 | */ | 238 | */ |
235 | int (*map_task_non_SG)(struct se_task *); | 239 | int (*map_task_non_SG)(struct se_task *); |
236 | /* | 240 | /* |
237 | * For SCF_SCSI_DATA_SG_IO_CDB and SCF_SCSI_CONTROL_SG_IO_CDB | 241 | * For SCF_SCSI_DATA_SG_IO_CDB and SCF_SCSI_CONTROL_SG_IO_CDB |
238 | */ | 242 | */ |
239 | int (*map_task_SG)(struct se_task *); | 243 | int (*map_task_SG)(struct se_task *); |
240 | /* | 244 | /* |
241 | * attach_hba(): | 245 | * attach_hba(): |
242 | */ | 246 | */ |
243 | int (*attach_hba)(struct se_hba *, u32); | 247 | int (*attach_hba)(struct se_hba *, u32); |
244 | /* | 248 | /* |
245 | * detach_hba(): | 249 | * detach_hba(): |
246 | */ | 250 | */ |
247 | void (*detach_hba)(struct se_hba *); | 251 | void (*detach_hba)(struct se_hba *); |
248 | /* | 252 | /* |
249 | * pmode_enable_hba(): Used for TCM/pSCSI subsystem plugin HBA -> | 253 | * pmode_enable_hba(): Used for TCM/pSCSI subsystem plugin HBA -> |
250 | * Linux/SCSI struct Scsi_Host passthrough | 254 | * Linux/SCSI struct Scsi_Host passthrough |
251 | */ | 255 | */ |
252 | int (*pmode_enable_hba)(struct se_hba *, unsigned long); | 256 | int (*pmode_enable_hba)(struct se_hba *, unsigned long); |
253 | /* | 257 | /* |
254 | * allocate_virtdevice(): | 258 | * allocate_virtdevice(): |
255 | */ | 259 | */ |
256 | void *(*allocate_virtdevice)(struct se_hba *, const char *); | 260 | void *(*allocate_virtdevice)(struct se_hba *, const char *); |
257 | /* | 261 | /* |
258 | * create_virtdevice(): Only for Virtual HBAs | 262 | * create_virtdevice(): Only for Virtual HBAs |
259 | */ | 263 | */ |
260 | struct se_device *(*create_virtdevice)(struct se_hba *, | 264 | struct se_device *(*create_virtdevice)(struct se_hba *, |
261 | struct se_subsystem_dev *, void *); | 265 | struct se_subsystem_dev *, void *); |
262 | /* | 266 | /* |
263 | * free_device(): | 267 | * free_device(): |
264 | */ | 268 | */ |
265 | void (*free_device)(void *); | 269 | void (*free_device)(void *); |
266 | 270 | ||
267 | /* | 271 | /* |
268 | * dpo_emulated(): | 272 | * dpo_emulated(): |
269 | */ | 273 | */ |
270 | int (*dpo_emulated)(struct se_device *); | 274 | int (*dpo_emulated)(struct se_device *); |
271 | /* | 275 | /* |
272 | * fua_write_emulated(): | 276 | * fua_write_emulated(): |
273 | */ | 277 | */ |
274 | int (*fua_write_emulated)(struct se_device *); | 278 | int (*fua_write_emulated)(struct se_device *); |
275 | /* | 279 | /* |
276 | * fua_read_emulated(): | 280 | * fua_read_emulated(): |
277 | */ | 281 | */ |
278 | int (*fua_read_emulated)(struct se_device *); | 282 | int (*fua_read_emulated)(struct se_device *); |
279 | /* | 283 | /* |
280 | * write_cache_emulated(): | 284 | * write_cache_emulated(): |
281 | */ | 285 | */ |
282 | int (*write_cache_emulated)(struct se_device *); | 286 | int (*write_cache_emulated)(struct se_device *); |
283 | /* | 287 | /* |
284 | * transport_complete(): | 288 | * transport_complete(): |
285 | * | 289 | * |
286 | * Use transport_generic_complete() for the majority of DAS transport | 290 | * Use transport_generic_complete() for the majority of DAS transport |
287 | * drivers. Provided as a convenience. | 291 | * drivers. Provided as a convenience. |
288 | */ | 292 | */ |
289 | int (*transport_complete)(struct se_task *task); | 293 | int (*transport_complete)(struct se_task *task); |
290 | struct se_task *(*alloc_task)(struct se_cmd *); | 294 | struct se_task *(*alloc_task)(struct se_cmd *); |
291 | /* | 295 | /* |
292 | * do_task(): | 296 | * do_task(): |
293 | */ | 297 | */ |
294 | int (*do_task)(struct se_task *); | 298 | int (*do_task)(struct se_task *); |
295 | /* | 299 | /* |
296 | * Used by virtual subsystem plugins IBLOCK and FILEIO to emulate | 300 | * Used by virtual subsystem plugins IBLOCK and FILEIO to emulate |
297 | * UNMAP and WRITE_SAME_* w/ UNMAP=1 <-> Linux/Block Discard | 301 | * UNMAP and WRITE_SAME_* w/ UNMAP=1 <-> Linux/Block Discard |
298 | */ | 302 | */ |
299 | int (*do_discard)(struct se_device *, sector_t, u32); | 303 | int (*do_discard)(struct se_device *, sector_t, u32); |
300 | /* | 304 | /* |
301 | * Used by virtual subsystem plugins IBLOCK and FILEIO to emulate | 305 | * Used by virtual subsystem plugins IBLOCK and FILEIO to emulate |
302 | * SYNCHRONIZE_CACHE_* <-> Linux/Block blkdev_issue_flush() | 306 | * SYNCHRONIZE_CACHE_* <-> Linux/Block blkdev_issue_flush() |
303 | */ | 307 | */ |
304 | void (*do_sync_cache)(struct se_task *); | 308 | void (*do_sync_cache)(struct se_task *); |
305 | /* | 309 | /* |
306 | * free_task(): | 310 | * free_task(): |
307 | */ | 311 | */ |
308 | void (*free_task)(struct se_task *); | 312 | void (*free_task)(struct se_task *); |
309 | /* | 313 | /* |
310 | * check_configfs_dev_params(): | 314 | * check_configfs_dev_params(): |
311 | */ | 315 | */ |
312 | ssize_t (*check_configfs_dev_params)(struct se_hba *, struct se_subsystem_dev *); | 316 | ssize_t (*check_configfs_dev_params)(struct se_hba *, struct se_subsystem_dev *); |
313 | /* | 317 | /* |
314 | * set_configfs_dev_params(): | 318 | * set_configfs_dev_params(): |
315 | */ | 319 | */ |
316 | ssize_t (*set_configfs_dev_params)(struct se_hba *, struct se_subsystem_dev *, | 320 | ssize_t (*set_configfs_dev_params)(struct se_hba *, struct se_subsystem_dev *, |
317 | const char *, ssize_t); | 321 | const char *, ssize_t); |
318 | /* | 322 | /* |
319 | * show_configfs_dev_params(): | 323 | * show_configfs_dev_params(): |
320 | */ | 324 | */ |
321 | ssize_t (*show_configfs_dev_params)(struct se_hba *, struct se_subsystem_dev *, | 325 | ssize_t (*show_configfs_dev_params)(struct se_hba *, struct se_subsystem_dev *, |
322 | char *); | 326 | char *); |
323 | /* | 327 | /* |
324 | * get_cdb(): | 328 | * get_cdb(): |
325 | */ | 329 | */ |
326 | unsigned char *(*get_cdb)(struct se_task *); | 330 | unsigned char *(*get_cdb)(struct se_task *); |
327 | /* | 331 | /* |
328 | * get_device_rev(): | 332 | * get_device_rev(): |
329 | */ | 333 | */ |
330 | u32 (*get_device_rev)(struct se_device *); | 334 | u32 (*get_device_rev)(struct se_device *); |
331 | /* | 335 | /* |
332 | * get_device_type(): | 336 | * get_device_type(): |
333 | */ | 337 | */ |
334 | u32 (*get_device_type)(struct se_device *); | 338 | u32 (*get_device_type)(struct se_device *); |
335 | /* | 339 | /* |
336 | * Get the sector_t from a subsystem backstore. | 340 | * Get the sector_t from a subsystem backstore. |
337 | */ | 341 | */ |
338 | sector_t (*get_blocks)(struct se_device *); | 342 | sector_t (*get_blocks)(struct se_device *); |
339 | /* | 343 | /* |
340 | * do_se_mem_map(): | 344 | * do_se_mem_map(): |
341 | */ | 345 | */ |
342 | int (*do_se_mem_map)(struct se_task *, struct list_head *, void *, | 346 | int (*do_se_mem_map)(struct se_task *, struct list_head *, void *, |
343 | struct se_mem *, struct se_mem **, u32 *, u32 *); | 347 | struct se_mem *, struct se_mem **, u32 *, u32 *); |
344 | /* | 348 | /* |
345 | * get_sense_buffer(): | 349 | * get_sense_buffer(): |
346 | */ | 350 | */ |
347 | unsigned char *(*get_sense_buffer)(struct se_task *); | 351 | unsigned char *(*get_sense_buffer)(struct se_task *); |
348 | } ____cacheline_aligned; | 352 | } ____cacheline_aligned; |
349 | 353 | ||
350 | #define TRANSPORT(dev) ((dev)->transport) | 354 | #define TRANSPORT(dev) ((dev)->transport) |
351 | #define HBA_TRANSPORT(hba) ((hba)->transport) | 355 | #define HBA_TRANSPORT(hba) ((hba)->transport) |
352 | 356 | ||
353 | extern struct se_global *se_global; | 357 | extern struct se_global *se_global; |
354 | 358 | ||
355 | #endif /* TARGET_CORE_TRANSPORT_H */ | 359 | #endif /* TARGET_CORE_TRANSPORT_H */ |
356 | 360 |
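
struct se_subsystem_api is the template that "each type of disk transport supported MUST have" per the comment above, and transport_subsystem_register()/transport_subsystem_release() declared in this header are the entry points it is passed to. A minimal registration sketch (all "demo" names and callback bodies are hypothetical placeholders; a real backstore plugin also fills in alloc_task(), do_task(), free_task() and the remaining callbacks):

static int demo_attach_hba(struct se_hba *hba, u32 host_id)
{
	/* stash plugin-private state in hba->hba_ptr here */
	return 0;
}

static void demo_detach_hba(struct se_hba *hba)
{
	/* undo whatever demo_attach_hba() set up */
}

static struct se_subsystem_api demo_template = {
	.name		= "demo",
	.transport_type	= TRANSPORT_PLUGIN_VHBA_VDEV,
	.owner		= THIS_MODULE,
	.attach_hba	= demo_attach_hba,
	.detach_hba	= demo_detach_hba,
};

static int __init demo_module_init(void)
{
	return transport_subsystem_register(&demo_template);
}

static void __exit demo_module_exit(void)
{
	transport_subsystem_release(&demo_template);
}

module_init(demo_module_init);
module_exit(demo_module_exit);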