Commit 05aea6e7e497ab418239ae54fe5966d52cbd8550
Committed by: James Bottomley
1 parent: 613640e4e1
Exists in: master and 7 other branches
[SCSI] target: Remove unnecessary hba_dev_list walk and se_clear_dev_ports legacy code
This patch removes a legacy struct se_hba->hba_dev_list ->
se_release_device_for_hba() list walk in core_delete_hba(), which is no
longer required while using configfs VFS level parent/child struct
config_group dependency referencing. The reason is that any struct
se_hba->hba_dev_list -> struct se_device members will have to be
released via:

  rmdir /sys/kernel/config/target/core/$HBA/*

before the rmdir release of struct se_hba via target_core_configfs.c:
target_core_call_delhbafromtarget() -> core_delete_hba():

  rmdir /sys/kernel/config/target/core/$HBA

to release struct se_hba in core_delete_hba().

This patch also removes the legacy se_clear_dev_ports() function, which
is left-over pre-configfs shutdown logic from when
se_free_virtual_device() was responsible for walking struct
se_device->dev_sep_list and calling core_dev_del_lun() for each
individual active struct se_port->se_lun. The reason this can be
removed is that all struct se_device->dev_sep_list -> struct se_port
communication is done via configfs symlinks, which means that a target
fabric module's endpoints containing active struct se_port(s) will have
to be released via target_core_fabric_configfs.c:
target_fabric_port_unlink():

  unlink /sys/kernel/config/target/$FABRIC_MOD/$ENDPOINT/tpgt_$TPGT/lun/lun_$LUN_ID/<symlink>

before the rmdir release of struct se_device in target_core_configfs.c:
target_core_drop_subdev() -> se_free_virtual_device() can happen via:

  rmdir /sys/kernel/config/target/core/$HBA/*

to release struct se_subsystem_dev in target_core_drop_subdev().

Reported-by: Stefan Richter <stefanr@s5r6.in-berlin.de>
Reported-by: Fubo Chen <fubo.chen@gmail.com>
Signed-off-by: Nicholas A. Bellinger <nab@linux-iscsi.org>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
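Taken together, the teardown ordering the commit relies on can be
sketched as the following shell sequence (a sketch only: the $HBA,
$FABRIC_MOD, $ENDPOINT, $TPGT and $LUN_ID placeholders are the commit
message's own, and the fabric side symlink name is left elided as
above):

  # 1) Release each active struct se_port by removing the fabric LUN symlink
  unlink /sys/kernel/config/target/$FABRIC_MOD/$ENDPOINT/tpgt_$TPGT/lun/lun_$LUN_ID/<symlink>

  # 2) Release struct se_device / struct se_subsystem_dev under the HBA
  rmdir /sys/kernel/config/target/core/$HBA/*

  # 3) Only then can struct se_hba itself be released
  rmdir /sys/kernel/config/target/core/$HBA

configfs refuses each rmdir while child config_groups remain or while an
item is still the target of a fabric symlink, which is the parent/child
dependency referencing that makes the removed list walks unnecessary.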
Showing 2 changed files with 4 additions and 50 deletions
drivers/target/target_core_device.c
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | * Filename: target_core_device.c (based on iscsi_target_device.c) | 2 | * Filename: target_core_device.c (based on iscsi_target_device.c) |
3 | * | 3 | * |
4 | * This file contains the iSCSI Virtual Device and Disk Transport | 4 | * This file contains the iSCSI Virtual Device and Disk Transport |
5 | * agnostic related functions. | 5 | * agnostic related functions. |
6 | * | 6 | * |
7 | * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc. | 7 | * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc. |
8 | * Copyright (c) 2005-2006 SBE, Inc. All Rights Reserved. | 8 | * Copyright (c) 2005-2006 SBE, Inc. All Rights Reserved. |
9 | * Copyright (c) 2007-2010 Rising Tide Systems | 9 | * Copyright (c) 2007-2010 Rising Tide Systems |
10 | * Copyright (c) 2008-2010 Linux-iSCSI.org | 10 | * Copyright (c) 2008-2010 Linux-iSCSI.org |
11 | * | 11 | * |
12 | * Nicholas A. Bellinger <nab@kernel.org> | 12 | * Nicholas A. Bellinger <nab@kernel.org> |
13 | * | 13 | * |
14 | * This program is free software; you can redistribute it and/or modify | 14 | * This program is free software; you can redistribute it and/or modify |
15 | * it under the terms of the GNU General Public License as published by | 15 | * it under the terms of the GNU General Public License as published by |
16 | * the Free Software Foundation; either version 2 of the License, or | 16 | * the Free Software Foundation; either version 2 of the License, or |
17 | * (at your option) any later version. | 17 | * (at your option) any later version. |
18 | * | 18 | * |
19 | * This program is distributed in the hope that it will be useful, | 19 | * This program is distributed in the hope that it will be useful, |
20 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 20 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
21 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 21 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
22 | * GNU General Public License for more details. | 22 | * GNU General Public License for more details. |
23 | * | 23 | * |
24 | * You should have received a copy of the GNU General Public License | 24 | * You should have received a copy of the GNU General Public License |
25 | * along with this program; if not, write to the Free Software | 25 | * along with this program; if not, write to the Free Software |
26 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | 26 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. |
27 | * | 27 | * |
28 | ******************************************************************************/ | 28 | ******************************************************************************/ |
29 | 29 | ||
30 | #include <linux/net.h> | 30 | #include <linux/net.h> |
31 | #include <linux/string.h> | 31 | #include <linux/string.h> |
32 | #include <linux/delay.h> | 32 | #include <linux/delay.h> |
33 | #include <linux/timer.h> | 33 | #include <linux/timer.h> |
34 | #include <linux/slab.h> | 34 | #include <linux/slab.h> |
35 | #include <linux/spinlock.h> | 35 | #include <linux/spinlock.h> |
36 | #include <linux/kthread.h> | 36 | #include <linux/kthread.h> |
37 | #include <linux/in.h> | 37 | #include <linux/in.h> |
38 | #include <net/sock.h> | 38 | #include <net/sock.h> |
39 | #include <net/tcp.h> | 39 | #include <net/tcp.h> |
40 | #include <scsi/scsi.h> | 40 | #include <scsi/scsi.h> |
41 | 41 | ||
42 | #include <target/target_core_base.h> | 42 | #include <target/target_core_base.h> |
43 | #include <target/target_core_device.h> | 43 | #include <target/target_core_device.h> |
44 | #include <target/target_core_tpg.h> | 44 | #include <target/target_core_tpg.h> |
45 | #include <target/target_core_transport.h> | 45 | #include <target/target_core_transport.h> |
46 | #include <target/target_core_fabric_ops.h> | 46 | #include <target/target_core_fabric_ops.h> |
47 | 47 | ||
48 | #include "target_core_alua.h" | 48 | #include "target_core_alua.h" |
49 | #include "target_core_hba.h" | 49 | #include "target_core_hba.h" |
50 | #include "target_core_pr.h" | 50 | #include "target_core_pr.h" |
51 | #include "target_core_ua.h" | 51 | #include "target_core_ua.h" |
52 | 52 | ||
53 | static void se_dev_start(struct se_device *dev); | 53 | static void se_dev_start(struct se_device *dev); |
54 | static void se_dev_stop(struct se_device *dev); | 54 | static void se_dev_stop(struct se_device *dev); |
55 | 55 | ||
56 | int transport_get_lun_for_cmd( | 56 | int transport_get_lun_for_cmd( |
57 | struct se_cmd *se_cmd, | 57 | struct se_cmd *se_cmd, |
58 | unsigned char *cdb, | 58 | unsigned char *cdb, |
59 | u32 unpacked_lun) | 59 | u32 unpacked_lun) |
60 | { | 60 | { |
61 | struct se_dev_entry *deve; | 61 | struct se_dev_entry *deve; |
62 | struct se_lun *se_lun = NULL; | 62 | struct se_lun *se_lun = NULL; |
63 | struct se_session *se_sess = SE_SESS(se_cmd); | 63 | struct se_session *se_sess = SE_SESS(se_cmd); |
64 | unsigned long flags; | 64 | unsigned long flags; |
65 | int read_only = 0; | 65 | int read_only = 0; |
66 | 66 | ||
67 | spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); | 67 | spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); |
68 | deve = se_cmd->se_deve = | 68 | deve = se_cmd->se_deve = |
69 | &SE_NODE_ACL(se_sess)->device_list[unpacked_lun]; | 69 | &SE_NODE_ACL(se_sess)->device_list[unpacked_lun]; |
70 | if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { | 70 | if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { |
71 | if (se_cmd) { | 71 | if (se_cmd) { |
72 | deve->total_cmds++; | 72 | deve->total_cmds++; |
73 | deve->total_bytes += se_cmd->data_length; | 73 | deve->total_bytes += se_cmd->data_length; |
74 | 74 | ||
75 | if (se_cmd->data_direction == DMA_TO_DEVICE) { | 75 | if (se_cmd->data_direction == DMA_TO_DEVICE) { |
76 | if (deve->lun_flags & | 76 | if (deve->lun_flags & |
77 | TRANSPORT_LUNFLAGS_READ_ONLY) { | 77 | TRANSPORT_LUNFLAGS_READ_ONLY) { |
78 | read_only = 1; | 78 | read_only = 1; |
79 | goto out; | 79 | goto out; |
80 | } | 80 | } |
81 | deve->write_bytes += se_cmd->data_length; | 81 | deve->write_bytes += se_cmd->data_length; |
82 | } else if (se_cmd->data_direction == | 82 | } else if (se_cmd->data_direction == |
83 | DMA_FROM_DEVICE) { | 83 | DMA_FROM_DEVICE) { |
84 | deve->read_bytes += se_cmd->data_length; | 84 | deve->read_bytes += se_cmd->data_length; |
85 | } | 85 | } |
86 | } | 86 | } |
87 | deve->deve_cmds++; | 87 | deve->deve_cmds++; |
88 | 88 | ||
89 | se_lun = se_cmd->se_lun = deve->se_lun; | 89 | se_lun = se_cmd->se_lun = deve->se_lun; |
90 | se_cmd->pr_res_key = deve->pr_res_key; | 90 | se_cmd->pr_res_key = deve->pr_res_key; |
91 | se_cmd->orig_fe_lun = unpacked_lun; | 91 | se_cmd->orig_fe_lun = unpacked_lun; |
92 | se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev; | 92 | se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev; |
93 | se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; | 93 | se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; |
94 | } | 94 | } |
95 | out: | 95 | out: |
96 | spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); | 96 | spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); |
97 | 97 | ||
98 | if (!se_lun) { | 98 | if (!se_lun) { |
99 | if (read_only) { | 99 | if (read_only) { |
100 | se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; | 100 | se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; |
101 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 101 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
102 | printk("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN" | 102 | printk("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN" |
103 | " Access for 0x%08x\n", | 103 | " Access for 0x%08x\n", |
104 | CMD_TFO(se_cmd)->get_fabric_name(), | 104 | CMD_TFO(se_cmd)->get_fabric_name(), |
105 | unpacked_lun); | 105 | unpacked_lun); |
106 | return -1; | 106 | return -1; |
107 | } else { | 107 | } else { |
108 | /* | 108 | /* |
109 | * Use the se_portal_group->tpg_virt_lun0 to allow for | 109 | * Use the se_portal_group->tpg_virt_lun0 to allow for |
110 | * REPORT_LUNS, et al to be returned when no active | 110 | * REPORT_LUNS, et al to be returned when no active |
111 | * MappedLUN=0 exists for this Initiator Port. | 111 | * MappedLUN=0 exists for this Initiator Port. |
112 | */ | 112 | */ |
113 | if (unpacked_lun != 0) { | 113 | if (unpacked_lun != 0) { |
114 | se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; | 114 | se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; |
115 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 115 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
116 | printk("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" | 116 | printk("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" |
117 | " Access for 0x%08x\n", | 117 | " Access for 0x%08x\n", |
118 | CMD_TFO(se_cmd)->get_fabric_name(), | 118 | CMD_TFO(se_cmd)->get_fabric_name(), |
119 | unpacked_lun); | 119 | unpacked_lun); |
120 | return -1; | 120 | return -1; |
121 | } | 121 | } |
122 | /* | 122 | /* |
123 | * Force WRITE PROTECT for virtual LUN 0 | 123 | * Force WRITE PROTECT for virtual LUN 0 |
124 | */ | 124 | */ |
125 | if ((se_cmd->data_direction != DMA_FROM_DEVICE) && | 125 | if ((se_cmd->data_direction != DMA_FROM_DEVICE) && |
126 | (se_cmd->data_direction != DMA_NONE)) { | 126 | (se_cmd->data_direction != DMA_NONE)) { |
127 | se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; | 127 | se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; |
128 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 128 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
129 | return -1; | 129 | return -1; |
130 | } | 130 | } |
131 | #if 0 | 131 | #if 0 |
132 | printk("TARGET_CORE[%s]: Using virtual LUN0! :-)\n", | 132 | printk("TARGET_CORE[%s]: Using virtual LUN0! :-)\n", |
133 | CMD_TFO(se_cmd)->get_fabric_name()); | 133 | CMD_TFO(se_cmd)->get_fabric_name()); |
134 | #endif | 134 | #endif |
135 | se_lun = se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0; | 135 | se_lun = se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0; |
136 | se_cmd->orig_fe_lun = 0; | 136 | se_cmd->orig_fe_lun = 0; |
137 | se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev; | 137 | se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev; |
138 | se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; | 138 | se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; |
139 | } | 139 | } |
140 | } | 140 | } |
141 | /* | 141 | /* |
142 | * Determine if the struct se_lun is online. | 142 | * Determine if the struct se_lun is online. |
143 | */ | 143 | */ |
144 | /* #warning FIXME: Check for LUN_RESET + UNIT Attention */ | 144 | /* #warning FIXME: Check for LUN_RESET + UNIT Attention */ |
145 | if (se_dev_check_online(se_lun->lun_se_dev) != 0) { | 145 | if (se_dev_check_online(se_lun->lun_se_dev) != 0) { |
146 | se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; | 146 | se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; |
147 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 147 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
148 | return -1; | 148 | return -1; |
149 | } | 149 | } |
150 | 150 | ||
151 | { | 151 | { |
152 | struct se_device *dev = se_lun->lun_se_dev; | 152 | struct se_device *dev = se_lun->lun_se_dev; |
153 | spin_lock(&dev->stats_lock); | 153 | spin_lock(&dev->stats_lock); |
154 | dev->num_cmds++; | 154 | dev->num_cmds++; |
155 | if (se_cmd->data_direction == DMA_TO_DEVICE) | 155 | if (se_cmd->data_direction == DMA_TO_DEVICE) |
156 | dev->write_bytes += se_cmd->data_length; | 156 | dev->write_bytes += se_cmd->data_length; |
157 | else if (se_cmd->data_direction == DMA_FROM_DEVICE) | 157 | else if (se_cmd->data_direction == DMA_FROM_DEVICE) |
158 | dev->read_bytes += se_cmd->data_length; | 158 | dev->read_bytes += se_cmd->data_length; |
159 | spin_unlock(&dev->stats_lock); | 159 | spin_unlock(&dev->stats_lock); |
160 | } | 160 | } |
161 | 161 | ||
162 | /* | 162 | /* |
163 | * Add the iscsi_cmd_t to the struct se_lun's cmd list. This list is used | 163 | * Add the iscsi_cmd_t to the struct se_lun's cmd list. This list is used |
164 | * for tracking state of struct se_cmds during LUN shutdown events. | 164 | * for tracking state of struct se_cmds during LUN shutdown events. |
165 | */ | 165 | */ |
166 | spin_lock_irqsave(&se_lun->lun_cmd_lock, flags); | 166 | spin_lock_irqsave(&se_lun->lun_cmd_lock, flags); |
167 | list_add_tail(&se_cmd->se_lun_list, &se_lun->lun_cmd_list); | 167 | list_add_tail(&se_cmd->se_lun_list, &se_lun->lun_cmd_list); |
168 | atomic_set(&T_TASK(se_cmd)->transport_lun_active, 1); | 168 | atomic_set(&T_TASK(se_cmd)->transport_lun_active, 1); |
169 | #if 0 | 169 | #if 0 |
170 | printk(KERN_INFO "Adding ITT: 0x%08x to LUN LIST[%d]\n", | 170 | printk(KERN_INFO "Adding ITT: 0x%08x to LUN LIST[%d]\n", |
171 | CMD_TFO(se_cmd)->get_task_tag(se_cmd), se_lun->unpacked_lun); | 171 | CMD_TFO(se_cmd)->get_task_tag(se_cmd), se_lun->unpacked_lun); |
172 | #endif | 172 | #endif |
173 | spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags); | 173 | spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags); |
174 | 174 | ||
175 | return 0; | 175 | return 0; |
176 | } | 176 | } |
177 | EXPORT_SYMBOL(transport_get_lun_for_cmd); | 177 | EXPORT_SYMBOL(transport_get_lun_for_cmd); |
178 | 178 | ||
179 | int transport_get_lun_for_tmr( | 179 | int transport_get_lun_for_tmr( |
180 | struct se_cmd *se_cmd, | 180 | struct se_cmd *se_cmd, |
181 | u32 unpacked_lun) | 181 | u32 unpacked_lun) |
182 | { | 182 | { |
183 | struct se_device *dev = NULL; | 183 | struct se_device *dev = NULL; |
184 | struct se_dev_entry *deve; | 184 | struct se_dev_entry *deve; |
185 | struct se_lun *se_lun = NULL; | 185 | struct se_lun *se_lun = NULL; |
186 | struct se_session *se_sess = SE_SESS(se_cmd); | 186 | struct se_session *se_sess = SE_SESS(se_cmd); |
187 | struct se_tmr_req *se_tmr = se_cmd->se_tmr_req; | 187 | struct se_tmr_req *se_tmr = se_cmd->se_tmr_req; |
188 | 188 | ||
189 | spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); | 189 | spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); |
190 | deve = se_cmd->se_deve = | 190 | deve = se_cmd->se_deve = |
191 | &SE_NODE_ACL(se_sess)->device_list[unpacked_lun]; | 191 | &SE_NODE_ACL(se_sess)->device_list[unpacked_lun]; |
192 | if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { | 192 | if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { |
193 | se_lun = se_cmd->se_lun = se_tmr->tmr_lun = deve->se_lun; | 193 | se_lun = se_cmd->se_lun = se_tmr->tmr_lun = deve->se_lun; |
194 | dev = se_tmr->tmr_dev = se_lun->lun_se_dev; | 194 | dev = se_tmr->tmr_dev = se_lun->lun_se_dev; |
195 | se_cmd->pr_res_key = deve->pr_res_key; | 195 | se_cmd->pr_res_key = deve->pr_res_key; |
196 | se_cmd->orig_fe_lun = unpacked_lun; | 196 | se_cmd->orig_fe_lun = unpacked_lun; |
197 | se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev; | 197 | se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev; |
198 | /* se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; */ | 198 | /* se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; */ |
199 | } | 199 | } |
200 | spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); | 200 | spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); |
201 | 201 | ||
202 | if (!se_lun) { | 202 | if (!se_lun) { |
203 | printk(KERN_INFO "TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" | 203 | printk(KERN_INFO "TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" |
204 | " Access for 0x%08x\n", | 204 | " Access for 0x%08x\n", |
205 | CMD_TFO(se_cmd)->get_fabric_name(), | 205 | CMD_TFO(se_cmd)->get_fabric_name(), |
206 | unpacked_lun); | 206 | unpacked_lun); |
207 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 207 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
208 | return -1; | 208 | return -1; |
209 | } | 209 | } |
210 | /* | 210 | /* |
211 | * Determine if the struct se_lun is online. | 211 | * Determine if the struct se_lun is online. |
212 | */ | 212 | */ |
213 | /* #warning FIXME: Check for LUN_RESET + UNIT Attention */ | 213 | /* #warning FIXME: Check for LUN_RESET + UNIT Attention */ |
214 | if (se_dev_check_online(se_lun->lun_se_dev) != 0) { | 214 | if (se_dev_check_online(se_lun->lun_se_dev) != 0) { |
215 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; | 215 | se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
216 | return -1; | 216 | return -1; |
217 | } | 217 | } |
218 | 218 | ||
219 | spin_lock(&dev->se_tmr_lock); | 219 | spin_lock(&dev->se_tmr_lock); |
220 | list_add_tail(&se_tmr->tmr_list, &dev->dev_tmr_list); | 220 | list_add_tail(&se_tmr->tmr_list, &dev->dev_tmr_list); |
221 | spin_unlock(&dev->se_tmr_lock); | 221 | spin_unlock(&dev->se_tmr_lock); |
222 | 222 | ||
223 | return 0; | 223 | return 0; |
224 | } | 224 | } |
225 | EXPORT_SYMBOL(transport_get_lun_for_tmr); | 225 | EXPORT_SYMBOL(transport_get_lun_for_tmr); |
226 | 226 | ||
227 | /* | 227 | /* |
228 | * This function is called from core_scsi3_emulate_pro_register_and_move() | 228 | * This function is called from core_scsi3_emulate_pro_register_and_move() |
229 | * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count | 229 | * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count |
230 | * when a matching rtpi is found. | 230 | * when a matching rtpi is found. |
231 | */ | 231 | */ |
232 | struct se_dev_entry *core_get_se_deve_from_rtpi( | 232 | struct se_dev_entry *core_get_se_deve_from_rtpi( |
233 | struct se_node_acl *nacl, | 233 | struct se_node_acl *nacl, |
234 | u16 rtpi) | 234 | u16 rtpi) |
235 | { | 235 | { |
236 | struct se_dev_entry *deve; | 236 | struct se_dev_entry *deve; |
237 | struct se_lun *lun; | 237 | struct se_lun *lun; |
238 | struct se_port *port; | 238 | struct se_port *port; |
239 | struct se_portal_group *tpg = nacl->se_tpg; | 239 | struct se_portal_group *tpg = nacl->se_tpg; |
240 | u32 i; | 240 | u32 i; |
241 | 241 | ||
242 | spin_lock_irq(&nacl->device_list_lock); | 242 | spin_lock_irq(&nacl->device_list_lock); |
243 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { | 243 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { |
244 | deve = &nacl->device_list[i]; | 244 | deve = &nacl->device_list[i]; |
245 | 245 | ||
246 | if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) | 246 | if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) |
247 | continue; | 247 | continue; |
248 | 248 | ||
249 | lun = deve->se_lun; | 249 | lun = deve->se_lun; |
250 | if (!(lun)) { | 250 | if (!(lun)) { |
251 | printk(KERN_ERR "%s device entries device pointer is" | 251 | printk(KERN_ERR "%s device entries device pointer is" |
252 | " NULL, but Initiator has access.\n", | 252 | " NULL, but Initiator has access.\n", |
253 | TPG_TFO(tpg)->get_fabric_name()); | 253 | TPG_TFO(tpg)->get_fabric_name()); |
254 | continue; | 254 | continue; |
255 | } | 255 | } |
256 | port = lun->lun_sep; | 256 | port = lun->lun_sep; |
257 | if (!(port)) { | 257 | if (!(port)) { |
258 | printk(KERN_ERR "%s device entries device pointer is" | 258 | printk(KERN_ERR "%s device entries device pointer is" |
259 | " NULL, but Initiator has access.\n", | 259 | " NULL, but Initiator has access.\n", |
260 | TPG_TFO(tpg)->get_fabric_name()); | 260 | TPG_TFO(tpg)->get_fabric_name()); |
261 | continue; | 261 | continue; |
262 | } | 262 | } |
263 | if (port->sep_rtpi != rtpi) | 263 | if (port->sep_rtpi != rtpi) |
264 | continue; | 264 | continue; |
265 | 265 | ||
266 | atomic_inc(&deve->pr_ref_count); | 266 | atomic_inc(&deve->pr_ref_count); |
267 | smp_mb__after_atomic_inc(); | 267 | smp_mb__after_atomic_inc(); |
268 | spin_unlock_irq(&nacl->device_list_lock); | 268 | spin_unlock_irq(&nacl->device_list_lock); |
269 | 269 | ||
270 | return deve; | 270 | return deve; |
271 | } | 271 | } |
272 | spin_unlock_irq(&nacl->device_list_lock); | 272 | spin_unlock_irq(&nacl->device_list_lock); |
273 | 273 | ||
274 | return NULL; | 274 | return NULL; |
275 | } | 275 | } |
276 | 276 | ||
277 | int core_free_device_list_for_node( | 277 | int core_free_device_list_for_node( |
278 | struct se_node_acl *nacl, | 278 | struct se_node_acl *nacl, |
279 | struct se_portal_group *tpg) | 279 | struct se_portal_group *tpg) |
280 | { | 280 | { |
281 | struct se_dev_entry *deve; | 281 | struct se_dev_entry *deve; |
282 | struct se_lun *lun; | 282 | struct se_lun *lun; |
283 | u32 i; | 283 | u32 i; |
284 | 284 | ||
285 | if (!nacl->device_list) | 285 | if (!nacl->device_list) |
286 | return 0; | 286 | return 0; |
287 | 287 | ||
288 | spin_lock_irq(&nacl->device_list_lock); | 288 | spin_lock_irq(&nacl->device_list_lock); |
289 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { | 289 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { |
290 | deve = &nacl->device_list[i]; | 290 | deve = &nacl->device_list[i]; |
291 | 291 | ||
292 | if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) | 292 | if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) |
293 | continue; | 293 | continue; |
294 | 294 | ||
295 | if (!deve->se_lun) { | 295 | if (!deve->se_lun) { |
296 | printk(KERN_ERR "%s device entries device pointer is" | 296 | printk(KERN_ERR "%s device entries device pointer is" |
297 | " NULL, but Initiator has access.\n", | 297 | " NULL, but Initiator has access.\n", |
298 | TPG_TFO(tpg)->get_fabric_name()); | 298 | TPG_TFO(tpg)->get_fabric_name()); |
299 | continue; | 299 | continue; |
300 | } | 300 | } |
301 | lun = deve->se_lun; | 301 | lun = deve->se_lun; |
302 | 302 | ||
303 | spin_unlock_irq(&nacl->device_list_lock); | 303 | spin_unlock_irq(&nacl->device_list_lock); |
304 | core_update_device_list_for_node(lun, NULL, deve->mapped_lun, | 304 | core_update_device_list_for_node(lun, NULL, deve->mapped_lun, |
305 | TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0); | 305 | TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0); |
306 | spin_lock_irq(&nacl->device_list_lock); | 306 | spin_lock_irq(&nacl->device_list_lock); |
307 | } | 307 | } |
308 | spin_unlock_irq(&nacl->device_list_lock); | 308 | spin_unlock_irq(&nacl->device_list_lock); |
309 | 309 | ||
310 | kfree(nacl->device_list); | 310 | kfree(nacl->device_list); |
311 | nacl->device_list = NULL; | 311 | nacl->device_list = NULL; |
312 | 312 | ||
313 | return 0; | 313 | return 0; |
314 | } | 314 | } |
315 | 315 | ||
316 | void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd) | 316 | void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd) |
317 | { | 317 | { |
318 | struct se_dev_entry *deve; | 318 | struct se_dev_entry *deve; |
319 | 319 | ||
320 | spin_lock_irq(&se_nacl->device_list_lock); | 320 | spin_lock_irq(&se_nacl->device_list_lock); |
321 | deve = &se_nacl->device_list[se_cmd->orig_fe_lun]; | 321 | deve = &se_nacl->device_list[se_cmd->orig_fe_lun]; |
322 | deve->deve_cmds--; | 322 | deve->deve_cmds--; |
323 | spin_unlock_irq(&se_nacl->device_list_lock); | 323 | spin_unlock_irq(&se_nacl->device_list_lock); |
324 | 324 | ||
325 | return; | 325 | return; |
326 | } | 326 | } |
327 | 327 | ||
328 | void core_update_device_list_access( | 328 | void core_update_device_list_access( |
329 | u32 mapped_lun, | 329 | u32 mapped_lun, |
330 | u32 lun_access, | 330 | u32 lun_access, |
331 | struct se_node_acl *nacl) | 331 | struct se_node_acl *nacl) |
332 | { | 332 | { |
333 | struct se_dev_entry *deve; | 333 | struct se_dev_entry *deve; |
334 | 334 | ||
335 | spin_lock_irq(&nacl->device_list_lock); | 335 | spin_lock_irq(&nacl->device_list_lock); |
336 | deve = &nacl->device_list[mapped_lun]; | 336 | deve = &nacl->device_list[mapped_lun]; |
337 | if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) { | 337 | if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) { |
338 | deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY; | 338 | deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY; |
339 | deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE; | 339 | deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE; |
340 | } else { | 340 | } else { |
341 | deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE; | 341 | deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE; |
342 | deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY; | 342 | deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY; |
343 | } | 343 | } |
344 | spin_unlock_irq(&nacl->device_list_lock); | 344 | spin_unlock_irq(&nacl->device_list_lock); |
345 | 345 | ||
346 | return; | 346 | return; |
347 | } | 347 | } |
348 | 348 | ||
349 | /* core_update_device_list_for_node(): | 349 | /* core_update_device_list_for_node(): |
350 | * | 350 | * |
351 | * | 351 | * |
352 | */ | 352 | */ |
353 | int core_update_device_list_for_node( | 353 | int core_update_device_list_for_node( |
354 | struct se_lun *lun, | 354 | struct se_lun *lun, |
355 | struct se_lun_acl *lun_acl, | 355 | struct se_lun_acl *lun_acl, |
356 | u32 mapped_lun, | 356 | u32 mapped_lun, |
357 | u32 lun_access, | 357 | u32 lun_access, |
358 | struct se_node_acl *nacl, | 358 | struct se_node_acl *nacl, |
359 | struct se_portal_group *tpg, | 359 | struct se_portal_group *tpg, |
360 | int enable) | 360 | int enable) |
361 | { | 361 | { |
362 | struct se_port *port = lun->lun_sep; | 362 | struct se_port *port = lun->lun_sep; |
363 | struct se_dev_entry *deve = &nacl->device_list[mapped_lun]; | 363 | struct se_dev_entry *deve = &nacl->device_list[mapped_lun]; |
364 | int trans = 0; | 364 | int trans = 0; |
365 | /* | 365 | /* |
366 | * If the MappedLUN entry is being disabled, the entry in | 366 | * If the MappedLUN entry is being disabled, the entry in |
367 | * port->sep_alua_list must be removed now before clearing the | 367 | * port->sep_alua_list must be removed now before clearing the |
368 | * struct se_dev_entry pointers below as logic in | 368 | * struct se_dev_entry pointers below as logic in |
369 | * core_alua_do_transition_tg_pt() depends on these being present. | 369 | * core_alua_do_transition_tg_pt() depends on these being present. |
370 | */ | 370 | */ |
371 | if (!(enable)) { | 371 | if (!(enable)) { |
372 | /* | 372 | /* |
373 | * deve->se_lun_acl will be NULL for demo-mode created LUNs | 373 | * deve->se_lun_acl will be NULL for demo-mode created LUNs |
374 | * that have not been explictly concerted to MappedLUNs -> | 374 | * that have not been explictly concerted to MappedLUNs -> |
375 | * struct se_lun_acl, but we remove deve->alua_port_list from | 375 | * struct se_lun_acl, but we remove deve->alua_port_list from |
376 | * port->sep_alua_list. This also means that active UAs and | 376 | * port->sep_alua_list. This also means that active UAs and |
377 | * NodeACL context specific PR metadata for demo-mode | 377 | * NodeACL context specific PR metadata for demo-mode |
378 | * MappedLUN *deve will be released below.. | 378 | * MappedLUN *deve will be released below.. |
379 | */ | 379 | */ |
380 | spin_lock_bh(&port->sep_alua_lock); | 380 | spin_lock_bh(&port->sep_alua_lock); |
381 | list_del(&deve->alua_port_list); | 381 | list_del(&deve->alua_port_list); |
382 | spin_unlock_bh(&port->sep_alua_lock); | 382 | spin_unlock_bh(&port->sep_alua_lock); |
383 | } | 383 | } |
384 | 384 | ||
385 | spin_lock_irq(&nacl->device_list_lock); | 385 | spin_lock_irq(&nacl->device_list_lock); |
386 | if (enable) { | 386 | if (enable) { |
387 | /* | 387 | /* |
388 | * Check if the call is handling demo mode -> explict LUN ACL | 388 | * Check if the call is handling demo mode -> explict LUN ACL |
389 | * transition. This transition must be for the same struct se_lun | 389 | * transition. This transition must be for the same struct se_lun |
390 | * + mapped_lun that was setup in demo mode.. | 390 | * + mapped_lun that was setup in demo mode.. |
391 | */ | 391 | */ |
392 | if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { | 392 | if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { |
393 | if (deve->se_lun_acl != NULL) { | 393 | if (deve->se_lun_acl != NULL) { |
394 | printk(KERN_ERR "struct se_dev_entry->se_lun_acl" | 394 | printk(KERN_ERR "struct se_dev_entry->se_lun_acl" |
395 | " already set for demo mode -> explict" | 395 | " already set for demo mode -> explict" |
396 | " LUN ACL transition\n"); | 396 | " LUN ACL transition\n"); |
397 | spin_unlock_irq(&nacl->device_list_lock); | 397 | spin_unlock_irq(&nacl->device_list_lock); |
398 | return -1; | 398 | return -1; |
399 | } | 399 | } |
400 | if (deve->se_lun != lun) { | 400 | if (deve->se_lun != lun) { |
401 | printk(KERN_ERR "struct se_dev_entry->se_lun does" | 401 | printk(KERN_ERR "struct se_dev_entry->se_lun does" |
402 | " match passed struct se_lun for demo mode" | 402 | " match passed struct se_lun for demo mode" |
403 | " -> explict LUN ACL transition\n"); | 403 | " -> explict LUN ACL transition\n"); |
404 | spin_unlock_irq(&nacl->device_list_lock); | 404 | spin_unlock_irq(&nacl->device_list_lock); |
405 | return -1; | 405 | return -1; |
406 | } | 406 | } |
407 | deve->se_lun_acl = lun_acl; | 407 | deve->se_lun_acl = lun_acl; |
408 | trans = 1; | 408 | trans = 1; |
409 | } else { | 409 | } else { |
410 | deve->se_lun = lun; | 410 | deve->se_lun = lun; |
411 | deve->se_lun_acl = lun_acl; | 411 | deve->se_lun_acl = lun_acl; |
412 | deve->mapped_lun = mapped_lun; | 412 | deve->mapped_lun = mapped_lun; |
413 | deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS; | 413 | deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS; |
414 | } | 414 | } |
415 | 415 | ||
416 | if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) { | 416 | if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) { |
417 | deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY; | 417 | deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY; |
418 | deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE; | 418 | deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE; |
419 | } else { | 419 | } else { |
420 | deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE; | 420 | deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE; |
421 | deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY; | 421 | deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY; |
422 | } | 422 | } |
423 | 423 | ||
424 | if (trans) { | 424 | if (trans) { |
425 | spin_unlock_irq(&nacl->device_list_lock); | 425 | spin_unlock_irq(&nacl->device_list_lock); |
426 | return 0; | 426 | return 0; |
427 | } | 427 | } |
428 | deve->creation_time = get_jiffies_64(); | 428 | deve->creation_time = get_jiffies_64(); |
429 | deve->attach_count++; | 429 | deve->attach_count++; |
430 | spin_unlock_irq(&nacl->device_list_lock); | 430 | spin_unlock_irq(&nacl->device_list_lock); |
431 | 431 | ||
432 | spin_lock_bh(&port->sep_alua_lock); | 432 | spin_lock_bh(&port->sep_alua_lock); |
433 | list_add_tail(&deve->alua_port_list, &port->sep_alua_list); | 433 | list_add_tail(&deve->alua_port_list, &port->sep_alua_list); |
434 | spin_unlock_bh(&port->sep_alua_lock); | 434 | spin_unlock_bh(&port->sep_alua_lock); |
435 | 435 | ||
436 | return 0; | 436 | return 0; |
437 | } | 437 | } |
438 | /* | 438 | /* |
439 | * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE | 439 | * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE |
440 | * PR operation to complete. | 440 | * PR operation to complete. |
441 | */ | 441 | */ |
442 | spin_unlock_irq(&nacl->device_list_lock); | 442 | spin_unlock_irq(&nacl->device_list_lock); |
443 | while (atomic_read(&deve->pr_ref_count) != 0) | 443 | while (atomic_read(&deve->pr_ref_count) != 0) |
444 | cpu_relax(); | 444 | cpu_relax(); |
445 | spin_lock_irq(&nacl->device_list_lock); | 445 | spin_lock_irq(&nacl->device_list_lock); |
446 | /* | 446 | /* |
447 | * Disable struct se_dev_entry LUN ACL mapping | 447 | * Disable struct se_dev_entry LUN ACL mapping |
448 | */ | 448 | */ |
449 | core_scsi3_ua_release_all(deve); | 449 | core_scsi3_ua_release_all(deve); |
450 | deve->se_lun = NULL; | 450 | deve->se_lun = NULL; |
451 | deve->se_lun_acl = NULL; | 451 | deve->se_lun_acl = NULL; |
452 | deve->lun_flags = 0; | 452 | deve->lun_flags = 0; |
453 | deve->creation_time = 0; | 453 | deve->creation_time = 0; |
454 | deve->attach_count--; | 454 | deve->attach_count--; |
455 | spin_unlock_irq(&nacl->device_list_lock); | 455 | spin_unlock_irq(&nacl->device_list_lock); |
456 | 456 | ||
457 | core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl); | 457 | core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl); |
458 | return 0; | 458 | return 0; |
459 | } | 459 | } |
460 | 460 | ||
461 | /* core_clear_lun_from_tpg(): | 461 | /* core_clear_lun_from_tpg(): |
462 | * | 462 | * |
463 | * | 463 | * |
464 | */ | 464 | */ |
465 | void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg) | 465 | void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg) |
466 | { | 466 | { |
467 | struct se_node_acl *nacl; | 467 | struct se_node_acl *nacl; |
468 | struct se_dev_entry *deve; | 468 | struct se_dev_entry *deve; |
469 | u32 i; | 469 | u32 i; |
470 | 470 | ||
471 | spin_lock_bh(&tpg->acl_node_lock); | 471 | spin_lock_bh(&tpg->acl_node_lock); |
472 | list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) { | 472 | list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) { |
473 | spin_unlock_bh(&tpg->acl_node_lock); | 473 | spin_unlock_bh(&tpg->acl_node_lock); |
474 | 474 | ||
475 | spin_lock_irq(&nacl->device_list_lock); | 475 | spin_lock_irq(&nacl->device_list_lock); |
476 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { | 476 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { |
477 | deve = &nacl->device_list[i]; | 477 | deve = &nacl->device_list[i]; |
478 | if (lun != deve->se_lun) | 478 | if (lun != deve->se_lun) |
479 | continue; | 479 | continue; |
480 | spin_unlock_irq(&nacl->device_list_lock); | 480 | spin_unlock_irq(&nacl->device_list_lock); |
481 | 481 | ||
482 | core_update_device_list_for_node(lun, NULL, | 482 | core_update_device_list_for_node(lun, NULL, |
483 | deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS, | 483 | deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS, |
484 | nacl, tpg, 0); | 484 | nacl, tpg, 0); |
485 | 485 | ||
486 | spin_lock_irq(&nacl->device_list_lock); | 486 | spin_lock_irq(&nacl->device_list_lock); |
487 | } | 487 | } |
488 | spin_unlock_irq(&nacl->device_list_lock); | 488 | spin_unlock_irq(&nacl->device_list_lock); |
489 | 489 | ||
490 | spin_lock_bh(&tpg->acl_node_lock); | 490 | spin_lock_bh(&tpg->acl_node_lock); |
491 | } | 491 | } |
492 | spin_unlock_bh(&tpg->acl_node_lock); | 492 | spin_unlock_bh(&tpg->acl_node_lock); |
493 | 493 | ||
494 | return; | 494 | return; |
495 | } | 495 | } |
496 | 496 | ||
497 | static struct se_port *core_alloc_port(struct se_device *dev) | 497 | static struct se_port *core_alloc_port(struct se_device *dev) |
498 | { | 498 | { |
499 | struct se_port *port, *port_tmp; | 499 | struct se_port *port, *port_tmp; |
500 | 500 | ||
501 | port = kzalloc(sizeof(struct se_port), GFP_KERNEL); | 501 | port = kzalloc(sizeof(struct se_port), GFP_KERNEL); |
502 | if (!(port)) { | 502 | if (!(port)) { |
503 | printk(KERN_ERR "Unable to allocate struct se_port\n"); | 503 | printk(KERN_ERR "Unable to allocate struct se_port\n"); |
504 | return NULL; | 504 | return NULL; |
505 | } | 505 | } |
506 | INIT_LIST_HEAD(&port->sep_alua_list); | 506 | INIT_LIST_HEAD(&port->sep_alua_list); |
507 | INIT_LIST_HEAD(&port->sep_list); | 507 | INIT_LIST_HEAD(&port->sep_list); |
508 | atomic_set(&port->sep_tg_pt_secondary_offline, 0); | 508 | atomic_set(&port->sep_tg_pt_secondary_offline, 0); |
509 | spin_lock_init(&port->sep_alua_lock); | 509 | spin_lock_init(&port->sep_alua_lock); |
510 | mutex_init(&port->sep_tg_pt_md_mutex); | 510 | mutex_init(&port->sep_tg_pt_md_mutex); |
511 | 511 | ||
512 | spin_lock(&dev->se_port_lock); | 512 | spin_lock(&dev->se_port_lock); |
513 | if (dev->dev_port_count == 0x0000ffff) { | 513 | if (dev->dev_port_count == 0x0000ffff) { |
514 | printk(KERN_WARNING "Reached dev->dev_port_count ==" | 514 | printk(KERN_WARNING "Reached dev->dev_port_count ==" |
515 | " 0x0000ffff\n"); | 515 | " 0x0000ffff\n"); |
516 | spin_unlock(&dev->se_port_lock); | 516 | spin_unlock(&dev->se_port_lock); |
517 | return NULL; | 517 | return NULL; |
518 | } | 518 | } |
519 | again: | 519 | again: |
520 | /* | 520 | /* |
521 | * Allocate the next RELATIVE TARGET PORT IDENTIFER for this struct se_device | 521 | * Allocate the next RELATIVE TARGET PORT IDENTIFER for this struct se_device |
522 | * Here is the table from spc4r17 section 7.7.3.8. | 522 | * Here is the table from spc4r17 section 7.7.3.8. |
523 | * | 523 | * |
524 | * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field | 524 | * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field |
525 | * | 525 | * |
526 | * Code Description | 526 | * Code Description |
527 | * 0h Reserved | 527 | * 0h Reserved |
528 | * 1h Relative port 1, historically known as port A | 528 | * 1h Relative port 1, historically known as port A |
529 | * 2h Relative port 2, historically known as port B | 529 | * 2h Relative port 2, historically known as port B |
530 | * 3h to FFFFh Relative port 3 through 65 535 | 530 | * 3h to FFFFh Relative port 3 through 65 535 |
531 | */ | 531 | */ |
532 | port->sep_rtpi = dev->dev_rpti_counter++; | 532 | port->sep_rtpi = dev->dev_rpti_counter++; |
533 | if (!(port->sep_rtpi)) | 533 | if (!(port->sep_rtpi)) |
534 | goto again; | 534 | goto again; |
535 | 535 | ||
536 | list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) { | 536 | list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) { |
537 | /* | 537 | /* |
538 | * Make sure RELATIVE TARGET PORT IDENTIFER is unique | 538 | * Make sure RELATIVE TARGET PORT IDENTIFER is unique |
539 | * for 16-bit wrap.. | 539 | * for 16-bit wrap.. |
540 | */ | 540 | */ |
541 | if (port->sep_rtpi == port_tmp->sep_rtpi) | 541 | if (port->sep_rtpi == port_tmp->sep_rtpi) |
542 | goto again; | 542 | goto again; |
543 | } | 543 | } |
544 | spin_unlock(&dev->se_port_lock); | 544 | spin_unlock(&dev->se_port_lock); |
545 | 545 | ||
546 | return port; | 546 | return port; |
547 | } | 547 | } |
548 | 548 | ||
549 | static void core_export_port( | 549 | static void core_export_port( |
550 | struct se_device *dev, | 550 | struct se_device *dev, |
551 | struct se_portal_group *tpg, | 551 | struct se_portal_group *tpg, |
552 | struct se_port *port, | 552 | struct se_port *port, |
553 | struct se_lun *lun) | 553 | struct se_lun *lun) |
554 | { | 554 | { |
555 | struct se_subsystem_dev *su_dev = SU_DEV(dev); | 555 | struct se_subsystem_dev *su_dev = SU_DEV(dev); |
556 | struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL; | 556 | struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL; |
557 | 557 | ||
558 | spin_lock(&dev->se_port_lock); | 558 | spin_lock(&dev->se_port_lock); |
559 | spin_lock(&lun->lun_sep_lock); | 559 | spin_lock(&lun->lun_sep_lock); |
560 | port->sep_tpg = tpg; | 560 | port->sep_tpg = tpg; |
561 | port->sep_lun = lun; | 561 | port->sep_lun = lun; |
562 | lun->lun_sep = port; | 562 | lun->lun_sep = port; |
563 | spin_unlock(&lun->lun_sep_lock); | 563 | spin_unlock(&lun->lun_sep_lock); |
564 | 564 | ||
565 | list_add_tail(&port->sep_list, &dev->dev_sep_list); | 565 | list_add_tail(&port->sep_list, &dev->dev_sep_list); |
566 | spin_unlock(&dev->se_port_lock); | 566 | spin_unlock(&dev->se_port_lock); |
567 | 567 | ||
568 | if (T10_ALUA(su_dev)->alua_type == SPC3_ALUA_EMULATED) { | 568 | if (T10_ALUA(su_dev)->alua_type == SPC3_ALUA_EMULATED) { |
569 | tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port); | 569 | tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port); |
570 | if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) { | 570 | if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) { |
571 | printk(KERN_ERR "Unable to allocate t10_alua_tg_pt" | 571 | printk(KERN_ERR "Unable to allocate t10_alua_tg_pt" |
572 | "_gp_member_t\n"); | 572 | "_gp_member_t\n"); |
573 | return; | 573 | return; |
574 | } | 574 | } |
575 | spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | 575 | spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); |
576 | __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, | 576 | __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, |
577 | T10_ALUA(su_dev)->default_tg_pt_gp); | 577 | T10_ALUA(su_dev)->default_tg_pt_gp); |
578 | spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); | 578 | spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); |
579 | printk(KERN_INFO "%s/%s: Adding to default ALUA Target Port" | 579 | printk(KERN_INFO "%s/%s: Adding to default ALUA Target Port" |
580 | " Group: alua/default_tg_pt_gp\n", | 580 | " Group: alua/default_tg_pt_gp\n", |
581 | TRANSPORT(dev)->name, TPG_TFO(tpg)->get_fabric_name()); | 581 | TRANSPORT(dev)->name, TPG_TFO(tpg)->get_fabric_name()); |
582 | } | 582 | } |
583 | 583 | ||
584 | dev->dev_port_count++; | 584 | dev->dev_port_count++; |
585 | port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFER */ | 585 | port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFER */ |
586 | } | 586 | } |
587 | 587 | ||
588 | /* | 588 | /* |
589 | * Called with struct se_device->se_port_lock spinlock held. | 589 | * Called with struct se_device->se_port_lock spinlock held. |
590 | */ | 590 | */ |
591 | static void core_release_port(struct se_device *dev, struct se_port *port) | 591 | static void core_release_port(struct se_device *dev, struct se_port *port) |
592 | { | 592 | { |
593 | /* | 593 | /* |
594 | * Wait for any port reference for PR ALL_TG_PT=1 operation | 594 | * Wait for any port reference for PR ALL_TG_PT=1 operation |
595 | * to complete in __core_scsi3_alloc_registration() | 595 | * to complete in __core_scsi3_alloc_registration() |
596 | */ | 596 | */ |
597 | spin_unlock(&dev->se_port_lock); | 597 | spin_unlock(&dev->se_port_lock); |
598 | if (atomic_read(&port->sep_tg_pt_ref_cnt)) | 598 | if (atomic_read(&port->sep_tg_pt_ref_cnt)) |
599 | cpu_relax(); | 599 | cpu_relax(); |
600 | spin_lock(&dev->se_port_lock); | 600 | spin_lock(&dev->se_port_lock); |
601 | 601 | ||
602 | core_alua_free_tg_pt_gp_mem(port); | 602 | core_alua_free_tg_pt_gp_mem(port); |
603 | 603 | ||
604 | list_del(&port->sep_list); | 604 | list_del(&port->sep_list); |
605 | dev->dev_port_count--; | 605 | dev->dev_port_count--; |
606 | kfree(port); | 606 | kfree(port); |
607 | 607 | ||
608 | return; | 608 | return; |
609 | } | 609 | } |
610 | 610 | ||
611 | int core_dev_export( | 611 | int core_dev_export( |
612 | struct se_device *dev, | 612 | struct se_device *dev, |
613 | struct se_portal_group *tpg, | 613 | struct se_portal_group *tpg, |
614 | struct se_lun *lun) | 614 | struct se_lun *lun) |
615 | { | 615 | { |
616 | struct se_port *port; | 616 | struct se_port *port; |
617 | 617 | ||
618 | port = core_alloc_port(dev); | 618 | port = core_alloc_port(dev); |
619 | if (!(port)) | 619 | if (!(port)) |
620 | return -1; | 620 | return -1; |
621 | 621 | ||
622 | lun->lun_se_dev = dev; | 622 | lun->lun_se_dev = dev; |
623 | se_dev_start(dev); | 623 | se_dev_start(dev); |
624 | 624 | ||
625 | atomic_inc(&dev->dev_export_obj.obj_access_count); | 625 | atomic_inc(&dev->dev_export_obj.obj_access_count); |
626 | core_export_port(dev, tpg, port, lun); | 626 | core_export_port(dev, tpg, port, lun); |
627 | return 0; | 627 | return 0; |
628 | } | 628 | } |
629 | 629 | ||
630 | void core_dev_unexport( | 630 | void core_dev_unexport( |
631 | struct se_device *dev, | 631 | struct se_device *dev, |
632 | struct se_portal_group *tpg, | 632 | struct se_portal_group *tpg, |
633 | struct se_lun *lun) | 633 | struct se_lun *lun) |
634 | { | 634 | { |
635 | struct se_port *port = lun->lun_sep; | 635 | struct se_port *port = lun->lun_sep; |
636 | 636 | ||
637 | spin_lock(&lun->lun_sep_lock); | 637 | spin_lock(&lun->lun_sep_lock); |
638 | if (lun->lun_se_dev == NULL) { | 638 | if (lun->lun_se_dev == NULL) { |
639 | spin_unlock(&lun->lun_sep_lock); | 639 | spin_unlock(&lun->lun_sep_lock); |
640 | return; | 640 | return; |
641 | } | 641 | } |
642 | spin_unlock(&lun->lun_sep_lock); | 642 | spin_unlock(&lun->lun_sep_lock); |
643 | 643 | ||
644 | spin_lock(&dev->se_port_lock); | 644 | spin_lock(&dev->se_port_lock); |
645 | atomic_dec(&dev->dev_export_obj.obj_access_count); | 645 | atomic_dec(&dev->dev_export_obj.obj_access_count); |
646 | core_release_port(dev, port); | 646 | core_release_port(dev, port); |
647 | spin_unlock(&dev->se_port_lock); | 647 | spin_unlock(&dev->se_port_lock); |
648 | 648 | ||
649 | se_dev_stop(dev); | 649 | se_dev_stop(dev); |
650 | lun->lun_se_dev = NULL; | 650 | lun->lun_se_dev = NULL; |
651 | } | 651 | } |
652 | 652 | ||
653 | int transport_core_report_lun_response(struct se_cmd *se_cmd) | 653 | int transport_core_report_lun_response(struct se_cmd *se_cmd) |
654 | { | 654 | { |
655 | struct se_dev_entry *deve; | 655 | struct se_dev_entry *deve; |
656 | struct se_lun *se_lun; | 656 | struct se_lun *se_lun; |
657 | struct se_session *se_sess = SE_SESS(se_cmd); | 657 | struct se_session *se_sess = SE_SESS(se_cmd); |
658 | struct se_task *se_task; | 658 | struct se_task *se_task; |
659 | unsigned char *buf = (unsigned char *)T_TASK(se_cmd)->t_task_buf; | 659 | unsigned char *buf = (unsigned char *)T_TASK(se_cmd)->t_task_buf; |
660 | u32 cdb_offset = 0, lun_count = 0, offset = 8; | 660 | u32 cdb_offset = 0, lun_count = 0, offset = 8; |
661 | u64 i, lun; | 661 | u64 i, lun; |
662 | 662 | ||
663 | list_for_each_entry(se_task, &T_TASK(se_cmd)->t_task_list, t_list) | 663 | list_for_each_entry(se_task, &T_TASK(se_cmd)->t_task_list, t_list) |
664 | break; | 664 | break; |
665 | 665 | ||
666 | if (!(se_task)) { | 666 | if (!(se_task)) { |
667 | printk(KERN_ERR "Unable to locate struct se_task for struct se_cmd\n"); | 667 | printk(KERN_ERR "Unable to locate struct se_task for struct se_cmd\n"); |
668 | return PYX_TRANSPORT_LU_COMM_FAILURE; | 668 | return PYX_TRANSPORT_LU_COMM_FAILURE; |
669 | } | 669 | } |
670 | 670 | ||
671 | /* | 671 | /* |
672 | * If no struct se_session pointer is present, this struct se_cmd is | 672 | * If no struct se_session pointer is present, this struct se_cmd is |
673 | * coming via a target_core_mod PASSTHROUGH op, and not through | 673 | * coming via a target_core_mod PASSTHROUGH op, and not through |
674 | * a $FABRIC_MOD. In that case, report LUN=0 only. | 674 | * a $FABRIC_MOD. In that case, report LUN=0 only. |
675 | */ | 675 | */ |
676 | if (!(se_sess)) { | 676 | if (!(se_sess)) { |
677 | lun = 0; | 677 | lun = 0; |
678 | buf[offset++] = ((lun >> 56) & 0xff); | 678 | buf[offset++] = ((lun >> 56) & 0xff); |
679 | buf[offset++] = ((lun >> 48) & 0xff); | 679 | buf[offset++] = ((lun >> 48) & 0xff); |
680 | buf[offset++] = ((lun >> 40) & 0xff); | 680 | buf[offset++] = ((lun >> 40) & 0xff); |
681 | buf[offset++] = ((lun >> 32) & 0xff); | 681 | buf[offset++] = ((lun >> 32) & 0xff); |
682 | buf[offset++] = ((lun >> 24) & 0xff); | 682 | buf[offset++] = ((lun >> 24) & 0xff); |
683 | buf[offset++] = ((lun >> 16) & 0xff); | 683 | buf[offset++] = ((lun >> 16) & 0xff); |
684 | buf[offset++] = ((lun >> 8) & 0xff); | 684 | buf[offset++] = ((lun >> 8) & 0xff); |
685 | buf[offset++] = (lun & 0xff); | 685 | buf[offset++] = (lun & 0xff); |
686 | lun_count = 1; | 686 | lun_count = 1; |
687 | goto done; | 687 | goto done; |
688 | } | 688 | } |
689 | 689 | ||
690 | spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); | 690 | spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); |
691 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { | 691 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { |
692 | deve = &SE_NODE_ACL(se_sess)->device_list[i]; | 692 | deve = &SE_NODE_ACL(se_sess)->device_list[i]; |
693 | if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) | 693 | if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) |
694 | continue; | 694 | continue; |
695 | se_lun = deve->se_lun; | 695 | se_lun = deve->se_lun; |
696 | /* | 696 | /* |
697 | * We determine the correct LUN LIST LENGTH even once we | 697 | * We determine the correct LUN LIST LENGTH even once we |
698 | * have reached the initial allocation length. | 698 | * have reached the initial allocation length. |
699 | * See SPC2-R20 7.19. | 699 | * See SPC2-R20 7.19. |
700 | */ | 700 | */ |
701 | lun_count++; | 701 | lun_count++; |
702 | if ((cdb_offset + 8) >= se_cmd->data_length) | 702 | if ((cdb_offset + 8) >= se_cmd->data_length) |
703 | continue; | 703 | continue; |
704 | 704 | ||
705 | lun = cpu_to_be64(CMD_TFO(se_cmd)->pack_lun(deve->mapped_lun)); | 705 | lun = cpu_to_be64(CMD_TFO(se_cmd)->pack_lun(deve->mapped_lun)); |
706 | buf[offset++] = ((lun >> 56) & 0xff); | 706 | buf[offset++] = ((lun >> 56) & 0xff); |
707 | buf[offset++] = ((lun >> 48) & 0xff); | 707 | buf[offset++] = ((lun >> 48) & 0xff); |
708 | buf[offset++] = ((lun >> 40) & 0xff); | 708 | buf[offset++] = ((lun >> 40) & 0xff); |
709 | buf[offset++] = ((lun >> 32) & 0xff); | 709 | buf[offset++] = ((lun >> 32) & 0xff); |
710 | buf[offset++] = ((lun >> 24) & 0xff); | 710 | buf[offset++] = ((lun >> 24) & 0xff); |
711 | buf[offset++] = ((lun >> 16) & 0xff); | 711 | buf[offset++] = ((lun >> 16) & 0xff); |
712 | buf[offset++] = ((lun >> 8) & 0xff); | 712 | buf[offset++] = ((lun >> 8) & 0xff); |
713 | buf[offset++] = (lun & 0xff); | 713 | buf[offset++] = (lun & 0xff); |
714 | cdb_offset += 8; | 714 | cdb_offset += 8; |
715 | } | 715 | } |
716 | spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); | 716 | spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); |
717 | 717 | ||
718 | /* | 718 | /* |
719 | * See SPC3 r07, page 159. | 719 | * See SPC3 r07, page 159. |
720 | */ | 720 | */ |
721 | done: | 721 | done: |
722 | lun_count *= 8; | 722 | lun_count *= 8; |
723 | buf[0] = ((lun_count >> 24) & 0xff); | 723 | buf[0] = ((lun_count >> 24) & 0xff); |
724 | buf[1] = ((lun_count >> 16) & 0xff); | 724 | buf[1] = ((lun_count >> 16) & 0xff); |
725 | buf[2] = ((lun_count >> 8) & 0xff); | 725 | buf[2] = ((lun_count >> 8) & 0xff); |
726 | buf[3] = (lun_count & 0xff); | 726 | buf[3] = (lun_count & 0xff); |
727 | 727 | ||
728 | return PYX_TRANSPORT_SENT_TO_TRANSPORT; | 728 | return PYX_TRANSPORT_SENT_TO_TRANSPORT; |
729 | } | 729 | } |
730 | 730 | ||
731 | /* se_release_device_for_hba(): | 731 | /* se_release_device_for_hba(): |
732 | * | 732 | * |
733 | * | 733 | * |
734 | */ | 734 | */ |
735 | void se_release_device_for_hba(struct se_device *dev) | 735 | void se_release_device_for_hba(struct se_device *dev) |
736 | { | 736 | { |
737 | struct se_hba *hba = dev->se_hba; | 737 | struct se_hba *hba = dev->se_hba; |
738 | 738 | ||
739 | if ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) || | 739 | if ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) || |
740 | (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) || | 740 | (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) || |
741 | (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) || | 741 | (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) || |
742 | (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_ACTIVATED) || | 742 | (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_ACTIVATED) || |
743 | (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_DEACTIVATED)) | 743 | (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_DEACTIVATED)) |
744 | se_dev_stop(dev); | 744 | se_dev_stop(dev); |
745 | 745 | ||
746 | if (dev->dev_ptr) { | 746 | if (dev->dev_ptr) { |
747 | kthread_stop(dev->process_thread); | 747 | kthread_stop(dev->process_thread); |
748 | if (dev->transport->free_device) | 748 | if (dev->transport->free_device) |
749 | dev->transport->free_device(dev->dev_ptr); | 749 | dev->transport->free_device(dev->dev_ptr); |
750 | } | 750 | } |
751 | 751 | ||
752 | spin_lock(&hba->device_lock); | 752 | spin_lock(&hba->device_lock); |
753 | list_del(&dev->dev_list); | 753 | list_del(&dev->dev_list); |
754 | hba->dev_count--; | 754 | hba->dev_count--; |
755 | spin_unlock(&hba->device_lock); | 755 | spin_unlock(&hba->device_lock); |
756 | 756 | ||
757 | core_scsi3_free_all_registrations(dev); | 757 | core_scsi3_free_all_registrations(dev); |
758 | se_release_vpd_for_dev(dev); | 758 | se_release_vpd_for_dev(dev); |
759 | 759 | ||
760 | kfree(dev->dev_status_queue_obj); | 760 | kfree(dev->dev_status_queue_obj); |
761 | kfree(dev->dev_queue_obj); | 761 | kfree(dev->dev_queue_obj); |
762 | kfree(dev); | 762 | kfree(dev); |
763 | 763 | ||
764 | return; | 764 | return; |
765 | } | 765 | } |
766 | 766 | ||
767 | void se_release_vpd_for_dev(struct se_device *dev) | 767 | void se_release_vpd_for_dev(struct se_device *dev) |
768 | { | 768 | { |
769 | struct t10_vpd *vpd, *vpd_tmp; | 769 | struct t10_vpd *vpd, *vpd_tmp; |
770 | 770 | ||
771 | spin_lock(&DEV_T10_WWN(dev)->t10_vpd_lock); | 771 | spin_lock(&DEV_T10_WWN(dev)->t10_vpd_lock); |
772 | list_for_each_entry_safe(vpd, vpd_tmp, | 772 | list_for_each_entry_safe(vpd, vpd_tmp, |
773 | &DEV_T10_WWN(dev)->t10_vpd_list, vpd_list) { | 773 | &DEV_T10_WWN(dev)->t10_vpd_list, vpd_list) { |
774 | list_del(&vpd->vpd_list); | 774 | list_del(&vpd->vpd_list); |
775 | kfree(vpd); | 775 | kfree(vpd); |
776 | } | 776 | } |
777 | spin_unlock(&DEV_T10_WWN(dev)->t10_vpd_lock); | 777 | spin_unlock(&DEV_T10_WWN(dev)->t10_vpd_lock); |
778 | 778 | ||
779 | return; | 779 | return; |
780 | } | 780 | } |
781 | 781 | ||
782 | /* | ||
783 | * Called with struct se_hba->device_lock held. | ||
784 | */ | ||
785 | void se_clear_dev_ports(struct se_device *dev) | ||
786 | { | ||
787 | struct se_hba *hba = dev->se_hba; | ||
788 | struct se_lun *lun; | ||
789 | struct se_portal_group *tpg; | ||
790 | struct se_port *sep, *sep_tmp; | ||
791 | |||
792 | spin_lock(&dev->se_port_lock); | ||
793 | list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) { | ||
794 | spin_unlock(&dev->se_port_lock); | ||
795 | spin_unlock(&hba->device_lock); | ||
796 | |||
797 | lun = sep->sep_lun; | ||
798 | tpg = sep->sep_tpg; | ||
799 | spin_lock(&lun->lun_sep_lock); | ||
800 | if (lun->lun_se_dev == NULL) { | ||
801 | spin_unlock(&lun->lun_sep_lock); | ||
802 | continue; | ||
803 | } | ||
804 | spin_unlock(&lun->lun_sep_lock); | ||
805 | |||
806 | core_dev_del_lun(tpg, lun->unpacked_lun); | ||
807 | |||
808 | spin_lock(&hba->device_lock); | ||
809 | spin_lock(&dev->se_port_lock); | ||
810 | } | ||
811 | spin_unlock(&dev->se_port_lock); | ||
812 | |||
813 | return; | ||
814 | } | ||
815 | |||
816 | /* se_free_virtual_device(): | 782 | /* se_free_virtual_device(): |
817 | * | 783 | * |
818 | * Used for IBLOCK, RAMDISK, and FILEIO Transport Drivers. | 784 | * Used for IBLOCK, RAMDISK, and FILEIO Transport Drivers. |
819 | */ | 785 | */ |
820 | int se_free_virtual_device(struct se_device *dev, struct se_hba *hba) | 786 | int se_free_virtual_device(struct se_device *dev, struct se_hba *hba) |
821 | { | 787 | { |
822 | spin_lock(&hba->device_lock); | 788 | if (!list_empty(&dev->dev_sep_list)) |
823 | se_clear_dev_ports(dev); | 789 | dump_stack(); |
824 | spin_unlock(&hba->device_lock); | ||
825 | 790 | ||
826 | core_alua_free_lu_gp_mem(dev); | 791 | core_alua_free_lu_gp_mem(dev); |
827 | se_release_device_for_hba(dev); | 792 | se_release_device_for_hba(dev); |
828 | 793 | ||
829 | return 0; | 794 | return 0; |
830 | } | 795 | } |
831 | 796 | ||
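The replacement body above keeps only a sanity check: any struct se_port still on dev_sep_list at this point means objects were released out of order, and dump_stack() captures the offending call chain. A minimal sketch of the same idiom, assuming only <linux/kernel.h> and <linux/list.h>:

#include <linux/kernel.h>
#include <linux/list.h>

/* Complain with a backtrace if a list that must be empty at
 * teardown still has entries. */
static inline void warn_if_populated(struct list_head *head, const char *what)
{
	if (!list_empty(head)) {
		printk(KERN_ERR "%s still populated at release\n", what);
		dump_stack();
	}
}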
832 | static void se_dev_start(struct se_device *dev) | 797 | static void se_dev_start(struct se_device *dev) |
833 | { | 798 | { |
834 | struct se_hba *hba = dev->se_hba; | 799 | struct se_hba *hba = dev->se_hba; |
835 | 800 | ||
836 | spin_lock(&hba->device_lock); | 801 | spin_lock(&hba->device_lock); |
837 | atomic_inc(&dev->dev_obj.obj_access_count); | 802 | atomic_inc(&dev->dev_obj.obj_access_count); |
838 | if (atomic_read(&dev->dev_obj.obj_access_count) == 1) { | 803 | if (atomic_read(&dev->dev_obj.obj_access_count) == 1) { |
839 | if (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) { | 804 | if (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) { |
840 | dev->dev_status &= ~TRANSPORT_DEVICE_DEACTIVATED; | 805 | dev->dev_status &= ~TRANSPORT_DEVICE_DEACTIVATED; |
841 | dev->dev_status |= TRANSPORT_DEVICE_ACTIVATED; | 806 | dev->dev_status |= TRANSPORT_DEVICE_ACTIVATED; |
842 | } else if (dev->dev_status & | 807 | } else if (dev->dev_status & |
843 | TRANSPORT_DEVICE_OFFLINE_DEACTIVATED) { | 808 | TRANSPORT_DEVICE_OFFLINE_DEACTIVATED) { |
844 | dev->dev_status &= | 809 | dev->dev_status &= |
845 | ~TRANSPORT_DEVICE_OFFLINE_DEACTIVATED; | 810 | ~TRANSPORT_DEVICE_OFFLINE_DEACTIVATED; |
846 | dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_ACTIVATED; | 811 | dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_ACTIVATED; |
847 | } | 812 | } |
848 | } | 813 | } |
849 | spin_unlock(&hba->device_lock); | 814 | spin_unlock(&hba->device_lock); |
850 | } | 815 | } |
851 | 816 | ||
852 | static void se_dev_stop(struct se_device *dev) | 817 | static void se_dev_stop(struct se_device *dev) |
853 | { | 818 | { |
854 | struct se_hba *hba = dev->se_hba; | 819 | struct se_hba *hba = dev->se_hba; |
855 | 820 | ||
856 | spin_lock(&hba->device_lock); | 821 | spin_lock(&hba->device_lock); |
857 | atomic_dec(&dev->dev_obj.obj_access_count); | 822 | atomic_dec(&dev->dev_obj.obj_access_count); |
858 | if (atomic_read(&dev->dev_obj.obj_access_count) == 0) { | 823 | if (atomic_read(&dev->dev_obj.obj_access_count) == 0) { |
859 | if (dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) { | 824 | if (dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) { |
860 | dev->dev_status &= ~TRANSPORT_DEVICE_ACTIVATED; | 825 | dev->dev_status &= ~TRANSPORT_DEVICE_ACTIVATED; |
861 | dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED; | 826 | dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED; |
862 | } else if (dev->dev_status & | 827 | } else if (dev->dev_status & |
863 | TRANSPORT_DEVICE_OFFLINE_ACTIVATED) { | 828 | TRANSPORT_DEVICE_OFFLINE_ACTIVATED) { |
864 | dev->dev_status &= ~TRANSPORT_DEVICE_OFFLINE_ACTIVATED; | 829 | dev->dev_status &= ~TRANSPORT_DEVICE_OFFLINE_ACTIVATED; |
865 | dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_DEACTIVATED; | 830 | dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_DEACTIVATED; |
866 | } | 831 | } |
867 | } | 832 | } |
868 | spin_unlock(&hba->device_lock); | 833 | spin_unlock(&hba->device_lock); |
869 | } | 834 | } |
870 | 835 | ||
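Taken together, se_dev_start() and se_dev_stop() implement a refcount-gated state machine: the first reference promotes DEACTIVATED to ACTIVATED (or the OFFLINE pair likewise), and dropping the last reference demotes it again. A hypothetical stand-alone model of those transitions, with flag names local to this sketch rather than the kernel's:

enum { ACT = 1, DEACT = 2, OFF_ACT = 4, OFF_DEACT = 8 };

static unsigned int status = DEACT;
static int access_count;

static void model_start(void)
{
	if (++access_count == 1) {	/* first reference */
		if (status & DEACT)
			status = ACT;
		else if (status & OFF_DEACT)
			status = OFF_ACT;
	}
}

static void model_stop(void)
{
	if (--access_count == 0) {	/* last reference */
		if (status & ACT)
			status = DEACT;
		else if (status & OFF_ACT)
			status = OFF_DEACT;
	}
}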
871 | int se_dev_check_online(struct se_device *dev) | 836 | int se_dev_check_online(struct se_device *dev) |
872 | { | 837 | { |
873 | int ret; | 838 | int ret; |
874 | 839 | ||
875 | spin_lock_irq(&dev->dev_status_lock); | 840 | spin_lock_irq(&dev->dev_status_lock); |
876 | ret = ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) || | 841 | ret = ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) || |
877 | (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED)) ? 0 : 1; | 842 | (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED)) ? 0 : 1; |
878 | spin_unlock_irq(&dev->dev_status_lock); | 843 | spin_unlock_irq(&dev->dev_status_lock); |
879 | 844 | ||
880 | return ret; | 845 | return ret; |
881 | } | 846 | } |
882 | 847 | ||
883 | int se_dev_check_shutdown(struct se_device *dev) | 848 | int se_dev_check_shutdown(struct se_device *dev) |
884 | { | 849 | { |
885 | int ret; | 850 | int ret; |
886 | 851 | ||
887 | spin_lock_irq(&dev->dev_status_lock); | 852 | spin_lock_irq(&dev->dev_status_lock); |
888 | ret = (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN); | 853 | ret = (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN); |
889 | spin_unlock_irq(&dev->dev_status_lock); | 854 | spin_unlock_irq(&dev->dev_status_lock); |
890 | 855 | ||
891 | return ret; | 856 | return ret; |
892 | } | 857 | } |
893 | 858 | ||
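Note the return convention in the two checks above: se_dev_check_online() returns 0 when the device is in either the ACTIVATED or DEACTIVATED state (i.e. usable) and 1 otherwise, while se_dev_check_shutdown() returns non-zero once SHUTDOWN is set. A hedged caller sketch, with the errno chosen purely for illustration:

	if (se_dev_check_online(dev) != 0)
		return -ENODEV;		/* illustrative errno: device offline */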
894 | void se_dev_set_default_attribs( | 859 | void se_dev_set_default_attribs( |
895 | struct se_device *dev, | 860 | struct se_device *dev, |
896 | struct se_dev_limits *dev_limits) | 861 | struct se_dev_limits *dev_limits) |
897 | { | 862 | { |
898 | struct queue_limits *limits = &dev_limits->limits; | 863 | struct queue_limits *limits = &dev_limits->limits; |
899 | 864 | ||
900 | DEV_ATTRIB(dev)->emulate_dpo = DA_EMULATE_DPO; | 865 | DEV_ATTRIB(dev)->emulate_dpo = DA_EMULATE_DPO; |
901 | DEV_ATTRIB(dev)->emulate_fua_write = DA_EMULATE_FUA_WRITE; | 866 | DEV_ATTRIB(dev)->emulate_fua_write = DA_EMULATE_FUA_WRITE; |
902 | DEV_ATTRIB(dev)->emulate_fua_read = DA_EMULATE_FUA_READ; | 867 | DEV_ATTRIB(dev)->emulate_fua_read = DA_EMULATE_FUA_READ; |
903 | DEV_ATTRIB(dev)->emulate_write_cache = DA_EMULATE_WRITE_CACHE; | 868 | DEV_ATTRIB(dev)->emulate_write_cache = DA_EMULATE_WRITE_CACHE; |
904 | DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL; | 869 | DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL; |
905 | DEV_ATTRIB(dev)->emulate_tas = DA_EMULATE_TAS; | 870 | DEV_ATTRIB(dev)->emulate_tas = DA_EMULATE_TAS; |
906 | DEV_ATTRIB(dev)->emulate_tpu = DA_EMULATE_TPU; | 871 | DEV_ATTRIB(dev)->emulate_tpu = DA_EMULATE_TPU; |
907 | DEV_ATTRIB(dev)->emulate_tpws = DA_EMULATE_TPWS; | 872 | DEV_ATTRIB(dev)->emulate_tpws = DA_EMULATE_TPWS; |
908 | DEV_ATTRIB(dev)->emulate_reservations = DA_EMULATE_RESERVATIONS; | 873 | DEV_ATTRIB(dev)->emulate_reservations = DA_EMULATE_RESERVATIONS; |
909 | DEV_ATTRIB(dev)->emulate_alua = DA_EMULATE_ALUA; | 874 | DEV_ATTRIB(dev)->emulate_alua = DA_EMULATE_ALUA; |
910 | DEV_ATTRIB(dev)->enforce_pr_isids = DA_ENFORCE_PR_ISIDS; | 875 | DEV_ATTRIB(dev)->enforce_pr_isids = DA_ENFORCE_PR_ISIDS; |
911 | /* | 876 | /* |
912 | * The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK | 877 | * The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK |
913 | * iblock_create_virtdevice() from struct queue_limits values | 878 | * iblock_create_virtdevice() from struct queue_limits values |
914 | * if blk_queue_discard()==1 | 879 | * if blk_queue_discard()==1 |
915 | */ | 880 | */ |
916 | DEV_ATTRIB(dev)->max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT; | 881 | DEV_ATTRIB(dev)->max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT; |
917 | DEV_ATTRIB(dev)->max_unmap_block_desc_count = | 882 | DEV_ATTRIB(dev)->max_unmap_block_desc_count = |
918 | DA_MAX_UNMAP_BLOCK_DESC_COUNT; | 883 | DA_MAX_UNMAP_BLOCK_DESC_COUNT; |
919 | DEV_ATTRIB(dev)->unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT; | 884 | DEV_ATTRIB(dev)->unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT; |
920 | DEV_ATTRIB(dev)->unmap_granularity_alignment = | 885 | DEV_ATTRIB(dev)->unmap_granularity_alignment = |
921 | DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT; | 886 | DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT; |
922 | /* | 887 | /* |
923 | * block_size is based on subsystem plugin dependent requirements. | 888 | * block_size is based on subsystem plugin dependent requirements. |
924 | */ | 889 | */ |
925 | DEV_ATTRIB(dev)->hw_block_size = limits->logical_block_size; | 890 | DEV_ATTRIB(dev)->hw_block_size = limits->logical_block_size; |
926 | DEV_ATTRIB(dev)->block_size = limits->logical_block_size; | 891 | DEV_ATTRIB(dev)->block_size = limits->logical_block_size; |
927 | /* | 892 | /* |
928 | * max_sectors is based on subsystem plugin dependent requirements. | 893 | * max_sectors is based on subsystem plugin dependent requirements. |
929 | */ | 894 | */ |
930 | DEV_ATTRIB(dev)->hw_max_sectors = limits->max_hw_sectors; | 895 | DEV_ATTRIB(dev)->hw_max_sectors = limits->max_hw_sectors; |
931 | DEV_ATTRIB(dev)->max_sectors = limits->max_sectors; | 896 | DEV_ATTRIB(dev)->max_sectors = limits->max_sectors; |
932 | /* | 897 | /* |
933 | * Set optimal_sectors from max_sectors, which can be lowered via | 898 | * Set optimal_sectors from max_sectors, which can be lowered via |
934 | * configfs. | 899 | * configfs. |
935 | */ | 900 | */ |
936 | DEV_ATTRIB(dev)->optimal_sectors = limits->max_sectors; | 901 | DEV_ATTRIB(dev)->optimal_sectors = limits->max_sectors; |
937 | /* | 902 | /* |
938 | * queue_depth is based on subsystem plugin dependent requirements. | 903 | * queue_depth is based on subsystem plugin dependent requirements. |
939 | */ | 904 | */ |
940 | DEV_ATTRIB(dev)->hw_queue_depth = dev_limits->hw_queue_depth; | 905 | DEV_ATTRIB(dev)->hw_queue_depth = dev_limits->hw_queue_depth; |
941 | DEV_ATTRIB(dev)->queue_depth = dev_limits->queue_depth; | 906 | DEV_ATTRIB(dev)->queue_depth = dev_limits->queue_depth; |
942 | } | 907 | } |
943 | 908 | ||
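se_dev_set_default_attribs() only copies values out of struct se_dev_limits, so a backend is expected to populate that structure from its request queue first. A hypothetical call site, assuming the block-layer accessors of this era and made-up queue depths:

#include <linux/blkdev.h>
#include <linux/string.h>

static void example_set_attribs(struct se_device *dev, struct request_queue *q)
{
	struct se_dev_limits dev_limits;

	memset(&dev_limits, 0, sizeof(dev_limits));
	dev_limits.limits.logical_block_size = queue_logical_block_size(q);
	dev_limits.limits.max_hw_sectors = queue_max_hw_sectors(q);
	dev_limits.limits.max_sectors = queue_max_sectors(q);
	dev_limits.hw_queue_depth = 128;	/* assumed backend value */
	dev_limits.queue_depth = 32;		/* assumed backend value */

	se_dev_set_default_attribs(dev, &dev_limits);
}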
944 | int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout) | 909 | int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout) |
945 | { | 910 | { |
946 | if (task_timeout > DA_TASK_TIMEOUT_MAX) { | 911 | if (task_timeout > DA_TASK_TIMEOUT_MAX) { |
947 | printk(KERN_ERR "dev[%p]: Passed task_timeout: %u larger than" | 912 | printk(KERN_ERR "dev[%p]: Passed task_timeout: %u larger than" |
948 | " DA_TASK_TIMEOUT_MAX\n", dev, task_timeout); | 913 | " DA_TASK_TIMEOUT_MAX\n", dev, task_timeout); |
949 | return -1; | 914 | return -1; |
950 | } else { | 915 | } else { |
951 | DEV_ATTRIB(dev)->task_timeout = task_timeout; | 916 | DEV_ATTRIB(dev)->task_timeout = task_timeout; |
952 | printk(KERN_INFO "dev[%p]: Set SE Device task_timeout: %u\n", | 917 | printk(KERN_INFO "dev[%p]: Set SE Device task_timeout: %u\n", |
953 | dev, task_timeout); | 918 | dev, task_timeout); |
954 | } | 919 | } |
955 | 920 | ||
956 | return 0; | 921 | return 0; |
957 | } | 922 | } |
958 | 923 | ||
959 | int se_dev_set_max_unmap_lba_count( | 924 | int se_dev_set_max_unmap_lba_count( |
960 | struct se_device *dev, | 925 | struct se_device *dev, |
961 | u32 max_unmap_lba_count) | 926 | u32 max_unmap_lba_count) |
962 | { | 927 | { |
963 | DEV_ATTRIB(dev)->max_unmap_lba_count = max_unmap_lba_count; | 928 | DEV_ATTRIB(dev)->max_unmap_lba_count = max_unmap_lba_count; |
964 | printk(KERN_INFO "dev[%p]: Set max_unmap_lba_count: %u\n", | 929 | printk(KERN_INFO "dev[%p]: Set max_unmap_lba_count: %u\n", |
965 | dev, DEV_ATTRIB(dev)->max_unmap_lba_count); | 930 | dev, DEV_ATTRIB(dev)->max_unmap_lba_count); |
966 | return 0; | 931 | return 0; |
967 | } | 932 | } |
968 | 933 | ||
969 | int se_dev_set_max_unmap_block_desc_count( | 934 | int se_dev_set_max_unmap_block_desc_count( |
970 | struct se_device *dev, | 935 | struct se_device *dev, |
971 | u32 max_unmap_block_desc_count) | 936 | u32 max_unmap_block_desc_count) |
972 | { | 937 | { |
973 | DEV_ATTRIB(dev)->max_unmap_block_desc_count = max_unmap_block_desc_count; | 938 | DEV_ATTRIB(dev)->max_unmap_block_desc_count = max_unmap_block_desc_count; |
974 | printk(KERN_INFO "dev[%p]: Set max_unmap_block_desc_count: %u\n", | 939 | printk(KERN_INFO "dev[%p]: Set max_unmap_block_desc_count: %u\n", |
975 | dev, DEV_ATTRIB(dev)->max_unmap_block_desc_count); | 940 | dev, DEV_ATTRIB(dev)->max_unmap_block_desc_count); |
976 | return 0; | 941 | return 0; |
977 | } | 942 | } |
978 | 943 | ||
979 | int se_dev_set_unmap_granularity( | 944 | int se_dev_set_unmap_granularity( |
980 | struct se_device *dev, | 945 | struct se_device *dev, |
981 | u32 unmap_granularity) | 946 | u32 unmap_granularity) |
982 | { | 947 | { |
983 | DEV_ATTRIB(dev)->unmap_granularity = unmap_granularity; | 948 | DEV_ATTRIB(dev)->unmap_granularity = unmap_granularity; |
984 | printk(KERN_INFO "dev[%p]: Set unmap_granularity: %u\n", | 949 | printk(KERN_INFO "dev[%p]: Set unmap_granularity: %u\n", |
985 | dev, DEV_ATTRIB(dev)->unmap_granularity); | 950 | dev, DEV_ATTRIB(dev)->unmap_granularity); |
986 | return 0; | 951 | return 0; |
987 | } | 952 | } |
988 | 953 | ||
989 | int se_dev_set_unmap_granularity_alignment( | 954 | int se_dev_set_unmap_granularity_alignment( |
990 | struct se_device *dev, | 955 | struct se_device *dev, |
991 | u32 unmap_granularity_alignment) | 956 | u32 unmap_granularity_alignment) |
992 | { | 957 | { |
993 | DEV_ATTRIB(dev)->unmap_granularity_alignment = unmap_granularity_alignment; | 958 | DEV_ATTRIB(dev)->unmap_granularity_alignment = unmap_granularity_alignment; |
994 | printk(KERN_INFO "dev[%p]: Set unmap_granularity_alignment: %u\n", | 959 | printk(KERN_INFO "dev[%p]: Set unmap_granularity_alignment: %u\n", |
995 | dev, DEV_ATTRIB(dev)->unmap_granularity_alignment); | 960 | dev, DEV_ATTRIB(dev)->unmap_granularity_alignment); |
996 | return 0; | 961 | return 0; |
997 | } | 962 | } |
998 | 963 | ||
999 | int se_dev_set_emulate_dpo(struct se_device *dev, int flag) | 964 | int se_dev_set_emulate_dpo(struct se_device *dev, int flag) |
1000 | { | 965 | { |
1001 | if ((flag != 0) && (flag != 1)) { | 966 | if ((flag != 0) && (flag != 1)) { |
1002 | printk(KERN_ERR "Illegal value %d\n", flag); | 967 | printk(KERN_ERR "Illegal value %d\n", flag); |
1003 | return -1; | 968 | return -1; |
1004 | } | 969 | } |
1005 | if (TRANSPORT(dev)->dpo_emulated == NULL) { | 970 | if (TRANSPORT(dev)->dpo_emulated == NULL) { |
1006 | printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated is NULL\n"); | 971 | printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated is NULL\n"); |
1007 | return -1; | 972 | return -1; |
1008 | } | 973 | } |
1009 | if (TRANSPORT(dev)->dpo_emulated(dev) == 0) { | 974 | if (TRANSPORT(dev)->dpo_emulated(dev) == 0) { |
1010 | printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated not supported\n"); | 975 | printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated not supported\n"); |
1011 | return -1; | 976 | return -1; |
1012 | } | 977 | } |
1013 | DEV_ATTRIB(dev)->emulate_dpo = flag; | 978 | DEV_ATTRIB(dev)->emulate_dpo = flag; |
1014 | printk(KERN_INFO "dev[%p]: SE Device Page Out (DPO) Emulation" | 979 | printk(KERN_INFO "dev[%p]: SE Device Page Out (DPO) Emulation" |
1015 | " bit: %d\n", dev, DEV_ATTRIB(dev)->emulate_dpo); | 980 | " bit: %d\n", dev, DEV_ATTRIB(dev)->emulate_dpo); |
1016 | return 0; | 981 | return 0; |
1017 | } | 982 | } |
1018 | 983 | ||
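se_dev_set_emulate_dpo() above, and the fua_write/fua_read/write_cache setters that follow, all share a three-step shape: validate the boolean flag, require that the transport provides the feature hook, and require that the hook reports support. A hedged generic sketch of that shape, with emulated() standing in for the per-feature transport hook:

static int set_emulated_flag(struct se_device *dev, int flag,
			     int (*emulated)(struct se_device *),
			     int *attrib)
{
	if (flag != 0 && flag != 1)
		return -1;	/* only boolean values accepted */
	if (!emulated)
		return -1;	/* transport does not define the hook */
	if (!emulated(dev))
		return -1;	/* transport cannot emulate the feature */
	*attrib = flag;
	return 0;
}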
1019 | int se_dev_set_emulate_fua_write(struct se_device *dev, int flag) | 984 | int se_dev_set_emulate_fua_write(struct se_device *dev, int flag) |
1020 | { | 985 | { |
1021 | if ((flag != 0) && (flag != 1)) { | 986 | if ((flag != 0) && (flag != 1)) { |
1022 | printk(KERN_ERR "Illegal value %d\n", flag); | 987 | printk(KERN_ERR "Illegal value %d\n", flag); |
1023 | return -1; | 988 | return -1; |
1024 | } | 989 | } |
1025 | if (TRANSPORT(dev)->fua_write_emulated == NULL) { | 990 | if (TRANSPORT(dev)->fua_write_emulated == NULL) { |
1026 | printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated is NULL\n"); | 991 | printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated is NULL\n"); |
1027 | return -1; | 992 | return -1; |
1028 | } | 993 | } |
1029 | if (TRANSPORT(dev)->fua_write_emulated(dev) == 0) { | 994 | if (TRANSPORT(dev)->fua_write_emulated(dev) == 0) { |
1030 | printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated not supported\n"); | 995 | printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated not supported\n"); |
1031 | return -1; | 996 | return -1; |
1032 | } | 997 | } |
1033 | DEV_ATTRIB(dev)->emulate_fua_write = flag; | 998 | DEV_ATTRIB(dev)->emulate_fua_write = flag; |
1034 | printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access WRITEs: %d\n", | 999 | printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access WRITEs: %d\n", |
1035 | dev, DEV_ATTRIB(dev)->emulate_fua_write); | 1000 | dev, DEV_ATTRIB(dev)->emulate_fua_write); |
1036 | return 0; | 1001 | return 0; |
1037 | } | 1002 | } |
1038 | 1003 | ||
1039 | int se_dev_set_emulate_fua_read(struct se_device *dev, int flag) | 1004 | int se_dev_set_emulate_fua_read(struct se_device *dev, int flag) |
1040 | { | 1005 | { |
1041 | if ((flag != 0) && (flag != 1)) { | 1006 | if ((flag != 0) && (flag != 1)) { |
1042 | printk(KERN_ERR "Illegal value %d\n", flag); | 1007 | printk(KERN_ERR "Illegal value %d\n", flag); |
1043 | return -1; | 1008 | return -1; |
1044 | } | 1009 | } |
1045 | if (TRANSPORT(dev)->fua_read_emulated == NULL) { | 1010 | if (TRANSPORT(dev)->fua_read_emulated == NULL) { |
1046 | printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated is NULL\n"); | 1011 | printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated is NULL\n"); |
1047 | return -1; | 1012 | return -1; |
1048 | } | 1013 | } |
1049 | if (TRANSPORT(dev)->fua_read_emulated(dev) == 0) { | 1014 | if (TRANSPORT(dev)->fua_read_emulated(dev) == 0) { |
1050 | printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated not supported\n"); | 1015 | printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated not supported\n"); |
1051 | return -1; | 1016 | return -1; |
1052 | } | 1017 | } |
1053 | DEV_ATTRIB(dev)->emulate_fua_read = flag; | 1018 | DEV_ATTRIB(dev)->emulate_fua_read = flag; |
1054 | printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access READs: %d\n", | 1019 | printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access READs: %d\n", |
1055 | dev, DEV_ATTRIB(dev)->emulate_fua_read); | 1020 | dev, DEV_ATTRIB(dev)->emulate_fua_read); |
1056 | return 0; | 1021 | return 0; |
1057 | } | 1022 | } |
1058 | 1023 | ||
1059 | int se_dev_set_emulate_write_cache(struct se_device *dev, int flag) | 1024 | int se_dev_set_emulate_write_cache(struct se_device *dev, int flag) |
1060 | { | 1025 | { |
1061 | if ((flag != 0) && (flag != 1)) { | 1026 | if ((flag != 0) && (flag != 1)) { |
1062 | printk(KERN_ERR "Illegal value %d\n", flag); | 1027 | printk(KERN_ERR "Illegal value %d\n", flag); |
1063 | return -1; | 1028 | return -1; |
1064 | } | 1029 | } |
1065 | if (TRANSPORT(dev)->write_cache_emulated == NULL) { | 1030 | if (TRANSPORT(dev)->write_cache_emulated == NULL) { |
1066 | printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated is NULL\n"); | 1031 | printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated is NULL\n"); |
1067 | return -1; | 1032 | return -1; |
1068 | } | 1033 | } |
1069 | if (TRANSPORT(dev)->write_cache_emulated(dev) == 0) { | 1034 | if (TRANSPORT(dev)->write_cache_emulated(dev) == 0) { |
1070 | printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated not supported\n"); | 1035 | printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated not supported\n"); |
1071 | return -1; | 1036 | return -1; |
1072 | } | 1037 | } |
1073 | DEV_ATTRIB(dev)->emulate_write_cache = flag; | 1038 | DEV_ATTRIB(dev)->emulate_write_cache = flag; |
1074 | printk(KERN_INFO "dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n", | 1039 | printk(KERN_INFO "dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n", |
1075 | dev, DEV_ATTRIB(dev)->emulate_write_cache); | 1040 | dev, DEV_ATTRIB(dev)->emulate_write_cache); |
1076 | return 0; | 1041 | return 0; |
1077 | } | 1042 | } |
1078 | 1043 | ||
1079 | int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag) | 1044 | int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag) |
1080 | { | 1045 | { |
1081 | if ((flag != 0) && (flag != 1) && (flag != 2)) { | 1046 | if ((flag != 0) && (flag != 1) && (flag != 2)) { |
1082 | printk(KERN_ERR "Illegal value %d\n", flag); | 1047 | printk(KERN_ERR "Illegal value %d\n", flag); |
1083 | return -1; | 1048 | return -1; |
1084 | } | 1049 | } |
1085 | 1050 | ||
1086 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { | 1051 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { |
1087 | printk(KERN_ERR "dev[%p]: Unable to change SE Device" | 1052 | printk(KERN_ERR "dev[%p]: Unable to change SE Device" |
1088 | " UA_INTRLCK_CTRL while dev_export_obj: %d count" | 1053 | " UA_INTRLCK_CTRL while dev_export_obj: %d count" |
1089 | " exists\n", dev, | 1054 | " exists\n", dev, |
1090 | atomic_read(&dev->dev_export_obj.obj_access_count)); | 1055 | atomic_read(&dev->dev_export_obj.obj_access_count)); |
1091 | return -1; | 1056 | return -1; |
1092 | } | 1057 | } |
1093 | DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = flag; | 1058 | DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = flag; |
1094 | printk(KERN_INFO "dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n", | 1059 | printk(KERN_INFO "dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n", |
1095 | dev, DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl); | 1060 | dev, DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl); |
1096 | 1061 | ||
1097 | return 0; | 1062 | return 0; |
1098 | } | 1063 | } |
1099 | 1064 | ||
1100 | int se_dev_set_emulate_tas(struct se_device *dev, int flag) | 1065 | int se_dev_set_emulate_tas(struct se_device *dev, int flag) |
1101 | { | 1066 | { |
1102 | if ((flag != 0) && (flag != 1)) { | 1067 | if ((flag != 0) && (flag != 1)) { |
1103 | printk(KERN_ERR "Illegal value %d\n", flag); | 1068 | printk(KERN_ERR "Illegal value %d\n", flag); |
1104 | return -1; | 1069 | return -1; |
1105 | } | 1070 | } |
1106 | 1071 | ||
1107 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { | 1072 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { |
1108 | printk(KERN_ERR "dev[%p]: Unable to change SE Device TAS while" | 1073 | printk(KERN_ERR "dev[%p]: Unable to change SE Device TAS while" |
1109 | " dev_export_obj: %d count exists\n", dev, | 1074 | " dev_export_obj: %d count exists\n", dev, |
1110 | atomic_read(&dev->dev_export_obj.obj_access_count)); | 1075 | atomic_read(&dev->dev_export_obj.obj_access_count)); |
1111 | return -1; | 1076 | return -1; |
1112 | } | 1077 | } |
1113 | DEV_ATTRIB(dev)->emulate_tas = flag; | 1078 | DEV_ATTRIB(dev)->emulate_tas = flag; |
1114 | printk(KERN_INFO "dev[%p]: SE Device TASK_ABORTED status bit: %s\n", | 1079 | printk(KERN_INFO "dev[%p]: SE Device TASK_ABORTED status bit: %s\n", |
1115 | dev, (DEV_ATTRIB(dev)->emulate_tas) ? "Enabled" : "Disabled"); | 1080 | dev, (DEV_ATTRIB(dev)->emulate_tas) ? "Enabled" : "Disabled"); |
1116 | 1081 | ||
1117 | return 0; | 1082 | return 0; |
1118 | } | 1083 | } |
1119 | 1084 | ||
1120 | int se_dev_set_emulate_tpu(struct se_device *dev, int flag) | 1085 | int se_dev_set_emulate_tpu(struct se_device *dev, int flag) |
1121 | { | 1086 | { |
1122 | if ((flag != 0) && (flag != 1)) { | 1087 | if ((flag != 0) && (flag != 1)) { |
1123 | printk(KERN_ERR "Illegal value %d\n", flag); | 1088 | printk(KERN_ERR "Illegal value %d\n", flag); |
1124 | return -1; | 1089 | return -1; |
1125 | } | 1090 | } |
1126 | /* | 1091 | /* |
1127 | * We expect this value to be non-zero when generic Block Layer | 1092 | * We expect this value to be non-zero when generic Block Layer |
1128 | * Discard support is detected in iblock_create_virtdevice(). | 1093 | * Discard support is detected in iblock_create_virtdevice(). |
1129 | */ | 1094 | */ |
1130 | if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) { | 1095 | if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) { |
1131 | printk(KERN_ERR "Generic Block Discard not supported\n"); | 1096 | printk(KERN_ERR "Generic Block Discard not supported\n"); |
1132 | return -ENOSYS; | 1097 | return -ENOSYS; |
1133 | } | 1098 | } |
1134 | 1099 | ||
1135 | DEV_ATTRIB(dev)->emulate_tpu = flag; | 1100 | DEV_ATTRIB(dev)->emulate_tpu = flag; |
1136 | printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n", | 1101 | printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n", |
1137 | dev, flag); | 1102 | dev, flag); |
1138 | return 0; | 1103 | return 0; |
1139 | } | 1104 | } |
1140 | 1105 | ||
1141 | int se_dev_set_emulate_tpws(struct se_device *dev, int flag) | 1106 | int se_dev_set_emulate_tpws(struct se_device *dev, int flag) |
1142 | { | 1107 | { |
1143 | if ((flag != 0) && (flag != 1)) { | 1108 | if ((flag != 0) && (flag != 1)) { |
1144 | printk(KERN_ERR "Illegal value %d\n", flag); | 1109 | printk(KERN_ERR "Illegal value %d\n", flag); |
1145 | return -1; | 1110 | return -1; |
1146 | } | 1111 | } |
1147 | /* | 1112 | /* |
1148 | * We expect this value to be non-zero when generic Block Layer | 1113 | * We expect this value to be non-zero when generic Block Layer |
1149 | * Discard support is detected in iblock_create_virtdevice(). | 1114 | * Discard support is detected in iblock_create_virtdevice(). |
1150 | */ | 1115 | */ |
1151 | if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) { | 1116 | if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) { |
1152 | printk(KERN_ERR "Generic Block Discard not supported\n"); | 1117 | printk(KERN_ERR "Generic Block Discard not supported\n"); |
1153 | return -ENOSYS; | 1118 | return -ENOSYS; |
1154 | } | 1119 | } |
1155 | 1120 | ||
1156 | DEV_ATTRIB(dev)->emulate_tpws = flag; | 1121 | DEV_ATTRIB(dev)->emulate_tpws = flag; |
1157 | printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n", | 1122 | printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n", |
1158 | dev, flag); | 1123 | dev, flag); |
1159 | return 0; | 1124 | return 0; |
1160 | } | 1125 | } |
1161 | 1126 | ||
1162 | int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag) | 1127 | int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag) |
1163 | { | 1128 | { |
1164 | if ((flag != 0) && (flag != 1)) { | 1129 | if ((flag != 0) && (flag != 1)) { |
1165 | printk(KERN_ERR "Illegal value %d\n", flag); | 1130 | printk(KERN_ERR "Illegal value %d\n", flag); |
1166 | return -1; | 1131 | return -1; |
1167 | } | 1132 | } |
1168 | DEV_ATTRIB(dev)->enforce_pr_isids = flag; | 1133 | DEV_ATTRIB(dev)->enforce_pr_isids = flag; |
1169 | printk(KERN_INFO "dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev, | 1134 | printk(KERN_INFO "dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev, |
1170 | (DEV_ATTRIB(dev)->enforce_pr_isids) ? "Enabled" : "Disabled"); | 1135 | (DEV_ATTRIB(dev)->enforce_pr_isids) ? "Enabled" : "Disabled"); |
1171 | return 0; | 1136 | return 0; |
1172 | } | 1137 | } |
1173 | 1138 | ||
1174 | /* | 1139 | /* |
1175 | * Note, this can only be called on unexported SE Device Object. | 1140 | * Note, this can only be called on unexported SE Device Object. |
1176 | */ | 1141 | */ |
1177 | int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth) | 1142 | int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth) |
1178 | { | 1143 | { |
1179 | u32 orig_queue_depth = dev->queue_depth; | 1144 | u32 orig_queue_depth = dev->queue_depth; |
1180 | 1145 | ||
1181 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { | 1146 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { |
1182 | printk(KERN_ERR "dev[%p]: Unable to change SE Device TCQ while" | 1147 | printk(KERN_ERR "dev[%p]: Unable to change SE Device TCQ while" |
1183 | " dev_export_obj: %d count exists\n", dev, | 1148 | " dev_export_obj: %d count exists\n", dev, |
1184 | atomic_read(&dev->dev_export_obj.obj_access_count)); | 1149 | atomic_read(&dev->dev_export_obj.obj_access_count)); |
1185 | return -1; | 1150 | return -1; |
1186 | } | 1151 | } |
1187 | if (!(queue_depth)) { | 1152 | if (!(queue_depth)) { |
1188 | printk(KERN_ERR "dev[%p]: Illegal ZERO value for queue" | 1153 | printk(KERN_ERR "dev[%p]: Illegal ZERO value for queue" |
1189 | "_depth\n", dev); | 1154 | "_depth\n", dev); |
1190 | return -1; | 1155 | return -1; |
1191 | } | 1156 | } |
1192 | 1157 | ||
1193 | if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { | 1158 | if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { |
1194 | if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) { | 1159 | if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) { |
1195 | printk(KERN_ERR "dev[%p]: Passed queue_depth: %u" | 1160 | printk(KERN_ERR "dev[%p]: Passed queue_depth: %u" |
1196 | " exceeds TCM/SE_Device TCQ: %u\n", | 1161 | " exceeds TCM/SE_Device TCQ: %u\n", |
1197 | dev, queue_depth, | 1162 | dev, queue_depth, |
1198 | DEV_ATTRIB(dev)->hw_queue_depth); | 1163 | DEV_ATTRIB(dev)->hw_queue_depth); |
1199 | return -1; | 1164 | return -1; |
1200 | } | 1165 | } |
1201 | } else { | 1166 | } else { |
1202 | if (queue_depth > DEV_ATTRIB(dev)->queue_depth) { | 1167 | if (queue_depth > DEV_ATTRIB(dev)->queue_depth) { |
1203 | if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) { | 1168 | if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) { |
1204 | printk(KERN_ERR "dev[%p]: Passed queue_depth:" | 1169 | printk(KERN_ERR "dev[%p]: Passed queue_depth:" |
1205 | " %u exceeds TCM/SE_Device MAX" | 1170 | " %u exceeds TCM/SE_Device MAX" |
1206 | " TCQ: %u\n", dev, queue_depth, | 1171 | " TCQ: %u\n", dev, queue_depth, |
1207 | DEV_ATTRIB(dev)->hw_queue_depth); | 1172 | DEV_ATTRIB(dev)->hw_queue_depth); |
1208 | return -1; | 1173 | return -1; |
1209 | } | 1174 | } |
1210 | } | 1175 | } |
1211 | } | 1176 | } |
1212 | 1177 | ||
1213 | DEV_ATTRIB(dev)->queue_depth = dev->queue_depth = queue_depth; | 1178 | DEV_ATTRIB(dev)->queue_depth = dev->queue_depth = queue_depth; |
1214 | if (queue_depth > orig_queue_depth) | 1179 | if (queue_depth > orig_queue_depth) |
1215 | atomic_add(queue_depth - orig_queue_depth, &dev->depth_left); | 1180 | atomic_add(queue_depth - orig_queue_depth, &dev->depth_left); |
1216 | else if (queue_depth < orig_queue_depth) | 1181 | else if (queue_depth < orig_queue_depth) |
1217 | atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left); | 1182 | atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left); |
1218 | 1183 | ||
1219 | printk(KERN_INFO "dev[%p]: SE Device TCQ Depth changed to: %u\n", | 1184 | printk(KERN_INFO "dev[%p]: SE Device TCQ Depth changed to: %u\n", |
1220 | dev, queue_depth); | 1185 | dev, queue_depth); |
1221 | return 0; | 1186 | return 0; |
1222 | } | 1187 | } |
1223 | 1188 | ||
1224 | int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors) | 1189 | int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors) |
1225 | { | 1190 | { |
1226 | int force = 0; /* Force setting for VDEVS */ | 1191 | int force = 0; /* Force setting for VDEVS */ |
1227 | 1192 | ||
1228 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { | 1193 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { |
1229 | printk(KERN_ERR "dev[%p]: Unable to change SE Device" | 1194 | printk(KERN_ERR "dev[%p]: Unable to change SE Device" |
1230 | " max_sectors while dev_export_obj: %d count exists\n", | 1195 | " max_sectors while dev_export_obj: %d count exists\n", |
1231 | dev, atomic_read(&dev->dev_export_obj.obj_access_count)); | 1196 | dev, atomic_read(&dev->dev_export_obj.obj_access_count)); |
1232 | return -1; | 1197 | return -1; |
1233 | } | 1198 | } |
1234 | if (!(max_sectors)) { | 1199 | if (!(max_sectors)) { |
1235 | printk(KERN_ERR "dev[%p]: Illegal ZERO value for" | 1200 | printk(KERN_ERR "dev[%p]: Illegal ZERO value for" |
1236 | " max_sectors\n", dev); | 1201 | " max_sectors\n", dev); |
1237 | return -1; | 1202 | return -1; |
1238 | } | 1203 | } |
1239 | if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) { | 1204 | if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) { |
1240 | printk(KERN_ERR "dev[%p]: Passed max_sectors: %u less than" | 1205 | printk(KERN_ERR "dev[%p]: Passed max_sectors: %u less than" |
1241 | " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors, | 1206 | " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors, |
1242 | DA_STATUS_MAX_SECTORS_MIN); | 1207 | DA_STATUS_MAX_SECTORS_MIN); |
1243 | return -1; | 1208 | return -1; |
1244 | } | 1209 | } |
1245 | if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { | 1210 | if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { |
1246 | if (max_sectors > DEV_ATTRIB(dev)->hw_max_sectors) { | 1211 | if (max_sectors > DEV_ATTRIB(dev)->hw_max_sectors) { |
1247 | printk(KERN_ERR "dev[%p]: Passed max_sectors: %u" | 1212 | printk(KERN_ERR "dev[%p]: Passed max_sectors: %u" |
1248 | " greater than TCM/SE_Device max_sectors:" | 1213 | " greater than TCM/SE_Device max_sectors:" |
1249 | " %u\n", dev, max_sectors, | 1214 | " %u\n", dev, max_sectors, |
1250 | DEV_ATTRIB(dev)->hw_max_sectors); | 1215 | DEV_ATTRIB(dev)->hw_max_sectors); |
1251 | return -1; | 1216 | return -1; |
1252 | } | 1217 | } |
1253 | } else { | 1218 | } else { |
1254 | if (!(force) && (max_sectors > | 1219 | if (!(force) && (max_sectors > |
1255 | DEV_ATTRIB(dev)->hw_max_sectors)) { | 1220 | DEV_ATTRIB(dev)->hw_max_sectors)) { |
1256 | printk(KERN_ERR "dev[%p]: Passed max_sectors: %u" | 1221 | printk(KERN_ERR "dev[%p]: Passed max_sectors: %u" |
1257 | " greater than TCM/SE_Device max_sectors" | 1222 | " greater than TCM/SE_Device max_sectors" |
1258 | ": %u, use force=1 to override.\n", dev, | 1223 | ": %u, use force=1 to override.\n", dev, |
1259 | max_sectors, DEV_ATTRIB(dev)->hw_max_sectors); | 1224 | max_sectors, DEV_ATTRIB(dev)->hw_max_sectors); |
1260 | return -1; | 1225 | return -1; |
1261 | } | 1226 | } |
1262 | if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) { | 1227 | if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) { |
1263 | printk(KERN_ERR "dev[%p]: Passed max_sectors: %u" | 1228 | printk(KERN_ERR "dev[%p]: Passed max_sectors: %u" |
1264 | " greater than DA_STATUS_MAX_SECTORS_MAX:" | 1229 | " greater than DA_STATUS_MAX_SECTORS_MAX:" |
1265 | " %u\n", dev, max_sectors, | 1230 | " %u\n", dev, max_sectors, |
1266 | DA_STATUS_MAX_SECTORS_MAX); | 1231 | DA_STATUS_MAX_SECTORS_MAX); |
1267 | return -1; | 1232 | return -1; |
1268 | } | 1233 | } |
1269 | } | 1234 | } |
1270 | 1235 | ||
1271 | DEV_ATTRIB(dev)->max_sectors = max_sectors; | 1236 | DEV_ATTRIB(dev)->max_sectors = max_sectors; |
1272 | printk("dev[%p]: SE Device max_sectors changed to %u\n", | 1237 | printk("dev[%p]: SE Device max_sectors changed to %u\n", |
1273 | dev, max_sectors); | 1238 | dev, max_sectors); |
1274 | return 0; | 1239 | return 0; |
1275 | } | 1240 | } |
1276 | 1241 | ||
1277 | int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors) | 1242 | int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors) |
1278 | { | 1243 | { |
1279 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { | 1244 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { |
1280 | printk(KERN_ERR "dev[%p]: Unable to change SE Device" | 1245 | printk(KERN_ERR "dev[%p]: Unable to change SE Device" |
1281 | " optimal_sectors while dev_export_obj: %d count exists\n", | 1246 | " optimal_sectors while dev_export_obj: %d count exists\n", |
1282 | dev, atomic_read(&dev->dev_export_obj.obj_access_count)); | 1247 | dev, atomic_read(&dev->dev_export_obj.obj_access_count)); |
1283 | return -EINVAL; | 1248 | return -EINVAL; |
1284 | } | 1249 | } |
1285 | if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { | 1250 | if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { |
1286 | printk(KERN_ERR "dev[%p]: Passed optimal_sectors cannot be" | 1251 | printk(KERN_ERR "dev[%p]: Passed optimal_sectors cannot be" |
1287 | " changed for TCM/pSCSI\n", dev); | 1252 | " changed for TCM/pSCSI\n", dev); |
1288 | return -EINVAL; | 1253 | return -EINVAL; |
1289 | } | 1254 | } |
1290 | if (optimal_sectors > DEV_ATTRIB(dev)->max_sectors) { | 1255 | if (optimal_sectors > DEV_ATTRIB(dev)->max_sectors) { |
1291 | printk(KERN_ERR "dev[%p]: Passed optimal_sectors %u cannot be" | 1256 | printk(KERN_ERR "dev[%p]: Passed optimal_sectors %u cannot be" |
1292 | " greater than max_sectors: %u\n", dev, | 1257 | " greater than max_sectors: %u\n", dev, |
1293 | optimal_sectors, DEV_ATTRIB(dev)->max_sectors); | 1258 | optimal_sectors, DEV_ATTRIB(dev)->max_sectors); |
1294 | return -EINVAL; | 1259 | return -EINVAL; |
1295 | } | 1260 | } |
1296 | 1261 | ||
1297 | DEV_ATTRIB(dev)->optimal_sectors = optimal_sectors; | 1262 | DEV_ATTRIB(dev)->optimal_sectors = optimal_sectors; |
1298 | printk(KERN_INFO "dev[%p]: SE Device optimal_sectors changed to %u\n", | 1263 | printk(KERN_INFO "dev[%p]: SE Device optimal_sectors changed to %u\n", |
1299 | dev, optimal_sectors); | 1264 | dev, optimal_sectors); |
1300 | return 0; | 1265 | return 0; |
1301 | } | 1266 | } |
1302 | 1267 | ||
1303 | int se_dev_set_block_size(struct se_device *dev, u32 block_size) | 1268 | int se_dev_set_block_size(struct se_device *dev, u32 block_size) |
1304 | { | 1269 | { |
1305 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { | 1270 | if (atomic_read(&dev->dev_export_obj.obj_access_count)) { |
1306 | printk(KERN_ERR "dev[%p]: Unable to change SE Device block_size" | 1271 | printk(KERN_ERR "dev[%p]: Unable to change SE Device block_size" |
1307 | " while dev_export_obj: %d count exists\n", dev, | 1272 | " while dev_export_obj: %d count exists\n", dev, |
1308 | atomic_read(&dev->dev_export_obj.obj_access_count)); | 1273 | atomic_read(&dev->dev_export_obj.obj_access_count)); |
1309 | return -1; | 1274 | return -1; |
1310 | } | 1275 | } |
1311 | 1276 | ||
1312 | if ((block_size != 512) && | 1277 | if ((block_size != 512) && |
1313 | (block_size != 1024) && | 1278 | (block_size != 1024) && |
1314 | (block_size != 2048) && | 1279 | (block_size != 2048) && |
1315 | (block_size != 4096)) { | 1280 | (block_size != 4096)) { |
1316 | printk(KERN_ERR "dev[%p]: Illegal value for block_size: %u" | 1281 | printk(KERN_ERR "dev[%p]: Illegal value for block_size: %u" |
1317 | " for SE device, must be 512, 1024, 2048 or 4096\n", | 1282 | " for SE device, must be 512, 1024, 2048 or 4096\n", |
1318 | dev, block_size); | 1283 | dev, block_size); |
1319 | return -1; | 1284 | return -1; |
1320 | } | 1285 | } |
1321 | 1286 | ||
1322 | if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { | 1287 | if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { |
1323 | printk(KERN_ERR "dev[%p]: Not allowed to change block_size for" | 1288 | printk(KERN_ERR "dev[%p]: Not allowed to change block_size for" |
1324 | " Physical Device, use Linux/SCSI to change" | 1289 | " Physical Device, use Linux/SCSI to change" |
1325 | " block_size for underlying hardware\n", dev); | 1290 | " block_size for underlying hardware\n", dev); |
1326 | return -1; | 1291 | return -1; |
1327 | } | 1292 | } |
1328 | 1293 | ||
1329 | DEV_ATTRIB(dev)->block_size = block_size; | 1294 | DEV_ATTRIB(dev)->block_size = block_size; |
1330 | printk(KERN_INFO "dev[%p]: SE Device block_size changed to %u\n", | 1295 | printk(KERN_INFO "dev[%p]: SE Device block_size changed to %u\n", |
1331 | dev, block_size); | 1296 | dev, block_size); |
1332 | return 0; | 1297 | return 0; |
1333 | } | 1298 | } |
1334 | 1299 | ||
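The whitelist in se_dev_set_block_size() admits exactly the powers of two between 512 and 4096, so an equivalent predicate can lean on is_power_of_2() from <linux/log2.h>; a minimal sketch:

#include <linux/log2.h>

static inline int block_size_ok(u32 bs)
{
	return bs >= 512 && bs <= 4096 && is_power_of_2(bs);
}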
1335 | struct se_lun *core_dev_add_lun( | 1300 | struct se_lun *core_dev_add_lun( |
1336 | struct se_portal_group *tpg, | 1301 | struct se_portal_group *tpg, |
1337 | struct se_hba *hba, | 1302 | struct se_hba *hba, |
1338 | struct se_device *dev, | 1303 | struct se_device *dev, |
1339 | u32 lun) | 1304 | u32 lun) |
1340 | { | 1305 | { |
1341 | struct se_lun *lun_p; | 1306 | struct se_lun *lun_p; |
1342 | u32 lun_access = 0; | 1307 | u32 lun_access = 0; |
1343 | 1308 | ||
1344 | if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) { | 1309 | if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) { |
1345 | printk(KERN_ERR "Unable to export struct se_device while dev_access_obj: %d\n", | 1310 | printk(KERN_ERR "Unable to export struct se_device while dev_access_obj: %d\n", |
1346 | atomic_read(&dev->dev_access_obj.obj_access_count)); | 1311 | atomic_read(&dev->dev_access_obj.obj_access_count)); |
1347 | return NULL; | 1312 | return NULL; |
1348 | } | 1313 | } |
1349 | 1314 | ||
1350 | lun_p = core_tpg_pre_addlun(tpg, lun); | 1315 | lun_p = core_tpg_pre_addlun(tpg, lun); |
1351 | if ((IS_ERR(lun_p)) || !(lun_p)) | 1316 | if ((IS_ERR(lun_p)) || !(lun_p)) |
1352 | return NULL; | 1317 | return NULL; |
1353 | 1318 | ||
1354 | if (dev->dev_flags & DF_READ_ONLY) | 1319 | if (dev->dev_flags & DF_READ_ONLY) |
1355 | lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; | 1320 | lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; |
1356 | else | 1321 | else |
1357 | lun_access = TRANSPORT_LUNFLAGS_READ_WRITE; | 1322 | lun_access = TRANSPORT_LUNFLAGS_READ_WRITE; |
1358 | 1323 | ||
1359 | if (core_tpg_post_addlun(tpg, lun_p, lun_access, dev) < 0) | 1324 | if (core_tpg_post_addlun(tpg, lun_p, lun_access, dev) < 0) |
1360 | return NULL; | 1325 | return NULL; |
1361 | 1326 | ||
1362 | printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from" | 1327 | printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from" |
1363 | " CORE HBA: %u\n", TPG_TFO(tpg)->get_fabric_name(), | 1328 | " CORE HBA: %u\n", TPG_TFO(tpg)->get_fabric_name(), |
1364 | TPG_TFO(tpg)->tpg_get_tag(tpg), lun_p->unpacked_lun, | 1329 | TPG_TFO(tpg)->tpg_get_tag(tpg), lun_p->unpacked_lun, |
1365 | TPG_TFO(tpg)->get_fabric_name(), hba->hba_id); | 1330 | TPG_TFO(tpg)->get_fabric_name(), hba->hba_id); |
1366 | /* | 1331 | /* |
1367 | * Update LUN maps for dynamically added initiators when | 1332 | * Update LUN maps for dynamically added initiators when |
1368 | * generate_node_acl is enabled. | 1333 | * generate_node_acl is enabled. |
1369 | */ | 1334 | */ |
1370 | if (TPG_TFO(tpg)->tpg_check_demo_mode(tpg)) { | 1335 | if (TPG_TFO(tpg)->tpg_check_demo_mode(tpg)) { |
1371 | struct se_node_acl *acl; | 1336 | struct se_node_acl *acl; |
1372 | spin_lock_bh(&tpg->acl_node_lock); | 1337 | spin_lock_bh(&tpg->acl_node_lock); |
1373 | list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { | 1338 | list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { |
1374 | if (acl->dynamic_node_acl) { | 1339 | if (acl->dynamic_node_acl) { |
1375 | spin_unlock_bh(&tpg->acl_node_lock); | 1340 | spin_unlock_bh(&tpg->acl_node_lock); |
1376 | core_tpg_add_node_to_devs(acl, tpg); | 1341 | core_tpg_add_node_to_devs(acl, tpg); |
1377 | spin_lock_bh(&tpg->acl_node_lock); | 1342 | spin_lock_bh(&tpg->acl_node_lock); |
1378 | } | 1343 | } |
1379 | } | 1344 | } |
1380 | spin_unlock_bh(&tpg->acl_node_lock); | 1345 | spin_unlock_bh(&tpg->acl_node_lock); |
1381 | } | 1346 | } |
1382 | 1347 | ||
1383 | return lun_p; | 1348 | return lun_p; |
1384 | } | 1349 | } |
1385 | 1350 | ||
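The demo-mode walk above drops acl_node_lock around core_tpg_add_node_to_devs(), which can block, then re-takes the lock before the iteration continues; resuming from a cursor that was unprotected while unlocked is safe only if no other context removes ACLs concurrently. A hedged skeleton of the pattern, with do_blocking_work() as a hypothetical sleepable call:

	spin_lock_bh(&tpg->acl_node_lock);
	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!acl->dynamic_node_acl)
			continue;
		spin_unlock_bh(&tpg->acl_node_lock);
		do_blocking_work(acl);	/* hypothetical sleepable call */
		spin_lock_bh(&tpg->acl_node_lock);
	}
	spin_unlock_bh(&tpg->acl_node_lock);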
1386 | /* core_dev_del_lun(): | 1351 | /* core_dev_del_lun(): |
1387 | * | 1352 | * |
1388 | * | 1353 | * |
1389 | */ | 1354 | */ |
1390 | int core_dev_del_lun( | 1355 | int core_dev_del_lun( |
1391 | struct se_portal_group *tpg, | 1356 | struct se_portal_group *tpg, |
1392 | u32 unpacked_lun) | 1357 | u32 unpacked_lun) |
1393 | { | 1358 | { |
1394 | struct se_lun *lun; | 1359 | struct se_lun *lun; |
1395 | int ret = 0; | 1360 | int ret = 0; |
1396 | 1361 | ||
1397 | lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret); | 1362 | lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret); |
1398 | if (!(lun)) | 1363 | if (!(lun)) |
1399 | return ret; | 1364 | return ret; |
1400 | 1365 | ||
1401 | core_tpg_post_dellun(tpg, lun); | 1366 | core_tpg_post_dellun(tpg, lun); |
1402 | 1367 | ||
1403 | printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from" | 1368 | printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from" |
1404 | " device object\n", TPG_TFO(tpg)->get_fabric_name(), | 1369 | " device object\n", TPG_TFO(tpg)->get_fabric_name(), |
1405 | TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun, | 1370 | TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun, |
1406 | TPG_TFO(tpg)->get_fabric_name()); | 1371 | TPG_TFO(tpg)->get_fabric_name()); |
1407 | 1372 | ||
1408 | return 0; | 1373 | return 0; |
1409 | } | 1374 | } |
1410 | 1375 | ||
1411 | struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun) | 1376 | struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun) |
1412 | { | 1377 | { |
1413 | struct se_lun *lun; | 1378 | struct se_lun *lun; |
1414 | 1379 | ||
1415 | spin_lock(&tpg->tpg_lun_lock); | 1380 | spin_lock(&tpg->tpg_lun_lock); |
1416 | if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { | 1381 | if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { |
1417 | printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS" | 1382 | printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS" |
1418 | "_PER_TPG-1: %u for Target Portal Group: %hu\n", | 1383 | "_PER_TPG-1: %u for Target Portal Group: %hu\n", |
1419 | TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, | 1384 | TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, |
1420 | TRANSPORT_MAX_LUNS_PER_TPG-1, | 1385 | TRANSPORT_MAX_LUNS_PER_TPG-1, |
1421 | TPG_TFO(tpg)->tpg_get_tag(tpg)); | 1386 | TPG_TFO(tpg)->tpg_get_tag(tpg)); |
1422 | spin_unlock(&tpg->tpg_lun_lock); | 1387 | spin_unlock(&tpg->tpg_lun_lock); |
1423 | return NULL; | 1388 | return NULL; |
1424 | } | 1389 | } |
1425 | lun = &tpg->tpg_lun_list[unpacked_lun]; | 1390 | lun = &tpg->tpg_lun_list[unpacked_lun]; |
1426 | 1391 | ||
1427 | if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) { | 1392 | if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) { |
1428 | printk(KERN_ERR "%s Logical Unit Number: %u is not free on" | 1393 | printk(KERN_ERR "%s Logical Unit Number: %u is not free on" |
1429 | " Target Portal Group: %hu, ignoring request.\n", | 1394 | " Target Portal Group: %hu, ignoring request.\n", |
1430 | TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, | 1395 | TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, |
1431 | TPG_TFO(tpg)->tpg_get_tag(tpg)); | 1396 | TPG_TFO(tpg)->tpg_get_tag(tpg)); |
1432 | spin_unlock(&tpg->tpg_lun_lock); | 1397 | spin_unlock(&tpg->tpg_lun_lock); |
1433 | return NULL; | 1398 | return NULL; |
1434 | } | 1399 | } |
1435 | spin_unlock(&tpg->tpg_lun_lock); | 1400 | spin_unlock(&tpg->tpg_lun_lock); |
1436 | 1401 | ||
1437 | return lun; | 1402 | return lun; |
1438 | } | 1403 | } |
1439 | 1404 | ||
1440 | /* core_dev_get_lun(): | 1405 | /* core_dev_get_lun(): |
1441 | * | 1406 | * |
1442 | * | 1407 | * |
1443 | */ | 1408 | */ |
1444 | static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun) | 1409 | static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun) |
1445 | { | 1410 | { |
1446 | struct se_lun *lun; | 1411 | struct se_lun *lun; |
1447 | 1412 | ||
1448 | spin_lock(&tpg->tpg_lun_lock); | 1413 | spin_lock(&tpg->tpg_lun_lock); |
1449 | if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { | 1414 | if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { |
1450 | printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER" | 1415 | printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER" |
1451 | "_TPG-1: %u for Target Portal Group: %hu\n", | 1416 | "_TPG-1: %u for Target Portal Group: %hu\n", |
1452 | TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, | 1417 | TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, |
1453 | TRANSPORT_MAX_LUNS_PER_TPG-1, | 1418 | TRANSPORT_MAX_LUNS_PER_TPG-1, |
1454 | TPG_TFO(tpg)->tpg_get_tag(tpg)); | 1419 | TPG_TFO(tpg)->tpg_get_tag(tpg)); |
1455 | spin_unlock(&tpg->tpg_lun_lock); | 1420 | spin_unlock(&tpg->tpg_lun_lock); |
1456 | return NULL; | 1421 | return NULL; |
1457 | } | 1422 | } |
1458 | lun = &tpg->tpg_lun_list[unpacked_lun]; | 1423 | lun = &tpg->tpg_lun_list[unpacked_lun]; |
1459 | 1424 | ||
1460 | if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) { | 1425 | if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) { |
1461 | printk(KERN_ERR "%s Logical Unit Number: %u is not active on" | 1426 | printk(KERN_ERR "%s Logical Unit Number: %u is not active on" |
1462 | " Target Portal Group: %hu, ignoring request.\n", | 1427 | " Target Portal Group: %hu, ignoring request.\n", |
1463 | TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, | 1428 | TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, |
1464 | TPG_TFO(tpg)->tpg_get_tag(tpg)); | 1429 | TPG_TFO(tpg)->tpg_get_tag(tpg)); |
1465 | spin_unlock(&tpg->tpg_lun_lock); | 1430 | spin_unlock(&tpg->tpg_lun_lock); |
1466 | return NULL; | 1431 | return NULL; |
1467 | } | 1432 | } |
1468 | spin_unlock(&tpg->tpg_lun_lock); | 1433 | spin_unlock(&tpg->tpg_lun_lock); |
1469 | 1434 | ||
1470 | return lun; | 1435 | return lun; |
1471 | } | 1436 | } |
1472 | 1437 | ||
1473 | struct se_lun_acl *core_dev_init_initiator_node_lun_acl( | 1438 | struct se_lun_acl *core_dev_init_initiator_node_lun_acl( |
1474 | struct se_portal_group *tpg, | 1439 | struct se_portal_group *tpg, |
1475 | u32 mapped_lun, | 1440 | u32 mapped_lun, |
1476 | char *initiatorname, | 1441 | char *initiatorname, |
1477 | int *ret) | 1442 | int *ret) |
1478 | { | 1443 | { |
1479 | struct se_lun_acl *lacl; | 1444 | struct se_lun_acl *lacl; |
1480 | struct se_node_acl *nacl; | 1445 | struct se_node_acl *nacl; |
1481 | 1446 | ||
1482 | if (strlen(initiatorname) > TRANSPORT_IQN_LEN) { | 1447 | if (strlen(initiatorname) > TRANSPORT_IQN_LEN) { |
1483 | printk(KERN_ERR "%s InitiatorName exceeds maximum size.\n", | 1448 | printk(KERN_ERR "%s InitiatorName exceeds maximum size.\n", |
1484 | TPG_TFO(tpg)->get_fabric_name()); | 1449 | TPG_TFO(tpg)->get_fabric_name()); |
1485 | *ret = -EOVERFLOW; | 1450 | *ret = -EOVERFLOW; |
1486 | return NULL; | 1451 | return NULL; |
1487 | } | 1452 | } |
1488 | nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname); | 1453 | nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname); |
1489 | if (!(nacl)) { | 1454 | if (!(nacl)) { |
1490 | *ret = -EINVAL; | 1455 | *ret = -EINVAL; |
1491 | return NULL; | 1456 | return NULL; |
1492 | } | 1457 | } |
1493 | lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL); | 1458 | lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL); |
1494 | if (!(lacl)) { | 1459 | if (!(lacl)) { |
1495 | printk(KERN_ERR "Unable to allocate memory for struct se_lun_acl.\n"); | 1460 | printk(KERN_ERR "Unable to allocate memory for struct se_lun_acl.\n"); |
1496 | *ret = -ENOMEM; | 1461 | *ret = -ENOMEM; |
1497 | return NULL; | 1462 | return NULL; |
1498 | } | 1463 | } |
1499 | 1464 | ||
1500 | INIT_LIST_HEAD(&lacl->lacl_list); | 1465 | INIT_LIST_HEAD(&lacl->lacl_list); |
1501 | lacl->mapped_lun = mapped_lun; | 1466 | lacl->mapped_lun = mapped_lun; |
1502 | lacl->se_lun_nacl = nacl; | 1467 | lacl->se_lun_nacl = nacl; |
1503 | snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); | 1468 | snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); |
1504 | 1469 | ||
1505 | return lacl; | 1470 | return lacl; |
1506 | } | 1471 | } |
1507 | 1472 | ||
1508 | int core_dev_add_initiator_node_lun_acl( | 1473 | int core_dev_add_initiator_node_lun_acl( |
1509 | struct se_portal_group *tpg, | 1474 | struct se_portal_group *tpg, |
1510 | struct se_lun_acl *lacl, | 1475 | struct se_lun_acl *lacl, |
1511 | u32 unpacked_lun, | 1476 | u32 unpacked_lun, |
1512 | u32 lun_access) | 1477 | u32 lun_access) |
1513 | { | 1478 | { |
1514 | struct se_lun *lun; | 1479 | struct se_lun *lun; |
1515 | struct se_node_acl *nacl; | 1480 | struct se_node_acl *nacl; |
1516 | 1481 | ||
1517 | lun = core_dev_get_lun(tpg, unpacked_lun); | 1482 | lun = core_dev_get_lun(tpg, unpacked_lun); |
1518 | if (!(lun)) { | 1483 | if (!(lun)) { |
1519 | printk(KERN_ERR "%s Logical Unit Number: %u is not active on" | 1484 | printk(KERN_ERR "%s Logical Unit Number: %u is not active on" |
1520 | " Target Portal Group: %hu, ignoring request.\n", | 1485 | " Target Portal Group: %hu, ignoring request.\n", |
1521 | TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, | 1486 | TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, |
1522 | TPG_TFO(tpg)->tpg_get_tag(tpg)); | 1487 | TPG_TFO(tpg)->tpg_get_tag(tpg)); |
1523 | return -EINVAL; | 1488 | return -EINVAL; |
1524 | } | 1489 | } |
1525 | 1490 | ||
1526 | nacl = lacl->se_lun_nacl; | 1491 | nacl = lacl->se_lun_nacl; |
1527 | if (!(nacl)) | 1492 | if (!(nacl)) |
1528 | return -EINVAL; | 1493 | return -EINVAL; |
1529 | 1494 | ||
1530 | if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) && | 1495 | if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) && |
1531 | (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE)) | 1496 | (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE)) |
1532 | lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; | 1497 | lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; |
1533 | 1498 | ||
1534 | lacl->se_lun = lun; | 1499 | lacl->se_lun = lun; |
1535 | 1500 | ||
1536 | if (core_update_device_list_for_node(lun, lacl, lacl->mapped_lun, | 1501 | if (core_update_device_list_for_node(lun, lacl, lacl->mapped_lun, |
1537 | lun_access, nacl, tpg, 1) < 0) | 1502 | lun_access, nacl, tpg, 1) < 0) |
1538 | return -EINVAL; | 1503 | return -EINVAL; |
1539 | 1504 | ||
1540 | spin_lock(&lun->lun_acl_lock); | 1505 | spin_lock(&lun->lun_acl_lock); |
1541 | list_add_tail(&lacl->lacl_list, &lun->lun_acl_list); | 1506 | list_add_tail(&lacl->lacl_list, &lun->lun_acl_list); |
1542 | atomic_inc(&lun->lun_acl_count); | 1507 | atomic_inc(&lun->lun_acl_count); |
1543 | smp_mb__after_atomic_inc(); | 1508 | smp_mb__after_atomic_inc(); |
1544 | spin_unlock(&lun->lun_acl_lock); | 1509 | spin_unlock(&lun->lun_acl_lock); |
1545 | 1510 | ||
1546 | printk(KERN_INFO "%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for " | 1511 | printk(KERN_INFO "%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for " |
1547 | " InitiatorNode: %s\n", TPG_TFO(tpg)->get_fabric_name(), | 1512 | " InitiatorNode: %s\n", TPG_TFO(tpg)->get_fabric_name(), |
1548 | TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun, | 1513 | TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun, |
1549 | (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO", | 1514 | (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO", |
1550 | lacl->initiatorname); | 1515 | lacl->initiatorname); |
1551 | /* | 1516 | /* |
1552 | * Check to see if there are any existing persistent reservation APTPL | 1517 | * Check to see if there are any existing persistent reservation APTPL |
1553 | * pre-registrations that need to be enabled for this LUN ACL. | 1518 | * pre-registrations that need to be enabled for this LUN ACL. |
1554 | */ | 1519 | */ |
1555 | core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl); | 1520 | core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl); |
1556 | return 0; | 1521 | return 0; |
1557 | } | 1522 | } |
1558 | 1523 | ||
1559 | /* core_dev_del_initiator_node_lun_acl(): | 1524 | /* core_dev_del_initiator_node_lun_acl(): |
1560 | * | 1525 | * |
1561 | * | 1526 | * |
1562 | */ | 1527 | */ |
1563 | int core_dev_del_initiator_node_lun_acl( | 1528 | int core_dev_del_initiator_node_lun_acl( |
1564 | struct se_portal_group *tpg, | 1529 | struct se_portal_group *tpg, |
1565 | struct se_lun *lun, | 1530 | struct se_lun *lun, |
1566 | struct se_lun_acl *lacl) | 1531 | struct se_lun_acl *lacl) |
1567 | { | 1532 | { |
1568 | struct se_node_acl *nacl; | 1533 | struct se_node_acl *nacl; |
1569 | 1534 | ||
1570 | nacl = lacl->se_lun_nacl; | 1535 | nacl = lacl->se_lun_nacl; |
1571 | if (!(nacl)) | 1536 | if (!(nacl)) |
1572 | return -EINVAL; | 1537 | return -EINVAL; |
1573 | 1538 | ||
1574 | spin_lock(&lun->lun_acl_lock); | 1539 | spin_lock(&lun->lun_acl_lock); |
1575 | list_del(&lacl->lacl_list); | 1540 | list_del(&lacl->lacl_list); |
1576 | atomic_dec(&lun->lun_acl_count); | 1541 | atomic_dec(&lun->lun_acl_count); |
1577 | smp_mb__after_atomic_dec(); | 1542 | smp_mb__after_atomic_dec(); |
1578 | spin_unlock(&lun->lun_acl_lock); | 1543 | spin_unlock(&lun->lun_acl_lock); |
1579 | 1544 | ||
1580 | core_update_device_list_for_node(lun, NULL, lacl->mapped_lun, | 1545 | core_update_device_list_for_node(lun, NULL, lacl->mapped_lun, |
1581 | TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0); | 1546 | TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0); |
1582 | 1547 | ||
1583 | lacl->se_lun = NULL; | 1548 | lacl->se_lun = NULL; |
1584 | 1549 | ||
1585 | printk(KERN_INFO "%s_TPG[%hu]_LUN[%u] - Removed ACL for" | 1550 | printk(KERN_INFO "%s_TPG[%hu]_LUN[%u] - Removed ACL for" |
1586 | " InitiatorNode: %s Mapped LUN: %u\n", | 1551 | " InitiatorNode: %s Mapped LUN: %u\n", |
1587 | TPG_TFO(tpg)->get_fabric_name(), | 1552 | TPG_TFO(tpg)->get_fabric_name(), |
1588 | TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun, | 1553 | TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun, |
1589 | lacl->initiatorname, lacl->mapped_lun); | 1554 | lacl->initiatorname, lacl->mapped_lun); |
1590 | 1555 | ||
1591 | return 0; | 1556 | return 0; |
1592 | } | 1557 | } |
1593 | 1558 | ||
1594 | void core_dev_free_initiator_node_lun_acl( | 1559 | void core_dev_free_initiator_node_lun_acl( |
1595 | struct se_portal_group *tpg, | 1560 | struct se_portal_group *tpg, |
1596 | struct se_lun_acl *lacl) | 1561 | struct se_lun_acl *lacl) |
1597 | { | 1562 | { |
1598 | printk("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s" | 1563 | printk("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s" |
1599 | " Mapped LUN: %u\n", TPG_TFO(tpg)->get_fabric_name(), | 1564 | " Mapped LUN: %u\n", TPG_TFO(tpg)->get_fabric_name(), |
1600 | TPG_TFO(tpg)->tpg_get_tag(tpg), | 1565 | TPG_TFO(tpg)->tpg_get_tag(tpg), |
1601 | TPG_TFO(tpg)->get_fabric_name(), | 1566 | TPG_TFO(tpg)->get_fabric_name(), |
1602 | lacl->initiatorname, lacl->mapped_lun); | 1567 | lacl->initiatorname, lacl->mapped_lun); |
1603 | 1568 | ||
1604 | kfree(lacl); | 1569 | kfree(lacl); |
1605 | } | 1570 | } |
1606 | 1571 | ||
1607 | int core_dev_setup_virtual_lun0(void) | 1572 | int core_dev_setup_virtual_lun0(void) |
1608 | { | 1573 | { |
1609 | struct se_hba *hba; | 1574 | struct se_hba *hba; |
1610 | struct se_device *dev; | 1575 | struct se_device *dev; |
1611 | struct se_subsystem_dev *se_dev = NULL; | 1576 | struct se_subsystem_dev *se_dev = NULL; |
1612 | struct se_subsystem_api *t; | 1577 | struct se_subsystem_api *t; |
1613 | char buf[16]; | 1578 | char buf[16]; |
1614 | int ret; | 1579 | int ret; |
1615 | 1580 | ||
1616 | hba = core_alloc_hba("rd_dr", 0, HBA_FLAGS_INTERNAL_USE); | 1581 | hba = core_alloc_hba("rd_dr", 0, HBA_FLAGS_INTERNAL_USE); |
1617 | if (IS_ERR(hba)) | 1582 | if (IS_ERR(hba)) |
1618 | return PTR_ERR(hba); | 1583 | return PTR_ERR(hba); |
1619 | 1584 | ||
1620 | se_global->g_lun0_hba = hba; | 1585 | se_global->g_lun0_hba = hba; |
1621 | t = hba->transport; | 1586 | t = hba->transport; |
1622 | 1587 | ||
1623 | se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL); | 1588 | se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL); |
1624 | if (!(se_dev)) { | 1589 | if (!(se_dev)) { |
1625 | printk(KERN_ERR "Unable to allocate memory for" | 1590 | printk(KERN_ERR "Unable to allocate memory for" |
1626 | " struct se_subsystem_dev\n"); | 1591 | " struct se_subsystem_dev\n"); |
1627 | ret = -ENOMEM; | 1592 | ret = -ENOMEM; |
1628 | goto out; | 1593 | goto out; |
1629 | } | 1594 | } |
1630 | INIT_LIST_HEAD(&se_dev->g_se_dev_list); | 1595 | INIT_LIST_HEAD(&se_dev->g_se_dev_list); |
1631 | INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list); | 1596 | INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list); |
1632 | spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock); | 1597 | spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock); |
1633 | INIT_LIST_HEAD(&se_dev->t10_reservation.registration_list); | 1598 | INIT_LIST_HEAD(&se_dev->t10_reservation.registration_list); |
1634 | INIT_LIST_HEAD(&se_dev->t10_reservation.aptpl_reg_list); | 1599 | INIT_LIST_HEAD(&se_dev->t10_reservation.aptpl_reg_list); |
1635 | spin_lock_init(&se_dev->t10_reservation.registration_lock); | 1600 | spin_lock_init(&se_dev->t10_reservation.registration_lock); |
1636 | spin_lock_init(&se_dev->t10_reservation.aptpl_reg_lock); | 1601 | spin_lock_init(&se_dev->t10_reservation.aptpl_reg_lock); |
1637 | INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list); | 1602 | INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list); |
1638 | spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock); | 1603 | spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock); |
1639 | spin_lock_init(&se_dev->se_dev_lock); | 1604 | spin_lock_init(&se_dev->se_dev_lock); |
1640 | se_dev->t10_reservation.pr_aptpl_buf_len = PR_APTPL_BUF_LEN; | 1605 | se_dev->t10_reservation.pr_aptpl_buf_len = PR_APTPL_BUF_LEN; |
1641 | se_dev->t10_wwn.t10_sub_dev = se_dev; | 1606 | se_dev->t10_wwn.t10_sub_dev = se_dev; |
1642 | se_dev->t10_alua.t10_sub_dev = se_dev; | 1607 | se_dev->t10_alua.t10_sub_dev = se_dev; |
1643 | se_dev->se_dev_attrib.da_sub_dev = se_dev; | 1608 | se_dev->se_dev_attrib.da_sub_dev = se_dev; |
1644 | se_dev->se_dev_hba = hba; | 1609 | se_dev->se_dev_hba = hba; |
1645 | 1610 | ||
1646 | se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0"); | 1611 | se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0"); |
1647 | if (!(se_dev->se_dev_su_ptr)) { | 1612 | if (!(se_dev->se_dev_su_ptr)) { |
1648 | printk(KERN_ERR "Unable to locate subsystem dependent pointer" | 1613 | printk(KERN_ERR "Unable to locate subsystem dependent pointer" |
1649 | " from allocate_virtdevice()\n"); | 1614 | " from allocate_virtdevice()\n"); |
1650 | ret = -ENOMEM; | 1615 | ret = -ENOMEM; |
1651 | goto out; | 1616 | goto out; |
1652 | } | 1617 | } |
1653 | se_global->g_lun0_su_dev = se_dev; | 1618 | se_global->g_lun0_su_dev = se_dev; |
1654 | 1619 | ||
1655 | memset(buf, 0, 16); | 1620 | memset(buf, 0, 16); |
1656 | sprintf(buf, "rd_pages=8"); | 1621 | sprintf(buf, "rd_pages=8"); |
1657 | t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf)); | 1622 | t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf)); |
1658 | 1623 | ||
1659 | dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr); | 1624 | dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr); |
1660 | if (!(dev) || IS_ERR(dev)) { | 1625 | if (!(dev) || IS_ERR(dev)) { |
1661 | ret = -ENOMEM; | 1626 | ret = -ENOMEM; |
1662 | goto out; | 1627 | goto out; |
1663 | } | 1628 | } |
1664 | se_dev->se_dev_ptr = dev; | 1629 | se_dev->se_dev_ptr = dev; |
1665 | se_global->g_lun0_dev = dev; | 1630 | se_global->g_lun0_dev = dev; |
1666 | 1631 | ||
1667 | return 0; | 1632 | return 0; |
1668 | out: | 1633 | out: |
1669 | se_global->g_lun0_su_dev = NULL; | 1634 | se_global->g_lun0_su_dev = NULL; |
1670 | kfree(se_dev); | 1635 | kfree(se_dev); |
1671 | if (se_global->g_lun0_hba) { | 1636 | if (se_global->g_lun0_hba) { |
1672 | core_delete_hba(se_global->g_lun0_hba); | 1637 | core_delete_hba(se_global->g_lun0_hba); |
1673 | se_global->g_lun0_hba = NULL; | 1638 | se_global->g_lun0_hba = NULL; |
1674 | } | 1639 | } |
1675 | return ret; | 1640 | return ret; |
1676 | } | 1641 | } |
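
Note that the LUN 0 setup above configures the internal ramdisk backend with the same text-based "key=value" parameter string ("rd_pages=8") that userspace would otherwise write through configfs. Below is a minimal sketch of how a backend's ->set_configfs_dev_params() handler might tokenize such a buffer with the kernel's linux/parser.h helpers; the function and option names are illustrative, not taken from the actual rd backend:

    #include <linux/parser.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    enum { Opt_rd_pages, Opt_err };

    static match_table_t tokens = {
    	{Opt_rd_pages,	"rd_pages=%d"},
    	{Opt_err,	NULL}
    };

    /* Hypothetical parser for a comma-separated "key=value" buffer. */
    static int example_parse_params(const char *page)
    {
    	substring_t args[MAX_OPT_ARGS];
    	char *orig, *ptr, *opts;
    	int ret = 0, token, pages;

    	opts = kstrdup(page, GFP_KERNEL);	/* strsep() writes to the buffer */
    	if (!opts)
    		return -ENOMEM;
    	orig = opts;

    	while ((ptr = strsep(&opts, ",")) != NULL) {
    		if (!*ptr)
    			continue;
    		token = match_token(ptr, tokens, args);
    		switch (token) {
    		case Opt_rd_pages:
    			ret = match_int(&args[0], &pages);
    			if (ret)
    				goto out;
    			/* ... record pages in the backend's private state ... */
    			break;
    		default:
    			break;
    		}
    	}
    out:
    	kfree(orig);
    	return ret;
    }
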
1677 | 1642 | ||
1678 | 1643 | ||
1679 | void core_dev_release_virtual_lun0(void) | 1644 | void core_dev_release_virtual_lun0(void) |
1680 | { | 1645 | { |
1681 | struct se_hba *hba = se_global->g_lun0_hba; | 1646 | struct se_hba *hba = se_global->g_lun0_hba; |
1682 | struct se_subsystem_dev *su_dev = se_global->g_lun0_su_dev; | 1647 | struct se_subsystem_dev *su_dev = se_global->g_lun0_su_dev; |
1683 | 1648 | ||
1684 | if (!(hba)) | 1649 | if (!(hba)) |
1685 | return; | 1650 | return; |
1686 | 1651 | ||
1687 | if (se_global->g_lun0_dev) | 1652 | if (se_global->g_lun0_dev) |
1688 | se_free_virtual_device(se_global->g_lun0_dev, hba); | 1653 | se_free_virtual_device(se_global->g_lun0_dev, hba); |
1689 | 1654 | ||
1690 | kfree(su_dev); | 1655 | kfree(su_dev); |
1691 | core_delete_hba(hba); | 1656 | core_delete_hba(hba); |
1692 | } | 1657 | } |
1693 | 1658 |
drivers/target/target_core_hba.c
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | * Filename: target_core_hba.c | 2 | * Filename: target_core_hba.c |
3 | * | 3 | * |
4 | * This file contains the iSCSI HBA Transport related functions. | 4 | * This file contains the iSCSI HBA Transport related functions. |
5 | * | 5 | * |
6 | * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc. | 6 | * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc. |
7 | * Copyright (c) 2005, 2006, 2007 SBE, Inc. | 7 | * Copyright (c) 2005, 2006, 2007 SBE, Inc. |
8 | * Copyright (c) 2007-2010 Rising Tide Systems | 8 | * Copyright (c) 2007-2010 Rising Tide Systems |
9 | * Copyright (c) 2008-2010 Linux-iSCSI.org | 9 | * Copyright (c) 2008-2010 Linux-iSCSI.org |
10 | * | 10 | * |
11 | * Nicholas A. Bellinger <nab@kernel.org> | 11 | * Nicholas A. Bellinger <nab@kernel.org> |
12 | * | 12 | * |
13 | * This program is free software; you can redistribute it and/or modify | 13 | * This program is free software; you can redistribute it and/or modify |
14 | * it under the terms of the GNU General Public License as published by | 14 | * it under the terms of the GNU General Public License as published by |
15 | * the Free Software Foundation; either version 2 of the License, or | 15 | * the Free Software Foundation; either version 2 of the License, or |
16 | * (at your option) any later version. | 16 | * (at your option) any later version. |
17 | * | 17 | * |
18 | * This program is distributed in the hope that it will be useful, | 18 | * This program is distributed in the hope that it will be useful, |
19 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 19 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
20 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 20 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
21 | * GNU General Public License for more details. | 21 | * GNU General Public License for more details. |
22 | * | 22 | * |
23 | * You should have received a copy of the GNU General Public License | 23 | * You should have received a copy of the GNU General Public License |
24 | * along with this program; if not, write to the Free Software | 24 | * along with this program; if not, write to the Free Software |
25 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | 25 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. |
26 | * | 26 | * |
27 | ******************************************************************************/ | 27 | ******************************************************************************/ |
28 | 28 | ||
29 | #include <linux/net.h> | 29 | #include <linux/net.h> |
30 | #include <linux/string.h> | 30 | #include <linux/string.h> |
31 | #include <linux/timer.h> | 31 | #include <linux/timer.h> |
32 | #include <linux/slab.h> | 32 | #include <linux/slab.h> |
33 | #include <linux/spinlock.h> | 33 | #include <linux/spinlock.h> |
34 | #include <linux/in.h> | 34 | #include <linux/in.h> |
35 | #include <net/sock.h> | 35 | #include <net/sock.h> |
36 | #include <net/tcp.h> | 36 | #include <net/tcp.h> |
37 | 37 | ||
38 | #include <target/target_core_base.h> | 38 | #include <target/target_core_base.h> |
39 | #include <target/target_core_device.h> | 39 | #include <target/target_core_device.h> |
40 | #include <target/target_core_tpg.h> | 40 | #include <target/target_core_tpg.h> |
41 | #include <target/target_core_transport.h> | 41 | #include <target/target_core_transport.h> |
42 | 42 | ||
43 | #include "target_core_hba.h" | 43 | #include "target_core_hba.h" |
44 | 44 | ||
45 | static LIST_HEAD(subsystem_list); | 45 | static LIST_HEAD(subsystem_list); |
46 | static DEFINE_MUTEX(subsystem_mutex); | 46 | static DEFINE_MUTEX(subsystem_mutex); |
47 | 47 | ||
48 | int transport_subsystem_register(struct se_subsystem_api *sub_api) | 48 | int transport_subsystem_register(struct se_subsystem_api *sub_api) |
49 | { | 49 | { |
50 | struct se_subsystem_api *s; | 50 | struct se_subsystem_api *s; |
51 | 51 | ||
52 | INIT_LIST_HEAD(&sub_api->sub_api_list); | 52 | INIT_LIST_HEAD(&sub_api->sub_api_list); |
53 | 53 | ||
54 | mutex_lock(&subsystem_mutex); | 54 | mutex_lock(&subsystem_mutex); |
55 | list_for_each_entry(s, &subsystem_list, sub_api_list) { | 55 | list_for_each_entry(s, &subsystem_list, sub_api_list) { |
56 | if (!(strcmp(s->name, sub_api->name))) { | 56 | if (!(strcmp(s->name, sub_api->name))) { |
57 | printk(KERN_ERR "%p is already registered with" | 57 | printk(KERN_ERR "%p is already registered with" |
58 | " duplicate name %s, unable to process" | 58 | " duplicate name %s, unable to process" |
59 | " request\n", s, s->name); | 59 | " request\n", s, s->name); |
60 | mutex_unlock(&subsystem_mutex); | 60 | mutex_unlock(&subsystem_mutex); |
61 | return -EEXIST; | 61 | return -EEXIST; |
62 | } | 62 | } |
63 | } | 63 | } |
64 | list_add_tail(&sub_api->sub_api_list, &subsystem_list); | 64 | list_add_tail(&sub_api->sub_api_list, &subsystem_list); |
65 | mutex_unlock(&subsystem_mutex); | 65 | mutex_unlock(&subsystem_mutex); |
66 | 66 | ||
67 | printk(KERN_INFO "TCM: Registered subsystem plugin: %s struct module:" | 67 | printk(KERN_INFO "TCM: Registered subsystem plugin: %s struct module:" |
68 | " %p\n", sub_api->name, sub_api->owner); | 68 | " %p\n", sub_api->name, sub_api->owner); |
69 | return 0; | 69 | return 0; |
70 | } | 70 | } |
71 | EXPORT_SYMBOL(transport_subsystem_register); | 71 | EXPORT_SYMBOL(transport_subsystem_register); |
72 | 72 | ||
73 | void transport_subsystem_release(struct se_subsystem_api *sub_api) | 73 | void transport_subsystem_release(struct se_subsystem_api *sub_api) |
74 | { | 74 | { |
75 | mutex_lock(&subsystem_mutex); | 75 | mutex_lock(&subsystem_mutex); |
76 | list_del(&sub_api->sub_api_list); | 76 | list_del(&sub_api->sub_api_list); |
77 | mutex_unlock(&subsystem_mutex); | 77 | mutex_unlock(&subsystem_mutex); |
78 | } | 78 | } |
79 | EXPORT_SYMBOL(transport_subsystem_release); | 79 | EXPORT_SYMBOL(transport_subsystem_release); |
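
A backend plugin pairs transport_subsystem_register() on module load with transport_subsystem_release() on unload; setting .owner to THIS_MODULE is what lets core_get_backend() below pin the module. A minimal sketch of a hypothetical backend module (all names are illustrative, not from the tree):

    static struct se_subsystem_api example_backend_template = {
    	.name	= "example_backend",
    	.owner	= THIS_MODULE,
    	/* ->attach_hba(), ->allocate_virtdevice(), ->create_virtdevice(),
    	 * etc. would be filled in here. */
    };

    static int __init example_backend_init(void)
    {
    	return transport_subsystem_register(&example_backend_template);
    }

    static void __exit example_backend_exit(void)
    {
    	transport_subsystem_release(&example_backend_template);
    }

    module_init(example_backend_init);
    module_exit(example_backend_exit);
    MODULE_LICENSE("GPL");
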
80 | 80 | ||
81 | static struct se_subsystem_api *core_get_backend(const char *sub_name) | 81 | static struct se_subsystem_api *core_get_backend(const char *sub_name) |
82 | { | 82 | { |
83 | struct se_subsystem_api *s; | 83 | struct se_subsystem_api *s; |
84 | 84 | ||
85 | mutex_lock(&subsystem_mutex); | 85 | mutex_lock(&subsystem_mutex); |
86 | list_for_each_entry(s, &subsystem_list, sub_api_list) { | 86 | list_for_each_entry(s, &subsystem_list, sub_api_list) { |
87 | if (!strcmp(s->name, sub_name)) | 87 | if (!strcmp(s->name, sub_name)) |
88 | goto found; | 88 | goto found; |
89 | } | 89 | } |
90 | mutex_unlock(&subsystem_mutex); | 90 | mutex_unlock(&subsystem_mutex); |
91 | return NULL; | 91 | return NULL; |
92 | found: | 92 | found: |
93 | if (s->owner && !try_module_get(s->owner)) | 93 | if (s->owner && !try_module_get(s->owner)) |
94 | s = NULL; | 94 | s = NULL; |
95 | mutex_unlock(&subsystem_mutex); | 95 | mutex_unlock(&subsystem_mutex); |
96 | return s; | 96 | return s; |
97 | } | 97 | } |
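
On the found path, try_module_get() pins the backend's module so it cannot be unloaded while an HBA references it; every successful core_get_backend() must later be balanced by module_put(), as core_alloc_hba()'s error path and core_delete_hba() below both do. A hedged sketch of that contract ("iblock" is just an example backend name):

    static int example_pin_backend(void)
    {
    	struct se_subsystem_api *t = core_get_backend("iblock");

    	if (!t)
    		return -EINVAL;
    	/* The backend module cannot be unloaded while we hold it. */
    	printk(KERN_INFO "Pinned backend: %s\n", t->name);
    	if (t->owner)
    		module_put(t->owner);	/* balance the try_module_get() */
    	return 0;
    }
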
98 | 98 | ||
99 | struct se_hba * | 99 | struct se_hba * |
100 | core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags) | 100 | core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags) |
101 | { | 101 | { |
102 | struct se_hba *hba; | 102 | struct se_hba *hba; |
103 | int ret = 0; | 103 | int ret = 0; |
104 | 104 | ||
105 | hba = kzalloc(sizeof(*hba), GFP_KERNEL); | 105 | hba = kzalloc(sizeof(*hba), GFP_KERNEL); |
106 | if (!hba) { | 106 | if (!hba) { |
107 | printk(KERN_ERR "Unable to allocate struct se_hba\n"); | 107 | printk(KERN_ERR "Unable to allocate struct se_hba\n"); |
108 | return ERR_PTR(-ENOMEM); | 108 | return ERR_PTR(-ENOMEM); |
109 | } | 109 | } |
110 | 110 | ||
111 | INIT_LIST_HEAD(&hba->hba_dev_list); | 111 | INIT_LIST_HEAD(&hba->hba_dev_list); |
112 | spin_lock_init(&hba->device_lock); | 112 | spin_lock_init(&hba->device_lock); |
113 | spin_lock_init(&hba->hba_queue_lock); | 113 | spin_lock_init(&hba->hba_queue_lock); |
114 | mutex_init(&hba->hba_access_mutex); | 114 | mutex_init(&hba->hba_access_mutex); |
115 | 115 | ||
116 | hba->hba_index = scsi_get_new_index(SCSI_INST_INDEX); | 116 | hba->hba_index = scsi_get_new_index(SCSI_INST_INDEX); |
117 | hba->hba_flags |= hba_flags; | 117 | hba->hba_flags |= hba_flags; |
118 | 118 | ||
119 | atomic_set(&hba->max_queue_depth, 0); | 119 | atomic_set(&hba->max_queue_depth, 0); |
120 | atomic_set(&hba->left_queue_depth, 0); | 120 | atomic_set(&hba->left_queue_depth, 0); |
121 | 121 | ||
122 | hba->transport = core_get_backend(plugin_name); | 122 | hba->transport = core_get_backend(plugin_name); |
123 | if (!hba->transport) { | 123 | if (!hba->transport) { |
124 | ret = -EINVAL; | 124 | ret = -EINVAL; |
125 | goto out_free_hba; | 125 | goto out_free_hba; |
126 | } | 126 | } |
127 | 127 | ||
128 | ret = hba->transport->attach_hba(hba, plugin_dep_id); | 128 | ret = hba->transport->attach_hba(hba, plugin_dep_id); |
129 | if (ret < 0) | 129 | if (ret < 0) |
130 | goto out_module_put; | 130 | goto out_module_put; |
131 | 131 | ||
132 | spin_lock(&se_global->hba_lock); | 132 | spin_lock(&se_global->hba_lock); |
133 | hba->hba_id = se_global->g_hba_id_counter++; | 133 | hba->hba_id = se_global->g_hba_id_counter++; |
134 | list_add_tail(&hba->hba_list, &se_global->g_hba_list); | 134 | list_add_tail(&hba->hba_list, &se_global->g_hba_list); |
135 | spin_unlock(&se_global->hba_lock); | 135 | spin_unlock(&se_global->hba_lock); |
136 | 136 | ||
137 | printk(KERN_INFO "CORE_HBA[%d] - Attached HBA to Generic Target" | 137 | printk(KERN_INFO "CORE_HBA[%d] - Attached HBA to Generic Target" |
138 | " Core\n", hba->hba_id); | 138 | " Core\n", hba->hba_id); |
139 | 139 | ||
140 | return hba; | 140 | return hba; |
141 | 141 | ||
142 | out_module_put: | 142 | out_module_put: |
143 | if (hba->transport->owner) | 143 | if (hba->transport->owner) |
144 | module_put(hba->transport->owner); | 144 | module_put(hba->transport->owner); |
145 | hba->transport = NULL; | 145 | hba->transport = NULL; |
146 | out_free_hba: | 146 | out_free_hba: |
147 | kfree(hba); | 147 | kfree(hba); |
148 | return ERR_PTR(ret); | 148 | return ERR_PTR(ret); |
149 | } | 149 | } |
150 | 150 | ||
151 | int | 151 | int |
152 | core_delete_hba(struct se_hba *hba) | 152 | core_delete_hba(struct se_hba *hba) |
153 | { | 153 | { |
154 | struct se_device *dev, *dev_tmp; | 154 | if (!list_empty(&hba->hba_dev_list)) |
155 | 155 | dump_stack(); | |
156 | spin_lock(&hba->device_lock); | ||
157 | list_for_each_entry_safe(dev, dev_tmp, &hba->hba_dev_list, dev_list) { | ||
158 | |||
159 | se_clear_dev_ports(dev); | ||
160 | spin_unlock(&hba->device_lock); | ||
161 | |||
162 | se_release_device_for_hba(dev); | ||
163 | |||
164 | spin_lock(&hba->device_lock); | ||
165 | } | ||
166 | spin_unlock(&hba->device_lock); | ||
167 | 156 | ||
168 | hba->transport->detach_hba(hba); | 157 | hba->transport->detach_hba(hba); |
169 | 158 | ||
170 | spin_lock(&se_global->hba_lock); | 159 | spin_lock(&se_global->hba_lock); |
171 | list_del(&hba->hba_list); | 160 | list_del(&hba->hba_list); |
172 | spin_unlock(&se_global->hba_lock); | 161 | spin_unlock(&se_global->hba_lock); |
173 | 162 | ||
174 | printk(KERN_INFO "CORE_HBA[%d] - Detached HBA from Generic Target" | 163 | printk(KERN_INFO "CORE_HBA[%d] - Detached HBA from Generic Target" |
175 | " Core\n", hba->hba_id); | 164 | " Core\n", hba->hba_id); |
176 | 165 | ||
177 | if (hba->transport->owner) | 166 | if (hba->transport->owner) |
178 | module_put(hba->transport->owner); | 167 | module_put(hba->transport->owner); |
179 | 168 | ||
180 | hba->transport = NULL; | 169 | hba->transport = NULL; |
181 | kfree(hba); | 170 | kfree(hba); |
182 | return 0; | 171 | return 0; |
183 | } | 172 | } |
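
With the device-list walk removed, the dump_stack() above serves as a debugging assertion: by the time core_delete_hba() runs, the HBA's device list should already be empty, so a non-empty list signals a teardown-ordering bug rather than a normal condition. The same check could be written more self-descriptively with WARN() (a sketch only, not what this commit does):

    /* Sketch: the same sanity check as an explicit kernel warning. */
    WARN(!list_empty(&hba->hba_dev_list),
         "CORE_HBA[%d] still has devices attached at delete\n",
         hba->hba_id);
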
184 | 173 |