Commit f385b6974bf93cd4335495437a6ee82fa5237df7
Exists in
master
and in
4 other branches
Merge branch '3.1-rc-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending
* '3.1-rc-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (21 commits) target: Convert acl_node_lock to be IRQ-disabling target: Make locking in transport_deregister_session() IRQ safe tcm_fc: init/exit functions should not be protected by "#ifdef MODULE" target: Print subpage too for unhandled MODE SENSE pages iscsi-target: Fix iscsit_allocate_se_cmd_for_tmr failure path bugs iscsi-target: Implement iSCSI target IPv6 address printing. target: Fix task SGL chaining breakage with transport_allocate_data_tasks target: Fix task count > 1 handling breakage and use max_sector page alignment target: Add missing DATA_SG_IO transport_cmd_get_valid_sectors check target: Fix SYNCHRONIZE_CACHE zero LBA + range breakage target: Remove duplicate task completions in transport_emulate_control_cdb target: Fix WRITE_SAME usage with transport_get_size target: Add WRITE_SAME (10) parsing and refactor passthrough checks target: Fix write payload exception handling with ->new_cmd_map iscsi-target: forever loop bug in iscsit_attach_ooo_cmdsn() iscsi-target: remove duplicate return target: Convert target_core_rd.c to use use BUG_ON iscsi-target: Fix leak on failure in iscsi_copy_param_list() target: Use ERR_CAST inlined function target: Make standard INQUIRY return 'not connected' for tpg_virt_lun0 ...
Showing 15 changed files Side-by-side Diff
- drivers/target/iscsi/iscsi_target.c
- drivers/target/iscsi/iscsi_target_configfs.c
- drivers/target/iscsi/iscsi_target_erl1.c
- drivers/target/iscsi/iscsi_target_login.c
- drivers/target/iscsi/iscsi_target_parameters.c
- drivers/target/iscsi/iscsi_target_util.c
- drivers/target/target_core_cdb.c
- drivers/target/target_core_device.c
- drivers/target/target_core_fabric_configfs.c
- drivers/target/target_core_pr.c
- drivers/target/target_core_rd.c
- drivers/target/target_core_tpg.c
- drivers/target/target_core_transport.c
- drivers/target/tcm_fc/tfc_conf.c
- include/target/target_core_fabric_ops.h
drivers/target/iscsi/iscsi_target.c
... | ... | @@ -2243,7 +2243,6 @@ |
2243 | 2243 | case 0: |
2244 | 2244 | return iscsit_handle_recovery_datain_or_r2t(conn, buf, |
2245 | 2245 | hdr->itt, hdr->ttt, hdr->begrun, hdr->runlength); |
2246 | - return 0; | |
2247 | 2246 | case ISCSI_FLAG_SNACK_TYPE_STATUS: |
2248 | 2247 | return iscsit_handle_status_snack(conn, hdr->itt, hdr->ttt, |
2249 | 2248 | hdr->begrun, hdr->runlength); |
drivers/target/iscsi/iscsi_target_configfs.c
... | ... | @@ -268,7 +268,7 @@ |
268 | 268 | ISCSI_TCP); |
269 | 269 | if (IS_ERR(tpg_np)) { |
270 | 270 | iscsit_put_tpg(tpg); |
271 | - return ERR_PTR(PTR_ERR(tpg_np)); | |
271 | + return ERR_CAST(tpg_np); | |
272 | 272 | } |
273 | 273 | pr_debug("LIO_Target_ConfigFS: addnptotpg done!\n"); |
274 | 274 | |
... | ... | @@ -1285,7 +1285,7 @@ |
1285 | 1285 | |
1286 | 1286 | tiqn = iscsit_add_tiqn((unsigned char *)name); |
1287 | 1287 | if (IS_ERR(tiqn)) |
1288 | - return ERR_PTR(PTR_ERR(tiqn)); | |
1288 | + return ERR_CAST(tiqn); | |
1289 | 1289 | /* |
1290 | 1290 | * Setup struct iscsi_wwn_stat_grps for se_wwn->fabric_stat_group. |
1291 | 1291 | */ |
drivers/target/iscsi/iscsi_target_erl1.c
drivers/target/iscsi/iscsi_target_login.c
... | ... | @@ -1013,19 +1013,9 @@ |
1013 | 1013 | ISCSI_LOGIN_STATUS_TARGET_ERROR); |
1014 | 1014 | goto new_sess_out; |
1015 | 1015 | } |
1016 | -#if 0 | |
1017 | - if (!iscsi_ntop6((const unsigned char *) | |
1018 | - &sock_in6.sin6_addr.in6_u, | |
1019 | - (char *)&conn->ipv6_login_ip[0], | |
1020 | - IPV6_ADDRESS_SPACE)) { | |
1021 | - pr_err("iscsi_ntop6() failed\n"); | |
1022 | - iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, | |
1023 | - ISCSI_LOGIN_STATUS_TARGET_ERROR); | |
1024 | - goto new_sess_out; | |
1025 | - } | |
1026 | -#else | |
1027 | - pr_debug("Skipping iscsi_ntop6()\n"); | |
1028 | -#endif | |
1016 | + snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c", | |
1017 | + &sock_in6.sin6_addr.in6_u); | |
1018 | + conn->login_port = ntohs(sock_in6.sin6_port); | |
1029 | 1019 | } else { |
1030 | 1020 | memset(&sock_in, 0, sizeof(struct sockaddr_in)); |
1031 | 1021 |
drivers/target/iscsi/iscsi_target_parameters.c
... | ... | @@ -545,13 +545,13 @@ |
545 | 545 | struct iscsi_param_list *src_param_list, |
546 | 546 | int leading) |
547 | 547 | { |
548 | - struct iscsi_param *new_param = NULL, *param = NULL; | |
548 | + struct iscsi_param *param = NULL; | |
549 | + struct iscsi_param *new_param = NULL; | |
549 | 550 | struct iscsi_param_list *param_list = NULL; |
550 | 551 | |
551 | 552 | param_list = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL); |
552 | 553 | if (!param_list) { |
553 | - pr_err("Unable to allocate memory for" | |
554 | - " struct iscsi_param_list.\n"); | |
554 | + pr_err("Unable to allocate memory for struct iscsi_param_list.\n"); | |
555 | 555 | goto err_out; |
556 | 556 | } |
557 | 557 | INIT_LIST_HEAD(¶m_list->param_list); |
558 | 558 | |
... | ... | @@ -567,11 +567,20 @@ |
567 | 567 | |
568 | 568 | new_param = kzalloc(sizeof(struct iscsi_param), GFP_KERNEL); |
569 | 569 | if (!new_param) { |
570 | - pr_err("Unable to allocate memory for" | |
571 | - " struct iscsi_param.\n"); | |
570 | + pr_err("Unable to allocate memory for struct iscsi_param.\n"); | |
572 | 571 | goto err_out; |
573 | 572 | } |
574 | 573 | |
574 | + new_param->name = kstrdup(param->name, GFP_KERNEL); | |
575 | + new_param->value = kstrdup(param->value, GFP_KERNEL); | |
576 | + if (!new_param->value || !new_param->name) { | |
577 | + kfree(new_param->value); | |
578 | + kfree(new_param->name); | |
579 | + kfree(new_param); | |
580 | + pr_err("Unable to allocate memory for parameter name/value.\n"); | |
581 | + goto err_out; | |
582 | + } | |
583 | + | |
575 | 584 | new_param->set_param = param->set_param; |
576 | 585 | new_param->phase = param->phase; |
577 | 586 | new_param->scope = param->scope; |
578 | 587 | |
579 | 588 | |
... | ... | @@ -580,32 +589,12 @@ |
580 | 589 | new_param->use = param->use; |
581 | 590 | new_param->type_range = param->type_range; |
582 | 591 | |
583 | - new_param->name = kzalloc(strlen(param->name) + 1, GFP_KERNEL); | |
584 | - if (!new_param->name) { | |
585 | - pr_err("Unable to allocate memory for" | |
586 | - " parameter name.\n"); | |
587 | - goto err_out; | |
588 | - } | |
589 | - | |
590 | - new_param->value = kzalloc(strlen(param->value) + 1, | |
591 | - GFP_KERNEL); | |
592 | - if (!new_param->value) { | |
593 | - pr_err("Unable to allocate memory for" | |
594 | - " parameter value.\n"); | |
595 | - goto err_out; | |
596 | - } | |
597 | - | |
598 | - memcpy(new_param->name, param->name, strlen(param->name)); | |
599 | - new_param->name[strlen(param->name)] = '\0'; | |
600 | - memcpy(new_param->value, param->value, strlen(param->value)); | |
601 | - new_param->value[strlen(param->value)] = '\0'; | |
602 | - | |
603 | 592 | list_add_tail(&new_param->p_list, ¶m_list->param_list); |
604 | 593 | } |
605 | 594 | |
606 | - if (!list_empty(¶m_list->param_list)) | |
595 | + if (!list_empty(¶m_list->param_list)) { | |
607 | 596 | *dst_param_list = param_list; |
608 | - else { | |
597 | + } else { | |
609 | 598 | pr_err("No parameters allocated.\n"); |
610 | 599 | goto err_out; |
611 | 600 | } |
drivers/target/iscsi/iscsi_target_util.c
... | ... | @@ -243,7 +243,7 @@ |
243 | 243 | if (!cmd->tmr_req) { |
244 | 244 | pr_err("Unable to allocate memory for" |
245 | 245 | " Task Management command!\n"); |
246 | - return NULL; | |
246 | + goto out; | |
247 | 247 | } |
248 | 248 | /* |
249 | 249 | * TASK_REASSIGN for ERL=2 / connection stays inside of |
... | ... | @@ -298,8 +298,6 @@ |
298 | 298 | return cmd; |
299 | 299 | out: |
300 | 300 | iscsit_release_cmd(cmd); |
301 | - if (se_cmd) | |
302 | - transport_free_se_cmd(se_cmd); | |
303 | 301 | return NULL; |
304 | 302 | } |
305 | 303 |
drivers/target/target_core_cdb.c
... | ... | @@ -67,6 +67,7 @@ |
67 | 67 | { |
68 | 68 | struct se_lun *lun = cmd->se_lun; |
69 | 69 | struct se_device *dev = cmd->se_dev; |
70 | + struct se_portal_group *tpg = lun->lun_sep->sep_tpg; | |
70 | 71 | unsigned char *buf; |
71 | 72 | |
72 | 73 | /* |
... | ... | @@ -81,9 +82,13 @@ |
81 | 82 | |
82 | 83 | buf = transport_kmap_first_data_page(cmd); |
83 | 84 | |
84 | - buf[0] = dev->transport->get_device_type(dev); | |
85 | - if (buf[0] == TYPE_TAPE) | |
86 | - buf[1] = 0x80; | |
85 | + if (dev == tpg->tpg_virt_lun0.lun_se_dev) { | |
86 | + buf[0] = 0x3f; /* Not connected */ | |
87 | + } else { | |
88 | + buf[0] = dev->transport->get_device_type(dev); | |
89 | + if (buf[0] == TYPE_TAPE) | |
90 | + buf[1] = 0x80; | |
91 | + } | |
87 | 92 | buf[2] = dev->transport->get_device_rev(dev); |
88 | 93 | |
89 | 94 | /* |
... | ... | @@ -915,8 +920,8 @@ |
915 | 920 | length += target_modesense_control(dev, &buf[offset+length]); |
916 | 921 | break; |
917 | 922 | default: |
918 | - pr_err("Got Unknown Mode Page: 0x%02x\n", | |
919 | - cdb[2] & 0x3f); | |
923 | + pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n", | |
924 | + cdb[2] & 0x3f, cdb[3]); | |
920 | 925 | return PYX_TRANSPORT_UNKNOWN_MODE_PAGE; |
921 | 926 | } |
922 | 927 | offset += length; |
... | ... | @@ -1072,8 +1077,6 @@ |
1072 | 1077 | size -= 16; |
1073 | 1078 | } |
1074 | 1079 | |
1075 | - task->task_scsi_status = GOOD; | |
1076 | - transport_complete_task(task, 1); | |
1077 | 1080 | err: |
1078 | 1081 | transport_kunmap_first_data_page(cmd); |
1079 | 1082 | |
1080 | 1083 | |
1081 | 1084 | |
1082 | 1085 | |
... | ... | @@ -1085,24 +1088,17 @@ |
1085 | 1088 | * Note this is not used for TCM/pSCSI passthrough |
1086 | 1089 | */ |
1087 | 1090 | static int |
1088 | -target_emulate_write_same(struct se_task *task, int write_same32) | |
1091 | +target_emulate_write_same(struct se_task *task, u32 num_blocks) | |
1089 | 1092 | { |
1090 | 1093 | struct se_cmd *cmd = task->task_se_cmd; |
1091 | 1094 | struct se_device *dev = cmd->se_dev; |
1092 | 1095 | sector_t range; |
1093 | 1096 | sector_t lba = cmd->t_task_lba; |
1094 | - unsigned int num_blocks; | |
1095 | 1097 | int ret; |
1096 | 1098 | /* |
1097 | - * Extract num_blocks from the WRITE_SAME_* CDB. Then use the explict | |
1098 | - * range when non zero is supplied, otherwise calculate the remaining | |
1099 | - * range based on ->get_blocks() - starting LBA. | |
1099 | + * Use the explicit range when non zero is supplied, otherwise calculate | |
1100 | + * the remaining range based on ->get_blocks() - starting LBA. | |
1100 | 1101 | */ |
1101 | - if (write_same32) | |
1102 | - num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]); | |
1103 | - else | |
1104 | - num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]); | |
1105 | - | |
1106 | 1102 | if (num_blocks != 0) |
1107 | 1103 | range = num_blocks; |
1108 | 1104 | else |
... | ... | @@ -1117,8 +1113,6 @@ |
1117 | 1113 | return ret; |
1118 | 1114 | } |
1119 | 1115 | |
1120 | - task->task_scsi_status = GOOD; | |
1121 | - transport_complete_task(task, 1); | |
1122 | 1116 | return 0; |
1123 | 1117 | } |
1124 | 1118 | |
1125 | 1119 | |
... | ... | @@ -1165,13 +1159,23 @@ |
1165 | 1159 | } |
1166 | 1160 | ret = target_emulate_unmap(task); |
1167 | 1161 | break; |
1162 | + case WRITE_SAME: | |
1163 | + if (!dev->transport->do_discard) { | |
1164 | + pr_err("WRITE_SAME emulation not supported" | |
1165 | + " for: %s\n", dev->transport->name); | |
1166 | + return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; | |
1167 | + } | |
1168 | + ret = target_emulate_write_same(task, | |
1169 | + get_unaligned_be16(&cmd->t_task_cdb[7])); | |
1170 | + break; | |
1168 | 1171 | case WRITE_SAME_16: |
1169 | 1172 | if (!dev->transport->do_discard) { |
1170 | 1173 | pr_err("WRITE_SAME_16 emulation not supported" |
1171 | 1174 | " for: %s\n", dev->transport->name); |
1172 | 1175 | return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; |
1173 | 1176 | } |
1174 | - ret = target_emulate_write_same(task, 0); | |
1177 | + ret = target_emulate_write_same(task, | |
1178 | + get_unaligned_be32(&cmd->t_task_cdb[10])); | |
1175 | 1179 | break; |
1176 | 1180 | case VARIABLE_LENGTH_CMD: |
1177 | 1181 | service_action = |
... | ... | @@ -1184,7 +1188,8 @@ |
1184 | 1188 | dev->transport->name); |
1185 | 1189 | return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; |
1186 | 1190 | } |
1187 | - ret = target_emulate_write_same(task, 1); | |
1191 | + ret = target_emulate_write_same(task, | |
1192 | + get_unaligned_be32(&cmd->t_task_cdb[28])); | |
1188 | 1193 | break; |
1189 | 1194 | default: |
1190 | 1195 | pr_err("Unsupported VARIABLE_LENGTH_CMD SA:" |
... | ... | @@ -1219,8 +1224,14 @@ |
1219 | 1224 | |
1220 | 1225 | if (ret < 0) |
1221 | 1226 | return ret; |
1222 | - task->task_scsi_status = GOOD; | |
1223 | - transport_complete_task(task, 1); | |
1227 | + /* | |
1228 | + * Handle the successful completion here unless a caller | |
1229 | + * has explicitly requested an asynchronous completion. | 
1230 | + */ | |
1231 | + if (!(cmd->se_cmd_flags & SCF_EMULATE_CDB_ASYNC)) { | |
1232 | + task->task_scsi_status = GOOD; | |
1233 | + transport_complete_task(task, 1); | |
1234 | + } | |
1224 | 1235 | |
1225 | 1236 | return PYX_TRANSPORT_SENT_TO_TRANSPORT; |
1226 | 1237 | } |
drivers/target/target_core_device.c
... | ... | @@ -472,9 +472,9 @@ |
472 | 472 | struct se_dev_entry *deve; |
473 | 473 | u32 i; |
474 | 474 | |
475 | - spin_lock_bh(&tpg->acl_node_lock); | |
475 | + spin_lock_irq(&tpg->acl_node_lock); | |
476 | 476 | list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) { |
477 | - spin_unlock_bh(&tpg->acl_node_lock); | |
477 | + spin_unlock_irq(&tpg->acl_node_lock); | |
478 | 478 | |
479 | 479 | spin_lock_irq(&nacl->device_list_lock); |
480 | 480 | for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { |
481 | 481 | |
... | ... | @@ -491,9 +491,9 @@ |
491 | 491 | } |
492 | 492 | spin_unlock_irq(&nacl->device_list_lock); |
493 | 493 | |
494 | - spin_lock_bh(&tpg->acl_node_lock); | |
494 | + spin_lock_irq(&tpg->acl_node_lock); | |
495 | 495 | } |
496 | - spin_unlock_bh(&tpg->acl_node_lock); | |
496 | + spin_unlock_irq(&tpg->acl_node_lock); | |
497 | 497 | } |
498 | 498 | |
499 | 499 | static struct se_port *core_alloc_port(struct se_device *dev) |
... | ... | @@ -839,6 +839,24 @@ |
839 | 839 | return ret; |
840 | 840 | } |
841 | 841 | |
842 | +u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size) | |
843 | +{ | |
844 | + u32 tmp, aligned_max_sectors; | |
845 | + /* | |
846 | + * Limit max_sectors to a PAGE_SIZE aligned value for modern | |
847 | + * transport_allocate_data_tasks() operation. | |
848 | + */ | |
849 | + tmp = rounddown((max_sectors * block_size), PAGE_SIZE); | |
850 | + aligned_max_sectors = (tmp / block_size); | |
851 | + if (max_sectors != aligned_max_sectors) { | |
852 | + printk(KERN_INFO "Rounding down aligned max_sectors from %u" | |
853 | + " to %u\n", max_sectors, aligned_max_sectors); | |
854 | + return aligned_max_sectors; | |
855 | + } | |
856 | + | |
857 | + return max_sectors; | |
858 | +} | |
859 | + | |
842 | 860 | void se_dev_set_default_attribs( |
843 | 861 | struct se_device *dev, |
844 | 862 | struct se_dev_limits *dev_limits) |
... | ... | @@ -878,6 +896,11 @@ |
878 | 896 | * max_sectors is based on subsystem plugin dependent requirements. |
879 | 897 | */ |
880 | 898 | dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors; |
899 | + /* | |
900 | + * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks() | |
901 | + */ | |
902 | + limits->max_sectors = se_dev_align_max_sectors(limits->max_sectors, | |
903 | + limits->logical_block_size); | |
881 | 904 | dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors; |
882 | 905 | /* |
883 | 906 | * Set optimal_sectors from max_sectors, which can be lowered via |
... | ... | @@ -1242,6 +1265,11 @@ |
1242 | 1265 | return -EINVAL; |
1243 | 1266 | } |
1244 | 1267 | } |
1268 | + /* | |
1269 | + * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks() | |
1270 | + */ | |
1271 | + max_sectors = se_dev_align_max_sectors(max_sectors, | |
1272 | + dev->se_sub_dev->se_dev_attrib.block_size); | |
1245 | 1273 | |
1246 | 1274 | dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors; |
1247 | 1275 | pr_debug("dev[%p]: SE Device max_sectors changed to %u\n", |
1248 | 1276 | |
1249 | 1277 | |
1250 | 1278 | |
... | ... | @@ -1344,15 +1372,17 @@ |
1344 | 1372 | */ |
1345 | 1373 | if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) { |
1346 | 1374 | struct se_node_acl *acl; |
1347 | - spin_lock_bh(&tpg->acl_node_lock); | |
1375 | + spin_lock_irq(&tpg->acl_node_lock); | |
1348 | 1376 | list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { |
1349 | - if (acl->dynamic_node_acl) { | |
1350 | - spin_unlock_bh(&tpg->acl_node_lock); | |
1377 | + if (acl->dynamic_node_acl && | |
1378 | + (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only || | |
1379 | + !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) { | |
1380 | + spin_unlock_irq(&tpg->acl_node_lock); | |
1351 | 1381 | core_tpg_add_node_to_devs(acl, tpg); |
1352 | - spin_lock_bh(&tpg->acl_node_lock); | |
1382 | + spin_lock_irq(&tpg->acl_node_lock); | |
1353 | 1383 | } |
1354 | 1384 | } |
1355 | - spin_unlock_bh(&tpg->acl_node_lock); | |
1385 | + spin_unlock_irq(&tpg->acl_node_lock); | |
1356 | 1386 | } |
1357 | 1387 | |
1358 | 1388 | return lun_p; |
drivers/target/target_core_fabric_configfs.c
... | ... | @@ -481,7 +481,7 @@ |
481 | 481 | |
482 | 482 | se_nacl = tf->tf_ops.fabric_make_nodeacl(se_tpg, group, name); |
483 | 483 | if (IS_ERR(se_nacl)) |
484 | - return ERR_PTR(PTR_ERR(se_nacl)); | |
484 | + return ERR_CAST(se_nacl); | |
485 | 485 | |
486 | 486 | nacl_cg = &se_nacl->acl_group; |
487 | 487 | nacl_cg->default_groups = se_nacl->acl_default_groups; |
drivers/target/target_core_pr.c
... | ... | @@ -1598,14 +1598,14 @@ |
1598 | 1598 | * from the decoded fabric module specific TransportID |
1599 | 1599 | * at *i_str. |
1600 | 1600 | */ |
1601 | - spin_lock_bh(&tmp_tpg->acl_node_lock); | |
1601 | + spin_lock_irq(&tmp_tpg->acl_node_lock); | |
1602 | 1602 | dest_node_acl = __core_tpg_get_initiator_node_acl( |
1603 | 1603 | tmp_tpg, i_str); |
1604 | 1604 | if (dest_node_acl) { |
1605 | 1605 | atomic_inc(&dest_node_acl->acl_pr_ref_count); |
1606 | 1606 | smp_mb__after_atomic_inc(); |
1607 | 1607 | } |
1608 | - spin_unlock_bh(&tmp_tpg->acl_node_lock); | |
1608 | + spin_unlock_irq(&tmp_tpg->acl_node_lock); | |
1609 | 1609 | |
1610 | 1610 | if (!dest_node_acl) { |
1611 | 1611 | core_scsi3_tpg_undepend_item(tmp_tpg); |
1612 | 1612 | |
... | ... | @@ -3496,14 +3496,14 @@ |
3496 | 3496 | /* |
3497 | 3497 | * Locate the destination struct se_node_acl from the received Transport ID |
3498 | 3498 | */ |
3499 | - spin_lock_bh(&dest_se_tpg->acl_node_lock); | |
3499 | + spin_lock_irq(&dest_se_tpg->acl_node_lock); | |
3500 | 3500 | dest_node_acl = __core_tpg_get_initiator_node_acl(dest_se_tpg, |
3501 | 3501 | initiator_str); |
3502 | 3502 | if (dest_node_acl) { |
3503 | 3503 | atomic_inc(&dest_node_acl->acl_pr_ref_count); |
3504 | 3504 | smp_mb__after_atomic_inc(); |
3505 | 3505 | } |
3506 | - spin_unlock_bh(&dest_se_tpg->acl_node_lock); | |
3506 | + spin_unlock_irq(&dest_se_tpg->acl_node_lock); | |
3507 | 3507 | |
3508 | 3508 | if (!dest_node_acl) { |
3509 | 3509 | pr_err("Unable to locate %s dest_node_acl for" |
drivers/target/target_core_rd.c
... | ... | @@ -390,12 +390,10 @@ |
390 | 390 | length = req->rd_size; |
391 | 391 | |
392 | 392 | dst = sg_virt(&sg_d[i++]) + dst_offset; |
393 | - if (!dst) | |
394 | - BUG(); | |
393 | + BUG_ON(!dst); | |
395 | 394 | |
396 | 395 | src = sg_virt(&sg_s[j]) + src_offset; |
397 | - if (!src) | |
398 | - BUG(); | |
396 | + BUG_ON(!src); | |
399 | 397 | |
400 | 398 | dst_offset = 0; |
401 | 399 | src_offset = length; |
... | ... | @@ -415,8 +413,7 @@ |
415 | 413 | length = req->rd_size; |
416 | 414 | |
417 | 415 | dst = sg_virt(&sg_d[i]) + dst_offset; |
418 | - if (!dst) | |
419 | - BUG(); | |
416 | + BUG_ON(!dst); | |
420 | 417 | |
421 | 418 | if (sg_d[i].length == length) { |
422 | 419 | i++; |
... | ... | @@ -425,8 +422,7 @@ |
425 | 422 | dst_offset = length; |
426 | 423 | |
427 | 424 | src = sg_virt(&sg_s[j++]) + src_offset; |
428 | - if (!src) | |
429 | - BUG(); | |
425 | + BUG_ON(!src); | |
430 | 426 | |
431 | 427 | src_offset = 0; |
432 | 428 | page_end = 1; |
433 | 429 | |
... | ... | @@ -510,12 +506,10 @@ |
510 | 506 | length = req->rd_size; |
511 | 507 | |
512 | 508 | src = sg_virt(&sg_s[i++]) + src_offset; |
513 | - if (!src) | |
514 | - BUG(); | |
509 | + BUG_ON(!src); | |
515 | 510 | |
516 | 511 | dst = sg_virt(&sg_d[j]) + dst_offset; |
517 | - if (!dst) | |
518 | - BUG(); | |
512 | + BUG_ON(!dst); | |
519 | 513 | |
520 | 514 | src_offset = 0; |
521 | 515 | dst_offset = length; |
... | ... | @@ -535,8 +529,7 @@ |
535 | 529 | length = req->rd_size; |
536 | 530 | |
537 | 531 | src = sg_virt(&sg_s[i]) + src_offset; |
538 | - if (!src) | |
539 | - BUG(); | |
532 | + BUG_ON(!src); | |
540 | 533 | |
541 | 534 | if (sg_s[i].length == length) { |
542 | 535 | i++; |
... | ... | @@ -545,8 +538,7 @@ |
545 | 538 | src_offset = length; |
546 | 539 | |
547 | 540 | dst = sg_virt(&sg_d[j++]) + dst_offset; |
548 | - if (!dst) | |
549 | - BUG(); | |
541 | + BUG_ON(!dst); | |
550 | 542 | |
551 | 543 | dst_offset = 0; |
552 | 544 | page_end = 1; |
drivers/target/target_core_tpg.c
... | ... | @@ -137,15 +137,15 @@ |
137 | 137 | { |
138 | 138 | struct se_node_acl *acl; |
139 | 139 | |
140 | - spin_lock_bh(&tpg->acl_node_lock); | |
140 | + spin_lock_irq(&tpg->acl_node_lock); | |
141 | 141 | list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { |
142 | 142 | if (!strcmp(acl->initiatorname, initiatorname) && |
143 | 143 | !acl->dynamic_node_acl) { |
144 | - spin_unlock_bh(&tpg->acl_node_lock); | |
144 | + spin_unlock_irq(&tpg->acl_node_lock); | |
145 | 145 | return acl; |
146 | 146 | } |
147 | 147 | } |
148 | - spin_unlock_bh(&tpg->acl_node_lock); | |
148 | + spin_unlock_irq(&tpg->acl_node_lock); | |
149 | 149 | |
150 | 150 | return NULL; |
151 | 151 | } |
152 | 152 | |
153 | 153 | |
... | ... | @@ -298,13 +298,21 @@ |
298 | 298 | tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl); |
299 | 299 | return NULL; |
300 | 300 | } |
301 | + /* | |
302 | + * Here we only create demo-mode MappedLUNs from the active | |
303 | + * TPG LUNs if the fabric is not explictly asking for | |
304 | + * tpg_check_demo_mode_login_only() == 1. | |
305 | + */ | |
306 | + if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only != NULL) && | |
307 | + (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) == 1)) | |
308 | + do { ; } while (0); | |
309 | + else | |
310 | + core_tpg_add_node_to_devs(acl, tpg); | |
301 | 311 | |
302 | - core_tpg_add_node_to_devs(acl, tpg); | |
303 | - | |
304 | - spin_lock_bh(&tpg->acl_node_lock); | |
312 | + spin_lock_irq(&tpg->acl_node_lock); | |
305 | 313 | list_add_tail(&acl->acl_list, &tpg->acl_node_list); |
306 | 314 | tpg->num_node_acls++; |
307 | - spin_unlock_bh(&tpg->acl_node_lock); | |
315 | + spin_unlock_irq(&tpg->acl_node_lock); | |
308 | 316 | |
309 | 317 | pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s" |
310 | 318 | " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(), |
... | ... | @@ -354,7 +362,7 @@ |
354 | 362 | { |
355 | 363 | struct se_node_acl *acl = NULL; |
356 | 364 | |
357 | - spin_lock_bh(&tpg->acl_node_lock); | |
365 | + spin_lock_irq(&tpg->acl_node_lock); | |
358 | 366 | acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); |
359 | 367 | if (acl) { |
360 | 368 | if (acl->dynamic_node_acl) { |
... | ... | @@ -362,7 +370,7 @@ |
362 | 370 | pr_debug("%s_TPG[%u] - Replacing dynamic ACL" |
363 | 371 | " for %s\n", tpg->se_tpg_tfo->get_fabric_name(), |
364 | 372 | tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname); |
365 | - spin_unlock_bh(&tpg->acl_node_lock); | |
373 | + spin_unlock_irq(&tpg->acl_node_lock); | |
366 | 374 | /* |
367 | 375 | * Release the locally allocated struct se_node_acl |
368 | 376 | * because * core_tpg_add_initiator_node_acl() returned |
369 | 377 | |
... | ... | @@ -378,10 +386,10 @@ |
378 | 386 | " Node %s already exists for TPG %u, ignoring" |
379 | 387 | " request.\n", tpg->se_tpg_tfo->get_fabric_name(), |
380 | 388 | initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); |
381 | - spin_unlock_bh(&tpg->acl_node_lock); | |
389 | + spin_unlock_irq(&tpg->acl_node_lock); | |
382 | 390 | return ERR_PTR(-EEXIST); |
383 | 391 | } |
384 | - spin_unlock_bh(&tpg->acl_node_lock); | |
392 | + spin_unlock_irq(&tpg->acl_node_lock); | |
385 | 393 | |
386 | 394 | if (!se_nacl) { |
387 | 395 | pr_err("struct se_node_acl pointer is NULL\n"); |
388 | 396 | |
... | ... | @@ -418,10 +426,10 @@ |
418 | 426 | return ERR_PTR(-EINVAL); |
419 | 427 | } |
420 | 428 | |
421 | - spin_lock_bh(&tpg->acl_node_lock); | |
429 | + spin_lock_irq(&tpg->acl_node_lock); | |
422 | 430 | list_add_tail(&acl->acl_list, &tpg->acl_node_list); |
423 | 431 | tpg->num_node_acls++; |
424 | - spin_unlock_bh(&tpg->acl_node_lock); | |
432 | + spin_unlock_irq(&tpg->acl_node_lock); | |
425 | 433 | |
426 | 434 | done: |
427 | 435 | pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s" |
428 | 436 | |
... | ... | @@ -445,14 +453,14 @@ |
445 | 453 | struct se_session *sess, *sess_tmp; |
446 | 454 | int dynamic_acl = 0; |
447 | 455 | |
448 | - spin_lock_bh(&tpg->acl_node_lock); | |
456 | + spin_lock_irq(&tpg->acl_node_lock); | |
449 | 457 | if (acl->dynamic_node_acl) { |
450 | 458 | acl->dynamic_node_acl = 0; |
451 | 459 | dynamic_acl = 1; |
452 | 460 | } |
453 | 461 | list_del(&acl->acl_list); |
454 | 462 | tpg->num_node_acls--; |
455 | - spin_unlock_bh(&tpg->acl_node_lock); | |
463 | + spin_unlock_irq(&tpg->acl_node_lock); | |
456 | 464 | |
457 | 465 | spin_lock_bh(&tpg->session_lock); |
458 | 466 | list_for_each_entry_safe(sess, sess_tmp, |
459 | 467 | |
460 | 468 | |
... | ... | @@ -503,21 +511,21 @@ |
503 | 511 | struct se_node_acl *acl; |
504 | 512 | int dynamic_acl = 0; |
505 | 513 | |
506 | - spin_lock_bh(&tpg->acl_node_lock); | |
514 | + spin_lock_irq(&tpg->acl_node_lock); | |
507 | 515 | acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); |
508 | 516 | if (!acl) { |
509 | 517 | pr_err("Access Control List entry for %s Initiator" |
510 | 518 | " Node %s does not exists for TPG %hu, ignoring" |
511 | 519 | " request.\n", tpg->se_tpg_tfo->get_fabric_name(), |
512 | 520 | initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); |
513 | - spin_unlock_bh(&tpg->acl_node_lock); | |
521 | + spin_unlock_irq(&tpg->acl_node_lock); | |
514 | 522 | return -ENODEV; |
515 | 523 | } |
516 | 524 | if (acl->dynamic_node_acl) { |
517 | 525 | acl->dynamic_node_acl = 0; |
518 | 526 | dynamic_acl = 1; |
519 | 527 | } |
520 | - spin_unlock_bh(&tpg->acl_node_lock); | |
528 | + spin_unlock_irq(&tpg->acl_node_lock); | |
521 | 529 | |
522 | 530 | spin_lock_bh(&tpg->session_lock); |
523 | 531 | list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) { |
524 | 532 | |
... | ... | @@ -533,10 +541,10 @@ |
533 | 541 | tpg->se_tpg_tfo->get_fabric_name(), initiatorname); |
534 | 542 | spin_unlock_bh(&tpg->session_lock); |
535 | 543 | |
536 | - spin_lock_bh(&tpg->acl_node_lock); | |
544 | + spin_lock_irq(&tpg->acl_node_lock); | |
537 | 545 | if (dynamic_acl) |
538 | 546 | acl->dynamic_node_acl = 1; |
539 | - spin_unlock_bh(&tpg->acl_node_lock); | |
547 | + spin_unlock_irq(&tpg->acl_node_lock); | |
540 | 548 | return -EEXIST; |
541 | 549 | } |
542 | 550 | /* |
543 | 551 | |
... | ... | @@ -571,10 +579,10 @@ |
571 | 579 | if (init_sess) |
572 | 580 | tpg->se_tpg_tfo->close_session(init_sess); |
573 | 581 | |
574 | - spin_lock_bh(&tpg->acl_node_lock); | |
582 | + spin_lock_irq(&tpg->acl_node_lock); | |
575 | 583 | if (dynamic_acl) |
576 | 584 | acl->dynamic_node_acl = 1; |
577 | - spin_unlock_bh(&tpg->acl_node_lock); | |
585 | + spin_unlock_irq(&tpg->acl_node_lock); | |
578 | 586 | return -EINVAL; |
579 | 587 | } |
580 | 588 | spin_unlock_bh(&tpg->session_lock); |
581 | 589 | |
... | ... | @@ -590,10 +598,10 @@ |
590 | 598 | initiatorname, tpg->se_tpg_tfo->get_fabric_name(), |
591 | 599 | tpg->se_tpg_tfo->tpg_get_tag(tpg)); |
592 | 600 | |
593 | - spin_lock_bh(&tpg->acl_node_lock); | |
601 | + spin_lock_irq(&tpg->acl_node_lock); | |
594 | 602 | if (dynamic_acl) |
595 | 603 | acl->dynamic_node_acl = 1; |
596 | - spin_unlock_bh(&tpg->acl_node_lock); | |
604 | + spin_unlock_irq(&tpg->acl_node_lock); | |
597 | 605 | |
598 | 606 | return 0; |
599 | 607 | } |
600 | 608 | |
601 | 609 | |
602 | 610 | |
... | ... | @@ -717,20 +725,20 @@ |
717 | 725 | * not been released because of TFO->tpg_check_demo_mode_cache() == 1 |
718 | 726 | * in transport_deregister_session(). |
719 | 727 | */ |
720 | - spin_lock_bh(&se_tpg->acl_node_lock); | |
728 | + spin_lock_irq(&se_tpg->acl_node_lock); | |
721 | 729 | list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list, |
722 | 730 | acl_list) { |
723 | 731 | list_del(&nacl->acl_list); |
724 | 732 | se_tpg->num_node_acls--; |
725 | - spin_unlock_bh(&se_tpg->acl_node_lock); | |
733 | + spin_unlock_irq(&se_tpg->acl_node_lock); | |
726 | 734 | |
727 | 735 | core_tpg_wait_for_nacl_pr_ref(nacl); |
728 | 736 | core_free_device_list_for_node(nacl, se_tpg); |
729 | 737 | se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl); |
730 | 738 | |
731 | - spin_lock_bh(&se_tpg->acl_node_lock); | |
739 | + spin_lock_irq(&se_tpg->acl_node_lock); | |
732 | 740 | } |
733 | - spin_unlock_bh(&se_tpg->acl_node_lock); | |
741 | + spin_unlock_irq(&se_tpg->acl_node_lock); | |
734 | 742 | |
735 | 743 | if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) |
736 | 744 | core_tpg_release_virtual_lun0(se_tpg); |
drivers/target/target_core_transport.c
... | ... | @@ -389,17 +389,18 @@ |
389 | 389 | { |
390 | 390 | struct se_portal_group *se_tpg = se_sess->se_tpg; |
391 | 391 | struct se_node_acl *se_nacl; |
392 | + unsigned long flags; | |
392 | 393 | |
393 | 394 | if (!se_tpg) { |
394 | 395 | transport_free_session(se_sess); |
395 | 396 | return; |
396 | 397 | } |
397 | 398 | |
398 | - spin_lock_bh(&se_tpg->session_lock); | |
399 | + spin_lock_irqsave(&se_tpg->session_lock, flags); | |
399 | 400 | list_del(&se_sess->sess_list); |
400 | 401 | se_sess->se_tpg = NULL; |
401 | 402 | se_sess->fabric_sess_ptr = NULL; |
402 | - spin_unlock_bh(&se_tpg->session_lock); | |
403 | + spin_unlock_irqrestore(&se_tpg->session_lock, flags); | |
403 | 404 | |
404 | 405 | /* |
405 | 406 | * Determine if we need to do extra work for this initiator node's |
406 | 407 | |
407 | 408 | |
408 | 409 | |
... | ... | @@ -407,22 +408,22 @@ |
407 | 408 | */ |
408 | 409 | se_nacl = se_sess->se_node_acl; |
409 | 410 | if (se_nacl) { |
410 | - spin_lock_bh(&se_tpg->acl_node_lock); | |
411 | + spin_lock_irqsave(&se_tpg->acl_node_lock, flags); | |
411 | 412 | if (se_nacl->dynamic_node_acl) { |
412 | 413 | if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache( |
413 | 414 | se_tpg)) { |
414 | 415 | list_del(&se_nacl->acl_list); |
415 | 416 | se_tpg->num_node_acls--; |
416 | - spin_unlock_bh(&se_tpg->acl_node_lock); | |
417 | + spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags); | |
417 | 418 | |
418 | 419 | core_tpg_wait_for_nacl_pr_ref(se_nacl); |
419 | 420 | core_free_device_list_for_node(se_nacl, se_tpg); |
420 | 421 | se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, |
421 | 422 | se_nacl); |
422 | - spin_lock_bh(&se_tpg->acl_node_lock); | |
423 | + spin_lock_irqsave(&se_tpg->acl_node_lock, flags); | |
423 | 424 | } |
424 | 425 | } |
425 | - spin_unlock_bh(&se_tpg->acl_node_lock); | |
426 | + spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags); | |
426 | 427 | } |
427 | 428 | |
428 | 429 | transport_free_session(se_sess); |
... | ... | @@ -2053,8 +2054,14 @@ |
2053 | 2054 | cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; |
2054 | 2055 | break; |
2055 | 2056 | } |
2056 | - | |
2057 | - if (!sc) | |
2057 | + /* | |
2058 | + * If a fabric does not define a cmd->se_tfo->new_cmd_map caller, | |
2059 | + * make the call to transport_send_check_condition_and_sense() | |
2060 | + * directly. Otherwise expect the fabric to make the call to | |
2061 | + * transport_send_check_condition_and_sense() after handling | |
2062 | + * possible unsoliticied write data payloads. | |
2063 | + */ | |
2064 | + if (!sc && !cmd->se_tfo->new_cmd_map) | |
2058 | 2065 | transport_new_cmd_failure(cmd); |
2059 | 2066 | else { |
2060 | 2067 | ret = transport_send_check_condition_and_sense(cmd, |
2061 | 2068 | |
2062 | 2069 | |
... | ... | @@ -2847,14 +2854,44 @@ |
2847 | 2854 | " transport_dev_end_lba(): %llu\n", |
2848 | 2855 | cmd->t_task_lba, sectors, |
2849 | 2856 | transport_dev_end_lba(dev)); |
2850 | - pr_err(" We should return CHECK_CONDITION" | |
2851 | - " but we don't yet\n"); | |
2852 | - return 0; | |
2857 | + return -EINVAL; | |
2853 | 2858 | } |
2854 | 2859 | |
2855 | - return sectors; | |
2860 | + return 0; | |
2856 | 2861 | } |
2857 | 2862 | |
2863 | +static int target_check_write_same_discard(unsigned char *flags, struct se_device *dev) | |
2864 | +{ | |
2865 | + /* | |
2866 | + * Determine if the received WRITE_SAME is used to for direct | |
2867 | + * passthrough into Linux/SCSI with struct request via TCM/pSCSI | |
2868 | + * or we are signaling the use of internal WRITE_SAME + UNMAP=1 | |
2869 | + * emulation for -> Linux/BLOCK disbard with TCM/IBLOCK code. | |
2870 | + */ | |
2871 | + int passthrough = (dev->transport->transport_type == | |
2872 | + TRANSPORT_PLUGIN_PHBA_PDEV); | |
2873 | + | |
2874 | + if (!passthrough) { | |
2875 | + if ((flags[0] & 0x04) || (flags[0] & 0x02)) { | |
2876 | + pr_err("WRITE_SAME PBDATA and LBDATA" | |
2877 | + " bits not supported for Block Discard" | |
2878 | + " Emulation\n"); | |
2879 | + return -ENOSYS; | |
2880 | + } | |
2881 | + /* | |
2882 | + * Currently for the emulated case we only accept | |
2883 | + * tpws with the UNMAP=1 bit set. | |
2884 | + */ | |
2885 | + if (!(flags[0] & 0x08)) { | |
2886 | + pr_err("WRITE_SAME w/o UNMAP bit not" | |
2887 | + " supported for Block Discard Emulation\n"); | |
2888 | + return -ENOSYS; | |
2889 | + } | |
2890 | + } | |
2891 | + | |
2892 | + return 0; | |
2893 | +} | |
2894 | + | |
2858 | 2895 | /* transport_generic_cmd_sequencer(): |
2859 | 2896 | * |
2860 | 2897 | * Generic Command Sequencer that should work for most DAS transport |
... | ... | @@ -3065,7 +3102,7 @@ |
3065 | 3102 | goto out_unsupported_cdb; |
3066 | 3103 | |
3067 | 3104 | if (sectors) |
3068 | - size = transport_get_size(sectors, cdb, cmd); | |
3105 | + size = transport_get_size(1, cdb, cmd); | |
3069 | 3106 | else { |
3070 | 3107 | pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not" |
3071 | 3108 | " supported\n"); |
3072 | 3109 | |
... | ... | @@ -3075,27 +3112,9 @@ |
3075 | 3112 | cmd->t_task_lba = get_unaligned_be64(&cdb[12]); |
3076 | 3113 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
3077 | 3114 | |
3078 | - /* | |
3079 | - * Skip the remaining assignments for TCM/PSCSI passthrough | |
3080 | - */ | |
3081 | - if (passthrough) | |
3082 | - break; | |
3083 | - | |
3084 | - if ((cdb[10] & 0x04) || (cdb[10] & 0x02)) { | |
3085 | - pr_err("WRITE_SAME PBDATA and LBDATA" | |
3086 | - " bits not supported for Block Discard" | |
3087 | - " Emulation\n"); | |
3115 | + if (target_check_write_same_discard(&cdb[10], dev) < 0) | |
3088 | 3116 | goto out_invalid_cdb_field; |
3089 | - } | |
3090 | - /* | |
3091 | - * Currently for the emulated case we only accept | |
3092 | - * tpws with the UNMAP=1 bit set. | |
3093 | - */ | |
3094 | - if (!(cdb[10] & 0x08)) { | |
3095 | - pr_err("WRITE_SAME w/o UNMAP bit not" | |
3096 | - " supported for Block Discard Emulation\n"); | |
3097 | - goto out_invalid_cdb_field; | |
3098 | - } | |
3117 | + | |
3099 | 3118 | break; |
3100 | 3119 | default: |
3101 | 3120 | pr_err("VARIABLE_LENGTH_CMD service action" |
3102 | 3121 | |
... | ... | @@ -3330,10 +3349,12 @@ |
3330 | 3349 | cmd->se_cmd_flags |= SCF_EMULATE_CDB_ASYNC; |
3331 | 3350 | /* |
3332 | 3351 | * Check to ensure that LBA + Range does not exceed past end of |
3333 | - * device. | |
3352 | + * device for IBLOCK and FILEIO ->do_sync_cache() backend calls | |
3334 | 3353 | */ |
3335 | - if (!transport_cmd_get_valid_sectors(cmd)) | |
3336 | - goto out_invalid_cdb_field; | |
3354 | + if ((cmd->t_task_lba != 0) || (sectors != 0)) { | |
3355 | + if (transport_cmd_get_valid_sectors(cmd) < 0) | |
3356 | + goto out_invalid_cdb_field; | |
3357 | + } | |
3337 | 3358 | break; |
3338 | 3359 | case UNMAP: |
3339 | 3360 | size = get_unaligned_be16(&cdb[7]); |
3340 | 3361 | |
3341 | 3362 | |
3342 | 3363 | |
... | ... | @@ -3345,40 +3366,38 @@ |
3345 | 3366 | goto out_unsupported_cdb; |
3346 | 3367 | |
3347 | 3368 | if (sectors) |
3348 | - size = transport_get_size(sectors, cdb, cmd); | |
3369 | + size = transport_get_size(1, cdb, cmd); | |
3349 | 3370 | else { |
3350 | 3371 | pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); |
3351 | 3372 | goto out_invalid_cdb_field; |
3352 | 3373 | } |
3353 | 3374 | |
3354 | 3375 | cmd->t_task_lba = get_unaligned_be64(&cdb[2]); |
3355 | - passthrough = (dev->transport->transport_type == | |
3356 | - TRANSPORT_PLUGIN_PHBA_PDEV); | |
3357 | - /* | |
3358 | - * Determine if the received WRITE_SAME_16 is used to for direct | |
3359 | - * passthrough into Linux/SCSI with struct request via TCM/pSCSI | |
3360 | - * or we are signaling the use of internal WRITE_SAME + UNMAP=1 | |
3361 | - * emulation for -> Linux/BLOCK disbard with TCM/IBLOCK and | |
3362 | - * TCM/FILEIO subsystem plugin backstores. | |
3363 | - */ | |
3364 | - if (!passthrough) { | |
3365 | - if ((cdb[1] & 0x04) || (cdb[1] & 0x02)) { | |
3366 | - pr_err("WRITE_SAME PBDATA and LBDATA" | |
3367 | - " bits not supported for Block Discard" | |
3368 | - " Emulation\n"); | |
3369 | - goto out_invalid_cdb_field; | |
3370 | - } | |
3371 | - /* | |
3372 | - * Currently for the emulated case we only accept | |
3373 | - * tpws with the UNMAP=1 bit set. | |
3374 | - */ | |
3375 | - if (!(cdb[1] & 0x08)) { | |
3376 | - pr_err("WRITE_SAME w/o UNMAP bit not " | |
3377 | - " supported for Block Discard Emulation\n"); | |
3378 | - goto out_invalid_cdb_field; | |
3379 | - } | |
3376 | + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; | |
3377 | + | |
3378 | + if (target_check_write_same_discard(&cdb[1], dev) < 0) | |
3379 | + goto out_invalid_cdb_field; | |
3380 | + break; | |
3381 | + case WRITE_SAME: | |
3382 | + sectors = transport_get_sectors_10(cdb, cmd, §or_ret); | |
3383 | + if (sector_ret) | |
3384 | + goto out_unsupported_cdb; | |
3385 | + | |
3386 | + if (sectors) | |
3387 | + size = transport_get_size(1, cdb, cmd); | |
3388 | + else { | |
3389 | + pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); | |
3390 | + goto out_invalid_cdb_field; | |
3380 | 3391 | } |
3392 | + | |
3393 | + cmd->t_task_lba = get_unaligned_be32(&cdb[2]); | |
3381 | 3394 | cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; |
3395 | + /* | |
3396 | + * Follow sbcr26 with WRITE_SAME (10) and check for the existence | |
3397 | + * of byte 1 bit 3 UNMAP instead of original reserved field | |
3398 | + */ | |
3399 | + if (target_check_write_same_discard(&cdb[1], dev) < 0) | |
3400 | + goto out_invalid_cdb_field; | |
3382 | 3401 | break; |
3383 | 3402 | case ALLOW_MEDIUM_REMOVAL: |
3384 | 3403 | case GPCMD_CLOSE_TRACK: |
... | ... | @@ -3873,9 +3892,7 @@ |
3873 | 3892 | static int transport_new_cmd_obj(struct se_cmd *cmd) |
3874 | 3893 | { |
3875 | 3894 | struct se_device *dev = cmd->se_dev; |
3876 | - u32 task_cdbs; | |
3877 | - u32 rc; | |
3878 | - int set_counts = 1; | |
3895 | + int set_counts = 1, rc, task_cdbs; | |
3879 | 3896 | |
3880 | 3897 | /* |
3881 | 3898 | * Setup any BIDI READ tasks and memory from |
... | ... | @@ -3893,7 +3910,7 @@ |
3893 | 3910 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
3894 | 3911 | cmd->scsi_sense_reason = |
3895 | 3912 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
3896 | - return PYX_TRANSPORT_LU_COMM_FAILURE; | |
3913 | + return -EINVAL; | |
3897 | 3914 | } |
3898 | 3915 | atomic_inc(&cmd->t_fe_count); |
3899 | 3916 | atomic_inc(&cmd->t_se_count); |
... | ... | @@ -3912,7 +3929,7 @@ |
3912 | 3929 | cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; |
3913 | 3930 | cmd->scsi_sense_reason = |
3914 | 3931 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
3915 | - return PYX_TRANSPORT_LU_COMM_FAILURE; | |
3932 | + return -EINVAL; | |
3916 | 3933 | } |
3917 | 3934 | |
3918 | 3935 | if (set_counts) { |
... | ... | @@ -4028,8 +4045,6 @@ |
4028 | 4045 | if (!task->task_sg) |
4029 | 4046 | continue; |
4030 | 4047 | |
4031 | - BUG_ON(!task->task_padded_sg); | |
4032 | - | |
4033 | 4048 | if (!sg_first) { |
4034 | 4049 | sg_first = task->task_sg; |
4035 | 4050 | chained_nents = task->task_sg_nents; |
4036 | 4051 | |
... | ... | @@ -4037,9 +4052,19 @@ |
4037 | 4052 | sg_chain(sg_prev, sg_prev_nents, task->task_sg); |
4038 | 4053 | chained_nents += task->task_sg_nents; |
4039 | 4054 | } |
4055 | + /* | |
4056 | + * For the padded tasks, use the extra SGL vector allocated | |
4057 | + * in transport_allocate_data_tasks() for the sg_prev_nents | |
4058 | + * offset into sg_chain() above.. The last task of a | |
4059 | + * multi-task list, or a single task will not have | |
4060 | + * task->task_sg_padded set.. | |
4061 | + */ | |
4062 | + if (task->task_padded_sg) | |
4063 | + sg_prev_nents = (task->task_sg_nents + 1); | |
4064 | + else | |
4065 | + sg_prev_nents = task->task_sg_nents; | |
4040 | 4066 | |
4041 | 4067 | sg_prev = task->task_sg; |
4042 | - sg_prev_nents = task->task_sg_nents; | |
4043 | 4068 | } |
4044 | 4069 | /* |
4045 | 4070 | * Setup the starting pointer and total t_tasks_sg_linked_no including |
... | ... | @@ -4091,7 +4116,7 @@ |
4091 | 4116 | |
4092 | 4117 | cmd_sg = sgl; |
4093 | 4118 | for (i = 0; i < task_count; i++) { |
4094 | - unsigned int task_size; | |
4119 | + unsigned int task_size, task_sg_nents_padded; | |
4095 | 4120 | int count; |
4096 | 4121 | |
4097 | 4122 | task = transport_generic_get_task(cmd, data_direction); |
4098 | 4123 | |
4099 | 4124 | |
4100 | 4125 | |
4101 | 4126 | |
4102 | 4127 | |
4103 | 4128 | |
... | ... | @@ -4110,30 +4135,33 @@ |
4110 | 4135 | |
4111 | 4136 | /* Update new cdb with updated lba/sectors */ |
4112 | 4137 | cmd->transport_split_cdb(task->task_lba, task->task_sectors, cdb); |
4113 | - | |
4114 | 4138 | /* |
4139 | + * This now assumes that passed sg_ents are in PAGE_SIZE chunks | |
4140 | + * in order to calculate the number per task SGL entries | |
4141 | + */ | |
4142 | + task->task_sg_nents = DIV_ROUND_UP(task->task_size, PAGE_SIZE); | |
4143 | + /* | |
4115 | 4144 | * Check if the fabric module driver is requesting that all |
4116 | 4145 | * struct se_task->task_sg[] be chained together.. If so, |
4117 | 4146 | * then allocate an extra padding SG entry for linking and |
4118 | - * marking the end of the chained SGL. | |
4119 | - * Possibly over-allocate task sgl size by using cmd sgl size. | |
4120 | - * It's so much easier and only a waste when task_count > 1. | |
4121 | - * That is extremely rare. | |
4147 | + * marking the end of the chained SGL for every task except | |
4148 | + * the last one for (task_count > 1) operation, or skipping | |
4149 | + * the extra padding for the (task_count == 1) case. | |
4122 | 4150 | */ |
4123 | - task->task_sg_nents = sgl_nents; | |
4124 | - if (cmd->se_tfo->task_sg_chaining) { | |
4125 | - task->task_sg_nents++; | |
4151 | + if (cmd->se_tfo->task_sg_chaining && (i < (task_count - 1))) { | |
4152 | + task_sg_nents_padded = (task->task_sg_nents + 1); | |
4126 | 4153 | task->task_padded_sg = 1; |
4127 | - } | |
4154 | + } else | |
4155 | + task_sg_nents_padded = task->task_sg_nents; | |
4128 | 4156 | |
4129 | 4157 | task->task_sg = kmalloc(sizeof(struct scatterlist) * |
4130 | - task->task_sg_nents, GFP_KERNEL); | |
4158 | + task_sg_nents_padded, GFP_KERNEL); | |
4131 | 4159 | if (!task->task_sg) { |
4132 | 4160 | cmd->se_dev->transport->free_task(task); |
4133 | 4161 | return -ENOMEM; |
4134 | 4162 | } |
4135 | 4163 | |
4136 | - sg_init_table(task->task_sg, task->task_sg_nents); | |
4164 | + sg_init_table(task->task_sg, task_sg_nents_padded); | |
4137 | 4165 | |
4138 | 4166 | task_size = task->task_size; |
4139 | 4167 | |
4140 | 4168 | |
... | ... | @@ -4230,10 +4258,13 @@ |
4230 | 4258 | struct scatterlist *sgl, |
4231 | 4259 | unsigned int sgl_nents) |
4232 | 4260 | { |
4233 | - if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) | |
4261 | + if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { | |
4262 | + if (transport_cmd_get_valid_sectors(cmd) < 0) | |
4263 | + return -EINVAL; | |
4264 | + | |
4234 | 4265 | return transport_allocate_data_tasks(cmd, lba, data_direction, |
4235 | 4266 | sgl, sgl_nents); |
4236 | - else | |
4267 | + } else | |
4237 | 4268 | return transport_allocate_control_task(cmd); |
4238 | 4269 | |
4239 | 4270 | } |
... | ... | @@ -4726,6 +4757,13 @@ |
4726 | 4757 | */ |
4727 | 4758 | switch (reason) { |
4728 | 4759 | case TCM_NON_EXISTENT_LUN: |
4760 | + /* CURRENT ERROR */ | |
4761 | + buffer[offset] = 0x70; | |
4762 | + /* ILLEGAL REQUEST */ | |
4763 | + buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; | |
4764 | + /* LOGICAL UNIT NOT SUPPORTED */ | |
4765 | + buffer[offset+SPC_ASC_KEY_OFFSET] = 0x25; | |
4766 | + break; | |
4729 | 4767 | case TCM_UNSUPPORTED_SCSI_OPCODE: |
4730 | 4768 | case TCM_SECTOR_COUNT_TOO_MANY: |
4731 | 4769 | /* CURRENT ERROR */ |
drivers/target/tcm_fc/tfc_conf.c
... | ... | @@ -256,7 +256,7 @@ |
256 | 256 | struct se_portal_group *se_tpg = &tpg->se_tpg; |
257 | 257 | struct se_node_acl *se_acl; |
258 | 258 | |
259 | - spin_lock_bh(&se_tpg->acl_node_lock); | |
259 | + spin_lock_irq(&se_tpg->acl_node_lock); | |
260 | 260 | list_for_each_entry(se_acl, &se_tpg->acl_node_list, acl_list) { |
261 | 261 | acl = container_of(se_acl, struct ft_node_acl, se_node_acl); |
262 | 262 | pr_debug("acl %p port_name %llx\n", |
... | ... | @@ -270,7 +270,7 @@ |
270 | 270 | break; |
271 | 271 | } |
272 | 272 | } |
273 | - spin_unlock_bh(&se_tpg->acl_node_lock); | |
273 | + spin_unlock_irq(&se_tpg->acl_node_lock); | |
274 | 274 | return found; |
275 | 275 | } |
276 | 276 | |
277 | 277 | |
... | ... | @@ -655,10 +655,8 @@ |
655 | 655 | synchronize_rcu(); |
656 | 656 | } |
657 | 657 | |
658 | -#ifdef MODULE | |
659 | 658 | MODULE_DESCRIPTION("FC TCM fabric driver " FT_VERSION); |
660 | 659 | MODULE_LICENSE("GPL"); |
661 | 660 | module_init(ft_init); |
662 | 661 | module_exit(ft_exit); |
663 | -#endif /* MODULE */ |
include/target/target_core_fabric_ops.h
... | ... | @@ -27,6 +27,12 @@ |
27 | 27 | int (*tpg_check_demo_mode_cache)(struct se_portal_group *); |
28 | 28 | int (*tpg_check_demo_mode_write_protect)(struct se_portal_group *); |
29 | 29 | int (*tpg_check_prod_mode_write_protect)(struct se_portal_group *); |
30 | + /* | |
31 | + * Optionally used by fabrics to allow demo-mode login, but not | |
32 | + * expose any TPG LUNs, and return 'not connected' in standard | |
33 | + * inquiry response | |
34 | + */ | |
35 | + int (*tpg_check_demo_mode_login_only)(struct se_portal_group *); | |
30 | 36 | struct se_node_acl *(*tpg_alloc_fabric_acl)( |
31 | 37 | struct se_portal_group *); |
32 | 38 | void (*tpg_release_fabric_acl)(struct se_portal_group *, |