Commit ecc88efbe7adceb3f4bfdbbb1efb669efcaab124

Authored by Linus Torvalds

Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending

Pull scsi target updates from Nicholas Bellinger:
 "The highlights in this series include:

   - Improve sg_table lookup scalability in RAMDISK_MCP (martin)

   - Add device attribute to expose config name for INQUIRY model (tregaron)

   - Convert tcm_vhost to use lock-less list for cmd completion (asias)

   - Add tcm_vhost support for multiple targets per endpoint (asias)

   - Add tcm_vhost support for multiple queues per vhost (asias)

   - Add missing mapped_lun bounds checking during make_mappedlun setup
     in generic fabric configfs code (jan engelhardt + nab)

   - Enforce individual iscsi-target network portal export once per
     TargetName endpoint (grover + nab)

   - Add WRITE_SAME w/ UNMAP=0 emulation to FILEIO backend (nab)

  Things have been mostly quiet this round, with the majority of the work
  being done on the iser-target WIP driver + associated iscsi-target
  refactoring patches currently in flight for v3.10 code.

  At this point there is one patch series left outstanding from Asias to
  add support for UNMAP + WRITE_SAME w/ UNMAP=1 to FILEIO, awaiting
  feedback from hch & Co., which will likely be included in a post
  v3.9-rc1 PULL request if there are no objections.

  Also, there is a regression bug recently reported off-list that seems
  to be affecting v3.5 and v3.6 kernels with MSFT iSCSI initiators that
  is still being tracked down.  No word if this affects >= v3.7 just
  yet, but if so there will likely be another PULL request coming your
  way."

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (26 commits)
  target: Rename spc_get_write_same_sectors -> sbc_get_write_same_sectors
  target/file: Add WRITE_SAME w/ UNMAP=0 emulation support
  iscsi-target: Enforce individual network portal export once per TargetName
  iscsi-target: Refactor iscsit_get_np sockaddr matching into iscsit_check_np_match
  target: Add missing mapped_lun bounds checking during make_mappedlun setup
  target: Fix lookup of dynamic NodeACLs during cached demo-mode operation
  target: Fix parameter list length checking in MODE SELECT
  target: Fix error checking for UNMAP commands
  target: Fix sense data for out-of-bounds IO operations
  target_core_rd: break out unterminated loop during copy
  tcm_vhost: Multi-queue support
  tcm_vhost: Multi-target support
  target: Add device attribute to expose config_item_name for INQUIRY model
  target: don't truncate the fail intr address
  target: don't always say "ipv6" as address type
  target/iblock: Use backend REQ_FLUSH hint for WriteCacheEnabled status
  iscsi-target: make some temporary buffers larger
  tcm_vhost: Optimize gup in vhost_scsi_map_to_sgl
  tcm_vhost: Use iov_num_pages to calculate sgl_count
  tcm_vhost: Introduce iov_num_pages
  ...

Showing 21 changed files

drivers/target/iscsi/iscsi_target.c
... ... @@ -264,16 +264,50 @@
264 264 return 0;
265 265 }
266 266  
267   -static struct iscsi_np *iscsit_get_np(
  267 +bool iscsit_check_np_match(
268 268 struct __kernel_sockaddr_storage *sockaddr,
  269 + struct iscsi_np *np,
269 270 int network_transport)
270 271 {
271 272 struct sockaddr_in *sock_in, *sock_in_e;
272 273 struct sockaddr_in6 *sock_in6, *sock_in6_e;
273   - struct iscsi_np *np;
274   - int ip_match = 0;
  274 + bool ip_match = false;
275 275 u16 port;
276 276  
  277 + if (sockaddr->ss_family == AF_INET6) {
  278 + sock_in6 = (struct sockaddr_in6 *)sockaddr;
  279 + sock_in6_e = (struct sockaddr_in6 *)&np->np_sockaddr;
  280 +
  281 + if (!memcmp(&sock_in6->sin6_addr.in6_u,
  282 + &sock_in6_e->sin6_addr.in6_u,
  283 + sizeof(struct in6_addr)))
  284 + ip_match = true;
  285 +
  286 + port = ntohs(sock_in6->sin6_port);
  287 + } else {
  288 + sock_in = (struct sockaddr_in *)sockaddr;
  289 + sock_in_e = (struct sockaddr_in *)&np->np_sockaddr;
  290 +
  291 + if (sock_in->sin_addr.s_addr == sock_in_e->sin_addr.s_addr)
  292 + ip_match = true;
  293 +
  294 + port = ntohs(sock_in->sin_port);
  295 + }
  296 +
  297 + if ((ip_match == true) && (np->np_port == port) &&
  298 + (np->np_network_transport == network_transport))
  299 + return true;
  300 +
  301 + return false;
  302 +}
  303 +
  304 +static struct iscsi_np *iscsit_get_np(
  305 + struct __kernel_sockaddr_storage *sockaddr,
  306 + int network_transport)
  307 +{
  308 + struct iscsi_np *np;
  309 + bool match;
  310 +
277 311 spin_lock_bh(&np_lock);
278 312 list_for_each_entry(np, &g_np_list, np_list) {
279 313 spin_lock(&np->np_thread_lock);
... ... @@ -282,29 +316,8 @@
282 316 continue;
283 317 }
284 318  
285   - if (sockaddr->ss_family == AF_INET6) {
286   - sock_in6 = (struct sockaddr_in6 *)sockaddr;
287   - sock_in6_e = (struct sockaddr_in6 *)&np->np_sockaddr;
288   -
289   - if (!memcmp(&sock_in6->sin6_addr.in6_u,
290   - &sock_in6_e->sin6_addr.in6_u,
291   - sizeof(struct in6_addr)))
292   - ip_match = 1;
293   -
294   - port = ntohs(sock_in6->sin6_port);
295   - } else {
296   - sock_in = (struct sockaddr_in *)sockaddr;
297   - sock_in_e = (struct sockaddr_in *)&np->np_sockaddr;
298   -
299   - if (sock_in->sin_addr.s_addr ==
300   - sock_in_e->sin_addr.s_addr)
301   - ip_match = 1;
302   -
303   - port = ntohs(sock_in->sin_port);
304   - }
305   -
306   - if ((ip_match == 1) && (np->np_port == port) &&
307   - (np->np_network_transport == network_transport)) {
  319 + match = iscsit_check_np_match(sockaddr, np, network_transport);
  320 + if (match == true) {
308 321 /*
309 322 * Increment the np_exports reference count now to
310 323 * prevent iscsit_del_np() below from being called
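
The refactor above extracts iscsit_check_np_match() so the same sockaddr/port comparison can be reused by the new duplicate-portal check in iscsi_target_tpg.c further down. As a standalone illustration of that matching logic, here is a minimal userspace sketch; portal_match() is a hypothetical name, and it omits the kernel's additional network_transport comparison:

#include <stdbool.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>

/* Hypothetical userspace analogue of iscsit_check_np_match(): two
 * portals match when the address family, the address bytes, and the
 * port all agree. */
static bool portal_match(const struct sockaddr_storage *a,
			 const struct sockaddr_storage *b)
{
	if (a->ss_family != b->ss_family)
		return false;

	if (a->ss_family == AF_INET6) {
		const struct sockaddr_in6 *a6 = (const void *)a;
		const struct sockaddr_in6 *b6 = (const void *)b;

		return !memcmp(&a6->sin6_addr, &b6->sin6_addr,
			       sizeof(struct in6_addr)) &&
		       a6->sin6_port == b6->sin6_port;
	} else {
		const struct sockaddr_in *a4 = (const void *)a;
		const struct sockaddr_in *b4 = (const void *)b;

		return a4->sin_addr.s_addr == b4->sin_addr.s_addr &&
		       a4->sin_port == b4->sin_port;
	}
}
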
drivers/target/iscsi/iscsi_target.h
... ... @@ -8,6 +8,8 @@
8 8 extern void iscsit_del_tiqn(struct iscsi_tiqn *);
9 9 extern int iscsit_access_np(struct iscsi_np *, struct iscsi_portal_group *);
10 10 extern int iscsit_deaccess_np(struct iscsi_np *, struct iscsi_portal_group *);
  11 +extern bool iscsit_check_np_match(struct __kernel_sockaddr_storage *,
  12 + struct iscsi_np *, int);
11 13 extern struct iscsi_np *iscsit_add_np(struct __kernel_sockaddr_storage *,
12 14 char *, int);
13 15 extern int iscsit_reset_np_thread(struct iscsi_np *, struct iscsi_tpg_np *,
drivers/target/iscsi/iscsi_target_parameters.c
... ... @@ -1095,11 +1095,11 @@
1095 1095 SET_PSTATE_REPLY_OPTIONAL(param);
1096 1096 }
1097 1097 } else if (IS_TYPE_NUMBER(param)) {
1098   - char *tmpptr, buf[10];
  1098 + char *tmpptr, buf[11];
1099 1099 u32 acceptor_value = simple_strtoul(param->value, &tmpptr, 0);
1100 1100 u32 proposer_value = simple_strtoul(value, &tmpptr, 0);
1101 1101  
1102   - memset(buf, 0, 10);
  1102 + memset(buf, 0, sizeof(buf));
1103 1103  
1104 1104 if (!strcmp(param->name, MAXCONNECTIONS) ||
1105 1105 !strcmp(param->name, MAXBURSTLENGTH) ||
... ... @@ -1503,8 +1503,8 @@
1503 1503 FirstBurstLength = simple_strtoul(param->value,
1504 1504 &tmpptr, 0);
1505 1505 if (FirstBurstLength > MaxBurstLength) {
1506   - char tmpbuf[10];
1507   - memset(tmpbuf, 0, 10);
  1506 + char tmpbuf[11];
  1507 + memset(tmpbuf, 0, sizeof(tmpbuf));
1508 1508 sprintf(tmpbuf, "%u", MaxBurstLength);
1509 1509 if (iscsi_update_param_value(param, tmpbuf))
1510 1510 return -1;
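
The buf[10] -> buf[11] bumps above exist because a u32 can need ten decimal digits ("4294967295") plus a terminating NUL. A quick standalone check of the worst case:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	char buf[11];	/* 10 decimal digits + NUL covers any u32 */
	int n = snprintf(buf, sizeof(buf), "%u", UINT_MAX);

	printf("\"%s\" needs %d characters\n", buf, n);	/* 4294967295, 10 */
	return 0;
}
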
drivers/target/iscsi/iscsi_target_stat.c
... ... @@ -410,14 +410,16 @@
410 410 struct iscsi_tiqn *tiqn = container_of(igrps,
411 411 struct iscsi_tiqn, tiqn_stat_grps);
412 412 struct iscsi_login_stats *lstat = &tiqn->login_stats;
413   - unsigned char buf[8];
  413 + int ret;
414 414  
415 415 spin_lock(&lstat->lock);
416   - snprintf(buf, 8, "%s", (lstat->last_intr_fail_ip_addr != NULL) ?
417   - "ipv6" : "ipv4");
  416 + if (lstat->last_intr_fail_ip_family == AF_INET6)
  417 + ret = snprintf(page, PAGE_SIZE, "ipv6\n");
  418 + else
  419 + ret = snprintf(page, PAGE_SIZE, "ipv4\n");
418 420 spin_unlock(&lstat->lock);
419 421  
420   - return snprintf(page, PAGE_SIZE, "%s\n", buf);
  422 + return ret;
421 423 }
422 424 ISCSI_STAT_TGT_ATTR_RO(fail_intr_addr_type);
423 425  
... ... @@ -427,16 +429,19 @@
427 429 struct iscsi_tiqn *tiqn = container_of(igrps,
428 430 struct iscsi_tiqn, tiqn_stat_grps);
429 431 struct iscsi_login_stats *lstat = &tiqn->login_stats;
430   - unsigned char buf[32];
  432 + int ret;
431 433  
432 434 spin_lock(&lstat->lock);
433   - if (lstat->last_intr_fail_ip_family == AF_INET6)
434   - snprintf(buf, 32, "[%s]", lstat->last_intr_fail_ip_addr);
435   - else
436   - snprintf(buf, 32, "%s", lstat->last_intr_fail_ip_addr);
  435 + if (lstat->last_intr_fail_ip_family == AF_INET6) {
  436 + ret = snprintf(page, PAGE_SIZE, "[%s]\n",
  437 + lstat->last_intr_fail_ip_addr);
  438 + } else {
  439 + ret = snprintf(page, PAGE_SIZE, "%s\n",
  440 + lstat->last_intr_fail_ip_addr);
  441 + }
437 442 spin_unlock(&lstat->lock);
438 443  
439   - return snprintf(page, PAGE_SIZE, "%s\n", buf);
  444 + return ret;
440 445 }
441 446 ISCSI_STAT_TGT_ATTR_RO(fail_intr_addr);
442 447  
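
Both attribute handlers above stop staging output in small stack buffers and snprintf() directly into the sysfs page, so a long last_intr_fail_ip_addr is no longer truncated and the returned length is the real one. The failure mode being removed, sketched with a deliberately undersized buffer (the address literal is just an example):

#include <stdio.h>

int main(void)
{
	char small[8];
	/* snprintf() never overruns, but it silently truncates: only
	 * seven characters plus the NUL fit here, and the return value
	 * is the length it *wanted* to write. */
	int n = snprintf(small, sizeof(small),
			 "[fe80::0202:b3ff:fe1e:8329]");

	printf("wanted %d, kept \"%s\"\n", n, small);	/* 27, "[fe80::" */
	return 0;
}
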
drivers/target/iscsi/iscsi_target_tpg.c
... ... @@ -422,6 +422,35 @@
422 422 return NULL;
423 423 }
424 424  
  425 +static bool iscsit_tpg_check_network_portal(
  426 + struct iscsi_tiqn *tiqn,
  427 + struct __kernel_sockaddr_storage *sockaddr,
  428 + int network_transport)
  429 +{
  430 + struct iscsi_portal_group *tpg;
  431 + struct iscsi_tpg_np *tpg_np;
  432 + struct iscsi_np *np;
  433 + bool match = false;
  434 +
  435 + spin_lock(&tiqn->tiqn_tpg_lock);
  436 + list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
  437 +
  438 + spin_lock(&tpg->tpg_np_lock);
  439 + list_for_each_entry(tpg_np, &tpg->tpg_gnp_list, tpg_np_list) {
  440 + np = tpg_np->tpg_np;
  441 +
  442 + match = iscsit_check_np_match(sockaddr, np,
  443 + network_transport);
  444 + if (match == true)
  445 + break;
  446 + }
  447 + spin_unlock(&tpg->tpg_np_lock);
  448 + }
  449 + spin_unlock(&tiqn->tiqn_tpg_lock);
  450 +
  451 + return match;
  452 +}
  453 +
425 454 struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
426 455 struct iscsi_portal_group *tpg,
427 456 struct __kernel_sockaddr_storage *sockaddr,
... ... @@ -431,6 +460,16 @@
431 460 {
432 461 struct iscsi_np *np;
433 462 struct iscsi_tpg_np *tpg_np;
  463 +
  464 + if (!tpg_np_parent) {
  465 + if (iscsit_tpg_check_network_portal(tpg->tpg_tiqn, sockaddr,
  466 + network_transport) == true) {
  467 + pr_err("Network Portal: %s already exists on a"
  468 + " different TPG on %s\n", ip_str,
  469 + tpg->tpg_tiqn->tiqn);
  470 + return ERR_PTR(-EEXIST);
  471 + }
  472 + }
434 473  
435 474 tpg_np = kzalloc(sizeof(struct iscsi_tpg_np), GFP_KERNEL);
436 475 if (!tpg_np) {
drivers/target/target_core_configfs.c
... ... @@ -609,6 +609,9 @@
609 609 __CONFIGFS_EATTR_RO(_name, \
610 610 target_core_dev_show_attr_##_name);
611 611  
  612 +DEF_DEV_ATTRIB(emulate_model_alias);
  613 +SE_DEV_ATTR(emulate_model_alias, S_IRUGO | S_IWUSR);
  614 +
612 615 DEF_DEV_ATTRIB(emulate_dpo);
613 616 SE_DEV_ATTR(emulate_dpo, S_IRUGO | S_IWUSR);
614 617  
... ... @@ -681,6 +684,7 @@
681 684 CONFIGFS_EATTR_OPS(target_core_dev_attrib, se_dev_attrib, da_group);
682 685  
683 686 static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
  687 + &target_core_dev_attrib_emulate_model_alias.attr,
684 688 &target_core_dev_attrib_emulate_dpo.attr,
685 689 &target_core_dev_attrib_emulate_fua_write.attr,
686 690 &target_core_dev_attrib_emulate_fua_read.attr,
drivers/target/target_core_device.c
... ... @@ -713,6 +713,44 @@
713 713 return 0;
714 714 }
715 715  
  716 +static void dev_set_t10_wwn_model_alias(struct se_device *dev)
  717 +{
  718 + const char *configname;
  719 +
  720 + configname = config_item_name(&dev->dev_group.cg_item);
  721 + if (strlen(configname) >= 16) {
  722 + pr_warn("dev[%p]: Backstore name '%s' is too long for "
  723 + "INQUIRY_MODEL, truncating to 16 bytes\n", dev,
  724 + configname);
  725 + }
  726 + snprintf(&dev->t10_wwn.model[0], 16, "%s", configname);
  727 +}
  728 +
  729 +int se_dev_set_emulate_model_alias(struct se_device *dev, int flag)
  730 +{
  731 + if (dev->export_count) {
  732 + pr_err("dev[%p]: Unable to change model alias"
  733 + " while export_count is %d\n",
  734 + dev, dev->export_count);
  735 + return -EINVAL;
  736 + }
  737 +
  738 + if (flag != 0 && flag != 1) {
  739 + pr_err("Illegal value %d\n", flag);
  740 + return -EINVAL;
  741 + }
  742 +
  743 + if (flag) {
  744 + dev_set_t10_wwn_model_alias(dev);
  745 + } else {
  746 + strncpy(&dev->t10_wwn.model[0],
  747 + dev->transport->inquiry_prod, 16);
  748 + }
  749 + dev->dev_attrib.emulate_model_alias = flag;
  750 +
  751 + return 0;
  752 +}
  753 +
716 754 int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
717 755 {
718 756 if (flag != 0 && flag != 1) {
... ... @@ -772,6 +810,12 @@
772 810 pr_err("emulate_write_cache not supported for pSCSI\n");
773 811 return -EINVAL;
774 812 }
  813 + if (dev->transport->get_write_cache) {
  814 + pr_warn("emulate_write_cache cannot be changed when underlying"
  815 + " HW reports WriteCacheEnabled, ignoring request\n");
  816 + return 0;
  817 + }
  818 +
775 819 dev->dev_attrib.emulate_write_cache = flag;
776 820 pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
777 821 dev, dev->dev_attrib.emulate_write_cache);
... ... @@ -1182,24 +1226,18 @@
1182 1226  
1183 1227 struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
1184 1228 struct se_portal_group *tpg,
  1229 + struct se_node_acl *nacl,
1185 1230 u32 mapped_lun,
1186   - char *initiatorname,
1187 1231 int *ret)
1188 1232 {
1189 1233 struct se_lun_acl *lacl;
1190   - struct se_node_acl *nacl;
1191 1234  
1192   - if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) {
  1235 + if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
1193 1236 pr_err("%s InitiatorName exceeds maximum size.\n",
1194 1237 tpg->se_tpg_tfo->get_fabric_name());
1195 1238 *ret = -EOVERFLOW;
1196 1239 return NULL;
1197 1240 }
1198   - nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
1199   - if (!nacl) {
1200   - *ret = -EINVAL;
1201   - return NULL;
1202   - }
1203 1241 lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
1204 1242 if (!lacl) {
1205 1243 pr_err("Unable to allocate memory for struct se_lun_acl.\n");
... ... @@ -1210,7 +1248,8 @@
1210 1248 INIT_LIST_HEAD(&lacl->lacl_list);
1211 1249 lacl->mapped_lun = mapped_lun;
1212 1250 lacl->se_lun_nacl = nacl;
1213   - snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
  1251 + snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s",
  1252 + nacl->initiatorname);
1214 1253  
1215 1254 return lacl;
1216 1255 }
... ... @@ -1390,6 +1429,7 @@
1390 1429 dev->t10_alua.t10_dev = dev;
1391 1430  
1392 1431 dev->dev_attrib.da_dev = dev;
  1432 + dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
1393 1433 dev->dev_attrib.emulate_dpo = DA_EMULATE_DPO;
1394 1434 dev->dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
1395 1435 dev->dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
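
dev_set_t10_wwn_model_alias() above copies the configfs backstore name into t10_wwn.model, a fixed 16-byte INQUIRY field, which is why longer names are truncated with a warning. A self-contained sketch of that snprintf() truncation behavior (the names here are made up):

#include <stdio.h>

#define INQUIRY_MODEL_LEN 16	/* fixed-width INQUIRY model field */

int main(void)
{
	char model[INQUIRY_MODEL_LEN];
	const char *configname = "my_backstore_with_a_long_name";

	/* Stores at most 15 characters plus NUL; the return value
	 * reveals whether truncation happened. */
	if (snprintf(model, sizeof(model), "%s", configname)
	    >= (int)sizeof(model))
		printf("'%s' truncated to '%s'\n", configname, model);
	return 0;
}
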
drivers/target/target_core_fabric_configfs.c
... ... @@ -354,9 +354,17 @@
354 354 ret = -EINVAL;
355 355 goto out;
356 356 }
  357 + if (mapped_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
  358 + pr_err("Mapped LUN: %lu exceeds TRANSPORT_MAX_LUNS_PER_TPG"
  359 + "-1: %u for Target Portal Group: %u\n", mapped_lun,
  360 + TRANSPORT_MAX_LUNS_PER_TPG-1,
  361 + se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
  362 + ret = -EINVAL;
  363 + goto out;
  364 + }
357 365  
358   - lacl = core_dev_init_initiator_node_lun_acl(se_tpg, mapped_lun,
359   - config_item_name(acl_ci), &ret);
  366 + lacl = core_dev_init_initiator_node_lun_acl(se_tpg, se_nacl,
  367 + mapped_lun, &ret);
360 368 if (!lacl) {
361 369 ret = -EINVAL;
362 370 goto out;
drivers/target/target_core_file.c
... ... @@ -190,6 +190,11 @@
190 190  
191 191 fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
192 192 fd_dev->fd_queue_depth = dev->queue_depth;
  193 + /*
  194 + * Limit WRITE_SAME w/ UNMAP=0 emulation to 8k Number of LBAs (NoLB)
  195 + * based upon struct iovec limit for vfs_writev()
  196 + */
  197 + dev->dev_attrib.max_write_same_len = 0x1000;
193 198  
194 199 pr_debug("CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
195 200 " %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
... ... @@ -328,7 +333,115 @@
328 333 return 0;
329 334 }
330 335  
  336 +static unsigned char *
  337 +fd_setup_write_same_buf(struct se_cmd *cmd, struct scatterlist *sg,
  338 + unsigned int len)
  339 +{
  340 + struct se_device *se_dev = cmd->se_dev;
  341 + unsigned int block_size = se_dev->dev_attrib.block_size;
  342 + unsigned int i = 0, end;
  343 + unsigned char *buf, *p, *kmap_buf;
  344 +
  345 + buf = kzalloc(min_t(unsigned int, len, PAGE_SIZE), GFP_KERNEL);
  346 + if (!buf) {
  347 + pr_err("Unable to allocate fd_execute_write_same buf\n");
  348 + return NULL;
  349 + }
  350 +
  351 + kmap_buf = kmap(sg_page(sg)) + sg->offset;
  352 + if (!kmap_buf) {
  353 + pr_err("kmap() failed in fd_setup_write_same\n");
  354 + kfree(buf);
  355 + return NULL;
  356 + }
  357 + /*
  358 + * Fill local *buf to contain multiple WRITE_SAME blocks up to
  359 + * min(len, PAGE_SIZE)
  360 + */
  361 + p = buf;
  362 + end = min_t(unsigned int, len, PAGE_SIZE);
  363 +
  364 + while (i < end) {
  365 + memcpy(p, kmap_buf, block_size);
  366 +
  367 + i += block_size;
  368 + p += block_size;
  369 + }
  370 + kunmap(sg_page(sg));
  371 +
  372 + return buf;
  373 +}
  374 +
331 375 static sense_reason_t
  376 +fd_execute_write_same(struct se_cmd *cmd)
  377 +{
  378 + struct se_device *se_dev = cmd->se_dev;
  379 + struct fd_dev *fd_dev = FD_DEV(se_dev);
  380 + struct file *f = fd_dev->fd_file;
  381 + struct scatterlist *sg;
  382 + struct iovec *iov;
  383 + mm_segment_t old_fs;
  384 + sector_t nolb = sbc_get_write_same_sectors(cmd);
  385 + loff_t pos = cmd->t_task_lba * se_dev->dev_attrib.block_size;
  386 + unsigned int len, len_tmp, iov_num;
  387 + int i, rc;
  388 + unsigned char *buf;
  389 +
  390 + if (!nolb) {
  391 + target_complete_cmd(cmd, SAM_STAT_GOOD);
  392 + return 0;
  393 + }
  394 + sg = &cmd->t_data_sg[0];
  395 +
  396 + if (cmd->t_data_nents > 1 ||
  397 + sg->length != cmd->se_dev->dev_attrib.block_size) {
  398 + pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
  399 + " block_size: %u\n", cmd->t_data_nents, sg->length,
  400 + cmd->se_dev->dev_attrib.block_size);
  401 + return TCM_INVALID_CDB_FIELD;
  402 + }
  403 +
  404 + len = len_tmp = nolb * se_dev->dev_attrib.block_size;
  405 + iov_num = DIV_ROUND_UP(len, PAGE_SIZE);
  406 +
  407 + buf = fd_setup_write_same_buf(cmd, sg, len);
  408 + if (!buf)
  409 + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
  410 +
  411 + iov = vzalloc(sizeof(struct iovec) * iov_num);
  412 + if (!iov) {
  413 + pr_err("Unable to allocate fd_execute_write_same iovecs\n");
  414 + kfree(buf);
  415 + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
  416 + }
  417 + /*
  418 + * Map the single fabric received scatterlist block now populated
  419 + * in *buf into each iovec for I/O submission.
  420 + */
  421 + for (i = 0; i < iov_num; i++) {
  422 + iov[i].iov_base = buf;
  423 + iov[i].iov_len = min_t(unsigned int, len_tmp, PAGE_SIZE);
  424 + len_tmp -= iov[i].iov_len;
  425 + }
  426 +
  427 + old_fs = get_fs();
  428 + set_fs(get_ds());
  429 + rc = vfs_writev(f, &iov[0], iov_num, &pos);
  430 + set_fs(old_fs);
  431 +
  432 + vfree(iov);
  433 + kfree(buf);
  434 +
  435 + if (rc < 0 || rc != len) {
  436 + pr_err("vfs_writev() returned %d for write same\n", rc);
  437 + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
  438 + }
  439 +
  440 + target_complete_cmd(cmd, SAM_STAT_GOOD);
  441 + return 0;
  442 +}
  443 +
  444 +static sense_reason_t
332 445 fd_execute_rw(struct se_cmd *cmd)
333 446 {
334 447 struct scatterlist *sgl = cmd->t_data_sg;
... ... @@ -486,6 +599,7 @@
486 599 static struct sbc_ops fd_sbc_ops = {
487 600 .execute_rw = fd_execute_rw,
488 601 .execute_sync_cache = fd_execute_sync_cache,
  602 + .execute_write_same = fd_execute_write_same,
489 603 };
490 604  
491 605 static sense_reason_t
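
fd_execute_write_same() above relies on a small trick: the single received block is replicated into one page-sized buffer, and every iovec then points at that same buffer, so a single vfs_writev() call writes out the whole WRITE_SAME extent. A rough userspace analogue using plain writev() (the file name and sizes are arbitrary):

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

#define BLOCK_SIZE 512
#define CHUNK 4096	/* stand-in for PAGE_SIZE */

int main(void)
{
	unsigned char block[BLOCK_SIZE], chunk[CHUNK];
	size_t total = 16 * CHUNK, left = total;
	int iov_num = total / CHUNK, i;
	struct iovec *iov = calloc(iov_num, sizeof(*iov));
	int fd = open("write_same.bin", O_CREAT | O_TRUNC | O_WRONLY, 0600);

	if (!iov || fd < 0)
		return 1;

	memset(block, 0xab, sizeof(block));
	/* Replicate the single source block across one chunk... */
	for (i = 0; i < CHUNK / BLOCK_SIZE; i++)
		memcpy(chunk + i * BLOCK_SIZE, block, BLOCK_SIZE);
	/* ...then point every iovec at that same chunk. */
	for (i = 0; i < iov_num; i++) {
		iov[i].iov_base = chunk;
		iov[i].iov_len = left < CHUNK ? left : CHUNK;
		left -= iov[i].iov_len;
	}
	if (writev(fd, iov, iov_num) < 0)	/* one syscall, 64 KiB out */
		perror("writev");
	close(fd);
	free(iov);
	return 0;
}
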
drivers/target/target_core_iblock.c
... ... @@ -154,6 +154,7 @@
154 154  
155 155 if (blk_queue_nonrot(q))
156 156 dev->dev_attrib.is_nonrot = 1;
  157 +
157 158 return 0;
158 159  
159 160 out_free_bioset:
160 161  
... ... @@ -390,10 +391,19 @@
390 391 sense_reason_t ret = 0;
391 392 int dl, bd_dl, err;
392 393  
  394 + /* We never set ANC_SUP */
  395 + if (cmd->t_task_cdb[1])
  396 + return TCM_INVALID_CDB_FIELD;
  397 +
  398 + if (cmd->data_length == 0) {
  399 + target_complete_cmd(cmd, SAM_STAT_GOOD);
  400 + return 0;
  401 + }
  402 +
393 403 if (cmd->data_length < 8) {
394 404 pr_warn("UNMAP parameter list length %u too small\n",
395 405 cmd->data_length);
396   - return TCM_INVALID_PARAMETER_LIST;
  406 + return TCM_PARAMETER_LIST_LENGTH_ERROR;
397 407 }
398 408  
399 409 buf = transport_kmap_data_sg(cmd);
... ... @@ -463,7 +473,7 @@
463 473 int rc;
464 474  
465 475 rc = blkdev_issue_discard(ib_dev->ibd_bd, cmd->t_task_lba,
466   - spc_get_write_same_sectors(cmd), GFP_KERNEL, 0);
  476 + sbc_get_write_same_sectors(cmd), GFP_KERNEL, 0);
467 477 if (rc < 0) {
468 478 pr_warn("blkdev_issue_discard() failed: %d\n", rc);
469 479 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
... ... @@ -481,7 +491,7 @@
481 491 struct bio *bio;
482 492 struct bio_list list;
483 493 sector_t block_lba = cmd->t_task_lba;
484   - sector_t sectors = spc_get_write_same_sectors(cmd);
  494 + sector_t sectors = sbc_get_write_same_sectors(cmd);
485 495  
486 496 sg = &cmd->t_data_sg[0];
487 497  
... ... @@ -654,20 +664,24 @@
654 664 u32 sg_num = sgl_nents;
655 665 sector_t block_lba;
656 666 unsigned bio_cnt;
657   - int rw;
  667 + int rw = 0;
658 668 int i;
659 669  
660 670 if (data_direction == DMA_TO_DEVICE) {
  671 + struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
  672 + struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd);
661 673 /*
662   - * Force data to disk if we pretend to not have a volatile
663   - * write cache, or the initiator set the Force Unit Access bit.
  674 + * Force writethrough using WRITE_FUA if a volatile write cache
  675 + * is not enabled, or if initiator set the Force Unit Access bit.
664 676 */
665   - if (dev->dev_attrib.emulate_write_cache == 0 ||
666   - (dev->dev_attrib.emulate_fua_write > 0 &&
667   - (cmd->se_cmd_flags & SCF_FUA)))
668   - rw = WRITE_FUA;
669   - else
  677 + if (q->flush_flags & REQ_FUA) {
  678 + if (cmd->se_cmd_flags & SCF_FUA)
  679 + rw = WRITE_FUA;
  680 + else if (!(q->flush_flags & REQ_FLUSH))
  681 + rw = WRITE_FUA;
  682 + } else {
670 683 rw = WRITE;
  684 + }
671 685 } else {
672 686 rw = READ;
673 687 }
... ... @@ -774,6 +788,15 @@
774 788 return sbc_parse_cdb(cmd, &iblock_sbc_ops);
775 789 }
776 790  
  791 +bool iblock_get_write_cache(struct se_device *dev)
  792 +{
  793 + struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
  794 + struct block_device *bd = ib_dev->ibd_bd;
  795 + struct request_queue *q = bdev_get_queue(bd);
  796 +
  797 + return q->flush_flags & REQ_FLUSH;
  798 +}
  799 +
777 800 static struct se_subsystem_api iblock_template = {
778 801 .name = "iblock",
779 802 .inquiry_prod = "IBLOCK",
... ... @@ -790,6 +813,7 @@
790 813 .show_configfs_dev_params = iblock_show_configfs_dev_params,
791 814 .get_device_type = sbc_get_device_type,
792 815 .get_blocks = iblock_get_blocks,
  816 + .get_write_cache = iblock_get_write_cache,
793 817 };
794 818  
795 819 static int __init iblock_module_init(void)
drivers/target/target_core_internal.h
... ... @@ -25,6 +25,7 @@
25 25 int se_dev_set_unmap_granularity(struct se_device *, u32);
26 26 int se_dev_set_unmap_granularity_alignment(struct se_device *, u32);
27 27 int se_dev_set_max_write_same_len(struct se_device *, u32);
  28 +int se_dev_set_emulate_model_alias(struct se_device *, int);
28 29 int se_dev_set_emulate_dpo(struct se_device *, int);
29 30 int se_dev_set_emulate_fua_write(struct se_device *, int);
30 31 int se_dev_set_emulate_fua_read(struct se_device *, int);
... ... @@ -45,7 +46,7 @@
45 46 int core_dev_del_lun(struct se_portal_group *, u32);
46 47 struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32);
47 48 struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *,
48   - u32, char *, int *);
  49 + struct se_node_acl *, u32, int *);
49 50 int core_dev_add_initiator_node_lun_acl(struct se_portal_group *,
50 51 struct se_lun_acl *, u32, u32);
51 52 int core_dev_del_initiator_node_lun_acl(struct se_portal_group *,
drivers/target/target_core_rd.c
... ... @@ -256,10 +256,12 @@
256 256  
257 257 static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
258 258 {
259   - u32 i;
260 259 struct rd_dev_sg_table *sg_table;
  260 + u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
  261 + sizeof(struct scatterlist));
261 262  
262   - for (i = 0; i < rd_dev->sg_table_count; i++) {
  263 + i = page / sg_per_table;
  264 + if (i < rd_dev->sg_table_count) {
263 265 sg_table = &rd_dev->sg_table_array[i];
264 266 if ((sg_table->page_start_offset <= page) &&
265 267 (sg_table->page_end_offset >= page))
... ... @@ -314,7 +316,19 @@
314 316 void *rd_addr;
315 317  
316 318 sg_miter_next(&m);
  319 + if (!(u32)m.length) {
  320 + pr_debug("RD[%u]: invalid sgl %p len %zu\n",
  321 + dev->rd_dev_id, m.addr, m.length);
  322 + sg_miter_stop(&m);
  323 + return TCM_INCORRECT_AMOUNT_OF_DATA;
  324 + }
317 325 len = min((u32)m.length, src_len);
  326 + if (len > rd_size) {
  327 + pr_debug("RD[%u]: size underrun page %d offset %d "
  328 + "size %d\n", dev->rd_dev_id,
  329 + rd_page, rd_offset, rd_size);
  330 + len = rd_size;
  331 + }
318 332 m.consumed = len;
319 333  
320 334 rd_addr = sg_virt(rd_sg) + rd_offset;
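
The rd_get_sg_table() change above is the "sg_table lookup scalability" item from the pull summary: since every table covers a fixed number of pages, the right table index can be computed in O(1) instead of scanning the array. The idea in miniature (the sizes are illustrative):

#include <stdio.h>

#define SG_PER_TABLE 128	/* pages covered by each fixed-size table */

/* Illustrative analogue of the new lookup: a single division replaces
 * the old linear walk over every table. */
static int table_for_page(unsigned int page, unsigned int table_count)
{
	unsigned int i = page / SG_PER_TABLE;

	return i < table_count ? (int)i : -1;
}

int main(void)
{
	printf("page 300 -> table %d\n", table_for_page(300, 4));	/* 2 */
	printf("page 999 -> table %d\n", table_for_page(999, 4));	/* -1 */
	return 0;
}
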
drivers/target/target_core_sbc.c
... ... @@ -105,7 +105,7 @@
105 105 return 0;
106 106 }
107 107  
108   -sector_t spc_get_write_same_sectors(struct se_cmd *cmd)
  108 +sector_t sbc_get_write_same_sectors(struct se_cmd *cmd)
109 109 {
110 110 u32 num_blocks;
111 111  
... ... @@ -126,7 +126,7 @@
126 126 return cmd->se_dev->transport->get_blocks(cmd->se_dev) -
127 127 cmd->t_task_lba + 1;
128 128 }
129   -EXPORT_SYMBOL(spc_get_write_same_sectors);
  129 +EXPORT_SYMBOL(sbc_get_write_same_sectors);
130 130  
131 131 static sense_reason_t
132 132 sbc_emulate_noop(struct se_cmd *cmd)
... ... @@ -233,7 +233,7 @@
233 233 static sense_reason_t
234 234 sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
235 235 {
236   - unsigned int sectors = spc_get_write_same_sectors(cmd);
  236 + unsigned int sectors = sbc_get_write_same_sectors(cmd);
237 237  
238 238 if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
239 239 pr_err("WRITE_SAME PBDATA and LBDATA"
... ... @@ -486,7 +486,7 @@
486 486 */
487 487 if (cmd->t_task_lba || sectors) {
488 488 if (sbc_check_valid_sectors(cmd) < 0)
489   - return TCM_INVALID_CDB_FIELD;
  489 + return TCM_ADDRESS_OUT_OF_RANGE;
490 490 }
491 491 cmd->execute_cmd = ops->execute_sync_cache;
492 492 break;
drivers/target/target_core_spc.c
... ... @@ -66,8 +66,8 @@
66 66 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
67 67 }
68 68  
69   -static sense_reason_t
70   -spc_emulate_inquiry_std(struct se_cmd *cmd, char *buf)
  69 +sense_reason_t
  70 +spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
71 71 {
72 72 struct se_lun *lun = cmd->se_lun;
73 73 struct se_device *dev = cmd->se_dev;
... ... @@ -104,6 +104,7 @@
104 104  
105 105 return 0;
106 106 }
  107 +EXPORT_SYMBOL(spc_emulate_inquiry_std);
107 108  
108 109 /* unit serial number */
109 110 static sense_reason_t
... ... @@ -160,7 +161,7 @@
160 161 * Device identification VPD, for a complete list of
161 162 * DESIGNATOR TYPEs see spc4r17 Table 459.
162 163 */
163   -static sense_reason_t
  164 +sense_reason_t
164 165 spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
165 166 {
166 167 struct se_device *dev = cmd->se_dev;
... ... @@ -404,17 +405,33 @@
404 405 buf[3] = (len & 0xff); /* Page Length for VPD 0x83 */
405 406 return 0;
406 407 }
  408 +EXPORT_SYMBOL(spc_emulate_evpd_83);
407 409  
  410 +static bool
  411 +spc_check_dev_wce(struct se_device *dev)
  412 +{
  413 + bool wce = false;
  414 +
  415 + if (dev->transport->get_write_cache)
  416 + wce = dev->transport->get_write_cache(dev);
  417 + else if (dev->dev_attrib.emulate_write_cache > 0)
  418 + wce = true;
  419 +
  420 + return wce;
  421 +}
  422 +
408 423 /* Extended INQUIRY Data VPD Page */
409 424 static sense_reason_t
410 425 spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
411 426 {
  427 + struct se_device *dev = cmd->se_dev;
  428 +
412 429 buf[3] = 0x3c;
413 430 /* Set HEADSUP, ORDSUP, SIMPSUP */
414 431 buf[5] = 0x07;
415 432  
416 433 /* If WriteCache emulation is enabled, set V_SUP */
417   - if (cmd->se_dev->dev_attrib.emulate_write_cache > 0)
  434 + if (spc_check_dev_wce(dev))
418 435 buf[6] = 0x01;
419 436 return 0;
420 437 }
... ... @@ -764,7 +781,7 @@
764 781 if (pc == 1)
765 782 goto out;
766 783  
767   - if (dev->dev_attrib.emulate_write_cache > 0)
  784 + if (spc_check_dev_wce(dev))
768 785 p[2] = 0x04; /* Write Cache Enable */
769 786 p[12] = 0x20; /* Disabled Read Ahead */
770 787  
... ... @@ -876,7 +893,7 @@
876 893 (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
877 894 spc_modesense_write_protect(&buf[length], type);
878 895  
879   - if ((dev->dev_attrib.emulate_write_cache > 0) &&
  896 + if ((spc_check_dev_wce(dev)) &&
880 897 (dev->dev_attrib.emulate_fua_write > 0))
881 898 spc_modesense_dpofua(&buf[length], type);
882 899  
... ... @@ -983,6 +1000,14 @@
983 1000 int ret = 0;
984 1001 int i;
985 1002  
  1003 + if (!cmd->data_length) {
  1004 + target_complete_cmd(cmd, GOOD);
  1005 + return 0;
  1006 + }
  1007 +
  1008 + if (cmd->data_length < off + 2)
  1009 + return TCM_PARAMETER_LIST_LENGTH_ERROR;
  1010 +
986 1011 buf = transport_kmap_data_sg(cmd);
987 1012 if (!buf)
988 1013 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
... ... @@ -1007,6 +1032,11 @@
1007 1032 goto out;
1008 1033  
1009 1034 check_contents:
  1035 + if (cmd->data_length < off + length) {
  1036 + ret = TCM_PARAMETER_LIST_LENGTH_ERROR;
  1037 + goto out;
  1038 + }
  1039 +
1010 1040 if (memcmp(buf + off, tbuf, length))
1011 1041 ret = TCM_INVALID_PARAMETER_LIST;
1012 1042  
drivers/target/target_core_tmr.c
... ... @@ -331,18 +331,6 @@
331 331  
332 332 fe_count = atomic_read(&cmd->t_fe_count);
333 333  
334   - if (!(cmd->transport_state & CMD_T_ACTIVE)) {
335   - pr_debug("LUN_RESET: got CMD_T_ACTIVE for"
336   - " cdb: %p, t_fe_count: %d dev: %p\n", cmd,
337   - fe_count, dev);
338   - cmd->transport_state |= CMD_T_ABORTED;
339   - spin_unlock_irqrestore(&cmd->t_state_lock, flags);
340   -
341   - core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
342   - continue;
343   - }
344   - pr_debug("LUN_RESET: Got !CMD_T_ACTIVE for cdb: %p,"
345   - " t_fe_count: %d dev: %p\n", cmd, fe_count, dev);
346 334 cmd->transport_state |= CMD_T_ABORTED;
347 335 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
348 336  
drivers/target/target_core_tpg.c
... ... @@ -111,16 +111,10 @@
111 111 struct se_node_acl *acl;
112 112  
113 113 spin_lock_irq(&tpg->acl_node_lock);
114   - list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
115   - if (!strcmp(acl->initiatorname, initiatorname) &&
116   - !acl->dynamic_node_acl) {
117   - spin_unlock_irq(&tpg->acl_node_lock);
118   - return acl;
119   - }
120   - }
  114 + acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
121 115 spin_unlock_irq(&tpg->acl_node_lock);
122 116  
123   - return NULL;
  117 + return acl;
124 118 }
125 119  
126 120 /* core_tpg_add_node_to_devs():
drivers/target/target_core_transport.c
... ... @@ -907,15 +907,18 @@
907 907  
908 908 switch (vpd->device_identifier_code_set) {
909 909 case 0x01: /* Binary */
910   - sprintf(buf, "T10 VPD Binary Device Identifier: %s\n",
  910 + snprintf(buf, sizeof(buf),
  911 + "T10 VPD Binary Device Identifier: %s\n",
911 912 &vpd->device_identifier[0]);
912 913 break;
913 914 case 0x02: /* ASCII */
914   - sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n",
  915 + snprintf(buf, sizeof(buf),
  916 + "T10 VPD ASCII Device Identifier: %s\n",
915 917 &vpd->device_identifier[0]);
916 918 break;
917 919 case 0x03: /* UTF-8 */
918   - sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n",
  920 + snprintf(buf, sizeof(buf),
  921 + "T10 VPD UTF-8 Device Identifier: %s\n",
919 922 &vpd->device_identifier[0]);
920 923 break;
921 924 default:
... ... @@ -1514,6 +1517,7 @@
1514 1517 case TCM_UNSUPPORTED_SCSI_OPCODE:
1515 1518 case TCM_INVALID_CDB_FIELD:
1516 1519 case TCM_INVALID_PARAMETER_LIST:
  1520 + case TCM_PARAMETER_LIST_LENGTH_ERROR:
1517 1521 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
1518 1522 case TCM_UNKNOWN_MODE_PAGE:
1519 1523 case TCM_WRITE_PROTECTED:
... ... @@ -2673,6 +2677,15 @@
2673 2677 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
2674 2678 /* INVALID FIELD IN PARAMETER LIST */
2675 2679 buffer[SPC_ASC_KEY_OFFSET] = 0x26;
  2680 + break;
  2681 + case TCM_PARAMETER_LIST_LENGTH_ERROR:
  2682 + /* CURRENT ERROR */
  2683 + buffer[0] = 0x70;
  2684 + buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
  2685 + /* ILLEGAL REQUEST */
  2686 + buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
  2687 + /* PARAMETER LIST LENGTH ERROR */
  2688 + buffer[SPC_ASC_KEY_OFFSET] = 0x1a;
2676 2689 break;
2677 2690 case TCM_UNEXPECTED_UNSOLICITED_DATA:
2678 2691 /* CURRENT ERROR */
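
The new TCM_PARAMETER_LIST_LENGTH_ERROR case above fills in fixed-format SCSI sense data: response code 0x70, sense key ILLEGAL REQUEST in byte 2, the additional sense length in byte 7, and ASC 0x1a (PARAMETER LIST LENGTH ERROR) in byte 12. A self-contained sketch of that layout, where build_sense() is a hypothetical helper:

#include <stdio.h>
#include <string.h>

#define SENSE_LEN 18

/* Lay out fixed-format sense data as in the case above. */
static void build_sense(unsigned char *buf, unsigned char key,
			unsigned char asc, unsigned char ascq)
{
	memset(buf, 0, SENSE_LEN);
	buf[0] = 0x70;	/* current error, fixed format */
	buf[2] = key;	/* sense key */
	buf[7] = 10;	/* additional sense length: bytes 8..17 */
	buf[12] = asc;	/* additional sense code */
	buf[13] = ascq;	/* additional sense code qualifier */
}

int main(void)
{
	unsigned char sense[SENSE_LEN];
	int i;

	/* ILLEGAL REQUEST (0x05) / PARAMETER LIST LENGTH ERROR (0x1a) */
	build_sense(sense, 0x05, 0x1a, 0x00);
	for (i = 0; i < SENSE_LEN; i++)
		printf("%02x ", sense[i]);
	printf("\n");
	return 0;
}
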
drivers/vhost/tcm_vhost.c
... ... @@ -47,6 +47,8 @@
47 47 #include <linux/vhost.h>
48 48 #include <linux/virtio_net.h> /* TODO vhost.h currently depends on this */
49 49 #include <linux/virtio_scsi.h>
  50 +#include <linux/llist.h>
  51 +#include <linux/bitmap.h>
50 52  
51 53 #include "vhost.c"
52 54 #include "vhost.h"
53 55  
... ... @@ -58,14 +60,20 @@
58 60 VHOST_SCSI_VQ_IO = 2,
59 61 };
60 62  
  63 +#define VHOST_SCSI_MAX_TARGET 256
  64 +#define VHOST_SCSI_MAX_VQ 128
  65 +
61 66 struct vhost_scsi {
62   - struct tcm_vhost_tpg *vs_tpg; /* Protected by vhost_scsi->dev.mutex */
  67 + /* Protected by vhost_scsi->dev.mutex */
  68 + struct tcm_vhost_tpg *vs_tpg[VHOST_SCSI_MAX_TARGET];
  69 + char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
  70 + bool vs_endpoint;
  71 +
63 72 struct vhost_dev dev;
64   - struct vhost_virtqueue vqs[3];
  73 + struct vhost_virtqueue vqs[VHOST_SCSI_MAX_VQ];
65 74  
66 75 struct vhost_work vs_completion_work; /* cmd completion work item */
67   - struct list_head vs_completion_list; /* cmd completion queue */
68   - spinlock_t vs_completion_lock; /* protects s_completion_list */
  76 + struct llist_head vs_completion_list; /* cmd completion queue */
69 77 };
70 78  
71 79 /* Local pointer to allocated TCM configfs fabric module */
... ... @@ -77,6 +85,12 @@
77 85 static DEFINE_MUTEX(tcm_vhost_mutex);
78 86 static LIST_HEAD(tcm_vhost_list);
79 87  
  88 +static int iov_num_pages(struct iovec *iov)
  89 +{
  90 + return (PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) -
  91 + ((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT;
  92 +}
  93 +
80 94 static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
81 95 {
82 96 return 1;
... ... @@ -301,9 +315,7 @@
301 315 {
302 316 struct vhost_scsi *vs = tv_cmd->tvc_vhost;
303 317  
304   - spin_lock_bh(&vs->vs_completion_lock);
305   - list_add_tail(&tv_cmd->tvc_completion_list, &vs->vs_completion_list);
306   - spin_unlock_bh(&vs->vs_completion_lock);
  318 + llist_add(&tv_cmd->tvc_completion_list, &vs->vs_completion_list);
307 319  
308 320 vhost_work_queue(&vs->dev, &vs->vs_completion_work);
309 321 }
... ... @@ -347,27 +359,6 @@
347 359 kfree(tv_cmd);
348 360 }
349 361  
350   -/* Dequeue a command from the completion list */
351   -static struct tcm_vhost_cmd *vhost_scsi_get_cmd_from_completion(
352   - struct vhost_scsi *vs)
353   -{
354   - struct tcm_vhost_cmd *tv_cmd = NULL;
355   -
356   - spin_lock_bh(&vs->vs_completion_lock);
357   - if (list_empty(&vs->vs_completion_list)) {
358   - spin_unlock_bh(&vs->vs_completion_lock);
359   - return NULL;
360   - }
361   -
362   - list_for_each_entry(tv_cmd, &vs->vs_completion_list,
363   - tvc_completion_list) {
364   - list_del(&tv_cmd->tvc_completion_list);
365   - break;
366   - }
367   - spin_unlock_bh(&vs->vs_completion_lock);
368   - return tv_cmd;
369   -}
370   -
371 362 /* Fill in status and signal that we are done processing this command
372 363 *
373 364 * This is scheduled in the vhost work queue so we are called with the owner
... ... @@ -377,12 +368,20 @@
377 368 {
378 369 struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
379 370 vs_completion_work);
  371 + DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
  372 + struct virtio_scsi_cmd_resp v_rsp;
380 373 struct tcm_vhost_cmd *tv_cmd;
  374 + struct llist_node *llnode;
  375 + struct se_cmd *se_cmd;
  376 + int ret, vq;
381 377  
382   - while ((tv_cmd = vhost_scsi_get_cmd_from_completion(vs))) {
383   - struct virtio_scsi_cmd_resp v_rsp;
384   - struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
385   - int ret;
  378 + bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
  379 + llnode = llist_del_all(&vs->vs_completion_list);
  380 + while (llnode) {
  381 + tv_cmd = llist_entry(llnode, struct tcm_vhost_cmd,
  382 + tvc_completion_list);
  383 + llnode = llist_next(llnode);
  384 + se_cmd = &tv_cmd->tvc_se_cmd;
386 385  
387 386 pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
388 387 tv_cmd, se_cmd->residual_count, se_cmd->scsi_status);
389 388  
... ... @@ -395,15 +394,20 @@
395 394 memcpy(v_rsp.sense, tv_cmd->tvc_sense_buf,
396 395 v_rsp.sense_len);
397 396 ret = copy_to_user(tv_cmd->tvc_resp, &v_rsp, sizeof(v_rsp));
398   - if (likely(ret == 0))
399   - vhost_add_used(&vs->vqs[2], tv_cmd->tvc_vq_desc, 0);
400   - else
  397 + if (likely(ret == 0)) {
  398 + vhost_add_used(tv_cmd->tvc_vq, tv_cmd->tvc_vq_desc, 0);
  399 + vq = tv_cmd->tvc_vq - vs->vqs;
  400 + __set_bit(vq, signal);
  401 + } else
401 402 pr_err("Faulted on virtio_scsi_cmd_resp\n");
402 403  
403 404 vhost_scsi_free_cmd(tv_cmd);
404 405 }
405 406  
406   - vhost_signal(&vs->dev, &vs->vqs[2]);
  407 + vq = -1;
  408 + while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
  409 + < VHOST_SCSI_MAX_VQ)
  410 + vhost_signal(&vs->dev, &vs->vqs[vq]);
407 411 }
408 412  
409 413 static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd(
... ... @@ -426,7 +430,6 @@
426 430 pr_err("Unable to allocate struct tcm_vhost_cmd\n");
427 431 return ERR_PTR(-ENOMEM);
428 432 }
429   - INIT_LIST_HEAD(&tv_cmd->tvc_completion_list);
430 433 tv_cmd->tvc_tag = v_req->tag;
431 434 tv_cmd->tvc_task_attr = v_req->task_attr;
432 435 tv_cmd->tvc_exp_data_len = exp_data_len;
... ... @@ -442,40 +445,47 @@
442 445 * Returns the number of scatterlist entries used or -errno on error.
443 446 */
444 447 static int vhost_scsi_map_to_sgl(struct scatterlist *sgl,
445   - unsigned int sgl_count, void __user *ptr, size_t len, int write)
  448 + unsigned int sgl_count, struct iovec *iov, int write)
446 449 {
  450 + unsigned int npages = 0, pages_nr, offset, nbytes;
447 451 struct scatterlist *sg = sgl;
448   - unsigned int npages = 0;
449   - int ret;
  452 + void __user *ptr = iov->iov_base;
  453 + size_t len = iov->iov_len;
  454 + struct page **pages;
  455 + int ret, i;
450 456  
451   - while (len > 0) {
452   - struct page *page;
453   - unsigned int offset = (uintptr_t)ptr & ~PAGE_MASK;
454   - unsigned int nbytes = min_t(unsigned int,
455   - PAGE_SIZE - offset, len);
  457 + pages_nr = iov_num_pages(iov);
  458 + if (pages_nr > sgl_count)
  459 + return -ENOBUFS;
456 460  
457   - if (npages == sgl_count) {
458   - ret = -ENOBUFS;
459   - goto err;
460   - }
  461 + pages = kmalloc(pages_nr * sizeof(struct page *), GFP_KERNEL);
  462 + if (!pages)
  463 + return -ENOMEM;
461 464  
462   - ret = get_user_pages_fast((unsigned long)ptr, 1, write, &page);
463   - BUG_ON(ret == 0); /* we should either get our page or fail */
464   - if (ret < 0)
465   - goto err;
  465 + ret = get_user_pages_fast((unsigned long)ptr, pages_nr, write, pages);
  466 + /* No pages were pinned */
  467 + if (ret < 0)
  468 + goto out;
  469 + /* Less pages pinned than wanted */
  470 + if (ret != pages_nr) {
  471 + for (i = 0; i < ret; i++)
  472 + put_page(pages[i]);
  473 + ret = -EFAULT;
  474 + goto out;
  475 + }
466 476  
467   - sg_set_page(sg, page, nbytes, offset);
  477 + while (len > 0) {
  478 + offset = (uintptr_t)ptr & ~PAGE_MASK;
  479 + nbytes = min_t(unsigned int, PAGE_SIZE - offset, len);
  480 + sg_set_page(sg, pages[npages], nbytes, offset);
468 481 ptr += nbytes;
469 482 len -= nbytes;
470 483 sg++;
471 484 npages++;
472 485 }
473   - return npages;
474 486  
475   -err:
476   - /* Put pages that we hold */
477   - for (sg = sgl; sg != &sgl[npages]; sg++)
478   - put_page(sg_page(sg));
  487 +out:
  488 + kfree(pages);
479 489 return ret;
480 490 }
481 491  
... ... @@ -491,11 +501,9 @@
491 501 * Find out how long sglist needs to be
492 502 */
493 503 sgl_count = 0;
494   - for (i = 0; i < niov; i++) {
495   - sgl_count += (((uintptr_t)iov[i].iov_base + iov[i].iov_len +
496   - PAGE_SIZE - 1) >> PAGE_SHIFT) -
497   - ((uintptr_t)iov[i].iov_base >> PAGE_SHIFT);
498   - }
  504 + for (i = 0; i < niov; i++)
  505 + sgl_count += iov_num_pages(&iov[i]);
  506 +
499 507 /* TODO overflow checking */
500 508  
501 509 sg = kmalloc(sizeof(tv_cmd->tvc_sgl[0]) * sgl_count, GFP_ATOMIC);
... ... @@ -510,8 +518,7 @@
510 518  
511 519 pr_debug("Mapping %u iovecs for %u pages\n", niov, sgl_count);
512 520 for (i = 0; i < niov; i++) {
513   - ret = vhost_scsi_map_to_sgl(sg, sgl_count, iov[i].iov_base,
514   - iov[i].iov_len, write);
  521 + ret = vhost_scsi_map_to_sgl(sg, sgl_count, &iov[i], write);
515 522 if (ret < 0) {
516 523 for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
517 524 put_page(sg_page(&tv_cmd->tvc_sgl[i]));
... ... @@ -563,19 +570,19 @@
563 570 }
564 571 }
565 572  
566   -static void vhost_scsi_handle_vq(struct vhost_scsi *vs)
  573 +static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
  574 + struct vhost_virtqueue *vq)
567 575 {
568   - struct vhost_virtqueue *vq = &vs->vqs[2];
569 576 struct virtio_scsi_cmd_req v_req;
570 577 struct tcm_vhost_tpg *tv_tpg;
571 578 struct tcm_vhost_cmd *tv_cmd;
572 579 u32 exp_data_len, data_first, data_num, data_direction;
573 580 unsigned out, in, i;
574 581 int head, ret;
  582 + u8 target;
575 583  
576 584 /* Must use ioctl VHOST_SCSI_SET_ENDPOINT */
577   - tv_tpg = vs->vs_tpg;
578   - if (unlikely(!tv_tpg))
  585 + if (unlikely(!vs->vs_endpoint))
579 586 return;
580 587  
581 588 mutex_lock(&vq->mutex);
... ... @@ -643,6 +650,28 @@
643 650 break;
644 651 }
645 652  
  653 + /* Extract the tpgt */
  654 + target = v_req.lun[1];
  655 + tv_tpg = vs->vs_tpg[target];
  656 +
  657 + /* Target does not exist, fail the request */
  658 + if (unlikely(!tv_tpg)) {
  659 + struct virtio_scsi_cmd_resp __user *resp;
  660 + struct virtio_scsi_cmd_resp rsp;
  661 +
  662 + memset(&rsp, 0, sizeof(rsp));
  663 + rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
  664 + resp = vq->iov[out].iov_base;
  665 + ret = __copy_to_user(resp, &rsp, sizeof(rsp));
  666 + if (!ret)
  667 + vhost_add_used_and_signal(&vs->dev,
  668 + vq, head, 0);
  669 + else
  670 + pr_err("Faulted on virtio_scsi_cmd_resp\n");
  671 +
  672 + continue;
  673 + }
  674 +
646 675 exp_data_len = 0;
647 676 for (i = 0; i < data_num; i++)
648 677 exp_data_len += vq->iov[data_first + i].iov_len;
... ... @@ -658,6 +687,7 @@
658 687 ": %d\n", tv_cmd, exp_data_len, data_direction);
659 688  
660 689 tv_cmd->tvc_vhost = vs;
  690 + tv_cmd->tvc_vq = vq;
661 691  
662 692 if (unlikely(vq->iov[out].iov_len !=
663 693 sizeof(struct virtio_scsi_cmd_resp))) {
... ... @@ -738,7 +768,7 @@
738 768 poll.work);
739 769 struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
740 770  
741   - vhost_scsi_handle_vq(vs);
  771 + vhost_scsi_handle_vq(vs, vq);
742 772 }
743 773  
744 774 /*
... ... @@ -751,7 +781,8 @@
751 781 {
752 782 struct tcm_vhost_tport *tv_tport;
753 783 struct tcm_vhost_tpg *tv_tpg;
754   - int index;
  784 + bool match = false;
  785 + int index, ret;
755 786  
756 787 mutex_lock(&vs->dev.mutex);
757 788 /* Verify that ring has been setup correctly. */
... ... @@ -762,7 +793,6 @@
762 793 return -EFAULT;
763 794 }
764 795 }
765   - mutex_unlock(&vs->dev.mutex);
766 796  
767 797 mutex_lock(&tcm_vhost_mutex);
768 798 list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) {
... ... @@ -777,30 +807,33 @@
777 807 }
778 808 tv_tport = tv_tpg->tport;
779 809  
780   - if (!strcmp(tv_tport->tport_name, t->vhost_wwpn) &&
781   - (tv_tpg->tport_tpgt == t->vhost_tpgt)) {
782   - tv_tpg->tv_tpg_vhost_count++;
783   - mutex_unlock(&tv_tpg->tv_tpg_mutex);
784   - mutex_unlock(&tcm_vhost_mutex);
785   -
786   - mutex_lock(&vs->dev.mutex);
787   - if (vs->vs_tpg) {
788   - mutex_unlock(&vs->dev.mutex);
789   - mutex_lock(&tv_tpg->tv_tpg_mutex);
790   - tv_tpg->tv_tpg_vhost_count--;
  810 + if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
  811 + if (vs->vs_tpg[tv_tpg->tport_tpgt]) {
791 812 mutex_unlock(&tv_tpg->tv_tpg_mutex);
  813 + mutex_unlock(&tcm_vhost_mutex);
  814 + mutex_unlock(&vs->dev.mutex);
792 815 return -EEXIST;
793 816 }
794   -
795   - vs->vs_tpg = tv_tpg;
  817 + tv_tpg->tv_tpg_vhost_count++;
  818 + vs->vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
796 819 smp_mb__after_atomic_inc();
797   - mutex_unlock(&vs->dev.mutex);
798   - return 0;
  820 + match = true;
799 821 }
800 822 mutex_unlock(&tv_tpg->tv_tpg_mutex);
801 823 }
802 824 mutex_unlock(&tcm_vhost_mutex);
803   - return -EINVAL;
  825 +
  826 + if (match) {
  827 + memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
  828 + sizeof(vs->vs_vhost_wwpn));
  829 + vs->vs_endpoint = true;
  830 + ret = 0;
  831 + } else {
  832 + ret = -EEXIST;
  833 + }
  834 +
  835 + mutex_unlock(&vs->dev.mutex);
  836 + return ret;
804 837 }
805 838  
806 839 static int vhost_scsi_clear_endpoint(
... ... @@ -809,7 +842,8 @@
809 842 {
810 843 struct tcm_vhost_tport *tv_tport;
811 844 struct tcm_vhost_tpg *tv_tpg;
812   - int index, ret;
  845 + int index, ret, i;
  846 + u8 target;
813 847  
814 848 mutex_lock(&vs->dev.mutex);
815 849 /* Verify that ring has been setup correctly. */
... ... @@ -819,27 +853,32 @@
819 853 goto err;
820 854 }
821 855 }
  856 + for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
  857 + target = i;
822 858  
823   - if (!vs->vs_tpg) {
824   - ret = -ENODEV;
825   - goto err;
826   - }
827   - tv_tpg = vs->vs_tpg;
828   - tv_tport = tv_tpg->tport;
  859 + tv_tpg = vs->vs_tpg[target];
  860 + if (!tv_tpg)
  861 + continue;
829 862  
830   - if (strcmp(tv_tport->tport_name, t->vhost_wwpn) ||
831   - (tv_tpg->tport_tpgt != t->vhost_tpgt)) {
832   - pr_warn("tv_tport->tport_name: %s, tv_tpg->tport_tpgt: %hu"
833   - " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
834   - tv_tport->tport_name, tv_tpg->tport_tpgt,
835   - t->vhost_wwpn, t->vhost_tpgt);
836   - ret = -EINVAL;
837   - goto err;
  863 + tv_tport = tv_tpg->tport;
  864 + if (!tv_tport) {
  865 + ret = -ENODEV;
  866 + goto err;
  867 + }
  868 +
  869 + if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
  870 + pr_warn("tv_tport->tport_name: %s, tv_tpg->tport_tpgt: %hu"
  871 + " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
  872 + tv_tport->tport_name, tv_tpg->tport_tpgt,
  873 + t->vhost_wwpn, t->vhost_tpgt);
  874 + ret = -EINVAL;
  875 + goto err;
  876 + }
  877 + tv_tpg->tv_tpg_vhost_count--;
  878 + vs->vs_tpg[target] = NULL;
  879 + vs->vs_endpoint = false;
838 880 }
839   - tv_tpg->tv_tpg_vhost_count--;
840   - vs->vs_tpg = NULL;
841 881 mutex_unlock(&vs->dev.mutex);
842   -
843 882 return 0;
844 883  
845 884 err:
... ... @@ -850,20 +889,19 @@
850 889 static int vhost_scsi_open(struct inode *inode, struct file *f)
851 890 {
852 891 struct vhost_scsi *s;
853   - int r;
  892 + int r, i;
854 893  
855 894 s = kzalloc(sizeof(*s), GFP_KERNEL);
856 895 if (!s)
857 896 return -ENOMEM;
858 897  
859 898 vhost_work_init(&s->vs_completion_work, vhost_scsi_complete_cmd_work);
860   - INIT_LIST_HEAD(&s->vs_completion_list);
861   - spin_lock_init(&s->vs_completion_lock);
862 899  
863 900 s->vqs[VHOST_SCSI_VQ_CTL].handle_kick = vhost_scsi_ctl_handle_kick;
864 901 s->vqs[VHOST_SCSI_VQ_EVT].handle_kick = vhost_scsi_evt_handle_kick;
865   - s->vqs[VHOST_SCSI_VQ_IO].handle_kick = vhost_scsi_handle_kick;
866   - r = vhost_dev_init(&s->dev, s->vqs, 3);
  902 + for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++)
  903 + s->vqs[i].handle_kick = vhost_scsi_handle_kick;
  904 + r = vhost_dev_init(&s->dev, s->vqs, VHOST_SCSI_MAX_VQ);
867 905 if (r < 0) {
868 906 kfree(s);
869 907 return r;
... ... @@ -876,16 +914,12 @@
876 914 static int vhost_scsi_release(struct inode *inode, struct file *f)
877 915 {
878 916 struct vhost_scsi *s = f->private_data;
  917 + struct vhost_scsi_target t;
879 918  
880   - if (s->vs_tpg && s->vs_tpg->tport) {
881   - struct vhost_scsi_target backend;
882   -
883   - memcpy(backend.vhost_wwpn, s->vs_tpg->tport->tport_name,
884   - sizeof(backend.vhost_wwpn));
885   - backend.vhost_tpgt = s->vs_tpg->tport_tpgt;
886   - vhost_scsi_clear_endpoint(s, &backend);
887   - }
888   -
  919 + mutex_lock(&s->dev.mutex);
  920 + memcpy(t.vhost_wwpn, s->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
  921 + mutex_unlock(&s->dev.mutex);
  922 + vhost_scsi_clear_endpoint(s, &t);
889 923 vhost_dev_stop(&s->dev);
890 924 vhost_dev_cleanup(&s->dev, false);
891 925 kfree(s);
... ... @@ -899,9 +933,10 @@
899 933  
900 934 static void vhost_scsi_flush(struct vhost_scsi *vs)
901 935 {
902   - vhost_scsi_flush_vq(vs, VHOST_SCSI_VQ_CTL);
903   - vhost_scsi_flush_vq(vs, VHOST_SCSI_VQ_EVT);
904   - vhost_scsi_flush_vq(vs, VHOST_SCSI_VQ_IO);
  936 + int i;
  937 +
  938 + for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
  939 + vhost_scsi_flush_vq(vs, i);
905 940 }
906 941  
907 942 static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
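
The completion-path rework above swaps a spinlock-protected list_head for the kernel's lock-less llist: submitters push with llist_add(), and the vhost worker drains the entire batch at once with llist_del_all(). A userspace sketch of that single-consumer pattern using C11 atomics (an analogue of the idea, not the kernel API):

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct node {
	struct node *next;
	int id;
};

static _Atomic(struct node *) head = NULL;	/* cf. llist_head */

/* Producer side: lock-free push via a CAS loop, cf. llist_add(). */
static void push(struct node *n)
{
	struct node *old = atomic_load(&head);

	do {
		n->next = old;
	} while (!atomic_compare_exchange_weak(&head, &old, n));
}

/* Consumer side: steal the whole batch in one shot, cf. llist_del_all(). */
static struct node *pop_all(void)
{
	return atomic_exchange(&head, NULL);
}

int main(void)
{
	struct node a = { .id = 1 }, b = { .id = 2 };
	struct node *n;

	push(&a);
	push(&b);
	for (n = pop_all(); n; n = n->next)
		printf("completing cmd %d\n", n->id);	/* LIFO: 2, then 1 */
	return 0;
}
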
drivers/vhost/tcm_vhost.h
... ... @@ -23,6 +23,8 @@
23 23 struct virtio_scsi_cmd_resp __user *tvc_resp;
24 24 /* Pointer to vhost_scsi for our device */
25 25 struct vhost_scsi *tvc_vhost;
  26 + /* Pointer to vhost_virtqueue for the cmd */
  27 + struct vhost_virtqueue *tvc_vq;
26 28 /* Pointer to vhost nexus memory */
27 29 struct tcm_vhost_nexus *tvc_nexus;
28 30 /* The TCM I/O descriptor that is accessed via container_of() */
... ... @@ -34,7 +36,7 @@
34 36 /* Sense buffer that will be mapped into outgoing status */
35 37 unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
36 38 /* Completed commands list, serviced from vhost worker thread */
37   - struct list_head tvc_completion_list;
  39 + struct llist_node tvc_completion_list;
38 40 };
39 41  
40 42 struct tcm_vhost_nexus {
... ... @@ -93,9 +95,11 @@
93 95 *
94 96 * ABI Rev 0: July 2012 version starting point for v3.6-rc merge candidate +
95 97 * RFC-v2 vhost-scsi userspace. Add GET_ABI_VERSION ioctl usage
  98 + * ABI Rev 1: January 2013. Ignore vhost_tpgt field in struct vhost_scsi_target.
  99 + * All the targets under vhost_wwpn can be seen and used by guest.
96 100 */
97 101  
98   -#define VHOST_SCSI_ABI_VERSION 0
  102 +#define VHOST_SCSI_ABI_VERSION 1
99 103  
100 104 struct vhost_scsi_target {
101 105 int abi_version;
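
One detail from the tcm_vhost.c hunks above worth isolating: iov_num_pages() counts how many pages an iovec touches by rounding its end up and its base down to page boundaries. The same arithmetic in standalone form, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

static unsigned long num_pages(unsigned long base, unsigned long len)
{
	return (PAGE_ALIGN(base + len) - (base & PAGE_MASK)) >> PAGE_SHIFT;
}

int main(void)
{
	/* Eight bytes straddling a page boundary still touch two pages. */
	printf("%lu\n", num_pages(0x0ffc, 8));			/* 2 */
	/* A page-aligned 16 KiB buffer touches exactly four. */
	printf("%lu\n", num_pages(0x2000, 4 * PAGE_SIZE));	/* 4 */
	return 0;
}
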
include/target/target_core_backend.h
... ... @@ -35,6 +35,7 @@
35 35 u32 (*get_device_type)(struct se_device *);
36 36 sector_t (*get_blocks)(struct se_device *);
37 37 unsigned char *(*get_sense_buffer)(struct se_cmd *);
  38 + bool (*get_write_cache)(struct se_device *);
38 39 };
39 40  
40 41 struct sbc_ops {
... ... @@ -52,11 +53,13 @@
52 53  
53 54 sense_reason_t spc_parse_cdb(struct se_cmd *cmd, unsigned int *size);
54 55 sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd);
55   -sector_t spc_get_write_same_sectors(struct se_cmd *cmd);
  56 +sense_reason_t spc_emulate_inquiry_std(struct se_cmd *, unsigned char *);
  57 +sense_reason_t spc_emulate_evpd_83(struct se_cmd *, unsigned char *);
56 58  
57 59 sense_reason_t sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops);
58 60 u32 sbc_get_device_rev(struct se_device *dev);
59 61 u32 sbc_get_device_type(struct se_device *dev);
  62 +sector_t sbc_get_write_same_sectors(struct se_cmd *cmd);
60 63  
61 64 void transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *);
62 65 int transport_set_vpd_assoc(struct t10_vpd *, unsigned char *);
include/target/target_core_base.h
... ... @@ -44,7 +44,7 @@
44 44 /* Used by core_alua_store_tg_pt_gp_info() and target_core_alua_tg_pt_gp_show_attr_members() */
45 45 #define TG_PT_GROUP_NAME_BUF 256
46 46 /* Used to parse VPD into struct t10_vpd */
47   -#define VPD_TMP_BUF_SIZE 128
  47 +#define VPD_TMP_BUF_SIZE 254
48 48 /* Used by transport_generic_cmd_sequencer() */
49 49 #define READ_BLOCK_LEN 6
50 50 #define READ_CAP_LEN 8
... ... @@ -75,6 +75,8 @@
75 75 #define DA_MAX_WRITE_SAME_LEN 0
76 76 /* Default max transfer length */
77 77 #define DA_FABRIC_MAX_SECTORS 8192
  78 +/* Use a model alias based on the configfs backend device name */
  79 +#define DA_EMULATE_MODEL_ALIAS 0
78 80 /* Emulation for Direct Page Out */
79 81 #define DA_EMULATE_DPO 0
80 82 /* Emulation for Forced Unit Access WRITEs */
... ... @@ -193,6 +195,7 @@
193 195 TCM_RESERVATION_CONFLICT = R(0x10),
194 196 TCM_ADDRESS_OUT_OF_RANGE = R(0x11),
195 197 TCM_OUT_OF_RESOURCES = R(0x12),
  198 + TCM_PARAMETER_LIST_LENGTH_ERROR = R(0x13),
196 199 #undef R
197 200 };
198 201  
... ... @@ -211,7 +214,6 @@
211 214 TMR_LUN_RESET = 5,
212 215 TMR_TARGET_WARM_RESET = 6,
213 216 TMR_TARGET_COLD_RESET = 7,
214   - TMR_FABRIC_TMR = 255,
215 217 };
216 218  
217 219 /* fabric independent task management response values */
... ... @@ -592,6 +594,7 @@
592 594 };
593 595  
594 596 struct se_dev_attrib {
  597 + int emulate_model_alias;
595 598 int emulate_dpo;
596 599 int emulate_fua_write;
597 600 int emulate_fua_read;