Commit 33319141252fd14b58cf13685156c23dcaac2527

Authored by Pavel Shilovsky
Committed by Steve French
1 parent c9de5c80d5

CIFS: Add SMB2 support for cifs_iovec_write

Signed-off-by: Pavel Shilovsky <pshilovsky@samba.org>
Signed-off-by: Steve French <smfrench@gmail.com>
Signed-off-by: Steve French <sfrench@us.ibm.com>

Showing 8 changed files with 206 additions and 44 deletions (side-by-side diff)

... ... @@ -89,6 +89,10 @@
89 89  
90 90 struct workqueue_struct *cifsiod_wq;
91 91  
#ifdef CONFIG_HIGHMEM
/*
 * Serializes kmap operations on highmem arches.  Declared extern in
 * cifsglob.h, where cifs_kmap_lock()/cifs_kmap_unlock() wrap it.
 */
DEFINE_MUTEX(cifs_kmap_mutex);
#endif
  95 +
92 96 static int
93 97 cifs_read_super(struct super_block *sb)
94 98 {
... ... @@ -582,7 +582,34 @@
582 582 #define CIFS_KMAP_SIZE_LIMIT (1<<24)
583 583 #endif /* CONFIG_HIGHMEM */
584 584  
#ifdef CONFIG_HIGHMEM
/*
 * On arches that have high memory, kmap address space is limited. By
 * serializing the kmap operations on those arches, we ensure that we don't
 * end up with a bunch of threads in writeback with partially mapped page
 * arrays, stuck waiting for kmap to come back. That situation prevents
 * progress and can deadlock.
 */

/* Defined in cifsfs.c so there is a single mutex shared by all users. */
extern struct mutex cifs_kmap_mutex;

/* Take the global kmap serialization lock (may sleep). */
static inline void
cifs_kmap_lock(void)
{
	mutex_lock(&cifs_kmap_mutex);
}

/* Drop the global kmap serialization lock. */
static inline void
cifs_kmap_unlock(void)
{
	mutex_unlock(&cifs_kmap_mutex);
}
#else /* !CONFIG_HIGHMEM */
/* Without highmem, kmap address space is not scarce: no-ops. */
#define cifs_kmap_lock() do { ; } while (0)
#define cifs_kmap_unlock() do { ; } while (0)
#endif /* CONFIG_HIGHMEM */
  611 +
  612 +/*
586 613 * Macros to allow the TCP_Server_Info->net field and related code to drop out
587 614 * when CONFIG_NET_NS isn't set.
588 615 */
... ... @@ -889,6 +916,26 @@
889 916 unsigned int remaining);
890 917 unsigned int nr_iov;
891 918 struct kvec iov[1];
  919 +};
  920 +
struct cifs_writedata;

/* asynchronous write support */
struct cifs_writedata {
	struct kref			refcount;	/* freed when it drops to zero */
	struct list_head		list;
	struct completion		done;
	enum writeback_sync_modes	sync_mode;
	struct work_struct		work;		/* completion work, run on cifsiod_wq */
	struct cifsFileInfo		*cfile;		/* open file the write targets */
	__u64				offset;		/* starting file offset */
	pid_t				pid;
	unsigned int			bytes;		/* number of bytes to write */
	int				result;		/* 0 or -errno once the write finishes */
	/* maps pages[] into iov[] (reserving iov[0] for the header) and
	 * may update bytes; called by the transport-specific async_writev */
	void (*marshal_iov) (struct kvec *iov,
			     struct cifs_writedata *wdata);
	unsigned int			nr_pages;	/* valid entries in pages[] */
	struct page			*pages[1];	/* trailing array, real length nr_pages */
};
893 940  
894 941 /*
... ... @@ -468,24 +468,6 @@
468 468 int cifs_async_readv(struct cifs_readdata *rdata);
469 469 int cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid);
470 470  
471   -/* asynchronous write support */
472   -struct cifs_writedata {
473   - struct kref refcount;
474   - struct list_head list;
475   - struct completion done;
476   - enum writeback_sync_modes sync_mode;
477   - struct work_struct work;
478   - struct cifsFileInfo *cfile;
479   - __u64 offset;
480   - pid_t pid;
481   - unsigned int bytes;
482   - int result;
483   - void (*marshal_iov) (struct kvec *iov,
484   - struct cifs_writedata *wdata);
485   - unsigned int nr_pages;
486   - struct page *pages[1];
487   -};
488   -
489 471 int cifs_async_writev(struct cifs_writedata *wdata);
490 472 void cifs_writev_complete(struct work_struct *work);
491 473 struct cifs_writedata *cifs_writedata_alloc(unsigned int nr_pages,
... ... @@ -86,32 +86,6 @@
86 86 #endif /* CONFIG_CIFS_WEAK_PW_HASH */
87 87 #endif /* CIFS_POSIX */
88 88  
89   -#ifdef CONFIG_HIGHMEM
90   -/*
91   - * On arches that have high memory, kmap address space is limited. By
92   - * serializing the kmap operations on those arches, we ensure that we don't
93   - * end up with a bunch of threads in writeback with partially mapped page
94   - * arrays, stuck waiting for kmap to come back. That situation prevents
95   - * progress and can deadlock.
96   - */
97   -static DEFINE_MUTEX(cifs_kmap_mutex);
98   -
99   -static inline void
100   -cifs_kmap_lock(void)
101   -{
102   - mutex_lock(&cifs_kmap_mutex);
103   -}
104   -
105   -static inline void
106   -cifs_kmap_unlock(void)
107   -{
108   - mutex_unlock(&cifs_kmap_mutex);
109   -}
110   -#else /* !CONFIG_HIGHMEM */
111   -#define cifs_kmap_lock() do { ; } while(0)
112   -#define cifs_kmap_unlock() do { ; } while(0)
113   -#endif /* CONFIG_HIGHMEM */
114   -
115 89 /*
116 90 * Mark as invalid, all open files on tree connections since they
117 91 * were closed when session to server was lost.
... ... @@ -434,6 +434,7 @@
434 434 .close = smb2_close_file,
435 435 .flush = smb2_flush_file,
436 436 .async_readv = smb2_async_readv,
  437 + .async_writev = smb2_async_writev,
437 438 };
438 439  
439 440 struct smb_version_values smb21_values = {
... ... @@ -33,6 +33,7 @@
33 33 #include <linux/vfs.h>
34 34 #include <linux/task_io_accounting_ops.h>
35 35 #include <linux/uaccess.h>
  36 +#include <linux/pagemap.h>
36 37 #include <linux/xattr.h>
37 38 #include "smb2pdu.h"
38 39 #include "cifsglob.h"
... ... @@ -1325,6 +1326,128 @@
1325 1326 kref_put(&rdata->refcount, cifs_readdata_release);
1326 1327  
1327 1328 cifs_small_buf_release(buf);
  1329 + return rc;
  1330 +}
  1331 +
  1332 +/*
  1333 + * Check the mid_state and signature on received buffer (if any), and queue the
  1334 + * workqueue completion task.
  1335 + */
/*
 * Check the mid_state and signature on received buffer (if any), and queue the
 * workqueue completion task.
 */
static void
smb2_writev_callback(struct mid_q_entry *mid)
{
	struct cifs_writedata *wdata = mid->callback_data;
	struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
	unsigned int written;
	struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf;
	/* default to returning one credit if no response was received */
	unsigned int credits_received = 1;

	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		credits_received = le16_to_cpu(rsp->hdr.CreditRequest);
		wdata->result = smb2_check_receive(mid, tcon->ses->server, 0);
		if (wdata->result != 0)
			break;

		written = le32_to_cpu(rsp->DataLength);
		/*
		 * Mask off high 16 bits when bytes written as returned
		 * by the server is greater than bytes requested by the
		 * client. OS/2 servers are known to set incorrect
		 * CountHigh values.
		 */
		if (written > wdata->bytes)
			written &= 0xFFFF;

		if (written < wdata->bytes)
			wdata->result = -ENOSPC;	/* short write */
		else
			wdata->bytes = written;
		break;
	case MID_REQUEST_SUBMITTED:
	case MID_RETRY_NEEDED:
		/* request never completed; caller may retry with -EAGAIN */
		wdata->result = -EAGAIN;
		break;
	default:
		wdata->result = -EIO;
		break;
	}

	if (wdata->result)
		cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);

	/* hand the rest of completion to cifsiod_wq, then retire the mid */
	queue_work(cifsiod_wq, &wdata->work);
	DeleteMidQEntry(mid);
	add_credits(tcon->ses->server, credits_received, 0);
}
  1383 +
/* smb2_async_writev - send an async write, and set up mid to handle result */
int
smb2_async_writev(struct cifs_writedata *wdata)
{
	int i, rc = -EACCES;
	struct smb2_write_req *req = NULL;
	struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
	struct kvec *iov = NULL;

	rc = small_smb2_init(SMB2_WRITE, tcon, (void **) &req);
	if (rc)
		goto async_writev_out;

	/* 1 iov per page + 1 for header */
	iov = kzalloc((wdata->nr_pages + 1) * sizeof(*iov), GFP_NOFS);
	if (iov == NULL) {
		rc = -ENOMEM;
		goto async_writev_out;
	}

	req->hdr.ProcessId = cpu_to_le32(wdata->cfile->pid);

	req->PersistentFileId = wdata->cfile->fid.persistent_fid;
	req->VolatileFileId = wdata->cfile->fid.volatile_fid;
	req->WriteChannelInfoOffset = 0;
	req->WriteChannelInfoLength = 0;
	req->Channel = 0;
	req->Offset = cpu_to_le64(wdata->offset);
	/* 4 for rfc1002 length field */
	req->DataOffset = cpu_to_le16(
				offsetof(struct smb2_write_req, Buffer) - 4);
	req->RemainingBytes = 0;

	/* 4 for rfc1002 length field and 1 for Buffer */
	iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
	iov[0].iov_base = (char *)req;

	/*
	 * This function should marshal up the page array into the kvec
	 * array, reserving [0] for the header. It should kmap the pages
	 * and set the iov_len properly for each one. It may also set
	 * wdata->bytes too.
	 */
	/* serialized on highmem arches to avoid kmap exhaustion deadlocks */
	cifs_kmap_lock();
	wdata->marshal_iov(iov, wdata);
	cifs_kmap_unlock();

	cFYI(1, "async write at %llu %u bytes", wdata->offset, wdata->bytes);

	/* bytes may have been updated by marshal_iov above */
	req->Length = cpu_to_le32(wdata->bytes);

	/* data replaces the placeholder Buffer[1] byte already counted */
	inc_rfc1001_len(&req->hdr, wdata->bytes - 1 /* Buffer */);

	/* reference for the callback; dropped in the completion work */
	kref_get(&wdata->refcount);
	rc = cifs_call_async(tcon->ses->server, iov, wdata->nr_pages + 1,
			     NULL, smb2_writev_callback, wdata, 0);

	if (rc)
		kref_put(&wdata->refcount, cifs_writedata_release);

	/* send is done, unmap pages */
	for (i = 0; i < wdata->nr_pages; i++)
		kunmap(wdata->pages[i]);

async_writev_out:
	cifs_small_buf_release(req);
	kfree(iov);
	return rc;
}
... ... @@ -496,6 +496,36 @@
496 496 __u8 Buffer[1];
497 497 } __packed;
498 498  
/* For write request Flags field below the following flag is defined: */
#define SMB2_WRITEFLAG_WRITE_THROUGH 0x00000001

/* SMB2 WRITE request; write data follows at DataOffset from the SMB2 header */
struct smb2_write_req {
	struct smb2_hdr hdr;
	__le16 StructureSize; /* Must be 49 */
	__le16 DataOffset; /* offset from start of SMB2 header to write data */
	__le32 Length;
	__le64 Offset;
	__u64  PersistentFileId; /* opaque endianness */
	__u64  VolatileFileId; /* opaque endianness */
	__le32 Channel; /* Reserved MBZ */
	__le32 RemainingBytes;
	__le16 WriteChannelInfoOffset; /* Reserved MBZ */
	__le16 WriteChannelInfoLength; /* Reserved MBZ */
	__le32 Flags;
	__u8   Buffer[1]; /* placeholder first data byte */
} __packed;
  517 +
/* SMB2 WRITE response; DataLength is the number of bytes the server wrote */
struct smb2_write_rsp {
	struct smb2_hdr hdr;
	__le16 StructureSize; /* Must be 17 */
	__u8   DataOffset;
	__u8   Reserved;
	__le32 DataLength;
	__le32 DataRemaining;
	__u32  Reserved2;
	__u8   Buffer[1];
} __packed;
  528 +
499 529 struct smb2_echo_req {
500 530 struct smb2_hdr hdr;
501 531 __le16 StructureSize; /* Must be 4 */
... ... @@ -98,6 +98,7 @@
98 98 u64 persistent_fid, u64 volatile_fid,
99 99 __le64 *uniqueid);
100 100 extern int smb2_async_readv(struct cifs_readdata *rdata);
  101 +extern int smb2_async_writev(struct cifs_writedata *wdata);
101 102 extern int SMB2_echo(struct TCP_Server_Info *server);
102 103  
103 104 #endif /* _SMB2PROTO_H */