Commit 4250c0668ea10a19f3d37b1733f54ce6c8a37234

Authored by Maxim Patlasov
Committed by Miklos Szeredi
1 parent 0b05b18381

fuse: general infrastructure for pages[] of variable size

The patch removes the inline array of FUSE_MAX_PAGES_PER_REQ page pointers from
fuse_req. Instead, req->pages may now point either to a small inline array or to
an array allocated dynamically.

This essentially means that all callers of fuse_request_alloc[_nofs] should
pass the number of pages needed explicitly.

The patch doesn't make any logic changes.

Signed-off-by: Maxim Patlasov <mpatlasov@parallels.com>
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>

Showing 4 changed files with 50 additions and 20 deletions
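
Before the per-file hunks, a minimal user-space sketch of the allocation pattern the
patch introduces may help: a request embeds a tiny page-pointer array and falls back
to a heap-allocated one only when the caller asks for more entries. The names below
(struct req, req_alloc, REQ_INLINE_PAGES) are illustrative stand-ins, and
malloc()/memset() stand in for kmem_cache_alloc()/kmalloc(); this is not the kernel
code itself.

/*
 * Sketch only: a request with a small embedded page-pointer array that
 * switches to a dynamically allocated array when more slots are needed.
 */
#include <stdlib.h>
#include <string.h>

#define REQ_INLINE_PAGES 1              /* mirrors FUSE_REQ_INLINE_PAGES */

struct page;                            /* opaque placeholder for the sketch */

struct req {
        struct page **pages;            /* points at inline_pages or a malloc'd array */
        unsigned max_pages;             /* size of the 'pages' array */
        struct page *inline_pages[REQ_INLINE_PAGES];
};

static struct req *req_alloc(unsigned npages)
{
        struct req *req = malloc(sizeof(*req));

        if (!req)
                return NULL;

        /* Small requests reuse the embedded array; larger ones get their own. */
        if (npages <= REQ_INLINE_PAGES)
                req->pages = req->inline_pages;
        else
                req->pages = malloc(sizeof(struct page *) * npages);

        if (!req->pages) {
                free(req);
                return NULL;
        }

        memset(req->pages, 0, sizeof(struct page *) * npages);
        req->max_pages = npages;
        return req;
}

Callers that only ever need a page or two never pay for a second allocation, while
large requests get exactly the array size they asked for.
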

@@ -34,34 +34,55 @@
         return file->private_data;
 }
 
-static void fuse_request_init(struct fuse_req *req)
+static void fuse_request_init(struct fuse_req *req, struct page **pages,
+                              unsigned npages)
 {
         memset(req, 0, sizeof(*req));
+        memset(pages, 0, sizeof(*pages) * npages);
         INIT_LIST_HEAD(&req->list);
         INIT_LIST_HEAD(&req->intr_entry);
         init_waitqueue_head(&req->waitq);
         atomic_set(&req->count, 1);
+        req->pages = pages;
+        req->max_pages = npages;
 }
 
-struct fuse_req *fuse_request_alloc(void)
+static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags)
 {
-        struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_KERNEL);
-        if (req)
-                fuse_request_init(req);
+        struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, flags);
+        if (req) {
+                struct page **pages;
+
+                if (npages <= FUSE_REQ_INLINE_PAGES)
+                        pages = req->inline_pages;
+                else
+                        pages = kmalloc(sizeof(struct page *) * npages, flags);
+
+                if (!pages) {
+                        kmem_cache_free(fuse_req_cachep, req);
+                        return NULL;
+                }
+
+                fuse_request_init(req, pages, npages);
+        }
         return req;
 }
+
+struct fuse_req *fuse_request_alloc(unsigned npages)
+{
+        return __fuse_request_alloc(npages, GFP_KERNEL);
+}
 EXPORT_SYMBOL_GPL(fuse_request_alloc);
 
-struct fuse_req *fuse_request_alloc_nofs(void)
+struct fuse_req *fuse_request_alloc_nofs(unsigned npages)
 {
-        struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_NOFS);
-        if (req)
-                fuse_request_init(req);
-        return req;
+        return __fuse_request_alloc(npages, GFP_NOFS);
 }
 
 void fuse_request_free(struct fuse_req *req)
 {
+        if (req->pages != req->inline_pages)
+                kfree(req->pages);
         kmem_cache_free(fuse_req_cachep, req);
 }
 
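
To mirror the fuse_request_free() change in the hunk above, the matching teardown in
the user-space sketch shown before the diff frees the page array only when it is not
the embedded one. This continues the illustrative struct req/req_alloc from that
sketch and is not kernel code:

/* Continues the sketch above: only a heap-allocated array needs freeing. */
static void req_free(struct req *req)
{
        if (req->pages != req->inline_pages)
                free(req->pages);
        free(req);
}

int main(void)
{
        /* npages == 0 stays on inline_pages, like fuse_request_alloc(0). */
        struct req *small = req_alloc(0);
        /* A larger request falls back to a separately allocated array. */
        struct req *large = req_alloc(32);

        if (small)
                req_free(small);
        if (large)
                req_free(large);
        return 0;
}

The pointer comparison works because req_alloc() only ever sets req->pages to either
the embedded array or a fresh allocation, which is the same invariant the new check
in fuse_request_free() relies on.
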
@@ -116,7 +137,7 @@
         if (!fc->connected)
                 goto out;
 
-        req = fuse_request_alloc();
+        req = fuse_request_alloc(FUSE_MAX_PAGES_PER_REQ);
         err = -ENOMEM;
         if (!req)
                 goto out;
@@ -165,7 +186,7 @@
         struct fuse_file *ff = file->private_data;
 
         spin_lock(&fc->lock);
-        fuse_request_init(req);
+        fuse_request_init(req, req->pages, req->max_pages);
         BUG_ON(ff->reserved_req);
         ff->reserved_req = req;
         wake_up_all(&fc->reserved_req_waitq);
@@ -192,7 +213,7 @@
 
         atomic_inc(&fc->num_waiting);
         wait_event(fc->blocked_waitq, !fc->blocked);
-        req = fuse_request_alloc();
+        req = fuse_request_alloc(FUSE_MAX_PAGES_PER_REQ);
         if (!req)
                 req = get_reserved_req(fc, file);
 
@@ -57,7 +57,7 @@
                 return NULL;
 
         ff->fc = fc;
-        ff->reserved_req = fuse_request_alloc();
+        ff->reserved_req = fuse_request_alloc(0);
         if (unlikely(!ff->reserved_req)) {
                 kfree(ff);
                 return NULL;
@@ -1272,7 +1272,7 @@
 
         set_page_writeback(page);
 
-        req = fuse_request_alloc_nofs();
+        req = fuse_request_alloc_nofs(1);
         if (!req)
                 goto err;
 
@@ -44,6 +44,9 @@
     doing the mount will be allowed to access the filesystem */
 #define FUSE_ALLOW_OTHER (1 << 1)
 
+/** Number of page pointers embedded in fuse_req */
+#define FUSE_REQ_INLINE_PAGES 1
+
 /** List of active connections */
 extern struct list_head fuse_conn_list;
 
@@ -291,8 +294,14 @@
         } misc;
 
         /** page vector */
-        struct page *pages[FUSE_MAX_PAGES_PER_REQ];
+        struct page **pages;
 
+        /** size of the 'pages' array */
+        unsigned max_pages;
+
+        /** inline page vector */
+        struct page *inline_pages[FUSE_REQ_INLINE_PAGES];
+
         /** number of pages in vector */
         unsigned num_pages;
 
@@ -664,9 +673,9 @@
 /**
  * Allocate a request
  */
-struct fuse_req *fuse_request_alloc(void);
+struct fuse_req *fuse_request_alloc(unsigned npages);
 
-struct fuse_req *fuse_request_alloc_nofs(void);
+struct fuse_req *fuse_request_alloc_nofs(unsigned npages);
 
 /**
  * Free a request
@@ -1037,12 +1037,12 @@
         /* only now - we want root dentry with NULL ->d_op */
         sb->s_d_op = &fuse_dentry_operations;
 
-        init_req = fuse_request_alloc();
+        init_req = fuse_request_alloc(0);
         if (!init_req)
                 goto err_put_root;
 
         if (is_bdev) {
-                fc->destroy_req = fuse_request_alloc();
+                fc->destroy_req = fuse_request_alloc(0);
                 if (!fc->destroy_req)
                         goto err_free_init_req;
         }