Commit eb6fe0c388e43b02e261f0fdee60e42f6298d7f7

Authored by Carsten Otte
Committed by Linus Torvalds
1 parent 6d79125bba

[PATCH] xip: reduce code duplication

This patch reworks filemap_xip.c with the goal to reduce code duplication
from mm/filemap.c.  It applies against 2.6.12-rc6-mm1.  Instead of
implementing the aio functions, this one implements the synchronous
read/write functions only.  For readv and writev, the generic fallback is
used.  For aio, we rely on the application doing the fallback.  Since our
"synchronous" function does memcpy immediately anyway, there is no
performance difference between using the fallbacks or implementing each
operation.

Signed-off-by: Carsten Otte <cotte@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Showing 4 changed files with 63 additions and 205 deletions Side-by-side Diff

... ... @@ -58,17 +58,13 @@
58 58 #ifdef CONFIG_EXT2_FS_XIP
59 59 struct file_operations ext2_xip_file_operations = {
60 60 .llseek = generic_file_llseek,
61   - .read = do_sync_read,
62   - .write = do_sync_write,
63   - .aio_read = xip_file_aio_read,
64   - .aio_write = xip_file_aio_write,
  61 + .read = xip_file_read,
  62 + .write = xip_file_write,
65 63 .ioctl = ext2_ioctl,
66 64 .mmap = xip_file_mmap,
67 65 .open = generic_file_open,
68 66 .release = ext2_release_file,
69 67 .fsync = ext2_sync_file,
70   - .readv = xip_file_readv,
71   - .writev = xip_file_writev,
72 68 .sendfile = xip_file_sendfile,
73 69 };
74 70 #endif
... ... @@ -1500,18 +1500,14 @@
1500 1500 extern int nonseekable_open(struct inode * inode, struct file * filp);
1501 1501  
1502 1502 #ifdef CONFIG_FS_XIP
1503   -extern ssize_t xip_file_aio_read(struct kiocb *iocb, char __user *buf,
1504   - size_t count, loff_t pos);
1505   -extern ssize_t xip_file_readv(struct file *filp, const struct iovec *iov,
1506   - unsigned long nr_segs, loff_t *ppos);
  1503 +extern ssize_t xip_file_read(struct file *filp, char __user *buf, size_t len,
  1504 + loff_t *ppos);
1507 1505 extern ssize_t xip_file_sendfile(struct file *in_file, loff_t *ppos,
1508 1506 size_t count, read_actor_t actor,
1509 1507 void *target);
1510 1508 extern int xip_file_mmap(struct file * file, struct vm_area_struct * vma);
1511   -extern ssize_t xip_file_aio_write(struct kiocb *iocb, const char __user *buf,
1512   - size_t count, loff_t pos);
1513   -extern ssize_t xip_file_writev(struct file *file, const struct iovec *iov,
1514   - unsigned long nr_segs, loff_t *ppos);
  1509 +extern ssize_t xip_file_write(struct file *filp, const char __user *buf,
  1510 + size_t len, loff_t *ppos);
1515 1511 extern int xip_truncate_page(struct address_space *mapping, loff_t from);
1516 1512 #else
1517 1513 static inline int xip_truncate_page(struct address_space *mapping, loff_t from)
... ... @@ -15,7 +15,7 @@
15 15 #include <linux/config.h>
16 16 #include <asm/uaccess.h>
17 17  
18   -extern size_t
  18 +size_t
19 19 __filemap_copy_from_user_iovec(char *vaddr,
20 20 const struct iovec *iov,
21 21 size_t base,
... ... @@ -114,85 +114,30 @@
114 114 file_accessed(filp);
115 115 }
116 116  
117   -/*
118   - * This is the "read()" routine for all filesystems
119   - * that uses the get_xip_page address space operation.
120   - */
121   -static ssize_t
122   -__xip_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
123   - unsigned long nr_segs, loff_t *ppos)
  117 +ssize_t
  118 +xip_file_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
124 119 {
125   - struct file *filp = iocb->ki_filp;
126   - ssize_t retval;
127   - unsigned long seg;
128   - size_t count;
  120 + read_descriptor_t desc;
129 121  
130   - count = 0;
131   - for (seg = 0; seg < nr_segs; seg++) {
132   - const struct iovec *iv = &iov[seg];
  122 + if (!access_ok(VERIFY_WRITE, buf, len))
  123 + return -EFAULT;
133 124  
134   - /*
135   - * If any segment has a negative length, or the cumulative
136   - * length ever wraps negative then return -EINVAL.
137   - */
138   - count += iv->iov_len;
139   - if (unlikely((ssize_t)(count|iv->iov_len) < 0))
140   - return -EINVAL;
141   - if (access_ok(VERIFY_WRITE, iv->iov_base, iv->iov_len))
142   - continue;
143   - if (seg == 0)
144   - return -EFAULT;
145   - nr_segs = seg;
146   - count -= iv->iov_len; /* This segment is no good */
147   - break;
148   - }
  125 + desc.written = 0;
  126 + desc.arg.buf = buf;
  127 + desc.count = len;
  128 + desc.error = 0;
149 129  
150   - retval = 0;
151   - if (count) {
152   - for (seg = 0; seg < nr_segs; seg++) {
153   - read_descriptor_t desc;
  130 + do_xip_mapping_read(filp->f_mapping, &filp->f_ra, filp,
  131 + ppos, &desc, file_read_actor);
154 132  
155   - desc.written = 0;
156   - desc.arg.buf = iov[seg].iov_base;
157   - desc.count = iov[seg].iov_len;
158   - if (desc.count == 0)
159   - continue;
160   - desc.error = 0;
161   - do_xip_mapping_read(filp->f_mapping, &filp->f_ra, filp,
162   - ppos, &desc, file_read_actor);
163   - retval += desc.written;
164   - if (!retval) {
165   - retval = desc.error;
166   - break;
167   - }
168   - }
169   - }
170   - return retval;
  133 + if (desc.written)
  134 + return desc.written;
  135 + else
  136 + return desc.error;
171 137 }
  138 +EXPORT_SYMBOL_GPL(xip_file_read);
172 139  
173 140 ssize_t
174   -xip_file_aio_read(struct kiocb *iocb, char __user *buf, size_t count,
175   - loff_t pos)
176   -{
177   - struct iovec local_iov = { .iov_base = buf, .iov_len = count };
178   -
179   - BUG_ON(iocb->ki_pos != pos);
180   - return __xip_file_aio_read(iocb, &local_iov, 1, &iocb->ki_pos);
181   -}
182   -EXPORT_SYMBOL_GPL(xip_file_aio_read);
183   -
184   -ssize_t
185   -xip_file_readv(struct file *filp, const struct iovec *iov,
186   - unsigned long nr_segs, loff_t *ppos)
187   -{
188   - struct kiocb kiocb;
189   -
190   - init_sync_kiocb(&kiocb, filp);
191   - return __xip_file_aio_read(&kiocb, iov, nr_segs, ppos);
192   -}
193   -EXPORT_SYMBOL_GPL(xip_file_readv);
194   -
195   -ssize_t
196 141 xip_file_sendfile(struct file *in_file, loff_t *ppos,
197 142 size_t count, read_actor_t actor, void *target)
198 143 {
199 144  
200 145  
201 146  
... ... @@ -326,25 +271,19 @@
326 271 EXPORT_SYMBOL_GPL(xip_file_mmap);
327 272  
328 273 static ssize_t
329   -do_xip_file_write(struct kiocb *iocb, const struct iovec *iov,
330   - unsigned long nr_segs, loff_t pos, loff_t *ppos,
331   - size_t count)
  274 +__xip_file_write(struct file *filp, const char __user *buf,
  275 + size_t count, loff_t pos, loff_t *ppos)
332 276 {
333   - struct file *file = iocb->ki_filp;
334   - struct address_space * mapping = file->f_mapping;
  277 + struct address_space * mapping = filp->f_mapping;
335 278 struct address_space_operations *a_ops = mapping->a_ops;
336 279 struct inode *inode = mapping->host;
337 280 long status = 0;
338 281 struct page *page;
339 282 size_t bytes;
340   - const struct iovec *cur_iov = iov; /* current iovec */
341   - size_t iov_base = 0; /* offset in the current iovec */
342   - char __user *buf;
343 283 ssize_t written = 0;
344 284  
345 285 BUG_ON(!mapping->a_ops->get_xip_page);
346 286  
347   - buf = iov->iov_base;
348 287 do {
349 288 unsigned long index;
350 289 unsigned long offset;
351 290  
352 291  
... ... @@ -365,15 +304,14 @@
365 304 fault_in_pages_readable(buf, bytes);
366 305  
367 306 page = a_ops->get_xip_page(mapping,
368   - index*(PAGE_SIZE/512), 0);
  307 + index*(PAGE_SIZE/512), 0);
369 308 if (IS_ERR(page) && (PTR_ERR(page) == -ENODATA)) {
370 309 /* we allocate a new page unmap it */
371 310 page = a_ops->get_xip_page(mapping,
372   - index*(PAGE_SIZE/512), 1);
  311 + index*(PAGE_SIZE/512), 1);
373 312 if (!IS_ERR(page))
374   - /* unmap page at pgoff from all other vmas */
375   - __xip_unmap(mapping, index);
376   -
  313 + /* unmap page at pgoff from all other vmas */
  314 + __xip_unmap(mapping, index);
377 315 }
378 316  
379 317 if (IS_ERR(page)) {
... ... @@ -383,12 +321,7 @@
383 321  
384 322 BUG_ON(!PageUptodate(page));
385 323  
386   - if (likely(nr_segs == 1))
387   - copied = filemap_copy_from_user(page, offset,
388   - buf, bytes);
389   - else
390   - copied = filemap_copy_from_user_iovec(page, offset,
391   - cur_iov, iov_base, bytes);
  324 + copied = filemap_copy_from_user(page, offset, buf, bytes);
392 325 flush_dcache_page(page);
393 326 if (likely(copied > 0)) {
394 327 status = copied;
... ... @@ -398,9 +331,6 @@
398 331 count -= status;
399 332 pos += status;
400 333 buf += status;
401   - if (unlikely(nr_segs > 1))
402   - filemap_set_next_iovec(&cur_iov,
403   - &iov_base, status);
404 334 }
405 335 }
406 336 if (unlikely(copied != bytes))
407 337  
408 338  
409 339  
410 340  
411 341  
412 342  
413 343  
414 344  
415 345  
416 346  
417 347  
418 348  
419 349  
... ... @@ -422,111 +352,53 @@
422 352 return written ? written : status;
423 353 }
424 354  
425   -static ssize_t
426   -xip_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
427   - unsigned long nr_segs, loff_t *ppos)
  355 +ssize_t
  356 +xip_file_write(struct file *filp, const char __user *buf, size_t len,
  357 + loff_t *ppos)
428 358 {
429   - struct file *file = iocb->ki_filp;
430   - struct address_space * mapping = file->f_mapping;
431   - size_t ocount; /* original count */
432   - size_t count; /* after file limit checks */
433   - struct inode *inode = mapping->host;
434   - unsigned long seg;
435   - loff_t pos;
436   - ssize_t written;
437   - ssize_t err;
  359 + struct address_space *mapping = filp->f_mapping;
  360 + struct inode *inode = mapping->host;
  361 + size_t count;
  362 + loff_t pos;
  363 + ssize_t ret;
438 364  
439   - ocount = 0;
440   - for (seg = 0; seg < nr_segs; seg++) {
441   - const struct iovec *iv = &iov[seg];
  365 + down(&inode->i_sem);
442 366  
443   - /*
444   - * If any segment has a negative length, or the cumulative
445   - * length ever wraps negative then return -EINVAL.
446   - */
447   - ocount += iv->iov_len;
448   - if (unlikely((ssize_t)(ocount|iv->iov_len) < 0))
449   - return -EINVAL;
450   - if (access_ok(VERIFY_READ, iv->iov_base, iv->iov_len))
451   - continue;
452   - if (seg == 0)
453   - return -EFAULT;
454   - nr_segs = seg;
455   - ocount -= iv->iov_len; /* This segment is no good */
456   - break;
  367 + if (!access_ok(VERIFY_READ, buf, len)) {
  368 + ret=-EFAULT;
  369 + goto out_up;
457 370 }
458 371  
459   - count = ocount;
460 372 pos = *ppos;
  373 + count = len;
461 374  
462 375 vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
463 376  
464   - written = 0;
  377 + /* We can write back this queue in page reclaim */
  378 + current->backing_dev_info = mapping->backing_dev_info;
465 379  
466   - err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
467   - if (err)
468   - goto out;
469   -
  380 + ret = generic_write_checks(filp, &pos, &count, S_ISBLK(inode->i_mode));
  381 + if (ret)
  382 + goto out_backing;
470 383 if (count == 0)
471   - goto out;
  384 + goto out_backing;
472 385  
473   - err = remove_suid(file->f_dentry);
474   - if (err)
475   - goto out;
  386 + ret = remove_suid(filp->f_dentry);
  387 + if (ret)
  388 + goto out_backing;
476 389  
477 390 inode_update_time(inode, 1);
478 391  
479   - /* use execute in place to copy directly to disk */
480   - written = do_xip_file_write (iocb, iov,
481   - nr_segs, pos, ppos, count);
482   - out:
483   - return written ? written : err;
484   -}
  392 + ret = __xip_file_write (filp, buf, count, pos, ppos);
485 393  
486   -static ssize_t
487   -__xip_file_write_nolock(struct file *file, const struct iovec *iov,
488   - unsigned long nr_segs, loff_t *ppos)
489   -{
490   - struct kiocb kiocb;
491   -
492   - init_sync_kiocb(&kiocb, file);
493   - return xip_file_aio_write_nolock(&kiocb, iov, nr_segs, ppos);
494   -}
495   -
496   -ssize_t
497   -xip_file_aio_write(struct kiocb *iocb, const char __user *buf,
498   - size_t count, loff_t pos)
499   -{
500   - struct file *file = iocb->ki_filp;
501   - struct address_space *mapping = file->f_mapping;
502   - struct inode *inode = mapping->host;
503   - ssize_t ret;
504   - struct iovec local_iov = { .iov_base = (void __user *)buf,
505   - .iov_len = count };
506   -
507   - BUG_ON(iocb->ki_pos != pos);
508   -
509   - down(&inode->i_sem);
510   - ret = xip_file_aio_write_nolock(iocb, &local_iov, 1, &iocb->ki_pos);
  394 + out_backing:
  395 + current->backing_dev_info = NULL;
  396 + out_up:
511 397 up(&inode->i_sem);
512 398 return ret;
513 399 }
514   -EXPORT_SYMBOL_GPL(xip_file_aio_write);
  400 +EXPORT_SYMBOL_GPL(xip_file_write);
515 401  
516   -ssize_t xip_file_writev(struct file *file, const struct iovec *iov,
517   - unsigned long nr_segs, loff_t *ppos)
518   -{
519   - struct address_space *mapping = file->f_mapping;
520   - struct inode *inode = mapping->host;
521   - ssize_t ret;
522   -
523   - down(&inode->i_sem);
524   - ret = __xip_file_write_nolock(file, iov, nr_segs, ppos);
525   - up(&inode->i_sem);
526   - return ret;
527   -}
528   -EXPORT_SYMBOL_GPL(xip_file_writev);
529   -
530 402 /*
531 403 * truncate a page used for execute in place
532 404 * functionality is analog to block_truncate_page but does use get_xip_page
... ... @@ -541,7 +413,6 @@
541 413 unsigned length;
542 414 struct page *page;
543 415 void *kaddr;
544   - int err;
545 416  
546 417 BUG_ON(!mapping->a_ops->get_xip_page);
547 418  
548 419  
549 420  
550 421  
... ... @@ -556,17 +427,14 @@
556 427  
557 428 page = mapping->a_ops->get_xip_page(mapping,
558 429 index*(PAGE_SIZE/512), 0);
559   - err = -ENOMEM;
560 430 if (!page)
561   - goto out;
  431 + return -ENOMEM;
562 432 if (unlikely(IS_ERR(page))) {
563   - if (PTR_ERR(page) == -ENODATA) {
  433 + if (PTR_ERR(page) == -ENODATA)
564 434 /* Hole? No need to truncate */
565 435 return 0;
566   - } else {
567   - err = PTR_ERR(page);
568   - goto out;
569   - }
  436 + else
  437 + return PTR_ERR(page);
570 438 } else
571 439 BUG_ON(!PageUptodate(page));
572 440 kaddr = kmap_atomic(page, KM_USER0);
... ... @@ -574,9 +442,7 @@
574 442 kunmap_atomic(kaddr, KM_USER0);
575 443  
576 444 flush_dcache_page(page);
577   - err = 0;
578   -out:
579   - return err;
  445 + return 0;
580 446 }
581 447 EXPORT_SYMBOL_GPL(xip_truncate_page);