Commit 8207649c41bf5c28a987be47d66545fa9d2994d8

Authored by Linus Torvalds

Merge branch 'akpm' (fixes from Andrew Morton)

Merge fixes from Andrew Morton:
 "9 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm: softdirty: keep bit when zapping file pte
  fs/cachefiles: add missing \n to kerror conversions
  genalloc: fix device node resource counter
  drivers/rtc/rtc-efi.c: add missing module alias
  mm, slab: initialize object alignment on cache creation
  mm: softdirty: addresses before VMAs in PTE holes aren't softdirty
  ocfs2/dlm: do not get resource spinlock if lockres is new
  nilfs2: fix data loss with mmap()
  ocfs2: free vol_label in ocfs2_delete_osb()

Showing 14 changed files Side-by-side Diff

drivers/rtc/rtc-efi.c
... ... @@ -232,6 +232,7 @@
232 232  
233 233 module_platform_driver_probe(efi_rtc_driver, efi_rtc_probe);
234 234  
  235 +MODULE_ALIAS("platform:rtc-efi");
235 236 MODULE_AUTHOR("dann frazier <dannf@hp.com>");
236 237 MODULE_LICENSE("GPL");
237 238 MODULE_DESCRIPTION("EFI RTC driver");
fs/cachefiles/bind.c
... ... @@ -50,18 +50,18 @@
50 50 cache->brun_percent < 100);
51 51  
52 52 if (*args) {
53   - pr_err("'bind' command doesn't take an argument");
  53 + pr_err("'bind' command doesn't take an argument\n");
54 54 return -EINVAL;
55 55 }
56 56  
57 57 if (!cache->rootdirname) {
58   - pr_err("No cache directory specified");
  58 + pr_err("No cache directory specified\n");
59 59 return -EINVAL;
60 60 }
61 61  
62 62 /* don't permit already bound caches to be re-bound */
63 63 if (test_bit(CACHEFILES_READY, &cache->flags)) {
64   - pr_err("Cache already bound");
  64 + pr_err("Cache already bound\n");
65 65 return -EBUSY;
66 66 }
67 67  
... ... @@ -248,7 +248,7 @@
248 248 kmem_cache_free(cachefiles_object_jar, fsdef);
249 249 error_root_object:
250 250 cachefiles_end_secure(cache, saved_cred);
251   - pr_err("Failed to register: %d", ret);
  251 + pr_err("Failed to register: %d\n", ret);
252 252 return ret;
253 253 }
254 254  
fs/cachefiles/daemon.c
... ... @@ -315,7 +315,7 @@
315 315 static int cachefiles_daemon_range_error(struct cachefiles_cache *cache,
316 316 char *args)
317 317 {
318   - pr_err("Free space limits must be in range 0%%<=stop<cull<run<100%%");
  318 + pr_err("Free space limits must be in range 0%%<=stop<cull<run<100%%\n");
319 319  
320 320 return -EINVAL;
321 321 }
322 322  
... ... @@ -475,12 +475,12 @@
475 475 _enter(",%s", args);
476 476  
477 477 if (!*args) {
478   - pr_err("Empty directory specified");
  478 + pr_err("Empty directory specified\n");
479 479 return -EINVAL;
480 480 }
481 481  
482 482 if (cache->rootdirname) {
483   - pr_err("Second cache directory specified");
  483 + pr_err("Second cache directory specified\n");
484 484 return -EEXIST;
485 485 }
486 486  
487 487  
... ... @@ -503,12 +503,12 @@
503 503 _enter(",%s", args);
504 504  
505 505 if (!*args) {
506   - pr_err("Empty security context specified");
  506 + pr_err("Empty security context specified\n");
507 507 return -EINVAL;
508 508 }
509 509  
510 510 if (cache->secctx) {
511   - pr_err("Second security context specified");
  511 + pr_err("Second security context specified\n");
512 512 return -EINVAL;
513 513 }
514 514  
... ... @@ -531,7 +531,7 @@
531 531 _enter(",%s", args);
532 532  
533 533 if (!*args) {
534   - pr_err("Empty tag specified");
  534 + pr_err("Empty tag specified\n");
535 535 return -EINVAL;
536 536 }
537 537  
538 538  
... ... @@ -562,12 +562,12 @@
562 562 goto inval;
563 563  
564 564 if (!test_bit(CACHEFILES_READY, &cache->flags)) {
565   - pr_err("cull applied to unready cache");
  565 + pr_err("cull applied to unready cache\n");
566 566 return -EIO;
567 567 }
568 568  
569 569 if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
570   - pr_err("cull applied to dead cache");
  570 + pr_err("cull applied to dead cache\n");
571 571 return -EIO;
572 572 }
573 573  
574 574  
... ... @@ -587,11 +587,11 @@
587 587  
588 588 notdir:
589 589 path_put(&path);
590   - pr_err("cull command requires dirfd to be a directory");
  590 + pr_err("cull command requires dirfd to be a directory\n");
591 591 return -ENOTDIR;
592 592  
593 593 inval:
594   - pr_err("cull command requires dirfd and filename");
  594 + pr_err("cull command requires dirfd and filename\n");
595 595 return -EINVAL;
596 596 }
597 597  
... ... @@ -614,7 +614,7 @@
614 614 return 0;
615 615  
616 616 inval:
617   - pr_err("debug command requires mask");
  617 + pr_err("debug command requires mask\n");
618 618 return -EINVAL;
619 619 }
620 620  
621 621  
... ... @@ -634,12 +634,12 @@
634 634 goto inval;
635 635  
636 636 if (!test_bit(CACHEFILES_READY, &cache->flags)) {
637   - pr_err("inuse applied to unready cache");
  637 + pr_err("inuse applied to unready cache\n");
638 638 return -EIO;
639 639 }
640 640  
641 641 if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
642   - pr_err("inuse applied to dead cache");
  642 + pr_err("inuse applied to dead cache\n");
643 643 return -EIO;
644 644 }
645 645  
646 646  
... ... @@ -659,11 +659,11 @@
659 659  
660 660 notdir:
661 661 path_put(&path);
662   - pr_err("inuse command requires dirfd to be a directory");
  662 + pr_err("inuse command requires dirfd to be a directory\n");
663 663 return -ENOTDIR;
664 664  
665 665 inval:
666   - pr_err("inuse command requires dirfd and filename");
  666 + pr_err("inuse command requires dirfd and filename\n");
667 667 return -EINVAL;
668 668 }
669 669  
fs/cachefiles/internal.h
... ... @@ -255,7 +255,7 @@
255 255  
256 256 #define cachefiles_io_error(___cache, FMT, ...) \
257 257 do { \
258   - pr_err("I/O Error: " FMT, ##__VA_ARGS__); \
  258 + pr_err("I/O Error: " FMT"\n", ##__VA_ARGS__); \
259 259 fscache_io_error(&(___cache)->cache); \
260 260 set_bit(CACHEFILES_DEAD, &(___cache)->flags); \
261 261 } while (0)
fs/cachefiles/main.c
... ... @@ -84,7 +84,7 @@
84 84 error_object_jar:
85 85 misc_deregister(&cachefiles_dev);
86 86 error_dev:
87   - pr_err("failed to register: %d", ret);
  87 + pr_err("failed to register: %d\n", ret);
88 88 return ret;
89 89 }
90 90  
fs/cachefiles/namei.c
... ... @@ -543,7 +543,7 @@
543 543 next, next->d_inode, next->d_inode->i_ino);
544 544  
545 545 } else if (!S_ISDIR(next->d_inode->i_mode)) {
546   - pr_err("inode %lu is not a directory",
  546 + pr_err("inode %lu is not a directory\n",
547 547 next->d_inode->i_ino);
548 548 ret = -ENOBUFS;
549 549 goto error;
... ... @@ -574,7 +574,7 @@
574 574 } else if (!S_ISDIR(next->d_inode->i_mode) &&
575 575 !S_ISREG(next->d_inode->i_mode)
576 576 ) {
577   - pr_err("inode %lu is not a file or directory",
  577 + pr_err("inode %lu is not a file or directory\n",
578 578 next->d_inode->i_ino);
579 579 ret = -ENOBUFS;
580 580 goto error;
... ... @@ -768,7 +768,7 @@
768 768 ASSERT(subdir->d_inode);
769 769  
770 770 if (!S_ISDIR(subdir->d_inode->i_mode)) {
771   - pr_err("%s is not a directory", dirname);
  771 + pr_err("%s is not a directory\n", dirname);
772 772 ret = -EIO;
773 773 goto check_error;
774 774 }
775 775  
... ... @@ -796,13 +796,13 @@
796 796 mkdir_error:
797 797 mutex_unlock(&dir->d_inode->i_mutex);
798 798 dput(subdir);
799   - pr_err("mkdir %s failed with error %d", dirname, ret);
  799 + pr_err("mkdir %s failed with error %d\n", dirname, ret);
800 800 return ERR_PTR(ret);
801 801  
802 802 lookup_error:
803 803 mutex_unlock(&dir->d_inode->i_mutex);
804 804 ret = PTR_ERR(subdir);
805   - pr_err("Lookup %s failed with error %d", dirname, ret);
  805 + pr_err("Lookup %s failed with error %d\n", dirname, ret);
806 806 return ERR_PTR(ret);
807 807  
808 808 nomem_d_alloc:
... ... @@ -892,7 +892,7 @@
892 892 if (ret == -EIO) {
893 893 cachefiles_io_error(cache, "Lookup failed");
894 894 } else if (ret != -ENOMEM) {
895   - pr_err("Internal error: %d", ret);
  895 + pr_err("Internal error: %d\n", ret);
896 896 ret = -EIO;
897 897 }
898 898  
... ... @@ -951,7 +951,7 @@
951 951 }
952 952  
953 953 if (ret != -ENOMEM) {
954   - pr_err("Internal error: %d", ret);
  954 + pr_err("Internal error: %d\n", ret);
955 955 ret = -EIO;
956 956 }
957 957  
fs/cachefiles/xattr.c
... ... @@ -51,7 +51,7 @@
51 51 }
52 52  
53 53 if (ret != -EEXIST) {
54   - pr_err("Can't set xattr on %*.*s [%lu] (err %d)",
  54 + pr_err("Can't set xattr on %*.*s [%lu] (err %d)\n",
55 55 dentry->d_name.len, dentry->d_name.len,
56 56 dentry->d_name.name, dentry->d_inode->i_ino,
57 57 -ret);
... ... @@ -64,7 +64,7 @@
64 64 if (ret == -ERANGE)
65 65 goto bad_type_length;
66 66  
67   - pr_err("Can't read xattr on %*.*s [%lu] (err %d)",
  67 + pr_err("Can't read xattr on %*.*s [%lu] (err %d)\n",
68 68 dentry->d_name.len, dentry->d_name.len,
69 69 dentry->d_name.name, dentry->d_inode->i_ino,
70 70 -ret);
71 71  
... ... @@ -85,14 +85,14 @@
85 85 return ret;
86 86  
87 87 bad_type_length:
88   - pr_err("Cache object %lu type xattr length incorrect",
  88 + pr_err("Cache object %lu type xattr length incorrect\n",
89 89 dentry->d_inode->i_ino);
90 90 ret = -EIO;
91 91 goto error;
92 92  
93 93 bad_type:
94 94 xtype[2] = 0;
95   - pr_err("Cache object %*.*s [%lu] type %s not %s",
  95 + pr_err("Cache object %*.*s [%lu] type %s not %s\n",
96 96 dentry->d_name.len, dentry->d_name.len,
97 97 dentry->d_name.name, dentry->d_inode->i_ino,
98 98 xtype, type);
... ... @@ -293,7 +293,7 @@
293 293 return ret;
294 294  
295 295 bad_type_length:
296   - pr_err("Cache object %lu xattr length incorrect",
  296 + pr_err("Cache object %lu xattr length incorrect\n",
297 297 dentry->d_inode->i_ino);
298 298 ret = -EIO;
299 299 goto error;
fs/nilfs2/inode.c
... ... @@ -24,6 +24,7 @@
24 24 #include <linux/buffer_head.h>
25 25 #include <linux/gfp.h>
26 26 #include <linux/mpage.h>
  27 +#include <linux/pagemap.h>
27 28 #include <linux/writeback.h>
28 29 #include <linux/aio.h>
29 30 #include "nilfs.h"
30 31  
... ... @@ -219,10 +220,10 @@
219 220  
220 221 static int nilfs_set_page_dirty(struct page *page)
221 222 {
  223 + struct inode *inode = page->mapping->host;
222 224 int ret = __set_page_dirty_nobuffers(page);
223 225  
224 226 if (page_has_buffers(page)) {
225   - struct inode *inode = page->mapping->host;
226 227 unsigned nr_dirty = 0;
227 228 struct buffer_head *bh, *head;
228 229  
... ... @@ -245,6 +246,10 @@
245 246  
246 247 if (nr_dirty)
247 248 nilfs_set_file_dirty(inode, nr_dirty);
  249 + } else if (ret) {
  250 + unsigned nr_dirty = 1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);
  251 +
  252 + nilfs_set_file_dirty(inode, nr_dirty);
248 253 }
249 254 return ret;
250 255 }
fs/ocfs2/dlm/dlmmaster.c
... ... @@ -655,12 +655,9 @@
655 655 clear_bit(bit, res->refmap);
656 656 }
657 657  
658   -
659   -void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
  658 +static void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
660 659 struct dlm_lock_resource *res)
661 660 {
662   - assert_spin_locked(&res->spinlock);
663   -
664 661 res->inflight_locks++;
665 662  
666 663 mlog(0, "%s: res %.*s, inflight++: now %u, %ps()\n", dlm->name,
... ... @@ -668,6 +665,13 @@
668 665 __builtin_return_address(0));
669 666 }
670 667  
  668 +void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
  669 + struct dlm_lock_resource *res)
  670 +{
  671 + assert_spin_locked(&res->spinlock);
  672 + __dlm_lockres_grab_inflight_ref(dlm, res);
  673 +}
  674 +
671 675 void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
672 676 struct dlm_lock_resource *res)
673 677 {
... ... @@ -894,10 +898,8 @@
894 898 /* finally add the lockres to its hash bucket */
895 899 __dlm_insert_lockres(dlm, res);
896 900  
897   - /* Grab inflight ref to pin the resource */
898   - spin_lock(&res->spinlock);
899   - dlm_lockres_grab_inflight_ref(dlm, res);
900   - spin_unlock(&res->spinlock);
  901 + /* since this lockres is new it doesn't require the spinlock */
  902 + __dlm_lockres_grab_inflight_ref(dlm, res);
901 903  
902 904 /* get an extra ref on the mle in case this is a BLOCK
903 905 * if so, the creator of the BLOCK may try to put the last
fs/ocfs2/super.c
... ... @@ -2532,6 +2532,7 @@
2532 2532 kfree(osb->journal);
2533 2533 kfree(osb->local_alloc_copy);
2534 2534 kfree(osb->uuid_str);
  2535 + kfree(osb->vol_label);
2535 2536 ocfs2_put_dlm_debug(osb->osb_dlm_debug);
2536 2537 memset(osb, 0, sizeof(struct ocfs2_super));
2537 2538 }
fs/proc/task_mmu.c
... ... @@ -931,23 +931,32 @@
931 931 while (addr < end) {
932 932 struct vm_area_struct *vma = find_vma(walk->mm, addr);
933 933 pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));
934   - unsigned long vm_end;
  934 + /* End of address space hole, which we mark as non-present. */
  935 + unsigned long hole_end;
935 936  
936   - if (!vma) {
937   - vm_end = end;
938   - } else {
939   - vm_end = min(end, vma->vm_end);
940   - if (vma->vm_flags & VM_SOFTDIRTY)
941   - pme.pme |= PM_STATUS2(pm->v2, __PM_SOFT_DIRTY);
  937 + if (vma)
  938 + hole_end = min(end, vma->vm_start);
  939 + else
  940 + hole_end = end;
  941 +
  942 + for (; addr < hole_end; addr += PAGE_SIZE) {
  943 + err = add_to_pagemap(addr, &pme, pm);
  944 + if (err)
  945 + goto out;
942 946 }
943 947  
944   - for (; addr < vm_end; addr += PAGE_SIZE) {
  948 + if (!vma)
  949 + break;
  950 +
  951 + /* Addresses in the VMA. */
  952 + if (vma->vm_flags & VM_SOFTDIRTY)
  953 + pme.pme |= PM_STATUS2(pm->v2, __PM_SOFT_DIRTY);
  954 + for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
945 955 err = add_to_pagemap(addr, &pme, pm);
946 956 if (err)
947 957 goto out;
948 958 }
949 959 }
950   -
951 960 out:
952 961 return err;
953 962 }
lib/genalloc.c
... ... @@ -588,6 +588,7 @@
588 588 if (!np_pool)
589 589 return NULL;
590 590 pdev = of_find_device_by_node(np_pool);
  591 + of_node_put(np_pool);
591 592 if (!pdev)
592 593 return NULL;
593 594 return dev_get_gen_pool(&pdev->dev);
mm/memory.c
... ... @@ -1127,7 +1127,7 @@
1127 1127 addr) != page->index) {
1128 1128 pte_t ptfile = pgoff_to_pte(page->index);
1129 1129 if (pte_soft_dirty(ptent))
1130   - pte_file_mksoft_dirty(ptfile);
  1130 + ptfile = pte_file_mksoft_dirty(ptfile);
1131 1131 set_pte_at(mm, addr, pte, ptfile);
1132 1132 }
1133 1133 if (PageAnon(page))
mm/slab.c
... ... @@ -2124,7 +2124,8 @@
2124 2124 int
2125 2125 __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
2126 2126 {
2127   - size_t left_over, freelist_size, ralign;
  2127 + size_t left_over, freelist_size;
  2128 + size_t ralign = BYTES_PER_WORD;
2128 2129 gfp_t gfp;
2129 2130 int err;
2130 2131 size_t size = cachep->size;
... ... @@ -2156,14 +2157,6 @@
2156 2157 size += (BYTES_PER_WORD - 1);
2157 2158 size &= ~(BYTES_PER_WORD - 1);
2158 2159 }
2159   -
2160   - /*
2161   - * Redzoning and user store require word alignment or possibly larger.
2162   - * Note this will be overridden by architecture or caller mandated
2163   - * alignment if either is greater than BYTES_PER_WORD.
2164   - */
2165   - if (flags & SLAB_STORE_USER)
2166   - ralign = BYTES_PER_WORD;
2167 2160  
2168 2161 if (flags & SLAB_RED_ZONE) {
2169 2162 ralign = REDZONE_ALIGN;