Commit 33e5d76979cf01e3834814fe0aea569d1d602c1a

Authored by David Howells
Committed by Linus Torvalds
Parent: 5482415a5e

nommu: fix a number of issues with the per-MM VMA patch

Fix a number of issues with the per-MM VMA patch:

 (1) Make mmap_pages_allocated an atomic_long_t, just in case this is used on
     a NOMMU system with more than 2G pages.  atomic_t is 32 bits wide on all
     architectures, while atomic_long_t tracks the machine word size, so this
     makes no difference on a 32-bit system.

 (2) Report vma->vm_pgoff * PAGE_SIZE as a 64-bit value, not a 32-bit value,
     lest it overflow.  The cast must widen vm_pgoff before the shift so that
     the shift itself is performed in 64-bit arithmetic.

 (3) Move the allocation of the vm_area_struct slab back to fork.c.

 (4) Use KMEM_CACHE() for both the vm_area_struct and vm_region slabs (the
     macro's expansion is sketched after this list).

 (5) Use BUG_ON() rather than if () BUG().

 (6) Make the default validate_nommu_regions() a static inline rather than a
     #define.

 (7) Make free_page_series()'s objection to pages with a refcount != 1 more
     informative.

 (8) Adjust the __put_nommu_region() banner comment to indicate that the
     semaphore must be held for writing.

 (9) Limit the warnings about munmaps of non-mmapped regions to five, so a
     misbehaving process cannot flood the kernel log.
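
KMEM_CACHE() (from <linux/slab.h>) names, sizes and aligns a slab cache from
the struct type itself; its definition at the time was roughly:

        #define KMEM_CACHE(__struct, __flags)                             \
                kmem_cache_create(#__struct, sizeof(struct __struct),     \
                                  __alignof__(struct __struct),           \
                                  (__flags), NULL)

        /* so the new call in kernel/fork.c expands to roughly: */
        vm_area_cachep = kmem_cache_create("vm_area_struct",
                                           sizeof(struct vm_area_struct),
                                           __alignof__(struct vm_area_struct),
                                           SLAB_PANIC, NULL);

One side effect worth noting: the old open-coded calls passed an explicit
alignment of 0, whereas KMEM_CACHE() passes __alignof__() of the struct.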

Reported-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: David Howells <dhowells@redhat.com>
Cc: Greg Ungerer <gerg@snapgear.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 6 changed files with 30 additions and 34 deletions

fs/proc/meminfo.c
@@ -120,7 +120,7 @@
                 K(i.freeram-i.freehigh),
 #endif
 #ifndef CONFIG_MMU
-                K((unsigned long) atomic_read(&mmap_pages_allocated)),
+                K((unsigned long) atomic_long_read(&mmap_pages_allocated)),
 #endif
                 K(i.totalswap),
                 K(i.freeswap),
fs/proc/task_nommu.c
@@ -136,14 +136,14 @@
         }

         seq_printf(m,
-                   "%08lx-%08lx %c%c%c%c %08lx %02x:%02x %lu %n",
+                   "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
                    vma->vm_start,
                    vma->vm_end,
                    flags & VM_READ ? 'r' : '-',
                    flags & VM_WRITE ? 'w' : '-',
                    flags & VM_EXEC ? 'x' : '-',
                    flags & VM_MAYSHARE ? flags & VM_SHARED ? 'S' : 's' : 'p',
-                   vma->vm_pgoff << PAGE_SHIFT,
+                   (unsigned long long) vma->vm_pgoff << PAGE_SHIFT,
                    MAJOR(dev), MINOR(dev), ino, &len);

         if (file) {
include/linux/mm.h
@@ -1079,7 +1079,7 @@
 #endif

 /* nommu.c */
-extern atomic_t mmap_pages_allocated;
+extern atomic_long_t mmap_pages_allocated;

 /* prio_tree.c */
 void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old);
kernel/fork.c
@@ -1488,6 +1488,7 @@
         mm_cachep = kmem_cache_create("mm_struct",
                         sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
                         SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+        vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
         mmap_init();
 }
mm/mmap.c
@@ -2481,8 +2481,5 @@
  */
 void __init mmap_init(void)
 {
-        vm_area_cachep = kmem_cache_create("vm_area_struct",
-                        sizeof(struct vm_area_struct), 0,
-                        SLAB_PANIC, NULL);
 }
mm/nommu.c
@@ -69,7 +69,7 @@
 int sysctl_nr_trim_pages = 1; /* page trimming behaviour */
 int heap_stack_gap = 0;

-atomic_t mmap_pages_allocated;
+atomic_long_t mmap_pages_allocated;

 EXPORT_SYMBOL(mem_map);
 EXPORT_SYMBOL(num_physpages);

@@ -463,12 +463,7 @@
  */
 void __init mmap_init(void)
 {
-        vm_region_jar = kmem_cache_create("vm_region_jar",
-                        sizeof(struct vm_region), 0,
-                        SLAB_PANIC, NULL);
-        vm_area_cachep = kmem_cache_create("vm_area_struct",
-                        sizeof(struct vm_area_struct), 0,
-                        SLAB_PANIC, NULL);
+        vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC);
 }

 /*
@@ -486,27 +486,24 @@
                 return;

         last = rb_entry(lastp, struct vm_region, vm_rb);
-        if (unlikely(last->vm_end <= last->vm_start))
-                BUG();
-        if (unlikely(last->vm_top < last->vm_end))
-                BUG();
+        BUG_ON(unlikely(last->vm_end <= last->vm_start));
+        BUG_ON(unlikely(last->vm_top < last->vm_end));

         while ((p = rb_next(lastp))) {
                 region = rb_entry(p, struct vm_region, vm_rb);
                 last = rb_entry(lastp, struct vm_region, vm_rb);

-                if (unlikely(region->vm_end <= region->vm_start))
-                        BUG();
-                if (unlikely(region->vm_top < region->vm_end))
-                        BUG();
-                if (unlikely(region->vm_start < last->vm_top))
-                        BUG();
+                BUG_ON(unlikely(region->vm_end <= region->vm_start));
+                BUG_ON(unlikely(region->vm_top < region->vm_end));
+                BUG_ON(unlikely(region->vm_start < last->vm_top));

                 lastp = p;
         }
 }
 #else
-#define validate_nommu_regions() do {} while(0)
+static void validate_nommu_regions(void)
+{
+}
 #endif

 /*
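
The stub change in (6) swaps a macro for a real, empty function.  A minimal
sketch of the difference, using the function above as the example:

        /* Macro stub: needs do { } while (0) to be safe in un-braced
         * if/else, and leaves the compiler no symbol to type-check: */
        #define validate_nommu_regions() do {} while (0)

        /* Function stub: the optimiser discards the empty body, call
         * sites stay type-checked, and both #ifdef branches present the
         * same signature (the hunk above uses plain static; the commit
         * message asks for static inline): */
        static inline void validate_nommu_regions(void)
        {
        }
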
@@ -563,16 +555,17 @@
                 struct page *page = virt_to_page(from);

                 kdebug("- free %lx", from);
-                atomic_dec(&mmap_pages_allocated);
+                atomic_long_dec(&mmap_pages_allocated);
                 if (page_count(page) != 1)
-                        kdebug("free page %p [%d]", page, page_count(page));
+                        kdebug("free page %p: refcount not one: %d",
+                               page, page_count(page));
                 put_page(page);
         }
 }

 /*
  * release a reference to a region
- * - the caller must hold the region semaphore, which this releases
+ * - the caller must hold the region semaphore for writing, which this releases
  * - the region may not have been added to the tree yet, in which case vm_top
  *   will equal vm_start
  */
@@ -1096,7 +1089,7 @@
                 goto enomem;

         total = 1 << order;
-        atomic_add(total, &mmap_pages_allocated);
+        atomic_long_add(total, &mmap_pages_allocated);

         point = rlen >> PAGE_SHIFT;
@@ -1107,7 +1100,7 @@
                         order = ilog2(total - point);
                         n = 1 << order;
                         kdebug("shave %lu/%lu @%lu", n, total - point, total);
-                        atomic_sub(n, &mmap_pages_allocated);
+                        atomic_long_sub(n, &mmap_pages_allocated);
                         total -= n;
                         set_page_refcounted(pages + total);
                         __free_pages(pages + total, order);
@@ -1536,10 +1529,15 @@
         /* find the first potentially overlapping VMA */
         vma = find_vma(mm, start);
         if (!vma) {
-                printk(KERN_WARNING
-                       "munmap of memory not mmapped by process %d (%s):"
-                       " 0x%lx-0x%lx\n",
-                       current->pid, current->comm, start, start + len - 1);
+                static int limit = 0;
+                if (limit < 5) {
+                        printk(KERN_WARNING
+                               "munmap of memory not mmapped by process %d"
+                               " (%s): 0x%lx-0x%lx\n",
+                               current->pid, current->comm,
+                               start, start + len - 1);
+                        limit++;
+                }
                 return -EINVAL;
         }
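
The warning cap in the last hunk uses a function-local static counter.  A
minimal, standalone C analogue of the same pattern (the names here are
illustrative, not kernel API):

        #include <stdio.h>

        /* Warn at most five times over the life of the program, mirroring
         * the "static int limit" pattern in the hunk above. */
        static void warn_bad_munmap(unsigned long start, unsigned long end)
        {
                static int limit = 0;

                if (limit < 5) {
                        fprintf(stderr,
                                "munmap of memory not mmapped: %#lx-%#lx\n",
                                start, end);
                        limit++;
                }
        }

        int main(void)
        {
                for (int i = 0; i < 10; i++)
                        warn_bad_munmap(0x1000UL * i, 0x1000UL * (i + 1) - 1);
                return 0;       /* only the first five calls print */
        }

In the kernel proper the counter is not atomic, so two racing tasks could
occasionally print a sixth line; for a log cap that is an acceptable trade.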