Commit 6038def0d11b322019d0dbb43f2a611247dfbdb6

Authored by Namhyung Kim
Committed by Linus Torvalds
1 parent ac3bbec5ec

mm: nommu: sort mm->mmap list properly

When I was reading the nommu code, I found that it handles the vma
list/tree in an unusual way.  IIUC, because there can be more than one
identical/overlapped vma in the list/tree, it sorts the tree more
strictly and does a linear search on the tree.  But the same ordering was
not applied to the list (i.e. the list could be constructed in a
different order than the tree, so we can't use the list to find the first
vma in that order).

Since inserting/sorting a vma in the tree and linking it into the list
are done at the same time, we can easily construct both of them in the
same order.  And since a linear search on the tree can be more costly
than one on the list, the tree traversals can be converted to use the
list, as sketched below.
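
To illustrate what the list-based traversal looks like (commentary only,
not one of these patches; find_vma_sketch is a hypothetical name):

/* Illustration only: find the VMA containing @addr by walking the
 * mm->mmap list, relying on it being kept in tree order. */
static struct vm_area_struct *find_vma_sketch(struct mm_struct *mm,
					      unsigned long addr)
{
	struct vm_area_struct *vma;

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->vm_start > addr)
			return NULL;	/* sorted: no match can follow */
		if (vma->vm_end > addr)
			return vma;	/* vm_start <= addr < vm_end */
	}
	return NULL;
}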

Also, after commit 297c5eee3724 ("mm: make the vma list be doubly
linked") made the list doubly linked, there were a couple of places that
needed to be fixed to construct the list properly.

Patch 1/6 is a preparation.  It keeps the list sorted in the same order
as the tree and constructs the doubly-linked list properly.  Patch 2/6 is
a simple optimization for vma deletion.  Patches 3/6 and 4/6 convert tree
traversal to list traversal, and the rest are simple fixes and cleanups.

This patch:

A @vma added into @mm should be sorted by start addr, end addr and VMA
struct addr, in that order, because we may get identical VMAs in the @mm.
However, this was true only for the rbtree, not for the list.

This patch fixes this by remembering 'rb_prev' during the tree traversal,
as find_vma_prepare() does, and linking the @vma via __vma_link_list().
After this patch, we can iterate over all VMAs in the correct order
simply by walking the @mm->mmap list.
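
The ordering rule itself can be written down as a comparator (illustration
only; vma_order() is a hypothetical name, not used by the patch):

/* Three-key ordering used for the rbtree and, after this patch, for the
 * list as well.  The struct address breaks ties so that identical VMAs
 * still get a total order.  Returns <0, 0, >0 like a qsort comparator. */
static int vma_order(const struct vm_area_struct *a,
		     const struct vm_area_struct *b)
{
	if (a->vm_start != b->vm_start)
		return a->vm_start < b->vm_start ? -1 : 1;
	if (a->vm_end != b->vm_end)
		return a->vm_end < b->vm_end ? -1 : 1;
	if (a != b)
		return a < b ? -1 : 1;
	return 0;
}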

[akpm@linux-foundation.org: avoid duplicating __vma_link_list()]
Signed-off-by: Namhyung Kim <namhyung@gmail.com>
Acked-by: Greg Ungerer <gerg@uclinux.org>
Cc: David Howells <dhowells@redhat.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 4 changed files with 44 additions and 45 deletions

diff --git a/mm/internal.h b/mm/internal.h
@@ -66,6 +66,10 @@
 	return page_private(page);
 }
 
+/* mm/util.c */
+void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
+		struct vm_area_struct *prev, struct rb_node *rb_parent);
+
 #ifdef CONFIG_MMU
 extern long mlock_vma_pages_range(struct vm_area_struct *vma,
 			unsigned long start, unsigned long end);
diff --git a/mm/mmap.c b/mm/mmap.c
@@ -398,29 +398,6 @@
 	return vma;
 }
 
-static inline void
-__vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
-		struct vm_area_struct *prev, struct rb_node *rb_parent)
-{
-	struct vm_area_struct *next;
-
-	vma->vm_prev = prev;
-	if (prev) {
-		next = prev->vm_next;
-		prev->vm_next = vma;
-	} else {
-		mm->mmap = vma;
-		if (rb_parent)
-			next = rb_entry(rb_parent,
-					struct vm_area_struct, vm_rb);
-		else
-			next = NULL;
-	}
-	vma->vm_next = next;
-	if (next)
-		next->vm_prev = vma;
-}
-
 void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
 		struct rb_node **rb_link, struct rb_node *rb_parent)
 {
diff --git a/mm/nommu.c b/mm/nommu.c
@@ -680,9 +680,9 @@
  */
 static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
 {
-	struct vm_area_struct *pvma, **pp, *next;
+	struct vm_area_struct *pvma, *prev;
 	struct address_space *mapping;
-	struct rb_node **p, *parent;
+	struct rb_node **p, *parent, *rb_prev;
 
 	kenter(",%p", vma);
 
@@ -703,7 +703,7 @@
 	}
 
 	/* add the VMA to the tree */
-	parent = NULL;
+	parent = rb_prev = NULL;
 	p = &mm->mm_rb.rb_node;
 	while (*p) {
 		parent = *p;
@@ -713,17 +713,20 @@
 	 * (the latter is necessary as we may get identical VMAs) */
 		if (vma->vm_start < pvma->vm_start)
 			p = &(*p)->rb_left;
-		else if (vma->vm_start > pvma->vm_start)
+		else if (vma->vm_start > pvma->vm_start) {
+			rb_prev = parent;
 			p = &(*p)->rb_right;
-		else if (vma->vm_end < pvma->vm_end)
+		} else if (vma->vm_end < pvma->vm_end)
 			p = &(*p)->rb_left;
-		else if (vma->vm_end > pvma->vm_end)
+		else if (vma->vm_end > pvma->vm_end) {
+			rb_prev = parent;
 			p = &(*p)->rb_right;
-		else if (vma < pvma)
+		} else if (vma < pvma)
 			p = &(*p)->rb_left;
-		else if (vma > pvma)
+		else if (vma > pvma) {
+			rb_prev = parent;
 			p = &(*p)->rb_right;
-		else
+		} else
 			BUG();
 	}
 
@@ -731,20 +734,11 @@
 	rb_insert_color(&vma->vm_rb, &mm->mm_rb);
 
 	/* add VMA to the VMA list also */
-	for (pp = &mm->mmap; (pvma = *pp); pp = &(*pp)->vm_next) {
-		if (pvma->vm_start > vma->vm_start)
-			break;
-		if (pvma->vm_start < vma->vm_start)
-			continue;
-		if (pvma->vm_end < vma->vm_end)
-			break;
-	}
+	prev = NULL;
+	if (rb_prev)
+		prev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
 
-	next = *pp;
-	*pp = vma;
-	vma->vm_next = next;
-	if (next)
-		next->vm_prev = vma;
+	__vma_link_list(mm, vma, prev, parent);
 }
 
 /*
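
Why remembering rb_prev works (commentary, not part of the commit): each
time the descent goes right, the node being passed sorts before @vma, so
the last such node is @vma's in-order predecessor when the descent
bottoms out.  The same pattern in a standalone sketch, using the stock
<linux/rbtree.h> API with hypothetical type and function names:

/* Sketch: insert @new into a tree keyed by ->start and return its
 * in-order predecessor (NULL if @new becomes the leftmost node). */
struct range {
	struct rb_node	node;
	unsigned long	start;
};

static struct range *range_insert(struct rb_root *root, struct range *new)
{
	struct rb_node **p = &root->rb_node, *parent = NULL, *rb_prev = NULL;

	while (*p) {
		struct range *r = rb_entry(*p, struct range, node);

		parent = *p;
		if (new->start < r->start) {
			p = &(*p)->rb_left;
		} else {
			rb_prev = parent;	/* passed a node sorting before @new */
			p = &(*p)->rb_right;
		}
	}
	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, root);

	return rb_prev ? rb_entry(rb_prev, struct range, node) : NULL;
}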
diff --git a/mm/util.c b/mm/util.c
@@ -6,6 +6,8 @@
 #include <linux/sched.h>
 #include <asm/uaccess.h>
 
+#include "internal.h"
+
 #define CREATE_TRACE_POINTS
 #include <trace/events/kmem.h>
 
@@ -214,6 +216,28 @@
 	return p;
 }
 EXPORT_SYMBOL(strndup_user);
+
+void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
+		struct vm_area_struct *prev, struct rb_node *rb_parent)
+{
+	struct vm_area_struct *next;
+
+	vma->vm_prev = prev;
+	if (prev) {
+		next = prev->vm_next;
+		prev->vm_next = vma;
+	} else {
+		mm->mmap = vma;
+		if (rb_parent)
+			next = rb_entry(rb_parent,
+					struct vm_area_struct, vm_rb);
+		else
+			next = NULL;
+	}
+	vma->vm_next = next;
+	if (next)
+		next->vm_prev = vma;
+}
 
 #if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
 void arch_pick_mmap_layout(struct mm_struct *mm)
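
A note on the boundary cases of __vma_link_list() (commentary, not from
the commit): when @prev is NULL the new VMA becomes the list head, and
@rb_parent supplies the successor.  This pairs with add_vma_to_mm()
above: if the descent never went right, rb_prev stays NULL and @parent,
the last node visited, is exactly the in-order successor.  A minimal
usage sketch with hypothetical variables, assuming an initially empty
@mm:

	__vma_link_list(mm, vma1, NULL, NULL);	/* empty list: vma1 becomes mm->mmap */
	__vma_link_list(mm, vma2, vma1, NULL);	/* links vma2 right after vma1 */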