Commit 50f5aa8a9b248fa4262cf379863ec9a531b49737

Authored by Linus Torvalds
Parent: d1db0eea85

mm: don't pointlessly use BUG_ON() for sanity check

BUG_ON() is a big hammer, and should be used _only_ if there is some
major corruption that you cannot possibly recover from, making it
imperative that the current process (and possibly the whole machine) be
terminated with extreme prejudice.
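For contrast, the recoverable tool is WARN_ON_ONCE(), which logs one stack trace the first time its condition is true and evaluates to the condition's value, so the caller can bail out and carry on. A minimal sketch of the two styles; struct widget and WIDGET_MAGIC are made-up names for illustration only, not anything in this patch:

    #include <linux/bug.h>

    #define WIDGET_MAGIC 0x57494447	/* hypothetical magic value */

    struct widget {
            unsigned int magic;
    };

    /* BUG_ON() style: any mismatch kills the current process (or machine). */
    static struct widget *widget_check_fatal(struct widget *w)
    {
            BUG_ON(w->magic != WIDGET_MAGIC);
            return w;
    }

    /* WARN_ON_ONCE() style: log once, hand the caller a recoverable miss. */
    static struct widget *widget_check_safe(struct widget *w)
    {
            if (WARN_ON_ONCE(w->magic != WIDGET_MAGIC))
                    return NULL;
            return w;
    }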

The trivial sanity check in the vmacache code is *not* such a fatal
error.  Recovering from it is absolutely trivial, and using BUG_ON()
just makes it harder to debug for no actual advantage.
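Recovery is trivial because vmacache_find() is only a fast-path front end: on any doubt it can return NULL, and the lookup falls through to the authoritative rb-tree walk in find_vma(), which then repopulates the cache. A rough sketch of that caller relationship; rbtree_lookup() is a stand-in name here, the real find_vma() walks mm->mm_rb inline:

    struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
    {
            struct vm_area_struct *vma;

            /* Fast path: per-task cache. NULL is always a safe answer. */
            vma = vmacache_find(mm, addr);
            if (likely(vma))
                    return vma;

            /* Slow path: authoritative rb-tree walk, then refill the cache. */
            vma = rbtree_lookup(mm, addr);	/* stand-in for the mm_rb walk */
            if (vma)
                    vmacache_update(addr, vma);
            return vma;
    }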

To make matters worse, the placement of the BUG_ON() (it ran only once the
range check had already matched) actually makes the sanity check harder to
hit to begin with.  So _if_ there is a bug (and we just got a report from
Srivatsa Bhat that this can indeed trigger), it is harder to debug not just
because the machine is possibly dead, but because the check fires in far
fewer cases than it could.
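Concretely, compare the old and new forms of the lookup loop body; this is exactly the hunk in the diff below.

Before, the sanity check was reachable only on a cache hit:

            if (vma && vma->vm_start <= addr && vma->vm_end > addr) {
                    BUG_ON(vma->vm_mm != mm);
                    return vma;
            }

After, every non-NULL cache slot gets checked, and a mismatch is survivable:

            if (!vma)
                    continue;
            if (WARN_ON_ONCE(vma->vm_mm != mm))
                    break;
            if (vma->vm_start <= addr && vma->vm_end > addr)
                    return vma;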

BUG_ON() must *die*.  Maybe we should add a checkpatch warning for it,
because it is just about the worst thing you can ever do if you hit some
"this cannot happen" situation.

Reported-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Cc: Davidlohr Bueso <davidlohr@hp.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 1 changed file (mm/vmacache.c) with 5 additions and 3 deletions

--- a/mm/vmacache.c
+++ b/mm/vmacache.c
 /*
  * Copyright (C) 2014 Davidlohr Bueso.
  */
 #include <linux/sched.h>
 #include <linux/mm.h>
 #include <linux/vmacache.h>
 
 /*
  * Flush vma caches for threads that share a given mm.
  *
  * The operation is safe because the caller holds the mmap_sem
  * exclusively and other threads accessing the vma cache will
  * have mmap_sem held at least for read, so no extra locking
  * is required to maintain the vma cache.
  */
 void vmacache_flush_all(struct mm_struct *mm)
 {
 	struct task_struct *g, *p;
 
 	rcu_read_lock();
 	for_each_process_thread(g, p) {
 		/*
 		 * Only flush the vmacache pointers as the
 		 * mm seqnum is already set and curr's will
 		 * be set upon invalidation when the next
 		 * lookup is done.
 		 */
 		if (mm == p->mm)
 			vmacache_flush(p);
 	}
 	rcu_read_unlock();
 }
 
 /*
  * This task may be accessing a foreign mm via (for example)
  * get_user_pages()->find_vma(). The vmacache is task-local and this
  * task's vmacache pertains to a different mm (ie, its own). There is
  * nothing we can do here.
  *
  * Also handle the case where a kernel thread has adopted this mm via use_mm().
  * That kernel thread's vmacache is not applicable to this mm.
  */
 static bool vmacache_valid_mm(struct mm_struct *mm)
 {
 	return current->mm == mm && !(current->flags & PF_KTHREAD);
 }
 
 void vmacache_update(unsigned long addr, struct vm_area_struct *newvma)
 {
 	if (vmacache_valid_mm(newvma->vm_mm))
 		current->vmacache[VMACACHE_HASH(addr)] = newvma;
 }
 
 static bool vmacache_valid(struct mm_struct *mm)
 {
 	struct task_struct *curr;
 
 	if (!vmacache_valid_mm(mm))
 		return false;
 
 	curr = current;
 	if (mm->vmacache_seqnum != curr->vmacache_seqnum) {
 		/*
 		 * First attempt will always be invalid, initialize
 		 * the new cache for this task here.
 		 */
 		curr->vmacache_seqnum = mm->vmacache_seqnum;
 		vmacache_flush(curr);
 		return false;
 	}
 	return true;
 }
 
 struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr)
 {
 	int i;
 
 	if (!vmacache_valid(mm))
 		return NULL;
 
 	for (i = 0; i < VMACACHE_SIZE; i++) {
 		struct vm_area_struct *vma = current->vmacache[i];
 
-		if (vma && vma->vm_start <= addr && vma->vm_end > addr) {
-			BUG_ON(vma->vm_mm != mm);
+		if (!vma)
+			continue;
+		if (WARN_ON_ONCE(vma->vm_mm != mm))
+			break;
+		if (vma->vm_start <= addr && vma->vm_end > addr)
 			return vma;
-		}
 	}
 
 	return NULL;
 }
 
 #ifndef CONFIG_MMU
 struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
 					   unsigned long start,
 					   unsigned long end)
 {
 	int i;
 
 	if (!vmacache_valid(mm))
 		return NULL;
 
 	for (i = 0; i < VMACACHE_SIZE; i++) {
 		struct vm_area_struct *vma = current->vmacache[i];
 
 		if (vma && vma->vm_start == start && vma->vm_end == end)
 			return vma;
 	}
 
 	return NULL;
 }
 #endif