Commit 4e3eaddd142e2142c048c5052a0a9d2604fccfc6
Exists in master and in 4 other branches
Merge branch 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  locking: Make sparse work with inline spinlocks and rwlocks
  x86/mce: Fix RCU lockdep splats
  rcu: Increase RCU CPU stall timeouts if PROVE_RCU
  ftrace: Replace read_barrier_depends() with rcu_dereference_raw()
  rcu: Suppress RCU lockdep warnings during early boot
  rcu, ftrace: Fix RCU lockdep splat in ftrace_perf_buf_prepare()
  rcu: Suppress __mpol_dup() false positive from RCU lockdep
  rcu: Make rcu_read_lock_sched_held() handle !PREEMPT
  rcu: Add control variables to lockdep_rcu_dereference() diagnostics
  rcu, cgroup: Relax the check in task_subsys_state() as early boot is now handled by lockdep-RCU
  rcu: Use wrapper function instead of exporting tasklist_lock
  sched, rcu: Fix rcu_dereference() for RCU-lockdep
  rcu: Make task_subsys_state() RCU-lockdep checks handle boot-time use
  rcu: Fix holdoff for accelerated GPs for last non-dynticked CPU
  x86/gart: Unexport gart_iommu_aperture

Fix trivial conflicts in kernel/trace/ftrace.c
18 changed files
- arch/x86/kernel/aperture_64.c
- arch/x86/kernel/cpu/mcheck/mce.c
- include/linux/cred.h
- include/linux/rcupdate.h
- include/linux/rwlock.h
- include/linux/sched.h
- include/linux/spinlock.h
- include/trace/ftrace.h
- kernel/exit.c
- kernel/fork.c
- kernel/lockdep.c
- kernel/pid.c
- kernel/rcutree.h
- kernel/rcutree_plugin.h
- kernel/sched_fair.c
- kernel/trace/ftrace.c
- kernel/trace/trace_event_profile.c
- mm/mempolicy.c
arch/x86/kernel/aperture_64.c
arch/x86/kernel/cpu/mcheck/mce.c
@@ -46,6 +46,11 @@
 
 #include "mce-internal.h"
 
+#define rcu_dereference_check_mce(p) \
+        rcu_dereference_check((p), \
+                              rcu_read_lock_sched_held() || \
+                              lockdep_is_held(&mce_read_mutex))
+
 #define CREATE_TRACE_POINTS
 #include <trace/events/mce.h>
 
@@ -158,7 +163,7 @@
         mce->finished = 0;
         wmb();
         for (;;) {
-                entry = rcu_dereference(mcelog.next);
+                entry = rcu_dereference_check_mce(mcelog.next);
                 for (;;) {
                         /*
                          * When the buffer fills up discard new entries.
@@ -1500,7 +1505,7 @@
                 return -ENOMEM;
 
         mutex_lock(&mce_read_mutex);
-        next = rcu_dereference(mcelog.next);
+        next = rcu_dereference_check_mce(mcelog.next);
 
         /* Only supports full reads right now */
         if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) {
@@ -1565,7 +1570,7 @@
 static unsigned int mce_poll(struct file *file, poll_table *wait)
 {
         poll_wait(file, &mce_wait, wait);
-        if (rcu_dereference(mcelog.next))
+        if (rcu_dereference_check_mce(mcelog.next))
                 return POLLIN | POLLRDNORM;
         return 0;
 }
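The new wrapper encodes the two contexts that may legitimately read mcelog.next: the machine-check path, which runs with preemption disabled (covered by rcu_read_lock_sched_held()), and the /dev/mcelog read/poll path, which holds mce_read_mutex. A minimal sketch of the same idiom for a hypothetical subsystem follows; every name in it is invented for illustration and is not part of this commit:

/* Illustrative only: a head pointer updated under example_mutex and read
 * either with preemption disabled or with the mutex held. */
struct example_entry {
        int data;
        struct example_entry *next;
};
static struct example_entry *example_head;
static DEFINE_MUTEX(example_mutex);

#define example_dereference_check(p) \
        rcu_dereference_check((p), \
                              rcu_read_lock_sched_held() || \
                              lockdep_is_held(&example_mutex))

static struct example_entry *example_peek(void)
{
        struct example_entry *e;

        mutex_lock(&example_mutex);
        e = example_dereference_check(example_head);    /* condition true: no splat */
        mutex_unlock(&example_mutex);
        return e;
}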
include/linux/cred.h
@@ -280,7 +280,7 @@
  * task or by holding tasklist_lock to prevent it from being unlinked.
  */
 #define __task_cred(task) \
-        ((const struct cred *)(rcu_dereference_check((task)->real_cred, rcu_read_lock_held() || lockdep_is_held(&tasklist_lock))))
+        ((const struct cred *)(rcu_dereference_check((task)->real_cred, rcu_read_lock_held() || lockdep_tasklist_lock_is_held())))
 
 /**
  * get_task_cred - Get another task's objective credentials
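As the comment above notes, __task_cred() is legal either under rcu_read_lock() or with tasklist_lock held. A minimal sketch of the RCU-side usage (the task pointer and local variables are illustrative, not from this commit):

const struct cred *cred;
uid_t uid;

rcu_read_lock();
cred = __task_cred(task);       /* rcu_read_lock_held() satisfies the check */
uid = cred->uid;                /* only valid inside the read-side section */
rcu_read_unlock();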
include/linux/rcupdate.h
@@ -101,6 +101,11 @@
 # define rcu_read_release_sched() \
                 lock_release(&rcu_sched_lock_map, 1, _THIS_IP_)
 
+static inline int debug_lockdep_rcu_enabled(void)
+{
+        return likely(rcu_scheduler_active && debug_locks);
+}
+
 /**
  * rcu_read_lock_held - might we be in RCU read-side critical section?
  *
@@ -108,12 +113,14 @@
  * an RCU read-side critical section.  In absence of CONFIG_PROVE_LOCKING,
  * this assumes we are in an RCU read-side critical section unless it can
  * prove otherwise.
+ *
+ * Check rcu_scheduler_active to prevent false positives during boot.
  */
 static inline int rcu_read_lock_held(void)
 {
-        if (debug_locks)
-                return lock_is_held(&rcu_lock_map);
-        return 1;
+        if (!debug_lockdep_rcu_enabled())
+                return 1;
+        return lock_is_held(&rcu_lock_map);
 }
 
 /**
@@ -123,12 +130,14 @@
  * an RCU-bh read-side critical section.  In absence of CONFIG_PROVE_LOCKING,
  * this assumes we are in an RCU-bh read-side critical section unless it can
  * prove otherwise.
+ *
+ * Check rcu_scheduler_active to prevent false positives during boot.
  */
 static inline int rcu_read_lock_bh_held(void)
 {
-        if (debug_locks)
-                return lock_is_held(&rcu_bh_lock_map);
-        return 1;
+        if (!debug_lockdep_rcu_enabled())
+                return 1;
+        return lock_is_held(&rcu_bh_lock_map);
 }
 
 /**
@@ -139,15 +148,26 @@
  * this assumes we are in an RCU-sched read-side critical section unless it
  * can prove otherwise.  Note that disabling of preemption (including
  * disabling irqs) counts as an RCU-sched read-side critical section.
+ *
+ * Check rcu_scheduler_active to prevent false positives during boot.
  */
+#ifdef CONFIG_PREEMPT
 static inline int rcu_read_lock_sched_held(void)
 {
         int lockdep_opinion = 0;
 
+        if (!debug_lockdep_rcu_enabled())
+                return 1;
         if (debug_locks)
                 lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
-        return lockdep_opinion || preempt_count() != 0 || !rcu_scheduler_active;
+        return lockdep_opinion || preempt_count() != 0;
 }
+#else /* #ifdef CONFIG_PREEMPT */
+static inline int rcu_read_lock_sched_held(void)
+{
+        return 1;
+}
+#endif /* #else #ifdef CONFIG_PREEMPT */
 
 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
@@ -168,10 +188,17 @@
         return 1;
 }
 
+#ifdef CONFIG_PREEMPT
 static inline int rcu_read_lock_sched_held(void)
 {
-        return preempt_count() != 0 || !rcu_scheduler_active;
+        return !rcu_scheduler_active || preempt_count() != 0;
 }
+#else /* #ifdef CONFIG_PREEMPT */
+static inline int rcu_read_lock_sched_held(void)
+{
+        return 1;
+}
+#endif /* #else #ifdef CONFIG_PREEMPT */
 
 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
@@ -188,7 +215,7 @@
  */
 #define rcu_dereference_check(p, c) \
         ({ \
-                if (debug_locks && !(c)) \
+                if (debug_lockdep_rcu_enabled() && !(c)) \
                         lockdep_rcu_dereference(__FILE__, __LINE__); \
                 rcu_dereference_raw(p); \
         })
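The practical effect is that the splat is now gated on the RCU scheduler being active and on lockdep still being live. Roughly, a call such as rcu_dereference_check(gp, lockdep_is_held(&g_lock)) behaves like the sketch below (a paraphrase of the macro, not its literal expansion; gp and g_lock are placeholder names):

if (debug_lockdep_rcu_enabled() && !lockdep_is_held(&g_lock))
        lockdep_rcu_dereference(__FILE__, __LINE__);    /* report the suspect access */
p = rcu_dereference_raw(gp);                            /* the load itself always happens */

Because debug_lockdep_rcu_enabled() is false before rcu_scheduler_active is set and after debug_locks has been cleared, early-boot accesses and kernels that have already reported a lockdep problem no longer produce false positives.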
include/linux/rwlock.h
@@ -29,25 +29,25 @@
 #endif
 
 #ifdef CONFIG_DEBUG_SPINLOCK
- extern void do_raw_read_lock(rwlock_t *lock);
+ extern void do_raw_read_lock(rwlock_t *lock) __acquires(lock);
 #define do_raw_read_lock_flags(lock, flags) do_raw_read_lock(lock)
  extern int do_raw_read_trylock(rwlock_t *lock);
- extern void do_raw_read_unlock(rwlock_t *lock);
- extern void do_raw_write_lock(rwlock_t *lock);
+ extern void do_raw_read_unlock(rwlock_t *lock) __releases(lock);
+ extern void do_raw_write_lock(rwlock_t *lock) __acquires(lock);
 #define do_raw_write_lock_flags(lock, flags) do_raw_write_lock(lock)
  extern int do_raw_write_trylock(rwlock_t *lock);
- extern void do_raw_write_unlock(rwlock_t *lock);
+ extern void do_raw_write_unlock(rwlock_t *lock) __releases(lock);
 #else
-# define do_raw_read_lock(rwlock)       arch_read_lock(&(rwlock)->raw_lock)
+# define do_raw_read_lock(rwlock)       do {__acquire(lock); arch_read_lock(&(rwlock)->raw_lock); } while (0)
 # define do_raw_read_lock_flags(lock, flags) \
-                arch_read_lock_flags(&(lock)->raw_lock, *(flags))
+                do {__acquire(lock); arch_read_lock_flags(&(lock)->raw_lock, *(flags)); } while (0)
 # define do_raw_read_trylock(rwlock)    arch_read_trylock(&(rwlock)->raw_lock)
-# define do_raw_read_unlock(rwlock)     arch_read_unlock(&(rwlock)->raw_lock)
-# define do_raw_write_lock(rwlock)      arch_write_lock(&(rwlock)->raw_lock)
+# define do_raw_read_unlock(rwlock)     do {arch_read_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
+# define do_raw_write_lock(rwlock)      do {__acquire(lock); arch_write_lock(&(rwlock)->raw_lock); } while (0)
 # define do_raw_write_lock_flags(lock, flags) \
-                arch_write_lock_flags(&(lock)->raw_lock, *(flags))
+                do {__acquire(lock); arch_write_lock_flags(&(lock)->raw_lock, *(flags)); } while (0)
 # define do_raw_write_trylock(rwlock)   arch_write_trylock(&(rwlock)->raw_lock)
-# define do_raw_write_unlock(rwlock)    arch_write_unlock(&(rwlock)->raw_lock)
+# define do_raw_write_unlock(rwlock)    do {arch_write_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
 #endif
 
 #define read_can_lock(rwlock)           arch_read_can_lock(&(rwlock)->raw_lock)
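These annotations only matter to sparse (make C=1): __acquires()/__releases() on the DEBUG_SPINLOCK declarations and the __acquire()/__release() calls in the inline variants keep sparse's lock-context counter balanced across each lock/unlock pair. A hypothetical caller that now checks cleanly under either configuration; real code would normally use write_lock()/write_unlock(), this is only to illustrate the balance:

static void example_update(rwlock_t *lock, int *counter)
{
        do_raw_write_lock(lock);        /* sparse: lock context +1 */
        (*counter)++;
        do_raw_write_unlock(lock);      /* sparse: lock context -1, balanced on return */
}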
include/linux/sched.h
@@ -258,6 +258,10 @@
 
 struct task_struct;
 
+#ifdef CONFIG_PROVE_RCU
+extern int lockdep_tasklist_lock_is_held(void);
+#endif /* #ifdef CONFIG_PROVE_RCU */
+
 extern void sched_init(void);
 extern void sched_init_smp(void);
 extern asmlinkage void schedule_tail(struct task_struct *prev);
include/linux/spinlock.h
@@ -128,19 +128,21 @@
 #define raw_spin_unlock_wait(lock)      arch_spin_unlock_wait(&(lock)->raw_lock)
 
 #ifdef CONFIG_DEBUG_SPINLOCK
- extern void do_raw_spin_lock(raw_spinlock_t *lock);
+ extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
 #define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
  extern int do_raw_spin_trylock(raw_spinlock_t *lock);
- extern void do_raw_spin_unlock(raw_spinlock_t *lock);
+ extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
 #else
-static inline void do_raw_spin_lock(raw_spinlock_t *lock)
+static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
 {
+        __acquire(lock);
         arch_spin_lock(&lock->raw_lock);
 }
 
 static inline void
-do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags)
+do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
 {
+        __acquire(lock);
         arch_spin_lock_flags(&lock->raw_lock, *flags);
 }
 
@@ -149,9 +151,10 @@
         return arch_spin_trylock(&(lock)->raw_lock);
 }
 
-static inline void do_raw_spin_unlock(raw_spinlock_t *lock)
+static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
 {
         arch_spin_unlock(&lock->raw_lock);
+        __release(lock);
 }
 #endif
 
include/trace/ftrace.h
@@ -699,9 +699,9 @@
  *      __cpu = smp_processor_id();
  *
  *      if (in_nmi())
- *              trace_buf = rcu_dereference(perf_trace_buf_nmi);
+ *              trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
  *      else
- *              trace_buf = rcu_dereference(perf_trace_buf);
+ *              trace_buf = rcu_dereference_sched(perf_trace_buf);
  *
  *      if (!trace_buf)
  *              goto end;
kernel/exit.c
kernel/fork.c
@@ -86,7 +86,14 @@
 DEFINE_PER_CPU(unsigned long, process_counts) = 0;
 
 __cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */
-EXPORT_SYMBOL_GPL(tasklist_lock);
+
+#ifdef CONFIG_PROVE_RCU
+int lockdep_tasklist_lock_is_held(void)
+{
+        return lockdep_is_held(&tasklist_lock);
+}
+EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held);
+#endif /* #ifdef CONFIG_PROVE_RCU */
 
 int nr_processes(void)
 {
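Replacing EXPORT_SYMBOL_GPL(tasklist_lock) with an exported predicate keeps the lock itself private while still letting lockdep-RCU conditions elsewhere (see include/linux/cred.h above and kernel/pid.c below) ask whether it is held. The same pattern could be applied to any private lock; a hypothetical sketch, with all names invented:

static DEFINE_SPINLOCK(example_lock);           /* stays file-local, never exported */

#ifdef CONFIG_PROVE_RCU
int lockdep_example_lock_is_held(void)
{
        return lockdep_is_held(&example_lock);
}
EXPORT_SYMBOL_GPL(lockdep_example_lock_is_held);
#endif /* #ifdef CONFIG_PROVE_RCU */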
kernel/lockdep.c
@@ -3822,6 +3822,7 @@
         printk("%s:%d invoked rcu_dereference_check() without protection!\n",
                file, line);
         printk("\nother info that might help us debug this:\n\n");
+        printk("\nrcu_scheduler_active = %d, debug_locks = %d\n", rcu_scheduler_active, debug_locks);
         lockdep_print_held_locks(curr);
         printk("\nstack backtrace:\n");
         dump_stack();
kernel/pid.c
@@ -367,7 +367,9 @@
         struct task_struct *result = NULL;
         if (pid) {
                 struct hlist_node *first;
-                first = rcu_dereference_check(pid->tasks[type].first, rcu_read_lock_held() || lockdep_is_held(&tasklist_lock));
+                first = rcu_dereference_check(pid->tasks[type].first,
+                                              rcu_read_lock_held() ||
+                                              lockdep_tasklist_lock_is_held());
                 if (first)
                         result = hlist_entry(first, struct task_struct, pids[(type)].node);
         }
kernel/rcutree.h
@@ -246,12 +246,21 @@
 
 #define RCU_JIFFIES_TILL_FORCE_QS        3      /* for rsp->jiffies_force_qs */
 #ifdef CONFIG_RCU_CPU_STALL_DETECTOR
-#define RCU_SECONDS_TILL_STALL_CHECK   (10 * HZ)  /* for rsp->jiffies_stall */
-#define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ)  /* for rsp->jiffies_stall */
-#define RCU_STALL_RAT_DELAY             2         /* Allow other CPUs time */
-                                                  /*  to take at least one */
-                                                  /*  scheduling clock irq */
-                                                  /*  before ratting on them. */
+
+#ifdef CONFIG_PROVE_RCU
+#define RCU_STALL_DELAY_DELTA          (5 * HZ)
+#else
+#define RCU_STALL_DELAY_DELTA          0
+#endif
+
+#define RCU_SECONDS_TILL_STALL_CHECK   (10 * HZ + RCU_STALL_DELAY_DELTA)
+                                                /* for rsp->jiffies_stall */
+#define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ + RCU_STALL_DELAY_DELTA)
+                                                /* for rsp->jiffies_stall */
+#define RCU_STALL_RAT_DELAY             2       /* Allow other CPUs time */
+                                                /*  to take at least one */
+                                                /*  scheduling clock irq */
+                                                /*  before ratting on them. */
 
 #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
 
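The delta simply pads the stall-warning timeouts when PROVE_RCU's extra checking slows the kernel down enough to trigger false warnings. My arithmetic, assuming HZ=1000 (the concrete values below are not part of the patch):

/* CONFIG_PROVE_RCU=y, HZ=1000:
 *   RCU_STALL_DELAY_DELTA          =  5 * HZ          =  5000 jiffies ( 5 s)
 *   RCU_SECONDS_TILL_STALL_CHECK   = 10 * HZ + delta  = 15000 jiffies (15 s)
 *   RCU_SECONDS_TILL_STALL_RECHECK = 30 * HZ + delta  = 35000 jiffies (35 s)
 * Without CONFIG_PROVE_RCU the delta is 0 and the original 10 s / 30 s limits apply.
 */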
kernel/rcutree_plugin.h
@@ -1010,6 +1010,10 @@
         int c = 0;
         int thatcpu;
 
+        /* Check for being in the holdoff period. */
+        if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies)
+                return rcu_needs_cpu_quick_check(cpu);
+
         /* Don't bother unless we are the last non-dyntick-idle CPU. */
         for_each_cpu_not(thatcpu, nohz_cpu_mask)
                 if (thatcpu != cpu) {
@@ -1041,10 +1045,8 @@
         }
 
         /* If RCU callbacks are still pending, RCU still needs this CPU. */
-        if (c) {
+        if (c)
                 raise_softirq(RCU_SOFTIRQ);
-                per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
-        }
         return c;
 }
 
kernel/sched_fair.c
kernel/trace/ftrace.c
@@ -27,6 +27,7 @@
 #include <linux/ctype.h>
 #include <linux/list.h>
 #include <linux/hash.h>
+#include <linux/rcupdate.h>
 
 #include <trace/events/sched.h>
 
@@ -84,18 +85,22 @@
 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 
+/*
+ * Traverse the ftrace_list, invoking all entries.  The reason that we
+ * can use rcu_dereference_raw() is that elements removed from this list
+ * are simply leaked, so there is no need to interact with a grace-period
+ * mechanism.  The rcu_dereference_raw() calls are needed to handle
+ * concurrent insertions into the ftrace_list.
+ *
+ * Silly Alpha and silly pointer-speculation compiler optimizations!
+ */
 static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
 {
-        struct ftrace_ops *op = ftrace_list;
+        struct ftrace_ops *op = rcu_dereference_raw(ftrace_list); /*see above*/
 
-        /* in case someone actually ports this to alpha! */
-        read_barrier_depends();
-
         while (op != &ftrace_list_end) {
-                /* silly alpha */
-                read_barrier_depends();
                 op->func(ip, parent_ip);
-                op = op->next;
+                op = rcu_dereference_raw(op->next); /*see above*/
         };
 }
 
@@ -150,8 +155,7 @@
          * the ops->next pointer is valid before another CPU sees
          * the ops pointer included into the ftrace_list.
          */
-        smp_wmb();
-        ftrace_list = ops;
+        rcu_assign_pointer(ftrace_list, ops);
 
         if (ftrace_enabled) {
                 ftrace_func_t func;
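The ftrace_list traversal is the unusual case the new comment describes: entries are published with rcu_assign_pointer() but never freed, so readers can use rcu_dereference_raw() (which still provides the Alpha/pointer-speculation ordering) without any read-side critical section or grace period. A self-contained sketch of that pattern with invented names:

struct example_ops {
        void (*func)(unsigned long ip, unsigned long parent_ip);
        struct example_ops *next;
};

static struct example_ops example_list_end;             /* sentinel, func unused */
static struct example_ops *example_list = &example_list_end;

/* Caller must serialize registrations (ftrace uses a mutex for this). */
static void example_register(struct example_ops *ops)
{
        ops->next = example_list;
        /* Publish: orders the initialization of *ops before the pointer update. */
        rcu_assign_pointer(example_list, ops);
}

static void example_call_all(unsigned long ip, unsigned long parent_ip)
{
        /* Entries are never freed, so no rcu_read_lock() or grace period is
         * needed; rcu_dereference_raw() still gives dependency ordering. */
        struct example_ops *op = rcu_dereference_raw(example_list);

        while (op != &example_list_end) {
                op->func(ip, parent_ip);
                op = rcu_dereference_raw(op->next);
        }
}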
kernel/trace/trace_event_profile.c
@@ -138,9 +138,9 @@
         cpu = smp_processor_id();
 
         if (in_nmi())
-                trace_buf = rcu_dereference(perf_trace_buf_nmi);
+                trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
         else
-                trace_buf = rcu_dereference(perf_trace_buf);
+                trace_buf = rcu_dereference_sched(perf_trace_buf);
 
         if (!trace_buf)
                 goto err;
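rcu_dereference_sched() fits here because the profiling buffers are only reclaimed after synchronize_sched(), and this path runs with preemption disabled (note the plain smp_processor_id()). A hedged sketch of the general pattern, with invented buffer and helper names:

char *buf;

preempt_disable();                              /* RCU-sched read-side section */
buf = rcu_dereference_sched(example_buf);       /* freed only via synchronize_sched() */
if (buf)
        example_use_buffer(buf);                /* hypothetical helper */
preempt_enable();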
mm/mempolicy.c
@@ -1756,10 +1756,12 @@
 
         if (!new)
                 return ERR_PTR(-ENOMEM);
+        rcu_read_lock();
         if (current_cpuset_is_being_rebound()) {
                 nodemask_t mems = cpuset_mems_allowed(current);
                 mpol_rebind_policy(old, &mems);
         }
+        rcu_read_unlock();
         *new = *old;
         atomic_set(&new->refcnt, 1);
         return new;