Commit d11c563dd20ff35da5652c3e1c989d9e10e1d6d0

Authored by Paul E. McKenney
Committed by Ingo Molnar
1 parent a898def29e

sched: Use lockdep-based checking on rcu_dereference()

Update the rcu_dereference() usages to take advantage of the new
lockdep-based checking.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
Cc: dhowells@redhat.com
LKML-Reference: <1266887105-1528-6-git-send-email-paulmck@linux.vnet.ibm.com>
[ -v2: fix allmodconfig missing symbol export build failure on x86 ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>

Showing 9 changed files with 45 additions and 12 deletions
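
A note on the pattern applied throughout the hunks below: rcu_dereference_check(p, c) behaves like rcu_dereference(p), but under CONFIG_PROVE_RCU it also complains unless the caller is inside an RCU read-side critical section or the lockdep condition c holds. A minimal sketch of the idiom, using a hypothetical foo structure and foo_mutex that are not part of this patch:

	#include <linux/rcupdate.h>
	#include <linux/mutex.h>
	#include <linux/lockdep.h>

	struct foo {
		int val;
	};

	static struct foo *global_foo;		/* updates serialized by foo_mutex */
	static DEFINE_MUTEX(foo_mutex);

	static int foo_get_val(void)
	{
		struct foo *p;

		/*
		 * Legal either under rcu_read_lock() or with foo_mutex held;
		 * RCU-lockdep warns if neither condition is satisfied.
		 */
		p = rcu_dereference_check(global_foo,
					  rcu_read_lock_held() ||
					  lockdep_is_held(&foo_mutex));
		return p ? p->val : -1;
	}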

include/linux/cgroup.h
... ... @@ -28,6 +28,7 @@
28 28 extern int cgroup_init_early(void);
29 29 extern int cgroup_init(void);
30 30 extern void cgroup_lock(void);
  31 +extern int cgroup_lock_is_held(void);
31 32 extern bool cgroup_lock_live_group(struct cgroup *cgrp);
32 33 extern void cgroup_unlock(void);
33 34 extern void cgroup_fork(struct task_struct *p);
... ... @@ -486,7 +487,9 @@
486 487 static inline struct cgroup_subsys_state *task_subsys_state(
487 488 struct task_struct *task, int subsys_id)
488 489 {
489   - return rcu_dereference(task->cgroups->subsys[subsys_id]);
  490 + return rcu_dereference_check(task->cgroups->subsys[subsys_id],
  491 + rcu_read_lock_held() ||
  492 + cgroup_lock_is_held());
490 493 }
491 494  
492 495 static inline struct cgroup* task_cgroup(struct task_struct *task,
include/linux/cred.h
... ... @@ -280,7 +280,7 @@
280 280 * task or by holding tasklist_lock to prevent it from being unlinked.
281 281 */
282 282 #define __task_cred(task) \
283   - ((const struct cred *)(rcu_dereference((task)->real_cred)))
  283 + ((const struct cred *)(rcu_dereference_check((task)->real_cred, rcu_read_lock_held() || lockdep_is_held(&tasklist_lock))))
284 284  
285 285 /**
286 286 * get_task_cred - Get another task's objective credentials
init/main.c
... ... @@ -416,7 +416,9 @@
416 416 kernel_thread(kernel_init, NULL, CLONE_FS | CLONE_SIGHAND);
417 417 numa_default_policy();
418 418 pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES);
  419 + rcu_read_lock();
419 420 kthreadd_task = find_task_by_pid_ns(pid, &init_pid_ns);
  421 + rcu_read_unlock();
420 422 unlock_kernel();
421 423  
422 424 /*
kernel/cgroup.c
... ... @@ -166,6 +166,20 @@
166 166 */
167 167 static int need_forkexit_callback __read_mostly;
168 168  
  169 +#ifdef CONFIG_PROVE_LOCKING
  170 +int cgroup_lock_is_held(void)
  171 +{
  172 + return lockdep_is_held(&cgroup_mutex);
  173 +}
  174 +#else /* #ifdef CONFIG_PROVE_LOCKING */
  175 +int cgroup_lock_is_held(void)
  176 +{
  177 + return mutex_is_locked(&cgroup_mutex);
  178 +}
  179 +#endif /* #else #ifdef CONFIG_PROVE_LOCKING */
  180 +
  181 +EXPORT_SYMBOL_GPL(cgroup_lock_is_held);
  182 +
169 183 /* convenient tests for these bits */
170 184 inline int cgroup_is_removed(const struct cgroup *cgrp)
171 185 {
kernel/exit.c
... ... @@ -85,7 +85,9 @@
85 85 BUG_ON(!sig);
86 86 BUG_ON(!atomic_read(&sig->count));
87 87  
88   - sighand = rcu_dereference(tsk->sighand);
  88 + sighand = rcu_dereference_check(tsk->sighand,
  89 + rcu_read_lock_held() ||
  90 + lockdep_is_held(&tasklist_lock));
89 91 spin_lock(&sighand->siglock);
90 92  
91 93 posix_cpu_timers_exit(tsk);
... ... @@ -170,8 +172,10 @@
170 172 repeat:
171 173 tracehook_prepare_release_task(p);
172 174 /* don't need to get the RCU readlock here - the process is dead and
173   - * can't be modifying its own credentials */
  175 + * can't be modifying its own credentials. But shut RCU-lockdep up */
  176 + rcu_read_lock();
174 177 atomic_dec(&__task_cred(p)->user->processes);
  178 + rcu_read_unlock();
175 179  
176 180 proc_flush_task(p);
177 181  
... ... @@ -473,9 +477,11 @@
473 477 /*
474 478 * It is safe to dereference the fd table without RCU or
475 479 * ->file_lock because this is the last reference to the
476   - * files structure.
  480 + * files structure. But use RCU to shut RCU-lockdep up.
477 481 */
  482 + rcu_read_lock();
478 483 fdt = files_fdtable(files);
  484 + rcu_read_unlock();
479 485 for (;;) {
480 486 unsigned long set;
481 487 i = j * __NFDBITS;
... ... @@ -521,10 +527,12 @@
521 527 * at the end of the RCU grace period. Otherwise,
522 528 * you can free files immediately.
523 529 */
  530 + rcu_read_lock();
524 531 fdt = files_fdtable(files);
525 532 if (fdt != &files->fdtab)
526 533 kmem_cache_free(files_cachep, files);
527 534 free_fdtable(fdt);
  535 + rcu_read_unlock();
528 536 }
529 537 }
530 538  
kernel/fork.c
... ... @@ -86,6 +86,7 @@
86 86 DEFINE_PER_CPU(unsigned long, process_counts) = 0;
87 87  
88 88 __cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */
  89 +EXPORT_SYMBOL_GPL(tasklist_lock);
89 90  
90 91 int nr_processes(void)
91 92 {
kernel/notifier.c
... ... @@ -78,10 +78,10 @@
78 78 int ret = NOTIFY_DONE;
79 79 struct notifier_block *nb, *next_nb;
80 80  
81   - nb = rcu_dereference(*nl);
  81 + nb = rcu_dereference_raw(*nl);
82 82  
83 83 while (nb && nr_to_call) {
84   - next_nb = rcu_dereference(nb->next);
  84 + next_nb = rcu_dereference_raw(nb->next);
85 85  
86 86 #ifdef CONFIG_DEBUG_NOTIFIERS
87 87 if (unlikely(!func_ptr_is_kernel_text(nb->notifier_call))) {
... ... @@ -309,7 +309,7 @@
309 309 * racy then it does not matter what the result of the test
310 310 * is, we re-check the list after having taken the lock anyway:
311 311 */
312   - if (rcu_dereference(nh->head)) {
  312 + if (rcu_dereference_raw(nh->head)) {
313 313 down_read(&nh->rwsem);
314 314 ret = notifier_call_chain(&nh->head, val, v, nr_to_call,
315 315 nr_calls);
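
The notifier hunks above switch to rcu_dereference_raw() rather than adding a lockdep expression, presumably because notifier_call_chain() is shared by chain types with different locking (atomic chains under RCU, blocking chains under an rwsem), so no single condition fits; the _raw flavor documents that the caller asserts protection by other means and suppresses the checking. A hypothetical sketch, not taken from this patch:

	#include <linux/rcupdate.h>
	#include <linux/list.h>

	/* Walk an hlist whose protection is guaranteed by the caller. */
	static int count_nodes(struct hlist_head *head)
	{
		struct hlist_node *n;
		int count = 0;

		for (n = rcu_dereference_raw(head->first); n;
		     n = rcu_dereference_raw(n->next))
			count++;

		return count;
	}
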
kernel/pid.c
... ... @@ -367,7 +367,7 @@
367 367 struct task_struct *result = NULL;
368 368 if (pid) {
369 369 struct hlist_node *first;
370   - first = rcu_dereference(pid->tasks[type].first);
  370 + first = rcu_dereference_check(pid->tasks[type].first, rcu_read_lock_held() || lockdep_is_held(&tasklist_lock));
371 371 if (first)
372 372 result = hlist_entry(first, struct task_struct, pids[(type)].node);
373 373 }
kernel/sched.c
... ... @@ -645,6 +645,11 @@
645 645 #endif
646 646 }
647 647  
  648 +#define for_each_domain_rd(p) \
  649 + rcu_dereference_check((p), \
  650 + rcu_read_lock_sched_held() || \
  651 + lockdep_is_held(&sched_domains_mutex))
  652 +
648 653 /*
649 654 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
650 655 * See detach_destroy_domains: synchronize_sched for details.
... ... @@ -653,7 +658,7 @@
653 658 * preempt-disabled sections.
654 659 */
655 660 #define for_each_domain(cpu, __sd) \
656   - for (__sd = rcu_dereference(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
  661 + for (__sd = for_each_domain_rd(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
657 662  
658 663 #define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
659 664 #define this_rq() (&__get_cpu_var(runqueues))
... ... @@ -1531,7 +1536,7 @@
1531 1536  
1532 1537 static struct sched_group *group_of(int cpu)
1533 1538 {
1534   - struct sched_domain *sd = rcu_dereference(cpu_rq(cpu)->sd);
  1539 + struct sched_domain *sd = rcu_dereference_sched(cpu_rq(cpu)->sd);
1535 1540  
1536 1541 if (!sd)
1537 1542 return NULL;
... ... @@ -4888,7 +4893,7 @@
4888 4893  
4889 4894 static inline int on_null_domain(int cpu)
4890 4895 {
4891   - return !rcu_dereference(cpu_rq(cpu)->sd);
  4896 + return !rcu_dereference_sched(cpu_rq(cpu)->sd);
4892 4897 }
4893 4898  
4894 4899 /*