Commit 6ebb237bece23275d1da149b61a342f0d4d06a08

Authored by Paul E. McKenney
Committed by Ingo Molnar
1 parent 9f680ab414

rcu: Re-arrange code to reduce #ifdef pain

Remove #ifdefs from kernel/rcupdate.c and
include/linux/rcupdate.h by moving code to
include/linux/rcutiny.h, include/linux/rcutree.h, and
kernel/rcutree.c.

Also remove some definitions that are no longer used.
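
In miniature, the pattern: instead of one #ifdef per symbol in the
common header, each RCU flavor's header supplies its own definition,
and the common header keeps a single dispatch #if. A condensed sketch
of the before/after (mirroring the hunks below):

    /* include/linux/rcupdate.h -- before: per-symbol #ifdefs */
    #ifdef CONFIG_TREE_PREEMPT_RCU
    extern void synchronize_rcu(void);
    #else
    #define synchronize_rcu synchronize_sched
    #endif

    /* include/linux/rcupdate.h -- after: one dispatch point */
    #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
    #include <linux/rcutree.h>   /* declares or aliases synchronize_rcu */
    #else
    #include <linux/rcutiny.h>   /* #defines synchronize_rcu synchronize_sched */
    #endif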

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
Cc: dhowells@redhat.com
LKML-Reference: <1258908830885-git-send-email->
Signed-off-by: Ingo Molnar <mingo@elte.hu>

Showing 6 changed files with 118 additions and 117 deletions

include/linux/rcupdate.h
... ... @@ -52,11 +52,6 @@
52 52 };
53 53  
54 54 /* Exported common interfaces */
55   -#ifdef CONFIG_TREE_PREEMPT_RCU
56   -extern void synchronize_rcu(void);
57   -#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
58   -#define synchronize_rcu synchronize_sched
59   -#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
60 55 extern void synchronize_rcu_bh(void);
61 56 extern void synchronize_sched(void);
62 57 extern void rcu_barrier(void);
... ... @@ -67,13 +62,6 @@
67 62  
68 63 /* Internal to kernel */
69 64 extern void rcu_init(void);
70   -extern void rcu_scheduler_starting(void);
71   -#ifndef CONFIG_TINY_RCU
72   -extern int rcu_needs_cpu(int cpu);
73   -#else
74   -static inline int rcu_needs_cpu(int cpu) { return 0; }
75   -#endif
76   -extern int rcu_scheduler_active;
77 65  
78 66 #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
79 67 #include <linux/rcutree.h>
include/linux/rcutiny.h
... ... @@ -39,6 +39,11 @@
39 39 #define rcu_init_sched() do { } while (0)
40 40 extern void rcu_check_callbacks(int cpu, int user);
41 41  
  42 +static inline int rcu_needs_cpu(int cpu)
  43 +{
  44 + return 0;
  45 +}
  46 +
42 47 /*
43 48 * Return the number of grace periods.
44 49 */
... ... @@ -57,6 +62,8 @@
57 62  
58 63 extern int rcu_expedited_torture_stats(char *page);
59 64  
  65 +#define synchronize_rcu synchronize_sched
  66 +
60 67 static inline void synchronize_rcu_expedited(void)
61 68 {
62 69 synchronize_sched();
... ... @@ -85,6 +92,10 @@
85 92 }
86 93  
87 94 #endif /* #else #ifdef CONFIG_NO_HZ */
  95 +
  96 +static inline void rcu_scheduler_starting(void)
  97 +{
  98 +}
88 99  
89 100 static inline void exit_rcu(void)
90 101 {
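
Because TINY_RCU now makes synchronize_rcu a plain macro alias for
synchronize_sched, callers stay source-compatible across flavors. A
hypothetical updater, sketched (struct foo and global_foo are
illustrative names, not part of this patch):

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct foo { int val; };
    static struct foo *global_foo;

    static void foo_retire(void)
    {
            struct foo *old = global_foo;

            rcu_assign_pointer(global_foo, NULL); /* unpublish */
            synchronize_rcu();   /* wait out all pre-existing readers */
            kfree(old);          /* no reader can still hold a reference */
    }
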
include/linux/rcutree.h
... ... @@ -35,12 +35,14 @@
35 35 extern void rcu_sched_qs(int cpu);
36 36 extern void rcu_bh_qs(int cpu);
37 37 extern int rcu_needs_cpu(int cpu);
  38 +extern void rcu_scheduler_starting(void);
38 39 extern int rcu_expedited_torture_stats(char *page);
39 40  
40 41 #ifdef CONFIG_TREE_PREEMPT_RCU
41 42  
42 43 extern void __rcu_read_lock(void);
43 44 extern void __rcu_read_unlock(void);
  45 +extern void synchronize_rcu(void);
44 46 extern void exit_rcu(void);
45 47  
46 48 #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
... ... @@ -55,7 +57,7 @@
55 57 preempt_enable();
56 58 }
57 59  
58   -#define __synchronize_sched() synchronize_rcu()
  60 +#define synchronize_rcu synchronize_sched
59 61  
60 62 static inline void exit_rcu(void)
61 63 {
kernel/rcupdate.c
... ... @@ -44,7 +44,6 @@
44 44 #include <linux/cpu.h>
45 45 #include <linux/mutex.h>
46 46 #include <linux/module.h>
47   -#include <linux/kernel_stat.h>
48 47  
49 48 #ifdef CONFIG_DEBUG_LOCK_ALLOC
50 49 static struct lock_class_key rcu_lock_key;
... ... @@ -53,8 +52,6 @@
53 52 EXPORT_SYMBOL_GPL(rcu_lock_map);
54 53 #endif
55 54  
56   -int rcu_scheduler_active __read_mostly;
57   -
58 55 /*
59 56 * Awaken the corresponding synchronize_rcu() instance now that a
60 57 * grace period has elapsed.
... ... @@ -65,106 +62,5 @@
65 62  
66 63 rcu = container_of(head, struct rcu_synchronize, head);
67 64 complete(&rcu->completion);
68   -}
69   -
70   -#ifndef CONFIG_TINY_RCU
71   -
72   -#ifdef CONFIG_TREE_PREEMPT_RCU
73   -
74   -/**
75   - * synchronize_rcu - wait until a grace period has elapsed.
76   - *
77   - * Control will return to the caller some time after a full grace
78   - * period has elapsed, in other words after all currently executing RCU
79   - * read-side critical sections have completed. RCU read-side critical
80   - * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
81   - * and may be nested.
82   - */
83   -void synchronize_rcu(void)
84   -{
85   - struct rcu_synchronize rcu;
86   -
87   - if (!rcu_scheduler_active)
88   - return;
89   -
90   - init_completion(&rcu.completion);
91   - /* Will wake me after RCU finished. */
92   - call_rcu(&rcu.head, wakeme_after_rcu);
93   - /* Wait for it. */
94   - wait_for_completion(&rcu.completion);
95   -}
96   -EXPORT_SYMBOL_GPL(synchronize_rcu);
97   -
98   -#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
99   -
100   -/**
101   - * synchronize_sched - wait until an rcu-sched grace period has elapsed.
102   - *
103   - * Control will return to the caller some time after a full rcu-sched
104   - * grace period has elapsed, in other words after all currently executing
105   - * rcu-sched read-side critical sections have completed. These read-side
106   - * critical sections are delimited by rcu_read_lock_sched() and
107   - * rcu_read_unlock_sched(), and may be nested. Note that preempt_disable(),
108   - * local_irq_disable(), and so on may be used in place of
109   - * rcu_read_lock_sched().
110   - *
111   - * This means that all preempt_disable code sequences, including NMI and
112   - * hardware-interrupt handlers, in progress on entry will have completed
113   - * before this primitive returns. However, this does not guarantee that
114   - * softirq handlers will have completed, since in some kernels, these
115   - * handlers can run in process context, and can block.
116   - *
117   - * This primitive provides the guarantees made by the (now removed)
118   - * synchronize_kernel() API. In contrast, synchronize_rcu() only
119   - * guarantees that rcu_read_lock() sections will have completed.
120   - * In "classic RCU", these two guarantees happen to be one and
121   - * the same, but can differ in realtime RCU implementations.
122   - */
123   -void synchronize_sched(void)
124   -{
125   - struct rcu_synchronize rcu;
126   -
127   - if (rcu_blocking_is_gp())
128   - return;
129   -
130   - init_completion(&rcu.completion);
131   - /* Will wake me after RCU finished. */
132   - call_rcu_sched(&rcu.head, wakeme_after_rcu);
133   - /* Wait for it. */
134   - wait_for_completion(&rcu.completion);
135   -}
136   -EXPORT_SYMBOL_GPL(synchronize_sched);
137   -
138   -/**
139   - * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
140   - *
141   - * Control will return to the caller some time after a full rcu_bh grace
142   - * period has elapsed, in other words after all currently executing rcu_bh
143   - * read-side critical sections have completed. RCU read-side critical
144   - * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
145   - * and may be nested.
146   - */
147   -void synchronize_rcu_bh(void)
148   -{
149   - struct rcu_synchronize rcu;
150   -
151   - if (rcu_blocking_is_gp())
152   - return;
153   -
154   - init_completion(&rcu.completion);
155   - /* Will wake me after RCU finished. */
156   - call_rcu_bh(&rcu.head, wakeme_after_rcu);
157   - /* Wait for it. */
158   - wait_for_completion(&rcu.completion);
159   -}
160   -EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
161   -
162   -#endif /* #ifndef CONFIG_TINY_RCU */
163   -
164   -void rcu_scheduler_starting(void)
165   -{
166   - WARN_ON(num_online_cpus() != 1);
167   - WARN_ON(nr_context_switches() > 0);
168   - rcu_scheduler_active = 1;
169 65 }
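
The bodies above move essentially verbatim into kernel/rcutree.c and
kernel/rcutree_plugin.h. For reference, synchronize_rcu_bh() pairs
with softirq-disabling readers; a hypothetical reader against the
illustrative global_foo from the earlier sketch:

    /*
     * rcu_read_lock_bh() disables softirqs, so an updater's
     * synchronize_rcu_bh() waits for this section to finish.
     */
    static int foo_read_val_bh(void)
    {
            struct foo *p;
            int val = 0;

            rcu_read_lock_bh();
            p = rcu_dereference(global_foo);
            if (p)
                    val = p->val;
            rcu_read_unlock_bh();
            return val;
    }
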
kernel/rcutree.c
... ... @@ -46,6 +46,7 @@
46 46 #include <linux/cpu.h>
47 47 #include <linux/mutex.h>
48 48 #include <linux/time.h>
  49 +#include <linux/kernel_stat.h>
49 50  
50 51 #include "rcutree.h"
51 52  
52 53  
... ... @@ -79,7 +80,9 @@
79 80 struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state);
80 81 DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
81 82  
  83 +static int rcu_scheduler_active __read_mostly;
82 84  
  85 +
83 86 /*
84 87 * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s
85 88 * permit this function to be invoked without holding the root rcu_node
... ... @@ -1396,6 +1399,68 @@
1396 1399 }
1397 1400 EXPORT_SYMBOL_GPL(call_rcu_bh);
1398 1401  
  1402 +/**
  1403 + * synchronize_sched - wait until an rcu-sched grace period has elapsed.
  1404 + *
  1405 + * Control will return to the caller some time after a full rcu-sched
  1406 + * grace period has elapsed, in other words after all currently executing
  1407 + * rcu-sched read-side critical sections have completed. These read-side
  1408 + * critical sections are delimited by rcu_read_lock_sched() and
  1409 + * rcu_read_unlock_sched(), and may be nested. Note that preempt_disable(),
  1410 + * local_irq_disable(), and so on may be used in place of
  1411 + * rcu_read_lock_sched().
  1412 + *
  1413 + * This means that all preempt_disable code sequences, including NMI and
  1414 + * hardware-interrupt handlers, in progress on entry will have completed
  1415 + * before this primitive returns. However, this does not guarantee that
  1416 + * softirq handlers will have completed, since in some kernels, these
  1417 + * handlers can run in process context, and can block.
  1418 + *
  1419 + * This primitive provides the guarantees made by the (now removed)
  1420 + * synchronize_kernel() API. In contrast, synchronize_rcu() only
  1421 + * guarantees that rcu_read_lock() sections will have completed.
  1422 + * In "classic RCU", these two guarantees happen to be one and
  1423 + * the same, but can differ in realtime RCU implementations.
  1424 + */
  1425 +void synchronize_sched(void)
  1426 +{
  1427 + struct rcu_synchronize rcu;
  1428 +
  1429 + if (rcu_blocking_is_gp())
  1430 + return;
  1431 +
  1432 + init_completion(&rcu.completion);
  1433 + /* Will wake me after RCU finished. */
  1434 + call_rcu_sched(&rcu.head, wakeme_after_rcu);
  1435 + /* Wait for it. */
  1436 + wait_for_completion(&rcu.completion);
  1437 +}
  1438 +EXPORT_SYMBOL_GPL(synchronize_sched);
  1439 +
  1440 +/**
  1441 + * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
  1442 + *
  1443 + * Control will return to the caller some time after a full rcu_bh grace
  1444 + * period has elapsed, in other words after all currently executing rcu_bh
  1445 + * read-side critical sections have completed. RCU read-side critical
  1446 + * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
  1447 + * and may be nested.
  1448 + */
  1449 +void synchronize_rcu_bh(void)
  1450 +{
  1451 + struct rcu_synchronize rcu;
  1452 +
  1453 + if (rcu_blocking_is_gp())
  1454 + return;
  1455 +
  1456 + init_completion(&rcu.completion);
  1457 + /* Will wake me after RCU finished. */
  1458 + call_rcu_bh(&rcu.head, wakeme_after_rcu);
  1459 + /* Wait for it. */
  1460 + wait_for_completion(&rcu.completion);
  1461 +}
  1462 +EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
  1463 +
1399 1464 /*
1400 1465 * Check to see if there is any immediate RCU-related work to be done
1401 1466 * by the current CPU, for the specified type of RCU, returning 1 if so.
... ... @@ -1478,6 +1543,21 @@
1478 1543 return per_cpu(rcu_sched_data, cpu).nxtlist ||
1479 1544 per_cpu(rcu_bh_data, cpu).nxtlist ||
1480 1545 rcu_preempt_needs_cpu(cpu);
  1546 +}
  1547 +
  1548 +/*
  1549 + * This function is invoked towards the end of the scheduler's initialization
  1550 + * process. Before this is called, the idle task might contain
  1551 + * RCU read-side critical sections (during which time, this idle
  1552 + * task is booting the system). After this function is called, the
  1553 + * idle tasks are prohibited from containing RCU read-side critical
  1554 + * sections.
  1555 + */
  1556 +void rcu_scheduler_starting(void)
  1557 +{
  1558 + WARN_ON(num_online_cpus() != 1);
  1559 + WARN_ON(nr_context_switches() > 0);
  1560 + rcu_scheduler_active = 1;
1481 1561 }
1482 1562  
1483 1563 static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
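
The synchronize_sched() guarantee documented above pairs with readers
that merely disable preemption. A minimal hypothetical reader, reusing
the illustrative struct foo/global_foo from the earlier sketches:

    /*
     * Any preempt-disabled region is an rcu-sched read-side critical
     * section, so the updater's synchronize_sched() waits for it.
     */
    static int foo_read_val(void)
    {
            struct foo *p;
            int val = 0;

            rcu_read_lock_sched();   /* preempt_disable() also works */
            p = rcu_dereference(global_foo);
            if (p)
                    val = p->val;
            rcu_read_unlock_sched();
            return val;
    }
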
kernel/rcutree_plugin.h
... ... @@ -425,6 +425,30 @@
425 425 }
426 426 EXPORT_SYMBOL_GPL(call_rcu);
427 427  
  428 +/**
  429 + * synchronize_rcu - wait until a grace period has elapsed.
  430 + *
  431 + * Control will return to the caller some time after a full grace
  432 + * period has elapsed, in other words after all currently executing RCU
  433 + * read-side critical sections have completed. RCU read-side critical
  434 + * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
  435 + * and may be nested.
  436 + */
  437 +void synchronize_rcu(void)
  438 +{
  439 + struct rcu_synchronize rcu;
  440 +
  441 + if (!rcu_scheduler_active)
  442 + return;
  443 +
  444 + init_completion(&rcu.completion);
  445 + /* Will wake me after RCU finished. */
  446 + call_rcu(&rcu.head, wakeme_after_rcu);
  447 + /* Wait for it. */
  448 + wait_for_completion(&rcu.completion);
  449 +}
  450 +EXPORT_SYMBOL_GPL(synchronize_rcu);
  451 +
428 452 /*
429 453 * Wait for an rcu-preempt grace period. We are supposed to expedite the
430 454 * grace period, but this is the crude slow compatability hack, so just