Commit 49fb4c6290c70c418a5c25eee996d6b55ea132d6

Authored by Paul Gortmaker
1 parent 013dbb325b

rcu: delete __cpuinit usage from all rcu files

The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications.  The fix in commit
5e427ec2d0 ("x86: Fix bit corruption at CPU resume time") is a good
example of the nasty type of bugs that can be created with improper
use of the various __init prefixes.
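
As a rough illustration (not the kernel's actual macro definitions), a
__cpuinit-style annotation is essentially a section-placement attribute:
the linker script can then either keep that section or discard it after
boot-time CPU bring-up.  The names in this sketch are invented for the
example and compile as ordinary userspace C:

#include <stdio.h>

/*
 * Toy stand-in for __cpuinit: when "CPU hotplug" support is compiled
 * out, tag the function so it lands in a dedicated section that a
 * linker script could throw away once the boot-time CPU bring-up is
 * done.  When hotplug is enabled, the annotation expands to nothing
 * and the code stays resident.
 */
#ifdef EXAMPLE_HOTPLUG_CPU
#define example_cpuinit
#else
#define example_cpuinit __attribute__((section(".example.cpuinit.text")))
#endif

/* Boot-time-only per-CPU setup work would live in a function like this. */
static int example_cpuinit example_prepare_cpu(int cpu)
{
	printf("preparing CPU %d\n", cpu);
	return 0;
}

int main(void)
{
	return example_prepare_cpu(0);
}
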

After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out.  Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.

This removes all the uses of the __cpuinit macros from all of
the rcu files.

[1] https://lkml.org/lkml/2013/5/20/589

Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Josh Triplett <josh@freedesktop.org>
Cc: Dipankar Sarma <dipankar@in.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>

Showing 4 changed files with 11 additions and 11 deletions

kernel/rcutorture.c
@@ -1476,7 +1476,7 @@
 * Execute random CPU-hotplug operations at the interval specified
 * by the onoff_interval.
 */
-static int __cpuinit
+static int
rcu_torture_onoff(void *arg)
{
	int cpu;
@@ -1558,7 +1558,7 @@
	return 0;
}

-static int __cpuinit
+static int
rcu_torture_onoff_init(void)
{
	int ret;
@@ -1601,7 +1601,7 @@
 * CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then
 * induces a CPU stall for the time specified by stall_cpu.
 */
-static int __cpuinit rcu_torture_stall(void *args)
+static int rcu_torture_stall(void *args)
{
	unsigned long stop_at;

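
The functions above (rcu_torture_onoff(), rcu_torture_stall()) run as
kernel threads spawned from the corresponding *_init() helpers.  A
minimal sketch of that kthread pattern, with invented names and nothing
RCU-specific, written against a roughly 3.x-era module build:

#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/err.h>

static struct task_struct *example_task;

/* Thread body: do one round of work per second until asked to stop. */
static int example_thread_fn(void *arg)
{
	while (!kthread_should_stop()) {
		/* ... one round of test/torture work would go here ... */
		msleep_interruptible(1000);
	}
	return 0;
}

/* Spawn the thread; no __cpuinit needed, the code simply stays resident. */
static int __init example_thread_init(void)
{
	example_task = kthread_run(example_thread_fn, NULL, "example_kthread");
	return IS_ERR(example_task) ? PTR_ERR(example_task) : 0;
}

static void __exit example_thread_exit(void)
{
	kthread_stop(example_task);
}

module_init(example_thread_init);
module_exit(example_thread_exit);
MODULE_LICENSE("GPL");
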
kernel/rcutree.c
@@ -2910,7 +2910,7 @@
 * can accept some slop in the rsp->completed access due to the fact
 * that this CPU cannot possibly have any RCU callbacks in flight yet.
 */
-static void __cpuinit
+static void
rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
{
	unsigned long flags;
@@ -2962,7 +2962,7 @@
	mutex_unlock(&rsp->onoff_mutex);
}

-static void __cpuinit rcu_prepare_cpu(int cpu)
+static void rcu_prepare_cpu(int cpu)
{
	struct rcu_state *rsp;

@@ -2974,7 +2974,7 @@
/*
 * Handle CPU online/offline notification events.
 */
-static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
+static int rcu_cpu_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
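
rcu_cpu_notify() above is a CPU-hotplug notifier callback.  For context,
here is a minimal sketch of how such a notifier was registered in kernels
of this vintage (invented names, not the RCU code; later kernels replaced
this interface with the cpuhp state machine):

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/cpu.h>
#include <linux/notifier.h>

/* Same shape as rcu_cpu_notify(): 'hcpu' carries the CPU number. */
static int example_cpu_notify(struct notifier_block *self,
			      unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		pr_info("example: CPU %ld coming up\n", cpu);
		break;
	case CPU_DEAD:
		pr_info("example: CPU %ld went away\n", cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_nb = {
	.notifier_call = example_cpu_notify,
};

static int __init example_notifier_init(void)
{
	/* With __cpuinit gone, this callback is ordinary resident text. */
	return register_cpu_notifier(&example_cpu_nb);
}

static void __exit example_notifier_exit(void)
{
	unregister_cpu_notifier(&example_cpu_nb);
}

module_init(example_notifier_init);
module_exit(example_notifier_exit);
MODULE_LICENSE("GPL");
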
kernel/rcutree.h
@@ -521,10 +521,10 @@
static bool rcu_is_callbacks_kthread(void);
#ifdef CONFIG_RCU_BOOST
static void rcu_preempt_do_callbacks(void);
-static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
+static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
					struct rcu_node *rnp);
#endif /* #ifdef CONFIG_RCU_BOOST */
-static void __cpuinit rcu_prepare_kthreads(int cpu);
+static void rcu_prepare_kthreads(int cpu);
static void rcu_cleanup_after_idle(int cpu);
static void rcu_prepare_for_idle(int cpu);
static void rcu_idle_count_callbacks_posted(void);
kernel/rcutree_plugin.h
@@ -1352,7 +1352,7 @@
 * already exist. We only create this kthread for preemptible RCU.
 * Returns zero if all is well, a negated errno otherwise.
 */
-static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
+static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
					struct rcu_node *rnp)
{
	int rnp_index = rnp - &rsp->node[0];
@@ -1507,7 +1507,7 @@
}
early_initcall(rcu_spawn_kthreads);

-static void __cpuinit rcu_prepare_kthreads(int cpu)
+static void rcu_prepare_kthreads(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
	struct rcu_node *rnp = rdp->mynode;
@@ -1549,7 +1549,7 @@
}
early_initcall(rcu_scheduler_really_started);

-static void __cpuinit rcu_prepare_kthreads(int cpu)
+static void rcu_prepare_kthreads(int cpu)
{
}