Commit d166991234347215dc23fc9dc15a63a83a1a54e1

Authored by Thomas Gleixner
1 parent a1a04ec3c7

idle: Implement generic idle function

All idle functions in arch/* are more or less the same, plus minus a
few bugs and extra instrumentation, tickless support and other
optional items.

Implement a generic idle function which resembles the functionality
found in arch/. Provide weak arch_cpu_idle_* functions which can be
overridden by the architecture code if needed.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Paul McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Reviewed-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Cc: Magnus Damm <magnus.damm@gmail.com>
Link: http://lkml.kernel.org/r/20130321215233.646635455@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

Showing 3 changed files with 116 additions and 0 deletions Side-by-side Diff

... ... @@ -216,6 +216,9 @@
216 216 config GENERIC_SMP_IDLE_THREAD
217 217 bool
218 218  
# Selected by architectures that use the generic idle loop in
# kernel/cpu/idle.c instead of providing their own cpu_idle().
config GENERIC_IDLE_LOOP
	bool
219 222 # Select if arch init_task initializer is different to init/init_task.c
220 223 config ARCH_INIT_TASK
221 224 bool
... ... @@ -220,5 +220,13 @@
220 220 void cpu_startup_entry(enum cpuhp_state state);
221 221 void cpu_idle(void);
222 222  
/* Enable/disable forced polling idle; calls nest (refcounted). */
void cpu_idle_poll_ctrl(bool enable);

/*
 * Hooks for the generic idle loop.  All have weak default
 * implementations and may be overridden by the architecture.
 */
void arch_cpu_idle(void);
void arch_cpu_idle_prepare(void);
void arch_cpu_idle_enter(void);
void arch_cpu_idle_exit(void);
void arch_cpu_idle_dead(void);
  230 +
223 231 #endif /* _LINUX_CPU_H_ */
... ... @@ -3,9 +3,114 @@
3 3 */
4 4 #include <linux/sched.h>
5 5 #include <linux/cpu.h>
  6 +#include <linux/tick.h>
  7 +#include <linux/mm.h>
6 8  
  9 +#include <asm/tlb.h>
  10 +
  11 +#include <trace/events/power.h>
  12 +
  13 +#ifndef CONFIG_GENERIC_IDLE_LOOP
void cpu_startup_entry(enum cpuhp_state state)
{
	/* Arch still supplies its own idle loop; hand off to it. */
	cpu_idle();
}
  18 +#else
  19 +
  20 +static int __read_mostly cpu_idle_force_poll;
  21 +
  22 +void cpu_idle_poll_ctrl(bool enable)
  23 +{
  24 + if (enable) {
  25 + cpu_idle_force_poll++;
  26 + } else {
  27 + cpu_idle_force_poll--;
  28 + WARN_ON_ONCE(cpu_idle_force_poll < 0);
  29 + }
  30 +}
  31 +
#ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
/* Boot parameter "nohlt": force the polling idle loop. */
static int __init cpu_idle_poll_setup(char *__unused)
{
	cpu_idle_force_poll = 1;
	return 1;
}
__setup("nohlt", cpu_idle_poll_setup);

/* Boot parameter "hlt": use the normal (halting) idle path. */
static int __init cpu_idle_nopoll_setup(char *__unused)
{
	cpu_idle_force_poll = 0;
	return 1;
}
__setup("hlt", cpu_idle_nopoll_setup);
#endif
  47 +
/*
 * Polling idle: busy-wait with interrupts enabled until a reschedule
 * is pending.  The two trace events must stay paired: they mark idle
 * entry and exit for the power tracer.
 */
static inline int cpu_idle_poll(void)
{
	trace_cpu_idle_rcuidle(0, smp_processor_id());
	/* Interrupts on so pending wakeup IRQs can be delivered while spinning. */
	local_irq_enable();
	while (!need_resched())
		cpu_relax();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	return 1;
}
  57 +
/* Weak implementations for optional arch specific functions */
void __weak arch_cpu_idle_prepare(void) { }
void __weak arch_cpu_idle_enter(void) { }
void __weak arch_cpu_idle_exit(void) { }
void __weak arch_cpu_idle_dead(void) { }

/*
 * Fallback when the arch provides no low-power idle instruction:
 * permanently switch the idle loop over to polling.
 */
void __weak arch_cpu_idle(void)
{
	cpu_idle_force_poll = 1;
}
  67 +
  68 +/*
  69 + * Generic idle loop implementation
  70 + */
  71 +static void cpu_idle_loop(void)
  72 +{
  73 + while (1) {
  74 + tick_nohz_idle_enter();
  75 +
  76 + while (!need_resched()) {
  77 + check_pgt_cache();
  78 + rmb();
  79 +
  80 + if (cpu_is_offline(smp_processor_id()))
  81 + arch_cpu_idle_dead();
  82 +
  83 + local_irq_disable();
  84 + arch_cpu_idle_enter();
  85 +
  86 + if (cpu_idle_force_poll) {
  87 + cpu_idle_poll();
  88 + } else {
  89 + current_clr_polling();
  90 + if (!need_resched()) {
  91 + stop_critical_timings();
  92 + rcu_idle_enter();
  93 + arch_cpu_idle();
  94 + WARN_ON_ONCE(irqs_disabled());
  95 + rcu_idle_exit();
  96 + start_critical_timings();
  97 + } else {
  98 + local_irq_enable();
  99 + }
  100 + current_set_polling();
  101 + }
  102 + arch_cpu_idle_exit();
  103 + }
  104 + tick_nohz_idle_exit();
  105 + schedule_preempt_disabled();
  106 + }
  107 +}
  108 +
void cpu_startup_entry(enum cpuhp_state state)
{
	/*
	 * Set the polling flag first so remote wakeups can avoid the
	 * reschedule IPI, give the arch a chance to prepare, then enter
	 * the idle loop (which never returns).
	 */
	current_set_polling();
	arch_cpu_idle_prepare();
	cpu_idle_loop();
}
  115 +#endif