Commit 26e09c6eee14f4827b55137ba0eedc4e77cd50ab

Authored by Don Zickus
Committed by Frederic Weisbecker
1 parent cafcd80d21

lockup_detector: Convert per_cpu to __get_cpu_var for readability

Just a bunch of conversions, as suggested by Frederic W.
__get_cpu_var() checks that preemption is disabled.

It also improves readability by making it obvious that
these variables are now being accessed locally.

Signed-off-by: Don Zickus <dzickus@redhat.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Don Zickus <dzickus@redhat.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
LKML-Reference: <1274133966-18415-2-git-send-email-dzickus@redhat.com>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>

Showing 1 changed file with 17 additions and 18 deletions
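
For context, here is a minimal sketch of the difference between the two accessors. It is not part of this commit, and the per-cpu variable and helpers below (example_hits, example_read, example_hit) are made up for illustration: per_cpu() takes an explicit CPU index and can be used from any context, whereas __get_cpu_var() always acts on the current CPU's copy and, under CONFIG_DEBUG_PREEMPT, warns if preemption is enabled, since a preemptible task could migrate to another CPU mid-access. The same reasoning is behind switching __touch_watchdog() from raw_smp_processor_id() to the checked smp_processor_id().

#include <linux/percpu.h>

/* Hypothetical per-cpu counter, for illustration only. */
static DEFINE_PER_CPU(unsigned long, example_hits);

/* Remote access: usable from any context, but the caller must name the CPU. */
static unsigned long example_read(int cpu)
{
	return per_cpu(example_hits, cpu);
}

/*
 * Local access: implicitly "this CPU".  With CONFIG_DEBUG_PREEMPT enabled,
 * __get_cpu_var() warns when preemption is not disabled, because the task
 * could be migrated between computing the per-cpu address and using it.
 */
static void example_hit(void)
{
	__get_cpu_var(example_hits)++;
}

The one place below that instead uses __raw_get_cpu_var(), the watchdog kthread's access to watchdog_hrtimer, presumably relies on the thread being bound to its CPU, so the unchecked variant avoids a spurious warning from a preemptible context.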

@@ -115,7 +115,7 @@
 /* Commands for resetting the watchdog */
 static void __touch_watchdog(void)
 {
-	int this_cpu = raw_smp_processor_id();
+	int this_cpu = smp_processor_id();
 
 	__get_cpu_var(watchdog_touch_ts) = get_timestamp(this_cpu);
 }
@@ -157,21 +157,21 @@
 
 #ifdef CONFIG_HARDLOCKUP_DETECTOR
 /* watchdog detector functions */
-static int is_hardlockup(int cpu)
+static int is_hardlockup(void)
 {
-	unsigned long hrint = per_cpu(hrtimer_interrupts, cpu);
+	unsigned long hrint = __get_cpu_var(hrtimer_interrupts);
 
-	if (per_cpu(hrtimer_interrupts_saved, cpu) == hrint)
+	if (__get_cpu_var(hrtimer_interrupts_saved) == hrint)
 		return 1;
 
-	per_cpu(hrtimer_interrupts_saved, cpu) = hrint;
+	__get_cpu_var(hrtimer_interrupts_saved) = hrint;
 	return 0;
 }
 #endif
 
-static int is_softlockup(unsigned long touch_ts, int cpu)
+static int is_softlockup(unsigned long touch_ts)
 {
-	unsigned long now = get_timestamp(cpu);
+	unsigned long now = get_timestamp(smp_processor_id());
 
 	/* Warn about unreasonable delays: */
 	if (time_after(now, touch_ts + softlockup_thresh))
@@ -206,8 +206,6 @@
 		 struct perf_sample_data *data,
 		 struct pt_regs *regs)
 {
-	int this_cpu = smp_processor_id();
-
 	if (__get_cpu_var(watchdog_nmi_touch) == true) {
 		__get_cpu_var(watchdog_nmi_touch) = false;
 		return;
@@ -219,7 +217,9 @@
 	 * fired multiple times before we overflow'd. If it hasn't
 	 * then this is a good indication the cpu is stuck
 	 */
-	if (is_hardlockup(this_cpu)) {
+	if (is_hardlockup()) {
+		int this_cpu = smp_processor_id();
+
 		/* only print hardlockups once */
 		if (__get_cpu_var(hard_watchdog_warn) == true)
 			return;
@@ -247,7 +247,6 @@
 /* watchdog kicker functions */
 static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 {
-	int this_cpu = smp_processor_id();
 	unsigned long touch_ts = __get_cpu_var(watchdog_touch_ts);
 	struct pt_regs *regs = get_irq_regs();
 	int duration;
@@ -262,12 +261,12 @@
 	hrtimer_forward_now(hrtimer, ns_to_ktime(get_sample_period()));
 
 	if (touch_ts == 0) {
-		if (unlikely(per_cpu(softlockup_touch_sync, this_cpu))) {
+		if (unlikely(__get_cpu_var(softlockup_touch_sync))) {
 			/*
 			 * If the time stamp was touched atomically
 			 * make sure the scheduler tick is up to date.
 			 */
-			per_cpu(softlockup_touch_sync, this_cpu) = false;
+			__get_cpu_var(softlockup_touch_sync) = false;
 			sched_clock_tick();
 		}
 		__touch_watchdog();
@@ -280,14 +279,14 @@
 	 * indicate it is getting cpu time. If it hasn't then
 	 * this is a good indication some task is hogging the cpu
 	 */
-	duration = is_softlockup(touch_ts, this_cpu);
+	duration = is_softlockup(touch_ts);
 	if (unlikely(duration)) {
 		/* only warn once */
 		if (__get_cpu_var(soft_watchdog_warn) == true)
 			return HRTIMER_RESTART;
 
 		printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
-			this_cpu, duration,
+			smp_processor_id(), duration,
 			current->comm, task_pid_nr(current));
 		print_modules();
 		print_irqtrace_events(current);
@@ -309,10 +308,10 @@
 /*
  * The watchdog thread - touches the timestamp.
  */
-static int watchdog(void *__bind_cpu)
+static int watchdog(void *unused)
 {
 	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
-	struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, (unsigned long)__bind_cpu);
+	struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
 
 	sched_setscheduler(current, SCHED_FIFO, &param);
 
@@ -328,7 +327,7 @@
 	/*
 	 * Run briefly once per second to reset the softlockup timestamp.
	 * If this gets delayed for more than 60 seconds then the
-	 * debug-printout triggers in softlockup_tick().
+	 * debug-printout triggers in watchdog_timer_fn().
 	 */
 	while (!kthread_should_stop()) {
 		__touch_watchdog();