Commit 4145fa7fbee3ec1e61c52825b146192885d9759f

Authored by Paul E. McKenney
Committed by Paul E. McKenney
1 parent 9ceae0e248

rcu: Deconfuse dynticks entry-exit tracing

The trace_rcu_dyntick() trace event did not print both the old and
the new value of the nesting level, and furthermore printed only
the low-order 32 bits of it.  This could result in some confusion
when interpreting trace-event dumps, so this commit prints both
the old and the new value, prints the full 64 bits, and also selects
the process-entry/exit increment to print nicely in hexadecimal.

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
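
The diff below threads an "oldval" snapshot through the idle-entry/exit
paths so that both the previous and the new nesting level reach the
trace point.  As a purely illustrative aid (not kernel code), the
following standalone C sketch mimics that calling convention: the
printf() stands in for the real trace event and mirrors the updated
TP_printk("%s %llx %llx", ...) format, and model_idle_enter() is a
hypothetical stand-in for rcu_idle_enter().

#include <stdio.h>
#include <limits.h>

/* Same value as the DYNTICK_TASK_NESTING definition added by this commit. */
#define DYNTICK_TASK_NESTING (LLONG_MAX / 2 - 1)

static long long dynticks_nesting = DYNTICK_TASK_NESTING;

/* Stand-in for the real trace event; format mirrors the new TP_printk(). */
static void trace_rcu_dyntick(const char *polarity,
			      long long oldnesting, long long newnesting)
{
	printf("rcu_dyntick: %s %llx %llx\n", polarity,
	       (unsigned long long)oldnesting,
	       (unsigned long long)newnesting);
}

/* Models rcu_idle_enter(): snapshot the old nesting level, force it to
 * zero, then hand *both* values to the trace point. */
static void model_idle_enter(void)
{
	long long oldval = dynticks_nesting;

	dynticks_nesting = 0;
	if (dynticks_nesting)
		trace_rcu_dyntick("--=", oldval, dynticks_nesting);
	else
		trace_rcu_dyntick("Start", oldval, dynticks_nesting);
}

int main(void)
{
	model_idle_enter();	/* prints: rcu_dyntick: Start 3ffffffffffffffe 0 */
	return 0;
}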

Showing 4 changed files with 53 additions and 32 deletions

include/trace/events/rcu.h
... ... @@ -246,21 +246,24 @@
246 246 */
247 247 TRACE_EVENT(rcu_dyntick,
248 248  
249   - TP_PROTO(char *polarity, int nesting),
  249 + TP_PROTO(char *polarity, long long oldnesting, long long newnesting),
250 250  
251   - TP_ARGS(polarity, nesting),
  251 + TP_ARGS(polarity, oldnesting, newnesting),
252 252  
253 253 TP_STRUCT__entry(
254 254 __field(char *, polarity)
255   - __field(int, nesting)
  255 + __field(long long, oldnesting)
  256 + __field(long long, newnesting)
256 257 ),
257 258  
258 259 TP_fast_assign(
259 260 __entry->polarity = polarity;
260   - __entry->nesting = nesting;
  261 + __entry->oldnesting = oldnesting;
  262 + __entry->newnesting = newnesting;
261 263 ),
262 264  
263   - TP_printk("%s %d", __entry->polarity, __entry->nesting)
  265 + TP_printk("%s %llx %llx", __entry->polarity,
  266 + __entry->oldnesting, __entry->newnesting)
264 267 );
265 268  
266 269 /*
... ... @@ -470,7 +473,7 @@
470 473 #define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0)
471 474 #define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks) do { } while (0)
472 475 #define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0)
473   -#define trace_rcu_dyntick(polarity, nesting) do { } while (0)
  476 +#define trace_rcu_dyntick(polarity, oldnesting, newnesting) do { } while (0)
474 477 #define trace_rcu_callback(rcuname, rhp, qlen) do { } while (0)
475 478 #define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen) do { } while (0)
476 479 #define trace_rcu_batch_start(rcuname, qlen, blimit) do { } while (0)

kernel/rcu.h
... ... @@ -30,6 +30,13 @@
30 30 #endif /* #else #ifdef CONFIG_RCU_TRACE */
31 31  
32 32 /*
  33 + * Process-level increment to ->dynticks_nesting field. This allows for
  34 + * architectures that use half-interrupts and half-exceptions from
  35 + * process context.
  36 + */
  37 +#define DYNTICK_TASK_NESTING (LLONG_MAX / 2 - 1)
  38 +
  39 +/*
33 40 * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
34 41 * by call_rcu() and rcu callback execution, and are therefore not part of the
35 42 * RCU API. Leaving in rcupdate.h because they are used by all RCU flavors.
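
For readers puzzling over the DYNTICK_TASK_NESTING value added just
above: the standalone snippet below (illustration only, not part of the
commit) shows what that task-level bias looks like in decimal, in hex,
and when truncated to 32 bits as the old "%d" trace format did, which
is the confusion the commit message refers to.

#include <stdio.h>
#include <limits.h>

#define DYNTICK_TASK_NESTING (LLONG_MAX / 2 - 1)	/* as in the hunk above */

int main(void)
{
	long long nesting = DYNTICK_TASK_NESTING;	/* process context */
	long long in_irq = nesting + 1;			/* one interrupt deep */

	printf("task level: %lld (0x%llx)\n",
	       nesting, (unsigned long long)nesting);
	printf("in irq:     %lld (0x%llx)\n",
	       in_irq, (unsigned long long)in_irq);
	/* The old trace format printed only the low-order 32 bits; on a
	 * typical two's-complement system that shows up as just -2. */
	printf("low 32 bits as int: %d\n", (int)nesting);
	return 0;
}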

kernel/rcutiny.c
... ... @@ -53,20 +53,21 @@
53 53  
54 54 #include "rcutiny_plugin.h"
55 55  
56   -static long long rcu_dynticks_nesting = LLONG_MAX / 2;
  56 +static long long rcu_dynticks_nesting = DYNTICK_TASK_NESTING;
57 57  
58 58 /* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcutree.c. */
59   -static void rcu_idle_enter_common(void)
  59 +static void rcu_idle_enter_common(long long oldval)
60 60 {
61 61 if (rcu_dynticks_nesting) {
62   - RCU_TRACE(trace_rcu_dyntick("--=", rcu_dynticks_nesting));
  62 + RCU_TRACE(trace_rcu_dyntick("--=",
  63 + oldval, rcu_dynticks_nesting));
63 64 return;
64 65 }
65   - RCU_TRACE(trace_rcu_dyntick("Start", rcu_dynticks_nesting));
  66 + RCU_TRACE(trace_rcu_dyntick("Start", oldval, rcu_dynticks_nesting));
66 67 if (!idle_cpu(smp_processor_id())) {
67 68 WARN_ON_ONCE(1); /* must be idle task! */
68 69 RCU_TRACE(trace_rcu_dyntick("Error on entry: not idle task",
69   - rcu_dynticks_nesting));
  70 + oldval, rcu_dynticks_nesting));
70 71 ftrace_dump(DUMP_ALL);
71 72 }
72 73 rcu_sched_qs(0); /* implies rcu_bh_qsctr_inc(0) */
... ... @@ -79,10 +80,12 @@
79 80 void rcu_idle_enter(void)
80 81 {
81 82 unsigned long flags;
  83 + long long oldval;
82 84  
83 85 local_irq_save(flags);
  86 + oldval = rcu_dynticks_nesting;
84 87 rcu_dynticks_nesting = 0;
85   - rcu_idle_enter_common();
  88 + rcu_idle_enter_common(oldval);
86 89 local_irq_restore(flags);
87 90 }
88 91  
... ... @@ -92,11 +95,13 @@
92 95 void rcu_irq_exit(void)
93 96 {
94 97 unsigned long flags;
  98 + long long oldval;
95 99  
96 100 local_irq_save(flags);
  101 + oldval = rcu_dynticks_nesting;
97 102 rcu_dynticks_nesting--;
98 103 WARN_ON_ONCE(rcu_dynticks_nesting < 0);
99   - rcu_idle_enter_common();
  104 + rcu_idle_enter_common(oldval);
100 105 local_irq_restore(flags);
101 106 }
102 107  
... ... @@ -104,14 +109,15 @@
104 109 static void rcu_idle_exit_common(long long oldval)
105 110 {
106 111 if (oldval) {
107   - RCU_TRACE(trace_rcu_dyntick("++=", rcu_dynticks_nesting));
  112 + RCU_TRACE(trace_rcu_dyntick("++=",
  113 + oldval, rcu_dynticks_nesting));
108 114 return;
109 115 }
110   - RCU_TRACE(trace_rcu_dyntick("End", oldval));
  116 + RCU_TRACE(trace_rcu_dyntick("End", oldval, rcu_dynticks_nesting));
111 117 if (!idle_cpu(smp_processor_id())) {
112 118 WARN_ON_ONCE(1); /* must be idle task! */
113 119 RCU_TRACE(trace_rcu_dyntick("Error on exit: not idle task",
114   - oldval));
  120 + oldval, rcu_dynticks_nesting));
115 121 ftrace_dump(DUMP_ALL);
116 122 }
117 123 }
... ... @@ -127,7 +133,7 @@
127 133 local_irq_save(flags);
128 134 oldval = rcu_dynticks_nesting;
129 135 WARN_ON_ONCE(oldval != 0);
130   - rcu_dynticks_nesting = LLONG_MAX / 2;
  136 + rcu_dynticks_nesting = DYNTICK_TASK_NESTING;
131 137 rcu_idle_exit_common(oldval);
132 138 local_irq_restore(flags);
133 139 }

kernel/rcutree.c
... ... @@ -196,7 +196,7 @@
196 196 EXPORT_SYMBOL_GPL(rcu_note_context_switch);
197 197  
198 198 DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
199   - .dynticks_nesting = LLONG_MAX / 2,
  199 + .dynticks_nesting = DYNTICK_TASK_NESTING,
200 200 .dynticks = ATOMIC_INIT(1),
201 201 };
202 202  
... ... @@ -348,17 +348,17 @@
348 348 * we really have entered idle, and must do the appropriate accounting.
349 349 * The caller must have disabled interrupts.
350 350 */
351   -static void rcu_idle_enter_common(struct rcu_dynticks *rdtp)
  351 +static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
352 352 {
353 353 if (rdtp->dynticks_nesting) {
354   - trace_rcu_dyntick("--=", rdtp->dynticks_nesting);
  354 + trace_rcu_dyntick("--=", oldval, rdtp->dynticks_nesting);
355 355 return;
356 356 }
357   - trace_rcu_dyntick("Start", rdtp->dynticks_nesting);
  357 + trace_rcu_dyntick("Start", oldval, rdtp->dynticks_nesting);
358 358 if (!idle_cpu(smp_processor_id())) {
359 359 WARN_ON_ONCE(1); /* must be idle task! */
360 360 trace_rcu_dyntick("Error on entry: not idle task",
361   - rdtp->dynticks_nesting);
  361 + oldval, rdtp->dynticks_nesting);
362 362 ftrace_dump(DUMP_ALL);
363 363 }
364 364 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
... ... @@ -383,12 +383,14 @@
383 383 void rcu_idle_enter(void)
384 384 {
385 385 unsigned long flags;
  386 + long long oldval;
386 387 struct rcu_dynticks *rdtp;
387 388  
388 389 local_irq_save(flags);
389 390 rdtp = &__get_cpu_var(rcu_dynticks);
  391 + oldval = rdtp->dynticks_nesting;
390 392 rdtp->dynticks_nesting = 0;
391   - rcu_idle_enter_common(rdtp);
  393 + rcu_idle_enter_common(rdtp, oldval);
392 394 local_irq_restore(flags);
393 395 }
394 396  
... ... @@ -411,13 +413,15 @@
411 413 void rcu_irq_exit(void)
412 414 {
413 415 unsigned long flags;
  416 + long long oldval;
414 417 struct rcu_dynticks *rdtp;
415 418  
416 419 local_irq_save(flags);
417 420 rdtp = &__get_cpu_var(rcu_dynticks);
  421 + oldval = rdtp->dynticks_nesting;
418 422 rdtp->dynticks_nesting--;
419 423 WARN_ON_ONCE(rdtp->dynticks_nesting < 0);
420   - rcu_idle_enter_common(rdtp);
  424 + rcu_idle_enter_common(rdtp, oldval);
421 425 local_irq_restore(flags);
422 426 }
423 427  
... ... @@ -431,7 +435,7 @@
431 435 static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
432 436 {
433 437 if (oldval) {
434   - trace_rcu_dyntick("++=", rdtp->dynticks_nesting);
  438 + trace_rcu_dyntick("++=", oldval, rdtp->dynticks_nesting);
435 439 return;
436 440 }
437 441 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
... ... @@ -439,10 +443,11 @@
439 443 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
440 444 smp_mb__after_atomic_inc(); /* See above. */
441 445 WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
442   - trace_rcu_dyntick("End", oldval);
  446 + trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
443 447 if (!idle_cpu(smp_processor_id())) {
444 448 WARN_ON_ONCE(1); /* must be idle task! */
445   - trace_rcu_dyntick("Error on exit: not idle task", oldval);
  449 + trace_rcu_dyntick("Error on exit: not idle task",
  450 + oldval, rdtp->dynticks_nesting);
446 451 ftrace_dump(DUMP_ALL);
447 452 }
448 453 }
... ... @@ -453,8 +458,8 @@
453 458 * Exit idle mode, in other words, -enter- the mode in which RCU
454 459 * read-side critical sections can occur.
455 460 *
456   - * We crowbar the ->dynticks_nesting field to LLONG_MAX/2 to allow for
457   - * the possibility of usermode upcalls messing up our count
  461 + * We crowbar the ->dynticks_nesting field to DYNTICK_TASK_NESTING to
  462 + * allow for the possibility of usermode upcalls messing up our count
458 463 * of interrupt nesting level during the busy period that is just
459 464 * now starting.
460 465 */
... ... @@ -468,7 +473,7 @@
468 473 rdtp = &__get_cpu_var(rcu_dynticks);
469 474 oldval = rdtp->dynticks_nesting;
470 475 WARN_ON_ONCE(oldval != 0);
471   - rdtp->dynticks_nesting = LLONG_MAX / 2;
  476 + rdtp->dynticks_nesting = DYNTICK_TASK_NESTING;
472 477 rcu_idle_exit_common(rdtp, oldval);
473 478 local_irq_restore(flags);
474 479 }
... ... @@ -2012,7 +2017,7 @@
2012 2017 rdp->nxttail[i] = &rdp->nxtlist;
2013 2018 rdp->qlen = 0;
2014 2019 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
2015   - WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != LLONG_MAX / 2);
  2020 + WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_NESTING);
2016 2021 WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
2017 2022 rdp->cpu = cpu;
2018 2023 rdp->rsp = rsp;
... ... @@ -2040,7 +2045,7 @@
2040 2045 rdp->qlen_last_fqs_check = 0;
2041 2046 rdp->n_force_qs_snap = rsp->n_force_qs;
2042 2047 rdp->blimit = blimit;
2043   - WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != LLONG_MAX / 2);
  2048 + WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_NESTING);
2044 2049 WARN_ON_ONCE((atomic_read(&rdp->dynticks->dynticks) & 0x1) != 1);
2045 2050 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
2046 2051