Commit a30489c5228fba6f16b4c740a0292879ef13371e
Committed by: Paul E. McKenney
Parent: 40694d6644
Exists in: smarc-l5.0.0_1.0.0-ga and 5 other branches
rcu: Instrument synchronize_rcu_expedited() for debugfs tracing
This commit adds the counters to rcu_state and updates them in synchronize_rcu_expedited() to provide the data needed for debugfs tracing.

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
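The debugfs consumer itself is not part of this commit; only the counters are added here. As a rough, hypothetical sketch of how such counters could later be exposed, the snippet below dumps them through a seq_file read. The function name, output format, and the assumption that the rcu_state pointer is stashed in the seq_file's ->private field are illustrative, not taken from this change.

#include <linux/debugfs.h>
#include <linux/seq_file.h>

/* Hypothetical seq_file "show" routine dumping the new expedited counters. */
static int rcuexp_show(struct seq_file *m, void *unused)
{
        struct rcu_state *rsp = m->private;     /* assumed to be set at open time */

        seq_printf(m, "wrap=%ld tf=%ld wd1=%ld wd2=%ld n=%ld sc=%ld dt=%ld dl=%ld dx=%ld\n",
                   atomic_long_read(&rsp->expedited_wrap),
                   atomic_long_read(&rsp->expedited_tryfail),
                   atomic_long_read(&rsp->expedited_workdone1),
                   atomic_long_read(&rsp->expedited_workdone2),
                   atomic_long_read(&rsp->expedited_normal),
                   atomic_long_read(&rsp->expedited_stoppedcpus),
                   atomic_long_read(&rsp->expedited_done_tries),
                   atomic_long_read(&rsp->expedited_done_lost),
                   atomic_long_read(&rsp->expedited_done_exit));
        return 0;
}

A single_open()-based file under the existing RCU debugfs directory could then make these counters readable from user space; that wiring is a later step.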
Showing 2 changed files with 24 additions and 3 deletions
kernel/rcutree.c
@@ -2321,6 +2321,7 @@
                          (ulong)atomic_long_read(&rsp->expedited_done) +
                          ULONG_MAX / 8)) {
                synchronize_sched();
+               atomic_long_inc(&rsp->expedited_wrap);
                return;
        }

@@ -2341,11 +2342,14 @@
                             synchronize_sched_expedited_cpu_stop,
                             NULL) == -EAGAIN) {
                put_online_cpus();
+               atomic_long_inc(&rsp->expedited_tryfail);

                /* Check to see if someone else did our work for us. */
                s = atomic_long_read(&rsp->expedited_done);
                if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
-                       smp_mb(); /* ensure test happens before caller kfree */
+                       /* ensure test happens before caller kfree */
+                       smp_mb__before_atomic_inc(); /* ^^^ */
+                       atomic_long_inc(&rsp->expedited_workdone1);
                        return;
                }

@@ -2354,13 +2358,16 @@
                        udelay(trycount * num_online_cpus());
                } else {
                        synchronize_sched();
+                       atomic_long_inc(&rsp->expedited_normal);
                        return;
                }

                /* Recheck to see if someone else did our work for us. */
                s = atomic_long_read(&rsp->expedited_done);
                if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
-                       smp_mb(); /* ensure test happens before caller kfree */
+                       /* ensure test happens before caller kfree */
+                       smp_mb__before_atomic_inc(); /* ^^^ */
+                       atomic_long_inc(&rsp->expedited_workdone2);
                        return;
                }

@@ -2375,6 +2382,7 @@
                snap = atomic_long_read(&rsp->expedited_start);
                smp_mb(); /* ensure read is before try_stop_cpus(). */
        }
+       atomic_long_inc(&rsp->expedited_stoppedcpus);

        /*
         * Everyone up to our most recent fetch is covered by our grace
@@ -2383,12 +2391,16 @@
         * than we did already did their update.
         */
        do {
+               atomic_long_inc(&rsp->expedited_done_tries);
                s = atomic_long_read(&rsp->expedited_done);
                if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
-                       smp_mb(); /* ensure test happens before caller kfree */
+                       /* ensure test happens before caller kfree */
+                       smp_mb__before_atomic_inc(); /* ^^^ */
+                       atomic_long_inc(&rsp->expedited_done_lost);
                        break;
                }
        } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
+       atomic_long_inc(&rsp->expedited_done_exit);

        put_online_cpus();
 }
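One detail worth noting in the hunks above: each smp_mb() that guarded a "someone else did our work" exit is replaced by smp_mb__before_atomic_inc() immediately followed by the new statistics increment. On architectures whose atomic read-modify-write operations already act as full barriers, smp_mb__before_atomic_inc() reduces to a compiler barrier, so the required "test before the caller's kfree" ordering is kept without paying for a second full barrier. Below is a minimal sketch of the idiom, using illustrative names rather than anything added by this commit.

#include <linux/atomic.h>
#include <linux/rcupdate.h>     /* ULONG_CMP_GE() */

static atomic_long_t nr_done_early;     /* hypothetical statistics counter */

/* Returns true if another CPU's expedited grace period already covered us. */
static bool work_already_done(long s, long firstsnap)
{
        if (ULONG_CMP_GE((unsigned long)s, (unsigned long)firstsnap)) {
                /* Order the "done" test before the caller's later kfree(). */
                smp_mb__before_atomic_inc();
                atomic_long_inc(&nr_done_early);
                return true;
        }
        return false;
}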
kernel/rcutree.h
@@ -406,6 +406,15 @@

        atomic_long_t expedited_start;          /* Starting ticket. */
        atomic_long_t expedited_done;           /* Done ticket. */
+       atomic_long_t expedited_wrap;           /* # near-wrap incidents. */
+       atomic_long_t expedited_tryfail;        /* # acquisition failures. */
+       atomic_long_t expedited_workdone1;      /* # done by others #1. */
+       atomic_long_t expedited_workdone2;      /* # done by others #2. */
+       atomic_long_t expedited_normal;         /* # fallbacks to normal. */
+       atomic_long_t expedited_stoppedcpus;    /* # successful stop_cpus. */
+       atomic_long_t expedited_done_tries;     /* # tries to update _done. */
+       atomic_long_t expedited_done_lost;      /* # times beaten to _done. */
+       atomic_long_t expedited_done_exit;      /* # times exited _done loop. */

        unsigned long jiffies_force_qs;         /* Time at which to invoke */
                                                /*  force_quiescent_state(). */