Commit a488985851cf2facd2227bd982cc2c251df56268

Authored by Paul E. McKenney
Committed by Paul E. McKenney
1 parent 09c7b89062

rcu: Distinguish "rcuo" kthreads by RCU flavor

Currently, the per-no-CBs-CPU kthreads are named "rcuo" followed by
the CPU number, for example, "rcuo0" for CPU 0.  This is problematic given that
there are either two or three RCU flavors, each of which gets a per-CPU
kthread with exactly the same name.  This commit therefore introduces
a one-letter abbreviation for each RCU flavor, namely 'b' for RCU-bh,
'p' for RCU-preempt, and 's' for RCU-sched.  This abbreviation is used
to distinguish the "rcuo" kthreads, for example, for CPU 0 we would have
"rcuob/0", "rcuop/0", and "rcuos/0".

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Tested-by: Dietmar Eggemann <dietmar.eggemann@arm.com>

Showing 5 changed files with 20 additions and 13 deletions Side-by-side Diff

Documentation/kernel-parameters.txt
... ... @@ -2461,9 +2461,12 @@
2461 2461 In kernels built with CONFIG_RCU_NOCB_CPU=y, set
2462 2462 the specified list of CPUs to be no-callback CPUs.
2463 2463 Invocation of these CPUs' RCU callbacks will
2464   - be offloaded to "rcuoN" kthreads created for
2465   - that purpose. This reduces OS jitter on the
  2464 + be offloaded to "rcuox/N" kthreads created for
  2465 + that purpose, where "x" is "b" for RCU-bh, "p"
  2466 + for RCU-preempt, and "s" for RCU-sched, and "N"
  2467 + is the CPU number. This reduces OS jitter on the
2466 2468 offloaded CPUs, which can be useful for HPC and
  2469 +
2467 2470 real-time workloads. It can also improve energy
2468 2471 efficiency for asymmetric multiprocessors.
2469 2472  
init/Kconfig
... ... @@ -666,12 +666,13 @@
666 666  
667 667 This option offloads callback invocation from the set of
668 668 CPUs specified at boot time by the rcu_nocbs parameter.
669   - For each such CPU, a kthread ("rcuoN") will be created to
670   - invoke callbacks, where the "N" is the CPU being offloaded.
671   - Nothing prevents this kthread from running on the specified
672   - CPUs, but (1) the kthreads may be preempted between each
673   - callback, and (2) affinity or cgroups can be used to force
674   - the kthreads to run on whatever set of CPUs is desired.
  669 + For each such CPU, a kthread ("rcuox/N") will be created to
  670 + invoke callbacks, where the "N" is the CPU being offloaded,
  671 + and where the "x" is "b" for RCU-bh, "p" for RCU-preempt, and
  672 + "s" for RCU-sched. Nothing prevents this kthread from running
  673 + on the specified CPUs, but (1) the kthreads may be preempted
  674 + between each callback, and (2) affinity or cgroups can be used
  675 + to force the kthreads to run on whatever set of CPUs is desired.
675 676  
676 677 Say Y here if you want to help to debug reduced OS jitter.
677 678 Say N here if you are unsure.
kernel/rcutree.c
... ... @@ -64,7 +64,7 @@
64 64 static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
65 65 static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
66 66  
67   -#define RCU_STATE_INITIALIZER(sname, cr) { \
  67 +#define RCU_STATE_INITIALIZER(sname, sabbr, cr) { \
68 68 .level = { &sname##_state.node[0] }, \
69 69 .call = cr, \
70 70 .fqs_state = RCU_GP_IDLE, \
71 71  
72 72  
... ... @@ -76,13 +76,14 @@
76 76 .barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
77 77 .onoff_mutex = __MUTEX_INITIALIZER(sname##_state.onoff_mutex), \
78 78 .name = #sname, \
  79 + .abbr = sabbr, \
79 80 }
80 81  
81 82 struct rcu_state rcu_sched_state =
82   - RCU_STATE_INITIALIZER(rcu_sched, call_rcu_sched);
  83 + RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
83 84 DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);
84 85  
85   -struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh, call_rcu_bh);
  86 +struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh);
86 87 DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
87 88  
88 89 static struct rcu_state *rcu_state;
kernel/rcutree.h
... ... @@ -443,6 +443,7 @@
443 443 unsigned long gp_max; /* Maximum GP duration in */
444 444 /* jiffies. */
445 445 char *name; /* Name of structure. */
  446 + char abbr; /* Abbreviated name. */
446 447 struct list_head flavors; /* List of RCU flavors. */
447 448 };
448 449  
kernel/rcutree_plugin.h
... ... @@ -111,7 +111,7 @@
111 111 #ifdef CONFIG_TREE_PREEMPT_RCU
112 112  
113 113 struct rcu_state rcu_preempt_state =
114   - RCU_STATE_INITIALIZER(rcu_preempt, call_rcu);
  114 + RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu);
115 115 DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
116 116 static struct rcu_state *rcu_state = &rcu_preempt_state;
117 117  
... ... @@ -2517,7 +2517,8 @@
2517 2517 return;
2518 2518 for_each_cpu(cpu, rcu_nocb_mask) {
2519 2519 rdp = per_cpu_ptr(rsp->rda, cpu);
2520   - t = kthread_run(rcu_nocb_kthread, rdp, "rcuo%d", cpu);
  2520 + t = kthread_run(rcu_nocb_kthread, rdp,
  2521 + "rcuo%c/%d", rsp->abbr, cpu);
2521 2522 BUG_ON(IS_ERR(t));
2522 2523 ACCESS_ONCE(rdp->nocb_kthread) = t;
2523 2524 }