kernel/context_tracking.c

/*
 * Context tracking: Probe on high level context boundaries such as kernel
 * and userspace. This includes syscalls and exceptions entry/exit.
 *
 * This is used by RCU to remove its dependency on the timer tick while a CPU
 * runs in userspace.
 *
 * Started by Frederic Weisbecker:
 *
 * Copyright (C) 2012 Red Hat, Inc., Frederic Weisbecker <fweisbec@redhat.com>
 *
 * Many thanks to Gilad Ben-Yossef, Paul McKenney, Ingo Molnar, Andrew Morton,
 * Steven Rostedt, Peter Zijlstra for suggestions and improvements.
 *
 */
#include <linux/context_tracking.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/hardirq.h>
#include <linux/export.h>
#include <linux/kprobes.h>

#define CREATE_TRACE_POINTS
#include <trace/events/context_tracking.h>

DEFINE_STATIC_KEY_FALSE(context_tracking_enabled);
EXPORT_SYMBOL_GPL(context_tracking_enabled);

DEFINE_PER_CPU(struct context_tracking, context_tracking);
EXPORT_SYMBOL_GPL(context_tracking);

static bool context_tracking_recursion_enter(void)
{
	int recursion;

	recursion = __this_cpu_inc_return(context_tracking.recursion);
	if (recursion == 1)
		return true;

	WARN_ONCE((recursion < 1),
		  "Invalid context tracking recursion value %d\n", recursion);
	__this_cpu_dec(context_tracking.recursion);

	return false;
}

static void context_tracking_recursion_exit(void)
{
	__this_cpu_dec(context_tracking.recursion);
}
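
/*
 * The two helpers above form a per-CPU reentrancy guard. Every update path
 * in this file follows the same sketch:
 *
 *	if (!context_tracking_recursion_enter())
 *		return;
 *	...read/write per-CPU context_tracking state...
 *	context_tracking_recursion_exit();
 *
 * so that a hook firing in the middle of an update (e.g. a vtime or tracing
 * callback) can't recurse into the state machine.
 */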

/**
 * context_tracking_enter - Inform the context tracking that the CPU is going
 *                          to enter user or guest space mode.
 *
 * This function must be called right before we switch from the kernel
 * to user or guest space, when it's guaranteed the remaining kernel
 * instructions to execute won't use any RCU read side critical section
 * because this function sets RCU in extended quiescent state.
 */
void __context_tracking_enter(enum ctx_state state)
{
	/* Kernel threads aren't supposed to go to userspace */
	WARN_ON_ONCE(!current->mm);

	if (!context_tracking_recursion_enter())
		return;

	if (__this_cpu_read(context_tracking.state) != state) {
		if (__this_cpu_read(context_tracking.active)) {
			/*
			 * At this stage, only low level arch entry code remains and
			 * then we'll run in userspace. We can assume there won't be
			 * any RCU read-side critical section until the next call to
			 * user_exit() or rcu_irq_enter(). Let's remove RCU's dependency
			 * on the tick.
			 */
			if (state == CONTEXT_USER) {
				trace_user_enter(0);
				vtime_user_enter(current);
			}
			rcu_user_enter();
		}
		/*
		 * Even if context tracking is disabled on this CPU, because it's outside
		 * the full dynticks mask for example, we still have to keep track of the
		 * context transitions and states to prevent inconsistency on those of
		 * other CPUs.
		 * If a task triggers an exception in userspace, sleeps in the exception
		 * handler and then migrates to another CPU, that new CPU must know where
		 * the exception returns by the time we call exception_exit().
		 * This information can only be provided by the previous CPU when it called
		 * exception_enter().
		 * OTOH we can spare the calls to vtime and RCU when context_tracking.active
		 * is false because we know that CPU is not tickless.
		 */
		__this_cpu_write(context_tracking.state, state);
	}
	context_tracking_recursion_exit();
}
NOKPROBE_SYMBOL(__context_tracking_enter);
EXPORT_SYMBOL_GPL(__context_tracking_enter);

void context_tracking_enter(enum ctx_state state)
{
	unsigned long flags;

	/*
	 * Some contexts may involve an exception occurring in an irq,
	 * leading to that nesting:
	 * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
	 * This would mess up the dyntick_nesting count though. And rcu_irq_*()
	 * helpers are enough to protect RCU uses inside the exception. So
	 * just return immediately if we detect we are in an IRQ.
	 */
	if (in_interrupt())
		return;

	local_irq_save(flags);
	__context_tracking_enter(state);
	local_irq_restore(flags);
}
NOKPROBE_SYMBOL(context_tracking_enter);
EXPORT_SYMBOL_GPL(context_tracking_enter);
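
/*
 * Note on the __ variants: they are assumed to exist for callers that
 * already run with IRQs disabled, e.g. KVM's guest entry/exit paths
 * calling __context_tracking_enter(CONTEXT_GUEST) /
 * __context_tracking_exit(CONTEXT_GUEST) directly, skipping the
 * local_irq_save()/local_irq_restore() of the wrappers above.
 */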

void context_tracking_user_enter(void)
{
	user_enter();
}
NOKPROBE_SYMBOL(context_tracking_user_enter);
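
/*
 * For reference, a rough sketch (assumed, from <linux/context_tracking.h>
 * of this era; not defined in this file) of the inline wrapper most callers
 * use, which tests the static key before taking the slow path:
 *
 *	static inline void user_enter(void)
 *	{
 *		if (context_tracking_is_enabled())
 *			context_tracking_enter(CONTEXT_USER);
 *	}
 *
 * context_tracking_user_enter() above exists as an out-of-line,
 * kprobe-blacklisted symbol so that architecture entry code can call it.
 */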

/**
 * context_tracking_exit - Inform the context tracking that the CPU is
 *                         exiting user or guest mode and entering the kernel.
 *
 * This function must be called after we entered the kernel from user or
 * guest space before any use of RCU read side critical section. This
 * potentially includes any high level kernel code like syscalls, exceptions,
 * signal handling, etc...
 *
 * This call supports re-entrancy. This way it can be called from any exception
 * handler without needing to know if we came from userspace or not.
 */
void __context_tracking_exit(enum ctx_state state)
{
	if (!context_tracking_recursion_enter())
		return;

	if (__this_cpu_read(context_tracking.state) == state) {
		if (__this_cpu_read(context_tracking.active)) {
			/*
			 * We are going to run code that may use RCU. Inform
			 * RCU core about that (ie: we may need the tick again).
			 */
			rcu_user_exit();
			if (state == CONTEXT_USER) {
				vtime_user_exit(current);
				trace_user_exit(0);
			}
		}
		__this_cpu_write(context_tracking.state, CONTEXT_KERNEL);
	}
	context_tracking_recursion_exit();
}
NOKPROBE_SYMBOL(__context_tracking_exit);
EXPORT_SYMBOL_GPL(__context_tracking_exit);

void context_tracking_exit(enum ctx_state state)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);
	__context_tracking_exit(state);
	local_irq_restore(flags);
}
NOKPROBE_SYMBOL(context_tracking_exit);
EXPORT_SYMBOL_GPL(context_tracking_exit);

void context_tracking_user_exit(void)
{
	user_exit();
}
NOKPROBE_SYMBOL(context_tracking_user_exit);
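
/*
 * Usage sketch for the re-entrancy described in the context_tracking_exit()
 * docs (exception_enter()/exception_exit() are assumed header helpers built
 * on user_exit()/user_enter(); shown for illustration only):
 *
 *	enum ctx_state prev_state;
 *
 *	prev_state = exception_enter();
 *	...handle the exception, which may itself fault or migrate...
 *	exception_exit(prev_state);
 *
 * exception_exit() re-enters user mode bookkeeping only if prev_state was
 * CONTEXT_USER, which is what lets handlers nest without knowing whether
 * they interrupted userspace or the kernel.
 */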

void __init context_tracking_cpu_set(int cpu)
{
	static __initdata bool initialized = false;

	if (!per_cpu(context_tracking.active, cpu)) {
		per_cpu(context_tracking.active, cpu) = true;
		static_branch_inc(&context_tracking_enabled);
	}

	if (initialized)
		return;

	/*
	 * Set TIF_NOHZ to init/0 and let it propagate to all tasks through fork
	 * This assumes that init is the only task at this early boot stage.
	 */
	set_tsk_thread_flag(&init_task, TIF_NOHZ);
	WARN_ON_ONCE(!tasklist_empty());

	initialized = true;
}
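
/*
 * Expected caller (an assumption about code outside this file): on
 * nohz_full configurations, the tick init code is expected to enable
 * tracking for each full-dynticks CPU during early boot, roughly:
 *
 *	for_each_cpu(cpu, tick_nohz_full_mask)
 *		context_tracking_cpu_set(cpu);
 */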

#ifdef CONFIG_CONTEXT_TRACKING_FORCE
void __init context_tracking_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		context_tracking_cpu_set(cpu);
}
#endif