kernel/context_tracking.c

/*
 * Context tracking: Probe on high level context boundaries such as kernel
 * and userspace. This includes syscall and exception entry/exit.
 *
 * This is used by RCU to remove its dependency on the timer tick while a CPU
 * runs in userspace.
 *
 * Started by Frederic Weisbecker:
 *
 * Copyright (C) 2012 Red Hat, Inc., Frederic Weisbecker <fweisbec@redhat.com>
 *
 * Many thanks to Gilad Ben-Yossef, Paul McKenney, Ingo Molnar, Andrew Morton,
 * Steven Rostedt, Peter Zijlstra for suggestions and improvements.
 *
 */
#include <linux/context_tracking.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/hardirq.h> |
#include <linux/export.h> |
#include <linux/kprobes.h> |
|
#define CREATE_TRACE_POINTS
#include <trace/events/context_tracking.h>
struct static_key context_tracking_enabled = STATIC_KEY_INIT_FALSE; |
EXPORT_SYMBOL_GPL(context_tracking_enabled); |

DEFINE_PER_CPU(struct context_tracking, context_tracking);
EXPORT_SYMBOL_GPL(context_tracking); |
|
void context_tracking_cpu_set(int cpu)
{
        if (!per_cpu(context_tracking.active, cpu)) {
                per_cpu(context_tracking.active, cpu) = true;
                static_key_slow_inc(&context_tracking_enabled);
        }
} |
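
/*
 * Illustrative sketch (not from this file): context_tracking_cpu_set() is
 * meant to be called once for each CPU that needs the boundary probes,
 * typically when full dynticks is brought up. The nohz code does roughly:
 *
 *      void __init tick_nohz_init(void)
 *      {
 *              int cpu;
 *              ...
 *              for_each_cpu(cpu, tick_nohz_full_mask)
 *                      context_tracking_cpu_set(cpu);
 *      }
 *
 * The static key is incremented once per newly activated CPU, so the
 * user_enter()/user_exit() fast path stays a no-op until at least one
 * CPU is tracked.
 */
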
/** |
 * context_tracking_user_enter - Inform the context tracking that the CPU is going to
 *                               enter userspace mode.
 *
 * This function must be called right before we switch from the kernel
 * to userspace, when it's guaranteed that the remaining kernel instructions
 * to be executed won't use any RCU read side critical section, because this
 * function puts RCU in an extended quiescent state.
 */
void context_tracking_user_enter(void) |
{
        unsigned long flags;

        /*
         * Repeat the user_enter() check here because some archs may be calling
         * this from asm and if no CPU needs context tracking, they shouldn't
         * go further. Repeat the check here until they support the inline static
         * key check.
         */
        if (!context_tracking_is_enabled())
                return;

        /*
         * Some contexts may involve an exception occurring in an irq,
         * leading to that nesting:
         * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
         * This would mess up the dyntick_nesting count though. And rcu_irq_*()
         * helpers are enough to protect RCU uses inside the exception. So
         * just return immediately if we detect we are in an IRQ.
         */
        if (in_interrupt())
                return;
        /* Kernel threads aren't supposed to go to userspace */
        WARN_ON_ONCE(!current->mm);

        local_irq_save(flags);
        if (__this_cpu_read(context_tracking.state) != IN_USER) {
                if (__this_cpu_read(context_tracking.active)) {
                        trace_user_enter(0);
                        /*
                         * At this stage, only low level arch entry code remains and
                         * then we'll run in userspace. We can assume there won't be
                         * any RCU read-side critical section until the next call to
                         * user_exit() or rcu_irq_enter(). Let's remove RCU's dependency
                         * on the tick.
                         */
                        vtime_user_enter(current);
                        rcu_user_enter();
                }
                /*
                 * Even if context tracking is disabled on this CPU, because it's outside
                 * the full dynticks mask for example, we still have to keep track of the
                 * context transitions and states to prevent inconsistency on those of
                 * other CPUs.
                 * If a task triggers an exception in userspace, sleeps in the exception
                 * handler and then migrates to another CPU, that new CPU must know where
                 * the exception returns by the time we call exception_exit().
                 * This information can only be provided by the previous CPU when it called
                 * exception_enter().
                 * OTOH we can spare the calls to vtime and RCU when context_tracking.active
                 * is false because we know that CPU is not tickless.
                 */
                __this_cpu_write(context_tracking.state, IN_USER);
        }
        local_irq_restore(flags);
}
NOKPROBE_SYMBOL(context_tracking_user_enter); |
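
/*
 * Illustrative sketch (not from this file): callers are expected to go
 * through the user_enter()/user_exit() wrappers from
 * <linux/context_tracking.h>, which hide the probe behind the static key,
 * along these lines:
 *
 *      static inline void user_enter(void)
 *      {
 *              if (context_tracking_is_enabled())
 *                      context_tracking_user_enter();
 *      }
 *
 * Archs that call the probe directly from asm can't use this inline static
 * key check, which is why context_tracking_user_enter() repeats it above.
 */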
|
/** |
 * context_tracking_user_exit - Inform the context tracking that the CPU is
 *                              exiting userspace mode and entering the kernel.
 *
 * This function must be called after we enter the kernel from userspace and
 * before any use of an RCU read side critical section. This potentially
 * includes any high level kernel code like syscalls, exceptions, signal
 * handling, etc...
 *
 * This call supports re-entrancy. This way it can be called from any exception
 * handler without needing to know if we came from userspace or not.
 */
void context_tracking_user_exit(void) |
{
        unsigned long flags;

        if (!context_tracking_is_enabled())
                return;
        if (in_interrupt())
                return;

        local_irq_save(flags);
        if (__this_cpu_read(context_tracking.state) == IN_USER) {
                if (__this_cpu_read(context_tracking.active)) {
                        /*
                         * We are going to run code that may use RCU. Inform
                         * RCU core about that (ie: we may need the tick again).
                         */
                        rcu_user_exit();
                        vtime_user_exit(current);
                        trace_user_exit(0);
                }
                __this_cpu_write(context_tracking.state, IN_KERNEL);
        }
        local_irq_restore(flags);
}
NOKPROBE_SYMBOL(context_tracking_user_exit); |
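
/*
 * Illustrative sketch (not from this file): the re-entrancy described above
 * is what lets the exception helpers save and restore the per-CPU context
 * state around an exception handler, roughly:
 *
 *      static inline enum ctx_state exception_enter(void)
 *      {
 *              enum ctx_state prev_ctx;
 *
 *              prev_ctx = this_cpu_read(context_tracking.state);
 *              context_tracking_user_exit();
 *              return prev_ctx;
 *      }
 *
 *      static inline void exception_exit(enum ctx_state prev_ctx)
 *      {
 *              if (prev_ctx == IN_USER)
 *                      context_tracking_user_enter();
 *      }
 *
 * Since a task may migrate between exception_enter() and exception_exit(),
 * the saved state travels with the call stack rather than with the CPU.
 */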
|
/** |
 * __context_tracking_task_switch - context switch the syscall callbacks
 * @prev: the task that is being switched out
 * @next: the task that is being switched in
 *
 * The context tracking uses the syscall slow path to implement its user-kernel
 * boundary probes on syscalls. This way it doesn't impact the syscall fast
 * path on CPUs that don't do context tracking.
 *
 * But we need to clear the flag on the previous task because it may later
 * migrate to some CPU that doesn't do the context tracking. As such the TIF
 * flag may not be desired there.
 */
void __context_tracking_task_switch(struct task_struct *prev,
                                    struct task_struct *next)
{ |
        clear_tsk_thread_flag(prev, TIF_NOHZ);
        set_tsk_thread_flag(next, TIF_NOHZ);
} |
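
/*
 * Illustrative sketch (not from this file): like the other probes, the task
 * switch hook is wrapped behind the static key so the scheduler fast path
 * pays nothing when context tracking is off, roughly:
 *
 *      static inline void context_tracking_task_switch(struct task_struct *prev,
 *                                                      struct task_struct *next)
 *      {
 *              if (context_tracking_is_enabled())
 *                      __context_tracking_task_switch(prev, next);
 *      }
 */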

#ifdef CONFIG_CONTEXT_TRACKING_FORCE
void __init context_tracking_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                context_tracking_cpu_set(cpu);
}
#endif
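
/*
 * Illustrative sketch (not from this file): with CONFIG_CONTEXT_TRACKING_FORCE,
 * every possible CPU is activated at boot, which is mainly useful to stress
 * test this subsystem without a full dynticks setup. context_tracking_init()
 * is expected to be called early from start_kernel(), and is defined as an
 * empty inline when the option is off:
 *
 *      asmlinkage __visible void __init start_kernel(void)
 *      {
 *              ...
 *              context_tracking_init();
 *              ...
 *      }
 */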