kernel/context_tracking.c
/*
 * Context tracking: Probe on high level context boundaries such as kernel
 * and userspace. This includes syscalls and exceptions entry/exit.
 *
 * This is used by RCU to remove its dependency on the timer tick while a CPU
 * runs in userspace.
 *
 * Started by Frederic Weisbecker:
 *
 * Copyright (C) 2012 Red Hat, Inc., Frederic Weisbecker <fweisbec@redhat.com>
 *
 * Many thanks to Gilad Ben-Yossef, Paul McKenney, Ingo Molnar, Andrew Morton,
 * Steven Rostedt, Peter Zijlstra for suggestions and improvements.
 *
 */
#include <linux/context_tracking.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/hardirq.h>
#include <linux/export.h>

DEFINE_PER_CPU(struct context_tracking, context_tracking) = {
#ifdef CONFIG_CONTEXT_TRACKING_FORCE
	.active = true,
#endif
};
/**
 * user_enter - Inform the context tracking that the CPU is going to
 *              enter userspace mode.
 *
 * This function must be called right before we switch from the kernel
 * to userspace, when it's guaranteed the remaining kernel instructions
 * to execute won't use any RCU read side critical section because this
 * function sets RCU in extended quiescent state.
 */
void user_enter(void)
{
	unsigned long flags;

	/*
	 * Some contexts may involve an exception occurring in an irq,
	 * leading to that nesting:
	 * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
	 * This would mess up the dyntick_nesting count though. And rcu_irq_*()
	 * helpers are enough to protect RCU uses inside the exception. So
	 * just return immediately if we detect we are in an IRQ.
	 */
	if (in_interrupt())
		return;

	/* Kernel threads aren't supposed to go to userspace */
	WARN_ON_ONCE(!current->mm);

	local_irq_save(flags);
	if (__this_cpu_read(context_tracking.active) &&
	    __this_cpu_read(context_tracking.state) != IN_USER) {
		/*
		 * At this stage, only low level arch entry code remains and
		 * then we'll run in userspace. We can assume there won't be
		 * any RCU read-side critical section until the next call to
		 * user_exit() or rcu_irq_enter(). Let's remove RCU's dependency
		 * on the tick.
		 */
		vtime_user_enter(current);
		rcu_user_enter();
		__this_cpu_write(context_tracking.state, IN_USER);
	}
	local_irq_restore(flags);
}
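
/*
 * Illustrative sketch (not part of this file, all names below are
 * hypothetical): on a context tracking enabled architecture, the slow
 * syscall/exception return path is expected to call user_enter() as its
 * last high level step before the low level return to userspace:
 *
 *	void syscall_return_slowpath(struct pt_regs *regs)
 *	{
 *		do_pending_work(regs);	// signals, resched, etc. (hypothetical)
 *		user_enter();		// last call: RCU enters extended QS
 *	}
 */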
#ifdef CONFIG_PREEMPT
/**
 * preempt_schedule_context - preempt_schedule called by tracing
 *
 * The tracing infrastructure uses preempt_enable_notrace to prevent
 * recursion and tracing preempt enabling caused by the tracing
 * infrastructure itself. But as tracing can happen in areas coming
 * from userspace or just about to enter userspace, a preempt enable
 * can occur before user_exit() is called. This will cause the scheduler
 * to be called when the system is still in usermode.
 *
 * To prevent this, the preempt_enable_notrace will use this function
 * instead of preempt_schedule() to exit user context if needed before
 * calling the scheduler.
 */
void __sched notrace preempt_schedule_context(void)
{
	struct thread_info *ti = current_thread_info();
	enum ctx_state prev_ctx;

	if (likely(ti->preempt_count || irqs_disabled()))
		return;

	/*
	 * Need to disable preemption in case user_exit() is traced
	 * and the tracer calls preempt_enable_notrace() causing
	 * an infinite recursion.
	 */
	preempt_disable_notrace();
	prev_ctx = exception_enter();
	preempt_enable_no_resched_notrace();

	preempt_schedule();

	preempt_disable_notrace();
	exception_exit(prev_ctx);
	preempt_enable_notrace();
}
EXPORT_SYMBOL_GPL(preempt_schedule_context);
#endif /* CONFIG_PREEMPT */
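
/*
 * Illustrative sketch (an assumption, not part of this file): a tracer
 * callback that may fire just before user_exit() would pair its notrace
 * preempt operations like this, so that any reschedule triggered by the
 * enable goes through preempt_schedule_context() rather than
 * preempt_schedule():
 *
 *	preempt_disable_notrace();
 *	do_tracer_work();		// hypothetical tracer callback body
 *	preempt_enable_notrace();	// may call preempt_schedule_context()
 */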
/**
 * user_exit - Inform the context tracking that the CPU is
 *             exiting userspace mode and entering the kernel.
 *
 * This function must be called after we entered the kernel from userspace,
 * before any use of RCU read side critical sections. This potentially includes
 * any high level kernel code like syscalls, exceptions, signal handling, etc...
 *
 * This call supports re-entrancy. This way it can be called from any exception
 * handler without needing to know if we came from userspace or not.
 */
void user_exit(void)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);
	if (__this_cpu_read(context_tracking.state) == IN_USER) {
		/*
		 * We are going to run code that may use RCU. Inform
		 * RCU core about that (ie: we may need the tick again).
		 */
		rcu_user_exit();
		vtime_user_exit(current);
		__this_cpu_write(context_tracking.state, IN_KERNEL);
	}
	local_irq_restore(flags);
}
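
/*
 * Illustrative sketch (an assumption, based on the re-entrancy note
 * above): an exception handler that may fire in either user or kernel
 * context can bracket its work with the exception_enter()/exception_exit()
 * helpers, which call user_exit()/user_enter() only when appropriate:
 *
 *	enum ctx_state prev_state;
 *
 *	prev_state = exception_enter();	// user_exit() if we came from user
 *	handle_the_fault(regs);		// hypothetical handler body
 *	exception_exit(prev_state);	// user_enter() if we return to user
 */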
void guest_enter(void)
{
	if (vtime_accounting_enabled())
		vtime_guest_enter(current);
	else
		__guest_enter();
}
EXPORT_SYMBOL_GPL(guest_enter);

void guest_exit(void)
{
	if (vtime_accounting_enabled())
		vtime_guest_exit(current);
	else
		__guest_exit();
}
EXPORT_SYMBOL_GPL(guest_exit);
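
/*
 * Illustrative sketch (an assumption): a virtualization host such as KVM
 * would bracket the actual guest run with these helpers so that guest
 * time is accounted whether or not vtime accounting is enabled:
 *
 *	guest_enter();			// start accounting time as guest time
 *	run_the_vcpu(vcpu);		// hypothetical low level VM entry
 *	guest_exit();			// back to host kernel accounting
 */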
/**
 * context_tracking_task_switch - context switch the syscall callbacks
 * @prev: the task that is being switched out
 * @next: the task that is being switched in
 *
 * The context tracking uses the syscall slow path to implement its user-kernel
 * boundary probes on syscalls. This way it doesn't impact the syscall fast
 * path on CPUs that don't do context tracking.
 *
 * But we need to clear the flag on the previous task because it may later
 * migrate to some CPU that doesn't do the context tracking. As such the TIF
 * flag may not be desired there.
 */
void context_tracking_task_switch(struct task_struct *prev,
				  struct task_struct *next)
{
	if (__this_cpu_read(context_tracking.active)) {
		clear_tsk_thread_flag(prev, TIF_NOHZ);
		set_tsk_thread_flag(next, TIF_NOHZ);
	}
}
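
/*
 * Illustrative sketch (an assumption): the scheduler is expected to invoke
 * this hook from its context switch path, so that TIF_NOHZ follows the task
 * that is about to run on this CPU, roughly:
 *
 *	static inline void
 *	context_switch(struct rq *rq, struct task_struct *prev,
 *		       struct task_struct *next)
 *	{
 *		...
 *		context_tracking_task_switch(prev, next);
 *		switch_to(prev, next, prev);	// actual task switch
 *		...
 *	}
 */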