Commit 0b3fcf178deefd7b64154c2c0760a2c63df0b74f
Committed by
Ingo Molnar
1 parent
23a2f3ab46
Exists in
master
and in
7 other branches
perf_events: Move code around to prepare for cgroup
In particular, this patch moves perf_event_exit_task() before cgroup_exit() to allow for cgroup support. The cgroup_exit() function detaches the cgroups attached to a task. Other movements include hoisting some definitions and inlines to the top of perf_event.c Signed-off-by: Stephane Eranian <eranian@google.com> Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> LKML-Reference: <4d22058b.cdace30a.4657.ffff95b1@mx.google.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Showing 2 changed files with 26 additions and 16 deletions Side-by-side Diff
kernel/exit.c
... | ... | @@ -994,6 +994,15 @@ |
994 | 994 | exit_fs(tsk); |
995 | 995 | check_stack_usage(); |
996 | 996 | exit_thread(); |
997 | + | |
998 | + /* | |
999 | + * Flush inherited counters to the parent - before the parent | |
1000 | + * gets woken up by child-exit notifications. | |
1001 | + * | |
1002 | + * because of cgroup mode, must be called before cgroup_exit() | |
1003 | + */ | |
1004 | + perf_event_exit_task(tsk); | |
1005 | + | |
997 | 1006 | cgroup_exit(tsk, 1); |
998 | 1007 | |
999 | 1008 | if (group_dead) |
... | ... | @@ -1007,11 +1016,6 @@ |
1007 | 1016 | * FIXME: do that only when needed, using sched_exit tracepoint |
1008 | 1017 | */ |
1009 | 1018 | flush_ptrace_hw_breakpoint(tsk); |
1010 | - /* | |
1011 | - * Flush inherited counters to the parent - before the parent | |
1012 | - * gets woken up by child-exit notifications. | |
1013 | - */ | |
1014 | - perf_event_exit_task(tsk); | |
1015 | 1019 | |
1016 | 1020 | exit_notify(tsk, group_dead); |
1017 | 1021 | #ifdef CONFIG_NUMA |
kernel/perf_event.c
... | ... | @@ -38,6 +38,12 @@ |
38 | 38 | |
39 | 39 | #include <asm/irq_regs.h> |
40 | 40 | |
41 | +enum event_type_t { | |
42 | + EVENT_FLEXIBLE = 0x1, | |
43 | + EVENT_PINNED = 0x2, | |
44 | + EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED, | |
45 | +}; | |
46 | + | |
41 | 47 | atomic_t perf_task_events __read_mostly; |
42 | 48 | static atomic_t nr_mmap_events __read_mostly; |
43 | 49 | static atomic_t nr_comm_events __read_mostly; |
... | ... | @@ -65,6 +71,12 @@ |
65 | 71 | |
66 | 72 | static atomic64_t perf_event_id; |
67 | 73 | |
74 | +static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx, | |
75 | + enum event_type_t event_type); | |
76 | + | |
77 | +static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx, | |
78 | + enum event_type_t event_type); | |
79 | + | |
68 | 80 | void __weak perf_event_print_debug(void) { } |
69 | 81 | |
70 | 82 | extern __weak const char *perf_pmu_name(void) |
... | ... | @@ -72,6 +84,11 @@ |
72 | 84 | return "pmu"; |
73 | 85 | } |
74 | 86 | |
87 | +static inline u64 perf_clock(void) | |
88 | +{ | |
89 | + return local_clock(); | |
90 | +} | |
91 | + | |
75 | 92 | void perf_pmu_disable(struct pmu *pmu) |
76 | 93 | { |
77 | 94 | int *count = this_cpu_ptr(pmu->pmu_disable_count); |
... | ... | @@ -240,11 +257,6 @@ |
240 | 257 | put_ctx(ctx); |
241 | 258 | } |
242 | 259 | |
243 | -static inline u64 perf_clock(void) | |
244 | -{ | |
245 | - return local_clock(); | |
246 | -} | |
247 | - | |
248 | 260 | /* |
249 | 261 | * Update the record of the current time in a context. |
250 | 262 | */ |
... | ... | @@ -1192,12 +1204,6 @@ |
1192 | 1204 | |
1193 | 1205 | return 0; |
1194 | 1206 | } |
1195 | - | |
1196 | -enum event_type_t { | |
1197 | - EVENT_FLEXIBLE = 0x1, | |
1198 | - EVENT_PINNED = 0x2, | |
1199 | - EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED, | |
1200 | -}; | |
1201 | 1207 | |
1202 | 1208 | static void ctx_sched_out(struct perf_event_context *ctx, |
1203 | 1209 | struct perf_cpu_context *cpuctx, |