Commit bf1af3a809506645b9130755b713b008da14737f

Authored by Ingo Molnar

Merge branch 'tip/perf/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into perf/core

Showing 3 changed files

include/linux/ftrace.h
... ... @@ -428,6 +428,7 @@
428 428  
429 429 extern void ftrace_graph_init_task(struct task_struct *t);
430 430 extern void ftrace_graph_exit_task(struct task_struct *t);
  431 +extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);
431 432  
432 433 static inline int task_curr_ret_stack(struct task_struct *t)
433 434 {
... ... @@ -451,6 +452,7 @@
451 452  
452 453 static inline void ftrace_graph_init_task(struct task_struct *t) { }
453 454 static inline void ftrace_graph_exit_task(struct task_struct *t) { }
  455 +static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }
454 456  
455 457 static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
456 458 trace_func_graph_ent_t entryfunc)
kernel/sched.c
... ... @@ -5550,7 +5550,7 @@
5550 5550 	 * The idle tasks have their own, simple scheduling class:
5551 5551 	 */
5552 5552 	idle->sched_class = &idle_sched_class;
5553   -	ftrace_graph_init_task(idle);
  5553 +	ftrace_graph_init_idle_task(idle, cpu);
5554 5554 }
5555 5555  
5556 5556 /*
kernel/trace/ftrace.c
... ... @@ -3328,7 +3328,7 @@
3328 3328 	/* The cpu_boot init_task->ret_stack will never be freed */
3329 3329 	for_each_online_cpu(cpu) {
3330 3330 		if (!idle_task(cpu)->ret_stack)
3331   -			ftrace_graph_init_task(idle_task(cpu));
  3331 +			ftrace_graph_init_idle_task(idle_task(cpu), cpu);
3332 3332 	}
3333 3333  
3334 3334 	do {
... ... @@ -3418,6 +3418,49 @@
3418 3418 mutex_unlock(&ftrace_lock);
3419 3419 }
3420 3420  
  3421 +static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
  3422 +
  3423 +static void
  3424 +graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
  3425 +{
  3426 +	atomic_set(&t->tracing_graph_pause, 0);
  3427 +	atomic_set(&t->trace_overrun, 0);
  3428 +	t->ftrace_timestamp = 0;
  3429 +	/* make curr_ret_stack visable before we add the ret_stack */
  3430 +	smp_wmb();
  3431 +	t->ret_stack = ret_stack;
  3432 +}
  3433 +
  3434 +/*
  3435 + * Allocate a return stack for the idle task. May be the first
  3436 + * time through, or it may be done by CPU hotplug online.
  3437 + */
  3438 +void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
  3439 +{
  3440 +	t->curr_ret_stack = -1;
  3441 +	/*
  3442 +	 * The idle task has no parent, it either has its own
  3443 +	 * stack or no stack at all.
  3444 +	 */
  3445 +	if (t->ret_stack)
  3446 +		WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
  3447 +
  3448 +	if (ftrace_graph_active) {
  3449 +		struct ftrace_ret_stack *ret_stack;
  3450 +
  3451 +		ret_stack = per_cpu(idle_ret_stack, cpu);
  3452 +		if (!ret_stack) {
  3453 +			ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
  3454 +					* sizeof(struct ftrace_ret_stack),
  3455 +					GFP_KERNEL);
  3456 +			if (!ret_stack)
  3457 +				return;
  3458 +			per_cpu(idle_ret_stack, cpu) = ret_stack;
  3459 +		}
  3460 +		graph_init_task(t, ret_stack);
  3461 +	}
  3462 +}
  3463 +
3421 3464 /* Allocate a return stack for newly created task */
3422 3465 void ftrace_graph_init_task(struct task_struct *t)
3423 3466 {
... ... @@ -3433,12 +3476,7 @@
3433 3476 				GFP_KERNEL);
3434 3477 		if (!ret_stack)
3435 3478 			return;
3436   -		atomic_set(&t->tracing_graph_pause, 0);
3437   -		atomic_set(&t->trace_overrun, 0);
3438   -		t->ftrace_timestamp = 0;
3439   -		/* make curr_ret_stack visable before we add the ret_stack */
3440   -		smp_wmb();
3441   -		t->ret_stack = ret_stack;
  3479 +		graph_init_task(t, ret_stack);
3442 3480 	}
3443 3481 }
3444 3482
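Note: the new ftrace_graph_init_idle_task() above gives each CPU's idle task a cached ret_stack held in the per-CPU idle_ret_stack slot, so a CPU that goes through hotplug reuses the same buffer instead of allocating a fresh one every time it comes online (the idle task's ret_stack is never freed, per the comment in register_ftrace_graph). As a rough illustration of that allocate-once-then-reuse pattern, here is a minimal standalone C sketch; the names NR_CPUS_SIM, STACK_DEPTH and idle_stack_for_cpu are invented for the example and are not kernel APIs.

#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS_SIM 4           /* illustrative stand-in for the online CPU count */
#define STACK_DEPTH 50          /* stand-in for FTRACE_RETFUNC_DEPTH */

struct ret_frame {
	unsigned long ret;
	unsigned long func;
};

/* One cached stack per CPU, analogous to the per-CPU idle_ret_stack slot. */
static struct ret_frame *idle_stack_cache[NR_CPUS_SIM];

/* Return the cached stack for @cpu, allocating it only on first use. */
static struct ret_frame *idle_stack_for_cpu(int cpu)
{
	if (!idle_stack_cache[cpu]) {
		idle_stack_cache[cpu] = calloc(STACK_DEPTH,
					       sizeof(struct ret_frame));
		if (!idle_stack_cache[cpu])
			return NULL;
	}
	/* Later "online" events get the same buffer back, so nothing leaks. */
	return idle_stack_cache[cpu];
}

int main(void)
{
	/* Simulate one CPU going offline and coming back online three times. */
	for (int round = 0; round < 3; round++)
		printf("cpu1 ret_stack: %p\n", (void *)idle_stack_for_cpu(1));
	return 0;
}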