Commit a0e0fac633bed47c15cab744663d8c67f8f3421d

Authored by Ingo Molnar

Merge branch 'tip/perf/core-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace into perf/core

Pull ftrace fixlets from Steve Rostedt.

Signed-off-by: Ingo Molnar <mingo@kernel.org>

Showing 4 changed files

kernel/trace/ring_buffer.c
... ... @@ -2816,7 +2816,7 @@
2816 2816 * to the buffer after this will fail and return NULL.
2817 2817 *
2818 2818 * This is different than ring_buffer_record_disable() as
2819   - * it works like an on/off switch, where as the disable() verison
  2819 + * it works like an on/off switch, where as the disable() version
2820 2820 * must be paired with a enable().
2821 2821 */
2822 2822 void ring_buffer_record_off(struct ring_buffer *buffer)
... ... @@ -2839,7 +2839,7 @@
2839 2839 * ring_buffer_record_off().
2840 2840 *
2841 2841 * This is different than ring_buffer_record_enable() as
2842   - * it works like an on/off switch, where as the enable() verison
  2842 + * it works like an on/off switch, where as the enable() version
2843 2843 * must be paired with a disable().
2844 2844 */
2845 2845 void ring_buffer_record_on(struct ring_buffer *buffer)
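
For context (not part of the commit): the comments being corrected contrast two ways of stopping the ring buffer. A minimal, illustrative sketch of the two usage styles, assuming the ring_buffer API of this kernel generation; the function names are real, the wrappers around them are hypothetical:

#include <linux/ring_buffer.h>

/* Counter-style control: each disable() must be balanced by an
 * enable(); calls may nest. */
static void pause_recording(struct ring_buffer *buffer)
{
	ring_buffer_record_disable(buffer);
	/* ... inspect or modify buffer state ... */
	ring_buffer_record_enable(buffer);
}

/* Switch-style control: off() and on() just flip a switch and do not
 * need to be paired; writes fail while the switch is off. */
static void switch_recording(struct ring_buffer *buffer)
{
	ring_buffer_record_off(buffer);
	/* ... recording is rejected here ... */
	ring_buffer_record_on(buffer);
}
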
kernel/trace/trace.c
... ... @@ -426,15 +426,15 @@
426 426  
427 427 static int __init set_tracing_thresh(char *str)
428 428 {
429   - unsigned long threshhold;
  429 + unsigned long threshold;
430 430 int ret;
431 431  
432 432 if (!str)
433 433 return 0;
434   - ret = strict_strtoul(str, 0, &threshhold);
  434 + ret = strict_strtoul(str, 0, &threshold);
435 435 if (ret < 0)
436 436 return 0;
437   - tracing_thresh = threshhold * 1000;
  437 + tracing_thresh = threshold * 1000;
438 438 return 1;
439 439 }
440 440 __setup("tracing_thresh=", set_tracing_thresh);
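For context (not part of the commit): set_tracing_thresh() is a boot-parameter handler registered with __setup(). A sketch of the same parsing pattern, using a hypothetical my_thresh parameter; strict_strtoul() was the string-to-ulong helper of this era (later replaced by kstrtoul()):

#include <linux/kernel.h>
#include <linux/init.h>

static unsigned long my_thresh;	/* hypothetical tunable */

static int __init set_my_thresh(char *str)
{
	unsigned long val;

	if (!str)
		return 0;
	if (strict_strtoul(str, 0, &val) < 0)
		return 0;
	my_thresh = val * 1000;	/* scale to the internal unit, as set_tracing_thresh() does */
	return 1;		/* 1 tells the boot code the option was handled */
}
__setup("my_thresh=", set_my_thresh);
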
kernel/trace/trace_events_filter.c
... ... @@ -2002,7 +2002,7 @@
2002 2002 static int __ftrace_function_set_filter(int filter, char *buf, int len,
2003 2003 struct function_filter_data *data)
2004 2004 {
2005   - int i, re_cnt, ret;
  2005 + int i, re_cnt, ret = -EINVAL;
2006 2006 int *reset;
2007 2007 char **re;
2008 2008  
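For context (not part of the commit): initializing ret to -EINVAL guarantees a sane error is returned when the loop that would otherwise assign it never runs. The hazard, in a reduced, illustrative form (names hypothetical):

#include <linux/errno.h>

/* If count is 0 the loop body never executes and, without the
 * initializer, 'ret' would be returned uninitialized. */
static int apply_all(int count, int (*apply_one)(int idx))
{
	int i, ret = -EINVAL;	/* safe default when there is nothing to apply */

	for (i = 0; i < count; i++) {
		ret = apply_one(i);
		if (ret)
			break;
	}
	return ret;
}
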
kernel/trace/trace_selftest.c
... ... @@ -1041,6 +1041,8 @@
1041 1041 set_current_state(TASK_INTERRUPTIBLE);
1042 1042 schedule();
1043 1043  
  1044 + complete(x);
  1045 +
1044 1046 /* we are awake, now wait to disappear */
1045 1047 while (!kthread_should_stop()) {
1046 1048 /*
1047 1049  
1048 1050  
... ... @@ -1084,24 +1086,21 @@
1084 1086 /* reset the max latency */
1085 1087 tracing_max_latency = 0;
1086 1088  
1087   - /* sleep to let the RT thread sleep too */
1088   - msleep(100);
  1089 + while (p->on_rq) {
  1090 + /*
  1091 + * Sleep to make sure the RT thread is asleep too.
  1092 + * On virtual machines we can't rely on timings,
  1093 + * but we want to make sure this test still works.
  1094 + */
  1095 + msleep(100);
  1096 + }
1089 1097  
1090   - /*
1091   - * Yes this is slightly racy. It is possible that for some
1092   - * strange reason that the RT thread we created, did not
1093   - * call schedule for 100ms after doing the completion,
1094   - * and we do a wakeup on a task that already is awake.
1095   - * But that is extremely unlikely, and the worst thing that
1096   - * happens in such a case, is that we disable tracing.
1097   - * Honestly, if this race does happen something is horrible
1098   - * wrong with the system.
1099   - */
  1098 + init_completion(&isrt);
1100 1099  
1101 1100 wake_up_process(p);
1102 1101  
1103   - /* give a little time to let the thread wake up */
1104   - msleep(100);
  1102 + /* Wait for the task to wake up */
  1103 + wait_for_completion(&isrt);
1105 1104  
1106 1105 /* stop the tracing. */
1107 1106 tracing_stop();
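
For context (not part of the commit): the selftest now uses a completion so the waiter knows the RT thread has actually woken, instead of guessing with a fixed msleep(). A minimal, illustrative sketch of that handshake pattern (names hypothetical, structure mirroring the diff above):

#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/jiffies.h>

static DECLARE_COMPLETION(woke_up);

/* Thread side: sleep, then signal the waiter once actually running again. */
static int worker_fn(void *unused)
{
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();			/* sleep until woken */

	complete(&woke_up);		/* tell the waiter we really are awake */

	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);
	return 0;
}

/* Waiter side: re-arm the completion, wake the thread, block until it confirms. */
static void wake_and_wait(struct task_struct *p)
{
	init_completion(&woke_up);	/* re-arm before reuse, as the selftest does */
	wake_up_process(p);
	wait_for_completion(&woke_up);	/* no arbitrary msleep() needed */
}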