Commit 22d368544b0ed9093a3db3ee4e00a842540fcecd

Authored by Linus Torvalds

Merge tag 'trace-fixes-v3.16-rc5-v2' of git://git.kernel.org/pub/scm/linux/kerne…

…l/git/rostedt/linux-trace

Pull tracing fixes from Steven Rostedt:
 "A few more fixes for ftrace infrastructure.

  I was cleaning out my INBOX and found two fixes from zhangwei from a
  year ago that were lost in my mail.  These fix an inconsistency
  between trace_puts() and the way trace_printk() works.  The reason
  this is important to fix is because when trace_printk() doesn't have
  any arguments, it turns into a trace_puts().  Not being able to enable
  a stack trace against trace_printk() because it does not have any
  arguments is quite confusing.  Also, the fix is rather trivial and low
  risk.

  While porting some changes to PowerPC I discovered that it still has
  the function graph tracer filter bug that if you also enable stack
  tracing the function graph tracer filter is ignored.  I fixed that up.

 Finally, Martin Lau fixed a bug that would cause readers of the
  ftrace ring buffer to block forever even though it was supposed to be
  NONBLOCK"

This also includes the fix from an earlier pull request:

 "Oleg Nesterov fixed a memory leak that happens if a user creates a
  tracing instance, sets up a filter in an event, and then removes that
  instance.  The filter allocates memory that is never freed when the
  instance is destroyed"

* tag 'trace-fixes-v3.16-rc5-v2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  ring-buffer: Fix polling on trace_pipe
  tracing: Add TRACE_ITER_PRINTK flag check in __trace_puts/__trace_bputs
  tracing: Fix graph tracer with stack tracer on other archs
  tracing: Add ftrace_trace_stack into __trace_puts/__trace_bputs
  tracing: instance_rmdir() leaks ftrace_event_file->filter

Showing 4 changed files Side-by-side Diff

kernel/trace/ftrace.c
... ... @@ -265,11 +265,11 @@
265 265 func = ftrace_ops_list_func;
266 266 }
267 267  
  268 + update_function_graph_func();
  269 +
268 270 /* If there's no change, then do nothing more here */
269 271 if (ftrace_trace_function == func)
270 272 return;
271   -
272   - update_function_graph_func();
273 273  
274 274 /*
275 275 * If we are using the list function, it doesn't care
kernel/trace/ring_buffer.c
... ... @@ -616,10 +616,6 @@
616 616 struct ring_buffer_per_cpu *cpu_buffer;
617 617 struct rb_irq_work *work;
618 618  
619   - if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
620   - (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
621   - return POLLIN | POLLRDNORM;
622   -
623 619 if (cpu == RING_BUFFER_ALL_CPUS)
624 620 work = &buffer->irq_work;
625 621 else {
kernel/trace/trace.c
... ... @@ -466,7 +466,13 @@
466 466 struct print_entry *entry;
467 467 unsigned long irq_flags;
468 468 int alloc;
  469 + int pc;
469 470  
  471 + if (!(trace_flags & TRACE_ITER_PRINTK))
  472 + return 0;
  473 +
  474 + pc = preempt_count();
  475 +
470 476 if (unlikely(tracing_selftest_running || tracing_disabled))
471 477 return 0;
472 478  
... ... @@ -475,7 +481,7 @@
475 481 local_save_flags(irq_flags);
476 482 buffer = global_trace.trace_buffer.buffer;
477 483 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
478   - irq_flags, preempt_count());
  484 + irq_flags, pc);
479 485 if (!event)
480 486 return 0;
481 487  
... ... @@ -492,6 +498,7 @@
492 498 entry->buf[size] = '\0';
493 499  
494 500 __buffer_unlock_commit(buffer, event);
  501 + ftrace_trace_stack(buffer, irq_flags, 4, pc);
495 502  
496 503 return size;
497 504 }
498 505  
499 506  
... ... @@ -509,14 +516,20 @@
509 516 struct bputs_entry *entry;
510 517 unsigned long irq_flags;
511 518 int size = sizeof(struct bputs_entry);
  519 + int pc;
512 520  
  521 + if (!(trace_flags & TRACE_ITER_PRINTK))
  522 + return 0;
  523 +
  524 + pc = preempt_count();
  525 +
513 526 if (unlikely(tracing_selftest_running || tracing_disabled))
514 527 return 0;
515 528  
516 529 local_save_flags(irq_flags);
517 530 buffer = global_trace.trace_buffer.buffer;
518 531 event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
519   - irq_flags, preempt_count());
  532 + irq_flags, pc);
520 533 if (!event)
521 534 return 0;
522 535  
... ... @@ -525,6 +538,7 @@
525 538 entry->str = str;
526 539  
527 540 __buffer_unlock_commit(buffer, event);
  541 + ftrace_trace_stack(buffer, irq_flags, 4, pc);
528 542  
529 543 return 1;
530 544 }
kernel/trace/trace_events.c
... ... @@ -470,6 +470,7 @@
470 470  
471 471 list_del(&file->list);
472 472 remove_subsystem(file->system);
  473 + free_event_filter(file->filter);
473 474 kmem_cache_free(file_cachep, file);
474 475 }
475 476