Commit 90983b16078ab0fdc58f0dab3e8e3da79c9579a2

Authored by Frederic Weisbecker
Committed by Ingo Molnar
1 parent 6050cb0b0b

perf: Sanitize get_callchain_buffers()

In case of allocation failure, get_callchain_buffers() keeps the
refcount incremented for the current event.

As a result, when get_callchain_buffers() returns an error,
we must clean up what it did by cancelling its last refcount
with a call to put_callchain_buffers().

This is a hack in order to be able to call free_event()
after that failure.
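
Concretely, the pre-patch contract looked roughly like this. The
sketch below is simplified userspace C with hypothetical helper
names (alloc_buffers() stands in for alloc_callchain_buffers());
it is not the exact kernel source:

#include <stdlib.h>

static int nr_callchain_events;         /* an atomic_t in the real code */

/* Hypothetical stand-in for alloc_callchain_buffers(). */
static int alloc_buffers(void)
{
        void *p = malloc(4096);

        if (!p)
                return -1;              /* -ENOMEM in the real code */
        free(p);
        return 0;
}

/*
 * Pre-patch shape: the reference taken up front is NOT dropped on
 * failure, so the caller has to rebalance it later by reaching
 * put_callchain_buffers() through free_event().
 */
int get_callchain_buffers_prepatch(void)
{
        nr_callchain_events++;          /* reference taken up front */
        return alloc_buffers();         /* on error, the reference
                                           leaks to the caller */
}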

The original purpose of that was to simplify the failure
path. But this error handling is actually counter-intuitive,
ugly and hard to follow, because one expects the resources
used to perform a service to be cleaned up by the callee in
case of failure, not by the caller.
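
With callee-side cleanup, the function balances its own refcount
before returning an error, so every caller can simply propagate
it. A minimal sketch, reusing the hypothetical helpers from the
snippet above:

/*
 * Post-patch shape: on failure the callee undoes its own side
 * effect, leaving no refcount for callers to cancel.
 */
int get_callchain_buffers_fixed(void)
{
        int err;

        nr_callchain_events++;
        err = alloc_buffers();
        if (err)
                nr_callchain_events--;  /* callee cleans up after itself */
        return err;
}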

So let's clean this up by cancelling the refcount from
get_callchain_buffers() in case of failure, and by freeing
the event accordingly in perf_event_alloc().
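
The rewritten error path in perf_event_alloc() (see the
kernel/events/core.c diff below) follows the usual kernel
goto-unwind idiom: each err_* label releases the resources
acquired before the corresponding failure point, in reverse order
of acquisition. A self-contained illustration with hypothetical
names, not the perf code itself:

#include <stdlib.h>

struct thing {
        void *a;
        void *b;
};

/*
 * goto-based unwind: each failure jumps to the first label that
 * frees only what was successfully acquired before that point.
 */
static struct thing *thing_alloc(void)
{
        struct thing *t = malloc(sizeof(*t));

        if (!t)
                goto err;

        t->a = malloc(64);
        if (!t->a)
                goto err_free_t;

        t->b = malloc(64);
        if (!t->b)
                goto err_free_a;

        return t;

err_free_a:
        free(t->a);
err_free_t:
        free(t);
err:
        return NULL;
}

int main(void)
{
        struct thing *t = thing_alloc();

        if (t) {
                free(t->b);
                free(t->a);
                free(t);
        }
        return 0;
}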

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Stephane Eranian <eranian@google.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1374539466-4799-3-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Showing 2 changed files with 23 additions and 20 deletions

kernel/events/callchain.c
@@ -117,6 +117,8 @@
         err = alloc_callchain_buffers();
 exit:
         mutex_unlock(&callchain_mutex);
+        if (err)
+                atomic_dec(&nr_callchain_events);

         return err;
 }
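
For context, with this change applied get_callchain_buffers() ends
up looking roughly as follows (paraphrased from the kernel source
of that era; details may differ slightly):

int get_callchain_buffers(void)
{
        int err = 0;
        int count;

        mutex_lock(&callchain_mutex);

        count = atomic_inc_return(&nr_callchain_events);
        if (WARN_ON_ONCE(count < 1)) {
                err = -EINVAL;
                goto exit;
        }

        if (count > 1) {
                /* If the allocation failed, give up */
                if (!callchain_cpus_entries)
                        err = -ENOMEM;
                goto exit;
        }

        err = alloc_callchain_buffers();
exit:
        mutex_unlock(&callchain_mutex);
        /* the new part: drop the reference we took if anything failed */
        if (err)
                atomic_dec(&nr_callchain_events);

        return err;
}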
kernel/events/core.c
@@ -6457,7 +6457,7 @@
         struct pmu *pmu;
         struct perf_event *event;
         struct hw_perf_event *hwc;
-        long err;
+        long err = -EINVAL;

         if ((unsigned)cpu >= nr_cpu_ids) {
                 if (!task || cpu != -1)
@@ -6540,25 +6540,23 @@
          * we currently do not support PERF_FORMAT_GROUP on inherited events
          */
         if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
-                goto done;
+                goto err_ns;

         pmu = perf_init_event(event);
-
-done:
-        err = 0;
         if (!pmu)
-                err = -EINVAL;
-        else if (IS_ERR(pmu))
+                goto err_ns;
+        else if (IS_ERR(pmu)) {
                 err = PTR_ERR(pmu);
-
-        if (err) {
-                if (event->ns)
-                        put_pid_ns(event->ns);
-                kfree(event);
-                return ERR_PTR(err);
+                goto err_ns;
         }

         if (!event->parent) {
+                if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
+                        err = get_callchain_buffers();
+                        if (err)
+                                goto err_pmu;
+                }
+
                 if (event->attach_state & PERF_ATTACH_TASK)
                         static_key_slow_inc(&perf_sched_events.key);
                 if (event->attr.mmap || event->attr.mmap_data)
@@ -6573,16 +6571,19 @@
                         atomic_inc(&per_cpu(perf_branch_stack_events,
                                             event->cpu));
                 }
-                if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
-                        err = get_callchain_buffers();
-                        if (err) {
-                                free_event(event);
-                                return ERR_PTR(err);
-                        }
-                }
         }

         return event;
+
+err_pmu:
+        if (event->destroy)
+                event->destroy(event);
+err_ns:
+        if (event->ns)
+                put_pid_ns(event->ns);
+        kfree(event);
+
+        return ERR_PTR(err);
 }

 static int perf_copy_attr(struct perf_event_attr __user *uattr,