Commit 19504828b4bee5e471bcd35e214bc6fd0d380692

Authored by Linus Torvalds

Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  perf tools: Fix sample size bit operations
  perf tools: Fix omitted mmap data update on remap
  watchdog: Change the default timeout and configure nmi watchdog period based on watchdog_thresh
  watchdog: Disable watchdog when thresh is zero
  watchdog: Only disable/enable watchdog if necessary
  watchdog: Fix rounding bug in get_sample_period()
  perf tools: Propagate event parse error handling
  perf tools: Robustify dynamic sample content fetch
  perf tools: Pre-check sample size before parsing
  perf tools: Move evlist sample helpers to evlist area
  perf tools: Remove junk code in mmap size handling
  perf tools: Check we are able to read the event size on mmap

Showing 18 changed files

arch/x86/kernel/apic/hw_nmi.c
... ... @@ -19,9 +19,9 @@
19 19 #include <linux/delay.h>
20 20  
21 21 #ifdef CONFIG_HARDLOCKUP_DETECTOR
22   -u64 hw_nmi_get_sample_period(void)
  22 +u64 hw_nmi_get_sample_period(int watchdog_thresh)
23 23 {
24   - return (u64)(cpu_khz) * 1000 * 60;
  24 + return (u64)(cpu_khz) * 1000 * watchdog_thresh;
25 25 }
26 26 #endif
27 27  
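
The NMI watchdog programs a hardware PMU counter in units of CPU cycles, so the period is cpu_khz * 1000 cycles per second scaled by the threshold in seconds; the old code hard-wired 60 seconds, while the new code takes watchdog_thresh (default 10, per the kernel/watchdog.c change below). A standalone sketch of the arithmetic, with a 2 GHz cpu_khz value assumed purely for illustration:

#include <stdio.h>
#include <stdint.h>

/* Same conversion as hw_nmi_get_sample_period(): cycles/sec * seconds. */
static uint64_t sample_period(uint64_t cpu_khz, int watchdog_thresh)
{
	return cpu_khz * 1000 * watchdog_thresh;
}

int main(void)
{
	/* Assumed 2 GHz CPU (cpu_khz == 2000000), new default thresh of 10:
	 * the counter overflows after 2e10 cycles, i.e. roughly every
	 * 10 seconds of CPU time. */
	printf("%llu\n", (unsigned long long)sample_period(2000000, 10));
	return 0;
}
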
include/linux/nmi.h
... ... @@ -45,11 +45,12 @@
45 45  
46 46 #ifdef CONFIG_LOCKUP_DETECTOR
47 47 int hw_nmi_is_cpu_stuck(struct pt_regs *);
48   -u64 hw_nmi_get_sample_period(void);
  48 +u64 hw_nmi_get_sample_period(int watchdog_thresh);
49 49 extern int watchdog_enabled;
  50 +extern int watchdog_thresh;
50 51 struct ctl_table;
51   -extern int proc_dowatchdog_enabled(struct ctl_table *, int ,
52   - void __user *, size_t *, loff_t *);
  52 +extern int proc_dowatchdog(struct ctl_table *, int ,
  53 + void __user *, size_t *, loff_t *);
53 54 #endif
54 55  
55 56 #endif
include/linux/sched.h
... ... @@ -315,7 +315,6 @@
315 315 void __user *buffer,
316 316 size_t *lenp, loff_t *ppos);
317 317 extern unsigned int softlockup_panic;
318   -extern int softlockup_thresh;
319 318 void lockup_detector_init(void);
320 319 #else
321 320 static inline void touch_softlockup_watchdog(void)
kernel/sysctl.c
... ... @@ -730,14 +730,16 @@
730 730 .data = &watchdog_enabled,
731 731 .maxlen = sizeof (int),
732 732 .mode = 0644,
733   - .proc_handler = proc_dowatchdog_enabled,
  733 + .proc_handler = proc_dowatchdog,
  734 + .extra1 = &zero,
  735 + .extra2 = &one,
734 736 },
735 737 {
736 738 .procname = "watchdog_thresh",
737   - .data = &softlockup_thresh,
  739 + .data = &watchdog_thresh,
738 740 .maxlen = sizeof(int),
739 741 .mode = 0644,
740   - .proc_handler = proc_dowatchdog_thresh,
  742 + .proc_handler = proc_dowatchdog,
741 743 .extra1 = &neg_one,
742 744 .extra2 = &sixty,
743 745 },
... ... @@ -755,7 +757,9 @@
755 757 .data = &watchdog_enabled,
756 758 .maxlen = sizeof (int),
757 759 .mode = 0644,
758   - .proc_handler = proc_dowatchdog_enabled,
  760 + .proc_handler = proc_dowatchdog,
  761 + .extra1 = &zero,
  762 + .extra2 = &one,
759 763 },
760 764 #endif
761 765 #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86)
kernel/watchdog.c
... ... @@ -28,7 +28,7 @@
28 28 #include <linux/perf_event.h>
29 29  
30 30 int watchdog_enabled = 1;
31   -int __read_mostly softlockup_thresh = 60;
  31 +int __read_mostly watchdog_thresh = 10;
32 32  
33 33 static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
34 34 static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
... ... @@ -91,6 +91,17 @@
91 91 __setup("nosoftlockup", nosoftlockup_setup);
92 92 /* */
93 93  
  94 +/*
  95 + * Hard-lockup warnings should be triggered after just a few seconds. Soft-
  96 + * lockups can have false positives under extreme conditions. So we generally
  97 + * want a higher threshold for soft lockups than for hard lockups. So we couple
  98 + * the thresholds with a factor: we make the soft threshold twice the amount of
  99 + * time the hard threshold is.
  100 + */
  101 +static int get_softlockup_thresh(void)
  102 +{
  103 + return watchdog_thresh * 2;
  104 +}
94 105  
95 106 /*
96 107 * Returns seconds, approximately. We don't need nanosecond
... ... @@ -105,12 +116,12 @@
105 116 static unsigned long get_sample_period(void)
106 117 {
107 118 /*
108   - * convert softlockup_thresh from seconds to ns
  119 + * convert watchdog_thresh from seconds to ns
109 120 * the divide by 5 is to give hrtimer 5 chances to
110 121 * increment before the hardlockup detector generates
111 122 * a warning
112 123 */
113   - return softlockup_thresh / 5 * NSEC_PER_SEC;
  124 + return get_softlockup_thresh() * (NSEC_PER_SEC / 5);
114 125 }
115 126  
116 127 /* Commands for resetting the watchdog */
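
The get_sample_period() rewrite fixes the rounding bug named in the shortlog on top of switching to the coupled threshold: the old expression divides first, so any threshold not divisible by 5 silently loses its remainder, while the new expression multiplies by NSEC_PER_SEC / 5, which is exact because 10^9 divides evenly by 5. A minimal sketch of the difference, using a hypothetical 12-second threshold:

#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	unsigned long long thresh = 12; /* hypothetical, not a multiple of 5 */

	/* Old order of operations: 12 / 5 truncates to 2 -> 2.0s period. */
	unsigned long long old_ns = thresh / 5 * NSEC_PER_SEC;
	/* Fixed order of operations: 12 * 200000000 -> 2.4s period. */
	unsigned long long new_ns = thresh * (NSEC_PER_SEC / 5);

	printf("old=%llu ns, fixed=%llu ns\n", old_ns, new_ns);
	return 0;
}
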
... ... @@ -182,7 +193,7 @@
182 193 unsigned long now = get_timestamp(smp_processor_id());
183 194  
184 195 /* Warn about unreasonable delays: */
185   - if (time_after(now, touch_ts + softlockup_thresh))
  196 + if (time_after(now, touch_ts + get_softlockup_thresh()))
186 197 return now - touch_ts;
187 198  
188 199 return 0;
... ... @@ -359,7 +370,7 @@
359 370  
360 371 /* Try to register using hardware perf events */
361 372 wd_attr = &wd_hw_attr;
362   - wd_attr->sample_period = hw_nmi_get_sample_period();
  373 + wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);
363 374 event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback);
364 375 if (!IS_ERR(event)) {
365 376 printk(KERN_INFO "NMI watchdog enabled, takes one hw-pmu counter.\n");
... ... @@ -501,28 +512,25 @@
501 512 /* sysctl functions */
502 513 #ifdef CONFIG_SYSCTL
503 514 /*
504   - * proc handler for /proc/sys/kernel/nmi_watchdog
  515 + * proc handler for /proc/sys/kernel/nmi_watchdog,watchdog_thresh
505 516 */
506 517  
507   -int proc_dowatchdog_enabled(struct ctl_table *table, int write,
508   - void __user *buffer, size_t *length, loff_t *ppos)
  518 +int proc_dowatchdog(struct ctl_table *table, int write,
  519 + void __user *buffer, size_t *lenp, loff_t *ppos)
509 520 {
510   - proc_dointvec(table, write, buffer, length, ppos);
  521 + int ret;
511 522  
512   - if (write) {
513   - if (watchdog_enabled)
514   - watchdog_enable_all_cpus();
515   - else
516   - watchdog_disable_all_cpus();
517   - }
518   - return 0;
519   -}
  523 + ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
  524 + if (ret || !write)
  525 + goto out;
520 526  
521   -int proc_dowatchdog_thresh(struct ctl_table *table, int write,
522   - void __user *buffer,
523   - size_t *lenp, loff_t *ppos)
524   -{
525   - return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
  527 + if (watchdog_enabled && watchdog_thresh)
  528 + watchdog_enable_all_cpus();
  529 + else
  530 + watchdog_disable_all_cpus();
  531 +
  532 +out:
  533 + return ret;
526 534 }
527 535 #endif /* CONFIG_SYSCTL */
528 536  
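With both sysctl knobs routed through proc_dowatchdog(), any write to /proc/sys/kernel/watchdog or /proc/sys/kernel/watchdog_thresh re-evaluates the detector state: it is enabled only when watchdog_enabled and watchdog_thresh are both non-zero, so writing a threshold of zero now disables the watchdog entirely, which is the behavior the "Disable watchdog when thresh is zero" commit in the shortlog advertises.
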
tools/perf/builtin-test.c
... ... @@ -474,6 +474,7 @@
474 474 unsigned int nr_events[nsyscalls],
475 475 expected_nr_events[nsyscalls], i, j;
476 476 struct perf_evsel *evsels[nsyscalls], *evsel;
  477 + int sample_size = perf_sample_size(attr.sample_type);
477 478  
478 479 for (i = 0; i < nsyscalls; ++i) {
479 480 char name[64];
... ... @@ -558,7 +559,13 @@
558 559 goto out_munmap;
559 560 }
560 561  
561   - perf_event__parse_sample(event, attr.sample_type, false, &sample);
  562 + err = perf_event__parse_sample(event, attr.sample_type, sample_size,
  563 + false, &sample);
  564 + if (err) {
  565 + pr_err("Can't parse sample, err = %d\n", err);
  566 + goto out_munmap;
  567 + }
  568 +
562 569 evsel = perf_evlist__id2evsel(evlist, sample.id);
563 570 if (evsel == NULL) {
564 571 pr_debug("event with id %" PRIu64
tools/perf/builtin-top.c
... ... @@ -805,9 +805,14 @@
805 805 {
806 806 struct perf_sample sample;
807 807 union perf_event *event;
  808 + int ret;
808 809  
809 810 while ((event = perf_evlist__mmap_read(top.evlist, idx)) != NULL) {
810   - perf_session__parse_sample(self, event, &sample);
  811 + ret = perf_session__parse_sample(self, event, &sample);
  812 + if (ret) {
  813 + pr_err("Can't parse sample, err = %d\n", ret);
  814 + continue;
  815 + }
811 816  
812 817 if (event->header.type == PERF_RECORD_SAMPLE)
813 818 perf_event__process_sample(event, &sample, self);
tools/perf/util/event.c
... ... @@ -9,21 +9,21 @@
9 9 #include "thread_map.h"
10 10  
11 11 static const char *perf_event__names[] = {
12   - [0] = "TOTAL",
13   - [PERF_RECORD_MMAP] = "MMAP",
14   - [PERF_RECORD_LOST] = "LOST",
15   - [PERF_RECORD_COMM] = "COMM",
16   - [PERF_RECORD_EXIT] = "EXIT",
17   - [PERF_RECORD_THROTTLE] = "THROTTLE",
18   - [PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE",
19   - [PERF_RECORD_FORK] = "FORK",
20   - [PERF_RECORD_READ] = "READ",
21   - [PERF_RECORD_SAMPLE] = "SAMPLE",
22   - [PERF_RECORD_HEADER_ATTR] = "ATTR",
23   - [PERF_RECORD_HEADER_EVENT_TYPE] = "EVENT_TYPE",
24   - [PERF_RECORD_HEADER_TRACING_DATA] = "TRACING_DATA",
25   - [PERF_RECORD_HEADER_BUILD_ID] = "BUILD_ID",
26   - [PERF_RECORD_FINISHED_ROUND] = "FINISHED_ROUND",
  12 + [0] = "TOTAL",
  13 + [PERF_RECORD_MMAP] = "MMAP",
  14 + [PERF_RECORD_LOST] = "LOST",
  15 + [PERF_RECORD_COMM] = "COMM",
  16 + [PERF_RECORD_EXIT] = "EXIT",
  17 + [PERF_RECORD_THROTTLE] = "THROTTLE",
  18 + [PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE",
  19 + [PERF_RECORD_FORK] = "FORK",
  20 + [PERF_RECORD_READ] = "READ",
  21 + [PERF_RECORD_SAMPLE] = "SAMPLE",
  22 + [PERF_RECORD_HEADER_ATTR] = "ATTR",
  23 + [PERF_RECORD_HEADER_EVENT_TYPE] = "EVENT_TYPE",
  24 + [PERF_RECORD_HEADER_TRACING_DATA] = "TRACING_DATA",
  25 + [PERF_RECORD_HEADER_BUILD_ID] = "BUILD_ID",
  26 + [PERF_RECORD_FINISHED_ROUND] = "FINISHED_ROUND",
27 27 };
28 28  
29 29 const char *perf_event__name(unsigned int id)
... ... @@ -33,6 +33,22 @@
33 33 if (!perf_event__names[id])
34 34 return "UNKNOWN";
35 35 return perf_event__names[id];
  36 +}
  37 +
  38 +int perf_sample_size(u64 sample_type)
  39 +{
  40 + u64 mask = sample_type & PERF_SAMPLE_MASK;
  41 + int size = 0;
  42 + int i;
  43 +
  44 + for (i = 0; i < 64; i++) {
  45 + if (mask & (1UL << i))
  46 + size++;
  47 + }
  48 +
  49 + size *= sizeof(u64);
  50 +
  51 + return size;
36 52 }
37 53  
38 54 static struct perf_sample synth_sample = {
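
perf_sample_size() is a popcount in disguise: each sample_type bit inside PERF_SAMPLE_MASK contributes one u64 to the fixed-size portion of a sample record, which lets the parser pre-check a record's length before touching any field. A standalone illustration (the three bit values are from the perf ABI; the rest is a sketch, not the tool's code):

#include <stdio.h>
#include <stdint.h>

/* Fixed-size sample fields, one u64 each (bit values from the perf ABI). */
#define PERF_SAMPLE_IP   (1U << 0)
#define PERF_SAMPLE_TID  (1U << 1)
#define PERF_SAMPLE_TIME (1U << 2)

int main(void)
{
	uint64_t sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME;
	int size = 0, i;

	for (i = 0; i < 64; i++)
		if (sample_type & (1ULL << i))
			size++;
	size *= sizeof(uint64_t);

	printf("%d bytes\n", size); /* 3 bits set -> 24 bytes */
	return 0;
}
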
tools/perf/util/event.h
... ... @@ -56,6 +56,13 @@
56 56 u64 id;
57 57 };
58 58  
  59 +
  60 +#define PERF_SAMPLE_MASK \
  61 + (PERF_SAMPLE_IP | PERF_SAMPLE_TID | \
  62 + PERF_SAMPLE_TIME | PERF_SAMPLE_ADDR | \
  63 + PERF_SAMPLE_ID | PERF_SAMPLE_STREAM_ID | \
  64 + PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD)
  65 +
59 66 struct sample_event {
60 67 struct perf_event_header header;
61 68 u64 array[];
... ... @@ -75,6 +82,8 @@
75 82 struct ip_callchain *callchain;
76 83 };
77 84  
  85 +int perf_sample_size(u64 sample_type);
  86 +
78 87 #define BUILD_ID_SIZE 20
79 88  
80 89 struct build_id_event {
... ... @@ -178,7 +187,8 @@
178 187 const char *perf_event__name(unsigned int id);
179 188  
180 189 int perf_event__parse_sample(const union perf_event *event, u64 type,
181   - bool sample_id_all, struct perf_sample *sample);
  190 + int sample_size, bool sample_id_all,
  191 + struct perf_sample *sample);
182 192  
183 193 #endif /* __PERF_RECORD_H */
tools/perf/util/evlist.c
... ... @@ -459,4 +459,35 @@
459 459  
460 460 return 0;
461 461 }
  462 +
  463 +u64 perf_evlist__sample_type(struct perf_evlist *evlist)
  464 +{
  465 + struct perf_evsel *pos;
  466 + u64 type = 0;
  467 +
  468 + list_for_each_entry(pos, &evlist->entries, node) {
  469 + if (!type)
  470 + type = pos->attr.sample_type;
  471 + else if (type != pos->attr.sample_type)
  472 + die("non matching sample_type");
  473 + }
  474 +
  475 + return type;
  476 +}
  477 +
  478 +bool perf_evlist__sample_id_all(const struct perf_evlist *evlist)
  479 +{
  480 + bool value = false, first = true;
  481 + struct perf_evsel *pos;
  482 +
  483 + list_for_each_entry(pos, &evlist->entries, node) {
  484 + if (first) {
  485 + value = pos->attr.sample_id_all;
  486 + first = false;
  487 + } else if (value != pos->attr.sample_id_all)
  488 + die("non matching sample_id_all");
  489 + }
  490 +
  491 + return value;
  492 +}
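
These two helpers are moved verbatim from util/header.c (see the matching removal below); keeping them next to the evlist data they validate is what the "Move evlist sample helpers to evlist area" commit in the shortlog is about. Both enforce that every evsel in the list agrees on the attribute, taking the first entry's value and dying on any mismatch, so callers such as perf_session__update_sample_type() can treat the evlist as having a single sample_type.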
tools/perf/util/evlist.h
... ... @@ -66,5 +66,8 @@
66 66 void perf_evlist__delete_maps(struct perf_evlist *evlist);
67 67 int perf_evlist__set_filters(struct perf_evlist *evlist);
68 68  
  69 +u64 perf_evlist__sample_type(struct perf_evlist *evlist);
  70 +bool perf_evlist__sample_id_all(const struct perf_evlist *evlist);
  71 +
69 72 #endif /* __PERF_EVLIST_H */
tools/perf/util/evsel.c
... ... @@ -303,8 +303,20 @@
303 303 return 0;
304 304 }
305 305  
  306 +static bool sample_overlap(const union perf_event *event,
  307 + const void *offset, u64 size)
  308 +{
  309 + const void *base = event;
  310 +
  311 + if (offset + size > base + event->header.size)
  312 + return true;
  313 +
  314 + return false;
  315 +}
  316 +
306 317 int perf_event__parse_sample(const union perf_event *event, u64 type,
307   - bool sample_id_all, struct perf_sample *data)
  318 + int sample_size, bool sample_id_all,
  319 + struct perf_sample *data)
308 320 {
309 321 const u64 *array;
310 322  
... ... @@ -319,6 +331,9 @@
319 331  
320 332 array = event->sample.array;
321 333  
  334 + if (sample_size + sizeof(event->header) > event->header.size)
  335 + return -EFAULT;
  336 +
322 337 if (type & PERF_SAMPLE_IP) {
323 338 data->ip = event->ip.ip;
324 339 array++;
... ... @@ -369,14 +384,29 @@
369 384 }
370 385  
371 386 if (type & PERF_SAMPLE_CALLCHAIN) {
  387 + if (sample_overlap(event, array, sizeof(data->callchain->nr)))
  388 + return -EFAULT;
  389 +
372 390 data->callchain = (struct ip_callchain *)array;
  391 +
  392 + if (sample_overlap(event, array, data->callchain->nr))
  393 + return -EFAULT;
  394 +
373 395 array += 1 + data->callchain->nr;
374 396 }
375 397  
376 398 if (type & PERF_SAMPLE_RAW) {
377 399 u32 *p = (u32 *)array;
  400 +
  401 + if (sample_overlap(event, array, sizeof(u32)))
  402 + return -EFAULT;
  403 +
378 404 data->raw_size = *p;
379 405 p++;
  406 +
  407 + if (sample_overlap(event, p, data->raw_size))
  408 + return -EFAULT;
  409 +
380 410 data->raw_data = p;
381 411 }
382 412  
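sample_overlap() captures the invariant behind the "Robustify dynamic sample content fetch" commit: a variable-length field (the callchain array, the raw-data blob) must end inside the record that event->header.size declares, and the length word itself must be validated before it is read. A standalone sketch of the same check against a fabricated record (the struct name and sizes here are invented for illustration):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct fake_header { uint32_t type; uint16_t misc; uint16_t size; };

/* Same test as sample_overlap(): a field starting at 'offset' with
 * 'size' bytes must not run past the end of the record. */
static int overlaps(const void *event, uint16_t event_size,
		    const void *offset, uint64_t size)
{
	const char *base = event;
	return (const char *)offset + size > base + event_size;
}

int main(void)
{
	char buf[64];
	struct fake_header *h = (struct fake_header *)buf;

	memset(buf, 0, sizeof(buf));
	h->size = 32; /* the record claims to be 32 bytes long */

	/* A raw-data field at offset 16 claiming 64 bytes overruns the
	 * 32-byte record; the real parser returns -EFAULT here. */
	printf("%s\n", overlaps(buf, h->size, buf + 16, 64) ? "reject" : "ok");
	return 0;
}
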
tools/perf/util/header.c
... ... @@ -934,37 +934,6 @@
934 934 return -ENOMEM;
935 935 }
936 936  
937   -u64 perf_evlist__sample_type(struct perf_evlist *evlist)
938   -{
939   - struct perf_evsel *pos;
940   - u64 type = 0;
941   -
942   - list_for_each_entry(pos, &evlist->entries, node) {
943   - if (!type)
944   - type = pos->attr.sample_type;
945   - else if (type != pos->attr.sample_type)
946   - die("non matching sample_type");
947   - }
948   -
949   - return type;
950   -}
951   -
952   -bool perf_evlist__sample_id_all(const struct perf_evlist *evlist)
953   -{
954   - bool value = false, first = true;
955   - struct perf_evsel *pos;
956   -
957   - list_for_each_entry(pos, &evlist->entries, node) {
958   - if (first) {
959   - value = pos->attr.sample_id_all;
960   - first = false;
961   - } else if (value != pos->attr.sample_id_all)
962   - die("non matching sample_id_all");
963   - }
964   -
965   - return value;
966   -}
967   -
968 937 int perf_event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id,
969 938 perf_event__handler_t process,
970 939 struct perf_session *session)
tools/perf/util/header.h
... ... @@ -64,8 +64,6 @@
64 64 int perf_header__push_event(u64 id, const char *name);
65 65 char *perf_header__find_event(u64 id);
66 66  
67   -u64 perf_evlist__sample_type(struct perf_evlist *evlist);
68   -bool perf_evlist__sample_id_all(const struct perf_evlist *evlist);
69 67 void perf_header__set_feat(struct perf_header *header, int feat);
70 68 void perf_header__clear_feat(struct perf_header *header, int feat);
71 69 bool perf_header__has_feat(const struct perf_header *header, int feat);
tools/perf/util/include/linux/list.h
1 1 #include <linux/kernel.h>
  2 +#include <linux/prefetch.h>
  3 +
2 4 #include "../../../../include/linux/list.h"
3 5  
4 6 #ifndef PERF_LIST_H
tools/perf/util/python.c
... ... @@ -675,6 +675,7 @@
675 675 union perf_event *event;
676 676 int sample_id_all = 1, cpu;
677 677 static char *kwlist[] = {"sample_id_all", NULL, NULL};
  678 + int err;
678 679  
679 680 if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|i", kwlist,
680 681 &cpu, &sample_id_all))
... ... @@ -690,11 +691,17 @@
690 691 return PyErr_NoMemory();
691 692  
692 693 first = list_entry(evlist->entries.next, struct perf_evsel, node);
693   - perf_event__parse_sample(event, first->attr.sample_type, sample_id_all,
694   - &pevent->sample);
  694 + err = perf_event__parse_sample(event, first->attr.sample_type,
  695 + perf_sample_size(first->attr.sample_type),
  696 + sample_id_all, &pevent->sample);
  697 + if (err) {
  698 + pr_err("Can't parse sample, err = %d\n", err);
  699 + goto end;
  700 + }
  701 +
695 702 return pyevent;
696 703 }
697   -
  704 +end:
698 705 Py_INCREF(Py_None);
699 706 return Py_None;
700 707 }
tools/perf/util/session.c
... ... @@ -97,6 +97,7 @@
97 97 void perf_session__update_sample_type(struct perf_session *self)
98 98 {
99 99 self->sample_type = perf_evlist__sample_type(self->evlist);
  100 + self->sample_size = perf_sample_size(self->sample_type);
100 101 self->sample_id_all = perf_evlist__sample_id_all(self->evlist);
101 102 perf_session__id_header_size(self);
102 103 }
... ... @@ -479,6 +480,7 @@
479 480 struct perf_sample sample;
480 481 u64 limit = os->next_flush;
481 482 u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL;
  483 + int ret;
482 484  
483 485 if (!ops->ordered_samples || !limit)
484 486 return;
... ... @@ -487,9 +489,12 @@
487 489 if (iter->timestamp > limit)
488 490 break;
489 491  
490   - perf_session__parse_sample(s, iter->event, &sample);
491   - perf_session_deliver_event(s, iter->event, &sample, ops,
492   - iter->file_offset);
  492 + ret = perf_session__parse_sample(s, iter->event, &sample);
  493 + if (ret)
  494 + pr_err("Can't parse sample, err = %d\n", ret);
  495 + else
  496 + perf_session_deliver_event(s, iter->event, &sample, ops,
  497 + iter->file_offset);
493 498  
494 499 os->last_flush = iter->timestamp;
495 500 list_del(&iter->list);
... ... @@ -805,7 +810,9 @@
805 810 /*
806 811 * For all kernel events we get the sample data
807 812 */
808   - perf_session__parse_sample(session, event, &sample);
  813 + ret = perf_session__parse_sample(session, event, &sample);
  814 + if (ret)
  815 + return ret;
809 816  
810 817 /* Preprocess sample records - precheck callchains */
811 818 if (perf_session__preprocess_sample(session, event, &sample))
... ... @@ -953,6 +960,30 @@
953 960 return err;
954 961 }
955 962  
  963 +static union perf_event *
  964 +fetch_mmaped_event(struct perf_session *session,
  965 + u64 head, size_t mmap_size, char *buf)
  966 +{
  967 + union perf_event *event;
  968 +
  969 + /*
  970 + * Ensure we have enough space remaining to read
  971 + * the size of the event in the headers.
  972 + */
  973 + if (head + sizeof(event->header) > mmap_size)
  974 + return NULL;
  975 +
  976 + event = (union perf_event *)(buf + head);
  977 +
  978 + if (session->header.needs_swap)
  979 + perf_event_header__bswap(&event->header);
  980 +
  981 + if (head + event->header.size > mmap_size)
  982 + return NULL;
  983 +
  984 + return event;
  985 +}
  986 +
956 987 int __perf_session__process_events(struct perf_session *session,
957 988 u64 data_offset, u64 data_size,
958 989 u64 file_size, struct perf_event_ops *ops)
... ... @@ -1007,15 +1038,8 @@
1007 1038 file_pos = file_offset + head;
1008 1039  
1009 1040 more:
1010   - event = (union perf_event *)(buf + head);
1011   -
1012   - if (session->header.needs_swap)
1013   - perf_event_header__bswap(&event->header);
1014   - size = event->header.size;
1015   - if (size == 0)
1016   - size = 8;
1017   -
1018   - if (head + event->header.size > mmap_size) {
  1041 + event = fetch_mmaped_event(session, head, mmap_size, buf);
  1042 + if (!event) {
1019 1043 if (mmaps[map_idx]) {
1020 1044 munmap(mmaps[map_idx], mmap_size);
1021 1045 mmaps[map_idx] = NULL;
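fetch_mmaped_event() makes the remap path explicit in two stages: first it checks that the event header itself fits inside the mmap window (the "Check we are able to read the event size on mmap" commit), and only then that the full record does; either failure returns NULL so the caller unmaps and remaps at the new offset. This also retires the old "if (size == 0) size = 8;" fallback that could fabricate an event size, the "junk code in mmap size handling" the shortlog refers to.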
tools/perf/util/session.h
... ... @@ -43,6 +43,7 @@
43 43 */
44 44 struct hists hists;
45 45 u64 sample_type;
  46 + int sample_size;
46 47 int fd;
47 48 bool fd_pipe;
48 49 bool repipe;
... ... @@ -159,6 +160,7 @@
159 160 struct perf_sample *sample)
160 161 {
161 162 return perf_event__parse_sample(event, session->sample_type,
  163 + session->sample_size,
162 164 session->sample_id_all, sample);
163 165 }
164 166