Commit e8ff14951e0c852eec6c683436dd4b2d6a5f64a4

Authored by Jiri Olsa
Committed by Greg Kroah-Hartman
1 parent 7f5dada0d5

perf session: Do not fail on processing out of order event

commit f61ff6c06dc8f32c7036013ad802c899ec590607 upstream.

Linus reported perf report command being interrupted due to processing
of 'out of order' event, with following error:

  Timestamp below last timeslice flush
  0x5733a8 [0x28]: failed to process type: 3

I could reproduce the issue and in my case it was caused by one CPU
(mmap) being behind during record and userspace mmap reader seeing the
data after other CPUs data were already stored.

This is expected under some circumstances because we need to limit the
number of events that we queue for reordering when we receive a
PERF_RECORD_FINISHED_ROUND or when we force flush due to memory
pressure.

Reported-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Acked-by: Ingo Molnar <mingo@kernel.org>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Matt Fleming <matt.fleming@intel.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/r/1417016371-30249-1-git-send-email-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Zhiqiang Zhang <zhangzhiqiang.zhang@huawei.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

Showing 2 changed files with 6 additions and 6 deletions Inline Diff

tools/perf/util/event.h
1 #ifndef __PERF_RECORD_H 1 #ifndef __PERF_RECORD_H
2 #define __PERF_RECORD_H 2 #define __PERF_RECORD_H
3 3
4 #include <limits.h> 4 #include <limits.h>
5 #include <stdio.h> 5 #include <stdio.h>
6 6
7 #include "../perf.h" 7 #include "../perf.h"
8 #include "map.h" 8 #include "map.h"
9 #include "build-id.h" 9 #include "build-id.h"
10 #include "perf_regs.h" 10 #include "perf_regs.h"
11 11
12 struct mmap_event { 12 struct mmap_event {
13 struct perf_event_header header; 13 struct perf_event_header header;
14 u32 pid, tid; 14 u32 pid, tid;
15 u64 start; 15 u64 start;
16 u64 len; 16 u64 len;
17 u64 pgoff; 17 u64 pgoff;
18 char filename[PATH_MAX]; 18 char filename[PATH_MAX];
19 }; 19 };
20 20
21 struct mmap2_event { 21 struct mmap2_event {
22 struct perf_event_header header; 22 struct perf_event_header header;
23 u32 pid, tid; 23 u32 pid, tid;
24 u64 start; 24 u64 start;
25 u64 len; 25 u64 len;
26 u64 pgoff; 26 u64 pgoff;
27 u32 maj; 27 u32 maj;
28 u32 min; 28 u32 min;
29 u64 ino; 29 u64 ino;
30 u64 ino_generation; 30 u64 ino_generation;
31 u32 prot; 31 u32 prot;
32 u32 flags; 32 u32 flags;
33 char filename[PATH_MAX]; 33 char filename[PATH_MAX];
34 }; 34 };
35 35
36 struct comm_event { 36 struct comm_event {
37 struct perf_event_header header; 37 struct perf_event_header header;
38 u32 pid, tid; 38 u32 pid, tid;
39 char comm[16]; 39 char comm[16];
40 }; 40 };
41 41
42 struct fork_event { 42 struct fork_event {
43 struct perf_event_header header; 43 struct perf_event_header header;
44 u32 pid, ppid; 44 u32 pid, ppid;
45 u32 tid, ptid; 45 u32 tid, ptid;
46 u64 time; 46 u64 time;
47 }; 47 };
48 48
49 struct lost_event { 49 struct lost_event {
50 struct perf_event_header header; 50 struct perf_event_header header;
51 u64 id; 51 u64 id;
52 u64 lost; 52 u64 lost;
53 }; 53 };
54 54
55 /* 55 /*
56 * PERF_FORMAT_ENABLED | PERF_FORMAT_RUNNING | PERF_FORMAT_ID 56 * PERF_FORMAT_ENABLED | PERF_FORMAT_RUNNING | PERF_FORMAT_ID
57 */ 57 */
58 struct read_event { 58 struct read_event {
59 struct perf_event_header header; 59 struct perf_event_header header;
60 u32 pid, tid; 60 u32 pid, tid;
61 u64 value; 61 u64 value;
62 u64 time_enabled; 62 u64 time_enabled;
63 u64 time_running; 63 u64 time_running;
64 u64 id; 64 u64 id;
65 }; 65 };
66 66
67 struct throttle_event { 67 struct throttle_event {
68 struct perf_event_header header; 68 struct perf_event_header header;
69 u64 time; 69 u64 time;
70 u64 id; 70 u64 id;
71 u64 stream_id; 71 u64 stream_id;
72 }; 72 };
73 73
74 #define PERF_SAMPLE_MASK \ 74 #define PERF_SAMPLE_MASK \
75 (PERF_SAMPLE_IP | PERF_SAMPLE_TID | \ 75 (PERF_SAMPLE_IP | PERF_SAMPLE_TID | \
76 PERF_SAMPLE_TIME | PERF_SAMPLE_ADDR | \ 76 PERF_SAMPLE_TIME | PERF_SAMPLE_ADDR | \
77 PERF_SAMPLE_ID | PERF_SAMPLE_STREAM_ID | \ 77 PERF_SAMPLE_ID | PERF_SAMPLE_STREAM_ID | \
78 PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD | \ 78 PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD | \
79 PERF_SAMPLE_IDENTIFIER) 79 PERF_SAMPLE_IDENTIFIER)
80 80
81 /* perf sample has 16 bits size limit */ 81 /* perf sample has 16 bits size limit */
82 #define PERF_SAMPLE_MAX_SIZE (1 << 16) 82 #define PERF_SAMPLE_MAX_SIZE (1 << 16)
83 83
84 struct sample_event { 84 struct sample_event {
85 struct perf_event_header header; 85 struct perf_event_header header;
86 u64 array[]; 86 u64 array[];
87 }; 87 };
88 88
89 struct regs_dump { 89 struct regs_dump {
90 u64 abi; 90 u64 abi;
91 u64 mask; 91 u64 mask;
92 u64 *regs; 92 u64 *regs;
93 93
94 /* Cached values/mask filled by first register access. */ 94 /* Cached values/mask filled by first register access. */
95 u64 cache_regs[PERF_REGS_MAX]; 95 u64 cache_regs[PERF_REGS_MAX];
96 u64 cache_mask; 96 u64 cache_mask;
97 }; 97 };
98 98
99 struct stack_dump { 99 struct stack_dump {
100 u16 offset; 100 u16 offset;
101 u64 size; 101 u64 size;
102 char *data; 102 char *data;
103 }; 103 };
104 104
105 struct sample_read_value { 105 struct sample_read_value {
106 u64 value; 106 u64 value;
107 u64 id; 107 u64 id;
108 }; 108 };
109 109
110 struct sample_read { 110 struct sample_read {
111 u64 time_enabled; 111 u64 time_enabled;
112 u64 time_running; 112 u64 time_running;
113 union { 113 union {
114 struct { 114 struct {
115 u64 nr; 115 u64 nr;
116 struct sample_read_value *values; 116 struct sample_read_value *values;
117 } group; 117 } group;
118 struct sample_read_value one; 118 struct sample_read_value one;
119 }; 119 };
120 }; 120 };
121 121
122 struct ip_callchain { 122 struct ip_callchain {
123 u64 nr; 123 u64 nr;
124 u64 ips[0]; 124 u64 ips[0];
125 }; 125 };
126 126
127 struct branch_flags { 127 struct branch_flags {
128 u64 mispred:1; 128 u64 mispred:1;
129 u64 predicted:1; 129 u64 predicted:1;
130 u64 in_tx:1; 130 u64 in_tx:1;
131 u64 abort:1; 131 u64 abort:1;
132 u64 reserved:60; 132 u64 reserved:60;
133 }; 133 };
134 134
135 struct branch_entry { 135 struct branch_entry {
136 u64 from; 136 u64 from;
137 u64 to; 137 u64 to;
138 struct branch_flags flags; 138 struct branch_flags flags;
139 }; 139 };
140 140
141 struct branch_stack { 141 struct branch_stack {
142 u64 nr; 142 u64 nr;
143 struct branch_entry entries[0]; 143 struct branch_entry entries[0];
144 }; 144 };
145 145
146 struct perf_sample { 146 struct perf_sample {
147 u64 ip; 147 u64 ip;
148 u32 pid, tid; 148 u32 pid, tid;
149 u64 time; 149 u64 time;
150 u64 addr; 150 u64 addr;
151 u64 id; 151 u64 id;
152 u64 stream_id; 152 u64 stream_id;
153 u64 period; 153 u64 period;
154 u64 weight; 154 u64 weight;
155 u64 transaction; 155 u64 transaction;
156 u32 cpu; 156 u32 cpu;
157 u32 raw_size; 157 u32 raw_size;
158 u64 data_src; 158 u64 data_src;
159 u32 flags; 159 u32 flags;
160 u16 insn_len; 160 u16 insn_len;
161 void *raw_data; 161 void *raw_data;
162 struct ip_callchain *callchain; 162 struct ip_callchain *callchain;
163 struct branch_stack *branch_stack; 163 struct branch_stack *branch_stack;
164 struct regs_dump user_regs; 164 struct regs_dump user_regs;
165 struct stack_dump user_stack; 165 struct stack_dump user_stack;
166 struct sample_read read; 166 struct sample_read read;
167 }; 167 };
168 168
169 #define PERF_MEM_DATA_SRC_NONE \ 169 #define PERF_MEM_DATA_SRC_NONE \
170 (PERF_MEM_S(OP, NA) |\ 170 (PERF_MEM_S(OP, NA) |\
171 PERF_MEM_S(LVL, NA) |\ 171 PERF_MEM_S(LVL, NA) |\
172 PERF_MEM_S(SNOOP, NA) |\ 172 PERF_MEM_S(SNOOP, NA) |\
173 PERF_MEM_S(LOCK, NA) |\ 173 PERF_MEM_S(LOCK, NA) |\
174 PERF_MEM_S(TLB, NA)) 174 PERF_MEM_S(TLB, NA))
175 175
176 struct build_id_event { 176 struct build_id_event {
177 struct perf_event_header header; 177 struct perf_event_header header;
178 pid_t pid; 178 pid_t pid;
179 u8 build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))]; 179 u8 build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
180 char filename[]; 180 char filename[];
181 }; 181 };
182 182
183 enum perf_user_event_type { /* above any possible kernel type */ 183 enum perf_user_event_type { /* above any possible kernel type */
184 PERF_RECORD_USER_TYPE_START = 64, 184 PERF_RECORD_USER_TYPE_START = 64,
185 PERF_RECORD_HEADER_ATTR = 64, 185 PERF_RECORD_HEADER_ATTR = 64,
186 PERF_RECORD_HEADER_EVENT_TYPE = 65, /* depreceated */ 186 PERF_RECORD_HEADER_EVENT_TYPE = 65, /* depreceated */
187 PERF_RECORD_HEADER_TRACING_DATA = 66, 187 PERF_RECORD_HEADER_TRACING_DATA = 66,
188 PERF_RECORD_HEADER_BUILD_ID = 67, 188 PERF_RECORD_HEADER_BUILD_ID = 67,
189 PERF_RECORD_FINISHED_ROUND = 68, 189 PERF_RECORD_FINISHED_ROUND = 68,
190 PERF_RECORD_HEADER_MAX 190 PERF_RECORD_HEADER_MAX
191 }; 191 };
192 192
193 /* 193 /*
194 * The kernel collects the number of events it couldn't send in a stretch and 194 * The kernel collects the number of events it couldn't send in a stretch and
195 * when possible sends this number in a PERF_RECORD_LOST event. The number of 195 * when possible sends this number in a PERF_RECORD_LOST event. The number of
196 * such "chunks" of lost events is stored in .nr_events[PERF_EVENT_LOST] while 196 * such "chunks" of lost events is stored in .nr_events[PERF_EVENT_LOST] while
197 * total_lost tells exactly how many events the kernel in fact lost, i.e. it is 197 * total_lost tells exactly how many events the kernel in fact lost, i.e. it is
198 * the sum of all struct lost_event.lost fields reported. 198 * the sum of all struct lost_event.lost fields reported.
199 * 199 *
200 * The total_period is needed because by default auto-freq is used, so 200 * The total_period is needed because by default auto-freq is used, so
201 * multipling nr_events[PERF_EVENT_SAMPLE] by a frequency isn't possible to get 201 * multipling nr_events[PERF_EVENT_SAMPLE] by a frequency isn't possible to get
202 * the total number of low level events, it is necessary to to sum all struct 202 * the total number of low level events, it is necessary to to sum all struct
203 * sample_event.period and stash the result in total_period. 203 * sample_event.period and stash the result in total_period.
204 */ 204 */
205 struct events_stats { 205 struct events_stats {
206 u64 total_period; 206 u64 total_period;
207 u64 total_non_filtered_period; 207 u64 total_non_filtered_period;
208 u64 total_lost; 208 u64 total_lost;
209 u64 total_invalid_chains; 209 u64 total_invalid_chains;
210 u32 nr_events[PERF_RECORD_HEADER_MAX]; 210 u32 nr_events[PERF_RECORD_HEADER_MAX];
211 u32 nr_non_filtered_samples; 211 u32 nr_non_filtered_samples;
212 u32 nr_lost_warned; 212 u32 nr_lost_warned;
213 u32 nr_unknown_events; 213 u32 nr_unknown_events;
214 u32 nr_invalid_chains; 214 u32 nr_invalid_chains;
215 u32 nr_unknown_id; 215 u32 nr_unknown_id;
216 u32 nr_unprocessable_samples; 216 u32 nr_unprocessable_samples;
217 u32 nr_unordered_events;
217 }; 218 };
218 219
219 struct attr_event { 220 struct attr_event {
220 struct perf_event_header header; 221 struct perf_event_header header;
221 struct perf_event_attr attr; 222 struct perf_event_attr attr;
222 u64 id[]; 223 u64 id[];
223 }; 224 };
224 225
225 #define MAX_EVENT_NAME 64 226 #define MAX_EVENT_NAME 64
226 227
227 struct perf_trace_event_type { 228 struct perf_trace_event_type {
228 u64 event_id; 229 u64 event_id;
229 char name[MAX_EVENT_NAME]; 230 char name[MAX_EVENT_NAME];
230 }; 231 };
231 232
232 struct event_type_event { 233 struct event_type_event {
233 struct perf_event_header header; 234 struct perf_event_header header;
234 struct perf_trace_event_type event_type; 235 struct perf_trace_event_type event_type;
235 }; 236 };
236 237
237 struct tracing_data_event { 238 struct tracing_data_event {
238 struct perf_event_header header; 239 struct perf_event_header header;
239 u32 size; 240 u32 size;
240 }; 241 };
241 242
242 union perf_event { 243 union perf_event {
243 struct perf_event_header header; 244 struct perf_event_header header;
244 struct mmap_event mmap; 245 struct mmap_event mmap;
245 struct mmap2_event mmap2; 246 struct mmap2_event mmap2;
246 struct comm_event comm; 247 struct comm_event comm;
247 struct fork_event fork; 248 struct fork_event fork;
248 struct lost_event lost; 249 struct lost_event lost;
249 struct read_event read; 250 struct read_event read;
250 struct throttle_event throttle; 251 struct throttle_event throttle;
251 struct sample_event sample; 252 struct sample_event sample;
252 struct attr_event attr; 253 struct attr_event attr;
253 struct event_type_event event_type; 254 struct event_type_event event_type;
254 struct tracing_data_event tracing_data; 255 struct tracing_data_event tracing_data;
255 struct build_id_event build_id; 256 struct build_id_event build_id;
256 }; 257 };
257 258
258 void perf_event__print_totals(void); 259 void perf_event__print_totals(void);
259 260
260 struct perf_tool; 261 struct perf_tool;
261 struct thread_map; 262 struct thread_map;
262 263
263 typedef int (*perf_event__handler_t)(struct perf_tool *tool, 264 typedef int (*perf_event__handler_t)(struct perf_tool *tool,
264 union perf_event *event, 265 union perf_event *event,
265 struct perf_sample *sample, 266 struct perf_sample *sample,
266 struct machine *machine); 267 struct machine *machine);
267 268
268 int perf_event__synthesize_thread_map(struct perf_tool *tool, 269 int perf_event__synthesize_thread_map(struct perf_tool *tool,
269 struct thread_map *threads, 270 struct thread_map *threads,
270 perf_event__handler_t process, 271 perf_event__handler_t process,
271 struct machine *machine, bool mmap_data); 272 struct machine *machine, bool mmap_data);
272 int perf_event__synthesize_threads(struct perf_tool *tool, 273 int perf_event__synthesize_threads(struct perf_tool *tool,
273 perf_event__handler_t process, 274 perf_event__handler_t process,
274 struct machine *machine, bool mmap_data); 275 struct machine *machine, bool mmap_data);
275 int perf_event__synthesize_kernel_mmap(struct perf_tool *tool, 276 int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
276 perf_event__handler_t process, 277 perf_event__handler_t process,
277 struct machine *machine); 278 struct machine *machine);
278 279
279 int perf_event__synthesize_modules(struct perf_tool *tool, 280 int perf_event__synthesize_modules(struct perf_tool *tool,
280 perf_event__handler_t process, 281 perf_event__handler_t process,
281 struct machine *machine); 282 struct machine *machine);
282 283
283 int perf_event__process_comm(struct perf_tool *tool, 284 int perf_event__process_comm(struct perf_tool *tool,
284 union perf_event *event, 285 union perf_event *event,
285 struct perf_sample *sample, 286 struct perf_sample *sample,
286 struct machine *machine); 287 struct machine *machine);
287 int perf_event__process_lost(struct perf_tool *tool, 288 int perf_event__process_lost(struct perf_tool *tool,
288 union perf_event *event, 289 union perf_event *event,
289 struct perf_sample *sample, 290 struct perf_sample *sample,
290 struct machine *machine); 291 struct machine *machine);
291 int perf_event__process_mmap(struct perf_tool *tool, 292 int perf_event__process_mmap(struct perf_tool *tool,
292 union perf_event *event, 293 union perf_event *event,
293 struct perf_sample *sample, 294 struct perf_sample *sample,
294 struct machine *machine); 295 struct machine *machine);
295 int perf_event__process_mmap2(struct perf_tool *tool, 296 int perf_event__process_mmap2(struct perf_tool *tool,
296 union perf_event *event, 297 union perf_event *event,
297 struct perf_sample *sample, 298 struct perf_sample *sample,
298 struct machine *machine); 299 struct machine *machine);
299 int perf_event__process_fork(struct perf_tool *tool, 300 int perf_event__process_fork(struct perf_tool *tool,
300 union perf_event *event, 301 union perf_event *event,
301 struct perf_sample *sample, 302 struct perf_sample *sample,
302 struct machine *machine); 303 struct machine *machine);
303 int perf_event__process_exit(struct perf_tool *tool, 304 int perf_event__process_exit(struct perf_tool *tool,
304 union perf_event *event, 305 union perf_event *event,
305 struct perf_sample *sample, 306 struct perf_sample *sample,
306 struct machine *machine); 307 struct machine *machine);
307 int perf_event__process(struct perf_tool *tool, 308 int perf_event__process(struct perf_tool *tool,
308 union perf_event *event, 309 union perf_event *event,
309 struct perf_sample *sample, 310 struct perf_sample *sample,
310 struct machine *machine); 311 struct machine *machine);
311 312
312 struct addr_location; 313 struct addr_location;
313 314
314 int perf_event__preprocess_sample(const union perf_event *event, 315 int perf_event__preprocess_sample(const union perf_event *event,
315 struct machine *machine, 316 struct machine *machine,
316 struct addr_location *al, 317 struct addr_location *al,
317 struct perf_sample *sample); 318 struct perf_sample *sample);
318 319
319 struct thread; 320 struct thread;
320 321
321 bool is_bts_event(struct perf_event_attr *attr); 322 bool is_bts_event(struct perf_event_attr *attr);
322 bool sample_addr_correlates_sym(struct perf_event_attr *attr); 323 bool sample_addr_correlates_sym(struct perf_event_attr *attr);
323 void perf_event__preprocess_sample_addr(union perf_event *event, 324 void perf_event__preprocess_sample_addr(union perf_event *event,
324 struct perf_sample *sample, 325 struct perf_sample *sample,
325 struct machine *machine, 326 struct machine *machine,
326 struct thread *thread, 327 struct thread *thread,
327 struct addr_location *al); 328 struct addr_location *al);
328 329
329 const char *perf_event__name(unsigned int id); 330 const char *perf_event__name(unsigned int id);
330 331
331 size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type, 332 size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
332 u64 read_format); 333 u64 read_format);
333 int perf_event__synthesize_sample(union perf_event *event, u64 type, 334 int perf_event__synthesize_sample(union perf_event *event, u64 type,
334 u64 read_format, 335 u64 read_format,
335 const struct perf_sample *sample, 336 const struct perf_sample *sample,
336 bool swapped); 337 bool swapped);
337 338
338 int perf_event__synthesize_mmap_events(struct perf_tool *tool, 339 int perf_event__synthesize_mmap_events(struct perf_tool *tool,
339 union perf_event *event, 340 union perf_event *event,
340 pid_t pid, pid_t tgid, 341 pid_t pid, pid_t tgid,
341 perf_event__handler_t process, 342 perf_event__handler_t process,
342 struct machine *machine, 343 struct machine *machine,
343 bool mmap_data); 344 bool mmap_data);
344 345
345 size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp); 346 size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp);
346 size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp); 347 size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp);
347 size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp); 348 size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp);
348 size_t perf_event__fprintf_task(union perf_event *event, FILE *fp); 349 size_t perf_event__fprintf_task(union perf_event *event, FILE *fp);
349 size_t perf_event__fprintf(union perf_event *event, FILE *fp); 350 size_t perf_event__fprintf(union perf_event *event, FILE *fp);
350 351
351 u64 kallsyms__get_function_start(const char *kallsyms_filename, 352 u64 kallsyms__get_function_start(const char *kallsyms_filename,
352 const char *symbol_name); 353 const char *symbol_name);
353 354
354 #endif /* __PERF_RECORD_H */ 355 #endif /* __PERF_RECORD_H */
355 356
tools/perf/util/session.c
1 #include <linux/kernel.h> 1 #include <linux/kernel.h>
2 #include <traceevent/event-parse.h> 2 #include <traceevent/event-parse.h>
3 3
4 #include <byteswap.h> 4 #include <byteswap.h>
5 #include <unistd.h> 5 #include <unistd.h>
6 #include <sys/types.h> 6 #include <sys/types.h>
7 #include <sys/mman.h> 7 #include <sys/mman.h>
8 8
9 #include "evlist.h" 9 #include "evlist.h"
10 #include "evsel.h" 10 #include "evsel.h"
11 #include "session.h" 11 #include "session.h"
12 #include "tool.h" 12 #include "tool.h"
13 #include "sort.h" 13 #include "sort.h"
14 #include "util.h" 14 #include "util.h"
15 #include "cpumap.h" 15 #include "cpumap.h"
16 #include "perf_regs.h" 16 #include "perf_regs.h"
17 #include "asm/bug.h" 17 #include "asm/bug.h"
18 18
19 static int perf_session__open(struct perf_session *session) 19 static int perf_session__open(struct perf_session *session)
20 { 20 {
21 struct perf_data_file *file = session->file; 21 struct perf_data_file *file = session->file;
22 22
23 if (perf_session__read_header(session) < 0) { 23 if (perf_session__read_header(session) < 0) {
24 pr_err("incompatible file format (rerun with -v to learn more)"); 24 pr_err("incompatible file format (rerun with -v to learn more)");
25 return -1; 25 return -1;
26 } 26 }
27 27
28 if (perf_data_file__is_pipe(file)) 28 if (perf_data_file__is_pipe(file))
29 return 0; 29 return 0;
30 30
31 if (!perf_evlist__valid_sample_type(session->evlist)) { 31 if (!perf_evlist__valid_sample_type(session->evlist)) {
32 pr_err("non matching sample_type"); 32 pr_err("non matching sample_type");
33 return -1; 33 return -1;
34 } 34 }
35 35
36 if (!perf_evlist__valid_sample_id_all(session->evlist)) { 36 if (!perf_evlist__valid_sample_id_all(session->evlist)) {
37 pr_err("non matching sample_id_all"); 37 pr_err("non matching sample_id_all");
38 return -1; 38 return -1;
39 } 39 }
40 40
41 if (!perf_evlist__valid_read_format(session->evlist)) { 41 if (!perf_evlist__valid_read_format(session->evlist)) {
42 pr_err("non matching read_format"); 42 pr_err("non matching read_format");
43 return -1; 43 return -1;
44 } 44 }
45 45
46 return 0; 46 return 0;
47 } 47 }
48 48
49 void perf_session__set_id_hdr_size(struct perf_session *session) 49 void perf_session__set_id_hdr_size(struct perf_session *session)
50 { 50 {
51 u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist); 51 u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);
52 52
53 machines__set_id_hdr_size(&session->machines, id_hdr_size); 53 machines__set_id_hdr_size(&session->machines, id_hdr_size);
54 } 54 }
55 55
56 int perf_session__create_kernel_maps(struct perf_session *session) 56 int perf_session__create_kernel_maps(struct perf_session *session)
57 { 57 {
58 int ret = machine__create_kernel_maps(&session->machines.host); 58 int ret = machine__create_kernel_maps(&session->machines.host);
59 59
60 if (ret >= 0) 60 if (ret >= 0)
61 ret = machines__create_guest_kernel_maps(&session->machines); 61 ret = machines__create_guest_kernel_maps(&session->machines);
62 return ret; 62 return ret;
63 } 63 }
64 64
65 static void perf_session__destroy_kernel_maps(struct perf_session *session) 65 static void perf_session__destroy_kernel_maps(struct perf_session *session)
66 { 66 {
67 machines__destroy_kernel_maps(&session->machines); 67 machines__destroy_kernel_maps(&session->machines);
68 } 68 }
69 69
70 static bool perf_session__has_comm_exec(struct perf_session *session) 70 static bool perf_session__has_comm_exec(struct perf_session *session)
71 { 71 {
72 struct perf_evsel *evsel; 72 struct perf_evsel *evsel;
73 73
74 evlist__for_each(session->evlist, evsel) { 74 evlist__for_each(session->evlist, evsel) {
75 if (evsel->attr.comm_exec) 75 if (evsel->attr.comm_exec)
76 return true; 76 return true;
77 } 77 }
78 78
79 return false; 79 return false;
80 } 80 }
81 81
82 static void perf_session__set_comm_exec(struct perf_session *session) 82 static void perf_session__set_comm_exec(struct perf_session *session)
83 { 83 {
84 bool comm_exec = perf_session__has_comm_exec(session); 84 bool comm_exec = perf_session__has_comm_exec(session);
85 85
86 machines__set_comm_exec(&session->machines, comm_exec); 86 machines__set_comm_exec(&session->machines, comm_exec);
87 } 87 }
88 88
89 struct perf_session *perf_session__new(struct perf_data_file *file, 89 struct perf_session *perf_session__new(struct perf_data_file *file,
90 bool repipe, struct perf_tool *tool) 90 bool repipe, struct perf_tool *tool)
91 { 91 {
92 struct perf_session *session = zalloc(sizeof(*session)); 92 struct perf_session *session = zalloc(sizeof(*session));
93 93
94 if (!session) 94 if (!session)
95 goto out; 95 goto out;
96 96
97 session->repipe = repipe; 97 session->repipe = repipe;
98 ordered_events__init(&session->ordered_events); 98 ordered_events__init(&session->ordered_events);
99 machines__init(&session->machines); 99 machines__init(&session->machines);
100 100
101 if (file) { 101 if (file) {
102 if (perf_data_file__open(file)) 102 if (perf_data_file__open(file))
103 goto out_delete; 103 goto out_delete;
104 104
105 session->file = file; 105 session->file = file;
106 106
107 if (perf_data_file__is_read(file)) { 107 if (perf_data_file__is_read(file)) {
108 if (perf_session__open(session) < 0) 108 if (perf_session__open(session) < 0)
109 goto out_close; 109 goto out_close;
110 110
111 perf_session__set_id_hdr_size(session); 111 perf_session__set_id_hdr_size(session);
112 perf_session__set_comm_exec(session); 112 perf_session__set_comm_exec(session);
113 } 113 }
114 } 114 }
115 115
116 if (!file || perf_data_file__is_write(file)) { 116 if (!file || perf_data_file__is_write(file)) {
117 /* 117 /*
118 * In O_RDONLY mode this will be performed when reading the 118 * In O_RDONLY mode this will be performed when reading the
119 * kernel MMAP event, in perf_event__process_mmap(). 119 * kernel MMAP event, in perf_event__process_mmap().
120 */ 120 */
121 if (perf_session__create_kernel_maps(session) < 0) 121 if (perf_session__create_kernel_maps(session) < 0)
122 pr_warning("Cannot read kernel map\n"); 122 pr_warning("Cannot read kernel map\n");
123 } 123 }
124 124
125 if (tool && tool->ordering_requires_timestamps && 125 if (tool && tool->ordering_requires_timestamps &&
126 tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) { 126 tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) {
127 dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n"); 127 dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
128 tool->ordered_events = false; 128 tool->ordered_events = false;
129 } 129 }
130 130
131 return session; 131 return session;
132 132
133 out_close: 133 out_close:
134 perf_data_file__close(file); 134 perf_data_file__close(file);
135 out_delete: 135 out_delete:
136 perf_session__delete(session); 136 perf_session__delete(session);
137 out: 137 out:
138 return NULL; 138 return NULL;
139 } 139 }
140 140
141 static void perf_session__delete_dead_threads(struct perf_session *session) 141 static void perf_session__delete_dead_threads(struct perf_session *session)
142 { 142 {
143 machine__delete_dead_threads(&session->machines.host); 143 machine__delete_dead_threads(&session->machines.host);
144 } 144 }
145 145
146 static void perf_session__delete_threads(struct perf_session *session) 146 static void perf_session__delete_threads(struct perf_session *session)
147 { 147 {
148 machine__delete_threads(&session->machines.host); 148 machine__delete_threads(&session->machines.host);
149 } 149 }
150 150
151 static void perf_session_env__delete(struct perf_session_env *env) 151 static void perf_session_env__delete(struct perf_session_env *env)
152 { 152 {
153 zfree(&env->hostname); 153 zfree(&env->hostname);
154 zfree(&env->os_release); 154 zfree(&env->os_release);
155 zfree(&env->version); 155 zfree(&env->version);
156 zfree(&env->arch); 156 zfree(&env->arch);
157 zfree(&env->cpu_desc); 157 zfree(&env->cpu_desc);
158 zfree(&env->cpuid); 158 zfree(&env->cpuid);
159 159
160 zfree(&env->cmdline); 160 zfree(&env->cmdline);
161 zfree(&env->sibling_cores); 161 zfree(&env->sibling_cores);
162 zfree(&env->sibling_threads); 162 zfree(&env->sibling_threads);
163 zfree(&env->numa_nodes); 163 zfree(&env->numa_nodes);
164 zfree(&env->pmu_mappings); 164 zfree(&env->pmu_mappings);
165 } 165 }
166 166
167 void perf_session__delete(struct perf_session *session) 167 void perf_session__delete(struct perf_session *session)
168 { 168 {
169 perf_session__destroy_kernel_maps(session); 169 perf_session__destroy_kernel_maps(session);
170 perf_session__delete_dead_threads(session); 170 perf_session__delete_dead_threads(session);
171 perf_session__delete_threads(session); 171 perf_session__delete_threads(session);
172 perf_session_env__delete(&session->header.env); 172 perf_session_env__delete(&session->header.env);
173 machines__exit(&session->machines); 173 machines__exit(&session->machines);
174 if (session->file) 174 if (session->file)
175 perf_data_file__close(session->file); 175 perf_data_file__close(session->file);
176 free(session); 176 free(session);
177 } 177 }
178 178
179 static int process_event_synth_tracing_data_stub(struct perf_tool *tool 179 static int process_event_synth_tracing_data_stub(struct perf_tool *tool
180 __maybe_unused, 180 __maybe_unused,
181 union perf_event *event 181 union perf_event *event
182 __maybe_unused, 182 __maybe_unused,
183 struct perf_session *session 183 struct perf_session *session
184 __maybe_unused) 184 __maybe_unused)
185 { 185 {
186 dump_printf(": unhandled!\n"); 186 dump_printf(": unhandled!\n");
187 return 0; 187 return 0;
188 } 188 }
189 189
190 static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused, 190 static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
191 union perf_event *event __maybe_unused, 191 union perf_event *event __maybe_unused,
192 struct perf_evlist **pevlist 192 struct perf_evlist **pevlist
193 __maybe_unused) 193 __maybe_unused)
194 { 194 {
195 dump_printf(": unhandled!\n"); 195 dump_printf(": unhandled!\n");
196 return 0; 196 return 0;
197 } 197 }
198 198
199 static int process_event_sample_stub(struct perf_tool *tool __maybe_unused, 199 static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
200 union perf_event *event __maybe_unused, 200 union perf_event *event __maybe_unused,
201 struct perf_sample *sample __maybe_unused, 201 struct perf_sample *sample __maybe_unused,
202 struct perf_evsel *evsel __maybe_unused, 202 struct perf_evsel *evsel __maybe_unused,
203 struct machine *machine __maybe_unused) 203 struct machine *machine __maybe_unused)
204 { 204 {
205 dump_printf(": unhandled!\n"); 205 dump_printf(": unhandled!\n");
206 return 0; 206 return 0;
207 } 207 }
208 208
209 static int process_event_stub(struct perf_tool *tool __maybe_unused, 209 static int process_event_stub(struct perf_tool *tool __maybe_unused,
210 union perf_event *event __maybe_unused, 210 union perf_event *event __maybe_unused,
211 struct perf_sample *sample __maybe_unused, 211 struct perf_sample *sample __maybe_unused,
212 struct machine *machine __maybe_unused) 212 struct machine *machine __maybe_unused)
213 { 213 {
214 dump_printf(": unhandled!\n"); 214 dump_printf(": unhandled!\n");
215 return 0; 215 return 0;
216 } 216 }
217 217
/*
 * Default handler for session-level events (finished round, build id)
 * when the tool does not install one: log "unhandled" and succeed.
 */
static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
				       union perf_event *event __maybe_unused,
				       struct perf_session *perf_session
				       __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}
226 226
227 static int process_finished_round(struct perf_tool *tool, 227 static int process_finished_round(struct perf_tool *tool,
228 union perf_event *event, 228 union perf_event *event,
229 struct perf_session *session); 229 struct perf_session *session);
230 230
/*
 * Fill in a default for every tool callback left NULL: most fall back
 * to a "print and ignore" stub, lost events get real accounting via
 * perf_event__process_lost, and the finished-round handler depends on
 * whether the tool requested ordered (time sorted) event delivery.
 */
void perf_tool__fill_defaults(struct perf_tool *tool)
{
	if (tool->sample == NULL)
		tool->sample = process_event_sample_stub;
	if (tool->mmap == NULL)
		tool->mmap = process_event_stub;
	if (tool->mmap2 == NULL)
		tool->mmap2 = process_event_stub;
	if (tool->comm == NULL)
		tool->comm = process_event_stub;
	if (tool->fork == NULL)
		tool->fork = process_event_stub;
	if (tool->exit == NULL)
		tool->exit = process_event_stub;
	if (tool->lost == NULL)
		tool->lost = perf_event__process_lost;
	if (tool->read == NULL)
		tool->read = process_event_sample_stub;
	if (tool->throttle == NULL)
		tool->throttle = process_event_stub;
	if (tool->unthrottle == NULL)
		tool->unthrottle = process_event_stub;
	if (tool->attr == NULL)
		tool->attr = process_event_synth_attr_stub;
	if (tool->tracing_data == NULL)
		tool->tracing_data = process_event_synth_tracing_data_stub;
	if (tool->build_id == NULL)
		tool->build_id = process_finished_round_stub;
	if (tool->finished_round == NULL) {
		/* Flushing on a round only makes sense when reordering. */
		if (tool->ordered_events)
			tool->finished_round = process_finished_round;
		else
			tool->finished_round = process_finished_round_stub;
	}
}
266 266
267 static void swap_sample_id_all(union perf_event *event, void *data) 267 static void swap_sample_id_all(union perf_event *event, void *data)
268 { 268 {
269 void *end = (void *) event + event->header.size; 269 void *end = (void *) event + event->header.size;
270 int size = end - data; 270 int size = end - data;
271 271
272 BUG_ON(size % sizeof(u64)); 272 BUG_ON(size % sizeof(u64));
273 mem_bswap_64(data, size); 273 mem_bswap_64(data, size);
274 } 274 }
275 275
276 static void perf_event__all64_swap(union perf_event *event, 276 static void perf_event__all64_swap(union perf_event *event,
277 bool sample_id_all __maybe_unused) 277 bool sample_id_all __maybe_unused)
278 { 278 {
279 struct perf_event_header *hdr = &event->header; 279 struct perf_event_header *hdr = &event->header;
280 mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr)); 280 mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
281 } 281 }
282 282
/*
 * Byte-swap a PERF_RECORD_COMM event.  The optional sample_id trailer
 * starts after the NUL-terminated, u64-aligned comm string.
 */
static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);

	if (sample_id_all) {
		void *data = &event->comm.comm;

		/* Skip past the string, padded up to the next u64. */
		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}
295 295
/*
 * Byte-swap a PERF_RECORD_MMAP event.  The optional sample_id trailer
 * starts after the NUL-terminated, u64-aligned filename.
 */
static void perf_event__mmap_swap(union perf_event *event,
				  bool sample_id_all)
{
	event->mmap.pid = bswap_32(event->mmap.pid);
	event->mmap.tid = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);

	if (sample_id_all) {
		void *data = &event->mmap.filename;

		/* Skip past the string, padded up to the next u64. */
		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}
312 312
/*
 * Byte-swap a PERF_RECORD_MMAP2 event: like MMAP but with the extra
 * device (maj/min) and inode fields.  The optional sample_id trailer
 * starts after the NUL-terminated, u64-aligned filename.
 */
static void perf_event__mmap2_swap(union perf_event *event,
				   bool sample_id_all)
{
	event->mmap2.pid = bswap_32(event->mmap2.pid);
	event->mmap2.tid = bswap_32(event->mmap2.tid);
	event->mmap2.start = bswap_64(event->mmap2.start);
	event->mmap2.len = bswap_64(event->mmap2.len);
	event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
	event->mmap2.maj = bswap_32(event->mmap2.maj);
	event->mmap2.min = bswap_32(event->mmap2.min);
	event->mmap2.ino = bswap_64(event->mmap2.ino);

	if (sample_id_all) {
		void *data = &event->mmap2.filename;

		/* Skip past the string, padded up to the next u64. */
		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}
/*
 * Byte-swap a PERF_RECORD_FORK or PERF_RECORD_EXIT event; the trailer,
 * if present, immediately follows the fixed-size fork_event body.
 */
static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
	event->fork.pid = bswap_32(event->fork.pid);
	event->fork.tid = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);

	if (sample_id_all)
		swap_sample_id_all(event, &event->fork + 1);
}
343 343
/*
 * Byte-swap a PERF_RECORD_READ event; the trailer, if present,
 * immediately follows the fixed-size read_event body.
 */
static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
	event->read.pid = bswap_32(event->read.pid);
	event->read.tid = bswap_32(event->read.tid);
	event->read.value = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id = bswap_64(event->read.id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->read + 1);
}
356 356
/*
 * Byte-swap a PERF_RECORD_THROTTLE or PERF_RECORD_UNTHROTTLE event;
 * the trailer, if present, follows the fixed-size throttle body.
 */
static void perf_event__throttle_swap(union perf_event *event,
				      bool sample_id_all)
{
	event->throttle.time = bswap_64(event->throttle.time);
	event->throttle.id = bswap_64(event->throttle.id);
	event->throttle.stream_id = bswap_64(event->throttle.stream_id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->throttle + 1);
}
367 367
368 static u8 revbyte(u8 b) 368 static u8 revbyte(u8 b)
369 { 369 {
370 int rev = (b >> 4) | ((b & 0xf) << 4); 370 int rev = (b >> 4) | ((b & 0xf) << 4);
371 rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2); 371 rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
372 rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1); 372 rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
373 return (u8) rev; 373 return (u8) rev;
374 } 374 }
375 375
376 /* 376 /*
377 * XXX this is hack in attempt to carry flags bitfield 377 * XXX this is hack in attempt to carry flags bitfield
 * through endian village. ABI says:
379 * 379 *
380 * Bit-fields are allocated from right to left (least to most significant) 380 * Bit-fields are allocated from right to left (least to most significant)
381 * on little-endian implementations and from left to right (most to least 381 * on little-endian implementations and from left to right (most to least
382 * significant) on big-endian implementations. 382 * significant) on big-endian implementations.
383 * 383 *
384 * The above seems to be byte specific, so we need to reverse each 384 * The above seems to be byte specific, so we need to reverse each
385 * byte of the bitfield. 'Internet' also says this might be implementation 385 * byte of the bitfield. 'Internet' also says this might be implementation
386 * specific and we probably need proper fix and carry perf_event_attr 386 * specific and we probably need proper fix and carry perf_event_attr
387 * bitfield flags in separate data file FEAT_ section. Thought this seems 387 * bitfield flags in separate data file FEAT_ section. Thought this seems
388 * to work for now. 388 * to work for now.
389 */ 389 */
390 static void swap_bitfield(u8 *p, unsigned len) 390 static void swap_bitfield(u8 *p, unsigned len)
391 { 391 {
392 unsigned i; 392 unsigned i;
393 393
394 for (i = 0; i < len; i++) { 394 for (i = 0; i < len; i++) {
395 *p = revbyte(*p); 395 *p = revbyte(*p);
396 p++; 396 p++;
397 } 397 }
398 } 398 }
399 399
/*
 * Byte-swap a perf_event_attr in place.  Exported for swapping the
 * attributes stored in the file header.  The bitfield flags word that
 * follows read_format is handled per-byte by swap_bitfield(), see the
 * comment above that function.
 */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
	attr->type = bswap_32(attr->type);
	attr->size = bswap_32(attr->size);
	attr->config = bswap_64(attr->config);
	attr->sample_period = bswap_64(attr->sample_period);
	attr->sample_type = bswap_64(attr->sample_type);
	attr->read_format = bswap_64(attr->read_format);
	attr->wakeup_events = bswap_32(attr->wakeup_events);
	attr->bp_type = bswap_32(attr->bp_type);
	attr->bp_addr = bswap_64(attr->bp_addr);
	attr->bp_len = bswap_64(attr->bp_len);
	attr->branch_sample_type = bswap_64(attr->branch_sample_type);
	attr->sample_regs_user = bswap_64(attr->sample_regs_user);
	attr->sample_stack_user = bswap_32(attr->sample_stack_user);

	/* The u64 of flag bitfields right after read_format. */
	swap_bitfield((u8 *) (&attr->read_format + 1), sizeof(u64));
}
419 419
/*
 * Byte-swap a PERF_RECORD_HEADER_ATTR event: the embedded
 * perf_event_attr first, then the variable-length id array that fills
 * the rest of the record.
 */
static void perf_event__hdr_attr_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	size_t size;

	perf_event__attr_swap(&event->attr.attr);

	/* Remaining bytes after the attr are an array of u64 ids. */
	size = event->header.size;
	size -= (void *)&event->attr.id - (void *)event;
	mem_bswap_64(event->attr.id, size);
}
431 431
/* Byte-swap the event_id of a PERF_RECORD_HEADER_EVENT_TYPE event. */
static void perf_event__event_type_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
}
438 438
/* Byte-swap the size field of a PERF_RECORD_HEADER_TRACING_DATA event. */
static void perf_event__tracing_data_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->tracing_data.size = bswap_32(event->tracing_data.size);
}
444 444
445 typedef void (*perf_event__swap_op)(union perf_event *event, 445 typedef void (*perf_event__swap_op)(union perf_event *event,
446 bool sample_id_all); 446 bool sample_id_all);
447 447
/*
 * Per-record-type byte-swap handlers, indexed by PERF_RECORD_* type.
 * A NULL entry means the record needs no swapping (or is unsupported).
 */
static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP]		  = perf_event__mmap_swap,
	[PERF_RECORD_MMAP2]		  = perf_event__mmap2_swap,
	[PERF_RECORD_COMM]		  = perf_event__comm_swap,
	[PERF_RECORD_FORK]		  = perf_event__task_swap,
	[PERF_RECORD_EXIT]		  = perf_event__task_swap,
	[PERF_RECORD_LOST]		  = perf_event__all64_swap,
	[PERF_RECORD_READ]		  = perf_event__read_swap,
	[PERF_RECORD_THROTTLE]		  = perf_event__throttle_swap,
	[PERF_RECORD_UNTHROTTLE]	  = perf_event__throttle_swap,
	[PERF_RECORD_SAMPLE]		  = perf_event__all64_swap,
	[PERF_RECORD_HEADER_ATTR]	  = perf_event__hdr_attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE]	  = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID]	  = NULL,
	[PERF_RECORD_HEADER_MAX]	  = NULL,
};
465 465
466 /* 466 /*
467 * When perf record finishes a pass on every buffers, it records this pseudo 467 * When perf record finishes a pass on every buffers, it records this pseudo
468 * event. 468 * event.
469 * We record the max timestamp t found in the pass n. 469 * We record the max timestamp t found in the pass n.
470 * Assuming these timestamps are monotonic across cpus, we know that if 470 * Assuming these timestamps are monotonic across cpus, we know that if
471 * a buffer still has events with timestamps below t, they will be all 471 * a buffer still has events with timestamps below t, they will be all
472 * available and then read in the pass n + 1. 472 * available and then read in the pass n + 1.
473 * Hence when we start to read the pass n + 2, we can safely flush every 473 * Hence when we start to read the pass n + 2, we can safely flush every
474 * events with timestamps below t. 474 * events with timestamps below t.
475 * 475 *
476 * ============ PASS n ================= 476 * ============ PASS n =================
477 * CPU 0 | CPU 1 477 * CPU 0 | CPU 1
478 * | 478 * |
479 * cnt1 timestamps | cnt2 timestamps 479 * cnt1 timestamps | cnt2 timestamps
480 * 1 | 2 480 * 1 | 2
481 * 2 | 3 481 * 2 | 3
482 * - | 4 <--- max recorded 482 * - | 4 <--- max recorded
483 * 483 *
484 * ============ PASS n + 1 ============== 484 * ============ PASS n + 1 ==============
485 * CPU 0 | CPU 1 485 * CPU 0 | CPU 1
486 * | 486 * |
487 * cnt1 timestamps | cnt2 timestamps 487 * cnt1 timestamps | cnt2 timestamps
488 * 3 | 5 488 * 3 | 5
489 * 4 | 6 489 * 4 | 6
490 * 5 | 7 <---- max recorded 490 * 5 | 7 <---- max recorded
491 * 491 *
492 * Flush every events below timestamp 4 492 * Flush every events below timestamp 4
493 * 493 *
494 * ============ PASS n + 2 ============== 494 * ============ PASS n + 2 ==============
495 * CPU 0 | CPU 1 495 * CPU 0 | CPU 1
496 * | 496 * |
497 * cnt1 timestamps | cnt2 timestamps 497 * cnt1 timestamps | cnt2 timestamps
498 * 6 | 8 498 * 6 | 8
499 * 7 | 9 499 * 7 | 9
500 * - | 10 500 * - | 10
501 * 501 *
502 * Flush every events below timestamp 7 502 * Flush every events below timestamp 7
503 * etc... 503 * etc...
504 */ 504 */
/*
 * Handle PERF_RECORD_FINISHED_ROUND: flush all queued events with
 * timestamps below the previous round's maximum (see the PASS diagram
 * above for why this is safe).
 */
static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event __maybe_unused,
				  struct perf_session *session)
{
	return ordered_events__flush(session, tool, OE_FLUSH__ROUND);
}
511 511
/*
 * Queue @event for time-ordered delivery.
 *
 * Events with no usable timestamp are rejected with -ETIME.  An event
 * older than the last flush point is not an error: this is expected
 * when one mmap lags behind during record, or after a forced (memory
 * pressure) flush, so we just log it and bump nr_unordered_events.
 *
 * Returns 0 on success, -ETIME for a bad timestamp, -ENOMEM if no
 * queue slot could be obtained even after a half flush.
 */
int perf_session_queue_event(struct perf_session *s, union perf_event *event,
			     struct perf_tool *tool, struct perf_sample *sample,
			     u64 file_offset)
{
	struct ordered_events *oe = &s->ordered_events;
	u64 timestamp = sample->time;
	struct ordered_event *new;

	if (!timestamp || timestamp == ~0ULL)
		return -ETIME;

	if (timestamp < oe->last_flush) {
		pr_oe_time(timestamp, "out of order event\n");
		pr_oe_time(oe->last_flush, "last flush, last_flush_type %d\n",
			   oe->last_flush_type);

		s->stats.nr_unordered_events++;
	}

	new = ordered_events__new(oe, timestamp, event);
	if (!new) {
		/* Out of slots: flush half the queue and retry once. */
		ordered_events__flush(s, tool, OE_FLUSH__HALF);
		new = ordered_events__new(oe, timestamp, event);
	}

	if (!new)
		return -ENOMEM;

	new->file_offset = file_offset;
	return 0;
}
547 543
548 static void callchain__printf(struct perf_sample *sample) 544 static void callchain__printf(struct perf_sample *sample)
549 { 545 {
550 unsigned int i; 546 unsigned int i;
551 547
552 printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr); 548 printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr);
553 549
554 for (i = 0; i < sample->callchain->nr; i++) 550 for (i = 0; i < sample->callchain->nr; i++)
555 printf("..... %2d: %016" PRIx64 "\n", 551 printf("..... %2d: %016" PRIx64 "\n",
556 i, sample->callchain->ips[i]); 552 i, sample->callchain->ips[i]);
557 } 553 }
558 554
559 static void branch_stack__printf(struct perf_sample *sample) 555 static void branch_stack__printf(struct perf_sample *sample)
560 { 556 {
561 uint64_t i; 557 uint64_t i;
562 558
563 printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr); 559 printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);
564 560
565 for (i = 0; i < sample->branch_stack->nr; i++) 561 for (i = 0; i < sample->branch_stack->nr; i++)
566 printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 "\n", 562 printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 "\n",
567 i, sample->branch_stack->entries[i].from, 563 i, sample->branch_stack->entries[i].from,
568 sample->branch_stack->entries[i].to); 564 sample->branch_stack->entries[i].to);
569 } 565 }
570 566
/*
 * Dump the register values in @regs.  @mask selects which registers
 * were sampled; values are stored consecutively in mask-bit order, so
 * the running index i advances once per set bit.
 */
static void regs_dump__printf(u64 mask, u64 *regs)
{
	unsigned rid, i = 0;

	for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
		u64 val = regs[i++];

		printf(".... %-5s 0x%" PRIx64 "\n",
		       perf_reg_name(rid), val);
	}
}
582 578
583 static void regs_user__printf(struct perf_sample *sample) 579 static void regs_user__printf(struct perf_sample *sample)
584 { 580 {
585 struct regs_dump *user_regs = &sample->user_regs; 581 struct regs_dump *user_regs = &sample->user_regs;
586 582
587 if (user_regs->regs) { 583 if (user_regs->regs) {
588 u64 mask = user_regs->mask; 584 u64 mask = user_regs->mask;
589 printf("... user regs: mask 0x%" PRIx64 "\n", mask); 585 printf("... user regs: mask 0x%" PRIx64 "\n", mask);
590 regs_dump__printf(mask, user_regs->regs); 586 regs_dump__printf(mask, user_regs->regs);
591 } 587 }
592 } 588 }
593 589
/* Dump the size and offset of the captured user stack chunk. */
static void stack_user__printf(struct stack_dump *dump)
{
	printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
	       dump->size, dump->offset);
}
599 595
/*
 * Print the "cpu time " prefix for a dumped event.  Non-sample events
 * only carry cpu/time when sample_id_all is set; otherwise print the
 * "-1 -1 " placeholder.
 */
static void perf_session__print_tstamp(struct perf_session *session,
				       union perf_event *event,
				       struct perf_sample *sample)
{
	u64 sample_type = __perf_evlist__combined_sample_type(session->evlist);

	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !perf_evlist__sample_id_all(session->evlist)) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if ((sample_type & PERF_SAMPLE_CPU))
		printf("%u ", sample->cpu);

	if (sample_type & PERF_SAMPLE_TIME)
		printf("%" PRIu64 " ", sample->time);
}
618 614
/*
 * Dump the PERF_SAMPLE_READ payload of a sample: enabled/running times
 * if requested, then either the whole group's id/value pairs
 * (PERF_FORMAT_GROUP) or the single counter's id and value.
 */
static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
	printf("... sample_read:\n");

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("...... time enabled %016" PRIx64 "\n",
		       sample->read.time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("...... time running %016" PRIx64 "\n",
		       sample->read.time_running);

	if (read_format & PERF_FORMAT_GROUP) {
		u64 i;

		printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);

		for (i = 0; i < sample->read.group.nr; i++) {
			struct sample_read_value *value;

			value = &sample->read.group.values[i];
			printf("..... id %016" PRIx64
			       ", value %016" PRIx64 "\n",
			       value->id, value->value);
		}
	} else
		printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
		       sample->read.one.id, sample->read.one.value);
}
648 644
/*
 * With -D/--dump-raw-trace, print an event's file offset, size, raw
 * trace payload and record-type name.  No-op otherwise.
 */
static void dump_event(struct perf_session *session, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
	       file_offset, event->header.size, event->header.type);

	trace_event(event);

	if (sample)
		perf_session__print_tstamp(session, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
}
666 662
/*
 * With -D/--dump-raw-trace, print the parsed contents of a sample:
 * ip/pid/tid/period/addr, then each optional payload (callchain,
 * branch stack, user regs/stack, weight, data source, transaction,
 * read values) that the evsel's sample_type says is present.
 */
static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
			struct perf_sample *sample)
{
	u64 sample_type;

	if (!dump_trace)
		return;

	printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period, sample->addr);

	sample_type = evsel->attr.sample_type;

	if (sample_type & PERF_SAMPLE_CALLCHAIN)
		callchain__printf(sample);

	if (sample_type & PERF_SAMPLE_BRANCH_STACK)
		branch_stack__printf(sample);

	if (sample_type & PERF_SAMPLE_REGS_USER)
		regs_user__printf(sample);

	if (sample_type & PERF_SAMPLE_STACK_USER)
		stack_user__printf(&sample->user_stack);

	if (sample_type & PERF_SAMPLE_WEIGHT)
		printf("... weight: %" PRIu64 "\n", sample->weight);

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);

	if (sample_type & PERF_SAMPLE_TRANSACTION)
		printf("... transaction: %" PRIx64 "\n", sample->transaction);

	if (sample_type & PERF_SAMPLE_READ)
		sample_read__printf(sample, evsel->attr.read_format);
}
705 701
/*
 * Pick the machine an event belongs to based on its cpumode: guest
 * kernel/user events are mapped to the guest machine keyed by pid
 * (mmap events carry the pid in the event itself), creating the
 * default guest machine if none exists; everything else goes to the
 * host machine.
 */
static struct machine *
	perf_session__find_machine_for_cpumode(struct perf_session *session,
					       union perf_event *event,
					       struct perf_sample *sample)
{
	const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct machine *machine;

	if (perf_guest &&
	    ((cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
	     (cpumode == PERF_RECORD_MISC_GUEST_USER))) {
		u32 pid;

		if (event->header.type == PERF_RECORD_MMAP
		    || event->header.type == PERF_RECORD_MMAP2)
			pid = event->mmap.pid;
		else
			pid = sample->pid;

		machine = perf_session__find_machine(session, pid);
		if (!machine)
			machine = perf_session__findnew_machine(session,
						DEFAULT_GUEST_KERNEL_ID);
		return machine;
	}

	return &session->machines.host;
}
734 730
/*
 * Deliver one read value to the tool's sample callback: look up the
 * evsel by the value's id, convert the cumulative counter value into a
 * per-sample period (delta against the previously seen value), and
 * count the sample as unknown-id if the id cannot be resolved.
 */
static int deliver_sample_value(struct perf_session *session,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct sample_read_value *v,
				struct machine *machine)
{
	struct perf_sample_id *sid;

	sid = perf_evlist__id2sid(session->evlist, v->id);
	if (sid) {
		sample->id     = v->id;
		sample->period = v->value - sid->period;
		sid->period    = v->value;
	}

	if (!sid || sid->evsel == NULL) {
		++session->stats.nr_unknown_id;
		return 0;
	}

	return tool->sample(tool, event, sample, sid->evsel, machine);
}
758 754
759 static int deliver_sample_group(struct perf_session *session, 755 static int deliver_sample_group(struct perf_session *session,
760 struct perf_tool *tool, 756 struct perf_tool *tool,
761 union perf_event *event, 757 union perf_event *event,
762 struct perf_sample *sample, 758 struct perf_sample *sample,
763 struct machine *machine) 759 struct machine *machine)
764 { 760 {
765 int ret = -EINVAL; 761 int ret = -EINVAL;
766 u64 i; 762 u64 i;
767 763
768 for (i = 0; i < sample->read.group.nr; i++) { 764 for (i = 0; i < sample->read.group.nr; i++) {
769 ret = deliver_sample_value(session, tool, event, sample, 765 ret = deliver_sample_value(session, tool, event, sample,
770 &sample->read.group.values[i], 766 &sample->read.group.values[i],
771 machine); 767 machine);
772 if (ret) 768 if (ret)
773 break; 769 break;
774 } 770 }
775 771
776 return ret; 772 return ret;
777 } 773 }
778 774
779 static int 775 static int
780 perf_session__deliver_sample(struct perf_session *session, 776 perf_session__deliver_sample(struct perf_session *session,
781 struct perf_tool *tool, 777 struct perf_tool *tool,
782 union perf_event *event, 778 union perf_event *event,
783 struct perf_sample *sample, 779 struct perf_sample *sample,
784 struct perf_evsel *evsel, 780 struct perf_evsel *evsel,
785 struct machine *machine) 781 struct machine *machine)
786 { 782 {
787 /* We know evsel != NULL. */ 783 /* We know evsel != NULL. */
788 u64 sample_type = evsel->attr.sample_type; 784 u64 sample_type = evsel->attr.sample_type;
789 u64 read_format = evsel->attr.read_format; 785 u64 read_format = evsel->attr.read_format;
790 786
791 /* Standard sample delievery. */ 787 /* Standard sample delievery. */
792 if (!(sample_type & PERF_SAMPLE_READ)) 788 if (!(sample_type & PERF_SAMPLE_READ))
793 return tool->sample(tool, event, sample, evsel, machine); 789 return tool->sample(tool, event, sample, evsel, machine);
794 790
795 /* For PERF_SAMPLE_READ we have either single or group mode. */ 791 /* For PERF_SAMPLE_READ we have either single or group mode. */
796 if (read_format & PERF_FORMAT_GROUP) 792 if (read_format & PERF_FORMAT_GROUP)
797 return deliver_sample_group(session, tool, event, sample, 793 return deliver_sample_group(session, tool, event, sample,
798 machine); 794 machine);
799 else 795 else
800 return deliver_sample_value(session, tool, event, sample, 796 return deliver_sample_value(session, tool, event, sample,
801 &sample->read.one, machine); 797 &sample->read.one, machine);
802 } 798 }
803 799
804 int perf_session__deliver_event(struct perf_session *session, 800 int perf_session__deliver_event(struct perf_session *session,
805 union perf_event *event, 801 union perf_event *event,
806 struct perf_sample *sample, 802 struct perf_sample *sample,
807 struct perf_tool *tool, u64 file_offset) 803 struct perf_tool *tool, u64 file_offset)
808 { 804 {
809 struct perf_evsel *evsel; 805 struct perf_evsel *evsel;
810 struct machine *machine; 806 struct machine *machine;
811 807
812 dump_event(session, event, file_offset, sample); 808 dump_event(session, event, file_offset, sample);
813 809
814 evsel = perf_evlist__id2evsel(session->evlist, sample->id); 810 evsel = perf_evlist__id2evsel(session->evlist, sample->id);
815 811
816 machine = perf_session__find_machine_for_cpumode(session, event, 812 machine = perf_session__find_machine_for_cpumode(session, event,
817 sample); 813 sample);
818 814
819 switch (event->header.type) { 815 switch (event->header.type) {
820 case PERF_RECORD_SAMPLE: 816 case PERF_RECORD_SAMPLE:
821 dump_sample(evsel, event, sample); 817 dump_sample(evsel, event, sample);
822 if (evsel == NULL) { 818 if (evsel == NULL) {
823 ++session->stats.nr_unknown_id; 819 ++session->stats.nr_unknown_id;
824 return 0; 820 return 0;
825 } 821 }
826 if (machine == NULL) { 822 if (machine == NULL) {
827 ++session->stats.nr_unprocessable_samples; 823 ++session->stats.nr_unprocessable_samples;
828 return 0; 824 return 0;
829 } 825 }
830 return perf_session__deliver_sample(session, tool, event, 826 return perf_session__deliver_sample(session, tool, event,
831 sample, evsel, machine); 827 sample, evsel, machine);
832 case PERF_RECORD_MMAP: 828 case PERF_RECORD_MMAP:
833 return tool->mmap(tool, event, sample, machine); 829 return tool->mmap(tool, event, sample, machine);
834 case PERF_RECORD_MMAP2: 830 case PERF_RECORD_MMAP2:
835 return tool->mmap2(tool, event, sample, machine); 831 return tool->mmap2(tool, event, sample, machine);
836 case PERF_RECORD_COMM: 832 case PERF_RECORD_COMM:
837 return tool->comm(tool, event, sample, machine); 833 return tool->comm(tool, event, sample, machine);
838 case PERF_RECORD_FORK: 834 case PERF_RECORD_FORK:
839 return tool->fork(tool, event, sample, machine); 835 return tool->fork(tool, event, sample, machine);
840 case PERF_RECORD_EXIT: 836 case PERF_RECORD_EXIT:
841 return tool->exit(tool, event, sample, machine); 837 return tool->exit(tool, event, sample, machine);
842 case PERF_RECORD_LOST: 838 case PERF_RECORD_LOST:
843 if (tool->lost == perf_event__process_lost) 839 if (tool->lost == perf_event__process_lost)
844 session->stats.total_lost += event->lost.lost; 840 session->stats.total_lost += event->lost.lost;
845 return tool->lost(tool, event, sample, machine); 841 return tool->lost(tool, event, sample, machine);
846 case PERF_RECORD_READ: 842 case PERF_RECORD_READ:
847 return tool->read(tool, event, sample, evsel, machine); 843 return tool->read(tool, event, sample, evsel, machine);
848 case PERF_RECORD_THROTTLE: 844 case PERF_RECORD_THROTTLE:
849 return tool->throttle(tool, event, sample, machine); 845 return tool->throttle(tool, event, sample, machine);
850 case PERF_RECORD_UNTHROTTLE: 846 case PERF_RECORD_UNTHROTTLE:
851 return tool->unthrottle(tool, event, sample, machine); 847 return tool->unthrottle(tool, event, sample, machine);
852 default: 848 default:
853 ++session->stats.nr_unknown_events; 849 ++session->stats.nr_unknown_events;
854 return -1; 850 return -1;
855 } 851 }
856 } 852 }
857 853
858 static s64 perf_session__process_user_event(struct perf_session *session, 854 static s64 perf_session__process_user_event(struct perf_session *session,
859 union perf_event *event, 855 union perf_event *event,
860 struct perf_tool *tool, 856 struct perf_tool *tool,
861 u64 file_offset) 857 u64 file_offset)
862 { 858 {
863 int fd = perf_data_file__fd(session->file); 859 int fd = perf_data_file__fd(session->file);
864 int err; 860 int err;
865 861
866 dump_event(session, event, file_offset, NULL); 862 dump_event(session, event, file_offset, NULL);
867 863
868 /* These events are processed right away */ 864 /* These events are processed right away */
869 switch (event->header.type) { 865 switch (event->header.type) {
870 case PERF_RECORD_HEADER_ATTR: 866 case PERF_RECORD_HEADER_ATTR:
871 err = tool->attr(tool, event, &session->evlist); 867 err = tool->attr(tool, event, &session->evlist);
872 if (err == 0) { 868 if (err == 0) {
873 perf_session__set_id_hdr_size(session); 869 perf_session__set_id_hdr_size(session);
874 perf_session__set_comm_exec(session); 870 perf_session__set_comm_exec(session);
875 } 871 }
876 return err; 872 return err;
877 case PERF_RECORD_HEADER_EVENT_TYPE: 873 case PERF_RECORD_HEADER_EVENT_TYPE:
878 /* 874 /*
879 * Depreceated, but we need to handle it for sake 875 * Depreceated, but we need to handle it for sake
880 * of old data files create in pipe mode. 876 * of old data files create in pipe mode.
881 */ 877 */
882 return 0; 878 return 0;
883 case PERF_RECORD_HEADER_TRACING_DATA: 879 case PERF_RECORD_HEADER_TRACING_DATA:
884 /* setup for reading amidst mmap */ 880 /* setup for reading amidst mmap */
885 lseek(fd, file_offset, SEEK_SET); 881 lseek(fd, file_offset, SEEK_SET);
886 return tool->tracing_data(tool, event, session); 882 return tool->tracing_data(tool, event, session);
887 case PERF_RECORD_HEADER_BUILD_ID: 883 case PERF_RECORD_HEADER_BUILD_ID:
888 return tool->build_id(tool, event, session); 884 return tool->build_id(tool, event, session);
889 case PERF_RECORD_FINISHED_ROUND: 885 case PERF_RECORD_FINISHED_ROUND:
890 return tool->finished_round(tool, event, session); 886 return tool->finished_round(tool, event, session);
891 default: 887 default:
892 return -EINVAL; 888 return -EINVAL;
893 } 889 }
894 } 890 }
895 891
896 static void event_swap(union perf_event *event, bool sample_id_all) 892 static void event_swap(union perf_event *event, bool sample_id_all)
897 { 893 {
898 perf_event__swap_op swap; 894 perf_event__swap_op swap;
899 895
900 swap = perf_event__swap_ops[event->header.type]; 896 swap = perf_event__swap_ops[event->header.type];
901 if (swap) 897 if (swap)
902 swap(event, sample_id_all); 898 swap(event, sample_id_all);
903 } 899 }
904 900
905 int perf_session__peek_event(struct perf_session *session, off_t file_offset, 901 int perf_session__peek_event(struct perf_session *session, off_t file_offset,
906 void *buf, size_t buf_sz, 902 void *buf, size_t buf_sz,
907 union perf_event **event_ptr, 903 union perf_event **event_ptr,
908 struct perf_sample *sample) 904 struct perf_sample *sample)
909 { 905 {
910 union perf_event *event; 906 union perf_event *event;
911 size_t hdr_sz, rest; 907 size_t hdr_sz, rest;
912 int fd; 908 int fd;
913 909
914 if (session->one_mmap && !session->header.needs_swap) { 910 if (session->one_mmap && !session->header.needs_swap) {
915 event = file_offset - session->one_mmap_offset + 911 event = file_offset - session->one_mmap_offset +
916 session->one_mmap_addr; 912 session->one_mmap_addr;
917 goto out_parse_sample; 913 goto out_parse_sample;
918 } 914 }
919 915
920 if (perf_data_file__is_pipe(session->file)) 916 if (perf_data_file__is_pipe(session->file))
921 return -1; 917 return -1;
922 918
923 fd = perf_data_file__fd(session->file); 919 fd = perf_data_file__fd(session->file);
924 hdr_sz = sizeof(struct perf_event_header); 920 hdr_sz = sizeof(struct perf_event_header);
925 921
926 if (buf_sz < hdr_sz) 922 if (buf_sz < hdr_sz)
927 return -1; 923 return -1;
928 924
929 if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 || 925 if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
930 readn(fd, &buf, hdr_sz) != (ssize_t)hdr_sz) 926 readn(fd, &buf, hdr_sz) != (ssize_t)hdr_sz)
931 return -1; 927 return -1;
932 928
933 event = (union perf_event *)buf; 929 event = (union perf_event *)buf;
934 930
935 if (session->header.needs_swap) 931 if (session->header.needs_swap)
936 perf_event_header__bswap(&event->header); 932 perf_event_header__bswap(&event->header);
937 933
938 if (event->header.size < hdr_sz) 934 if (event->header.size < hdr_sz)
939 return -1; 935 return -1;
940 936
941 rest = event->header.size - hdr_sz; 937 rest = event->header.size - hdr_sz;
942 938
943 if (readn(fd, &buf, rest) != (ssize_t)rest) 939 if (readn(fd, &buf, rest) != (ssize_t)rest)
944 return -1; 940 return -1;
945 941
946 if (session->header.needs_swap) 942 if (session->header.needs_swap)
947 event_swap(event, perf_evlist__sample_id_all(session->evlist)); 943 event_swap(event, perf_evlist__sample_id_all(session->evlist));
948 944
949 out_parse_sample: 945 out_parse_sample:
950 946
951 if (sample && event->header.type < PERF_RECORD_USER_TYPE_START && 947 if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
952 perf_evlist__parse_sample(session->evlist, event, sample)) 948 perf_evlist__parse_sample(session->evlist, event, sample))
953 return -1; 949 return -1;
954 950
955 *event_ptr = event; 951 *event_ptr = event;
956 952
957 return 0; 953 return 0;
958 } 954 }
959 955
960 static s64 perf_session__process_event(struct perf_session *session, 956 static s64 perf_session__process_event(struct perf_session *session,
961 union perf_event *event, 957 union perf_event *event,
962 struct perf_tool *tool, 958 struct perf_tool *tool,
963 u64 file_offset) 959 u64 file_offset)
964 { 960 {
965 struct perf_sample sample; 961 struct perf_sample sample;
966 int ret; 962 int ret;
967 963
968 if (session->header.needs_swap) 964 if (session->header.needs_swap)
969 event_swap(event, perf_evlist__sample_id_all(session->evlist)); 965 event_swap(event, perf_evlist__sample_id_all(session->evlist));
970 966
971 if (event->header.type >= PERF_RECORD_HEADER_MAX) 967 if (event->header.type >= PERF_RECORD_HEADER_MAX)
972 return -EINVAL; 968 return -EINVAL;
973 969
974 events_stats__inc(&session->stats, event->header.type); 970 events_stats__inc(&session->stats, event->header.type);
975 971
976 if (event->header.type >= PERF_RECORD_USER_TYPE_START) 972 if (event->header.type >= PERF_RECORD_USER_TYPE_START)
977 return perf_session__process_user_event(session, event, tool, file_offset); 973 return perf_session__process_user_event(session, event, tool, file_offset);
978 974
979 /* 975 /*
980 * For all kernel events we get the sample data 976 * For all kernel events we get the sample data
981 */ 977 */
982 ret = perf_evlist__parse_sample(session->evlist, event, &sample); 978 ret = perf_evlist__parse_sample(session->evlist, event, &sample);
983 if (ret) 979 if (ret)
984 return ret; 980 return ret;
985 981
986 if (tool->ordered_events) { 982 if (tool->ordered_events) {
987 ret = perf_session_queue_event(session, event, tool, &sample, 983 ret = perf_session_queue_event(session, event, tool, &sample,
988 file_offset); 984 file_offset);
989 if (ret != -ETIME) 985 if (ret != -ETIME)
990 return ret; 986 return ret;
991 } 987 }
992 988
993 return perf_session__deliver_event(session, event, &sample, tool, 989 return perf_session__deliver_event(session, event, &sample, tool,
994 file_offset); 990 file_offset);
995 } 991 }
996 992
997 void perf_event_header__bswap(struct perf_event_header *hdr) 993 void perf_event_header__bswap(struct perf_event_header *hdr)
998 { 994 {
999 hdr->type = bswap_32(hdr->type); 995 hdr->type = bswap_32(hdr->type);
1000 hdr->misc = bswap_16(hdr->misc); 996 hdr->misc = bswap_16(hdr->misc);
1001 hdr->size = bswap_16(hdr->size); 997 hdr->size = bswap_16(hdr->size);
1002 } 998 }
1003 999
1004 struct thread *perf_session__findnew(struct perf_session *session, pid_t pid) 1000 struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
1005 { 1001 {
1006 return machine__findnew_thread(&session->machines.host, -1, pid); 1002 return machine__findnew_thread(&session->machines.host, -1, pid);
1007 } 1003 }
1008 1004
1009 static struct thread *perf_session__register_idle_thread(struct perf_session *session) 1005 static struct thread *perf_session__register_idle_thread(struct perf_session *session)
1010 { 1006 {
1011 struct thread *thread; 1007 struct thread *thread;
1012 1008
1013 thread = machine__findnew_thread(&session->machines.host, 0, 0); 1009 thread = machine__findnew_thread(&session->machines.host, 0, 0);
1014 if (thread == NULL || thread__set_comm(thread, "swapper", 0)) { 1010 if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
1015 pr_err("problem inserting idle task.\n"); 1011 pr_err("problem inserting idle task.\n");
1016 thread = NULL; 1012 thread = NULL;
1017 } 1013 }
1018 1014
1019 return thread; 1015 return thread;
1020 } 1016 }
1021 1017
1022 static void perf_session__warn_about_errors(const struct perf_session *session, 1018 static void perf_session__warn_about_errors(const struct perf_session *session,
1023 const struct perf_tool *tool) 1019 const struct perf_tool *tool)
1024 { 1020 {
1025 if (tool->lost == perf_event__process_lost && 1021 if (tool->lost == perf_event__process_lost &&
1026 session->stats.nr_events[PERF_RECORD_LOST] != 0) { 1022 session->stats.nr_events[PERF_RECORD_LOST] != 0) {
1027 ui__warning("Processed %d events and lost %d chunks!\n\n" 1023 ui__warning("Processed %d events and lost %d chunks!\n\n"
1028 "Check IO/CPU overload!\n\n", 1024 "Check IO/CPU overload!\n\n",
1029 session->stats.nr_events[0], 1025 session->stats.nr_events[0],
1030 session->stats.nr_events[PERF_RECORD_LOST]); 1026 session->stats.nr_events[PERF_RECORD_LOST]);
1031 } 1027 }
1032 1028
1033 if (session->stats.nr_unknown_events != 0) { 1029 if (session->stats.nr_unknown_events != 0) {
1034 ui__warning("Found %u unknown events!\n\n" 1030 ui__warning("Found %u unknown events!\n\n"
1035 "Is this an older tool processing a perf.data " 1031 "Is this an older tool processing a perf.data "
1036 "file generated by a more recent tool?\n\n" 1032 "file generated by a more recent tool?\n\n"
1037 "If that is not the case, consider " 1033 "If that is not the case, consider "
1038 "reporting to linux-kernel@vger.kernel.org.\n\n", 1034 "reporting to linux-kernel@vger.kernel.org.\n\n",
1039 session->stats.nr_unknown_events); 1035 session->stats.nr_unknown_events);
1040 } 1036 }
1041 1037
1042 if (session->stats.nr_unknown_id != 0) { 1038 if (session->stats.nr_unknown_id != 0) {
1043 ui__warning("%u samples with id not present in the header\n", 1039 ui__warning("%u samples with id not present in the header\n",
1044 session->stats.nr_unknown_id); 1040 session->stats.nr_unknown_id);
1045 } 1041 }
1046 1042
1047 if (session->stats.nr_invalid_chains != 0) { 1043 if (session->stats.nr_invalid_chains != 0) {
1048 ui__warning("Found invalid callchains!\n\n" 1044 ui__warning("Found invalid callchains!\n\n"
1049 "%u out of %u events were discarded for this reason.\n\n" 1045 "%u out of %u events were discarded for this reason.\n\n"
1050 "Consider reporting to linux-kernel@vger.kernel.org.\n\n", 1046 "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
1051 session->stats.nr_invalid_chains, 1047 session->stats.nr_invalid_chains,
1052 session->stats.nr_events[PERF_RECORD_SAMPLE]); 1048 session->stats.nr_events[PERF_RECORD_SAMPLE]);
1053 } 1049 }
1054 1050
1055 if (session->stats.nr_unprocessable_samples != 0) { 1051 if (session->stats.nr_unprocessable_samples != 0) {
1056 ui__warning("%u unprocessable samples recorded.\n" 1052 ui__warning("%u unprocessable samples recorded.\n"
1057 "Do you have a KVM guest running and not using 'perf kvm'?\n", 1053 "Do you have a KVM guest running and not using 'perf kvm'?\n",
1058 session->stats.nr_unprocessable_samples); 1054 session->stats.nr_unprocessable_samples);
1059 } 1055 }
1056
1057 if (session->stats.nr_unordered_events != 0)
1058 ui__warning("%u out of order events recorded.\n", session->stats.nr_unordered_events);
1060 } 1059 }
1061 1060
1062 volatile int session_done; 1061 volatile int session_done;
1063 1062
1064 static int __perf_session__process_pipe_events(struct perf_session *session, 1063 static int __perf_session__process_pipe_events(struct perf_session *session,
1065 struct perf_tool *tool) 1064 struct perf_tool *tool)
1066 { 1065 {
1067 int fd = perf_data_file__fd(session->file); 1066 int fd = perf_data_file__fd(session->file);
1068 union perf_event *event; 1067 union perf_event *event;
1069 uint32_t size, cur_size = 0; 1068 uint32_t size, cur_size = 0;
1070 void *buf = NULL; 1069 void *buf = NULL;
1071 s64 skip = 0; 1070 s64 skip = 0;
1072 u64 head; 1071 u64 head;
1073 ssize_t err; 1072 ssize_t err;
1074 void *p; 1073 void *p;
1075 1074
1076 perf_tool__fill_defaults(tool); 1075 perf_tool__fill_defaults(tool);
1077 1076
1078 head = 0; 1077 head = 0;
1079 cur_size = sizeof(union perf_event); 1078 cur_size = sizeof(union perf_event);
1080 1079
1081 buf = malloc(cur_size); 1080 buf = malloc(cur_size);
1082 if (!buf) 1081 if (!buf)
1083 return -errno; 1082 return -errno;
1084 more: 1083 more:
1085 event = buf; 1084 event = buf;
1086 err = readn(fd, event, sizeof(struct perf_event_header)); 1085 err = readn(fd, event, sizeof(struct perf_event_header));
1087 if (err <= 0) { 1086 if (err <= 0) {
1088 if (err == 0) 1087 if (err == 0)
1089 goto done; 1088 goto done;
1090 1089
1091 pr_err("failed to read event header\n"); 1090 pr_err("failed to read event header\n");
1092 goto out_err; 1091 goto out_err;
1093 } 1092 }
1094 1093
1095 if (session->header.needs_swap) 1094 if (session->header.needs_swap)
1096 perf_event_header__bswap(&event->header); 1095 perf_event_header__bswap(&event->header);
1097 1096
1098 size = event->header.size; 1097 size = event->header.size;
1099 if (size < sizeof(struct perf_event_header)) { 1098 if (size < sizeof(struct perf_event_header)) {
1100 pr_err("bad event header size\n"); 1099 pr_err("bad event header size\n");
1101 goto out_err; 1100 goto out_err;
1102 } 1101 }
1103 1102
1104 if (size > cur_size) { 1103 if (size > cur_size) {
1105 void *new = realloc(buf, size); 1104 void *new = realloc(buf, size);
1106 if (!new) { 1105 if (!new) {
1107 pr_err("failed to allocate memory to read event\n"); 1106 pr_err("failed to allocate memory to read event\n");
1108 goto out_err; 1107 goto out_err;
1109 } 1108 }
1110 buf = new; 1109 buf = new;
1111 cur_size = size; 1110 cur_size = size;
1112 event = buf; 1111 event = buf;
1113 } 1112 }
1114 p = event; 1113 p = event;
1115 p += sizeof(struct perf_event_header); 1114 p += sizeof(struct perf_event_header);
1116 1115
1117 if (size - sizeof(struct perf_event_header)) { 1116 if (size - sizeof(struct perf_event_header)) {
1118 err = readn(fd, p, size - sizeof(struct perf_event_header)); 1117 err = readn(fd, p, size - sizeof(struct perf_event_header));
1119 if (err <= 0) { 1118 if (err <= 0) {
1120 if (err == 0) { 1119 if (err == 0) {
1121 pr_err("unexpected end of event stream\n"); 1120 pr_err("unexpected end of event stream\n");
1122 goto done; 1121 goto done;
1123 } 1122 }
1124 1123
1125 pr_err("failed to read event data\n"); 1124 pr_err("failed to read event data\n");
1126 goto out_err; 1125 goto out_err;
1127 } 1126 }
1128 } 1127 }
1129 1128
1130 if ((skip = perf_session__process_event(session, event, tool, head)) < 0) { 1129 if ((skip = perf_session__process_event(session, event, tool, head)) < 0) {
1131 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n", 1130 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
1132 head, event->header.size, event->header.type); 1131 head, event->header.size, event->header.type);
1133 err = -EINVAL; 1132 err = -EINVAL;
1134 goto out_err; 1133 goto out_err;
1135 } 1134 }
1136 1135
1137 head += size; 1136 head += size;
1138 1137
1139 if (skip > 0) 1138 if (skip > 0)
1140 head += skip; 1139 head += skip;
1141 1140
1142 if (!session_done()) 1141 if (!session_done())
1143 goto more; 1142 goto more;
1144 done: 1143 done:
1145 /* do the final flush for ordered samples */ 1144 /* do the final flush for ordered samples */
1146 err = ordered_events__flush(session, tool, OE_FLUSH__FINAL); 1145 err = ordered_events__flush(session, tool, OE_FLUSH__FINAL);
1147 out_err: 1146 out_err:
1148 free(buf); 1147 free(buf);
1149 perf_session__warn_about_errors(session, tool); 1148 perf_session__warn_about_errors(session, tool);
1150 ordered_events__free(&session->ordered_events); 1149 ordered_events__free(&session->ordered_events);
1151 return err; 1150 return err;
1152 } 1151 }
1153 1152
1154 static union perf_event * 1153 static union perf_event *
1155 fetch_mmaped_event(struct perf_session *session, 1154 fetch_mmaped_event(struct perf_session *session,
1156 u64 head, size_t mmap_size, char *buf) 1155 u64 head, size_t mmap_size, char *buf)
1157 { 1156 {
1158 union perf_event *event; 1157 union perf_event *event;
1159 1158
1160 /* 1159 /*
1161 * Ensure we have enough space remaining to read 1160 * Ensure we have enough space remaining to read
1162 * the size of the event in the headers. 1161 * the size of the event in the headers.
1163 */ 1162 */
1164 if (head + sizeof(event->header) > mmap_size) 1163 if (head + sizeof(event->header) > mmap_size)
1165 return NULL; 1164 return NULL;
1166 1165
1167 event = (union perf_event *)(buf + head); 1166 event = (union perf_event *)(buf + head);
1168 1167
1169 if (session->header.needs_swap) 1168 if (session->header.needs_swap)
1170 perf_event_header__bswap(&event->header); 1169 perf_event_header__bswap(&event->header);
1171 1170
1172 if (head + event->header.size > mmap_size) { 1171 if (head + event->header.size > mmap_size) {
1173 /* We're not fetching the event so swap back again */ 1172 /* We're not fetching the event so swap back again */
1174 if (session->header.needs_swap) 1173 if (session->header.needs_swap)
1175 perf_event_header__bswap(&event->header); 1174 perf_event_header__bswap(&event->header);
1176 return NULL; 1175 return NULL;
1177 } 1176 }
1178 1177
1179 return event; 1178 return event;
1180 } 1179 }
1181 1180
1182 /* 1181 /*
1183 * On 64bit we can mmap the data file in one go. No need for tiny mmap 1182 * On 64bit we can mmap the data file in one go. No need for tiny mmap
1184 * slices. On 32bit we use 32MB. 1183 * slices. On 32bit we use 32MB.
1185 */ 1184 */
1186 #if BITS_PER_LONG == 64 1185 #if BITS_PER_LONG == 64
1187 #define MMAP_SIZE ULLONG_MAX 1186 #define MMAP_SIZE ULLONG_MAX
1188 #define NUM_MMAPS 1 1187 #define NUM_MMAPS 1
1189 #else 1188 #else
1190 #define MMAP_SIZE (32 * 1024 * 1024ULL) 1189 #define MMAP_SIZE (32 * 1024 * 1024ULL)
1191 #define NUM_MMAPS 128 1190 #define NUM_MMAPS 128
1192 #endif 1191 #endif
1193 1192
1194 int __perf_session__process_events(struct perf_session *session, 1193 int __perf_session__process_events(struct perf_session *session,
1195 u64 data_offset, u64 data_size, 1194 u64 data_offset, u64 data_size,
1196 u64 file_size, struct perf_tool *tool) 1195 u64 file_size, struct perf_tool *tool)
1197 { 1196 {
1198 int fd = perf_data_file__fd(session->file); 1197 int fd = perf_data_file__fd(session->file);
1199 u64 head, page_offset, file_offset, file_pos, size; 1198 u64 head, page_offset, file_offset, file_pos, size;
1200 int err, mmap_prot, mmap_flags, map_idx = 0; 1199 int err, mmap_prot, mmap_flags, map_idx = 0;
1201 size_t mmap_size; 1200 size_t mmap_size;
1202 char *buf, *mmaps[NUM_MMAPS]; 1201 char *buf, *mmaps[NUM_MMAPS];
1203 union perf_event *event; 1202 union perf_event *event;
1204 struct ui_progress prog; 1203 struct ui_progress prog;
1205 s64 skip; 1204 s64 skip;
1206 1205
1207 perf_tool__fill_defaults(tool); 1206 perf_tool__fill_defaults(tool);
1208 1207
1209 page_offset = page_size * (data_offset / page_size); 1208 page_offset = page_size * (data_offset / page_size);
1210 file_offset = page_offset; 1209 file_offset = page_offset;
1211 head = data_offset - page_offset; 1210 head = data_offset - page_offset;
1212 1211
1213 if (data_size && (data_offset + data_size < file_size)) 1212 if (data_size && (data_offset + data_size < file_size))
1214 file_size = data_offset + data_size; 1213 file_size = data_offset + data_size;
1215 1214
1216 ui_progress__init(&prog, file_size, "Processing events..."); 1215 ui_progress__init(&prog, file_size, "Processing events...");
1217 1216
1218 mmap_size = MMAP_SIZE; 1217 mmap_size = MMAP_SIZE;
1219 if (mmap_size > file_size) { 1218 if (mmap_size > file_size) {
1220 mmap_size = file_size; 1219 mmap_size = file_size;
1221 session->one_mmap = true; 1220 session->one_mmap = true;
1222 } 1221 }
1223 1222
1224 memset(mmaps, 0, sizeof(mmaps)); 1223 memset(mmaps, 0, sizeof(mmaps));
1225 1224
1226 mmap_prot = PROT_READ; 1225 mmap_prot = PROT_READ;
1227 mmap_flags = MAP_SHARED; 1226 mmap_flags = MAP_SHARED;
1228 1227
1229 if (session->header.needs_swap) { 1228 if (session->header.needs_swap) {
1230 mmap_prot |= PROT_WRITE; 1229 mmap_prot |= PROT_WRITE;
1231 mmap_flags = MAP_PRIVATE; 1230 mmap_flags = MAP_PRIVATE;
1232 } 1231 }
1233 remap: 1232 remap:
1234 buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, fd, 1233 buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, fd,
1235 file_offset); 1234 file_offset);
1236 if (buf == MAP_FAILED) { 1235 if (buf == MAP_FAILED) {
1237 pr_err("failed to mmap file\n"); 1236 pr_err("failed to mmap file\n");
1238 err = -errno; 1237 err = -errno;
1239 goto out_err; 1238 goto out_err;
1240 } 1239 }
1241 mmaps[map_idx] = buf; 1240 mmaps[map_idx] = buf;
1242 map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1); 1241 map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
1243 file_pos = file_offset + head; 1242 file_pos = file_offset + head;
1244 if (session->one_mmap) { 1243 if (session->one_mmap) {
1245 session->one_mmap_addr = buf; 1244 session->one_mmap_addr = buf;
1246 session->one_mmap_offset = file_offset; 1245 session->one_mmap_offset = file_offset;
1247 } 1246 }
1248 1247
1249 more: 1248 more:
1250 event = fetch_mmaped_event(session, head, mmap_size, buf); 1249 event = fetch_mmaped_event(session, head, mmap_size, buf);
1251 if (!event) { 1250 if (!event) {
1252 if (mmaps[map_idx]) { 1251 if (mmaps[map_idx]) {
1253 munmap(mmaps[map_idx], mmap_size); 1252 munmap(mmaps[map_idx], mmap_size);
1254 mmaps[map_idx] = NULL; 1253 mmaps[map_idx] = NULL;
1255 } 1254 }
1256 1255
1257 page_offset = page_size * (head / page_size); 1256 page_offset = page_size * (head / page_size);
1258 file_offset += page_offset; 1257 file_offset += page_offset;
1259 head -= page_offset; 1258 head -= page_offset;
1260 goto remap; 1259 goto remap;
1261 } 1260 }
1262 1261
1263 size = event->header.size; 1262 size = event->header.size;
1264 1263
1265 if (size < sizeof(struct perf_event_header) || 1264 if (size < sizeof(struct perf_event_header) ||
1266 (skip = perf_session__process_event(session, event, tool, file_pos)) 1265 (skip = perf_session__process_event(session, event, tool, file_pos))
1267 < 0) { 1266 < 0) {
1268 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n", 1267 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
1269 file_offset + head, event->header.size, 1268 file_offset + head, event->header.size,
1270 event->header.type); 1269 event->header.type);
1271 err = -EINVAL; 1270 err = -EINVAL;
1272 goto out_err; 1271 goto out_err;
1273 } 1272 }
1274 1273
1275 if (skip) 1274 if (skip)
1276 size += skip; 1275 size += skip;
1277 1276
1278 head += size; 1277 head += size;
1279 file_pos += size; 1278 file_pos += size;
1280 1279
1281 ui_progress__update(&prog, size); 1280 ui_progress__update(&prog, size);
1282 1281
1283 if (session_done()) 1282 if (session_done())
1284 goto out; 1283 goto out;
1285 1284
1286 if (file_pos < file_size) 1285 if (file_pos < file_size)
1287 goto more; 1286 goto more;
1288 1287
1289 out: 1288 out:
1290 /* do the final flush for ordered samples */ 1289 /* do the final flush for ordered samples */
1291 err = ordered_events__flush(session, tool, OE_FLUSH__FINAL); 1290 err = ordered_events__flush(session, tool, OE_FLUSH__FINAL);
1292 out_err: 1291 out_err:
1293 ui_progress__finish(); 1292 ui_progress__finish();
1294 perf_session__warn_about_errors(session, tool); 1293 perf_session__warn_about_errors(session, tool);
1295 ordered_events__free(&session->ordered_events); 1294 ordered_events__free(&session->ordered_events);
1296 session->one_mmap = false; 1295 session->one_mmap = false;
1297 return err; 1296 return err;
1298 } 1297 }
1299 1298
1300 int perf_session__process_events(struct perf_session *session, 1299 int perf_session__process_events(struct perf_session *session,
1301 struct perf_tool *tool) 1300 struct perf_tool *tool)
1302 { 1301 {
1303 u64 size = perf_data_file__size(session->file); 1302 u64 size = perf_data_file__size(session->file);
1304 int err; 1303 int err;
1305 1304
1306 if (perf_session__register_idle_thread(session) == NULL) 1305 if (perf_session__register_idle_thread(session) == NULL)
1307 return -ENOMEM; 1306 return -ENOMEM;
1308 1307
1309 if (!perf_data_file__is_pipe(session->file)) 1308 if (!perf_data_file__is_pipe(session->file))
1310 err = __perf_session__process_events(session, 1309 err = __perf_session__process_events(session,
1311 session->header.data_offset, 1310 session->header.data_offset,
1312 session->header.data_size, 1311 session->header.data_size,
1313 size, tool); 1312 size, tool);
1314 else 1313 else
1315 err = __perf_session__process_pipe_events(session, tool); 1314 err = __perf_session__process_pipe_events(session, tool);
1316 1315
1317 return err; 1316 return err;
1318 } 1317 }
1319 1318
1320 bool perf_session__has_traces(struct perf_session *session, const char *msg) 1319 bool perf_session__has_traces(struct perf_session *session, const char *msg)
1321 { 1320 {
1322 struct perf_evsel *evsel; 1321 struct perf_evsel *evsel;
1323 1322
1324 evlist__for_each(session->evlist, evsel) { 1323 evlist__for_each(session->evlist, evsel) {
1325 if (evsel->attr.type == PERF_TYPE_TRACEPOINT) 1324 if (evsel->attr.type == PERF_TYPE_TRACEPOINT)
1326 return true; 1325 return true;
1327 } 1326 }
1328 1327
1329 pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg); 1328 pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
1330 return false; 1329 return false;
1331 } 1330 }
1332 1331
1333 int maps__set_kallsyms_ref_reloc_sym(struct map **maps, 1332 int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
1334 const char *symbol_name, u64 addr) 1333 const char *symbol_name, u64 addr)
1335 { 1334 {
1336 char *bracket; 1335 char *bracket;
1337 enum map_type i; 1336 enum map_type i;
1338 struct ref_reloc_sym *ref; 1337 struct ref_reloc_sym *ref;
1339 1338
1340 ref = zalloc(sizeof(struct ref_reloc_sym)); 1339 ref = zalloc(sizeof(struct ref_reloc_sym));
1341 if (ref == NULL) 1340 if (ref == NULL)
1342 return -ENOMEM; 1341 return -ENOMEM;
1343 1342
1344 ref->name = strdup(symbol_name); 1343 ref->name = strdup(symbol_name);
1345 if (ref->name == NULL) { 1344 if (ref->name == NULL) {
1346 free(ref); 1345 free(ref);
1347 return -ENOMEM; 1346 return -ENOMEM;
1348 } 1347 }
1349 1348
1350 bracket = strchr(ref->name, ']'); 1349 bracket = strchr(ref->name, ']');
1351 if (bracket) 1350 if (bracket)
1352 *bracket = '\0'; 1351 *bracket = '\0';
1353 1352
1354 ref->addr = addr; 1353 ref->addr = addr;
1355 1354
1356 for (i = 0; i < MAP__NR_TYPES; ++i) { 1355 for (i = 0; i < MAP__NR_TYPES; ++i) {
1357 struct kmap *kmap = map__kmap(maps[i]); 1356 struct kmap *kmap = map__kmap(maps[i]);
1358 kmap->ref_reloc_sym = ref; 1357 kmap->ref_reloc_sym = ref;
1359 } 1358 }
1360 1359
1361 return 0; 1360 return 0;
1362 } 1361 }
1363 1362
/* Print all DSOs of all machines in this session to @fp; returns bytes written. */
size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
{
	return machines__fprintf_dsos(&session->machines, fp);
}
1368 1367
/*
 * Print the build-ids of the session's DSOs to @fp, omitting any DSO for
 * which skip(dso, parm) returns true.  Returns the number of bytes written.
 */
size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
					  bool (skip)(struct dso *dso, int parm), int parm)
{
	return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
}
1374 1373
1375 size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp) 1374 size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
1376 { 1375 {
1377 size_t ret = fprintf(fp, "Aggregated stats:\n"); 1376 size_t ret = fprintf(fp, "Aggregated stats:\n");
1378 1377
1379 ret += events_stats__fprintf(&session->stats, fp); 1378 ret += events_stats__fprintf(&session->stats, fp);
1380 return ret; 1379 return ret;
1381 } 1380 }
1382 1381
/* Print the session's thread/map state to @fp; returns bytes written. */
size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
{
	/*
	 * FIXME: Here we have to actually print all the machines in this
	 * session, not just the host...
	 */
	return machine__fprintf(&session->machines.host, fp);
}
1391 1390
1392 struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session, 1391 struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
1393 unsigned int type) 1392 unsigned int type)
1394 { 1393 {
1395 struct perf_evsel *pos; 1394 struct perf_evsel *pos;
1396 1395
1397 evlist__for_each(session->evlist, pos) { 1396 evlist__for_each(session->evlist, pos) {
1398 if (pos->attr.type == type) 1397 if (pos->attr.type == type)
1399 return pos; 1398 return pos;
1400 } 1399 }
1401 return NULL; 1400 return NULL;
1402 } 1401 }
1403 1402
/*
 * Print a resolved sample location to stdout, formatted according to the
 * PRINT_IP_OPT_* bits in @print_opts (raw IP, symbol name, symbol+offset,
 * DSO name, source line, one-line vs. multi-line layout).
 *
 * With callchains enabled and present in @sample, walks up to
 * @stack_depth resolved callchain entries; otherwise prints just the
 * single sample address in @al.  Entries whose symbol is marked
 * 'ignore' are skipped without consuming stack_depth.
 */
void perf_evsel__print_ip(struct perf_evsel *evsel, struct perf_sample *sample,
			  struct addr_location *al,
			  unsigned int print_opts, unsigned int stack_depth)
{
	struct callchain_cursor_node *node;
	int print_ip = print_opts & PRINT_IP_OPT_IP;
	int print_sym = print_opts & PRINT_IP_OPT_SYM;
	int print_dso = print_opts & PRINT_IP_OPT_DSO;
	int print_symoffset = print_opts & PRINT_IP_OPT_SYMOFFSET;
	int print_oneline = print_opts & PRINT_IP_OPT_ONELINE;
	int print_srcline = print_opts & PRINT_IP_OPT_SRCLINE;
	/* one-line mode separates fields with spaces instead of tabs */
	char s = print_oneline ? ' ' : '\t';

	if (symbol_conf.use_callchain && sample->callchain) {
		struct addr_location node_al;

		if (machine__resolve_callchain(al->machine, evsel, al->thread,
					       sample, NULL, NULL,
					       PERF_MAX_STACK_DEPTH) != 0) {
			if (verbose)
				error("Failed to resolve callchain. Skipping\n");
			return;
		}
		callchain_cursor_commit(&callchain_cursor);

		/* template reused for each node when printing sym+offset */
		if (print_symoffset)
			node_al = *al;

		while (stack_depth) {
			u64 addr = 0;

			node = callchain_cursor_current(&callchain_cursor);
			if (!node)
				break;

			if (node->sym && node->sym->ignore)
				goto next;

			if (print_ip)
				printf("%c%16" PRIx64, s, node->ip);

			/* map the IP into the DSO's address space */
			if (node->map)
				addr = node->map->map_ip(node->map, node->ip);

			if (print_sym) {
				printf(" ");
				if (print_symoffset) {
					node_al.addr = addr;
					node_al.map = node->map;
					symbol__fprintf_symname_offs(node->sym, &node_al, stdout);
				} else
					symbol__fprintf_symname(node->sym, stdout);
			}

			if (print_dso) {
				printf(" (");
				map__fprintf_dsoname(node->map, stdout);
				printf(")");
			}

			if (print_srcline)
				map__fprintf_srcline(node->map, addr, "\n  ",
						     stdout);

			if (!print_oneline)
				printf("\n");

			/* only resolved, non-ignored entries count toward depth */
			stack_depth--;
next:
			callchain_cursor_advance(&callchain_cursor);
		}

	} else {
		/* no callchain: print the single sample location */
		if (al->sym && al->sym->ignore)
			return;

		if (print_ip)
			printf("%16" PRIx64, sample->ip);

		if (print_sym) {
			printf(" ");
			if (print_symoffset)
				symbol__fprintf_symname_offs(al->sym, al,
							     stdout);
			else
				symbol__fprintf_symname(al->sym, stdout);
		}

		if (print_dso) {
			printf(" (");
			map__fprintf_dsoname(al->map, stdout);
			printf(")");
		}

		if (print_srcline)
			map__fprintf_srcline(al->map, al->addr, "\n  ", stdout);
	}
}
1502 1501
1503 int perf_session__cpu_bitmap(struct perf_session *session, 1502 int perf_session__cpu_bitmap(struct perf_session *session,
1504 const char *cpu_list, unsigned long *cpu_bitmap) 1503 const char *cpu_list, unsigned long *cpu_bitmap)
1505 { 1504 {
1506 int i, err = -1; 1505 int i, err = -1;
1507 struct cpu_map *map; 1506 struct cpu_map *map;
1508 1507
1509 for (i = 0; i < PERF_TYPE_MAX; ++i) { 1508 for (i = 0; i < PERF_TYPE_MAX; ++i) {
1510 struct perf_evsel *evsel; 1509 struct perf_evsel *evsel;
1511 1510
1512 evsel = perf_session__find_first_evtype(session, i); 1511 evsel = perf_session__find_first_evtype(session, i);
1513 if (!evsel) 1512 if (!evsel)
1514 continue; 1513 continue;
1515 1514
1516 if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) { 1515 if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
1517 pr_err("File does not contain CPU events. " 1516 pr_err("File does not contain CPU events. "
1518 "Remove -c option to proceed.\n"); 1517 "Remove -c option to proceed.\n");
1519 return -1; 1518 return -1;
1520 } 1519 }
1521 } 1520 }
1522 1521
1523 map = cpu_map__new(cpu_list); 1522 map = cpu_map__new(cpu_list);
1524 if (map == NULL) { 1523 if (map == NULL) {
1525 pr_err("Invalid cpu_list\n"); 1524 pr_err("Invalid cpu_list\n");
1526 return -1; 1525 return -1;
1527 } 1526 }
1528 1527
1529 for (i = 0; i < map->nr; i++) { 1528 for (i = 0; i < map->nr; i++) {
1530 int cpu = map->map[i]; 1529 int cpu = map->map[i];
1531 1530
1532 if (cpu >= MAX_NR_CPUS) { 1531 if (cpu >= MAX_NR_CPUS) {
1533 pr_err("Requested CPU %d too large. " 1532 pr_err("Requested CPU %d too large. "
1534 "Consider raising MAX_NR_CPUS\n", cpu); 1533 "Consider raising MAX_NR_CPUS\n", cpu);
1535 goto out_delete_map; 1534 goto out_delete_map;
1536 } 1535 }
1537 1536
1538 set_bit(cpu, cpu_bitmap); 1537 set_bit(cpu, cpu_bitmap);
1539 } 1538 }
1540 1539
1541 err = 0; 1540 err = 0;
1542 1541
1543 out_delete_map: 1542 out_delete_map:
1544 cpu_map__delete(map); 1543 cpu_map__delete(map);
1545 return err; 1544 return err;
1546 } 1545 }
1547 1546
1548 void perf_session__fprintf_info(struct perf_session *session, FILE *fp, 1547 void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
1549 bool full) 1548 bool full)
1550 { 1549 {
1551 struct stat st; 1550 struct stat st;
1552 int fd, ret; 1551 int fd, ret;
1553 1552
1554 if (session == NULL || fp == NULL) 1553 if (session == NULL || fp == NULL)
1555 return; 1554 return;
1556 1555
1557 fd = perf_data_file__fd(session->file); 1556 fd = perf_data_file__fd(session->file);
1558 1557
1559 ret = fstat(fd, &st); 1558 ret = fstat(fd, &st);
1560 if (ret == -1) 1559 if (ret == -1)
1561 return; 1560 return;
1562 1561
1563 fprintf(fp, "# ========\n"); 1562 fprintf(fp, "# ========\n");
1564 fprintf(fp, "# captured on: %s", ctime(&st.st_ctime)); 1563 fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));
1565 perf_header__fprintf_info(session, fp, full); 1564 perf_header__fprintf_info(session, fp, full);
1566 fprintf(fp, "# ========\n#\n"); 1565 fprintf(fp, "# ========\n#\n");
1567 } 1566 }
1568 1567
1569 1568
1570 int __perf_session__set_tracepoints_handlers(struct perf_session *session, 1569 int __perf_session__set_tracepoints_handlers(struct perf_session *session,
1571 const struct perf_evsel_str_handler *assocs, 1570 const struct perf_evsel_str_handler *assocs,
1572 size_t nr_assocs) 1571 size_t nr_assocs)
1573 { 1572 {
1574 struct perf_evsel *evsel; 1573 struct perf_evsel *evsel;
1575 size_t i; 1574 size_t i;
1576 int err; 1575 int err;
1577 1576
1578 for (i = 0; i < nr_assocs; i++) { 1577 for (i = 0; i < nr_assocs; i++) {
1579 /* 1578 /*
1580 * Adding a handler for an event not in the session, 1579 * Adding a handler for an event not in the session,
1581 * just ignore it. 1580 * just ignore it.
1582 */ 1581 */
1583 evsel = perf_evlist__find_tracepoint_by_name(session->evlist, assocs[i].name); 1582 evsel = perf_evlist__find_tracepoint_by_name(session->evlist, assocs[i].name);
1584 if (evsel == NULL) 1583 if (evsel == NULL)
1585 continue; 1584 continue;
1586 1585
1587 err = -EEXIST; 1586 err = -EEXIST;
1588 if (evsel->handler != NULL) 1587 if (evsel->handler != NULL)
1589 goto out; 1588 goto out;
1590 evsel->handler = assocs[i].handler; 1589 evsel->handler = assocs[i].handler;
1591 } 1590 }
1592 1591
1593 err = 0; 1592 err = 0;
1594 out: 1593 out: