commit 39876e7dd385e0f0a438ee0ab13cf75a4f5e0e3b
Author: Arnaldo Carvalho de Melo <acme@redhat.com>
Parent: de332ac40f

perf evlist: Introduce add_newtp method

To reduce the boilerplate of creating and adding a new tracepoint to an
evlist.

Cc: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Namhyung Kim <namhyung@gmail.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/n/tip-4z90i79gnmsza2czv2dhdrb7@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
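
As an illustration of the boilerplate this removes, here is a minimal before/after sketch distilled from the builtin-trace.c hunk below (error handling abbreviated to a bare return; the handler is the evsel->handler.func callback the tool already uses):

	/* Before: create the tracepoint evsel, hook up its handler, add it -- per event */
	struct perf_evsel *evsel = perf_evsel__newtp("raw_syscalls", "sys_enter", 0);
	if (evsel == NULL)
		return -1;
	evsel->handler.func = trace__sys_enter;
	perf_evlist__add(evlist, evsel);

	/* After: one call performs all three steps */
	if (perf_evlist__add_newtp(evlist, "raw_syscalls", "sys_enter", trace__sys_enter))
		return -1;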

Showing 3 changed files with 20 additions and 15 deletions

tools/perf/builtin-trace.c
#include "builtin.h"
#include "util/evlist.h"
#include "util/parse-options.h"
#include "util/thread_map.h"
#include "event-parse.h"

#include <libaudit.h>
#include <stdlib.h>

static struct syscall_fmt {
	const char *name;
	const char *alias;
	bool	   errmsg;
	bool	   timeout;
} syscall_fmts[] = {
	{ .name = "arch_prctl", .errmsg = true, .alias = "prctl", },
	{ .name = "fstat", .errmsg = true, .alias = "newfstat", },
	{ .name = "fstatat", .errmsg = true, .alias = "newfstatat", },
	{ .name = "futex", .errmsg = true, },
	{ .name = "poll", .errmsg = true, .timeout = true, },
	{ .name = "ppoll", .errmsg = true, .timeout = true, },
	{ .name = "read", .errmsg = true, },
	{ .name = "recvfrom", .errmsg = true, },
	{ .name = "select", .errmsg = true, .timeout = true, },
	{ .name = "stat", .errmsg = true, .alias = "newstat", },
};

static int syscall_fmt__cmp(const void *name, const void *fmtp)
{
	const struct syscall_fmt *fmt = fmtp;
	return strcmp(name, fmt->name);
}

static struct syscall_fmt *syscall_fmt__find(const char *name)
{
	const int nmemb = ARRAY_SIZE(syscall_fmts);
	return bsearch(name, syscall_fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
}

struct syscall {
	struct event_format *tp_format;
	const char *name;
	struct syscall_fmt *fmt;
};

struct trace {
	int audit_machine;
	struct {
		int max;
		struct syscall *table;
	} syscalls;
	struct perf_record_opts opts;
};

static int trace__read_syscall_info(struct trace *trace, int id)
{
	char tp_name[128];
	struct syscall *sc;

	if (id > trace->syscalls.max) {
		struct syscall *nsyscalls = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc));

		if (nsyscalls == NULL)
			return -1;

		if (trace->syscalls.max != -1) {
			memset(nsyscalls + trace->syscalls.max + 1, 0,
			       (id - trace->syscalls.max) * sizeof(*sc));
		} else {
			memset(nsyscalls, 0, (id + 1) * sizeof(*sc));
		}

		trace->syscalls.table = nsyscalls;
		trace->syscalls.max = id;
	}

	sc = trace->syscalls.table + id;
	sc->name = audit_syscall_to_name(id, trace->audit_machine);
	if (sc->name == NULL)
		return -1;

	sc->fmt = syscall_fmt__find(sc->name);

	snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
	sc->tp_format = event_format__new("syscalls", tp_name);

	if (sc->tp_format == NULL && sc->fmt && sc->fmt->alias) {
		snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias);
		sc->tp_format = event_format__new("syscalls", tp_name);
	}

	return sc->tp_format != NULL ? 0 : -1;
}

static size_t syscall__fprintf_args(struct syscall *sc, unsigned long *args, FILE *fp)
{
	int i = 0;
	size_t printed = 0;

	if (sc->tp_format != NULL) {
		struct format_field *field;

		for (field = sc->tp_format->format.fields->next; field; field = field->next) {
			printed += fprintf(fp, "%s%s: %ld", printed ? ", " : "",
					   field->name, args[i++]);
		}
	} else {
		while (i < 6) {
			printed += fprintf(fp, "%sarg%d: %ld", printed ? ", " : "", i, args[i]);
			++i;
		}
	}

	return printed;
}

typedef int (*tracepoint_handler)(struct trace *trace, struct perf_evsel *evsel,
				  struct perf_sample *sample);

static struct syscall *trace__syscall_info(struct trace *trace,
					   struct perf_evsel *evsel,
					   struct perf_sample *sample)
{
	int id = perf_evsel__intval(evsel, sample, "id");

	if (id < 0) {
		printf("Invalid syscall %d id, skipping...\n", id);
		return NULL;
	}

	if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL) &&
	    trace__read_syscall_info(trace, id))
		goto out_cant_read;

	if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL))
		goto out_cant_read;

	return &trace->syscalls.table[id];

out_cant_read:
	printf("Problems reading syscall %d information\n", id);
	return NULL;
}

static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
			    struct perf_sample *sample)
{
	void *args;
	struct syscall *sc = trace__syscall_info(trace, evsel, sample);

	if (sc == NULL)
		return -1;

	args = perf_evsel__rawptr(evsel, sample, "args");
	if (args == NULL) {
		printf("Problems reading syscall arguments\n");
		return -1;
	}

	printf("%s(", sc->name);
	syscall__fprintf_args(sc, args, stdout);

	return 0;
}

static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
			   struct perf_sample *sample)
{
	int ret;
	struct syscall *sc = trace__syscall_info(trace, evsel, sample);

	if (sc == NULL)
		return -1;

	ret = perf_evsel__intval(evsel, sample, "ret");

	if (ret < 0 && sc->fmt && sc->fmt->errmsg) {
		char bf[256];
		const char *emsg = strerror_r(-ret, bf, sizeof(bf)),
			   *e = audit_errno_to_name(-ret);

		printf(") = -1 %s %s", e, emsg);
	} else if (ret == 0 && sc->fmt && sc->fmt->timeout)
		printf(") = 0 Timeout");
	else
		printf(") = %d", ret);

	putchar('\n');
	return 0;
}

static int trace__run(struct trace *trace)
{
	struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
	struct perf_evsel *evsel;
	int err = -1, i, nr_events = 0, before;

	if (evlist == NULL) {
		printf("Not enough memory to run!\n");
		goto out;
	}

-	evsel = perf_evsel__newtp("raw_syscalls", "sys_enter", 0);
-	if (evsel == NULL) {
-		printf("Couldn't read the raw_syscalls:sys_enter tracepoint information!\n");
-		goto out_delete_evlist;
-	}
-
-	evsel->handler.func = trace__sys_enter;
-	perf_evlist__add(evlist, evsel);
-
-	evsel = perf_evsel__newtp("raw_syscalls", "sys_exit", 1);
-	if (evsel == NULL) {
-		printf("Couldn't read the raw_syscalls:sys_exit tracepoint information!\n");
-		goto out_delete_evlist;
-	}
-
-	evsel->handler.func = trace__sys_exit;
-	perf_evlist__add(evlist, evsel);
+	if (perf_evlist__add_newtp(evlist, "raw_syscalls", "sys_enter", trace__sys_enter) ||
+	    perf_evlist__add_newtp(evlist, "raw_syscalls", "sys_exit", trace__sys_exit)) {
+		printf("Couldn't read the raw_syscalls tracepoints information!\n");
+		goto out_delete_evlist;
+	}

	err = perf_evlist__create_maps(evlist, &trace->opts.target);
	if (err < 0) {
		printf("Problems parsing the target to trace, check your options!\n");
		goto out_delete_evlist;
	}

	perf_evlist__config_attrs(evlist, &trace->opts);

	err = perf_evlist__open(evlist);
	if (err < 0) {
		printf("Couldn't create the events: %s\n", strerror(errno));
		goto out_delete_evlist;
	}

	err = perf_evlist__mmap(evlist, UINT_MAX, false);
	if (err < 0) {
		printf("Couldn't mmap the events: %s\n", strerror(errno));
		goto out_delete_evlist;
	}

	perf_evlist__enable(evlist);
again:
	before = nr_events;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		union perf_event *event;

		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
			const u32 type = event->header.type;
			tracepoint_handler handler;
			struct perf_sample sample;

			++nr_events;

			switch (type) {
			case PERF_RECORD_SAMPLE:
				break;
			case PERF_RECORD_LOST:
				printf("LOST %" PRIu64 " events!\n", event->lost.lost);
				continue;
			default:
				printf("Unexpected %s event, skipping...\n",
				       perf_event__name(type));
				continue;
			}

			err = perf_evlist__parse_sample(evlist, event, &sample);
			if (err) {
				printf("Can't parse sample, err = %d, skipping...\n", err);
				continue;
			}

			evsel = perf_evlist__id2evsel(evlist, sample.id);
			if (evsel == NULL) {
				printf("Unknown tp ID %" PRIu64 ", skipping...\n", sample.id);
				continue;
			}

			if (evlist->threads->map[0] == -1 || evlist->threads->nr > 1)
				printf("%d ", sample.tid);

			handler = evsel->handler.func;
			handler(trace, evsel, &sample);
		}
	}

	if (nr_events == before)
		poll(evlist->pollfd, evlist->nr_fds, -1);

	goto again;

out_delete_evlist:
	perf_evlist__delete(evlist);
out:
	return err;
}

int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
{
	const char * const trace_usage[] = {
		"perf trace [<options>]",
		NULL
	};
	struct trace trace = {
		.audit_machine = audit_detect_machine(),
		.syscalls = {
			.max = -1,
		},
		.opts = {
			.target = {
				.uid = UINT_MAX,
				.uses_mmap = true,
			},
			.user_freq = UINT_MAX,
			.user_interval = ULLONG_MAX,
			.no_delay = true,
			.mmap_pages = 1024,
		},
	};
	const struct option trace_options[] = {
	OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
		    "trace events on existing process id"),
	OPT_STRING(0, "tid", &trace.opts.target.tid, "tid",
		    "trace events on existing thread id"),
	OPT_BOOLEAN(0, "all-cpus", &trace.opts.target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_STRING(0, "cpu", &trace.opts.target.cpu_list, "cpu",
		    "list of cpus to monitor"),
	OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
		    "child tasks do not inherit counters"),
	OPT_UINTEGER(0, "mmap-pages", &trace.opts.mmap_pages,
		     "number of mmap data pages"),
	OPT_STRING(0, "uid", &trace.opts.target.uid_str, "user",
		   "user to profile"),
	OPT_END()
	};
	int err;

	argc = parse_options(argc, argv, trace_options, trace_usage, 0);
	if (argc)
		usage_with_options(trace_usage, trace_options);

	err = perf_target__parse_uid(&trace.opts.target);
	if (err) {
		char bf[BUFSIZ];
		perf_target__strerror(&trace.opts.target, err, bf, sizeof(bf));
		printf("%s", bf);
		return err;
	}

	return trace__run(&trace);
}

tools/perf/util/evlist.c
/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include "debugfs.h"
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include <unistd.h>

#include "parse-events.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	evlist->workload.pid = -1;
}

struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
				     struct thread_map *threads)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, cpus, threads);

	return evlist;
}

void perf_evlist__config_attrs(struct perf_evlist *evlist,
			       struct perf_record_opts *opts)
{
	struct perf_evsel *evsel, *first;

	if (evlist->cpus->map[0] < 0)
		opts->no_inherit = true;

	first = perf_evlist__first(evlist);

	list_for_each_entry(evsel, &evlist->entries, node) {
		perf_evsel__config(evsel, opts, first);

		if (evlist->nr_entries > 1)
			evsel->attr.sample_type |= PERF_SAMPLE_ID;
	}
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	list_for_each_entry_safe(pos, n, &evlist->entries, node) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	free(evlist->mmap);
	free(evlist->pollfd);
	evlist->mmap = NULL;
	evlist->pollfd = NULL;
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	++evlist->nr_entries;
}

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list,
				   int nr_entries)
{
	list_splice_tail(list, &evlist->entries);
	evlist->nr_entries += nr_entries;
}

void __perf_evlist__set_leader(struct list_head *list)
{
	struct perf_evsel *evsel, *leader;

	leader = list_entry(list->next, struct perf_evsel, node);
	leader->leader = NULL;

	list_for_each_entry(evsel, list, node) {
		if (evsel != leader)
			evsel->leader = leader;
	}
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries)
		__perf_evlist__set_leader(&evlist->entries);
}

int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel;

	event_attr_init(&attr);

	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL)
		goto error;

	/* use strdup() because free(evsel) assumes name is allocated */
	evsel->name = strdup("cycles");
	if (!evsel->name)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;
error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}

int perf_evlist__add_attrs(struct perf_evlist *evlist,
			   struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

	return 0;

out_delete_partial_list:
	list_for_each_entry_safe(evsel, n, &head, node)
		perf_evsel__delete(evsel);
	return -1;
}

int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}

static int trace_event__id(const char *evname)
{
	char *filename, *colon;
	int err = -1, fd;

	if (asprintf(&filename, "%s/%s/id", tracing_events_path, evname) < 0)
		return -1;

	colon = strrchr(filename, ':');
	if (colon != NULL)
		*colon = '/';

	fd = open(filename, O_RDONLY);
	if (fd >= 0) {
		char id[16];
		if (read(fd, id, sizeof(id)) > 0)
			err = atoi(id);
		close(fd);
	}

	free(filename);
	return err;
}

int perf_evlist__add_tracepoints(struct perf_evlist *evlist,
				 const char *tracepoints[],
				 size_t nr_tracepoints)
{
	int err;
	size_t i;
	struct perf_event_attr *attrs = zalloc(nr_tracepoints * sizeof(*attrs));

	if (attrs == NULL)
		return -1;

	for (i = 0; i < nr_tracepoints; i++) {
		err = trace_event__id(tracepoints[i]);

		if (err < 0)
			goto out_free_attrs;

		attrs[i].type = PERF_TYPE_TRACEPOINT;
		attrs[i].config = err;
		attrs[i].sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
					PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD);
		attrs[i].sample_period = 1;
	}

	err = perf_evlist__add_attrs(evlist, attrs, nr_tracepoints);
out_free_attrs:
	free(attrs);
	return err;
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}

int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist,
					  const struct perf_evsel_str_handler *assocs,
					  size_t nr_assocs)
{
	struct perf_evsel *evsel;
	int err;
	size_t i;

	for (i = 0; i < nr_assocs; i++) {
		err = trace_event__id(assocs[i].name);
		if (err < 0)
			goto out;

		evsel = perf_evlist__find_tracepoint_by_id(evlist, err);
		if (evsel == NULL)
			continue;

		err = -EEXIST;
		if (evsel->handler.func != NULL)
			goto out;
		evsel->handler.func = assocs[i].handler;
	}

	err = 0;
out:
	return err;
}

+int perf_evlist__add_newtp(struct perf_evlist *evlist,
+			   const char *sys, const char *name, void *handler)
+{
+	struct perf_evsel *evsel;
+
+	evsel = perf_evsel__newtp(sys, name, evlist->nr_entries);
+	if (evsel == NULL)
+		return -1;
+
+	evsel->handler.func = handler;
+	perf_evlist__add(evlist, evsel);
+	return 0;
+}
+
void perf_evlist__disable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			for (thread = 0; thread < evlist->threads->nr; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_DISABLE, 0);
		}
	}
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;

	for (cpu = 0; cpu < cpu_map__nr(evlist->cpus); cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			for (thread = 0; thread < evlist->threads->nr; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_ENABLE, 0);
		}
	}
}

static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nfds = cpu_map__nr(evlist->cpus) * evlist->threads->nr * evlist->nr_entries;
	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
	return evlist->pollfd != NULL ? 0 : -ENOMEM;
}

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	fcntl(fd, F_SETFL, O_NONBLOCK);
	evlist->pollfd[evlist->nr_fds].fd = fd;
	evlist->pollfd[evlist->nr_fds].events = POLLIN;
	evlist->nr_fds++;
}

static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	perf_evlist__id_add(evlist, evsel, cpu, thread, read_data[id_idx]);
	return 0;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct perf_sample_id *sid;
	int hash;

	if (evlist->nr_entries == 1)
		return perf_evlist__first(evlist);

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, pos, head, node)
		if (sid->id == id)
			return sid->evsel;

	if (!perf_evlist__sample_id_all(evlist))
		return perf_evlist__first(evlist);

	return NULL;
}

union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	/* XXX Move this to perf.c, making it generally available */
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &evlist->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &evlist->event_copy;
		}

		old += size;
	}

	md->prev = old;

	if (!evlist->overwrite)
		perf_mmap__write_tail(md, old);

	return event;
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		if (evlist->mmap[i].base != NULL) {
			munmap(evlist->mmap[i].base, evlist->mmap_len);
			evlist->mmap[i].base = NULL;
		}
	}

	free(evlist->mmap);
	evlist->mmap = NULL;
}

static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
	if (cpu_map__all(evlist->cpus))
		evlist->nr_mmaps = evlist->threads->nr;
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}

static int __perf_evlist__mmap(struct perf_evlist *evlist,
			       int idx, int prot, int mask, int fd)
{
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED) {
		evlist->mmap[idx].base = NULL;
		return -1;
	}

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}

static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int cpu, thread;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		int output = -1;

		for (thread = 0; thread < evlist->threads->nr; thread++) {
			list_for_each_entry(evsel, &evlist->entries, node) {
				int fd = FD(evsel, cpu, thread);

				if (output == -1) {
					output = fd;
					if (__perf_evlist__mmap(evlist, cpu,
								prot, mask, output) < 0)
						goto out_unmap;
				} else {
					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
						goto out_unmap;
				}

				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		if (evlist->mmap[cpu].base != NULL) {
			munmap(evlist->mmap[cpu].base, evlist->mmap_len);
			evlist->mmap[cpu].base = NULL;
		}
	}
	return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int thread;

	for (thread = 0; thread < evlist->threads->nr; thread++) {
		int output = -1;

		list_for_each_entry(evsel, &evlist->entries, node) {
			int fd = FD(evsel, 0, thread);

			if (output == -1) {
				output = fd;
				if (__perf_evlist__mmap(evlist, thread,
							prot, mask, output) < 0)
					goto out_unmap;
			} else {
				if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
					goto out_unmap;
			}

			if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
			    perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (thread = 0; thread < evlist->threads->nr; thread++) {
		if (evlist->mmap[thread].base != NULL) {
			munmap(evlist->mmap[thread].base, evlist->mmap_len);
			evlist->mmap[thread].base = NULL;
		}
	}
	return -1;
}

/** perf_evlist__mmap - Create per cpu maps to receive events
 *
 * @evlist - list of events
 * @pages - map length in pages
 * @overwrite - overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
588 * 602 *
589 * struct perf_mmap *m = &evlist->mmap[cpu]; 603 * struct perf_mmap *m = &evlist->mmap[cpu];
590 * unsigned int head = perf_mmap__read_head(m); 604 * unsigned int head = perf_mmap__read_head(m);
591 * 605 *
592 * perf_mmap__write_tail(m, head) 606 * perf_mmap__write_tail(m, head)
593 * 607 *
594 * Using perf_evlist__read_on_cpu does this automatically. 608 * Using perf_evlist__read_on_cpu does this automatically.
595 */ 609 */
596 int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages, 610 int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
597 bool overwrite) 611 bool overwrite)
598 { 612 {
599 unsigned int page_size = sysconf(_SC_PAGE_SIZE); 613 unsigned int page_size = sysconf(_SC_PAGE_SIZE);
600 struct perf_evsel *evsel; 614 struct perf_evsel *evsel;
601 const struct cpu_map *cpus = evlist->cpus; 615 const struct cpu_map *cpus = evlist->cpus;
602 const struct thread_map *threads = evlist->threads; 616 const struct thread_map *threads = evlist->threads;
603 int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask; 617 int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;
604 618
605 /* 512 kiB: default amount of unprivileged mlocked memory */ 619 /* 512 kiB: default amount of unprivileged mlocked memory */
606 if (pages == UINT_MAX) 620 if (pages == UINT_MAX)
607 pages = (512 * 1024) / page_size; 621 pages = (512 * 1024) / page_size;
608 else if (!is_power_of_2(pages)) 622 else if (!is_power_of_2(pages))
609 return -EINVAL; 623 return -EINVAL;
610 624
611 mask = pages * page_size - 1; 625 mask = pages * page_size - 1;
612 626
613 if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0) 627 if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
614 return -ENOMEM; 628 return -ENOMEM;
615 629
616 if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0) 630 if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
617 return -ENOMEM; 631 return -ENOMEM;
618 632
619 evlist->overwrite = overwrite; 633 evlist->overwrite = overwrite;
620 evlist->mmap_len = (pages + 1) * page_size; 634 evlist->mmap_len = (pages + 1) * page_size;
621 635
622 list_for_each_entry(evsel, &evlist->entries, node) { 636 list_for_each_entry(evsel, &evlist->entries, node) {
623 if ((evsel->attr.read_format & PERF_FORMAT_ID) && 637 if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
624 evsel->sample_id == NULL && 638 evsel->sample_id == NULL &&
625 perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0) 639 perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
626 return -ENOMEM; 640 return -ENOMEM;
627 } 641 }
628 642
629 if (cpu_map__all(cpus)) 643 if (cpu_map__all(cpus))
630 return perf_evlist__mmap_per_thread(evlist, prot, mask); 644 return perf_evlist__mmap_per_thread(evlist, prot, mask);
631 645
632 return perf_evlist__mmap_per_cpu(evlist, prot, mask); 646 return perf_evlist__mmap_per_cpu(evlist, prot, mask);
633 } 647 }
634 648
635 int perf_evlist__create_maps(struct perf_evlist *evlist, 649 int perf_evlist__create_maps(struct perf_evlist *evlist,
636 struct perf_target *target) 650 struct perf_target *target)
637 { 651 {
638 evlist->threads = thread_map__new_str(target->pid, target->tid, 652 evlist->threads = thread_map__new_str(target->pid, target->tid,
639 target->uid); 653 target->uid);
640 654
641 if (evlist->threads == NULL) 655 if (evlist->threads == NULL)
642 return -1; 656 return -1;
643 657
644 if (perf_target__has_task(target)) 658 if (perf_target__has_task(target))
645 evlist->cpus = cpu_map__dummy_new(); 659 evlist->cpus = cpu_map__dummy_new();
646 else if (!perf_target__has_cpu(target) && !target->uses_mmap) 660 else if (!perf_target__has_cpu(target) && !target->uses_mmap)
647 evlist->cpus = cpu_map__dummy_new(); 661 evlist->cpus = cpu_map__dummy_new();
648 else 662 else
649 evlist->cpus = cpu_map__new(target->cpu_list); 663 evlist->cpus = cpu_map__new(target->cpu_list);
650 664
651 if (evlist->cpus == NULL) 665 if (evlist->cpus == NULL)
652 goto out_delete_threads; 666 goto out_delete_threads;
653 667
654 return 0; 668 return 0;
655 669
656 out_delete_threads: 670 out_delete_threads:
657 thread_map__delete(evlist->threads); 671 thread_map__delete(evlist->threads);
658 return -1; 672 return -1;
659 } 673 }
660 674
661 void perf_evlist__delete_maps(struct perf_evlist *evlist) 675 void perf_evlist__delete_maps(struct perf_evlist *evlist)
662 { 676 {
663 cpu_map__delete(evlist->cpus); 677 cpu_map__delete(evlist->cpus);
664 thread_map__delete(evlist->threads); 678 thread_map__delete(evlist->threads);
665 evlist->cpus = NULL; 679 evlist->cpus = NULL;
666 evlist->threads = NULL; 680 evlist->threads = NULL;
667 } 681 }
668 682
669 int perf_evlist__apply_filters(struct perf_evlist *evlist) 683 int perf_evlist__apply_filters(struct perf_evlist *evlist)
670 { 684 {
671 struct perf_evsel *evsel; 685 struct perf_evsel *evsel;
672 int err = 0; 686 int err = 0;
673 const int ncpus = cpu_map__nr(evlist->cpus), 687 const int ncpus = cpu_map__nr(evlist->cpus),
674 nthreads = evlist->threads->nr; 688 nthreads = evlist->threads->nr;
675 689
676 list_for_each_entry(evsel, &evlist->entries, node) { 690 list_for_each_entry(evsel, &evlist->entries, node) {
677 if (evsel->filter == NULL) 691 if (evsel->filter == NULL)
678 continue; 692 continue;
679 693
680 err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter); 694 err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
681 if (err) 695 if (err)
682 break; 696 break;
683 } 697 }
684 698
685 return err; 699 return err;
686 } 700 }
687 701
688 int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter) 702 int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
689 { 703 {
690 struct perf_evsel *evsel; 704 struct perf_evsel *evsel;
691 int err = 0; 705 int err = 0;
692 const int ncpus = cpu_map__nr(evlist->cpus), 706 const int ncpus = cpu_map__nr(evlist->cpus),
693 nthreads = evlist->threads->nr; 707 nthreads = evlist->threads->nr;
694 708
695 list_for_each_entry(evsel, &evlist->entries, node) { 709 list_for_each_entry(evsel, &evlist->entries, node) {
696 err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter); 710 err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
697 if (err) 711 if (err)
698 break; 712 break;
699 } 713 }
700 714
701 return err; 715 return err;
702 } 716 }
703 717
704 bool perf_evlist__valid_sample_type(struct perf_evlist *evlist) 718 bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
705 { 719 {
706 struct perf_evsel *first = perf_evlist__first(evlist), *pos = first; 720 struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
707 721
708 list_for_each_entry_continue(pos, &evlist->entries, node) { 722 list_for_each_entry_continue(pos, &evlist->entries, node) {
709 if (first->attr.sample_type != pos->attr.sample_type) 723 if (first->attr.sample_type != pos->attr.sample_type)
710 return false; 724 return false;
711 } 725 }
712 726
713 return true; 727 return true;
714 } 728 }
715 729
716 u64 perf_evlist__sample_type(struct perf_evlist *evlist) 730 u64 perf_evlist__sample_type(struct perf_evlist *evlist)
717 { 731 {
718 struct perf_evsel *first = perf_evlist__first(evlist); 732 struct perf_evsel *first = perf_evlist__first(evlist);
719 return first->attr.sample_type; 733 return first->attr.sample_type;
720 } 734 }
721 735
722 u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist) 736 u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
723 { 737 {
724 struct perf_evsel *first = perf_evlist__first(evlist); 738 struct perf_evsel *first = perf_evlist__first(evlist);
725 struct perf_sample *data; 739 struct perf_sample *data;
726 u64 sample_type; 740 u64 sample_type;
727 u16 size = 0; 741 u16 size = 0;
728 742
729 if (!first->attr.sample_id_all) 743 if (!first->attr.sample_id_all)
730 goto out; 744 goto out;
731 745
732 sample_type = first->attr.sample_type; 746 sample_type = first->attr.sample_type;
733 747
734 if (sample_type & PERF_SAMPLE_TID) 748 if (sample_type & PERF_SAMPLE_TID)
735 size += sizeof(data->tid) * 2; 749 size += sizeof(data->tid) * 2;
736 750
737 if (sample_type & PERF_SAMPLE_TIME) 751 if (sample_type & PERF_SAMPLE_TIME)
738 size += sizeof(data->time); 752 size += sizeof(data->time);
739 753
740 if (sample_type & PERF_SAMPLE_ID) 754 if (sample_type & PERF_SAMPLE_ID)
741 size += sizeof(data->id); 755 size += sizeof(data->id);
742 756
743 if (sample_type & PERF_SAMPLE_STREAM_ID) 757 if (sample_type & PERF_SAMPLE_STREAM_ID)
744 size += sizeof(data->stream_id); 758 size += sizeof(data->stream_id);
745 759
746 if (sample_type & PERF_SAMPLE_CPU) 760 if (sample_type & PERF_SAMPLE_CPU)
747 size += sizeof(data->cpu) * 2; 761 size += sizeof(data->cpu) * 2;
748 out: 762 out:
749 return size; 763 return size;
750 } 764 }
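
Each branch above accounts for one field of the sample_id trailer that sample_id_all appends to non-sample events: PERF_SAMPLE_TID costs two u32s (pid and tid, 8 bytes), PERF_SAMPLE_CPU two u32s (cpu plus padding, 8 bytes), and TIME, ID and STREAM_ID one u64 apiece (8 bytes each). So, for example, an evsel with sample_id_all set and sample_type = PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_ID yields 8 + 8 + 8 = 24 bytes of per-event trailer.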
751 765
752 bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist) 766 bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
753 { 767 {
754 struct perf_evsel *first = perf_evlist__first(evlist), *pos = first; 768 struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
755 769
756 list_for_each_entry_continue(pos, &evlist->entries, node) { 770 list_for_each_entry_continue(pos, &evlist->entries, node) {
757 if (first->attr.sample_id_all != pos->attr.sample_id_all) 771 if (first->attr.sample_id_all != pos->attr.sample_id_all)
758 return false; 772 return false;
759 } 773 }
760 774
761 return true; 775 return true;
762 } 776 }
763 777
764 bool perf_evlist__sample_id_all(struct perf_evlist *evlist) 778 bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
765 { 779 {
766 struct perf_evsel *first = perf_evlist__first(evlist); 780 struct perf_evsel *first = perf_evlist__first(evlist);
767 return first->attr.sample_id_all; 781 return first->attr.sample_id_all;
768 } 782 }
769 783
770 void perf_evlist__set_selected(struct perf_evlist *evlist, 784 void perf_evlist__set_selected(struct perf_evlist *evlist,
771 struct perf_evsel *evsel) 785 struct perf_evsel *evsel)
772 { 786 {
773 evlist->selected = evsel; 787 evlist->selected = evsel;
774 } 788 }
775 789
776 int perf_evlist__open(struct perf_evlist *evlist) 790 int perf_evlist__open(struct perf_evlist *evlist)
777 { 791 {
778 struct perf_evsel *evsel; 792 struct perf_evsel *evsel;
779 int err, ncpus, nthreads; 793 int err, ncpus, nthreads;
780 794
781 list_for_each_entry(evsel, &evlist->entries, node) { 795 list_for_each_entry(evsel, &evlist->entries, node) {
782 err = perf_evsel__open(evsel, evlist->cpus, evlist->threads); 796 err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
783 if (err < 0) 797 if (err < 0)
784 goto out_err; 798 goto out_err;
785 } 799 }
786 800
787 return 0; 801 return 0;
788 out_err: 802 out_err:
789 ncpus = evlist->cpus ? evlist->cpus->nr : 1; 803 ncpus = evlist->cpus ? evlist->cpus->nr : 1;
790 nthreads = evlist->threads ? evlist->threads->nr : 1; 804 nthreads = evlist->threads ? evlist->threads->nr : 1;
791 805
792 list_for_each_entry_reverse(evsel, &evlist->entries, node) 806 list_for_each_entry_reverse(evsel, &evlist->entries, node)
793 perf_evsel__close(evsel, ncpus, nthreads); 807 perf_evsel__close(evsel, ncpus, nthreads);
794 808
795 errno = -err; 809 errno = -err;
796 return err; 810 return err;
797 } 811 }
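
On failure, perf_evlist__open() above unwinds in reverse order so no fds leak, and stashes -err in errno before returning, so callers can report with strerror(). A minimal call-site sketch:

	err = perf_evlist__open(evlist);
	if (err < 0) {
		pr_err("failed to open counters: %s\n", strerror(errno));
		goto out_delete_maps;
	}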
798 812
799 int perf_evlist__prepare_workload(struct perf_evlist *evlist, 813 int perf_evlist__prepare_workload(struct perf_evlist *evlist,
800 struct perf_record_opts *opts, 814 struct perf_record_opts *opts,
801 const char *argv[]) 815 const char *argv[])
802 { 816 {
803 int child_ready_pipe[2], go_pipe[2]; 817 int child_ready_pipe[2], go_pipe[2];
804 char bf; 818 char bf;
805 819
806 if (pipe(child_ready_pipe) < 0) { 820 if (pipe(child_ready_pipe) < 0) {
807 perror("failed to create 'ready' pipe"); 821 perror("failed to create 'ready' pipe");
808 return -1; 822 return -1;
809 } 823 }
810 824
811 if (pipe(go_pipe) < 0) { 825 if (pipe(go_pipe) < 0) {
812 perror("failed to create 'go' pipe"); 826 perror("failed to create 'go' pipe");
813 goto out_close_ready_pipe; 827 goto out_close_ready_pipe;
814 } 828 }
815 829
816 evlist->workload.pid = fork(); 830 evlist->workload.pid = fork();
817 if (evlist->workload.pid < 0) { 831 if (evlist->workload.pid < 0) {
818 perror("failed to fork"); 832 perror("failed to fork");
819 goto out_close_pipes; 833 goto out_close_pipes;
820 } 834 }
821 835
822 if (!evlist->workload.pid) { 836 if (!evlist->workload.pid) {
823 if (opts->pipe_output) 837 if (opts->pipe_output)
824 dup2(2, 1); 838 dup2(2, 1);
825 839
826 close(child_ready_pipe[0]); 840 close(child_ready_pipe[0]);
827 close(go_pipe[1]); 841 close(go_pipe[1]);
828 fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC); 842 fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
829 843
830 /* 844 /*
831 * Do a dummy execvp to get the PLT entry resolved, 845 * Do a dummy execvp to get the PLT entry resolved,
832 * so we avoid the resolver overhead on the real 846 * so we avoid the resolver overhead on the real
833 * execvp call. 847 * execvp call.
834 */ 848 */
835 execvp("", (char **)argv); 849 execvp("", (char **)argv);
836 850
837 /* 851 /*
838 * Tell the parent we're ready to go 852 * Tell the parent we're ready to go
839 */ 853 */
840 close(child_ready_pipe[1]); 854 close(child_ready_pipe[1]);
841 855
842 /* 856 /*
843 * Wait until the parent tells us to go. 857 * Wait until the parent tells us to go.
844 */ 858 */
845 if (read(go_pipe[0], &bf, 1) == -1) 859 if (read(go_pipe[0], &bf, 1) == -1)
846 perror("unable to read pipe"); 860 perror("unable to read pipe");
847 861
848 execvp(argv[0], (char **)argv); 862 execvp(argv[0], (char **)argv);
849 863
850 perror(argv[0]); 864 perror(argv[0]);
851 kill(getppid(), SIGUSR1); 865 kill(getppid(), SIGUSR1);
852 exit(-1); 866 exit(-1);
853 } 867 }
854 868
855 if (perf_target__none(&opts->target)) 869 if (perf_target__none(&opts->target))
856 evlist->threads->map[0] = evlist->workload.pid; 870 evlist->threads->map[0] = evlist->workload.pid;
857 871
858 close(child_ready_pipe[1]); 872 close(child_ready_pipe[1]);
859 close(go_pipe[0]); 873 close(go_pipe[0]);
860 /* 874 /*
861 * wait for child to settle 875 * wait for child to settle
862 */ 876 */
863 if (read(child_ready_pipe[0], &bf, 1) == -1) { 877 if (read(child_ready_pipe[0], &bf, 1) == -1) {
864 perror("unable to read pipe"); 878 perror("unable to read pipe");
865 goto out_close_pipes; 879 goto out_close_pipes;
866 } 880 }
867 881
868 evlist->workload.cork_fd = go_pipe[1]; 882 evlist->workload.cork_fd = go_pipe[1];
869 close(child_ready_pipe[0]); 883 close(child_ready_pipe[0]);
870 return 0; 884 return 0;
871 885
872 out_close_pipes: 886 out_close_pipes:
873 close(go_pipe[0]); 887 close(go_pipe[0]);
874 close(go_pipe[1]); 888 close(go_pipe[1]);
875 out_close_ready_pipe: 889 out_close_ready_pipe:
876 close(child_ready_pipe[0]); 890 close(child_ready_pipe[0]);
877 close(child_ready_pipe[1]); 891 close(child_ready_pipe[1]);
878 return -1; 892 return -1;
879 } 893 }
880 894
881 int perf_evlist__start_workload(struct perf_evlist *evlist) 895 int perf_evlist__start_workload(struct perf_evlist *evlist)
882 { 896 {
883 if (evlist->workload.cork_fd > 0) { 897 if (evlist->workload.cork_fd > 0) {
884 /* 898 /*
885 * Remove the cork, let it rip! 899 * Remove the cork, let it rip!
886 */ 900 */
887 return close(evlist->workload.cork_fd); 901 return close(evlist->workload.cork_fd);
888 } 902 }
889 903
890 return 0; 904 return 0;
891 } 905 }
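
Taken together, the two workload routines implement a fork-and-park handshake: prepare_workload() forks the command, the child closes child_ready_pipe[1] to signal it is set up, then blocks in read() on go_pipe until the parent closes the write end. start_workload() is that uncorking: closing cork_fd makes the child's read() return, so its real execvp() proceeds only after counters are enabled. The usual sequence, sketched with error handling elided:

	if (forks) {
		/* fork argv[], leaving the child corked on the go pipe */
		perf_evlist__prepare_workload(evlist, &opts, argv);
		/* ... perf_evlist__open(), perf_evlist__mmap(), enable events ... */
		perf_evlist__start_workload(evlist);	/* uncork: child execs */
	}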
892 906
893 int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event, 907 int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
894 struct perf_sample *sample) 908 struct perf_sample *sample)
895 { 909 {
896 struct perf_evsel *evsel = perf_evlist__first(evlist); 910 struct perf_evsel *evsel = perf_evlist__first(evlist);
897 return perf_evsel__parse_sample(evsel, event, sample); 911 return perf_evsel__parse_sample(evsel, event, sample);
898 } 912 }
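
parse_sample() delegates to the first evsel, which is safe only when valid_sample_type() holds, i.e. all events in the list share one sample_type. A sketch of the mmap-read loop that typically feeds it (idx is the mmap index being drained):

	union perf_event *event;

	while ((event = perf_evlist__mmap_read(evlist, idx)) != NULL) {
		struct perf_sample sample;

		if (perf_evlist__parse_sample(evlist, event, &sample) < 0)
			continue;	/* malformed, skip */
		/* ... consume sample.time, sample.tid, event->header.type ... */
	}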
899 913
900 size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp) 914 size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
901 { 915 {
902 struct perf_evsel *evsel; 916 struct perf_evsel *evsel;
903 size_t printed = 0; 917 size_t printed = 0;
904 918
905 list_for_each_entry(evsel, &evlist->entries, node) { 919 list_for_each_entry(evsel, &evlist->entries, node) {
906 printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "", 920 printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
907 perf_evsel__name(evsel)); 921 perf_evsel__name(evsel));
908 } 922 }
909 923
910 return printed + fprintf(fp, "\n"); 924 return printed + fprintf(fp, "\n");
911 } 925 }
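
For an evlist holding, say, the default cycles event plus an instructions event, the loop above prints "cycles, instructions" and the final fprintf adds the newline; the ", " separator is suppressed for the evsel with idx 0.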
912 926
tools/perf/util/evlist.h
1 #ifndef __PERF_EVLIST_H 1 #ifndef __PERF_EVLIST_H
2 #define __PERF_EVLIST_H 1 2 #define __PERF_EVLIST_H 1
3 3
4 #include <linux/list.h> 4 #include <linux/list.h>
5 #include <stdio.h> 5 #include <stdio.h>
6 #include "../perf.h" 6 #include "../perf.h"
7 #include "event.h" 7 #include "event.h"
8 #include "evsel.h" 8 #include "evsel.h"
9 #include "util.h" 9 #include "util.h"
10 #include <unistd.h> 10 #include <unistd.h>
11 11
12 struct pollfd; 12 struct pollfd;
13 struct thread_map; 13 struct thread_map;
14 struct cpu_map; 14 struct cpu_map;
15 struct perf_record_opts; 15 struct perf_record_opts;
16 16
17 #define PERF_EVLIST__HLIST_BITS 8 17 #define PERF_EVLIST__HLIST_BITS 8
18 #define PERF_EVLIST__HLIST_SIZE (1 << PERF_EVLIST__HLIST_BITS) 18 #define PERF_EVLIST__HLIST_SIZE (1 << PERF_EVLIST__HLIST_BITS)
19 19
20 struct perf_evlist { 20 struct perf_evlist {
21 struct list_head entries; 21 struct list_head entries;
22 struct hlist_head heads[PERF_EVLIST__HLIST_SIZE]; 22 struct hlist_head heads[PERF_EVLIST__HLIST_SIZE];
23 int nr_entries; 23 int nr_entries;
24 int nr_fds; 24 int nr_fds;
25 int nr_mmaps; 25 int nr_mmaps;
26 int mmap_len; 26 int mmap_len;
27 struct { 27 struct {
28 int cork_fd; 28 int cork_fd;
29 pid_t pid; 29 pid_t pid;
30 } workload; 30 } workload;
31 bool overwrite; 31 bool overwrite;
32 union perf_event event_copy; 32 union perf_event event_copy;
33 struct perf_mmap *mmap; 33 struct perf_mmap *mmap;
34 struct pollfd *pollfd; 34 struct pollfd *pollfd;
35 struct thread_map *threads; 35 struct thread_map *threads;
36 struct cpu_map *cpus; 36 struct cpu_map *cpus;
37 struct perf_evsel *selected; 37 struct perf_evsel *selected;
38 }; 38 };
39 39
40 struct perf_evsel_str_handler { 40 struct perf_evsel_str_handler {
41 const char *name; 41 const char *name;
42 void *handler; 42 void *handler;
43 }; 43 };
44 44
45 struct perf_evlist *perf_evlist__new(struct cpu_map *cpus, 45 struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
46 struct thread_map *threads); 46 struct thread_map *threads);
47 void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus, 47 void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
48 struct thread_map *threads); 48 struct thread_map *threads);
49 void perf_evlist__exit(struct perf_evlist *evlist); 49 void perf_evlist__exit(struct perf_evlist *evlist);
50 void perf_evlist__delete(struct perf_evlist *evlist); 50 void perf_evlist__delete(struct perf_evlist *evlist);
51 51
52 void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry); 52 void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry);
53 int perf_evlist__add_default(struct perf_evlist *evlist); 53 int perf_evlist__add_default(struct perf_evlist *evlist);
54 int perf_evlist__add_attrs(struct perf_evlist *evlist, 54 int perf_evlist__add_attrs(struct perf_evlist *evlist,
55 struct perf_event_attr *attrs, size_t nr_attrs); 55 struct perf_event_attr *attrs, size_t nr_attrs);
56 int __perf_evlist__add_default_attrs(struct perf_evlist *evlist, 56 int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
57 struct perf_event_attr *attrs, size_t nr_attrs); 57 struct perf_event_attr *attrs, size_t nr_attrs);
58 int perf_evlist__add_tracepoints(struct perf_evlist *evlist, 58 int perf_evlist__add_tracepoints(struct perf_evlist *evlist,
59 const char *tracepoints[], size_t nr_tracepoints); 59 const char *tracepoints[], size_t nr_tracepoints);
60 int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist, 60 int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist,
61 const struct perf_evsel_str_handler *assocs, 61 const struct perf_evsel_str_handler *assocs,
62 size_t nr_assocs); 62 size_t nr_assocs);
63 63
64 #define perf_evlist__add_attrs_array(evlist, array) \ 64 #define perf_evlist__add_attrs_array(evlist, array) \
65 perf_evlist__add_attrs(evlist, array, ARRAY_SIZE(array)) 65 perf_evlist__add_attrs(evlist, array, ARRAY_SIZE(array))
66 #define perf_evlist__add_default_attrs(evlist, array) \ 66 #define perf_evlist__add_default_attrs(evlist, array) \
67 __perf_evlist__add_default_attrs(evlist, array, ARRAY_SIZE(array)) 67 __perf_evlist__add_default_attrs(evlist, array, ARRAY_SIZE(array))
68 68
69 #define perf_evlist__add_tracepoints_array(evlist, array) \ 69 #define perf_evlist__add_tracepoints_array(evlist, array) \
70 perf_evlist__add_tracepoints(evlist, array, ARRAY_SIZE(array)) 70 perf_evlist__add_tracepoints(evlist, array, ARRAY_SIZE(array))
71 71
72 #define perf_evlist__set_tracepoints_handlers_array(evlist, array) \ 72 #define perf_evlist__set_tracepoints_handlers_array(evlist, array) \
73 perf_evlist__set_tracepoints_handlers(evlist, array, ARRAY_SIZE(array)) 73 perf_evlist__set_tracepoints_handlers(evlist, array, ARRAY_SIZE(array))
74 74
75 int perf_evlist__add_newtp(struct perf_evlist *evlist,
76 const char *sys, const char *name, void *handler);
77
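
This declaration is the commit's addition: add_newtp bundles creating a tracepoint evsel for sys:name and adding it to the evlist (presumably wrapping the perf_evsel__newtp()/perf_evlist__add() pair), and lets callers associate a handler in the same call. A sketch of the intended builtin-trace usage (passing NULL for the handler is an assumption here; a tool may instead pass a dispatch function):

	if (perf_evlist__add_newtp(evlist, "raw_syscalls", "sys_enter", NULL) ||
	    perf_evlist__add_newtp(evlist, "raw_syscalls", "sys_exit", NULL))
		goto out_delete_evlist;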
75 int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter); 78 int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter);
76 79
77 struct perf_evsel * 80 struct perf_evsel *
78 perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id); 81 perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id);
79 82
80 void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel, 83 void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
81 int cpu, int thread, u64 id); 84 int cpu, int thread, u64 id);
82 85
83 void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd); 86 void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd);
84 87
85 struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id); 88 struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id);
86 89
87 union perf_event *perf_evlist__mmap_read(struct perf_evlist *self, int idx); 90 union perf_event *perf_evlist__mmap_read(struct perf_evlist *self, int idx);
88 91
89 int perf_evlist__open(struct perf_evlist *evlist); 92 int perf_evlist__open(struct perf_evlist *evlist);
90 93
91 void perf_evlist__config_attrs(struct perf_evlist *evlist, 94 void perf_evlist__config_attrs(struct perf_evlist *evlist,
92 struct perf_record_opts *opts); 95 struct perf_record_opts *opts);
93 96
94 int perf_evlist__prepare_workload(struct perf_evlist *evlist, 97 int perf_evlist__prepare_workload(struct perf_evlist *evlist,
95 struct perf_record_opts *opts, 98 struct perf_record_opts *opts,
96 const char *argv[]); 99 const char *argv[]);
97 int perf_evlist__start_workload(struct perf_evlist *evlist); 100 int perf_evlist__start_workload(struct perf_evlist *evlist);
98 101
99 int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages, 102 int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
100 bool overwrite); 103 bool overwrite);
101 void perf_evlist__munmap(struct perf_evlist *evlist); 104 void perf_evlist__munmap(struct perf_evlist *evlist);
102 105
103 void perf_evlist__disable(struct perf_evlist *evlist); 106 void perf_evlist__disable(struct perf_evlist *evlist);
104 void perf_evlist__enable(struct perf_evlist *evlist); 107 void perf_evlist__enable(struct perf_evlist *evlist);
105 108
106 void perf_evlist__set_selected(struct perf_evlist *evlist, 109 void perf_evlist__set_selected(struct perf_evlist *evlist,
107 struct perf_evsel *evsel); 110 struct perf_evsel *evsel);
108 111
109 static inline void perf_evlist__set_maps(struct perf_evlist *evlist, 112 static inline void perf_evlist__set_maps(struct perf_evlist *evlist,
110 struct cpu_map *cpus, 113 struct cpu_map *cpus,
111 struct thread_map *threads) 114 struct thread_map *threads)
112 { 115 {
113 evlist->cpus = cpus; 116 evlist->cpus = cpus;
114 evlist->threads = threads; 117 evlist->threads = threads;
115 } 118 }
116 119
117 int perf_evlist__create_maps(struct perf_evlist *evlist, 120 int perf_evlist__create_maps(struct perf_evlist *evlist,
118 struct perf_target *target); 121 struct perf_target *target);
119 void perf_evlist__delete_maps(struct perf_evlist *evlist); 122 void perf_evlist__delete_maps(struct perf_evlist *evlist);
120 int perf_evlist__apply_filters(struct perf_evlist *evlist); 123 int perf_evlist__apply_filters(struct perf_evlist *evlist);
121 124
122 void __perf_evlist__set_leader(struct list_head *list); 125 void __perf_evlist__set_leader(struct list_head *list);
123 void perf_evlist__set_leader(struct perf_evlist *evlist); 126 void perf_evlist__set_leader(struct perf_evlist *evlist);
124 127
125 u64 perf_evlist__sample_type(struct perf_evlist *evlist); 128 u64 perf_evlist__sample_type(struct perf_evlist *evlist);
126 bool perf_evlist__sample_id_all(struct perf_evlist *evlist); 129 bool perf_evlist__sample_id_all(struct perf_evlist *evlist);
127 u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist); 130 u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist);
128 131
129 int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event, 132 int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
130 struct perf_sample *sample); 133 struct perf_sample *sample);
131 134
132 bool perf_evlist__valid_sample_type(struct perf_evlist *evlist); 135 bool perf_evlist__valid_sample_type(struct perf_evlist *evlist);
133 bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist); 136 bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist);
134 137
135 void perf_evlist__splice_list_tail(struct perf_evlist *evlist, 138 void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
136 struct list_head *list, 139 struct list_head *list,
137 int nr_entries); 140 int nr_entries);
138 141
139 static inline struct perf_evsel *perf_evlist__first(struct perf_evlist *evlist) 142 static inline struct perf_evsel *perf_evlist__first(struct perf_evlist *evlist)
140 { 143 {
141 return list_entry(evlist->entries.next, struct perf_evsel, node); 144 return list_entry(evlist->entries.next, struct perf_evsel, node);
142 } 145 }
143 146
144 static inline struct perf_evsel *perf_evlist__last(struct perf_evlist *evlist) 147 static inline struct perf_evsel *perf_evlist__last(struct perf_evlist *evlist)
145 { 148 {
146 return list_entry(evlist->entries.prev, struct perf_evsel, node); 149 return list_entry(evlist->entries.prev, struct perf_evsel, node);
147 } 150 }
148 151
149 size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp); 152 size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp);
150 #endif /* __PERF_EVLIST_H */ 153 #endif /* __PERF_EVLIST_H */
151 154