Commit 1aed2671738785e8f5aea663a6fda91aa7ef59b5

Authored by Joerg Roedel
Committed by Arnaldo Carvalho de Melo
1 parent df25f989a4

perf kvm: Do guest-only counting by default

Make use of exclude_guest and exclude_host in perf-kvm to do guest-only
counting by default.

Cc: Gleb Natapov <gleb@redhat.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
[ committer note: Moved perf_{guest,host} & event_attr_init to util.c ]
[                 so as not to drag more stuff to the python binding]
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>

Showing 5 changed files with 26 additions and 5 deletions
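The committer note above says the perf_{guest,host} globals and the new event_attr_init() helper were moved into tools/perf/util/util.c, one of the five changed files that is not part of the excerpt below. As a rough sketch of what that helper presumably looks like, inferred from the commit message and from the defaults being deleted out of builtin-kvm.c below (the authoritative version lives in util.c and may differ in detail):

	#include <linux/perf_event.h>
	#include <stdbool.h>

	/* Host-side counting is on by default, guest-side is opt-in;
	 * cmd_kvm() flips both before parsing its options. */
	bool perf_host  = true;
	bool perf_guest = false;

	/* Mask out whichever context was not requested, so plain "perf"
	 * stays host-only and "perf kvm" becomes guest-only by default. */
	void event_attr_init(struct perf_event_attr *attr)
	{
		if (!perf_host)
			attr->exclude_host = 1;
		if (!perf_guest)
			attr->exclude_guest = 1;
	}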

tools/perf/builtin-kvm.c

@@ -22,9 +22,6 @@
 static const char *file_name;
 static char name_buffer[256];
 
-bool perf_host = 1;
-bool perf_guest;
-
 static const char * const kvm_usage[] = {
 	"perf kvm [<options>] {top|record|report|diff|buildid-list}",
 	NULL
@@ -107,7 +104,8 @@
 
 int cmd_kvm(int argc, const char **argv, const char *prefix __used)
 {
-	perf_host = perf_guest = 0;
+	perf_host = 0;
+	perf_guest = 1;
 
 	argc = parse_options(argc, argv, kvm_options, kvm_usage,
 			PARSE_OPT_STOP_AT_NON_OPTION);
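With the hunk above, cmd_kvm() now starts out with perf_host = 0 and perf_guest = 1, so, unless --host is passed, any event the tool subsequently sets up is requested with the host side excluded (and the existing `if (!perf_host) perf_guest = 1;` check still guarantees guest counting whenever host counting is off). At the perf_event_attr level, "guest-only" just means the exclude_host bit is set and exclude_guest is clear; a small self-contained illustration follows (the helper name and the choice of the cycles event are illustrative, not part of this commit):

	#include <linux/perf_event.h>
	#include <string.h>

	/* Fill in an attribute for a guest-only cycles counter -- the kind
	 * of event "perf kvm" now asks the kernel for by default. */
	static void guest_only_cycles_attr(struct perf_event_attr *attr)
	{
		memset(attr, 0, sizeof(*attr));
		attr->size          = sizeof(*attr);
		attr->type          = PERF_TYPE_HARDWARE;
		attr->config        = PERF_COUNT_HW_CPU_CYCLES;
		attr->exclude_host  = 1;	/* ignore work done in host context */
		attr->exclude_guest = 0;	/* keep work done while a guest runs */
	}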
tools/perf/util/evlist.c

@@ -111,8 +111,11 @@ int perf_evlist__add_default(struct perf_evlist *evlist)
 		.type = PERF_TYPE_HARDWARE,
 		.config = PERF_COUNT_HW_CPU_CYCLES,
 	};
-	struct perf_evsel *evsel = perf_evsel__new(&attr, 0);
+	struct perf_evsel *evsel;
 
+	event_attr_init(&attr);
+
+	evsel = perf_evsel__new(&attr, 0);
 	if (evsel == NULL)
 		goto error;
 
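The reordering in this hunk is what makes the helper effective: perf_evsel__new() takes its own copy of the attribute when it builds the evsel, so the exclusion bits have to be in place before the constructor runs, not after. A minimal sketch of that constraint, with a made-up toy_evsel type standing in for the real (much larger) struct perf_evsel:

	#include <stdlib.h>
	#include <string.h>
	#include <linux/perf_event.h>

	struct toy_evsel {
		struct perf_event_attr attr;	/* private copy taken at construction */
	};

	static struct toy_evsel *toy_evsel__new(struct perf_event_attr *attr)
	{
		struct toy_evsel *evsel = calloc(1, sizeof(*evsel));

		if (evsel)
			evsel->attr = *attr;	/* snapshot happens here ... */
		return evsel;
	}

	int main(void)
	{
		struct perf_event_attr attr;
		struct toy_evsel *evsel;

		memset(&attr, 0, sizeof(attr));
		attr.exclude_guest = 1;		/* stands in for event_attr_init(&attr) */

		evsel = toy_evsel__new(&attr);	/* the copy already carries the bit */

		attr.exclude_host = 1;		/* too late: evsel->attr is unchanged */

		free(evsel);
		return 0;
	}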
tools/perf/util/parse-events.c
1 #include "../../../include/linux/hw_breakpoint.h" 1 #include "../../../include/linux/hw_breakpoint.h"
2 #include "util.h" 2 #include "util.h"
3 #include "../perf.h" 3 #include "../perf.h"
4 #include "evlist.h" 4 #include "evlist.h"
5 #include "evsel.h" 5 #include "evsel.h"
6 #include "parse-options.h" 6 #include "parse-options.h"
7 #include "parse-events.h" 7 #include "parse-events.h"
8 #include "exec_cmd.h" 8 #include "exec_cmd.h"
9 #include "string.h" 9 #include "string.h"
10 #include "symbol.h" 10 #include "symbol.h"
11 #include "cache.h" 11 #include "cache.h"
12 #include "header.h" 12 #include "header.h"
13 #include "debugfs.h" 13 #include "debugfs.h"
14 14
15 struct event_symbol { 15 struct event_symbol {
16 u8 type; 16 u8 type;
17 u64 config; 17 u64 config;
18 const char *symbol; 18 const char *symbol;
19 const char *alias; 19 const char *alias;
20 }; 20 };
21 21
22 enum event_result { 22 enum event_result {
23 EVT_FAILED, 23 EVT_FAILED,
24 EVT_HANDLED, 24 EVT_HANDLED,
25 EVT_HANDLED_ALL 25 EVT_HANDLED_ALL
26 }; 26 };
27 27
28 #define CHW(x) .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_##x 28 #define CHW(x) .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_##x
29 #define CSW(x) .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_##x 29 #define CSW(x) .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_##x
30 30
31 static struct event_symbol event_symbols[] = { 31 static struct event_symbol event_symbols[] = {
32 { CHW(CPU_CYCLES), "cpu-cycles", "cycles" }, 32 { CHW(CPU_CYCLES), "cpu-cycles", "cycles" },
33 { CHW(STALLED_CYCLES_FRONTEND), "stalled-cycles-frontend", "idle-cycles-frontend" }, 33 { CHW(STALLED_CYCLES_FRONTEND), "stalled-cycles-frontend", "idle-cycles-frontend" },
34 { CHW(STALLED_CYCLES_BACKEND), "stalled-cycles-backend", "idle-cycles-backend" }, 34 { CHW(STALLED_CYCLES_BACKEND), "stalled-cycles-backend", "idle-cycles-backend" },
35 { CHW(INSTRUCTIONS), "instructions", "" }, 35 { CHW(INSTRUCTIONS), "instructions", "" },
36 { CHW(CACHE_REFERENCES), "cache-references", "" }, 36 { CHW(CACHE_REFERENCES), "cache-references", "" },
37 { CHW(CACHE_MISSES), "cache-misses", "" }, 37 { CHW(CACHE_MISSES), "cache-misses", "" },
38 { CHW(BRANCH_INSTRUCTIONS), "branch-instructions", "branches" }, 38 { CHW(BRANCH_INSTRUCTIONS), "branch-instructions", "branches" },
39 { CHW(BRANCH_MISSES), "branch-misses", "" }, 39 { CHW(BRANCH_MISSES), "branch-misses", "" },
40 { CHW(BUS_CYCLES), "bus-cycles", "" }, 40 { CHW(BUS_CYCLES), "bus-cycles", "" },
41 { CHW(REF_CPU_CYCLES), "ref-cycles", "" }, 41 { CHW(REF_CPU_CYCLES), "ref-cycles", "" },
42 42
43 { CSW(CPU_CLOCK), "cpu-clock", "" }, 43 { CSW(CPU_CLOCK), "cpu-clock", "" },
44 { CSW(TASK_CLOCK), "task-clock", "" }, 44 { CSW(TASK_CLOCK), "task-clock", "" },
45 { CSW(PAGE_FAULTS), "page-faults", "faults" }, 45 { CSW(PAGE_FAULTS), "page-faults", "faults" },
46 { CSW(PAGE_FAULTS_MIN), "minor-faults", "" }, 46 { CSW(PAGE_FAULTS_MIN), "minor-faults", "" },
47 { CSW(PAGE_FAULTS_MAJ), "major-faults", "" }, 47 { CSW(PAGE_FAULTS_MAJ), "major-faults", "" },
48 { CSW(CONTEXT_SWITCHES), "context-switches", "cs" }, 48 { CSW(CONTEXT_SWITCHES), "context-switches", "cs" },
49 { CSW(CPU_MIGRATIONS), "cpu-migrations", "migrations" }, 49 { CSW(CPU_MIGRATIONS), "cpu-migrations", "migrations" },
50 { CSW(ALIGNMENT_FAULTS), "alignment-faults", "" }, 50 { CSW(ALIGNMENT_FAULTS), "alignment-faults", "" },
51 { CSW(EMULATION_FAULTS), "emulation-faults", "" }, 51 { CSW(EMULATION_FAULTS), "emulation-faults", "" },
52 }; 52 };
53 53
54 #define __PERF_EVENT_FIELD(config, name) \ 54 #define __PERF_EVENT_FIELD(config, name) \
55 ((config & PERF_EVENT_##name##_MASK) >> PERF_EVENT_##name##_SHIFT) 55 ((config & PERF_EVENT_##name##_MASK) >> PERF_EVENT_##name##_SHIFT)
56 56
57 #define PERF_EVENT_RAW(config) __PERF_EVENT_FIELD(config, RAW) 57 #define PERF_EVENT_RAW(config) __PERF_EVENT_FIELD(config, RAW)
58 #define PERF_EVENT_CONFIG(config) __PERF_EVENT_FIELD(config, CONFIG) 58 #define PERF_EVENT_CONFIG(config) __PERF_EVENT_FIELD(config, CONFIG)
59 #define PERF_EVENT_TYPE(config) __PERF_EVENT_FIELD(config, TYPE) 59 #define PERF_EVENT_TYPE(config) __PERF_EVENT_FIELD(config, TYPE)
60 #define PERF_EVENT_ID(config) __PERF_EVENT_FIELD(config, EVENT) 60 #define PERF_EVENT_ID(config) __PERF_EVENT_FIELD(config, EVENT)
61 61
62 static const char *hw_event_names[PERF_COUNT_HW_MAX] = { 62 static const char *hw_event_names[PERF_COUNT_HW_MAX] = {
63 "cycles", 63 "cycles",
64 "instructions", 64 "instructions",
65 "cache-references", 65 "cache-references",
66 "cache-misses", 66 "cache-misses",
67 "branches", 67 "branches",
68 "branch-misses", 68 "branch-misses",
69 "bus-cycles", 69 "bus-cycles",
70 "stalled-cycles-frontend", 70 "stalled-cycles-frontend",
71 "stalled-cycles-backend", 71 "stalled-cycles-backend",
72 "ref-cycles", 72 "ref-cycles",
73 }; 73 };
74 74
75 static const char *sw_event_names[PERF_COUNT_SW_MAX] = { 75 static const char *sw_event_names[PERF_COUNT_SW_MAX] = {
76 "cpu-clock", 76 "cpu-clock",
77 "task-clock", 77 "task-clock",
78 "page-faults", 78 "page-faults",
79 "context-switches", 79 "context-switches",
80 "CPU-migrations", 80 "CPU-migrations",
81 "minor-faults", 81 "minor-faults",
82 "major-faults", 82 "major-faults",
83 "alignment-faults", 83 "alignment-faults",
84 "emulation-faults", 84 "emulation-faults",
85 }; 85 };
86 86
87 #define MAX_ALIASES 8 87 #define MAX_ALIASES 8
88 88
89 static const char *hw_cache[PERF_COUNT_HW_CACHE_MAX][MAX_ALIASES] = { 89 static const char *hw_cache[PERF_COUNT_HW_CACHE_MAX][MAX_ALIASES] = {
90 { "L1-dcache", "l1-d", "l1d", "L1-data", }, 90 { "L1-dcache", "l1-d", "l1d", "L1-data", },
91 { "L1-icache", "l1-i", "l1i", "L1-instruction", }, 91 { "L1-icache", "l1-i", "l1i", "L1-instruction", },
92 { "LLC", "L2", }, 92 { "LLC", "L2", },
93 { "dTLB", "d-tlb", "Data-TLB", }, 93 { "dTLB", "d-tlb", "Data-TLB", },
94 { "iTLB", "i-tlb", "Instruction-TLB", }, 94 { "iTLB", "i-tlb", "Instruction-TLB", },
95 { "branch", "branches", "bpu", "btb", "bpc", }, 95 { "branch", "branches", "bpu", "btb", "bpc", },
96 { "node", }, 96 { "node", },
97 }; 97 };
98 98
99 static const char *hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX][MAX_ALIASES] = { 99 static const char *hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX][MAX_ALIASES] = {
100 { "load", "loads", "read", }, 100 { "load", "loads", "read", },
101 { "store", "stores", "write", }, 101 { "store", "stores", "write", },
102 { "prefetch", "prefetches", "speculative-read", "speculative-load", }, 102 { "prefetch", "prefetches", "speculative-read", "speculative-load", },
103 }; 103 };
104 104
105 static const char *hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX] 105 static const char *hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
106 [MAX_ALIASES] = { 106 [MAX_ALIASES] = {
107 { "refs", "Reference", "ops", "access", }, 107 { "refs", "Reference", "ops", "access", },
108 { "misses", "miss", }, 108 { "misses", "miss", },
109 }; 109 };
110 110
111 #define C(x) PERF_COUNT_HW_CACHE_##x 111 #define C(x) PERF_COUNT_HW_CACHE_##x
112 #define CACHE_READ (1 << C(OP_READ)) 112 #define CACHE_READ (1 << C(OP_READ))
113 #define CACHE_WRITE (1 << C(OP_WRITE)) 113 #define CACHE_WRITE (1 << C(OP_WRITE))
114 #define CACHE_PREFETCH (1 << C(OP_PREFETCH)) 114 #define CACHE_PREFETCH (1 << C(OP_PREFETCH))
115 #define COP(x) (1 << x) 115 #define COP(x) (1 << x)
116 116
117 /* 117 /*
118 * cache operation stat 118 * cache operation stat
119 * L1I : Read and prefetch only 119 * L1I : Read and prefetch only
120 * ITLB and BPU : Read-only 120 * ITLB and BPU : Read-only
121 */ 121 */
122 static unsigned long hw_cache_stat[C(MAX)] = { 122 static unsigned long hw_cache_stat[C(MAX)] = {
123 [C(L1D)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH), 123 [C(L1D)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
124 [C(L1I)] = (CACHE_READ | CACHE_PREFETCH), 124 [C(L1I)] = (CACHE_READ | CACHE_PREFETCH),
125 [C(LL)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH), 125 [C(LL)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
126 [C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH), 126 [C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
127 [C(ITLB)] = (CACHE_READ), 127 [C(ITLB)] = (CACHE_READ),
128 [C(BPU)] = (CACHE_READ), 128 [C(BPU)] = (CACHE_READ),
129 [C(NODE)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH), 129 [C(NODE)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
130 }; 130 };
131 131
132 #define for_each_subsystem(sys_dir, sys_dirent, sys_next) \ 132 #define for_each_subsystem(sys_dir, sys_dirent, sys_next) \
133 while (!readdir_r(sys_dir, &sys_dirent, &sys_next) && sys_next) \ 133 while (!readdir_r(sys_dir, &sys_dirent, &sys_next) && sys_next) \
134 if (sys_dirent.d_type == DT_DIR && \ 134 if (sys_dirent.d_type == DT_DIR && \
135 (strcmp(sys_dirent.d_name, ".")) && \ 135 (strcmp(sys_dirent.d_name, ".")) && \
136 (strcmp(sys_dirent.d_name, ".."))) 136 (strcmp(sys_dirent.d_name, "..")))
137 137
138 static int tp_event_has_id(struct dirent *sys_dir, struct dirent *evt_dir) 138 static int tp_event_has_id(struct dirent *sys_dir, struct dirent *evt_dir)
139 { 139 {
140 char evt_path[MAXPATHLEN]; 140 char evt_path[MAXPATHLEN];
141 int fd; 141 int fd;
142 142
143 snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", tracing_events_path, 143 snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", tracing_events_path,
144 sys_dir->d_name, evt_dir->d_name); 144 sys_dir->d_name, evt_dir->d_name);
145 fd = open(evt_path, O_RDONLY); 145 fd = open(evt_path, O_RDONLY);
146 if (fd < 0) 146 if (fd < 0)
147 return -EINVAL; 147 return -EINVAL;
148 close(fd); 148 close(fd);
149 149
150 return 0; 150 return 0;
151 } 151 }
152 152
153 #define for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) \ 153 #define for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) \
154 while (!readdir_r(evt_dir, &evt_dirent, &evt_next) && evt_next) \ 154 while (!readdir_r(evt_dir, &evt_dirent, &evt_next) && evt_next) \
155 if (evt_dirent.d_type == DT_DIR && \ 155 if (evt_dirent.d_type == DT_DIR && \
156 (strcmp(evt_dirent.d_name, ".")) && \ 156 (strcmp(evt_dirent.d_name, ".")) && \
157 (strcmp(evt_dirent.d_name, "..")) && \ 157 (strcmp(evt_dirent.d_name, "..")) && \
158 (!tp_event_has_id(&sys_dirent, &evt_dirent))) 158 (!tp_event_has_id(&sys_dirent, &evt_dirent)))
159 159
160 #define MAX_EVENT_LENGTH 512 160 #define MAX_EVENT_LENGTH 512
161 161
162 162
163 struct tracepoint_path *tracepoint_id_to_path(u64 config) 163 struct tracepoint_path *tracepoint_id_to_path(u64 config)
164 { 164 {
165 struct tracepoint_path *path = NULL; 165 struct tracepoint_path *path = NULL;
166 DIR *sys_dir, *evt_dir; 166 DIR *sys_dir, *evt_dir;
167 struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent; 167 struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
168 char id_buf[4]; 168 char id_buf[4];
169 int fd; 169 int fd;
170 u64 id; 170 u64 id;
171 char evt_path[MAXPATHLEN]; 171 char evt_path[MAXPATHLEN];
172 char dir_path[MAXPATHLEN]; 172 char dir_path[MAXPATHLEN];
173 173
174 if (debugfs_valid_mountpoint(tracing_events_path)) 174 if (debugfs_valid_mountpoint(tracing_events_path))
175 return NULL; 175 return NULL;
176 176
177 sys_dir = opendir(tracing_events_path); 177 sys_dir = opendir(tracing_events_path);
178 if (!sys_dir) 178 if (!sys_dir)
179 return NULL; 179 return NULL;
180 180
181 for_each_subsystem(sys_dir, sys_dirent, sys_next) { 181 for_each_subsystem(sys_dir, sys_dirent, sys_next) {
182 182
183 snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path, 183 snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
184 sys_dirent.d_name); 184 sys_dirent.d_name);
185 evt_dir = opendir(dir_path); 185 evt_dir = opendir(dir_path);
186 if (!evt_dir) 186 if (!evt_dir)
187 continue; 187 continue;
188 188
189 for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) { 189 for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
190 190
191 snprintf(evt_path, MAXPATHLEN, "%s/%s/id", dir_path, 191 snprintf(evt_path, MAXPATHLEN, "%s/%s/id", dir_path,
192 evt_dirent.d_name); 192 evt_dirent.d_name);
193 fd = open(evt_path, O_RDONLY); 193 fd = open(evt_path, O_RDONLY);
194 if (fd < 0) 194 if (fd < 0)
195 continue; 195 continue;
196 if (read(fd, id_buf, sizeof(id_buf)) < 0) { 196 if (read(fd, id_buf, sizeof(id_buf)) < 0) {
197 close(fd); 197 close(fd);
198 continue; 198 continue;
199 } 199 }
200 close(fd); 200 close(fd);
201 id = atoll(id_buf); 201 id = atoll(id_buf);
202 if (id == config) { 202 if (id == config) {
203 closedir(evt_dir); 203 closedir(evt_dir);
204 closedir(sys_dir); 204 closedir(sys_dir);
205 path = zalloc(sizeof(*path)); 205 path = zalloc(sizeof(*path));
206 path->system = malloc(MAX_EVENT_LENGTH); 206 path->system = malloc(MAX_EVENT_LENGTH);
207 if (!path->system) { 207 if (!path->system) {
208 free(path); 208 free(path);
209 return NULL; 209 return NULL;
210 } 210 }
211 path->name = malloc(MAX_EVENT_LENGTH); 211 path->name = malloc(MAX_EVENT_LENGTH);
212 if (!path->name) { 212 if (!path->name) {
213 free(path->system); 213 free(path->system);
214 free(path); 214 free(path);
215 return NULL; 215 return NULL;
216 } 216 }
217 strncpy(path->system, sys_dirent.d_name, 217 strncpy(path->system, sys_dirent.d_name,
218 MAX_EVENT_LENGTH); 218 MAX_EVENT_LENGTH);
219 strncpy(path->name, evt_dirent.d_name, 219 strncpy(path->name, evt_dirent.d_name,
220 MAX_EVENT_LENGTH); 220 MAX_EVENT_LENGTH);
221 return path; 221 return path;
222 } 222 }
223 } 223 }
224 closedir(evt_dir); 224 closedir(evt_dir);
225 } 225 }
226 226
227 closedir(sys_dir); 227 closedir(sys_dir);
228 return NULL; 228 return NULL;
229 } 229 }
230 230
231 #define TP_PATH_LEN (MAX_EVENT_LENGTH * 2 + 1) 231 #define TP_PATH_LEN (MAX_EVENT_LENGTH * 2 + 1)
232 static const char *tracepoint_id_to_name(u64 config) 232 static const char *tracepoint_id_to_name(u64 config)
233 { 233 {
234 static char buf[TP_PATH_LEN]; 234 static char buf[TP_PATH_LEN];
235 struct tracepoint_path *path; 235 struct tracepoint_path *path;
236 236
237 path = tracepoint_id_to_path(config); 237 path = tracepoint_id_to_path(config);
238 if (path) { 238 if (path) {
239 snprintf(buf, TP_PATH_LEN, "%s:%s", path->system, path->name); 239 snprintf(buf, TP_PATH_LEN, "%s:%s", path->system, path->name);
240 free(path->name); 240 free(path->name);
241 free(path->system); 241 free(path->system);
242 free(path); 242 free(path);
243 } else 243 } else
244 snprintf(buf, TP_PATH_LEN, "%s:%s", "unknown", "unknown"); 244 snprintf(buf, TP_PATH_LEN, "%s:%s", "unknown", "unknown");
245 245
246 return buf; 246 return buf;
247 } 247 }
248 248
249 static int is_cache_op_valid(u8 cache_type, u8 cache_op) 249 static int is_cache_op_valid(u8 cache_type, u8 cache_op)
250 { 250 {
251 if (hw_cache_stat[cache_type] & COP(cache_op)) 251 if (hw_cache_stat[cache_type] & COP(cache_op))
252 return 1; /* valid */ 252 return 1; /* valid */
253 else 253 else
254 return 0; /* invalid */ 254 return 0; /* invalid */
255 } 255 }
256 256
257 static char *event_cache_name(u8 cache_type, u8 cache_op, u8 cache_result) 257 static char *event_cache_name(u8 cache_type, u8 cache_op, u8 cache_result)
258 { 258 {
259 static char name[50]; 259 static char name[50];
260 260
261 if (cache_result) { 261 if (cache_result) {
262 sprintf(name, "%s-%s-%s", hw_cache[cache_type][0], 262 sprintf(name, "%s-%s-%s", hw_cache[cache_type][0],
263 hw_cache_op[cache_op][0], 263 hw_cache_op[cache_op][0],
264 hw_cache_result[cache_result][0]); 264 hw_cache_result[cache_result][0]);
265 } else { 265 } else {
266 sprintf(name, "%s-%s", hw_cache[cache_type][0], 266 sprintf(name, "%s-%s", hw_cache[cache_type][0],
267 hw_cache_op[cache_op][1]); 267 hw_cache_op[cache_op][1]);
268 } 268 }
269 269
270 return name; 270 return name;
271 } 271 }
272 272
273 const char *event_type(int type) 273 const char *event_type(int type)
274 { 274 {
275 switch (type) { 275 switch (type) {
276 case PERF_TYPE_HARDWARE: 276 case PERF_TYPE_HARDWARE:
277 return "hardware"; 277 return "hardware";
278 278
279 case PERF_TYPE_SOFTWARE: 279 case PERF_TYPE_SOFTWARE:
280 return "software"; 280 return "software";
281 281
282 case PERF_TYPE_TRACEPOINT: 282 case PERF_TYPE_TRACEPOINT:
283 return "tracepoint"; 283 return "tracepoint";
284 284
285 case PERF_TYPE_HW_CACHE: 285 case PERF_TYPE_HW_CACHE:
286 return "hardware-cache"; 286 return "hardware-cache";
287 287
288 default: 288 default:
289 break; 289 break;
290 } 290 }
291 291
292 return "unknown"; 292 return "unknown";
293 } 293 }
294 294
295 const char *event_name(struct perf_evsel *evsel) 295 const char *event_name(struct perf_evsel *evsel)
296 { 296 {
297 u64 config = evsel->attr.config; 297 u64 config = evsel->attr.config;
298 int type = evsel->attr.type; 298 int type = evsel->attr.type;
299 299
300 if (evsel->name) 300 if (evsel->name)
301 return evsel->name; 301 return evsel->name;
302 302
303 return __event_name(type, config); 303 return __event_name(type, config);
304 } 304 }
305 305
306 const char *__event_name(int type, u64 config) 306 const char *__event_name(int type, u64 config)
307 { 307 {
308 static char buf[32]; 308 static char buf[32];
309 309
310 if (type == PERF_TYPE_RAW) { 310 if (type == PERF_TYPE_RAW) {
311 sprintf(buf, "raw 0x%" PRIx64, config); 311 sprintf(buf, "raw 0x%" PRIx64, config);
312 return buf; 312 return buf;
313 } 313 }
314 314
315 switch (type) { 315 switch (type) {
316 case PERF_TYPE_HARDWARE: 316 case PERF_TYPE_HARDWARE:
317 if (config < PERF_COUNT_HW_MAX && hw_event_names[config]) 317 if (config < PERF_COUNT_HW_MAX && hw_event_names[config])
318 return hw_event_names[config]; 318 return hw_event_names[config];
319 return "unknown-hardware"; 319 return "unknown-hardware";
320 320
321 case PERF_TYPE_HW_CACHE: { 321 case PERF_TYPE_HW_CACHE: {
322 u8 cache_type, cache_op, cache_result; 322 u8 cache_type, cache_op, cache_result;
323 323
324 cache_type = (config >> 0) & 0xff; 324 cache_type = (config >> 0) & 0xff;
325 if (cache_type > PERF_COUNT_HW_CACHE_MAX) 325 if (cache_type > PERF_COUNT_HW_CACHE_MAX)
326 return "unknown-ext-hardware-cache-type"; 326 return "unknown-ext-hardware-cache-type";
327 327
328 cache_op = (config >> 8) & 0xff; 328 cache_op = (config >> 8) & 0xff;
329 if (cache_op > PERF_COUNT_HW_CACHE_OP_MAX) 329 if (cache_op > PERF_COUNT_HW_CACHE_OP_MAX)
330 return "unknown-ext-hardware-cache-op"; 330 return "unknown-ext-hardware-cache-op";
331 331
332 cache_result = (config >> 16) & 0xff; 332 cache_result = (config >> 16) & 0xff;
333 if (cache_result > PERF_COUNT_HW_CACHE_RESULT_MAX) 333 if (cache_result > PERF_COUNT_HW_CACHE_RESULT_MAX)
334 return "unknown-ext-hardware-cache-result"; 334 return "unknown-ext-hardware-cache-result";
335 335
336 if (!is_cache_op_valid(cache_type, cache_op)) 336 if (!is_cache_op_valid(cache_type, cache_op))
337 return "invalid-cache"; 337 return "invalid-cache";
338 338
339 return event_cache_name(cache_type, cache_op, cache_result); 339 return event_cache_name(cache_type, cache_op, cache_result);
340 } 340 }
341 341
342 case PERF_TYPE_SOFTWARE: 342 case PERF_TYPE_SOFTWARE:
343 if (config < PERF_COUNT_SW_MAX && sw_event_names[config]) 343 if (config < PERF_COUNT_SW_MAX && sw_event_names[config])
344 return sw_event_names[config]; 344 return sw_event_names[config];
345 return "unknown-software"; 345 return "unknown-software";
346 346
347 case PERF_TYPE_TRACEPOINT: 347 case PERF_TYPE_TRACEPOINT:
348 return tracepoint_id_to_name(config); 348 return tracepoint_id_to_name(config);
349 349
350 default: 350 default:
351 break; 351 break;
352 } 352 }
353 353
354 return "unknown"; 354 return "unknown";
355 } 355 }
356 356
357 static int parse_aliases(const char **str, const char *names[][MAX_ALIASES], int size) 357 static int parse_aliases(const char **str, const char *names[][MAX_ALIASES], int size)
358 { 358 {
359 int i, j; 359 int i, j;
360 int n, longest = -1; 360 int n, longest = -1;
361 361
362 for (i = 0; i < size; i++) { 362 for (i = 0; i < size; i++) {
363 for (j = 0; j < MAX_ALIASES && names[i][j]; j++) { 363 for (j = 0; j < MAX_ALIASES && names[i][j]; j++) {
364 n = strlen(names[i][j]); 364 n = strlen(names[i][j]);
365 if (n > longest && !strncasecmp(*str, names[i][j], n)) 365 if (n > longest && !strncasecmp(*str, names[i][j], n))
366 longest = n; 366 longest = n;
367 } 367 }
368 if (longest > 0) { 368 if (longest > 0) {
369 *str += longest; 369 *str += longest;
370 return i; 370 return i;
371 } 371 }
372 } 372 }
373 373
374 return -1; 374 return -1;
375 } 375 }
376 376
377 static enum event_result 377 static enum event_result
378 parse_generic_hw_event(const char **str, struct perf_event_attr *attr) 378 parse_generic_hw_event(const char **str, struct perf_event_attr *attr)
379 { 379 {
380 const char *s = *str; 380 const char *s = *str;
381 int cache_type = -1, cache_op = -1, cache_result = -1; 381 int cache_type = -1, cache_op = -1, cache_result = -1;
382 382
383 cache_type = parse_aliases(&s, hw_cache, PERF_COUNT_HW_CACHE_MAX); 383 cache_type = parse_aliases(&s, hw_cache, PERF_COUNT_HW_CACHE_MAX);
384 /* 384 /*
385 * No fallback - if we cannot get a clear cache type 385 * No fallback - if we cannot get a clear cache type
386 * then bail out: 386 * then bail out:
387 */ 387 */
388 if (cache_type == -1) 388 if (cache_type == -1)
389 return EVT_FAILED; 389 return EVT_FAILED;
390 390
391 while ((cache_op == -1 || cache_result == -1) && *s == '-') { 391 while ((cache_op == -1 || cache_result == -1) && *s == '-') {
392 ++s; 392 ++s;
393 393
394 if (cache_op == -1) { 394 if (cache_op == -1) {
395 cache_op = parse_aliases(&s, hw_cache_op, 395 cache_op = parse_aliases(&s, hw_cache_op,
396 PERF_COUNT_HW_CACHE_OP_MAX); 396 PERF_COUNT_HW_CACHE_OP_MAX);
397 if (cache_op >= 0) { 397 if (cache_op >= 0) {
398 if (!is_cache_op_valid(cache_type, cache_op)) 398 if (!is_cache_op_valid(cache_type, cache_op))
399 return EVT_FAILED; 399 return EVT_FAILED;
400 continue; 400 continue;
401 } 401 }
402 } 402 }
403 403
404 if (cache_result == -1) { 404 if (cache_result == -1) {
405 cache_result = parse_aliases(&s, hw_cache_result, 405 cache_result = parse_aliases(&s, hw_cache_result,
406 PERF_COUNT_HW_CACHE_RESULT_MAX); 406 PERF_COUNT_HW_CACHE_RESULT_MAX);
407 if (cache_result >= 0) 407 if (cache_result >= 0)
408 continue; 408 continue;
409 } 409 }
410 410
411 /* 411 /*
412 * Can't parse this as a cache op or result, so back up 412 * Can't parse this as a cache op or result, so back up
413 * to the '-'. 413 * to the '-'.
414 */ 414 */
415 --s; 415 --s;
416 break; 416 break;
417 } 417 }
418 418
419 /* 419 /*
420 * Fall back to reads: 420 * Fall back to reads:
421 */ 421 */
422 if (cache_op == -1) 422 if (cache_op == -1)
423 cache_op = PERF_COUNT_HW_CACHE_OP_READ; 423 cache_op = PERF_COUNT_HW_CACHE_OP_READ;
424 424
425 /* 425 /*
426 * Fall back to accesses: 426 * Fall back to accesses:
427 */ 427 */
428 if (cache_result == -1) 428 if (cache_result == -1)
429 cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS; 429 cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS;
430 430
431 attr->config = cache_type | (cache_op << 8) | (cache_result << 16); 431 attr->config = cache_type | (cache_op << 8) | (cache_result << 16);
432 attr->type = PERF_TYPE_HW_CACHE; 432 attr->type = PERF_TYPE_HW_CACHE;
433 433
434 *str = s; 434 *str = s;
435 return EVT_HANDLED; 435 return EVT_HANDLED;
436 } 436 }
437 437
438 static enum event_result 438 static enum event_result
439 parse_single_tracepoint_event(char *sys_name, 439 parse_single_tracepoint_event(char *sys_name,
440 const char *evt_name, 440 const char *evt_name,
441 unsigned int evt_length, 441 unsigned int evt_length,
442 struct perf_event_attr *attr, 442 struct perf_event_attr *attr,
443 const char **strp) 443 const char **strp)
444 { 444 {
445 char evt_path[MAXPATHLEN]; 445 char evt_path[MAXPATHLEN];
446 char id_buf[4]; 446 char id_buf[4];
447 u64 id; 447 u64 id;
448 int fd; 448 int fd;
449 449
450 snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", tracing_events_path, 450 snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", tracing_events_path,
451 sys_name, evt_name); 451 sys_name, evt_name);
452 452
453 fd = open(evt_path, O_RDONLY); 453 fd = open(evt_path, O_RDONLY);
454 if (fd < 0) 454 if (fd < 0)
455 return EVT_FAILED; 455 return EVT_FAILED;
456 456
457 if (read(fd, id_buf, sizeof(id_buf)) < 0) { 457 if (read(fd, id_buf, sizeof(id_buf)) < 0) {
458 close(fd); 458 close(fd);
459 return EVT_FAILED; 459 return EVT_FAILED;
460 } 460 }
461 461
462 close(fd); 462 close(fd);
463 id = atoll(id_buf); 463 id = atoll(id_buf);
464 attr->config = id; 464 attr->config = id;
465 attr->type = PERF_TYPE_TRACEPOINT; 465 attr->type = PERF_TYPE_TRACEPOINT;
466 *strp += strlen(sys_name) + evt_length + 1; /* + 1 for the ':' */ 466 *strp += strlen(sys_name) + evt_length + 1; /* + 1 for the ':' */
467 467
468 attr->sample_type |= PERF_SAMPLE_RAW; 468 attr->sample_type |= PERF_SAMPLE_RAW;
469 attr->sample_type |= PERF_SAMPLE_TIME; 469 attr->sample_type |= PERF_SAMPLE_TIME;
470 attr->sample_type |= PERF_SAMPLE_CPU; 470 attr->sample_type |= PERF_SAMPLE_CPU;
471 471
472 attr->sample_period = 1; 472 attr->sample_period = 1;
473 473
474 474
475 return EVT_HANDLED; 475 return EVT_HANDLED;
476 } 476 }
477 477
478 /* sys + ':' + event + ':' + flags*/ 478 /* sys + ':' + event + ':' + flags*/
479 #define MAX_EVOPT_LEN (MAX_EVENT_LENGTH * 2 + 2 + 128) 479 #define MAX_EVOPT_LEN (MAX_EVENT_LENGTH * 2 + 2 + 128)
480 static enum event_result 480 static enum event_result
481 parse_multiple_tracepoint_event(struct perf_evlist *evlist, char *sys_name, 481 parse_multiple_tracepoint_event(struct perf_evlist *evlist, char *sys_name,
482 const char *evt_exp, char *flags) 482 const char *evt_exp, char *flags)
483 { 483 {
484 char evt_path[MAXPATHLEN]; 484 char evt_path[MAXPATHLEN];
485 struct dirent *evt_ent; 485 struct dirent *evt_ent;
486 DIR *evt_dir; 486 DIR *evt_dir;
487 487
488 snprintf(evt_path, MAXPATHLEN, "%s/%s", tracing_events_path, sys_name); 488 snprintf(evt_path, MAXPATHLEN, "%s/%s", tracing_events_path, sys_name);
489 evt_dir = opendir(evt_path); 489 evt_dir = opendir(evt_path);
490 490
491 if (!evt_dir) { 491 if (!evt_dir) {
492 perror("Can't open event dir"); 492 perror("Can't open event dir");
493 return EVT_FAILED; 493 return EVT_FAILED;
494 } 494 }
495 495
496 while ((evt_ent = readdir(evt_dir))) { 496 while ((evt_ent = readdir(evt_dir))) {
497 char event_opt[MAX_EVOPT_LEN + 1]; 497 char event_opt[MAX_EVOPT_LEN + 1];
498 int len; 498 int len;
499 499
500 if (!strcmp(evt_ent->d_name, ".") 500 if (!strcmp(evt_ent->d_name, ".")
501 || !strcmp(evt_ent->d_name, "..") 501 || !strcmp(evt_ent->d_name, "..")
502 || !strcmp(evt_ent->d_name, "enable") 502 || !strcmp(evt_ent->d_name, "enable")
503 || !strcmp(evt_ent->d_name, "filter")) 503 || !strcmp(evt_ent->d_name, "filter"))
504 continue; 504 continue;
505 505
506 if (!strglobmatch(evt_ent->d_name, evt_exp)) 506 if (!strglobmatch(evt_ent->d_name, evt_exp))
507 continue; 507 continue;
508 508
509 len = snprintf(event_opt, MAX_EVOPT_LEN, "%s:%s%s%s", sys_name, 509 len = snprintf(event_opt, MAX_EVOPT_LEN, "%s:%s%s%s", sys_name,
510 evt_ent->d_name, flags ? ":" : "", 510 evt_ent->d_name, flags ? ":" : "",
511 flags ?: ""); 511 flags ?: "");
512 if (len < 0) 512 if (len < 0)
513 return EVT_FAILED; 513 return EVT_FAILED;
514 514
515 if (parse_events(evlist, event_opt, 0)) 515 if (parse_events(evlist, event_opt, 0))
516 return EVT_FAILED; 516 return EVT_FAILED;
517 } 517 }
518 518
519 return EVT_HANDLED_ALL; 519 return EVT_HANDLED_ALL;
520 } 520 }
521 521
522 static enum event_result 522 static enum event_result
523 parse_tracepoint_event(struct perf_evlist *evlist, const char **strp, 523 parse_tracepoint_event(struct perf_evlist *evlist, const char **strp,
524 struct perf_event_attr *attr) 524 struct perf_event_attr *attr)
525 { 525 {
526 const char *evt_name; 526 const char *evt_name;
527 char *flags = NULL, *comma_loc; 527 char *flags = NULL, *comma_loc;
528 char sys_name[MAX_EVENT_LENGTH]; 528 char sys_name[MAX_EVENT_LENGTH];
529 unsigned int sys_length, evt_length; 529 unsigned int sys_length, evt_length;
530 530
531 if (debugfs_valid_mountpoint(tracing_events_path)) 531 if (debugfs_valid_mountpoint(tracing_events_path))
532 return 0; 532 return 0;
533 533
534 evt_name = strchr(*strp, ':'); 534 evt_name = strchr(*strp, ':');
535 if (!evt_name) 535 if (!evt_name)
536 return EVT_FAILED; 536 return EVT_FAILED;
537 537
538 sys_length = evt_name - *strp; 538 sys_length = evt_name - *strp;
539 if (sys_length >= MAX_EVENT_LENGTH) 539 if (sys_length >= MAX_EVENT_LENGTH)
540 return 0; 540 return 0;
541 541
542 strncpy(sys_name, *strp, sys_length); 542 strncpy(sys_name, *strp, sys_length);
543 sys_name[sys_length] = '\0'; 543 sys_name[sys_length] = '\0';
544 evt_name = evt_name + 1; 544 evt_name = evt_name + 1;
545 545
546 comma_loc = strchr(evt_name, ','); 546 comma_loc = strchr(evt_name, ',');
547 if (comma_loc) { 547 if (comma_loc) {
548 /* take the event name up to the comma */ 548 /* take the event name up to the comma */
549 evt_name = strndup(evt_name, comma_loc - evt_name); 549 evt_name = strndup(evt_name, comma_loc - evt_name);
550 } 550 }
551 flags = strchr(evt_name, ':'); 551 flags = strchr(evt_name, ':');
552 if (flags) { 552 if (flags) {
553 /* split it out: */ 553 /* split it out: */
554 evt_name = strndup(evt_name, flags - evt_name); 554 evt_name = strndup(evt_name, flags - evt_name);
555 flags++; 555 flags++;
556 } 556 }
557 557
558 evt_length = strlen(evt_name); 558 evt_length = strlen(evt_name);
559 if (evt_length >= MAX_EVENT_LENGTH) 559 if (evt_length >= MAX_EVENT_LENGTH)
560 return EVT_FAILED; 560 return EVT_FAILED;
561 if (strpbrk(evt_name, "*?")) { 561 if (strpbrk(evt_name, "*?")) {
562 *strp += strlen(sys_name) + evt_length + 1; /* 1 == the ':' */ 562 *strp += strlen(sys_name) + evt_length + 1; /* 1 == the ':' */
563 return parse_multiple_tracepoint_event(evlist, sys_name, 563 return parse_multiple_tracepoint_event(evlist, sys_name,
564 evt_name, flags); 564 evt_name, flags);
565 } else { 565 } else {
566 return parse_single_tracepoint_event(sys_name, evt_name, 566 return parse_single_tracepoint_event(sys_name, evt_name,
567 evt_length, attr, strp); 567 evt_length, attr, strp);
568 } 568 }
569 } 569 }
570 570
571 static enum event_result 571 static enum event_result
572 parse_breakpoint_type(const char *type, const char **strp, 572 parse_breakpoint_type(const char *type, const char **strp,
573 struct perf_event_attr *attr) 573 struct perf_event_attr *attr)
574 { 574 {
575 int i; 575 int i;
576 576
577 for (i = 0; i < 3; i++) { 577 for (i = 0; i < 3; i++) {
578 if (!type[i]) 578 if (!type[i])
579 break; 579 break;
580 580
581 switch (type[i]) { 581 switch (type[i]) {
582 case 'r': 582 case 'r':
583 attr->bp_type |= HW_BREAKPOINT_R; 583 attr->bp_type |= HW_BREAKPOINT_R;
584 break; 584 break;
585 case 'w': 585 case 'w':
586 attr->bp_type |= HW_BREAKPOINT_W; 586 attr->bp_type |= HW_BREAKPOINT_W;
587 break; 587 break;
588 case 'x': 588 case 'x':
589 attr->bp_type |= HW_BREAKPOINT_X; 589 attr->bp_type |= HW_BREAKPOINT_X;
590 break; 590 break;
591 default: 591 default:
592 return EVT_FAILED; 592 return EVT_FAILED;
593 } 593 }
594 } 594 }
595 if (!attr->bp_type) /* Default */ 595 if (!attr->bp_type) /* Default */
596 attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W; 596 attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W;
597 597
598 *strp = type + i; 598 *strp = type + i;
599 599
600 return EVT_HANDLED; 600 return EVT_HANDLED;
601 } 601 }
602 602
603 static enum event_result 603 static enum event_result
604 parse_breakpoint_event(const char **strp, struct perf_event_attr *attr) 604 parse_breakpoint_event(const char **strp, struct perf_event_attr *attr)
605 { 605 {
606 const char *target; 606 const char *target;
607 const char *type; 607 const char *type;
608 char *endaddr; 608 char *endaddr;
609 u64 addr; 609 u64 addr;
610 enum event_result err; 610 enum event_result err;
611 611
612 target = strchr(*strp, ':'); 612 target = strchr(*strp, ':');
613 if (!target) 613 if (!target)
614 return EVT_FAILED; 614 return EVT_FAILED;
615 615
616 if (strncmp(*strp, "mem", target - *strp) != 0) 616 if (strncmp(*strp, "mem", target - *strp) != 0)
617 return EVT_FAILED; 617 return EVT_FAILED;
618 618
619 target++; 619 target++;
620 620
621 addr = strtoull(target, &endaddr, 0); 621 addr = strtoull(target, &endaddr, 0);
622 if (target == endaddr) 622 if (target == endaddr)
623 return EVT_FAILED; 623 return EVT_FAILED;
624 624
625 attr->bp_addr = addr; 625 attr->bp_addr = addr;
626 *strp = endaddr; 626 *strp = endaddr;
627 627
628 type = strchr(target, ':'); 628 type = strchr(target, ':');
629 629
630 /* If no type is defined, just rw as default */ 630 /* If no type is defined, just rw as default */
631 if (!type) { 631 if (!type) {
632 attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W; 632 attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W;
633 } else { 633 } else {
634 err = parse_breakpoint_type(++type, strp, attr); 634 err = parse_breakpoint_type(++type, strp, attr);
635 if (err == EVT_FAILED) 635 if (err == EVT_FAILED)
636 return EVT_FAILED; 636 return EVT_FAILED;
637 } 637 }
638 638
639 /* 639 /*
640 * We should find a nice way to override the access length 640 * We should find a nice way to override the access length
641 * Provide some defaults for now 641 * Provide some defaults for now
642 */ 642 */
643 if (attr->bp_type == HW_BREAKPOINT_X) 643 if (attr->bp_type == HW_BREAKPOINT_X)
644 attr->bp_len = sizeof(long); 644 attr->bp_len = sizeof(long);
645 else 645 else
646 attr->bp_len = HW_BREAKPOINT_LEN_4; 646 attr->bp_len = HW_BREAKPOINT_LEN_4;
647 647
648 attr->type = PERF_TYPE_BREAKPOINT; 648 attr->type = PERF_TYPE_BREAKPOINT;
649 649
650 return EVT_HANDLED; 650 return EVT_HANDLED;
651 } 651 }
652 652
653 static int check_events(const char *str, unsigned int i) 653 static int check_events(const char *str, unsigned int i)
654 { 654 {
655 int n; 655 int n;
656 656
657 n = strlen(event_symbols[i].symbol); 657 n = strlen(event_symbols[i].symbol);
658 if (!strncasecmp(str, event_symbols[i].symbol, n)) 658 if (!strncasecmp(str, event_symbols[i].symbol, n))
659 return n; 659 return n;
660 660
661 n = strlen(event_symbols[i].alias); 661 n = strlen(event_symbols[i].alias);
662 if (n) { 662 if (n) {
663 if (!strncasecmp(str, event_symbols[i].alias, n)) 663 if (!strncasecmp(str, event_symbols[i].alias, n))
664 return n; 664 return n;
665 } 665 }
666 666
667 return 0; 667 return 0;
668 } 668 }
669 669
670 static enum event_result 670 static enum event_result
671 parse_symbolic_event(const char **strp, struct perf_event_attr *attr) 671 parse_symbolic_event(const char **strp, struct perf_event_attr *attr)
672 { 672 {
673 const char *str = *strp; 673 const char *str = *strp;
674 unsigned int i; 674 unsigned int i;
675 int n; 675 int n;
676 676
677 for (i = 0; i < ARRAY_SIZE(event_symbols); i++) { 677 for (i = 0; i < ARRAY_SIZE(event_symbols); i++) {
678 n = check_events(str, i); 678 n = check_events(str, i);
679 if (n > 0) { 679 if (n > 0) {
680 attr->type = event_symbols[i].type; 680 attr->type = event_symbols[i].type;
681 attr->config = event_symbols[i].config; 681 attr->config = event_symbols[i].config;
682 *strp = str + n; 682 *strp = str + n;
683 return EVT_HANDLED; 683 return EVT_HANDLED;
684 } 684 }
685 } 685 }
686 return EVT_FAILED; 686 return EVT_FAILED;
687 } 687 }
688 688
689 static enum event_result 689 static enum event_result
690 parse_raw_event(const char **strp, struct perf_event_attr *attr) 690 parse_raw_event(const char **strp, struct perf_event_attr *attr)
691 { 691 {
692 const char *str = *strp; 692 const char *str = *strp;
693 u64 config; 693 u64 config;
694 int n; 694 int n;
695 695
696 if (*str != 'r') 696 if (*str != 'r')
697 return EVT_FAILED; 697 return EVT_FAILED;
698 n = hex2u64(str + 1, &config); 698 n = hex2u64(str + 1, &config);
699 if (n > 0) { 699 if (n > 0) {
700 const char *end = str + n + 1; 700 const char *end = str + n + 1;
701 if (*end != '\0' && *end != ',' && *end != ':') 701 if (*end != '\0' && *end != ',' && *end != ':')
702 return EVT_FAILED; 702 return EVT_FAILED;
703 703
704 *strp = end; 704 *strp = end;
705 attr->type = PERF_TYPE_RAW; 705 attr->type = PERF_TYPE_RAW;
706 attr->config = config; 706 attr->config = config;
707 return EVT_HANDLED; 707 return EVT_HANDLED;
708 } 708 }
709 return EVT_FAILED; 709 return EVT_FAILED;
710 } 710 }
711 711
712 static enum event_result 712 static enum event_result
713 parse_numeric_event(const char **strp, struct perf_event_attr *attr) 713 parse_numeric_event(const char **strp, struct perf_event_attr *attr)
714 { 714 {
715 const char *str = *strp; 715 const char *str = *strp;
716 char *endp; 716 char *endp;
717 unsigned long type; 717 unsigned long type;
718 u64 config; 718 u64 config;
719 719
720 type = strtoul(str, &endp, 0); 720 type = strtoul(str, &endp, 0);
721 if (endp > str && type < PERF_TYPE_MAX && *endp == ':') { 721 if (endp > str && type < PERF_TYPE_MAX && *endp == ':') {
722 str = endp + 1; 722 str = endp + 1;
723 config = strtoul(str, &endp, 0); 723 config = strtoul(str, &endp, 0);
724 if (endp > str) { 724 if (endp > str) {
725 attr->type = type; 725 attr->type = type;
726 attr->config = config; 726 attr->config = config;
727 *strp = endp; 727 *strp = endp;
728 return EVT_HANDLED; 728 return EVT_HANDLED;
729 } 729 }
730 } 730 }
731 return EVT_FAILED; 731 return EVT_FAILED;
732 } 732 }
733 733
734 static int 734 static int
735 parse_event_modifier(const char **strp, struct perf_event_attr *attr) 735 parse_event_modifier(const char **strp, struct perf_event_attr *attr)
736 { 736 {
737 const char *str = *strp; 737 const char *str = *strp;
738 int exclude = 0; 738 int exclude = 0;
739 int eu = 0, ek = 0, eh = 0, precise = 0; 739 int eu = 0, ek = 0, eh = 0, precise = 0;
740 740
741 if (!*str) 741 if (!*str)
742 return 0; 742 return 0;
743 743
744 if (*str == ',') 744 if (*str == ',')
745 return 0; 745 return 0;
746 746
747 if (*str++ != ':') 747 if (*str++ != ':')
748 return -1; 748 return -1;
749 749
750 while (*str) { 750 while (*str) {
751 if (*str == 'u') { 751 if (*str == 'u') {
752 if (!exclude) 752 if (!exclude)
753 exclude = eu = ek = eh = 1; 753 exclude = eu = ek = eh = 1;
754 eu = 0; 754 eu = 0;
755 } else if (*str == 'k') { 755 } else if (*str == 'k') {
756 if (!exclude) 756 if (!exclude)
757 exclude = eu = ek = eh = 1; 757 exclude = eu = ek = eh = 1;
758 ek = 0; 758 ek = 0;
759 } else if (*str == 'h') { 759 } else if (*str == 'h') {
760 if (!exclude) 760 if (!exclude)
761 exclude = eu = ek = eh = 1; 761 exclude = eu = ek = eh = 1;
762 eh = 0; 762 eh = 0;
763 } else if (*str == 'p') { 763 } else if (*str == 'p') {
764 precise++; 764 precise++;
765 } else 765 } else
766 break; 766 break;
767 767
768 ++str; 768 ++str;
769 } 769 }
770 if (str < *strp + 2) 770 if (str < *strp + 2)
771 return -1; 771 return -1;
772 772
773 *strp = str; 773 *strp = str;
774 774
775 attr->exclude_user = eu; 775 attr->exclude_user = eu;
776 attr->exclude_kernel = ek; 776 attr->exclude_kernel = ek;
777 attr->exclude_hv = eh; 777 attr->exclude_hv = eh;
778 attr->precise_ip = precise; 778 attr->precise_ip = precise;
779 779
780 return 0; 780 return 0;
781 } 781 }
782 782
783 /* 783 /*
784 * Each event can have multiple symbolic names. 784 * Each event can have multiple symbolic names.
785 * Symbolic names are (almost) exactly matched. 785 * Symbolic names are (almost) exactly matched.
786 */ 786 */
787 static enum event_result 787 static enum event_result
788 parse_event_symbols(struct perf_evlist *evlist, const char **str, 788 parse_event_symbols(struct perf_evlist *evlist, const char **str,
789 struct perf_event_attr *attr) 789 struct perf_event_attr *attr)
790 { 790 {
791 enum event_result ret; 791 enum event_result ret;
792 792
793 ret = parse_tracepoint_event(evlist, str, attr); 793 ret = parse_tracepoint_event(evlist, str, attr);
794 if (ret != EVT_FAILED) 794 if (ret != EVT_FAILED)
795 goto modifier; 795 goto modifier;
796 796
797 ret = parse_raw_event(str, attr); 797 ret = parse_raw_event(str, attr);
798 if (ret != EVT_FAILED) 798 if (ret != EVT_FAILED)
799 goto modifier; 799 goto modifier;
800 800
801 ret = parse_numeric_event(str, attr); 801 ret = parse_numeric_event(str, attr);
802 if (ret != EVT_FAILED) 802 if (ret != EVT_FAILED)
803 goto modifier; 803 goto modifier;
804 804
805 ret = parse_symbolic_event(str, attr); 805 ret = parse_symbolic_event(str, attr);
806 if (ret != EVT_FAILED) 806 if (ret != EVT_FAILED)
807 goto modifier; 807 goto modifier;
808 808
809 ret = parse_generic_hw_event(str, attr); 809 ret = parse_generic_hw_event(str, attr);
810 if (ret != EVT_FAILED) 810 if (ret != EVT_FAILED)
811 goto modifier; 811 goto modifier;
812 812
813 ret = parse_breakpoint_event(str, attr); 813 ret = parse_breakpoint_event(str, attr);
814 if (ret != EVT_FAILED) 814 if (ret != EVT_FAILED)
815 goto modifier; 815 goto modifier;
816 816
817 fprintf(stderr, "invalid or unsupported event: '%s'\n", *str); 817 fprintf(stderr, "invalid or unsupported event: '%s'\n", *str);
818 fprintf(stderr, "Run 'perf list' for a list of valid events\n"); 818 fprintf(stderr, "Run 'perf list' for a list of valid events\n");
819 return EVT_FAILED; 819 return EVT_FAILED;
820 820
821 modifier: 821 modifier:
822 if (parse_event_modifier(str, attr) < 0) { 822 if (parse_event_modifier(str, attr) < 0) {
823 fprintf(stderr, "invalid event modifier: '%s'\n", *str); 823 fprintf(stderr, "invalid event modifier: '%s'\n", *str);
824 fprintf(stderr, "Run 'perf list' for a list of valid events and modifiers\n"); 824 fprintf(stderr, "Run 'perf list' for a list of valid events and modifiers\n");
825 825
826 return EVT_FAILED; 826 return EVT_FAILED;
827 } 827 }
828 828
829 return ret; 829 return ret;
830 } 830 }
831 831
832 int parse_events(struct perf_evlist *evlist , const char *str, int unset __used) 832 int parse_events(struct perf_evlist *evlist , const char *str, int unset __used)
833 { 833 {
834 struct perf_event_attr attr; 834 struct perf_event_attr attr;
835 enum event_result ret; 835 enum event_result ret;
836 const char *ostr; 836 const char *ostr;
837 837
838 for (;;) { 838 for (;;) {
839 ostr = str; 839 ostr = str;
840 memset(&attr, 0, sizeof(attr)); 840 memset(&attr, 0, sizeof(attr));
841 event_attr_init(&attr);
841 ret = parse_event_symbols(evlist, &str, &attr); 842 ret = parse_event_symbols(evlist, &str, &attr);
842 if (ret == EVT_FAILED) 843 if (ret == EVT_FAILED)
843 return -1; 844 return -1;
844 845
845 if (!(*str == 0 || *str == ',' || isspace(*str))) 846 if (!(*str == 0 || *str == ',' || isspace(*str)))
846 return -1; 847 return -1;
847 848
848 if (ret != EVT_HANDLED_ALL) { 849 if (ret != EVT_HANDLED_ALL) {
849 struct perf_evsel *evsel; 850 struct perf_evsel *evsel;
850 evsel = perf_evsel__new(&attr, evlist->nr_entries); 851 evsel = perf_evsel__new(&attr, evlist->nr_entries);
851 if (evsel == NULL) 852 if (evsel == NULL)
852 return -1; 853 return -1;
853 perf_evlist__add(evlist, evsel); 854 perf_evlist__add(evlist, evsel);
854 855
855 evsel->name = calloc(str - ostr + 1, 1); 856 evsel->name = calloc(str - ostr + 1, 1);
856 if (!evsel->name) 857 if (!evsel->name)
857 return -1; 858 return -1;
858 strncpy(evsel->name, ostr, str - ostr); 859 strncpy(evsel->name, ostr, str - ostr);
859 } 860 }
860 861
861 if (*str == 0) 862 if (*str == 0)
862 break; 863 break;
863 if (*str == ',') 864 if (*str == ',')
864 ++str; 865 ++str;
865 while (isspace(*str)) 866 while (isspace(*str))
866 ++str; 867 ++str;
867 } 868 }
868 869
869 return 0; 870 return 0;
870 } 871 }
871 872
872 int parse_events_option(const struct option *opt, const char *str, 873 int parse_events_option(const struct option *opt, const char *str,
873 int unset __used) 874 int unset __used)
874 { 875 {
875 struct perf_evlist *evlist = *(struct perf_evlist **)opt->value; 876 struct perf_evlist *evlist = *(struct perf_evlist **)opt->value;
876 return parse_events(evlist, str, unset); 877 return parse_events(evlist, str, unset);
877 } 878 }
878 879
879 int parse_filter(const struct option *opt, const char *str, 880 int parse_filter(const struct option *opt, const char *str,
880 int unset __used) 881 int unset __used)
881 { 882 {
882 struct perf_evlist *evlist = *(struct perf_evlist **)opt->value; 883 struct perf_evlist *evlist = *(struct perf_evlist **)opt->value;
883 struct perf_evsel *last = NULL; 884 struct perf_evsel *last = NULL;
884 885
885 if (evlist->nr_entries > 0) 886 if (evlist->nr_entries > 0)
886 last = list_entry(evlist->entries.prev, struct perf_evsel, node); 887 last = list_entry(evlist->entries.prev, struct perf_evsel, node);
887 888
888 if (last == NULL || last->attr.type != PERF_TYPE_TRACEPOINT) { 889 if (last == NULL || last->attr.type != PERF_TYPE_TRACEPOINT) {
889 fprintf(stderr, 890 fprintf(stderr,
890 "-F option should follow a -e tracepoint option\n"); 891 "-F option should follow a -e tracepoint option\n");
891 return -1; 892 return -1;
892 } 893 }
893 894
894 last->filter = strdup(str); 895 last->filter = strdup(str);
895 if (last->filter == NULL) { 896 if (last->filter == NULL) {
896 fprintf(stderr, "not enough memory to hold filter string\n"); 897 fprintf(stderr, "not enough memory to hold filter string\n");
897 return -1; 898 return -1;
898 } 899 }
899 900
900 return 0; 901 return 0;
901 } 902 }
902 903
903 static const char * const event_type_descriptors[] = { 904 static const char * const event_type_descriptors[] = {
904 "Hardware event", 905 "Hardware event",
905 "Software event", 906 "Software event",
906 "Tracepoint event", 907 "Tracepoint event",
907 "Hardware cache event", 908 "Hardware cache event",
908 "Raw hardware event descriptor", 909 "Raw hardware event descriptor",
909 "Hardware breakpoint", 910 "Hardware breakpoint",
910 }; 911 };
911 912
912 /* 913 /*
913 * Print the events from <debugfs_mount_point>/tracing/events 914 * Print the events from <debugfs_mount_point>/tracing/events
914 */ 915 */
915 916
916 void print_tracepoint_events(const char *subsys_glob, const char *event_glob) 917 void print_tracepoint_events(const char *subsys_glob, const char *event_glob)
917 { 918 {
918 DIR *sys_dir, *evt_dir; 919 DIR *sys_dir, *evt_dir;
919 struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent; 920 struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
920 char evt_path[MAXPATHLEN]; 921 char evt_path[MAXPATHLEN];
921 char dir_path[MAXPATHLEN]; 922 char dir_path[MAXPATHLEN];
922 923
923 if (debugfs_valid_mountpoint(tracing_events_path)) 924 if (debugfs_valid_mountpoint(tracing_events_path))
924 return; 925 return;
925 926
926 sys_dir = opendir(tracing_events_path); 927 sys_dir = opendir(tracing_events_path);
927 if (!sys_dir) 928 if (!sys_dir)
928 return; 929 return;
929 930
930 for_each_subsystem(sys_dir, sys_dirent, sys_next) { 931 for_each_subsystem(sys_dir, sys_dirent, sys_next) {
931 if (subsys_glob != NULL && 932 if (subsys_glob != NULL &&
932 !strglobmatch(sys_dirent.d_name, subsys_glob)) 933 !strglobmatch(sys_dirent.d_name, subsys_glob))
933 continue; 934 continue;
934 935
935 snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path, 936 snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
936 sys_dirent.d_name); 937 sys_dirent.d_name);
937 evt_dir = opendir(dir_path); 938 evt_dir = opendir(dir_path);
938 if (!evt_dir) 939 if (!evt_dir)
939 continue; 940 continue;
940 941
941 for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) { 942 for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
942 if (event_glob != NULL && 943 if (event_glob != NULL &&
943 !strglobmatch(evt_dirent.d_name, event_glob)) 944 !strglobmatch(evt_dirent.d_name, event_glob))
944 continue; 945 continue;
945 946
946 snprintf(evt_path, MAXPATHLEN, "%s:%s", 947 snprintf(evt_path, MAXPATHLEN, "%s:%s",
947 sys_dirent.d_name, evt_dirent.d_name); 948 sys_dirent.d_name, evt_dirent.d_name);
948 printf(" %-50s [%s]\n", evt_path, 949 printf(" %-50s [%s]\n", evt_path,
949 event_type_descriptors[PERF_TYPE_TRACEPOINT]); 950 event_type_descriptors[PERF_TYPE_TRACEPOINT]);
950 } 951 }
951 closedir(evt_dir); 952 closedir(evt_dir);
952 } 953 }
953 closedir(sys_dir); 954 closedir(sys_dir);
954 } 955 }
955 956
956 /* 957 /*
957 * Check whether event is in <debugfs_mount_point>/tracing/events 958 * Check whether event is in <debugfs_mount_point>/tracing/events
958 */ 959 */
959 960
960 int is_valid_tracepoint(const char *event_string) 961 int is_valid_tracepoint(const char *event_string)
961 { 962 {
962 DIR *sys_dir, *evt_dir; 963 DIR *sys_dir, *evt_dir;
963 struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent; 964 struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
964 char evt_path[MAXPATHLEN]; 965 char evt_path[MAXPATHLEN];
965 char dir_path[MAXPATHLEN]; 966 char dir_path[MAXPATHLEN];
966 967
967 if (debugfs_valid_mountpoint(tracing_events_path)) 968 if (debugfs_valid_mountpoint(tracing_events_path))
968 return 0; 969 return 0;
969 970
970 sys_dir = opendir(tracing_events_path); 971 sys_dir = opendir(tracing_events_path);
971 if (!sys_dir) 972 if (!sys_dir)
972 return 0; 973 return 0;
973 974
974 for_each_subsystem(sys_dir, sys_dirent, sys_next) { 975 for_each_subsystem(sys_dir, sys_dirent, sys_next) {
975 976
976 snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path, 977 snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
977 sys_dirent.d_name); 978 sys_dirent.d_name);
978 evt_dir = opendir(dir_path); 979 evt_dir = opendir(dir_path);
979 if (!evt_dir) 980 if (!evt_dir)
980 continue; 981 continue;
981 982
982 for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) { 983 for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
983 snprintf(evt_path, MAXPATHLEN, "%s:%s", 984 snprintf(evt_path, MAXPATHLEN, "%s:%s",
984 sys_dirent.d_name, evt_dirent.d_name); 985 sys_dirent.d_name, evt_dirent.d_name);
985 if (!strcmp(evt_path, event_string)) { 986 if (!strcmp(evt_path, event_string)) {
986 closedir(evt_dir); 987 closedir(evt_dir);
987 closedir(sys_dir); 988 closedir(sys_dir);
988 return 1; 989 return 1;
989 } 990 }
990 } 991 }
991 closedir(evt_dir); 992 closedir(evt_dir);
992 } 993 }
993 closedir(sys_dir); 994 closedir(sys_dir);
994 return 0; 995 return 0;
995 } 996 }
996 997
997 void print_events_type(u8 type) 998 void print_events_type(u8 type)
998 { 999 {
999 struct event_symbol *syms = event_symbols; 1000 struct event_symbol *syms = event_symbols;
1000 unsigned int i; 1001 unsigned int i;
1001 char name[64]; 1002 char name[64];
1002 1003
1003 for (i = 0; i < ARRAY_SIZE(event_symbols); i++, syms++) { 1004 for (i = 0; i < ARRAY_SIZE(event_symbols); i++, syms++) {
1004 if (type != syms->type) 1005 if (type != syms->type)
1005 continue; 1006 continue;
1006 1007
1007 if (strlen(syms->alias)) 1008 if (strlen(syms->alias))
1008 snprintf(name, sizeof(name), "%s OR %s", 1009 snprintf(name, sizeof(name), "%s OR %s",
1009 syms->symbol, syms->alias); 1010 syms->symbol, syms->alias);
1010 else 1011 else
1011 snprintf(name, sizeof(name), "%s", syms->symbol); 1012 snprintf(name, sizeof(name), "%s", syms->symbol);
1012 1013
1013 printf(" %-50s [%s]\n", name, 1014 printf(" %-50s [%s]\n", name,
1014 event_type_descriptors[type]); 1015 event_type_descriptors[type]);
1015 } 1016 }
1016 } 1017 }
1017 1018
1018 int print_hwcache_events(const char *event_glob) 1019 int print_hwcache_events(const char *event_glob)
1019 { 1020 {
1020 unsigned int type, op, i, printed = 0; 1021 unsigned int type, op, i, printed = 0;
1021 1022
1022 for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) { 1023 for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
1023 for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) { 1024 for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
1024 /* skip invalid cache type */ 1025 /* skip invalid cache type */
1025 if (!is_cache_op_valid(type, op)) 1026 if (!is_cache_op_valid(type, op))
1026 continue; 1027 continue;
1027 1028
1028 for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) { 1029 for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
1029 char *name = event_cache_name(type, op, i); 1030 char *name = event_cache_name(type, op, i);
1030 1031
1031 if (event_glob != NULL && !strglobmatch(name, event_glob)) 1032 if (event_glob != NULL && !strglobmatch(name, event_glob))
1032 continue; 1033 continue;
1033 1034
1034 printf(" %-50s [%s]\n", name, 1035 printf(" %-50s [%s]\n", name,
1035 event_type_descriptors[PERF_TYPE_HW_CACHE]); 1036 event_type_descriptors[PERF_TYPE_HW_CACHE]);
1036 ++printed; 1037 ++printed;
1037 } 1038 }
1038 } 1039 }
1039 } 1040 }
1040 1041
1041 return printed; 1042 return printed;
1042 } 1043 }
1043 1044
1044 #define MAX_NAME_LEN 100 1045 #define MAX_NAME_LEN 100
1045 1046
1046 /* 1047 /*
1047 * Print the help text for the event symbols: 1048 * Print the help text for the event symbols:
1048 */ 1049 */
1049 void print_events(const char *event_glob) 1050 void print_events(const char *event_glob)
1050 { 1051 {
1051 unsigned int i, type, prev_type = -1, printed = 0, ntypes_printed = 0; 1052 unsigned int i, type, prev_type = -1, printed = 0, ntypes_printed = 0;
1052 struct event_symbol *syms = event_symbols; 1053 struct event_symbol *syms = event_symbols;
1053 char name[MAX_NAME_LEN]; 1054 char name[MAX_NAME_LEN];
1054 1055
1055 printf("\n"); 1056 printf("\n");
1056 printf("List of pre-defined events (to be used in -e):\n"); 1057 printf("List of pre-defined events (to be used in -e):\n");
1057 1058
1058 for (i = 0; i < ARRAY_SIZE(event_symbols); i++, syms++) { 1059 for (i = 0; i < ARRAY_SIZE(event_symbols); i++, syms++) {
1059 type = syms->type; 1060 type = syms->type;
1060 1061
1061 if (type != prev_type && printed) { 1062 if (type != prev_type && printed) {
1062 printf("\n"); 1063 printf("\n");
1063 printed = 0; 1064 printed = 0;
1064 ntypes_printed++; 1065 ntypes_printed++;
1065 } 1066 }
1066 1067
1067 if (event_glob != NULL && 1068 if (event_glob != NULL &&
1068 !(strglobmatch(syms->symbol, event_glob) || 1069 !(strglobmatch(syms->symbol, event_glob) ||
1069 (syms->alias && strglobmatch(syms->alias, event_glob)))) 1070 (syms->alias && strglobmatch(syms->alias, event_glob))))
1070 continue; 1071 continue;
1071 1072
1072 if (strlen(syms->alias)) 1073 if (strlen(syms->alias))
1073 snprintf(name, MAX_NAME_LEN, "%s OR %s", syms->symbol, syms->alias); 1074 snprintf(name, MAX_NAME_LEN, "%s OR %s", syms->symbol, syms->alias);
1074 else 1075 else
1075 strncpy(name, syms->symbol, MAX_NAME_LEN); 1076 strncpy(name, syms->symbol, MAX_NAME_LEN);
1076 printf(" %-50s [%s]\n", name, 1077 printf(" %-50s [%s]\n", name,
1077 event_type_descriptors[type]); 1078 event_type_descriptors[type]);
1078 1079
1079 prev_type = type; 1080 prev_type = type;
1080 ++printed; 1081 ++printed;
1081 } 1082 }
1082 1083
1083 if (ntypes_printed) { 1084 if (ntypes_printed) {
1084 printed = 0; 1085 printed = 0;
1085 printf("\n"); 1086 printf("\n");
1086 } 1087 }
1087 print_hwcache_events(event_glob); 1088 print_hwcache_events(event_glob);
1088 1089
1089 if (event_glob != NULL) 1090 if (event_glob != NULL)
1090 return; 1091 return;
1091 1092
1092 printf("\n"); 1093 printf("\n");
1093 printf(" %-50s [%s]\n", 1094 printf(" %-50s [%s]\n",
1094 "rNNN (see 'perf list --help' on how to encode it)", 1095 "rNNN (see 'perf list --help' on how to encode it)",
1095 event_type_descriptors[PERF_TYPE_RAW]); 1096 event_type_descriptors[PERF_TYPE_RAW]);
1096 printf("\n"); 1097 printf("\n");
1097 1098
1098 printf(" %-50s [%s]\n", 1099 printf(" %-50s [%s]\n",
1099 "mem:<addr>[:access]", 1100 "mem:<addr>[:access]",
1100 event_type_descriptors[PERF_TYPE_BREAKPOINT]); 1101 event_type_descriptors[PERF_TYPE_BREAKPOINT]);
1101 printf("\n"); 1102 printf("\n");
1102 1103
1103 print_tracepoint_events(NULL, NULL); 1104 print_tracepoint_events(NULL, NULL);
1104 } 1105 }
1105 1106
tools/perf/util/util.c
1 #include "../perf.h"
1 #include "util.h" 2 #include "util.h"
2 #include <sys/mman.h> 3 #include <sys/mman.h>
4
5 /*
6 * XXX We need to find a better place for these things...
7 */
8 bool perf_host = true;
9 bool perf_guest = true;
10
11 void event_attr_init(struct perf_event_attr *attr)
12 {
13 if (!perf_host)
14 attr->exclude_host = 1;
15 if (!perf_guest)
16 attr->exclude_guest = 1;
17 }
3 18
4 int mkdir_p(char *path, mode_t mode) 19 int mkdir_p(char *path, mode_t mode)
5 { 20 {
6 struct stat st; 21 struct stat st;
7 int err; 22 int err;
8 char *d = path; 23 char *d = path;
9 24
10 if (*d != '/') 25 if (*d != '/')
11 return -1; 26 return -1;
12 27
13 if (stat(path, &st) == 0) 28 if (stat(path, &st) == 0)
14 return 0; 29 return 0;
15 30
16 while (*++d == '/'); 31 while (*++d == '/');
17 32
18 while ((d = strchr(d, '/'))) { 33 while ((d = strchr(d, '/'))) {
19 *d = '\0'; 34 *d = '\0';
20 err = stat(path, &st) && mkdir(path, mode); 35 err = stat(path, &st) && mkdir(path, mode);
21 *d++ = '/'; 36 *d++ = '/';
22 if (err) 37 if (err)
23 return -1; 38 return -1;
24 while (*d == '/') 39 while (*d == '/')
25 ++d; 40 ++d;
26 } 41 }
27 return (stat(path, &st) && mkdir(path, mode)) ? -1 : 0; 42 return (stat(path, &st) && mkdir(path, mode)) ? -1 : 0;
28 } 43 }
29 44
30 static int slow_copyfile(const char *from, const char *to) 45 static int slow_copyfile(const char *from, const char *to)
31 { 46 {
32 int err = 0; 47 int err = 0;
33 char *line = NULL; 48 char *line = NULL;
34 size_t n; 49 size_t n;
35 FILE *from_fp = fopen(from, "r"), *to_fp; 50 FILE *from_fp = fopen(from, "r"), *to_fp;
36 51
37 if (from_fp == NULL) 52 if (from_fp == NULL)
38 goto out; 53 goto out;
39 54
40 to_fp = fopen(to, "w"); 55 to_fp = fopen(to, "w");
41 if (to_fp == NULL) 56 if (to_fp == NULL)
42 goto out_fclose_from; 57 goto out_fclose_from;
43 58
44 while (getline(&line, &n, from_fp) > 0) 59 while (getline(&line, &n, from_fp) > 0)
45 if (fputs(line, to_fp) == EOF) 60 if (fputs(line, to_fp) == EOF)
46 goto out_fclose_to; 61 goto out_fclose_to;
47 err = 0; 62 err = 0;
48 out_fclose_to: 63 out_fclose_to:
49 fclose(to_fp); 64 fclose(to_fp);
50 free(line); 65 free(line);
51 out_fclose_from: 66 out_fclose_from:
52 fclose(from_fp); 67 fclose(from_fp);
53 out: 68 out:
54 return err; 69 return err;
55 } 70 }
56 71
57 int copyfile(const char *from, const char *to) 72 int copyfile(const char *from, const char *to)
58 { 73 {
59 int fromfd, tofd; 74 int fromfd, tofd;
60 struct stat st; 75 struct stat st;
61 void *addr; 76 void *addr;
62 int err = -1; 77 int err = -1;
63 78
64 if (stat(from, &st)) 79 if (stat(from, &st))
65 goto out; 80 goto out;
66 81
67 if (st.st_size == 0) /* /proc? do it slowly... */ 82 if (st.st_size == 0) /* /proc? do it slowly... */
68 return slow_copyfile(from, to); 83 return slow_copyfile(from, to);
69 84
70 fromfd = open(from, O_RDONLY); 85 fromfd = open(from, O_RDONLY);
71 if (fromfd < 0) 86 if (fromfd < 0)
72 goto out; 87 goto out;
73 88
74 tofd = creat(to, 0755); 89 tofd = creat(to, 0755);
75 if (tofd < 0) 90 if (tofd < 0)
76 goto out_close_from; 91 goto out_close_from;
77 92
78 addr = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fromfd, 0); 93 addr = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fromfd, 0);
79 if (addr == MAP_FAILED) 94 if (addr == MAP_FAILED)
80 goto out_close_to; 95 goto out_close_to;
81 96
82 if (write(tofd, addr, st.st_size) == st.st_size) 97 if (write(tofd, addr, st.st_size) == st.st_size)
83 err = 0; 98 err = 0;
84 99
85 munmap(addr, st.st_size); 100 munmap(addr, st.st_size);
86 out_close_to: 101 out_close_to:
87 close(tofd); 102 close(tofd);
88 if (err) 103 if (err)
89 unlink(to); 104 unlink(to);
90 out_close_from: 105 out_close_from:
91 close(fromfd); 106 close(fromfd);
92 out: 107 out:
93 return err; 108 return err;
94 } 109 }
95 110
96 unsigned long convert_unit(unsigned long value, char *unit) 111 unsigned long convert_unit(unsigned long value, char *unit)
97 { 112 {
98 *unit = ' '; 113 *unit = ' ';
99 114
100 if (value > 1000) { 115 if (value > 1000) {
101 value /= 1000; 116 value /= 1000;
102 *unit = 'K'; 117 *unit = 'K';
103 } 118 }
104 119
105 if (value > 1000) { 120 if (value > 1000) {
106 value /= 1000; 121 value /= 1000;
107 *unit = 'M'; 122 *unit = 'M';
108 } 123 }
109 124
110 if (value > 1000) { 125 if (value > 1000) {
111 value /= 1000; 126 value /= 1000;
112 *unit = 'G'; 127 *unit = 'G';
113 } 128 }
114 129
115 return value; 130 return value;
116 } 131 }
117 132
118 int readn(int fd, void *buf, size_t n) 133 int readn(int fd, void *buf, size_t n)
119 { 134 {
120 void *buf_start = buf; 135 void *buf_start = buf;
121 136
122 while (n) { 137 while (n) {
123 int ret = read(fd, buf, n); 138 int ret = read(fd, buf, n);
124 139
125 if (ret <= 0) 140 if (ret <= 0)
126 return ret; 141 return ret;
127 142
128 n -= ret; 143 n -= ret;
129 buf += ret; 144 buf += ret;
130 } 145 }
131 146
132 return buf - buf_start; 147 return buf - buf_start;
133 } 148 }
134 149
tools/perf/util/util.h
1 #ifndef GIT_COMPAT_UTIL_H 1 #ifndef GIT_COMPAT_UTIL_H
2 #define GIT_COMPAT_UTIL_H 2 #define GIT_COMPAT_UTIL_H
3 3
4 #define _FILE_OFFSET_BITS 64 4 #define _FILE_OFFSET_BITS 64
5 5
6 #ifndef FLEX_ARRAY 6 #ifndef FLEX_ARRAY
7 /* 7 /*
8 * See if our compiler is known to support flexible array members. 8 * See if our compiler is known to support flexible array members.
9 */ 9 */
10 #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) 10 #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)
11 # define FLEX_ARRAY /* empty */ 11 # define FLEX_ARRAY /* empty */
12 #elif defined(__GNUC__) 12 #elif defined(__GNUC__)
13 # if (__GNUC__ >= 3) 13 # if (__GNUC__ >= 3)
14 # define FLEX_ARRAY /* empty */ 14 # define FLEX_ARRAY /* empty */
15 # else 15 # else
16 # define FLEX_ARRAY 0 /* older GNU extension */ 16 # define FLEX_ARRAY 0 /* older GNU extension */
17 # endif 17 # endif
18 #endif 18 #endif
19 19
20 /* 20 /*
21 * Otherwise, default to safer but a bit wasteful traditional style 21 * Otherwise, default to safer but a bit wasteful traditional style
22 */ 22 */
23 #ifndef FLEX_ARRAY 23 #ifndef FLEX_ARRAY
24 # define FLEX_ARRAY 1 24 # define FLEX_ARRAY 1
25 #endif 25 #endif
26 #endif 26 #endif
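FLEX_ARRAY above selects the right spelling of a flexible array member: empty under C99 or GCC 3+, the zero-length extension on older GCC, and a one-element fallback otherwise. A hedged usage sketch; struct msg and msg_new() are made up for illustration and assume only util.h.

/* Illustration only: a variable-length record whose payload is declared
 * with FLEX_ARRAY. Allocating sizeof(*m) + len bytes is enough with any
 * of the definitions chosen above (at worst slightly oversized with the
 * one-element fallback). */
#include "util.h"

struct msg {
        size_t  len;
        char    data[FLEX_ARRAY];       /* payload follows the fixed header */
};

static struct msg *msg_new(const void *payload, size_t len)
{
        struct msg *m = malloc(sizeof(*m) + len);

        if (m) {
                m->len = len;
                memcpy(m->data, payload, len);
        }
        return m;
}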
27 27
28 #define ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0])) 28 #define ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0]))
29 29
30 #ifdef __GNUC__ 30 #ifdef __GNUC__
31 #define TYPEOF(x) (__typeof__(x)) 31 #define TYPEOF(x) (__typeof__(x))
32 #else 32 #else
33 #define TYPEOF(x) 33 #define TYPEOF(x)
34 #endif 34 #endif
35 35
36 #define MSB(x, bits) ((x) & TYPEOF(x)(~0ULL << (sizeof(x) * 8 - (bits)))) 36 #define MSB(x, bits) ((x) & TYPEOF(x)(~0ULL << (sizeof(x) * 8 - (bits))))
37 #define HAS_MULTI_BITS(i) ((i) & ((i) - 1)) /* checks if an integer has more than 1 bit set */ 37 #define HAS_MULTI_BITS(i) ((i) & ((i) - 1)) /* checks if an integer has more than 1 bit set */
38 38
39 /* Approximation of the length of the decimal representation of this type. */ 39 /* Approximation of the length of the decimal representation of this type. */
40 #define decimal_length(x) ((int)(sizeof(x) * 2.56 + 0.5) + 1) 40 #define decimal_length(x) ((int)(sizeof(x) * 2.56 + 0.5) + 1)
41 41
42 #define _ALL_SOURCE 1 42 #define _ALL_SOURCE 1
43 #define _GNU_SOURCE 1 43 #define _GNU_SOURCE 1
44 #define _BSD_SOURCE 1 44 #define _BSD_SOURCE 1
45 #define HAS_BOOL 45 #define HAS_BOOL
46 46
47 #include <unistd.h> 47 #include <unistd.h>
48 #include <stdio.h> 48 #include <stdio.h>
49 #include <sys/stat.h> 49 #include <sys/stat.h>
50 #include <sys/statfs.h> 50 #include <sys/statfs.h>
51 #include <fcntl.h> 51 #include <fcntl.h>
52 #include <stdbool.h> 52 #include <stdbool.h>
53 #include <stddef.h> 53 #include <stddef.h>
54 #include <stdlib.h> 54 #include <stdlib.h>
55 #include <stdarg.h> 55 #include <stdarg.h>
56 #include <string.h> 56 #include <string.h>
57 #include <errno.h> 57 #include <errno.h>
58 #include <limits.h> 58 #include <limits.h>
59 #include <sys/param.h> 59 #include <sys/param.h>
60 #include <sys/types.h> 60 #include <sys/types.h>
61 #include <dirent.h> 61 #include <dirent.h>
62 #include <sys/time.h> 62 #include <sys/time.h>
63 #include <time.h> 63 #include <time.h>
64 #include <signal.h> 64 #include <signal.h>
65 #include <fnmatch.h> 65 #include <fnmatch.h>
66 #include <assert.h> 66 #include <assert.h>
67 #include <regex.h> 67 #include <regex.h>
68 #include <utime.h> 68 #include <utime.h>
69 #include <sys/wait.h> 69 #include <sys/wait.h>
70 #include <sys/poll.h> 70 #include <sys/poll.h>
71 #include <sys/socket.h> 71 #include <sys/socket.h>
72 #include <sys/ioctl.h> 72 #include <sys/ioctl.h>
73 #include <sys/select.h> 73 #include <sys/select.h>
74 #include <netinet/in.h> 74 #include <netinet/in.h>
75 #include <netinet/tcp.h> 75 #include <netinet/tcp.h>
76 #include <arpa/inet.h> 76 #include <arpa/inet.h>
77 #include <netdb.h> 77 #include <netdb.h>
78 #include <pwd.h> 78 #include <pwd.h>
79 #include <inttypes.h> 79 #include <inttypes.h>
80 #include "../../../include/linux/magic.h" 80 #include "../../../include/linux/magic.h"
81 #include "types.h" 81 #include "types.h"
82 #include <sys/ttydefaults.h> 82 #include <sys/ttydefaults.h>
83 83
84 extern const char *graph_line; 84 extern const char *graph_line;
85 extern const char *graph_dotted_line; 85 extern const char *graph_dotted_line;
86 extern char buildid_dir[]; 86 extern char buildid_dir[];
87 87
88 /* On most systems <limits.h> would have given us this, but 88 /* On most systems <limits.h> would have given us this, but
89 * not on some systems (e.g. GNU/Hurd). 89 * not on some systems (e.g. GNU/Hurd).
90 */ 90 */
91 #ifndef PATH_MAX 91 #ifndef PATH_MAX
92 #define PATH_MAX 4096 92 #define PATH_MAX 4096
93 #endif 93 #endif
94 94
95 #ifndef PRIuMAX 95 #ifndef PRIuMAX
96 #define PRIuMAX "llu" 96 #define PRIuMAX "llu"
97 #endif 97 #endif
98 98
99 #ifndef PRIu32 99 #ifndef PRIu32
100 #define PRIu32 "u" 100 #define PRIu32 "u"
101 #endif 101 #endif
102 102
103 #ifndef PRIx32 103 #ifndef PRIx32
104 #define PRIx32 "x" 104 #define PRIx32 "x"
105 #endif 105 #endif
106 106
107 #ifndef PATH_SEP 107 #ifndef PATH_SEP
108 #define PATH_SEP ':' 108 #define PATH_SEP ':'
109 #endif 109 #endif
110 110
111 #ifndef STRIP_EXTENSION 111 #ifndef STRIP_EXTENSION
112 #define STRIP_EXTENSION "" 112 #define STRIP_EXTENSION ""
113 #endif 113 #endif
114 114
115 #ifndef has_dos_drive_prefix 115 #ifndef has_dos_drive_prefix
116 #define has_dos_drive_prefix(path) 0 116 #define has_dos_drive_prefix(path) 0
117 #endif 117 #endif
118 118
119 #ifndef is_dir_sep 119 #ifndef is_dir_sep
120 #define is_dir_sep(c) ((c) == '/') 120 #define is_dir_sep(c) ((c) == '/')
121 #endif 121 #endif
122 122
123 #ifdef __GNUC__ 123 #ifdef __GNUC__
124 #define NORETURN __attribute__((__noreturn__)) 124 #define NORETURN __attribute__((__noreturn__))
125 #else 125 #else
126 #define NORETURN 126 #define NORETURN
127 #ifndef __attribute__ 127 #ifndef __attribute__
128 #define __attribute__(x) 128 #define __attribute__(x)
129 #endif 129 #endif
130 #endif 130 #endif
131 131
132 /* General helper functions */ 132 /* General helper functions */
133 extern void usage(const char *err) NORETURN; 133 extern void usage(const char *err) NORETURN;
134 extern void die(const char *err, ...) NORETURN __attribute__((format (printf, 1, 2))); 134 extern void die(const char *err, ...) NORETURN __attribute__((format (printf, 1, 2)));
135 extern int error(const char *err, ...) __attribute__((format (printf, 1, 2))); 135 extern int error(const char *err, ...) __attribute__((format (printf, 1, 2)));
136 extern void warning(const char *err, ...) __attribute__((format (printf, 1, 2))); 136 extern void warning(const char *err, ...) __attribute__((format (printf, 1, 2)));
137 137
138 #include "../../../include/linux/stringify.h" 138 #include "../../../include/linux/stringify.h"
139 139
140 #define DIE_IF(cnd) \ 140 #define DIE_IF(cnd) \
141 do { if (cnd) \ 141 do { if (cnd) \
142 die(" at (" __FILE__ ":" __stringify(__LINE__) "): " \ 142 die(" at (" __FILE__ ":" __stringify(__LINE__) "): " \
143 __stringify(cnd) "\n"); \ 143 __stringify(cnd) "\n"); \
144 } while (0) 144 } while (0)
145 145
146 146
147 extern void set_die_routine(void (*routine)(const char *err, va_list params) NORETURN); 147 extern void set_die_routine(void (*routine)(const char *err, va_list params) NORETURN);
148 148
149 extern int prefixcmp(const char *str, const char *prefix); 149 extern int prefixcmp(const char *str, const char *prefix);
150 extern void set_buildid_dir(void); 150 extern void set_buildid_dir(void);
151 extern void disable_buildid_cache(void); 151 extern void disable_buildid_cache(void);
152 152
153 static inline const char *skip_prefix(const char *str, const char *prefix) 153 static inline const char *skip_prefix(const char *str, const char *prefix)
154 { 154 {
155 size_t len = strlen(prefix); 155 size_t len = strlen(prefix);
156 return strncmp(str, prefix, len) ? NULL : str + len; 156 return strncmp(str, prefix, len) ? NULL : str + len;
157 } 157 }
158 158
159 #ifdef __GLIBC_PREREQ 159 #ifdef __GLIBC_PREREQ
160 #if __GLIBC_PREREQ(2, 1) 160 #if __GLIBC_PREREQ(2, 1)
161 #define HAVE_STRCHRNUL 161 #define HAVE_STRCHRNUL
162 #endif 162 #endif
163 #endif 163 #endif
164 164
165 #ifndef HAVE_STRCHRNUL 165 #ifndef HAVE_STRCHRNUL
166 #define strchrnul gitstrchrnul 166 #define strchrnul gitstrchrnul
167 static inline char *gitstrchrnul(const char *s, int c) 167 static inline char *gitstrchrnul(const char *s, int c)
168 { 168 {
169 while (*s && *s != c) 169 while (*s && *s != c)
170 s++; 170 s++;
171 return (char *)s; 171 return (char *)s;
172 } 172 }
173 #endif 173 #endif
174 174
175 /* 175 /*
176 * Wrappers: 176 * Wrappers:
177 */ 177 */
178 extern char *xstrdup(const char *str); 178 extern char *xstrdup(const char *str);
179 extern void *xrealloc(void *ptr, size_t size) __attribute__((weak)); 179 extern void *xrealloc(void *ptr, size_t size) __attribute__((weak));
180 180
181 181
182 static inline void *zalloc(size_t size) 182 static inline void *zalloc(size_t size)
183 { 183 {
184 return calloc(1, size); 184 return calloc(1, size);
185 } 185 }
186 186
187 static inline int has_extension(const char *filename, const char *ext) 187 static inline int has_extension(const char *filename, const char *ext)
188 { 188 {
189 size_t len = strlen(filename); 189 size_t len = strlen(filename);
190 size_t extlen = strlen(ext); 190 size_t extlen = strlen(ext);
191 191
192 return len > extlen && !memcmp(filename + len - extlen, ext, extlen); 192 return len > extlen && !memcmp(filename + len - extlen, ext, extlen);
193 } 193 }
194 194
195 /* Sane ctype - no locale, and works with signed chars */ 195 /* Sane ctype - no locale, and works with signed chars */
196 #undef isascii 196 #undef isascii
197 #undef isspace 197 #undef isspace
198 #undef isdigit 198 #undef isdigit
199 #undef isxdigit 199 #undef isxdigit
200 #undef isalpha 200 #undef isalpha
201 #undef isprint 201 #undef isprint
202 #undef isalnum 202 #undef isalnum
203 #undef tolower 203 #undef tolower
204 #undef toupper 204 #undef toupper
205 205
206 extern unsigned char sane_ctype[256]; 206 extern unsigned char sane_ctype[256];
207 #define GIT_SPACE 0x01 207 #define GIT_SPACE 0x01
208 #define GIT_DIGIT 0x02 208 #define GIT_DIGIT 0x02
209 #define GIT_ALPHA 0x04 209 #define GIT_ALPHA 0x04
210 #define GIT_GLOB_SPECIAL 0x08 210 #define GIT_GLOB_SPECIAL 0x08
211 #define GIT_REGEX_SPECIAL 0x10 211 #define GIT_REGEX_SPECIAL 0x10
212 #define GIT_PRINT_EXTRA 0x20 212 #define GIT_PRINT_EXTRA 0x20
213 #define GIT_PRINT 0x3E 213 #define GIT_PRINT 0x3E
214 #define sane_istest(x,mask) ((sane_ctype[(unsigned char)(x)] & (mask)) != 0) 214 #define sane_istest(x,mask) ((sane_ctype[(unsigned char)(x)] & (mask)) != 0)
215 #define isascii(x) (((x) & ~0x7f) == 0) 215 #define isascii(x) (((x) & ~0x7f) == 0)
216 #define isspace(x) sane_istest(x,GIT_SPACE) 216 #define isspace(x) sane_istest(x,GIT_SPACE)
217 #define isdigit(x) sane_istest(x,GIT_DIGIT) 217 #define isdigit(x) sane_istest(x,GIT_DIGIT)
218 #define isxdigit(x) \ 218 #define isxdigit(x) \
219 (sane_istest(toupper(x), GIT_ALPHA | GIT_DIGIT) && toupper(x) < 'G') 219 (sane_istest(toupper(x), GIT_ALPHA | GIT_DIGIT) && toupper(x) < 'G')
220 #define isalpha(x) sane_istest(x,GIT_ALPHA) 220 #define isalpha(x) sane_istest(x,GIT_ALPHA)
221 #define isalnum(x) sane_istest(x,GIT_ALPHA | GIT_DIGIT) 221 #define isalnum(x) sane_istest(x,GIT_ALPHA | GIT_DIGIT)
222 #define isprint(x) sane_istest(x,GIT_PRINT) 222 #define isprint(x) sane_istest(x,GIT_PRINT)
223 #define tolower(x) sane_case((unsigned char)(x), 0x20) 223 #define tolower(x) sane_case((unsigned char)(x), 0x20)
224 #define toupper(x) sane_case((unsigned char)(x), 0) 224 #define toupper(x) sane_case((unsigned char)(x), 0)
225 225
226 static inline int sane_case(int x, int high) 226 static inline int sane_case(int x, int high)
227 { 227 {
228 if (sane_istest(x, GIT_ALPHA)) 228 if (sane_istest(x, GIT_ALPHA))
229 x = (x & ~0x20) | high; 229 x = (x & ~0x20) | high;
230 return x; 230 return x;
231 } 231 }
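The sane_ctype table and the macros above replace the <ctype.h> classifiers with locale-independent versions that are safe to call on a plain (possibly signed) char, because sane_istest() casts its argument to unsigned char before indexing the table. A hedged illustration; count_spaces() is hypothetical and assumes only util.h.

/* Illustration only: counting whitespace in a byte string. With the
 * standard <ctype.h>, passing a negative char value to isspace() is
 * undefined behaviour; here isspace() expands to sane_istest(..., GIT_SPACE),
 * which indexes sane_ctype through an (unsigned char) cast. */
#include "util.h"

static int count_spaces(const char *s)
{
        int n = 0;

        while (*s)
                if (isspace(*s++))
                        n++;
        return n;
}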
232 232
233 int mkdir_p(char *path, mode_t mode); 233 int mkdir_p(char *path, mode_t mode);
234 int copyfile(const char *from, const char *to); 234 int copyfile(const char *from, const char *to);
235 235
236 s64 perf_atoll(const char *str); 236 s64 perf_atoll(const char *str);
237 char **argv_split(const char *str, int *argcp); 237 char **argv_split(const char *str, int *argcp);
238 void argv_free(char **argv); 238 void argv_free(char **argv);
239 bool strglobmatch(const char *str, const char *pat); 239 bool strglobmatch(const char *str, const char *pat);
240 bool strlazymatch(const char *str, const char *pat); 240 bool strlazymatch(const char *str, const char *pat);
241 int strtailcmp(const char *s1, const char *s2); 241 int strtailcmp(const char *s1, const char *s2);
242 unsigned long convert_unit(unsigned long value, char *unit); 242 unsigned long convert_unit(unsigned long value, char *unit);
243 int readn(int fd, void *buf, size_t size); 243 int readn(int fd, void *buf, size_t size);
244 244
245 struct perf_event_attr;
246
247 void event_attr_init(struct perf_event_attr *attr);
248
245 #define _STR(x) #x 249 #define _STR(x) #x
246 #define STR(x) _STR(x) 250 #define STR(x) _STR(x)
247 251
248 /* 252 /*
249 * Determine whether some value is a power of two, where zero is 253 * Determine whether some value is a power of two, where zero is
250 * *not* considered a power of two. 254 * *not* considered a power of two.
251 */ 255 */
252 256
253 static inline __attribute__((const)) 257 static inline __attribute__((const))
254 bool is_power_of_2(unsigned long n) 258 bool is_power_of_2(unsigned long n)
255 { 259 {
256 return (n != 0 && ((n & (n - 1)) == 0)); 260 return (n != 0 && ((n & (n - 1)) == 0));
257 } 261 }
258 262
259 #endif 263 #endif
260 264
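The only change to util.h in this commit is the event_attr_init() declaration added near the end of the header; its definition lives in util.c and is not part of this excerpt. Below is a rough sketch of what that helper plausibly looks like, inferred from the commit description rather than copied from the diff; the "perf.h" include and the flag defaults are assumptions.

/* Sketch only, not copied from this diff: apply the global perf_host /
 * perf_guest flags to a freshly initialised event attribute so the event
 * excludes whichever side was not requested. With the assumed defaults
 * (host counting on, guest counting opt-in), events count host-side
 * activity only until --guest is given. */
#include "perf.h"       /* assumed to provide struct perf_event_attr */
#include "util.h"

bool perf_host = true;  /* assumed default: count host-side events */
bool perf_guest;        /* assumed default: guest counting is opt-in */

void event_attr_init(struct perf_event_attr *attr)
{
        if (!perf_guest)
                attr->exclude_guest = 1;
        if (!perf_host)
                attr->exclude_host = 1;
}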