Commit 72f4a11d2fb16792f5e5107922652366194cfd66

Authored by Ingo Molnar

Merge tag 'perf-urgent-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/urgent

Pull perf/urgent fixes from Arnaldo Carvalho de Melo:

 * Fix parsing with no sample_id_all bit set, this regression prevents perf
   from reading old perf.data files generated in systems where
   perf_event_attr.sample_id_all isn't available, from Adrian Hunter.

 * Add signal checking to the inner 'perf trace' event processing loop, allowing
   faster response to control+C.

 * Fix formatting of long symbol names removing the hardcoding of a buffer
   size used to format histogram entries, which was truncating the lines.

 * Separate progress bar update when processing events, reducing potentially big
   overhead from unneeded TUI progress bar screen updates, from Jiri Olsa.

 * Fix 'perf trace' build in architectures where MAP_32BIT is not defined, from
   Kyle McMartin.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Showing 8 changed files Inline Diff

1 include ../scripts/Makefile.include 1 include ../scripts/Makefile.include
2 2
3 # The default target of this Makefile is... 3 # The default target of this Makefile is...
4 all: 4 all:
5 5
6 include config/utilities.mak 6 include config/utilities.mak
7 7
8 # Define V to have a more verbose compile. 8 # Define V to have a more verbose compile.
9 # 9 #
10 # Define O to save output files in a separate directory. 10 # Define O to save output files in a separate directory.
11 # 11 #
12 # Define ARCH as name of target architecture if you want cross-builds. 12 # Define ARCH as name of target architecture if you want cross-builds.
13 # 13 #
14 # Define CROSS_COMPILE as prefix name of compiler if you want cross-builds. 14 # Define CROSS_COMPILE as prefix name of compiler if you want cross-builds.
15 # 15 #
16 # Define NO_LIBPERL to disable perl script extension. 16 # Define NO_LIBPERL to disable perl script extension.
17 # 17 #
18 # Define NO_LIBPYTHON to disable python script extension. 18 # Define NO_LIBPYTHON to disable python script extension.
19 # 19 #
20 # Define PYTHON to point to the python binary if the default 20 # Define PYTHON to point to the python binary if the default
21 # `python' is not correct; for example: PYTHON=python2 21 # `python' is not correct; for example: PYTHON=python2
22 # 22 #
23 # Define PYTHON_CONFIG to point to the python-config binary if 23 # Define PYTHON_CONFIG to point to the python-config binary if
24 # the default `$(PYTHON)-config' is not correct. 24 # the default `$(PYTHON)-config' is not correct.
25 # 25 #
26 # Define ASCIIDOC8 if you want to format documentation with AsciiDoc 8 26 # Define ASCIIDOC8 if you want to format documentation with AsciiDoc 8
27 # 27 #
28 # Define DOCBOOK_XSL_172 if you want to format man pages with DocBook XSL v1.72. 28 # Define DOCBOOK_XSL_172 if you want to format man pages with DocBook XSL v1.72.
29 # 29 #
30 # Define LDFLAGS=-static to build a static binary. 30 # Define LDFLAGS=-static to build a static binary.
31 # 31 #
32 # Define EXTRA_CFLAGS=-m64 or EXTRA_CFLAGS=-m32 as appropriate for cross-builds. 32 # Define EXTRA_CFLAGS=-m64 or EXTRA_CFLAGS=-m32 as appropriate for cross-builds.
33 # 33 #
34 # Define NO_DWARF if you do not want debug-info analysis feature at all. 34 # Define NO_DWARF if you do not want debug-info analysis feature at all.
35 # 35 #
36 # Define WERROR=0 to disable treating any warnings as errors. 36 # Define WERROR=0 to disable treating any warnings as errors.
37 # 37 #
38 # Define NO_NEWT if you do not want TUI support. (deprecated) 38 # Define NO_NEWT if you do not want TUI support. (deprecated)
39 # 39 #
40 # Define NO_SLANG if you do not want TUI support. 40 # Define NO_SLANG if you do not want TUI support.
41 # 41 #
42 # Define NO_GTK2 if you do not want GTK+ GUI support. 42 # Define NO_GTK2 if you do not want GTK+ GUI support.
43 # 43 #
44 # Define NO_DEMANGLE if you do not want C++ symbol demangling. 44 # Define NO_DEMANGLE if you do not want C++ symbol demangling.
45 # 45 #
46 # Define NO_LIBELF if you do not want libelf dependency (e.g. cross-builds) 46 # Define NO_LIBELF if you do not want libelf dependency (e.g. cross-builds)
47 # 47 #
48 # Define NO_LIBUNWIND if you do not want libunwind dependency for dwarf 48 # Define NO_LIBUNWIND if you do not want libunwind dependency for dwarf
49 # backtrace post unwind. 49 # backtrace post unwind.
50 # 50 #
51 # Define NO_BACKTRACE if you do not want stack backtrace debug feature 51 # Define NO_BACKTRACE if you do not want stack backtrace debug feature
52 # 52 #
53 # Define NO_LIBNUMA if you do not want numa perf benchmark 53 # Define NO_LIBNUMA if you do not want numa perf benchmark
54 # 54 #
55 # Define NO_LIBAUDIT if you do not want libaudit support 55 # Define NO_LIBAUDIT if you do not want libaudit support
56 # 56 #
57 # Define NO_LIBBIONIC if you do not want bionic support 57 # Define NO_LIBBIONIC if you do not want bionic support
58 58
59 ifeq ($(srctree),) 59 ifeq ($(srctree),)
60 srctree := $(patsubst %/,%,$(dir $(shell pwd))) 60 srctree := $(patsubst %/,%,$(dir $(shell pwd)))
61 srctree := $(patsubst %/,%,$(dir $(srctree))) 61 srctree := $(patsubst %/,%,$(dir $(srctree)))
62 #$(info Determined 'srctree' to be $(srctree)) 62 #$(info Determined 'srctree' to be $(srctree))
63 endif 63 endif
64 64
65 ifneq ($(objtree),) 65 ifneq ($(objtree),)
66 #$(info Determined 'objtree' to be $(objtree)) 66 #$(info Determined 'objtree' to be $(objtree))
67 endif 67 endif
68 68
69 ifneq ($(OUTPUT),) 69 ifneq ($(OUTPUT),)
70 #$(info Determined 'OUTPUT' to be $(OUTPUT)) 70 #$(info Determined 'OUTPUT' to be $(OUTPUT))
71 endif 71 endif
72 72
73 $(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE 73 $(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE
74 @$(SHELL_PATH) util/PERF-VERSION-GEN $(OUTPUT) 74 @$(SHELL_PATH) util/PERF-VERSION-GEN $(OUTPUT)
75 75
76 CC = $(CROSS_COMPILE)gcc 76 CC = $(CROSS_COMPILE)gcc
77 AR = $(CROSS_COMPILE)ar 77 AR = $(CROSS_COMPILE)ar
78 78
79 RM = rm -f 79 RM = rm -f
80 MKDIR = mkdir 80 MKDIR = mkdir
81 FIND = find 81 FIND = find
82 INSTALL = install 82 INSTALL = install
83 FLEX = flex 83 FLEX = flex
84 BISON = bison 84 BISON = bison
85 STRIP = strip 85 STRIP = strip
86 86
87 LK_DIR = $(srctree)/tools/lib/lk/ 87 LK_DIR = $(srctree)/tools/lib/lk/
88 TRACE_EVENT_DIR = $(srctree)/tools/lib/traceevent/ 88 TRACE_EVENT_DIR = $(srctree)/tools/lib/traceevent/
89 89
90 # include config/Makefile by default and rule out 90 # include config/Makefile by default and rule out
91 # non-config cases 91 # non-config cases
92 config := 1 92 config := 1
93 93
94 NON_CONFIG_TARGETS := clean TAGS tags cscope help 94 NON_CONFIG_TARGETS := clean TAGS tags cscope help
95 95
96 ifdef MAKECMDGOALS 96 ifdef MAKECMDGOALS
97 ifeq ($(filter-out $(NON_CONFIG_TARGETS),$(MAKECMDGOALS)),) 97 ifeq ($(filter-out $(NON_CONFIG_TARGETS),$(MAKECMDGOALS)),)
98 config := 0 98 config := 0
99 endif 99 endif
100 endif 100 endif
101 101
102 ifeq ($(config),1) 102 ifeq ($(config),1)
103 include config/Makefile 103 include config/Makefile
104 endif 104 endif
105 105
106 export prefix bindir sharedir sysconfdir 106 export prefix bindir sharedir sysconfdir
107 107
108 # sparse is architecture-neutral, which means that we need to tell it 108 # sparse is architecture-neutral, which means that we need to tell it
109 # explicitly what architecture to check for. Fix this up for yours.. 109 # explicitly what architecture to check for. Fix this up for yours..
110 SPARSE_FLAGS = -D__BIG_ENDIAN__ -D__powerpc__ 110 SPARSE_FLAGS = -D__BIG_ENDIAN__ -D__powerpc__
111 111
112 # Guard against environment variables 112 # Guard against environment variables
113 BUILTIN_OBJS = 113 BUILTIN_OBJS =
114 LIB_H = 114 LIB_H =
115 LIB_OBJS = 115 LIB_OBJS =
116 PYRF_OBJS = 116 PYRF_OBJS =
117 SCRIPT_SH = 117 SCRIPT_SH =
118 118
119 SCRIPT_SH += perf-archive.sh 119 SCRIPT_SH += perf-archive.sh
120 120
121 grep-libs = $(filter -l%,$(1)) 121 grep-libs = $(filter -l%,$(1))
122 strip-libs = $(filter-out -l%,$(1)) 122 strip-libs = $(filter-out -l%,$(1))
123 123
124 ifneq ($(OUTPUT),) 124 ifneq ($(OUTPUT),)
125 TE_PATH=$(OUTPUT) 125 TE_PATH=$(OUTPUT)
126 ifneq ($(subdir),) 126 ifneq ($(subdir),)
127 LK_PATH=$(OUTPUT)/../lib/lk/ 127 LK_PATH=$(OUTPUT)/../lib/lk/
128 else 128 else
129 LK_PATH=$(OUTPUT) 129 LK_PATH=$(OUTPUT)
130 endif 130 endif
131 else 131 else
132 TE_PATH=$(TRACE_EVENT_DIR) 132 TE_PATH=$(TRACE_EVENT_DIR)
133 LK_PATH=$(LK_DIR) 133 LK_PATH=$(LK_DIR)
134 endif 134 endif
135 135
136 LIBTRACEEVENT = $(TE_PATH)libtraceevent.a 136 LIBTRACEEVENT = $(TE_PATH)libtraceevent.a
137 export LIBTRACEEVENT 137 export LIBTRACEEVENT
138 138
139 LIBLK = $(LK_PATH)liblk.a 139 LIBLK = $(LK_PATH)liblk.a
140 export LIBLK 140 export LIBLK
141 141
142 # python extension build directories 142 # python extension build directories
143 PYTHON_EXTBUILD := $(OUTPUT)python_ext_build/ 143 PYTHON_EXTBUILD := $(OUTPUT)python_ext_build/
144 PYTHON_EXTBUILD_LIB := $(PYTHON_EXTBUILD)lib/ 144 PYTHON_EXTBUILD_LIB := $(PYTHON_EXTBUILD)lib/
145 PYTHON_EXTBUILD_TMP := $(PYTHON_EXTBUILD)tmp/ 145 PYTHON_EXTBUILD_TMP := $(PYTHON_EXTBUILD)tmp/
146 export PYTHON_EXTBUILD_LIB PYTHON_EXTBUILD_TMP 146 export PYTHON_EXTBUILD_LIB PYTHON_EXTBUILD_TMP
147 147
148 python-clean := rm -rf $(PYTHON_EXTBUILD) $(OUTPUT)python/perf.so 148 python-clean := rm -rf $(PYTHON_EXTBUILD) $(OUTPUT)python/perf.so
149 149
150 PYTHON_EXT_SRCS := $(shell grep -v ^\# util/python-ext-sources) 150 PYTHON_EXT_SRCS := $(shell grep -v ^\# util/python-ext-sources)
151 PYTHON_EXT_DEPS := util/python-ext-sources util/setup.py $(LIBTRACEEVENT) $(LIBLK) 151 PYTHON_EXT_DEPS := util/python-ext-sources util/setup.py $(LIBTRACEEVENT) $(LIBLK)
152 152
153 $(OUTPUT)python/perf.so: $(PYTHON_EXT_SRCS) $(PYTHON_EXT_DEPS) 153 $(OUTPUT)python/perf.so: $(PYTHON_EXT_SRCS) $(PYTHON_EXT_DEPS)
154 $(QUIET_GEN)CFLAGS='$(CFLAGS)' $(PYTHON_WORD) util/setup.py \ 154 $(QUIET_GEN)CFLAGS='$(CFLAGS)' $(PYTHON_WORD) util/setup.py \
155 --quiet build_ext; \ 155 --quiet build_ext; \
156 mkdir -p $(OUTPUT)python && \ 156 mkdir -p $(OUTPUT)python && \
157 cp $(PYTHON_EXTBUILD_LIB)perf.so $(OUTPUT)python/ 157 cp $(PYTHON_EXTBUILD_LIB)perf.so $(OUTPUT)python/
158 # 158 #
159 # No Perl scripts right now: 159 # No Perl scripts right now:
160 # 160 #
161 161
162 SCRIPTS = $(patsubst %.sh,%,$(SCRIPT_SH)) 162 SCRIPTS = $(patsubst %.sh,%,$(SCRIPT_SH))
163 163
164 # 164 #
165 # Single 'perf' binary right now: 165 # Single 'perf' binary right now:
166 # 166 #
167 PROGRAMS += $(OUTPUT)perf 167 PROGRAMS += $(OUTPUT)perf
168 168
169 # what 'all' will build and 'install' will install, in perfexecdir 169 # what 'all' will build and 'install' will install, in perfexecdir
170 ALL_PROGRAMS = $(PROGRAMS) $(SCRIPTS) 170 ALL_PROGRAMS = $(PROGRAMS) $(SCRIPTS)
171 171
172 # what 'all' will build but not install in perfexecdir 172 # what 'all' will build but not install in perfexecdir
173 OTHER_PROGRAMS = $(OUTPUT)perf 173 OTHER_PROGRAMS = $(OUTPUT)perf
174 174
175 # Set paths to tools early so that they can be used for version tests. 175 # Set paths to tools early so that they can be used for version tests.
176 ifndef SHELL_PATH 176 ifndef SHELL_PATH
177 SHELL_PATH = /bin/sh 177 SHELL_PATH = /bin/sh
178 endif 178 endif
179 ifndef PERL_PATH 179 ifndef PERL_PATH
180 PERL_PATH = /usr/bin/perl 180 PERL_PATH = /usr/bin/perl
181 endif 181 endif
182 182
183 export PERL_PATH 183 export PERL_PATH
184 184
185 $(OUTPUT)util/parse-events-flex.c: util/parse-events.l $(OUTPUT)util/parse-events-bison.c 185 $(OUTPUT)util/parse-events-flex.c: util/parse-events.l $(OUTPUT)util/parse-events-bison.c
186 $(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/parse-events-flex.h $(PARSER_DEBUG_FLEX) -t util/parse-events.l > $(OUTPUT)util/parse-events-flex.c 186 $(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/parse-events-flex.h $(PARSER_DEBUG_FLEX) -t util/parse-events.l > $(OUTPUT)util/parse-events-flex.c
187 187
188 $(OUTPUT)util/parse-events-bison.c: util/parse-events.y 188 $(OUTPUT)util/parse-events-bison.c: util/parse-events.y
189 $(QUIET_BISON)$(BISON) -v util/parse-events.y -d $(PARSER_DEBUG_BISON) -o $(OUTPUT)util/parse-events-bison.c -p parse_events_ 189 $(QUIET_BISON)$(BISON) -v util/parse-events.y -d $(PARSER_DEBUG_BISON) -o $(OUTPUT)util/parse-events-bison.c -p parse_events_
190 190
191 $(OUTPUT)util/pmu-flex.c: util/pmu.l $(OUTPUT)util/pmu-bison.c 191 $(OUTPUT)util/pmu-flex.c: util/pmu.l $(OUTPUT)util/pmu-bison.c
192 $(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/pmu-flex.h -t util/pmu.l > $(OUTPUT)util/pmu-flex.c 192 $(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/pmu-flex.h -t util/pmu.l > $(OUTPUT)util/pmu-flex.c
193 193
194 $(OUTPUT)util/pmu-bison.c: util/pmu.y 194 $(OUTPUT)util/pmu-bison.c: util/pmu.y
195 $(QUIET_BISON)$(BISON) -v util/pmu.y -d -o $(OUTPUT)util/pmu-bison.c -p perf_pmu_ 195 $(QUIET_BISON)$(BISON) -v util/pmu.y -d -o $(OUTPUT)util/pmu-bison.c -p perf_pmu_
196 196
197 $(OUTPUT)util/parse-events.o: $(OUTPUT)util/parse-events-flex.c $(OUTPUT)util/parse-events-bison.c 197 $(OUTPUT)util/parse-events.o: $(OUTPUT)util/parse-events-flex.c $(OUTPUT)util/parse-events-bison.c
198 $(OUTPUT)util/pmu.o: $(OUTPUT)util/pmu-flex.c $(OUTPUT)util/pmu-bison.c 198 $(OUTPUT)util/pmu.o: $(OUTPUT)util/pmu-flex.c $(OUTPUT)util/pmu-bison.c
199 199
200 LIB_FILE=$(OUTPUT)libperf.a 200 LIB_FILE=$(OUTPUT)libperf.a
201 201
202 LIB_H += ../../include/uapi/linux/perf_event.h 202 LIB_H += ../../include/uapi/linux/perf_event.h
203 LIB_H += ../../include/linux/rbtree.h 203 LIB_H += ../../include/linux/rbtree.h
204 LIB_H += ../../include/linux/list.h 204 LIB_H += ../../include/linux/list.h
205 LIB_H += ../../include/uapi/linux/const.h 205 LIB_H += ../../include/uapi/linux/const.h
206 LIB_H += ../../include/linux/hash.h 206 LIB_H += ../../include/linux/hash.h
207 LIB_H += ../../include/linux/stringify.h 207 LIB_H += ../../include/linux/stringify.h
208 LIB_H += util/include/linux/bitmap.h 208 LIB_H += util/include/linux/bitmap.h
209 LIB_H += util/include/linux/bitops.h 209 LIB_H += util/include/linux/bitops.h
210 LIB_H += util/include/linux/compiler.h 210 LIB_H += util/include/linux/compiler.h
211 LIB_H += util/include/linux/const.h 211 LIB_H += util/include/linux/const.h
212 LIB_H += util/include/linux/ctype.h 212 LIB_H += util/include/linux/ctype.h
213 LIB_H += util/include/linux/kernel.h 213 LIB_H += util/include/linux/kernel.h
214 LIB_H += util/include/linux/list.h 214 LIB_H += util/include/linux/list.h
215 LIB_H += util/include/linux/export.h 215 LIB_H += util/include/linux/export.h
216 LIB_H += util/include/linux/magic.h 216 LIB_H += util/include/linux/magic.h
217 LIB_H += util/include/linux/poison.h 217 LIB_H += util/include/linux/poison.h
218 LIB_H += util/include/linux/prefetch.h 218 LIB_H += util/include/linux/prefetch.h
219 LIB_H += util/include/linux/rbtree.h 219 LIB_H += util/include/linux/rbtree.h
220 LIB_H += util/include/linux/rbtree_augmented.h 220 LIB_H += util/include/linux/rbtree_augmented.h
221 LIB_H += util/include/linux/string.h 221 LIB_H += util/include/linux/string.h
222 LIB_H += util/include/linux/types.h 222 LIB_H += util/include/linux/types.h
223 LIB_H += util/include/linux/linkage.h 223 LIB_H += util/include/linux/linkage.h
224 LIB_H += util/include/asm/asm-offsets.h 224 LIB_H += util/include/asm/asm-offsets.h
225 LIB_H += util/include/asm/bug.h 225 LIB_H += util/include/asm/bug.h
226 LIB_H += util/include/asm/byteorder.h 226 LIB_H += util/include/asm/byteorder.h
227 LIB_H += util/include/asm/hweight.h 227 LIB_H += util/include/asm/hweight.h
228 LIB_H += util/include/asm/swab.h 228 LIB_H += util/include/asm/swab.h
229 LIB_H += util/include/asm/system.h 229 LIB_H += util/include/asm/system.h
230 LIB_H += util/include/asm/uaccess.h 230 LIB_H += util/include/asm/uaccess.h
231 LIB_H += util/include/dwarf-regs.h 231 LIB_H += util/include/dwarf-regs.h
232 LIB_H += util/include/asm/dwarf2.h 232 LIB_H += util/include/asm/dwarf2.h
233 LIB_H += util/include/asm/cpufeature.h 233 LIB_H += util/include/asm/cpufeature.h
234 LIB_H += util/include/asm/unistd_32.h 234 LIB_H += util/include/asm/unistd_32.h
235 LIB_H += util/include/asm/unistd_64.h 235 LIB_H += util/include/asm/unistd_64.h
236 LIB_H += perf.h 236 LIB_H += perf.h
237 LIB_H += util/annotate.h 237 LIB_H += util/annotate.h
238 LIB_H += util/cache.h 238 LIB_H += util/cache.h
239 LIB_H += util/callchain.h 239 LIB_H += util/callchain.h
240 LIB_H += util/build-id.h 240 LIB_H += util/build-id.h
241 LIB_H += util/debug.h 241 LIB_H += util/debug.h
242 LIB_H += util/sysfs.h 242 LIB_H += util/sysfs.h
243 LIB_H += util/pmu.h 243 LIB_H += util/pmu.h
244 LIB_H += util/event.h 244 LIB_H += util/event.h
245 LIB_H += util/evsel.h 245 LIB_H += util/evsel.h
246 LIB_H += util/evlist.h 246 LIB_H += util/evlist.h
247 LIB_H += util/exec_cmd.h 247 LIB_H += util/exec_cmd.h
248 LIB_H += util/types.h 248 LIB_H += util/types.h
249 LIB_H += util/levenshtein.h 249 LIB_H += util/levenshtein.h
250 LIB_H += util/machine.h 250 LIB_H += util/machine.h
251 LIB_H += util/map.h 251 LIB_H += util/map.h
252 LIB_H += util/parse-options.h 252 LIB_H += util/parse-options.h
253 LIB_H += util/parse-events.h 253 LIB_H += util/parse-events.h
254 LIB_H += util/quote.h 254 LIB_H += util/quote.h
255 LIB_H += util/util.h 255 LIB_H += util/util.h
256 LIB_H += util/xyarray.h 256 LIB_H += util/xyarray.h
257 LIB_H += util/header.h 257 LIB_H += util/header.h
258 LIB_H += util/help.h 258 LIB_H += util/help.h
259 LIB_H += util/session.h 259 LIB_H += util/session.h
260 LIB_H += util/strbuf.h 260 LIB_H += util/strbuf.h
261 LIB_H += util/strlist.h 261 LIB_H += util/strlist.h
262 LIB_H += util/strfilter.h 262 LIB_H += util/strfilter.h
263 LIB_H += util/svghelper.h 263 LIB_H += util/svghelper.h
264 LIB_H += util/tool.h 264 LIB_H += util/tool.h
265 LIB_H += util/run-command.h 265 LIB_H += util/run-command.h
266 LIB_H += util/sigchain.h 266 LIB_H += util/sigchain.h
267 LIB_H += util/dso.h 267 LIB_H += util/dso.h
268 LIB_H += util/symbol.h 268 LIB_H += util/symbol.h
269 LIB_H += util/color.h 269 LIB_H += util/color.h
270 LIB_H += util/values.h 270 LIB_H += util/values.h
271 LIB_H += util/sort.h 271 LIB_H += util/sort.h
272 LIB_H += util/hist.h 272 LIB_H += util/hist.h
273 LIB_H += util/thread.h 273 LIB_H += util/thread.h
274 LIB_H += util/thread_map.h 274 LIB_H += util/thread_map.h
275 LIB_H += util/trace-event.h 275 LIB_H += util/trace-event.h
276 LIB_H += util/probe-finder.h 276 LIB_H += util/probe-finder.h
277 LIB_H += util/dwarf-aux.h 277 LIB_H += util/dwarf-aux.h
278 LIB_H += util/probe-event.h 278 LIB_H += util/probe-event.h
279 LIB_H += util/pstack.h 279 LIB_H += util/pstack.h
280 LIB_H += util/cpumap.h 280 LIB_H += util/cpumap.h
281 LIB_H += util/top.h 281 LIB_H += util/top.h
282 LIB_H += $(ARCH_INCLUDE) 282 LIB_H += $(ARCH_INCLUDE)
283 LIB_H += util/cgroup.h 283 LIB_H += util/cgroup.h
284 LIB_H += $(LIB_INCLUDE)traceevent/event-parse.h 284 LIB_H += $(LIB_INCLUDE)traceevent/event-parse.h
285 LIB_H += util/target.h 285 LIB_H += util/target.h
286 LIB_H += util/rblist.h 286 LIB_H += util/rblist.h
287 LIB_H += util/intlist.h 287 LIB_H += util/intlist.h
288 LIB_H += util/perf_regs.h 288 LIB_H += util/perf_regs.h
289 LIB_H += util/unwind.h 289 LIB_H += util/unwind.h
290 LIB_H += util/vdso.h 290 LIB_H += util/vdso.h
291 LIB_H += ui/helpline.h 291 LIB_H += ui/helpline.h
292 LIB_H += ui/progress.h 292 LIB_H += ui/progress.h
293 LIB_H += ui/util.h 293 LIB_H += ui/util.h
294 LIB_H += ui/ui.h 294 LIB_H += ui/ui.h
295 295
296 LIB_OBJS += $(OUTPUT)util/abspath.o 296 LIB_OBJS += $(OUTPUT)util/abspath.o
297 LIB_OBJS += $(OUTPUT)util/alias.o 297 LIB_OBJS += $(OUTPUT)util/alias.o
298 LIB_OBJS += $(OUTPUT)util/annotate.o 298 LIB_OBJS += $(OUTPUT)util/annotate.o
299 LIB_OBJS += $(OUTPUT)util/build-id.o 299 LIB_OBJS += $(OUTPUT)util/build-id.o
300 LIB_OBJS += $(OUTPUT)util/config.o 300 LIB_OBJS += $(OUTPUT)util/config.o
301 LIB_OBJS += $(OUTPUT)util/ctype.o 301 LIB_OBJS += $(OUTPUT)util/ctype.o
302 LIB_OBJS += $(OUTPUT)util/sysfs.o 302 LIB_OBJS += $(OUTPUT)util/sysfs.o
303 LIB_OBJS += $(OUTPUT)util/pmu.o 303 LIB_OBJS += $(OUTPUT)util/pmu.o
304 LIB_OBJS += $(OUTPUT)util/environment.o 304 LIB_OBJS += $(OUTPUT)util/environment.o
305 LIB_OBJS += $(OUTPUT)util/event.o 305 LIB_OBJS += $(OUTPUT)util/event.o
306 LIB_OBJS += $(OUTPUT)util/evlist.o 306 LIB_OBJS += $(OUTPUT)util/evlist.o
307 LIB_OBJS += $(OUTPUT)util/evsel.o 307 LIB_OBJS += $(OUTPUT)util/evsel.o
308 LIB_OBJS += $(OUTPUT)util/exec_cmd.o 308 LIB_OBJS += $(OUTPUT)util/exec_cmd.o
309 LIB_OBJS += $(OUTPUT)util/help.o 309 LIB_OBJS += $(OUTPUT)util/help.o
310 LIB_OBJS += $(OUTPUT)util/levenshtein.o 310 LIB_OBJS += $(OUTPUT)util/levenshtein.o
311 LIB_OBJS += $(OUTPUT)util/parse-options.o 311 LIB_OBJS += $(OUTPUT)util/parse-options.o
312 LIB_OBJS += $(OUTPUT)util/parse-events.o 312 LIB_OBJS += $(OUTPUT)util/parse-events.o
313 LIB_OBJS += $(OUTPUT)util/path.o 313 LIB_OBJS += $(OUTPUT)util/path.o
314 LIB_OBJS += $(OUTPUT)util/rbtree.o 314 LIB_OBJS += $(OUTPUT)util/rbtree.o
315 LIB_OBJS += $(OUTPUT)util/bitmap.o 315 LIB_OBJS += $(OUTPUT)util/bitmap.o
316 LIB_OBJS += $(OUTPUT)util/hweight.o 316 LIB_OBJS += $(OUTPUT)util/hweight.o
317 LIB_OBJS += $(OUTPUT)util/run-command.o 317 LIB_OBJS += $(OUTPUT)util/run-command.o
318 LIB_OBJS += $(OUTPUT)util/quote.o 318 LIB_OBJS += $(OUTPUT)util/quote.o
319 LIB_OBJS += $(OUTPUT)util/strbuf.o 319 LIB_OBJS += $(OUTPUT)util/strbuf.o
320 LIB_OBJS += $(OUTPUT)util/string.o 320 LIB_OBJS += $(OUTPUT)util/string.o
321 LIB_OBJS += $(OUTPUT)util/strlist.o 321 LIB_OBJS += $(OUTPUT)util/strlist.o
322 LIB_OBJS += $(OUTPUT)util/strfilter.o 322 LIB_OBJS += $(OUTPUT)util/strfilter.o
323 LIB_OBJS += $(OUTPUT)util/top.o 323 LIB_OBJS += $(OUTPUT)util/top.o
324 LIB_OBJS += $(OUTPUT)util/usage.o 324 LIB_OBJS += $(OUTPUT)util/usage.o
325 LIB_OBJS += $(OUTPUT)util/wrapper.o 325 LIB_OBJS += $(OUTPUT)util/wrapper.o
326 LIB_OBJS += $(OUTPUT)util/sigchain.o 326 LIB_OBJS += $(OUTPUT)util/sigchain.o
327 LIB_OBJS += $(OUTPUT)util/dso.o 327 LIB_OBJS += $(OUTPUT)util/dso.o
328 LIB_OBJS += $(OUTPUT)util/symbol.o 328 LIB_OBJS += $(OUTPUT)util/symbol.o
329 LIB_OBJS += $(OUTPUT)util/symbol-elf.o 329 LIB_OBJS += $(OUTPUT)util/symbol-elf.o
330 LIB_OBJS += $(OUTPUT)util/color.o 330 LIB_OBJS += $(OUTPUT)util/color.o
331 LIB_OBJS += $(OUTPUT)util/pager.o 331 LIB_OBJS += $(OUTPUT)util/pager.o
332 LIB_OBJS += $(OUTPUT)util/header.o 332 LIB_OBJS += $(OUTPUT)util/header.o
333 LIB_OBJS += $(OUTPUT)util/callchain.o 333 LIB_OBJS += $(OUTPUT)util/callchain.o
334 LIB_OBJS += $(OUTPUT)util/values.o 334 LIB_OBJS += $(OUTPUT)util/values.o
335 LIB_OBJS += $(OUTPUT)util/debug.o 335 LIB_OBJS += $(OUTPUT)util/debug.o
336 LIB_OBJS += $(OUTPUT)util/machine.o 336 LIB_OBJS += $(OUTPUT)util/machine.o
337 LIB_OBJS += $(OUTPUT)util/map.o 337 LIB_OBJS += $(OUTPUT)util/map.o
338 LIB_OBJS += $(OUTPUT)util/pstack.o 338 LIB_OBJS += $(OUTPUT)util/pstack.o
339 LIB_OBJS += $(OUTPUT)util/session.o 339 LIB_OBJS += $(OUTPUT)util/session.o
340 LIB_OBJS += $(OUTPUT)util/thread.o 340 LIB_OBJS += $(OUTPUT)util/thread.o
341 LIB_OBJS += $(OUTPUT)util/thread_map.o 341 LIB_OBJS += $(OUTPUT)util/thread_map.o
342 LIB_OBJS += $(OUTPUT)util/trace-event-parse.o 342 LIB_OBJS += $(OUTPUT)util/trace-event-parse.o
343 LIB_OBJS += $(OUTPUT)util/parse-events-flex.o 343 LIB_OBJS += $(OUTPUT)util/parse-events-flex.o
344 LIB_OBJS += $(OUTPUT)util/parse-events-bison.o 344 LIB_OBJS += $(OUTPUT)util/parse-events-bison.o
345 LIB_OBJS += $(OUTPUT)util/pmu-flex.o 345 LIB_OBJS += $(OUTPUT)util/pmu-flex.o
346 LIB_OBJS += $(OUTPUT)util/pmu-bison.o 346 LIB_OBJS += $(OUTPUT)util/pmu-bison.o
347 LIB_OBJS += $(OUTPUT)util/trace-event-read.o 347 LIB_OBJS += $(OUTPUT)util/trace-event-read.o
348 LIB_OBJS += $(OUTPUT)util/trace-event-info.o 348 LIB_OBJS += $(OUTPUT)util/trace-event-info.o
349 LIB_OBJS += $(OUTPUT)util/trace-event-scripting.o 349 LIB_OBJS += $(OUTPUT)util/trace-event-scripting.o
350 LIB_OBJS += $(OUTPUT)util/svghelper.o 350 LIB_OBJS += $(OUTPUT)util/svghelper.o
351 LIB_OBJS += $(OUTPUT)util/sort.o 351 LIB_OBJS += $(OUTPUT)util/sort.o
352 LIB_OBJS += $(OUTPUT)util/hist.o 352 LIB_OBJS += $(OUTPUT)util/hist.o
353 LIB_OBJS += $(OUTPUT)util/probe-event.o 353 LIB_OBJS += $(OUTPUT)util/probe-event.o
354 LIB_OBJS += $(OUTPUT)util/util.o 354 LIB_OBJS += $(OUTPUT)util/util.o
355 LIB_OBJS += $(OUTPUT)util/xyarray.o 355 LIB_OBJS += $(OUTPUT)util/xyarray.o
356 LIB_OBJS += $(OUTPUT)util/cpumap.o 356 LIB_OBJS += $(OUTPUT)util/cpumap.o
357 LIB_OBJS += $(OUTPUT)util/cgroup.o 357 LIB_OBJS += $(OUTPUT)util/cgroup.o
358 LIB_OBJS += $(OUTPUT)util/target.o 358 LIB_OBJS += $(OUTPUT)util/target.o
359 LIB_OBJS += $(OUTPUT)util/rblist.o 359 LIB_OBJS += $(OUTPUT)util/rblist.o
360 LIB_OBJS += $(OUTPUT)util/intlist.o 360 LIB_OBJS += $(OUTPUT)util/intlist.o
361 LIB_OBJS += $(OUTPUT)util/vdso.o 361 LIB_OBJS += $(OUTPUT)util/vdso.o
362 LIB_OBJS += $(OUTPUT)util/stat.o 362 LIB_OBJS += $(OUTPUT)util/stat.o
363 LIB_OBJS += $(OUTPUT)util/record.o 363 LIB_OBJS += $(OUTPUT)util/record.o
364 364
365 LIB_OBJS += $(OUTPUT)ui/setup.o 365 LIB_OBJS += $(OUTPUT)ui/setup.o
366 LIB_OBJS += $(OUTPUT)ui/helpline.o 366 LIB_OBJS += $(OUTPUT)ui/helpline.o
367 LIB_OBJS += $(OUTPUT)ui/progress.o 367 LIB_OBJS += $(OUTPUT)ui/progress.o
368 LIB_OBJS += $(OUTPUT)ui/util.o 368 LIB_OBJS += $(OUTPUT)ui/util.o
369 LIB_OBJS += $(OUTPUT)ui/hist.o 369 LIB_OBJS += $(OUTPUT)ui/hist.o
370 LIB_OBJS += $(OUTPUT)ui/stdio/hist.o 370 LIB_OBJS += $(OUTPUT)ui/stdio/hist.o
371 371
372 LIB_OBJS += $(OUTPUT)arch/common.o 372 LIB_OBJS += $(OUTPUT)arch/common.o
373 373
374 LIB_OBJS += $(OUTPUT)tests/parse-events.o 374 LIB_OBJS += $(OUTPUT)tests/parse-events.o
375 LIB_OBJS += $(OUTPUT)tests/dso-data.o 375 LIB_OBJS += $(OUTPUT)tests/dso-data.o
376 LIB_OBJS += $(OUTPUT)tests/attr.o 376 LIB_OBJS += $(OUTPUT)tests/attr.o
377 LIB_OBJS += $(OUTPUT)tests/vmlinux-kallsyms.o 377 LIB_OBJS += $(OUTPUT)tests/vmlinux-kallsyms.o
378 LIB_OBJS += $(OUTPUT)tests/open-syscall.o 378 LIB_OBJS += $(OUTPUT)tests/open-syscall.o
379 LIB_OBJS += $(OUTPUT)tests/open-syscall-all-cpus.o 379 LIB_OBJS += $(OUTPUT)tests/open-syscall-all-cpus.o
380 LIB_OBJS += $(OUTPUT)tests/open-syscall-tp-fields.o 380 LIB_OBJS += $(OUTPUT)tests/open-syscall-tp-fields.o
381 LIB_OBJS += $(OUTPUT)tests/mmap-basic.o 381 LIB_OBJS += $(OUTPUT)tests/mmap-basic.o
382 LIB_OBJS += $(OUTPUT)tests/perf-record.o 382 LIB_OBJS += $(OUTPUT)tests/perf-record.o
383 LIB_OBJS += $(OUTPUT)tests/rdpmc.o 383 LIB_OBJS += $(OUTPUT)tests/rdpmc.o
384 LIB_OBJS += $(OUTPUT)tests/evsel-roundtrip-name.o 384 LIB_OBJS += $(OUTPUT)tests/evsel-roundtrip-name.o
385 LIB_OBJS += $(OUTPUT)tests/evsel-tp-sched.o 385 LIB_OBJS += $(OUTPUT)tests/evsel-tp-sched.o
386 LIB_OBJS += $(OUTPUT)tests/pmu.o 386 LIB_OBJS += $(OUTPUT)tests/pmu.o
387 LIB_OBJS += $(OUTPUT)tests/hists_link.o 387 LIB_OBJS += $(OUTPUT)tests/hists_link.o
388 LIB_OBJS += $(OUTPUT)tests/python-use.o 388 LIB_OBJS += $(OUTPUT)tests/python-use.o
389 LIB_OBJS += $(OUTPUT)tests/bp_signal.o 389 LIB_OBJS += $(OUTPUT)tests/bp_signal.o
390 LIB_OBJS += $(OUTPUT)tests/bp_signal_overflow.o 390 LIB_OBJS += $(OUTPUT)tests/bp_signal_overflow.o
391 LIB_OBJS += $(OUTPUT)tests/task-exit.o 391 LIB_OBJS += $(OUTPUT)tests/task-exit.o
392 LIB_OBJS += $(OUTPUT)tests/sw-clock.o 392 LIB_OBJS += $(OUTPUT)tests/sw-clock.o
393 ifeq ($(ARCH),x86) 393 ifeq ($(ARCH),x86)
394 LIB_OBJS += $(OUTPUT)tests/perf-time-to-tsc.o 394 LIB_OBJS += $(OUTPUT)tests/perf-time-to-tsc.o
395 endif 395 endif
396 LIB_OBJS += $(OUTPUT)tests/code-reading.o 396 LIB_OBJS += $(OUTPUT)tests/code-reading.o
397 LIB_OBJS += $(OUTPUT)tests/sample-parsing.o
398 LIB_OBJS += $(OUTPUT)tests/parse-no-sample-id-all.o
397 399
398 BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o 400 BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o
399 BUILTIN_OBJS += $(OUTPUT)builtin-bench.o 401 BUILTIN_OBJS += $(OUTPUT)builtin-bench.o
400 # Benchmark modules 402 # Benchmark modules
401 BUILTIN_OBJS += $(OUTPUT)bench/sched-messaging.o 403 BUILTIN_OBJS += $(OUTPUT)bench/sched-messaging.o
402 BUILTIN_OBJS += $(OUTPUT)bench/sched-pipe.o 404 BUILTIN_OBJS += $(OUTPUT)bench/sched-pipe.o
403 ifeq ($(RAW_ARCH),x86_64) 405 ifeq ($(RAW_ARCH),x86_64)
404 BUILTIN_OBJS += $(OUTPUT)bench/mem-memcpy-x86-64-asm.o 406 BUILTIN_OBJS += $(OUTPUT)bench/mem-memcpy-x86-64-asm.o
405 BUILTIN_OBJS += $(OUTPUT)bench/mem-memset-x86-64-asm.o 407 BUILTIN_OBJS += $(OUTPUT)bench/mem-memset-x86-64-asm.o
406 endif 408 endif
407 BUILTIN_OBJS += $(OUTPUT)bench/mem-memcpy.o 409 BUILTIN_OBJS += $(OUTPUT)bench/mem-memcpy.o
408 BUILTIN_OBJS += $(OUTPUT)bench/mem-memset.o 410 BUILTIN_OBJS += $(OUTPUT)bench/mem-memset.o
409 411
410 BUILTIN_OBJS += $(OUTPUT)builtin-diff.o 412 BUILTIN_OBJS += $(OUTPUT)builtin-diff.o
411 BUILTIN_OBJS += $(OUTPUT)builtin-evlist.o 413 BUILTIN_OBJS += $(OUTPUT)builtin-evlist.o
412 BUILTIN_OBJS += $(OUTPUT)builtin-help.o 414 BUILTIN_OBJS += $(OUTPUT)builtin-help.o
413 BUILTIN_OBJS += $(OUTPUT)builtin-sched.o 415 BUILTIN_OBJS += $(OUTPUT)builtin-sched.o
414 BUILTIN_OBJS += $(OUTPUT)builtin-buildid-list.o 416 BUILTIN_OBJS += $(OUTPUT)builtin-buildid-list.o
415 BUILTIN_OBJS += $(OUTPUT)builtin-buildid-cache.o 417 BUILTIN_OBJS += $(OUTPUT)builtin-buildid-cache.o
416 BUILTIN_OBJS += $(OUTPUT)builtin-list.o 418 BUILTIN_OBJS += $(OUTPUT)builtin-list.o
417 BUILTIN_OBJS += $(OUTPUT)builtin-record.o 419 BUILTIN_OBJS += $(OUTPUT)builtin-record.o
418 BUILTIN_OBJS += $(OUTPUT)builtin-report.o 420 BUILTIN_OBJS += $(OUTPUT)builtin-report.o
419 BUILTIN_OBJS += $(OUTPUT)builtin-stat.o 421 BUILTIN_OBJS += $(OUTPUT)builtin-stat.o
420 BUILTIN_OBJS += $(OUTPUT)builtin-timechart.o 422 BUILTIN_OBJS += $(OUTPUT)builtin-timechart.o
421 BUILTIN_OBJS += $(OUTPUT)builtin-top.o 423 BUILTIN_OBJS += $(OUTPUT)builtin-top.o
422 BUILTIN_OBJS += $(OUTPUT)builtin-script.o 424 BUILTIN_OBJS += $(OUTPUT)builtin-script.o
423 BUILTIN_OBJS += $(OUTPUT)builtin-probe.o 425 BUILTIN_OBJS += $(OUTPUT)builtin-probe.o
424 BUILTIN_OBJS += $(OUTPUT)builtin-kmem.o 426 BUILTIN_OBJS += $(OUTPUT)builtin-kmem.o
425 BUILTIN_OBJS += $(OUTPUT)builtin-lock.o 427 BUILTIN_OBJS += $(OUTPUT)builtin-lock.o
426 BUILTIN_OBJS += $(OUTPUT)builtin-kvm.o 428 BUILTIN_OBJS += $(OUTPUT)builtin-kvm.o
427 BUILTIN_OBJS += $(OUTPUT)builtin-inject.o 429 BUILTIN_OBJS += $(OUTPUT)builtin-inject.o
428 BUILTIN_OBJS += $(OUTPUT)tests/builtin-test.o 430 BUILTIN_OBJS += $(OUTPUT)tests/builtin-test.o
429 BUILTIN_OBJS += $(OUTPUT)builtin-mem.o 431 BUILTIN_OBJS += $(OUTPUT)builtin-mem.o
430 432
431 PERFLIBS = $(LIB_FILE) $(LIBLK) $(LIBTRACEEVENT) 433 PERFLIBS = $(LIB_FILE) $(LIBLK) $(LIBTRACEEVENT)
432 434
433 # We choose to avoid "if .. else if .. else .. endif endif" 435 # We choose to avoid "if .. else if .. else .. endif endif"
434 # because maintaining the nesting to match is a pain. If 436 # because maintaining the nesting to match is a pain. If
435 # we had "elif" things would have been much nicer... 437 # we had "elif" things would have been much nicer...
436 438
437 -include arch/$(ARCH)/Makefile 439 -include arch/$(ARCH)/Makefile
438 440
439 ifneq ($(OUTPUT),) 441 ifneq ($(OUTPUT),)
440 CFLAGS += -I$(OUTPUT) 442 CFLAGS += -I$(OUTPUT)
441 endif 443 endif
442 LIB_OBJS += $(OUTPUT)tests/sample-parsing.o
443 444
444 ifdef NO_LIBELF 445 ifdef NO_LIBELF
445 EXTLIBS := $(filter-out -lelf,$(EXTLIBS)) 446 EXTLIBS := $(filter-out -lelf,$(EXTLIBS))
446 447
447 # Remove ELF/DWARF dependent codes 448 # Remove ELF/DWARF dependent codes
448 LIB_OBJS := $(filter-out $(OUTPUT)util/symbol-elf.o,$(LIB_OBJS)) 449 LIB_OBJS := $(filter-out $(OUTPUT)util/symbol-elf.o,$(LIB_OBJS))
449 LIB_OBJS := $(filter-out $(OUTPUT)util/dwarf-aux.o,$(LIB_OBJS)) 450 LIB_OBJS := $(filter-out $(OUTPUT)util/dwarf-aux.o,$(LIB_OBJS))
450 LIB_OBJS := $(filter-out $(OUTPUT)util/probe-event.o,$(LIB_OBJS)) 451 LIB_OBJS := $(filter-out $(OUTPUT)util/probe-event.o,$(LIB_OBJS))
451 LIB_OBJS := $(filter-out $(OUTPUT)util/probe-finder.o,$(LIB_OBJS)) 452 LIB_OBJS := $(filter-out $(OUTPUT)util/probe-finder.o,$(LIB_OBJS))
452 453
453 BUILTIN_OBJS := $(filter-out $(OUTPUT)builtin-probe.o,$(BUILTIN_OBJS)) 454 BUILTIN_OBJS := $(filter-out $(OUTPUT)builtin-probe.o,$(BUILTIN_OBJS))
454 455
455 # Use minimal symbol handling 456 # Use minimal symbol handling
456 LIB_OBJS += $(OUTPUT)util/symbol-minimal.o 457 LIB_OBJS += $(OUTPUT)util/symbol-minimal.o
457 458
458 else # NO_LIBELF 459 else # NO_LIBELF
459 ifndef NO_DWARF 460 ifndef NO_DWARF
460 LIB_OBJS += $(OUTPUT)util/probe-finder.o 461 LIB_OBJS += $(OUTPUT)util/probe-finder.o
461 LIB_OBJS += $(OUTPUT)util/dwarf-aux.o 462 LIB_OBJS += $(OUTPUT)util/dwarf-aux.o
462 endif # NO_DWARF 463 endif # NO_DWARF
463 endif # NO_LIBELF 464 endif # NO_LIBELF
464 465
465 ifndef NO_LIBUNWIND 466 ifndef NO_LIBUNWIND
466 LIB_OBJS += $(OUTPUT)util/unwind.o 467 LIB_OBJS += $(OUTPUT)util/unwind.o
467 endif 468 endif
468 LIB_OBJS += $(OUTPUT)tests/keep-tracking.o 469 LIB_OBJS += $(OUTPUT)tests/keep-tracking.o
469 470
470 ifndef NO_LIBAUDIT 471 ifndef NO_LIBAUDIT
471 BUILTIN_OBJS += $(OUTPUT)builtin-trace.o 472 BUILTIN_OBJS += $(OUTPUT)builtin-trace.o
472 endif 473 endif
473 474
474 ifndef NO_SLANG 475 ifndef NO_SLANG
475 LIB_OBJS += $(OUTPUT)ui/browser.o 476 LIB_OBJS += $(OUTPUT)ui/browser.o
476 LIB_OBJS += $(OUTPUT)ui/browsers/annotate.o 477 LIB_OBJS += $(OUTPUT)ui/browsers/annotate.o
477 LIB_OBJS += $(OUTPUT)ui/browsers/hists.o 478 LIB_OBJS += $(OUTPUT)ui/browsers/hists.o
478 LIB_OBJS += $(OUTPUT)ui/browsers/map.o 479 LIB_OBJS += $(OUTPUT)ui/browsers/map.o
479 LIB_OBJS += $(OUTPUT)ui/browsers/scripts.o 480 LIB_OBJS += $(OUTPUT)ui/browsers/scripts.o
480 LIB_OBJS += $(OUTPUT)ui/tui/setup.o 481 LIB_OBJS += $(OUTPUT)ui/tui/setup.o
481 LIB_OBJS += $(OUTPUT)ui/tui/util.o 482 LIB_OBJS += $(OUTPUT)ui/tui/util.o
482 LIB_OBJS += $(OUTPUT)ui/tui/helpline.o 483 LIB_OBJS += $(OUTPUT)ui/tui/helpline.o
483 LIB_OBJS += $(OUTPUT)ui/tui/progress.o 484 LIB_OBJS += $(OUTPUT)ui/tui/progress.o
484 LIB_H += ui/browser.h 485 LIB_H += ui/browser.h
485 LIB_H += ui/browsers/map.h 486 LIB_H += ui/browsers/map.h
486 LIB_H += ui/keysyms.h 487 LIB_H += ui/keysyms.h
487 LIB_H += ui/libslang.h 488 LIB_H += ui/libslang.h
488 endif 489 endif
489 490
490 ifndef NO_GTK2 491 ifndef NO_GTK2
491 LIB_OBJS += $(OUTPUT)ui/gtk/browser.o 492 LIB_OBJS += $(OUTPUT)ui/gtk/browser.o
492 LIB_OBJS += $(OUTPUT)ui/gtk/hists.o 493 LIB_OBJS += $(OUTPUT)ui/gtk/hists.o
493 LIB_OBJS += $(OUTPUT)ui/gtk/setup.o 494 LIB_OBJS += $(OUTPUT)ui/gtk/setup.o
494 LIB_OBJS += $(OUTPUT)ui/gtk/util.o 495 LIB_OBJS += $(OUTPUT)ui/gtk/util.o
495 LIB_OBJS += $(OUTPUT)ui/gtk/helpline.o 496 LIB_OBJS += $(OUTPUT)ui/gtk/helpline.o
496 LIB_OBJS += $(OUTPUT)ui/gtk/progress.o 497 LIB_OBJS += $(OUTPUT)ui/gtk/progress.o
497 LIB_OBJS += $(OUTPUT)ui/gtk/annotate.o 498 LIB_OBJS += $(OUTPUT)ui/gtk/annotate.o
498 endif 499 endif
499 500
500 ifndef NO_LIBPERL 501 ifndef NO_LIBPERL
501 LIB_OBJS += $(OUTPUT)util/scripting-engines/trace-event-perl.o 502 LIB_OBJS += $(OUTPUT)util/scripting-engines/trace-event-perl.o
502 LIB_OBJS += $(OUTPUT)scripts/perl/Perf-Trace-Util/Context.o 503 LIB_OBJS += $(OUTPUT)scripts/perl/Perf-Trace-Util/Context.o
503 endif 504 endif
504 505
505 ifndef NO_LIBPYTHON 506 ifndef NO_LIBPYTHON
506 LIB_OBJS += $(OUTPUT)util/scripting-engines/trace-event-python.o 507 LIB_OBJS += $(OUTPUT)util/scripting-engines/trace-event-python.o
507 LIB_OBJS += $(OUTPUT)scripts/python/Perf-Trace-Util/Context.o 508 LIB_OBJS += $(OUTPUT)scripts/python/Perf-Trace-Util/Context.o
508 endif 509 endif
509 510
510 ifeq ($(NO_PERF_REGS),0) 511 ifeq ($(NO_PERF_REGS),0)
511 ifeq ($(ARCH),x86) 512 ifeq ($(ARCH),x86)
512 LIB_H += arch/x86/include/perf_regs.h 513 LIB_H += arch/x86/include/perf_regs.h
513 endif 514 endif
514 endif 515 endif
515 516
516 ifndef NO_LIBNUMA 517 ifndef NO_LIBNUMA
517 BUILTIN_OBJS += $(OUTPUT)bench/numa.o 518 BUILTIN_OBJS += $(OUTPUT)bench/numa.o
518 endif 519 endif
519 520
520 ifdef ASCIIDOC8 521 ifdef ASCIIDOC8
521 export ASCIIDOC8 522 export ASCIIDOC8
522 endif 523 endif
523 524
524 LIBS = -Wl,--whole-archive $(PERFLIBS) -Wl,--no-whole-archive -Wl,--start-group $(EXTLIBS) -Wl,--end-group 525 LIBS = -Wl,--whole-archive $(PERFLIBS) -Wl,--no-whole-archive -Wl,--start-group $(EXTLIBS) -Wl,--end-group
525 526
526 export INSTALL SHELL_PATH 527 export INSTALL SHELL_PATH
527 528
528 ### Build rules 529 ### Build rules
529 530
530 SHELL = $(SHELL_PATH) 531 SHELL = $(SHELL_PATH)
531 532
532 all: shell_compatibility_test $(ALL_PROGRAMS) $(LANG_BINDINGS) $(OTHER_PROGRAMS) 533 all: shell_compatibility_test $(ALL_PROGRAMS) $(LANG_BINDINGS) $(OTHER_PROGRAMS)
533 534
534 please_set_SHELL_PATH_to_a_more_modern_shell: 535 please_set_SHELL_PATH_to_a_more_modern_shell:
535 @$$(:) 536 @$$(:)
536 537
537 shell_compatibility_test: please_set_SHELL_PATH_to_a_more_modern_shell 538 shell_compatibility_test: please_set_SHELL_PATH_to_a_more_modern_shell
538 539
539 strip: $(PROGRAMS) $(OUTPUT)perf 540 strip: $(PROGRAMS) $(OUTPUT)perf
540 $(STRIP) $(STRIP_OPTS) $(PROGRAMS) $(OUTPUT)perf 541 $(STRIP) $(STRIP_OPTS) $(PROGRAMS) $(OUTPUT)perf
541 542
542 $(OUTPUT)perf.o: perf.c $(OUTPUT)common-cmds.h $(OUTPUT)PERF-CFLAGS 543 $(OUTPUT)perf.o: perf.c $(OUTPUT)common-cmds.h $(OUTPUT)PERF-CFLAGS
543 $(QUIET_CC)$(CC) -include $(OUTPUT)PERF-VERSION-FILE \ 544 $(QUIET_CC)$(CC) -include $(OUTPUT)PERF-VERSION-FILE \
544 '-DPERF_HTML_PATH="$(htmldir_SQ)"' \ 545 '-DPERF_HTML_PATH="$(htmldir_SQ)"' \
545 $(CFLAGS) -c $(filter %.c,$^) -o $@ 546 $(CFLAGS) -c $(filter %.c,$^) -o $@
546 547
547 $(OUTPUT)perf: $(OUTPUT)perf.o $(BUILTIN_OBJS) $(PERFLIBS) 548 $(OUTPUT)perf: $(OUTPUT)perf.o $(BUILTIN_OBJS) $(PERFLIBS)
548 $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $(OUTPUT)perf.o \ 549 $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $(OUTPUT)perf.o \
549 $(BUILTIN_OBJS) $(LIBS) -o $@ 550 $(BUILTIN_OBJS) $(LIBS) -o $@
550 551
551 $(OUTPUT)builtin-help.o: builtin-help.c $(OUTPUT)common-cmds.h $(OUTPUT)PERF-CFLAGS 552 $(OUTPUT)builtin-help.o: builtin-help.c $(OUTPUT)common-cmds.h $(OUTPUT)PERF-CFLAGS
552 $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) \ 553 $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) \
553 '-DPERF_HTML_PATH="$(htmldir_SQ)"' \ 554 '-DPERF_HTML_PATH="$(htmldir_SQ)"' \
554 '-DPERF_MAN_PATH="$(mandir_SQ)"' \ 555 '-DPERF_MAN_PATH="$(mandir_SQ)"' \
555 '-DPERF_INFO_PATH="$(infodir_SQ)"' $< 556 '-DPERF_INFO_PATH="$(infodir_SQ)"' $<
556 557
557 $(OUTPUT)builtin-timechart.o: builtin-timechart.c $(OUTPUT)common-cmds.h $(OUTPUT)PERF-CFLAGS 558 $(OUTPUT)builtin-timechart.o: builtin-timechart.c $(OUTPUT)common-cmds.h $(OUTPUT)PERF-CFLAGS
558 $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) \ 559 $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) \
559 '-DPERF_HTML_PATH="$(htmldir_SQ)"' \ 560 '-DPERF_HTML_PATH="$(htmldir_SQ)"' \
560 '-DPERF_MAN_PATH="$(mandir_SQ)"' \ 561 '-DPERF_MAN_PATH="$(mandir_SQ)"' \
561 '-DPERF_INFO_PATH="$(infodir_SQ)"' $< 562 '-DPERF_INFO_PATH="$(infodir_SQ)"' $<
562 563
563 $(OUTPUT)common-cmds.h: util/generate-cmdlist.sh command-list.txt 564 $(OUTPUT)common-cmds.h: util/generate-cmdlist.sh command-list.txt
564 565
565 $(OUTPUT)common-cmds.h: $(wildcard Documentation/perf-*.txt) 566 $(OUTPUT)common-cmds.h: $(wildcard Documentation/perf-*.txt)
566 $(QUIET_GEN). util/generate-cmdlist.sh > $@+ && mv $@+ $@ 567 $(QUIET_GEN). util/generate-cmdlist.sh > $@+ && mv $@+ $@
567 568
568 $(SCRIPTS) : % : %.sh 569 $(SCRIPTS) : % : %.sh
569 $(QUIET_GEN)$(INSTALL) '$@.sh' '$(OUTPUT)$@' 570 $(QUIET_GEN)$(INSTALL) '$@.sh' '$(OUTPUT)$@'
570 571
571 # These can record PERF_VERSION 572 # These can record PERF_VERSION
572 $(OUTPUT)perf.o perf.spec \ 573 $(OUTPUT)perf.o perf.spec \
573 $(SCRIPTS) \ 574 $(SCRIPTS) \
574 : $(OUTPUT)PERF-VERSION-FILE 575 : $(OUTPUT)PERF-VERSION-FILE
575 576
576 .SUFFIXES: 577 .SUFFIXES:
577 .SUFFIXES: .o .c .S .s 578 .SUFFIXES: .o .c .S .s
578 579
579 # These two need to be here so that when O= is not used they take precedence 580 # These two need to be here so that when O= is not used they take precedence
580 # over the general rule for .o 581 # over the general rule for .o
581 582
582 $(OUTPUT)util/%-flex.o: $(OUTPUT)util/%-flex.c $(OUTPUT)PERF-CFLAGS 583 $(OUTPUT)util/%-flex.o: $(OUTPUT)util/%-flex.c $(OUTPUT)PERF-CFLAGS
583 $(QUIET_CC)$(CC) -o $@ -c -Iutil/ $(CFLAGS) -w $< 584 $(QUIET_CC)$(CC) -o $@ -c -Iutil/ $(CFLAGS) -w $<
584 585
585 $(OUTPUT)util/%-bison.o: $(OUTPUT)util/%-bison.c $(OUTPUT)PERF-CFLAGS 586 $(OUTPUT)util/%-bison.o: $(OUTPUT)util/%-bison.c $(OUTPUT)PERF-CFLAGS
586 $(QUIET_CC)$(CC) -o $@ -c -Iutil/ $(CFLAGS) -DYYENABLE_NLS=0 -DYYLTYPE_IS_TRIVIAL=0 -w $< 587 $(QUIET_CC)$(CC) -o $@ -c -Iutil/ $(CFLAGS) -DYYENABLE_NLS=0 -DYYLTYPE_IS_TRIVIAL=0 -w $<
587 588
588 $(OUTPUT)%.o: %.c $(OUTPUT)PERF-CFLAGS 589 $(OUTPUT)%.o: %.c $(OUTPUT)PERF-CFLAGS
589 $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) $< 590 $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) $<
590 $(OUTPUT)%.i: %.c $(OUTPUT)PERF-CFLAGS 591 $(OUTPUT)%.i: %.c $(OUTPUT)PERF-CFLAGS
591 $(QUIET_CC)$(CC) -o $@ -E $(CFLAGS) $< 592 $(QUIET_CC)$(CC) -o $@ -E $(CFLAGS) $<
592 $(OUTPUT)%.s: %.c $(OUTPUT)PERF-CFLAGS 593 $(OUTPUT)%.s: %.c $(OUTPUT)PERF-CFLAGS
593 $(QUIET_CC)$(CC) -o $@ -S $(CFLAGS) $< 594 $(QUIET_CC)$(CC) -o $@ -S $(CFLAGS) $<
594 $(OUTPUT)%.o: %.S 595 $(OUTPUT)%.o: %.S
595 $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) $< 596 $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) $<
596 $(OUTPUT)%.s: %.S 597 $(OUTPUT)%.s: %.S
597 $(QUIET_CC)$(CC) -o $@ -E $(CFLAGS) $< 598 $(QUIET_CC)$(CC) -o $@ -E $(CFLAGS) $<
598 599
599 $(OUTPUT)util/exec_cmd.o: util/exec_cmd.c $(OUTPUT)PERF-CFLAGS 600 $(OUTPUT)util/exec_cmd.o: util/exec_cmd.c $(OUTPUT)PERF-CFLAGS
600 $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) \ 601 $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) \
601 '-DPERF_EXEC_PATH="$(perfexecdir_SQ)"' \ 602 '-DPERF_EXEC_PATH="$(perfexecdir_SQ)"' \
602 '-DPREFIX="$(prefix_SQ)"' \ 603 '-DPREFIX="$(prefix_SQ)"' \
603 $< 604 $<
604 605
605 $(OUTPUT)tests/attr.o: tests/attr.c $(OUTPUT)PERF-CFLAGS 606 $(OUTPUT)tests/attr.o: tests/attr.c $(OUTPUT)PERF-CFLAGS
606 $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) \ 607 $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) \
607 '-DBINDIR="$(bindir_SQ)"' -DPYTHON='"$(PYTHON_WORD)"' \ 608 '-DBINDIR="$(bindir_SQ)"' -DPYTHON='"$(PYTHON_WORD)"' \
608 $< 609 $<
609 610
610 $(OUTPUT)tests/python-use.o: tests/python-use.c $(OUTPUT)PERF-CFLAGS 611 $(OUTPUT)tests/python-use.o: tests/python-use.c $(OUTPUT)PERF-CFLAGS
611 $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) \ 612 $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) \
612 -DPYTHONPATH='"$(OUTPUT)python"' \ 613 -DPYTHONPATH='"$(OUTPUT)python"' \
613 -DPYTHON='"$(PYTHON_WORD)"' \ 614 -DPYTHON='"$(PYTHON_WORD)"' \
614 $< 615 $<
615 616
616 $(OUTPUT)util/config.o: util/config.c $(OUTPUT)PERF-CFLAGS 617 $(OUTPUT)util/config.o: util/config.c $(OUTPUT)PERF-CFLAGS
617 $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $< 618 $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
618 619
619 $(OUTPUT)ui/browser.o: ui/browser.c $(OUTPUT)PERF-CFLAGS 620 $(OUTPUT)ui/browser.o: ui/browser.c $(OUTPUT)PERF-CFLAGS
620 $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -DENABLE_SLFUTURE_CONST $< 621 $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -DENABLE_SLFUTURE_CONST $<
621 622
622 $(OUTPUT)ui/browsers/annotate.o: ui/browsers/annotate.c $(OUTPUT)PERF-CFLAGS 623 $(OUTPUT)ui/browsers/annotate.o: ui/browsers/annotate.c $(OUTPUT)PERF-CFLAGS
623 $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -DENABLE_SLFUTURE_CONST $< 624 $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -DENABLE_SLFUTURE_CONST $<
624 625
625 $(OUTPUT)ui/browsers/hists.o: ui/browsers/hists.c $(OUTPUT)PERF-CFLAGS 626 $(OUTPUT)ui/browsers/hists.o: ui/browsers/hists.c $(OUTPUT)PERF-CFLAGS
626 $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -DENABLE_SLFUTURE_CONST $< 627 $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -DENABLE_SLFUTURE_CONST $<
627 628
628 $(OUTPUT)ui/browsers/map.o: ui/browsers/map.c $(OUTPUT)PERF-CFLAGS 629 $(OUTPUT)ui/browsers/map.o: ui/browsers/map.c $(OUTPUT)PERF-CFLAGS
629 $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -DENABLE_SLFUTURE_CONST $< 630 $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -DENABLE_SLFUTURE_CONST $<
630 631
631 $(OUTPUT)ui/browsers/scripts.o: ui/browsers/scripts.c $(OUTPUT)PERF-CFLAGS 632 $(OUTPUT)ui/browsers/scripts.o: ui/browsers/scripts.c $(OUTPUT)PERF-CFLAGS
632 $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -DENABLE_SLFUTURE_CONST $< 633 $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -DENABLE_SLFUTURE_CONST $<
633 634
634 $(OUTPUT)util/rbtree.o: ../../lib/rbtree.c $(OUTPUT)PERF-CFLAGS 635 $(OUTPUT)util/rbtree.o: ../../lib/rbtree.c $(OUTPUT)PERF-CFLAGS
635 $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -Wno-unused-parameter -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $< 636 $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -Wno-unused-parameter -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
636 637
637 $(OUTPUT)util/parse-events.o: util/parse-events.c $(OUTPUT)PERF-CFLAGS 638 $(OUTPUT)util/parse-events.o: util/parse-events.c $(OUTPUT)PERF-CFLAGS
638 $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -Wno-redundant-decls $< 639 $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -Wno-redundant-decls $<
639 640
640 $(OUTPUT)util/scripting-engines/trace-event-perl.o: util/scripting-engines/trace-event-perl.c $(OUTPUT)PERF-CFLAGS 641 $(OUTPUT)util/scripting-engines/trace-event-perl.o: util/scripting-engines/trace-event-perl.c $(OUTPUT)PERF-CFLAGS
641 $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-undef -Wno-switch-default $< 642 $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-undef -Wno-switch-default $<
642 643
643 $(OUTPUT)scripts/perl/Perf-Trace-Util/Context.o: scripts/perl/Perf-Trace-Util/Context.c $(OUTPUT)PERF-CFLAGS 644 $(OUTPUT)scripts/perl/Perf-Trace-Util/Context.o: scripts/perl/Perf-Trace-Util/Context.c $(OUTPUT)PERF-CFLAGS
644 $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs -Wno-undef -Wno-switch-default $< 645 $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs -Wno-undef -Wno-switch-default $<
645 646
646 $(OUTPUT)util/scripting-engines/trace-event-python.o: util/scripting-engines/trace-event-python.c $(OUTPUT)PERF-CFLAGS 647 $(OUTPUT)util/scripting-engines/trace-event-python.o: util/scripting-engines/trace-event-python.c $(OUTPUT)PERF-CFLAGS
647 $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow $< 648 $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow $<
648 649
649 $(OUTPUT)scripts/python/Perf-Trace-Util/Context.o: scripts/python/Perf-Trace-Util/Context.c $(OUTPUT)PERF-CFLAGS 650 $(OUTPUT)scripts/python/Perf-Trace-Util/Context.o: scripts/python/Perf-Trace-Util/Context.c $(OUTPUT)PERF-CFLAGS
650 $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs $< 651 $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs $<
651 652
652 $(OUTPUT)perf-%: %.o $(PERFLIBS) 653 $(OUTPUT)perf-%: %.o $(PERFLIBS)
653 $(QUIET_LINK)$(CC) $(CFLAGS) -o $@ $(LDFLAGS) $(filter %.o,$^) $(LIBS) 654 $(QUIET_LINK)$(CC) $(CFLAGS) -o $@ $(LDFLAGS) $(filter %.o,$^) $(LIBS)
654 655
655 $(LIB_OBJS) $(BUILTIN_OBJS): $(LIB_H) 656 $(LIB_OBJS) $(BUILTIN_OBJS): $(LIB_H)
656 $(patsubst perf-%,%.o,$(PROGRAMS)): $(LIB_H) $(wildcard */*.h) 657 $(patsubst perf-%,%.o,$(PROGRAMS)): $(LIB_H) $(wildcard */*.h)
657 658
658 # we compile into subdirectories. if the target directory is not the source directory, they might not exists. So 659 # we compile into subdirectories. if the target directory is not the source directory, they might not exists. So
659 # we depend the various files onto their directories. 660 # we depend the various files onto their directories.
660 DIRECTORY_DEPS = $(LIB_OBJS) $(BUILTIN_OBJS) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h 661 DIRECTORY_DEPS = $(LIB_OBJS) $(BUILTIN_OBJS) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h
661 $(DIRECTORY_DEPS): | $(sort $(dir $(DIRECTORY_DEPS))) 662 $(DIRECTORY_DEPS): | $(sort $(dir $(DIRECTORY_DEPS)))
662 # In the second step, we make a rule to actually create these directories 663 # In the second step, we make a rule to actually create these directories
663 $(sort $(dir $(DIRECTORY_DEPS))): 664 $(sort $(dir $(DIRECTORY_DEPS))):
664 $(QUIET_MKDIR)$(MKDIR) -p $@ 2>/dev/null 665 $(QUIET_MKDIR)$(MKDIR) -p $@ 2>/dev/null
665 666
666 $(LIB_FILE): $(LIB_OBJS) 667 $(LIB_FILE): $(LIB_OBJS)
667 $(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(LIB_OBJS) 668 $(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(LIB_OBJS)
668 669
669 # libtraceevent.a 670 # libtraceevent.a
670 $(LIBTRACEEVENT): 671 $(LIBTRACEEVENT):
671 $(QUIET_SUBDIR0)$(TRACE_EVENT_DIR) $(QUIET_SUBDIR1) O=$(OUTPUT) libtraceevent.a 672 $(QUIET_SUBDIR0)$(TRACE_EVENT_DIR) $(QUIET_SUBDIR1) O=$(OUTPUT) libtraceevent.a
672 673
673 $(LIBTRACEEVENT)-clean: 674 $(LIBTRACEEVENT)-clean:
674 $(QUIET_SUBDIR0)$(TRACE_EVENT_DIR) $(QUIET_SUBDIR1) O=$(OUTPUT) clean 675 $(QUIET_SUBDIR0)$(TRACE_EVENT_DIR) $(QUIET_SUBDIR1) O=$(OUTPUT) clean
675 676
676 # if subdir is set, we've been called from above so target has been built 677 # if subdir is set, we've been called from above so target has been built
677 # already 678 # already
678 $(LIBLK): 679 $(LIBLK):
679 ifeq ($(subdir),) 680 ifeq ($(subdir),)
680 $(QUIET_SUBDIR0)$(LK_DIR) $(QUIET_SUBDIR1) O=$(OUTPUT) liblk.a 681 $(QUIET_SUBDIR0)$(LK_DIR) $(QUIET_SUBDIR1) O=$(OUTPUT) liblk.a
681 endif 682 endif
682 683
683 $(LIBLK)-clean: 684 $(LIBLK)-clean:
684 ifeq ($(subdir),) 685 ifeq ($(subdir),)
685 $(QUIET_SUBDIR0)$(LK_DIR) $(QUIET_SUBDIR1) O=$(OUTPUT) clean 686 $(QUIET_SUBDIR0)$(LK_DIR) $(QUIET_SUBDIR1) O=$(OUTPUT) clean
686 endif 687 endif
687 688
688 help: 689 help:
689 @echo 'Perf make targets:' 690 @echo 'Perf make targets:'
690 @echo ' doc - make *all* documentation (see below)' 691 @echo ' doc - make *all* documentation (see below)'
691 @echo ' man - make manpage documentation (access with man <foo>)' 692 @echo ' man - make manpage documentation (access with man <foo>)'
692 @echo ' html - make html documentation' 693 @echo ' html - make html documentation'
693 @echo ' info - make GNU info documentation (access with info <foo>)' 694 @echo ' info - make GNU info documentation (access with info <foo>)'
694 @echo ' pdf - make pdf documentation' 695 @echo ' pdf - make pdf documentation'
695 @echo ' TAGS - use etags to make tag information for source browsing' 696 @echo ' TAGS - use etags to make tag information for source browsing'
696 @echo ' tags - use ctags to make tag information for source browsing' 697 @echo ' tags - use ctags to make tag information for source browsing'
697 @echo ' cscope - use cscope to make interactive browsing database' 698 @echo ' cscope - use cscope to make interactive browsing database'
698 @echo '' 699 @echo ''
699 @echo 'Perf install targets:' 700 @echo 'Perf install targets:'
700 @echo ' NOTE: documentation build requires asciidoc, xmlto packages to be installed' 701 @echo ' NOTE: documentation build requires asciidoc, xmlto packages to be installed'
701 @echo ' HINT: use "make prefix=<path> <install target>" to install to a particular' 702 @echo ' HINT: use "make prefix=<path> <install target>" to install to a particular'
702 @echo ' path like make prefix=/usr/local install install-doc' 703 @echo ' path like make prefix=/usr/local install install-doc'
703 @echo ' install - install compiled binaries' 704 @echo ' install - install compiled binaries'
704 @echo ' install-doc - install *all* documentation' 705 @echo ' install-doc - install *all* documentation'
705 @echo ' install-man - install manpage documentation' 706 @echo ' install-man - install manpage documentation'
706 @echo ' install-html - install html documentation' 707 @echo ' install-html - install html documentation'
707 @echo ' install-info - install GNU info documentation' 708 @echo ' install-info - install GNU info documentation'
708 @echo ' install-pdf - install pdf documentation' 709 @echo ' install-pdf - install pdf documentation'
709 @echo '' 710 @echo ''
710 @echo ' quick-install-doc - alias for quick-install-man' 711 @echo ' quick-install-doc - alias for quick-install-man'
711 @echo ' quick-install-man - install the documentation quickly' 712 @echo ' quick-install-man - install the documentation quickly'
712 @echo ' quick-install-html - install the html documentation quickly' 713 @echo ' quick-install-html - install the html documentation quickly'
713 @echo '' 714 @echo ''
714 @echo 'Perf maintainer targets:' 715 @echo 'Perf maintainer targets:'
715 @echo ' clean - clean all binary objects and build output' 716 @echo ' clean - clean all binary objects and build output'
716 717
717 718
718 DOC_TARGETS := doc man html info pdf 719 DOC_TARGETS := doc man html info pdf
719 720
720 INSTALL_DOC_TARGETS := $(patsubst %,install-%,$(DOC_TARGETS)) try-install-man 721 INSTALL_DOC_TARGETS := $(patsubst %,install-%,$(DOC_TARGETS)) try-install-man
721 INSTALL_DOC_TARGETS += quick-install-doc quick-install-man quick-install-html 722 INSTALL_DOC_TARGETS += quick-install-doc quick-install-man quick-install-html
722 723
723 # 'make doc' should call 'make -C Documentation all' 724 # 'make doc' should call 'make -C Documentation all'
724 $(DOC_TARGETS): 725 $(DOC_TARGETS):
725 $(QUIET_SUBDIR0)Documentation $(QUIET_SUBDIR1) $(@:doc=all) 726 $(QUIET_SUBDIR0)Documentation $(QUIET_SUBDIR1) $(@:doc=all)
726 727
727 TAGS: 728 TAGS:
728 $(RM) TAGS 729 $(RM) TAGS
729 $(FIND) . -name '*.[hcS]' -print | xargs etags -a 730 $(FIND) . -name '*.[hcS]' -print | xargs etags -a
730 731
731 tags: 732 tags:
732 $(RM) tags 733 $(RM) tags
733 $(FIND) . -name '*.[hcS]' -print | xargs ctags -a 734 $(FIND) . -name '*.[hcS]' -print | xargs ctags -a
734 735
735 cscope: 736 cscope:
736 $(RM) cscope* 737 $(RM) cscope*
737 $(FIND) . -name '*.[hcS]' -print | xargs cscope -b 738 $(FIND) . -name '*.[hcS]' -print | xargs cscope -b
738 739
739 ### Detect prefix changes 740 ### Detect prefix changes
740 TRACK_CFLAGS = $(subst ','\'',$(CFLAGS)):\ 741 TRACK_CFLAGS = $(subst ','\'',$(CFLAGS)):\
741 $(bindir_SQ):$(perfexecdir_SQ):$(template_dir_SQ):$(prefix_SQ) 742 $(bindir_SQ):$(perfexecdir_SQ):$(template_dir_SQ):$(prefix_SQ)
742 743
743 $(OUTPUT)PERF-CFLAGS: .FORCE-PERF-CFLAGS 744 $(OUTPUT)PERF-CFLAGS: .FORCE-PERF-CFLAGS
744 @FLAGS='$(TRACK_CFLAGS)'; \ 745 @FLAGS='$(TRACK_CFLAGS)'; \
745 if test x"$$FLAGS" != x"`cat $(OUTPUT)PERF-CFLAGS 2>/dev/null`" ; then \ 746 if test x"$$FLAGS" != x"`cat $(OUTPUT)PERF-CFLAGS 2>/dev/null`" ; then \
746 echo 1>&2 " * new build flags or prefix"; \ 747 echo 1>&2 " * new build flags or prefix"; \
747 echo "$$FLAGS" >$(OUTPUT)PERF-CFLAGS; \ 748 echo "$$FLAGS" >$(OUTPUT)PERF-CFLAGS; \
748 fi 749 fi
749 750
750 ### Testing rules 751 ### Testing rules
751 752
752 # GNU make supports exporting all variables by "export" without parameters. 753 # GNU make supports exporting all variables by "export" without parameters.
753 # However, the environment gets quite big, and some programs have problems 754 # However, the environment gets quite big, and some programs have problems
754 # with that. 755 # with that.
755 756
756 check: $(OUTPUT)common-cmds.h 757 check: $(OUTPUT)common-cmds.h
757 if sparse; \ 758 if sparse; \
758 then \ 759 then \
759 for i in *.c */*.c; \ 760 for i in *.c */*.c; \
760 do \ 761 do \
761 sparse $(CFLAGS) $(SPARSE_FLAGS) $$i || exit; \ 762 sparse $(CFLAGS) $(SPARSE_FLAGS) $$i || exit; \
762 done; \ 763 done; \
763 else \ 764 else \
764 exit 1; \ 765 exit 1; \
765 fi 766 fi
766 767
767 ### Installation rules 768 ### Installation rules
768 769
769 install-bin: all 770 install-bin: all
770 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(bindir_SQ)' 771 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(bindir_SQ)'
771 $(INSTALL) $(OUTPUT)perf '$(DESTDIR_SQ)$(bindir_SQ)' 772 $(INSTALL) $(OUTPUT)perf '$(DESTDIR_SQ)$(bindir_SQ)'
772 $(INSTALL) $(OUTPUT)perf-archive -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)' 773 $(INSTALL) $(OUTPUT)perf-archive -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
773 ifndef NO_LIBPERL 774 ifndef NO_LIBPERL
774 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace' 775 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace'
775 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/bin' 776 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/bin'
776 $(INSTALL) scripts/perl/Perf-Trace-Util/lib/Perf/Trace/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace' 777 $(INSTALL) scripts/perl/Perf-Trace-Util/lib/Perf/Trace/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace'
777 $(INSTALL) scripts/perl/*.pl -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl' 778 $(INSTALL) scripts/perl/*.pl -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl'
778 $(INSTALL) scripts/perl/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/bin' 779 $(INSTALL) scripts/perl/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/bin'
779 endif 780 endif
780 ifndef NO_LIBPYTHON 781 ifndef NO_LIBPYTHON
781 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/Perf-Trace-Util/lib/Perf/Trace' 782 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/Perf-Trace-Util/lib/Perf/Trace'
782 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/bin' 783 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/bin'
783 $(INSTALL) scripts/python/Perf-Trace-Util/lib/Perf/Trace/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/Perf-Trace-Util/lib/Perf/Trace' 784 $(INSTALL) scripts/python/Perf-Trace-Util/lib/Perf/Trace/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/Perf-Trace-Util/lib/Perf/Trace'
784 $(INSTALL) scripts/python/*.py -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python' 785 $(INSTALL) scripts/python/*.py -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python'
785 $(INSTALL) scripts/python/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/bin' 786 $(INSTALL) scripts/python/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/bin'
786 endif 787 endif
787 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(sysconfdir_SQ)/bash_completion.d' 788 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(sysconfdir_SQ)/bash_completion.d'
788 $(INSTALL) bash_completion '$(DESTDIR_SQ)$(sysconfdir_SQ)/bash_completion.d/perf' 789 $(INSTALL) bash_completion '$(DESTDIR_SQ)$(sysconfdir_SQ)/bash_completion.d/perf'
789 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests' 790 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests'
790 $(INSTALL) tests/attr.py '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests' 791 $(INSTALL) tests/attr.py '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests'
791 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr' 792 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr'
792 $(INSTALL) tests/attr/* '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr' 793 $(INSTALL) tests/attr/* '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr'
793 794
794 install: install-bin try-install-man 795 install: install-bin try-install-man
795 796
796 install-python_ext: 797 install-python_ext:
797 $(PYTHON_WORD) util/setup.py --quiet install --root='/$(DESTDIR_SQ)' 798 $(PYTHON_WORD) util/setup.py --quiet install --root='/$(DESTDIR_SQ)'
798 799
799 # 'make install-doc' should call 'make -C Documentation install' 800 # 'make install-doc' should call 'make -C Documentation install'
800 $(INSTALL_DOC_TARGETS): 801 $(INSTALL_DOC_TARGETS):
801 $(QUIET_SUBDIR0)Documentation $(QUIET_SUBDIR1) $(@:-doc=) 802 $(QUIET_SUBDIR0)Documentation $(QUIET_SUBDIR1) $(@:-doc=)
802 803
803 ### Cleaning rules 804 ### Cleaning rules
804 805
805 clean: $(LIBTRACEEVENT)-clean $(LIBLK)-clean 806 clean: $(LIBTRACEEVENT)-clean $(LIBLK)-clean
806 $(RM) $(LIB_OBJS) $(BUILTIN_OBJS) $(LIB_FILE) $(OUTPUT)perf-archive $(OUTPUT)perf.o $(LANG_BINDINGS) 807 $(RM) $(LIB_OBJS) $(BUILTIN_OBJS) $(LIB_FILE) $(OUTPUT)perf-archive $(OUTPUT)perf.o $(LANG_BINDINGS)
807 $(RM) $(ALL_PROGRAMS) perf 808 $(RM) $(ALL_PROGRAMS) perf
808 $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope* 809 $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope*
809 $(QUIET_SUBDIR0)Documentation $(QUIET_SUBDIR1) clean 810 $(QUIET_SUBDIR0)Documentation $(QUIET_SUBDIR1) clean
810 $(RM) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)PERF-CFLAGS 811 $(RM) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)PERF-CFLAGS
811 $(RM) $(OUTPUT)util/*-bison* 812 $(RM) $(OUTPUT)util/*-bison*
812 $(RM) $(OUTPUT)util/*-flex* 813 $(RM) $(OUTPUT)util/*-flex*
813 $(python-clean) 814 $(python-clean)
814 815
815 .PHONY: all install clean strip $(LIBTRACEEVENT) $(LIBLK) 816 .PHONY: all install clean strip $(LIBTRACEEVENT) $(LIBLK)
816 .PHONY: shell_compatibility_test please_set_SHELL_PATH_to_a_more_modern_shell 817 .PHONY: shell_compatibility_test please_set_SHELL_PATH_to_a_more_modern_shell
817 .PHONY: .FORCE-PERF-VERSION-FILE TAGS tags cscope .FORCE-PERF-CFLAGS 818 .PHONY: .FORCE-PERF-VERSION-FILE TAGS tags cscope .FORCE-PERF-CFLAGS
tools/perf/builtin-trace.c
1 #include <traceevent/event-parse.h> 1 #include <traceevent/event-parse.h>
2 #include "builtin.h" 2 #include "builtin.h"
3 #include "util/color.h" 3 #include "util/color.h"
4 #include "util/debug.h" 4 #include "util/debug.h"
5 #include "util/evlist.h" 5 #include "util/evlist.h"
6 #include "util/machine.h" 6 #include "util/machine.h"
7 #include "util/session.h" 7 #include "util/session.h"
8 #include "util/thread.h" 8 #include "util/thread.h"
9 #include "util/parse-options.h" 9 #include "util/parse-options.h"
10 #include "util/strlist.h" 10 #include "util/strlist.h"
11 #include "util/intlist.h" 11 #include "util/intlist.h"
12 #include "util/thread_map.h" 12 #include "util/thread_map.h"
13 13
14 #include <libaudit.h> 14 #include <libaudit.h>
15 #include <stdlib.h> 15 #include <stdlib.h>
16 #include <sys/mman.h> 16 #include <sys/mman.h>
17 #include <linux/futex.h> 17 #include <linux/futex.h>
18 18
19 static size_t syscall_arg__scnprintf_hex(char *bf, size_t size, 19 static size_t syscall_arg__scnprintf_hex(char *bf, size_t size,
20 unsigned long arg, 20 unsigned long arg,
21 u8 arg_idx __maybe_unused, 21 u8 arg_idx __maybe_unused,
22 u8 *arg_mask __maybe_unused) 22 u8 *arg_mask __maybe_unused)
23 { 23 {
24 return scnprintf(bf, size, "%#lx", arg); 24 return scnprintf(bf, size, "%#lx", arg);
25 } 25 }
26 26
27 #define SCA_HEX syscall_arg__scnprintf_hex 27 #define SCA_HEX syscall_arg__scnprintf_hex
28 28
29 static size_t syscall_arg__scnprintf_whence(char *bf, size_t size, 29 static size_t syscall_arg__scnprintf_whence(char *bf, size_t size,
30 unsigned long arg, 30 unsigned long arg,
31 u8 arg_idx __maybe_unused, 31 u8 arg_idx __maybe_unused,
32 u8 *arg_mask __maybe_unused) 32 u8 *arg_mask __maybe_unused)
33 { 33 {
34 int whence = arg; 34 int whence = arg;
35 35
36 switch (whence) { 36 switch (whence) {
37 #define P_WHENCE(n) case SEEK_##n: return scnprintf(bf, size, #n) 37 #define P_WHENCE(n) case SEEK_##n: return scnprintf(bf, size, #n)
38 P_WHENCE(SET); 38 P_WHENCE(SET);
39 P_WHENCE(CUR); 39 P_WHENCE(CUR);
40 P_WHENCE(END); 40 P_WHENCE(END);
41 #ifdef SEEK_DATA 41 #ifdef SEEK_DATA
42 P_WHENCE(DATA); 42 P_WHENCE(DATA);
43 #endif 43 #endif
44 #ifdef SEEK_HOLE 44 #ifdef SEEK_HOLE
45 P_WHENCE(HOLE); 45 P_WHENCE(HOLE);
46 #endif 46 #endif
47 #undef P_WHENCE 47 #undef P_WHENCE
48 default: break; 48 default: break;
49 } 49 }
50 50
51 return scnprintf(bf, size, "%#x", whence); 51 return scnprintf(bf, size, "%#x", whence);
52 } 52 }
53 53
54 #define SCA_WHENCE syscall_arg__scnprintf_whence 54 #define SCA_WHENCE syscall_arg__scnprintf_whence
55 55
56 static size_t syscall_arg__scnprintf_mmap_prot(char *bf, size_t size, 56 static size_t syscall_arg__scnprintf_mmap_prot(char *bf, size_t size,
57 unsigned long arg, 57 unsigned long arg,
58 u8 arg_idx __maybe_unused, 58 u8 arg_idx __maybe_unused,
59 u8 *arg_mask __maybe_unused) 59 u8 *arg_mask __maybe_unused)
60 { 60 {
61 int printed = 0, prot = arg; 61 int printed = 0, prot = arg;
62 62
63 if (prot == PROT_NONE) 63 if (prot == PROT_NONE)
64 return scnprintf(bf, size, "NONE"); 64 return scnprintf(bf, size, "NONE");
65 #define P_MMAP_PROT(n) \ 65 #define P_MMAP_PROT(n) \
66 if (prot & PROT_##n) { \ 66 if (prot & PROT_##n) { \
67 printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \ 67 printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
68 prot &= ~PROT_##n; \ 68 prot &= ~PROT_##n; \
69 } 69 }
70 70
71 P_MMAP_PROT(EXEC); 71 P_MMAP_PROT(EXEC);
72 P_MMAP_PROT(READ); 72 P_MMAP_PROT(READ);
73 P_MMAP_PROT(WRITE); 73 P_MMAP_PROT(WRITE);
74 #ifdef PROT_SEM 74 #ifdef PROT_SEM
75 P_MMAP_PROT(SEM); 75 P_MMAP_PROT(SEM);
76 #endif 76 #endif
77 P_MMAP_PROT(GROWSDOWN); 77 P_MMAP_PROT(GROWSDOWN);
78 P_MMAP_PROT(GROWSUP); 78 P_MMAP_PROT(GROWSUP);
79 #undef P_MMAP_PROT 79 #undef P_MMAP_PROT
80 80
81 if (prot) 81 if (prot)
82 printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", prot); 82 printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", prot);
83 83
84 return printed; 84 return printed;
85 } 85 }
86 86
87 #define SCA_MMAP_PROT syscall_arg__scnprintf_mmap_prot 87 #define SCA_MMAP_PROT syscall_arg__scnprintf_mmap_prot
88 88
89 static size_t syscall_arg__scnprintf_mmap_flags(char *bf, size_t size, 89 static size_t syscall_arg__scnprintf_mmap_flags(char *bf, size_t size,
90 unsigned long arg, u8 arg_idx __maybe_unused, 90 unsigned long arg, u8 arg_idx __maybe_unused,
91 u8 *arg_mask __maybe_unused) 91 u8 *arg_mask __maybe_unused)
92 { 92 {
93 int printed = 0, flags = arg; 93 int printed = 0, flags = arg;
94 94
95 #define P_MMAP_FLAG(n) \ 95 #define P_MMAP_FLAG(n) \
96 if (flags & MAP_##n) { \ 96 if (flags & MAP_##n) { \
97 printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \ 97 printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
98 flags &= ~MAP_##n; \ 98 flags &= ~MAP_##n; \
99 } 99 }
100 100
101 P_MMAP_FLAG(SHARED); 101 P_MMAP_FLAG(SHARED);
102 P_MMAP_FLAG(PRIVATE); 102 P_MMAP_FLAG(PRIVATE);
103 #ifdef MAP_32BIT
103 P_MMAP_FLAG(32BIT); 104 P_MMAP_FLAG(32BIT);
105 #endif
104 P_MMAP_FLAG(ANONYMOUS); 106 P_MMAP_FLAG(ANONYMOUS);
105 P_MMAP_FLAG(DENYWRITE); 107 P_MMAP_FLAG(DENYWRITE);
106 P_MMAP_FLAG(EXECUTABLE); 108 P_MMAP_FLAG(EXECUTABLE);
107 P_MMAP_FLAG(FILE); 109 P_MMAP_FLAG(FILE);
108 P_MMAP_FLAG(FIXED); 110 P_MMAP_FLAG(FIXED);
109 P_MMAP_FLAG(GROWSDOWN); 111 P_MMAP_FLAG(GROWSDOWN);
110 #ifdef MAP_HUGETLB 112 #ifdef MAP_HUGETLB
111 P_MMAP_FLAG(HUGETLB); 113 P_MMAP_FLAG(HUGETLB);
112 #endif 114 #endif
113 P_MMAP_FLAG(LOCKED); 115 P_MMAP_FLAG(LOCKED);
114 P_MMAP_FLAG(NONBLOCK); 116 P_MMAP_FLAG(NONBLOCK);
115 P_MMAP_FLAG(NORESERVE); 117 P_MMAP_FLAG(NORESERVE);
116 P_MMAP_FLAG(POPULATE); 118 P_MMAP_FLAG(POPULATE);
117 P_MMAP_FLAG(STACK); 119 P_MMAP_FLAG(STACK);
118 #ifdef MAP_UNINITIALIZED 120 #ifdef MAP_UNINITIALIZED
119 P_MMAP_FLAG(UNINITIALIZED); 121 P_MMAP_FLAG(UNINITIALIZED);
120 #endif 122 #endif
121 #undef P_MMAP_FLAG 123 #undef P_MMAP_FLAG
122 124
123 if (flags) 125 if (flags)
124 printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags); 126 printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
125 127
126 return printed; 128 return printed;
127 } 129 }
128 130
129 #define SCA_MMAP_FLAGS syscall_arg__scnprintf_mmap_flags 131 #define SCA_MMAP_FLAGS syscall_arg__scnprintf_mmap_flags
130 132
131 static size_t syscall_arg__scnprintf_madvise_behavior(char *bf, size_t size, 133 static size_t syscall_arg__scnprintf_madvise_behavior(char *bf, size_t size,
132 unsigned long arg, u8 arg_idx __maybe_unused, 134 unsigned long arg, u8 arg_idx __maybe_unused,
133 u8 *arg_mask __maybe_unused) 135 u8 *arg_mask __maybe_unused)
134 { 136 {
135 int behavior = arg; 137 int behavior = arg;
136 138
137 switch (behavior) { 139 switch (behavior) {
138 #define P_MADV_BHV(n) case MADV_##n: return scnprintf(bf, size, #n) 140 #define P_MADV_BHV(n) case MADV_##n: return scnprintf(bf, size, #n)
139 P_MADV_BHV(NORMAL); 141 P_MADV_BHV(NORMAL);
140 P_MADV_BHV(RANDOM); 142 P_MADV_BHV(RANDOM);
141 P_MADV_BHV(SEQUENTIAL); 143 P_MADV_BHV(SEQUENTIAL);
142 P_MADV_BHV(WILLNEED); 144 P_MADV_BHV(WILLNEED);
143 P_MADV_BHV(DONTNEED); 145 P_MADV_BHV(DONTNEED);
144 P_MADV_BHV(REMOVE); 146 P_MADV_BHV(REMOVE);
145 P_MADV_BHV(DONTFORK); 147 P_MADV_BHV(DONTFORK);
146 P_MADV_BHV(DOFORK); 148 P_MADV_BHV(DOFORK);
147 P_MADV_BHV(HWPOISON); 149 P_MADV_BHV(HWPOISON);
148 #ifdef MADV_SOFT_OFFLINE 150 #ifdef MADV_SOFT_OFFLINE
149 P_MADV_BHV(SOFT_OFFLINE); 151 P_MADV_BHV(SOFT_OFFLINE);
150 #endif 152 #endif
151 P_MADV_BHV(MERGEABLE); 153 P_MADV_BHV(MERGEABLE);
152 P_MADV_BHV(UNMERGEABLE); 154 P_MADV_BHV(UNMERGEABLE);
153 #ifdef MADV_HUGEPAGE 155 #ifdef MADV_HUGEPAGE
154 P_MADV_BHV(HUGEPAGE); 156 P_MADV_BHV(HUGEPAGE);
155 #endif 157 #endif
156 #ifdef MADV_NOHUGEPAGE 158 #ifdef MADV_NOHUGEPAGE
157 P_MADV_BHV(NOHUGEPAGE); 159 P_MADV_BHV(NOHUGEPAGE);
158 #endif 160 #endif
159 #ifdef MADV_DONTDUMP 161 #ifdef MADV_DONTDUMP
160 P_MADV_BHV(DONTDUMP); 162 P_MADV_BHV(DONTDUMP);
161 #endif 163 #endif
162 #ifdef MADV_DODUMP 164 #ifdef MADV_DODUMP
163 P_MADV_BHV(DODUMP); 165 P_MADV_BHV(DODUMP);
164 #endif 166 #endif
165 #undef P_MADV_PHV 167 #undef P_MADV_PHV
166 default: break; 168 default: break;
167 } 169 }
168 170
169 return scnprintf(bf, size, "%#x", behavior); 171 return scnprintf(bf, size, "%#x", behavior);
170 } 172 }
171 173
172 #define SCA_MADV_BHV syscall_arg__scnprintf_madvise_behavior 174 #define SCA_MADV_BHV syscall_arg__scnprintf_madvise_behavior
173 175
/*
 * Format futex(2)'s 'op' argument symbolically and, as a side effect,
 * set bits in *arg_mask so the caller suppresses the futex arguments
 * that are meaningless for this particular command.
 */
static size_t syscall_arg__scnprintf_futex_op(char *bf, size_t size, unsigned long arg,
					      u8 arg_idx __maybe_unused, u8 *arg_mask)
{
	/* One bit per futex() parameter position; or'ed into *arg_mask. */
	enum syscall_futex_args {
		SCF_UADDR   = (1 << 0),
		SCF_OP	    = (1 << 1),
		SCF_VAL	    = (1 << 2),
		SCF_TIMEOUT = (1 << 3),
		SCF_UADDR2  = (1 << 4),
		SCF_VAL3    = (1 << 5),
	};
	int op = arg;
	/* Strip the PRIVATE/CLOCK_REALTIME modifier bits, printed separately below. */
	int cmd = op & FUTEX_CMD_MASK;
	size_t printed = 0;

	switch (cmd) {
	/* Prints the command name; each case then masks its unused args. */
#define	P_FUTEX_OP(n) case FUTEX_##n: printed = scnprintf(bf, size, #n);
	P_FUTEX_OP(WAIT);	    *arg_mask |= SCF_VAL3|SCF_UADDR2;		  break;
	P_FUTEX_OP(WAKE);	    *arg_mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
	P_FUTEX_OP(FD);		    *arg_mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
	P_FUTEX_OP(REQUEUE);	    *arg_mask |= SCF_VAL3|SCF_TIMEOUT;	          break;
	P_FUTEX_OP(CMP_REQUEUE);    *arg_mask |= SCF_TIMEOUT;			  break;
	P_FUTEX_OP(CMP_REQUEUE_PI); *arg_mask |= SCF_TIMEOUT;			  break;
	P_FUTEX_OP(WAKE_OP);							  break;
	P_FUTEX_OP(LOCK_PI);	    *arg_mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
	P_FUTEX_OP(UNLOCK_PI);	    *arg_mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
	P_FUTEX_OP(TRYLOCK_PI);	    *arg_mask |= SCF_VAL3|SCF_UADDR2;		  break;
	P_FUTEX_OP(WAIT_BITSET);    *arg_mask |= SCF_UADDR2;			  break;
	P_FUTEX_OP(WAKE_BITSET);    *arg_mask |= SCF_UADDR2;			  break;
	P_FUTEX_OP(WAIT_REQUEUE_PI);						  break;
	default: printed = scnprintf(bf, size, "%#x", cmd);			  break;
	}

	/* Append the modifier flags that were stripped from 'cmd' above. */
	if (op & FUTEX_PRIVATE_FLAG)
		printed += scnprintf(bf + printed, size - printed, "|PRIV");

	if (op & FUTEX_CLOCK_REALTIME)
		printed += scnprintf(bf + printed, size - printed, "|CLKRT");

	return printed;
}

#define SCA_FUTEX_OP syscall_arg__scnprintf_futex_op
217 219
218 static size_t syscall_arg__scnprintf_open_flags(char *bf, size_t size, 220 static size_t syscall_arg__scnprintf_open_flags(char *bf, size_t size,
219 unsigned long arg, 221 unsigned long arg,
220 u8 arg_idx, u8 *arg_mask) 222 u8 arg_idx, u8 *arg_mask)
221 { 223 {
222 int printed = 0, flags = arg; 224 int printed = 0, flags = arg;
223 225
224 if (!(flags & O_CREAT)) 226 if (!(flags & O_CREAT))
225 *arg_mask |= 1 << (arg_idx + 1); /* Mask the mode parm */ 227 *arg_mask |= 1 << (arg_idx + 1); /* Mask the mode parm */
226 228
227 if (flags == 0) 229 if (flags == 0)
228 return scnprintf(bf, size, "RDONLY"); 230 return scnprintf(bf, size, "RDONLY");
229 #define P_FLAG(n) \ 231 #define P_FLAG(n) \
230 if (flags & O_##n) { \ 232 if (flags & O_##n) { \
231 printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \ 233 printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
232 flags &= ~O_##n; \ 234 flags &= ~O_##n; \
233 } 235 }
234 236
235 P_FLAG(APPEND); 237 P_FLAG(APPEND);
236 P_FLAG(ASYNC); 238 P_FLAG(ASYNC);
237 P_FLAG(CLOEXEC); 239 P_FLAG(CLOEXEC);
238 P_FLAG(CREAT); 240 P_FLAG(CREAT);
239 P_FLAG(DIRECT); 241 P_FLAG(DIRECT);
240 P_FLAG(DIRECTORY); 242 P_FLAG(DIRECTORY);
241 P_FLAG(EXCL); 243 P_FLAG(EXCL);
242 P_FLAG(LARGEFILE); 244 P_FLAG(LARGEFILE);
243 P_FLAG(NOATIME); 245 P_FLAG(NOATIME);
244 P_FLAG(NOCTTY); 246 P_FLAG(NOCTTY);
245 #ifdef O_NONBLOCK 247 #ifdef O_NONBLOCK
246 P_FLAG(NONBLOCK); 248 P_FLAG(NONBLOCK);
247 #elif O_NDELAY 249 #elif O_NDELAY
248 P_FLAG(NDELAY); 250 P_FLAG(NDELAY);
249 #endif 251 #endif
250 #ifdef O_PATH 252 #ifdef O_PATH
251 P_FLAG(PATH); 253 P_FLAG(PATH);
252 #endif 254 #endif
253 P_FLAG(RDWR); 255 P_FLAG(RDWR);
254 #ifdef O_DSYNC 256 #ifdef O_DSYNC
255 if ((flags & O_SYNC) == O_SYNC) 257 if ((flags & O_SYNC) == O_SYNC)
256 printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", "SYNC"); 258 printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", "SYNC");
257 else { 259 else {
258 P_FLAG(DSYNC); 260 P_FLAG(DSYNC);
259 } 261 }
260 #else 262 #else
261 P_FLAG(SYNC); 263 P_FLAG(SYNC);
262 #endif 264 #endif
263 P_FLAG(TRUNC); 265 P_FLAG(TRUNC);
264 P_FLAG(WRONLY); 266 P_FLAG(WRONLY);
265 #undef P_FLAG 267 #undef P_FLAG
266 268
267 if (flags) 269 if (flags)
268 printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags); 270 printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
269 271
270 return printed; 272 return printed;
271 } 273 }
272 274
273 #define SCA_OPEN_FLAGS syscall_arg__scnprintf_open_flags 275 #define SCA_OPEN_FLAGS syscall_arg__scnprintf_open_flags
274 276
275 static struct syscall_fmt { 277 static struct syscall_fmt {
276 const char *name; 278 const char *name;
277 const char *alias; 279 const char *alias;
278 size_t (*arg_scnprintf[6])(char *bf, size_t size, unsigned long arg, u8 arg_idx, u8 *arg_mask); 280 size_t (*arg_scnprintf[6])(char *bf, size_t size, unsigned long arg, u8 arg_idx, u8 *arg_mask);
279 bool errmsg; 281 bool errmsg;
280 bool timeout; 282 bool timeout;
281 bool hexret; 283 bool hexret;
282 } syscall_fmts[] = { 284 } syscall_fmts[] = {
283 { .name = "access", .errmsg = true, }, 285 { .name = "access", .errmsg = true, },
284 { .name = "arch_prctl", .errmsg = true, .alias = "prctl", }, 286 { .name = "arch_prctl", .errmsg = true, .alias = "prctl", },
285 { .name = "brk", .hexret = true, 287 { .name = "brk", .hexret = true,
286 .arg_scnprintf = { [0] = SCA_HEX, /* brk */ }, }, 288 .arg_scnprintf = { [0] = SCA_HEX, /* brk */ }, },
287 { .name = "mmap", .hexret = true, }, 289 { .name = "mmap", .hexret = true, },
288 { .name = "connect", .errmsg = true, }, 290 { .name = "connect", .errmsg = true, },
289 { .name = "fstat", .errmsg = true, .alias = "newfstat", }, 291 { .name = "fstat", .errmsg = true, .alias = "newfstat", },
290 { .name = "fstatat", .errmsg = true, .alias = "newfstatat", }, 292 { .name = "fstatat", .errmsg = true, .alias = "newfstatat", },
291 { .name = "futex", .errmsg = true, 293 { .name = "futex", .errmsg = true,
292 .arg_scnprintf = { [1] = SCA_FUTEX_OP, /* op */ }, }, 294 .arg_scnprintf = { [1] = SCA_FUTEX_OP, /* op */ }, },
293 { .name = "ioctl", .errmsg = true, 295 { .name = "ioctl", .errmsg = true,
294 .arg_scnprintf = { [2] = SCA_HEX, /* arg */ }, }, 296 .arg_scnprintf = { [2] = SCA_HEX, /* arg */ }, },
295 { .name = "lseek", .errmsg = true, 297 { .name = "lseek", .errmsg = true,
296 .arg_scnprintf = { [2] = SCA_WHENCE, /* whence */ }, }, 298 .arg_scnprintf = { [2] = SCA_WHENCE, /* whence */ }, },
297 { .name = "lstat", .errmsg = true, .alias = "newlstat", }, 299 { .name = "lstat", .errmsg = true, .alias = "newlstat", },
298 { .name = "madvise", .errmsg = true, 300 { .name = "madvise", .errmsg = true,
299 .arg_scnprintf = { [0] = SCA_HEX, /* start */ 301 .arg_scnprintf = { [0] = SCA_HEX, /* start */
300 [2] = SCA_MADV_BHV, /* behavior */ }, }, 302 [2] = SCA_MADV_BHV, /* behavior */ }, },
301 { .name = "mmap", .hexret = true, 303 { .name = "mmap", .hexret = true,
302 .arg_scnprintf = { [0] = SCA_HEX, /* addr */ 304 .arg_scnprintf = { [0] = SCA_HEX, /* addr */
303 [2] = SCA_MMAP_PROT, /* prot */ 305 [2] = SCA_MMAP_PROT, /* prot */
304 [3] = SCA_MMAP_FLAGS, /* flags */ }, }, 306 [3] = SCA_MMAP_FLAGS, /* flags */ }, },
305 { .name = "mprotect", .errmsg = true, 307 { .name = "mprotect", .errmsg = true,
306 .arg_scnprintf = { [0] = SCA_HEX, /* start */ 308 .arg_scnprintf = { [0] = SCA_HEX, /* start */
307 [2] = SCA_MMAP_PROT, /* prot */ }, }, 309 [2] = SCA_MMAP_PROT, /* prot */ }, },
308 { .name = "mremap", .hexret = true, 310 { .name = "mremap", .hexret = true,
309 .arg_scnprintf = { [0] = SCA_HEX, /* addr */ 311 .arg_scnprintf = { [0] = SCA_HEX, /* addr */
310 [4] = SCA_HEX, /* new_addr */ }, }, 312 [4] = SCA_HEX, /* new_addr */ }, },
311 { .name = "munmap", .errmsg = true, 313 { .name = "munmap", .errmsg = true,
312 .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, }, 314 .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
313 { .name = "open", .errmsg = true, 315 { .name = "open", .errmsg = true,
314 .arg_scnprintf = { [1] = SCA_OPEN_FLAGS, /* flags */ }, }, 316 .arg_scnprintf = { [1] = SCA_OPEN_FLAGS, /* flags */ }, },
315 { .name = "open_by_handle_at", .errmsg = true, 317 { .name = "open_by_handle_at", .errmsg = true,
316 .arg_scnprintf = { [2] = SCA_OPEN_FLAGS, /* flags */ }, }, 318 .arg_scnprintf = { [2] = SCA_OPEN_FLAGS, /* flags */ }, },
317 { .name = "openat", .errmsg = true, 319 { .name = "openat", .errmsg = true,
318 .arg_scnprintf = { [2] = SCA_OPEN_FLAGS, /* flags */ }, }, 320 .arg_scnprintf = { [2] = SCA_OPEN_FLAGS, /* flags */ }, },
319 { .name = "poll", .errmsg = true, .timeout = true, }, 321 { .name = "poll", .errmsg = true, .timeout = true, },
320 { .name = "ppoll", .errmsg = true, .timeout = true, }, 322 { .name = "ppoll", .errmsg = true, .timeout = true, },
321 { .name = "pread", .errmsg = true, .alias = "pread64", }, 323 { .name = "pread", .errmsg = true, .alias = "pread64", },
322 { .name = "pwrite", .errmsg = true, .alias = "pwrite64", }, 324 { .name = "pwrite", .errmsg = true, .alias = "pwrite64", },
323 { .name = "read", .errmsg = true, }, 325 { .name = "read", .errmsg = true, },
324 { .name = "recvfrom", .errmsg = true, }, 326 { .name = "recvfrom", .errmsg = true, },
325 { .name = "select", .errmsg = true, .timeout = true, }, 327 { .name = "select", .errmsg = true, .timeout = true, },
326 { .name = "socket", .errmsg = true, }, 328 { .name = "socket", .errmsg = true, },
327 { .name = "stat", .errmsg = true, .alias = "newstat", }, 329 { .name = "stat", .errmsg = true, .alias = "newstat", },
328 { .name = "uname", .errmsg = true, .alias = "newuname", }, 330 { .name = "uname", .errmsg = true, .alias = "newuname", },
329 }; 331 };
330 332
331 static int syscall_fmt__cmp(const void *name, const void *fmtp) 333 static int syscall_fmt__cmp(const void *name, const void *fmtp)
332 { 334 {
333 const struct syscall_fmt *fmt = fmtp; 335 const struct syscall_fmt *fmt = fmtp;
334 return strcmp(name, fmt->name); 336 return strcmp(name, fmt->name);
335 } 337 }
336 338
337 static struct syscall_fmt *syscall_fmt__find(const char *name) 339 static struct syscall_fmt *syscall_fmt__find(const char *name)
338 { 340 {
339 const int nmemb = ARRAY_SIZE(syscall_fmts); 341 const int nmemb = ARRAY_SIZE(syscall_fmts);
340 return bsearch(name, syscall_fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp); 342 return bsearch(name, syscall_fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
341 } 343 }
342 344
/*
 * Per-syscall state, one per entry in trace->syscalls.table, indexed by
 * syscall id.
 */
struct syscall {
	struct event_format *tp_format;	/* sys_enter_<name> tracepoint layout */
	const char	    *name;	/* from audit_syscall_to_name() */
	bool		    filtered;	/* excluded by the event qualifier list */
	struct syscall_fmt  *fmt;	/* optional formatting overrides, may be NULL */
	/* One pretty-printer per argument, indexed like the tracepoint fields. */
	size_t		    (**arg_scnprintf)(char *bf, size_t size,
					      unsigned long arg, u8 arg_idx, u8 *args_mask);
};
351 353
352 static size_t fprintf_duration(unsigned long t, FILE *fp) 354 static size_t fprintf_duration(unsigned long t, FILE *fp)
353 { 355 {
354 double duration = (double)t / NSEC_PER_MSEC; 356 double duration = (double)t / NSEC_PER_MSEC;
355 size_t printed = fprintf(fp, "("); 357 size_t printed = fprintf(fp, "(");
356 358
357 if (duration >= 1.0) 359 if (duration >= 1.0)
358 printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration); 360 printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
359 else if (duration >= 0.01) 361 else if (duration >= 0.01)
360 printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration); 362 printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
361 else 363 else
362 printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration); 364 printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration);
363 return printed + fprintf(fp, "): "); 365 return printed + fprintf(fp, "): ");
364 } 366 }
365 367
/*
 * Per-thread accounting, hung off thread->priv.
 */
struct thread_trace {
	u64		entry_time;	/* timestamp of the last sys_enter */
	u64		exit_time;	/* timestamp of the last sys_exit */
	bool		entry_pending;	/* enter line formatted, not yet printed */
	unsigned long	nr_events;	/* samples seen for this thread */
	char		*entry_str;	/* formatted enter line, emitted at exit */
	double		runtime_ms;	/* accumulated run time (ms) — presumably
					 * from sched events; confirm with callers */
};
374 376
375 static struct thread_trace *thread_trace__new(void) 377 static struct thread_trace *thread_trace__new(void)
376 { 378 {
377 return zalloc(sizeof(struct thread_trace)); 379 return zalloc(sizeof(struct thread_trace));
378 } 380 }
379 381
380 static struct thread_trace *thread__trace(struct thread *thread, FILE *fp) 382 static struct thread_trace *thread__trace(struct thread *thread, FILE *fp)
381 { 383 {
382 struct thread_trace *ttrace; 384 struct thread_trace *ttrace;
383 385
384 if (thread == NULL) 386 if (thread == NULL)
385 goto fail; 387 goto fail;
386 388
387 if (thread->priv == NULL) 389 if (thread->priv == NULL)
388 thread->priv = thread_trace__new(); 390 thread->priv = thread_trace__new();
389 391
390 if (thread->priv == NULL) 392 if (thread->priv == NULL)
391 goto fail; 393 goto fail;
392 394
393 ttrace = thread->priv; 395 ttrace = thread->priv;
394 ++ttrace->nr_events; 396 ++ttrace->nr_events;
395 397
396 return ttrace; 398 return ttrace;
397 fail: 399 fail:
398 color_fprintf(fp, PERF_COLOR_RED, 400 color_fprintf(fp, PERF_COLOR_RED,
399 "WARNING: not enough memory, dropping samples!\n"); 401 "WARNING: not enough memory, dropping samples!\n");
400 return NULL; 402 return NULL;
401 } 403 }
402 404
/*
 * Global state for one 'perf trace' session.
 */
struct trace {
	struct perf_tool	tool;		/* callbacks for synthesized events */
	int			audit_machine;	/* arch id for audit_syscall_to_name() */
	struct {
		int		max;		/* highest syscall id in table, -1 if empty */
		struct syscall	*table;		/* indexed by syscall id */
	} syscalls;
	struct perf_record_opts opts;
	struct machine		host;
	u64			base_time;	/* first timestamp, for relative stamps */
	FILE			*output;
	unsigned long		nr_events;
	struct strlist		*ev_qualifier;	/* -e syscall name list */
	bool			not_ev_qualifier; /* qualifier list is an exclude list */
	struct intlist		*tid_list;
	struct intlist		*pid_list;
	bool			sched;
	bool			multiple_threads; /* prefix output lines with tid */
	double			duration_filter;  /* ms; only show slower calls */
	double			runtime_ms;
};
424 426
425 static bool trace__filter_duration(struct trace *trace, double t) 427 static bool trace__filter_duration(struct trace *trace, double t)
426 { 428 {
427 return t < (trace->duration_filter * NSEC_PER_MSEC); 429 return t < (trace->duration_filter * NSEC_PER_MSEC);
428 } 430 }
429 431
430 static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp) 432 static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
431 { 433 {
432 double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC; 434 double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;
433 435
434 return fprintf(fp, "%10.3f ", ts); 436 return fprintf(fp, "%10.3f ", ts);
435 } 437 }
436 438
/*
 * Set asynchronously by the signal handler and polled by the main event
 * loop.  Must be volatile so the compiler re-reads it on every loop
 * iteration instead of caching it in a register; strictly, the C
 * standard only guarantees volatile sig_atomic_t for this purpose.
 */
static volatile bool done = false;

static void sig_handler(int sig)
{
	(void)sig;	/* handler only does async-signal-safe work: set a flag */
	done = true;
}
443 445
444 static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread, 446 static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
445 u64 duration, u64 tstamp, FILE *fp) 447 u64 duration, u64 tstamp, FILE *fp)
446 { 448 {
447 size_t printed = trace__fprintf_tstamp(trace, tstamp, fp); 449 size_t printed = trace__fprintf_tstamp(trace, tstamp, fp);
448 printed += fprintf_duration(duration, fp); 450 printed += fprintf_duration(duration, fp);
449 451
450 if (trace->multiple_threads) 452 if (trace->multiple_threads)
451 printed += fprintf(fp, "%d ", thread->tid); 453 printed += fprintf(fp, "%d ", thread->tid);
452 454
453 return printed; 455 return printed;
454 } 456 }
455 457
456 static int trace__process_event(struct trace *trace, struct machine *machine, 458 static int trace__process_event(struct trace *trace, struct machine *machine,
457 union perf_event *event) 459 union perf_event *event)
458 { 460 {
459 int ret = 0; 461 int ret = 0;
460 462
461 switch (event->header.type) { 463 switch (event->header.type) {
462 case PERF_RECORD_LOST: 464 case PERF_RECORD_LOST:
463 color_fprintf(trace->output, PERF_COLOR_RED, 465 color_fprintf(trace->output, PERF_COLOR_RED,
464 "LOST %" PRIu64 " events!\n", event->lost.lost); 466 "LOST %" PRIu64 " events!\n", event->lost.lost);
465 ret = machine__process_lost_event(machine, event); 467 ret = machine__process_lost_event(machine, event);
466 default: 468 default:
467 ret = machine__process_event(machine, event); 469 ret = machine__process_event(machine, event);
468 break; 470 break;
469 } 471 }
470 472
471 return ret; 473 return ret;
472 } 474 }
473 475
474 static int trace__tool_process(struct perf_tool *tool, 476 static int trace__tool_process(struct perf_tool *tool,
475 union perf_event *event, 477 union perf_event *event,
476 struct perf_sample *sample __maybe_unused, 478 struct perf_sample *sample __maybe_unused,
477 struct machine *machine) 479 struct machine *machine)
478 { 480 {
479 struct trace *trace = container_of(tool, struct trace, tool); 481 struct trace *trace = container_of(tool, struct trace, tool);
480 return trace__process_event(trace, machine, event); 482 return trace__process_event(trace, machine, event);
481 } 483 }
482 484
483 static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist) 485 static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)
484 { 486 {
485 int err = symbol__init(); 487 int err = symbol__init();
486 488
487 if (err) 489 if (err)
488 return err; 490 return err;
489 491
490 machine__init(&trace->host, "", HOST_KERNEL_ID); 492 machine__init(&trace->host, "", HOST_KERNEL_ID);
491 machine__create_kernel_maps(&trace->host); 493 machine__create_kernel_maps(&trace->host);
492 494
493 if (perf_target__has_task(&trace->opts.target)) { 495 if (perf_target__has_task(&trace->opts.target)) {
494 err = perf_event__synthesize_thread_map(&trace->tool, evlist->threads, 496 err = perf_event__synthesize_thread_map(&trace->tool, evlist->threads,
495 trace__tool_process, 497 trace__tool_process,
496 &trace->host); 498 &trace->host);
497 } else { 499 } else {
498 err = perf_event__synthesize_threads(&trace->tool, trace__tool_process, 500 err = perf_event__synthesize_threads(&trace->tool, trace__tool_process,
499 &trace->host); 501 &trace->host);
500 } 502 }
501 503
502 if (err) 504 if (err)
503 symbol__exit(); 505 symbol__exit();
504 506
505 return err; 507 return err;
506 } 508 }
507 509
508 static int syscall__set_arg_fmts(struct syscall *sc) 510 static int syscall__set_arg_fmts(struct syscall *sc)
509 { 511 {
510 struct format_field *field; 512 struct format_field *field;
511 int idx = 0; 513 int idx = 0;
512 514
513 sc->arg_scnprintf = calloc(sc->tp_format->format.nr_fields - 1, sizeof(void *)); 515 sc->arg_scnprintf = calloc(sc->tp_format->format.nr_fields - 1, sizeof(void *));
514 if (sc->arg_scnprintf == NULL) 516 if (sc->arg_scnprintf == NULL)
515 return -1; 517 return -1;
516 518
517 for (field = sc->tp_format->format.fields->next; field; field = field->next) { 519 for (field = sc->tp_format->format.fields->next; field; field = field->next) {
518 if (sc->fmt && sc->fmt->arg_scnprintf[idx]) 520 if (sc->fmt && sc->fmt->arg_scnprintf[idx])
519 sc->arg_scnprintf[idx] = sc->fmt->arg_scnprintf[idx]; 521 sc->arg_scnprintf[idx] = sc->fmt->arg_scnprintf[idx];
520 else if (field->flags & FIELD_IS_POINTER) 522 else if (field->flags & FIELD_IS_POINTER)
521 sc->arg_scnprintf[idx] = syscall_arg__scnprintf_hex; 523 sc->arg_scnprintf[idx] = syscall_arg__scnprintf_hex;
522 ++idx; 524 ++idx;
523 } 525 }
524 526
525 return 0; 527 return 0;
526 } 528 }
527 529
/*
 * Lazily populate trace->syscalls.table[id]: resolve the syscall name,
 * grow the table if needed, apply the -e qualifier filter, and load the
 * sys_enter_<name> tracepoint format plus per-arg formatters.
 * Returns 0 on success (including when filtered out), -1 on error.
 */
static int trace__read_syscall_info(struct trace *trace, int id)
{
	char tp_name[128];
	struct syscall *sc;
	const char *name = audit_syscall_to_name(id, trace->audit_machine);

	if (name == NULL)
		return -1;

	/* Grow the table so it can be indexed directly by syscall id. */
	if (id > trace->syscalls.max) {
		struct syscall *nsyscalls = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc));

		if (nsyscalls == NULL)
			return -1;

		/* Zero only the newly added tail (or everything on first growth). */
		if (trace->syscalls.max != -1) {
			memset(nsyscalls + trace->syscalls.max + 1, 0,
			       (id - trace->syscalls.max) * sizeof(*sc));
		} else {
			memset(nsyscalls, 0, (id + 1) * sizeof(*sc));
		}

		trace->syscalls.table = nsyscalls;
		trace->syscalls.max = id;
	}

	sc = trace->syscalls.table + id;
	sc->name = name;

	if (trace->ev_qualifier) {
		bool in = strlist__find(trace->ev_qualifier, name) != NULL;

		/* XOR handles both include (-e a,b) and exclude (-e !a,b) lists. */
		if (!(in ^ trace->not_ev_qualifier)) {
			sc->filtered = true;
			/*
			 * No need to do read tracepoint information since this will be
			 * filtered out.
			 */
			return 0;
		}
	}

	sc->fmt  = syscall_fmt__find(sc->name);

	snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
	sc->tp_format = event_format__new("syscalls", tp_name);

	/* Some syscalls' tracepoints use an alias, e.g. fstat -> newfstat. */
	if (sc->tp_format == NULL && sc->fmt && sc->fmt->alias) {
		snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias);
		sc->tp_format = event_format__new("syscalls", tp_name);
	}

	if (sc->tp_format == NULL)
		return -1;

	return syscall__set_arg_fmts(sc);
}
585 587
/*
 * Format all arguments of a syscall into bf.  With tracepoint format
 * info, each arg is labeled with its field name and run through its
 * registered pretty-printer; a printer may set bits in 'mask' to
 * suppress later arguments it knows are unused (e.g. futex, open mode).
 * Without format info, print all six raw args.  Returns chars written.
 */
static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
				      unsigned long *args)
{
	int i = 0;
	size_t printed = 0;

	if (sc->tp_format != NULL) {
		struct format_field *field;
		/* 'bit' tracks the current arg's position in 'mask'. */
		u8 mask = 0, bit = 1;

		for (field = sc->tp_format->format.fields->next; field;
		     field = field->next, ++i, bit <<= 1) {
			/* Skip args masked out by an earlier printer. */
			if (mask & bit)
				continue;

			printed += scnprintf(bf + printed, size - printed,
					     "%s%s: ", printed ? ", " : "", field->name);

			if (sc->arg_scnprintf && sc->arg_scnprintf[i]) {
				printed += sc->arg_scnprintf[i](bf + printed, size - printed,
								args[i], i, &mask);
			} else {
				printed += scnprintf(bf + printed, size - printed,
						     "%ld", args[i]);
			}
		}
	} else {
		/* No format info: dump the six raw argument registers. */
		while (i < 6) {
			printed += scnprintf(bf + printed, size - printed,
					     "%sarg%d: %ld",
					     printed ? ", " : "", i, args[i]);
			++i;
		}
	}

	return printed;
}
623 625
/* Signature shared by all tracepoint sample handlers in this tool. */
typedef int (*tracepoint_handler)(struct trace *trace, struct perf_evsel *evsel,
				  struct perf_sample *sample);

/*
 * Map the "id" field of a raw_syscalls sample to its entry in
 * trace->syscalls.table, lazily reading the syscall's tracepoint
 * information on first use.  Returns NULL (and logs, depending on
 * verbosity) when the id is invalid or its info can't be read.
 */
static struct syscall *trace__syscall_info(struct trace *trace,
					   struct perf_evsel *evsel,
					   struct perf_sample *sample)
{
	int id = perf_evsel__intval(evsel, sample, "id");

	if (id < 0) {

		/*
		 * XXX: Noticed on x86_64, reproduced as far back as 3.0.36, haven't tried
		 * before that, leaving at a higher verbosity level till that is
		 * explained. Reproduced with plain ftrace with:
		 *
		 * echo 1 > /t/events/raw_syscalls/sys_exit/enable
		 * grep "NR -1 " /t/trace_pipe
		 *
		 * After generating some load on the machine.
		 */
		if (verbose > 1) {
			static u64 n;
			fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n",
				id, perf_evsel__name(evsel), ++n);
		}
		return NULL;
	}

	/* Unknown entry so far: try to read this syscall's info now. */
	if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL) &&
	    trace__read_syscall_info(trace, id))
		goto out_cant_read;

	/* Re-check: the read above may still have left the slot empty. */
	if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL))
		goto out_cant_read;

	return &trace->syscalls.table[id];

out_cant_read:
	if (verbose) {
		fprintf(trace->output, "Problems reading syscall %d", id);
		if (id <= trace->syscalls.max && trace->syscalls.table[id].name != NULL)
			fprintf(trace->output, "(%s)", trace->syscalls.table[id].name);
		fputs(" information\n", trace->output);
	}
	return NULL;
}
671 673
/*
 * raw_syscalls:sys_enter handler: format "<name>(<args>" into the
 * per-thread entry_str buffer.  exit/exit_group never return, so those
 * lines are printed immediately; for every other syscall the formatted
 * entry is kept pending until the matching sys_exit arrives and
 * completes the line with the return value.
 */
static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
			    struct perf_sample *sample)
{
	char *msg;
	void *args;
	size_t printed = 0;
	struct thread *thread;
	struct syscall *sc = trace__syscall_info(trace, evsel, sample);
	struct thread_trace *ttrace;

	if (sc == NULL)
		return -1;

	/* Filtered out by the -e/--expr qualifier: silently ignore. */
	if (sc->filtered)
		return 0;

	thread = machine__findnew_thread(&trace->host, sample->pid,
					 sample->tid);
	ttrace = thread__trace(thread, trace->output);
	if (ttrace == NULL)
		return -1;

	args = perf_evsel__rawptr(evsel, sample, "args");
	if (args == NULL) {
		fprintf(trace->output, "Problems reading syscall arguments\n");
		return -1;
	}

	ttrace = thread->priv;

	/* Lazily allocate the pending-entry line buffer. */
	if (ttrace->entry_str == NULL) {
		ttrace->entry_str = malloc(1024);
		if (!ttrace->entry_str)
			return -1;
	}

	ttrace->entry_time = sample->time;
	msg = ttrace->entry_str;
	/* XXX: 1024 is hardcoded here and below; must match the malloc above. */
	printed += scnprintf(msg + printed, 1024 - printed, "%s(", sc->name);

	printed += syscall__scnprintf_args(sc, msg + printed, 1024 - printed, args);

	if (!strcmp(sc->name, "exit_group") || !strcmp(sc->name, "exit")) {
		/* These never return: print now, no sys_exit will follow. */
		if (!trace->duration_filter) {
			trace__fprintf_entry_head(trace, thread, 1, sample->time, trace->output);
			fprintf(trace->output, "%-70s\n", ttrace->entry_str);
		}
	} else
		ttrace->entry_pending = true;

	return 0;
}
724 726
/*
 * raw_syscalls:sys_exit handler: complete the line started by
 * trace__sys_enter with the syscall's return value, applying the
 * optional duration filter.  If no entry was pending (e.g. the enter
 * happened before tracing started), print a "continued" marker
 * instead of the formatted arguments.
 */
static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
			   struct perf_sample *sample)
{
	int ret;
	u64 duration = 0;
	struct thread *thread;
	struct syscall *sc = trace__syscall_info(trace, evsel, sample);
	struct thread_trace *ttrace;

	if (sc == NULL)
		return -1;

	if (sc->filtered)
		return 0;

	thread = machine__findnew_thread(&trace->host, sample->pid,
					 sample->tid);
	ttrace = thread__trace(thread, trace->output);
	if (ttrace == NULL)
		return -1;

	ret = perf_evsel__intval(evsel, sample, "ret");

	ttrace = thread->priv;

	ttrace->exit_time = sample->time;

	/* Duration filtering: drop lines faster than --duration. */
	if (ttrace->entry_time) {
		duration = sample->time - ttrace->entry_time;
		if (trace__filter_duration(trace, duration))
			goto out;
	} else if (trace->duration_filter)
		goto out;

	trace__fprintf_entry_head(trace, thread, duration, sample->time, trace->output);

	if (ttrace->entry_pending) {
		fprintf(trace->output, "%-70s", ttrace->entry_str);
	} else {
		/* Enter was never seen; mark the line as a continuation. */
		fprintf(trace->output, " ... [");
		color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued");
		fprintf(trace->output, "]: %s()", sc->name);
	}

	/*
	 * Return-value formatting: errno name+message for negative
	 * returns of errmsg syscalls, "Timeout" for timeout syscalls
	 * returning 0, hex for hexret ones (e.g. mmap), signed decimal
	 * otherwise (the signed_print label serves both the "no fmt"
	 * case and the final else).
	 */
	if (sc->fmt == NULL) {
signed_print:
		fprintf(trace->output, ") = %d", ret);
	} else if (ret < 0 && sc->fmt->errmsg) {
		char bf[256];
		const char *emsg = strerror_r(-ret, bf, sizeof(bf)),
			   *e = audit_errno_to_name(-ret);

		fprintf(trace->output, ") = -1 %s %s", e, emsg);
	} else if (ret == 0 && sc->fmt->timeout)
		fprintf(trace->output, ") = 0 Timeout");
	else if (sc->fmt->hexret)
		fprintf(trace->output, ") = %#x", ret);
	else
		goto signed_print;

	fputc('\n', trace->output);
out:
	ttrace->entry_pending = false;

	return 0;
}
791 793
792 static int trace__sched_stat_runtime(struct trace *trace, struct perf_evsel *evsel, 794 static int trace__sched_stat_runtime(struct trace *trace, struct perf_evsel *evsel,
793 struct perf_sample *sample) 795 struct perf_sample *sample)
794 { 796 {
795 u64 runtime = perf_evsel__intval(evsel, sample, "runtime"); 797 u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
796 double runtime_ms = (double)runtime / NSEC_PER_MSEC; 798 double runtime_ms = (double)runtime / NSEC_PER_MSEC;
797 struct thread *thread = machine__findnew_thread(&trace->host, 799 struct thread *thread = machine__findnew_thread(&trace->host,
798 sample->pid, 800 sample->pid,
799 sample->tid); 801 sample->tid);
800 struct thread_trace *ttrace = thread__trace(thread, trace->output); 802 struct thread_trace *ttrace = thread__trace(thread, trace->output);
801 803
802 if (ttrace == NULL) 804 if (ttrace == NULL)
803 goto out_dump; 805 goto out_dump;
804 806
805 ttrace->runtime_ms += runtime_ms; 807 ttrace->runtime_ms += runtime_ms;
806 trace->runtime_ms += runtime_ms; 808 trace->runtime_ms += runtime_ms;
807 return 0; 809 return 0;
808 810
809 out_dump: 811 out_dump:
810 fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n", 812 fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n",
811 evsel->name, 813 evsel->name,
812 perf_evsel__strval(evsel, sample, "comm"), 814 perf_evsel__strval(evsel, sample, "comm"),
813 (pid_t)perf_evsel__intval(evsel, sample, "pid"), 815 (pid_t)perf_evsel__intval(evsel, sample, "pid"),
814 runtime, 816 runtime,
815 perf_evsel__intval(evsel, sample, "vruntime")); 817 perf_evsel__intval(evsel, sample, "vruntime"));
816 return 0; 818 return 0;
817 } 819 }
818 820
819 static bool skip_sample(struct trace *trace, struct perf_sample *sample) 821 static bool skip_sample(struct trace *trace, struct perf_sample *sample)
820 { 822 {
821 if ((trace->pid_list && intlist__find(trace->pid_list, sample->pid)) || 823 if ((trace->pid_list && intlist__find(trace->pid_list, sample->pid)) ||
822 (trace->tid_list && intlist__find(trace->tid_list, sample->tid))) 824 (trace->tid_list && intlist__find(trace->tid_list, sample->tid)))
823 return false; 825 return false;
824 826
825 if (trace->pid_list || trace->tid_list) 827 if (trace->pid_list || trace->tid_list)
826 return true; 828 return true;
827 829
828 return false; 830 return false;
829 } 831 }
830 832
831 static int trace__process_sample(struct perf_tool *tool, 833 static int trace__process_sample(struct perf_tool *tool,
832 union perf_event *event __maybe_unused, 834 union perf_event *event __maybe_unused,
833 struct perf_sample *sample, 835 struct perf_sample *sample,
834 struct perf_evsel *evsel, 836 struct perf_evsel *evsel,
835 struct machine *machine __maybe_unused) 837 struct machine *machine __maybe_unused)
836 { 838 {
837 struct trace *trace = container_of(tool, struct trace, tool); 839 struct trace *trace = container_of(tool, struct trace, tool);
838 int err = 0; 840 int err = 0;
839 841
840 tracepoint_handler handler = evsel->handler.func; 842 tracepoint_handler handler = evsel->handler.func;
841 843
842 if (skip_sample(trace, sample)) 844 if (skip_sample(trace, sample))
843 return 0; 845 return 0;
844 846
845 if (trace->base_time == 0) 847 if (trace->base_time == 0)
846 trace->base_time = sample->time; 848 trace->base_time = sample->time;
847 849
848 if (handler) 850 if (handler)
849 handler(trace, evsel, sample); 851 handler(trace, evsel, sample);
850 852
851 return err; 853 return err;
852 } 854 }
853 855
854 static bool 856 static bool
855 perf_session__has_tp(struct perf_session *session, const char *name) 857 perf_session__has_tp(struct perf_session *session, const char *name)
856 { 858 {
857 struct perf_evsel *evsel; 859 struct perf_evsel *evsel;
858 860
859 evsel = perf_evlist__find_tracepoint_by_name(session->evlist, name); 861 evsel = perf_evlist__find_tracepoint_by_name(session->evlist, name);
860 862
861 return evsel != NULL; 863 return evsel != NULL;
862 } 864 }
863 865
864 static int parse_target_str(struct trace *trace) 866 static int parse_target_str(struct trace *trace)
865 { 867 {
866 if (trace->opts.target.pid) { 868 if (trace->opts.target.pid) {
867 trace->pid_list = intlist__new(trace->opts.target.pid); 869 trace->pid_list = intlist__new(trace->opts.target.pid);
868 if (trace->pid_list == NULL) { 870 if (trace->pid_list == NULL) {
869 pr_err("Error parsing process id string\n"); 871 pr_err("Error parsing process id string\n");
870 return -EINVAL; 872 return -EINVAL;
871 } 873 }
872 } 874 }
873 875
874 if (trace->opts.target.tid) { 876 if (trace->opts.target.tid) {
875 trace->tid_list = intlist__new(trace->opts.target.tid); 877 trace->tid_list = intlist__new(trace->opts.target.tid);
876 if (trace->tid_list == NULL) { 878 if (trace->tid_list == NULL) {
877 pr_err("Error parsing thread id string\n"); 879 pr_err("Error parsing thread id string\n");
878 return -EINVAL; 880 return -EINVAL;
879 } 881 }
880 } 882 }
881 883
882 return 0; 884 return 0;
883 } 885 }
884 886
/*
 * Live-tracing main loop: set up the evlist (raw_syscalls enter/exit,
 * plus sched_stat_runtime when --sched), optionally fork the workload,
 * open/mmap the events and consume samples until 'done' is set by the
 * signal handler.  Resources are released via the goto-cleanup chain
 * at the bottom, in reverse acquisition order.
 */
static int trace__run(struct trace *trace, int argc, const char **argv)
{
	struct perf_evlist *evlist = perf_evlist__new();
	struct perf_evsel *evsel;
	int err = -1, i;
	unsigned long before;
	const bool forks = argc > 0;

	if (evlist == NULL) {
		fprintf(trace->output, "Not enough memory to run!\n");
		goto out;
	}

	if (perf_evlist__add_newtp(evlist, "raw_syscalls", "sys_enter", trace__sys_enter) ||
	    perf_evlist__add_newtp(evlist, "raw_syscalls", "sys_exit", trace__sys_exit)) {
		fprintf(trace->output, "Couldn't read the raw_syscalls tracepoints information!\n");
		goto out_delete_evlist;
	}

	if (trace->sched &&
	    perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime",
				   trace__sched_stat_runtime)) {
		fprintf(trace->output, "Couldn't read the sched_stat_runtime tracepoint information!\n");
		goto out_delete_evlist;
	}

	err = perf_evlist__create_maps(evlist, &trace->opts.target);
	if (err < 0) {
		fprintf(trace->output, "Problems parsing the target to trace, check your options!\n");
		goto out_delete_evlist;
	}

	err = trace__symbols_init(trace, evlist);
	if (err < 0) {
		fprintf(trace->output, "Problems initializing symbol libraries!\n");
		goto out_delete_maps;
	}

	perf_evlist__config(evlist, &trace->opts);

	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);

	/* Workload mode: prepare the child before opening the events. */
	if (forks) {
		err = perf_evlist__prepare_workload(evlist, &trace->opts.target,
						    argv, false, false);
		if (err < 0) {
			fprintf(trace->output, "Couldn't run the workload!\n");
			goto out_delete_maps;
		}
	}

	err = perf_evlist__open(evlist);
	if (err < 0) {
		fprintf(trace->output, "Couldn't create the events: %s\n", strerror(errno));
		goto out_delete_maps;
	}

	err = perf_evlist__mmap(evlist, UINT_MAX, false);
	if (err < 0) {
		fprintf(trace->output, "Couldn't mmap the events: %s\n", strerror(errno));
		goto out_close_evlist;
	}

	perf_evlist__enable(evlist);

	if (forks)
		perf_evlist__start_workload(evlist);

	trace->multiple_threads = evlist->threads->map[0] == -1 || evlist->threads->nr > 1;
again:
	before = trace->nr_events;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		union perf_event *event;

		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
			const u32 type = event->header.type;
			tracepoint_handler handler;
			struct perf_sample sample;

			++trace->nr_events;

			err = perf_evlist__parse_sample(evlist, event, &sample);
			if (err) {
				fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);
				continue;
			}

			if (trace->base_time == 0)
				trace->base_time = sample.time;

			/* Non-sample events (mmap, comm, ...) update machine state. */
			if (type != PERF_RECORD_SAMPLE) {
				trace__process_event(trace, &trace->host, event);
				continue;
			}

			evsel = perf_evlist__id2evsel(evlist, sample.id);
			if (evsel == NULL) {
				fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample.id);
				continue;
			}

			if (sample.raw_data == NULL) {
				fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
				       perf_evsel__name(evsel), sample.tid,
				       sample.cpu, sample.raw_size);
				continue;
			}

			handler = evsel->handler.func;
			handler(trace, evsel, &sample);

			/* Check for ctrl-C inside the loop for quick response. */
			if (done)
				goto out_unmap_evlist;
		}
	}

	/* No new events this pass: either finish (done) or block in poll. */
	if (trace->nr_events == before) {
		if (done)
			goto out_unmap_evlist;

		poll(evlist->pollfd, evlist->nr_fds, -1);
	}

	if (done)
		perf_evlist__disable(evlist);

	goto again;

out_unmap_evlist:
	perf_evlist__munmap(evlist);
out_close_evlist:
	perf_evlist__close(evlist);
out_delete_maps:
	perf_evlist__delete_maps(evlist);
out_delete_evlist:
	perf_evlist__delete(evlist);
out:
	return err;
}
1023 1028
/*
 * Replay mode (-i): process a recorded perf.data file through the same
 * sys_enter/sys_exit handlers used live, with ordered samples so
 * enter/exit pairs line up.  Returns 0 on success, negative on error.
 */
static int trace__replay(struct trace *trace)
{
	const struct perf_evsel_str_handler handlers[] = {
		{ "raw_syscalls:sys_enter", trace__sys_enter, },
		{ "raw_syscalls:sys_exit", trace__sys_exit, },
	};

	struct perf_session *session;
	int err = -1;

	/* Wire up the perf_tool callbacks before creating the session. */
	trace->tool.sample = trace__process_sample;
	trace->tool.mmap = perf_event__process_mmap;
	trace->tool.comm = perf_event__process_comm;
	trace->tool.exit = perf_event__process_exit;
	trace->tool.fork = perf_event__process_fork;
	trace->tool.attr = perf_event__process_attr;
	trace->tool.tracing_data = perf_event__process_tracing_data;
	trace->tool.build_id = perf_event__process_build_id;

	/* Ordering is required so sys_exit is seen after its sys_enter. */
	trace->tool.ordered_samples = true;
	trace->tool.ordering_requires_timestamps = true;

	/* add tid to output */
	trace->multiple_threads = true;

	if (symbol__init() < 0)
		return -1;

	session = perf_session__new(input_name, O_RDONLY, 0, false,
				    &trace->tool);
	if (session == NULL)
		return -ENOMEM;

	err = perf_session__set_tracepoints_handlers(session, handlers);
	if (err)
		goto out;

	/* Both tracepoints must be present in the recorded data. */
	if (!perf_session__has_tp(session, "raw_syscalls:sys_enter")) {
		pr_err("Data file does not have raw_syscalls:sys_enter events\n");
		goto out;
	}

	if (!perf_session__has_tp(session, "raw_syscalls:sys_exit")) {
		pr_err("Data file does not have raw_syscalls:sys_exit events\n");
		goto out;
	}

	err = parse_target_str(trace);
	if (err != 0)
		goto out;

	setup_pager();

	err = perf_session__process_events(session, &trace->tool);
	if (err)
		pr_err("Failed to process events, error %d", err);

out:
	perf_session__delete(session);

	return err;
}
1086 1091
/* Print the banner above the per-thread summary table; returns chars written. */
static size_t trace__fprintf_threads_header(FILE *fp)
{
	size_t printed = 0;

	printed += fprintf(fp, "\n _____________________________________________________________________\n");
	printed += fprintf(fp," __) Summary of events (__\n\n");
	printed += fprintf(fp," [ task - pid ] [ events ] [ ratio ] [ runtime ]\n");
	printed += fprintf(fp," _____________________________________________________________________\n\n");

	return printed;
}
1098 1103
1099 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp) 1104 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
1100 { 1105 {
1101 size_t printed = trace__fprintf_threads_header(fp); 1106 size_t printed = trace__fprintf_threads_header(fp);
1102 struct rb_node *nd; 1107 struct rb_node *nd;
1103 1108
1104 for (nd = rb_first(&trace->host.threads); nd; nd = rb_next(nd)) { 1109 for (nd = rb_first(&trace->host.threads); nd; nd = rb_next(nd)) {
1105 struct thread *thread = rb_entry(nd, struct thread, rb_node); 1110 struct thread *thread = rb_entry(nd, struct thread, rb_node);
1106 struct thread_trace *ttrace = thread->priv; 1111 struct thread_trace *ttrace = thread->priv;
1107 const char *color; 1112 const char *color;
1108 double ratio; 1113 double ratio;
1109 1114
1110 if (ttrace == NULL) 1115 if (ttrace == NULL)
1111 continue; 1116 continue;
1112 1117
1113 ratio = (double)ttrace->nr_events / trace->nr_events * 100.0; 1118 ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;
1114 1119
1115 color = PERF_COLOR_NORMAL; 1120 color = PERF_COLOR_NORMAL;
1116 if (ratio > 50.0) 1121 if (ratio > 50.0)
1117 color = PERF_COLOR_RED; 1122 color = PERF_COLOR_RED;
1118 else if (ratio > 25.0) 1123 else if (ratio > 25.0)
1119 color = PERF_COLOR_GREEN; 1124 color = PERF_COLOR_GREEN;
1120 else if (ratio > 5.0) 1125 else if (ratio > 5.0)
1121 color = PERF_COLOR_YELLOW; 1126 color = PERF_COLOR_YELLOW;
1122 1127
1123 printed += color_fprintf(fp, color, "%20s", thread->comm); 1128 printed += color_fprintf(fp, color, "%20s", thread->comm);
1124 printed += fprintf(fp, " - %-5d :%11lu [", thread->tid, ttrace->nr_events); 1129 printed += fprintf(fp, " - %-5d :%11lu [", thread->tid, ttrace->nr_events);
1125 printed += color_fprintf(fp, color, "%5.1f%%", ratio); 1130 printed += color_fprintf(fp, color, "%5.1f%%", ratio);
1126 printed += fprintf(fp, " ] %10.3f ms\n", ttrace->runtime_ms); 1131 printed += fprintf(fp, " ] %10.3f ms\n", ttrace->runtime_ms);
1127 } 1132 }
1128 1133
1129 return printed; 1134 return printed;
1130 } 1135 }
1131 1136
1132 static int trace__set_duration(const struct option *opt, const char *str, 1137 static int trace__set_duration(const struct option *opt, const char *str,
1133 int unset __maybe_unused) 1138 int unset __maybe_unused)
1134 { 1139 {
1135 struct trace *trace = opt->value; 1140 struct trace *trace = opt->value;
1136 1141
1137 trace->duration_filter = atof(str); 1142 trace->duration_filter = atof(str);
1138 return 0; 1143 return 0;
1139 } 1144 }
1140 1145
1141 static int trace__open_output(struct trace *trace, const char *filename) 1146 static int trace__open_output(struct trace *trace, const char *filename)
1142 { 1147 {
1143 struct stat st; 1148 struct stat st;
1144 1149
1145 if (!stat(filename, &st) && st.st_size) { 1150 if (!stat(filename, &st) && st.st_size) {
1146 char oldname[PATH_MAX]; 1151 char oldname[PATH_MAX];
1147 1152
1148 scnprintf(oldname, sizeof(oldname), "%s.old", filename); 1153 scnprintf(oldname, sizeof(oldname), "%s.old", filename);
1149 unlink(oldname); 1154 unlink(oldname);
1150 rename(filename, oldname); 1155 rename(filename, oldname);
1151 } 1156 }
1152 1157
1153 trace->output = fopen(filename, "w"); 1158 trace->output = fopen(filename, "w");
1154 1159
1155 return trace->output == NULL ? -errno : 0; 1160 return trace->output == NULL ? -errno : 0;
1156 } 1161 }
1157 1162
1158 int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused) 1163 int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
1159 { 1164 {
1160 const char * const trace_usage[] = { 1165 const char * const trace_usage[] = {
1161 "perf trace [<options>] [<command>]", 1166 "perf trace [<options>] [<command>]",
1162 "perf trace [<options>] -- <command> [<options>]", 1167 "perf trace [<options>] -- <command> [<options>]",
1163 NULL 1168 NULL
1164 }; 1169 };
1165 struct trace trace = { 1170 struct trace trace = {
1166 .audit_machine = audit_detect_machine(), 1171 .audit_machine = audit_detect_machine(),
1167 .syscalls = { 1172 .syscalls = {
1168 . max = -1, 1173 . max = -1,
1169 }, 1174 },
1170 .opts = { 1175 .opts = {
1171 .target = { 1176 .target = {
1172 .uid = UINT_MAX, 1177 .uid = UINT_MAX,
1173 .uses_mmap = true, 1178 .uses_mmap = true,
1174 }, 1179 },
1175 .user_freq = UINT_MAX, 1180 .user_freq = UINT_MAX,
1176 .user_interval = ULLONG_MAX, 1181 .user_interval = ULLONG_MAX,
1177 .no_delay = true, 1182 .no_delay = true,
1178 .mmap_pages = 1024, 1183 .mmap_pages = 1024,
1179 }, 1184 },
1180 .output = stdout, 1185 .output = stdout,
1181 }; 1186 };
1182 const char *output_name = NULL; 1187 const char *output_name = NULL;
1183 const char *ev_qualifier_str = NULL; 1188 const char *ev_qualifier_str = NULL;
1184 const struct option trace_options[] = { 1189 const struct option trace_options[] = {
1185 OPT_STRING('e', "expr", &ev_qualifier_str, "expr", 1190 OPT_STRING('e', "expr", &ev_qualifier_str, "expr",
1186 "list of events to trace"), 1191 "list of events to trace"),
1187 OPT_STRING('o', "output", &output_name, "file", "output file name"), 1192 OPT_STRING('o', "output", &output_name, "file", "output file name"),
1188 OPT_STRING('i', "input", &input_name, "file", "Analyze events in file"), 1193 OPT_STRING('i', "input", &input_name, "file", "Analyze events in file"),
1189 OPT_STRING('p', "pid", &trace.opts.target.pid, "pid", 1194 OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
1190 "trace events on existing process id"), 1195 "trace events on existing process id"),
1191 OPT_STRING('t', "tid", &trace.opts.target.tid, "tid", 1196 OPT_STRING('t', "tid", &trace.opts.target.tid, "tid",
1192 "trace events on existing thread id"), 1197 "trace events on existing thread id"),
1193 OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide, 1198 OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide,
1194 "system-wide collection from all CPUs"), 1199 "system-wide collection from all CPUs"),
1195 OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu", 1200 OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu",
1196 "list of cpus to monitor"), 1201 "list of cpus to monitor"),
1197 OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit, 1202 OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
1198 "child tasks do not inherit counters"), 1203 "child tasks do not inherit counters"),
1199 OPT_UINTEGER('m', "mmap-pages", &trace.opts.mmap_pages, 1204 OPT_UINTEGER('m', "mmap-pages", &trace.opts.mmap_pages,
1200 "number of mmap data pages"), 1205 "number of mmap data pages"),
1201 OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user", 1206 OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user",
1202 "user to profile"), 1207 "user to profile"),
1203 OPT_CALLBACK(0, "duration", &trace, "float", 1208 OPT_CALLBACK(0, "duration", &trace, "float",
1204 "show only events with duration > N.M ms", 1209 "show only events with duration > N.M ms",
1205 trace__set_duration), 1210 trace__set_duration),
1206 OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"), 1211 OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
1207 OPT_INCR('v', "verbose", &verbose, "be more verbose"), 1212 OPT_INCR('v', "verbose", &verbose, "be more verbose"),
1208 OPT_END() 1213 OPT_END()
1209 }; 1214 };
1210 int err; 1215 int err;
1211 char bf[BUFSIZ]; 1216 char bf[BUFSIZ];
1212 1217
1213 argc = parse_options(argc, argv, trace_options, trace_usage, 0); 1218 argc = parse_options(argc, argv, trace_options, trace_usage, 0);
1214 1219
1215 if (output_name != NULL) { 1220 if (output_name != NULL) {
1216 err = trace__open_output(&trace, output_name); 1221 err = trace__open_output(&trace, output_name);
1217 if (err < 0) { 1222 if (err < 0) {
1218 perror("failed to create output file"); 1223 perror("failed to create output file");
1219 goto out; 1224 goto out;
1220 } 1225 }
1221 } 1226 }
1222 1227
1223 if (ev_qualifier_str != NULL) { 1228 if (ev_qualifier_str != NULL) {
1224 const char *s = ev_qualifier_str; 1229 const char *s = ev_qualifier_str;
1225 1230
1226 trace.not_ev_qualifier = *s == '!'; 1231 trace.not_ev_qualifier = *s == '!';
1227 if (trace.not_ev_qualifier) 1232 if (trace.not_ev_qualifier)
1228 ++s; 1233 ++s;
1229 trace.ev_qualifier = strlist__new(true, s); 1234 trace.ev_qualifier = strlist__new(true, s);
1230 if (trace.ev_qualifier == NULL) { 1235 if (trace.ev_qualifier == NULL) {
1231 fputs("Not enough memory to parse event qualifier", 1236 fputs("Not enough memory to parse event qualifier",
1232 trace.output); 1237 trace.output);
1233 err = -ENOMEM; 1238 err = -ENOMEM;
1234 goto out_close; 1239 goto out_close;
1235 } 1240 }
1236 } 1241 }
1237 1242
1238 err = perf_target__validate(&trace.opts.target); 1243 err = perf_target__validate(&trace.opts.target);
1239 if (err) { 1244 if (err) {
1240 perf_target__strerror(&trace.opts.target, err, bf, sizeof(bf)); 1245 perf_target__strerror(&trace.opts.target, err, bf, sizeof(bf));
1241 fprintf(trace.output, "%s", bf); 1246 fprintf(trace.output, "%s", bf);
1242 goto out_close; 1247 goto out_close;
1243 } 1248 }
1244 1249
1245 err = perf_target__parse_uid(&trace.opts.target); 1250 err = perf_target__parse_uid(&trace.opts.target);
1246 if (err) { 1251 if (err) {
1247 perf_target__strerror(&trace.opts.target, err, bf, sizeof(bf)); 1252 perf_target__strerror(&trace.opts.target, err, bf, sizeof(bf));
1248 fprintf(trace.output, "%s", bf); 1253 fprintf(trace.output, "%s", bf);
1249 goto out_close; 1254 goto out_close;
1250 } 1255 }
1251 1256
1252 if (!argc && perf_target__none(&trace.opts.target)) 1257 if (!argc && perf_target__none(&trace.opts.target))
1253 trace.opts.target.system_wide = true; 1258 trace.opts.target.system_wide = true;
1254 1259
1255 if (input_name) 1260 if (input_name)
1256 err = trace__replay(&trace); 1261 err = trace__replay(&trace);
1257 else 1262 else
1258 err = trace__run(&trace, argc, argv); 1263 err = trace__run(&trace, argc, argv);
1259 1264
1260 if (trace.sched && !err) 1265 if (trace.sched && !err)
1261 trace__fprintf_thread_summary(&trace, trace.output); 1266 trace__fprintf_thread_summary(&trace, trace.output);
1262 1267
1263 out_close: 1268 out_close:
1264 if (output_name != NULL) 1269 if (output_name != NULL)
1265 fclose(trace.output); 1270 fclose(trace.output);
1266 out: 1271 out:
1267 return err; 1272 return err;
1268 } 1273 }
1269 1274
tools/perf/tests/builtin-test.c
1 /* 1 /*
2 * builtin-test.c 2 * builtin-test.c
3 * 3 *
4 * Builtin regression testing command: ever growing number of sanity tests 4 * Builtin regression testing command: ever growing number of sanity tests
5 */ 5 */
6 #include "builtin.h" 6 #include "builtin.h"
7 #include "intlist.h" 7 #include "intlist.h"
8 #include "tests.h" 8 #include "tests.h"
9 #include "debug.h" 9 #include "debug.h"
10 #include "color.h" 10 #include "color.h"
11 #include "parse-options.h" 11 #include "parse-options.h"
12 #include "symbol.h" 12 #include "symbol.h"
13 13
14 static struct test { 14 static struct test {
15 const char *desc; 15 const char *desc;
16 int (*func)(void); 16 int (*func)(void);
17 } tests[] = { 17 } tests[] = {
18 { 18 {
19 .desc = "vmlinux symtab matches kallsyms", 19 .desc = "vmlinux symtab matches kallsyms",
20 .func = test__vmlinux_matches_kallsyms, 20 .func = test__vmlinux_matches_kallsyms,
21 }, 21 },
22 { 22 {
23 .desc = "detect open syscall event", 23 .desc = "detect open syscall event",
24 .func = test__open_syscall_event, 24 .func = test__open_syscall_event,
25 }, 25 },
26 { 26 {
27 .desc = "detect open syscall event on all cpus", 27 .desc = "detect open syscall event on all cpus",
28 .func = test__open_syscall_event_on_all_cpus, 28 .func = test__open_syscall_event_on_all_cpus,
29 }, 29 },
30 { 30 {
31 .desc = "read samples using the mmap interface", 31 .desc = "read samples using the mmap interface",
32 .func = test__basic_mmap, 32 .func = test__basic_mmap,
33 }, 33 },
34 { 34 {
35 .desc = "parse events tests", 35 .desc = "parse events tests",
36 .func = test__parse_events, 36 .func = test__parse_events,
37 }, 37 },
38 #if defined(__x86_64__) || defined(__i386__) 38 #if defined(__x86_64__) || defined(__i386__)
39 { 39 {
40 .desc = "x86 rdpmc test", 40 .desc = "x86 rdpmc test",
41 .func = test__rdpmc, 41 .func = test__rdpmc,
42 }, 42 },
43 #endif 43 #endif
44 { 44 {
45 .desc = "Validate PERF_RECORD_* events & perf_sample fields", 45 .desc = "Validate PERF_RECORD_* events & perf_sample fields",
46 .func = test__PERF_RECORD, 46 .func = test__PERF_RECORD,
47 }, 47 },
48 { 48 {
49 .desc = "Test perf pmu format parsing", 49 .desc = "Test perf pmu format parsing",
50 .func = test__pmu, 50 .func = test__pmu,
51 }, 51 },
52 { 52 {
53 .desc = "Test dso data interface", 53 .desc = "Test dso data interface",
54 .func = test__dso_data, 54 .func = test__dso_data,
55 }, 55 },
56 { 56 {
57 .desc = "roundtrip evsel->name check", 57 .desc = "roundtrip evsel->name check",
58 .func = test__perf_evsel__roundtrip_name_test, 58 .func = test__perf_evsel__roundtrip_name_test,
59 }, 59 },
60 { 60 {
61 .desc = "Check parsing of sched tracepoints fields", 61 .desc = "Check parsing of sched tracepoints fields",
62 .func = test__perf_evsel__tp_sched_test, 62 .func = test__perf_evsel__tp_sched_test,
63 }, 63 },
64 { 64 {
65 .desc = "Generate and check syscalls:sys_enter_open event fields", 65 .desc = "Generate and check syscalls:sys_enter_open event fields",
66 .func = test__syscall_open_tp_fields, 66 .func = test__syscall_open_tp_fields,
67 }, 67 },
68 { 68 {
69 .desc = "struct perf_event_attr setup", 69 .desc = "struct perf_event_attr setup",
70 .func = test__attr, 70 .func = test__attr,
71 }, 71 },
72 { 72 {
73 .desc = "Test matching and linking multiple hists", 73 .desc = "Test matching and linking multiple hists",
74 .func = test__hists_link, 74 .func = test__hists_link,
75 }, 75 },
76 { 76 {
77 .desc = "Try 'use perf' in python, checking link problems", 77 .desc = "Try 'use perf' in python, checking link problems",
78 .func = test__python_use, 78 .func = test__python_use,
79 }, 79 },
80 { 80 {
81 .desc = "Test breakpoint overflow signal handler", 81 .desc = "Test breakpoint overflow signal handler",
82 .func = test__bp_signal, 82 .func = test__bp_signal,
83 }, 83 },
84 { 84 {
85 .desc = "Test breakpoint overflow sampling", 85 .desc = "Test breakpoint overflow sampling",
86 .func = test__bp_signal_overflow, 86 .func = test__bp_signal_overflow,
87 }, 87 },
88 { 88 {
89 .desc = "Test number of exit event of a simple workload", 89 .desc = "Test number of exit event of a simple workload",
90 .func = test__task_exit, 90 .func = test__task_exit,
91 }, 91 },
92 { 92 {
93 .desc = "Test software clock events have valid period values", 93 .desc = "Test software clock events have valid period values",
94 .func = test__sw_clock_freq, 94 .func = test__sw_clock_freq,
95 }, 95 },
96 #if defined(__x86_64__) || defined(__i386__) 96 #if defined(__x86_64__) || defined(__i386__)
97 { 97 {
98 .desc = "Test converting perf time to TSC", 98 .desc = "Test converting perf time to TSC",
99 .func = test__perf_time_to_tsc, 99 .func = test__perf_time_to_tsc,
100 }, 100 },
101 #endif 101 #endif
102 { 102 {
103 .desc = "Test object code reading", 103 .desc = "Test object code reading",
104 .func = test__code_reading, 104 .func = test__code_reading,
105 }, 105 },
106 { 106 {
107 .desc = "Test sample parsing", 107 .desc = "Test sample parsing",
108 .func = test__sample_parsing, 108 .func = test__sample_parsing,
109 }, 109 },
110 { 110 {
111 .desc = "Test using a dummy software event to keep tracking", 111 .desc = "Test using a dummy software event to keep tracking",
112 .func = test__keep_tracking, 112 .func = test__keep_tracking,
113 }, 113 },
114 { 114 {
115 .desc = "Test parsing with no sample_id_all bit set",
116 .func = test__parse_no_sample_id_all,
117 },
118 {
115 .func = NULL, 119 .func = NULL,
116 }, 120 },
117 }; 121 };
118 122
119 static bool perf_test__matches(int curr, int argc, const char *argv[]) 123 static bool perf_test__matches(int curr, int argc, const char *argv[])
120 { 124 {
121 int i; 125 int i;
122 126
123 if (argc == 0) 127 if (argc == 0)
124 return true; 128 return true;
125 129
126 for (i = 0; i < argc; ++i) { 130 for (i = 0; i < argc; ++i) {
127 char *end; 131 char *end;
128 long nr = strtoul(argv[i], &end, 10); 132 long nr = strtoul(argv[i], &end, 10);
129 133
130 if (*end == '\0') { 134 if (*end == '\0') {
131 if (nr == curr + 1) 135 if (nr == curr + 1)
132 return true; 136 return true;
133 continue; 137 continue;
134 } 138 }
135 139
136 if (strstr(tests[curr].desc, argv[i])) 140 if (strstr(tests[curr].desc, argv[i]))
137 return true; 141 return true;
138 } 142 }
139 143
140 return false; 144 return false;
141 } 145 }
142 146
143 static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist) 147 static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist)
144 { 148 {
145 int i = 0; 149 int i = 0;
146 int width = 0; 150 int width = 0;
147 151
148 while (tests[i].func) { 152 while (tests[i].func) {
149 int len = strlen(tests[i].desc); 153 int len = strlen(tests[i].desc);
150 154
151 if (width < len) 155 if (width < len)
152 width = len; 156 width = len;
153 ++i; 157 ++i;
154 } 158 }
155 159
156 i = 0; 160 i = 0;
157 while (tests[i].func) { 161 while (tests[i].func) {
158 int curr = i++, err; 162 int curr = i++, err;
159 163
160 if (!perf_test__matches(curr, argc, argv)) 164 if (!perf_test__matches(curr, argc, argv))
161 continue; 165 continue;
162 166
163 pr_info("%2d: %-*s:", i, width, tests[curr].desc); 167 pr_info("%2d: %-*s:", i, width, tests[curr].desc);
164 168
165 if (intlist__find(skiplist, i)) { 169 if (intlist__find(skiplist, i)) {
166 color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip (user override)\n"); 170 color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip (user override)\n");
167 continue; 171 continue;
168 } 172 }
169 173
170 pr_debug("\n--- start ---\n"); 174 pr_debug("\n--- start ---\n");
171 err = tests[curr].func(); 175 err = tests[curr].func();
172 pr_debug("---- end ----\n%s:", tests[curr].desc); 176 pr_debug("---- end ----\n%s:", tests[curr].desc);
173 177
174 switch (err) { 178 switch (err) {
175 case TEST_OK: 179 case TEST_OK:
176 pr_info(" Ok\n"); 180 pr_info(" Ok\n");
177 break; 181 break;
178 case TEST_SKIP: 182 case TEST_SKIP:
179 color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip\n"); 183 color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip\n");
180 break; 184 break;
181 case TEST_FAIL: 185 case TEST_FAIL:
182 default: 186 default:
183 color_fprintf(stderr, PERF_COLOR_RED, " FAILED!\n"); 187 color_fprintf(stderr, PERF_COLOR_RED, " FAILED!\n");
184 break; 188 break;
185 } 189 }
186 } 190 }
187 191
188 return 0; 192 return 0;
189 } 193 }
190 194
191 static int perf_test__list(int argc, const char **argv) 195 static int perf_test__list(int argc, const char **argv)
192 { 196 {
193 int i = 0; 197 int i = 0;
194 198
195 while (tests[i].func) { 199 while (tests[i].func) {
196 int curr = i++; 200 int curr = i++;
197 201
198 if (argc > 1 && !strstr(tests[curr].desc, argv[1])) 202 if (argc > 1 && !strstr(tests[curr].desc, argv[1]))
199 continue; 203 continue;
200 204
201 pr_info("%2d: %s\n", i, tests[curr].desc); 205 pr_info("%2d: %s\n", i, tests[curr].desc);
202 } 206 }
203 207
204 return 0; 208 return 0;
205 } 209 }
206 210
207 int cmd_test(int argc, const char **argv, const char *prefix __maybe_unused) 211 int cmd_test(int argc, const char **argv, const char *prefix __maybe_unused)
208 { 212 {
209 const char * const test_usage[] = { 213 const char * const test_usage[] = {
210 "perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]", 214 "perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]",
211 NULL, 215 NULL,
212 }; 216 };
213 const char *skip = NULL; 217 const char *skip = NULL;
214 const struct option test_options[] = { 218 const struct option test_options[] = {
215 OPT_STRING('s', "skip", &skip, "tests", "tests to skip"), 219 OPT_STRING('s', "skip", &skip, "tests", "tests to skip"),
216 OPT_INCR('v', "verbose", &verbose, 220 OPT_INCR('v', "verbose", &verbose,
217 "be more verbose (show symbol address, etc)"), 221 "be more verbose (show symbol address, etc)"),
218 OPT_END() 222 OPT_END()
219 }; 223 };
220 struct intlist *skiplist = NULL; 224 struct intlist *skiplist = NULL;
221 225
222 argc = parse_options(argc, argv, test_options, test_usage, 0); 226 argc = parse_options(argc, argv, test_options, test_usage, 0);
223 if (argc >= 1 && !strcmp(argv[0], "list")) 227 if (argc >= 1 && !strcmp(argv[0], "list"))
224 return perf_test__list(argc, argv); 228 return perf_test__list(argc, argv);
225 229
226 symbol_conf.priv_size = sizeof(int); 230 symbol_conf.priv_size = sizeof(int);
227 symbol_conf.sort_by_name = true; 231 symbol_conf.sort_by_name = true;
228 symbol_conf.try_vmlinux_path = true; 232 symbol_conf.try_vmlinux_path = true;
229 233
230 if (symbol__init() < 0) 234 if (symbol__init() < 0)
231 return -1; 235 return -1;
232 236
233 if (skip != NULL) 237 if (skip != NULL)
234 skiplist = intlist__new(skip); 238 skiplist = intlist__new(skip);
235 239
236 return __cmd_test(argc, argv, skiplist); 240 return __cmd_test(argc, argv, skiplist);
237 } 241 }
238 242
tools/perf/tests/parse-no-sample-id-all.c
File was created 1 #include <sys/types.h>
2 #include <stddef.h>
3
4 #include "tests.h"
5
6 #include "event.h"
7 #include "evlist.h"
8 #include "header.h"
9 #include "util.h"
10
11 static int process_event(struct perf_evlist **pevlist, union perf_event *event)
12 {
13 struct perf_sample sample;
14
15 if (event->header.type == PERF_RECORD_HEADER_ATTR) {
16 if (perf_event__process_attr(NULL, event, pevlist)) {
17 pr_debug("perf_event__process_attr failed\n");
18 return -1;
19 }
20 return 0;
21 }
22
23 if (event->header.type >= PERF_RECORD_USER_TYPE_START)
24 return -1;
25
26 if (!*pevlist)
27 return -1;
28
29 if (perf_evlist__parse_sample(*pevlist, event, &sample)) {
30 pr_debug("perf_evlist__parse_sample failed\n");
31 return -1;
32 }
33
34 return 0;
35 }
36
37 static int process_events(union perf_event **events, size_t count)
38 {
39 struct perf_evlist *evlist = NULL;
40 int err = 0;
41 size_t i;
42
43 for (i = 0; i < count && !err; i++)
44 err = process_event(&evlist, events[i]);
45
46 if (evlist)
47 perf_evlist__delete(evlist);
48
49 return err;
50 }
51
52 struct test_attr_event {
53 struct attr_event attr;
54 u64 id;
55 };
56
57 /**
58 * test__parse_no_sample_id_all - test parsing with no sample_id_all bit set.
59 *
60 * This function tests parsing data produced on kernel's that do not support the
61 * sample_id_all bit. Without the sample_id_all bit, non-sample events (such as
62 * mmap events) do not have an id sample appended, and consequently logic
63 * designed to determine the id will not work. That case happens when there is
64 * more than one selected event, so this test processes three events: 2
65 * attributes representing the selected events and one mmap event.
66 *
67 * Return: %0 on success, %-1 if the test fails.
68 */
69 int test__parse_no_sample_id_all(void)
70 {
71 int err;
72
73 struct test_attr_event event1 = {
74 .attr = {
75 .header = {
76 .type = PERF_RECORD_HEADER_ATTR,
77 .size = sizeof(struct test_attr_event),
78 },
79 },
80 .id = 1,
81 };
82 struct test_attr_event event2 = {
83 .attr = {
84 .header = {
85 .type = PERF_RECORD_HEADER_ATTR,
86 .size = sizeof(struct test_attr_event),
87 },
88 },
89 .id = 2,
90 };
91 struct mmap_event event3 = {
92 .header = {
93 .type = PERF_RECORD_MMAP,
94 .size = sizeof(struct mmap_event),
95 },
96 };
97 union perf_event *events[] = {
98 (union perf_event *)&event1,
99 (union perf_event *)&event2,
100 (union perf_event *)&event3,
101 };
102
103 err = process_events(events, ARRAY_SIZE(events));
104 if (err)
105 return -1;
106
107 return 0;
108 }
109
tools/perf/tests/tests.h
1 #ifndef TESTS_H 1 #ifndef TESTS_H
2 #define TESTS_H 2 #define TESTS_H
3 3
4 #define TEST_ASSERT_VAL(text, cond) \ 4 #define TEST_ASSERT_VAL(text, cond) \
5 do { \ 5 do { \
6 if (!(cond)) { \ 6 if (!(cond)) { \
7 pr_debug("FAILED %s:%d %s\n", __FILE__, __LINE__, text); \ 7 pr_debug("FAILED %s:%d %s\n", __FILE__, __LINE__, text); \
8 return -1; \ 8 return -1; \
9 } \ 9 } \
10 } while (0) 10 } while (0)
11 11
12 enum { 12 enum {
13 TEST_OK = 0, 13 TEST_OK = 0,
14 TEST_FAIL = -1, 14 TEST_FAIL = -1,
15 TEST_SKIP = -2, 15 TEST_SKIP = -2,
16 }; 16 };
17 17
18 /* Tests */ 18 /* Tests */
19 int test__vmlinux_matches_kallsyms(void); 19 int test__vmlinux_matches_kallsyms(void);
20 int test__open_syscall_event(void); 20 int test__open_syscall_event(void);
21 int test__open_syscall_event_on_all_cpus(void); 21 int test__open_syscall_event_on_all_cpus(void);
22 int test__basic_mmap(void); 22 int test__basic_mmap(void);
23 int test__PERF_RECORD(void); 23 int test__PERF_RECORD(void);
24 int test__rdpmc(void); 24 int test__rdpmc(void);
25 int test__perf_evsel__roundtrip_name_test(void); 25 int test__perf_evsel__roundtrip_name_test(void);
26 int test__perf_evsel__tp_sched_test(void); 26 int test__perf_evsel__tp_sched_test(void);
27 int test__syscall_open_tp_fields(void); 27 int test__syscall_open_tp_fields(void);
28 int test__pmu(void); 28 int test__pmu(void);
29 int test__attr(void); 29 int test__attr(void);
30 int test__dso_data(void); 30 int test__dso_data(void);
31 int test__parse_events(void); 31 int test__parse_events(void);
32 int test__hists_link(void); 32 int test__hists_link(void);
33 int test__python_use(void); 33 int test__python_use(void);
34 int test__bp_signal(void); 34 int test__bp_signal(void);
35 int test__bp_signal_overflow(void); 35 int test__bp_signal_overflow(void);
36 int test__task_exit(void); 36 int test__task_exit(void);
37 int test__sw_clock_freq(void); 37 int test__sw_clock_freq(void);
38 int test__perf_time_to_tsc(void); 38 int test__perf_time_to_tsc(void);
39 int test__code_reading(void); 39 int test__code_reading(void);
40 int test__sample_parsing(void); 40 int test__sample_parsing(void);
41 int test__keep_tracking(void); 41 int test__keep_tracking(void);
42 int test__parse_no_sample_id_all(void);
42 43
43 #endif /* TESTS_H */ 44 #endif /* TESTS_H */
44 45
tools/perf/ui/stdio/hist.c
1 #include <stdio.h> 1 #include <stdio.h>
2 2
3 #include "../../util/util.h" 3 #include "../../util/util.h"
4 #include "../../util/hist.h" 4 #include "../../util/hist.h"
5 #include "../../util/sort.h" 5 #include "../../util/sort.h"
6 #include "../../util/evsel.h" 6 #include "../../util/evsel.h"
7 7
8 8
9 static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin) 9 static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
10 { 10 {
11 int i; 11 int i;
12 int ret = fprintf(fp, " "); 12 int ret = fprintf(fp, " ");
13 13
14 for (i = 0; i < left_margin; i++) 14 for (i = 0; i < left_margin; i++)
15 ret += fprintf(fp, " "); 15 ret += fprintf(fp, " ");
16 16
17 return ret; 17 return ret;
18 } 18 }
19 19
20 static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask, 20 static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
21 int left_margin) 21 int left_margin)
22 { 22 {
23 int i; 23 int i;
24 size_t ret = callchain__fprintf_left_margin(fp, left_margin); 24 size_t ret = callchain__fprintf_left_margin(fp, left_margin);
25 25
26 for (i = 0; i < depth; i++) 26 for (i = 0; i < depth; i++)
27 if (depth_mask & (1 << i)) 27 if (depth_mask & (1 << i))
28 ret += fprintf(fp, "| "); 28 ret += fprintf(fp, "| ");
29 else 29 else
30 ret += fprintf(fp, " "); 30 ret += fprintf(fp, " ");
31 31
32 ret += fprintf(fp, "\n"); 32 ret += fprintf(fp, "\n");
33 33
34 return ret; 34 return ret;
35 } 35 }
36 36
37 static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain, 37 static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain,
38 int depth, int depth_mask, int period, 38 int depth, int depth_mask, int period,
39 u64 total_samples, u64 hits, 39 u64 total_samples, u64 hits,
40 int left_margin) 40 int left_margin)
41 { 41 {
42 int i; 42 int i;
43 size_t ret = 0; 43 size_t ret = 0;
44 44
45 ret += callchain__fprintf_left_margin(fp, left_margin); 45 ret += callchain__fprintf_left_margin(fp, left_margin);
46 for (i = 0; i < depth; i++) { 46 for (i = 0; i < depth; i++) {
47 if (depth_mask & (1 << i)) 47 if (depth_mask & (1 << i))
48 ret += fprintf(fp, "|"); 48 ret += fprintf(fp, "|");
49 else 49 else
50 ret += fprintf(fp, " "); 50 ret += fprintf(fp, " ");
51 if (!period && i == depth - 1) { 51 if (!period && i == depth - 1) {
52 double percent; 52 double percent;
53 53
54 percent = hits * 100.0 / total_samples; 54 percent = hits * 100.0 / total_samples;
55 ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent); 55 ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent);
56 } else 56 } else
57 ret += fprintf(fp, "%s", " "); 57 ret += fprintf(fp, "%s", " ");
58 } 58 }
59 if (chain->ms.sym) 59 if (chain->ms.sym)
60 ret += fprintf(fp, "%s\n", chain->ms.sym->name); 60 ret += fprintf(fp, "%s\n", chain->ms.sym->name);
61 else 61 else
62 ret += fprintf(fp, "0x%0" PRIx64 "\n", chain->ip); 62 ret += fprintf(fp, "0x%0" PRIx64 "\n", chain->ip);
63 63
64 return ret; 64 return ret;
65 } 65 }
66 66
67 static struct symbol *rem_sq_bracket; 67 static struct symbol *rem_sq_bracket;
68 static struct callchain_list rem_hits; 68 static struct callchain_list rem_hits;
69 69
70 static void init_rem_hits(void) 70 static void init_rem_hits(void)
71 { 71 {
72 rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6); 72 rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
73 if (!rem_sq_bracket) { 73 if (!rem_sq_bracket) {
74 fprintf(stderr, "Not enough memory to display remaining hits\n"); 74 fprintf(stderr, "Not enough memory to display remaining hits\n");
75 return; 75 return;
76 } 76 }
77 77
78 strcpy(rem_sq_bracket->name, "[...]"); 78 strcpy(rem_sq_bracket->name, "[...]");
79 rem_hits.ms.sym = rem_sq_bracket; 79 rem_hits.ms.sym = rem_sq_bracket;
80 } 80 }
81 81
82 static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root, 82 static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
83 u64 total_samples, int depth, 83 u64 total_samples, int depth,
84 int depth_mask, int left_margin) 84 int depth_mask, int left_margin)
85 { 85 {
86 struct rb_node *node, *next; 86 struct rb_node *node, *next;
87 struct callchain_node *child; 87 struct callchain_node *child;
88 struct callchain_list *chain; 88 struct callchain_list *chain;
89 int new_depth_mask = depth_mask; 89 int new_depth_mask = depth_mask;
90 u64 remaining; 90 u64 remaining;
91 size_t ret = 0; 91 size_t ret = 0;
92 int i; 92 int i;
93 uint entries_printed = 0; 93 uint entries_printed = 0;
94 94
95 remaining = total_samples; 95 remaining = total_samples;
96 96
97 node = rb_first(root); 97 node = rb_first(root);
98 while (node) { 98 while (node) {
99 u64 new_total; 99 u64 new_total;
100 u64 cumul; 100 u64 cumul;
101 101
102 child = rb_entry(node, struct callchain_node, rb_node); 102 child = rb_entry(node, struct callchain_node, rb_node);
103 cumul = callchain_cumul_hits(child); 103 cumul = callchain_cumul_hits(child);
104 remaining -= cumul; 104 remaining -= cumul;
105 105
106 /* 106 /*
107 * The depth mask manages the output of pipes that show 107 * The depth mask manages the output of pipes that show
108 * the depth. We don't want to keep the pipes of the current 108 * the depth. We don't want to keep the pipes of the current
109 * level for the last child of this depth. 109 * level for the last child of this depth.
110 * Except if we have remaining filtered hits. They will 110 * Except if we have remaining filtered hits. They will
111 * supersede the last child 111 * supersede the last child
112 */ 112 */
113 next = rb_next(node); 113 next = rb_next(node);
114 if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining)) 114 if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
115 new_depth_mask &= ~(1 << (depth - 1)); 115 new_depth_mask &= ~(1 << (depth - 1));
116 116
117 /* 117 /*
118 * But we keep the older depth mask for the line separator 118 * But we keep the older depth mask for the line separator
119 * to keep the level link until we reach the last child 119 * to keep the level link until we reach the last child
120 */ 120 */
121 ret += ipchain__fprintf_graph_line(fp, depth, depth_mask, 121 ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
122 left_margin); 122 left_margin);
123 i = 0; 123 i = 0;
124 list_for_each_entry(chain, &child->val, list) { 124 list_for_each_entry(chain, &child->val, list) {
125 ret += ipchain__fprintf_graph(fp, chain, depth, 125 ret += ipchain__fprintf_graph(fp, chain, depth,
126 new_depth_mask, i++, 126 new_depth_mask, i++,
127 total_samples, 127 total_samples,
128 cumul, 128 cumul,
129 left_margin); 129 left_margin);
130 } 130 }
131 131
132 if (callchain_param.mode == CHAIN_GRAPH_REL) 132 if (callchain_param.mode == CHAIN_GRAPH_REL)
133 new_total = child->children_hit; 133 new_total = child->children_hit;
134 else 134 else
135 new_total = total_samples; 135 new_total = total_samples;
136 136
137 ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total, 137 ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total,
138 depth + 1, 138 depth + 1,
139 new_depth_mask | (1 << depth), 139 new_depth_mask | (1 << depth),
140 left_margin); 140 left_margin);
141 node = next; 141 node = next;
142 if (++entries_printed == callchain_param.print_limit) 142 if (++entries_printed == callchain_param.print_limit)
143 break; 143 break;
144 } 144 }
145 145
146 if (callchain_param.mode == CHAIN_GRAPH_REL && 146 if (callchain_param.mode == CHAIN_GRAPH_REL &&
147 remaining && remaining != total_samples) { 147 remaining && remaining != total_samples) {
148 148
149 if (!rem_sq_bracket) 149 if (!rem_sq_bracket)
150 return ret; 150 return ret;
151 151
152 new_depth_mask &= ~(1 << (depth - 1)); 152 new_depth_mask &= ~(1 << (depth - 1));
153 ret += ipchain__fprintf_graph(fp, &rem_hits, depth, 153 ret += ipchain__fprintf_graph(fp, &rem_hits, depth,
154 new_depth_mask, 0, total_samples, 154 new_depth_mask, 0, total_samples,
155 remaining, left_margin); 155 remaining, left_margin);
156 } 156 }
157 157
158 return ret; 158 return ret;
159 } 159 }
160 160
/*
 * Print a callchain tree in graph mode ("---" branch drawing) to @fp.
 * Returns the number of characters written.
 */
static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
				       u64 total_samples, int left_margin)
{
	struct callchain_node *cnode;
	struct callchain_list *chain;
	u32 entries_printed = 0;
	bool printed = false;
	struct rb_node *node;
	int i = 0;
	int ret = 0;

	/*
	 * If have one single callchain root, don't bother printing
	 * its percentage (100 % in fractal mode and the same percentage
	 * than the hist in graph mode). This also avoid one level of column.
	 */
	node = rb_first(root);
	if (node && !rb_next(node)) {
		cnode = rb_entry(node, struct callchain_node, rb_node);
		list_for_each_entry(chain, &cnode->val, list) {
			/*
			 * If we sort by symbol, the first entry is the same than
			 * the symbol. No need to print it otherwise it appears as
			 * displayed twice.
			 */
			if (!i++ && sort__first_dimension == SORT_SYM)
				continue;
			if (!printed) {
				/* First line printed: draw the "---" branch head. */
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "|\n");
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "---");
				left_margin += 3;
				printed = true;
			} else
				ret += callchain__fprintf_left_margin(fp, left_margin);

			/* Fall back to the raw ip when the symbol is unresolved. */
			if (chain->ms.sym)
				ret += fprintf(fp, " %s\n", chain->ms.sym->name);
			else
				ret += fprintf(fp, " %p\n", (void *)(long)chain->ip);

			if (++entries_printed == callchain_param.print_limit)
				break;
		}
		/* Descend so only the single root's children are graphed below. */
		root = &cnode->rb_root;
	}

	ret += __callchain__fprintf_graph(fp, root, total_samples,
					  1, 1, left_margin);
	ret += fprintf(fp, "\n");

	return ret;
}
215 215
216 static size_t __callchain__fprintf_flat(FILE *fp, 216 static size_t __callchain__fprintf_flat(FILE *fp,
217 struct callchain_node *self, 217 struct callchain_node *self,
218 u64 total_samples) 218 u64 total_samples)
219 { 219 {
220 struct callchain_list *chain; 220 struct callchain_list *chain;
221 size_t ret = 0; 221 size_t ret = 0;
222 222
223 if (!self) 223 if (!self)
224 return 0; 224 return 0;
225 225
226 ret += __callchain__fprintf_flat(fp, self->parent, total_samples); 226 ret += __callchain__fprintf_flat(fp, self->parent, total_samples);
227 227
228 228
229 list_for_each_entry(chain, &self->val, list) { 229 list_for_each_entry(chain, &self->val, list) {
230 if (chain->ip >= PERF_CONTEXT_MAX) 230 if (chain->ip >= PERF_CONTEXT_MAX)
231 continue; 231 continue;
232 if (chain->ms.sym) 232 if (chain->ms.sym)
233 ret += fprintf(fp, " %s\n", chain->ms.sym->name); 233 ret += fprintf(fp, " %s\n", chain->ms.sym->name);
234 else 234 else
235 ret += fprintf(fp, " %p\n", 235 ret += fprintf(fp, " %p\n",
236 (void *)(long)chain->ip); 236 (void *)(long)chain->ip);
237 } 237 }
238 238
239 return ret; 239 return ret;
240 } 240 }
241 241
242 static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *self, 242 static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *self,
243 u64 total_samples) 243 u64 total_samples)
244 { 244 {
245 size_t ret = 0; 245 size_t ret = 0;
246 u32 entries_printed = 0; 246 u32 entries_printed = 0;
247 struct rb_node *rb_node; 247 struct rb_node *rb_node;
248 struct callchain_node *chain; 248 struct callchain_node *chain;
249 249
250 rb_node = rb_first(self); 250 rb_node = rb_first(self);
251 while (rb_node) { 251 while (rb_node) {
252 double percent; 252 double percent;
253 253
254 chain = rb_entry(rb_node, struct callchain_node, rb_node); 254 chain = rb_entry(rb_node, struct callchain_node, rb_node);
255 percent = chain->hit * 100.0 / total_samples; 255 percent = chain->hit * 100.0 / total_samples;
256 256
257 ret = percent_color_fprintf(fp, " %6.2f%%\n", percent); 257 ret = percent_color_fprintf(fp, " %6.2f%%\n", percent);
258 ret += __callchain__fprintf_flat(fp, chain, total_samples); 258 ret += __callchain__fprintf_flat(fp, chain, total_samples);
259 ret += fprintf(fp, "\n"); 259 ret += fprintf(fp, "\n");
260 if (++entries_printed == callchain_param.print_limit) 260 if (++entries_printed == callchain_param.print_limit)
261 break; 261 break;
262 262
263 rb_node = rb_next(rb_node); 263 rb_node = rb_next(rb_node);
264 } 264 }
265 265
266 return ret; 266 return ret;
267 } 267 }
268 268
269 static size_t hist_entry_callchain__fprintf(struct hist_entry *he, 269 static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
270 u64 total_samples, int left_margin, 270 u64 total_samples, int left_margin,
271 FILE *fp) 271 FILE *fp)
272 { 272 {
273 switch (callchain_param.mode) { 273 switch (callchain_param.mode) {
274 case CHAIN_GRAPH_REL: 274 case CHAIN_GRAPH_REL:
275 return callchain__fprintf_graph(fp, &he->sorted_chain, he->stat.period, 275 return callchain__fprintf_graph(fp, &he->sorted_chain, he->stat.period,
276 left_margin); 276 left_margin);
277 break; 277 break;
278 case CHAIN_GRAPH_ABS: 278 case CHAIN_GRAPH_ABS:
279 return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples, 279 return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
280 left_margin); 280 left_margin);
281 break; 281 break;
282 case CHAIN_FLAT: 282 case CHAIN_FLAT:
283 return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples); 283 return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
284 break; 284 break;
285 case CHAIN_NONE: 285 case CHAIN_NONE:
286 break; 286 break;
287 default: 287 default:
288 pr_err("Bad callchain mode\n"); 288 pr_err("Bad callchain mode\n");
289 } 289 }
290 290
291 return 0; 291 return 0;
292 } 292 }
293 293
294 static size_t hist_entry__callchain_fprintf(struct hist_entry *he, 294 static size_t hist_entry__callchain_fprintf(struct hist_entry *he,
295 struct hists *hists, 295 struct hists *hists,
296 FILE *fp) 296 FILE *fp)
297 { 297 {
298 int left_margin = 0; 298 int left_margin = 0;
299 u64 total_period = hists->stats.total_period; 299 u64 total_period = hists->stats.total_period;
300 300
301 if (sort__first_dimension == SORT_COMM) { 301 if (sort__first_dimension == SORT_COMM) {
302 struct sort_entry *se = list_first_entry(&hist_entry__sort_list, 302 struct sort_entry *se = list_first_entry(&hist_entry__sort_list,
303 typeof(*se), list); 303 typeof(*se), list);
304 left_margin = hists__col_len(hists, se->se_width_idx); 304 left_margin = hists__col_len(hists, se->se_width_idx);
305 left_margin -= thread__comm_len(he->thread); 305 left_margin -= thread__comm_len(he->thread);
306 } 306 }
307 307
308 return hist_entry_callchain__fprintf(he, total_period, left_margin, fp); 308 return hist_entry_callchain__fprintf(he, total_period, left_margin, fp);
309 } 309 }
310 310
/* Advance the output cursor of @hpp by @inc consumed characters. */
static inline void advance_hpp(struct perf_hpp *hpp, int inc)
{
	hpp->buf += inc;
	hpp->size -= inc;
}
316 316
/*
 * Format the period/overhead columns of @he into hpp->buf, one column
 * per registered hpp format, separated by symbol_conf.field_sep (or a
 * space when unset).  Returns the number of characters written.
 */
static int hist_entry__period_snprintf(struct perf_hpp *hpp,
				       struct hist_entry *he,
				       bool color)
{
	const char *sep = symbol_conf.field_sep;
	struct perf_hpp_fmt *fmt;
	char *start = hpp->buf;
	int ret;
	bool first = true;

	if (symbol_conf.exclude_other && !he->parent)
		return 0;

	perf_hpp__for_each_format(fmt) {
		/*
		 * If there's no field_sep, we still need
		 * to display initial ' '.
		 */
		if (!sep || !first) {
			ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
			advance_hpp(hpp, ret);
		} else
			first = false;

		/* Color only for human-readable output (no field separator). */
		if (color && fmt->color)
			ret = fmt->color(fmt, hpp, he);
		else
			ret = fmt->entry(fmt, hpp, he);

		advance_hpp(hpp, ret);
	}

	/* Distance the cursor advanced == total characters written. */
	return hpp->buf - start;
}
351 351
/*
 * Format one hist entry into the caller-supplied line buffer @bf of
 * @bfsz bytes, print it to @fp, then append the callchain when enabled.
 * @size caps the formatted width; 0 (or anything larger than @bfsz)
 * means "use the whole buffer".  Returns characters printed.
 */
static int hist_entry__fprintf(struct hist_entry *he, size_t size,
			       struct hists *hists,
			       char *bf, size_t bfsz, FILE *fp)
{
	int ret;
	struct perf_hpp hpp = {
		.buf = bf,
		.size = size,
	};
	bool color = !symbol_conf.field_sep;

	/* Clamp to the real buffer; the caller sizes it for the widest line. */
	if (size == 0 || size > bfsz)
		size = hpp.size = bfsz;

	ret = hist_entry__period_snprintf(&hpp, he, color);
	hist_entry__sort_snprintf(he, bf + ret, size - ret, hists);

	ret = fprintf(fp, "%s\n", bf);

	if (symbol_conf.use_callchain)
		ret += hist_entry__callchain_fprintf(he, hists, fp);

	return ret;
}
376 376
/*
 * Print the whole hists tree to @fp: two optional header lines plus one
 * line (and optional callchain) per unfiltered entry.  When @max_rows
 * is non-zero at most that many rows are emitted; entries whose
 * percentage is below @min_pcnt are skipped; each entry line is capped
 * at @max_cols characters.  Returns the number of characters printed.
 */
size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
		      int max_cols, float min_pcnt, FILE *fp)
{
	struct perf_hpp_fmt *fmt;
	struct sort_entry *se;
	struct rb_node *nd;
	size_t ret = 0;
	unsigned int width;
	const char *sep = symbol_conf.field_sep;
	const char *col_width = symbol_conf.col_width_list_str;
	int nr_rows = 0;
	char bf[96];
	struct perf_hpp dummy_hpp = {
		.buf = bf,
		.size = sizeof(bf),
		.ptr = hists_to_evsel(hists),
	};
	bool first = true;
	size_t linesz;
	char *line = NULL;

	init_rem_hits();

	if (!show_header)
		goto print_entries;

	fprintf(fp, "# ");

	/* First header line: one title per hpp (overhead/period) column. */
	perf_hpp__for_each_format(fmt) {
		if (!first)
			fprintf(fp, "%s", sep ?: " ");
		else
			first = false;

		fmt->header(fmt, &dummy_hpp);
		fprintf(fp, "%s", bf);
	}

	/* Then one title per sort key, honoring any user-forced widths. */
	list_for_each_entry(se, &hist_entry__sort_list, list) {
		if (se->elide)
			continue;
		if (sep) {
			fprintf(fp, "%c%s", *sep, se->se_header);
			continue;
		}
		width = strlen(se->se_header);
		if (symbol_conf.col_width_list_str) {
			/* Consume the next entry of the comma-separated width list. */
			if (col_width) {
				hists__set_col_len(hists, se->se_width_idx,
						   atoi(col_width));
				col_width = strchr(col_width, ',');
				if (col_width)
					++col_width;
			}
		}
		if (!hists__new_col_len(hists, se->se_width_idx, width))
			width = hists__col_len(hists, se->se_width_idx);
		fprintf(fp, " %*s", width, se->se_header);
	}

	fprintf(fp, "\n");
	if (max_rows && ++nr_rows >= max_rows)
		goto out;

	if (sep)
		goto print_entries;

	first = true;

	fprintf(fp, "# ");

	/* Second header line: dotted underline matching each column width. */
	perf_hpp__for_each_format(fmt) {
		unsigned int i;

		if (!first)
			fprintf(fp, "%s", sep ?: " ");
		else
			first = false;

		width = fmt->width(fmt, &dummy_hpp);
		for (i = 0; i < width; i++)
			fprintf(fp, ".");
	}

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		unsigned int i;

		if (se->elide)
			continue;

		fprintf(fp, " ");
		width = hists__col_len(hists, se->se_width_idx);
		if (width == 0)
			width = strlen(se->se_header);
		for (i = 0; i < width; i++)
			fprintf(fp, ".");
	}

	fprintf(fp, "\n");
	if (max_rows && ++nr_rows >= max_rows)
		goto out;

	fprintf(fp, "#\n");
	if (max_rows && ++nr_rows >= max_rows)
		goto out;

print_entries:
	/* One heap line buffer sized for the widest possible entry. */
	linesz = hists__sort_list_width(hists) + 3 + 1;
	line = malloc(linesz);
	if (line == NULL) {
		/* NOTE(review): -1 stored in a size_t return — huge value;
		 * confirm callers treat it as an error. */
		ret = -1;
		goto out;
	}

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
		float percent = h->stat.period * 100.0 /
					hists->stats.total_period;

		if (h->filtered)
			continue;

		if (percent < min_pcnt)
			continue;

		ret += hist_entry__fprintf(h, max_cols, hists, line, linesz, fp);

		/* break (not goto) so the line buffer is still freed. */
		if (max_rows && ++nr_rows >= max_rows)
			break;

		if (h->ms.map == NULL && verbose > 1) {
			__map_groups__fprintf_maps(&h->thread->mg,
						   MAP__FUNCTION, verbose, fp);
			fprintf(fp, "%.10s end\n", graph_dotted_line);
		}
	}

	free(line);
out:
	free(rem_sq_bracket);

	return ret;
}
509 520
510 size_t events_stats__fprintf(struct events_stats *stats, FILE *fp) 521 size_t events_stats__fprintf(struct events_stats *stats, FILE *fp)
511 { 522 {
512 int i; 523 int i;
513 size_t ret = 0; 524 size_t ret = 0;
514 525
515 for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) { 526 for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
516 const char *name; 527 const char *name;
517 528
518 if (stats->nr_events[i] == 0) 529 if (stats->nr_events[i] == 0)
519 continue; 530 continue;
520 531
521 name = perf_event__name(i); 532 name = perf_event__name(i);
522 if (!strcmp(name, "UNKNOWN")) 533 if (!strcmp(name, "UNKNOWN"))
523 continue; 534 continue;
524 535
525 ret += fprintf(fp, "%16s events: %10d\n", name, 536 ret += fprintf(fp, "%16s events: %10d\n", name,
526 stats->nr_events[i]); 537 stats->nr_events[i]);
527 } 538 }
528 539
529 return ret; 540 return ret;
530 } 541 }
tools/perf/util/evlist.c
1 /* 1 /*
2 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com> 2 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
3 * 3 *
4 * Parts came from builtin-{top,stat,record}.c, see those files for further 4 * Parts came from builtin-{top,stat,record}.c, see those files for further
5 * copyright notes. 5 * copyright notes.
6 * 6 *
7 * Released under the GPL v2. (and only v2, not any later version) 7 * Released under the GPL v2. (and only v2, not any later version)
8 */ 8 */
9 #include "util.h" 9 #include "util.h"
10 #include <lk/debugfs.h> 10 #include <lk/debugfs.h>
11 #include <poll.h> 11 #include <poll.h>
12 #include "cpumap.h" 12 #include "cpumap.h"
13 #include "thread_map.h" 13 #include "thread_map.h"
14 #include "target.h" 14 #include "target.h"
15 #include "evlist.h" 15 #include "evlist.h"
16 #include "evsel.h" 16 #include "evsel.h"
17 #include "debug.h" 17 #include "debug.h"
18 #include <unistd.h> 18 #include <unistd.h>
19 19
20 #include "parse-events.h" 20 #include "parse-events.h"
21 21
22 #include <sys/mman.h> 22 #include <sys/mman.h>
23 23
24 #include <linux/bitops.h> 24 #include <linux/bitops.h>
25 #include <linux/hash.h> 25 #include <linux/hash.h>
26 26
27 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y)) 27 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
28 #define SID(e, x, y) xyarray__entry(e->sample_id, x, y) 28 #define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
29 29
30 void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus, 30 void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
31 struct thread_map *threads) 31 struct thread_map *threads)
32 { 32 {
33 int i; 33 int i;
34 34
35 for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i) 35 for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
36 INIT_HLIST_HEAD(&evlist->heads[i]); 36 INIT_HLIST_HEAD(&evlist->heads[i]);
37 INIT_LIST_HEAD(&evlist->entries); 37 INIT_LIST_HEAD(&evlist->entries);
38 perf_evlist__set_maps(evlist, cpus, threads); 38 perf_evlist__set_maps(evlist, cpus, threads);
39 evlist->workload.pid = -1; 39 evlist->workload.pid = -1;
40 } 40 }
41 41
42 struct perf_evlist *perf_evlist__new(void) 42 struct perf_evlist *perf_evlist__new(void)
43 { 43 {
44 struct perf_evlist *evlist = zalloc(sizeof(*evlist)); 44 struct perf_evlist *evlist = zalloc(sizeof(*evlist));
45 45
46 if (evlist != NULL) 46 if (evlist != NULL)
47 perf_evlist__init(evlist, NULL, NULL); 47 perf_evlist__init(evlist, NULL, NULL);
48 48
49 return evlist; 49 return evlist;
50 } 50 }
51 51
/**
 * perf_evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos. For convenience, put a copy on evlist.
 */
void perf_evlist__set_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	/* Cache from the first event; shared by all compatible events. */
	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;
}
66 66
67 static void perf_evlist__purge(struct perf_evlist *evlist) 67 static void perf_evlist__purge(struct perf_evlist *evlist)
68 { 68 {
69 struct perf_evsel *pos, *n; 69 struct perf_evsel *pos, *n;
70 70
71 list_for_each_entry_safe(pos, n, &evlist->entries, node) { 71 list_for_each_entry_safe(pos, n, &evlist->entries, node) {
72 list_del_init(&pos->node); 72 list_del_init(&pos->node);
73 perf_evsel__delete(pos); 73 perf_evsel__delete(pos);
74 } 74 }
75 75
76 evlist->nr_entries = 0; 76 evlist->nr_entries = 0;
77 } 77 }
78 78
79 void perf_evlist__exit(struct perf_evlist *evlist) 79 void perf_evlist__exit(struct perf_evlist *evlist)
80 { 80 {
81 free(evlist->mmap); 81 free(evlist->mmap);
82 free(evlist->pollfd); 82 free(evlist->pollfd);
83 evlist->mmap = NULL; 83 evlist->mmap = NULL;
84 evlist->pollfd = NULL; 84 evlist->pollfd = NULL;
85 } 85 }
86 86
/* Tear down and free an evlist created with perf_evlist__new(). */
void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);	/* delete every evsel */
	perf_evlist__exit(evlist);	/* release mmap/pollfd arrays */
	free(evlist);
}
93 93
94 void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry) 94 void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
95 { 95 {
96 list_add_tail(&entry->node, &evlist->entries); 96 list_add_tail(&entry->node, &evlist->entries);
97 if (!evlist->nr_entries++) 97 if (!evlist->nr_entries++)
98 perf_evlist__set_id_pos(evlist); 98 perf_evlist__set_id_pos(evlist);
99 } 99 }
100 100
/*
 * Move every evsel on @list to the tail of the evlist.  @nr_entries is
 * the number of events on @list (list_splice_tail() cannot count them).
 */
void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list,
				   int nr_entries)
{
	/* Must check emptiness BEFORE the splice bumps nr_entries. */
	bool set_id_pos = !evlist->nr_entries;

	list_splice_tail(list, &evlist->entries);
	evlist->nr_entries += nr_entries;
	if (set_id_pos)
		perf_evlist__set_id_pos(evlist);
}
112 112
113 void __perf_evlist__set_leader(struct list_head *list) 113 void __perf_evlist__set_leader(struct list_head *list)
114 { 114 {
115 struct perf_evsel *evsel, *leader; 115 struct perf_evsel *evsel, *leader;
116 116
117 leader = list_entry(list->next, struct perf_evsel, node); 117 leader = list_entry(list->next, struct perf_evsel, node);
118 evsel = list_entry(list->prev, struct perf_evsel, node); 118 evsel = list_entry(list->prev, struct perf_evsel, node);
119 119
120 leader->nr_members = evsel->idx - leader->idx + 1; 120 leader->nr_members = evsel->idx - leader->idx + 1;
121 121
122 list_for_each_entry(evsel, list, node) { 122 list_for_each_entry(evsel, list, node) {
123 evsel->leader = leader; 123 evsel->leader = leader;
124 } 124 }
125 } 125 }
126 126
127 void perf_evlist__set_leader(struct perf_evlist *evlist) 127 void perf_evlist__set_leader(struct perf_evlist *evlist)
128 { 128 {
129 if (evlist->nr_entries) { 129 if (evlist->nr_entries) {
130 evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0; 130 evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
131 __perf_evlist__set_leader(&evlist->entries); 131 __perf_evlist__set_leader(&evlist->entries);
132 } 132 }
133 } 133 }
134 134
135 int perf_evlist__add_default(struct perf_evlist *evlist) 135 int perf_evlist__add_default(struct perf_evlist *evlist)
136 { 136 {
137 struct perf_event_attr attr = { 137 struct perf_event_attr attr = {
138 .type = PERF_TYPE_HARDWARE, 138 .type = PERF_TYPE_HARDWARE,
139 .config = PERF_COUNT_HW_CPU_CYCLES, 139 .config = PERF_COUNT_HW_CPU_CYCLES,
140 }; 140 };
141 struct perf_evsel *evsel; 141 struct perf_evsel *evsel;
142 142
143 event_attr_init(&attr); 143 event_attr_init(&attr);
144 144
145 evsel = perf_evsel__new(&attr, 0); 145 evsel = perf_evsel__new(&attr, 0);
146 if (evsel == NULL) 146 if (evsel == NULL)
147 goto error; 147 goto error;
148 148
149 /* use strdup() because free(evsel) assumes name is allocated */ 149 /* use strdup() because free(evsel) assumes name is allocated */
150 evsel->name = strdup("cycles"); 150 evsel->name = strdup("cycles");
151 if (!evsel->name) 151 if (!evsel->name)
152 goto error_free; 152 goto error_free;
153 153
154 perf_evlist__add(evlist, evsel); 154 perf_evlist__add(evlist, evsel);
155 return 0; 155 return 0;
156 error_free: 156 error_free:
157 perf_evsel__delete(evsel); 157 perf_evsel__delete(evsel);
158 error: 158 error:
159 return -ENOMEM; 159 return -ENOMEM;
160 } 160 }
161 161
/*
 * Build one evsel per attr and splice them onto the evlist in one go.
 * All-or-nothing: on any allocation failure every evsel created so far
 * is deleted, -1 is returned and the evlist is left untouched.
 */
static int perf_evlist__add_attrs(struct perf_evlist *evlist,
				  struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		/* idx continues after the entries already on the evlist. */
		evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

	return 0;

out_delete_partial_list:
	list_for_each_entry_safe(evsel, n, &head, node)
		perf_evsel__delete(evsel);
	return -1;
}
185 185
186 int __perf_evlist__add_default_attrs(struct perf_evlist *evlist, 186 int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
187 struct perf_event_attr *attrs, size_t nr_attrs) 187 struct perf_event_attr *attrs, size_t nr_attrs)
188 { 188 {
189 size_t i; 189 size_t i;
190 190
191 for (i = 0; i < nr_attrs; i++) 191 for (i = 0; i < nr_attrs; i++)
192 event_attr_init(attrs + i); 192 event_attr_init(attrs + i);
193 193
194 return perf_evlist__add_attrs(evlist, attrs, nr_attrs); 194 return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
195 } 195 }
196 196
197 struct perf_evsel * 197 struct perf_evsel *
198 perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id) 198 perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
199 { 199 {
200 struct perf_evsel *evsel; 200 struct perf_evsel *evsel;
201 201
202 list_for_each_entry(evsel, &evlist->entries, node) { 202 list_for_each_entry(evsel, &evlist->entries, node) {
203 if (evsel->attr.type == PERF_TYPE_TRACEPOINT && 203 if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
204 (int)evsel->attr.config == id) 204 (int)evsel->attr.config == id)
205 return evsel; 205 return evsel;
206 } 206 }
207 207
208 return NULL; 208 return NULL;
209 } 209 }
210 210
211 struct perf_evsel * 211 struct perf_evsel *
212 perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist, 212 perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
213 const char *name) 213 const char *name)
214 { 214 {
215 struct perf_evsel *evsel; 215 struct perf_evsel *evsel;
216 216
217 list_for_each_entry(evsel, &evlist->entries, node) { 217 list_for_each_entry(evsel, &evlist->entries, node) {
218 if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) && 218 if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
219 (strcmp(evsel->name, name) == 0)) 219 (strcmp(evsel->name, name) == 0))
220 return evsel; 220 return evsel;
221 } 221 }
222 222
223 return NULL; 223 return NULL;
224 } 224 }
225 225
226 int perf_evlist__add_newtp(struct perf_evlist *evlist, 226 int perf_evlist__add_newtp(struct perf_evlist *evlist,
227 const char *sys, const char *name, void *handler) 227 const char *sys, const char *name, void *handler)
228 { 228 {
229 struct perf_evsel *evsel; 229 struct perf_evsel *evsel;
230 230
231 evsel = perf_evsel__newtp(sys, name, evlist->nr_entries); 231 evsel = perf_evsel__newtp(sys, name, evlist->nr_entries);
232 if (evsel == NULL) 232 if (evsel == NULL)
233 return -1; 233 return -1;
234 234
235 evsel->handler.func = handler; 235 evsel->handler.func = handler;
236 perf_evlist__add(evlist, evsel); 236 perf_evlist__add(evlist, evsel);
237 return 0; 237 return 0;
238 } 238 }
239 239
240 void perf_evlist__disable(struct perf_evlist *evlist) 240 void perf_evlist__disable(struct perf_evlist *evlist)
241 { 241 {
242 int cpu, thread; 242 int cpu, thread;
243 struct perf_evsel *pos; 243 struct perf_evsel *pos;
244 int nr_cpus = cpu_map__nr(evlist->cpus); 244 int nr_cpus = cpu_map__nr(evlist->cpus);
245 int nr_threads = thread_map__nr(evlist->threads); 245 int nr_threads = thread_map__nr(evlist->threads);
246 246
247 for (cpu = 0; cpu < nr_cpus; cpu++) { 247 for (cpu = 0; cpu < nr_cpus; cpu++) {
248 list_for_each_entry(pos, &evlist->entries, node) { 248 list_for_each_entry(pos, &evlist->entries, node) {
249 if (!perf_evsel__is_group_leader(pos) || !pos->fd) 249 if (!perf_evsel__is_group_leader(pos) || !pos->fd)
250 continue; 250 continue;
251 for (thread = 0; thread < nr_threads; thread++) 251 for (thread = 0; thread < nr_threads; thread++)
252 ioctl(FD(pos, cpu, thread), 252 ioctl(FD(pos, cpu, thread),
253 PERF_EVENT_IOC_DISABLE, 0); 253 PERF_EVENT_IOC_DISABLE, 0);
254 } 254 }
255 } 255 }
256 } 256 }
257 257
258 void perf_evlist__enable(struct perf_evlist *evlist) 258 void perf_evlist__enable(struct perf_evlist *evlist)
259 { 259 {
260 int cpu, thread; 260 int cpu, thread;
261 struct perf_evsel *pos; 261 struct perf_evsel *pos;
262 int nr_cpus = cpu_map__nr(evlist->cpus); 262 int nr_cpus = cpu_map__nr(evlist->cpus);
263 int nr_threads = thread_map__nr(evlist->threads); 263 int nr_threads = thread_map__nr(evlist->threads);
264 264
265 for (cpu = 0; cpu < nr_cpus; cpu++) { 265 for (cpu = 0; cpu < nr_cpus; cpu++) {
266 list_for_each_entry(pos, &evlist->entries, node) { 266 list_for_each_entry(pos, &evlist->entries, node) {
267 if (!perf_evsel__is_group_leader(pos) || !pos->fd) 267 if (!perf_evsel__is_group_leader(pos) || !pos->fd)
268 continue; 268 continue;
269 for (thread = 0; thread < nr_threads; thread++) 269 for (thread = 0; thread < nr_threads; thread++)
270 ioctl(FD(pos, cpu, thread), 270 ioctl(FD(pos, cpu, thread),
271 PERF_EVENT_IOC_ENABLE, 0); 271 PERF_EVENT_IOC_ENABLE, 0);
272 } 272 }
273 } 273 }
274 } 274 }
275 275
276 int perf_evlist__disable_event(struct perf_evlist *evlist, 276 int perf_evlist__disable_event(struct perf_evlist *evlist,
277 struct perf_evsel *evsel) 277 struct perf_evsel *evsel)
278 { 278 {
279 int cpu, thread, err; 279 int cpu, thread, err;
280 280
281 if (!evsel->fd) 281 if (!evsel->fd)
282 return 0; 282 return 0;
283 283
284 for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { 284 for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
285 for (thread = 0; thread < evlist->threads->nr; thread++) { 285 for (thread = 0; thread < evlist->threads->nr; thread++) {
286 err = ioctl(FD(evsel, cpu, thread), 286 err = ioctl(FD(evsel, cpu, thread),
287 PERF_EVENT_IOC_DISABLE, 0); 287 PERF_EVENT_IOC_DISABLE, 0);
288 if (err) 288 if (err)
289 return err; 289 return err;
290 } 290 }
291 } 291 }
292 return 0; 292 return 0;
293 } 293 }
294 294
295 int perf_evlist__enable_event(struct perf_evlist *evlist, 295 int perf_evlist__enable_event(struct perf_evlist *evlist,
296 struct perf_evsel *evsel) 296 struct perf_evsel *evsel)
297 { 297 {
298 int cpu, thread, err; 298 int cpu, thread, err;
299 299
300 if (!evsel->fd) 300 if (!evsel->fd)
301 return -EINVAL; 301 return -EINVAL;
302 302
303 for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { 303 for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
304 for (thread = 0; thread < evlist->threads->nr; thread++) { 304 for (thread = 0; thread < evlist->threads->nr; thread++) {
305 err = ioctl(FD(evsel, cpu, thread), 305 err = ioctl(FD(evsel, cpu, thread),
306 PERF_EVENT_IOC_ENABLE, 0); 306 PERF_EVENT_IOC_ENABLE, 0);
307 if (err) 307 if (err)
308 return err; 308 return err;
309 } 309 }
310 } 310 }
311 return 0; 311 return 0;
312 } 312 }
313 313
314 static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist) 314 static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
315 { 315 {
316 int nr_cpus = cpu_map__nr(evlist->cpus); 316 int nr_cpus = cpu_map__nr(evlist->cpus);
317 int nr_threads = thread_map__nr(evlist->threads); 317 int nr_threads = thread_map__nr(evlist->threads);
318 int nfds = nr_cpus * nr_threads * evlist->nr_entries; 318 int nfds = nr_cpus * nr_threads * evlist->nr_entries;
319 evlist->pollfd = malloc(sizeof(struct pollfd) * nfds); 319 evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
320 return evlist->pollfd != NULL ? 0 : -ENOMEM; 320 return evlist->pollfd != NULL ? 0 : -ENOMEM;
321 } 321 }
322 322
323 void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd) 323 void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
324 { 324 {
325 fcntl(fd, F_SETFL, O_NONBLOCK); 325 fcntl(fd, F_SETFL, O_NONBLOCK);
326 evlist->pollfd[evlist->nr_fds].fd = fd; 326 evlist->pollfd[evlist->nr_fds].fd = fd;
327 evlist->pollfd[evlist->nr_fds].events = POLLIN; 327 evlist->pollfd[evlist->nr_fds].events = POLLIN;
328 evlist->nr_fds++; 328 evlist->nr_fds++;
329 } 329 }
330 330
331 static void perf_evlist__id_hash(struct perf_evlist *evlist, 331 static void perf_evlist__id_hash(struct perf_evlist *evlist,
332 struct perf_evsel *evsel, 332 struct perf_evsel *evsel,
333 int cpu, int thread, u64 id) 333 int cpu, int thread, u64 id)
334 { 334 {
335 int hash; 335 int hash;
336 struct perf_sample_id *sid = SID(evsel, cpu, thread); 336 struct perf_sample_id *sid = SID(evsel, cpu, thread);
337 337
338 sid->id = id; 338 sid->id = id;
339 sid->evsel = evsel; 339 sid->evsel = evsel;
340 hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS); 340 hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
341 hlist_add_head(&sid->node, &evlist->heads[hash]); 341 hlist_add_head(&sid->node, &evlist->heads[hash]);
342 } 342 }
343 343
344 void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel, 344 void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
345 int cpu, int thread, u64 id) 345 int cpu, int thread, u64 id)
346 { 346 {
347 perf_evlist__id_hash(evlist, evsel, cpu, thread, id); 347 perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
348 evsel->id[evsel->ids++] = id; 348 evsel->id[evsel->ids++] = id;
349 } 349 }
350 350
/*
 * Determine and record the kernel-assigned event id for one
 * (evsel, cpu, thread) file descriptor.
 *
 * Preferred path: the PERF_EVENT_IOC_ID ioctl returns the id directly.
 * On kernels lacking that ioctl (errno == ENOTTY) we fall back to
 * read()ing the counter and picking the PERF_FORMAT_ID slot out of the
 * read_format layout.
 *
 * Returns 0 on success, -1 if the id cannot be determined.
 */
static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	/* Any failure other than "ioctl unsupported" is fatal here. */
	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get event id.. All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	/* Skip the optional time_enabled/time_running fields, if present. */
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

 add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}
391 391
392 struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id) 392 struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
393 { 393 {
394 struct hlist_head *head; 394 struct hlist_head *head;
395 struct perf_sample_id *sid; 395 struct perf_sample_id *sid;
396 int hash; 396 int hash;
397 397
398 hash = hash_64(id, PERF_EVLIST__HLIST_BITS); 398 hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
399 head = &evlist->heads[hash]; 399 head = &evlist->heads[hash];
400 400
401 hlist_for_each_entry(sid, head, node) 401 hlist_for_each_entry(sid, head, node)
402 if (sid->id == id) 402 if (sid->id == id)
403 return sid; 403 return sid;
404 404
405 return NULL; 405 return NULL;
406 } 406 }
407 407
408 struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id) 408 struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
409 { 409 {
410 struct perf_sample_id *sid; 410 struct perf_sample_id *sid;
411 411
412 if (evlist->nr_entries == 1) 412 if (evlist->nr_entries == 1)
413 return perf_evlist__first(evlist); 413 return perf_evlist__first(evlist);
414 414
415 sid = perf_evlist__id2sid(evlist, id); 415 sid = perf_evlist__id2sid(evlist, id);
416 if (sid) 416 if (sid)
417 return sid->evsel; 417 return sid->evsel;
418 418
419 if (!perf_evlist__sample_id_all(evlist)) 419 if (!perf_evlist__sample_id_all(evlist))
420 return perf_evlist__first(evlist); 420 return perf_evlist__first(evlist);
421 421
422 return NULL; 422 return NULL;
423 } 423 }
424 424
/*
 * Extract the sample id out of a raw event.
 *
 * For PERF_RECORD_SAMPLE records the id sits at a fixed offset (id_pos)
 * from the start of the sample array.  For all other record types the
 * sample_id_all trailer is appended at the end, so the id is found
 * is_pos u64 words back from the end of the record.
 *
 * Returns 0 and stores the id in *id, or -1 when the record is too
 * small to hold an id at the expected position.
 */
static int perf_evlist__event2id(struct perf_evlist *evlist,
				 union perf_event *event, u64 *id)
{
	const u64 *array = event->sample.array;
	ssize_t n;

	/* Size of the payload in u64 words. */
	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		if (evlist->is_pos > n)
			return -1;
		n -= evlist->is_pos;
		*id = array[n];
	}
	return 0;
}
445 445
/*
 * Map a raw mmap'ed event to the evsel it belongs to.
 *
 * Returns NULL when no owner can be determined; callers treat that as a
 * malformed/unattributable event.
 */
static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
						   union perf_event *event)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;
	u64 id;

	if (evlist->nr_entries == 1)
		return first;

	/*
	 * Without sample_id_all, non-sample records carry no id at all,
	 * so don't even try to parse one out of them (fixes reading old
	 * perf.data files produced without sample_id_all support).
	 */
	if (!first->attr.sample_id_all &&
	    event->header.type != PERF_RECORD_SAMPLE)
		return first;

	if (perf_evlist__event2id(evlist, event, &id))
		return NULL;

	/* Synthesized events have an id of zero */
	if (!id)
		return first;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node) {
		if (sid->id == id)
			return sid->evsel;
	}
	return NULL;
}
473 478
/*
 * Pull the next event out of mmap ring buffer @idx, or NULL when the
 * buffer is drained.
 *
 * In overwrite mode the kernel may lap us, in which case we resync to
 * the current head.  Events that wrap around the end of the ring are
 * reassembled into md->event_copy, so the returned pointer is only
 * valid until the next call for this ring.
 */
union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	/* Data area starts one page after the control page. */
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &md->event_copy;

			/* Copy piecewise across the wrap point. */
			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &md->event_copy;
		}

		old += size;
	}

	md->prev = old;

	/* In non-overwrite mode, tell the kernel how far we consumed. */
	if (!evlist->overwrite)
		perf_mmap__write_tail(md, old);

	return event;
}
538 543
539 static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx) 544 static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
540 { 545 {
541 if (evlist->mmap[idx].base != NULL) { 546 if (evlist->mmap[idx].base != NULL) {
542 munmap(evlist->mmap[idx].base, evlist->mmap_len); 547 munmap(evlist->mmap[idx].base, evlist->mmap_len);
543 evlist->mmap[idx].base = NULL; 548 evlist->mmap[idx].base = NULL;
544 } 549 }
545 } 550 }
546 551
547 void perf_evlist__munmap(struct perf_evlist *evlist) 552 void perf_evlist__munmap(struct perf_evlist *evlist)
548 { 553 {
549 int i; 554 int i;
550 555
551 for (i = 0; i < evlist->nr_mmaps; i++) 556 for (i = 0; i < evlist->nr_mmaps; i++)
552 __perf_evlist__munmap(evlist, i); 557 __perf_evlist__munmap(evlist, i);
553 558
554 free(evlist->mmap); 559 free(evlist->mmap);
555 evlist->mmap = NULL; 560 evlist->mmap = NULL;
556 } 561 }
557 562
558 static int perf_evlist__alloc_mmap(struct perf_evlist *evlist) 563 static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
559 { 564 {
560 evlist->nr_mmaps = cpu_map__nr(evlist->cpus); 565 evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
561 if (cpu_map__empty(evlist->cpus)) 566 if (cpu_map__empty(evlist->cpus))
562 evlist->nr_mmaps = thread_map__nr(evlist->threads); 567 evlist->nr_mmaps = thread_map__nr(evlist->threads);
563 evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap)); 568 evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
564 return evlist->mmap != NULL ? 0 : -ENOMEM; 569 return evlist->mmap != NULL ? 0 : -ENOMEM;
565 } 570 }
566 571
567 static int __perf_evlist__mmap(struct perf_evlist *evlist, 572 static int __perf_evlist__mmap(struct perf_evlist *evlist,
568 int idx, int prot, int mask, int fd) 573 int idx, int prot, int mask, int fd)
569 { 574 {
570 evlist->mmap[idx].prev = 0; 575 evlist->mmap[idx].prev = 0;
571 evlist->mmap[idx].mask = mask; 576 evlist->mmap[idx].mask = mask;
572 evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot, 577 evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
573 MAP_SHARED, fd, 0); 578 MAP_SHARED, fd, 0);
574 if (evlist->mmap[idx].base == MAP_FAILED) { 579 if (evlist->mmap[idx].base == MAP_FAILED) {
575 evlist->mmap[idx].base = NULL; 580 evlist->mmap[idx].base = NULL;
576 return -1; 581 return -1;
577 } 582 }
578 583
579 perf_evlist__add_pollfd(evlist, fd); 584 perf_evlist__add_pollfd(evlist, fd);
580 return 0; 585 return 0;
581 } 586 }
582 587
/*
 * Create one ring buffer per cpu.  The first fd of each cpu gets the
 * actual mmap; every other fd on that cpu is redirected into it with
 * PERF_EVENT_IOC_SET_OUTPUT.  On any failure all rings mapped so far
 * are torn down.  Returns 0 or -1.
 */
static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int cpu, thread;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per cpu\n");
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1; /* fd owning this cpu's ring, once mapped */

		for (thread = 0; thread < nr_threads; thread++) {
			list_for_each_entry(evsel, &evlist->entries, node) {
				int fd = FD(evsel, cpu, thread);

				if (output == -1) {
					output = fd;
					if (__perf_evlist__mmap(evlist, cpu,
								prot, mask, output) < 0)
						goto out_unmap;
				} else {
					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
						goto out_unmap;
				}

				/* Ids are needed to demux events later on. */
				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < nr_cpus; cpu++)
		__perf_evlist__munmap(evlist, cpu);
	return -1;
}
622 627
/*
 * Create one ring buffer per thread (used when there is no cpu map).
 * The first fd of each thread gets the mmap; the rest are redirected
 * with PERF_EVENT_IOC_SET_OUTPUT.  On failure all rings mapped so far
 * are torn down.  Returns 0 or -1.
 */
static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int thread;
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per thread\n");
	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1; /* fd owning this thread's ring, once mapped */

		list_for_each_entry(evsel, &evlist->entries, node) {
			int fd = FD(evsel, 0, thread);

			if (output == -1) {
				output = fd;
				if (__perf_evlist__mmap(evlist, thread,
							prot, mask, output) < 0)
					goto out_unmap;
			} else {
				if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
					goto out_unmap;
			}

			/* Ids are needed to demux events later on. */
			if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
			    perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (thread = 0; thread < nr_threads; thread++)
		__perf_evlist__munmap(evlist, thread);
	return -1;
}
659 664
/** perf_evlist__mmap - Create per cpu maps to receive events
 *
 * @evlist - list of events
 * @pages - map length in pages (UINT_MAX selects the 512 kiB default,
 *          otherwise must be a power of two)
 * @overwrite - overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 *	struct perf_mmap *m = &evlist->mmap[cpu];
 *	unsigned int head = perf_mmap__read_head(m);
 *
 *	perf_mmap__write_tail(m, head)
 *
 * Using perf_evlist__read_on_cpu does this automatically.
 *
 * Returns 0 on success, -EINVAL for a bad @pages value, -ENOMEM or -1
 * on allocation/mmap failures.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite)
{
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;

	/* 512 kiB: default amount of unprivileged mlocked memory */
	if (pages == UINT_MAX)
		pages = (512 * 1024) / page_size;
	else if (!is_power_of_2(pages))
		return -EINVAL;

	mask = pages * page_size - 1;

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	/* +1 page for the kernel's control/header page. */
	evlist->mmap_len = (pages + 1) * page_size;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
			return -ENOMEM;
	}

	/* No cpu map means per-thread mmaps. */
	if (cpu_map__empty(cpus))
		return perf_evlist__mmap_per_thread(evlist, prot, mask);

	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}
712 717
713 int perf_evlist__create_maps(struct perf_evlist *evlist, 718 int perf_evlist__create_maps(struct perf_evlist *evlist,
714 struct perf_target *target) 719 struct perf_target *target)
715 { 720 {
716 evlist->threads = thread_map__new_str(target->pid, target->tid, 721 evlist->threads = thread_map__new_str(target->pid, target->tid,
717 target->uid); 722 target->uid);
718 723
719 if (evlist->threads == NULL) 724 if (evlist->threads == NULL)
720 return -1; 725 return -1;
721 726
722 if (perf_target__has_task(target)) 727 if (perf_target__has_task(target))
723 evlist->cpus = cpu_map__dummy_new(); 728 evlist->cpus = cpu_map__dummy_new();
724 else if (!perf_target__has_cpu(target) && !target->uses_mmap) 729 else if (!perf_target__has_cpu(target) && !target->uses_mmap)
725 evlist->cpus = cpu_map__dummy_new(); 730 evlist->cpus = cpu_map__dummy_new();
726 else 731 else
727 evlist->cpus = cpu_map__new(target->cpu_list); 732 evlist->cpus = cpu_map__new(target->cpu_list);
728 733
729 if (evlist->cpus == NULL) 734 if (evlist->cpus == NULL)
730 goto out_delete_threads; 735 goto out_delete_threads;
731 736
732 return 0; 737 return 0;
733 738
734 out_delete_threads: 739 out_delete_threads:
735 thread_map__delete(evlist->threads); 740 thread_map__delete(evlist->threads);
736 return -1; 741 return -1;
737 } 742 }
738 743
739 void perf_evlist__delete_maps(struct perf_evlist *evlist) 744 void perf_evlist__delete_maps(struct perf_evlist *evlist)
740 { 745 {
741 cpu_map__delete(evlist->cpus); 746 cpu_map__delete(evlist->cpus);
742 thread_map__delete(evlist->threads); 747 thread_map__delete(evlist->threads);
743 evlist->cpus = NULL; 748 evlist->cpus = NULL;
744 evlist->threads = NULL; 749 evlist->threads = NULL;
745 } 750 }
746 751
747 int perf_evlist__apply_filters(struct perf_evlist *evlist) 752 int perf_evlist__apply_filters(struct perf_evlist *evlist)
748 { 753 {
749 struct perf_evsel *evsel; 754 struct perf_evsel *evsel;
750 int err = 0; 755 int err = 0;
751 const int ncpus = cpu_map__nr(evlist->cpus), 756 const int ncpus = cpu_map__nr(evlist->cpus),
752 nthreads = thread_map__nr(evlist->threads); 757 nthreads = thread_map__nr(evlist->threads);
753 758
754 list_for_each_entry(evsel, &evlist->entries, node) { 759 list_for_each_entry(evsel, &evlist->entries, node) {
755 if (evsel->filter == NULL) 760 if (evsel->filter == NULL)
756 continue; 761 continue;
757 762
758 err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter); 763 err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
759 if (err) 764 if (err)
760 break; 765 break;
761 } 766 }
762 767
763 return err; 768 return err;
764 } 769 }
765 770
766 int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter) 771 int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
767 { 772 {
768 struct perf_evsel *evsel; 773 struct perf_evsel *evsel;
769 int err = 0; 774 int err = 0;
770 const int ncpus = cpu_map__nr(evlist->cpus), 775 const int ncpus = cpu_map__nr(evlist->cpus),
771 nthreads = thread_map__nr(evlist->threads); 776 nthreads = thread_map__nr(evlist->threads);
772 777
773 list_for_each_entry(evsel, &evlist->entries, node) { 778 list_for_each_entry(evsel, &evlist->entries, node) {
774 err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter); 779 err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
775 if (err) 780 if (err)
776 break; 781 break;
777 } 782 }
778 783
779 return err; 784 return err;
780 } 785 }
781 786
782 bool perf_evlist__valid_sample_type(struct perf_evlist *evlist) 787 bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
783 { 788 {
784 struct perf_evsel *pos; 789 struct perf_evsel *pos;
785 790
786 if (evlist->nr_entries == 1) 791 if (evlist->nr_entries == 1)
787 return true; 792 return true;
788 793
789 if (evlist->id_pos < 0 || evlist->is_pos < 0) 794 if (evlist->id_pos < 0 || evlist->is_pos < 0)
790 return false; 795 return false;
791 796
792 list_for_each_entry(pos, &evlist->entries, node) { 797 list_for_each_entry(pos, &evlist->entries, node) {
793 if (pos->id_pos != evlist->id_pos || 798 if (pos->id_pos != evlist->id_pos ||
794 pos->is_pos != evlist->is_pos) 799 pos->is_pos != evlist->is_pos)
795 return false; 800 return false;
796 } 801 }
797 802
798 return true; 803 return true;
799 } 804 }
800 805
801 u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist) 806 u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
802 { 807 {
803 struct perf_evsel *evsel; 808 struct perf_evsel *evsel;
804 809
805 if (evlist->combined_sample_type) 810 if (evlist->combined_sample_type)
806 return evlist->combined_sample_type; 811 return evlist->combined_sample_type;
807 812
808 list_for_each_entry(evsel, &evlist->entries, node) 813 list_for_each_entry(evsel, &evlist->entries, node)
809 evlist->combined_sample_type |= evsel->attr.sample_type; 814 evlist->combined_sample_type |= evsel->attr.sample_type;
810 815
811 return evlist->combined_sample_type; 816 return evlist->combined_sample_type;
812 } 817 }
813 818
814 u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist) 819 u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
815 { 820 {
816 evlist->combined_sample_type = 0; 821 evlist->combined_sample_type = 0;
817 return __perf_evlist__combined_sample_type(evlist); 822 return __perf_evlist__combined_sample_type(evlist);
818 } 823 }
819 824
/*
 * Validate that all evsels share one read_format, and that sampled
 * reads can actually be attributed to an event.
 */
bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
	u64 read_format = first->attr.read_format;
	u64 sample_type = first->attr.sample_type;

	/* Every evsel must agree with the first one. */
	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (read_format != pos->attr.read_format)
			return false;
	}

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {
		return false;
	}

	return true;
}
839 844
840 u64 perf_evlist__read_format(struct perf_evlist *evlist) 845 u64 perf_evlist__read_format(struct perf_evlist *evlist)
841 { 846 {
842 struct perf_evsel *first = perf_evlist__first(evlist); 847 struct perf_evsel *first = perf_evlist__first(evlist);
843 return first->attr.read_format; 848 return first->attr.read_format;
844 } 849 }
845 850
/*
 * Size in bytes of the sample_id_all trailer appended to non-sample
 * records, derived from the first evsel's sample_type.  The fields are
 * accumulated in the order the kernel lays them out; returns 0 when
 * sample_id_all is off.
 */
u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->attr.sample_id_all)
		goto out;

	sample_type = first->attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2; /* pid + tid */

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2; /* cpu + res */

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		size += sizeof(data->id);
out:
	return size;
}
878 883
879 bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist) 884 bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
880 { 885 {
881 struct perf_evsel *first = perf_evlist__first(evlist), *pos = first; 886 struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
882 887
883 list_for_each_entry_continue(pos, &evlist->entries, node) { 888 list_for_each_entry_continue(pos, &evlist->entries, node) {
884 if (first->attr.sample_id_all != pos->attr.sample_id_all) 889 if (first->attr.sample_id_all != pos->attr.sample_id_all)
885 return false; 890 return false;
886 } 891 }
887 892
888 return true; 893 return true;
889 } 894 }
890 895
891 bool perf_evlist__sample_id_all(struct perf_evlist *evlist) 896 bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
892 { 897 {
893 struct perf_evsel *first = perf_evlist__first(evlist); 898 struct perf_evsel *first = perf_evlist__first(evlist);
894 return first->attr.sample_id_all; 899 return first->attr.sample_id_all;
895 } 900 }
896 901
/* Remember which evsel the UI currently has selected. */
void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}
902 907
903 void perf_evlist__close(struct perf_evlist *evlist) 908 void perf_evlist__close(struct perf_evlist *evlist)
904 { 909 {
905 struct perf_evsel *evsel; 910 struct perf_evsel *evsel;
906 int ncpus = cpu_map__nr(evlist->cpus); 911 int ncpus = cpu_map__nr(evlist->cpus);
907 int nthreads = thread_map__nr(evlist->threads); 912 int nthreads = thread_map__nr(evlist->threads);
908 913
909 list_for_each_entry_reverse(evsel, &evlist->entries, node) 914 list_for_each_entry_reverse(evsel, &evlist->entries, node)
910 perf_evsel__close(evsel, ncpus, nthreads); 915 perf_evsel__close(evsel, ncpus, nthreads);
911 } 916 }
912 917
913 int perf_evlist__open(struct perf_evlist *evlist) 918 int perf_evlist__open(struct perf_evlist *evlist)
914 { 919 {
915 struct perf_evsel *evsel; 920 struct perf_evsel *evsel;
916 int err; 921 int err;
917 922
918 list_for_each_entry(evsel, &evlist->entries, node) { 923 list_for_each_entry(evsel, &evlist->entries, node) {
919 err = perf_evsel__open(evsel, evlist->cpus, evlist->threads); 924 err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
920 if (err < 0) 925 if (err < 0)
921 goto out_err; 926 goto out_err;
922 } 927 }
923 928
924 return 0; 929 return 0;
925 out_err: 930 out_err:
926 perf_evlist__close(evlist); 931 perf_evlist__close(evlist);
927 errno = -err; 932 errno = -err;
928 return err; 933 return err;
929 } 934 }
930 935
/*
 * Fork the workload described by argv, but keep it "corked": the child
 * blocks on a pipe read until perf_evlist__start_workload() writes to
 * (or closes) the other end, so events can be set up before it execs.
 *
 * Two pipes are used: child_ready_pipe signals the parent that the child
 * has finished its setup, go_pipe holds the child until start time.
 * Returns 0 on success, -1 on error (with a perror() diagnostic).
 */
int perf_evlist__prepare_workload(struct perf_evlist *evlist,
				  struct perf_target *target,
				  const char *argv[], bool pipe_output,
				  bool want_signal)
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		/* Child: optionally send stdout to stderr's destination. */
		if (pipe_output)
			dup2(2, 1);

		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		/* Don't leak the go pipe read end into the exec'ed workload. */
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		if (read(go_pipe[0], &bf, 1) == -1)
			perror("unable to read pipe");

		execvp(argv[0], (char **)argv);

		/* Only reached if execvp() failed. */
		perror(argv[0]);
		if (want_signal)
			kill(getppid(), SIGUSR1);
		exit(-1);
	}

	/* With no explicit target, monitor the just-forked workload itself. */
	if (perf_target__none(target))
		evlist->threads->map[0] = evlist->workload.pid;

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	/* Keep the cork fd from leaking into other exec'ed processes. */
	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}
1010 1015
1011 int perf_evlist__start_workload(struct perf_evlist *evlist) 1016 int perf_evlist__start_workload(struct perf_evlist *evlist)
1012 { 1017 {
1013 if (evlist->workload.cork_fd > 0) { 1018 if (evlist->workload.cork_fd > 0) {
1014 char bf = 0; 1019 char bf = 0;
1015 int ret; 1020 int ret;
1016 /* 1021 /*
1017 * Remove the cork, let it rip! 1022 * Remove the cork, let it rip!
1018 */ 1023 */
1019 ret = write(evlist->workload.cork_fd, &bf, 1); 1024 ret = write(evlist->workload.cork_fd, &bf, 1);
1020 if (ret < 0) 1025 if (ret < 0)
1021 perror("enable to write to pipe"); 1026 perror("enable to write to pipe");
1022 1027
1023 close(evlist->workload.cork_fd); 1028 close(evlist->workload.cork_fd);
1024 return ret; 1029 return ret;
1025 } 1030 }
1026 1031
1027 return 0; 1032 return 0;
1028 } 1033 }
1029 1034
/*
 * Resolve the evsel that produced 'event' and parse the sample with its
 * attributes.  Returns -EFAULT when the event can't be matched to any
 * evsel in the list.
 */
int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
			      struct perf_sample *sample)
{
	struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);

	return evsel ? perf_evsel__parse_sample(evsel, event, sample) : -EFAULT;
}
1039 1044
1040 size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp) 1045 size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
1041 { 1046 {
1042 struct perf_evsel *evsel; 1047 struct perf_evsel *evsel;
1043 size_t printed = 0; 1048 size_t printed = 0;
1044 1049
1045 list_for_each_entry(evsel, &evlist->entries, node) { 1050 list_for_each_entry(evsel, &evlist->entries, node) {
1046 printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "", 1051 printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
1047 perf_evsel__name(evsel)); 1052 perf_evsel__name(evsel));
1048 } 1053 }
1049 1054
1050 return printed + fprintf(fp, "\n");; 1055 return printed + fprintf(fp, "\n");;
1051 } 1056 }
1052 1057
tools/perf/util/session.c
1 #include <linux/kernel.h> 1 #include <linux/kernel.h>
2 #include <traceevent/event-parse.h> 2 #include <traceevent/event-parse.h>
3 3
4 #include <byteswap.h> 4 #include <byteswap.h>
5 #include <unistd.h> 5 #include <unistd.h>
6 #include <sys/types.h> 6 #include <sys/types.h>
7 #include <sys/mman.h> 7 #include <sys/mman.h>
8 8
9 #include "evlist.h" 9 #include "evlist.h"
10 #include "evsel.h" 10 #include "evsel.h"
11 #include "session.h" 11 #include "session.h"
12 #include "tool.h" 12 #include "tool.h"
13 #include "sort.h" 13 #include "sort.h"
14 #include "util.h" 14 #include "util.h"
15 #include "cpumap.h" 15 #include "cpumap.h"
16 #include "perf_regs.h" 16 #include "perf_regs.h"
17 #include "vdso.h" 17 #include "vdso.h"
18 18
19 static int perf_session__open(struct perf_session *self, bool force) 19 static int perf_session__open(struct perf_session *self, bool force)
20 { 20 {
21 struct stat input_stat; 21 struct stat input_stat;
22 22
23 if (!strcmp(self->filename, "-")) { 23 if (!strcmp(self->filename, "-")) {
24 self->fd_pipe = true; 24 self->fd_pipe = true;
25 self->fd = STDIN_FILENO; 25 self->fd = STDIN_FILENO;
26 26
27 if (perf_session__read_header(self) < 0) 27 if (perf_session__read_header(self) < 0)
28 pr_err("incompatible file format (rerun with -v to learn more)"); 28 pr_err("incompatible file format (rerun with -v to learn more)");
29 29
30 return 0; 30 return 0;
31 } 31 }
32 32
33 self->fd = open(self->filename, O_RDONLY); 33 self->fd = open(self->filename, O_RDONLY);
34 if (self->fd < 0) { 34 if (self->fd < 0) {
35 int err = errno; 35 int err = errno;
36 36
37 pr_err("failed to open %s: %s", self->filename, strerror(err)); 37 pr_err("failed to open %s: %s", self->filename, strerror(err));
38 if (err == ENOENT && !strcmp(self->filename, "perf.data")) 38 if (err == ENOENT && !strcmp(self->filename, "perf.data"))
39 pr_err(" (try 'perf record' first)"); 39 pr_err(" (try 'perf record' first)");
40 pr_err("\n"); 40 pr_err("\n");
41 return -errno; 41 return -errno;
42 } 42 }
43 43
44 if (fstat(self->fd, &input_stat) < 0) 44 if (fstat(self->fd, &input_stat) < 0)
45 goto out_close; 45 goto out_close;
46 46
47 if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) { 47 if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
48 pr_err("file %s not owned by current user or root\n", 48 pr_err("file %s not owned by current user or root\n",
49 self->filename); 49 self->filename);
50 goto out_close; 50 goto out_close;
51 } 51 }
52 52
53 if (!input_stat.st_size) { 53 if (!input_stat.st_size) {
54 pr_info("zero-sized file (%s), nothing to do!\n", 54 pr_info("zero-sized file (%s), nothing to do!\n",
55 self->filename); 55 self->filename);
56 goto out_close; 56 goto out_close;
57 } 57 }
58 58
59 if (perf_session__read_header(self) < 0) { 59 if (perf_session__read_header(self) < 0) {
60 pr_err("incompatible file format (rerun with -v to learn more)"); 60 pr_err("incompatible file format (rerun with -v to learn more)");
61 goto out_close; 61 goto out_close;
62 } 62 }
63 63
64 if (!perf_evlist__valid_sample_type(self->evlist)) { 64 if (!perf_evlist__valid_sample_type(self->evlist)) {
65 pr_err("non matching sample_type"); 65 pr_err("non matching sample_type");
66 goto out_close; 66 goto out_close;
67 } 67 }
68 68
69 if (!perf_evlist__valid_sample_id_all(self->evlist)) { 69 if (!perf_evlist__valid_sample_id_all(self->evlist)) {
70 pr_err("non matching sample_id_all"); 70 pr_err("non matching sample_id_all");
71 goto out_close; 71 goto out_close;
72 } 72 }
73 73
74 if (!perf_evlist__valid_read_format(self->evlist)) { 74 if (!perf_evlist__valid_read_format(self->evlist)) {
75 pr_err("non matching read_format"); 75 pr_err("non matching read_format");
76 goto out_close; 76 goto out_close;
77 } 77 }
78 78
79 self->size = input_stat.st_size; 79 self->size = input_stat.st_size;
80 return 0; 80 return 0;
81 81
82 out_close: 82 out_close:
83 close(self->fd); 83 close(self->fd);
84 self->fd = -1; 84 self->fd = -1;
85 return -1; 85 return -1;
86 } 86 }
87 87
/*
 * Propagate the evlist's sample id header size to all machines in the
 * session, so events can be sized correctly during processing.
 */
void perf_session__set_id_hdr_size(struct perf_session *session)
{
	u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);

	machines__set_id_hdr_size(&session->machines, id_hdr_size);
}
94 94
95 int perf_session__create_kernel_maps(struct perf_session *self) 95 int perf_session__create_kernel_maps(struct perf_session *self)
96 { 96 {
97 int ret = machine__create_kernel_maps(&self->machines.host); 97 int ret = machine__create_kernel_maps(&self->machines.host);
98 98
99 if (ret >= 0) 99 if (ret >= 0)
100 ret = machines__create_guest_kernel_maps(&self->machines); 100 ret = machines__create_guest_kernel_maps(&self->machines);
101 return ret; 101 return ret;
102 } 102 }
103 103
/* Tear down the kernel maps of all machines in this session. */
static void perf_session__destroy_kernel_maps(struct perf_session *self)
{
	machines__destroy_kernel_maps(&self->machines);
}
108 108
/*
 * Allocate and initialize a perf_session.
 *
 * A NULL/empty filename defaults to "-" (stdin) when stdin is a pipe,
 * otherwise to "perf.data".  In O_RDONLY mode the input is opened and
 * validated; in O_WRONLY mode kernel maps are created up front.  If the
 * data lacks sample_id_all, a tool that requires timestamps for ordering
 * is downgraded to unordered processing with a warning.
 *
 * Returns the new session, or NULL on allocation/open failure.
 */
struct perf_session *perf_session__new(const char *filename, int mode,
				       bool force, bool repipe,
				       struct perf_tool *tool)
{
	struct perf_session *self;
	struct stat st;
	size_t len;

	if (!filename || !strlen(filename)) {
		if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
			filename = "-";
		else
			filename = "perf.data";
	}

	len = strlen(filename);
	/* zalloc() zeroes the trailing filename bytes, so the extra 'len'
	 * (rather than len + 1) still leaves the copy NUL terminated —
	 * the struct's filename field provides the final byte. */
	self = zalloc(sizeof(*self) + len);

	if (self == NULL)
		goto out;

	memcpy(self->filename, filename, len);
	self->repipe = repipe;
	INIT_LIST_HEAD(&self->ordered_samples.samples);
	INIT_LIST_HEAD(&self->ordered_samples.sample_cache);
	INIT_LIST_HEAD(&self->ordered_samples.to_free);
	machines__init(&self->machines);

	if (mode == O_RDONLY) {
		if (perf_session__open(self, force) < 0)
			goto out_delete;
		perf_session__set_id_hdr_size(self);
	} else if (mode == O_WRONLY) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(self) < 0)
			goto out_delete;
	}

	if (tool && tool->ordering_requires_timestamps &&
	    tool->ordered_samples && !perf_evlist__sample_id_all(self->evlist)) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		tool->ordered_samples = false;
	}

out:
	return self;
out_delete:
	perf_session__delete(self);
	return NULL;
}
162 162
/* Release threads already marked dead on the session's host machine. */
static void perf_session__delete_dead_threads(struct perf_session *session)
{
	machine__delete_dead_threads(&session->machines.host);
}
167 167
/* Release all remaining threads on the session's host machine. */
static void perf_session__delete_threads(struct perf_session *session)
{
	machine__delete_threads(&session->machines.host);
}
172 172
173 static void perf_session_env__delete(struct perf_session_env *env) 173 static void perf_session_env__delete(struct perf_session_env *env)
174 { 174 {
175 free(env->hostname); 175 free(env->hostname);
176 free(env->os_release); 176 free(env->os_release);
177 free(env->version); 177 free(env->version);
178 free(env->arch); 178 free(env->arch);
179 free(env->cpu_desc); 179 free(env->cpu_desc);
180 free(env->cpuid); 180 free(env->cpuid);
181 181
182 free(env->cmdline); 182 free(env->cmdline);
183 free(env->sibling_cores); 183 free(env->sibling_cores);
184 free(env->sibling_threads); 184 free(env->sibling_threads);
185 free(env->numa_nodes); 185 free(env->numa_nodes);
186 free(env->pmu_mappings); 186 free(env->pmu_mappings);
187 } 187 }
188 188
/*
 * Tear down a session: destroy kernel maps, threads, the header env,
 * the machines, close the input fd and free the session itself.
 */
void perf_session__delete(struct perf_session *self)
{
	perf_session__destroy_kernel_maps(self);
	perf_session__delete_dead_threads(self);
	perf_session__delete_threads(self);
	perf_session_env__delete(&self->header.env);
	machines__exit(&self->machines);
	close(self->fd);
	free(self);
	/* vdso__exit() uses global state only, so calling it after
	 * free(self) is safe (it does not touch the session). */
	vdso__exit();
}
200 200
/* Default handler for synthesized tracing-data events: log and ignore. */
static int process_event_synth_tracing_data_stub(struct perf_tool *tool
						 __maybe_unused,
						 union perf_event *event
						 __maybe_unused,
						 struct perf_session *session
						 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}
211 211
/* Default handler for synthesized attr events: log and ignore. */
static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
					 union perf_event *event __maybe_unused,
					 struct perf_evlist **pevlist
					 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}
220 220
/* Default handler for sample events: log and ignore. */
static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
				     union perf_event *event __maybe_unused,
				     struct perf_sample *sample __maybe_unused,
				     struct perf_evsel *evsel __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}
230 230
/* Default handler for generic (mmap/comm/fork/...) events: log and ignore. */
static int process_event_stub(struct perf_tool *tool __maybe_unused,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}
239 239
/* Default handler for finished-round (and build-id) events: log and ignore. */
static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
				       union perf_event *event __maybe_unused,
				       struct perf_session *perf_session
				       __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}
248 248
249 static int process_finished_round(struct perf_tool *tool, 249 static int process_finished_round(struct perf_tool *tool,
250 union perf_event *event, 250 union perf_event *event,
251 struct perf_session *session); 251 struct perf_session *session);
252 252
/*
 * Fill in a stub for every tool callback left NULL, so event dispatch
 * never has to NULL-check.  Most defaults just log "unhandled"; lost
 * events get real accounting, and finished_round gets the ordering
 * implementation only when the tool asked for ordered samples.
 */
void perf_tool__fill_defaults(struct perf_tool *tool)
{
	if (tool->sample == NULL)
		tool->sample = process_event_sample_stub;
	if (tool->mmap == NULL)
		tool->mmap = process_event_stub;
	if (tool->comm == NULL)
		tool->comm = process_event_stub;
	if (tool->fork == NULL)
		tool->fork = process_event_stub;
	if (tool->exit == NULL)
		tool->exit = process_event_stub;
	if (tool->lost == NULL)
		tool->lost = perf_event__process_lost;
	if (tool->read == NULL)
		tool->read = process_event_sample_stub;
	if (tool->throttle == NULL)
		tool->throttle = process_event_stub;
	if (tool->unthrottle == NULL)
		tool->unthrottle = process_event_stub;
	if (tool->attr == NULL)
		tool->attr = process_event_synth_attr_stub;
	if (tool->tracing_data == NULL)
		tool->tracing_data = process_event_synth_tracing_data_stub;
	if (tool->build_id == NULL)
		tool->build_id = process_finished_round_stub;
	if (tool->finished_round == NULL) {
		if (tool->ordered_samples)
			tool->finished_round = process_finished_round;
		else
			tool->finished_round = process_finished_round_stub;
	}
}
286 286
287 void mem_bswap_32(void *src, int byte_size) 287 void mem_bswap_32(void *src, int byte_size)
288 { 288 {
289 u32 *m = src; 289 u32 *m = src;
290 while (byte_size > 0) { 290 while (byte_size > 0) {
291 *m = bswap_32(*m); 291 *m = bswap_32(*m);
292 byte_size -= sizeof(u32); 292 byte_size -= sizeof(u32);
293 ++m; 293 ++m;
294 } 294 }
295 } 295 }
296 296
297 void mem_bswap_64(void *src, int byte_size) 297 void mem_bswap_64(void *src, int byte_size)
298 { 298 {
299 u64 *m = src; 299 u64 *m = src;
300 300
301 while (byte_size > 0) { 301 while (byte_size > 0) {
302 *m = bswap_64(*m); 302 *m = bswap_64(*m);
303 byte_size -= sizeof(u64); 303 byte_size -= sizeof(u64);
304 ++m; 304 ++m;
305 } 305 }
306 } 306 }
307 307
/*
 * Byte-swap the trailing sample_id_all block of 'event', which runs from
 * 'data' to the end of the event; that tail must be u64-aligned in size.
 */
static void swap_sample_id_all(union perf_event *event, void *data)
{
	void *end = (void *) event + event->header.size;
	int size = end - data;

	BUG_ON(size % sizeof(u64));
	mem_bswap_64(data, size);
}
316 316
/* Swap an event whose entire payload (after the header) is u64 values. */
static void perf_event__all64_swap(union perf_event *event,
				   bool sample_id_all __maybe_unused)
{
	struct perf_event_header *hdr = &event->header;
	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}
323 323
/*
 * Swap a PERF_RECORD_COMM event: pid/tid fields, then the optional
 * sample_id_all block that follows the u64-padded comm string.
 */
static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);

	if (sample_id_all) {
		void *data = &event->comm.comm;

		/* sample_id_all data starts after the NUL, padded to u64 */
		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}
336 336
/*
 * Swap a PERF_RECORD_MMAP event: fixed fields, then the optional
 * sample_id_all block that follows the u64-padded filename string.
 */
static void perf_event__mmap_swap(union perf_event *event,
				  bool sample_id_all)
{
	event->mmap.pid = bswap_32(event->mmap.pid);
	event->mmap.tid = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);

	if (sample_id_all) {
		void *data = &event->mmap.filename;

		/* sample_id_all data starts after the NUL, padded to u64 */
		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}
353 353
/* Swap a PERF_RECORD_FORK/EXIT event plus its trailing sample_id_all data. */
static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
	event->fork.pid = bswap_32(event->fork.pid);
	event->fork.tid = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);

	if (sample_id_all)
		swap_sample_id_all(event, &event->fork + 1);
}
365 365
/* Swap a PERF_RECORD_READ event plus its trailing sample_id_all data. */
static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
	event->read.pid = bswap_32(event->read.pid);
	event->read.tid = bswap_32(event->read.tid);
	event->read.value = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id = bswap_64(event->read.id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->read + 1);
}
378 378
379 static u8 revbyte(u8 b) 379 static u8 revbyte(u8 b)
380 { 380 {
381 int rev = (b >> 4) | ((b & 0xf) << 4); 381 int rev = (b >> 4) | ((b & 0xf) << 4);
382 rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2); 382 rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
383 rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1); 383 rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
384 return (u8) rev; 384 return (u8) rev;
385 } 385 }
386 386
/*
 * XXX this is a hack in an attempt to carry the flags bitfield
 * through the endian village. The ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above seems to be byte specific, so we need to reverse each
 * byte of the bitfield. 'Internet' also says this might be implementation
 * specific and we probably need a proper fix that carries the
 * perf_event_attr bitfield flags in a separate data file FEAT_ section.
 * Though this seems to work for now.
 */
401 static void swap_bitfield(u8 *p, unsigned len) 401 static void swap_bitfield(u8 *p, unsigned len)
402 { 402 {
403 unsigned i; 403 unsigned i;
404 404
405 for (i = 0; i < len; i++) { 405 for (i = 0; i < len; i++) {
406 *p = revbyte(*p); 406 *p = revbyte(*p);
407 p++; 407 p++;
408 } 408 }
409 } 409 }
410 410
411 /* exported for swapping attributes in file header */ 411 /* exported for swapping attributes in file header */
/* exported for swapping attributes in file header */
/*
 * Byte-swap all multi-byte fields of a perf_event_attr, plus the u64
 * bitfield word that sits right after read_format (see the comment on
 * swap_bitfield() about bitfield layout across endiannesses).
 */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
	attr->type = bswap_32(attr->type);
	attr->size = bswap_32(attr->size);
	attr->config = bswap_64(attr->config);
	attr->sample_period = bswap_64(attr->sample_period);
	attr->sample_type = bswap_64(attr->sample_type);
	attr->read_format = bswap_64(attr->read_format);
	attr->wakeup_events = bswap_32(attr->wakeup_events);
	attr->bp_type = bswap_32(attr->bp_type);
	attr->bp_addr = bswap_64(attr->bp_addr);
	attr->bp_len = bswap_64(attr->bp_len);

	swap_bitfield((u8 *) (&attr->read_format + 1), sizeof(u64));
}
427 427
/*
 * Swap a PERF_RECORD_HEADER_ATTR event: the embedded attr struct, then
 * the trailing array of u64 ids that fills the rest of the event.
 */
static void perf_event__hdr_attr_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	size_t size;

	perf_event__attr_swap(&event->attr.attr);

	size = event->header.size;
	size -= (void *)&event->attr.id - (void *)event;
	mem_bswap_64(event->attr.id, size);
}
439 439
/* Swap a PERF_RECORD_HEADER_EVENT_TYPE event's 64-bit event id. */
static void perf_event__event_type_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
}
446 446
447 static void perf_event__tracing_data_swap(union perf_event *event, 447 static void perf_event__tracing_data_swap(union perf_event *event,
448 bool sample_id_all __maybe_unused) 448 bool sample_id_all __maybe_unused)
449 { 449 {
450 event->tracing_data.size = bswap_32(event->tracing_data.size); 450 event->tracing_data.size = bswap_32(event->tracing_data.size);
451 } 451 }
452 452
453 typedef void (*perf_event__swap_op)(union perf_event *event, 453 typedef void (*perf_event__swap_op)(union perf_event *event,
454 bool sample_id_all); 454 bool sample_id_all);
455 455
456 static perf_event__swap_op perf_event__swap_ops[] = { 456 static perf_event__swap_op perf_event__swap_ops[] = {
457 [PERF_RECORD_MMAP] = perf_event__mmap_swap, 457 [PERF_RECORD_MMAP] = perf_event__mmap_swap,
458 [PERF_RECORD_COMM] = perf_event__comm_swap, 458 [PERF_RECORD_COMM] = perf_event__comm_swap,
459 [PERF_RECORD_FORK] = perf_event__task_swap, 459 [PERF_RECORD_FORK] = perf_event__task_swap,
460 [PERF_RECORD_EXIT] = perf_event__task_swap, 460 [PERF_RECORD_EXIT] = perf_event__task_swap,
461 [PERF_RECORD_LOST] = perf_event__all64_swap, 461 [PERF_RECORD_LOST] = perf_event__all64_swap,
462 [PERF_RECORD_READ] = perf_event__read_swap, 462 [PERF_RECORD_READ] = perf_event__read_swap,
463 [PERF_RECORD_SAMPLE] = perf_event__all64_swap, 463 [PERF_RECORD_SAMPLE] = perf_event__all64_swap,
464 [PERF_RECORD_HEADER_ATTR] = perf_event__hdr_attr_swap, 464 [PERF_RECORD_HEADER_ATTR] = perf_event__hdr_attr_swap,
465 [PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap, 465 [PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap,
466 [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap, 466 [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
467 [PERF_RECORD_HEADER_BUILD_ID] = NULL, 467 [PERF_RECORD_HEADER_BUILD_ID] = NULL,
468 [PERF_RECORD_HEADER_MAX] = NULL, 468 [PERF_RECORD_HEADER_MAX] = NULL,
469 }; 469 };
470 470
471 struct sample_queue { 471 struct sample_queue {
472 u64 timestamp; 472 u64 timestamp;
473 u64 file_offset; 473 u64 file_offset;
474 union perf_event *event; 474 union perf_event *event;
475 struct list_head list; 475 struct list_head list;
476 }; 476 };
477 477
478 static void perf_session_free_sample_buffers(struct perf_session *session) 478 static void perf_session_free_sample_buffers(struct perf_session *session)
479 { 479 {
480 struct ordered_samples *os = &session->ordered_samples; 480 struct ordered_samples *os = &session->ordered_samples;
481 481
482 while (!list_empty(&os->to_free)) { 482 while (!list_empty(&os->to_free)) {
483 struct sample_queue *sq; 483 struct sample_queue *sq;
484 484
485 sq = list_entry(os->to_free.next, struct sample_queue, list); 485 sq = list_entry(os->to_free.next, struct sample_queue, list);
486 list_del(&sq->list); 486 list_del(&sq->list);
487 free(sq); 487 free(sq);
488 } 488 }
489 } 489 }
490 490
491 static int perf_session_deliver_event(struct perf_session *session, 491 static int perf_session_deliver_event(struct perf_session *session,
492 union perf_event *event, 492 union perf_event *event,
493 struct perf_sample *sample, 493 struct perf_sample *sample,
494 struct perf_tool *tool, 494 struct perf_tool *tool,
495 u64 file_offset); 495 u64 file_offset);
496 496
497 static int flush_sample_queue(struct perf_session *s, 497 static int flush_sample_queue(struct perf_session *s,
498 struct perf_tool *tool) 498 struct perf_tool *tool)
499 { 499 {
500 struct ordered_samples *os = &s->ordered_samples; 500 struct ordered_samples *os = &s->ordered_samples;
501 struct list_head *head = &os->samples; 501 struct list_head *head = &os->samples;
502 struct sample_queue *tmp, *iter; 502 struct sample_queue *tmp, *iter;
503 struct perf_sample sample; 503 struct perf_sample sample;
504 u64 limit = os->next_flush; 504 u64 limit = os->next_flush;
505 u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL; 505 u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL;
506 unsigned idx = 0, progress_next = os->nr_samples / 16; 506 unsigned idx = 0, progress_next = os->nr_samples / 16;
507 bool show_progress = limit == ULLONG_MAX;
507 int ret; 508 int ret;
508 509
509 if (!tool->ordered_samples || !limit) 510 if (!tool->ordered_samples || !limit)
510 return 0; 511 return 0;
511 512
512 list_for_each_entry_safe(iter, tmp, head, list) { 513 list_for_each_entry_safe(iter, tmp, head, list) {
513 if (iter->timestamp > limit) 514 if (iter->timestamp > limit)
514 break; 515 break;
515 516
516 ret = perf_evlist__parse_sample(s->evlist, iter->event, &sample); 517 ret = perf_evlist__parse_sample(s->evlist, iter->event, &sample);
517 if (ret) 518 if (ret)
518 pr_err("Can't parse sample, err = %d\n", ret); 519 pr_err("Can't parse sample, err = %d\n", ret);
519 else { 520 else {
520 ret = perf_session_deliver_event(s, iter->event, &sample, tool, 521 ret = perf_session_deliver_event(s, iter->event, &sample, tool,
521 iter->file_offset); 522 iter->file_offset);
522 if (ret) 523 if (ret)
523 return ret; 524 return ret;
524 } 525 }
525 526
526 os->last_flush = iter->timestamp; 527 os->last_flush = iter->timestamp;
527 list_del(&iter->list); 528 list_del(&iter->list);
528 list_add(&iter->list, &os->sample_cache); 529 list_add(&iter->list, &os->sample_cache);
529 if (++idx >= progress_next) { 530 if (show_progress && (++idx >= progress_next)) {
530 progress_next += os->nr_samples / 16; 531 progress_next += os->nr_samples / 16;
531 ui_progress__update(idx, os->nr_samples, 532 ui_progress__update(idx, os->nr_samples,
532 "Processing time ordered events..."); 533 "Processing time ordered events...");
533 } 534 }
534 } 535 }
535 536
536 if (list_empty(head)) { 537 if (list_empty(head)) {
537 os->last_sample = NULL; 538 os->last_sample = NULL;
538 } else if (last_ts <= limit) { 539 } else if (last_ts <= limit) {
539 os->last_sample = 540 os->last_sample =
540 list_entry(head->prev, struct sample_queue, list); 541 list_entry(head->prev, struct sample_queue, list);
541 } 542 }
542 543
543 os->nr_samples = 0; 544 os->nr_samples = 0;
544 545
545 return 0; 546 return 0;
546 } 547 }
547 548
548 /* 549 /*
549 * When perf record finishes a pass on every buffers, it records this pseudo 550 * When perf record finishes a pass on every buffers, it records this pseudo
550 * event. 551 * event.
551 * We record the max timestamp t found in the pass n. 552 * We record the max timestamp t found in the pass n.
552 * Assuming these timestamps are monotonic across cpus, we know that if 553 * Assuming these timestamps are monotonic across cpus, we know that if
553 * a buffer still has events with timestamps below t, they will be all 554 * a buffer still has events with timestamps below t, they will be all
554 * available and then read in the pass n + 1. 555 * available and then read in the pass n + 1.
555 * Hence when we start to read the pass n + 2, we can safely flush every 556 * Hence when we start to read the pass n + 2, we can safely flush every
556 * events with timestamps below t. 557 * events with timestamps below t.
557 * 558 *
558 * ============ PASS n ================= 559 * ============ PASS n =================
559 * CPU 0 | CPU 1 560 * CPU 0 | CPU 1
560 * | 561 * |
561 * cnt1 timestamps | cnt2 timestamps 562 * cnt1 timestamps | cnt2 timestamps
562 * 1 | 2 563 * 1 | 2
563 * 2 | 3 564 * 2 | 3
564 * - | 4 <--- max recorded 565 * - | 4 <--- max recorded
565 * 566 *
566 * ============ PASS n + 1 ============== 567 * ============ PASS n + 1 ==============
567 * CPU 0 | CPU 1 568 * CPU 0 | CPU 1
568 * | 569 * |
569 * cnt1 timestamps | cnt2 timestamps 570 * cnt1 timestamps | cnt2 timestamps
570 * 3 | 5 571 * 3 | 5
571 * 4 | 6 572 * 4 | 6
572 * 5 | 7 <---- max recorded 573 * 5 | 7 <---- max recorded
573 * 574 *
574 * Flush every events below timestamp 4 575 * Flush every events below timestamp 4
575 * 576 *
576 * ============ PASS n + 2 ============== 577 * ============ PASS n + 2 ==============
577 * CPU 0 | CPU 1 578 * CPU 0 | CPU 1
578 * | 579 * |
579 * cnt1 timestamps | cnt2 timestamps 580 * cnt1 timestamps | cnt2 timestamps
580 * 6 | 8 581 * 6 | 8
581 * 7 | 9 582 * 7 | 9
582 * - | 10 583 * - | 10
583 * 584 *
584 * Flush every events below timestamp 7 585 * Flush every events below timestamp 7
585 * etc... 586 * etc...
586 */ 587 */
587 static int process_finished_round(struct perf_tool *tool, 588 static int process_finished_round(struct perf_tool *tool,
588 union perf_event *event __maybe_unused, 589 union perf_event *event __maybe_unused,
589 struct perf_session *session) 590 struct perf_session *session)
590 { 591 {
591 int ret = flush_sample_queue(session, tool); 592 int ret = flush_sample_queue(session, tool);
592 if (!ret) 593 if (!ret)
593 session->ordered_samples.next_flush = session->ordered_samples.max_timestamp; 594 session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;
594 595
595 return ret; 596 return ret;
596 } 597 }
597 598
598 /* The queue is ordered by time */ 599 /* The queue is ordered by time */
599 static void __queue_event(struct sample_queue *new, struct perf_session *s) 600 static void __queue_event(struct sample_queue *new, struct perf_session *s)
600 { 601 {
601 struct ordered_samples *os = &s->ordered_samples; 602 struct ordered_samples *os = &s->ordered_samples;
602 struct sample_queue *sample = os->last_sample; 603 struct sample_queue *sample = os->last_sample;
603 u64 timestamp = new->timestamp; 604 u64 timestamp = new->timestamp;
604 struct list_head *p; 605 struct list_head *p;
605 606
606 ++os->nr_samples; 607 ++os->nr_samples;
607 os->last_sample = new; 608 os->last_sample = new;
608 609
609 if (!sample) { 610 if (!sample) {
610 list_add(&new->list, &os->samples); 611 list_add(&new->list, &os->samples);
611 os->max_timestamp = timestamp; 612 os->max_timestamp = timestamp;
612 return; 613 return;
613 } 614 }
614 615
615 /* 616 /*
616 * last_sample might point to some random place in the list as it's 617 * last_sample might point to some random place in the list as it's
617 * the last queued event. We expect that the new event is close to 618 * the last queued event. We expect that the new event is close to
618 * this. 619 * this.
619 */ 620 */
620 if (sample->timestamp <= timestamp) { 621 if (sample->timestamp <= timestamp) {
621 while (sample->timestamp <= timestamp) { 622 while (sample->timestamp <= timestamp) {
622 p = sample->list.next; 623 p = sample->list.next;
623 if (p == &os->samples) { 624 if (p == &os->samples) {
624 list_add_tail(&new->list, &os->samples); 625 list_add_tail(&new->list, &os->samples);
625 os->max_timestamp = timestamp; 626 os->max_timestamp = timestamp;
626 return; 627 return;
627 } 628 }
628 sample = list_entry(p, struct sample_queue, list); 629 sample = list_entry(p, struct sample_queue, list);
629 } 630 }
630 list_add_tail(&new->list, &sample->list); 631 list_add_tail(&new->list, &sample->list);
631 } else { 632 } else {
632 while (sample->timestamp > timestamp) { 633 while (sample->timestamp > timestamp) {
633 p = sample->list.prev; 634 p = sample->list.prev;
634 if (p == &os->samples) { 635 if (p == &os->samples) {
635 list_add(&new->list, &os->samples); 636 list_add(&new->list, &os->samples);
636 return; 637 return;
637 } 638 }
638 sample = list_entry(p, struct sample_queue, list); 639 sample = list_entry(p, struct sample_queue, list);
639 } 640 }
640 list_add(&new->list, &sample->list); 641 list_add(&new->list, &sample->list);
641 } 642 }
642 } 643 }
643 644
644 #define MAX_SAMPLE_BUFFER (64 * 1024 / sizeof(struct sample_queue)) 645 #define MAX_SAMPLE_BUFFER (64 * 1024 / sizeof(struct sample_queue))
645 646
646 int perf_session_queue_event(struct perf_session *s, union perf_event *event, 647 int perf_session_queue_event(struct perf_session *s, union perf_event *event,
647 struct perf_sample *sample, u64 file_offset) 648 struct perf_sample *sample, u64 file_offset)
648 { 649 {
649 struct ordered_samples *os = &s->ordered_samples; 650 struct ordered_samples *os = &s->ordered_samples;
650 struct list_head *sc = &os->sample_cache; 651 struct list_head *sc = &os->sample_cache;
651 u64 timestamp = sample->time; 652 u64 timestamp = sample->time;
652 struct sample_queue *new; 653 struct sample_queue *new;
653 654
654 if (!timestamp || timestamp == ~0ULL) 655 if (!timestamp || timestamp == ~0ULL)
655 return -ETIME; 656 return -ETIME;
656 657
657 if (timestamp < s->ordered_samples.last_flush) { 658 if (timestamp < s->ordered_samples.last_flush) {
658 printf("Warning: Timestamp below last timeslice flush\n"); 659 printf("Warning: Timestamp below last timeslice flush\n");
659 return -EINVAL; 660 return -EINVAL;
660 } 661 }
661 662
662 if (!list_empty(sc)) { 663 if (!list_empty(sc)) {
663 new = list_entry(sc->next, struct sample_queue, list); 664 new = list_entry(sc->next, struct sample_queue, list);
664 list_del(&new->list); 665 list_del(&new->list);
665 } else if (os->sample_buffer) { 666 } else if (os->sample_buffer) {
666 new = os->sample_buffer + os->sample_buffer_idx; 667 new = os->sample_buffer + os->sample_buffer_idx;
667 if (++os->sample_buffer_idx == MAX_SAMPLE_BUFFER) 668 if (++os->sample_buffer_idx == MAX_SAMPLE_BUFFER)
668 os->sample_buffer = NULL; 669 os->sample_buffer = NULL;
669 } else { 670 } else {
670 os->sample_buffer = malloc(MAX_SAMPLE_BUFFER * sizeof(*new)); 671 os->sample_buffer = malloc(MAX_SAMPLE_BUFFER * sizeof(*new));
671 if (!os->sample_buffer) 672 if (!os->sample_buffer)
672 return -ENOMEM; 673 return -ENOMEM;
673 list_add(&os->sample_buffer->list, &os->to_free); 674 list_add(&os->sample_buffer->list, &os->to_free);
674 os->sample_buffer_idx = 2; 675 os->sample_buffer_idx = 2;
675 new = os->sample_buffer + 1; 676 new = os->sample_buffer + 1;
676 } 677 }
677 678
678 new->timestamp = timestamp; 679 new->timestamp = timestamp;
679 new->file_offset = file_offset; 680 new->file_offset = file_offset;
680 new->event = event; 681 new->event = event;
681 682
682 __queue_event(new, s); 683 __queue_event(new, s);
683 684
684 return 0; 685 return 0;
685 } 686 }
686 687
687 static void callchain__printf(struct perf_sample *sample) 688 static void callchain__printf(struct perf_sample *sample)
688 { 689 {
689 unsigned int i; 690 unsigned int i;
690 691
691 printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr); 692 printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr);
692 693
693 for (i = 0; i < sample->callchain->nr; i++) 694 for (i = 0; i < sample->callchain->nr; i++)
694 printf("..... %2d: %016" PRIx64 "\n", 695 printf("..... %2d: %016" PRIx64 "\n",
695 i, sample->callchain->ips[i]); 696 i, sample->callchain->ips[i]);
696 } 697 }
697 698
698 static void branch_stack__printf(struct perf_sample *sample) 699 static void branch_stack__printf(struct perf_sample *sample)
699 { 700 {
700 uint64_t i; 701 uint64_t i;
701 702
702 printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr); 703 printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);
703 704
704 for (i = 0; i < sample->branch_stack->nr; i++) 705 for (i = 0; i < sample->branch_stack->nr; i++)
705 printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 "\n", 706 printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 "\n",
706 i, sample->branch_stack->entries[i].from, 707 i, sample->branch_stack->entries[i].from,
707 sample->branch_stack->entries[i].to); 708 sample->branch_stack->entries[i].to);
708 } 709 }
709 710
710 static void regs_dump__printf(u64 mask, u64 *regs) 711 static void regs_dump__printf(u64 mask, u64 *regs)
711 { 712 {
712 unsigned rid, i = 0; 713 unsigned rid, i = 0;
713 714
714 for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) { 715 for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
715 u64 val = regs[i++]; 716 u64 val = regs[i++];
716 717
717 printf(".... %-5s 0x%" PRIx64 "\n", 718 printf(".... %-5s 0x%" PRIx64 "\n",
718 perf_reg_name(rid), val); 719 perf_reg_name(rid), val);
719 } 720 }
720 } 721 }
721 722
722 static void regs_user__printf(struct perf_sample *sample, u64 mask) 723 static void regs_user__printf(struct perf_sample *sample, u64 mask)
723 { 724 {
724 struct regs_dump *user_regs = &sample->user_regs; 725 struct regs_dump *user_regs = &sample->user_regs;
725 726
726 if (user_regs->regs) { 727 if (user_regs->regs) {
727 printf("... user regs: mask 0x%" PRIx64 "\n", mask); 728 printf("... user regs: mask 0x%" PRIx64 "\n", mask);
728 regs_dump__printf(mask, user_regs->regs); 729 regs_dump__printf(mask, user_regs->regs);
729 } 730 }
730 } 731 }
731 732
732 static void stack_user__printf(struct stack_dump *dump) 733 static void stack_user__printf(struct stack_dump *dump)
733 { 734 {
734 printf("... ustack: size %" PRIu64 ", offset 0x%x\n", 735 printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
735 dump->size, dump->offset); 736 dump->size, dump->offset);
736 } 737 }
737 738
738 static void perf_session__print_tstamp(struct perf_session *session, 739 static void perf_session__print_tstamp(struct perf_session *session,
739 union perf_event *event, 740 union perf_event *event,
740 struct perf_sample *sample) 741 struct perf_sample *sample)
741 { 742 {
742 u64 sample_type = __perf_evlist__combined_sample_type(session->evlist); 743 u64 sample_type = __perf_evlist__combined_sample_type(session->evlist);
743 744
744 if (event->header.type != PERF_RECORD_SAMPLE && 745 if (event->header.type != PERF_RECORD_SAMPLE &&
745 !perf_evlist__sample_id_all(session->evlist)) { 746 !perf_evlist__sample_id_all(session->evlist)) {
746 fputs("-1 -1 ", stdout); 747 fputs("-1 -1 ", stdout);
747 return; 748 return;
748 } 749 }
749 750
750 if ((sample_type & PERF_SAMPLE_CPU)) 751 if ((sample_type & PERF_SAMPLE_CPU))
751 printf("%u ", sample->cpu); 752 printf("%u ", sample->cpu);
752 753
753 if (sample_type & PERF_SAMPLE_TIME) 754 if (sample_type & PERF_SAMPLE_TIME)
754 printf("%" PRIu64 " ", sample->time); 755 printf("%" PRIu64 " ", sample->time);
755 } 756 }
756 757
757 static void sample_read__printf(struct perf_sample *sample, u64 read_format) 758 static void sample_read__printf(struct perf_sample *sample, u64 read_format)
758 { 759 {
759 printf("... sample_read:\n"); 760 printf("... sample_read:\n");
760 761
761 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 762 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
762 printf("...... time enabled %016" PRIx64 "\n", 763 printf("...... time enabled %016" PRIx64 "\n",
763 sample->read.time_enabled); 764 sample->read.time_enabled);
764 765
765 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 766 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
766 printf("...... time running %016" PRIx64 "\n", 767 printf("...... time running %016" PRIx64 "\n",
767 sample->read.time_running); 768 sample->read.time_running);
768 769
769 if (read_format & PERF_FORMAT_GROUP) { 770 if (read_format & PERF_FORMAT_GROUP) {
770 u64 i; 771 u64 i;
771 772
772 printf(".... group nr %" PRIu64 "\n", sample->read.group.nr); 773 printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);
773 774
774 for (i = 0; i < sample->read.group.nr; i++) { 775 for (i = 0; i < sample->read.group.nr; i++) {
775 struct sample_read_value *value; 776 struct sample_read_value *value;
776 777
777 value = &sample->read.group.values[i]; 778 value = &sample->read.group.values[i];
778 printf("..... id %016" PRIx64 779 printf("..... id %016" PRIx64
779 ", value %016" PRIx64 "\n", 780 ", value %016" PRIx64 "\n",
780 value->id, value->value); 781 value->id, value->value);
781 } 782 }
782 } else 783 } else
783 printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n", 784 printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
784 sample->read.one.id, sample->read.one.value); 785 sample->read.one.id, sample->read.one.value);
785 } 786 }
786 787
787 static void dump_event(struct perf_session *session, union perf_event *event, 788 static void dump_event(struct perf_session *session, union perf_event *event,
788 u64 file_offset, struct perf_sample *sample) 789 u64 file_offset, struct perf_sample *sample)
789 { 790 {
790 if (!dump_trace) 791 if (!dump_trace)
791 return; 792 return;
792 793
793 printf("\n%#" PRIx64 " [%#x]: event: %d\n", 794 printf("\n%#" PRIx64 " [%#x]: event: %d\n",
794 file_offset, event->header.size, event->header.type); 795 file_offset, event->header.size, event->header.type);
795 796
796 trace_event(event); 797 trace_event(event);
797 798
798 if (sample) 799 if (sample)
799 perf_session__print_tstamp(session, event, sample); 800 perf_session__print_tstamp(session, event, sample);
800 801
801 printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset, 802 printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
802 event->header.size, perf_event__name(event->header.type)); 803 event->header.size, perf_event__name(event->header.type));
803 } 804 }
804 805
805 static void dump_sample(struct perf_evsel *evsel, union perf_event *event, 806 static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
806 struct perf_sample *sample) 807 struct perf_sample *sample)
807 { 808 {
808 u64 sample_type; 809 u64 sample_type;
809 810
810 if (!dump_trace) 811 if (!dump_trace)
811 return; 812 return;
812 813
813 printf("(IP, %d): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n", 814 printf("(IP, %d): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
814 event->header.misc, sample->pid, sample->tid, sample->ip, 815 event->header.misc, sample->pid, sample->tid, sample->ip,
815 sample->period, sample->addr); 816 sample->period, sample->addr);
816 817
817 sample_type = evsel->attr.sample_type; 818 sample_type = evsel->attr.sample_type;
818 819
819 if (sample_type & PERF_SAMPLE_CALLCHAIN) 820 if (sample_type & PERF_SAMPLE_CALLCHAIN)
820 callchain__printf(sample); 821 callchain__printf(sample);
821 822
822 if (sample_type & PERF_SAMPLE_BRANCH_STACK) 823 if (sample_type & PERF_SAMPLE_BRANCH_STACK)
823 branch_stack__printf(sample); 824 branch_stack__printf(sample);
824 825
825 if (sample_type & PERF_SAMPLE_REGS_USER) 826 if (sample_type & PERF_SAMPLE_REGS_USER)
826 regs_user__printf(sample, evsel->attr.sample_regs_user); 827 regs_user__printf(sample, evsel->attr.sample_regs_user);
827 828
828 if (sample_type & PERF_SAMPLE_STACK_USER) 829 if (sample_type & PERF_SAMPLE_STACK_USER)
829 stack_user__printf(&sample->user_stack); 830 stack_user__printf(&sample->user_stack);
830 831
831 if (sample_type & PERF_SAMPLE_WEIGHT) 832 if (sample_type & PERF_SAMPLE_WEIGHT)
832 printf("... weight: %" PRIu64 "\n", sample->weight); 833 printf("... weight: %" PRIu64 "\n", sample->weight);
833 834
834 if (sample_type & PERF_SAMPLE_DATA_SRC) 835 if (sample_type & PERF_SAMPLE_DATA_SRC)
835 printf(" . data_src: 0x%"PRIx64"\n", sample->data_src); 836 printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);
836 837
837 if (sample_type & PERF_SAMPLE_READ) 838 if (sample_type & PERF_SAMPLE_READ)
838 sample_read__printf(sample, evsel->attr.read_format); 839 sample_read__printf(sample, evsel->attr.read_format);
839 } 840 }
840 841
841 static struct machine * 842 static struct machine *
842 perf_session__find_machine_for_cpumode(struct perf_session *session, 843 perf_session__find_machine_for_cpumode(struct perf_session *session,
843 union perf_event *event, 844 union perf_event *event,
844 struct perf_sample *sample) 845 struct perf_sample *sample)
845 { 846 {
846 const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; 847 const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
847 848
848 if (perf_guest && 849 if (perf_guest &&
849 ((cpumode == PERF_RECORD_MISC_GUEST_KERNEL) || 850 ((cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
850 (cpumode == PERF_RECORD_MISC_GUEST_USER))) { 851 (cpumode == PERF_RECORD_MISC_GUEST_USER))) {
851 u32 pid; 852 u32 pid;
852 853
853 if (event->header.type == PERF_RECORD_MMAP) 854 if (event->header.type == PERF_RECORD_MMAP)
854 pid = event->mmap.pid; 855 pid = event->mmap.pid;
855 else 856 else
856 pid = sample->pid; 857 pid = sample->pid;
857 858
858 return perf_session__findnew_machine(session, pid); 859 return perf_session__findnew_machine(session, pid);
859 } 860 }
860 861
861 return &session->machines.host; 862 return &session->machines.host;
862 } 863 }
863 864
864 static int deliver_sample_value(struct perf_session *session, 865 static int deliver_sample_value(struct perf_session *session,
865 struct perf_tool *tool, 866 struct perf_tool *tool,
866 union perf_event *event, 867 union perf_event *event,
867 struct perf_sample *sample, 868 struct perf_sample *sample,
868 struct sample_read_value *v, 869 struct sample_read_value *v,
869 struct machine *machine) 870 struct machine *machine)
870 { 871 {
871 struct perf_sample_id *sid; 872 struct perf_sample_id *sid;
872 873
873 sid = perf_evlist__id2sid(session->evlist, v->id); 874 sid = perf_evlist__id2sid(session->evlist, v->id);
874 if (sid) { 875 if (sid) {
875 sample->id = v->id; 876 sample->id = v->id;
876 sample->period = v->value - sid->period; 877 sample->period = v->value - sid->period;
877 sid->period = v->value; 878 sid->period = v->value;
878 } 879 }
879 880
880 if (!sid || sid->evsel == NULL) { 881 if (!sid || sid->evsel == NULL) {
881 ++session->stats.nr_unknown_id; 882 ++session->stats.nr_unknown_id;
882 return 0; 883 return 0;
883 } 884 }
884 885
885 return tool->sample(tool, event, sample, sid->evsel, machine); 886 return tool->sample(tool, event, sample, sid->evsel, machine);
886 } 887 }
887 888
888 static int deliver_sample_group(struct perf_session *session, 889 static int deliver_sample_group(struct perf_session *session,
889 struct perf_tool *tool, 890 struct perf_tool *tool,
890 union perf_event *event, 891 union perf_event *event,
891 struct perf_sample *sample, 892 struct perf_sample *sample,
892 struct machine *machine) 893 struct machine *machine)
893 { 894 {
894 int ret = -EINVAL; 895 int ret = -EINVAL;
895 u64 i; 896 u64 i;
896 897
897 for (i = 0; i < sample->read.group.nr; i++) { 898 for (i = 0; i < sample->read.group.nr; i++) {
898 ret = deliver_sample_value(session, tool, event, sample, 899 ret = deliver_sample_value(session, tool, event, sample,
899 &sample->read.group.values[i], 900 &sample->read.group.values[i],
900 machine); 901 machine);
901 if (ret) 902 if (ret)
902 break; 903 break;
903 } 904 }
904 905
905 return ret; 906 return ret;
906 } 907 }
907 908
908 static int 909 static int
909 perf_session__deliver_sample(struct perf_session *session, 910 perf_session__deliver_sample(struct perf_session *session,
910 struct perf_tool *tool, 911 struct perf_tool *tool,
911 union perf_event *event, 912 union perf_event *event,
912 struct perf_sample *sample, 913 struct perf_sample *sample,
913 struct perf_evsel *evsel, 914 struct perf_evsel *evsel,
914 struct machine *machine) 915 struct machine *machine)
915 { 916 {
916 /* We know evsel != NULL. */ 917 /* We know evsel != NULL. */
917 u64 sample_type = evsel->attr.sample_type; 918 u64 sample_type = evsel->attr.sample_type;
918 u64 read_format = evsel->attr.read_format; 919 u64 read_format = evsel->attr.read_format;
919 920
920 /* Standard sample delievery. */ 921 /* Standard sample delievery. */
921 if (!(sample_type & PERF_SAMPLE_READ)) 922 if (!(sample_type & PERF_SAMPLE_READ))
922 return tool->sample(tool, event, sample, evsel, machine); 923 return tool->sample(tool, event, sample, evsel, machine);
923 924
924 /* For PERF_SAMPLE_READ we have either single or group mode. */ 925 /* For PERF_SAMPLE_READ we have either single or group mode. */
925 if (read_format & PERF_FORMAT_GROUP) 926 if (read_format & PERF_FORMAT_GROUP)
926 return deliver_sample_group(session, tool, event, sample, 927 return deliver_sample_group(session, tool, event, sample,
927 machine); 928 machine);
928 else 929 else
929 return deliver_sample_value(session, tool, event, sample, 930 return deliver_sample_value(session, tool, event, sample,
930 &sample->read.one, machine); 931 &sample->read.one, machine);
931 } 932 }
932 933
933 static int perf_session_deliver_event(struct perf_session *session, 934 static int perf_session_deliver_event(struct perf_session *session,
934 union perf_event *event, 935 union perf_event *event,
935 struct perf_sample *sample, 936 struct perf_sample *sample,
936 struct perf_tool *tool, 937 struct perf_tool *tool,
937 u64 file_offset) 938 u64 file_offset)
938 { 939 {
939 struct perf_evsel *evsel; 940 struct perf_evsel *evsel;
940 struct machine *machine; 941 struct machine *machine;
941 942
942 dump_event(session, event, file_offset, sample); 943 dump_event(session, event, file_offset, sample);
943 944
944 evsel = perf_evlist__id2evsel(session->evlist, sample->id); 945 evsel = perf_evlist__id2evsel(session->evlist, sample->id);
945 if (evsel != NULL && event->header.type != PERF_RECORD_SAMPLE) { 946 if (evsel != NULL && event->header.type != PERF_RECORD_SAMPLE) {
946 /* 947 /*
947 * XXX We're leaving PERF_RECORD_SAMPLE unnacounted here 948 * XXX We're leaving PERF_RECORD_SAMPLE unnacounted here
948 * because the tools right now may apply filters, discarding 949 * because the tools right now may apply filters, discarding
949 * some of the samples. For consistency, in the future we 950 * some of the samples. For consistency, in the future we
950 * should have something like nr_filtered_samples and remove 951 * should have something like nr_filtered_samples and remove
951 * the sample->period from total_sample_period, etc, KISS for 952 * the sample->period from total_sample_period, etc, KISS for
952 * now tho. 953 * now tho.
953 * 954 *
954 * Also testing against NULL allows us to handle files without 955 * Also testing against NULL allows us to handle files without
955 * attr.sample_id_all and/or without PERF_SAMPLE_ID. In the 956 * attr.sample_id_all and/or without PERF_SAMPLE_ID. In the
956 * future probably it'll be a good idea to restrict event 957 * future probably it'll be a good idea to restrict event
957 * processing via perf_session to files with both set. 958 * processing via perf_session to files with both set.
958 */ 959 */
959 hists__inc_nr_events(&evsel->hists, event->header.type); 960 hists__inc_nr_events(&evsel->hists, event->header.type);
960 } 961 }
961 962
962 machine = perf_session__find_machine_for_cpumode(session, event, 963 machine = perf_session__find_machine_for_cpumode(session, event,
963 sample); 964 sample);
964 965
965 switch (event->header.type) { 966 switch (event->header.type) {
966 case PERF_RECORD_SAMPLE: 967 case PERF_RECORD_SAMPLE:
967 dump_sample(evsel, event, sample); 968 dump_sample(evsel, event, sample);
968 if (evsel == NULL) { 969 if (evsel == NULL) {
969 ++session->stats.nr_unknown_id; 970 ++session->stats.nr_unknown_id;
970 return 0; 971 return 0;
971 } 972 }
972 if (machine == NULL) { 973 if (machine == NULL) {
973 ++session->stats.nr_unprocessable_samples; 974 ++session->stats.nr_unprocessable_samples;
974 return 0; 975 return 0;
975 } 976 }
976 return perf_session__deliver_sample(session, tool, event, 977 return perf_session__deliver_sample(session, tool, event,
977 sample, evsel, machine); 978 sample, evsel, machine);
978 case PERF_RECORD_MMAP: 979 case PERF_RECORD_MMAP:
979 return tool->mmap(tool, event, sample, machine); 980 return tool->mmap(tool, event, sample, machine);
980 case PERF_RECORD_COMM: 981 case PERF_RECORD_COMM:
981 return tool->comm(tool, event, sample, machine); 982 return tool->comm(tool, event, sample, machine);
982 case PERF_RECORD_FORK: 983 case PERF_RECORD_FORK:
983 return tool->fork(tool, event, sample, machine); 984 return tool->fork(tool, event, sample, machine);
984 case PERF_RECORD_EXIT: 985 case PERF_RECORD_EXIT:
985 return tool->exit(tool, event, sample, machine); 986 return tool->exit(tool, event, sample, machine);
986 case PERF_RECORD_LOST: 987 case PERF_RECORD_LOST:
987 if (tool->lost == perf_event__process_lost) 988 if (tool->lost == perf_event__process_lost)
988 session->stats.total_lost += event->lost.lost; 989 session->stats.total_lost += event->lost.lost;
989 return tool->lost(tool, event, sample, machine); 990 return tool->lost(tool, event, sample, machine);
990 case PERF_RECORD_READ: 991 case PERF_RECORD_READ:
991 return tool->read(tool, event, sample, evsel, machine); 992 return tool->read(tool, event, sample, evsel, machine);
992 case PERF_RECORD_THROTTLE: 993 case PERF_RECORD_THROTTLE:
993 return tool->throttle(tool, event, sample, machine); 994 return tool->throttle(tool, event, sample, machine);
994 case PERF_RECORD_UNTHROTTLE: 995 case PERF_RECORD_UNTHROTTLE:
995 return tool->unthrottle(tool, event, sample, machine); 996 return tool->unthrottle(tool, event, sample, machine);
996 default: 997 default:
997 ++session->stats.nr_unknown_events; 998 ++session->stats.nr_unknown_events;
998 return -1; 999 return -1;
999 } 1000 }
1000 } 1001 }
1001 1002
/*
 * Handle synthesized "user" events (attr, tracing data, build ids,
 * finished-round markers) that describe the file rather than carrying
 * kernel samples.  These are dispatched immediately to the tool
 * callbacks, bypassing the ordered-samples queue.
 *
 * Returns the tool callback's result, or -EINVAL for an unknown type.
 */
static int perf_session__process_user_event(struct perf_session *session, union perf_event *event,
					    struct perf_tool *tool, u64 file_offset)
{
	int err;

	dump_event(session, event, file_offset, NULL);

	/* These events are processed right away */
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		err = tool->attr(tool, event, &session->evlist);
		if (err == 0)
			/* a new attr may change the per-event id header size */
			perf_session__set_id_hdr_size(session);
		return err;
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(session->fd, file_offset, SEEK_SET);
		return tool->tracing_data(tool, event, session);
	case PERF_RECORD_HEADER_BUILD_ID:
		return tool->build_id(tool, event, session);
	case PERF_RECORD_FINISHED_ROUND:
		return tool->finished_round(tool, event, session);
	default:
		return -EINVAL;
	}
}
1028 1029
/*
 * Byte-swap the body of @event according to its type, using the
 * per-type handlers in perf_event__swap_ops[].  sample_id_all tells
 * sample-carrying swappers whether a trailing sample_id block exists.
 *
 * NOTE(review): event->header.type is used unchecked as an array index
 * here — callers must have range-checked it against PERF_RECORD_HEADER_MAX
 * first; verify all call sites do.
 */
static void event_swap(union perf_event *event, bool sample_id_all)
{
	perf_event__swap_op swap;

	swap = perf_event__swap_ops[event->header.type];
	if (swap)
		swap(event, sample_id_all);
}
1037 1038
1038 static int perf_session__process_event(struct perf_session *session, 1039 static int perf_session__process_event(struct perf_session *session,
1039 union perf_event *event, 1040 union perf_event *event,
1040 struct perf_tool *tool, 1041 struct perf_tool *tool,
1041 u64 file_offset) 1042 u64 file_offset)
1042 { 1043 {
1043 struct perf_sample sample; 1044 struct perf_sample sample;
1044 int ret; 1045 int ret;
1045 1046
1046 if (session->header.needs_swap) 1047 if (session->header.needs_swap)
1047 event_swap(event, perf_evlist__sample_id_all(session->evlist)); 1048 event_swap(event, perf_evlist__sample_id_all(session->evlist));
1048 1049
1049 if (event->header.type >= PERF_RECORD_HEADER_MAX) 1050 if (event->header.type >= PERF_RECORD_HEADER_MAX)
1050 return -EINVAL; 1051 return -EINVAL;
1051 1052
1052 events_stats__inc(&session->stats, event->header.type); 1053 events_stats__inc(&session->stats, event->header.type);
1053 1054
1054 if (event->header.type >= PERF_RECORD_USER_TYPE_START) 1055 if (event->header.type >= PERF_RECORD_USER_TYPE_START)
1055 return perf_session__process_user_event(session, event, tool, file_offset); 1056 return perf_session__process_user_event(session, event, tool, file_offset);
1056 1057
1057 /* 1058 /*
1058 * For all kernel events we get the sample data 1059 * For all kernel events we get the sample data
1059 */ 1060 */
1060 ret = perf_evlist__parse_sample(session->evlist, event, &sample); 1061 ret = perf_evlist__parse_sample(session->evlist, event, &sample);
1061 if (ret) 1062 if (ret)
1062 return ret; 1063 return ret;
1063 1064
1064 if (tool->ordered_samples) { 1065 if (tool->ordered_samples) {
1065 ret = perf_session_queue_event(session, event, &sample, 1066 ret = perf_session_queue_event(session, event, &sample,
1066 file_offset); 1067 file_offset);
1067 if (ret != -ETIME) 1068 if (ret != -ETIME)
1068 return ret; 1069 return ret;
1069 } 1070 }
1070 1071
1071 return perf_session_deliver_event(session, event, &sample, tool, 1072 return perf_session_deliver_event(session, event, &sample, tool,
1072 file_offset); 1073 file_offset);
1073 } 1074 }
1074 1075
1075 void perf_event_header__bswap(struct perf_event_header *self) 1076 void perf_event_header__bswap(struct perf_event_header *self)
1076 { 1077 {
1077 self->type = bswap_32(self->type); 1078 self->type = bswap_32(self->type);
1078 self->misc = bswap_16(self->misc); 1079 self->misc = bswap_16(self->misc);
1079 self->size = bswap_16(self->size); 1080 self->size = bswap_16(self->size);
1080 } 1081 }
1081 1082
/*
 * Find or create the thread for @pid on the session's host machine.
 * Thin wrapper over machine__findnew_thread() with pid 0 as the
 * first argument (host machine namespace).
 */
struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
	return machine__findnew_thread(&session->machines.host, 0, pid);
}
1086 1087
1087 static struct thread *perf_session__register_idle_thread(struct perf_session *self) 1088 static struct thread *perf_session__register_idle_thread(struct perf_session *self)
1088 { 1089 {
1089 struct thread *thread = perf_session__findnew(self, 0); 1090 struct thread *thread = perf_session__findnew(self, 0);
1090 1091
1091 if (thread == NULL || thread__set_comm(thread, "swapper")) { 1092 if (thread == NULL || thread__set_comm(thread, "swapper")) {
1092 pr_err("problem inserting idle task.\n"); 1093 pr_err("problem inserting idle task.\n");
1093 thread = NULL; 1094 thread = NULL;
1094 } 1095 }
1095 1096
1096 return thread; 1097 return thread;
1097 } 1098 }
1098 1099
1099 static void perf_session__warn_about_errors(const struct perf_session *session, 1100 static void perf_session__warn_about_errors(const struct perf_session *session,
1100 const struct perf_tool *tool) 1101 const struct perf_tool *tool)
1101 { 1102 {
1102 if (tool->lost == perf_event__process_lost && 1103 if (tool->lost == perf_event__process_lost &&
1103 session->stats.nr_events[PERF_RECORD_LOST] != 0) { 1104 session->stats.nr_events[PERF_RECORD_LOST] != 0) {
1104 ui__warning("Processed %d events and lost %d chunks!\n\n" 1105 ui__warning("Processed %d events and lost %d chunks!\n\n"
1105 "Check IO/CPU overload!\n\n", 1106 "Check IO/CPU overload!\n\n",
1106 session->stats.nr_events[0], 1107 session->stats.nr_events[0],
1107 session->stats.nr_events[PERF_RECORD_LOST]); 1108 session->stats.nr_events[PERF_RECORD_LOST]);
1108 } 1109 }
1109 1110
1110 if (session->stats.nr_unknown_events != 0) { 1111 if (session->stats.nr_unknown_events != 0) {
1111 ui__warning("Found %u unknown events!\n\n" 1112 ui__warning("Found %u unknown events!\n\n"
1112 "Is this an older tool processing a perf.data " 1113 "Is this an older tool processing a perf.data "
1113 "file generated by a more recent tool?\n\n" 1114 "file generated by a more recent tool?\n\n"
1114 "If that is not the case, consider " 1115 "If that is not the case, consider "
1115 "reporting to linux-kernel@vger.kernel.org.\n\n", 1116 "reporting to linux-kernel@vger.kernel.org.\n\n",
1116 session->stats.nr_unknown_events); 1117 session->stats.nr_unknown_events);
1117 } 1118 }
1118 1119
1119 if (session->stats.nr_unknown_id != 0) { 1120 if (session->stats.nr_unknown_id != 0) {
1120 ui__warning("%u samples with id not present in the header\n", 1121 ui__warning("%u samples with id not present in the header\n",
1121 session->stats.nr_unknown_id); 1122 session->stats.nr_unknown_id);
1122 } 1123 }
1123 1124
1124 if (session->stats.nr_invalid_chains != 0) { 1125 if (session->stats.nr_invalid_chains != 0) {
1125 ui__warning("Found invalid callchains!\n\n" 1126 ui__warning("Found invalid callchains!\n\n"
1126 "%u out of %u events were discarded for this reason.\n\n" 1127 "%u out of %u events were discarded for this reason.\n\n"
1127 "Consider reporting to linux-kernel@vger.kernel.org.\n\n", 1128 "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
1128 session->stats.nr_invalid_chains, 1129 session->stats.nr_invalid_chains,
1129 session->stats.nr_events[PERF_RECORD_SAMPLE]); 1130 session->stats.nr_events[PERF_RECORD_SAMPLE]);
1130 } 1131 }
1131 1132
1132 if (session->stats.nr_unprocessable_samples != 0) { 1133 if (session->stats.nr_unprocessable_samples != 0) {
1133 ui__warning("%u unprocessable samples recorded.\n" 1134 ui__warning("%u unprocessable samples recorded.\n"
1134 "Do you have a KVM guest running and not using 'perf kvm'?\n", 1135 "Do you have a KVM guest running and not using 'perf kvm'?\n",
1135 session->stats.nr_unprocessable_samples); 1136 session->stats.nr_unprocessable_samples);
1136 } 1137 }
1137 } 1138 }
1138 1139
1139 #define session_done() (*(volatile int *)(&session_done)) 1140 #define session_done() (*(volatile int *)(&session_done))
1140 volatile int session_done; 1141 volatile int session_done;
1141 1142
1142 static int __perf_session__process_pipe_events(struct perf_session *self, 1143 static int __perf_session__process_pipe_events(struct perf_session *self,
1143 struct perf_tool *tool) 1144 struct perf_tool *tool)
1144 { 1145 {
1145 union perf_event *event; 1146 union perf_event *event;
1146 uint32_t size, cur_size = 0; 1147 uint32_t size, cur_size = 0;
1147 void *buf = NULL; 1148 void *buf = NULL;
1148 int skip = 0; 1149 int skip = 0;
1149 u64 head; 1150 u64 head;
1150 int err; 1151 int err;
1151 void *p; 1152 void *p;
1152 1153
1153 perf_tool__fill_defaults(tool); 1154 perf_tool__fill_defaults(tool);
1154 1155
1155 head = 0; 1156 head = 0;
1156 cur_size = sizeof(union perf_event); 1157 cur_size = sizeof(union perf_event);
1157 1158
1158 buf = malloc(cur_size); 1159 buf = malloc(cur_size);
1159 if (!buf) 1160 if (!buf)
1160 return -errno; 1161 return -errno;
1161 more: 1162 more:
1162 event = buf; 1163 event = buf;
1163 err = readn(self->fd, event, sizeof(struct perf_event_header)); 1164 err = readn(self->fd, event, sizeof(struct perf_event_header));
1164 if (err <= 0) { 1165 if (err <= 0) {
1165 if (err == 0) 1166 if (err == 0)
1166 goto done; 1167 goto done;
1167 1168
1168 pr_err("failed to read event header\n"); 1169 pr_err("failed to read event header\n");
1169 goto out_err; 1170 goto out_err;
1170 } 1171 }
1171 1172
1172 if (self->header.needs_swap) 1173 if (self->header.needs_swap)
1173 perf_event_header__bswap(&event->header); 1174 perf_event_header__bswap(&event->header);
1174 1175
1175 size = event->header.size; 1176 size = event->header.size;
1176 if (size < sizeof(struct perf_event_header)) { 1177 if (size < sizeof(struct perf_event_header)) {
1177 pr_err("bad event header size\n"); 1178 pr_err("bad event header size\n");
1178 goto out_err; 1179 goto out_err;
1179 } 1180 }
1180 1181
1181 if (size > cur_size) { 1182 if (size > cur_size) {
1182 void *new = realloc(buf, size); 1183 void *new = realloc(buf, size);
1183 if (!new) { 1184 if (!new) {
1184 pr_err("failed to allocate memory to read event\n"); 1185 pr_err("failed to allocate memory to read event\n");
1185 goto out_err; 1186 goto out_err;
1186 } 1187 }
1187 buf = new; 1188 buf = new;
1188 cur_size = size; 1189 cur_size = size;
1189 event = buf; 1190 event = buf;
1190 } 1191 }
1191 p = event; 1192 p = event;
1192 p += sizeof(struct perf_event_header); 1193 p += sizeof(struct perf_event_header);
1193 1194
1194 if (size - sizeof(struct perf_event_header)) { 1195 if (size - sizeof(struct perf_event_header)) {
1195 err = readn(self->fd, p, size - sizeof(struct perf_event_header)); 1196 err = readn(self->fd, p, size - sizeof(struct perf_event_header));
1196 if (err <= 0) { 1197 if (err <= 0) {
1197 if (err == 0) { 1198 if (err == 0) {
1198 pr_err("unexpected end of event stream\n"); 1199 pr_err("unexpected end of event stream\n");
1199 goto done; 1200 goto done;
1200 } 1201 }
1201 1202
1202 pr_err("failed to read event data\n"); 1203 pr_err("failed to read event data\n");
1203 goto out_err; 1204 goto out_err;
1204 } 1205 }
1205 } 1206 }
1206 1207
1207 if ((skip = perf_session__process_event(self, event, tool, head)) < 0) { 1208 if ((skip = perf_session__process_event(self, event, tool, head)) < 0) {
1208 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n", 1209 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
1209 head, event->header.size, event->header.type); 1210 head, event->header.size, event->header.type);
1210 err = -EINVAL; 1211 err = -EINVAL;
1211 goto out_err; 1212 goto out_err;
1212 } 1213 }
1213 1214
1214 head += size; 1215 head += size;
1215 1216
1216 if (skip > 0) 1217 if (skip > 0)
1217 head += skip; 1218 head += skip;
1218 1219
1219 if (!session_done()) 1220 if (!session_done())
1220 goto more; 1221 goto more;
1221 done: 1222 done:
1222 err = 0; 1223 err = 0;
1223 out_err: 1224 out_err:
1224 free(buf); 1225 free(buf);
1225 perf_session__warn_about_errors(self, tool); 1226 perf_session__warn_about_errors(self, tool);
1226 perf_session_free_sample_buffers(self); 1227 perf_session_free_sample_buffers(self);
1227 return err; 1228 return err;
1228 } 1229 }
1229 1230
/*
 * Return a pointer to the event at offset @head inside the mmaped
 * window @buf of length @mmap_size, or NULL if the event (or even its
 * header) does not fit entirely in the window — the caller then
 * remaps further into the file and retries.
 *
 * For cross-endian files the header is swapped in place so its size
 * can be read; if the event turns out not to fit, the swap is undone
 * because the same bytes will be looked at again after the remap.
 */
static union perf_event *
fetch_mmaped_event(struct perf_session *session,
		   u64 head, size_t mmap_size, char *buf)
{
	union perf_event *event;

	/*
	 * Ensure we have enough space remaining to read
	 * the size of the event in the headers.
	 */
	if (head + sizeof(event->header) > mmap_size)
		return NULL;

	event = (union perf_event *)(buf + head);

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (head + event->header.size > mmap_size) {
		/* We're not fetching the event so swap back again */
		if (session->header.needs_swap)
			perf_event_header__bswap(&event->header);
		return NULL;
	}

	return event;
}
1257 1258
1258 /* 1259 /*
1259 * On 64bit we can mmap the data file in one go. No need for tiny mmap 1260 * On 64bit we can mmap the data file in one go. No need for tiny mmap
1260 * slices. On 32bit we use 32MB. 1261 * slices. On 32bit we use 32MB.
1261 */ 1262 */
1262 #if BITS_PER_LONG == 64 1263 #if BITS_PER_LONG == 64
1263 #define MMAP_SIZE ULLONG_MAX 1264 #define MMAP_SIZE ULLONG_MAX
1264 #define NUM_MMAPS 1 1265 #define NUM_MMAPS 1
1265 #else 1266 #else
1266 #define MMAP_SIZE (32 * 1024 * 1024ULL) 1267 #define MMAP_SIZE (32 * 1024 * 1024ULL)
1267 #define NUM_MMAPS 128 1268 #define NUM_MMAPS 128
1268 #endif 1269 #endif
1269 1270
1270 int __perf_session__process_events(struct perf_session *session, 1271 int __perf_session__process_events(struct perf_session *session,
1271 u64 data_offset, u64 data_size, 1272 u64 data_offset, u64 data_size,
1272 u64 file_size, struct perf_tool *tool) 1273 u64 file_size, struct perf_tool *tool)
1273 { 1274 {
1274 u64 head, page_offset, file_offset, file_pos, progress_next; 1275 u64 head, page_offset, file_offset, file_pos, progress_next;
1275 int err, mmap_prot, mmap_flags, map_idx = 0; 1276 int err, mmap_prot, mmap_flags, map_idx = 0;
1276 size_t mmap_size; 1277 size_t mmap_size;
1277 char *buf, *mmaps[NUM_MMAPS]; 1278 char *buf, *mmaps[NUM_MMAPS];
1278 union perf_event *event; 1279 union perf_event *event;
1279 uint32_t size; 1280 uint32_t size;
1280 1281
1281 perf_tool__fill_defaults(tool); 1282 perf_tool__fill_defaults(tool);
1282 1283
1283 page_offset = page_size * (data_offset / page_size); 1284 page_offset = page_size * (data_offset / page_size);
1284 file_offset = page_offset; 1285 file_offset = page_offset;
1285 head = data_offset - page_offset; 1286 head = data_offset - page_offset;
1286 1287
1287 if (data_offset + data_size < file_size) 1288 if (data_offset + data_size < file_size)
1288 file_size = data_offset + data_size; 1289 file_size = data_offset + data_size;
1289 1290
1290 progress_next = file_size / 16; 1291 progress_next = file_size / 16;
1291 1292
1292 mmap_size = MMAP_SIZE; 1293 mmap_size = MMAP_SIZE;
1293 if (mmap_size > file_size) 1294 if (mmap_size > file_size)
1294 mmap_size = file_size; 1295 mmap_size = file_size;
1295 1296
1296 memset(mmaps, 0, sizeof(mmaps)); 1297 memset(mmaps, 0, sizeof(mmaps));
1297 1298
1298 mmap_prot = PROT_READ; 1299 mmap_prot = PROT_READ;
1299 mmap_flags = MAP_SHARED; 1300 mmap_flags = MAP_SHARED;
1300 1301
1301 if (session->header.needs_swap) { 1302 if (session->header.needs_swap) {
1302 mmap_prot |= PROT_WRITE; 1303 mmap_prot |= PROT_WRITE;
1303 mmap_flags = MAP_PRIVATE; 1304 mmap_flags = MAP_PRIVATE;
1304 } 1305 }
1305 remap: 1306 remap:
1306 buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, session->fd, 1307 buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, session->fd,
1307 file_offset); 1308 file_offset);
1308 if (buf == MAP_FAILED) { 1309 if (buf == MAP_FAILED) {
1309 pr_err("failed to mmap file\n"); 1310 pr_err("failed to mmap file\n");
1310 err = -errno; 1311 err = -errno;
1311 goto out_err; 1312 goto out_err;
1312 } 1313 }
1313 mmaps[map_idx] = buf; 1314 mmaps[map_idx] = buf;
1314 map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1); 1315 map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
1315 file_pos = file_offset + head; 1316 file_pos = file_offset + head;
1316 1317
1317 more: 1318 more:
1318 event = fetch_mmaped_event(session, head, mmap_size, buf); 1319 event = fetch_mmaped_event(session, head, mmap_size, buf);
1319 if (!event) { 1320 if (!event) {
1320 if (mmaps[map_idx]) { 1321 if (mmaps[map_idx]) {
1321 munmap(mmaps[map_idx], mmap_size); 1322 munmap(mmaps[map_idx], mmap_size);
1322 mmaps[map_idx] = NULL; 1323 mmaps[map_idx] = NULL;
1323 } 1324 }
1324 1325
1325 page_offset = page_size * (head / page_size); 1326 page_offset = page_size * (head / page_size);
1326 file_offset += page_offset; 1327 file_offset += page_offset;
1327 head -= page_offset; 1328 head -= page_offset;
1328 goto remap; 1329 goto remap;
1329 } 1330 }
1330 1331
1331 size = event->header.size; 1332 size = event->header.size;
1332 1333
1333 if (size < sizeof(struct perf_event_header) || 1334 if (size < sizeof(struct perf_event_header) ||
1334 perf_session__process_event(session, event, tool, file_pos) < 0) { 1335 perf_session__process_event(session, event, tool, file_pos) < 0) {
1335 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n", 1336 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
1336 file_offset + head, event->header.size, 1337 file_offset + head, event->header.size,
1337 event->header.type); 1338 event->header.type);
1338 err = -EINVAL; 1339 err = -EINVAL;
1339 goto out_err; 1340 goto out_err;
1340 } 1341 }
1341 1342
1342 head += size; 1343 head += size;
1343 file_pos += size; 1344 file_pos += size;
1344 1345
1345 if (file_pos >= progress_next) { 1346 if (file_pos >= progress_next) {
1346 progress_next += file_size / 16; 1347 progress_next += file_size / 16;
1347 ui_progress__update(file_pos, file_size, 1348 ui_progress__update(file_pos, file_size,
1348 "Processing events..."); 1349 "Processing events...");
1349 } 1350 }
1350 1351
1351 if (file_pos < file_size) 1352 if (file_pos < file_size)
1352 goto more; 1353 goto more;
1353 1354
1354 err = 0; 1355 err = 0;
1355 /* do the final flush for ordered samples */ 1356 /* do the final flush for ordered samples */
1356 session->ordered_samples.next_flush = ULLONG_MAX; 1357 session->ordered_samples.next_flush = ULLONG_MAX;
1357 err = flush_sample_queue(session, tool); 1358 err = flush_sample_queue(session, tool);
1358 out_err: 1359 out_err:
1359 ui_progress__finish(); 1360 ui_progress__finish();
1360 perf_session__warn_about_errors(session, tool); 1361 perf_session__warn_about_errors(session, tool);
1361 perf_session_free_sample_buffers(session); 1362 perf_session_free_sample_buffers(session);
1362 return err; 1363 return err;
1363 } 1364 }
1364 1365
1365 int perf_session__process_events(struct perf_session *self, 1366 int perf_session__process_events(struct perf_session *self,
1366 struct perf_tool *tool) 1367 struct perf_tool *tool)
1367 { 1368 {
1368 int err; 1369 int err;
1369 1370
1370 if (perf_session__register_idle_thread(self) == NULL) 1371 if (perf_session__register_idle_thread(self) == NULL)
1371 return -ENOMEM; 1372 return -ENOMEM;
1372 1373
1373 if (!self->fd_pipe) 1374 if (!self->fd_pipe)
1374 err = __perf_session__process_events(self, 1375 err = __perf_session__process_events(self,
1375 self->header.data_offset, 1376 self->header.data_offset,
1376 self->header.data_size, 1377 self->header.data_size,
1377 self->size, tool); 1378 self->size, tool);
1378 else 1379 else
1379 err = __perf_session__process_pipe_events(self, tool); 1380 err = __perf_session__process_pipe_events(self, tool);
1380 1381
1381 return err; 1382 return err;
1382 } 1383 }
1383 1384
/*
 * Return true if at least one event in the session is a tracepoint;
 * otherwise print a hint naming the 'perf @msg' command that would
 * have recorded traces, and return false.
 */
bool perf_session__has_traces(struct perf_session *session, const char *msg)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &session->evlist->entries, node) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT)
			return true;
	}

	pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
	return false;
}
1396 1397
/*
 * Record @symbol_name/@addr as the kallsyms relocation reference symbol
 * on every map type in @maps, so kernel symbols can be relocated later.
 * A trailing "]" (from names like "[kernel.kallsyms]_text") is stripped.
 *
 * NOTE(review): a single ref_reloc_sym allocation is shared by all
 * MAP__NR_TYPES kmaps — presumably exactly one of them is responsible
 * for freeing it; confirm ownership before changing lifetime here.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
				     const char *symbol_name, u64 addr)
{
	char *bracket;
	enum map_type i;
	struct ref_reloc_sym *ref;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	/* truncate at ']' so "foo]bar" becomes "foo" */
	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(maps[i]);
		kmap->ref_reloc_sym = ref;
	}

	return 0;
}
1427 1428
/*
 * Print all DSOs of all machines in the session to @fp; returns the
 * number of characters written.
 */
size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
{
	return machines__fprintf_dsos(&self->machines, fp);
}
1432 1433
/*
 * Print DSOs with build-ids to @fp, omitting those for which
 * skip(dso, parm) returns true; returns the number of characters
 * written.
 */
size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
					  bool (skip)(struct dso *dso, int parm), int parm)
{
	return machines__fprintf_dsos_buildid(&self->machines, fp, skip, parm);
}
1438 1439
/*
 * Print the session-wide event statistics followed by the per-evsel
 * statistics to @fp; returns the total number of characters written.
 */
size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = fprintf(fp, "Aggregated stats:\n");

	ret += events_stats__fprintf(&session->stats, fp);

	list_for_each_entry(pos, &session->evlist->entries, node) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += events_stats__fprintf(&pos->hists.stats, fp);
	}

	return ret;
}
1453 1454
/*
 * Print the host machine's threads/maps to @fp; returns the number of
 * characters written.
 */
size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
{
	/*
	 * FIXME: Here we have to actually print all the machines in this
	 * session, not just the host...
	 */
	return machine__fprintf(&session->machines.host, fp);
}
1462 1463
/*
 * Return the first evsel in the session whose attr.type equals @type
 * (e.g. PERF_TYPE_HARDWARE), or NULL if none matches.
 */
struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
						   unsigned int type)
{
	struct perf_evsel *pos;

	list_for_each_entry(pos, &session->evlist->entries, node) {
		if (pos->attr.type == type)
			return pos;
	}
	return NULL;
}
1474 1475
/*
 * Print the resolved location(s) of a sample to stdout: either the
 * whole callchain (when callchains are enabled and present, up to
 * @stack_depth frames) or just the sampled ip.  @print_opts is a
 * bitmask of PRINT_IP_OPT_* selecting which columns appear (raw ip,
 * symbol name, symbol+offset, dso, one-line layout).
 *
 * Errors during sample preprocessing or callchain resolution cause the
 * sample to be skipped with a message; nothing is printed for it.
 */
void perf_evsel__print_ip(struct perf_evsel *evsel, union perf_event *event,
			  struct perf_sample *sample, struct machine *machine,
			  unsigned int print_opts, unsigned int stack_depth)
{
	struct addr_location al;
	struct callchain_cursor_node *node;
	int print_ip = print_opts & PRINT_IP_OPT_IP;
	int print_sym = print_opts & PRINT_IP_OPT_SYM;
	int print_dso = print_opts & PRINT_IP_OPT_DSO;
	int print_symoffset = print_opts & PRINT_IP_OPT_SYMOFFSET;
	int print_oneline = print_opts & PRINT_IP_OPT_ONELINE;
	/* one-line mode separates frames with spaces, multi-line with tabs */
	char s = print_oneline ? ' ' : '\t';

	if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) {
		error("problem processing %d event, skipping it.\n",
			event->header.type);
		return;
	}

	if (symbol_conf.use_callchain && sample->callchain) {

		if (machine__resolve_callchain(machine, evsel, al.thread,
					       sample, NULL, NULL) != 0) {
			if (verbose)
				error("Failed to resolve callchain. Skipping\n");
			return;
		}
		callchain_cursor_commit(&callchain_cursor);

		/* walk at most stack_depth resolved frames */
		while (stack_depth) {
			node = callchain_cursor_current(&callchain_cursor);
			if (!node)
				break;

			if (print_ip)
				printf("%c%16" PRIx64, s, node->ip);

			if (print_sym) {
				printf(" ");
				if (print_symoffset) {
					/* point al at this frame so the offset is computed against it */
					al.addr = node->ip;
					al.map  = node->map;
					symbol__fprintf_symname_offs(node->sym, &al, stdout);
				} else
					symbol__fprintf_symname(node->sym, stdout);
			}

			if (print_dso) {
				printf(" (");
				map__fprintf_dsoname(node->map, stdout);
				printf(")");
			}

			if (!print_oneline)
				printf("\n");

			callchain_cursor_advance(&callchain_cursor);

			stack_depth--;
		}

	} else {
		/* no callchain: print only the sampled ip's location */
		if (print_ip)
			printf("%16" PRIx64, sample->ip);

		if (print_sym) {
			printf(" ");
			if (print_symoffset)
				symbol__fprintf_symname_offs(al.sym, &al,
							     stdout);
			else
				symbol__fprintf_symname(al.sym, stdout);
		}

		if (print_dso) {
			printf(" (");
			map__fprintf_dsoname(al.map, stdout);
			printf(")");
		}
	}
}
1556 1557
1557 int perf_session__cpu_bitmap(struct perf_session *session, 1558 int perf_session__cpu_bitmap(struct perf_session *session,
1558 const char *cpu_list, unsigned long *cpu_bitmap) 1559 const char *cpu_list, unsigned long *cpu_bitmap)
1559 { 1560 {
1560 int i; 1561 int i;
1561 struct cpu_map *map; 1562 struct cpu_map *map;
1562 1563
1563 for (i = 0; i < PERF_TYPE_MAX; ++i) { 1564 for (i = 0; i < PERF_TYPE_MAX; ++i) {
1564 struct perf_evsel *evsel; 1565 struct perf_evsel *evsel;
1565 1566
1566 evsel = perf_session__find_first_evtype(session, i); 1567 evsel = perf_session__find_first_evtype(session, i);
1567 if (!evsel) 1568 if (!evsel)
1568 continue; 1569 continue;
1569 1570
1570 if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) { 1571 if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
1571 pr_err("File does not contain CPU events. " 1572 pr_err("File does not contain CPU events. "
1572 "Remove -c option to proceed.\n"); 1573 "Remove -c option to proceed.\n");
1573 return -1; 1574 return -1;
1574 } 1575 }
1575 } 1576 }
1576 1577
1577 map = cpu_map__new(cpu_list); 1578 map = cpu_map__new(cpu_list);
1578 if (map == NULL) { 1579 if (map == NULL) {
1579 pr_err("Invalid cpu_list\n"); 1580 pr_err("Invalid cpu_list\n");
1580 return -1; 1581 return -1;
1581 } 1582 }
1582 1583
1583 for (i = 0; i < map->nr; i++) { 1584 for (i = 0; i < map->nr; i++) {
1584 int cpu = map->map[i]; 1585 int cpu = map->map[i];
1585 1586
1586 if (cpu >= MAX_NR_CPUS) { 1587 if (cpu >= MAX_NR_CPUS) {
1587 pr_err("Requested CPU %d too large. " 1588 pr_err("Requested CPU %d too large. "
1588 "Consider raising MAX_NR_CPUS\n", cpu); 1589 "Consider raising MAX_NR_CPUS\n", cpu);
1589 return -1; 1590 return -1;
1590 } 1591 }
1591 1592
1592 set_bit(cpu, cpu_bitmap); 1593 set_bit(cpu, cpu_bitmap);
1593 } 1594 }
1594 1595
1595 return 0; 1596 return 0;
1596 } 1597 }
1597 1598
1598 void perf_session__fprintf_info(struct perf_session *session, FILE *fp, 1599 void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
1599 bool full) 1600 bool full)
1600 { 1601 {
1601 struct stat st; 1602 struct stat st;
1602 int ret; 1603 int ret;
1603 1604
1604 if (session == NULL || fp == NULL) 1605 if (session == NULL || fp == NULL)
1605 return; 1606 return;
1606 1607
1607 ret = fstat(session->fd, &st); 1608 ret = fstat(session->fd, &st);
1608 if (ret == -1) 1609 if (ret == -1)
1609 return; 1610 return;
1610 1611
1611 fprintf(fp, "# ========\n"); 1612 fprintf(fp, "# ========\n");
1612 fprintf(fp, "# captured on: %s", ctime(&st.st_ctime)); 1613 fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));
1613 perf_header__fprintf_info(session, fp, full); 1614 perf_header__fprintf_info(session, fp, full);
1614 fprintf(fp, "# ========\n#\n"); 1615 fprintf(fp, "# ========\n#\n");
1615 } 1616 }
1616 1617
1617 1618
1618 int __perf_session__set_tracepoints_handlers(struct perf_session *session, 1619 int __perf_session__set_tracepoints_handlers(struct perf_session *session,
1619 const struct perf_evsel_str_handler *assocs, 1620 const struct perf_evsel_str_handler *assocs,
1620 size_t nr_assocs) 1621 size_t nr_assocs)
1621 { 1622 {
1622 struct perf_evlist *evlist = session->evlist; 1623 struct perf_evlist *evlist = session->evlist;
1623 struct event_format *format; 1624 struct event_format *format;
1624 struct perf_evsel *evsel; 1625 struct perf_evsel *evsel;
1625 char *tracepoint, *name; 1626 char *tracepoint, *name;
1626 size_t i; 1627 size_t i;
1627 int err; 1628 int err;
1628 1629
1629 for (i = 0; i < nr_assocs; i++) { 1630 for (i = 0; i < nr_assocs; i++) {
1630 err = -ENOMEM; 1631 err = -ENOMEM;
1631 tracepoint = strdup(assocs[i].name); 1632 tracepoint = strdup(assocs[i].name);
1632 if (tracepoint == NULL) 1633 if (tracepoint == NULL)
1633 goto out; 1634 goto out;
1634 1635
1635 err = -ENOENT; 1636 err = -ENOENT;
1636 name = strchr(tracepoint, ':'); 1637 name = strchr(tracepoint, ':');
1637 if (name == NULL) 1638 if (name == NULL)
1638 goto out_free; 1639 goto out_free;
1639 1640
1640 *name++ = '\0'; 1641 *name++ = '\0';
1641 format = pevent_find_event_by_name(session->pevent, 1642 format = pevent_find_event_by_name(session->pevent,
1642 tracepoint, name); 1643 tracepoint, name);
1643 if (format == NULL) { 1644 if (format == NULL) {
1644 /* 1645 /*
1645 * Adding a handler for an event not in the session, 1646 * Adding a handler for an event not in the session,
1646 * just ignore it. 1647 * just ignore it.
1647 */ 1648 */
1648 goto next; 1649 goto next;
1649 } 1650 }
1650 1651
1651 evsel = perf_evlist__find_tracepoint_by_id(evlist, format->id); 1652 evsel = perf_evlist__find_tracepoint_by_id(evlist, format->id);
1652 if (evsel == NULL) 1653 if (evsel == NULL)
1653 goto next; 1654 goto next;
1654 1655
1655 err = -EEXIST; 1656 err = -EEXIST;
1656 if (evsel->handler.func != NULL) 1657 if (evsel->handler.func != NULL)
1657 goto out_free; 1658 goto out_free;
1658 evsel->handler.func = assocs[i].handler; 1659 evsel->handler.func = assocs[i].handler;
1659 next: 1660 next:
1660 free(tracepoint); 1661 free(tracepoint);
1661 } 1662 }
1662 1663
1663 err = 0; 1664 err = 0;
1664 out: 1665 out:
1665 return err; 1666 return err;
1666 1667
1667 out_free: 1668 out_free:
1668 free(tracepoint); 1669 free(tracepoint);
1669 goto out; 1670 goto out;
1670 } 1671 }
1671 1672