include/linux/perf_event.h

/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_EVENT_H
#define _LINUX_PERF_EVENT_H

#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>

/*
 * User-space ABI bits:
 */

/*
 * attr.type
 */
enum perf_type_id {
	PERF_TYPE_HARDWARE			= 0,
	PERF_TYPE_SOFTWARE			= 1,
	PERF_TYPE_TRACEPOINT			= 2,
	PERF_TYPE_HW_CACHE			= 3,
	PERF_TYPE_RAW				= 4,
	PERF_TYPE_BREAKPOINT			= 5,

	PERF_TYPE_MAX,				/* non-ABI */
};

/*
 * Generalized performance event event_id types, used by the
 * attr.event_id parameter of the sys_perf_event_open()
 * syscall:
 */
enum perf_hw_id {
	/*
	 * Common hardware events, generalized by the kernel:
	 */
	PERF_COUNT_HW_CPU_CYCLES		= 0,
	PERF_COUNT_HW_INSTRUCTIONS		= 1,
	PERF_COUNT_HW_CACHE_REFERENCES		= 2,
	PERF_COUNT_HW_CACHE_MISSES		= 3,
	PERF_COUNT_HW_BRANCH_INSTRUCTIONS	= 4,
	PERF_COUNT_HW_BRANCH_MISSES		= 5,
	PERF_COUNT_HW_BUS_CYCLES		= 6,
	PERF_COUNT_HW_STALLED_CYCLES_FRONTEND	= 7,
	PERF_COUNT_HW_STALLED_CYCLES_BACKEND	= 8,
	PERF_COUNT_HW_REF_CPU_CYCLES		= 9,

	PERF_COUNT_HW_MAX,			/* non-ABI */
};

/*
 * Generalized hardware cache events:
 *
 *       { L1-D, L1-I, LLC, ITLB, DTLB, BPU, NODE } x
 *       { read, write, prefetch } x
 *       { accesses, misses }
 */
enum perf_hw_cache_id {
	PERF_COUNT_HW_CACHE_L1D			= 0,
	PERF_COUNT_HW_CACHE_L1I			= 1,
	PERF_COUNT_HW_CACHE_LL			= 2,
	PERF_COUNT_HW_CACHE_DTLB		= 3,
	PERF_COUNT_HW_CACHE_ITLB		= 4,
	PERF_COUNT_HW_CACHE_BPU			= 5,
	PERF_COUNT_HW_CACHE_NODE		= 6,

	PERF_COUNT_HW_CACHE_MAX,		/* non-ABI */
};
enum perf_hw_cache_op_id {
	PERF_COUNT_HW_CACHE_OP_READ		= 0,
	PERF_COUNT_HW_CACHE_OP_WRITE		= 1,
	PERF_COUNT_HW_CACHE_OP_PREFETCH		= 2,

	PERF_COUNT_HW_CACHE_OP_MAX,		/* non-ABI */
};
enum perf_hw_cache_op_result_id {
	PERF_COUNT_HW_CACHE_RESULT_ACCESS	= 0,
	PERF_COUNT_HW_CACHE_RESULT_MISS		= 1,

	PERF_COUNT_HW_CACHE_RESULT_MAX,		/* non-ABI */
};
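
/*
 * A usage sketch (not part of the ABI above): the perf_event_open(2)
 * man page documents that a PERF_TYPE_HW_CACHE event combines the
 * three enums above into attr.config as
 * (id) | (op_id << 8) | (op_result_id << 16). The helper name below
 * is illustrative only:
 */
#if 0
static inline __u64 hw_cache_config(__u64 id, __u64 op, __u64 result)
{
	return id | (op << 8) | (result << 16);
}

/* L1 data-cache read misses:
 *	attr.type   = PERF_TYPE_HW_CACHE;
 *	attr.config = hw_cache_config(PERF_COUNT_HW_CACHE_L1D,
 *				      PERF_COUNT_HW_CACHE_OP_READ,
 *				      PERF_COUNT_HW_CACHE_RESULT_MISS);
 */
#endif
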

/*
 * Special "software" events provided by the kernel, even if the hardware
 * does not support performance events. These events measure various
 * physical and sw events of the kernel (and allow the profiling of them as
 * well):
 */
enum perf_sw_ids {
	PERF_COUNT_SW_CPU_CLOCK			= 0,
	PERF_COUNT_SW_TASK_CLOCK		= 1,
	PERF_COUNT_SW_PAGE_FAULTS		= 2,
	PERF_COUNT_SW_CONTEXT_SWITCHES		= 3,
	PERF_COUNT_SW_CPU_MIGRATIONS		= 4,
	PERF_COUNT_SW_PAGE_FAULTS_MIN		= 5,
	PERF_COUNT_SW_PAGE_FAULTS_MAJ		= 6,
	PERF_COUNT_SW_ALIGNMENT_FAULTS		= 7,
	PERF_COUNT_SW_EMULATION_FAULTS		= 8,

	PERF_COUNT_SW_MAX,			/* non-ABI */
};
/*
 * Bits that can be set in attr.sample_type to request information
 * in the overflow packets.
 */
enum perf_event_sample_format {
	PERF_SAMPLE_IP				= 1U << 0,
	PERF_SAMPLE_TID				= 1U << 1,
	PERF_SAMPLE_TIME			= 1U << 2,
	PERF_SAMPLE_ADDR			= 1U << 3,
	PERF_SAMPLE_READ			= 1U << 4,
	PERF_SAMPLE_CALLCHAIN			= 1U << 5,
	PERF_SAMPLE_ID				= 1U << 6,
	PERF_SAMPLE_CPU				= 1U << 7,
	PERF_SAMPLE_PERIOD			= 1U << 8,
	PERF_SAMPLE_STREAM_ID			= 1U << 9,
	PERF_SAMPLE_RAW				= 1U << 10,

	PERF_SAMPLE_MAX = 1U << 11,		/* non-ABI */
};

/*
 * The format of the data returned by read() on a perf event fd,
 * as specified by attr.read_format:
 *
 * struct read_format {
 *	{ u64		value;
 *	  { u64		time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64		time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64		id;           } && PERF_FORMAT_ID
 *	} && !PERF_FORMAT_GROUP
 *
 *	{ u64		nr;
 *	  { u64		time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64		time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64		value;
 *	    { u64	id;           } && PERF_FORMAT_ID
 *	  }		cntr[nr];
 *	} && PERF_FORMAT_GROUP
 * };
 */
enum perf_event_read_format {
	PERF_FORMAT_TOTAL_TIME_ENABLED		= 1U << 0,
	PERF_FORMAT_TOTAL_TIME_RUNNING		= 1U << 1,
	PERF_FORMAT_ID				= 1U << 2,
	PERF_FORMAT_GROUP			= 1U << 3,

	PERF_FORMAT_MAX = 1U << 4,		/* non-ABI */
};
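
/*
 * A sketch of what the layout above means in practice: with
 * attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
 * PERF_FORMAT_TOTAL_TIME_RUNNING and no PERF_FORMAT_GROUP, read()
 * returns the struct below (the struct name is illustrative, not part
 * of the ABI). The two times let user-space scale counts from
 * time-multiplexed counters:
 */
#if 0
struct read_format_single {
	__u64	value;		/* raw event count */
	__u64	time_enabled;	/* PERF_FORMAT_TOTAL_TIME_ENABLED */
	__u64	time_running;	/* PERF_FORMAT_TOTAL_TIME_RUNNING */
};

/* scaled estimate: rf.value * rf.time_enabled / rf.time_running */
#endif
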
#define PERF_ATTR_SIZE_VER0	64	/* sizeof first published struct */
/*
 * Hardware event_id to monitor via a performance monitoring event:
 */
struct perf_event_attr {

	/*
	 * Major type: hardware/software/tracepoint/etc.
	 */
	__u32			type;

	/*
	 * Size of the attr structure, for fwd/bwd compat.
	 */
	__u32			size;

	/*
	 * Type specific configuration information.
	 */
	__u64			config;

	union {
		__u64		sample_period;
		__u64		sample_freq;
	};
	__u64			sample_type;
	__u64			read_format;

	__u64			disabled       :  1, /* off by default        */
				inherit	       :  1, /* children inherit it   */
				pinned	       :  1, /* must always be on PMU */
				exclusive      :  1, /* only group on PMU     */
				exclude_user   :  1, /* don't count user      */
				exclude_kernel :  1, /* ditto kernel          */
				exclude_hv     :  1, /* ditto hypervisor      */
				exclude_idle   :  1, /* don't count when idle */
				mmap           :  1, /* include mmap data     */
				comm	       :  1, /* include comm data     */
				freq           :  1, /* use freq, not period  */
				inherit_stat   :  1, /* per task counts       */
				enable_on_exec :  1, /* next exec enables     */
				task           :  1, /* trace fork/exit       */
				watermark      :  1, /* wakeup_watermark      */
				/*
				 * precise_ip:
				 *
				 *  0 - SAMPLE_IP can have arbitrary skid
				 *  1 - SAMPLE_IP must have constant skid
				 *  2 - SAMPLE_IP requested to have 0 skid
				 *  3 - SAMPLE_IP must have 0 skid
				 *
				 *  See also PERF_RECORD_MISC_EXACT_IP
				 */
				precise_ip     :  2, /* skid constraint       */
				mmap_data      :  1, /* non-exec mmap data    */
				sample_id_all  :  1, /* sample_type all events */

				exclude_host   :  1, /* don't count in host   */
				exclude_guest  :  1, /* don't count in guest  */

				__reserved_1   : 43;

	union {
		__u32		wakeup_events;	  /* wakeup every n events */
		__u32		wakeup_watermark; /* bytes before wakeup   */
	};

	__u32			bp_type;
	union {
		__u64		bp_addr;
		__u64		config1; /* extension of config */
	};
	union {
		__u64		bp_len;
		__u64		config2; /* extension of config1 */
	};
};
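
/*
 * A user-space sketch of filling in the struct above, following the
 * perf_event_open(2) man page: the syscall has no glibc wrapper, so it
 * is invoked via syscall(2). <unistd.h>, <string.h> and
 * <sys/syscall.h> are assumed to be included:
 */
#if 0
static int open_insn_counter(pid_t pid)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type	    = PERF_TYPE_HARDWARE;
	attr.size	    = sizeof(attr);	/* fwd/bwd compat */
	attr.config	    = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled	    = 1;		/* enable later via ioctl */
	attr.exclude_kernel = 1;
	attr.exclude_hv	    = 1;

	/* args: attr, pid, cpu (-1 = any), group_fd (-1 = leader), flags */
	return syscall(__NR_perf_event_open, &attr, pid, -1, -1, 0);
}
#endif
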
/*
 * Ioctls that can be done on a perf event fd:
 */
#define PERF_EVENT_IOC_ENABLE		_IO ('$', 0)
#define PERF_EVENT_IOC_DISABLE		_IO ('$', 1)
#define PERF_EVENT_IOC_REFRESH		_IO ('$', 2)
#define PERF_EVENT_IOC_RESET		_IO ('$', 3)
#define PERF_EVENT_IOC_PERIOD		_IOW('$', 4, __u64)
#define PERF_EVENT_IOC_SET_OUTPUT	_IO ('$', 5)
#define PERF_EVENT_IOC_SET_FILTER	_IOW('$', 6, char *)

enum perf_event_ioc_flags {
	PERF_IOC_FLAG_GROUP		= 1U << 0,
};
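
/*
 * A sketch of the usual counting sequence built on the ioctls above;
 * fd comes from perf_event_open() and run_workload() stands in for the
 * measured code. Passing PERF_IOC_FLAG_GROUP as the ioctl argument
 * applies the operation to the whole event group:
 */
#if 0
static __u64 count_workload(int fd)
{
	__u64 count = 0;

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	run_workload();				/* assumed workload hook */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
	read(fd, &count, sizeof(count));
	return count;
}
#endif
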

/*
 * Structure of the page that can be mapped via mmap
 */
struct perf_event_mmap_page {
	__u32	version;		/* version number of this structure */
	__u32	compat_version;		/* lowest version this is compat with */

	/*
	 * Bits needed to read the hw events in user-space.
	 *
	 *   u32 seq;
	 *   s64 count;
	 *
	 *   do {
	 *     seq = pc->lock;
	 *
	 *     barrier()
	 *     if (pc->index) {
	 *       count = pmc_read(pc->index - 1);
	 *       count += pc->offset;
	 *     } else
	 *       goto regular_read;
	 *
	 *     barrier();
	 *   } while (pc->lock != seq);
	 *
	 * NOTE: for obvious reason this only works on self-monitoring
	 *       processes.
	 */
	__u32	lock;			/* seqlock for synchronization */
	__u32	index;			/* hardware event identifier */
	__s64	offset;			/* add to hardware event value */
	__u64	time_enabled;		/* time event active */
	__u64	time_running;		/* time event on cpu */

		/*
		 * Hole for extension of the self monitor capabilities
		 */
	__u64	__reserved[123];	/* align to 1k */

	/*
	 * Control data for the mmap() data buffer.
	 *
	 * User-space reading the @data_head value should issue an rmb(), on
	 * SMP capable platforms, after reading this value -- see
	 * perf_event_wakeup().
	 *
	 * When the mapping is PROT_WRITE the @data_tail value should be
	 * written by userspace to reflect the last read data. In this case
	 * the kernel will not over-write unread data.
	 */
	__u64   data_head;		/* head in the data section */
	__u64	data_tail;		/* user-space written tail */
};
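
/*
 * A user-space sketch of the data_head/data_tail protocol described
 * above. The event fd is mmap()ed as one control page (this struct)
 * followed by 2^n data pages; data_size, the data pointer and rmb()
 * are assumed to be set up by the caller:
 */
#if 0
static void drain_ring_buffer(struct perf_event_mmap_page *pc,
			      char *data, __u64 data_size)
{
	__u64 tail = pc->data_tail;
	__u64 head = pc->data_head;

	rmb();			/* order reads after data_head, see above */
	while (tail < head) {
		struct perf_event_header *hdr = (struct perf_event_header *)
				(data + (tail & (data_size - 1)));
		/* ... consume one record; records may wrap ... */
		tail += hdr->size;
	}
	pc->data_tail = tail;	/* PROT_WRITE mapping: mark data consumed */
}
#endif
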
#define PERF_RECORD_MISC_CPUMODE_MASK		(7 << 0)
#define PERF_RECORD_MISC_CPUMODE_UNKNOWN	(0 << 0)
#define PERF_RECORD_MISC_KERNEL			(1 << 0)
#define PERF_RECORD_MISC_USER			(2 << 0)
#define PERF_RECORD_MISC_HYPERVISOR		(3 << 0)
#define PERF_RECORD_MISC_GUEST_KERNEL		(4 << 0)
#define PERF_RECORD_MISC_GUEST_USER		(5 << 0)

/*
 * Indicates that the content of PERF_SAMPLE_IP points to
 * the actual instruction that triggered the event. See also
 * perf_event_attr::precise_ip.
 */
#define PERF_RECORD_MISC_EXACT_IP		(1 << 14)
/*
 * Reserve the last bit to indicate some extended misc field
 */
#define PERF_RECORD_MISC_EXT_RESERVED		(1 << 15)
struct perf_event_header {
	__u32	type;
	__u16	misc;
	__u16	size;
};

enum perf_event_type {

	/*
	 * If perf_event_attr.sample_id_all is set then all event types will
	 * have the sample_type selected fields related to where/when
	 * (identity) an event took place (TID, TIME, ID, CPU, STREAM_ID),
	 * described in PERF_RECORD_SAMPLE below. They are stashed just after
	 * the perf_event_header and the fields already present for the
	 * record type, i.e. at the end of the payload. That way a newer
	 * perf.data file will be supported by older perf tools, with these
	 * new optional fields being ignored.
	 *
	 * The MMAP events record the PROT_EXEC mappings so that we can
	 * correlate userspace IPs to code. They have the following structure:
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	u64				addr;
	 *	u64				len;
	 *	u64				pgoff;
	 *	char				filename[];
	 * };
	 */
	PERF_RECORD_MMAP			= 1,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				id;
	 *	u64				lost;
	 * };
	 */
	PERF_RECORD_LOST			= 2,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	char				comm[];
	 * };
	 */
	PERF_RECORD_COMM			= 3,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, ppid;
	 *	u32				tid, ptid;
	 *	u64				time;
	 * };
	 */
	PERF_RECORD_EXIT			= 4,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				time;
	 *	u64				id;
	 *	u64				stream_id;
	 * };
	 */
	PERF_RECORD_THROTTLE			= 5,
	PERF_RECORD_UNTHROTTLE			= 6,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, ppid;
	 *	u32				tid, ptid;
	 *	u64				time;
	 * };
	 */
	PERF_RECORD_FORK			= 7,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, tid;
	 *
	 *	struct read_format		values;
	 * };
	 */
	PERF_RECORD_READ			= 8,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	{ u64			ip;	  } && PERF_SAMPLE_IP
	 *	{ u32			pid, tid; } && PERF_SAMPLE_TID
	 *	{ u64			time;     } && PERF_SAMPLE_TIME
	 *	{ u64			addr;     } && PERF_SAMPLE_ADDR
	 *	{ u64			id;	  } && PERF_SAMPLE_ID
	 *	{ u64			stream_id;} && PERF_SAMPLE_STREAM_ID
	 *	{ u32			cpu, res; } && PERF_SAMPLE_CPU
	 *	{ u64			period;   } && PERF_SAMPLE_PERIOD
	 *
	 *	{ struct read_format	values;	  } && PERF_SAMPLE_READ
	 *
	 *	{ u64			nr,
	 *	  u64			ips[nr];  } && PERF_SAMPLE_CALLCHAIN
	 *
	 *	#
	 *	# The RAW record below is opaque data wrt the ABI
	 *	#
	 *	# That is, the ABI doesn't make any promises wrt
	 *	# the stability of its content, it may vary depending
	 *	# on event, hardware, kernel version and phase of
	 *	# the moon.
	 *	#
	 *	# In other words, PERF_SAMPLE_RAW contents are not an ABI.
	 *	#
	 *
	 *	{ u32			size;
	 *	  char			data[size]; } && PERF_SAMPLE_RAW
	 * };
	 */
	PERF_RECORD_SAMPLE			= 9,

	PERF_RECORD_MAX,			/* non-ABI */
};
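
/*
 * A sketch of dispatching on perf_event_header::type when walking the
 * mmap()ed buffer; each case follows the per-record layout documented
 * in the comments above (the handle_*() helpers are assumed):
 */
#if 0
static void handle_record(struct perf_event_header *hdr)
{
	switch (hdr->type) {
	case PERF_RECORD_SAMPLE:
		/* fields appear in attr.sample_type bit order */
		handle_sample(hdr);
		break;
	case PERF_RECORD_MMAP:
		/* track PROT_EXEC mappings for later symbolization */
		handle_mmap(hdr);
		break;
	case PERF_RECORD_LOST:
		/* u64 id, u64 lost follow the header */
		handle_lost(hdr);
		break;
	default:
		break;			/* skip unknown records via hdr->size */
	}
}
#endif
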
enum perf_callchain_context {
	PERF_CONTEXT_HV			= (__u64)-32,
	PERF_CONTEXT_KERNEL		= (__u64)-128,
	PERF_CONTEXT_USER		= (__u64)-512,

	PERF_CONTEXT_GUEST		= (__u64)-2048,
	PERF_CONTEXT_GUEST_KERNEL	= (__u64)-2176,
	PERF_CONTEXT_GUEST_USER		= (__u64)-2560,

	PERF_CONTEXT_MAX		= (__u64)-4095,
};
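
/*
 * The values above appear as markers inside a PERF_SAMPLE_CALLCHAIN
 * ips[] array: any entry at or above PERF_CONTEXT_MAX (as an unsigned
 * value) switches the context for the addresses that follow. A walker
 * sketch, with resolve_ip() an assumed symbolizer:
 */
#if 0
static void walk_callchain(const __u64 *ips, __u64 nr)
{
	__u64 i, context = PERF_CONTEXT_USER;

	for (i = 0; i < nr; i++) {
		if (ips[i] >= PERF_CONTEXT_MAX)
			context = ips[i];	/* e.g. PERF_CONTEXT_KERNEL */
		else
			resolve_ip(context, ips[i]);
	}
}
#endif
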
#define PERF_FLAG_FD_NO_GROUP		(1U << 0)
#define PERF_FLAG_FD_OUTPUT		(1U << 1)
#define PERF_FLAG_PID_CGROUP		(1U << 2) /* pid=cgroup id, per-cpu mode only */

#ifdef __KERNEL__
/*
 * Kernel-internal data types and definitions:
 */
#ifdef CONFIG_PERF_EVENTS
# include <linux/cgroup.h>
# include <asm/perf_event.h>
# include <asm/local64.h>
#endif
struct perf_guest_info_callbacks {
	int				(*is_in_guest)(void);
	int				(*is_user_mode)(void);
	unsigned long			(*get_guest_ip)(void);
};
#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <asm/hw_breakpoint.h>
#endif
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <linux/workqueue.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/irq_work.h>
#include <linux/jump_label.h>
#include <linux/atomic.h>
#include <asm/local.h>

#define PERF_MAX_STACK_DEPTH		255

struct perf_callchain_entry {
	__u64				nr;
	__u64				ip[PERF_MAX_STACK_DEPTH];
};
struct perf_raw_record {
	u32				size;
	void				*data;
};
struct perf_branch_entry {
	__u64				from;
	__u64				to;
	__u64				flags;
};

struct perf_branch_stack {
	__u64				nr;
	struct perf_branch_entry	entries[0];
};
struct task_struct;
/*
 * extra PMU register associated with an event
 */
struct hw_perf_event_extra {
	u64		config;	/* register value */
	unsigned int	reg;	/* register address or index */
	int		alloc;	/* extra register already allocated */
	int		idx;	/* index in shared_regs->regs[] */
};
/**
 * struct hw_perf_event - performance event hardware details:
 */
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
	union {
		struct { /* hardware */
			u64		config;
			u64		last_tag;
			unsigned long	config_base;
			unsigned long	event_base;
			int		idx;
			int		last_cpu;
			struct hw_perf_event_extra extra_reg;
		};
		struct { /* software */
			struct hrtimer	hrtimer;
		};
#ifdef CONFIG_HAVE_HW_BREAKPOINT
		struct { /* breakpoint */
			struct arch_hw_breakpoint	info;
			struct list_head		bp_list;
			/*
			 * Crufty hack to avoid the chicken and egg
			 * problem hw_breakpoint has with context
			 * creation and event initialization.
			 */
			struct task_struct		*bp_target;
		};
#endif
	};
	int				state;
	local64_t			prev_count;
	u64				sample_period;
	u64				last_period;
	local64_t			period_left;
	u64				interrupts;

	u64				freq_time_stamp;
	u64				freq_count_stamp;
#endif
};
/*
 * hw_perf_event::state flags
 */
#define PERF_HES_STOPPED	0x01 /* the counter is stopped */
#define PERF_HES_UPTODATE	0x02 /* event->count up-to-date */
#define PERF_HES_ARCH		0x04
struct perf_event;

/*
 * Common implementation detail of pmu::{start,commit,cancel}_txn
 */
#define PERF_EVENT_TXN 0x1

/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
	struct list_head		entry;
	struct device			*dev;
	char				*name;
	int				type;
	int * __percpu			pmu_disable_count;
	struct perf_cpu_context * __percpu pmu_cpu_context;
	int				task_ctx_nr;

	/*
	 * Fully disable/enable this PMU, can be used to protect from the PMI
	 * as well as for lazy/batch writing of the MSRs.
	 */
	void (*pmu_enable)		(struct pmu *pmu); /* optional */
	void (*pmu_disable)		(struct pmu *pmu); /* optional */

	/*
	 * Try and initialize the event for this PMU.
	 * Should return -ENOENT when the @event doesn't match this PMU.
	 */
	int (*event_init)		(struct perf_event *event);
#define PERF_EF_START	0x01		/* start the counter when adding    */
#define PERF_EF_RELOAD	0x02		/* reload the counter when starting */
#define PERF_EF_UPDATE	0x04		/* update the counter when stopping */
	/*
	 * Adds/Removes a counter to/from the PMU, can be done inside
	 * a transaction, see the ->*_txn() methods.
	 */
	int  (*add)			(struct perf_event *event, int flags);
	void (*del)			(struct perf_event *event, int flags);

	/*
	 * Starts/Stops a counter present on the PMU. The PMI handler
	 * should stop the counter when perf_event_overflow() returns
	 * !0. ->start() will be used to continue.
	 */
	void (*start)			(struct perf_event *event, int flags);
	void (*stop)			(struct perf_event *event, int flags);

	/*
	 * Updates the counter value of the event.
	 */
	void (*read)			(struct perf_event *event);

	/*
	 * Group events scheduling is treated as a transaction, add
	 * group events as a whole and perform one schedulability test.
	 * If the test fails, roll back the whole group
	 *
	 * Start the transaction, after this ->add() doesn't need to
	 * do schedulability tests.
	 */
	void (*start_txn)		(struct pmu *pmu); /* optional */
	/*
	 * If ->start_txn() disabled the ->add() schedulability test
	 * then ->commit_txn() is required to perform one. On success
	 * the transaction is closed. On error the transaction is kept
	 * open until ->cancel_txn() is called.
	 */
	int  (*commit_txn)		(struct pmu *pmu); /* optional */
	/*
	 * Will cancel the transaction, assumes ->del() is called
	 * for each successful ->add() during the transaction.
	 */
	void (*cancel_txn)		(struct pmu *pmu); /* optional */
};
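
/*
 * A sketch of the smallest possible pmu implementation: a do-nothing
 * software-style PMU wired up from the mandatory callbacks above.
 * "nop_pmu" is illustrative, not an in-tree PMU; perf_sw_context
 * comes from <linux/sched.h>, and registering with type -1 requests a
 * dynamically allocated type id:
 */
#if 0
static int nop_event_init(struct perf_event *event)
{
	if (event->attr.type != event->pmu->type)
		return -ENOENT;	/* not ours; the core tries the next PMU */
	return 0;
}

static int  nop_add(struct perf_event *event, int flags)   { return 0; }
static void nop_del(struct perf_event *event, int flags)   { }
static void nop_start(struct perf_event *event, int flags) { }
static void nop_stop(struct perf_event *event, int flags)  { }
static void nop_read(struct perf_event *event)             { }

static struct pmu nop_pmu = {
	.task_ctx_nr	= perf_sw_context,
	.event_init	= nop_event_init,
	.add		= nop_add,
	.del		= nop_del,
	.start		= nop_start,
	.stop		= nop_stop,
	.read		= nop_read,
};

/* registered with: perf_pmu_register(&nop_pmu, "nop", -1); */
#endif
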
/**
 * enum perf_event_active_state - the states of an event
 */
enum perf_event_active_state {
	PERF_EVENT_STATE_ERROR		= -2,
	PERF_EVENT_STATE_OFF		= -1,
	PERF_EVENT_STATE_INACTIVE	=  0,
	PERF_EVENT_STATE_ACTIVE		=  1,
};
struct file;
struct perf_sample_data;
typedef void (*perf_overflow_handler_t)(struct perf_event *,
					struct perf_sample_data *,
					struct pt_regs *regs);
enum perf_group_flag {
	PERF_GROUP_SOFTWARE		= 0x1,
};
#define SWEVENT_HLIST_BITS		8
#define SWEVENT_HLIST_SIZE		(1 << SWEVENT_HLIST_BITS)

struct swevent_hlist {
	struct hlist_head		heads[SWEVENT_HLIST_SIZE];
	struct rcu_head			rcu_head;
};
#define PERF_ATTACH_CONTEXT	0x01
#define PERF_ATTACH_GROUP	0x02
#define PERF_ATTACH_TASK	0x04

#ifdef CONFIG_CGROUP_PERF
/*
 * perf_cgroup_info keeps track of time_enabled for a cgroup.
 * This is a per-cpu dynamically allocated data structure.
 */
struct perf_cgroup_info {
	u64				time;
	u64				timestamp;
};

struct perf_cgroup {
	struct				cgroup_subsys_state css;
	struct				perf_cgroup_info *info;	/* timing info, one per cpu */
};
#endif
struct ring_buffer;
/**
 * struct perf_event - performance event kernel representation:
 */
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
	struct list_head		group_entry;
	struct list_head		event_entry;
	struct list_head		sibling_list;
	struct hlist_node		hlist_entry;
	int				nr_siblings;
	int				group_flags;
	struct perf_event		*group_leader;
	struct pmu			*pmu;

	enum perf_event_active_state	state;
	unsigned int			attach_state;
	local64_t			count;
	atomic64_t			child_count;

	/*
	 * These are the total time in nanoseconds that the event
	 * has been enabled (i.e. eligible to run, and the task has
	 * been scheduled in, if this is a per-task event)
	 * and running (scheduled onto the CPU), respectively.
	 *
	 * They are computed from tstamp_enabled, tstamp_running and
	 * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
	 */
	u64				total_time_enabled;
	u64				total_time_running;

	/*
	 * These are timestamps used for computing total_time_enabled
	 * and total_time_running when the event is in INACTIVE or
	 * ACTIVE state, measured in nanoseconds from an arbitrary point
	 * in time.
	 * tstamp_enabled: the notional time when the event was enabled
	 * tstamp_running: the notional time when the event was scheduled on
	 * tstamp_stopped: in INACTIVE state, the notional time when the
	 *	event was scheduled off.
	 */
	u64				tstamp_enabled;
	u64				tstamp_running;
	u64				tstamp_stopped;
	/*
	 * timestamp shadows the actual context timing but it can
	 * be safely used in NMI interrupt context. It reflects the
	 * context time as it was when the event was last scheduled in.
	 *
	 * ctx_time already accounts for ctx->timestamp. Therefore to
	 * compute ctx_time for a sample, simply add perf_clock().
	 */
	u64				shadow_ctx_time;
	struct perf_event_attr		attr;
	u16				header_size;
	u16				id_header_size;
	u16				read_size;
	struct hw_perf_event		hw;

	struct perf_event_context	*ctx;
	struct file			*filp;

	/*
	 * These accumulate total time (in nanoseconds) that children
	 * events have been enabled and running, respectively.
	 */
	atomic64_t			child_total_time_enabled;
	atomic64_t			child_total_time_running;

	/*
	 * Protect attach/detach and child_list:
	 */
	struct mutex			child_mutex;
	struct list_head		child_list;
	struct perf_event		*parent;

	int				oncpu;
	int				cpu;
	struct list_head		owner_entry;
	struct task_struct		*owner;
	/* mmap bits */
	struct mutex			mmap_mutex;
	atomic_t			mmap_count;
	int				mmap_locked;
	struct user_struct		*mmap_user;
	struct ring_buffer		*rb;
	struct list_head		rb_entry;

	/* poll related */
	wait_queue_head_t		waitq;
	struct fasync_struct		*fasync;

	/* delayed work for NMIs and such */
	int				pending_wakeup;
	int				pending_kill;
	int				pending_disable;
	struct irq_work			pending;

	atomic_t			event_limit;
	void (*destroy)(struct perf_event *);
	struct rcu_head			rcu_head;

	struct pid_namespace		*ns;
	u64				id;

	perf_overflow_handler_t		overflow_handler;
	void				*overflow_handler_context;

#ifdef CONFIG_EVENT_TRACING
	struct ftrace_event_call	*tp_event;
	struct event_filter		*filter;
#endif

#ifdef CONFIG_CGROUP_PERF
	struct perf_cgroup		*cgrp; /* cgroup the event is attached to */
	int				cgrp_defer_enabled;
#endif
#endif /* CONFIG_PERF_EVENTS */
};
enum perf_event_context_type {
	task_context,
	cpu_context,
};
/**
 * struct perf_event_context - event context structure
 *
 * Used as a container for task events and CPU events as well:
 */
struct perf_event_context {
	struct pmu			*pmu;
	enum perf_event_context_type	type;
	/*
	 * Protect the states of the events in the list,
	 * nr_active, and the list:
	 */
	raw_spinlock_t			lock;
	/*
	 * Protect the list of events.  Locking either mutex or lock
	 * is sufficient to ensure the list doesn't change; to change
	 * the list you need to lock both the mutex and the spinlock.
	 */
	struct mutex			mutex;

	struct list_head		pinned_groups;
	struct list_head		flexible_groups;
	struct list_head		event_list;
	int				nr_events;
	int				nr_active;
	int				is_active;
	int				nr_stat;
	int				nr_freq;
	int				rotate_disable;
	atomic_t			refcount;
	struct task_struct		*task;

	/*
	 * Context clock, runs when context enabled.
	 */
	u64				time;
	u64				timestamp;

	/*
	 * These fields let us detect when two contexts have both
	 * been cloned (inherited) from a common ancestor.
	 */
	struct perf_event_context	*parent_ctx;
	u64				parent_gen;
	u64				generation;
	int				pin_count;
	int				nr_cgroups; /* cgroup events present */
	struct rcu_head			rcu_head;
};
/*
 * Number of contexts where an event can trigger:
 *	task, softirq, hardirq, nmi.
 */
#define PERF_NR_CONTEXTS	4
/**
 * struct perf_cpu_context - per cpu event context structure
0793a61d4   Thomas Gleixner   performance count...
880
881
   */
  struct perf_cpu_context {
cdd6c482c   Ingo Molnar   perf: Do the big ...
882
883
  	struct perf_event_context	ctx;
  	struct perf_event_context	*task_ctx;
0793a61d4   Thomas Gleixner   performance count...
884
  	int				active_oncpu;
3b6f9e5cb   Paul Mackerras   perf_counter: Add...
885
  	int				exclusive;
e9d2b0641   Peter Zijlstra   perf: Undo the pe...
886
887
  	struct list_head		rotation_list;
  	int				jiffies_interval;
516769575   Peter Zijlstra   perf: Fix duplica...
888
  	struct pmu			*active_pmu;
e5d1367f1   Stephane Eranian   perf: Add cgroup ...
889
  	struct perf_cgroup		*cgrp;
0793a61d4   Thomas Gleixner   performance count...
890
  };
5622f295b   Markus Metzger   x86, perf_counter...
891
  struct perf_output_handle {
57c0c15b5   Ingo Molnar   perf: Tidy up aft...
892
  	struct perf_event		*event;
76369139c   Frederic Weisbecker   perf: Split up bu...
893
  	struct ring_buffer		*rb;
6d1acfd5c   Peter Zijlstra   perf: Optimize pe...
894
  	unsigned long			wakeup;
5d967a8be   Peter Zijlstra   perf: Optimize pe...
895
896
897
  	unsigned long			size;
  	void				*addr;
  	int				page;
5622f295b   Markus Metzger   x86, perf_counter...
898
  };
cdd6c482c   Ingo Molnar   perf: Do the big ...
899
  #ifdef CONFIG_PERF_EVENTS
829b42dd3   Robert Richter   perf_counter, x86...
900

2e80a82a4   Peter Zijlstra   perf: Dynamic pmu...
901
  extern int perf_pmu_register(struct pmu *pmu, char *name, int type);
b0a873ebb   Peter Zijlstra   perf: Register PM...
902
  extern void perf_pmu_unregister(struct pmu *pmu);
621a01eac   Ingo Molnar   perf counters: hw...
903

3bf101ba4   Matt Fleming   perf: Add helper ...
904
  extern int perf_num_counters(void);
84c799105   Matt Fleming   perf: New helper ...
905
  extern const char *perf_pmu_name(void);

extern void __perf_event_task_sched_in(struct task_struct *prev,
				       struct task_struct *task);
extern void __perf_event_task_sched_out(struct task_struct *prev,
					struct task_struct *next);
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern int perf_event_refresh(struct perf_event *event, int refresh);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
				int cpu,
				struct task_struct *task,
				perf_overflow_handler_t callback,
				void *context);
extern u64 perf_event_read_value(struct perf_event *event,
				 u64 *enabled, u64 *running);
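
/*
 * Example (illustrative sketch): counting CPU cycles from kernel code
 * with an in-kernel counter; names are hypothetical and error handling
 * is elided. A NULL task requests a per-cpu event, a NULL callback
 * means no overflow notification is wanted.
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_HARDWARE,
 *		.config	= PERF_COUNT_HW_CPU_CYCLES,
 *		.size	= sizeof(attr),
 *	};
 *	struct perf_event *event;
 *	u64 count, enabled, running;
 *
 *	event = perf_event_create_kernel_counter(&attr, 0, NULL,
 *						 NULL, NULL);
 *	if (!IS_ERR(event)) {
 *		... run the workload to be measured ...
 *		count = perf_event_read_value(event, &enabled, &running);
 *		perf_event_release_kernel(event);
 *	}
 */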

struct perf_sample_data {
	u64				type;

	u64				ip;
	struct {
		u32	pid;
		u32	tid;
	}				tid_entry;
	u64				time;
	u64				addr;
	u64				id;
	u64				stream_id;
	struct {
		u32	cpu;
		u32	reserved;
	}				cpu_entry;
	u64				period;
	struct perf_callchain_entry	*callchain;
	struct perf_raw_record		*raw;
};

static inline void perf_sample_data_init(struct perf_sample_data *data, u64 addr)
{
	data->addr = addr;
	data->raw  = NULL;
}

extern void perf_output_sample(struct perf_output_handle *handle,
			       struct perf_event_header *header,
			       struct perf_sample_data *data,
			       struct perf_event *event);
extern void perf_prepare_sample(struct perf_event_header *header,
				struct perf_sample_data *data,
				struct perf_event *event,
				struct pt_regs *regs);
extern int perf_event_overflow(struct perf_event *event,
				 struct perf_sample_data *data,
				 struct pt_regs *regs);
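
/*
 * Example (illustrative sketch): the pattern a PMU interrupt handler
 * uses to push a sample through the generic overflow path; "regs" and
 * the hw.last_period value come from the driver. A non-zero return
 * from perf_event_overflow() means the event got throttled.
 *
 *	struct perf_sample_data data;
 *
 *	perf_sample_data_init(&data, 0);
 *	data.period = event->hw.last_period;
 *	if (perf_event_overflow(event, &data, regs))
 *		... stop the counter ...
 */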

static inline bool is_sampling_event(struct perf_event *event)
{
	return event->attr.sample_period != 0;
}
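
/*
 * Example (illustrative sketch): a PMU without a sampling interrupt
 * would reject sampling events from its (hypothetical) event_init
 * callback:
 *
 *	if (is_sampling_event(event))
 *		return -EINVAL;
 */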

/*
 * Return 1 for a software event, 0 for a hardware event
 */
static inline int is_software_event(struct perf_event *event)
{
	return event->pmu->task_ctx_nr == perf_sw_context;
}

extern struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX];

extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);

#ifndef perf_arch_fetch_caller_regs
static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
#endif

/*
 * Take a snapshot of the regs. Skip ip and frame pointer to
 * the nth caller. We only need a few of the regs:
 * - ip for PERF_SAMPLE_IP
 * - cs for user_mode() tests
 * - bp for callchains
 * - eflags, for future purposes, just in case
 */
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
	memset(regs, 0, sizeof(*regs));
	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}

static __always_inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
	struct pt_regs hot_regs;

	if (static_branch(&perf_swevent_enabled[event_id])) {
		if (!regs) {
			perf_fetch_caller_regs(&hot_regs);
			regs = &hot_regs;
		}
		__perf_sw_event(event_id, nr, regs, addr);
	}
}
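
/*
 * Example (illustrative sketch): callers emit software events with a
 * single statement, e.g. a fault path counting page faults with the
 * faulting address attached. With regs == NULL, perf_sw_event()
 * snapshots the caller's registers itself via perf_fetch_caller_regs().
 *
 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 */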

extern struct jump_label_key_deferred perf_sched_events;

static inline void perf_event_task_sched_in(struct task_struct *prev,
					    struct task_struct *task)
{
	if (static_branch(&perf_sched_events.key))
		__perf_event_task_sched_in(prev, task);
}

static inline void perf_event_task_sched_out(struct task_struct *prev,
					     struct task_struct *next)
{
	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);

	if (static_branch(&perf_sched_events.key))
		__perf_event_task_sched_out(prev, next);
}

extern void perf_event_mmap(struct vm_area_struct *vma);

extern struct perf_guest_info_callbacks *perf_guest_cbs;
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
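
/*
 * Example (illustrative sketch): a hypervisor registers callbacks so
 * that samples hitting guest context get classified correctly; this
 * mirrors what KVM does, but the names below are hypothetical.
 *
 *	static struct perf_guest_info_callbacks hv_guest_cbs = {
 *		.is_in_guest	= hv_is_in_guest,
 *		.is_user_mode	= hv_is_guest_user_mode,
 *		.get_guest_ip	= hv_get_guest_ip,
 *	};
 *
 *	perf_register_guest_info_callbacks(&hv_guest_cbs);
 */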

extern void perf_event_comm(struct task_struct *tsk);
extern void perf_event_fork(struct task_struct *tsk);

/* Callchains */
DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);

extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);

static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
	if (entry->nr < PERF_MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}
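
/*
 * Example (illustrative sketch): an architecture's callchain code feeds
 * return addresses to the generic layer one at a time; the stack walk
 * itself is arch-specific and only hinted at here.
 *
 *	void perf_callchain_kernel(struct perf_callchain_entry *entry,
 *				   struct pt_regs *regs)
 *	{
 *		perf_callchain_store(entry, instruction_pointer(regs));
 *		... walk the frames, calling perf_callchain_store() on
 *		    each return address ...
 *	}
 */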

extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;

extern int perf_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos);
  static inline bool perf_paranoid_tracepoint_raw(void)
  {
  	return sysctl_perf_event_paranoid > -1;
  }
  
  static inline bool perf_paranoid_cpu(void)
  {
  	return sysctl_perf_event_paranoid > 0;
  }
  
  static inline bool perf_paranoid_kernel(void)
  {
  	return sysctl_perf_event_paranoid > 1;
  }
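
/*
 * Example (illustrative sketch): permission checks in the syscall
 * follow this shape, refusing unprivileged callers as the paranoia
 * level rises.
 *
 *	if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
 *		return -EACCES;
 */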

extern void perf_event_init(void);

extern void perf_tp_event(u64 addr, u64 count, void *record,
			  int entry_size, struct pt_regs *regs,
			  struct hlist_head *head, int rctx);
extern void perf_bp_event(struct perf_event *event, void *data);

#ifndef perf_misc_flags
# define perf_misc_flags(regs) \
		(user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
# define perf_instruction_pointer(regs)	instruction_pointer(regs)
#endif

extern int perf_output_begin(struct perf_output_handle *handle,
			     struct perf_event *event, unsigned int size);
extern void perf_output_end(struct perf_output_handle *handle);
extern void perf_output_copy(struct perf_output_handle *handle,
			     const void *buf, unsigned int len);

extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
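
/*
 * Example (illustrative sketch): code that can fire from task, softirq,
 * hardirq and NMI context brackets its work with the recursion helpers
 * so one context cannot recurse into another (cf. PERF_NR_CONTEXTS).
 *
 *	rctx = perf_swevent_get_recursion_context();
 *	if (rctx < 0)
 *		return;
 *	... emit the event ...
 *	perf_swevent_put_recursion_context(rctx);
 */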

extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
extern void perf_event_task_tick(void);

#else
static inline void
perf_event_task_sched_in(struct task_struct *prev,
			 struct task_struct *task)			{ }
static inline void
perf_event_task_sched_out(struct task_struct *prev,
			  struct task_struct *next)			{ }
static inline int perf_event_init_task(struct task_struct *child)	{ return 0; }
static inline void perf_event_exit_task(struct task_struct *child)	{ }
static inline void perf_event_free_task(struct task_struct *task)	{ }
static inline void perf_event_delayed_put(struct task_struct *task)	{ }
static inline void perf_event_print_debug(void)				{ }
static inline int perf_event_task_disable(void)				{ return -EINVAL; }
static inline int perf_event_task_enable(void)				{ return -EINVAL; }
static inline int perf_event_refresh(struct perf_event *event, int refresh)
{
	return -EINVAL;
}

static inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)	{ }
static inline void
perf_bp_event(struct perf_event *event, void *data)			{ }

static inline int perf_register_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)				{ return 0; }
static inline int perf_unregister_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)				{ return 0; }

static inline void perf_event_mmap(struct vm_area_struct *vma)		{ }
static inline void perf_event_comm(struct task_struct *tsk)		{ }
static inline void perf_event_fork(struct task_struct *tsk)		{ }
static inline void perf_event_init(void)				{ }
static inline int perf_swevent_get_recursion_context(void)		{ return -1; }
static inline void perf_swevent_put_recursion_context(int rctx)		{ }
static inline void perf_event_enable(struct perf_event *event)		{ }
static inline void perf_event_disable(struct perf_event *event)		{ }
static inline void perf_event_task_tick(void)				{ }
#endif

#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
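
/*
 * Example (illustrative sketch): emitting a record into the ring
 * buffer. perf_output_begin() reserves space, perf_output_put() copies
 * the payload, perf_output_end() publishes it and wakes up readers.
 * The record layout here is hypothetical.
 *
 *	struct perf_output_handle handle;
 *	struct perf_event_header header = {
 *		.type = PERF_RECORD_SAMPLE,
 *		.size = sizeof(header) + sizeof(u64),
 *	};
 *	u64 payload = 42;
 *
 *	if (perf_output_begin(&handle, event, header.size))
 *		return;
 *	perf_output_put(&handle, header);
 *	perf_output_put(&handle, payload);
 *	perf_output_end(&handle);
 */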

/*
 * This has to have a higher priority than migration_notifier in sched.c.
 */
#define perf_cpu_notifier(fn)						\
do {									\
	static struct notifier_block fn##_nb __cpuinitdata =		\
		{ .notifier_call = fn, .priority = CPU_PRI_PERF };	\
	fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,			\
		(void *)(unsigned long)smp_processor_id());		\
	fn(&fn##_nb, (unsigned long)CPU_STARTING,			\
		(void *)(unsigned long)smp_processor_id());		\
	fn(&fn##_nb, (unsigned long)CPU_ONLINE,				\
		(void *)(unsigned long)smp_processor_id());		\
	register_cpu_notifier(&fn##_nb);				\
} while (0)
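
/*
 * Example (illustrative sketch): a (hypothetical) callback wired up via
 * perf_cpu_notifier(). Note the macro immediately replays UP_PREPARE,
 * STARTING and ONLINE for the current cpu before registering, so the
 * callback must also work when invoked directly at init time.
 *
 *	static int __cpuinit
 *	my_perf_cpu_notify(struct notifier_block *nb, unsigned long action,
 *			   void *hcpu)
 *	{
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_ONLINE:
 *			... per-cpu setup ...
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	perf_cpu_notifier(my_perf_cpu_notify);
 */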

#endif /* __KERNEL__ */

#endif /* _LINUX_PERF_EVENT_H */