Blame view: arch/x86/kernel/cpu/perf_event.c

241771ef0   Ingo Molnar   performance count...
1
  /*
cdd6c482c   Ingo Molnar   perf: Do the big ...
2
   * Performance events x86 architecture code
241771ef0   Ingo Molnar   performance count...
3
   *
981445114   Ingo Molnar   perf_counter: add...
4
5
6
7
8
   *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
   *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
   *  Copyright (C) 2009 Jaswinder Singh Rajput
   *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
   *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
30dd568c9   Markus Metzger   x86, perf_counter...
9
   *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
1da53e023   Stephane Eranian   perf_events, x86:...
10
   *  Copyright (C) 2009 Google, Inc., Stephane Eranian
241771ef0   Ingo Molnar   performance count...
11
12
13
   *
   *  For licencing details see kernel-base/COPYING
   */
cdd6c482c   Ingo Molnar   perf: Do the big ...
14
  #include <linux/perf_event.h>
241771ef0   Ingo Molnar   performance count...
15
16
17
18
  #include <linux/capability.h>
  #include <linux/notifier.h>
  #include <linux/hardirq.h>
  #include <linux/kprobes.h>
4ac13294e   Thomas Gleixner   perf counters: pr...
19
  #include <linux/module.h>
241771ef0   Ingo Molnar   performance count...
20
21
  #include <linux/kdebug.h>
  #include <linux/sched.h>
d7d59fb32   Peter Zijlstra   perf_counter: x86...
22
  #include <linux/uaccess.h>
5a0e3ad6a   Tejun Heo   include cleanup: ...
23
  #include <linux/slab.h>
74193ef0e   Peter Zijlstra   perf_counter: x86...
24
  #include <linux/highmem.h>
30dd568c9   Markus Metzger   x86, perf_counter...
25
  #include <linux/cpu.h>
272d30be6   Peter Zijlstra   perf_event: x86: ...
26
  #include <linux/bitops.h>
241771ef0   Ingo Molnar   performance count...
27

241771ef0   Ingo Molnar   performance count...
28
  #include <asm/apic.h>
d7d59fb32   Peter Zijlstra   perf_counter: x86...
29
  #include <asm/stacktrace.h>
4e935e471   Peter Zijlstra   perf_counter: pmc...
30
  #include <asm/nmi.h>
257ef9d21   Torok Edwin   perf, x86: Fix ca...
31
  #include <asm/compat.h>
690926242   Lin Ming   perf: Avoid the p...
32
  #include <asm/smp.h>
c8e5910ed   Robert Richter   perf, x86: Use AL...
33
  #include <asm/alternative.h>
241771ef0   Ingo Molnar   performance count...
34

7645a24cb   Peter Zijlstra   perf, x86: Remove...
35
36
37
38
39
40
41
42
43
44
45
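/*
 * Debug helper, normally compiled out: flipping the "#if 0" below to
 * "#if 1" makes every PMU MSR write also get logged via trace_printk().
 */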
  #if 0
  #undef wrmsrl
  #define wrmsrl(msr, val) 					\
  do {								\
  	trace_printk("wrmsrl(%lx, %lx)
  ", (unsigned long)(msr),\
  			(unsigned long)(val));			\
  	native_write_msr((msr), (u32)((u64)(val)), 		\
  			(u32)((u64)(val) >> 32));		\
  } while (0)
  #endif
ef21f683a   Peter Zijlstra   perf, x86: use LB...
46
  /*
efc9f05df   Stephane Eranian   perf_events: Upda...
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
   *          |   NHM/WSM    |      SNB     |
   * register -------------------------------
   *          |  HT  | no HT |  HT  | no HT |
   *-----------------------------------------
   * offcore  | core | core  | cpu  | core  |
   * lbr_sel  | core | core  | cpu  | core  |
   * ld_lat   | cpu  | core  | cpu  | core  |
   *-----------------------------------------
   *
   * Given that there is a small number of shared regs,
   * we can pre-allocate their slot in the per-cpu
   * per-core reg tables.
   */
  enum extra_reg_type {
  	EXTRA_REG_NONE  = -1,	/* not used */
  
  	EXTRA_REG_RSP_0 = 0,	/* offcore_response_0 */
  	EXTRA_REG_RSP_1 = 1,	/* offcore_response_1 */
  
  	EXTRA_REG_MAX		/* number of entries needed */
  };
  
  /*
ef21f683a   Peter Zijlstra   perf, x86: use LB...
70
71
72
73
74
75
   * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
   */
  static unsigned long
  copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
  {
  	unsigned long offset, addr = (unsigned long)from;
ef21f683a   Peter Zijlstra   perf, x86: use LB...
76
77
78
79
80
81
82
83
84
85
86
87
  	unsigned long size, len = 0;
  	struct page *page;
  	void *map;
  	int ret;
  
  	do {
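		/*
		 * Pin the user page without sleeping; __get_user_pages_fast()
		 * returns 0 if the page is not present, in which case we stop
		 * and report a partial copy via the returned length.
		 */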
  		ret = __get_user_pages_fast(addr, 1, 0, &page);
  		if (!ret)
  			break;
  
  		offset = addr & (PAGE_SIZE - 1);
  		size = min(PAGE_SIZE - offset, n - len);
7a837d1bb   Peter Zijlstra   perf, x86: Fix up...
88
  		map = kmap_atomic(page);
ef21f683a   Peter Zijlstra   perf, x86: use LB...
89
  		memcpy(to, map+offset, size);
7a837d1bb   Peter Zijlstra   perf, x86: Fix up...
90
  		kunmap_atomic(map);
ef21f683a   Peter Zijlstra   perf, x86: use LB...
91
92
93
94
95
96
97
98
99
100
  		put_page(page);
  
  		len  += size;
  		to   += size;
  		addr += size;
  
  	} while (len < n);
  
  	return len;
  }
1da53e023   Stephane Eranian   perf_events, x86:...
101
  struct event_constraint {
c91e0f5da   Peter Zijlstra   perf_event: x86: ...
102
103
  	union {
  		unsigned long	idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
b622d644c   Peter Zijlstra   perf_events, x86:...
104
  		u64		idxmsk64;
c91e0f5da   Peter Zijlstra   perf_event: x86: ...
105
  	};
b622d644c   Peter Zijlstra   perf_events, x86:...
106
107
  	u64	code;
  	u64	cmask;
272d30be6   Peter Zijlstra   perf_event: x86: ...
108
  	int	weight;
1da53e023   Stephane Eranian   perf_events, x86:...
109
  };
38331f62c   Stephane Eranian   perf_events, x86:...
110
111
112
113
114
115
  struct amd_nb {
  	int nb_id;  /* NorthBridge id */
  	int refcnt; /* reference count */
  	struct perf_event *owners[X86_PMC_IDX_MAX];
  	struct event_constraint event_constraints[X86_PMC_IDX_MAX];
  };
a7e3ed1e4   Andi Kleen   perf: Add support...
116
  struct intel_percore;
caff2beff   Peter Zijlstra   perf, x86: Implem...
117
  #define MAX_LBR_ENTRIES		16
cdd6c482c   Ingo Molnar   perf: Do the big ...
118
  struct cpu_hw_events {
ca037701a   Peter Zijlstra   perf, x86: Add PE...
119
120
121
  	/*
  	 * Generic x86 PMC bits
  	 */
1da53e023   Stephane Eranian   perf_events, x86:...
122
  	struct perf_event	*events[X86_PMC_IDX_MAX]; /* in counter order */
43f6201a2   Robert Richter   perf_counter, x86...
123
  	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
63e6be6d9   Robert Richter   perf, x86: Catch ...
124
  	unsigned long		running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
b0f3f28e0   Peter Zijlstra   perfcounters: IRQ...
125
  	int			enabled;
241771ef0   Ingo Molnar   performance count...
126

1da53e023   Stephane Eranian   perf_events, x86:...
127
128
  	int			n_events;
  	int			n_added;
90151c35b   Stephane Eranian   perf_events: Fix ...
129
  	int			n_txn;
1da53e023   Stephane Eranian   perf_events, x86:...
130
  	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
447a194b3   Stephane Eranian   perf_events, x86:...
131
  	u64			tags[X86_PMC_IDX_MAX];
1da53e023   Stephane Eranian   perf_events, x86:...
132
  	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */
ca037701a   Peter Zijlstra   perf, x86: Add PE...
133

4d1c52b02   Lin Ming   perf, x86: implem...
134
  	unsigned int		group_flag;
ca037701a   Peter Zijlstra   perf, x86: Add PE...
135
136
137
138
139
140
141
  	/*
  	 * Intel DebugStore bits
  	 */
  	struct debug_store	*ds;
  	u64			pebs_enabled;
  
  	/*
caff2beff   Peter Zijlstra   perf, x86: Implem...
142
143
144
145
146
147
148
149
  	 * Intel LBR bits
  	 */
  	int				lbr_users;
  	void				*lbr_context;
  	struct perf_branch_stack	lbr_stack;
  	struct perf_branch_entry	lbr_entries[MAX_LBR_ENTRIES];
  
  	/*
efc9f05df   Stephane Eranian   perf_events: Upda...
150
151
  	 * manage shared (per-core, per-cpu) registers
  	 * used on Intel NHM/WSM/SNB
a7e3ed1e4   Andi Kleen   perf: Add support...
152
  	 */
efc9f05df   Stephane Eranian   perf_events: Upda...
153
  	struct intel_shared_regs	*shared_regs;
a7e3ed1e4   Andi Kleen   perf: Add support...
154
155
  
  	/*
ca037701a   Peter Zijlstra   perf, x86: Add PE...
156
157
  	 * AMD specific bits
  	 */
38331f62c   Stephane Eranian   perf_events, x86:...
158
  	struct amd_nb		*amd_nb;
b690081d4   Stephane Eranian   perf_events: Add ...
159
  };
fce877e3a   Peter Zijlstra   bitops: Ensure th...
160
  #define __EVENT_CONSTRAINT(c, n, m, w) {\
b622d644c   Peter Zijlstra   perf_events, x86:...
161
  	{ .idxmsk64 = (n) },		\
c91e0f5da   Peter Zijlstra   perf_event: x86: ...
162
163
  	.code = (c),			\
  	.cmask = (m),			\
fce877e3a   Peter Zijlstra   bitops: Ensure th...
164
  	.weight = (w),			\
c91e0f5da   Peter Zijlstra   perf_event: x86: ...
165
  }
b690081d4   Stephane Eranian   perf_events: Add ...
166

fce877e3a   Peter Zijlstra   bitops: Ensure th...
167
168
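/* the constraint weight is the compile-time popcount (HWEIGHT) of the counter bitmask */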
  #define EVENT_CONSTRAINT(c, n, m)	\
  	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n))
ca037701a   Peter Zijlstra   perf, x86: Add PE...
169
170
171
  /*
   * Constraint on the Event code.
   */
ed8777fc1   Peter Zijlstra   perf_events, x86:...
172
  #define INTEL_EVENT_CONSTRAINT(c, n)	\
a098f4484   Robert Richter   perf, x86: implem...
173
  	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)
8433be118   Peter Zijlstra   perf_event: x86: ...
174

ca037701a   Peter Zijlstra   perf, x86: Add PE...
175
176
  /*
   * Constraint on the Event code + UMask + fixed-mask
a098f4484   Robert Richter   perf, x86: implem...
177
178
179
180
181
182
183
184
   *
   * filter mask to validate fixed counter events.
   * the following filters disqualify for fixed counters:
   *  - inv
   *  - edge
   *  - cnt-mask
   *  The other filters are supported by fixed counters.
   *  The any-thread option is supported starting with v3.
ca037701a   Peter Zijlstra   perf, x86: Add PE...
185
   */
ed8777fc1   Peter Zijlstra   perf_events, x86:...
186
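/* fixed counter 'n' is tracked at bit 32+n of the index mask (X86_PMC_IDX_FIXED + n) */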
  #define FIXED_EVENT_CONSTRAINT(c, n)	\
a098f4484   Robert Richter   perf, x86: implem...
187
  	EVENT_CONSTRAINT(c, (1ULL << (32+n)), X86_RAW_EVENT_MASK)
8433be118   Peter Zijlstra   perf_event: x86: ...
188

ca037701a   Peter Zijlstra   perf, x86: Add PE...
189
190
191
  /*
   * Constraint on the Event code + UMask
   */
b06b3d496   Lin Ming   perf, x86: Add In...
192
  #define INTEL_UEVENT_CONSTRAINT(c, n)	\
ca037701a   Peter Zijlstra   perf, x86: Add PE...
193
  	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
ed8777fc1   Peter Zijlstra   perf_events, x86:...
194
195
196
197
  #define EVENT_CONSTRAINT_END		\
  	EVENT_CONSTRAINT(0, 0, 0)
  
  #define for_each_event_constraint(e, c)	\
a1f2b70a9   Robert Richter   perf, x86: Use we...
198
  	for ((e) = (c); (e)->weight; (e)++)
b690081d4   Stephane Eranian   perf_events: Add ...
199

a7e3ed1e4   Andi Kleen   perf: Add support...
200
  /*
efc9f05df   Stephane Eranian   perf_events: Upda...
201
202
203
204
205
206
207
208
209
210
   * Per register state.
   */
  struct er_account {
  	raw_spinlock_t		lock;	/* per-core: protect structure */
  	u64			config;	/* extra MSR config */
  	u64			reg;	/* extra MSR number */
  	atomic_t		ref;	/* reference count */
  };
  
  /*
a7e3ed1e4   Andi Kleen   perf: Add support...
211
   * Extra registers for specific events.
efc9f05df   Stephane Eranian   perf_events: Upda...
212
   *
a7e3ed1e4   Andi Kleen   perf: Add support...
213
   * Some events need large masks and require external MSRs.
efc9f05df   Stephane Eranian   perf_events: Upda...
214
215
216
217
218
   * Those extra MSRs end up being shared for all events on
   * a PMU and sometimes between PMUs of sibling HT threads.
   * In either case, the kernel needs to handle conflicting
   * accesses to those extra, shared, regs. The data structure
   * to manage those registers is stored in cpu_hw_event.
a7e3ed1e4   Andi Kleen   perf: Add support...
219
220
221
222
223
224
   */
  struct extra_reg {
  	unsigned int		event;
  	unsigned int		msr;
  	u64			config_mask;
  	u64			valid_mask;
efc9f05df   Stephane Eranian   perf_events: Upda...
225
  	int			idx;  /* per_xxx->regs[] reg index */
a7e3ed1e4   Andi Kleen   perf: Add support...
226
  };
efc9f05df   Stephane Eranian   perf_events: Upda...
227
  #define EVENT_EXTRA_REG(e, ms, m, vm, i) {	\
a7e3ed1e4   Andi Kleen   perf: Add support...
228
229
230
231
  	.event = (e),		\
  	.msr = (ms),		\
  	.config_mask = (m),	\
  	.valid_mask = (vm),	\
efc9f05df   Stephane Eranian   perf_events: Upda...
232
  	.idx = EXTRA_REG_##i	\
a7e3ed1e4   Andi Kleen   perf: Add support...
233
  	}
efc9f05df   Stephane Eranian   perf_events: Upda...
234
235
236
237
238
  
  #define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)	\
  	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)
  
  #define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)
a7e3ed1e4   Andi Kleen   perf: Add support...
239

8db909a7e   Peter Zijlstra   perf, x86: Clean ...
240
241
242
243
244
245
246
247
248
249
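/*
 * Decoded view of the IA32_PERF_CAPABILITIES MSR bits used by the
 * Intel PMU code (stored in x86_pmu.intel_cap below).
 */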
  union perf_capabilities {
  	struct {
  		u64	lbr_format    : 6;
  		u64	pebs_trap     : 1;
  		u64	pebs_arch_reg : 1;
  		u64	pebs_format   : 4;
  		u64	smm_freeze    : 1;
  	};
  	u64	capabilities;
  };
241771ef0   Ingo Molnar   performance count...
250
  /*
5f4ec28ff   Robert Richter   perf_counter, x86...
251
   * struct x86_pmu - generic x86 pmu
241771ef0   Ingo Molnar   performance count...
252
   */
5f4ec28ff   Robert Richter   perf_counter, x86...
253
  struct x86_pmu {
ca037701a   Peter Zijlstra   perf, x86: Add PE...
254
255
256
  	/*
  	 * Generic x86 PMC bits
  	 */
faa28ae01   Robert Richter   perf_counter, x86...
257
258
  	const char	*name;
  	int		version;
a32881066   Yong Wang   perf_counter/x86:...
259
  	int		(*handle_irq)(struct pt_regs *);
9e35ad388   Peter Zijlstra   perf_counter: Rew...
260
  	void		(*disable_all)(void);
11164cd4f   Peter Zijlstra   perf, x86: Add Ne...
261
  	void		(*enable_all)(int added);
aff3d91a9   Peter Zijlstra   perf, x86: Change...
262
263
  	void		(*enable)(struct perf_event *);
  	void		(*disable)(struct perf_event *);
b4cdc5c26   Peter Zijlstra   perf, x86: Fix up...
264
  	int		(*hw_config)(struct perf_event *event);
a072738e0   Cyrill Gorcunov   perf, x86: Implem...
265
  	int		(*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
169e41eb7   Jaswinder Singh Rajput   x86: decent decla...
266
267
  	unsigned	eventsel;
  	unsigned	perfctr;
b0f3f28e0   Peter Zijlstra   perfcounters: IRQ...
268
  	u64		(*event_map)(int);
169e41eb7   Jaswinder Singh Rajput   x86: decent decla...
269
  	int		max_events;
948b1bb89   Robert Richter   perf, x86: Undo s...
270
271
272
273
  	int		num_counters;
  	int		num_counters_fixed;
  	int		cntval_bits;
  	u64		cntval_mask;
04da8a43d   Ingo Molnar   perf_counter, x86...
274
  	int		apic;
c619b8ffb   Robert Richter   perf_counter, x86...
275
  	u64		max_period;
63b146490   Peter Zijlstra   perf_event: x86: ...
276
277
278
  	struct event_constraint *
  			(*get_event_constraints)(struct cpu_hw_events *cpuc,
  						 struct perf_event *event);
c91e0f5da   Peter Zijlstra   perf_event: x86: ...
279
280
  	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
  						 struct perf_event *event);
63b146490   Peter Zijlstra   perf_event: x86: ...
281
  	struct event_constraint *event_constraints;
3c44780b2   Peter Zijlstra   perf, x86: Disabl...
282
  	void		(*quirks)(void);
68aa00ac0   Cyrill Gorcunov   perf, x86: Make a...
283
  	int		perfctr_second_write;
3f6da3905   Peter Zijlstra   perf: Rework and ...
284

b38b24ead   Peter Zijlstra   perf, x86: Fix AM...
285
  	int		(*cpu_prepare)(int cpu);
3f6da3905   Peter Zijlstra   perf: Rework and ...
286
287
288
  	void		(*cpu_starting)(int cpu);
  	void		(*cpu_dying)(int cpu);
  	void		(*cpu_dead)(int cpu);
ca037701a   Peter Zijlstra   perf, x86: Add PE...
289
290
291
292
  
  	/*
  	 * Intel Arch Perfmon v2+
  	 */
8db909a7e   Peter Zijlstra   perf, x86: Clean ...
293
294
  	u64			intel_ctrl;
  	union perf_capabilities intel_cap;
ca037701a   Peter Zijlstra   perf, x86: Add PE...
295
296
297
298
299
  
  	/*
  	 * Intel DebugStore bits
  	 */
  	int		bts, pebs;
6809b6ea7   Peter Zijlstra   perf, x86: Less d...
300
  	int		bts_active, pebs_active;
ca037701a   Peter Zijlstra   perf, x86: Add PE...
301
302
303
  	int		pebs_record_size;
  	void		(*drain_pebs)(struct pt_regs *regs);
  	struct event_constraint *pebs_constraints;
caff2beff   Peter Zijlstra   perf, x86: Implem...
304
305
306
307
308
309
  
  	/*
  	 * Intel LBR
  	 */
  	unsigned long	lbr_tos, lbr_from, lbr_to; /* MSR base regs       */
  	int		lbr_nr;			   /* hardware stack size */
a7e3ed1e4   Andi Kleen   perf: Add support...
310
311
312
313
314
  
  	/*
  	 * Extra registers for events
  	 */
  	struct extra_reg *extra_regs;
b79e8941f   Peter Zijlstra   perf, intel: Try ...
315
  	unsigned int er_flags;
b56a3802d   Jaswinder Singh Rajput   x86: prepare perf...
316
  };
b79e8941f   Peter Zijlstra   perf, intel: Try ...
317
318
  #define ERF_NO_HT_SHARING	1
  #define ERF_HAS_RSP_1		2
4a06bd850   Robert Richter   perf_counter, x86...
319
  static struct x86_pmu x86_pmu __read_mostly;
b56a3802d   Jaswinder Singh Rajput   x86: prepare perf...
320

cdd6c482c   Ingo Molnar   perf: Do the big ...
321
  static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
b0f3f28e0   Peter Zijlstra   perfcounters: IRQ...
322
323
  	.enabled = 1,
  };
241771ef0   Ingo Molnar   performance count...
324

07088edb8   Peter Zijlstra   perf, x86: Remove...
325
  static int x86_perf_event_set_period(struct perf_event *event);
b690081d4   Stephane Eranian   perf_events: Add ...
326

b56a3802d   Jaswinder Singh Rajput   x86: prepare perf...
327
  /*
dfc65094d   Ingo Molnar   perf_counter: Ren...
328
   * Generalized hw caching related hw_event table, filled
8326f44da   Ingo Molnar   perf_counter: Imp...
329
   * in on a per model basis. A value of 0 means
dfc65094d   Ingo Molnar   perf_counter: Ren...
330
331
   * 'not supported', -1 means 'hw_event makes no sense on
   * this CPU', any other value means the raw hw_event
8326f44da   Ingo Molnar   perf_counter: Imp...
332
333
334
335
336
337
338
339
340
   * ID.
   */
  
  #define C(x) PERF_COUNT_HW_CACHE_##x
  
  static u64 __read_mostly hw_cache_event_ids
  				[PERF_COUNT_HW_CACHE_MAX]
  				[PERF_COUNT_HW_CACHE_OP_MAX]
  				[PERF_COUNT_HW_CACHE_RESULT_MAX];
e994d7d23   Andi Kleen   perf: Fix LLC-* e...
341
342
343
344
  static u64 __read_mostly hw_cache_extra_regs
  				[PERF_COUNT_HW_CACHE_MAX]
  				[PERF_COUNT_HW_CACHE_OP_MAX]
  				[PERF_COUNT_HW_CACHE_RESULT_MAX];
8326f44da   Ingo Molnar   perf_counter: Imp...
345

f87ad35d3   Jaswinder Singh Rajput   x86: AMD Support ...
346
  /*
cdd6c482c   Ingo Molnar   perf: Do the big ...
347
348
   * Propagate event elapsed time into the generic event.
   * Can only be executed on the CPU where the event is active.
ee06094f8   Ingo Molnar   perfcounters: res...
349
350
   * Returns the delta events processed.
   */
4b7bfd0d2   Robert Richter   perf_counter, x86...
351
  static u64
cc2ad4ba8   Peter Zijlstra   perf, x86: Remove...
352
  x86_perf_event_update(struct perf_event *event)
ee06094f8   Ingo Molnar   perfcounters: res...
353
  {
cc2ad4ba8   Peter Zijlstra   perf, x86: Remove...
354
  	struct hw_perf_event *hwc = &event->hw;
948b1bb89   Robert Richter   perf, x86: Undo s...
355
  	int shift = 64 - x86_pmu.cntval_bits;
ec3232bdf   Peter Zijlstra   perf_counter: x86...
356
  	u64 prev_raw_count, new_raw_count;
cc2ad4ba8   Peter Zijlstra   perf, x86: Remove...
357
  	int idx = hwc->idx;
ec3232bdf   Peter Zijlstra   perf_counter: x86...
358
  	s64 delta;
ee06094f8   Ingo Molnar   perfcounters: res...
359

30dd568c9   Markus Metzger   x86, perf_counter...
360
361
  	if (idx == X86_PMC_IDX_FIXED_BTS)
  		return 0;
ee06094f8   Ingo Molnar   perfcounters: res...
362
  	/*
cdd6c482c   Ingo Molnar   perf: Do the big ...
363
  	 * Careful: an NMI might modify the previous event value.
ee06094f8   Ingo Molnar   perfcounters: res...
364
365
366
  	 *
  	 * Our tactic to handle this is to first atomically read and
  	 * exchange a new raw count - then add that new-prev delta
cdd6c482c   Ingo Molnar   perf: Do the big ...
367
  	 * count to the generic event atomically:
ee06094f8   Ingo Molnar   perfcounters: res...
368
369
  	 */
  again:
e78505958   Peter Zijlstra   perf: Convert per...
370
  	prev_raw_count = local64_read(&hwc->prev_count);
73d6e5220   Robert Richter   perf, x86: Store ...
371
  	rdmsrl(hwc->event_base, new_raw_count);
ee06094f8   Ingo Molnar   perfcounters: res...
372

e78505958   Peter Zijlstra   perf: Convert per...
373
  	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
ee06094f8   Ingo Molnar   perfcounters: res...
374
375
376
377
378
379
  					new_raw_count) != prev_raw_count)
  		goto again;
  
  	/*
  	 * Now we have the new raw value and have updated the prev
  	 * timestamp already. We can now calculate the elapsed delta
cdd6c482c   Ingo Molnar   perf: Do the big ...
380
  	 * (event-)time and add that to the generic event.
ee06094f8   Ingo Molnar   perfcounters: res...
381
382
  	 *
  	 * Careful, not all hw sign-extends above the physical width
ec3232bdf   Peter Zijlstra   perf_counter: x86...
383
  	 * of the count.
ee06094f8   Ingo Molnar   perfcounters: res...
384
  	 */
ec3232bdf   Peter Zijlstra   perf_counter: x86...
385
386
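	/*
	 * Shift both counts up to bit 63 and back down: this discards any
	 * bits above cntval_bits (hardware may or may not sign-extend the
	 * read value) and sign-extends the difference consistently.
	 */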
  	delta = (new_raw_count << shift) - (prev_raw_count << shift);
  	delta >>= shift;
ee06094f8   Ingo Molnar   perfcounters: res...
387

e78505958   Peter Zijlstra   perf: Convert per...
388
389
  	local64_add(delta, &event->count);
  	local64_sub(delta, &hwc->period_left);
4b7bfd0d2   Robert Richter   perf_counter, x86...
390
391
  
  	return new_raw_count;
ee06094f8   Ingo Molnar   perfcounters: res...
392
  }
4979d2729   Robert Richter   perf, x86: Add su...
393
394
  static inline int x86_pmu_addr_offset(int index)
  {
c8e5910ed   Robert Richter   perf, x86: Use AL...
395
396
397
398
399
400
401
402
403
404
  	int offset;
  
  	/* offset = X86_FEATURE_PERFCTR_CORE ? index << 1 : index */
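	/*
	 * alternative_io() patches the NOP into "shll $1, %eax" at boot on
	 * CPUs with PERFCTR_CORE, so the hot path avoids a runtime test.
	 */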
  	alternative_io(ASM_NOP2,
  		       "shll $1, %%eax",
  		       X86_FEATURE_PERFCTR_CORE,
  		       "=a" (offset),
  		       "a"  (index));
  
  	return offset;
4979d2729   Robert Richter   perf, x86: Add su...
405
  }
41bf49894   Robert Richter   perf, x86: Calcul...
406
407
  static inline unsigned int x86_pmu_config_addr(int index)
  {
4979d2729   Robert Richter   perf, x86: Add su...
408
  	return x86_pmu.eventsel + x86_pmu_addr_offset(index);
41bf49894   Robert Richter   perf, x86: Calcul...
409
410
411
412
  }
  
  static inline unsigned int x86_pmu_event_addr(int index)
  {
4979d2729   Robert Richter   perf, x86: Add su...
413
  	return x86_pmu.perfctr + x86_pmu_addr_offset(index);
41bf49894   Robert Richter   perf, x86: Calcul...
414
  }
a7e3ed1e4   Andi Kleen   perf: Add support...
415
416
417
418
419
  /*
   * Find and validate any extra registers to set up.
   */
  static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
  {
efc9f05df   Stephane Eranian   perf_events: Upda...
420
  	struct hw_perf_event_extra *reg;
a7e3ed1e4   Andi Kleen   perf: Add support...
421
  	struct extra_reg *er;
efc9f05df   Stephane Eranian   perf_events: Upda...
422
  	reg = &event->hw.extra_reg;
a7e3ed1e4   Andi Kleen   perf: Add support...
423
424
425
426
427
428
429
430
431
  
  	if (!x86_pmu.extra_regs)
  		return 0;
  
  	for (er = x86_pmu.extra_regs; er->msr; er++) {
  		if (er->event != (config & er->config_mask))
  			continue;
  		if (event->attr.config1 & ~er->valid_mask)
  			return -EINVAL;
efc9f05df   Stephane Eranian   perf_events: Upda...
432
433
434
435
  
  		reg->idx = er->idx;
  		reg->config = event->attr.config1;
  		reg->reg = er->msr;
a7e3ed1e4   Andi Kleen   perf: Add support...
436
437
438
439
  		break;
  	}
  	return 0;
  }
cdd6c482c   Ingo Molnar   perf: Do the big ...
440
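/*
 * Number of live perf events on this PMU; the first event reserves the
 * PMC hardware (and DS buffers), the last one to go away releases them.
 */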
  static atomic_t active_events;
4e935e471   Peter Zijlstra   perf_counter: pmc...
441
  static DEFINE_MUTEX(pmc_reserve_mutex);
b27ea29c6   Robert Richter   perf/core, x86: R...
442
  #ifdef CONFIG_X86_LOCAL_APIC
4e935e471   Peter Zijlstra   perf_counter: pmc...
443
444
445
  static bool reserve_pmc_hardware(void)
  {
  	int i;
948b1bb89   Robert Richter   perf, x86: Undo s...
446
  	for (i = 0; i < x86_pmu.num_counters; i++) {
41bf49894   Robert Richter   perf, x86: Calcul...
447
  		if (!reserve_perfctr_nmi(x86_pmu_event_addr(i)))
4e935e471   Peter Zijlstra   perf_counter: pmc...
448
449
  			goto perfctr_fail;
  	}
948b1bb89   Robert Richter   perf, x86: Undo s...
450
  	for (i = 0; i < x86_pmu.num_counters; i++) {
41bf49894   Robert Richter   perf, x86: Calcul...
451
  		if (!reserve_evntsel_nmi(x86_pmu_config_addr(i)))
4e935e471   Peter Zijlstra   perf_counter: pmc...
452
453
454
455
456
457
458
  			goto eventsel_fail;
  	}
  
  	return true;
  
  eventsel_fail:
  	for (i--; i >= 0; i--)
41bf49894   Robert Richter   perf, x86: Calcul...
459
  		release_evntsel_nmi(x86_pmu_config_addr(i));
4e935e471   Peter Zijlstra   perf_counter: pmc...
460

948b1bb89   Robert Richter   perf, x86: Undo s...
461
  	i = x86_pmu.num_counters;
4e935e471   Peter Zijlstra   perf_counter: pmc...
462
463
464
  
  perfctr_fail:
  	for (i--; i >= 0; i--)
41bf49894   Robert Richter   perf, x86: Calcul...
465
  		release_perfctr_nmi(x86_pmu_event_addr(i));
4e935e471   Peter Zijlstra   perf_counter: pmc...
466

4e935e471   Peter Zijlstra   perf_counter: pmc...
467
468
469
470
471
472
  	return false;
  }
  
  static void release_pmc_hardware(void)
  {
  	int i;
948b1bb89   Robert Richter   perf, x86: Undo s...
473
  	for (i = 0; i < x86_pmu.num_counters; i++) {
41bf49894   Robert Richter   perf, x86: Calcul...
474
475
  		release_perfctr_nmi(x86_pmu_event_addr(i));
  		release_evntsel_nmi(x86_pmu_config_addr(i));
4e935e471   Peter Zijlstra   perf_counter: pmc...
476
  	}
4e935e471   Peter Zijlstra   perf_counter: pmc...
477
  }
b27ea29c6   Robert Richter   perf/core, x86: R...
478
479
480
481
482
483
  #else
  
  static bool reserve_pmc_hardware(void) { return true; }
  static void release_pmc_hardware(void) {}
  
  #endif
33c6d6a7a   Don Zickus   x86, perf, nmi: D...
484
485
486
  static bool check_hw_exists(void)
  {
  	u64 val, val_new = 0;
4407204c5   Peter Zijlstra   perf, x86: Detect...
487
  	int i, reg, ret = 0;
33c6d6a7a   Don Zickus   x86, perf, nmi: D...
488

4407204c5   Peter Zijlstra   perf, x86: Detect...
489
490
491
492
493
  	/*
  	 * Check to see if the BIOS enabled any of the counters, if so
  	 * complain and bail.
  	 */
  	for (i = 0; i < x86_pmu.num_counters; i++) {
41bf49894   Robert Richter   perf, x86: Calcul...
494
  		reg = x86_pmu_config_addr(i);
4407204c5   Peter Zijlstra   perf, x86: Detect...
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
  		ret = rdmsrl_safe(reg, &val);
  		if (ret)
  			goto msr_fail;
  		if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
  			goto bios_fail;
  	}
  
  	if (x86_pmu.num_counters_fixed) {
  		reg = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
  		ret = rdmsrl_safe(reg, &val);
  		if (ret)
  			goto msr_fail;
  		for (i = 0; i < x86_pmu.num_counters_fixed; i++) {
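			/*
			 * Each fixed counter owns a 4-bit field in the fixed
			 * control MSR; bits 0-1 of that field enable counting
			 * in ring 0 / ring 3, so a non-zero value means the
			 * BIOS left the counter armed.
			 */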
  			if (val & (0x03 << i*4))
  				goto bios_fail;
  		}
  	}
  
  	/*
  	 * Now write a value and read it back to see if it matches,
  	 * this is needed to detect certain hardware emulators (qemu/kvm)
  	 * that don't trap on the MSR access and always return 0s.
  	 */
33c6d6a7a   Don Zickus   x86, perf, nmi: D...
518
  	val = 0xabcdUL;
41bf49894   Robert Richter   perf, x86: Calcul...
519
520
  	ret = checking_wrmsrl(x86_pmu_event_addr(0), val);
  	ret |= rdmsrl_safe(x86_pmu_event_addr(0), &val_new);
33c6d6a7a   Don Zickus   x86, perf, nmi: D...
521
  	if (ret || val != val_new)
4407204c5   Peter Zijlstra   perf, x86: Detect...
522
  		goto msr_fail;
33c6d6a7a   Don Zickus   x86, perf, nmi: D...
523
524
  
  	return true;
4407204c5   Peter Zijlstra   perf, x86: Detect...
525
526
  
  bios_fail:
45daae575   Ingo Molnar   perf, x86: Compla...
527
528
529
530
531
  	/*
  	 * We still allow the PMU driver to operate:
  	 */
  	printk(KERN_CONT "Broken BIOS detected, complain to your hardware vendor.
  ");
4407204c5   Peter Zijlstra   perf, x86: Detect...
532
533
	printk(KERN_ERR FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n", reg, val);
45daae575   Ingo Molnar   perf, x86: Compla...
534
535
  
  	return true;
4407204c5   Peter Zijlstra   perf, x86: Detect...
536
537
538
539
  
  msr_fail:
  	printk(KERN_CONT "Broken PMU hardware detected, using software events only.
  ");
45daae575   Ingo Molnar   perf, x86: Compla...
540

4407204c5   Peter Zijlstra   perf, x86: Detect...
541
  	return false;
33c6d6a7a   Don Zickus   x86, perf, nmi: D...
542
  }
f80c9e304   Peter Zijlstra   perf, x86: Clean ...
543
  static void reserve_ds_buffers(void);
ca037701a   Peter Zijlstra   perf, x86: Add PE...
544
  static void release_ds_buffers(void);
30dd568c9   Markus Metzger   x86, perf_counter...
545

cdd6c482c   Ingo Molnar   perf: Do the big ...
546
  static void hw_perf_event_destroy(struct perf_event *event)
4e935e471   Peter Zijlstra   perf_counter: pmc...
547
  {
cdd6c482c   Ingo Molnar   perf: Do the big ...
548
  	if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
4e935e471   Peter Zijlstra   perf_counter: pmc...
549
  		release_pmc_hardware();
ca037701a   Peter Zijlstra   perf, x86: Add PE...
550
  		release_ds_buffers();
4e935e471   Peter Zijlstra   perf_counter: pmc...
551
552
553
  		mutex_unlock(&pmc_reserve_mutex);
  	}
  }
85cf9dba9   Robert Richter   perf_counter, x86...
554
555
556
557
  static inline int x86_pmu_initialized(void)
  {
  	return x86_pmu.handle_irq != NULL;
  }
8326f44da   Ingo Molnar   perf_counter: Imp...
558
  static inline int
e994d7d23   Andi Kleen   perf: Fix LLC-* e...
559
  set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
8326f44da   Ingo Molnar   perf_counter: Imp...
560
  {
e994d7d23   Andi Kleen   perf: Fix LLC-* e...
561
  	struct perf_event_attr *attr = &event->attr;
8326f44da   Ingo Molnar   perf_counter: Imp...
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
  	unsigned int cache_type, cache_op, cache_result;
  	u64 config, val;
  
  	config = attr->config;
  
  	cache_type = (config >>  0) & 0xff;
  	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
  		return -EINVAL;
  
  	cache_op = (config >>  8) & 0xff;
  	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
  		return -EINVAL;
  
  	cache_result = (config >> 16) & 0xff;
  	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
  		return -EINVAL;
  
  	val = hw_cache_event_ids[cache_type][cache_op][cache_result];
  
  	if (val == 0)
  		return -ENOENT;
  
  	if (val == -1)
  		return -EINVAL;
  
  	hwc->config |= val;
e994d7d23   Andi Kleen   perf: Fix LLC-* e...
588
589
  	attr->config1 = hw_cache_extra_regs[cache_type][cache_op][cache_result];
  	return x86_pmu_extra_regs(val, event);
8326f44da   Ingo Molnar   perf_counter: Imp...
590
  }
c1726f343   Robert Richter   perf, x86: Move x...
591
592
593
594
595
  static int x86_setup_perfctr(struct perf_event *event)
  {
  	struct perf_event_attr *attr = &event->attr;
  	struct hw_perf_event *hwc = &event->hw;
  	u64 config;
6c7e550f1   Franck Bui-Huu   perf: Introduce i...
596
  	if (!is_sampling_event(event)) {
c1726f343   Robert Richter   perf, x86: Move x...
597
598
  		hwc->sample_period = x86_pmu.max_period;
  		hwc->last_period = hwc->sample_period;
e78505958   Peter Zijlstra   perf: Convert per...
599
  		local64_set(&hwc->period_left, hwc->sample_period);
c1726f343   Robert Richter   perf, x86: Move x...
600
601
602
603
604
605
606
607
608
609
  	} else {
  		/*
  		 * If we have a PMU initialized but no APIC
  		 * interrupts, we cannot sample hardware
  		 * events (user-space has to fall back and
  		 * sample via a hrtimer based software event):
  		 */
  		if (!x86_pmu.apic)
  			return -EOPNOTSUPP;
  	}
b52c55c6a   Ingo Molnar   x86, perf event: ...
610
611
612
613
  	/*
  	 * Do not allow config1 (extended registers) to propagate,
  	 * there's no sane user-space generalization yet:
  	 */
c1726f343   Robert Richter   perf, x86: Move x...
614
  	if (attr->type == PERF_TYPE_RAW)
b52c55c6a   Ingo Molnar   x86, perf event: ...
615
  		return 0;
c1726f343   Robert Richter   perf, x86: Move x...
616
617
  
  	if (attr->type == PERF_TYPE_HW_CACHE)
e994d7d23   Andi Kleen   perf: Fix LLC-* e...
618
  		return set_ext_hw_attr(hwc, event);
c1726f343   Robert Richter   perf, x86: Move x...
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
  
  	if (attr->config >= x86_pmu.max_events)
  		return -EINVAL;
  
  	/*
  	 * The generic map:
  	 */
  	config = x86_pmu.event_map(attr->config);
  
  	if (config == 0)
  		return -ENOENT;
  
  	if (config == -1LL)
  		return -EINVAL;
  
  	/*
  	 * Branch tracing:
  	 */
18a073a3a   Peter Zijlstra   perf, x86: Fix BT...
637
638
  	if (attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
  	    !attr->freq && hwc->sample_period == 1) {
c1726f343   Robert Richter   perf, x86: Move x...
639
  		/* BTS is not supported by this architecture. */
6809b6ea7   Peter Zijlstra   perf, x86: Less d...
640
  		if (!x86_pmu.bts_active)
c1726f343   Robert Richter   perf, x86: Move x...
641
642
643
644
645
646
647
648
649
650
651
  			return -EOPNOTSUPP;
  
  		/* BTS is currently only allowed for user-mode. */
  		if (!attr->exclude_kernel)
  			return -EOPNOTSUPP;
  	}
  
  	hwc->config |= config;
  
  	return 0;
  }
4261e0e0e   Robert Richter   perf, x86: Move p...
652

b4cdc5c26   Peter Zijlstra   perf, x86: Fix up...
653
  static int x86_pmu_hw_config(struct perf_event *event)
a072738e0   Cyrill Gorcunov   perf, x86: Implem...
654
  {
ab608344b   Peter Zijlstra   perf, x86: Improv...
655
656
657
658
  	if (event->attr.precise_ip) {
  		int precise = 0;
  
  		/* Support for constant skid */
6809b6ea7   Peter Zijlstra   perf, x86: Less d...
659
  		if (x86_pmu.pebs_active) {
ab608344b   Peter Zijlstra   perf, x86: Improv...
660
  			precise++;
5553be262   Peter Zijlstra   perf, x86: Fixup ...
661
662
663
664
  			/* Support for IP fixup */
  			if (x86_pmu.lbr_nr)
  				precise++;
  		}
ab608344b   Peter Zijlstra   perf, x86: Improv...
665
666
667
668
  
  		if (event->attr.precise_ip > precise)
  			return -EOPNOTSUPP;
  	}
a072738e0   Cyrill Gorcunov   perf, x86: Implem...
669
670
671
672
  	/*
  	 * Generate PMC IRQs:
  	 * (keep 'enabled' bit clear for now)
  	 */
b4cdc5c26   Peter Zijlstra   perf, x86: Fix up...
673
  	event->hw.config = ARCH_PERFMON_EVENTSEL_INT;
a072738e0   Cyrill Gorcunov   perf, x86: Implem...
674
675
676
677
  
  	/*
  	 * Count user and OS events unless requested not to
  	 */
b4cdc5c26   Peter Zijlstra   perf, x86: Fix up...
678
679
680
681
  	if (!event->attr.exclude_user)
  		event->hw.config |= ARCH_PERFMON_EVENTSEL_USR;
  	if (!event->attr.exclude_kernel)
  		event->hw.config |= ARCH_PERFMON_EVENTSEL_OS;
a072738e0   Cyrill Gorcunov   perf, x86: Implem...
682

b4cdc5c26   Peter Zijlstra   perf, x86: Fix up...
683
684
  	if (event->attr.type == PERF_TYPE_RAW)
  		event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;
a072738e0   Cyrill Gorcunov   perf, x86: Implem...
685

9d0fcba67   Robert Richter   perf, x86: Call x...
686
  	return x86_setup_perfctr(event);
a098f4484   Robert Richter   perf, x86: implem...
687
  }
ee06094f8   Ingo Molnar   perfcounters: res...
688
  /*
0d48696f8   Peter Zijlstra   perf_counter: Ren...
689
   * Setup the hardware configuration for a given attr_type
241771ef0   Ingo Molnar   performance count...
690
   */
b0a873ebb   Peter Zijlstra   perf: Register PM...
691
  static int __x86_pmu_event_init(struct perf_event *event)
241771ef0   Ingo Molnar   performance count...
692
  {
4e935e471   Peter Zijlstra   perf_counter: pmc...
693
  	int err;
241771ef0   Ingo Molnar   performance count...
694

85cf9dba9   Robert Richter   perf_counter, x86...
695
696
  	if (!x86_pmu_initialized())
  		return -ENODEV;
241771ef0   Ingo Molnar   performance count...
697

4e935e471   Peter Zijlstra   perf_counter: pmc...
698
  	err = 0;
cdd6c482c   Ingo Molnar   perf: Do the big ...
699
  	if (!atomic_inc_not_zero(&active_events)) {
4e935e471   Peter Zijlstra   perf_counter: pmc...
700
  		mutex_lock(&pmc_reserve_mutex);
cdd6c482c   Ingo Molnar   perf: Do the big ...
701
  		if (atomic_read(&active_events) == 0) {
30dd568c9   Markus Metzger   x86, perf_counter...
702
703
  			if (!reserve_pmc_hardware())
  				err = -EBUSY;
f80c9e304   Peter Zijlstra   perf, x86: Clean ...
704
705
  			else
  				reserve_ds_buffers();
30dd568c9   Markus Metzger   x86, perf_counter...
706
707
  		}
  		if (!err)
cdd6c482c   Ingo Molnar   perf: Do the big ...
708
  			atomic_inc(&active_events);
4e935e471   Peter Zijlstra   perf_counter: pmc...
709
710
711
712
  		mutex_unlock(&pmc_reserve_mutex);
  	}
  	if (err)
  		return err;
cdd6c482c   Ingo Molnar   perf: Do the big ...
713
  	event->destroy = hw_perf_event_destroy;
a1792cdac   Peter Zijlstra   perf_counter: x86...
714

4261e0e0e   Robert Richter   perf, x86: Move p...
715
716
717
  	event->hw.idx = -1;
  	event->hw.last_cpu = -1;
  	event->hw.last_tag = ~0ULL;
b690081d4   Stephane Eranian   perf_events: Add ...
718

efc9f05df   Stephane Eranian   perf_events: Upda...
719
720
  	/* mark unused */
  	event->hw.extra_reg.idx = EXTRA_REG_NONE;
9d0fcba67   Robert Richter   perf, x86: Call x...
721
  	return x86_pmu.hw_config(event);
4261e0e0e   Robert Richter   perf, x86: Move p...
722
  }
8c48e4441   Peter Zijlstra   perf_events, x86:...
723
  static void x86_pmu_disable_all(void)
f87ad35d3   Jaswinder Singh Rajput   x86: AMD Support ...
724
  {
cdd6c482c   Ingo Molnar   perf: Do the big ...
725
  	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
9e35ad388   Peter Zijlstra   perf_counter: Rew...
726
  	int idx;
948b1bb89   Robert Richter   perf, x86: Undo s...
727
  	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
b0f3f28e0   Peter Zijlstra   perfcounters: IRQ...
728
  		u64 val;
43f6201a2   Robert Richter   perf_counter, x86...
729
  		if (!test_bit(idx, cpuc->active_mask))
4295ee626   Robert Richter   perf_counter, x86...
730
  			continue;
41bf49894   Robert Richter   perf, x86: Calcul...
731
  		rdmsrl(x86_pmu_config_addr(idx), val);
bb1165d68   Robert Richter   perf, x86: rename...
732
  		if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
4295ee626   Robert Richter   perf_counter, x86...
733
  			continue;
bb1165d68   Robert Richter   perf, x86: rename...
734
  		val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
41bf49894   Robert Richter   perf, x86: Calcul...
735
  		wrmsrl(x86_pmu_config_addr(idx), val);
f87ad35d3   Jaswinder Singh Rajput   x86: AMD Support ...
736
  	}
f87ad35d3   Jaswinder Singh Rajput   x86: AMD Support ...
737
  }
a4eaf7f14   Peter Zijlstra   perf: Rework the ...
738
  static void x86_pmu_disable(struct pmu *pmu)
b56a3802d   Jaswinder Singh Rajput   x86: prepare perf...
739
  {
1da53e023   Stephane Eranian   perf_events, x86:...
740
  	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
85cf9dba9   Robert Richter   perf_counter, x86...
741
  	if (!x86_pmu_initialized())
9e35ad388   Peter Zijlstra   perf_counter: Rew...
742
  		return;
1da53e023   Stephane Eranian   perf_events, x86:...
743

1a6e21f79   Peter Zijlstra   perf_events, x86:...
744
745
746
747
748
749
  	if (!cpuc->enabled)
  		return;
  
  	cpuc->n_added = 0;
  	cpuc->enabled = 0;
  	barrier();
1da53e023   Stephane Eranian   perf_events, x86:...
750
751
  
  	x86_pmu.disable_all();
b56a3802d   Jaswinder Singh Rajput   x86: prepare perf...
752
  }
241771ef0   Ingo Molnar   performance count...
753

d45dd923f   Robert Richter   perf, x86: Use he...
754
755
756
  static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
  					  u64 enable_mask)
  {
efc9f05df   Stephane Eranian   perf_events: Upda...
757
758
  	if (hwc->extra_reg.reg)
  		wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
73d6e5220   Robert Richter   perf, x86: Store ...
759
  	wrmsrl(hwc->config_base, hwc->config | enable_mask);
d45dd923f   Robert Richter   perf, x86: Use he...
760
  }
11164cd4f   Peter Zijlstra   perf, x86: Add Ne...
761
  static void x86_pmu_enable_all(int added)
f87ad35d3   Jaswinder Singh Rajput   x86: AMD Support ...
762
  {
cdd6c482c   Ingo Molnar   perf: Do the big ...
763
  	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
f87ad35d3   Jaswinder Singh Rajput   x86: AMD Support ...
764
  	int idx;
948b1bb89   Robert Richter   perf, x86: Undo s...
765
  	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
d45dd923f   Robert Richter   perf, x86: Use he...
766
  		struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
b0f3f28e0   Peter Zijlstra   perfcounters: IRQ...
767

43f6201a2   Robert Richter   perf_counter, x86...
768
  		if (!test_bit(idx, cpuc->active_mask))
4295ee626   Robert Richter   perf_counter, x86...
769
  			continue;
984b838ce   Peter Zijlstra   perf_counter: Cle...
770

d45dd923f   Robert Richter   perf, x86: Use he...
771
  		__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
f87ad35d3   Jaswinder Singh Rajput   x86: AMD Support ...
772
773
  	}
  }
51b0fe395   Peter Zijlstra   perf: Deconstify ...
774
  static struct pmu pmu;
1da53e023   Stephane Eranian   perf_events, x86:...
775
776
777
778
779
780
781
782
  
  static inline int is_x86_event(struct perf_event *event)
  {
  	return event->pmu == &pmu;
  }
  
  static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
  {
63b146490   Peter Zijlstra   perf_event: x86: ...
783
  	struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
1da53e023   Stephane Eranian   perf_events, x86:...
784
  	unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
c933c1a60   Peter Zijlstra   perf_event: x86: ...
785
  	int i, j, w, wmax, num = 0;
1da53e023   Stephane Eranian   perf_events, x86:...
786
787
788
789
790
  	struct hw_perf_event *hwc;
  
  	bitmap_zero(used_mask, X86_PMC_IDX_MAX);
  
  	for (i = 0; i < n; i++) {
b622d644c   Peter Zijlstra   perf_events, x86:...
791
792
  		c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
  		constraints[i] = c;
1da53e023   Stephane Eranian   perf_events, x86:...
793
794
795
  	}
  
  	/*
8113070d6   Stephane Eranian   perf_events: Add ...
796
797
  	 * fastpath, try to reuse previous register
  	 */
c933c1a60   Peter Zijlstra   perf_event: x86: ...
798
  	for (i = 0; i < n; i++) {
8113070d6   Stephane Eranian   perf_events: Add ...
799
  		hwc = &cpuc->event_list[i]->hw;
81269a085   Peter Zijlstra   perf_event: x86: ...
800
  		c = constraints[i];
8113070d6   Stephane Eranian   perf_events: Add ...
801
802
803
804
805
806
  
  		/* never assigned */
  		if (hwc->idx == -1)
  			break;
  
  		/* constraint still honored */
63b146490   Peter Zijlstra   perf_event: x86: ...
807
  		if (!test_bit(hwc->idx, c->idxmsk))
8113070d6   Stephane Eranian   perf_events: Add ...
808
809
810
811
812
  			break;
  
  		/* not already used */
  		if (test_bit(hwc->idx, used_mask))
  			break;
34538ee77   Peter Zijlstra   perf, x86: Use un...
813
  		__set_bit(hwc->idx, used_mask);
8113070d6   Stephane Eranian   perf_events: Add ...
814
815
816
  		if (assign)
  			assign[i] = hwc->idx;
  	}
c933c1a60   Peter Zijlstra   perf_event: x86: ...
817
  	if (i == n)
8113070d6   Stephane Eranian   perf_events: Add ...
818
819
820
821
822
823
824
825
826
  		goto done;
  
  	/*
  	 * begin slow path
  	 */
  
  	bitmap_zero(used_mask, X86_PMC_IDX_MAX);
  
  	/*
1da53e023   Stephane Eranian   perf_events, x86:...
827
828
829
830
831
832
833
834
  	 * weight = number of possible counters
  	 *
  	 * 1    = most constrained, only works on one counter
  	 * wmax = least constrained, works on any counter
  	 *
  	 * assign events to counters starting with most
  	 * constrained events.
  	 */
948b1bb89   Robert Richter   perf, x86: Undo s...
835
  	wmax = x86_pmu.num_counters;
1da53e023   Stephane Eranian   perf_events, x86:...
836
837
838
839
840
841
  
  	/*
  	 * when fixed event counters are present,
  	 * wmax is incremented by 1 to account
  	 * for one more choice
  	 */
948b1bb89   Robert Richter   perf, x86: Undo s...
842
  	if (x86_pmu.num_counters_fixed)
1da53e023   Stephane Eranian   perf_events, x86:...
843
  		wmax++;
8113070d6   Stephane Eranian   perf_events: Add ...
844
  	for (w = 1, num = n; num && w <= wmax; w++) {
1da53e023   Stephane Eranian   perf_events, x86:...
845
  		/* for each event */
8113070d6   Stephane Eranian   perf_events: Add ...
846
  		for (i = 0; num && i < n; i++) {
81269a085   Peter Zijlstra   perf_event: x86: ...
847
  			c = constraints[i];
1da53e023   Stephane Eranian   perf_events, x86:...
848
  			hwc = &cpuc->event_list[i]->hw;
272d30be6   Peter Zijlstra   perf_event: x86: ...
849
  			if (c->weight != w)
1da53e023   Stephane Eranian   perf_events, x86:...
850
  				continue;
984b3f574   Akinobu Mita   bitops: rename fo...
851
  			for_each_set_bit(j, c->idxmsk, X86_PMC_IDX_MAX) {
1da53e023   Stephane Eranian   perf_events, x86:...
852
853
854
855
856
857
  				if (!test_bit(j, used_mask))
  					break;
  			}
  
  			if (j == X86_PMC_IDX_MAX)
  				break;
1da53e023   Stephane Eranian   perf_events, x86:...
858

34538ee77   Peter Zijlstra   perf, x86: Use un...
859
  			__set_bit(j, used_mask);
8113070d6   Stephane Eranian   perf_events: Add ...
860

1da53e023   Stephane Eranian   perf_events, x86:...
861
862
863
864
865
  			if (assign)
  				assign[i] = j;
  			num--;
  		}
  	}
8113070d6   Stephane Eranian   perf_events: Add ...
866
  done:
1da53e023   Stephane Eranian   perf_events, x86:...
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
  	/*
  	 * scheduling failed or is just a simulation,
  	 * free resources if necessary
  	 */
  	if (!assign || num) {
  		for (i = 0; i < n; i++) {
  			if (x86_pmu.put_event_constraints)
  				x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
  		}
  	}
  	return num ? -ENOSPC : 0;
  }
  
  /*
 * dogrp: true if we must also collect the leader's sibling events (group)
 * Returns the total number of collected events, or a negative error code.
   */
  static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
  {
  	struct perf_event *event;
  	int n, max_count;
948b1bb89   Robert Richter   perf, x86: Undo s...
888
  	max_count = x86_pmu.num_counters + x86_pmu.num_counters_fixed;
1da53e023   Stephane Eranian   perf_events, x86:...
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
  
  	/* current number of events already accepted */
  	n = cpuc->n_events;
  
  	if (is_x86_event(leader)) {
  		if (n >= max_count)
  			return -ENOSPC;
  		cpuc->event_list[n] = leader;
  		n++;
  	}
  	if (!dogrp)
  		return n;
  
  	list_for_each_entry(event, &leader->sibling_list, group_entry) {
  		if (!is_x86_event(event) ||
8113070d6   Stephane Eranian   perf_events: Add ...
904
  		    event->state <= PERF_EVENT_STATE_OFF)
1da53e023   Stephane Eranian   perf_events, x86:...
905
906
907
908
909
910
911
912
913
914
  			continue;
  
  		if (n >= max_count)
  			return -ENOSPC;
  
  		cpuc->event_list[n] = event;
  		n++;
  	}
  	return n;
  }
1da53e023   Stephane Eranian   perf_events, x86:...
915
  static inline void x86_assign_hw_event(struct perf_event *event,
447a194b3   Stephane Eranian   perf_events, x86:...
916
  				struct cpu_hw_events *cpuc, int i)
1da53e023   Stephane Eranian   perf_events, x86:...
917
  {
447a194b3   Stephane Eranian   perf_events, x86:...
918
919
920
921
922
  	struct hw_perf_event *hwc = &event->hw;
  
  	hwc->idx = cpuc->assign[i];
  	hwc->last_cpu = smp_processor_id();
  	hwc->last_tag = ++cpuc->tags[i];
1da53e023   Stephane Eranian   perf_events, x86:...
923
924
925
926
927
928
  
  	if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
  		hwc->config_base = 0;
  		hwc->event_base	= 0;
  	} else if (hwc->idx >= X86_PMC_IDX_FIXED) {
  		hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
fc66c5210   Stephane Eranian   perf, x86: Fix In...
929
  		hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - X86_PMC_IDX_FIXED);
1da53e023   Stephane Eranian   perf_events, x86:...
930
  	} else {
73d6e5220   Robert Richter   perf, x86: Store ...
931
932
  		hwc->config_base = x86_pmu_config_addr(hwc->idx);
  		hwc->event_base  = x86_pmu_event_addr(hwc->idx);
1da53e023   Stephane Eranian   perf_events, x86:...
933
934
  	}
  }
447a194b3   Stephane Eranian   perf_events, x86:...
935
936
937
938
939
940
941
942
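/*
 * An event may keep its previous hardware counter only if it is still
 * assigned the same counter index, on the same CPU, and no other event
 * has been scheduled on that counter since (same allocation tag).
 */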
  static inline int match_prev_assignment(struct hw_perf_event *hwc,
  					struct cpu_hw_events *cpuc,
  					int i)
  {
  	return hwc->idx == cpuc->assign[i] &&
  		hwc->last_cpu == smp_processor_id() &&
  		hwc->last_tag == cpuc->tags[i];
  }
a4eaf7f14   Peter Zijlstra   perf: Rework the ...
943
944
  static void x86_pmu_start(struct perf_event *event, int flags);
  static void x86_pmu_stop(struct perf_event *event, int flags);
2e8418736   Peter Zijlstra   perf_event: x86: ...
945

a4eaf7f14   Peter Zijlstra   perf: Rework the ...
946
  static void x86_pmu_enable(struct pmu *pmu)
ee06094f8   Ingo Molnar   perfcounters: res...
947
  {
1da53e023   Stephane Eranian   perf_events, x86:...
948
949
950
  	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
  	struct perf_event *event;
  	struct hw_perf_event *hwc;
11164cd4f   Peter Zijlstra   perf, x86: Add Ne...
951
  	int i, added = cpuc->n_added;
1da53e023   Stephane Eranian   perf_events, x86:...
952

85cf9dba9   Robert Richter   perf_counter, x86...
953
  	if (!x86_pmu_initialized())
2b9ff0db1   Ingo Molnar   perfcounters: fix...
954
  		return;
1a6e21f79   Peter Zijlstra   perf_events, x86:...
955
956
957
  
  	if (cpuc->enabled)
  		return;
1da53e023   Stephane Eranian   perf_events, x86:...
958
  	if (cpuc->n_added) {
19925ce77   Peter Zijlstra   perf, x86: Fix do...
959
  		int n_running = cpuc->n_events - cpuc->n_added;
1da53e023   Stephane Eranian   perf_events, x86:...
960
961
962
963
964
965
966
  		/*
  		 * apply assignment obtained either from
  		 * hw_perf_group_sched_in() or x86_pmu_enable()
  		 *
  		 * step1: save events moving to new counters
  		 * step2: reprogram moved events into new counters
  		 */
19925ce77   Peter Zijlstra   perf, x86: Fix do...
967
  		for (i = 0; i < n_running; i++) {
1da53e023   Stephane Eranian   perf_events, x86:...
968
969
  			event = cpuc->event_list[i];
  			hwc = &event->hw;
447a194b3   Stephane Eranian   perf_events, x86:...
970
971
972
973
974
975
976
977
  			/*
  			 * we can avoid reprogramming counter if:
  			 * - assigned same counter as last time
  			 * - running on same CPU as last time
  			 * - no other event has used the counter since
  			 */
  			if (hwc->idx == -1 ||
  			    match_prev_assignment(hwc, cpuc, i))
1da53e023   Stephane Eranian   perf_events, x86:...
978
  				continue;
a4eaf7f14   Peter Zijlstra   perf: Rework the ...
979
980
981
982
983
984
985
986
  			/*
  			 * Ensure we don't accidentally enable a stopped
  			 * counter simply because we rescheduled.
  			 */
  			if (hwc->state & PERF_HES_STOPPED)
  				hwc->state |= PERF_HES_ARCH;
  
  			x86_pmu_stop(event, PERF_EF_UPDATE);
1da53e023   Stephane Eranian   perf_events, x86:...
987
988
989
  		}
  
  		for (i = 0; i < cpuc->n_events; i++) {
1da53e023   Stephane Eranian   perf_events, x86:...
990
991
  			event = cpuc->event_list[i];
  			hwc = &event->hw;
45e16a683   Peter Zijlstra   perf, x86: Fix hw...
992
  			if (!match_prev_assignment(hwc, cpuc, i))
447a194b3   Stephane Eranian   perf_events, x86:...
993
  				x86_assign_hw_event(event, cpuc, i);
45e16a683   Peter Zijlstra   perf, x86: Fix hw...
994
995
  			else if (i < n_running)
  				continue;
1da53e023   Stephane Eranian   perf_events, x86:...
996

a4eaf7f14   Peter Zijlstra   perf: Rework the ...
997
998
999
1000
  			if (hwc->state & PERF_HES_ARCH)
  				continue;
  
  			x86_pmu_start(event, PERF_EF_RELOAD);
1da53e023   Stephane Eranian   perf_events, x86:...
1001
1002
1003
1004
  		}
  		cpuc->n_added = 0;
  		perf_events_lapic_init();
  	}
1a6e21f79   Peter Zijlstra   perf_events, x86:...
1005
1006
1007
  
  	cpuc->enabled = 1;
  	barrier();
11164cd4f   Peter Zijlstra   perf, x86: Add Ne...
1008
  	x86_pmu.enable_all(added);
ee06094f8   Ingo Molnar   perfcounters: res...
1009
  }
ee06094f8   Ingo Molnar   perfcounters: res...
1010

aff3d91a9   Peter Zijlstra   perf, x86: Change...
1011
  static inline void x86_pmu_disable_event(struct perf_event *event)
b0f3f28e0   Peter Zijlstra   perfcounters: IRQ...
1012
  {
aff3d91a9   Peter Zijlstra   perf, x86: Change...
1013
  	struct hw_perf_event *hwc = &event->hw;
7645a24cb   Peter Zijlstra   perf, x86: Remove...
1014

73d6e5220   Robert Richter   perf, x86: Store ...
1015
  	wrmsrl(hwc->config_base, hwc->config);
b0f3f28e0   Peter Zijlstra   perfcounters: IRQ...
1016
  }
245b2e70e   Tejun Heo   percpu: clean up ...
1017
  static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
241771ef0   Ingo Molnar   performance count...
1018

ee06094f8   Ingo Molnar   perfcounters: res...
1019
1020
  /*
   * Set the next IRQ period, based on the hwc->period_left value.
cdd6c482c   Ingo Molnar   perf: Do the big ...
1021
   * To be called with the event disabled in hw:
ee06094f8   Ingo Molnar   perfcounters: res...
1022
   */
e4abb5d4f   Peter Zijlstra   perf_counter: x86...
1023
  static int
07088edb8   Peter Zijlstra   perf, x86: Remove...
1024
  x86_perf_event_set_period(struct perf_event *event)
241771ef0   Ingo Molnar   performance count...
1025
  {
07088edb8   Peter Zijlstra   perf, x86: Remove...
1026
  	struct hw_perf_event *hwc = &event->hw;
e78505958   Peter Zijlstra   perf: Convert per...
1027
  	s64 left = local64_read(&hwc->period_left);
e4abb5d4f   Peter Zijlstra   perf_counter: x86...
1028
  	s64 period = hwc->sample_period;
7645a24cb   Peter Zijlstra   perf, x86: Remove...
1029
  	int ret = 0, idx = hwc->idx;
ee06094f8   Ingo Molnar   perfcounters: res...
1030

30dd568c9   Markus Metzger   x86, perf_counter...
1031
1032
  	if (idx == X86_PMC_IDX_FIXED_BTS)
  		return 0;
ee06094f8   Ingo Molnar   perfcounters: res...
1033
  	/*
af901ca18   André Goddard Rosa   tree-wide: fix as...
1034
  	 * If we are way outside a reasonable range then just skip forward:
ee06094f8   Ingo Molnar   perfcounters: res...
1035
1036
1037
  	 */
  	if (unlikely(left <= -period)) {
  		left = period;
e78505958   Peter Zijlstra   perf: Convert per...
1038
  		local64_set(&hwc->period_left, left);
9e350de37   Peter Zijlstra   perf_counter: Acc...
1039
  		hwc->last_period = period;
e4abb5d4f   Peter Zijlstra   perf_counter: x86...
1040
  		ret = 1;
ee06094f8   Ingo Molnar   perfcounters: res...
1041
1042
1043
1044
  	}
  
  	if (unlikely(left <= 0)) {
  		left += period;
e78505958   Peter Zijlstra   perf: Convert per...
1045
  		local64_set(&hwc->period_left, left);
9e350de37   Peter Zijlstra   perf_counter: Acc...
1046
  		hwc->last_period = period;
e4abb5d4f   Peter Zijlstra   perf_counter: x86...
1047
  		ret = 1;
ee06094f8   Ingo Molnar   perfcounters: res...
1048
  	}
1c80f4b59   Ingo Molnar   perf_counter: x86...
1049
  	/*
dfc65094d   Ingo Molnar   perf_counter: Ren...
1050
	 * Quirk: certain CPUs don't like it if just 1 hw_event is left:
1c80f4b59   Ingo Molnar   perf_counter: x86...
1051
1052
1053
  	 */
  	if (unlikely(left < 2))
  		left = 2;
241771ef0   Ingo Molnar   performance count...
1054

e4abb5d4f   Peter Zijlstra   perf_counter: x86...
1055
1056
  	if (left > x86_pmu.max_period)
  		left = x86_pmu.max_period;
245b2e70e   Tejun Heo   percpu: clean up ...
1057
  	per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
ee06094f8   Ingo Molnar   perfcounters: res...
1058
1059
  
  	/*
cdd6c482c   Ingo Molnar   perf: Do the big ...
1060
  	 * The hw event starts counting from this event offset,
ee06094f8   Ingo Molnar   perfcounters: res...
1061
1062
	 * mark it to be able to extract future deltas:
  	 */
e78505958   Peter Zijlstra   perf: Convert per...
1063
  	local64_set(&hwc->prev_count, (u64)-left);
ee06094f8   Ingo Molnar   perfcounters: res...
1064

73d6e5220   Robert Richter   perf, x86: Store ...
1065
  	wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
68aa00ac0   Cyrill Gorcunov   perf, x86: Make a...
1066
1067
1068
1069
1070
1071
1072
  
  	/*
	 * Due to an erratum on certain CPUs we need
  	 * a second write to be sure the register
  	 * is updated properly
  	 */
  	if (x86_pmu.perfctr_second_write) {
73d6e5220   Robert Richter   perf, x86: Store ...
1073
  		wrmsrl(hwc->event_base,
948b1bb89   Robert Richter   perf, x86: Undo s...
1074
  			(u64)(-left) & x86_pmu.cntval_mask);
68aa00ac0   Cyrill Gorcunov   perf, x86: Make a...
1075
  	}
e4abb5d4f   Peter Zijlstra   perf_counter: x86...
1076

cdd6c482c   Ingo Molnar   perf: Do the big ...
1077
  	perf_event_update_userpage(event);
194002b27   Peter Zijlstra   perf_counter, x86...
1078

e4abb5d4f   Peter Zijlstra   perf_counter: x86...
1079
  	return ret;
2f18d1e8d   Ingo Molnar   x86, perfcounters...
1080
  }
aff3d91a9   Peter Zijlstra   perf, x86: Change...
1081
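  /*
   * Re-enable the event in hardware, but only while the PMU itself is
   * enabled; otherwise the counter gets (re)programmed when the PMU is
   * enabled again.
   */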
  static void x86_pmu_enable_event(struct perf_event *event)
7c90cc45f   Robert Richter   perf_counter, x86...
1082
  {
0a3aee0da   Tejun Heo   x86: Use this_cpu...
1083
  	if (__this_cpu_read(cpu_hw_events.enabled))
31fa58af5   Robert Richter   perf, x86: Pass e...
1084
1085
  		__x86_pmu_enable_event(&event->hw,
  				       ARCH_PERFMON_EVENTSEL_ENABLE);
241771ef0   Ingo Molnar   performance count...
1086
  }
b690081d4   Stephane Eranian   perf_events: Add ...
1087
  /*
a4eaf7f14   Peter Zijlstra   perf: Rework the ...
1088
   * Add a single event to the PMU.
1da53e023   Stephane Eranian   perf_events, x86:...
1089
1090
1091
   *
   * The event is added to the group of enabled events
   * but only if it can be scheduled with existing events.
fe9081cc9   Peter Zijlstra   perf, x86: Add si...
1092
   */
a4eaf7f14   Peter Zijlstra   perf: Rework the ...
1093
  static int x86_pmu_add(struct perf_event *event, int flags)
fe9081cc9   Peter Zijlstra   perf, x86: Add si...
1094
1095
  {
  	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1da53e023   Stephane Eranian   perf_events, x86:...
1096
1097
1098
  	struct hw_perf_event *hwc;
  	int assign[X86_PMC_IDX_MAX];
  	int n, n0, ret;
fe9081cc9   Peter Zijlstra   perf, x86: Add si...
1099

1da53e023   Stephane Eranian   perf_events, x86:...
1100
  	hwc = &event->hw;
fe9081cc9   Peter Zijlstra   perf, x86: Add si...
1101

33696fc0d   Peter Zijlstra   perf: Per PMU dis...
1102
  	perf_pmu_disable(event->pmu);
1da53e023   Stephane Eranian   perf_events, x86:...
1103
  	n0 = cpuc->n_events;
24cd7f54a   Peter Zijlstra   perf: Reduce perf...
1104
1105
1106
  	ret = n = collect_events(cpuc, event, false);
  	if (ret < 0)
  		goto out;
53b441a56   Ingo Molnar   Revert "perf_coun...
1107

a4eaf7f14   Peter Zijlstra   perf: Rework the ...
1108
1109
1110
  	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
  	if (!(flags & PERF_EF_START))
  		hwc->state |= PERF_HES_ARCH;
4d1c52b02   Lin Ming   perf, x86: implem...
1111
1112
  	/*
  	 * If group events scheduling transaction was started,
0d2eb44f6   Lucas De Marchi   x86: Fix common m...
1113
  	 * skip the schedulability test here, it will be performed
a4eaf7f14   Peter Zijlstra   perf: Rework the ...
1114
  	 * at commit time (->commit_txn) as a whole
4d1c52b02   Lin Ming   perf, x86: implem...
1115
  	 */
8d2cacbbb   Peter Zijlstra   perf: Cleanup {st...
1116
  	if (cpuc->group_flag & PERF_EVENT_TXN)
24cd7f54a   Peter Zijlstra   perf: Reduce perf...
1117
  		goto done_collect;
4d1c52b02   Lin Ming   perf, x86: implem...
1118

a072738e0   Cyrill Gorcunov   perf, x86: Implem...
1119
  	ret = x86_pmu.schedule_events(cpuc, n, assign);
1da53e023   Stephane Eranian   perf_events, x86:...
1120
  	if (ret)
24cd7f54a   Peter Zijlstra   perf: Reduce perf...
1121
  		goto out;
1da53e023   Stephane Eranian   perf_events, x86:...
1122
1123
1124
1125
1126
  	/*
  	 * Copy the new assignment now that we know it is possible;
  	 * it will be used by hw_perf_enable().
  	 */
  	memcpy(cpuc->assign, assign, n*sizeof(int));
7e2ae3474   Ingo Molnar   perfcounters, x86...
1127

24cd7f54a   Peter Zijlstra   perf: Reduce perf...
1128
  done_collect:
1da53e023   Stephane Eranian   perf_events, x86:...
1129
  	cpuc->n_events = n;
356e1f2e0   Peter Zijlstra   perf, x86: Proper...
1130
  	cpuc->n_added += n - n0;
90151c35b   Stephane Eranian   perf_events: Fix ...
1131
  	cpuc->n_txn += n - n0;
95cdd2e78   Ingo Molnar   perfcounters: ena...
1132

24cd7f54a   Peter Zijlstra   perf: Reduce perf...
1133
1134
  	ret = 0;
  out:
33696fc0d   Peter Zijlstra   perf: Per PMU dis...
1135
  	perf_pmu_enable(event->pmu);
24cd7f54a   Peter Zijlstra   perf: Reduce perf...
1136
  	return ret;
241771ef0   Ingo Molnar   performance count...
1137
  }
a4eaf7f14   Peter Zijlstra   perf: Rework the ...
1138
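  /*
   * Start a previously added (and currently stopped) event: optionally
   * reload its sample period, mark the counter active and enable it in
   * hardware.
   */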
  static void x86_pmu_start(struct perf_event *event, int flags)
d76a0812a   Stephane Eranian   perf_events: Add ...
1139
  {
c08053e62   Peter Zijlstra   perf, x86: Fix x8...
1140
1141
  	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
  	int idx = event->hw.idx;
a4eaf7f14   Peter Zijlstra   perf: Rework the ...
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
  	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
  		return;
  
  	if (WARN_ON_ONCE(idx == -1))
  		return;
  
  	if (flags & PERF_EF_RELOAD) {
  		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
  		x86_perf_event_set_period(event);
  	}
  
  	event->hw.state = 0;
d76a0812a   Stephane Eranian   perf_events: Add ...
1154

c08053e62   Peter Zijlstra   perf, x86: Fix x8...
1155
1156
  	cpuc->events[idx] = event;
  	__set_bit(idx, cpuc->active_mask);
63e6be6d9   Robert Richter   perf, x86: Catch ...
1157
  	__set_bit(idx, cpuc->running);
aff3d91a9   Peter Zijlstra   perf, x86: Change...
1158
  	x86_pmu.enable(event);
c08053e62   Peter Zijlstra   perf, x86: Fix x8...
1159
  	perf_event_update_userpage(event);
a78ac3258   Peter Zijlstra   perf_counter: Gen...
1160
  }
cdd6c482c   Ingo Molnar   perf: Do the big ...
1161
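  /*
   * Dump the PMU register state of the local CPU: the global
   * control/status MSRs (when the PMU version provides them) plus each
   * counter's control, count and remaining 'left' value.
   */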
  void perf_event_print_debug(void)
241771ef0   Ingo Molnar   performance count...
1162
  {
2f18d1e8d   Ingo Molnar   x86, perfcounters...
1163
  	u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
ca037701a   Peter Zijlstra   perf, x86: Add PE...
1164
  	u64 pebs;
cdd6c482c   Ingo Molnar   perf: Do the big ...
1165
  	struct cpu_hw_events *cpuc;
5bb9efe33   Peter Zijlstra   perf_counter: fix...
1166
  	unsigned long flags;
1e1256767   Ingo Molnar   perfcounters, x86...
1167
  	int cpu, idx;
948b1bb89   Robert Richter   perf, x86: Undo s...
1168
  	if (!x86_pmu.num_counters)
1e1256767   Ingo Molnar   perfcounters, x86...
1169
  		return;
241771ef0   Ingo Molnar   performance count...
1170

5bb9efe33   Peter Zijlstra   perf_counter: fix...
1171
  	local_irq_save(flags);
241771ef0   Ingo Molnar   performance count...
1172
1173
  
  	cpu = smp_processor_id();
cdd6c482c   Ingo Molnar   perf: Do the big ...
1174
  	cpuc = &per_cpu(cpu_hw_events, cpu);
241771ef0   Ingo Molnar   performance count...
1175

faa28ae01   Robert Richter   perf_counter, x86...
1176
  	if (x86_pmu.version >= 2) {
a1ef58f44   Jaswinder Singh Rajput   x86: use pr_info ...
1177
1178
1179
1180
  		rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
  		rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
  		rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
  		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
ca037701a   Peter Zijlstra   perf, x86: Add PE...
1181
  		rdmsrl(MSR_IA32_PEBS_ENABLE, pebs);
a1ef58f44   Jaswinder Singh Rajput   x86: use pr_info ...
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
  
  		pr_info("
  ");
  		pr_info("CPU#%d: ctrl:       %016llx
  ", cpu, ctrl);
  		pr_info("CPU#%d: status:     %016llx
  ", cpu, status);
  		pr_info("CPU#%d: overflow:   %016llx
  ", cpu, overflow);
  		pr_info("CPU#%d: fixed:      %016llx
  ", cpu, fixed);
ca037701a   Peter Zijlstra   perf, x86: Add PE...
1193
1194
  		pr_info("CPU#%d: pebs:       %016llx
  ", cpu, pebs);
f87ad35d3   Jaswinder Singh Rajput   x86: AMD Support ...
1195
  	}
7645a24cb   Peter Zijlstra   perf, x86: Remove...
1196
1197
  	pr_info("CPU#%d: active:     %016llx
  ", cpu, *(u64 *)cpuc->active_mask);
241771ef0   Ingo Molnar   performance count...
1198

948b1bb89   Robert Richter   perf, x86: Undo s...
1199
  	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
41bf49894   Robert Richter   perf, x86: Calcul...
1200
1201
  		rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl);
  		rdmsrl(x86_pmu_event_addr(idx), pmc_count);
241771ef0   Ingo Molnar   performance count...
1202

245b2e70e   Tejun Heo   percpu: clean up ...
1203
  		prev_left = per_cpu(pmc_prev_left[idx], cpu);
241771ef0   Ingo Molnar   performance count...
1204

a1ef58f44   Jaswinder Singh Rajput   x86: use pr_info ...
1205
1206
  		pr_info("CPU#%d:   gen-PMC%d ctrl:  %016llx
  ",
241771ef0   Ingo Molnar   performance count...
1207
  			cpu, idx, pmc_ctrl);
a1ef58f44   Jaswinder Singh Rajput   x86: use pr_info ...
1208
1209
  		pr_info("CPU#%d:   gen-PMC%d count: %016llx
  ",
241771ef0   Ingo Molnar   performance count...
1210
  			cpu, idx, pmc_count);
a1ef58f44   Jaswinder Singh Rajput   x86: use pr_info ...
1211
1212
  		pr_info("CPU#%d:   gen-PMC%d left:  %016llx
  ",
ee06094f8   Ingo Molnar   perfcounters: res...
1213
  			cpu, idx, prev_left);
241771ef0   Ingo Molnar   performance count...
1214
  	}
948b1bb89   Robert Richter   perf, x86: Undo s...
1215
  	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
2f18d1e8d   Ingo Molnar   x86, perfcounters...
1216
  		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
a1ef58f44   Jaswinder Singh Rajput   x86: use pr_info ...
1217
1218
  		pr_info("CPU#%d: fixed-PMC%d count: %016llx
  ",
2f18d1e8d   Ingo Molnar   x86, perfcounters...
1219
1220
  			cpu, idx, pmc_count);
  	}
5bb9efe33   Peter Zijlstra   perf_counter: fix...
1221
  	local_irq_restore(flags);
241771ef0   Ingo Molnar   performance count...
1222
  }
a4eaf7f14   Peter Zijlstra   perf: Rework the ...
1223
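  /*
   * Stop the event's counter: disable it in hardware and, when
   * PERF_EF_UPDATE is set, fold the remaining hardware count back into
   * the event.
   */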
  static void x86_pmu_stop(struct perf_event *event, int flags)
241771ef0   Ingo Molnar   performance count...
1224
  {
d76a0812a   Stephane Eranian   perf_events: Add ...
1225
  	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
cdd6c482c   Ingo Molnar   perf: Do the big ...
1226
  	struct hw_perf_event *hwc = &event->hw;
241771ef0   Ingo Molnar   performance count...
1227

a4eaf7f14   Peter Zijlstra   perf: Rework the ...
1228
1229
1230
1231
1232
1233
  	if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
  		x86_pmu.disable(event);
  		cpuc->events[hwc->idx] = NULL;
  		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
  		hwc->state |= PERF_HES_STOPPED;
  	}
30dd568c9   Markus Metzger   x86, perf_counter...
1234

a4eaf7f14   Peter Zijlstra   perf: Rework the ...
1235
1236
1237
1238
1239
1240
1241
1242
  	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
  		/*
  		 * Drain the remaining delta count out of an event
  		 * that we are disabling:
  		 */
  		x86_perf_event_update(event);
  		hwc->state |= PERF_HES_UPTODATE;
  	}
2e8418736   Peter Zijlstra   perf_event: x86: ...
1243
  }
a4eaf7f14   Peter Zijlstra   perf: Rework the ...
1244
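  /*
   * Remove the event from the PMU: stop it, release its scheduling
   * constraints and drop it from cpuc->event_list.
   */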
  static void x86_pmu_del(struct perf_event *event, int flags)
2e8418736   Peter Zijlstra   perf_event: x86: ...
1245
1246
1247
  {
  	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
  	int i;
90151c35b   Stephane Eranian   perf_events: Fix ...
1248
1249
1250
1251
1252
  	/*
  	 * If we're called during a txn, we don't need to do anything.
  	 * The events never got scheduled and ->cancel_txn will truncate
  	 * the event_list.
  	 */
8d2cacbbb   Peter Zijlstra   perf: Cleanup {st...
1253
  	if (cpuc->group_flag & PERF_EVENT_TXN)
90151c35b   Stephane Eranian   perf_events: Fix ...
1254
  		return;
a4eaf7f14   Peter Zijlstra   perf: Rework the ...
1255
  	x86_pmu_stop(event, PERF_EF_UPDATE);
194002b27   Peter Zijlstra   perf_counter, x86...
1256

1da53e023   Stephane Eranian   perf_events, x86:...
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
  	for (i = 0; i < cpuc->n_events; i++) {
  		if (event == cpuc->event_list[i]) {
  
  			if (x86_pmu.put_event_constraints)
  				x86_pmu.put_event_constraints(cpuc, event);
  
  			while (++i < cpuc->n_events)
  				cpuc->event_list[i-1] = cpuc->event_list[i];
  
  			--cpuc->n_events;
6c9687abe   Peter Zijlstra   perf_event: x86: ...
1267
  			break;
1da53e023   Stephane Eranian   perf_events, x86:...
1268
1269
  		}
  	}
cdd6c482c   Ingo Molnar   perf: Do the big ...
1270
  	perf_event_update_userpage(event);
241771ef0   Ingo Molnar   performance count...
1271
  }
8c48e4441   Peter Zijlstra   perf_events, x86:...
1272
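  /*
   * Default PMI handler: scan the generic counters, detect overflowed
   * counters via the sign bit of the updated count, re-arm the period
   * and hand each sample to the perf core.
   */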
  static int x86_pmu_handle_irq(struct pt_regs *regs)
a29aa8a7f   Robert Richter   perf_counter, x86...
1273
  {
df1a132bf   Peter Zijlstra   perf_counter: Int...
1274
  	struct perf_sample_data data;
cdd6c482c   Ingo Molnar   perf: Do the big ...
1275
1276
  	struct cpu_hw_events *cpuc;
  	struct perf_event *event;
11d1578f9   Vince Weaver   perf_counter: Add...
1277
  	int idx, handled = 0;
9029a5e38   Ingo Molnar   perf_counter: x86...
1278
  	u64 val;
dc1d628a6   Peter Zijlstra   perf: Provide gen...
1279
  	perf_sample_data_init(&data, 0);
df1a132bf   Peter Zijlstra   perf_counter: Int...
1280

cdd6c482c   Ingo Molnar   perf: Do the big ...
1281
  	cpuc = &__get_cpu_var(cpu_hw_events);
962bf7a66   Peter Zijlstra   perf_counter: x86...
1282

2bce5daca   Don Zickus   perf, x86, nmi: M...
1283
1284
1285
1286
1287
1288
1289
1290
1291
  	/*
  	 * Some chipsets need to unmask the LVTPC in a particular spot
  	 * inside the nmi handler.  As a result, the unmasking was pushed
  	 * into all the nmi handlers.
  	 *
  	 * This generic handler doesn't seem to have any issues where the
  	 * unmasking occurs, so it was left at the top.
  	 */
  	apic_write(APIC_LVTPC, APIC_DM_NMI);
948b1bb89   Robert Richter   perf, x86: Undo s...
1292
  	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
63e6be6d9   Robert Richter   perf, x86: Catch ...
1293
1294
1295
1296
1297
1298
1299
1300
  		if (!test_bit(idx, cpuc->active_mask)) {
  			/*
  			 * Though we deactivated the counter, some CPUs
  			 * might still deliver spurious interrupts that
  			 * are already in flight. Catch them:
  			 */
  			if (__test_and_clear_bit(idx, cpuc->running))
  				handled++;
a29aa8a7f   Robert Richter   perf_counter, x86...
1301
  			continue;
63e6be6d9   Robert Richter   perf, x86: Catch ...
1302
  		}
962bf7a66   Peter Zijlstra   perf_counter: x86...
1303

cdd6c482c   Ingo Molnar   perf: Do the big ...
1304
  		event = cpuc->events[idx];
a4016a79f   Peter Zijlstra   perf_counter: x86...
1305

cc2ad4ba8   Peter Zijlstra   perf, x86: Remove...
1306
  		val = x86_perf_event_update(event);
948b1bb89   Robert Richter   perf, x86: Undo s...
1307
  		if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
48e22d56e   Peter Zijlstra   perf_counter: x86...
1308
  			continue;
962bf7a66   Peter Zijlstra   perf_counter: x86...
1309

9e350de37   Peter Zijlstra   perf_counter: Acc...
1310
  		/*
cdd6c482c   Ingo Molnar   perf: Do the big ...
1311
  		 * event overflow
9e350de37   Peter Zijlstra   perf_counter: Acc...
1312
  		 */
4177c42a6   Robert Richter   perf, x86: Try to...
1313
  		handled++;
cdd6c482c   Ingo Molnar   perf: Do the big ...
1314
  		data.period	= event->hw.last_period;
9e350de37   Peter Zijlstra   perf_counter: Acc...
1315

07088edb8   Peter Zijlstra   perf, x86: Remove...
1316
  		if (!x86_perf_event_set_period(event))
e4abb5d4f   Peter Zijlstra   perf_counter: x86...
1317
  			continue;
a8b0ca17b   Peter Zijlstra   perf: Remove the ...
1318
  		if (perf_event_overflow(event, &data, regs))
a4eaf7f14   Peter Zijlstra   perf: Rework the ...
1319
  			x86_pmu_stop(event, 0);
a29aa8a7f   Robert Richter   perf_counter, x86...
1320
  	}
962bf7a66   Peter Zijlstra   perf_counter: x86...
1321

9e350de37   Peter Zijlstra   perf_counter: Acc...
1322
1323
  	if (handled)
  		inc_irq_stat(apic_perf_irqs);
a29aa8a7f   Robert Richter   perf_counter, x86...
1324
1325
  	return handled;
  }
39d81eab2   Robert Richter   perf_counter, x86...
1326

cdd6c482c   Ingo Molnar   perf: Do the big ...
1327
  void perf_events_lapic_init(void)
241771ef0   Ingo Molnar   performance count...
1328
  {
04da8a43d   Ingo Molnar   perf_counter, x86...
1329
  	if (!x86_pmu.apic || !x86_pmu_initialized())
241771ef0   Ingo Molnar   performance count...
1330
  		return;
85cf9dba9   Robert Richter   perf_counter, x86...
1331

241771ef0   Ingo Molnar   performance count...
1332
  	/*
c323d95fa   Yong Wang   perf_counter/x86:...
1333
  	 * Always use NMI for PMU
241771ef0   Ingo Molnar   performance count...
1334
  	 */
c323d95fa   Yong Wang   perf_counter/x86:...
1335
  	apic_write(APIC_LVTPC, APIC_DM_NMI);
241771ef0   Ingo Molnar   performance count...
1336
  }
4177c42a6   Robert Richter   perf, x86: Try to...
1337
1338
1339
1340
1341
1342
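  /*
   * State used to recognize PMU back-to-back NMIs: 'marked' is the
   * count of the next NMI that may legitimately turn out to be empty
   * and can then be swallowed, 'handled' is how many counters the
   * previous PMI processed.
   */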
  struct pmu_nmi_state {
  	unsigned int	marked;
  	int		handled;
  };
  
  static DEFINE_PER_CPU(struct pmu_nmi_state, pmu_nmi);
241771ef0   Ingo Molnar   performance count...
1343
  static int __kprobes
cdd6c482c   Ingo Molnar   perf: Do the big ...
1344
  perf_event_nmi_handler(struct notifier_block *self,
241771ef0   Ingo Molnar   performance count...
1345
1346
1347
  			 unsigned long cmd, void *__args)
  {
  	struct die_args *args = __args;
4177c42a6   Robert Richter   perf, x86: Try to...
1348
1349
  	unsigned int this_nmi;
  	int handled;
b0f3f28e0   Peter Zijlstra   perfcounters: IRQ...
1350

cdd6c482c   Ingo Molnar   perf: Do the big ...
1351
  	if (!atomic_read(&active_events))
63a809a2d   Peter Zijlstra   perf_counter: fix...
1352
  		return NOTIFY_DONE;
b0f3f28e0   Peter Zijlstra   perfcounters: IRQ...
1353
1354
  	switch (cmd) {
  	case DIE_NMI:
b0f3f28e0   Peter Zijlstra   perfcounters: IRQ...
1355
  		break;
4177c42a6   Robert Richter   perf, x86: Try to...
1356
1357
  	case DIE_NMIUNKNOWN:
  		this_nmi = percpu_read(irq_stat.__nmi_count);
0a3aee0da   Tejun Heo   x86: Use this_cpu...
1358
  		if (this_nmi != __this_cpu_read(pmu_nmi.marked))
4177c42a6   Robert Richter   perf, x86: Try to...
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
  			/* let the kernel handle the unknown nmi */
  			return NOTIFY_DONE;
  		/*
  		 * This one is a PMU back-to-back nmi. Two events
  		 * trigger 'simultaneously' raising two back-to-back
  		 * NMIs. If the first NMI handles both, the latter
  		 * will be empty and daze the CPU. So, we drop it to
  		 * avoid false-positive 'unknown nmi' messages.
  		 */
  		return NOTIFY_STOP;
b0f3f28e0   Peter Zijlstra   perfcounters: IRQ...
1369
  	default:
241771ef0   Ingo Molnar   performance count...
1370
  		return NOTIFY_DONE;
b0f3f28e0   Peter Zijlstra   perfcounters: IRQ...
1371
  	}
241771ef0   Ingo Molnar   performance count...
1372

4177c42a6   Robert Richter   perf, x86: Try to...
1373
1374
1375
1376
1377
1378
1379
  	handled = x86_pmu.handle_irq(args->regs);
  	if (!handled)
  		return NOTIFY_DONE;
  
  	this_nmi = percpu_read(irq_stat.__nmi_count);
  	if ((handled > 1) ||
  		/* the next nmi could be a back-to-back nmi */
0a3aee0da   Tejun Heo   x86: Use this_cpu...
1380
1381
  	    ((__this_cpu_read(pmu_nmi.marked) == this_nmi) &&
  	     (__this_cpu_read(pmu_nmi.handled) > 1))) {
4177c42a6   Robert Richter   perf, x86: Try to...
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
  		/*
  		 * We could have two subsequent back-to-back nmis: The
  		 * first handles more than one counter, the 2nd
  		 * handles only one counter and the 3rd handles no
  		 * counter.
  		 *
  		 * This is the 2nd nmi because the previous was
  		 * handling more than one counter. We will mark the
  		 * next (3rd) and then drop it if unhandled.
  		 */
0a3aee0da   Tejun Heo   x86: Use this_cpu...
1392
1393
  		__this_cpu_write(pmu_nmi.marked, this_nmi + 1);
  		__this_cpu_write(pmu_nmi.handled, handled);
4177c42a6   Robert Richter   perf, x86: Try to...
1394
  	}
241771ef0   Ingo Molnar   performance count...
1395

a4016a79f   Peter Zijlstra   perf_counter: x86...
1396
  	return NOTIFY_STOP;
241771ef0   Ingo Molnar   performance count...
1397
  }
f22f54f44   Peter Zijlstra   perf_events, x86:...
1398
1399
1400
  static __read_mostly struct notifier_block perf_event_nmi_notifier = {
  	.notifier_call		= perf_event_nmi_handler,
  	.next			= NULL,
166d75147   Don Zickus   x86, NMI: Add pri...
1401
  	.priority		= NMI_LOCAL_LOW_PRIOR,
f22f54f44   Peter Zijlstra   perf_events, x86:...
1402
  };
63b146490   Peter Zijlstra   perf_event: x86: ...
1403
  static struct event_constraint unconstrained;
38331f62c   Stephane Eranian   perf_events, x86:...
1404
  static struct event_constraint emptyconstraint;
63b146490   Peter Zijlstra   perf_event: x86: ...
1405

63b146490   Peter Zijlstra   perf_event: x86: ...
1406
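  /*
   * Default constraint lookup: return the first entry of the
   * model-specific constraint table that matches the event's config,
   * or 'unconstrained' if none does.
   */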
  static struct event_constraint *
f22f54f44   Peter Zijlstra   perf_events, x86:...
1407
  x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
1da53e023   Stephane Eranian   perf_events, x86:...
1408
  {
63b146490   Peter Zijlstra   perf_event: x86: ...
1409
  	struct event_constraint *c;
1da53e023   Stephane Eranian   perf_events, x86:...
1410

1da53e023   Stephane Eranian   perf_events, x86:...
1411
1412
  	if (x86_pmu.event_constraints) {
  		for_each_event_constraint(c, x86_pmu.event_constraints) {
63b146490   Peter Zijlstra   perf_event: x86: ...
1413
1414
  			if ((event->hw.config & c->cmask) == c->code)
  				return c;
1da53e023   Stephane Eranian   perf_events, x86:...
1415
1416
  		}
  	}
63b146490   Peter Zijlstra   perf_event: x86: ...
1417
1418
  
  	return &unconstrained;
1da53e023   Stephane Eranian   perf_events, x86:...
1419
  }
f22f54f44   Peter Zijlstra   perf_events, x86:...
1420
1421
  #include "perf_event_amd.c"
  #include "perf_event_p6.c"
a072738e0   Cyrill Gorcunov   perf, x86: Implem...
1422
  #include "perf_event_p4.c"
caff2beff   Peter Zijlstra   perf, x86: Implem...
1423
  #include "perf_event_intel_lbr.c"
ca037701a   Peter Zijlstra   perf, x86: Add PE...
1424
  #include "perf_event_intel_ds.c"
f22f54f44   Peter Zijlstra   perf_events, x86:...
1425
  #include "perf_event_intel.c"
f87ad35d3   Jaswinder Singh Rajput   x86: AMD Support ...
1426

3f6da3905   Peter Zijlstra   perf: Rework and ...
1427
1428
1429
1430
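  /*
   * CPU hotplug callback: forward prepare/starting/dying/dead (and
   * cancelled bring-up) notifications to the model-specific PMU hooks.
   */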
  static int __cpuinit
  x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
  {
  	unsigned int cpu = (long)hcpu;
b38b24ead   Peter Zijlstra   perf, x86: Fix AM...
1431
  	int ret = NOTIFY_OK;
3f6da3905   Peter Zijlstra   perf: Rework and ...
1432
1433
1434
1435
  
  	switch (action & ~CPU_TASKS_FROZEN) {
  	case CPU_UP_PREPARE:
  		if (x86_pmu.cpu_prepare)
b38b24ead   Peter Zijlstra   perf, x86: Fix AM...
1436
  			ret = x86_pmu.cpu_prepare(cpu);
3f6da3905   Peter Zijlstra   perf: Rework and ...
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
  		break;
  
  	case CPU_STARTING:
  		if (x86_pmu.cpu_starting)
  			x86_pmu.cpu_starting(cpu);
  		break;
  
  	case CPU_DYING:
  		if (x86_pmu.cpu_dying)
  			x86_pmu.cpu_dying(cpu);
  		break;
b38b24ead   Peter Zijlstra   perf, x86: Fix AM...
1448
  	case CPU_UP_CANCELED:
3f6da3905   Peter Zijlstra   perf: Rework and ...
1449
1450
1451
1452
1453
1454
1455
1456
  	case CPU_DEAD:
  		if (x86_pmu.cpu_dead)
  			x86_pmu.cpu_dead(cpu);
  		break;
  
  	default:
  		break;
  	}
b38b24ead   Peter Zijlstra   perf, x86: Fix AM...
1457
  	return ret;
3f6da3905   Peter Zijlstra   perf: Rework and ...
1458
  }
125580380   Cyrill Gorcunov   x86, perf events:...
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
  static void __init pmu_check_apic(void)
  {
  	if (cpu_has_apic)
  		return;
  
  	x86_pmu.apic = 0;
  	pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.
  ");
  	pr_info("no hardware sampling interrupt available.
  ");
  }
dda991169   Yinghai Lu   x86, perf: Change...
1470
  static int __init init_hw_perf_events(void)
b56a3802d   Jaswinder Singh Rajput   x86: prepare perf...
1471
  {
b622d644c   Peter Zijlstra   perf_events, x86:...
1472
  	struct event_constraint *c;
72eae04d3   Robert Richter   perf_counter, x86...
1473
  	int err;
cdd6c482c   Ingo Molnar   perf: Do the big ...
1474
  	pr_info("Performance Events: ");
1123e3ad7   Ingo Molnar   perf_counter: Cle...
1475

b56a3802d   Jaswinder Singh Rajput   x86: prepare perf...
1476
1477
  	switch (boot_cpu_data.x86_vendor) {
  	case X86_VENDOR_INTEL:
72eae04d3   Robert Richter   perf_counter, x86...
1478
  		err = intel_pmu_init();
b56a3802d   Jaswinder Singh Rajput   x86: prepare perf...
1479
  		break;
f87ad35d3   Jaswinder Singh Rajput   x86: AMD Support ...
1480
  	case X86_VENDOR_AMD:
72eae04d3   Robert Richter   perf_counter, x86...
1481
  		err = amd_pmu_init();
f87ad35d3   Jaswinder Singh Rajput   x86: AMD Support ...
1482
  		break;
4138960a9   Robert Richter   perf_counter, x86...
1483
  	default:
004417a6d   Peter Zijlstra   perf, arch: Clean...
1484
  		return 0;
b56a3802d   Jaswinder Singh Rajput   x86: prepare perf...
1485
  	}
1123e3ad7   Ingo Molnar   perf_counter: Cle...
1486
  	if (err != 0) {
cdd6c482c   Ingo Molnar   perf: Do the big ...
1487
1488
  		pr_cont("no PMU driver, software events only.
  ");
004417a6d   Peter Zijlstra   perf, arch: Clean...
1489
  		return 0;
1123e3ad7   Ingo Molnar   perf_counter: Cle...
1490
  	}
b56a3802d   Jaswinder Singh Rajput   x86: prepare perf...
1491

125580380   Cyrill Gorcunov   x86, perf events:...
1492
  	pmu_check_apic();
33c6d6a7a   Don Zickus   x86, perf, nmi: D...
1493
  	/* sanity check that the hardware exists or is emulated */
4407204c5   Peter Zijlstra   perf, x86: Detect...
1494
  	if (!check_hw_exists())
004417a6d   Peter Zijlstra   perf, arch: Clean...
1495
  		return 0;
33c6d6a7a   Don Zickus   x86, perf, nmi: D...
1496

1123e3ad7   Ingo Molnar   perf_counter: Cle...
1497
1498
  	pr_cont("%s PMU driver.
  ", x86_pmu.name);
faa28ae01   Robert Richter   perf_counter, x86...
1499

3c44780b2   Peter Zijlstra   perf, x86: Disabl...
1500
1501
  	if (x86_pmu.quirks)
  		x86_pmu.quirks();
948b1bb89   Robert Richter   perf, x86: Undo s...
1502
  	if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
cdd6c482c   Ingo Molnar   perf: Do the big ...
1503
  		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
948b1bb89   Robert Richter   perf, x86: Undo s...
1504
1505
  		     x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
  		x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
241771ef0   Ingo Molnar   performance count...
1506
  	}
948b1bb89   Robert Richter   perf, x86: Undo s...
1507
  	x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
241771ef0   Ingo Molnar   performance count...
1508

948b1bb89   Robert Richter   perf, x86: Undo s...
1509
  	if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
cdd6c482c   Ingo Molnar   perf: Do the big ...
1510
  		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
948b1bb89   Robert Richter   perf, x86: Undo s...
1511
1512
  		     x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
  		x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
703e937c8   Ingo Molnar   perfcounters: add...
1513
  	}
862a1a5f3   Ingo Molnar   x86, perfcounters...
1514

d6dc0b4ea   Robert Richter   perf/core, x86: R...
1515
  	x86_pmu.intel_ctrl |=
948b1bb89   Robert Richter   perf, x86: Undo s...
1516
  		((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
241771ef0   Ingo Molnar   performance count...
1517

cdd6c482c   Ingo Molnar   perf: Do the big ...
1518
1519
  	perf_events_lapic_init();
  	register_die_notifier(&perf_event_nmi_notifier);
1123e3ad7   Ingo Molnar   perf_counter: Cle...
1520

63b146490   Peter Zijlstra   perf_event: x86: ...
1521
  	unconstrained = (struct event_constraint)
948b1bb89   Robert Richter   perf, x86: Undo s...
1522
1523
  		__EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
  				   0, x86_pmu.num_counters);
63b146490   Peter Zijlstra   perf_event: x86: ...
1524

b622d644c   Peter Zijlstra   perf_events, x86:...
1525
1526
  	if (x86_pmu.event_constraints) {
  		for_each_event_constraint(c, x86_pmu.event_constraints) {
a098f4484   Robert Richter   perf, x86: implem...
1527
  			if (c->cmask != X86_RAW_EVENT_MASK)
b622d644c   Peter Zijlstra   perf_events, x86:...
1528
  				continue;
948b1bb89   Robert Richter   perf, x86: Undo s...
1529
1530
  			c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
  			c->weight += x86_pmu.num_counters;
b622d644c   Peter Zijlstra   perf_events, x86:...
1531
1532
  		}
  	}
57c0c15b5   Ingo Molnar   perf: Tidy up aft...
1533
1534
  	pr_info("... version:                %d
  ",     x86_pmu.version);
948b1bb89   Robert Richter   perf, x86: Undo s...
1535
1536
1537
1538
1539
1540
  	pr_info("... bit width:              %d
  ",     x86_pmu.cntval_bits);
  	pr_info("... generic registers:      %d
  ",     x86_pmu.num_counters);
  	pr_info("... value mask:             %016Lx
  ", x86_pmu.cntval_mask);
57c0c15b5   Ingo Molnar   perf: Tidy up aft...
1541
1542
  	pr_info("... max period:             %016Lx
  ", x86_pmu.max_period);
948b1bb89   Robert Richter   perf, x86: Undo s...
1543
1544
  	pr_info("... fixed-purpose events:   %d
  ",     x86_pmu.num_counters_fixed);
d6dc0b4ea   Robert Richter   perf/core, x86: R...
1545
1546
  	pr_info("... event mask:             %016Lx
  ", x86_pmu.intel_ctrl);
3f6da3905   Peter Zijlstra   perf: Rework and ...
1547

2e80a82a4   Peter Zijlstra   perf: Dynamic pmu...
1548
  	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
3f6da3905   Peter Zijlstra   perf: Rework and ...
1549
  	perf_cpu_notifier(x86_pmu_notifier);
004417a6d   Peter Zijlstra   perf, arch: Clean...
1550
1551
  
  	return 0;
241771ef0   Ingo Molnar   performance count...
1552
  }
004417a6d   Peter Zijlstra   perf, arch: Clean...
1553
  early_initcall(init_hw_perf_events);
621a01eac   Ingo Molnar   perf counters: hw...
1554

cdd6c482c   Ingo Molnar   perf: Do the big ...
1555
  static inline void x86_pmu_read(struct perf_event *event)
ee06094f8   Ingo Molnar   perfcounters: res...
1556
  {
cc2ad4ba8   Peter Zijlstra   perf, x86: Remove...
1557
  	x86_perf_event_update(event);
ee06094f8   Ingo Molnar   perfcounters: res...
1558
  }
4d1c52b02   Lin Ming   perf, x86: implem...
1559
1560
1561
1562
1563
  /*
   * Start group events scheduling transaction
   * Set the flag to make pmu::enable() not perform the
   * schedulability test; it will be performed at commit time.
   */
51b0fe395   Peter Zijlstra   perf: Deconstify ...
1564
  static void x86_pmu_start_txn(struct pmu *pmu)
4d1c52b02   Lin Ming   perf, x86: implem...
1565
  {
33696fc0d   Peter Zijlstra   perf: Per PMU dis...
1566
  	perf_pmu_disable(pmu);
0a3aee0da   Tejun Heo   x86: Use this_cpu...
1567
1568
  	__this_cpu_or(cpu_hw_events.group_flag, PERF_EVENT_TXN);
  	__this_cpu_write(cpu_hw_events.n_txn, 0);
4d1c52b02   Lin Ming   perf, x86: implem...
1569
1570
1571
1572
1573
1574
1575
  }
  
  /*
   * Stop group events scheduling transaction
   * Clear the flag and pmu::enable() will perform the
   * schedulability test.
   */
51b0fe395   Peter Zijlstra   perf: Deconstify ...
1576
  static void x86_pmu_cancel_txn(struct pmu *pmu)
4d1c52b02   Lin Ming   perf, x86: implem...
1577
  {
0a3aee0da   Tejun Heo   x86: Use this_cpu...
1578
  	__this_cpu_and(cpu_hw_events.group_flag, ~PERF_EVENT_TXN);
90151c35b   Stephane Eranian   perf_events: Fix ...
1579
1580
1581
  	/*
  	 * Truncate the collected events.
  	 */
0a3aee0da   Tejun Heo   x86: Use this_cpu...
1582
1583
  	__this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
  	__this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
33696fc0d   Peter Zijlstra   perf: Per PMU dis...
1584
  	perf_pmu_enable(pmu);
4d1c52b02   Lin Ming   perf, x86: implem...
1585
1586
1587
1588
1589
1590
1591
  }
  
  /*
   * Commit group events scheduling transaction
   * Perform the group schedulability test as a whole
   * Return 0 if success
   */
51b0fe395   Peter Zijlstra   perf: Deconstify ...
1592
  static int x86_pmu_commit_txn(struct pmu *pmu)
4d1c52b02   Lin Ming   perf, x86: implem...
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
  {
  	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
  	int assign[X86_PMC_IDX_MAX];
  	int n, ret;
  
  	n = cpuc->n_events;
  
  	if (!x86_pmu_initialized())
  		return -EAGAIN;
  
  	ret = x86_pmu.schedule_events(cpuc, n, assign);
  	if (ret)
  		return ret;
  
  	/*
  	 * Copy the new assignment now that we know it is possible;
  	 * it will be used by hw_perf_enable().
  	 */
  	memcpy(cpuc->assign, assign, n*sizeof(int));
8d2cacbbb   Peter Zijlstra   perf: Cleanup {st...
1612
  	cpuc->group_flag &= ~PERF_EVENT_TXN;
33696fc0d   Peter Zijlstra   perf: Per PMU dis...
1613
  	perf_pmu_enable(pmu);
4d1c52b02   Lin Ming   perf, x86: implem...
1614
1615
  	return 0;
  }
cd8a38d33   Stephane Eranian   perf_events: Fix ...
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
  /*
   * a fake_cpuc is used to validate event groups. Due to
   * the extra reg logic, we need to also allocate a fake
   * per_core and per_cpu structure. Otherwise, group events
   * using extra reg may conflict without the kernel being
   * able to catch this when the last event gets added to
   * the group.
   */
  static void free_fake_cpuc(struct cpu_hw_events *cpuc)
  {
  	kfree(cpuc->shared_regs);
  	kfree(cpuc);
  }
  
  static struct cpu_hw_events *allocate_fake_cpuc(void)
  {
  	struct cpu_hw_events *cpuc;
  	int cpu = raw_smp_processor_id();
  
  	cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL);
  	if (!cpuc)
  		return ERR_PTR(-ENOMEM);
  
  	/* only needed, if we have extra_regs */
  	if (x86_pmu.extra_regs) {
  		cpuc->shared_regs = allocate_shared_regs(cpu);
  		if (!cpuc->shared_regs)
  			goto error;
  	}
  	return cpuc;
  error:
  	free_fake_cpuc(cpuc);
  	return ERR_PTR(-ENOMEM);
  }
4d1c52b02   Lin Ming   perf, x86: implem...
1650

1da53e023   Stephane Eranian   perf_events, x86:...
1651
  /*
ca037701a   Peter Zijlstra   perf, x86: Add PE...
1652
1653
1654
1655
1656
1657
1658
   * validate that we can schedule this event
   */
  static int validate_event(struct perf_event *event)
  {
  	struct cpu_hw_events *fake_cpuc;
  	struct event_constraint *c;
  	int ret = 0;
cd8a38d33   Stephane Eranian   perf_events: Fix ...
1659
1660
1661
  	fake_cpuc = allocate_fake_cpuc();
  	if (IS_ERR(fake_cpuc))
  		return PTR_ERR(fake_cpuc);
ca037701a   Peter Zijlstra   perf, x86: Add PE...
1662
1663
1664
1665
1666
1667
1668
1669
  
  	c = x86_pmu.get_event_constraints(fake_cpuc, event);
  
  	if (!c || !c->weight)
  		ret = -ENOSPC;
  
  	if (x86_pmu.put_event_constraints)
  		x86_pmu.put_event_constraints(fake_cpuc, event);
cd8a38d33   Stephane Eranian   perf_events: Fix ...
1670
  	free_fake_cpuc(fake_cpuc);
ca037701a   Peter Zijlstra   perf, x86: Add PE...
1671
1672
1673
1674
1675
  
  	return ret;
  }
  
  /*
1da53e023   Stephane Eranian   perf_events, x86:...
1676
1677
1678
   * validate a single event group
   *
   * validation includes:
184f412c3   Ingo Molnar   perf, x86: Clean ...
1679
1680
1681
   *	- check events are compatible with each other
   *	- events do not compete for the same counter
   *	- number of events <= number of counters
1da53e023   Stephane Eranian   perf_events, x86:...
1682
1683
1684
1685
   *
   * validation ensures the group can be loaded onto the
   * PMU if it was the only group available.
   */
fe9081cc9   Peter Zijlstra   perf, x86: Add si...
1686
1687
  static int validate_group(struct perf_event *event)
  {
1da53e023   Stephane Eranian   perf_events, x86:...
1688
  	struct perf_event *leader = event->group_leader;
502568d56   Peter Zijlstra   perf_event: x86: ...
1689
  	struct cpu_hw_events *fake_cpuc;
cd8a38d33   Stephane Eranian   perf_events: Fix ...
1690
  	int ret = -ENOSPC, n;
fe9081cc9   Peter Zijlstra   perf, x86: Add si...
1691

cd8a38d33   Stephane Eranian   perf_events: Fix ...
1692
1693
1694
  	fake_cpuc = allocate_fake_cpuc();
  	if (IS_ERR(fake_cpuc))
  		return PTR_ERR(fake_cpuc);
1da53e023   Stephane Eranian   perf_events, x86:...
1695
1696
1697
1698
1699
1700
  	/*
  	 * The event is not yet connected with its
  	 * siblings; therefore we must first collect
  	 * existing siblings, then add the new event
  	 * before we can simulate the scheduling
  	 */
502568d56   Peter Zijlstra   perf_event: x86: ...
1701
  	n = collect_events(fake_cpuc, leader, true);
1da53e023   Stephane Eranian   perf_events, x86:...
1702
  	if (n < 0)
cd8a38d33   Stephane Eranian   perf_events: Fix ...
1703
  		goto out;
fe9081cc9   Peter Zijlstra   perf, x86: Add si...
1704

502568d56   Peter Zijlstra   perf_event: x86: ...
1705
1706
  	fake_cpuc->n_events = n;
  	n = collect_events(fake_cpuc, event, false);
1da53e023   Stephane Eranian   perf_events, x86:...
1707
  	if (n < 0)
cd8a38d33   Stephane Eranian   perf_events: Fix ...
1708
  		goto out;
fe9081cc9   Peter Zijlstra   perf, x86: Add si...
1709

502568d56   Peter Zijlstra   perf_event: x86: ...
1710
  	fake_cpuc->n_events = n;
1da53e023   Stephane Eranian   perf_events, x86:...
1711

a072738e0   Cyrill Gorcunov   perf, x86: Implem...
1712
  	ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);
502568d56   Peter Zijlstra   perf_event: x86: ...
1713

502568d56   Peter Zijlstra   perf_event: x86: ...
1714
  out:
cd8a38d33   Stephane Eranian   perf_events: Fix ...
1715
  	free_fake_cpuc(fake_cpuc);
502568d56   Peter Zijlstra   perf_event: x86: ...
1716
  	return ret;
fe9081cc9   Peter Zijlstra   perf, x86: Add si...
1717
  }
dda991169   Yinghai Lu   x86, perf: Change...
1718
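  /*
   * Bind a new event to this PMU: accept only the hardware event types,
   * run the x86-specific setup and then validate the event (or its
   * whole group) against the available counters.
   */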
  static int x86_pmu_event_init(struct perf_event *event)
621a01eac   Ingo Molnar   perf counters: hw...
1719
  {
51b0fe395   Peter Zijlstra   perf: Deconstify ...
1720
  	struct pmu *tmp;
621a01eac   Ingo Molnar   perf counters: hw...
1721
  	int err;
b0a873ebb   Peter Zijlstra   perf: Register PM...
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
  	switch (event->attr.type) {
  	case PERF_TYPE_RAW:
  	case PERF_TYPE_HARDWARE:
  	case PERF_TYPE_HW_CACHE:
  		break;
  
  	default:
  		return -ENOENT;
  	}
  
  	err = __x86_pmu_event_init(event);
fe9081cc9   Peter Zijlstra   perf, x86: Add si...
1733
  	if (!err) {
8113070d6   Stephane Eranian   perf_events: Add ...
1734
1735
1736
1737
1738
1739
1740
  		/*
  		 * we temporarily connect the event to its pmu
  		 * such that validate_group() can classify
  		 * it as an x86 event using is_x86_event()
  		 */
  		tmp = event->pmu;
  		event->pmu = &pmu;
fe9081cc9   Peter Zijlstra   perf, x86: Add si...
1741
1742
  		if (event->group_leader != event)
  			err = validate_group(event);
ca037701a   Peter Zijlstra   perf, x86: Add PE...
1743
1744
  		else
  			err = validate_event(event);
8113070d6   Stephane Eranian   perf_events: Add ...
1745
1746
  
  		event->pmu = tmp;
fe9081cc9   Peter Zijlstra   perf, x86: Add si...
1747
  	}
a1792cdac   Peter Zijlstra   perf_counter: x86...
1748
  	if (err) {
cdd6c482c   Ingo Molnar   perf: Do the big ...
1749
1750
  		if (event->destroy)
  			event->destroy(event);
a1792cdac   Peter Zijlstra   perf_counter: x86...
1751
  	}
621a01eac   Ingo Molnar   perf counters: hw...
1752

b0a873ebb   Peter Zijlstra   perf: Register PM...
1753
  	return err;
621a01eac   Ingo Molnar   perf counters: hw...
1754
  }
d7d59fb32   Peter Zijlstra   perf_counter: x86...
1755

b0a873ebb   Peter Zijlstra   perf: Register PM...
1756
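  /* The x86 implementation of the generic struct pmu callbacks. */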
  static struct pmu pmu = {
a4eaf7f14   Peter Zijlstra   perf: Rework the ...
1757
1758
  	.pmu_enable	= x86_pmu_enable,
  	.pmu_disable	= x86_pmu_disable,
b0a873ebb   Peter Zijlstra   perf: Register PM...
1759
  	.event_init	= x86_pmu_event_init,
a4eaf7f14   Peter Zijlstra   perf: Rework the ...
1760
1761
1762
  
  	.add		= x86_pmu_add,
  	.del		= x86_pmu_del,
b0a873ebb   Peter Zijlstra   perf: Register PM...
1763
1764
1765
  	.start		= x86_pmu_start,
  	.stop		= x86_pmu_stop,
  	.read		= x86_pmu_read,
a4eaf7f14   Peter Zijlstra   perf: Rework the ...
1766

b0a873ebb   Peter Zijlstra   perf: Register PM...
1767
1768
1769
1770
  	.start_txn	= x86_pmu_start_txn,
  	.cancel_txn	= x86_pmu_cancel_txn,
  	.commit_txn	= x86_pmu_commit_txn,
  };
d7d59fb32   Peter Zijlstra   perf_counter: x86...
1771
1772
1773
  /*
   * callchain support
   */
d7d59fb32   Peter Zijlstra   perf_counter: x86...
1774
1775
  static int backtrace_stack(void *data, char *name)
  {
038e836e9   Ingo Molnar   perf_counter, x86...
1776
  	return 0;
d7d59fb32   Peter Zijlstra   perf_counter: x86...
1777
1778
1779
1780
1781
  }
  
  static void backtrace_address(void *data, unsigned long addr, int reliable)
  {
  	struct perf_callchain_entry *entry = data;
70791ce9b   Frederic Weisbecker   perf: Generalize ...
1782
  	perf_callchain_store(entry, addr);
d7d59fb32   Peter Zijlstra   perf_counter: x86...
1783
1784
1785
  }
  
  static const struct stacktrace_ops backtrace_ops = {
d7d59fb32   Peter Zijlstra   perf_counter: x86...
1786
1787
  	.stack			= backtrace_stack,
  	.address		= backtrace_address,
06d65bda7   Frederic Weisbecker   perf events, x86/...
1788
  	.walk_stack		= print_context_stack_bp,
d7d59fb32   Peter Zijlstra   perf_counter: x86...
1789
  };
56962b444   Frederic Weisbecker   perf: Generalize ...
1790
1791
  void
  perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
d7d59fb32   Peter Zijlstra   perf_counter: x86...
1792
  {
927c7a9e9   Frederic Weisbecker   perf: Fix race in...
1793
1794
  	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
  		/* TODO: We don't support guest OS callchains yet */
ed8052616   Peter Zijlstra   perf: Remove supe...
1795
  		return;
927c7a9e9   Frederic Weisbecker   perf: Fix race in...
1796
  	}
70791ce9b   Frederic Weisbecker   perf: Generalize ...
1797
  	perf_callchain_store(entry, regs->ip);
d7d59fb32   Peter Zijlstra   perf_counter: x86...
1798

e8e999cf3   Namhyung Kim   x86, dumpstack: C...
1799
  	dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry);
d7d59fb32   Peter Zijlstra   perf_counter: x86...
1800
  }
257ef9d21   Torok Edwin   perf, x86: Fix ca...
1801
1802
1803
  #ifdef CONFIG_COMPAT
  static inline int
  perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
74193ef0e   Peter Zijlstra   perf_counter: x86...
1804
  {
257ef9d21   Torok Edwin   perf, x86: Fix ca...
1805
1806
1807
  	/* 32-bit process in 64-bit kernel. */
  	struct stack_frame_ia32 frame;
  	const void __user *fp;
74193ef0e   Peter Zijlstra   perf_counter: x86...
1808

257ef9d21   Torok Edwin   perf, x86: Fix ca...
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
  	if (!test_thread_flag(TIF_IA32))
  		return 0;
  
  	fp = compat_ptr(regs->bp);
  	while (entry->nr < PERF_MAX_STACK_DEPTH) {
  		unsigned long bytes;
  		frame.next_frame     = 0;
  		frame.return_address = 0;
  
  		bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
  		if (bytes != sizeof(frame))
  			break;
74193ef0e   Peter Zijlstra   perf_counter: x86...
1821

257ef9d21   Torok Edwin   perf, x86: Fix ca...
1822
1823
  		if (fp < compat_ptr(regs->sp))
  			break;
74193ef0e   Peter Zijlstra   perf_counter: x86...
1824

70791ce9b   Frederic Weisbecker   perf: Generalize ...
1825
  		perf_callchain_store(entry, frame.return_address);
257ef9d21   Torok Edwin   perf, x86: Fix ca...
1826
1827
1828
  		fp = compat_ptr(frame.next_frame);
  	}
  	return 1;
d7d59fb32   Peter Zijlstra   perf_counter: x86...
1829
  }
257ef9d21   Torok Edwin   perf, x86: Fix ca...
1830
1831
1832
1833
1834
1835
1836
  #else
  static inline int
  perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
  {
      return 0;
  }
  #endif
d7d59fb32   Peter Zijlstra   perf_counter: x86...
1837

56962b444   Frederic Weisbecker   perf: Generalize ...
1838
1839
  void
  perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
d7d59fb32   Peter Zijlstra   perf_counter: x86...
1840
1841
1842
  {
  	struct stack_frame frame;
  	const void __user *fp;
927c7a9e9   Frederic Weisbecker   perf: Fix race in...
1843
1844
  	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
  		/* TODO: We don't support guest OS callchains yet */
ed8052616   Peter Zijlstra   perf: Remove supe...
1845
  		return;
927c7a9e9   Frederic Weisbecker   perf: Fix race in...
1846
  	}
5a6cec3ab   Ingo Molnar   perf_counter, x86...
1847

74193ef0e   Peter Zijlstra   perf_counter: x86...
1848
  	fp = (void __user *)regs->bp;
d7d59fb32   Peter Zijlstra   perf_counter: x86...
1849

70791ce9b   Frederic Weisbecker   perf: Generalize ...
1850
  	perf_callchain_store(entry, regs->ip);
d7d59fb32   Peter Zijlstra   perf_counter: x86...
1851

257ef9d21   Torok Edwin   perf, x86: Fix ca...
1852
1853
  	if (perf_callchain_user32(regs, entry))
  		return;
f9188e023   Peter Zijlstra   perf_counter: Mak...
1854
  	while (entry->nr < PERF_MAX_STACK_DEPTH) {
257ef9d21   Torok Edwin   perf, x86: Fix ca...
1855
  		unsigned long bytes;
038e836e9   Ingo Molnar   perf_counter, x86...
1856
  		frame.next_frame	     = NULL;
d7d59fb32   Peter Zijlstra   perf_counter: x86...
1857
  		frame.return_address = 0;
257ef9d21   Torok Edwin   perf, x86: Fix ca...
1858
1859
  		bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
  		if (bytes != sizeof(frame))
d7d59fb32   Peter Zijlstra   perf_counter: x86...
1860
  			break;
5a6cec3ab   Ingo Molnar   perf_counter, x86...
1861
  		if ((unsigned long)fp < regs->sp)
d7d59fb32   Peter Zijlstra   perf_counter: x86...
1862
  			break;
70791ce9b   Frederic Weisbecker   perf: Generalize ...
1863
  		perf_callchain_store(entry, frame.return_address);
038e836e9   Ingo Molnar   perf_counter, x86...
1864
  		fp = frame.next_frame;
d7d59fb32   Peter Zijlstra   perf_counter: x86...
1865
1866
  	}
  }
39447b386   Zhang, Yanmin   perf: Enhance per...
1867
1868
1869
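  /*
   * Report the sampled instruction pointer: when the sample hit while a
   * guest was running, ask the guest callbacks for the guest IP instead
   * of using the host pt_regs.
   */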
  unsigned long perf_instruction_pointer(struct pt_regs *regs)
  {
  	unsigned long ip;
dcf46b944   Zhang, Yanmin   perf & kvm: Clean...
1870

39447b386   Zhang, Yanmin   perf: Enhance per...
1871
1872
1873
1874
  	if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
  		ip = perf_guest_cbs->get_guest_ip();
  	else
  		ip = instruction_pointer(regs);
dcf46b944   Zhang, Yanmin   perf & kvm: Clean...
1875

39447b386   Zhang, Yanmin   perf: Enhance per...
1876
1877
1878
1879
1880
1881
  	return ip;
  }
  
  unsigned long perf_misc_flags(struct pt_regs *regs)
  {
  	int misc = 0;
dcf46b944   Zhang, Yanmin   perf & kvm: Clean...
1882

39447b386   Zhang, Yanmin   perf: Enhance per...
1883
  	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
dcf46b944   Zhang, Yanmin   perf & kvm: Clean...
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
  		if (perf_guest_cbs->is_user_mode())
  			misc |= PERF_RECORD_MISC_GUEST_USER;
  		else
  			misc |= PERF_RECORD_MISC_GUEST_KERNEL;
  	} else {
  		if (user_mode(regs))
  			misc |= PERF_RECORD_MISC_USER;
  		else
  			misc |= PERF_RECORD_MISC_KERNEL;
  	}
39447b386   Zhang, Yanmin   perf: Enhance per...
1894
  	if (regs->flags & PERF_EFLAGS_EXACT)
ab608344b   Peter Zijlstra   perf, x86: Improv...
1895
  		misc |= PERF_RECORD_MISC_EXACT_IP;
39447b386   Zhang, Yanmin   perf: Enhance per...
1896
1897
1898
  
  	return misc;
  }