arch/x86/include/asm/perf_event.h

  #ifndef _ASM_X86_PERF_EVENT_H
  #define _ASM_X86_PERF_EVENT_H

  /*
   * Performance event hw details:
   */
  #define X86_PMC_MAX_GENERIC				       32
  #define X86_PMC_MAX_FIXED					3
  #define X86_PMC_IDX_GENERIC				        0
  #define X86_PMC_IDX_FIXED				       32
  #define X86_PMC_IDX_MAX					       64
  #define MSR_ARCH_PERFMON_PERFCTR0			      0xc1
  #define MSR_ARCH_PERFMON_PERFCTR1			      0xc2

  #define MSR_ARCH_PERFMON_EVENTSEL0			     0x186
  #define MSR_ARCH_PERFMON_EVENTSEL1			     0x187

  #define ARCH_PERFMON_EVENTSEL_EVENT			0x000000FFULL
  #define ARCH_PERFMON_EVENTSEL_UMASK			0x0000FF00ULL
  #define ARCH_PERFMON_EVENTSEL_USR			(1ULL << 16)
  #define ARCH_PERFMON_EVENTSEL_OS			(1ULL << 17)
  #define ARCH_PERFMON_EVENTSEL_EDGE			(1ULL << 18)
  #define ARCH_PERFMON_EVENTSEL_INT			(1ULL << 20)
  #define ARCH_PERFMON_EVENTSEL_ANY			(1ULL << 21)
  #define ARCH_PERFMON_EVENTSEL_ENABLE			(1ULL << 22)
  #define ARCH_PERFMON_EVENTSEL_INV			(1ULL << 23)
  #define ARCH_PERFMON_EVENTSEL_CMASK			0xFF000000ULL
  #define AMD_PERFMON_EVENTSEL_GUESTONLY			(1ULL << 40)
  #define AMD_PERFMON_EVENTSEL_HOSTONLY			(1ULL << 41)
  #define AMD64_EVENTSEL_EVENT	\
  	(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
  #define INTEL_ARCH_EVENT_MASK	\
  	(ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)
  
  #define X86_RAW_EVENT_MASK		\
  	(ARCH_PERFMON_EVENTSEL_EVENT |	\
  	 ARCH_PERFMON_EVENTSEL_UMASK |	\
  	 ARCH_PERFMON_EVENTSEL_EDGE  |	\
  	 ARCH_PERFMON_EVENTSEL_INV   |	\
  	 ARCH_PERFMON_EVENTSEL_CMASK)
  #define AMD64_RAW_EVENT_MASK		\
  	(X86_RAW_EVENT_MASK          |  \
  	 AMD64_EVENTSEL_EVENT)
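
  /*
   * Illustrative sketch (not part of the kernel API; the function name is
   * hypothetical): composing a raw event from the masks above and
   * programming it into general-purpose counter 0. Event 0xc0 (retired
   * instructions) is used as an example; wrmsrl() is assumed to be
   * available via <asm/msr.h>.
   */
  static inline void x86_pmu_example_program_ctr0(void)
  {
  	u64 config = (0xc0 & ARCH_PERFMON_EVENTSEL_EVENT) |
  		     ARCH_PERFMON_EVENTSEL_USR |	/* count in user mode */
  		     ARCH_PERFMON_EVENTSEL_OS  |	/* count in kernel mode */
  		     ARCH_PERFMON_EVENTSEL_INT |	/* PMI on overflow */
  		     ARCH_PERFMON_EVENTSEL_ENABLE;

  	wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0, config);
  }
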
  #define AMD64_NUM_COUNTERS				4
  #define AMD64_NUM_COUNTERS_F15H				6
  #define AMD64_NUM_COUNTERS_MAX				AMD64_NUM_COUNTERS_F15H

  #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL		0x3c
  #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK		(0x00 << 8)
  #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX		0
  #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
  		(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))
  #define ARCH_PERFMON_BRANCH_MISSES_RETIRED		6
  #define ARCH_PERFMON_EVENTS_COUNT			7
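
  /*
   * Illustrative sketch (hypothetical helper name): testing whether the
   * architectural "unhalted core cycles" event is usable. Note the
   * inverted sense: a set bit in CPUID.0AH:EBX flags an event as *not*
   * available. cpuid() is assumed from <asm/processor.h>.
   */
  static inline int x86_pmu_example_core_cycles_usable(void)
  {
  	unsigned int eax, ebx, ecx, edx;

  	cpuid(10, &eax, &ebx, &ecx, &edx);
  	/* The EBX bit is only defined if the mask length covers its index: */
  	if (((eax >> 24) & 0xff) <= ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX)
  		return 0;
  	return !(ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT);
  }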

  /*
   * Intel "Architectural Performance Monitoring" CPUID
   * detection/enumeration details:
   */
  union cpuid10_eax {
  	struct {
  		unsigned int version_id:8;
  		unsigned int num_counters:8;
  		unsigned int bit_width:8;
  		unsigned int mask_length:8;
  	} split;
  	unsigned int full;
  };
  union cpuid10_ebx {
  	struct {
  		unsigned int no_unhalted_core_cycles:1;
  		unsigned int no_instructions_retired:1;
  		unsigned int no_unhalted_reference_cycles:1;
  		unsigned int no_llc_reference:1;
  		unsigned int no_llc_misses:1;
  		unsigned int no_branch_instruction_retired:1;
  		unsigned int no_branch_misses_retired:1;
  	} split;
  	unsigned int full;
  };
  union cpuid10_edx {
  	struct {
  		unsigned int num_counters_fixed:5;
  		unsigned int bit_width_fixed:8;
  		unsigned int reserved:19;
  	} split;
  	unsigned int full;
  };
  struct x86_pmu_capability {
  	int		version;
  	int		num_counters_gp;
  	int		num_counters_fixed;
  	int		bit_width_gp;
  	int		bit_width_fixed;
  	unsigned int	events_mask;
  	int		events_mask_len;
  };
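
  /*
   * Illustrative sketch of how the CPUID leaf 0xa unions above map onto
   * this struct (a simplified version of what perf_get_x86_pmu_capability()
   * fills in; cpuid() is assumed from <asm/processor.h>, and the function
   * name is hypothetical):
   */
  static inline void x86_pmu_example_read_capability(struct x86_pmu_capability *cap)
  {
  	union cpuid10_eax eax;
  	union cpuid10_ebx ebx;
  	union cpuid10_edx edx;
  	unsigned int unused;

  	cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);

  	cap->version		= eax.split.version_id;
  	cap->num_counters_gp	= eax.split.num_counters;
  	cap->bit_width_gp	= eax.split.bit_width;
  	cap->events_mask	= ebx.full;
  	cap->events_mask_len	= eax.split.mask_length;
  	cap->num_counters_fixed	= edx.split.num_counters_fixed;
  	cap->bit_width_fixed	= edx.split.bit_width_fixed;
  }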
  
  /*
   * Fixed-purpose performance events:
   */
  /*
   * All 3 fixed-mode PMCs are configured via this single MSR:
   */
  #define MSR_ARCH_PERFMON_FIXED_CTR_CTRL	0x38d
  
  /*
   * The counts are available in three separate MSRs:
   */
  /* Instr_Retired.Any: */
  #define MSR_ARCH_PERFMON_FIXED_CTR0	0x309
  #define X86_PMC_IDX_FIXED_INSTRUCTIONS	(X86_PMC_IDX_FIXED + 0)
  
  /* CPU_CLK_Unhalted.Core: */
  #define MSR_ARCH_PERFMON_FIXED_CTR1	0x30a
  #define X86_PMC_IDX_FIXED_CPU_CYCLES	(X86_PMC_IDX_FIXED + 1)
  
  /* CPU_CLK_Unhalted.Ref: */
  #define MSR_ARCH_PERFMON_FIXED_CTR2	0x30b
  #define X86_PMC_IDX_FIXED_REF_CYCLES	(X86_PMC_IDX_FIXED + 2)
  #define X86_PMC_MSK_FIXED_REF_CYCLES	(1ULL << X86_PMC_IDX_FIXED_REF_CYCLES)
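
  /*
   * Illustrative sketch (hypothetical helper name): each fixed counter
   * owns a 4-bit field in MSR_ARCH_PERFMON_FIXED_CTR_CTRL (bit 0: count
   * in ring 0, bit 1: count in rings 1-3, bit 3: PMI on overflow).
   * Enabling fixed counter 1 (CPU_CLK_Unhalted.Core) for user+kernel
   * with PMI might look like this; rdmsrl()/wrmsrl() are assumed from
   * <asm/msr.h>:
   */
  static inline void x86_pmu_example_enable_fixed_cycles(void)
  {
  	u64 ctrl;

  	rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, ctrl);
  	ctrl |= 0xbULL << 4;	/* 0b1011 into the counter-1 field */
  	wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, ctrl);
  }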

  /*
   * We model BTS tracing as another fixed-mode PMC.
   *
   * We choose a value in the middle of the fixed event range, since lower
   * values are used by actual fixed events and higher values are used
   * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
   */
  #define X86_PMC_IDX_FIXED_BTS				(X86_PMC_IDX_FIXED + 16)
  /*
   * IBS cpuid feature detection
   */
  
  #define IBS_CPUID_FEATURES		0x8000001b
  
  /*
   * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but
   * bit 0 is used to indicate the existence of IBS.
   */
  #define IBS_CAPS_AVAIL			(1U<<0)
  #define IBS_CAPS_FETCHSAM		(1U<<1)
  #define IBS_CAPS_OPSAM			(1U<<2)
  #define IBS_CAPS_RDWROPCNT		(1U<<3)
  #define IBS_CAPS_OPCNT			(1U<<4)
  #define IBS_CAPS_BRNTRGT		(1U<<5)
  #define IBS_CAPS_OPCNTEXT		(1U<<6)
  
  #define IBS_CAPS_DEFAULT		(IBS_CAPS_AVAIL		\
  					 | IBS_CAPS_FETCHSAM	\
  					 | IBS_CAPS_OPSAM)
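
  /*
   * Illustrative sketch of the detection logic (close to what
   * get_ibs_caps() below returns; the function name here is
   * hypothetical): when the CPU advertises IBS via X86_FEATURE_IBS but
   * the feature leaf is missing or its valid bit is clear, fall back to
   * IBS_CAPS_DEFAULT. boot_cpu_has() and cpuid_eax() are assumed from
   * <asm/processor.h>.
   */
  static inline u32 x86_pmu_example_ibs_caps(void)
  {
  	u32 caps;

  	if (!boot_cpu_has(X86_FEATURE_IBS))
  		return 0;
  	if (cpuid_eax(0x80000000) < IBS_CPUID_FEATURES)
  		return IBS_CAPS_DEFAULT;	/* feature leaf not implemented */
  	caps = cpuid_eax(IBS_CPUID_FEATURES);
  	if (!(caps & IBS_CAPS_AVAIL))
  		return IBS_CAPS_DEFAULT;	/* cpuid flags not valid */
  	return caps;
  }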
  
  /*
   * IBS APIC setup
   */
  #define IBSCTL				0x1cc
  #define IBSCTL_LVT_OFFSET_VALID		(1ULL<<8)
  #define IBSCTL_LVT_OFFSET_MASK		0x0F
  /* IbsFetchCtl bits/masks */
  #define IBS_FETCH_RAND_EN	(1ULL<<57)
  #define IBS_FETCH_VAL		(1ULL<<49)
  #define IBS_FETCH_ENABLE	(1ULL<<48)
  #define IBS_FETCH_CNT		0xFFFF0000ULL
  #define IBS_FETCH_MAX_CNT	0x0000FFFFULL
  
  /* IbsOpCtl bits */
  #define IBS_OP_CNT_CTL		(1ULL<<19)
  #define IBS_OP_VAL		(1ULL<<18)
  #define IBS_OP_ENABLE		(1ULL<<17)
  #define IBS_OP_MAX_CNT		0x0000FFFFULL
  #define IBS_OP_MAX_CNT_EXT	0x007FFFFFULL	/* not a register bit mask */
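
  /*
   * Illustrative sketch (hypothetical helper name): starting IBS op
   * sampling. Bits 15:0 of IbsOpCtl hold the maximum count in units of
   * 16 (cycles by default, dispatched ops when IBS_OP_CNT_CTL is set),
   * hence the shift by 4. MSR_AMD64_IBSOPCTL is assumed from
   * <asm/msr-index.h>, wrmsrl() from <asm/msr.h>.
   */
  static inline void x86_pmu_example_start_ibs_op(u64 period)
  {
  	wrmsrl(MSR_AMD64_IBSOPCTL,
  	       ((period >> 4) & IBS_OP_MAX_CNT) | IBS_OP_ENABLE);
  }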

  extern u32 get_ibs_caps(void);
  #ifdef CONFIG_PERF_EVENTS
  extern void perf_events_lapic_init(void);

  #define PERF_EVENT_INDEX_OFFSET			0

  /*
   * Abuse bit 3 of the cpu eflags register to indicate proper PEBS IP fixups.
   * This flag is otherwise unused and ABI specified to be 0, so nobody should
   * care what we do with it.
   */
  #define PERF_EFLAGS_EXACT	(1UL << 3)
  struct pt_regs;
  extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
  extern unsigned long perf_misc_flags(struct pt_regs *regs);
  #define perf_misc_flags(regs)	perf_misc_flags(regs)
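
  /*
   * Illustrative sketch (hypothetical name) of what an implementation of
   * perf_misc_flags() does with the bit: report the sample IP as exact
   * when PEBS has fixed it up. PERF_RECORD_MISC_EXACT_IP is assumed from
   * <linux/perf_event.h>, the full pt_regs definition from <asm/ptrace.h>.
   */
  static inline int x86_pmu_example_misc_flags(struct pt_regs *regs)
  {
  	int misc = 0;

  	if (regs->flags & PERF_EFLAGS_EXACT)
  		misc |= PERF_RECORD_MISC_EXACT_IP;
  	return misc;
  }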

  #include <asm/stacktrace.h>
  
  /*
   * We abuse bit 3 from flags to pass exact information, see perf_misc_flags
   * and the comment with PERF_EFLAGS_EXACT.
   */
  #define perf_arch_fetch_caller_regs(regs, __ip)		{	\
  	(regs)->ip = (__ip);					\
  	(regs)->bp = caller_frame_pointer();			\
  	(regs)->cs = __KERNEL_CS;				\
  	regs->flags = 0;					\
  	asm volatile(						\
  		_ASM_MOV "%%"_ASM_SP ", %0\n"			\
  		: "=m" ((regs)->sp)				\
  		:: "memory"					\
  	);							\
  }
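
  /*
   * Illustrative sketch: the generic perf core wraps the macro above as
   * perf_fetch_caller_regs(). A direct use to snapshot the current
   * register state might look like this (CALLER_ADDR0 is assumed from
   * <linux/ftrace.h>, and the function name is hypothetical):
   */
  static inline void x86_pmu_example_snapshot_regs(struct pt_regs *regs)
  {
  	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
  }
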
  struct perf_guest_switch_msr {
  	unsigned msr;
  	u64 host, guest;
  };
  
  extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
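
  /*
   * Illustrative sketch (hypothetical function): a hypervisor loading the
   * guest-side PMU MSR values around guest entry. KVM uses its atomic
   * MSR-switching machinery for this; the plain wrmsrl() loop below is a
   * simplified stand-in.
   */
  static inline void x86_pmu_example_load_guest_msrs(void)
  {
  	struct perf_guest_switch_msr *msrs;
  	int i, nr;

  	msrs = perf_guest_get_msrs(&nr);
  	if (!msrs)
  		return;
  	for (i = 0; i < nr; i++)
  		wrmsrl(msrs[i].msr, msrs[i].guest);
  }
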
  extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
  #else
  static inline struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
  {
  	*nr = 0;
  	return NULL;
  }
  static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
  {
  	memset(cap, 0, sizeof(*cap));
  }
  static inline void perf_events_lapic_init(void)	{ }
  #endif
  #endif /* _ASM_X86_PERF_EVENT_H */