arch/x86/kernel/cpu/perf_event_intel.c

  #ifdef CONFIG_CPU_SUP_INTEL
  /*
   * Per core/cpu state
   *
   * Used to coordinate shared registers between HT threads or
   * among events on a single PMU.
   */
  struct intel_shared_regs {
  	struct er_account       regs[EXTRA_REG_MAX];
  	int                     refcnt;		/* per-core: #HT threads */
  	unsigned                core_id;	/* per-core: core id */
  };
  /*
   * Intel PerfMon, used on Core and later.
   */
  static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
  {
    [PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
    [PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
    [PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
    [PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
    [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
    [PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
    [PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
  };
  static struct event_constraint intel_core_event_constraints[] __read_mostly =
  {
  	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
  	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
  	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
  	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
  	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
  	INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
  	EVENT_CONSTRAINT_END
  };
  static struct event_constraint intel_core2_event_constraints[] __read_mostly =
  {
  	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
  	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
  	/*
  	 * Core2 has Fixed Counter 2 listed as CPU_CLK_UNHALTED.REF and event
  	 * 0x013c as CPU_CLK_UNHALTED.BUS and specifies there is a fixed
  	 * ratio between these counters.
  	 */
  	/* FIXED_EVENT_CONSTRAINT(0x013c, 2),  CPU_CLK_UNHALTED.REF */
  	INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
  	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
  	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
  	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
  	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
  	INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
  	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
  	INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
  	INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
  	INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
  	EVENT_CONSTRAINT_END
  };
  static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
  {
  	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
  	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
  	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
  	INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
  	INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
  	INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
  	INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
  	INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
  	INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
  	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
  	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
  	EVENT_CONSTRAINT_END
  };
  static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
  {
  	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
  	EVENT_EXTRA_END
  };
  static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
  {
  	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
  	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
  	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
  	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
  	INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
  	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
  	INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
  	EVENT_CONSTRAINT_END
  };
  static struct event_constraint intel_snb_event_constraints[] __read_mostly =
  {
  	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
  	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
  	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
  	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
  	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
  	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
  	EVENT_CONSTRAINT_END
  };
  static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
  {
  	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
  	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
  	EVENT_EXTRA_END
  };
  static struct event_constraint intel_gen_event_constraints[] __read_mostly =
  {
  	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
  	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
  	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
  	EVENT_CONSTRAINT_END
  };
  static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
  	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
  	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
  	EVENT_EXTRA_END
  };
  static u64 intel_pmu_event_map(int hw_event)
  {
  	return intel_perfmon_event_map[hw_event];
  }
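
  /*
   * Generic cache event tables below: -1 marks an op/result combination the
   * hardware cannot count, 0 means no suitable event is available, any other
   * value is the raw event code programmed into the counter.
   */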
  static __initconst const u64 snb_hw_cache_event_ids
  				[PERF_COUNT_HW_CACHE_MAX]
  				[PERF_COUNT_HW_CACHE_OP_MAX]
  				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
  {
   [ C(L1D) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS        */
  		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPLACEMENT              */
  	},
  	[ C(OP_WRITE) ] = {
  		[ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES       */
  		[ C(RESULT_MISS)   ] = 0x0851, /* L1D.ALL_M_REPLACEMENT        */
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = 0x0,
  		[ C(RESULT_MISS)   ] = 0x024e, /* HW_PRE_REQ.DL1_MISS          */
  	},
   },
   [ C(L1I ) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = 0x0,
  		[ C(RESULT_MISS)   ] = 0x0280, /* ICACHE.MISSES */
  	},
  	[ C(OP_WRITE) ] = {
  		[ C(RESULT_ACCESS) ] = -1,
  		[ C(RESULT_MISS)   ] = -1,
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = 0x0,
  		[ C(RESULT_MISS)   ] = 0x0,
  	},
   },
   [ C(LL  ) ] = {
  	[ C(OP_READ) ] = {
  		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
  		[ C(RESULT_ACCESS) ] = 0x01b7,
  		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
  		[ C(RESULT_MISS)   ] = 0x01b7,
  	},
  	[ C(OP_WRITE) ] = {
  		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
  		[ C(RESULT_ACCESS) ] = 0x01b7,
  		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
  		[ C(RESULT_MISS)   ] = 0x01b7,
  	},
  	[ C(OP_PREFETCH) ] = {
  		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
  		[ C(RESULT_ACCESS) ] = 0x01b7,
  		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
  		[ C(RESULT_MISS)   ] = 0x01b7,
  	},
   },
   [ C(DTLB) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
  		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
  	},
  	[ C(OP_WRITE) ] = {
  		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
  		[ C(RESULT_MISS)   ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = 0x0,
  		[ C(RESULT_MISS)   ] = 0x0,
  	},
   },
   [ C(ITLB) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT         */
  		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK    */
  	},
  	[ C(OP_WRITE) ] = {
  		[ C(RESULT_ACCESS) ] = -1,
  		[ C(RESULT_MISS)   ] = -1,
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = -1,
  		[ C(RESULT_MISS)   ] = -1,
  	},
   },
   [ C(BPU ) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
  		[ C(RESULT_MISS)   ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
  	},
  	[ C(OP_WRITE) ] = {
  		[ C(RESULT_ACCESS) ] = -1,
  		[ C(RESULT_MISS)   ] = -1,
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = -1,
  		[ C(RESULT_MISS)   ] = -1,
  	},
   },
   [ C(NODE) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = -1,
  		[ C(RESULT_MISS)   ] = -1,
  	},
  	[ C(OP_WRITE) ] = {
  		[ C(RESULT_ACCESS) ] = -1,
  		[ C(RESULT_MISS)   ] = -1,
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = -1,
  		[ C(RESULT_MISS)   ] = -1,
  	},
   },
  };
  static __initconst const u64 westmere_hw_cache_event_ids
  				[PERF_COUNT_HW_CACHE_MAX]
  				[PERF_COUNT_HW_CACHE_OP_MAX]
  				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
  {
   [ C(L1D) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
  		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL                     */
  	},
  	[ C(OP_WRITE) ] = {
  		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES      */
  		[ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL                   */
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */
  		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS            */
  	},
   },
   [ C(L1I ) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                    */
  		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */
  	},
  	[ C(OP_WRITE) ] = {
  		[ C(RESULT_ACCESS) ] = -1,
  		[ C(RESULT_MISS)   ] = -1,
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = 0x0,
  		[ C(RESULT_MISS)   ] = 0x0,
  	},
   },
   [ C(LL  ) ] = {
  	[ C(OP_READ) ] = {
  		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
  		[ C(RESULT_ACCESS) ] = 0x01b7,
  		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
  		[ C(RESULT_MISS)   ] = 0x01b7,
  	},
  	/*
  	 * Use RFO, not WRITEBACK, because a write miss would typically occur
  	 * on RFO.
  	 */
  	[ C(OP_WRITE) ] = {
  		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
  		[ C(RESULT_ACCESS) ] = 0x01b7,
  		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
  		[ C(RESULT_MISS)   ] = 0x01b7,
  	},
  	[ C(OP_PREFETCH) ] = {
  		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
  		[ C(RESULT_ACCESS) ] = 0x01b7,
  		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
  		[ C(RESULT_MISS)   ] = 0x01b7,
  	},
   },
   [ C(DTLB) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
  		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY         */
  	},
  	[ C(OP_WRITE) ] = {
  		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES      */
  		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS  */
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = 0x0,
  		[ C(RESULT_MISS)   ] = 0x0,
  	},
   },
   [ C(ITLB) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P           */
  		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.ANY              */
  	},
  	[ C(OP_WRITE) ] = {
  		[ C(RESULT_ACCESS) ] = -1,
  		[ C(RESULT_MISS)   ] = -1,
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = -1,
  		[ C(RESULT_MISS)   ] = -1,
  	},
   },
   [ C(BPU ) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
  		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY               */
  	},
  	[ C(OP_WRITE) ] = {
  		[ C(RESULT_ACCESS) ] = -1,
  		[ C(RESULT_MISS)   ] = -1,
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = -1,
  		[ C(RESULT_MISS)   ] = -1,
  	},
   },
   [ C(NODE) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = 0x01b7,
  		[ C(RESULT_MISS)   ] = 0x01b7,
  	},
  	[ C(OP_WRITE) ] = {
  		[ C(RESULT_ACCESS) ] = 0x01b7,
  		[ C(RESULT_MISS)   ] = 0x01b7,
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = 0x01b7,
  		[ C(RESULT_MISS)   ] = 0x01b7,
  	},
   },
  };
  /*
   * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
   * See IA32 SDM Vol 3B 30.6.1.3
   */
  #define NHM_DMND_DATA_RD	(1 << 0)
  #define NHM_DMND_RFO		(1 << 1)
  #define NHM_DMND_IFETCH		(1 << 2)
  #define NHM_DMND_WB		(1 << 3)
  #define NHM_PF_DATA_RD		(1 << 4)
  #define NHM_PF_DATA_RFO		(1 << 5)
  #define NHM_PF_IFETCH		(1 << 6)
  #define NHM_OFFCORE_OTHER	(1 << 7)
  #define NHM_UNCORE_HIT		(1 << 8)
  #define NHM_OTHER_CORE_HIT_SNP	(1 << 9)
  #define NHM_OTHER_CORE_HITM	(1 << 10)
          			/* reserved */
  #define NHM_REMOTE_CACHE_FWD	(1 << 12)
  #define NHM_REMOTE_DRAM		(1 << 13)
  #define NHM_LOCAL_DRAM		(1 << 14)
  #define NHM_NON_DRAM		(1 << 15)
  
  #define NHM_ALL_DRAM		(NHM_REMOTE_DRAM|NHM_LOCAL_DRAM)
  
  #define NHM_DMND_READ		(NHM_DMND_DATA_RD)
  #define NHM_DMND_WRITE		(NHM_DMND_RFO|NHM_DMND_WB)
  #define NHM_DMND_PREFETCH	(NHM_PF_DATA_RD|NHM_PF_DATA_RFO)
  
  #define NHM_L3_HIT	(NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
  #define NHM_L3_MISS	(NHM_NON_DRAM|NHM_ALL_DRAM|NHM_REMOTE_CACHE_FWD)
  #define NHM_L3_ACCESS	(NHM_L3_HIT|NHM_L3_MISS)
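
  /*
   * The NHM_* bits above are combined into the MSR_OFFCORE_RSP_* values in
   * the extra-register table below; they back the LL and NODE generic cache
   * events, which appear as the offcore response event (0x01b7) in the
   * event-id tables.
   */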
  
  static __initconst const u64 nehalem_hw_cache_extra_regs
  				[PERF_COUNT_HW_CACHE_MAX]
  				[PERF_COUNT_HW_CACHE_OP_MAX]
  				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
  {
   [ C(LL  ) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
  		[ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_L3_MISS,
  	},
  	[ C(OP_WRITE) ] = {
  		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
  		[ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_L3_MISS,
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
  		[ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
  	},
   },
   [ C(NODE) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_ALL_DRAM,
  		[ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_REMOTE_DRAM,
  	},
  	[ C(OP_WRITE) ] = {
  		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_ALL_DRAM,
  		[ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_REMOTE_DRAM,
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_ALL_DRAM,
  		[ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_REMOTE_DRAM,
  	},
   },
  };
  static __initconst const u64 nehalem_hw_cache_event_ids
  				[PERF_COUNT_HW_CACHE_MAX]
  				[PERF_COUNT_HW_CACHE_OP_MAX]
  				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
  {
   [ C(L1D) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
  		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL                     */
  	},
  	[ C(OP_WRITE) ] = {
  		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES      */
  		[ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL                   */
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */
  		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS            */
  	},
   },
   [ C(L1I ) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                    */
  		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */
  	},
  	[ C(OP_WRITE) ] = {
  		[ C(RESULT_ACCESS) ] = -1,
  		[ C(RESULT_MISS)   ] = -1,
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = 0x0,
  		[ C(RESULT_MISS)   ] = 0x0,
  	},
   },
   [ C(LL  ) ] = {
  	[ C(OP_READ) ] = {
  		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
  		[ C(RESULT_ACCESS) ] = 0x01b7,
  		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
  		[ C(RESULT_MISS)   ] = 0x01b7,
  	},
  	/*
  	 * Use RFO, not WRITEBACK, because a write miss would typically occur
  	 * on RFO.
  	 */
  	[ C(OP_WRITE) ] = {
  		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
  		[ C(RESULT_ACCESS) ] = 0x01b7,
  		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
  		[ C(RESULT_MISS)   ] = 0x01b7,
  	},
  	[ C(OP_PREFETCH) ] = {
  		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
  		[ C(RESULT_ACCESS) ] = 0x01b7,
  		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
  		[ C(RESULT_MISS)   ] = 0x01b7,
  	},
   },
   [ C(DTLB) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI   (alias)  */
  		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY         */
  	},
  	[ C(OP_WRITE) ] = {
  		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI   (alias)  */
  		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS  */
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = 0x0,
  		[ C(RESULT_MISS)   ] = 0x0,
  	},
   },
   [ C(ITLB) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P           */
  		[ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED            */
  	},
  	[ C(OP_WRITE) ] = {
  		[ C(RESULT_ACCESS) ] = -1,
  		[ C(RESULT_MISS)   ] = -1,
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = -1,
  		[ C(RESULT_MISS)   ] = -1,
  	},
   },
   [ C(BPU ) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
  		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY               */
  	},
  	[ C(OP_WRITE) ] = {
  		[ C(RESULT_ACCESS) ] = -1,
  		[ C(RESULT_MISS)   ] = -1,
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = -1,
  		[ C(RESULT_MISS)   ] = -1,
  	},
   },
   [ C(NODE) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = 0x01b7,
  		[ C(RESULT_MISS)   ] = 0x01b7,
  	},
  	[ C(OP_WRITE) ] = {
  		[ C(RESULT_ACCESS) ] = 0x01b7,
  		[ C(RESULT_MISS)   ] = 0x01b7,
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = 0x01b7,
  		[ C(RESULT_MISS)   ] = 0x01b7,
  	},
   },
  };
  static __initconst const u64 core2_hw_cache_event_ids
  				[PERF_COUNT_HW_CACHE_MAX]
  				[PERF_COUNT_HW_CACHE_OP_MAX]
  				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
  {
   [ C(L1D) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI          */
  		[ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE       */
  	},
  	[ C(OP_WRITE) ] = {
  		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI          */
  		[ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE       */
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS      */
  		[ C(RESULT_MISS)   ] = 0,
  	},
   },
   [ C(L1I ) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS                  */
  		[ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES                 */
  	},
  	[ C(OP_WRITE) ] = {
  		[ C(RESULT_ACCESS) ] = -1,
  		[ C(RESULT_MISS)   ] = -1,
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = 0,
  		[ C(RESULT_MISS)   ] = 0,
  	},
   },
   [ C(LL  ) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
  		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
  	},
  	[ C(OP_WRITE) ] = {
  		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */
  		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = 0,
  		[ C(RESULT_MISS)   ] = 0,
  	},
   },
   [ C(DTLB) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI  (alias) */
  		[ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD        */
  	},
  	[ C(OP_WRITE) ] = {
  		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI  (alias) */
  		[ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST        */
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = 0,
  		[ C(RESULT_MISS)   ] = 0,
  	},
   },
   [ C(ITLB) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
  		[ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES                 */
  	},
  	[ C(OP_WRITE) ] = {
  		[ C(RESULT_ACCESS) ] = -1,
  		[ C(RESULT_MISS)   ] = -1,
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = -1,
  		[ C(RESULT_MISS)   ] = -1,
  	},
   },
   [ C(BPU ) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
  		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
  	},
  	[ C(OP_WRITE) ] = {
  		[ C(RESULT_ACCESS) ] = -1,
  		[ C(RESULT_MISS)   ] = -1,
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = -1,
  		[ C(RESULT_MISS)   ] = -1,
  	},
   },
  };
  static __initconst const u64 atom_hw_cache_event_ids
  				[PERF_COUNT_HW_CACHE_MAX]
  				[PERF_COUNT_HW_CACHE_OP_MAX]
  				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
  {
   [ C(L1D) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD               */
  		[ C(RESULT_MISS)   ] = 0,
  	},
  	[ C(OP_WRITE) ] = {
  		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST               */
  		[ C(RESULT_MISS)   ] = 0,
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = 0x0,
  		[ C(RESULT_MISS)   ] = 0,
  	},
   },
   [ C(L1I ) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                  */
  		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                 */
  	},
  	[ C(OP_WRITE) ] = {
  		[ C(RESULT_ACCESS) ] = -1,
  		[ C(RESULT_MISS)   ] = -1,
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = 0,
  		[ C(RESULT_MISS)   ] = 0,
  	},
   },
   [ C(LL  ) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
  		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
  	},
  	[ C(OP_WRITE) ] = {
  		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */
  		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = 0,
  		[ C(RESULT_MISS)   ] = 0,
  	},
   },
   [ C(DTLB) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI  (alias) */
  		[ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD        */
  	},
  	[ C(OP_WRITE) ] = {
  		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI  (alias) */
  		[ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST        */
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = 0,
  		[ C(RESULT_MISS)   ] = 0,
  	},
   },
   [ C(ITLB) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
  		[ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES                */
  	},
  	[ C(OP_WRITE) ] = {
  		[ C(RESULT_ACCESS) ] = -1,
  		[ C(RESULT_MISS)   ] = -1,
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = -1,
  		[ C(RESULT_MISS)   ] = -1,
  	},
   },
   [ C(BPU ) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
  		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
  	},
  	[ C(OP_WRITE) ] = {
  		[ C(RESULT_ACCESS) ] = -1,
  		[ C(RESULT_MISS)   ] = -1,
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = -1,
  		[ C(RESULT_MISS)   ] = -1,
  	},
   },
  };
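
  /*
   * Stop all counting: clear the global enable MSR, shut down BTS if it is
   * armed, then disable the PEBS and LBR machinery.
   */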
  static void intel_pmu_disable_all(void)
  {
  	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
  
  	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
  
  	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
  		intel_pmu_disable_bts();
  
  	intel_pmu_pebs_disable_all();
  	intel_pmu_lbr_disable_all();
  }
  static void intel_pmu_enable_all(int added)
  {
  	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
  	intel_pmu_pebs_enable_all();
  	intel_pmu_lbr_enable_all();
  	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
  
  	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
  		struct perf_event *event =
  			cpuc->events[X86_PMC_IDX_FIXED_BTS];
  
  		if (WARN_ON_ONCE(!event))
  			return;
  
  		intel_pmu_enable_bts(event->hw.config);
  	}
  }
  /*
   * Workaround for:
   *   Intel Errata AAK100 (model 26)
   *   Intel Errata AAP53  (model 30)
   *   Intel Errata BD53   (model 44)
   *
   * The official story:
   *   These chips need to be 'reset' when adding counters by programming the
   *   magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
   *   in sequence on the same PMC or on different PMCs.
   *
   * In practice it appears some of these events do in fact count, and
   * we need to program all 4 events.
   */
  static void intel_pmu_nhm_workaround(void)
  {
  	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
  	static const unsigned long nhm_magic[4] = {
  		0x4300B5,
  		0x4300D2,
  		0x4300B1,
  		0x4300B1
  	};
  	struct perf_event *event;
  	int i;

  	/*
  	 * The errata requires the following steps:
  	 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
  	 * 2) Configure 4 PERFEVTSELx with the magic events and clear
  	 *    the corresponding PMCx;
  	 * 3) Set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
  	 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
  	 * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
  	 */

  	/*
  	 * The real steps we choose are a little different from above.
  	 * A) To reduce MSR operations, we don't run step 1) because those
  	 *    MSRs are already cleared before this function is called;
  	 * B) Call x86_perf_event_update to save PMCx before configuring
  	 *    PERFEVTSELx with magic number;
  	 * C) With step 5), we do clear only when the PERFEVTSELx is
  	 *    not used currently.
  	 * D) Call x86_perf_event_set_period to restore PMCx;
  	 */

  	/* We always operate on 4 pairs of PERF counters */
  	for (i = 0; i < 4; i++) {
  		event = cpuc->events[i];
  		if (event)
  			x86_perf_event_update(event);
  	}

  	for (i = 0; i < 4; i++) {
  		wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
  		wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
  	}
  
  	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
  	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);

  	for (i = 0; i < 4; i++) {
  		event = cpuc->events[i];
  
  		if (event) {
  			x86_perf_event_set_period(event);
  			__x86_pmu_enable_event(&event->hw,
  					ARCH_PERFMON_EVENTSEL_ENABLE);
  		} else
  			wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
  	}
  }
  
  static void intel_pmu_nhm_enable_all(int added)
  {
  	if (added)
  		intel_pmu_nhm_workaround();
  	intel_pmu_enable_all(added);
  }
  static inline u64 intel_pmu_get_status(void)
  {
  	u64 status;
  
  	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
  
  	return status;
  }
  
  static inline void intel_pmu_ack_status(u64 ack)
  {
  	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
  }
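
  /*
   * Fixed counters are controlled by 4-bit fields in
   * MSR_ARCH_PERFMON_FIXED_CTR_CTRL (hwc->config_base for fixed events);
   * clearing this counter's field disables it.
   */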
  static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
  {
  	int idx = hwc->idx - X86_PMC_IDX_FIXED;
  	u64 ctrl_val, mask;
  
  	mask = 0xfULL << (idx * 4);
  
  	rdmsrl(hwc->config_base, ctrl_val);
  	ctrl_val &= ~mask;
  	wrmsrl(hwc->config_base, ctrl_val);
  }
  static void intel_pmu_disable_event(struct perf_event *event)
  {
  	struct hw_perf_event *hwc = &event->hw;
  
  	if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
  		intel_pmu_disable_bts();
  		intel_pmu_drain_bts_buffer();
  		return;
  	}
  
  	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
  		intel_pmu_disable_fixed(hwc);
  		return;
  	}
  	x86_pmu_disable_event(event);

  	if (unlikely(event->attr.precise_ip))
  		intel_pmu_pebs_disable(event);
  }
  static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
  {
  	int idx = hwc->idx - X86_PMC_IDX_FIXED;
  	u64 ctrl_val, bits, mask;
  
  	/*
  	 * Enable IRQ generation (0x8),
  	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
  	 * if requested:
  	 */
  	bits = 0x8ULL;
  	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
  		bits |= 0x2;
  	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
  		bits |= 0x1;
  
  	/*
  	 * ANY bit is supported in v3 and up
  	 */
  	if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
  		bits |= 0x4;
  
  	bits <<= (idx * 4);
  	mask = 0xfULL << (idx * 4);
  
  	rdmsrl(hwc->config_base, ctrl_val);
  	ctrl_val &= ~mask;
  	ctrl_val |= bits;
  	wrmsrl(hwc->config_base, ctrl_val);
  }
  static void intel_pmu_enable_event(struct perf_event *event)
  {
  	struct hw_perf_event *hwc = &event->hw;
  
  	if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
  		if (!__this_cpu_read(cpu_hw_events.enabled))
  			return;
  
  		intel_pmu_enable_bts(hwc->config);
  		return;
  	}
  
  	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
  		intel_pmu_enable_fixed(hwc);
  		return;
  	}
  	if (unlikely(event->attr.precise_ip))
  		intel_pmu_pebs_enable(event);

  	__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
  }
  
  /*
   * Save and restart an expired event. Called by NMI contexts,
   * so it has to be careful about preempting normal event ops:
   */
  static int intel_pmu_save_and_restart(struct perf_event *event)
  {
  	x86_perf_event_update(event);
  	return x86_perf_event_set_period(event);
  }
  
  static void intel_pmu_reset(void)
  {
  	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
  	unsigned long flags;
  	int idx;
  	if (!x86_pmu.num_counters)
  		return;
  
  	local_irq_save(flags);
  
  	printk("clearing PMU state on CPU#%d
  ", smp_processor_id());
  	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
  		checking_wrmsrl(x86_pmu_config_addr(idx), 0ull);
  		checking_wrmsrl(x86_pmu_event_addr(idx),  0ull);
  	}
  	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
  		checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);

  	if (ds)
  		ds->bts_index = ds->bts_buffer_base;
  
  	local_irq_restore(flags);
  }
  
  /*
   * This handler is triggered by the local APIC, so the APIC IRQ handling
   * rules apply:
   */
  static int intel_pmu_handle_irq(struct pt_regs *regs)
  {
  	struct perf_sample_data data;
  	struct cpu_hw_events *cpuc;
  	int bit, loops;
  	u64 status;
  	int handled;

  	perf_sample_data_init(&data, 0);
  
  	cpuc = &__get_cpu_var(cpu_hw_events);
  	/*
  	 * Some chipsets need to unmask the LVTPC in a particular spot
  	 * inside the nmi handler.  As a result, the unmasking was pushed
  	 * into all the nmi handlers.
  	 *
  	 * This handler doesn't seem to have any issues with the unmasking
  	 * so it was left at the top.
  	 */
  	apic_write(APIC_LVTPC, APIC_DM_NMI);
  	intel_pmu_disable_all();
  	handled = intel_pmu_drain_bts_buffer();
  	status = intel_pmu_get_status();
  	if (!status) {
  		intel_pmu_enable_all(0);
  		return handled;
  	}
  
  	loops = 0;
  again:
  	intel_pmu_ack_status(status);
  	if (++loops > 100) {
  		WARN_ONCE(1, "perfevents: irq loop stuck!
  ");
  		perf_event_print_debug();
  		intel_pmu_reset();
  		goto done;
  	}
  
  	inc_irq_stat(apic_perf_irqs);

  	intel_pmu_lbr_read();
  	/*
  	 * PEBS overflow sets bit 62 in the global status register
  	 */
  	if (__test_and_clear_bit(62, (unsigned long *)&status)) {
  		handled++;
  		x86_pmu.drain_pebs(regs);
  	}

  	for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
  		struct perf_event *event = cpuc->events[bit];
  		handled++;
  		if (!test_bit(bit, cpuc->active_mask))
  			continue;
  
  		if (!intel_pmu_save_and_restart(event))
  			continue;
  
  		data.period = event->hw.last_period;
  		if (perf_event_overflow(event, &data, regs))
  			x86_pmu_stop(event, 0);
  	}
  	/*
  	 * Repeat if there is more work to be done:
  	 */
  	status = intel_pmu_get_status();
  	if (status)
  		goto again;
  done:
  	intel_pmu_enable_all(0);
  	return handled;
  }
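
  /*
   * Events that sample every branch instruction (sample_period == 1 and not
   * frequency based) are steered onto the BTS pseudo counter.
   */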
  static struct event_constraint *
  intel_bts_constraints(struct perf_event *event)
  {
  	struct hw_perf_event *hwc = &event->hw;
  	unsigned int hw_event, bts_event;

  	if (event->attr.freq)
  		return NULL;
  	hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
  	bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);

  	if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
  		return &bts_constraint;

  	return NULL;
  }
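
  /*
   * The offcore response event exists in two equivalent encodings: 0x01b7
   * using MSR_OFFCORE_RSP_0 and 0x01bb using MSR_OFFCORE_RSP_1.  If the
   * requested extra register is already claimed with a different config,
   * try the sibling encoding instead.
   */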
  static bool intel_try_alt_er(struct perf_event *event, int orig_idx)
  {
  	if (!(x86_pmu.er_flags & ERF_HAS_RSP_1))
  		return false;
  
  	if (event->hw.extra_reg.idx == EXTRA_REG_RSP_0) {
  		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
  		event->hw.config |= 0x01bb;
  		event->hw.extra_reg.idx = EXTRA_REG_RSP_1;
  		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
  	} else if (event->hw.extra_reg.idx == EXTRA_REG_RSP_1) {
  		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
  		event->hw.config |= 0x01b7;
  		event->hw.extra_reg.idx = EXTRA_REG_RSP_0;
  		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
  	}
  
  	if (event->hw.extra_reg.idx == orig_idx)
  		return false;
  
  	return true;
  }
  /*
   * manage allocation of shared extra msr for certain events
   *
   * sharing can be:
   * per-cpu: to be shared between the various events on a single PMU
   * per-core: per-cpu + shared by HT threads
   */
  static struct event_constraint *
  __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
  				   struct perf_event *event)
  {
  	struct event_constraint *c = &emptyconstraint;
  	struct hw_perf_event_extra *reg = &event->hw.extra_reg;
  	struct er_account *era;
  	unsigned long flags;
  	int orig_idx = reg->idx;

  	/* already allocated shared msr */
  	if (reg->alloc)
  		return &unconstrained;

  again:
  	era = &cpuc->shared_regs->regs[reg->idx];
  	/*
  	 * we use spin_lock_irqsave() to avoid lockdep issues when
  	 * passing a fake cpuc
  	 */
  	raw_spin_lock_irqsave(&era->lock, flags);
  
  	if (!atomic_read(&era->ref) || era->config == reg->config) {
  
  		/* lock in msr value */
  		era->config = reg->config;
  		era->reg = reg->reg;
  
  		/* one more user */
  		atomic_inc(&era->ref);
  
  		/* no need to reallocate during incremental event scheduling */
  		reg->alloc = 1;
  
  		/*
  		 * All events using extra_reg are unconstrained.
  		 * Avoids calling x86_get_event_constraints()
  		 *
  		 * Must revisit if extra_reg controlling events
  		 * ever have constraints. Worst case we go through
  		 * the regular event constraint table.
  		 */
  		c = &unconstrained;
  	} else if (intel_try_alt_er(event, orig_idx)) {
  		raw_spin_unlock(&era->lock);
  		goto again;
  	}
  	raw_spin_unlock_irqrestore(&era->lock, flags);

  	return c;
  }
  
  static void
  __intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
  				   struct hw_perf_event_extra *reg)
  {
  	struct er_account *era;
  
  	/*
  	 * only put constraint if extra reg was actually
  	 * allocated. Also takes care of events which do
  	 * not use an extra shared reg.
  	 */
  	if (!reg->alloc)
  		return;
  
  	era = &cpuc->shared_regs->regs[reg->idx];
  
  	/* one fewer user */
  	atomic_dec(&era->ref);
  
  	/* allocate again next time */
  	reg->alloc = 0;
  }
  
  static struct event_constraint *
  intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
  			      struct perf_event *event)
  {
  	struct event_constraint *c = NULL;

  	if (event->hw.extra_reg.idx != EXTRA_REG_NONE)
  		c = __intel_shared_reg_get_constraints(cpuc, event);
  	return c;
  }
  
  static struct event_constraint *
  intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
  {
  	struct event_constraint *c;
  	c = intel_bts_constraints(event);
  	if (c)
  		return c;
  
  	c = intel_pebs_constraints(event);
  	if (c)
  		return c;
  	c = intel_shared_regs_constraints(cpuc, event);
  	if (c)
  		return c;
  	return x86_get_event_constraints(cpuc, event);
  }
  static void
  intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
  					struct perf_event *event)
  {
  	struct hw_perf_event_extra *reg;

  	reg = &event->hw.extra_reg;
  	if (reg->idx != EXTRA_REG_NONE)
  		__intel_shared_reg_put_constraints(cpuc, reg);
  }

  static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
  					struct perf_event *event)
  {
  	intel_put_shared_regs_event_constraints(cpuc, event);
  }
  static int intel_pmu_hw_config(struct perf_event *event)
  {
  	int ret = x86_pmu_hw_config(event);
  
  	if (ret)
  		return ret;
  	if (event->attr.precise_ip &&
  	    (event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
  		/*
  		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
  		 * (0x003c) so that we can use it with PEBS.
  		 *
  		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
  		 * PEBS capable. However we can use INST_RETIRED.ANY_P
  		 * (0x00c0), which is a PEBS capable event, to get the same
  		 * count.
  		 *
  		 * INST_RETIRED.ANY_P, filtered with a count mask (CNTMASK), counts
  		 * the number of cycles that retire at least CNTMASK instructions.
  		 * By setting CNTMASK to a value (16) larger than the maximum
  		 * number of instructions that can be retired per cycle (4) and
  		 * then inverting the condition, we count all cycles that retire
  		 * 16 or fewer instructions, which is every cycle.
  		 *
  		 * Thereby we gain a PEBS capable cycle counter.
  		 */
  		u64 alt_config = 0x108000c0; /* INST_RETIRED.TOTAL_CYCLES */
  
  		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
  		event->hw.config = alt_config;
  	}
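
  	/*
  	 * The ANY (any-thread) bit can only be requested through raw
  	 * events; it needs architectural perfmon v3 or later and
  	 * CAP_SYS_ADMIN when perf is in paranoid-cpu mode.
  	 */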
  	if (event->attr.type != PERF_TYPE_RAW)
  		return 0;
  
  	if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
  		return 0;
  
  	if (x86_pmu.version < 3)
  		return -EINVAL;
  
  	if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
  		return -EACCES;
  
  	event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;
  
  	return 0;
  }
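
  /*
   * The "core" PMU description drives everything through the generic x86
   * enable/disable/handle_irq paths (no global control MSRs) and uses the
   * intel_core_event_constraints table.
   */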
  static __initconst const struct x86_pmu core_pmu = {
  	.name			= "core",
  	.handle_irq		= x86_pmu_handle_irq,
  	.disable_all		= x86_pmu_disable_all,
  	.enable_all		= x86_pmu_enable_all,
  	.enable			= x86_pmu_enable_event,
  	.disable		= x86_pmu_disable_event,
  	.hw_config		= x86_pmu_hw_config,
  	.schedule_events	= x86_schedule_events,
  	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
  	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
  	.event_map		= intel_pmu_event_map,
  	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
  	.apic			= 1,
  	/*
  	 * Intel PMCs cannot be accessed sanely above 32 bit width,
  	 * so we install an artificial 1<<31 period regardless of
  	 * the generic event period:
  	 */
  	.max_period		= (1ULL << 31) - 1,
  	.get_event_constraints	= intel_get_event_constraints,
a7e3ed1e4   Andi Kleen   perf: Add support...
1251
  	.put_event_constraints	= intel_put_event_constraints,
f22f54f44   Peter Zijlstra   perf_events, x86:...
1252
1253
  	.event_constraints	= intel_core_event_constraints,
  };
efc9f05df   Stephane Eranian   perf_events: Upda...
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
  static struct intel_shared_regs *allocate_shared_regs(int cpu)
  {
  	struct intel_shared_regs *regs;
  	int i;
  
  	regs = kzalloc_node(sizeof(struct intel_shared_regs),
  			    GFP_KERNEL, cpu_to_node(cpu));
  	if (regs) {
  		/*
  		 * initialize the locks to keep lockdep happy
  		 */
  		for (i = 0; i < EXTRA_REG_MAX; i++)
  			raw_spin_lock_init(&regs->regs[i].lock);
  
  		regs->core_id = -1;
  	}
  	return regs;
  }
a7e3ed1e4   Andi Kleen   perf: Add support...
1272
1273
1274
  static int intel_pmu_cpu_prepare(int cpu)
  {
  	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
efc9f05df   Stephane Eranian   perf_events: Upda...
1275
  	if (!x86_pmu.extra_regs)
690926242   Lin Ming   perf: Avoid the p...
1276
  		return NOTIFY_OK;
efc9f05df   Stephane Eranian   perf_events: Upda...
1277
1278
  	cpuc->shared_regs = allocate_shared_regs(cpu);
  	if (!cpuc->shared_regs)
a7e3ed1e4   Andi Kleen   perf: Add support...
1279
  		return NOTIFY_BAD;
a7e3ed1e4   Andi Kleen   perf: Add support...
1280
1281
  	return NOTIFY_OK;
  }
74846d35b   Peter Zijlstra   perf, x86: Clear ...
1282
1283
  static void intel_pmu_cpu_starting(int cpu)
  {
a7e3ed1e4   Andi Kleen   perf: Add support...
1284
1285
1286
  	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
  	int core_id = topology_core_id(cpu);
  	int i;
690926242   Lin Ming   perf: Avoid the p...
1287
1288
1289
1290
1291
  	init_debug_store_on_cpu(cpu);
  	/*
  	 * Deal with CPUs that don't clear their LBRs on power-up.
  	 */
  	intel_pmu_lbr_reset();
b79e8941f   Peter Zijlstra   perf, intel: Try ...
1292
  	if (!cpuc->shared_regs || (x86_pmu.er_flags & ERF_NO_HT_SHARING))
690926242   Lin Ming   perf: Avoid the p...
1293
  		return;
a7e3ed1e4   Andi Kleen   perf: Add support...
1294
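  	/*
  	 * Shared-register state is kept per physical core: scan the HT
  	 * siblings and, if one of them has already set up an
  	 * intel_shared_regs for this core_id, free the local copy and
  	 * share the sibling's instead (the refcount is bumped below).
  	 */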
  	for_each_cpu(i, topology_thread_cpumask(cpu)) {
efc9f05df   Stephane Eranian   perf_events: Upda...
1295
  		struct intel_shared_regs *pc;
a7e3ed1e4   Andi Kleen   perf: Add support...
1296

efc9f05df   Stephane Eranian   perf_events: Upda...
1297
  		pc = per_cpu(cpu_hw_events, i).shared_regs;
a7e3ed1e4   Andi Kleen   perf: Add support...
1298
  		if (pc && pc->core_id == core_id) {
efc9f05df   Stephane Eranian   perf_events: Upda...
1299
1300
  			kfree(cpuc->shared_regs);
  			cpuc->shared_regs = pc;
a7e3ed1e4   Andi Kleen   perf: Add support...
1301
1302
1303
  			break;
  		}
  	}
efc9f05df   Stephane Eranian   perf_events: Upda...
1304
1305
  	cpuc->shared_regs->core_id = core_id;
  	cpuc->shared_regs->refcnt++;
74846d35b   Peter Zijlstra   perf, x86: Clear ...
1306
1307
1308
1309
  }
  
  static void intel_pmu_cpu_dying(int cpu)
  {
a7e3ed1e4   Andi Kleen   perf: Add support...
1310
  	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
efc9f05df   Stephane Eranian   perf_events: Upda...
1311
  	struct intel_shared_regs *pc;
a7e3ed1e4   Andi Kleen   perf: Add support...
1312

efc9f05df   Stephane Eranian   perf_events: Upda...
1313
  	pc = cpuc->shared_regs;
a7e3ed1e4   Andi Kleen   perf: Add support...
1314
1315
1316
  	if (pc) {
  		if (pc->core_id == -1 || --pc->refcnt == 0)
  			kfree(pc);
efc9f05df   Stephane Eranian   perf_events: Upda...
1317
  		cpuc->shared_regs = NULL;
a7e3ed1e4   Andi Kleen   perf: Add support...
1318
  	}
74846d35b   Peter Zijlstra   perf, x86: Clear ...
1319
1320
  	fini_debug_store_on_cpu(cpu);
  }
caaa8be3b   Peter Zijlstra   perf, x86: Fix __...
1321
  static __initconst const struct x86_pmu intel_pmu = {
f22f54f44   Peter Zijlstra   perf_events, x86:...
1322
1323
1324
1325
1326
1327
  	.name			= "Intel",
  	.handle_irq		= intel_pmu_handle_irq,
  	.disable_all		= intel_pmu_disable_all,
  	.enable_all		= intel_pmu_enable_all,
  	.enable			= intel_pmu_enable_event,
  	.disable		= intel_pmu_disable_event,
b4cdc5c26   Peter Zijlstra   perf, x86: Fix up...
1328
  	.hw_config		= intel_pmu_hw_config,
a072738e0   Cyrill Gorcunov   perf, x86: Implem...
1329
  	.schedule_events	= x86_schedule_events,
f22f54f44   Peter Zijlstra   perf_events, x86:...
1330
1331
1332
  	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
  	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
  	.event_map		= intel_pmu_event_map,
f22f54f44   Peter Zijlstra   perf_events, x86:...
1333
1334
1335
1336
1337
1338
1339
1340
  	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
  	.apic			= 1,
  	/*
  	 * Intel PMCs cannot be accessed sanely above 32 bit width,
  	 * so we install an artificial 1<<31 period regardless of
  	 * the generic event period:
  	 */
  	.max_period		= (1ULL << 31) - 1,
3f6da3905   Peter Zijlstra   perf: Rework and ...
1341
  	.get_event_constraints	= intel_get_event_constraints,
a7e3ed1e4   Andi Kleen   perf: Add support...
1342
  	.put_event_constraints	= intel_put_event_constraints,
3f6da3905   Peter Zijlstra   perf: Rework and ...
1343

a7e3ed1e4   Andi Kleen   perf: Add support...
1344
  	.cpu_prepare		= intel_pmu_cpu_prepare,
74846d35b   Peter Zijlstra   perf, x86: Clear ...
1345
1346
  	.cpu_starting		= intel_pmu_cpu_starting,
  	.cpu_dying		= intel_pmu_cpu_dying,
f22f54f44   Peter Zijlstra   perf_events, x86:...
1347
  };
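  
  /*
   * intel_pmu differs from core_pmu above in using the v2+ global
   * enable/disable logic and PMI handler (intel_pmu_*_all(),
   * intel_pmu_handle_irq()), the Intel-specific hw_config hook, and the
   * CPU hotplug callbacks that manage the DS area and the shared
   * registers. intel_pmu_init() below picks one of the two based on the
   * CPUID-reported architectural perfmon version.
   */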
3c44780b2   Peter Zijlstra   perf, x86: Disabl...
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
  static void intel_clovertown_quirks(void)
  {
  	/*
  	 * PEBS is unreliable due to:
  	 *
  	 *   AJ67  - PEBS may experience CPL leaks
  	 *   AJ68  - PEBS PMI may be delayed by one event
  	 *   AJ69  - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
  	 *   AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
  	 *
  	 * AJ67 could be worked around by restricting the OS/USR flags.
  	 * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
  	 *
  	 * AJ106 could possibly be worked around by not allowing LBR
  	 *       usage from PEBS, including the fixup.
  	 * AJ68  could possibly be worked around by always programming
ec75a7163   Ingo Molnar   perf events, x86:...
1364
  	 *	 a pebs_event_reset[0] value and coping with the lost events.
3c44780b2   Peter Zijlstra   perf, x86: Disabl...
1365
1366
1367
1368
1369
1370
1371
1372
1373
  	 *
  	 * But taken together it might just make sense to not enable PEBS on
  	 * these chips.
  	 */
  	printk(KERN_WARNING "PEBS disabled due to CPU errata.\n");
  	x86_pmu.pebs = 0;
  	x86_pmu.pebs_constraints = NULL;
  }
f22f54f44   Peter Zijlstra   perf_events, x86:...
1374
1375
1376
1377
1378
1379
1380
1381
1382
  static __init int intel_pmu_init(void)
  {
  	union cpuid10_edx edx;
  	union cpuid10_eax eax;
  	unsigned int unused;
  	unsigned int ebx;
  	int version;
  
  	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
a072738e0   Cyrill Gorcunov   perf, x86: Implem...
1383
1384
1385
1386
1387
1388
  		switch (boot_cpu_data.x86) {
  		case 0x6:
  			return p6_pmu_init();
  		case 0xf:
  			return p4_pmu_init();
  		}
f22f54f44   Peter Zijlstra   perf_events, x86:...
1389
  		return -ENODEV;
f22f54f44   Peter Zijlstra   perf_events, x86:...
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
  	}
  
  	/*
  	 * Check whether the Architectural PerfMon supports
  	 * Branch Misses Retired hw_event or not.
  	 */
  	cpuid(10, &eax.full, &ebx, &unused, &edx.full);
  	if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
  		return -ENODEV;
  
  	version = eax.split.version_id;
  	if (version < 2)
  		x86_pmu = core_pmu;
  	else
  		x86_pmu = intel_pmu;
  
  	x86_pmu.version			= version;
948b1bb89   Robert Richter   perf, x86: Undo s...
1407
1408
1409
  	x86_pmu.num_counters		= eax.split.num_counters;
  	x86_pmu.cntval_bits		= eax.split.bit_width;
  	x86_pmu.cntval_mask		= (1ULL << eax.split.bit_width) - 1;
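  	/*
  	 * For example, a Nehalem-class CPU typically reports version_id = 3,
  	 * num_counters = 4 and bit_width = 48 here, yielding a cntval_mask
  	 * of 0x0000ffffffffffff.
  	 */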
f22f54f44   Peter Zijlstra   perf_events, x86:...
1410
1411
1412
1413
1414
1415
  
  	/*
  	 * Quirk: v2 perfmon does not report fixed-purpose events, so
  	 * assume at least 3 events:
  	 */
  	if (version > 1)
948b1bb89   Robert Richter   perf, x86: Undo s...
1416
  		x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
f22f54f44   Peter Zijlstra   perf_events, x86:...
1417

8db909a7e   Peter Zijlstra   perf, x86: Clean ...
1418
1419
1420
1421
1422
1423
1424
1425
1426
  	/*
  	 * v2 and above have a perf capabilities MSR
  	 */
  	if (version > 1) {
  		u64 capabilities;
  
  		rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
  		x86_pmu.intel_cap.capabilities = capabilities;
  	}
ca037701a   Peter Zijlstra   perf, x86: Add PE...
1427
  	intel_ds_init();
f22f54f44   Peter Zijlstra   perf_events, x86:...
1428
1429
1430
1431
1432
1433
1434
1435
1436
  	/*
  	 * Install the hw-cache-events table:
  	 */
  	switch (boot_cpu_data.x86_model) {
  	case 14: /* 65 nm core solo/duo, "Yonah" */
  		pr_cont("Core events, ");
  		break;
  
  	case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
3c44780b2   Peter Zijlstra   perf, x86: Disabl...
1437
  		x86_pmu.quirks = intel_clovertown_quirks;
f22f54f44   Peter Zijlstra   perf_events, x86:...
1438
1439
1440
1441
1442
  	case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
  	case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
  	case 29: /* six-core 45 nm xeon "Dunnington" */
  		memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
  		       sizeof(hw_cache_event_ids));
caff2beff   Peter Zijlstra   perf, x86: Implem...
1443
  		intel_pmu_lbr_init_core();
f22f54f44   Peter Zijlstra   perf_events, x86:...
1444
  		x86_pmu.event_constraints = intel_core2_event_constraints;
17e316297   Stephane Eranian   perf_events: Upda...
1445
  		x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
f22f54f44   Peter Zijlstra   perf_events, x86:...
1446
1447
1448
1449
1450
  		pr_cont("Core2 events, ");
  		break;
  
  	case 26: /* 45 nm nehalem, "Bloomfield" */
  	case 30: /* 45 nm nehalem, "Lynnfield" */
134fbadf0   Vince Weaver   perf, x86: Enable...
1451
  	case 46: /* 45 nm nehalem-ex, "Beckton" */
f22f54f44   Peter Zijlstra   perf_events, x86:...
1452
1453
  		memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
  		       sizeof(hw_cache_event_ids));
e994d7d23   Andi Kleen   perf: Fix LLC-* e...
1454
1455
  		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
  		       sizeof(hw_cache_extra_regs));
f22f54f44   Peter Zijlstra   perf_events, x86:...
1456

caff2beff   Peter Zijlstra   perf, x86: Implem...
1457
  		intel_pmu_lbr_init_nhm();
f22f54f44   Peter Zijlstra   perf_events, x86:...
1458
  		x86_pmu.event_constraints = intel_nehalem_event_constraints;
17e316297   Stephane Eranian   perf_events: Upda...
1459
  		x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
11164cd4f   Peter Zijlstra   perf, x86: Add Ne...
1460
  		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
a7e3ed1e4   Andi Kleen   perf: Add support...
1461
  		x86_pmu.extra_regs = intel_nehalem_extra_regs;
ec75a7163   Ingo Molnar   perf events, x86:...
1462

91fc4cc00   Ingo Molnar   perf, x86: Add ne...
1463
1464
1465
  		/* UOPS_ISSUED.STALLED_CYCLES */
  		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e;
  		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
8f6224224   Ingo Molnar   perf events: Add ...
1466
  		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x1803fb1;
94403f886   Ingo Molnar   perf events: Add ...
1467

ec75a7163   Ingo Molnar   perf events, x86:...
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
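  		/*
  		 * CPUID.0AH:EBX bit 6 set means the architectural "branch
  		 * misses retired" event is advertised as not available;
  		 * that is how AAJ80-affected parts are detected here.
  		 */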
  		if (ebx & 0x40) {
  			/*
  			 * Erratum AAJ80 detected, we work it around by using
  			 * the BR_MISP_EXEC.ANY event. This will over-count
  			 * branch-misses, but it's still much better than the
  			 * architectural event which is often completely bogus:
  			 */
  			intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
  
  			pr_cont("erratum AAJ80 worked around, ");
  		}
11164cd4f   Peter Zijlstra   perf, x86: Add Ne...
1479
  		pr_cont("Nehalem events, ");
f22f54f44   Peter Zijlstra   perf_events, x86:...
1480
  		break;
caff2beff   Peter Zijlstra   perf, x86: Implem...
1481

b622d644c   Peter Zijlstra   perf_events, x86:...
1482
  	case 28: /* Atom */
f22f54f44   Peter Zijlstra   perf_events, x86:...
1483
1484
  		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
  		       sizeof(hw_cache_event_ids));
caff2beff   Peter Zijlstra   perf, x86: Implem...
1485
  		intel_pmu_lbr_init_atom();
f22f54f44   Peter Zijlstra   perf_events, x86:...
1486
  		x86_pmu.event_constraints = intel_gen_event_constraints;
17e316297   Stephane Eranian   perf_events: Upda...
1487
  		x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
f22f54f44   Peter Zijlstra   perf_events, x86:...
1488
1489
1490
1491
1492
  		pr_cont("Atom events, ");
  		break;
  
  	case 37: /* 32 nm nehalem, "Clarkdale" */
  	case 44: /* 32 nm nehalem, "Gulftown" */
b2508e828   Andi Kleen   perf: Support Xeo...
1493
  	case 47: /* 32 nm Xeon E7 */
f22f54f44   Peter Zijlstra   perf_events, x86:...
1494
1495
  		memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
  		       sizeof(hw_cache_event_ids));
e994d7d23   Andi Kleen   perf: Fix LLC-* e...
1496
1497
  		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
  		       sizeof(hw_cache_extra_regs));
f22f54f44   Peter Zijlstra   perf_events, x86:...
1498

caff2beff   Peter Zijlstra   perf, x86: Implem...
1499
  		intel_pmu_lbr_init_nhm();
f22f54f44   Peter Zijlstra   perf_events, x86:...
1500
  		x86_pmu.event_constraints = intel_westmere_event_constraints;
40b91cd10   Peter Zijlstra   perf, x86: Add Ne...
1501
  		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
17e316297   Stephane Eranian   perf_events: Upda...
1502
  		x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
a7e3ed1e4   Andi Kleen   perf: Add support...
1503
  		x86_pmu.extra_regs = intel_westmere_extra_regs;
b79e8941f   Peter Zijlstra   perf, intel: Try ...
1504
  		x86_pmu.er_flags |= ERF_HAS_RSP_1;
301120396   Ingo Molnar   perf events, x86:...
1505
1506
1507
1508
1509
  
  		/* UOPS_ISSUED.STALLED_CYCLES */
  		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e;
  		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
  		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x1803fb1;
f22f54f44   Peter Zijlstra   perf_events, x86:...
1510
1511
  		pr_cont("Westmere events, ");
  		break;
b622d644c   Peter Zijlstra   perf_events, x86:...
1512

b06b3d496   Lin Ming   perf, x86: Add In...
1513
1514
1515
1516
1517
1518
1519
1520
  	case 42: /* SandyBridge */
  		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
  		       sizeof(hw_cache_event_ids));
  
  		intel_pmu_lbr_init_nhm();
  
  		x86_pmu.event_constraints = intel_snb_event_constraints;
  		x86_pmu.pebs_constraints = intel_snb_pebs_events;
ee89cbc2d   Stephane Eranian   perf_events: Add ...
1521
1522
  		x86_pmu.extra_regs = intel_snb_extra_regs;
  		/* all extra regs are per-cpu when HT is on */
b79e8941f   Peter Zijlstra   perf, intel: Try ...
1523
1524
  		x86_pmu.er_flags |= ERF_HAS_RSP_1;
  		x86_pmu.er_flags |= ERF_NO_HT_SHARING;
e04d1b23f   Lin Ming   perf events, x86:...
1525
1526
1527
1528
1529
  
  		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
  		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e;
  		/* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles */
  		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x18001b1;
b06b3d496   Lin Ming   perf, x86: Add In...
1530
1531
  		pr_cont("SandyBridge events, ");
  		break;
f22f54f44   Peter Zijlstra   perf_events, x86:...
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
  	default:
  		/*
  		 * default constraints for v2 and up
  		 */
  		x86_pmu.event_constraints = intel_gen_event_constraints;
  		pr_cont("generic architected perfmon, ");
  	}
  	return 0;
  }
  
  #else /* CONFIG_CPU_SUP_INTEL */
  
  static int intel_pmu_init(void)
  {
  	return 0;
  }
cd8a38d33   Stephane Eranian   perf_events: Fix ...
1548
1549
1550
1551
  static struct intel_shared_regs *allocate_shared_regs(int cpu)
  {
  	return NULL;
  }
f22f54f44   Peter Zijlstra   perf_events, x86:...
1552
  #endif /* CONFIG_CPU_SUP_INTEL */