/* arch/x86/kernel/cpu/perf_event_intel.c */

  #ifdef CONFIG_CPU_SUP_INTEL
  
  /*
   * Intel PerfMon, used on Core and later.
   */
  static const u64 intel_perfmon_event_map[] =
  {
    [PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
    [PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
    [PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
    [PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
    [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
    [PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
    [PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
  };
  
  static struct event_constraint intel_core_event_constraints[] =
  {
  	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
  	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
  	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
  	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
  	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
  	INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
  	EVENT_CONSTRAINT_END
  };
  
  static struct event_constraint intel_core2_event_constraints[] =
  {
  	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
  	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
  	/*
  	 * Core2 has Fixed Counter 2 listed as CPU_CLK_UNHALTED.REF and event
  	 * 0x013c as CPU_CLK_UNHALTED.BUS and specifies there is a fixed
  	 * ratio between these counters.
  	 */
  	/* FIXED_EVENT_CONSTRAINT(0x013c, 2),  CPU_CLK_UNHALTED.REF */
  	INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
  	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
  	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
  	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
  	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
  	INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
  	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
  	INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
  	INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
  	INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
  	EVENT_CONSTRAINT_END
  };
  
  static struct event_constraint intel_nehalem_event_constraints[] =
  {
  	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
  	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
  	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
  	INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
  	INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
  	INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
  	INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
  	INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
  	INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
  	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
  	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
  	EVENT_CONSTRAINT_END
  };
  
  static struct event_constraint intel_westmere_event_constraints[] =
  {
  	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
  	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
  	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
  	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
  	INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
  	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
  	INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
  	EVENT_CONSTRAINT_END
  };
  
  static struct event_constraint intel_gen_event_constraints[] =
  {
  	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
  	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
  	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
  	EVENT_CONSTRAINT_END
  };
  
  static u64 intel_pmu_event_map(int hw_event)
  {
  	return intel_perfmon_event_map[hw_event];
  }
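
/*
 * Cache event tables: an entry of 0 means no suitable hardware event
 * exists for that combination, while -1 marks a combination that is
 * not sensible (e.g. write accesses to the instruction cache).
 */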
  static __initconst const u64 westmere_hw_cache_event_ids
  				[PERF_COUNT_HW_CACHE_MAX]
  				[PERF_COUNT_HW_CACHE_OP_MAX]
  				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
  {
   [ C(L1D) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
  		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL                     */
  	},
  	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES      */
  		[ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL                   */
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */
  		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS            */
  	},
   },
   [ C(L1I ) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                    */
  		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */
  	},
  	[ C(OP_WRITE) ] = {
  		[ C(RESULT_ACCESS) ] = -1,
  		[ C(RESULT_MISS)   ] = -1,
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = 0x0,
  		[ C(RESULT_MISS)   ] = 0x0,
  	},
   },
   [ C(LL  ) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS               */
  		[ C(RESULT_MISS)   ] = 0x0224, /* L2_RQSTS.LD_MISS             */
  	},
  	[ C(OP_WRITE) ] = {
  		[ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS                */
  		[ C(RESULT_MISS)   ] = 0x0824, /* L2_RQSTS.RFO_MISS            */
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference                */
  		[ C(RESULT_MISS)   ] = 0x412e, /* LLC Misses                   */
  	},
   },
   [ C(DTLB) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
  		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY         */
  	},
  	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES      */
  		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS  */
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = 0x0,
  		[ C(RESULT_MISS)   ] = 0x0,
  	},
   },
   [ C(ITLB) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P           */
  		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.ANY              */
  	},
  	[ C(OP_WRITE) ] = {
  		[ C(RESULT_ACCESS) ] = -1,
  		[ C(RESULT_MISS)   ] = -1,
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = -1,
  		[ C(RESULT_MISS)   ] = -1,
  	},
   },
   [ C(BPU ) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
  		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY               */
  	},
  	[ C(OP_WRITE) ] = {
  		[ C(RESULT_ACCESS) ] = -1,
  		[ C(RESULT_MISS)   ] = -1,
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = -1,
  		[ C(RESULT_MISS)   ] = -1,
  	},
   },
  };
  static __initconst const u64 nehalem_hw_cache_event_ids
  				[PERF_COUNT_HW_CACHE_MAX]
  				[PERF_COUNT_HW_CACHE_OP_MAX]
  				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
  {
   [ C(L1D) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI            */
  		[ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE         */
  	},
  	[ C(OP_WRITE) ] = {
  		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI            */
  		[ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE         */
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */
  		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS            */
  	},
   },
   [ C(L1I ) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                    */
  		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */
  	},
  	[ C(OP_WRITE) ] = {
  		[ C(RESULT_ACCESS) ] = -1,
  		[ C(RESULT_MISS)   ] = -1,
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = 0x0,
  		[ C(RESULT_MISS)   ] = 0x0,
  	},
   },
   [ C(LL  ) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS               */
  		[ C(RESULT_MISS)   ] = 0x0224, /* L2_RQSTS.LD_MISS             */
  	},
  	[ C(OP_WRITE) ] = {
  		[ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS                */
  		[ C(RESULT_MISS)   ] = 0x0824, /* L2_RQSTS.RFO_MISS            */
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference                */
  		[ C(RESULT_MISS)   ] = 0x412e, /* LLC Misses                   */
  	},
   },
   [ C(DTLB) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI   (alias)  */
  		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY         */
  	},
  	[ C(OP_WRITE) ] = {
  		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI   (alias)  */
  		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS  */
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = 0x0,
  		[ C(RESULT_MISS)   ] = 0x0,
  	},
   },
   [ C(ITLB) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P           */
  		[ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED            */
  	},
  	[ C(OP_WRITE) ] = {
  		[ C(RESULT_ACCESS) ] = -1,
  		[ C(RESULT_MISS)   ] = -1,
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = -1,
  		[ C(RESULT_MISS)   ] = -1,
  	},
   },
   [ C(BPU ) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
  		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY               */
  	},
  	[ C(OP_WRITE) ] = {
  		[ C(RESULT_ACCESS) ] = -1,
  		[ C(RESULT_MISS)   ] = -1,
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = -1,
  		[ C(RESULT_MISS)   ] = -1,
  	},
   },
  };
  static __initconst const u64 core2_hw_cache_event_ids
  				[PERF_COUNT_HW_CACHE_MAX]
  				[PERF_COUNT_HW_CACHE_OP_MAX]
  				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
  {
   [ C(L1D) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI          */
  		[ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE       */
  	},
  	[ C(OP_WRITE) ] = {
  		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI          */
  		[ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE       */
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS      */
  		[ C(RESULT_MISS)   ] = 0,
  	},
   },
   [ C(L1I ) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS                  */
  		[ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES                 */
  	},
  	[ C(OP_WRITE) ] = {
  		[ C(RESULT_ACCESS) ] = -1,
  		[ C(RESULT_MISS)   ] = -1,
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = 0,
  		[ C(RESULT_MISS)   ] = 0,
  	},
   },
   [ C(LL  ) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
  		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
  	},
  	[ C(OP_WRITE) ] = {
  		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */
  		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = 0,
  		[ C(RESULT_MISS)   ] = 0,
  	},
   },
   [ C(DTLB) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI  (alias) */
  		[ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD        */
  	},
  	[ C(OP_WRITE) ] = {
  		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI  (alias) */
  		[ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST        */
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = 0,
  		[ C(RESULT_MISS)   ] = 0,
  	},
   },
   [ C(ITLB) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
  		[ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES                 */
  	},
  	[ C(OP_WRITE) ] = {
  		[ C(RESULT_ACCESS) ] = -1,
  		[ C(RESULT_MISS)   ] = -1,
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = -1,
  		[ C(RESULT_MISS)   ] = -1,
  	},
   },
   [ C(BPU ) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
  		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
  	},
  	[ C(OP_WRITE) ] = {
  		[ C(RESULT_ACCESS) ] = -1,
  		[ C(RESULT_MISS)   ] = -1,
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = -1,
  		[ C(RESULT_MISS)   ] = -1,
  	},
   },
  };
  static __initconst const u64 atom_hw_cache_event_ids
  				[PERF_COUNT_HW_CACHE_MAX]
  				[PERF_COUNT_HW_CACHE_OP_MAX]
  				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
  {
   [ C(L1D) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD               */
  		[ C(RESULT_MISS)   ] = 0,
  	},
  	[ C(OP_WRITE) ] = {
  		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST               */
  		[ C(RESULT_MISS)   ] = 0,
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = 0x0,
  		[ C(RESULT_MISS)   ] = 0,
  	},
   },
   [ C(L1I ) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                  */
  		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                 */
  	},
  	[ C(OP_WRITE) ] = {
  		[ C(RESULT_ACCESS) ] = -1,
  		[ C(RESULT_MISS)   ] = -1,
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = 0,
  		[ C(RESULT_MISS)   ] = 0,
  	},
   },
   [ C(LL  ) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
  		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
  	},
  	[ C(OP_WRITE) ] = {
  		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */
  		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = 0,
  		[ C(RESULT_MISS)   ] = 0,
  	},
   },
   [ C(DTLB) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI  (alias) */
  		[ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD        */
  	},
  	[ C(OP_WRITE) ] = {
  		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI  (alias) */
  		[ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST        */
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = 0,
  		[ C(RESULT_MISS)   ] = 0,
  	},
   },
   [ C(ITLB) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
  		[ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES                */
  	},
  	[ C(OP_WRITE) ] = {
  		[ C(RESULT_ACCESS) ] = -1,
  		[ C(RESULT_MISS)   ] = -1,
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = -1,
  		[ C(RESULT_MISS)   ] = -1,
  	},
   },
   [ C(BPU ) ] = {
  	[ C(OP_READ) ] = {
  		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
  		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
  	},
  	[ C(OP_WRITE) ] = {
  		[ C(RESULT_ACCESS) ] = -1,
  		[ C(RESULT_MISS)   ] = -1,
  	},
  	[ C(OP_PREFETCH) ] = {
  		[ C(RESULT_ACCESS) ] = -1,
  		[ C(RESULT_MISS)   ] = -1,
  	},
   },
  };
  static void intel_pmu_disable_all(void)
  {
  	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
  
  	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
  
  	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
  		intel_pmu_disable_bts();
  
  	intel_pmu_pebs_disable_all();
  	intel_pmu_lbr_disable_all();
  }
  static void intel_pmu_enable_all(int added)
  {
  	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
  	intel_pmu_pebs_enable_all();
  	intel_pmu_lbr_enable_all();
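	/*
	 * MSR_CORE_PERF_GLOBAL_CTRL gates every counter; x86_pmu.intel_ctrl
	 * carries the enable bits for the general-purpose and fixed counters.
	 */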
  	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
  
  	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
  		struct perf_event *event =
  			cpuc->events[X86_PMC_IDX_FIXED_BTS];
  
  		if (WARN_ON_ONCE(!event))
  			return;
  
  		intel_pmu_enable_bts(event->hw.config);
  	}
  }
  /*
   * Workaround for:
   *   Intel Errata AAK100 (model 26)
   *   Intel Errata AAP53  (model 30)
   *   Intel Errata BD53   (model 44)
   *
   * The official story:
   *   These chips need to be 'reset' when adding counters by programming the
   *   magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
   *   in sequence on the same PMC or on different PMCs.
   *
 * In practice it appears some of these events do in fact count, and
 * we need to program all 4 events.
   */
  static void intel_pmu_nhm_workaround(void)
  {
  	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
  	static const unsigned long nhm_magic[4] = {
  		0x4300B5,
  		0x4300D2,
  		0x4300B1,
  		0x4300B1
  	};
  	struct perf_event *event;
  	int i;

	/*
	 * The errata requires the following steps:
	 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
	 * 2) Configure 4 PERFEVTSELx with the magic events and clear
	 *    the corresponding PMCx;
	 * 3) Set bits 0-3 of MSR_CORE_PERF_GLOBAL_CTRL;
	 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
	 * 5) Clear the 4 pairs of PERFEVTSELx and PMCx;
	 */

	/*
	 * The steps we actually take differ slightly from the above:
	 * A) To reduce MSR operations, we skip step 1) since those MSRs
	 *    are already cleared before this function is called;
	 * B) Call x86_perf_event_update to save PMCx before configuring
	 *    PERFEVTSELx with the magic numbers;
	 * C) For step 5), only clear a PERFEVTSELx that is not currently
	 *    in use;
	 * D) Call x86_perf_event_set_period to restore PMCx.
	 */

	/* We always operate on 4 pairs of performance counters */
  	for (i = 0; i < 4; i++) {
  		event = cpuc->events[i];
  		if (event)
  			x86_perf_event_update(event);
  	}

  	for (i = 0; i < 4; i++) {
  		wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
  		wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
  	}
  
  	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
  	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);

  	for (i = 0; i < 4; i++) {
  		event = cpuc->events[i];
  
  		if (event) {
  			x86_perf_event_set_period(event);
  			__x86_pmu_enable_event(&event->hw,
  					ARCH_PERFMON_EVENTSEL_ENABLE);
  		} else
  			wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
  	}
  }
  
  static void intel_pmu_nhm_enable_all(int added)
  {
  	if (added)
  		intel_pmu_nhm_workaround();
  	intel_pmu_enable_all(added);
  }
  static inline u64 intel_pmu_get_status(void)
  {
  	u64 status;
  
  	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
  
  	return status;
  }
  
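/*
 * Writing the overflow bitmask back to MSR_CORE_PERF_GLOBAL_OVF_CTRL
 * clears the corresponding bits in MSR_CORE_PERF_GLOBAL_STATUS.
 */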
  static inline void intel_pmu_ack_status(u64 ack)
  {
  	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
  }
  static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
  {
  	int idx = hwc->idx - X86_PMC_IDX_FIXED;
  	u64 ctrl_val, mask;
  
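	/*
	 * Each fixed counter owns a 4-bit control field in
	 * MSR_ARCH_PERFMON_FIXED_CTR_CTRL (hwc->config_base here);
	 * clearing that field stops the counter.
	 */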
  	mask = 0xfULL << (idx * 4);
  
  	rdmsrl(hwc->config_base, ctrl_val);
  	ctrl_val &= ~mask;
  	wrmsrl(hwc->config_base, ctrl_val);
  }
  static void intel_pmu_disable_event(struct perf_event *event)
  {
  	struct hw_perf_event *hwc = &event->hw;
  
  	if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
  		intel_pmu_disable_bts();
  		intel_pmu_drain_bts_buffer();
  		return;
  	}
  
  	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
  		intel_pmu_disable_fixed(hwc);
  		return;
  	}
  	x86_pmu_disable_event(event);

  	if (unlikely(event->attr.precise_ip))
  		intel_pmu_pebs_disable(event);
  }
  static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
  {
  	int idx = hwc->idx - X86_PMC_IDX_FIXED;
  	u64 ctrl_val, bits, mask;
  
  	/*
  	 * Enable IRQ generation (0x8),
  	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
  	 * if requested:
  	 */
  	bits = 0x8ULL;
  	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
  		bits |= 0x2;
  	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
  		bits |= 0x1;
  
  	/*
  	 * ANY bit is supported in v3 and up
  	 */
  	if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
  		bits |= 0x4;
  
  	bits <<= (idx * 4);
  	mask = 0xfULL << (idx * 4);
  
  	rdmsrl(hwc->config_base, ctrl_val);
  	ctrl_val &= ~mask;
  	ctrl_val |= bits;
  	wrmsrl(hwc->config_base, ctrl_val);
  }
  static void intel_pmu_enable_event(struct perf_event *event)
  {
  	struct hw_perf_event *hwc = &event->hw;
  
  	if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
  		if (!__get_cpu_var(cpu_hw_events).enabled)
  			return;
  
  		intel_pmu_enable_bts(hwc->config);
  		return;
  	}
  
  	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
  		intel_pmu_enable_fixed(hwc);
  		return;
  	}
  	if (unlikely(event->attr.precise_ip))
  		intel_pmu_pebs_enable(event);

  	__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
  }
  
  /*
   * Save and restart an expired event. Called by NMI contexts,
   * so it has to be careful about preempting normal event ops:
   */
  static int intel_pmu_save_and_restart(struct perf_event *event)
  {
  	x86_perf_event_update(event);
  	return x86_perf_event_set_period(event);
  }
  
  static void intel_pmu_reset(void)
  {
  	struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
  	unsigned long flags;
  	int idx;
  	if (!x86_pmu.num_counters)
  		return;
  
  	local_irq_save(flags);
  
  	printk("clearing PMU state on CPU#%d
  ", smp_processor_id());
  	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
  		checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
  		checking_wrmsrl(x86_pmu.perfctr  + idx, 0ull);
  	}
  	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
  		checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);

  	if (ds)
  		ds->bts_index = ds->bts_buffer_base;
  
  	local_irq_restore(flags);
  }
  
  /*
   * This handler is triggered by the local APIC, so the APIC IRQ handling
   * rules apply:
   */
  static int intel_pmu_handle_irq(struct pt_regs *regs)
  {
  	struct perf_sample_data data;
  	struct cpu_hw_events *cpuc;
  	int bit, loops;
  	u64 status;
  	int handled;

  	perf_sample_data_init(&data, 0);
  
  	cpuc = &__get_cpu_var(cpu_hw_events);
  	intel_pmu_disable_all();
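	/*
	 * Drain any pending BTS records first; a non-empty BTS buffer
	 * counts as handled work for this NMI.
	 */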
  	handled = intel_pmu_drain_bts_buffer();
  	status = intel_pmu_get_status();
  	if (!status) {
  		intel_pmu_enable_all(0);
  		return handled;
  	}
  
  	loops = 0;
  again:
  	intel_pmu_ack_status(status);
  	if (++loops > 100) {
  		WARN_ONCE(1, "perfevents: irq loop stuck!
  ");
  		perf_event_print_debug();
  		intel_pmu_reset();
  		goto done;
  	}
  
  	inc_irq_stat(apic_perf_irqs);

  	intel_pmu_lbr_read();
  	/*
  	 * PEBS overflow sets bit 62 in the global status register
  	 */
  	if (__test_and_clear_bit(62, (unsigned long *)&status)) {
  		handled++;
  		x86_pmu.drain_pebs(regs);
  	}

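	/*
	 * Each remaining set bit in the status word identifies an
	 * overflowed counter: fold in its count and re-arm the period.
	 */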
  	for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
  		struct perf_event *event = cpuc->events[bit];
  		handled++;
  		if (!test_bit(bit, cpuc->active_mask))
  			continue;
  
  		if (!intel_pmu_save_and_restart(event))
  			continue;
  
  		data.period = event->hw.last_period;
  
  		if (perf_event_overflow(event, 1, &data, regs))
  			x86_pmu_stop(event, 0);
  	}
  	/*
  	 * Repeat if there is more work to be done:
  	 */
  	status = intel_pmu_get_status();
  	if (status)
  		goto again;
  done:
  	intel_pmu_enable_all(0);
  	return handled;
  }
  static struct event_constraint *
  intel_bts_constraints(struct perf_event *event)
  {
  	struct hw_perf_event *hwc = &event->hw;
  	unsigned int hw_event, bts_event;

  	hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
  	bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);

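	/*
	 * BTS can stand in for a branch-instructions event with a sample
	 * period of 1; steer such events onto the BTS pseudo-counter.
	 */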
  	if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
  		return &bts_constraint;

  	return NULL;
  }
  
  static struct event_constraint *
  intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
  {
  	struct event_constraint *c;
  	c = intel_bts_constraints(event);
  	if (c)
  		return c;
  
  	c = intel_pebs_constraints(event);
  	if (c)
  		return c;
  
  	return x86_get_event_constraints(cpuc, event);
  }
  static int intel_pmu_hw_config(struct perf_event *event)
  {
  	int ret = x86_pmu_hw_config(event);
  
  	if (ret)
  		return ret;
  
  	if (event->attr.type != PERF_TYPE_RAW)
  		return 0;
  
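	/*
	 * ARCH_PERFMON_EVENTSEL_ANY counts across both SMT threads of a
	 * core; it exists only from architectural perfmon v3 onwards and
	 * is restricted when perf is paranoid about CPU events.
	 */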
  	if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
  		return 0;
  
  	if (x86_pmu.version < 3)
  		return -EINVAL;
  
  	if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
  		return -EACCES;
  
  	event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;
  
  	return 0;
  }
  static __initconst const struct x86_pmu core_pmu = {
  	.name			= "core",
  	.handle_irq		= x86_pmu_handle_irq,
  	.disable_all		= x86_pmu_disable_all,
  	.enable_all		= x86_pmu_enable_all,
  	.enable			= x86_pmu_enable_event,
  	.disable		= x86_pmu_disable_event,
  	.hw_config		= x86_pmu_hw_config,
  	.schedule_events	= x86_schedule_events,
  	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
  	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
  	.event_map		= intel_pmu_event_map,
  	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
  	.apic			= 1,
  	/*
  	 * Intel PMCs cannot be accessed sanely above 32 bit width,
  	 * so we install an artificial 1<<31 period regardless of
  	 * the generic event period:
  	 */
  	.max_period		= (1ULL << 31) - 1,
  	.get_event_constraints	= intel_get_event_constraints,
  	.event_constraints	= intel_core_event_constraints,
  };
  static void intel_pmu_cpu_starting(int cpu)
  {
  	init_debug_store_on_cpu(cpu);
  	/*
  	 * Deal with CPUs that don't clear their LBRs on power-up.
  	 */
  	intel_pmu_lbr_reset();
  }
  
  static void intel_pmu_cpu_dying(int cpu)
  {
  	fini_debug_store_on_cpu(cpu);
  }
  static __initconst const struct x86_pmu intel_pmu = {
  	.name			= "Intel",
  	.handle_irq		= intel_pmu_handle_irq,
  	.disable_all		= intel_pmu_disable_all,
  	.enable_all		= intel_pmu_enable_all,
  	.enable			= intel_pmu_enable_event,
  	.disable		= intel_pmu_disable_event,
  	.hw_config		= intel_pmu_hw_config,
  	.schedule_events	= x86_schedule_events,
  	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
  	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
  	.event_map		= intel_pmu_event_map,
  	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
  	.apic			= 1,
  	/*
  	 * Intel PMCs cannot be accessed sanely above 32 bit width,
  	 * so we install an artificial 1<<31 period regardless of
  	 * the generic event period:
  	 */
  	.max_period		= (1ULL << 31) - 1,
  	.get_event_constraints	= intel_get_event_constraints,
  	.cpu_starting		= intel_pmu_cpu_starting,
  	.cpu_dying		= intel_pmu_cpu_dying,
  };
  static void intel_clovertown_quirks(void)
  {
  	/*
  	 * PEBS is unreliable due to:
  	 *
  	 *   AJ67  - PEBS may experience CPL leaks
  	 *   AJ68  - PEBS PMI may be delayed by one event
  	 *   AJ69  - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
  	 *   AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
  	 *
  	 * AJ67 could be worked around by restricting the OS/USR flags.
  	 * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
  	 *
  	 * AJ106 could possibly be worked around by not allowing LBR
  	 *       usage from PEBS, including the fixup.
  	 * AJ68  could possibly be worked around by always programming
  	 * 	 a pebs_event_reset[0] value and coping with the lost events.
  	 *
  	 * But taken together it might just make sense to not enable PEBS on
  	 * these chips.
  	 */
  	printk(KERN_WARNING "PEBS disabled due to CPU errata.
  ");
  	x86_pmu.pebs = 0;
  	x86_pmu.pebs_constraints = NULL;
  }
  static __init int intel_pmu_init(void)
  {
  	union cpuid10_edx edx;
  	union cpuid10_eax eax;
  	unsigned int unused;
  	unsigned int ebx;
  	int version;
  
  	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
  		switch (boot_cpu_data.x86) {
  		case 0x6:
  			return p6_pmu_init();
  		case 0xf:
  			return p4_pmu_init();
  		}
  		return -ENODEV;
  	}
  
  	/*
  	 * Check whether the Architectural PerfMon supports
  	 * Branch Misses Retired hw_event or not.
  	 */
  	cpuid(10, &eax.full, &ebx, &unused, &edx.full);
  	if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
  		return -ENODEV;
  
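	/*
	 * Architectural perfmon v1 lacks the global control/status MSRs,
	 * so it gets the simpler core_pmu; v2 and later use the full
	 * intel_pmu.
	 */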
  	version = eax.split.version_id;
  	if (version < 2)
  		x86_pmu = core_pmu;
  	else
  		x86_pmu = intel_pmu;
  
  	x86_pmu.version			= version;
  	x86_pmu.num_counters		= eax.split.num_counters;
  	x86_pmu.cntval_bits		= eax.split.bit_width;
  	x86_pmu.cntval_mask		= (1ULL << eax.split.bit_width) - 1;
  
  	/*
  	 * Quirk: v2 perfmon does not report fixed-purpose events, so
  	 * assume at least 3 events:
  	 */
  	if (version > 1)
  		x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);

  	/*
  	 * v2 and above have a perf capabilities MSR
  	 */
  	if (version > 1) {
  		u64 capabilities;
  
  		rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
  		x86_pmu.intel_cap.capabilities = capabilities;
  	}
  	intel_ds_init();
  	/*
  	 * Install the hw-cache-events table:
  	 */
  	switch (boot_cpu_data.x86_model) {
  	case 14: /* 65 nm core solo/duo, "Yonah" */
  		pr_cont("Core events, ");
  		break;
  
  	case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
  		x86_pmu.quirks = intel_clovertown_quirks;
  	case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
  	case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
  	case 29: /* six-core 45 nm xeon "Dunnington" */
  		memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
  		       sizeof(hw_cache_event_ids));
  		intel_pmu_lbr_init_core();
  		x86_pmu.event_constraints = intel_core2_event_constraints;
  		pr_cont("Core2 events, ");
  		break;
  
  	case 26: /* 45 nm nehalem, "Bloomfield" */
  	case 30: /* 45 nm nehalem, "Lynnfield" */
  	case 46: /* 45 nm nehalem-ex, "Beckton" */
  		memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
  		       sizeof(hw_cache_event_ids));
  		intel_pmu_lbr_init_nhm();
  		x86_pmu.event_constraints = intel_nehalem_event_constraints;
  		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
  		pr_cont("Nehalem events, ");
  		break;

  	case 28: /* Atom */
  		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
  		       sizeof(hw_cache_event_ids));
  		intel_pmu_lbr_init_atom();
  		x86_pmu.event_constraints = intel_gen_event_constraints;
  		pr_cont("Atom events, ");
  		break;
  
  	case 37: /* 32 nm nehalem, "Clarkdale" */
  	case 44: /* 32 nm nehalem, "Gulftown" */
  		memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
  		       sizeof(hw_cache_event_ids));
  		intel_pmu_lbr_init_nhm();
  		x86_pmu.event_constraints = intel_westmere_event_constraints;
  		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
  		pr_cont("Westmere events, ");
  		break;

  	default:
  		/*
  		 * default constraints for v2 and up
  		 */
  		x86_pmu.event_constraints = intel_gen_event_constraints;
  		pr_cont("generic architected perfmon, ");
  	}
  	return 0;
  }
  
  #else /* CONFIG_CPU_SUP_INTEL */
  
  static int intel_pmu_init(void)
  {
  	return 0;
  }
  
  #endif /* CONFIG_CPU_SUP_INTEL */