arch/x86/kernel/cpu/perf_event.c

/*
 * Performance events x86 architecture code
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *  Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 *  For licencing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/cpu.h>
#include <linux/bitops.h>

#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>
#include <asm/compat.h>
#include <asm/smp.h>
#include <asm/alternative.h>

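/*
 * Debug aid, normally compiled out: flipping the #if 0 below to #if 1
 * wraps wrmsrl() so that every PMU MSR write is logged with
 * trace_printk() before being issued via native_write_msr().
 */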
#if 0
#undef wrmsrl
#define wrmsrl(msr, val)					\
do {								\
	trace_printk("wrmsrl(%lx, %lx)\n", (unsigned long)(msr),\
			(unsigned long)(val));			\
	native_write_msr((msr), (u32)((u64)(val)),		\
			(u32)((u64)(val) >> 32));		\
} while (0)
#endif
/*
 *          |   NHM/WSM    |      SNB     |
 * register -------------------------------
 *          |  HT  | no HT |  HT  | no HT |
 *-----------------------------------------
 * offcore  | core | core  | cpu  | core  |
 * lbr_sel  | core | core  | cpu  | core  |
 * ld_lat   | cpu  | core  | cpu  | core  |
 *-----------------------------------------
 *
 * Given that there is a small number of shared regs,
 * we can pre-allocate their slot in the per-cpu
 * per-core reg tables.
 */
enum extra_reg_type {
	EXTRA_REG_NONE  = -1,	/* not used */

	EXTRA_REG_RSP_0 = 0,	/* offcore_response_0 */
	EXTRA_REG_RSP_1 = 1,	/* offcore_response_1 */

	EXTRA_REG_MAX		/* number of entries needed */
};

/*
 * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
 */
static unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
{
	unsigned long offset, addr = (unsigned long)from;
	unsigned long size, len = 0;
	struct page *page;
	void *map;
	int ret;

	do {
		ret = __get_user_pages_fast(addr, 1, 0, &page);
		if (!ret)
			break;

		offset = addr & (PAGE_SIZE - 1);
		size = min(PAGE_SIZE - offset, n - len);

		map = kmap_atomic(page);
		memcpy(to, map+offset, size);
		kunmap_atomic(map);
		put_page(page);

		len  += size;
		to   += size;
		addr += size;

	} while (len < n);

	return len;
}
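
/*
 * An event_constraint limits which counters an event may use: events
 * whose config matches 'code' under 'cmask' may only be scheduled on
 * the counters set in 'idxmsk'; 'weight' caches the popcount of
 * 'idxmsk' so the scheduler can place the most constrained events
 * first.
 */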
struct event_constraint {
	union {
		unsigned long	idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
		u64		idxmsk64;
	};
	u64	code;
	u64	cmask;
	int	weight;
};

struct amd_nb {
	int nb_id;  /* NorthBridge id */
	int refcnt; /* reference count */
	struct perf_event *owners[X86_PMC_IDX_MAX];
	struct event_constraint event_constraints[X86_PMC_IDX_MAX];
};

struct intel_percore;

#define MAX_LBR_ENTRIES		16

struct cpu_hw_events {
	/*
	 * Generic x86 PMC bits
	 */
	struct perf_event	*events[X86_PMC_IDX_MAX]; /* in counter order */
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	int			enabled;

	int			n_events;
	int			n_added;
	int			n_txn;
	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
	u64			tags[X86_PMC_IDX_MAX];
	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */

	unsigned int		group_flag;

	/*
	 * Intel DebugStore bits
	 */
	struct debug_store	*ds;
	u64			pebs_enabled;

	/*
	 * Intel LBR bits
	 */
	int				lbr_users;
	void				*lbr_context;
	struct perf_branch_stack	lbr_stack;
	struct perf_branch_entry	lbr_entries[MAX_LBR_ENTRIES];

	/*
	 * manage shared (per-core, per-cpu) registers
	 * used on Intel NHM/WSM/SNB
	 */
	struct intel_shared_regs	*shared_regs;

	/*
	 * AMD specific bits
	 */
	struct amd_nb		*amd_nb;
};
#define __EVENT_CONSTRAINT(c, n, m, w) {\
	{ .idxmsk64 = (n) },		\
	.code = (c),			\
	.cmask = (m),			\
	.weight = (w),			\
}

#define EVENT_CONSTRAINT(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n))

/*
 * Constraint on the Event code.
 */
#define INTEL_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)

/*
 * Constraint on the Event code + UMask + fixed-mask
 *
 * filter mask to validate fixed counter events.
 * the following filters disqualify for fixed counters:
 *  - inv
 *  - edge
 *  - cnt-mask
 *  The other filters are supported by fixed counters.
 *  The any-thread option is supported starting with v3.
 */
#define FIXED_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, (1ULL << (32+n)), X86_RAW_EVENT_MASK)

/*
 * Constraint on the Event code + UMask
 */
#define INTEL_UEVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)

#define EVENT_CONSTRAINT_END		\
	EVENT_CONSTRAINT(0, 0, 0)

#define for_each_event_constraint(e, c)	\
	for ((e) = (c); (e)->weight; (e)++)

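/*
 * Illustrative use (values are hypothetical): a constraint declared as
 *
 *	INTEL_EVENT_CONSTRAINT(0xc0, 0x3)
 *
 * would restrict events whose event-select code is 0xc0 to generic
 * counters 0 and 1, since the 0x3 counter bitmask covers those two
 * counters and HWEIGHT(0x3) gives the constraint a weight of 2.
 */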
/*
 * Per register state.
 */
struct er_account {
	raw_spinlock_t		lock;	/* per-core: protect structure */
	u64			config;	/* extra MSR config */
	u64			reg;	/* extra MSR number */
	atomic_t		ref;	/* reference count */
};

/*
 * Extra registers for specific events.
 *
 * Some events need large masks and require external MSRs.
 * Those extra MSRs end up being shared for all events on
 * a PMU and sometimes between PMU of sibling HT threads.
 * In either case, the kernel needs to handle conflicting
 * accesses to those extra, shared, regs. The data structure
 * to manage those registers is stored in cpu_hw_event.
 */
struct extra_reg {
	unsigned int		event;
	unsigned int		msr;
	u64			config_mask;
	u64			valid_mask;
	int			idx;  /* per_xxx->regs[] reg index */
};

#define EVENT_EXTRA_REG(e, ms, m, vm, i) {	\
	.event = (e),		\
	.msr = (ms),		\
	.config_mask = (m),	\
	.valid_mask = (vm),	\
	.idx = EXTRA_REG_##i	\
	}

#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)	\
	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)

#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)

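/*
 * Hardware capability bits; on Intel this union is filled from the
 * IA32_PERF_CAPABILITIES MSR by the model-specific init code (an
 * assumption based on how the fields below are named and used).
 */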
union perf_capabilities {
	struct {
		u64	lbr_format    : 6;
		u64	pebs_trap     : 1;
		u64	pebs_arch_reg : 1;
		u64	pebs_format   : 4;
		u64	smm_freeze    : 1;
	};
	u64	capabilities;
};
/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	/*
	 * Generic x86 PMC bits
	 */
	const char	*name;
	int		version;
	int		(*handle_irq)(struct pt_regs *);
	void		(*disable_all)(void);
	void		(*enable_all)(int added);
	void		(*enable)(struct perf_event *);
	void		(*disable)(struct perf_event *);
	int		(*hw_config)(struct perf_event *event);
	int		(*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
	unsigned	eventsel;
	unsigned	perfctr;
	u64		(*event_map)(int);
	int		max_events;
	int		num_counters;
	int		num_counters_fixed;
	int		cntval_bits;
	u64		cntval_mask;
	int		apic;
	u64		max_period;
	struct event_constraint *
			(*get_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);

	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);
	struct event_constraint *event_constraints;
	void		(*quirks)(void);
	int		perfctr_second_write;

	int		(*cpu_prepare)(int cpu);
	void		(*cpu_starting)(int cpu);
	void		(*cpu_dying)(int cpu);
	void		(*cpu_dead)(int cpu);

	/*
	 * Intel Arch Perfmon v2+
	 */
	u64			intel_ctrl;
	union perf_capabilities intel_cap;

	/*
	 * Intel DebugStore bits
	 */
	int		bts, pebs;
	int		bts_active, pebs_active;
	int		pebs_record_size;
	void		(*drain_pebs)(struct pt_regs *regs);
	struct event_constraint *pebs_constraints;

	/*
	 * Intel LBR
	 */
	unsigned long	lbr_tos, lbr_from, lbr_to; /* MSR base regs       */
	int		lbr_nr;			   /* hardware stack size */

	/*
	 * Extra registers for events
	 */
	struct extra_reg *extra_regs;
	unsigned int er_flags;
};
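
/*
 * er_flags bits: ERF_NO_HT_SHARING marks PMUs whose extra registers
 * must not be shared between HT sibling threads; ERF_HAS_RSP_1 marks
 * PMUs that also provide the second offcore response register.
 */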
#define ERF_NO_HT_SHARING	1
#define ERF_HAS_RSP_1		2

static struct x86_pmu x86_pmu __read_mostly;

static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
	.enabled = 1,
};

static int x86_perf_event_set_period(struct perf_event *event);

/*
 * Generalized hw caching related hw_event table, filled
 * in on a per model basis. A value of 0 means
 * 'not supported', -1 means 'hw_event makes no sense on
 * this CPU', any other value means the raw hw_event
 * ID.
 */

#define C(x) PERF_COUNT_HW_CACHE_##x

static u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
static u64 __read_mostly hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

/*
 * Propagate event elapsed time into the generic event.
 * Can only be executed on the CPU where the event is active.
 * Returns the delta events processed.
 */
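/*
 * Worked example (illustrative): with cntval_bits == 48 the shift
 * below is 16. Shifting both raw counts up by 16 and the computed
 * delta back down arithmetically sign-extends the 48-bit hardware
 * values, so the delta stays correct across counter wrap-around.
 */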
static u64
x86_perf_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int shift = 64 - x86_pmu.cntval_bits;
	u64 prev_raw_count, new_raw_count;
	int idx = hwc->idx;
	s64 delta;

	if (idx == X86_PMC_IDX_FIXED_BTS)
		return 0;

	/*
	 * Careful: an NMI might modify the previous event value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic event atomically:
	 */
again:
	prev_raw_count = local64_read(&hwc->prev_count);
	rdmsrl(hwc->event_base, new_raw_count);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
					new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (event-)time and add that to the generic event.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}
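
/*
 * On AMD CPUs with X86_FEATURE_PERFCTR_CORE the event-select/counter
 * MSR pairs are interleaved, so the MSR offset for counter 'index' is
 * index * 2; elsewhere it is simply 'index'. The alternative_io()
 * below patches the right variant in at boot.
 */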
static inline int x86_pmu_addr_offset(int index)
{
	int offset;

	/* offset = X86_FEATURE_PERFCTR_CORE ? index << 1 : index */
	alternative_io(ASM_NOP2,
		       "shll $1, %%eax",
		       X86_FEATURE_PERFCTR_CORE,
		       "=a" (offset),
		       "a" (index));

	return offset;
}

static inline unsigned int x86_pmu_config_addr(int index)
{
	return x86_pmu.eventsel + x86_pmu_addr_offset(index);
}

static inline unsigned int x86_pmu_event_addr(int index)
{
	return x86_pmu.perfctr + x86_pmu_addr_offset(index);
}
/*
 * Find and validate any extra registers to set up.
 */
static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
{
	struct hw_perf_event_extra *reg;
	struct extra_reg *er;

	reg = &event->hw.extra_reg;

	if (!x86_pmu.extra_regs)
		return 0;

	for (er = x86_pmu.extra_regs; er->msr; er++) {
		if (er->event != (config & er->config_mask))
			continue;
		if (event->attr.config1 & ~er->valid_mask)
			return -EINVAL;

		reg->idx = er->idx;
		reg->config = event->attr.config1;
		reg->reg = er->msr;
		break;
	}
	return 0;
}
static atomic_t active_events;
static DEFINE_MUTEX(pmc_reserve_mutex);

#ifdef CONFIG_X86_LOCAL_APIC

static bool reserve_pmc_hardware(void)
{
	int i;

	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (!reserve_perfctr_nmi(x86_pmu_event_addr(i)))
			goto perfctr_fail;
	}

	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (!reserve_evntsel_nmi(x86_pmu_config_addr(i)))
			goto eventsel_fail;
	}

	return true;

eventsel_fail:
	for (i--; i >= 0; i--)
		release_evntsel_nmi(x86_pmu_config_addr(i));

	i = x86_pmu.num_counters;

perfctr_fail:
	for (i--; i >= 0; i--)
		release_perfctr_nmi(x86_pmu_event_addr(i));

	return false;
}

static void release_pmc_hardware(void)
{
	int i;

	for (i = 0; i < x86_pmu.num_counters; i++) {
		release_perfctr_nmi(x86_pmu_event_addr(i));
		release_evntsel_nmi(x86_pmu_config_addr(i));
	}
}

#else

static bool reserve_pmc_hardware(void) { return true; }
static void release_pmc_hardware(void) {}

#endif
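
/*
 * Probe for a working PMU: refuse counters the BIOS has already
 * claimed, and detect emulated hardware (e.g. qemu/kvm) whose counter
 * MSRs silently ignore writes.
 */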
static bool check_hw_exists(void)
{
	u64 val, val_new = 0;
	int i, reg, ret = 0;

	/*
	 * Check to see if the BIOS enabled any of the counters, if so
	 * complain and bail.
	 */
	for (i = 0; i < x86_pmu.num_counters; i++) {
		reg = x86_pmu_config_addr(i);
		ret = rdmsrl_safe(reg, &val);
		if (ret)
			goto msr_fail;
		if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
			goto bios_fail;
	}

	if (x86_pmu.num_counters_fixed) {
		reg = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
		ret = rdmsrl_safe(reg, &val);
		if (ret)
			goto msr_fail;
		for (i = 0; i < x86_pmu.num_counters_fixed; i++) {
			if (val & (0x03 << i*4))
				goto bios_fail;
		}
	}

	/*
	 * Now write a value and read it back to see if it matches,
	 * this is needed to detect certain hardware emulators (qemu/kvm)
	 * that don't trap on the MSR access and always return 0s.
	 */
	val = 0xabcdUL;
	ret = checking_wrmsrl(x86_pmu_event_addr(0), val);
	ret |= rdmsrl_safe(x86_pmu_event_addr(0), &val_new);
	if (ret || val != val_new)
		goto msr_fail;

	return true;

bios_fail:
	/*
	 * We still allow the PMU driver to operate:
	 */
	printk(KERN_CONT "Broken BIOS detected, complain to your hardware vendor.\n");
	printk(KERN_ERR FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n", reg, val);

	return true;

msr_fail:
	printk(KERN_CONT "Broken PMU hardware detected, using software events only.\n");

	return false;
}
static void reserve_ds_buffers(void);
static void release_ds_buffers(void);

static void hw_perf_event_destroy(struct perf_event *event)
{
	if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
		release_pmc_hardware();
		release_ds_buffers();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

static inline int x86_pmu_initialized(void)
{
	return x86_pmu.handle_irq != NULL;
}
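
/*
 * PERF_TYPE_HW_CACHE events encode cache id, op and result in the
 * three low bytes of attr->config; each field is bounds-checked and
 * mapped through the model-specific hw_cache_event_ids table.
 */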
static inline int
set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	unsigned int cache_type, cache_op, cache_result;
	u64 config, val;

	config = attr->config;

	cache_type = (config >>  0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >>  8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	val = hw_cache_event_ids[cache_type][cache_op][cache_result];

	if (val == 0)
		return -ENOENT;

	if (val == -1)
		return -EINVAL;

	hwc->config |= val;
	attr->config1 = hw_cache_extra_regs[cache_type][cache_op][cache_result];
	return x86_pmu_extra_regs(val, event);
}
static int x86_setup_perfctr(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	u64 config;

	if (!is_sampling_event(event)) {
		hwc->sample_period = x86_pmu.max_period;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	} else {
		/*
		 * If we have a PMU initialized but no APIC
		 * interrupts, we cannot sample hardware
		 * events (user-space has to fall back and
		 * sample via a hrtimer based software event):
		 */
		if (!x86_pmu.apic)
			return -EOPNOTSUPP;
	}

	/*
	 * Do not allow config1 (extended registers) to propagate,
	 * there's no sane user-space generalization yet:
	 */
	if (attr->type == PERF_TYPE_RAW)
		return 0;

	if (attr->type == PERF_TYPE_HW_CACHE)
		return set_ext_hw_attr(hwc, event);

	if (attr->config >= x86_pmu.max_events)
		return -EINVAL;

	/*
	 * The generic map:
	 */
	config = x86_pmu.event_map(attr->config);

	if (config == 0)
		return -ENOENT;

	if (config == -1LL)
		return -EINVAL;

	/*
	 * Branch tracing:
	 */
	if (attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
	    !attr->freq && hwc->sample_period == 1) {
		/* BTS is not supported by this architecture. */
		if (!x86_pmu.bts_active)
			return -EOPNOTSUPP;

		/* BTS is currently only allowed for user-mode. */
		if (!attr->exclude_kernel)
			return -EOPNOTSUPP;
	}

	hwc->config |= config;

	return 0;
}

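/*
 * attr->precise_ip levels: 1 needs PEBS (constant skid); 2 in
 * addition needs the LBR for instruction-pointer fixup.
 */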
static int x86_pmu_hw_config(struct perf_event *event)
{
	if (event->attr.precise_ip) {
		int precise = 0;

		/* Support for constant skid */
		if (x86_pmu.pebs_active) {
			precise++;

			/* Support for IP fixup */
			if (x86_pmu.lbr_nr)
				precise++;
		}

		if (event->attr.precise_ip > precise)
			return -EOPNOTSUPP;
	}

	/*
	 * Generate PMC IRQs:
	 * (keep 'enabled' bit clear for now)
	 */
	event->hw.config = ARCH_PERFMON_EVENTSEL_INT;

	/*
	 * Count user and OS events unless requested not to
	 */
	if (!event->attr.exclude_user)
		event->hw.config |= ARCH_PERFMON_EVENTSEL_USR;
	if (!event->attr.exclude_kernel)
		event->hw.config |= ARCH_PERFMON_EVENTSEL_OS;

	if (event->attr.type == PERF_TYPE_RAW)
		event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;

	return x86_setup_perfctr(event);
}
/*
 * Setup the hardware configuration for a given attr_type
 */
static int __x86_pmu_event_init(struct perf_event *event)
{
	int err;

	if (!x86_pmu_initialized())
		return -ENODEV;

	err = 0;
	if (!atomic_inc_not_zero(&active_events)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&active_events) == 0) {
			if (!reserve_pmc_hardware())
				err = -EBUSY;
			else
				reserve_ds_buffers();
		}
		if (!err)
			atomic_inc(&active_events);
		mutex_unlock(&pmc_reserve_mutex);
	}
	if (err)
		return err;

	event->destroy = hw_perf_event_destroy;

	event->hw.idx = -1;
	event->hw.last_cpu = -1;
	event->hw.last_tag = ~0ULL;

	/* mark unused */
	event->hw.extra_reg.idx = EXTRA_REG_NONE;

	return x86_pmu.hw_config(event);
}
static void x86_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		rdmsrl(x86_pmu_config_addr(idx), val);
		if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
			continue;
		val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
		wrmsrl(x86_pmu_config_addr(idx), val);
	}
}

static void x86_pmu_disable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!x86_pmu_initialized())
		return;

	if (!cpuc->enabled)
		return;

	cpuc->n_added = 0;
	cpuc->enabled = 0;
	barrier();

	x86_pmu.disable_all();
}

static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
					  u64 enable_mask)
{
	if (hwc->extra_reg.reg)
		wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
	wrmsrl(hwc->config_base, hwc->config | enable_mask);
}

static void x86_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		struct hw_perf_event *hwc = &cpuc->events[idx]->hw;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
	}
}

static struct pmu pmu;
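
/*
 * x86_schedule_events() below maps events to counters: a fastpath
 * first tries to keep every event on the counter it used last time;
 * failing that, the slow path assigns events in order of increasing
 * constraint weight, so the most constrained events pick first.
 */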
static inline int is_x86_event(struct perf_event *event)
{
	return event->pmu == &pmu;
}

static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{
	struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
	unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	int i, j, w, wmax, num = 0;
	struct hw_perf_event *hwc;

	bitmap_zero(used_mask, X86_PMC_IDX_MAX);

	for (i = 0; i < n; i++) {
		c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
		constraints[i] = c;
	}

	/*
	 * fastpath, try to reuse previous register
	 */
	for (i = 0; i < n; i++) {
		hwc = &cpuc->event_list[i]->hw;
		c = constraints[i];

		/* never assigned */
		if (hwc->idx == -1)
			break;

		/* constraint still honored */
		if (!test_bit(hwc->idx, c->idxmsk))
			break;

		/* not already used */
		if (test_bit(hwc->idx, used_mask))
			break;

		__set_bit(hwc->idx, used_mask);
		if (assign)
			assign[i] = hwc->idx;
	}
	if (i == n)
		goto done;

	/*
	 * begin slow path
	 */

	bitmap_zero(used_mask, X86_PMC_IDX_MAX);

	/*
	 * weight = number of possible counters
	 *
	 * 1    = most constrained, only works on one counter
	 * wmax = least constrained, works on any counter
	 *
	 * assign events to counters starting with most
	 * constrained events.
	 */
	wmax = x86_pmu.num_counters;

	/*
	 * when fixed event counters are present,
	 * wmax is incremented by 1 to account
	 * for one more choice
	 */
	if (x86_pmu.num_counters_fixed)
		wmax++;

	for (w = 1, num = n; num && w <= wmax; w++) {
		/* for each event */
		for (i = 0; num && i < n; i++) {
			c = constraints[i];
			hwc = &cpuc->event_list[i]->hw;

			if (c->weight != w)
				continue;

			for_each_set_bit(j, c->idxmsk, X86_PMC_IDX_MAX) {
				if (!test_bit(j, used_mask))
					break;
			}

			if (j == X86_PMC_IDX_MAX)
				break;

			__set_bit(j, used_mask);

			if (assign)
				assign[i] = j;
			num--;
		}
	}
done:
	/*
	 * scheduling failed or is just a simulation,
	 * free resources if necessary
	 */
	if (!assign || num) {
		for (i = 0; i < n; i++) {
			if (x86_pmu.put_event_constraints)
				x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
		}
	}
	return num ? -ENOSPC : 0;
}

/*
 * dogrp: true if must collect siblings events (group)
 * returns total number of events and error code
 */
static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
{
	struct perf_event *event;
	int n, max_count;

	max_count = x86_pmu.num_counters + x86_pmu.num_counters_fixed;

	/* current number of events already accepted */
	n = cpuc->n_events;

	if (is_x86_event(leader)) {
		if (n >= max_count)
			return -ENOSPC;
		cpuc->event_list[n] = leader;
		n++;
	}
	if (!dogrp)
		return n;

	list_for_each_entry(event, &leader->sibling_list, group_entry) {
		if (!is_x86_event(event) ||
		    event->state <= PERF_EVENT_STATE_OFF)
			continue;

		if (n >= max_count)
			return -ENOSPC;

		cpuc->event_list[n] = event;
		n++;
	}
	return n;
}
static inline void x86_assign_hw_event(struct perf_event *event,
				struct cpu_hw_events *cpuc, int i)
{
	struct hw_perf_event *hwc = &event->hw;

	hwc->idx = cpuc->assign[i];
	hwc->last_cpu = smp_processor_id();
	hwc->last_tag = ++cpuc->tags[i];

	if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
		hwc->config_base = 0;
		hwc->event_base	= 0;
	} else if (hwc->idx >= X86_PMC_IDX_FIXED) {
		hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
		hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - X86_PMC_IDX_FIXED);
	} else {
		hwc->config_base = x86_pmu_config_addr(hwc->idx);
		hwc->event_base  = x86_pmu_event_addr(hwc->idx);
	}
}

static inline int match_prev_assignment(struct hw_perf_event *hwc,
					struct cpu_hw_events *cpuc,
					int i)
{
	return hwc->idx == cpuc->assign[i] &&
		hwc->last_cpu == smp_processor_id() &&
		hwc->last_tag == cpuc->tags[i];
}

static void x86_pmu_start(struct perf_event *event, int flags);
static void x86_pmu_stop(struct perf_event *event, int flags);

static void x86_pmu_enable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct perf_event *event;
	struct hw_perf_event *hwc;
	int i, added = cpuc->n_added;

	if (!x86_pmu_initialized())
		return;

	if (cpuc->enabled)
		return;

	if (cpuc->n_added) {
		int n_running = cpuc->n_events - cpuc->n_added;
		/*
		 * apply assignment obtained either from
		 * hw_perf_group_sched_in() or x86_pmu_enable()
		 *
		 * step1: save events moving to new counters
		 * step2: reprogram moved events into new counters
		 */
		for (i = 0; i < n_running; i++) {
			event = cpuc->event_list[i];
			hwc = &event->hw;

			/*
			 * we can avoid reprogramming counter if:
			 * - assigned same counter as last time
			 * - running on same CPU as last time
			 * - no other event has used the counter since
			 */
			if (hwc->idx == -1 ||
			    match_prev_assignment(hwc, cpuc, i))
				continue;

			/*
			 * Ensure we don't accidentally enable a stopped
			 * counter simply because we rescheduled.
			 */
			if (hwc->state & PERF_HES_STOPPED)
				hwc->state |= PERF_HES_ARCH;

			x86_pmu_stop(event, PERF_EF_UPDATE);
		}

		for (i = 0; i < cpuc->n_events; i++) {
			event = cpuc->event_list[i];
			hwc = &event->hw;

			if (!match_prev_assignment(hwc, cpuc, i))
				x86_assign_hw_event(event, cpuc, i);
			else if (i < n_running)
				continue;

			if (hwc->state & PERF_HES_ARCH)
				continue;

			x86_pmu_start(event, PERF_EF_RELOAD);
		}
		cpuc->n_added = 0;
		perf_events_lapic_init();
	}

	cpuc->enabled = 1;
	barrier();

	x86_pmu.enable_all(added);
}

static inline void x86_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
}

static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);

/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the event disabled in hw:
 */
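/*
 * Illustrative example: for a sample period of 10000 the counter is
 * programmed to -10000 (masked to the counter width), so the next
 * overflow interrupt fires after exactly 10000 more events.
 */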
static int
x86_perf_event_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0, idx = hwc->idx;

	if (idx == X86_PMC_IDX_FIXED_BTS)
		return 0;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}
	/*
	 * Quirk: certain CPUs don't like it if just 1 hw_event is left:
	 */
	if (unlikely(left < 2))
		left = 2;

	if (left > x86_pmu.max_period)
		left = x86_pmu.max_period;

	per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;

	/*
	 * The hw event starts counting from this event offset,
	 * mark it to be able to extract future deltas:
	 */
	local64_set(&hwc->prev_count, (u64)-left);

	wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);

	/*
	 * Due to an erratum on certain CPUs we need
	 * a second write to be sure the register
	 * is updated properly
	 */
	if (x86_pmu.perfctr_second_write) {
		wrmsrl(hwc->event_base,
			(u64)(-left) & x86_pmu.cntval_mask);
	}

	perf_event_update_userpage(event);

	return ret;
}

static void x86_pmu_enable_event(struct perf_event *event)
{
	if (__this_cpu_read(cpu_hw_events.enabled))
		__x86_pmu_enable_event(&event->hw,
				       ARCH_PERFMON_EVENTSEL_ENABLE);
}

/*
 * Add a single event to the PMU.
 *
 * The event is added to the group of enabled events
 * but only if it can be scheduled with existing events.
 */
static int x86_pmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc;
	int assign[X86_PMC_IDX_MAX];
	int n, n0, ret;

	hwc = &event->hw;

	perf_pmu_disable(event->pmu);
	n0 = cpuc->n_events;
	ret = n = collect_events(cpuc, event, false);
	if (ret < 0)
		goto out;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_ARCH;

	/*
	 * If group events scheduling transaction was started,
	 * skip the schedulability test here, it will be performed
	 * at commit time (->commit_txn) as a whole
	 */
	if (cpuc->group_flag & PERF_EVENT_TXN)
		goto done_collect;

	ret = x86_pmu.schedule_events(cpuc, n, assign);
	if (ret)
		goto out;
	/*
	 * copy new assignment, now we know it is possible
	 * will be used by hw_perf_enable()
	 */
	memcpy(cpuc->assign, assign, n*sizeof(int));

done_collect:
	cpuc->n_events = n;
	cpuc->n_added += n - n0;
	cpuc->n_txn += n - n0;

	ret = 0;
out:
	perf_pmu_enable(event->pmu);
	return ret;
}

static void x86_pmu_start(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx = event->hw.idx;

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	if (WARN_ON_ONCE(idx == -1))
		return;

	if (flags & PERF_EF_RELOAD) {
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
		x86_perf_event_set_period(event);
	}

	event->hw.state = 0;

	cpuc->events[idx] = event;
	__set_bit(idx, cpuc->active_mask);
	__set_bit(idx, cpuc->running);
	x86_pmu.enable(event);
	perf_event_update_userpage(event);
}

void perf_event_print_debug(void)
{
	u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
	u64 pebs;
	struct cpu_hw_events *cpuc;
	unsigned long flags;
	int cpu, idx;

	if (!x86_pmu.num_counters)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_events, cpu);

	if (x86_pmu.version >= 2) {
		rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
		rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
		rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
		rdmsrl(MSR_IA32_PEBS_ENABLE, pebs);

		pr_info("\n");
		pr_info("CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
		pr_info("CPU#%d: status:     %016llx\n", cpu, status);
		pr_info("CPU#%d: overflow:   %016llx\n", cpu, overflow);
		pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
		pr_info("CPU#%d: pebs:       %016llx\n", cpu, pebs);
	}
	pr_info("CPU#%d: active:     %016llx\n", cpu, *(u64 *)cpuc->active_mask);

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl);
		rdmsrl(x86_pmu_event_addr(idx), pmc_count);

		prev_left = per_cpu(pmc_prev_left[idx], cpu);

		pr_info("CPU#%d:   gen-PMC%d ctrl:  %016llx\n",
			cpu, idx, pmc_ctrl);
		pr_info("CPU#%d:   gen-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
		pr_info("CPU#%d:   gen-PMC%d left:  %016llx\n",
			cpu, idx, prev_left);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);

		pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
	}
	local_irq_restore(flags);
}

static void x86_pmu_stop(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
		x86_pmu.disable(event);
		cpuc->events[hwc->idx] = NULL;
		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of a event
		 * that we are disabling:
		 */
		x86_perf_event_update(event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

static void x86_pmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int i;

	/*
	 * If we're called during a txn, we don't need to do anything.
	 * The events never got scheduled and ->cancel_txn will truncate
	 * the event_list.
	 */
	if (cpuc->group_flag & PERF_EVENT_TXN)
		return;

	x86_pmu_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < cpuc->n_events; i++) {
		if (event == cpuc->event_list[i]) {

			if (x86_pmu.put_event_constraints)
				x86_pmu.put_event_constraints(cpuc, event);

			while (++i < cpuc->n_events)
				cpuc->event_list[i-1] = cpuc->event_list[i];

			--cpuc->n_events;
			break;
		}
	}
	perf_event_update_userpage(event);
}
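
/*
 * Generic PMI handler: fold each overflowed counter's delta into its
 * event, re-arm the sampling period and hand the sample to
 * perf_event_overflow().
 */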
static int x86_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct perf_event *event;
	int idx, handled = 0;
	u64 val;

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);

	/*
	 * Some chipsets need to unmask the LVTPC in a particular spot
	 * inside the nmi handler.  As a result, the unmasking was pushed
	 * into all the nmi handlers.
	 *
	 * This generic handler doesn't seem to have any issues where the
	 * unmasking occurs so it was left at the top.
	 */
	apic_write(APIC_LVTPC, APIC_DM_NMI);

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		if (!test_bit(idx, cpuc->active_mask)) {
			/*
			 * Though we deactivated the counter some cpus
			 * might still deliver spurious interrupts still
			 * in flight. Catch them:
			 */
			if (__test_and_clear_bit(idx, cpuc->running))
				handled++;
			continue;
		}

		event = cpuc->events[idx];

		val = x86_perf_event_update(event);
		if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
			continue;

		/*
		 * event overflow
		 */
		handled++;
		data.period	= event->hw.last_period;

		if (!x86_perf_event_set_period(event))
			continue;

		if (perf_event_overflow(event, &data, regs))
			x86_pmu_stop(event, 0);
	}

	if (handled)
		inc_irq_stat(apic_perf_irqs);

	return handled;
}

void perf_events_lapic_init(void)
{
	if (!x86_pmu.apic || !x86_pmu_initialized())
		return;

	/*
	 * Always use NMI for PMU
	 */
	apic_write(APIC_LVTPC, APIC_DM_NMI);
}

struct pmu_nmi_state {
	unsigned int	marked;
	int		handled;
};

static DEFINE_PER_CPU(struct pmu_nmi_state, pmu_nmi);

static int __kprobes
perf_event_nmi_handler(struct notifier_block *self,
			 unsigned long cmd, void *__args)
{
	struct die_args *args = __args;
	unsigned int this_nmi;
	int handled;

	if (!atomic_read(&active_events))
		return NOTIFY_DONE;

	switch (cmd) {
	case DIE_NMI:
		break;
	case DIE_NMIUNKNOWN:
		this_nmi = percpu_read(irq_stat.__nmi_count);
		if (this_nmi != __this_cpu_read(pmu_nmi.marked))
			/* let the kernel handle the unknown nmi */
			return NOTIFY_DONE;
		/*
		 * This one is a PMU back-to-back nmi. Two events
		 * trigger 'simultaneously' raising two back-to-back
		 * NMIs. If the first NMI handles both, the latter
		 * will be empty and daze the CPU. So, we drop it to
		 * avoid false-positive 'unknown nmi' messages.
		 */
		return NOTIFY_STOP;
	default:
		return NOTIFY_DONE;
	}

	handled = x86_pmu.handle_irq(args->regs);
	if (!handled)
		return NOTIFY_DONE;

	this_nmi = percpu_read(irq_stat.__nmi_count);
	if ((handled > 1) ||
		/* the next nmi could be a back-to-back nmi */
	    ((__this_cpu_read(pmu_nmi.marked) == this_nmi) &&
	     (__this_cpu_read(pmu_nmi.handled) > 1))) {
		/*
		 * We could have two subsequent back-to-back nmis: The
		 * first handles more than one counter, the 2nd
		 * handles only one counter and the 3rd handles no
		 * counter.
		 *
		 * This is the 2nd nmi because the previous was
		 * handling more than one counter. We will mark the
		 * next (3rd) and then drop it if unhandled.
		 */
		__this_cpu_write(pmu_nmi.marked, this_nmi + 1);
		__this_cpu_write(pmu_nmi.handled, handled);
	}

	return NOTIFY_STOP;
}

static __read_mostly struct notifier_block perf_event_nmi_notifier = {
	.notifier_call		= perf_event_nmi_handler,
	.next			= NULL,
	.priority		= NMI_LOCAL_LOW_PRIOR,
};

static struct event_constraint unconstrained;
static struct event_constraint emptyconstraint;

static struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct event_constraint *c;

	if (x86_pmu.event_constraints) {
		for_each_event_constraint(c, x86_pmu.event_constraints) {
			if ((event->hw.config & c->cmask) == c->code)
				return c;
		}
	}

	return &unconstrained;
}

#include "perf_event_amd.c"
#include "perf_event_p6.c"
#include "perf_event_p4.c"
#include "perf_event_intel_lbr.c"
#include "perf_event_intel_ds.c"
#include "perf_event_intel.c"

static int __cpuinit
x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;
	int ret = NOTIFY_OK;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		if (x86_pmu.cpu_prepare)
			ret = x86_pmu.cpu_prepare(cpu);
		break;

	case CPU_STARTING:
		if (x86_pmu.cpu_starting)
			x86_pmu.cpu_starting(cpu);
		break;

	case CPU_DYING:
		if (x86_pmu.cpu_dying)
			x86_pmu.cpu_dying(cpu);
		break;

	case CPU_UP_CANCELED:
	case CPU_DEAD:
		if (x86_pmu.cpu_dead)
			x86_pmu.cpu_dead(cpu);
		break;

	default:
		break;
	}

	return ret;
}

static void __init pmu_check_apic(void)
{
	if (cpu_has_apic)
		return;

	x86_pmu.apic = 0;
	pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
	pr_info("no hardware sampling interrupt available.\n");
}

static int __init init_hw_perf_events(void)
{
	struct event_constraint *c;
	int err;

	pr_info("Performance Events: ");

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		err = intel_pmu_init();
		break;
	case X86_VENDOR_AMD:
		err = amd_pmu_init();
		break;
	default:
		return 0;
	}
	if (err != 0) {
		pr_cont("no PMU driver, software events only.\n");
		return 0;
	}

	pmu_check_apic();

	/* sanity check that the hardware exists or is emulated */
	if (!check_hw_exists())
		return 0;

	pr_cont("%s PMU driver.\n", x86_pmu.name);

	if (x86_pmu.quirks)
		x86_pmu.quirks();

	if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
		     x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
		x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
	}
	x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;

	if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
		     x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
		x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
	}

	x86_pmu.intel_ctrl |=
		((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;

	perf_events_lapic_init();
	register_die_notifier(&perf_event_nmi_notifier);

	unconstrained = (struct event_constraint)
		__EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
				   0, x86_pmu.num_counters);

	if (x86_pmu.event_constraints) {
		for_each_event_constraint(c, x86_pmu.event_constraints) {
			if (c->cmask != X86_RAW_EVENT_MASK)
				continue;

			c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
			c->weight += x86_pmu.num_counters;
		}
	}

	pr_info("... version:                %d\n",     x86_pmu.version);
	pr_info("... bit width:              %d\n",     x86_pmu.cntval_bits);
	pr_info("... generic registers:      %d\n",     x86_pmu.num_counters);
	pr_info("... value mask:             %016Lx\n", x86_pmu.cntval_mask);
	pr_info("... max period:             %016Lx\n", x86_pmu.max_period);
	pr_info("... fixed-purpose events:   %d\n",     x86_pmu.num_counters_fixed);
	pr_info("... event mask:             %016Lx\n", x86_pmu.intel_ctrl);

	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
	perf_cpu_notifier(x86_pmu_notifier);

	return 0;
}
early_initcall(init_hw_perf_events);
621a01eac
|
1554 |
|
cdd6c482c
|
1555 |
static inline void x86_pmu_read(struct perf_event *event) |
ee06094f8
|
1556 |
{ |
cc2ad4ba8
|
1557 |
x86_perf_event_update(event); |
ee06094f8
|
1558 |
} |
4d1c52b02
|
1559 1560 1561 1562 1563 |
/* * Start group events scheduling transaction * Set the flag to make pmu::enable() not perform the * schedulability test, it will be performed at commit time */ |
51b0fe395
|
1564 |
static void x86_pmu_start_txn(struct pmu *pmu) |
4d1c52b02
|
1565 |
{ |
33696fc0d
|
1566 |
perf_pmu_disable(pmu); |
0a3aee0da
|
1567 1568 |
__this_cpu_or(cpu_hw_events.group_flag, PERF_EVENT_TXN); __this_cpu_write(cpu_hw_events.n_txn, 0); |
4d1c52b02
|
1569 1570 1571 1572 1573 1574 1575 |
} /* * Stop group events scheduling transaction * Clear the flag and pmu::enable() will perform the * schedulability test. */ |
51b0fe395
|
1576 |
static void x86_pmu_cancel_txn(struct pmu *pmu) |
4d1c52b02
|
1577 |
{ |
0a3aee0da
|
1578 |
__this_cpu_and(cpu_hw_events.group_flag, ~PERF_EVENT_TXN); |
90151c35b
|
1579 1580 1581 |
/* * Truncate the collected events. */ |
0a3aee0da
|
1582 1583 |
__this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn)); __this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn)); |
33696fc0d
|
1584 |
perf_pmu_enable(pmu); |
4d1c52b02
|
1585 1586 1587 1588 1589 1590 1591 |
} /* * Commit group events scheduling transaction * Perform the group schedulability test as a whole * Return 0 if success */ |
51b0fe395
|
1592 |
static int x86_pmu_commit_txn(struct pmu *pmu) |
4d1c52b02
|
1593 1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610 1611 |
{ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); int assign[X86_PMC_IDX_MAX]; int n, ret; n = cpuc->n_events; if (!x86_pmu_initialized()) return -EAGAIN; ret = x86_pmu.schedule_events(cpuc, n, assign); if (ret) return ret; /* * copy new assignment, now we know it is possible * will be used by hw_perf_enable() */ memcpy(cpuc->assign, assign, n*sizeof(int)); |
8d2cacbbb
|
1612 |
cpuc->group_flag &= ~PERF_EVENT_TXN; |
33696fc0d
|
1613 |
perf_pmu_enable(pmu); |
4d1c52b02
|
1614 1615 |
return 0; } |
cd8a38d33
|
1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637 1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 |
/* * a fake_cpuc is used to validate event groups. Due to * the extra reg logic, we need to also allocate a fake * per_core and per_cpu structure. Otherwise, group events * using extra reg may conflict without the kernel being * able to catch this when the last event gets added to * the group. */ static void free_fake_cpuc(struct cpu_hw_events *cpuc) { kfree(cpuc->shared_regs); kfree(cpuc); } static struct cpu_hw_events *allocate_fake_cpuc(void) { struct cpu_hw_events *cpuc; int cpu = raw_smp_processor_id(); cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL); if (!cpuc) return ERR_PTR(-ENOMEM); /* only needed, if we have extra_regs */ if (x86_pmu.extra_regs) { cpuc->shared_regs = allocate_shared_regs(cpu); if (!cpuc->shared_regs) goto error; } return cpuc; error: free_fake_cpuc(cpuc); return ERR_PTR(-ENOMEM); } |

/*
 * validate that we can schedule this event
 */
static int validate_event(struct perf_event *event)
{
	struct cpu_hw_events *fake_cpuc;
	struct event_constraint *c;
	int ret = 0;

	fake_cpuc = allocate_fake_cpuc();
	if (IS_ERR(fake_cpuc))
		return PTR_ERR(fake_cpuc);

	c = x86_pmu.get_event_constraints(fake_cpuc, event);

	if (!c || !c->weight)
		ret = -ENOSPC;

	if (x86_pmu.put_event_constraints)
		x86_pmu.put_event_constraints(fake_cpuc, event);

	free_fake_cpuc(fake_cpuc);

	return ret;
}

/*
 * validate a single event group
 *
 * validation includes:
 *	- checking that the events are compatible with each other
 *	- events do not compete for the same counter
 *	- number of events <= number of counters
 *
 * validation ensures the group can be loaded onto the
 * PMU if it was the only group available.
 */
static int validate_group(struct perf_event *event)
{
	struct perf_event *leader = event->group_leader;
	struct cpu_hw_events *fake_cpuc;
	int ret = -ENOSPC, n;

	fake_cpuc = allocate_fake_cpuc();
	if (IS_ERR(fake_cpuc))
		return PTR_ERR(fake_cpuc);
	/*
	 * The event is not yet connected with its
	 * siblings, therefore we must first collect
	 * existing siblings, then add the new event
	 * before we can simulate the scheduling.
	 */
	n = collect_events(fake_cpuc, leader, true);
	if (n < 0)
		goto out;

	fake_cpuc->n_events = n;
	n = collect_events(fake_cpuc, event, false);
	if (n < 0)
		goto out;

	fake_cpuc->n_events = n;

	ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);

out:
	free_fake_cpuc(fake_cpuc);
	return ret;
}
static int x86_pmu_event_init(struct perf_event *event)
{
	struct pmu *tmp;
	int err;

	switch (event->attr.type) {
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:
		break;

	default:
		return -ENOENT;
	}

	err = __x86_pmu_event_init(event);
	if (!err) {
		/*
		 * We temporarily connect the event to its pmu
		 * such that validate_group() can classify
		 * it as an x86 event using is_x86_event().
		 */
		tmp = event->pmu;
		event->pmu = &pmu;

		if (event->group_leader != event)
			err = validate_group(event);
		else
			err = validate_event(event);

		event->pmu = tmp;
	}
	if (err) {
		if (event->destroy)
			event->destroy(event);
	}

	return err;
}

static struct pmu pmu = {
	.pmu_enable	= x86_pmu_enable,
	.pmu_disable	= x86_pmu_disable,

	.event_init	= x86_pmu_event_init,

	.add		= x86_pmu_add,
	.del		= x86_pmu_del,
	.start		= x86_pmu_start,
	.stop		= x86_pmu_stop,
	.read		= x86_pmu_read,

	.start_txn	= x86_pmu_start_txn,
	.cancel_txn	= x86_pmu_cancel_txn,
	.commit_txn	= x86_pmu_commit_txn,
};
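
/*
 * Hedged usage sketch: the struct pmu above is registered for
 * PERF_TYPE_RAW, and x86_pmu_event_init() also accepts the generic
 * hardware and cache types.  A minimal attribute this PMU would
 * accept via the perf_event_open() path looks roughly like:
 */
#if 0
struct perf_event_attr attr = {
	.type	= PERF_TYPE_HARDWARE,
	.config	= PERF_COUNT_HW_CPU_CYCLES,
	.size	= sizeof(attr),
};
#endif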

/*
 * callchain support
 */

static int backtrace_stack(void *data, char *name)
{
	return 0;
}

static void backtrace_address(void *data, unsigned long addr, int reliable)
{
	struct perf_callchain_entry *entry = data;

	perf_callchain_store(entry, addr);
}

static const struct stacktrace_ops backtrace_ops = {
	.stack		= backtrace_stack,
	.address	= backtrace_address,
	.walk_stack	= print_context_stack_bp,
};

void
perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* TODO: we don't support guest OS callchains yet */
		return;
	}

	perf_callchain_store(entry, regs->ip);

	dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry);
}

#ifdef CONFIG_COMPAT
static inline int
perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	/* 32-bit process in 64-bit kernel. */
	struct stack_frame_ia32 frame;
	const void __user *fp;

	if (!test_thread_flag(TIF_IA32))
		return 0;

	fp = compat_ptr(regs->bp);
	while (entry->nr < PERF_MAX_STACK_DEPTH) {
		unsigned long bytes;
		frame.next_frame     = 0;
		frame.return_address = 0;

		bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
		if (bytes != sizeof(frame))
			break;

		if (fp < compat_ptr(regs->sp))
			break;

		perf_callchain_store(entry, frame.return_address);
		fp = compat_ptr(frame.next_frame);
	}
	return 1;
}
#else
static inline int
perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	return 0;
}
#endif

void
perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
	struct stack_frame frame;
	const void __user *fp;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* TODO: we don't support guest OS callchains yet */
		return;
	}

	fp = (void __user *)regs->bp;

	perf_callchain_store(entry, regs->ip);

	if (perf_callchain_user32(regs, entry))
		return;

	while (entry->nr < PERF_MAX_STACK_DEPTH) {
		unsigned long bytes;
		frame.next_frame     = NULL;
		frame.return_address = 0;

		bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
		if (bytes != sizeof(frame))
			break;

		if ((unsigned long)fp < regs->sp)
			break;

		perf_callchain_store(entry, frame.return_address);
		fp = frame.next_frame;
	}
}
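
/*
 * For reference, a hedged sketch of the frame layouts the two walks
 * above rely on (the real definitions live in <asm/stacktrace.h>):
 * a frame-pointer-linked stack frame is the saved frame pointer
 * followed by the return address, in native and ia32 widths.
 */
#if 0
struct stack_frame {
	struct stack_frame	*next_frame;
	unsigned long		return_address;
};

struct stack_frame_ia32 {
	u32	next_frame;
	u32	return_address;
};
#endif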

unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	unsigned long ip;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
		ip = perf_guest_cbs->get_guest_ip();
	else
		ip = instruction_pointer(regs);

	return ip;
}

unsigned long perf_misc_flags(struct pt_regs *regs)
{
	int misc = 0;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		if (perf_guest_cbs->is_user_mode())
			misc |= PERF_RECORD_MISC_GUEST_USER;
		else
			misc |= PERF_RECORD_MISC_GUEST_KERNEL;
	} else {
		if (user_mode(regs))
			misc |= PERF_RECORD_MISC_USER;
		else
			misc |= PERF_RECORD_MISC_KERNEL;
	}

	if (regs->flags & PERF_EFLAGS_EXACT)
		misc |= PERF_RECORD_MISC_EXACT_IP;

	return misc;
}
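
/*
 * Hedged usage sketch: the generic sampling code consumes the two
 * helpers above when assembling a sample, roughly along the lines of:
 */
#if 0
	header->misc |= perf_misc_flags(regs);
	data->ip      = perf_instruction_pointer(regs);
#endif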