Commit a1eac7ac903ea9afbd4f133659710a0588c8eca5
Committed by: Ingo Molnar
1 parent: 15c7ad51ad
Exists in: smarc-l5.0.0_1.0.0-ga and 5 other branches
perf/x86: Move Intel specific code to intel_pmu_init()
There is some Intel specific code in the generic x86 path. Move it to intel_pmu_init(). Since p4 and p6 pmus don't have fixed counters we may skip the check in case such a pmu is detected.

Signed-off-by: Robert Richter <robert.richter@amd.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1340217996-2254-3-git-send-email-robert.richter@amd.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
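In short, intel_pmu_init() now computes x86_pmu.intel_ctrl (the counter-enable bitmask that the Intel driver later writes to the global control MSR) itself, and the generic path only fills in a default covering the generic counters when the vendor init left the field zero. Below is a minimal standalone sketch of the resulting bit layout, assuming INTEL_PMC_IDX_FIXED is 32 as in this kernel and using illustrative counter counts:

#include <stdint.h>
#include <stdio.h>

#define INTEL_PMC_IDX_FIXED 32  /* first fixed-counter bit, as in the kernel */

int main(void)
{
        /* illustrative counts: a PMU with 4 generic and 3 fixed counters */
        int num_counters = 4;
        int num_counters_fixed = 3;

        /* generic counters occupy bits [0, num_counters) */
        uint64_t intel_ctrl = (1ULL << num_counters) - 1;

        /* fixed counters occupy bits [32, 32 + num_counters_fixed) */
        intel_ctrl |= ((1ULL << num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED;

        printf("intel_ctrl = %#llx\n", (unsigned long long)intel_ctrl);
        /* prints: intel_ctrl = 0x70000000f */
        return 0;
}

Since the p4 and p6 init paths leave intel_ctrl at zero and report no fixed counters, the fixed-counter clipping and the fixed-bits OR can live in intel_pmu_init() alone.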
Showing 2 changed files with 35 additions and 32 deletions
arch/x86/kernel/cpu/perf_event.c
@@ -1307,7 +1307,6 @@
 static int __init init_hw_perf_events(void)
 {
        struct x86_pmu_quirk *quirk;
-       struct event_constraint *c;
        int err;
 
        pr_info("Performance Events: ");
@@ -1338,44 +1337,15 @@
        for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next)
                quirk->func();
 
-       if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
-               WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
-                    x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
-               x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
-       }
-       x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
+       if (!x86_pmu.intel_ctrl)
+               x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
 
-       if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
-               WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
-                    x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
-               x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
-       }
-
-       x86_pmu.intel_ctrl |=
-               ((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
-
        perf_events_lapic_init();
        register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI");
 
        unconstrained = (struct event_constraint)
                __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
                                   0, x86_pmu.num_counters, 0);
-
-       if (x86_pmu.event_constraints) {
-               /*
-                * event on fixed counter2 (REF_CYCLES) only works on this
-                * counter, so do not extend mask to generic counters
-                */
-               for_each_event_constraint(c, x86_pmu.event_constraints) {
-                       if (c->cmask != X86_RAW_EVENT_MASK
-                           || c->idxmsk64 == INTEL_PMC_MSK_FIXED_REF_CYCLES) {
-                               continue;
-                       }
-
-                       c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
-                       c->weight += x86_pmu.num_counters;
-               }
-       }
 
        x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
        x86_pmu_format_group.attrs = x86_pmu.format_attrs;
arch/x86/kernel/cpu/perf_event_intel.c
@@ -1765,6 +1765,7 @@
        union cpuid10_edx edx;
        union cpuid10_eax eax;
        union cpuid10_ebx ebx;
+       struct event_constraint *c;
        unsigned int unused;
        int version;
 
@@ -1950,6 +1951,38 @@
                x86_pmu.event_constraints = intel_gen_event_constraints;
                pr_cont("generic architected perfmon, ");
                break;
+               }
+       }
+
+       if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
+               WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
+                    x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
+               x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
+       }
+       x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
+
+       if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
+               WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
+                    x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
+               x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
+       }
+
+       x86_pmu.intel_ctrl |=
+               ((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
+
+       if (x86_pmu.event_constraints) {
+               /*
+                * event on fixed counter2 (REF_CYCLES) only works on this
+                * counter, so do not extend mask to generic counters
+                */
+               for_each_event_constraint(c, x86_pmu.event_constraints) {
+                       if (c->cmask != X86_RAW_EVENT_MASK
+                           || c->idxmsk64 == INTEL_PMC_MSK_FIXED_REF_CYCLES) {
+                               continue;
+                       }
+
+                       c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
+                       c->weight += x86_pmu.num_counters;
+               }
        }
 
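For readers following the moved constraint loop: it widens every constraint that matches on the full raw event code so the event may also be scheduled on any generic counter, but leaves the REF_CYCLES constraint pinned, since that event only counts on fixed counter 2 (bit INTEL_PMC_IDX_FIXED + 2 = 34). A simplified sketch with stand-in mask values (X86_RAW_EVENT_MASK's real definition differs):

#include <stdint.h>

/* stand-in values for illustration; not the kernel's definitions */
#define X86_RAW_EVENT_MASK              0xffffffffULL
#define INTEL_PMC_MSK_FIXED_REF_CYCLES  (1ULL << 34)    /* fixed counter 2 */

struct event_constraint {
        uint64_t idxmsk64;      /* bitmask of counters the event may use */
        uint64_t cmask;         /* event-code bits the constraint matches on */
        int weight;             /* number of usable counters */
};

/* a sketch mirroring the moved loop body, applied to one constraint */
static void extend_to_generic(struct event_constraint *c, int num_counters)
{
        if (c->cmask != X86_RAW_EVENT_MASK ||
            c->idxmsk64 == INTEL_PMC_MSK_FIXED_REF_CYCLES)
                return; /* not a full-event-code match, or REF_CYCLES */

        c->idxmsk64 |= (1ULL << num_counters) - 1;      /* add generic counters */
        c->weight += num_counters;
}

int main(void)
{
        /* hypothetical constraint pinned to fixed counter 0 (bit 32) */
        struct event_constraint c = { 1ULL << 32, X86_RAW_EVENT_MASK, 1 };

        extend_to_generic(&c, 4);
        /* c.idxmsk64 is now 0x10000000f: fixed counter 0 plus generics 0-3 */
        return 0;
}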