Commit a8b0ca17b80e92faab46ee7179ba9e99ccb61233
Committed by: Ingo Molnar
Parent: 1880c4ae18
Exists in: master and 6 other branches
perf: Remove the nmi parameter from the swevent and overflow interface
The nmi parameter indicated if we could do wakeups from the current
context; if not, we would set some state and self-IPI and let the
resulting interrupt do the wakeup.

For the various event classes:

- hardware: nmi=0; PMI is in fact an NMI, or we run irq_work_run from
  the PMI-tail (ARM etc.)
- tracepoint: nmi=0; since tracepoints can fire from NMI context.
- software: nmi=[0,1]; some, like the schedule thing, cannot perform
  wakeups, and hence need 0.

As one can see, there is very little nmi=1 usage, and the down-side of
not using it is that on some platforms some software events can have a
jiffy delay in wakeup (when arch_irq_work_raise isn't implemented).
The up-side, however, is that we can remove the nmi parameter and save
a bunch of conditionals in fast paths.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Michael Cree <mcree@orcon.net.nz>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: Anton Blanchard <anton@samba.org>
Cc: Eric B Munson <emunson@mgebm.net>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: David S. Miller <davem@davemloft.net>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jason Wessel <jason.wessel@windriver.com>
Cc: Don Zickus <dzickus@redhat.com>
Link: http://lkml.kernel.org/n/tip-agjev8eu666tvknpb3iaj0fg@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
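The heart of the change is in the kernel/events/core.c hunks below, where both nmi-conditional branches collapse into an unconditional irq_work deferral, which is safe from any context including NMI. As a reading aid, here is a condensed before/after sketch of the wakeup path in __perf_event_overflow(), abridged from the diff in this commit (elided code is marked with /* ... */; not the complete functions):

/* Before: the caller's nmi flag decided whether a direct wakeup was safe. */
static int __perf_event_overflow(struct perf_event *event, int nmi,
                                 int throttle, struct perf_sample_data *data,
                                 struct pt_regs *regs)
{
        int ret = 0;

        /* ... throttling and event_limit accounting elided ... */

        if (event->overflow_handler)
                event->overflow_handler(event, nmi, data, regs);
        else
                perf_event_output(event, nmi, data, regs);

        if (event->fasync && event->pending_kill) {
                if (nmi) {
                        /* No wakeup from NMI context: self-IPI and let
                         * the resulting interrupt do it. */
                        event->pending_wakeup = 1;
                        irq_work_queue(&event->pending);
                } else
                        perf_event_wakeup(event);
        }

        return ret;
}

/* After: always defer via irq_work, which works from any context. */
static int __perf_event_overflow(struct perf_event *event,
                                 int throttle, struct perf_sample_data *data,
                                 struct pt_regs *regs)
{
        int ret = 0;

        /* ... */

        if (event->overflow_handler)
                event->overflow_handler(event, data, regs);
        else
                perf_event_output(event, data, regs);

        if (event->fasync && event->pending_kill) {
                event->pending_wakeup = 1;
                irq_work_queue(&event->pending);
        }

        return ret;
}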
Showing 46 changed files with 119 additions and 141 deletions
- arch/alpha/kernel/perf_event.c
- arch/arm/kernel/perf_event_v6.c
- arch/arm/kernel/perf_event_v7.c
- arch/arm/kernel/perf_event_xscale.c
- arch/arm/kernel/ptrace.c
- arch/arm/kernel/swp_emulate.c
- arch/arm/mm/fault.c
- arch/mips/kernel/perf_event.c
- arch/mips/kernel/traps.c
- arch/mips/kernel/unaligned.c
- arch/mips/math-emu/cp1emu.c
- arch/mips/mm/fault.c
- arch/powerpc/include/asm/emulated_ops.h
- arch/powerpc/kernel/perf_event.c
- arch/powerpc/kernel/perf_event_fsl_emb.c
- arch/powerpc/kernel/ptrace.c
- arch/powerpc/mm/fault.c
- arch/s390/mm/fault.c
- arch/sh/kernel/ptrace_32.c
- arch/sh/kernel/traps_32.c
- arch/sh/kernel/traps_64.c
- arch/sh/math-emu/math.c
- arch/sh/mm/fault_32.c
- arch/sh/mm/tlbflush_64.c
- arch/sparc/kernel/perf_event.c
- arch/sparc/kernel/unaligned_32.c
- arch/sparc/kernel/unaligned_64.c
- arch/sparc/kernel/visemul.c
- arch/sparc/math-emu/math_32.c
- arch/sparc/math-emu/math_64.c
- arch/sparc/mm/fault_32.c
- arch/sparc/mm/fault_64.c
- arch/x86/kernel/cpu/perf_event.c
- arch/x86/kernel/cpu/perf_event_intel.c
- arch/x86/kernel/cpu/perf_event_intel_ds.c
- arch/x86/kernel/cpu/perf_event_p4.c
- arch/x86/kernel/kgdb.c
- arch/x86/kernel/ptrace.c
- arch/x86/mm/fault.c
- include/linux/perf_event.h
- kernel/events/core.c
- kernel/events/internal.h
- kernel/events/ring_buffer.c
- kernel/sched.c
- kernel/watchdog.c
- samples/hw_breakpoint/data_breakpoint.c
arch/alpha/kernel/perf_event.c
... | ... | @@ -847,7 +847,7 @@ |
847 | 847 | data.period = event->hw.last_period; |
848 | 848 | |
849 | 849 | if (alpha_perf_event_set_period(event, hwc, idx)) { |
850 | - if (perf_event_overflow(event, 1, &data, regs)) { | |
850 | + if (perf_event_overflow(event, &data, regs)) { | |
851 | 851 | /* Interrupts coming too quickly; "throttle" the |
852 | 852 | * counter, i.e., disable it for a little while. |
853 | 853 | */ |
arch/arm/kernel/perf_event_v6.c
arch/arm/kernel/perf_event_v7.c
arch/arm/kernel/perf_event_xscale.c
... | ... | @@ -251,7 +251,7 @@ |
251 | 251 | if (!armpmu_event_set_period(event, hwc, idx)) |
252 | 252 | continue; |
253 | 253 | |
254 | - if (perf_event_overflow(event, 0, &data, regs)) | |
254 | + if (perf_event_overflow(event, &data, regs)) | |
255 | 255 | armpmu->disable(hwc, idx); |
256 | 256 | } |
257 | 257 | |
... | ... | @@ -583,7 +583,7 @@ |
583 | 583 | if (!armpmu_event_set_period(event, hwc, idx)) |
584 | 584 | continue; |
585 | 585 | |
586 | - if (perf_event_overflow(event, 0, &data, regs)) | |
586 | + if (perf_event_overflow(event, &data, regs)) | |
587 | 587 | armpmu->disable(hwc, idx); |
588 | 588 | } |
589 | 589 |
arch/arm/kernel/ptrace.c
... | ... | @@ -396,7 +396,7 @@ |
396 | 396 | /* |
397 | 397 | * Handle hitting a HW-breakpoint. |
398 | 398 | */ |
399 | -static void ptrace_hbptriggered(struct perf_event *bp, int unused, | |
399 | +static void ptrace_hbptriggered(struct perf_event *bp, | |
400 | 400 | struct perf_sample_data *data, |
401 | 401 | struct pt_regs *regs) |
402 | 402 | { |
arch/arm/kernel/swp_emulate.c
... | ... | @@ -183,7 +183,7 @@ |
183 | 183 | unsigned int address, destreg, data, type; |
184 | 184 | unsigned int res = 0; |
185 | 185 | |
186 | - perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, regs->ARM_pc); | |
186 | + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->ARM_pc); | |
187 | 187 | |
188 | 188 | if (current->pid != previous_pid) { |
189 | 189 | pr_debug("\"%s\" (%ld) uses deprecated SWP{B} instruction\n", |
arch/arm/mm/fault.c
... | ... | @@ -318,11 +318,11 @@ |
318 | 318 | fault = __do_page_fault(mm, addr, fsr, tsk); |
319 | 319 | up_read(&mm->mmap_sem); |
320 | 320 | |
321 | - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, addr); | |
321 | + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr); | |
322 | 322 | if (fault & VM_FAULT_MAJOR) |
323 | - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, regs, addr); | |
323 | + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, addr); | |
324 | 324 | else if (fault & VM_FAULT_MINOR) |
325 | - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, regs, addr); | |
325 | + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, addr); | |
326 | 326 | |
327 | 327 | /* |
328 | 328 | * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR |
arch/mips/kernel/perf_event.c
arch/mips/kernel/traps.c
... | ... | @@ -578,12 +578,12 @@ |
578 | 578 | { |
579 | 579 | if ((opcode & OPCODE) == LL) { |
580 | 580 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, |
581 | - 1, 0, regs, 0); | |
581 | + 1, regs, 0); | |
582 | 582 | return simulate_ll(regs, opcode); |
583 | 583 | } |
584 | 584 | if ((opcode & OPCODE) == SC) { |
585 | 585 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, |
586 | - 1, 0, regs, 0); | |
586 | + 1, regs, 0); | |
587 | 587 | return simulate_sc(regs, opcode); |
588 | 588 | } |
589 | 589 | |
... | ... | @@ -602,7 +602,7 @@ |
602 | 602 | int rd = (opcode & RD) >> 11; |
603 | 603 | int rt = (opcode & RT) >> 16; |
604 | 604 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, |
605 | - 1, 0, regs, 0); | |
605 | + 1, regs, 0); | |
606 | 606 | switch (rd) { |
607 | 607 | case 0: /* CPU number */ |
608 | 608 | regs->regs[rt] = smp_processor_id(); |
... | ... | @@ -640,7 +640,7 @@ |
640 | 640 | { |
641 | 641 | if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) { |
642 | 642 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, |
643 | - 1, 0, regs, 0); | |
643 | + 1, regs, 0); | |
644 | 644 | return 0; |
645 | 645 | } |
646 | 646 |
arch/mips/kernel/unaligned.c
... | ... | @@ -111,8 +111,7 @@ |
111 | 111 | unsigned long value; |
112 | 112 | unsigned int res; |
113 | 113 | |
114 | - perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, | |
115 | - 1, 0, regs, 0); | |
114 | + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); | |
116 | 115 | |
117 | 116 | /* |
118 | 117 | * This load never faults. |
... | ... | @@ -517,7 +516,7 @@ |
517 | 516 | mm_segment_t seg; |
518 | 517 | |
519 | 518 | perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, |
520 | - 1, 0, regs, regs->cp0_badvaddr); | |
519 | + 1, regs, regs->cp0_badvaddr); | |
521 | 520 | /* |
522 | 521 | * Did we catch a fault trying to load an instruction? |
523 | 522 | * Or are we running in MIPS16 mode? |
arch/mips/math-emu/cp1emu.c
... | ... | @@ -272,8 +272,7 @@ |
272 | 272 | } |
273 | 273 | |
274 | 274 | emul: |
275 | - perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, | |
276 | - 1, 0, xcp, 0); | |
275 | + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, xcp, 0); | |
277 | 276 | MIPS_FPU_EMU_INC_STATS(emulated); |
278 | 277 | switch (MIPSInst_OPCODE(ir)) { |
279 | 278 | case ldc1_op:{ |
arch/mips/mm/fault.c
... | ... | @@ -145,7 +145,7 @@ |
145 | 145 | * the fault. |
146 | 146 | */ |
147 | 147 | fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0); |
148 | - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); | |
148 | + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); | |
149 | 149 | if (unlikely(fault & VM_FAULT_ERROR)) { |
150 | 150 | if (fault & VM_FAULT_OOM) |
151 | 151 | goto out_of_memory; |
... | ... | @@ -154,12 +154,10 @@ |
154 | 154 | BUG(); |
155 | 155 | } |
156 | 156 | if (fault & VM_FAULT_MAJOR) { |
157 | - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, | |
158 | - 1, 0, regs, address); | |
157 | + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); | |
159 | 158 | tsk->maj_flt++; |
160 | 159 | } else { |
161 | - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, | |
162 | - 1, 0, regs, address); | |
160 | + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); | |
163 | 161 | tsk->min_flt++; |
164 | 162 | } |
165 | 163 |
arch/powerpc/include/asm/emulated_ops.h
... | ... | @@ -78,14 +78,14 @@ |
78 | 78 | #define PPC_WARN_EMULATED(type, regs) \ |
79 | 79 | do { \ |
80 | 80 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, \ |
81 | - 1, 0, regs, 0); \ | |
81 | + 1, regs, 0); \ | |
82 | 82 | __PPC_WARN_EMULATED(type); \ |
83 | 83 | } while (0) |
84 | 84 | |
85 | 85 | #define PPC_WARN_ALIGNMENT(type, regs) \ |
86 | 86 | do { \ |
87 | 87 | perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, \ |
88 | - 1, 0, regs, regs->dar); \ | |
88 | + 1, regs, regs->dar); \ | |
89 | 89 | __PPC_WARN_EMULATED(type); \ |
90 | 90 | } while (0) |
91 | 91 |
arch/powerpc/kernel/perf_event.c
... | ... | @@ -1207,7 +1207,7 @@ |
1207 | 1207 | * here so there is no possibility of being interrupted. |
1208 | 1208 | */ |
1209 | 1209 | static void record_and_restart(struct perf_event *event, unsigned long val, |
1210 | - struct pt_regs *regs, int nmi) | |
1210 | + struct pt_regs *regs) | |
1211 | 1211 | { |
1212 | 1212 | u64 period = event->hw.sample_period; |
1213 | 1213 | s64 prev, delta, left; |
... | ... | @@ -1258,7 +1258,7 @@ |
1258 | 1258 | if (event->attr.sample_type & PERF_SAMPLE_ADDR) |
1259 | 1259 | perf_get_data_addr(regs, &data.addr); |
1260 | 1260 | |
1261 | - if (perf_event_overflow(event, nmi, &data, regs)) | |
1261 | + if (perf_event_overflow(event, &data, regs)) | |
1262 | 1262 | power_pmu_stop(event, 0); |
1263 | 1263 | } |
1264 | 1264 | } |
... | ... | @@ -1346,7 +1346,7 @@ |
1346 | 1346 | if ((int)val < 0) { |
1347 | 1347 | /* event has overflowed */ |
1348 | 1348 | found = 1; |
1349 | - record_and_restart(event, val, regs, nmi); | |
1349 | + record_and_restart(event, val, regs); | |
1350 | 1350 | } |
1351 | 1351 | } |
1352 | 1352 |
arch/powerpc/kernel/perf_event_fsl_emb.c
... | ... | @@ -568,7 +568,7 @@ |
568 | 568 | * here so there is no possibility of being interrupted. |
569 | 569 | */ |
570 | 570 | static void record_and_restart(struct perf_event *event, unsigned long val, |
571 | - struct pt_regs *regs, int nmi) | |
571 | + struct pt_regs *regs) | |
572 | 572 | { |
573 | 573 | u64 period = event->hw.sample_period; |
574 | 574 | s64 prev, delta, left; |
... | ... | @@ -616,7 +616,7 @@ |
616 | 616 | perf_sample_data_init(&data, 0); |
617 | 617 | data.period = event->hw.last_period; |
618 | 618 | |
619 | - if (perf_event_overflow(event, nmi, &data, regs)) | |
619 | + if (perf_event_overflow(event, &data, regs)) | |
620 | 620 | fsl_emb_pmu_stop(event, 0); |
621 | 621 | } |
622 | 622 | } |
... | ... | @@ -644,7 +644,7 @@ |
644 | 644 | if (event) { |
645 | 645 | /* event has overflowed */ |
646 | 646 | found = 1; |
647 | - record_and_restart(event, val, regs, nmi); | |
647 | + record_and_restart(event, val, regs); | |
648 | 648 | } else { |
649 | 649 | /* |
650 | 650 | * Disabled counter is negative, |
arch/powerpc/kernel/ptrace.c
... | ... | @@ -882,7 +882,7 @@ |
882 | 882 | } |
883 | 883 | |
884 | 884 | #ifdef CONFIG_HAVE_HW_BREAKPOINT |
885 | -void ptrace_triggered(struct perf_event *bp, int nmi, | |
885 | +void ptrace_triggered(struct perf_event *bp, | |
886 | 886 | struct perf_sample_data *data, struct pt_regs *regs) |
887 | 887 | { |
888 | 888 | struct perf_event_attr attr; |
arch/powerpc/mm/fault.c
... | ... | @@ -173,7 +173,7 @@ |
173 | 173 | die("Weird page fault", regs, SIGSEGV); |
174 | 174 | } |
175 | 175 | |
176 | - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); | |
176 | + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); | |
177 | 177 | |
178 | 178 | /* When running in the kernel we expect faults to occur only to |
179 | 179 | * addresses in user space. All other faults represent errors in the |
... | ... | @@ -319,7 +319,7 @@ |
319 | 319 | } |
320 | 320 | if (ret & VM_FAULT_MAJOR) { |
321 | 321 | current->maj_flt++; |
322 | - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, | |
322 | + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, | |
323 | 323 | regs, address); |
324 | 324 | #ifdef CONFIG_PPC_SMLPAR |
325 | 325 | if (firmware_has_feature(FW_FEATURE_CMO)) { |
... | ... | @@ -330,7 +330,7 @@ |
330 | 330 | #endif |
331 | 331 | } else { |
332 | 332 | current->min_flt++; |
333 | - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, | |
333 | + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, | |
334 | 334 | regs, address); |
335 | 335 | } |
336 | 336 | up_read(&mm->mmap_sem); |
arch/s390/mm/fault.c
... | ... | @@ -299,7 +299,7 @@ |
299 | 299 | goto out; |
300 | 300 | |
301 | 301 | address = trans_exc_code & __FAIL_ADDR_MASK; |
302 | - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); | |
302 | + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); | |
303 | 303 | flags = FAULT_FLAG_ALLOW_RETRY; |
304 | 304 | if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400) |
305 | 305 | flags |= FAULT_FLAG_WRITE; |
... | ... | @@ -345,11 +345,11 @@ |
345 | 345 | if (flags & FAULT_FLAG_ALLOW_RETRY) { |
346 | 346 | if (fault & VM_FAULT_MAJOR) { |
347 | 347 | tsk->maj_flt++; |
348 | - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, | |
348 | + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, | |
349 | 349 | regs, address); |
350 | 350 | } else { |
351 | 351 | tsk->min_flt++; |
352 | - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, | |
352 | + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, | |
353 | 353 | regs, address); |
354 | 354 | } |
355 | 355 | if (fault & VM_FAULT_RETRY) { |
arch/sh/kernel/ptrace_32.c
arch/sh/kernel/traps_32.c
arch/sh/kernel/traps_64.c
... | ... | @@ -434,7 +434,7 @@ |
434 | 434 | return error; |
435 | 435 | } |
436 | 436 | |
437 | - perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, address); | |
437 | + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address); | |
438 | 438 | |
439 | 439 | destreg = (opcode >> 4) & 0x3f; |
440 | 440 | if (user_mode(regs)) { |
... | ... | @@ -512,7 +512,7 @@ |
512 | 512 | return error; |
513 | 513 | } |
514 | 514 | |
515 | - perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, address); | |
515 | + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address); | |
516 | 516 | |
517 | 517 | srcreg = (opcode >> 4) & 0x3f; |
518 | 518 | if (user_mode(regs)) { |
... | ... | @@ -588,7 +588,7 @@ |
588 | 588 | return error; |
589 | 589 | } |
590 | 590 | |
591 | - perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, address); | |
591 | + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, address); | |
592 | 592 | |
593 | 593 | destreg = (opcode >> 4) & 0x3f; |
594 | 594 | if (user_mode(regs)) { |
... | ... | @@ -665,7 +665,7 @@ |
665 | 665 | return error; |
666 | 666 | } |
667 | 667 | |
668 | - perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, address); | |
668 | + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, address); | |
669 | 669 | |
670 | 670 | srcreg = (opcode >> 4) & 0x3f; |
671 | 671 | if (user_mode(regs)) { |
arch/sh/math-emu/math.c
... | ... | @@ -620,7 +620,7 @@ |
620 | 620 | struct task_struct *tsk = current; |
621 | 621 | struct sh_fpu_soft_struct *fpu = &(tsk->thread.xstate->softfpu); |
622 | 622 | |
623 | - perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); | |
623 | + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); | |
624 | 624 | |
625 | 625 | if (!(task_thread_info(tsk)->status & TS_USEDFPU)) { |
626 | 626 | /* initialize once. */ |
arch/sh/mm/fault_32.c
... | ... | @@ -160,7 +160,7 @@ |
160 | 160 | if ((regs->sr & SR_IMASK) != SR_IMASK) |
161 | 161 | local_irq_enable(); |
162 | 162 | |
163 | - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); | |
163 | + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); | |
164 | 164 | |
165 | 165 | /* |
166 | 166 | * If we're in an interrupt, have no user context or are running |
... | ... | @@ -210,11 +210,11 @@ |
210 | 210 | } |
211 | 211 | if (fault & VM_FAULT_MAJOR) { |
212 | 212 | tsk->maj_flt++; |
213 | - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, | |
213 | + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, | |
214 | 214 | regs, address); |
215 | 215 | } else { |
216 | 216 | tsk->min_flt++; |
217 | - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, | |
217 | + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, | |
218 | 218 | regs, address); |
219 | 219 | } |
220 | 220 |
arch/sh/mm/tlbflush_64.c
... | ... | @@ -116,7 +116,7 @@ |
116 | 116 | /* Not an IO address, so reenable interrupts */ |
117 | 117 | local_irq_enable(); |
118 | 118 | |
119 | - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); | |
119 | + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); | |
120 | 120 | |
121 | 121 | /* |
122 | 122 | * If we're in an interrupt or have no user |
... | ... | @@ -200,11 +200,11 @@ |
200 | 200 | |
201 | 201 | if (fault & VM_FAULT_MAJOR) { |
202 | 202 | tsk->maj_flt++; |
203 | - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, | |
203 | + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, | |
204 | 204 | regs, address); |
205 | 205 | } else { |
206 | 206 | tsk->min_flt++; |
207 | - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, | |
207 | + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, | |
208 | 208 | regs, address); |
209 | 209 | } |
210 | 210 |
arch/sparc/kernel/perf_event.c
arch/sparc/kernel/unaligned_32.c
... | ... | @@ -247,7 +247,7 @@ |
247 | 247 | unsigned long addr = compute_effective_address(regs, insn); |
248 | 248 | int err; |
249 | 249 | |
250 | - perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr); | |
250 | + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr); | |
251 | 251 | switch (dir) { |
252 | 252 | case load: |
253 | 253 | err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f), |
... | ... | @@ -338,7 +338,7 @@ |
338 | 338 | } |
339 | 339 | |
340 | 340 | addr = compute_effective_address(regs, insn); |
341 | - perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr); | |
341 | + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr); | |
342 | 342 | switch(dir) { |
343 | 343 | case load: |
344 | 344 | err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f), |
arch/sparc/kernel/unaligned_64.c
... | ... | @@ -317,7 +317,7 @@ |
317 | 317 | |
318 | 318 | addr = compute_effective_address(regs, insn, |
319 | 319 | ((insn >> 25) & 0x1f)); |
320 | - perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr); | |
320 | + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr); | |
321 | 321 | switch (asi) { |
322 | 322 | case ASI_NL: |
323 | 323 | case ASI_AIUPL: |
... | ... | @@ -384,7 +384,7 @@ |
384 | 384 | int ret, i, rd = ((insn >> 25) & 0x1f); |
385 | 385 | int from_kernel = (regs->tstate & TSTATE_PRIV) != 0; |
386 | 386 | |
387 | - perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); | |
387 | + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); | |
388 | 388 | if (insn & 0x2000) { |
389 | 389 | maybe_flush_windows(0, 0, rd, from_kernel); |
390 | 390 | value = sign_extend_imm13(insn); |
... | ... | @@ -431,7 +431,7 @@ |
431 | 431 | int asi = decode_asi(insn, regs); |
432 | 432 | int flag = (freg < 32) ? FPRS_DL : FPRS_DU; |
433 | 433 | |
434 | - perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); | |
434 | + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); | |
435 | 435 | |
436 | 436 | save_and_clear_fpu(); |
437 | 437 | current_thread_info()->xfsr[0] &= ~0x1c000; |
... | ... | @@ -554,7 +554,7 @@ |
554 | 554 | int from_kernel = (regs->tstate & TSTATE_PRIV) != 0; |
555 | 555 | unsigned long *reg; |
556 | 556 | |
557 | - perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); | |
557 | + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); | |
558 | 558 | |
559 | 559 | maybe_flush_windows(0, 0, rd, from_kernel); |
560 | 560 | reg = fetch_reg_addr(rd, regs); |
... | ... | @@ -586,7 +586,7 @@ |
586 | 586 | |
587 | 587 | if (tstate & TSTATE_PRIV) |
588 | 588 | die_if_kernel("lddfmna from kernel", regs); |
589 | - perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, sfar); | |
589 | + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar); | |
590 | 590 | if (test_thread_flag(TIF_32BIT)) |
591 | 591 | pc = (u32)pc; |
592 | 592 | if (get_user(insn, (u32 __user *) pc) != -EFAULT) { |
... | ... | @@ -647,7 +647,7 @@ |
647 | 647 | |
648 | 648 | if (tstate & TSTATE_PRIV) |
649 | 649 | die_if_kernel("stdfmna from kernel", regs); |
650 | - perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, sfar); | |
650 | + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar); | |
651 | 651 | if (test_thread_flag(TIF_32BIT)) |
652 | 652 | pc = (u32)pc; |
653 | 653 | if (get_user(insn, (u32 __user *) pc) != -EFAULT) { |
arch/sparc/kernel/visemul.c
arch/sparc/math-emu/math_32.c
... | ... | @@ -164,7 +164,7 @@ |
164 | 164 | int retcode = 0; /* assume all succeed */ |
165 | 165 | unsigned long insn; |
166 | 166 | |
167 | - perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); | |
167 | + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); | |
168 | 168 | |
169 | 169 | #ifdef DEBUG_MATHEMU |
170 | 170 | printk("In do_mathemu()... pc is %08lx\n", regs->pc); |
arch/sparc/math-emu/math_64.c
... | ... | @@ -184,7 +184,7 @@ |
184 | 184 | |
185 | 185 | if (tstate & TSTATE_PRIV) |
186 | 186 | die_if_kernel("unfinished/unimplemented FPop from kernel", regs); |
187 | - perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); | |
187 | + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); | |
188 | 188 | if (test_thread_flag(TIF_32BIT)) |
189 | 189 | pc = (u32)pc; |
190 | 190 | if (get_user(insn, (u32 __user *) pc) != -EFAULT) { |
arch/sparc/mm/fault_32.c
... | ... | @@ -251,7 +251,7 @@ |
251 | 251 | if (in_atomic() || !mm) |
252 | 252 | goto no_context; |
253 | 253 | |
254 | - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); | |
254 | + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); | |
255 | 255 | |
256 | 256 | down_read(&mm->mmap_sem); |
257 | 257 | |
... | ... | @@ -301,12 +301,10 @@ |
301 | 301 | } |
302 | 302 | if (fault & VM_FAULT_MAJOR) { |
303 | 303 | current->maj_flt++; |
304 | - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, | |
305 | - regs, address); | |
304 | + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); | |
306 | 305 | } else { |
307 | 306 | current->min_flt++; |
308 | - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, | |
309 | - regs, address); | |
307 | + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); | |
310 | 308 | } |
311 | 309 | up_read(&mm->mmap_sem); |
312 | 310 | return; |
arch/sparc/mm/fault_64.c
... | ... | @@ -325,7 +325,7 @@ |
325 | 325 | if (in_atomic() || !mm) |
326 | 326 | goto intr_or_no_mm; |
327 | 327 | |
328 | - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); | |
328 | + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); | |
329 | 329 | |
330 | 330 | if (!down_read_trylock(&mm->mmap_sem)) { |
331 | 331 | if ((regs->tstate & TSTATE_PRIV) && |
... | ... | @@ -433,12 +433,10 @@ |
433 | 433 | } |
434 | 434 | if (fault & VM_FAULT_MAJOR) { |
435 | 435 | current->maj_flt++; |
436 | - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, | |
437 | - regs, address); | |
436 | + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); | |
438 | 437 | } else { |
439 | 438 | current->min_flt++; |
440 | - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, | |
441 | - regs, address); | |
439 | + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); | |
442 | 440 | } |
443 | 441 | up_read(&mm->mmap_sem); |
444 | 442 |
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_intel_ds.c
... | ... | @@ -340,7 +340,7 @@ |
340 | 340 | */ |
341 | 341 | perf_prepare_sample(&header, &data, event, ®s); |
342 | 342 | |
343 | - if (perf_output_begin(&handle, event, header.size * (top - at), 1, 1)) | |
343 | + if (perf_output_begin(&handle, event, header.size * (top - at), 1)) | |
344 | 344 | return 1; |
345 | 345 | |
346 | 346 | for (; at < top; at++) { |
... | ... | @@ -616,7 +616,7 @@ |
616 | 616 | else |
617 | 617 | regs.flags &= ~PERF_EFLAGS_EXACT; |
618 | 618 | |
619 | - if (perf_event_overflow(event, 1, &data, ®s)) | |
619 | + if (perf_event_overflow(event, &data, ®s)) | |
620 | 620 | x86_pmu_stop(event, 0); |
621 | 621 | } |
622 | 622 |
arch/x86/kernel/cpu/perf_event_p4.c
arch/x86/kernel/kgdb.c
... | ... | @@ -608,7 +608,7 @@ |
608 | 608 | return register_die_notifier(&kgdb_notifier); |
609 | 609 | } |
610 | 610 | |
611 | -static void kgdb_hw_overflow_handler(struct perf_event *event, int nmi, | |
611 | +static void kgdb_hw_overflow_handler(struct perf_event *event, | |
612 | 612 | struct perf_sample_data *data, struct pt_regs *regs) |
613 | 613 | { |
614 | 614 | struct task_struct *tsk = current; |
arch/x86/kernel/ptrace.c
arch/x86/mm/fault.c
... | ... | @@ -1059,7 +1059,7 @@ |
1059 | 1059 | if (unlikely(error_code & PF_RSVD)) |
1060 | 1060 | pgtable_bad(regs, error_code, address); |
1061 | 1061 | |
1062 | - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); | |
1062 | + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); | |
1063 | 1063 | |
1064 | 1064 | /* |
1065 | 1065 | * If we're in an interrupt, have no user context or are running |
... | ... | @@ -1161,11 +1161,11 @@ |
1161 | 1161 | if (flags & FAULT_FLAG_ALLOW_RETRY) { |
1162 | 1162 | if (fault & VM_FAULT_MAJOR) { |
1163 | 1163 | tsk->maj_flt++; |
1164 | - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, | |
1164 | + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, | |
1165 | 1165 | regs, address); |
1166 | 1166 | } else { |
1167 | 1167 | tsk->min_flt++; |
1168 | - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, | |
1168 | + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, | |
1169 | 1169 | regs, address); |
1170 | 1170 | } |
1171 | 1171 | if (fault & VM_FAULT_RETRY) { |
include/linux/perf_event.h
... | ... | @@ -682,7 +682,7 @@ |
682 | 682 | struct file; |
683 | 683 | struct perf_sample_data; |
684 | 684 | |
685 | -typedef void (*perf_overflow_handler_t)(struct perf_event *, int, | |
685 | +typedef void (*perf_overflow_handler_t)(struct perf_event *, | |
686 | 686 | struct perf_sample_data *, |
687 | 687 | struct pt_regs *regs); |
688 | 688 | |
... | ... | @@ -925,7 +925,6 @@ |
925 | 925 | unsigned long size; |
926 | 926 | void *addr; |
927 | 927 | int page; |
928 | - int nmi; | |
929 | 928 | int sample; |
930 | 929 | }; |
931 | 930 | |
... | ... | @@ -993,7 +992,7 @@ |
993 | 992 | struct perf_event *event, |
994 | 993 | struct pt_regs *regs); |
995 | 994 | |
996 | -extern int perf_event_overflow(struct perf_event *event, int nmi, | |
995 | +extern int perf_event_overflow(struct perf_event *event, | |
997 | 996 | struct perf_sample_data *data, |
998 | 997 | struct pt_regs *regs); |
999 | 998 | |
... | ... | @@ -1012,7 +1011,7 @@ |
1012 | 1011 | |
1013 | 1012 | extern struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; |
1014 | 1013 | |
1015 | -extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64); | |
1014 | +extern void __perf_sw_event(u32, u64, struct pt_regs *, u64); | |
1016 | 1015 | |
1017 | 1016 | #ifndef perf_arch_fetch_caller_regs |
1018 | 1017 | static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { } |
... | ... | @@ -1034,7 +1033,7 @@ |
1034 | 1033 | } |
1035 | 1034 | |
1036 | 1035 | static __always_inline void |
1037 | -perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr) | |
1036 | +perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) | |
1038 | 1037 | { |
1039 | 1038 | struct pt_regs hot_regs; |
1040 | 1039 | |
... | ... | @@ -1043,7 +1042,7 @@ |
1043 | 1042 | perf_fetch_caller_regs(&hot_regs); |
1044 | 1043 | regs = &hot_regs; |
1045 | 1044 | } |
1046 | - __perf_sw_event(event_id, nr, nmi, regs, addr); | |
1045 | + __perf_sw_event(event_id, nr, regs, addr); | |
1047 | 1046 | } |
1048 | 1047 | } |
1049 | 1048 | |
... | ... | @@ -1057,7 +1056,7 @@ |
1057 | 1056 | |
1058 | 1057 | static inline void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next) |
1059 | 1058 | { |
1060 | - perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0); | |
1059 | + perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0); | |
1061 | 1060 | |
1062 | 1061 | __perf_event_task_sched_out(task, next); |
1063 | 1062 | } |
... | ... | @@ -1119,7 +1118,7 @@ |
1119 | 1118 | |
1120 | 1119 | extern int perf_output_begin(struct perf_output_handle *handle, |
1121 | 1120 | struct perf_event *event, unsigned int size, |
1122 | - int nmi, int sample); | |
1121 | + int sample); | |
1123 | 1122 | extern void perf_output_end(struct perf_output_handle *handle); |
1124 | 1123 | extern void perf_output_copy(struct perf_output_handle *handle, |
1125 | 1124 | const void *buf, unsigned int len); |
... | ... | @@ -1143,8 +1142,7 @@ |
1143 | 1142 | static inline int perf_event_task_enable(void) { return -EINVAL; } |
1144 | 1143 | |
1145 | 1144 | static inline void |
1146 | -perf_sw_event(u32 event_id, u64 nr, int nmi, | |
1147 | - struct pt_regs *regs, u64 addr) { } | |
1145 | +perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { } | |
1148 | 1146 | static inline void |
1149 | 1147 | perf_bp_event(struct perf_event *event, void *data) { } |
1150 | 1148 |
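With the prototypes above, every software-event callsite and overflow callback drops one argument. A minimal usage sketch against the new interface (the perf_sw_event call mirrors the fault-handler updates elsewhere in this commit; my_overflow_handler is a hypothetical handler name):

/* Count a software event; no nmi flag, context handling is internal. */
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

/* Overflow callbacks now match the three-argument typedef: */
static void my_overflow_handler(struct perf_event *event,
                                struct perf_sample_data *data,
                                struct pt_regs *regs)
{
        /* wakeup/disable decisions no longer branch on an nmi flag */
}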
kernel/events/core.c
... | ... | @@ -3972,7 +3972,7 @@ |
3972 | 3972 | } |
3973 | 3973 | } |
3974 | 3974 | |
3975 | -static void perf_event_output(struct perf_event *event, int nmi, | |
3975 | +static void perf_event_output(struct perf_event *event, | |
3976 | 3976 | struct perf_sample_data *data, |
3977 | 3977 | struct pt_regs *regs) |
3978 | 3978 | { |
... | ... | @@ -3984,7 +3984,7 @@ |
3984 | 3984 | |
3985 | 3985 | perf_prepare_sample(&header, data, event, regs); |
3986 | 3986 | |
3987 | - if (perf_output_begin(&handle, event, header.size, nmi, 1)) | |
3987 | + if (perf_output_begin(&handle, event, header.size, 1)) | |
3988 | 3988 | goto exit; |
3989 | 3989 | |
3990 | 3990 | perf_output_sample(&handle, &header, data, event); |
... | ... | @@ -4024,7 +4024,7 @@ |
4024 | 4024 | int ret; |
4025 | 4025 | |
4026 | 4026 | perf_event_header__init_id(&read_event.header, &sample, event); |
4027 | - ret = perf_output_begin(&handle, event, read_event.header.size, 0, 0); | |
4027 | + ret = perf_output_begin(&handle, event, read_event.header.size, 0); | |
4028 | 4028 | if (ret) |
4029 | 4029 | return; |
4030 | 4030 | |
... | ... | @@ -4067,7 +4067,7 @@ |
4067 | 4067 | perf_event_header__init_id(&task_event->event_id.header, &sample, event); |
4068 | 4068 | |
4069 | 4069 | ret = perf_output_begin(&handle, event, |
4070 | - task_event->event_id.header.size, 0, 0); | |
4070 | + task_event->event_id.header.size, 0); | |
4071 | 4071 | if (ret) |
4072 | 4072 | goto out; |
4073 | 4073 | |
... | ... | @@ -4204,7 +4204,7 @@ |
4204 | 4204 | |
4205 | 4205 | perf_event_header__init_id(&comm_event->event_id.header, &sample, event); |
4206 | 4206 | ret = perf_output_begin(&handle, event, |
4207 | - comm_event->event_id.header.size, 0, 0); | |
4207 | + comm_event->event_id.header.size, 0); | |
4208 | 4208 | |
4209 | 4209 | if (ret) |
4210 | 4210 | goto out; |
... | ... | @@ -4351,7 +4351,7 @@ |
4351 | 4351 | |
4352 | 4352 | perf_event_header__init_id(&mmap_event->event_id.header, &sample, event); |
4353 | 4353 | ret = perf_output_begin(&handle, event, |
4354 | - mmap_event->event_id.header.size, 0, 0); | |
4354 | + mmap_event->event_id.header.size, 0); | |
4355 | 4355 | if (ret) |
4356 | 4356 | goto out; |
4357 | 4357 | |
... | ... | @@ -4546,7 +4546,7 @@ |
4546 | 4546 | perf_event_header__init_id(&throttle_event.header, &sample, event); |
4547 | 4547 | |
4548 | 4548 | ret = perf_output_begin(&handle, event, |
4549 | - throttle_event.header.size, 1, 0); | |
4549 | + throttle_event.header.size, 0); | |
4550 | 4550 | if (ret) |
4551 | 4551 | return; |
4552 | 4552 | |
... | ... | @@ -4559,7 +4559,7 @@ |
4559 | 4559 | * Generic event overflow handling, sampling. |
4560 | 4560 | */ |
4561 | 4561 | |
4562 | -static int __perf_event_overflow(struct perf_event *event, int nmi, | |
4562 | +static int __perf_event_overflow(struct perf_event *event, | |
4563 | 4563 | int throttle, struct perf_sample_data *data, |
4564 | 4564 | struct pt_regs *regs) |
4565 | 4565 | { |
... | ... | @@ -4602,34 +4602,28 @@ |
4602 | 4602 | if (events && atomic_dec_and_test(&event->event_limit)) { |
4603 | 4603 | ret = 1; |
4604 | 4604 | event->pending_kill = POLL_HUP; |
4605 | - if (nmi) { | |
4606 | - event->pending_disable = 1; | |
4607 | - irq_work_queue(&event->pending); | |
4608 | - } else | |
4609 | - perf_event_disable(event); | |
4605 | + event->pending_disable = 1; | |
4606 | + irq_work_queue(&event->pending); | |
4610 | 4607 | } |
4611 | 4608 | |
4612 | 4609 | if (event->overflow_handler) |
4613 | - event->overflow_handler(event, nmi, data, regs); | |
4610 | + event->overflow_handler(event, data, regs); | |
4614 | 4611 | else |
4615 | - perf_event_output(event, nmi, data, regs); | |
4612 | + perf_event_output(event, data, regs); | |
4616 | 4613 | |
4617 | 4614 | if (event->fasync && event->pending_kill) { |
4618 | - if (nmi) { | |
4619 | - event->pending_wakeup = 1; | |
4620 | - irq_work_queue(&event->pending); | |
4621 | - } else | |
4622 | - perf_event_wakeup(event); | |
4615 | + event->pending_wakeup = 1; | |
4616 | + irq_work_queue(&event->pending); | |
4623 | 4617 | } |
4624 | 4618 | |
4625 | 4619 | return ret; |
4626 | 4620 | } |
4627 | 4621 | |
4628 | -int perf_event_overflow(struct perf_event *event, int nmi, | |
4622 | +int perf_event_overflow(struct perf_event *event, | |
4629 | 4623 | struct perf_sample_data *data, |
4630 | 4624 | struct pt_regs *regs) |
4631 | 4625 | { |
4632 | - return __perf_event_overflow(event, nmi, 1, data, regs); | |
4626 | + return __perf_event_overflow(event, 1, data, regs); | |
4633 | 4627 | } |
4634 | 4628 | |
4635 | 4629 | /* |
... | ... | @@ -4678,7 +4672,7 @@ |
4678 | 4672 | } |
4679 | 4673 | |
4680 | 4674 | static void perf_swevent_overflow(struct perf_event *event, u64 overflow, |
4681 | - int nmi, struct perf_sample_data *data, | |
4675 | + struct perf_sample_data *data, | |
4682 | 4676 | struct pt_regs *regs) |
4683 | 4677 | { |
4684 | 4678 | struct hw_perf_event *hwc = &event->hw; |
... | ... | @@ -4692,7 +4686,7 @@ |
4692 | 4686 | return; |
4693 | 4687 | |
4694 | 4688 | for (; overflow; overflow--) { |
4695 | - if (__perf_event_overflow(event, nmi, throttle, | |
4689 | + if (__perf_event_overflow(event, throttle, | |
4696 | 4690 | data, regs)) { |
4697 | 4691 | /* |
4698 | 4692 | * We inhibit the overflow from happening when |
... | ... | @@ -4705,7 +4699,7 @@ |
4705 | 4699 | } |
4706 | 4700 | |
4707 | 4701 | static void perf_swevent_event(struct perf_event *event, u64 nr, |
4708 | - int nmi, struct perf_sample_data *data, | |
4702 | + struct perf_sample_data *data, | |
4709 | 4703 | struct pt_regs *regs) |
4710 | 4704 | { |
4711 | 4705 | struct hw_perf_event *hwc = &event->hw; |
4712 | 4706 | |
... | ... | @@ -4719,12 +4713,12 @@ |
4719 | 4713 | return; |
4720 | 4714 | |
4721 | 4715 | if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq) |
4722 | - return perf_swevent_overflow(event, 1, nmi, data, regs); | |
4716 | + return perf_swevent_overflow(event, 1, data, regs); | |
4723 | 4717 | |
4724 | 4718 | if (local64_add_negative(nr, &hwc->period_left)) |
4725 | 4719 | return; |
4726 | 4720 | |
4727 | - perf_swevent_overflow(event, 0, nmi, data, regs); | |
4721 | + perf_swevent_overflow(event, 0, data, regs); | |
4728 | 4722 | } |
4729 | 4723 | |
4730 | 4724 | static int perf_exclude_event(struct perf_event *event, |
... | ... | @@ -4812,7 +4806,7 @@ |
4812 | 4806 | } |
4813 | 4807 | |
4814 | 4808 | static void do_perf_sw_event(enum perf_type_id type, u32 event_id, |
4815 | - u64 nr, int nmi, | |
4809 | + u64 nr, | |
4816 | 4810 | struct perf_sample_data *data, |
4817 | 4811 | struct pt_regs *regs) |
4818 | 4812 | { |
... | ... | @@ -4828,7 +4822,7 @@ |
4828 | 4822 | |
4829 | 4823 | hlist_for_each_entry_rcu(event, node, head, hlist_entry) { |
4830 | 4824 | if (perf_swevent_match(event, type, event_id, data, regs)) |
4831 | - perf_swevent_event(event, nr, nmi, data, regs); | |
4825 | + perf_swevent_event(event, nr, data, regs); | |
4832 | 4826 | } |
4833 | 4827 | end: |
4834 | 4828 | rcu_read_unlock(); |
... | ... | @@ -4849,8 +4843,7 @@ |
4849 | 4843 | put_recursion_context(swhash->recursion, rctx); |
4850 | 4844 | } |
4851 | 4845 | |
4852 | -void __perf_sw_event(u32 event_id, u64 nr, int nmi, | |
4853 | - struct pt_regs *regs, u64 addr) | |
4846 | +void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) | |
4854 | 4847 | { |
4855 | 4848 | struct perf_sample_data data; |
4856 | 4849 | int rctx; |
... | ... | @@ -4862,7 +4855,7 @@ |
4862 | 4855 | |
4863 | 4856 | perf_sample_data_init(&data, addr); |
4864 | 4857 | |
4865 | - do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs); | |
4858 | + do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs); | |
4866 | 4859 | |
4867 | 4860 | perf_swevent_put_recursion_context(rctx); |
4868 | 4861 | preempt_enable_notrace(); |
... | ... | @@ -5110,7 +5103,7 @@ |
5110 | 5103 | |
5111 | 5104 | hlist_for_each_entry_rcu(event, node, head, hlist_entry) { |
5112 | 5105 | if (perf_tp_event_match(event, &data, regs)) |
5113 | - perf_swevent_event(event, count, 1, &data, regs); | |
5106 | + perf_swevent_event(event, count, &data, regs); | |
5114 | 5107 | } |
5115 | 5108 | |
5116 | 5109 | perf_swevent_put_recursion_context(rctx); |
... | ... | @@ -5203,7 +5196,7 @@ |
5203 | 5196 | perf_sample_data_init(&sample, bp->attr.bp_addr); |
5204 | 5197 | |
5205 | 5198 | if (!bp->hw.state && !perf_exclude_event(bp, regs)) |
5206 | - perf_swevent_event(bp, 1, 1, &sample, regs); | |
5199 | + perf_swevent_event(bp, 1, &sample, regs); | |
5207 | 5200 | } |
5208 | 5201 | #endif |
5209 | 5202 | |
... | ... | @@ -5232,7 +5225,7 @@ |
5232 | 5225 | |
5233 | 5226 | if (regs && !perf_exclude_event(event, regs)) { |
5234 | 5227 | if (!(event->attr.exclude_idle && current->pid == 0)) |
5235 | - if (perf_event_overflow(event, 0, &data, regs)) | |
5228 | + if (perf_event_overflow(event, &data, regs)) | |
5236 | 5229 | ret = HRTIMER_NORESTART; |
5237 | 5230 | } |
5238 | 5231 |
kernel/events/internal.h
kernel/events/ring_buffer.c
... | ... | @@ -38,11 +38,8 @@ |
38 | 38 | { |
39 | 39 | atomic_set(&handle->rb->poll, POLL_IN); |
40 | 40 | |
41 | - if (handle->nmi) { | |
42 | - handle->event->pending_wakeup = 1; | |
43 | - irq_work_queue(&handle->event->pending); | |
44 | - } else | |
45 | - perf_event_wakeup(handle->event); | |
41 | + handle->event->pending_wakeup = 1; | |
42 | + irq_work_queue(&handle->event->pending); | |
46 | 43 | } |
47 | 44 | |
48 | 45 | /* |
... | ... | @@ -102,7 +99,7 @@ |
102 | 99 | |
103 | 100 | int perf_output_begin(struct perf_output_handle *handle, |
104 | 101 | struct perf_event *event, unsigned int size, |
105 | - int nmi, int sample) | |
102 | + int sample) | |
106 | 103 | { |
107 | 104 | struct ring_buffer *rb; |
108 | 105 | unsigned long tail, offset, head; |
... | ... | @@ -127,7 +124,6 @@ |
127 | 124 | |
128 | 125 | handle->rb = rb; |
129 | 126 | handle->event = event; |
130 | - handle->nmi = nmi; | |
131 | 127 | handle->sample = sample; |
132 | 128 | |
133 | 129 | if (!rb->nr_pages) |
kernel/sched.c
... | ... | @@ -2220,7 +2220,7 @@ |
2220 | 2220 | |
2221 | 2221 | if (task_cpu(p) != new_cpu) { |
2222 | 2222 | p->se.nr_migrations++; |
2223 | - perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0); | |
2223 | + perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0); | |
2224 | 2224 | } |
2225 | 2225 | |
2226 | 2226 | __set_task_cpu(p, new_cpu); |
kernel/watchdog.c
... | ... | @@ -211,7 +211,7 @@ |
211 | 211 | }; |
212 | 212 | |
213 | 213 | /* Callback function for perf event subsystem */ |
214 | -static void watchdog_overflow_callback(struct perf_event *event, int nmi, | |
214 | +static void watchdog_overflow_callback(struct perf_event *event, | |
215 | 215 | struct perf_sample_data *data, |
216 | 216 | struct pt_regs *regs) |
217 | 217 | { |
samples/hw_breakpoint/data_breakpoint.c
... | ... | @@ -41,7 +41,7 @@ |
41 | 41 | MODULE_PARM_DESC(ksym, "Kernel symbol to monitor; this module will report any" |
42 | 42 | " write operations on the kernel symbol"); |
43 | 43 | |
44 | -static void sample_hbp_handler(struct perf_event *bp, int nmi, | |
44 | +static void sample_hbp_handler(struct perf_event *bp, | |
45 | 45 | struct perf_sample_data *data, |
46 | 46 | struct pt_regs *regs) |
47 | 47 | { |