Commit b76834bc1b6db0a0923eed85c81b1113021b0612

Authored by Christoph Lameter
Committed by Tejun Heo
1 parent 4a6f4fe837

kprobes: Use this_cpu_ops

Use this_cpu ops in various places to optimize per cpu data access.

Cc: Jason Baron <jbaron@redhat.com>
Cc: Namhyung Kim <namhyung@gmail.com>
Acked-by: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
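
For context, a minimal sketch of the conversion pattern (illustrative only, not part of this patch; the per-cpu variable example_var and the two helpers are hypothetical, and callers are assumed to run with preemption disabled, as the kprobes paths do):

/*
 * Illustrative sketch only, not from this patch: "example_var" and the
 * two helpers below are hypothetical.  Callers are assumed to have
 * preemption disabled, which is why the __ variants are safe.
 */
#include <linux/kprobes.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(struct kprobe *, example_var);

static void old_style(struct kprobe *p)
{
	/* Old idiom: compute this CPU's address, then store through the lvalue. */
	__get_cpu_var(example_var) = p;
}

static void new_style(struct kprobe *p)
{
	/*
	 * New idiom: hand the whole access to the arch code, which on x86
	 * can emit a single segment-prefixed mov (%gs-based on x86-64)
	 * instead of an address calculation followed by a store.
	 */
	__this_cpu_write(example_var, p);
}

Avoiding the separate per-cpu address calculation is the "optimize per cpu data access" referred to in the changelog.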

Showing 3 changed files with 13 additions and 13 deletions

arch/x86/kernel/kprobes.c
@@ -403,7 +403,7 @@
 
 static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
-	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
 	kcb->kprobe_status = kcb->prev_kprobe.status;
 	kcb->kprobe_old_flags = kcb->prev_kprobe.old_flags;
 	kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags;
@@ -412,7 +412,7 @@
 static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
 				struct kprobe_ctlblk *kcb)
 {
-	__get_cpu_var(current_kprobe) = p;
+	__this_cpu_write(current_kprobe, p);
 	kcb->kprobe_saved_flags = kcb->kprobe_old_flags
 		= (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
 	if (is_IF_modifier(p->ainsn.insn))
@@ -586,7 +586,7 @@
 		preempt_enable_no_resched();
 		return 1;
 	} else if (kprobe_running()) {
-		p = __get_cpu_var(current_kprobe);
+		p = __this_cpu_read(current_kprobe);
 		if (p->break_handler && p->break_handler(p, regs)) {
 			setup_singlestep(p, regs, kcb, 0);
 			return 1;
@@ -759,11 +759,11 @@
 
 		orig_ret_address = (unsigned long)ri->ret_addr;
 		if (ri->rp && ri->rp->handler) {
-			__get_cpu_var(current_kprobe) = &ri->rp->kp;
+			__this_cpu_write(current_kprobe, &ri->rp->kp);
 			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
 			ri->ret_addr = correct_ret_addr;
 			ri->rp->handler(ri, regs);
-			__get_cpu_var(current_kprobe) = NULL;
+			__this_cpu_write(current_kprobe, NULL);
 		}
 
 		recycle_rp_inst(ri, &empty_rp);
@@ -1198,10 +1198,10 @@
 		regs->ip = (unsigned long)op->kp.addr + INT3_SIZE;
 		regs->orig_ax = ~0UL;
 
-		__get_cpu_var(current_kprobe) = &op->kp;
+		__this_cpu_write(current_kprobe, &op->kp);
 		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
 		opt_pre_handler(&op->kp, regs);
-		__get_cpu_var(current_kprobe) = NULL;
+		__this_cpu_write(current_kprobe, NULL);
 	}
 	preempt_enable_no_resched();
 }
include/linux/kprobes.h
@@ -303,12 +303,12 @@
 /* kprobe_running() will just return the current_kprobe on this CPU */
 static inline struct kprobe *kprobe_running(void)
 {
-	return (__get_cpu_var(current_kprobe));
+	return (__this_cpu_read(current_kprobe));
 }
 
 static inline void reset_current_kprobe(void)
 {
-	__get_cpu_var(current_kprobe) = NULL;
+	__this_cpu_write(current_kprobe, NULL);
 }
 
 static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void)
kernel/kprobes.c
@@ -317,12 +317,12 @@
 /* We have preemption disabled.. so it is safe to use __ versions */
 static inline void set_kprobe_instance(struct kprobe *kp)
 {
-	__get_cpu_var(kprobe_instance) = kp;
+	__this_cpu_write(kprobe_instance, kp);
 }
 
 static inline void reset_kprobe_instance(void)
 {
-	__get_cpu_var(kprobe_instance) = NULL;
+	__this_cpu_write(kprobe_instance, NULL);
 }
 
 /*
@@ -775,7 +775,7 @@
 static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
 					int trapnr)
 {
-	struct kprobe *cur = __get_cpu_var(kprobe_instance);
+	struct kprobe *cur = __this_cpu_read(kprobe_instance);
 
 	/*
	 * if we faulted "during" the execution of a user specified
@@ -790,7 +790,7 @@
 
 static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
 {
-	struct kprobe *cur = __get_cpu_var(kprobe_instance);
+	struct kprobe *cur = __this_cpu_read(kprobe_instance);
 	int ret = 0;
 
 	if (cur && cur->break_handler) {