Commit 390dfd95c5df1ab3921dd388d11b2aee332c3f2c

Authored by Tejun Heo
1 parent 877105cc49

percpu: make misc percpu symbols unique

This patch updates misc percpu related symbols such that percpu
symbols are unique and don't clash with local symbols.  This serves
two purposes of decreasing the possibility of global percpu symbol
collision and allowing dropping per_cpu__ prefix from percpu symbols.

* drivers/crypto/padlock-aes.c: s/last_cword/paes_last_cword/

* drivers/lguest/x86/core.c: s/last_cpu/lg_last_cpu/

* drivers/s390/net/netiucv.c: rename the variable used in a macro to
  avoid clashing with percpu symbol

* arch/mn10300/kernel/kprobes.c: replace current_ prefix with cur_ for
  static variables.  Please note that percpu symbol current_kprobe
  can't be changed as it's used by generic code.

Partly based on Rusty Russell's "alloc_percpu: rename percpu vars
which cause name clashes" patch.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Chuck Ebbert <cebbert@redhat.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Koichi Yasutake <yasutake.koichi@jp.panasonic.com>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Masami Hiramatsu <mhiramat@redhat.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: linux390@de.ibm.com

Showing 4 changed files with 42 additions and 45 deletions (side-by-side diff)

arch/mn10300/kernel/kprobes.c
... ... @@ -31,13 +31,13 @@
31 31 #define KPROBE_HIT_ACTIVE 0x00000001
32 32 #define KPROBE_HIT_SS 0x00000002
33 33  
34   -static struct kprobe *current_kprobe;
35   -static unsigned long current_kprobe_orig_pc;
36   -static unsigned long current_kprobe_next_pc;
37   -static int current_kprobe_ss_flags;
  34 +static struct kprobe *cur_kprobe;
  35 +static unsigned long cur_kprobe_orig_pc;
  36 +static unsigned long cur_kprobe_next_pc;
  37 +static int cur_kprobe_ss_flags;
38 38 static unsigned long kprobe_status;
39   -static kprobe_opcode_t current_kprobe_ss_buf[MAX_INSN_SIZE + 2];
40   -static unsigned long current_kprobe_bp_addr;
  39 +static kprobe_opcode_t cur_kprobe_ss_buf[MAX_INSN_SIZE + 2];
  40 +static unsigned long cur_kprobe_bp_addr;
41 41  
42 42 DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
43 43  
44 44  
45 45  
46 46  
47 47  
48 48  
... ... @@ -399,26 +399,25 @@
399 399 {
400 400 unsigned long nextpc;
401 401  
402   - current_kprobe_orig_pc = regs->pc;
403   - memcpy(current_kprobe_ss_buf, &p->ainsn.insn[0], MAX_INSN_SIZE);
404   - regs->pc = (unsigned long) current_kprobe_ss_buf;
  402 + cur_kprobe_orig_pc = regs->pc;
  403 + memcpy(cur_kprobe_ss_buf, &p->ainsn.insn[0], MAX_INSN_SIZE);
  404 + regs->pc = (unsigned long) cur_kprobe_ss_buf;
405 405  
406   - nextpc = find_nextpc(regs, &current_kprobe_ss_flags);
407   - if (current_kprobe_ss_flags & SINGLESTEP_PCREL)
408   - current_kprobe_next_pc =
409   - current_kprobe_orig_pc + (nextpc - regs->pc);
  406 + nextpc = find_nextpc(regs, &cur_kprobe_ss_flags);
  407 + if (cur_kprobe_ss_flags & SINGLESTEP_PCREL)
  408 + cur_kprobe_next_pc = cur_kprobe_orig_pc + (nextpc - regs->pc);
410 409 else
411   - current_kprobe_next_pc = nextpc;
  410 + cur_kprobe_next_pc = nextpc;
412 411  
413 412 /* branching instructions need special handling */
414   - if (current_kprobe_ss_flags & SINGLESTEP_BRANCH)
  413 + if (cur_kprobe_ss_flags & SINGLESTEP_BRANCH)
415 414 nextpc = singlestep_branch_setup(regs);
416 415  
417   - current_kprobe_bp_addr = nextpc;
  416 + cur_kprobe_bp_addr = nextpc;
418 417  
419 418 *(u8 *) nextpc = BREAKPOINT_INSTRUCTION;
420   - mn10300_dcache_flush_range2((unsigned) current_kprobe_ss_buf,
421   - sizeof(current_kprobe_ss_buf));
  419 + mn10300_dcache_flush_range2((unsigned) cur_kprobe_ss_buf,
  420 + sizeof(cur_kprobe_ss_buf));
422 421 mn10300_icache_inv();
423 422 }
424 423  
... ... @@ -440,7 +439,7 @@
440 439 disarm_kprobe(p, regs);
441 440 ret = 1;
442 441 } else {
443   - p = current_kprobe;
  442 + p = cur_kprobe;
444 443 if (p->break_handler && p->break_handler(p, regs))
445 444 goto ss_probe;
446 445 }
... ... @@ -464,7 +463,7 @@
464 463 }
465 464  
466 465 kprobe_status = KPROBE_HIT_ACTIVE;
467   - current_kprobe = p;
  466 + cur_kprobe = p;
468 467 if (p->pre_handler(p, regs)) {
469 468 /* handler has already set things up, so skip ss setup */
470 469 return 1;
... ... @@ -491,8 +490,8 @@
491 490 static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
492 491 {
493 492 /* we may need to fixup regs/stack after singlestepping a call insn */
494   - if (current_kprobe_ss_flags & SINGLESTEP_BRANCH) {
495   - regs->pc = current_kprobe_orig_pc;
  493 + if (cur_kprobe_ss_flags & SINGLESTEP_BRANCH) {
  494 + regs->pc = cur_kprobe_orig_pc;
496 495 switch (p->ainsn.insn[0]) {
497 496 case 0xcd: /* CALL (d16,PC) */
498 497 *(unsigned *) regs->sp = regs->mdr = regs->pc + 5;
... ... @@ -523,8 +522,8 @@
523 522 }
524 523 }
525 524  
526   - regs->pc = current_kprobe_next_pc;
527   - current_kprobe_bp_addr = 0;
  525 + regs->pc = cur_kprobe_next_pc;
  526 + cur_kprobe_bp_addr = 0;
528 527 }
529 528  
530 529 static inline int __kprobes post_kprobe_handler(struct pt_regs *regs)
531 530  
... ... @@ -532,10 +531,10 @@
532 531 if (!kprobe_running())
533 532 return 0;
534 533  
535   - if (current_kprobe->post_handler)
536   - current_kprobe->post_handler(current_kprobe, regs, 0);
  534 + if (cur_kprobe->post_handler)
  535 + cur_kprobe->post_handler(cur_kprobe, regs, 0);
537 536  
538   - resume_execution(current_kprobe, regs);
  537 + resume_execution(cur_kprobe, regs);
539 538 reset_current_kprobe();
540 539 preempt_enable_no_resched();
541 540 return 1;
542 541  
... ... @@ -545,12 +544,12 @@
545 544 static inline
546 545 int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
547 546 {
548   - if (current_kprobe->fault_handler &&
549   - current_kprobe->fault_handler(current_kprobe, regs, trapnr))
  547 + if (cur_kprobe->fault_handler &&
  548 + cur_kprobe->fault_handler(cur_kprobe, regs, trapnr))
550 549 return 1;
551 550  
552 551 if (kprobe_status & KPROBE_HIT_SS) {
553   - resume_execution(current_kprobe, regs);
  552 + resume_execution(cur_kprobe, regs);
554 553 reset_current_kprobe();
555 554 preempt_enable_no_resched();
556 555 }
... ... @@ -567,7 +566,7 @@
567 566  
568 567 switch (val) {
569 568 case DIE_BREAKPOINT:
570   - if (current_kprobe_bp_addr != args->regs->pc) {
  569 + if (cur_kprobe_bp_addr != args->regs->pc) {
571 570 if (kprobe_handler(args->regs))
572 571 return NOTIFY_STOP;
573 572 } else {
drivers/crypto/padlock-aes.c
... ... @@ -64,7 +64,7 @@
64 64 u32 *D;
65 65 };
66 66  
67   -static DEFINE_PER_CPU(struct cword *, last_cword);
  67 +static DEFINE_PER_CPU(struct cword *, paes_last_cword);
68 68  
69 69 /* Tells whether the ACE is capable to generate
70 70 the extended key for a given key_len. */
... ... @@ -152,9 +152,9 @@
152 152  
153 153 ok:
154 154 for_each_online_cpu(cpu)
155   - if (&ctx->cword.encrypt == per_cpu(last_cword, cpu) ||
156   - &ctx->cword.decrypt == per_cpu(last_cword, cpu))
157   - per_cpu(last_cword, cpu) = NULL;
  155 + if (&ctx->cword.encrypt == per_cpu(paes_last_cword, cpu) ||
  156 + &ctx->cword.decrypt == per_cpu(paes_last_cword, cpu))
  157 + per_cpu(paes_last_cword, cpu) = NULL;
158 158  
159 159 return 0;
160 160 }
... ... @@ -166,7 +166,7 @@
166 166 {
167 167 int cpu = raw_smp_processor_id();
168 168  
169   - if (cword != per_cpu(last_cword, cpu))
  169 + if (cword != per_cpu(paes_last_cword, cpu))
170 170 #ifndef CONFIG_X86_64
171 171 asm volatile ("pushfl; popfl");
172 172 #else
... ... @@ -176,7 +176,7 @@
176 176  
177 177 static inline void padlock_store_cword(struct cword *cword)
178 178 {
179   - per_cpu(last_cword, raw_smp_processor_id()) = cword;
  179 + per_cpu(paes_last_cword, raw_smp_processor_id()) = cword;
180 180 }
181 181  
182 182 /*
drivers/lguest/x86/core.c
... ... @@ -69,7 +69,7 @@
69 69 (SWITCHER_ADDR + SHARED_SWITCHER_PAGES*PAGE_SIZE))[cpu]);
70 70 }
71 71  
72   -static DEFINE_PER_CPU(struct lg_cpu *, last_cpu);
  72 +static DEFINE_PER_CPU(struct lg_cpu *, lg_last_cpu);
73 73  
74 74 /*S:010
75 75 * We approach the Switcher.
... ... @@ -90,8 +90,8 @@
90 90 * meanwhile). If that's not the case, we pretend everything in the
91 91 * Guest has changed.
92 92 */
93   - if (__get_cpu_var(last_cpu) != cpu || cpu->last_pages != pages) {
94   - __get_cpu_var(last_cpu) = cpu;
  93 + if (__get_cpu_var(lg_last_cpu) != cpu || cpu->last_pages != pages) {
  94 + __get_cpu_var(lg_last_cpu) = cpu;
95 95 cpu->last_pages = pages;
96 96 cpu->changed = CHANGED_ALL;
97 97 }
drivers/s390/net/netiucv.c
... ... @@ -113,11 +113,9 @@
113 113 #define IUCV_DBF_TEXT_(name, level, text...) \
114 114 do { \
115 115 if (iucv_dbf_passes(iucv_dbf_##name, level)) { \
116   - char* iucv_dbf_txt_buf = \
117   - get_cpu_var(iucv_dbf_txt_buf); \
118   - sprintf(iucv_dbf_txt_buf, text); \
119   - debug_text_event(iucv_dbf_##name, level, \
120   - iucv_dbf_txt_buf); \
  116 + char* __buf = get_cpu_var(iucv_dbf_txt_buf); \
  117 + sprintf(__buf, text); \
  118 + debug_text_event(iucv_dbf_##name, level, __buf); \
121 119 put_cpu_var(iucv_dbf_txt_buf); \
122 120 } \
123 121 } while (0)