Commit 3a0340be06a9356eb61f6804107480acbe62c069

Authored by Joe Perches
Committed by Ingo Molnar
1 parent 1bd591a5f1

x86: mmio-mod.c: Use pr_fmt

- Add #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 - Remove #define NAME
 - Remove NAME from pr_<level>

Signed-off-by: Joe Perches <joe@perches.com>
LKML-Reference: <009cb214c45ef932df0242856228f4739cc91408.1260383912.git.joe@perches.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

Showing 1 changed file with 35 additions and 36 deletions Side-by-side Diff

arch/x86/mm/mmio-mod.c
... ... @@ -19,6 +19,9 @@
19 19 *
20 20 * Derived from the read-mod example from relay-examples by Tom Zanussi.
21 21 */
  22 +
  23 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  24 +
22 25 #define DEBUG 1
23 26  
24 27 #include <linux/module.h>
... ... @@ -36,8 +39,6 @@
36 39  
37 40 #include "pf_in.h"
38 41  
39   -#define NAME "mmiotrace: "
40   -
41 42 struct trap_reason {
42 43 unsigned long addr;
43 44 unsigned long ip;
44 45  
45 46  
... ... @@ -96,17 +97,18 @@
96 97 pte_t *pte = lookup_address(address, &level);
97 98  
98 99 if (!pte) {
99   - pr_err(NAME "Error in %s: no pte for page 0x%08lx\n",
100   - __func__, address);
  100 + pr_err("Error in %s: no pte for page 0x%08lx\n",
  101 + __func__, address);
101 102 return;
102 103 }
103 104  
104 105 if (level == PG_LEVEL_2M) {
105   - pr_emerg(NAME "4MB pages are not currently supported: "
106   - "0x%08lx\n", address);
  106 + pr_emerg("4MB pages are not currently supported: 0x%08lx\n",
  107 + address);
107 108 BUG();
108 109 }
109   - pr_info(NAME "pte for 0x%lx: 0x%llx 0x%llx\n", address,
  110 + pr_info("pte for 0x%lx: 0x%llx 0x%llx\n",
  111 + address,
110 112 (unsigned long long)pte_val(*pte),
111 113 (unsigned long long)pte_val(*pte) & _PAGE_PRESENT);
112 114 }
113 115  
114 116  
115 117  
116 118  
... ... @@ -118,22 +120,21 @@
118 120 static void die_kmmio_nesting_error(struct pt_regs *regs, unsigned long addr)
119 121 {
120 122 const struct trap_reason *my_reason = &get_cpu_var(pf_reason);
121   - pr_emerg(NAME "unexpected fault for address: 0x%08lx, "
122   - "last fault for address: 0x%08lx\n",
123   - addr, my_reason->addr);
  123 + pr_emerg("unexpected fault for address: 0x%08lx, last fault for address: 0x%08lx\n",
  124 + addr, my_reason->addr);
124 125 print_pte(addr);
125 126 print_symbol(KERN_EMERG "faulting IP is at %s\n", regs->ip);
126 127 print_symbol(KERN_EMERG "last faulting IP was at %s\n", my_reason->ip);
127 128 #ifdef __i386__
128 129 pr_emerg("eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n",
129   - regs->ax, regs->bx, regs->cx, regs->dx);
  130 + regs->ax, regs->bx, regs->cx, regs->dx);
130 131 pr_emerg("esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n",
131   - regs->si, regs->di, regs->bp, regs->sp);
  132 + regs->si, regs->di, regs->bp, regs->sp);
132 133 #else
133 134 pr_emerg("rax: %016lx rcx: %016lx rdx: %016lx\n",
134   - regs->ax, regs->cx, regs->dx);
  135 + regs->ax, regs->cx, regs->dx);
135 136 pr_emerg("rsi: %016lx rdi: %016lx rbp: %016lx rsp: %016lx\n",
136   - regs->si, regs->di, regs->bp, regs->sp);
  137 + regs->si, regs->di, regs->bp, regs->sp);
137 138 #endif
138 139 put_cpu_var(pf_reason);
139 140 BUG();
... ... @@ -213,7 +214,7 @@
213 214 /* this should always return the active_trace count to 0 */
214 215 my_reason->active_traces--;
215 216 if (my_reason->active_traces) {
216   - pr_emerg(NAME "unexpected post handler");
  217 + pr_emerg("unexpected post handler");
217 218 BUG();
218 219 }
219 220  
... ... @@ -244,7 +245,7 @@
244 245 };
245 246  
246 247 if (!trace) {
247   - pr_err(NAME "kmalloc failed in ioremap\n");
  248 + pr_err("kmalloc failed in ioremap\n");
248 249 return;
249 250 }
250 251  
... ... @@ -282,8 +283,8 @@
282 283 if (!is_enabled()) /* recheck and proper locking in *_core() */
283 284 return;
284 285  
285   - pr_debug(NAME "ioremap_*(0x%llx, 0x%lx) = %p\n",
286   - (unsigned long long)offset, size, addr);
  286 + pr_debug("ioremap_*(0x%llx, 0x%lx) = %p\n",
  287 + (unsigned long long)offset, size, addr);
287 288 if ((filter_offset) && (offset != filter_offset))
288 289 return;
289 290 ioremap_trace_core(offset, size, addr);
... ... @@ -301,7 +302,7 @@
301 302 struct remap_trace *tmp;
302 303 struct remap_trace *found_trace = NULL;
303 304  
304   - pr_debug(NAME "Unmapping %p.\n", addr);
  305 + pr_debug("Unmapping %p.\n", addr);
305 306  
306 307 spin_lock_irq(&trace_lock);
307 308 if (!is_enabled())
... ... @@ -363,9 +364,8 @@
363 364 * Caller also ensures is_enabled() cannot change.
364 365 */
365 366 list_for_each_entry(trace, &trace_list, list) {
366   - pr_notice(NAME "purging non-iounmapped "
367   - "trace @0x%08lx, size 0x%lx.\n",
368   - trace->probe.addr, trace->probe.len);
  367 + pr_notice("purging non-iounmapped trace @0x%08lx, size 0x%lx.\n",
  368 + trace->probe.addr, trace->probe.len);
369 369 if (!nommiotrace)
370 370 unregister_kmmio_probe(&trace->probe);
371 371 }
... ... @@ -387,7 +387,7 @@
387 387  
388 388 if (downed_cpus == NULL &&
389 389 !alloc_cpumask_var(&downed_cpus, GFP_KERNEL)) {
390   - pr_notice(NAME "Failed to allocate mask\n");
  390 + pr_notice("Failed to allocate mask\n");
391 391 goto out;
392 392 }
393 393  
394 394  
395 395  
396 396  
... ... @@ -395,20 +395,19 @@
395 395 cpumask_copy(downed_cpus, cpu_online_mask);
396 396 cpumask_clear_cpu(cpumask_first(cpu_online_mask), downed_cpus);
397 397 if (num_online_cpus() > 1)
398   - pr_notice(NAME "Disabling non-boot CPUs...\n");
  398 + pr_notice("Disabling non-boot CPUs...\n");
399 399 put_online_cpus();
400 400  
401 401 for_each_cpu(cpu, downed_cpus) {
402 402 err = cpu_down(cpu);
403 403 if (!err)
404   - pr_info(NAME "CPU%d is down.\n", cpu);
  404 + pr_info("CPU%d is down.\n", cpu);
405 405 else
406   - pr_err(NAME "Error taking CPU%d down: %d\n", cpu, err);
  406 + pr_err("Error taking CPU%d down: %d\n", cpu, err);
407 407 }
408 408 out:
409 409 if (num_online_cpus() > 1)
410   - pr_warning(NAME "multiple CPUs still online, "
411   - "may miss events.\n");
  410 + pr_warning("multiple CPUs still online, may miss events.\n");
412 411 }
413 412  
414 413 /* __ref because leave_uniprocessor calls cpu_up which is __cpuinit,
415 414  
416 415  
... ... @@ -420,13 +419,13 @@
420 419  
421 420 if (downed_cpus == NULL || cpumask_weight(downed_cpus) == 0)
422 421 return;
423   - pr_notice(NAME "Re-enabling CPUs...\n");
  422 + pr_notice("Re-enabling CPUs...\n");
424 423 for_each_cpu(cpu, downed_cpus) {
425 424 err = cpu_up(cpu);
426 425 if (!err)
427   - pr_info(NAME "enabled CPU%d.\n", cpu);
  426 + pr_info("enabled CPU%d.\n", cpu);
428 427 else
429   - pr_err(NAME "cannot re-enable CPU%d: %d\n", cpu, err);
  428 + pr_err("cannot re-enable CPU%d: %d\n", cpu, err);
430 429 }
431 430 }
432 431  
... ... @@ -434,8 +433,8 @@
434 433 static void enter_uniprocessor(void)
435 434 {
436 435 if (num_online_cpus() > 1)
437   - pr_warning(NAME "multiple CPUs are online, may miss events. "
438   - "Suggest booting with maxcpus=1 kernel argument.\n");
  436 + pr_warning("multiple CPUs are online, may miss events. "
  437 + "Suggest booting with maxcpus=1 kernel argument.\n");
439 438 }
440 439  
441 440 static void leave_uniprocessor(void)
442 441  
... ... @@ -450,13 +449,13 @@
450 449 goto out;
451 450  
452 451 if (nommiotrace)
453   - pr_info(NAME "MMIO tracing disabled.\n");
  452 + pr_info("MMIO tracing disabled.\n");
454 453 kmmio_init();
455 454 enter_uniprocessor();
456 455 spin_lock_irq(&trace_lock);
457 456 atomic_inc(&mmiotrace_enabled);
458 457 spin_unlock_irq(&trace_lock);
459   - pr_info(NAME "enabled.\n");
  458 + pr_info("enabled.\n");
460 459 out:
461 460 mutex_unlock(&mmiotrace_mutex);
462 461 }
... ... @@ -475,7 +474,7 @@
475 474 clear_trace_list(); /* guarantees: no more kmmio callbacks */
476 475 leave_uniprocessor();
477 476 kmmio_cleanup();
478   - pr_info(NAME "disabled.\n");
  477 + pr_info("disabled.\n");
479 478 out:
480 479 mutex_unlock(&mmiotrace_mutex);
481 480 }