Commit a5a2bad55de89a0adf7d6f783cb87ab7eb1a894f

Authored by Ingo Molnar

Merge branch 'tip/perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into perf/core

Showing 33 changed files

Makefile
... ... @@ -591,6 +591,11 @@
591 591 # conserve stack if available
592 592 KBUILD_CFLAGS += $(call cc-option,-fconserve-stack)
593 593  
  594 +# check for 'asm goto'
  595 +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC)), y)
  596 + KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
  597 +endif
  598 +
594 599 # Add user supplied CPPFLAGS, AFLAGS and CFLAGS as the last assignments
595 600 # But warn user when we do so
596 601 warn-assign = \
arch/Kconfig
... ... @@ -158,5 +158,8 @@
158 158 subsystem. Also has support for calculating CPU cycle events
159 159 to determine how many clock cycles in a given period.
160 160  
  161 +config HAVE_ARCH_JUMP_LABEL
  162 + bool
  163 +
161 164 source "kernel/gcov/Kconfig"
arch/sparc/Kconfig
... ... @@ -30,6 +30,7 @@
30 30 select PERF_USE_VMALLOC
31 31 select HAVE_DMA_ATTRS
32 32 select HAVE_DMA_API_DEBUG
  33 + select HAVE_ARCH_JUMP_LABEL
33 34  
34 35 config SPARC32
35 36 def_bool !64BIT
arch/sparc/include/asm/jump_label.h
  1 +#ifndef _ASM_SPARC_JUMP_LABEL_H
  2 +#define _ASM_SPARC_JUMP_LABEL_H
  3 +
  4 +#ifdef __KERNEL__
  5 +
  6 +#include <linux/types.h>
  7 +#include <asm/system.h>
  8 +
  9 +#define JUMP_LABEL_NOP_SIZE 4
  10 +
  11 +#define JUMP_LABEL(key, label) \
  12 + do { \
  13 + asm goto("1:\n\t" \
  14 + "nop\n\t" \
  15 + "nop\n\t" \
  16 + ".pushsection __jump_table, \"a\"\n\t"\
  17 + ".word 1b, %l[" #label "], %c0\n\t" \
  18 + ".popsection \n\t" \
  19 + : : "i" (key) : : label);\
  20 + } while (0)
  21 +
  22 +#endif /* __KERNEL__ */
  23 +
  24 +typedef u32 jump_label_t;
  25 +
  26 +struct jump_entry {
  27 + jump_label_t code;
  28 + jump_label_t target;
  29 + jump_label_t key;
  30 +};
  31 +
  32 +#endif
arch/sparc/kernel/Makefile
... ... @@ -119,4 +119,6 @@
119 119  
120 120 pc--$(CONFIG_PERF_EVENTS) := perf_event.o
121 121 obj-$(CONFIG_SPARC64) += $(pc--y)
  122 +
  123 +obj-$(CONFIG_SPARC64) += jump_label.o
arch/sparc/kernel/jump_label.c
  1 +#include <linux/kernel.h>
  2 +#include <linux/types.h>
  3 +#include <linux/mutex.h>
  4 +#include <linux/cpu.h>
  5 +
  6 +#include <linux/jump_label.h>
  7 +#include <linux/memory.h>
  8 +
  9 +#ifdef HAVE_JUMP_LABEL
  10 +
  11 +void arch_jump_label_transform(struct jump_entry *entry,
  12 + enum jump_label_type type)
  13 +{
  14 + u32 val;
  15 + u32 *insn = (u32 *) (unsigned long) entry->code;
  16 +
  17 + if (type == JUMP_LABEL_ENABLE) {
  18 + s32 off = (s32)entry->target - (s32)entry->code;
  19 +
  20 +#ifdef CONFIG_SPARC64
  21 + /* ba,pt %xcc, . + (off << 2) */
  22 + val = 0x10680000 | ((u32) off >> 2);
  23 +#else
  24 + /* ba . + (off << 2) */
  25 + val = 0x10800000 | ((u32) off >> 2);
  26 +#endif
  27 + } else {
  28 + val = 0x01000000;
  29 + }
  30 +
  31 + get_online_cpus();
  32 + mutex_lock(&text_mutex);
  33 + *insn = val;
  34 + flushi(insn);
  35 + mutex_unlock(&text_mutex);
  36 + put_online_cpus();
  37 +}
  38 +
  39 +void arch_jump_label_text_poke_early(jump_label_t addr)
  40 +{
  41 + u32 *insn_p = (u32 *) (unsigned long) addr;
  42 +
  43 + *insn_p = 0x01000000;
  44 + flushi(insn_p);
  45 +}
  46 +
  47 +#endif
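
A worked example of the sparc64 encoding above (illustrative addresses only): with entry->code at 0x1000 and entry->target at 0x1010,

    off = 0x1010 - 0x1000 = 0x10
    val = 0x10680000 | (0x10 >> 2) = 0x10680004    /* ba,pt %xcc, . + 0x10 */

The disable path instead writes 0x01000000, the sparc nop, and flushi() flushes the patched word from the instruction cache.
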
arch/sparc/kernel/module.c
... ... @@ -18,6 +18,9 @@
18 18 #include <asm/spitfire.h>
19 19  
20 20 #ifdef CONFIG_SPARC64
  21 +
  22 +#include <linux/jump_label.h>
  23 +
21 24 static void *module_map(unsigned long size)
22 25 {
23 26 struct vm_struct *area;
... ... @@ -227,6 +230,9 @@
227 230 const Elf_Shdr *sechdrs,
228 231 struct module *me)
229 232 {
  233 + /* make jump label nops */
  234 + jump_label_apply_nops(me);
  235 +
230 236 /* Cheetah's I-cache is fully coherent. */
231 237 if (tlb_type == spitfire) {
232 238 unsigned long va;
arch/x86/Kconfig
... ... @@ -59,6 +59,7 @@
59 59 select ANON_INODES
60 60 select HAVE_ARCH_KMEMCHECK
61 61 select HAVE_USER_RETURN_NOTIFIER
  62 + select HAVE_ARCH_JUMP_LABEL
62 63  
63 64 config INSTRUCTION_DECODER
64 65 def_bool (KPROBES || PERF_EVENTS)
arch/x86/include/asm/alternative.h
... ... @@ -4,6 +4,7 @@
4 4 #include <linux/types.h>
5 5 #include <linux/stddef.h>
6 6 #include <linux/stringify.h>
  7 +#include <linux/jump_label.h>
7 8 #include <asm/asm.h>
8 9  
9 10 /*
... ... @@ -160,6 +161,8 @@
160 161 #define __parainstructions_end NULL
161 162 #endif
162 163  
  164 +extern void *text_poke_early(void *addr, const void *opcode, size_t len);
  165 +
163 166 /*
164 167 * Clear and restore the kernel write-protection flag on the local CPU.
165 168 * Allows the kernel to edit read-only pages.
... ... @@ -179,6 +182,14 @@
179 182 */
180 183 extern void *text_poke(void *addr, const void *opcode, size_t len);
181 184 extern void *text_poke_smp(void *addr, const void *opcode, size_t len);
  185 +
  186 +#if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL)
  187 +#define IDEAL_NOP_SIZE_5 5
  188 +extern unsigned char ideal_nop5[IDEAL_NOP_SIZE_5];
  189 +extern void arch_init_ideal_nop5(void);
  190 +#else
  191 +static inline void arch_init_ideal_nop5(void) {}
  192 +#endif
182 193  
183 194 #endif /* _ASM_X86_ALTERNATIVE_H */
arch/x86/include/asm/jump_label.h
  1 +#ifndef _ASM_X86_JUMP_LABEL_H
  2 +#define _ASM_X86_JUMP_LABEL_H
  3 +
  4 +#ifdef __KERNEL__
  5 +
  6 +#include <linux/types.h>
  7 +#include <asm/nops.h>
  8 +
  9 +#define JUMP_LABEL_NOP_SIZE 5
  10 +
  11 +# define JUMP_LABEL_INITIAL_NOP ".byte 0xe9 \n\t .long 0\n\t"
  12 +
  13 +# define JUMP_LABEL(key, label) \
  14 + do { \
  15 + asm goto("1:" \
  16 + JUMP_LABEL_INITIAL_NOP \
  17 + ".pushsection __jump_table, \"a\" \n\t"\
  18 + _ASM_PTR "1b, %l[" #label "], %c0 \n\t" \
  19 + ".popsection \n\t" \
  20 + : : "i" (key) : : label); \
  21 + } while (0)
  22 +
  23 +#endif /* __KERNEL__ */
  24 +
  25 +#ifdef CONFIG_X86_64
  26 +typedef u64 jump_label_t;
  27 +#else
  28 +typedef u32 jump_label_t;
  29 +#endif
  30 +
  31 +struct jump_entry {
  32 + jump_label_t code;
  33 + jump_label_t target;
  34 + jump_label_t key;
  35 +};
  36 +
  37 +#endif
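
For illustration, the two states of an x86 jump-label site as patched by arch/x86/kernel/jump_label.c below (byte values taken from that file and from arch_init_ideal_nop5()):

    1:  e9 xx xx xx xx    /* enabled: jmp <label>, rel32 = target - (code + 5) */
    1:  0f 1f 44 00 00    /* disabled: ideal_nop5 (P6 nop; 66 66 66 66 90 or a
                             jmp . + 5 is substituted if the P6 nop faults) */
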
arch/x86/kernel/Makefile
... ... @@ -32,7 +32,7 @@
32 32 obj-y := process_$(BITS).o signal.o entry_$(BITS).o
33 33 obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
34 34 obj-y += time.o ioport.o ldt.o dumpstack.o
35   -obj-y += setup.o x86_init.o i8259.o irqinit.o
  35 +obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
36 36 obj-$(CONFIG_X86_VISWS) += visws_quirks.o
37 37 obj-$(CONFIG_X86_32) += probe_roms_32.o
38 38 obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
arch/x86/kernel/alternative.c
... ... @@ -195,7 +195,7 @@
195 195  
196 196 extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
197 197 extern s32 __smp_locks[], __smp_locks_end[];
198   -static void *text_poke_early(void *addr, const void *opcode, size_t len);
  198 +void *text_poke_early(void *addr, const void *opcode, size_t len);
199 199  
200 200 /* Replace instructions with better alternatives for this CPU type.
201 201 This runs before SMP is initialized to avoid SMP problems with
... ... @@ -522,7 +522,7 @@
522 522 * instructions. And on the local CPU you need to be protected again NMI or MCE
523 523 * handlers seeing an inconsistent instruction while you patch.
524 524 */
525   -static void *__init_or_module text_poke_early(void *addr, const void *opcode,
  525 +void *__init_or_module text_poke_early(void *addr, const void *opcode,
526 526 size_t len)
527 527 {
528 528 unsigned long flags;
... ... @@ -640,4 +640,69 @@
640 640 stop_machine(stop_machine_text_poke, (void *)&tpp, NULL);
641 641 return addr;
642 642 }
  643 +
  644 +#if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL)
  645 +
  646 +unsigned char ideal_nop5[IDEAL_NOP_SIZE_5];
  647 +
  648 +void __init arch_init_ideal_nop5(void)
  649 +{
  650 + extern const unsigned char ftrace_test_p6nop[];
  651 + extern const unsigned char ftrace_test_nop5[];
  652 + extern const unsigned char ftrace_test_jmp[];
  653 + int faulted = 0;
  654 +
  655 + /*
  656 + * There is no good nop for all x86 archs.
  657 + * We will default to using the P6_NOP5, but first we
  658 + * will test to make sure that the nop will actually
  659 + * work on this CPU. If it faults, we will then
  660 + * go to a lesser efficient 5 byte nop. If that fails
  661 + * we then just use a jmp as our nop. This isn't the most
  662 + * efficient nop, but we can not use a multi part nop
  663 + * since we would then risk being preempted in the middle
  664 + * of that nop, and if we enabled tracing then, it might
  665 + * cause a system crash.
  666 + *
  667 + * TODO: check the cpuid to determine the best nop.
  668 + */
  669 + asm volatile (
  670 + "ftrace_test_jmp:"
  671 + "jmp ftrace_test_p6nop\n"
  672 + "nop\n"
  673 + "nop\n"
  674 + "nop\n" /* 2 byte jmp + 3 bytes */
  675 + "ftrace_test_p6nop:"
  676 + P6_NOP5
  677 + "jmp 1f\n"
  678 + "ftrace_test_nop5:"
  679 + ".byte 0x66,0x66,0x66,0x66,0x90\n"
  680 + "1:"
  681 + ".section .fixup, \"ax\"\n"
  682 + "2: movl $1, %0\n"
  683 + " jmp ftrace_test_nop5\n"
  684 + "3: movl $2, %0\n"
  685 + " jmp 1b\n"
  686 + ".previous\n"
  687 + _ASM_EXTABLE(ftrace_test_p6nop, 2b)
  688 + _ASM_EXTABLE(ftrace_test_nop5, 3b)
  689 + : "=r"(faulted) : "0" (faulted));
  690 +
  691 + switch (faulted) {
  692 + case 0:
  693 + pr_info("converting mcount calls to 0f 1f 44 00 00\n");
  694 + memcpy(ideal_nop5, ftrace_test_p6nop, IDEAL_NOP_SIZE_5);
  695 + break;
  696 + case 1:
  697 + pr_info("converting mcount calls to 66 66 66 66 90\n");
  698 + memcpy(ideal_nop5, ftrace_test_nop5, IDEAL_NOP_SIZE_5);
  699 + break;
  700 + case 2:
  701 + pr_info("converting mcount calls to jmp . + 5\n");
  702 + memcpy(ideal_nop5, ftrace_test_jmp, IDEAL_NOP_SIZE_5);
  703 + break;
  704 + }
  705 +
  706 +}
  707 +#endif
arch/x86/kernel/ftrace.c
... ... @@ -257,14 +257,9 @@
257 257 return mod_code_status;
258 258 }
259 259  
260   -
261   -
262   -
263   -static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
264   -
265 260 static unsigned char *ftrace_nop_replace(void)
266 261 {
267   - return ftrace_nop;
  262 + return ideal_nop5;
268 263 }
269 264  
270 265 static int
... ... @@ -338,62 +333,6 @@
338 333  
339 334 int __init ftrace_dyn_arch_init(void *data)
340 335 {
341   - extern const unsigned char ftrace_test_p6nop[];
342   - extern const unsigned char ftrace_test_nop5[];
343   - extern const unsigned char ftrace_test_jmp[];
344   - int faulted = 0;
345   -
346   - /*
347   - * There is no good nop for all x86 archs.
348   - * We will default to using the P6_NOP5, but first we
349   - * will test to make sure that the nop will actually
350   - * work on this CPU. If it faults, we will then
351   - * go to a lesser efficient 5 byte nop. If that fails
352   - * we then just use a jmp as our nop. This isn't the most
353   - * efficient nop, but we can not use a multi part nop
354   - * since we would then risk being preempted in the middle
355   - * of that nop, and if we enabled tracing then, it might
356   - * cause a system crash.
357   - *
358   - * TODO: check the cpuid to determine the best nop.
359   - */
360   - asm volatile (
361   - "ftrace_test_jmp:"
362   - "jmp ftrace_test_p6nop\n"
363   - "nop\n"
364   - "nop\n"
365   - "nop\n" /* 2 byte jmp + 3 bytes */
366   - "ftrace_test_p6nop:"
367   - P6_NOP5
368   - "jmp 1f\n"
369   - "ftrace_test_nop5:"
370   - ".byte 0x66,0x66,0x66,0x66,0x90\n"
371   - "1:"
372   - ".section .fixup, \"ax\"\n"
373   - "2: movl $1, %0\n"
374   - " jmp ftrace_test_nop5\n"
375   - "3: movl $2, %0\n"
376   - " jmp 1b\n"
377   - ".previous\n"
378   - _ASM_EXTABLE(ftrace_test_p6nop, 2b)
379   - _ASM_EXTABLE(ftrace_test_nop5, 3b)
380   - : "=r"(faulted) : "0" (faulted));
381   -
382   - switch (faulted) {
383   - case 0:
384   - pr_info("converting mcount calls to 0f 1f 44 00 00\n");
385   - memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
386   - break;
387   - case 1:
388   - pr_info("converting mcount calls to 66 66 66 66 90\n");
389   - memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
390   - break;
391   - case 2:
392   - pr_info("converting mcount calls to jmp . + 5\n");
393   - memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
394   - break;
395   - }
396   -
397 336 /* The return code is returned via data */
398 337 *(unsigned long *)data = 0;
399 338  
arch/x86/kernel/jump_label.c
  1 +/*
  2 + * jump label x86 support
  3 + *
  4 + * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
  5 + *
  6 + */
  7 +#include <linux/jump_label.h>
  8 +#include <linux/memory.h>
  9 +#include <linux/uaccess.h>
  10 +#include <linux/module.h>
  11 +#include <linux/list.h>
  12 +#include <linux/jhash.h>
  13 +#include <linux/cpu.h>
  14 +#include <asm/kprobes.h>
  15 +#include <asm/alternative.h>
  16 +
  17 +#ifdef HAVE_JUMP_LABEL
  18 +
  19 +union jump_code_union {
  20 + char code[JUMP_LABEL_NOP_SIZE];
  21 + struct {
  22 + char jump;
  23 + int offset;
  24 + } __attribute__((packed));
  25 +};
  26 +
  27 +void arch_jump_label_transform(struct jump_entry *entry,
  28 + enum jump_label_type type)
  29 +{
  30 + union jump_code_union code;
  31 +
  32 + if (type == JUMP_LABEL_ENABLE) {
  33 + code.jump = 0xe9;
  34 + code.offset = entry->target -
  35 + (entry->code + JUMP_LABEL_NOP_SIZE);
  36 + } else
  37 + memcpy(&code, ideal_nop5, JUMP_LABEL_NOP_SIZE);
  38 + get_online_cpus();
  39 + mutex_lock(&text_mutex);
  40 + text_poke_smp((void *)entry->code, &code, JUMP_LABEL_NOP_SIZE);
  41 + mutex_unlock(&text_mutex);
  42 + put_online_cpus();
  43 +}
  44 +
  45 +void arch_jump_label_text_poke_early(jump_label_t addr)
  46 +{
  47 + text_poke_early((void *)addr, ideal_nop5, JUMP_LABEL_NOP_SIZE);
  48 +}
  49 +
  50 +#endif
arch/x86/kernel/kprobes.c
... ... @@ -1218,7 +1218,8 @@
1218 1218 }
1219 1219 /* Check whether the address range is reserved */
1220 1220 if (ftrace_text_reserved(src, src + len - 1) ||
1221   - alternatives_text_reserved(src, src + len - 1))
  1221 + alternatives_text_reserved(src, src + len - 1) ||
  1222 + jump_label_text_reserved(src, src + len - 1))
1222 1223 return -EBUSY;
1223 1224  
1224 1225 return len;
arch/x86/kernel/module.c
... ... @@ -239,6 +239,9 @@
239 239 apply_paravirt(pseg, pseg + para->sh_size);
240 240 }
241 241  
  242 + /* make jump label nops */
  243 + jump_label_apply_nops(me);
  244 +
242 245 return module_bug_finalize(hdr, sechdrs, me);
243 246 }
244 247  
arch/x86/kernel/setup.c
... ... @@ -112,6 +112,7 @@
112 112 #include <asm/numa_64.h>
113 113 #endif
114 114 #include <asm/mce.h>
  115 +#include <asm/alternative.h>
115 116  
116 117 /*
117 118 * end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries.
... ... @@ -726,6 +727,7 @@
726 727 {
727 728 int acpi = 0;
728 729 int k8 = 0;
  730 + unsigned long flags;
729 731  
730 732 #ifdef CONFIG_X86_32
731 733 memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
... ... @@ -1071,6 +1073,10 @@
1071 1073 x86_init.oem.banner();
1072 1074  
1073 1075 mcheck_init();
  1076 +
  1077 + local_irq_save(flags);
  1078 + arch_init_ideal_nop5();
  1079 + local_irq_restore(flags);
1074 1080 }
1075 1081  
1076 1082 #ifdef CONFIG_X86_32
include/asm-generic/vmlinux.lds.h
... ... @@ -220,6 +220,8 @@
220 220 \
221 221 BUG_TABLE \
222 222 \
  223 + JUMP_TABLE \
  224 + \
223 225 /* PCI quirks */ \
224 226 .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
225 227 VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
... ... @@ -562,6 +564,14 @@
562 564 #else
563 565 #define BUG_TABLE
564 566 #endif
  567 +
  568 +#define JUMP_TABLE \
  569 + . = ALIGN(8); \
  570 + __jump_table : AT(ADDR(__jump_table) - LOAD_OFFSET) { \
  571 + VMLINUX_SYMBOL(__start___jump_table) = .; \
  572 + *(__jump_table) \
  573 + VMLINUX_SYMBOL(__stop___jump_table) = .; \
  574 + }
565 575  
566 576 #ifdef CONFIG_PM_TRACE
567 577 #define TRACEDATA \
include/linux/dynamic_debug.h
1 1 #ifndef _DYNAMIC_DEBUG_H
2 2 #define _DYNAMIC_DEBUG_H
3 3  
  4 +#include <linux/jump_label.h>
  5 +
4 6 /* dynamic_printk_enabled, and dynamic_printk_enabled2 are bitmasks in which
5 7 * bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They
6 8 * use independent hash functions, to reduce the chance of false positives.
... ... @@ -22,8 +24,6 @@
22 24 const char *function;
23 25 const char *filename;
24 26 const char *format;
25   - char primary_hash;
26   - char secondary_hash;
27 27 unsigned int lineno:24;
28 28 /*
29 29 * The flags field controls the behaviour at the callsite.
... ... @@ -33,6 +33,7 @@
33 33 #define _DPRINTK_FLAGS_PRINT (1<<0) /* printk() a message using the format */
34 34 #define _DPRINTK_FLAGS_DEFAULT 0
35 35 unsigned int flags:8;
  36 + char enabled;
36 37 } __attribute__((aligned(8)));
37 38  
38 39  
39 40  
40 41  
41 42  
42 43  
... ... @@ -42,33 +43,35 @@
42 43 #if defined(CONFIG_DYNAMIC_DEBUG)
43 44 extern int ddebug_remove_module(const char *mod_name);
44 45  
45   -#define __dynamic_dbg_enabled(dd) ({ \
46   - int __ret = 0; \
47   - if (unlikely((dynamic_debug_enabled & (1LL << DEBUG_HASH)) && \
48   - (dynamic_debug_enabled2 & (1LL << DEBUG_HASH2)))) \
49   - if (unlikely(dd.flags)) \
50   - __ret = 1; \
51   - __ret; })
52   -
53 46 #define dynamic_pr_debug(fmt, ...) do { \
  47 + __label__ do_printk; \
  48 + __label__ out; \
54 49 static struct _ddebug descriptor \
55 50 __used \
56 51 __attribute__((section("__verbose"), aligned(8))) = \
57   - { KBUILD_MODNAME, __func__, __FILE__, fmt, DEBUG_HASH, \
58   - DEBUG_HASH2, __LINE__, _DPRINTK_FLAGS_DEFAULT }; \
59   - if (__dynamic_dbg_enabled(descriptor)) \
60   - printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \
  52 + { KBUILD_MODNAME, __func__, __FILE__, fmt, __LINE__, \
  53 + _DPRINTK_FLAGS_DEFAULT }; \
  54 + JUMP_LABEL(&descriptor.enabled, do_printk); \
  55 + goto out; \
  56 +do_printk: \
  57 + printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \
  58 +out: ; \
61 59 } while (0)
62 60  
63 61  
64 62 #define dynamic_dev_dbg(dev, fmt, ...) do { \
  63 + __label__ do_printk; \
  64 + __label__ out; \
65 65 static struct _ddebug descriptor \
66 66 __used \
67 67 __attribute__((section("__verbose"), aligned(8))) = \
68   - { KBUILD_MODNAME, __func__, __FILE__, fmt, DEBUG_HASH, \
69   - DEBUG_HASH2, __LINE__, _DPRINTK_FLAGS_DEFAULT }; \
70   - if (__dynamic_dbg_enabled(descriptor)) \
71   - dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \
  68 + { KBUILD_MODNAME, __func__, __FILE__, fmt, __LINE__, \
  69 + _DPRINTK_FLAGS_DEFAULT }; \
  70 + JUMP_LABEL(&descriptor.enabled, do_printk); \
  71 + goto out; \
  72 +do_printk: \
  73 + dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \
  74 +out: ; \
72 75 } while (0)
73 76  
74 77 #else
include/linux/jump_label.h
  1 +#ifndef _LINUX_JUMP_LABEL_H
  2 +#define _LINUX_JUMP_LABEL_H
  3 +
  4 +#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_HAVE_ARCH_JUMP_LABEL)
  5 +# include <asm/jump_label.h>
  6 +# define HAVE_JUMP_LABEL
  7 +#endif
  8 +
  9 +enum jump_label_type {
  10 + JUMP_LABEL_ENABLE,
  11 + JUMP_LABEL_DISABLE
  12 +};
  13 +
  14 +struct module;
  15 +
  16 +#ifdef HAVE_JUMP_LABEL
  17 +
  18 +extern struct jump_entry __start___jump_table[];
  19 +extern struct jump_entry __stop___jump_table[];
  20 +
  21 +extern void arch_jump_label_transform(struct jump_entry *entry,
  22 + enum jump_label_type type);
  23 +extern void arch_jump_label_text_poke_early(jump_label_t addr);
  24 +extern void jump_label_update(unsigned long key, enum jump_label_type type);
  25 +extern void jump_label_apply_nops(struct module *mod);
  26 +extern int jump_label_text_reserved(void *start, void *end);
  27 +
  28 +#define enable_jump_label(key) \
  29 + jump_label_update((unsigned long)key, JUMP_LABEL_ENABLE);
  30 +
  31 +#define disable_jump_label(key) \
  32 + jump_label_update((unsigned long)key, JUMP_LABEL_DISABLE);
  33 +
  34 +#else
  35 +
  36 +#define JUMP_LABEL(key, label) \
  37 +do { \
  38 + if (unlikely(*key)) \
  39 + goto label; \
  40 +} while (0)
  41 +
  42 +#define enable_jump_label(cond_var) \
  43 +do { \
  44 + *(cond_var) = 1; \
  45 +} while (0)
  46 +
  47 +#define disable_jump_label(cond_var) \
  48 +do { \
  49 + *(cond_var) = 0; \
  50 +} while (0)
  51 +
  52 +static inline int jump_label_apply_nops(struct module *mod)
  53 +{
  54 + return 0;
  55 +}
  56 +
  57 +static inline int jump_label_text_reserved(void *start, void *end)
  58 +{
  59 + return 0;
  60 +}
  61 +
  62 +#endif
  63 +
  64 +#endif
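
A minimal usage sketch of this API (hypothetical names: foo_enabled and foo_slowpath are for illustration only; the pattern mirrors the tracepoint.h and dynamic_debug.h call sites elsewhere in this commit):

    static char foo_enabled;

    static void foo_hook(void)
    {
            /* compiles to a patchable nop when HAVE_JUMP_LABEL is defined,
             * or to an unlikely() test of *(&foo_enabled) otherwise */
            JUMP_LABEL(&foo_enabled, do_foo);
            return;
    do_foo:
            foo_slowpath();
    }

    /* flip the branch at run time, e.g. from a sysfs/debugfs handler */
    enable_jump_label(&foo_enabled);
    disable_jump_label(&foo_enabled);
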
include/linux/module.h
... ... @@ -350,7 +350,10 @@
350 350 struct tracepoint *tracepoints;
351 351 unsigned int num_tracepoints;
352 352 #endif
353   -
  353 +#ifdef HAVE_JUMP_LABEL
  354 + struct jump_entry *jump_entries;
  355 + unsigned int num_jump_entries;
  356 +#endif
354 357 #ifdef CONFIG_TRACING
355 358 const char **trace_bprintk_fmt_start;
356 359 unsigned int num_trace_bprintk_fmt;
include/linux/tracepoint.h
... ... @@ -17,6 +17,7 @@
17 17 #include <linux/errno.h>
18 18 #include <linux/types.h>
19 19 #include <linux/rcupdate.h>
  20 +#include <linux/jump_label.h>
20 21  
21 22 struct module;
22 23 struct tracepoint;
... ... @@ -145,7 +146,9 @@
145 146 extern struct tracepoint __tracepoint_##name; \
146 147 static inline void trace_##name(proto) \
147 148 { \
148   - if (unlikely(__tracepoint_##name.state)) \
  149 + JUMP_LABEL(&__tracepoint_##name.state, do_trace); \
  150 + return; \
  151 +do_trace: \
149 152 __DO_TRACE(&__tracepoint_##name, \
150 153 TP_PROTO(data_proto), \
151 154 TP_ARGS(data_args)); \
kernel/Makefile
... ... @@ -10,7 +10,7 @@
10 10 kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
11 11 hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
12 12 notifier.o ksysfs.o pm_qos_params.o sched_clock.o cred.o \
13   - async.o range.o
  13 + async.o range.o jump_label.o
14 14 obj-$(CONFIG_HAVE_EARLY_RES) += early_res.o
15 15 obj-y += groups.o
16 16  
kernel/jump_label.c
  1 +/*
  2 + * jump label support
  3 + *
  4 + * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
  5 + *
  6 + */
  7 +#include <linux/jump_label.h>
  8 +#include <linux/memory.h>
  9 +#include <linux/uaccess.h>
  10 +#include <linux/module.h>
  11 +#include <linux/list.h>
  12 +#include <linux/jhash.h>
  13 +#include <linux/slab.h>
  14 +#include <linux/sort.h>
  15 +#include <linux/err.h>
  16 +
  17 +#ifdef HAVE_JUMP_LABEL
  18 +
  19 +#define JUMP_LABEL_HASH_BITS 6
  20 +#define JUMP_LABEL_TABLE_SIZE (1 << JUMP_LABEL_HASH_BITS)
  21 +static struct hlist_head jump_label_table[JUMP_LABEL_TABLE_SIZE];
  22 +
  23 +/* mutex to protect coming/going of the jump_label table */
  24 +static DEFINE_MUTEX(jump_label_mutex);
  25 +
  26 +struct jump_label_entry {
  27 + struct hlist_node hlist;
  28 + struct jump_entry *table;
  29 + int nr_entries;
  30 + /* hang modules off here */
  31 + struct hlist_head modules;
  32 + unsigned long key;
  33 +};
  34 +
  35 +struct jump_label_module_entry {
  36 + struct hlist_node hlist;
  37 + struct jump_entry *table;
  38 + int nr_entries;
  39 + struct module *mod;
  40 +};
  41 +
  42 +static int jump_label_cmp(const void *a, const void *b)
  43 +{
  44 + const struct jump_entry *jea = a;
  45 + const struct jump_entry *jeb = b;
  46 +
  47 + if (jea->key < jeb->key)
  48 + return -1;
  49 +
  50 + if (jea->key > jeb->key)
  51 + return 1;
  52 +
  53 + return 0;
  54 +}
  55 +
  56 +static void
  57 +sort_jump_label_entries(struct jump_entry *start, struct jump_entry *stop)
  58 +{
  59 + unsigned long size;
  60 +
  61 + size = (((unsigned long)stop - (unsigned long)start)
  62 + / sizeof(struct jump_entry));
  63 + sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
  64 +}
  65 +
  66 +static struct jump_label_entry *get_jump_label_entry(jump_label_t key)
  67 +{
  68 + struct hlist_head *head;
  69 + struct hlist_node *node;
  70 + struct jump_label_entry *e;
  71 + u32 hash = jhash((void *)&key, sizeof(jump_label_t), 0);
  72 +
  73 + head = &jump_label_table[hash & (JUMP_LABEL_TABLE_SIZE - 1)];
  74 + hlist_for_each_entry(e, node, head, hlist) {
  75 + if (key == e->key)
  76 + return e;
  77 + }
  78 + return NULL;
  79 +}
  80 +
  81 +static struct jump_label_entry *
  82 +add_jump_label_entry(jump_label_t key, int nr_entries, struct jump_entry *table)
  83 +{
  84 + struct hlist_head *head;
  85 + struct jump_label_entry *e;
  86 + u32 hash;
  87 +
  88 + e = get_jump_label_entry(key);
  89 + if (e)
  90 + return ERR_PTR(-EEXIST);
  91 +
  92 + e = kmalloc(sizeof(struct jump_label_entry), GFP_KERNEL);
  93 + if (!e)
  94 + return ERR_PTR(-ENOMEM);
  95 +
  96 + hash = jhash((void *)&key, sizeof(jump_label_t), 0);
  97 + head = &jump_label_table[hash & (JUMP_LABEL_TABLE_SIZE - 1)];
  98 + e->key = key;
  99 + e->table = table;
  100 + e->nr_entries = nr_entries;
  101 + INIT_HLIST_HEAD(&(e->modules));
  102 + hlist_add_head(&e->hlist, head);
  103 + return e;
  104 +}
  105 +
  106 +static int
  107 +build_jump_label_hashtable(struct jump_entry *start, struct jump_entry *stop)
  108 +{
  109 + struct jump_entry *iter, *iter_begin;
  110 + struct jump_label_entry *entry;
  111 + int count;
  112 +
  113 + sort_jump_label_entries(start, stop);
  114 + iter = start;
  115 + while (iter < stop) {
  116 + entry = get_jump_label_entry(iter->key);
  117 + if (!entry) {
  118 + iter_begin = iter;
  119 + count = 0;
  120 + while ((iter < stop) &&
  121 + (iter->key == iter_begin->key)) {
  122 + iter++;
  123 + count++;
  124 + }
  125 + entry = add_jump_label_entry(iter_begin->key,
  126 + count, iter_begin);
  127 + if (IS_ERR(entry))
  128 + return PTR_ERR(entry);
  129 + } else {
  130 + WARN_ONCE(1, KERN_ERR "build_jump_label_hashtable: unexpected entry!\n");
  131 + return -1;
  132 + }
  133 + }
  134 + return 0;
  135 +}
  136 +
  137 +/***
  138 + * jump_label_update - update jump label text
  139 + * @key - key value associated with a jump label
  140 + * @type - enum set to JUMP_LABEL_ENABLE or JUMP_LABEL_DISABLE
  141 + *
  142 + * Will enable/disable the jump for jump label @key, depending on the
  143 + * value of @type.
  144 + *
  145 + */
  146 +
  147 +void jump_label_update(unsigned long key, enum jump_label_type type)
  148 +{
  149 + struct jump_entry *iter;
  150 + struct jump_label_entry *entry;
  151 + struct hlist_node *module_node;
  152 + struct jump_label_module_entry *e_module;
  153 + int count;
  154 +
  155 + mutex_lock(&jump_label_mutex);
  156 + entry = get_jump_label_entry((jump_label_t)key);
  157 + if (entry) {
  158 + count = entry->nr_entries;
  159 + iter = entry->table;
  160 + while (count--) {
  161 + if (kernel_text_address(iter->code))
  162 + arch_jump_label_transform(iter, type);
  163 + iter++;
  164 + }
  165 + /* enable/disable jump labels in modules */
  166 + hlist_for_each_entry(e_module, module_node, &(entry->modules),
  167 + hlist) {
  168 + count = e_module->nr_entries;
  169 + iter = e_module->table;
  170 + while (count--) {
  171 + if (kernel_text_address(iter->code))
  172 + arch_jump_label_transform(iter, type);
  173 + iter++;
  174 + }
  175 + }
  176 + }
  177 + mutex_unlock(&jump_label_mutex);
  178 +}
  179 +
  180 +static int addr_conflict(struct jump_entry *entry, void *start, void *end)
  181 +{
  182 + if (entry->code <= (unsigned long)end &&
  183 + entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
  184 + return 1;
  185 +
  186 + return 0;
  187 +}
  188 +
  189 +#ifdef CONFIG_MODULES
  190 +
  191 +static int module_conflict(void *start, void *end)
  192 +{
  193 + struct hlist_head *head;
  194 + struct hlist_node *node, *node_next, *module_node, *module_node_next;
  195 + struct jump_label_entry *e;
  196 + struct jump_label_module_entry *e_module;
  197 + struct jump_entry *iter;
  198 + int i, count;
  199 + int conflict = 0;
  200 +
  201 + for (i = 0; i < JUMP_LABEL_TABLE_SIZE; i++) {
  202 + head = &jump_label_table[i];
  203 + hlist_for_each_entry_safe(e, node, node_next, head, hlist) {
  204 + hlist_for_each_entry_safe(e_module, module_node,
  205 + module_node_next,
  206 + &(e->modules), hlist) {
  207 + count = e_module->nr_entries;
  208 + iter = e_module->table;
  209 + while (count--) {
  210 + if (addr_conflict(iter, start, end)) {
  211 + conflict = 1;
  212 + goto out;
  213 + }
  214 + iter++;
  215 + }
  216 + }
  217 + }
  218 + }
  219 +out:
  220 + return conflict;
  221 +}
  222 +
  223 +#endif
  224 +
  225 +/***
  226 + * jump_label_text_reserved - check if addr range is reserved
  227 + * @start: start text addr
  228 + * @end: end text addr
  229 + *
  230 + * checks if the text addr located between @start and @end
  231 + * overlaps with any of the jump label patch addresses. Code
  232 + * that wants to modify kernel text should first verify that
  233 + * it does not overlap with any of the jump label addresses.
  234 + *
  235 + * returns 1 if there is an overlap, 0 otherwise
  236 + */
  237 +int jump_label_text_reserved(void *start, void *end)
  238 +{
  239 + struct jump_entry *iter;
  240 + struct jump_entry *iter_start = __start___jump_table;
  241 + struct jump_entry *iter_stop = __stop___jump_table;
  242 + int conflict = 0;
  243 +
  244 + mutex_lock(&jump_label_mutex);
  245 + iter = iter_start;
  246 + while (iter < iter_stop) {
  247 + if (addr_conflict(iter, start, end)) {
  248 + conflict = 1;
  249 + goto out;
  250 + }
  251 + iter++;
  252 + }
  253 +
  254 + /* now check modules */
  255 +#ifdef CONFIG_MODULES
  256 + conflict = module_conflict(start, end);
  257 +#endif
  258 +out:
  259 + mutex_unlock(&jump_label_mutex);
  260 + return conflict;
  261 +}
  262 +
  263 +static __init int init_jump_label(void)
  264 +{
  265 + int ret;
  266 + struct jump_entry *iter_start = __start___jump_table;
  267 + struct jump_entry *iter_stop = __stop___jump_table;
  268 + struct jump_entry *iter;
  269 +
  270 + mutex_lock(&jump_label_mutex);
  271 + ret = build_jump_label_hashtable(__start___jump_table,
  272 + __stop___jump_table);
  273 + iter = iter_start;
  274 + while (iter < iter_stop) {
  275 + arch_jump_label_text_poke_early(iter->code);
  276 + iter++;
  277 + }
  278 + mutex_unlock(&jump_label_mutex);
  279 + return ret;
  280 +}
  281 +early_initcall(init_jump_label);
  282 +
  283 +#ifdef CONFIG_MODULES
  284 +
  285 +static struct jump_label_module_entry *
  286 +add_jump_label_module_entry(struct jump_label_entry *entry,
  287 + struct jump_entry *iter_begin,
  288 + int count, struct module *mod)
  289 +{
  290 + struct jump_label_module_entry *e;
  291 +
  292 + e = kmalloc(sizeof(struct jump_label_module_entry), GFP_KERNEL);
  293 + if (!e)
  294 + return ERR_PTR(-ENOMEM);
  295 + e->mod = mod;
  296 + e->nr_entries = count;
  297 + e->table = iter_begin;
  298 + hlist_add_head(&e->hlist, &entry->modules);
  299 + return e;
  300 +}
  301 +
  302 +static int add_jump_label_module(struct module *mod)
  303 +{
  304 + struct jump_entry *iter, *iter_begin;
  305 + struct jump_label_entry *entry;
  306 + struct jump_label_module_entry *module_entry;
  307 + int count;
  308 +
  309 + /* if the module doesn't have jump label entries, just return */
  310 + if (!mod->num_jump_entries)
  311 + return 0;
  312 +
  313 + sort_jump_label_entries(mod->jump_entries,
  314 + mod->jump_entries + mod->num_jump_entries);
  315 + iter = mod->jump_entries;
  316 + while (iter < mod->jump_entries + mod->num_jump_entries) {
  317 + entry = get_jump_label_entry(iter->key);
  318 + iter_begin = iter;
  319 + count = 0;
  320 + while ((iter < mod->jump_entries + mod->num_jump_entries) &&
  321 + (iter->key == iter_begin->key)) {
  322 + iter++;
  323 + count++;
  324 + }
  325 + if (!entry) {
  326 + entry = add_jump_label_entry(iter_begin->key, 0, NULL);
  327 + if (IS_ERR(entry))
  328 + return PTR_ERR(entry);
  329 + }
  330 + module_entry = add_jump_label_module_entry(entry, iter_begin,
  331 + count, mod);
  332 + if (IS_ERR(module_entry))
  333 + return PTR_ERR(module_entry);
  334 + }
  335 + return 0;
  336 +}
  337 +
  338 +static void remove_jump_label_module(struct module *mod)
  339 +{
  340 + struct hlist_head *head;
  341 + struct hlist_node *node, *node_next, *module_node, *module_node_next;
  342 + struct jump_label_entry *e;
  343 + struct jump_label_module_entry *e_module;
  344 + int i;
  345 +
  346 + /* if the module doesn't have jump label entries, just return */
  347 + if (!mod->num_jump_entries)
  348 + return;
  349 +
  350 + for (i = 0; i < JUMP_LABEL_TABLE_SIZE; i++) {
  351 + head = &jump_label_table[i];
  352 + hlist_for_each_entry_safe(e, node, node_next, head, hlist) {
  353 + hlist_for_each_entry_safe(e_module, module_node,
  354 + module_node_next,
  355 + &(e->modules), hlist) {
  356 + if (e_module->mod == mod) {
  357 + hlist_del(&e_module->hlist);
  358 + kfree(e_module);
  359 + }
  360 + }
  361 + if (hlist_empty(&e->modules) && (e->nr_entries == 0)) {
  362 + hlist_del(&e->hlist);
  363 + kfree(e);
  364 + }
  365 + }
  366 + }
  367 +}
  368 +
  369 +static int
  370 +jump_label_module_notify(struct notifier_block *self, unsigned long val,
  371 + void *data)
  372 +{
  373 + struct module *mod = data;
  374 + int ret = 0;
  375 +
  376 + switch (val) {
  377 + case MODULE_STATE_COMING:
  378 + mutex_lock(&jump_label_mutex);
  379 + ret = add_jump_label_module(mod);
  380 + if (ret)
  381 + remove_jump_label_module(mod);
  382 + mutex_unlock(&jump_label_mutex);
  383 + break;
  384 + case MODULE_STATE_GOING:
  385 + mutex_lock(&jump_label_mutex);
  386 + remove_jump_label_module(mod);
  387 + mutex_unlock(&jump_label_mutex);
  388 + break;
  389 + }
  390 + return ret;
  391 +}
  392 +
  393 +/***
  394 + * jump_label_apply_nops - patch module jump labels with arch_jump_label_text_poke_early()
  395 + * @mod: module to patch
  396 + *
  397 + * Allow for run-time selection of the optimal nops. Before the module
  398 + * loads, patch these with arch_jump_label_text_poke_early(), which is
  399 + * provided by the arch-specific jump label code.
  400 + */
  401 +void jump_label_apply_nops(struct module *mod)
  402 +{
  403 + struct jump_entry *iter;
  404 +
  405 + /* if the module doesn't have jump label entries, just return */
  406 + if (!mod->num_jump_entries)
  407 + return;
  408 +
  409 + iter = mod->jump_entries;
  410 + while (iter < mod->jump_entries + mod->num_jump_entries) {
  411 + arch_jump_label_text_poke_early(iter->code);
  412 + iter++;
  413 + }
  414 +}
  415 +
  416 +struct notifier_block jump_label_module_nb = {
  417 + .notifier_call = jump_label_module_notify,
  418 + .priority = 0,
  419 +};
  420 +
  421 +static __init int init_jump_label_module(void)
  422 +{
  423 + return register_module_notifier(&jump_label_module_nb);
  424 +}
  425 +early_initcall(init_jump_label_module);
  426 +
  427 +#endif /* CONFIG_MODULES */
  428 +
  429 +#endif
kernel/kprobes.c
... ... @@ -47,6 +47,7 @@
47 47 #include <linux/memory.h>
48 48 #include <linux/ftrace.h>
49 49 #include <linux/cpu.h>
  50 +#include <linux/jump_label.h>
50 51  
51 52 #include <asm-generic/sections.h>
52 53 #include <asm/cacheflush.h>
... ... @@ -1146,7 +1147,8 @@
1146 1147 preempt_disable();
1147 1148 if (!kernel_text_address((unsigned long) p->addr) ||
1148 1149 in_kprobes_functions((unsigned long) p->addr) ||
1149   - ftrace_text_reserved(p->addr, p->addr)) {
  1150 + ftrace_text_reserved(p->addr, p->addr) ||
  1151 + jump_label_text_reserved(p->addr, p->addr)) {
1150 1152 preempt_enable();
1151 1153 return -EINVAL;
1152 1154 }
kernel/module.c
... ... @@ -55,6 +55,7 @@
55 55 #include <linux/async.h>
56 56 #include <linux/percpu.h>
57 57 #include <linux/kmemleak.h>
  58 +#include <linux/jump_label.h>
58 59  
59 60 #define CREATE_TRACE_POINTS
60 61 #include <trace/events/module.h>
... ... @@ -2307,6 +2308,11 @@
2307 2308 mod->tracepoints = section_objs(info, "__tracepoints",
2308 2309 sizeof(*mod->tracepoints),
2309 2310 &mod->num_tracepoints);
  2311 +#endif
  2312 +#ifdef HAVE_JUMP_LABEL
  2313 + mod->jump_entries = section_objs(info, "__jump_table",
  2314 + sizeof(*mod->jump_entries),
  2315 + &mod->num_jump_entries);
2310 2316 #endif
2311 2317 #ifdef CONFIG_EVENT_TRACING
2312 2318 mod->trace_events = section_objs(info, "_ftrace_events",
kernel/trace/trace_workqueue.c
... ... @@ -263,6 +263,11 @@
263 263 {
264 264 int ret, cpu;
265 265  
  266 + for_each_possible_cpu(cpu) {
  267 + spin_lock_init(&workqueue_cpu_stat(cpu)->lock);
  268 + INIT_LIST_HEAD(&workqueue_cpu_stat(cpu)->list);
  269 + }
  270 +
266 271 ret = register_trace_workqueue_insertion(probe_workqueue_insertion, NULL);
267 272 if (ret)
268 273 goto out;
... ... @@ -278,11 +283,6 @@
278 283 ret = register_trace_workqueue_destruction(probe_workqueue_destruction, NULL);
279 284 if (ret)
280 285 goto no_creation;
281   -
282   - for_each_possible_cpu(cpu) {
283   - spin_lock_init(&workqueue_cpu_stat(cpu)->lock);
284   - INIT_LIST_HEAD(&workqueue_cpu_stat(cpu)->list);
285   - }
286 286  
287 287 return 0;
288 288  
kernel/tracepoint.c
... ... @@ -25,6 +25,7 @@
25 25 #include <linux/err.h>
26 26 #include <linux/slab.h>
27 27 #include <linux/sched.h>
  28 +#include <linux/jump_label.h>
28 29  
29 30 extern struct tracepoint __start___tracepoints[];
30 31 extern struct tracepoint __stop___tracepoints[];
... ... @@ -263,7 +264,13 @@
263 264 * is used.
264 265 */
265 266 rcu_assign_pointer(elem->funcs, (*entry)->funcs);
266   - elem->state = active;
  267 + if (!elem->state && active) {
  268 + enable_jump_label(&elem->state);
  269 + elem->state = active;
  270 + } else if (elem->state && !active) {
  271 + disable_jump_label(&elem->state);
  272 + elem->state = active;
  273 + }
267 274 }
268 275  
269 276 /*
... ... @@ -277,7 +284,10 @@
277 284 if (elem->unregfunc && elem->state)
278 285 elem->unregfunc();
279 286  
280   - elem->state = 0;
  287 + if (elem->state) {
  288 + disable_jump_label(&elem->state);
  289 + elem->state = 0;
  290 + }
281 291 rcu_assign_pointer(elem->funcs, NULL);
282 292 }
283 293  
lib/dynamic_debug.c
... ... @@ -26,19 +26,11 @@
26 26 #include <linux/dynamic_debug.h>
27 27 #include <linux/debugfs.h>
28 28 #include <linux/slab.h>
  29 +#include <linux/jump_label.h>
29 30  
30 31 extern struct _ddebug __start___verbose[];
31 32 extern struct _ddebug __stop___verbose[];
32 33  
33   -/* dynamic_debug_enabled, and dynamic_debug_enabled2 are bitmasks in which
34   - * bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They
35   - * use independent hash functions, to reduce the chance of false positives.
36   - */
37   -long long dynamic_debug_enabled;
38   -EXPORT_SYMBOL_GPL(dynamic_debug_enabled);
39   -long long dynamic_debug_enabled2;
40   -EXPORT_SYMBOL_GPL(dynamic_debug_enabled2);
41   -
42 34 struct ddebug_table {
43 35 struct list_head link;
44 36 char *mod_name;
... ... @@ -88,26 +80,6 @@
88 80 }
89 81  
90 82 /*
91   - * must be called with ddebug_lock held
92   - */
93   -
94   -static int disabled_hash(char hash, bool first_table)
95   -{
96   - struct ddebug_table *dt;
97   - char table_hash_value;
98   -
99   - list_for_each_entry(dt, &ddebug_tables, link) {
100   - if (first_table)
101   - table_hash_value = dt->ddebugs->primary_hash;
102   - else
103   - table_hash_value = dt->ddebugs->secondary_hash;
104   - if (dt->num_enabled && (hash == table_hash_value))
105   - return 0;
106   - }
107   - return 1;
108   -}
109   -
110   -/*
111 83 * Search the tables for _ddebug's which match the given
112 84 * `query' and apply the `flags' and `mask' to them. Tells
113 85 * the user which ddebug's were changed, or whether none
114 86  
... ... @@ -170,17 +142,9 @@
170 142 dt->num_enabled++;
171 143 dp->flags = newflags;
172 144 if (newflags) {
173   - dynamic_debug_enabled |=
174   - (1LL << dp->primary_hash);
175   - dynamic_debug_enabled2 |=
176   - (1LL << dp->secondary_hash);
  145 + enable_jump_label(&dp->enabled);
177 146 } else {
178   - if (disabled_hash(dp->primary_hash, true))
179   - dynamic_debug_enabled &=
180   - ~(1LL << dp->primary_hash);
181   - if (disabled_hash(dp->secondary_hash, false))
182   - dynamic_debug_enabled2 &=
183   - ~(1LL << dp->secondary_hash);
  147 + disable_jump_label(&dp->enabled);
184 148 }
185 149 if (verbose)
186 150 printk(KERN_INFO
scripts/Makefile.lib
... ... @@ -101,14 +101,6 @@
101 101 modname_flags = $(if $(filter 1,$(words $(modname))),\
102 102 -D"KBUILD_MODNAME=KBUILD_STR($(call name-fix,$(modname)))")
103 103  
104   -#hash values
105   -ifdef CONFIG_DYNAMIC_DEBUG
106   -debug_flags = -D"DEBUG_HASH=$(shell ./scripts/basic/hash djb2 $(@D)$(modname))"\
107   - -D"DEBUG_HASH2=$(shell ./scripts/basic/hash r5 $(@D)$(modname))"
108   -else
109   -debug_flags =
110   -endif
111   -
112 104 orig_c_flags = $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(KBUILD_SUBDIR_CCFLAGS) \
113 105 $(ccflags-y) $(CFLAGS_$(basetarget).o)
114 106 _c_flags = $(filter-out $(CFLAGS_REMOVE_$(basetarget).o), $(orig_c_flags))
... ... @@ -152,8 +144,7 @@
152 144  
153 145 c_flags = -Wp,-MD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) \
154 146 $(__c_flags) $(modkern_cflags) \
155   - -D"KBUILD_STR(s)=\#s" $(basename_flags) $(modname_flags) \
156   - $(debug_flags)
  147 + -D"KBUILD_STR(s)=\#s" $(basename_flags) $(modname_flags)
157 148  
158 149 a_flags = -Wp,-MD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) \
159 150 $(__a_flags) $(modkern_aflags)
scripts/basic/Makefile
... ... @@ -9,7 +9,7 @@
9 9 # fixdep: Used to generate dependency information during build process
10 10 # docproc: Used in Documentation/DocBook
11 11  
12   -hostprogs-y := fixdep docproc hash
  12 +hostprogs-y := fixdep docproc
13 13 always := $(hostprogs-y)
14 14  
15 15 # fixdep is needed to compile other host programs
scripts/basic/hash.c
1   -/*
2   - * Copyright (C) 2008 Red Hat, Inc., Jason Baron <jbaron@redhat.com>
3   - *
4   - */
5   -
6   -#include <stdio.h>
7   -#include <stdlib.h>
8   -#include <string.h>
9   -
10   -#define DYNAMIC_DEBUG_HASH_BITS 6
11   -
12   -static const char *program;
13   -
14   -static void usage(void)
15   -{
16   - printf("Usage: %s <djb2|r5> <modname>\n", program);
17   - exit(1);
18   -}
19   -
20   -/* djb2 hashing algorithm by Dan Bernstein. From:
21   - * http://www.cse.yorku.ca/~oz/hash.html
22   - */
23   -
24   -static unsigned int djb2_hash(char *str)
25   -{
26   - unsigned long hash = 5381;
27   - int c;
28   -
29   - c = *str;
30   - while (c) {
31   - hash = ((hash << 5) + hash) + c;
32   - c = *++str;
33   - }
34   - return (unsigned int)(hash & ((1 << DYNAMIC_DEBUG_HASH_BITS) - 1));
35   -}
36   -
37   -static unsigned int r5_hash(char *str)
38   -{
39   - unsigned long hash = 0;
40   - int c;
41   -
42   - c = *str;
43   - while (c) {
44   - hash = (hash + (c << 4) + (c >> 4)) * 11;
45   - c = *++str;
46   - }
47   - return (unsigned int)(hash & ((1 << DYNAMIC_DEBUG_HASH_BITS) - 1));
48   -}
49   -
50   -int main(int argc, char *argv[])
51   -{
52   - program = argv[0];
53   -
54   - if (argc != 3)
55   - usage();
56   - if (!strcmp(argv[1], "djb2"))
57   - printf("%d\n", djb2_hash(argv[2]));
58   - else if (!strcmp(argv[1], "r5"))
59   - printf("%d\n", r5_hash(argv[2]));
60   - else
61   - usage();
62   - exit(0);
63   -}
scripts/gcc-goto.sh
  1 +#!/bin/sh
  2 +# Test for gcc 'asm goto' support
  3 +# Copyright (C) 2010, Jason Baron <jbaron@redhat.com>
  4 +
  5 +echo "int main(void) { entry: asm goto (\"\"::::entry); return 0; }" | $1 -x c - -c -o /dev/null >/dev/null 2>&1 && echo "y"
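
The Makefile hunk at the top of this commit invokes the script as $(CONFIG_SHELL) scripts/gcc-goto.sh $(CC); run by hand it behaves like this (output assumes a compiler new enough for 'asm goto', i.e. gcc >= 4.5):

    $ scripts/gcc-goto.sh gcc
    y

An unsupported compiler prints nothing, so the ifeq test fails and -DCC_HAVE_ASM_GOTO is not added to KBUILD_CFLAGS.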