Commit 39c715b71740c4a78ba4769fb54826929bac03cb

Authored by Ingo Molnar
Committed by Linus Torvalds
1 parent 84929801e1

[PATCH] smp_processor_id() cleanup

This patch implements a number of smp_processor_id() cleanup ideas that
Arjan van de Ven and I came up with.

The previous __smp_processor_id/_smp_processor_id/smp_processor_id API
spaghetti was hard to follow, both on the implementation side and on the
usage side.

Some of the complexity arose from picking the wrong names; some of it
came from the fact that not all architectures defined
__smp_processor_id.

In the new code, there are two externally visible symbols:

 - smp_processor_id(): debug variant. Under DEBUG_PREEMPT it checks
   that the CPU ID is being used in a preemption-safe way.

 - raw_smp_processor_id(): nondebug variant. Replaces all existing
   uses of _smp_processor_id() and __smp_processor_id(). Defined
   by every SMP architecture in include/asm-*/smp.h. (See the usage
   sketch after this list.)
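
For illustration, here is a minimal caller-side sketch (not part of this
patch) showing how the two variants are meant to be used; get_cpu() and
put_cpu() are the preempt-safe helpers from include/linux/smp.h:

    /* Illustrative sketch only - not from this patch. */
    #include <linux/smp.h>

    static void per_cpu_example(void)
    {
            int cpu;

            /*
             * Preemption-safe use: get_cpu() disables preemption, so
             * the task cannot migrate away from 'cpu' until put_cpu():
             */
            cpu = get_cpu();  /* preempt_disable() + smp_processor_id() */
            /* ... work that must stay on 'cpu' ... */
            put_cpu();

            /*
             * Best-effort use (e.g. statistics), where migrating
             * between the lookup and the use is harmless - the raw
             * variant skips the DEBUG_PREEMPT check:
             */
            cpu = raw_smp_processor_id();
    }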

There is one new internal symbol, dependent on DEBUG_PREEMPT:

 - debug_smp_processor_id(): internal debug variant that
                             smp_processor_id() maps to when
                             DEBUG_PREEMPT is enabled.
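
Concretely, the mapping in include/linux/smp.h becomes (taken straight
from the smp.h hunk below):

    #ifdef CONFIG_DEBUG_PREEMPT
      extern unsigned int debug_smp_processor_id(void);
    # define smp_processor_id() debug_smp_processor_id()
    #else
    # define smp_processor_id() raw_smp_processor_id()
    #endif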

Also, I moved the debug variant (now called debug_smp_processor_id())
from lib/kernel_lock.c into a new lib/smp_processor_id.c file.  All
related comments got updated and/or clarified.

I have build/boot tested the following 8 .config combinations on x86:

 {SMP,UP} x {PREEMPT,!PREEMPT} x {DEBUG_PREEMPT,!DEBUG_PREEMPT}

I have also build/boot tested x86_64 on UP/PREEMPT/DEBUG_PREEMPT.  (Other
architectures are untested, but should work just fine.)

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@infradead.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Showing 37 changed files with 119 additions and 125 deletions

arch/i386/kernel/traps.c
... ... @@ -306,7 +306,7 @@
306 306 };
307 307 static int die_counter;
308 308  
309   - if (die.lock_owner != _smp_processor_id()) {
  309 + if (die.lock_owner != raw_smp_processor_id()) {
310 310 console_verbose();
311 311 spin_lock_irq(&die.lock);
312 312 die.lock_owner = smp_processor_id();
arch/i386/lib/delay.c
... ... @@ -34,7 +34,7 @@
34 34 xloops *= 4;
35 35 __asm__("mull %0"
36 36 :"=d" (xloops), "=&a" (d0)
37   - :"1" (xloops),"0" (cpu_data[_smp_processor_id()].loops_per_jiffy * (HZ/4)));
  37 + :"1" (xloops),"0" (cpu_data[raw_smp_processor_id()].loops_per_jiffy * (HZ/4)));
38 38 __delay(++xloops);
39 39 }
40 40  
arch/ppc/lib/locks.c
... ... @@ -130,7 +130,7 @@
130 130 while (!read_can_lock(rw)) {
131 131 if (--stuck == 0) {
132 132 printk("_read_lock(%p) CPU#%d lock %d\n",
133   - rw, _smp_processor_id(), rw->lock);
  133 + rw, raw_smp_processor_id(), rw->lock);
134 134 stuck = INIT_STUCK;
135 135 }
136 136 }
... ... @@ -158,7 +158,7 @@
158 158 while (!write_can_lock(rw)) {
159 159 if (--stuck == 0) {
160 160 printk("write_lock(%p) CPU#%d lock %d)\n",
161   - rw, _smp_processor_id(), rw->lock);
  161 + rw, raw_smp_processor_id(), rw->lock);
162 162 stuck = INIT_STUCK;
163 163 }
164 164 }
arch/ppc64/kernel/idle.c
... ... @@ -292,7 +292,7 @@
292 292 if (need_resched())
293 293 schedule();
294 294  
295   - if (cpu_is_offline(_smp_processor_id()) &&
  295 + if (cpu_is_offline(raw_smp_processor_id()) &&
296 296 system_state == SYSTEM_RUNNING)
297 297 cpu_die();
298 298 }
arch/sh/lib/delay.c
... ... @@ -24,7 +24,7 @@
24 24 __asm__("dmulu.l %0, %2\n\t"
25 25 "sts mach, %0"
26 26 : "=r" (xloops)
27   - : "0" (xloops), "r" (cpu_data[_smp_processor_id()].loops_per_jiffy)
  27 + : "0" (xloops), "r" (cpu_data[raw_smp_processor_id()].loops_per_jiffy)
28 28 : "macl", "mach");
29 29 __delay(xloops * HZ);
30 30 }
arch/sparc64/lib/delay.c
... ... @@ -31,7 +31,7 @@
31 31 {
32 32 n *= 4;
33 33  
34   - n *= (cpu_data(_smp_processor_id()).udelay_val * (HZ/4));
  34 + n *= (cpu_data(raw_smp_processor_id()).udelay_val * (HZ/4));
35 35 n >>= 32;
36 36  
37 37 __delay(n + 1);
arch/x86_64/lib/delay.c
... ... @@ -34,7 +34,7 @@
34 34  
35 35 inline void __const_udelay(unsigned long xloops)
36 36 {
37   - __delay(((xloops * cpu_data[_smp_processor_id()].loops_per_jiffy) >> 32) * HZ);
  37 + __delay(((xloops * cpu_data[raw_smp_processor_id()].loops_per_jiffy) >> 32) * HZ);
38 38 }
39 39  
40 40 void __udelay(unsigned long usecs)
drivers/acpi/processor_idle.c
... ... @@ -171,7 +171,7 @@
171 171 int sleep_ticks = 0;
172 172 u32 t1, t2 = 0;
173 173  
174   - pr = processors[_smp_processor_id()];
  174 + pr = processors[raw_smp_processor_id()];
175 175 if (!pr)
176 176 return;
177 177  
drivers/input/gameport/gameport.c
... ... @@ -134,7 +134,7 @@
134 134 }
135 135  
136 136 gameport_close(gameport);
137   - return (cpu_data[_smp_processor_id()].loops_per_jiffy * (unsigned long)HZ / (1000 / 50)) / (tx < 1 ? 1 : tx);
  137 + return (cpu_data[raw_smp_processor_id()].loops_per_jiffy * (unsigned long)HZ / (1000 / 50)) / (tx < 1 ? 1 : tx);
138 138  
139 139 #else
140 140  
drivers/oprofile/buffer_sync.c
... ... @@ -62,7 +62,7 @@
62 62 /* To avoid latency problems, we only process the current CPU,
63 63 * hoping that most samples for the task are on this CPU
64 64 */
65   - sync_buffer(_smp_processor_id());
  65 + sync_buffer(raw_smp_processor_id());
66 66 return 0;
67 67 }
68 68  
... ... @@ -86,7 +86,7 @@
86 86 /* To avoid latency problems, we only process the current CPU,
87 87 * hoping that most samples for the task are on this CPU
88 88 */
89   - sync_buffer(_smp_processor_id());
  89 + sync_buffer(raw_smp_processor_id());
90 90 return 0;
91 91 }
92 92  
fs/xfs/linux-2.6/xfs_linux.h
... ... @@ -145,10 +145,10 @@
145 145 #define xfs_inherit_nosymlinks xfs_params.inherit_nosym.val
146 146 #define xfs_rotorstep xfs_params.rotorstep.val
147 147  
148   -#ifndef __smp_processor_id
149   -#define __smp_processor_id() smp_processor_id()
  148 +#ifndef raw_smp_processor_id
  149 +#define raw_smp_processor_id() smp_processor_id()
150 150 #endif
151   -#define current_cpu() __smp_processor_id()
  151 +#define current_cpu() raw_smp_processor_id()
152 152 #define current_pid() (current->pid)
153 153 #define current_fsuid(cred) (current->fsuid)
154 154 #define current_fsgid(cred) (current->fsgid)
include/asm-alpha/smp.h
... ... @@ -43,7 +43,7 @@
43 43 #define PROC_CHANGE_PENALTY 20
44 44  
45 45 #define hard_smp_processor_id() __hard_smp_processor_id()
46   -#define smp_processor_id() (current_thread_info()->cpu)
  46 +#define raw_smp_processor_id() (current_thread_info()->cpu)
47 47  
48 48 extern cpumask_t cpu_present_mask;
49 49 extern cpumask_t cpu_online_map;
include/asm-arm/smp.h
... ... @@ -21,7 +21,7 @@
21 21 # error "<asm-arm/smp.h> included in non-SMP build"
22 22 #endif
23 23  
24   -#define smp_processor_id() (current_thread_info()->cpu)
  24 +#define raw_smp_processor_id() (current_thread_info()->cpu)
25 25  
26 26 extern cpumask_t cpu_present_mask;
27 27 #define cpu_possible_map cpu_present_mask
include/asm-i386/smp.h
... ... @@ -51,7 +51,7 @@
51 51 * from the initial startup. We map APIC_BASE very early in page_setup(),
52 52 * so this is correct in the x86 case.
53 53 */
54   -#define __smp_processor_id() (current_thread_info()->cpu)
  54 +#define raw_smp_processor_id() (current_thread_info()->cpu)
55 55  
56 56 extern cpumask_t cpu_callout_map;
57 57 extern cpumask_t cpu_callin_map;
include/asm-ia64/smp.h
... ... @@ -46,7 +46,7 @@
46 46 #define SMP_IRQ_REDIRECTION (1 << 0)
47 47 #define SMP_IPI_REDIRECTION (1 << 1)
48 48  
49   -#define smp_processor_id() (current_thread_info()->cpu)
  49 +#define raw_smp_processor_id() (current_thread_info()->cpu)
50 50  
51 51 extern struct smp_boot_data {
52 52 int cpu_count;
include/asm-m32r/smp.h
... ... @@ -66,7 +66,7 @@
66 66 #define physid_to_cpu(physid) physid_2_cpu[physid]
67 67 #define cpu_to_physid(cpu_id) cpu_2_physid[cpu_id]
68 68  
69   -#define smp_processor_id() (current_thread_info()->cpu)
  69 +#define raw_smp_processor_id() (current_thread_info()->cpu)
70 70  
71 71 extern cpumask_t cpu_callout_map;
72 72 #define cpu_possible_map cpu_callout_map
include/asm-mips/smp.h
... ... @@ -21,7 +21,7 @@
21 21 #include <linux/cpumask.h>
22 22 #include <asm/atomic.h>
23 23  
24   -#define smp_processor_id() (current_thread_info()->cpu)
  24 +#define raw_smp_processor_id() (current_thread_info()->cpu)
25 25  
26 26 /* Map from cpu id to sequential logical cpu number. This will only
27 27 not be idempotent when cpus failed to come on-line. */
include/asm-parisc/smp.h
... ... @@ -51,7 +51,7 @@
51 51  
52 52 extern unsigned long cpu_present_mask;
53 53  
54   -#define smp_processor_id() (current_thread_info()->cpu)
  54 +#define raw_smp_processor_id() (current_thread_info()->cpu)
55 55  
56 56 #endif /* CONFIG_SMP */
57 57  
include/asm-ppc/smp.h
... ... @@ -44,7 +44,7 @@
44 44 #define NO_PROC_ID 0xFF /* No processor magic marker */
45 45 #define PROC_CHANGE_PENALTY 20
46 46  
47   -#define smp_processor_id() (current_thread_info()->cpu)
  47 +#define raw_smp_processor_id() (current_thread_info()->cpu)
48 48  
49 49 extern int __cpu_up(unsigned int cpu);
50 50  
include/asm-ppc64/smp.h
... ... @@ -45,7 +45,7 @@
45 45 void generic_mach_cpu_die(void);
46 46 #endif
47 47  
48   -#define __smp_processor_id() (get_paca()->paca_index)
  48 +#define raw_smp_processor_id() (get_paca()->paca_index)
49 49 #define hard_smp_processor_id() (get_paca()->hw_cpu_id)
50 50  
51 51 extern cpumask_t cpu_sibling_map[NR_CPUS];
include/asm-s390/smp.h
... ... @@ -47,7 +47,7 @@
47 47  
48 48 #define PROC_CHANGE_PENALTY 20 /* Schedule penalty */
49 49  
50   -#define smp_processor_id() (S390_lowcore.cpu_data.cpu_nr)
  50 +#define raw_smp_processor_id() (S390_lowcore.cpu_data.cpu_nr)
51 51  
52 52 extern int smp_get_cpu(cpumask_t cpu_map);
53 53 extern void smp_put_cpu(int cpu);
include/asm-sh/smp.h
... ... @@ -25,7 +25,7 @@
25 25  
26 26 #define cpu_online(cpu) cpu_isset(cpu, cpu_online_map)
27 27  
28   -#define smp_processor_id() (current_thread_info()->cpu)
  28 +#define raw_smp_processor_id() (current_thread_info()->cpu)
29 29  
30 30 /* I've no idea what the real meaning of this is */
31 31 #define PROC_CHANGE_PENALTY 20
include/asm-sparc/smp.h
... ... @@ -148,7 +148,7 @@
148 148 }
149 149 #endif
150 150  
151   -#define smp_processor_id() (current_thread_info()->cpu)
  151 +#define raw_smp_processor_id() (current_thread_info()->cpu)
152 152  
153 153 #define prof_multiplier(__cpu) cpu_data(__cpu).multiplier
154 154 #define prof_counter(__cpu) cpu_data(__cpu).counter
include/asm-sparc64/smp.h
... ... @@ -64,7 +64,7 @@
64 64 }
65 65 }
66 66  
67   -#define smp_processor_id() (current_thread_info()->cpu)
  67 +#define raw_smp_processor_id() (current_thread_info()->cpu)
68 68  
69 69 #endif /* !(__ASSEMBLY__) */
70 70  
include/asm-um/smp.h
... ... @@ -8,7 +8,8 @@
8 8 #include "asm/current.h"
9 9 #include "linux/cpumask.h"
10 10  
11   -#define smp_processor_id() (current_thread->cpu)
  11 +#define raw_smp_processor_id() (current_thread->cpu)
  12 +
12 13 #define cpu_logical_map(n) (n)
13 14 #define cpu_number_map(n) (n)
14 15 #define PROC_CHANGE_PENALTY 15 /* Pick a number, any number */
include/asm-x86_64/smp.h
... ... @@ -68,7 +68,7 @@
68 68 return cpus_weight(cpu_callout_map);
69 69 }
70 70  
71   -#define __smp_processor_id() read_pda(cpunumber)
  71 +#define raw_smp_processor_id() read_pda(cpunumber)
72 72  
73 73 extern __inline int hard_smp_processor_id(void)
74 74 {
include/linux/mmzone.h
... ... @@ -381,7 +381,7 @@
381 381  
382 382 #include <linux/topology.h>
383 383 /* Returns the number of the current Node. */
384   -#define numa_node_id() (cpu_to_node(_smp_processor_id()))
  384 +#define numa_node_id() (cpu_to_node(raw_smp_processor_id()))
385 385  
386 386 #ifndef CONFIG_DISCONTIGMEM
387 387  
include/linux/smp.h
... ... @@ -92,10 +92,7 @@
92 92 /*
93 93 * These macros fold the SMP functionality into a single CPU system
94 94 */
95   -
96   -#if !defined(__smp_processor_id) || !defined(CONFIG_PREEMPT)
97   -# define smp_processor_id() 0
98   -#endif
  95 +#define raw_smp_processor_id() 0
99 96 #define hard_smp_processor_id() 0
100 97 #define smp_call_function(func,info,retry,wait) ({ 0; })
101 98 #define on_each_cpu(func,info,retry,wait) ({ func(info); 0; })
102 99  
103 100  
104 101  
105 102  
... ... @@ -106,30 +103,25 @@
106 103 #endif /* !SMP */
107 104  
108 105 /*
109   - * DEBUG_PREEMPT support: check whether smp_processor_id() is being
110   - * used in a preemption-safe way.
  106 + * smp_processor_id(): get the current CPU ID.
111 107 *
112   - * An architecture has to enable this debugging code explicitly.
113   - * It can do so by renaming the smp_processor_id() macro to
114   - * __smp_processor_id(). This should only be done after some minimal
115   - * testing, because usually there are a number of false positives
116   - * that an architecture will trigger.
  108 + * if DEBUG_PREEMPT is enabled then we check whether it is
  109 + * used in a preemption-safe way. (smp_processor_id() is safe
  110 + * if it's used in a preemption-off critical section, or in
  111 + * a thread that is bound to the current CPU.)
117 112 *
118   - * To fix a false positive (i.e. smp_processor_id() use that the
119   - * debugging code reports but which use for some reason is legal),
120   - * change the smp_processor_id() reference to _smp_processor_id(),
121   - * which is the nondebug variant. NOTE: don't use this to hack around
122   - * real bugs.
  113 + * NOTE: raw_smp_processor_id() is for internal use only
  114 + * (smp_processor_id() is the preferred variant), but in rare
  115 + * instances it might also be used to turn off false positives
  116 + * (i.e. smp_processor_id() use that the debugging code reports but
  117 + * which use for some reason is legal). Don't use this to hack around
  118 + * the warning message, as your code might not work under PREEMPT.
123 119 */
124   -#ifdef __smp_processor_id
125   -# if defined(CONFIG_PREEMPT) && defined(CONFIG_DEBUG_PREEMPT)
126   - extern unsigned int smp_processor_id(void);
127   -# else
128   -# define smp_processor_id() __smp_processor_id()
129   -# endif
130   -# define _smp_processor_id() __smp_processor_id()
  120 +#ifdef CONFIG_DEBUG_PREEMPT
  121 + extern unsigned int debug_smp_processor_id(void);
  122 +# define smp_processor_id() debug_smp_processor_id()
131 123 #else
132   -# define _smp_processor_id() smp_processor_id()
  124 +# define smp_processor_id() raw_smp_processor_id()
133 125 #endif
134 126  
135 127 #define get_cpu() ({ preempt_disable(); smp_processor_id(); })
include/net/route.h
... ... @@ -107,7 +107,7 @@
107 107  
108 108 extern struct rt_cache_stat *rt_cache_stat;
109 109 #define RT_CACHE_STAT_INC(field) \
110   - (per_cpu_ptr(rt_cache_stat, _smp_processor_id())->field++)
  110 + (per_cpu_ptr(rt_cache_stat, raw_smp_processor_id())->field++)
111 111  
112 112 extern struct ip_rt_acct *ip_rt_acct;
113 113  
include/net/snmp.h
... ... @@ -128,19 +128,19 @@
128 128 #define SNMP_STAT_USRPTR(name) (name[1])
129 129  
130 130 #define SNMP_INC_STATS_BH(mib, field) \
131   - (per_cpu_ptr(mib[0], _smp_processor_id())->mibs[field]++)
  131 + (per_cpu_ptr(mib[0], raw_smp_processor_id())->mibs[field]++)
132 132 #define SNMP_INC_STATS_OFFSET_BH(mib, field, offset) \
133   - (per_cpu_ptr(mib[0], _smp_processor_id())->mibs[field + (offset)]++)
  133 + (per_cpu_ptr(mib[0], raw_smp_processor_id())->mibs[field + (offset)]++)
134 134 #define SNMP_INC_STATS_USER(mib, field) \
135   - (per_cpu_ptr(mib[1], _smp_processor_id())->mibs[field]++)
  135 + (per_cpu_ptr(mib[1], raw_smp_processor_id())->mibs[field]++)
136 136 #define SNMP_INC_STATS(mib, field) \
137   - (per_cpu_ptr(mib[!in_softirq()], _smp_processor_id())->mibs[field]++)
  137 + (per_cpu_ptr(mib[!in_softirq()], raw_smp_processor_id())->mibs[field]++)
138 138 #define SNMP_DEC_STATS(mib, field) \
139   - (per_cpu_ptr(mib[!in_softirq()], _smp_processor_id())->mibs[field]--)
  139 + (per_cpu_ptr(mib[!in_softirq()], raw_smp_processor_id())->mibs[field]--)
140 140 #define SNMP_ADD_STATS_BH(mib, field, addend) \
141   - (per_cpu_ptr(mib[0], _smp_processor_id())->mibs[field] += addend)
  141 + (per_cpu_ptr(mib[0], raw_smp_processor_id())->mibs[field] += addend)
142 142 #define SNMP_ADD_STATS_USER(mib, field, addend) \
143   - (per_cpu_ptr(mib[1], _smp_processor_id())->mibs[field] += addend)
  143 + (per_cpu_ptr(mib[1], raw_smp_processor_id())->mibs[field] += addend)
144 144  
145 145 #endif
kernel/module.c
... ... @@ -379,7 +379,7 @@
379 379 for (i = 0; i < NR_CPUS; i++)
380 380 local_set(&mod->ref[i].count, 0);
381 381 /* Hold reference count during initialization. */
382   - local_set(&mod->ref[_smp_processor_id()].count, 1);
  382 + local_set(&mod->ref[raw_smp_processor_id()].count, 1);
383 383 /* Backwards compatibility macros put refcount during init. */
384 384 mod->waiter = current;
385 385 }
kernel/power/smp.c
... ... @@ -48,11 +48,11 @@
48 48 {
49 49 oldmask = current->cpus_allowed;
50 50 set_cpus_allowed(current, cpumask_of_cpu(0));
51   - printk("Freezing CPUs (at %d)", _smp_processor_id());
  51 + printk("Freezing CPUs (at %d)", raw_smp_processor_id());
52 52 current->state = TASK_INTERRUPTIBLE;
53 53 schedule_timeout(HZ);
54 54 printk("...");
55   - BUG_ON(_smp_processor_id() != 0);
  55 + BUG_ON(raw_smp_processor_id() != 0);
56 56  
57 57 /* FIXME: for this to work, all the CPUs must be running
58 58 * "idle" thread (or we deadlock). Is that guaranteed? */
kernel/sched.c
... ... @@ -3814,7 +3814,7 @@
3814 3814 */
3815 3815 void __sched io_schedule(void)
3816 3816 {
3817   - struct runqueue *rq = &per_cpu(runqueues, _smp_processor_id());
  3817 + struct runqueue *rq = &per_cpu(runqueues, raw_smp_processor_id());
3818 3818  
3819 3819 atomic_inc(&rq->nr_iowait);
3820 3820 schedule();
... ... @@ -3825,7 +3825,7 @@
3825 3825  
3826 3826 long __sched io_schedule_timeout(long timeout)
3827 3827 {
3828   - struct runqueue *rq = &per_cpu(runqueues, _smp_processor_id());
  3828 + struct runqueue *rq = &per_cpu(runqueues, raw_smp_processor_id());
3829 3829 long ret;
3830 3830  
3831 3831 atomic_inc(&rq->nr_iowait);
kernel/stop_machine.c
... ... @@ -100,7 +100,7 @@
100 100 stopmachine_state = STOPMACHINE_WAIT;
101 101  
102 102 for_each_online_cpu(i) {
103   - if (i == _smp_processor_id())
  103 + if (i == raw_smp_processor_id())
104 104 continue;
105 105 ret = kernel_thread(stopmachine, (void *)(long)i,CLONE_KERNEL);
106 106 if (ret < 0)
... ... @@ -182,7 +182,7 @@
182 182  
183 183 /* If they don't care which CPU fn runs on, bind to any online one. */
184 184 if (cpu == NR_CPUS)
185   - cpu = _smp_processor_id();
  185 + cpu = raw_smp_processor_id();
186 186  
187 187 p = kthread_create(do_stop, &smdata, "kstopmachine");
188 188 if (!IS_ERR(p)) {
lib/Makefile
... ... @@ -20,6 +20,7 @@
20 20 lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
21 21 lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
22 22 obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
  23 +obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
23 24  
24 25 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
25 26 lib-y += dec_and_lock.o
lib/kernel_lock.c
... ... @@ -9,61 +9,6 @@
9 9 #include <linux/module.h>
10 10 #include <linux/kallsyms.h>
11 11  
12   -#if defined(CONFIG_PREEMPT) && defined(__smp_processor_id) && \
13   - defined(CONFIG_DEBUG_PREEMPT)
14   -
15   -/*
16   - * Debugging check.
17   - */
18   -unsigned int smp_processor_id(void)
19   -{
20   - unsigned long preempt_count = preempt_count();
21   - int this_cpu = __smp_processor_id();
22   - cpumask_t this_mask;
23   -
24   - if (likely(preempt_count))
25   - goto out;
26   -
27   - if (irqs_disabled())
28   - goto out;
29   -
30   - /*
31   - * Kernel threads bound to a single CPU can safely use
32   - * smp_processor_id():
33   - */
34   - this_mask = cpumask_of_cpu(this_cpu);
35   -
36   - if (cpus_equal(current->cpus_allowed, this_mask))
37   - goto out;
38   -
39   - /*
40   - * It is valid to assume CPU-locality during early bootup:
41   - */
42   - if (system_state != SYSTEM_RUNNING)
43   - goto out;
44   -
45   - /*
46   - * Avoid recursion:
47   - */
48   - preempt_disable();
49   -
50   - if (!printk_ratelimit())
51   - goto out_enable;
52   -
53   - printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] code: %s/%d\n", preempt_count(), current->comm, current->pid);
54   - print_symbol("caller is %s\n", (long)__builtin_return_address(0));
55   - dump_stack();
56   -
57   -out_enable:
58   - preempt_enable_no_resched();
59   -out:
60   - return this_cpu;
61   -}
62   -
63   -EXPORT_SYMBOL(smp_processor_id);
64   -
65   -#endif /* PREEMPT && __smp_processor_id && DEBUG_PREEMPT */
66   -
67 12 #ifdef CONFIG_PREEMPT_BKL
68 13 /*
69 14 * The 'big kernel semaphore'
lib/smp_processor_id.c
  1 +/*
  2 + * lib/smp_processor_id.c
  3 + *
  4 + * DEBUG_PREEMPT variant of smp_processor_id().
  5 + */
  6 +#include <linux/module.h>
  7 +#include <linux/kallsyms.h>
  8 +
  9 +unsigned int debug_smp_processor_id(void)
  10 +{
  11 + unsigned long preempt_count = preempt_count();
  12 + int this_cpu = raw_smp_processor_id();
  13 + cpumask_t this_mask;
  14 +
  15 + if (likely(preempt_count))
  16 + goto out;
  17 +
  18 + if (irqs_disabled())
  19 + goto out;
  20 +
  21 + /*
  22 + * Kernel threads bound to a single CPU can safely use
  23 + * smp_processor_id():
  24 + */
  25 + this_mask = cpumask_of_cpu(this_cpu);
  26 +
  27 + if (cpus_equal(current->cpus_allowed, this_mask))
  28 + goto out;
  29 +
  30 + /*
  31 + * It is valid to assume CPU-locality during early bootup:
  32 + */
  33 + if (system_state != SYSTEM_RUNNING)
  34 + goto out;
  35 +
  36 + /*
  37 + * Avoid recursion:
  38 + */
  39 + preempt_disable();
  40 +
  41 + if (!printk_ratelimit())
  42 + goto out_enable;
  43 +
  44 + printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] code: %s/%d\n", preempt_count(), current->comm, current->pid);
  45 + print_symbol("caller is %s\n", (long)__builtin_return_address(0));
  46 + dump_stack();
  47 +
  48 +out_enable:
  49 + preempt_enable_no_resched();
  50 +out:
  51 + return this_cpu;
  52 +}
  53 +
  54 +EXPORT_SYMBOL(debug_smp_processor_id);