Commit f5d6c63a67a8f124ddae88511427249d1dd87880
1 parent 1275361c40
Exists in master and in 7 other branches
[MIPS] Do topology_init even on uniprocessor kernels.
Otherwise CPU 0 doesn't show up in sysfs, which breaks some software.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
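For context, the userspace-visible symptom: per-CPU directories only appear under /sys/devices/system/cpu/ for processors that have been passed to register_cpu(), and before this change MIPS only called it from smp.c, which is built only with CONFIG_SMP, so a uniprocessor kernel exposed no cpu0 entry at all. Below is a minimal, hypothetical sketch of the kind of sysfs enumeration that affected software performs; it is not part of the commit, only the sysfs path is the standard one, everything else is illustrative:

/* count_cpus.c - hypothetical userspace check, not from this commit */
#include <ctype.h>
#include <dirent.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	DIR *d = opendir("/sys/devices/system/cpu");
	struct dirent *ent;
	int cpus = 0;

	if (!d) {
		perror("/sys/devices/system/cpu");
		return 1;
	}
	/* cpu0, cpu1, ... are created by register_cpu(); other entries are skipped */
	while ((ent = readdir(d)) != NULL)
		if (!strncmp(ent->d_name, "cpu", 3) && isdigit((unsigned char)ent->d_name[3]))
			cpus++;
	closedir(d);
	printf("%d CPUs visible in sysfs\n", cpus);
	return cpus ? 0 : 1;
}

On a uniprocessor MIPS kernel without this patch, a check like this reports zero CPUs, which is the breakage the commit message refers to.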
Showing 3 changed files with 30 additions and 24 deletions
arch/mips/kernel/Makefile
@@ -1,73 +1,73 @@
 #
 # Makefile for the Linux/MIPS kernel.
 #

 extra-y := head.o init_task.o vmlinux.lds

 obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \
	   ptrace.o reset.o semaphore.o setup.o signal.o syscall.o \
-	   time.o traps.o unaligned.o
+	   time.o topology.o traps.o unaligned.o

 binfmt_irix-objs := irixelf.o irixinv.o irixioctl.o irixsig.o \
	   irix5sys.o sysirix.o

 obj-$(CONFIG_STACKTRACE) += stacktrace.o
 obj-$(CONFIG_MODULES) += mips_ksyms.o module.o

 obj-$(CONFIG_APM) += apm.o

 obj-$(CONFIG_CPU_R3000) += r2300_fpu.o r2300_switch.o
 obj-$(CONFIG_CPU_TX39XX) += r2300_fpu.o r2300_switch.o
 obj-$(CONFIG_CPU_TX49XX) += r4k_fpu.o r4k_switch.o
 obj-$(CONFIG_CPU_R4000) += r4k_fpu.o r4k_switch.o
 obj-$(CONFIG_CPU_VR41XX) += r4k_fpu.o r4k_switch.o
 obj-$(CONFIG_CPU_R4300) += r4k_fpu.o r4k_switch.o
 obj-$(CONFIG_CPU_R4X00) += r4k_fpu.o r4k_switch.o
 obj-$(CONFIG_CPU_R5000) += r4k_fpu.o r4k_switch.o
 obj-$(CONFIG_CPU_R5432) += r4k_fpu.o r4k_switch.o
 obj-$(CONFIG_CPU_R8000) += r4k_fpu.o r4k_switch.o
 obj-$(CONFIG_CPU_RM7000) += r4k_fpu.o r4k_switch.o
 obj-$(CONFIG_CPU_RM9000) += r4k_fpu.o r4k_switch.o
 obj-$(CONFIG_CPU_NEVADA) += r4k_fpu.o r4k_switch.o
 obj-$(CONFIG_CPU_R10000) += r4k_fpu.o r4k_switch.o
 obj-$(CONFIG_CPU_SB1) += r4k_fpu.o r4k_switch.o
 obj-$(CONFIG_CPU_MIPS32) += r4k_fpu.o r4k_switch.o
 obj-$(CONFIG_CPU_MIPS64) += r4k_fpu.o r4k_switch.o
 obj-$(CONFIG_CPU_R6000) += r6000_fpu.o r4k_switch.o

 obj-$(CONFIG_SMP) += smp.o

 obj-$(CONFIG_MIPS_MT) += mips-mt.o
 obj-$(CONFIG_MIPS_MT_SMTC) += smtc.o smtc-asm.o smtc-proc.o
 obj-$(CONFIG_MIPS_MT_SMP) += smp-mt.o

 obj-$(CONFIG_MIPS_APSP_KSPD) += kspd.o
 obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o
 obj-$(CONFIG_MIPS_VPE_APSP_API) += rtlx.o

 obj-$(CONFIG_NO_ISA) += dma-no-isa.o
 obj-$(CONFIG_I8259) += i8259.o
 obj-$(CONFIG_IRQ_CPU) += irq_cpu.o
 obj-$(CONFIG_IRQ_CPU_RM7K) += irq-rm7000.o
 obj-$(CONFIG_IRQ_CPU_RM9K) += irq-rm9000.o
 obj-$(CONFIG_IRQ_MV64340) += irq-mv6434x.o
 obj-$(CONFIG_MIPS_BOARDS_GEN) += irq-msc01.o

 obj-$(CONFIG_32BIT) += scall32-o32.o
 obj-$(CONFIG_64BIT) += scall64-64.o
 obj-$(CONFIG_BINFMT_IRIX) += binfmt_irix.o
 obj-$(CONFIG_MIPS32_COMPAT) += linux32.o signal32.o
 obj-$(CONFIG_MIPS32_N32) += binfmt_elfn32.o scall64-n32.o signal_n32.o
 obj-$(CONFIG_MIPS32_O32) += binfmt_elfo32.o scall64-o32.o ptrace32.o

 obj-$(CONFIG_KGDB) += gdb-low.o gdb-stub.o
 obj-$(CONFIG_PROC_FS) += proc.o

 obj-$(CONFIG_64BIT) += cpu-bugs64.o

 obj-$(CONFIG_I8253) += i8253.o

 CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(CFLAGS) -Wa,-mdaddi -c -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)

 EXTRA_AFLAGS := $(CFLAGS)
arch/mips/kernel/smp.c
@@ -1,491 +1,468 @@
 /*
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version 2
  * of the License, or (at your option) any later version.
  *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
  *
  * Copyright (C) 2000, 2001 Kanoj Sarcar
  * Copyright (C) 2000, 2001 Ralf Baechle
  * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
  * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
  */
 #include <linux/cache.h>
 #include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/spinlock.h>
 #include <linux/threads.h>
 #include <linux/module.h>
 #include <linux/time.h>
 #include <linux/timex.h>
 #include <linux/sched.h>
 #include <linux/cpumask.h>
 #include <linux/cpu.h>

 #include <asm/atomic.h>
 #include <asm/cpu.h>
 #include <asm/processor.h>
 #include <asm/system.h>
 #include <asm/mmu_context.h>
 #include <asm/smp.h>

 #ifdef CONFIG_MIPS_MT_SMTC
 #include <asm/mipsmtregs.h>
 #endif /* CONFIG_MIPS_MT_SMTC */

 cpumask_t phys_cpu_present_map;		/* Bitmask of available CPUs */
 volatile cpumask_t cpu_callin_map;	/* Bitmask of started secondaries */
 cpumask_t cpu_online_map;		/* Bitmask of currently online CPUs */
 int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
 int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */

 EXPORT_SYMBOL(phys_cpu_present_map);
 EXPORT_SYMBOL(cpu_online_map);

 static void smp_tune_scheduling (void)
 {
 	struct cache_desc *cd = &current_cpu_data.scache;
 	unsigned long cachesize;	/* kB */
 	unsigned long cpu_khz;

 	/*
 	 * Crude estimate until we actually meassure ...
 	 */
 	cpu_khz = loops_per_jiffy * 2 * HZ / 1000;

 	/*
 	 * Rough estimation for SMP scheduling, this is the number of
 	 * cycles it takes for a fully memory-limited process to flush
 	 * the SMP-local cache.
 	 *
 	 * (For a P5 this pretty much means we will choose another idle
 	 *  CPU almost always at wakeup time (this is due to the small
 	 *  L1 cache), on PIIs it's around 50-100 usecs, depending on
 	 *  the cache size)
 	 */
 	if (!cpu_khz)
 		return;

 	cachesize = cd->linesz * cd->sets * cd->ways;
 }

 extern void __init calibrate_delay(void);
 extern ATTRIB_NORET void cpu_idle(void);

 /*
  * First C code run on the secondary CPUs after being started up by
  * the master.
  */
 asmlinkage void start_secondary(void)
 {
 	unsigned int cpu;

 #ifdef CONFIG_MIPS_MT_SMTC
 	/* Only do cpu_probe for first TC of CPU */
 	if ((read_c0_tcbind() & TCBIND_CURTC) == 0)
 #endif /* CONFIG_MIPS_MT_SMTC */
 	cpu_probe();
 	cpu_report();
 	per_cpu_trap_init();
 	prom_init_secondary();

 	/*
 	 * XXX parity protection should be folded in here when it's converted
 	 * to an option instead of something based on .cputype
 	 */

 	calibrate_delay();
 	preempt_disable();
 	cpu = smp_processor_id();
 	cpu_data[cpu].udelay_val = loops_per_jiffy;

 	prom_smp_finish();

 	cpu_set(cpu, cpu_callin_map);

 	cpu_idle();
 }

 DEFINE_SPINLOCK(smp_call_lock);

 struct call_data_struct *call_data;

 /*
  * Run a function on all other CPUs.
  *  <func>      The function to run. This must be fast and non-blocking.
  *  <info>      An arbitrary pointer to pass to the function.
  *  <retry>     If true, keep retrying until ready.
  *  <wait>      If true, wait until function has completed on other CPUs.
  *  [RETURNS]   0 on success, else a negative status code.
  *
  * Does not return until remote CPUs are nearly ready to execute <func>
  * or are or have executed.
  *
  * You must not call this function with disabled interrupts or from a
  * hardware interrupt handler or from a bottom half handler:
  *
  * CPU A                               CPU B
  * Disable interrupts
  *                                     smp_call_function()
  *                                     Take call_lock
  *                                     Send IPIs
  *                                     Wait for all cpus to acknowledge IPI
  *                                     CPU A has not responded, spin waiting
  *                                     for cpu A to respond, holding call_lock
  * smp_call_function()
  * Spin waiting for call_lock
  * Deadlock                            Deadlock
  */
 int smp_call_function (void (*func) (void *info), void *info, int retry,
 	int wait)
 {
 	struct call_data_struct data;
 	int i, cpus = num_online_cpus() - 1;
 	int cpu = smp_processor_id();

 	/*
 	 * Can die spectacularly if this CPU isn't yet marked online
 	 */
 	BUG_ON(!cpu_online(cpu));

 	if (!cpus)
 		return 0;

 	/* Can deadlock when called with interrupts disabled */
 	WARN_ON(irqs_disabled());

 	data.func = func;
 	data.info = info;
 	atomic_set(&data.started, 0);
 	data.wait = wait;
 	if (wait)
 		atomic_set(&data.finished, 0);

 	spin_lock(&smp_call_lock);
 	call_data = &data;
 	mb();

 	/* Send a message to all other CPUs and wait for them to respond */
 	for_each_online_cpu(i)
 		if (i != cpu)
 			core_send_ipi(i, SMP_CALL_FUNCTION);

 	/* Wait for response */
 	/* FIXME: lock-up detection, backtrace on lock-up */
 	while (atomic_read(&data.started) != cpus)
 		barrier();

 	if (wait)
 		while (atomic_read(&data.finished) != cpus)
 			barrier();
 	call_data = NULL;
 	spin_unlock(&smp_call_lock);

 	return 0;
 }


 void smp_call_function_interrupt(void)
 {
 	void (*func) (void *info) = call_data->func;
 	void *info = call_data->info;
 	int wait = call_data->wait;

 	/*
 	 * Notify initiating CPU that I've grabbed the data and am
 	 * about to execute the function.
 	 */
 	mb();
 	atomic_inc(&call_data->started);

 	/*
 	 * At this point the info structure may be out of scope unless wait==1.
 	 */
 	irq_enter();
 	(*func)(info);
 	irq_exit();

 	if (wait) {
 		mb();
 		atomic_inc(&call_data->finished);
 	}
 }

 static void stop_this_cpu(void *dummy)
 {
 	/*
 	 * Remove this CPU:
 	 */
 	cpu_clear(smp_processor_id(), cpu_online_map);
 	local_irq_enable();	/* May need to service _machine_restart IPI */
 	for (;;);		/* Wait if available. */
 }

 void smp_send_stop(void)
 {
 	smp_call_function(stop_this_cpu, NULL, 1, 0);
 }

 void __init smp_cpus_done(unsigned int max_cpus)
 {
 	prom_cpus_done();
 }

 /* called from main before smp_init() */
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
 	init_new_context(current, &init_mm);
 	current_thread_info()->cpu = 0;
 	smp_tune_scheduling();
 	plat_prepare_cpus(max_cpus);
 #ifndef CONFIG_HOTPLUG_CPU
 	cpu_present_map = cpu_possible_map;
 #endif
 }

 /* preload SMP state for boot cpu */
 void __devinit smp_prepare_boot_cpu(void)
 {
 	/*
 	 * This assumes that bootup is always handled by the processor
 	 * with the logic and physical number 0.
 	 */
 	__cpu_number_map[0] = 0;
 	__cpu_logical_map[0] = 0;
 	cpu_set(0, phys_cpu_present_map);
 	cpu_set(0, cpu_online_map);
 	cpu_set(0, cpu_callin_map);
 }

 /*
  * Called once for each "cpu_possible(cpu)".  Needs to spin up the cpu
  * and keep control until "cpu_online(cpu)" is set.  Note: cpu is
  * physical, not logical.
  */
 int __devinit __cpu_up(unsigned int cpu)
 {
 	struct task_struct *idle;

 	/*
 	 * Processor goes to start_secondary(), sets online flag
 	 * The following code is purely to make sure
 	 * Linux can schedule processes on this slave.
 	 */
 	idle = fork_idle(cpu);
 	if (IS_ERR(idle))
 		panic(KERN_ERR "Fork failed for CPU %d", cpu);

 	prom_boot_secondary(cpu, idle);

 	/*
 	 * Trust is futile.  We should really have timeouts ...
 	 */
 	while (!cpu_isset(cpu, cpu_callin_map))
 		udelay(100);

 	cpu_set(cpu, cpu_online_map);

 	return 0;
 }

 /* Not really SMP stuff ... */
 int setup_profiling_timer(unsigned int multiplier)
 {
 	return 0;
 }

 static void flush_tlb_all_ipi(void *info)
 {
 	local_flush_tlb_all();
 }

 void flush_tlb_all(void)
 {
 	on_each_cpu(flush_tlb_all_ipi, NULL, 1, 1);
 }

 static void flush_tlb_mm_ipi(void *mm)
 {
 	local_flush_tlb_mm((struct mm_struct *)mm);
 }

 /*
  * Special Variant of smp_call_function for use by TLB functions:
  *
  * o No return value
  * o collapses to normal function call on UP kernels
  * o collapses to normal function call on systems with a single shared
  *   primary cache.
  * o CONFIG_MIPS_MT_SMTC currently implies there is only one physical core.
  */
 static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
 {
 #ifndef CONFIG_MIPS_MT_SMTC
 	smp_call_function(func, info, 1, 1);
 #endif
 }

 static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
 {
 	preempt_disable();

 	smp_on_other_tlbs(func, info);
 	func(info);

 	preempt_enable();
 }

 /*
  * The following tlb flush calls are invoked when old translations are
  * being torn down, or pte attributes are changing. For single threaded
  * address spaces, a new context is obtained on the current cpu, and tlb
  * context on other cpus are invalidated to force a new context allocation
  * at switch_mm time, should the mm ever be used on other cpus. For
  * multithreaded address spaces, intercpu interrupts have to be sent.
  * Another case where intercpu interrupts are required is when the target
  * mm might be active on another cpu (eg debuggers doing the flushes on
  * behalf of debugees, kswapd stealing pages from another process etc).
  * Kanoj 07/00.
  */

 void flush_tlb_mm(struct mm_struct *mm)
 {
 	preempt_disable();

 	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
 		smp_on_other_tlbs(flush_tlb_mm_ipi, (void *)mm);
 	} else {
 		int i;
 		for (i = 0; i < num_online_cpus(); i++)
 			if (smp_processor_id() != i)
 				cpu_context(i, mm) = 0;
 	}
 	local_flush_tlb_mm(mm);

 	preempt_enable();
 }

 struct flush_tlb_data {
 	struct vm_area_struct *vma;
 	unsigned long addr1;
 	unsigned long addr2;
 };

 static void flush_tlb_range_ipi(void *info)
 {
 	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

 	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
 }

 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
 	struct mm_struct *mm = vma->vm_mm;

 	preempt_disable();
 	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
 		struct flush_tlb_data fd;

 		fd.vma = vma;
 		fd.addr1 = start;
 		fd.addr2 = end;
 		smp_on_other_tlbs(flush_tlb_range_ipi, (void *)&fd);
 	} else {
 		int i;
 		for (i = 0; i < num_online_cpus(); i++)
 			if (smp_processor_id() != i)
 				cpu_context(i, mm) = 0;
 	}
 	local_flush_tlb_range(vma, start, end);
 	preempt_enable();
 }

 static void flush_tlb_kernel_range_ipi(void *info)
 {
 	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

 	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
 }

 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
 	struct flush_tlb_data fd;

 	fd.addr1 = start;
 	fd.addr2 = end;
 	on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1, 1);
 }

 static void flush_tlb_page_ipi(void *info)
 {
 	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

 	local_flush_tlb_page(fd->vma, fd->addr1);
 }

 void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 {
 	preempt_disable();
 	if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
 		struct flush_tlb_data fd;

 		fd.vma = vma;
 		fd.addr1 = page;
 		smp_on_other_tlbs(flush_tlb_page_ipi, (void *)&fd);
 	} else {
 		int i;
 		for (i = 0; i < num_online_cpus(); i++)
 			if (smp_processor_id() != i)
 				cpu_context(i, vma->vm_mm) = 0;
 	}
 	local_flush_tlb_page(vma, page);
 	preempt_enable();
 }

 static void flush_tlb_one_ipi(void *info)
 {
 	unsigned long vaddr = (unsigned long) info;

 	local_flush_tlb_one(vaddr);
 }

 void flush_tlb_one(unsigned long vaddr)
 {
 	smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
 }

-static DEFINE_PER_CPU(struct cpu, cpu_devices);
-
-static int __init topology_init(void)
-{
-	int i, ret;
-
-#ifdef CONFIG_NUMA
-	for_each_online_node(i)
-		register_one_node(i);
-#endif /* CONFIG_NUMA */
-
-	for_each_present_cpu(i) {
-		ret = register_cpu(&per_cpu(cpu_devices, i), i);
-		if (ret)
-			printk(KERN_WARNING "topology_init: register_cpu %d "
-			       "failed (%d)\n", i, ret);
-	}
-
-	return 0;
-}
-
-subsys_initcall(topology_init);
-
 EXPORT_SYMBOL(flush_tlb_page);
 EXPORT_SYMBOL(flush_tlb_one);
arch/mips/kernel/topology.c
File was created

@@ -0,0 +1,29 @@
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/init.h>
+#include <linux/node.h>
+#include <linux/nodemask.h>
+#include <linux/percpu.h>
+
+static DEFINE_PER_CPU(struct cpu, cpu_devices);
+
+static int __init topology_init(void)
+{
+	int i, ret;
+
+#ifdef CONFIG_NUMA
+	for_each_online_node(i)
+		register_one_node(i);
+#endif /* CONFIG_NUMA */
+
+	for_each_present_cpu(i) {
+		ret = register_cpu(&per_cpu(cpu_devices, i), i);
+		if (ret)
+			printk(KERN_WARNING "topology_init: register_cpu %d "
+			       "failed (%d)\n", i, ret);
+	}
+
+	return 0;
+}
+
+subsys_initcall(topology_init);