Commit c41917df8a1adde34864116ce2231a7fe308d2ff
Committed by
Ingo Molnar
1 parent
ce8c2293be
Exists in
master
and in
20 other branches
[PATCH] sched: sched_cacheflush is now unused
Since Ingo's recent scheduler rewrite, which was merged as commit 0437e109e1841607f2988891eaa36c531c6aa6ac, sched_cacheflush is unused.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Showing 16 changed files with 0 additions and 149 deletions.
Side-by-side Diff
- arch/ia64/kernel/setup.c
- include/asm-alpha/system.h
- include/asm-arm/system.h
- include/asm-arm26/system.h
- include/asm-i386/system.h
- include/asm-ia64/system.h
- include/asm-m32r/system.h
- include/asm-mips/system.h
- include/asm-parisc/system.h
- include/asm-powerpc/system.h
- include/asm-ppc/system.h
- include/asm-s390/system.h
- include/asm-sh/system.h
- include/asm-sparc/system.h
- include/asm-sparc64/system.h
- include/asm-x86_64/system.h
arch/ia64/kernel/setup.c
... | ... | @@ -980,15 +980,6 @@ |
980 | 980 | pm_idle = default_idle; |
981 | 981 | } |
982 | 982 | |
983 | -/* | |
984 | - * On SMP systems, when the scheduler does migration-cost autodetection, | |
985 | - * it needs a way to flush as much of the CPU's caches as possible. | |
986 | - */ | |
987 | -void sched_cacheflush(void) | |
988 | -{ | |
989 | - ia64_sal_cache_flush(3); | |
990 | -} | |
991 | - | |
992 | 983 | void __init |
993 | 984 | check_bugs (void) |
994 | 985 | { |
include/asm-alpha/system.h
... | ... | @@ -139,16 +139,6 @@ |
139 | 139 | struct task_struct; |
140 | 140 | extern struct task_struct *alpha_switch_to(unsigned long, struct task_struct*); |
141 | 141 | |
142 | -/* | |
143 | - * On SMP systems, when the scheduler does migration-cost autodetection, | |
144 | - * it needs a way to flush as much of the CPU's caches as possible. | |
145 | - * | |
146 | - * TODO: fill this in! | |
147 | - */ | |
148 | -static inline void sched_cacheflush(void) | |
149 | -{ | |
150 | -} | |
151 | - | |
152 | 142 | #define imb() \ |
153 | 143 | __asm__ __volatile__ ("call_pal %0 #imb" : : "i" (PAL_imb) : "memory") |
154 | 144 |
include/asm-arm/system.h
... | ... | @@ -254,16 +254,6 @@ |
254 | 254 | last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \ |
255 | 255 | } while (0) |
256 | 256 | |
257 | -/* | |
258 | - * On SMP systems, when the scheduler does migration-cost autodetection, | |
259 | - * it needs a way to flush as much of the CPU's caches as possible. | |
260 | - * | |
261 | - * TODO: fill this in! | |
262 | - */ | |
263 | -static inline void sched_cacheflush(void) | |
264 | -{ | |
265 | -} | |
266 | - | |
267 | 257 | #if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110) |
268 | 258 | /* |
269 | 259 | * On the StrongARM, "swp" is terminally broken since it bypasses the |
include/asm-arm26/system.h
... | ... | @@ -110,16 +110,6 @@ |
110 | 110 | } while (0) |
111 | 111 | |
112 | 112 | /* |
113 | - * On SMP systems, when the scheduler does migration-cost autodetection, | |
114 | - * it needs a way to flush as much of the CPU's caches as possible. | |
115 | - * | |
116 | - * TODO: fill this in! | |
117 | - */ | |
118 | -static inline void sched_cacheflush(void) | |
119 | -{ | |
120 | -} | |
121 | - | |
122 | -/* | |
123 | 113 | * Save the current interrupt enable state & disable IRQs |
124 | 114 | */ |
125 | 115 | #define local_irq_save(x) \ |
include/asm-i386/system.h
... | ... | @@ -310,15 +310,6 @@ |
310 | 310 | extern int es7000_plat; |
311 | 311 | void cpu_idle_wait(void); |
312 | 312 | |
313 | -/* | |
314 | - * On SMP systems, when the scheduler does migration-cost autodetection, | |
315 | - * it needs a way to flush as much of the CPU's caches as possible: | |
316 | - */ | |
317 | -static inline void sched_cacheflush(void) | |
318 | -{ | |
319 | - wbinvd(); | |
320 | -} | |
321 | - | |
322 | 313 | extern unsigned long arch_align_stack(unsigned long sp); |
323 | 314 | extern void free_init_pages(char *what, unsigned long begin, unsigned long end); |
324 | 315 |
include/asm-ia64/system.h
include/asm-m32r/system.h
... | ... | @@ -54,16 +54,6 @@ |
54 | 54 | ); \ |
55 | 55 | } while(0) |
56 | 56 | |
57 | -/* | |
58 | - * On SMP systems, when the scheduler does migration-cost autodetection, | |
59 | - * it needs a way to flush as much of the CPU's caches as possible. | |
60 | - * | |
61 | - * TODO: fill this in! | |
62 | - */ | |
63 | -static inline void sched_cacheflush(void) | |
64 | -{ | |
65 | -} | |
66 | - | |
67 | 57 | /* Interrupt Control */ |
68 | 58 | #if !defined(CONFIG_CHIP_M32102) && !defined(CONFIG_CHIP_M32104) |
69 | 59 | #define local_irq_enable() \ |
include/asm-mips/system.h
... | ... | @@ -71,16 +71,6 @@ |
71 | 71 | write_c0_userlocal(task_thread_info(current)->tp_value);\ |
72 | 72 | } while(0) |
73 | 73 | |
74 | -/* | |
75 | - * On SMP systems, when the scheduler does migration-cost autodetection, | |
76 | - * it needs a way to flush as much of the CPU's caches as possible. | |
77 | - * | |
78 | - * TODO: fill this in! | |
79 | - */ | |
80 | -static inline void sched_cacheflush(void) | |
81 | -{ | |
82 | -} | |
83 | - | |
84 | 74 | static inline unsigned long __xchg_u32(volatile int * m, unsigned int val) |
85 | 75 | { |
86 | 76 | __u32 retval; |
include/asm-parisc/system.h
... | ... | @@ -48,17 +48,6 @@ |
48 | 48 | (last) = _switch_to(prev, next); \ |
49 | 49 | } while(0) |
50 | 50 | |
51 | -/* | |
52 | - * On SMP systems, when the scheduler does migration-cost autodetection, | |
53 | - * it needs a way to flush as much of the CPU's caches as possible. | |
54 | - * | |
55 | - * TODO: fill this in! | |
56 | - */ | |
57 | -static inline void sched_cacheflush(void) | |
58 | -{ | |
59 | -} | |
60 | - | |
61 | - | |
62 | 51 | /* interrupt control */ |
63 | 52 | #define local_save_flags(x) __asm__ __volatile__("ssm 0, %0" : "=r" (x) : : "memory") |
64 | 53 | #define local_irq_disable() __asm__ __volatile__("rsm %0,%%r0\n" : : "i" (PSW_I) : "memory" ) |
include/asm-powerpc/system.h
... | ... | @@ -184,16 +184,6 @@ |
184 | 184 | extern struct task_struct *_switch(struct thread_struct *prev, |
185 | 185 | struct thread_struct *next); |
186 | 186 | |
187 | -/* | |
188 | - * On SMP systems, when the scheduler does migration-cost autodetection, | |
189 | - * it needs a way to flush as much of the CPU's caches as possible. | |
190 | - * | |
191 | - * TODO: fill this in! | |
192 | - */ | |
193 | -static inline void sched_cacheflush(void) | |
194 | -{ | |
195 | -} | |
196 | - | |
197 | 187 | extern unsigned int rtas_data; |
198 | 188 | extern int mem_init_done; /* set on boot once kmalloc can be called */ |
199 | 189 | extern unsigned long memory_limit; |
include/asm-ppc/system.h
... | ... | @@ -129,16 +129,6 @@ |
129 | 129 | struct task_struct *); |
130 | 130 | #define switch_to(prev, next, last) ((last) = __switch_to((prev), (next))) |
131 | 131 | |
132 | -/* | |
133 | - * On SMP systems, when the scheduler does migration-cost autodetection, | |
134 | - * it needs a way to flush as much of the CPU's caches as possible. | |
135 | - * | |
136 | - * TODO: fill this in! | |
137 | - */ | |
138 | -static inline void sched_cacheflush(void) | |
139 | -{ | |
140 | -} | |
141 | - | |
142 | 132 | struct thread_struct; |
143 | 133 | extern struct task_struct *_switch(struct thread_struct *prev, |
144 | 134 | struct thread_struct *next); |
include/asm-s390/system.h
... | ... | @@ -97,16 +97,6 @@ |
97 | 97 | prev = __switch_to(prev,next); \ |
98 | 98 | } while (0) |
99 | 99 | |
100 | -/* | |
101 | - * On SMP systems, when the scheduler does migration-cost autodetection, | |
102 | - * it needs a way to flush as much of the CPU's caches as possible. | |
103 | - * | |
104 | - * TODO: fill this in! | |
105 | - */ | |
106 | -static inline void sched_cacheflush(void) | |
107 | -{ | |
108 | -} | |
109 | - | |
110 | 100 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING |
111 | 101 | extern void account_vtime(struct task_struct *); |
112 | 102 | extern void account_tick_vtime(struct task_struct *); |
include/asm-sh/system.h
... | ... | @@ -64,16 +64,6 @@ |
64 | 64 | last = __last; \ |
65 | 65 | } while (0) |
66 | 66 | |
67 | -/* | |
68 | - * On SMP systems, when the scheduler does migration-cost autodetection, | |
69 | - * it needs a way to flush as much of the CPU's caches as possible. | |
70 | - * | |
71 | - * TODO: fill this in! | |
72 | - */ | |
73 | -static inline void sched_cacheflush(void) | |
74 | -{ | |
75 | -} | |
76 | - | |
77 | 67 | #ifdef CONFIG_CPU_SH4A |
78 | 68 | #define __icbi() \ |
79 | 69 | { \ |
include/asm-sparc/system.h
... | ... | @@ -165,16 +165,6 @@ |
165 | 165 | } while(0) |
166 | 166 | |
167 | 167 | /* |
168 | - * On SMP systems, when the scheduler does migration-cost autodetection, | |
169 | - * it needs a way to flush as much of the CPU's caches as possible. | |
170 | - * | |
171 | - * TODO: fill this in! | |
172 | - */ | |
173 | -static inline void sched_cacheflush(void) | |
174 | -{ | |
175 | -} | |
176 | - | |
177 | -/* | |
178 | 168 | * Changing the IRQ level on the Sparc. |
179 | 169 | */ |
180 | 170 | extern void local_irq_restore(unsigned long); |
include/asm-sparc64/system.h
... | ... | @@ -204,16 +204,6 @@ |
204 | 204 | } \ |
205 | 205 | } while(0) |
206 | 206 | |
207 | -/* | |
208 | - * On SMP systems, when the scheduler does migration-cost autodetection, | |
209 | - * it needs a way to flush as much of the CPU's caches as possible. | |
210 | - * | |
211 | - * TODO: fill this in! | |
212 | - */ | |
213 | -static inline void sched_cacheflush(void) | |
214 | -{ | |
215 | -} | |
216 | - | |
217 | 207 | static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val) |
218 | 208 | { |
219 | 209 | unsigned long tmp1, tmp2; |
include/asm-x86_64/system.h
... | ... | @@ -111,15 +111,6 @@ |
111 | 111 | #define wbinvd() \ |
112 | 112 | __asm__ __volatile__ ("wbinvd": : :"memory"); |
113 | 113 | |
114 | -/* | |
115 | - * On SMP systems, when the scheduler does migration-cost autodetection, | |
116 | - * it needs a way to flush as much of the CPU's caches as possible. | |
117 | - */ | |
118 | -static inline void sched_cacheflush(void) | |
119 | -{ | |
120 | - wbinvd(); | |
121 | -} | |
122 | - | |
123 | 114 | #endif /* __KERNEL__ */ |
124 | 115 | |
125 | 116 | #define nop() __asm__ __volatile__ ("nop") |