Commit 6619a8fb594486363783cc4a8372e4d4ee4b913e

Authored by H. Peter Anvin
Committed by Thomas Gleixner
1 parent 9689ba8ad0

x86: Create clflush() inline, remove hardcoded wbinvd

Create an inline function for clflush(), with the proper arguments,
and use it instead of hard-coding the instruction.

This also removes one instance of hard-coded wbinvd, based on a patch
by Glauber de Oliveira Costa.

[ tglx: arch/x86 adaptation ]

Cc: Andi Kleen <andi@firstfloor.org>
Cc: Glauber de Oliveira Costa <gcosta@redhat.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

Showing 6 changed files with 20 additions and 10 deletions Side-by-side Diff

arch/x86/kernel/tce_64.c
... ... @@ -40,9 +40,9 @@
40 40 {
41 41 /* a single tce can't cross a cache line */
42 42 if (cpu_has_clflush)
43   - asm volatile("clflush (%0)" :: "r" (tceaddr));
  43 + clflush(tceaddr);
44 44 else
45   - asm volatile("wbinvd":::"memory");
  45 + wbinvd();
46 46 }
47 47  
48 48 void tce_build(struct iommu_table *tbl, unsigned long index,
arch/x86/mm/pageattr_32.c
... ... @@ -70,10 +70,10 @@
70 70  
71 71 static void cache_flush_page(struct page *p)
72 72 {
73   - unsigned long adr = (unsigned long)page_address(p);
  73 + void *adr = page_address(p);
74 74 int i;
75 75 for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
76   - asm volatile("clflush (%0)" :: "r" (adr + i));
  76 + clflush(adr+i);
77 77 }
78 78  
79 79 static void flush_kernel_map(void *arg)
arch/x86/mm/pageattr_64.c
... ... @@ -65,7 +65,7 @@
65 65 {
66 66 int i;
67 67 for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
68   - asm volatile("clflush (%0)" :: "r" (adr + i));
  68 + clflush(adr+i);
69 69 }
70 70  
71 71 static void flush_kernel_map(void *arg)
drivers/char/agp/efficeon-agp.c
... ... @@ -221,7 +221,7 @@
221 221 SetPageReserved(virt_to_page((char *)page));
222 222  
223 223 for (offset = 0; offset < PAGE_SIZE; offset += clflush_chunk)
224   - asm volatile("clflush %0" : : "m" (*(char *)(page+offset)));
  224 + clflush((char *)page+offset);
225 225  
226 226 efficeon_private.l1_table[index] = page;
227 227  
228 228  
... ... @@ -268,15 +268,16 @@
268 268 *page = insert;
269 269  
270 270 /* clflush is slow, so don't clflush until we have to */
271   - if ( last_page &&
272   - ((unsigned long)page^(unsigned long)last_page) & clflush_mask )
273   - asm volatile("clflush %0" : : "m" (*last_page));
  271 + if (last_page &&
  272 + (((unsigned long)page^(unsigned long)last_page) &
  273 + clflush_mask))
  274 + clflush(last_page);
274 275  
275 276 last_page = page;
276 277 }
277 278  
278 279 if ( last_page )
279   - asm volatile("clflush %0" : : "m" (*last_page));
  280 + clflush(last_page);
280 281  
281 282 agp_bridge->driver->tlb_flush(mem);
282 283 return 0;
include/asm-x86/system_32.h
... ... @@ -161,6 +161,10 @@
161 161 asm volatile("wbinvd": : :"memory");
162 162 }
163 163  
  164 +static inline void clflush(volatile void *__p)
  165 +{
  166 + asm volatile("clflush %0" : "+m" (*(char __force *)__p));
  167 +}
164 168  
165 169 #ifdef CONFIG_PARAVIRT
166 170 #include <asm/paravirt.h>
include/asm-x86/system_64.h
... ... @@ -137,6 +137,11 @@
137 137  
138 138 #endif /* __KERNEL__ */
139 139  
  140 +static inline void clflush(volatile void *__p)
  141 +{
  142 + asm volatile("clflush %0" : "+m" (*(char __force *)__p));
  143 +}
  144 +
140 145 #define nop() __asm__ __volatile__ ("nop")
141 146  
142 147 #ifdef CONFIG_SMP