Commit ec8c0446b6e2b67b5c8813eb517f4bf00efa99a9
Committed by
Linus Torvalds
1 parent
bcd022801e
Exists in
master
and in
39 other branches
[PATCH] Optimize D-cache alias handling on fork
Virtually indexed, physically tagged cache architectures can get away without cache flushing when forking. This patch adds a new cache flushing function flush_cache_dup_mm(struct mm_struct *) which for the moment I've implemented to do the same thing on all architectures except on MIPS where it's a no-op. Signed-off-by: Ralf Baechle <ralf@linux-mips.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Showing 27 changed files with 54 additions and 7 deletions Side-by-side Diff
- Documentation/cachetlb.txt
- include/asm-alpha/cacheflush.h
- include/asm-arm/cacheflush.h
- include/asm-arm26/cacheflush.h
- include/asm-avr32/cacheflush.h
- include/asm-cris/cacheflush.h
- include/asm-frv/cacheflush.h
- include/asm-h8300/cacheflush.h
- include/asm-i386/cacheflush.h
- include/asm-ia64/cacheflush.h
- include/asm-m32r/cacheflush.h
- include/asm-m68k/cacheflush.h
- include/asm-m68knommu/cacheflush.h
- include/asm-mips/cacheflush.h
- include/asm-parisc/cacheflush.h
- include/asm-powerpc/cacheflush.h
- include/asm-s390/cacheflush.h
- include/asm-sh/cpu-sh2/cacheflush.h
- include/asm-sh/cpu-sh3/cacheflush.h
- include/asm-sh/cpu-sh4/cacheflush.h
- include/asm-sh64/cacheflush.h
- include/asm-sparc/cacheflush.h
- include/asm-sparc64/cacheflush.h
- include/asm-v850/cacheflush.h
- include/asm-x86_64/cacheflush.h
- include/asm-xtensa/cacheflush.h
- kernel/fork.c
Documentation/cachetlb.txt
... | ... | @@ -179,10 +179,21 @@ |
179 | 179 | lines associated with 'mm'. |
180 | 180 | |
181 | 181 | This interface is used to handle whole address space |
182 | - page table operations such as what happens during | |
183 | - fork, exit, and exec. | |
182 | + page table operations such as what happens during exit and exec. | |
184 | 183 | |
185 | -2) void flush_cache_range(struct vm_area_struct *vma, | |
184 | +2) void flush_cache_dup_mm(struct mm_struct *mm) | |
185 | + | |
186 | + This interface flushes an entire user address space from | |
187 | + the caches. That is, after running, there will be no cache | |
188 | + lines associated with 'mm'. | |
189 | + | |
190 | + This interface is used to handle whole address space | |
191 | + page table operations such as what happens during fork. | |
192 | + | |
193 | + This option is separate from flush_cache_mm to allow some | |
194 | + optimizations for VIPT caches. | |
195 | + | |
196 | +3) void flush_cache_range(struct vm_area_struct *vma, | |
186 | 197 | unsigned long start, unsigned long end) |
187 | 198 | |
188 | 199 | Here we are flushing a specific range of (user) virtual |
... | ... | @@ -199,7 +210,7 @@ |
199 | 210 | call flush_cache_page (see below) for each entry which may be |
200 | 211 | modified. |
201 | 212 | |
202 | -3) void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn) | |
213 | +4) void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn) | |
203 | 214 | |
204 | 215 | This time we need to remove a PAGE_SIZE sized range |
205 | 216 | from the cache. The 'vma' is the backing structure used by |
... | ... | @@ -220,7 +231,7 @@ |
220 | 231 | |
221 | 232 | This is used primarily during fault processing. |
222 | 233 | |
223 | -4) void flush_cache_kmaps(void) | |
234 | +5) void flush_cache_kmaps(void) | |
224 | 235 | |
225 | 236 | This routine need only be implemented if the platform utilizes |
226 | 237 | highmem. It will be called right before all of the kmaps |
... | ... | @@ -232,7 +243,7 @@ |
232 | 243 | |
233 | 244 | This routine should be implemented in asm/highmem.h |
234 | 245 | |
235 | -5) void flush_cache_vmap(unsigned long start, unsigned long end) | |
246 | +6) void flush_cache_vmap(unsigned long start, unsigned long end) | |
236 | 247 | void flush_cache_vunmap(unsigned long start, unsigned long end) |
237 | 248 | |
238 | 249 | Here in these two interfaces we are flushing a specific range |
include/asm-alpha/cacheflush.h
... | ... | @@ -6,6 +6,7 @@ |
6 | 6 | /* Caches aren't brain-dead on the Alpha. */ |
7 | 7 | #define flush_cache_all() do { } while (0) |
8 | 8 | #define flush_cache_mm(mm) do { } while (0) |
9 | +#define flush_cache_dup_mm(mm) do { } while (0) | |
9 | 10 | #define flush_cache_range(vma, start, end) do { } while (0) |
10 | 11 | #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) |
11 | 12 | #define flush_dcache_page(page) do { } while (0) |
include/asm-arm/cacheflush.h
... | ... | @@ -319,6 +319,8 @@ |
319 | 319 | unsigned long len, int write); |
320 | 320 | #endif |
321 | 321 | |
322 | +#define flush_cache_dup_mm(mm) flush_cache_mm(mm) | |
323 | + | |
322 | 324 | /* |
323 | 325 | * flush_cache_user_range is used when we want to ensure that the |
324 | 326 | * Harvard caches are synchronised for the user space address range. |
include/asm-arm26/cacheflush.h
... | ... | @@ -22,6 +22,7 @@ |
22 | 22 | |
23 | 23 | #define flush_cache_all() do { } while (0) |
24 | 24 | #define flush_cache_mm(mm) do { } while (0) |
25 | +#define flush_cache_dup_mm(mm) do { } while (0) | |
25 | 26 | #define flush_cache_range(vma,start,end) do { } while (0) |
26 | 27 | #define flush_cache_page(vma,vmaddr,pfn) do { } while (0) |
27 | 28 | #define flush_cache_vmap(start, end) do { } while (0) |
include/asm-avr32/cacheflush.h
... | ... | @@ -87,6 +87,7 @@ |
87 | 87 | */ |
88 | 88 | #define flush_cache_all() do { } while (0) |
89 | 89 | #define flush_cache_mm(mm) do { } while (0) |
90 | +#define flush_cache_dup_mm(mm) do { } while (0) | |
90 | 91 | #define flush_cache_range(vma, start, end) do { } while (0) |
91 | 92 | #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) |
92 | 93 | #define flush_cache_vmap(start, end) do { } while (0) |
include/asm-cris/cacheflush.h
... | ... | @@ -9,6 +9,7 @@ |
9 | 9 | */ |
10 | 10 | #define flush_cache_all() do { } while (0) |
11 | 11 | #define flush_cache_mm(mm) do { } while (0) |
12 | +#define flush_cache_dup_mm(mm) do { } while (0) | |
12 | 13 | #define flush_cache_range(vma, start, end) do { } while (0) |
13 | 14 | #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) |
14 | 15 | #define flush_dcache_page(page) do { } while (0) |
include/asm-frv/cacheflush.h
... | ... | @@ -20,6 +20,7 @@ |
20 | 20 | */ |
21 | 21 | #define flush_cache_all() do {} while(0) |
22 | 22 | #define flush_cache_mm(mm) do {} while(0) |
23 | +#define flush_cache_dup_mm(mm) do {} while(0) | |
23 | 24 | #define flush_cache_range(mm, start, end) do {} while(0) |
24 | 25 | #define flush_cache_page(vma, vmaddr, pfn) do {} while(0) |
25 | 26 | #define flush_cache_vmap(start, end) do {} while(0) |
include/asm-h8300/cacheflush.h
include/asm-i386/cacheflush.h
... | ... | @@ -7,6 +7,7 @@ |
7 | 7 | /* Caches aren't brain-dead on the intel. */ |
8 | 8 | #define flush_cache_all() do { } while (0) |
9 | 9 | #define flush_cache_mm(mm) do { } while (0) |
10 | +#define flush_cache_dup_mm(mm) do { } while (0) | |
10 | 11 | #define flush_cache_range(vma, start, end) do { } while (0) |
11 | 12 | #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) |
12 | 13 | #define flush_dcache_page(page) do { } while (0) |
include/asm-ia64/cacheflush.h
... | ... | @@ -18,6 +18,7 @@ |
18 | 18 | |
19 | 19 | #define flush_cache_all() do { } while (0) |
20 | 20 | #define flush_cache_mm(mm) do { } while (0) |
21 | +#define flush_cache_dup_mm(mm) do { } while (0) | |
21 | 22 | #define flush_cache_range(vma, start, end) do { } while (0) |
22 | 23 | #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) |
23 | 24 | #define flush_icache_page(vma,page) do { } while (0) |
include/asm-m32r/cacheflush.h
... | ... | @@ -9,6 +9,7 @@ |
9 | 9 | #if defined(CONFIG_CHIP_M32700) || defined(CONFIG_CHIP_OPSP) || defined(CONFIG_CHIP_M32104) |
10 | 10 | #define flush_cache_all() do { } while (0) |
11 | 11 | #define flush_cache_mm(mm) do { } while (0) |
12 | +#define flush_cache_dup_mm(mm) do { } while (0) | |
12 | 13 | #define flush_cache_range(vma, start, end) do { } while (0) |
13 | 14 | #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) |
14 | 15 | #define flush_dcache_page(page) do { } while (0) |
... | ... | @@ -29,6 +30,7 @@ |
29 | 30 | #elif defined(CONFIG_CHIP_M32102) |
30 | 31 | #define flush_cache_all() do { } while (0) |
31 | 32 | #define flush_cache_mm(mm) do { } while (0) |
33 | +#define flush_cache_dup_mm(mm) do { } while (0) | |
32 | 34 | #define flush_cache_range(vma, start, end) do { } while (0) |
33 | 35 | #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) |
34 | 36 | #define flush_dcache_page(page) do { } while (0) |
... | ... | @@ -41,6 +43,7 @@ |
41 | 43 | #else |
42 | 44 | #define flush_cache_all() do { } while (0) |
43 | 45 | #define flush_cache_mm(mm) do { } while (0) |
46 | +#define flush_cache_dup_mm(mm) do { } while (0) | |
44 | 47 | #define flush_cache_range(vma, start, end) do { } while (0) |
45 | 48 | #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) |
46 | 49 | #define flush_dcache_page(page) do { } while (0) |
include/asm-m68k/cacheflush.h
... | ... | @@ -89,6 +89,8 @@ |
89 | 89 | __flush_cache_030(); |
90 | 90 | } |
91 | 91 | |
92 | +#define flush_cache_dup_mm(mm) flush_cache_mm(mm) | |
93 | + | |
92 | 94 | /* flush_cache_range/flush_cache_page must be macros to avoid |
93 | 95 | a dependency on linux/mm.h, which includes this file... */ |
94 | 96 | static inline void flush_cache_range(struct vm_area_struct *vma, |
include/asm-m68knommu/cacheflush.h
... | ... | @@ -8,6 +8,7 @@ |
8 | 8 | |
9 | 9 | #define flush_cache_all() __flush_cache_all() |
10 | 10 | #define flush_cache_mm(mm) do { } while (0) |
11 | +#define flush_cache_dup_mm(mm) do { } while (0) | |
11 | 12 | #define flush_cache_range(vma, start, end) __flush_cache_all() |
12 | 13 | #define flush_cache_page(vma, vmaddr) do { } while (0) |
13 | 14 | #define flush_dcache_range(start,len) __flush_cache_all() |
include/asm-mips/cacheflush.h
... | ... | @@ -17,6 +17,7 @@ |
17 | 17 | * |
18 | 18 | * - flush_cache_all() flushes entire cache |
19 | 19 | * - flush_cache_mm(mm) flushes the specified mm context's cache lines |
20 | + * - flush_cache_dup_mm(mm) handles cache flushing when forking |
20 | 21 | * - flush_cache_page(mm, vmaddr, pfn) flushes a single page |
21 | 22 | * - flush_cache_range(vma, start, end) flushes a range of pages |
22 | 23 | * - flush_icache_range(start, end) flush a range of instructions |
... | ... | @@ -31,6 +32,7 @@ |
31 | 32 | extern void (*flush_cache_all)(void); |
32 | 33 | extern void (*__flush_cache_all)(void); |
33 | 34 | extern void (*flush_cache_mm)(struct mm_struct *mm); |
35 | +#define flush_cache_dup_mm(mm) do { (void) (mm); } while (0) | |
34 | 36 | extern void (*flush_cache_range)(struct vm_area_struct *vma, |
35 | 37 | unsigned long start, unsigned long end); |
36 | 38 | extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn); |
include/asm-parisc/cacheflush.h
include/asm-powerpc/cacheflush.h
... | ... | @@ -18,6 +18,7 @@ |
18 | 18 | */ |
19 | 19 | #define flush_cache_all() do { } while (0) |
20 | 20 | #define flush_cache_mm(mm) do { } while (0) |
21 | +#define flush_cache_dup_mm(mm) do { } while (0) | |
21 | 22 | #define flush_cache_range(vma, start, end) do { } while (0) |
22 | 23 | #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) |
23 | 24 | #define flush_icache_page(vma, page) do { } while (0) |
include/asm-s390/cacheflush.h
... | ... | @@ -7,6 +7,7 @@ |
7 | 7 | /* Caches aren't brain-dead on the s390. */ |
8 | 8 | #define flush_cache_all() do { } while (0) |
9 | 9 | #define flush_cache_mm(mm) do { } while (0) |
10 | +#define flush_cache_dup_mm(mm) do { } while (0) | |
10 | 11 | #define flush_cache_range(vma, start, end) do { } while (0) |
11 | 12 | #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) |
12 | 13 | #define flush_dcache_page(page) do { } while (0) |
include/asm-sh/cpu-sh2/cacheflush.h
... | ... | @@ -15,6 +15,7 @@ |
15 | 15 | * |
16 | 16 | * - flush_cache_all() flushes entire cache |
17 | 17 | * - flush_cache_mm(mm) flushes the specified mm context's cache lines |
18 | + * - flush_cache_dup_mm(mm) handles cache flushing when forking |
18 | 19 | * - flush_cache_page(mm, vmaddr, pfn) flushes a single page |
19 | 20 | * - flush_cache_range(vma, start, end) flushes a range of pages |
20 | 21 | * |
... | ... | @@ -27,6 +28,7 @@ |
27 | 28 | */ |
28 | 29 | #define flush_cache_all() do { } while (0) |
29 | 30 | #define flush_cache_mm(mm) do { } while (0) |
31 | +#define flush_cache_dup_mm(mm) do { } while (0) | |
30 | 32 | #define flush_cache_range(vma, start, end) do { } while (0) |
31 | 33 | #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) |
32 | 34 | #define flush_dcache_page(page) do { } while (0) |
include/asm-sh/cpu-sh3/cacheflush.h
... | ... | @@ -15,6 +15,7 @@ |
15 | 15 | * |
16 | 16 | * - flush_cache_all() flushes entire cache |
17 | 17 | * - flush_cache_mm(mm) flushes the specified mm context's cache lines |
18 | + * - flush_cache_dup_mm(mm) handles cache flushing when forking |
18 | 19 | * - flush_cache_page(mm, vmaddr, pfn) flushes a single page |
19 | 20 | * - flush_cache_range(vma, start, end) flushes a range of pages |
20 | 21 | * |
... | ... | @@ -39,6 +40,7 @@ |
39 | 40 | |
40 | 41 | void flush_cache_all(void); |
41 | 42 | void flush_cache_mm(struct mm_struct *mm); |
43 | +#define flush_cache_dup_mm(mm) flush_cache_mm(mm) | |
42 | 44 | void flush_cache_range(struct vm_area_struct *vma, unsigned long start, |
43 | 45 | unsigned long end); |
44 | 46 | void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn); |
... | ... | @@ -48,6 +50,7 @@ |
48 | 50 | #else |
49 | 51 | #define flush_cache_all() do { } while (0) |
50 | 52 | #define flush_cache_mm(mm) do { } while (0) |
53 | +#define flush_cache_dup_mm(mm) do { } while (0) | |
51 | 54 | #define flush_cache_range(vma, start, end) do { } while (0) |
52 | 55 | #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) |
53 | 56 | #define flush_dcache_page(page) do { } while (0) |
include/asm-sh/cpu-sh4/cacheflush.h
... | ... | @@ -18,6 +18,7 @@ |
18 | 18 | */ |
19 | 19 | void flush_cache_all(void); |
20 | 20 | void flush_cache_mm(struct mm_struct *mm); |
21 | +#define flush_cache_dup_mm(mm) flush_cache_mm(mm) | |
21 | 22 | void flush_cache_range(struct vm_area_struct *vma, unsigned long start, |
22 | 23 | unsigned long end); |
23 | 24 | void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, |
include/asm-sh64/cacheflush.h
include/asm-sparc/cacheflush.h
... | ... | @@ -48,6 +48,7 @@ |
48 | 48 | |
49 | 49 | #define flush_cache_all() BTFIXUP_CALL(flush_cache_all)() |
50 | 50 | #define flush_cache_mm(mm) BTFIXUP_CALL(flush_cache_mm)(mm) |
51 | +#define flush_cache_dup_mm(mm) BTFIXUP_CALL(flush_cache_mm)(mm) | |
51 | 52 | #define flush_cache_range(vma,start,end) BTFIXUP_CALL(flush_cache_range)(vma,start,end) |
52 | 53 | #define flush_cache_page(vma,addr,pfn) BTFIXUP_CALL(flush_cache_page)(vma,addr) |
53 | 54 | #define flush_icache_range(start, end) do { } while (0) |
include/asm-sparc64/cacheflush.h
... | ... | @@ -12,6 +12,7 @@ |
12 | 12 | /* These are the same regardless of whether this is an SMP kernel or not. */ |
13 | 13 | #define flush_cache_mm(__mm) \ |
14 | 14 | do { if ((__mm) == current->mm) flushw_user(); } while(0) |
15 | +#define flush_cache_dup_mm(mm) flush_cache_mm(mm) | |
15 | 16 | #define flush_cache_range(vma, start, end) \ |
16 | 17 | flush_cache_mm((vma)->vm_mm) |
17 | 18 | #define flush_cache_page(vma, page, pfn) \ |
include/asm-v850/cacheflush.h
... | ... | @@ -24,6 +24,7 @@ |
24 | 24 | systems with MMUs, so we don't need them. */ |
25 | 25 | #define flush_cache_all() ((void)0) |
26 | 26 | #define flush_cache_mm(mm) ((void)0) |
27 | +#define flush_cache_dup_mm(mm) ((void)0) | |
27 | 28 | #define flush_cache_range(vma, start, end) ((void)0) |
28 | 29 | #define flush_cache_page(vma, vmaddr, pfn) ((void)0) |
29 | 30 | #define flush_dcache_page(page) ((void)0) |
include/asm-x86_64/cacheflush.h
... | ... | @@ -7,6 +7,7 @@ |
7 | 7 | /* Caches aren't brain-dead on the intel. */ |
8 | 8 | #define flush_cache_all() do { } while (0) |
9 | 9 | #define flush_cache_mm(mm) do { } while (0) |
10 | +#define flush_cache_dup_mm(mm) do { } while (0) | |
10 | 11 | #define flush_cache_range(vma, start, end) do { } while (0) |
11 | 12 | #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) |
12 | 13 | #define flush_dcache_page(page) do { } while (0) |
include/asm-xtensa/cacheflush.h
... | ... | @@ -75,6 +75,7 @@ |
75 | 75 | |
76 | 76 | #define flush_cache_all() __flush_invalidate_cache_all(); |
77 | 77 | #define flush_cache_mm(mm) __flush_invalidate_cache_all(); |
78 | +#define flush_cache_dup_mm(mm) __flush_invalidate_cache_all(); | |
78 | 79 | |
79 | 80 | #define flush_cache_vmap(start,end) __flush_invalidate_cache_all(); |
80 | 81 | #define flush_cache_vunmap(start,end) __flush_invalidate_cache_all(); |
... | ... | @@ -88,6 +89,7 @@ |
88 | 89 | |
89 | 90 | #define flush_cache_all() do { } while (0) |
90 | 91 | #define flush_cache_mm(mm) do { } while (0) |
92 | +#define flush_cache_dup_mm(mm) do { } while (0) | |
91 | 93 | |
92 | 94 | #define flush_cache_vmap(start,end) do { } while (0) |
93 | 95 | #define flush_cache_vunmap(start,end) do { } while (0) |