Commit 7856dfeb23c16ef3d8dac8871b4d5b93c70b59b9
Committed by
Linus Torvalds
1 parent
c4d1fcf3a2
Exists in
master
and in
20 other branches
[PATCH] x86_64: Fixed guard page handling again in iounmap
Caused oopses again. Also fix potential mismatch in checking if change_page_attr was needed. To do it without races I needed to change mm/vmalloc.c to export a __remove_vm_area that does not take vmlist lock. Noticed by Terence Ripperda and based on a patch of his. Signed-off-by: Andi Kleen <ak@suse.de> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Showing 3 changed files with 29 additions and 34 deletions. Side-by-side Diff
arch/x86_64/mm/ioremap.c
... | ... | @@ -133,7 +133,7 @@ |
133 | 133 | unsigned long flags) |
134 | 134 | { |
135 | 135 | int err = 0; |
136 | - if (flags && phys_addr + size - 1 < (end_pfn_map << PAGE_SHIFT)) { | |
136 | + if (phys_addr + size - 1 < (end_pfn_map << PAGE_SHIFT)) { | |
137 | 137 | unsigned long npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; |
138 | 138 | unsigned long vaddr = (unsigned long) __va(phys_addr); |
139 | 139 | |
... | ... | @@ -214,7 +214,7 @@ |
214 | 214 | remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr)); |
215 | 215 | return NULL; |
216 | 216 | } |
217 | - if (ioremap_change_attr(phys_addr, size, flags) < 0) { | |
217 | + if (flags && ioremap_change_attr(phys_addr, size, flags) < 0) { | |
218 | 218 | area->flags &= 0xffffff; |
219 | 219 | vunmap(addr); |
220 | 220 | return NULL; |
... | ... | @@ -251,7 +251,7 @@ |
251 | 251 | |
252 | 252 | void iounmap(volatile void __iomem *addr) |
253 | 253 | { |
254 | - struct vm_struct *p, **pprev; | |
254 | + struct vm_struct *p; | |
255 | 255 | |
256 | 256 | if (addr <= high_memory) |
257 | 257 | return; |
... | ... | @@ -260,24 +260,11 @@ |
260 | 260 | return; |
261 | 261 | |
262 | 262 | write_lock(&vmlist_lock); |
263 | - for (p = vmlist, pprev = &vmlist; p != NULL; pprev = &p->next, p = *pprev) | |
264 | - if (p->addr == (void *)(PAGE_MASK & (unsigned long)addr)) | |
265 | - break; | |
266 | - if (!p) { | |
267 | - printk("__iounmap: bad address %p\n", addr); | |
268 | - goto out_unlock; | |
269 | - } | |
270 | - *pprev = p->next; | |
271 | - unmap_vm_area(p); | |
272 | - if ((p->flags >> 20) && | |
273 | - p->phys_addr + p->size - 1 < virt_to_phys(high_memory)) { | |
274 | - /* p->size includes the guard page, but cpa doesn't like that */ | |
275 | - change_page_attr_addr((unsigned long)__va(p->phys_addr), | |
276 | - p->size >> PAGE_SHIFT, | |
277 | - PAGE_KERNEL); | |
278 | - global_flush_tlb(); | |
279 | - } | |
280 | -out_unlock: | |
263 | + p = __remove_vm_area((void *)((unsigned long)addr & PAGE_MASK)); | |
264 | + if (!p) | |
265 | + printk("iounmap: bad address %p\n", addr); | |
266 | + else if (p->flags >> 20) | |
267 | + ioremap_change_attr(p->phys_addr, p->size, 0); | |
281 | 268 | write_unlock(&vmlist_lock); |
282 | 269 | kfree(p); |
283 | 270 | } |
include/linux/vmalloc.h
... | ... | @@ -41,6 +41,7 @@ |
41 | 41 | extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags, |
42 | 42 | unsigned long start, unsigned long end); |
43 | 43 | extern struct vm_struct *remove_vm_area(void *addr); |
44 | +extern struct vm_struct *__remove_vm_area(void *addr); | |
44 | 45 | extern int map_vm_area(struct vm_struct *area, pgprot_t prot, |
45 | 46 | struct page ***pages); |
46 | 47 | extern void unmap_vm_area(struct vm_struct *area); |
mm/vmalloc.c
... | ... | @@ -248,37 +248,44 @@ |
248 | 248 | return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END); |
249 | 249 | } |
250 | 250 | |
251 | -/** | |
252 | - * remove_vm_area - find and remove a contiguous kernel virtual area | 
253 | - * | |
254 | - * @addr: base address | |
255 | - * | |
256 | - * Search for the kernel VM area starting at @addr, and remove it. | |
257 | - * This function returns the found VM area, but using it is NOT safe | |
258 | - * on SMP machines. | |
259 | - */ | |
260 | -struct vm_struct *remove_vm_area(void *addr) | |
251 | +/* Caller must hold vmlist_lock */ | |
252 | +struct vm_struct *__remove_vm_area(void *addr) | |
261 | 253 | { |
262 | 254 | struct vm_struct **p, *tmp; |
263 | 255 | |
264 | - write_lock(&vmlist_lock); | |
265 | 256 | for (p = &vmlist ; (tmp = *p) != NULL ;p = &tmp->next) { |
266 | 257 | if (tmp->addr == addr) |
267 | 258 | goto found; |
268 | 259 | } |
269 | - write_unlock(&vmlist_lock); | |
270 | 260 | return NULL; |
271 | 261 | |
272 | 262 | found: |
273 | 263 | unmap_vm_area(tmp); |
274 | 264 | *p = tmp->next; |
275 | - write_unlock(&vmlist_lock); | |
276 | 265 | |
277 | 266 | /* |
278 | 267 | * Remove the guard page. |
279 | 268 | */ |
280 | 269 | tmp->size -= PAGE_SIZE; |
281 | 270 | return tmp; |
271 | +} | |
272 | + | |
273 | +/** | |
274 | + * remove_vm_area - find and remove a contiguous kernel virtual area | 
275 | + * | |
276 | + * @addr: base address | |
277 | + * | |
278 | + * Search for the kernel VM area starting at @addr, and remove it. | |
279 | + * This function returns the found VM area, but using it is NOT safe | |
280 | + * on SMP machines, except for its size or flags. | |
281 | + */ | |
282 | +struct vm_struct *remove_vm_area(void *addr) | |
283 | +{ | |
284 | + struct vm_struct *v; | |
285 | + write_lock(&vmlist_lock); | |
286 | + v = __remove_vm_area(addr); | |
287 | + write_unlock(&vmlist_lock); | |
288 | + return v; | |
282 | 289 | } |
283 | 290 | |
284 | 291 | void __vunmap(void *addr, int deallocate_pages) |