Commit d57d64080ddc0ff13fcffc898b6251074a482ba1
1 parent af1415314a
Exists in master and in 7 other branches
sh: Prevent 64-bit pgprot clobbering across ioremap implementations.
Presently 'flags' gets passed around a lot between the various ioremap helpers and implementations, and it is only 32 bits wide. In the X2TLB case we use 64-bit pgprots, which presently results in the upper 32 bits being chopped off (and those handily include our read/write/exec permissions).

As such, we convert everything internally to using pgprot_t directly and simply convert over with pgprot_val() where needed. With this in place, transparent fixmap utilization for early ioremap works as expected.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
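The failure mode is easy to demonstrate outside the kernel. The following standalone sketch uses hypothetical stand-in types and a made-up permission bit (not the kernel's own definitions) to show how a 64-bit pgprot value loses its upper half when squeezed through a 32-bit 'flags' parameter:

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the kernel types; X2TLB uses a 64-bit pgprot. */
typedef uint64_t pgprotval_t;
typedef struct { pgprotval_t pgprot; } pgprot_t;

/* Hypothetical extended permission bit living above bit 31. */
#define _PAGE_EXT_KERN_READ	(1ULL << 36)

/* Old-style helper: 'flags' is a 32-bit quantity on a 32-bit kernel. */
static void remap_via_flags(uint32_t flags)
{
	printf("via flags:  %#llx\n", (unsigned long long)flags);
}

/* New-style helper: pgprot_t travels intact. */
static void remap_via_prot(pgprot_t prot)
{
	printf("via pgprot: %#llx\n", (unsigned long long)prot.pgprot);
}

int main(void)
{
	pgprot_t prot = { _PAGE_EXT_KERN_READ | 0x1ULL };

	remap_via_flags(prot.pgprot);	/* prints 0x1: permission bit chopped */
	remap_via_prot(prot);		/* prints 0x1000000001: bit survives */
	return 0;
}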
Showing 6 changed files with 41 additions and 33 deletions
arch/sh/boards/board-sh7785lcr.c
@@ -21,6 +21,7 @@
 #include <linux/i2c-algo-pca.h>
 #include <linux/usb/r8a66597.h>
 #include <linux/irq.h>
+#include <linux/io.h>
 #include <linux/clk.h>
 #include <linux/errno.h>
 #include <mach/sh7785lcr.h>
 
@@ -332,15 +333,14 @@
 	pm_power_off = sh7785lcr_power_off;
 
 	/* sm501 DRAM configuration */
-	sm501_reg = ioremap_fixed(SM107_REG_ADDR, SM501_DRAM_CONTROL,
-				  PAGE_KERNEL);
+	sm501_reg = ioremap_nocache(SM107_REG_ADDR, SM501_DRAM_CONTROL);
 	if (!sm501_reg) {
 		printk(KERN_ERR "%s: ioremap error.\n", __func__);
 		return;
 	}
 
 	writel(0x000307c2, sm501_reg + SM501_DRAM_CONTROL);
-	iounmap_fixed(sm501_reg);
+	iounmap(sm501_reg);
 }
 
 /* Return the board specific boot mode pin configuration */
arch/sh/boards/mach-landisk/setup.c
@@ -63,7 +63,7 @@
 	/* open I/O area window */
 	paddrbase = virt_to_phys((void *)PA_AREA5_IO);
 	prot = PAGE_KERNEL_PCC(1, _PAGE_PCC_IO16);
-	cf_ide_base = p3_ioremap(paddrbase, PAGE_SIZE, prot.pgprot);
+	cf_ide_base = p3_ioremap(paddrbase, PAGE_SIZE, prot);
 	if (!cf_ide_base) {
 		printk("allocate_cf_area : can't open CF I/O window!\n");
 		return -ENOMEM;
arch/sh/boards/mach-lboxre2/setup.c
@@ -57,7 +57,7 @@
 	paddrbase = virt_to_phys((void*)PA_AREA5_IO);
 	psize = PAGE_SIZE;
 	prot = PAGE_KERNEL_PCC( 1 , _PAGE_PCC_IO16);
-	cf0_io_base = (u32)p3_ioremap(paddrbase, psize, prot.pgprot);
+	cf0_io_base = (u32)p3_ioremap(paddrbase, psize, prot);
 	if (!cf0_io_base) {
 		printk(KERN_ERR "%s : can't open CF I/O window!\n" , __func__ );
 		return -ENOMEM;
arch/sh/boards/mach-sh03/setup.c
@@ -82,7 +82,7 @@
 	/* open I/O area window */
 	paddrbase = virt_to_phys((void *)PA_AREA5_IO);
 	prot = PAGE_KERNEL_PCC(1, _PAGE_PCC_IO16);
-	cf_ide_base = p3_ioremap(paddrbase, PAGE_SIZE, prot.pgprot);
+	cf_ide_base = p3_ioremap(paddrbase, PAGE_SIZE, prot);
 	if (!cf_ide_base) {
 		printk("allocate_cf_area : can't open CF I/O window!\n");
 		return -ENOMEM;
arch/sh/include/asm/io.h
@@ -235,7 +235,7 @@
  */
 #ifdef CONFIG_MMU
 void __iomem *__ioremap_caller(unsigned long offset, unsigned long size,
-			       unsigned long flags, void *caller);
+			       pgprot_t prot, void *caller);
 void __iounmap(void __iomem *addr);
 
 #ifdef CONFIG_IOREMAP_FIXED
@@ -254,13 +254,13 @@
 #endif
 
 static inline void __iomem *
-__ioremap(unsigned long offset, unsigned long size, unsigned long flags)
+__ioremap(unsigned long offset, unsigned long size, pgprot_t prot)
 {
-	return __ioremap_caller(offset, size, flags, __builtin_return_address(0));
+	return __ioremap_caller(offset, size, prot, __builtin_return_address(0));
 }
 
 static inline void __iomem *
-__ioremap_29bit(unsigned long offset, unsigned long size, unsigned long flags)
+__ioremap_29bit(unsigned long offset, unsigned long size, pgprot_t prot)
 {
 #ifdef CONFIG_29BIT
 	unsigned long last_addr = offset + size - 1;
@@ -272,7 +272,7 @@
 	 * mapping must be done by the PMB or by using page tables.
 	 */
 	if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
-		if (unlikely(flags & _PAGE_CACHABLE))
+		if (unlikely(pgprot_val(prot) & _PAGE_CACHABLE))
 			return (void __iomem *)P1SEGADDR(offset);
 
 		return (void __iomem *)P2SEGADDR(offset);
@@ -287,7 +287,7 @@
 }
 
 static inline void __iomem *
-__ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
+__ioremap_mode(unsigned long offset, unsigned long size, pgprot_t prot)
 {
 	void __iomem *ret;
 
@@ -295,30 +295,39 @@
 	if (ret)
 		return ret;
 
-	ret = __ioremap_29bit(offset, size, flags);
+	ret = __ioremap_29bit(offset, size, prot);
 	if (ret)
 		return ret;
 
-	return __ioremap(offset, size, flags);
+	return __ioremap(offset, size, prot);
 }
 #else
-#define __ioremap(offset, size, flags)		((void __iomem *)(offset))
-#define __ioremap_mode(offset, size, flags)	((void __iomem *)(offset))
+#define __ioremap(offset, size, prot)		((void __iomem *)(offset))
+#define __ioremap_mode(offset, size, prot)	((void __iomem *)(offset))
 #define __iounmap(addr)				do { } while (0)
 #endif /* CONFIG_MMU */
 
-#define ioremap(offset, size)				\
-	__ioremap_mode((offset), (size), 0)
-#define ioremap_nocache(offset, size)			\
-	__ioremap_mode((offset), (size), 0)
-#define ioremap_cache(offset, size)			\
-	__ioremap_mode((offset), (size), _PAGE_CACHABLE)
-#define p3_ioremap(offset, size, flags)			\
-	__ioremap((offset), (size), (flags))
-#define ioremap_prot(offset, size, flags)		\
-	__ioremap_mode((offset), (size), (flags))
-#define iounmap(addr)					\
-	__iounmap((addr))
+static inline void __iomem *
+ioremap(unsigned long offset, unsigned long size)
+{
+	return __ioremap_mode(offset, size, PAGE_KERNEL_NOCACHE);
+}
+
+static inline void __iomem *
+ioremap_cache(unsigned long offset, unsigned long size)
+{
+	return __ioremap_mode(offset, size, PAGE_KERNEL);
+}
+
+static inline void __iomem *
+ioremap_prot(resource_size_t offset, unsigned long size, unsigned long flags)
+{
+	return __ioremap_mode(offset, size, __pgprot(flags));
+}
+
+#define ioremap_nocache	ioremap
+#define p3_ioremap	__ioremap
+#define iounmap		__iounmap
 
 #define maybebadio(port) \
 	printk(KERN_ERR "bad PC-like io %s:%u for port 0x%lx at 0x%08x\n", \
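A side benefit of turning the ioremap()/ioremap_cache()/ioremap_prot() macros into static inline functions is type enforcement: a pgprot_t parameter cannot be fed a bare integer without a compile error, where the old macro expansion accepted anything. A minimal sketch of that effect, again with stand-in types rather than the kernel's headers:

/* Stand-ins mirroring the struct-wrapped pgprot_t idiom; not kernel code. */
typedef unsigned long long pgprotval_t;
typedef struct { pgprotval_t pgprot; } pgprot_t;
#define __pgprot(x)	((pgprot_t) { (x) })
#define pgprot_val(x)	((x).pgprot)

/* Inline helper with a real pgprot_t parameter, as io.h now has. */
static inline void *sketch_ioremap_mode(unsigned long offset,
					unsigned long size, pgprot_t prot)
{
	/* The real helper would build a mapping; here we only touch prot. */
	return (size && pgprot_val(prot)) ? (void *)offset : 0;
}

int main(void)
{
	/* OK: the 64-bit value is wrapped once and arrives intact. */
	void *p = sketch_ioremap_mode(0x1000, 0x100, __pgprot(1ULL << 36));

	/* A bare integer no longer compiles:
	 *   sketch_ioremap_mode(0x1000, 0x100, 1ULL << 36);
	 * error: incompatible type for argument 3
	 */
	return p ? 0 : 1;
}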
arch/sh/mm/ioremap.c
@@ -35,11 +35,10 @@
  */
 void __iomem * __init_refok
 __ioremap_caller(unsigned long phys_addr, unsigned long size,
-		 unsigned long flags, void *caller)
+		 pgprot_t pgprot, void *caller)
 {
 	struct vm_struct *area;
 	unsigned long offset, last_addr, addr, orig_addr;
-	pgprot_t pgprot;
 
 	/* Don't allow wraparound or zero size */
 	last_addr = phys_addr + size - 1;
@@ -69,7 +68,7 @@
 	 * If we can't yet use the regular approach, go the fixmap route.
 	 */
 	if (!mem_init_done)
-		return ioremap_fixed(phys_addr, size, __pgprot(flags));
+		return ioremap_fixed(phys_addr, size, pgprot);
 
 	/*
	 * Ok, go for it..
 
@@ -91,8 +90,9 @@
 	 * PMB entries are all pre-faulted.
 	 */
 	if (unlikely(phys_addr >= P1SEG)) {
-		unsigned long mapped = pmb_remap(addr, phys_addr, size, flags);
+		unsigned long mapped;
 
+		mapped = pmb_remap(addr, phys_addr, size, pgprot_val(pgprot));
 		if (likely(mapped)) {
 			addr += mapped;
 			phys_addr += mapped;
@@ -101,7 +101,6 @@
 	}
 #endif
 
-	pgprot = __pgprot(pgprot_val(PAGE_KERNEL_NOCACHE) | flags);
 	if (likely(size))
 		if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
 			vunmap((void *)orig_addr);
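Taken together, the changes establish one conversion discipline: carry pgprot_t through all internal plumbing, unwrap with pgprot_val() only at the last legacy boundary (pmb_remap() above), and wrap raw flags exactly once where they enter (ioremap_prot() in io.h). A hedged sketch of that shape, with hypothetical helper names:

/* Stand-ins as before; the helpers below are illustrative, not kernel APIs. */
typedef unsigned long long pgprotval_t;
typedef struct { pgprotval_t pgprot; } pgprot_t;
#define __pgprot(x)	((pgprot_t) { (x) })
#define pgprot_val(x)	((x).pgprot)

/* Legacy callee that still takes a raw value (cf. pmb_remap above). */
static unsigned long legacy_remap(unsigned long addr, pgprotval_t raw)
{
	return addr + (unsigned long)(raw & 1);	/* placeholder work */
}

/* Internal plumbing: pgprot_t all the way down. */
static unsigned long remap_core(unsigned long addr, pgprot_t prot)
{
	/* Unwrap only at the legacy boundary, never earlier. */
	return legacy_remap(addr, pgprot_val(prot));
}

/* Entry point taking raw flags: wrap exactly once, then forget them. */
static unsigned long remap_prot(unsigned long addr, unsigned long flags)
{
	return remap_core(addr, __pgprot(flags));
}

int main(void)
{
	return remap_prot(0, 1) == 1 ? 0 : 1;
}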