Commit 7bdda6209f224aa784a036df54b22cb338d2e859

Authored by Paul Mundt
1 parent 49f3bfe933

sh: Fix up more 64-bit pgprot truncation on SH-X2 TLB.

Both the store queue API and the PMB remapping take unsigned long for
their pgprot flags, which cuts off the extended protection bits. In the
case of the PMB this isn't really a problem since the cache attribute
bits that we care about are all in the lower 32-bits, but we do it just
to be safe. The store queue remapping on the other hand depends on the
extended prot bits for enabling userspace access to the mappings.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>

Showing 6 changed files with 18 additions and 13 deletions Side-by-side Diff

arch/sh/include/asm/mmu.h
... ... @@ -33,6 +33,7 @@
33 33 #ifndef __ASSEMBLY__
34 34 #include <linux/errno.h>
35 35 #include <linux/threads.h>
  36 +#include <asm/page.h>
36 37  
37 38 /* Default "unsigned long" context */
38 39 typedef unsigned long mm_context_id_t[NR_CPUS];
39 40  
... ... @@ -71,13 +72,13 @@
71 72 #ifdef CONFIG_PMB
72 73 /* arch/sh/mm/pmb.c */
73 74 long pmb_remap(unsigned long virt, unsigned long phys,
74   - unsigned long size, unsigned long flags);
  75 + unsigned long size, pgprot_t prot);
75 76 void pmb_unmap(unsigned long addr);
76 77 int pmb_init(void);
77 78 bool __in_29bit_mode(void);
78 79 #else
79 80 static inline long pmb_remap(unsigned long virt, unsigned long phys,
80   - unsigned long size, unsigned long flags)
  81 + unsigned long size, pgprot_t prot)
81 82 {
82 83 return -EINVAL;
83 84 }
arch/sh/include/cpu-sh4/cpu/sq.h
... ... @@ -12,6 +12,7 @@
12 12 #define __ASM_CPU_SH4_SQ_H
13 13  
14 14 #include <asm/addrspace.h>
  15 +#include <asm/page.h>
15 16  
16 17 /*
17 18 * Store queues range from e0000000-e3fffffc, allowing approx. 64MB to be
... ... @@ -28,7 +29,7 @@
28 29  
29 30 /* arch/sh/kernel/cpu/sh4/sq.c */
30 31 unsigned long sq_remap(unsigned long phys, unsigned int size,
31   - const char *name, unsigned long flags);
  32 + const char *name, pgprot_t prot);
32 33 void sq_unmap(unsigned long vaddr);
33 34 void sq_flush_range(unsigned long start, unsigned int len);
34 35  
arch/sh/kernel/cpu/sh4/sq.c
... ... @@ -100,7 +100,7 @@
100 100 spin_unlock_irq(&sq_mapping_lock);
101 101 }
102 102  
103   -static int __sq_remap(struct sq_mapping *map, unsigned long flags)
  103 +static int __sq_remap(struct sq_mapping *map, pgprot_t prot)
104 104 {
105 105 #if defined(CONFIG_MMU)
106 106 struct vm_struct *vma;
... ... @@ -113,7 +113,7 @@
113 113  
114 114 if (ioremap_page_range((unsigned long)vma->addr,
115 115 (unsigned long)vma->addr + map->size,
116   - vma->phys_addr, __pgprot(flags))) {
  116 + vma->phys_addr, prot)) {
117 117 vunmap(vma->addr);
118 118 return -EAGAIN;
119 119 }
120 120  
... ... @@ -135,14 +135,14 @@
135 135 * @phys: Physical address of mapping.
136 136 * @size: Length of mapping.
137 137 * @name: User invoking mapping.
138   - * @flags: Protection flags.
  138 + * @prot: Protection bits.
139 139 *
140 140 * Remaps the physical address @phys through the next available store queue
141 141 * address of @size length. @name is logged at boot time as well as through
142 142 * the sysfs interface.
143 143 */
144 144 unsigned long sq_remap(unsigned long phys, unsigned int size,
145   - const char *name, unsigned long flags)
  145 + const char *name, pgprot_t prot)
146 146 {
147 147 struct sq_mapping *map;
148 148 unsigned long end;
... ... @@ -177,7 +177,7 @@
177 177  
178 178 map->sq_addr = P4SEG_STORE_QUE + (page << PAGE_SHIFT);
179 179  
180   - ret = __sq_remap(map, pgprot_val(PAGE_KERNEL_NOCACHE) | flags);
  180 + ret = __sq_remap(map, prot);
181 181 if (unlikely(ret != 0))
182 182 goto out;
183 183  
... ... @@ -309,8 +309,7 @@
309 309 return -EIO;
310 310  
311 311 if (likely(len)) {
312   - int ret = sq_remap(base, len, "Userspace",
313   - pgprot_val(PAGE_SHARED));
  312 + int ret = sq_remap(base, len, "Userspace", PAGE_SHARED);
314 313 if (ret < 0)
315 314 return ret;
316 315 } else
arch/sh/mm/ioremap.c
... ... @@ -80,7 +80,7 @@
80 80 if (unlikely(phys_addr >= P1SEG)) {
81 81 unsigned long mapped;
82 82  
83   - mapped = pmb_remap(addr, phys_addr, size, pgprot_val(pgprot));
  83 + mapped = pmb_remap(addr, phys_addr, size, pgprot);
84 84 if (likely(mapped)) {
85 85 addr += mapped;
86 86 phys_addr += mapped;
arch/sh/mm/pmb.c
... ... @@ -24,6 +24,7 @@
24 24 #include <asm/system.h>
25 25 #include <asm/uaccess.h>
26 26 #include <asm/pgtable.h>
  27 +#include <asm/page.h>
27 28 #include <asm/mmu.h>
28 29 #include <asm/io.h>
29 30 #include <asm/mmu_context.h>
30 31  
... ... @@ -166,12 +167,15 @@
166 167 };
167 168  
168 169 long pmb_remap(unsigned long vaddr, unsigned long phys,
169   - unsigned long size, unsigned long flags)
  170 + unsigned long size, pgprot_t prot)
170 171 {
171 172 struct pmb_entry *pmbp, *pmbe;
172 173 unsigned long wanted;
173 174 int pmb_flags, i;
174 175 long err;
  176 + u64 flags;
  177 +
  178 + flags = pgprot_val(prot);
175 179  
176 180 /* Convert typical pgprot value to the PMB equivalent */
177 181 if (flags & _PAGE_CACHABLE) {
drivers/video/pvr2fb.c
... ... @@ -831,7 +831,7 @@
831 831 printk(KERN_NOTICE "fb%d: registering with SQ API\n", fb_info->node);
832 832  
833 833 pvr2fb_map = sq_remap(fb_info->fix.smem_start, fb_info->fix.smem_len,
834   - fb_info->fix.id, pgprot_val(PAGE_SHARED));
  834 + fb_info->fix.id, PAGE_SHARED);
835 835  
836 836 printk(KERN_NOTICE "fb%d: Mapped video memory to SQ addr 0x%lx\n",
837 837 fb_info->node, pvr2fb_map);