Commit d04a0f79f502a87bb17b147afc4b3e39e75275c3

Authored by Paul Mundt
1 parent db2504966c

sh: Fix up extended mode TLB for SH-X2+ cores.

The extended mode TLB requires both 64-bit PTEs and a 64-bit pgprot;
correspondingly, the PGD also has to be 64 bits, so fix that up.

The kernel and user permission bits really are decoupled in early
cuts of the silicon, which means that we also have to set corresponding
kernel permissions on user pages or we end up with user pages that the
kernel simply can't touch (!).

Finally, with those things corrected, really enable MMUCR.ME and
correct the PTEA value (this simply needs to be the upper 32-bits
of the PTE, with the size and protection bit encoding).

Signed-off-by: Paul Mundt <lethal@linux-sh.org>

Showing 4 changed files with 88 additions and 72 deletions Side-by-side Diff

arch/sh/mm/tlb-sh4.c
... ... @@ -4,27 +4,14 @@
4 4 * SH-4 specific TLB operations
5 5 *
6 6 * Copyright (C) 1999 Niibe Yutaka
7   - * Copyright (C) 2002 Paul Mundt
  7 + * Copyright (C) 2002 - 2007 Paul Mundt
8 8 *
9 9 * Released under the terms of the GNU GPL v2.0.
10 10 */
11   -#include <linux/signal.h>
12   -#include <linux/sched.h>
13 11 #include <linux/kernel.h>
14   -#include <linux/errno.h>
15   -#include <linux/string.h>
16   -#include <linux/types.h>
17   -#include <linux/ptrace.h>
18   -#include <linux/mman.h>
19 12 #include <linux/mm.h>
20   -#include <linux/smp.h>
21   -#include <linux/smp_lock.h>
22   -#include <linux/interrupt.h>
23   -
  13 +#include <linux/io.h>
24 14 #include <asm/system.h>
25   -#include <asm/io.h>
26   -#include <asm/uaccess.h>
27   -#include <asm/pgalloc.h>
28 15 #include <asm/mmu_context.h>
29 16 #include <asm/cacheflush.h>
30 17  
31 18  
32 19  
... ... @@ -62,12 +49,22 @@
62 49 vpn = (address & MMU_VPN_MASK) | get_asid();
63 50 ctrl_outl(vpn, MMU_PTEH);
64 51  
65   - pteval = pte_val(pte);
  52 + pteval = pte.pte_low;
66 53  
67 54 /* Set PTEA register */
  55 +#ifdef CONFIG_X2TLB
  56 + /*
  57 + * For the extended mode TLB this is trivial, only the ESZ and
  58 + * EPR bits need to be written out to PTEA, with the remainder of
  59 + * the protection bits (with the exception of the compat-mode SZ
  60 + * and PR bits, which are cleared) being written out in PTEL.
  61 + */
  62 + ctrl_outl(pte.pte_high, MMU_PTEA);
  63 +#else
68 64 if (cpu_data->flags & CPU_HAS_PTEA)
69 65 /* TODO: make this look less hacky */
70 66 ctrl_outl(((pteval >> 28) & 0xe) | (pteval & 0x1), MMU_PTEA);
  67 +#endif
71 68  
72 69 /* Set PTEL register */
73 70 pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
include/asm-sh/cpu-sh4/mmu_context.h
... ... @@ -22,12 +22,20 @@
22 22 #define MMU_UTLB_ADDRESS_ARRAY 0xF6000000
23 23 #define MMU_PAGE_ASSOC_BIT 0x80
24 24  
25   -#define MMU_NTLB_ENTRIES 64 /* for 7750 */
  25 +#ifdef CONFIG_X2TLB
  26 +#define MMUCR_ME (1 << 7)
  27 +#else
  28 +#define MMUCR_ME (0)
  29 +#endif
  30 +
26 31 #ifdef CONFIG_SH_STORE_QUEUES
27   -#define MMU_CONTROL_INIT 0x05 /* SQMD=0, SV=0, TI=1, AT=1 */
  32 +#define MMUCR_SQMD (1 << 9)
28 33 #else
29   -#define MMU_CONTROL_INIT 0x205 /* SQMD=1, SV=0, TI=1, AT=1 */
  34 +#define MMUCR_SQMD (0)
30 35 #endif
  36 +
  37 +#define MMU_NTLB_ENTRIES 64
  38 +#define MMU_CONTROL_INIT (0x05|MMUCR_SQMD|MMUCR_ME)
31 39  
32 40 #define MMU_ITLB_DATA_ARRAY 0xF3000000
33 41 #define MMU_UTLB_DATA_ARRAY 0xF7000000
include/asm-sh/page.h
... ... @@ -88,6 +88,7 @@
88 88 #ifdef CONFIG_X2TLB
89 89 typedef struct { unsigned long pte_low, pte_high; } pte_t;
90 90 typedef struct { unsigned long long pgprot; } pgprot_t;
  91 +typedef struct { unsigned long long pgd; } pgd_t;
91 92 #define pte_val(x) \
92 93 ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
93 94 #define __pte(x) \
94 95  
... ... @@ -95,11 +96,10 @@
95 96 #else
96 97 typedef struct { unsigned long pte_low; } pte_t;
97 98 typedef struct { unsigned long pgprot; } pgprot_t;
  99 +typedef struct { unsigned long pgd; } pgd_t;
98 100 #define pte_val(x) ((x).pte_low)
99 101 #define __pte(x) ((pte_t) { (x) } )
100 102 #endif
101   -
102   -typedef struct { unsigned long pgd; } pgd_t;
103 103  
104 104 #define pgd_val(x) ((x).pgd)
105 105 #define pgprot_val(x) ((x).pgprot)
include/asm-sh/pgtable.h
... ... @@ -42,13 +42,12 @@
42 42  
43 43 /* PGD bits */
44 44 #define PGDIR_SHIFT (PTE_SHIFT + PTE_BITS)
45   -#define PGDIR_BITS (32 - PGDIR_SHIFT)
46 45 #define PGDIR_SIZE (1UL << PGDIR_SHIFT)
47 46 #define PGDIR_MASK (~(PGDIR_SIZE-1))
48 47  
49 48 /* Entries per level */
50 49 #define PTRS_PER_PTE (PAGE_SIZE / (1 << PTE_MAGNITUDE))
51   -#define PTRS_PER_PGD (PAGE_SIZE / 4)
  50 +#define PTRS_PER_PGD (PAGE_SIZE / sizeof(pgd_t))
52 51  
53 52 #define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
54 53 #define FIRST_USER_ADDRESS 0
55 54  
... ... @@ -100,17 +99,18 @@
100 99 #define _PAGE_HW_SHARED 0x002 /* SH-bit : shared among processes */
101 100 #define _PAGE_DIRTY 0x004 /* D-bit : page changed */
102 101 #define _PAGE_CACHABLE 0x008 /* C-bit : cachable */
103   -#ifndef CONFIG_X2TLB
104   -# define _PAGE_SZ0 0x010 /* SZ0-bit : Size of page */
105   -# define _PAGE_RW 0x020 /* PR0-bit : write access allowed */
106   -# define _PAGE_USER 0x040 /* PR1-bit : user space access allowed*/
107   -# define _PAGE_SZ1 0x080 /* SZ1-bit : Size of page (on SH-4) */
108   -#endif
  102 +#define _PAGE_SZ0 0x010 /* SZ0-bit : Size of page */
  103 +#define _PAGE_RW 0x020 /* PR0-bit : write access allowed */
  104 +#define _PAGE_USER 0x040 /* PR1-bit : user space access allowed*/
  105 +#define _PAGE_SZ1 0x080 /* SZ1-bit : Size of page (on SH-4) */
109 106 #define _PAGE_PRESENT 0x100 /* V-bit : page is valid */
110 107 #define _PAGE_PROTNONE 0x200 /* software: if not present */
111 108 #define _PAGE_ACCESSED 0x400 /* software: page referenced */
112 109 #define _PAGE_FILE _PAGE_WT /* software: pagecache or swap? */
113 110  
  111 +#define _PAGE_SZ_MASK (_PAGE_SZ0 | _PAGE_SZ1)
  112 +#define _PAGE_PR_MASK (_PAGE_RW | _PAGE_USER)
  113 +
114 114 /* Extended mode bits */
115 115 #define _PAGE_EXT_ESZ0 0x0010 /* ESZ0-bit: Size of page */
116 116 #define _PAGE_EXT_ESZ1 0x0020 /* ESZ1-bit: Size of page */
... ... @@ -126,11 +126,7 @@
126 126 #define _PAGE_EXT_KERN_READ 0x2000 /* EPR5-bit: Kernel space readable */
127 127  
128 128 /* Wrapper for extended mode pgprot twiddling */
129   -#ifdef CONFIG_X2TLB
130   -# define _PAGE_EXT(x) ((unsigned long long)(x) << 32)
131   -#else
132   -# define _PAGE_EXT(x) (0)
133   -#endif
  129 +#define _PAGE_EXT(x) ((unsigned long long)(x) << 32)
134 130  
135 131 /* software: moves to PTEA.TC (Timing Control) */
136 132 #define _PAGE_PCC_AREA5 0x00000000 /* use BSC registers for area5 */
137 133  
... ... @@ -146,10 +142,14 @@
146 142 #define _PAGE_PCC_ATR16 0x60000001 /* Attribute Memory space, 6 bit bus */
147 143  
148 144 /* Mask which drops unused bits from the PTEL value */
149   -#ifdef CONFIG_CPU_SH3
  145 +#if defined(CONFIG_CPU_SH3)
150 146 #define _PAGE_CLEAR_FLAGS (_PAGE_PROTNONE | _PAGE_ACCESSED| \
151 147 _PAGE_FILE | _PAGE_SZ1 | \
152 148 _PAGE_HW_SHARED)
  149 +#elif defined(CONFIG_X2TLB)
  150 +/* Get rid of the legacy PR/SZ bits when using extended mode */
  151 +#define _PAGE_CLEAR_FLAGS (_PAGE_PROTNONE | _PAGE_ACCESSED | \
  152 + _PAGE_FILE | _PAGE_PR_MASK | _PAGE_SZ_MASK)
153 153 #else
154 154 #define _PAGE_CLEAR_FLAGS (_PAGE_PROTNONE | _PAGE_ACCESSED | _PAGE_FILE)
155 155 #endif
156 156  
157 157  
158 158  
159 159  
... ... @@ -212,27 +212,36 @@
212 212  
213 213 #define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
214 214 _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
215   - _PAGE_EXT(_PAGE_EXT_USER_READ | \
  215 + _PAGE_EXT(_PAGE_EXT_KERN_READ | \
  216 + _PAGE_EXT_KERN_WRITE | \
  217 + _PAGE_EXT_USER_READ | \
216 218 _PAGE_EXT_USER_WRITE))
217 219  
218 220 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
219 221 _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
220   - _PAGE_EXT(_PAGE_EXT_USER_EXEC | \
  222 + _PAGE_EXT(_PAGE_EXT_KERN_EXEC | \
  223 + _PAGE_EXT_KERN_READ | \
  224 + _PAGE_EXT_USER_EXEC | \
221 225 _PAGE_EXT_USER_READ))
222 226  
223 227 #define PAGE_COPY PAGE_EXECREAD
224 228  
225 229 #define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
226 230 _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
227   - _PAGE_EXT(_PAGE_EXT_USER_READ))
  231 + _PAGE_EXT(_PAGE_EXT_KERN_READ | \
  232 + _PAGE_EXT_USER_READ))
228 233  
229 234 #define PAGE_WRITEONLY __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
230 235 _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
231   - _PAGE_EXT(_PAGE_EXT_USER_WRITE))
  236 + _PAGE_EXT(_PAGE_EXT_KERN_WRITE | \
  237 + _PAGE_EXT_USER_WRITE))
232 238  
233 239 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
234 240 _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
235   - _PAGE_EXT(_PAGE_EXT_USER_WRITE | \
  241 + _PAGE_EXT(_PAGE_EXT_KERN_WRITE | \
  242 + _PAGE_EXT_KERN_READ | \
  243 + _PAGE_EXT_KERN_EXEC | \
  244 + _PAGE_EXT_USER_WRITE | \
236 245 _PAGE_EXT_USER_READ | \
237 246 _PAGE_EXT_USER_EXEC))
238 247  
239 248  
... ... @@ -373,11 +382,15 @@
373 382 #define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
374 383  
375 384 #define pte_pfn(x) ((unsigned long)(((x).pte_low >> PAGE_SHIFT)))
376   -#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
377   -#define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
378 385  
379   -#define pte_none(x) (!pte_val(x))
380   -#define pte_present(x) (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
  386 +#define pfn_pte(pfn, prot) \
  387 + __pte(((unsigned long long)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
  388 +#define pfn_pmd(pfn, prot) \
  389 + __pmd(((unsigned long long)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
  390 +
  391 +#define pte_none(x) (!pte_val(x))
  392 +#define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
  393 +
381 394 #define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
382 395  
383 396 #define pmd_none(x) (!pmd_val(x))
384 397  
... ... @@ -392,15 +405,15 @@
392 405 * The following only work if pte_present() is true.
393 406 * Undefined behaviour if not..
394 407 */
395   -#define pte_not_present(pte) (!(pte_val(pte) & _PAGE_PRESENT))
396   -#define pte_dirty(pte) (pte_val(pte) & _PAGE_DIRTY)
397   -#define pte_young(pte) (pte_val(pte) & _PAGE_ACCESSED)
398   -#define pte_file(pte) (pte_val(pte) & _PAGE_FILE)
  408 +#define pte_not_present(pte) (!((pte).pte_low & _PAGE_PRESENT))
  409 +#define pte_dirty(pte) ((pte).pte_low & _PAGE_DIRTY)
  410 +#define pte_young(pte) ((pte).pte_low & _PAGE_ACCESSED)
  411 +#define pte_file(pte) ((pte).pte_low & _PAGE_FILE)
399 412  
400 413 #ifdef CONFIG_X2TLB
401 414 #define pte_write(pte) ((pte).pte_high & _PAGE_EXT_USER_WRITE)
402 415 #else
403   -#define pte_write(pte) (pte_val(pte) & _PAGE_RW)
  416 +#define pte_write(pte) ((pte).pte_low & _PAGE_RW)
404 417 #endif
405 418  
406 419 #define PTE_BIT_FUNC(h,fn,op) \
407 420  
408 421  
... ... @@ -429,18 +442,11 @@
429 442 /*
430 443 * Macro and implementation to make a page protection as uncachable.
431 444 */
432   -#define pgprot_noncached pgprot_noncached
  445 +#define pgprot_writecombine(prot) \
  446 + __pgprot(pgprot_val(prot) & ~_PAGE_CACHABLE)
433 447  
434   -static inline pgprot_t pgprot_noncached(pgprot_t _prot)
435   -{
436   - unsigned long prot = pgprot_val(_prot);
  448 +#define pgprot_noncached pgprot_writecombine
437 449  
438   - prot &= ~_PAGE_CACHABLE;
439   - return __pgprot(prot);
440   -}
441   -
442   -#define pgprot_writecombine(prot) __pgprot(pgprot_val(prot) & ~_PAGE_CACHABLE)
443   -
444 450 /*
445 451 * Conversion functions: convert a page and protection to a page entry,
446 452 * and a page entry and page directory to the page they refer to.
447 453  
448 454  
449 455  
450 456  
451 457  
... ... @@ -451,28 +457,33 @@
451 457  
452 458 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
453 459 {
454   - set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) |
455   - pgprot_val(newprot)));
  460 + pte.pte_low &= _PAGE_CHG_MASK;
  461 + pte.pte_low |= pgprot_val(newprot);
  462 +
  463 +#ifdef CONFIG_X2TLB
  464 + pte.pte_high |= pgprot_val(newprot) >> 32;
  465 +#endif
  466 +
456 467 return pte;
457 468 }
458 469  
459   -#define pmd_page_vaddr(pmd) pmd_val(pmd)
  470 +#define pmd_page_vaddr(pmd) ((unsigned long)pmd_val(pmd))
460 471 #define pmd_page(pmd) (virt_to_page(pmd_val(pmd)))
461 472  
462 473 /* to find an entry in a page-table-directory. */
463   -#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
464   -#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
  474 +#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
  475 +#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
465 476  
466 477 /* to find an entry in a kernel page-table-directory */
467   -#define pgd_offset_k(address) pgd_offset(&init_mm, address)
  478 +#define pgd_offset_k(address) pgd_offset(&init_mm, address)
468 479  
469 480 /* Find an entry in the third-level page table.. */
470   -#define pte_index(address) \
471   - ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
  481 +#define pte_index(address) ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
472 482 #define pte_offset_kernel(dir, address) \
473 483 ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
474   -#define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
475   -#define pte_offset_map_nested(dir, address) pte_offset_kernel(dir, address)
  484 +#define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
  485 +#define pte_offset_map_nested(dir, address) pte_offset_kernel(dir, address)
  486 +
476 487 #define pte_unmap(pte) do { } while (0)
477 488 #define pte_unmap_nested(pte) do { } while (0)
478 489  
479 490  
480 491  
... ... @@ -480,13 +491,14 @@
480 491 #define pte_ERROR(e) \
481 492 printk("%s:%d: bad pte %p(%08lx%08lx).\n", __FILE__, __LINE__, \
482 493 &(e), (e).pte_high, (e).pte_low)
  494 +#define pgd_ERROR(e) \
  495 + printk("%s:%d: bad pgd %016llx.\n", __FILE__, __LINE__, pgd_val(e))
483 496 #else
484 497 #define pte_ERROR(e) \
485 498 printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
486   -#endif
487   -
488 499 #define pgd_ERROR(e) \
489 500 printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
  501 +#endif
490 502  
491 503 struct vm_area_struct;
492 504 extern void update_mmu_cache(struct vm_area_struct * vma,