Commit 0d6d82b6712c808e461090dd483f111f72b3b38b

Authored by Paul Mundt
Committed by Linus Torvalds
Parent 65463b73b1

[PATCH] sh: Use pfn_valid() for lazy dcache write-back on SH7705

SH7705 in extended cache mode has some left-over VALID_PAGE() cruft that it
checks when doing lazy dcache write-back.  VALID_PAGE() has been gone for some
time (the last bits were in the discontig code, which should now also be gone);
this also fixes up a build error in the non-discontig case.

pfn_valid() gives the desired behaviour, so we switch to that.
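
For illustration only (this is not the sh implementation, and FIRST_PFN,
NR_PAGES and sketch_pfn_valid are made-up names): on a flat-memory kernel,
pfn_valid() boils down to a bounds check on the page frame number against
the RAM the kernel manages, which is why it is the right test once the
discontig/VALID_PAGE() machinery is gone.  A standalone sketch of that idea:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical memory layout, in pages, for the sketch only. */
    #define FIRST_PFN  0x0c000UL   /* first pfn backed by RAM */
    #define NR_PAGES   0x04000UL   /* number of RAM pages */

    /* Conceptual stand-in for pfn_valid(): is this pfn real RAM? */
    static bool sketch_pfn_valid(unsigned long pfn)
    {
            /* unsigned subtraction also rejects pfns below FIRST_PFN */
            return pfn - FIRST_PFN < NR_PAGES;
    }

    int main(void)
    {
            unsigned long pfns[] = { 0x0c000UL, 0x0ffffUL, 0x10000UL, 0x00100UL };
            unsigned int i;

            for (i = 0; i < sizeof(pfns) / sizeof(pfns[0]); i++)
                    printf("pfn %#lx -> %s\n", pfns[i],
                           sketch_pfn_valid(pfns[i]) ? "valid" : "invalid");
            return 0;
    }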

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Showing 1 changed file with 12 additions and 7 deletions

arch/sh/mm/tlb-sh3.c
--- a/arch/sh/mm/tlb-sh3.c
+++ b/arch/sh/mm/tlb-sh3.c
@@ -1,93 +1,98 @@
 /*
  * arch/sh/mm/tlb-sh3.c
  *
  * SH-3 specific TLB operations
  *
  * Copyright (C) 1999 Niibe Yutaka
  * Copyright (C) 2002 Paul Mundt
  *
  * Released under the terms of the GNU GPL v2.0.
  */
 #include <linux/signal.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/ptrace.h>
 #include <linux/mman.h>
 #include <linux/mm.h>
 #include <linux/smp.h>
 #include <linux/smp_lock.h>
 #include <linux/interrupt.h>
 
 #include <asm/system.h>
 #include <asm/io.h>
 #include <asm/uaccess.h>
 #include <asm/pgalloc.h>
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 
 void update_mmu_cache(struct vm_area_struct * vma,
                       unsigned long address, pte_t pte)
 {
         unsigned long flags;
         unsigned long pteval;
         unsigned long vpn;
 
         /* Ptrace may call this routine. */
         if (vma && current->active_mm != vma->vm_mm)
                 return;
 
 #if defined(CONFIG_SH7705_CACHE_32KB)
-        struct page *page;
-        page = pte_page(pte);
-        if (VALID_PAGE(page) && !test_bit(PG_mapped, &page->flags)) {
-                unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
-                __flush_wback_region((void *)P1SEGADDR(phys), PAGE_SIZE);
-                __set_bit(PG_mapped, &page->flags);
+        {
+                struct page *page = pte_page(pte);
+                unsigned long pfn = pte_pfn(pte);
+
+                if (pfn_valid(pfn) && !test_bit(PG_mapped, &page->flags)) {
+                        unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
+
+                        __flush_wback_region((void *)P1SEGADDR(phys),
+                                             PAGE_SIZE);
+                        __set_bit(PG_mapped, &page->flags);
+                }
         }
 #endif
 
         local_irq_save(flags);
 
         /* Set PTEH register */
         vpn = (address & MMU_VPN_MASK) | get_asid();
         ctrl_outl(vpn, MMU_PTEH);
 
         pteval = pte_val(pte);
 
         /* Set PTEL register */
         pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
         /* conveniently, we want all the software flags to be 0 anyway */
         ctrl_outl(pteval, MMU_PTEL);
 
         /* Load the TLB */
         asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
         local_irq_restore(flags);
 }
 
 void __flush_tlb_page(unsigned long asid, unsigned long page)
 {
         unsigned long addr, data;
         int i, ways = MMU_NTLB_WAYS;
 
         /*
          * NOTE: PTEH.ASID should be set to this MM
          *       _AND_ we need to write ASID to the array.
          *
          * It would be simple if we didn't need to set PTEH.ASID...
          */
         addr = MMU_TLB_ADDRESS_ARRAY | (page & 0x1F000);
         data = (page & 0xfffe0000) | asid; /* VALID bit is off */
 
         if ((cpu_data->flags & CPU_HAS_MMU_PAGE_ASSOC)) {
                 addr |= MMU_PAGE_ASSOC_BIT;
                 ways = 1;       /* we already know the way .. */
         }
 
         for (i = 0; i < ways; i++)
                 ctrl_outl(data, addr + (i << 8));
 }
 
 
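
The guarded flush in the hunk above follows a generic "do the expensive work
once per page" pattern: check a per-page flag, write the page's dcache lines
back, then set the flag so later faults on the same page skip the flush.  The
following standalone sketch models that pattern; all names in it
(sketch_page, lazy_writeback, writeback_page, SKETCH_NR_PAGES) are
hypothetical, while the real code uses PG_mapped in page->flags and
__flush_wback_region() on the P1 mapping of the physical page.

    #include <stdbool.h>
    #include <stdio.h>

    #define SKETCH_NR_PAGES 4

    struct sketch_page {
            bool mapped;    /* stands in for PG_mapped in page->flags */
    };

    static struct sketch_page pages[SKETCH_NR_PAGES];

    /* Stands in for __flush_wback_region() on the page's P1 address. */
    static void writeback_page(unsigned long pfn)
    {
            printf("write back dcache for pfn %lu\n", pfn);
    }

    /* Lazy write-back: flush a valid page at most once. */
    static void lazy_writeback(unsigned long pfn)
    {
            if (pfn >= SKETCH_NR_PAGES)     /* stands in for pfn_valid() */
                    return;
            if (!pages[pfn].mapped) {
                    writeback_page(pfn);
                    pages[pfn].mapped = true;
            }
    }

    int main(void)
    {
            lazy_writeback(1);      /* flushes */
            lazy_writeback(1);      /* skipped: already marked mapped */
            lazy_writeback(7);      /* skipped: not a valid pfn */
            return 0;
    }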