Blame view

arch/sh/mm/tlb-pteaex.c 2.76 KB
8263a67e1   Paul Mundt   sh: Support for e...
  /*
   * arch/sh/mm/tlb-pteaex.c
   *
   * TLB operations for SH-X3 CPUs featuring PTE ASID Extensions.
   *
   * Copyright (C) 2009 Paul Mundt
   *
   * This file is subject to the terms and conditions of the GNU General Public
   * License.  See the file "COPYING" in the main directory of this archive
   * for more details.
   */
  #include <linux/kernel.h>
  #include <linux/mm.h>
  #include <linux/io.h>
  #include <asm/system.h>
  #include <asm/mmu_context.h>
  #include <asm/cacheflush.h>
9cef74926   Paul Mundt   sh: update_mmu_ca...
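  /*
   * Called via update_mmu_cache() once a fault has been serviced: preload
   * the freshly established PTE into the UTLB so that the access does not
   * immediately fault again.
   */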
  void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
8263a67e1   Paul Mundt   sh: Support for e...
  {
9cef74926   Paul Mundt   sh: update_mmu_ca...
  	unsigned long flags, pteval, vpn;
8263a67e1   Paul Mundt   sh: Support for e...

9cef74926   Paul Mundt   sh: update_mmu_ca...
  	/*
  	 * Handle the debugger faulting in on behalf of the debuggee.
  	 */
3ed6e1293   Paul Mundt   sh: Handle a NULL...
  	if (vma && current->active_mm != vma->vm_mm)
8263a67e1   Paul Mundt   sh: Support for e...
  		return;
8263a67e1   Paul Mundt   sh: Support for e...
  	local_irq_save(flags);
  
  	/* Set PTEH register */
  	vpn = address & MMU_VPN_MASK;
  	__raw_writel(vpn, MMU_PTEH);
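  	/*
  	 * Unlike non-PTEAEX parts, the ASID is not folded into the low
  	 * bits of PTEH; it is programmed through the dedicated PTEAEX
  	 * register below.
  	 */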
  
  	/* Set PTEAEX */
  	__raw_writel(get_asid(), MMU_PTEAEX);
  
  	pteval = pte.pte_low;
  
  	/* Set PTEA register */
  #ifdef CONFIG_X2TLB
  	/*
  	 * For the extended mode TLB this is trivial: only the ESZ and
  	 * EPR bits need to be written out to PTEA, while the remainder of
  	 * the protection bits (with the exception of the compat-mode SZ
  	 * and PR bits, which are cleared) are written out in PTEL.
  	 */
  	__raw_writel(pte.pte_high, MMU_PTEA);
8263a67e1   Paul Mundt   sh: Support for e...
  #endif
  
  	/* Set PTEL register */
  	pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
  #ifdef CONFIG_CACHE_WRITETHROUGH
  	pteval |= _PAGE_WT;
  #endif
  	/* conveniently, we want all the software flags to be 0 anyway */
  	__raw_writel(pteval, MMU_PTEL);
  
  	/*
  	 * Load the TLB: ldtlb fills the UTLB entry selected by MMUCR.URC
  	 * from the PTEH/PTEL (and PTEA) registers programmed above.
  	 */
  	asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
  	local_irq_restore(flags);
  }
  
  /*
   * While SH-X2 extended TLB mode splits out the memory-mapped I/UTLB
   * data arrays, SH-X3 cores with PTEAEX split out the memory-mapped
   * address arrays. In compat mode the second array is inaccessible, while
   * in extended mode, the legacy 8-bit ASID field in address array 1 has
   * undefined behaviour.
   */
2dc2f8e0c   Paul Mundt   sh: Kill off the ...
  void local_flush_tlb_one(unsigned long asid, unsigned long page)
8263a67e1   Paul Mundt   sh: Support for e...
  {
  	jump_to_uncached();
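  	/*
  	 * These are associative writes: with MMU_PAGE_ASSOC_BIT set the
  	 * hardware compares the VPN (address array) and ASID (address
  	 * array 2) against each entry and knocks down the valid bit of
  	 * the one that matches.
  	 */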
  	__raw_writel(page, MMU_UTLB_ADDRESS_ARRAY | MMU_PAGE_ASSOC_BIT);
  	__raw_writel(asid, MMU_UTLB_ADDRESS_ARRAY2 | MMU_PAGE_ASSOC_BIT);
a9eb4f6d1   Matt Fleming   sh: Flush ITLB to...
  	__raw_writel(page, MMU_ITLB_ADDRESS_ARRAY | MMU_PAGE_ASSOC_BIT);
  	__raw_writel(asid, MMU_ITLB_ADDRESS_ARRAY2 | MMU_PAGE_ASSOC_BIT);
8263a67e1   Paul Mundt   sh: Support for e...
  	back_to_cached();
  }
be97d758e   Paul Mundt   sh: Fix up the SH...
  
  void local_flush_tlb_all(void)
  {
  	unsigned long flags, status;
  	int i;
  
  	/*
  	 * Flush the entire TLB.
  	 */
  	local_irq_save(flags);
  	jump_to_uncached();
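  	/*
  	 * The memory-mapped TLB arrays have to be accessed through the
  	 * uncached P2 area, hence the jump_to_uncached() above.
  	 */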
  
  	status = __raw_readl(MMUCR);
  	status = ((status & MMUCR_URB) >> MMUCR_URB_SHIFT);
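  	/*
  	 * MMUCR.URB is the replace boundary: entries at or above it are
  	 * reserved for wired TLB entries, so only the entries below the
  	 * boundary are cleared here.  A boundary of zero means nothing is
  	 * wired and the whole UTLB can be flushed.
  	 */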
  
  	if (status == 0)
  		status = MMUCR_URB_NENTRIES;
  
  	for (i = 0; i < status; i++)
  		__raw_writel(0x0, MMU_UTLB_ADDRESS_ARRAY | (i << 8));
  
  	/* The ITLB on these parts only has 4 entries; clear them all. */
  	for (i = 0; i < 4; i++)
  		__raw_writel(0x0, MMU_ITLB_ADDRESS_ARRAY | (i << 8));
  
  	back_to_cached();
  	ctrl_barrier();
  	local_irq_restore(flags);
  }