  /* include/asm-generic/tlb.h
   *
   *	Generic TLB shootdown code
   *
   * Copyright 2001 Red Hat, Inc.
   * Based on code from mm/memory.c Copyright Linus Torvalds and others.
   *
   * Copyright 2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
   *
   * This program is free software; you can redistribute it and/or
   * modify it under the terms of the GNU General Public License
   * as published by the Free Software Foundation; either version
   * 2 of the License, or (at your option) any later version.
   */
  #ifndef _ASM_GENERIC__TLB_H
  #define _ASM_GENERIC__TLB_H
  #include <linux/swap.h>
  #include <asm/pgalloc.h>
  #include <asm/tlbflush.h>
  #ifdef CONFIG_HAVE_RCU_TABLE_FREE
  /*
   * Semi RCU freeing of the page directories.
   *
   * This is needed by some architectures to implement software pagetable walkers.
   *
   * gup_fast() and other software pagetable walkers do a lockless page-table
   * walk and therefore need some synchronization with the freeing of the page
   * directories. The chosen means to accomplish that is by disabling IRQs over
   * the walk.
   *
   * Architectures that use IPIs to flush TLBs will then automagically DTRT,
   * since we unlink the page, flush TLBs, then free the page. Because the
   * disabling of IRQs delays the completion of the TLB flush, we can never
   * observe an already freed page.
   *
   * Architectures that do not have this (PPC) need to delay the freeing by
   * some other means; this is that means.
   *
   * What we do is batch the freed directory pages (tables) and RCU free them.
   * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling
   * holds off grace periods.
   *
   * However, in order to batch these pages we need to allocate storage; this
   * allocation is deep inside the MM code and can thus easily fail on memory
   * pressure. To guarantee progress we fall back to single table freeing; see
   * the implementation of tlb_remove_table_one().
   */
  struct mmu_table_batch {
  	struct rcu_head		rcu;
  	unsigned int		nr;
  	void			*tables[0];
  };
  
  #define MAX_TABLE_BATCH		\
  	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
  
  extern void tlb_table_flush(struct mmu_gather *tlb);
  extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
  
  #endif
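
  /*
   * Hedged usage sketch (illustrative, not part of this header): an
   * architecture that selects CONFIG_HAVE_RCU_TABLE_FREE routes its
   * page-table frees through tlb_remove_table() instead of freeing the
   * page directly, so a concurrent lockless walker (e.g. gup_fast() with
   * IRQs disabled) can never see a freed directory. The hook name
   * arch_pte_free_tlb() below is made up for the example.
   *
   *	static void arch_pte_free_tlb(struct mmu_gather *tlb, void *table)
   *	{
   *		// Deferred free: the table is queued in tlb->batch and
   *		// released after a sched-RCU grace period (or through the
   *		// tlb_remove_table_one() fallback if allocation fails).
   *		tlb_remove_table(tlb, table);
   *	}
   */
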
  /*
   * If we can't allocate a page to make a big batch of page pointers
   * to work on, then just handle a few from the on-stack structure.
   */
  #define MMU_GATHER_BUNDLE	8
  struct mmu_gather_batch {
  	struct mmu_gather_batch	*next;
  	unsigned int		nr;
  	unsigned int		max;
  	struct page		*pages[0];
  };
  
  #define MAX_GATHER_BATCH	\
  	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))
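
  /*
   * Worked example (assuming a 64-bit build with 4 KiB pages): the batch
   * header above is 16 bytes (one 8-byte next pointer plus two 4-byte
   * counters), so MAX_GATHER_BATCH = (4096 - 16) / 8 = 510 page pointers
   * per overflow batch.
   */
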
  /*
   * struct mmu_gather is an opaque type used by the mm code for passing around
   * any data needed by arch specific code for tlb_remove_page.
   */
  struct mmu_gather {
  	struct mm_struct	*mm;
  #ifdef CONFIG_HAVE_RCU_TABLE_FREE
  	struct mmu_table_batch	*batch;
  #endif
  	unsigned int		need_flush : 1,	/* Did free PTEs */
  				fast_mode  : 1; /* No batching   */
  
  	unsigned int		fullmm;
  
  	struct mmu_gather_batch *active;
  	struct mmu_gather_batch	local;
  	struct page		*__pages[MMU_GATHER_BUNDLE];
  };
  #define HAVE_GENERIC_MMU_GATHER

  static inline int tlb_fast_mode(struct mmu_gather *tlb)
  {
  #ifdef CONFIG_SMP
  	return tlb->fast_mode;
  #else
  	/*
  	 * For UP we don't need to worry about TLB flush
  	 * and page free order so much..
  	 */
  	return 1;
  #endif
  }
  void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm);
  void tlb_flush_mmu(struct mmu_gather *tlb);
  void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end);
  int __tlb_remove_page(struct mmu_gather *tlb, struct page *page);
  
  /* tlb_remove_page
   *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
   *	required.
   */
  static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
  {
  	if (!__tlb_remove_page(tlb, page))
  		tlb_flush_mmu(tlb);
  }
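
  /*
   * Hedged lifecycle sketch (loosely modelled on the unmap paths in
   * mm/memory.c; zap_range() and for_each_page_in_range() are made-up
   * names): callers bracket a run of removals with tlb_gather_mmu() and
   * tlb_finish_mmu(), and tlb_remove_page() triggers the flush itself
   * whenever a batch fills up.
   *
   *	static void zap_range(struct mm_struct *mm,
   *			      unsigned long start, unsigned long end)
   *	{
   *		struct mmu_gather tlb;
   *		struct page *page;
   *
   *		tlb_gather_mmu(&tlb, mm, false);	// false: not a full-mm teardown
   *		for_each_page_in_range(page, start, end)
   *			tlb_remove_page(&tlb, page);	// batches; flushes when full
   *		tlb_finish_mmu(&tlb, start, end);	// final TLB flush, frees batches
   *	}
   */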
  
  /**
   * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
   *
   * Record the fact that PTEs were really unmapped in ->need_flush, so we can
   * later optimise away the tlb invalidate. This helps when userspace is
   * unmapping already-unmapped pages, which happens quite a lot.
   */
  #define tlb_remove_tlb_entry(tlb, ptep, address)		\
  	do {							\
  		tlb->need_flush = 1;				\
  		__tlb_remove_tlb_entry(tlb, ptep, address);	\
  	} while (0)
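
  /*
   * Hedged sketch of the macro above in use (simplified from the
   * zap_pte_range() pattern in mm/memory.c): each pte that is really
   * cleared is recorded so the eventual TLB invalidate is not optimised
   * away.
   *
   *	pte_t ptent = ptep_get_and_clear_full(mm, addr, ptep, tlb->fullmm);
   *	tlb_remove_tlb_entry(tlb, ptep, addr);	// sets tlb->need_flush
   *	if (pte_present(ptent))
   *		tlb_remove_page(tlb, pte_page(ptent));
   */
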
  /**
   * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
   * This is a nop so far, because only x86 needs it.
   */
  #ifndef __tlb_remove_pmd_tlb_entry
  #define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
  #endif
  
  #define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)		\
  	do {							\
  		tlb->need_flush = 1;				\
  		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);	\
  	} while (0)
  #define pte_free_tlb(tlb, ptep, address)			\
  	do {							\
  		tlb->need_flush = 1;				\
  		__pte_free_tlb(tlb, ptep, address);		\
  	} while (0)
  
  #ifndef __ARCH_HAS_4LEVEL_HACK
  #define pud_free_tlb(tlb, pudp, address)			\
  	do {							\
  		tlb->need_flush = 1;				\
  		__pud_free_tlb(tlb, pudp, address);		\
  	} while (0)
  #endif
  #define pmd_free_tlb(tlb, pmdp, address)			\
  	do {							\
  		tlb->need_flush = 1;				\
  		__pmd_free_tlb(tlb, pmdp, address);		\
  	} while (0)
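
  /*
   * Hedged teardown-order sketch (after the free_pgd_range() pattern in
   * mm/memory.c): the __pXX_free_tlb() arch hooks are only reached through
   * the wrappers above, so need_flush is set before any directory page is
   * handed back.
   *
   *	pte_free_tlb(tlb, ptep, addr);	// leaf page tables first
   *	pmd_free_tlb(tlb, pmdp, addr);	// then the pmd that pointed at them
   *	pud_free_tlb(tlb, pudp, addr);	// then the pud, and so on upward
   */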
  
  #define tlb_migrate_finish(mm) do {} while (0)
  
  #endif /* _ASM_GENERIC__TLB_H */