  /* SPDX-License-Identifier: GPL-2.0-or-later */
  /* include/asm-generic/tlb.h
   *
   *	Generic TLB shootdown code
   *
   * Copyright 2001 Red Hat, Inc.
   * Based on code from mm/memory.c Copyright Linus Torvalds and others.
   *
   * Copyright 2011 Red Hat, Inc., Peter Zijlstra
   */
  #ifndef _ASM_GENERIC__TLB_H
  #define _ASM_GENERIC__TLB_H
  #include <linux/mmu_notifier.h>
  #include <linux/swap.h>
  #include <linux/hugetlb_inline.h>
  #include <asm/tlbflush.h>
  #include <asm/cacheflush.h>

  /*
   * Blindly accessing user memory from NMI context can be dangerous
   * if we're in the middle of switching the current user task or switching
   * the loaded mm.
   */
  #ifndef nmi_uaccess_okay
  # define nmi_uaccess_okay() true
  #endif
  #ifdef CONFIG_MMU
  /*
   * Generic MMU-gather implementation.
   *
   * The mmu_gather data structure is used by the mm code to implement the
   * correct and efficient ordering of freeing pages and TLB invalidations.
   *
   * This correct ordering is:
   *
   *  1) unhook page
   *  2) TLB invalidate page
   *  3) free page
   *
   * That is, we must never free a page before we have ensured there are no live
   * translations left to it. Otherwise it might be possible to observe (or
   * worse, change) the page content after it has been reused.
   *
   * The mmu_gather API consists of:
   *
   *  - tlb_gather_mmu() / tlb_finish_mmu(); start and finish a mmu_gather
   *
   *    Finish in particular will issue a (final) TLB invalidate and free
   *    all (remaining) queued pages.
   *
   *  - tlb_start_vma() / tlb_end_vma(); marks the start / end of a VMA
   *
   *    Defaults to flushing at tlb_end_vma() to reset the range; helps when
   *    there are large holes between the VMAs.
   *
   *  - tlb_remove_table()
   *
   *    tlb_remove_table() is the basic primitive to free page-table directories
   *    (__p*_free_tlb()).  In its most primitive form it is an alias for
   *    tlb_remove_page() below, for when page directories are pages and have no
   *    additional constraints.
   *
   *    See also MMU_GATHER_TABLE_FREE and MMU_GATHER_RCU_TABLE_FREE.
   *
   *  - tlb_remove_page() / __tlb_remove_page()
   *  - tlb_remove_page_size() / __tlb_remove_page_size()
   *
   *    __tlb_remove_page_size() is the basic primitive that queues a page for
   *    freeing. __tlb_remove_page() assumes PAGE_SIZE. Both will return a
   *    boolean indicating if the queue is (now) full and a call to
   *    tlb_flush_mmu() is required.
   *
   *    tlb_remove_page() and tlb_remove_page_size() imply the call to
   *    tlb_flush_mmu() when required and have no return value.
   *
   *  - tlb_change_page_size()
   *
   *    call before __tlb_remove_page*() to set the current page-size; implies a
   *    possible tlb_flush_mmu() call.
   *
   *  - tlb_flush_mmu() / tlb_flush_mmu_tlbonly()
   *
   *    tlb_flush_mmu_tlbonly() - does the TLB invalidate (and resets
   *                              related state, like the range)
   *
   *    tlb_flush_mmu() - in addition to the above TLB invalidate, also frees
   *			whatever pages are still batched.
   *
   *  - mmu_gather::fullmm
   *
   *    A flag set by tlb_gather_mmu() to indicate we're going to free
   *    the entire mm; this allows a number of optimizations.
   *
   *    - We can ignore tlb_{start,end}_vma(); because we don't
   *      care about ranges. Everything will be shot down.
   *
   *    - (RISC) architectures that use ASIDs can cycle to a new ASID
   *      and delay the invalidation until ASID space runs out.
   *
   *  - mmu_gather::need_flush_all
   *
   *    A flag that can be set by the arch code if it wants to force
   *    flush the entire TLB irrespective of the range. For instance
   *    x86-PAE needs this when changing top-level entries.
   *
   * And allows the architecture to provide and implement tlb_flush():
   *
   * tlb_flush() may, in addition to the above mentioned mmu_gather fields, make
   * use of:
   *
   *  - mmu_gather::start / mmu_gather::end
   *
   *    which provides the range that needs to be flushed to cover the pages to
   *    be freed.
   *
   *  - mmu_gather::freed_tables
   *
   *    set when we freed page table pages
   *
   *  - tlb_get_unmap_shift() / tlb_get_unmap_size()
   *
   *    returns the smallest TLB entry size unmapped in this range.
   *
   * If an architecture does not provide tlb_flush(), a default implementation
   * based on flush_tlb_range() will be used, unless MMU_GATHER_NO_RANGE is
   * specified, in which case we'll default to flush_tlb_mm().
   *
   * Additionally there are a few opt-in features:
   *
   *  MMU_GATHER_PAGE_SIZE
   *
   *  This ensures we call tlb_flush() every time tlb_change_page_size() actually
   *  changes the size and provides mmu_gather::page_size to tlb_flush().
   *
   *  This might be useful if your architecture has size specific TLB
   *  invalidation instructions.
   *
   *  MMU_GATHER_TABLE_FREE
   *
   *  This provides tlb_remove_table(), to be used instead of tlb_remove_page()
   *  for page directories (__p*_free_tlb()).
   *
   *  Useful if your architecture has non-page page directories.
   *
   *  When used, an architecture is expected to provide __tlb_remove_table()
   *  which does the actual freeing of these pages.
   *
   *  MMU_GATHER_RCU_TABLE_FREE
   *
   *  Like MMU_GATHER_TABLE_FREE, and adds semi-RCU semantics to the free (see
   *  comment below).
   *
   *  Useful if your architecture doesn't use IPIs for remote TLB invalidates
   *  and therefore doesn't naturally serialize with software page-table walkers.
   *
   *  MMU_GATHER_NO_RANGE
   *
   *  Use this if your architecture lacks an efficient flush_tlb_range().
   *
   *  MMU_GATHER_NO_GATHER
   *
   *  If the option is set, the mmu_gather will not track individual pages for
   *  delayed page free anymore. A platform that enables the option needs to
   *  provide its own implementation of the __tlb_remove_page_size() function to
   *  free pages.
   *
   *  This is useful if your architecture already flushes TLB entries in the
   *  various ptep_get_and_clear() functions.
   */
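
  /*
   * Illustrative sketch (not part of this header): the rough shape of a
   * caller driving the interface described above while tearing down user
   * mappings. The tlb_gather_mmu()/tlb_finish_mmu() prototypes are
   * assumptions for this example and have varied across kernel versions;
   * see mm/mmu_gather.c for the authoritative ones.
   *
   *      struct mmu_gather tlb;
   *
   *      tlb_gather_mmu(&tlb, mm, start, end);
   *      for each vma in [start, end) {
   *              tlb_start_vma(&tlb, vma);
   *              ... clear ptes, tlb_remove_tlb_entry(), tlb_remove_page() ...
   *              tlb_end_vma(&tlb, vma);
   *      }
   *      tlb_finish_mmu(&tlb, start, end);  // final flush, free queued pages
   */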

  #ifdef CONFIG_MMU_GATHER_TABLE_FREE
  struct mmu_table_batch {
  #ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
  	struct rcu_head		rcu;
  #endif
  	unsigned int		nr;
  	void			*tables[0];
  };
  
  #define MAX_TABLE_BATCH		\
  	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
  extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
  #else /* !CONFIG_MMU_GATHER_TABLE_FREE */
  
  /*
   * Without MMU_GATHER_TABLE_FREE the architecture is assumed to have page based
   * page directories and we can use the normal page batching to free them.
   */
  #define tlb_remove_table(tlb, page) tlb_remove_page((tlb), (page))
  
  #endif /* CONFIG_MMU_GATHER_TABLE_FREE */
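
  /*
   * Illustrative sketch (an assumption, not lifted from any particular
   * architecture): with MMU_GATHER_TABLE_FREE, an architecture whose page
   * directories are not plain pages can route them through
   * tlb_remove_table() from its __p*_free_tlb() hooks, e.g.:
   *
   *      #define __pte_free_tlb(tlb, ptep, address)      \
   *              tlb_remove_table((tlb), (ptep))
   *
   * and then do the real freeing in its __tlb_remove_table()
   * implementation, which only runs once the TLB invalidate (and, with
   * MMU_GATHER_RCU_TABLE_FREE, the semi-RCU grace period) has completed.
   */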
  
  #ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
  /*
   * This allows an architecture that does not use the linux page-tables for
   * hardware to skip the TLBI when freeing page tables.
   */
  #ifndef tlb_needs_table_invalidate
  #define tlb_needs_table_invalidate() (true)
  #endif
  #else
  
  #ifdef tlb_needs_table_invalidate
  #error tlb_needs_table_invalidate() requires MMU_GATHER_RCU_TABLE_FREE
  #endif
  #endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */

  #ifndef CONFIG_MMU_GATHER_NO_GATHER
  /*
   * If we can't allocate a page to make a big batch of page pointers
   * to work on, then just handle a few from the on-stack structure.
   */
  #define MMU_GATHER_BUNDLE	8
  struct mmu_gather_batch {
  	struct mmu_gather_batch	*next;
  	unsigned int		nr;
  	unsigned int		max;
  	struct page		*pages[0];
  };
  
  #define MAX_GATHER_BATCH	\
  	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))
  /*
   * Limit the maximum number of mmu_gather batches to reduce a risk of soft
   * lockups for non-preemptible kernels on huge machines when a lot of memory
   * is zapped during unmapping.
   * 10K pages freed at once should be safe even without a preemption point.
   */
  #define MAX_GATHER_BATCH_COUNT	(10000UL/MAX_GATHER_BATCH)
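
  /*
   * Worked example (assuming a 64-bit build with 4K pages): struct
   * mmu_gather_batch is 16 bytes, so MAX_GATHER_BATCH works out to
   * (4096 - 16) / 8 = 510 page pointers per batch, and
   * MAX_GATHER_BATCH_COUNT to 10000 / 510 = 19 batches, i.e. roughly 10K
   * queued pages before the caller is forced to flush.
   */
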
  extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
  				   int page_size);
  #endif
  /*
   * struct mmu_gather is an opaque type used by the mm code for passing around
   * any data needed by arch specific code for tlb_remove_page.
   */
  struct mmu_gather {
  	struct mm_struct	*mm;

  #ifdef CONFIG_MMU_GATHER_TABLE_FREE
  	struct mmu_table_batch	*batch;
  #endif

  	unsigned long		start;
  	unsigned long		end;
  	/*
  	 * we are in the middle of an operation to clear
  	 * a full mm and can make some optimizations
  	 */
  	unsigned int		fullmm : 1;
  
  	/*
  	 * we have performed an operation which
  	 * requires a complete flush of the tlb
  	 */
  	unsigned int		need_flush_all : 1;
  
  	/*
  	 * we have removed page directories
  	 */
  	unsigned int		freed_tables : 1;

  	/*
  	 * at which levels have we cleared entries?
  	 */
  	unsigned int		cleared_ptes : 1;
  	unsigned int		cleared_pmds : 1;
  	unsigned int		cleared_puds : 1;
  	unsigned int		cleared_p4ds : 1;
  	/*
  	 * tracks VM_EXEC | VM_HUGETLB in tlb_start_vma
  	 */
  	unsigned int		vma_exec : 1;
  	unsigned int		vma_huge : 1;
  	unsigned int		batch_count;
  #ifndef CONFIG_MMU_GATHER_NO_GATHER
  	struct mmu_gather_batch *active;
  	struct mmu_gather_batch	local;
  	struct page		*__pages[MMU_GATHER_BUNDLE];

  #ifdef CONFIG_MMU_GATHER_PAGE_SIZE
  	unsigned int page_size;
  #endif
  #endif
  };
  void tlb_flush_mmu(struct mmu_gather *tlb);

  static inline void __tlb_adjust_range(struct mmu_gather *tlb,
  				      unsigned long address,
  				      unsigned int range_size)
  {
  	tlb->start = min(tlb->start, address);
  	tlb->end = max(tlb->end, address + range_size);
  }
  
  static inline void __tlb_reset_range(struct mmu_gather *tlb)
  {
  	if (tlb->fullmm) {
  		tlb->start = tlb->end = ~0;
  	} else {
  		tlb->start = TASK_SIZE;
  		tlb->end = 0;
  	}
  	tlb->freed_tables = 0;
  	tlb->cleared_ptes = 0;
  	tlb->cleared_pmds = 0;
  	tlb->cleared_puds = 0;
  	tlb->cleared_p4ds = 0;
  	/*
  	 * Do not reset mmu_gather::vma_* fields here, we do not
  	 * call into tlb_start_vma() again to set them if there is an
  	 * intermediate flush.
  	 */
  }
  #ifdef CONFIG_MMU_GATHER_NO_RANGE
  
  #if defined(tlb_flush) || defined(tlb_start_vma) || defined(tlb_end_vma)
  #error MMU_GATHER_NO_RANGE relies on default tlb_flush(), tlb_start_vma() and tlb_end_vma()
  #endif
  
  /*
   * When an architecture does not have efficient means of range flushing TLBs
   * there is no point in doing intermediate flushes on tlb_end_vma() to keep the
   * range small. We equally don't have to worry about page granularity or other
   * things.
   *
   * All we need to do is issue a full flush for any !0 range.
   */
  static inline void tlb_flush(struct mmu_gather *tlb)
  {
  	if (tlb->end)
  		flush_tlb_mm(tlb->mm);
  }
  
  static inline void
  tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
  
  #define tlb_end_vma tlb_end_vma
  static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
  
  #else /* CONFIG_MMU_GATHER_NO_RANGE */
  #ifndef tlb_flush
  
  #if defined(tlb_start_vma) || defined(tlb_end_vma)
  #error Default tlb_flush() relies on default tlb_start_vma() and tlb_end_vma()
  #endif
  /*
   * When an architecture does not provide its own tlb_flush() implementation
   * but does have a reasonably efficient flush_tlb_range() implementation
   * use that.
   */
  static inline void tlb_flush(struct mmu_gather *tlb)
  {
  	if (tlb->fullmm || tlb->need_flush_all) {
  		flush_tlb_mm(tlb->mm);
  	} else if (tlb->end) {
  		struct vm_area_struct vma = {
  			.vm_mm = tlb->mm,
  			.vm_flags = (tlb->vma_exec ? VM_EXEC    : 0) |
  				    (tlb->vma_huge ? VM_HUGETLB : 0),
  		};
  
  		flush_tlb_range(&vma, tlb->start, tlb->end);
  	}
  }
  
  static inline void
  tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
  {
  	/*
  	 * flush_tlb_range() implementations that look at VM_HUGETLB (tile,
  	 * mips-4k) flush only large pages.
  	 *
  	 * flush_tlb_range() implementations that flush I-TLB also flush D-TLB
  	 * (tile, xtensa, arm), so it's ok to just add VM_EXEC to an existing
  	 * range.
  	 *
  	 * We rely on tlb_end_vma() to issue a flush, such that when we reset
  	 * these values the batch is empty.
  	 */
  	tlb->vma_huge = is_vm_hugetlb_page(vma);
  	tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
  }
  #else
  
  static inline void
  tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
  
  #endif
  #endif /* CONFIG_MMU_GATHER_NO_RANGE */
  static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
  {
  	/*
  	 * Anything calling __tlb_adjust_range() also sets at least one of
  	 * these bits.
  	 */
  	if (!(tlb->freed_tables || tlb->cleared_ptes || tlb->cleared_pmds ||
  	      tlb->cleared_puds || tlb->cleared_p4ds))
  		return;
  
  	tlb_flush(tlb);
  	mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
  	__tlb_reset_range(tlb);
  }
  static inline void tlb_remove_page_size(struct mmu_gather *tlb,
  					struct page *page, int page_size)
  {
  	if (__tlb_remove_page_size(tlb, page, page_size))
  		tlb_flush_mmu(tlb);
  }
  static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
  {
  	return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
  }
  /* tlb_remove_page
   *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
   *	required.
   */
  static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
  {
  	return tlb_remove_page_size(tlb, page, PAGE_SIZE);
  }
  static inline void tlb_change_page_size(struct mmu_gather *tlb,
  						     unsigned int page_size)
  {
  #ifdef CONFIG_MMU_GATHER_PAGE_SIZE
  	if (tlb->page_size && tlb->page_size != page_size) {
  		if (!tlb->fullmm && !tlb->need_flush_all)
  			tlb_flush_mmu(tlb);
  	}
  	tlb->page_size = page_size;
  #endif
  }
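
  /*
   * Usage sketch (a hypothetical caller, mirroring what hugetlb unmap
   * paths do): declare the page size before queueing pages of that size,
   * so that a MMU_GATHER_PAGE_SIZE architecture sees a consistent
   * mmu_gather::page_size when tlb_flush() runs:
   *
   *      tlb_change_page_size(tlb, huge_page_size(h));
   *      ...
   *      tlb_remove_page_size(tlb, page, huge_page_size(h));
   */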

  static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
  {
  	if (tlb->cleared_ptes)
  		return PAGE_SHIFT;
  	if (tlb->cleared_pmds)
  		return PMD_SHIFT;
  	if (tlb->cleared_puds)
  		return PUD_SHIFT;
  	if (tlb->cleared_p4ds)
  		return P4D_SHIFT;
  
  	return PAGE_SHIFT;
  }
  
  static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
  {
  	return 1UL << tlb_get_unmap_shift(tlb);
  }
  /*
   * In the case of tlb vma handling, we can optimise these away in the
   * case where we're doing a full MM flush.  When we're doing a munmap,
   * the vmas are adjusted to only cover the region to be torn down.
   */
  #ifndef tlb_start_vma
  static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
  {
  	if (tlb->fullmm)
  		return;
  
  	tlb_update_vma_flags(tlb, vma);
  	flush_cache_range(vma, vma->vm_start, vma->vm_end);
  }
  #endif
  #ifndef tlb_end_vma
  static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
  {
  	if (tlb->fullmm)
  		return;
  
  	/*
  	 * Do a TLB flush and reset the range at VMA boundaries; this avoids
  	 * the ranges growing with the unused space between consecutive VMAs,
  	 * but also the mmu_gather::vma_* flags from tlb_start_vma() rely on
  	 * this.
  	 */
  	tlb_flush_mmu_tlbonly(tlb);
  }
  #endif
  /*
   * tlb_flush_{pte|pmd|pud|p4d}_range() adjust the tlb->start and tlb->end,
   * and set corresponding cleared_*.
   */
  static inline void tlb_flush_pte_range(struct mmu_gather *tlb,
  				     unsigned long address, unsigned long size)
  {
  	__tlb_adjust_range(tlb, address, size);
  	tlb->cleared_ptes = 1;
  }
  
  static inline void tlb_flush_pmd_range(struct mmu_gather *tlb,
  				     unsigned long address, unsigned long size)
  {
  	__tlb_adjust_range(tlb, address, size);
  	tlb->cleared_pmds = 1;
  }
  
  static inline void tlb_flush_pud_range(struct mmu_gather *tlb,
  				     unsigned long address, unsigned long size)
  {
  	__tlb_adjust_range(tlb, address, size);
  	tlb->cleared_puds = 1;
  }
  
  static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
  				     unsigned long address, unsigned long size)
  {
  	__tlb_adjust_range(tlb, address, size);
  	tlb->cleared_p4ds = 1;
  }
  #ifndef __tlb_remove_tlb_entry
  #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
  #endif
  /**
   * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
   *
   * Record the fact that ptes were really unmapped by updating the range,
   * so we can later optimise away the tlb invalidate. This helps when
   * userspace is unmapping already-unmapped pages, which happens quite a lot.
   */
  #define tlb_remove_tlb_entry(tlb, ptep, address)		\
  	do {							\
  		tlb_flush_pte_range(tlb, address, PAGE_SIZE);	\
  		__tlb_remove_tlb_entry(tlb, ptep, address);	\
  	} while (0)
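
  /*
   * Illustrative sketch (heavily simplified; the real loop lives in
   * mm/memory.c's zap path): each cleared pte is recorded with
   * tlb_remove_tlb_entry() before its page is queued, so the eventual
   * flush covers every unmapped address:
   *
   *      pte_t ptent = ptep_get_and_clear_full(mm, addr, ptep, tlb->fullmm);
   *
   *      tlb_remove_tlb_entry(tlb, ptep, addr);
   *      if (pte_present(ptent))
   *              tlb_remove_page(tlb, pte_page(ptent));
   */
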
  #define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
  	do {							\
  		unsigned long _sz = huge_page_size(h);		\
  		if (_sz == PMD_SIZE)				\
  			tlb_flush_pmd_range(tlb, address, _sz);	\
  		else if (_sz == PUD_SIZE)			\
  			tlb_flush_pud_range(tlb, address, _sz);	\
  		__tlb_remove_tlb_entry(tlb, ptep, address);	\
  	} while (0)
  /**
   * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
   * This is a nop so far, because only x86 needs it.
   */
  #ifndef __tlb_remove_pmd_tlb_entry
  #define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
  #endif
  #define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)			\
  	do {								\
  		tlb_flush_pmd_range(tlb, address, HPAGE_PMD_SIZE);	\
  		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);		\
  	} while (0)
  /**
   * tlb_remove_pud_tlb_entry - remember a pud mapping for later tlb
   * invalidation. This is a nop so far, because only x86 needs it.
   */
  #ifndef __tlb_remove_pud_tlb_entry
  #define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0)
  #endif
  
  #define tlb_remove_pud_tlb_entry(tlb, pudp, address)			\
  	do {								\
  		tlb_flush_pud_range(tlb, address, HPAGE_PUD_SIZE);	\
  		__tlb_remove_pud_tlb_entry(tlb, pudp, address);		\
  	} while (0)
  /*
   * For things like page tables caches (ie caching addresses "inside" the
   * page tables, like x86 does), for legacy reasons, flushing an
   * individual page had better flush the page table caches behind it. This
   * is definitely how x86 works, for example. And if you have an
   * architected non-legacy page table cache (which I'm not aware of
   * anybody actually doing), you're going to have some architecturally
   * explicit flushing for that, likely *separate* from a regular TLB entry
   * flush, and thus you'd need more than just some range expansion..
   *
   * So if we ever find an architecture
   * that would want something that odd, I think it is up to that
   * architecture to do its own odd thing, not cause pain for others
   * http://lkml.kernel.org/r/CA+55aFzBggoXtNXQeng5d_mRoDnaMBE5Y+URs+PHR67nUpMtaw@mail.gmail.com
   *
   * For now w.r.t page table cache, mark the range_size as PAGE_SIZE
   */
  #ifndef pte_free_tlb
  #define pte_free_tlb(tlb, ptep, address)			\
  	do {							\
  		tlb_flush_pmd_range(tlb, address, PAGE_SIZE);	\
  		tlb->freed_tables = 1;				\
  		__pte_free_tlb(tlb, ptep, address);		\
  	} while (0)
  #endif

  #ifndef pmd_free_tlb
  #define pmd_free_tlb(tlb, pmdp, address)			\
  	do {							\
  		tlb_flush_pud_range(tlb, address, PAGE_SIZE);	\
  		tlb->freed_tables = 1;				\
  		__pmd_free_tlb(tlb, pmdp, address);		\
  	} while (0)
  #endif

  #ifndef pud_free_tlb
  #define pud_free_tlb(tlb, pudp, address)			\
  	do {							\
  		tlb_flush_p4d_range(tlb, address, PAGE_SIZE);	\
  		tlb->freed_tables = 1;				\
  		__pud_free_tlb(tlb, pudp, address);		\
  	} while (0)
  #endif
  #ifndef p4d_free_tlb
  #define p4d_free_tlb(tlb, pudp, address)			\
  	do {							\
  		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
  		tlb->freed_tables = 1;				\
  		__p4d_free_tlb(tlb, pudp, address);		\
  	} while (0)
  #endif

  #endif /* CONFIG_MMU */
  #endif /* _ASM_GENERIC__TLB_H */