include/linux/pgtable.h

  /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef _LINUX_PGTABLE_H
  #define _LINUX_PGTABLE_H

  #include <linux/pfn.h>
  #include <asm/pgtable.h>

  #ifndef __ASSEMBLY__
  #ifdef CONFIG_MMU

  #include <linux/mm_types.h>
  #include <linux/bug.h>
  #include <linux/errno.h>
  #include <asm-generic/pgtable_uffd.h>

  #if 5 - defined(__PAGETABLE_P4D_FOLDED) - defined(__PAGETABLE_PUD_FOLDED) - \
  	defined(__PAGETABLE_PMD_FOLDED) != CONFIG_PGTABLE_LEVELS
  #error CONFIG_PGTABLE_LEVELS is not consistent with __PAGETABLE_{P4D,PUD,PMD}_FOLDED
  #endif
  /*
   * On almost all architectures and configurations, 0 can be used as the
   * upper ceiling to free_pgtables(): on many architectures it has the same
   * effect as using TASK_SIZE.  However, there is one configuration which
   * must impose a more careful limit, to avoid freeing kernel pgtables.
   */
  #ifndef USER_PGTABLES_CEILING
  #define USER_PGTABLES_CEILING	0UL
  #endif
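
  /*
   * Illustrative sketch (not part of this header): teardown paths such as
   * exit_mmap() typically pass this value as the upper bound when freeing a
   * process's page tables, e.g.
   *
   *	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
   *
   * so an architecture that shares kernel entries in the user pgd can define
   * a non-zero ceiling and stop short of its kernel page tables.
   */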
  /*
   * A page table page can be thought of as an array like this: pXd_t[PTRS_PER_PxD]
   *
   * The pXx_index() functions return the index of the entry in the page
   * table page which would control the given virtual address.
   *
   * As these functions may be used by the same code for different levels of
   * the page table folding, they are always available, regardless of
   * CONFIG_PGTABLE_LEVELS value. For the folded levels they simply return 0
   * because in such cases PTRS_PER_PxD equals 1.
   */
  
  static inline unsigned long pte_index(unsigned long address)
  {
  	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
  }
  
  #ifndef pmd_index
  static inline unsigned long pmd_index(unsigned long address)
  {
  	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
  }
  #define pmd_index pmd_index
  #endif
  
  #ifndef pud_index
  static inline unsigned long pud_index(unsigned long address)
  {
  	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
  }
  #define pud_index pud_index
  #endif
  
  #ifndef pgd_index
  /* Must be a compile-time constant, so implement it as a macro */
  #define pgd_index(a)  (((a) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
  #endif
  
  #ifndef pte_offset_kernel
  static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
  {
  	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
  }
  #define pte_offset_kernel pte_offset_kernel
  #endif
  
  #if defined(CONFIG_HIGHPTE)
  #define pte_offset_map(dir, address)				\
  	((pte_t *)kmap_atomic(pmd_page(*(dir))) +		\
  	 pte_index((address)))
  #define pte_unmap(pte) kunmap_atomic((pte))
  #else
  #define pte_offset_map(dir, address)	pte_offset_kernel((dir), (address))
  #define pte_unmap(pte) ((void)(pte))	/* NOP */
  #endif
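
  /*
   * Illustrative sketch (not part of this header): with CONFIG_HIGHPTE the
   * PTE page may live in highmem, so a mapped entry must always be paired
   * with pte_unmap(), e.g.
   *
   *	pte_t *ptep = pte_offset_map(pmd, addr);
   *	pte_t pte = *ptep;
   *	pte_unmap(ptep);
   *
   * Without CONFIG_HIGHPTE this degenerates to pte_offset_kernel() and a
   * no-op unmap.
   */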
  
  /* Find an entry in the second-level page table. */
  #ifndef pmd_offset
  static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
  {
  	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
  }
  #define pmd_offset pmd_offset
  #endif
  
  #ifndef pud_offset
  static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
  {
  	return (pud_t *)p4d_page_vaddr(*p4d) + pud_index(address);
  }
  #define pud_offset pud_offset
  #endif
  
  static inline pgd_t *pgd_offset_pgd(pgd_t *pgd, unsigned long address)
  {
  	return (pgd + pgd_index(address));
  };
  
  /*
   * a shortcut to get a pgd_t in a given mm
   */
  #ifndef pgd_offset
  #define pgd_offset(mm, address)		pgd_offset_pgd((mm)->pgd, (address))
  #endif
  
  /*
   * a shortcut which implies the use of the kernel's pgd, instead
   * of a process's
   */
  #ifndef pgd_offset_k
  #define pgd_offset_k(address)		pgd_offset(&init_mm, (address))
  #endif
  
  /*
   * In many cases it is known that a virtual address is mapped at PMD or PTE
   * level, so instead of traversing all the page table levels, we can get a
   * pointer to the PMD entry in user or kernel page table or translate a virtual
   * address to the pointer in the PTE in the kernel page tables with simple
   * helpers.
   */
  static inline pmd_t *pmd_off(struct mm_struct *mm, unsigned long va)
  {
  	return pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, va), va), va), va);
  }
  
  static inline pmd_t *pmd_off_k(unsigned long va)
  {
  	return pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va);
  }
  
  static inline pte_t *virt_to_kpte(unsigned long vaddr)
  {
  	pmd_t *pmd = pmd_off_k(vaddr);
  
  	return pmd_none(*pmd) ? NULL : pte_offset_kernel(pmd, vaddr);
  }
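
  /*
   * Illustrative sketch (not part of this header): the helpers above collapse
   * the full level-by-level descent, which would otherwise read
   *
   *	pgd_t *pgd = pgd_offset(mm, addr);
   *	p4d_t *p4d = p4d_offset(pgd, addr);
   *	pud_t *pud = pud_offset(p4d, addr);
   *	pmd_t *pmd = pmd_offset(pud, addr);
   *	pte_t *pte = pte_offset_kernel(pmd, addr);
   *
   * (p*d_none()/p*d_bad() checks omitted for brevity).
   */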
  #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
  extern int ptep_set_access_flags(struct vm_area_struct *vma,
  				 unsigned long address, pte_t *ptep,
  				 pte_t entry, int dirty);
  #endif
  
  #ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  extern int pmdp_set_access_flags(struct vm_area_struct *vma,
  				 unsigned long address, pmd_t *pmdp,
  				 pmd_t entry, int dirty);
  extern int pudp_set_access_flags(struct vm_area_struct *vma,
  				 unsigned long address, pud_t *pudp,
  				 pud_t entry, int dirty);
  #else
  static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
  					unsigned long address, pmd_t *pmdp,
  					pmd_t entry, int dirty)
  {
  	BUILD_BUG();
  	return 0;
  }
  static inline int pudp_set_access_flags(struct vm_area_struct *vma,
  					unsigned long address, pud_t *pudp,
  					pud_t entry, int dirty)
  {
  	BUILD_BUG();
  	return 0;
  }
  #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
  #endif
  
  #ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
  static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
  					    unsigned long address,
  					    pte_t *ptep)
  {
  	pte_t pte = *ptep;
  	int r = 1;
  	if (!pte_young(pte))
  		r = 0;
  	else
  		set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte));
  	return r;
  }
  #endif
  
  #ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
  					    unsigned long address,
  					    pmd_t *pmdp)
  {
  	pmd_t pmd = *pmdp;
  	int r = 1;
  	if (!pmd_young(pmd))
  		r = 0;
  	else
  		set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd));
  	return r;
  }
  #else
  static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
  					    unsigned long address,
  					    pmd_t *pmdp)
  {
  	BUILD_BUG();
  	return 0;
  }
  #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
  #endif
  
  #ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
  int ptep_clear_flush_young(struct vm_area_struct *vma,
  			   unsigned long address, pte_t *ptep);
  #endif
  
  #ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
  				  unsigned long address, pmd_t *pmdp);
  #else
  /*
   * Despite being relevant only to THP, this API is called from generic rmap
   * code under PageTransHuge(), hence it needs a dummy implementation for !THP.
   */
  static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
  					 unsigned long address, pmd_t *pmdp)
  {
  	BUILD_BUG();
  	return 0;
  }
  #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
  #endif
  #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
  static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
  				       unsigned long address,
  				       pte_t *ptep)
  {
  	pte_t pte = *ptep;
  	pte_clear(mm, address, ptep);
  	return pte;
  }
  #endif
  #ifndef __HAVE_ARCH_PTEP_GET
  static inline pte_t ptep_get(pte_t *ptep)
  {
  	return READ_ONCE(*ptep);
  }
  #endif
  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  #ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
  static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
  					    unsigned long address,
  					    pmd_t *pmdp)
  {
  	pmd_t pmd = *pmdp;
  	pmd_clear(pmdp);
  	return pmd;
  }
  #endif /* __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR */
  #ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
  static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
  					    unsigned long address,
  					    pud_t *pudp)
  {
  	pud_t pud = *pudp;
  
  	pud_clear(pudp);
  	return pud;
  }
  #endif /* __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR */
  #endif /* CONFIG_TRANSPARENT_HUGEPAGE */

  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  #ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
  static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
  					    unsigned long address, pmd_t *pmdp,
  					    int full)
  {
  	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
  }
  #endif
  #ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR_FULL
  static inline pud_t pudp_huge_get_and_clear_full(struct mm_struct *mm,
  					    unsigned long address, pud_t *pudp,
  					    int full)
  {
  	return pudp_huge_get_and_clear(mm, address, pudp);
  }
  #endif
  #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
  #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
  static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
  					    unsigned long address, pte_t *ptep,
  					    int full)
  {
  	pte_t pte;
  	pte = ptep_get_and_clear(mm, address, ptep);
  	return pte;
  }
  #endif
  
  /*
   * If two threads concurrently fault at the same page, the thread that
   * won the race updates the PTE and its local TLB/cache. The other thread
   * gives up, simply does nothing, and continues; on architectures where
   * software can update the TLB, the local TLB can be updated here to avoid
   * the next page fault. This function updates the TLB only, and does nothing
   * with the cache or anything else; that is what distinguishes it from
   * update_mmu_cache().
   */
  #ifndef __HAVE_ARCH_UPDATE_MMU_TLB
  static inline void update_mmu_tlb(struct vm_area_struct *vma,
  				unsigned long address, pte_t *ptep)
  {
  }
  #define __HAVE_ARCH_UPDATE_MMU_TLB
  #endif
  /*
   * Some architectures may be able to avoid expensive synchronization
   * primitives when modifications are made to PTEs which are already
   * not present, or during destruction of the address space.
   */
  #ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
  static inline void pte_clear_not_present_full(struct mm_struct *mm,
  					      unsigned long address,
  					      pte_t *ptep,
  					      int full)
  {
  	pte_clear(mm, address, ptep);
  }
  #endif
  #ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
  extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
  			      unsigned long address,
  			      pte_t *ptep);
  #endif
  #ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
  extern pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
  			      unsigned long address,
  			      pmd_t *pmdp);
  extern pud_t pudp_huge_clear_flush(struct vm_area_struct *vma,
  			      unsigned long address,
  			      pud_t *pudp);
  #endif
  
  #ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
  struct mm_struct;
  static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
  {
  	pte_t old_pte = *ptep;
  	set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
  }
  #endif
  /*
   * On some architectures the hardware does not set the page access bit when
   * a memory page is accessed; it is the responsibility of software to set it,
   * which adds an extra page-fault penalty to track the access bit. As an
   * optimization, the access bit can be set during every page-fault flow on
   * these architectures. To differentiate it from the pte_mkyoung macro, this
   * macro is used on platforms where software maintains the page access bit.
   */
  #ifndef pte_sw_mkyoung
  static inline pte_t pte_sw_mkyoung(pte_t pte)
  {
  	return pte;
  }
  #define pte_sw_mkyoung	pte_sw_mkyoung
  #endif
  #ifndef pte_savedwrite
  #define pte_savedwrite pte_write
  #endif
  
  #ifndef pte_mk_savedwrite
  #define pte_mk_savedwrite pte_mkwrite
  #endif
  #ifndef pte_clear_savedwrite
  #define pte_clear_savedwrite pte_wrprotect
  #endif
  #ifndef pmd_savedwrite
  #define pmd_savedwrite pmd_write
  #endif
  
  #ifndef pmd_mk_savedwrite
  #define pmd_mk_savedwrite pmd_mkwrite
  #endif
  #ifndef pmd_clear_savedwrite
  #define pmd_clear_savedwrite pmd_wrprotect
  #endif
  #ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT
  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  static inline void pmdp_set_wrprotect(struct mm_struct *mm,
  				      unsigned long address, pmd_t *pmdp)
  {
  	pmd_t old_pmd = *pmdp;
  	set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd));
  }
  #else
  static inline void pmdp_set_wrprotect(struct mm_struct *mm,
  				      unsigned long address, pmd_t *pmdp)
  {
  	BUILD_BUG();
  }
  #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
  #endif
  #ifndef __HAVE_ARCH_PUDP_SET_WRPROTECT
  #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
  static inline void pudp_set_wrprotect(struct mm_struct *mm,
  				      unsigned long address, pud_t *pudp)
  {
  	pud_t old_pud = *pudp;
  
  	set_pud_at(mm, address, pudp, pud_wrprotect(old_pud));
  }
  #else
  static inline void pudp_set_wrprotect(struct mm_struct *mm,
  				      unsigned long address, pud_t *pudp)
  {
  	BUILD_BUG();
  }
  #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
  #endif

  #ifndef pmdp_collapse_flush
  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
  				 unsigned long address, pmd_t *pmdp);
  #else
  static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
  					unsigned long address,
  					pmd_t *pmdp)
  {
  	BUILD_BUG();
  	return *pmdp;
  }
  #define pmdp_collapse_flush pmdp_collapse_flush
  #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
  #endif
  #ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
  extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
  				       pgtable_t pgtable);
  #endif
  
  #ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
  extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
  #endif
  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  /*
   * This is an implementation of pmdp_establish() that is only suitable for an
   * architecture that doesn't have hardware dirty/accessed bits. In this case we
   * can't race with the CPU setting these bits, so a non-atomic approach is fine.
   */
  static inline pmd_t generic_pmdp_establish(struct vm_area_struct *vma,
  		unsigned long address, pmd_t *pmdp, pmd_t pmd)
  {
  	pmd_t old_pmd = *pmdp;
  	set_pmd_at(vma->vm_mm, address, pmdp, pmd);
  	return old_pmd;
  }
  #endif
  #ifndef __HAVE_ARCH_PMDP_INVALIDATE
  extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
  			    pmd_t *pmdp);
  #endif
  #ifndef __HAVE_ARCH_PTE_SAME
  static inline int pte_same(pte_t pte_a, pte_t pte_b)
  {
  	return pte_val(pte_a) == pte_val(pte_b);
  }
  #endif
  #ifndef __HAVE_ARCH_PTE_UNUSED
  /*
   * Some architectures provide facilities to virtualization guests
   * so that they can flag allocated pages as unused. This allows the
   * host to transparently reclaim unused pages. This function returns
   * whether the pte's page is unused.
   */
  static inline int pte_unused(pte_t pte)
  {
  	return 0;
  }
  #endif
  #ifndef pte_access_permitted
  #define pte_access_permitted(pte, write) \
  	(pte_present(pte) && (!(write) || pte_write(pte)))
  #endif
  
  #ifndef pmd_access_permitted
  #define pmd_access_permitted(pmd, write) \
  	(pmd_present(pmd) && (!(write) || pmd_write(pmd)))
  #endif
  
  #ifndef pud_access_permitted
  #define pud_access_permitted(pud, write) \
  	(pud_present(pud) && (!(write) || pud_write(pud)))
  #endif
  
  #ifndef p4d_access_permitted
  #define p4d_access_permitted(p4d, write) \
  	(p4d_present(p4d) && (!(write) || p4d_write(p4d)))
  #endif
  
  #ifndef pgd_access_permitted
  #define pgd_access_permitted(pgd, write) \
  	(pgd_present(pgd) && (!(write) || pgd_write(pgd)))
  #endif
  #ifndef __HAVE_ARCH_PMD_SAME
  static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
  {
  	return pmd_val(pmd_a) == pmd_val(pmd_b);
  }
  
  static inline int pud_same(pud_t pud_a, pud_t pud_b)
  {
  	return pud_val(pud_a) == pud_val(pud_b);
  }
  #endif
  #ifndef __HAVE_ARCH_P4D_SAME
  static inline int p4d_same(p4d_t p4d_a, p4d_t p4d_b)
  {
  	return p4d_val(p4d_a) == p4d_val(p4d_b);
  }
  #endif
  
  #ifndef __HAVE_ARCH_PGD_SAME
  static inline int pgd_same(pgd_t pgd_a, pgd_t pgd_b)
  {
  	return pgd_val(pgd_a) == pgd_val(pgd_b);
  }
  #endif
  /*
   * Use set_p*_safe(), and elide TLB flushing, when confident that *no*
   * TLB flush will be required as a result of the "set". For example, use
   * in scenarios where it is known ahead of time that the routine is
   * setting non-present entries, or re-setting an existing entry to the
   * same value. Otherwise, use the typical "set" helpers and flush the
   * TLB.
   */
  #define set_pte_safe(ptep, pte) \
  ({ \
  	WARN_ON_ONCE(pte_present(*ptep) && !pte_same(*ptep, pte)); \
  	set_pte(ptep, pte); \
  })
  
  #define set_pmd_safe(pmdp, pmd) \
  ({ \
  	WARN_ON_ONCE(pmd_present(*pmdp) && !pmd_same(*pmdp, pmd)); \
  	set_pmd(pmdp, pmd); \
  })
  
  #define set_pud_safe(pudp, pud) \
  ({ \
  	WARN_ON_ONCE(pud_present(*pudp) && !pud_same(*pudp, pud)); \
  	set_pud(pudp, pud); \
  })
  
  #define set_p4d_safe(p4dp, p4d) \
  ({ \
  	WARN_ON_ONCE(p4d_present(*p4dp) && !p4d_same(*p4dp, p4d)); \
  	set_p4d(p4dp, p4d); \
  })
  
  #define set_pgd_safe(pgdp, pgd) \
  ({ \
  	WARN_ON_ONCE(pgd_present(*pgdp) && !pgd_same(*pgdp, pgd)); \
  	set_pgd(pgdp, pgd); \
  })
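
  /*
   * Illustrative sketch (not part of this header; pfn_pte()/PAGE_KERNEL are
   * used only as an example): boot-time code that fills a previously
   * non-present kernel mapping can use the _safe variants and skip the flush,
   * e.g.
   *
   *	set_pte_safe(ptep, pfn_pte(pfn, PAGE_KERNEL));
   *
   * whereas changing a live translation must use set_pte() and flush the TLB.
   */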
  #ifndef __HAVE_ARCH_DO_SWAP_PAGE
  /*
   * Some architectures support metadata associated with a page. When a
   * page is being swapped out, this metadata must be saved so it can be
   * restored when the page is swapped back in. SPARC M7 and newer
   * processors support an ADI (Application Data Integrity) tag for the
   * page as metadata for the page. arch_do_swap_page() can restore this
   * metadata when a page is swapped back in.
   */
  static inline void arch_do_swap_page(struct mm_struct *mm,
  				     struct vm_area_struct *vma,
  				     unsigned long addr,
  				     pte_t pte, pte_t oldpte)
  {
  
  }
  #endif
  
  #ifndef __HAVE_ARCH_UNMAP_ONE
  /*
   * Some architectures support metadata associated with a page. When a
   * page is being swapped out, this metadata must be saved so it can be
   * restored when the page is swapped back in. SPARC M7 and newer
   * processors support an ADI (Application Data Integrity) tag for the
   * page as metadata for the page. arch_unmap_one() can save this
   * metadata on a swap-out of a page.
   */
  static inline int arch_unmap_one(struct mm_struct *mm,
  				  struct vm_area_struct *vma,
  				  unsigned long addr,
  				  pte_t orig_pte)
  {
  	return 0;
  }
  #endif
  /*
   * Allow architectures to preserve additional metadata associated with
   * swapped-out pages. The corresponding __HAVE_ARCH_SWAP_* macros and function
   * prototypes must be defined in the arch-specific asm/pgtable.h file.
   */
  #ifndef __HAVE_ARCH_PREPARE_TO_SWAP
  static inline int arch_prepare_to_swap(struct page *page)
  {
  	return 0;
  }
  #endif
  
  #ifndef __HAVE_ARCH_SWAP_INVALIDATE
  static inline void arch_swap_invalidate_page(int type, pgoff_t offset)
  {
  }
  
  static inline void arch_swap_invalidate_area(int type)
  {
  }
  #endif
  
  #ifndef __HAVE_ARCH_SWAP_RESTORE
  static inline void arch_swap_restore(swp_entry_t entry, struct page *page)
  {
  }
  #endif
  #ifndef __HAVE_ARCH_PGD_OFFSET_GATE
  #define pgd_offset_gate(mm, addr)	pgd_offset(mm, addr)
  #endif
  #ifndef __HAVE_ARCH_MOVE_PTE
  #define move_pte(pte, prot, old_addr, new_addr)	(pte)
  #endif
  #ifndef pte_accessible
  # define pte_accessible(mm, pte)	((void)(pte), 1)
  #endif
  #ifndef flush_tlb_fix_spurious_fault
  #define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address)
  #endif
  /*
   * When walking page tables, get the address of the next boundary,
   * or the end address of the range if that comes earlier.  Although no
   * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.
   */
  #define pgd_addr_end(addr, end)						\
  ({	unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;	\
  	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
  })

  #ifndef p4d_addr_end
  #define p4d_addr_end(addr, end)						\
  ({	unsigned long __boundary = ((addr) + P4D_SIZE) & P4D_MASK;	\
  	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
  })
  #endif
  #ifndef pud_addr_end
  #define pud_addr_end(addr, end)						\
  ({	unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK;	\
  	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
  })
  #endif
  
  #ifndef pmd_addr_end
  #define pmd_addr_end(addr, end)						\
  ({	unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK;	\
  	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
  })
  #endif
  /*
   * When walking page tables, we usually want to skip any p?d_none entries;
   * and any p?d_bad entries - reporting the error before resetting to none.
   * Do the tests inline, but report and clear the bad entry in mm/memory.c.
   */
  void pgd_clear_bad(pgd_t *);
  
  #ifndef __PAGETABLE_P4D_FOLDED
  void p4d_clear_bad(p4d_t *);
  #else
  #define p4d_clear_bad(p4d)        do { } while (0)
  #endif
  
  #ifndef __PAGETABLE_PUD_FOLDED
  void pud_clear_bad(pud_t *);
  #else
  #define pud_clear_bad(pud)        do { } while (0)
  #endif
  void pmd_clear_bad(pmd_t *);
  
  static inline int pgd_none_or_clear_bad(pgd_t *pgd)
  {
  	if (pgd_none(*pgd))
  		return 1;
  	if (unlikely(pgd_bad(*pgd))) {
  		pgd_clear_bad(pgd);
  		return 1;
  	}
  	return 0;
  }
  static inline int p4d_none_or_clear_bad(p4d_t *p4d)
  {
  	if (p4d_none(*p4d))
  		return 1;
  	if (unlikely(p4d_bad(*p4d))) {
  		p4d_clear_bad(p4d);
  		return 1;
  	}
  	return 0;
  }
  static inline int pud_none_or_clear_bad(pud_t *pud)
  {
  	if (pud_none(*pud))
  		return 1;
  	if (unlikely(pud_bad(*pud))) {
  		pud_clear_bad(pud);
  		return 1;
  	}
  	return 0;
  }
  
  static inline int pmd_none_or_clear_bad(pmd_t *pmd)
  {
  	if (pmd_none(*pmd))
  		return 1;
  	if (unlikely(pmd_bad(*pmd))) {
  		pmd_clear_bad(pmd);
  		return 1;
  	}
  	return 0;
  }

  static inline pte_t __ptep_modify_prot_start(struct vm_area_struct *vma,
  					     unsigned long addr,
  					     pte_t *ptep)
  {
  	/*
  	 * Get the current pte state, but zero it out to make it
  	 * non-present, preventing the hardware from asynchronously
  	 * updating it.
  	 */
  	return ptep_get_and_clear(vma->vm_mm, addr, ptep);
  }
  static inline void __ptep_modify_prot_commit(struct vm_area_struct *vma,
  					     unsigned long addr,
  					     pte_t *ptep, pte_t pte)
  {
  	/*
  	 * The pte is non-present, so there's no hardware state to
  	 * preserve.
  	 */
  	set_pte_at(vma->vm_mm, addr, ptep, pte);
  }
  
  #ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
  /*
   * Start a pte protection read-modify-write transaction, which
   * protects against asynchronous hardware modifications to the pte.
   * The intention is not to prevent the hardware from making pte
   * updates, but to prevent any updates it may make from being lost.
   *
   * This does not protect against other software modifications of the
   * pte; the appropriate pte lock must be held over the transaction.
   *
   * Note that this interface is intended to be batchable, meaning that
   * ptep_modify_prot_commit may not actually update the pte, but merely
   * queue the update to be done at some later time.  The update must be
   * actually committed before the pte lock is released, however.
   */
  static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
  					   unsigned long addr,
  					   pte_t *ptep)
  {
  	return __ptep_modify_prot_start(vma, addr, ptep);
  }
  
  /*
   * Commit an update to a pte, leaving any hardware-controlled bits in
   * the PTE unmodified.
   */
  static inline void ptep_modify_prot_commit(struct vm_area_struct *vma,
  					   unsigned long addr,
  					   pte_t *ptep, pte_t old_pte, pte_t pte)
  {
  	__ptep_modify_prot_commit(vma, addr, ptep, pte);
  }
  #endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
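
  /*
   * Illustrative sketch (not part of this header): a protection change done
   * under the pte lock is roughly
   *
   *	oldpte = ptep_modify_prot_start(vma, addr, ptep);
   *	newpte = pte_modify(oldpte, newprot);
   *	ptep_modify_prot_commit(vma, addr, ptep, oldpte, newpte);
   *
   * which is the pattern mprotect()'s pte walker follows.
   */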
  #endif /* CONFIG_MMU */

  /*
   * No-op macros that just return the current protection value. Defined here
   * because these macros can be used even if CONFIG_MMU is not defined.
   */
  
  #ifndef pgprot_nx
  #define pgprot_nx(prot)	(prot)
  #endif
  
  #ifndef pgprot_noncached
  #define pgprot_noncached(prot)	(prot)
  #endif
  
  #ifndef pgprot_writecombine
  #define pgprot_writecombine pgprot_noncached
  #endif
  
  #ifndef pgprot_writethrough
  #define pgprot_writethrough pgprot_noncached
  #endif
  
  #ifndef pgprot_device
  #define pgprot_device pgprot_noncached
  #endif
  
  #ifdef CONFIG_MMU
  #ifndef pgprot_modify
  #define pgprot_modify pgprot_modify
  static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
  {
  	if (pgprot_val(oldprot) == pgprot_val(pgprot_noncached(oldprot)))
  		newprot = pgprot_noncached(newprot);
  	if (pgprot_val(oldprot) == pgprot_val(pgprot_writecombine(oldprot)))
  		newprot = pgprot_writecombine(newprot);
  	if (pgprot_val(oldprot) == pgprot_val(pgprot_device(oldprot)))
  		newprot = pgprot_device(newprot);
  	return newprot;
  }
  #endif
  #endif /* CONFIG_MMU */
  #ifndef pgprot_encrypted
  #define pgprot_encrypted(prot)	(prot)
  #endif
  
  #ifndef pgprot_decrypted
  #define pgprot_decrypted(prot)	(prot)
  #endif
  
  /*
   * A facility to provide lazy MMU batching.  This allows PTE updates and
   * page invalidations to be delayed until a call to leave lazy MMU mode
   * is issued.  Some architectures may benefit from doing this, and it is
   * beneficial for both shadow and direct mode hypervisors, which may batch
   * the PTE updates which happen during this window.  Note that using this
   * interface requires that read hazards be removed from the code.  A read
   * hazard could result in the direct mode hypervisor case, since the actual
   * write to the page tables may not yet have taken place, so reads through
   * a raw PTE pointer after it has been modified are not guaranteed to be
   * up to date.  This mode can only be entered and left under the protection of
   * the page table locks for all page tables which may be modified.  In the UP
   * case, this is required so that preemption is disabled, and in the SMP case,
   * it must synchronize the delayed page table writes properly on other CPUs.
   */
  #ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
  #define arch_enter_lazy_mmu_mode()	do {} while (0)
  #define arch_leave_lazy_mmu_mode()	do {} while (0)
  #define arch_flush_lazy_mmu_mode()	do {} while (0)
  #endif
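
  /*
   * Illustrative sketch (not part of this header): a batched update done under
   * the page table lock is bracketed like
   *
   *	arch_enter_lazy_mmu_mode();
   *	for (; addr != end; ptep++, addr += PAGE_SIZE)
   *		set_pte_at(mm, addr, ptep, pte_wrprotect(*ptep));
   *	arch_leave_lazy_mmu_mode();
   *
   * and a paravirt backend is free to defer the individual writes until the
   * leave call.
   */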
  
  /*
   * A facility to provide batching of the reload of page tables and
   * other process state with the actual context switch code for
   * paravirtualized guests.  By convention, only one of the batched
   * update (lazy) modes (CPU, MMU) should be active at any given time,
   * entry should never be nested, and entry and exits should always be
   * paired.  This is for sanity of maintaining and reasoning about the
   * kernel code.  In this case, the exit (end of the context switch) is
   * in architecture-specific code, and so doesn't need a generic
   * definition.
   */
  #ifndef __HAVE_ARCH_START_CONTEXT_SWITCH
  #define arch_start_context_switch(prev)	do {} while (0)
  #endif
  #ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
  #ifndef CONFIG_ARCH_ENABLE_THP_MIGRATION
  static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
  {
  	return pmd;
  }
  
  static inline int pmd_swp_soft_dirty(pmd_t pmd)
  {
  	return 0;
  }
  
  static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
  {
  	return pmd;
  }
  #endif
  #else /* !CONFIG_HAVE_ARCH_SOFT_DIRTY */
  static inline int pte_soft_dirty(pte_t pte)
  {
  	return 0;
  }
  
  static inline int pmd_soft_dirty(pmd_t pmd)
  {
  	return 0;
  }
  
  static inline pte_t pte_mksoft_dirty(pte_t pte)
  {
  	return pte;
  }
  
  static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
  {
  	return pmd;
  }

  static inline pte_t pte_clear_soft_dirty(pte_t pte)
  {
  	return pte;
  }
  
  static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
  {
  	return pmd;
  }
  static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
  {
  	return pte;
  }
  
  static inline int pte_swp_soft_dirty(pte_t pte)
  {
  	return 0;
  }
  
  static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
  {
  	return pte;
  }
  
  static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
  {
  	return pmd;
  }
  
  static inline int pmd_swp_soft_dirty(pmd_t pmd)
  {
  	return 0;
  }
  
  static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
  {
  	return pmd;
  }
  #endif
  #ifndef __HAVE_PFNMAP_TRACKING
  /*
   * Interfaces that can be used by architecture code to keep track of
   * memory type of pfn mappings specified by the remap_pfn_range,
   * vmf_insert_pfn.
   */
  
  /*
   * track_pfn_remap is called when a _new_ pfn mapping is being established
   * by remap_pfn_range() for physical range indicated by pfn and size.
   */
  static inline int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
  				  unsigned long pfn, unsigned long addr,
  				  unsigned long size)
  {
  	return 0;
  }
  
  /*
   * track_pfn_insert is called when a _new_ single pfn is established
   * by vmf_insert_pfn().
   */
  static inline void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
  				    pfn_t pfn)
  {
  }
  
  /*
   * track_pfn_copy is called when a vma covering the pfnmap gets
   * copied through copy_page_range().
   */
  static inline int track_pfn_copy(struct vm_area_struct *vma)
  {
  	return 0;
  }
  
  /*
   * untrack_pfn is called while unmapping a pfnmap for a region.
   * untrack can be called for a specific region indicated by pfn and size or
   * can be for the entire vma (in which case pfn, size are zero).
   */
  static inline void untrack_pfn(struct vm_area_struct *vma,
  			       unsigned long pfn, unsigned long size)
  {
  }
  
  /*
   * untrack_pfn_moved is called while mremapping a pfnmap for a new region.
   */
  static inline void untrack_pfn_moved(struct vm_area_struct *vma)
  {
  }
  #else
  extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
  			   unsigned long pfn, unsigned long addr,
  			   unsigned long size);
  extern void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
  			     pfn_t pfn);
  extern int track_pfn_copy(struct vm_area_struct *vma);
  extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
  			unsigned long size);
  extern void untrack_pfn_moved(struct vm_area_struct *vma);
  #endif
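
  /*
   * Illustrative sketch (not part of this header): remap_pfn_range() is the
   * canonical caller of track_pfn_remap(), roughly
   *
   *	err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size));
   *	if (err)
   *		return err;
   *	... install the ptes with the possibly adjusted prot ...
   *
   * and untrack_pfn() undoes the reservation when the mapping is torn down.
   */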
  #ifdef __HAVE_COLOR_ZERO_PAGE
  static inline int is_zero_pfn(unsigned long pfn)
  {
  	extern unsigned long zero_pfn;
  	unsigned long offset_from_zero_pfn = pfn - zero_pfn;
  	return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
  }
  #define my_zero_pfn(addr)	page_to_pfn(ZERO_PAGE(addr))
  #else
  static inline int is_zero_pfn(unsigned long pfn)
  {
  	extern unsigned long zero_pfn;
  	return pfn == zero_pfn;
  }
  
  static inline unsigned long my_zero_pfn(unsigned long addr)
  {
  	extern unsigned long zero_pfn;
  	return zero_pfn;
  }
  #endif
  #ifdef CONFIG_MMU
  #ifndef CONFIG_TRANSPARENT_HUGEPAGE
  static inline int pmd_trans_huge(pmd_t pmd)
  {
  	return 0;
  }
  #ifndef pmd_write
  static inline int pmd_write(pmd_t pmd)
  {
  	BUG();
  	return 0;
  }
  #endif /* pmd_write */
  #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
  #ifndef pud_write
  static inline int pud_write(pud_t pud)
  {
  	BUG();
  	return 0;
  }
  #endif /* pud_write */
  #if !defined(CONFIG_ARCH_HAS_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
  static inline int pmd_devmap(pmd_t pmd)
  {
  	return 0;
  }
  static inline int pud_devmap(pud_t pud)
  {
  	return 0;
  }
  static inline int pgd_devmap(pgd_t pgd)
  {
  	return 0;
  }
  #endif
  #if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \
  	(defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
  	 !defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD))
  static inline int pud_trans_huge(pud_t pud)
  {
  	return 0;
  }
  #endif
  /* See pmd_none_or_trans_huge_or_clear_bad for discussion. */
  static inline int pud_none_or_trans_huge_or_dev_or_clear_bad(pud_t *pud)
  {
  	pud_t pudval = READ_ONCE(*pud);
  
  	if (pud_none(pudval) || pud_trans_huge(pudval) || pud_devmap(pudval))
  		return 1;
  	if (unlikely(pud_bad(pudval))) {
  		pud_clear_bad(pud);
  		return 1;
  	}
  	return 0;
  }
  
  /* See pmd_trans_unstable for discussion. */
  static inline int pud_trans_unstable(pud_t *pud)
  {
  #if defined(CONFIG_TRANSPARENT_HUGEPAGE) &&			\
  	defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
  	return pud_none_or_trans_huge_or_dev_or_clear_bad(pud);
  #else
  	return 0;
  #endif
  }
  #ifndef pmd_read_atomic
  static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
  {
  	/*
  	 * Depend on compiler for an atomic pmd read. NOTE: this is
	 * only going to work if the pmdval_t isn't larger than
  	 * an unsigned long.
  	 */
  	return *pmdp;
  }
  #endif
  #ifndef arch_needs_pgtable_deposit
  #define arch_needs_pgtable_deposit() (false)
  #endif
  /*
   * This function is meant to be used by sites walking pagetables with
   * the mmap_lock held in read mode to protect against MADV_DONTNEED and
   * transhuge page faults. MADV_DONTNEED can convert a transhuge pmd
   * into a null pmd and the transhuge page fault can convert a null pmd
   * into a hugepmd or into a regular pmd (if the hugepage allocation
   * fails). While holding the mmap_lock in read mode the pmd becomes
   * stable and stops changing under us only if it's not null and not a
   * transhuge pmd. When those races occur and this function makes a
   * difference vs the standard pmd_none_or_clear_bad, the result is
   * undefined so behaving like if the pmd was none is safe (because it
   * can return none anyway). The compiler level barrier() is critically
   * important to compute the two checks atomically on the same pmdval.
   *
   * For 32bit kernels with a 64bit large pmd_t this automatically takes
   * care of reading the pmd atomically to avoid SMP race conditions
   * against pmd_populate() when the mmap_lock is held for reading by the
   * caller (a special atomic read not done by "gcc" as in the generic
   * version above, is also needed when THP is disabled because the page
   * fault can populate the pmd from under us).
   */
  static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
  {
  	pmd_t pmdval = pmd_read_atomic(pmd);
  	/*
  	 * The barrier will stabilize the pmdval in a register or on
  	 * the stack so that it will stop changing under the code.
  	 *
  	 * When CONFIG_TRANSPARENT_HUGEPAGE=y on x86 32bit PAE,
  	 * pmd_read_atomic is allowed to return a not atomic pmdval
	 * (for example pointing to a hugepage that has never been
  	 * mapped in the pmd). The below checks will only care about
  	 * the low part of the pmd with 32bit PAE x86 anyway, with the
  	 * exception of pmd_none(). So the important thing is that if
  	 * the low part of the pmd is found null, the high part will
  	 * be also null or the pmd_none() check below would be
  	 * confused.
  	 */
  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  	barrier();
  #endif
  	/*
  	 * !pmd_present() checks for pmd migration entries
  	 *
  	 * The complete check uses is_pmd_migration_entry() in linux/swapops.h
	 * But using that requires moving the current function and pmd_trans_unstable()
	 * to linux/swapops.h to resolve the dependency, which is too much code movement.
  	 *
  	 * !pmd_present() is equivalent to is_pmd_migration_entry() currently,
  	 * because !pmd_present() pages can only be under migration not swapped
  	 * out.
  	 *
	 * pmd_none() is preserved for future condition checks on pmd migration
	 * entries, and to avoid confusion with this function's name, although it is
  	 * redundant with !pmd_present().
  	 */
  	if (pmd_none(pmdval) || pmd_trans_huge(pmdval) ||
  		(IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION) && !pmd_present(pmdval)))
  		return 1;
  	if (unlikely(pmd_bad(pmdval))) {
  		pmd_clear_bad(pmd);
  		return 1;
  	}
  	return 0;
  }
  
  /*
   * This is a noop if Transparent Hugepage Support is not built into
   * the kernel. Otherwise it is equivalent to
   * pmd_none_or_trans_huge_or_clear_bad(), and shall only be called in
   * places that already verified the pmd is not none and they want to
   * walk ptes while holding the mmap sem in read mode (write mode doesn't
   * need this). If THP is not enabled, the pmd can't go away under the
   * code even if MADV_DONTNEED runs, but if THP is enabled we need to
   * run a pmd_trans_unstable before walking the ptes after
   * split_huge_pmd returns (because it may have run when the pmd became
   * null, but then a page fault can map in a THP and not a regular page).
   */
  static inline int pmd_trans_unstable(pmd_t *pmd)
  {
  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  	return pmd_none_or_trans_huge_or_clear_bad(pmd);
  #else
  	return 0;
  #endif
  }
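
  /*
   * Illustrative sketch (not part of this header): a pte walker that has
   * already checked pmd_none() revalidates the pmd before mapping the pte
   * page, e.g.
   *
   *	if (pmd_trans_unstable(pmd))
   *		return 0;	(treated like pmd_none(); the caller retries or skips)
   *	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
   *
   * as done by several pte walkers under mm/ that hold mmap_lock for read.
   */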
  #ifndef CONFIG_NUMA_BALANCING
  /*
   * Technically a PTE can be PROTNONE even when not doing NUMA balancing but
   * the only case the kernel cares about is NUMA balancing, and it is only ever set
   * when the VMA is accessible. For PROT_NONE VMAs, the PTEs are not marked
   * _PAGE_PROTNONE, so by default the helper is implemented as "always no". It
   * is the responsibility of the caller to distinguish between PROT_NONE
   * protections and NUMA hinting fault protections.
   */
  static inline int pte_protnone(pte_t pte)
  {
  	return 0;
  }
  
  static inline int pmd_protnone(pmd_t pmd)
  {
  	return 0;
  }
  #endif /* CONFIG_NUMA_BALANCING */
  #endif /* CONFIG_MMU */

  #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
  
  #ifndef __PAGETABLE_P4D_FOLDED
  int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot);
  int p4d_clear_huge(p4d_t *p4d);
  #else
  static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
  {
  	return 0;
  }
  static inline int p4d_clear_huge(p4d_t *p4d)
  {
  	return 0;
  }
  #endif /* !__PAGETABLE_P4D_FOLDED */
  int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
  int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
  int pud_clear_huge(pud_t *pud);
  int pmd_clear_huge(pmd_t *pmd);
  int p4d_free_pud_page(p4d_t *p4d, unsigned long addr);
  int pud_free_pmd_page(pud_t *pud, unsigned long addr);
  int pmd_free_pte_page(pmd_t *pmd, unsigned long addr);
  #else	/* !CONFIG_HAVE_ARCH_HUGE_VMAP */
  static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
  {
  	return 0;
  }
  static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
  {
  	return 0;
  }
  static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
  {
  	return 0;
  }
  static inline int p4d_clear_huge(p4d_t *p4d)
  {
  	return 0;
  }
  static inline int pud_clear_huge(pud_t *pud)
  {
  	return 0;
  }
  static inline int pmd_clear_huge(pmd_t *pmd)
  {
  	return 0;
  }
  static inline int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
  {
  	return 0;
  }
  static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr)
  {
  	return 0;
  }
  static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
  {
  	return 0;
  }
  #endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */
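
  /*
   * Example (illustrative sketch only): how an ioremap-style mapper might
   * try to install a PMD-sized leaf and fall back to PTEs.  The loop shape
   * and example_map_ptes() are hypothetical; the real users of these hooks
   * live in the generic vmalloc/ioremap code.
   */
  #if 0	/* illustrative example, kept out of the build */
  static int example_map_pmd_range(pmd_t *pmd, unsigned long addr,
  				 unsigned long end, phys_addr_t phys,
  				 pgprot_t prot)
  {
  	unsigned long next;

  	do {
  		next = pmd_addr_end(addr, end);

  		/* pmd_set_huge() returns non-zero once the leaf is installed. */
  		if (IS_ALIGNED(addr, PMD_SIZE) && IS_ALIGNED(phys, PMD_SIZE) &&
  		    next - addr == PMD_SIZE && pmd_set_huge(pmd, phys, prot)) {
  			phys += PMD_SIZE;
  			continue;
  		}

  		/* Fall back to PTE mappings (hypothetical helper). */
  		if (example_map_ptes(pmd, addr, next, phys, prot))
  			return -ENOMEM;
  		phys += next - addr;
  	} while (pmd++, addr = next, addr != end);

  	return 0;
  }
  #endif
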
  #ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  /*
   * ARCHes with special requirements for evicting THP backing TLB entries can
   * implement this. It can also help optimize a normal TLB flush in the THP
   * regime: stock flush_tlb_range() typically has an optimization to nuke the
   * entire TLB when the flush span is greater than a threshold, which will
   * likely be true for a single huge page. A single THP flush would therefore
   * invalidate the entire TLB, which is not desirable.
   * e.g. see arch/arc: flush_pmd_tlb_range
   */
  #define flush_pmd_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
  #define flush_pud_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
  #else
  #define flush_pmd_tlb_range(vma, addr, end)	BUILD_BUG()
  #define flush_pud_tlb_range(vma, addr, end)	BUILD_BUG()
  #endif
  #endif
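
  /*
   * Example (illustrative sketch only): an architecture that wants a more
   * precise THP flush than the flush_tlb_range() fallback provides its own
   * implementation from <asm/pgtable.h>, along the lines of:
   *
   *	#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
   *	extern void flush_pmd_tlb_range(struct vm_area_struct *vma,
   *					unsigned long start, unsigned long end);
   *
   * (arch/arc does this; the declaration above is only a sketch.)
   */
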
  struct file;
  int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
  			unsigned long size, pgprot_t *vma_prot);
  
  #ifndef CONFIG_X86_ESPFIX64
  static inline void init_espfix_bsp(void) { }
  #endif
  extern void __init pgtable_cache_init(void);

  #ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED
  static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
  {
  	return true;
  }
  
  static inline bool arch_has_pfn_modify_check(void)
  {
  	return false;
  }
  #endif /* !__HAVE_ARCH_PFN_MODIFY_ALLOWED */
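
  /*
   * Example (illustrative sketch only): an architecture that must veto
   * certain pfn/protection combinations (x86 does so for L1TF) defines
   * __HAVE_ARCH_PFN_MODIFY_ALLOWED in <asm/pgtable.h> and supplies its own
   * helpers.  The limit below is made up purely for illustration.
   */
  #if 0	/* illustrative example, kept out of the build */
  #define __HAVE_ARCH_PFN_MODIFY_ALLOWED
  static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
  {
  	/* Hypothetical policy: refuse pfns the platform cannot expose safely. */
  	return pfn <= EXAMPLE_MAX_SAFE_PFN;	/* made-up constant */
  }

  static inline bool arch_has_pfn_modify_check(void)
  {
  	return true;
  }
  #endif
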
  /*
   * Architecture PAGE_KERNEL_* fallbacks
   *
   * Some architectures don't define certain PAGE_KERNEL_* flags. This is either
   * because they really don't support them, or the port needs to be updated to
   * reflect the required functionality. Below is a set of relatively safe,
   * best-effort fallbacks that can be relied on until those architectures
   * define the flags themselves.
   */
  
  #ifndef PAGE_KERNEL_RO
  # define PAGE_KERNEL_RO PAGE_KERNEL
  #endif
  #ifndef PAGE_KERNEL_EXEC
  # define PAGE_KERNEL_EXEC PAGE_KERNEL
  #endif
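
  /*
   * Example (illustrative sketch only): generic code can rely on the
   * fallbacks above and pass PAGE_KERNEL_RO unconditionally; on ports
   * without a distinct read-only protection it simply degrades to
   * PAGE_KERNEL.  vmap() is declared in <linux/vmalloc.h>; the helper
   * name is hypothetical.
   */
  #if 0	/* illustrative example, kept out of the build */
  static void *example_map_pages_ro(struct page **pages, unsigned int count)
  {
  	return vmap(pages, count, VM_MAP, PAGE_KERNEL_RO);
  }
  #endif
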
  /*
   * Page Table Modification bits for pgtbl_mod_mask.
   *
   * These are used by the p?d_alloc_track*() set of functions and in the
   * generic vmalloc/ioremap code to track at which page-table levels entries
   * have been modified. Based on that, the code can better decide when
   * vmalloc and ioremap mapping changes need to be synchronized to other
   * page tables in the system.
   */
  #define		__PGTBL_PGD_MODIFIED	0
  #define		__PGTBL_P4D_MODIFIED	1
  #define		__PGTBL_PUD_MODIFIED	2
  #define		__PGTBL_PMD_MODIFIED	3
  #define		__PGTBL_PTE_MODIFIED	4
  
  #define		PGTBL_PGD_MODIFIED	BIT(__PGTBL_PGD_MODIFIED)
  #define		PGTBL_P4D_MODIFIED	BIT(__PGTBL_P4D_MODIFIED)
  #define		PGTBL_PUD_MODIFIED	BIT(__PGTBL_PUD_MODIFIED)
  #define		PGTBL_PMD_MODIFIED	BIT(__PGTBL_PMD_MODIFIED)
  #define		PGTBL_PTE_MODIFIED	BIT(__PGTBL_PTE_MODIFIED)
  
  /* Page-Table Modification Mask */
  typedef unsigned int pgtbl_mod_mask;
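
  /*
   * Example (illustrative sketch only): the pattern the generic
   * vmalloc/ioremap code follows.  A walker ORs PGTBL_*_MODIFIED bits into
   * a local mask (the p?d_alloc_track() helpers do this when they have to
   * instantiate a missing level) and, once the walk is done, synchronizes
   * other page tables if the architecture asked for it.
   * ARCH_PAGE_TABLE_SYNC_MASK and arch_sync_kernel_mappings() are declared
   * with the vmalloc code; example_walk_and_fill() is hypothetical.
   */
  #if 0	/* illustrative example, kept out of the build */
  static int example_populate(unsigned long start, unsigned long end)
  {
  	pgtbl_mod_mask mask = 0;
  	int err;

  	err = example_walk_and_fill(start, end, &mask);

  	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
  		arch_sync_kernel_mappings(start, end);

  	return err;
  }
  #endif
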
  #endif /* !__ASSEMBLY__ */
  #if !defined(MAX_POSSIBLE_PHYSMEM_BITS) && !defined(CONFIG_64BIT)
  #ifdef CONFIG_PHYS_ADDR_T_64BIT
  /*
   * ZSMALLOC needs to know the highest PFN on 32-bit architectures
   * with physical address space extension, but falls back to
   * BITS_PER_LONG otherwise.
   */
  #error Missing MAX_POSSIBLE_PHYSMEM_BITS definition
  #else
  #define MAX_POSSIBLE_PHYSMEM_BITS 32
  #endif
  #endif
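
  /*
   * Example (illustrative sketch only): a 32-bit port with an extended
   * physical address space resolves the #error above by defining the
   * constant in its own <asm/pgtable.h>, e.g. (value made up here):
   *
   *	#define MAX_POSSIBLE_PHYSMEM_BITS	36
   */
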
  #ifndef has_transparent_hugepage
  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  #define has_transparent_hugepage() 1
  #else
  #define has_transparent_hugepage() 0
  #endif
  #endif
  /*
   * On some architectures, whether the p4d, pud or pmd layer of the page
   * table hierarchy is folded depends on the mm.
   */
  #ifndef mm_p4d_folded
  #define mm_p4d_folded(mm)	__is_defined(__PAGETABLE_P4D_FOLDED)
  #endif
  
  #ifndef mm_pud_folded
  #define mm_pud_folded(mm)	__is_defined(__PAGETABLE_PUD_FOLDED)
  #endif
  
  #ifndef mm_pmd_folded
  #define mm_pmd_folded(mm)	__is_defined(__PAGETABLE_PMD_FOLDED)
  #endif
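
  /*
   * Example (illustrative sketch only): page-table accounting is one user
   * of these predicates.  A level that is folded for this mm consumes no
   * memory, so nothing is accounted for it; mm_inc_nr_puds() in
   * <linux/mm.h> follows this pattern.
   */
  #if 0	/* illustrative example, kept out of the build */
  static inline void example_account_pud(struct mm_struct *mm)
  {
  	if (mm_pud_folded(mm))
  		return;		/* no pud page is actually allocated */
  	atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
  }
  #endif
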
  #ifndef p4d_offset_lockless
  #define p4d_offset_lockless(pgdp, pgd, address) p4d_offset(&(pgd), address)
  #endif
  #ifndef pud_offset_lockless
  #define pud_offset_lockless(p4dp, p4d, address) pud_offset(&(p4d), address)
  #endif
  #ifndef pmd_offset_lockless
  #define pmd_offset_lockless(pudp, pud, address) pmd_offset(&(pud), address)
  #endif
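
  /*
   * The p?d_offset_lockless() variants above take, in addition to the pointer
   * to the upper-level entry, a copy of that entry which the caller has
   * already loaded.  Lockless walkers such as fast GUP read each entry
   * exactly once; architectures with dynamic page-table folding (e.g. s390)
   * can then use the copy to decide how to descend instead of dereferencing
   * the live table again.  The defaults simply forward to the regular
   * p?d_offset() helpers.
   */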
  /*
   * p?d_leaf() - true if this entry is a final mapping to a physical address.
   * This differs from p?d_huge() in that these helpers are always available
   * (if the architecture supports large pages at the appropriate level), even
   * when CONFIG_HUGETLB_PAGE is not defined.
   * Only meaningful when called on a valid entry.
   */
  #ifndef pgd_leaf
  #define pgd_leaf(x)	0
  #endif
  #ifndef p4d_leaf
  #define p4d_leaf(x)	0
  #endif
  #ifndef pud_leaf
  #define pud_leaf(x)	0
  #endif
  #ifndef pmd_leaf
  #define pmd_leaf(x)	0
  #endif
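
  /*
   * Example (illustrative sketch only): a page-table walker can use the
   * p?d_leaf() predicates to stop descending once an entry already maps a
   * physical range.  example_pud_entry() is hypothetical; the real generic
   * walker lives in mm/pagewalk.c.
   */
  #if 0	/* illustrative example, kept out of the build */
  static int example_pud_entry(pud_t *pud, unsigned long addr, unsigned long next)
  {
  	if (pud_leaf(*pud))
  		return 1;	/* huge mapping: handle [addr, next) here, don't descend */

  	return 0;		/* keep walking the pmd level */
  }
  #endif
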
  #endif /* _LINUX_PGTABLE_H */