include/linux/pgtable.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PGTABLE_H
#define _LINUX_PGTABLE_H

#include <linux/pfn.h>
#include <asm/pgtable.h>

#ifndef __ASSEMBLY__
#ifdef CONFIG_MMU

#include <linux/mm_types.h>
#include <linux/bug.h>
#include <linux/errno.h>
#include <asm-generic/pgtable_uffd.h>

#if 5 - defined(__PAGETABLE_P4D_FOLDED) - defined(__PAGETABLE_PUD_FOLDED) - \
	defined(__PAGETABLE_PMD_FOLDED) != CONFIG_PGTABLE_LEVELS
#error CONFIG_PGTABLE_LEVELS is not consistent with __PAGETABLE_{P4D,PUD,PMD}_FOLDED
#endif

/*
 * On almost all architectures and configurations, 0 can be used as the
 * upper ceiling to free_pgtables(): on many architectures it has the same
 * effect as using TASK_SIZE. However, there is one configuration which
 * must impose a more careful limit, to avoid freeing kernel pgtables.
 */
#ifndef USER_PGTABLES_CEILING
#define USER_PGTABLES_CEILING	0UL
#endif

/*
 * This defines the first usable user address. Platforms
 * can override its value with a custom FIRST_USER_ADDRESS
 * defined in their respective <asm/pgtable.h>.
 */
#ifndef FIRST_USER_ADDRESS
#define FIRST_USER_ADDRESS	0UL
#endif

/*
 * This defines the generic helper for accessing the PMD page
 * table page; platforms can still override it via their
 * respective <asm/pgtable.h>.
 */
#ifndef pmd_pgtable
#define pmd_pgtable(pmd) pmd_page(pmd)
#endif

/*
 * A page table page can be thought of as an array like this:
 * pXd_t[PTRS_PER_PxD]
 *
 * The pXx_index() functions return the index of the entry in the page
 * table page which would control the given virtual address.
 *
 * As these functions may be used by the same code for different levels of
 * the page table folding, they are always available, regardless of the
 * CONFIG_PGTABLE_LEVELS value. For the folded levels they simply return 0
 * because in such cases PTRS_PER_PxD equals 1.
 */

static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}
#define pte_index pte_index

#ifndef pmd_index
static inline unsigned long pmd_index(unsigned long address)
{
	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}
#define pmd_index pmd_index
#endif

#ifndef pud_index
static inline unsigned long pud_index(unsigned long address)
{
	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}
#define pud_index pud_index
#endif

#ifndef pgd_index
/* Must be a compile-time constant, so implement it as a macro */
#define pgd_index(a)  (((a) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
#endif

#ifndef pte_offset_kernel
static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}
#define pte_offset_kernel pte_offset_kernel
#endif

#if defined(CONFIG_HIGHPTE)
#define pte_offset_map(dir, address)				\
	((pte_t *)kmap_atomic(pmd_page(*(dir))) +		\
	 pte_index((address)))
#define pte_unmap(pte) kunmap_atomic((pte))
#else
#define pte_offset_map(dir, address)	pte_offset_kernel((dir), (address))
#define pte_unmap(pte) ((void)(pte))	/* NOP */
#endif

/* Find an entry in the second-level page table.. */
#ifndef pmd_offset
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return pud_pgtable(*pud) + pmd_index(address);
}
#define pmd_offset pmd_offset
#endif

#ifndef pud_offset
static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
	return p4d_pgtable(*p4d) + pud_index(address);
}
#define pud_offset pud_offset
#endif

static inline pgd_t *pgd_offset_pgd(pgd_t *pgd, unsigned long address)
{
	return (pgd + pgd_index(address));
}

/*
 * A shortcut to get a pgd_t in a given mm.
 */
#ifndef pgd_offset
#define pgd_offset(mm, address)		pgd_offset_pgd((mm)->pgd, (address))
#endif

/*
 * A shortcut which implies the use of the kernel's pgd, instead
 * of a process's.
 */
#ifndef pgd_offset_k
#define pgd_offset_k(address)		pgd_offset(&init_mm, (address))
#endif

/*
 * In many cases it is known that a virtual address is mapped at PMD or PTE
 * level, so instead of traversing all the page table levels, we can get a
 * pointer to the PMD entry in a user or kernel page table, or translate a
 * virtual address to the pointer to the PTE in the kernel page tables, with
 * these simple helpers.
 */
static inline pmd_t *pmd_off(struct mm_struct *mm, unsigned long va)
{
	return pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, va), va), va), va);
}

static inline pmd_t *pmd_off_k(unsigned long va)
{
	return pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va);
}

static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	pmd_t *pmd = pmd_off_k(vaddr);

	return pmd_none(*pmd) ? NULL : pte_offset_kernel(pmd, vaddr);
}
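
/*
 * Example (editorial sketch, not part of this header): looking up the
 * kernel PTE that maps a known kernel virtual address, assuming the
 * address is mapped at PTE (not PMD) level:
 *
 *	pte_t *ptep = virt_to_kpte(vaddr);
 *
 *	if (ptep && pte_present(*ptep))
 *		pr_info("pfn=%lx\n", pte_pfn(*ptep));
 */
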
#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);
#endif

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);
extern int pudp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pud_t *pudp,
				 pud_t entry, int dirty);
#else
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	BUILD_BUG();
	return 0;
}
static inline int pudp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pud_t *pudp,
					pud_t entry, int dirty)
{
	BUILD_BUG();
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	pte_t pte = *ptep;
	int r = 1;
	if (!pte_young(pte))
		r = 0;
	else
		set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte));
	return r;
}
#endif

#ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;
	int r = 1;
	if (!pmd_young(pmd))
		r = 0;
	else
		set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd));
	return r;
}
#else
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	BUILD_BUG();
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep);
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);
#else
/*
 * Despite being relevant to THP only, this API is called from generic rmap
 * code under PageTransHuge(), hence it needs a dummy implementation for !THP.
 */
static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pmd_t *pmdp)
{
	BUILD_BUG();
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address,
				       pte_t *ptep)
{
	pte_t pte = *ptep;
	pte_clear(mm, address, ptep);
	return pte;
}
#endif

#ifndef __HAVE_ARCH_PTEP_GET
static inline pte_t ptep_get(pte_t *ptep)
{
	return READ_ONCE(*ptep);
}
#endif

#ifdef CONFIG_GUP_GET_PTE_LOW_HIGH
/*
 * WARNING: only to be used in the get_user_pages_fast()
 * implementation.
 *
 * With get_user_pages_fast(), we walk down the pagetables without taking any
 * locks. For this we would like to load the pointers atomically, but sometimes
 * that is not possible (e.g. without expensive cmpxchg8b on x86_32 PAE). What
 * we do have is the guarantee that a PTE will only either go from not present
 * to present, or present to not present or both -- it will not switch to a
 * completely different present page without a TLB flush in between; something
 * that we are blocking by holding interrupts off.
 *
 * Setting ptes from not present to present goes:
 *
 *   ptep->pte_high = h;
 *   smp_wmb();
 *   ptep->pte_low = l;
 *
 * And present to not present goes:
 *
 *   ptep->pte_low = 0;
 *   smp_wmb();
 *   ptep->pte_high = 0;
 *
 * We must ensure here that the load of pte_low sees 'l' IFF pte_high sees 'h'.
 * We load pte_high *after* loading pte_low, which ensures we don't see an older
 * value of pte_high. *Then* we recheck pte_low, which ensures that we haven't
 * picked up a changed pte high. We might have gotten rubbish values from
 * pte_low and pte_high, but we are guaranteed that pte_low will not have the
 * present bit set *unless* it is 'l'. Because get_user_pages_fast() only
 * operates on present ptes we're safe.
 */
static inline pte_t ptep_get_lockless(pte_t *ptep)
{
	pte_t pte;

	do {
		pte.pte_low = ptep->pte_low;
		smp_rmb();
		pte.pte_high = ptep->pte_high;
		smp_rmb();
	} while (unlikely(pte.pte_low != ptep->pte_low));

	return pte;
}
#else /* CONFIG_GUP_GET_PTE_LOW_HIGH */
/*
 * We require that the PTE can be read atomically.
 */
static inline pte_t ptep_get_lockless(pte_t *ptep)
{
	return ptep_get(ptep);
}
#endif /* CONFIG_GUP_GET_PTE_LOW_HIGH */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address,
					    pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;
	pmd_clear(pmdp);
	return pmd;
}
#endif /* __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR */
#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address,
					    pud_t *pudp)
{
	pud_t pud = *pudp;

	pud_clear(pudp);
	return pud;
}
#endif /* __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR */
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
					    unsigned long address, pmd_t *pmdp,
					    int full)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
}
#endif

#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR_FULL
static inline pud_t pudp_huge_get_and_clear_full(struct mm_struct *mm,
					    unsigned long address, pud_t *pudp,
					    int full)
{
	return pudp_huge_get_and_clear(mm, address, pudp);
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long address, pte_t *ptep,
					    int full)
{
	pte_t pte;
	pte = ptep_get_and_clear(mm, address, ptep);
	return pte;
}
#endif

/*
 * If two threads concurrently fault at the same page, the thread that
 * won the race updates the PTE and its local TLB/cache. The other thread
 * gives up, simply does nothing, and continues; on architectures where
 * software can update the TLB, the local TLB can be updated here to avoid
 * the next page fault. This function updates the TLB only, and touches
 * nothing else (no caches or other state); that is what distinguishes it
 * from update_mmu_cache().
 */
#ifndef __HAVE_ARCH_UPDATE_MMU_TLB
static inline void update_mmu_tlb(struct vm_area_struct *vma,
				unsigned long address, pte_t *ptep)
{
}
#define __HAVE_ARCH_UPDATE_MMU_TLB
#endif

/*
 * Some architectures may be able to avoid expensive synchronization
 * primitives when modifications are made to PTEs which are already
 * not present, or in the process of an address space destruction.
 */
#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
static inline void pte_clear_not_present_full(struct mm_struct *mm,
					      unsigned long address,
					      pte_t *ptep,
					      int full)
{
	pte_clear(mm, address, ptep);
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
			      unsigned long address,
			      pte_t *ptep);
#endif

#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
extern pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
			      unsigned long address,
			      pmd_t *pmdp);
extern pud_t pudp_huge_clear_flush(struct vm_area_struct *vma,
			      unsigned long address,
			      pud_t *pudp);
#endif

#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
struct mm_struct;
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pte_t old_pte = *ptep;
	set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
}
#endif

/*
 * On some architectures the hardware does not set the page access bit when a
 * memory page is accessed; it is the responsibility of software to set it,
 * which incurs an extra page-fault penalty for tracking the access bit. As an
 * optimization, the bit can be set throughout the page-fault flow on those
 * architectures. To distinguish it from the pte_mkyoung() macro, this macro
 * is only used on platforms where software maintains the page access bit.
 */
#ifndef pte_sw_mkyoung
static inline pte_t pte_sw_mkyoung(pte_t pte)
{
	return pte;
}
#define pte_sw_mkyoung	pte_sw_mkyoung
#endif

#ifndef pte_savedwrite
#define pte_savedwrite pte_write
#endif

#ifndef pte_mk_savedwrite
#define pte_mk_savedwrite pte_mkwrite
#endif

#ifndef pte_clear_savedwrite
#define pte_clear_savedwrite pte_wrprotect
#endif

#ifndef pmd_savedwrite
#define pmd_savedwrite pmd_write
#endif

#ifndef pmd_mk_savedwrite
#define pmd_mk_savedwrite pmd_mkwrite
#endif

#ifndef pmd_clear_savedwrite
#define pmd_clear_savedwrite pmd_wrprotect
#endif

#ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	pmd_t old_pmd = *pmdp;
	set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd));
}
#else
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	BUILD_BUG();
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
#ifndef __HAVE_ARCH_PUDP_SET_WRPROTECT
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline void pudp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pud_t *pudp)
{
	pud_t old_pud = *pudp;

	set_pud_at(mm, address, pudp, pud_wrprotect(old_pud));
}
#else
static inline void pudp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pud_t *pudp)
{
	BUILD_BUG();
}
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#endif

#ifndef pmdp_collapse_flush
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp);
#else
static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp)
{
	BUILD_BUG();
	return *pmdp;
}
#define pmdp_collapse_flush pmdp_collapse_flush
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				       pgtable_t pgtable);
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * This is an implementation of pmdp_establish() that is only suitable for an
 * architecture that doesn't have hardware dirty/accessed bits. In this case we
 * can't race with the CPU setting those bits, so a non-atomic approach is fine.
 */
static inline pmd_t generic_pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	pmd_t old_pmd = *pmdp;
	set_pmd_at(vma->vm_mm, address, pmdp, pmd);
	return old_pmd;
}
#endif

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
			     pmd_t *pmdp);
#endif

#ifndef __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}
#endif

#ifndef __HAVE_ARCH_PTE_UNUSED
/*
 * Some architectures provide facilities to virtualization guests
 * so that they can flag allocated pages as unused. This allows the
 * host to transparently reclaim unused pages. This function returns
 * whether the pte's page is unused.
 */
static inline int pte_unused(pte_t pte)
{
	return 0;
}
#endif

#ifndef pte_access_permitted
#define pte_access_permitted(pte, write) \
	(pte_present(pte) && (!(write) || pte_write(pte)))
#endif

#ifndef pmd_access_permitted
#define pmd_access_permitted(pmd, write) \
	(pmd_present(pmd) && (!(write) || pmd_write(pmd)))
#endif

#ifndef pud_access_permitted
#define pud_access_permitted(pud, write) \
	(pud_present(pud) && (!(write) || pud_write(pud)))
#endif

#ifndef p4d_access_permitted
#define p4d_access_permitted(p4d, write) \
	(p4d_present(p4d) && (!(write) || p4d_write(p4d)))
#endif

#ifndef pgd_access_permitted
#define pgd_access_permitted(pgd, write) \
	(pgd_present(pgd) && (!(write) || pgd_write(pgd)))
#endif
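
/*
 * Illustrative use (editorial sketch): fast-GUP-style code checks an entry
 * before dereferencing it on behalf of user space, e.g.:
 *
 *	pte_t pte = ptep_get_lockless(ptep);
 *
 *	if (!pte_access_permitted(pte, flags & FOLL_WRITE))
 *		return 0;	// fall back to the slow path
 */
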
#ifndef __HAVE_ARCH_PMD_SAME
static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
	return pmd_val(pmd_a) == pmd_val(pmd_b);
}

static inline int pud_same(pud_t pud_a, pud_t pud_b)
{
	return pud_val(pud_a) == pud_val(pud_b);
}
#endif

#ifndef __HAVE_ARCH_P4D_SAME
static inline int p4d_same(p4d_t p4d_a, p4d_t p4d_b)
{
	return p4d_val(p4d_a) == p4d_val(p4d_b);
}
#endif

#ifndef __HAVE_ARCH_PGD_SAME
static inline int pgd_same(pgd_t pgd_a, pgd_t pgd_b)
{
	return pgd_val(pgd_a) == pgd_val(pgd_b);
}
#endif

/*
 * Use set_p*_safe(), and elide TLB flushing, when confident that *no*
 * TLB flush will be required as a result of the "set". For example, use
 * in scenarios where it is known ahead of time that the routine is
 * setting non-present entries, or re-setting an existing entry to the
 * same value. Otherwise, use the typical "set" helpers and flush the
 * TLB.
 */
#define set_pte_safe(ptep, pte) \
({ \
	WARN_ON_ONCE(pte_present(*ptep) && !pte_same(*ptep, pte)); \
	set_pte(ptep, pte); \
})

#define set_pmd_safe(pmdp, pmd) \
({ \
	WARN_ON_ONCE(pmd_present(*pmdp) && !pmd_same(*pmdp, pmd)); \
	set_pmd(pmdp, pmd); \
})

#define set_pud_safe(pudp, pud) \
({ \
	WARN_ON_ONCE(pud_present(*pudp) && !pud_same(*pudp, pud)); \
	set_pud(pudp, pud); \
})

#define set_p4d_safe(p4dp, p4d) \
({ \
	WARN_ON_ONCE(p4d_present(*p4dp) && !p4d_same(*p4dp, p4d)); \
	set_p4d(p4dp, p4d); \
})

#define set_pgd_safe(pgdp, pgd) \
({ \
	WARN_ON_ONCE(pgd_present(*pgdp) && !pgd_same(*pgdp, pgd)); \
	set_pgd(pgdp, pgd); \
})
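
/*
 * Example (editorial sketch): populating a brand-new kernel mapping where
 * the entry is known to be non-present, so no TLB flush is needed:
 *
 *	set_pte_safe(ptep, pfn_pte(pfn, PAGE_KERNEL));
 */
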
#ifndef __HAVE_ARCH_DO_SWAP_PAGE
/*
 * Some architectures support metadata associated with a page. When a
 * page is being swapped out, this metadata must be saved so it can be
 * restored when the page is swapped back in. SPARC M7 and newer
 * processors support an ADI (Application Data Integrity) tag for the
 * page as metadata for the page. arch_do_swap_page() can restore this
 * metadata when a page is swapped back in.
 */
static inline void arch_do_swap_page(struct mm_struct *mm,
				     struct vm_area_struct *vma,
				     unsigned long addr,
				     pte_t pte, pte_t oldpte)
{
}
#endif

#ifndef __HAVE_ARCH_UNMAP_ONE
/*
 * Some architectures support metadata associated with a page. When a
 * page is being swapped out, this metadata must be saved so it can be
 * restored when the page is swapped back in. SPARC M7 and newer
 * processors support an ADI (Application Data Integrity) tag for the
 * page as metadata for the page. arch_unmap_one() can save this
 * metadata on a swap-out of a page.
 */
static inline int arch_unmap_one(struct mm_struct *mm,
				 struct vm_area_struct *vma,
				 unsigned long addr,
				 pte_t orig_pte)
{
	return 0;
}
#endif

/*
 * Allow architectures to preserve additional metadata associated with
 * swapped-out pages. The corresponding __HAVE_ARCH_SWAP_* macros and function
 * prototypes must be defined in the arch-specific asm/pgtable.h file.
 */
#ifndef __HAVE_ARCH_PREPARE_TO_SWAP
static inline int arch_prepare_to_swap(struct page *page)
{
	return 0;
}
#endif

#ifndef __HAVE_ARCH_SWAP_INVALIDATE
static inline void arch_swap_invalidate_page(int type, pgoff_t offset)
{
}

static inline void arch_swap_invalidate_area(int type)
{
}
#endif

#ifndef __HAVE_ARCH_SWAP_RESTORE
static inline void arch_swap_restore(swp_entry_t entry, struct page *page)
{
}
#endif

#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
#define pgd_offset_gate(mm, addr)	pgd_offset(mm, addr)
#endif

#ifndef __HAVE_ARCH_MOVE_PTE
#define move_pte(pte, prot, old_addr, new_addr)	(pte)
#endif

#ifndef pte_accessible
# define pte_accessible(mm, pte)	((void)(pte), 1)
#endif

#ifndef flush_tlb_fix_spurious_fault
#define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address)
#endif

/*
 * When walking page tables, get the address of the next boundary,
 * or the end address of the range if that comes earlier. Although no
 * vma end wraps to 0, the rounded-up __boundary may wrap to 0 throughout.
 */
#define pgd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})

#ifndef p4d_addr_end
#define p4d_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + P4D_SIZE) & P4D_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif

#ifndef pud_addr_end
#define pud_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif

#ifndef pmd_addr_end
#define pmd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif
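
/*
 * Worked example (editorial): with 2M PMDs (PMD_SIZE == 0x200000),
 * pmd_addr_end(0x201000, 0x500000) rounds 0x201000 up to the next PMD
 * boundary and returns 0x400000, since that is still below the range end;
 * pmd_addr_end(0x450000, 0x500000) returns the range end 0x500000 instead.
 */
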
/*
 * When walking page tables, we usually want to skip any p?d_none entries;
 * and any p?d_bad entries - reporting the error before resetting to none.
 * Do the tests inline, but report and clear the bad entry in mm/memory.c.
 */
void pgd_clear_bad(pgd_t *);

#ifndef __PAGETABLE_P4D_FOLDED
void p4d_clear_bad(p4d_t *);
#else
#define p4d_clear_bad(p4d)        do { } while (0)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
void pud_clear_bad(pud_t *);
#else
#define pud_clear_bad(pud)        do { } while (0)
#endif

void pmd_clear_bad(pmd_t *);

static inline int pgd_none_or_clear_bad(pgd_t *pgd)
{
	if (pgd_none(*pgd))
		return 1;
	if (unlikely(pgd_bad(*pgd))) {
		pgd_clear_bad(pgd);
		return 1;
	}
	return 0;
}

static inline int p4d_none_or_clear_bad(p4d_t *p4d)
{
	if (p4d_none(*p4d))
		return 1;
	if (unlikely(p4d_bad(*p4d))) {
		p4d_clear_bad(p4d);
		return 1;
	}
	return 0;
}

static inline int pud_none_or_clear_bad(pud_t *pud)
{
	if (pud_none(*pud))
		return 1;
	if (unlikely(pud_bad(*pud))) {
		pud_clear_bad(pud);
		return 1;
	}
	return 0;
}

static inline int pmd_none_or_clear_bad(pmd_t *pmd)
{
	if (pmd_none(*pmd))
		return 1;
	if (unlikely(pmd_bad(*pmd))) {
		pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}
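
/*
 * Example (editorial sketch): the canonical walk pattern these helpers are
 * built for, shown here for the top level (walk_p4d_range() is hypothetical):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	do {
 *		next = pgd_addr_end(addr, end);
 *		if (pgd_none_or_clear_bad(pgd))
 *			continue;
 *		walk_p4d_range(pgd, addr, next);
 *	} while (pgd++, addr = next, addr != end);
 */
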
static inline pte_t __ptep_modify_prot_start(struct vm_area_struct *vma,
					     unsigned long addr,
					     pte_t *ptep)
{
	/*
	 * Get the current pte state, but zero it out to make it
	 * non-present, preventing the hardware from asynchronously
	 * updating it.
	 */
	return ptep_get_and_clear(vma->vm_mm, addr, ptep);
}

static inline void __ptep_modify_prot_commit(struct vm_area_struct *vma,
					     unsigned long addr,
					     pte_t *ptep, pte_t pte)
{
	/*
	 * The pte is non-present, so there's no hardware state to
	 * preserve.
	 */
	set_pte_at(vma->vm_mm, addr, ptep, pte);
}

#ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
/*
 * Start a pte protection read-modify-write transaction, which
 * protects against asynchronous hardware modifications to the pte.
 * The intention is not to prevent the hardware from making pte
 * updates, but to prevent any updates it may make from being lost.
 *
 * This does not protect against other software modifications of the
 * pte; the appropriate pte lock must be held over the transaction.
 *
 * Note that this interface is intended to be batchable, meaning that
 * ptep_modify_prot_commit may not actually update the pte, but merely
 * queue the update to be done at some later time. The update must be
 * actually committed before the pte lock is released, however.
 */
static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
					   unsigned long addr,
					   pte_t *ptep)
{
	return __ptep_modify_prot_start(vma, addr, ptep);
}

/*
 * Commit an update to a pte, leaving any hardware-controlled bits in
 * the PTE unmodified.
 */
static inline void ptep_modify_prot_commit(struct vm_area_struct *vma,
					   unsigned long addr,
					   pte_t *ptep, pte_t old_pte, pte_t pte)
{
	__ptep_modify_prot_commit(vma, addr, ptep, pte);
}
#endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
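
/*
 * Example (editorial sketch): changing the protection of one pte with the
 * pte lock held, mirroring the pattern used by mprotect()'s implementation:
 *
 *	oldpte = ptep_modify_prot_start(vma, addr, ptep);
 *	ptent = pte_modify(oldpte, newprot);
 *	ptep_modify_prot_commit(vma, addr, ptep, oldpte, ptent);
 */
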
#endif /* CONFIG_MMU */

/*
 * No-op macros that just return the current protection value. Defined here
 * because these macros can be used even if CONFIG_MMU is not defined.
 */
#ifndef pgprot_nx
#define pgprot_nx(prot)	(prot)
#endif

#ifndef pgprot_noncached
#define pgprot_noncached(prot)	(prot)
#endif

#ifndef pgprot_writecombine
#define pgprot_writecombine pgprot_noncached
#endif

#ifndef pgprot_writethrough
#define pgprot_writethrough pgprot_noncached
#endif

#ifndef pgprot_device
#define pgprot_device pgprot_noncached
#endif

#ifndef pgprot_mhp
#define pgprot_mhp(prot)	(prot)
#endif

#ifdef CONFIG_MMU
#ifndef pgprot_modify
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	if (pgprot_val(oldprot) == pgprot_val(pgprot_noncached(oldprot)))
		newprot = pgprot_noncached(newprot);
	if (pgprot_val(oldprot) == pgprot_val(pgprot_writecombine(oldprot)))
		newprot = pgprot_writecombine(newprot);
	if (pgprot_val(oldprot) == pgprot_val(pgprot_device(oldprot)))
		newprot = pgprot_device(newprot);
	return newprot;
}
#endif
#endif /* CONFIG_MMU */

#ifndef pgprot_encrypted
#define pgprot_encrypted(prot)	(prot)
#endif

#ifndef pgprot_decrypted
#define pgprot_decrypted(prot)	(prot)
#endif

/*
 * A facility to provide lazy MMU batching. This allows PTE updates and
 * page invalidations to be delayed until a call to leave lazy MMU mode
 * is issued. Some architectures may benefit from doing this, and it is
 * beneficial for both shadow and direct mode hypervisors, which may batch
 * the PTE updates which happen during this window. Note that using this
 * interface requires that read hazards be removed from the code. A read
 * hazard could result in the direct mode hypervisor case, since the actual
 * write to the page tables may not yet have taken place, so reads through
 * a raw PTE pointer after it has been modified are not guaranteed to be
 * up to date. This mode can only be entered and left under the protection of
 * the page table locks for all page tables which may be modified. In the UP
 * case, this is required so that preemption is disabled, and in the SMP case,
 * it must synchronize the delayed page table writes properly on other CPUs.
 */
#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
#define arch_enter_lazy_mmu_mode()	do {} while (0)
#define arch_leave_lazy_mmu_mode()	do {} while (0)
#define arch_flush_lazy_mmu_mode()	do {} while (0)
#endif
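
/*
 * Example (editorial sketch): batching a run of PTE installs under the page
 * table lock; note that no modified pte is read back inside the lazy section:
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (; addr != end; ptep++, addr += PAGE_SIZE, pfn++)
 *		set_pte_at(mm, addr, ptep, pfn_pte(pfn, prot));
 *	arch_leave_lazy_mmu_mode();
 */
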
/*
 * A facility to provide batching of the reload of page tables and
 * other process state with the actual context switch code for
 * paravirtualized guests. By convention, only one of the batched
 * update (lazy) modes (CPU, MMU) should be active at any given time,
 * entry should never be nested, and entry and exits should always be
 * paired. This is for sanity of maintaining and reasoning about the
 * kernel code. In this case, the exit (end of the context switch) is
 * in architecture-specific code, and so doesn't need a generic
 * definition.
 */
#ifndef __HAVE_ARCH_START_CONTEXT_SWITCH
#define arch_start_context_switch(prev)	do {} while (0)
#endif

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
#ifndef CONFIG_ARCH_ENABLE_THP_MIGRATION
static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
{
	return pmd;
}

static inline int pmd_swp_soft_dirty(pmd_t pmd)
{
	return 0;
}

static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
{
	return pmd;
}
#endif
#else /* !CONFIG_HAVE_ARCH_SOFT_DIRTY */
static inline int pte_soft_dirty(pte_t pte)
{
	return 0;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return 0;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return pte;
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pmd;
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return pte;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	return pmd;
}

static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return pte;
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
	return 0;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return pte;
}

static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
{
	return pmd;
}

static inline int pmd_swp_soft_dirty(pmd_t pmd)
{
	return 0;
}

static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
{
	return pmd;
}
#endif

#ifndef __HAVE_PFNMAP_TRACKING
/*
 * Interfaces that can be used by architecture code to keep track of
 * the memory type of pfn mappings specified by the remap_pfn_range and
 * vmf_insert_pfn APIs.
 */

/*
 * track_pfn_remap is called when a _new_ pfn mapping is being established
 * by remap_pfn_range() for the physical range indicated by pfn and size.
 */
static inline int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
				  unsigned long pfn, unsigned long addr,
				  unsigned long size)
{
	return 0;
}

/*
 * track_pfn_insert is called when a _new_ single pfn is established
 * by vmf_insert_pfn().
 */
static inline void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
				    pfn_t pfn)
{
}

/*
 * track_pfn_copy is called when a vma covering the pfnmap gets
 * copied through copy_page_range().
 */
static inline int track_pfn_copy(struct vm_area_struct *vma)
{
	return 0;
}

/*
 * untrack_pfn is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size,
 * or for the entire vma (in which case pfn and size are zero).
 */
static inline void untrack_pfn(struct vm_area_struct *vma,
			       unsigned long pfn, unsigned long size)
{
}

/*
 * untrack_pfn_moved is called while mremapping a pfnmap for a new region.
 */
static inline void untrack_pfn_moved(struct vm_area_struct *vma)
{
}
#else
extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
			   unsigned long pfn, unsigned long addr,
			   unsigned long size);
extern void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
			     pfn_t pfn);
extern int track_pfn_copy(struct vm_area_struct *vma);
extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
			unsigned long size);
extern void untrack_pfn_moved(struct vm_area_struct *vma);
#endif

#ifdef CONFIG_MMU
#ifdef __HAVE_COLOR_ZERO_PAGE
static inline int is_zero_pfn(unsigned long pfn)
{
	extern unsigned long zero_pfn;
	unsigned long offset_from_zero_pfn = pfn - zero_pfn;
	return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
}

#define my_zero_pfn(addr)	page_to_pfn(ZERO_PAGE(addr))

#else
static inline int is_zero_pfn(unsigned long pfn)
{
	extern unsigned long zero_pfn;
	return pfn == zero_pfn;
}

static inline unsigned long my_zero_pfn(unsigned long addr)
{
	extern unsigned long zero_pfn;
	return zero_pfn;
}
#endif
#else
static inline int is_zero_pfn(unsigned long pfn)
{
	return 0;
}

static inline unsigned long my_zero_pfn(unsigned long addr)
{
	return 0;
}
#endif /* CONFIG_MMU */
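
/*
 * Example (editorial sketch): vm_normal_page()-style code uses these helpers
 * to avoid treating the shared zero page as an ordinary page:
 *
 *	if (is_zero_pfn(pte_pfn(pte)))
 *		return NULL;	// no struct page to pin or dirty
 */
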
#ifdef CONFIG_MMU
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return 0;
}
#ifndef pmd_write
static inline int pmd_write(pmd_t pmd)
{
	BUG();
	return 0;
}
#endif /* pmd_write */
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifndef pud_write
static inline int pud_write(pud_t pud)
{
	BUG();
	return 0;
}
#endif /* pud_write */

#if !defined(CONFIG_ARCH_HAS_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
static inline int pmd_devmap(pmd_t pmd)
{
	return 0;
}
static inline int pud_devmap(pud_t pud)
{
	return 0;
}
static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif

#if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \
	(defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
	 !defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD))
static inline int pud_trans_huge(pud_t pud)
{
	return 0;
}
#endif

/* See pmd_none_or_trans_huge_or_clear_bad for discussion. */
static inline int pud_none_or_trans_huge_or_dev_or_clear_bad(pud_t *pud)
{
	pud_t pudval = READ_ONCE(*pud);

	if (pud_none(pudval) || pud_trans_huge(pudval) || pud_devmap(pudval))
		return 1;
	if (unlikely(pud_bad(pudval))) {
		pud_clear_bad(pud);
		return 1;
	}
	return 0;
}

/* See pmd_trans_unstable for discussion. */
static inline int pud_trans_unstable(pud_t *pud)
{
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) &&			\
	defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
	return pud_none_or_trans_huge_or_dev_or_clear_bad(pud);
#else
	return 0;
#endif
}

#ifndef pmd_read_atomic
static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
{
	/*
	 * Depend on the compiler for an atomic pmd read. NOTE: this is
	 * only going to work if the pmdval_t isn't larger than
	 * an unsigned long.
	 */
	return *pmdp;
}
#endif

#ifndef arch_needs_pgtable_deposit
#define arch_needs_pgtable_deposit() (false)
#endif

/*
 * This function is meant to be used by sites walking pagetables with
 * the mmap_lock held in read mode to protect against MADV_DONTNEED and
 * transhuge page faults. MADV_DONTNEED can convert a transhuge pmd
 * into a null pmd and the transhuge page fault can convert a null pmd
 * into a hugepmd or into a regular pmd (if the hugepage allocation
 * fails). While holding the mmap_lock in read mode the pmd becomes
 * stable and stops changing under us only if it's not null and not a
 * transhuge pmd. When those races occur and this function makes a
 * difference vs the standard pmd_none_or_clear_bad, the result is
 * undefined so behaving as if the pmd was none is safe (because it
 * can return none anyway). The compiler level barrier() is critically
 * important to compute the two checks atomically on the same pmdval.
 *
 * For 32bit kernels with a 64bit large pmd_t this automatically takes
 * care of reading the pmd atomically to avoid SMP race conditions
 * against pmd_populate() when the mmap_lock is held for reading by the
 * caller (a special atomic read not done by "gcc" as in the generic
 * version above is also needed when THP is disabled because the page
 * fault can populate the pmd from under us).
 */
static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
{
	pmd_t pmdval = pmd_read_atomic(pmd);
	/*
	 * The barrier will stabilize the pmdval in a register or on
	 * the stack so that it will stop changing under the code.
	 *
	 * When CONFIG_TRANSPARENT_HUGEPAGE=y on x86 32bit PAE,
	 * pmd_read_atomic is allowed to return a non-atomic pmdval
	 * (for example pointing to a hugepage that has never been
	 * mapped in the pmd). The below checks will only care about
	 * the low part of the pmd with 32bit PAE x86 anyway, with the
	 * exception of pmd_none(). So the important thing is that if
	 * the low part of the pmd is found null, the high part will
	 * also be null or the pmd_none() check below would be
	 * confused.
	 */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	barrier();
#endif
	/*
	 * !pmd_present() checks for pmd migration entries.
	 *
	 * The complete check uses is_pmd_migration_entry() in linux/swapops.h.
	 * But using that requires moving the current function and
	 * pmd_trans_unstable() to linux/swapops.h to resolve the dependency,
	 * which is too much code to move.
	 *
	 * !pmd_present() is equivalent to is_pmd_migration_entry() currently,
	 * because !pmd_present() pages can only be under migration, not swapped
	 * out.
	 *
	 * pmd_none() is preserved for future condition checks on pmd migration
	 * entries and not confusing with this function name, although it is
	 * redundant with !pmd_present().
	 */
	if (pmd_none(pmdval) || pmd_trans_huge(pmdval) ||
		(IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION) && !pmd_present(pmdval)))
		return 1;
	if (unlikely(pmd_bad(pmdval))) {
		pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}

/*
 * This is a noop if Transparent Hugepage Support is not built into
 * the kernel. Otherwise it is equivalent to
 * pmd_none_or_trans_huge_or_clear_bad(), and shall only be called in
 * places that already verified the pmd is not none, and they want to
 * walk ptes while holding the mmap lock in read mode (write mode doesn't
 * need this). If THP is not enabled, the pmd can't go away under the
 * code even if MADV_DONTNEED runs, but if THP is enabled we need to
 * run a pmd_trans_unstable before walking the ptes after
 * split_huge_pmd returns (because it may have run when the pmd became
 * null, but then a page fault can map in a THP and not a regular page).
 */
static inline int pmd_trans_unstable(pmd_t *pmd)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	return pmd_none_or_trans_huge_or_clear_bad(pmd);
#else
	return 0;
#endif
}
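
/*
 * Example (editorial sketch): a pte walker holding mmap_lock for read
 * revalidates the pmd before mapping its pte page:
 *
 *	if (pmd_trans_unstable(pmd))
 *		return 0;	// pmd is, or may be becoming, a THP; skip
 *	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 */
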
/*
 * The ordering of these checks is important for pmds with _PAGE_DEVMAP set.
 * If we check pmd_trans_unstable() first we will trip the bad_pmd() check
 * inside of pmd_none_or_trans_huge_or_clear_bad(). This will end up correctly
 * returning 1 but not before it spams dmesg with the pmd_clear_bad() output.
 */
static inline int pmd_devmap_trans_unstable(pmd_t *pmd)
{
	return pmd_devmap(*pmd) || pmd_trans_unstable(pmd);
}

#ifndef CONFIG_NUMA_BALANCING
/*
 * Technically a PTE can be PROTNONE even when not doing NUMA balancing, but
 * the only case the kernel cares about is NUMA balancing, and the bit is only
 * ever set when the VMA is accessible. For PROT_NONE VMAs, the PTEs are not
 * marked _PAGE_PROTNONE, so by default implement the helper as "always no".
 * It is the responsibility of the caller to distinguish between PROT_NONE
 * protections and NUMA hinting fault protections.
 */
static inline int pte_protnone(pte_t pte)
{
	return 0;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */

#endif /* CONFIG_MMU */

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
#ifndef __PAGETABLE_P4D_FOLDED
int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot);
int p4d_clear_huge(p4d_t *p4d);
#else
static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
	return 0;
}
static inline int p4d_clear_huge(p4d_t *p4d)
{
	return 0;
}
#endif /* !__PAGETABLE_P4D_FOLDED */

int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
int pud_clear_huge(pud_t *pud);
int pmd_clear_huge(pmd_t *pmd);
int p4d_free_pud_page(p4d_t *p4d, unsigned long addr);
int pud_free_pmd_page(pud_t *pud, unsigned long addr);
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr);
#else	/* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
	return 0;
}
static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
{
	return 0;
}
static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
{
	return 0;
}
static inline int p4d_clear_huge(p4d_t *p4d)
{
	return 0;
}
static inline int pud_clear_huge(pud_t *pud)
{
	return 0;
}
static inline int pmd_clear_huge(pmd_t *pmd)
{
	return 0;
}
static inline int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
{
	return 0;
}
static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
	return 0;
}
static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
	return 0;
}
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */

#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * Architectures with special requirements for evicting THP backing TLB
 * entries can implement this. Otherwise also, it can help optimize the
 * normal TLB flush in the THP regime. The stock flush_tlb_range() typically
 * has an optimization to nuke the entire TLB if the flush span is greater
 * than a threshold, which will likely be true for a single huge page. Thus
 * a single THP flush will invalidate the entire TLB, which is not desirable.
 * e.g. see arch/arc: flush_pmd_tlb_range
 */
#define flush_pmd_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
#define flush_pud_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
#else
#define flush_pmd_tlb_range(vma, addr, end)	BUILD_BUG()
#define flush_pud_tlb_range(vma, addr, end)	BUILD_BUG()
#endif
#endif

struct file;
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
			unsigned long size, pgprot_t *vma_prot);

#ifndef CONFIG_X86_ESPFIX64
static inline void init_espfix_bsp(void) { }
#endif

extern void __init pgtable_cache_init(void);

#ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED
static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
{
	return true;
}

static inline bool arch_has_pfn_modify_check(void)
{
	return false;
}
#endif /* !__HAVE_ARCH_PFN_MODIFY_ALLOWED */

/*
 * Architecture PAGE_KERNEL_* fallbacks
 *
 * Some architectures don't define certain PAGE_KERNEL_* flags. This is either
 * because they really don't support them, or the port needs to be updated to
 * reflect the required functionality. Below are a set of relatively safe
 * fallbacks, as a best effort, which we can count on in lieu of the
 * architectures not defining them on their own yet.
 */
#ifndef PAGE_KERNEL_RO
# define PAGE_KERNEL_RO PAGE_KERNEL
#endif

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/*
 * Page Table Modification bits for pgtbl_mod_mask.
 *
 * These are used by the p?d_alloc_track*() set of functions and in the
 * generic vmalloc/ioremap code to track at which page-table levels entries
 * have been modified. Based on that, the code can better decide when vmalloc
 * and ioremap mapping changes need to be synchronized to other page-tables
 * in the system.
 */
#define		__PGTBL_PGD_MODIFIED	0
#define		__PGTBL_P4D_MODIFIED	1
#define		__PGTBL_PUD_MODIFIED	2
#define		__PGTBL_PMD_MODIFIED	3
#define		__PGTBL_PTE_MODIFIED	4

#define		PGTBL_PGD_MODIFIED	BIT(__PGTBL_PGD_MODIFIED)
#define		PGTBL_P4D_MODIFIED	BIT(__PGTBL_P4D_MODIFIED)
#define		PGTBL_PUD_MODIFIED	BIT(__PGTBL_PUD_MODIFIED)
#define		PGTBL_PMD_MODIFIED	BIT(__PGTBL_PMD_MODIFIED)
#define		PGTBL_PTE_MODIFIED	BIT(__PGTBL_PTE_MODIFIED)

/* Page-Table Modification Mask */
typedef unsigned int pgtbl_mod_mask;
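
/*
 * Example (editorial sketch): callers accumulate the mask while populating
 * and synchronize kernel page tables only if an applicable level changed
 * (ARCH_PAGE_TABLE_SYNC_MASK and arch_sync_kernel_mappings() are the hooks
 * the vmalloc code uses for this):
 *
 *	pgtbl_mod_mask mask = 0;
 *	// ...populate, ORing PGTBL_P4D_MODIFIED etc. into mask...
 *	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
 *		arch_sync_kernel_mappings(start, end);
 */
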
#endif /* !__ASSEMBLY__ */

#if !defined(MAX_POSSIBLE_PHYSMEM_BITS) && !defined(CONFIG_64BIT)
#ifdef CONFIG_PHYS_ADDR_T_64BIT
/*
 * ZSMALLOC needs to know the highest PFN on 32-bit architectures
 * with physical address space extension, but falls back to
 * BITS_PER_LONG otherwise.
 */
#error Missing MAX_POSSIBLE_PHYSMEM_BITS definition
#else
#define MAX_POSSIBLE_PHYSMEM_BITS 32
#endif
#endif

#ifndef has_transparent_hugepage
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define has_transparent_hugepage() 1
#else
#define has_transparent_hugepage() 0
#endif
#endif

/*
 * On some architectures it depends on the mm if the p4d/pud or pmd
 * layer of the page table hierarchy is folded or not.
 */
#ifndef mm_p4d_folded
#define mm_p4d_folded(mm)	__is_defined(__PAGETABLE_P4D_FOLDED)
#endif

#ifndef mm_pud_folded
#define mm_pud_folded(mm)	__is_defined(__PAGETABLE_PUD_FOLDED)
#endif

#ifndef mm_pmd_folded
#define mm_pmd_folded(mm)	__is_defined(__PAGETABLE_PMD_FOLDED)
#endif

#ifndef p4d_offset_lockless
#define p4d_offset_lockless(pgdp, pgd, address) p4d_offset(&(pgd), address)
#endif
#ifndef pud_offset_lockless
#define pud_offset_lockless(p4dp, p4d, address) pud_offset(&(p4d), address)
#endif
#ifndef pmd_offset_lockless
#define pmd_offset_lockless(pudp, pud, address) pmd_offset(&(pud), address)
#endif

/*
 * p?d_leaf() - true if this entry is a final mapping to a physical address.
 * This differs from p?d_huge() by the fact that they are always available (if
 * the architecture supports large pages at the appropriate level) even
 * if CONFIG_HUGETLB_PAGE is not defined. Only meaningful when called on a
 * valid entry.
 */
#ifndef pgd_leaf
#define pgd_leaf(x)	0
#endif
#ifndef p4d_leaf
#define p4d_leaf(x)	0
#endif
#ifndef pud_leaf
#define pud_leaf(x)	0
#endif
#ifndef pmd_leaf
#define pmd_leaf(x)	0
#endif

#ifndef pgd_leaf_size
#define pgd_leaf_size(x) (1ULL << PGDIR_SHIFT)
#endif
#ifndef p4d_leaf_size
#define p4d_leaf_size(x) P4D_SIZE
#endif
#ifndef pud_leaf_size
#define pud_leaf_size(x) PUD_SIZE
#endif
#ifndef pmd_leaf_size
#define pmd_leaf_size(x) PMD_SIZE
#endif
#ifndef pte_leaf_size
#define pte_leaf_size(x) PAGE_SIZE
#endif

/*
 * Some architectures have MMUs that are configurable or selectable at boot
 * time. These lead to variable PTRS_PER_x. For statically allocated arrays it
 * helps to have a static maximum value.
 */
#ifndef MAX_PTRS_PER_PTE
#define MAX_PTRS_PER_PTE PTRS_PER_PTE
#endif

#ifndef MAX_PTRS_PER_PMD
#define MAX_PTRS_PER_PMD PTRS_PER_PMD
#endif

#ifndef MAX_PTRS_PER_PUD
#define MAX_PTRS_PER_PUD PTRS_PER_PUD
#endif

#ifndef MAX_PTRS_PER_P4D
#define MAX_PTRS_PER_P4D PTRS_PER_P4D
#endif
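
/*
 * Example (editorial sketch): a statically allocated table sized for the
 * worst case, in the spirit of KASAN's early shadow tables (the name
 * early_pte here is hypothetical):
 *
 *	static pte_t early_pte[MAX_PTRS_PER_PTE] __page_aligned_bss;
 */
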
#endif /* _LINUX_PGTABLE_H */