mm/khugepaged.c
// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"

enum scan_result {
	SCAN_FAIL,
	SCAN_SUCCEED,
	SCAN_PMD_NULL,
	SCAN_EXCEED_NONE_PTE,
	SCAN_EXCEED_SWAP_PTE,
	SCAN_EXCEED_SHARED_PTE,
	SCAN_PTE_NON_PRESENT,
	SCAN_PTE_UFFD_WP,
	SCAN_PAGE_RO,
	SCAN_LACK_REFERENCED_PAGE,
	SCAN_PAGE_NULL,
	SCAN_SCAN_ABORT,
	SCAN_PAGE_COUNT,
	SCAN_PAGE_LRU,
	SCAN_PAGE_LOCK,
	SCAN_PAGE_ANON,
	SCAN_PAGE_COMPOUND,
	SCAN_ANY_PROCESS,
	SCAN_VMA_NULL,
	SCAN_VMA_CHECK,
	SCAN_ADDRESS_RANGE,
	SCAN_SWAP_CACHE_PAGE,
	SCAN_DEL_PAGE_LRU,
	SCAN_ALLOC_HUGE_PAGE_FAIL,
	SCAN_CGROUP_CHARGE_FAIL,
	SCAN_TRUNCATED,
	SCAN_PAGE_HAS_PRIVATE,
};

#define CREATE_TRACE_POINTS
#include <trace/events/huge_memory.h>
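/*
 * The scan_result codes above are reported through the huge_memory
 * tracepoints pulled in by CREATE_TRACE_POINTS above. For illustration,
 * assuming tracefs is mounted at the usual debugfs location, collapse
 * attempts and their result codes can be watched with:
 *
 *	echo 1 > /sys/kernel/debug/tracing/events/huge_memory/enable
 *	cat /sys/kernel/debug/tracing/trace_pipe
 */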
static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);

/* default scan 8*512 pte (or vmas) every 30 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static unsigned long khugepaged_sleep_expire;
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * default collapse hugepages if there is at least one pte mapped like
 * it would have happened if the vma was large enough during page
 * fault.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly;
static unsigned int khugepaged_max_ptes_swap __read_mostly;
static unsigned int khugepaged_max_ptes_shared __read_mostly;

#define MM_SLOTS_HASH_BITS 10
static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct kmem_cache *mm_slot_cache __read_mostly;

#define MAX_PTE_MAPPED_THP 8

/**
 * struct mm_slot - hash lookup from mm to mm_slot
 * @hash: hash collision list
 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
 * @mm: the mm that this information is valid for
 */
struct mm_slot {
	struct hlist_node hash;
	struct list_head mm_node;
	struct mm_struct *mm;

	/* pte-mapped THP in this mm */
	int nr_pte_mapped_thp;
	unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
};

/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 *
 * There is only one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
	struct list_head mm_head;
	struct mm_slot *mm_slot;
	unsigned long address;
};

static struct khugepaged_scan khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};
#ifdef CONFIG_SYSFS
static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}

static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = kstrtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_scan_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
	__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
	       scan_sleep_millisecs_store);

static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = kstrtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_alloc_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
	__ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
	       alloc_sleep_millisecs_store);

static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
}

static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long pages;

	err = kstrtoul(buf, 10, &pages);
	if (err || !pages || pages > UINT_MAX)
		return -EINVAL;

	khugepaged_pages_to_scan = pages;

	return count;
}
static struct kobj_attribute pages_to_scan_attr =
	__ATTR(pages_to_scan, 0644, pages_to_scan_show,
	       pages_to_scan_store);

static ssize_t pages_collapsed_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
	__ATTR_RO(pages_collapsed);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
	__ATTR_RO(full_scans);

static ssize_t khugepaged_defrag_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t khugepaged_defrag_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
	__ATTR(defrag, 0644, khugepaged_defrag_show,
	       khugepaged_defrag_store);

/*
 * max_ptes_none controls if khugepaged should collapse hugepages over
 * any unmapped ptes in turn potentially increasing the memory
 * footprint of the vmas. When max_ptes_none is 0 khugepaged will not
 * reduce the available free memory in the system as it
 * runs. Increasing max_ptes_none will instead potentially reduce the
 * free memory in the system during the khugepaged scan.
 */
static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_none;

	err = kstrtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR-1)
		return -EINVAL;

	khugepaged_max_ptes_none = max_ptes_none;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
	__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
	       khugepaged_max_ptes_none_store);

static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_max_ptes_swap);
}

static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_swap;

	err = kstrtoul(buf, 10, &max_ptes_swap);
	if (err || max_ptes_swap > HPAGE_PMD_NR-1)
		return -EINVAL;

	khugepaged_max_ptes_swap = max_ptes_swap;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_swap_attr =
	__ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
	       khugepaged_max_ptes_swap_store);

static ssize_t khugepaged_max_ptes_shared_show(struct kobject *kobj,
					       struct kobj_attribute *attr,
					       char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_max_ptes_shared);
}

static ssize_t khugepaged_max_ptes_shared_store(struct kobject *kobj,
						struct kobj_attribute *attr,
						const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_shared;

	err = kstrtoul(buf, 10, &max_ptes_shared);
	if (err || max_ptes_shared > HPAGE_PMD_NR-1)
		return -EINVAL;

	khugepaged_max_ptes_shared = max_ptes_shared;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_shared_attr =
	__ATTR(max_ptes_shared, 0644, khugepaged_max_ptes_shared_show,
	       khugepaged_max_ptes_shared_store);
static struct attribute *khugepaged_attr[] = {
	&khugepaged_defrag_attr.attr,
	&khugepaged_max_ptes_none_attr.attr,
	&khugepaged_max_ptes_swap_attr.attr,
	&khugepaged_max_ptes_shared_attr.attr,
	&pages_to_scan_attr.attr,
	&pages_collapsed_attr.attr,
	&full_scans_attr.attr,
	&scan_sleep_millisecs_attr.attr,
	&alloc_sleep_millisecs_attr.attr,
	NULL,
};

struct attribute_group khugepaged_attr_group = {
	.attrs = khugepaged_attr,
	.name = "khugepaged",
};
#endif /* CONFIG_SYSFS */
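/*
 * Usage sketch for the knobs defined above: with CONFIG_SYSFS and the
 * usual sysfs layout, the attribute group is exposed under
 * /sys/kernel/mm/transparent_hugepage/khugepaged/, e.g.:
 *
 *	echo 511 > /sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_none
 *	echo 4096 > /sys/kernel/mm/transparent_hugepage/khugepaged/pages_to_scan
 */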
int hugepage_madvise(struct vm_area_struct *vma,
		     unsigned long *vm_flags, int advice)
{
	switch (advice) {
	case MADV_HUGEPAGE:
#ifdef CONFIG_S390
		/*
		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
		 * can't handle this properly after s390_enable_sie, so we simply
		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
		 */
		if (mm_has_pgste(vma->vm_mm))
			return 0;
#endif
		*vm_flags &= ~VM_NOHUGEPAGE;
		*vm_flags |= VM_HUGEPAGE;
		/*
		 * If the vma becomes good for khugepaged to scan,
		 * register it here without waiting for a page fault that
		 * may not happen any time soon.
		 */
		if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
				khugepaged_enter_vma_merge(vma, *vm_flags))
			return -ENOMEM;
		break;
	case MADV_NOHUGEPAGE:
		*vm_flags &= ~VM_HUGEPAGE;
		*vm_flags |= VM_NOHUGEPAGE;
		/*
		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
		 * this vma even if we leave the mm registered in khugepaged if
		 * it got registered before VM_NOHUGEPAGE was set.
		 */
		break;
	}

	return 0;
}

int __init khugepaged_init(void)
{
	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
					  sizeof(struct mm_slot),
					  __alignof__(struct mm_slot), 0, NULL);
	if (!mm_slot_cache)
		return -ENOMEM;

	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
	khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;

	return 0;
}

void __init khugepaged_destroy(void)
{
	kmem_cache_destroy(mm_slot_cache);
}

static inline struct mm_slot *alloc_mm_slot(void)
{
	if (!mm_slot_cache)	/* initialization failed */
		return NULL;
	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}

static inline void free_mm_slot(struct mm_slot *mm_slot)
{
	kmem_cache_free(mm_slot_cache, mm_slot);
}

static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;

	hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
		if (mm == mm_slot->mm)
			return mm_slot;

	return NULL;
}

static void insert_to_mm_slots_hash(struct mm_struct *mm,
				    struct mm_slot *mm_slot)
{
	mm_slot->mm = mm;
	hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
}

static inline int khugepaged_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}
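/*
 * Worked example for the defaults chosen in khugepaged_init() above,
 * assuming 4KiB base pages and 2MiB PMD-sized huge pages, i.e.
 * HPAGE_PMD_NR == 512:
 *
 *	khugepaged_pages_to_scan	= 512 * 8 = 4096
 *	khugepaged_max_ptes_none	= 512 - 1 = 511
 *	khugepaged_max_ptes_swap	= 512 / 8 = 64
 *	khugepaged_max_ptes_shared	= 512 / 2 = 256
 */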
static bool hugepage_vma_check(struct vm_area_struct *vma,
			       unsigned long vm_flags)
{
	if ((!(vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
	    (vm_flags & VM_NOHUGEPAGE) ||
	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		return false;

	if (shmem_file(vma->vm_file) ||
	    (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
	     vma->vm_file &&
	     (vm_flags & VM_DENYWRITE))) {
		return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
				HPAGE_PMD_NR);
	}
	if (!vma->anon_vma || vma->vm_ops)
		return false;
	if (vma_is_temporary_stack(vma))
		return false;
	return !(vm_flags & VM_NO_KHUGEPAGED);
}
int __khugepaged_enter(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int wakeup;

	mm_slot = alloc_mm_slot();
	if (!mm_slot)
		return -ENOMEM;

	/* __khugepaged_exit() must not run from under us */
	VM_BUG_ON_MM(atomic_read(&mm->mm_users) == 0, mm);
	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
		free_mm_slot(mm_slot);
		return 0;
	}

	spin_lock(&khugepaged_mm_lock);
	insert_to_mm_slots_hash(mm, mm_slot);
	/*
	 * Insert just behind the scanning cursor, to let the area settle
	 * down a little.
	 */
	wakeup = list_empty(&khugepaged_scan.mm_head);
	list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
	spin_unlock(&khugepaged_mm_lock);

	mmgrab(mm);
	if (wakeup)
		wake_up_interruptible(&khugepaged_wait);

	return 0;
}

int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
			       unsigned long vm_flags)
{
	unsigned long hstart, hend;

	/*
	 * khugepaged only supports read-only files for non-shmem files.
	 * khugepaged does not yet work on special mappings. And
	 * file-private shmem THP is not supported.
	 */
	if (!hugepage_vma_check(vma, vm_flags))
		return 0;

	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (hstart < hend)
		return khugepaged_enter(vma, vm_flags);
	return 0;
}

void __khugepaged_exit(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int free = 0;

	spin_lock(&khugepaged_mm_lock);
	mm_slot = get_mm_slot(mm);
	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
		hash_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);
		free = 1;
	}
	spin_unlock(&khugepaged_mm_lock);

	if (free) {
		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		free_mm_slot(mm_slot);
		mmdrop(mm);
	} else if (mm_slot) {
		/*
		 * This is required to serialize against
		 * khugepaged_test_exit() (which is guaranteed to run
		 * under mmap_lock read mode). Stop here (after we
		 * return all pagetables will be destroyed) until
		 * khugepaged has finished working on the pagetables
		 * under the mmap_lock.
		 */
		mmap_write_lock(mm);
		mmap_write_unlock(mm);
	}
}

static void release_pte_page(struct page *page)
{
	mod_node_page_state(page_pgdat(page),
			NR_ISOLATED_ANON + page_is_file_lru(page),
			-compound_nr(page));
	unlock_page(page);
	putback_lru_page(page);
}
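/*
 * Rollback helper for the isolation loop below: walks back over the ptes
 * already processed and, together with compound_pagelist, returns every
 * page isolated so far to the LRU via release_pte_page() above.
 */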
static void release_pte_pages(pte_t *pte, pte_t *_pte,
		struct list_head *compound_pagelist)
{
	struct page *page, *tmp;

	while (--_pte >= pte) {
		pte_t pteval = *_pte;

		page = pte_page(pteval);
		if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)) &&
				!PageCompound(page))
			release_pte_page(page);
	}

	list_for_each_entry_safe(page, tmp, compound_pagelist, lru) {
		list_del(&page->lru);
		release_pte_page(page);
	}
}
static bool is_refcount_suitable(struct page *page)
{
	int expected_refcount;

	expected_refcount = total_mapcount(page);
	if (PageSwapCache(page))
		expected_refcount += compound_nr(page);

	return page_count(page) == expected_refcount;
}
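/*
 * Worked example for the check above: an order-0 anonymous page mapped
 * by two processes and present in the swap cache is expected to have
 * page_count() == 2 (mapcounts) + 1 (swap cache reference) == 3; any
 * higher count means an extra pin such as GUP, so collapse must not
 * proceed.
 */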
static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
					unsigned long address,
					pte_t *pte,
					struct list_head *compound_pagelist)
{
	struct page *page = NULL;
	pte_t *_pte;
	int none_or_zero = 0, shared = 0, result = 0, referenced = 0;
	bool writable = false;

	for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (pte_none(pteval) || (pte_present(pteval) &&
				is_zero_pfn(pte_pfn(pteval)))) {
			if (!userfaultfd_armed(vma) &&
			    ++none_or_zero <= khugepaged_max_ptes_none) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				goto out;
			}
		}
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
			goto out;
		}
		page = vm_normal_page(vma, address, pteval);
		if (unlikely(!page)) {
			result = SCAN_PAGE_NULL;
			goto out;
		}

		VM_BUG_ON_PAGE(!PageAnon(page), page);

		if (page_mapcount(page) > 1 &&
				++shared > khugepaged_max_ptes_shared) {
			result = SCAN_EXCEED_SHARED_PTE;
			goto out;
		}

		if (PageCompound(page)) {
			struct page *p;
			page = compound_head(page);

			/*
			 * Check if we have dealt with the compound page
			 * already
			 */
			list_for_each_entry(p, compound_pagelist, lru) {
				if (page == p)
					goto next;
			}
		}

		/*
		 * We can do it before isolate_lru_page because the
		 * page can't be freed from under us. NOTE: PG_lock
		 * is needed to serialize against split_huge_page
		 * when invoked from the VM.
		 */
		if (!trylock_page(page)) {
			result = SCAN_PAGE_LOCK;
			goto out;
		}

		/*
		 * Check if the page has any GUP (or other external) pins.
		 *
		 * The page table that maps the page has been already unlinked
		 * from the page table tree and this process cannot get
		 * an additional pin on the page.
		 *
		 * New pins can come later if the page is shared across fork,
		 * but not from this process. The other process cannot write to
		 * the page, only trigger CoW.
		 */
		if (!is_refcount_suitable(page)) {
			unlock_page(page);
			result = SCAN_PAGE_COUNT;
			goto out;
		}
		if (!pte_write(pteval) && PageSwapCache(page) &&
				!reuse_swap_page(page, NULL)) {
			/*
			 * Page is in the swap cache and cannot be re-used.
			 * It cannot be collapsed into a THP.
			 */
			unlock_page(page);
			result = SCAN_SWAP_CACHE_PAGE;
			goto out;
		}

		/*
		 * Isolate the page to avoid collapsing a hugepage
		 * currently in use by the VM.
		 */
		if (isolate_lru_page(page)) {
			unlock_page(page);
			result = SCAN_DEL_PAGE_LRU;
			goto out;
		}
		mod_node_page_state(page_pgdat(page),
				NR_ISOLATED_ANON + page_is_file_lru(page),
				compound_nr(page));
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(PageLRU(page), page);

		if (PageCompound(page))
			list_add_tail(&page->lru, compound_pagelist);
next:
		/* There should be enough young pte to collapse the page */
		if (pte_young(pteval) ||
		    page_is_young(page) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced++;

		if (pte_write(pteval))
			writable = true;
	}

	if (likely(writable)) {
		if (likely(referenced)) {
			result = SCAN_SUCCEED;
			trace_mm_collapse_huge_page_isolate(page, none_or_zero,
							    referenced, writable, result);
			return 1;
		}
	} else {
		result = SCAN_PAGE_RO;
	}

out:
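	/*
	 * Failure path: every page isolated so far (including any compound
	 * pages queued on compound_pagelist) is unlocked and returned to
	 * the LRU before the scan result is reported.
	 */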
	release_pte_pages(pte, _pte, compound_pagelist);
	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
					    referenced, writable, result);
	return 0;
}

static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
				      struct vm_area_struct *vma,
				      unsigned long address,
				      spinlock_t *ptl,
				      struct list_head *compound_pagelist)
{
	struct page *src_page, *tmp;
	pte_t *_pte;
	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
				_pte++, page++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;

		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			clear_user_highpage(page, address);
			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
			if (is_zero_pfn(pte_pfn(pteval))) {
				/*
				 * ptl mostly unnecessary.
				 */
				spin_lock(ptl);
				/*
				 * paravirt calls inside pte_clear here are
				 * superfluous.
				 */
				pte_clear(vma->vm_mm, address, _pte);
				spin_unlock(ptl);
			}
		} else {
			src_page = pte_page(pteval);
			copy_user_highpage(page, src_page, address, vma);
			if (!PageCompound(src_page))
				release_pte_page(src_page);
			/*
			 * ptl mostly unnecessary, but preempt has to
			 * be disabled to update the per-cpu stats
			 * inside page_remove_rmap().
			 */
			spin_lock(ptl);
			/*
			 * paravirt calls inside pte_clear here are
			 * superfluous.
			 */
			pte_clear(vma->vm_mm, address, _pte);
			page_remove_rmap(src_page, false);
			spin_unlock(ptl);
			free_page_and_swap_cache(src_page);
		}
	}

	list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {
		list_del(&src_page->lru);
		release_pte_page(src_page);
	}
}

static void khugepaged_alloc_sleep(void)
{
	DEFINE_WAIT(wait);

	add_wait_queue(&khugepaged_wait, &wait);
	freezable_schedule_timeout_interruptible(
		msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
	remove_wait_queue(&khugepaged_wait, &wait);
}

static int khugepaged_node_load[MAX_NUMNODES];

static bool khugepaged_scan_abort(int nid)
{
	int i;

	/*
	 * If node_reclaim_mode is disabled, then no extra effort is made to
	 * allocate memory locally.
	 */
	if (!node_reclaim_mode)
		return false;

	/* If there is a count for this node already, it must be acceptable */
	if (khugepaged_node_load[nid])
		return false;

	for (i = 0; i < MAX_NUMNODES; i++) {
		if (!khugepaged_node_load[i])
			continue;
		if (node_distance(nid, i) > node_reclaim_distance)
			return true;
	}
	return false;
}

/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
{
	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
}

#ifdef CONFIG_NUMA
static int khugepaged_find_target_node(void)
{
	static int last_khugepaged_target_node = NUMA_NO_NODE;
	int nid, target_node = 0, max_value = 0;

	/* find first node with max normal pages hit */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		if (khugepaged_node_load[nid] > max_value) {
			max_value = khugepaged_node_load[nid];
			target_node = nid;
		}

	/* do some balance if several nodes have the same hit record */
	if (target_node <= last_khugepaged_target_node)
		for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
				nid++)
			if (max_value == khugepaged_node_load[nid]) {
				target_node = nid;
				break;
			}

	last_khugepaged_target_node = target_node;
	return target_node;
}

static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	if (IS_ERR(*hpage)) {
		if (!*wait)
			return false;

		*wait = false;
		*hpage = NULL;
		khugepaged_alloc_sleep();
	} else if (*hpage) {
		put_page(*hpage);
		*hpage = NULL;
	}

	return true;
}

static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	VM_BUG_ON_PAGE(*hpage, *hpage);

	*hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
	if (unlikely(!*hpage)) {
		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
		*hpage = ERR_PTR(-ENOMEM);
		return NULL;
	}

	prep_transhuge_page(*hpage);
	count_vm_event(THP_COLLAPSE_ALLOC);
	return *hpage;
}
#else
static int khugepaged_find_target_node(void)
{
	return 0;
}

static inline struct page *alloc_khugepaged_hugepage(void)
{
	struct page *page;

	page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
			   HPAGE_PMD_ORDER);
	if (page)
		prep_transhuge_page(page);
	return page;
}

static struct page *khugepaged_alloc_hugepage(bool *wait)
{
	struct page *hpage;

	do {
		hpage = alloc_khugepaged_hugepage();
		if (!hpage) {
			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
			if (!*wait)
				return NULL;

			*wait = false;
			khugepaged_alloc_sleep();
		} else
			count_vm_event(THP_COLLAPSE_ALLOC);
	} while (unlikely(!hpage) && likely(khugepaged_enabled()));

	return hpage;
}

static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	/*
	 * If the hpage allocated earlier was briefly exposed in page cache
	 * before collapse_file() failed, it is possible that racing lookups
	 * have not yet completed, and would then be unpleasantly surprised by
	 * finding the hpage reused for the same mapping at a different offset.
	 * Just release the previous allocation if there is any danger of that.
	 */
	if (*hpage && page_count(*hpage) > 1) {
		put_page(*hpage);
		*hpage = NULL;
	}

	if (!*hpage)
		*hpage = khugepaged_alloc_hugepage(wait);

	if (unlikely(!*hpage))
		return false;

	return true;
}

static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	VM_BUG_ON(!*hpage);

	return *hpage;
}
#endif
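/*
 * Example of the target-node selection above (CONFIG_NUMA, illustrative
 * numbers): if a scan found 300 of the 512 ptes on node 0 and 212 on
 * node 1, khugepaged_node_load[] ends up as {300, 212, 0, ...} and
 * khugepaged_find_target_node() picks node 0; when several nodes tie for
 * the maximum, the last_khugepaged_target_node round-robin step spreads
 * successive allocations across the tied nodes.
 */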
/*
 * If the mmap_lock was temporarily dropped, revalidate the vma
 * before taking the mmap_lock again.
 * Return 0 if it succeeds; otherwise return a non-zero value (scan code).
 */
static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
		struct vm_area_struct **vmap)
{
	struct vm_area_struct *vma;
	unsigned long hstart, hend;

	if (unlikely(khugepaged_test_exit(mm)))
		return SCAN_ANY_PROCESS;

	*vmap = vma = find_vma(mm, address);
	if (!vma)
		return SCAN_VMA_NULL;

	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
		return SCAN_ADDRESS_RANGE;
	if (!hugepage_vma_check(vma, vma->vm_flags))
		return SCAN_VMA_CHECK;
	/* Anon VMA expected */
	if (!vma->anon_vma || vma->vm_ops)
		return SCAN_VMA_CHECK;
	return 0;
}

/*
 * Bring missing pages in from swap, to complete THP collapse.
 * Only done if khugepaged_scan_pmd believes it is worthwhile.
 *
 * Called and returns without pte mapped or spinlocks held,
 * but with mmap_lock held to protect against vma changes.
 */
static bool __collapse_huge_page_swapin(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmd,
					int referenced)
{
	int swapped_in = 0;
	vm_fault_t ret = 0;
	struct vm_fault vmf = {
		.vma = vma,
		.address = address,
		.flags = FAULT_FLAG_ALLOW_RETRY,
		.pmd = pmd,
		.pgoff = linear_page_index(vma, address),
	};

	vmf.pte = pte_offset_map(pmd, address);
	for (; vmf.address < address + HPAGE_PMD_NR*PAGE_SIZE;
			vmf.pte++, vmf.address += PAGE_SIZE) {
		vmf.orig_pte = *vmf.pte;
		if (!is_swap_pte(vmf.orig_pte))
			continue;
		swapped_in++;
		ret = do_swap_page(&vmf);

		/* do_swap_page returns VM_FAULT_RETRY with released mmap_lock */
		if (ret & VM_FAULT_RETRY) {
			mmap_read_lock(mm);
			if (hugepage_vma_revalidate(mm, address, &vmf.vma)) {
				/* vma is no longer available, don't continue to swapin */
				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
				return false;
			}
			/* check if the pmd is still valid */
			if (mm_find_pmd(mm, address) != pmd) {
				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
				return false;
			}
		}
		if (ret & VM_FAULT_ERROR) {
			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
			return false;
		}
		/* pte is unmapped now, we need to map it */
		vmf.pte = pte_offset_map(pmd, vmf.address);
	}
	vmf.pte--;
	pte_unmap(vmf.pte);

	/* Drain LRU add pagevec to remove extra pin on the swapped in pages */
	if (swapped_in)
		lru_add_drain();

	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
	return true;
}

static void collapse_huge_page(struct mm_struct *mm,
				   unsigned long address,
				   struct page **hpage,
				   int node, int referenced, int unmapped)
{
	LIST_HEAD(compound_pagelist);
	pmd_t *pmd, _pmd;
	pte_t *pte;
	pgtable_t pgtable;
	struct page *new_page;
	spinlock_t *pmd_ptl, *pte_ptl;
	int isolated = 0, result = 0;
	struct vm_area_struct *vma;
	struct mmu_notifier_range range;
	gfp_t gfp;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	/* Only allocate from the target node */
	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;

	/*
	 * Before allocating the hugepage, release the mmap_lock read lock.
	 * The allocation can take potentially a long time if it involves
	 * sync compaction, and we do not need to hold the mmap_lock during
	 * that. We will recheck the vma after taking it again in write mode.
	 */
	mmap_read_unlock(mm);
	new_page = khugepaged_alloc_page(hpage, gfp, node);
	if (!new_page) {
		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
		goto out_nolock;
	}

	if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) {
		result = SCAN_CGROUP_CHARGE_FAIL;
		goto out_nolock;
	}
	count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);

	mmap_read_lock(mm);
	result = hugepage_vma_revalidate(mm, address, &vma);
	if (result) {
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	pmd = mm_find_pmd(mm, address);
	if (!pmd) {
		result = SCAN_PMD_NULL;
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	/*
	 * __collapse_huge_page_swapin always returns with mmap_lock locked.
	 * If it fails, we release mmap_lock and jump out_nolock.
	 * Continuing to collapse causes inconsistency.
	 */
	if (unmapped && !__collapse_huge_page_swapin(mm, vma, address,
						     pmd, referenced)) {
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	mmap_read_unlock(mm);
	/*
	 * Prevent all access to pagetables with the exception of
	 * gup_fast later handled by the ptep_clear_flush and the VM
	 * handled by the anon_vma lock + PG_lock.
	 */
	mmap_write_lock(mm);
	result = hugepage_vma_revalidate(mm, address, &vma);
	if (result)
		goto out;
	/* check if the pmd is still valid */
	if (mm_find_pmd(mm, address) != pmd)
		goto out;

	anon_vma_lock_write(vma->anon_vma);

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
				address, address + HPAGE_PMD_SIZE);
	mmu_notifier_invalidate_range_start(&range);

	pte = pte_offset_map(pmd, address);
	pte_ptl = pte_lockptr(mm, pmd);

	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
	/*
	 * After this gup_fast can't run anymore. This also removes
	 * any huge TLB entry from the CPU so we won't allow
	 * huge and small TLB entries for the same virtual address
	 * to avoid the risk of CPU bugs in that area.
	 */
	_pmd = pmdp_collapse_flush(vma, address, pmd);
	spin_unlock(pmd_ptl);
	mmu_notifier_invalidate_range_end(&range);

	spin_lock(pte_ptl);
	isolated = __collapse_huge_page_isolate(vma, address, pte,
			&compound_pagelist);
	spin_unlock(pte_ptl);

	if (unlikely(!isolated)) {
		pte_unmap(pte);
		spin_lock(pmd_ptl);
		BUG_ON(!pmd_none(*pmd));
		/*
		 * We can only use set_pmd_at when establishing
		 * hugepmds and never for establishing regular pmds that
		 * points to regular pagetables. Use pmd_populate for that
		 */
		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
		spin_unlock(pmd_ptl);
		anon_vma_unlock_write(vma->anon_vma);
		result = SCAN_FAIL;
		goto out;
	}

	/*
	 * All pages are isolated and locked so anon_vma rmap
	 * can't run anymore.
	 */
	anon_vma_unlock_write(vma->anon_vma);

	__collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl,
			&compound_pagelist);
	pte_unmap(pte);
	__SetPageUptodate(new_page);
	pgtable = pmd_pgtable(_pmd);

	_pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);

	/*
	 * spin_lock() below is not the equivalent of smp_wmb(), so
	 * this is needed to avoid the copy_huge_page writes becoming
	 * visible after the set_pmd_at() write.
	 */
	smp_wmb();

	spin_lock(pmd_ptl);
	BUG_ON(!pmd_none(*pmd));
	page_add_new_anon_rmap(new_page, vma, address, true);
	lru_cache_add_inactive_or_unevictable(new_page, vma);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, address, pmd, _pmd);
	update_mmu_cache_pmd(vma, address, pmd);
	spin_unlock(pmd_ptl);

	*hpage = NULL;

	khugepaged_pages_collapsed++;
	result = SCAN_SUCCEED;
out_up_write:
	mmap_write_unlock(mm);
out_nolock:
	if (!IS_ERR_OR_NULL(*hpage))
		mem_cgroup_uncharge(*hpage);
	trace_mm_collapse_huge_page(mm, isolated, result);
	return;
out:
	goto out_up_write;
}

static int khugepaged_scan_pmd(struct mm_struct *mm,
			       struct vm_area_struct *vma,
			       unsigned long address,
			       struct page **hpage)
{
	pmd_t *pmd;
	pte_t *pte, *_pte;
	int ret = 0, result = 0, referenced = 0;
	int none_or_zero = 0, shared = 0;
	struct page *page = NULL;
	unsigned long _address;
	spinlock_t *ptl;
	int node = NUMA_NO_NODE, unmapped = 0;
	bool writable = false;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	pmd = mm_find_pmd(mm, address);
	if (!pmd) {
		result = SCAN_PMD_NULL;
		goto out;
	}

	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
	     _pte++, _address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (is_swap_pte(pteval)) {
			if (++unmapped <= khugepaged_max_ptes_swap) {
				/*
				 * Always be strict with uffd-wp
				 * enabled swap entries. Please see
				 * comment below for pte_uffd_wp().
				 */
				if (pte_swp_uffd_wp(pteval)) {
					result = SCAN_PTE_UFFD_WP;
					goto out_unmap;
				}
				continue;
			} else {
				result = SCAN_EXCEED_SWAP_PTE;
				goto out_unmap;
			}
		}
		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			if (!userfaultfd_armed(vma) &&
			    ++none_or_zero <= khugepaged_max_ptes_none) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				goto out_unmap;
			}
		}
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
			goto out_unmap;
		}
		if (pte_uffd_wp(pteval)) {
			/*
			 * Don't collapse the page if any of the small
			 * PTEs are armed with uffd write protection.
			 * Here we can also mark the new huge pmd as
			 * write protected if any of the small ones is
			 * marked but that could bring unknown
			 * userfault messages that fall outside of
			 * the registered range. So, just be simple.
			 */
			result = SCAN_PTE_UFFD_WP;
			goto out_unmap;
		}
		if (pte_write(pteval))
			writable = true;

		page = vm_normal_page(vma, _address, pteval);
		if (unlikely(!page)) {
			result = SCAN_PAGE_NULL;
			goto out_unmap;
		}

		if (page_mapcount(page) > 1 &&
				++shared > khugepaged_max_ptes_shared) {
			result = SCAN_EXCEED_SHARED_PTE;
			goto out_unmap;
		}

		page = compound_head(page);

		/*
		 * Record which node the original page is from and save this
		 * information to khugepaged_node_load[].
		 * Khugepaged will allocate the hugepage from the node that
		 * has the max hit record.
		 */
		node = page_to_nid(page);
		if (khugepaged_scan_abort(node)) {
			result = SCAN_SCAN_ABORT;
			goto out_unmap;
		}
		khugepaged_node_load[node]++;
		if (!PageLRU(page)) {
			result = SCAN_PAGE_LRU;
			goto out_unmap;
		}
		if (PageLocked(page)) {
			result = SCAN_PAGE_LOCK;
			goto out_unmap;
		}
		if (!PageAnon(page)) {
			result = SCAN_PAGE_ANON;
			goto out_unmap;
		}

		/*
		 * Check if the page has any GUP (or other external) pins.
		 *
		 * Here the check is racy; it may see total_mapcount > refcount
		 * in some cases.
		 * For example, one process with one forked child process.
		 * The parent has the PMD split due to MADV_DONTNEED, then
		 * the child is trying to unmap the whole PMD, but khugepaged
		 * may be scanning the parent after the child has cleared the
		 * PageDoubleMap flag but before it decrements the mapcount.
		 * So khugepaged may see total_mapcount > refcount.
		 *
		 * But such a case is ephemeral; we could always retry collapse
		 * later. However it may report false positive if the page
		 * has excessive GUP pins (i.e. 512). Anyway the same check
		 * will be done again later; the risk seems low.
		 */
		if (!is_refcount_suitable(page)) {
			result = SCAN_PAGE_COUNT;
			goto out_unmap;
		}
		if (pte_young(pteval) ||
		    page_is_young(page) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced++;
	}
	if (!writable) {
		result = SCAN_PAGE_RO;
	} else if (!referenced || (unmapped && referenced < HPAGE_PMD_NR/2)) {
		result = SCAN_LACK_REFERENCED_PAGE;
	} else {
		result = SCAN_SUCCEED;
		ret = 1;
	}
out_unmap:
	pte_unmap_unlock(pte, ptl);
	if (ret) {
		node = khugepaged_find_target_node();
		/* collapse_huge_page will return with the mmap_lock released */
		collapse_huge_page(mm, address, hpage, node,
				referenced, unmapped);
	}
out:
	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
				     none_or_zero, result, unmapped);
	return ret;
}

static void collect_mm_slot(struct mm_slot *mm_slot)
{
	struct mm_struct *mm = mm_slot->mm;

	lockdep_assert_held(&khugepaged_mm_lock);

	if (khugepaged_test_exit(mm)) {
		/* free mm_slot */
		hash_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);

		/*
		 * Not strictly needed because the mm exited already.
		 *
		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		 */

		/* khugepaged_mm_lock actually not necessary for the below */
		free_mm_slot(mm_slot);
		mmdrop(mm);
	}
}
#ifdef CONFIG_SHMEM
/*
 * Notify khugepaged that given addr of the mm is pte-mapped THP. Then
 * khugepaged should try to collapse the page table.
 */
static int khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
					 unsigned long addr)
{
	struct mm_slot *mm_slot;

	VM_BUG_ON(addr & ~HPAGE_PMD_MASK);

	spin_lock(&khugepaged_mm_lock);
	mm_slot = get_mm_slot(mm);
	if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP))
		mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
	spin_unlock(&khugepaged_mm_lock);
	return 0;
}

/**
 * Try to collapse a pte-mapped THP for mm at address haddr.
 *
 * This function checks whether all the PTEs in the PMD are pointing to the
 * right THP. If so, retract the page table so the THP can refault in as
 * pmd-mapped.
 */
void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
{
	unsigned long haddr = addr & HPAGE_PMD_MASK;
	struct vm_area_struct *vma = find_vma(mm, haddr);
	struct page *hpage;
	pte_t *start_pte, *pte;
	pmd_t *pmd, _pmd;
	spinlock_t *ptl;
	int count = 0;
	int i;

	if (!vma || !vma->vm_file ||
	    vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE)
		return;

	/*
	 * This vm_flags may not have VM_HUGEPAGE if the page was not
	 * collapsed by this mm. But we can still collapse if the page is
	 * the valid THP. Add extra VM_HUGEPAGE so hugepage_vma_check()
	 * will not fail the vma for missing VM_HUGEPAGE
	 */
	if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE))
		return;

	hpage = find_lock_page(vma->vm_file->f_mapping,
			       linear_page_index(vma, haddr));
	if (!hpage)
		return;

	if (!PageHead(hpage))
		goto drop_hpage;

	pmd = mm_find_pmd(mm, haddr);
	if (!pmd)
		goto drop_hpage;

	start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);

	/* step 1: check all mapped PTEs are to the right huge page */
	for (i = 0, addr = haddr, pte = start_pte;
	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
		struct page *page;

		/* empty pte, skip */
		if (pte_none(*pte))
			continue;

		/* page swapped out, abort */
		if (!pte_present(*pte))
			goto abort;

		page = vm_normal_page(vma, addr, *pte);

		/*
		 * Note that uprobe, debugger, or MAP_PRIVATE may change the
		 * page table, but the new page will not be a subpage of hpage.
		 */
		if (hpage + i != page)
			goto abort;
		count++;
	}

	/* step 2: adjust rmap */
	for (i = 0, addr = haddr, pte = start_pte;
	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
		struct page *page;

		if (pte_none(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		page_remove_rmap(page, false);
	}

	pte_unmap_unlock(start_pte, ptl);

	/* step 3: set proper refcount and mm_counters. */
	if (count) {
		page_ref_sub(hpage, count);
		add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
	}

	/* step 4: collapse pmd */
	ptl = pmd_lock(vma->vm_mm, pmd);
	_pmd = pmdp_collapse_flush(vma, haddr, pmd);
	spin_unlock(ptl);
	mm_dec_nr_ptes(mm);
	pte_free(mm, pmd_pgtable(_pmd));

drop_hpage:
	unlock_page(hpage);
	put_page(hpage);
	return;

abort:
	pte_unmap_unlock(start_pte, ptl);
	goto drop_hpage;
}

static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
{
	struct mm_struct *mm = mm_slot->mm;
	int i;

	if (likely(mm_slot->nr_pte_mapped_thp == 0))
		return 0;

	if (!mmap_write_trylock(mm))
		return -EBUSY;

	if (unlikely(khugepaged_test_exit(mm)))
		goto out;

	for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
		collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i]);

out:
	mm_slot->nr_pte_mapped_thp = 0;
	mmap_write_unlock(mm);
	return 0;
}
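/*
 * Note on the bookkeeping above: khugepaged_add_pte_mapped_thp() records
 * at most MAX_PTE_MAPPED_THP (8) addresses per mm; once the array is
 * full, further addresses are silently dropped, and those PMDs are only
 * retracted if a later retract_page_tables() attempt queues them again.
 */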
static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr;
	pmd_t *pmd, _pmd;

	i_mmap_lock_write(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		/*
		 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
		 * got written to. These VMAs are likely not worth investing
		 * mmap_write_lock(mm) as PMD-mapping is likely to be split
		 * later.
		 *
		 * Note that the vma->anon_vma check is racy: it can be set up
		 * after the check but before we took mmap_lock by the fault
		 * path. But page lock would prevent establishing any new ptes
		 * of the page, so we are safe.
		 *
		 * An alternative would be drop the check, but check that page
		 * table is clear before calling pmdp_collapse_flush() under
		 * ptl. It has higher chance to recover THP for the VMA, but
		 * has higher cost too.
		 */
		if (vma->anon_vma)
			continue;
		addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		if (addr & ~HPAGE_PMD_MASK)
			continue;
		if (vma->vm_end < addr + HPAGE_PMD_SIZE)
			continue;
		mm = vma->vm_mm;
		pmd = mm_find_pmd(mm, addr);
		if (!pmd)
			continue;
		/*
		 * We need exclusive mmap_lock to retract page table.
		 *
		 * We use trylock due to lock inversion: we need to acquire
		 * mmap_lock while holding page lock. Fault path does it in
		 * reverse order. Trylock is a way to avoid deadlock.
		 */
		if (mmap_write_trylock(mm)) {
			if (!khugepaged_test_exit(mm)) {
				spinlock_t *ptl = pmd_lock(mm, pmd);
				/* assume page table is clear */
				_pmd = pmdp_collapse_flush(vma, addr, pmd);
				spin_unlock(ptl);
				mm_dec_nr_ptes(mm);
				pte_free(mm, pmd_pgtable(_pmd));
			}
			mmap_write_unlock(mm);
		} else {
			/* Try again later */
			khugepaged_add_pte_mapped_thp(mm, addr);
		}
	}
	i_mmap_unlock_write(mapping);
}
f3f0e1d21 khugepaged: add s... |
/**
99cb0dbd4 mm,thp: add read-... |
 * collapse_file - collapse filemap/tmpfs/shmem pages into a huge one.
f3f0e1d21 khugepaged: add s... |
 *
 * The basic scheme is simple; the details are more complex:
87c460a0b mm/khugepaged: co... |
 *  - allocate and lock a new huge page;
77da9389b mm: Convert colla... |
 *  - scan the page cache, replacing old pages with the new one
99cb0dbd4 mm,thp: add read-... |
 *    + swap/gup in pages if necessary;
f3f0e1d21 khugepaged: add s... |
 *    + fill in gaps;
77da9389b mm: Convert colla... |
 *    + keep old pages around in case rollback is required;
 *  - if replacing succeeds:
f3f0e1d21 khugepaged: add s... |
 *    + copy the data over;
 *    + free the old pages;
87c460a0b mm/khugepaged: co... |
 *    + unlock the huge page;
f3f0e1d21 khugepaged: add s... |
 *  - if replacing failed:
 *    + put all pages back and unfreeze them;
77da9389b mm: Convert colla... |
 *    + restore gaps in the page cache;
87c460a0b mm/khugepaged: co... |
 *    + unlock and free the huge page;
f3f0e1d21 khugepaged: add s... |
 */
579c571e2 khugepaged: renam... |
static void collapse_file(struct mm_struct *mm,
		struct file *file, pgoff_t start,
f3f0e1d21 khugepaged: add s... |
		struct page **hpage, int node)
{
579c571e2 khugepaged: renam... |
	struct address_space *mapping = file->f_mapping;
f3f0e1d21 khugepaged: add s... |
	gfp_t gfp;
77da9389b mm: Convert colla... |
	struct page *new_page;
f3f0e1d21 khugepaged: add s... |
	pgoff_t index, end = start + HPAGE_PMD_NR;
	LIST_HEAD(pagelist);
77da9389b mm: Convert colla... |
	XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
f3f0e1d21 khugepaged: add s... |
	int nr_none = 0, result = SCAN_SUCCEED;
99cb0dbd4 mm,thp: add read-... |
	bool is_shmem = shmem_file(file);

	VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
f3f0e1d21 khugepaged: add s... |
	VM_BUG_ON(start & (HPAGE_PMD_NR - 1));

	/* Only allocate from the target node */
41b6167e8 mm: get rid of __... |
	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
f3f0e1d21 khugepaged: add s... |

	new_page = khugepaged_alloc_page(hpage, gfp, node);
	if (!new_page) {
		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
		goto out;
	}

d9eb1ea2b mm: memcontrol: d... |
	if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) {
f3f0e1d21 khugepaged: add s... |
		result = SCAN_CGROUP_CHARGE_FAIL;
		goto out;
	}
9d82c6943 mm: memcontrol: c... |
	count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
f3f0e1d21 khugepaged: add s... |

95feeabb7 mm/khugepaged: fi... |
	/* This will be less messy when we use multi-index entries */
	do {
		xas_lock_irq(&xas);
		xas_create_range(&xas);
		if (!xas_error(&xas))
			break;
		xas_unlock_irq(&xas);
		if (!xas_nomem(&xas, GFP_KERNEL)) {
			result = SCAN_FAIL;
			goto out;
		}
	} while (1);
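	/*
	 * On error, xas_nomem() preallocates the XArray node the failed
	 * xas_create_range() needed (with the lock dropped, under
	 * GFP_KERNEL) and returns true, so the loop retries until the
	 * range exists or the allocation itself fails.
	 */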
042a30824 mm/khugepaged: mi... |
	__SetPageLocked(new_page);
99cb0dbd4 mm,thp: add read-... |
	if (is_shmem)
		__SetPageSwapBacked(new_page);
f3f0e1d21 khugepaged: add s... |
	new_page->index = start;
	new_page->mapping = mapping;

	/*
87c460a0b mm/khugepaged: co... |
	 * At this point the new_page is locked and not up-to-date.
	 * It's safe to insert it into the page cache, because nobody would
	 * be able to map it or use it in another way until we unlock it.
f3f0e1d21 khugepaged: add s... |
	 */
77da9389b mm: Convert colla... |
	xas_set(&xas, start);
	for (index = start; index < end; index++) {
		struct page *page = xas_next(&xas);

		VM_BUG_ON(index != xas.xa_index);
99cb0dbd4 mm,thp: add read-... |
		if (is_shmem) {
			if (!page) {
				/*
				 * Stop if the extent has been truncated or
				 * hole-punched, and is now completely
				 * empty.
				 */
				if (index == start) {
					if (!xas_next_entry(&xas, end - 1)) {
						result = SCAN_TRUNCATED;
						goto xa_locked;
					}
					xas_set(&xas, index);
				}
				if (!shmem_charge(mapping->host, 1)) {
					result = SCAN_FAIL;
042a30824 mm/khugepaged: mi... |
					goto xa_locked;
701270fa1 mm/khugepaged: co... |
				}
99cb0dbd4 mm,thp: add read-... |
				xas_store(&xas, new_page);
				nr_none++;
				continue;
701270fa1 mm/khugepaged: co... |
			}

99cb0dbd4 mm,thp: add read-... |
			if (xa_is_value(page) || !PageUptodate(page)) {
				xas_unlock_irq(&xas);
				/* swap in or instantiate the fallocated page */
				if (shmem_getpage(mapping->host, index, &page,
						  SGP_NOHUGE)) {
					result = SCAN_FAIL;
					goto xa_unlocked;
				}
			} else if (trylock_page(page)) {
				get_page(page);
				xas_unlock_irq(&xas);
			} else {
				result = SCAN_PAGE_LOCK;
042a30824 mm/khugepaged: mi... |
				goto xa_locked;
77da9389b mm: Convert colla... |
			}
99cb0dbd4 mm,thp: add read-... |
		} else {	/* !is_shmem */
			if (!page || xa_is_value(page)) {
				xas_unlock_irq(&xas);
				page_cache_sync_readahead(mapping, &file->f_ra,
							  file, index,
e5a59d308 mm/khugepaged.c: ... |
							  end - index);
99cb0dbd4 mm,thp: add read-... |
				/* drain pagevecs to help isolate_lru_page() */
				lru_add_drain();
				page = find_lock_page(mapping, index);
				if (unlikely(page == NULL)) {
					result = SCAN_FAIL;
					goto xa_unlocked;
				}
75f360696 mm/thp: flush fil... |
			} else if (PageDirty(page)) {
				/*
				 * khugepaged only works on read-only fds, so
				 * this page is dirty because it hasn't been
				 * flushed since the first write. There won't
				 * be new dirty pages.
				 *
				 * Trigger async flush here and hope the
				 * writeback is done when khugepaged revisits
				 * this page.
				 *
				 * This is a one-off situation. We are not
				 * forcing writeback in a loop.
				 */
				xas_unlock_irq(&xas);
				filemap_flush(mapping);
				result = SCAN_FAIL;
				goto xa_unlocked;
99cb0dbd4 mm,thp: add read-... |
			} else if (trylock_page(page)) {
				get_page(page);
				xas_unlock_irq(&xas);
			} else {
				result = SCAN_PAGE_LOCK;
				goto xa_locked;
f3f0e1d21 khugepaged: add s... |
			}
		}

		/*
b93b01631 page cache: use x... |
		 * The page must be locked, so we can drop the i_pages lock
f3f0e1d21 khugepaged: add s... |
		 * without racing with truncate.
		 */
		VM_BUG_ON_PAGE(!PageLocked(page), page);
4655e5e5f mm,thp: recheck e... |

		/* make sure the page is up to date */
		if (unlikely(!PageUptodate(page))) {
			result = SCAN_FAIL;
			goto out_unlock;
		}

06a5e1268 mm/khugepaged: co... |
		/*
		 * If the file was truncated then extended, or hole-punched,
		 * before we locked the first page, then a THP might be there
		 * already.
		 */
		if (PageTransCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			goto out_unlock;
		}

f3f0e1d21 khugepaged: add s... |
		if (page_mapping(page) != mapping) {
			result = SCAN_TRUNCATED;
			goto out_unlock;
		}

4655e5e5f mm,thp: recheck e... |
		if (!is_shmem && PageDirty(page)) {
			/*
			 * khugepaged only works on read-only fds, so this
			 * page is dirty because it hasn't been flushed since
			 * the first write.
			 */
			result = SCAN_FAIL;
			goto out_unlock;
		}

f3f0e1d21 khugepaged: add s... |
		if (isolate_lru_page(page)) {
			result = SCAN_DEL_PAGE_LRU;
042a30824 mm/khugepaged: mi... |
			goto out_unlock;
f3f0e1d21 khugepaged: add s... |
		}

99cb0dbd4 mm,thp: add read-... |
		if (page_has_private(page) &&
		    !try_to_release_page(page, GFP_KERNEL)) {
			result = SCAN_PAGE_HAS_PRIVATE;
2f33a7060 mm,thp: stop leak... |
			putback_lru_page(page);
99cb0dbd4 mm,thp: add read-... |
			goto out_unlock;
		}

f3f0e1d21 khugepaged: add s... |
		if (page_mapped(page))
977fbdcd5 mm: add unmap_map... |
			unmap_mapping_pages(mapping, index, 1, false);
f3f0e1d21 khugepaged: add s... |

77da9389b mm: Convert colla... |
		xas_lock_irq(&xas);
		xas_set(&xas, index);

		VM_BUG_ON_PAGE(page != xas_load(&xas), page);
f3f0e1d21 khugepaged: add s... |
		VM_BUG_ON_PAGE(page_mapped(page), page);

		/*
		 * The page is expected to have page_count() == 3:
		 *  - we hold a pin on it;
77da9389b mm: Convert colla... |
		 *  - one reference from the page cache;
f3f0e1d21 khugepaged: add s... |
		 *  - one from isolate_lru_page;
		 */
		if (!page_ref_freeze(page, 3)) {
			result = SCAN_PAGE_COUNT;
042a30824 mm/khugepaged: mi... |
			xas_unlock_irq(&xas);
			putback_lru_page(page);
			goto out_unlock;
f3f0e1d21 khugepaged: add s... |
		}

		/*
		 * Add the page to the list to be able to undo the collapse
		 * if something goes wrong.
		 */
		list_add_tail(&page->lru, &pagelist);

		/* Finally, replace with the new page. */
4101196b1 mm: page cache: s... |
		xas_store(&xas, new_page);
f3f0e1d21 khugepaged: add s... |
		continue;
out_unlock:
		unlock_page(page);
		put_page(page);
042a30824 mm/khugepaged: mi... |
		goto xa_unlocked;
f3f0e1d21 khugepaged: add s... |
	}
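	/*
	 * Falling out of the loop means every index in [start, end) was
	 * handled: each old page is locked, off the LRU, refcount-frozen
	 * and parked on pagelist, and its slot now points at new_page.
	 */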
99cb0dbd4 mm,thp: add read-... |
	if (is_shmem)
		__inc_node_page_state(new_page, NR_SHMEM_THPS);
09d91cda0 mm,thp: avoid wri... |
	else {
99cb0dbd4 mm,thp: add read-... |
		__inc_node_page_state(new_page, NR_FILE_THPS);
09d91cda0 mm,thp: avoid wri... |
		filemap_nr_thps_inc(mapping);
	}

042a30824 mm/khugepaged: mi... |
	if (nr_none) {
9d82c6943 mm: memcontrol: c... |
		__mod_lruvec_page_state(new_page, NR_FILE_PAGES, nr_none);
99cb0dbd4 mm,thp: add read-... |
		if (is_shmem)
9d82c6943 mm: memcontrol: c... |
			__mod_lruvec_page_state(new_page, NR_SHMEM, nr_none);
042a30824 mm/khugepaged: mi... |
	}

xa_locked:
	xas_unlock_irq(&xas);
77da9389b mm: Convert colla... |
xa_unlocked:

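	/*
	 * Both exits converge here with the xas lock dropped: jumps to
	 * xa_locked arrived holding it, jumps to xa_unlocked did not.
	 * result now picks between committing the collapse and rolling
	 * it back.
	 */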
f3f0e1d21 khugepaged: add s... |
	if (result == SCAN_SUCCEED) {
77da9389b mm: Convert colla... |
		struct page *page, *tmp;

f3f0e1d21 khugepaged: add s... |
		/*
77da9389b mm: Convert colla... |
		 * Replacing the old pages with the new one has succeeded, now
		 * we need to copy the content and free the old pages.
f3f0e1d21 khugepaged: add s... |
		 */
2af8ff291 mm/khugepaged: co... |
		index = start;
f3f0e1d21 khugepaged: add s... |
		list_for_each_entry_safe(page, tmp, &pagelist, lru) {
2af8ff291 mm/khugepaged: co... |
			while (index < page->index) {
				clear_highpage(new_page + (index % HPAGE_PMD_NR));
				index++;
			}
f3f0e1d21 khugepaged: add s... |
			copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
				      page);
			list_del(&page->lru);
			page->mapping = NULL;
042a30824 mm/khugepaged: mi... |
			page_ref_unfreeze(page, 1);
f3f0e1d21 khugepaged: add s... |
			ClearPageActive(page);
			ClearPageUnevictable(page);
042a30824 mm/khugepaged: mi... |
			unlock_page(page);
f3f0e1d21 khugepaged: add s... |
			put_page(page);
2af8ff291 mm/khugepaged: co... |
			index++;
		}
		while (index < end) {
			clear_highpage(new_page + (index % HPAGE_PMD_NR));
			index++;
f3f0e1d21 khugepaged: add s... |
		}
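		/*
		 * new_page + i addresses subpage i of the compound page,
		 * so the loop above filled the huge page slot by slot;
		 * the nr_none holes were zeroed with clear_highpage(),
		 * matching what a read of the sparse file would return.
		 */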
f3f0e1d21 khugepaged: add s... |
		SetPageUptodate(new_page);
87c460a0b mm/khugepaged: co... |
		page_ref_add(new_page, HPAGE_PMD_NR - 1);
6058eaec8 mm: fold and remo... |
		if (is_shmem)
99cb0dbd4 mm,thp: add read-... |
			set_page_dirty(new_page);
6058eaec8 mm: fold and remo... |
		lru_cache_add(new_page);
f3f0e1d21 khugepaged: add s... |

042a30824 mm/khugepaged: mi... |
		/*
		 * Remove pte page tables, so we can re-fault the page as huge.
		 */
		retract_page_tables(mapping, start);
f3f0e1d21 khugepaged: add s... |
		*hpage = NULL;
87aa75290 mm: thp: inc coun... |

		khugepaged_pages_collapsed++;
f3f0e1d21 khugepaged: add s... |
	} else {
77da9389b mm: Convert colla... |
		struct page *page;
aaa52e340 mm/khugepaged: fi... |

77da9389b mm: Convert colla... |
		/* Something went wrong: roll back page cache changes */
		xas_lock_irq(&xas);
aaa52e340 mm/khugepaged: fi... |
		mapping->nrpages -= nr_none;
99cb0dbd4 mm,thp: add read-... |

		if (is_shmem)
			shmem_uncharge(mapping->host, nr_none);
aaa52e340 mm/khugepaged: fi... |

77da9389b mm: Convert colla... |
		xas_set(&xas, start);
		xas_for_each(&xas, page, end - 1) {
f3f0e1d21 khugepaged: add s... |
			page = list_first_entry_or_null(&pagelist,
					struct page, lru);
77da9389b mm: Convert colla... |
			if (!page || xas.xa_index < page->index) {
f3f0e1d21 khugepaged: add s... |
				if (!nr_none)
					break;
				nr_none--;
59749e6ce mm: khugepaged: f... |
				/* Put holes back where they were */
77da9389b mm: Convert colla... |
				xas_store(&xas, NULL);
f3f0e1d21 khugepaged: add s... |
				continue;
			}

77da9389b mm: Convert colla... |
			VM_BUG_ON_PAGE(page->index != xas.xa_index, page);
f3f0e1d21 khugepaged: add s... |

			/* Unfreeze the page. */
			list_del(&page->lru);
			page_ref_unfreeze(page, 2);
77da9389b mm: Convert colla... |
			xas_store(&xas, page);
			xas_pause(&xas);
			xas_unlock_irq(&xas);
f3f0e1d21 khugepaged: add s... |
			unlock_page(page);
042a30824 mm/khugepaged: mi... |
			putback_lru_page(page);
77da9389b mm: Convert colla... |
			xas_lock_irq(&xas);
f3f0e1d21 khugepaged: add s... |
		}
		VM_BUG_ON(nr_none);
77da9389b mm: Convert colla... |
		xas_unlock_irq(&xas);
f3f0e1d21 khugepaged: add s... |

		new_page->mapping = NULL;
	}

042a30824 mm/khugepaged: mi... |
	unlock_page(new_page);
f3f0e1d21 khugepaged: add s... |
out:
	VM_BUG_ON(!list_empty(&pagelist));
9d82c6943 mm: memcontrol: c... |
	if (!IS_ERR_OR_NULL(*hpage))
		mem_cgroup_uncharge(*hpage);
f3f0e1d21 khugepaged: add s... |
	/* TODO: tracepoints */
}
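/*
 * khugepaged_scan_file() is the cheap pre-check, run under RCU: it
 * walks the page cache range once, counting present pages and swap
 * entries against the khugepaged_max_ptes_none/_swap limits, and only
 * calls collapse_file() when the range still qualifies.
 */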
579c571e2 khugepaged: renam... |
static void khugepaged_scan_file(struct mm_struct *mm,
		struct file *file, pgoff_t start, struct page **hpage)
f3f0e1d21 khugepaged: add s... |
{
	struct page *page = NULL;
579c571e2 khugepaged: renam... |
	struct address_space *mapping = file->f_mapping;
85b392dba mm: Convert khuge... |
	XA_STATE(xas, &mapping->i_pages, start);
f3f0e1d21 khugepaged: add s... |
	int present, swap;
	int node = NUMA_NO_NODE;
	int result = SCAN_SUCCEED;

	present = 0;
	swap = 0;
	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
	rcu_read_lock();
85b392dba mm: Convert khuge... |
	xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
		if (xas_retry(&xas, page))
f3f0e1d21 khugepaged: add s... |
			continue;

85b392dba mm: Convert khuge... |
		if (xa_is_value(page)) {
f3f0e1d21 khugepaged: add s... |
			if (++swap > khugepaged_max_ptes_swap) {
				result = SCAN_EXCEED_SWAP_PTE;
				break;
			}
			continue;
		}

		if (PageTransCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			break;
		}

		node = page_to_nid(page);
		if (khugepaged_scan_abort(node)) {
			result = SCAN_SCAN_ABORT;
			break;
		}
		khugepaged_node_load[node]++;

		if (!PageLRU(page)) {
			result = SCAN_PAGE_LRU;
			break;
		}

99cb0dbd4 mm,thp: add read-... |
		if (page_count(page) !=
		    1 + page_mapcount(page) + page_has_private(page)) {
f3f0e1d21 khugepaged: add s... |
			result = SCAN_PAGE_COUNT;
			break;
		}

		/*
		 * We probably should check if the page is referenced here,
		 * but nobody would transfer pte_young() to PageReferenced()
		 * for us. And the rmap walk here is just too costly...
		 */

		present++;

		if (need_resched()) {
85b392dba mm: Convert khuge... |
			xas_pause(&xas);
f3f0e1d21 khugepaged: add s... |
			cond_resched_rcu();
		}
	}
	rcu_read_unlock();

	if (result == SCAN_SUCCEED) {
		if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
			result = SCAN_EXCEED_NONE_PTE;
		} else {
			node = khugepaged_find_target_node();
579c571e2 khugepaged: renam... |
			collapse_file(mm, file, start, hpage, node);
f3f0e1d21 khugepaged: add s... |
		}
	}

	/* TODO: tracepoints */
}
#else
579c571e2 khugepaged: renam... |
static void khugepaged_scan_file(struct mm_struct *mm,
		struct file *file, pgoff_t start, struct page **hpage)
f3f0e1d21 khugepaged: add s... |
{
	BUILD_BUG();
}

27e1f8273 khugepaged: enabl... |
static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
{
	return 0;
}
f3f0e1d21 khugepaged: add s... |
#endif
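/*
 * khugepaged_scan_mm_slot() performs one scan quantum: it resumes from
 * the (mm_slot, address) cursor kept in khugepaged_scan, walks VMAs
 * until roughly "pages" pages have been looked at or the mm is
 * exhausted, and returns the progress made so the caller can budget
 * the rest of the batch.
 */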
b46e756f5 thp: extract khug... |
static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
					    struct page **hpage)
	__releases(&khugepaged_mm_lock)
	__acquires(&khugepaged_mm_lock)
{
	struct mm_slot *mm_slot;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int progress = 0;

	VM_BUG_ON(!pages);
35f3aa39f mm: Replace spin_... |
	lockdep_assert_held(&khugepaged_mm_lock);
b46e756f5 thp: extract khug... |

	if (khugepaged_scan.mm_slot)
		mm_slot = khugepaged_scan.mm_slot;
	else {
		mm_slot = list_entry(khugepaged_scan.mm_head.next,
				     struct mm_slot, mm_node);
		khugepaged_scan.address = 0;
		khugepaged_scan.mm_slot = mm_slot;
	}
	spin_unlock(&khugepaged_mm_lock);
27e1f8273 khugepaged: enabl... |
	khugepaged_collapse_pte_mapped_thps(mm_slot);
b46e756f5 thp: extract khug... |

	mm = mm_slot->mm;
3b454ad35 mm: thp: use down... |
	/*
	 * Don't wait for the semaphore (to avoid long wait times). Just move
	 * to the next mm on the list.
	 */
	vma = NULL;
d8ed45c5d mmap locking API:... |
	if (unlikely(!mmap_read_trylock(mm)))
c1e8d7c6a mmap locking API:... |
		goto breakouterloop_mmap_lock;
3b454ad35 mm: thp: use down... |
	if (likely(!khugepaged_test_exit(mm)))
b46e756f5 thp: extract khug... |
		vma = find_vma(mm, khugepaged_scan.address);

	progress++;
	for (; vma; vma = vma->vm_next) {
		unsigned long hstart, hend;

		cond_resched();
		if (unlikely(khugepaged_test_exit(mm))) {
			progress++;
			break;
		}
50f8b92f2 mm: thp: pass cor... |
		if (!hugepage_vma_check(vma, vma->vm_flags)) {
b46e756f5 thp: extract khug... |
skip:
			progress++;
			continue;
		}
		hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
		hend = vma->vm_end & HPAGE_PMD_MASK;
		if (hstart >= hend)
			goto skip;
		if (khugepaged_scan.address > hend)
			goto skip;
		if (khugepaged_scan.address < hstart)
			khugepaged_scan.address = hstart;
		VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
396bcc529 mm: remove CONFIG... |
		if (shmem_file(vma->vm_file) && !shmem_huge_enabled(vma))
			goto skip;
b46e756f5 thp: extract khug... |

		while (khugepaged_scan.address < hend) {
			int ret;
			cond_resched();
			if (unlikely(khugepaged_test_exit(mm)))
				goto breakouterloop;

			VM_BUG_ON(khugepaged_scan.address < hstart ||
				  khugepaged_scan.address + HPAGE_PMD_SIZE >
				  hend);
99cb0dbd4 mm,thp: add read-... |
			if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
396bcc529 mm: remove CONFIG... |
				struct file *file = get_file(vma->vm_file);
f3f0e1d21 khugepaged: add s... |
				pgoff_t pgoff = linear_page_index(vma,
						khugepaged_scan.address);
99cb0dbd4 mm,thp: add read-... |

d8ed45c5d mmap locking API:... |
				mmap_read_unlock(mm);
f3f0e1d21 khugepaged: add s... |
				ret = 1;
579c571e2 khugepaged: renam... |
				khugepaged_scan_file(mm, file, pgoff, hpage);
f3f0e1d21 khugepaged: add s... |
				fput(file);
			} else {
				ret = khugepaged_scan_pmd(mm, vma,
						khugepaged_scan.address,
						hpage);
			}
b46e756f5 thp: extract khug... |
			/* move to next address */
			khugepaged_scan.address += HPAGE_PMD_SIZE;
			progress += HPAGE_PMD_NR;
			if (ret)
c1e8d7c6a mmap locking API:... |
				/* we released mmap_lock so break loop */
				goto breakouterloop_mmap_lock;
b46e756f5 thp: extract khug... |
			if (progress >= pages)
				goto breakouterloop;
		}
	}
breakouterloop:
d8ed45c5d mmap locking API:... |
	mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
c1e8d7c6a mmap locking API:... |
breakouterloop_mmap_lock:
b46e756f5 thp: extract khug... |

	spin_lock(&khugepaged_mm_lock);
	VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
	/*
	 * Release the current mm_slot if this mm is about to die, or
	 * if we scanned all vmas of this mm.
	 */
	if (khugepaged_test_exit(mm) || !vma) {
		/*
		 * Make sure that if mm_users is reaching zero while
		 * khugepaged runs here, khugepaged_exit will find
		 * mm_slot not pointing to the exiting mm.
		 */
		if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
			khugepaged_scan.mm_slot = list_entry(
				mm_slot->mm_node.next,
				struct mm_slot, mm_node);
			khugepaged_scan.address = 0;
		} else {
			khugepaged_scan.mm_slot = NULL;
			khugepaged_full_scans++;
		}

		collect_mm_slot(mm_slot);
	}

	return progress;
}

static int khugepaged_has_work(void)
{
	return !list_empty(&khugepaged_scan.mm_head) &&
		khugepaged_enabled();
}

static int khugepaged_wait_event(void)
{
	return !list_empty(&khugepaged_scan.mm_head) ||
		kthread_should_stop();
}

static void khugepaged_do_scan(void)
{
	struct page *hpage = NULL;
	unsigned int progress = 0, pass_through_head = 0;
	unsigned int pages = khugepaged_pages_to_scan;
	bool wait = true;

	barrier(); /* write khugepaged_pages_to_scan to local stack */
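	/*
	 * Flush the per-CPU LRU pagevecs first: pages still sitting in a
	 * pagevec are not yet on the LRU lists, so isolate_lru_page()
	 * would fail on them and otherwise-collapsible ranges would be
	 * skipped.
	 */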
a980df33e khugepaged: drain... |
	lru_add_drain_all();

b46e756f5 thp: extract khug... |
	while (progress < pages) {
		if (!khugepaged_prealloc_page(&hpage, &wait))
			break;

		cond_resched();

		if (unlikely(kthread_should_stop() || try_to_freeze()))
			break;

		spin_lock(&khugepaged_mm_lock);
		if (!khugepaged_scan.mm_slot)
			pass_through_head++;
		if (khugepaged_has_work() &&
		    pass_through_head < 2)
			progress += khugepaged_scan_mm_slot(pages - progress,
							    &hpage);
		else
			progress = pages;
		spin_unlock(&khugepaged_mm_lock);
	}

	if (!IS_ERR_OR_NULL(hpage))
		put_page(hpage);
}

static bool khugepaged_should_wakeup(void)
{
	return kthread_should_stop() ||
	       time_after_eq(jiffies, khugepaged_sleep_expire);
}

static void khugepaged_wait_work(void)
{
	if (khugepaged_has_work()) {
		const unsigned long scan_sleep_jiffies =
			msecs_to_jiffies(khugepaged_scan_sleep_millisecs);

		if (!scan_sleep_jiffies)
			return;

		khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
		wait_event_freezable_timeout(khugepaged_wait,
					     khugepaged_should_wakeup(),
					     scan_sleep_jiffies);
		return;
	}

	if (khugepaged_enabled())
		wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
}

static int khugepaged(void *none)
{
	struct mm_slot *mm_slot;

	set_freezable();
	set_user_nice(current, MAX_NICE);

	while (!kthread_should_stop()) {
		khugepaged_do_scan();
		khugepaged_wait_work();
	}

	spin_lock(&khugepaged_mm_lock);
	mm_slot = khugepaged_scan.mm_slot;
	khugepaged_scan.mm_slot = NULL;
	if (mm_slot)
		collect_mm_slot(mm_slot);
	spin_unlock(&khugepaged_mm_lock);
	return 0;
}

static void set_recommended_min_free_kbytes(void)
{
	struct zone *zone;
	int nr_zones = 0;
	unsigned long recommended_min;

b7d349c74 mm/thp: don't cou... |
	for_each_populated_zone(zone) {
		/*
		 * We don't need to worry about fragmentation of
		 * ZONE_MOVABLE since it only has movable pages.
		 */
		if (zone_idx(zone) > gfp_zone(GFP_USER))
			continue;
b46e756f5 thp: extract khug... |
		nr_zones++;
b7d349c74 mm/thp: don't cou... |
	}
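	/*
	 * Worked example (values are illustrative, not from this file):
	 * with 4K pages and 2MB pageblocks, pageblock_nr_pages is 512 and
	 * MIGRATE_PCPTYPES is typically 3. For a single zone the sum
	 * below is 512 * 2 + 512 * 3 * 3 = 5632 pages, which the
	 * PAGE_SHIFT - 10 shift converts to 22528 kB, before the
	 * 5%-of-lowmem cap is applied.
	 */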
b46e756f5 thp: extract khug... |
	/* Ensure 2 pageblocks are free to assist fragmentation avoidance */
	recommended_min = pageblock_nr_pages * nr_zones * 2;

	/*
	 * Make sure that on average at least two pageblocks are almost free
	 * of another type, one for a migratetype to fall back to and a
	 * second to avoid subsequent fallbacks of other types. There are 3
	 * MIGRATE_TYPES we care about.
	 */
	recommended_min += pageblock_nr_pages * nr_zones *
			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;

	/* never allow reserving more than 5% of the lowmem */
	recommended_min = min(recommended_min,
			      (unsigned long) nr_free_buffer_pages() / 20);
	recommended_min <<= (PAGE_SHIFT-10);

	if (recommended_min > min_free_kbytes) {
		if (user_min_free_kbytes >= 0)
			pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
				min_free_kbytes, recommended_min);

		min_free_kbytes = recommended_min;
	}

	setup_per_zone_wmarks();
}

int start_stop_khugepaged(void)
{
b46e756f5 thp: extract khug... |
	int err = 0;

	mutex_lock(&khugepaged_mutex);
	if (khugepaged_enabled()) {
		if (!khugepaged_thread)
			khugepaged_thread = kthread_run(khugepaged, NULL,
							"khugepaged");
		if (IS_ERR(khugepaged_thread)) {
			pr_err("khugepaged: kthread_run(khugepaged) failed\n");
			err = PTR_ERR(khugepaged_thread);
			khugepaged_thread = NULL;
			goto fail;
		}

		if (!list_empty(&khugepaged_scan.mm_head))
			wake_up_interruptible(&khugepaged_wait);

		set_recommended_min_free_kbytes();
	} else if (khugepaged_thread) {
		kthread_stop(khugepaged_thread);
		khugepaged_thread = NULL;
	}
fail:
	mutex_unlock(&khugepaged_mutex);
	return err;
}
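/*
 * Called when min_free_kbytes is recomputed (e.g. after memory
 * hotplug) so that the extra reservation requested above is applied
 * again rather than lost with the new watermarks.
 */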
4aab2be09 mm: khugepaged: r... |
void khugepaged_min_free_kbytes_update(void)
{
	mutex_lock(&khugepaged_mutex);
	if (khugepaged_enabled() && khugepaged_thread)
		set_recommended_min_free_kbytes();
	mutex_unlock(&khugepaged_mutex);
}