Commit d516904bd239fe2c9f1bd46cf146bb4b8831321c
Committed by
Linus Torvalds
1 parent
9817626e72
Exists in
master
and in
20 other branches
thp: merge page pre-alloc in khugepaged_loop into khugepaged_do_scan
There are two pre-alloc operations in these two functions, the difference is: - it allows sleeping if page alloc fails in khugepaged_loop - it exits immediately if page alloc fails in khugepaged_do_scan Actually, in khugepaged_do_scan, we can allow the pre-alloc to sleep on the first failure, then the operation in khugepaged_loop can be removed Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com> Cc: Andrea Arcangeli <aarcange@redhat.com> Cc: Hugh Dickins <hughd@google.com> Cc: David Rientjes <rientjes@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Showing 1 changed file with 45 additions and 52 deletions Side-by-side Diff
mm/huge_memory.c
... | ... | @@ -2222,10 +2222,40 @@ |
2222 | 2222 | kthread_should_stop(); |
2223 | 2223 | } |
2224 | 2224 | |
2225 | -static void khugepaged_do_scan(struct page **hpage) | |
2225 | +static void khugepaged_alloc_sleep(void) | |
2226 | 2226 | { |
2227 | + wait_event_freezable_timeout(khugepaged_wait, false, | |
2228 | + msecs_to_jiffies(khugepaged_alloc_sleep_millisecs)); | |
2229 | +} | |
2230 | + | |
2231 | +#ifndef CONFIG_NUMA | |
2232 | +static struct page *khugepaged_alloc_hugepage(bool *wait) | |
2233 | +{ | |
2234 | + struct page *hpage; | |
2235 | + | |
2236 | + do { | |
2237 | + hpage = alloc_hugepage(khugepaged_defrag()); | |
2238 | + if (!hpage) { | |
2239 | + count_vm_event(THP_COLLAPSE_ALLOC_FAILED); | |
2240 | + if (!*wait) | |
2241 | + return NULL; | |
2242 | + | |
2243 | + *wait = false; | |
2244 | + khugepaged_alloc_sleep(); | |
2245 | + } else | |
2246 | + count_vm_event(THP_COLLAPSE_ALLOC); | |
2247 | + } while (unlikely(!hpage) && likely(khugepaged_enabled())); | |
2248 | + | |
2249 | + return hpage; | |
2250 | +} | |
2251 | +#endif | |
2252 | + | |
2253 | +static void khugepaged_do_scan(void) | |
2254 | +{ | |
2255 | + struct page *hpage = NULL; | |
2227 | 2256 | unsigned int progress = 0, pass_through_head = 0; |
2228 | 2257 | unsigned int pages = khugepaged_pages_to_scan; |
2258 | + bool wait = true; | |
2229 | 2259 | |
2230 | 2260 | barrier(); /* write khugepaged_pages_to_scan to local stack */ |
2231 | 2261 | |
2232 | 2262 | |
2233 | 2263 | |
... | ... | @@ -2233,17 +2263,18 @@ |
2233 | 2263 | cond_resched(); |
2234 | 2264 | |
2235 | 2265 | #ifndef CONFIG_NUMA |
2236 | - if (!*hpage) { | |
2237 | - *hpage = alloc_hugepage(khugepaged_defrag()); | |
2238 | - if (unlikely(!*hpage)) { | |
2239 | - count_vm_event(THP_COLLAPSE_ALLOC_FAILED); | |
2266 | + if (!hpage) | |
2267 | + hpage = khugepaged_alloc_hugepage(&wait); | |
2268 | + | |
2269 | + if (unlikely(!hpage)) | |
2270 | + break; | |
2271 | +#else | |
2272 | + if (IS_ERR(hpage)) { | |
2273 | + if (!wait) | |
2240 | 2274 | break; |
2241 | - } | |
2242 | - count_vm_event(THP_COLLAPSE_ALLOC); | |
2275 | + wait = false; | |
2276 | + khugepaged_alloc_sleep(); | |
2243 | 2277 | } |
2244 | -#else | |
2245 | - if (IS_ERR(*hpage)) | |
2246 | - break; | |
2247 | 2278 | #endif |
2248 | 2279 | |
2249 | 2280 | if (unlikely(kthread_should_stop() || freezing(current))) |
2250 | 2281 | |
2251 | 2282 | |
2252 | 2283 | |
... | ... | @@ -2255,37 +2286,16 @@ |
2255 | 2286 | if (khugepaged_has_work() && |
2256 | 2287 | pass_through_head < 2) |
2257 | 2288 | progress += khugepaged_scan_mm_slot(pages - progress, |
2258 | - hpage); | |
2289 | + &hpage); | |
2259 | 2290 | else |
2260 | 2291 | progress = pages; |
2261 | 2292 | spin_unlock(&khugepaged_mm_lock); |
2262 | 2293 | } |
2263 | -} | |
2264 | 2294 | |
2265 | -static void khugepaged_alloc_sleep(void) | |
2266 | -{ | |
2267 | - wait_event_freezable_timeout(khugepaged_wait, false, | |
2268 | - msecs_to_jiffies(khugepaged_alloc_sleep_millisecs)); | |
2295 | + if (!IS_ERR_OR_NULL(hpage)) | |
2296 | + put_page(hpage); | |
2269 | 2297 | } |
2270 | 2298 | |
2271 | -#ifndef CONFIG_NUMA | |
2272 | -static struct page *khugepaged_alloc_hugepage(void) | |
2273 | -{ | |
2274 | - struct page *hpage; | |
2275 | - | |
2276 | - do { | |
2277 | - hpage = alloc_hugepage(khugepaged_defrag()); | |
2278 | - if (!hpage) { | |
2279 | - count_vm_event(THP_COLLAPSE_ALLOC_FAILED); | |
2280 | - khugepaged_alloc_sleep(); | |
2281 | - } else | |
2282 | - count_vm_event(THP_COLLAPSE_ALLOC); | |
2283 | - } while (unlikely(!hpage) && | |
2284 | - likely(khugepaged_enabled())); | |
2285 | - return hpage; | |
2286 | -} | |
2287 | -#endif | |
2288 | - | |
2289 | 2299 | static void khugepaged_wait_work(void) |
2290 | 2300 | { |
2291 | 2301 | try_to_freeze(); |
2292 | 2302 | |
... | ... | @@ -2306,25 +2316,8 @@ |
2306 | 2316 | |
2307 | 2317 | static void khugepaged_loop(void) |
2308 | 2318 | { |
2309 | - struct page *hpage = NULL; | |
2310 | - | |
2311 | 2319 | while (likely(khugepaged_enabled())) { |
2312 | -#ifndef CONFIG_NUMA | |
2313 | - hpage = khugepaged_alloc_hugepage(); | |
2314 | - if (unlikely(!hpage)) | |
2315 | - break; | |
2316 | -#else | |
2317 | - if (IS_ERR(hpage)) { | |
2318 | - khugepaged_alloc_sleep(); | |
2319 | - hpage = NULL; | |
2320 | - } | |
2321 | -#endif | |
2322 | - | |
2323 | - khugepaged_do_scan(&hpage); | |
2324 | - | |
2325 | - if (!IS_ERR_OR_NULL(hpage)) | |
2326 | - put_page(hpage); | |
2327 | - | |
2320 | + khugepaged_do_scan(); | |
2328 | 2321 | khugepaged_wait_work(); |
2329 | 2322 | } |
2330 | 2323 | } |