  /*
   * linux/mm/compaction.c
   *
   * Memory compaction for the reduction of external fragmentation. Note that
   * this heavily depends upon page migration to do all the real heavy
   * lifting
   *
   * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
   */
  #include <linux/cpu.h>
  #include <linux/swap.h>
  #include <linux/migrate.h>
  #include <linux/compaction.h>
  #include <linux/mm_inline.h>
  #include <linux/backing-dev.h>
  #include <linux/sysctl.h>
  #include <linux/sysfs.h>
  #include <linux/page-isolation.h>
  #include <linux/kasan.h>
  #include <linux/kthread.h>
  #include <linux/freezer.h>
  #include <linux/page_owner.h>
  #include "internal.h"
  #ifdef CONFIG_COMPACTION
  static inline void count_compact_event(enum vm_event_item item)
  {
  	count_vm_event(item);
  }
  
  static inline void count_compact_events(enum vm_event_item item, long delta)
  {
  	count_vm_events(item, delta);
  }
  #else
  #define count_compact_event(item) do { } while (0)
  #define count_compact_events(item, delta) do { } while (0)
  #endif
  #if defined CONFIG_COMPACTION || defined CONFIG_CMA
  #define CREATE_TRACE_POINTS
  #include <trace/events/compaction.h>
  #define block_start_pfn(pfn, order)	round_down(pfn, 1UL << (order))
  #define block_end_pfn(pfn, order)	ALIGN((pfn) + 1, 1UL << (order))
  #define pageblock_start_pfn(pfn)	block_start_pfn(pfn, pageblock_order)
  #define pageblock_end_pfn(pfn)		block_end_pfn(pfn, pageblock_order)
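
/*
 * Example (illustrative, assuming pageblock_order == 9, i.e. 512-page
 * pageblocks, as with 2MB hugepages on x86-64 with 4KB base pages):
 *
 *	block_start_pfn(1000, 9)  == 512	round_down(1000, 512)
 *	block_end_pfn(1000, 9)    == 1024	ALIGN(1001, 512)
 *	pageblock_start_pfn(1000) == 512
 *	pageblock_end_pfn(1000)   == 1024
 *
 * block_end_pfn() aligns pfn + 1, so a pfn sitting exactly on a block
 * boundary still yields the end of its own block, not the pfn itself.
 */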
  static unsigned long release_freepages(struct list_head *freelist)
  {
  	struct page *page, *next;
  	unsigned long high_pfn = 0;
  
  	list_for_each_entry_safe(page, next, freelist, lru) {
  		unsigned long pfn = page_to_pfn(page);
  		list_del(&page->lru);
  		__free_page(page);
  		if (pfn > high_pfn)
  			high_pfn = pfn;
  	}
  	return high_pfn;
  }
  static void map_pages(struct list_head *list)
  {
  	unsigned int i, order, nr_pages;
  	struct page *page, *next;
  	LIST_HEAD(tmp_list);
  
  	list_for_each_entry_safe(page, next, list, lru) {
  		list_del(&page->lru);
  
  		order = page_private(page);
  		nr_pages = 1 << order;

  		post_alloc_hook(page, order, __GFP_MOVABLE);
  		if (order)
  			split_page(page, order);

  		for (i = 0; i < nr_pages; i++) {
  			list_add(&page->lru, &tmp_list);
  			page++;
  		}
  	}
  
  	list_splice(&tmp_list, list);
  }
  static inline bool migrate_async_suitable(int migratetype)
  {
  	return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
  }
  #ifdef CONFIG_COMPACTION

  int PageMovable(struct page *page)
  {
  	struct address_space *mapping;
  
  	VM_BUG_ON_PAGE(!PageLocked(page), page);
  	if (!__PageMovable(page))
  		return 0;
  
  	mapping = page_mapping(page);
  	if (mapping && mapping->a_ops && mapping->a_ops->isolate_page)
  		return 1;
  
  	return 0;
  }
  EXPORT_SYMBOL(PageMovable);
  
  void __SetPageMovable(struct page *page, struct address_space *mapping)
  {
  	VM_BUG_ON_PAGE(!PageLocked(page), page);
  	VM_BUG_ON_PAGE((unsigned long)mapping & PAGE_MAPPING_MOVABLE, page);
  	page->mapping = (void *)((unsigned long)mapping | PAGE_MAPPING_MOVABLE);
  }
  EXPORT_SYMBOL(__SetPageMovable);
  
  void __ClearPageMovable(struct page *page)
  {
  	VM_BUG_ON_PAGE(!PageLocked(page), page);
  	VM_BUG_ON_PAGE(!PageMovable(page), page);
	/*
	 * Clear the registered address_space value while keeping the
	 * PAGE_MAPPING_MOVABLE flag, so that the VM can catch a page
	 * released by the driver after isolation. With it, VM migration
	 * doesn't try to put the page back.
	 */
  	page->mapping = (void *)((unsigned long)page->mapping &
  				PAGE_MAPPING_MOVABLE);
  }
  EXPORT_SYMBOL(__ClearPageMovable);
  /* Do not skip compaction more than 64 times */
  #define COMPACT_MAX_DEFER_SHIFT 6
  
/*
 * Compaction is deferred when compaction fails to result in a page
 * allocation success. 1 << compact_defer_shift compactions are skipped up
 * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT.
 */
  void defer_compaction(struct zone *zone, int order)
  {
  	zone->compact_considered = 0;
  	zone->compact_defer_shift++;
  
  	if (order < zone->compact_order_failed)
  		zone->compact_order_failed = order;
  
  	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
  		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
  
  	trace_mm_compaction_defer_compaction(zone, order);
  }
  
  /* Returns true if compaction should be skipped this time */
  bool compaction_deferred(struct zone *zone, int order)
  {
  	unsigned long defer_limit = 1UL << zone->compact_defer_shift;
  
  	if (order < zone->compact_order_failed)
  		return false;
  
  	/* Avoid possible overflow */
  	if (++zone->compact_considered > defer_limit)
  		zone->compact_considered = defer_limit;
  
  	if (zone->compact_considered >= defer_limit)
  		return false;
  
  	trace_mm_compaction_deferred(zone, order);
  
  	return true;
  }
  
  /*
 * Update defer tracking counters after successful compaction of the given
 * order, which means an allocation either succeeded (alloc_success == true)
 * or is expected to succeed.
   */
  void compaction_defer_reset(struct zone *zone, int order,
  		bool alloc_success)
  {
  	if (alloc_success) {
  		zone->compact_considered = 0;
  		zone->compact_defer_shift = 0;
  	}
  	if (order >= zone->compact_order_failed)
  		zone->compact_order_failed = order + 1;
  
  	trace_mm_compaction_defer_reset(zone, order);
  }
  
  /* Returns true if restarting compaction after many failures */
  bool compaction_restarting(struct zone *zone, int order)
  {
  	if (order < zone->compact_order_failed)
  		return false;
  
  	return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
  		zone->compact_considered >= 1UL << zone->compact_defer_shift;
  }
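
/*
 * Illustrative lifecycle of the deferral counters (a sketch, not taken
 * verbatim from any caller): each failure widens the backoff window,
 * and a proven success collapses it again:
 *
 *	defer_compaction(zone, order);		compact_defer_shift == 1
 *	compaction_deferred(zone, order);	true - attempt skipped
 *	compaction_deferred(zone, order);	false - compaction may run
 *	defer_compaction(zone, order);		compact_defer_shift == 2
 *	...					capped at COMPACT_MAX_DEFER_SHIFT
 *	compaction_defer_reset(zone, order, true);	backoff cleared
 */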
  /* Returns true if the pageblock should be scanned for pages to isolate. */
  static inline bool isolation_suitable(struct compact_control *cc,
  					struct page *page)
  {
  	if (cc->ignore_skip_hint)
  		return true;
  
  	return !get_pageblock_skip(page);
  }
  static void reset_cached_positions(struct zone *zone)
  {
  	zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
  	zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
  	zone->compact_cached_free_pfn =
  				pageblock_start_pfn(zone_end_pfn(zone) - 1);
  }
  /*
   * This function is called to clear all cached information on pageblocks that
   * should be skipped for page isolation when the migrate and free page scanner
   * meet.
   */
  static void __reset_isolation_suitable(struct zone *zone)
  {
  	unsigned long start_pfn = zone->zone_start_pfn;
  	unsigned long end_pfn = zone_end_pfn(zone);
  	unsigned long pfn;
  	zone->compact_blockskip_flush = false;
  
  	/* Walk the zone and mark every pageblock as suitable for isolation */
  	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
  		struct page *page;
  
  		cond_resched();
  
  		if (!pfn_valid(pfn))
  			continue;
  
  		page = pfn_to_page(pfn);
  		if (zone != page_zone(page))
  			continue;
  
  		clear_pageblock_skip(page);
  	}
  
  	reset_cached_positions(zone);
  }
  void reset_isolation_suitable(pg_data_t *pgdat)
  {
  	int zoneid;
  
  	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
  		struct zone *zone = &pgdat->node_zones[zoneid];
  		if (!populated_zone(zone))
  			continue;
  
  		/* Only flush if a full compaction finished recently */
  		if (zone->compact_blockskip_flush)
  			__reset_isolation_suitable(zone);
  	}
  }
/*
 * If no pages were isolated then mark this pageblock to be skipped in the
 * future. The information is later cleared by __reset_isolation_suitable().
 */
static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool migrate_scanner)
{
	struct zone *zone = cc->zone;
	unsigned long pfn;

	if (cc->ignore_skip_hint)
		return;

	if (!page)
		return;

	if (nr_isolated)
		return;

	set_pageblock_skip(page);

	pfn = page_to_pfn(page);

	/* Update where async and sync compaction should restart */
	if (migrate_scanner) {
		if (pfn > zone->compact_cached_migrate_pfn[0])
			zone->compact_cached_migrate_pfn[0] = pfn;
		if (cc->mode != MIGRATE_ASYNC &&
		    pfn > zone->compact_cached_migrate_pfn[1])
			zone->compact_cached_migrate_pfn[1] = pfn;
	} else {
		if (pfn < zone->compact_cached_free_pfn)
			zone->compact_cached_free_pfn = pfn;
	}
  }
  #else
  static inline bool isolation_suitable(struct compact_control *cc,
  					struct page *page)
  {
  	return true;
  }
  static void update_pageblock_skip(struct compact_control *cc,
  			struct page *page, unsigned long nr_isolated,
  			bool migrate_scanner)
  {
  }
  #endif /* CONFIG_COMPACTION */
  /*
   * Compaction requires the taking of some coarse locks that are potentially
   * very heavily contended. For async compaction, back out if the lock cannot
   * be taken immediately. For sync compaction, spin on the lock if needed.
   *
   * Returns true if the lock is held
   * Returns false if the lock is not held and compaction should abort
   */
  static bool compact_trylock_irqsave(spinlock_t *lock, unsigned long *flags,
  						struct compact_control *cc)
  {
  	if (cc->mode == MIGRATE_ASYNC) {
  		if (!spin_trylock_irqsave(lock, *flags)) {
  			cc->contended = true;
  			return false;
  		}
  	} else {
  		spin_lock_irqsave(lock, *flags);
  	}

  	return true;
  }
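
/*
 * Typical call pattern (as in isolate_freepages_block() below):
 *
 *	unsigned long flags = 0;
 *	bool locked = false;
 *	...
 *	locked = compact_trylock_irqsave(&cc->zone->lock, &flags, cc);
 *	if (!locked)
 *		break;	(async compaction bailed out, cc->contended is set)
 */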
/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. The lock should be periodically unlocked to avoid
 * having disabled IRQs for a long time, even when there is nobody waiting on
 * the lock. It might also be that allowing the IRQs will result in
 * need_resched() becoming true. If scheduling is needed, async compaction
 * aborts. Sync compaction schedules.
 * Either compaction type will also abort if a fatal signal is pending.
 * In either case if the lock was locked, it is dropped and not regained.
 *
 * Returns true if compaction should abort due to fatal signal pending, or
 *		async compaction due to need_resched()
 * Returns false when compaction can continue (sync compaction might have
 *		scheduled)
 */
static bool compact_unlock_should_abort(spinlock_t *lock,
		unsigned long flags, bool *locked, struct compact_control *cc)
{
	if (*locked) {
		spin_unlock_irqrestore(lock, flags);
		*locked = false;
	}

	if (fatal_signal_pending(current)) {
		cc->contended = true;
		return true;
	}

	if (need_resched()) {
		if (cc->mode == MIGRATE_ASYNC) {
			cc->contended = true;
			return true;
		}
		cond_resched();
	}

	return false;
}
/*
 * Aside from avoiding lock contention, compaction also periodically checks
 * need_resched() and either schedules in sync compaction or aborts async
 * compaction. This is similar to what compact_unlock_should_abort() does, but
 * is used where no lock is concerned.
 *
 * Returns false when no scheduling was needed, or sync compaction scheduled.
 * Returns true when async compaction should abort.
 */
static inline bool compact_should_abort(struct compact_control *cc)
{
	/* async compaction aborts if contended */
	if (need_resched()) {
		if (cc->mode == MIGRATE_ASYNC) {
			cc->contended = true;
			return true;
		}

		cond_resched();
	}

	return false;
}
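
/*
 * This is called where no spinlock is held; for example, the free page
 * scanner in isolate_freepages() checks it once per SWAP_CLUSTER_MAX
 * pageblocks:
 *
 *	if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
 *					&& compact_should_abort(cc))
 *		break;
 */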
/*
 * Isolate free pages onto a private freelist. If @strict is true, will abort
 * returning 0 on any invalid PFNs or non-free pages inside the pageblock
 * (even though it may still end up isolating some pages).
 */
static unsigned long isolate_freepages_block(struct compact_control *cc,
				unsigned long *start_pfn,
				unsigned long end_pfn,
				struct list_head *freelist,
				bool strict)
  {
  	int nr_scanned = 0, total_isolated = 0;
  	struct page *cursor, *valid_page = NULL;
  	unsigned long flags = 0;
  	bool locked = false;
  	unsigned long blockpfn = *start_pfn;
  	unsigned int order;

  	cursor = pfn_to_page(blockpfn);
  	/* Isolate free pages. */
  	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
  		int isolated;
  		struct page *page = cursor;
  		/*
  		 * Periodically drop the lock (if held) regardless of its
  		 * contention, to give chance to IRQs. Abort if fatal signal
  		 * pending or async compaction detects need_resched()
  		 */
  		if (!(blockpfn % SWAP_CLUSTER_MAX)
  		    && compact_unlock_should_abort(&cc->zone->lock, flags,
  								&locked, cc))
  			break;
  		nr_scanned++;
  		if (!pfn_valid_within(blockpfn))
  			goto isolate_fail;
  		if (!valid_page)
  			valid_page = page;
  
  		/*
  		 * For compound pages such as THP and hugetlbfs, we can save
  		 * potentially a lot of iterations if we skip them at once.
  		 * The check is racy, but we can consider only valid values
  		 * and the only danger is skipping too much.
  		 */
  		if (PageCompound(page)) {
  			unsigned int comp_order = compound_order(page);
  
  			if (likely(comp_order < MAX_ORDER)) {
  				blockpfn += (1UL << comp_order) - 1;
  				cursor += (1UL << comp_order) - 1;
  			}
  
  			goto isolate_fail;
  		}
  		if (!PageBuddy(page))
  			goto isolate_fail;
  
  		/*
  		 * If we already hold the lock, we can skip some rechecking.
  		 * Note that if we hold the lock now, checked_pageblock was
  		 * already set in some previous iteration (or strict is true),
  		 * so it is correct to skip the suitable migration target
  		 * recheck as well.
  		 */
  		if (!locked) {
  			/*
  			 * The zone lock must be held to isolate freepages.
  			 * Unfortunately this is a very coarse lock and can be
  			 * heavily contended if there are parallel allocations
  			 * or parallel compactions. For async compaction do not
  			 * spin on the lock and we acquire the lock as late as
  			 * possible.
  			 */
  			locked = compact_trylock_irqsave(&cc->zone->lock,
  								&flags, cc);
  			if (!locked)
  				break;

  			/* Recheck this is a buddy page under lock */
  			if (!PageBuddy(page))
  				goto isolate_fail;
  		}

  		/* Found a free page, will break it into order-0 pages */
  		order = page_order(page);
  		isolated = __isolate_free_page(page, order);
  		if (!isolated)
  			break;
  		set_page_private(page, order);

  		total_isolated += isolated;
  		cc->nr_freepages += isolated;
  		list_add_tail(&page->lru, freelist);
  		if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
  			blockpfn += isolated;
  			break;
  		}
  		/* Advance to the end of split page */
  		blockpfn += isolated - 1;
  		cursor += isolated - 1;
  		continue;
  
  isolate_fail:
  		if (strict)
  			break;
  		else
  			continue;
  	}
  	if (locked)
  		spin_unlock_irqrestore(&cc->zone->lock, flags);
  	/*
  	 * There is a tiny chance that we have read bogus compound_order(),
  	 * so be careful to not go outside of the pageblock.
  	 */
  	if (unlikely(blockpfn > end_pfn))
  		blockpfn = end_pfn;
  	trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn,
  					nr_scanned, total_isolated);
  	/* Record how far we have got within the block */
  	*start_pfn = blockpfn;
  	/*
  	 * If strict isolation is requested by CMA then check that all the
  	 * pages requested were isolated. If there were any failures, 0 is
  	 * returned and CMA will fail.
  	 */
  	if (strict && blockpfn < end_pfn)
  		total_isolated = 0;
  	/* Update the pageblock-skip if the whole pageblock was scanned */
  	if (blockpfn == end_pfn)
  		update_pageblock_skip(cc, valid_page, total_isolated, false);

  	count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
  	if (total_isolated)
  		count_compact_events(COMPACTISOLATED, total_isolated);
  	return total_isolated;
  }
  /**
   * isolate_freepages_range() - isolate free pages.
   * @start_pfn: The first PFN to start isolating.
   * @end_pfn:   The one-past-last PFN.
   *
 * Non-free pages, invalid PFNs, or zone boundaries within the
 * [start_pfn, end_pfn) range are considered errors, and cause the function
 * to undo its actions and return zero.
 *
 * Otherwise, the function returns the one-past-the-last PFN of isolated pages
 * (which may be greater than end_pfn if the end fell in the middle of
 * a free page).
   */
  unsigned long
  isolate_freepages_range(struct compact_control *cc,
  			unsigned long start_pfn, unsigned long end_pfn)
  {
  	unsigned long isolated, pfn, block_start_pfn, block_end_pfn;
  	LIST_HEAD(freelist);
  	pfn = start_pfn;
  	block_start_pfn = pageblock_start_pfn(pfn);
  	if (block_start_pfn < cc->zone->zone_start_pfn)
  		block_start_pfn = cc->zone->zone_start_pfn;
  	block_end_pfn = pageblock_end_pfn(pfn);
  
  	for (; pfn < end_pfn; pfn += isolated,
  				block_start_pfn = block_end_pfn,
  				block_end_pfn += pageblock_nr_pages) {
  		/* Protect pfn from changing by isolate_freepages_block */
  		unsigned long isolate_start_pfn = pfn;

  		block_end_pfn = min(block_end_pfn, end_pfn);
  		/*
  		 * pfn could pass the block_end_pfn if isolated freepage
  		 * is more than pageblock order. In this case, we adjust
  		 * scanning range to right one.
  		 */
  		if (pfn >= block_end_pfn) {
  			block_start_pfn = pageblock_start_pfn(pfn);
  			block_end_pfn = pageblock_end_pfn(pfn);
  			block_end_pfn = min(block_end_pfn, end_pfn);
  		}
  		if (!pageblock_pfn_to_page(block_start_pfn,
  					block_end_pfn, cc->zone))
  			break;
  		isolated = isolate_freepages_block(cc, &isolate_start_pfn,
  						block_end_pfn, &freelist, true);
  
  		/*
  		 * In strict mode, isolate_freepages_block() returns 0 if
  		 * there are any holes in the block (ie. invalid PFNs or
  		 * non-free pages).
  		 */
  		if (!isolated)
  			break;
  
  		/*
  		 * If we managed to isolate pages, it is always (1 << n) *
  		 * pageblock_nr_pages for some non-negative n.  (Max order
  		 * page may span two pageblocks).
  		 */
  	}
  	/* __isolate_free_page() does not map the pages */
  	map_pages(&freelist);
  
  	if (pfn < end_pfn) {
  		/* Loop terminated early, cleanup. */
  		release_freepages(&freelist);
  		return 0;
  	}
  
  	/* We don't use freelists for anything. */
  	return pfn;
  }
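
/*
 * Sketch of a CMA-style caller (simplified from the compact_control that
 * alloc_contig_range() prepares; field values here are illustrative):
 *
 *	struct compact_control cc = {
 *		.nr_migratepages = 0,
 *		.order = -1,
 *		.zone = page_zone(pfn_to_page(start)),
 *		.mode = MIGRATE_SYNC,
 *		.ignore_skip_hint = true,
 *	};
 *	INIT_LIST_HEAD(&cc.migratepages);
 *	...
 *	if (!isolate_freepages_range(&cc, start, end))
 *		the range is busy and the allocation attempt fails;
 */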
  /* Update the number of anon and file isolated pages in the zone */
  static void acct_isolated(struct zone *zone, struct compact_control *cc)
  {
  	struct page *page;
  	unsigned int count[2] = { 0, };

  	if (list_empty(&cc->migratepages))
  		return;
  	list_for_each_entry(page, &cc->migratepages, lru)
  		count[!!page_is_file_cache(page)]++;

  	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON, count[0]);
  	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, count[1]);
  }
  
  /* Similar to reclaim, but different enough that they don't share logic */
  static bool too_many_isolated(struct zone *zone)
  {
  	unsigned long active, inactive, isolated;

  	inactive = node_page_state(zone->zone_pgdat, NR_INACTIVE_FILE) +
  			node_page_state(zone->zone_pgdat, NR_INACTIVE_ANON);
  	active = node_page_state(zone->zone_pgdat, NR_ACTIVE_FILE) +
  			node_page_state(zone->zone_pgdat, NR_ACTIVE_ANON);
  	isolated = node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE) +
  			node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON);

  	return isolated > (inactive + active) / 2;
  }
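
/*
 * Example: on a node with 6000 inactive and 2000 active LRU pages,
 * isolation backs off once more than (6000 + 2000) / 2 == 4000 pages
 * are parked off the LRU lists - a deliberately rough safety valve.
 */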
  /**
   * isolate_migratepages_block() - isolate all migrate-able pages within
   *				  a single pageblock
   * @cc:		Compaction control structure.
   * @low_pfn:	The first PFN to isolate
   * @end_pfn:	The one-past-the-last PFN to isolate, within same pageblock
   * @isolate_mode: Isolation mode to be used.
   *
   * Isolate all pages that can be migrated from the range specified by
 * [low_pfn, end_pfn). The range is expected to be within the same pageblock.
 * Returns zero if there is a fatal signal pending, otherwise the PFN of the
 * first page that was not scanned (which may be less than, equal to, or
 * greater than end_pfn).
   *
   * The pages are isolated on cc->migratepages list (not required to be empty),
   * and cc->nr_migratepages is updated accordingly. The cc->migrate_pfn field
   * is neither read nor updated.
   */
  static unsigned long
  isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
  			unsigned long end_pfn, isolate_mode_t isolate_mode)
  {
  	struct zone *zone = cc->zone;
  	unsigned long nr_scanned = 0, nr_isolated = 0;
  	struct lruvec *lruvec;
  	unsigned long flags = 0;
  	bool locked = false;
  	struct page *page = NULL, *valid_page = NULL;
  	unsigned long start_pfn = low_pfn;
  	bool skip_on_failure = false;
  	unsigned long next_skip_pfn = 0;

  	/*
  	 * Ensure that there are not too many pages isolated from the LRU
  	 * list by either parallel reclaimers or compaction. If there are,
  	 * delay for some time until fewer pages are isolated
  	 */
  	while (unlikely(too_many_isolated(zone))) {
  		/* async migration should just abort */
  		if (cc->mode == MIGRATE_ASYNC)
  			return 0;

  		congestion_wait(BLK_RW_ASYNC, HZ/10);
  
  		if (fatal_signal_pending(current))
  			return 0;
  	}
  	if (compact_should_abort(cc))
  		return 0;

  	if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) {
  		skip_on_failure = true;
  		next_skip_pfn = block_end_pfn(low_pfn, cc->order);
  	}
  	/* Time to isolate some pages for migration */
  	for (; low_pfn < end_pfn; low_pfn++) {

  		if (skip_on_failure && low_pfn >= next_skip_pfn) {
  			/*
  			 * We have isolated all migration candidates in the
  			 * previous order-aligned block, and did not skip it due
  			 * to failure. We should migrate the pages now and
  			 * hopefully succeed compaction.
  			 */
  			if (nr_isolated)
  				break;
  
  			/*
  			 * We failed to isolate in the previous order-aligned
  			 * block. Set the new boundary to the end of the
  			 * current block. Note we can't simply increase
  			 * next_skip_pfn by 1 << order, as low_pfn might have
  			 * been incremented by a higher number due to skipping
  			 * a compound or a high-order buddy page in the
  			 * previous loop iteration.
  			 */
  			next_skip_pfn = block_end_pfn(low_pfn, cc->order);
  		}
  		/*
  		 * Periodically drop the lock (if held) regardless of its
  		 * contention, to give chance to IRQs. Abort async compaction
  		 * if contended.
  		 */
  		if (!(low_pfn % SWAP_CLUSTER_MAX)
  		    && compact_unlock_should_abort(zone_lru_lock(zone), flags,
  								&locked, cc))
  			break;

  		if (!pfn_valid_within(low_pfn))
  			goto isolate_fail;
  		nr_scanned++;

  		page = pfn_to_page(low_pfn);

  		if (!valid_page)
  			valid_page = page;
  		/*
  		 * Skip if free. We read page order here without zone lock
  		 * which is generally unsafe, but the race window is small and
  		 * the worst thing that can happen is that we skip some
  		 * potential isolation targets.
  		 */
  		if (PageBuddy(page)) {
  			unsigned long freepage_order = page_order_unsafe(page);
  
  			/*
  			 * Without lock, we cannot be sure that what we got is
  			 * a valid page order. Consider only values in the
  			 * valid order range to prevent low_pfn overflow.
  			 */
  			if (freepage_order > 0 && freepage_order < MAX_ORDER)
  				low_pfn += (1UL << freepage_order) - 1;
  			continue;
  		}

  		/*
  		 * Regardless of being on LRU, compound pages such as THP and
  		 * hugetlbfs are not to be compacted. We can potentially save
  		 * a lot of iterations if we skip them at once. The check is
  		 * racy, but we can consider only valid values and the only
  		 * danger is skipping too much.
  		 */
  		if (PageCompound(page)) {
  			unsigned int comp_order = compound_order(page);
  
  			if (likely(comp_order < MAX_ORDER))
  				low_pfn += (1UL << comp_order) - 1;

  			goto isolate_fail;
  		}
  		/*
  		 * Check may be lockless but that's ok as we recheck later.
  		 * It's possible to migrate LRU and non-lru movable pages.
  		 * Skip any other type of page
  		 */
  		if (!PageLRU(page)) {
  			/*
  			 * __PageMovable can return false positive so we need
  			 * to verify it under page_lock.
  			 */
  			if (unlikely(__PageMovable(page)) &&
  					!PageIsolated(page)) {
  				if (locked) {
  					spin_unlock_irqrestore(zone_lru_lock(zone),
  									flags);
  					locked = false;
  				}
  
  				if (isolate_movable_page(page, isolate_mode))
  					goto isolate_success;
  			}
  			goto isolate_fail;
  		}

  		/*
  		 * Migration will fail if an anonymous page is pinned in memory,
  		 * so avoid taking lru_lock and isolating it unnecessarily in an
  		 * admittedly racy check.
  		 */
  		if (!page_mapping(page) &&
  		    page_count(page) > page_mapcount(page))
  			goto isolate_fail;

  		/* If we already hold the lock, we can skip some rechecking */
  		if (!locked) {
  			locked = compact_trylock_irqsave(zone_lru_lock(zone),
  								&flags, cc);
  			if (!locked)
  				break;

  			/* Recheck PageLRU and PageCompound under lock */
  			if (!PageLRU(page))
  				goto isolate_fail;
  
  			/*
			 * The page became compound since the non-locked check,
			 * and it's on the LRU. It can only be a THP so the order
  			 * is safe to read and it's 0 for tail pages.
  			 */
  			if (unlikely(PageCompound(page))) {
  				low_pfn += (1UL << compound_order(page)) - 1;
  				goto isolate_fail;
  			}
  		}
  		lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);

  		/* Try isolate the page */
  		if (__isolate_lru_page(page, isolate_mode) != 0)
  			goto isolate_fail;

  		VM_BUG_ON_PAGE(PageCompound(page), page);

  		/* Successfully isolated */
  		del_page_from_lru_list(page, lruvec, page_lru(page));
  
  isolate_success:
  		list_add(&page->lru, &cc->migratepages);
  		cc->nr_migratepages++;
  		nr_isolated++;

		/*
		 * Record where we could have freed pages by migration and not
		 * yet flushed them to the buddy allocator.
		 * - this is the lowest page that was isolated and likely to
		 *   be freed next by migration.
		 */
  		if (!cc->last_migrated_pfn)
  			cc->last_migrated_pfn = low_pfn;
  		/* Avoid isolating too much */
  		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
  			++low_pfn;
  			break;
  		}
  
  		continue;
  isolate_fail:
  		if (!skip_on_failure)
  			continue;
  
  		/*
  		 * We have isolated some pages, but then failed. Release them
  		 * instead of migrating, as we cannot form the cc->order buddy
  		 * page anyway.
  		 */
  		if (nr_isolated) {
  			if (locked) {
  				spin_unlock_irqrestore(zone_lru_lock(zone), flags);
  				locked = false;
  			}
  			acct_isolated(zone, cc);
  			putback_movable_pages(&cc->migratepages);
  			cc->nr_migratepages = 0;
  			cc->last_migrated_pfn = 0;
  			nr_isolated = 0;
  		}
  
  		if (low_pfn < next_skip_pfn) {
  			low_pfn = next_skip_pfn - 1;
  			/*
  			 * The check near the loop beginning would have updated
  			 * next_skip_pfn too, but this is a bit simpler.
  			 */
  			next_skip_pfn += 1UL << cc->order;
  		}
  	}
  	/*
  	 * The PageBuddy() check could have potentially brought us outside
  	 * the range to be scanned.
  	 */
  	if (unlikely(low_pfn > end_pfn))
  		low_pfn = end_pfn;
  	if (locked)
  		spin_unlock_irqrestore(zone_lru_lock(zone), flags);

  	/*
  	 * Update the pageblock-skip information and cached scanner pfn,
  	 * if the whole pageblock was scanned without isolating any page.
  	 */
  	if (low_pfn == end_pfn)
  		update_pageblock_skip(cc, valid_page, nr_isolated, true);

  	trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
  						nr_scanned, nr_isolated);

  	count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned);
  	if (nr_isolated)
  		count_compact_events(COMPACTISOLATED, nr_isolated);

  	return low_pfn;
  }
  /**
   * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
   * @cc:        Compaction control structure.
   * @start_pfn: The first PFN to start isolating.
   * @end_pfn:   The one-past-last PFN.
   *
   * Returns zero if isolation fails fatally due to e.g. pending signal.
   * Otherwise, function returns one-past-the-last PFN of isolated page
   * (which may be greater than end_pfn if end fell in a middle of a THP page).
   */
  unsigned long
  isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
  							unsigned long end_pfn)
  {
  	unsigned long pfn, block_start_pfn, block_end_pfn;
  
  	/* Scan block by block. First and last block may be incomplete */
  	pfn = start_pfn;
  	block_start_pfn = pageblock_start_pfn(pfn);
  	if (block_start_pfn < cc->zone->zone_start_pfn)
  		block_start_pfn = cc->zone->zone_start_pfn;
  	block_end_pfn = pageblock_end_pfn(pfn);
  
  	for (; pfn < end_pfn; pfn = block_end_pfn,
  				block_start_pfn = block_end_pfn,
  				block_end_pfn += pageblock_nr_pages) {
  
  		block_end_pfn = min(block_end_pfn, end_pfn);
  		if (!pageblock_pfn_to_page(block_start_pfn,
  					block_end_pfn, cc->zone))
  			continue;
  
  		pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
  							ISOLATE_UNEVICTABLE);
  		if (!pfn)
  			break;
  
  		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
  			break;
  	}
  	acct_isolated(cc->zone, cc);
  
  	return pfn;
  }
  #endif /* CONFIG_COMPACTION || CONFIG_CMA */
  #ifdef CONFIG_COMPACTION
  
  /* Returns true if the page is within a block suitable for migration to */
  static bool suitable_migration_target(struct page *page)
  {
  	/* If the page is a large free page, then disallow migration */
  	if (PageBuddy(page)) {
  		/*
  		 * We are checking page_order without zone->lock taken. But
  		 * the only small danger is that we skip a potentially suitable
  		 * pageblock, so it's not worth to check order for valid range.
  		 */
  		if (page_order_unsafe(page) >= pageblock_order)
  			return false;
  	}
  
  	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
  	if (migrate_async_suitable(get_pageblock_migratetype(page)))
  		return true;
  
  	/* Otherwise skip the block */
  	return false;
  }
  /*
   * Test whether the free scanner has reached the same or lower pageblock than
   * the migration scanner, and compaction should thus terminate.
   */
  static inline bool compact_scanners_met(struct compact_control *cc)
  {
  	return (cc->free_pfn >> pageblock_order)
  		<= (cc->migrate_pfn >> pageblock_order);
  }
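
/*
 * Example with pageblock_order == 9: free_pfn == 4608 and
 * migrate_pfn == 4096 give pageblocks 9 and 8 respectively, so the
 * scanners have not met yet; once free_pfn drops below 4608 into
 * pageblock 8, 8 <= 8 holds and compaction terminates the zone pass.
 */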
  
  /*
   * Based on information in the current compact_control, find blocks
   * suitable for isolating free pages from and then isolate them.
   */
  static void isolate_freepages(struct compact_control *cc)
  {
  	struct zone *zone = cc->zone;
  	struct page *page;
  	unsigned long block_start_pfn;	/* start of current pageblock */
  	unsigned long isolate_start_pfn; /* exact pfn we start at */
  	unsigned long block_end_pfn;	/* end of current pageblock */
  	unsigned long low_pfn;	     /* lowest pfn scanner is able to scan */
  	struct list_head *freelist = &cc->freepages;

	/*
	 * Initialise the free scanner. The starting point is where we last
	 * successfully isolated from, zone-cached value, or the end of the
	 * zone when isolating for the first time. For looping we also need
	 * this pfn aligned down to the pageblock boundary, because we do
	 * block_start_pfn -= pageblock_nr_pages in the for loop.
	 * For ending point, take care when isolating in the last pageblock of
	 * a zone which ends in the middle of a pageblock.
	 * The low boundary is the end of the pageblock the migration scanner
	 * is using.
	 */
  	isolate_start_pfn = cc->free_pfn;
  	block_start_pfn = pageblock_start_pfn(cc->free_pfn);
  	block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
  						zone_end_pfn(zone));
  	low_pfn = pageblock_end_pfn(cc->migrate_pfn);

  	/*
  	 * Isolate free pages until enough are available to migrate the
  	 * pages on cc->migratepages. We stop searching if the migrate
  	 * and free page scanners meet or enough free pages are isolated.
  	 */
  	for (; block_start_pfn >= low_pfn;
  				block_end_pfn = block_start_pfn,
  				block_start_pfn -= pageblock_nr_pages,
  				isolate_start_pfn = block_start_pfn) {
  		/*
  		 * This can iterate a massively long zone without finding any
  		 * suitable migration targets, so periodically check if we need
  		 * to schedule, or even abort async compaction.
  		 */
  		if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
  						&& compact_should_abort(cc))
  			break;

  		page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
  									zone);
  		if (!page)
  			continue;
  
  		/* Check the block is suitable for migration */
  		if (!suitable_migration_target(page))
  			continue;

  		/* If isolation recently failed, do not retry */
  		if (!isolation_suitable(cc, page))
  			continue;
  		/* Found a block suitable for isolating free pages from. */
  		isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn,
  					freelist, false);
  
  		/*
  		 * If we isolated enough freepages, or aborted due to lock
  		 * contention, terminate.
  		 */
  		if ((cc->nr_freepages >= cc->nr_migratepages)
  							|| cc->contended) {
  			if (isolate_start_pfn >= block_end_pfn) {
  				/*
  				 * Restart at previous pageblock if more
  				 * freepages can be isolated next time.
  				 */
f5f61a320   Vlastimil Babka   mm, compaction: s...
1045
1046
  				isolate_start_pfn =
  					block_start_pfn - pageblock_nr_pages;
a46cbf3bc   David Rientjes   mm, compaction: p...
1047
  			}
be9765722   Vlastimil Babka   mm, compaction: p...
1048
  			break;
a46cbf3bc   David Rientjes   mm, compaction: p...
1049
  		} else if (isolate_start_pfn < block_end_pfn) {
f5f61a320   Vlastimil Babka   mm, compaction: s...
1050
  			/*
a46cbf3bc   David Rientjes   mm, compaction: p...
1051
1052
  			 * If isolation failed early, do not continue
  			 * needlessly.
f5f61a320   Vlastimil Babka   mm, compaction: s...
1053
  			 */
a46cbf3bc   David Rientjes   mm, compaction: p...
1054
  			break;
f5f61a320   Vlastimil Babka   mm, compaction: s...
1055
  		}
ff9543fd3   Michal Nazarewicz   mm: compaction: e...
1056
  	}
66c64223a   Joonsoo Kim   mm/compaction: sp...
1057
  	/* __isolate_free_page() does not map the pages */
ff9543fd3   Michal Nazarewicz   mm: compaction: e...
1058
  	map_pages(freelist);
7ed695e06   Vlastimil Babka   mm: compaction: d...
1059
  	/*
f5f61a320   Vlastimil Babka   mm, compaction: s...
1060
1061
1062
1063
  	 * Record where the free scanner will restart next time. Either we
  	 * broke from the loop and set isolate_start_pfn based on the last
  	 * call to isolate_freepages_block(), or we met the migration scanner
  	 * and the loop terminated due to isolate_start_pfn < low_pfn
7ed695e06   Vlastimil Babka   mm: compaction: d...
1064
  	 */
f5f61a320   Vlastimil Babka   mm, compaction: s...
1065
  	cc->free_pfn = isolate_start_pfn;
748446bb6   Mel Gorman   mm: compaction: m...
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
  }
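
/*
 * Illustration of the scan granularity above (pageblock_order is
 * config-dependent; these numbers assume a typical x86_64 build with 4K
 * pages): pageblock_order == 9 gives pageblock_nr_pages == 512, so the
 * free scanner above steps backwards 2MB per iteration, and with
 * SWAP_CLUSTER_MAX == 32 the compact_should_abort() check runs once per
 * 32 pageblocks, i.e. every 64MB of scanned address space.
 */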
  
  /*
   * This is a migrate-callback that "allocates" freepages by taking pages
   * from the isolated freelists in the block we are migrating to.
   */
  static struct page *compaction_alloc(struct page *migratepage,
  					unsigned long data,
  					int **result)
  {
  	struct compact_control *cc = (struct compact_control *)data;
  	struct page *freepage;
	/*
	 * Isolate free pages if necessary, and if we are not aborting due to
	 * contention.
	 */
	if (list_empty(&cc->freepages)) {
		if (!cc->contended)
			isolate_freepages(cc);
  
  		if (list_empty(&cc->freepages))
  			return NULL;
  	}
  
  	freepage = list_entry(cc->freepages.next, struct page, lru);
  	list_del(&freepage->lru);
  	cc->nr_freepages--;
  
  	return freepage;
  }
  
  /*
d53aea3d4   David Rientjes   mm, compaction: r...
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
   * This is a migrate-callback that "frees" freepages back to the isolated
   * freelist.  All pages on the freelist are from the same zone, so there is no
   * special handling needed for NUMA.
   */
  static void compaction_free(struct page *page, unsigned long data)
  {
  	struct compact_control *cc = (struct compact_control *)data;
  
  	list_add(&page->lru, &cc->freepages);
  	cc->nr_freepages++;
  }
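
/*
 * How the two callbacks above pair up: migrate_pages() calls
 * compaction_alloc() to pick a target page for each isolated source
 * page, and compaction_free() to hand back a target page that ended up
 * unused (e.g. because migrating its source page failed). The actual
 * call site is in compact_zone() below:
 *
 *	err = migrate_pages(&cc->migratepages, compaction_alloc,
 *			compaction_free, (unsigned long)cc, cc->mode,
 *			MR_COMPACTION);
 */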

/* possible outcome of isolate_migratepages */
typedef enum {
	ISOLATE_ABORT,		/* Abort compaction now */
	ISOLATE_NONE,		/* No pages isolated, continue scanning */
	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
} isolate_migrate_t;
  
/*
 * Allow userspace to control policy on scanning the unevictable LRU for
 * compactable pages.
 */
int sysctl_compact_unevictable_allowed __read_mostly = 1;
  
/*
 * Isolate all pages that can be migrated from the first suitable block,
 * starting at the block pointed to by the migrate scanner pfn within
 * compact_control.
 */
  static isolate_migrate_t isolate_migratepages(struct zone *zone,
  					struct compact_control *cc)
  {
	unsigned long block_start_pfn;
	unsigned long block_end_pfn;
	unsigned long low_pfn;
	struct page *page;
	const isolate_mode_t isolate_mode =
		(sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) |
		(cc->mode != MIGRATE_SYNC ? ISOLATE_ASYNC_MIGRATE : 0);

	/*
	 * Start at where we last stopped, or beginning of the zone as
	 * initialized by compact_zone()
	 */
	low_pfn = cc->migrate_pfn;
	block_start_pfn = pageblock_start_pfn(low_pfn);
	if (block_start_pfn < zone->zone_start_pfn)
		block_start_pfn = zone->zone_start_pfn;

	/* Only scan within a pageblock boundary */
	block_end_pfn = pageblock_end_pfn(low_pfn);

	/*
	 * Iterate over whole pageblocks until we find the first suitable.
	 * Do not cross the free scanner.
	 */
	for (; block_end_pfn <= cc->free_pfn;
			low_pfn = block_end_pfn,
			block_start_pfn = block_end_pfn,
			block_end_pfn += pageblock_nr_pages) {

		/*
		 * This can potentially iterate a massively long zone with
		 * many pageblocks unsuitable, so periodically check if we
		 * need to schedule, or even abort async compaction.
		 */
		if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
						&& compact_should_abort(cc))
			break;

		page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
									zone);
		if (!page)
			continue;

		/* If isolation recently failed, do not retry */
		if (!isolation_suitable(cc, page))
			continue;

		/*
		 * For async compaction, also only scan in MOVABLE blocks.
		 * Async compaction is optimistic to see if the minimum amount
		 * of work satisfies the allocation.
		 */
		if (cc->mode == MIGRATE_ASYNC &&
		    !migrate_async_suitable(get_pageblock_migratetype(page)))
			continue;

		/* Perform the isolation */
		low_pfn = isolate_migratepages_block(cc, low_pfn,
						block_end_pfn, isolate_mode);

		if (!low_pfn || cc->contended) {
			acct_isolated(zone, cc);
			return ISOLATE_ABORT;
		}

		/*
		 * Either we isolated something and proceed with migration. Or
		 * we failed and compact_zone should decide if we should
		 * continue or not.
		 */
		break;
	}

	acct_isolated(zone, cc);
	/* Record where migration scanner will be restarted. */
	cc->migrate_pfn = low_pfn;

	return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
}
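
/*
 * Worked example for isolate_mode above: with the default
 * sysctl_compact_unevictable_allowed == 1 and async compaction
 * (cc->mode == MIGRATE_ASYNC), isolate_mode becomes
 * ISOLATE_UNEVICTABLE | ISOLATE_ASYNC_MIGRATE: unevictable pages may be
 * isolated, while pages that would block an async migration (e.g. those
 * under writeback) are skipped.
 */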

/*
 * order == -1 is expected when compacting via
 * /proc/sys/vm/compact_memory
 */
static inline bool is_via_compact_memory(int order)
{
	return order == -1;
}

static enum compact_result __compact_finished(struct zone *zone, struct compact_control *cc,
			    const int migratetype)
{
	unsigned int order;
	unsigned long watermark;

	if (cc->contended || fatal_signal_pending(current))
		return COMPACT_CONTENDED;

	/* Compaction run completes if the migrate and free scanner meet */
	if (compact_scanners_met(cc)) {
		/* Let the next compaction start anew. */
		reset_cached_positions(zone);

		/*
		 * Mark that the PG_migrate_skip information should be cleared
		 * by kswapd when it goes to sleep. kcompactd does not set the
		 * flag itself as the decision to be clear should be directly
		 * based on an allocation request.
		 */
		if (cc->direct_compaction)
			zone->compact_blockskip_flush = true;

		if (cc->whole_zone)
			return COMPACT_COMPLETE;
		else
			return COMPACT_PARTIAL_SKIPPED;
	}

	if (is_via_compact_memory(cc->order))
		return COMPACT_CONTINUE;

	/* Compaction run is not finished if the watermark is not met */
	watermark = low_wmark_pages(zone);

	if (!zone_watermark_ok(zone, cc->order, watermark, cc->classzone_idx,
							cc->alloc_flags))
		return COMPACT_CONTINUE;

	/* Direct compactor: Is a suitable page free? */
	for (order = cc->order; order < MAX_ORDER; order++) {
		struct free_area *area = &zone->free_area[order];
		bool can_steal;

		/* Job done if page is free of the right migratetype */
		if (!list_empty(&area->free_list[migratetype]))
			return COMPACT_PARTIAL;

#ifdef CONFIG_CMA
		/* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
		if (migratetype == MIGRATE_MOVABLE &&
			!list_empty(&area->free_list[MIGRATE_CMA]))
			return COMPACT_PARTIAL;
#endif
		/*
		 * Job done if allocation would steal freepages from
		 * other migratetype buddy lists.
		 */
		if (find_suitable_fallback(area, order, migratetype,
						true, &can_steal) != -1)
			return COMPACT_PARTIAL;
	}

	return COMPACT_NO_SUITABLE_PAGE;
}
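
/*
 * Example of the free list walk above: for an order-3 request with
 * migratetype MIGRATE_MOVABLE, COMPACT_PARTIAL is returned as soon as
 * any free_area from order 3 up to MAX_ORDER-1 holds a MOVABLE page, a
 * CMA page (under CONFIG_CMA), or a page that find_suitable_fallback()
 * says could be stolen from another migratetype.
 */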

static enum compact_result compact_finished(struct zone *zone,
			struct compact_control *cc,
			const int migratetype)
{
	int ret;

	ret = __compact_finished(zone, cc, migratetype);
	trace_mm_compaction_finished(zone, cc->order, ret);
	if (ret == COMPACT_NO_SUITABLE_PAGE)
		ret = COMPACT_CONTINUE;

	return ret;
}

/*
 * compaction_suitable: Is this suitable to run compaction on this zone now?
 * Returns
 *   COMPACT_SKIPPED  - If there are too few free pages for compaction
 *   COMPACT_PARTIAL  - If the allocation would succeed without compaction
 *   COMPACT_CONTINUE - If compaction should run now
 */
static enum compact_result __compaction_suitable(struct zone *zone, int order,
					unsigned int alloc_flags,
					int classzone_idx,
					unsigned long wmark_target)
{
	int fragindex;
	unsigned long watermark;

	if (is_via_compact_memory(order))
		return COMPACT_CONTINUE;

	watermark = low_wmark_pages(zone);
	/*
	 * If watermarks for high-order allocation are already met, there
	 * should be no need for compaction at all.
	 */
	if (zone_watermark_ok(zone, order, watermark, classzone_idx,
								alloc_flags))
		return COMPACT_PARTIAL;

	/*
	 * Watermarks for order-0 must be met for compaction. Note the 2UL.
	 * This is because during migration, copies of pages need to be
	 * allocated and for a short time, the footprint is higher
	 */
	watermark += (2UL << order);
	if (!__zone_watermark_ok(zone, 0, watermark, classzone_idx,
				 alloc_flags, wmark_target))
		return COMPACT_SKIPPED;

	/*
	 * fragmentation index determines if allocation failures are due to
	 * low memory or external fragmentation
	 *
	 * index of -1000 would imply allocations might succeed depending on
	 * watermarks, but we already failed the high-order watermark check
	 * index towards 0 implies failure is due to lack of memory
	 * index towards 1000 implies failure is due to fragmentation
	 *
	 * Only compact if a failure would be due to fragmentation.
	 */
	fragindex = fragmentation_index(zone, order);
	if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
		return COMPACT_NOT_SUITABLE_ZONE;

	return COMPACT_CONTINUE;
}
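
/*
 * Worked example for the checks above: an order-3 request needs the zone
 * to meet low_wmark_pages(zone) plus 2UL << 3 = 16 pages of order-0
 * migration headroom. If that holds, the fragmentation index decides:
 * with the default sysctl_extfrag_threshold of 500, a fragindex of say
 * 600 (failure mostly due to fragmentation) lets compaction continue,
 * while 100 (failure due to lack of memory) skips the zone.
 */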

enum compact_result compaction_suitable(struct zone *zone, int order,
					unsigned int alloc_flags,
					int classzone_idx)
{
	enum compact_result ret;

	ret = __compaction_suitable(zone, order, alloc_flags, classzone_idx,
				    zone_page_state(zone, NR_FREE_PAGES));
	trace_mm_compaction_suitable(zone, order, ret);
	if (ret == COMPACT_NOT_SUITABLE_ZONE)
		ret = COMPACT_SKIPPED;

	return ret;
}

bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
		int alloc_flags)
{
	struct zone *zone;
	struct zoneref *z;

	/*
	 * Make sure at least one zone would pass __compaction_suitable if we continue
	 * retrying the reclaim.
	 */
	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
					ac->nodemask) {
		unsigned long available;
		enum compact_result compact_result;

		/*
		 * Do not consider all the reclaimable memory because we do not
		 * want to thrash just for a single high order allocation which
		 * is not even guaranteed to appear even if __compaction_suitable
		 * is happy about the watermark check.
		 */
		available = zone_reclaimable_pages(zone) / order;
		available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
		compact_result = __compaction_suitable(zone, order, alloc_flags,
				ac_classzone_idx(ac), available);
		if (compact_result != COMPACT_SKIPPED &&
				compact_result != COMPACT_NOT_SUITABLE_ZONE)
			return true;
	}

	return false;
}
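
/*
 * Illustration of the "available" estimate above: for an order-2 request
 * in a zone with 40000 reclaimable pages and 2000 free pages, the check
 * uses 40000 / 2 + 2000 = 22000 pages, deliberately counting only a
 * fraction of what full reclaim could theoretically produce.
 */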

static enum compact_result compact_zone(struct zone *zone, struct compact_control *cc)
{
	enum compact_result ret;
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	const int migratetype = gfpflags_to_migratetype(cc->gfp_mask);
	const bool sync = cc->mode != MIGRATE_ASYNC;

	ret = compaction_suitable(zone, cc->order, cc->alloc_flags,
							cc->classzone_idx);
	/* Compaction is likely to fail */
	if (ret == COMPACT_PARTIAL || ret == COMPACT_SKIPPED)
		return ret;

	/* huh, compaction_suitable is returning something unexpected */
	VM_BUG_ON(ret != COMPACT_CONTINUE);

	/*
	 * Clear pageblock skip if there were failures recently and compaction
	 * is about to be retried after being deferred.
	 */
	if (compaction_restarting(zone, cc->order))
		__reset_isolation_suitable(zone);

	/*
	 * Setup to move all movable pages to the end of the zone. Use cached
	 * information on where the scanners should start but check that it
	 * is initialised by ensuring the values are within zone boundaries.
	 */
	cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
	cc->free_pfn = zone->compact_cached_free_pfn;
	if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) {
		cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
		zone->compact_cached_free_pfn = cc->free_pfn;
	}
	if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) {
		cc->migrate_pfn = start_pfn;
		zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
		zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
	}

	if (cc->migrate_pfn == start_pfn)
		cc->whole_zone = true;

	cc->last_migrated_pfn = 0;

	trace_mm_compaction_begin(start_pfn, cc->migrate_pfn,
				cc->free_pfn, end_pfn, sync);

	migrate_prep_local();

	while ((ret = compact_finished(zone, cc, migratetype)) ==
						COMPACT_CONTINUE) {
		int err;

		switch (isolate_migratepages(zone, cc)) {
		case ISOLATE_ABORT:
			ret = COMPACT_CONTENDED;
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			goto out;
		case ISOLATE_NONE:
			/*
			 * We haven't isolated and migrated anything, but
			 * there might still be unflushed migrations from
			 * previous cc->order aligned block.
			 */
			goto check_drain;
		case ISOLATE_SUCCESS:
			;
		}

		err = migrate_pages(&cc->migratepages, compaction_alloc,
				compaction_free, (unsigned long)cc, cc->mode,
				MR_COMPACTION);

		trace_mm_compaction_migratepages(cc->nr_migratepages, err,
							&cc->migratepages);

		/* All pages were either migrated or will be released */
		cc->nr_migratepages = 0;
		if (err) {
			putback_movable_pages(&cc->migratepages);
			/*
			 * migrate_pages() may return -ENOMEM when scanners meet
			 * and we want compact_finished() to detect it
			 */
			if (err == -ENOMEM && !compact_scanners_met(cc)) {
				ret = COMPACT_CONTENDED;
				goto out;
			}
			/*
			 * We failed to migrate at least one page in the current
			 * order-aligned block, so skip the rest of it.
			 */
			if (cc->direct_compaction &&
						(cc->mode == MIGRATE_ASYNC)) {
				cc->migrate_pfn = block_end_pfn(
						cc->migrate_pfn - 1, cc->order);
				/* Draining pcplists is useless in this case */
				cc->last_migrated_pfn = 0;
			}
		}

check_drain:
		/*
		 * Has the migration scanner moved away from the previous
		 * cc->order aligned block where we migrated from? If yes,
		 * flush the pages that were freed, so that they can merge and
		 * compact_finished() can detect immediately if allocation
		 * would succeed.
		 */
		if (cc->order > 0 && cc->last_migrated_pfn) {
			int cpu;
			unsigned long current_block_start =
				block_start_pfn(cc->migrate_pfn, cc->order);

			if (cc->last_migrated_pfn < current_block_start) {
				cpu = get_cpu();
				lru_add_drain_cpu(cpu);
				drain_local_pages(zone);
				put_cpu();
				/* No more flushing until we migrate again */
				cc->last_migrated_pfn = 0;
			}
		}
	}

out:
	/*
	 * Release free pages and update where the free scanner should restart,
	 * so we don't leave any returned pages behind in the next attempt.
	 */
	if (cc->nr_freepages > 0) {
		unsigned long free_pfn = release_freepages(&cc->freepages);

		cc->nr_freepages = 0;
		VM_BUG_ON(free_pfn == 0);
		/* The cached pfn is always the first in a pageblock */
		free_pfn = pageblock_start_pfn(free_pfn);
		/*
		 * Only go back, not forward. The cached pfn might have been
		 * already reset to zone end in compact_finished()
		 */
		if (free_pfn > zone->compact_cached_free_pfn)
			zone->compact_cached_free_pfn = free_pfn;
	}

	trace_mm_compaction_end(start_pfn, cc->migrate_pfn,
				cc->free_pfn, end_pfn, sync, ret);

	return ret;
}
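
/*
 * Note on the check_drain logic above: pages freed during migration sit
 * on per-cpu (pcp) lists, where they cannot merge into high-order
 * buddies. Draining once the migration scanner leaves a cc->order
 * aligned block lets compact_finished() observe a newly assembled
 * high-order page as early as possible, without paying for a drain on
 * every iteration.
 */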

static enum compact_result compact_zone_order(struct zone *zone, int order,
		gfp_t gfp_mask, enum compact_priority prio,
		unsigned int alloc_flags, int classzone_idx)
{
	enum compact_result ret;
	struct compact_control cc = {
		.nr_freepages = 0,
		.nr_migratepages = 0,
		.order = order,
		.gfp_mask = gfp_mask,
		.zone = zone,
		.mode = (prio == COMPACT_PRIO_ASYNC) ?
					MIGRATE_ASYNC : MIGRATE_SYNC_LIGHT,
		.alloc_flags = alloc_flags,
		.classzone_idx = classzone_idx,
		.direct_compaction = true,
	};
	INIT_LIST_HEAD(&cc.freepages);
	INIT_LIST_HEAD(&cc.migratepages);

	ret = compact_zone(zone, &cc);

	VM_BUG_ON(!list_empty(&cc.freepages));
	VM_BUG_ON(!list_empty(&cc.migratepages));

	return ret;
}

int sysctl_extfrag_threshold = 500;

/**
 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
 * @gfp_mask: The GFP mask of the current allocation
 * @order: The order of the current allocation
 * @alloc_flags: The allocation flags of the current allocation
 * @ac: The context of current allocation
 * @prio: Determines how hard direct compaction should try to succeed
 *
 * This is the main entry point for direct page compaction.
 */
enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
		unsigned int alloc_flags, const struct alloc_context *ac,
		enum compact_priority prio)
{
	int may_enter_fs = gfp_mask & __GFP_FS;
	int may_perform_io = gfp_mask & __GFP_IO;
	struct zoneref *z;
	struct zone *zone;
	enum compact_result rc = COMPACT_SKIPPED;

	/* Check if the GFP flags allow compaction */
	if (!may_enter_fs || !may_perform_io)
		return COMPACT_SKIPPED;

	trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio);

	/* Compact each zone in the list */
	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
								ac->nodemask) {
		enum compact_result status;

		if (compaction_deferred(zone, order)) {
			rc = max_t(enum compact_result, COMPACT_DEFERRED, rc);
			continue;
		}

		status = compact_zone_order(zone, order, gfp_mask, prio,
					alloc_flags, ac_classzone_idx(ac));
		rc = max(status, rc);

		/* If a normal allocation would succeed, stop compacting */
		if (zone_watermark_ok(zone, order, low_wmark_pages(zone),
					ac_classzone_idx(ac), alloc_flags)) {
			/*
			 * We think the allocation will succeed in this zone,
			 * but it is not certain, hence the false. The caller
			 * will repeat this with true if allocation indeed
			 * succeeds in this zone.
			 */
			compaction_defer_reset(zone, order, false);

			break;
		}

		if (prio != COMPACT_PRIO_ASYNC && (status == COMPACT_COMPLETE ||
					status == COMPACT_PARTIAL_SKIPPED))
			/*
			 * We think that allocation won't succeed in this zone
			 * so we defer compaction there. If it ends up
			 * succeeding after all, it will be reset.
			 */
			defer_compaction(zone, order);

		/*
		 * We might have stopped compacting due to need_resched() in
		 * async compaction, or due to a fatal signal detected. In that
		 * case do not try further zones
		 */
		if ((prio == COMPACT_PRIO_ASYNC && need_resched())
					|| fatal_signal_pending(current))
			break;
	}

	return rc;
}
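
/*
 * Priority mapping used above (see compact_zone_order): a
 * COMPACT_PRIO_ASYNC request runs with MIGRATE_ASYNC and gives up when
 * need_resched() fires, while any other priority runs with
 * MIGRATE_SYNC_LIGHT and only stops early on a fatal signal.
 */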

/* Compact all zones within a node */
static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
{
	int zoneid;
	struct zone *zone;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {

		zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		cc->nr_freepages = 0;
		cc->nr_migratepages = 0;
		cc->zone = zone;
		INIT_LIST_HEAD(&cc->freepages);
		INIT_LIST_HEAD(&cc->migratepages);

		/*
		 * When called via /proc/sys/vm/compact_memory
		 * this makes sure we compact the whole zone regardless of
		 * cached scanner positions.
		 */
		if (is_via_compact_memory(cc->order))
			__reset_isolation_suitable(zone);

		if (is_via_compact_memory(cc->order) ||
				!compaction_deferred(zone, cc->order))
			compact_zone(zone, cc);

		VM_BUG_ON(!list_empty(&cc->freepages));
		VM_BUG_ON(!list_empty(&cc->migratepages));

		if (is_via_compact_memory(cc->order))
			continue;

		if (zone_watermark_ok(zone, cc->order,
				low_wmark_pages(zone), 0, 0))
			compaction_defer_reset(zone, cc->order, false);
	}
}

void compact_pgdat(pg_data_t *pgdat, int order)
{
	struct compact_control cc = {
		.order = order,
		.mode = MIGRATE_ASYNC,
	};

	if (!order)
		return;

	__compact_pgdat(pgdat, &cc);
}

static void compact_node(int nid)
{
	struct compact_control cc = {
		.order = -1,
		.mode = MIGRATE_SYNC,
		.ignore_skip_hint = true,
	};

	__compact_pgdat(NODE_DATA(nid), &cc);
}

/* Compact all nodes in the system */
static void compact_nodes(void)
{
	int nid;

	/* Flush pending updates to the LRU lists */
	lru_add_drain_all();

	for_each_online_node(nid)
		compact_node(nid);
}

/* The written value is actually unused, all memory is compacted */
int sysctl_compact_memory;

/*
 * This is the entry point for compacting all nodes via
 * /proc/sys/vm/compact_memory
 */
int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	if (write)
		compact_nodes();

	return 0;
}
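
/*
 * Usage sketch: the written value is ignored, so any write triggers a
 * full compaction of every online node, e.g.:
 *
 *	echo 1 > /proc/sys/vm/compact_memory
 */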

int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_minmax(table, write, buffer, length, ppos);

	return 0;
}

#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
static ssize_t sysfs_compact_node(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	int nid = dev->id;

	if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
		/* Flush pending updates to the LRU lists */
		lru_add_drain_all();

		compact_node(nid);
	}

	return count;
}
static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);

int compaction_register_node(struct node *node)
{
	return device_create_file(&node->dev, &dev_attr_compact);
}

void compaction_unregister_node(struct node *node)
{
	return device_remove_file(&node->dev, &dev_attr_compact);
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */
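
/*
 * Usage sketch for the sysfs attribute above: writing to the per-node
 * file compacts just that node, e.g.:
 *
 *	echo 1 > /sys/devices/system/node/node0/compact
 */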

static inline bool kcompactd_work_requested(pg_data_t *pgdat)
{
	return pgdat->kcompactd_max_order > 0 || kthread_should_stop();
}
  
  static bool kcompactd_node_suitable(pg_data_t *pgdat)
  {
  	int zoneid;
  	struct zone *zone;
  	enum zone_type classzone_idx = pgdat->kcompactd_classzone_idx;

	for (zoneid = 0; zoneid <= classzone_idx; zoneid++) {
  		zone = &pgdat->node_zones[zoneid];
  
  		if (!populated_zone(zone))
  			continue;
  
  		if (compaction_suitable(zone, pgdat->kcompactd_max_order, 0,
  					classzone_idx) == COMPACT_CONTINUE)
  			return true;
  	}
  
  	return false;
  }
  
  static void kcompactd_do_work(pg_data_t *pgdat)
  {
  	/*
  	 * With no special task, compact all zones so that a page of requested
  	 * order is allocatable.
  	 */
  	int zoneid;
  	struct zone *zone;
  	struct compact_control cc = {
  		.order = pgdat->kcompactd_max_order,
  		.classzone_idx = pgdat->kcompactd_classzone_idx,
  		.mode = MIGRATE_SYNC_LIGHT,
		.ignore_skip_hint = true,
	};
  	bool success = false;
  
  	trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order,
  							cc.classzone_idx);
  	count_vm_event(KCOMPACTD_WAKE);

	for (zoneid = 0; zoneid <= cc.classzone_idx; zoneid++) {
  		int status;
  
  		zone = &pgdat->node_zones[zoneid];
  		if (!populated_zone(zone))
  			continue;
  
  		if (compaction_deferred(zone, cc.order))
  			continue;
  
  		if (compaction_suitable(zone, cc.order, 0, zoneid) !=
  							COMPACT_CONTINUE)
  			continue;
  
  		cc.nr_freepages = 0;
  		cc.nr_migratepages = 0;
  		cc.zone = zone;
  		INIT_LIST_HEAD(&cc.freepages);
  		INIT_LIST_HEAD(&cc.migratepages);

		if (kthread_should_stop())
			return;
  		status = compact_zone(zone, &cc);
  
  		if (zone_watermark_ok(zone, cc.order, low_wmark_pages(zone),
  						cc.classzone_idx, 0)) {
  			success = true;
  			compaction_defer_reset(zone, cc.order, false);
  		} else if (status == COMPACT_PARTIAL_SKIPPED || status == COMPACT_COMPLETE) {
  			/*
  			 * We use sync migration mode here, so we defer like
  			 * sync direct compaction does.
  			 */
  			defer_compaction(zone, cc.order);
  		}
  
  		VM_BUG_ON(!list_empty(&cc.freepages));
  		VM_BUG_ON(!list_empty(&cc.migratepages));
  	}
  
  	/*
  	 * Regardless of success, we are done until woken up next. But remember
  	 * the requested order/classzone_idx in case it was higher/tighter than
  	 * our current ones
  	 */
  	if (pgdat->kcompactd_max_order <= cc.order)
  		pgdat->kcompactd_max_order = 0;
  	if (pgdat->kcompactd_classzone_idx >= cc.classzone_idx)
  		pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;
  }
  
  void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx)
  {
  	if (!order)
  		return;
  
  	if (pgdat->kcompactd_max_order < order)
  		pgdat->kcompactd_max_order = order;
  
  	if (pgdat->kcompactd_classzone_idx > classzone_idx)
  		pgdat->kcompactd_classzone_idx = classzone_idx;
  
  	if (!waitqueue_active(&pgdat->kcompactd_wait))
  		return;
  
  	if (!kcompactd_node_suitable(pgdat))
  		return;
  
  	trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order,
  							classzone_idx);
  	wake_up_interruptible(&pgdat->kcompactd_wait);
  }
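
/*
 * Wakeup path sketch: wakeup_kcompactd() is typically called by kswapd
 * after reclaiming on behalf of a high-order allocation, so that
 * kcompactd (sleeping on kcompactd_wait in kcompactd() below) can
 * defragment in the background. The order hint only ever ratchets up
 * and the classzone_idx hint only tightens here; both are consumed and
 * reset in kcompactd_do_work().
 */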
  
  /*
   * The background compaction daemon, started as a kernel thread
   * from the init process.
   */
  static int kcompactd(void *p)
  {
  	pg_data_t *pgdat = (pg_data_t*)p;
  	struct task_struct *tsk = current;
  
  	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
  
  	if (!cpumask_empty(cpumask))
  		set_cpus_allowed_ptr(tsk, cpumask);
  
  	set_freezable();
  
  	pgdat->kcompactd_max_order = 0;
  	pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;
  
  	while (!kthread_should_stop()) {
  		trace_mm_compaction_kcompactd_sleep(pgdat->node_id);
  		wait_event_freezable(pgdat->kcompactd_wait,
  				kcompactd_work_requested(pgdat));
  
  		kcompactd_do_work(pgdat);
  	}
  
  	return 0;
  }
  
  /*
   * This kcompactd start function will be called by init and node-hot-add.
   * On node-hot-add, kcompactd will moved to proper cpus if cpus are hot-added.
   */
  int kcompactd_run(int nid)
  {
  	pg_data_t *pgdat = NODE_DATA(nid);
  	int ret = 0;
  
  	if (pgdat->kcompactd)
  		return 0;
  
  	pgdat->kcompactd = kthread_run(kcompactd, pgdat, "kcompactd%d", nid);
  	if (IS_ERR(pgdat->kcompactd)) {
  		pr_err("Failed to start kcompactd on node %d
  ", nid);
  		ret = PTR_ERR(pgdat->kcompactd);
  		pgdat->kcompactd = NULL;
  	}
  	return ret;
  }
  
  /*
   * Called by memory hotplug when all memory in a node is offlined. Caller must
   * hold mem_hotplug_begin/end().
   */
  void kcompactd_stop(int nid)
  {
  	struct task_struct *kcompactd = NODE_DATA(nid)->kcompactd;
  
  	if (kcompactd) {
  		kthread_stop(kcompactd);
  		NODE_DATA(nid)->kcompactd = NULL;
  	}
  }
  
  /*
   * It's optimal to keep kcompactd on the same CPUs as their memory, but
   * not required for correctness. So if the last cpu in a node goes
   * away, we get changed to run anywhere: as the first one comes back,
   * restore their cpu bindings.
   */
  static int cpu_callback(struct notifier_block *nfb, unsigned long action,
  			void *hcpu)
  {
  	int nid;
  
  	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
  		for_each_node_state(nid, N_MEMORY) {
  			pg_data_t *pgdat = NODE_DATA(nid);
  			const struct cpumask *mask;
  
  			mask = cpumask_of_node(pgdat->node_id);
  
  			if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
  				/* One of our CPUs online: restore mask */
  				set_cpus_allowed_ptr(pgdat->kcompactd, mask);
  		}
  	}
  	return NOTIFY_OK;
  }
  
  static int __init kcompactd_init(void)
  {
  	int nid;
  
  	for_each_node_state(nid, N_MEMORY)
  		kcompactd_run(nid);
  	hotcpu_notifier(cpu_callback, 0);
  	return 0;
  }
  subsys_initcall(kcompactd_init)

#endif /* CONFIG_COMPACTION */