mm/compaction.c
  /*
   * linux/mm/compaction.c
   *
   * Memory compaction for the reduction of external fragmentation. Note that
   * this heavily depends upon page migration to do all the real heavy
   * lifting
   *
   * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
   */
  #include <linux/swap.h>
  #include <linux/migrate.h>
  #include <linux/compaction.h>
  #include <linux/mm_inline.h>
  #include <linux/backing-dev.h>
  #include <linux/sysctl.h>
  #include <linux/sysfs.h>
  #include <linux/balloon_compaction.h>
  #include <linux/page-isolation.h>
  #include <linux/kasan.h>
  #include "internal.h"
  #ifdef CONFIG_COMPACTION
  static inline void count_compact_event(enum vm_event_item item)
  {
  	count_vm_event(item);
  }
  
  static inline void count_compact_events(enum vm_event_item item, long delta)
  {
  	count_vm_events(item, delta);
  }
  #else
  #define count_compact_event(item) do { } while (0)
  #define count_compact_events(item, delta) do { } while (0)
  #endif
  #if defined CONFIG_COMPACTION || defined CONFIG_CMA
  #ifdef CONFIG_TRACEPOINTS
  static const char *const compaction_status_string[] = {
  	"deferred",
  	"skipped",
  	"continue",
  	"partial",
  	"complete",
  	"no_suitable_page",
  	"not_suitable_zone",
  };
  #endif

  #define CREATE_TRACE_POINTS
  #include <trace/events/compaction.h>
  static unsigned long release_freepages(struct list_head *freelist)
  {
  	struct page *page, *next;
  	unsigned long high_pfn = 0;
  
  	list_for_each_entry_safe(page, next, freelist, lru) {
  		unsigned long pfn = page_to_pfn(page);
  		list_del(&page->lru);
  		__free_page(page);
  		if (pfn > high_pfn)
  			high_pfn = pfn;
  	}
  	return high_pfn;
  }
  static void map_pages(struct list_head *list)
  {
  	struct page *page;
  
  	list_for_each_entry(page, list, lru) {
  		arch_alloc_page(page, 0);
  		kernel_map_pages(page, 1, 1);
  		kasan_alloc_pages(page, 0);
  	}
  }
  static inline bool migrate_async_suitable(int migratetype)
  {
  	return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
  }
  /*
   * Check that the whole (or subset of) a pageblock given by the interval of
   * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
   * with the migration or free compaction scanner. The scanners then need to
   * use only pfn_valid_within() check for arches that allow holes within
   * pageblocks.
   *
   * Return struct page pointer of start_pfn, or NULL if checks were not passed.
   *
   * It's possible on some configurations to have a setup like node0 node1 node0,
   * i.e. it's possible that all pages within a zone's range of pages do not
   * belong to a single zone. We assume that a border between node0 and node1
   * can occur within a single pageblock, but not a node0 node1 node0
   * interleaving within a single pageblock. It is therefore sufficient to check
   * the first and last page of a pageblock and avoid checking each individual
   * page in a pageblock.
   */
  static struct page *pageblock_pfn_to_page(unsigned long start_pfn,
  				unsigned long end_pfn, struct zone *zone)
  {
  	struct page *start_page;
  	struct page *end_page;
  
  	/* end_pfn is one past the range we are checking */
  	end_pfn--;
  
  	if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
  		return NULL;
  
  	start_page = pfn_to_page(start_pfn);
  
  	if (page_zone(start_page) != zone)
  		return NULL;
  
  	end_page = pfn_to_page(end_pfn);
  
  	/* This gives a shorter code than deriving page_zone(end_page) */
  	if (page_zone_id(start_page) != page_zone_id(end_page))
  		return NULL;
  
  	return start_page;
  }
  #ifdef CONFIG_COMPACTION
  
  /* Do not skip compaction more than 64 times */
  #define COMPACT_MAX_DEFER_SHIFT 6
  
  /*
   * Compaction is deferred when compaction fails to result in a page
   * allocation success. 1 << compact_defer_shift compactions are skipped up
   * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT.
   */
  void defer_compaction(struct zone *zone, int order)
  {
  	zone->compact_considered = 0;
  	zone->compact_defer_shift++;
  
  	if (order < zone->compact_order_failed)
  		zone->compact_order_failed = order;
  
  	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
  		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
  
  	trace_mm_compaction_defer_compaction(zone, order);
  }
  
  /* Returns true if compaction should be skipped this time */
  bool compaction_deferred(struct zone *zone, int order)
  {
  	unsigned long defer_limit = 1UL << zone->compact_defer_shift;
  
  	if (order < zone->compact_order_failed)
  		return false;
  
  	/* Avoid possible overflow */
  	if (++zone->compact_considered > defer_limit)
  		zone->compact_considered = defer_limit;
  
  	if (zone->compact_considered >= defer_limit)
  		return false;
  
  	trace_mm_compaction_deferred(zone, order);
  
  	return true;
  }
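  /*
   * Illustrative note: each defer_compaction() call bumps compact_defer_shift
   * (capped at COMPACT_MAX_DEFER_SHIFT), so repeated failures back off
   * exponentially, and compaction_deferred() keeps returning true until
   * zone->compact_considered has counted up to 1 << compact_defer_shift,
   * a limit that reaches 1 << 6 = 64 after six or more consecutive failures.
   */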
  
  /*
   * Update defer tracking counters after successful compaction of given order,
   * which means an allocation either succeeded (alloc_success == true) or is
   * expected to succeed.
   */
  void compaction_defer_reset(struct zone *zone, int order,
  		bool alloc_success)
  {
  	if (alloc_success) {
  		zone->compact_considered = 0;
  		zone->compact_defer_shift = 0;
  	}
  	if (order >= zone->compact_order_failed)
  		zone->compact_order_failed = order + 1;
  
  	trace_mm_compaction_defer_reset(zone, order);
  }
  
  /* Returns true if restarting compaction after many failures */
  bool compaction_restarting(struct zone *zone, int order)
  {
  	if (order < zone->compact_order_failed)
  		return false;
  
  	return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
  		zone->compact_considered >= 1UL << zone->compact_defer_shift;
  }
  /* Returns true if the pageblock should be scanned for pages to isolate. */
  static inline bool isolation_suitable(struct compact_control *cc,
  					struct page *page)
  {
  	if (cc->ignore_skip_hint)
  		return true;
  
  	return !get_pageblock_skip(page);
  }
  
  /*
   * This function is called to clear all cached information on pageblocks that
   * should be skipped for page isolation when the migrate and free page
   * scanners meet.
   */
  static void __reset_isolation_suitable(struct zone *zone)
  {
  	unsigned long start_pfn = zone->zone_start_pfn;
  	unsigned long end_pfn = zone_end_pfn(zone);
  	unsigned long pfn;
  	zone->compact_cached_migrate_pfn[0] = start_pfn;
  	zone->compact_cached_migrate_pfn[1] = start_pfn;
  	zone->compact_cached_free_pfn = end_pfn;
  	zone->compact_blockskip_flush = false;
  
  	/* Walk the zone and mark every pageblock as suitable for isolation */
  	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
  		struct page *page;
  
  		cond_resched();
  
  		if (!pfn_valid(pfn))
  			continue;
  
  		page = pfn_to_page(pfn);
  		if (zone != page_zone(page))
  			continue;
  
  		clear_pageblock_skip(page);
  	}
  }
  void reset_isolation_suitable(pg_data_t *pgdat)
  {
  	int zoneid;
  
  	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
  		struct zone *zone = &pgdat->node_zones[zoneid];
  		if (!populated_zone(zone))
  			continue;
  
  		/* Only flush if a full compaction finished recently */
  		if (zone->compact_blockskip_flush)
  			__reset_isolation_suitable(zone);
  	}
  }
  /*
   * If no pages were isolated then mark this pageblock to be skipped in the
   * future. The information is later cleared by __reset_isolation_suitable().
   */
  static void update_pageblock_skip(struct compact_control *cc,
  			struct page *page, unsigned long nr_isolated,
  			bool migrate_scanner)
  {
  	struct zone *zone = cc->zone;
  	unsigned long pfn;
  
  	if (cc->ignore_skip_hint)
  		return;
  	if (!page)
  		return;
  	if (nr_isolated)
  		return;
  	set_pageblock_skip(page);

  	pfn = page_to_pfn(page);
  
  	/* Update where async and sync compaction should restart */
  	if (migrate_scanner) {
  		if (pfn > zone->compact_cached_migrate_pfn[0])
  			zone->compact_cached_migrate_pfn[0] = pfn;
  		if (cc->mode != MIGRATE_ASYNC &&
  		    pfn > zone->compact_cached_migrate_pfn[1])
  			zone->compact_cached_migrate_pfn[1] = pfn;
  	} else {
  		if (pfn < zone->compact_cached_free_pfn)
  			zone->compact_cached_free_pfn = pfn;
  	}
  }
  #else
  static inline bool isolation_suitable(struct compact_control *cc,
  					struct page *page)
  {
  	return true;
  }
  static void update_pageblock_skip(struct compact_control *cc,
  			struct page *page, unsigned long nr_isolated,
  			bool migrate_scanner)
  {
  }
  #endif /* CONFIG_COMPACTION */
  /*
   * Compaction requires the taking of some coarse locks that are potentially
   * very heavily contended. For async compaction, back out if the lock cannot
   * be taken immediately. For sync compaction, spin on the lock if needed.
   *
   * Returns true if the lock is held
   * Returns false if the lock is not held and compaction should abort
   */
  static bool compact_trylock_irqsave(spinlock_t *lock, unsigned long *flags,
  						struct compact_control *cc)
  {
  	if (cc->mode == MIGRATE_ASYNC) {
  		if (!spin_trylock_irqsave(lock, *flags)) {
  			cc->contended = COMPACT_CONTENDED_LOCK;
  			return false;
  		}
  	} else {
  		spin_lock_irqsave(lock, *flags);
  	}

  	return true;
  }
  /*
   * Compaction requires the taking of some coarse locks that are potentially
   * very heavily contended. The lock should be periodically unlocked to avoid
   * having disabled IRQs for a long time, even when there is nobody waiting on
   * the lock. It might also be that allowing the IRQs will result in
   * need_resched() becoming true. If scheduling is needed, async compaction
   * aborts. Sync compaction schedules.
   * Either compaction type will also abort if a fatal signal is pending.
   * In either case if the lock was locked, it is dropped and not regained.
   *
   * Returns true if compaction should abort due to fatal signal pending, or
   *		async compaction due to need_resched()
   * Returns false when compaction can continue (sync compaction might have
   *		scheduled)
   */
  static bool compact_unlock_should_abort(spinlock_t *lock,
  		unsigned long flags, bool *locked, struct compact_control *cc)
  {
  	if (*locked) {
  		spin_unlock_irqrestore(lock, flags);
  		*locked = false;
  	}

  	if (fatal_signal_pending(current)) {
  		cc->contended = COMPACT_CONTENDED_SCHED;
  		return true;
  	}

  	if (need_resched()) {
  		if (cc->mode == MIGRATE_ASYNC) {
  			cc->contended = COMPACT_CONTENDED_SCHED;
  			return true;
  		}
  		cond_resched();
  	}
  	return false;
  }
  /*
   * Aside from avoiding lock contention, compaction also periodically checks
   * need_resched() and either schedules in sync compaction or aborts async
   * compaction. This is similar to what compact_unlock_should_abort() does, but
   * is used where no lock is concerned.
   *
   * Returns false when no scheduling was needed, or sync compaction scheduled.
   * Returns true when async compaction should abort.
   */
  static inline bool compact_should_abort(struct compact_control *cc)
  {
  	/* async compaction aborts if contended */
  	if (need_resched()) {
  		if (cc->mode == MIGRATE_ASYNC) {
  			cc->contended = COMPACT_CONTENDED_SCHED;
  			return true;
  		}
  
  		cond_resched();
  	}
  
  	return false;
  }
  /*
   * Isolate free pages onto a private freelist. If @strict is true, abort and
   * return 0 on any invalid PFNs or non-free pages inside the pageblock
   * (even though it may still end up isolating some pages).
   */
  static unsigned long isolate_freepages_block(struct compact_control *cc,
  				unsigned long *start_pfn,
  				unsigned long end_pfn,
  				struct list_head *freelist,
  				bool strict)
  {
  	int nr_scanned = 0, total_isolated = 0;
  	struct page *cursor, *valid_page = NULL;
  	unsigned long flags = 0;
  	bool locked = false;
  	unsigned long blockpfn = *start_pfn;

  	cursor = pfn_to_page(blockpfn);
  	/* Isolate free pages. */
  	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
  		int isolated, i;
  		struct page *page = cursor;
  		/*
  		 * Periodically drop the lock (if held) regardless of its
  		 * contention, to give chance to IRQs. Abort if fatal signal
  		 * pending or async compaction detects need_resched()
  		 */
  		if (!(blockpfn % SWAP_CLUSTER_MAX)
  		    && compact_unlock_should_abort(&cc->zone->lock, flags,
  								&locked, cc))
  			break;
  		nr_scanned++;
  		if (!pfn_valid_within(blockpfn))
  			goto isolate_fail;
  		if (!valid_page)
  			valid_page = page;
  		if (!PageBuddy(page))
  			goto isolate_fail;
  
  		/*
  		 * If we already hold the lock, we can skip some rechecking.
  		 * Note that if we hold the lock now, checked_pageblock was
  		 * already set in some previous iteration (or strict is true),
  		 * so it is correct to skip the suitable migration target
  		 * recheck as well.
  		 */
  		if (!locked) {
  			/*
  			 * The zone lock must be held to isolate freepages.
  			 * Unfortunately this is a very coarse lock and can be
  			 * heavily contended if there are parallel allocations
  			 * or parallel compactions. For async compaction do not
  			 * spin on the lock and we acquire the lock as late as
  			 * possible.
  			 */
  			locked = compact_trylock_irqsave(&cc->zone->lock,
  								&flags, cc);
  			if (!locked)
  				break;

  			/* Recheck this is a buddy page under lock */
  			if (!PageBuddy(page))
  				goto isolate_fail;
  		}
  
  		/* Found a free page, break it into order-0 pages */
  		isolated = split_free_page(page);
  		total_isolated += isolated;
  		for (i = 0; i < isolated; i++) {
  			list_add(&page->lru, freelist);
  			page++;
  		}
  
  		/* If a page was split, advance to the end of it */
  		if (isolated) {
  			cc->nr_freepages += isolated;
  			if (!strict &&
  				cc->nr_migratepages <= cc->nr_freepages) {
  				blockpfn += isolated;
  				break;
  			}
  			blockpfn += isolated - 1;
  			cursor += isolated - 1;
  			continue;
  		}
  
  isolate_fail:
  		if (strict)
  			break;
  		else
  			continue;
  	}
  	trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn,
  					nr_scanned, total_isolated);
  	/* Record how far we have got within the block */
  	*start_pfn = blockpfn;
  	/*
  	 * If strict isolation is requested by CMA then check that all the
  	 * pages requested were isolated. If there were any failures, 0 is
  	 * returned and CMA will fail.
  	 */
  	if (strict && blockpfn < end_pfn)
  		total_isolated = 0;
  
  	if (locked)
  		spin_unlock_irqrestore(&cc->zone->lock, flags);
  	/* Update the pageblock-skip if the whole pageblock was scanned */
  	if (blockpfn == end_pfn)
  		update_pageblock_skip(cc, valid_page, total_isolated, false);

  	count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
  	if (total_isolated)
  		count_compact_events(COMPACTISOLATED, total_isolated);
  	return total_isolated;
  }
  /**
   * isolate_freepages_range() - isolate free pages.
   * @cc:        Compaction control structure.
   * @start_pfn: The first PFN to start isolating.
   * @end_pfn:   The one-past-last PFN.
   *
   * Non-free pages, invalid PFNs, or zone boundaries within the
   * [start_pfn, end_pfn) range are considered errors and cause the function to
   * undo its actions and return zero.
   *
   * Otherwise, the function returns the one-past-the-last PFN of the isolated
   * page (which may be greater than end_pfn if the end fell in the middle of
   * a free page).
   */
  unsigned long
  isolate_freepages_range(struct compact_control *cc,
  			unsigned long start_pfn, unsigned long end_pfn)
  {
  	unsigned long isolated, pfn, block_end_pfn;
  	LIST_HEAD(freelist);
  	pfn = start_pfn;
  	block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
  
  	for (; pfn < end_pfn; pfn += isolated,
  				block_end_pfn += pageblock_nr_pages) {
  		/* Protect pfn from changing by isolate_freepages_block */
  		unsigned long isolate_start_pfn = pfn;

  		block_end_pfn = min(block_end_pfn, end_pfn);
  		/*
  		 * pfn could pass block_end_pfn if the isolated free page
  		 * is larger than pageblock order. In this case, we adjust
  		 * the scanning range to the pageblock that pfn now falls in.
  		 */
  		if (pfn >= block_end_pfn) {
  			block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
  			block_end_pfn = min(block_end_pfn, end_pfn);
  		}
  		if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone))
  			break;
  		isolated = isolate_freepages_block(cc, &isolate_start_pfn,
  						block_end_pfn, &freelist, true);
  
  		/*
  		 * In strict mode, isolate_freepages_block() returns 0 if
  		 * there are any holes in the block (ie. invalid PFNs or
  		 * non-free pages).
  		 */
  		if (!isolated)
  			break;
  
  		/*
  		 * If we managed to isolate pages, it is always (1 << n) *
  		 * pageblock_nr_pages for some non-negative n.  (Max order
  		 * page may span two pageblocks).
  		 */
  	}
  
  	/* split_free_page does not map the pages */
  	map_pages(&freelist);
  
  	if (pfn < end_pfn) {
  		/* Loop terminated early, cleanup. */
  		release_freepages(&freelist);
  		return 0;
  	}
  
  	/* We don't use freelists for anything. */
  	return pfn;
  }
  /* Update the number of anon and file isolated pages in the zone */
  static void acct_isolated(struct zone *zone, struct compact_control *cc)
  {
  	struct page *page;
  	unsigned int count[2] = { 0, };

  	if (list_empty(&cc->migratepages))
  		return;
  	list_for_each_entry(page, &cc->migratepages, lru)
  		count[!!page_is_file_cache(page)]++;

  	mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
  	mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
  }
  
  /* Similar to reclaim, but different enough that they don't share logic */
  static bool too_many_isolated(struct zone *zone)
  {
  	unsigned long active, inactive, isolated;
  
  	inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
  					zone_page_state(zone, NR_INACTIVE_ANON);
  	active = zone_page_state(zone, NR_ACTIVE_FILE) +
  					zone_page_state(zone, NR_ACTIVE_ANON);
  	isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
  					zone_page_state(zone, NR_ISOLATED_ANON);
  	return isolated > (inactive + active) / 2;
  }
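  /*
   * Illustrative note: the check above makes isolate_migratepages_block()
   * throttle itself once the isolated LRU pages in the zone outnumber half
   * of its active plus inactive LRU pages.
   */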
  /**
   * isolate_migratepages_block() - isolate all migrate-able pages within
   *				  a single pageblock
   * @cc:		Compaction control structure.
   * @low_pfn:	The first PFN to isolate
   * @end_pfn:	The one-past-the-last PFN to isolate, within same pageblock
   * @isolate_mode: Isolation mode to be used.
   *
   * Isolate all pages that can be migrated from the range specified by
   * [low_pfn, end_pfn). The range is expected to be within the same pageblock.
   * Returns zero if there is a fatal signal pending, otherwise the PFN of the
   * first page that was not scanned (which may be less than, equal to, or
   * greater than end_pfn).
   *
   * The pages are isolated on cc->migratepages list (not required to be empty),
   * and cc->nr_migratepages is updated accordingly. The cc->migrate_pfn field
   * is neither read nor updated.
   */
  static unsigned long
  isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
  			unsigned long end_pfn, isolate_mode_t isolate_mode)
  {
  	struct zone *zone = cc->zone;
  	unsigned long nr_scanned = 0, nr_isolated = 0;
  	struct list_head *migratelist = &cc->migratepages;
  	struct lruvec *lruvec;
  	unsigned long flags = 0;
  	bool locked = false;
  	struct page *page = NULL, *valid_page = NULL;
  	unsigned long start_pfn = low_pfn;

  	/*
  	 * Ensure that there are not too many pages isolated from the LRU
  	 * list by either parallel reclaimers or compaction. If there are,
  	 * delay for some time until fewer pages are isolated
  	 */
  	while (unlikely(too_many_isolated(zone))) {
  		/* async migration should just abort */
  		if (cc->mode == MIGRATE_ASYNC)
  			return 0;

  		congestion_wait(BLK_RW_ASYNC, HZ/10);
  
  		if (fatal_signal_pending(current))
  			return 0;
  	}
  	if (compact_should_abort(cc))
  		return 0;

  	/* Time to isolate some pages for migration */
  	for (; low_pfn < end_pfn; low_pfn++) {
  		/*
  		 * Periodically drop the lock (if held) regardless of its
  		 * contention, to give chance to IRQs. Abort async compaction
  		 * if contended.
  		 */
  		if (!(low_pfn % SWAP_CLUSTER_MAX)
  		    && compact_unlock_should_abort(&zone->lru_lock, flags,
  								&locked, cc))
  			break;

  		if (!pfn_valid_within(low_pfn))
  			continue;
  		nr_scanned++;

  		page = pfn_to_page(low_pfn);

  		if (!valid_page)
  			valid_page = page;
  		/*
  		 * Skip if free. We read page order here without zone lock
  		 * which is generally unsafe, but the race window is small and
  		 * the worst thing that can happen is that we skip some
  		 * potential isolation targets.
  		 */
  		if (PageBuddy(page)) {
  			unsigned long freepage_order = page_order_unsafe(page);
  
  			/*
  			 * Without lock, we cannot be sure that what we got is
  			 * a valid page order. Consider only values in the
  			 * valid order range to prevent low_pfn overflow.
  			 */
  			if (freepage_order > 0 && freepage_order < MAX_ORDER)
  				low_pfn += (1UL << freepage_order) - 1;
  			continue;
  		}

  		/*
  		 * Check may be lockless but that's ok as we recheck later.
  		 * It's possible to migrate LRU pages and balloon pages;
  		 * skip any other type of page.
  		 */
  		if (!PageLRU(page)) {
  			if (unlikely(balloon_page_movable(page))) {
  				if (balloon_page_isolate(page)) {
  					/* Successfully isolated */
  					goto isolate_success;
  				}
  			}
  			continue;
  		}
  
  		/*
  		 * PageLRU is set. lru_lock normally excludes isolation
  		 * splitting and collapsing (collapsing has already happened
  		 * if PageLRU is set) but the lock is not necessarily taken
  		 * here and it is wasteful to take it just to check transhuge.
  		 * Check TransHuge without lock and skip the whole pageblock if
  		 * it's either a transhuge or hugetlbfs page, as calling
  		 * compound_order() without preventing THP from splitting the
  		 * page underneath us may return surprising results.
  		 */
  		if (PageTransHuge(page)) {
  			if (!locked)
  				low_pfn = ALIGN(low_pfn + 1,
  						pageblock_nr_pages) - 1;
  			else
  				low_pfn += (1 << compound_order(page)) - 1;
  			continue;
  		}
  		/*
  		 * Migration will fail if an anonymous page is pinned in memory,
  		 * so avoid taking lru_lock and isolating it unnecessarily in an
  		 * admittedly racy check.
  		 */
  		if (!page_mapping(page) &&
  		    page_count(page) > page_mapcount(page))
  			continue;
  		/* If we already hold the lock, we can skip some rechecking */
  		if (!locked) {
  			locked = compact_trylock_irqsave(&zone->lru_lock,
  								&flags, cc);
  			if (!locked)
  				break;

  			/* Recheck PageLRU and PageTransHuge under lock */
  			if (!PageLRU(page))
  				continue;
  			if (PageTransHuge(page)) {
  				low_pfn += (1 << compound_order(page)) - 1;
  				continue;
  			}
  		}
  		lruvec = mem_cgroup_page_lruvec(page, zone);
  		/* Try to isolate the page */
  		if (__isolate_lru_page(page, isolate_mode) != 0)
  			continue;
  		VM_BUG_ON_PAGE(PageTransCompound(page), page);

  		/* Successfully isolated */
  		del_page_from_lru_list(page, lruvec, page_lru(page));
  
  isolate_success:
  		list_add(&page->lru, migratelist);
  		cc->nr_migratepages++;
  		nr_isolated++;
  
  		/* Avoid isolating too much */
  		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
  			++low_pfn;
  			break;
  		}
  	}
  	/*
  	 * The PageBuddy() check could have potentially brought us outside
  	 * the range to be scanned.
  	 */
  	if (unlikely(low_pfn > end_pfn))
  		low_pfn = end_pfn;
  	if (locked)
  		spin_unlock_irqrestore(&zone->lru_lock, flags);

  	/*
  	 * Update the pageblock-skip information and cached scanner pfn,
  	 * if the whole pageblock was scanned without isolating any page.
  	 */
  	if (low_pfn == end_pfn)
  		update_pageblock_skip(cc, valid_page, nr_isolated, true);

  	trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
  						nr_scanned, nr_isolated);

  	count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned);
  	if (nr_isolated)
  		count_compact_events(COMPACTISOLATED, nr_isolated);

  	return low_pfn;
  }
  /**
   * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
   * @cc:        Compaction control structure.
   * @start_pfn: The first PFN to start isolating.
   * @end_pfn:   The one-past-last PFN.
   *
   * Returns zero if isolation fails fatally due to e.g. pending signal.
   * Otherwise, the function returns the one-past-the-last PFN of the isolated
   * page (which may be greater than end_pfn if the end fell in the middle of
   * a THP page).
   */
  unsigned long
  isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
  							unsigned long end_pfn)
  {
  	unsigned long pfn, block_end_pfn;
  
  	/* Scan block by block. First and last block may be incomplete */
  	pfn = start_pfn;
  	block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
  
  	for (; pfn < end_pfn; pfn = block_end_pfn,
  				block_end_pfn += pageblock_nr_pages) {
  
  		block_end_pfn = min(block_end_pfn, end_pfn);
  		if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone))
  			continue;
  
  		pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
  							ISOLATE_UNEVICTABLE);
  
  		/*
  		 * In case of fatal failure, release everything that might
  		 * have been isolated in the previous iteration, and signal
  		 * the failure back to caller.
  		 */
  		if (!pfn) {
  			putback_movable_pages(&cc->migratepages);
  			cc->nr_migratepages = 0;
  			break;
  		}
  
  		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
  			break;
  	}
  	acct_isolated(cc->zone, cc);
  
  	return pfn;
  }
  #endif /* CONFIG_COMPACTION || CONFIG_CMA */
  #ifdef CONFIG_COMPACTION
  
  /* Returns true if the page is within a block suitable for migration to */
  static bool suitable_migration_target(struct page *page)
  {
  	/* If the page is a large free page, then disallow migration */
  	if (PageBuddy(page)) {
  		/*
  		 * We are checking page_order without zone->lock taken. But
  		 * the only small danger is that we skip a potentially suitable
  		 * pageblock, so it's not worth to check order for valid range.
  		 */
  		if (page_order_unsafe(page) >= pageblock_order)
  			return false;
  	}
  
  	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
  	if (migrate_async_suitable(get_pageblock_migratetype(page)))
  		return true;
  
  	/* Otherwise skip the block */
  	return false;
  }
  /*
   * Based on information in the current compact_control, find blocks
   * suitable for isolating free pages from and then isolate them.
   */
  static void isolate_freepages(struct compact_control *cc)
  {
  	struct zone *zone = cc->zone;
  	struct page *page;
  	unsigned long block_start_pfn;	/* start of current pageblock */
  	unsigned long isolate_start_pfn; /* exact pfn we start at */
  	unsigned long block_end_pfn;	/* end of current pageblock */
  	unsigned long low_pfn;	     /* lowest pfn scanner is able to scan */
  	struct list_head *freelist = &cc->freepages;

  	/*
  	 * Initialise the free scanner. The starting point is where we last
  	 * successfully isolated from, zone-cached value, or the end of the
  	 * zone when isolating for the first time. For looping we also need
  	 * this pfn aligned down to the pageblock boundary, because we do
  	 * block_start_pfn -= pageblock_nr_pages in the for loop.
  	 * For ending point, take care when isolating in the last pageblock of
  	 * a zone which ends in the middle of a pageblock.
  	 * The low boundary is the end of the pageblock the migration scanner
  	 * is using.
  	 */
  	isolate_start_pfn = cc->free_pfn;
  	block_start_pfn = cc->free_pfn & ~(pageblock_nr_pages-1);
  	block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
  						zone_end_pfn(zone));
  	low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages);

  	/*
  	 * Isolate free pages until enough are available to migrate the
  	 * pages on cc->migratepages. We stop searching if the migrate
  	 * and free page scanners meet or enough free pages are isolated.
  	 */
  	for (; block_start_pfn >= low_pfn &&
  			cc->nr_migratepages > cc->nr_freepages;
  				block_end_pfn = block_start_pfn,
  				block_start_pfn -= pageblock_nr_pages,
  				isolate_start_pfn = block_start_pfn) {

  		/*
  		 * This can iterate a massively long zone without finding any
  		 * suitable migration targets, so periodically check if we need
  		 * to schedule, or even abort async compaction.
  		 */
  		if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
  						&& compact_should_abort(cc))
  			break;

  		page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
  									zone);
  		if (!page)
  			continue;
  
  		/* Check the block is suitable for migration */
  		if (!suitable_migration_target(page))
  			continue;

  		/* If isolation recently failed, do not retry */
  		if (!isolation_suitable(cc, page))
  			continue;
  		/* Found a block suitable for isolating free pages from. */
  		isolate_freepages_block(cc, &isolate_start_pfn,
  					block_end_pfn, freelist, false);
  
  		/*
  		 * Remember where the free scanner should restart next time,
  		 * which is where isolate_freepages_block() left off.
  		 * But if it scanned the whole pageblock, isolate_start_pfn
  		 * now points at block_end_pfn, which is the start of the next
  		 * pageblock.
  		 * In that case we will however want to restart at the start
  		 * of the previous pageblock.
  		 */
  		cc->free_pfn = (isolate_start_pfn < block_end_pfn) ?
  				isolate_start_pfn :
  				block_start_pfn - pageblock_nr_pages;
  
  		/*
  		 * isolate_freepages_block() might have aborted due to async
  		 * compaction being contended
  		 */
  		if (cc->contended)
  			break;
  	}
  
  	/* split_free_page does not map the pages */
  	map_pages(freelist);
  	/*
  	 * If we crossed the migrate scanner, we want to keep it that way
  	 * so that compact_finished() may detect this
  	 */
  	if (block_start_pfn < low_pfn)
  		cc->free_pfn = cc->migrate_pfn;
  }
  
  /*
   * This is a migrate-callback that "allocates" freepages by taking pages
   * from the isolated freelists in the block we are migrating to.
   */
  static struct page *compaction_alloc(struct page *migratepage,
  					unsigned long data,
  					int **result)
  {
  	struct compact_control *cc = (struct compact_control *)data;
  	struct page *freepage;
  	/*
  	 * Isolate free pages if necessary, and if we are not aborting due to
  	 * contention.
  	 */
  	if (list_empty(&cc->freepages)) {
  		if (!cc->contended)
  			isolate_freepages(cc);
  
  		if (list_empty(&cc->freepages))
  			return NULL;
  	}
  
  	freepage = list_entry(cc->freepages.next, struct page, lru);
  	list_del(&freepage->lru);
  	cc->nr_freepages--;
  
  	return freepage;
  }
  
  /*
   * This is a migrate-callback that "frees" freepages back to the isolated
   * freelist.  All pages on the freelist are from the same zone, so there is no
   * special handling needed for NUMA.
   */
  static void compaction_free(struct page *page, unsigned long data)
  {
  	struct compact_control *cc = (struct compact_control *)data;
  
  	list_add(&page->lru, &cc->freepages);
  	cc->nr_freepages++;
  }
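  /*
   * Usage sketch (assumed, the caller is not part of this hunk): compact_zone()
   * is expected to hand compaction_alloc() and compaction_free() to
   * migrate_pages() as its get-new-page and put-new-page callbacks, roughly:
   *
   *	err = migrate_pages(&cc->migratepages, compaction_alloc,
   *			compaction_free, (unsigned long)cc, cc->mode,
   *			MR_COMPACTION);
   */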
  /* possible outcome of isolate_migratepages */
  typedef enum {
  	ISOLATE_ABORT,		/* Abort compaction now */
  	ISOLATE_NONE,		/* No pages isolated, continue scanning */
  	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
  } isolate_migrate_t;
  
  /*
   * Allow userspace to control policy on scanning the unevictable LRU for
   * compactable pages.
   */
  int sysctl_compact_unevictable_allowed __read_mostly = 1;
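  /* Toggled at runtime via /proc/sys/vm/compact_unevictable_allowed. */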
  
  /*
   * Isolate all pages that can be migrated from the first suitable block,
   * starting at the block pointed to by the migrate scanner pfn within
   * compact_control.
   */
  static isolate_migrate_t isolate_migratepages(struct zone *zone,
  					struct compact_control *cc)
  {
  	unsigned long low_pfn, end_pfn;
  	struct page *page;
  	const isolate_mode_t isolate_mode =
  		(sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) |
  		(cc->mode == MIGRATE_ASYNC ? ISOLATE_ASYNC_MIGRATE : 0);

  	/*
  	 * Start at where we last stopped, or beginning of the zone as
  	 * initialized by compact_zone()
  	 */
  	low_pfn = cc->migrate_pfn;
  
  	/* Only scan within a pageblock boundary */
  	end_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages);

  	/*
  	 * Iterate over whole pageblocks until we find the first suitable.
  	 * Do not cross the free scanner.
  	 */
  	for (; end_pfn <= cc->free_pfn;
  			low_pfn = end_pfn, end_pfn += pageblock_nr_pages) {

  		/*
  		 * This can potentially iterate a massively long zone with
  		 * many pageblocks unsuitable, so periodically check if we
  		 * need to schedule, or even abort async compaction.
  		 */
  		if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
  						&& compact_should_abort(cc))
  			break;

  		page = pageblock_pfn_to_page(low_pfn, end_pfn, zone);
  		if (!page)
  			continue;
  		/* If isolation recently failed, do not retry */
  		if (!isolation_suitable(cc, page))
  			continue;
  
  		/*
  		 * For async compaction, also only scan in MOVABLE blocks.
  		 * Async compaction is optimistic to see if the minimum amount
  		 * of work satisfies the allocation.
  		 */
  		if (cc->mode == MIGRATE_ASYNC &&
  		    !migrate_async_suitable(get_pageblock_migratetype(page)))
  			continue;
  
  		/* Perform the isolation */
  		low_pfn = isolate_migratepages_block(cc, low_pfn, end_pfn,
  								isolate_mode);
  		if (!low_pfn || cc->contended) {
  			acct_isolated(zone, cc);
  			return ISOLATE_ABORT;
  		}
  
  		/*
  		 * Either we isolated something and will proceed with migration,
  		 * or we failed and compact_zone() should decide whether to
  		 * continue or not.
  		 */
  		break;
  	}
  
  	acct_isolated(zone, cc);
  	/*
  	 * Record where migration scanner will be restarted. If we end up in
  	 * the same pageblock as the free scanner, make the scanners fully
  	 * meet so that compact_finished() terminates compaction.
  	 */
  	cc->migrate_pfn = (end_pfn <= cc->free_pfn) ? low_pfn : cc->free_pfn;

  	return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
  }
  static int __compact_finished(struct zone *zone, struct compact_control *cc,
  			    const int migratetype)
  {
  	unsigned int order;
  	unsigned long watermark;

  	if (cc->contended || fatal_signal_pending(current))
748446bb6   Mel Gorman   mm: compaction: m...
1086
  		return COMPACT_PARTIAL;
753341a4b   Mel Gorman   revert "mm: have ...
1087
  	/* Compaction run completes if the migrate and free scanner meet */
bb13ffeb9   Mel Gorman   mm: compaction: c...
1088
  	if (cc->free_pfn <= cc->migrate_pfn) {
55b7c4c99   Vlastimil Babka   mm: compaction: r...
1089
  		/* Let the next compaction start anew. */
35979ef33   David Rientjes   mm, compaction: a...
1090
1091
  		zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
  		zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
55b7c4c99   Vlastimil Babka   mm: compaction: r...
1092
  		zone->compact_cached_free_pfn = zone_end_pfn(zone);
62997027c   Mel Gorman   mm: compaction: c...
1093
1094
1095
1096
1097
1098
1099
1100
  		/*
  		 * Mark that the PG_migrate_skip information should be cleared
  		 * by kswapd when it goes to sleep. kswapd does not set the
  		 * flag itself as the decision to be clear should be directly
  		 * based on an allocation request.
  		 */
  		if (!current_is_kswapd())
  			zone->compact_blockskip_flush = true;
748446bb6   Mel Gorman   mm: compaction: m...
1101
  		return COMPACT_COMPLETE;
bb13ffeb9   Mel Gorman   mm: compaction: c...
1102
  	}
748446bb6   Mel Gorman   mm: compaction: m...
1103

82478fb7b   Johannes Weiner   mm: compaction: p...
1104
1105
1106
1107
  	/*
  	 * order == -1 is expected when compacting via
  	 * /proc/sys/vm/compact_memory
  	 */
56de7263f   Mel Gorman   mm: compaction: d...
1108
1109
  	if (cc->order == -1)
  		return COMPACT_CONTINUE;
3957c7768   Michal Hocko   mm: compaction: f...
1110
1111
  	/* Compaction run is not finished if the watermark is not met */
  	watermark = low_wmark_pages(zone);
3957c7768   Michal Hocko   mm: compaction: f...
1112

ebff39801   Vlastimil Babka   mm, compaction: p...
1113
1114
  	if (!zone_watermark_ok(zone, cc->order, watermark, cc->classzone_idx,
  							cc->alloc_flags))
3957c7768   Michal Hocko   mm: compaction: f...
1115
  		return COMPACT_CONTINUE;
56de7263f   Mel Gorman   mm: compaction: d...
1116
  	/* Direct compactor: Is a suitable page free? */
8fb74b9fb   Mel Gorman   mm: compaction: p...
1117
1118
  	for (order = cc->order; order < MAX_ORDER; order++) {
  		struct free_area *area = &zone->free_area[order];
2149cdaef   Joonsoo Kim   mm/compaction: en...
1119
  		bool can_steal;
8fb74b9fb   Mel Gorman   mm: compaction: p...
1120
1121
  
  		/* Job done if page is free of the right migratetype */
6d7ce5594   David Rientjes   mm, compaction: p...
1122
  		if (!list_empty(&area->free_list[migratetype]))
8fb74b9fb   Mel Gorman   mm: compaction: p...
1123
  			return COMPACT_PARTIAL;
2149cdaef   Joonsoo Kim   mm/compaction: en...
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
  #ifdef CONFIG_CMA
  		/* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
  		if (migratetype == MIGRATE_MOVABLE &&
  			!list_empty(&area->free_list[MIGRATE_CMA]))
  			return COMPACT_PARTIAL;
  #endif
  		/*
  		 * Job done if allocation would steal freepages from
  		 * other migratetype buddy lists.
  		 */
  		if (find_suitable_fallback(area, order, migratetype,
  						true, &can_steal) != -1)
56de7263f   Mel Gorman   mm: compaction: d...
1136
1137
  			return COMPACT_PARTIAL;
  	}
837d026d5   Joonsoo Kim   mm/compaction: mo...
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
  	return COMPACT_NO_SUITABLE_PAGE;
  }
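/*
 * The compact_finished() wrapper below lets the tracepoint record the precise
 * reason (e.g. COMPACT_NO_SUITABLE_PAGE) before it is collapsed into the
 * generic COMPACT_CONTINUE that callers act on.
 */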
  
static int compact_finished(struct zone *zone, struct compact_control *cc,
			    const int migratetype)
{
	int ret;

	ret = __compact_finished(zone, cc, migratetype);
	trace_mm_compaction_finished(zone, cc->order, ret);
	if (ret == COMPACT_NO_SUITABLE_PAGE)
		ret = COMPACT_CONTINUE;

	return ret;
}

/*
 * compaction_suitable: Is this suitable to run compaction on this zone now?
 * Returns
 *   COMPACT_SKIPPED  - If there are too few free pages for compaction
 *   COMPACT_PARTIAL  - If the allocation would succeed without compaction
 *   COMPACT_CONTINUE - If compaction should run now
 */
static unsigned long __compaction_suitable(struct zone *zone, int order,
					int alloc_flags, int classzone_idx)
{
	int fragindex;
	unsigned long watermark;

	/*
	 * order == -1 is expected when compacting via
	 * /proc/sys/vm/compact_memory
	 */
	if (order == -1)
		return COMPACT_CONTINUE;
	watermark = low_wmark_pages(zone);
	/*
	 * If the watermarks for a high-order allocation are already met,
	 * there should be no need for compaction at all.
	 */
	if (zone_watermark_ok(zone, order, watermark, classzone_idx,
								alloc_flags))
		return COMPACT_PARTIAL;
	/*
	 * Watermarks for order-0 must be met for compaction to proceed.
	 * Note the 2UL below: during migration, copies of pages need to be
	 * allocated, so for a short time the memory footprint is higher.
	 */
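	/*
	 * For illustration: an order-9 request (a 2MB THP with 4KB pages)
	 * adds 2UL << 9 = 1024 pages, i.e. about 4MB of headroom on top of
	 * the low watermark before compaction is allowed to run.
	 */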
	watermark += (2UL << order);
	if (!zone_watermark_ok(zone, 0, watermark, classzone_idx, alloc_flags))
		return COMPACT_SKIPPED;

	/*
	 * The fragmentation index determines whether allocation failures are
	 * due to low memory or to external fragmentation:
	 *
	 * index of -1000 would imply allocations might succeed depending on
	 * watermarks, but we already failed the high-order watermark check
	 * index towards 0 implies failure is due to lack of memory
	 * index towards 1000 implies failure is due to fragmentation
	 *
	 * Only compact if a failure would be due to fragmentation.
	 */
	fragindex = fragmentation_index(zone, order);
	if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
		return COMPACT_NOT_SUITABLE_ZONE;
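	/*
	 * With the default sysctl_extfrag_threshold of 500 (see its
	 * definition later in this file), compaction is only attempted when
	 * the index points more towards fragmentation than towards a plain
	 * lack of memory.
	 */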

	return COMPACT_CONTINUE;
}

unsigned long compaction_suitable(struct zone *zone, int order,
					int alloc_flags, int classzone_idx)
{
	unsigned long ret;

	ret = __compaction_suitable(zone, order, alloc_flags, classzone_idx);
	trace_mm_compaction_suitable(zone, order, ret);
	if (ret == COMPACT_NOT_SUITABLE_ZONE)
		ret = COMPACT_SKIPPED;

	return ret;
}

static int compact_zone(struct zone *zone, struct compact_control *cc)
{
	int ret;
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	const int migratetype = gfpflags_to_migratetype(cc->gfp_mask);
	const bool sync = cc->mode != MIGRATE_ASYNC;
	unsigned long last_migrated_pfn = 0;

	ret = compaction_suitable(zone, cc->order, cc->alloc_flags,
							cc->classzone_idx);
	switch (ret) {
	case COMPACT_PARTIAL:
	case COMPACT_SKIPPED:
		/* Compaction is likely to fail */
		return ret;
	case COMPACT_CONTINUE:
		/* Fall through to compaction */
		;
	}

	/*
	 * Clear pageblock skip if there were failures recently and compaction
	 * is about to be retried after being deferred. kswapd does not do
	 * this reset as it'll reset the cached information when going to sleep.
	 */
	if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
		__reset_isolation_suitable(zone);

	/*
	 * Set up to move all movable pages to the end of the zone. Use cached
	 * information on where the scanners should start, but check that it
	 * is initialised by ensuring the values are within zone boundaries.
	 */
	cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
	cc->free_pfn = zone->compact_cached_free_pfn;
	if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) {
		cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1);
		zone->compact_cached_free_pfn = cc->free_pfn;
	}
	if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) {
		cc->migrate_pfn = start_pfn;
		zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
		zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
	}
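	/*
	 * The cached scanner positions can be out of bounds here, for
	 * example on the very first compaction of a zone when the cached
	 * values are still zero; the checks above then fall back to the
	 * zone boundaries.
	 */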

	trace_mm_compaction_begin(start_pfn, cc->migrate_pfn,
				cc->free_pfn, end_pfn, sync);

	migrate_prep_local();
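	/*
	 * migrate_prep_local() drains this CPU's pending LRU additions so
	 * that those pages become visible to the isolation code below (see
	 * mm/migrate.c for the details).
	 */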
	while ((ret = compact_finished(zone, cc, migratetype)) ==
						COMPACT_CONTINUE) {
		int err;
		unsigned long isolate_start_pfn = cc->migrate_pfn;

		switch (isolate_migratepages(zone, cc)) {
		case ISOLATE_ABORT:
			ret = COMPACT_PARTIAL;
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			goto out;
		case ISOLATE_NONE:
			/*
			 * We haven't isolated and migrated anything, but
			 * there might still be unflushed migrations from
			 * the previous cc->order aligned block.
			 */
			goto check_drain;
		case ISOLATE_SUCCESS:
			;
		}

		err = migrate_pages(&cc->migratepages, compaction_alloc,
				compaction_free, (unsigned long)cc, cc->mode,
				MR_COMPACTION);

		trace_mm_compaction_migratepages(cc->nr_migratepages, err,
							&cc->migratepages);

		/* All pages were either migrated or will be released */
		cc->nr_migratepages = 0;
		if (err) {
			putback_movable_pages(&cc->migratepages);
			/*
			 * migrate_pages() may return -ENOMEM when the
			 * scanners meet, and we want compact_finished() to
			 * detect it.
			 */
			if (err == -ENOMEM && cc->free_pfn > cc->migrate_pfn) {
				ret = COMPACT_PARTIAL;
				goto out;
			}
		}

		/*
		 * Record where we could have freed pages by migration and not
		 * yet flushed them to the buddy allocator. We use the pfn
		 * that isolate_migratepages() started from in this loop
		 * iteration - this is the lowest page that could have been
		 * isolated and then freed by migration.
		 */
		if (!last_migrated_pfn)
			last_migrated_pfn = isolate_start_pfn;

check_drain:
		/*
		 * Has the migration scanner moved away from the previous
		 * cc->order aligned block where we migrated from? If yes,
		 * flush the pages that were freed, so that they can merge and
		 * compact_finished() can detect immediately if allocation
		 * would succeed.
		 */
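		/*
		 * For example, for an order-9 request the mask below rounds
		 * cc->migrate_pfn down to a 512-page boundary, i.e. freed
		 * pages are flushed once per 2MB of progress with 4KB pages.
		 */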
		if (cc->order > 0 && last_migrated_pfn) {
			int cpu;
			unsigned long current_block_start =
				cc->migrate_pfn & ~((1UL << cc->order) - 1);

			if (last_migrated_pfn < current_block_start) {
				cpu = get_cpu();
				lru_add_drain_cpu(cpu);
				drain_local_pages(zone);
				put_cpu();
				/* No more flushing until we migrate again */
				last_migrated_pfn = 0;
			}
		}
	}

out:
	/*
	 * Release free pages and update where the free scanner should restart,
	 * so we don't leave any returned pages behind in the next attempt.
	 */
	if (cc->nr_freepages > 0) {
		unsigned long free_pfn = release_freepages(&cc->freepages);

		cc->nr_freepages = 0;
		VM_BUG_ON(free_pfn == 0);
		/* The cached pfn is always the first in a pageblock */
		free_pfn &= ~(pageblock_nr_pages-1);
		/*
		 * Only go back, not forward. The cached pfn might have been
		 * reset to the zone end already in compact_finished().
		 */
		if (free_pfn > zone->compact_cached_free_pfn)
			zone->compact_cached_free_pfn = free_pfn;
	}

	trace_mm_compaction_end(start_pfn, cc->migrate_pfn,
				cc->free_pfn, end_pfn, sync, ret);

	return ret;
}

static unsigned long compact_zone_order(struct zone *zone, int order,
		gfp_t gfp_mask, enum migrate_mode mode, int *contended,
		int alloc_flags, int classzone_idx)
{
	unsigned long ret;
	struct compact_control cc = {
		.nr_freepages = 0,
		.nr_migratepages = 0,
		.order = order,
		.gfp_mask = gfp_mask,
		.zone = zone,
		.mode = mode,
		.alloc_flags = alloc_flags,
		.classzone_idx = classzone_idx,
	};
	INIT_LIST_HEAD(&cc.freepages);
	INIT_LIST_HEAD(&cc.migratepages);

	ret = compact_zone(zone, &cc);

	VM_BUG_ON(!list_empty(&cc.freepages));
	VM_BUG_ON(!list_empty(&cc.migratepages));

	*contended = cc.contended;
	return ret;
}

int sysctl_extfrag_threshold = 500;

/**
 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
 * @gfp_mask: The GFP mask of the current allocation
 * @order: The order of the current allocation
 * @alloc_flags: The allocation flags of the current allocation
 * @ac: The context of the current allocation
 * @mode: The migration mode for async, sync light, or sync migration
 * @contended: Return value that determines if compaction was aborted due to
 *	       need_resched() or lock contention
 *
 * This is the main entry point for direct page compaction.
 */
unsigned long try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
			int alloc_flags, const struct alloc_context *ac,
			enum migrate_mode mode, int *contended)
{
	int may_enter_fs = gfp_mask & __GFP_FS;
	int may_perform_io = gfp_mask & __GFP_IO;
	struct zoneref *z;
	struct zone *zone;
	int rc = COMPACT_DEFERRED;
	int all_zones_contended = COMPACT_CONTENDED_LOCK; /* init for &= op */

	*contended = COMPACT_CONTENDED_NONE;

	/* Check if the GFP flags allow compaction */
	if (!order || !may_enter_fs || !may_perform_io)
		return COMPACT_SKIPPED;
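	/*
	 * In other words, order-0 requests and allocations that may not
	 * enter the filesystem or perform IO (e.g. GFP_NOFS or GFP_NOIO
	 * contexts) never trigger direct compaction.
	 */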

	trace_mm_compaction_try_to_compact_pages(order, gfp_mask, mode);

	/* Compact each zone in the list */
	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
								ac->nodemask) {
		int status;
		int zone_contended;

		if (compaction_deferred(zone, order))
			continue;

		status = compact_zone_order(zone, order, gfp_mask, mode,
				&zone_contended, alloc_flags,
				ac->classzone_idx);
		rc = max(status, rc);
		/*
		 * It takes at least one zone that wasn't lock contended
		 * to clear all_zones_contended.
		 */
		all_zones_contended &= zone_contended;

		/* If a normal allocation would succeed, stop compacting */
		if (zone_watermark_ok(zone, order, low_wmark_pages(zone),
					ac->classzone_idx, alloc_flags)) {
			/*
			 * We think the allocation will succeed in this zone,
			 * but it is not certain, hence the false. The caller
			 * will repeat this with true if the allocation indeed
			 * succeeds in this zone.
			 */
			compaction_defer_reset(zone, order, false);
			/*
			 * It is possible that async compaction aborted due to
			 * need_resched() and the watermarks were ok thanks to
			 * somebody else freeing memory. The allocation can
			 * however still fail, so we had better signal the
			 * need_resched() contention anyway (this will not
			 * prevent the allocation attempt).
			 */
			if (zone_contended == COMPACT_CONTENDED_SCHED)
				*contended = COMPACT_CONTENDED_SCHED;

			goto break_loop;
		}
		if (mode != MIGRATE_ASYNC && status == COMPACT_COMPLETE) {
			/*
			 * We think that the allocation won't succeed in this
			 * zone, so we defer compaction there. If it ends up
			 * succeeding after all, it will be reset.
			 */
			defer_compaction(zone, order);
		}

		/*
		 * We might have stopped compacting due to need_resched() in
		 * async compaction, or due to a fatal signal being detected.
		 * In that case do not try further zones and signal
		 * need_resched() contention.
		 */
		if ((zone_contended == COMPACT_CONTENDED_SCHED)
					|| fatal_signal_pending(current)) {
			*contended = COMPACT_CONTENDED_SCHED;
			goto break_loop;
		}

		continue;
break_loop:
		/*
		 * We might not have tried all the zones, so be conservative
		 * and assume they are not all lock contended.
		 */
		all_zones_contended = 0;
		break;
	}

	/*
	 * If at least one zone wasn't deferred or skipped, we report whether
	 * all zones that were tried were lock contended.
	 */
	if (rc > COMPACT_SKIPPED && all_zones_contended)
		*contended = COMPACT_CONTENDED_LOCK;

	return rc;
}

/* Compact all zones within a node */
static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
{
	int zoneid;
	struct zone *zone;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {

		zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		cc->nr_freepages = 0;
		cc->nr_migratepages = 0;
		cc->zone = zone;
		INIT_LIST_HEAD(&cc->freepages);
		INIT_LIST_HEAD(&cc->migratepages);

		/*
		 * When called via /proc/sys/vm/compact_memory
		 * this makes sure we compact the whole zone regardless of
		 * cached scanner positions.
		 */
		if (cc->order == -1)
			__reset_isolation_suitable(zone);
		if (cc->order == -1 || !compaction_deferred(zone, cc->order))
			compact_zone(zone, cc);
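		/*
		 * If this pass brought the zone above its low watermark for
		 * the requested order, clear any deferred-compaction state so
		 * that future allocations will try compacting here again.
		 */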

		if (cc->order > 0) {
			if (zone_watermark_ok(zone, cc->order,
						low_wmark_pages(zone), 0, 0))
				compaction_defer_reset(zone, cc->order, false);
		}

		VM_BUG_ON(!list_empty(&cc->freepages));
		VM_BUG_ON(!list_empty(&cc->migratepages));
	}
}

void compact_pgdat(pg_data_t *pgdat, int order)
{
	struct compact_control cc = {
		.order = order,
		.mode = MIGRATE_ASYNC,
	};

	if (!order)
		return;

	__compact_pgdat(pgdat, &cc);
}
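
/*
 * Fully compact one node. This is the path used by the explicit triggers
 * below (the /proc/sys/vm/compact_memory handler and the per-node sysfs
 * 'compact' file), hence the synchronous mode and the ignored skip hints.
 */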
static void compact_node(int nid)
{
	struct compact_control cc = {
		.order = -1,
		.mode = MIGRATE_SYNC,
		.ignore_skip_hint = true,
	};

	__compact_pgdat(NODE_DATA(nid), &cc);
}

/* Compact all nodes in the system */
static void compact_nodes(void)
{
	int nid;

	/* Flush pending updates to the LRU lists */
	lru_add_drain_all();

	for_each_online_node(nid)
		compact_node(nid);
}

/* The written value is actually unused; all memory is compacted */
int sysctl_compact_memory;

/* This is the entry point for compacting all nodes via /proc/sys/vm */
int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	if (write)
		compact_nodes();

	return 0;
}
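
/*
 * Usage example (any written value triggers a full compaction of every
 * online node):
 *
 *	# echo 1 > /proc/sys/vm/compact_memory
 */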

int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_minmax(table, write, buffer, length, ppos);

	return 0;
}
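
/*
 * sysctl_extfrag_threshold, consumed by __compaction_suitable() above, is
 * exposed via sysctl as vm.extfrag_threshold (typically clamped to the
 * 0-1000 range by its sysctl table entry; default 500 as defined earlier in
 * this file). Raising it makes direct compaction less likely to run for a
 * given fragmentation index; lowering it makes it more likely.
 */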
#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
static ssize_t sysfs_compact_node(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	int nid = dev->id;

	if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
		/* Flush pending updates to the LRU lists */
		lru_add_drain_all();

		compact_node(nid);
	}

	return count;
}
static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);
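
/*
 * This attribute normally appears as /sys/devices/system/node/node<N>/compact;
 * writing anything to it compacts just that node, e.g.:
 *
 *	# echo 1 > /sys/devices/system/node/node0/compact
 */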

int compaction_register_node(struct node *node)
{
	return device_create_file(&node->dev, &dev_attr_compact);
}

void compaction_unregister_node(struct node *node)
{
	return device_remove_file(&node->dev, &dev_attr_compact);
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */

#endif /* CONFIG_COMPACTION */