mm/page_owner.c 15.2 KB
  // SPDX-License-Identifier: GPL-2.0
  #include <linux/debugfs.h>
  #include <linux/mm.h>
  #include <linux/slab.h>
  #include <linux/uaccess.h>
  #include <linux/bootmem.h>
  #include <linux/stacktrace.h>
  #include <linux/page_owner.h>
  #include <linux/jump_label.h>
  #include <linux/migrate.h>
  #include <linux/stackdepot.h>
  #include <linux/seq_file.h>

  #include "internal.h"
  /*
   * TODO: teach PAGE_OWNER_STACK_DEPTH (__dump_page_owner and save_stack)
   * to use off-stack temporary storage
   */
  #define PAGE_OWNER_STACK_DEPTH (16)
  struct page_owner {
  	unsigned int order;
  	gfp_t gfp_mask;
  	int last_migrate_reason;
  	depot_stack_handle_t handle;
  };
  static bool page_owner_disabled = true;
  DEFINE_STATIC_KEY_FALSE(page_owner_inited);

  static depot_stack_handle_t dummy_handle;
  static depot_stack_handle_t failure_handle;
  static depot_stack_handle_t early_handle;

  static void init_early_allocated_pages(void);
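
  /*
   * page_owner is disabled by default; it is enabled by booting with the
   * "page_owner=on" kernel parameter.
   */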
  static int early_page_owner_param(char *buf)
  {
  	if (!buf)
  		return -EINVAL;
  
  	if (strcmp(buf, "on") == 0)
  		page_owner_disabled = false;
  
  	return 0;
  }
  early_param("page_owner", early_page_owner_param);
  
  static bool need_page_owner(void)
  {
  	if (page_owner_disabled)
  		return false;
  
  	return true;
  }
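
  /*
   * Pre-recorded fallback stack handles: dummy_handle is returned when
   * save_stack() detects recursion into the allocator, failure_handle when
   * depot_save_stack() cannot record a new stack, and early_handle marks
   * pages allocated before page_owner was initialized.
   */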
  static __always_inline depot_stack_handle_t create_dummy_stack(void)
  {
  	unsigned long entries[4];
  	struct stack_trace dummy;
  
  	dummy.nr_entries = 0;
  	dummy.max_entries = ARRAY_SIZE(entries);
  	dummy.entries = &entries[0];
  	dummy.skip = 0;
  
  	save_stack_trace(&dummy);
  	return depot_save_stack(&dummy, GFP_KERNEL);
  }
  static noinline void register_dummy_stack(void)
  {
  	dummy_handle = create_dummy_stack();
  }

  static noinline void register_failure_stack(void)
  {
  	failure_handle = create_dummy_stack();
  }

  static noinline void register_early_stack(void)
  {
  	early_handle = create_dummy_stack();
  }
  static void init_page_owner(void)
  {
  	if (page_owner_disabled)
  		return;
  	register_dummy_stack();
  	register_failure_stack();
  	register_early_stack();
  	static_branch_enable(&page_owner_inited);
  	init_early_allocated_pages();
  }
  
  struct page_ext_operations page_owner_ops = {
  	.size = sizeof(struct page_owner),
  	.need = need_page_owner,
  	.init = init_page_owner,
  };
  static inline struct page_owner *get_page_owner(struct page_ext *page_ext)
  {
  	return (void *)page_ext + page_owner_ops.offset;
  }
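
  /* Called when pages are freed: clear the owner bit on each page of the block. */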
  void __reset_page_owner(struct page *page, unsigned int order)
  {
  	int i;
  	struct page_ext *page_ext;
  
  	for (i = 0; i < (1 << order); i++) {
  		page_ext = lookup_page_ext(page + i);
  		if (unlikely(!page_ext))
  			continue;
  		__clear_bit(PAGE_EXT_OWNER, &page_ext->flags);
  	}
  }
  static inline bool check_recursive_alloc(struct stack_trace *trace,
  					unsigned long ip)
  {
  	int i;
  
  	if (!trace->nr_entries)
  		return false;
  	for (i = 0; i < trace->nr_entries; i++) {
  		if (trace->entries[i] == ip)
  			return true;
  	}

  	return false;
  }
  
  static noinline depot_stack_handle_t save_stack(gfp_t flags)
  {
  	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
  	struct stack_trace trace = {
  		.nr_entries = 0,
  		.entries = entries,
  		.max_entries = PAGE_OWNER_STACK_DEPTH,
  		.skip = 2
  	};
  	depot_stack_handle_t handle;
  
  	save_stack_trace(&trace);
  	if (trace.nr_entries != 0 &&
  	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
  		trace.nr_entries--;
  
  	/*
  	 * We need to check recursion here because our request to stackdepot
  	 * could trigger memory allocation to save new entry. New memory
  	 * allocation would reach here and call depot_save_stack() again
  	 * if we don't catch it. There is still not enough memory in stackdepot
  	 * so it would try to allocate memory again and loop forever.
  	 */
  	if (check_recursive_alloc(&trace, _RET_IP_))
  		return dummy_handle;
  
  	handle = depot_save_stack(&trace, flags);
  	if (!handle)
  		handle = failure_handle;
  
  	return handle;
  }
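
  /* Store the stack handle, order and gfp mask in the page's page_ext data. */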
  static inline void __set_page_owner_handle(struct page_ext *page_ext,
  	depot_stack_handle_t handle, unsigned int order, gfp_t gfp_mask)
  {
  	struct page_owner *page_owner;

  	page_owner = get_page_owner(page_ext);
  	page_owner->handle = handle;
  	page_owner->order = order;
  	page_owner->gfp_mask = gfp_mask;
  	page_owner->last_migrate_reason = -1;
  
  	__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
  }
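
  /* Called from the page allocator: record the allocating stack for a new page. */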
  noinline void __set_page_owner(struct page *page, unsigned int order,
  					gfp_t gfp_mask)
  {
  	struct page_ext *page_ext = lookup_page_ext(page);
  	depot_stack_handle_t handle;
  
  	if (unlikely(!page_ext))
  		return;
  
  	handle = save_stack(gfp_mask);
  	__set_page_owner_handle(page_ext, handle, order, gfp_mask);
  }
  void __set_page_owner_migrate_reason(struct page *page, int reason)
  {
  	struct page_ext *page_ext = lookup_page_ext(page);
  	struct page_owner *page_owner;
  	if (unlikely(!page_ext))
  		return;

  	page_owner = get_page_owner(page_ext);
  	page_owner->last_migrate_reason = reason;
  }
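
  /*
   * Called when a high-order page is split into order-0 pages: give every
   * sub-page its own owner record.
   */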
  void __split_page_owner(struct page *page, unsigned int order)
  {
  	int i;
  	struct page_ext *page_ext = lookup_page_ext(page);
  	struct page_owner *page_owner;

  	if (unlikely(!page_ext))
  		return;

  	page_owner = get_page_owner(page_ext);
  	page_owner->order = 0;
  	for (i = 1; i < (1 << order); i++)
  		__copy_page_owner(page, page + i);
  }
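
  /* Copy the owner record from a page being migrated to its replacement. */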
  void __copy_page_owner(struct page *oldpage, struct page *newpage)
  {
  	struct page_ext *old_ext = lookup_page_ext(oldpage);
  	struct page_ext *new_ext = lookup_page_ext(newpage);
  	struct page_owner *old_page_owner, *new_page_owner;

  	if (unlikely(!old_ext || !new_ext))
  		return;
  	old_page_owner = get_page_owner(old_ext);
  	new_page_owner = get_page_owner(new_ext);
  	new_page_owner->order = old_page_owner->order;
  	new_page_owner->gfp_mask = old_page_owner->gfp_mask;
  	new_page_owner->last_migrate_reason =
  		old_page_owner->last_migrate_reason;
  	new_page_owner->handle = old_page_owner->handle;
  
  	/*
  	 * We don't clear the bit on the oldpage as it's going to be freed
  	 * after migration. Until then, the info can be useful in case of
  	 * a bug, and the overall stats will be off a bit only temporarily.
  	 * Also, migrate_misplaced_transhuge_page() can still fail the
  	 * migration and then we want the oldpage to retain the info. But
  	 * in that case we also don't need to explicitly clear the info from
  	 * the new page, which will be freed.
  	 */
  	__set_bit(PAGE_EXT_OWNER, &new_ext->flags);
  }
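
  /*
   * Helper for /proc/pagetypeinfo: count pages whose allocation migratetype
   * differs from the migratetype of the pageblock they sit in.
   */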
  void pagetypeinfo_showmixedcount_print(struct seq_file *m,
  				       pg_data_t *pgdat, struct zone *zone)
  {
  	struct page *page;
  	struct page_ext *page_ext;
  	struct page_owner *page_owner;
  	unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
  	unsigned long end_pfn = pfn + zone->spanned_pages;
  	unsigned long count[MIGRATE_TYPES] = { 0, };
  	int pageblock_mt, page_mt;
  	int i;
  
  	/* Scan block by block. First and last block may be incomplete */
  	pfn = zone->zone_start_pfn;
  
  	/*
  	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
  	 * a zone boundary, it will be double counted between zones. This does
  	 * not matter as the mixed block count will still be correct
  	 */
  	for (; pfn < end_pfn; ) {
  		if (!pfn_valid(pfn)) {
  			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
  			continue;
  		}
  
  		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
  		block_end_pfn = min(block_end_pfn, end_pfn);
  
  		page = pfn_to_page(pfn);
  		pageblock_mt = get_pageblock_migratetype(page);
  
  		for (; pfn < block_end_pfn; pfn++) {
  			if (!pfn_valid_within(pfn))
  				continue;
  
  			page = pfn_to_page(pfn);
  
  			if (page_zone(page) != zone)
  				continue;
  
  			if (PageBuddy(page)) {
  				unsigned long freepage_order;
  
  				freepage_order = page_order_unsafe(page);
  				if (freepage_order < MAX_ORDER)
  					pfn += (1UL << freepage_order) - 1;
  				continue;
  			}
  
  			if (PageReserved(page))
  				continue;
  
  			page_ext = lookup_page_ext(page);
  			if (unlikely(!page_ext))
  				continue;
  
  			if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
  				continue;
  			page_owner = get_page_owner(page_ext);
  			page_mt = gfpflags_to_migratetype(
  					page_owner->gfp_mask);
  			if (pageblock_mt != page_mt) {
  				if (is_migrate_cma(pageblock_mt))
  					count[MIGRATE_MOVABLE]++;
  				else
  					count[pageblock_mt]++;
  
  				pfn = block_end_pfn;
  				break;
  			}
  			pfn += (1UL << page_owner->order) - 1;
  		}
  	}
  
  	/* Print counts */
  	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
  	for (i = 0; i < MIGRATE_TYPES; i++)
  		seq_printf(m, "%12lu ", count[i]);
  	seq_putc(m, '\n');
  }
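
  /* Format a single page's owner record into a user-supplied buffer. */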
  static ssize_t
  print_page_owner(char __user *buf, size_t count, unsigned long pfn,
  		struct page *page, struct page_owner *page_owner,
  		depot_stack_handle_t handle)
  {
  	int ret;
  	int pageblock_mt, page_mt;
  	char *kbuf;
  	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
  	struct stack_trace trace = {
  		.nr_entries = 0,
  		.entries = entries,
  		.max_entries = PAGE_OWNER_STACK_DEPTH,
  		.skip = 0
  	};
  
  	kbuf = kmalloc(count, GFP_KERNEL);
  	if (!kbuf)
  		return -ENOMEM;
  
  	ret = snprintf(kbuf, count,
  			"Page allocated via order %u, mask %#x(%pGg)\n",
  			page_owner->order, page_owner->gfp_mask,
  			&page_owner->gfp_mask);
  
  	if (ret >= count)
  		goto err;
  
  	/* Print information relevant to grouping pages by mobility */
  	pageblock_mt = get_pageblock_migratetype(page);
  	page_mt  = gfpflags_to_migratetype(page_owner->gfp_mask);
  	ret += snprintf(kbuf + ret, count - ret,
  			"PFN %lu type %s Block %lu type %s Flags %#lx(%pGp)\n",
  			pfn,
  			migratetype_names[page_mt],
  			pfn >> pageblock_order,
  			migratetype_names[pageblock_mt],
  			page->flags, &page->flags);
  
  	if (ret >= count)
  		goto err;
  	depot_fetch_stack(handle, &trace);
  	ret += snprint_stack_trace(kbuf + ret, count - ret, &trace, 0);
  	if (ret >= count)
  		goto err;
  	if (page_owner->last_migrate_reason != -1) {
  		ret += snprintf(kbuf + ret, count - ret,
  			"Page has been migrated, last migrate reason: %s\n",
  			migrate_reason_names[page_owner->last_migrate_reason]);
  		if (ret >= count)
  			goto err;
  	}
  	ret += snprintf(kbuf + ret, count - ret, "\n");
  	if (ret >= count)
  		goto err;
  
  	if (copy_to_user(buf, kbuf, ret))
  		ret = -EFAULT;
  
  	kfree(kbuf);
  	return ret;
  
  err:
  	kfree(kbuf);
  	return -ENOMEM;
  }
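
  /* Print a page's owner record to the kernel log, e.g. from dump_page(). */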
  void __dump_page_owner(struct page *page)
  {
  	struct page_ext *page_ext = lookup_page_ext(page);
  	struct page_owner *page_owner;
  	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
  	struct stack_trace trace = {
  		.nr_entries = 0,
  		.entries = entries,
  		.max_entries = PAGE_OWNER_STACK_DEPTH,
  		.skip = 0
  	};
  	depot_stack_handle_t handle;
  	gfp_t gfp_mask;
  	int mt;

  	if (unlikely(!page_ext)) {
  		pr_alert("There is not page extension available.\n");
  		return;
  	}
  
  	page_owner = get_page_owner(page_ext);
  	gfp_mask = page_owner->gfp_mask;
  	mt = gfpflags_to_migratetype(gfp_mask);

  	if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
  		pr_alert("page_owner info is not active (free page?)\n");
  		return;
  	}
  	handle = READ_ONCE(page_owner->handle);
  	if (!handle) {
  		pr_alert("page_owner info is not active (free page?)\n");
  		return;
  	}
  
  	depot_fetch_stack(handle, &trace);
  	pr_alert("page allocated via order %u, migratetype %s, gfp_mask %#x(%pGg)\n",
  		 page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask);
  	print_stack_trace(&trace, 0);
  	if (page_owner->last_migrate_reason != -1)
  		pr_alert("page has been migrated, last migrate reason: %s\n",
  			migrate_reason_names[page_owner->last_migrate_reason]);
  }
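
  /*
   * Read handler for the debugfs "page_owner" file: scan PFNs starting at
   * the current file offset and report the next allocated page that has an
   * owner record.
   */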
  static ssize_t
  read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
  {
  	unsigned long pfn;
  	struct page *page;
  	struct page_ext *page_ext;
  	struct page_owner *page_owner;
  	depot_stack_handle_t handle;

  	if (!static_branch_unlikely(&page_owner_inited))
  		return -EINVAL;
  
  	page = NULL;
  	pfn = min_low_pfn + *ppos;
  
  	/* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
  	while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
  		pfn++;
  
  	drain_all_pages(NULL);
  
  	/* Find an allocated page */
  	for (; pfn < max_pfn; pfn++) {
  		/*
  		 * If the new page is in a new MAX_ORDER_NR_PAGES area,
  		 * validate the area as existing, skip it if not
  		 */
  		if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
  			pfn += MAX_ORDER_NR_PAGES - 1;
  			continue;
  		}
  
  		/* Check for holes within a MAX_ORDER area */
  		if (!pfn_valid_within(pfn))
  			continue;
  
  		page = pfn_to_page(pfn);
  		if (PageBuddy(page)) {
  			unsigned long freepage_order = page_order_unsafe(page);
  
  			if (freepage_order < MAX_ORDER)
  				pfn += (1UL << freepage_order) - 1;
  			continue;
  		}
  
  		page_ext = lookup_page_ext(page);
  		if (unlikely(!page_ext))
  			continue;
  
  		/*
  		 * Some pages could be missed by concurrent allocation or free,
  		 * because we don't hold the zone lock.
  		 */
  		if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
  			continue;
  		page_owner = get_page_owner(page_ext);
  		/*
  		 * Access to page_ext->handle isn't synchronized, so we should
  		 * be careful when accessing it.
  		 */
  		handle = READ_ONCE(page_owner->handle);
  		if (!handle)
  			continue;
  		/* Record the next PFN to read in the file offset */
  		*ppos = (pfn - min_low_pfn) + 1;
  		return print_page_owner(buf, count, pfn, page,
  				page_owner, handle);
  	}
  
  	return 0;
  }
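
  /*
   * Pages allocated before page_owner was fully initialized have no owner
   * record; walk the zone and tag them with early_handle.
   */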
  static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
  {
  	struct page *page;
  	struct page_ext *page_ext;
  	unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
  	unsigned long end_pfn = pfn + zone->spanned_pages;
  	unsigned long count = 0;
  
  	/* Scan block by block. First and last block may be incomplete */
  	pfn = zone->zone_start_pfn;
  
  	/*
  	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
  	 * a zone boundary, it will be double counted between zones. This does
  	 * not matter as the mixed block count will still be correct
  	 */
  	for (; pfn < end_pfn; ) {
  		if (!pfn_valid(pfn)) {
  			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
  			continue;
  		}
  
  		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
  		block_end_pfn = min(block_end_pfn, end_pfn);
  
  		page = pfn_to_page(pfn);
  
  		for (; pfn < block_end_pfn; pfn++) {
  			if (!pfn_valid_within(pfn))
  				continue;
  
  			page = pfn_to_page(pfn);
  			if (page_zone(page) != zone)
  				continue;
  			/*
  			 * To avoid having to grab zone->lock, be a little
  			 * careful when reading buddy page order. The only
  			 * danger is that we skip too much and potentially miss
  			 * some early allocated pages, which is better than
  			 * heavy lock contention.
  			 */
  			if (PageBuddy(page)) {
  				unsigned long order = page_order_unsafe(page);
  
  				if (order > 0 && order < MAX_ORDER)
  					pfn += (1UL << order) - 1;
  				continue;
  			}
  
  			if (PageReserved(page))
  				continue;
  
  			page_ext = lookup_page_ext(page);
  			if (unlikely(!page_ext))
  				continue;

  			/* Maybe overlapping zone */
  			if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
  				continue;
  
  			/* Found early allocated page */
  			__set_page_owner_handle(page_ext, early_handle, 0, 0);
  			count++;
  		}
  		cond_resched();
  	}
  
  	pr_info("Node %d, zone %8s: page owner found early allocated %lu pages\n",
  		pgdat->node_id, zone->name, count);
  }
  
  static void init_zones_in_node(pg_data_t *pgdat)
  {
  	struct zone *zone;
  	struct zone *node_zones = pgdat->node_zones;
  
  	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
  		if (!populated_zone(zone))
  			continue;
  		init_pages_in_zone(pgdat, zone);
  	}
  }
  
  static void init_early_allocated_pages(void)
  {
  	pg_data_t *pgdat;
  
  	drain_all_pages(NULL);
  	for_each_online_pgdat(pgdat)
  		init_zones_in_node(pgdat);
  }
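
  /* Expose the owner records through a read-only debugfs file, "page_owner". */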
  static const struct file_operations proc_page_owner_operations = {
  	.read		= read_page_owner,
  };
  
  static int __init pageowner_init(void)
  {
  	struct dentry *dentry;
  	if (!static_branch_unlikely(&page_owner_inited)) {
  		pr_info("page_owner is disabled\n");
  		return 0;
  	}
  
  	dentry = debugfs_create_file("page_owner", S_IRUSR, NULL,
  			NULL, &proc_page_owner_operations);
  	if (IS_ERR(dentry))
  		return PTR_ERR(dentry);
  
  	return 0;
  }
  late_initcall(pageowner_init)