mm/page_owner.c
  // SPDX-License-Identifier: GPL-2.0
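/*
 * page_owner: record the call stack behind every page allocation (and,
 * via free_handle, the last free), so leaks and unexpected allocations
 * can be traced back to their caller.
 *
 * Rough usage, as implemented below (the debugfs path assumes the usual
 * mount point):
 *   - boot with "page_owner=on" to enable tracking (early_page_owner_param())
 *   - dump the records from the debugfs file created in pageowner_init(),
 *     e.g.  cat /sys/kernel/debug/page_owner > page_owner_full.txt
 */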
  #include <linux/debugfs.h>
  #include <linux/mm.h>
  #include <linux/slab.h>
  #include <linux/uaccess.h>
  #include <linux/memblock.h>
  #include <linux/stacktrace.h>
  #include <linux/page_owner.h>
  #include <linux/jump_label.h>
  #include <linux/migrate.h>
  #include <linux/stackdepot.h>
  #include <linux/seq_file.h>

  #include "internal.h"
  /*
   * TODO: teach PAGE_OWNER_STACK_DEPTH (__dump_page_owner and save_stack)
 * to use off-stack temporary storage
   */
  #define PAGE_OWNER_STACK_DEPTH (16)
  struct page_owner {
  	unsigned short order;
  	short last_migrate_reason;
  	gfp_t gfp_mask;
  	depot_stack_handle_t handle;
  	depot_stack_handle_t free_handle;
  };
  static bool page_owner_enabled = false;
  DEFINE_STATIC_KEY_FALSE(page_owner_inited);

  static depot_stack_handle_t dummy_handle;
  static depot_stack_handle_t failure_handle;
  static depot_stack_handle_t early_handle;

  static void init_early_allocated_pages(void);
  static int __init early_page_owner_param(char *buf)
  {
  	if (!buf)
  		return -EINVAL;
  
  	if (strcmp(buf, "on") == 0)
  		page_owner_enabled = true;
  
  	return 0;
  }
  early_param("page_owner", early_page_owner_param);
  
  static bool need_page_owner(void)
  {
  	return page_owner_enabled;
  }
  static __always_inline depot_stack_handle_t create_dummy_stack(void)
  {
  	unsigned long entries[4];
  	unsigned int nr_entries;

  	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
  	return stack_depot_save(entries, nr_entries, GFP_KERNEL);
  }
  static noinline void register_dummy_stack(void)
  {
  	dummy_handle = create_dummy_stack();
  }

  static noinline void register_failure_stack(void)
  {
  	failure_handle = create_dummy_stack();
  }

  static noinline void register_early_stack(void)
  {
  	early_handle = create_dummy_stack();
  }
  static void init_page_owner(void)
  {
  	if (!page_owner_enabled)
  		return;
  	register_dummy_stack();
  	register_failure_stack();
  	register_early_stack();
  	static_branch_enable(&page_owner_inited);
  	init_early_allocated_pages();
  }
  
  struct page_ext_operations page_owner_ops = {
  	.size = sizeof(struct page_owner),
  	.need = need_page_owner,
  	.init = init_page_owner,
  };
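
/*
 * The page_owner data for a page lives inside its struct page_ext, at the
 * offset the page_ext core reserves for us based on page_owner_ops.size.
 */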
  static inline struct page_owner *get_page_owner(struct page_ext *page_ext)
  {
  	return (void *)page_ext + page_owner_ops.offset;
  }
  static inline bool check_recursive_alloc(unsigned long *entries,
  					 unsigned int nr_entries,
  					 unsigned long ip)
  {
  	unsigned int i;

  	for (i = 0; i < nr_entries; i++) {
  		if (entries[i] == ip)
  			return true;
  	}
  	return false;
  }
  
  static noinline depot_stack_handle_t save_stack(gfp_t flags)
  {
  	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
  	depot_stack_handle_t handle;
  	unsigned int nr_entries;

  	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);
  
  	/*
  	 * We need to check recursion here because our request to
	 * stackdepot could trigger memory allocation to save a new
	 * entry. That new allocation would reach here and call
	 * stack_depot_save() again if we don't catch it. There is
  	 * still not enough memory in stackdepot so it would try to
  	 * allocate memory again and loop forever.
  	 */
  	if (check_recursive_alloc(entries, nr_entries, _RET_IP_))
  		return dummy_handle;
  	handle = stack_depot_save(entries, nr_entries, flags);
  	if (!handle)
  		handle = failure_handle;
  
  	return handle;
  }
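
/*
 * Called on the free path: record the freeing stack in free_handle and clear
 * PAGE_EXT_OWNER_ALLOCATED on every sub-page, so the allocation info is kept
 * but the pages are reported as freed.
 */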
  void __reset_page_owner(struct page *page, unsigned int order)
  {
  	int i;
  	struct page_ext *page_ext;
  	depot_stack_handle_t handle = 0;
  	struct page_owner *page_owner;
  	handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);

  	page_ext = lookup_page_ext(page);
  	if (unlikely(!page_ext))
  		return;
  	for (i = 0; i < (1 << order); i++) {
  		__clear_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);
  		page_owner = get_page_owner(page_ext);
  		page_owner->free_handle = handle;
  		page_ext = page_ext_next(page_ext);
  	}
  }
  static inline void __set_page_owner_handle(struct page *page,
  	struct page_ext *page_ext, depot_stack_handle_t handle,
  	unsigned int order, gfp_t gfp_mask)
  {
  	struct page_owner *page_owner;
  	int i;

  	for (i = 0; i < (1 << order); i++) {
  		page_owner = get_page_owner(page_ext);
  		page_owner->handle = handle;
  		page_owner->order = order;
  		page_owner->gfp_mask = gfp_mask;
  		page_owner->last_migrate_reason = -1;
  		__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
  		__set_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);

  		page_ext = page_ext_next(page_ext);
  	}
  }
  noinline void __set_page_owner(struct page *page, unsigned int order,
  					gfp_t gfp_mask)
  {
  	struct page_ext *page_ext = lookup_page_ext(page);
  	depot_stack_handle_t handle;
  
  	if (unlikely(!page_ext))
  		return;
  
  	handle = save_stack(gfp_mask);
  	__set_page_owner_handle(page, page_ext, handle, order, gfp_mask);
  }
  void __set_page_owner_migrate_reason(struct page *page, int reason)
  {
  	struct page_ext *page_ext = lookup_page_ext(page);
  	struct page_owner *page_owner;
  	if (unlikely(!page_ext))
  		return;

  	page_owner = get_page_owner(page_ext);
  	page_owner->last_migrate_reason = reason;
  }
  void __split_page_owner(struct page *page, unsigned int order)
  {
  	int i;
  	struct page_ext *page_ext = lookup_page_ext(page);
  	struct page_owner *page_owner;

  	if (unlikely(!page_ext))
  		return;

  	for (i = 0; i < (1 << order); i++) {
  		page_owner = get_page_owner(page_ext);
  		page_owner->order = 0;
  		page_ext = page_ext_next(page_ext);
  	}
  }
  void __copy_page_owner(struct page *oldpage, struct page *newpage)
  {
  	struct page_ext *old_ext = lookup_page_ext(oldpage);
  	struct page_ext *new_ext = lookup_page_ext(newpage);
  	struct page_owner *old_page_owner, *new_page_owner;

  	if (unlikely(!old_ext || !new_ext))
  		return;
  	old_page_owner = get_page_owner(old_ext);
  	new_page_owner = get_page_owner(new_ext);
  	new_page_owner->order = old_page_owner->order;
  	new_page_owner->gfp_mask = old_page_owner->gfp_mask;
  	new_page_owner->last_migrate_reason =
  		old_page_owner->last_migrate_reason;
  	new_page_owner->handle = old_page_owner->handle;
  
  	/*
  	 * We don't clear the bit on the oldpage as it's going to be freed
  	 * after migration. Until then, the info can be useful in case of
	 * a bug, and the overall stats will be off a bit only temporarily.
  	 * Also, migrate_misplaced_transhuge_page() can still fail the
  	 * migration and then we want the oldpage to retain the info. But
  	 * in that case we also don't need to explicitly clear the info from
  	 * the new page, which will be freed.
  	 */
  	__set_bit(PAGE_EXT_OWNER, &new_ext->flags);
  	__set_bit(PAGE_EXT_OWNER_ALLOCATED, &new_ext->flags);
  }
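
/*
 * Back end for the pagetypeinfo "mixed" counts: a pageblock is counted once
 * if it contains a page whose allocation migratetype differs from the
 * pageblock's current migratetype.
 */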
  void pagetypeinfo_showmixedcount_print(struct seq_file *m,
  				       pg_data_t *pgdat, struct zone *zone)
  {
  	struct page *page;
  	struct page_ext *page_ext;
  	struct page_owner *page_owner;
  	unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
  	unsigned long end_pfn = pfn + zone->spanned_pages;
  	unsigned long count[MIGRATE_TYPES] = { 0, };
  	int pageblock_mt, page_mt;
  	int i;
  
  	/* Scan block by block. First and last block may be incomplete */
  	pfn = zone->zone_start_pfn;
  
  	/*
  	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
  	 * a zone boundary, it will be double counted between zones. This does
  	 * not matter as the mixed block count will still be correct
  	 */
  	for (; pfn < end_pfn; ) {
  		page = pfn_to_online_page(pfn);
  		if (!page) {
  			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
  			continue;
  		}
  
  		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
  		block_end_pfn = min(block_end_pfn, end_pfn);
  		pageblock_mt = get_pageblock_migratetype(page);
  
  		for (; pfn < block_end_pfn; pfn++) {
  			if (!pfn_valid_within(pfn))
  				continue;
  			/* The pageblock is online, no need to recheck. */
  			page = pfn_to_page(pfn);
  
  			if (page_zone(page) != zone)
  				continue;
  
  			if (PageBuddy(page)) {
  				unsigned long freepage_order;
  
  				freepage_order = page_order_unsafe(page);
  				if (freepage_order < MAX_ORDER)
  					pfn += (1UL << freepage_order) - 1;
  				continue;
  			}
  
  			if (PageReserved(page))
  				continue;
  
  			page_ext = lookup_page_ext(page);
  			if (unlikely(!page_ext))
  				continue;
  			if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
  				continue;
  			page_owner = get_page_owner(page_ext);
  			page_mt = gfpflags_to_migratetype(
  					page_owner->gfp_mask);
  			if (pageblock_mt != page_mt) {
  				if (is_migrate_cma(pageblock_mt))
  					count[MIGRATE_MOVABLE]++;
  				else
  					count[pageblock_mt]++;
  
  				pfn = block_end_pfn;
  				break;
  			}
  			pfn += (1UL << page_owner->order) - 1;
  		}
  	}
  
  	/* Print counts */
  	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
  	for (i = 0; i < MIGRATE_TYPES; i++)
  		seq_printf(m, "%12lu ", count[i]);
	seq_putc(m, '\n');
  }
  static ssize_t
  print_page_owner(char __user *buf, size_t count, unsigned long pfn,
  		struct page *page, struct page_owner *page_owner,
  		depot_stack_handle_t handle)
  {
  	int ret, pageblock_mt, page_mt;
  	unsigned long *entries;
  	unsigned int nr_entries;
  	char *kbuf;
  	count = min_t(size_t, count, PAGE_SIZE);
  	kbuf = kmalloc(count, GFP_KERNEL);
  	if (!kbuf)
  		return -ENOMEM;
  
  	ret = snprintf(kbuf, count,
  			"Page allocated via order %u, mask %#x(%pGg)
  ",
  			page_owner->order, page_owner->gfp_mask,
  			&page_owner->gfp_mask);
  
  	if (ret >= count)
  		goto err;
  
  	/* Print information relevant to grouping pages by mobility */
  	pageblock_mt = get_pageblock_migratetype(page);
  	page_mt  = gfpflags_to_migratetype(page_owner->gfp_mask);
  	ret += snprintf(kbuf + ret, count - ret,
  			"PFN %lu type %s Block %lu type %s Flags %#lx(%pGp)
  ",
  			pfn,
  			migratetype_names[page_mt],
  			pfn >> pageblock_order,
  			migratetype_names[pageblock_mt],
  			page->flags, &page->flags);
  
  	if (ret >= count)
  		goto err;
  	nr_entries = stack_depot_fetch(handle, &entries);
  	ret += stack_trace_snprint(kbuf + ret, count - ret, entries, nr_entries, 0);
  	if (ret >= count)
  		goto err;
  	if (page_owner->last_migrate_reason != -1) {
  		ret += snprintf(kbuf + ret, count - ret,
  			"Page has been migrated, last migrate reason: %s
  ",
  			migrate_reason_names[page_owner->last_migrate_reason]);
  		if (ret >= count)
  			goto err;
  	}
  	ret += snprintf(kbuf + ret, count - ret, "
  ");
  	if (ret >= count)
  		goto err;
  
  	if (copy_to_user(buf, kbuf, ret))
  		ret = -EFAULT;
  
  	kfree(kbuf);
  	return ret;
  
  err:
  	kfree(kbuf);
  	return -ENOMEM;
  }
  void __dump_page_owner(struct page *page)
  {
  	struct page_ext *page_ext = lookup_page_ext(page);
  	struct page_owner *page_owner;
  	depot_stack_handle_t handle;
  	unsigned long *entries;
  	unsigned int nr_entries;
  	gfp_t gfp_mask;
  	int mt;

  	if (unlikely(!page_ext)) {
  		pr_alert("There is not page extension available.
  ");
  		return;
  	}
  
  	page_owner = get_page_owner(page_ext);
  	gfp_mask = page_owner->gfp_mask;
  	mt = gfpflags_to_migratetype(gfp_mask);

  	if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
  		pr_alert("page_owner info is not present (never set?)
  ");
  		return;
  	}
  	if (test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
  		pr_alert("page_owner tracks the page as allocated
  ");
  	else
  		pr_alert("page_owner tracks the page as freed
  ");
  
  	pr_alert("page last allocated via order %u, migratetype %s, gfp_mask %#x(%pGg)
  ",
  		 page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask);
  	handle = READ_ONCE(page_owner->handle);
  	if (!handle) {
  		pr_alert("page_owner allocation stack trace missing
  ");
  	} else {
  		nr_entries = stack_depot_fetch(handle, &entries);
  		stack_trace_print(entries, nr_entries, 0);
  	}
  	handle = READ_ONCE(page_owner->free_handle);
  	if (!handle) {
  		pr_alert("page_owner free stack trace missing
  ");
  	} else {
  		nr_entries = stack_depot_fetch(handle, &entries);
  		pr_alert("page last free stack trace:
  ");
  		stack_trace_print(entries, nr_entries, 0);
  	}

  	if (page_owner->last_migrate_reason != -1)
  		pr_alert("page has been migrated, last migrate reason: %s
  ",
  			migrate_reason_names[page_owner->last_migrate_reason]);
  }
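
/*
 * debugfs read: *ppos encodes the next pfn to examine (relative to
 * min_low_pfn). Each read() returns one record for the next page that is
 * still tracked as allocated, so the file is consumed by reading repeatedly
 * until 0 is returned.
 */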
  static ssize_t
  read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
  {
  	unsigned long pfn;
  	struct page *page;
  	struct page_ext *page_ext;
  	struct page_owner *page_owner;
  	depot_stack_handle_t handle;

  	if (!static_branch_unlikely(&page_owner_inited))
  		return -EINVAL;
  
  	page = NULL;
  	pfn = min_low_pfn + *ppos;
  
  	/* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
  	while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
  		pfn++;
  
  	drain_all_pages(NULL);
  
  	/* Find an allocated page */
  	for (; pfn < max_pfn; pfn++) {
  		/*
  		 * If the new page is in a new MAX_ORDER_NR_PAGES area,
  		 * validate the area as existing, skip it if not
  		 */
  		if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
  			pfn += MAX_ORDER_NR_PAGES - 1;
  			continue;
  		}
  
  		/* Check for holes within a MAX_ORDER area */
  		if (!pfn_valid_within(pfn))
  			continue;
  
  		page = pfn_to_page(pfn);
  		if (PageBuddy(page)) {
  			unsigned long freepage_order = page_order_unsafe(page);
  
  			if (freepage_order < MAX_ORDER)
  				pfn += (1UL << freepage_order) - 1;
  			continue;
  		}
  
  		page_ext = lookup_page_ext(page);
  		if (unlikely(!page_ext))
  			continue;
  
  		/*
  		 * Some pages could be missed by concurrent allocation or free,
  		 * because we don't hold the zone lock.
  		 */
  		if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
  			continue;
  		/*
  		 * Although we do have the info about past allocation of free
  		 * pages, it's not relevant for current memory usage.
  		 */
  		if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
  			continue;
  		page_owner = get_page_owner(page_ext);
  		/*
  		 * Don't print "tail" pages of high-order allocations as that
  		 * would inflate the stats.
  		 */
  		if (!IS_ALIGNED(pfn, 1 << page_owner->order))
  			continue;
  
  		/*
		 * Access to page_ext->handle isn't synchronous, so we should
		 * be careful when accessing it.
  		 */
  		handle = READ_ONCE(page_owner->handle);
  		if (!handle)
  			continue;
  		/* Record the next PFN to read in the file offset */
  		*ppos = (pfn - min_low_pfn) + 1;
  		return print_page_owner(buf, count, pfn, page,
  				page_owner, handle);
  	}
  
  	return 0;
  }
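
/*
 * Pages allocated before page_owner was initialized carry no stack trace.
 * Walk each zone once at init time and tag those pages with the shared
 * early_handle so they are at least accounted for.
 */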
  static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
  {
  	unsigned long pfn = zone->zone_start_pfn;
  	unsigned long end_pfn = zone_end_pfn(zone);
  	unsigned long count = 0;
  	/*
  	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
  	 * a zone boundary, it will be double counted between zones. This does
  	 * not matter as the mixed block count will still be correct
  	 */
  	for (; pfn < end_pfn; ) {
  		unsigned long block_end_pfn;
  		if (!pfn_valid(pfn)) {
  			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
  			continue;
  		}
  
  		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
  		block_end_pfn = min(block_end_pfn, end_pfn);
  		for (; pfn < block_end_pfn; pfn++) {
  			struct page *page;
  			struct page_ext *page_ext;
  			if (!pfn_valid_within(pfn))
  				continue;
  
  			page = pfn_to_page(pfn);
  			if (page_zone(page) != zone)
  				continue;
  			/*
  			 * To avoid having to grab zone->lock, be a little
  			 * careful when reading buddy page order. The only
  			 * danger is that we skip too much and potentially miss
  			 * some early allocated pages, which is better than
  			 * heavy lock contention.
  			 */
  			if (PageBuddy(page)) {
  				unsigned long order = page_order_unsafe(page);
  
  				if (order > 0 && order < MAX_ORDER)
  					pfn += (1UL << order) - 1;
  				continue;
  			}
  
  			if (PageReserved(page))
  				continue;
  
  			page_ext = lookup_page_ext(page);
  			if (unlikely(!page_ext))
  				continue;

  			/* Maybe overlapping zone */
  			if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
  				continue;
  
  			/* Found early allocated page */
  			__set_page_owner_handle(page, page_ext, early_handle,
  						0, 0);
  			count++;
  		}
  		cond_resched();
  	}
  
  	pr_info("Node %d, zone %8s: page owner found early allocated %lu pages
  ",
  		pgdat->node_id, zone->name, count);
  }
  
  static void init_zones_in_node(pg_data_t *pgdat)
  {
  	struct zone *zone;
  	struct zone *node_zones = pgdat->node_zones;
  
  	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
  		if (!populated_zone(zone))
  			continue;
  		init_pages_in_zone(pgdat, zone);
  	}
  }
  
  static void init_early_allocated_pages(void)
  {
  	pg_data_t *pgdat;
  	for_each_online_pgdat(pgdat)
  		init_zones_in_node(pgdat);
  }
  static const struct file_operations proc_page_owner_operations = {
  	.read		= read_page_owner,
  };
  
  static int __init pageowner_init(void)
  {
  	if (!static_branch_unlikely(&page_owner_inited)) {
  		pr_info("page_owner is disabled
  ");
  		return 0;
  	}
  	debugfs_create_file("page_owner", 0400, NULL, NULL,
  			    &proc_page_owner_operations);

  	return 0;
  }
  late_initcall(pageowner_init)