  // SPDX-License-Identifier: GPL-2.0
  /*
   * mm/debug.c
   *
   * mm/ specific debug routines.
   *
   */
  #include <linux/kernel.h>
  #include <linux/mm.h>
  #include <linux/trace_events.h>
  #include <linux/memcontrol.h>
  #include <trace/events/mmflags.h>
  #include <linux/migrate.h>
  #include <linux/page_owner.h>
  #include <linux/ctype.h>

  #include "internal.h"
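
  /*
   * Human-readable names for enum migrate_reason (MR_TYPES entries in
   * total); callers index this array by the enum value, so the order must
   * stay in sync with the enum definition.
   */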
  const char *migrate_reason_names[MR_TYPES] = {
  	"compaction",
  	"memory_failure",
  	"memory_hotplug",
  	"syscall_or_cpuset",
  	"mempolicy_mbind",
  	"numa_misplaced",
  	"cma",
  };
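
  /*
   * Flag-name tables used by the %pGp/%pGg/%pGv printk format extensions
   * (lib/vsprintf.c); each table is terminated by a {0, NULL} sentinel.
   */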
  const struct trace_print_flags pageflag_names[] = {
  	__def_pageflag_names,
  	{0, NULL}
  };
  
  const struct trace_print_flags gfpflag_names[] = {
  	__def_gfpflag_names,
  	{0, NULL}
  };
  const struct trace_print_flags vmaflag_names[] = {
  	__def_vmaflag_names,
  	{0, NULL}
  };
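
  /*
   * Print as much of a struct page's state as can safely be read.  The page
   * may be poisoned, corrupt or part of a compound page, so every access
   * below is defensive.
   */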
  void __dump_page(struct page *page, const char *reason)
  {
  	struct page *head = compound_head(page);
  	struct address_space *mapping;
  	bool page_poisoned = PagePoisoned(page);
  	bool compound = PageCompound(page);
  	/*
  	 * Accessing the pageblock without the zone lock. It could change to
  	 * "isolate" again in the meantime, but since we are just dumping the
  	 * state for debugging, it should be fine to accept a bit of
  	 * inaccuracy here due to racing.
  	 */
  	bool page_cma = is_migrate_cma_page(page);
  	int mapcount;
  	char *type = "";
  
  	/*
  	 * If struct page is poisoned don't access Page*() functions as that
  	 * leads to recursive loop. Page*() check for poisoned pages, and calls
  	 * dump_page() when detected.
  	 */
  	if (page_poisoned) {
  		pr_warn("page:%px is uninitialized and poisoned", page);
  		goto hex_only;
  	}
  	if (page < head || (page >= head + MAX_ORDER_NR_PAGES)) {
  		/*
  		 * Corrupt page, so we cannot call page_mapping. Instead, do a
  		 * safe subset of the steps that page_mapping() does. Caution:
  		 * this will be misleading for tail pages, PageSwapCache pages,
  		 * and potentially other situations. (See the page_mapping()
  		 * implementation for what's missing here.)
  		 */
  		unsigned long tmp = (unsigned long)page->mapping;
  
  		if (tmp & PAGE_MAPPING_ANON)
  			mapping = NULL;
  		else
  			mapping = (void *)(tmp & ~PAGE_MAPPING_FLAGS);
  		head = page;
  		compound = false;
  	} else {
  		mapping = page_mapping(page);
  	}

  	/*
  	 * Avoid VM_BUG_ON() in page_mapcount().
  	 * page->_mapcount space in struct page is used by sl[aou]b pages to
  	 * encode own info.
  	 */
  	mapcount = PageSlab(head) ? 0 : page_mapcount(page);
  	pr_warn("page:%p refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n",
  			page, page_ref_count(head), mapcount, mapping,
  			page_to_pgoff(page), page_to_pfn(page));
  	if (compound) {
  		if (hpage_pincount_available(page)) {
  			pr_warn("head:%p order:%u compound_mapcount:%d compound_pincount:%d\n",
  					head, compound_order(head),
  					head_compound_mapcount(head),
  					head_compound_pincount(head));
  		} else {
  			pr_warn("head:%p order:%u compound_mapcount:%d\n",
  					head, compound_order(head),
  					head_compound_mapcount(head));
  		}
  	}
  	if (PageKsm(page))
  		type = "ksm ";
  	else if (PageAnon(page))
  		type = "anon ";
  	else if (mapping) {
  		struct inode *host;
  		const struct address_space_operations *a_ops;
  		struct hlist_node *dentry_first;
  		struct dentry *dentry_ptr;
  		struct dentry dentry;
  		unsigned long ino;
  
  		/*
  		 * mapping can be invalid pointer and we don't want to crash
  		 * accessing it, so probe everything depending on it carefully
  		 */
  		if (get_kernel_nofault(host, &mapping->host) ||
  		    get_kernel_nofault(a_ops, &mapping->a_ops)) {
  			pr_warn("failed to read mapping contents, not a valid kernel address?\n");
  			goto out_mapping;
  		}
  
  		if (!host) {
  			pr_warn("aops:%ps\n", a_ops);
  			goto out_mapping;
  		}
  		if (get_kernel_nofault(dentry_first, &host->i_dentry.first) ||
  		    get_kernel_nofault(ino, &host->i_ino)) {
  			pr_warn("aops:%ps with invalid host inode %px\n",
  					a_ops, host);
  			goto out_mapping;
  		}
  
  		if (!dentry_first) {
  			pr_warn("aops:%ps ino:%lx\n", a_ops, ino);
  			goto out_mapping;
  		}
  
  		dentry_ptr = container_of(dentry_first, struct dentry, d_u.d_alias);
  		if (get_kernel_nofault(dentry, dentry_ptr)) {
  			pr_warn("aops:%ps ino:%lx with invalid dentry %px\n",
  					a_ops, ino, dentry_ptr);
  		} else {
  			/*
  			 * if dentry is corrupted, the %pd handler may still
  			 * crash, but it's unlikely that we reach here with a
  			 * corrupted struct page
  			 */
  			pr_warn("aops:%ps ino:%lx dentry name:\"%pd\"\n",
  					a_ops, ino, &dentry);
  		}
  	}
  out_mapping:
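  	/* pageflag_names must cover every page flag plus the {0, NULL} sentinel */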
  	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);

  	pr_warn("%sflags: %#lx(%pGp)%s\n", type, head->flags, &head->flags,
  		page_cma ? " CMA" : "");

  hex_only:
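  	/*
  	 * Dump the raw memory of the struct page (and of the head page when it
  	 * differs); this also runs for poisoned pages that skip the decoding
  	 * above.
  	 */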
  	print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
  			sizeof(unsigned long), page,
  			sizeof(struct page), false);
  	if (head != page)
  		print_hex_dump(KERN_WARNING, "head: ", DUMP_PREFIX_NONE, 32,
  			sizeof(unsigned long), head,
  			sizeof(struct page), false);

  	if (reason)
  		pr_warn("page dumped because: %s\n", reason);

  #ifdef CONFIG_MEMCG
  	if (!page_poisoned && page->mem_cgroup)
  		pr_warn("page->mem_cgroup:%px\n", page->mem_cgroup);
  #endif
  }
  
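  /*
   * Public entry point: dump the core page state and, when page_owner is
   * enabled, the information it recorded for this page.
   */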
  void dump_page(struct page *page, const char *reason)
  {
  	__dump_page(page, reason);
  	dump_page_owner(page);
  }
  EXPORT_SYMBOL(dump_page);
  
  #ifdef CONFIG_DEBUG_VM
  void dump_vma(const struct vm_area_struct *vma)
  {
  	pr_emerg("vma %px start %px end %px\n"
  		"next %px prev %px mm %px\n"
  		"prot %lx anon_vma %px vm_ops %px\n"
  		"pgoff %lx file %px private_data %px\n"
  		"flags: %#lx(%pGv)
  ",
  		vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next,
  		vma->vm_prev, vma->vm_mm,
  		(unsigned long)pgprot_val(vma->vm_page_prot),
  		vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
  		vma->vm_file, vma->vm_private_data,
  		vma->vm_flags, &vma->vm_flags);
  }
  EXPORT_SYMBOL(dump_vma);
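  /*
   * The format string and the argument list below are assembled from matching
   * #ifdef blocks; keep the two halves in sync when fields are added or
   * removed.
   */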
  void dump_mm(const struct mm_struct *mm)
  {
  	pr_emerg("mm %px mmap %px seqnum %llu task_size %lu\n"
  #ifdef CONFIG_MMU
  		"get_unmapped_area %px
  "
  #endif
  		"mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu
  "
  		"pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d
  "
  		"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx
  "
  		"pinned_vm %llx data_vm %lx exec_vm %lx stack_vm %lx
  "
  		"start_code %lx end_code %lx start_data %lx end_data %lx
  "
  		"start_brk %lx brk %lx start_stack %lx
  "
  		"arg_start %lx arg_end %lx env_start %lx env_end %lx
  "
  		"binfmt %px flags %lx core_state %px
  "
  #ifdef CONFIG_AIO
  		"ioctx_table %px
  "
  #endif
  #ifdef CONFIG_MEMCG
  		"owner %px "
  #endif
  		"exe_file %px
  "
  #ifdef CONFIG_MMU_NOTIFIER
  		"notifier_subscriptions %px
  "
  #endif
  #ifdef CONFIG_NUMA_BALANCING
  		"numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d
  "
  #endif
  		"tlb_flush_pending %d
  "
  		"def_flags: %#lx(%pGv)
  ",

  		mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,
  #ifdef CONFIG_MMU
  		mm->get_unmapped_area,
  #endif
  		mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end,
  		mm->pgd, atomic_read(&mm->mm_users),
  		atomic_read(&mm->mm_count),
  		mm_pgtables_bytes(mm),
  		mm->map_count,
  		mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
  		(u64)atomic64_read(&mm->pinned_vm),
  		mm->data_vm, mm->exec_vm, mm->stack_vm,
  		mm->start_code, mm->end_code, mm->start_data, mm->end_data,
  		mm->start_brk, mm->brk, mm->start_stack,
  		mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
  		mm->binfmt, mm->flags, mm->core_state,
  #ifdef CONFIG_AIO
  		mm->ioctx_table,
  #endif
  #ifdef CONFIG_MEMCG
  		mm->owner,
  #endif
  		mm->exe_file,
  #ifdef CONFIG_MMU_NOTIFIER
  		mm->notifier_subscriptions,
  #endif
  #ifdef CONFIG_NUMA_BALANCING
  		mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
  #endif
  		atomic_read(&mm->tlb_flush_pending),
  		mm->def_flags, &mm->def_flags
  	);
  }
  static bool page_init_poisoning __read_mostly = true;
  
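  /*
   * Parse the "vm_debug" kernel command line parameter: a bare "vm_debug"
   * enables every option handled here, "vm_debug=-" disables them all, and
   * "vm_debug=p" enables struct page init poisoning only.
   */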
  static int __init setup_vm_debug(char *str)
  {
  	bool __page_init_poisoning = true;
  
  	/*
  	 * Calling vm_debug with no arguments is equivalent to requesting
  	 * to enable all debugging options we can control.
  	 */
  	if (*str++ != '=' || !*str)
  		goto out;
  
  	__page_init_poisoning = false;
  	if (*str == '-')
  		goto out;
  
  	while (*str) {
  		switch (tolower(*str)) {
  		case 'p':
  			__page_init_poisoning = true;
  			break;
  		default:
  			pr_err("vm_debug option '%c' unknown. skipped\n",
  			       *str);
  		}
  
  		str++;
  	}
  out:
  	if (page_init_poisoning && !__page_init_poisoning)
  		pr_warn("Page struct poisoning disabled by kernel command line option 'vm_debug'\n");
  
  	page_init_poisoning = __page_init_poisoning;
  
  	return 1;
  }
  __setup("vm_debug", setup_vm_debug);
  
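  /*
   * Fill a range of struct pages with the poison pattern so that any use
   * before proper initialization is likely to be caught; a no-op when
   * poisoning has been disabled via vm_debug.
   */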
  void page_init_poison(struct page *page, size_t size)
  {
  	if (page_init_poisoning)
  		memset(page, PAGE_POISON_PATTERN, size);
  }
  EXPORT_SYMBOL_GPL(page_init_poison);
  #endif		/* CONFIG_DEBUG_VM */