Blame view
mm/debug.c
4.17 KB
31c9afa6d
|
1 2 3 4 5 6 |
/*
 * mm/debug.c
 *
 * mm/ specific debug routines.
 */
82742a3a5
|
7 8 |
#include <linux/kernel.h> #include <linux/mm.h> |
af658dca2
|
9 |
#include <linux/trace_events.h> |
82742a3a5
|
10 |
#include <linux/memcontrol.h> |
420adbe9f
|
11 |
#include <trace/events/mmflags.h> |
7cd12b4ab
|
12 |
#include <linux/migrate.h> |
4e462112e
|
13 |
#include <linux/page_owner.h> |
82742a3a5
|
14 |
|
edf14cdbf
|
15 |
#include "internal.h" |
7cd12b4ab
|
16 17 18 19 20 21 22 23 24 |
/*
 * Human-readable names for the page-migration reasons, indexed by
 * enum migrate_reason.
 *
 * NOTE(review): this table must be kept in sync with enum migrate_reason
 * (see <linux/migrate.h>, included above) — adding a reason to the enum
 * without adding a string here leaves a NULL entry that a consumer may
 * dereference. Presumably the [MR_TYPES] bound is intended to catch a
 * size mismatch, but it does not; verify against the enum on change.
 */
char *migrate_reason_names[MR_TYPES] = {
	"compaction",
	"memory_failure",
	"memory_hotplug",
	"syscall_or_cpuset",
	"mempolicy_mbind",
	"numa_misplaced",
	"cma",
};
edf14cdbf
|
25 26 27 28 29 30 31 32 |
/*
 * Flag-name tables built from the __def_*_names macros in
 * <trace/events/mmflags.h>. Each table is terminated by a {0, NULL}
 * sentinel entry.
 *
 * pageflag_names is size-checked against __NR_PAGEFLAGS by the
 * BUILD_BUG_ON() in __dump_page() below; it backs the %pGp printk
 * format, and gfpflag_names presumably backs %pGg — confirm against
 * lib/vsprintf.c.
 */
const struct trace_print_flags pageflag_names[] = {
	__def_pageflag_names,
	{0, NULL}
};

const struct trace_print_flags gfpflag_names[] = {
	__def_gfpflag_names,
	{0, NULL}
};
edf14cdbf
|
34 35 36 |
/*
 * VMA flag names from <trace/events/mmflags.h>, {0, NULL}-terminated.
 * Consumed via the %pGv printk format used by dump_vma() and dump_mm()
 * below.
 */
const struct trace_print_flags vmaflag_names[] = {
	__def_vmaflag_names,
	{0, NULL}
};
ff8e81163
|
38 |
void __dump_page(struct page *page, const char *reason) |
82742a3a5
|
39 |
{ |
9996f05ea
|
40 41 42 43 44 |
/* * Avoid VM_BUG_ON() in page_mapcount(). * page->_mapcount space in struct page is used by sl[aou]b pages to * encode own info. */ |
4d35427ad
|
45 |
int mapcount = PageSlab(page) ? 0 : page_mapcount(page); |
53f9263ba
|
46 |
pr_emerg("page:%p count:%d mapcount:%d mapping:%p index:%#lx", |
4d35427ad
|
47 48 |
page, page_ref_count(page), mapcount, page->mapping, page_to_pgoff(page)); |
53f9263ba
|
49 50 51 52 |
if (PageCompound(page)) pr_cont(" compound_mapcount: %d", compound_mapcount(page)); pr_cont(" "); |
edf14cdbf
|
53 |
BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1); |
ff8e81163
|
54 |
|
b8eceeb99
|
55 56 |
pr_emerg("flags: %#lx(%pGp) ", page->flags, &page->flags); |
82742a3a5
|
57 58 59 |
if (reason) pr_alert("page dumped because: %s ", reason); |
b8eceeb99
|
60 |
|
9edad6ea0
|
61 62 63 64 65 |
#ifdef CONFIG_MEMCG if (page->mem_cgroup) pr_alert("page->mem_cgroup:%p ", page->mem_cgroup); #endif |
82742a3a5
|
66 67 68 69 |
} void dump_page(struct page *page, const char *reason) { |
ff8e81163
|
70 |
__dump_page(page, reason); |
4e462112e
|
71 |
dump_page_owner(page); |
82742a3a5
|
72 73 74 75 |
} EXPORT_SYMBOL(dump_page); #ifdef CONFIG_DEBUG_VM |
82742a3a5
|
76 77 |
void dump_vma(const struct vm_area_struct *vma) { |
7a82ca0d6
|
78 79 |
pr_emerg("vma %p start %p end %p " |
82742a3a5
|
80 81 82 83 |
"next %p prev %p mm %p " "prot %lx anon_vma %p vm_ops %p " |
b8eceeb99
|
84 85 86 87 |
"pgoff %lx file %p private_data %p " "flags: %#lx(%pGv) ", |
82742a3a5
|
88 89 90 91 |
vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next, vma->vm_prev, vma->vm_mm, (unsigned long)pgprot_val(vma->vm_page_prot), vma->anon_vma, vma->vm_ops, vma->vm_pgoff, |
b8eceeb99
|
92 93 |
vma->vm_file, vma->vm_private_data, vma->vm_flags, &vma->vm_flags); |
82742a3a5
|
94 95 |
} EXPORT_SYMBOL(dump_vma); |
31c9afa6d
|
96 97 |
void dump_mm(const struct mm_struct *mm) { |
7a82ca0d6
|
98 99 |
pr_emerg("mm %p mmap %p seqnum %d task_size %lu " |
31c9afa6d
|
100 101 102 103 104 105 |
#ifdef CONFIG_MMU "get_unmapped_area %p " #endif "mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu " |
dc6c9a35b
|
106 107 |
"pgd %p mm_users %d mm_count %d nr_ptes %lu nr_pmds %lu map_count %d " |
31c9afa6d
|
108 109 |
"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx " |
846383359
|
110 111 |
"pinned_vm %lx data_vm %lx exec_vm %lx stack_vm %lx " |
31c9afa6d
|
112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 |
"start_code %lx end_code %lx start_data %lx end_data %lx " "start_brk %lx brk %lx start_stack %lx " "arg_start %lx arg_end %lx env_start %lx env_end %lx " "binfmt %p flags %lx core_state %p " #ifdef CONFIG_AIO "ioctx_table %p " #endif #ifdef CONFIG_MEMCG "owner %p " #endif "exe_file %p " #ifdef CONFIG_MMU_NOTIFIER "mmu_notifier_mm %p " #endif #ifdef CONFIG_NUMA_BALANCING "numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d " #endif #if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION) "tlb_flush_pending %d " #endif |
b8eceeb99
|
141 142 |
"def_flags: %#lx(%pGv) ", |
31c9afa6d
|
143 144 145 146 147 148 149 150 151 |
mm, mm->mmap, mm->vmacache_seqnum, mm->task_size, #ifdef CONFIG_MMU mm->get_unmapped_area, #endif mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end, mm->pgd, atomic_read(&mm->mm_users), atomic_read(&mm->mm_count), atomic_long_read((atomic_long_t *)&mm->nr_ptes), |
dc6c9a35b
|
152 |
mm_nr_pmds((struct mm_struct *)mm), |
31c9afa6d
|
153 154 |
mm->map_count, mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm, |
846383359
|
155 |
mm->pinned_vm, mm->data_vm, mm->exec_vm, mm->stack_vm, |
31c9afa6d
|
156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 |
mm->start_code, mm->end_code, mm->start_data, mm->end_data, mm->start_brk, mm->brk, mm->start_stack, mm->arg_start, mm->arg_end, mm->env_start, mm->env_end, mm->binfmt, mm->flags, mm->core_state, #ifdef CONFIG_AIO mm->ioctx_table, #endif #ifdef CONFIG_MEMCG mm->owner, #endif mm->exe_file, #ifdef CONFIG_MMU_NOTIFIER mm->mmu_notifier_mm, #endif #ifdef CONFIG_NUMA_BALANCING mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq, #endif #if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION) mm->tlb_flush_pending, #endif |
b8eceeb99
|
176 177 |
mm->def_flags, &mm->def_flags ); |
31c9afa6d
|
178 |
} |
82742a3a5
|
179 |
#endif /* CONFIG_DEBUG_VM */ |