mm/pagewalk.c

  #include <linux/mm.h>
  #include <linux/highmem.h>
  #include <linux/sched.h>
  #include <linux/hugetlb.h>
  
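/*
 * Walk the ptes mapped by @pmd in [@addr, @end), invoking ->pte_entry
 * once per page; a non-zero return from the callback stops the walk.
 */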
  static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
  			  struct mm_walk *walk)
  {
  	pte_t *pte;
  	int err = 0;
  
  	pte = pte_offset_map(pmd, addr);
  	for (;;) {
  		err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
  		if (err)
			break;
  		addr += PAGE_SIZE;
  		if (addr == end)
  			break;
  		pte++;
  	}
  
  	pte_unmap(pte);
  	return err;
  }
  
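/*
 * Walk the pmd entries covering [@addr, @end): report holes via
 * ->pte_hole, call ->pmd_entry on present entries, and descend into
 * walk_pte_range() when a ->pte_entry callback is supplied.
 */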
  static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
  			  struct mm_walk *walk)
  {
  	pmd_t *pmd;
  	unsigned long next;
  	int err = 0;
  
  	pmd = pmd_offset(pud, addr);
  	do {
  		next = pmd_addr_end(addr, end);
  		if (pmd_none_or_clear_bad(pmd)) {
  			if (walk->pte_hole)
  				err = walk->pte_hole(addr, next, walk);
  			if (err)
  				break;
  			continue;
  		}
  		if (walk->pmd_entry)
  			err = walk->pmd_entry(pmd, addr, next, walk);
  		if (!err && walk->pte_entry)
  			err = walk_pte_range(pmd, addr, next, walk);
  		if (err)
  			break;
  	} while (pmd++, addr = next, addr != end);
  
  	return err;
  }
  
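/*
 * Walk the pud entries covering [@addr, @end), mirroring
 * walk_pmd_range() one level up and descending only when a lower-level
 * callback is present.
 */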
  static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
  			  struct mm_walk *walk)
  {
  	pud_t *pud;
  	unsigned long next;
  	int err = 0;
  
  	pud = pud_offset(pgd, addr);
  	do {
  		next = pud_addr_end(addr, end);
  		if (pud_none_or_clear_bad(pud)) {
  			if (walk->pte_hole)
  				err = walk->pte_hole(addr, next, walk);
  			if (err)
  				break;
  			continue;
  		}
  		if (walk->pud_entry)
  			err = walk->pud_entry(pud, addr, next, walk);
  		if (!err && (walk->pmd_entry || walk->pte_entry))
  			err = walk_pmd_range(pud, addr, next, walk);
  		if (err)
  			break;
  	} while (pud++, addr = next, addr != end);
  
  	return err;
  }
  #ifdef CONFIG_HUGETLB_PAGE
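/*
 * Clamp the walk to the end of the current huge page or to @end,
 * whichever comes first: with a 2MB hstate, for example, addr 0x201000
 * and end 0x800000 give a boundary of 0x400000.
 */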
  static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,
  				       unsigned long end)
  {
  	unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h);
  	return boundary < end ? boundary : end;
  }
  
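/*
 * Walk a hugetlb vma in huge-page-sized steps, handing each huge pte to
 * ->hugetlb_entry together with @hmask so the callback can recover the
 * huge page boundaries from @addr.
 */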
  static int walk_hugetlb_range(struct vm_area_struct *vma,
  			      unsigned long addr, unsigned long end,
  			      struct mm_walk *walk)
  {
  	struct hstate *h = hstate_vma(vma);
  	unsigned long next;
  	unsigned long hmask = huge_page_mask(h);
  	pte_t *pte;
  	int err = 0;
  
  	do {
  		next = hugetlb_entry_end(h, addr, end);
  		pte = huge_pte_offset(walk->mm, addr & hmask);
  		if (pte && walk->hugetlb_entry)
  			err = walk->hugetlb_entry(pte, hmask, addr, next, walk);
  		if (err)
  			return err;
  	} while (addr = next, addr != end);
  
  	return 0;
  }
  #endif
  /**
   * walk_page_range - walk a memory map's page tables with a callback
 * @addr: starting address
 * @end: ending address
 * @walk: set of callbacks to invoke for each level of the tree;
 *        walk->mm must be set to the mm_struct being walked
 *
 * Recursively walk the page tables for the range [@addr, @end) in
 * walk->mm, calling the supplied callbacks. Callbacks are called
 * in-order (first PGD, first PUD, first PMD, first PTE, second PTE...
 * second PMD, etc.). If lower-level callbacks are omitted, the walking
 * depth is reduced.
   *
 * Each callback receives an entry pointer, the start and end of the
 * associated range, and a pointer to the original mm_walk for access
 * to the ->private or ->mm fields.
   *
   * No locks are taken, but the bottom level iterator will map PTE
   * directories from highmem if necessary.
   *
   * If any callback returns a non-zero value, the walk is aborted and
   * the return value is propagated back to the caller. Otherwise 0 is returned.
   */
  int walk_page_range(unsigned long addr, unsigned long end,
  		    struct mm_walk *walk)
  {
  	pgd_t *pgd;
  	unsigned long next;
  	int err = 0;
  	struct vm_area_struct *vma;
  
  	if (addr >= end)
  		return err;
  	if (!walk->mm)
  		return -EINVAL;
  
  	pgd = pgd_offset(walk->mm, addr);
  	do {
  		next = pgd_addr_end(addr, end);

		/*
		 * Handle hugetlb vmas individually because the pagetable
		 * walk for hugetlb pages is architecture-dependent and
		 * we can't handle them in the same manner as non-huge pages.
		 */
  		vma = find_vma(walk->mm, addr);
  #ifdef CONFIG_HUGETLB_PAGE
  		if (vma && is_vm_hugetlb_page(vma)) {
  			if (vma->vm_end < next)
  				next = vma->vm_end;
  			/*
  			 * Hugepage is very tightly coupled with vma, so
  			 * walk through hugetlb entries within a given vma.
  			 */
  			err = walk_hugetlb_range(vma, addr, next, walk);
  			if (err)
  				break;
  			pgd = pgd_offset(walk->mm, next);
  			continue;
  		}
  #endif
  		if (pgd_none_or_clear_bad(pgd)) {
  			if (walk->pte_hole)
  				err = walk->pte_hole(addr, next, walk);
  			if (err)
  				break;
  			pgd++;
  			continue;
  		}
  		if (walk->pgd_entry)
  			err = walk->pgd_entry(pgd, addr, next, walk);
  		if (!err &&
  		    (walk->pud_entry || walk->pmd_entry || walk->pte_entry))
  			err = walk_pud_range(pgd, addr, next, walk);
  		if (err)
  			break;
  		pgd++;
  	} while (addr = next, addr != end);
  
  	return err;
  }
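
/*
 * Illustrative usage (a sketch, not part of this file): a caller could
 * count the present ptes in a range by supplying only a ->pte_entry
 * callback. The names count_pte() and count_present_ptes() are
 * hypothetical, and the caller is assumed to hold the relevant
 * mmap_sem, since walk_page_range() itself takes no locks.
 */
static int count_pte(pte_t *pte, unsigned long addr, unsigned long end,
		     struct mm_walk *walk)
{
	unsigned long *count = walk->private;

	if (pte_present(*pte))
		(*count)++;
	return 0;	/* a non-zero return would abort the walk */
}

static unsigned long count_present_ptes(struct mm_struct *mm,
					unsigned long start,
					unsigned long end)
{
	unsigned long count = 0;
	struct mm_walk walk = {
		.pte_entry	= count_pte,
		.mm		= mm,
		.private	= &count,
	};

	walk_page_range(start, end, &walk);
	return count;
}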