mm/pagewalk.c

  #include <linux/mm.h>
  #include <linux/highmem.h>
  #include <linux/sched.h>
  
  static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
  			  struct mm_walk *walk)
  {
  	pte_t *pte;
  	int err = 0;
  
  	pte = pte_offset_map(pmd, addr);
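	/*
	 * Note the loop shape: addr is compared against end before
	 * pte is advanced, so pte never steps past the last entry
	 * handed to pte_unmap() below.
	 */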
  	for (;;) {
  		err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
  		if (err)
			break;
  		addr += PAGE_SIZE;
  		if (addr == end)
  			break;
  		pte++;
  	}
  
  	pte_unmap(pte);
  	return err;
  }
  
  static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
  			  struct mm_walk *walk)
  {
  	pmd_t *pmd;
  	unsigned long next;
  	int err = 0;
  
  	pmd = pmd_offset(pud, addr);
  	do {
  		next = pmd_addr_end(addr, end);
  		if (pmd_none_or_clear_bad(pmd)) {
  			if (walk->pte_hole)
  				err = walk->pte_hole(addr, next, walk);
  			if (err)
  				break;
  			continue;
  		}
  		if (walk->pmd_entry)
  			err = walk->pmd_entry(pmd, addr, next, walk);
  		if (!err && walk->pte_entry)
  			err = walk_pte_range(pmd, addr, next, walk);
  		if (err)
  			break;
  	} while (pmd++, addr = next, addr != end);
  
  	return err;
  }
  
  static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
  			  struct mm_walk *walk)
  {
  	pud_t *pud;
  	unsigned long next;
  	int err = 0;
  
  	pud = pud_offset(pgd, addr);
  	do {
  		next = pud_addr_end(addr, end);
  		if (pud_none_or_clear_bad(pud)) {
  			if (walk->pte_hole)
  				err = walk->pte_hole(addr, next, walk);
  			if (err)
  				break;
  			continue;
  		}
  		if (walk->pud_entry)
  			err = walk->pud_entry(pud, addr, next, walk);
  		if (!err && (walk->pmd_entry || walk->pte_entry))
  			err = walk_pmd_range(pud, addr, next, walk);
  		if (err)
  			break;
  	} while (pud++, addr = next, addr != end);
  
  	return err;
  }
  
  /**
   * walk_page_range - walk a memory map's page tables with a callback
 * @addr: starting address
 * @end: ending address
 * @walk: set of callbacks to invoke for each level of the tree;
 *        walk->mm must be set to the memory map to walk
   *
   * Recursively walk the page table for the memory area in a VMA,
   * calling supplied callbacks. Callbacks are called in-order (first
   * PGD, first PUD, first PMD, first PTE, second PTE... second PMD,
   * etc.). If lower-level callbacks are omitted, walking depth is reduced.
   *
   * Each callback receives an entry pointer and the start and end of the
   * associated range, and a copy of the original mm_walk for access to
   * the ->private or ->mm fields.
   *
   * No locks are taken, but the bottom level iterator will map PTE
   * directories from highmem if necessary.
   *
   * If any callback returns a non-zero value, the walk is aborted and
   * the return value is propagated back to the caller. Otherwise 0 is returned.
   */
  int walk_page_range(unsigned long addr, unsigned long end,
  		    struct mm_walk *walk)
  {
  	pgd_t *pgd;
  	unsigned long next;
  	int err = 0;
  
  	if (addr >= end)
  		return err;
  	if (!walk->mm)
  		return -EINVAL;
  
  	pgd = pgd_offset(walk->mm, addr);
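	/*
	 * Walk one top-level entry at a time; pgd_addr_end() clamps
	 * each step to whichever comes first, the end of the current
	 * PGD entry's coverage or the caller's end address.
	 */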
  	do {
  		next = pgd_addr_end(addr, end);
  		if (pgd_none_or_clear_bad(pgd)) {
  			if (walk->pte_hole)
  				err = walk->pte_hole(addr, next, walk);
  			if (err)
  				break;
  			continue;
  		}
  		if (walk->pgd_entry)
  			err = walk->pgd_entry(pgd, addr, next, walk);
  		if (!err &&
  		    (walk->pud_entry || walk->pmd_entry || walk->pte_entry))
  			err = walk_pud_range(pgd, addr, next, walk);
  		if (err)
  			break;
  	} while (pgd++, addr = next, addr != end);
  
  	return err;
  }
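
For illustration, here is a minimal sketch of how a caller might use this
interface. The names count_pte and count_present_pages are hypothetical and
not part of this file; the sketch assumes the same kernel headers, and the
caller is assumed to take whatever locking its use of the page tables
requires (e.g. mmap_sem), since walk_page_range() itself takes no locks.

/*
 * Hypothetical usage sketch: count present PTEs in [start, end).
 * The names below are illustrative only.
 */
static int count_pte(pte_t *pte, unsigned long addr,
		     unsigned long next, struct mm_walk *walk)
{
	unsigned long *count = walk->private;

	/* Leaf callback: invoked once per PTE in the range. */
	if (pte_present(*pte))
		(*count)++;
	return 0;	/* a non-zero return would abort the walk */
}

static unsigned long count_present_pages(struct mm_struct *mm,
					 unsigned long start,
					 unsigned long end)
{
	unsigned long count = 0;
	struct mm_walk walk = {
		.pte_entry	= count_pte,
		.mm		= mm,
		.private	= &count,
	};

	walk_page_range(start, end, &walk);
	return count;
}

Because walk_pte_range() is only entered when ->pte_entry is set, a caller
that needs only PMD-level visits can leave ->pte_entry NULL and the walk
never maps individual PTE pages, as the comment above walk_page_range()
notes: omitting lower-level callbacks reduces the walking depth.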