// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/mm/mincore.c
 *
 * Copyright (C) 1994-2006  Linus Torvalds
 */

/*
 * The mincore() system call.
 */
#include <linux/pagemap.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>

#include <linux/uaccess.h>
#include <asm/pgtable.h>

static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
	unsigned char present;
	unsigned char *vec = walk->private;

	/*
	 * Huge pages mapped into a user process are always in RAM and
	 * never swapped out, but in theory this still needs to be checked.
	 */
	present = pte && !huge_pte_none(huge_ptep_get(pte));
	for (; addr != end; vec++, addr += PAGE_SIZE)
		*vec = present;
	walk->private = vec;
#else
	BUG();
#endif
	return 0;
}
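
/*
 * Note: mincore_hugetlb() above and mincore_unmapped_range() and
 * mincore_pte_range() below are wired into a single struct mm_walk in
 * do_mincore(). Each callback fills in one byte per PAGE_SIZE page and
 * advances walk->private, which acts as the moving cursor into the
 * caller's output vector.
 */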

/*
 * Later we can get more picky about what "in core" means precisely.
 * For now, simply check to see if the page is in the page cache,
 * and is up to date; i.e. that no page-in operation would be required
 * at this time if an application were to map and access this page.
 */
static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
{
	unsigned char present = 0;
	struct page *page;

	/*
	 * When tmpfs swaps out a page from a file, any process mapping that
	 * file will not get a swp_entry_t in its pte, but rather it is like
	 * any other file mapping (ie. marked !present and faulted in with
	 * tmpfs's .fault). So swapped out tmpfs mappings are tested here.
	 */
#ifdef CONFIG_SWAP
	if (shmem_mapping(mapping)) {
		page = find_get_entry(mapping, pgoff);
		/*
		 * shmem/tmpfs may return swap: account for swapcache
		 * page too.
		 */
		if (radix_tree_exceptional_entry(page)) {
			swp_entry_t swp = radix_to_swp_entry(page);
			page = find_get_page(swap_address_space(swp),
					     swp_offset(swp));
		}
	} else
		page = find_get_page(mapping, pgoff);
#else
	page = find_get_page(mapping, pgoff);
#endif
	if (page) {
		present = PageUptodate(page);
		put_page(page);
	}

	return present;
}
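
/*
 * Note that a page still sitting in the swap cache counts as "in core":
 * mincore_pte_range() below calls mincore_page() on the swap
 * address_space for swap ptes, so residency is then decided by
 * PageUptodate() on the swapcache page.
 */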

static int __mincore_unmapped_range(unsigned long addr, unsigned long end,
				struct vm_area_struct *vma, unsigned char *vec)
{
	unsigned long nr = (end - addr) >> PAGE_SHIFT;
	int i;

	if (vma->vm_file) {
		pgoff_t pgoff;

		pgoff = linear_page_index(vma, addr);
		for (i = 0; i < nr; i++, pgoff++)
			vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
	} else {
		for (i = 0; i < nr; i++)
			vec[i] = 0;
	}
	return nr;
}

static int mincore_unmapped_range(unsigned long addr, unsigned long end,
				   struct mm_walk *walk)
{
	walk->private += __mincore_unmapped_range(addr, end,
						  walk->vma, walk->private);
	return 0;
}
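
/*
 * The pte-level walk below distinguishes four cases per page:
 *   - pte_none:       treat as an unmapped range (a file-backed page may
 *                     still be resident in the page cache)
 *   - pte_present:    resident
 *   - non_swap_entry: migration or hwpoison entries are reported resident
 *   - swap entry:     resident iff the page is in the swap cache
 */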

static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			struct mm_walk *walk)
{
	spinlock_t *ptl;
	struct vm_area_struct *vma = walk->vma;
	pte_t *ptep;
	unsigned char *vec = walk->private;
	int nr = (end - addr) >> PAGE_SHIFT;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		memset(vec, 1, nr);
		spin_unlock(ptl);
		goto out;
	}

	if (pmd_trans_unstable(pmd)) {
		__mincore_unmapped_range(addr, end, vma, vec);
		goto out;
	}

	ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	for (; addr != end; ptep++, addr += PAGE_SIZE) {
		pte_t pte = *ptep;

		if (pte_none(pte))
			__mincore_unmapped_range(addr, addr + PAGE_SIZE,
						 vma, vec);
		else if (pte_present(pte))
			*vec = 1;
		else { /* pte is a swap entry */
			swp_entry_t entry = pte_to_swp_entry(pte);

			if (non_swap_entry(entry)) {
				/*
				 * migration or hwpoison entries are always
				 * uptodate
				 */
				*vec = 1;
			} else {
#ifdef CONFIG_SWAP
				*vec = mincore_page(swap_address_space(entry),
						    swp_offset(entry));
#else
				WARN_ON(1);
				*vec = 1;
#endif
			}
		}
		vec++;
	}
	pte_unmap_unlock(ptep - 1, ptl);
out:
	walk->private += nr;
	cond_resched();
	return 0;
}

/*
 * Do a chunk of "sys_mincore()". We've already checked
 * all the arguments, we hold the mmap semaphore: we should
 * just return the amount of info we're asked for.
 */
static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec)
{
	struct vm_area_struct *vma;
	unsigned long end;
	int err;
	struct mm_walk mincore_walk = {
		.pmd_entry = mincore_pte_range,
		.pte_hole = mincore_unmapped_range,
		.hugetlb_entry = mincore_hugetlb,
		.private = vec,
	};

	vma = find_vma(current->mm, addr);
	if (!vma || addr < vma->vm_start)
		return -ENOMEM;
	mincore_walk.mm = vma->vm_mm;
	end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));
	err = walk_page_range(addr, end, &mincore_walk);
	if (err < 0)
		return err;
	return (end - addr) >> PAGE_SHIFT;
}

/*
 * The mincore(2) system call.
 *
 * mincore() returns the memory residency status of the pages in the
 * current process's address space specified by [addr, addr + len).
 * The status is returned in a vector of bytes.  The least significant
 * bit of each byte is 1 if the referenced page is in memory, otherwise
 * it is zero.
 *
 * Because the status of a page can change after mincore() checks it
 * but before it returns to the application, the returned vector may
 * contain stale information.  Only locked pages are guaranteed to
 * remain in memory.
 *
 * return values:
 *  zero    - success
 *  -EFAULT - vec points to an illegal address
 *  -EINVAL - addr is not a multiple of PAGE_SIZE
 *  -ENOMEM - Addresses in the range [addr, addr + len] are
 *		invalid for the address space of this
 *		process, or specify one or more pages which
 *		are not currently mapped
 *  -EAGAIN - A kernel resource was temporarily unavailable.
 */
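
/*
 * For illustration only (not part of this file): a minimal userspace
 * sketch of the call as exposed through <sys/mman.h>, testing whether
 * the first page of a fresh anonymous mapping is resident:
 *
 *	unsigned char vec[1];
 *	void *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (mincore(buf, 4096, vec) == 0)
 *		printf("resident: %d\n", vec[0] & 1);
 */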

SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len,
		unsigned char __user *, vec)
{
	long retval;
	unsigned long pages;
	unsigned char *tmp;

	/* Check the start address: needs to be page-aligned.. */
	if (start & ~PAGE_MASK)
		return -EINVAL;

	/* ..and we need to be passed a valid user-space range */
	if (!access_ok(VERIFY_READ, (void __user *) start, len))
		return -ENOMEM;

	/* This also avoids any overflows on PAGE_ALIGN */
	pages = len >> PAGE_SHIFT;
	pages += (offset_in_page(len)) != 0;

	if (!access_ok(VERIFY_WRITE, vec, pages))
		return -EFAULT;

	tmp = (void *) __get_free_page(GFP_USER);
	if (!tmp)
		return -EAGAIN;

	retval = 0;
	while (pages) {
		/*
		 * Do at most PAGE_SIZE entries per iteration, due to
		 * the temporary buffer size.
		 */
		down_read(&current->mm->mmap_sem);
		retval = do_mincore(start, min(pages, PAGE_SIZE), tmp);
		up_read(&current->mm->mmap_sem);

		if (retval <= 0)
			break;
		if (copy_to_user(vec, tmp, retval)) {
			retval = -EFAULT;
			break;
		}
		pages -= retval;
		vec += retval;
		start += retval << PAGE_SHIFT;
		retval = 0;
	}
	free_page((unsigned long) tmp);
	return retval;
}