mm/madvise.c

/*
 *	linux/mm/madvise.c
 *
 * Copyright (C) 1999  Linus Torvalds
 * Copyright (C) 2002  Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/page-isolation.h>
#include <linux/hugetlb.h>
#include <linux/sched.h>
#include <linux/ksm.h>

/*
 * Any behaviour which results in changes to the vma->vm_flags needs to
 * take mmap_sem for writing. Others, which simply traverse vmas, need
 * to only take it for reading.
 */
static int madvise_need_mmap_write(int behavior)
{
        switch (behavior) {
        case MADV_REMOVE:
        case MADV_WILLNEED:
        case MADV_DONTNEED:
                return 0;
        default:
                /* be safe, default to 1. list exceptions explicitly */
                return 1;
        }
}

/*
 * We can potentially split a vm area into separate
 * areas, each area with its own behavior.
 */
static long madvise_behavior(struct vm_area_struct * vma,
                     struct vm_area_struct **prev,
                     unsigned long start, unsigned long end, int behavior)
{
        struct mm_struct * mm = vma->vm_mm;
        int error = 0;
        pgoff_t pgoff;
        unsigned long new_flags = vma->vm_flags;

        switch (behavior) {
        case MADV_NORMAL:
                new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
                break;
        case MADV_SEQUENTIAL:
                new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
                break;
        case MADV_RANDOM:
                new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
                break;
        case MADV_DONTFORK:
                new_flags |= VM_DONTCOPY;
                break;
        case MADV_DOFORK:
                if (vma->vm_flags & VM_IO) {
                        error = -EINVAL;
                        goto out;
                }
                new_flags &= ~VM_DONTCOPY;
                break;
        case MADV_MERGEABLE:
        case MADV_UNMERGEABLE:
                error = ksm_madvise(vma, start, end, behavior, &new_flags);
                if (error)
                        goto out;
                break;
        }

        if (new_flags == vma->vm_flags) {
                *prev = vma;
                goto out;
        }

        pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
        *prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
                                vma->vm_file, pgoff, vma_policy(vma));
        if (*prev) {
                vma = *prev;
                goto success;
        }

        *prev = vma;

        if (start != vma->vm_start) {
                error = split_vma(mm, vma, start, 1);
                if (error)
                        goto out;
        }

        if (end != vma->vm_end) {
                error = split_vma(mm, vma, end, 0);
                if (error)
                        goto out;
        }

success:
        /*
         * vm_flags is protected by the mmap_sem held in write mode.
         */
        vma->vm_flags = new_flags;
out:
        if (error == -ENOMEM)
                error = -EAGAIN;
        return error;
}

/*
 * Schedule all required I/O operations.  Do not wait for completion.
 */
static long madvise_willneed(struct vm_area_struct * vma,
                             struct vm_area_struct ** prev,
                             unsigned long start, unsigned long end)
{
        struct file *file = vma->vm_file;

        if (!file)
                return -EBADF;

        if (file->f_mapping->a_ops->get_xip_mem) {
                /* no bad return value, but ignore advice */
                return 0;
        }

        *prev = vma;
        start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
        if (end > vma->vm_end)
                end = vma->vm_end;
        end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
        force_page_cache_readahead(file->f_mapping, file, start, end - start);
        return 0;
}

/*
 * Application no longer needs these pages.  If the pages are dirty,
 * it's OK to just throw them away.  The app will be more careful about
 * data it wants to keep.  Be sure to free swap resources too.  The
 * zap_page_range call sets things up for shrink_active_list to actually free
 * these pages later if no one else has touched them in the meantime,
 * although we could add these pages to a global reuse list for
 * shrink_active_list to pick up before reclaiming other pages.
 *
 * NB: This interface discards data rather than pushes it out to swap,
 * as some implementations do.  This has performance implications for
 * applications like large transactional databases which want to discard
 * pages in anonymous maps after committing to backing store the data
 * that was kept in them.  There is no reason to write this data out to
 * the swap area if the application is discarding it.
 *
 * An interface that causes the system to free clean pages and flush
 * dirty pages is already available as msync(MS_INVALIDATE).
 */
static long madvise_dontneed(struct vm_area_struct * vma,
                             struct vm_area_struct ** prev,
                             unsigned long start, unsigned long end)
{
        *prev = vma;
        if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
                return -EINVAL;

        if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
                struct zap_details details = {
                        .nonlinear_vma = vma,
                        .last_index = ULONG_MAX,
                };
                zap_page_range(vma, start, end - start, &details);
        } else
                zap_page_range(vma, start, end - start, NULL);
        return 0;
}
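
The comment above madvise_dontneed() describes the intended pattern: an application commits data to its real backing store and then simply discards the anonymous pages that held it. Below is a minimal userspace sketch of that pattern; the buffer size and the "commit" step are hypothetical stand-ins, not taken from this file.

/* Illustrative only: drop an anonymous scratch buffer with MADV_DONTNEED. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 16 * 4096;         /* arbitrary example size */
        char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (buf == MAP_FAILED)
                return 1;

        memset(buf, 'x', len);          /* stand-in for real work */
        /* ... the data would be committed to its backing store here ... */

        /* The contents are no longer needed: the pages (and any swap they
         * use) can be freed without being written out anywhere. */
        if (madvise(buf, len, MADV_DONTNEED) != 0)
                perror("madvise(MADV_DONTNEED)");

        munmap(buf, len);
        return 0;
}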

/*
 * Application wants to free up the pages and associated backing store.
 * This is effectively punching a hole into the middle of a file.
 *
 * NOTE: Currently, only shmfs/tmpfs is supported for this operation.
 * Other filesystems return -ENOSYS.
 */
static long madvise_remove(struct vm_area_struct *vma,
                                struct vm_area_struct **prev,
                                unsigned long start, unsigned long end)
{
        struct address_space *mapping;
        loff_t offset, endoff;
        int error;

        *prev = NULL;   /* tell sys_madvise we drop mmap_sem */

        if (vma->vm_flags & (VM_LOCKED|VM_NONLINEAR|VM_HUGETLB))
                return -EINVAL;

        if (!vma->vm_file || !vma->vm_file->f_mapping
                || !vma->vm_file->f_mapping->host) {
                        return -EINVAL;
        }

        if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
                return -EACCES;

        mapping = vma->vm_file->f_mapping;

        offset = (loff_t)(start - vma->vm_start)
                        + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
        endoff = (loff_t)(end - vma->vm_start - 1)
                        + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

        /* vmtruncate_range needs to take i_mutex and i_alloc_sem */
        up_read(&current->mm->mmap_sem);
        error = vmtruncate_range(mapping->host, offset, endoff);
        down_read(&current->mm->mmap_sem);
        return error;
}

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Error injection support for memory error handling.
 */
static int madvise_hwpoison(int bhv, unsigned long start, unsigned long end)
{
        int ret = 0;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
        for (; start < end; start += PAGE_SIZE) {
                struct page *p;
                int ret = get_user_pages_fast(start, 1, 0, &p);
                if (ret != 1)
                        return ret;
                if (bhv == MADV_SOFT_OFFLINE) {
                        printk(KERN_INFO "Soft offlining page %lx at %lx\n",
                                page_to_pfn(p), start);
                        ret = soft_offline_page(p, MF_COUNT_INCREASED);
                        if (ret)
                                break;
                        continue;
                }
                printk(KERN_INFO "Injecting memory failure for page %lx at %lx\n",
                       page_to_pfn(p), start);
                /* Ignore return value for now */
                __memory_failure(page_to_pfn(p), 0, MF_COUNT_INCREASED);
        }
        return ret;
}
#endif
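
madvise_hwpoison() above is the kernel side of the MADV_HWPOISON and MADV_SOFT_OFFLINE error-injection interface: it is compiled only with CONFIG_MEMORY_FAILURE and refuses callers without CAP_SYS_ADMIN. The sketch below shows how a privileged test program might exercise it; the fallback #define is only for libc headers that predate the constant, and the whole program is an illustration, not a supported test tool.

/* Illustrative only: poison one page of an anonymous mapping.
 * Needs CONFIG_MEMORY_FAILURE in the kernel and CAP_SYS_ADMIN. */
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef MADV_HWPOISON
#define MADV_HWPOISON 100       /* value used by the kernel headers */
#endif

int main(void)
{
        long pagesize = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, pagesize, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
                return 1;
        p[0] = 1;       /* touch the page so there is something to poison */

        if (madvise(p, pagesize, MADV_HWPOISON) != 0)
                perror("madvise(MADV_HWPOISON)");
        /* A later access to p[0] would normally be answered with SIGBUS. */
        return 0;
}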

static long
madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
                unsigned long start, unsigned long end, int behavior)
{
        switch (behavior) {
        case MADV_REMOVE:
                return madvise_remove(vma, prev, start, end);
        case MADV_WILLNEED:
                return madvise_willneed(vma, prev, start, end);
        case MADV_DONTNEED:
                return madvise_dontneed(vma, prev, start, end);
        default:
                return madvise_behavior(vma, prev, start, end, behavior);
        }
}

static int
madvise_behavior_valid(int behavior)
{
        switch (behavior) {
        case MADV_DOFORK:
        case MADV_DONTFORK:
        case MADV_NORMAL:
        case MADV_SEQUENTIAL:
        case MADV_RANDOM:
        case MADV_REMOVE:
        case MADV_WILLNEED:
        case MADV_DONTNEED:
#ifdef CONFIG_KSM
        case MADV_MERGEABLE:
        case MADV_UNMERGEABLE:
#endif
                return 1;

        default:
                return 0;
        }
}

/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area.  The idea is to help the kernel
 * use appropriate read-ahead and caching techniques.  The information
 * provided is advisory only, and can be safely disregarded by the
 * kernel without affecting the correct operation of the application.
 *
 * behavior values:
 *  MADV_NORMAL - the default behavior is to read clusters.  This
 *              results in some read-ahead and read-behind.
 *  MADV_RANDOM - the system should read the minimum amount of data
 *              on any access, since it is unlikely that the appli-
 *              cation will need more than what it asks for.
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
 *              once, so they can be aggressively read ahead, and
 *              can be freed soon after they are accessed.
 *  MADV_WILLNEED - the application is notifying the system to read
 *              some pages ahead.
 *  MADV_DONTNEED - the application is finished with the given range,
 *              so the kernel can free resources associated with it.
 *  MADV_REMOVE - the application wants to free up the given range of
 *              pages and associated backing store.
 *  MADV_DONTFORK - omit this area from child's address space when forking:
 *              typically, to avoid COWing pages pinned by get_user_pages().
 *  MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
 *  MADV_MERGEABLE - the application recommends that KSM try to merge pages in
 *              this area with pages of identical content from other such areas.
 *  MADV_UNMERGEABLE- cancel MADV_MERGEABLE: no longer merge pages with others.
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start + len < 0, start is not page-aligned,
 *              "behavior" is not a valid value, or application
 *              is attempting to release locked or shared pages.
 *  -ENOMEM - addresses in the specified range are not currently
 *              mapped, or are outside the AS of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
 */
SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
{
        unsigned long end, tmp;
        struct vm_area_struct * vma, *prev;
        int unmapped_error = 0;
        int error = -EINVAL;
        int write;
        size_t len;

#ifdef CONFIG_MEMORY_FAILURE
        if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE)
                return madvise_hwpoison(behavior, start, start+len_in);
#endif
        if (!madvise_behavior_valid(behavior))
                return error;

        write = madvise_need_mmap_write(behavior);
        if (write)
                down_write(&current->mm->mmap_sem);
        else
                down_read(&current->mm->mmap_sem);

        if (start & ~PAGE_MASK)
                goto out;
        len = (len_in + ~PAGE_MASK) & PAGE_MASK;

        /* Check to see whether len was rounded up from small -ve to zero */
        if (len_in && !len)
                goto out;

        end = start + len;
        if (end < start)
                goto out;

        error = 0;
        if (end == start)
                goto out;

        /*
         * If the interval [start,end) covers some unmapped address
         * ranges, just ignore them, but return -ENOMEM at the end.
         * - different from the way of handling in mlock etc.
         */
        vma = find_vma_prev(current->mm, start, &prev);
        if (vma && start > vma->vm_start)
                prev = vma;

        for (;;) {
                /* Still start < end. */
                error = -ENOMEM;
                if (!vma)
                        goto out;

                /* Here start < (end|vma->vm_end). */
                if (start < vma->vm_start) {
                        unmapped_error = -ENOMEM;
                        start = vma->vm_start;
                        if (start >= end)
                                goto out;
                }

                /* Here vma->vm_start <= start < (end|vma->vm_end) */
                tmp = vma->vm_end;
                if (end < tmp)
                        tmp = end;

                /* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
                error = madvise_vma(vma, &prev, start, tmp, behavior);
                if (error)
                        goto out;
                start = tmp;
                if (prev && start < prev->vm_end)
                        start = prev->vm_end;
                error = unmapped_error;
                if (start >= end)
                        goto out;
                if (prev)
                        vma = prev->vm_next;
                else    /* madvise_remove dropped mmap_sem */
                        vma = find_vma(current->mm, start);
        }
out:
        if (write)
                up_write(&current->mm->mmap_sem);
        else
                up_read(&current->mm->mmap_sem);

        return error;
}
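
The block comment above sys_madvise() documents the advice values visible to userspace; a small caller makes that contract concrete. The file path and the idea of streaming through it once are hypothetical illustration; only the madvise() calls themselves follow the documented interface.

/* Illustrative caller of madvise(2): advise sequential access over a
 * file mapping, ask for readahead, then tell the kernel we are done. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
        const char *path = "/var/tmp/example.dat"; /* hypothetical file */
        int fd = open(path, O_RDONLY);
        struct stat st;

        if (fd < 0 || fstat(fd, &st) < 0 || st.st_size == 0)
                return 1;

        char *map = mmap(NULL, st.st_size, PROT_READ, MAP_SHARED, fd, 0);
        if (map == MAP_FAILED)
                return 1;

        /* MADV_SEQUENTIAL: aggressive readahead, pages freed soon after use. */
        if (madvise(map, st.st_size, MADV_SEQUENTIAL) != 0)
                perror("madvise(MADV_SEQUENTIAL)");

        /* MADV_WILLNEED: schedule readahead for the range right away. */
        if (madvise(map, st.st_size, MADV_WILLNEED) != 0)
                perror("madvise(MADV_WILLNEED)");

        /* ... read through the mapping here ... */

        /* MADV_DONTNEED: this mapping no longer needs the range. */
        if (madvise(map, st.st_size, MADV_DONTNEED) != 0)
                perror("madvise(MADV_DONTNEED)");

        munmap(map, st.st_size);
        close(fd);
        return 0;
}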