mm/madvise.c
/*
 *	linux/mm/madvise.c
 *
 * Copyright (C) 1999  Linus Torvalds
 * Copyright (C) 2002  Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/page-isolation.h>
#include <linux/hugetlb.h>
#include <linux/falloc.h>
#include <linux/sched.h>
#include <linux/ksm.h>
#include <linux/fs.h>
#include <linux/file.h>

/*
 * Any behaviour which results in changes to the vma->vm_flags needs to
 * take mmap_sem for writing. Others, which simply traverse vmas, need
 * to only take it for reading.
 */
static int madvise_need_mmap_write(int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
		return 0;
	default:
		/* be safe, default to 1. list exceptions explicitly */
		return 1;
	}
}

/*
 * We can potentially split a vm area into separate
 * areas, each area with its own behavior.
 */
static long madvise_behavior(struct vm_area_struct *vma,
			struct vm_area_struct **prev,
			unsigned long start, unsigned long end, int behavior)
{
	struct mm_struct *mm = vma->vm_mm;
	int error = 0;
	pgoff_t pgoff;
	unsigned long new_flags = vma->vm_flags;

	switch (behavior) {
	case MADV_NORMAL:
		new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
		break;
	case MADV_SEQUENTIAL:
		new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
		break;
	case MADV_RANDOM:
		new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
		break;
	case MADV_DONTFORK:
		new_flags |= VM_DONTCOPY;
		break;
	case MADV_DOFORK:
		if (vma->vm_flags & VM_IO) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTCOPY;
		break;
	case MADV_DONTDUMP:
		new_flags |= VM_NODUMP;
		break;
	case MADV_DODUMP:
		new_flags &= ~VM_NODUMP;
		break;
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
		error = ksm_madvise(vma, start, end, behavior, &new_flags);
		if (error)
			goto out;
		break;
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
		error = hugepage_madvise(vma, &new_flags, behavior);
		if (error)
			goto out;
		break;
	}

	if (new_flags == vma->vm_flags) {
		*prev = vma;
		goto out;
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
				vma->vm_file, pgoff, vma_policy(vma));
	if (*prev) {
		vma = *prev;
		goto success;
	}

	*prev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto out;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto out;
	}

success:
	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 */
	vma->vm_flags = new_flags;

out:
	if (error == -ENOMEM)
		error = -EAGAIN;
	return error;
}
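
/*
 * Illustrative userspace sketch (not part of this file; identifiers
 * hypothetical): the flag-changing advice handled above is typically
 * used like this to keep a get_user_pages()-pinned DMA buffer out of
 * a fork()ed child's address space:
 *
 *	#include <sys/mman.h>
 *
 *	if (madvise(buf, len, MADV_DONTFORK))	// buf, len: a pinned region
 *		perror("madvise(MADV_DONTFORK)");
 *	// ... fork() workers; the region is omitted from each child ...
 *	if (madvise(buf, len, MADV_DOFORK))	// undo the advice when done
 *		perror("madvise(MADV_DOFORK)");
 */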

/*
 * Schedule all required I/O operations.  Do not wait for completion.
 */
static long madvise_willneed(struct vm_area_struct *vma,
			struct vm_area_struct **prev,
			unsigned long start, unsigned long end)
{
	struct file *file = vma->vm_file;

	if (!file)
		return -EBADF;

	if (file->f_mapping->a_ops->get_xip_mem) {
		/* no bad return value, but ignore advice */
		return 0;
	}

	*prev = vma;
	start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	if (end > vma->vm_end)
		end = vma->vm_end;
	end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

	force_page_cache_readahead(file->f_mapping, file, start, end - start);
	return 0;
}
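
/*
 * Illustrative userspace sketch (not part of this file; identifiers
 * hypothetical): MADV_WILLNEED as a prefetch hint on a file-backed
 * mapping.  The readahead scheduled above proceeds in the background;
 * the call does not wait for the I/O to complete:
 *
 *	#include <sys/mman.h>
 *
 *	void *addr = mmap(NULL, filesize, PROT_READ, MAP_PRIVATE, fd, 0);
 *	if (addr != MAP_FAILED)
 *		madvise(addr, filesize, MADV_WILLNEED);	// advisory only
 */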

/*
 * Application no longer needs these pages.  If the pages are dirty,
 * it's OK to just throw them away.  The app will be more careful about
 * data it wants to keep.  Be sure to free swap resources too.  The
 * zap_page_range call sets things up for shrink_active_list to actually free
 * these pages later if no one else has touched them in the meantime,
 * although we could add these pages to a global reuse list for
 * shrink_active_list to pick up before reclaiming other pages.
 *
 * NB: This interface discards data rather than pushes it out to swap,
 * as some implementations do.  This has performance implications for
 * applications like large transactional databases which want to discard
 * pages in anonymous maps after committing to backing store the data
 * that was kept in them.  There is no reason to write this data out to
 * the swap area if the application is discarding it.
 *
 * An interface that causes the system to free clean pages and flush
 * dirty pages is already available as msync(MS_INVALIDATE).
 */
static long madvise_dontneed(struct vm_area_struct *vma,
			struct vm_area_struct **prev,
			unsigned long start, unsigned long end)
{
	*prev = vma;
	if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
		return -EINVAL;

	if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
		struct zap_details details = {
			.nonlinear_vma = vma,
			.last_index = ULONG_MAX,
		};
		zap_page_range(vma, start, end - start, &details);
	} else
		zap_page_range(vma, start, end - start, NULL);
	return 0;
}
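
/*
 * Illustrative userspace sketch (not part of this file; identifiers
 * hypothetical): user-level allocators commonly use MADV_DONTNEED to
 * hand freed pages back to the kernel while keeping the mapping itself;
 * anonymous pages read back as zero-filled on the next touch:
 *
 *	#include <sys/mman.h>
 *
 *	// chunk: a page-aligned, no-longer-needed region of size bytes
 *	if (madvise(chunk, size, MADV_DONTNEED))
 *		perror("madvise(MADV_DONTNEED)");
 */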

/*
 * Application wants to free up the pages and associated backing store.
 * This is effectively punching a hole into the middle of a file.
 *
 * NOTE: Currently, only shmfs/tmpfs is supported for this operation.
 * Other filesystems return -ENOSYS.
 */
static long madvise_remove(struct vm_area_struct *vma,
				struct vm_area_struct **prev,
				unsigned long start, unsigned long end)
{
	loff_t offset;
	int error;
	struct file *f;

	*prev = NULL;	/* tell sys_madvise we drop mmap_sem */

	if (vma->vm_flags & (VM_LOCKED|VM_NONLINEAR|VM_HUGETLB))
		return -EINVAL;

	f = vma->vm_file;

	if (!f || !f->f_mapping || !f->f_mapping->host)
		return -EINVAL;

	if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
		return -EACCES;

	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

	/*
	 * Filesystem's fallocate may need to take i_mutex.  We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_sem.
	 */
	get_file(f);
	up_read(&current->mm->mmap_sem);
	error = do_fallocate(f,
				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				offset, end - start);
	fput(f);
	down_read(&current->mm->mmap_sem);
	return error;
}
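
/*
 * Illustrative userspace sketch (not part of this file; identifiers
 * hypothetical): MADV_REMOVE punches a hole in the backing object of a
 * shared, writable mapping, freeing both the pages and the backing
 * store; fd refers to a tmpfs file, per the NOTE above:
 *
 *	#include <sys/mman.h>
 *
 *	char *p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	// drop the middle third of the file's pages and backing store
 *	if (p != MAP_FAILED && madvise(p + size / 3, size / 3, MADV_REMOVE))
 *		perror("madvise(MADV_REMOVE)");	// fails on unsupported filesystems
 */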

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Error injection support for memory error handling.
 */
static int madvise_hwpoison(int bhv, unsigned long start, unsigned long end)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	for (; start < end; start += PAGE_SIZE) {
		struct page *p;
		int ret = get_user_pages_fast(start, 1, 0, &p);

		if (ret != 1)
			return ret;
		if (bhv == MADV_SOFT_OFFLINE) {
			printk(KERN_INFO "Soft offlining page %lx at %lx\n",
			       page_to_pfn(p), start);
			ret = soft_offline_page(p, MF_COUNT_INCREASED);
			/* return, not break: keep the error for the caller */
			if (ret)
				return ret;
			continue;
		}
		printk(KERN_INFO "Injecting memory failure for page %lx at %lx\n",
		       page_to_pfn(p), start);
		/* Ignore return value for now */
		memory_failure(page_to_pfn(p), 0, MF_COUNT_INCREASED);
	}
	return 0;
}
#endif
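
/*
 * Illustrative userspace sketch (not part of this file; identifiers
 * hypothetical): with CONFIG_MEMORY_FAILURE built in and CAP_SYS_ADMIN,
 * the injection path above can be driven from a test program to poison
 * a single page and exercise the recovery code:
 *
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	// buf: a private test mapping; poison its first page
 *	if (madvise(buf, getpagesize(), MADV_HWPOISON))
 *		perror("madvise(MADV_HWPOISON)");	// EPERM without CAP_SYS_ADMIN
 */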

static long
madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
		unsigned long start, unsigned long end, int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
		return madvise_remove(vma, prev, start, end);
	case MADV_WILLNEED:
		return madvise_willneed(vma, prev, start, end);
	case MADV_DONTNEED:
		return madvise_dontneed(vma, prev, start, end);
	default:
		return madvise_behavior(vma, prev, start, end, behavior);
	}
}

static int
madvise_behavior_valid(int behavior)
{
	switch (behavior) {
	case MADV_DOFORK:
	case MADV_DONTFORK:
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
#ifdef CONFIG_KSM
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
#endif
	case MADV_DONTDUMP:
	case MADV_DODUMP:
		return 1;

	default:
		return 0;
	}
}

/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area.  The idea is to help the kernel
 * use appropriate read-ahead and caching techniques.  The information
 * provided is advisory only, and can be safely disregarded by the
 * kernel without affecting the correct operation of the application.
 *
 * behavior values:
 *  MADV_NORMAL - the default behavior is to read clusters.  This
 *		results in some read-ahead and read-behind.
 *  MADV_RANDOM - the system should read the minimum amount of data
 *		on any access, since it is unlikely that the appli-
 *		cation will need more than what it asks for.
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
 *		once, so they can be aggressively read ahead, and
 *		can be freed soon after they are accessed.
 *  MADV_WILLNEED - the application is notifying the system to read
 *		some pages ahead.
 *  MADV_DONTNEED - the application is finished with the given range,
 *		so the kernel can free resources associated with it.
 *  MADV_REMOVE - the application wants to free up the given range of
 *		pages and associated backing store.
 *  MADV_DONTFORK - omit this area from child's address space when forking:
 *		typically, to avoid COWing pages pinned by get_user_pages().
 *  MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
 *  MADV_MERGEABLE - the application recommends that KSM try to merge pages in
 *		this area with pages of identical content from other such areas.
 *  MADV_UNMERGEABLE - cancel MADV_MERGEABLE: no longer merge pages with others.
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start + len < 0, start is not page-aligned,
 *		"behavior" is not a valid value, or application
 *		is attempting to release locked or shared pages.
 *  -ENOMEM - addresses in the specified range are not currently
 *		mapped, or are outside the AS of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
 */
SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
{
	unsigned long end, tmp;
	struct vm_area_struct *vma, *prev;
	int unmapped_error = 0;
	int error = -EINVAL;
	int write;
	size_t len;

#ifdef CONFIG_MEMORY_FAILURE
	if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE)
		return madvise_hwpoison(behavior, start, start+len_in);
#endif
	if (!madvise_behavior_valid(behavior))
		return error;

	write = madvise_need_mmap_write(behavior);
	if (write)
		down_write(&current->mm->mmap_sem);
	else
		down_read(&current->mm->mmap_sem);

	if (start & ~PAGE_MASK)
		goto out;
	len = (len_in + ~PAGE_MASK) & PAGE_MASK;

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		goto out;

	end = start + len;
	if (end < start)
		goto out;

	error = 0;
	if (end == start)
		goto out;

	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -ENOMEM at the end.
	 * - different from the way of handling in mlock etc.
	 */
	vma = find_vma_prev(current->mm, start, &prev);
	if (vma && start > vma->vm_start)
		prev = vma;

	for (;;) {
		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			goto out;

		/* Here start < (end|vma->vm_end). */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
			if (start >= end)
				goto out;
		}

		/* Here vma->vm_start <= start < (end|vma->vm_end) */
		tmp = vma->vm_end;
		if (end < tmp)
			tmp = end;

		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
		error = madvise_vma(vma, &prev, start, tmp, behavior);
		if (error)
			goto out;
		start = tmp;
		if (prev && start < prev->vm_end)
			start = prev->vm_end;
		error = unmapped_error;
		if (start >= end)
			goto out;
		if (prev)
			vma = prev->vm_next;
		else	/* madvise_remove dropped mmap_sem */
			vma = find_vma(current->mm, start);
	}
out:
	if (write)
		up_write(&current->mm->mmap_sem);
	else
		up_read(&current->mm->mmap_sem);

	return error;
}
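
/*
 * Illustrative userspace sketch (not part of this file; identifiers
 * hypothetical): the access-pattern hints documented above, applied to
 * one long sequential pass followed by random point lookups:
 *
 *	#include <sys/mman.h>
 *
 *	void *m = mmap(NULL, filesize, PROT_READ, MAP_PRIVATE, fd, 0);
 *	madvise(m, filesize, MADV_SEQUENTIAL);	// aggressive readahead
 *	// ... stream through the mapping once ...
 *	madvise(m, filesize, MADV_RANDOM);	// minimal readahead
 *	// ... random lookups ...
 *	madvise(m, filesize, MADV_NORMAL);	// restore default behavior
 */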