Commit 84d96d897671cfb386e722acbefdb3a79e115a8a
Committed by: Linus Torvalds
1 parent: 4edd7ceff0
Exists in smarc-l5.0.0_1.0.0-ga and in 5 other branches
mm: madvise: complete input validation before taking lock
In madvise(), there doesn't seem to be any reason for taking the &current->mm->mmap_sem before start and len_in have been validated.

Incidentally, this removes the need for the out: label.

[akpm@linux-foundation.org: s/out_plug/out/, per David]
Signed-off-by: Rasmus Villemoes <linux@rasmusvillemoes.dk>
Acked-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
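From userspace the observable behaviour is unchanged: invalid arguments still fail with the same errno, only now they are rejected before mmap_sem is taken. A minimal sketch (not part of the patch; the unaligned address is just an illustrative value):

/* Illustration only: madvise() still returns -EINVAL for an unaligned
 * start, but with this patch the check happens before mmap_sem is
 * acquired, so invalid calls no longer contend on the lock. */
#include <sys/mman.h>
#include <errno.h>
#include <stdio.h>

int main(void)
{
	/* 0x1001 is deliberately not page-aligned */
	if (madvise((void *)0x1001, 4096, MADV_DONTNEED) == -1 && errno == EINVAL)
		printf("rejected during argument validation\n");
	return 0;
}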
Showing 1 changed file with 15 additions and 16 deletions
mm/madvise.c
1 | /* | 1 | /* |
2 | * linux/mm/madvise.c | 2 | * linux/mm/madvise.c |
3 | * | 3 | * |
4 | * Copyright (C) 1999 Linus Torvalds | 4 | * Copyright (C) 1999 Linus Torvalds |
5 | * Copyright (C) 2002 Christoph Hellwig | 5 | * Copyright (C) 2002 Christoph Hellwig |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #include <linux/mman.h> | 8 | #include <linux/mman.h> |
9 | #include <linux/pagemap.h> | 9 | #include <linux/pagemap.h> |
10 | #include <linux/syscalls.h> | 10 | #include <linux/syscalls.h> |
11 | #include <linux/mempolicy.h> | 11 | #include <linux/mempolicy.h> |
12 | #include <linux/page-isolation.h> | 12 | #include <linux/page-isolation.h> |
13 | #include <linux/hugetlb.h> | 13 | #include <linux/hugetlb.h> |
14 | #include <linux/falloc.h> | 14 | #include <linux/falloc.h> |
15 | #include <linux/sched.h> | 15 | #include <linux/sched.h> |
16 | #include <linux/ksm.h> | 16 | #include <linux/ksm.h> |
17 | #include <linux/fs.h> | 17 | #include <linux/fs.h> |
18 | #include <linux/file.h> | 18 | #include <linux/file.h> |
19 | #include <linux/blkdev.h> | 19 | #include <linux/blkdev.h> |
20 | #include <linux/swap.h> | 20 | #include <linux/swap.h> |
21 | #include <linux/swapops.h> | 21 | #include <linux/swapops.h> |
22 | 22 | ||
23 | /* | 23 | /* |
24 | * Any behaviour which results in changes to the vma->vm_flags needs to | 24 | * Any behaviour which results in changes to the vma->vm_flags needs to |
25 | * take mmap_sem for writing. Others, which simply traverse vmas, need | 25 | * take mmap_sem for writing. Others, which simply traverse vmas, need |
26 | * to only take it for reading. | 26 | * to only take it for reading. |
27 | */ | 27 | */ |
28 | static int madvise_need_mmap_write(int behavior) | 28 | static int madvise_need_mmap_write(int behavior) |
29 | { | 29 | { |
30 | switch (behavior) { | 30 | switch (behavior) { |
31 | case MADV_REMOVE: | 31 | case MADV_REMOVE: |
32 | case MADV_WILLNEED: | 32 | case MADV_WILLNEED: |
33 | case MADV_DONTNEED: | 33 | case MADV_DONTNEED: |
34 | return 0; | 34 | return 0; |
35 | default: | 35 | default: |
36 | /* be safe, default to 1. list exceptions explicitly */ | 36 | /* be safe, default to 1. list exceptions explicitly */ |
37 | return 1; | 37 | return 1; |
38 | } | 38 | } |
39 | } | 39 | } |
40 | 40 | ||
41 | /* | 41 | /* |
42 | * We can potentially split a vm area into separate | 42 | * We can potentially split a vm area into separate |
43 | * areas, each area with its own behavior. | 43 | * areas, each area with its own behavior. |
44 | */ | 44 | */ |
45 | static long madvise_behavior(struct vm_area_struct * vma, | 45 | static long madvise_behavior(struct vm_area_struct * vma, |
46 | struct vm_area_struct **prev, | 46 | struct vm_area_struct **prev, |
47 | unsigned long start, unsigned long end, int behavior) | 47 | unsigned long start, unsigned long end, int behavior) |
48 | { | 48 | { |
49 | struct mm_struct * mm = vma->vm_mm; | 49 | struct mm_struct * mm = vma->vm_mm; |
50 | int error = 0; | 50 | int error = 0; |
51 | pgoff_t pgoff; | 51 | pgoff_t pgoff; |
52 | unsigned long new_flags = vma->vm_flags; | 52 | unsigned long new_flags = vma->vm_flags; |
53 | 53 | ||
54 | switch (behavior) { | 54 | switch (behavior) { |
55 | case MADV_NORMAL: | 55 | case MADV_NORMAL: |
56 | new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ; | 56 | new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ; |
57 | break; | 57 | break; |
58 | case MADV_SEQUENTIAL: | 58 | case MADV_SEQUENTIAL: |
59 | new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ; | 59 | new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ; |
60 | break; | 60 | break; |
61 | case MADV_RANDOM: | 61 | case MADV_RANDOM: |
62 | new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ; | 62 | new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ; |
63 | break; | 63 | break; |
64 | case MADV_DONTFORK: | 64 | case MADV_DONTFORK: |
65 | new_flags |= VM_DONTCOPY; | 65 | new_flags |= VM_DONTCOPY; |
66 | break; | 66 | break; |
67 | case MADV_DOFORK: | 67 | case MADV_DOFORK: |
68 | if (vma->vm_flags & VM_IO) { | 68 | if (vma->vm_flags & VM_IO) { |
69 | error = -EINVAL; | 69 | error = -EINVAL; |
70 | goto out; | 70 | goto out; |
71 | } | 71 | } |
72 | new_flags &= ~VM_DONTCOPY; | 72 | new_flags &= ~VM_DONTCOPY; |
73 | break; | 73 | break; |
74 | case MADV_DONTDUMP: | 74 | case MADV_DONTDUMP: |
75 | new_flags |= VM_DONTDUMP; | 75 | new_flags |= VM_DONTDUMP; |
76 | break; | 76 | break; |
77 | case MADV_DODUMP: | 77 | case MADV_DODUMP: |
78 | if (new_flags & VM_SPECIAL) { | 78 | if (new_flags & VM_SPECIAL) { |
79 | error = -EINVAL; | 79 | error = -EINVAL; |
80 | goto out; | 80 | goto out; |
81 | } | 81 | } |
82 | new_flags &= ~VM_DONTDUMP; | 82 | new_flags &= ~VM_DONTDUMP; |
83 | break; | 83 | break; |
84 | case MADV_MERGEABLE: | 84 | case MADV_MERGEABLE: |
85 | case MADV_UNMERGEABLE: | 85 | case MADV_UNMERGEABLE: |
86 | error = ksm_madvise(vma, start, end, behavior, &new_flags); | 86 | error = ksm_madvise(vma, start, end, behavior, &new_flags); |
87 | if (error) | 87 | if (error) |
88 | goto out; | 88 | goto out; |
89 | break; | 89 | break; |
90 | case MADV_HUGEPAGE: | 90 | case MADV_HUGEPAGE: |
91 | case MADV_NOHUGEPAGE: | 91 | case MADV_NOHUGEPAGE: |
92 | error = hugepage_madvise(vma, &new_flags, behavior); | 92 | error = hugepage_madvise(vma, &new_flags, behavior); |
93 | if (error) | 93 | if (error) |
94 | goto out; | 94 | goto out; |
95 | break; | 95 | break; |
96 | } | 96 | } |
97 | 97 | ||
98 | if (new_flags == vma->vm_flags) { | 98 | if (new_flags == vma->vm_flags) { |
99 | *prev = vma; | 99 | *prev = vma; |
100 | goto out; | 100 | goto out; |
101 | } | 101 | } |
102 | 102 | ||
103 | pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); | 103 | pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); |
104 | *prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma, | 104 | *prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma, |
105 | vma->vm_file, pgoff, vma_policy(vma)); | 105 | vma->vm_file, pgoff, vma_policy(vma)); |
106 | if (*prev) { | 106 | if (*prev) { |
107 | vma = *prev; | 107 | vma = *prev; |
108 | goto success; | 108 | goto success; |
109 | } | 109 | } |
110 | 110 | ||
111 | *prev = vma; | 111 | *prev = vma; |
112 | 112 | ||
113 | if (start != vma->vm_start) { | 113 | if (start != vma->vm_start) { |
114 | error = split_vma(mm, vma, start, 1); | 114 | error = split_vma(mm, vma, start, 1); |
115 | if (error) | 115 | if (error) |
116 | goto out; | 116 | goto out; |
117 | } | 117 | } |
118 | 118 | ||
119 | if (end != vma->vm_end) { | 119 | if (end != vma->vm_end) { |
120 | error = split_vma(mm, vma, end, 0); | 120 | error = split_vma(mm, vma, end, 0); |
121 | if (error) | 121 | if (error) |
122 | goto out; | 122 | goto out; |
123 | } | 123 | } |
124 | 124 | ||
125 | success: | 125 | success: |
126 | /* | 126 | /* |
127 | * vm_flags is protected by the mmap_sem held in write mode. | 127 | * vm_flags is protected by the mmap_sem held in write mode. |
128 | */ | 128 | */ |
129 | vma->vm_flags = new_flags; | 129 | vma->vm_flags = new_flags; |
130 | 130 | ||
131 | out: | 131 | out: |
132 | if (error == -ENOMEM) | 132 | if (error == -ENOMEM) |
133 | error = -EAGAIN; | 133 | error = -EAGAIN; |
134 | return error; | 134 | return error; |
135 | } | 135 | } |
136 | 136 | ||
137 | #ifdef CONFIG_SWAP | 137 | #ifdef CONFIG_SWAP |
138 | static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start, | 138 | static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start, |
139 | unsigned long end, struct mm_walk *walk) | 139 | unsigned long end, struct mm_walk *walk) |
140 | { | 140 | { |
141 | pte_t *orig_pte; | 141 | pte_t *orig_pte; |
142 | struct vm_area_struct *vma = walk->private; | 142 | struct vm_area_struct *vma = walk->private; |
143 | unsigned long index; | 143 | unsigned long index; |
144 | 144 | ||
145 | if (pmd_none_or_trans_huge_or_clear_bad(pmd)) | 145 | if (pmd_none_or_trans_huge_or_clear_bad(pmd)) |
146 | return 0; | 146 | return 0; |
147 | 147 | ||
148 | for (index = start; index != end; index += PAGE_SIZE) { | 148 | for (index = start; index != end; index += PAGE_SIZE) { |
149 | pte_t pte; | 149 | pte_t pte; |
150 | swp_entry_t entry; | 150 | swp_entry_t entry; |
151 | struct page *page; | 151 | struct page *page; |
152 | spinlock_t *ptl; | 152 | spinlock_t *ptl; |
153 | 153 | ||
154 | orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl); | 154 | orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl); |
155 | pte = *(orig_pte + ((index - start) / PAGE_SIZE)); | 155 | pte = *(orig_pte + ((index - start) / PAGE_SIZE)); |
156 | pte_unmap_unlock(orig_pte, ptl); | 156 | pte_unmap_unlock(orig_pte, ptl); |
157 | 157 | ||
158 | if (pte_present(pte) || pte_none(pte) || pte_file(pte)) | 158 | if (pte_present(pte) || pte_none(pte) || pte_file(pte)) |
159 | continue; | 159 | continue; |
160 | entry = pte_to_swp_entry(pte); | 160 | entry = pte_to_swp_entry(pte); |
161 | if (unlikely(non_swap_entry(entry))) | 161 | if (unlikely(non_swap_entry(entry))) |
162 | continue; | 162 | continue; |
163 | 163 | ||
164 | page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE, | 164 | page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE, |
165 | vma, index); | 165 | vma, index); |
166 | if (page) | 166 | if (page) |
167 | page_cache_release(page); | 167 | page_cache_release(page); |
168 | } | 168 | } |
169 | 169 | ||
170 | return 0; | 170 | return 0; |
171 | } | 171 | } |
172 | 172 | ||
173 | static void force_swapin_readahead(struct vm_area_struct *vma, | 173 | static void force_swapin_readahead(struct vm_area_struct *vma, |
174 | unsigned long start, unsigned long end) | 174 | unsigned long start, unsigned long end) |
175 | { | 175 | { |
176 | struct mm_walk walk = { | 176 | struct mm_walk walk = { |
177 | .mm = vma->vm_mm, | 177 | .mm = vma->vm_mm, |
178 | .pmd_entry = swapin_walk_pmd_entry, | 178 | .pmd_entry = swapin_walk_pmd_entry, |
179 | .private = vma, | 179 | .private = vma, |
180 | }; | 180 | }; |
181 | 181 | ||
182 | walk_page_range(start, end, &walk); | 182 | walk_page_range(start, end, &walk); |
183 | 183 | ||
184 | lru_add_drain(); /* Push any new pages onto the LRU now */ | 184 | lru_add_drain(); /* Push any new pages onto the LRU now */ |
185 | } | 185 | } |
186 | 186 | ||
187 | static void force_shm_swapin_readahead(struct vm_area_struct *vma, | 187 | static void force_shm_swapin_readahead(struct vm_area_struct *vma, |
188 | unsigned long start, unsigned long end, | 188 | unsigned long start, unsigned long end, |
189 | struct address_space *mapping) | 189 | struct address_space *mapping) |
190 | { | 190 | { |
191 | pgoff_t index; | 191 | pgoff_t index; |
192 | struct page *page; | 192 | struct page *page; |
193 | swp_entry_t swap; | 193 | swp_entry_t swap; |
194 | 194 | ||
195 | for (; start < end; start += PAGE_SIZE) { | 195 | for (; start < end; start += PAGE_SIZE) { |
196 | index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; | 196 | index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; |
197 | 197 | ||
198 | page = find_get_page(mapping, index); | 198 | page = find_get_page(mapping, index); |
199 | if (!radix_tree_exceptional_entry(page)) { | 199 | if (!radix_tree_exceptional_entry(page)) { |
200 | if (page) | 200 | if (page) |
201 | page_cache_release(page); | 201 | page_cache_release(page); |
202 | continue; | 202 | continue; |
203 | } | 203 | } |
204 | swap = radix_to_swp_entry(page); | 204 | swap = radix_to_swp_entry(page); |
205 | page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE, | 205 | page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE, |
206 | NULL, 0); | 206 | NULL, 0); |
207 | if (page) | 207 | if (page) |
208 | page_cache_release(page); | 208 | page_cache_release(page); |
209 | } | 209 | } |
210 | 210 | ||
211 | lru_add_drain(); /* Push any new pages onto the LRU now */ | 211 | lru_add_drain(); /* Push any new pages onto the LRU now */ |
212 | } | 212 | } |
213 | #endif /* CONFIG_SWAP */ | 213 | #endif /* CONFIG_SWAP */ |
214 | 214 | ||
215 | /* | 215 | /* |
216 | * Schedule all required I/O operations. Do not wait for completion. | 216 | * Schedule all required I/O operations. Do not wait for completion. |
217 | */ | 217 | */ |
218 | static long madvise_willneed(struct vm_area_struct * vma, | 218 | static long madvise_willneed(struct vm_area_struct * vma, |
219 | struct vm_area_struct ** prev, | 219 | struct vm_area_struct ** prev, |
220 | unsigned long start, unsigned long end) | 220 | unsigned long start, unsigned long end) |
221 | { | 221 | { |
222 | struct file *file = vma->vm_file; | 222 | struct file *file = vma->vm_file; |
223 | 223 | ||
224 | #ifdef CONFIG_SWAP | 224 | #ifdef CONFIG_SWAP |
225 | if (!file || mapping_cap_swap_backed(file->f_mapping)) { | 225 | if (!file || mapping_cap_swap_backed(file->f_mapping)) { |
226 | *prev = vma; | 226 | *prev = vma; |
227 | if (!file) | 227 | if (!file) |
228 | force_swapin_readahead(vma, start, end); | 228 | force_swapin_readahead(vma, start, end); |
229 | else | 229 | else |
230 | force_shm_swapin_readahead(vma, start, end, | 230 | force_shm_swapin_readahead(vma, start, end, |
231 | file->f_mapping); | 231 | file->f_mapping); |
232 | return 0; | 232 | return 0; |
233 | } | 233 | } |
234 | #endif | 234 | #endif |
235 | 235 | ||
236 | if (!file) | 236 | if (!file) |
237 | return -EBADF; | 237 | return -EBADF; |
238 | 238 | ||
239 | if (file->f_mapping->a_ops->get_xip_mem) { | 239 | if (file->f_mapping->a_ops->get_xip_mem) { |
240 | /* no bad return value, but ignore advice */ | 240 | /* no bad return value, but ignore advice */ |
241 | return 0; | 241 | return 0; |
242 | } | 242 | } |
243 | 243 | ||
244 | *prev = vma; | 244 | *prev = vma; |
245 | start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; | 245 | start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; |
246 | if (end > vma->vm_end) | 246 | if (end > vma->vm_end) |
247 | end = vma->vm_end; | 247 | end = vma->vm_end; |
248 | end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; | 248 | end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; |
249 | 249 | ||
250 | force_page_cache_readahead(file->f_mapping, file, start, end - start); | 250 | force_page_cache_readahead(file->f_mapping, file, start, end - start); |
251 | return 0; | 251 | return 0; |
252 | } | 252 | } |
253 | 253 | ||
254 | /* | 254 | /* |
255 | * Application no longer needs these pages. If the pages are dirty, | 255 | * Application no longer needs these pages. If the pages are dirty, |
256 | * it's OK to just throw them away. The app will be more careful about | 256 | * it's OK to just throw them away. The app will be more careful about |
257 | * data it wants to keep. Be sure to free swap resources too. The | 257 | * data it wants to keep. Be sure to free swap resources too. The |
258 | * zap_page_range call sets things up for shrink_active_list to actually free | 258 | * zap_page_range call sets things up for shrink_active_list to actually free |
259 | * these pages later if no one else has touched them in the meantime, | 259 | * these pages later if no one else has touched them in the meantime, |
260 | * although we could add these pages to a global reuse list for | 260 | * although we could add these pages to a global reuse list for |
261 | * shrink_active_list to pick up before reclaiming other pages. | 261 | * shrink_active_list to pick up before reclaiming other pages. |
262 | * | 262 | * |
263 | * NB: This interface discards data rather than pushes it out to swap, | 263 | * NB: This interface discards data rather than pushes it out to swap, |
264 | * as some implementations do. This has performance implications for | 264 | * as some implementations do. This has performance implications for |
265 | * applications like large transactional databases which want to discard | 265 | * applications like large transactional databases which want to discard |
266 | * pages in anonymous maps after committing to backing store the data | 266 | * pages in anonymous maps after committing to backing store the data |
267 | * that was kept in them. There is no reason to write this data out to | 267 | * that was kept in them. There is no reason to write this data out to |
268 | * the swap area if the application is discarding it. | 268 | * the swap area if the application is discarding it. |
269 | * | 269 | * |
270 | * An interface that causes the system to free clean pages and flush | 270 | * An interface that causes the system to free clean pages and flush |
271 | * dirty pages is already available as msync(MS_INVALIDATE). | 271 | * dirty pages is already available as msync(MS_INVALIDATE). |
272 | */ | 272 | */ |
273 | static long madvise_dontneed(struct vm_area_struct * vma, | 273 | static long madvise_dontneed(struct vm_area_struct * vma, |
274 | struct vm_area_struct ** prev, | 274 | struct vm_area_struct ** prev, |
275 | unsigned long start, unsigned long end) | 275 | unsigned long start, unsigned long end) |
276 | { | 276 | { |
277 | *prev = vma; | 277 | *prev = vma; |
278 | if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP)) | 278 | if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP)) |
279 | return -EINVAL; | 279 | return -EINVAL; |
280 | 280 | ||
281 | if (unlikely(vma->vm_flags & VM_NONLINEAR)) { | 281 | if (unlikely(vma->vm_flags & VM_NONLINEAR)) { |
282 | struct zap_details details = { | 282 | struct zap_details details = { |
283 | .nonlinear_vma = vma, | 283 | .nonlinear_vma = vma, |
284 | .last_index = ULONG_MAX, | 284 | .last_index = ULONG_MAX, |
285 | }; | 285 | }; |
286 | zap_page_range(vma, start, end - start, &details); | 286 | zap_page_range(vma, start, end - start, &details); |
287 | } else | 287 | } else |
288 | zap_page_range(vma, start, end - start, NULL); | 288 | zap_page_range(vma, start, end - start, NULL); |
289 | return 0; | 289 | return 0; |
290 | } | 290 | } |
291 | 291 | ||
292 | /* | 292 | /* |
293 | * Application wants to free up the pages and associated backing store. | 293 | * Application wants to free up the pages and associated backing store. |
294 | * This is effectively punching a hole into the middle of a file. | 294 | * This is effectively punching a hole into the middle of a file. |
295 | * | 295 | * |
296 | * NOTE: Currently, only shmfs/tmpfs is supported for this operation. | 296 | * NOTE: Currently, only shmfs/tmpfs is supported for this operation. |
297 | * Other filesystems return -ENOSYS. | 297 | * Other filesystems return -ENOSYS. |
298 | */ | 298 | */ |
299 | static long madvise_remove(struct vm_area_struct *vma, | 299 | static long madvise_remove(struct vm_area_struct *vma, |
300 | struct vm_area_struct **prev, | 300 | struct vm_area_struct **prev, |
301 | unsigned long start, unsigned long end) | 301 | unsigned long start, unsigned long end) |
302 | { | 302 | { |
303 | loff_t offset; | 303 | loff_t offset; |
304 | int error; | 304 | int error; |
305 | struct file *f; | 305 | struct file *f; |
306 | 306 | ||
307 | *prev = NULL; /* tell sys_madvise we drop mmap_sem */ | 307 | *prev = NULL; /* tell sys_madvise we drop mmap_sem */ |
308 | 308 | ||
309 | if (vma->vm_flags & (VM_LOCKED|VM_NONLINEAR|VM_HUGETLB)) | 309 | if (vma->vm_flags & (VM_LOCKED|VM_NONLINEAR|VM_HUGETLB)) |
310 | return -EINVAL; | 310 | return -EINVAL; |
311 | 311 | ||
312 | f = vma->vm_file; | 312 | f = vma->vm_file; |
313 | 313 | ||
314 | if (!f || !f->f_mapping || !f->f_mapping->host) { | 314 | if (!f || !f->f_mapping || !f->f_mapping->host) { |
315 | return -EINVAL; | 315 | return -EINVAL; |
316 | } | 316 | } |
317 | 317 | ||
318 | if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE)) | 318 | if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE)) |
319 | return -EACCES; | 319 | return -EACCES; |
320 | 320 | ||
321 | offset = (loff_t)(start - vma->vm_start) | 321 | offset = (loff_t)(start - vma->vm_start) |
322 | + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); | 322 | + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); |
323 | 323 | ||
324 | /* | 324 | /* |
325 | * Filesystem's fallocate may need to take i_mutex. We need to | 325 | * Filesystem's fallocate may need to take i_mutex. We need to |
326 | * explicitly grab a reference because the vma (and hence the | 326 | * explicitly grab a reference because the vma (and hence the |
327 | * vma's reference to the file) can go away as soon as we drop | 327 | * vma's reference to the file) can go away as soon as we drop |
328 | * mmap_sem. | 328 | * mmap_sem. |
329 | */ | 329 | */ |
330 | get_file(f); | 330 | get_file(f); |
331 | up_read(&current->mm->mmap_sem); | 331 | up_read(&current->mm->mmap_sem); |
332 | error = do_fallocate(f, | 332 | error = do_fallocate(f, |
333 | FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, | 333 | FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, |
334 | offset, end - start); | 334 | offset, end - start); |
335 | fput(f); | 335 | fput(f); |
336 | down_read(&current->mm->mmap_sem); | 336 | down_read(&current->mm->mmap_sem); |
337 | return error; | 337 | return error; |
338 | } | 338 | } |
339 | 339 | ||
340 | #ifdef CONFIG_MEMORY_FAILURE | 340 | #ifdef CONFIG_MEMORY_FAILURE |
341 | /* | 341 | /* |
342 | * Error injection support for memory error handling. | 342 | * Error injection support for memory error handling. |
343 | */ | 343 | */ |
344 | static int madvise_hwpoison(int bhv, unsigned long start, unsigned long end) | 344 | static int madvise_hwpoison(int bhv, unsigned long start, unsigned long end) |
345 | { | 345 | { |
346 | int ret = 0; | 346 | int ret = 0; |
347 | 347 | ||
348 | if (!capable(CAP_SYS_ADMIN)) | 348 | if (!capable(CAP_SYS_ADMIN)) |
349 | return -EPERM; | 349 | return -EPERM; |
350 | for (; start < end; start += PAGE_SIZE) { | 350 | for (; start < end; start += PAGE_SIZE) { |
351 | struct page *p; | 351 | struct page *p; |
352 | int ret = get_user_pages_fast(start, 1, 0, &p); | 352 | int ret = get_user_pages_fast(start, 1, 0, &p); |
353 | if (ret != 1) | 353 | if (ret != 1) |
354 | return ret; | 354 | return ret; |
355 | if (bhv == MADV_SOFT_OFFLINE) { | 355 | if (bhv == MADV_SOFT_OFFLINE) { |
356 | printk(KERN_INFO "Soft offlining page %lx at %lx\n", | 356 | printk(KERN_INFO "Soft offlining page %lx at %lx\n", |
357 | page_to_pfn(p), start); | 357 | page_to_pfn(p), start); |
358 | ret = soft_offline_page(p, MF_COUNT_INCREASED); | 358 | ret = soft_offline_page(p, MF_COUNT_INCREASED); |
359 | if (ret) | 359 | if (ret) |
360 | break; | 360 | break; |
361 | continue; | 361 | continue; |
362 | } | 362 | } |
363 | printk(KERN_INFO "Injecting memory failure for page %lx at %lx\n", | 363 | printk(KERN_INFO "Injecting memory failure for page %lx at %lx\n", |
364 | page_to_pfn(p), start); | 364 | page_to_pfn(p), start); |
365 | /* Ignore return value for now */ | 365 | /* Ignore return value for now */ |
366 | memory_failure(page_to_pfn(p), 0, MF_COUNT_INCREASED); | 366 | memory_failure(page_to_pfn(p), 0, MF_COUNT_INCREASED); |
367 | } | 367 | } |
368 | return ret; | 368 | return ret; |
369 | } | 369 | } |
370 | #endif | 370 | #endif |
371 | 371 | ||
372 | static long | 372 | static long |
373 | madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev, | 373 | madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev, |
374 | unsigned long start, unsigned long end, int behavior) | 374 | unsigned long start, unsigned long end, int behavior) |
375 | { | 375 | { |
376 | switch (behavior) { | 376 | switch (behavior) { |
377 | case MADV_REMOVE: | 377 | case MADV_REMOVE: |
378 | return madvise_remove(vma, prev, start, end); | 378 | return madvise_remove(vma, prev, start, end); |
379 | case MADV_WILLNEED: | 379 | case MADV_WILLNEED: |
380 | return madvise_willneed(vma, prev, start, end); | 380 | return madvise_willneed(vma, prev, start, end); |
381 | case MADV_DONTNEED: | 381 | case MADV_DONTNEED: |
382 | return madvise_dontneed(vma, prev, start, end); | 382 | return madvise_dontneed(vma, prev, start, end); |
383 | default: | 383 | default: |
384 | return madvise_behavior(vma, prev, start, end, behavior); | 384 | return madvise_behavior(vma, prev, start, end, behavior); |
385 | } | 385 | } |
386 | } | 386 | } |
387 | 387 | ||
388 | static int | 388 | static int |
389 | madvise_behavior_valid(int behavior) | 389 | madvise_behavior_valid(int behavior) |
390 | { | 390 | { |
391 | switch (behavior) { | 391 | switch (behavior) { |
392 | case MADV_DOFORK: | 392 | case MADV_DOFORK: |
393 | case MADV_DONTFORK: | 393 | case MADV_DONTFORK: |
394 | case MADV_NORMAL: | 394 | case MADV_NORMAL: |
395 | case MADV_SEQUENTIAL: | 395 | case MADV_SEQUENTIAL: |
396 | case MADV_RANDOM: | 396 | case MADV_RANDOM: |
397 | case MADV_REMOVE: | 397 | case MADV_REMOVE: |
398 | case MADV_WILLNEED: | 398 | case MADV_WILLNEED: |
399 | case MADV_DONTNEED: | 399 | case MADV_DONTNEED: |
400 | #ifdef CONFIG_KSM | 400 | #ifdef CONFIG_KSM |
401 | case MADV_MERGEABLE: | 401 | case MADV_MERGEABLE: |
402 | case MADV_UNMERGEABLE: | 402 | case MADV_UNMERGEABLE: |
403 | #endif | 403 | #endif |
404 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 404 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
405 | case MADV_HUGEPAGE: | 405 | case MADV_HUGEPAGE: |
406 | case MADV_NOHUGEPAGE: | 406 | case MADV_NOHUGEPAGE: |
407 | #endif | 407 | #endif |
408 | case MADV_DONTDUMP: | 408 | case MADV_DONTDUMP: |
409 | case MADV_DODUMP: | 409 | case MADV_DODUMP: |
410 | return 1; | 410 | return 1; |
411 | 411 | ||
412 | default: | 412 | default: |
413 | return 0; | 413 | return 0; |
414 | } | 414 | } |
415 | } | 415 | } |
416 | 416 | ||
417 | /* | 417 | /* |
418 | * The madvise(2) system call. | 418 | * The madvise(2) system call. |
419 | * | 419 | * |
420 | * Applications can use madvise() to advise the kernel how it should | 420 | * Applications can use madvise() to advise the kernel how it should |
421 | * handle paging I/O in this VM area. The idea is to help the kernel | 421 | * handle paging I/O in this VM area. The idea is to help the kernel |
422 | * use appropriate read-ahead and caching techniques. The information | 422 | * use appropriate read-ahead and caching techniques. The information |
423 | * provided is advisory only, and can be safely disregarded by the | 423 | * provided is advisory only, and can be safely disregarded by the |
424 | * kernel without affecting the correct operation of the application. | 424 | * kernel without affecting the correct operation of the application. |
425 | * | 425 | * |
426 | * behavior values: | 426 | * behavior values: |
427 | * MADV_NORMAL - the default behavior is to read clusters. This | 427 | * MADV_NORMAL - the default behavior is to read clusters. This |
428 | * results in some read-ahead and read-behind. | 428 | * results in some read-ahead and read-behind. |
429 | * MADV_RANDOM - the system should read the minimum amount of data | 429 | * MADV_RANDOM - the system should read the minimum amount of data |
430 | * on any access, since it is unlikely that the appli- | 430 | * on any access, since it is unlikely that the appli- |
431 | * cation will need more than what it asks for. | 431 | * cation will need more than what it asks for. |
432 | * MADV_SEQUENTIAL - pages in the given range will probably be accessed | 432 | * MADV_SEQUENTIAL - pages in the given range will probably be accessed |
433 | * once, so they can be aggressively read ahead, and | 433 | * once, so they can be aggressively read ahead, and |
434 | * can be freed soon after they are accessed. | 434 | * can be freed soon after they are accessed. |
435 | * MADV_WILLNEED - the application is notifying the system to read | 435 | * MADV_WILLNEED - the application is notifying the system to read |
436 | * some pages ahead. | 436 | * some pages ahead. |
437 | * MADV_DONTNEED - the application is finished with the given range, | 437 | * MADV_DONTNEED - the application is finished with the given range, |
438 | * so the kernel can free resources associated with it. | 438 | * so the kernel can free resources associated with it. |
439 | * MADV_REMOVE - the application wants to free up the given range of | 439 | * MADV_REMOVE - the application wants to free up the given range of |
440 | * pages and associated backing store. | 440 | * pages and associated backing store. |
441 | * MADV_DONTFORK - omit this area from child's address space when forking: | 441 | * MADV_DONTFORK - omit this area from child's address space when forking: |
442 | * typically, to avoid COWing pages pinned by get_user_pages(). | 442 | * typically, to avoid COWing pages pinned by get_user_pages(). |
443 | * MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking. | 443 | * MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking. |
444 | * MADV_MERGEABLE - the application recommends that KSM try to merge pages in | 444 | * MADV_MERGEABLE - the application recommends that KSM try to merge pages in |
445 | * this area with pages of identical content from other such areas. | 445 | * this area with pages of identical content from other such areas. |
446 | * MADV_UNMERGEABLE- cancel MADV_MERGEABLE: no longer merge pages with others. | 446 | * MADV_UNMERGEABLE- cancel MADV_MERGEABLE: no longer merge pages with others. |
447 | * | 447 | * |
448 | * return values: | 448 | * return values: |
449 | * zero - success | 449 | * zero - success |
450 | * -EINVAL - start + len < 0, start is not page-aligned, | 450 | * -EINVAL - start + len < 0, start is not page-aligned, |
451 | * "behavior" is not a valid value, or application | 451 | * "behavior" is not a valid value, or application |
452 | * is attempting to release locked or shared pages. | 452 | * is attempting to release locked or shared pages. |
453 | * -ENOMEM - addresses in the specified range are not currently | 453 | * -ENOMEM - addresses in the specified range are not currently |
454 | * mapped, or are outside the AS of the process. | 454 | * mapped, or are outside the AS of the process. |
455 | * -EIO - an I/O error occurred while paging in data. | 455 | * -EIO - an I/O error occurred while paging in data. |
456 | * -EBADF - map exists, but area maps something that isn't a file. | 456 | * -EBADF - map exists, but area maps something that isn't a file. |
457 | * -EAGAIN - a kernel resource was temporarily unavailable. | 457 | * -EAGAIN - a kernel resource was temporarily unavailable. |
458 | */ | 458 | */ |
459 | SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior) | 459 | SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior) |
460 | { | 460 | { |
461 | unsigned long end, tmp; | 461 | unsigned long end, tmp; |
462 | struct vm_area_struct * vma, *prev; | 462 | struct vm_area_struct * vma, *prev; |
463 | int unmapped_error = 0; | 463 | int unmapped_error = 0; |
464 | int error = -EINVAL; | 464 | int error = -EINVAL; |
465 | int write; | 465 | int write; |
466 | size_t len; | 466 | size_t len; |
467 | struct blk_plug plug; | 467 | struct blk_plug plug; |
468 | 468 | ||
469 | #ifdef CONFIG_MEMORY_FAILURE | 469 | #ifdef CONFIG_MEMORY_FAILURE |
470 | if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE) | 470 | if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE) |
471 | return madvise_hwpoison(behavior, start, start+len_in); | 471 | return madvise_hwpoison(behavior, start, start+len_in); |
472 | #endif | 472 | #endif |
473 | if (!madvise_behavior_valid(behavior)) | 473 | if (!madvise_behavior_valid(behavior)) |
474 | return error; | 474 | return error; |
475 | 475 | ||
476 | write = madvise_need_mmap_write(behavior); | ||
477 | if (write) | ||
478 | down_write(&current->mm->mmap_sem); | ||
479 | else | ||
480 | down_read(&current->mm->mmap_sem); | ||
481 | |||
482 | if (start & ~PAGE_MASK) | 476 | if (start & ~PAGE_MASK) |
483 | goto out; | 477 | return error; |
484 | len = (len_in + ~PAGE_MASK) & PAGE_MASK; | 478 | len = (len_in + ~PAGE_MASK) & PAGE_MASK; |
485 | 479 | ||
486 | /* Check to see whether len was rounded up from small -ve to zero */ | 480 | /* Check to see whether len was rounded up from small -ve to zero */ |
487 | if (len_in && !len) | 481 | if (len_in && !len) |
488 | goto out; | 482 | return error; |
489 | 483 | ||
490 | end = start + len; | 484 | end = start + len; |
491 | if (end < start) | 485 | if (end < start) |
492 | goto out; | 486 | return error; |
493 | 487 | ||
494 | error = 0; | 488 | error = 0; |
495 | if (end == start) | 489 | if (end == start) |
496 | goto out; | 490 | return error; |
497 | 491 | ||
492 | write = madvise_need_mmap_write(behavior); | ||
493 | if (write) | ||
494 | down_write(&current->mm->mmap_sem); | ||
495 | else | ||
496 | down_read(&current->mm->mmap_sem); | ||
497 | |||
498 | /* | 498 | /* |
499 | * If the interval [start,end) covers some unmapped address | 499 | * If the interval [start,end) covers some unmapped address |
500 | * ranges, just ignore them, but return -ENOMEM at the end. | 500 | * ranges, just ignore them, but return -ENOMEM at the end. |
501 | * - different from the way of handling in mlock etc. | 501 | * - different from the way of handling in mlock etc. |
502 | */ | 502 | */ |
503 | vma = find_vma_prev(current->mm, start, &prev); | 503 | vma = find_vma_prev(current->mm, start, &prev); |
504 | if (vma && start > vma->vm_start) | 504 | if (vma && start > vma->vm_start) |
505 | prev = vma; | 505 | prev = vma; |
506 | 506 | ||
507 | blk_start_plug(&plug); | 507 | blk_start_plug(&plug); |
508 | for (;;) { | 508 | for (;;) { |
509 | /* Still start < end. */ | 509 | /* Still start < end. */ |
510 | error = -ENOMEM; | 510 | error = -ENOMEM; |
511 | if (!vma) | 511 | if (!vma) |
512 | goto out_plug; | 512 | goto out; |
513 | 513 | ||
514 | /* Here start < (end|vma->vm_end). */ | 514 | /* Here start < (end|vma->vm_end). */ |
515 | if (start < vma->vm_start) { | 515 | if (start < vma->vm_start) { |
516 | unmapped_error = -ENOMEM; | 516 | unmapped_error = -ENOMEM; |
517 | start = vma->vm_start; | 517 | start = vma->vm_start; |
518 | if (start >= end) | 518 | if (start >= end) |
519 | goto out_plug; | 519 | goto out; |
520 | } | 520 | } |
521 | 521 | ||
522 | /* Here vma->vm_start <= start < (end|vma->vm_end) */ | 522 | /* Here vma->vm_start <= start < (end|vma->vm_end) */ |
523 | tmp = vma->vm_end; | 523 | tmp = vma->vm_end; |
524 | if (end < tmp) | 524 | if (end < tmp) |
525 | tmp = end; | 525 | tmp = end; |
526 | 526 | ||
527 | /* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */ | 527 | /* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */ |
528 | error = madvise_vma(vma, &prev, start, tmp, behavior); | 528 | error = madvise_vma(vma, &prev, start, tmp, behavior); |
529 | if (error) | 529 | if (error) |
530 | goto out_plug; | 530 | goto out; |
531 | start = tmp; | 531 | start = tmp; |
532 | if (prev && start < prev->vm_end) | 532 | if (prev && start < prev->vm_end) |
533 | start = prev->vm_end; | 533 | start = prev->vm_end; |
534 | error = unmapped_error; | 534 | error = unmapped_error; |
535 | if (start >= end) | 535 | if (start >= end) |
536 | goto out_plug; | 536 | goto out; |
537 | if (prev) | 537 | if (prev) |
538 | vma = prev->vm_next; | 538 | vma = prev->vm_next; |
539 | else /* madvise_remove dropped mmap_sem */ | 539 | else /* madvise_remove dropped mmap_sem */ |
540 | vma = find_vma(current->mm, start); | 540 | vma = find_vma(current->mm, start); |
541 | } | 541 | } |
542 | out_plug: | ||
543 | blk_finish_plug(&plug); | ||
544 | out: | 542 | out: |
543 | blk_finish_plug(&plug); | ||
545 | if (write) | 544 | if (write) |