Commit 9978ad583e100945b74e4f33e73317983ea32df9

Authored by Lee Schermerhorn
Committed by Linus Torvalds
1 parent c11d69d8c8

mlock: make mlock error return Posixly Correct

Rework Posix error return for mlock().

Posix requires error codes for the mlock*() system calls under some
conditions that differ from what kernel low-level functions, such as
get_user_pages(), return for those conditions.  For more info, see:

http://marc.info/?l=linux-kernel&m=121750892930775&w=2

This patch provides the same translation of get_user_pages()
error codes to the Posix-specified error codes in the context
of the mlock rework for the unevictable LRU.

[akpm@linux-foundation.org: fix build]
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 2 changed files with 28 additions and 7 deletions Side-by-side Diff

... ... @@ -2821,7 +2821,7 @@
2821 2821 len, write, 0, NULL, NULL);
2822 2822 if (ret < 0)
2823 2823 return ret;
2824   - return ret == len ? 0 : -1;
  2824 + return ret == len ? 0 : -EFAULT;
2825 2825 }
2826 2826  
2827 2827 #if !defined(__HAVE_ARCH_GATE_AREA)
... ... @@ -248,13 +248,26 @@
248 248 addr += PAGE_SIZE; /* for next get_user_pages() */
249 249 nr_pages--;
250 250 }
  251 + ret = 0;
251 252 }
252 253  
253 254 lru_add_drain_all(); /* to update stats */
254 255  
255   - return 0; /* count entire vma as locked_vm */
  256 + return ret; /* count entire vma as locked_vm */
256 257 }
257 258  
  259 +/*
  260 + * convert get_user_pages() return value to posix mlock() error
  261 + */
  262 +static int __mlock_posix_error_return(long retval)
  263 +{
  264 + if (retval == -EFAULT)
  265 + retval = -ENOMEM;
  266 + else if (retval == -ENOMEM)
  267 + retval = -EAGAIN;
  268 + return retval;
  269 +}
  270 +
258 271 #else /* CONFIG_UNEVICTABLE_LRU */
259 272  
260 273 /*
261 274  
... ... @@ -265,9 +278,15 @@
265 278 int mlock)
266 279 {
267 280 if (mlock && (vma->vm_flags & VM_LOCKED))
268   - make_pages_present(start, end);
  281 + return make_pages_present(start, end);
269 282 return 0;
270 283 }
  284 +
  285 +static inline int __mlock_posix_error_return(long retval)
  286 +{
  287 + return 0;
  288 +}
  289 +
271 290 #endif /* CONFIG_UNEVICTABLE_LRU */
272 291  
273 292 /**
... ... @@ -434,10 +453,7 @@
434 453 downgrade_write(&mm->mmap_sem);
435 454  
436 455 ret = __mlock_vma_pages_range(vma, start, end, 1);
437   - if (ret > 0) {
438   - mm->locked_vm -= ret;
439   - ret = 0;
440   - }
  456 +
441 457 /*
442 458 * Need to reacquire mmap sem in write mode, as our callers
443 459 * expect this. We have no support for atomically upgrading
... ... @@ -451,6 +467,11 @@
451 467 /* non-NULL *prev must contain @start, but need to check @end */
452 468 if (!(*prev) || end > (*prev)->vm_end)
453 469 ret = -ENOMEM;
  470 + else if (ret > 0) {
  471 + mm->locked_vm -= ret;
  472 + ret = 0;
  473 + } else
  474 + ret = __mlock_posix_error_return(ret); /* translate if needed */
454 475 } else {
455 476 /*
456 477 * TODO: for unlocking, pages will already be resident, so