01 Apr, 2009

40 commits

  • Remove PARPORT dependency for Auxiliary Display support.

    This is not needed since the dependency for the KS0108 driver is
    PARPORT_PC.

    Signed-off-by: H Hartley Sweeten
    Cc: Miguel Ojeda Sandonis
    Signed-off-by: Andrew Morton
    Signed-off-by: Linus Torvalds

    H Hartley Sweeten
     
  • Signed-off-by: FUJITA Tomonori
    Cc: James Bottomley
    Signed-off-by: Andrew Morton
    Signed-off-by: Linus Torvalds

    FUJITA Tomonori
     
  • Now that the filesystem freeze operation has been elevated to the VFS, and
    is just an ioctl away, some sort of safety net for unintentionally frozen
    root filesystems may be in order.

    The timeout thaw originally proposed did not get merged, but perhaps
    something like this would be useful in emergencies.

    For example, freeze /path/to/mountpoint may freeze your root filesystem if
    you forgot that you had it unmounted (the path then resides on the root
    filesystem).

    I chose 'j' as the last remaining character other than 'h' which is sort
    of reserved for help (because help is generated on any unknown character).

    I've tested this on a non-root fs with multiple (nested) freezers, as well
    as on a system rendered unresponsive due to a frozen root fs.
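
    As a point of reference, here is a minimal userspace sketch of how the
    new key could be exercised once this lands (it assumes CONFIG_MAGIC_SYSRQ
    and the 'j' binding described above):

    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("/proc/sysrq-trigger", O_WRONLY);

        if (fd < 0)
            return 1;
        /* 'j' requests the emergency thaw of all frozen filesystems */
        if (write(fd, "j", 1) != 1) {
            close(fd);
            return 1;
        }
        close(fd);
        return 0;
    }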

    [randy.dunlap@oracle.com: emergency thaw only if CONFIG_BLOCK enabled]
    Signed-off-by: Eric Sandeen
    Cc: Takashi Sato
    Signed-off-by: Randy Dunlap
    Signed-off-by: Andrew Morton
    Signed-off-by: Linus Torvalds

    Eric Sandeen
     
  • Four redundant if-conditions in function __rb_erase_color() in
    lib/rbtree.c are removed.

    In pseudo-source-code, the structure of the code is as follows:

    if ((!A || B) && (!C || D)) {
        .
        .
        .
    } else {
        if (!C || D) {  // if this is true, it implies: (A == true) && (B == false)
            if (A) {    // hence this always evaluates to 'true'...
                .
            }
            .
            // at this point, C always becomes true, because of:
            __rb_rotate_right/left();
            // and:
            other = parent->rb_right/left;
        }
        .
        .
        if (C) {        // ...and this too!
            .
        }
    }

    Signed-off-by: Wolfram Strepp
    Acked-by: Peter Zijlstra
    Cc: Andrea Arcangeli
    Signed-off-by: Andrew Morton
    Signed-off-by: Linus Torvalds

    Wolfram Strepp
     
  • Add the ability to 'resize' the loop device on the fly.

    One practical application is a loop file with an XFS filesystem, already
    mounted: you can easily enlarge the file (append some bytes) and then call
    ioctl(fd, LOOP_SET_CAPACITY, new). The loop driver will learn about the
    new size, and you can use xfs_growfs later on, which will let you use the
    full capacity of the loop file without the need to unmount.

    Test app:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <errno.h>
    #include <unistd.h>
    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <sys/stat.h>
    #include <linux/types.h>
    #include <linux/fs.h>
    #include <linux/loop.h>

    #define _GNU_SOURCE
    #include <getopt.h>

    char *me;

    void usage(FILE *f)
    {
        fprintf(f, "%s [options] loop_dev [backend_file]\n"
            "-s, --set new_size_in_bytes\n"
            "\twhen backend_file is given, "
            "it will be expanded too while keeping the original contents\n",
            me);
    }

    struct option opts[] = {
        {
            .name = "set",
            .has_arg = 1,
            .flag = NULL,
            .val = 's'
        },
        {
            .name = "help",
            .has_arg = 0,
            .flag = NULL,
            .val = 'h'
        },
        { NULL, 0, NULL, 0 }    /* terminator required by getopt_long() */
    };

    void err_size(char *name, __u64 old)
    {
        fprintf(stderr, "size must be larger than current %s (%llu)\n",
            name, old);
    }

    int main(int argc, char *argv[])
    {
        int fd, err, c, i, bfd;
        ssize_t ssz;
        size_t sz;
        __u64 old, new, append;
        char a[BUFSIZ];
        struct stat st;
        FILE *out;
        char *backend, *dev;

        err = EINVAL;
        out = stderr;
        me = argv[0];
        new = 0;
        while ((c = getopt_long(argc, argv, "s:h", opts, &i)) != -1) {
            switch (c) {
            case 's':
                errno = 0;
                new = strtoull(optarg, NULL, 0);
                if (errno) {
                    err = errno;
                    perror(argv[i]);
                    goto out;
                }
                break;

            case 'h':
                err = 0;
                out = stdout;
                goto err;

            default:
                perror(argv[i]);
                goto err;
            }
        }

        if (optind < argc)
            dev = argv[optind++];
        else
            goto err;

        fd = open(dev, O_RDONLY);
        if (fd < 0) {
            err = errno;
            perror(dev);
            goto out;
        }

        err = ioctl(fd, BLKGETSIZE64, &old);
        if (err) {
            err = errno;
            perror("ioctl BLKGETSIZE64");
            goto out;
        }

        if (!new) {
            /* no size given: report the current capacity and exit */
            printf("%llu\n", old);
            goto out;
        }

        if (new < old) {
            err = EINVAL;
            err_size(dev, old);
            goto out;
        }

        if (optind < argc) {
            /* optionally grow the backing file to the new size first */
            backend = argv[optind++];
            bfd = open(backend, O_WRONLY|O_APPEND);
            if (bfd < 0) {
                err = errno;
                perror(backend);
                goto out;
            }
            err = fstat(bfd, &st);
            if (err) {
                err = errno;
                perror(backend);
                goto out;
            }
            if (new < st.st_size) {
                err = EINVAL;
                err_size(backend, st.st_size);
                goto out;
            }
            append = new - st.st_size;
            sz = sizeof(a);
            memset(a, 0, sz);   /* append zeroes, not stack garbage */
            while (append > 0) {
                if (append < sz)
                    sz = append;
                ssz = write(bfd, a, sz);
                if (ssz != sz) {
                    err = errno;
                    perror(backend);
                    goto out;
                }
                append -= sz;
            }
            err = fsync(bfd);
            if (err) {
                err = errno;
                perror(backend);
                goto out;
            }
        }

        /* tell the loop driver to pick up the new size */
        err = ioctl(fd, LOOP_SET_CAPACITY, new);
        if (err) {
            err = errno;
            perror("ioctl LOOP_SET_CAPACITY");
        }
        goto out;

    err:
        usage(out);
    out:
        return err;
    }

    Signed-off-by: J. R. Okajima
    Signed-off-by: Tomas Matejicek
    Cc: Karel Zak
    Cc: Jens Axboe
    Cc: Al Viro
    Cc: Christoph Hellwig
    Cc: Akinobu Mita
    Signed-off-by: Andrew Morton
    Signed-off-by: Linus Torvalds

    J. R. Okajima
     
  • These comments are useless now, remove them.

    Signed-off-by: WANG Cong
    Cc: Jeff Dike
    Signed-off-by: Andrew Morton
    Signed-off-by: Linus Torvalds

    WANG Cong
     
  • These error messages are from check_sysemu(), not check_ptrace().

    Signed-off-by: WANG Cong
    Cc: Jeff Dike
    Signed-off-by: Andrew Morton
    Signed-off-by: Linus Torvalds

    WANG Cong
     
  • uml uses a concatenated string literal to store the contents of .config,
    but the .config file content is variable and can be very long.

    Use an array of string literals instead.
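
    A minimal sketch of the idea (symbol names here are hypothetical, not
    the actual UML identifiers):

    /* before: one giant concatenated literal; compilers limit the length
     * of a single string constant, and a big .config can exceed it */
    static const char config_blob[] =
        "CONFIG_FOO=y\n"
        "CONFIG_BAR=m\n";

    /* after: an array of independent string literals, each safely short */
    static const char *config_lines[] = {
        "CONFIG_FOO=y\n",
        "CONFIG_BAR=m\n",
    };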

    Signed-off-by: WANG Cong
    Cc: Jeff Dike
    Signed-off-by: Andrew Morton
    Signed-off-by: Linus Torvalds

    WANG Cong
     
  • MAJOR_NR isn't needed anymore since very early 2.5 kernels.

    [akpm@linux-foundation.org: coding-style fixes]
    Signed-off-by: Christoph Hellwig
    Cc: Jeff Dike
    Signed-off-by: Andrew Morton
    Signed-off-by: Linus Torvalds

    Christoph Hellwig
     
  • Remove unused/duplicate cruft from asm/suspend.h:

    - x86_32: remove unused acpi code
    - powerpc: remove duplicate prototypes, see linux/suspend.h

    Signed-off-by: Magnus Damm
    Cc: Paul Mundt
    Acked-by: "Rafael J. Wysocki"
    Signed-off-by: Andrew Morton
    Signed-off-by: Linus Torvalds

    Magnus Damm
     
  • Make the following header file changes:

    - remove arch ifdefs and asm/suspend.h from linux/suspend.h
    - add asm/suspend.h to disk.c (for arch_prepare_suspend())
    - add linux/io.h to swsusp.c (for ioremap())
    - x86 32/64 bit compile fixes

    Signed-off-by: Magnus Damm
    Cc: Paul Mundt
    Acked-by: "Rafael J. Wysocki"
    Signed-off-by: Andrew Morton
    Signed-off-by: Linus Torvalds

    Magnus Damm
     
  • Convert alpha architecture to use u64 as unsigned long long. This is
    being done so that (a) all arches use u64 as unsigned long long and (b)
    printk of a u64 as %ll[ux] will not generate format warnings by gcc.

    The only gcc cross-compiler that I have is 4.0.2, which generates errors
    about miscompiling __weak references, so I have commented out that line in
    compiler-gcc4.h so that most of these compile, but more builds and real
    machine testing would be Real Good.

    [akpm@linux-foundation.org: fix warning]
    [akpm@linux-foundation.org: fix build]
    [akpm@linux-foundation.org: coding-style fixes]
    Signed-off-by: Randy Dunlap
    Cc: Richard Henderson
    Cc: Ivan Kokshaysky
    From: Andrew Morton
    Signed-off-by: Andrew Morton
    Signed-off-by: Linus Torvalds

    Randy Dunlap
     
  • - "_local" versions of xchg/cmpxchg functions duplicate code
    of non-local ones (quite a few pages of assembler), except
    memory barriers. We can generate these two variants from a
    single header file using simple macros;

    - convert xchg macro back to inline function using always_inline
    attribute;

    - use proper argument types for cmpxchg_u8/u16 functions
    to fix a problem with negative arguments.
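
    A hedged sketch of the single-source trick (file and macro names are
    hypothetical): the template header is included twice, with the
    name-building and barrier macros redefined between inclusions.

    #define ____xchg(type, args...)    __xchg ## type ## _local(args)
    #define XCHG_BARRIER()             /* _local variant: no barrier */
    #include "xchg_template.h"

    #undef ____xchg
    #undef XCHG_BARRIER
    #define ____xchg(type, args...)    __xchg ## type(args)
    #define XCHG_BARRIER()             smp_mb()  /* ordered variant */
    #include "xchg_template.h"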

    Signed-off-by: Ivan Kokshaysky
    Cc: Richard Henderson
    Signed-off-by: Andrew Morton
    Signed-off-by: Linus Torvalds

    Ivan Kokshaysky
     
  • Signed-off-by: Cheng Renquan
    Cc: Richard Henderson
    Cc: Ivan Kokshaysky
    Signed-off-by: Andrew Morton
    Signed-off-by: Linus Torvalds

    Cheng Renquan
     
  • When this macro isn't called with 'fixup', e.g. with foo, it will
    incorrectly expand to foo->foo.bits.errreg.

    Signed-off-by: Roel Kluin
    Cc: Ivan Kokshaysky
    Cc: Richard Henderson
    Signed-off-by: Andrew Morton
    Signed-off-by: Linus Torvalds

    Roel Kluin
     
  • Synopsis: if shmem_writepage calls swap_writepage directly, most shmem
    swap loads benefit, and a catastrophic interaction between SLUB and some
    flash storage is avoided.

    shmem_writepage() has always been peculiar in making no attempt to write:
    it has just transferred a shmem page from file cache to swap cache, then
    let that page make its way around the LRU again before being written and
    freed.

    The idea was that people use tmpfs because they want those pages to stay
    in RAM; so although we give it an overflow to swap, we should resist
    writing too soon, giving those pages a second chance before they can be
    reclaimed.

    That was always questionable, and I've toyed with this patch for years;
    but never had a clear justification to depart from the original design.

    It became more questionable in 2.6.28, when the split LRU patches classed
    shmem and tmpfs pages as SwapBacked rather than as file_cache: that in
    itself gives them more resistance to reclaim than normal file pages. I
    prepared this patch for 2.6.29, but the merge window arrived before I'd
    completed gathering statistics to justify sending it in.

    Then while comparing SLQB against SLUB, running SLUB on a laptop I'd
    habitually used with SLAB, I found SLUB to run my tmpfs kbuild swapping
    tests five times slower than SLAB or SLQB - other machines slower too, but
    nowhere near so bad. Simpler "cp -a" swapping tests showed the same.

    slub_max_order=0 brings sanity to all, but heavy swapping is too far from
    normal to justify such a tuning. The crucial factor on that laptop turns
    out to be that I'm using an SD card for swap. What happens is this:

    By default, SLUB uses order-2 pages for shmem_inode_cache (and many other
    fs inodes), so creating tmpfs files under memory pressure brings lumpy
    reclaim into play. One subpage of the order is chosen from the bottom of
    the LRU as usual; the other three are then picked out from their random
    positions on the LRUs.

    In a tmpfs load, many of these pages will be ones which already passed
    through shmem_writepage, so already have swap allocated. And though their
    offsets on swap were probably allocated sequentially, now that the pages
    are picked off at random, their swap offsets are scattered.

    But the flash storage on the SD card is very sensitive to having its
    writes merged: once swap is written at scattered offsets, performance
    falls apart. Rotating disk seeks increase too, but less disastrously.

    So: stop giving shmem/tmpfs pages a second pass around the LRU, write them
    out to swap as soon as their swap has been allocated.

    It's surely possible to devise an artificial load which runs faster the
    old way, one whose sizing is such that the tmpfs pages on their second
    pass are the ones that are wanted again, and other pages not.

    But I've not yet found such a load: on all machines, under the loads I've
    tried, immediate swap_writepage speeds up shmem swapping: especially when
    using the SLUB allocator (and more effectively than slub_max_order=0), but
    also with the others; and it also reduces the variance between runs. How
    much faster varies widely: a factor of five is rare, 5% is common.

    One load which might have suffered: imagine a swapping shmem load in a
    limited mem_cgroup on a machine with plenty of memory. Before 2.6.29 the
    swapcache was not charged, and such a load would have run quickest with
    the shmem swapcache never written to swap. But now swapcache is charged,
    so even this load benefits from shmem_writepage directly to swap.

    Apologies for the #ifndef CONFIG_SWAP swap_writepage() stub in swap.h:
    it's silly because that will never get called; but refactoring shmem.c
    sensibly according to CONFIG_SWAP will be a separate task.

    Signed-off-by: Hugh Dickins
    Acked-by: Pekka Enberg
    Acked-by: Rik van Riel
    Signed-off-by: Andrew Morton
    Signed-off-by: Linus Torvalds

    Hugh Dickins
     
  • try_to_free_pages() is used for the direct reclaim of up to
    SWAP_CLUSTER_MAX pages when watermarks are low. The caller to
    alloc_pages_nodemask() can specify a nodemask of nodes that are allowed to
    be used, but this is not passed to try_to_free_pages(). This can lead to
    unnecessary reclaim of pages that are unusable by the caller and, in the
    worst case, to allocation failure because progress was not made where it
    was needed.

    This patch passes the nodemask used for alloc_pages_nodemask() to
    try_to_free_pages().
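
    At the interface level the change looks roughly like this (nodemask
    parameter added, passed down from __alloc_pages_nodemask()):

    unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
                                    gfp_t gfp_mask, nodemask_t *nodemask);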

    Reviewed-by: KOSAKI Motohiro
    Acked-by: Mel Gorman
    Signed-off-by: KAMEZAWA Hiroyuki
    Cc: Rik van Riel
    Signed-off-by: Andrew Morton
    Signed-off-by: Linus Torvalds

    KAMEZAWA Hiroyuki
     
  • Instead of open-coding the lru-list-add pagevec batching when expanding a
    file mapping from zero, defer to the appropriate page cache function that
    also takes care of adding the page to the lru list.

    This is cleaner, saves code and reduces the stack footprint by 16 words
    worth of pagevec.
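
    A hedged before/after sketch of the pattern (simplified; error handling
    and page references omitted):

    /* before: open-coded batching through a local pagevec */
    struct pagevec lru_pvec;

    pagevec_init(&lru_pvec, 0);
    /* for each freshly allocated page: */
    add_to_page_cache(page, mapping, index, GFP_KERNEL);
    pagevec_add(&lru_pvec, page);
    /* ... */
    pagevec_lru_add_file(&lru_pvec);

    /* after: one call inserts into the page cache and the LRU */
    add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);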

    Signed-off-by: Johannes Weiner
    Acked-by: David Howells
    Cc: Nick Piggin
    Acked-by: KOSAKI Motohiro
    Cc: Rik van Riel
    Cc: Peter Zijlstra
    Cc: MinChan Kim
    Cc: Lee Schermerhorn
    Cc: Greg Ungerer
    Signed-off-by: Andrew Morton
    Signed-off-by: Linus Torvalds

    Johannes Weiner
     
  • When a shrinker has a negative number of objects to delete, the symbol
    name of the shrinker should be printed, not shrink_slab. This also makes
    the error message slightly more informative.
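
    A sketch of the improved message, assuming the kernel's %pF format for
    printing a function pointer symbolically:

    printk(KERN_ERR "shrink_slab: %pF negative objects to delete nr=%ld\n",
           shrinker->shrink, nr);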

    Cc: Ingo Molnar
    Signed-off-by: David Rientjes
    Signed-off-by: Andrew Morton
    Signed-off-by: Linus Torvalds

    David Rientjes
     
  • Make CONFIG_UNEVICTABLE_LRU available when CONFIG_MMU=n. There's no logical
    reason it shouldn't be available, and it can be used for ramfs.

    Signed-off-by: David Howells
    Reviewed-by: KOSAKI Motohiro
    Cc: Peter Zijlstra
    Cc: Greg Ungerer
    Cc: Johannes Weiner
    Cc: Rik van Riel
    Cc: Lee Schermerhorn
    Cc: Enrik Berkhan
    Signed-off-by: Andrew Morton
    Signed-off-by: Linus Torvalds

    David Howells
     
  • The mlock() facility does not exist for NOMMU since all mappings are
    effectively locked anyway, so we don't make the bits available when
    they're not useful.

    Signed-off-by: David Howells
    Reviewed-by: KOSAKI Motohiro
    Cc: Peter Zijlstra
    Cc: Greg Ungerer
    Cc: Johannes Weiner
    Cc: Rik van Riel
    Cc: Lee Schermerhorn
    Cc: Enrik Berkhan
    Signed-off-by: Andrew Morton
    Signed-off-by: Linus Torvalds

    David Howells
     
  • Use debug_kmap_atomic in kmap_atomic, kmap_atomic_pfn, and
    iomap_atomic_prot_pfn.

    Signed-off-by: Akinobu Mita
    Cc: Thomas Gleixner
    Cc: Ingo Molnar
    Cc: "H. Peter Anvin"
    Signed-off-by: Andrew Morton
    Signed-off-by: Linus Torvalds

    Akinobu Mita
     
  • x86 has debug_kmap_atomic_prot(), an error-checking function for
    kmap_atomic. It is useful for the other architectures as well, although
    it needs CONFIG_TRACE_IRQFLAGS_SUPPORT.

    This patch exposes it to the other architectures.
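
    A hedged sketch of an architecture's call site after this change
    (simplified; the real kmap_atomic_prot() also installs the fixmap
    mapping):

    void *arch_kmap_atomic(struct page *page, enum km_type type)
    {
        pagefault_disable();
        debug_kmap_atomic(type);  /* generic check, formerly x86-only */
        /* ... map the page into the per-type fixmap slot ... */
        return NULL;              /* placeholder for the mapped address */
    }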

    Signed-off-by: Akinobu Mita
    Cc: Thomas Gleixner
    Cc: Ingo Molnar
    Cc: "H. Peter Anvin"
    Signed-off-by: Andrew Morton
    Signed-off-by: Linus Torvalds

    Akinobu Mita
     
  • Fix warnings and return values in sysfs bin_page_mkwrite(), fixing
    fs/sysfs/bin.c: In function `bin_page_mkwrite':
    fs/sysfs/bin.c:250: warning: passing argument 2 of `bb->vm_ops->page_mkwrite' from incompatible pointer type
    fs/sysfs/bin.c: At top level:
    fs/sysfs/bin.c:280: warning: initialization from incompatible pointer type

    This expects my earlier [PATCH next] "sysfs: fix some bin_vm_ops errors"
    to be applied.

    Signed-off-by: Hugh Dickins
    Cc: Nick Piggin
    Cc: "Eric W. Biederman"
    Signed-off-by: Andrew Morton
    Signed-off-by: Linus Torvalds

    Hugh Dickins
     
  • page_mkwrite is called with neither the page lock nor the ptl held. This
    means a page can be concurrently truncated or invalidated out from
    underneath it. Callers are supposed to prevent truncate races themselves;
    however, previously the only thing they could do when hitting one was to
    raise SIGBUS. SIGBUS is wrong for the case where the page has been
    invalidated or truncated within i_size (eg. a hole punched). Callers may
    also have to perform memory allocations in this path, where again SIGBUS
    would be wrong.

    The previous patch ("mm: page_mkwrite change prototype to match fault")
    made it possible to properly specify errors. Convert the generic buffer.c
    code and btrfs to return sane error values (in the case of page removed
    from pagecache, VM_FAULT_NOPAGE will cause the fault handler to exit
    without doing anything, and the fault will be retried properly).

    This fixes core code, and converts btrfs as a template/example. All other
    filesystems defining their own page_mkwrite should be fixed in a similar
    manner.

    Acked-by: Chris Mason
    Signed-off-by: Nick Piggin
    Signed-off-by: Andrew Morton
    Signed-off-by: Linus Torvalds

    Nick Piggin
     
  • Change the page_mkwrite prototype to take a struct vm_fault, and return
    VM_FAULT_xxx flags. There should be no functional change.

    This makes it possible to return much more detailed error information to
    the VM (and also can provide more information eg. virtual_address to the
    driver, which might be important in some special cases).

    This is required for a subsequent fix. And will also make it easier to
    merge page_mkwrite() with fault() in future.
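
    The callback's shape before and after, roughly:

    /* old: int (*page_mkwrite)(struct vm_area_struct *vma,
     *                          struct page *page); */

    /* new: the vm_fault describes the fault (eg. vmf->virtual_address)
     * and VM_FAULT_xxx flags are returned */
    int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);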

    Signed-off-by: Nick Piggin
    Cc: Chris Mason
    Cc: Trond Myklebust
    Cc: Miklos Szeredi
    Cc: Steven Whitehouse
    Cc: Mark Fasheh
    Cc: Joel Becker
    Cc: Artem Bityutskiy
    Cc: Felix Blyakher
    Signed-off-by: Andrew Morton
    Signed-off-by: Linus Torvalds

    Nick Piggin
     
  • On PowerPC we allocate large boot time hashes on node 0. This leads to an
    imbalance in the free memory, for example on a 64GB box (4 x 16GB nodes):

    Free memory:
    Node 0: 97.03%
    Node 1: 98.54%
    Node 2: 98.42%
    Node 3: 98.53%

    If we switch to using vmalloc (like ia64 and x86-64) things are more
    balanced:

    Free memory:
    Node 0: 97.53%
    Node 1: 98.35%
    Node 2: 98.33%
    Node 3: 98.33%

    For many HPC applications we are limited by the free available memory on
    the smallest node, so even though the same amount of memory is used the
    better balancing helps.

    Since all 64bit NUMA capable architectures should have sufficient vmalloc
    space, it makes sense to enable it via CONFIG_64BIT.

    Signed-off-by: Anton Blanchard
    Acked-by: David S. Miller
    Acked-by: Benjamin Herrenschmidt
    Acked-by: Ralf Baechle
    Cc: Heiko Carstens
    Cc: Martin Schwidefsky
    Cc: Ivan Kokshaysky
    Cc: Richard Henderson
    Signed-off-by: Andrew Morton
    Signed-off-by: Linus Torvalds

    Anton Blanchard
     
  • Addresses http://bugzilla.kernel.org/show_bug.cgi?id=9838

    On i386, HZ=1000, jiffies_to_clock_t() converts time in a somewhat strange
    way from the user's point of view:

    # echo 500 >/proc/sys/vm/dirty_writeback_centisecs
    # cat /proc/sys/vm/dirty_writeback_centisecs
    499

    So, we have 5000 jiffies converted to only 499 clock ticks and reported
    back.

    TICK_NSEC = 999848
    ACTHZ = 256039
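
    Worked through with those numbers (assuming USER_HZ = 100), the
    truncation is visible:

    5000 jiffies * 999848 ns (TICK_NSEC) = 4999240000 ns ~ 4.99924 s
    4.99924 s at USER_HZ = 100 -> 499.924 ticks, truncated to 499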

    Keeping the in-kernel variable in the units passed from userspace would
    of course fix the issue, but this probably won't be right for every
    sysctl.

    [akpm@linux-foundation.org: coding-style fixes]
    Signed-off-by: Alexey Dobriyan
    Cc: Peter Zijlstra
    Cc: Nick Piggin
    Signed-off-by: Andrew Morton
    Signed-off-by: Linus Torvalds

    Alexey Dobriyan
     
  • CONFIG_DEBUG_PAGEALLOC is now supported by x86, powerpc, sparc64, and
    s390. This patch implements it for the rest of the architectures by
    filling the pages with poison byte patterns after free_pages() and
    verifying the poison patterns before alloc_pages().

    This generic implementation cannot detect invalid page accesses
    immediately; an invalid read may instead cause a bad dereference through
    poisoned memory, and an invalid write can only be detected after a long
    delay.
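
    A minimal standalone sketch of the scheme (the poison value follows the
    kernel's PAGE_POISON, 0xaa; the helpers are simplified):

    #include <stdbool.h>
    #include <stddef.h>
    #include <string.h>

    #define PAGE_POISON 0xaa

    /* after free_pages(): stamp the page with the poison pattern */
    static void poison_page(unsigned char *addr, size_t size)
    {
        memset(addr, PAGE_POISON, size);
    }

    /* before alloc_pages(): any byte that changed means something wrote
     * to the page while it was free */
    static bool verify_poison(const unsigned char *addr, size_t size)
    {
        size_t i;

        for (i = 0; i < size; i++)
            if (addr[i] != PAGE_POISON)
                return false;
        return true;
    }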

    Signed-off-by: Akinobu Mita
    Signed-off-by: Andrew Morton
    Signed-off-by: Linus Torvalds

    Akinobu Mita
     
  • I notice there are many places doing copy_from_user() which follows
    kmalloc():

    dst = kmalloc(len, GFP_KERNEL);
    if (!dst)
        return -ENOMEM;
    if (copy_from_user(dst, src, len)) {
        kfree(dst);
        return -EFAULT;
    }

    memdup_user() is a wrapper of the above code. With this new function, we
    don't have to write 'len' twice, which can lead to typos/mistakes. It
    also produces smaller code and kernel text.

    A quick grep shows 250+ places where memdup_user() *may* be used. I'll
    prepare a patchset to do this conversion.
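
    With the helper, the call site above collapses to (note that
    memdup_user() reports failure with an ERR_PTR, not NULL):

    dst = memdup_user(src, len);
    if (IS_ERR(dst))
        return PTR_ERR(dst);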

    Signed-off-by: Li Zefan
    Cc: KOSAKI Motohiro
    Cc: Americo Wang
    Cc: Alexey Dobriyan
    Signed-off-by: Andrew Morton
    Signed-off-by: Linus Torvalds

    Li Zefan
     
  • chg is unsigned, so it cannot be less than 0.

    Also, since region_chg returns long, let vma_needs_reservation() forward
    this to alloc_huge_page(). Store it as long as well; all callers cast it
    to long anyway.

    Signed-off-by: Roel Kluin
    Cc: Andy Whitcroft
    Cc: Mel Gorman
    Cc: Adam Litke
    Cc: Johannes Weiner
    Signed-off-by: Andrew Morton
    Signed-off-by: Linus Torvalds

    Roel Kluin
     
  • pagevec_swap_free() is now unused.

    Signed-off-by: KOSAKI Motohiro
    Cc: Johannes Weiner
    Cc: Rik van Riel
    Acked-by: Hugh Dickins
    Signed-off-by: Andrew Morton
    Signed-off-by: Linus Torvalds

    KOSAKI Motohiro
     
  • The pagevec_swap_free() at the end of shrink_active_list() was introduced
    in 68a22394 "vmscan: free swap space on swap-in/activation" when
    shrink_active_list() was still rotating referenced active pages.

    In 7e9cd48 "vmscan: fix pagecache reclaim referenced bit check" this was
    changed, the rotating removed but the pagevec_swap_free() after the
    rotation loop was forgotten, applying now to the pagevec of the
    deactivation loop instead.

    Now swap space is freed for deactivated pages, and only for those that
    happen to be on the pagevec after the deactivation loop.

    Complete 7e9cd48 and remove the rest of the swap freeing.

    Signed-off-by: Johannes Weiner
    Acked-by: Rik van Riel
    Cc: Hugh Dickins
    Signed-off-by: Andrew Morton
    Signed-off-by: Linus Torvalds

    Johannes Weiner
     
  • In shrink_active_list() after the deactivation loop, we strip buffer heads
    from the potentially remaining pages in the pagevec.

    Currently, this drops the zone's lru lock for stripping, only to reacquire
    it again afterwards to update statistics.

    It is not necessary to strip the pages before updating the stats, so move
    the whole thing out of the protected region and save the extra locking.

    Signed-off-by: Johannes Weiner
    Reviewed-by: MinChan Kim
    Signed-off-by: Andrew Morton
    Signed-off-by: Linus Torvalds

    Johannes Weiner
     
  • Allow non-root users with sufficient mlock rlimits to allocate
    hugetlb-backed shm for now, but deprecate this: the mlock-based rlimit
    checks for SHM_HUGETLB are not consistent with mmap-based huge page
    allocations.

    Signed-off-by: Ravikiran Thirumalai
    Reviewed-by: Mel Gorman
    Cc: William Lee Irwin III
    Cc: Adam Litke
    Signed-off-by: Andrew Morton
    Signed-off-by: Linus Torvalds

    Ravikiran G Thirumalai
     
  • Fix hugetlb subsystem so that non root users belonging to
    hugetlb_shm_group can actually allocate hugetlb backed shm.

    Currently non root users cannot even map one large page using SHM_HUGETLB
    when they belong to the gid in /proc/sys/vm/hugetlb_shm_group. This is
    because allocation size is verified against RLIMIT_MEMLOCK resource limit
    even if the user belongs to hugetlb_shm_group.

    This patch
    1. Fixes the hugetlb subsystem so that users with CAP_IPC_LOCK and users
       belonging to hugetlb_shm_group don't need to be restricted by the
       RLIMIT_MEMLOCK resource limit
    2. Disables mlock-based rlimit checking (which will be reinstated and
       marked deprecated in a subsequent patch).

    Signed-off-by: Ravikiran Thirumalai
    Reviewed-by: Mel Gorman
    Cc: William Lee Irwin III
    Cc: Adam Litke
    Signed-off-by: Andrew Morton
    Signed-off-by: Linus Torvalds

    Ravikiran G Thirumalai
     
  • Add a helper function, account_page_dirtied(), and use it from two
    callsites. reiser4 adds a function which introduces a third callsite.
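
    The helper's shape, roughly:

    /* charge a page newly dirtied in @mapping to the dirty-page
     * accounting */
    void account_page_dirtied(struct page *page,
                              struct address_space *mapping);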

    Signed-off-by: Edward Shishkin
    Cc: Nick Piggin
    Signed-off-by: Andrew Morton
    Signed-off-by: Linus Torvalds

    Edward Shishkin
     
  • During page allocation, there are two stages of direct reclaim that are
    applied to each zone in the preferred list. The first stage using
    zone_reclaim() reclaims unmapped file backed pages and slab pages if over
    defined limits as these are cheaper to reclaim. The caller specifies the
    order of the target allocation but the scan control is not being correctly
    initialised.

    The impact is that the correct number of pages are being reclaimed but
    lumpy reclaim is not being applied. This increases the chances that a
    full direct reclaim via try_to_free_pages() will be required.

    This patch initialises the order field of the scan control as requested by
    the caller.
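
    A hedged sketch of the fix in zone_reclaim()'s scan-control setup
    (other initialisers elided):

    struct scan_control sc = {
        /* ... existing fields ... */
        .order = order,  /* previously left at zero, disabling lumpy reclaim */
    };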

    [mel@csn.ul.ie: rewrote changelog]
    Signed-off-by: Johannes Weiner
    Acked-by: Mel Gorman
    Cc: Rik van Riel
    Cc: Andy Whitcroft
    Cc: KOSAKI Motohiro
    Cc: KAMEZAWA Hiroyuki
    Signed-off-by: Andrew Morton
    Signed-off-by: Linus Torvalds

    Johannes Weiner
     
  • At first look, mark_page_accessed() in follow_page() seems a bit strange.
    It seems pte_mkyoung() would be more consistent with other kernel code.

    However, it is intentional. The commit log said:

    ------------------------------------------------
    commit 9e45f61d69be9024a2e6bef3831fb04d90fac7a8
    Author: akpm
    Date: Fri Aug 15 07:24:59 2003 +0000

    [PATCH] Use mark_page_accessed() in follow_page()

    Touching a page via follow_page() counts as a reference so we should be
    either setting the referenced bit in the pte or running mark_page_accessed().

    Altering the pte is tricky because we haven't implemented an atomic
    pte_mkyoung(). And mark_page_accessed() is better anyway because it has more
    aging state: it can move the page onto the active list.

    BKrev: 3f3c8acbplT8FbwBVGtth7QmnqWkIw
    ------------------------------------------------

    The atomic issue is still true nowadays. Adding a comment helps to make
    the intention of the code clear.

    [akpm@linux-foundation.org: clarify text]
    Signed-off-by: KOSAKI Motohiro
    Signed-off-by: Hugh Dickins
    Cc: Nick Piggin
    Signed-off-by: Andrew Morton
    Signed-off-by: Linus Torvalds

    KOSAKI Motohiro
     
  • shrink_inactive_list() scans in sc->swap_cluster_max chunks until it hits
    the scan limit it was passed.

    shrink_inactive_list()
    {
        do {
            isolate_pages(swap_cluster_max)
            shrink_page_list()
        } while (nr_scanned < max_scan);
    }

    This assumes that swap_cluster_max is not bigger than the scan limit
    because the latter is checked only after at least one iteration.

    In shrink_all_memory() sc->swap_cluster_max is initialized to the overall
    reclaim goal in the beginning but not decreased while reclaim is making
    progress which leads to subsequent calls to shrink_inactive_list()
    reclaiming way too much in the one iteration that is done unconditionally.

    Set sc->swap_cluster_max to the proper goal before each call down the
    chain:

        shrink_all_zones()
          shrink_list()
            shrink_inactive_list()

    While the current shrink_all_memory() happily reclaims more than actually
    requested, this patch fixes it to never exceed the goal:

    unpatched
    wanted=10000 reclaimed=13356
    wanted=10000 reclaimed=19711
    wanted=10000 reclaimed=10289
    wanted=10000 reclaimed=17306
    wanted=10000 reclaimed=10700
    wanted=10000 reclaimed=10004
    wanted=10000 reclaimed=13301
    wanted=10000 reclaimed=10976
    wanted=10000 reclaimed=10605
    wanted=10000 reclaimed=10088
    wanted=10000 reclaimed=15000

    patched
    wanted=10000 reclaimed=10000
    wanted=10000 reclaimed=9599
    wanted=10000 reclaimed=8476
    wanted=10000 reclaimed=8326
    wanted=10000 reclaimed=10000
    wanted=10000 reclaimed=10000
    wanted=10000 reclaimed=9919
    wanted=10000 reclaimed=10000
    wanted=10000 reclaimed=10000
    wanted=10000 reclaimed=10000
    wanted=10000 reclaimed=10000
    wanted=10000 reclaimed=9624
    wanted=10000 reclaimed=10000
    wanted=10000 reclaimed=10000
    wanted=8500 reclaimed=8092
    wanted=316 reclaimed=316

    Signed-off-by: Johannes Weiner
    Reviewed-by: MinChan Kim
    Acked-by: Nigel Cunningham
    Acked-by: "Rafael J. Wysocki"
    Reviewed-by: KOSAKI Motohiro
    Cc: Rik van Riel
    Signed-off-by: Andrew Morton
    Signed-off-by: Linus Torvalds

    Johannes Weiner