Commit 5ca302c8e502ca53b7d75f12127ec0289904003a

Authored by Glauber Costa
Committed by Al Viro
1 parent a0b02131c5

list_lru: dynamically adjust node arrays

We currently use a compile-time constant to size the node array for the
list_lru structure.  Due to this, we don't need to allocate any memory at
initialization time.  But as a consequence, the structures that contain
embedded list_lru lists can become way too big (the superblock for
instance contains two of them).

This patch aims at ameliorating this situation by dynamically allocating
the node arrays with the firmware-provided nr_node_ids.

Signed-off-by: Glauber Costa <glommer@openvz.org>
Cc: Dave Chinner <dchinner@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: Carlos Maiolino <cmaiolino@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Rientjes <rientjes@google.com>
Cc: Gleb Natapov <gleb@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: J. Bruce Fields <bfields@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Kent Overstreet <koverstreet@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>

Showing 5 changed files with 37 additions and 17 deletions Side-by-side Diff

... ... @@ -195,8 +195,12 @@
195 195 INIT_HLIST_NODE(&s->s_instances);
196 196 INIT_HLIST_BL_HEAD(&s->s_anon);
197 197 INIT_LIST_HEAD(&s->s_inodes);
198   - list_lru_init(&s->s_dentry_lru);
199   - list_lru_init(&s->s_inode_lru);
  198 +
  199 + if (list_lru_init(&s->s_dentry_lru))
  200 + goto err_out;
  201 + if (list_lru_init(&s->s_inode_lru))
  202 + goto err_out_dentry_lru;
  203 +
200 204 INIT_LIST_HEAD(&s->s_mounts);
201 205 init_rwsem(&s->s_umount);
202 206 lockdep_set_class(&s->s_umount, &type->s_umount_key);
... ... @@ -236,6 +240,9 @@
236 240 }
237 241 out:
238 242 return s;
  243 +
  244 +err_out_dentry_lru:
  245 + list_lru_destroy(&s->s_dentry_lru);
239 246 err_out:
240 247 security_sb_free(s);
241 248 #ifdef CONFIG_SMP
... ... @@ -1592,6 +1592,7 @@
1592 1592 struct xfs_mount *mp,
1593 1593 struct xfs_buftarg *btp)
1594 1594 {
  1595 + list_lru_destroy(&btp->bt_lru);
1595 1596 unregister_shrinker(&btp->bt_shrinker);
1596 1597  
1597 1598 if (mp->m_flags & XFS_MOUNT_BARRIER)
1598 1599  
... ... @@ -1666,9 +1667,12 @@
1666 1667 if (!btp->bt_bdi)
1667 1668 goto error;
1668 1669  
1669   - list_lru_init(&btp->bt_lru);
1670 1670 if (xfs_setsize_buftarg_early(btp, bdev))
1671 1671 goto error;
  1672 +
  1673 + if (list_lru_init(&btp->bt_lru))
  1674 + goto error;
  1675 +
1672 1676 btp->bt_shrinker.count_objects = xfs_buftarg_shrink_count;
1673 1677 btp->bt_shrinker.scan_objects = xfs_buftarg_shrink_scan;
1674 1678 btp->bt_shrinker.seeks = DEFAULT_SEEKS;
... ... @@ -831,11 +831,18 @@
831 831  
832 832 qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);
833 833  
  834 + if ((error = list_lru_init(&qinf->qi_lru))) {
  835 + kmem_free(qinf);
  836 + mp->m_quotainfo = NULL;
  837 + return error;
  838 + }
  839 +
834 840 /*
835 841 * See if quotainodes are setup, and if not, allocate them,
836 842 * and change the superblock accordingly.
837 843 */
838 844 if ((error = xfs_qm_init_quotainos(mp))) {
  845 + list_lru_destroy(&qinf->qi_lru);
839 846 kmem_free(qinf);
840 847 mp->m_quotainfo = NULL;
841 848 return error;
... ... @@ -846,8 +853,6 @@
846 853 INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
847 854 mutex_init(&qinf->qi_tree_lock);
848 855  
849   - list_lru_init(&qinf->qi_lru);
850   -
851 856 /* mutex used to serialize quotaoffs */
852 857 mutex_init(&qinf->qi_quotaofflock);
853 858  
... ... @@ -935,6 +940,7 @@
935 940 qi = mp->m_quotainfo;
936 941 ASSERT(qi != NULL);
937 942  
  943 + list_lru_destroy(&qi->qi_lru);
938 944 unregister_shrinker(&qi->qi_shrinker);
939 945  
940 946 if (qi->qi_uquotaip) {
include/linux/list_lru.h
... ... @@ -27,20 +27,11 @@
27 27 } ____cacheline_aligned_in_smp;
28 28  
29 29 struct list_lru {
30   - /*
31   - * Because we use a fixed-size array, this struct can be very big if
32   - * MAX_NUMNODES is big. If this becomes a problem this is fixable by
33   - * turning this into a pointer and dynamically allocating this to
34   - * nr_node_ids. This quantity is firwmare-provided, and still would
35   - * provide room for all nodes at the cost of a pointer lookup and an
36   - * extra allocation. Because that allocation will most likely come from
37   - * a different slab cache than the main structure holding this
38   - * structure, we may very well fail.
39   - */
40   - struct list_lru_node node[MAX_NUMNODES];
  30 + struct list_lru_node *node;
41 31 nodemask_t active_nodes;
42 32 };
43 33  
  34 +void list_lru_destroy(struct list_lru *lru);
44 35 int list_lru_init(struct list_lru *lru);
45 36  
46 37 /**
... ... @@ -8,6 +8,7 @@
8 8 #include <linux/module.h>
9 9 #include <linux/mm.h>
10 10 #include <linux/list_lru.h>
  11 +#include <linux/slab.h>
11 12  
12 13 bool list_lru_add(struct list_lru *lru, struct list_head *item)
13 14 {
14 15  
15 16  
... ... @@ -115,9 +116,14 @@
115 116 int list_lru_init(struct list_lru *lru)
116 117 {
117 118 int i;
  119 + size_t size = sizeof(*lru->node) * nr_node_ids;
118 120  
  121 + lru->node = kzalloc(size, GFP_KERNEL);
  122 + if (!lru->node)
  123 + return -ENOMEM;
  124 +
119 125 nodes_clear(lru->active_nodes);
120   - for (i = 0; i < MAX_NUMNODES; i++) {
  126 + for (i = 0; i < nr_node_ids; i++) {
121 127 spin_lock_init(&lru->node[i].lock);
122 128 INIT_LIST_HEAD(&lru->node[i].list);
123 129 lru->node[i].nr_items = 0;
... ... @@ -125,4 +131,10 @@
125 131 return 0;
126 132 }
127 133 EXPORT_SYMBOL_GPL(list_lru_init);
  134 +
  135 +void list_lru_destroy(struct list_lru *lru)
  136 +{
  137 + kfree(lru->node);
  138 +}
  139 +EXPORT_SYMBOL_GPL(list_lru_destroy);