Commit 1495f230fa7750479c79e3656286b9183d662077

Authored by Ying Han
Committed by Linus Torvalds
1 parent a09ed5e000

vmscan: change shrinker API by passing shrink_control struct

Change each shrinker's API by consolidating the existing parameters into
a shrink_control struct.  This simplifies adding further features without
having to touch every file that implements a shrinker.
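
The conversion itself is mechanical; schematically (a minimal sketch with
a hypothetical my_cache_shrink, mirroring the per-file changes below):

	/* before: parameters passed individually */
	static int my_cache_shrink(struct shrinker *shrink,
				   int nr_to_scan, gfp_t gfp_mask);

	/* after: parameters consolidated into shrink_control */
	static int my_cache_shrink(struct shrinker *shrink,
				   struct shrink_control *sc);

with nr_to_scan and gfp_mask now read from sc->nr_to_scan and
sc->gfp_mask inside the callback.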

[akpm@linux-foundation.org: fix build]
[akpm@linux-foundation.org: fix warning]
[kosaki.motohiro@jp.fujitsu.com: fix up new shrinker API]
[akpm@linux-foundation.org: fix xfs warning]
[akpm@linux-foundation.org: update gfs2]
Signed-off-by: Ying Han <yinghan@google.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Acked-by: Pavel Emelyanov <xemul@openvz.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 21 changed files with 95 additions and 61 deletions

arch/x86/kvm/mmu.c
... ... @@ -3545,10 +3545,11 @@
3545 3545 return kvm_mmu_prepare_zap_page(kvm, page, invalid_list);
3546 3546 }
3547 3547  
3548   -static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
  3548 +static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
3549 3549 {
3550 3550 struct kvm *kvm;
3551 3551 struct kvm *kvm_freed = NULL;
  3552 + int nr_to_scan = sc->nr_to_scan;
3552 3553  
3553 3554 if (nr_to_scan == 0)
3554 3555 goto out;
drivers/gpu/drm/i915/i915_gem.c
... ... @@ -56,10 +56,8 @@
56 56 static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);
57 57  
58 58 static int i915_gem_inactive_shrink(struct shrinker *shrinker,
59   - int nr_to_scan,
60   - gfp_t gfp_mask);
  59 + struct shrink_control *sc);
61 60  
62   -
63 61 /* some bookkeeping */
64 62 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
65 63 size_t size)
... ... @@ -4092,9 +4090,7 @@
4092 4090 }
4093 4091  
4094 4092 static int
4095   -i915_gem_inactive_shrink(struct shrinker *shrinker,
4096   - int nr_to_scan,
4097   - gfp_t gfp_mask)
  4093 +i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
4098 4094 {
4099 4095 struct drm_i915_private *dev_priv =
4100 4096 container_of(shrinker,
... ... @@ -4102,6 +4098,7 @@
4102 4098 mm.inactive_shrinker);
4103 4099 struct drm_device *dev = dev_priv->dev;
4104 4100 struct drm_i915_gem_object *obj, *next;
  4101 + int nr_to_scan = sc->nr_to_scan;
4105 4102 int cnt;
4106 4103  
4107 4104 if (!mutex_trylock(&dev->struct_mutex))
drivers/gpu/drm/ttm/ttm_page_alloc.c
... ... @@ -395,12 +395,14 @@
395 395 /**
396 396 * Callback for mm to request pool to reduce number of pages held.
397 397 */
398   -static int ttm_pool_mm_shrink(struct shrinker *shrink, int shrink_pages, gfp_t gfp_mask)
  398 +static int ttm_pool_mm_shrink(struct shrinker *shrink,
  399 + struct shrink_control *sc)
399 400 {
400 401 static atomic_t start_pool = ATOMIC_INIT(0);
401 402 unsigned i;
402 403 unsigned pool_offset = atomic_add_return(1, &start_pool);
403 404 struct ttm_page_pool *pool;
  405 + int shrink_pages = sc->nr_to_scan;
404 406  
405 407 pool_offset = pool_offset % NUM_POOLS;
406 408 /* select start pool in round robin fashion */
drivers/staging/zcache/zcache.c
... ... @@ -1181,9 +1181,12 @@
1181 1181 /*
1182 1182 * zcache shrinker interface (only useful for ephemeral pages, so zbud only)
1183 1183 */
1184   -static int shrink_zcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
  1184 +static int shrink_zcache_memory(struct shrinker *shrink,
  1185 + struct shrink_control *sc)
1185 1186 {
1186 1187 int ret = -1;
  1188 + int nr = sc->nr_to_scan;
  1189 + gfp_t gfp_mask = sc->gfp_mask;
1187 1190  
1188 1191 if (nr >= 0) {
1189 1192 if (!(gfp_mask & __GFP_FS))
fs/dcache.c
... ... @@ -1220,7 +1220,7 @@
1220 1220 EXPORT_SYMBOL(shrink_dcache_parent);
1221 1221  
1222 1222 /*
1223   - * Scan `nr' dentries and return the number which remain.
  1223 + * Scan `sc->nr_to_scan' dentries and return the number which remain.
1224 1224 *
1225 1225 * We need to avoid reentering the filesystem if the caller is performing a
1226 1226 * GFP_NOFS allocation attempt. One example deadlock is:
... ... @@ -1231,8 +1231,12 @@
1231 1231 *
1232 1232 * In this case we return -1 to tell the caller that we bailed.
1233 1233 */
1234   -static int shrink_dcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
  1234 +static int shrink_dcache_memory(struct shrinker *shrink,
  1235 + struct shrink_control *sc)
1235 1236 {
  1237 + int nr = sc->nr_to_scan;
  1238 + gfp_t gfp_mask = sc->gfp_mask;
  1239 +
1236 1240 if (nr) {
1237 1241 if (!(gfp_mask & __GFP_FS))
1238 1242 return -1;
fs/drop_caches.c
... ... @@ -42,11 +42,10 @@
42 42 int nr_objects;
43 43 struct shrink_control shrink = {
44 44 .gfp_mask = GFP_KERNEL,
45   - .nr_scanned = 1000,
46 45 };
47 46  
48 47 do {
49   - nr_objects = shrink_slab(&shrink, 1000);
  48 + nr_objects = shrink_slab(&shrink, 1000, 1000);
50 49 } while (nr_objects > 10);
51 50 }
52 51  
fs/gfs2/glock.c
... ... @@ -1346,11 +1346,14 @@
1346 1346 }
1347 1347  
1348 1348  
1349   -static int gfs2_shrink_glock_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
  1349 +static int gfs2_shrink_glock_memory(struct shrinker *shrink,
  1350 + struct shrink_control *sc)
1350 1351 {
1351 1352 struct gfs2_glock *gl;
1352 1353 int may_demote;
1353 1354 int nr_skipped = 0;
  1355 + int nr = sc->nr_to_scan;
  1356 + gfp_t gfp_mask = sc->gfp_mask;
1354 1357 LIST_HEAD(skipped);
1355 1358  
1356 1359 if (nr == 0)
fs/gfs2/quota.c
... ... @@ -38,6 +38,7 @@
38 38  
39 39 #include <linux/sched.h>
40 40 #include <linux/slab.h>
  41 +#include <linux/mm.h>
41 42 #include <linux/spinlock.h>
42 43 #include <linux/completion.h>
43 44 #include <linux/buffer_head.h>
... ... @@ -77,19 +78,20 @@
77 78 static atomic_t qd_lru_count = ATOMIC_INIT(0);
78 79 static DEFINE_SPINLOCK(qd_lru_lock);
79 80  
80   -int gfs2_shrink_qd_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
  81 +int gfs2_shrink_qd_memory(struct shrinker *shrink, struct shrink_control *sc)
81 82 {
82 83 struct gfs2_quota_data *qd;
83 84 struct gfs2_sbd *sdp;
  85 + int nr_to_scan = sc->nr_to_scan;
84 86  
85   - if (nr == 0)
  87 + if (nr_to_scan == 0)
86 88 goto out;
87 89  
88   - if (!(gfp_mask & __GFP_FS))
  90 + if (!(sc->gfp_mask & __GFP_FS))
89 91 return -1;
90 92  
91 93 spin_lock(&qd_lru_lock);
92   - while (nr && !list_empty(&qd_lru_list)) {
  94 + while (nr_to_scan && !list_empty(&qd_lru_list)) {
93 95 qd = list_entry(qd_lru_list.next,
94 96 struct gfs2_quota_data, qd_reclaim);
95 97 sdp = qd->qd_gl->gl_sbd;
... ... @@ -110,7 +112,7 @@
110 112 spin_unlock(&qd_lru_lock);
111 113 kmem_cache_free(gfs2_quotad_cachep, qd);
112 114 spin_lock(&qd_lru_lock);
113   - nr--;
  115 + nr_to_scan--;
114 116 }
115 117 spin_unlock(&qd_lru_lock);
116 118  
fs/gfs2/quota.h
... ... @@ -12,6 +12,7 @@
12 12  
13 13 struct gfs2_inode;
14 14 struct gfs2_sbd;
  15 +struct shrink_control;
15 16  
16 17 #define NO_QUOTA_CHANGE ((u32)-1)
17 18  
... ... @@ -51,7 +52,8 @@
51 52 return ret;
52 53 }
53 54  
54   -extern int gfs2_shrink_qd_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask);
  55 +extern int gfs2_shrink_qd_memory(struct shrinker *shrink,
  56 + struct shrink_control *sc);
55 57 extern const struct quotactl_ops gfs2_quotactl_ops;
56 58  
57 59 #endif /* __QUOTA_DOT_H__ */
fs/inode.c
... ... @@ -751,8 +751,12 @@
751 751 * This function is passed the number of inodes to scan, and it returns the
752 752 * total number of remaining possibly-reclaimable inodes.
753 753 */
754   -static int shrink_icache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
  754 +static int shrink_icache_memory(struct shrinker *shrink,
  755 + struct shrink_control *sc)
755 756 {
  757 + int nr = sc->nr_to_scan;
  758 + gfp_t gfp_mask = sc->gfp_mask;
  759 +
756 760 if (nr) {
757 761 /*
758 762 * Nasty deadlock avoidance. We may hold various FS locks,
fs/mbcache.c
... ... @@ -90,7 +90,8 @@
90 90 * What the mbcache registers as to get shrunk dynamically.
91 91 */
92 92  
93   -static int mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask);
  93 +static int mb_cache_shrink_fn(struct shrinker *shrink,
  94 + struct shrink_control *sc);
94 95  
95 96 static struct shrinker mb_cache_shrinker = {
96 97 .shrink = mb_cache_shrink_fn,
... ... @@ -156,18 +157,19 @@
156 157 * gets low.
157 158 *
158 159 * @shrink: (ignored)
159   - * @nr_to_scan: Number of objects to scan
160   - * @gfp_mask: (ignored)
  160 + * @sc: shrink_control passed from reclaim
161 161 *
162 162 * Returns the number of objects which are present in the cache.
163 163 */
164 164 static int
165   -mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
  165 +mb_cache_shrink_fn(struct shrinker *shrink, struct shrink_control *sc)
166 166 {
167 167 LIST_HEAD(free_list);
168 168 struct mb_cache *cache;
169 169 struct mb_cache_entry *entry, *tmp;
170 170 int count = 0;
  171 + int nr_to_scan = sc->nr_to_scan;
  172 + gfp_t gfp_mask = sc->gfp_mask;
171 173  
172 174 mb_debug("trying to free %d entries", nr_to_scan);
173 175 spin_lock(&mb_cache_spinlock);
fs/nfs/dir.c
... ... @@ -2042,11 +2042,14 @@
2042 2042 }
2043 2043 }
2044 2044  
2045   -int nfs_access_cache_shrinker(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
  2045 +int nfs_access_cache_shrinker(struct shrinker *shrink,
  2046 + struct shrink_control *sc)
2046 2047 {
2047 2048 LIST_HEAD(head);
2048 2049 struct nfs_inode *nfsi, *next;
2049 2050 struct nfs_access_entry *cache;
  2051 + int nr_to_scan = sc->nr_to_scan;
  2052 + gfp_t gfp_mask = sc->gfp_mask;
2050 2053  
2051 2054 if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL)
2052 2055 return (nr_to_scan == 0) ? 0 : -1;
fs/nfs/internal.h
... ... @@ -234,7 +234,7 @@
234 234  
235 235 /* dir.c */
236 236 extern int nfs_access_cache_shrinker(struct shrinker *shrink,
237   - int nr_to_scan, gfp_t gfp_mask);
  237 + struct shrink_control *sc);
238 238  
239 239 /* inode.c */
240 240 extern struct workqueue_struct *nfsiod_workqueue;
fs/quota/dquot.c
... ... @@ -691,8 +691,11 @@
691 691 * This is called from kswapd when we think we need some
692 692 * more memory
693 693 */
694   -static int shrink_dqcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
  694 +static int shrink_dqcache_memory(struct shrinker *shrink,
  695 + struct shrink_control *sc)
695 696 {
  697 + int nr = sc->nr_to_scan;
  698 +
696 699 if (nr) {
697 700 spin_lock(&dq_list_lock);
698 701 prune_dqcache(nr);
fs/xfs/linux-2.6/xfs_buf.c
... ... @@ -1422,12 +1422,12 @@
1422 1422 int
1423 1423 xfs_buftarg_shrink(
1424 1424 struct shrinker *shrink,
1425   - int nr_to_scan,
1426   - gfp_t mask)
  1425 + struct shrink_control *sc)
1427 1426 {
1428 1427 struct xfs_buftarg *btp = container_of(shrink,
1429 1428 struct xfs_buftarg, bt_shrinker);
1430 1429 struct xfs_buf *bp;
  1430 + int nr_to_scan = sc->nr_to_scan;
1431 1431 LIST_HEAD(dispose);
1432 1432  
1433 1433 if (!nr_to_scan)
fs/xfs/linux-2.6/xfs_sync.c
... ... @@ -1032,13 +1032,14 @@
1032 1032 static int
1033 1033 xfs_reclaim_inode_shrink(
1034 1034 struct shrinker *shrink,
1035   - int nr_to_scan,
1036   - gfp_t gfp_mask)
  1035 + struct shrink_control *sc)
1037 1036 {
1038 1037 struct xfs_mount *mp;
1039 1038 struct xfs_perag *pag;
1040 1039 xfs_agnumber_t ag;
1041 1040 int reclaimable;
  1041 + int nr_to_scan = sc->nr_to_scan;
  1042 + gfp_t gfp_mask = sc->gfp_mask;
1042 1043  
1043 1044 mp = container_of(shrink, struct xfs_mount, m_inode_shrink);
1044 1045 if (nr_to_scan) {
fs/xfs/quota/xfs_qm.c
... ... @@ -60,7 +60,7 @@
60 60  
61 61 STATIC int xfs_qm_init_quotainos(xfs_mount_t *);
62 62 STATIC int xfs_qm_init_quotainfo(xfs_mount_t *);
63   -STATIC int xfs_qm_shake(struct shrinker *, int, gfp_t);
  63 +STATIC int xfs_qm_shake(struct shrinker *, struct shrink_control *);
64 64  
65 65 static struct shrinker xfs_qm_shaker = {
66 66 .shrink = xfs_qm_shake,
... ... @@ -2009,10 +2009,10 @@
2009 2009 STATIC int
2010 2010 xfs_qm_shake(
2011 2011 struct shrinker *shrink,
2012   - int nr_to_scan,
2013   - gfp_t gfp_mask)
  2012 + struct shrink_control *sc)
2014 2013 {
2015 2014 int ndqused, nfree, n;
  2015 + gfp_t gfp_mask = sc->gfp_mask;
2016 2016  
2017 2017 if (!kmem_shake_allow(gfp_mask))
2018 2018 return 0;
include/linux/mm.h
... ... @@ -1166,18 +1166,20 @@
1166 1166 * We consolidate the values for easier extension later.
1167 1167 */
1168 1168 struct shrink_control {
1169   - unsigned long nr_scanned;
1170 1169 gfp_t gfp_mask;
  1170 +
  1171 + /* How many slab objects shrinker() should scan and try to reclaim */
  1172 + unsigned long nr_to_scan;
1171 1173 };
1172 1174  
1173 1175 /*
1174 1176 * A callback you can register to apply pressure to ageable caches.
1175 1177 *
1176   - * 'shrink' is passed a count 'nr_to_scan' and a 'gfpmask'. It should
1177   - * look through the least-recently-used 'nr_to_scan' entries and
1178   - * attempt to free them up. It should return the number of objects
1179   - * which remain in the cache. If it returns -1, it means it cannot do
1180   - * any scanning at this time (eg. there is a risk of deadlock).
  1178 + * 'sc' is passed shrink_control which includes a count 'nr_to_scan'
  1179 + * and a 'gfpmask'. It should look through the least-recently-used
  1180 + * 'nr_to_scan' entries and attempt to free them up. It should return
  1181 + * the number of objects which remain in the cache. If it returns -1, it means
  1182 + * it cannot do any scanning at this time (eg. there is a risk of deadlock).
1181 1183 *
1182 1184 * The 'gfpmask' refers to the allocation we are currently trying to
1183 1185 * fulfil.
... ... @@ -1186,7 +1188,7 @@
1186 1188 * querying the cache size, so a fastpath for that case is appropriate.
1187 1189 */
1188 1190 struct shrinker {
1189   - int (*shrink)(struct shrinker *, int nr_to_scan, gfp_t gfp_mask);
  1191 + int (*shrink)(struct shrinker *, struct shrink_control *sc);
1190 1192 int seeks; /* seeks to recreate an obj */
1191 1193  
1192 1194 /* These are for internal use */
... ... @@ -1640,7 +1642,8 @@
1640 1642 int drop_caches_sysctl_handler(struct ctl_table *, int,
1641 1643 void __user *, size_t *, loff_t *);
1642 1644 unsigned long shrink_slab(struct shrink_control *shrink,
1643   - unsigned long lru_pages);
  1645 + unsigned long nr_pages_scanned,
  1646 + unsigned long lru_pages);
1644 1647  
1645 1648 #ifndef CONFIG_MMU
1646 1649 #define randomize_va_space 0
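
Taken together with the struct shrinker and shrink_control definitions
above, a complete user of the new API would look roughly like the
following sketch. The my_cache_* names and the toy object counter are
made up for illustration; only the callback signature, the .shrink/.seeks
fields, DEFAULT_SEEKS and register_shrinker()/unregister_shrinker() come
from the kernel API being changed here:

	#include <linux/kernel.h>
	#include <linux/mm.h>

	/* Toy bookkeeping so the sketch is self-contained; a real
	 * cache would count and free its own objects here. */
	static int my_cache_objects = 1024;

	static int my_cache_prune(int nr)
	{
		int freed = min(nr, my_cache_objects);

		my_cache_objects -= freed;
		return freed;
	}

	static int my_cache_shrink(struct shrinker *shrink,
				   struct shrink_control *sc)
	{
		if (sc->nr_to_scan == 0)
			return my_cache_objects; /* query-only fast path */
		if (!(sc->gfp_mask & __GFP_FS))
			return -1;		/* deadlock risk: bail out */
		my_cache_prune(sc->nr_to_scan);
		return my_cache_objects;	/* objects remaining */
	}

	static struct shrinker my_cache_shrinker = {
		.shrink	= my_cache_shrink,
		.seeks	= DEFAULT_SEEKS,
	};

	/* register_shrinker(&my_cache_shrinker) at module init,
	 * unregister_shrinker(&my_cache_shrinker) at module exit. */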
mm/memory-failure.c
... ... @@ -241,10 +241,9 @@
241 241 do {
242 242 struct shrink_control shrink = {
243 243 .gfp_mask = GFP_KERNEL,
244   - .nr_scanned = 1000,
245 244 };
246 245  
247   - nr = shrink_slab(&shrink, 1000);
  246 + nr = shrink_slab(&shrink, 1000, 1000);
248 247 if (page_count(p) == 1)
249 248 break;
250 249 } while (nr > 10);
mm/vmscan.c
... ... @@ -202,6 +202,14 @@
202 202 }
203 203 EXPORT_SYMBOL(unregister_shrinker);
204 204  
  205 +static inline int do_shrinker_shrink(struct shrinker *shrinker,
  206 + struct shrink_control *sc,
  207 + unsigned long nr_to_scan)
  208 +{
  209 + sc->nr_to_scan = nr_to_scan;
  210 + return (*shrinker->shrink)(shrinker, sc);
  211 +}
  212 +
205 213 #define SHRINK_BATCH 128
206 214 /*
207 215 * Call the shrink functions to age shrinkable caches
... ... @@ -223,15 +231,14 @@
223 231 * Returns the number of slab objects which we shrunk.
224 232 */
225 233 unsigned long shrink_slab(struct shrink_control *shrink,
  234 + unsigned long nr_pages_scanned,
226 235 unsigned long lru_pages)
227 236 {
228 237 struct shrinker *shrinker;
229 238 unsigned long ret = 0;
230   - unsigned long scanned = shrink->nr_scanned;
231   - gfp_t gfp_mask = shrink->gfp_mask;
232 239  
233   - if (scanned == 0)
234   - scanned = SWAP_CLUSTER_MAX;
  240 + if (nr_pages_scanned == 0)
  241 + nr_pages_scanned = SWAP_CLUSTER_MAX;
235 242  
236 243 if (!down_read_trylock(&shrinker_rwsem)) {
237 244 /* Assume we'll be able to shrink next time */
... ... @@ -244,8 +251,8 @@
244 251 unsigned long total_scan;
245 252 unsigned long max_pass;
246 253  
247   - max_pass = (*shrinker->shrink)(shrinker, 0, gfp_mask);
248   - delta = (4 * scanned) / shrinker->seeks;
  254 + max_pass = do_shrinker_shrink(shrinker, shrink, 0);
  255 + delta = (4 * nr_pages_scanned) / shrinker->seeks;
249 256 delta *= max_pass;
250 257 do_div(delta, lru_pages + 1);
251 258 shrinker->nr += delta;
... ... @@ -272,9 +279,9 @@
272 279 int shrink_ret;
273 280 int nr_before;
274 281  
275   - nr_before = (*shrinker->shrink)(shrinker, 0, gfp_mask);
276   - shrink_ret = (*shrinker->shrink)(shrinker, this_scan,
277   - gfp_mask);
  282 + nr_before = do_shrinker_shrink(shrinker, shrink, 0);
  283 + shrink_ret = do_shrinker_shrink(shrinker, shrink,
  284 + this_scan);
278 285 if (shrink_ret == -1)
279 286 break;
280 287 if (shrink_ret < nr_before)
... ... @@ -2072,8 +2079,7 @@
2072 2079 lru_pages += zone_reclaimable_pages(zone);
2073 2080 }
2074 2081  
2075   - shrink->nr_scanned = sc->nr_scanned;
2076   - shrink_slab(shrink, lru_pages);
  2082 + shrink_slab(shrink, sc->nr_scanned, lru_pages);
2077 2083 if (reclaim_state) {
2078 2084 sc->nr_reclaimed += reclaim_state->reclaimed_slab;
2079 2085 reclaim_state->reclaimed_slab = 0;
... ... @@ -2456,8 +2462,7 @@
2456 2462 end_zone, 0))
2457 2463 shrink_zone(priority, zone, &sc);
2458 2464 reclaim_state->reclaimed_slab = 0;
2459   - shrink.nr_scanned = sc.nr_scanned;
2460   - nr_slab = shrink_slab(&shrink, lru_pages);
  2465 + nr_slab = shrink_slab(&shrink, sc.nr_scanned, lru_pages);
2461 2466 sc.nr_reclaimed += reclaim_state->reclaimed_slab;
2462 2467 total_scanned += sc.nr_scanned;
2463 2468  
... ... @@ -3025,7 +3030,6 @@
3025 3030 }
3026 3031  
3027 3032 nr_slab_pages0 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
3028   - shrink.nr_scanned = sc.nr_scanned;
3029 3033 if (nr_slab_pages0 > zone->min_slab_pages) {
3030 3034 /*
3031 3035 * shrink_slab() does not currently allow us to determine how
... ... @@ -3041,7 +3045,7 @@
3041 3045 unsigned long lru_pages = zone_reclaimable_pages(zone);
3042 3046  
3043 3047 /* No reclaimable slab or very low memory pressure */
3044   - if (!shrink_slab(&shrink, lru_pages))
  3048 + if (!shrink_slab(&shrink, sc.nr_scanned, lru_pages))
3045 3049 break;
3046 3050  
3047 3051 /* Freed enough memory */
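
For context on how the renamed nr_pages_scanned argument drives slab
pressure, the (otherwise unchanged) balancing logic in shrink_slab()
computes a scan credit per registered shrinker:

	delta = (4 * nr_pages_scanned) / shrinker->seeks;
	delta *= max_pass;
	do_div(delta, lru_pages + 1);
	shrinker->nr += delta;

where max_pass is the shrinker's current object count, obtained from the
do_shrinker_shrink(shrinker, shrink, 0) query. With purely illustrative
numbers, nr_pages_scanned = 1000, seeks = DEFAULT_SEEKS = 2,
max_pass = 10000 objects and lru_pages = 100000 pages give
delta = 2000 * 10000 / 100001 ≈ 200: roughly 200 objects' worth of scan
credit is accumulated in shrinker->nr and then issued to
do_shrinker_shrink() in SHRINK_BATCH (128) chunks.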
net/sunrpc/auth.c
... ... @@ -326,10 +326,12 @@
326 326 * Run memory cache shrinker.
327 327 */
328 328 static int
329   -rpcauth_cache_shrinker(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
  329 +rpcauth_cache_shrinker(struct shrinker *shrink, struct shrink_control *sc)
330 330 {
331 331 LIST_HEAD(free);
332 332 int res;
  333 + int nr_to_scan = sc->nr_to_scan;
  334 + gfp_t gfp_mask = sc->gfp_mask;
333 335  
334 336 if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL)
335 337 return (nr_to_scan == 0) ? 0 : -1;