Commit a09ed5e00084448453c8bada4dcd31e5fbfc2f21
Committed by: Linus Torvalds
Parent: 7b1de5868b
Exists in: master and 4 other branches
vmscan: change shrink_slab() interfaces by passing shrink_control
Consolidate the existing parameters to shrink_slab() into a new shrink_control struct. This is needed later to pass the same struct to shrinkers.

Signed-off-by: Ying Han <yinghan@google.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Acked-by: Pavel Emelyanov <xemul@openvz.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Showing 4 changed files with 55 additions and 17 deletions.
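In short, callers that used to pass the scan count and gfp mask as separate arguments now fill in a shrink_control on the stack and hand shrink_slab() a pointer to it. A minimal before/after sketch, using the same values that appear in the fs/drop_caches.c hunk below:

/* Old interface: values passed as individual arguments. */
nr_objects = shrink_slab(1000, GFP_KERNEL, 1000);

/* New interface: the same values carried in a shrink_control;
 * the second argument is still lru_pages. */
struct shrink_control shrink = {
	.gfp_mask   = GFP_KERNEL,	/* allocation context of the reclaim */
	.nr_scanned = 1000,		/* pages scanned by page reclaim */
};
nr_objects = shrink_slab(&shrink, 1000);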
fs/drop_caches.c
@@ -40,9 +40,13 @@
 static void drop_slab(void)
 {
 	int nr_objects;
+	struct shrink_control shrink = {
+		.gfp_mask = GFP_KERNEL,
+		.nr_scanned = 1000,
+	};
 
 	do {
-		nr_objects = shrink_slab(1000, GFP_KERNEL, 1000);
+		nr_objects = shrink_slab(&shrink, 1000);
 	} while (nr_objects > 10);
 }
 
include/linux/mm.h
@@ -1162,6 +1162,15 @@
 #endif
 
 /*
+ * This struct is used to pass information from page reclaim to the shrinkers.
+ * We consolidate the values for easier extention later.
+ */
+struct shrink_control {
+	unsigned long nr_scanned;
+	gfp_t gfp_mask;
+};
+
+/*
  * A callback you can register to apply pressure to ageable caches.
  *
  * 'shrink' is passed a count 'nr_to_scan' and a 'gfpmask'. It should
@@ -1630,8 +1639,8 @@
 
 int drop_caches_sysctl_handler(struct ctl_table *, int,
 					void __user *, size_t *, loff_t *);
-unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
-			unsigned long lru_pages);
+unsigned long shrink_slab(struct shrink_control *shrink,
+			unsigned long lru_pages);
 
 #ifndef CONFIG_MMU
 #define randomize_va_space 0
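The struct is placed directly above the existing shrinker-callback documentation because, as the commit message notes, the follow-up step is to hand the same struct to the shrinkers themselves. A rough sketch of what that might look like; the nr_to_scan field, the two-argument callback form, and the my_cache_* helpers are illustrative assumptions, not part of this commit:

/* Illustrative only: this commit does not change the shrinker callback.
 * A later patch could extend shrink_control (e.g. with an assumed
 * nr_to_scan field) and pass it to each registered shrinker. */
static int my_cache_shrink(struct shrinker *s, struct shrink_control *sc)
{
	if (sc->nr_to_scan)				/* assumed field */
		prune_my_cache(sc->nr_to_scan, sc->gfp_mask);

	return my_cache_object_count();			/* objects still cached */
}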
mm/memory-failure.c
@@ -239,7 +239,12 @@
 	if (access) {
 		int nr;
 		do {
-			nr = shrink_slab(1000, GFP_KERNEL, 1000);
+			struct shrink_control shrink = {
+				.gfp_mask = GFP_KERNEL,
+				.nr_scanned = 1000,
+			};
+
+			nr = shrink_slab(&shrink, 1000);
 			if (page_count(p) == 1)
 				break;
 		} while (nr > 10);
mm/vmscan.c
@@ -222,11 +222,13 @@
  *
  * Returns the number of slab objects which we shrunk.
  */
-unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
-			unsigned long lru_pages)
+unsigned long shrink_slab(struct shrink_control *shrink,
+			unsigned long lru_pages)
 {
 	struct shrinker *shrinker;
 	unsigned long ret = 0;
+	unsigned long scanned = shrink->nr_scanned;
+	gfp_t gfp_mask = shrink->gfp_mask;
 
 	if (scanned == 0)
 		scanned = SWAP_CLUSTER_MAX;
@@ -2035,7 +2037,8 @@
  * else, the number of pages reclaimed
  */
 static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
-					struct scan_control *sc)
+					struct scan_control *sc,
+					struct shrink_control *shrink)
 {
 	int priority;
 	unsigned long total_scanned = 0;
@@ -2069,7 +2072,8 @@
 			lru_pages += zone_reclaimable_pages(zone);
 		}
 
-		shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages);
+		shrink->nr_scanned = sc->nr_scanned;
+		shrink_slab(shrink, lru_pages);
 		if (reclaim_state) {
 			sc->nr_reclaimed += reclaim_state->reclaimed_slab;
 			reclaim_state->reclaimed_slab = 0;
@@ -2141,12 +2145,15 @@
 		.mem_cgroup = NULL,
 		.nodemask = nodemask,
 	};
+	struct shrink_control shrink = {
+		.gfp_mask = sc.gfp_mask,
+	};
 
 	trace_mm_vmscan_direct_reclaim_begin(order,
 				sc.may_writepage,
 				gfp_mask);
 
-	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
+	nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
 
 	trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
 
@@ -2206,17 +2213,20 @@
 		.order = 0,
 		.mem_cgroup = mem_cont,
 		.nodemask = NULL, /* we don't care the placement */
+		.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
+				(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
 	};
+	struct shrink_control shrink = {
+		.gfp_mask = sc.gfp_mask,
+	};
 
-	sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
-			(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
 	zonelist = NODE_DATA(numa_node_id())->node_zonelists;
 
 	trace_mm_vmscan_memcg_reclaim_begin(0,
 					sc.may_writepage,
 					sc.gfp_mask);
 
-	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
+	nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
 
 	trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
 
@@ -2344,6 +2354,9 @@
 		.order = order,
 		.mem_cgroup = NULL,
 	};
+	struct shrink_control shrink = {
+		.gfp_mask = sc.gfp_mask,
+	};
 loop_again:
 	total_scanned = 0;
 	sc.nr_reclaimed = 0;
@@ -2443,8 +2456,8 @@
 							end_zone, 0))
 				shrink_zone(priority, zone, &sc);
 			reclaim_state->reclaimed_slab = 0;
-			nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
-						lru_pages);
+			shrink.nr_scanned = sc.nr_scanned;
+			nr_slab = shrink_slab(&shrink, lru_pages);
 			sc.nr_reclaimed += reclaim_state->reclaimed_slab;
 			total_scanned += sc.nr_scanned;
 
@@ -2796,7 +2809,10 @@
 		.swappiness = vm_swappiness,
 		.order = 0,
 	};
-	struct zonelist * zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
+	struct shrink_control shrink = {
+		.gfp_mask = sc.gfp_mask,
+	};
+	struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
 	struct task_struct *p = current;
 	unsigned long nr_reclaimed;
 
@@ -2805,7 +2821,7 @@
 	reclaim_state.reclaimed_slab = 0;
 	p->reclaim_state = &reclaim_state;
 
-	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
+	nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
 
 	p->reclaim_state = NULL;
 	lockdep_clear_current_reclaim_state();
@@ -2980,6 +2996,9 @@
 		.swappiness = vm_swappiness,
 		.order = order,
 	};
+	struct shrink_control shrink = {
+		.gfp_mask = sc.gfp_mask,
+	};
 	unsigned long nr_slab_pages0, nr_slab_pages1;
 
 	cond_resched();
@@ -3006,6 +3025,7 @@
 	}
 
 	nr_slab_pages0 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
+	shrink.nr_scanned = sc.nr_scanned;
 	if (nr_slab_pages0 > zone->min_slab_pages) {
 		/*
 		 * shrink_slab() does not currently allow us to determine how
@@ -3021,7 +3041,7 @@
 		unsigned long lru_pages = zone_reclaimable_pages(zone);
 
 		/* No reclaimable slab or very low memory pressure */
-		if (!shrink_slab(sc.nr_scanned, gfp_mask, lru_pages))
+		if (!shrink_slab(&shrink, lru_pages))
 			break;
 
 		/* Freed enough memory */