Blame view

include/linux/shrinker.h 3.21 KB
b24413180   Greg Kroah-Hartman   License cleanup: ...
1
  /* SPDX-License-Identifier: GPL-2.0 */
b0d40c92a   Dave Chinner   superblock: intro...
2
3
4
5
6
7
  #ifndef _LINUX_SHRINKER_H
  #define _LINUX_SHRINKER_H
  
  /*
   * This struct is used to pass information from page reclaim to the shrinkers.
   * We consolidate the values for easier extension later.
24f7c6b98   Dave Chinner   mm: new shrinker API
8
9
10
   *
   * The 'gfpmask' refers to the allocation we are currently trying to
   * fulfil.
b0d40c92a   Dave Chinner   superblock: intro...
11
12
13
   */
/*
 * Control block handed from page reclaim to each shrinker callback.
 * All reclaim context a shrinker needs is consolidated here.
 */
struct shrink_control {
	/* allocation mask of the allocation currently being fulfilled */
	gfp_t gfp_mask;

	/* current node being shrunk (for NUMA aware shrinkers) */
	int nid;

	/*
	 * How many objects scan_objects should scan and try to reclaim.
	 * This is reset before every call, so it is safe for callees
	 * to modify.
	 */
	unsigned long nr_to_scan;

	/*
	 * How many objects did scan_objects process?
	 * This defaults to nr_to_scan before every call, but the callee
	 * should track its actual progress.
	 */
	unsigned long nr_scanned;

	/* current memcg being shrunk (for memcg aware shrinkers) */
	struct mem_cgroup *memcg;
};
/*
 * Special return values for the shrinker callbacks:
 *
 * SHRINK_STOP  - returned by ->scan_objects() when no progress can be made
 *                (e.g. a potential deadlock); the current reclaim context
 *                will make no further calls into this shrinker.
 * SHRINK_EMPTY - returned by ->count_objects() when the cache holds no
 *                freeable objects at all (as opposed to 0, which means the
 *                count could not be determined or the cache should be
 *                skipped this time).
 */
#define SHRINK_STOP (~0UL)
#define SHRINK_EMPTY (~0UL - 1)
b0d40c92a   Dave Chinner   superblock: intro...
34
35
36
  /*
   * A callback you can register to apply pressure to ageable caches.
   *
24f7c6b98   Dave Chinner   mm: new shrinker API
37
   * @count_objects should return the number of freeable items in the cache. If
9b996468c   Kirill Tkhai   mm: add SHRINK_EM...
38
39
40
41
 * there are no objects to free, it should return SHRINK_EMPTY, while 0 is
 * returned in cases where the number of freeable items cannot be determined
 * or the shrinker should skip this cache this time (e.g., their number
 * is below the shrinkable limit). No deadlock checks should be done during the
24f7c6b98   Dave Chinner   mm: new shrinker API
42
43
44
   * count callback - the shrinker relies on aggregating scan counts that couldn't
   * be executed due to potential deadlocks to be run at a later call when the
   * deadlock condition is no longer pending.
b0d40c92a   Dave Chinner   superblock: intro...
45
   *
24f7c6b98   Dave Chinner   mm: new shrinker API
46
47
48
49
50
51
52
   * @scan_objects will only be called if @count_objects returned a non-zero
   * value for the number of freeable objects. The callout should scan the cache
   * and attempt to free items from the cache. It should then return the number
   * of objects freed during the scan, or SHRINK_STOP if progress cannot be made
   * due to potential deadlocks. If SHRINK_STOP is returned, then no further
   * attempts to call the @scan_objects will be made from the current reclaim
   * context.
1d3d4437e   Glauber Costa   vmscan: per-node ...
53
54
   *
   * @flags determine the shrinker abilities, like numa awareness
b0d40c92a   Dave Chinner   superblock: intro...
55
56
   */
struct shrinker {
	/*
	 * Return the number of freeable objects in the cache, SHRINK_EMPTY
	 * if there are none, or 0 if the count cannot be determined or the
	 * cache should be skipped this time (see the comment above).
	 */
	unsigned long (*count_objects)(struct shrinker *,
				       struct shrink_control *sc);
	/*
	 * Scan the cache and try to free objects; returns the number freed,
	 * or SHRINK_STOP if no progress can be made from this context.
	 */
	unsigned long (*scan_objects)(struct shrinker *,
				      struct shrink_control *sc);

	long batch;	/* reclaim batch size, 0 = default */
	int seeks;	/* seeks to recreate an obj */
	unsigned flags;	/* SHRINKER_* ability flags (e.g. NUMA awareness) */

	/* These are for internal use */
	struct list_head list;
#ifdef CONFIG_MEMCG
	/* ID in shrinker_idr */
	int id;
#endif
	/* objs pending delete, per node */
	atomic_long_t *nr_deferred;
};
  #define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
1d3d4437e   Glauber Costa   vmscan: per-node ...
75
76
  
  /* Flags */
cb731d6c6   Vladimir Davydov   vmscan: per memor...
77
78
  #define SHRINKER_NUMA_AWARE	(1 << 0)
  #define SHRINKER_MEMCG_AWARE	(1 << 1)
0a432dcbe   Yang Shi   mm: shrinker: mak...
79
80
81
82
83
  /*
   * It just makes sense when the shrinker is also MEMCG_AWARE for now,
   * non-MEMCG_AWARE shrinker should not have this flag set.
   */
  #define SHRINKER_NONSLAB	(1 << 2)
1d3d4437e   Glauber Costa   vmscan: per-node ...
84

/*
 * Shrinker registration API (implemented in mm/vmscan.c).
 *
 * NOTE(review): semantics below are inferred from the names and the
 * prealloc/prepared split — confirm against the implementation.
 * prealloc_shrinker()          - presumably allocates internal state (e.g.
 *                                nr_deferred) without making the shrinker
 *                                visible to reclaim; returns 0 or -errno.
 * register_shrinker_prepared() - completes registration of a preallocated
 *                                shrinker.
 * register_shrinker()          - one-step registration; returns 0 or -errno.
 * unregister_shrinker()        - removes a registered shrinker.
 * free_prealloced_shrinker()   - releases state from prealloc_shrinker()
 *                                when registration is abandoned.
 */
extern int prealloc_shrinker(struct shrinker *shrinker);
extern void register_shrinker_prepared(struct shrinker *shrinker);
extern int register_shrinker(struct shrinker *shrinker);
extern void unregister_shrinker(struct shrinker *shrinker);
extern void free_prealloced_shrinker(struct shrinker *shrinker);
b0d40c92a   Dave Chinner   superblock: intro...
90
  #endif