  /* SPDX-License-Identifier: GPL-2.0 */
  /*
   * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
   * Authors: David Chinner and Glauber Costa
   *
   * Generic LRU infrastructure
   */
  #ifndef _LRU_LIST_H
  #define _LRU_LIST_H
  
  #include <linux/list.h>
  #include <linux/nodemask.h>
  #include <linux/shrinker.h>

  struct mem_cgroup;
  /* list_lru_walk_cb must always return one of these values */
  enum lru_status {
  	LRU_REMOVED,		/* item removed from list */
  	LRU_REMOVED_RETRY,	/* item removed, but lock has been
  				   dropped and reacquired */
  	LRU_ROTATE,		/* item referenced, give another pass */
  	LRU_SKIP,		/* item cannot be locked, skip */
  	LRU_RETRY,		/* item not freeable. May drop the lock
  				   internally, but has to return locked. */
  };
  struct list_lru_one {
  	struct list_head	list;
  	/* may become negative during memcg reparenting */
  	long			nr_items;
  };
  
  struct list_lru_memcg {
  	struct rcu_head		rcu;
  	/* array of per cgroup lists, indexed by memcg_cache_id */
  	struct list_lru_one	*lru[];
  };
  
  struct list_lru_node {
  	/* protects all lists on the node, including per cgroup */
  	spinlock_t		lock;
  	/* global list, used for the root cgroup in cgroup aware lrus */
  	struct list_lru_one	lru;
  #ifdef CONFIG_MEMCG_KMEM
	/* for cgroup aware lrus, points to per cgroup lists; otherwise NULL */
  	struct list_lru_memcg	__rcu *memcg_lrus;
  #endif
  	long nr_items;
  } ____cacheline_aligned_in_smp;
  
  struct list_lru {
  	struct list_lru_node	*node;
  #ifdef CONFIG_MEMCG_KMEM
  	struct list_head	list;
  	int			shrinker_id;
  	bool			memcg_aware;
  #endif
  };
  void list_lru_destroy(struct list_lru *lru);
  int __list_lru_init(struct list_lru *lru, bool memcg_aware,
  		    struct lock_class_key *key, struct shrinker *shrinker);

  #define list_lru_init(lru)				\
  	__list_lru_init((lru), false, NULL, NULL)
  #define list_lru_init_key(lru, key)			\
  	__list_lru_init((lru), false, (key), NULL)
  #define list_lru_init_memcg(lru, shrinker)		\
  	__list_lru_init((lru), true, NULL, shrinker)
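
/*
 * Illustrative sketch, not part of this header: bringing up and tearing down
 * a memcg aware lru for a hypothetical cache. All my_* names are made up and
 * forward declarations are omitted; my_count(), my_isolate() and my_scan()
 * are sketched further below, next to list_lru_shrink_count() and
 * list_lru_shrink_walk(). The shrinker id must be valid before
 * list_lru_init_memcg() is called, hence the prealloc_shrinker() /
 * register_shrinker_prepared() pair rather than a plain register_shrinker().
 *
 *	static struct list_lru my_lru;
 *	static struct shrinker my_shrinker = {
 *		.count_objects	= my_count,
 *		.scan_objects	= my_scan,
 *		.seeks		= DEFAULT_SEEKS,
 *		.flags		= SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
 *	};
 *
 *	static int my_cache_init(void)
 *	{
 *		int err;
 *
 *		err = prealloc_shrinker(&my_shrinker);
 *		if (err)
 *			return err;
 *		err = list_lru_init_memcg(&my_lru, &my_shrinker);
 *		if (err) {
 *			free_prealloced_shrinker(&my_shrinker);
 *			return err;
 *		}
 *		register_shrinker_prepared(&my_shrinker);
 *		return 0;
 *	}
 *
 *	static void my_cache_exit(void)
 *	{
 *		unregister_shrinker(&my_shrinker);
 *		list_lru_destroy(&my_lru);
 *	}
 */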
  
  int memcg_update_all_list_lrus(int num_memcgs);
  void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg);
  
  /**
   * list_lru_add: add an element to the lru list's tail
 * @lru: the lru pointer
 * @item: the item to be added.
 *
 * If the element is already part of a list, this function returns without
 * doing anything. Therefore the caller does not need to keep state about
 * whether or not the element already belongs to the list and is allowed to
 * update it lazily. Note however that this is valid for *a* list, not *this*
 * list. If the caller organizes itself in a way that elements can be in more
 * than one type of list, it is up to the caller to fully remove the item
 * from the previous list (with list_lru_del() for instance) before moving it
 * to @lru.
   *
   * Return value: true if the list was updated, false otherwise
   */
  bool list_lru_add(struct list_lru *lru, struct list_head *item);
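
/*
 * Illustrative sketch, not part of this header: an object of a hypothetical
 * cache embeds the list_head that sits on the lru. my_lru is the lru from
 * the init sketch above; my_nr_unused is a made-up counter standing in for
 * whatever accounting the caller keeps.
 *
 *	struct my_object {
 *		struct list_head	lru_node;
 *		unsigned long		flags;
 *		...
 *	};
 *
 *	static void my_object_mark_unused(struct my_object *obj)
 *	{
 *		if (list_lru_add(&my_lru, &obj->lru_node))
 *			atomic_long_inc(&my_nr_unused);
 *	}
 */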
  
  /**
 * list_lru_del: delete an element from the lru list
 * @lru: the lru pointer
 * @item: the item to be deleted.
 *
 * This function works analogously to list_lru_add() in terms of list
 * manipulation. The comments about an element already belonging to
 * a list are also valid for list_lru_del().
   *
   * Return value: true if the list was updated, false otherwise
   */
  bool list_lru_del(struct list_lru *lru, struct list_head *item);
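
/*
 * Illustrative sketch, not part of this header: the counterpart of the
 * list_lru_add() example above. Before the hypothetical object is reused or
 * freed it is taken off the lru; the return value tells the caller whether
 * it was actually still there.
 *
 *	static void my_object_mark_used(struct my_object *obj)
 *	{
 *		if (list_lru_del(&my_lru, &obj->lru_node))
 *			atomic_long_dec(&my_nr_unused);
 *	}
 */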
  
  /**
   * list_lru_count_one: return the number of objects currently held by @lru
   * @lru: the lru pointer.
   * @nid: the node id to count from.
   * @memcg: the cgroup to count from.
   *
 * Always returns a non-negative number, 0 for empty lists. There is no
   * guarantee that the list is not updated while the count is being computed.
   * Callers that want such a guarantee need to provide an outer lock.
   */
  unsigned long list_lru_count_one(struct list_lru *lru,
  				 int nid, struct mem_cgroup *memcg);
  unsigned long list_lru_count_node(struct list_lru *lru, int nid);
  
  static inline unsigned long list_lru_shrink_count(struct list_lru *lru,
  						  struct shrink_control *sc)
  {
  	return list_lru_count_one(lru, sc->nid, sc->memcg);
  }
  static inline unsigned long list_lru_count(struct list_lru *lru)
  {
  	long count = 0;
  	int nid;
  	for_each_node_state(nid, N_NORMAL_MEMORY)
  		count += list_lru_count_node(lru, nid);
  
  	return count;
  }
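
/*
 * Illustrative sketch, not part of this header: the count side of the
 * hypothetical shrinker from the init sketch above. sc->nid and sc->memcg
 * are filled in by the shrinker core, so the callback only forwards the
 * shrink_control to list_lru_shrink_count().
 *
 *	static unsigned long my_count(struct shrinker *s,
 *				      struct shrink_control *sc)
 *	{
 *		return list_lru_shrink_count(&my_lru, sc);
 *	}
 */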

  void list_lru_isolate(struct list_lru_one *list, struct list_head *item);
  void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
  			   struct list_head *head);
  
  typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item,
  		struct list_lru_one *list, spinlock_t *lock, void *cb_arg);
  /**
 * list_lru_walk_one: walk a list_lru, isolating and disposing of freeable items.
   * @lru: the lru pointer.
   * @nid: the node id to scan from.
   * @memcg: the cgroup to scan from.
 * @isolate: callback function that is responsible for deciding what to do with
 *  the item currently being scanned
 * @cb_arg: opaque type that will be passed to @isolate
 * @nr_to_walk: how many items to scan.
 *
 * This function will scan all elements in a particular list_lru, calling the
 * @isolate callback for each of those items, along with the current list
 * spinlock and a caller-provided opaque argument. The @isolate callback can
 * choose to drop the lock internally, but *must* return with the lock held.
 * The callback will return an enum lru_status telling the list_lru
 * infrastructure what to do with the object being scanned.
   *
   * Please note that nr_to_walk does not mean how many objects will be freed,
   * just how many objects will be scanned.
   *
   * Return value: the number of objects effectively removed from the LRU.
   */
  unsigned long list_lru_walk_one(struct list_lru *lru,
  				int nid, struct mem_cgroup *memcg,
  				list_lru_walk_cb isolate, void *cb_arg,
  				unsigned long *nr_to_walk);
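
/*
 * Illustrative sketch, not part of this header: an isolate callback for the
 * hypothetical my_object from the examples above. It runs with the node's
 * lru lock held; busy objects get another pass, freeable ones are moved to
 * a caller-provided dispose list and counted as removed. my_object_in_use()
 * is a made-up predicate.
 *
 *	static enum lru_status my_isolate(struct list_head *item,
 *					  struct list_lru_one *lru,
 *					  spinlock_t *lock, void *cb_arg)
 *	{
 *		struct list_head *dispose = cb_arg;
 *		struct my_object *obj = container_of(item, struct my_object,
 *						     lru_node);
 *
 *		if (my_object_in_use(obj))
 *			return LRU_ROTATE;
 *
 *		list_lru_isolate_move(lru, item, dispose);
 *		return LRU_REMOVED;
 *	}
 */
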
  /**
 * list_lru_walk_one_irq: walk a list_lru, isolating and disposing of freeable items.
 * @lru: the lru pointer.
 * @nid: the node id to scan from.
 * @memcg: the cgroup to scan from.
 * @isolate: callback function that is responsible for deciding what to do with
   *  the item currently being scanned
   * @cb_arg: opaque type that will be passed to @isolate
   * @nr_to_walk: how many items to scan.
   *
   * Same as @list_lru_walk_one except that the spinlock is acquired with
   * spin_lock_irq().
   */
  unsigned long list_lru_walk_one_irq(struct list_lru *lru,
  				    int nid, struct mem_cgroup *memcg,
  				    list_lru_walk_cb isolate, void *cb_arg,
  				    unsigned long *nr_to_walk);
  unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
  				 list_lru_walk_cb isolate, void *cb_arg,
  				 unsigned long *nr_to_walk);
  
  static inline unsigned long
  list_lru_shrink_walk(struct list_lru *lru, struct shrink_control *sc,
  		     list_lru_walk_cb isolate, void *cb_arg)
  {
  	return list_lru_walk_one(lru, sc->nid, sc->memcg, isolate, cb_arg,
  				 &sc->nr_to_scan);
  }
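
/*
 * Illustrative sketch, not part of this header: the scan side of the
 * hypothetical shrinker, pairing with my_count() and my_isolate() above.
 * Freeable objects are isolated onto a private list under the lru lock and
 * only freed afterwards; my_dispose_list() stands in for the caller's own
 * freeing routine.
 *
 *	static unsigned long my_scan(struct shrinker *s,
 *				     struct shrink_control *sc)
 *	{
 *		LIST_HEAD(dispose);
 *		unsigned long freed;
 *
 *		freed = list_lru_shrink_walk(&my_lru, sc, my_isolate,
 *					     &dispose);
 *		my_dispose_list(&dispose);
 *		return freed;
 *	}
 */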
  
  static inline unsigned long
  list_lru_shrink_walk_irq(struct list_lru *lru, struct shrink_control *sc,
  			 list_lru_walk_cb isolate, void *cb_arg)
  {
  	return list_lru_walk_one_irq(lru, sc->nid, sc->memcg, isolate, cb_arg,
  				     &sc->nr_to_scan);
  }
  
  static inline unsigned long
  list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
  	      void *cb_arg, unsigned long nr_to_walk)
  {
  	long isolated = 0;
  	int nid;
  	for_each_node_state(nid, N_NORMAL_MEMORY) {
  		isolated += list_lru_walk_node(lru, nid, isolate,
  					       cb_arg, &nr_to_walk);
  		if (nr_to_walk <= 0)
  			break;
  	}
  	return isolated;
  }
  #endif /* _LRU_LIST_H */