mm/list_lru.c

  /*
   * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
   * Authors: David Chinner and Glauber Costa
   *
   * Generic LRU infrastructure
   */
  #include <linux/kernel.h>
  #include <linux/module.h>
  #include <linux/mm.h>
  #include <linux/list_lru.h>
  #include <linux/slab.h>
  
  bool list_lru_add(struct list_lru *lru, struct list_head *item)
  {
  	int nid = page_to_nid(virt_to_page(item));
  	struct list_lru_node *nlru = &lru->node[nid];
  
  	spin_lock(&nlru->lock);
  	WARN_ON_ONCE(nlru->nr_items < 0);
  	if (list_empty(item)) {
  		list_add_tail(item, &nlru->list);
  		if (nlru->nr_items++ == 0)
  			node_set(nid, lru->active_nodes);
  		spin_unlock(&nlru->lock);
  		return true;
  	}
  	spin_unlock(&nlru->lock);
  	return false;
  }
  EXPORT_SYMBOL_GPL(list_lru_add);
  
  bool list_lru_del(struct list_lru *lru, struct list_head *item)
  {
  	int nid = page_to_nid(virt_to_page(item));
  	struct list_lru_node *nlru = &lru->node[nid];
  
  	spin_lock(&nlru->lock);
  	if (!list_empty(item)) {
  		list_del_init(item);
  		if (--nlru->nr_items == 0)
  			node_clear(nid, lru->active_nodes);
  		WARN_ON_ONCE(nlru->nr_items < 0);
  		spin_unlock(&nlru->lock);
  		return true;
  	}
  	spin_unlock(&nlru->lock);
  	return false;
  }
  EXPORT_SYMBOL_GPL(list_lru_del);
  unsigned long
  list_lru_count_node(struct list_lru *lru, int nid)
  {
  	unsigned long count = 0;
  	struct list_lru_node *nlru = &lru->node[nid];

  	spin_lock(&nlru->lock);
  	WARN_ON_ONCE(nlru->nr_items < 0);
  	count += nlru->nr_items;
  	spin_unlock(&nlru->lock);
  
  	return count;
  }
  EXPORT_SYMBOL_GPL(list_lru_count_node);

  unsigned long
  list_lru_walk_node(struct list_lru *lru, int nid, list_lru_walk_cb isolate,
  		   void *cb_arg, unsigned long *nr_to_walk)
{
	struct list_lru_node	*nlru = &lru->node[nid];
  	struct list_head *item, *n;
  	unsigned long isolated = 0;

  	spin_lock(&nlru->lock);
  restart:
  	list_for_each_safe(item, n, &nlru->list) {
  		enum lru_status ret;
  
  		/*
  		 * decrement nr_to_walk first so that we don't livelock if we
		 * get stuck on large numbers of LRU_RETRY items
  		 */
  		if (!*nr_to_walk)
  			break;
  		--*nr_to_walk;

  		ret = isolate(item, &nlru->lock, cb_arg);
  		switch (ret) {
		case LRU_REMOVED_RETRY:
			assert_spin_locked(&nlru->lock);
			/* fall through */
  		case LRU_REMOVED:
  			if (--nlru->nr_items == 0)
  				node_clear(nid, lru->active_nodes);
  			WARN_ON_ONCE(nlru->nr_items < 0);
  			isolated++;
  			/*
  			 * If the lru lock has been dropped, our list
  			 * traversal is now invalid and so we have to
  			 * restart from scratch.
  			 */
  			if (ret == LRU_REMOVED_RETRY)
  				goto restart;
  			break;
  		case LRU_ROTATE:
  			list_move_tail(item, &nlru->list);
  			break;
  		case LRU_SKIP:
  			break;
  		case LRU_RETRY:
  			/*
  			 * The lru lock has been dropped, our list traversal is
  			 * now invalid and so we have to restart from scratch.
  			 */
  			assert_spin_locked(&nlru->lock);
  			goto restart;
  		default:
  			BUG();
  		}
  	}
  
  	spin_unlock(&nlru->lock);
  	return isolated;
  }
  EXPORT_SYMBOL_GPL(list_lru_walk_node);
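
/*
 * Example isolate callback (not part of this file): a minimal sketch of the
 * contract list_lru_walk_node() expects. The walker invokes isolate() with
 * nlru->lock held; for LRU_REMOVED the callback must unlink the item itself,
 * and it may only drop the lock if it reacquires it and reports
 * LRU_REMOVED_RETRY or LRU_RETRY. The name dispose_isolate and the
 * dispose-list pattern below are illustrative assumptions.
 */
static enum lru_status dispose_isolate(struct list_head *item,
				       spinlock_t *lock, void *cb_arg)
{
	struct list_head *dispose = cb_arg;	/* caller's private list */

	/*
	 * *lock (the per-node lru lock) is held here, so moving the item to
	 * the dispose list is safe. Returning LRU_REMOVED tells the walker
	 * to decrement nr_items and count this item as isolated.
	 */
	list_move(item, dispose);
	return LRU_REMOVED;
}
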
  int list_lru_init_key(struct list_lru *lru, struct lock_class_key *key)
  {
  	int i;
  	size_t size = sizeof(*lru->node) * nr_node_ids;
  
  	lru->node = kzalloc(size, GFP_KERNEL);
  	if (!lru->node)
  		return -ENOMEM;

  	nodes_clear(lru->active_nodes);
  	for (i = 0; i < nr_node_ids; i++) {
  		spin_lock_init(&lru->node[i].lock);
  		if (key)
  			lockdep_set_class(&lru->node[i].lock, key);
  		INIT_LIST_HEAD(&lru->node[i].list);
  		lru->node[i].nr_items = 0;
  	}
  	return 0;
  }
  EXPORT_SYMBOL_GPL(list_lru_init_key);
  
  void list_lru_destroy(struct list_lru *lru)
  {
  	kfree(lru->node);
  }
  EXPORT_SYMBOL_GPL(list_lru_destroy);
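
/*
 * End-to-end usage sketch (hypothetical, not part of this file): how a
 * subsystem might drive the API above, assuming the dispose_isolate()
 * callback sketched earlier and an invented struct my_obj allocated with
 * kmalloc(). list_lru_add() derives the NUMA node from the object's page
 * via page_to_nid(virt_to_page()), so slab-backed objects are expected.
 */
struct my_obj {
	struct list_head lru;	/* hooks the object into the list_lru */
	/* ... payload ... */
};

static struct list_lru my_lru;

static int my_setup(void)
{
	/* one spinlocked list per node; NULL keeps the default lockdep class */
	return list_lru_init_key(&my_lru, NULL);
}

static void my_insert(struct my_obj *obj)
{
	INIT_LIST_HEAD(&obj->lru);		/* list_lru_add() requires an empty head */
	list_lru_add(&my_lru, &obj->lru);	/* true if the item was newly added */
}

static unsigned long my_shrink(int nid, unsigned long nr_to_walk)
{
	LIST_HEAD(dispose);
	struct my_obj *obj, *next;
	unsigned long isolated;

	isolated = list_lru_walk_node(&my_lru, nid, dispose_isolate,
				      &dispose, &nr_to_walk);
	/* free the victims outside the lru lock */
	list_for_each_entry_safe(obj, next, &dispose, lru)
		kfree(obj);
	return isolated;
}

static void my_teardown(void)
{
	list_lru_destroy(&my_lru);	/* frees the per-node array */
}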