Blame view

fs/mbcache.c 11.9 KB
f9a61eb4e   Jan Kara   mbcache2: reimple...
1
2
3
4
5
6
  #include <linux/spinlock.h>
  #include <linux/slab.h>
  #include <linux/list.h>
  #include <linux/list_bl.h>
  #include <linux/module.h>
  #include <linux/sched.h>
c2f3140fe   Jan Kara   mbcache2: limit c...
7
  #include <linux/workqueue.h>
7a2508e1b   Jan Kara   mbcache2: rename ...
8
  #include <linux/mbcache.h>
f9a61eb4e   Jan Kara   mbcache2: reimple...
9
10
11
12
  
/*
 * Mbcache is a simple key-value store. Keys need not be unique, however
 * key-value pairs are expected to be unique (we use this fact in
 * mb_cache_entry_delete()).
 *
 * Ext2 and ext4 use this cache for deduplication of extended attribute blocks.
 * Ext4 also uses it for deduplication of xattr values stored in inodes.
 * They use hash of data as a key and provide a value that may represent a
 * block or inode number. That's why keys need not be unique (hash of different
 * data may be the same). However user provided value always uniquely
 * identifies a cache entry.
 *
 * We provide functions for creation and removal of entries, search by key,
 * and a special "delete entry with given key-value pair" operation. Fixed
 * size hash table is used for fast key lookups.
 */
7a2508e1b   Jan Kara   mbcache2: rename ...
26
  struct mb_cache {
f9a61eb4e   Jan Kara   mbcache2: reimple...
27
28
29
30
  	/* Hash table of entries */
  	struct hlist_bl_head	*c_hash;
  	/* log2 of hash table size */
  	int			c_bucket_bits;
c2f3140fe   Jan Kara   mbcache2: limit c...
31
  	/* Maximum entries in cache to avoid degrading hash too much */
132d4e2d5   Eric Biggers   mbcache: use cons...
32
  	unsigned long		c_max_entries;
f0c8b4623   Jan Kara   mbcache2: Use ref...
33
34
35
  	/* Protects c_list, c_entry_count */
  	spinlock_t		c_list_lock;
  	struct list_head	c_list;
f9a61eb4e   Jan Kara   mbcache2: reimple...
36
37
38
  	/* Number of entries in cache */
  	unsigned long		c_entry_count;
  	struct shrinker		c_shrink;
c2f3140fe   Jan Kara   mbcache2: limit c...
39
40
  	/* Work for shrinking when the cache has too many entries */
  	struct work_struct	c_shrink_work;
f9a61eb4e   Jan Kara   mbcache2: reimple...
41
  };
7a2508e1b   Jan Kara   mbcache2: rename ...
42
  static struct kmem_cache *mb_entry_cache;
f9a61eb4e   Jan Kara   mbcache2: reimple...
43

7a2508e1b   Jan Kara   mbcache2: rename ...
44
  static unsigned long mb_cache_shrink(struct mb_cache *cache,
132d4e2d5   Eric Biggers   mbcache: use cons...
45
  				     unsigned long nr_to_scan);
c2f3140fe   Jan Kara   mbcache2: limit c...
46

dc8d5e565   Andreas Gruenbacher   mbcache: get rid ...
47
48
  static inline struct hlist_bl_head *mb_cache_entry_head(struct mb_cache *cache,
  							u32 key)
f0c8b4623   Jan Kara   mbcache2: Use ref...
49
  {
dc8d5e565   Andreas Gruenbacher   mbcache: get rid ...
50
  	return &cache->c_hash[hash_32(key, cache->c_bucket_bits)];
f0c8b4623   Jan Kara   mbcache2: Use ref...
51
  }
c2f3140fe   Jan Kara   mbcache2: limit c...
52
53
54
55
56
  /*
   * Number of entries to reclaim synchronously when there are too many entries
   * in cache
   */
  #define SYNC_SHRINK_BATCH 64
f9a61eb4e   Jan Kara   mbcache2: reimple...
57
  /*
7a2508e1b   Jan Kara   mbcache2: rename ...
58
   * mb_cache_entry_create - create entry in cache
f9a61eb4e   Jan Kara   mbcache2: reimple...
59
60
61
   * @cache - cache where the entry should be created
   * @mask - gfp mask with which the entry should be allocated
   * @key - key of the entry
c07dfcb45   Tahsin Erdogan   mbcache: make mbc...
62
63
   * @value - value of the entry
   * @reusable - is the entry reusable by others?
f9a61eb4e   Jan Kara   mbcache2: reimple...
64
   *
c07dfcb45   Tahsin Erdogan   mbcache: make mbc...
65
66
67
   * Creates entry in @cache with key @key and value @value. The function returns
   * -EBUSY if entry with the same key and value already exists in cache.
   * Otherwise 0 is returned.
f9a61eb4e   Jan Kara   mbcache2: reimple...
68
   */
7a2508e1b   Jan Kara   mbcache2: rename ...
69
  int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
c07dfcb45   Tahsin Erdogan   mbcache: make mbc...
70
  			  u64 value, bool reusable)
f9a61eb4e   Jan Kara   mbcache2: reimple...
71
  {
7a2508e1b   Jan Kara   mbcache2: rename ...
72
  	struct mb_cache_entry *entry, *dup;
f9a61eb4e   Jan Kara   mbcache2: reimple...
73
74
  	struct hlist_bl_node *dup_node;
  	struct hlist_bl_head *head;
c2f3140fe   Jan Kara   mbcache2: limit c...
75
76
77
78
79
  	/* Schedule background reclaim if there are too many entries */
  	if (cache->c_entry_count >= cache->c_max_entries)
  		schedule_work(&cache->c_shrink_work);
  	/* Do some sync reclaim if background reclaim cannot keep up */
  	if (cache->c_entry_count >= 2*cache->c_max_entries)
7a2508e1b   Jan Kara   mbcache2: rename ...
80
  		mb_cache_shrink(cache, SYNC_SHRINK_BATCH);
c2f3140fe   Jan Kara   mbcache2: limit c...
81

7a2508e1b   Jan Kara   mbcache2: rename ...
82
  	entry = kmem_cache_alloc(mb_entry_cache, mask);
f9a61eb4e   Jan Kara   mbcache2: reimple...
83
84
  	if (!entry)
  		return -ENOMEM;
f0c8b4623   Jan Kara   mbcache2: Use ref...
85
  	INIT_LIST_HEAD(&entry->e_list);
f9a61eb4e   Jan Kara   mbcache2: reimple...
86
87
88
  	/* One ref for hash, one ref returned */
  	atomic_set(&entry->e_refcnt, 1);
  	entry->e_key = key;
c07dfcb45   Tahsin Erdogan   mbcache: make mbc...
89
  	entry->e_value = value;
6048c64b2   Andreas Gruenbacher   mbcache: add reus...
90
  	entry->e_reusable = reusable;
e96a21989   Alexander Potapenko   mbcache: initiali...
91
  	entry->e_referenced = 0;
dc8d5e565   Andreas Gruenbacher   mbcache: get rid ...
92
  	head = mb_cache_entry_head(cache, key);
f9a61eb4e   Jan Kara   mbcache2: reimple...
93
94
  	hlist_bl_lock(head);
  	hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) {
c07dfcb45   Tahsin Erdogan   mbcache: make mbc...
95
  		if (dup->e_key == key && dup->e_value == value) {
f9a61eb4e   Jan Kara   mbcache2: reimple...
96
  			hlist_bl_unlock(head);
7a2508e1b   Jan Kara   mbcache2: rename ...
97
  			kmem_cache_free(mb_entry_cache, entry);
f9a61eb4e   Jan Kara   mbcache2: reimple...
98
99
100
101
102
  			return -EBUSY;
  		}
  	}
  	hlist_bl_add_head(&entry->e_hash_list, head);
  	hlist_bl_unlock(head);
f0c8b4623   Jan Kara   mbcache2: Use ref...
103
104
  	spin_lock(&cache->c_list_lock);
  	list_add_tail(&entry->e_list, &cache->c_list);
f9a61eb4e   Jan Kara   mbcache2: reimple...
105
106
107
  	/* Grab ref for LRU list */
  	atomic_inc(&entry->e_refcnt);
  	cache->c_entry_count++;
f0c8b4623   Jan Kara   mbcache2: Use ref...
108
  	spin_unlock(&cache->c_list_lock);
f9a61eb4e   Jan Kara   mbcache2: reimple...
109
110
111
  
  	return 0;
  }
7a2508e1b   Jan Kara   mbcache2: rename ...
112
  EXPORT_SYMBOL(mb_cache_entry_create);
f9a61eb4e   Jan Kara   mbcache2: reimple...
113

7a2508e1b   Jan Kara   mbcache2: rename ...
114
  void __mb_cache_entry_free(struct mb_cache_entry *entry)
f9a61eb4e   Jan Kara   mbcache2: reimple...
115
  {
7a2508e1b   Jan Kara   mbcache2: rename ...
116
  	kmem_cache_free(mb_entry_cache, entry);
f9a61eb4e   Jan Kara   mbcache2: reimple...
117
  }
7a2508e1b   Jan Kara   mbcache2: rename ...
118
  EXPORT_SYMBOL(__mb_cache_entry_free);
f9a61eb4e   Jan Kara   mbcache2: reimple...
119

7a2508e1b   Jan Kara   mbcache2: rename ...
120
121
122
  static struct mb_cache_entry *__entry_find(struct mb_cache *cache,
  					   struct mb_cache_entry *entry,
  					   u32 key)
f9a61eb4e   Jan Kara   mbcache2: reimple...
123
  {
7a2508e1b   Jan Kara   mbcache2: rename ...
124
  	struct mb_cache_entry *old_entry = entry;
f9a61eb4e   Jan Kara   mbcache2: reimple...
125
126
  	struct hlist_bl_node *node;
  	struct hlist_bl_head *head;
dc8d5e565   Andreas Gruenbacher   mbcache: get rid ...
127
  	head = mb_cache_entry_head(cache, key);
f9a61eb4e   Jan Kara   mbcache2: reimple...
128
129
130
131
132
133
  	hlist_bl_lock(head);
  	if (entry && !hlist_bl_unhashed(&entry->e_hash_list))
  		node = entry->e_hash_list.next;
  	else
  		node = hlist_bl_first(head);
  	while (node) {
7a2508e1b   Jan Kara   mbcache2: rename ...
134
  		entry = hlist_bl_entry(node, struct mb_cache_entry,
f9a61eb4e   Jan Kara   mbcache2: reimple...
135
  				       e_hash_list);
6048c64b2   Andreas Gruenbacher   mbcache: add reus...
136
  		if (entry->e_key == key && entry->e_reusable) {
f9a61eb4e   Jan Kara   mbcache2: reimple...
137
138
139
140
141
142
143
144
145
  			atomic_inc(&entry->e_refcnt);
  			goto out;
  		}
  		node = node->next;
  	}
  	entry = NULL;
  out:
  	hlist_bl_unlock(head);
  	if (old_entry)
7a2508e1b   Jan Kara   mbcache2: rename ...
146
  		mb_cache_entry_put(cache, old_entry);
f9a61eb4e   Jan Kara   mbcache2: reimple...
147
148
149
150
151
  
  	return entry;
  }
  
  /*
b649668c0   Eric Biggers   mbcache: document...
152
   * mb_cache_entry_find_first - find the first reusable entry with the given key
f9a61eb4e   Jan Kara   mbcache2: reimple...
153
154
155
   * @cache: cache where we should search
   * @key: key to look for
   *
b649668c0   Eric Biggers   mbcache: document...
156
157
   * Search in @cache for a reusable entry with key @key. Grabs reference to the
   * first reusable entry found and returns the entry.
f9a61eb4e   Jan Kara   mbcache2: reimple...
158
   */
7a2508e1b   Jan Kara   mbcache2: rename ...
159
160
  struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache,
  						 u32 key)
f9a61eb4e   Jan Kara   mbcache2: reimple...
161
162
163
  {
  	return __entry_find(cache, NULL, key);
  }
7a2508e1b   Jan Kara   mbcache2: rename ...
164
  EXPORT_SYMBOL(mb_cache_entry_find_first);
f9a61eb4e   Jan Kara   mbcache2: reimple...
165
166
  
  /*
b649668c0   Eric Biggers   mbcache: document...
167
   * mb_cache_entry_find_next - find next reusable entry with the same key
f9a61eb4e   Jan Kara   mbcache2: reimple...
168
169
170
   * @cache: cache where we should search
   * @entry: entry to start search from
   *
b649668c0   Eric Biggers   mbcache: document...
171
172
173
174
   * Finds next reusable entry in the hash chain which has the same key as @entry.
   * If @entry is unhashed (which can happen when deletion of entry races with the
   * search), finds the first reusable entry in the hash chain. The function drops
   * reference to @entry and returns with a reference to the found entry.
f9a61eb4e   Jan Kara   mbcache2: reimple...
175
   */
7a2508e1b   Jan Kara   mbcache2: rename ...
176
177
  struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache *cache,
  						struct mb_cache_entry *entry)
f9a61eb4e   Jan Kara   mbcache2: reimple...
178
179
180
  {
  	return __entry_find(cache, entry, entry->e_key);
  }
7a2508e1b   Jan Kara   mbcache2: rename ...
181
  EXPORT_SYMBOL(mb_cache_entry_find_next);
f9a61eb4e   Jan Kara   mbcache2: reimple...
182

6048c64b2   Andreas Gruenbacher   mbcache: add reus...
183
  /*
c07dfcb45   Tahsin Erdogan   mbcache: make mbc...
184
   * mb_cache_entry_get - get a cache entry by value (and key)
6048c64b2   Andreas Gruenbacher   mbcache: add reus...
185
   * @cache - cache we work with
c07dfcb45   Tahsin Erdogan   mbcache: make mbc...
186
187
   * @key - key
   * @value - value
6048c64b2   Andreas Gruenbacher   mbcache: add reus...
188
189
   */
  struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
c07dfcb45   Tahsin Erdogan   mbcache: make mbc...
190
  					  u64 value)
6048c64b2   Andreas Gruenbacher   mbcache: add reus...
191
192
193
194
195
196
197
198
  {
  	struct hlist_bl_node *node;
  	struct hlist_bl_head *head;
  	struct mb_cache_entry *entry;
  
  	head = mb_cache_entry_head(cache, key);
  	hlist_bl_lock(head);
  	hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
c07dfcb45   Tahsin Erdogan   mbcache: make mbc...
199
  		if (entry->e_key == key && entry->e_value == value) {
6048c64b2   Andreas Gruenbacher   mbcache: add reus...
200
201
202
203
204
205
206
207
208
209
  			atomic_inc(&entry->e_refcnt);
  			goto out;
  		}
  	}
  	entry = NULL;
  out:
  	hlist_bl_unlock(head);
  	return entry;
  }
  EXPORT_SYMBOL(mb_cache_entry_get);
c07dfcb45   Tahsin Erdogan   mbcache: make mbc...
210
  /* mb_cache_entry_delete - remove a cache entry
f9a61eb4e   Jan Kara   mbcache2: reimple...
211
   * @cache - cache we work with
c07dfcb45   Tahsin Erdogan   mbcache: make mbc...
212
213
   * @key - key
   * @value - value
f9a61eb4e   Jan Kara   mbcache2: reimple...
214
   *
c07dfcb45   Tahsin Erdogan   mbcache: make mbc...
215
   * Remove entry from cache @cache with key @key and value @value.
f9a61eb4e   Jan Kara   mbcache2: reimple...
216
   */
c07dfcb45   Tahsin Erdogan   mbcache: make mbc...
217
  void mb_cache_entry_delete(struct mb_cache *cache, u32 key, u64 value)
f9a61eb4e   Jan Kara   mbcache2: reimple...
218
219
220
  {
  	struct hlist_bl_node *node;
  	struct hlist_bl_head *head;
7a2508e1b   Jan Kara   mbcache2: rename ...
221
  	struct mb_cache_entry *entry;
f9a61eb4e   Jan Kara   mbcache2: reimple...
222

dc8d5e565   Andreas Gruenbacher   mbcache: get rid ...
223
  	head = mb_cache_entry_head(cache, key);
f9a61eb4e   Jan Kara   mbcache2: reimple...
224
225
  	hlist_bl_lock(head);
  	hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
c07dfcb45   Tahsin Erdogan   mbcache: make mbc...
226
  		if (entry->e_key == key && entry->e_value == value) {
f9a61eb4e   Jan Kara   mbcache2: reimple...
227
228
229
  			/* We keep hash list reference to keep entry alive */
  			hlist_bl_del_init(&entry->e_hash_list);
  			hlist_bl_unlock(head);
f0c8b4623   Jan Kara   mbcache2: Use ref...
230
231
232
  			spin_lock(&cache->c_list_lock);
  			if (!list_empty(&entry->e_list)) {
  				list_del_init(&entry->e_list);
f9a61eb4e   Jan Kara   mbcache2: reimple...
233
234
235
  				cache->c_entry_count--;
  				atomic_dec(&entry->e_refcnt);
  			}
f0c8b4623   Jan Kara   mbcache2: Use ref...
236
  			spin_unlock(&cache->c_list_lock);
7a2508e1b   Jan Kara   mbcache2: rename ...
237
  			mb_cache_entry_put(cache, entry);
f9a61eb4e   Jan Kara   mbcache2: reimple...
238
239
240
241
242
  			return;
  		}
  	}
  	hlist_bl_unlock(head);
  }
c07dfcb45   Tahsin Erdogan   mbcache: make mbc...
243
  EXPORT_SYMBOL(mb_cache_entry_delete);
f9a61eb4e   Jan Kara   mbcache2: reimple...
244

7a2508e1b   Jan Kara   mbcache2: rename ...
245
  /* mb_cache_entry_touch - cache entry got used
f9a61eb4e   Jan Kara   mbcache2: reimple...
246
247
248
   * @cache - cache the entry belongs to
   * @entry - entry that got used
   *
f0c8b4623   Jan Kara   mbcache2: Use ref...
249
   * Marks entry as used to give hit higher chances of surviving in cache.
f9a61eb4e   Jan Kara   mbcache2: reimple...
250
   */
7a2508e1b   Jan Kara   mbcache2: rename ...
251
252
  void mb_cache_entry_touch(struct mb_cache *cache,
  			  struct mb_cache_entry *entry)
f9a61eb4e   Jan Kara   mbcache2: reimple...
253
  {
dc8d5e565   Andreas Gruenbacher   mbcache: get rid ...
254
  	entry->e_referenced = 1;
f9a61eb4e   Jan Kara   mbcache2: reimple...
255
  }
7a2508e1b   Jan Kara   mbcache2: rename ...
256
  EXPORT_SYMBOL(mb_cache_entry_touch);
f9a61eb4e   Jan Kara   mbcache2: reimple...
257

7a2508e1b   Jan Kara   mbcache2: rename ...
258
259
  static unsigned long mb_cache_count(struct shrinker *shrink,
  				    struct shrink_control *sc)
f9a61eb4e   Jan Kara   mbcache2: reimple...
260
  {
7a2508e1b   Jan Kara   mbcache2: rename ...
261
262
  	struct mb_cache *cache = container_of(shrink, struct mb_cache,
  					      c_shrink);
f9a61eb4e   Jan Kara   mbcache2: reimple...
263
264
265
266
267
  
  	return cache->c_entry_count;
  }
  
  /* Shrink number of entries in cache */
7a2508e1b   Jan Kara   mbcache2: rename ...
268
  static unsigned long mb_cache_shrink(struct mb_cache *cache,
132d4e2d5   Eric Biggers   mbcache: use cons...
269
  				     unsigned long nr_to_scan)
f9a61eb4e   Jan Kara   mbcache2: reimple...
270
  {
7a2508e1b   Jan Kara   mbcache2: rename ...
271
  	struct mb_cache_entry *entry;
f9a61eb4e   Jan Kara   mbcache2: reimple...
272
  	struct hlist_bl_head *head;
132d4e2d5   Eric Biggers   mbcache: use cons...
273
  	unsigned long shrunk = 0;
f9a61eb4e   Jan Kara   mbcache2: reimple...
274

f0c8b4623   Jan Kara   mbcache2: Use ref...
275
276
277
  	spin_lock(&cache->c_list_lock);
  	while (nr_to_scan-- && !list_empty(&cache->c_list)) {
  		entry = list_first_entry(&cache->c_list,
7a2508e1b   Jan Kara   mbcache2: rename ...
278
  					 struct mb_cache_entry, e_list);
dc8d5e565   Andreas Gruenbacher   mbcache: get rid ...
279
280
  		if (entry->e_referenced) {
  			entry->e_referenced = 0;
918b7306e   Eric Biggers   mbcache: correctl...
281
  			list_move_tail(&entry->e_list, &cache->c_list);
f0c8b4623   Jan Kara   mbcache2: Use ref...
282
283
284
  			continue;
  		}
  		list_del_init(&entry->e_list);
f9a61eb4e   Jan Kara   mbcache2: reimple...
285
286
287
288
289
  		cache->c_entry_count--;
  		/*
  		 * We keep LRU list reference so that entry doesn't go away
  		 * from under us.
  		 */
f0c8b4623   Jan Kara   mbcache2: Use ref...
290
  		spin_unlock(&cache->c_list_lock);
dc8d5e565   Andreas Gruenbacher   mbcache: get rid ...
291
  		head = mb_cache_entry_head(cache, entry->e_key);
f9a61eb4e   Jan Kara   mbcache2: reimple...
292
293
294
295
296
297
  		hlist_bl_lock(head);
  		if (!hlist_bl_unhashed(&entry->e_hash_list)) {
  			hlist_bl_del_init(&entry->e_hash_list);
  			atomic_dec(&entry->e_refcnt);
  		}
  		hlist_bl_unlock(head);
7a2508e1b   Jan Kara   mbcache2: rename ...
298
  		if (mb_cache_entry_put(cache, entry))
f9a61eb4e   Jan Kara   mbcache2: reimple...
299
300
  			shrunk++;
  		cond_resched();
f0c8b4623   Jan Kara   mbcache2: Use ref...
301
  		spin_lock(&cache->c_list_lock);
f9a61eb4e   Jan Kara   mbcache2: reimple...
302
  	}
f0c8b4623   Jan Kara   mbcache2: Use ref...
303
  	spin_unlock(&cache->c_list_lock);
f9a61eb4e   Jan Kara   mbcache2: reimple...
304
305
306
  
  	return shrunk;
  }
7a2508e1b   Jan Kara   mbcache2: rename ...
307
308
  static unsigned long mb_cache_scan(struct shrinker *shrink,
  				   struct shrink_control *sc)
c2f3140fe   Jan Kara   mbcache2: limit c...
309
  {
7a2508e1b   Jan Kara   mbcache2: rename ...
310
  	struct mb_cache *cache = container_of(shrink, struct mb_cache,
c2f3140fe   Jan Kara   mbcache2: limit c...
311
  					      c_shrink);
132d4e2d5   Eric Biggers   mbcache: use cons...
312
  	return mb_cache_shrink(cache, sc->nr_to_scan);
c2f3140fe   Jan Kara   mbcache2: limit c...
313
314
315
316
  }
  
  /* We shrink 1/X of the cache when we have too many entries in it */
  #define SHRINK_DIVISOR 16
7a2508e1b   Jan Kara   mbcache2: rename ...
317
  static void mb_cache_shrink_worker(struct work_struct *work)
c2f3140fe   Jan Kara   mbcache2: limit c...
318
  {
7a2508e1b   Jan Kara   mbcache2: rename ...
319
320
321
  	struct mb_cache *cache = container_of(work, struct mb_cache,
  					      c_shrink_work);
  	mb_cache_shrink(cache, cache->c_max_entries / SHRINK_DIVISOR);
c2f3140fe   Jan Kara   mbcache2: limit c...
322
  }
f9a61eb4e   Jan Kara   mbcache2: reimple...
323
  /*
7a2508e1b   Jan Kara   mbcache2: rename ...
324
   * mb_cache_create - create cache
f9a61eb4e   Jan Kara   mbcache2: reimple...
325
326
327
328
   * @bucket_bits: log2 of the hash table size
   *
   * Create cache for keys with 2^bucket_bits hash entries.
   */
7a2508e1b   Jan Kara   mbcache2: rename ...
329
  struct mb_cache *mb_cache_create(int bucket_bits)
f9a61eb4e   Jan Kara   mbcache2: reimple...
330
  {
7a2508e1b   Jan Kara   mbcache2: rename ...
331
  	struct mb_cache *cache;
132d4e2d5   Eric Biggers   mbcache: use cons...
332
333
  	unsigned long bucket_count = 1UL << bucket_bits;
  	unsigned long i;
f9a61eb4e   Jan Kara   mbcache2: reimple...
334

7a2508e1b   Jan Kara   mbcache2: rename ...
335
  	cache = kzalloc(sizeof(struct mb_cache), GFP_KERNEL);
f9a61eb4e   Jan Kara   mbcache2: reimple...
336
337
338
  	if (!cache)
  		goto err_out;
  	cache->c_bucket_bits = bucket_bits;
c2f3140fe   Jan Kara   mbcache2: limit c...
339
  	cache->c_max_entries = bucket_count << 4;
f0c8b4623   Jan Kara   mbcache2: Use ref...
340
341
  	INIT_LIST_HEAD(&cache->c_list);
  	spin_lock_init(&cache->c_list_lock);
f9a61eb4e   Jan Kara   mbcache2: reimple...
342
343
344
345
346
347
348
349
  	cache->c_hash = kmalloc(bucket_count * sizeof(struct hlist_bl_head),
  				GFP_KERNEL);
  	if (!cache->c_hash) {
  		kfree(cache);
  		goto err_out;
  	}
  	for (i = 0; i < bucket_count; i++)
  		INIT_HLIST_BL_HEAD(&cache->c_hash[i]);
7a2508e1b   Jan Kara   mbcache2: rename ...
350
351
  	cache->c_shrink.count_objects = mb_cache_count;
  	cache->c_shrink.scan_objects = mb_cache_scan;
f9a61eb4e   Jan Kara   mbcache2: reimple...
352
  	cache->c_shrink.seeks = DEFAULT_SEEKS;
8913f343c   Chao Yu   mbcache: fix to d...
353
354
355
356
357
  	if (register_shrinker(&cache->c_shrink)) {
  		kfree(cache->c_hash);
  		kfree(cache);
  		goto err_out;
  	}
f9a61eb4e   Jan Kara   mbcache2: reimple...
358

7a2508e1b   Jan Kara   mbcache2: rename ...
359
  	INIT_WORK(&cache->c_shrink_work, mb_cache_shrink_worker);
c2f3140fe   Jan Kara   mbcache2: limit c...
360

f9a61eb4e   Jan Kara   mbcache2: reimple...
361
362
363
  	return cache;
  
  err_out:
f9a61eb4e   Jan Kara   mbcache2: reimple...
364
365
  	return NULL;
  }
7a2508e1b   Jan Kara   mbcache2: rename ...
366
  EXPORT_SYMBOL(mb_cache_create);
f9a61eb4e   Jan Kara   mbcache2: reimple...
367
368
  
  /*
7a2508e1b   Jan Kara   mbcache2: rename ...
369
   * mb_cache_destroy - destroy cache
f9a61eb4e   Jan Kara   mbcache2: reimple...
370
371
372
373
374
   * @cache: the cache to destroy
   *
   * Free all entries in cache and cache itself. Caller must make sure nobody
   * (except shrinker) can reach @cache when calling this.
   */
7a2508e1b   Jan Kara   mbcache2: rename ...
375
  void mb_cache_destroy(struct mb_cache *cache)
f9a61eb4e   Jan Kara   mbcache2: reimple...
376
  {
7a2508e1b   Jan Kara   mbcache2: rename ...
377
  	struct mb_cache_entry *entry, *next;
f9a61eb4e   Jan Kara   mbcache2: reimple...
378
379
380
381
382
383
384
  
  	unregister_shrinker(&cache->c_shrink);
  
  	/*
  	 * We don't bother with any locking. Cache must not be used at this
  	 * point.
  	 */
f0c8b4623   Jan Kara   mbcache2: Use ref...
385
  	list_for_each_entry_safe(entry, next, &cache->c_list, e_list) {
f9a61eb4e   Jan Kara   mbcache2: reimple...
386
387
388
389
390
  		if (!hlist_bl_unhashed(&entry->e_hash_list)) {
  			hlist_bl_del_init(&entry->e_hash_list);
  			atomic_dec(&entry->e_refcnt);
  		} else
  			WARN_ON(1);
f0c8b4623   Jan Kara   mbcache2: Use ref...
391
  		list_del(&entry->e_list);
f9a61eb4e   Jan Kara   mbcache2: reimple...
392
  		WARN_ON(atomic_read(&entry->e_refcnt) != 1);
7a2508e1b   Jan Kara   mbcache2: rename ...
393
  		mb_cache_entry_put(cache, entry);
f9a61eb4e   Jan Kara   mbcache2: reimple...
394
395
396
  	}
  	kfree(cache->c_hash);
  	kfree(cache);
f9a61eb4e   Jan Kara   mbcache2: reimple...
397
  }
7a2508e1b   Jan Kara   mbcache2: rename ...
398
  EXPORT_SYMBOL(mb_cache_destroy);
f9a61eb4e   Jan Kara   mbcache2: reimple...
399

7a2508e1b   Jan Kara   mbcache2: rename ...
400
  static int __init mbcache_init(void)
f9a61eb4e   Jan Kara   mbcache2: reimple...
401
  {
7a2508e1b   Jan Kara   mbcache2: rename ...
402
403
  	mb_entry_cache = kmem_cache_create("mbcache",
  				sizeof(struct mb_cache_entry), 0,
f9a61eb4e   Jan Kara   mbcache2: reimple...
404
  				SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
21d0f4fa8   Eric Biggers   mbcache: don't BU...
405
406
  	if (!mb_entry_cache)
  		return -ENOMEM;
f9a61eb4e   Jan Kara   mbcache2: reimple...
407
408
  	return 0;
  }
7a2508e1b   Jan Kara   mbcache2: rename ...
409
  static void __exit mbcache_exit(void)
f9a61eb4e   Jan Kara   mbcache2: reimple...
410
  {
7a2508e1b   Jan Kara   mbcache2: rename ...
411
  	kmem_cache_destroy(mb_entry_cache);
f9a61eb4e   Jan Kara   mbcache2: reimple...
412
  }
7a2508e1b   Jan Kara   mbcache2: rename ...
413
414
  module_init(mbcache_init)
  module_exit(mbcache_exit)
f9a61eb4e   Jan Kara   mbcache2: reimple...
415
416
417
418
  
  MODULE_AUTHOR("Jan Kara <jack@suse.cz>");
  MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
  MODULE_LICENSE("GPL");