/*
 * fs/mbcache.c
 *
 * Meta block cache for extended attribute blocks (used by ext2/ext4).
 */
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/list_bl.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/mbcache.h>
  
/*
 * Mbcache is a simple key-value store. Keys need not be unique, however
 * key-value pairs are expected to be unique (we use this fact in
 * mb_cache_entry_delete_block()).
 *
 * Ext2 and ext4 use this cache for deduplication of extended attribute blocks.
 * They use hash of a block contents as a key and block number as a value.
 * That's why keys need not be unique (different xattr blocks may end up having
 * the same hash). However block number always uniquely identifies a cache
 * entry.
 *
 * We provide functions for creation and removal of entries, search by key,
 * and a special "delete entry with given key-value pair" operation. Fixed
 * size hash table is used for fast key lookups.
 */
7a2508e1b   Jan Kara   mbcache2: rename ...
25
  struct mb_cache {
f9a61eb4e   Jan Kara   mbcache2: reimple...
26
27
28
29
  	/* Hash table of entries */
  	struct hlist_bl_head	*c_hash;
  	/* log2 of hash table size */
  	int			c_bucket_bits;
c2f3140fe   Jan Kara   mbcache2: limit c...
30
31
  	/* Maximum entries in cache to avoid degrading hash too much */
  	int			c_max_entries;
f0c8b4623   Jan Kara   mbcache2: Use ref...
32
33
34
  	/* Protects c_list, c_entry_count */
  	spinlock_t		c_list_lock;
  	struct list_head	c_list;
f9a61eb4e   Jan Kara   mbcache2: reimple...
35
36
37
  	/* Number of entries in cache */
  	unsigned long		c_entry_count;
  	struct shrinker		c_shrink;
c2f3140fe   Jan Kara   mbcache2: limit c...
38
39
  	/* Work for shrinking when the cache has too many entries */
  	struct work_struct	c_shrink_work;
f9a61eb4e   Jan Kara   mbcache2: reimple...
40
  };
7a2508e1b   Jan Kara   mbcache2: rename ...
41
  static struct kmem_cache *mb_entry_cache;
f9a61eb4e   Jan Kara   mbcache2: reimple...
42

7a2508e1b   Jan Kara   mbcache2: rename ...
43
44
  static unsigned long mb_cache_shrink(struct mb_cache *cache,
  				     unsigned int nr_to_scan);
c2f3140fe   Jan Kara   mbcache2: limit c...
45

dc8d5e565   Andreas Gruenbacher   mbcache: get rid ...
46
47
  static inline struct hlist_bl_head *mb_cache_entry_head(struct mb_cache *cache,
  							u32 key)
f0c8b4623   Jan Kara   mbcache2: Use ref...
48
  {
dc8d5e565   Andreas Gruenbacher   mbcache: get rid ...
49
  	return &cache->c_hash[hash_32(key, cache->c_bucket_bits)];
f0c8b4623   Jan Kara   mbcache2: Use ref...
50
  }
c2f3140fe   Jan Kara   mbcache2: limit c...
51
52
53
54
55
  /*
   * Number of entries to reclaim synchronously when there are too many entries
   * in cache
   */
  #define SYNC_SHRINK_BATCH 64
f9a61eb4e   Jan Kara   mbcache2: reimple...
56
  /*
7a2508e1b   Jan Kara   mbcache2: rename ...
57
   * mb_cache_entry_create - create entry in cache
f9a61eb4e   Jan Kara   mbcache2: reimple...
58
59
60
61
   * @cache - cache where the entry should be created
   * @mask - gfp mask with which the entry should be allocated
   * @key - key of the entry
   * @block - block that contains data
6048c64b2   Andreas Gruenbacher   mbcache: add reus...
62
   * @reusable - is the block reusable by other inodes?
f9a61eb4e   Jan Kara   mbcache2: reimple...
63
64
65
66
67
   *
   * Creates entry in @cache with key @key and records that data is stored in
   * block @block. The function returns -EBUSY if entry with the same key
   * and for the same block already exists in cache. Otherwise 0 is returned.
   */
7a2508e1b   Jan Kara   mbcache2: rename ...
68
  int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
6048c64b2   Andreas Gruenbacher   mbcache: add reus...
69
  			  sector_t block, bool reusable)
f9a61eb4e   Jan Kara   mbcache2: reimple...
70
  {
7a2508e1b   Jan Kara   mbcache2: rename ...
71
  	struct mb_cache_entry *entry, *dup;
f9a61eb4e   Jan Kara   mbcache2: reimple...
72
73
  	struct hlist_bl_node *dup_node;
  	struct hlist_bl_head *head;
c2f3140fe   Jan Kara   mbcache2: limit c...
74
75
76
77
78
  	/* Schedule background reclaim if there are too many entries */
  	if (cache->c_entry_count >= cache->c_max_entries)
  		schedule_work(&cache->c_shrink_work);
  	/* Do some sync reclaim if background reclaim cannot keep up */
  	if (cache->c_entry_count >= 2*cache->c_max_entries)
7a2508e1b   Jan Kara   mbcache2: rename ...
79
  		mb_cache_shrink(cache, SYNC_SHRINK_BATCH);
c2f3140fe   Jan Kara   mbcache2: limit c...
80

7a2508e1b   Jan Kara   mbcache2: rename ...
81
  	entry = kmem_cache_alloc(mb_entry_cache, mask);
f9a61eb4e   Jan Kara   mbcache2: reimple...
82
83
  	if (!entry)
  		return -ENOMEM;
f0c8b4623   Jan Kara   mbcache2: Use ref...
84
  	INIT_LIST_HEAD(&entry->e_list);
f9a61eb4e   Jan Kara   mbcache2: reimple...
85
86
87
88
  	/* One ref for hash, one ref returned */
  	atomic_set(&entry->e_refcnt, 1);
  	entry->e_key = key;
  	entry->e_block = block;
6048c64b2   Andreas Gruenbacher   mbcache: add reus...
89
  	entry->e_reusable = reusable;
dc8d5e565   Andreas Gruenbacher   mbcache: get rid ...
90
  	head = mb_cache_entry_head(cache, key);
f9a61eb4e   Jan Kara   mbcache2: reimple...
91
92
93
94
  	hlist_bl_lock(head);
  	hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) {
  		if (dup->e_key == key && dup->e_block == block) {
  			hlist_bl_unlock(head);
7a2508e1b   Jan Kara   mbcache2: rename ...
95
  			kmem_cache_free(mb_entry_cache, entry);
f9a61eb4e   Jan Kara   mbcache2: reimple...
96
97
98
99
100
  			return -EBUSY;
  		}
  	}
  	hlist_bl_add_head(&entry->e_hash_list, head);
  	hlist_bl_unlock(head);
f0c8b4623   Jan Kara   mbcache2: Use ref...
101
102
  	spin_lock(&cache->c_list_lock);
  	list_add_tail(&entry->e_list, &cache->c_list);
f9a61eb4e   Jan Kara   mbcache2: reimple...
103
104
105
  	/* Grab ref for LRU list */
  	atomic_inc(&entry->e_refcnt);
  	cache->c_entry_count++;
f0c8b4623   Jan Kara   mbcache2: Use ref...
106
  	spin_unlock(&cache->c_list_lock);
f9a61eb4e   Jan Kara   mbcache2: reimple...
107
108
109
  
  	return 0;
  }
7a2508e1b   Jan Kara   mbcache2: rename ...
110
  EXPORT_SYMBOL(mb_cache_entry_create);
f9a61eb4e   Jan Kara   mbcache2: reimple...
111

7a2508e1b   Jan Kara   mbcache2: rename ...
112
  void __mb_cache_entry_free(struct mb_cache_entry *entry)
f9a61eb4e   Jan Kara   mbcache2: reimple...
113
  {
7a2508e1b   Jan Kara   mbcache2: rename ...
114
  	kmem_cache_free(mb_entry_cache, entry);
f9a61eb4e   Jan Kara   mbcache2: reimple...
115
  }
7a2508e1b   Jan Kara   mbcache2: rename ...
116
  EXPORT_SYMBOL(__mb_cache_entry_free);
f9a61eb4e   Jan Kara   mbcache2: reimple...
117

7a2508e1b   Jan Kara   mbcache2: rename ...
118
119
120
  static struct mb_cache_entry *__entry_find(struct mb_cache *cache,
  					   struct mb_cache_entry *entry,
  					   u32 key)
f9a61eb4e   Jan Kara   mbcache2: reimple...
121
  {
7a2508e1b   Jan Kara   mbcache2: rename ...
122
  	struct mb_cache_entry *old_entry = entry;
f9a61eb4e   Jan Kara   mbcache2: reimple...
123
124
  	struct hlist_bl_node *node;
  	struct hlist_bl_head *head;
dc8d5e565   Andreas Gruenbacher   mbcache: get rid ...
125
  	head = mb_cache_entry_head(cache, key);
f9a61eb4e   Jan Kara   mbcache2: reimple...
126
127
128
129
130
131
  	hlist_bl_lock(head);
  	if (entry && !hlist_bl_unhashed(&entry->e_hash_list))
  		node = entry->e_hash_list.next;
  	else
  		node = hlist_bl_first(head);
  	while (node) {
7a2508e1b   Jan Kara   mbcache2: rename ...
132
  		entry = hlist_bl_entry(node, struct mb_cache_entry,
f9a61eb4e   Jan Kara   mbcache2: reimple...
133
  				       e_hash_list);
6048c64b2   Andreas Gruenbacher   mbcache: add reus...
134
  		if (entry->e_key == key && entry->e_reusable) {
f9a61eb4e   Jan Kara   mbcache2: reimple...
135
136
137
138
139
140
141
142
143
  			atomic_inc(&entry->e_refcnt);
  			goto out;
  		}
  		node = node->next;
  	}
  	entry = NULL;
  out:
  	hlist_bl_unlock(head);
  	if (old_entry)
7a2508e1b   Jan Kara   mbcache2: rename ...
144
  		mb_cache_entry_put(cache, old_entry);
f9a61eb4e   Jan Kara   mbcache2: reimple...
145
146
147
148
149
  
  	return entry;
  }
  
  /*
7a2508e1b   Jan Kara   mbcache2: rename ...
150
   * mb_cache_entry_find_first - find the first entry in cache with given key
f9a61eb4e   Jan Kara   mbcache2: reimple...
151
152
153
154
155
156
   * @cache: cache where we should search
   * @key: key to look for
   *
   * Search in @cache for entry with key @key. Grabs reference to the first
   * entry found and returns the entry.
   */
7a2508e1b   Jan Kara   mbcache2: rename ...
157
158
  struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache,
  						 u32 key)
f9a61eb4e   Jan Kara   mbcache2: reimple...
159
160
161
  {
  	return __entry_find(cache, NULL, key);
  }
7a2508e1b   Jan Kara   mbcache2: rename ...
162
  EXPORT_SYMBOL(mb_cache_entry_find_first);
f9a61eb4e   Jan Kara   mbcache2: reimple...
163
164
  
  /*
7a2508e1b   Jan Kara   mbcache2: rename ...
165
   * mb_cache_entry_find_next - find next entry in cache with the same
f9a61eb4e   Jan Kara   mbcache2: reimple...
166
167
168
169
170
171
172
173
   * @cache: cache where we should search
   * @entry: entry to start search from
   *
   * Finds next entry in the hash chain which has the same key as @entry.
   * If @entry is unhashed (which can happen when deletion of entry races
   * with the search), finds the first entry in the hash chain. The function
   * drops reference to @entry and returns with a reference to the found entry.
   */
7a2508e1b   Jan Kara   mbcache2: rename ...
174
175
  struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache *cache,
  						struct mb_cache_entry *entry)
f9a61eb4e   Jan Kara   mbcache2: reimple...
176
177
178
  {
  	return __entry_find(cache, entry, entry->e_key);
  }
7a2508e1b   Jan Kara   mbcache2: rename ...
179
  EXPORT_SYMBOL(mb_cache_entry_find_next);
f9a61eb4e   Jan Kara   mbcache2: reimple...
180

6048c64b2   Andreas Gruenbacher   mbcache: add reus...
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
  /*
   * mb_cache_entry_get - get a cache entry by block number (and key)
   * @cache - cache we work with
   * @key - key of block number @block
   * @block - block number
   */
  struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
  					  sector_t block)
  {
  	struct hlist_bl_node *node;
  	struct hlist_bl_head *head;
  	struct mb_cache_entry *entry;
  
  	head = mb_cache_entry_head(cache, key);
  	hlist_bl_lock(head);
  	hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
  		if (entry->e_key == key && entry->e_block == block) {
  			atomic_inc(&entry->e_refcnt);
  			goto out;
  		}
  	}
  	entry = NULL;
  out:
  	hlist_bl_unlock(head);
  	return entry;
  }
  EXPORT_SYMBOL(mb_cache_entry_get);
7a2508e1b   Jan Kara   mbcache2: rename ...
208
  /* mb_cache_entry_delete_block - remove information about block from cache
f9a61eb4e   Jan Kara   mbcache2: reimple...
209
   * @cache - cache we work with
6048c64b2   Andreas Gruenbacher   mbcache: add reus...
210
211
   * @key - key of block @block
   * @block - block number
f9a61eb4e   Jan Kara   mbcache2: reimple...
212
213
214
   *
   * Remove entry from cache @cache with key @key with data stored in @block.
   */
7a2508e1b   Jan Kara   mbcache2: rename ...
215
216
  void mb_cache_entry_delete_block(struct mb_cache *cache, u32 key,
  				 sector_t block)
f9a61eb4e   Jan Kara   mbcache2: reimple...
217
218
219
  {
  	struct hlist_bl_node *node;
  	struct hlist_bl_head *head;
7a2508e1b   Jan Kara   mbcache2: rename ...
220
  	struct mb_cache_entry *entry;
f9a61eb4e   Jan Kara   mbcache2: reimple...
221

dc8d5e565   Andreas Gruenbacher   mbcache: get rid ...
222
  	head = mb_cache_entry_head(cache, key);
f9a61eb4e   Jan Kara   mbcache2: reimple...
223
224
225
226
227
228
  	hlist_bl_lock(head);
  	hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
  		if (entry->e_key == key && entry->e_block == block) {
  			/* We keep hash list reference to keep entry alive */
  			hlist_bl_del_init(&entry->e_hash_list);
  			hlist_bl_unlock(head);
f0c8b4623   Jan Kara   mbcache2: Use ref...
229
230
231
  			spin_lock(&cache->c_list_lock);
  			if (!list_empty(&entry->e_list)) {
  				list_del_init(&entry->e_list);
f9a61eb4e   Jan Kara   mbcache2: reimple...
232
233
234
  				cache->c_entry_count--;
  				atomic_dec(&entry->e_refcnt);
  			}
f0c8b4623   Jan Kara   mbcache2: Use ref...
235
  			spin_unlock(&cache->c_list_lock);
7a2508e1b   Jan Kara   mbcache2: rename ...
236
  			mb_cache_entry_put(cache, entry);
f9a61eb4e   Jan Kara   mbcache2: reimple...
237
238
239
240
241
  			return;
  		}
  	}
  	hlist_bl_unlock(head);
  }
7a2508e1b   Jan Kara   mbcache2: rename ...
242
  EXPORT_SYMBOL(mb_cache_entry_delete_block);
f9a61eb4e   Jan Kara   mbcache2: reimple...
243

7a2508e1b   Jan Kara   mbcache2: rename ...
244
  /* mb_cache_entry_touch - cache entry got used
f9a61eb4e   Jan Kara   mbcache2: reimple...
245
246
247
   * @cache - cache the entry belongs to
   * @entry - entry that got used
   *
f0c8b4623   Jan Kara   mbcache2: Use ref...
248
   * Marks entry as used to give hit higher chances of surviving in cache.
f9a61eb4e   Jan Kara   mbcache2: reimple...
249
   */
7a2508e1b   Jan Kara   mbcache2: rename ...
250
251
  void mb_cache_entry_touch(struct mb_cache *cache,
  			  struct mb_cache_entry *entry)
f9a61eb4e   Jan Kara   mbcache2: reimple...
252
  {
dc8d5e565   Andreas Gruenbacher   mbcache: get rid ...
253
  	entry->e_referenced = 1;
f9a61eb4e   Jan Kara   mbcache2: reimple...
254
  }
7a2508e1b   Jan Kara   mbcache2: rename ...
255
  EXPORT_SYMBOL(mb_cache_entry_touch);
f9a61eb4e   Jan Kara   mbcache2: reimple...
256

7a2508e1b   Jan Kara   mbcache2: rename ...
257
258
  static unsigned long mb_cache_count(struct shrinker *shrink,
  				    struct shrink_control *sc)
f9a61eb4e   Jan Kara   mbcache2: reimple...
259
  {
7a2508e1b   Jan Kara   mbcache2: rename ...
260
261
  	struct mb_cache *cache = container_of(shrink, struct mb_cache,
  					      c_shrink);
f9a61eb4e   Jan Kara   mbcache2: reimple...
262
263
264
265
266
  
  	return cache->c_entry_count;
  }
  
  /* Shrink number of entries in cache */
7a2508e1b   Jan Kara   mbcache2: rename ...
267
268
  static unsigned long mb_cache_shrink(struct mb_cache *cache,
  				     unsigned int nr_to_scan)
f9a61eb4e   Jan Kara   mbcache2: reimple...
269
  {
7a2508e1b   Jan Kara   mbcache2: rename ...
270
  	struct mb_cache_entry *entry;
f9a61eb4e   Jan Kara   mbcache2: reimple...
271
272
  	struct hlist_bl_head *head;
  	unsigned int shrunk = 0;
f0c8b4623   Jan Kara   mbcache2: Use ref...
273
274
275
  	spin_lock(&cache->c_list_lock);
  	while (nr_to_scan-- && !list_empty(&cache->c_list)) {
  		entry = list_first_entry(&cache->c_list,
7a2508e1b   Jan Kara   mbcache2: rename ...
276
  					 struct mb_cache_entry, e_list);
dc8d5e565   Andreas Gruenbacher   mbcache: get rid ...
277
278
  		if (entry->e_referenced) {
  			entry->e_referenced = 0;
f0c8b4623   Jan Kara   mbcache2: Use ref...
279
280
281
282
  			list_move_tail(&cache->c_list, &entry->e_list);
  			continue;
  		}
  		list_del_init(&entry->e_list);
f9a61eb4e   Jan Kara   mbcache2: reimple...
283
284
285
286
287
  		cache->c_entry_count--;
  		/*
  		 * We keep LRU list reference so that entry doesn't go away
  		 * from under us.
  		 */
f0c8b4623   Jan Kara   mbcache2: Use ref...
288
  		spin_unlock(&cache->c_list_lock);
dc8d5e565   Andreas Gruenbacher   mbcache: get rid ...
289
  		head = mb_cache_entry_head(cache, entry->e_key);
f9a61eb4e   Jan Kara   mbcache2: reimple...
290
291
292
293
294
295
  		hlist_bl_lock(head);
  		if (!hlist_bl_unhashed(&entry->e_hash_list)) {
  			hlist_bl_del_init(&entry->e_hash_list);
  			atomic_dec(&entry->e_refcnt);
  		}
  		hlist_bl_unlock(head);
7a2508e1b   Jan Kara   mbcache2: rename ...
296
  		if (mb_cache_entry_put(cache, entry))
f9a61eb4e   Jan Kara   mbcache2: reimple...
297
298
  			shrunk++;
  		cond_resched();
f0c8b4623   Jan Kara   mbcache2: Use ref...
299
  		spin_lock(&cache->c_list_lock);
f9a61eb4e   Jan Kara   mbcache2: reimple...
300
  	}
f0c8b4623   Jan Kara   mbcache2: Use ref...
301
  	spin_unlock(&cache->c_list_lock);
f9a61eb4e   Jan Kara   mbcache2: reimple...
302
303
304
  
  	return shrunk;
  }
7a2508e1b   Jan Kara   mbcache2: rename ...
305
306
  static unsigned long mb_cache_scan(struct shrinker *shrink,
  				   struct shrink_control *sc)
c2f3140fe   Jan Kara   mbcache2: limit c...
307
308
  {
  	int nr_to_scan = sc->nr_to_scan;
7a2508e1b   Jan Kara   mbcache2: rename ...
309
  	struct mb_cache *cache = container_of(shrink, struct mb_cache,
c2f3140fe   Jan Kara   mbcache2: limit c...
310
  					      c_shrink);
7a2508e1b   Jan Kara   mbcache2: rename ...
311
  	return mb_cache_shrink(cache, nr_to_scan);
c2f3140fe   Jan Kara   mbcache2: limit c...
312
313
314
315
  }
  
  /* We shrink 1/X of the cache when we have too many entries in it */
  #define SHRINK_DIVISOR 16
7a2508e1b   Jan Kara   mbcache2: rename ...
316
  static void mb_cache_shrink_worker(struct work_struct *work)
c2f3140fe   Jan Kara   mbcache2: limit c...
317
  {
7a2508e1b   Jan Kara   mbcache2: rename ...
318
319
320
  	struct mb_cache *cache = container_of(work, struct mb_cache,
  					      c_shrink_work);
  	mb_cache_shrink(cache, cache->c_max_entries / SHRINK_DIVISOR);
c2f3140fe   Jan Kara   mbcache2: limit c...
321
  }
f9a61eb4e   Jan Kara   mbcache2: reimple...
322
  /*
7a2508e1b   Jan Kara   mbcache2: rename ...
323
   * mb_cache_create - create cache
f9a61eb4e   Jan Kara   mbcache2: reimple...
324
325
326
327
   * @bucket_bits: log2 of the hash table size
   *
   * Create cache for keys with 2^bucket_bits hash entries.
   */
7a2508e1b   Jan Kara   mbcache2: rename ...
328
  struct mb_cache *mb_cache_create(int bucket_bits)
f9a61eb4e   Jan Kara   mbcache2: reimple...
329
  {
7a2508e1b   Jan Kara   mbcache2: rename ...
330
  	struct mb_cache *cache;
f9a61eb4e   Jan Kara   mbcache2: reimple...
331
332
333
334
335
  	int bucket_count = 1 << bucket_bits;
  	int i;
  
  	if (!try_module_get(THIS_MODULE))
  		return NULL;
7a2508e1b   Jan Kara   mbcache2: rename ...
336
  	cache = kzalloc(sizeof(struct mb_cache), GFP_KERNEL);
f9a61eb4e   Jan Kara   mbcache2: reimple...
337
338
339
  	if (!cache)
  		goto err_out;
  	cache->c_bucket_bits = bucket_bits;
c2f3140fe   Jan Kara   mbcache2: limit c...
340
  	cache->c_max_entries = bucket_count << 4;
f0c8b4623   Jan Kara   mbcache2: Use ref...
341
342
  	INIT_LIST_HEAD(&cache->c_list);
  	spin_lock_init(&cache->c_list_lock);
f9a61eb4e   Jan Kara   mbcache2: reimple...
343
344
345
346
347
348
349
350
  	cache->c_hash = kmalloc(bucket_count * sizeof(struct hlist_bl_head),
  				GFP_KERNEL);
  	if (!cache->c_hash) {
  		kfree(cache);
  		goto err_out;
  	}
  	for (i = 0; i < bucket_count; i++)
  		INIT_HLIST_BL_HEAD(&cache->c_hash[i]);
7a2508e1b   Jan Kara   mbcache2: rename ...
351
352
  	cache->c_shrink.count_objects = mb_cache_count;
  	cache->c_shrink.scan_objects = mb_cache_scan;
f9a61eb4e   Jan Kara   mbcache2: reimple...
353
  	cache->c_shrink.seeks = DEFAULT_SEEKS;
8913f343c   Chao Yu   mbcache: fix to d...
354
355
356
357
358
  	if (register_shrinker(&cache->c_shrink)) {
  		kfree(cache->c_hash);
  		kfree(cache);
  		goto err_out;
  	}
f9a61eb4e   Jan Kara   mbcache2: reimple...
359

7a2508e1b   Jan Kara   mbcache2: rename ...
360
  	INIT_WORK(&cache->c_shrink_work, mb_cache_shrink_worker);
c2f3140fe   Jan Kara   mbcache2: limit c...
361

f9a61eb4e   Jan Kara   mbcache2: reimple...
362
363
364
365
366
367
  	return cache;
  
  err_out:
  	module_put(THIS_MODULE);
  	return NULL;
  }
7a2508e1b   Jan Kara   mbcache2: rename ...
368
  EXPORT_SYMBOL(mb_cache_create);
f9a61eb4e   Jan Kara   mbcache2: reimple...
369
370
  
  /*
7a2508e1b   Jan Kara   mbcache2: rename ...
371
   * mb_cache_destroy - destroy cache
f9a61eb4e   Jan Kara   mbcache2: reimple...
372
373
374
375
376
   * @cache: the cache to destroy
   *
   * Free all entries in cache and cache itself. Caller must make sure nobody
   * (except shrinker) can reach @cache when calling this.
   */
7a2508e1b   Jan Kara   mbcache2: rename ...
377
  void mb_cache_destroy(struct mb_cache *cache)
f9a61eb4e   Jan Kara   mbcache2: reimple...
378
  {
7a2508e1b   Jan Kara   mbcache2: rename ...
379
  	struct mb_cache_entry *entry, *next;
f9a61eb4e   Jan Kara   mbcache2: reimple...
380
381
382
383
384
385
386
  
  	unregister_shrinker(&cache->c_shrink);
  
  	/*
  	 * We don't bother with any locking. Cache must not be used at this
  	 * point.
  	 */
f0c8b4623   Jan Kara   mbcache2: Use ref...
387
  	list_for_each_entry_safe(entry, next, &cache->c_list, e_list) {
f9a61eb4e   Jan Kara   mbcache2: reimple...
388
389
390
391
392
  		if (!hlist_bl_unhashed(&entry->e_hash_list)) {
  			hlist_bl_del_init(&entry->e_hash_list);
  			atomic_dec(&entry->e_refcnt);
  		} else
  			WARN_ON(1);
f0c8b4623   Jan Kara   mbcache2: Use ref...
393
  		list_del(&entry->e_list);
f9a61eb4e   Jan Kara   mbcache2: reimple...
394
  		WARN_ON(atomic_read(&entry->e_refcnt) != 1);
7a2508e1b   Jan Kara   mbcache2: rename ...
395
  		mb_cache_entry_put(cache, entry);
f9a61eb4e   Jan Kara   mbcache2: reimple...
396
397
398
399
400
  	}
  	kfree(cache->c_hash);
  	kfree(cache);
  	module_put(THIS_MODULE);
  }
7a2508e1b   Jan Kara   mbcache2: rename ...
401
  EXPORT_SYMBOL(mb_cache_destroy);
f9a61eb4e   Jan Kara   mbcache2: reimple...
402

7a2508e1b   Jan Kara   mbcache2: rename ...
403
  static int __init mbcache_init(void)
f9a61eb4e   Jan Kara   mbcache2: reimple...
404
  {
7a2508e1b   Jan Kara   mbcache2: rename ...
405
406
  	mb_entry_cache = kmem_cache_create("mbcache",
  				sizeof(struct mb_cache_entry), 0,
f9a61eb4e   Jan Kara   mbcache2: reimple...
407
  				SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
7a2508e1b   Jan Kara   mbcache2: rename ...
408
  	BUG_ON(!mb_entry_cache);
f9a61eb4e   Jan Kara   mbcache2: reimple...
409
410
  	return 0;
  }
7a2508e1b   Jan Kara   mbcache2: rename ...
411
  static void __exit mbcache_exit(void)
f9a61eb4e   Jan Kara   mbcache2: reimple...
412
  {
7a2508e1b   Jan Kara   mbcache2: rename ...
413
  	kmem_cache_destroy(mb_entry_cache);
f9a61eb4e   Jan Kara   mbcache2: reimple...
414
  }
7a2508e1b   Jan Kara   mbcache2: rename ...
415
416
  module_init(mbcache_init)
  module_exit(mbcache_exit)
f9a61eb4e   Jan Kara   mbcache2: reimple...
417
418
419
420
  
  MODULE_AUTHOR("Jan Kara <jack@suse.cz>");
  MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
  MODULE_LICENSE("GPL");