mm/cleancache.c

  /*
   * Cleancache frontend
   *
   * This code provides the generic "frontend" layer to call a matching
   * "backend" driver implementation of cleancache.  See
   * Documentation/vm/cleancache.txt for more information.
   *
   * Copyright (C) 2009-2010 Oracle Corp. All rights reserved.
   * Author: Dan Magenheimer
   *
   * This work is licensed under the terms of the GNU GPL, version 2.
   */
  
  #include <linux/module.h>
  #include <linux/fs.h>
  #include <linux/exportfs.h>
  #include <linux/mm.h>
  #include <linux/debugfs.h>
  #include <linux/cleancache.h>
  
  /*
   * cleancache_ops is set by cleancache_register_ops to contain the pointers
   * to the cleancache "backend" implementation functions.
   */
  static struct cleancache_ops *cleancache_ops __read_mostly;

  /*
   * Counters available via /sys/kernel/debug/cleancache (if debugfs is
   * properly configured).  These are for information only so are not
   * protected against increment races.
   */
  static u64 cleancache_succ_gets;
  static u64 cleancache_failed_gets;
  static u64 cleancache_puts;
  static u64 cleancache_invalidates;
  
  /*
   * When no backend is registered all calls to init_fs and init_shared_fs
   * are recorded and fake poolids (FAKE_FS_POOLID_OFFSET or
   * FAKE_SHARED_FS_POOLID_OFFSET, plus the offset in the respective array
   * [shared_|]fs_poolid_map) are given to the respective super block
   * (sb->cleancache_poolid) and no tmem_pools are created. When a backend
   * registers with cleancache the previous calls to init_fs and init_shared_fs
   * are executed to create tmem_pools and set the respective poolids. While no
   * backend is registered all "puts", "gets" and "flushes" are ignored or fail.
   */
  #define MAX_INITIALIZABLE_FS 32
  #define FAKE_FS_POOLID_OFFSET 1000
  #define FAKE_SHARED_FS_POOLID_OFFSET 2000
  
  #define FS_NO_BACKEND (-1)
  #define FS_UNKNOWN (-2)
  static int fs_poolid_map[MAX_INITIALIZABLE_FS];
  static int shared_fs_poolid_map[MAX_INITIALIZABLE_FS];
  static char *uuids[MAX_INITIALIZABLE_FS];
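
  /*
   * Worked example (illustrative only): with no backend loaded, the first
   * filesystem to mount is assigned slot 0, so __cleancache_init_fs sets
   * sb->cleancache_poolid to FAKE_FS_POOLID_OFFSET + 0 == 1000 and leaves
   * fs_poolid_map[0] == FS_NO_BACKEND. When a backend later registers,
   * fs_poolid_map[0] is replaced by a real poolid from ops->init_fs(), and
   * get_poolid_from_fake(1000) from then on resolves to that real poolid.
   */
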
  /*
   * Mutex for the [shared_|]fs_poolid_map to guard against multiple threads
   * invoking umount (and ending up in __cleancache_invalidate_fs) and multiple
   * threads calling mount (and ending up in __cleancache_init_[shared_|]fs).
   */
  static DEFINE_MUTEX(poolid_mutex);
  /*
   * When cleancache_ops is NULL (the default), all calls to the cleancache
   * functions, except __cleancache_invalidate_fs and
   * __cleancache_init_[shared_|]fs, are guarded by an "if (!cleancache_ops)
   * return" check. This means multiple threads (from different filesystems)
   * will be checking cleancache_ops. Using a plain pointer instead of an
   * atomic_t, or a pointer guarded by a spinlock, is OK - it does not matter
   * if there is a window between the time the backend has been initialized
   * (and cleancache_ops has been set to non-NULL) and the time the
   * filesystems start actually calling the backend. The inverse (when
   * unloading) is obviously not good - but this shim does not do that (yet).
   */
  
  /*
   * The backends and filesystems all work asynchronously. This is because
   * the backends can be built as modules.
   * The usual sequence of events is:
   *	a) mount /	-> __cleancache_init_fs is called. We set the
   *		[shared_|]fs_poolid_map and uuids for the filesystem.
   *
   *	b) user does I/Os -> we call the rest of the __cleancache_*
   *		functions, which return immediately as cleancache_ops is NULL.
   *
   *	c) modprobe zcache -> cleancache_register_ops. We init the backend
   *		and set cleancache_ops, and for any fs_poolid_map entry
   *		(which is set by __cleancache_init_fs) we initialize the poolid.
   *
   *	d) user does I/Os -> now that cleancache_ops is set, all the
   *		__cleancache_* functions can call the backend. They all check
   *		that fs_poolid_map is valid and if so invoke the backend.
   *
   *	e) umount /	-> __cleancache_invalidate_fs, the fs_poolid_map is
   *		reset (which is the second check in the __cleancache_* ops
   *		to call the backend).
   *
   * The sequence of events could also be c), followed by a), d), and e);
   * in that case the replay done in c) is not needed. There is also the
   * chance of c), and one thread doing a) + d), and another doing e). For
   * that case we depend on the filesystem calling __cleancache_invalidate_fs
   * in the proper sequence (so that it handles all I/Os before it
   * invalidates the fs, which is the last part of the unmounting process).
   *
   * Note: The astute reader will notice that there is no "rmmod zcache"
   * case. This is because the functionality for that is not yet implemented
   * and, when done, will require some extra locking not yet devised.
   */
  
  /*
   * Register operations for cleancache, returning the previous ops, thus
   * allowing detection of multiple backends and possible nesting.
   */
  struct cleancache_ops *cleancache_register_ops(struct cleancache_ops *ops)
  {
  	struct cleancache_ops *old = cleancache_ops;
  	int i;

  	mutex_lock(&poolid_mutex);
  	for (i = 0; i < MAX_INITIALIZABLE_FS; i++) {
  		if (fs_poolid_map[i] == FS_NO_BACKEND)
  			fs_poolid_map[i] = ops->init_fs(PAGE_SIZE);
  		if (shared_fs_poolid_map[i] == FS_NO_BACKEND)
  			shared_fs_poolid_map[i] =
  				ops->init_shared_fs(uuids[i], PAGE_SIZE);
  	}
  	/*
  	 * We MUST set cleancache_ops _after_ we have called the backends
  	 * init_fs or init_shared_fs functions. Otherwise the compiler might
  	 * re-order where cleancache_ops is set in this function.
  	 */
  	barrier();
  	cleancache_ops = ops;
  	mutex_unlock(&poolid_mutex);
  	return old;
  }
  EXPORT_SYMBOL(cleancache_register_ops);
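
  /*
   * A minimal sketch of step c) above: a hypothetical backend module
   * registering its callbacks. Illustrative only - the toy_* names are
   * invented, and the remaining callbacks (init_shared_fs, get_page,
   * put_page and the invalidate_* family), which a real backend must
   * supply, are omitted for brevity - but the ops field name and the
   * init_fs signature follow the calls made in this file. init_fs must
   * return a real, non-negative poolid:
   *
   *	static int toy_init_fs(size_t pagesize)
   *	{
   *		return 0;
   *	}
   *
   *	static struct cleancache_ops toy_ops = {
   *		.init_fs = toy_init_fs,
   *	};
   *
   *	static int __init toy_init(void)
   *	{
   *		struct cleancache_ops *old = cleancache_register_ops(&toy_ops);
   *
   *		if (old)
   *			pr_warn("toy: replaced an already-registered backend\n");
   *		return 0;
   *	}
   *	module_init(toy_init);
   */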
  
  /* Called by a cleancache-enabled filesystem at time of mount */
  void __cleancache_init_fs(struct super_block *sb)
  {
  	int i;
  
  	mutex_lock(&poolid_mutex);
  	for (i = 0; i < MAX_INITIALIZABLE_FS; i++) {
  		if (fs_poolid_map[i] == FS_UNKNOWN) {
  			sb->cleancache_poolid = i + FAKE_FS_POOLID_OFFSET;
  			if (cleancache_ops)
  				fs_poolid_map[i] = cleancache_ops->init_fs(PAGE_SIZE);
  			else
  				fs_poolid_map[i] = FS_NO_BACKEND;
  			break;
  		}
  	}
  	mutex_unlock(&poolid_mutex);
  }
  EXPORT_SYMBOL(__cleancache_init_fs);
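
  /*
   * Sketch of the mount-time hook-up, assuming the cleancache_init_fs()
   * wrapper from include/linux/cleancache.h: a cleancache-enabled
   * filesystem opts in from its mount path with
   *
   *	cleancache_init_fs(sb);
   *
   * after which this file either routes the init to a registered backend
   * or records a fake poolid in sb->cleancache_poolid, to be replayed
   * when a backend shows up.
   */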
  
  /* Called by a cleancache-enabled clustered filesystem at time of mount */
  void __cleancache_init_shared_fs(char *uuid, struct super_block *sb)
  {
  	int i;
  
  	mutex_lock(&poolid_mutex);
  	for (i = 0; i < MAX_INITIALIZABLE_FS; i++) {
  		if (shared_fs_poolid_map[i] == FS_UNKNOWN) {
  			sb->cleancache_poolid = i + FAKE_SHARED_FS_POOLID_OFFSET;
  			uuids[i] = uuid;
  			if (cleancache_ops)
  				shared_fs_poolid_map[i] =
  					cleancache_ops->init_shared_fs(uuid, PAGE_SIZE);
  			else
  				shared_fs_poolid_map[i] = FS_NO_BACKEND;
  			break;
  		}
  	}
  	mutex_unlock(&poolid_mutex);
  }
  EXPORT_SYMBOL(__cleancache_init_shared_fs);
  
  /*
   * If the filesystem uses exportable filehandles, use the filehandle as
   * the key, else use the inode number.
   */
  static int cleancache_get_key(struct inode *inode,
  			      struct cleancache_filekey *key)
  {
  	int (*fhfn)(struct inode *, __u32 *fh, int *, struct inode *);
  	int len = 0, maxlen = CLEANCACHE_KEY_MAX;
  	struct super_block *sb = inode->i_sb;
  
  	key->u.ino = inode->i_ino;
  	if (sb->s_export_op != NULL) {
  		fhfn = sb->s_export_op->encode_fh;
  		if  (fhfn) {
  			len = (*fhfn)(inode, &key->u.fh[0], &maxlen, NULL);
  			if (len <= FILEID_ROOT || len == FILEID_INVALID)
  				return -1;
  			if (maxlen > CLEANCACHE_KEY_MAX)
  				return -1;
  		}
  	}
  	return 0;
  }
  
  /*
   * Returns a pool_id that is associated with a given fake poolid.
   */
  static int get_poolid_from_fake(int fake_pool_id)
  {
  	if (fake_pool_id >= FAKE_SHARED_FS_POOLID_OFFSET)
  		return shared_fs_poolid_map[fake_pool_id -
  			FAKE_SHARED_FS_POOLID_OFFSET];
  	else if (fake_pool_id >= FAKE_FS_POOLID_OFFSET)
  		return fs_poolid_map[fake_pool_id - FAKE_FS_POOLID_OFFSET];
  	return FS_NO_BACKEND;
  }
  
  /*
   * "Get" data from cleancache associated with the poolid/inode/index
   * that were specified when the data was put to cleanache and, if
   * successful, use it to fill the specified page with data and return 0.
   * The pageframe is unchanged and returns -1 if the get fails.
   * Page must be locked by caller.
49a9ab815   Dan Magenheimer   mm: cleancache: l...
223
224
225
226
   *
   * The function has two checks before any action is taken - whether
   * a backend is registered and whether the sb->cleancache_poolid
   * is correct.
   */
  int __cleancache_get_page(struct page *page)
  {
  	int ret = -1;
  	int pool_id;
  	int fake_pool_id;
  	struct cleancache_filekey key = { .u.key = { 0 } };
  	if (!cleancache_ops) {
  		cleancache_failed_gets++;
  		goto out;
  	}
  	VM_BUG_ON_PAGE(!PageLocked(page), page);
  	fake_pool_id = page->mapping->host->i_sb->cleancache_poolid;
  	if (fake_pool_id < 0)
  		goto out;
  	pool_id = get_poolid_from_fake(fake_pool_id);
  
  	if (cleancache_get_key(page->mapping->host, &key) < 0)
  		goto out;
  	if (pool_id >= 0)
  		ret = cleancache_ops->get_page(pool_id,
  				key, page->index, page);
  	if (ret == 0)
  		cleancache_succ_gets++;
  	else
  		cleancache_failed_gets++;
  out:
  	return ret;
  }
  EXPORT_SYMBOL(__cleancache_get_page);
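
  /*
   * Sketch of a call site, assuming the cleancache_get_page() wrapper from
   * include/linux/cleancache.h: a read path may try cleancache for a
   * locked, not-uptodate page before issuing disk I/O, e.g.
   *
   *	if (cleancache_get_page(page) == 0) {
   *		SetPageUptodate(page);
   *		unlock_page(page);
   *		return 0;
   *	}
   *
   * and only fall back to a real read when the get fails.
   */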
  
  /*
   * "Put" data from a page to cleancache and associate it with the
   * (previously-obtained per-filesystem) poolid and the page's,
   * inode and page index.  Page must be locked.  Note that a put_page
   * always "succeeds", though a subsequent get_page may succeed or fail.
49a9ab815   Dan Magenheimer   mm: cleancache: l...
263
264
265
266
   *
   * The function has two checks before any action is taken - whether
   * a backend is registered and whether the sb->cleancache_poolid
   * is correct.
   */
  void __cleancache_put_page(struct page *page)
  {
  	int pool_id;
  	int fake_pool_id;
  	struct cleancache_filekey key = { .u.key = { 0 } };
  	if (!cleancache_ops) {
  		cleancache_puts++;
  		return;
  	}
  	VM_BUG_ON_PAGE(!PageLocked(page), page);
  	fake_pool_id = page->mapping->host->i_sb->cleancache_poolid;
  	if (fake_pool_id < 0)
  		return;
  
  	pool_id = get_poolid_from_fake(fake_pool_id);
  	if (pool_id >= 0 &&
  		cleancache_get_key(page->mapping->host, &key) >= 0) {
  		cleancache_ops->put_page(pool_id, key, page->index, page);
  		cleancache_puts++;
  	}
  }
  EXPORT_SYMBOL(__cleancache_put_page);
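
  /*
   * Sketch of the intended call site (see Documentation/vm/cleancache.txt):
   * when a clean page is removed from the page cache, the VM can offer it
   * to cleancache if its contents are trustworthy, and must invalidate any
   * stale copy otherwise, e.g.
   *
   *	if (PageUptodate(page) && PageMappedToDisk(page))
   *		cleancache_put_page(page);
   *	else
   *		cleancache_invalidate_page(mapping, page);
   */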
  
  /*
   * Invalidate any data from cleancache associated with the poolid and the
   * page's inode and page index so that a subsequent "get" will fail.
   *
   * The function has two checks before any action is taken - whether
   * a backend is registered and whether the sb->cleancache_poolid
   * is correct.
   */
  void __cleancache_invalidate_page(struct address_space *mapping,
  					struct page *page)
  {
  	/* careful... page->mapping is NULL sometimes when this is called */
  	int pool_id;
  	int fake_pool_id = mapping->host->i_sb->cleancache_poolid;
  	struct cleancache_filekey key = { .u.key = { 0 } };
  	if (!cleancache_ops)
  		return;
  
  	if (fake_pool_id >= 0) {
  		pool_id = get_poolid_from_fake(fake_pool_id);
  		if (pool_id < 0)
  			return;
  		VM_BUG_ON_PAGE(!PageLocked(page), page);
  		if (cleancache_get_key(mapping->host, &key) >= 0) {
  			cleancache_ops->invalidate_page(pool_id,
  					key, page->index);
  			cleancache_invalidates++;
  		}
  	}
  }
  EXPORT_SYMBOL(__cleancache_invalidate_page);
  
  /*
   * Invalidate all data from cleancache associated with the poolid and the
   * mapping's inode so that all subsequent gets to this poolid/inode
   * will fail.
   *
   * The function has two checks before any action is taken - whether
   * a backend is registered and whether the sb->cleancache_poolid
   * is correct.
   */
  void __cleancache_invalidate_inode(struct address_space *mapping)
  {
  	int pool_id;
  	int fake_pool_id = mapping->host->i_sb->cleancache_poolid;
  	struct cleancache_filekey key = { .u.key = { 0 } };
  	if (!cleancache_ops)
  		return;
  
  	if (fake_pool_id < 0)
  		return;
  
  	pool_id = get_poolid_from_fake(fake_pool_id);
  	if (pool_id >= 0 && cleancache_get_key(mapping->host, &key) >= 0)
  		cleancache_ops->invalidate_inode(pool_id, key);
  }
  EXPORT_SYMBOL(__cleancache_invalidate_inode);
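
  /*
   * Sketch of a call site, assuming the cleancache_invalidate_inode()
   * wrapper from include/linux/cleancache.h: truncate paths drop all
   * cleancache data for an inode before removing its pages, e.g.
   *
   *	cleancache_invalidate_inode(mapping);
   *
   * so that no subsequent get can return stale data for this inode.
   */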
  
  /*
   * Called by any cleancache-enabled filesystem at time of unmount;
   * note that pool_id is surrendered and may be returned by a subsequent
   * cleancache_init_fs or cleancache_init_shared_fs.
   */
  void __cleancache_invalidate_fs(struct super_block *sb)
  {
  	int index;
  	int fake_pool_id = sb->cleancache_poolid;
  	int old_poolid = fake_pool_id;
  
  	mutex_lock(&poolid_mutex);
  	if (fake_pool_id >= FAKE_SHARED_FS_POOLID_OFFSET) {
  		index = fake_pool_id - FAKE_SHARED_FS_POOLID_OFFSET;
  		old_poolid = shared_fs_poolid_map[index];
  		shared_fs_poolid_map[index] = FS_UNKNOWN;
  		uuids[index] = NULL;
  	} else if (fake_pool_id >= FAKE_FS_POOLID_OFFSET) {
  		index = fake_pool_id - FAKE_FS_POOLID_OFFSET;
  		old_poolid = fs_poolid_map[index];
  		fs_poolid_map[index] = FS_UNKNOWN;
  	}
  	sb->cleancache_poolid = -1;
  	if (cleancache_ops)
  		cleancache_ops->invalidate_fs(old_poolid);
  	mutex_unlock(&poolid_mutex);
  }
  EXPORT_SYMBOL(__cleancache_invalidate_fs);

  static int __init init_cleancache(void)
  {
  	int i;
  #ifdef CONFIG_DEBUG_FS
  	struct dentry *root = debugfs_create_dir("cleancache", NULL);
  	if (root == NULL)
  		return -ENXIO;
  	debugfs_create_u64("succ_gets", S_IRUGO, root, &cleancache_succ_gets);
  	debugfs_create_u64("failed_gets", S_IRUGO,
  				root, &cleancache_failed_gets);
  	debugfs_create_u64("puts", S_IRUGO, root, &cleancache_puts);
  	debugfs_create_u64("invalidates", S_IRUGO,
  				root, &cleancache_invalidates);
  #endif
  	for (i = 0; i < MAX_INITIALIZABLE_FS; i++) {
  		fs_poolid_map[i] = FS_UNKNOWN;
  		shared_fs_poolid_map[i] = FS_UNKNOWN;
  	}
  	return 0;
  }
  module_init(init_cleancache)