mm/cleancache.c

  /*
   * Cleancache frontend
   *
   * This code provides the generic "frontend" layer to call a matching
   * "backend" driver implementation of cleancache.  See
   * Documentation/vm/cleancache.txt for more information.
   *
   * Copyright (C) 2009-2010 Oracle Corp. All rights reserved.
   * Author: Dan Magenheimer
   *
   * This work is licensed under the terms of the GNU GPL, version 2.
   */
  
  #include <linux/module.h>
  #include <linux/fs.h>
  #include <linux/exportfs.h>
  #include <linux/mm.h>
#include <linux/debugfs.h>
  #include <linux/cleancache.h>
  
  /*
 * cleancache_ops is set by cleancache_register_ops to contain the pointers
 * to the cleancache "backend" implementation functions.
 */
static const struct cleancache_ops *cleancache_ops __read_mostly;

/*
 * Counters available via /sys/kernel/debug/cleancache (if debugfs is
 * properly configured).  These are for information only, so they are not
 * protected against increment races.
 */
static u64 cleancache_succ_gets;
static u64 cleancache_failed_gets;
static u64 cleancache_puts;
static u64 cleancache_invalidates;

  static void cleancache_register_ops_sb(struct super_block *sb, void *unused)
  {
  	switch (sb->cleancache_poolid) {
  	case CLEANCACHE_NO_BACKEND:
  		__cleancache_init_fs(sb);
  		break;
  	case CLEANCACHE_NO_BACKEND_SHARED:
  		__cleancache_init_shared_fs(sb);
  		break;
  	}
  }

/*
 * Register operations for cleancache. Returns 0 on success.
 */
int cleancache_register_ops(const struct cleancache_ops *ops)
{
	if (cmpxchg(&cleancache_ops, NULL, ops))
		return -EBUSY;

	/*
	 * A cleancache backend can be built as a module and hence loaded after
	 * a cleancache-enabled filesystem has called cleancache_init_fs. To
	 * handle such a scenario, here we call ->init_fs or ->init_shared_fs
	 * for each active super block. To differentiate between local and
	 * shared filesystems, we temporarily initialize sb->cleancache_poolid
	 * to CLEANCACHE_NO_BACKEND or CLEANCACHE_NO_BACKEND_SHARED
	 * respectively in case there is no backend registered at the time
	 * cleancache_init_fs or cleancache_init_shared_fs is called.
	 *
	 * Since filesystems can be mounted concurrently with cleancache
	 * backend registration, we have to be careful to guarantee that all
	 * cleancache-enabled filesystems that have been mounted by the time
	 * cleancache_register_ops is called have got, and all those mounted
	 * later will get, a cleancache_poolid. This is assured by the
	 * following statements tied together:
	 *
	 * a) iterate_supers skips only those super blocks that have started
	 *    ->kill_sb
	 *
	 * b) if iterate_supers encounters a super block that has not finished
	 *    ->mount yet, it waits until it is finished
	 *
	 * c) cleancache_init_fs is called from ->mount and
	 *    cleancache_invalidate_fs is called from ->kill_sb
	 *
	 * d) we call iterate_supers after cleancache_ops has been set
	 *
	 * From a) it follows that if iterate_supers skips a super block, then
	 * either the super block is already dead, in which case we do not need
	 * to bother initializing cleancache for it, or it was mounted after we
	 * initiated iterate_supers. In the latter case, it must have seen
	 * cleancache_ops set according to d) and initialized cleancache from
	 * ->mount by itself according to c). This proves that we call
	 * ->init_fs at least once for each active super block.
	 *
	 * From b) and c) it follows that if iterate_supers encounters a super
	 * block that has already started ->init_fs, it will wait until ->mount
	 * and hence ->init_fs has finished, then check cleancache_poolid, see
	 * that it has already been set and therefore do nothing. This proves
	 * that we call ->init_fs no more than once for each super block.
	 *
	 * Taken together, the last two paragraphs prove the correctness of
	 * this function.
	 *
	 * Note that various cleancache callbacks may proceed before this
	 * function is called or even concurrently with it, but since
	 * CLEANCACHE_NO_BACKEND is negative, they will all result in a noop
	 * until the corresponding ->init_fs has actually been called and
	 * cleancache_ops has been set.
	 */
	iterate_supers(cleancache_register_ops_sb, NULL);
	return 0;
  }
  EXPORT_SYMBOL(cleancache_register_ops);
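
/*
 * An illustrative registration sketch (the example_* names are hypothetical;
 * see include/linux/cleancache.h for the exact struct cleancache_ops layout
 * and callback signatures).  A backend, typically a module, fills in the ops
 * table and hands it to cleancache_register_ops(); only the first
 * registration wins:
 *
 *	static const struct cleancache_ops example_ops = {
 *		.init_fs		= example_init_fs,
 *		.init_shared_fs		= example_init_shared_fs,
 *		.get_page		= example_get_page,
 *		.put_page		= example_put_page,
 *		.invalidate_page	= example_invalidate_page,
 *		.invalidate_inode	= example_invalidate_inode,
 *		.invalidate_fs		= example_invalidate_fs,
 *	};
 *
 *	static int __init example_backend_init(void)
 *	{
 *		return cleancache_register_ops(&example_ops);
 *	}
 *
 * A second backend registering later would get -EBUSY from the cmpxchg()
 * above.
 */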
  
  /* Called by a cleancache-enabled filesystem at time of mount */
  void __cleancache_init_fs(struct super_block *sb)
  {
	int pool_id = CLEANCACHE_NO_BACKEND;

	if (cleancache_ops) {
		pool_id = cleancache_ops->init_fs(PAGE_SIZE);
		if (pool_id < 0)
			pool_id = CLEANCACHE_NO_POOL;
	}
	sb->cleancache_poolid = pool_id;
  }
  EXPORT_SYMBOL(__cleancache_init_fs);
  
  /* Called by a cleancache-enabled clustered filesystem at time of mount */
void __cleancache_init_shared_fs(struct super_block *sb)
{
	int pool_id = CLEANCACHE_NO_BACKEND_SHARED;

	if (cleancache_ops) {
		pool_id = cleancache_ops->init_shared_fs(sb->s_uuid, PAGE_SIZE);
		if (pool_id < 0)
			pool_id = CLEANCACHE_NO_POOL;
	}
	sb->cleancache_poolid = pool_id;
  }
  EXPORT_SYMBOL(__cleancache_init_shared_fs);
  
  /*
   * If the filesystem uses exportable filehandles, use the filehandle as
   * the key, else use the inode number.
   */
  static int cleancache_get_key(struct inode *inode,
  			      struct cleancache_filekey *key)
  {
	int (*fhfn)(struct inode *, __u32 *fh, int *, struct inode *);
	int len = 0, maxlen = CLEANCACHE_KEY_MAX;
	struct super_block *sb = inode->i_sb;

	key->u.ino = inode->i_ino;
	if (sb->s_export_op != NULL) {
		fhfn = sb->s_export_op->encode_fh;
		if (fhfn) {
			len = (*fhfn)(inode, &key->u.fh[0], &maxlen, NULL);
			if (len <= FILEID_ROOT || len == FILEID_INVALID)
  				return -1;
  			if (maxlen > CLEANCACHE_KEY_MAX)
  				return -1;
  		}
  	}
  	return 0;
  }
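
/*
 * A worked example of the key selection above (values purely illustrative):
 * on a filesystem with no ->encode_fh, a page of inode 12345 is keyed by
 * key->u.ino == 12345 alone; on an exporting filesystem the opaque file
 * handle returned by ->encode_fh is stored in key->u.fh[] instead, which is
 * typically stable across inode-number reuse.
 */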
  
  /*
   * "Get" data from cleancache associated with the poolid/inode/index
   * that were specified when the data was put to cleanache and, if
   * successful, use it to fill the specified page with data and return 0.
   * The pageframe is unchanged and returns -1 if the get fails.
   * Page must be locked by caller.
49a9ab815   Dan Magenheimer   mm: cleancache: l...
172
173
174
175
   *
   * The function has two checks before any action is taken - whether
   * a backend is registered and whether the sb->cleancache_poolid
   * is correct.
077b1f83a   Dan Magenheimer   mm: cleancache co...
176
177
178
179
180
181
   */
  int __cleancache_get_page(struct page *page)
  {
  	int ret = -1;
  	int pool_id;
  	struct cleancache_filekey key = { .u.key = { 0 } };
	if (!cleancache_ops) {
		cleancache_failed_gets++;
		goto out;
	}
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	pool_id = page->mapping->host->i_sb->cleancache_poolid;
	if (pool_id < 0)
		goto out;

	if (cleancache_get_key(page->mapping->host, &key) < 0)
		goto out;
	ret = cleancache_ops->get_page(pool_id, key, page->index, page);
  	if (ret == 0)
  		cleancache_succ_gets++;
  	else
  		cleancache_failed_gets++;
  out:
  	return ret;
  }
  EXPORT_SYMBOL(__cleancache_get_page);
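
/*
 * A usage sketch with a hypothetical caller: a read path holding a locked,
 * not-yet-uptodate page cache page may try cleancache before issuing real
 * I/O, via the cleancache_get_page() wrapper from include/linux/cleancache.h:
 *
 *	if (cleancache_get_page(page) == 0) {
 *		SetPageUptodate(page);
 *		unlock_page(page);
 *		return 0;
 *	}
 *	... otherwise fall back to reading the page from the backing device
 */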
  
  /*
   * "Put" data from a page to cleancache and associate it with the
   * (previously-obtained per-filesystem) poolid and the page's,
   * inode and page index.  Page must be locked.  Note that a put_page
   * always "succeeds", though a subsequent get_page may succeed or fail.
49a9ab815   Dan Magenheimer   mm: cleancache: l...
208
209
210
211
   *
   * The function has two checks before any action is taken - whether
   * a backend is registered and whether the sb->cleancache_poolid
   * is correct.
077b1f83a   Dan Magenheimer   mm: cleancache co...
212
213
214
215
216
   */
  void __cleancache_put_page(struct page *page)
  {
  	int pool_id;
  	struct cleancache_filekey key = { .u.key = { 0 } };
	if (!cleancache_ops) {
		cleancache_puts++;
		return;
	}
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	pool_id = page->mapping->host->i_sb->cleancache_poolid;
	if (pool_id >= 0 &&
		cleancache_get_key(page->mapping->host, &key) >= 0) {
		cleancache_ops->put_page(pool_id, key, page->index, page);
  		cleancache_puts++;
  	}
  }
  EXPORT_SYMBOL(__cleancache_put_page);
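
/*
 * The put/get contract in miniature (an illustrative sketch with hypothetical
 * call sites): a put is best-effort, so the only promise is that a later get
 * of the same poolid/key/index either finds exactly the data that was put,
 * provided it has not been invalidated since, or fails.
 *
 *	cleancache_put_page(page);	   while the clean page is locked
 *	...
 *	if (cleancache_get_page(page) == 0)
 *		the page again holds the data that was put above
 *	else
 *		the backend dropped it, so reread from the backing device
 */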
  
  /*
 * Invalidate any data from cleancache associated with the poolid and the
 * page's inode and page index so that a subsequent "get" will fail.
 *
 * The function has two checks before any action is taken - whether
 * a backend is registered and whether the sb->cleancache_poolid
 * is correct.
 */
  void __cleancache_invalidate_page(struct address_space *mapping,
  					struct page *page)
{
	/* careful... page->mapping is NULL sometimes when this is called */
	int pool_id = mapping->host->i_sb->cleancache_poolid;
	struct cleancache_filekey key = { .u.key = { 0 } };
	if (!cleancache_ops)
		return;
	if (pool_id >= 0) {
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		if (cleancache_get_key(mapping->host, &key) >= 0) {
			cleancache_ops->invalidate_page(pool_id,
					key, page->index);
			cleancache_invalidates++;
		}
	}
}
EXPORT_SYMBOL(__cleancache_invalidate_page);
  
  /*
 * Invalidate all data from cleancache associated with the poolid and the
 * mapping's inode so that all subsequent gets to this poolid/inode
 * will fail.
 *
 * The function has two checks before any action is taken - whether
 * a backend is registered and whether the sb->cleancache_poolid
 * is correct.
   */
  void __cleancache_invalidate_inode(struct address_space *mapping)
{
	int pool_id = mapping->host->i_sb->cleancache_poolid;
	struct cleancache_filekey key = { .u.key = { 0 } };
	if (!cleancache_ops)
		return;
	if (pool_id >= 0 && cleancache_get_key(mapping->host, &key) >= 0)
		cleancache_ops->invalidate_inode(pool_id, key);
  }
  EXPORT_SYMBOL(__cleancache_invalidate_inode);

/*
 * Called by any cleancache-enabled filesystem at time of unmount;
 * note that pool_id is surrendered and may be returned by a subsequent
 * cleancache_init_fs or cleancache_init_shared_fs.
 */
void __cleancache_invalidate_fs(struct super_block *sb)
{
	int pool_id;

	pool_id = sb->cleancache_poolid;
	sb->cleancache_poolid = CLEANCACHE_NO_POOL;

	if (cleancache_ops && pool_id >= 0)
		cleancache_ops->invalidate_fs(pool_id);
}
  EXPORT_SYMBOL(__cleancache_invalidate_fs);
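
/*
 * Putting the pieces together, a rough sketch of the lifecycle seen by one
 * cleancache-enabled filesystem (the wrappers named here live in
 * include/linux/cleancache.h; the exact VFS/page-cache hook points vary by
 * kernel version and are not spelled out in this file):
 *
 *	mount:    cleancache_init_fs(sb) or cleancache_init_shared_fs(sb)
 *	runtime:  cleancache_put_page() as clean pages are dropped from the
 *	          page cache, cleancache_get_page() before reading a page
 *	          from disk, cleancache_invalidate_page()/_inode() when the
 *	          cached copies become stale
 *	unmount:  cleancache_invalidate_fs(sb) from ->kill_sb
 */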

  static int __init init_cleancache(void)
  {
  #ifdef CONFIG_DEBUG_FS
  	struct dentry *root = debugfs_create_dir("cleancache", NULL);
  	if (root == NULL)
  		return -ENXIO;
  	debugfs_create_u64("succ_gets", S_IRUGO, root, &cleancache_succ_gets);
  	debugfs_create_u64("failed_gets", S_IRUGO,
  				root, &cleancache_failed_gets);
  	debugfs_create_u64("puts", S_IRUGO, root, &cleancache_puts);
  	debugfs_create_u64("invalidates", S_IRUGO,
  				root, &cleancache_invalidates);
  #endif
  	return 0;
  }
  module_init(init_cleancache)
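
/*
 * With debugfs available, the counters created above can be read directly;
 * the paths below assume the conventional /sys/kernel/debug mount point:
 *
 *	# cat /sys/kernel/debug/cleancache/succ_gets
 *	# cat /sys/kernel/debug/cleancache/failed_gets
 *	# cat /sys/kernel/debug/cleancache/puts
 *	# cat /sys/kernel/debug/cleancache/invalidates
 */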