Blame view

mm/cleancache.c 9.79 KB
7a338472f   Thomas Gleixner   treewide: Replace...
1
  // SPDX-License-Identifier: GPL-2.0-only
077b1f83a   Dan Magenheimer   mm: cleancache co...
2
3
4
5
6
  /*
   * Cleancache frontend
   *
   * This code provides the generic "frontend" layer to call a matching
   * "backend" driver implementation of cleancache.  See
ad56b738c   Mike Rapoport   docs/vm: rename d...
7
   * Documentation/vm/cleancache.rst for more information.
077b1f83a   Dan Magenheimer   mm: cleancache co...
8
9
10
   *
   * Copyright (C) 2009-2010 Oracle Corp. All rights reserved.
   * Author: Dan Magenheimer
077b1f83a   Dan Magenheimer   mm: cleancache co...
11
12
13
14
15
16
   */
  
  #include <linux/module.h>
  #include <linux/fs.h>
  #include <linux/exportfs.h>
  #include <linux/mm.h>
417fc2cae   Dan Magenheimer   mm: cleancache: r...
17
  #include <linux/debugfs.h>
077b1f83a   Dan Magenheimer   mm: cleancache co...
18
19
20
  #include <linux/cleancache.h>
  
/*
 * cleancache_ops is set by cleancache_register_ops to contain the pointers
 * to the cleancache "backend" implementation functions.
 * NULL until a backend registers; only one backend may ever register.
 */
static const struct cleancache_ops *cleancache_ops __read_mostly;

/*
 * Counters available via /sys/kernel/debug/cleancache (if debugfs is
 * properly configured).  These are for information only so are not
 * protected against increment races.
 */
static u64 cleancache_succ_gets;
static u64 cleancache_failed_gets;
static u64 cleancache_puts;
static u64 cleancache_invalidates;
077b1f83a   Dan Magenheimer   mm: cleancache co...
35

3cb29d111   Vladimir Davydov   cleancache: remov...
36
37
38
39
40
41
42
43
44
45
46
  static void cleancache_register_ops_sb(struct super_block *sb, void *unused)
  {
  	switch (sb->cleancache_poolid) {
  	case CLEANCACHE_NO_BACKEND:
  		__cleancache_init_fs(sb);
  		break;
  	case CLEANCACHE_NO_BACKEND_SHARED:
  		__cleancache_init_shared_fs(sb);
  		break;
  	}
  }
49a9ab815   Dan Magenheimer   mm: cleancache: l...
47
48
  
  /*
53d85c985   Vladimir Davydov   cleancache: forbi...
49
   * Register operations for cleancache. Returns 0 on success.
077b1f83a   Dan Magenheimer   mm: cleancache co...
50
   */
b3c6de492   Julia Lawall   cleancache: const...
51
  int cleancache_register_ops(const struct cleancache_ops *ops)
077b1f83a   Dan Magenheimer   mm: cleancache co...
52
  {
3cb29d111   Vladimir Davydov   cleancache: remov...
53
  	if (cmpxchg(&cleancache_ops, NULL, ops))
53d85c985   Vladimir Davydov   cleancache: forbi...
54
  		return -EBUSY;
3cb29d111   Vladimir Davydov   cleancache: remov...
55

833f8662a   Konrad Rzeszutek Wilk   cleancache: Make ...
56
  	/*
3cb29d111   Vladimir Davydov   cleancache: remov...
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
  	 * A cleancache backend can be built as a module and hence loaded after
  	 * a cleancache enabled filesystem has called cleancache_init_fs. To
  	 * handle such a scenario, here we call ->init_fs or ->init_shared_fs
  	 * for each active super block. To differentiate between local and
  	 * shared filesystems, we temporarily initialize sb->cleancache_poolid
  	 * to CLEANCACHE_NO_BACKEND or CLEANCACHE_NO_BACKEND_SHARED
  	 * respectively in case there is no backend registered at the time
  	 * cleancache_init_fs or cleancache_init_shared_fs is called.
  	 *
  	 * Since filesystems can be mounted concurrently with cleancache
  	 * backend registration, we have to be careful to guarantee that all
  	 * cleancache enabled filesystems that has been mounted by the time
  	 * cleancache_register_ops is called has got and all mounted later will
  	 * get cleancache_poolid. This is assured by the following statements
  	 * tied together:
  	 *
  	 * a) iterate_supers skips only those super blocks that has started
  	 *    ->kill_sb
  	 *
  	 * b) if iterate_supers encounters a super block that has not finished
  	 *    ->mount yet, it waits until it is finished
  	 *
  	 * c) cleancache_init_fs is called from ->mount and
  	 *    cleancache_invalidate_fs is called from ->kill_sb
  	 *
  	 * d) we call iterate_supers after cleancache_ops has been set
  	 *
  	 * From a) it follows that if iterate_supers skips a super block, then
  	 * either the super block is already dead, in which case we do not need
  	 * to bother initializing cleancache for it, or it was mounted after we
  	 * initiated iterate_supers. In the latter case, it must have seen
  	 * cleancache_ops set according to d) and initialized cleancache from
  	 * ->mount by itself according to c). This proves that we call
  	 * ->init_fs at least once for each active super block.
  	 *
  	 * From b) and c) it follows that if iterate_supers encounters a super
  	 * block that has already started ->init_fs, it will wait until ->mount
  	 * and hence ->init_fs has finished, then check cleancache_poolid, see
  	 * that it has already been set and therefore do nothing. This proves
  	 * that we call ->init_fs no more than once for each super block.
  	 *
  	 * Combined together, the last two paragraphs prove the function
  	 * correctness.
  	 *
  	 * Note that various cleancache callbacks may proceed before this
  	 * function is called or even concurrently with it, but since
  	 * CLEANCACHE_NO_BACKEND is negative, they will all result in a noop
  	 * until the corresponding ->init_fs has been actually called and
  	 * cleancache_ops has been set.
833f8662a   Konrad Rzeszutek Wilk   cleancache: Make ...
106
  	 */
3cb29d111   Vladimir Davydov   cleancache: remov...
107
  	iterate_supers(cleancache_register_ops_sb, NULL);
53d85c985   Vladimir Davydov   cleancache: forbi...
108
  	return 0;
077b1f83a   Dan Magenheimer   mm: cleancache co...
109
110
111
112
113
114
  }
  EXPORT_SYMBOL(cleancache_register_ops);
  
  /* Called by a cleancache-enabled filesystem at time of mount */
  void __cleancache_init_fs(struct super_block *sb)
  {
3cb29d111   Vladimir Davydov   cleancache: remov...
115
  	int pool_id = CLEANCACHE_NO_BACKEND;
49a9ab815   Dan Magenheimer   mm: cleancache: l...
116

3cb29d111   Vladimir Davydov   cleancache: remov...
117
118
119
120
  	if (cleancache_ops) {
  		pool_id = cleancache_ops->init_fs(PAGE_SIZE);
  		if (pool_id < 0)
  			pool_id = CLEANCACHE_NO_POOL;
49a9ab815   Dan Magenheimer   mm: cleancache: l...
121
  	}
3cb29d111   Vladimir Davydov   cleancache: remov...
122
  	sb->cleancache_poolid = pool_id;
077b1f83a   Dan Magenheimer   mm: cleancache co...
123
124
125
126
  }
  EXPORT_SYMBOL(__cleancache_init_fs);
  
  /* Called by a cleancache-enabled clustered filesystem at time of mount */
9de162629   Vladimir Davydov   cleancache: zap u...
127
  void __cleancache_init_shared_fs(struct super_block *sb)
077b1f83a   Dan Magenheimer   mm: cleancache co...
128
  {
3cb29d111   Vladimir Davydov   cleancache: remov...
129
  	int pool_id = CLEANCACHE_NO_BACKEND_SHARED;
49a9ab815   Dan Magenheimer   mm: cleancache: l...
130

3cb29d111   Vladimir Davydov   cleancache: remov...
131
  	if (cleancache_ops) {
85787090a   Christoph Hellwig   fs: switch ->s_uu...
132
  		pool_id = cleancache_ops->init_shared_fs(&sb->s_uuid, PAGE_SIZE);
3cb29d111   Vladimir Davydov   cleancache: remov...
133
134
  		if (pool_id < 0)
  			pool_id = CLEANCACHE_NO_POOL;
49a9ab815   Dan Magenheimer   mm: cleancache: l...
135
  	}
3cb29d111   Vladimir Davydov   cleancache: remov...
136
  	sb->cleancache_poolid = pool_id;
077b1f83a   Dan Magenheimer   mm: cleancache co...
137
138
139
140
141
142
143
144
145
146
  }
  EXPORT_SYMBOL(__cleancache_init_shared_fs);
  
  /*
   * If the filesystem uses exportable filehandles, use the filehandle as
   * the key, else use the inode number.
   */
  static int cleancache_get_key(struct inode *inode,
  			      struct cleancache_filekey *key)
  {
b0b0382bb   Al Viro   ->encode_fh() API...
147
  	int (*fhfn)(struct inode *, __u32 *fh, int *, struct inode *);
077b1f83a   Dan Magenheimer   mm: cleancache co...
148
149
150
151
152
153
154
  	int len = 0, maxlen = CLEANCACHE_KEY_MAX;
  	struct super_block *sb = inode->i_sb;
  
  	key->u.ino = inode->i_ino;
  	if (sb->s_export_op != NULL) {
  		fhfn = sb->s_export_op->encode_fh;
  		if  (fhfn) {
b0b0382bb   Al Viro   ->encode_fh() API...
155
  			len = (*fhfn)(inode, &key->u.fh[0], &maxlen, NULL);
94e07a759   Namjae Jeon   fs: encode_fh: re...
156
  			if (len <= FILEID_ROOT || len == FILEID_INVALID)
077b1f83a   Dan Magenheimer   mm: cleancache co...
157
158
159
160
161
162
163
164
165
166
167
168
169
170
  				return -1;
  			if (maxlen > CLEANCACHE_KEY_MAX)
  				return -1;
  		}
  	}
  	return 0;
  }
  
  /*
   * "Get" data from cleancache associated with the poolid/inode/index
   * that were specified when the data was put to cleanache and, if
   * successful, use it to fill the specified page with data and return 0.
   * The pageframe is unchanged and returns -1 if the get fails.
   * Page must be locked by caller.
49a9ab815   Dan Magenheimer   mm: cleancache: l...
171
172
173
174
   *
   * The function has two checks before any action is taken - whether
   * a backend is registered and whether the sb->cleancache_poolid
   * is correct.
077b1f83a   Dan Magenheimer   mm: cleancache co...
175
176
177
178
179
180
   */
  int __cleancache_get_page(struct page *page)
  {
  	int ret = -1;
  	int pool_id;
  	struct cleancache_filekey key = { .u.key = { 0 } };
833f8662a   Konrad Rzeszutek Wilk   cleancache: Make ...
181
  	if (!cleancache_ops) {
49a9ab815   Dan Magenheimer   mm: cleancache: l...
182
183
184
  		cleancache_failed_gets++;
  		goto out;
  	}
309381fea   Sasha Levin   mm: dump page whe...
185
  	VM_BUG_ON_PAGE(!PageLocked(page), page);
3cb29d111   Vladimir Davydov   cleancache: remov...
186
187
  	pool_id = page->mapping->host->i_sb->cleancache_poolid;
  	if (pool_id < 0)
077b1f83a   Dan Magenheimer   mm: cleancache co...
188
189
190
191
  		goto out;
  
  	if (cleancache_get_key(page->mapping->host, &key) < 0)
  		goto out;
3cb29d111   Vladimir Davydov   cleancache: remov...
192
  	ret = cleancache_ops->get_page(pool_id, key, page->index, page);
077b1f83a   Dan Magenheimer   mm: cleancache co...
193
194
195
196
197
198
199
200
201
202
203
204
205
206
  	if (ret == 0)
  		cleancache_succ_gets++;
  	else
  		cleancache_failed_gets++;
  out:
  	return ret;
  }
  EXPORT_SYMBOL(__cleancache_get_page);
  
  /*
   * "Put" data from a page to cleancache and associate it with the
   * (previously-obtained per-filesystem) poolid and the page's,
   * inode and page index.  Page must be locked.  Note that a put_page
   * always "succeeds", though a subsequent get_page may succeed or fail.
49a9ab815   Dan Magenheimer   mm: cleancache: l...
207
208
209
210
   *
   * The function has two checks before any action is taken - whether
   * a backend is registered and whether the sb->cleancache_poolid
   * is correct.
077b1f83a   Dan Magenheimer   mm: cleancache co...
211
212
213
214
215
   */
  void __cleancache_put_page(struct page *page)
  {
  	int pool_id;
  	struct cleancache_filekey key = { .u.key = { 0 } };
833f8662a   Konrad Rzeszutek Wilk   cleancache: Make ...
216
  	if (!cleancache_ops) {
49a9ab815   Dan Magenheimer   mm: cleancache: l...
217
218
219
  		cleancache_puts++;
  		return;
  	}
309381fea   Sasha Levin   mm: dump page whe...
220
  	VM_BUG_ON_PAGE(!PageLocked(page), page);
3cb29d111   Vladimir Davydov   cleancache: remov...
221
  	pool_id = page->mapping->host->i_sb->cleancache_poolid;
077b1f83a   Dan Magenheimer   mm: cleancache co...
222
  	if (pool_id >= 0 &&
49a9ab815   Dan Magenheimer   mm: cleancache: l...
223
  		cleancache_get_key(page->mapping->host, &key) >= 0) {
833f8662a   Konrad Rzeszutek Wilk   cleancache: Make ...
224
  		cleancache_ops->put_page(pool_id, key, page->index, page);
077b1f83a   Dan Magenheimer   mm: cleancache co...
225
226
227
228
229
230
  		cleancache_puts++;
  	}
  }
  EXPORT_SYMBOL(__cleancache_put_page);
  
  /*
3167760f8   Dan Magenheimer   mm: cleancache: s...
231
   * Invalidate any data from cleancache associated with the poolid and the
077b1f83a   Dan Magenheimer   mm: cleancache co...
232
   * page's inode and page index so that a subsequent "get" will fail.
49a9ab815   Dan Magenheimer   mm: cleancache: l...
233
234
235
236
   *
   * The function has two checks before any action is taken - whether
   * a backend is registered and whether the sb->cleancache_poolid
   * is correct.
077b1f83a   Dan Magenheimer   mm: cleancache co...
237
   */
3167760f8   Dan Magenheimer   mm: cleancache: s...
238
239
  void __cleancache_invalidate_page(struct address_space *mapping,
  					struct page *page)
077b1f83a   Dan Magenheimer   mm: cleancache co...
240
241
  {
  	/* careful... page->mapping is NULL sometimes when this is called */
3cb29d111   Vladimir Davydov   cleancache: remov...
242
  	int pool_id = mapping->host->i_sb->cleancache_poolid;
077b1f83a   Dan Magenheimer   mm: cleancache co...
243
  	struct cleancache_filekey key = { .u.key = { 0 } };
833f8662a   Konrad Rzeszutek Wilk   cleancache: Make ...
244
  	if (!cleancache_ops)
49a9ab815   Dan Magenheimer   mm: cleancache: l...
245
  		return;
3cb29d111   Vladimir Davydov   cleancache: remov...
246
  	if (pool_id >= 0) {
309381fea   Sasha Levin   mm: dump page whe...
247
  		VM_BUG_ON_PAGE(!PageLocked(page), page);
077b1f83a   Dan Magenheimer   mm: cleancache co...
248
  		if (cleancache_get_key(mapping->host, &key) >= 0) {
833f8662a   Konrad Rzeszutek Wilk   cleancache: Make ...
249
  			cleancache_ops->invalidate_page(pool_id,
49a9ab815   Dan Magenheimer   mm: cleancache: l...
250
  					key, page->index);
417fc2cae   Dan Magenheimer   mm: cleancache: r...
251
  			cleancache_invalidates++;
077b1f83a   Dan Magenheimer   mm: cleancache co...
252
253
254
  		}
  	}
  }
3167760f8   Dan Magenheimer   mm: cleancache: s...
255
  EXPORT_SYMBOL(__cleancache_invalidate_page);
077b1f83a   Dan Magenheimer   mm: cleancache co...
256
257
  
  /*
3167760f8   Dan Magenheimer   mm: cleancache: s...
258
   * Invalidate all data from cleancache associated with the poolid and the
077b1f83a   Dan Magenheimer   mm: cleancache co...
259
260
   * mappings's inode so that all subsequent gets to this poolid/inode
   * will fail.
49a9ab815   Dan Magenheimer   mm: cleancache: l...
261
262
263
264
   *
   * The function has two checks before any action is taken - whether
   * a backend is registered and whether the sb->cleancache_poolid
   * is correct.
077b1f83a   Dan Magenheimer   mm: cleancache co...
265
   */
3167760f8   Dan Magenheimer   mm: cleancache: s...
266
  void __cleancache_invalidate_inode(struct address_space *mapping)
077b1f83a   Dan Magenheimer   mm: cleancache co...
267
  {
3cb29d111   Vladimir Davydov   cleancache: remov...
268
  	int pool_id = mapping->host->i_sb->cleancache_poolid;
077b1f83a   Dan Magenheimer   mm: cleancache co...
269
  	struct cleancache_filekey key = { .u.key = { 0 } };
833f8662a   Konrad Rzeszutek Wilk   cleancache: Make ...
270
  	if (!cleancache_ops)
49a9ab815   Dan Magenheimer   mm: cleancache: l...
271
  		return;
077b1f83a   Dan Magenheimer   mm: cleancache co...
272
  	if (pool_id >= 0 && cleancache_get_key(mapping->host, &key) >= 0)
833f8662a   Konrad Rzeszutek Wilk   cleancache: Make ...
273
  		cleancache_ops->invalidate_inode(pool_id, key);
077b1f83a   Dan Magenheimer   mm: cleancache co...
274
  }
3167760f8   Dan Magenheimer   mm: cleancache: s...
275
  EXPORT_SYMBOL(__cleancache_invalidate_inode);
077b1f83a   Dan Magenheimer   mm: cleancache co...
276
277
278
  
  /*
   * Called by any cleancache-enabled filesystem at time of unmount;
49a9ab815   Dan Magenheimer   mm: cleancache: l...
279
280
   * note that pool_id is surrendered and may be returned by a subsequent
   * cleancache_init_fs or cleancache_init_shared_fs.
077b1f83a   Dan Magenheimer   mm: cleancache co...
281
   */
3167760f8   Dan Magenheimer   mm: cleancache: s...
282
  void __cleancache_invalidate_fs(struct super_block *sb)
077b1f83a   Dan Magenheimer   mm: cleancache co...
283
  {
3cb29d111   Vladimir Davydov   cleancache: remov...
284
  	int pool_id;
49a9ab815   Dan Magenheimer   mm: cleancache: l...
285

3cb29d111   Vladimir Davydov   cleancache: remov...
286
287
288
289
290
  	pool_id = sb->cleancache_poolid;
  	sb->cleancache_poolid = CLEANCACHE_NO_POOL;
  
  	if (cleancache_ops && pool_id >= 0)
  		cleancache_ops->invalidate_fs(pool_id);
077b1f83a   Dan Magenheimer   mm: cleancache co...
291
  }
3167760f8   Dan Magenheimer   mm: cleancache: s...
292
  EXPORT_SYMBOL(__cleancache_invalidate_fs);
077b1f83a   Dan Magenheimer   mm: cleancache co...
293

077b1f83a   Dan Magenheimer   mm: cleancache co...
294
295
  static int __init init_cleancache(void)
  {
417fc2cae   Dan Magenheimer   mm: cleancache: r...
296
297
  #ifdef CONFIG_DEBUG_FS
  	struct dentry *root = debugfs_create_dir("cleancache", NULL);
c4e41349a   Greg Kroah-Hartman   mm: cleancache: n...
298

0825a6f98   Joe Perches   mm: use octal not...
299
300
301
302
  	debugfs_create_u64("succ_gets", 0444, root, &cleancache_succ_gets);
  	debugfs_create_u64("failed_gets", 0444, root, &cleancache_failed_gets);
  	debugfs_create_u64("puts", 0444, root, &cleancache_puts);
  	debugfs_create_u64("invalidates", 0444, root, &cleancache_invalidates);
417fc2cae   Dan Magenheimer   mm: cleancache: r...
303
  #endif
077b1f83a   Dan Magenheimer   mm: cleancache co...
304
305
306
  	return 0;
  }
  module_init(init_cleancache)