  /*
   * linux/fs/f2fs/crypto.c
   *
   * Copied from linux/fs/ext4/crypto.c
   *
   * Copyright (C) 2015, Google, Inc.
   * Copyright (C) 2015, Motorola Mobility
   *
   * This contains encryption functions for f2fs
   *
   * Written by Michael Halcrow, 2014.
   *
   * Filename encryption additions
   *	Uday Savagaonkar, 2014
   * Encryption policy handling additions
   *	Ildar Muslukhov, 2014
   * Remove ext4_encrypted_zeroout(),
   *   add f2fs_restore_and_release_control_page()
   *	Jaegeuk Kim, 2015.
   *
   * This has not yet undergone a rigorous security audit.
   *
   * The usage of AES-XTS should conform to recommendations in NIST
   * Special Publication 800-38E and IEEE P1619/D16.
   */
  #include <crypto/hash.h>
  #include <crypto/sha.h>
  #include <keys/user-type.h>
  #include <keys/encrypted-type.h>
  #include <linux/crypto.h>
  #include <linux/ecryptfs.h>
  #include <linux/gfp.h>
  #include <linux/kernel.h>
  #include <linux/key.h>
  #include <linux/list.h>
  #include <linux/mempool.h>
  #include <linux/module.h>
  #include <linux/mutex.h>
  #include <linux/random.h>
  #include <linux/scatterlist.h>
  #include <linux/spinlock_types.h>
  #include <linux/f2fs_fs.h>
  #include <linux/ratelimit.h>
  #include <linux/bio.h>
  
  #include "f2fs.h"
  #include "xattr.h"
  
  /* Encryption added and removed here! (L: */
  
  static unsigned int num_prealloc_crypto_pages = 32;
  static unsigned int num_prealloc_crypto_ctxs = 128;
  
  module_param(num_prealloc_crypto_pages, uint, 0444);
  MODULE_PARM_DESC(num_prealloc_crypto_pages,
  		"Number of crypto pages to preallocate");
  module_param(num_prealloc_crypto_ctxs, uint, 0444);
  MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
  		"Number of crypto contexts to preallocate");
  
  static mempool_t *f2fs_bounce_page_pool;
  
  static LIST_HEAD(f2fs_free_crypto_ctxs);
  static DEFINE_SPINLOCK(f2fs_crypto_ctx_lock);
  static struct workqueue_struct *f2fs_read_workqueue;
  static DEFINE_MUTEX(crypto_init);
  static struct kmem_cache *f2fs_crypto_ctx_cachep;
  struct kmem_cache *f2fs_crypt_info_cachep;
  /**
   * f2fs_release_crypto_ctx() - Releases an encryption context
   * @ctx: The encryption context to release.
   *
   * If the encryption context was allocated from the pre-allocated pool, returns
   * it to that pool. Else, frees it.
   *
 * If there's a bounce page in the context, this frees it as well.
   */
  void f2fs_release_crypto_ctx(struct f2fs_crypto_ctx *ctx)
  {
  	unsigned long flags;
  	if (ctx->flags & F2FS_WRITE_PATH_FL && ctx->w.bounce_page) {
  		mempool_free(ctx->w.bounce_page, f2fs_bounce_page_pool);
  		ctx->w.bounce_page = NULL;
  	}
  	ctx->w.control_page = NULL;
  	if (ctx->flags & F2FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
  		kmem_cache_free(f2fs_crypto_ctx_cachep, ctx);
  	} else {
  		spin_lock_irqsave(&f2fs_crypto_ctx_lock, flags);
  		list_add(&ctx->free_list, &f2fs_free_crypto_ctxs);
  		spin_unlock_irqrestore(&f2fs_crypto_ctx_lock, flags);
  	}
  }
  
  /**
   * f2fs_get_crypto_ctx() - Gets an encryption context
   * @inode:       The inode for which we are doing the crypto
   *
   * Allocates and initializes an encryption context.
   *
 * Return: An allocated and initialized encryption context on success; an
 * ERR_PTR() value otherwise (this function never returns NULL).
   */
  struct f2fs_crypto_ctx *f2fs_get_crypto_ctx(struct inode *inode)
  {
  	struct f2fs_crypto_ctx *ctx = NULL;
  	unsigned long flags;
  	struct f2fs_crypt_info *ci = F2FS_I(inode)->i_crypt_info;
  	if (ci == NULL)
  		return ERR_PTR(-ENOKEY);

  	/*
  	 * We first try getting the ctx from a free list because in
  	 * the common case the ctx will have an allocated and
  	 * initialized crypto tfm, so it's probably a worthwhile
  	 * optimization. For the bounce page, we first try getting it
  	 * from the kernel allocator because that's just about as fast
  	 * as getting it from a list and because a cache of free pages
  	 * should generally be a "last resort" option for a filesystem
  	 * to be able to do its job.
  	 */
  	spin_lock_irqsave(&f2fs_crypto_ctx_lock, flags);
  	ctx = list_first_entry_or_null(&f2fs_free_crypto_ctxs,
  					struct f2fs_crypto_ctx, free_list);
  	if (ctx)
  		list_del(&ctx->free_list);
  	spin_unlock_irqrestore(&f2fs_crypto_ctx_lock, flags);
  	if (!ctx) {
  		ctx = kmem_cache_zalloc(f2fs_crypto_ctx_cachep, GFP_NOFS);
  		if (!ctx)
  			return ERR_PTR(-ENOMEM);
  		ctx->flags |= F2FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
  	} else {
  		ctx->flags &= ~F2FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
  	}
  	ctx->flags &= ~F2FS_WRITE_PATH_FL;
  	return ctx;
  }
  
  /*
   * Call f2fs_decrypt on every single page, reusing the encryption
   * context.
   */
  static void completion_pages(struct work_struct *work)
  {
  	struct f2fs_crypto_ctx *ctx =
  		container_of(work, struct f2fs_crypto_ctx, r.work);
  	struct bio *bio = ctx->r.bio;
  	struct bio_vec *bv;
  	int i;
  
  	bio_for_each_segment_all(bv, bio, i) {
  		struct page *page = bv->bv_page;
  		int ret = f2fs_decrypt(ctx, page);
  
  		if (ret) {
  			WARN_ON_ONCE(1);
  			SetPageError(page);
  		} else
  			SetPageUptodate(page);
  		unlock_page(page);
  	}
  	f2fs_release_crypto_ctx(ctx);
  	bio_put(bio);
  }
  
  void f2fs_end_io_crypto_work(struct f2fs_crypto_ctx *ctx, struct bio *bio)
  {
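	/*
	 * Runs from the bio completion path, possibly in interrupt context,
	 * so the decryption (which can sleep) is deferred to a workqueue.
	 */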
  	INIT_WORK(&ctx->r.work, completion_pages);
  	ctx->r.bio = bio;
  	queue_work(f2fs_read_workqueue, &ctx->r.work);
  }
  static void f2fs_crypto_destroy(void)
  {
  	struct f2fs_crypto_ctx *pos, *n;
  	list_for_each_entry_safe(pos, n, &f2fs_free_crypto_ctxs, free_list)
  		kmem_cache_free(f2fs_crypto_ctx_cachep, pos);
  	INIT_LIST_HEAD(&f2fs_free_crypto_ctxs);
  	if (f2fs_bounce_page_pool)
  		mempool_destroy(f2fs_bounce_page_pool);
  	f2fs_bounce_page_pool = NULL;
  }
  
  /**
   * f2fs_crypto_initialize() - Set up for f2fs encryption.
   *
   * We only call this when we start accessing encrypted files, since it
   * results in memory getting allocated that wouldn't otherwise be used.
   *
   * Return: Zero on success, non-zero otherwise.
   */
  int f2fs_crypto_initialize(void)
  {
  	int i, res = -ENOMEM;

  	if (f2fs_bounce_page_pool)
  		return 0;
  	mutex_lock(&crypto_init);
  	if (f2fs_bounce_page_pool)
  		goto already_initialized;
  	for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
  		struct f2fs_crypto_ctx *ctx;
  		ctx = kmem_cache_zalloc(f2fs_crypto_ctx_cachep, GFP_KERNEL);
  		if (!ctx)
  			goto fail;
  		list_add(&ctx->free_list, &f2fs_free_crypto_ctxs);
  	}
	/*
	 * Allocate the pool last: the unlocked check above relies on seeing
	 * a non-NULL pool only after everything else is initialized.
	 */
  	f2fs_bounce_page_pool =
  		mempool_create_page_pool(num_prealloc_crypto_pages, 0);
  	if (!f2fs_bounce_page_pool)
  		goto fail;

  already_initialized:
  	mutex_unlock(&crypto_init);
  	return 0;
  fail:
  	f2fs_crypto_destroy();
  	mutex_unlock(&crypto_init);
  	return res;
  }
  /**
   * f2fs_exit_crypto() - Shutdown the f2fs encryption system
   */
  void f2fs_exit_crypto(void)
  {
  	f2fs_crypto_destroy();
  
  	if (f2fs_read_workqueue)
  		destroy_workqueue(f2fs_read_workqueue);
  	if (f2fs_crypto_ctx_cachep)
  		kmem_cache_destroy(f2fs_crypto_ctx_cachep);
  	if (f2fs_crypt_info_cachep)
  		kmem_cache_destroy(f2fs_crypt_info_cachep);
  }
  
  int __init f2fs_init_crypto(void)
  {
  	int res = -ENOMEM;
  
  	f2fs_read_workqueue = alloc_workqueue("f2fs_crypto", WQ_HIGHPRI, 0);
  	if (!f2fs_read_workqueue)
  		goto fail;
  
  	f2fs_crypto_ctx_cachep = KMEM_CACHE(f2fs_crypto_ctx,
  						SLAB_RECLAIM_ACCOUNT);
  	if (!f2fs_crypto_ctx_cachep)
  		goto fail;
  
  	f2fs_crypt_info_cachep = KMEM_CACHE(f2fs_crypt_info,
  						SLAB_RECLAIM_ACCOUNT);
  	if (!f2fs_crypt_info_cachep)
  		goto fail;
  
  	return 0;
  fail:
  	f2fs_exit_crypto();
  	return res;
  }
  void f2fs_restore_and_release_control_page(struct page **page)
  {
  	struct f2fs_crypto_ctx *ctx;
  	struct page *bounce_page;
  
	/* Bounce pages are unmapped; a page that still has a mapping is not one. */
  	if ((*page)->mapping)
  		return;
  
  	/* The bounce data page is unmapped. */
  	bounce_page = *page;
  	ctx = (struct f2fs_crypto_ctx *)page_private(bounce_page);
  
  	/* restore control page */
  	*page = ctx->w.control_page;
  
  	f2fs_restore_control_page(bounce_page);
  }
  
  void f2fs_restore_control_page(struct page *data_page)
  {
  	struct f2fs_crypto_ctx *ctx =
  		(struct f2fs_crypto_ctx *)page_private(data_page);
  
  	set_page_private(data_page, (unsigned long)NULL);
  	ClearPagePrivate(data_page);
  	unlock_page(data_page);
  	f2fs_release_crypto_ctx(ctx);
  }
  
  /**
   * f2fs_crypt_complete() - The completion callback for page encryption
   * @req: The asynchronous encryption request context
   * @res: The result of the encryption operation
   */
  static void f2fs_crypt_complete(struct crypto_async_request *req, int res)
  {
  	struct f2fs_completion_result *ecr = req->data;
  
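	/*
	 * With CRYPTO_TFM_REQ_MAY_BACKLOG, a backlogged request first signals
	 * -EINPROGRESS when it is accepted onto the queue; only the second
	 * invocation carries the final result.
	 */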
  	if (res == -EINPROGRESS)
  		return;
  	ecr->res = res;
  	complete(&ecr->completion);
  }
  
  typedef enum {
  	F2FS_DECRYPT = 0,
  	F2FS_ENCRYPT,
  } f2fs_direction_t;
  
  static int f2fs_page_crypto(struct f2fs_crypto_ctx *ctx,
  				struct inode *inode,
  				f2fs_direction_t rw,
  				pgoff_t index,
  				struct page *src_page,
  				struct page *dest_page)
  {
  	u8 xts_tweak[F2FS_XTS_TWEAK_SIZE];
  	struct ablkcipher_request *req = NULL;
  	DECLARE_F2FS_COMPLETION_RESULT(ecr);
  	struct scatterlist dst, src;
  	struct f2fs_crypt_info *ci = F2FS_I(inode)->i_crypt_info;
  	struct crypto_ablkcipher *tfm = ci->ci_ctfm;
  	int res = 0;
  	req = ablkcipher_request_alloc(tfm, GFP_NOFS);
  	if (!req) {
  		printk_ratelimited(KERN_ERR
  				"%s: crypto_request_alloc() failed
  ",
  				__func__);
  		return -ENOMEM;
  	}
  	ablkcipher_request_set_callback(
  		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
  		f2fs_crypt_complete, &ecr);
  
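	/*
	 * Use the page index, zero-padded to F2FS_XTS_TWEAK_SIZE, as the XTS
	 * tweak, so every page in the file is encrypted with a distinct tweak.
	 */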
  	BUILD_BUG_ON(F2FS_XTS_TWEAK_SIZE < sizeof(index));
  	memcpy(xts_tweak, &index, sizeof(index));
  	memset(&xts_tweak[sizeof(index)], 0,
  			F2FS_XTS_TWEAK_SIZE - sizeof(index));
  
  	sg_init_table(&dst, 1);
  	sg_set_page(&dst, dest_page, PAGE_CACHE_SIZE, 0);
  	sg_init_table(&src, 1);
  	sg_set_page(&src, src_page, PAGE_CACHE_SIZE, 0);
  	ablkcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE,
  					xts_tweak);
  	if (rw == F2FS_DECRYPT)
  		res = crypto_ablkcipher_decrypt(req);
  	else
  		res = crypto_ablkcipher_encrypt(req);
  	if (res == -EINPROGRESS || res == -EBUSY) {
  		BUG_ON(req->base.data != &ecr);
  		wait_for_completion(&ecr.completion);
  		res = ecr.res;
  	}
  	ablkcipher_request_free(req);
  	if (res) {
  		printk_ratelimited(KERN_ERR
  			"%s: crypto_ablkcipher_encrypt() returned %d
  ",
  			__func__, res);
  		return res;
  	}
  	return 0;
  }
  static struct page *alloc_bounce_page(struct f2fs_crypto_ctx *ctx)
  {
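	/*
	 * Write path: GFP_NOWAIT never sleeps, so a depleted bounce pool fails
	 * the encryption instead of blocking writeback.
	 */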
  	ctx->w.bounce_page = mempool_alloc(f2fs_bounce_page_pool, GFP_NOWAIT);
  	if (ctx->w.bounce_page == NULL)
  		return ERR_PTR(-ENOMEM);
  	ctx->flags |= F2FS_WRITE_PATH_FL;
  	return ctx->w.bounce_page;
  }
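/*
 * Write-path usage in outline (a sketch of the contract documented below,
 * not a verbatim caller):
 *
 *	struct page *cpage = f2fs_encrypt(inode, page);
 *
 *	if (IS_ERR(cpage))
 *		return PTR_ERR(cpage);
 *	...submit cpage for I/O and wait for completion...
 *	f2fs_restore_control_page(cpage);
 */
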
  /**
   * f2fs_encrypt() - Encrypts a page
   * @inode:          The inode for which the encryption should take place
   * @plaintext_page: The page to encrypt. Must be locked.
   *
   * Allocates a ciphertext page and encrypts plaintext_page into it using the ctx
   * encryption context.
   *
   * Called on the page write path.  The caller must call
   * f2fs_restore_control_page() on the returned ciphertext page to
   * release the bounce buffer and the encryption context.
   *
 * Return: An allocated page with the encrypted content on success; an
 * ERR_PTR() value on failure (this function never returns NULL).
   */
  struct page *f2fs_encrypt(struct inode *inode,
  			  struct page *plaintext_page)
  {
  	struct f2fs_crypto_ctx *ctx;
  	struct page *ciphertext_page = NULL;
  	int err;
  
  	BUG_ON(!PageLocked(plaintext_page));
  
  	ctx = f2fs_get_crypto_ctx(inode);
  	if (IS_ERR(ctx))
  		return (struct page *)ctx;
  
  	/* The encryption operation will require a bounce page. */
  	ciphertext_page = alloc_bounce_page(ctx);
  	if (IS_ERR(ciphertext_page))
  		goto err_out;

  	ctx->w.control_page = plaintext_page;
  	err = f2fs_page_crypto(ctx, inode, F2FS_ENCRYPT, plaintext_page->index,
  					plaintext_page, ciphertext_page);
  	if (err) {
  		ciphertext_page = ERR_PTR(err);
  		goto err_out;
  	}

  	SetPagePrivate(ciphertext_page);
  	set_page_private(ciphertext_page, (unsigned long)ctx);
  	lock_page(ciphertext_page);
  	return ciphertext_page;
  
  err_out:
  	f2fs_release_crypto_ctx(ctx);
  	return ciphertext_page;
  }
  
  /**
   * f2fs_decrypt() - Decrypts a page in-place
   * @ctx:  The encryption context.
   * @page: The page to decrypt. Must be locked.
   *
   * Decrypts page in-place using the ctx encryption context.
   *
   * Called from the read completion callback.
   *
   * Return: Zero on success, non-zero otherwise.
   */
  int f2fs_decrypt(struct f2fs_crypto_ctx *ctx, struct page *page)
  {
  	BUG_ON(!PageLocked(page));
  
  	return f2fs_page_crypto(ctx, page->mapping->host,
  				F2FS_DECRYPT, page->index, page, page);
  }
  
  /*
   * Convenience function which takes care of allocating and
   * deallocating the encryption context
   */
  int f2fs_decrypt_one(struct inode *inode, struct page *page)
  {
  	struct f2fs_crypto_ctx *ctx = f2fs_get_crypto_ctx(inode);
  	int ret;
  	if (IS_ERR(ctx))
  		return PTR_ERR(ctx);
  	ret = f2fs_decrypt(ctx, page);
  	f2fs_release_crypto_ctx(ctx);
  	return ret;
  }
  
  bool f2fs_valid_contents_enc_mode(uint32_t mode)
  {
  	return (mode == F2FS_ENCRYPTION_MODE_AES_256_XTS);
  }
  
  /**
   * f2fs_validate_encryption_key_size() - Validate the encryption key size
   * @mode: The key mode.
   * @size: The key size to validate.
   *
   * Return: The validated key size for @mode. Zero if invalid.
   */
  uint32_t f2fs_validate_encryption_key_size(uint32_t mode, uint32_t size)
  {
  	if (size == f2fs_encryption_key_size(mode))
  		return size;
  	return 0;
  }