1 From: Michael Halcrow <mhalcrow@google.com>
3 ext4: implement the ext4 decryption read path
5 Pulls in read_full_page(), modified to support decryption on read
8 [ XXX there are some non-decryption related patch hunks here we need to
11 Signed-off-by: Michael Halcrow <mhalcrow@google.com>
12 Signed-off-by: Theodore Ts'o <tytso@mit.edu>
13 Signed-off-by: Ildar Muslukhov <ildarm@google.com>
15 fs/ext4/crypto.c | 136 +++++++++++++++++++++++++++++++++++-------
17 fs/ext4/file.c | 16 ++++-
18 fs/ext4/inode.c | 177 ++++++++++++++++++++++++++++++++++++++++++++++++++++---
19 fs/ext4/super.c | 8 ++-
20 5 files changed, 304 insertions(+), 34 deletions(-)
22 diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
23 index 80e6fac..0a4d9fb 100644
24 --- a/fs/ext4/crypto.c
25 +++ b/fs/ext4/crypto.c
27 #include <linux/random.h>
28 #include <linux/scatterlist.h>
29 #include <linux/spinlock_types.h>
30 +#include <linux/key.h>
35 /* Encryption added and removed here! (L: */
37 -mempool_t *ext4_bounce_page_pool = NULL;
38 +static mempool_t *ext4_bounce_page_pool = NULL;
40 -LIST_HEAD(ext4_free_crypto_ctxs);
41 -DEFINE_SPINLOCK(ext4_crypto_ctx_lock);
42 +static LIST_HEAD(ext4_free_crypto_ctxs);
43 +static DEFINE_SPINLOCK(ext4_crypto_ctx_lock);
44 +static struct ext4_encryption_key dummy_key;
47 * ext4_release_crypto_ctx() - Releases an encryption context
48 @@ -79,7 +81,7 @@ void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx)
49 * Return: An allocated and initialized encryption context on success. An error
50 * value or NULL otherwise.
52 -static struct ext4_crypto_ctx *ext4_alloc_and_init_crypto_ctx(u32 mask)
53 +static struct ext4_crypto_ctx *ext4_alloc_and_init_crypto_ctx(gfp_t mask)
55 struct ext4_crypto_ctx *ctx = kzalloc(sizeof(struct ext4_crypto_ctx),
57 @@ -367,8 +369,8 @@ static void ext4_prep_pages_for_write(struct page *ciphertext_page,
58 * Return: An allocated page with the encrypted content on success. Else, an
59 * error value or NULL.
61 -struct page *ext4_xts_encrypt(struct ext4_crypto_ctx *ctx,
62 - struct page *plaintext_page)
63 +static struct page *ext4_xts_encrypt(struct ext4_crypto_ctx *ctx,
64 + struct page *plaintext_page)
66 struct page *ciphertext_page = ctx->bounce_page;
67 u8 xts_tweak[EXT4_XTS_TWEAK_SIZE];
68 @@ -473,7 +475,7 @@ struct page *ext4_encrypt(struct ext4_crypto_ctx *ctx,
70 * Return: Zero on success, non-zero otherwise.
72 -int ext4_xts_decrypt(struct ext4_crypto_ctx *ctx, struct page *page)
73 +static int ext4_xts_decrypt(struct ext4_crypto_ctx *ctx, struct page *page)
75 u8 xts_tweak[EXT4_XTS_TWEAK_SIZE];
76 struct ablkcipher_request *req = NULL;
77 @@ -514,7 +516,7 @@ int ext4_xts_decrypt(struct ext4_crypto_ctx *ctx, struct page *page)
78 ablkcipher_request_free(req);
81 - printk_ratelimited(KERN_ERR "%s: res = [%d]\n", __func__, res);
82 + printk_ratelimited(KERN_ERR "%s: res = %d\n", __func__, res);
86 @@ -570,11 +572,18 @@ static int ext4_get_wrapping_key_from_keyring(
87 payload = (struct encrypted_key_payload *)create_key->payload.data;
88 if (WARN_ON_ONCE(create_key->datalen !=
89 sizeof(struct ecryptfs_auth_tok))) {
91 + "%s: Got auth tok length %d, expected %zu\n",
92 + __func__, create_key->datalen,
93 + sizeof(struct ecryptfs_auth_tok));
96 auth_tok = (struct ecryptfs_auth_tok *)(&(payload)->payload_data);
97 if (WARN_ON_ONCE(!(auth_tok->token.password.flags &
98 ECRYPTFS_SESSION_KEY_ENCRYPTION_KEY_SET))) {
100 + "%s: ECRYPTFS_SESSION_KEY_ENCRYPTION_KEY_SET not set in auth_tok->token.password.flags\n",
104 BUILD_BUG_ON(EXT4_MAX_KEY_SIZE < EXT4_AES_256_XTS_KEY_SIZE);
105 @@ -662,7 +671,7 @@ static uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size)
106 struct ext4_hmac_result {
107 struct completion completion;
113 * ext4_hmac_complete() - Completion for async HMAC
114 @@ -705,8 +714,11 @@ static int ext4_hmac(bool derivation, const char *key, size_t key_size,
117 BUG_ON(dst_size > SHA512_DIGEST_SIZE);
120 + printk(KERN_ERR "%s: crypto_alloc_ahash() returned %ld\n",
121 + __func__, PTR_ERR(tfm));
124 req = ahash_request_alloc(tfm, GFP_NOFS);
127 @@ -718,8 +730,11 @@ static int ext4_hmac(bool derivation, const char *key, size_t key_size,
128 ext4_hmac_complete, &ehr);
130 res = crypto_ahash_setkey(tfm, key, key_size);
133 + printk(KERN_ERR "%s: crypto_ahash_setkey() returned %d\n",
137 sg_init_one(&sg, src, src_size);
138 ahash_request_set_crypt(req, &sg, hmac, src_size);
139 init_completion(&ehr.completion);
140 @@ -729,13 +744,18 @@ static int ext4_hmac(bool derivation, const char *key, size_t key_size,
141 wait_for_completion(&ehr.completion);
146 + printk(KERN_ERR "%s: crypto_ahash_digest() returned %d\n",
150 memcpy(dst, hmac, dst_size);
152 crypto_free_ahash(tfm);
154 ahash_request_free(req);
156 + printk(KERN_ERR "%s: returning %d\n", __func__, res);
160 @@ -799,8 +819,11 @@ static int ext4_crypt_wrapper_virt(const char *enc_key, const char *iv,
163 desc.tfm = crypto_alloc_blkcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
164 - if (IS_ERR(desc.tfm))
165 + if (IS_ERR(desc.tfm)) {
166 + printk(KERN_ERR "%s: crypto_alloc_blkcipher() returned %ld\n",
167 + __func__, PTR_ERR(desc.tfm));
168 return PTR_ERR(desc.tfm);
172 crypto_blkcipher_set_flags(desc.tfm, CRYPTO_TFM_REQ_WEAK_KEY);
173 @@ -809,12 +832,19 @@ static int ext4_crypt_wrapper_virt(const char *enc_key, const char *iv,
174 crypto_blkcipher_set_iv(desc.tfm, iv, EXT4_WRAPPING_IV_SIZE);
175 res = crypto_blkcipher_setkey(desc.tfm, enc_key,
176 EXT4_AES_256_CTR_KEY_SIZE);
179 + printk(KERN_ERR "%s: crypto_blkcipher_setkey() returned %d\n",
184 res = crypto_blkcipher_encrypt(&desc, &dst, &src, size);
186 res = crypto_blkcipher_decrypt(&desc, &dst, &src, size);
188 + printk(KERN_ERR "%s: crypto_blkcipher_*crypt() returned %d\n",
192 crypto_free_blkcipher(desc.tfm);
194 @@ -953,8 +983,12 @@ static int ext4_wrap_key(char *wrapped_key_packet, size_t *key_packet_size,
197 res = ext4_get_wrapping_key(wrapping_key, packet->sig, inode);
200 + ext4_error(inode->i_sb,
201 + "%s: ext4_get_wrapping_key() with packet->sig %s returned %d\n",
202 + __func__, packet->sig, res);
205 BUG_ON(*key_packet_size != EXT4_FULL_WRAPPED_KEY_PACKET_V0_SIZE);
207 /* Size, type, nonce, and IV */
208 @@ -970,8 +1004,12 @@ static int ext4_wrap_key(char *wrapped_key_packet, size_t *key_packet_size,
210 EXT4_DERIVATION_TWEAK_NONCE_SIZE,
211 enc_key, EXT4_AES_256_CTR_KEY_SIZE);
214 + ext4_error(inode->i_sb,
215 + "%s: ext4_hmac_derive_key() returned %d\n",
220 /* Wrap the data key with the wrapping encryption key */
221 *((uint32_t *)key_packet.mode) = htonl(key->mode);
222 @@ -984,8 +1022,12 @@ static int ext4_wrap_key(char *wrapped_key_packet, size_t *key_packet_size,
223 EXT4_V0_SERIALIZED_KEY_SIZE, true);
224 memset(enc_key, 0, EXT4_AES_256_CTR_KEY_SIZE);
225 memset(key_packet.raw, 0, EXT4_MAX_KEY_SIZE);
228 + ext4_error(inode->i_sb,
229 + "%s: ext4_crypt_wrapper_virt() returned %d\n",
234 /* Calculate the HMAC over the entire packet (except, of
235 * course, the HMAC buffer at the end) */
236 @@ -994,8 +1036,12 @@ static int ext4_wrap_key(char *wrapped_key_packet, size_t *key_packet_size,
238 EXT4_DERIVATION_TWEAK_NONCE_SIZE,
239 int_key, EXT4_HMAC_KEY_SIZE);
242 + ext4_error(inode->i_sb,
243 + "%s: ext4_hmac_derive_key() returned %d\n",
247 BUILD_BUG_ON(EXT4_FULL_WRAPPED_KEY_PACKET_V0_SIZE < EXT4_HMAC_SIZE);
248 res = ext4_hmac_integrity(int_key, EXT4_HMAC_KEY_SIZE,
250 @@ -1006,6 +1052,8 @@ static int ext4_wrap_key(char *wrapped_key_packet, size_t *key_packet_size,
251 memset(int_key, 0, EXT4_HMAC_KEY_SIZE);
253 memset(wrapping_key, 0, EXT4_AES_256_XTS_KEY_SIZE);
255 + ext4_error(inode->i_sb, "%s: returning %d\n", __func__, res);
259 @@ -1025,6 +1073,26 @@ static void ext4_generate_encryption_key(const struct dentry *dentry)
260 get_random_bytes(key->raw, key->size);
264 + * Ted lost his saving throw vs ecryptfs key management, so use a
265 + * dummy key for testing purposes. It appears the ecryptfs userspace
266 + * ABI is mysteriously kconfig dependent, or there is some mysterious
267 + * silent failure if you are missing some kconfig option. This also
268 + * allows us to avoid bloating the kvm-xfstests image with
271 +static void generate_dummy_key(struct inode *inode)
275 + dummy_key.mode = EXT4_SB(inode->i_sb)->s_default_encryption_mode;
276 + dummy_key.size = ext4_encryption_key_size(dummy_key.mode);
277 + for (i = 0; i < dummy_key.size; i++) {
278 + dummy_key.raw[i] = "TESTKEY"[i % 7];
284 * ext4_set_crypto_key() - Generates and sets the encryption key for the inode
285 * @dentry: The dentry for the encryption key.
286 @@ -1045,12 +1113,23 @@ int ext4_set_crypto_key(struct dentry *dentry)
287 struct ext4_inode_info *ei = EXT4_I(inode);
290 + if (test_opt2(inode->i_sb, DUMMY_ENCRYPTION)) {
291 + if (unlikely(dummy_key.mode == 0))
292 + generate_dummy_key(inode);
293 + ei->i_encryption_key = dummy_key;
298 ext4_generate_encryption_key(dentry);
299 res = ext4_wrap_key(wrapped_key_packet, &wrapped_key_packet_size,
300 &ei->i_encryption_key, inode);
303 + ext4_error(dentry->d_inode->i_sb,
304 + "%s: ext4_wrap_key() returned %d\n", __func__,
308 root_packet[0] = EXT4_PACKET_SET_VERSION_V0;
309 BUILD_BUG_ON(EXT4_PACKET_SET_V0_MAX_SIZE !=
310 (EXT4_PACKET_HEADER_SIZE +
311 @@ -1058,12 +1137,17 @@ try_again:
312 BUG_ON(sizeof(root_packet) != root_packet_size);
313 res = ext4_xattr_set(inode, EXT4_XATTR_INDEX_ENCRYPTION_METADATA, "",
314 root_packet, root_packet_size, 0);
316 + ext4_error(dentry->d_inode->i_sb,
317 + "%s: ext4_xattr_set() returned %d\n", __func__,
324 ei->i_encryption_key.mode = EXT4_ENCRYPTION_MODE_INVALID;
325 - printk_ratelimited(KERN_ERR "%s: res = [%d]\n", __func__, res);
326 + printk_ratelimited(KERN_ERR "%s: res = %d\n", __func__, res);
330 @@ -1095,7 +1179,7 @@ static int ext4_get_root_packet(struct inode *inode, char *root_packet,
331 if (root_packet[0] != EXT4_PACKET_SET_VERSION_V0) {
334 - "%s: Expected root packet version [%d]; got [%d]\n",
335 + "%s: Expected root packet version %d; got %d\n",
336 __func__, EXT4_PACKET_SET_VERSION_V0, root_packet[0]);
339 @@ -1117,8 +1201,16 @@ int ext4_get_crypto_key(const struct file *file)
340 wrapped_key_packet_size);
341 struct inode *inode = file->f_mapping->host;
342 struct ext4_inode_info *ei = EXT4_I(inode);
343 - int res = ext4_get_root_packet(inode, root_packet, &root_packet_size);
346 + if (test_opt2(inode->i_sb, DUMMY_ENCRYPTION)) {
347 + if (unlikely(dummy_key.mode == 0))
348 + generate_dummy_key(inode);
349 + ei->i_encryption_key = dummy_key;
353 + res = ext4_get_root_packet(inode, root_packet, &root_packet_size);
356 res = ext4_unwrap_key(wrapped_key_packet,
357 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
358 index 11a9960..292a3a3 100644
361 @@ -1011,6 +1011,7 @@ struct ext4_inode_info {
363 #define EXT4_MOUNT2_HURD_COMPAT 0x00000004 /* Support HURD-castrated
365 +#define EXT4_MOUNT2_DUMMY_ENCRYPTION 0x80000000 /* Use dummy encryption */
367 #define clear_opt(sb, opt) EXT4_SB(sb)->s_mount_opt &= \
369 diff --git a/fs/ext4/file.c b/fs/ext4/file.c
370 index aca7b24..6958f1a 100644
373 @@ -200,8 +200,13 @@ static const struct vm_operations_struct ext4_file_vm_ops = {
375 static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
380 vma->vm_ops = &ext4_file_vm_ops;
381 + res = ext4_get_crypto_key(file);
382 + if (res == -EACCES) /* If it's encrypted and we don't have the key */
387 @@ -212,6 +217,7 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
388 struct vfsmount *mnt = filp->f_path.mnt;
393 if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
394 !(sb->s_flags & MS_RDONLY))) {
395 @@ -250,11 +256,17 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
396 * writing and the journal is present
398 if (filp->f_mode & FMODE_WRITE) {
399 - int ret = ext4_inode_attach_jinode(inode);
400 + ret = ext4_inode_attach_jinode(inode);
404 - return dquot_file_open(inode, filp);
405 + ret = dquot_file_open(inode, filp);
407 + ret = ext4_get_crypto_key(filp);
408 + if (ret != -EACCES)
415 diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
416 index 117b691..c60f15c 100644
417 --- a/fs/ext4/inode.c
418 +++ b/fs/ext4/inode.c
420 #include <linux/ratelimit.h>
421 #include <linux/aio.h>
422 #include <linux/bitops.h>
423 +#include <linux/prefetch.h>
425 #include "ext4_jbd2.h"
427 @@ -781,6 +782,8 @@ struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
428 ext4_lblk_t block, int create)
430 struct buffer_head *bh;
431 + struct ext4_inode_info *ei = EXT4_I(inode);
432 + struct ext4_crypto_ctx *ctx;
434 bh = ext4_getblk(handle, inode, block, create);
436 @@ -789,8 +792,14 @@ struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
438 ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
440 - if (buffer_uptodate(bh))
441 + if (buffer_uptodate(bh)) {
442 + if (ext4_is_encryption_enabled(ei)) {
443 + ctx = ext4_get_crypto_ctx(false, &ei->i_encryption_key);
444 + WARN_ON_ONCE(ext4_decrypt(ctx, bh->b_page));
445 + ext4_release_crypto_ctx(ctx);
450 return ERR_PTR(-EIO);
452 @@ -877,9 +886,8 @@ int do_journal_get_write_access(handle_t *handle,
454 static int ext4_get_block_write_nolock(struct inode *inode, sector_t iblock,
455 struct buffer_head *bh_result, int create);
457 static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
458 - get_block_t *get_block)
459 + get_block_t *get_block)
461 unsigned from = pos & (PAGE_CACHE_SIZE - 1);
462 unsigned to = from + len;
463 @@ -971,7 +979,6 @@ out:
468 static int ext4_write_begin(struct file *file, struct address_space *mapping,
469 loff_t pos, unsigned len, unsigned flags,
470 struct page **pagep, void **fsdata)
471 @@ -2376,7 +2383,6 @@ static int ext4_writepages(struct address_space *mapping,
472 handle_t *handle = NULL;
473 struct mpage_da_data mpd;
474 struct inode *inode = mapping->host;
475 - struct ext4_inode_info *ei = EXT4_I(inode);
476 int needed_blocks, rsv_blocks = 0, ret = 0;
477 struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
479 @@ -2393,7 +2399,7 @@ static int ext4_writepages(struct address_space *mapping,
480 if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
483 - if (ext4_should_journal_data(inode) || ext4_is_encryption_enabled(ei)) {
484 + if (ext4_should_journal_data(inode)) {
485 struct blk_plug plug;
487 blk_start_plug(&plug);
488 @@ -2908,20 +2914,142 @@ static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
489 return generic_block_bmap(mapping, block, ext4_get_block);
492 +static void ext4_completion_work(struct work_struct *work)
494 + struct ext4_crypto_ctx *ctx =
495 + container_of(work, struct ext4_crypto_ctx, work);
496 + struct page *page = ctx->control_page;
497 + WARN_ON_ONCE(ext4_decrypt(ctx, page));
498 + ext4_release_crypto_ctx(ctx);
499 + SetPageUptodate(page);
503 +static int ext4_complete_cb(struct bio *bio, int res)
505 + struct ext4_crypto_ctx *ctx = bio->bi_cb_ctx;
506 + struct page *page = ctx->control_page;
508 + ext4_release_crypto_ctx(ctx);
512 + INIT_WORK(&ctx->work, ext4_completion_work);
513 + queue_work(mpage_read_workqueue, &ctx->work);
517 +static int ext4_read_full_page(struct page *page)
519 + struct inode *inode = page->mapping->host;
520 + struct buffer_head *head = create_page_buffers(page, inode, 0);
521 + unsigned int blocksize = head->b_size;
522 + unsigned int bbits = ilog2(blocksize);
523 + sector_t iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
524 + sector_t lblock = (i_size_read(inode)+blocksize-1) >> bbits;
525 + struct buffer_head *bh = head;
526 + struct buffer_head *arr[MAX_BUF_PER_PAGE];
529 + int fully_mapped = 1;
532 + if (buffer_uptodate(bh))
535 + if (!buffer_mapped(bh)) {
539 + if (iblock < lblock) {
540 + WARN_ON(bh->b_size != blocksize);
541 + err = ext4_get_block(inode, iblock, bh, 0);
543 + SetPageError(page);
545 + if (!buffer_mapped(bh)) {
546 + zero_user(page, i * blocksize, blocksize);
548 + set_buffer_uptodate(bh);
552 + * get_block() might have updated the buffer
555 + if (buffer_uptodate(bh))
559 + } while (i++, iblock++, (bh = bh->b_this_page) != head);
562 + SetPageMappedToDisk(page);
566 + * All buffers are uptodate - we can set the page uptodate
567 + * as well. But not if get_block() returned an error.
569 + if (!PageError(page))
570 + SetPageUptodate(page);
576 + * Encryption requires blocksize is page size, so we should never see
577 + * more than one buffer head per page.
581 + /* Stage two: lock the buffers */
582 + for (i = 0; i < nr; i++) {
585 + mark_buffer_async_read(bh);
589 + * Stage 3: start the IO. Check for uptodateness
590 + * inside the buffer lock in case another process reading
591 + * the underlying blockdev brought it uptodate (the sct fix).
593 + for (i = 0; i < nr; i++) {
595 + if (buffer_uptodate(bh))
596 + end_buffer_async_read(bh, 1);
598 + struct ext4_inode_info *ei = EXT4_I(inode);
599 + struct ext4_crypto_ctx *ctx = ext4_get_crypto_ctx(
600 + false, &ei->i_encryption_key);
601 + atomic_inc(&ctx->dbg_refcnt);
602 + ctx->control_page = page;
603 + if (submit_bh_cb(READ, bh, ext4_complete_cb, ctx))
604 + ext4_release_crypto_ctx(ctx);
610 static int ext4_readpage(struct file *file, struct page *page)
613 struct inode *inode = page->mapping->host;
614 + struct ext4_inode_info *ei = EXT4_I(inode);
616 trace_ext4_readpage(page);
618 if (ext4_has_inline_data(inode))
619 ret = ext4_readpage_inline(inode, page);
621 - if (ret == -EAGAIN)
622 + if (ext4_is_encryption_enabled(ei)) {
623 + ext4_read_full_page(page);
624 + } else if (ret == -EAGAIN) {
625 return mpage_readpage(page, ext4_get_block);
633 @@ -2929,12 +3057,35 @@ ext4_readpages(struct file *file, struct address_space *mapping,
634 struct list_head *pages, unsigned nr_pages)
636 struct inode *inode = mapping->host;
637 + struct ext4_inode_info *ei = EXT4_I(inode);
638 + struct page *page = NULL;
641 /* If the file has inline data, no need to do readpages. */
642 if (ext4_has_inline_data(inode))
645 - return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
646 + if (ext4_is_encryption_enabled(ei)) {
647 + for (page_idx = 0; page_idx < nr_pages; page_idx++) {
648 + page = list_entry(pages->prev, struct page, lru);
649 + prefetchw(&page->flags);
650 + list_del(&page->lru);
651 + if (!add_to_page_cache_lru(page, mapping, page->index,
653 + if (!PageUptodate(page)) {
654 + ext4_read_full_page(page);
659 + page_cache_release(page);
661 + BUG_ON(!list_empty(pages));
664 + return mpage_readpages(mapping, pages, nr_pages,
669 static void ext4_invalidatepage(struct page *page, unsigned int offset,
670 @@ -3322,8 +3473,10 @@ static int ext4_block_zero_page_range(handle_t *handle,
671 unsigned blocksize, max, pos;
673 struct inode *inode = mapping->host;
674 + struct ext4_inode_info *ei = EXT4_I(inode);
675 struct buffer_head *bh;
677 + struct ext4_crypto_ctx *ctx;
680 page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
681 @@ -3379,6 +3532,12 @@ static int ext4_block_zero_page_range(handle_t *handle,
682 /* Uhhuh. Read error. Complain and punt. */
683 if (!buffer_uptodate(bh))
685 + if (ext4_is_encryption_enabled(ei)) {
686 + BUG_ON(blocksize != PAGE_CACHE_SIZE);
687 + ctx = ext4_get_crypto_ctx(false, &ei->i_encryption_key);
688 + WARN_ON_ONCE(ext4_decrypt(ctx, page));
689 + ext4_release_crypto_ctx(ctx);
692 if (ext4_should_journal_data(inode)) {
693 BUFFER_TRACE(bh, "get write access");
694 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
695 index c3f4d4c..240bce4 100644
696 --- a/fs/ext4/super.c
697 +++ b/fs/ext4/super.c
698 @@ -1147,7 +1147,7 @@ enum {
699 Opt_inode_readahead_blks, Opt_journal_ioprio,
700 Opt_dioread_nolock, Opt_dioread_lock,
701 Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
702 - Opt_max_dir_size_kb, Opt_encrypt_key_sig,
703 + Opt_max_dir_size_kb, Opt_encrypt_key_sig, Opt_dummy_encryption
706 static const match_table_t tokens = {
707 @@ -1224,6 +1224,7 @@ static const match_table_t tokens = {
708 {Opt_noinit_itable, "noinit_itable"},
709 {Opt_max_dir_size_kb, "max_dir_size_kb=%u"},
710 {Opt_encrypt_key_sig, "encrypt_key_sig=%s"},
711 + {Opt_dummy_encryption, "dummy_encryption"},
712 {Opt_removed, "check=none"}, /* mount option from ext2/3 */
713 {Opt_removed, "nocheck"}, /* mount option from ext2/3 */
714 {Opt_removed, "reservation"}, /* mount option from ext2/3 */
715 @@ -1423,6 +1424,7 @@ static const struct mount_opts {
716 {Opt_jqfmt_vfsv1, QFMT_VFS_V1, MOPT_QFMT},
717 {Opt_max_dir_size_kb, 0, MOPT_GTE0},
718 {Opt_encrypt_key_sig, 0, MOPT_STRING},
719 + {Opt_dummy_encryption, 0, 0},
723 @@ -1546,6 +1548,10 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
724 ECRYPTFS_SIG_SIZE_HEX);
725 sbi->s_default_encryption_wrapper_desc.wrapping_key_sig[
726 ECRYPTFS_SIG_SIZE_HEX] = '\0';
727 + } else if (token == Opt_dummy_encryption) {
728 + sbi->s_default_encryption_mode =
729 + EXT4_ENCRYPTION_MODE_AES_256_XTS;
730 + set_opt2(sb, DUMMY_ENCRYPTION);
731 } else if (token == Opt_stripe) {
733 } else if (token == Opt_resuid) {
735 2.1.0.rc2.206.gedb03e5