ext4 crypto: implement the ext4 decryption read path

From: Michael Halcrow <mhalcrow@google.com>

Signed-off-by: Michael Halcrow <mhalcrow@google.com>
Signed-off-by: Ildar Muslukhov <ildarm@google.com>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
---
 fs/ext4/file.c     | 18 ++++++++++++++--
 fs/ext4/inode.c    |  7 ++++++
 fs/ext4/readpage.c | 66 +++++++++++++++++++++++++++++++++++++++++++++++++++++++-
 3 files changed, 88 insertions(+), 3 deletions(-)
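
In outline, the read path below works as follows (a hedged summary of the
diff, not original cover-letter text):

	ext4_mpage_readpages()
		ctx = ext4_get_crypto_ctx(inode);  /* per-bio crypto context  */
		bio->bi_private = ctx;             /* marks the bio encrypted */
		bio->bi_end_io = mpage_end_io;
		submit_bio(READ, bio);

	mpage_end_io()                             /* softirq; cannot sleep   */
		queue_work(ext4_read_workqueue, &ctx->work);

	completion_pages()                         /* process context         */
		ext4_decrypt(ctx, page);           /* for each bio segment    */
		SetPageUptodate(page);
		unlock_page(page);
		ext4_release_crypto_ctx(ctx);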
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index fcc6c13..b132a3c 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -218,6 +218,13 @@ static const struct vm_operations_struct ext4_file_vm_ops = {
 
 static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
 {
+	struct inode *inode = file->f_mapping->host;
+
+	if (ext4_encrypted_inode(inode)) {
+		int err = ext4_generate_encryption_key(inode);
+		if (err)
+			return 0;
+	}
 	file_accessed(file);
 	if (IS_DAX(file_inode(file))) {
 		vma->vm_ops = &ext4_dax_vm_ops;
@@ -235,6 +242,7 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
 	struct vfsmount *mnt = filp->f_path.mnt;
 	struct path path;
 	char buf[64], *cp;
+	int ret;
 
 	if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
 		     !(sb->s_flags & MS_RDONLY))) {
@@ -273,11 +281,17 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
 	 * writing and the journal is present
 	 */
 	if (filp->f_mode & FMODE_WRITE) {
-		int ret = ext4_inode_attach_jinode(inode);
+		ret = ext4_inode_attach_jinode(inode);
 		if (ret < 0)
 			return ret;
 	}
-	return dquot_file_open(inode, filp);
+	ret = dquot_file_open(inode, filp);
+	if (!ret && ext4_encrypted_inode(inode)) {
+		ret = ext4_generate_encryption_key(inode);
+		if (ret)
+			ret = -EACCES;
+	}
+	return ret;
 }
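
Both file.c hooks follow one pattern: every entry point through which
encrypted file contents can later be read (open(2) and mmap(2) here) first
makes sure the per-inode key is derived, since the read completion path runs
in contexts where the keyring cannot be consulted. A minimal sketch of that
gate (ext4_ensure_key_example is a hypothetical name; the two real callees
appear in the hunks above):

	/* Hypothetical helper; illustrates the gate, not patch code. */
	static int ext4_ensure_key_example(struct inode *inode)
	{
		if (!ext4_encrypted_inode(inode))
			return 0;		/* nothing to derive */
		/* Derives and caches the per-inode key if absent. */
		return ext4_generate_encryption_key(inode);
	}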
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 7c4527e..8b4fe62 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -3370,6 +3370,13 @@ static int __ext4_block_zero_page_range(handle_t *handle,
 		/* Uhhuh. Read error. Complain and punt. */
 		if (!buffer_uptodate(bh))
 			goto unlock;
+		if (S_ISREG(inode->i_mode) &&
+		    ext4_encrypted_inode(inode)) {
+			/* We expect the key to be set. */
+			BUG_ON(!ext4_has_encryption_key(inode));
+			BUG_ON(blocksize != PAGE_CACHE_SIZE);
+			WARN_ON_ONCE(ext4_decrypt_one(inode, page));
+		}
 	}
 	if (ext4_should_journal_data(inode)) {
 		BUFFER_TRACE(bh, "get write access");
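
The block-zeroing path (truncate/punch-hole) has just read the block from
disk, so the page still holds ciphertext; it must decrypt in place before
zeroing a sub-block range, and the BUG_ON records that only the
blocksize == pagesize case is supported. The ordering constraint as a sketch
(zero_range_in_place is a hypothetical name; zero_user() is the stock kernel
primitive):

	/* Hypothetical helper; shows why decryption precedes zeroing. */
	static int zero_range_in_place(struct inode *inode, struct page *page,
				       unsigned from, unsigned length)
	{
		int err = ext4_decrypt_one(inode, page); /* ciphertext -> plaintext */

		if (err)
			return err;
		zero_user(page, from, length);	/* safe: plaintext bytes */
		return 0;	/* writeback re-encrypts the whole page */
	}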
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index fff9fe6..171b9ac 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
+/*
+ * Call ext4_decrypt on every single page, reusing the encryption
+ * context.
+ */
+static void completion_pages(struct work_struct *work)
+{
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+	struct ext4_crypto_ctx *ctx =
+		container_of(work, struct ext4_crypto_ctx, work);
+	struct bio	*bio	= ctx->bio;
+	struct bio_vec	*bv;
+	int		i;
+
+	bio_for_each_segment_all(bv, bio, i) {
+		struct page *page = bv->bv_page;
+
+		int ret = ext4_decrypt(ctx, page);
+		if (ret) {
+			WARN_ON_ONCE(1);
+			SetPageError(page);
+		} else
+			SetPageUptodate(page);
+		unlock_page(page);
+	}
+	ext4_release_crypto_ctx(ctx);
+	bio_put(bio);
+#else
+	BUG();
+#endif
+}
+
+static inline bool ext4_bio_encrypted(struct bio *bio)
+{
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+	return unlikely(bio->bi_private != NULL);
+#else
+	return false;
+#endif
+}
+
 /*
  * I/O completion handler for multipage BIOs.
  *
  * The mpage code never puts partial pages into a BIO (except for end-of-file).
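
Two conventions here are worth spelling out (my reading, hedged): bi_private
is otherwise unused on these read bios, so a non-NULL value doubles as the
"encrypted" flag that ext4_bio_encrypted() tests; and decryption is deferred
to a workqueue because ext4_decrypt() may sleep in the crypto API, which bio
completion context does not allow. What finishing one page means under that
scheme, as a sketch (example_finish_page is a hypothetical name):

	/* Hypothetical helper; mirrors the per-page logic above. */
	static void example_finish_page(struct ext4_crypto_ctx *ctx,
					struct page *page)
	{
		if (ext4_decrypt(ctx, page))
			SetPageError(page);	/* readers will get -EIO */
		else
			SetPageUptodate(page);	/* plaintext now visible */
		unlock_page(page);		/* wake lock_page() waiters */
	}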
@@ -63,6 +103,18 @@ static void mpage_end_io(struct bio *bio, int err)
 	struct bio_vec *bv;
 	int i;
 
+	if (ext4_bio_encrypted(bio)) {
+		struct ext4_crypto_ctx *ctx = bio->bi_private;
+
+		if (err) {
+			ext4_release_crypto_ctx(ctx);
+		} else {
+			INIT_WORK(&ctx->work, completion_pages);
+			ctx->bio = bio;
+			queue_work(ext4_read_workqueue, &ctx->work);
+			return;
+		}
+	}
 	bio_for_each_segment_all(bv, bio, i) {
 		struct page *page = bv->bv_page;
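
Note the early return: for a successfully read encrypted bio, the pages are
deliberately left locked and !Uptodate at this point; the bio and its pages
are handed to the work item, and completion_pages() performs the
SetPageUptodate()/unlock_page()/bio_put() that the loop below would otherwise
do. A timeline of the handoff for one blocked reader (a sketch of my
understanding, not patch text):

	reader                          completion
	------                          ----------
	lock_page(page); sleeps
	                                mpage_end_io(): queue_work(); return
	                                completion_pages(): ext4_decrypt()
	                                SetPageUptodate(); unlock_page()
	wakes; sees Uptodate; copies
	plaintext to userspace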
@@ -223,13 +275,25 @@ int ext4_mpage_readpages(struct address_space *mapping,
 			bio = NULL;
 		}
 		if (bio == NULL) {
+			struct ext4_crypto_ctx *ctx = NULL;
+
+			if (ext4_encrypted_inode(inode) &&
+			    S_ISREG(inode->i_mode)) {
+				ctx = ext4_get_crypto_ctx(inode);
+				if (IS_ERR(ctx))
+					goto set_error_page;
+			}
 			bio = bio_alloc(GFP_KERNEL,
 				min_t(int, nr_pages, bio_get_nr_vecs(bdev)));
-			if (!bio)
+			if (!bio) {
+				if (ctx)
+					ext4_release_crypto_ctx(ctx);
 				goto set_error_page;
+			}
 			bio->bi_bdev = bdev;
 			bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
 			bio->bi_end_io = mpage_end_io;
+			bio->bi_private = ctx;
 		}
 
 		length = first_hole << blkbits;
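
The allocation hunk also fixes the context's lifetime rule (as I read it):
ext4_get_crypto_ctx() is called once per bio; the caller owns the context
until it is parked in bi_private; after submission exactly one path releases
it (bio_alloc() failure above, I/O error in mpage_end_io(), or
completion_pages() after decrypting). Condensed into a sketch
(example_submit_encrypted_read is a hypothetical name; submit_bio() is the
two-argument form current at the time of this patch):

	/* Hypothetical helper; condenses the ownership hand-off. */
	static int example_submit_encrypted_read(struct inode *inode,
						 struct bio *bio)
	{
		struct ext4_crypto_ctx *ctx = ext4_get_crypto_ctx(inode);

		if (IS_ERR(ctx))
			return PTR_ERR(ctx);	/* nothing allocated to release */
		bio->bi_private = ctx;		/* ownership moves to the bio... */
		bio->bi_end_io = mpage_end_io;
		submit_bio(READ, bio);		/* ...completion releases ctx */
		return 0;
	}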