ext4 crypto: implement the ext4 decryption read path

From: Michael Halcrow <mhalcrow@google.com>
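
Generate the per-inode encryption key when an encrypted regular file
is opened or mmapped, and decrypt file data after it is read from
disk: ext4_mpage_readpages() attaches an ext4_crypto_ctx to each bio
it submits for an encrypted inode, mpage_end_io() punts completed
bios to ext4_read_workqueue, and completion_pages() decrypts each
page in place before marking it uptodate and unlocking it.
ext4_block_zero_page_range() likewise decrypts the page it has just
read before zeroing part of a block.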

Change-Id: Ie9c043a132a01da60d1617662cd30307639f5599
Signed-off-by: Michael Halcrow <mhalcrow@google.com>
Signed-off-by: Ildar Muslukhov <ildarm@google.com>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
---
 fs/ext4/file.c     | 19 +++++++++++++++++--
 fs/ext4/inode.c    |  8 ++++++++
 fs/ext4/readpage.c | 66 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-
 3 files changed, 90 insertions(+), 3 deletions(-)

diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 8131be8..756d10f 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -28,6 +28,7 @@
 #include <linux/pagevec.h>
 #include "ext4.h"
 #include "ext4_jbd2.h"
+#include "ext4_crypto.h"
 #include "xattr.h"
 #include "acl.h"
 
@@ -200,6 +201,13 @@ static const struct vm_operations_struct ext4_file_vm_ops = {
 
 static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
 {
+       struct inode *inode = file->f_mapping->host;
+
+       if (ext4_encrypted_inode(inode)) {
+               int err = ext4_generate_encryption_key(inode);
+               if (err)
+                       return 0;
+       }
        file_accessed(file);
        vma->vm_ops = &ext4_file_vm_ops;
        return 0;
@@ -212,6 +220,7 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
        struct vfsmount *mnt = filp->f_path.mnt;
        struct path path;
        char buf[64], *cp;
+       int ret;
 
        if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
                     !(sb->s_flags & MS_RDONLY))) {
@@ -250,11 +259,17 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
         * writing and the journal is present
         */
        if (filp->f_mode & FMODE_WRITE) {
-               int ret = ext4_inode_attach_jinode(inode);
+               ret = ext4_inode_attach_jinode(inode);
                if (ret < 0)
                        return ret;
        }
-       return dquot_file_open(inode, filp);
+       ret = dquot_file_open(inode, filp);
+       if (!ret && ext4_encrypted_inode(inode)) {
+               ret = ext4_generate_encryption_key(inode);
+               if (ret)
+                       ret = -EACCES;
+       }
+       return ret;
 }
 
 /*
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index dcc836c..5b6b7b6 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -39,6 +39,7 @@
 #include <linux/ratelimit.h>
 #include <linux/aio.h>
 #include <linux/bitops.h>
+#include <linux/prefetch.h>
 
 #include "ext4_jbd2.h"
 #include "ext4_crypto.h"
@@ -3363,6 +3364,13 @@ static int ext4_block_zero_page_range(handle_t *handle,
                /* Uhhuh. Read error. Complain and punt. */
                if (!buffer_uptodate(bh))
                        goto unlock;
+               if (S_ISREG(inode->i_mode) &&
+                   ext4_encrypted_inode(inode)) {
+                       /* We expect the key to be set. */
+                       BUG_ON(!ext4_has_encryption_key(inode));
+                       BUG_ON(blocksize != PAGE_CACHE_SIZE);
+                       WARN_ON_ONCE(ext4_decrypt_one(inode, page));
+               }
        }
        if (ext4_should_journal_data(inode)) {
                BUFFER_TRACE(bh, "get write access");
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index fff9fe6..171b9ac 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -47,6 +47,46 @@
 #include "ext4.h"
 
 /*
+ * Call ext4_decrypt on every single page, reusing the encryption
+ * context.
+ */
+static void completion_pages(struct work_struct *work)
+{
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+       struct ext4_crypto_ctx *ctx =
+               container_of(work, struct ext4_crypto_ctx, work);
+       struct bio      *bio    = ctx->bio;
+       struct bio_vec  *bv;
+       int             i;
+
+       bio_for_each_segment_all(bv, bio, i) {
+               struct page *page = bv->bv_page;
+
+               int ret = ext4_decrypt(ctx, page);
+               if (ret) {
+                       WARN_ON_ONCE(1);
+                       SetPageError(page);
+               } else
+                       SetPageUptodate(page);
+               unlock_page(page);
+       }
+       ext4_release_crypto_ctx(ctx);
+       bio_put(bio);
+#else
+       BUG();
+#endif
+}
+
+static inline bool ext4_bio_encrypted(struct bio *bio)
+{
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+       return unlikely(bio->bi_private != NULL);
+#else
+       return false;
+#endif
+}
+
+/*
  * I/O completion handler for multipage BIOs.
  *
  * The mpage code never puts partial pages into a BIO (except for end-of-file).
@@ -63,6 +103,18 @@ static void mpage_end_io(struct bio *bio, int err)
        struct bio_vec *bv;
        int i;
 
+       if (ext4_bio_encrypted(bio)) {
+               struct ext4_crypto_ctx *ctx = bio->bi_private;
+
+               if (err) {
+                       ext4_release_crypto_ctx(ctx);
+               } else {
+                       INIT_WORK(&ctx->work, completion_pages);
+                       ctx->bio = bio;
+                       queue_work(ext4_read_workqueue, &ctx->work);
+                       return;
+               }
+       }
        bio_for_each_segment_all(bv, bio, i) {
                struct page *page = bv->bv_page;
 
@@ -223,13 +275,25 @@ int ext4_mpage_readpages(struct address_space *mapping,
                        bio = NULL;
                }
                if (bio == NULL) {
+                       struct ext4_crypto_ctx *ctx = NULL;
+
+                       if (ext4_encrypted_inode(inode) &&
+                           S_ISREG(inode->i_mode)) {
+                               ctx = ext4_get_crypto_ctx(inode);
+                               if (IS_ERR(ctx))
+                                       goto set_error_page;
+                       }
                        bio = bio_alloc(GFP_KERNEL,
                                min_t(int, nr_pages, bio_get_nr_vecs(bdev)));
-                       if (!bio)
+                       if (!bio) {
+                               if (ctx)
+                                       ext4_release_crypto_ctx(ctx);
                                goto set_error_page;
+                       }
                        bio->bi_bdev = bdev;
                        bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
                        bio->bi_end_io = mpage_end_io;
+                       bio->bi_private = ctx;
                }
 
                length = first_hole << blkbits;
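
[For review convenience: the completion handler as it reads with the
patch applied, assembled from the readpage.c hunks above. This is a
sketch, not the literal file contents; the comments about execution
context are inferred from the use of ext4_read_workqueue rather than
stated in the patch.]

/*
 * 1) ext4_mpage_readpages() allocates an ext4_crypto_ctx for an
 *    encrypted regular file and stashes it in bio->bi_private.
 * 2) mpage_end_io() sees that context and defers the bio to a
 *    workqueue: decryption may sleep, while bio completion can run
 *    in atomic context, so the pages stay locked and not uptodate
 *    until the worker has run.
 * 3) completion_pages() decrypts every page in place, marks it
 *    uptodate (or error), unlocks it, and releases the context.
 */
static void mpage_end_io(struct bio *bio, int err)
{
        struct bio_vec *bv;
        int i;

        if (ext4_bio_encrypted(bio)) {
                struct ext4_crypto_ctx *ctx = bio->bi_private;

                if (err) {
                        /* I/O failed; there is nothing to decrypt.
                         * Drop the context and fall through so the
                         * pages get the normal error handling. */
                        ext4_release_crypto_ctx(ctx);
                } else {
                        INIT_WORK(&ctx->work, completion_pages);
                        ctx->bio = bio;
                        queue_work(ext4_read_workqueue, &ctx->work);
                        return;         /* worker unlocks the pages */
                }
        }
        bio_for_each_segment_all(bv, bio, i) {
                struct page *page = bv->bv_page;
                /* ... unencrypted (or failed) path: set the page
                 * uptodate or error and unlock it, as before ... */
        }
        bio_put(bio);
}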