add patch fix-crypto-warnings
[ext4-patch-queue.git] / add-ext4-encryption-read-callback-support
blob 5f6a7de1f2282cae26d17341b498350dbd5b689b
1 ext4: add EXT4 encryption read callback support
3 From: Mike Halcrow <mhalcrow@google.com>
5 Copies block_read_full_page() to ext4_read_full_page() and adds a bio
6 completion callback that defers page decryption to a workqueue before
7 marking the page up to date.
8 Signed-off-by: Michael Halcrow <mhalcrow@google.com>
9 Signed-off-by: Theodore Ts'o <tytso@mit.edu>
10 ---
11  fs/ext4/file.c      |  16 ++++++-
12  fs/ext4/inode.c     | 186 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++--
13  include/linux/bio.h |   3 ++
14  3 files changed, 199 insertions(+), 6 deletions(-)
16 diff --git a/fs/ext4/file.c b/fs/ext4/file.c
17 index aca7b24..6958f1a 100644
18 --- a/fs/ext4/file.c
19 +++ b/fs/ext4/file.c
20 @@ -200,8 +200,13 @@ static const struct vm_operations_struct ext4_file_vm_ops = {
22  static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
23  {
24 +       int res;
26         file_accessed(file);
27         vma->vm_ops = &ext4_file_vm_ops;
28 +       res = ext4_get_crypto_key(file);
29 +       if (res == -EACCES) /* If it's encrypted and we don't have the key */
30 +               return res;
31         return 0;
32  }
34 @@ -212,6 +217,7 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
35         struct vfsmount *mnt = filp->f_path.mnt;
36         struct path path;
37         char buf[64], *cp;
38 +       int ret;
40         if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
41                      !(sb->s_flags & MS_RDONLY))) {
42 @@ -250,11 +256,17 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
43          * writing and the journal is present
44          */
45         if (filp->f_mode & FMODE_WRITE) {
46 -               int ret = ext4_inode_attach_jinode(inode);
47 +               ret = ext4_inode_attach_jinode(inode);
48                 if (ret < 0)
49                         return ret;
50         }
51 -       return dquot_file_open(inode, filp);
52 +       ret = dquot_file_open(inode, filp);
53 +       if (!ret) {
54 +               ret = ext4_get_crypto_key(filp);
55 +               if (ret != -EACCES)
56 +                       ret = 0;
57 +       }
58 +       return ret;
59  }
61  /*
62 diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
63 index 3aedf18..bd7a7d5 100644
64 --- a/fs/ext4/inode.c
65 +++ b/fs/ext4/inode.c
66 @@ -39,6 +39,7 @@
67  #include <linux/ratelimit.h>
68  #include <linux/aio.h>
69  #include <linux/bitops.h>
70 +#include <linux/prefetch.h>
72  #include "ext4_jbd2.h"
73  #include "xattr.h"
74 @@ -784,6 +785,8 @@ struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
75                                ext4_lblk_t block, int create)
76  {
77         struct buffer_head *bh;
78 +       struct ext4_inode_info *ei = EXT4_I(inode);
79 +       struct ext4_crypto_ctx *ctx;
81         bh = ext4_getblk(handle, inode, block, create);
82         if (IS_ERR(bh))
83 @@ -792,8 +795,16 @@ struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
84                 return bh;
85         ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
86         wait_on_buffer(bh);
87 -       if (buffer_uptodate(bh))
88 +       if (buffer_uptodate(bh)) {
89 +               if (ext4_is_encryption_enabled(ei)) {
90 +                       BUG_ON(!bh->b_page);
91 +                       BUG_ON(bh->b_size != PAGE_CACHE_SIZE);
92 +                       ctx = ext4_get_crypto_ctx(false, &ei->i_encryption_key);
93 +                       WARN_ON_ONCE(ext4_decrypt(ctx, bh->b_page));
94 +                       ext4_release_crypto_ctx(ctx);
95 +               }
96                 return bh;
97 +       }
98         put_bh(bh);
99         return ERR_PTR(-EIO);
101 @@ -2799,20 +2810,152 @@ static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
102         return generic_block_bmap(mapping, block, ext4_get_block);
105 +static void ext4_completion_work(struct work_struct *work)
107 +       struct ext4_crypto_ctx *ctx =
108 +               container_of(work, struct ext4_crypto_ctx, work);
109 +       struct page *page = ctx->control_page;
110 +       WARN_ON_ONCE(ext4_decrypt(ctx, page));
111 +       ext4_release_crypto_ctx(ctx);
112 +       SetPageUptodate(page);
113 +       unlock_page(page);
116 +static int ext4_complete_cb(struct bio *bio, int res)
118 +       struct ext4_crypto_ctx *ctx = bio->bi_cb_ctx;
119 +       struct page *page = ctx->control_page;
120 +       BUG_ON(atomic_read(&ctx->dbg_refcnt) != 1);
121 +       if (res) {
122 +               ext4_release_crypto_ctx(ctx);
123 +               unlock_page(page);
124 +               return res;
125 +       }
126 +       INIT_WORK(&ctx->work, ext4_completion_work);
127 +       queue_work(mpage_read_workqueue, &ctx->work);
128 +       return 0;
131 +static int ext4_read_full_page(struct page *page)
133 +       struct inode *inode = page->mapping->host;
134 +       sector_t iblock, lblock;
135 +       struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
136 +       unsigned int blocksize, bbits;
137 +       int nr, i;
138 +       int fully_mapped = 1;
140 +       head = create_page_buffers(page, inode, 0);
141 +       blocksize = head->b_size;
142 +       bbits = ilog2(blocksize);
144 +       iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
145 +       lblock = (i_size_read(inode)+blocksize-1) >> bbits;
146 +       bh = head;
147 +       nr = 0;
148 +       i = 0;
150 +       do {
151 +               if (buffer_uptodate(bh))
152 +                       continue;
154 +               if (!buffer_mapped(bh)) {
155 +                       int err = 0;
157 +                       fully_mapped = 0;
158 +                       if (iblock < lblock) {
159 +                               WARN_ON(bh->b_size != blocksize);
160 +                               err = ext4_get_block(inode, iblock, bh, 0);
161 +                               if (err)
162 +                                       SetPageError(page);
163 +                       }
164 +                       if (!buffer_mapped(bh)) {
165 +                               zero_user(page, i * blocksize, blocksize);
166 +                               if (!err)
167 +                                       set_buffer_uptodate(bh);
168 +                               continue;
169 +                       }
170 +                       /*
171 +                        * get_block() might have updated the buffer
172 +                        * synchronously
173 +                        */
174 +                       if (buffer_uptodate(bh))
175 +                               continue;
176 +               }
177 +               arr[nr++] = bh;
178 +       } while (i++, iblock++, (bh = bh->b_this_page) != head);
180 +       if (fully_mapped)
181 +               SetPageMappedToDisk(page);
183 +       if (!nr) {
184 +               /*
185 +                * All buffers are uptodate - we can set the page uptodate
186 +                * as well. But not if get_block() returned an error.
187 +                */
188 +               if (!PageError(page))
189 +                       SetPageUptodate(page);
190 +               unlock_page(page);
191 +               return 0;
192 +       }
194 +       /* TODO(mhalcrow): For the development phase, encryption
195 +        * requires that the block size be equal to the page size. To
196 +        * make this the case for release (if we go that route), we'll
197 +        * need a super.c change to verify. */
198 +       BUG_ON(nr != 1);
200 +       /* Stage two: lock the buffers */
201 +       for (i = 0; i < nr; i++) {
202 +               bh = arr[i];
203 +               lock_buffer(bh);
204 +               mark_buffer_async_read(bh);
205 +       }
207 +       /*
208 +        * Stage 3: start the IO.  Check for uptodateness
209 +        * inside the buffer lock in case another process reading
210 +        * the underlying blockdev brought it uptodate (the sct fix).
211 +        */
212 +       for (i = 0; i < nr; i++) {
213 +               bh = arr[i];
214 +               if (buffer_uptodate(bh))
215 +                       end_buffer_async_read(bh, 1);
216 +               else {
217 +                       struct ext4_inode_info *ei = EXT4_I(inode);
218 +                       struct ext4_crypto_ctx *ctx = ext4_get_crypto_ctx(
219 +                               false, &ei->i_encryption_key);
220 +                       BUG_ON(atomic_read(&ctx->dbg_refcnt) != 0);
221 +                       atomic_inc(&ctx->dbg_refcnt);
222 +                       BUG_ON(ctx->control_page);
223 +                       ctx->control_page = page;
224 +                       BUG_ON(atomic_read(&ctx->dbg_refcnt) != 1);
225 +                       if (submit_bh_cb(READ, bh, ext4_complete_cb, ctx))
226 +                               ext4_release_crypto_ctx(ctx);
227 +               }
228 +       }
229 +       return 0;
232  static int ext4_readpage(struct file *file, struct page *page)
234         int ret = -EAGAIN;
235         struct inode *inode = page->mapping->host;
236 +       struct ext4_inode_info *ei = EXT4_I(inode);
238         trace_ext4_readpage(page);
240         if (ext4_has_inline_data(inode))
241                 ret = ext4_readpage_inline(inode, page);
243 -       if (ret == -EAGAIN)
244 +       if (ext4_is_encryption_enabled(ei)) {
245 +               BUG_ON(ret != -EAGAIN);
246 +               ext4_read_full_page(page);
247 +       } else if (ret == -EAGAIN) {
248                 return mpage_readpage(page, ext4_get_block);
249 +       }
251 -       return ret;
252 +       return 0;
255  static int
256 @@ -2820,12 +2963,35 @@ ext4_readpages(struct file *file, struct address_space *mapping,
257                 struct list_head *pages, unsigned nr_pages)
259         struct inode *inode = mapping->host;
260 +       struct ext4_inode_info *ei = EXT4_I(inode);
261 +       struct page *page = NULL;
262 +       unsigned page_idx;
264         /* If the file has inline data, no need to do readpages. */
265         if (ext4_has_inline_data(inode))
266                 return 0;
268 -       return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
269 +       if (ext4_is_encryption_enabled(ei)) {
270 +               for (page_idx = 0; page_idx < nr_pages; page_idx++) {
271 +                       page = list_entry(pages->prev, struct page, lru);
272 +                       prefetchw(&page->flags);
273 +                       list_del(&page->lru);
274 +                       if (!add_to_page_cache_lru(page, mapping, page->index,
275 +                                                  GFP_KERNEL)) {
276 +                               if (!PageUptodate(page)) {
277 +                                       ext4_read_full_page(page);
278 +                               } else {
279 +                                       unlock_page(page);
280 +                               }
281 +                       }
282 +                       page_cache_release(page);
283 +               }
284 +               BUG_ON(!list_empty(pages));
285 +               return 0;
286 +       } else {
287 +               return mpage_readpages(mapping, pages, nr_pages,
288 +                                      ext4_get_block);
289 +       }
292  static void ext4_invalidatepage(struct page *page, unsigned int offset,
293 @@ -3084,9 +3250,13 @@ static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
295         struct file *file = iocb->ki_filp;
296         struct inode *inode = file->f_mapping->host;
297 +       struct ext4_inode_info *ei = EXT4_I(inode);
298         size_t count = iov_iter_count(iter);
299         ssize_t ret;
301 +       if (ext4_is_encryption_enabled(ei))
302 +               return 0;
304         /*
305          * If we are doing data journalling we don't support O_DIRECT
306          */
307 @@ -3209,8 +3379,10 @@ static int ext4_block_zero_page_range(handle_t *handle,
308         unsigned blocksize, max, pos;
309         ext4_lblk_t iblock;
310         struct inode *inode = mapping->host;
311 +       struct ext4_inode_info *ei = EXT4_I(inode);
312         struct buffer_head *bh;
313         struct page *page;
314 +       struct ext4_crypto_ctx *ctx;
315         int err = 0;
317         page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
318 @@ -3266,6 +3438,12 @@ static int ext4_block_zero_page_range(handle_t *handle,
319                 /* Uhhuh. Read error. Complain and punt. */
320                 if (!buffer_uptodate(bh))
321                         goto unlock;
322 +               if (ext4_is_encryption_enabled(ei)) {
323 +                       BUG_ON(blocksize != PAGE_CACHE_SIZE);
324 +                       ctx = ext4_get_crypto_ctx(false, &ei->i_encryption_key);
325 +                       WARN_ON_ONCE(ext4_decrypt(ctx, page));
326 +                       ext4_release_crypto_ctx(ctx);
327 +               }
328         }
329         if (ext4_should_journal_data(inode)) {
330                 BUFFER_TRACE(bh, "get write access");
331 diff --git a/include/linux/bio.h b/include/linux/bio.h
332 index b39e500..6b42e2a 100644
333 --- a/include/linux/bio.h
334 +++ b/include/linux/bio.h
335 @@ -376,6 +376,9 @@ static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
339 +/* TODO(mhalcrow): Only here for test; remove before release */
340 +extern atomic_t global_bio_count;
342  extern void bio_endio(struct bio *, int);
343  extern void bio_endio_nodec(struct bio *, int);
344  struct request_queue;