ext4: add EXT4 encryption read callback support

From: Mike Halcrow <mhalcrow@google.com>
Copies block_read_full_page() to ext4_read_full_page() and adds read
completion callback logic near the end: for encrypted inodes, buffer
heads are submitted via submit_bh_cb() with a per-page crypto context,
and the completion callback defers decryption to a workqueue before
marking the page uptodate.
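
For review purposes, a minimal standalone sketch of that flow follows.
It is illustrative only: the fake_* names are stand-ins invented for
the example, and only the overall shape (completion callback ->
deferred work -> decrypt -> mark uptodate) mirrors the patch; the real
ext4_get_crypto_ctx()/ext4_decrypt() helpers come from earlier patches
in this series.

#include <stdio.h>
#include <stddef.h>

struct fake_page { char data[16]; int uptodate; };
struct fake_ctx  { struct fake_page *control_page; };

/* Stand-in for ext4_decrypt(): a trivial XOR so the example runs. */
static int fake_decrypt(struct fake_ctx *ctx, struct fake_page *page)
{
	size_t i;

	(void)ctx;
	for (i = 0; i < sizeof(page->data); i++)
		page->data[i] ^= 0x2a;
	return 0;
}

/* cf. ext4_completion_work(): runs later, outside bio completion. */
static void completion_work(struct fake_ctx *ctx)
{
	struct fake_page *page = ctx->control_page;

	fake_decrypt(ctx, page);
	page->uptodate = 1;	/* cf. SetPageUptodate() */
}

/*
 * cf. ext4_complete_cb(): in the patch this queues the work on
 * mpage_read_workqueue; here the worker is called directly so the
 * sketch stays single-threaded.
 */
static void complete_cb(struct fake_ctx *ctx)
{
	completion_work(ctx);
}

int main(void)
{
	struct fake_page page = { .data = "ciphertextbytes", .uptodate = 0 };
	struct fake_ctx ctx = { .control_page = &page };

	complete_cb(&ctx);	/* pretend the READ bio just completed */
	printf("uptodate=%d\n", page.uptodate);
	return 0;
}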
Signed-off-by: Michael Halcrow <mhalcrow@google.com>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
---
 fs/ext4/file.c      |  16 ++++++-
 fs/ext4/inode.c     | 186 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++--
 include/linux/bio.h |   3 ++
 3 files changed, 199 insertions(+), 6 deletions(-)
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index aca7b24..6958f1a 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -200,8 +200,13 @@ static const struct vm_operations_struct ext4_file_vm_ops = {
 static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
 	vma->vm_ops = &ext4_file_vm_ops;
+	res = ext4_get_crypto_key(file);
+	if (res == -EACCES) /* If it's encrypted and we don't have the key */
@@ -212,6 +217,7 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
 	struct vfsmount *mnt = filp->f_path.mnt;
 	if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
 		     !(sb->s_flags & MS_RDONLY))) {
@@ -250,11 +256,17 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
 	 * writing and the journal is present
 	if (filp->f_mode & FMODE_WRITE) {
-		int ret = ext4_inode_attach_jinode(inode);
+		ret = ext4_inode_attach_jinode(inode);
-	return dquot_file_open(inode, filp);
+	ret = dquot_file_open(inode, filp);
+	ret = ext4_get_crypto_key(filp);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 3aedf18..bd7a7d5 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
 #include <linux/ratelimit.h>
 #include <linux/aio.h>
 #include <linux/bitops.h>
+#include <linux/prefetch.h>
 #include "ext4_jbd2.h"
@@ -784,6 +785,8 @@ struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
 			       ext4_lblk_t block, int create)
 	struct buffer_head *bh;
+	struct ext4_inode_info *ei = EXT4_I(inode);
+	struct ext4_crypto_ctx *ctx;
 	bh = ext4_getblk(handle, inode, block, create);
@@ -792,8 +795,16 @@ struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
 	ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
-	if (buffer_uptodate(bh))
+	if (buffer_uptodate(bh)) {
+		if (ext4_is_encryption_enabled(ei)) {
+			BUG_ON(!bh->b_page);
+			BUG_ON(bh->b_size != PAGE_CACHE_SIZE);
+			ctx = ext4_get_crypto_ctx(false, &ei->i_encryption_key);
+			WARN_ON_ONCE(ext4_decrypt(ctx, bh->b_page));
+			ext4_release_crypto_ctx(ctx);
@@ -2799,20 +2810,152 @@ static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
 	return generic_block_bmap(mapping, block, ext4_get_block);
+static void ext4_completion_work(struct work_struct *work)
+	struct ext4_crypto_ctx *ctx =
+		container_of(work, struct ext4_crypto_ctx, work);
+	struct page *page = ctx->control_page;
+	WARN_ON_ONCE(ext4_decrypt(ctx, page));
+	ext4_release_crypto_ctx(ctx);
+	SetPageUptodate(page);
+static int ext4_complete_cb(struct bio *bio, int res)
+	struct ext4_crypto_ctx *ctx = bio->bi_cb_ctx;
+	struct page *page = ctx->control_page;
+	BUG_ON(atomic_read(&ctx->dbg_refcnt) != 1);
+	ext4_release_crypto_ctx(ctx);
+	INIT_WORK(&ctx->work, ext4_completion_work);
+	queue_work(mpage_read_workqueue, &ctx->work);
+static int ext4_read_full_page(struct page *page)
+	struct inode *inode = page->mapping->host;
+	sector_t iblock, lblock;
+	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
+	unsigned int blocksize, bbits;
+	int fully_mapped = 1;
+	head = create_page_buffers(page, inode, 0);
+	blocksize = head->b_size;
+	bbits = ilog2(blocksize);
+	iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
+	lblock = (i_size_read(inode)+blocksize-1) >> bbits;
+		if (buffer_uptodate(bh))
+		if (!buffer_mapped(bh)) {
+			if (iblock < lblock) {
+				WARN_ON(bh->b_size != blocksize);
+				err = ext4_get_block(inode, iblock, bh, 0);
+					SetPageError(page);
+			if (!buffer_mapped(bh)) {
+				zero_user(page, i * blocksize, blocksize);
+					set_buffer_uptodate(bh);
+			 * get_block() might have updated the buffer
+			if (buffer_uptodate(bh))
+	} while (i++, iblock++, (bh = bh->b_this_page) != head);
+		SetPageMappedToDisk(page);
+		 * All buffers are uptodate - we can set the page uptodate
+		 * as well. But not if get_block() returned an error.
+		if (!PageError(page))
+			SetPageUptodate(page);
+	/* TODO(mhalcrow): For the development phase, encryption
+	 * requires that the block size be equal to the page size. To
+	 * make this the case for release (if we go that route), we'll
+	 * need a super.c change to verify. */
+	/* Stage two: lock the buffers */
+	for (i = 0; i < nr; i++) {
+		mark_buffer_async_read(bh);
+	 * Stage 3: start the IO. Check for uptodateness
+	 * inside the buffer lock in case another process reading
+	 * the underlying blockdev brought it uptodate (the sct fix).
+	for (i = 0; i < nr; i++) {
+		if (buffer_uptodate(bh))
+			end_buffer_async_read(bh, 1);
+			struct ext4_inode_info *ei = EXT4_I(inode);
+			struct ext4_crypto_ctx *ctx = ext4_get_crypto_ctx(
+				false, &ei->i_encryption_key);
+			BUG_ON(atomic_read(&ctx->dbg_refcnt) != 0);
+			atomic_inc(&ctx->dbg_refcnt);
+			BUG_ON(ctx->control_page);
+			ctx->control_page = page;
+			BUG_ON(atomic_read(&ctx->dbg_refcnt) != 1);
+			if (submit_bh_cb(READ, bh, ext4_complete_cb, ctx))
+				ext4_release_crypto_ctx(ctx);
 static int ext4_readpage(struct file *file, struct page *page)
 	struct inode *inode = page->mapping->host;
+	struct ext4_inode_info *ei = EXT4_I(inode);
 	trace_ext4_readpage(page);
 	if (ext4_has_inline_data(inode))
 		ret = ext4_readpage_inline(inode, page);
-	if (ret == -EAGAIN)
+	if (ext4_is_encryption_enabled(ei)) {
+		BUG_ON(ret != -EAGAIN);
+		ext4_read_full_page(page);
+	} else if (ret == -EAGAIN) {
 		return mpage_readpage(page, ext4_get_block);
@@ -2820,12 +2963,35 @@ ext4_readpages(struct file *file, struct address_space *mapping,
 		struct list_head *pages, unsigned nr_pages)
 	struct inode *inode = mapping->host;
+	struct ext4_inode_info *ei = EXT4_I(inode);
+	struct page *page = NULL;
 	/* If the file has inline data, no need to do readpages. */
 	if (ext4_has_inline_data(inode))
-	return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
+	if (ext4_is_encryption_enabled(ei)) {
+		for (page_idx = 0; page_idx < nr_pages; page_idx++) {
+			page = list_entry(pages->prev, struct page, lru);
+			prefetchw(&page->flags);
+			list_del(&page->lru);
+			if (!add_to_page_cache_lru(page, mapping, page->index,
+				if (!PageUptodate(page)) {
+					ext4_read_full_page(page);
+			page_cache_release(page);
+		BUG_ON(!list_empty(pages));
+		return mpage_readpages(mapping, pages, nr_pages,
 static void ext4_invalidatepage(struct page *page, unsigned int offset,
@@ -3084,9 +3250,13 @@ static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file->f_mapping->host;
+	struct ext4_inode_info *ei = EXT4_I(inode);
 	size_t count = iov_iter_count(iter);
+	if (ext4_is_encryption_enabled(ei))
 	 * If we are doing data journalling we don't support O_DIRECT
@@ -3209,8 +3379,10 @@ static int ext4_block_zero_page_range(handle_t *handle,
 	unsigned blocksize, max, pos;
 	struct inode *inode = mapping->host;
+	struct ext4_inode_info *ei = EXT4_I(inode);
 	struct buffer_head *bh;
+	struct ext4_crypto_ctx *ctx;
 	page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
@@ -3266,6 +3438,12 @@ static int ext4_block_zero_page_range(handle_t *handle,
 		/* Uhhuh. Read error. Complain and punt. */
 		if (!buffer_uptodate(bh))
+		if (ext4_is_encryption_enabled(ei)) {
+			BUG_ON(blocksize != PAGE_CACHE_SIZE);
+			ctx = ext4_get_crypto_ctx(false, &ei->i_encryption_key);
+			WARN_ON_ONCE(ext4_decrypt(ctx, page));
+			ext4_release_crypto_ctx(ctx);
 	if (ext4_should_journal_data(inode)) {
 		BUFFER_TRACE(bh, "get write access");
diff --git a/include/linux/bio.h b/include/linux/bio.h
index b39e500..6b42e2a 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -376,6 +376,9 @@ static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
+/* TODO(mhalcrow): Only here for test; remove before release */
+extern atomic_t global_bio_count;
 extern void bio_endio(struct bio *, int);
 extern void bio_endio_nodec(struct bio *, int);
 struct request_queue;