1 ext4 crypto: add ext4 encryption facilities
3 From: Michael Halcrow <mhalcrow@google.com>
5 We encrypt into bounce pages and schedule them for block I/O. We
6 decrypt in-place in the newly added read completion callback.
8 The current encryption mode, AES-256-XTS, is the first of several
9 encryption modes on the roadmap. Planned modes include HMAC-SHA1 for
10 integrity-only and AES-256-GCM for authenticated encryption. These
11 future modes depend on anticipated functionality for storing per-block
12 metadata.
14 Signed-off-by: Michael Halcrow <mhalcrow@google.com>
15 Signed-off-by: Ildar Muslukhov <ildarm@google.com>
16 Signed-off-by: Theodore Ts'o <tytso@mit.edu>
17 ---
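Notes (not part of the commit message or the diff below): a minimal usage
sketch of the helpers this patch introduces, as they might be driven from a
writeback path -- get a context with a bounce page attached, encrypt the
locked plaintext page into it, submit the bounce page for block I/O, and
release the context once the write completes. The function name and the I/O
submission step are placeholders; only the ext4_* helpers called here are
defined by this patch.

static int ext4_encrypt_page_for_io_sketch(struct page *page)
{
        struct ext4_inode_info *ei = EXT4_I(page->mapping->host);
        struct ext4_crypto_ctx *ctx;
        struct page *bounce_page;

        /* Caller holds the page lock; ext4_encrypt() requires it. */
        if (!ext4_is_encryption_enabled(ei))
                return 0;       /* write the plaintext page as usual */

        /* Get a context with a bounce page attached. */
        ctx = ext4_get_crypto_ctx(true, &ei->i_encryption_key);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        /* Encrypt into the bounce page; the plaintext page becomes the
         * "control page" tracked by the context. */
        bounce_page = ext4_encrypt(ctx, page);
        if (IS_ERR_OR_NULL(bounce_page)) {
                ext4_release_crypto_ctx(ctx);
                return bounce_page ? PTR_ERR(bounce_page) : -ENOMEM;
        }

        /* ... submit bounce_page for block I/O here; the write-end
         * path would then call ext4_release_crypto_ctx(). ... */
        return 0;
}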
18  fs/ext4/Makefile      |    9 +-
19  fs/ext4/crypto.c      | 1133 +++++++++++++++++++++++++++++++++++++++++++++++++
20  fs/ext4/ext4.h        |   29 ++
21  fs/ext4/ext4_crypto.h |  172 ++++++++
22  fs/ext4/extents.c     |    4 +-
23  fs/ext4/super.c       |   38 +-
24  fs/ext4/xattr.h       |    1 +
25  7 files changed, 1379 insertions(+), 7 deletions(-)
26  create mode 100644 fs/ext4/crypto.c
27  create mode 100644 fs/ext4/ext4_crypto.h
29 diff --git a/fs/ext4/Makefile b/fs/ext4/Makefile
30 index 0310fec..de4de1c 100644
31 --- a/fs/ext4/Makefile
32 +++ b/fs/ext4/Makefile
33 @@ -4,10 +4,11 @@
35  obj-$(CONFIG_EXT4_FS) += ext4.o
37 -ext4-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o page-io.o \
38 -               ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \
39 -               ext4_jbd2.o migrate.o mballoc.o block_validity.o move_extent.o \
40 -               mmp.o indirect.o extents_status.o xattr.o xattr_user.o \
41 +ext4-y := balloc.o bitmap.o crypto.o dir.o file.o fsync.o ialloc.o     \
42 +               inode.o page-io.o ioctl.o namei.o super.o symlink.o     \
43 +               hash.o resize.o extents.o ext4_jbd2.o migrate.o         \
44 +               mballoc.o block_validity.o move_extent.o mmp.o          \
45 +               indirect.o extents_status.o xattr.o xattr_user.o        \
46                 xattr_trusted.o inline.o
48  ext4-$(CONFIG_EXT4_FS_POSIX_ACL)       += acl.o
49 diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
50 new file mode 100644
51 index 0000000..80e6fac
52 --- /dev/null
53 +++ b/fs/ext4/crypto.c
54 @@ -0,0 +1,1133 @@
55 +/*
56 + * linux/fs/ext4/crypto.c
57 + *
58 + * This contains encryption functions for ext4
59 + *
60 + * Written by Michael Halcrow, 2014.
61 + *
62 + * This has not yet undergone a rigorous security audit.
63 + *
64 + * The usage of AES-XTS should conform to recommendations in NIST
65 + * Special Publication 800-38E. The usage of AES-GCM should conform to
66 + * the recommendations in NIST Special Publication 800-38D. Further
67 + * guidance for block-oriented storage is in IEEE P1619/D16. The key
68 + * derivation code implements an HKDF (see RFC 5869).
69 + */
71 +#include <crypto/hash.h>
72 +#include <crypto/sha.h>
73 +#include <keys/user-type.h>
74 +#include <keys/encrypted-type.h>
75 +#include <linux/crypto.h>
76 +#include <linux/gfp.h>
77 +#include <linux/kernel.h>
78 +#include <linux/key.h>
79 +#include <linux/list.h>
80 +#include <linux/mempool.h>
81 +#include <linux/random.h>
82 +#include <linux/scatterlist.h>
83 +#include <linux/spinlock_types.h>
85 +#include "ext4.h"
86 +#include "xattr.h"
88 +/* Encryption added and removed here! (L: */
90 +mempool_t *ext4_bounce_page_pool = NULL;
92 +LIST_HEAD(ext4_free_crypto_ctxs);
93 +DEFINE_SPINLOCK(ext4_crypto_ctx_lock);
95 +/**
96 + * ext4_release_crypto_ctx() - Releases an encryption context
97 + * @ctx: The encryption context to release.
98 + *
99 + * If the encryption context was allocated from the pre-allocated pool, returns
100 + * it to that pool. Else, frees it.
101 + *
102 + * If there's a bounce page in the context, this releases it as well.
103 + */
104 +void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx)
106 +       unsigned long flags;
108 +       atomic_dec(&ctx->dbg_refcnt);
109 +       if (ctx->bounce_page) {
110 +               if (ctx->flags & EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL) {
111 +                       __free_page(ctx->bounce_page);
112 +               } else {
113 +                       mempool_free(ctx->bounce_page, ext4_bounce_page_pool);
114 +               }
115 +               ctx->bounce_page = NULL;
116 +       }
117 +       ctx->control_page = NULL;
118 +       if (ctx->flags & EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL) {
119 +               if (ctx->tfm)
120 +                       crypto_free_tfm(ctx->tfm);
121 +               kfree(ctx);
122 +       } else {
123 +               spin_lock_irqsave(&ext4_crypto_ctx_lock, flags);
124 +               list_add(&ctx->free_list, &ext4_free_crypto_ctxs);
125 +               spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
126 +       }
129 +/**
130 + * ext4_alloc_and_init_crypto_ctx() - Allocates and inits an encryption context
131 + * @mask: The allocation mask.
132 + *
133 + * Return: An allocated and initialized encryption context on success. An error
134 + * value or NULL otherwise.
135 + */
136 +static struct ext4_crypto_ctx *ext4_alloc_and_init_crypto_ctx(u32 mask)
138 +       struct ext4_crypto_ctx *ctx = kzalloc(sizeof(struct ext4_crypto_ctx),
139 +                                             mask);
141 +       if (!ctx)
142 +               return ERR_PTR(-ENOMEM);
143 +       return ctx;
146 +/**
147 + * ext4_get_crypto_ctx() - Gets an encryption context
148 + * @with_page: If true, allocates and attaches a bounce page.
149 + * @key:       The encryption key for the context.
150 + *
151 + * Allocates and initializes an encryption context.
152 + *
153 + * Return: An allocated and initialized encryption context on success; error
154 + * value or NULL otherwise.
155 + */
156 +struct ext4_crypto_ctx *ext4_get_crypto_ctx(
157 +       bool with_page, const struct ext4_encryption_key *key)
159 +       struct ext4_crypto_ctx *ctx = NULL;
160 +       int res = 0;
161 +       unsigned long flags;
163 +       /* We first try getting the ctx from a free list because in the common
164 +        * case the ctx will have an allocated and initialized crypto tfm, so
165 +        * it's probably a worthwhile optimization. For the bounce page, we
166 +        * first try getting it from the kernel allocator because that's just
167 +        * about as fast as getting it from a list and because a cache of free
168 +        * pages should generally be a "last resort" option for a filesystem to
169 +        * be able to do its job. */
170 +       spin_lock_irqsave(&ext4_crypto_ctx_lock, flags);
171 +       ctx = list_first_entry_or_null(&ext4_free_crypto_ctxs,
172 +                                      struct ext4_crypto_ctx, free_list);
173 +       if (ctx)
174 +               list_del(&ctx->free_list);
175 +       spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
176 +       if (!ctx) {
177 +               ctx = ext4_alloc_and_init_crypto_ctx(GFP_NOFS);
178 +               if (IS_ERR(ctx)) {
179 +                       res = PTR_ERR(ctx);
180 +                       goto out;
181 +               }
182 +               ctx->flags |= EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
183 +       } else {
184 +               ctx->flags &= ~EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
185 +       }
186 +       atomic_set(&ctx->dbg_refcnt, 0);
188 +       /* Allocate a new Crypto API context if we don't already have one or if
189 +        * it isn't the right mode. */
190 +       BUG_ON(key->mode == EXT4_ENCRYPTION_MODE_INVALID);
191 +       if (ctx->tfm && (ctx->mode != key->mode)) {
192 +               crypto_free_tfm(ctx->tfm);
193 +               ctx->tfm = NULL;
194 +               ctx->mode = EXT4_ENCRYPTION_MODE_INVALID;
195 +       }
196 +       if (!ctx->tfm) {
197 +               switch (key->mode) {
198 +               case EXT4_ENCRYPTION_MODE_AES_256_XTS:
199 +                       ctx->tfm = crypto_ablkcipher_tfm(
200 +                               crypto_alloc_ablkcipher("xts(aes)", 0, 0));
201 +                       break;
202 +               case EXT4_ENCRYPTION_MODE_AES_256_GCM:
203 +                       /* TODO(mhalcrow): AEAD w/ gcm(aes);
204 +                        * crypto_aead_setauthsize() */
205 +               case EXT4_ENCRYPTION_MODE_HMAC_SHA1:
206 +                       /* TODO(mhalcrow): AHASH w/ hmac(sha1) */
207 +               case EXT4_ENCRYPTION_MODE_AES_256_XTS_RANDOM_IV_HMAC_SHA1:
208 +                       ctx->tfm = ERR_PTR(-ENOTSUPP);
209 +                       break;
210 +               default:
211 +                       BUG();
212 +               }
213 +               if (IS_ERR_OR_NULL(ctx->tfm)) {
214 +                       res = PTR_ERR(ctx->tfm);
215 +                       ctx->tfm = NULL;
216 +                       goto out;
217 +               }
218 +               ctx->mode = key->mode;
219 +       }
220 +       BUG_ON(key->size != ext4_encryption_key_size(key->mode));
222 +       /* There shouldn't be a bounce page attached to the crypto
223 +        * context at this point. */
224 +       BUG_ON(ctx->bounce_page);
225 +       if (!with_page)
226 +               goto out;
228 +       /* The encryption operation will require a bounce page. */
229 +       ctx->bounce_page = alloc_page(GFP_NOFS);
230 +       if (!ctx->bounce_page) {
231 +               /* This is a potential bottleneck, but at least we'll have
232 +                * forward progress. */
233 +               ctx->bounce_page = mempool_alloc(ext4_bounce_page_pool,
234 +                                                GFP_NOFS);
235 +               if (WARN_ON_ONCE(!ctx->bounce_page)) {
236 +                       ctx->bounce_page = mempool_alloc(ext4_bounce_page_pool,
237 +                                                        GFP_NOFS | __GFP_WAIT);
238 +               }
239 +               ctx->flags &= ~EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
240 +       } else {
241 +               ctx->flags |= EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
242 +       }
243 +out:
244 +       if (res) {
245 +               if (!IS_ERR_OR_NULL(ctx))
246 +                       ext4_release_crypto_ctx(ctx);
247 +               ctx = ERR_PTR(res);
248 +       }
249 +       return ctx;
252 +struct workqueue_struct *mpage_read_workqueue;
254 +/**
255 + * ext4_delete_crypto_ctxs() - Deletes/frees all encryption contexts
256 + */
257 +static void ext4_delete_crypto_ctxs(void)
259 +       struct ext4_crypto_ctx *pos, *n;
261 +       list_for_each_entry_safe(pos, n, &ext4_free_crypto_ctxs, free_list) {
262 +               if (pos->bounce_page) {
263 +                       if (pos->flags &
264 +                           EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL) {
265 +                               __free_page(pos->bounce_page);
266 +                       } else {
267 +                               mempool_free(pos->bounce_page,
268 +                                            ext4_bounce_page_pool);
269 +                       }
270 +               }
271 +               if (pos->tfm)
272 +                       crypto_free_tfm(pos->tfm);
273 +               kfree(pos);
274 +       }
277 +/**
278 + * ext4_allocate_crypto_ctxs() - Allocates a pool of encryption contexts
279 + * @num_to_allocate: The number of encryption contexts to allocate.
280 + *
281 + * Return: Zero on success, non-zero otherwise.
282 + */
283 +static int __init ext4_allocate_crypto_ctxs(size_t num_to_allocate)
285 +       struct ext4_crypto_ctx *ctx = NULL;
287 +       while (num_to_allocate > 0) {
288 +               ctx = ext4_alloc_and_init_crypto_ctx(GFP_KERNEL);
289 +               if (IS_ERR(ctx))
290 +                       break;
291 +               list_add(&ctx->free_list, &ext4_free_crypto_ctxs);
292 +               num_to_allocate--;
293 +       }
294 +       if (IS_ERR(ctx))
295 +               ext4_delete_crypto_ctxs();
296 +       return PTR_ERR_OR_ZERO(ctx);
299 +/**
300 + * ext4_delete_crypto() - Frees all allocated encryption objects
301 + */
302 +void ext4_delete_crypto(void)
304 +       ext4_delete_crypto_ctxs();
305 +       mempool_destroy(ext4_bounce_page_pool);
306 +       destroy_workqueue(mpage_read_workqueue);
309 +/**
310 + * ext4_allocate_crypto() - Allocates encryption objects for later use
311 + * @num_crypto_pages: The number of bounce pages to allocate for encryption.
312 + * @num_crypto_ctxs:  The number of encryption contexts to allocate.
313 + *
314 + * Return: Zero on success, non-zero otherwise.
315 + */
316 +int __init ext4_allocate_crypto(size_t num_crypto_pages, size_t num_crypto_ctxs)
318 +       int res = 0;
320 +       mpage_read_workqueue = alloc_workqueue("ext4_crypto", WQ_HIGHPRI, 0);
321 +       if (!mpage_read_workqueue) {
322 +               res = -ENOMEM;
323 +               goto fail;
324 +       }
325 +       res = ext4_allocate_crypto_ctxs(num_crypto_ctxs);
326 +       if (res)
327 +               goto fail;
328 +       ext4_bounce_page_pool = mempool_create_page_pool(num_crypto_pages, 0);
329 +       if (!ext4_bounce_page_pool)
330 +               goto fail;
331 +       return 0;
332 +fail:
333 +       ext4_delete_crypto();
334 +       return res;
337 +/**
338 + * ext4_xts_tweak_for_page() - Generates an XTS tweak for a page
339 + * @xts_tweak: Buffer into which this writes the XTS tweak.
340 + * @page:      The page for which this generates a tweak.
341 + *
342 + * Generates an XTS tweak value for the given page.
343 + */
344 +static void ext4_xts_tweak_for_page(u8 xts_tweak[EXT4_XTS_TWEAK_SIZE],
345 +                                   const struct page *page)
347 +       /* Only do this for XTS tweak values. For other modes (CBC,
348 +        * GCM, etc.), you will most likely need to do something
349 +        * different. */
350 +       BUILD_BUG_ON(EXT4_XTS_TWEAK_SIZE < sizeof(page->index));
351 +       memcpy(xts_tweak, &page->index, sizeof(page->index));
352 +       memset(&xts_tweak[sizeof(page->index)], 0,
353 +              EXT4_XTS_TWEAK_SIZE - sizeof(page->index));
356 +/**
357 + * set_bh_to_page() - Re-assigns the pages for a set of buffer heads
358 + * @head: The head of the buffer list to reassign.
359 + * @page: The page to which to re-assign the buffer heads.
360 + */
361 +void set_bh_to_page(struct buffer_head *head, struct page *page)
363 +       struct buffer_head *bh = head;
365 +       do {
366 +               set_bh_page(bh, page, bh_offset(bh));
367 +               if (PageDirty(page))
368 +                       set_buffer_dirty(bh);
369 +               if (!bh->b_this_page)
370 +                       bh->b_this_page = head;
371 +       } while ((bh = bh->b_this_page) != head);
374 +struct ext4_crypt_result {
375 +       struct completion completion;
376 +       int res;
379 +/**
380 + * ext4_crypt_complete() - The completion callback for page encryption
381 + * @req: The asynchronous encryption request context
382 + * @res: The result of the encryption operation
383 + */
384 +static void ext4_crypt_complete(struct crypto_async_request *req, int res)
386 +       struct ext4_crypt_result *ecr = req->data;
388 +       if (res == -EINPROGRESS)
389 +               return;
390 +       ecr->res = res;
391 +       complete(&ecr->completion);
394 +/**
395 + * ext4_prep_pages_for_write() - Prepares pages for write
396 + * @ciphertext_page: Ciphertext page that will actually be written.
397 + * @plaintext_page:  Plaintext page that acts as a control page.
398 + * @ctx:             Encryption context for the pages.
399 + */
400 +static void ext4_prep_pages_for_write(struct page *ciphertext_page,
401 +                                     struct page *plaintext_page,
402 +                                     struct ext4_crypto_ctx *ctx)
404 +       SetPageDirty(ciphertext_page);
405 +       SetPagePrivate(ciphertext_page);
406 +       ctx->control_page = plaintext_page;
407 +       set_page_private(ciphertext_page, (unsigned long)ctx);
408 +       set_bh_to_page(page_buffers(plaintext_page), ciphertext_page);
411 +/**
412 + * ext4_xts_encrypt() - Encrypts a page using AES-256-XTS
413 + * @ctx:            The encryption context.
414 + * @plaintext_page: The page to encrypt. Must be locked.
415 + *
416 + * Allocates a ciphertext page and encrypts plaintext_page into it using the ctx
417 + * encryption context. Uses AES-256-XTS.
418 + *
419 + * Called on the page write path.
420 + *
421 + * Return: An allocated page with the encrypted content on success. Else, an
422 + * error value or NULL.
423 + */
424 +struct page *ext4_xts_encrypt(struct ext4_crypto_ctx *ctx,
425 +                             struct page *plaintext_page)
427 +       struct page *ciphertext_page = ctx->bounce_page;
428 +       u8 xts_tweak[EXT4_XTS_TWEAK_SIZE];
429 +       struct ablkcipher_request *req = NULL;
430 +       struct ext4_crypt_result ecr;
431 +       struct scatterlist dst, src;
432 +       struct ext4_inode_info *ei = EXT4_I(plaintext_page->mapping->host);
433 +       struct crypto_ablkcipher *atfm = __crypto_ablkcipher_cast(ctx->tfm);
434 +       int res = 0;
436 +       BUG_ON(!ciphertext_page);
437 +       BUG_ON(!ctx->tfm);
438 +       BUG_ON(ei->i_encryption_key.mode != EXT4_ENCRYPTION_MODE_AES_256_XTS);
439 +       crypto_ablkcipher_clear_flags(atfm, ~0);
440 +       crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_REQ_WEAK_KEY);
442 +       /* Since in AES-256-XTS mode we only perform one cryptographic operation
443 +        * on each block and there are no constraints about how many blocks a
444 +        * single key can encrypt, we directly use the inode master key */
445 +       res = crypto_ablkcipher_setkey(atfm, ei->i_encryption_key.raw,
446 +                                      ei->i_encryption_key.size);
447 +       req = ablkcipher_request_alloc(atfm, GFP_NOFS);
448 +       if (!req) {
449 +               printk_ratelimited(KERN_ERR
450 +                                  "%s: crypto_request_alloc() failed\n",
451 +                                  __func__);
452 +               ciphertext_page = ERR_PTR(-ENOMEM);
453 +               goto out;
454 +       }
455 +       ablkcipher_request_set_callback(
456 +               req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
457 +               ext4_crypt_complete, &ecr);
458 +       ext4_xts_tweak_for_page(xts_tweak, plaintext_page);
459 +       sg_init_table(&dst, 1);
460 +       sg_set_page(&dst, ciphertext_page, PAGE_CACHE_SIZE, 0);
461 +       sg_init_table(&src, 1);
462 +       sg_set_page(&src, plaintext_page, PAGE_CACHE_SIZE, 0);
463 +       ablkcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE,
464 +                                    xts_tweak);
465 +       res = crypto_ablkcipher_encrypt(req);
466 +       if (res == -EINPROGRESS || res == -EBUSY) {
467 +               BUG_ON(req->base.data != &ecr);
468 +               wait_for_completion(&ecr.completion);
469 +               res = ecr.res;
470 +       }
471 +       ablkcipher_request_free(req);
472 +       if (res) {
473 +               printk_ratelimited(
474 +                       KERN_ERR
475 +                       "%s: crypto_ablkcipher_encrypt() returned %d\n",
476 +                       __func__, res);
477 +               ciphertext_page = ERR_PTR(res);
478 +               goto out;
479 +       }
480 +out:
481 +       return ciphertext_page;
484 +/**
485 + * ext4_encrypt() - Encrypts a page
486 + * @ctx:            The encryption context.
487 + * @plaintext_page: The page to encrypt. Must be locked.
488 + *
489 + * Allocates a ciphertext page and encrypts plaintext_page into it using the ctx
490 + * encryption context.
491 + *
492 + * Called on the page write path.
493 + *
494 + * Return: An allocated page with the encrypted content on success. Else, an
495 + * error value or NULL.
496 + */
497 +struct page *ext4_encrypt(struct ext4_crypto_ctx *ctx,
498 +                         struct page *plaintext_page)
500 +       struct page *ciphertext_page = NULL;
502 +       BUG_ON(!PageLocked(plaintext_page));
503 +       switch (ctx->mode) {
504 +       case EXT4_ENCRYPTION_MODE_AES_256_XTS:
505 +               ciphertext_page = ext4_xts_encrypt(ctx, plaintext_page);
506 +               break;
507 +       case EXT4_ENCRYPTION_MODE_AES_256_GCM:
508 +               /* TODO(mhalcrow): We'll need buffers for the
509 +                * generated IV and/or auth tag for this mode and the
510 +                * ones below */
511 +       case EXT4_ENCRYPTION_MODE_HMAC_SHA1:
512 +       case EXT4_ENCRYPTION_MODE_AES_256_XTS_RANDOM_IV_HMAC_SHA1:
513 +               ciphertext_page = ERR_PTR(-ENOTSUPP);
514 +               break;
515 +       default:
516 +               BUG();
517 +       }
518 +       if (!IS_ERR_OR_NULL(ciphertext_page))
519 +               ext4_prep_pages_for_write(ciphertext_page, plaintext_page, ctx);
520 +       return ciphertext_page;
523 +/**
524 + * ext4_xts_decrypt() - Decrypts a page using AES-256-XTS
525 + * @ctx:  The encryption context.
526 + * @page: The page to decrypt. Must be locked.
527 + *
528 + * Return: Zero on success, non-zero otherwise.
529 + */
530 +int ext4_xts_decrypt(struct ext4_crypto_ctx *ctx, struct page *page)
532 +       u8 xts_tweak[EXT4_XTS_TWEAK_SIZE];
533 +       struct ablkcipher_request *req = NULL;
534 +       struct ext4_crypt_result ecr;
535 +       struct scatterlist sg;
536 +       struct ext4_inode_info *ei = EXT4_I(page->mapping->host);
537 +       struct crypto_ablkcipher *atfm = __crypto_ablkcipher_cast(ctx->tfm);
538 +       int res = 0;
540 +       BUG_ON(!ctx->tfm);
541 +       BUG_ON(ei->i_encryption_key.mode != EXT4_ENCRYPTION_MODE_AES_256_XTS);
542 +       crypto_ablkcipher_clear_flags(atfm, ~0);
543 +       crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_REQ_WEAK_KEY);
545 +       /* Since in AES-256-XTS mode we only perform one cryptographic operation
546 +        * on each block and there are no constraints about how many blocks a
547 +        * single key can encrypt, we directly use the inode master key */
548 +       res = crypto_ablkcipher_setkey(atfm, ei->i_encryption_key.raw,
549 +                                      ei->i_encryption_key.size);
550 +       req = ablkcipher_request_alloc(atfm, GFP_NOFS);
551 +       if (!req) {
552 +               res = -ENOMEM;
553 +               goto out;
554 +       }
555 +       ablkcipher_request_set_callback(
556 +               req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
557 +               ext4_crypt_complete, &ecr);
558 +       ext4_xts_tweak_for_page(xts_tweak, page);
559 +       sg_init_table(&sg, 1);
560 +       sg_set_page(&sg, page, PAGE_CACHE_SIZE, 0);
561 +       ablkcipher_request_set_crypt(req, &sg, &sg, PAGE_CACHE_SIZE, xts_tweak);
562 +       res = crypto_ablkcipher_decrypt(req);
563 +       if (res == -EINPROGRESS || res == -EBUSY) {
564 +               BUG_ON(req->base.data != &ecr);
565 +               wait_for_completion(&ecr.completion);
566 +               res = ecr.res;
567 +       }
568 +       ablkcipher_request_free(req);
569 +out:
570 +       if (res)
571 +               printk_ratelimited(KERN_ERR "%s: res = [%d]\n", __func__, res);
572 +       return res;
575 +/**
576 + * ext4_decrypt() - Decrypts a page in-place
577 + * @ctx:  The encryption context.
578 + * @page: The page to decrypt. Must be locked.
579 + *
580 + * Decrypts page in-place using the ctx encryption context.
581 + *
582 + * Called from the read completion callback.
583 + *
584 + * Return: Zero on success, non-zero otherwise.
585 + */
586 +int ext4_decrypt(struct ext4_crypto_ctx *ctx, struct page *page)
588 +       int res = 0;
590 +       BUG_ON(!PageLocked(page));
591 +       switch (ctx->mode) {
592 +       case EXT4_ENCRYPTION_MODE_AES_256_XTS:
593 +               res = ext4_xts_decrypt(ctx, page);
594 +               break;
595 +       case EXT4_ENCRYPTION_MODE_AES_256_GCM:
596 +       case EXT4_ENCRYPTION_MODE_HMAC_SHA1:
597 +       case EXT4_ENCRYPTION_MODE_AES_256_XTS_RANDOM_IV_HMAC_SHA1:
598 +               res = -ENOTSUPP;
599 +               break;
600 +       default:
601 +               BUG();
602 +       }
603 +       return res;
606 +/**
607 + * ext4_get_wrapping_key_from_keyring() - Gets a wrapping key from the keyring
608 + * @wrapping_key: Buffer into which this writes the wrapping key.
609 + * @sig:          The signature for the wrapping key.
610 + *
611 + * Return: Zero on success, non-zero otherwise.
612 + */
613 +static int ext4_get_wrapping_key_from_keyring(
614 +       char wrapping_key[EXT4_MAX_KEY_SIZE],
615 +       const char sig[EXT4_WRAPPING_KEY_SIG_NULL_TERMINATED_SIZE])
617 +       struct key *create_key;
618 +       struct encrypted_key_payload *payload;
619 +       struct ecryptfs_auth_tok *auth_tok;
621 +       create_key = request_key(&key_type_user, sig, NULL);
622 +       if (WARN_ON_ONCE(IS_ERR(create_key)))
623 +               return -ENOENT;
624 +       payload = (struct encrypted_key_payload *)create_key->payload.data;
625 +       if (WARN_ON_ONCE(create_key->datalen !=
626 +                        sizeof(struct ecryptfs_auth_tok))) {
627 +               return -EINVAL;
628 +       }
629 +       auth_tok = (struct ecryptfs_auth_tok *)(&(payload)->payload_data);
630 +       if (WARN_ON_ONCE(!(auth_tok->token.password.flags &
631 +                          ECRYPTFS_SESSION_KEY_ENCRYPTION_KEY_SET))) {
632 +               return -EINVAL;
633 +       }
634 +       BUILD_BUG_ON(EXT4_MAX_KEY_SIZE < EXT4_AES_256_XTS_KEY_SIZE);
635 +       BUILD_BUG_ON(ECRYPTFS_MAX_KEY_BYTES < EXT4_AES_256_XTS_KEY_SIZE);
636 +       memcpy(wrapping_key,
637 +              auth_tok->token.password.session_key_encryption_key,
638 +              EXT4_AES_256_XTS_KEY_SIZE);
639 +       return 0;
642 +/**
643 + * ext4_wrapping_key_sig_for_parent_dir() - Gets the key signature for
644 + *                                          the parent directory
645 + * @sig: Buffer into which this writes the wrapping key signature.
646 + *
647 + * Return: Zero on success, non-zero otherwise.
648 + */
649 +static int ext4_wrapping_key_sig_for_parent_dir(
650 +       char sig[EXT4_WRAPPING_KEY_SIG_NULL_TERMINATED_SIZE])
652 +       /* TODO(mhalcrow): Here's where we can check for wrapping key
653 +        * specifier in parent directory xattr. */
654 +       return -ENOTSUPP;
657 +/**
658 + * ext4_get_wrapping_key() - Gets the wrapping key from the user session keyring
659 + * @wrapping_key: Buffer into which this writes the wrapping key.
660 + * @sig:          Buffer into which this writes the wrapping key signature.
661 + * @inode:        The inode for the wrapping key.
662 + *
663 + * Return: Zero on success, non-zero otherwise.
664 + */
665 +static int ext4_get_wrapping_key(
666 +       char wrapping_key[EXT4_AES_256_XTS_KEY_SIZE],
667 +       char sig[EXT4_WRAPPING_KEY_SIG_NULL_TERMINATED_SIZE],
668 +       const struct inode *inode)
670 +       struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
671 +       int res = ext4_wrapping_key_sig_for_parent_dir(sig);
673 +       if (res) {
674 +               BUILD_BUG_ON(ECRYPTFS_SIG_SIZE_HEX + 1 !=
675 +                            EXT4_WRAPPING_KEY_SIG_NULL_TERMINATED_SIZE);
676 +               memcpy(sig,
677 +                      sbi->s_default_encryption_wrapper_desc.wrapping_key_sig,
678 +                      EXT4_WRAPPING_KEY_SIG_NULL_TERMINATED_SIZE);
679 +       }
680 +       BUG_ON(sig[EXT4_WRAPPING_KEY_SIG_NULL_TERMINATED_SIZE - 1] != '\0');
681 +       res = ext4_get_wrapping_key_from_keyring(wrapping_key, sig);
682 +       return res;
685 +/**
686 + * ext4_validate_encryption_mode() - Validates the encryption key mode
687 + * @mode: The key mode to validate.
688 + *
689 + * Return: The validated key mode. EXT4_ENCRYPTION_MODE_INVALID if invalid.
690 + */
691 +static uint32_t ext4_validate_encryption_mode(uint32_t mode)
693 +       switch (mode) {
694 +       case EXT4_ENCRYPTION_MODE_AES_256_XTS:
695 +               return mode;
696 +       default:
697 +               break;
698 +       }
699 +       return EXT4_ENCRYPTION_MODE_INVALID;
702 +/**
703 + * ext4_validate_encryption_key_size() - Validates the encryption key size
704 + * @mode: The key mode.
705 + * @size: The key size to validate.
706 + *
707 + * Return: The validated key size for @mode. Zero if invalid.
708 + */
709 +static uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size)
711 +       if (size == ext4_encryption_key_size(mode))
712 +               return size;
713 +       return 0;
716 +struct ext4_hmac_result {
717 +       struct completion completion;
718 +       int res;
719 +} ext4_hmac_result;
721 +/**
722 + * ext4_hmac_complete() - Completion for async HMAC
723 + * @req: The async request.
724 + * @res: The result of the HMAC operation.
725 + */
726 +static void ext4_hmac_complete(struct crypto_async_request *req, int res)
728 +       struct ext4_hmac_result *ehr = req->data;
730 +       if (res == -EINPROGRESS)
731 +               return;
732 +       ehr->res = res;
733 +       complete(&ehr->completion);
736 +/**
737 + * ext4_hmac() - Generates an HMAC
738 + * @derivation: If true, derive a key. Else, generate an integrity HMAC.
739 + * @key:        The HMAC key.
740 + * @key_size:   The size of @key.
741 + * @src:        The data to HMAC.
742 + * @src_size:   The size of @src.
743 + * @dst:        The target buffer for the generated HMAC.
744 + * @dst_size:   The size of @dst.
745 + *
746 + * Return: Zero on success, non-zero otherwise.
747 + */
748 +static int ext4_hmac(bool derivation, const char *key, size_t key_size,
749 +                    const char *src, size_t src_size, char *dst,
750 +                    size_t dst_size)
752 +       struct scatterlist sg;
753 +       struct ahash_request *req = NULL;
754 +       struct ext4_hmac_result ehr;
755 +       char hmac[SHA512_DIGEST_SIZE];
756 +       struct crypto_ahash *tfm = crypto_alloc_ahash(derivation ?
757 +                                                     "hmac(sha512)" :
758 +                                                     "hmac(sha1)", 0, 0);
759 +       int res = 0;
761 +       BUG_ON(dst_size > SHA512_DIGEST_SIZE);
762 +       if (IS_ERR(tfm))
763 +               return PTR_ERR(tfm);
764 +       req = ahash_request_alloc(tfm, GFP_NOFS);
765 +       if (!req) {
766 +               res = -ENOMEM;
767 +               goto out;
768 +       }
769 +       ahash_request_set_callback(req,
770 +                                  (CRYPTO_TFM_REQ_MAY_BACKLOG |
771 +                                   CRYPTO_TFM_REQ_MAY_SLEEP),
772 +                                  ext4_hmac_complete, &ehr);
774 +       res = crypto_ahash_setkey(tfm, key, key_size);
775 +       if (res)
776 +               goto out;
777 +       sg_init_one(&sg, src, src_size);
778 +       ahash_request_set_crypt(req, &sg, hmac, src_size);
779 +       init_completion(&ehr.completion);
780 +       res = crypto_ahash_digest(req);
781 +       if (res == -EINPROGRESS || res == -EBUSY) {
782 +               BUG_ON(req->base.data != &ehr);
783 +               wait_for_completion(&ehr.completion);
784 +               res = ehr.res;
785 +       }
786 +       if (res)
787 +               goto out;
788 +       memcpy(dst, hmac, dst_size);
789 +out:
790 +       crypto_free_ahash(tfm);
791 +       if (req)
792 +               ahash_request_free(req);
793 +       return res;
796 +/**
797 + * ext4_hmac_derive_key() - Generates an HMAC for key derivation (HKDF)
798 + * @key:      The master key.
799 + * @key_size: The size of @key.
800 + * @src:      The derivation data.
801 + * @src_size: The size of @src.
802 + * @dst:      The target buffer for the derived key.
803 + * @dst_size: The size of @dst.
804 + *
805 + * Return: Zero on success, non-zero otherwise.
806 + */
807 +static int ext4_hmac_derive_key(const char *key, size_t key_size,
808 +                               const char *src, size_t src_size, char *dst,
809 +                               size_t dst_size)
811 +       return ext4_hmac(true, key, key_size, src, src_size, dst, dst_size);
814 +/**
815 + * ext4_hmac_integrity() - Generates an HMAC for an integrity measurement
816 + * @key:      The HMAC key.
817 + * @key_size: The size of @key.
818 + * @src:      The data to generate the HMAC over.
819 + * @src_size: The size of @src.
820 + * @dst:      The target buffer for the HMAC.
821 + * @dst_size: The size of @dst.
822 + *
823 + * Return: Zero on success, non-zero otherwise.
824 + */
825 +static int ext4_hmac_integrity(const char *key, size_t key_size,
826 +                              const char *src, size_t src_size, char *dst,
827 +                              size_t dst_size)
829 +       return ext4_hmac(false, key, key_size, src, src_size, dst, dst_size);
832 +/**
833 + * ext4_crypt_wrapper_virt() - Encrypts or decrypts a key object
834 + * @enc_key:  The wrapping key.
835 + * @iv:       The initialization vector for the key encryption.
836 + * @src_virt: The source key object to wrap.
837 + * @dst_virt: The buffer for the wrapped key object.
838 + * @size:     The size of the key object (identical for wrapped or unwrapped).
839 + * @enc:      If 0, decrypt. Else, encrypt.
840 + *
841 + * Encrypts or decrypts the key object with AES-256-CTR, as selected by @enc.
842 + *
843 + * Return: Zero on success, non-zero otherwise.
844 + */
845 +static int ext4_crypt_wrapper_virt(const char *enc_key, const char *iv,
846 +                                  const char *src_virt, char *dst_virt,
847 +                                  size_t size, bool enc)
849 +       struct scatterlist dst, src;
850 +       struct blkcipher_desc desc = {
851 +               .flags = CRYPTO_TFM_REQ_MAY_SLEEP
852 +       };
853 +       int res = 0;
855 +       desc.tfm = crypto_alloc_blkcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
856 +       if (IS_ERR(desc.tfm))
857 +               return PTR_ERR(desc.tfm);
858 +       if (!desc.tfm)
859 +               return -ENOMEM;
860 +       crypto_blkcipher_set_flags(desc.tfm, CRYPTO_TFM_REQ_WEAK_KEY);
861 +       sg_init_one(&dst, dst_virt, size);
862 +       sg_init_one(&src, src_virt, size);
863 +       crypto_blkcipher_set_iv(desc.tfm, iv, EXT4_WRAPPING_IV_SIZE);
864 +       res = crypto_blkcipher_setkey(desc.tfm, enc_key,
865 +                                     EXT4_AES_256_CTR_KEY_SIZE);
866 +       if (res)
867 +               goto out;
868 +       if (enc)
869 +               res = crypto_blkcipher_encrypt(&desc, &dst, &src, size);
870 +       else
871 +               res = crypto_blkcipher_decrypt(&desc, &dst, &src, size);
872 +out:
873 +       crypto_free_blkcipher(desc.tfm);
874 +       return res;
877 +/**
878 + * ext4_unwrap_key() - Unwraps the encryption key for the inode
879 + * @wrapped_key_packet:      The wrapped encryption key packet.
880 + * @wrapped_key_packet_size: The wrapped encryption key packet size.
881 + * @key:                     The encryption key to fill in with unwrapped data.
882 + *
883 + * Uses the wrapped key to unwrap the encryption key.
884 + *
885 + * Return: Zero on success, non-zero otherwise.
886 + */
887 +static int ext4_unwrap_key(const char *wrapped_key_packet,
888 +                          size_t wrapped_key_packet_size,
889 +                          struct ext4_encryption_key *key)
891 +       struct ext4_wrapped_key_packet *packet =
892 +               (struct ext4_wrapped_key_packet *)wrapped_key_packet;
893 +       uint32_t packet_size = ntohl(*(uint32_t *)packet->size);
894 +       struct ext4_encryption_key_packet key_packet;
895 +       char wrapping_key[EXT4_AES_256_XTS_KEY_SIZE];
896 +       char enc_key[EXT4_AES_256_CTR_KEY_SIZE];
897 +       char int_key[EXT4_HMAC_KEY_SIZE];
898 +       char hmac[EXT4_HMAC_SIZE];
899 +       char hmac_invalid = 0;
900 +       int i;
901 +       int res = 0;
903 +       if (wrapped_key_packet_size < sizeof(packet_size))
904 +               return -EINVAL;
905 +       BUILD_BUG_ON(sizeof(struct ext4_wrapped_key_packet) !=
906 +                    EXT4_FULL_WRAPPED_KEY_PACKET_V0_SIZE);
907 +       if (packet_size != sizeof(struct ext4_wrapped_key_packet))
908 +               return -EINVAL;
909 +       if (wrapped_key_packet_size != packet_size)
910 +               return -EINVAL;
911 +       if (packet->type != EXT4_KEY_PACKET_TYPE_WRAPPED_KEY_V0)
912 +               return -EINVAL;
913 +       if (packet->sig[EXT4_WRAPPING_KEY_SIG_NULL_TERMINATED_SIZE - 1] != '\0')
914 +               return -EINVAL;
915 +       res = ext4_get_wrapping_key_from_keyring(wrapping_key, packet->sig);
916 +       if (res)
917 +               return res;
919 +       /* Always validate the HMAC as soon as we get the key to do so */
920 +       packet->nonce[EXT4_NONCE_SIZE] = EXT4_WRAPPING_INT_DERIVATION_TWEAK;
921 +       res = ext4_hmac_derive_key(wrapping_key, EXT4_AES_256_XTS_KEY_SIZE,
922 +                                  packet->nonce,
923 +                                  EXT4_DERIVATION_TWEAK_NONCE_SIZE, int_key,
924 +                                  EXT4_HMAC_KEY_SIZE);
925 +       if (res)
926 +               goto out;
927 +       res = ext4_hmac_integrity(int_key, EXT4_HMAC_KEY_SIZE,
928 +                                 wrapped_key_packet,
929 +                                 (EXT4_FULL_WRAPPED_KEY_PACKET_V0_SIZE -
930 +                                  EXT4_HMAC_SIZE), hmac, EXT4_HMAC_SIZE);
931 +       memset(int_key, 0, EXT4_HMAC_KEY_SIZE);
932 +       for (i = 0; i < EXT4_HMAC_SIZE; ++i)
933 +               hmac_invalid |= (packet->hmac[i] ^ hmac[i]);
934 +       if (hmac_invalid) {
935 +               printk_ratelimited(
936 +                       KERN_ERR
937 +                       "%s: Security warning: Wrapped key HMAC check failed\n",
938 +                       __func__);
939 +               res = -EINVAL;
940 +               goto out;
941 +       }
943 +       /* The HMAC validated. Decrypt the key packet. */
944 +       packet->nonce[EXT4_NONCE_SIZE] = EXT4_WRAPPING_ENC_DERIVATION_TWEAK;
945 +       res = ext4_hmac_derive_key(wrapping_key, EXT4_AES_256_XTS_KEY_SIZE,
946 +                                  packet->nonce,
947 +                                  EXT4_DERIVATION_TWEAK_NONCE_SIZE, enc_key,
948 +                                  EXT4_AES_256_CTR_KEY_SIZE);
949 +       if (res)
950 +               goto out;
951 +       res = ext4_crypt_wrapper_virt(enc_key, packet->iv,
952 +                                     packet->wrapped_key_packet,
953 +                                     (char *)&key_packet,
954 +                                     EXT4_V0_SERIALIZED_KEY_SIZE, false);
955 +       memset(enc_key, 0, EXT4_AES_256_CTR_KEY_SIZE);
956 +       if (res)
957 +               goto out;
958 +       key->mode = ext4_validate_encryption_mode(
959 +               ntohl(*((uint32_t *)key_packet.mode)));
960 +       if (key->mode == EXT4_ENCRYPTION_MODE_INVALID) {
961 +               res = -EINVAL;
962 +               goto out;
963 +       }
964 +       memcpy(key->raw, key_packet.raw, EXT4_MAX_KEY_SIZE);
965 +       memset(key_packet.raw, 0, EXT4_MAX_KEY_SIZE);
966 +       key->size = ext4_validate_encryption_key_size(
967 +               key->mode, ntohl(*((uint32_t *)key_packet.size)));
968 +       if (!key->size) {
969 +               res = -EINVAL;
970 +               goto out;
971 +       }
972 +out:
973 +       if (res)
974 +               key->mode = EXT4_ENCRYPTION_MODE_INVALID;
975 +       memset(wrapping_key, 0, EXT4_AES_256_XTS_KEY_SIZE);
976 +       return res;
979 +/**
980 + * ext4_wrap_key() - Wraps the encryption key for the inode
981 + * @wrapped_key_packet: The buffer into which this writes the wrapped key.
982 + * @key_packet_size:    The size of the packet.
983 + * @key:                The encryption key.
984 + * @inode:              The inode for the encryption key.
985 + *
986 + * Generates a wrapped key packet from an encryption key and a wrapping key for
987 + * an inode.
988 + *
989 + * Return: Zero on success, non-zero otherwise.
990 + */
991 +static int ext4_wrap_key(char *wrapped_key_packet, size_t *key_packet_size,
992 +                        const struct ext4_encryption_key *key,
993 +                        const struct inode *inode)
995 +       struct ext4_wrapped_key_packet *packet =
996 +               (struct ext4_wrapped_key_packet *)wrapped_key_packet;
997 +       struct ext4_encryption_key_packet key_packet;
998 +       char wrapping_key[EXT4_AES_256_XTS_KEY_SIZE];
999 +       char enc_key[EXT4_AES_256_CTR_KEY_SIZE];
1000 +       char int_key[EXT4_HMAC_KEY_SIZE];
1001 +       int res = 0;
1003 +       BUILD_BUG_ON(sizeof(struct ext4_wrapped_key_packet) !=
1004 +                    EXT4_FULL_WRAPPED_KEY_PACKET_V0_SIZE);
1005 +       if (!wrapped_key_packet) {
1006 +               *key_packet_size = EXT4_FULL_WRAPPED_KEY_PACKET_V0_SIZE;
1007 +               return 0;
1008 +       }
1009 +       res = ext4_get_wrapping_key(wrapping_key, packet->sig, inode);
1010 +       if (res)
1011 +               return res;
1012 +       BUG_ON(*key_packet_size != EXT4_FULL_WRAPPED_KEY_PACKET_V0_SIZE);
1014 +       /* Size, type, nonce, and IV */
1015 +       *((uint32_t *)packet->size) =
1016 +               htonl(EXT4_FULL_WRAPPED_KEY_PACKET_V0_SIZE);
1017 +       packet->type = EXT4_KEY_PACKET_TYPE_WRAPPED_KEY_V0;
1018 +       get_random_bytes(packet->nonce, EXT4_NONCE_SIZE);
1019 +       get_random_bytes(packet->iv, EXT4_WRAPPING_IV_SIZE);
1021 +       /* Derive the wrapping encryption key from the wrapping key */
1022 +       packet->nonce[EXT4_NONCE_SIZE] = EXT4_WRAPPING_ENC_DERIVATION_TWEAK;
1023 +       res = ext4_hmac_derive_key(wrapping_key, EXT4_AES_256_XTS_KEY_SIZE,
1024 +                                  packet->nonce,
1025 +                                  EXT4_DERIVATION_TWEAK_NONCE_SIZE,
1026 +                                  enc_key, EXT4_AES_256_CTR_KEY_SIZE);
1027 +       if (res)
1028 +               goto out;
1030 +       /* Wrap the data key with the wrapping encryption key */
1031 +       *((uint32_t *)key_packet.mode) = htonl(key->mode);
1032 +       memcpy(key_packet.raw, key->raw, EXT4_MAX_KEY_SIZE);
1033 +       *((uint32_t *)key_packet.size) = htonl(key->size);
1034 +       BUILD_BUG_ON(sizeof(struct ext4_encryption_key_packet) !=
1035 +                    EXT4_V0_SERIALIZED_KEY_SIZE);
1036 +       res = ext4_crypt_wrapper_virt(enc_key, packet->iv, (char *)&key_packet,
1037 +                                     (char *)&packet->wrapped_key_packet,
1038 +                                     EXT4_V0_SERIALIZED_KEY_SIZE, true);
1039 +       memset(enc_key, 0, EXT4_AES_256_CTR_KEY_SIZE);
1040 +       memset(key_packet.raw, 0, EXT4_MAX_KEY_SIZE);
1041 +       if (res)
1042 +               goto out;
1044 +       /* Calculate the HMAC over the entire packet (except, of
1045 +        * course, the HMAC buffer at the end) */
1046 +       packet->nonce[EXT4_NONCE_SIZE] = EXT4_WRAPPING_INT_DERIVATION_TWEAK;
1047 +       res = ext4_hmac_derive_key(wrapping_key, EXT4_AES_256_XTS_KEY_SIZE,
1048 +                                  packet->nonce,
1049 +                                  EXT4_DERIVATION_TWEAK_NONCE_SIZE,
1050 +                                  int_key, EXT4_HMAC_KEY_SIZE);
1051 +       if (res)
1052 +               goto out;
1053 +       BUILD_BUG_ON(EXT4_FULL_WRAPPED_KEY_PACKET_V0_SIZE < EXT4_HMAC_SIZE);
1054 +       res = ext4_hmac_integrity(int_key, EXT4_HMAC_KEY_SIZE,
1055 +                                 wrapped_key_packet,
1056 +                                 (EXT4_FULL_WRAPPED_KEY_PACKET_V0_SIZE -
1057 +                                  EXT4_HMAC_SIZE), packet->hmac,
1058 +                                 EXT4_HMAC_SIZE);
1059 +       packet->nonce[EXT4_NONCE_SIZE] = 0; /* to catch decryption bugs */
1060 +       memset(int_key, 0, EXT4_HMAC_KEY_SIZE);
1061 +out:
1062 +       memset(wrapping_key, 0, EXT4_AES_256_XTS_KEY_SIZE);
1063 +       return res;
1066 +/**
1067 + * ext4_generate_encryption_key() - Generates an encryption key
1068 + * @dentry: The dentry containing the encryption key this will set.
1069 + */
1070 +static void ext4_generate_encryption_key(const struct dentry *dentry)
1072 +       struct ext4_inode_info *ei = EXT4_I(dentry->d_inode);
1073 +       struct ext4_sb_info *sbi = EXT4_SB(dentry->d_sb);
1074 +       struct ext4_encryption_key *key = &ei->i_encryption_key;
1076 +       key->mode = sbi->s_default_encryption_mode;
1077 +       key->size = ext4_encryption_key_size(key->mode);
1078 +       BUG_ON(!key->size);
1079 +       get_random_bytes(key->raw, key->size);
1082 +/**
1083 + * ext4_set_crypto_key() - Generates and sets the encryption key for the inode
1084 + * @dentry: The dentry for the encryption key.
1085 + *
1086 + * Generates the encryption key for the inode. Generates and writes the
1087 + * encryption metadata for the inode.
1088 + *
1089 + * Return: Zero on success, non-zero otherwise.
1090 + */
1091 +int ext4_set_crypto_key(struct dentry *dentry)
1093 +       char root_packet[EXT4_PACKET_SET_V0_MAX_SIZE];
1094 +       char *wrapped_key_packet = &root_packet[EXT4_PACKET_HEADER_SIZE];
1095 +       size_t wrapped_key_packet_size = EXT4_FULL_WRAPPED_KEY_PACKET_V0_SIZE;
1096 +       size_t root_packet_size = (EXT4_PACKET_HEADER_SIZE +
1097 +                                  wrapped_key_packet_size);
1098 +       struct inode *inode = dentry->d_inode;
1099 +       struct ext4_inode_info *ei = EXT4_I(inode);
1100 +       int res = 0;
1102 +try_again:
1103 +       ext4_generate_encryption_key(dentry);
1104 +       res = ext4_wrap_key(wrapped_key_packet, &wrapped_key_packet_size,
1105 +                           &ei->i_encryption_key, inode);
1106 +       if (res)
1107 +               goto out;
1108 +       root_packet[0] = EXT4_PACKET_SET_VERSION_V0;
1109 +       BUILD_BUG_ON(EXT4_PACKET_SET_V0_MAX_SIZE !=
1110 +                    (EXT4_PACKET_HEADER_SIZE +
1111 +                     EXT4_FULL_WRAPPED_KEY_PACKET_V0_SIZE));
1112 +       BUG_ON(sizeof(root_packet) != root_packet_size);
1113 +       res = ext4_xattr_set(inode, EXT4_XATTR_INDEX_ENCRYPTION_METADATA, "",
1114 +                            root_packet, root_packet_size, 0);
1115 +out:
1116 +       if (res) {
1117 +               if (res == -EINTR)
1118 +                       goto try_again;
1119 +               ei->i_encryption_key.mode = EXT4_ENCRYPTION_MODE_INVALID;
1120 +               printk_ratelimited(KERN_ERR "%s: res = [%d]\n", __func__, res);
1121 +       }
1122 +       return res;
1125 +/**
1126 + * ext4_get_root_packet() - Reads the root packet
1127 + * @inode:            The inode containing the root packet.
1128 + * @root_packet:      The root packet.
1129 + * @root_packet_size: The size of the root packet. Set by this if
1130 + *                    root_packet == NULL.
1131 + *
1132 + * Return: Zero on success, non-zero otherwise.
1133 + */
1134 +static int ext4_get_root_packet(struct inode *inode, char *root_packet,
1135 +                               size_t *root_packet_size)
1137 +       int res = ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION_METADATA,
1138 +                                "", NULL, 0);
1139 +       if (res < 0)
1140 +               return res;
1141 +       if (!root_packet) {
1142 +               *root_packet_size = res;
1143 +               return 0;
1144 +       }
1145 +       if (res != *root_packet_size)
1146 +               return -ENODATA;
1147 +       res = ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION_METADATA, "",
1148 +                            root_packet, res);
1149 +       if (root_packet[0] != EXT4_PACKET_SET_VERSION_V0) {
1150 +               printk_ratelimited(
1151 +                       KERN_ERR
1152 +                       "%s: Expected root packet version [%d]; got [%d]\n",
1153 +                       __func__, EXT4_PACKET_SET_VERSION_V0, root_packet[0]);
1154 +               return -EINVAL;
1155 +       }
1156 +       return 0;
1159 +/**
1160 + * ext4_get_crypto_key() - Gets the encryption key for the inode
1161 + * @file: The file for the encryption key.
1162 + *
1163 + * Return: Zero on success, non-zero otherwise.
1164 + */
1165 +int ext4_get_crypto_key(const struct file *file)
1167 +       char root_packet[EXT4_PACKET_SET_V0_MAX_SIZE];
1168 +       char *wrapped_key_packet = &root_packet[EXT4_PACKET_HEADER_SIZE];
1169 +       size_t wrapped_key_packet_size = EXT4_FULL_WRAPPED_KEY_PACKET_V0_SIZE;
1170 +       size_t root_packet_size = (EXT4_PACKET_HEADER_SIZE +
1171 +                                  wrapped_key_packet_size);
1172 +       struct inode *inode = file->f_mapping->host;
1173 +       struct ext4_inode_info *ei = EXT4_I(inode);
1174 +       int res = ext4_get_root_packet(inode, root_packet, &root_packet_size);
1176 +       if (res)
1177 +               goto out;
1178 +       res = ext4_unwrap_key(wrapped_key_packet,
1179 +                             EXT4_FULL_WRAPPED_KEY_PACKET_V0_SIZE,
1180 +                             &ei->i_encryption_key);
1181 +       if (res)
1182 +               goto out;
1183 +out:
1184 +       if (res)
1185 +               ei->i_encryption_key.mode = EXT4_ENCRYPTION_MODE_INVALID;
1186 +       return res;
1188 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
1189 index c55a1fa..11a9960 100644
1190 --- a/fs/ext4/ext4.h
1191 +++ b/fs/ext4/ext4.h
1192 @@ -32,6 +32,7 @@
1193  #include <linux/ratelimit.h>
1194  #include <crypto/hash.h>
1195  #include <linux/falloc.h>
1196 +#include <linux/ecryptfs.h>
1197  #ifdef __KERNEL__
1198  #include <linux/compat.h>
1199  #endif
1200 @@ -808,6 +809,8 @@ do {                                                                               \
1202  #endif /* defined(__KERNEL__) || defined(__linux__) */
1204 +#include "ext4_crypto.h"
1206  #include "extents_status.h"
1208  /*
1209 @@ -943,6 +946,10 @@ struct ext4_inode_info {
1211         /* Precomputed uuid+inum+igen checksum for seeding inode checksums */
1212         __u32 i_csum_seed;
1214 +       /* Encryption params */
1215 +       struct ext4_encryption_key i_encryption_key;
1216 +       struct ext4_encryption_wrapper_desc i_encryption_wrapper_desc;
1217  };
1219  /*
1220 @@ -1342,6 +1349,10 @@ struct ext4_sb_info {
1221         struct ratelimit_state s_err_ratelimit_state;
1222         struct ratelimit_state s_warning_ratelimit_state;
1223         struct ratelimit_state s_msg_ratelimit_state;
1225 +       /* Encryption */
1226 +       uint32_t s_default_encryption_mode;
1227 +       struct ext4_encryption_wrapper_desc s_default_encryption_wrapper_desc;
1228  };
1230  static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb)
1231 @@ -2817,6 +2828,24 @@ static inline void set_bitmap_uptodate(struct buffer_head *bh)
1232         set_bit(BH_BITMAP_UPTODATE, &(bh)->b_state);
1235 +/* crypto.c */
1236 +extern struct workqueue_struct *mpage_read_workqueue;
1237 +int ext4_allocate_crypto(size_t num_crypto_pages, size_t num_crypto_ctxs);
1238 +void ext4_delete_crypto(void);
1239 +struct ext4_crypto_ctx *ext4_get_crypto_ctx(
1240 +       bool with_page, const struct ext4_encryption_key *key);
1241 +void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx);
1242 +void set_bh_to_page(struct buffer_head *head, struct page *page);
1243 +struct page *ext4_encrypt(struct ext4_crypto_ctx *ctx,
1244 +                         struct page *plaintext_page);
1245 +int ext4_decrypt(struct ext4_crypto_ctx *ctx, struct page *page);
1246 +int ext4_get_crypto_key(const struct file *file);
1247 +int ext4_set_crypto_key(struct dentry *dentry);
1248 +static inline bool ext4_is_encryption_enabled(struct ext4_inode_info *ei)
1250 +       return ei->i_encryption_key.mode != EXT4_ENCRYPTION_MODE_INVALID;
1253  /*
1254   * Disable DIO read nolock optimization, so new dioreaders will be forced
1255   * to grab i_mutex
1256 diff --git a/fs/ext4/ext4_crypto.h b/fs/ext4/ext4_crypto.h
1257 new file mode 100644
1258 index 0000000..6cb5ba9
1259 --- /dev/null
1260 +++ b/fs/ext4/ext4_crypto.h
1261 @@ -0,0 +1,172 @@
1263 + * linux/fs/ext4/ext4_crypto.h
1264 + *
1265 + * This contains encryption header content for ext4
1266 + *
1267 + * Written by Michael Halcrow, 2014.
1268 + */
1270 +#ifndef _EXT4_CRYPTO_H
1271 +#define _EXT4_CRYPTO_H
1273 +/* Encryption parameters */
1274 +#define EXT4_AES_256_XTS_KEY_SIZE 64
1275 +#define EXT4_XTS_TWEAK_SIZE 16
1276 +#define EXT4_AES_256_CTR_KEY_SIZE 32
1277 +#define EXT4_AES_256_ECB_KEY_SIZE 32
1278 +#define EXT4_HMAC_KEY_SIZE 12
1279 +#define EXT4_HMAC_SIZE 12
1280 +#define EXT4_NONCE_SIZE 12
1281 +#define EXT4_DERIVATION_TWEAK_SIZE 1
1282 +#define EXT4_DERIVATION_TWEAK_NONCE_SIZE (EXT4_NONCE_SIZE + \
1283 +                                         EXT4_DERIVATION_TWEAK_SIZE)
1284 +#define EXT4_WRAPPING_ENC_DERIVATION_TWEAK 'e'
1285 +#define EXT4_WRAPPING_INT_DERIVATION_TWEAK 'i'
1286 +#define EXT4_AES_256_XTS_RANDOMIV_HMAC_SHA1_KEY_SIZE \
1287 +       (EXT4_AES_256_XTS_KEY_SIZE + EXT4_HMAC_KEY_SIZE)
1288 +#define EXT4_AES_256_GCM_KEY_SIZE 32
1289 +#define EXT4_AES_256_GCM_AUTH_SIZE 16
1290 +#define EXT4_GCM_ASSOC_DATA_SIZE sizeof(pgoff_t)
1291 +#define EXT4_PAGE_REGION_INDEX_SHIFT 16 /* 2**16-sized regions */
1292 +#define EXT4_MAX_KEY_SIZE EXT4_AES_256_XTS_RANDOMIV_HMAC_SHA1_KEY_SIZE
1293 +#define EXT4_MAX_IV_SIZE AES_BLOCK_SIZE
1294 +#define EXT4_MAX_AUTH_SIZE EXT4_AES_256_GCM_AUTH_SIZE
1296 +/* The metadata directory is only necessary for the sibling file
1297 + * directory under the mount root, which will be replaced by per-block
1298 + * metadata when it's ready. */
1299 +#define EXT4_METADATA_DIRECTORY_NAME ".ext4_crypt_data"
1300 +#define EXT4_METADATA_DIRECTORY_NAME_SIZE 16
1302 +/**
1303 + * Packet format:
1304 + *  4 bytes: Size of packet (inclusive of these 4 bytes)
1305 + *  1 byte: Packet type/version
1306 + *   Variable bytes: Packet content (may contain nested packets)
1307 + *
1308 + * Packets may be nested. The top-level packet is the "packet set".
1309 + */
1310 +#define EXT4_PACKET_SET_VERSION_V0 ((char)0x00)
1311 +#define EXT4_PACKET_SET_VERSION_SIZE 1
1312 +#define EXT4_PACKET_SIZE_SIZE 4
1313 +#define EXT4_PACKET_TYPE_SIZE 1
1314 +#define EXT4_PACKET_HEADER_SIZE (EXT4_PACKET_SIZE_SIZE + EXT4_PACKET_TYPE_SIZE)
1316 +/**
1317 + * Wrapped key packet format:
1318 + *  4 bytes: Size of packet (inclusive of these 4 bytes)
1319 + *  1 byte: Packet type/version (0x00)
1320 + *   17 bytes: NULL-terminated wrapping key signature (printable)
1321 + *   13 bytes: Derivation nonce (last byte ignored)
1322 + *   16 bytes: IV
1323 + *   Variable bytes: Serialized key, AES-256-CTR encrypted
1324 + *   12 bytes: HMAC-SHA1(everything preceding)
1325 + */
1326 +#define EXT4_KEY_PACKET_TYPE_WRAPPED_KEY_V0 ((char)0x00)
1327 +#define EXT4_WRAPPING_KEY_SIG_NULL_TERMINATED_SIZE (ECRYPTFS_SIG_SIZE_HEX + 1)
1328 +#define EXT4_WRAPPING_IV_SIZE 16
1330 +/* These #defines may seem redundant to the sizeof the structs below
1331 + * them. Since naively changing the structs can result in nasty bugs
1332 + * that might have security implications, we use the explicit sizes
1333 + * together with BUILD_BUG_ON() to help avoid mistakes. */
1334 +#define EXT4_V0_SERIALIZED_KEY_SIZE (sizeof(uint32_t) + \
1335 +                                    EXT4_MAX_KEY_SIZE + \
1336 +                                    sizeof(uint32_t))
1337 +#define EXT4_WRAPPED_KEY_PACKET_V0_SIZE ( \
1338 +               EXT4_WRAPPING_KEY_SIG_NULL_TERMINATED_SIZE + \
1339 +               EXT4_DERIVATION_TWEAK_NONCE_SIZE + \
1340 +               EXT4_WRAPPING_IV_SIZE + \
1341 +               EXT4_V0_SERIALIZED_KEY_SIZE + \
1342 +               EXT4_HMAC_SIZE)
1344 +#define EXT4_FULL_WRAPPED_KEY_PACKET_V0_SIZE ((uint32_t)( \
1345 +               EXT4_PACKET_HEADER_SIZE +                 \
1346 +               EXT4_WRAPPED_KEY_PACKET_V0_SIZE))
1348 +/* V0 supports only one key in a fixed xattr space. If/when compelling
1349 + * requirements come along, future versions may be able to use
1350 + * (non-xattr) metadata storage to store an arbitrary number of
1351 + * wrapped keys. In the meantime, we won't spend the code complexity
1352 + * budget on supporting multiple wrapped keys. */
1353 +#define EXT4_PACKET_SET_V0_MAX_WRAPPED_KEYS 1
1354 +#define EXT4_PACKET_SET_V0_MAX_SIZE ((uint32_t)(       \
1355 +               EXT4_PACKET_HEADER_SIZE +               \
1356 +               (EXT4_FULL_WRAPPED_KEY_PACKET_V0_SIZE * \
1357 +               EXT4_PACKET_SET_V0_MAX_WRAPPED_KEYS)))
1359 +/* Don't change this without also changing the packet type. Serialized
1360 + * packets are cast directly into this struct. */
1361 +struct ext4_encryption_key_packet {
1362 +       char mode[sizeof(uint32_t)]; /* Network byte order */
1363 +       char raw[EXT4_MAX_KEY_SIZE];
1364 +       char size[sizeof(uint32_t)]; /* Network byte order */
1365 +} __attribute__((__packed__));
1367 +/**
1368 + * If you change the existing modes (order or type), you'll need to
1369 + * change the packet type too.
1370 + */
1371 +enum ext4_encryption_mode {
1372 +       EXT4_ENCRYPTION_MODE_INVALID = 0,
1373 +       EXT4_ENCRYPTION_MODE_AES_256_XTS,
1374 +       EXT4_ENCRYPTION_MODE_AES_256_GCM,
1375 +       EXT4_ENCRYPTION_MODE_HMAC_SHA1,
1376 +       EXT4_ENCRYPTION_MODE_AES_256_XTS_RANDOM_IV_HMAC_SHA1,
1377 +};
1379 +struct ext4_encryption_key {
1380 +       uint32_t mode;
1381 +       char raw[EXT4_MAX_KEY_SIZE];
1382 +       uint32_t size;
1383 +};
1385 +/* Don't change this without also changing the packet type. Serialized
1386 + * packets are cast directly into this struct. */
1387 +struct ext4_wrapped_key_packet {
1388 +       char size[sizeof(uint32_t)]; /* Network byte order */
1389 +       char type;
1390 +       char sig[EXT4_WRAPPING_KEY_SIG_NULL_TERMINATED_SIZE];
1391 +       char nonce[EXT4_DERIVATION_TWEAK_NONCE_SIZE];
1392 +       char iv[EXT4_WRAPPING_IV_SIZE];
1393 +       char wrapped_key_packet[sizeof(struct ext4_encryption_key_packet)];
1394 +       char hmac[EXT4_HMAC_SIZE];
1395 +} __attribute__((__packed__));
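
Given the packed layouts above, the explicit size macros and the structs agree field for field, so the kind of compile-time check the earlier BUILD_BUG_ON() comment has in mind can be expressed directly (illustrative only; example_check_packet_sizes() is not from this patch):

#include <linux/bug.h>

/* Fails the build if the packed structs and the explicit size
 * macros ever drift apart. */
static inline void example_check_packet_sizes(void)
{
	BUILD_BUG_ON(sizeof(struct ext4_encryption_key_packet) !=
		     EXT4_V0_SERIALIZED_KEY_SIZE);
	BUILD_BUG_ON(sizeof(struct ext4_wrapped_key_packet) !=
		     EXT4_FULL_WRAPPED_KEY_PACKET_V0_SIZE);
}
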
1397 +struct ext4_encryption_wrapper_desc {
1398 +       char wrapping_key_sig[ECRYPTFS_SIG_SIZE_HEX + 1];
1399 +};
1401 +#define EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL              0x00000001
1402 +#define EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL      0x00000002
1404 +struct ext4_crypto_ctx {
1405 +       struct crypto_tfm *tfm;         /* Crypto API context */
1406 +       struct page *bounce_page;       /* Ciphertext page on write path */
1407 +       struct page *control_page;      /* Original page on write path */
1408 +       struct bio *bio;                /* The bio for this context */
1409 +       struct work_struct work;        /* Work queue for read complete path */
1410 +       struct list_head free_list;     /* Free list */
1411 +       int flags;                      /* Flags */
1412 +       enum ext4_encryption_mode mode; /* Encryption mode for tfm */
1413 +       atomic_t dbg_refcnt;            /* TODO(mhalcrow): Remove for release */
1414 +};
1416 +static inline int ext4_encryption_key_size(enum ext4_encryption_mode mode)
1417 +{
1418 +       switch (mode) {
1419 +       case EXT4_ENCRYPTION_MODE_AES_256_XTS:
1420 +               return EXT4_AES_256_XTS_KEY_SIZE;
1421 +       case EXT4_ENCRYPTION_MODE_AES_256_GCM:
1422 +               return EXT4_AES_256_GCM_KEY_SIZE;
1423 +       case EXT4_ENCRYPTION_MODE_HMAC_SHA1:
1424 +               return EXT4_HMAC_KEY_SIZE;
1425 +       case EXT4_ENCRYPTION_MODE_AES_256_XTS_RANDOM_IV_HMAC_SHA1:
1426 +               return EXT4_AES_256_XTS_RANDOMIV_HMAC_SHA1_KEY_SIZE;
1427 +       default:
1428 +               BUG();
1429 +       }
1430 +       return 0;
1431 +}
1433 +#endif /* _EXT4_CRYPTO_H */
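
Since ext4_encryption_key_size() BUG()s on any mode it does not recognize, a caller that has just unwrapped a key from untrusted xattr contents would want to screen the mode before using the helper and then cross-check the stored size. A minimal sketch of that idea (illustrative only; example_validate_key() is not from this patch):

#include <linux/errno.h>

static int example_validate_key(const struct ext4_encryption_key *key)
{
	/* Reject modes the size helper would BUG() on. */
	switch (key->mode) {
	case EXT4_ENCRYPTION_MODE_AES_256_XTS:
	case EXT4_ENCRYPTION_MODE_AES_256_GCM:
	case EXT4_ENCRYPTION_MODE_HMAC_SHA1:
	case EXT4_ENCRYPTION_MODE_AES_256_XTS_RANDOM_IV_HMAC_SHA1:
		break;
	default:
		return -EINVAL;
	}
	/* The deserialized size must match the mode's key length. */
	if (key->size != ext4_encryption_key_size(key->mode))
		return -EINVAL;
	return 0;
}
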
1434 diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
1435 index 37043d0..b8e8a82 100644
1436 --- a/fs/ext4/extents.c
1437 +++ b/fs/ext4/extents.c
1438 @@ -4905,6 +4905,7 @@ out_mutex:
1439  long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
1440  {
1441         struct inode *inode = file_inode(file);
1442 +       struct ext4_inode_info *ei = EXT4_I(inode);
1443         loff_t new_size = 0;
1444         unsigned int max_blocks;
1445         int ret = 0;
1446 @@ -4914,7 +4915,8 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
1448         /* Return error if mode is not supported */
1449         if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
1450 -                    FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE))
1451 +                    FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE) ||
1452 +               ext4_is_encryption_enabled(ei))
1453                 return -EOPNOTSUPP;
1455         if (mode & FALLOC_FL_PUNCH_HOLE)
1456 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
1457 index 1eda6ab..c3f4d4c 100644
1458 --- a/fs/ext4/super.c
1459 +++ b/fs/ext4/super.c
1460 @@ -901,6 +901,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
1461         atomic_set(&ei->i_ioend_count, 0);
1462         atomic_set(&ei->i_unwritten, 0);
1463         INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work);
1464 +       ei->i_encryption_key.mode = EXT4_ENCRYPTION_MODE_INVALID;
1466         return &ei->vfs_inode;
1467  }
1468 @@ -1146,7 +1147,7 @@ enum {
1469         Opt_inode_readahead_blks, Opt_journal_ioprio,
1470         Opt_dioread_nolock, Opt_dioread_lock,
1471         Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
1472 -       Opt_max_dir_size_kb,
1473 +       Opt_max_dir_size_kb, Opt_encrypt_key_sig,
1474  };
1476  static const match_table_t tokens = {
1477 @@ -1222,6 +1223,7 @@ static const match_table_t tokens = {
1478         {Opt_init_itable, "init_itable"},
1479         {Opt_noinit_itable, "noinit_itable"},
1480         {Opt_max_dir_size_kb, "max_dir_size_kb=%u"},
1481 +       {Opt_encrypt_key_sig, "encrypt_key_sig=%s"},
1482         {Opt_removed, "check=none"},    /* mount option from ext2/3 */
1483         {Opt_removed, "nocheck"},       /* mount option from ext2/3 */
1484         {Opt_removed, "reservation"},   /* mount option from ext2/3 */
1485 @@ -1420,6 +1422,7 @@ static const struct mount_opts {
1486         {Opt_jqfmt_vfsv0, QFMT_VFS_V0, MOPT_QFMT},
1487         {Opt_jqfmt_vfsv1, QFMT_VFS_V1, MOPT_QFMT},
1488         {Opt_max_dir_size_kb, 0, MOPT_GTE0},
1489 +       {Opt_encrypt_key_sig, 0, MOPT_STRING},
1490         {Opt_err, 0, 0}
1491  };
1493 @@ -1521,6 +1524,28 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
1494                 sbi->s_li_wait_mult = arg;
1495         } else if (token == Opt_max_dir_size_kb) {
1496                 sbi->s_max_dir_size_kb = arg;
1497 +       } else if (token == Opt_encrypt_key_sig) {
1498 +               char *encrypt_key_sig;
1500 +               encrypt_key_sig = match_strdup(&args[0]);
1501 +               if (!encrypt_key_sig) {
1502 +                       ext4_msg(sb, KERN_ERR,
1503 +                                "error: could not dup encryption key sig string");
1504 +                       return -1;
1505 +               }
1506 +               if (strlen(encrypt_key_sig) != ECRYPTFS_SIG_SIZE_HEX) {
1507 +                       ext4_msg(sb, KERN_ERR,
1508 +                                "error: encryption key sig string must be length %d",
1509 +                                ECRYPTFS_SIG_SIZE_HEX);
1510 +                       return -1;
1511 +               }
1512 +               sbi->s_default_encryption_mode =
1513 +                       EXT4_ENCRYPTION_MODE_AES_256_XTS;
1514 +               memcpy(sbi->s_default_encryption_wrapper_desc.wrapping_key_sig,
1515 +                      encrypt_key_sig,
1516 +                      ECRYPTFS_SIG_SIZE_HEX);
1517 +               sbi->s_default_encryption_wrapper_desc.wrapping_key_sig[
1518 +                       ECRYPTFS_SIG_SIZE_HEX] = '\0';
1519         } else if (token == Opt_stripe) {
1520                 sbi->s_stripe = arg;
1521         } else if (token == Opt_resuid) {
1522 @@ -5523,6 +5548,8 @@ struct mutex ext4__aio_mutex[EXT4_WQ_HASH_SZ];
1523  static int __init ext4_init_fs(void)
1524  {
1525         int i, err;
1526 +       static size_t num_prealloc_crypto_pages = 32;
1527 +       static size_t num_prealloc_crypto_ctxs = 128;
1529         ext4_li_info = NULL;
1530         mutex_init(&ext4_li_mtx);
1531 @@ -5535,10 +5562,15 @@ static int __init ext4_init_fs(void)
1532                 init_waitqueue_head(&ext4__ioend_wq[i]);
1533         }
1535 -       err = ext4_init_es();
1536 +       err = ext4_allocate_crypto(num_prealloc_crypto_pages,
1537 +                                  num_prealloc_crypto_ctxs);
1538         if (err)
1539                 return err;
1541 +       err = ext4_init_es();
1542 +       if (err)
1543 +               goto out8;
1545         err = ext4_init_pageio();
1546         if (err)
1547                 goto out7;
1548 @@ -5591,6 +5623,8 @@ out6:
1549         ext4_exit_pageio();
1550  out7:
1551         ext4_exit_es();
1552 +out8:
1553 +       ext4_delete_crypto();
1555         return err;
1556  }
1557 diff --git a/fs/ext4/xattr.h b/fs/ext4/xattr.h
1558 index 29bedf5..29d47c7 100644
1559 --- a/fs/ext4/xattr.h
1560 +++ b/fs/ext4/xattr.h
1561 @@ -23,6 +23,7 @@
1562  #define EXT4_XATTR_INDEX_SECURITY              6
1563  #define EXT4_XATTR_INDEX_SYSTEM                        7
1564  #define EXT4_XATTR_INDEX_RICHACL               8
1565 +#define EXT4_XATTR_INDEX_ENCRYPTION_METADATA   9
1567  struct ext4_xattr_header {
1568         __le32  h_magic;        /* magic number for identification */
1569 -- 
1570 2.1.0.rc2.206.gedb03e5