ext4 crypto: add ext4 encryption facilities

From: Michael Halcrow <mhalcrow@google.com>

On encrypt, we reassign the buffer_heads to point to a bounce page
rather than the control_page (the original page to be written, which
contains the plaintext). The block I/O occurs against the bounce
page. On write completion, we reassign the buffer_heads back to the
original plaintext page.
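
A minimal sketch of the intended write-path usage of the helpers added
below (the real buffer_head handling lives in the page-io path; the
function name here is illustrative only):

	static int write_one_encrypted_page(struct inode *inode, struct page *page)
	{
		struct page *bounce;

		/* 'page' is the locked control_page holding the plaintext. */
		bounce = ext4_encrypt(inode, page);
		if (IS_ERR(bounce))
			return PTR_ERR(bounce);

		/* ... redirect the buffer_heads/bio to 'bounce' and submit
		 * the block I/O against it ... */

		/* On write completion, release the bounce page and its
		 * encryption context and go back to the plaintext page. */
		ext4_restore_control_page(bounce);
		return 0;
	}
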
On decrypt, we attach a read completion callback to the bio
struct. This read completion decrypts the read contents in place
prior to setting the page up-to-date.
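
The corresponding read-side sketch (hypothetical call site; the actual
hookup is done in the bio completion handling), using ext4_decrypt_one():

	static void read_complete_decrypt(struct inode *inode, struct page *page)
	{
		/* Decrypt in place; mark the page up to date only if the
		 * decryption succeeded. */
		if (ext4_decrypt_one(inode, page) == 0)
			SetPageUptodate(page);
		unlock_page(page);
	}
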
The current encryption mode, AES-256-XTS, lacks cryptographic
integrity protection. AES-256-GCM is planned, but we will need to
devise a mechanism for handling the integrity data.
Change-Id: I5ed4c913d49971d7f7e9b10bb4e694df86f960d7
Signed-off-by: Michael Halcrow <mhalcrow@google.com>
Signed-off-by: Ildar Muslukhov <ildarm@google.com>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
23 diff --git a/fs/ext4/Makefile b/fs/ext4/Makefile
24 index 3886ee4..1b1c561 100644
25 --- a/fs/ext4/Makefile
26 +++ b/fs/ext4/Makefile
27 @@ -12,4 +12,4 @@ ext4-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o page-io.o \
29 ext4-$(CONFIG_EXT4_FS_POSIX_ACL) += acl.o
30 ext4-$(CONFIG_EXT4_FS_SECURITY) += xattr_security.o
31 -ext4-$(CONFIG_EXT4_FS_ENCRYPTION) += crypto_policy.o
32 +ext4-$(CONFIG_EXT4_FS_ENCRYPTION) += crypto_policy.o crypto.o
33 diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
35 index 0000000..49b1656
37 +++ b/fs/ext4/crypto.c
40 + * linux/fs/ext4/crypto.c
42 + * Copyright (C) 2015, Google, Inc.
44 + * This contains encryption functions for ext4
46 + * Written by Michael Halcrow, 2014.
48 + * Filename encryption additions
49 + * Uday Savagaonkar, 2014
50 + * Encryption policy handling additions
51 + * Ildar Muslukhov, 2014
53 + * This has not yet undergone a rigorous security audit.
55 + * The usage of AES-XTS should conform to recommendations in NIST
56 + * Special Publication 800-38E and IEEE P1619/D16.
59 +#include <crypto/hash.h>
60 +#include <crypto/sha.h>
61 +#include <keys/user-type.h>
62 +#include <keys/encrypted-type.h>
63 +#include <linux/crypto.h>
64 +#include <linux/ecryptfs.h>
65 +#include <linux/gfp.h>
66 +#include <linux/kernel.h>
67 +#include <linux/key.h>
68 +#include <linux/list.h>
69 +#include <linux/mempool.h>
70 +#include <linux/module.h>
71 +#include <linux/mutex.h>
72 +#include <linux/random.h>
73 +#include <linux/scatterlist.h>
74 +#include <linux/spinlock_types.h>
76 +#include "ext4_extents.h"
79 +/* Encryption added and removed here! (L: */
81 +static unsigned int num_prealloc_crypto_pages = 32;
82 +static unsigned int num_prealloc_crypto_ctxs = 128;
84 +module_param(num_prealloc_crypto_pages, uint, 0444);
85 +MODULE_PARM_DESC(num_prealloc_crypto_pages,
86 + "Number of crypto pages to preallocate");
87 +module_param(num_prealloc_crypto_ctxs, uint, 0444);
88 +MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
89 + "Number of crypto contexts to preallocate");
91 +static mempool_t *ext4_bounce_page_pool;
93 +static LIST_HEAD(ext4_free_crypto_ctxs);
94 +static DEFINE_SPINLOCK(ext4_crypto_ctx_lock);
97 + * ext4_release_crypto_ctx() - Releases an encryption context
98 + * @ctx: The encryption context to release.
100 + * If the encryption context was allocated from the pre-allocated pool, returns
101 + * it to that pool. Else, frees it.
103 + * If there's a bounce page in the context, this frees that.
105 +void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx)
107 + unsigned long flags;
109 + if (ctx->bounce_page) {
110 + if (ctx->flags & EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL)
111 + __free_page(ctx->bounce_page);
113 + mempool_free(ctx->bounce_page, ext4_bounce_page_pool);
114 + ctx->bounce_page = NULL;
116 + ctx->control_page = NULL;
117 + if (ctx->flags & EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL) {
119 + crypto_free_tfm(ctx->tfm);
122 + spin_lock_irqsave(&ext4_crypto_ctx_lock, flags);
123 + list_add(&ctx->free_list, &ext4_free_crypto_ctxs);
124 + spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
129 + * ext4_alloc_and_init_crypto_ctx() - Allocates and inits an encryption context
130 + * @mask: The allocation mask.
132 + * Return: An allocated and initialized encryption context on success. An error
133 + * value or NULL otherwise.
135 +static struct ext4_crypto_ctx *ext4_alloc_and_init_crypto_ctx(gfp_t mask)
137 + struct ext4_crypto_ctx *ctx = kzalloc(sizeof(struct ext4_crypto_ctx),
141 + return ERR_PTR(-ENOMEM);
146 + * ext4_get_crypto_ctx() - Gets an encryption context
147 + * @inode: The inode for which we are doing the crypto
149 + * Allocates and initializes an encryption context.
151 + * Return: An allocated and initialized encryption context on success; error
152 + * value or NULL otherwise.
154 +struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode)
156 + struct ext4_crypto_ctx *ctx = NULL;
158 + unsigned long flags;
159 + struct ext4_encryption_key *key = &EXT4_I(inode)->i_encryption_key;
161 + if (!ext4_read_workqueue)
162 + ext4_init_crypto();
165 + * We first try getting the ctx from a free list because in
166 + * the common case the ctx will have an allocated and
167 + * initialized crypto tfm, so it's probably a worthwhile
168 + * optimization. For the bounce page, we first try getting it
169 + * from the kernel allocator because that's just about as fast
170 + * as getting it from a list and because a cache of free pages
171 + * should generally be a "last resort" option for a filesystem
172 + * to be able to do its job.
174 + spin_lock_irqsave(&ext4_crypto_ctx_lock, flags);
175 + ctx = list_first_entry_or_null(&ext4_free_crypto_ctxs,
176 + struct ext4_crypto_ctx, free_list);
178 + list_del(&ctx->free_list);
179 + spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
181 + ctx = ext4_alloc_and_init_crypto_ctx(GFP_NOFS);
183 + res = PTR_ERR(ctx);
186 + ctx->flags |= EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
188 + ctx->flags &= ~EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
191 + /* Allocate a new Crypto API context if we don't already have
192 + * one or if it isn't the right mode. */
193 + BUG_ON(key->mode == EXT4_ENCRYPTION_MODE_INVALID);
194 + if (ctx->tfm && (ctx->mode != key->mode)) {
195 + crypto_free_tfm(ctx->tfm);
197 + ctx->mode = EXT4_ENCRYPTION_MODE_INVALID;
200 + switch (key->mode) {
201 + case EXT4_ENCRYPTION_MODE_AES_256_XTS:
202 + ctx->tfm = crypto_ablkcipher_tfm(
203 + crypto_alloc_ablkcipher("xts(aes)", 0, 0));
205 + case EXT4_ENCRYPTION_MODE_AES_256_GCM:
206 + /* TODO(mhalcrow): AEAD w/ gcm(aes);
207 + * crypto_aead_setauthsize() */
208 + ctx->tfm = ERR_PTR(-ENOTSUPP);
213 + if (IS_ERR_OR_NULL(ctx->tfm)) {
214 + res = PTR_ERR(ctx->tfm);
218 + ctx->mode = key->mode;
220 + BUG_ON(key->size != ext4_encryption_key_size(key->mode));
222 + /* There shouldn't be a bounce page attached to the crypto
223 + * context at this point. */
224 + BUG_ON(ctx->bounce_page);
228 + if (!IS_ERR_OR_NULL(ctx))
229 + ext4_release_crypto_ctx(ctx);
230 + ctx = ERR_PTR(res);
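
A minimal sketch of the calling convention these two context helpers
establish (hypothetical caller, error handling reduced to the essentials):

	struct ext4_crypto_ctx *ctx = ext4_get_crypto_ctx(inode);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);
	/* ... encrypt or decrypt a page using ctx->tfm ... */
	ext4_release_crypto_ctx(ctx);
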
235 +struct workqueue_struct *ext4_read_workqueue;
236 +static DEFINE_MUTEX(crypto_init);
239 + * ext4_exit_crypto() - Shutdown the ext4 encryption system
241 +void ext4_exit_crypto(void)
243 + struct ext4_crypto_ctx *pos, *n;
245 + list_for_each_entry_safe(pos, n, &ext4_free_crypto_ctxs, free_list) {
246 + if (pos->bounce_page) {
248 + EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL) {
249 + __free_page(pos->bounce_page);
251 + mempool_free(pos->bounce_page,
252 + ext4_bounce_page_pool);
256 + crypto_free_tfm(pos->tfm);
259 + INIT_LIST_HEAD(&ext4_free_crypto_ctxs);
260 + if (ext4_bounce_page_pool)
261 + mempool_destroy(ext4_bounce_page_pool);
262 + ext4_bounce_page_pool = NULL;
263 + if (ext4_read_workqueue)
264 + destroy_workqueue(ext4_read_workqueue);
265 + ext4_read_workqueue = NULL;
269 + * ext4_init_crypto() - Set up for ext4 encryption.
271 + * We only call this when we start accessing encrypted files, since it
272 + * results in memory getting allocated that wouldn't otherwise be used.
274 + * Return: Zero on success, non-zero otherwise.
276 +int ext4_init_crypto(void)
280 + mutex_lock(&crypto_init);
281 + if (ext4_read_workqueue)
282 + goto already_initialized;
283 + ext4_read_workqueue = alloc_workqueue("ext4_crypto", WQ_HIGHPRI, 0);
284 + if (!ext4_read_workqueue) {
289 + for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
290 + struct ext4_crypto_ctx *ctx;
292 + ctx = ext4_alloc_and_init_crypto_ctx(GFP_KERNEL);
294 + res = PTR_ERR(ctx);
297 + list_add(&ctx->free_list, &ext4_free_crypto_ctxs);
300 + ext4_bounce_page_pool =
301 + mempool_create_page_pool(num_prealloc_crypto_pages, 0);
302 + if (!ext4_bounce_page_pool) {
306 +already_initialized:
307 + mutex_unlock(&crypto_init);
310 + ext4_exit_crypto();
311 + mutex_unlock(&crypto_init);
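
ext4_init_crypto() is invoked lazily from ext4_get_crypto_ctx() the
first time an encrypted file is accessed; the call there ignores the
return value. A caller that wanted to propagate allocation failures
might do so as in this sketch (an assumption, not what the patch does):

	if (!ext4_read_workqueue) {
		int res = ext4_init_crypto();

		if (res)
			return ERR_PTR(res);
	}
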
315 +void ext4_restore_control_page(struct page *data_page)
317 + struct ext4_crypto_ctx *ctx =
318 + (struct ext4_crypto_ctx *)page_private(data_page);
320 + set_page_private(data_page, (unsigned long)NULL);
321 + ClearPagePrivate(data_page);
322 + unlock_page(data_page);
323 + ext4_release_crypto_ctx(ctx);
327 + * ext4_crypt_complete() - The completion callback for page encryption
328 + * @req: The asynchronous encryption request context
329 + * @res: The result of the encryption operation
331 +static void ext4_crypt_complete(struct crypto_async_request *req, int res)
333 + struct ext4_completion_result *ecr = req->data;
335 + if (res == -EINPROGRESS)
338 + complete(&ecr->completion);
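
This callback pairs with a waiter in ext4_page_crypto() below: when the
cipher runs asynchronously it returns -EINPROGRESS or -EBUSY, and the
submitter blocks on the completion before reading the result back out of
the ext4_completion_result. Condensed sketch (assuming the callback
stores its result in ecr->res, as the struct initializer suggests):

	DECLARE_EXT4_COMPLETION_RESULT(ecr);

	ablkcipher_request_set_callback(
		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		ext4_crypt_complete, &ecr);
	res = crypto_ablkcipher_encrypt(req);
	if (res == -EINPROGRESS || res == -EBUSY) {
		wait_for_completion(&ecr.completion);
		res = ecr.res;	/* filled in by ext4_crypt_complete() */
	}
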
346 +static int ext4_page_crypto(struct ext4_crypto_ctx *ctx,
347 + struct inode *inode,
348 + ext4_direction_t rw,
350 + struct page *src_page,
351 + struct page *dest_page)
354 + u8 xts_tweak[EXT4_XTS_TWEAK_SIZE];
355 + struct ablkcipher_request *req = NULL;
356 + DECLARE_EXT4_COMPLETION_RESULT(ecr);
357 + struct scatterlist dst, src;
358 + struct ext4_inode_info *ei = EXT4_I(inode);
359 + struct crypto_ablkcipher *atfm = __crypto_ablkcipher_cast(ctx->tfm);
363 + BUG_ON(ctx->mode != ei->i_encryption_key.mode);
365 + if (ctx->mode != EXT4_ENCRYPTION_MODE_AES_256_XTS) {
366 + printk_ratelimited(KERN_ERR
367 + "%s: unsupported crypto algorithm: %d\n",
368 + __func__, ctx->mode);
372 + crypto_ablkcipher_clear_flags(atfm, ~0);
373 + crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_REQ_WEAK_KEY);
375 + res = crypto_ablkcipher_setkey(atfm, ei->i_encryption_key.raw,
376 + ei->i_encryption_key.size);
378 + printk_ratelimited(KERN_ERR
379 + "%s: crypto_ablkcipher_setkey() failed\n",
383 + req = ablkcipher_request_alloc(atfm, GFP_NOFS);
385 + printk_ratelimited(KERN_ERR
386 + "%s: crypto_request_alloc() failed\n",
390 + ablkcipher_request_set_callback(
391 + req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
392 + ext4_crypt_complete, &ecr);
394 + BUILD_BUG_ON(EXT4_XTS_TWEAK_SIZE < sizeof(index));
395 + memcpy(xts_tweak, &index, sizeof(index));
396 + memset(&xts_tweak[sizeof(index)], 0,
397 + EXT4_XTS_TWEAK_SIZE - sizeof(index));
399 + sg_init_table(&dst, 1);
400 + sg_set_page(&dst, dest_page, PAGE_CACHE_SIZE, 0);
401 + sg_init_table(&src, 1);
402 + sg_set_page(&src, src_page, PAGE_CACHE_SIZE, 0);
403 + ablkcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE,
405 + if (rw == EXT4_DECRYPT)
406 + res = crypto_ablkcipher_decrypt(req);
408 + res = crypto_ablkcipher_encrypt(req);
409 + if (res == -EINPROGRESS || res == -EBUSY) {
410 + BUG_ON(req->base.data != &ecr);
411 + wait_for_completion(&ecr.completion);
414 + ablkcipher_request_free(req);
416 + printk_ratelimited(
418 + "%s: crypto_ablkcipher_encrypt() returned %d\n",
426 + * ext4_encrypt() - Encrypts a page
427 + * @inode: The inode for which the encryption should take place
428 + * @plaintext_page: The page to encrypt. Must be locked.
430 + * Allocates a ciphertext page and encrypts plaintext_page into it using the ctx
431 + * encryption context.
433 + * Called on the page write path. The caller must call
434 + * ext4_restore_control_page() on the returned ciphertext page to
435 + * release the bounce buffer and the encryption context.
437 + * Return: An allocated page with the encrypted content on success. Else, an
438 + * error value or NULL.
440 +struct page *ext4_encrypt(struct inode *inode,
441 + struct page *plaintext_page)
443 + struct ext4_crypto_ctx *ctx;
444 + struct page *ciphertext_page = NULL;
447 + BUG_ON(!PageLocked(plaintext_page));
449 + ctx = ext4_get_crypto_ctx(inode);
451 + return (struct page *) ctx;
453 + /* The encryption operation will require a bounce page. */
454 + ciphertext_page = alloc_page(GFP_NOFS);
455 + if (!ciphertext_page) {
456 + /* This is a potential bottleneck, but at least we'll have
457 + * forward progress. */
458 + ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
460 + if (WARN_ON_ONCE(!ciphertext_page)) {
461 + ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
462 + GFP_NOFS | __GFP_WAIT);
464 + ctx->flags &= ~EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
466 + ctx->flags |= EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
468 + ctx->bounce_page = ciphertext_page;
469 + ctx->control_page = plaintext_page;
470 + err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, plaintext_page->index,
471 + plaintext_page, ciphertext_page);
473 + ext4_release_crypto_ctx(ctx);
474 + return ERR_PTR(err);
476 + SetPagePrivate(ciphertext_page);
477 + set_page_private(ciphertext_page, (unsigned long)ctx);
478 + lock_page(ciphertext_page);
479 + return ciphertext_page;
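
The bounce-page allocation above, repeated in ext4_encrypted_zeroout()
below, follows a fallback policy: try the page allocator first, then
fall back to the preallocated mempool so writeback can always make
forward progress. A hypothetical helper (name and factoring are mine,
not part of the patch) capturing that policy:

	static struct page *alloc_bounce_page(struct ext4_crypto_ctx *ctx)
	{
		struct page *page = alloc_page(GFP_NOFS);

		if (page) {
			/* Page came from the allocator; free with __free_page(). */
			ctx->flags |= EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
			return page;
		}
		/* Fall back to the bounce page mempool. */
		page = mempool_alloc(ext4_bounce_page_pool, GFP_NOFS);
		if (WARN_ON_ONCE(!page))
			page = mempool_alloc(ext4_bounce_page_pool,
					     GFP_NOFS | __GFP_WAIT);
		ctx->flags &= ~EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
		return page;
	}
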
483 + * ext4_decrypt() - Decrypts a page in-place
484 + * @ctx: The encryption context.
485 + * @page: The page to decrypt. Must be locked.
487 + * Decrypts page in-place using the ctx encryption context.
489 + * Called from the read completion callback.
491 + * Return: Zero on success, non-zero otherwise.
493 +int ext4_decrypt(struct ext4_crypto_ctx *ctx, struct page *page)
495 + BUG_ON(!PageLocked(page));
497 + return ext4_page_crypto(ctx, page->mapping->host,
498 + EXT4_DECRYPT, page->index, page, page);
502 + * Convenience function which takes care of allocating and
503 + * deallocating the encryption context
505 +int ext4_decrypt_one(struct inode *inode, struct page *page)
509 + struct ext4_crypto_ctx *ctx = ext4_get_crypto_ctx(inode);
513 + ret = ext4_decrypt(ctx, page);
514 + ext4_release_crypto_ctx(ctx);
518 +int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex)
520 + struct ext4_crypto_ctx *ctx;
521 + struct page *ciphertext_page = NULL;
523 + ext4_lblk_t lblk = ex->ee_block;
524 + ext4_fsblk_t pblk = ext4_ext_pblock(ex);
525 + unsigned int len = ext4_ext_get_actual_len(ex);
528 + BUG_ON(inode->i_sb->s_blocksize != PAGE_CACHE_SIZE);
530 + ctx = ext4_get_crypto_ctx(inode);
532 + return PTR_ERR(ctx);
534 + ciphertext_page = alloc_page(GFP_NOFS);
535 + if (!ciphertext_page) {
536 + /* This is a potential bottleneck, but at least we'll have
537 + * forward progress. */
538 + ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
540 + if (WARN_ON_ONCE(!ciphertext_page)) {
541 + ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
542 + GFP_NOFS | __GFP_WAIT);
544 + ctx->flags &= ~EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
546 + ctx->flags |= EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
548 + ctx->bounce_page = ciphertext_page;
551 + err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, lblk,
552 + ZERO_PAGE(0), ciphertext_page);
556 + bio = bio_alloc(GFP_KERNEL, 1);
561 + bio->bi_bdev = inode->i_sb->s_bdev;
562 + bio->bi_iter.bi_sector = pblk;
563 + err = bio_add_page(bio, ciphertext_page,
564 + inode->i_sb->s_blocksize, 0);
569 + err = submit_bio_wait(WRITE, bio);
575 + ext4_release_crypto_ctx(ctx);
579 +bool ext4_valid_contents_enc_mode(uint32_t mode)
581 + return (mode == EXT4_ENCRYPTION_MODE_AES_256_XTS);
585 + * ext4_validate_encryption_key_size() - Validate the encryption key size
586 + * @mode: The key mode.
587 + * @size: The key size to validate.
589 + * Return: The validated key size for @mode. Zero if invalid.
591 +uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size)
593 + if (size == ext4_encryption_key_size(mode))
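
A hypothetical caller on the key-setup path would use this as a filter,
treating a zero return as a wrongly sized key for the requested mode
(the variable names below are illustrative):

	uint32_t size = ext4_validate_encryption_key_size(mode, payload_size);

	if (!size)
		return -EINVAL;
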
597 diff --git a/fs/ext4/crypto_policy.c b/fs/ext4/crypto_policy.c
598 index 532b69c..a4bf762 100644
599 --- a/fs/ext4/crypto_policy.c
600 +++ b/fs/ext4/crypto_policy.c
601 @@ -52,6 +52,13 @@ static int ext4_create_encryption_context_from_policy(
602 ctx.format = EXT4_ENCRYPTION_CONTEXT_FORMAT_V1;
603 memcpy(ctx.master_key_descriptor, policy->master_key_descriptor,
604 EXT4_KEY_DESCRIPTOR_SIZE);
605 + if (!ext4_valid_contents_enc_mode(policy->contents_encryption_mode)) {
606 + printk(KERN_WARNING
607 + "%s: Invalid contents encryption mode %d\n", __func__,
608 + policy->contents_encryption_mode);
612 ctx.contents_encryption_mode = policy->contents_encryption_mode;
613 ctx.filenames_encryption_mode = policy->filenames_encryption_mode;
614 BUILD_BUG_ON(sizeof(ctx.nonce) != EXT4_KEY_DERIVATION_NONCE_SIZE);
615 @@ -60,6 +67,7 @@ static int ext4_create_encryption_context_from_policy(
616 res = ext4_xattr_set(inode, EXT4_XATTR_INDEX_ENCRYPTION,
617 EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, &ctx,
621 ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
623 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
624 index e0956b7..620179e 100644
627 @@ -951,6 +951,11 @@ struct ext4_inode_info {
629 /* Precomputed uuid+inum+igen checksum for seeding inode checksums */
632 +#ifdef CONFIG_EXT4_FS_ENCRYPTION
633 + /* Encryption params */
634 + struct ext4_encryption_key i_encryption_key;
639 @@ -1355,6 +1360,12 @@ struct ext4_sb_info {
640 struct ratelimit_state s_err_ratelimit_state;
641 struct ratelimit_state s_warning_ratelimit_state;
642 struct ratelimit_state s_msg_ratelimit_state;
644 +#ifdef CONFIG_EXT4_FS_ENCRYPTION
646 + uint32_t s_file_encryption_mode;
647 + uint32_t s_dir_encryption_mode;
651 static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb)
652 @@ -1470,6 +1481,18 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
653 #define EXT4_SB(sb) (sb)
657 + * Returns true if the inode is encrypted
659 +static inline int ext4_encrypted_inode(struct inode *inode)
661 +#ifdef CONFIG_EXT4_FS_ENCRYPTION
662 + return ext4_test_inode_flag(inode, EXT4_INODE_ENCRYPT);
668 #define NEXT_ORPHAN(inode) EXT4_I(inode)->i_dtime
671 @@ -2014,6 +2037,35 @@ int ext4_process_policy(const struct ext4_encryption_policy *policy,
672 int ext4_get_policy(struct inode *inode,
673 struct ext4_encryption_policy *policy);
676 +bool ext4_valid_contents_enc_mode(uint32_t mode);
677 +uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size);
678 +extern struct workqueue_struct *ext4_read_workqueue;
679 +struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode);
680 +void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx);
681 +void ext4_restore_control_page(struct page *data_page);
682 +struct page *ext4_encrypt(struct inode *inode,
683 + struct page *plaintext_page);
684 +int ext4_decrypt(struct ext4_crypto_ctx *ctx, struct page *page);
685 +int ext4_decrypt_one(struct inode *inode, struct page *page);
686 +int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex);
688 +#ifdef CONFIG_EXT4_FS_ENCRYPTION
689 +int ext4_init_crypto(void);
690 +void ext4_exit_crypto(void);
691 +static inline int ext4_sb_has_crypto(struct super_block *sb)
693 + return EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_ENCRYPT);
696 +static inline int ext4_init_crypto(void) { return 0; }
697 +static inline void ext4_exit_crypto(void) { }
698 +static inline int ext4_sb_has_crypto(struct super_block *sb)
705 extern int __ext4_check_dir_entry(const char *, unsigned int, struct inode *,
707 diff --git a/fs/ext4/ext4_crypto.h b/fs/ext4/ext4_crypto.h
708 index a69d2ba..9d5d2e5 100644
709 --- a/fs/ext4/ext4_crypto.h
710 +++ b/fs/ext4/ext4_crypto.h
711 @@ -46,4 +46,59 @@ struct ext4_encryption_context {
712 char nonce[EXT4_KEY_DERIVATION_NONCE_SIZE];
713 } __attribute__((__packed__));
715 +/* Encryption parameters */
716 +#define EXT4_XTS_TWEAK_SIZE 16
717 +#define EXT4_AES_128_ECB_KEY_SIZE 16
718 +#define EXT4_AES_256_GCM_KEY_SIZE 32
719 +#define EXT4_AES_256_CBC_KEY_SIZE 32
720 +#define EXT4_AES_256_CTS_KEY_SIZE 32
721 +#define EXT4_AES_256_XTS_KEY_SIZE 64
722 +#define EXT4_MAX_KEY_SIZE 64
724 +struct ext4_encryption_key {
726 + char raw[EXT4_MAX_KEY_SIZE];
730 +#define EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL 0x00000001
731 +#define EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL 0x00000002
733 +struct ext4_crypto_ctx {
734 + struct crypto_tfm *tfm; /* Crypto API context */
735 + struct page *bounce_page; /* Ciphertext page on write path */
736 + struct page *control_page; /* Original page on write path */
737 + struct bio *bio; /* The bio for this context */
738 + struct work_struct work; /* Work queue for read complete path */
739 + struct list_head free_list; /* Free list */
740 + int flags; /* Flags */
741 + int mode; /* Encryption mode for tfm */
744 +struct ext4_completion_result {
745 + struct completion completion;
749 +#define DECLARE_EXT4_COMPLETION_RESULT(ecr) \
750 + struct ext4_completion_result ecr = { \
751 + COMPLETION_INITIALIZER((ecr).completion), 0 }
753 +static inline int ext4_encryption_key_size(int mode)
756 + case EXT4_ENCRYPTION_MODE_AES_256_XTS:
757 + return EXT4_AES_256_XTS_KEY_SIZE;
758 + case EXT4_ENCRYPTION_MODE_AES_256_GCM:
759 + return EXT4_AES_256_GCM_KEY_SIZE;
760 + case EXT4_ENCRYPTION_MODE_AES_256_CBC:
761 + return EXT4_AES_256_CBC_KEY_SIZE;
762 + case EXT4_ENCRYPTION_MODE_AES_256_CTS:
763 + return EXT4_AES_256_CTS_KEY_SIZE;
770 #endif /* _EXT4_CRYPTO_H */
771 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
772 index 74c5f53..1a44e74 100644
773 --- a/fs/ext4/super.c
774 +++ b/fs/ext4/super.c
775 @@ -893,6 +893,9 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
776 atomic_set(&ei->i_ioend_count, 0);
777 atomic_set(&ei->i_unwritten, 0);
778 INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work);
779 +#ifdef CONFIG_EXT4_FS_ENCRYPTION
780 + ei->i_encryption_key.mode = EXT4_ENCRYPTION_MODE_INVALID;
783 return &ei->vfs_inode;
785 @@ -3439,6 +3442,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
786 if (sb->s_bdev->bd_part)
787 sbi->s_sectors_written_start =
788 part_stat_read(sb->s_bdev->bd_part, sectors[1]);
789 +#ifdef CONFIG_EXT4_FS_ENCRYPTION
790 + /* Modes of operations for file and directory encryption. */
791 + sbi->s_file_encryption_mode = EXT4_ENCRYPTION_MODE_AES_256_XTS;
792 + sbi->s_dir_encryption_mode = EXT4_ENCRYPTION_MODE_INVALID;
795 /* Cleanup superblock name */
796 for (cp = sb->s_id; (cp = strchr(cp, '/'));)