1 ext4 crypto: fix memory leaks in ext4_encrypted_zeroout
3 ext4_encrypted_zeroout() could end up leaking a bio and bounce page.
4 Fortunately it's not used much. While we're fixing things up,
5 refactor out common code into the static function alloc_bounce_page()
6 and fix up error handling if mempool_alloc() fails.
8 Signed-off-by: Theodore Ts'o <tytso@mit.edu>
11 fs/ext4/crypto.c | 62 +++++++++++++++++++++++++++++++-------------------------------
12 1 file changed, 31 insertions(+), 31 deletions(-)
14 diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
15 index 68c7ab8..ac2419c 100644
16 --- a/fs/ext4/crypto.c
17 +++ b/fs/ext4/crypto.c
18 @@ -324,6 +324,26 @@ static int ext4_page_crypto(struct ext4_crypto_ctx *ctx,
+static struct page *alloc_bounce_page(struct ext4_crypto_ctx *ctx)
+{
+	struct page *ciphertext_page = alloc_page(GFP_NOFS);
+
+	if (!ciphertext_page) {
+		/* This is a potential bottleneck, but at least we'll have
+		 * forward progress. */
+		ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
+						GFP_NOFS);
+		if (ciphertext_page == NULL)
+			return ERR_PTR(-ENOMEM);
+		ctx->flags &= ~EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
+	} else {
+		ctx->flags |= EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
+	}
+	ctx->flags |= EXT4_WRITE_PATH_FL;
+	ctx->w.bounce_page = ciphertext_page;
+	return ciphertext_page;
+}
+
43 * ext4_encrypt() - Encrypts a page
44 * @inode: The inode for which the encryption should take place
45 @@ -353,28 +373,17 @@ struct page *ext4_encrypt(struct inode *inode,
46 return (struct page *) ctx;
48 /* The encryption operation will require a bounce page. */
-	ciphertext_page = alloc_page(GFP_NOFS);
-	if (!ciphertext_page) {
-		/* This is a potential bottleneck, but at least we'll have
-		 * forward progress. */
-		ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
-						GFP_NOFS);
-		if (WARN_ON_ONCE(!ciphertext_page)) {
-			ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
-							GFP_NOFS | __GFP_WAIT);
-		}
-		ctx->flags &= ~EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
-	} else {
-		ctx->flags |= EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
-	}
-	ctx->flags |= EXT4_WRITE_PATH_FL;
-	ctx->w.bounce_page = ciphertext_page;
+	ciphertext_page = alloc_bounce_page(ctx);
+	if (IS_ERR(ciphertext_page))
+		goto errout;
68 ctx->w.control_page = plaintext_page;
69 err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, plaintext_page->index,
70 plaintext_page, ciphertext_page);
 	if (err) {
+		ciphertext_page = ERR_PTR(err);
+	errout:
 		ext4_release_crypto_ctx(ctx);
-		return ERR_PTR(err);
+		return ciphertext_page;
 	}
78 SetPagePrivate(ciphertext_page);
79 set_page_private(ciphertext_page, (unsigned long)ctx);
80 @@ -434,21 +443,11 @@ int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex)
-	ciphertext_page = alloc_page(GFP_NOFS);
-	if (!ciphertext_page) {
-		/* This is a potential bottleneck, but at least we'll have
-		 * forward progress. */
-		ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
-						GFP_NOFS);
-		if (WARN_ON_ONCE(!ciphertext_page)) {
-			ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
-							GFP_NOFS | __GFP_WAIT);
-		}
-		ctx->flags &= ~EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
-	} else {
-		ctx->flags |= EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
+	ciphertext_page = alloc_bounce_page(ctx);
+	if (IS_ERR(ciphertext_page)) {
+		err = PTR_ERR(ciphertext_page);
+		goto errout;
 	}
-	ctx->w.bounce_page = ciphertext_page;
105 err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, lblk,
106 @@ -470,6 +469,7 @@ int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex)
 	err = submit_bio_wait(WRITE, bio);
+	bio_put(bio);