ext4 crypto: fix memory leaks in ext4_encrypted_zeroout

ext4_encrypted_zeroout() could end up leaking a bio and bounce page.
Fortunately it's not used much.  While we're fixing things up,
refactor out common code into the static function alloc_bounce_page()
and fix up error handling if mempool_alloc() fails.
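
For reference, the bio leak follows the usual submit_bio_wait() pattern;
here is a minimal sketch of the fixed write step (illustrative only --
the allocation flags and bio setup are assumptions, not verbatim ext4
code): submit_bio_wait() does not drop the submitter's reference, so the
caller must release the bio itself on both the success and error paths.

	bio = bio_alloc(GFP_NOIO, 1);	/* GFP flags illustrative only */
	if (!bio) {
		err = -ENOMEM;
		goto errout;
	}
	/* ... point the bio at ciphertext_page and the target block ... */
	err = submit_bio_wait(WRITE, bio);
	bio_put(bio);	/* previously missing: the submitted bio leaked */
	if (err)
		goto errout;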
Signed-off-by: Theodore Ts'o <tytso@mit.edu>

---
 fs/ext4/crypto.c | 62 +++++++++++++++++++++++++++++++-------------------------------
 1 file changed, 31 insertions(+), 31 deletions(-)

diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
index 68c7ab8..ac2419c 100644
--- a/fs/ext4/crypto.c
+++ b/fs/ext4/crypto.c
@@ -324,6 +324,26 @@ static int ext4_page_crypto(struct ext4_crypto_ctx *ctx,
        return 0;
 }
 
+static struct page *alloc_bounce_page(struct ext4_crypto_ctx *ctx)
+{
+       struct page *ciphertext_page = alloc_page(GFP_NOFS);
+
+       if (!ciphertext_page) {
+               /* This is a potential bottleneck, but at least we'll have
+                * forward progress. */
+               ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
+                                                GFP_NOFS);
+               if (ciphertext_page == NULL)
+                       return ERR_PTR(-ENOMEM);
+               ctx->flags &= ~EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
+       } else {
+               ctx->flags |= EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
+       }
+       ctx->flags |= EXT4_WRITE_PATH_FL;
+       ctx->w.bounce_page = ciphertext_page;
+       return ciphertext_page;
+}
+
 /**
  * ext4_encrypt() - Encrypts a page
  * @inode:          The inode for which the encryption should take place
@@ -353,28 +373,17 @@ struct page *ext4_encrypt(struct inode *inode,
                return (struct page *) ctx;
 
        /* The encryption operation will require a bounce page. */
-       ciphertext_page = alloc_page(GFP_NOFS);
-       if (!ciphertext_page) {
-               /* This is a potential bottleneck, but at least we'll have
-                * forward progress. */
-               ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
-                                                GFP_NOFS);
-               if (WARN_ON_ONCE(!ciphertext_page)) {
-                       ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
-                                                        GFP_NOFS | __GFP_WAIT);
-               }
-               ctx->flags &= ~EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
-       } else {
-               ctx->flags |= EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
-       }
-       ctx->flags |= EXT4_WRITE_PATH_FL;
-       ctx->w.bounce_page = ciphertext_page;
+       ciphertext_page = alloc_bounce_page(ctx);
+       if (IS_ERR(ciphertext_page))
+               goto errout;
        ctx->w.control_page = plaintext_page;
        err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, plaintext_page->index,
                               plaintext_page, ciphertext_page);
        if (err) {
+               ciphertext_page = ERR_PTR(err);
+       errout:
                ext4_release_crypto_ctx(ctx);
-               return ERR_PTR(err);
+               return ciphertext_page;
        }
        SetPagePrivate(ciphertext_page);
        set_page_private(ciphertext_page, (unsigned long)ctx);
@@ -434,21 +443,11 @@ int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex)
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);
 
-       ciphertext_page = alloc_page(GFP_NOFS);
-       if (!ciphertext_page) {
-               /* This is a potential bottleneck, but at least we'll have
-                * forward progress. */
-               ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
-                                                GFP_NOFS);
-               if (WARN_ON_ONCE(!ciphertext_page)) {
-                       ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
-                                                        GFP_NOFS | __GFP_WAIT);
-               }
-               ctx->flags &= ~EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
-       } else {
-               ctx->flags |= EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
+       ciphertext_page = alloc_bounce_page(ctx);
+       if (IS_ERR(ciphertext_page)) {
+               err = PTR_ERR(ciphertext_page);
+               goto errout;
        }
-       ctx->w.bounce_page = ciphertext_page;
 
        while (len--) {
                err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, lblk,
@@ -470,6 +469,7 @@ int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex)
                        goto errout;
                }
                err = submit_bio_wait(WRITE, bio);
+               bio_put(bio);
                if (err)
                        goto errout;
        }