add patch set-lazytime-on-remount
[ext4-patch-queue.git] / crypto-shrink-ext4_crypto_ctx
bloba3bd560334ba6d154f77de5d242421d636f8cf5a
1 ext4 crypto: shrink size of the ext4_crypto_ctx structure
3 Some fields are only used when the crypto_ctx is being used on the
4 read path, some are only used on the write path, and some are only
 5 used when the structure is on the free list.  Optimize memory use by using
6 a union.
8 Signed-off-by: Theodore Ts'o <tytso@mit.edu>
9 ---
10  fs/ext4/crypto.c      | 31 ++++++++++---------------------
11  fs/ext4/ext4_crypto.h | 21 ++++++++++++++-------
12  fs/ext4/page-io.c     |  2 +-
13  fs/ext4/readpage.c    | 10 +++++-----
14  4 files changed, 30 insertions(+), 34 deletions(-)
16 diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
17 index 9969d05..28a0e4bd 100644
18 --- a/fs/ext4/crypto.c
19 +++ b/fs/ext4/crypto.c
20 @@ -71,14 +71,14 @@ void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx)
21  {
22         unsigned long flags;
24 -       if (ctx->bounce_page) {
25 +       if (ctx->flags & EXT4_WRITE_PATH_FL && ctx->w.bounce_page) {
26                 if (ctx->flags & EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL)
27 -                       __free_page(ctx->bounce_page);
28 +                       __free_page(ctx->w.bounce_page);
29                 else
30 -                       mempool_free(ctx->bounce_page, ext4_bounce_page_pool);
31 -               ctx->bounce_page = NULL;
32 +                       mempool_free(ctx->w.bounce_page, ext4_bounce_page_pool);
33         }
34 -       ctx->control_page = NULL;
35 +       ctx->w.bounce_page = NULL;
36 +       ctx->w.control_page = NULL;
37         if (ctx->flags & EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL) {
38                 if (ctx->tfm)
39                         crypto_free_tfm(ctx->tfm);
40 @@ -134,6 +134,7 @@ struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode)
41         } else {
42                 ctx->flags &= ~EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
43         }
44 +       ctx->flags &= ~EXT4_WRITE_PATH_FL;
46         /* Allocate a new Crypto API context if we don't already have
47          * one or if it isn't the right mode. */
48 @@ -165,10 +166,6 @@ struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode)
49         }
50         BUG_ON(ci->ci_size != ext4_encryption_key_size(ci->ci_data_mode));
52 -       /* There shouldn't be a bounce page attached to the crypto
53 -        * context at this point. */
54 -       BUG_ON(ctx->bounce_page);
56  out:
57         if (res) {
58                 if (!IS_ERR_OR_NULL(ctx))
59 @@ -189,15 +186,6 @@ void ext4_exit_crypto(void)
60         struct ext4_crypto_ctx *pos, *n;
62         list_for_each_entry_safe(pos, n, &ext4_free_crypto_ctxs, free_list) {
63 -               if (pos->bounce_page) {
64 -                       if (pos->flags &
65 -                           EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL) {
66 -                               __free_page(pos->bounce_page);
67 -                       } else {
68 -                               mempool_free(pos->bounce_page,
69 -                                            ext4_bounce_page_pool);
70 -                       }
71 -               }
72                 if (pos->tfm)
73                         crypto_free_tfm(pos->tfm);
74                 kmem_cache_free(ext4_crypto_ctx_cachep, pos);
75 @@ -425,8 +413,9 @@ struct page *ext4_encrypt(struct inode *inode,
76         } else {
77                 ctx->flags |= EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
78         }
79 -       ctx->bounce_page = ciphertext_page;
80 -       ctx->control_page = plaintext_page;
81 +       ctx->flags |= EXT4_WRITE_PATH_FL;
82 +       ctx->w.bounce_page = ciphertext_page;
83 +       ctx->w.control_page = plaintext_page;
84         err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, plaintext_page->index,
85                                plaintext_page, ciphertext_page);
86         if (err) {
87 @@ -505,7 +494,7 @@ int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex)
88         } else {
89                 ctx->flags |= EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
90         }
91 -       ctx->bounce_page = ciphertext_page;
92 +       ctx->w.bounce_page = ciphertext_page;
94         while (len--) {
95                 err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, lblk,
96 diff --git a/fs/ext4/ext4_crypto.h b/fs/ext4/ext4_crypto.h
97 index 69faf0e..c5258f2 100644
98 --- a/fs/ext4/ext4_crypto.h
99 +++ b/fs/ext4/ext4_crypto.h
100 @@ -86,16 +86,23 @@ struct ext4_crypt_info {
102  #define EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL             0x00000001
103  #define EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL     0x00000002
104 +#define EXT4_WRITE_PATH_FL                           0x00000004
106  struct ext4_crypto_ctx {
107         struct crypto_tfm *tfm;         /* Crypto API context */
108 -       struct page *bounce_page;       /* Ciphertext page on write path */
109 -       struct page *control_page;      /* Original page on write path */
110 -       struct bio *bio;                /* The bio for this context */
111 -       struct work_struct work;        /* Work queue for read complete path */
112 -       struct list_head free_list;     /* Free list */
113 -       int flags;                      /* Flags */
114 -       int mode;                       /* Encryption mode for tfm */
115 +       union {
116 +               struct {
117 +                       struct page *bounce_page;       /* Ciphertext page */
118 +                       struct page *control_page;      /* Original page  */
119 +               } w;
120 +               struct {
121 +                       struct bio *bio;
122 +                       struct work_struct work;
123 +               } r;
124 +               struct list_head free_list;     /* Free list */
125 +       };
126 +       char flags;                      /* Flags */
127 +       char mode;                       /* Encryption mode for tfm */
128  };
130  struct ext4_completion_result {
131 diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
132 index 5765f88..79636e2 100644
133 --- a/fs/ext4/page-io.c
134 +++ b/fs/ext4/page-io.c
135 @@ -84,7 +84,7 @@ static void ext4_finish_bio(struct bio *bio)
136                         /* The bounce data pages are unmapped. */
137                         data_page = page;
138                         ctx = (struct ext4_crypto_ctx *)page_private(data_page);
139 -                       page = ctx->control_page;
140 +                       page = ctx->w.control_page;
141                 }
142  #endif
144 diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
145 index 171b9ac..ec3ef93 100644
146 --- a/fs/ext4/readpage.c
147 +++ b/fs/ext4/readpage.c
148 @@ -54,8 +54,8 @@ static void completion_pages(struct work_struct *work)
150  #ifdef CONFIG_EXT4_FS_ENCRYPTION
151         struct ext4_crypto_ctx *ctx =
152 -               container_of(work, struct ext4_crypto_ctx, work);
153 -       struct bio      *bio    = ctx->bio;
154 +               container_of(work, struct ext4_crypto_ctx, r.work);
155 +       struct bio      *bio    = ctx->r.bio;
156         struct bio_vec  *bv;
157         int             i;
159 @@ -109,9 +109,9 @@ static void mpage_end_io(struct bio *bio, int err)
160                 if (err) {
161                         ext4_release_crypto_ctx(ctx);
162                 } else {
163 -                       INIT_WORK(&ctx->work, completion_pages);
164 -                       ctx->bio = bio;
165 -                       queue_work(ext4_read_workqueue, &ctx->work);
166 +                       INIT_WORK(&ctx->r.work, completion_pages);
167 +                       ctx->r.bio = bio;
168 +                       queue_work(ext4_read_workqueue, &ctx->r.work);
169                         return;
170                 }
171         }