Commit 614def70 authored by Theodore Ts'o

ext4 crypto: shrink size of the ext4_crypto_ctx structure

Some fields are only used when the crypto_ctx is being used on the
read path, some are only used on the write path, and some are only
used when the structure is on free list.  Optimize memory use by using
a union.
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
parent 1aaa6e8b
@@ -71,14 +71,14 @@ void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx)
 {
 	unsigned long flags;
-	if (ctx->bounce_page) {
+	if (ctx->flags & EXT4_WRITE_PATH_FL && ctx->w.bounce_page) {
 		if (ctx->flags & EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL)
-			__free_page(ctx->bounce_page);
+			__free_page(ctx->w.bounce_page);
 		else
-			mempool_free(ctx->bounce_page, ext4_bounce_page_pool);
-		ctx->bounce_page = NULL;
+			mempool_free(ctx->w.bounce_page, ext4_bounce_page_pool);
 	}
-	ctx->control_page = NULL;
+	ctx->w.bounce_page = NULL;
+	ctx->w.control_page = NULL;
 	if (ctx->flags & EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL) {
 		if (ctx->tfm)
 			crypto_free_tfm(ctx->tfm);
@@ -134,6 +134,7 @@ struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode)
 	} else {
 		ctx->flags &= ~EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
 	}
+	ctx->flags &= ~EXT4_WRITE_PATH_FL;
 	/* Allocate a new Crypto API context if we don't already have
 	 * one or if it isn't the right mode. */
@@ -165,10 +166,6 @@ struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode)
 	}
 	BUG_ON(ci->ci_size != ext4_encryption_key_size(ci->ci_data_mode));
-	/* There shouldn't be a bounce page attached to the crypto
-	 * context at this point. */
-	BUG_ON(ctx->bounce_page);
 out:
 	if (res) {
 		if (!IS_ERR_OR_NULL(ctx))
@@ -189,15 +186,6 @@ void ext4_exit_crypto(void)
 	struct ext4_crypto_ctx *pos, *n;
 	list_for_each_entry_safe(pos, n, &ext4_free_crypto_ctxs, free_list) {
-		if (pos->bounce_page) {
-			if (pos->flags &
-			    EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL) {
-				__free_page(pos->bounce_page);
-			} else {
-				mempool_free(pos->bounce_page,
-					     ext4_bounce_page_pool);
-			}
-		}
 		if (pos->tfm)
 			crypto_free_tfm(pos->tfm);
 		kmem_cache_free(ext4_crypto_ctx_cachep, pos);
@@ -425,8 +413,9 @@ struct page *ext4_encrypt(struct inode *inode,
 	} else {
 		ctx->flags |= EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
 	}
-	ctx->bounce_page = ciphertext_page;
-	ctx->control_page = plaintext_page;
+	ctx->flags |= EXT4_WRITE_PATH_FL;
+	ctx->w.bounce_page = ciphertext_page;
+	ctx->w.control_page = plaintext_page;
 	err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, plaintext_page->index,
 			       plaintext_page, ciphertext_page);
 	if (err) {
@@ -505,7 +494,7 @@ int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex)
 	} else {
 		ctx->flags |= EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
 	}
-	ctx->bounce_page = ciphertext_page;
+	ctx->w.bounce_page = ciphertext_page;
 	while (len--) {
 		err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, lblk,
......
@@ -86,16 +86,23 @@ struct ext4_crypt_info {
 #define EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL	0x00000001
 #define EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL	0x00000002
+#define EXT4_WRITE_PATH_FL			0x00000004
 struct ext4_crypto_ctx {
 	struct crypto_tfm *tfm;		/* Crypto API context */
-	struct page *bounce_page;	/* Ciphertext page on write path */
-	struct page *control_page;	/* Original page on write path */
-	struct bio *bio;		/* The bio for this context */
-	struct work_struct work;	/* Work queue for read complete path */
-	struct list_head free_list;	/* Free list */
-	int flags;			/* Flags */
-	int mode;			/* Encryption mode for tfm */
+	union {
+		struct {
+			struct page *bounce_page;	/* Ciphertext page */
+			struct page *control_page;	/* Original page */
+		} w;
+		struct {
+			struct bio *bio;
+			struct work_struct work;
+		} r;
+		struct list_head free_list;	/* Free list */
+	};
+	char flags;			/* Flags */
+	char mode;			/* Encryption mode for tfm */
 };
 struct ext4_completion_result {
......
@@ -84,7 +84,7 @@ static void ext4_finish_bio(struct bio *bio)
 			/* The bounce data pages are unmapped. */
 			data_page = page;
 			ctx = (struct ext4_crypto_ctx *)page_private(data_page);
-			page = ctx->control_page;
+			page = ctx->w.control_page;
 		}
 #endif
......
@@ -54,8 +54,8 @@ static void completion_pages(struct work_struct *work)
 {
 #ifdef CONFIG_EXT4_FS_ENCRYPTION
 	struct ext4_crypto_ctx *ctx =
-		container_of(work, struct ext4_crypto_ctx, work);
-	struct bio *bio = ctx->bio;
+		container_of(work, struct ext4_crypto_ctx, r.work);
+	struct bio *bio = ctx->r.bio;
 	struct bio_vec *bv;
 	int i;
@@ -109,9 +109,9 @@ static void mpage_end_io(struct bio *bio, int err)
 	if (err) {
 		ext4_release_crypto_ctx(ctx);
 	} else {
-		INIT_WORK(&ctx->work, completion_pages);
-		ctx->bio = bio;
-		queue_work(ext4_read_workqueue, &ctx->work);
+		INIT_WORK(&ctx->r.work, completion_pages);
+		ctx->r.bio = bio;
+		queue_work(ext4_read_workqueue, &ctx->r.work);
 		return;
 	}
 }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment