Commit 0a9bab39 authored by Mikulas Patocka, committed by Mike Snitzer

dm-crypt, dm-verity: disable tasklets

Tasklets have an inherent problem with memory corruption. The function
tasklet_action_common calls tasklet_trylock, then the tasklet callback,
and then tasklet_unlock. If the tasklet callback frees the structure that
contains the tasklet, or calls code that may free it, tasklet_unlock ends
up writing into freed memory.
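
As an illustration only (not part of the original message), the problematic
pattern looks roughly like the sketch below; the names foo, foo_complete and
foo_submit are hypothetical and stand in for the per-bio structures used by
dm-crypt and dm-verity:

/* Hypothetical sketch of the use-after-free described above. */
#include <linux/interrupt.h>
#include <linux/slab.h>

struct foo {
	struct tasklet_struct t;
	/* ... per-I/O state ... */
};

static void foo_complete(unsigned long data)
{
	struct foo *f = (struct foo *)data;

	/*
	 * Completing the I/O here can free 'f' -- directly, as below, or
	 * indirectly via a completion path such as bio_endio().
	 */
	kfree(f);

	/*
	 * After this callback returns, tasklet_action_common() still calls
	 * tasklet_unlock() on &f->t, i.e. it writes into the memory that
	 * was just freed.
	 */
}

static void foo_submit(struct foo *f)
{
	tasklet_init(&f->t, foo_complete, (unsigned long)f);
	tasklet_schedule(&f->t);
}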

Commits 8e14f610 and d9a02e01 attempt to fix this for dm-crypt, but they
are not a sufficient fix and the data corruption can still happen [1].
There is no fix for dm-verity, and dm-verity writes into freed memory with
every tasklet-processed bio.

Atomic workqueues will be introduced in kernel 6.9 [2]. They will have a
better interface and will not suffer from this memory corruption problem.

But we need something that stops the memory corruption now and that can be
backported to the stable kernels. So, I'm proposing this commit that
disables tasklets in both dm-crypt and dm-verity. This commit doesn't
remove the tasklet support, because the tasklet code will be reused when
atomic workqueues are implemented.

[1] https://lore.kernel.org/all/d390d7ee-f142-44d3-822a-87949e14608b@suse.de/T/
[2] https://lore.kernel.org/lkml/20240130091300.2968534-1-tj@kernel.org/

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Cc: stable@vger.kernel.org
Fixes: 39d42fa9 ("dm crypt: add flags to optionally bypass kcryptd workqueues")
Fixes: 5721d4e5 ("dm verity: Add optional "try_verify_in_tasklet" feature")
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
parent 40ef8756
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -73,10 +73,8 @@ struct dm_crypt_io {
 	struct bio *base_bio;
 	u8 *integrity_metadata;
 	bool integrity_metadata_from_pool:1;
-	bool in_tasklet:1;
 
 	struct work_struct work;
-	struct tasklet_struct tasklet;
 
 	struct convert_context ctx;
@@ -1762,7 +1760,6 @@ static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
 	io->ctx.r.req = NULL;
 	io->integrity_metadata = NULL;
 	io->integrity_metadata_from_pool = false;
-	io->in_tasklet = false;
 	atomic_set(&io->io_pending, 0);
 }
@@ -1771,13 +1768,6 @@ static void crypt_inc_pending(struct dm_crypt_io *io)
 	atomic_inc(&io->io_pending);
 }
 
-static void kcryptd_io_bio_endio(struct work_struct *work)
-{
-	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
-
-	bio_endio(io->base_bio);
-}
-
 /*
  * One of the bios was finished. Check for completion of
  * the whole request and correctly clean up the buffer.
@@ -1801,20 +1791,6 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
 	base_bio->bi_status = error;
 
-	/*
-	 * If we are running this function from our tasklet,
-	 * we can't call bio_endio() here, because it will call
-	 * clone_endio() from dm.c, which in turn will
-	 * free the current struct dm_crypt_io structure with
-	 * our tasklet. In this case we need to delay bio_endio()
-	 * execution to after the tasklet is done and dequeued.
-	 */
-	if (io->in_tasklet) {
-		INIT_WORK(&io->work, kcryptd_io_bio_endio);
-		queue_work(cc->io_queue, &io->work);
-		return;
-	}
-
 	bio_endio(base_bio);
 }
@@ -2246,11 +2222,6 @@ static void kcryptd_crypt(struct work_struct *work)
 		kcryptd_crypt_write_convert(io);
 }
 
-static void kcryptd_crypt_tasklet(unsigned long work)
-{
-	kcryptd_crypt((struct work_struct *)work);
-}
-
 static void kcryptd_queue_crypt(struct dm_crypt_io *io)
 {
 	struct crypt_config *cc = io->cc;
@@ -2262,15 +2233,10 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io)
 		 * irqs_disabled(): the kernel may run some IO completion from the idle thread, but
 		 * it is being executed with irqs disabled.
		 */
-		if (in_hardirq() || irqs_disabled()) {
-			io->in_tasklet = true;
-			tasklet_init(&io->tasklet, kcryptd_crypt_tasklet, (unsigned long)&io->work);
-			tasklet_schedule(&io->tasklet);
+		if (!(in_hardirq() || irqs_disabled())) {
+			kcryptd_crypt(&io->work);
 			return;
 		}
-
-		kcryptd_crypt(&io->work);
-		return;
 	}
 
 	INIT_WORK(&io->work, kcryptd_crypt);
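
To make the inverted condition in the kcryptd_queue_crypt() hunk easier to
follow, here is a rough sketch of the dispatch logic that results after this
patch. The bypass_workqueue_requested() helper is hypothetical (it stands for
the DM_CRYPT_NO_*_WORKQUEUE flag test that sits outside the hunk), and the
final queue_work() target is taken from dm-crypt code not shown above, so
treat the surrounding details as approximate:

/*
 * Sketch of the post-patch dispatch in kcryptd_queue_crypt(): the crypto
 * now either runs synchronously in the caller's context or is deferred to
 * the regular workqueue; no tasklet is ever scheduled.
 */
static void kcryptd_queue_crypt_sketch(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;

	if (bypass_workqueue_requested(io)) {	/* hypothetical helper */
		/*
		 * In hard IRQ context, or with IRQs disabled, doing the
		 * crypto inline is not safe, so fall through to the
		 * workqueue instead.
		 */
		if (!(in_hardirq() || irqs_disabled())) {
			kcryptd_crypt(&io->work);	/* run synchronously */
			return;
		}
	}

	/* Otherwise defer to dm-crypt's regular crypt workqueue. */
	INIT_WORK(&io->work, kcryptd_crypt);
	queue_work(cc->crypt_queue, &io->work);
}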
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -645,23 +645,6 @@ static void verity_work(struct work_struct *w)
 	verity_finish_io(io, errno_to_blk_status(verity_verify_io(io)));
 }
 
-static void verity_tasklet(unsigned long data)
-{
-	struct dm_verity_io *io = (struct dm_verity_io *)data;
-	int err;
-
-	io->in_tasklet = true;
-	err = verity_verify_io(io);
-	if (err == -EAGAIN || err == -ENOMEM) {
-		/* fallback to retrying with work-queue */
-		INIT_WORK(&io->work, verity_work);
-		queue_work(io->v->verify_wq, &io->work);
-		return;
-	}
-
-	verity_finish_io(io, errno_to_blk_status(err));
-}
-
 static void verity_end_io(struct bio *bio)
 {
 	struct dm_verity_io *io = bio->bi_private;
@@ -674,13 +657,8 @@ static void verity_end_io(struct bio *bio)
 		return;
 	}
 
-	if (static_branch_unlikely(&use_tasklet_enabled) && io->v->use_tasklet) {
-		tasklet_init(&io->tasklet, verity_tasklet, (unsigned long)io);
-		tasklet_schedule(&io->tasklet);
-	} else {
-		INIT_WORK(&io->work, verity_work);
-		queue_work(io->v->verify_wq, &io->work);
-	}
+	INIT_WORK(&io->work, verity_work);
+	queue_work(io->v->verify_wq, &io->work);
 }
 
 /*
--- a/drivers/md/dm-verity.h
+++ b/drivers/md/dm-verity.h
@@ -83,7 +83,6 @@ struct dm_verity_io {
 	struct bvec_iter iter;
 
 	struct work_struct work;
-	struct tasklet_struct tasklet;
 
 	/*
	 * Three variably-size fields follow this struct: