Commit 027581f3 authored by Olaf Kirch, committed by Linus Torvalds

dm crypt: fix call to clone_init

Call clone_init early

We need to call clone_init as early as possible - at least before calling
bio_put(clone) in any error path.  Otherwise, the destructor will try to
dereference bi_private, which may still be NULL.
Signed-off-by: Olaf Kirch <olaf.kirch@oracle.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 9c89f8be
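
The reasoning above boils down to an ordering constraint on bio_put(): dropping the last reference invokes bio->bi_destructor, and dm-crypt's destructor finds its way back to the device-mapper target through bio->bi_private. A minimal sketch of that dependency follows (simplified from the 2.6.22-era driver, not the verbatim kernel source; the crypt_io/crypt_config layout and the crypt_endio name are taken from the surrounding code):

/*
 * Sketch only -- simplified from the 2.6.22-era dm-crypt code.  It shows
 * why clone_init() must run before any error path that can bio_put(clone).
 */
static void dm_crypt_bio_destructor(struct bio *bio)
{
	struct crypt_io *io = bio->bi_private;		/* NULL until clone_init() has run */
	struct crypt_config *cc = io->target->private;	/* oops here if bi_private is NULL */

	bio_free(bio, cc->bs);
}

static void clone_init(struct crypt_io *io, struct bio *clone)
{
	struct crypt_config *cc = io->target->private;

	clone->bi_private = io;			/* what the destructor dereferences */
	clone->bi_end_io  = crypt_endio;
	clone->bi_bdev    = cc->dev->bdev;
	clone->bi_rw      = io->base_bio->bi_rw;
	clone->bi_destructor = dm_crypt_bio_destructor;	/* set here by this patch */
}

Hence the diff below: crypt_alloc_buffer() now takes the crypt_io and calls clone_init() immediately after allocating the clone, so any later bio_put() in its error paths (or in process_write()) sees a fully initialised bio.
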
@@ -107,6 +107,8 @@ struct crypt_config {
 
 static struct kmem_cache *_crypt_io_pool;
 
+static void clone_init(struct crypt_io *, struct bio *);
+
 /*
  * Different IV generation algorithms:
  *
@@ -379,9 +381,10 @@ static int crypt_convert(struct crypt_config *cc,
  * May return a smaller bio when running out of pages
  */
 static struct bio *
-crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
+crypt_alloc_buffer(struct crypt_io *io, unsigned int size,
 		   struct bio *base_bio, unsigned int *bio_vec_idx)
 {
+	struct crypt_config *cc = io->target->private;
 	struct bio *clone;
 	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
@@ -396,7 +399,7 @@ crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
 	if (!clone)
 		return NULL;
 
-	clone->bi_destructor = dm_crypt_bio_destructor;
+	clone_init(io, clone);
 
 	/* if the last bio was not complete, continue where that one ended */
 	clone->bi_idx = *bio_vec_idx;
@@ -562,6 +565,7 @@ static void clone_init(struct crypt_io *io, struct bio *clone)
 	clone->bi_end_io  = crypt_endio;
 	clone->bi_bdev    = cc->dev->bdev;
 	clone->bi_rw      = io->base_bio->bi_rw;
+	clone->bi_destructor = dm_crypt_bio_destructor;
 }
 
 static void process_read(struct crypt_io *io)
@@ -585,7 +589,6 @@ static void process_read(struct crypt_io *io)
 	}
 
 	clone_init(io, clone);
-	clone->bi_destructor = dm_crypt_bio_destructor;
 	clone->bi_idx = 0;
 	clone->bi_vcnt = bio_segments(base_bio);
 	clone->bi_size = base_bio->bi_size;
@@ -615,7 +618,7 @@ static void process_write(struct crypt_io *io)
 	 * so repeat the whole process until all the data can be handled.
	 */
 	while (remaining) {
-		clone = crypt_alloc_buffer(cc, base_bio->bi_size,
+		clone = crypt_alloc_buffer(io, base_bio->bi_size,
 					   io->first_clone, &bvec_idx);
 		if (unlikely(!clone)) {
 			dec_pending(io, -ENOMEM);
@@ -631,7 +634,6 @@ static void process_write(struct crypt_io *io)
 			return;
 		}
 
-		clone_init(io, clone);
 		clone->bi_sector = cc->start + sector;
 
 		if (!io->first_clone) {