Commit 93f221ae authored by Eric Biggers, committed by Jens Axboe

block: make blk_crypto_rq_bio_prep() able to fail

blk_crypto_rq_bio_prep() assumes its gfp_mask argument always includes
__GFP_DIRECT_RECLAIM, so that the mempool_alloc() will always succeed.
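For context, mempool_alloc() can only block and wait for an element to be
returned to the pool when the gfp mask allows direct reclaim; without it,
the allocation can fail.  A minimal sketch of the two cases, using a
hypothetical pool (not code from this commit):

    #include <linux/mempool.h>
    #include <linux/gfp.h>

    static void gfp_example(mempool_t *pool)
    {
            /*
             * GFP_NOIO includes __GFP_DIRECT_RECLAIM, so mempool_alloc()
             * may sleep until an element is freed back to the pool and
             * therefore never returns NULL in process context.
             */
            void *a = mempool_alloc(pool, GFP_NOIO);

            /*
             * GFP_ATOMIC lacks __GFP_DIRECT_RECLAIM, so mempool_alloc()
             * cannot wait and can return NULL; the caller must handle it.
             */
            void *b = mempool_alloc(pool, GFP_ATOMIC);

            if (b)
                    mempool_free(b, pool);
            mempool_free(a, pool);
    }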

However, blk_crypto_rq_bio_prep() might be called with GFP_ATOMIC via
setup_clone() in drivers/md/dm-rq.c.
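For reference, that path passes its caller's gfp_mask straight through to
blk_rq_prep_clone(), which in turn calls blk_crypto_rq_bio_prep().  Roughly
paraphrased from drivers/md/dm-rq.c (not part of this diff; details may
differ from the tree this commit applies to):

    static int setup_clone(struct request *clone, struct request *rq,
                           struct dm_rq_target_io *tio, gfp_t gfp_mask)
    {
            int r;

            /* gfp_mask can be GFP_ATOMIC here, so clone prep must be able to fail */
            r = blk_rq_prep_clone(clone, rq, &tio->md->bs, gfp_mask,
                                  dm_rq_bio_constructor, tio);
            if (r)
                    return r;

            clone->end_io = end_clone_request;
            clone->end_io_data = tio;
            tio->clone = clone;

            return 0;
    }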

This case isn't currently reachable with a bio that actually has an
encryption context.  However, it's fragile to rely on this.  Just make
blk_crypto_rq_bio_prep() able to fail.
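The resulting contract, mirrored by the callers updated below:
blk_crypto_rq_bio_prep() returns 0 on success and -ENOMEM on allocation
failure, and -ENOMEM is only possible when the gfp mask lacks
__GFP_DIRECT_RECLAIM.  A hypothetical caller checks it like:

    if (rq->bio) {
            int err = blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask);

            if (err)
                    return err; /* only possible for non-reclaiming gfp masks */
    }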
Suggested-by: Satya Tangirala <satyat@google.com>
Signed-off-by: Eric Biggers <ebiggers@google.com>
Reviewed-by: Mike Snitzer <snitzer@redhat.com>
Reviewed-by: Satya Tangirala <satyat@google.com>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 07560151
block/blk-core.c
@@ -1617,8 +1617,10 @@ int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
                 if (rq->bio) {
                         rq->biotail->bi_next = bio;
                         rq->biotail = bio;
-                } else
+                } else {
                         rq->bio = rq->biotail = bio;
+                }
+                bio = NULL;
         }

         /* Copy attributes of the original request to the clone request. */
@@ -1631,8 +1633,8 @@ int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
         rq->nr_phys_segments = rq_src->nr_phys_segments;
         rq->ioprio = rq_src->ioprio;

-        if (rq->bio)
-                blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask);
+        if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0)
+                goto free_and_out;

         return 0;
block/blk-crypto-internal.h
@@ -142,13 +142,24 @@ static inline void blk_crypto_free_request(struct request *rq)
                 __blk_crypto_free_request(rq);
 }

-void __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
-                              gfp_t gfp_mask);
-static inline void blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
-                                          gfp_t gfp_mask)
+int __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
+                             gfp_t gfp_mask);
+/**
+ * blk_crypto_rq_bio_prep - Prepare a request's crypt_ctx when its first bio
+ *                          is inserted
+ * @rq: The request to prepare
+ * @bio: The first bio being inserted into the request
+ * @gfp_mask: Memory allocation flags
+ *
+ * Return: 0 on success, -ENOMEM if out of memory.  -ENOMEM is only possible if
+ *         @gfp_mask doesn't include %__GFP_DIRECT_RECLAIM.
+ */
+static inline int blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
+                                         gfp_t gfp_mask)
 {
         if (bio_has_crypt_ctx(bio))
-                __blk_crypto_rq_bio_prep(rq, bio, gfp_mask);
+                return __blk_crypto_rq_bio_prep(rq, bio, gfp_mask);
+        return 0;
 }

 /**
block/blk-crypto.c
@@ -283,20 +283,16 @@ bool __blk_crypto_bio_prep(struct bio **bio_ptr)
         return false;
 }

-/**
- * __blk_crypto_rq_bio_prep - Prepare a request's crypt_ctx when its first bio
- *                            is inserted
- *
- * @rq: The request to prepare
- * @bio: The first bio being inserted into the request
- * @gfp_mask: gfp mask
- */
-void __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
-                              gfp_t gfp_mask)
+int __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
+                             gfp_t gfp_mask)
 {
-        if (!rq->crypt_ctx)
+        if (!rq->crypt_ctx) {
                 rq->crypt_ctx = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
+                if (!rq->crypt_ctx)
+                        return -ENOMEM;
+        }
         *rq->crypt_ctx = *bio->bi_crypt_context;
+        return 0;
 }

 /**
block/blk-mq.c
@@ -1940,13 +1940,18 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
                 unsigned int nr_segs)
 {
+        int err;
+
         if (bio->bi_opf & REQ_RAHEAD)
                 rq->cmd_flags |= REQ_FAILFAST_MASK;

         rq->__sector = bio->bi_iter.bi_sector;
         rq->write_hint = bio->bi_write_hint;
         blk_rq_bio_prep(rq, bio, nr_segs);
-        blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO);
+
+        /* This can't fail, since GFP_NOIO includes __GFP_DIRECT_RECLAIM. */
+        err = blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO);
+        WARN_ON_ONCE(err);

         blk_account_io_start(rq);
 }