Commit 38417468 authored by Christoph Hellwig, committed by Martin K. Petersen

scsi: block: remove the cluster flag

Now that the SCSI layer has replaced the use of the cluster flag with
segment size limits and the DMA boundary, we can remove the cluster flag
from the block layer.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent d6a9000b
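
The equivalence stated in the commit message is worth spelling out: with the
cluster flag gone, a driver that must not have adjacent bio_vecs merged into
one segment can express the same constraint through the remaining queue
limits. A minimal sketch, assuming an already set-up request_queue *q; the
helper name disable_segment_merging is hypothetical, while
blk_queue_max_segment_size() and blk_queue_segment_boundary() are the
existing block-layer limit setters:

	#include <linux/blkdev.h>

	/*
	 * Hedged sketch: "no clustering" expressed as "every segment is
	 * at most one page and never crosses a page boundary".
	 */
	static void disable_segment_merging(struct request_queue *q)
	{
		/* cap each scatter/gather segment at a single page */
		blk_queue_max_segment_size(q, PAGE_SIZE);
		/* disallow segments that span a page boundary */
		blk_queue_segment_boundary(q, PAGE_SIZE - 1);
	}

With these limits in place, the merge paths below (blk_bio_segment_split(),
__blk_segment_map_sg()) reject any merge that would grow a segment past one
page, which closely matches what a cleared cluster flag used to enforce.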
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -194,7 +194,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 			goto split;
 		}
 
-		if (bvprvp && blk_queue_cluster(q)) {
+		if (bvprvp) {
 			if (seg_size + bv.bv_len > queue_max_segment_size(q))
 				goto new_segment;
 			if (!biovec_phys_mergeable(q, bvprvp, &bv))
@@ -294,7 +294,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 					     bool no_sg_merge)
 {
 	struct bio_vec bv, bvprv = { NULL };
-	int cluster, prev = 0;
+	int prev = 0;
 	unsigned int seg_size, nr_phys_segs;
 	struct bio *fbio, *bbio;
 	struct bvec_iter iter;
@@ -312,7 +312,6 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 	}
 
 	fbio = bio;
-	cluster = blk_queue_cluster(q);
 	seg_size = 0;
 	nr_phys_segs = 0;
 	for_each_bio(bio) {
@@ -324,7 +323,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 			if (no_sg_merge)
 				goto new_segment;
 
-			if (prev && cluster) {
+			if (prev) {
 				if (seg_size + bv.bv_len
 				    > queue_max_segment_size(q))
 					goto new_segment;
@@ -395,9 +394,6 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 {
 	struct bio_vec end_bv = { NULL }, nxt_bv;
 
-	if (!blk_queue_cluster(q))
-		return 0;
-
 	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
 	    queue_max_segment_size(q))
 		return 0;
@@ -414,12 +410,12 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 static inline void
 __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
 		     struct scatterlist *sglist, struct bio_vec *bvprv,
-		     struct scatterlist **sg, int *nsegs, int *cluster)
+		     struct scatterlist **sg, int *nsegs)
 {
 	int nbytes = bvec->bv_len;
 
-	if (*sg && *cluster) {
+	if (*sg) {
 		if ((*sg)->length + nbytes > queue_max_segment_size(q))
 			goto new_segment;
 		if (!biovec_phys_mergeable(q, bvprv, bvec))
@@ -465,12 +461,12 @@ static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
 {
 	struct bio_vec bvec, bvprv = { NULL };
 	struct bvec_iter iter;
-	int cluster = blk_queue_cluster(q), nsegs = 0;
+	int nsegs = 0;
 
 	for_each_bio(bio)
 		bio_for_each_segment(bvec, bio, iter)
 			__blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
-					     &nsegs, &cluster);
+					     &nsegs);
 
 	return nsegs;
 }
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -109,7 +109,6 @@ void blk_set_default_limits(struct queue_limits *lim)
 	lim->alignment_offset = 0;
 	lim->io_opt = 0;
 	lim->misaligned = 0;
-	lim->cluster = 1;
 	lim->zoned = BLK_ZONED_NONE;
 }
 EXPORT_SYMBOL(blk_set_default_limits);
@@ -602,8 +601,6 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 	t->io_min = max(t->io_min, b->io_min);
 	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
 
-	t->cluster &= b->cluster;
-
 	/* Physical block size a multiple of the logical block size? */
 	if (t->physical_block_size & (t->logical_block_size - 1)) {
 		t->physical_block_size = t->logical_block_size;
...@@ -136,10 +136,7 @@ static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char * ...@@ -136,10 +136,7 @@ static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *
static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page) static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{ {
if (blk_queue_cluster(q)) return queue_var_show(queue_max_segment_size(q), (page));
return queue_var_show(queue_max_segment_size(q), (page));
return queue_var_show(PAGE_SIZE, (page));
} }
static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page) static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
......
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -389,7 +389,6 @@ struct queue_limits {
 	unsigned char		misaligned;
 	unsigned char		discard_misaligned;
-	unsigned char		cluster;
 	unsigned char		raid_partial_stripes_expensive;
 	enum blk_zoned_model	zoned;
 };
@@ -785,11 +784,6 @@ static inline bool queue_is_rq_based(struct request_queue *q)
 	return q->request_fn || q->mq_ops;
 }
 
-static inline unsigned int blk_queue_cluster(struct request_queue *q)
-{
-	return q->limits.cluster;
-}
-
 static inline enum blk_zoned_model
 blk_queue_zoned_model(struct request_queue *q)
 {