Commit e94b45d0 authored by Christoph Hellwig, committed by Jens Axboe

block: move dma_pad_mask into queue_limits

dma_pad_mask is a queue limit by all ways of looking at it, so move it
into queue_limits and set it through the atomic queue limits APIs.

Add a little helper that takes both the alignment and the pad mask into
account to slightly simplify the code that is touched.

Note that there never was any need for the > check in
blk_queue_update_dma_pad; this was probably just copied and pasted from
blk_queue_update_dma_alignment.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Link: https://lore.kernel.org/r/20240626142637.300624-9-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent abfc9d81
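For readers unfamiliar with these masks: dma_alignment and dma_pad_mask are both low-bit masks, and the new blk_lim_dma_alignment_and_pad() simply ORs them, giving one mask that a user buffer's address and length must not intersect if the request is to be mapped directly (otherwise blk_rq_map_user_iov() falls back to a bounce copy). The following is a minimal userspace sketch of that check, using simplified stand-ins for the kernel structures; it is an illustration, not kernel code:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the two limits this patch touches. */
struct queue_limits {
        unsigned int dma_alignment;
        unsigned int dma_pad_mask;
};

/* Mirrors the new blk_lim_dma_alignment_and_pad(): OR the two masks. */
static unsigned int lim_dma_alignment_and_pad(const struct queue_limits *lim)
{
        return lim->dma_alignment | lim->dma_pad_mask;
}

/* Mirrors the blk_rq_aligned() check: neither the address nor the length
 * may have any bit of the combined mask set. */
static bool rq_aligned(const struct queue_limits *lim, unsigned long addr,
                       unsigned int len)
{
        unsigned int align = lim_dma_alignment_and_pad(lim);

        return !(addr & align) && !(len & align);
}

int main(void)
{
        struct queue_limits lim = { .dma_alignment = 3, .dma_pad_mask = 3 };

        printf("%d\n", rq_aligned(&lim, 0x1000, 512)); /* 1: address and length are 4-byte aligned */
        printf("%d\n", rq_aligned(&lim, 0x1002, 510)); /* 0: this request would need a bounce copy */
        return 0;
}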
@@ -312,7 +312,7 @@ int bio_integrity_map_user(struct bio *bio, void __user *ubuf, ssize_t bytes,
 		   u32 seed)
 {
 	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-	unsigned int align = q->dma_pad_mask | queue_dma_alignment(q);
+	unsigned int align = blk_lim_dma_alignment_and_pad(&q->limits);
 	struct page *stack_pages[UIO_FASTIOV], **pages = stack_pages;
 	struct bio_vec stack_vec[UIO_FASTIOV], *bvec = stack_vec;
 	unsigned int direction, nr_bvecs;
...
@@ -634,7 +634,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 		const struct iov_iter *iter, gfp_t gfp_mask)
 {
 	bool copy = false, map_bvec = false;
-	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
+	unsigned long align = blk_lim_dma_alignment_and_pad(&q->limits);
 	struct bio *bio = NULL;
 	struct iov_iter i;
 	int ret = -EINVAL;
...
@@ -768,23 +768,6 @@ bool queue_limits_stack_integrity(struct queue_limits *t,
 }
 EXPORT_SYMBOL_GPL(queue_limits_stack_integrity);
 
-/**
- * blk_queue_update_dma_pad - update pad mask
- * @q: the request queue for the device
- * @mask: pad mask
- *
- * Update dma pad mask.
- *
- * Appending pad buffer to a request modifies the last entry of a
- * scatter list such that it includes the pad buffer.
- **/
-void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
-{
-	if (mask > q->dma_pad_mask)
-		q->dma_pad_mask = mask;
-}
-EXPORT_SYMBOL(blk_queue_update_dma_pad);
-
 /**
  * blk_set_queue_depth - tell the block layer about the device queue depth
  * @q: the request queue for the device
...
@@ -1024,7 +1024,6 @@ EXPORT_SYMBOL_GPL(ata_scsi_dma_need_drain);
 int ata_scsi_dev_config(struct scsi_device *sdev, struct queue_limits *lim,
 		struct ata_device *dev)
 {
-	struct request_queue *q = sdev->request_queue;
 	int depth = 1;
 
 	if (!ata_id_has_unload(dev->id))
@@ -1038,7 +1037,7 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct queue_limits *lim,
 	sdev->sector_size = ATA_SECT_SIZE;
 
 	/* set DMA padding */
-	blk_queue_update_dma_pad(q, ATA_DMA_PAD_SZ - 1);
+	lim->dma_pad_mask = ATA_DMA_PAD_SZ - 1;
 
 	/* make room for appending the drain */
 	lim->max_segments--;
...
@@ -816,7 +816,7 @@ static int pata_macio_device_configure(struct scsi_device *sdev,
 	/* OHare has issues with non cache aligned DMA on some chipsets */
 	if (priv->kind == controller_ohare) {
 		lim->dma_alignment = 31;
-		blk_queue_update_dma_pad(sdev->request_queue, 31);
+		lim->dma_pad_mask = 31;
 
 		/* Tell the world about it */
 		ata_dev_info(dev, "OHare alignment limits applied\n");
@@ -831,7 +831,7 @@ static int pata_macio_device_configure(struct scsi_device *sdev,
 	if (priv->kind == controller_sh_ata6 || priv->kind == controller_k2_ata6) {
 		/* Allright these are bad, apply restrictions */
 		lim->dma_alignment = 15;
-		blk_queue_update_dma_pad(sdev->request_queue, 15);
+		lim->dma_pad_mask = 15;
 
 		/* We enable MWI and hack cache line size directly here, this
 		 * is specific to this chipset and not normal values, we happen
...
@@ -1139,9 +1139,9 @@ blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd)
 	 */
 	count = __blk_rq_map_sg(rq->q, rq, cmd->sdb.table.sgl, &last_sg);
 
-	if (blk_rq_bytes(rq) & rq->q->dma_pad_mask) {
+	if (blk_rq_bytes(rq) & rq->q->limits.dma_pad_mask) {
 		unsigned int pad_len =
-			(rq->q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
+			(rq->q->limits.dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
 
 		last_sg->length += pad_len;
 		cmd->extra_len += pad_len;
...
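The pad_len expression in this hunk rounds the request length up to the next multiple of (dma_pad_mask + 1): when the length is not already aligned, (mask & ~len) + 1 is exactly the number of bytes missing to reach the boundary. A small standalone check of that arithmetic, assuming ATA's pad mask of ATA_DMA_PAD_SZ - 1 = 3 as the sample value:

#include <stdio.h>

int main(void)
{
        unsigned int dma_pad_mask = 3; /* sample value: ATA_DMA_PAD_SZ - 1 */
        unsigned int lengths[] = { 1022, 1023, 1024 };

        for (unsigned int i = 0; i < 3; i++) {
                unsigned int len = lengths[i];

                if (len & dma_pad_mask) {
                        /* same formula as in scsi_alloc_sgtables() above */
                        unsigned int pad_len = (dma_pad_mask & ~len) + 1;

                        printf("%u -> pad %u -> %u\n", len, pad_len, len + pad_len);
                } else {
                        printf("%u needs no padding\n", len);
                }
        }
        return 0;
}

For a length of 1022 this prints a pad of 2 (padded total 1024), for 1023 a pad of 1, and 1024 needs no padding.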
@@ -5193,17 +5193,19 @@ static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
 }
 
 /**
- * ufshcd_slave_configure - adjust SCSI device configurations
+ * ufshcd_device_configure - adjust SCSI device configurations
  * @sdev: pointer to SCSI device
+ * @lim: queue limits
  *
  * Return: 0 (success).
  */
-static int ufshcd_slave_configure(struct scsi_device *sdev)
+static int ufshcd_device_configure(struct scsi_device *sdev,
+		struct queue_limits *lim)
 {
 	struct ufs_hba *hba = shost_priv(sdev->host);
 	struct request_queue *q = sdev->request_queue;
 
-	blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
+	lim->dma_pad_mask = PRDT_DATA_BYTE_COUNT_PAD - 1;
 
 	/*
 	 * Block runtime-pm until all consumers are added.
@@ -8907,7 +8909,7 @@ static const struct scsi_host_template ufshcd_driver_template = {
 	.queuecommand = ufshcd_queuecommand,
 	.mq_poll = ufshcd_poll,
 	.slave_alloc = ufshcd_slave_alloc,
-	.slave_configure = ufshcd_slave_configure,
+	.device_configure = ufshcd_device_configure,
 	.slave_destroy = ufshcd_slave_destroy,
 	.change_queue_depth = ufshcd_change_queue_depth,
 	.eh_abort_handler = ufshcd_abort,
...
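The ufshcd hunks also show the conversion this kind of change requires: because the pad mask is now a limit, it has to be filled in from a callback that receives the queue_limits being built, i.e. ->device_configure rather than the old ->slave_configure. A schematic sketch of that pattern for a hypothetical low-level driver (not a buildable driver; the foo_* names and the FOO_DMA_PAD_SZ constant are invented for illustration):

/* Schematic only: a hypothetical driver's ->device_configure hook following
 * the ufshcd conversion above; FOO_DMA_PAD_SZ is an invented constant. */
static int foo_device_configure(struct scsi_device *sdev,
                                struct queue_limits *lim)
{
        /* The pad mask is now part of the limits handed in by the midlayer. */
        lim->dma_pad_mask = FOO_DMA_PAD_SZ - 1;
        return 0;
}

static const struct scsi_host_template foo_template = {
        /* ... */
        .device_configure = foo_device_configure,
        /* ... */
};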
@@ -401,6 +401,7 @@ struct queue_limits {
 	 * due to possible offsets.
 	 */
 	unsigned int		dma_alignment;
+	unsigned int		dma_pad_mask;
 
 	struct blk_integrity	integrity;
 };
@@ -509,8 +510,6 @@ struct request_queue {
 	 */
 	int			id;
 
-	unsigned int		dma_pad_mask;
-
 	/*
 	 * queue settings
 	 */
@@ -981,7 +980,6 @@ extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 		sector_t offset);
 void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
 		sector_t offset, const char *pfx);
-extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
 
 struct blk_independent_access_ranges *
@@ -1433,10 +1431,16 @@ static inline bool bdev_iter_is_aligned(struct block_device *bdev,
 				   bdev_logical_block_size(bdev) - 1);
 }
 
+static inline int blk_lim_dma_alignment_and_pad(struct queue_limits *lim)
+{
+	return lim->dma_alignment | lim->dma_pad_mask;
+}
+
 static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
 				 unsigned int len)
 {
-	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
+	unsigned int alignment = blk_lim_dma_alignment_and_pad(&q->limits);
+
 	return !(addr & alignment) && !(len & alignment);
 }
...