Commit e48994e5 authored by Mike Christie, committed by Ben Hutchings

target: Fix max_unmap_lba_count calc overflow

commit ea263c7f upstream.

max_discard_sectors is only 32 bits, and some non-SCSI backend
devices will set it to the max 0xffffffff, so we can end up
overflowing during the max_unmap_lba_count calculation.

This fixes a regression caused by my patch:

commit 8a9ebe71
Author: Mike Christie <mchristi@redhat.com>
Date:   Mon Jan 18 14:09:27 2016 -0600

    target: Fix WRITE_SAME/DISCARD conversion to linux 512b sectors

which can result in extra discards being sent due to the overflow
causing max_unmap_lba_count to be smaller than what the backing
device can actually support.
Signed-off-by: Mike Christie <mchristi@redhat.com>
Reviewed-by: Bart Van Assche <bart.vanassche@sandisk.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
parent 109e91db
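The overflow is easy to reproduce with concrete numbers. Below is a minimal userspace sketch (not part of the commit; the kernel's u32 is approximated with uint32_t and ilog2() with __builtin_ctz()) contrasting the old and new calculations for a 4096-byte block size:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t max_discard_sectors = 0xffffffff;  /* worst case set by non-SCSI backends */
	uint32_t block_size = 4096;                 /* logical block size, a power of two */

	/* Old formula: the left shift wraps in 32 bits (0xffffffff << 9 == 0xfffffe00),
	 * so the computed limit collapses to 0xfffff. */
	uint32_t overflowed = (max_discard_sectors << 9) / block_size;

	/* New formula: fold the << 9 (512-byte sectors) into a right shift by
	 * ilog2(block_size) - 9, so nothing is ever shifted left. */
	uint32_t fixed = max_discard_sectors >> (__builtin_ctz(block_size) - 9);

	assert(overflowed == 0xfffffu);
	assert(fixed == 0x1fffffffu);
	return 0;
}

With the old formula the result is roughly 512 times smaller than the real limit, which is exactly the extra-discards symptom described above.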
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -1583,13 +1583,15 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
 	 * in ATA and we need to set TPE=1
 	 */
 bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
-				       struct request_queue *q, int block_size)
+				       struct request_queue *q)
 {
+	int block_size = queue_logical_block_size(q);
+
 	if (!blk_queue_discard(q))
 		return false;
 
-	attrib->max_unmap_lba_count = (q->limits.max_discard_sectors << 9) /
-								block_size;
+	attrib->max_unmap_lba_count =
+		q->limits.max_discard_sectors >> (ilog2(block_size) - 9);
 	/*
 	 * Currently hardcoded to 1 in Linux/SCSI code..
 	 */
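The rewritten calculation assumes block_size is a power of two no smaller than 512 bytes, which holds for the logical block sizes queue_logical_block_size() returns here; ilog2(block_size) - 9 is then a small non-negative shift count (0 for 512-byte blocks, 3 for 4096-byte blocks), so the 32-bit max_discard_sectors is scaled to LBAs without ever being shifted left.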
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -165,8 +165,7 @@ static int fd_configure_device(struct se_device *dev)
 			dev_size, div_u64(dev_size, fd_dev->fd_block_size),
 			fd_dev->fd_block_size);
 
-		if (target_configure_unmap_from_queue(&dev->dev_attrib, q,
-						      fd_dev->fd_block_size))
+		if (target_configure_unmap_from_queue(&dev->dev_attrib, q))
 			pr_debug("IFILE: BLOCK Discard support available,"
 				 " disabled by default\n");
 		/*
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -126,8 +126,7 @@ static int iblock_configure_device(struct se_device *dev)
 	dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
 	dev->dev_attrib.hw_queue_depth = q->nr_requests;
 
-	if (target_configure_unmap_from_queue(&dev->dev_attrib, q,
-					      dev->dev_attrib.hw_block_size))
+	if (target_configure_unmap_from_queue(&dev->dev_attrib, q))
 		pr_debug("IBLOCK: BLOCK Discard support available,"
 			 " disabled by default\n");
 
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -97,6 +97,6 @@ sense_reason_t transport_generic_map_mem_to_cmd(struct se_cmd *,
 void	array_free(void *array, int n);
 sector_t target_to_linux_sector(struct se_device *dev, sector_t lb);
 bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
-				       struct request_queue *q, int block_size);
+				       struct request_queue *q);
 
 #endif /* TARGET_CORE_BACKEND_H */