Commit 7007e9dd authored by Christoph Hellwig, committed by Martin K. Petersen

scsi: core: Clean up allocation and freeing of sgtables

Rename scsi_init_io() to scsi_alloc_sgtables(), and ensure callers call
scsi_free_sgtables() to cleanup failures close to scsi_init_io() instead of
leaking it down the generic I/O submission path.

Link: https://lore.kernel.org/r/20201005084130.143273-9-hch@lst.de
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 5843cc3d
...@@ -515,7 +515,7 @@ static void scsi_uninit_cmd(struct scsi_cmnd *cmd) ...@@ -515,7 +515,7 @@ static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
} }
} }
static void scsi_free_sgtables(struct scsi_cmnd *cmd) void scsi_free_sgtables(struct scsi_cmnd *cmd)
{ {
if (cmd->sdb.table.nents) if (cmd->sdb.table.nents)
sg_free_table_chained(&cmd->sdb.table, sg_free_table_chained(&cmd->sdb.table,
...@@ -524,6 +524,7 @@ static void scsi_free_sgtables(struct scsi_cmnd *cmd) ...@@ -524,6 +524,7 @@ static void scsi_free_sgtables(struct scsi_cmnd *cmd)
sg_free_table_chained(&cmd->prot_sdb->table, sg_free_table_chained(&cmd->prot_sdb->table,
SCSI_INLINE_PROT_SG_CNT); SCSI_INLINE_PROT_SG_CNT);
} }
EXPORT_SYMBOL_GPL(scsi_free_sgtables);
static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd) static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
{ {
...@@ -983,7 +984,7 @@ static inline bool scsi_cmd_needs_dma_drain(struct scsi_device *sdev, ...@@ -983,7 +984,7 @@ static inline bool scsi_cmd_needs_dma_drain(struct scsi_device *sdev,
} }
/** /**
* scsi_init_io - SCSI I/O initialization function. * scsi_alloc_sgtables - allocate S/G tables for a command
* @cmd: command descriptor we wish to initialize * @cmd: command descriptor we wish to initialize
* *
* Returns: * Returns:
...@@ -991,7 +992,7 @@ static inline bool scsi_cmd_needs_dma_drain(struct scsi_device *sdev, ...@@ -991,7 +992,7 @@ static inline bool scsi_cmd_needs_dma_drain(struct scsi_device *sdev,
* * BLK_STS_RESOURCE - if the failure is retryable * * BLK_STS_RESOURCE - if the failure is retryable
* * BLK_STS_IOERR - if the failure is fatal * * BLK_STS_IOERR - if the failure is fatal
*/ */
blk_status_t scsi_init_io(struct scsi_cmnd *cmd) blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd)
{ {
struct scsi_device *sdev = cmd->device; struct scsi_device *sdev = cmd->device;
struct request *rq = cmd->request; struct request *rq = cmd->request;
...@@ -1083,7 +1084,7 @@ blk_status_t scsi_init_io(struct scsi_cmnd *cmd) ...@@ -1083,7 +1084,7 @@ blk_status_t scsi_init_io(struct scsi_cmnd *cmd)
scsi_free_sgtables(cmd); scsi_free_sgtables(cmd);
return ret; return ret;
} }
EXPORT_SYMBOL(scsi_init_io); EXPORT_SYMBOL(scsi_alloc_sgtables);
/** /**
* scsi_initialize_rq - initialize struct scsi_cmnd partially * scsi_initialize_rq - initialize struct scsi_cmnd partially
...@@ -1171,7 +1172,7 @@ static blk_status_t scsi_setup_scsi_cmnd(struct scsi_device *sdev, ...@@ -1171,7 +1172,7 @@ static blk_status_t scsi_setup_scsi_cmnd(struct scsi_device *sdev,
* submit a request without an attached bio. * submit a request without an attached bio.
*/ */
if (req->bio) { if (req->bio) {
blk_status_t ret = scsi_init_io(cmd); blk_status_t ret = scsi_alloc_sgtables(cmd);
if (unlikely(ret != BLK_STS_OK)) if (unlikely(ret != BLK_STS_OK))
return ret; return ret;
} else { } else {
...@@ -1213,19 +1214,12 @@ static blk_status_t scsi_setup_cmnd(struct scsi_device *sdev, ...@@ -1213,19 +1214,12 @@ static blk_status_t scsi_setup_cmnd(struct scsi_device *sdev,
struct request *req) struct request *req)
{ {
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req); struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
blk_status_t ret;
cmd->sc_data_direction = rq_dma_dir(req); cmd->sc_data_direction = rq_dma_dir(req);
if (blk_rq_is_scsi(req)) if (blk_rq_is_scsi(req))
ret = scsi_setup_scsi_cmnd(sdev, req); return scsi_setup_scsi_cmnd(sdev, req);
else return scsi_setup_fs_cmnd(sdev, req);
ret = scsi_setup_fs_cmnd(sdev, req);
if (ret != BLK_STS_OK)
scsi_free_sgtables(cmd);
return ret;
} }
static blk_status_t static blk_status_t
......
...@@ -902,7 +902,7 @@ static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd) ...@@ -902,7 +902,7 @@ static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
cmd->transfersize = data_len; cmd->transfersize = data_len;
rq->timeout = SD_TIMEOUT; rq->timeout = SD_TIMEOUT;
return scsi_init_io(cmd); return scsi_alloc_sgtables(cmd);
} }
static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd, static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd,
...@@ -934,7 +934,7 @@ static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd, ...@@ -934,7 +934,7 @@ static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd,
cmd->transfersize = data_len; cmd->transfersize = data_len;
rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT; rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT;
return scsi_init_io(cmd); return scsi_alloc_sgtables(cmd);
} }
static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd, static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd,
...@@ -966,7 +966,7 @@ static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd, ...@@ -966,7 +966,7 @@ static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd,
cmd->transfersize = data_len; cmd->transfersize = data_len;
rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT; rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT;
return scsi_init_io(cmd); return scsi_alloc_sgtables(cmd);
} }
static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd) static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd)
...@@ -1107,7 +1107,7 @@ static blk_status_t sd_setup_write_same_cmnd(struct scsi_cmnd *cmd) ...@@ -1107,7 +1107,7 @@ static blk_status_t sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
* knows how much to actually write. * knows how much to actually write.
*/ */
rq->__data_len = sdp->sector_size; rq->__data_len = sdp->sector_size;
ret = scsi_init_io(cmd); ret = scsi_alloc_sgtables(cmd);
rq->__data_len = blk_rq_bytes(rq); rq->__data_len = blk_rq_bytes(rq);
return ret; return ret;
...@@ -1226,23 +1226,24 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd) ...@@ -1226,23 +1226,24 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
unsigned int dif; unsigned int dif;
bool dix; bool dix;
ret = scsi_init_io(cmd); ret = scsi_alloc_sgtables(cmd);
if (ret != BLK_STS_OK) if (ret != BLK_STS_OK)
return ret; return ret;
ret = BLK_STS_IOERR;
if (!scsi_device_online(sdp) || sdp->changed) { if (!scsi_device_online(sdp) || sdp->changed) {
scmd_printk(KERN_ERR, cmd, "device offline or changed\n"); scmd_printk(KERN_ERR, cmd, "device offline or changed\n");
return BLK_STS_IOERR; goto fail;
} }
if (blk_rq_pos(rq) + blk_rq_sectors(rq) > get_capacity(rq->rq_disk)) { if (blk_rq_pos(rq) + blk_rq_sectors(rq) > get_capacity(rq->rq_disk)) {
scmd_printk(KERN_ERR, cmd, "access beyond end of device\n"); scmd_printk(KERN_ERR, cmd, "access beyond end of device\n");
return BLK_STS_IOERR; goto fail;
} }
if ((blk_rq_pos(rq) & mask) || (blk_rq_sectors(rq) & mask)) { if ((blk_rq_pos(rq) & mask) || (blk_rq_sectors(rq) & mask)) {
scmd_printk(KERN_ERR, cmd, "request not aligned to the logical block size\n"); scmd_printk(KERN_ERR, cmd, "request not aligned to the logical block size\n");
return BLK_STS_IOERR; goto fail;
} }
/* /*
...@@ -1264,7 +1265,7 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd) ...@@ -1264,7 +1265,7 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
if (req_op(rq) == REQ_OP_ZONE_APPEND) { if (req_op(rq) == REQ_OP_ZONE_APPEND) {
ret = sd_zbc_prepare_zone_append(cmd, &lba, nr_blocks); ret = sd_zbc_prepare_zone_append(cmd, &lba, nr_blocks);
if (ret) if (ret)
return ret; goto fail;
} }
fua = rq->cmd_flags & REQ_FUA ? 0x8 : 0; fua = rq->cmd_flags & REQ_FUA ? 0x8 : 0;
...@@ -1292,7 +1293,7 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd) ...@@ -1292,7 +1293,7 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
} }
if (unlikely(ret != BLK_STS_OK)) if (unlikely(ret != BLK_STS_OK))
return ret; goto fail;
/* /*
* We shouldn't disconnect in the middle of a sector, so with a dumb * We shouldn't disconnect in the middle of a sector, so with a dumb
...@@ -1316,10 +1317,12 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd) ...@@ -1316,10 +1317,12 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
blk_rq_sectors(rq))); blk_rq_sectors(rq)));
/* /*
* This indicates that the command is ready from our end to be * This indicates that the command is ready from our end to be queued.
* queued.
*/ */
return BLK_STS_OK; return BLK_STS_OK;
fail:
scsi_free_sgtables(cmd);
return ret;
} }
static blk_status_t sd_init_command(struct scsi_cmnd *cmd) static blk_status_t sd_init_command(struct scsi_cmnd *cmd)
......
...@@ -392,15 +392,11 @@ static blk_status_t sr_init_command(struct scsi_cmnd *SCpnt) ...@@ -392,15 +392,11 @@ static blk_status_t sr_init_command(struct scsi_cmnd *SCpnt)
struct request *rq = SCpnt->request; struct request *rq = SCpnt->request;
blk_status_t ret; blk_status_t ret;
ret = scsi_init_io(SCpnt); ret = scsi_alloc_sgtables(SCpnt);
if (ret != BLK_STS_OK) if (ret != BLK_STS_OK)
goto out; return ret;
cd = scsi_cd(rq->rq_disk); cd = scsi_cd(rq->rq_disk);
/* from here on until we're complete, any goto out
* is used for a killable error condition */
ret = BLK_STS_IOERR;
SCSI_LOG_HLQUEUE(1, scmd_printk(KERN_INFO, SCpnt, SCSI_LOG_HLQUEUE(1, scmd_printk(KERN_INFO, SCpnt,
"Doing sr request, block = %d\n", block)); "Doing sr request, block = %d\n", block));
...@@ -509,12 +505,12 @@ static blk_status_t sr_init_command(struct scsi_cmnd *SCpnt) ...@@ -509,12 +505,12 @@ static blk_status_t sr_init_command(struct scsi_cmnd *SCpnt)
SCpnt->allowed = MAX_RETRIES; SCpnt->allowed = MAX_RETRIES;
/* /*
* This indicates that the command is ready from our end to be * This indicates that the command is ready from our end to be queued.
* queued.
*/ */
ret = BLK_STS_OK; return BLK_STS_OK;
out: out:
return ret; scsi_free_sgtables(SCpnt);
return BLK_STS_IOERR;
} }
static int sr_block_open(struct block_device *bdev, fmode_t mode) static int sr_block_open(struct block_device *bdev, fmode_t mode)
......
...@@ -165,7 +165,8 @@ extern void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count, ...@@ -165,7 +165,8 @@ extern void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
size_t *offset, size_t *len); size_t *offset, size_t *len);
extern void scsi_kunmap_atomic_sg(void *virt); extern void scsi_kunmap_atomic_sg(void *virt);
extern blk_status_t scsi_init_io(struct scsi_cmnd *cmd); blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd);
void scsi_free_sgtables(struct scsi_cmnd *cmd);
#ifdef CONFIG_SCSI_DMA #ifdef CONFIG_SCSI_DMA
extern int scsi_dma_map(struct scsi_cmnd *cmd); extern int scsi_dma_map(struct scsi_cmnd *cmd);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment