Commit 75ca5640 authored by Christoph Hellwig, committed by Martin K. Petersen

scsi: bsg: Move the whole request execution into the SCSI/transport handlers

Reduce the number of indirect calls by making the handler responsible for
the entire execution of the request.

Link: https://lore.kernel.org/r/20210729064845.1044147-5-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 1e61c1a8
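
In outline, the change collapses the four-callback vtable that bsg.c used to
drive each SG_IO request into a single handler that owns the request from
allocation to completion. The two shapes below are condensed from the hunks
that follow:

	/* Before: bsg.c made four indirect calls per request. */
	struct bsg_ops {
		int	(*check_proto)(struct sg_io_v4 *hdr);
		int	(*fill_hdr)(struct request *rq, struct sg_io_v4 *hdr,
				fmode_t mode);
		int	(*complete_rq)(struct request *rq, struct sg_io_v4 *hdr);
		void	(*free_rq)(struct request *rq);
	};

	/* After: one indirect call; the SCSI/transport handler allocates,
	 * maps, executes, completes and frees the request itself. */
	typedef int (bsg_sg_io_fn)(struct request_queue *, struct sg_io_v4 *hdr,
			fmode_t mode, unsigned int timeout);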
--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -25,32 +25,39 @@ struct bsg_set {
 	bsg_timeout_fn	*timeout_fn;
 };
 
-static int bsg_transport_check_proto(struct sg_io_v4 *hdr)
+static int bsg_transport_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr,
+		fmode_t mode, unsigned int timeout)
 {
+	struct bsg_job *job;
+	struct request *rq;
+	struct bio *bio;
+	int ret;
+
 	if (hdr->protocol != BSG_PROTOCOL_SCSI ||
 	    hdr->subprotocol != BSG_SUB_PROTOCOL_SCSI_TRANSPORT)
 		return -EINVAL;
 	if (!capable(CAP_SYS_RAWIO))
 		return -EPERM;
-	return 0;
-}
 
-static int bsg_transport_fill_hdr(struct request *rq, struct sg_io_v4 *hdr,
-		fmode_t mode)
-{
-	struct bsg_job *job = blk_mq_rq_to_pdu(rq);
-	int ret;
+	rq = blk_get_request(q, hdr->dout_xfer_len ?
+			REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
+	if (IS_ERR(rq))
+		return PTR_ERR(rq);
+	rq->timeout = timeout;
 
+	job = blk_mq_rq_to_pdu(rq);
 	job->request_len = hdr->request_len;
 	job->request = memdup_user(uptr64(hdr->request), hdr->request_len);
-	if (IS_ERR(job->request))
-		return PTR_ERR(job->request);
+	if (IS_ERR(job->request)) {
+		ret = PTR_ERR(job->request);
+		goto out_put_request;
+	}
 
 	if (hdr->dout_xfer_len && hdr->din_xfer_len) {
 		job->bidi_rq = blk_get_request(rq->q, REQ_OP_DRV_IN, 0);
 		if (IS_ERR(job->bidi_rq)) {
 			ret = PTR_ERR(job->bidi_rq);
-			goto out;
+			goto out_free_job_request;
 		}
 
 		ret = blk_rq_map_user(rq->q, job->bidi_rq, NULL,
@@ -65,20 +72,19 @@ static int bsg_transport_fill_hdr(struct request *rq, struct sg_io_v4 *hdr,
 		job->bidi_bio = NULL;
 	}
 
-	return 0;
+	if (hdr->dout_xfer_len) {
+		ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->dout_xferp),
+				hdr->dout_xfer_len, GFP_KERNEL);
+	} else if (hdr->din_xfer_len) {
+		ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->din_xferp),
+				hdr->din_xfer_len, GFP_KERNEL);
+	}
 
-out_free_bidi_rq:
-	if (job->bidi_rq)
-		blk_put_request(job->bidi_rq);
-out:
-	kfree(job->request);
-	return ret;
-}
+	if (ret)
+		goto out_unmap_bidi_rq;
 
-static int bsg_transport_complete_rq(struct request *rq, struct sg_io_v4 *hdr)
-{
-	struct bsg_job *job = blk_mq_rq_to_pdu(rq);
-	int ret = 0;
+	bio = rq->bio;
+	blk_execute_rq(NULL, rq, !(hdr->flags & BSG_FLAG_Q_AT_TAIL));
 
 	/*
 	 * The assignments below don't make much sense, but are kept for
@@ -121,28 +127,20 @@ static int bsg_transport_complete_rq(struct request *rq, struct sg_io_v4 *hdr)
 		hdr->din_resid = 0;
 	}
 
-	return ret;
-}
-
-static void bsg_transport_free_rq(struct request *rq)
-{
-	struct bsg_job *job = blk_mq_rq_to_pdu(rq);
-
-	if (job->bidi_rq) {
+	blk_rq_unmap_user(bio);
+out_unmap_bidi_rq:
+	if (job->bidi_rq)
 		blk_rq_unmap_user(job->bidi_bio);
+out_free_bidi_rq:
+	if (job->bidi_rq)
 		blk_put_request(job->bidi_rq);
-	}
-
+out_free_job_request:
 	kfree(job->request);
+out_put_request:
+	blk_put_request(rq);
+	return ret;
 }
 
-static const struct bsg_ops bsg_transport_ops = {
-	.check_proto		= bsg_transport_check_proto,
-	.fill_hdr		= bsg_transport_fill_hdr,
-	.complete_rq		= bsg_transport_complete_rq,
-	.free_rq		= bsg_transport_free_rq,
-};
-
 /**
  * bsg_teardown_job - routine to teardown a bsg job
  * @kref: kref inside bsg_job that is to be torn down
@@ -398,7 +396,7 @@ struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
 	q->queuedata = dev;
 	blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
 
-	bset->bd = bsg_register_queue(q, dev, name, &bsg_transport_ops);
+	bset->bd = bsg_register_queue(q, dev, name, bsg_transport_sg_io_fn);
 	if (IS_ERR(bset->bd)) {
 		ret = PTR_ERR(bset->bd);
 		goto out_cleanup_queue;
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -22,12 +22,12 @@
 struct bsg_device {
 	struct request_queue *queue;
-	const struct bsg_ops *ops;
 
 	struct device device;
 	struct cdev cdev;
 	int max_queue;
 	unsigned int timeout;
 	unsigned int reserved_size;
+	bsg_sg_io_fn *sg_io_fn;
 };
 
 static inline struct bsg_device *to_bsg_device(struct inode *inode)
@@ -42,63 +42,28 @@ static DEFINE_IDA(bsg_minor_ida);
 static struct class *bsg_class;
 static int bsg_major;
 
-#define uptr64(val) ((void __user *)(uintptr_t)(val))
+static unsigned int bsg_timeout(struct bsg_device *bd, struct sg_io_v4 *hdr)
+{
+	unsigned int timeout = BLK_DEFAULT_SG_TIMEOUT;
+
+	if (hdr->timeout)
+		timeout = msecs_to_jiffies(hdr->timeout);
+	else if (bd->timeout)
+		timeout = bd->timeout;
+	return max_t(unsigned int, timeout, BLK_MIN_SG_TIMEOUT);
+}
 
 static int bsg_sg_io(struct bsg_device *bd, fmode_t mode, void __user *uarg)
 {
-	struct request *rq;
-	struct bio *bio;
 	struct sg_io_v4 hdr;
 	int ret;
 
 	if (copy_from_user(&hdr, uarg, sizeof(hdr)))
 		return -EFAULT;
 	if (hdr.guard != 'Q')
 		return -EINVAL;
 
-	ret = bd->ops->check_proto(&hdr);
-	if (ret)
-		return ret;
-
-	rq = blk_get_request(bd->queue, hdr.dout_xfer_len ?
-			REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
-	if (IS_ERR(rq))
-		return PTR_ERR(rq);
-
-	ret = bd->ops->fill_hdr(rq, &hdr, mode);
-	if (ret) {
-		blk_put_request(rq);
-		return ret;
-	}
-
-	rq->timeout = msecs_to_jiffies(hdr.timeout);
-	if (!rq->timeout)
-		rq->timeout = bd->timeout;
-	if (!rq->timeout)
-		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
-	if (rq->timeout < BLK_MIN_SG_TIMEOUT)
-		rq->timeout = BLK_MIN_SG_TIMEOUT;
-
-	if (hdr.dout_xfer_len) {
-		ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr.dout_xferp),
-				hdr.dout_xfer_len, GFP_KERNEL);
-	} else if (hdr.din_xfer_len) {
-		ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr.din_xferp),
-				hdr.din_xfer_len, GFP_KERNEL);
-	}
-
-	if (ret)
-		goto out_free_rq;
-
-	bio = rq->bio;
-	blk_execute_rq(NULL, rq, !(hdr.flags & BSG_FLAG_Q_AT_TAIL));
-
-	ret = bd->ops->complete_rq(rq, &hdr);
-	blk_rq_unmap_user(bio);
-out_free_rq:
-	bd->ops->free_rq(rq);
-	blk_put_request(rq);
-
+	ret = bd->sg_io_fn(bd->queue, &hdr, mode, bsg_timeout(bd, &hdr));
 	if (!ret && copy_to_user(uarg, &hdr, sizeof(hdr)))
 		return -EFAULT;
 	return ret;
@@ -211,8 +176,7 @@ void bsg_unregister_queue(struct bsg_device *bd)
 EXPORT_SYMBOL_GPL(bsg_unregister_queue);
 
 struct bsg_device *bsg_register_queue(struct request_queue *q,
-		struct device *parent, const char *name,
-		const struct bsg_ops *ops)
+		struct device *parent, const char *name, bsg_sg_io_fn *sg_io_fn)
 {
 	struct bsg_device *bd;
 	int ret;
@@ -223,7 +187,7 @@ struct bsg_device *bsg_register_queue(struct request_queue *q,
 	bd->max_queue = BSG_DEFAULT_CMDS;
 	bd->reserved_size = INT_MAX;
 	bd->queue = q;
-	bd->ops = ops;
+	bd->sg_io_fn = sg_io_fn;
 
 	ret = ida_simple_get(&bsg_minor_ida, 0, BSG_MAX_DEVS, GFP_KERNEL);
 	if (ret < 0) {
--- a/drivers/scsi/scsi_bsg.c
+++ b/drivers/scsi/scsi_bsg.c
@@ -9,42 +9,57 @@
 #define uptr64(val) ((void __user *)(uintptr_t)(val))
 
-static int scsi_bsg_check_proto(struct sg_io_v4 *hdr)
+static int scsi_bsg_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr,
+		fmode_t mode, unsigned int timeout)
 {
+	struct scsi_request *sreq;
+	struct request *rq;
+	struct bio *bio;
+	int ret;
+
 	if (hdr->protocol != BSG_PROTOCOL_SCSI ||
 	    hdr->subprotocol != BSG_SUB_PROTOCOL_SCSI_CMD)
 		return -EINVAL;
-	return 0;
-}
-
-static int scsi_bsg_fill_hdr(struct request *rq, struct sg_io_v4 *hdr,
-		fmode_t mode)
-{
-	struct scsi_request *sreq = scsi_req(rq);
-
 	if (hdr->dout_xfer_len && hdr->din_xfer_len) {
 		pr_warn_once("BIDI support in bsg has been removed.\n");
 		return -EOPNOTSUPP;
 	}
 
+	rq = blk_get_request(q, hdr->dout_xfer_len ?
+			REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
+	if (IS_ERR(rq))
+		return PTR_ERR(rq);
+	rq->timeout = timeout;
+
+	ret = -ENOMEM;
+	sreq = scsi_req(rq);
 	sreq->cmd_len = hdr->request_len;
 	if (sreq->cmd_len > BLK_MAX_CDB) {
 		sreq->cmd = kzalloc(sreq->cmd_len, GFP_KERNEL);
 		if (!sreq->cmd)
-			return -ENOMEM;
+			goto out_put_request;
 	}
 
+	ret = -EFAULT;
 	if (copy_from_user(sreq->cmd, uptr64(hdr->request), sreq->cmd_len))
-		return -EFAULT;
+		goto out_free_cmd;
+	ret = -EPERM;
 	if (!scsi_cmd_allowed(sreq->cmd, mode))
-		return -EPERM;
-	return 0;
-}
+		goto out_free_cmd;
 
-static int scsi_bsg_complete_rq(struct request *rq, struct sg_io_v4 *hdr)
-{
-	struct scsi_request *sreq = scsi_req(rq);
-	int ret = 0;
+	if (hdr->dout_xfer_len) {
+		ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->dout_xferp),
+				hdr->dout_xfer_len, GFP_KERNEL);
+	} else if (hdr->din_xfer_len) {
+		ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->din_xferp),
+				hdr->din_xfer_len, GFP_KERNEL);
+	}
+
+	if (ret)
+		goto out_free_cmd;
+
+	bio = rq->bio;
+	blk_execute_rq(NULL, rq, !(hdr->flags & BSG_FLAG_Q_AT_TAIL));
 
 	/*
 	 * fill in all the output members
@@ -74,23 +89,17 @@ static int scsi_bsg_complete_rq(struct request *rq, struct sg_io_v4 *hdr)
 	else
 		hdr->dout_resid = sreq->resid_len;
 
-	return ret;
-}
-
-static void scsi_bsg_free_rq(struct request *rq)
-{
+	blk_rq_unmap_user(bio);
+
+out_free_cmd:
 	scsi_req_free_cmd(scsi_req(rq));
+out_put_request:
+	blk_put_request(rq);
+	return ret;
 }
 
-static const struct bsg_ops scsi_bsg_ops = {
-	.check_proto		= scsi_bsg_check_proto,
-	.fill_hdr		= scsi_bsg_fill_hdr,
-	.complete_rq		= scsi_bsg_complete_rq,
-	.free_rq		= scsi_bsg_free_rq,
-};
-
 struct bsg_device *scsi_bsg_register_queue(struct scsi_device *sdev)
 {
 	return bsg_register_queue(sdev->request_queue, &sdev->sdev_gendev,
-			dev_name(&sdev->sdev_gendev), &scsi_bsg_ops);
+			dev_name(&sdev->sdev_gendev), scsi_bsg_sg_io_fn);
 }
--- a/include/linux/bsg.h
+++ b/include/linux/bsg.h
@@ -6,20 +6,14 @@
 
 struct bsg_device;
 struct device;
-struct request;
 struct request_queue;
 
-struct bsg_ops {
-	int	(*check_proto)(struct sg_io_v4 *hdr);
-	int	(*fill_hdr)(struct request *rq, struct sg_io_v4 *hdr,
-			fmode_t mode);
-	int	(*complete_rq)(struct request *rq, struct sg_io_v4 *hdr);
-	void	(*free_rq)(struct request *rq);
-};
+typedef int (bsg_sg_io_fn)(struct request_queue *, struct sg_io_v4 *hdr,
+		fmode_t mode, unsigned int timeout);
 
 struct bsg_device *bsg_register_queue(struct request_queue *q,
 		struct device *parent, const char *name,
-		const struct bsg_ops *ops);
+		bsg_sg_io_fn *sg_io_fn);
 void bsg_unregister_queue(struct bsg_device *bcd);
 
 #endif /* _LINUX_BSG_H */
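
Both new handlers follow the same skeleton, which the old code split across
check_proto/fill_hdr/complete_rq/free_rq. A minimal sketch of that shared
shape, shown for the write (dout) direction only; example_sg_io_fn is a
hypothetical name, and the real handlers above add protocol validation,
payload copy-in, result fill-in and finer-grained unwinding:

	static int example_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr,
			fmode_t mode, unsigned int timeout)
	{
		struct request *rq;
		struct bio *bio;
		int ret;

		/* 1. Validate the header and permissions (was ->check_proto). */

		/* 2. Allocate the request; the bsg core no longer touches it. */
		rq = blk_get_request(q, REQ_OP_DRV_OUT, 0);
		if (IS_ERR(rq))
			return PTR_ERR(rq);
		rq->timeout = timeout;

		/* 3. Copy in the payload and map the user buffer (was ->fill_hdr). */
		ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr->dout_xferp),
				hdr->dout_xfer_len, GFP_KERNEL);
		if (ret)
			goto out_put_request;

		/* 4. Execute synchronously and fill in the result members
		 *    (was ->complete_rq). */
		bio = rq->bio;
		blk_execute_rq(NULL, rq, !(hdr->flags & BSG_FLAG_Q_AT_TAIL));

		/* 5. Unwind in reverse order (was ->free_rq plus core cleanup). */
		blk_rq_unmap_user(bio);
	out_put_request:
		blk_put_request(rq);
		return ret;
	}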