Commit 1c63dc66 authored by Christoph Hellwig, committed by Jens Axboe

nvme: split a new struct nvme_ctrl out of struct nvme_dev

The new struct nvme_ctrl will be used by the common NVMe code that sits
on top of struct request_queue and the new nvme_ctrl_ops abstraction.
It only contains the bare minimum required, which consists of values
sampled during controller probe, the admin queue pointer and a second
struct device pointer at the moment, but more will follow later.  Only
values that are not used in the I/O fast path should be moved to
struct nvme_ctrl so that drivers can optimize their cache line usage
easily.  That's also the reason why we have two device pointers as
the struct device is used for DMA mapping purposes.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 01fec28a
...@@ -79,7 +79,7 @@ int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, ...@@ -79,7 +79,7 @@ int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
return __nvme_submit_sync_cmd(q, cmd, buffer, NULL, bufflen, NULL, 0); return __nvme_submit_sync_cmd(q, cmd, buffer, NULL, bufflen, NULL, 0);
} }
int nvme_identify_ctrl(struct nvme_dev *dev, struct nvme_id_ctrl **id) int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
{ {
struct nvme_command c = { }; struct nvme_command c = { };
int error; int error;
...@@ -99,7 +99,7 @@ int nvme_identify_ctrl(struct nvme_dev *dev, struct nvme_id_ctrl **id) ...@@ -99,7 +99,7 @@ int nvme_identify_ctrl(struct nvme_dev *dev, struct nvme_id_ctrl **id)
return error; return error;
} }
int nvme_identify_ns(struct nvme_dev *dev, unsigned nsid, int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
struct nvme_id_ns **id) struct nvme_id_ns **id)
{ {
struct nvme_command c = { }; struct nvme_command c = { };
...@@ -120,7 +120,7 @@ int nvme_identify_ns(struct nvme_dev *dev, unsigned nsid, ...@@ -120,7 +120,7 @@ int nvme_identify_ns(struct nvme_dev *dev, unsigned nsid,
return error; return error;
} }
int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid, int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
dma_addr_t dma_addr, u32 *result) dma_addr_t dma_addr, u32 *result)
{ {
struct nvme_command c; struct nvme_command c;
...@@ -135,7 +135,7 @@ int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid, ...@@ -135,7 +135,7 @@ int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
result, 0); result, 0);
} }
int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11, int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
dma_addr_t dma_addr, u32 *result) dma_addr_t dma_addr, u32 *result)
{ {
struct nvme_command c; struct nvme_command c;
...@@ -150,7 +150,7 @@ int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11, ...@@ -150,7 +150,7 @@ int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
result, 0); result, 0);
} }
int nvme_get_log_page(struct nvme_dev *dev, struct nvme_smart_log **log) int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log)
{ {
struct nvme_command c = { }; struct nvme_command c = { };
int error; int error;
......
...@@ -30,46 +30,16 @@ enum { ...@@ -30,46 +30,16 @@ enum {
NVME_NS_LIGHTNVM = 1, NVME_NS_LIGHTNVM = 1,
}; };
/* struct nvme_ctrl {
* Represents an NVM Express device. Each nvme_dev is a PCI function. const struct nvme_ctrl_ops *ops;
*/
struct nvme_dev {
struct list_head node;
struct nvme_queue **queues;
struct request_queue *admin_q; struct request_queue *admin_q;
struct blk_mq_tag_set tagset;
struct blk_mq_tag_set admin_tagset;
u32 __iomem *dbs;
struct device *dev; struct device *dev;
struct dma_pool *prp_page_pool;
struct dma_pool *prp_small_pool;
int instance; int instance;
unsigned queue_count;
unsigned online_queues;
unsigned max_qid;
int q_depth;
u32 db_stride;
u32 ctrl_config;
struct msix_entry *entry;
void __iomem *bar;
struct list_head namespaces;
struct kref kref;
struct device *device;
struct work_struct reset_work;
struct work_struct probe_work;
struct work_struct scan_work;
char name[12]; char name[12];
char serial[20]; char serial[20];
char model[40]; char model[40];
char firmware_rev[8]; char firmware_rev[8];
bool subsystem;
u32 max_hw_sectors;
u32 stripe_size;
u32 page_size;
void __iomem *cmb;
dma_addr_t cmb_dma_addr;
u64 cmb_size;
u32 cmbsz;
u16 oncs; u16 oncs;
u16 abort_limit; u16 abort_limit;
u8 event_limit; u8 event_limit;
...@@ -82,7 +52,7 @@ struct nvme_dev { ...@@ -82,7 +52,7 @@ struct nvme_dev {
struct nvme_ns { struct nvme_ns {
struct list_head list; struct list_head list;
struct nvme_dev *dev; struct nvme_ctrl *ctrl;
struct request_queue *queue; struct request_queue *queue;
struct gendisk *disk; struct gendisk *disk;
struct kref kref; struct kref kref;
...@@ -97,6 +67,19 @@ struct nvme_ns { ...@@ -97,6 +67,19 @@ struct nvme_ns {
u32 mode_select_block_len; u32 mode_select_block_len;
}; };
struct nvme_ctrl_ops {
int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
};
static inline bool nvme_ctrl_ready(struct nvme_ctrl *ctrl)
{
u32 val = 0;
if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
return false;
return val & NVME_CSTS_RDY;
}
static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector) static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
{ {
return (sector >> (ns->lba_shift - 9)); return (sector >> (ns->lba_shift - 9));
...@@ -107,13 +90,13 @@ int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, ...@@ -107,13 +90,13 @@ int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buffer, void __user *ubuffer, unsigned bufflen, void *buffer, void __user *ubuffer, unsigned bufflen,
u32 *result, unsigned timeout); u32 *result, unsigned timeout);
int nvme_identify_ctrl(struct nvme_dev *dev, struct nvme_id_ctrl **id); int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id);
int nvme_identify_ns(struct nvme_dev *dev, unsigned nsid, int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
struct nvme_id_ns **id); struct nvme_id_ns **id);
int nvme_get_log_page(struct nvme_dev *dev, struct nvme_smart_log **log); int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log);
int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid, int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
dma_addr_t dma_addr, u32 *result); dma_addr_t dma_addr, u32 *result);
int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11, int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
dma_addr_t dma_addr, u32 *result); dma_addr_t dma_addr, u32 *result);
struct sg_io_hdr; struct sg_io_hdr;
......
...@@ -87,6 +87,9 @@ static wait_queue_head_t nvme_kthread_wait; ...@@ -87,6 +87,9 @@ static wait_queue_head_t nvme_kthread_wait;
static struct class *nvme_class; static struct class *nvme_class;
struct nvme_dev;
struct nvme_queue;
static int __nvme_reset(struct nvme_dev *dev); static int __nvme_reset(struct nvme_dev *dev);
static int nvme_reset(struct nvme_dev *dev); static int nvme_reset(struct nvme_dev *dev);
static void nvme_process_cq(struct nvme_queue *nvmeq); static void nvme_process_cq(struct nvme_queue *nvmeq);
...@@ -101,6 +104,49 @@ struct async_cmd_info { ...@@ -101,6 +104,49 @@ struct async_cmd_info {
void *ctx; void *ctx;
}; };
/*
* Represents an NVM Express device. Each nvme_dev is a PCI function.
*/
struct nvme_dev {
struct list_head node;
struct nvme_queue **queues;
struct blk_mq_tag_set tagset;
struct blk_mq_tag_set admin_tagset;
u32 __iomem *dbs;
struct device *dev;
struct dma_pool *prp_page_pool;
struct dma_pool *prp_small_pool;
unsigned queue_count;
unsigned online_queues;
unsigned max_qid;
int q_depth;
u32 db_stride;
u32 ctrl_config;
struct msix_entry *entry;
void __iomem *bar;
struct list_head namespaces;
struct kref kref;
struct device *device;
struct work_struct reset_work;
struct work_struct probe_work;
struct work_struct scan_work;
bool subsystem;
u32 max_hw_sectors;
u32 stripe_size;
u32 page_size;
void __iomem *cmb;
dma_addr_t cmb_dma_addr;
u64 cmb_size;
u32 cmbsz;
struct nvme_ctrl ctrl;
};
static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
{
return container_of(ctrl, struct nvme_dev, ctrl);
}
/* /*
* An NVM Express queue. Each device has at least two (one for admin * An NVM Express queue. Each device has at least two (one for admin
* commands and one for I/O commands). * commands and one for I/O commands).
...@@ -333,7 +379,7 @@ static void async_req_completion(struct nvme_queue *nvmeq, void *ctx, ...@@ -333,7 +379,7 @@ static void async_req_completion(struct nvme_queue *nvmeq, void *ctx,
u16 status = le16_to_cpup(&cqe->status) >> 1; u16 status = le16_to_cpup(&cqe->status) >> 1;
if (status == NVME_SC_SUCCESS || status == NVME_SC_ABORT_REQ) if (status == NVME_SC_SUCCESS || status == NVME_SC_ABORT_REQ)
++nvmeq->dev->event_limit; ++nvmeq->dev->ctrl.event_limit;
if (status != NVME_SC_SUCCESS) if (status != NVME_SC_SUCCESS)
return; return;
...@@ -357,7 +403,7 @@ static void abort_completion(struct nvme_queue *nvmeq, void *ctx, ...@@ -357,7 +403,7 @@ static void abort_completion(struct nvme_queue *nvmeq, void *ctx,
blk_mq_free_request(req); blk_mq_free_request(req);
dev_warn(nvmeq->q_dmadev, "Abort status:%x result:%x", status, result); dev_warn(nvmeq->q_dmadev, "Abort status:%x result:%x", status, result);
++nvmeq->dev->abort_limit; ++nvmeq->dev->ctrl.abort_limit;
} }
static void async_completion(struct nvme_queue *nvmeq, void *ctx, static void async_completion(struct nvme_queue *nvmeq, void *ctx,
...@@ -1051,7 +1097,7 @@ static int nvme_submit_async_admin_req(struct nvme_dev *dev) ...@@ -1051,7 +1097,7 @@ static int nvme_submit_async_admin_req(struct nvme_dev *dev)
struct nvme_cmd_info *cmd_info; struct nvme_cmd_info *cmd_info;
struct request *req; struct request *req;
req = blk_mq_alloc_request(dev->admin_q, WRITE, req = blk_mq_alloc_request(dev->ctrl.admin_q, WRITE,
BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED); BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED);
if (IS_ERR(req)) if (IS_ERR(req))
return PTR_ERR(req); return PTR_ERR(req);
...@@ -1077,7 +1123,7 @@ static int nvme_submit_admin_async_cmd(struct nvme_dev *dev, ...@@ -1077,7 +1123,7 @@ static int nvme_submit_admin_async_cmd(struct nvme_dev *dev,
struct request *req; struct request *req;
struct nvme_cmd_info *cmd_rq; struct nvme_cmd_info *cmd_rq;
req = blk_mq_alloc_request(dev->admin_q, WRITE, 0); req = blk_mq_alloc_request(dev->ctrl.admin_q, WRITE, 0);
if (IS_ERR(req)) if (IS_ERR(req))
return PTR_ERR(req); return PTR_ERR(req);
...@@ -1101,7 +1147,7 @@ static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id) ...@@ -1101,7 +1147,7 @@ static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
c.delete_queue.opcode = opcode; c.delete_queue.opcode = opcode;
c.delete_queue.qid = cpu_to_le16(id); c.delete_queue.qid = cpu_to_le16(id);
return nvme_submit_sync_cmd(dev->admin_q, &c, NULL, 0); return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
} }
static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid, static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
...@@ -1122,7 +1168,7 @@ static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid, ...@@ -1122,7 +1168,7 @@ static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
c.create_cq.cq_flags = cpu_to_le16(flags); c.create_cq.cq_flags = cpu_to_le16(flags);
c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector); c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);
return nvme_submit_sync_cmd(dev->admin_q, &c, NULL, 0); return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
} }
static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid, static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
...@@ -1143,7 +1189,7 @@ static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid, ...@@ -1143,7 +1189,7 @@ static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
c.create_sq.sq_flags = cpu_to_le16(flags); c.create_sq.sq_flags = cpu_to_le16(flags);
c.create_sq.cqid = cpu_to_le16(qid); c.create_sq.cqid = cpu_to_le16(qid);
return nvme_submit_sync_cmd(dev->admin_q, &c, NULL, 0); return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
} }
static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid) static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
...@@ -1182,10 +1228,10 @@ static void nvme_abort_req(struct request *req) ...@@ -1182,10 +1228,10 @@ static void nvme_abort_req(struct request *req)
return; return;
} }
if (!dev->abort_limit) if (!dev->ctrl.abort_limit)
return; return;
abort_req = blk_mq_alloc_request(dev->admin_q, WRITE, abort_req = blk_mq_alloc_request(dev->ctrl.admin_q, WRITE,
BLK_MQ_REQ_NOWAIT); BLK_MQ_REQ_NOWAIT);
if (IS_ERR(abort_req)) if (IS_ERR(abort_req))
return; return;
...@@ -1199,7 +1245,7 @@ static void nvme_abort_req(struct request *req) ...@@ -1199,7 +1245,7 @@ static void nvme_abort_req(struct request *req)
cmd.abort.sqid = cpu_to_le16(nvmeq->qid); cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
cmd.abort.command_id = abort_req->tag; cmd.abort.command_id = abort_req->tag;
--dev->abort_limit; --dev->ctrl.abort_limit;
cmd_rq->aborted = 1; cmd_rq->aborted = 1;
dev_warn(nvmeq->q_dmadev, "Aborting I/O %d QID %d\n", req->tag, dev_warn(nvmeq->q_dmadev, "Aborting I/O %d QID %d\n", req->tag,
...@@ -1294,8 +1340,8 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq) ...@@ -1294,8 +1340,8 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
nvmeq->cq_vector = -1; nvmeq->cq_vector = -1;
spin_unlock_irq(&nvmeq->q_lock); spin_unlock_irq(&nvmeq->q_lock);
if (!nvmeq->qid && nvmeq->dev->admin_q) if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q)
blk_mq_freeze_queue_start(nvmeq->dev->admin_q); blk_mq_freeze_queue_start(nvmeq->dev->ctrl.admin_q);
irq_set_affinity_hint(vector, NULL); irq_set_affinity_hint(vector, NULL);
free_irq(vector, nvmeq); free_irq(vector, nvmeq);
...@@ -1391,7 +1437,7 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid, ...@@ -1391,7 +1437,7 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
nvmeq->q_dmadev = dev->dev; nvmeq->q_dmadev = dev->dev;
nvmeq->dev = dev; nvmeq->dev = dev;
snprintf(nvmeq->irqname, sizeof(nvmeq->irqname), "nvme%dq%d", snprintf(nvmeq->irqname, sizeof(nvmeq->irqname), "nvme%dq%d",
dev->instance, qid); dev->ctrl.instance, qid);
spin_lock_init(&nvmeq->q_lock); spin_lock_init(&nvmeq->q_lock);
nvmeq->cq_head = 0; nvmeq->cq_head = 0;
nvmeq->cq_phase = 1; nvmeq->cq_phase = 1;
...@@ -1559,15 +1605,15 @@ static struct blk_mq_ops nvme_mq_ops = { ...@@ -1559,15 +1605,15 @@ static struct blk_mq_ops nvme_mq_ops = {
static void nvme_dev_remove_admin(struct nvme_dev *dev) static void nvme_dev_remove_admin(struct nvme_dev *dev)
{ {
if (dev->admin_q && !blk_queue_dying(dev->admin_q)) { if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) {
blk_cleanup_queue(dev->admin_q); blk_cleanup_queue(dev->ctrl.admin_q);
blk_mq_free_tag_set(&dev->admin_tagset); blk_mq_free_tag_set(&dev->admin_tagset);
} }
} }
static int nvme_alloc_admin_tags(struct nvme_dev *dev) static int nvme_alloc_admin_tags(struct nvme_dev *dev)
{ {
if (!dev->admin_q) { if (!dev->ctrl.admin_q) {
dev->admin_tagset.ops = &nvme_mq_admin_ops; dev->admin_tagset.ops = &nvme_mq_admin_ops;
dev->admin_tagset.nr_hw_queues = 1; dev->admin_tagset.nr_hw_queues = 1;
dev->admin_tagset.queue_depth = NVME_AQ_DEPTH - 1; dev->admin_tagset.queue_depth = NVME_AQ_DEPTH - 1;
...@@ -1580,18 +1626,18 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev) ...@@ -1580,18 +1626,18 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev)
if (blk_mq_alloc_tag_set(&dev->admin_tagset)) if (blk_mq_alloc_tag_set(&dev->admin_tagset))
return -ENOMEM; return -ENOMEM;
dev->admin_q = blk_mq_init_queue(&dev->admin_tagset); dev->ctrl.admin_q = blk_mq_init_queue(&dev->admin_tagset);
if (IS_ERR(dev->admin_q)) { if (IS_ERR(dev->ctrl.admin_q)) {
blk_mq_free_tag_set(&dev->admin_tagset); blk_mq_free_tag_set(&dev->admin_tagset);
return -ENOMEM; return -ENOMEM;
} }
if (!blk_get_queue(dev->admin_q)) { if (!blk_get_queue(dev->ctrl.admin_q)) {
nvme_dev_remove_admin(dev); nvme_dev_remove_admin(dev);
dev->admin_q = NULL; dev->ctrl.admin_q = NULL;
return -ENODEV; return -ENODEV;
} }
} else } else
blk_mq_unfreeze_queue(dev->admin_q); blk_mq_unfreeze_queue(dev->ctrl.admin_q);
return 0; return 0;
} }
...@@ -1670,7 +1716,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev) ...@@ -1670,7 +1716,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{ {
struct nvme_dev *dev = ns->dev; struct nvme_dev *dev = to_nvme_dev(ns->ctrl);
struct nvme_user_io io; struct nvme_user_io io;
struct nvme_command c; struct nvme_command c;
unsigned length, meta_len; unsigned length, meta_len;
...@@ -1745,7 +1791,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) ...@@ -1745,7 +1791,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
return status; return status;
} }
static int nvme_user_cmd(struct nvme_dev *dev, struct nvme_ns *ns, static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
struct nvme_passthru_cmd __user *ucmd) struct nvme_passthru_cmd __user *ucmd)
{ {
struct nvme_passthru_cmd cmd; struct nvme_passthru_cmd cmd;
...@@ -1774,7 +1820,7 @@ static int nvme_user_cmd(struct nvme_dev *dev, struct nvme_ns *ns, ...@@ -1774,7 +1820,7 @@ static int nvme_user_cmd(struct nvme_dev *dev, struct nvme_ns *ns,
if (cmd.timeout_ms) if (cmd.timeout_ms)
timeout = msecs_to_jiffies(cmd.timeout_ms); timeout = msecs_to_jiffies(cmd.timeout_ms);
status = __nvme_submit_sync_cmd(ns ? ns->queue : dev->admin_q, &c, status = __nvme_submit_sync_cmd(ns ? ns->queue : ctrl->admin_q, &c,
NULL, (void __user *)(uintptr_t)cmd.addr, cmd.data_len, NULL, (void __user *)(uintptr_t)cmd.addr, cmd.data_len,
&cmd.result, timeout); &cmd.result, timeout);
if (status >= 0) { if (status >= 0) {
...@@ -1804,9 +1850,9 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, ...@@ -1804,9 +1850,9 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
force_successful_syscall_return(); force_successful_syscall_return();
return ns->ns_id; return ns->ns_id;
case NVME_IOCTL_ADMIN_CMD: case NVME_IOCTL_ADMIN_CMD:
return nvme_user_cmd(ns->dev, NULL, (void __user *)arg); return nvme_user_cmd(ns->ctrl, NULL, (void __user *)arg);
case NVME_IOCTL_IO_CMD: case NVME_IOCTL_IO_CMD:
return nvme_user_cmd(ns->dev, ns, (void __user *)arg); return nvme_user_cmd(ns->ctrl, ns, (void __user *)arg);
case NVME_IOCTL_SUBMIT_IO: case NVME_IOCTL_SUBMIT_IO:
return nvme_submit_io(ns, (void __user *)arg); return nvme_submit_io(ns, (void __user *)arg);
case SG_GET_VERSION_NUM: case SG_GET_VERSION_NUM:
...@@ -1836,6 +1882,7 @@ static void nvme_free_dev(struct kref *kref); ...@@ -1836,6 +1882,7 @@ static void nvme_free_dev(struct kref *kref);
static void nvme_free_ns(struct kref *kref) static void nvme_free_ns(struct kref *kref)
{ {
struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref); struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);
struct nvme_dev *dev = to_nvme_dev(ns->ctrl);
if (ns->type == NVME_NS_LIGHTNVM) if (ns->type == NVME_NS_LIGHTNVM)
nvme_nvm_unregister(ns->queue, ns->disk->disk_name); nvme_nvm_unregister(ns->queue, ns->disk->disk_name);
...@@ -1844,7 +1891,7 @@ static void nvme_free_ns(struct kref *kref) ...@@ -1844,7 +1891,7 @@ static void nvme_free_ns(struct kref *kref)
ns->disk->private_data = NULL; ns->disk->private_data = NULL;
spin_unlock(&dev_list_lock); spin_unlock(&dev_list_lock);
kref_put(&ns->dev->kref, nvme_free_dev); kref_put(&dev->kref, nvme_free_dev);
put_disk(ns->disk); put_disk(ns->disk);
kfree(ns); kfree(ns);
} }
...@@ -1893,15 +1940,15 @@ static void nvme_config_discard(struct nvme_ns *ns) ...@@ -1893,15 +1940,15 @@ static void nvme_config_discard(struct nvme_ns *ns)
static int nvme_revalidate_disk(struct gendisk *disk) static int nvme_revalidate_disk(struct gendisk *disk)
{ {
struct nvme_ns *ns = disk->private_data; struct nvme_ns *ns = disk->private_data;
struct nvme_dev *dev = ns->dev; struct nvme_dev *dev = to_nvme_dev(ns->ctrl);
struct nvme_id_ns *id; struct nvme_id_ns *id;
u8 lbaf, pi_type; u8 lbaf, pi_type;
u16 old_ms; u16 old_ms;
unsigned short bs; unsigned short bs;
if (nvme_identify_ns(dev, ns->ns_id, &id)) { if (nvme_identify_ns(&dev->ctrl, ns->ns_id, &id)) {
dev_warn(dev->dev, "%s: Identify failure nvme%dn%d\n", __func__, dev_warn(dev->dev, "%s: Identify failure nvme%dn%d\n", __func__,
dev->instance, ns->ns_id); dev->ctrl.instance, ns->ns_id);
return -ENODEV; return -ENODEV;
} }
if (id->ncap == 0) { if (id->ncap == 0) {
...@@ -1957,7 +2004,7 @@ static int nvme_revalidate_disk(struct gendisk *disk) ...@@ -1957,7 +2004,7 @@ static int nvme_revalidate_disk(struct gendisk *disk)
else else
set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9)); set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
if (dev->oncs & NVME_CTRL_ONCS_DSM) if (dev->ctrl.oncs & NVME_CTRL_ONCS_DSM)
nvme_config_discard(ns); nvme_config_discard(ns);
blk_mq_unfreeze_queue(disk->queue); blk_mq_unfreeze_queue(disk->queue);
...@@ -2095,10 +2142,10 @@ static int nvme_kthread(void *data) ...@@ -2095,10 +2142,10 @@ static int nvme_kthread(void *data)
spin_lock_irq(&nvmeq->q_lock); spin_lock_irq(&nvmeq->q_lock);
nvme_process_cq(nvmeq); nvme_process_cq(nvmeq);
while ((i == 0) && (dev->event_limit > 0)) { while (i == 0 && dev->ctrl.event_limit > 0) {
if (nvme_submit_async_admin_req(dev)) if (nvme_submit_async_admin_req(dev))
break; break;
dev->event_limit--; dev->ctrl.event_limit--;
} }
spin_unlock_irq(&nvmeq->q_lock); spin_unlock_irq(&nvmeq->q_lock);
} }
...@@ -2124,7 +2171,7 @@ static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid) ...@@ -2124,7 +2171,7 @@ static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid)
goto out_free_ns; goto out_free_ns;
queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue); queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue); queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
ns->dev = dev; ns->ctrl = &dev->ctrl;
ns->queue->queuedata = ns; ns->queue->queuedata = ns;
disk = alloc_disk_node(0, node); disk = alloc_disk_node(0, node);
...@@ -2145,7 +2192,7 @@ static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid) ...@@ -2145,7 +2192,7 @@ static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid)
} }
if (dev->stripe_size) if (dev->stripe_size)
blk_queue_chunk_sectors(ns->queue, dev->stripe_size >> 9); blk_queue_chunk_sectors(ns->queue, dev->stripe_size >> 9);
if (dev->vwc & NVME_CTRL_VWC_PRESENT) if (dev->ctrl.vwc & NVME_CTRL_VWC_PRESENT)
blk_queue_flush(ns->queue, REQ_FLUSH | REQ_FUA); blk_queue_flush(ns->queue, REQ_FLUSH | REQ_FUA);
blk_queue_virt_boundary(ns->queue, dev->page_size - 1); blk_queue_virt_boundary(ns->queue, dev->page_size - 1);
...@@ -2156,7 +2203,7 @@ static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid) ...@@ -2156,7 +2203,7 @@ static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid)
disk->queue = ns->queue; disk->queue = ns->queue;
disk->driverfs_dev = dev->device; disk->driverfs_dev = dev->device;
disk->flags = GENHD_FL_EXT_DEVT; disk->flags = GENHD_FL_EXT_DEVT;
sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid); sprintf(disk->disk_name, "nvme%dn%d", dev->ctrl.instance, nsid);
/* /*
* Initialize capacity to 0 until we establish the namespace format and * Initialize capacity to 0 until we establish the namespace format and
...@@ -2221,7 +2268,7 @@ static int set_queue_count(struct nvme_dev *dev, int count) ...@@ -2221,7 +2268,7 @@ static int set_queue_count(struct nvme_dev *dev, int count)
u32 result; u32 result;
u32 q_count = (count - 1) | ((count - 1) << 16); u32 q_count = (count - 1) | ((count - 1) << 16);
status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES, q_count, 0, status = nvme_set_features(&dev->ctrl, NVME_FEAT_NUM_QUEUES, q_count, 0,
&result); &result);
if (status < 0) if (status < 0)
return status; return status;
...@@ -2405,7 +2452,8 @@ static inline bool nvme_io_incapable(struct nvme_dev *dev) ...@@ -2405,7 +2452,8 @@ static inline bool nvme_io_incapable(struct nvme_dev *dev)
static void nvme_ns_remove(struct nvme_ns *ns) static void nvme_ns_remove(struct nvme_ns *ns)
{ {
bool kill = nvme_io_incapable(ns->dev) && !blk_queue_dying(ns->queue); bool kill = nvme_io_incapable(to_nvme_dev(ns->ctrl)) &&
!blk_queue_dying(ns->queue);
if (kill) if (kill)
blk_set_queue_dying(ns->queue); blk_set_queue_dying(ns->queue);
...@@ -2462,7 +2510,7 @@ static void nvme_dev_scan(struct work_struct *work) ...@@ -2462,7 +2510,7 @@ static void nvme_dev_scan(struct work_struct *work)
if (!dev->tagset.tags) if (!dev->tagset.tags)
return; return;
if (nvme_identify_ctrl(dev, &ctrl)) if (nvme_identify_ctrl(&dev->ctrl, &ctrl))
return; return;
nvme_scan_namespaces(dev, le32_to_cpup(&ctrl->nn)); nvme_scan_namespaces(dev, le32_to_cpup(&ctrl->nn));
kfree(ctrl); kfree(ctrl);
...@@ -2482,18 +2530,18 @@ static int nvme_dev_add(struct nvme_dev *dev) ...@@ -2482,18 +2530,18 @@ static int nvme_dev_add(struct nvme_dev *dev)
struct nvme_id_ctrl *ctrl; struct nvme_id_ctrl *ctrl;
int shift = NVME_CAP_MPSMIN(lo_hi_readq(dev->bar + NVME_REG_CAP)) + 12; int shift = NVME_CAP_MPSMIN(lo_hi_readq(dev->bar + NVME_REG_CAP)) + 12;
res = nvme_identify_ctrl(dev, &ctrl); res = nvme_identify_ctrl(&dev->ctrl, &ctrl);
if (res) { if (res) {
dev_err(dev->dev, "Identify Controller failed (%d)\n", res); dev_err(dev->dev, "Identify Controller failed (%d)\n", res);
return -EIO; return -EIO;
} }
dev->oncs = le16_to_cpup(&ctrl->oncs); dev->ctrl.oncs = le16_to_cpup(&ctrl->oncs);
dev->abort_limit = ctrl->acl + 1; dev->ctrl.abort_limit = ctrl->acl + 1;
dev->vwc = ctrl->vwc; dev->ctrl.vwc = ctrl->vwc;
memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn)); memcpy(dev->ctrl.serial, ctrl->sn, sizeof(ctrl->sn));
memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn)); memcpy(dev->ctrl.model, ctrl->mn, sizeof(ctrl->mn));
memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr)); memcpy(dev->ctrl.firmware_rev, ctrl->fr, sizeof(ctrl->fr));
if (ctrl->mdts) if (ctrl->mdts)
dev->max_hw_sectors = 1 << (ctrl->mdts + shift - 9); dev->max_hw_sectors = 1 << (ctrl->mdts + shift - 9);
else else
...@@ -2728,7 +2776,7 @@ static void nvme_disable_io_queues(struct nvme_dev *dev) ...@@ -2728,7 +2776,7 @@ static void nvme_disable_io_queues(struct nvme_dev *dev)
DEFINE_KTHREAD_WORKER_ONSTACK(worker); DEFINE_KTHREAD_WORKER_ONSTACK(worker);
struct nvme_delq_ctx dq; struct nvme_delq_ctx dq;
struct task_struct *kworker_task = kthread_run(kthread_worker_fn, struct task_struct *kworker_task = kthread_run(kthread_worker_fn,
&worker, "nvme%d", dev->instance); &worker, "nvme%d", dev->ctrl.instance);
if (IS_ERR(kworker_task)) { if (IS_ERR(kworker_task)) {
dev_err(dev->dev, dev_err(dev->dev,
...@@ -2879,14 +2927,14 @@ static int nvme_set_instance(struct nvme_dev *dev) ...@@ -2879,14 +2927,14 @@ static int nvme_set_instance(struct nvme_dev *dev)
if (error) if (error)
return -ENODEV; return -ENODEV;
dev->instance = instance; dev->ctrl.instance = instance;
return 0; return 0;
} }
static void nvme_release_instance(struct nvme_dev *dev) static void nvme_release_instance(struct nvme_dev *dev)
{ {
spin_lock(&dev_list_lock); spin_lock(&dev_list_lock);
ida_remove(&nvme_instance_ida, dev->instance); ida_remove(&nvme_instance_ida, dev->ctrl.instance);
spin_unlock(&dev_list_lock); spin_unlock(&dev_list_lock);
} }
...@@ -2899,8 +2947,8 @@ static void nvme_free_dev(struct kref *kref) ...@@ -2899,8 +2947,8 @@ static void nvme_free_dev(struct kref *kref)
nvme_release_instance(dev); nvme_release_instance(dev);
if (dev->tagset.tags) if (dev->tagset.tags)
blk_mq_free_tag_set(&dev->tagset); blk_mq_free_tag_set(&dev->tagset);
if (dev->admin_q) if (dev->ctrl.admin_q)
blk_put_queue(dev->admin_q); blk_put_queue(dev->ctrl.admin_q);
kfree(dev->queues); kfree(dev->queues);
kfree(dev->entry); kfree(dev->entry);
kfree(dev); kfree(dev);
...@@ -2914,8 +2962,8 @@ static int nvme_dev_open(struct inode *inode, struct file *f) ...@@ -2914,8 +2962,8 @@ static int nvme_dev_open(struct inode *inode, struct file *f)
spin_lock(&dev_list_lock); spin_lock(&dev_list_lock);
list_for_each_entry(dev, &dev_list, node) { list_for_each_entry(dev, &dev_list, node) {
if (dev->instance == instance) { if (dev->ctrl.instance == instance) {
if (!dev->admin_q) { if (!dev->ctrl.admin_q) {
ret = -EWOULDBLOCK; ret = -EWOULDBLOCK;
break; break;
} }
...@@ -2945,12 +2993,12 @@ static long nvme_dev_ioctl(struct file *f, unsigned int cmd, unsigned long arg) ...@@ -2945,12 +2993,12 @@ static long nvme_dev_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
switch (cmd) { switch (cmd) {
case NVME_IOCTL_ADMIN_CMD: case NVME_IOCTL_ADMIN_CMD:
return nvme_user_cmd(dev, NULL, (void __user *)arg); return nvme_user_cmd(&dev->ctrl, NULL, (void __user *)arg);
case NVME_IOCTL_IO_CMD: case NVME_IOCTL_IO_CMD:
if (list_empty(&dev->namespaces)) if (list_empty(&dev->namespaces))
return -ENOTTY; return -ENOTTY;
ns = list_first_entry(&dev->namespaces, struct nvme_ns, list); ns = list_first_entry(&dev->namespaces, struct nvme_ns, list);
return nvme_user_cmd(dev, ns, (void __user *)arg); return nvme_user_cmd(&dev->ctrl, ns, (void __user *)arg);
case NVME_IOCTL_RESET: case NVME_IOCTL_RESET:
dev_warn(dev->dev, "resetting controller\n"); dev_warn(dev->dev, "resetting controller\n");
return nvme_reset(dev); return nvme_reset(dev);
...@@ -3011,7 +3059,7 @@ static void nvme_probe_work(struct work_struct *work) ...@@ -3011,7 +3059,7 @@ static void nvme_probe_work(struct work_struct *work)
if (result) if (result)
goto free_tags; goto free_tags;
dev->event_limit = 1; dev->ctrl.event_limit = 1;
/* /*
* Keep the controller around but remove all namespaces if we don't have * Keep the controller around but remove all namespaces if we don't have
...@@ -3029,8 +3077,8 @@ static void nvme_probe_work(struct work_struct *work) ...@@ -3029,8 +3077,8 @@ static void nvme_probe_work(struct work_struct *work)
free_tags: free_tags:
nvme_dev_remove_admin(dev); nvme_dev_remove_admin(dev);
blk_put_queue(dev->admin_q); blk_put_queue(dev->ctrl.admin_q);
dev->admin_q = NULL; dev->ctrl.admin_q = NULL;
dev->queues[0]->tags = NULL; dev->queues[0]->tags = NULL;
disable: disable:
nvme_disable_queue(dev, 0); nvme_disable_queue(dev, 0);
...@@ -3058,7 +3106,7 @@ static void nvme_dead_ctrl(struct nvme_dev *dev) ...@@ -3058,7 +3106,7 @@ static void nvme_dead_ctrl(struct nvme_dev *dev)
dev_warn(dev->dev, "Device failed to resume\n"); dev_warn(dev->dev, "Device failed to resume\n");
kref_get(&dev->kref); kref_get(&dev->kref);
if (IS_ERR(kthread_run(nvme_remove_dead_ctrl, dev, "nvme%d", if (IS_ERR(kthread_run(nvme_remove_dead_ctrl, dev, "nvme%d",
dev->instance))) { dev->ctrl.instance))) {
dev_err(dev->dev, dev_err(dev->dev,
"Failed to start controller remove task\n"); "Failed to start controller remove task\n");
kref_put(&dev->kref, nvme_free_dev); kref_put(&dev->kref, nvme_free_dev);
...@@ -3100,7 +3148,7 @@ static int nvme_reset(struct nvme_dev *dev) ...@@ -3100,7 +3148,7 @@ static int nvme_reset(struct nvme_dev *dev)
{ {
int ret; int ret;
if (!dev->admin_q || blk_queue_dying(dev->admin_q)) if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q))
return -ENODEV; return -ENODEV;
spin_lock(&dev_list_lock); spin_lock(&dev_list_lock);
...@@ -3131,6 +3179,16 @@ static ssize_t nvme_sysfs_reset(struct device *dev, ...@@ -3131,6 +3179,16 @@ static ssize_t nvme_sysfs_reset(struct device *dev,
} }
static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset); static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);
static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
{
*val = readl(to_nvme_dev(ctrl)->bar + off);
return 0;
}
static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
.reg_read32 = nvme_pci_reg_read32,
};
static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{ {
int node, result = -ENOMEM; int node, result = -ENOMEM;
...@@ -3156,6 +3214,10 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) ...@@ -3156,6 +3214,10 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_WORK(&dev->reset_work, nvme_reset_work); INIT_WORK(&dev->reset_work, nvme_reset_work);
dev->dev = get_device(&pdev->dev); dev->dev = get_device(&pdev->dev);
pci_set_drvdata(pdev, dev); pci_set_drvdata(pdev, dev);
dev->ctrl.ops = &nvme_pci_ctrl_ops;
dev->ctrl.dev = dev->dev;
result = nvme_set_instance(dev); result = nvme_set_instance(dev);
if (result) if (result)
goto put_pci; goto put_pci;
...@@ -3166,8 +3228,8 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) ...@@ -3166,8 +3228,8 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
kref_init(&dev->kref); kref_init(&dev->kref);
dev->device = device_create(nvme_class, &pdev->dev, dev->device = device_create(nvme_class, &pdev->dev,
MKDEV(nvme_char_major, dev->instance), MKDEV(nvme_char_major, dev->ctrl.instance),
dev, "nvme%d", dev->instance); dev, "nvme%d", dev->ctrl.instance);
if (IS_ERR(dev->device)) { if (IS_ERR(dev->device)) {
result = PTR_ERR(dev->device); result = PTR_ERR(dev->device);
goto release_pools; goto release_pools;
...@@ -3186,7 +3248,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) ...@@ -3186,7 +3248,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0; return 0;
put_dev: put_dev:
device_destroy(nvme_class, MKDEV(nvme_char_major, dev->instance)); device_destroy(nvme_class, MKDEV(nvme_char_major, dev->ctrl.instance));
put_device(dev->device); put_device(dev->device);
release_pools: release_pools:
nvme_release_prp_pools(dev); nvme_release_prp_pools(dev);
...@@ -3233,7 +3295,7 @@ static void nvme_remove(struct pci_dev *pdev) ...@@ -3233,7 +3295,7 @@ static void nvme_remove(struct pci_dev *pdev)
nvme_dev_remove(dev); nvme_dev_remove(dev);
nvme_dev_shutdown(dev); nvme_dev_shutdown(dev);
nvme_dev_remove_admin(dev); nvme_dev_remove_admin(dev);
device_destroy(nvme_class, MKDEV(nvme_char_major, dev->instance)); device_destroy(nvme_class, MKDEV(nvme_char_major, dev->ctrl.instance));
nvme_free_queues(dev, 0); nvme_free_queues(dev, 0);
nvme_release_cmb(dev); nvme_release_cmb(dev);
nvme_release_prp_pools(dev); nvme_release_prp_pools(dev);
......
...@@ -524,7 +524,7 @@ static int nvme_trans_standard_inquiry_page(struct nvme_ns *ns, ...@@ -524,7 +524,7 @@ static int nvme_trans_standard_inquiry_page(struct nvme_ns *ns,
struct sg_io_hdr *hdr, u8 *inq_response, struct sg_io_hdr *hdr, u8 *inq_response,
int alloc_len) int alloc_len)
{ {
struct nvme_dev *dev = ns->dev; struct nvme_ctrl *ctrl = ns->ctrl;
struct nvme_id_ns *id_ns; struct nvme_id_ns *id_ns;
int res; int res;
int nvme_sc; int nvme_sc;
...@@ -532,10 +532,10 @@ static int nvme_trans_standard_inquiry_page(struct nvme_ns *ns, ...@@ -532,10 +532,10 @@ static int nvme_trans_standard_inquiry_page(struct nvme_ns *ns,
u8 resp_data_format = 0x02; u8 resp_data_format = 0x02;
u8 protect; u8 protect;
u8 cmdque = 0x01 << 1; u8 cmdque = 0x01 << 1;
u8 fw_offset = sizeof(dev->firmware_rev); u8 fw_offset = sizeof(ctrl->firmware_rev);
/* nvme ns identify - use DPS value for PROTECT field */ /* nvme ns identify - use DPS value for PROTECT field */
nvme_sc = nvme_identify_ns(dev, ns->ns_id, &id_ns); nvme_sc = nvme_identify_ns(ctrl, ns->ns_id, &id_ns);
res = nvme_trans_status_code(hdr, nvme_sc); res = nvme_trans_status_code(hdr, nvme_sc);
if (res) if (res)
return res; return res;
...@@ -553,12 +553,12 @@ static int nvme_trans_standard_inquiry_page(struct nvme_ns *ns, ...@@ -553,12 +553,12 @@ static int nvme_trans_standard_inquiry_page(struct nvme_ns *ns,
inq_response[5] = protect; /* sccs=0 | acc=0 | tpgs=0 | pc3=0 */ inq_response[5] = protect; /* sccs=0 | acc=0 | tpgs=0 | pc3=0 */
inq_response[7] = cmdque; /* wbus16=0 | sync=0 | vs=0 */ inq_response[7] = cmdque; /* wbus16=0 | sync=0 | vs=0 */
strncpy(&inq_response[8], "NVMe ", 8); strncpy(&inq_response[8], "NVMe ", 8);
strncpy(&inq_response[16], dev->model, 16); strncpy(&inq_response[16], ctrl->model, 16);
while (dev->firmware_rev[fw_offset - 1] == ' ' && fw_offset > 4) while (ctrl->firmware_rev[fw_offset - 1] == ' ' && fw_offset > 4)
fw_offset--; fw_offset--;
fw_offset -= 4; fw_offset -= 4;
strncpy(&inq_response[32], dev->firmware_rev + fw_offset, 4); strncpy(&inq_response[32], ctrl->firmware_rev + fw_offset, 4);
xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH); xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
return nvme_trans_copy_to_user(hdr, inq_response, xfer_len); return nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
...@@ -588,27 +588,26 @@ static int nvme_trans_unit_serial_page(struct nvme_ns *ns, ...@@ -588,27 +588,26 @@ static int nvme_trans_unit_serial_page(struct nvme_ns *ns,
struct sg_io_hdr *hdr, u8 *inq_response, struct sg_io_hdr *hdr, u8 *inq_response,
int alloc_len) int alloc_len)
{ {
struct nvme_dev *dev = ns->dev;
int xfer_len; int xfer_len;
memset(inq_response, 0, STANDARD_INQUIRY_LENGTH); memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
inq_response[1] = INQ_UNIT_SERIAL_NUMBER_PAGE; /* Page Code */ inq_response[1] = INQ_UNIT_SERIAL_NUMBER_PAGE; /* Page Code */
inq_response[3] = INQ_SERIAL_NUMBER_LENGTH; /* Page Length */ inq_response[3] = INQ_SERIAL_NUMBER_LENGTH; /* Page Length */
strncpy(&inq_response[4], dev->serial, INQ_SERIAL_NUMBER_LENGTH); strncpy(&inq_response[4], ns->ctrl->serial, INQ_SERIAL_NUMBER_LENGTH);
xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH); xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
return nvme_trans_copy_to_user(hdr, inq_response, xfer_len); return nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
} }
static int nvme_fill_device_id_eui64(struct nvme_ns *ns, struct sg_io_hdr *hdr, static int nvme_fill_device_id_eui64(struct nvme_ns *ns, struct sg_io_hdr *hdr,
u8 *inq_response, int alloc_len) u8 *inq_response, int alloc_len, u32 vs)
{ {
struct nvme_id_ns *id_ns; struct nvme_id_ns *id_ns;
int nvme_sc, res; int nvme_sc, res;
size_t len; size_t len;
void *eui; void *eui;
nvme_sc = nvme_identify_ns(ns->dev, ns->ns_id, &id_ns); nvme_sc = nvme_identify_ns(ns->ctrl, ns->ns_id, &id_ns);
res = nvme_trans_status_code(hdr, nvme_sc); res = nvme_trans_status_code(hdr, nvme_sc);
if (res) if (res)
return res; return res;
...@@ -616,7 +615,7 @@ static int nvme_fill_device_id_eui64(struct nvme_ns *ns, struct sg_io_hdr *hdr, ...@@ -616,7 +615,7 @@ static int nvme_fill_device_id_eui64(struct nvme_ns *ns, struct sg_io_hdr *hdr,
eui = id_ns->eui64; eui = id_ns->eui64;
len = sizeof(id_ns->eui64); len = sizeof(id_ns->eui64);
if (readl(ns->dev->bar + NVME_REG_VS) >= NVME_VS(1, 2)) { if (vs >= NVME_VS(1, 2)) {
if (bitmap_empty(eui, len * 8)) { if (bitmap_empty(eui, len * 8)) {
eui = id_ns->nguid; eui = id_ns->nguid;
len = sizeof(id_ns->nguid); len = sizeof(id_ns->nguid);
...@@ -648,7 +647,7 @@ static int nvme_fill_device_id_eui64(struct nvme_ns *ns, struct sg_io_hdr *hdr, ...@@ -648,7 +647,7 @@ static int nvme_fill_device_id_eui64(struct nvme_ns *ns, struct sg_io_hdr *hdr,
static int nvme_fill_device_id_scsi_string(struct nvme_ns *ns, static int nvme_fill_device_id_scsi_string(struct nvme_ns *ns,
struct sg_io_hdr *hdr, u8 *inq_response, int alloc_len) struct sg_io_hdr *hdr, u8 *inq_response, int alloc_len)
{ {
struct nvme_dev *dev = ns->dev; struct nvme_ctrl *ctrl = ns->ctrl;
struct nvme_id_ctrl *id_ctrl; struct nvme_id_ctrl *id_ctrl;
int nvme_sc, res; int nvme_sc, res;
...@@ -659,7 +658,7 @@ static int nvme_fill_device_id_scsi_string(struct nvme_ns *ns, ...@@ -659,7 +658,7 @@ static int nvme_fill_device_id_scsi_string(struct nvme_ns *ns,
SCSI_ASCQ_CAUSE_NOT_REPORTABLE); SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
} }
nvme_sc = nvme_identify_ctrl(dev, &id_ctrl); nvme_sc = nvme_identify_ctrl(ctrl, &id_ctrl);
res = nvme_trans_status_code(hdr, nvme_sc); res = nvme_trans_status_code(hdr, nvme_sc);
if (res) if (res)
return res; return res;
...@@ -675,9 +674,9 @@ static int nvme_fill_device_id_scsi_string(struct nvme_ns *ns, ...@@ -675,9 +674,9 @@ static int nvme_fill_device_id_scsi_string(struct nvme_ns *ns,
inq_response[7] = 0x44; /* Designator Length */ inq_response[7] = 0x44; /* Designator Length */
sprintf(&inq_response[8], "%04x", le16_to_cpu(id_ctrl->vid)); sprintf(&inq_response[8], "%04x", le16_to_cpu(id_ctrl->vid));
memcpy(&inq_response[12], dev->model, sizeof(dev->model)); memcpy(&inq_response[12], ctrl->model, sizeof(ctrl->model));
sprintf(&inq_response[52], "%04x", cpu_to_be32(ns->ns_id)); sprintf(&inq_response[52], "%04x", cpu_to_be32(ns->ns_id));
memcpy(&inq_response[56], dev->serial, sizeof(dev->serial)); memcpy(&inq_response[56], ctrl->serial, sizeof(ctrl->serial));
res = nvme_trans_copy_to_user(hdr, inq_response, alloc_len); res = nvme_trans_copy_to_user(hdr, inq_response, alloc_len);
kfree(id_ctrl); kfree(id_ctrl);
...@@ -688,9 +687,14 @@ static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr, ...@@ -688,9 +687,14 @@ static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
u8 *resp, int alloc_len) u8 *resp, int alloc_len)
{ {
int res; int res;
u32 vs;
if (readl(ns->dev->bar + NVME_REG_VS) >= NVME_VS(1, 1)) { res = ns->ctrl->ops->reg_read32(ns->ctrl, NVME_REG_VS, &vs);
res = nvme_fill_device_id_eui64(ns, hdr, resp, alloc_len); if (res)
return res;
if (vs >= NVME_VS(1, 1)) {
res = nvme_fill_device_id_eui64(ns, hdr, resp, alloc_len, vs);
if (res != -EOPNOTSUPP) if (res != -EOPNOTSUPP)
return res; return res;
} }
...@@ -704,7 +708,7 @@ static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr, ...@@ -704,7 +708,7 @@ static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
u8 *inq_response; u8 *inq_response;
int res; int res;
int nvme_sc; int nvme_sc;
struct nvme_dev *dev = ns->dev; struct nvme_ctrl *ctrl = ns->ctrl;
struct nvme_id_ctrl *id_ctrl; struct nvme_id_ctrl *id_ctrl;
struct nvme_id_ns *id_ns; struct nvme_id_ns *id_ns;
int xfer_len; int xfer_len;
...@@ -720,7 +724,7 @@ static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr, ...@@ -720,7 +724,7 @@ static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
if (inq_response == NULL) if (inq_response == NULL)
return -ENOMEM; return -ENOMEM;
nvme_sc = nvme_identify_ns(dev, ns->ns_id, &id_ns); nvme_sc = nvme_identify_ns(ctrl, ns->ns_id, &id_ns);
res = nvme_trans_status_code(hdr, nvme_sc); res = nvme_trans_status_code(hdr, nvme_sc);
if (res) if (res)
goto out_free_inq; goto out_free_inq;
...@@ -736,7 +740,7 @@ static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr, ...@@ -736,7 +740,7 @@ static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
app_chk = protect << 1; app_chk = protect << 1;
ref_chk = protect; ref_chk = protect;
nvme_sc = nvme_identify_ctrl(dev, &id_ctrl); nvme_sc = nvme_identify_ctrl(ctrl, &id_ctrl);
res = nvme_trans_status_code(hdr, nvme_sc); res = nvme_trans_status_code(hdr, nvme_sc);
if (res) if (res)
goto out_free_inq; goto out_free_inq;
...@@ -847,7 +851,6 @@ static int nvme_trans_log_info_exceptions(struct nvme_ns *ns, ...@@ -847,7 +851,6 @@ static int nvme_trans_log_info_exceptions(struct nvme_ns *ns,
int res; int res;
int xfer_len; int xfer_len;
u8 *log_response; u8 *log_response;
struct nvme_dev *dev = ns->dev;
struct nvme_smart_log *smart_log; struct nvme_smart_log *smart_log;
u8 temp_c; u8 temp_c;
u16 temp_k; u16 temp_k;
...@@ -856,7 +859,7 @@ static int nvme_trans_log_info_exceptions(struct nvme_ns *ns, ...@@ -856,7 +859,7 @@ static int nvme_trans_log_info_exceptions(struct nvme_ns *ns,
if (log_response == NULL) if (log_response == NULL)
return -ENOMEM; return -ENOMEM;
res = nvme_get_log_page(dev, &smart_log); res = nvme_get_log_page(ns->ctrl, &smart_log);
if (res < 0) if (res < 0)
goto out_free_response; goto out_free_response;
...@@ -894,7 +897,6 @@ static int nvme_trans_log_temperature(struct nvme_ns *ns, struct sg_io_hdr *hdr, ...@@ -894,7 +897,6 @@ static int nvme_trans_log_temperature(struct nvme_ns *ns, struct sg_io_hdr *hdr,
int res; int res;
int xfer_len; int xfer_len;
u8 *log_response; u8 *log_response;
struct nvme_dev *dev = ns->dev;
struct nvme_smart_log *smart_log; struct nvme_smart_log *smart_log;
u32 feature_resp; u32 feature_resp;
u8 temp_c_cur, temp_c_thresh; u8 temp_c_cur, temp_c_thresh;
...@@ -904,7 +906,7 @@ static int nvme_trans_log_temperature(struct nvme_ns *ns, struct sg_io_hdr *hdr, ...@@ -904,7 +906,7 @@ static int nvme_trans_log_temperature(struct nvme_ns *ns, struct sg_io_hdr *hdr,
if (log_response == NULL) if (log_response == NULL)
return -ENOMEM; return -ENOMEM;
res = nvme_get_log_page(dev, &smart_log); res = nvme_get_log_page(ns->ctrl, &smart_log);
if (res < 0) if (res < 0)
goto out_free_response; goto out_free_response;
...@@ -918,7 +920,7 @@ static int nvme_trans_log_temperature(struct nvme_ns *ns, struct sg_io_hdr *hdr, ...@@ -918,7 +920,7 @@ static int nvme_trans_log_temperature(struct nvme_ns *ns, struct sg_io_hdr *hdr,
kfree(smart_log); kfree(smart_log);
/* Get Features for Temp Threshold */ /* Get Features for Temp Threshold */
res = nvme_get_features(dev, NVME_FEAT_TEMP_THRESH, 0, 0, res = nvme_get_features(ns->ctrl, NVME_FEAT_TEMP_THRESH, 0, 0,
&feature_resp); &feature_resp);
if (res != NVME_SC_SUCCESS) if (res != NVME_SC_SUCCESS)
temp_c_thresh = LOG_TEMP_UNKNOWN; temp_c_thresh = LOG_TEMP_UNKNOWN;
...@@ -980,7 +982,6 @@ static int nvme_trans_fill_blk_desc(struct nvme_ns *ns, struct sg_io_hdr *hdr, ...@@ -980,7 +982,6 @@ static int nvme_trans_fill_blk_desc(struct nvme_ns *ns, struct sg_io_hdr *hdr,
{ {
int res; int res;
int nvme_sc; int nvme_sc;
struct nvme_dev *dev = ns->dev;
struct nvme_id_ns *id_ns; struct nvme_id_ns *id_ns;
u8 flbas; u8 flbas;
u32 lba_length; u32 lba_length;
...@@ -990,7 +991,7 @@ static int nvme_trans_fill_blk_desc(struct nvme_ns *ns, struct sg_io_hdr *hdr, ...@@ -990,7 +991,7 @@ static int nvme_trans_fill_blk_desc(struct nvme_ns *ns, struct sg_io_hdr *hdr,
else if (llbaa > 0 && len < MODE_PAGE_LLBAA_BLK_DES_LEN) else if (llbaa > 0 && len < MODE_PAGE_LLBAA_BLK_DES_LEN)
return -EINVAL; return -EINVAL;
nvme_sc = nvme_identify_ns(dev, ns->ns_id, &id_ns); nvme_sc = nvme_identify_ns(ns->ctrl, ns->ns_id, &id_ns);
res = nvme_trans_status_code(hdr, nvme_sc); res = nvme_trans_status_code(hdr, nvme_sc);
if (res) if (res)
return res; return res;
...@@ -1046,14 +1047,13 @@ static int nvme_trans_fill_caching_page(struct nvme_ns *ns, ...@@ -1046,14 +1047,13 @@ static int nvme_trans_fill_caching_page(struct nvme_ns *ns,
{ {
int res = 0; int res = 0;
int nvme_sc; int nvme_sc;
struct nvme_dev *dev = ns->dev;
u32 feature_resp; u32 feature_resp;
u8 vwc; u8 vwc;
if (len < MODE_PAGE_CACHING_LEN) if (len < MODE_PAGE_CACHING_LEN)
return -EINVAL; return -EINVAL;
nvme_sc = nvme_get_features(dev, NVME_FEAT_VOLATILE_WC, 0, 0, nvme_sc = nvme_get_features(ns->ctrl, NVME_FEAT_VOLATILE_WC, 0, 0,
&feature_resp); &feature_resp);
res = nvme_trans_status_code(hdr, nvme_sc); res = nvme_trans_status_code(hdr, nvme_sc);
if (res) if (res)
...@@ -1239,12 +1239,11 @@ static int nvme_trans_power_state(struct nvme_ns *ns, struct sg_io_hdr *hdr, ...@@ -1239,12 +1239,11 @@ static int nvme_trans_power_state(struct nvme_ns *ns, struct sg_io_hdr *hdr,
{ {
int res; int res;
int nvme_sc; int nvme_sc;
struct nvme_dev *dev = ns->dev;
struct nvme_id_ctrl *id_ctrl; struct nvme_id_ctrl *id_ctrl;
int lowest_pow_st; /* max npss = lowest power consumption */ int lowest_pow_st; /* max npss = lowest power consumption */
unsigned ps_desired = 0; unsigned ps_desired = 0;
nvme_sc = nvme_identify_ctrl(dev, &id_ctrl); nvme_sc = nvme_identify_ctrl(ns->ctrl, &id_ctrl);
res = nvme_trans_status_code(hdr, nvme_sc); res = nvme_trans_status_code(hdr, nvme_sc);
if (res) if (res)
return res; return res;
...@@ -1288,7 +1287,7 @@ static int nvme_trans_power_state(struct nvme_ns *ns, struct sg_io_hdr *hdr, ...@@ -1288,7 +1287,7 @@ static int nvme_trans_power_state(struct nvme_ns *ns, struct sg_io_hdr *hdr,
SCSI_ASCQ_CAUSE_NOT_REPORTABLE); SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
break; break;
} }
nvme_sc = nvme_set_features(dev, NVME_FEAT_POWER_MGMT, ps_desired, 0, nvme_sc = nvme_set_features(ns->ctrl, NVME_FEAT_POWER_MGMT, ps_desired, 0,
NULL); NULL);
return nvme_trans_status_code(hdr, nvme_sc); return nvme_trans_status_code(hdr, nvme_sc);
} }
...@@ -1312,7 +1311,6 @@ static int nvme_trans_send_download_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr ...@@ -1312,7 +1311,6 @@ static int nvme_trans_send_download_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr
u8 buffer_id) u8 buffer_id)
{ {
int nvme_sc; int nvme_sc;
struct nvme_dev *dev = ns->dev;
struct nvme_command c; struct nvme_command c;
if (hdr->iovec_count > 0) { if (hdr->iovec_count > 0) {
...@@ -1329,7 +1327,7 @@ static int nvme_trans_send_download_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr ...@@ -1329,7 +1327,7 @@ static int nvme_trans_send_download_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr
c.dlfw.numd = cpu_to_le32((tot_len/BYTES_TO_DWORDS) - 1); c.dlfw.numd = cpu_to_le32((tot_len/BYTES_TO_DWORDS) - 1);
c.dlfw.offset = cpu_to_le32(offset/BYTES_TO_DWORDS); c.dlfw.offset = cpu_to_le32(offset/BYTES_TO_DWORDS);
nvme_sc = __nvme_submit_sync_cmd(dev->admin_q, &c, NULL, nvme_sc = __nvme_submit_sync_cmd(ns->ctrl->admin_q, &c, NULL,
hdr->dxferp, tot_len, NULL, 0); hdr->dxferp, tot_len, NULL, 0);
return nvme_trans_status_code(hdr, nvme_sc); return nvme_trans_status_code(hdr, nvme_sc);
} }
...@@ -1396,14 +1394,13 @@ static int nvme_trans_modesel_get_mp(struct nvme_ns *ns, struct sg_io_hdr *hdr, ...@@ -1396,14 +1394,13 @@ static int nvme_trans_modesel_get_mp(struct nvme_ns *ns, struct sg_io_hdr *hdr,
{ {
int res = 0; int res = 0;
int nvme_sc; int nvme_sc;
struct nvme_dev *dev = ns->dev;
unsigned dword11; unsigned dword11;
switch (page_code) { switch (page_code) {
case MODE_PAGE_CACHING: case MODE_PAGE_CACHING:
dword11 = ((mode_page[2] & CACHING_MODE_PAGE_WCE_MASK) ? 1 : 0); dword11 = ((mode_page[2] & CACHING_MODE_PAGE_WCE_MASK) ? 1 : 0);
nvme_sc = nvme_set_features(dev, NVME_FEAT_VOLATILE_WC, dword11, nvme_sc = nvme_set_features(ns->ctrl, NVME_FEAT_VOLATILE_WC,
0, NULL); dword11, 0, NULL);
res = nvme_trans_status_code(hdr, nvme_sc); res = nvme_trans_status_code(hdr, nvme_sc);
break; break;
case MODE_PAGE_CONTROL: case MODE_PAGE_CONTROL:
...@@ -1505,7 +1502,6 @@ static int nvme_trans_fmt_set_blk_size_count(struct nvme_ns *ns, ...@@ -1505,7 +1502,6 @@ static int nvme_trans_fmt_set_blk_size_count(struct nvme_ns *ns,
{ {
int res = 0; int res = 0;
int nvme_sc; int nvme_sc;
struct nvme_dev *dev = ns->dev;
u8 flbas; u8 flbas;
/* /*
...@@ -1518,7 +1514,7 @@ static int nvme_trans_fmt_set_blk_size_count(struct nvme_ns *ns, ...@@ -1518,7 +1514,7 @@ static int nvme_trans_fmt_set_blk_size_count(struct nvme_ns *ns,
if (ns->mode_select_num_blocks == 0 || ns->mode_select_block_len == 0) { if (ns->mode_select_num_blocks == 0 || ns->mode_select_block_len == 0) {
struct nvme_id_ns *id_ns; struct nvme_id_ns *id_ns;
nvme_sc = nvme_identify_ns(dev, ns->ns_id, &id_ns); nvme_sc = nvme_identify_ns(ns->ctrl, ns->ns_id, &id_ns);
res = nvme_trans_status_code(hdr, nvme_sc); res = nvme_trans_status_code(hdr, nvme_sc);
if (res) if (res)
return res; return res;
...@@ -1602,7 +1598,6 @@ static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr, ...@@ -1602,7 +1598,6 @@ static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
{ {
int res; int res;
int nvme_sc; int nvme_sc;
struct nvme_dev *dev = ns->dev;
struct nvme_id_ns *id_ns; struct nvme_id_ns *id_ns;
u8 i; u8 i;
u8 flbas, nlbaf; u8 flbas, nlbaf;
...@@ -1611,7 +1606,7 @@ static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr, ...@@ -1611,7 +1606,7 @@ static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
struct nvme_command c; struct nvme_command c;
/* Loop thru LBAF's in id_ns to match reqd lbaf, put in cdw10 */ /* Loop thru LBAF's in id_ns to match reqd lbaf, put in cdw10 */
nvme_sc = nvme_identify_ns(dev, ns->ns_id, &id_ns); nvme_sc = nvme_identify_ns(ns->ctrl, ns->ns_id, &id_ns);
res = nvme_trans_status_code(hdr, nvme_sc); res = nvme_trans_status_code(hdr, nvme_sc);
if (res) if (res)
return res; return res;
...@@ -1643,7 +1638,7 @@ static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr, ...@@ -1643,7 +1638,7 @@ static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
c.format.nsid = cpu_to_le32(ns->ns_id); c.format.nsid = cpu_to_le32(ns->ns_id);
c.format.cdw10 = cpu_to_le32(cdw10); c.format.cdw10 = cpu_to_le32(cdw10);
nvme_sc = nvme_submit_sync_cmd(dev->admin_q, &c, NULL, 0); nvme_sc = nvme_submit_sync_cmd(ns->ctrl->admin_q, &c, NULL, 0);
res = nvme_trans_status_code(hdr, nvme_sc); res = nvme_trans_status_code(hdr, nvme_sc);
kfree(id_ns); kfree(id_ns);
...@@ -2072,7 +2067,6 @@ static int nvme_trans_read_capacity(struct nvme_ns *ns, struct sg_io_hdr *hdr, ...@@ -2072,7 +2067,6 @@ static int nvme_trans_read_capacity(struct nvme_ns *ns, struct sg_io_hdr *hdr,
u32 alloc_len; u32 alloc_len;
u32 resp_size; u32 resp_size;
u32 xfer_len; u32 xfer_len;
struct nvme_dev *dev = ns->dev;
struct nvme_id_ns *id_ns; struct nvme_id_ns *id_ns;
u8 *response; u8 *response;
...@@ -2084,7 +2078,7 @@ static int nvme_trans_read_capacity(struct nvme_ns *ns, struct sg_io_hdr *hdr, ...@@ -2084,7 +2078,7 @@ static int nvme_trans_read_capacity(struct nvme_ns *ns, struct sg_io_hdr *hdr,
resp_size = READ_CAP_10_RESP_SIZE; resp_size = READ_CAP_10_RESP_SIZE;
} }
nvme_sc = nvme_identify_ns(dev, ns->ns_id, &id_ns); nvme_sc = nvme_identify_ns(ns->ctrl, ns->ns_id, &id_ns);
res = nvme_trans_status_code(hdr, nvme_sc); res = nvme_trans_status_code(hdr, nvme_sc);
if (res) if (res)
return res; return res;
...@@ -2112,7 +2106,6 @@ static int nvme_trans_report_luns(struct nvme_ns *ns, struct sg_io_hdr *hdr, ...@@ -2112,7 +2106,6 @@ static int nvme_trans_report_luns(struct nvme_ns *ns, struct sg_io_hdr *hdr,
int nvme_sc; int nvme_sc;
u32 alloc_len, xfer_len, resp_size; u32 alloc_len, xfer_len, resp_size;
u8 *response; u8 *response;
struct nvme_dev *dev = ns->dev;
struct nvme_id_ctrl *id_ctrl; struct nvme_id_ctrl *id_ctrl;
u32 ll_length, lun_id; u32 ll_length, lun_id;
u8 lun_id_offset = REPORT_LUNS_FIRST_LUN_OFFSET; u8 lun_id_offset = REPORT_LUNS_FIRST_LUN_OFFSET;
...@@ -2126,7 +2119,7 @@ static int nvme_trans_report_luns(struct nvme_ns *ns, struct sg_io_hdr *hdr, ...@@ -2126,7 +2119,7 @@ static int nvme_trans_report_luns(struct nvme_ns *ns, struct sg_io_hdr *hdr,
case ALL_LUNS_RETURNED: case ALL_LUNS_RETURNED:
case ALL_WELL_KNOWN_LUNS_RETURNED: case ALL_WELL_KNOWN_LUNS_RETURNED:
case RESTRICTED_LUNS_RETURNED: case RESTRICTED_LUNS_RETURNED:
nvme_sc = nvme_identify_ctrl(dev, &id_ctrl); nvme_sc = nvme_identify_ctrl(ns->ctrl, &id_ctrl);
res = nvme_trans_status_code(hdr, nvme_sc); res = nvme_trans_status_code(hdr, nvme_sc);
if (res) if (res)
return res; return res;
...@@ -2327,9 +2320,7 @@ static int nvme_trans_test_unit_ready(struct nvme_ns *ns, ...@@ -2327,9 +2320,7 @@ static int nvme_trans_test_unit_ready(struct nvme_ns *ns,
struct sg_io_hdr *hdr, struct sg_io_hdr *hdr,
u8 *cmd) u8 *cmd)
{ {
struct nvme_dev *dev = ns->dev; if (nvme_ctrl_ready(ns->ctrl))
if (!(readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_RDY))
return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
NOT_READY, SCSI_ASC_LUN_NOT_READY, NOT_READY, SCSI_ASC_LUN_NOT_READY,
SCSI_ASCQ_CAUSE_NOT_REPORTABLE); SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment