Commit 5da273fe authored by Linus Torvalds

Merge git://git.infradead.org/users/willy/linux-nvme

Pull NVMe driver update from Matthew Wilcox:
 "These patches have mostly been baking for a few months; sorry I didn't
  get them in during the merge window.  They're all bug fixes, except
  for the addition of the SMART log and the addition to MAINTAINERS."

* git://git.infradead.org/users/willy/linux-nvme:
  NVMe: Add namespaces with no LBA range feature
  MAINTAINERS: Add entry for the NVMe driver
  NVMe: Initialize iod nents to 0
  NVMe: Define SMART log
  NVMe: Add result to nvme_get_features
  NVMe: Set result from user admin command
  NVMe: End queued bio requests when freeing queue
  NVMe: Free cmdid on nvme_submit_bio error
parents 14629ed3 12209036
MAINTAINERS
@@ -5641,6 +5641,14 @@ S:	Maintained
 F:	drivers/video/riva/
 F:	drivers/video/nvidia/
 
+NVM EXPRESS DRIVER
+M:	Matthew Wilcox <willy@linux.intel.com>
+L:	linux-nvme@lists.infradead.org
+T:	git git://git.infradead.org/users/willy/linux-nvme.git
+S:	Supported
+F:	drivers/block/nvme.c
+F:	include/linux/nvme.h
+
 OMAP SUPPORT
 M:	Tony Lindgren <tony@atomide.com>
 L:	linux-omap@vger.kernel.org
drivers/block/nvme.c
@@ -135,6 +135,7 @@ static inline void _nvme_check_size(void)
 	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
 	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
 	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
+	BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
 }
 
 typedef void (*nvme_completion_fn)(struct nvme_dev *, void *,
@@ -237,7 +238,8 @@ static void *free_cmdid(struct nvme_queue *nvmeq, int cmdid,
 		*fn = special_completion;
 		return CMD_CTX_INVALID;
 	}
-	*fn = info[cmdid].fn;
+	if (fn)
+		*fn = info[cmdid].fn;
 	ctx = info[cmdid].ctx;
 	info[cmdid].fn = special_completion;
 	info[cmdid].ctx = CMD_CTX_COMPLETED;
@@ -335,6 +337,7 @@ nvme_alloc_iod(unsigned nseg, unsigned nbytes, gfp_t gfp)
 		iod->offset = offsetof(struct nvme_iod, sg[nseg]);
 		iod->npages = -1;
 		iod->length = nbytes;
+		iod->nents = 0;
 	}
 
 	return iod;
@@ -375,7 +378,8 @@ static void bio_completion(struct nvme_dev *dev, void *ctx,
 	struct bio *bio = iod->private;
 	u16 status = le16_to_cpup(&cqe->status) >> 1;
 
-	dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
+	if (iod->nents)
+		dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
 			bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
 	nvme_free_iod(dev, iod);
 	if (status) {
@@ -589,7 +593,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
 	result = nvme_map_bio(nvmeq->q_dmadev, iod, bio, dma_dir, psegs);
 	if (result < 0)
-		goto free_iod;
+		goto free_cmdid;
 	length = result;
 
 	cmnd->rw.command_id = cmdid;
@@ -609,6 +613,8 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
 
 	return 0;
 
+ free_cmdid:
+	free_cmdid(nvmeq, cmdid, NULL);
  free_iod:
 	nvme_free_iod(nvmeq->dev, iod);
  nomem:
@@ -835,8 +841,8 @@ static int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
 	return nvme_submit_admin_cmd(dev, &c, NULL);
 }
 
-static int nvme_get_features(struct nvme_dev *dev, unsigned fid,
-				unsigned nsid, dma_addr_t dma_addr)
+static int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
+					dma_addr_t dma_addr, u32 *result)
 {
 	struct nvme_command c;
@@ -846,7 +852,7 @@ static int nvme_get_features(struct nvme_dev *dev, unsigned fid,
 	c.features.prp1 = cpu_to_le64(dma_addr);
 	c.features.fid = cpu_to_le32(fid);
 
-	return nvme_submit_admin_cmd(dev, &c, NULL);
+	return nvme_submit_admin_cmd(dev, &c, result);
 }
 
 static int nvme_set_features(struct nvme_dev *dev, unsigned fid,
@@ -906,6 +912,10 @@ static void nvme_free_queue(struct nvme_dev *dev, int qid)
 
 	spin_lock_irq(&nvmeq->q_lock);
 	nvme_cancel_ios(nvmeq, false);
+	while (bio_list_peek(&nvmeq->sq_cong)) {
+		struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
+		bio_endio(bio, -EIO);
+	}
 	spin_unlock_irq(&nvmeq->q_lock);
 
 	irq_set_affinity_hint(vector, NULL);
@@ -1230,12 +1240,17 @@ static int nvme_user_admin_cmd(struct nvme_dev *dev,
 	if (length != cmd.data_len)
 		status = -ENOMEM;
 	else
-		status = nvme_submit_admin_cmd(dev, &c, NULL);
+		status = nvme_submit_admin_cmd(dev, &c, &cmd.result);
 
 	if (cmd.data_len) {
 		nvme_unmap_user_pages(dev, cmd.opcode & 1, iod);
 		nvme_free_iod(dev, iod);
 	}
+
+	if (!status && copy_to_user(&ucmd->result, &cmd.result,
+							sizeof(cmd.result)))
+		status = -EFAULT;
+
 	return status;
 }
@@ -1523,9 +1538,9 @@ static int nvme_dev_add(struct nvme_dev *dev)
 			continue;
 
 		res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i,
-							dma_addr + 4096);
+							dma_addr + 4096, NULL);
 		if (res)
-			continue;
+			memset(mem + 4096, 0, 4096);
 
 		ns = nvme_alloc_ns(dev, i, mem, mem + 4096);
 		if (ns)
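Note (not part of the diff above): with the widened nvme_get_features() prototype, a caller can now read back Dword 0 of the command's completion entry. The sketch below is a hypothetical in-driver user of that parameter, not code from this series; nvme_report_arbitration() is an invented name, and it assumes the NVME_FEAT_ARBITRATION identifier from include/linux/nvme.h and the pci_dev pointer already present in struct nvme_dev.

/*
 * Hypothetical example (not part of this commit): read the arbitration
 * feature and report Dword 0 of the completion entry, which the new
 * nvme_get_features() signature returns through *result.
 */
static int nvme_report_arbitration(struct nvme_dev *dev)
{
	u32 result = 0;
	int status;

	/* This feature transfers no data, so a zero PRP address is passed. */
	status = nvme_get_features(dev, NVME_FEAT_ARBITRATION, 0, 0, &result);
	if (status)
		return status;

	dev_info(&dev->pci_dev->dev, "arbitration dword0: %#x\n", result);
	return 0;
}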
include/linux/nvme.h
@@ -137,6 +137,34 @@ enum {
 	NVME_LBAF_RP_DEGRADED	= 3,
 };
 
+struct nvme_smart_log {
+	__u8			critical_warning;
+	__u8			temperature[2];
+	__u8			avail_spare;
+	__u8			spare_thresh;
+	__u8			percent_used;
+	__u8			rsvd6[26];
+	__u8			data_units_read[16];
+	__u8			data_units_written[16];
+	__u8			host_reads[16];
+	__u8			host_writes[16];
+	__u8			ctrl_busy_time[16];
+	__u8			power_cycles[16];
+	__u8			power_on_hours[16];
+	__u8			unsafe_shutdowns[16];
+	__u8			media_errors[16];
+	__u8			num_err_log_entries[16];
+	__u8			rsvd192[320];
+};
+
+enum {
+	NVME_SMART_CRIT_SPARE		= 1 << 0,
+	NVME_SMART_CRIT_TEMPERATURE	= 1 << 1,
+	NVME_SMART_CRIT_RELIABILITY	= 1 << 2,
+	NVME_SMART_CRIT_MEDIA		= 1 << 3,
+	NVME_SMART_CRIT_VOLATILE_MEMORY	= 1 << 4,
+};
+
 struct nvme_lba_range_type {
 	__u8			type;
 	__u8			attributes;
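Note (not part of the diff above): the new struct nvme_smart_log pairs naturally with the "Set result from user admin command" change. The sketch below is a hypothetical userspace reader of the SMART / health information log (log identifier 0x02) through the driver's NVME_IOCTL_ADMIN_CMD passthrough on a namespace node. /dev/nvme0n1 is an assumed device path, the ioctl requires CAP_SYS_ADMIN, and it assumes struct nvme_admin_cmd and the ioctl number from <linux/nvme.h> are visible to userspace (copy the definitions locally if the header is not exported on your kernel).

/* Sketch only, with minimal error handling. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/nvme.h>

int main(void)
{
	struct nvme_smart_log smart;
	struct nvme_admin_cmd cmd;
	int fd = open("/dev/nvme0n1", O_RDONLY);	/* assumed namespace node */

	if (fd < 0)
		return 1;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = 0x02;			/* Get Log Page */
	cmd.nsid = 0xffffffff;			/* controller-wide log */
	cmd.addr = (unsigned long)&smart;
	cmd.data_len = sizeof(smart);		/* 512 bytes */
	/* CDW10: (number of dwords - 1) << 16 | log identifier */
	cmd.cdw10 = ((sizeof(smart) / 4 - 1) << 16) | 0x02;

	if (ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd) != 0)
		return 1;

	/* With "NVMe: Set result from user admin command", cmd.result now
	 * holds Dword 0 of the completion entry on return. */
	printf("critical_warning: %#x, result: %#x\n",
	       smart.critical_warning, cmd.result);
	return 0;
}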