Commit 4033f35d authored by Christoph Hellwig

nvme-pci: use dma memory for the host memory buffer descriptors

The NVMe 1.3 specification says in section 5.21.1.13:

"After a successful completion of a Set Features enabling the host memory
 buffer, the host shall not write to the associated host memory region,
 buffer size, or descriptor list until the host memory buffer has been
 disabled."

While this doesn't state that the descriptor list must remain accessible
to the device, it certainly implies it must remain readable by the device.

So switch to a dma coherent allocation for the descriptor list just to be
safe - the cost is negligible compared to the actual memory buffers.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Fixes: 87ad72a5 ("nvme-pci: implement host memory buffer support")
parent b925a2dc
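
For context, the patch moves the descriptor list from a streaming DMA mapping
to a coherent DMA allocation. The sketch below contrasts the two styles; it is
illustrative only, with stand-in names ("dev", "nr", "desc_size"), not the
driver's actual code:

	#include <linux/dma-mapping.h>
	#include <linux/slab.h>

	/*
	 * Old scheme (illustrative): map a kcalloc()ed list for the duration
	 * of the Set Features command only.  A streaming mapping is only
	 * guaranteed to be device-readable between map and unmap, but the
	 * host memory buffer stays enabled long after the command completes.
	 */
	static void *old_style(struct device *dev, int nr, size_t desc_size,
			dma_addr_t *dma)
	{
		void *descs = kcalloc(nr, desc_size, GFP_KERNEL);

		if (!descs)
			return NULL;
		*dma = dma_map_single(dev, descs, nr * desc_size,
				DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *dma)) {
			kfree(descs);
			return NULL;
		}
		return descs;
	}

	/*
	 * New scheme (illustrative): a coherent allocation stays
	 * device-readable for its whole lifetime, matching what the HMB
	 * feature requires.
	 */
	static void *new_style(struct device *dev, int nr, size_t desc_size,
			dma_addr_t *dma)
	{
		return dma_zalloc_coherent(dev, nr * desc_size, dma,
				GFP_KERNEL);
	}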
drivers/nvme/host/pci.c

@@ -109,6 +109,7 @@ struct nvme_dev {
 	/* host memory buffer support: */
 	u64 host_mem_size;
 	u32 nr_host_mem_descs;
+	dma_addr_t host_mem_descs_dma;
 	struct nvme_host_mem_buf_desc *host_mem_descs;
 	void **host_mem_desc_bufs;
 };
@@ -1565,16 +1566,10 @@ static inline void nvme_release_cmb(struct nvme_dev *dev)
 
 static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
 {
-	size_t len = dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs);
+	u64 dma_addr = dev->host_mem_descs_dma;
 	struct nvme_command c;
-	u64 dma_addr;
 	int ret;
 
-	dma_addr = dma_map_single(dev->dev, dev->host_mem_descs, len,
-			DMA_TO_DEVICE);
-	if (dma_mapping_error(dev->dev, dma_addr))
-		return -ENOMEM;
-
 	memset(&c, 0, sizeof(c));
 	c.features.opcode	= nvme_admin_set_features;
 	c.features.fid		= cpu_to_le32(NVME_FEAT_HOST_MEM_BUF);
@@ -1591,7 +1586,6 @@ static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
 			 "failed to set host mem (err %d, flags %#x).\n",
 			 ret, bits);
 	}
-	dma_unmap_single(dev->dev, dma_addr, len, DMA_TO_DEVICE);
 	return ret;
 }
 
@@ -1609,7 +1603,9 @@ static void nvme_free_host_mem(struct nvme_dev *dev)
 	kfree(dev->host_mem_desc_bufs);
 	dev->host_mem_desc_bufs = NULL;
 
-	kfree(dev->host_mem_descs);
+	dma_free_coherent(dev->dev,
+			dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs),
+			dev->host_mem_descs, dev->host_mem_descs_dma);
 	dev->host_mem_descs = NULL;
 }
 
@@ -1617,6 +1613,7 @@ static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
 {
 	struct nvme_host_mem_buf_desc *descs;
 	u32 chunk_size, max_entries, len;
+	dma_addr_t descs_dma;
 	int i = 0;
 	void **bufs;
 	u64 size = 0, tmp;
@@ -1627,7 +1624,8 @@ static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
 	tmp = (preferred + chunk_size - 1);
 	do_div(tmp, chunk_size);
 	max_entries = tmp;
-	descs = kcalloc(max_entries, sizeof(*descs), GFP_KERNEL);
+	descs = dma_zalloc_coherent(dev->dev, max_entries * sizeof(*descs),
+			&descs_dma, GFP_KERNEL);
 	if (!descs)
 		goto out;
 
@@ -1661,6 +1659,7 @@ static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
 	dev->nr_host_mem_descs = i;
 	dev->host_mem_size = size;
 	dev->host_mem_descs = descs;
+	dev->host_mem_descs_dma = descs_dma;
 	dev->host_mem_desc_bufs = bufs;
 	return 0;
 
@@ -1674,7 +1673,8 @@ static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
 
 	kfree(bufs);
 out_free_descs:
-	kfree(descs);
+	dma_free_coherent(dev->dev, max_entries * sizeof(*descs), descs,
+			descs_dma);
 out:
 	/* try a smaller chunk size if we failed early */
 	if (chunk_size >= PAGE_SIZE * 2 && (i == 0 || size < min)) {
...
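
As the nvme_free_host_mem() hunk shows, a coherent allocation must be released
with a matching dma_free_coherent() that passes back the same size, CPU
address, and DMA address the allocation returned - and, per the spec text
quoted above, only once the host memory buffer has been disabled. A minimal
sketch of the pairing, using the same hypothetical names as the sketch above:

	/* Hypothetical counterpart to new_style() above. */
	static void new_style_free(struct device *dev, int nr,
			size_t desc_size, void *descs, dma_addr_t dma)
	{
		/* size, CPU address and DMA address must match the alloc */
		dma_free_coherent(dev, nr * desc_size, descs, dma);
	}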