Commit 83166ac8 authored by Jens Axboe

Merge branch 'nvme-5.6-rc6' of git://git.infradead.org/nvme into block-5.6

Pull NVMe fixes from Keith:

"Two late nvme fabrics fixes for 5.6: a double free with the rdma
 transport, and a regression fix for tcp; please pull."

* 'nvme-5.6-rc6' of git://git.infradead.org/nvme:
  nvmet-tcp: set MSG_MORE only if we actually have more to send
  nvme-rdma: Avoid double freeing of async event data
parents b53df2e7 98fd5c72
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -850,9 +850,11 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
 	if (new)
 		blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
 out_free_async_qe:
-	nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
-		sizeof(struct nvme_command), DMA_TO_DEVICE);
-	ctrl->async_event_sqe.data = NULL;
+	if (ctrl->async_event_sqe.data) {
+		nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
+			sizeof(struct nvme_command), DMA_TO_DEVICE);
+		ctrl->async_event_sqe.data = NULL;
+	}
 out_free_queue:
 	nvme_rdma_free_queue(&ctrl->queues[0]);
 	return error;
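The rdma fix is the classic guard against a double free on overlapping teardown paths: free the buffer only while its pointer is still set, and clear the pointer right after the free so any later pass over the same cleanup code becomes a no-op. A minimal userspace C sketch of the idiom (the struct, names, and main() flow are illustrative, not taken from the driver):

```c
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the controller; only the freed resource matters here. */
struct ctrl {
	void *async_data;	/* NULL <=> not (or no longer) allocated */
};

/* Idempotent teardown: frees once; later calls are no-ops. */
static void ctrl_free_async_data(struct ctrl *c)
{
	if (c->async_data) {
		free(c->async_data);
		c->async_data = NULL;
	}
}

int main(void)
{
	struct ctrl c = { .async_data = malloc(64) };

	ctrl_free_async_data(&c);	/* an error path frees the buffer */
	ctrl_free_async_data(&c);	/* later teardown hits it again, safely */
	printf("freed once, called twice\n");
	return 0;
}
```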
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -515,7 +515,7 @@ static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
 	return 1;
 }
 
-static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd)
+static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
 {
 	struct nvmet_tcp_queue *queue = cmd->queue;
 	int ret;
@@ -523,9 +523,15 @@ static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd)
 	while (cmd->cur_sg) {
 		struct page *page = sg_page(cmd->cur_sg);
 		u32 left = cmd->cur_sg->length - cmd->offset;
+		int flags = MSG_DONTWAIT;
+
+		if ((!last_in_batch && cmd->queue->send_list_len) ||
+		    cmd->wbytes_done + left < cmd->req.transfer_len ||
+		    queue->data_digest || !queue->nvme_sq.sqhd_disabled)
+			flags |= MSG_MORE;
 
 		ret = kernel_sendpage(cmd->queue->sock, page, cmd->offset,
-					left, MSG_DONTWAIT | MSG_MORE);
+					left, flags);
 		if (ret <= 0)
 			return ret;
@@ -660,7 +666,7 @@ static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
 	}
 
 	if (cmd->state == NVMET_TCP_SEND_DATA) {
-		ret = nvmet_try_send_data(cmd);
+		ret = nvmet_try_send_data(cmd, last_in_batch);
 		if (ret <= 0)
 			goto done_send;
 	}
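The tcp change hinges on what MSG_MORE means: it tells the stack that more data is coming, so it may hold the current bytes back and coalesce them with the next send. Setting it unconditionally, as the old code did, can delay the final chunk of a response when nothing follows it; the fix sets the flag only when another send is actually queued or more of the transfer remains. A hedged userspace sketch of the same rule using send(2) (send_chunked and its parameters are illustrative, not kernel API):

```c
#include <sys/socket.h>
#include <sys/types.h>

/*
 * Send buf in fixed-size chunks over a connected TCP socket fd.
 * MSG_MORE is set on every chunk except the last, so the stack may
 * coalesce intermediate chunks but pushes the final one out at once.
 */
static ssize_t send_chunked(int fd, const char *buf, size_t len, size_t chunk)
{
	size_t off = 0;

	while (off < len) {
		size_t n = len - off < chunk ? len - off : chunk;
		int flags = MSG_DONTWAIT;

		if (off + n < len)	/* more to send after this chunk? */
			flags |= MSG_MORE;

		ssize_t ret = send(fd, buf + off, n, flags);
		if (ret <= 0)
			return ret;
		off += (size_t)ret;
	}
	return (ssize_t)off;
}
```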