Commit 0c29f9fa authored by Li Feng, committed by Keith Busch

nvme/tcp: Add wq_unbound modparam for nvme_tcp_wq

The default nvme_tcp_wq will use all CPUs to process tasks. Sometimes it is
necessary to set CPU affinity to improve performance.

A new module parameter wq_unbound is added here. If set to true, users can
configure cpu affinity through
/sys/devices/virtual/workqueue/nvme_tcp_wq/cpumask.
Signed-off-by: Li Feng <fengli@smartx.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Keith Busch <kbusch@kernel.org>
parent ec58afb4
...@@ -36,6 +36,14 @@ static int so_priority; ...@@ -36,6 +36,14 @@ static int so_priority;
module_param(so_priority, int, 0644); module_param(so_priority, int, 0644);
MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority"); MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");
/*
 * Use the unbound workqueue for nvme_tcp_wq, then we can set the cpu affinity
 * from sysfs.
 *
 * When enabled (together with WQ_SYSFS on the workqueue), the affinity is
 * configurable via /sys/devices/virtual/workqueue/nvme_tcp_wq/cpumask.
 * Defaults to false, preserving the historical bound-workqueue behavior.
 */
static bool wq_unbound;
/* 0644: readable by all, writable by root only. */
module_param(wq_unbound, bool, 0644);
MODULE_PARM_DESC(wq_unbound, "Use unbound workqueue for nvme-tcp IO context (default false)");
/* /*
* TLS handshake timeout * TLS handshake timeout
*/ */
...@@ -1551,7 +1559,10 @@ static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue) ...@@ -1551,7 +1559,10 @@ static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
else if (nvme_tcp_poll_queue(queue)) else if (nvme_tcp_poll_queue(queue))
n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] -
ctrl->io_queues[HCTX_TYPE_READ] - 1; ctrl->io_queues[HCTX_TYPE_READ] - 1;
queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false); if (wq_unbound)
queue->io_cpu = WORK_CPU_UNBOUND;
else
queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
} }
static void nvme_tcp_tls_done(void *data, int status, key_serial_t pskid) static void nvme_tcp_tls_done(void *data, int status, key_serial_t pskid)
...@@ -2790,6 +2801,8 @@ static struct nvmf_transport_ops nvme_tcp_transport = { ...@@ -2790,6 +2801,8 @@ static struct nvmf_transport_ops nvme_tcp_transport = {
static int __init nvme_tcp_init_module(void) static int __init nvme_tcp_init_module(void)
{ {
unsigned int wq_flags = WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_SYSFS;
BUILD_BUG_ON(sizeof(struct nvme_tcp_hdr) != 8); BUILD_BUG_ON(sizeof(struct nvme_tcp_hdr) != 8);
BUILD_BUG_ON(sizeof(struct nvme_tcp_cmd_pdu) != 72); BUILD_BUG_ON(sizeof(struct nvme_tcp_cmd_pdu) != 72);
BUILD_BUG_ON(sizeof(struct nvme_tcp_data_pdu) != 24); BUILD_BUG_ON(sizeof(struct nvme_tcp_data_pdu) != 24);
...@@ -2799,8 +2812,10 @@ static int __init nvme_tcp_init_module(void) ...@@ -2799,8 +2812,10 @@ static int __init nvme_tcp_init_module(void)
BUILD_BUG_ON(sizeof(struct nvme_tcp_icresp_pdu) != 128); BUILD_BUG_ON(sizeof(struct nvme_tcp_icresp_pdu) != 128);
BUILD_BUG_ON(sizeof(struct nvme_tcp_term_pdu) != 24); BUILD_BUG_ON(sizeof(struct nvme_tcp_term_pdu) != 24);
nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq", if (wq_unbound)
WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_SYSFS, 0); wq_flags |= WQ_UNBOUND;
nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq", wq_flags, 0);
if (!nvme_tcp_wq) if (!nvme_tcp_wq)
return -ENOMEM; return -ENOMEM;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment