Commit a80a8461 authored by NeilBrown, committed by Trond Myklebust

SUNRPC: remove scheduling boost for "SWAPPER" tasks.

Currently, tasks marked as "swapper" tasks are placed at the front of
non-priority rpc_queues and are sorted ahead of non-swapper tasks on
the transport's ->xmit_queue.

This is pointless as currently *all* tasks for a mount that has swap
enabled on *any* file are marked as "swapper" tasks.  So the net result
is that the non-priority rpc_queues are reverse-ordered (LIFO).
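
The reversal is easy to see in isolation. Below is a minimal userspace
sketch (not kernel code; add_head() and add_tail() merely stand in for
list_add() and list_add_tail()) showing that head insertion on every
enqueue, which is what happens once every task is a "swapper" task,
drains the queue in reverse submission order:

#include <stdio.h>

struct node {
	int id;
	struct node *next;
};

/* stand-in for list_add(): insert at the head, as the removed
 * RPC_IS_SWAPPER() branch did */
static void add_head(struct node **head, struct node *n)
{
	n->next = *head;
	*head = n;
}

/* stand-in for list_add_tail(): append, the normal FIFO path */
static void add_tail(struct node **head, struct node *n)
{
	while (*head)
		head = &(*head)->next;
	n->next = NULL;
	*head = n;
}

int main(void)
{
	struct node a = { 1 }, b = { 2 }, c = { 3 };
	struct node x = { 1 }, y = { 2 }, z = { 3 };
	struct node *swapper_q = NULL, *fifo_q = NULL;
	struct node *n;

	/* every task marked "swapper": each enqueue goes to the head */
	add_head(&swapper_q, &a);
	add_head(&swapper_q, &b);
	add_head(&swapper_q, &c);

	/* normal tasks: appended in submission order */
	add_tail(&fifo_q, &x);
	add_tail(&fifo_q, &y);
	add_tail(&fifo_q, &z);

	for (n = swapper_q; n; n = n->next)
		printf("%d ", n->id);	/* 3 2 1: reverse order (LIFO) */
	printf("\n");
	for (n = fifo_q; n; n = n->next)
		printf("%d ", n->id);	/* 1 2 3: submission order (FIFO) */
	printf("\n");
	return 0;
}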

This scheduling boost is not necessary to avoid deadlocks, and hurts
fairness, so remove it.  If there were a need to expedite some requests,
the tk_priority mechanism is a more appropriate tool.
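
For reference, a hypothetical sketch of how a caller could expedite
requests through that existing priority mechanism (struct rpc_task_setup,
RPC_PRIORITY_HIGH and rpc_run_task() are existing SUNRPC interfaces;
clnt, msg and my_rpc_call_ops are caller-supplied placeholders, not part
of this patch):

	/* Hypothetical illustration: expedite a request via the task
	 * priority rather than a swapper boost. */
	struct rpc_task_setup task_setup_data = {
		.rpc_client	= clnt,
		.rpc_message	= &msg,
		.callback_ops	= &my_rpc_call_ops,
		.flags		= RPC_TASK_ASYNC,
		.priority	= RPC_PRIORITY_HIGH,	/* sorted ahead on priority queues */
	};
	struct rpc_task *task = rpc_run_task(&task_setup_data);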
Signed-off-by: NeilBrown <neilb@suse.de>
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
parent a7210354
@@ -186,11 +186,6 @@ static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
 /*
  * Add new request to wait queue.
- *
- * Swapper tasks always get inserted at the head of the queue.
- * This should avoid many nasty memory deadlocks and hopefully
- * improve overall performance.
- * Everyone else gets appended to the queue to ensure proper FIFO behavior.
  */
 static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
 		struct rpc_task *task,
@@ -199,8 +194,6 @@ static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
 	INIT_LIST_HEAD(&task->u.tk_wait.timer_list);
 	if (RPC_IS_PRIORITY(queue))
 		__rpc_add_wait_queue_priority(queue, task, queue_priority);
-	else if (RPC_IS_SWAPPER(task))
-		list_add(&task->u.tk_wait.list, &queue->tasks[0]);
 	else
 		list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
 	task->tk_waitqueue = queue;
@@ -1354,17 +1354,6 @@ xprt_request_enqueue_transmit(struct rpc_task *task)
 			INIT_LIST_HEAD(&req->rq_xmit2);
 			goto out;
 		}
-	} else if (RPC_IS_SWAPPER(task)) {
-		list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
-			if (pos->rq_cong || pos->rq_bytes_sent)
-				continue;
-			if (RPC_IS_SWAPPER(pos->rq_task))
-				continue;
-			/* Note: req is added _before_ pos */
-			list_add_tail(&req->rq_xmit, &pos->rq_xmit);
-			INIT_LIST_HEAD(&req->rq_xmit2);
-			goto out;
-		}
 	} else if (!req->rq_seqno) {
 		list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
 			if (pos->rq_task->tk_owner != task->tk_owner)