Commit 05949945 authored by Tejun Heo

afs: don't use PREPARE_WORK

PREPARE_[DELAYED_]WORK() are being phased out.  They have few users
and carry a nasty surprise in terms of reentrancy guarantees: the
workqueue code considers work items to be different if they don't
have the same work function.

afs_call->async_work is multiplexed with multiple work functions.
Introduce afs_async_workfn(), which invokes afs_call->async_workfn,
always use it as the work function, and update the users to set the
->async_workfn field instead of overriding the work function with
PREPARE_WORK().
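
A condensed, userspace-only sketch of that pattern (not the kernel
code): the work item always carries the same trampoline work function,
and the per-call behaviour is selected by retargeting ->async_workfn.
The struct layouts, the local container_of macro, and the
fake_queue_work() helper are simplifications standing in for the real
workqueue machinery.

/*
 * Simplified illustration of the dispatch-through-one-workfn pattern.
 * Names mirror the AFS code; the workqueue itself is stubbed out.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct {
	void (*func)(struct work_struct *work);
};

struct afs_call {
	void (*async_workfn)(struct work_struct *work); /* per-call handler */
	struct work_struct async_work;                  /* fixed work item */
};

/* The only function the "workqueue" ever sees for async_work. */
static void afs_async_workfn(struct work_struct *work)
{
	struct afs_call *call = container_of(work, struct afs_call, async_work);

	call->async_workfn(work);
}

static void afs_process_async_call(struct work_struct *work)
{
	printf("processing call %p\n",
	       (void *)container_of(work, struct afs_call, async_work));
}

static void afs_delete_async_call(struct work_struct *work)
{
	printf("deleting call %p\n",
	       (void *)container_of(work, struct afs_call, async_work));
}

/* Stand-in for queue_work(): just run the item's work function. */
static void fake_queue_work(struct work_struct *work)
{
	work->func(work);
}

int main(void)
{
	struct afs_call call = {
		.async_workfn = afs_process_async_call,
		.async_work   = { .func = afs_async_workfn },
	};

	fake_queue_work(&call.async_work);         /* runs afs_process_async_call */

	call.async_workfn = afs_delete_async_call; /* retarget, same work function */
	fake_queue_work(&call.async_work);         /* runs afs_delete_async_call */

	return 0;
}

Because the work function seen by the workqueue never changes, the
non-reentrancy guarantee keyed on the work function stays intact even
when the call is retargeted.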

It would probably be best to route this with other related updates
through the workqueue tree.

Compile tested.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: David Howells <dhowells@redhat.com>
Cc: linux-afs@lists.infradead.org
parent 9ca97374
@@ -75,6 +75,7 @@ struct afs_call {
 	const struct afs_call_type *type;	/* type of call */
 	const struct afs_wait_mode *wait_mode;	/* completion wait mode */
 	wait_queue_head_t	waitq;		/* processes awaiting completion */
+	work_func_t		async_workfn;
 	struct work_struct	async_work;	/* asynchronous work processor */
 	struct work_struct	work;		/* actual work processor */
 	struct sk_buff_head	rx_queue;	/* received packets */
@@ -644,7 +644,7 @@ static void afs_process_async_call(struct work_struct *work)
 
 		/* we can't just delete the call because the work item may be
 		 * queued */
-		PREPARE_WORK(&call->async_work, afs_delete_async_call);
+		call->async_workfn = afs_delete_async_call;
 		queue_work(afs_async_calls, &call->async_work);
 	}
 
@@ -663,6 +663,13 @@ void afs_transfer_reply(struct afs_call *call, struct sk_buff *skb)
 	call->reply_size += len;
 }
 
+static void afs_async_workfn(struct work_struct *work)
+{
+	struct afs_call *call = container_of(work, struct afs_call, async_work);
+
+	call->async_workfn(work);
+}
+
 /*
  * accept the backlog of incoming calls
  */
@@ -685,7 +692,8 @@ static void afs_collect_incoming_call(struct work_struct *work)
 		return;
 	}
 
-	INIT_WORK(&call->async_work, afs_process_async_call);
+	call->async_workfn = afs_process_async_call;
+	INIT_WORK(&call->async_work, afs_async_workfn);
 	call->wait_mode = &afs_async_incoming_call;
 	call->type = &afs_RXCMxxxx;
 	init_waitqueue_head(&call->waitq);