Commit b416268b authored by Florian Westphal, committed by David S. Miller

mptcp: use mptcp worker for path management

We can re-use the existing work queue to handle path management
instead of a dedicated work queue.  Just move pm_worker to protocol.c,
call it from the mptcp worker and get rid of the msk lock (already held).
Signed-off-by: Florian Westphal <fw@strlen.de>
Reviewed-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 847d97e0
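For context, here is a minimal, self-contained sketch (not part of this patch) of the pattern the change relies on: several event sources set bits in a shared status word under a spinlock and kick a single work item on the system workqueue, and the worker drains every pending bit in one pass. The demo_* names are invented for illustration only; only spin_lock_bh(), BIT(), container_of(), INIT_WORK() and schedule_work() are real kernel APIs.

/* Illustrative sketch only -- not part of the patch below. */
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bits.h>

enum { DEMO_EV_ADDR, DEMO_EV_ESTABLISHED };

struct demo_sock {
	spinlock_t		lock;
	unsigned long		status;	/* pending event bits */
	struct work_struct	work;	/* one shared work item */
};

static void demo_worker(struct work_struct *work)
{
	struct demo_sock *ds = container_of(work, struct demo_sock, work);

	spin_lock_bh(&ds->lock);
	if (ds->status & BIT(DEMO_EV_ADDR)) {
		ds->status &= ~BIT(DEMO_EV_ADDR);
		/* handle address event */
	}
	if (ds->status & BIT(DEMO_EV_ESTABLISHED)) {
		ds->status &= ~BIT(DEMO_EV_ESTABLISHED);
		/* handle connection-established event */
	}
	spin_unlock_bh(&ds->lock);
}

static void demo_sock_init(struct demo_sock *ds)
{
	spin_lock_init(&ds->lock);
	ds->status = 0;
	INIT_WORK(&ds->work, demo_worker);
}

static void demo_schedule_event(struct demo_sock *ds, int ev)
{
	spin_lock_bh(&ds->lock);
	ds->status |= BIT(ev);
	spin_unlock_bh(&ds->lock);

	/* reuse the system workqueue instead of a dedicated one */
	schedule_work(&ds->work);
}

The patch applies the same shape to struct mptcp_sock: mptcp_pm_schedule_work() sets the status bit and schedules msk->work, and mptcp_worker() calls pm_work() whenever msk->pm.status is non-zero.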
--- a/net/mptcp/pm.c
+++ b/net/mptcp/pm.c
@@ -10,8 +10,6 @@
 #include <net/mptcp.h>
 #include "protocol.h"
 
-static struct workqueue_struct *pm_wq;
-
 /* path manager command handlers */
 
 int mptcp_pm_announce_addr(struct mptcp_sock *msk,
@@ -78,7 +76,7 @@ static bool mptcp_pm_schedule_work(struct mptcp_sock *msk,
 		return false;
 
 	msk->pm.status |= BIT(new_status);
-	if (queue_work(pm_wq, &msk->pm.work))
+	if (schedule_work(&msk->work))
 		sock_hold((struct sock *)msk);
 	return true;
 }
@@ -181,35 +179,6 @@ int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
 	return mptcp_pm_nl_get_local_id(msk, skc);
 }
 
-static void pm_worker(struct work_struct *work)
-{
-	struct mptcp_pm_data *pm = container_of(work, struct mptcp_pm_data,
-						work);
-	struct mptcp_sock *msk = container_of(pm, struct mptcp_sock, pm);
-	struct sock *sk = (struct sock *)msk;
-
-	lock_sock(sk);
-	spin_lock_bh(&msk->pm.lock);
-
-	pr_debug("msk=%p status=%x", msk, pm->status);
-	if (pm->status & BIT(MPTCP_PM_ADD_ADDR_RECEIVED)) {
-		pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_RECEIVED);
-		mptcp_pm_nl_add_addr_received(msk);
-	}
-	if (pm->status & BIT(MPTCP_PM_ESTABLISHED)) {
-		pm->status &= ~BIT(MPTCP_PM_ESTABLISHED);
-		mptcp_pm_nl_fully_established(msk);
-	}
-	if (pm->status & BIT(MPTCP_PM_SUBFLOW_ESTABLISHED)) {
-		pm->status &= ~BIT(MPTCP_PM_SUBFLOW_ESTABLISHED);
-		mptcp_pm_nl_subflow_established(msk);
-	}
-
-	spin_unlock_bh(&msk->pm.lock);
-	release_sock(sk);
-	sock_put(sk);
-}
-
 void mptcp_pm_data_init(struct mptcp_sock *msk)
 {
 	msk->pm.add_addr_signaled = 0;
@@ -223,22 +192,11 @@ void mptcp_pm_data_init(struct mptcp_sock *msk)
 	msk->pm.status = 0;
 
 	spin_lock_init(&msk->pm.lock);
-	INIT_WORK(&msk->pm.work, pm_worker);
 
 	mptcp_pm_nl_data_init(msk);
 }
 
-void mptcp_pm_close(struct mptcp_sock *msk)
-{
-	if (cancel_work_sync(&msk->pm.work))
-		sock_put((struct sock *)msk);
-}
-
 void __init mptcp_pm_init(void)
 {
-	pm_wq = alloc_workqueue("pm_wq", WQ_UNBOUND | WQ_MEM_RECLAIM, 8);
-	if (!pm_wq)
-		panic("Failed to allocate workqueue");
-
 	mptcp_pm_nl_init();
 }
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -1214,6 +1214,29 @@ static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu)
 	return 0;
 }
 
+static void pm_work(struct mptcp_sock *msk)
+{
+	struct mptcp_pm_data *pm = &msk->pm;
+
+	spin_lock_bh(&msk->pm.lock);
+
+	pr_debug("msk=%p status=%x", msk, pm->status);
+	if (pm->status & BIT(MPTCP_PM_ADD_ADDR_RECEIVED)) {
+		pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_RECEIVED);
+		mptcp_pm_nl_add_addr_received(msk);
+	}
+	if (pm->status & BIT(MPTCP_PM_ESTABLISHED)) {
+		pm->status &= ~BIT(MPTCP_PM_ESTABLISHED);
+		mptcp_pm_nl_fully_established(msk);
+	}
+	if (pm->status & BIT(MPTCP_PM_SUBFLOW_ESTABLISHED)) {
+		pm->status &= ~BIT(MPTCP_PM_SUBFLOW_ESTABLISHED);
+		mptcp_pm_nl_subflow_established(msk);
+	}
+
+	spin_unlock_bh(&msk->pm.lock);
+}
+
 static void mptcp_worker(struct work_struct *work)
 {
 	struct mptcp_sock *msk = container_of(work, struct mptcp_sock, work);
@@ -1230,6 +1253,9 @@ static void mptcp_worker(struct work_struct *work)
 	__mptcp_flush_join_list(msk);
 	__mptcp_move_skbs(msk);
 
+	if (msk->pm.status)
+		pm_work(msk);
+
 	if (test_and_clear_bit(MPTCP_WORK_EOF, &msk->flags))
 		mptcp_check_for_eof(msk);
@@ -1420,7 +1446,6 @@ static void mptcp_close(struct sock *sk, long timeout)
 	}
 
 	mptcp_cancel_work(sk);
-	mptcp_pm_close(msk);
 
 	__skb_queue_purge(&sk->sk_receive_queue);
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -174,8 +174,6 @@ struct mptcp_pm_data {
 	u8		local_addr_max;
 	u8		subflows_max;
 	u8		status;
-
-	struct work_struct work;
 };
 
 struct mptcp_data_frag {
@@ -412,7 +410,6 @@ void mptcp_crypto_hmac_sha(u64 key1, u64 key2, u8 *msg, int len, void *hmac);
 void __init mptcp_pm_init(void);
 void mptcp_pm_data_init(struct mptcp_sock *msk);
-void mptcp_pm_close(struct mptcp_sock *msk);
 void mptcp_pm_new_connection(struct mptcp_sock *msk, int server_side);
 void mptcp_pm_fully_established(struct mptcp_sock *msk);
 bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk);