Commit a5cb752b authored by Paolo Abeni, committed by Jakub Kicinski

mptcp: use mptcp_schedule_work instead of open-coding it

Beyond reducing code duplication this also avoids scheduling
the mptcp_worker on a closed socket on some edge scenarios.

The addressed issue is actually older than the blamed commit
below, but this fix needs it as a pre-requisite.

Fixes: ba8f48f7 ("mptcp: introduce mptcp_schedule_work")
Cc: stable@vger.kernel.org
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Reviewed-by: Matthieu Baerts <matthieu.baerts@tessares.net>
Signed-off-by: Matthieu Baerts <matthieu.baerts@tessares.net>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 5b7be2d4
net/mptcp/options.c
@@ -1192,9 +1192,8 @@ bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
 	 */
 	if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {
 		if (mp_opt.data_fin && mp_opt.data_len == 1 &&
-		    mptcp_update_rcv_data_fin(msk, mp_opt.data_seq, mp_opt.dsn64) &&
-		    schedule_work(&msk->work))
-			sock_hold(subflow->conn);
+		    mptcp_update_rcv_data_fin(msk, mp_opt.data_seq, mp_opt.dsn64))
+			mptcp_schedule_work((struct sock *)msk);

 		return true;
 	}
net/mptcp/subflow.c
@@ -408,9 +408,8 @@ void mptcp_subflow_reset(struct sock *ssk)
 	tcp_send_active_reset(ssk, GFP_ATOMIC);
 	tcp_done(ssk);
-	if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags) &&
-	    schedule_work(&mptcp_sk(sk)->work))
-		return; /* worker will put sk for us */
+	if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags))
+		mptcp_schedule_work(sk);

 	sock_put(sk);
 }
@@ -1118,8 +1117,8 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
 		skb_ext_del(skb, SKB_EXT_MPTCP);
 		return MAPPING_OK;
 	} else {
-		if (updated && schedule_work(&msk->work))
-			sock_hold((struct sock *)msk);
+		if (updated)
+			mptcp_schedule_work((struct sock *)msk);

 		return MAPPING_DATA_FIN;
 	}
@@ -1222,17 +1221,12 @@ static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
 /* sched mptcp worker to remove the subflow if no more data is pending */
 static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
 {
-	struct sock *sk = (struct sock *)msk;
-
 	if (likely(ssk->sk_state != TCP_CLOSE))
 		return;

 	if (skb_queue_empty(&ssk->sk_receive_queue) &&
-	    !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags)) {
-		sock_hold(sk);
-		if (!schedule_work(&msk->work))
-			sock_put(sk);
-	}
+	    !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
+		mptcp_schedule_work((struct sock *)msk);
 }

 static bool subflow_can_fallback(struct mptcp_subflow_context *subflow)
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment