Commit 9f12e97b authored by Geliang Tang, committed by David S. Miller

mptcp: unify RM_ADDR and RM_SUBFLOW receiving

There is some duplicated code in mptcp_pm_nl_rm_addr_received and
mptcp_pm_nl_rm_subflow_received. This patch unifies them into a new
function named mptcp_pm_nl_rm_addr_or_subflow, which uses the input
parameter rm_type to decide whether an address or a subflow is being removed.
Suggested-by: Paolo Abeni <pabeni@redhat.com>
Reviewed-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
Signed-off-by: Geliang Tang <geliangtang@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 774c8a8d
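For readers skimming the diff below: the refactor collapses two near-identical removal loops into one helper that takes a discriminator argument, leaving the old entry points as thin wrappers. The following stand-alone sketch (hypothetical names and types, not the kernel code) illustrates that dispatch pattern:

#include <stdio.h>

/* Hypothetical discriminator, standing in for the MIB field used in the patch. */
enum rm_type { RM_ADDR, RM_SUBFLOW };

struct rm_list {
        unsigned char ids[8];
        unsigned char nr;
};

/* One helper handles both cases; the rm_type argument selects the behaviour,
 * mirroring the role of mptcp_pm_nl_rm_addr_or_subflow() in the diff. */
static void rm_addr_or_subflow(const struct rm_list *rm_list, enum rm_type rm_type)
{
        unsigned char i;

        if (!rm_list->nr)
                return;

        for (i = 0; i < rm_list->nr; i++)
                printf("%s: remove id %u\n",
                       rm_type == RM_ADDR ? "address" : "subflow",
                       rm_list->ids[i]);
}

/* The former entry points shrink to thin wrappers around the helper. */
static void rm_addr_received(const struct rm_list *rm_list)
{
        rm_addr_or_subflow(rm_list, RM_ADDR);
}

static void rm_subflow_received(const struct rm_list *rm_list)
{
        rm_addr_or_subflow(rm_list, RM_SUBFLOW);
}

int main(void)
{
        struct rm_list list = { .ids = { 1, 3 }, .nr = 2 };

        rm_addr_received(&list);
        rm_subflow_received(&list);
        return 0;
}

In the actual patch, the unified helper additionally matches against either the subflow's local_id or remote_id and updates different path-manager counters depending on rm_type, as shown in the diff below.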
@@ -586,45 +586,68 @@ int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
         return -EINVAL;
 }
 
-static void mptcp_pm_nl_rm_addr_received(struct mptcp_sock *msk)
+static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
+                                           const struct mptcp_rm_list *rm_list,
+                                           enum linux_mptcp_mib_field rm_type)
 {
         struct mptcp_subflow_context *subflow, *tmp;
         struct sock *sk = (struct sock *)msk;
         u8 i;
 
-        pr_debug("address rm_list_nr %d", msk->pm.rm_list_rx.nr);
+        pr_debug("%s rm_list_nr %d",
+                 rm_type == MPTCP_MIB_RMADDR ? "address" : "subflow", rm_list->nr);
 
         msk_owned_by_me(msk);
 
-        if (!msk->pm.rm_list_rx.nr)
+        if (!rm_list->nr)
                 return;
 
         if (list_empty(&msk->conn_list))
                 return;
 
-        for (i = 0; i < msk->pm.rm_list_rx.nr; i++) {
+        for (i = 0; i < rm_list->nr; i++) {
                 list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) {
                         struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
                         int how = RCV_SHUTDOWN | SEND_SHUTDOWN;
+                        u8 id = subflow->local_id;
+
+                        if (rm_type == MPTCP_MIB_RMADDR)
+                                id = subflow->remote_id;
 
-                        if (msk->pm.rm_list_rx.ids[i] != subflow->remote_id)
+                        if (rm_list->ids[i] != id)
                                 continue;
 
-                        pr_debug(" -> address rm_list_ids[%d]=%u", i, msk->pm.rm_list_rx.ids[i]);
+                        pr_debug(" -> %s rm_list_ids[%d]=%u local_id=%u remote_id=%u",
+                                 rm_type == MPTCP_MIB_RMADDR ? "address" : "subflow",
+                                 i, rm_list->ids[i], subflow->local_id, subflow->remote_id);
                         spin_unlock_bh(&msk->pm.lock);
                         mptcp_subflow_shutdown(sk, ssk, how);
                         mptcp_close_ssk(sk, ssk, subflow);
                         spin_lock_bh(&msk->pm.lock);
 
-                        msk->pm.add_addr_accepted--;
-                        msk->pm.subflows--;
-                        WRITE_ONCE(msk->pm.accept_addr, true);
-
-                        __MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RMADDR);
+                        if (rm_type == MPTCP_MIB_RMADDR) {
+                                msk->pm.add_addr_accepted--;
+                                WRITE_ONCE(msk->pm.accept_addr, true);
+                        } else if (rm_type == MPTCP_MIB_RMSUBFLOW) {
+                                msk->pm.local_addr_used--;
+                        }
+                        msk->pm.subflows--;
+                        __MPTCP_INC_STATS(sock_net(sk), rm_type);
                 }
         }
 }
 
+static void mptcp_pm_nl_rm_addr_received(struct mptcp_sock *msk)
+{
+        mptcp_pm_nl_rm_addr_or_subflow(msk, &msk->pm.rm_list_rx, MPTCP_MIB_RMADDR);
+}
+
+void mptcp_pm_nl_rm_subflow_received(struct mptcp_sock *msk,
+                                     const struct mptcp_rm_list *rm_list)
+{
+        mptcp_pm_nl_rm_addr_or_subflow(msk, rm_list, MPTCP_MIB_RMSUBFLOW);
+}
+
 void mptcp_pm_nl_work(struct mptcp_sock *msk)
 {
         struct mptcp_pm_data *pm = &msk->pm;
@@ -658,45 +681,6 @@ void mptcp_pm_nl_work(struct mptcp_sock *msk)
         spin_unlock_bh(&msk->pm.lock);
 }
 
-void mptcp_pm_nl_rm_subflow_received(struct mptcp_sock *msk,
-                                     const struct mptcp_rm_list *rm_list)
-{
-        struct mptcp_subflow_context *subflow, *tmp;
-        struct sock *sk = (struct sock *)msk;
-        u8 i;
-
-        pr_debug("subflow rm_list_nr %d", rm_list->nr);
-
-        msk_owned_by_me(msk);
-
-        if (!rm_list->nr)
-                return;
-
-        if (list_empty(&msk->conn_list))
-                return;
-
-        for (i = 0; i < rm_list->nr; i++) {
-                list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) {
-                        struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
-                        int how = RCV_SHUTDOWN | SEND_SHUTDOWN;
-
-                        if (rm_list->ids[i] != subflow->local_id)
-                                continue;
-
-                        pr_debug(" -> subflow rm_list_ids[%d]=%u", i, rm_list->ids[i]);
-                        spin_unlock_bh(&msk->pm.lock);
-                        mptcp_subflow_shutdown(sk, ssk, how);
-                        mptcp_close_ssk(sk, ssk, subflow);
-                        spin_lock_bh(&msk->pm.lock);
-
-                        msk->pm.local_addr_used--;
-                        msk->pm.subflows--;
-
-                        __MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RMSUBFLOW);
-                }
-        }
-}
-
 static bool address_use_port(struct mptcp_pm_addr_entry *entry)
 {
         return (entry->addr.flags &