Commit 9fda3510 authored by Hans Wippel, committed by David S. Miller

net/smc: move link group list to smc_core

This patch moves the global link group list to smc_core where the link
group functions are. To make this work, it moves code in af_smc and
smc_ib that operates on the link group list to smc_core as well.

While at it, the link group counter is integrated into the list
structure and initialized to zero.
Signed-off-by: Hans Wippel <hwippel@linux.ibm.com>
Signed-off-by: Ursula Braun <ubraun@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 69cb7dc0
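
For context on the counter change, below is a minimal user-space sketch of how a counter kept next to the link group list can drive the 4-byte link group id, mirroring the two changed lines in smc_lgr_create() shown in the diff. The stand-in struct and the constant values are assumptions made for illustration only; they are not part of the patch.

/* Illustrative sketch only: derives a 4-byte link group id from the
 * counter that now lives inside the list structure (smc_lgr_list.num).
 * Kernel types (list_head, spinlock_t) are omitted; the constant
 * values below are assumed for this example.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SMC_LGR_ID_SIZE		4	/* size of the link group id, as in the diff */
#define SMC_LGR_NUM_INCR	256	/* assumed increment per new link group */

static struct {
	uint32_t num;			/* unique link group number, starts at 0 */
} lgr_list = { .num = 0 };

int main(void)
{
	uint8_t id[SMC_LGR_ID_SIZE];

	/* bump the counter, then copy its raw bytes into the fixed-size id,
	 * mirroring the two changed lines in smc_lgr_create()
	 */
	lgr_list.num += SMC_LGR_NUM_INCR;
	memcpy(id, (uint8_t *)&lgr_list.num, SMC_LGR_ID_SIZE);

	printf("link group id bytes: %02x %02x %02x %02x\n",
	       id[0], id[1], id[2], id[3]);
	return 0;
}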
net/smc/af_smc.c
@@ -46,11 +46,6 @@ static DEFINE_MUTEX(smc_create_lgr_pending);	/* serialize link group
 						 * creation
 						 */
-struct smc_lgr_list smc_lgr_list = {	/* established link groups */
-	.lock = __SPIN_LOCK_UNLOCKED(smc_lgr_list.lock),
-	.list = LIST_HEAD_INIT(smc_lgr_list.list),
-};
-
 static void smc_tcp_listen_work(struct work_struct *);
 
 static void smc_set_keepalive(struct sock *sk, int val)
@@ -1637,19 +1632,7 @@ static int __init smc_init(void)
 
 static void __exit smc_exit(void)
 {
-	struct smc_link_group *lgr, *lg;
-	LIST_HEAD(lgr_freeing_list);
-
-	spin_lock_bh(&smc_lgr_list.lock);
-	if (!list_empty(&smc_lgr_list.list))
-		list_splice_init(&smc_lgr_list.list, &lgr_freeing_list);
-	spin_unlock_bh(&smc_lgr_list.lock);
-	list_for_each_entry_safe(lgr, lg, &lgr_freeing_list, list) {
-		list_del_init(&lgr->list);
-		smc_llc_link_inactive(&lgr->lnk[SMC_SINGLE_LINK]);
-		cancel_delayed_work_sync(&lgr->free_work);
-		smc_lgr_free(lgr); /* free link group */
-	}
+	smc_core_exit();
 	static_branch_disable(&tcp_have_smc);
 	smc_ib_unregister_client();
 	sock_unregister(PF_SMC);
net/smc/smc_core.c
@@ -30,7 +30,11 @@
 #define SMC_LGR_FREE_DELAY_SERV		(600 * HZ)
 #define SMC_LGR_FREE_DELAY_CLNT		(SMC_LGR_FREE_DELAY_SERV + 10)
 
-static u32 smc_lgr_num;			/* unique link group number */
+static struct smc_lgr_list smc_lgr_list = {	/* established link groups */
+	.lock = __SPIN_LOCK_UNLOCKED(smc_lgr_list.lock),
+	.list = LIST_HEAD_INIT(smc_lgr_list.list),
+	.num = 0,
+};
 
 static void smc_buf_free(struct smc_buf_desc *buf_desc, struct smc_link *lnk,
 			 bool is_rmb);
@@ -181,8 +185,8 @@ static int smc_lgr_create(struct smc_sock *smc,
 		INIT_LIST_HEAD(&lgr->sndbufs[i]);
 		INIT_LIST_HEAD(&lgr->rmbs[i]);
 	}
-	smc_lgr_num += SMC_LGR_NUM_INCR;
-	memcpy(&lgr->id, (u8 *)&smc_lgr_num, SMC_LGR_ID_SIZE);
+	smc_lgr_list.num += SMC_LGR_NUM_INCR;
+	memcpy(&lgr->id, (u8 *)&smc_lgr_list.num, SMC_LGR_ID_SIZE);
 	INIT_DELAYED_WORK(&lgr->free_work, smc_lgr_free_work);
 	lgr->conns_all = RB_ROOT;
@@ -374,6 +378,18 @@ void smc_lgr_terminate(struct smc_link_group *lgr)
 	smc_lgr_schedule_free_work(lgr);
 }
 
+/* Called when IB port is terminated */
+void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport)
+{
+	struct smc_link_group *lgr, *l;
+
+	list_for_each_entry_safe(lgr, l, &smc_lgr_list.list, list) {
+		if (lgr->lnk[SMC_SINGLE_LINK].smcibdev == smcibdev &&
+		    lgr->lnk[SMC_SINGLE_LINK].ibport == ibport)
+			smc_lgr_terminate(lgr);
+	}
+}
+
 /* Determine vlan of internal TCP socket.
  * @vlan_id: address to store the determined vlan id into
  */
@@ -802,3 +818,21 @@ int smc_rmb_rtoken_handling(struct smc_connection *conn,
 		return conn->rtoken_idx;
 	return 0;
 }
+
+/* Called (from smc_exit) when module is removed */
+void smc_core_exit(void)
+{
+	struct smc_link_group *lgr, *lg;
+	LIST_HEAD(lgr_freeing_list);
+
+	spin_lock_bh(&smc_lgr_list.lock);
+	if (!list_empty(&smc_lgr_list.list))
+		list_splice_init(&smc_lgr_list.list, &lgr_freeing_list);
+	spin_unlock_bh(&smc_lgr_list.lock);
+	list_for_each_entry_safe(lgr, lg, &lgr_freeing_list, list) {
+		list_del_init(&lgr->list);
+		smc_llc_link_inactive(&lgr->lnk[SMC_SINGLE_LINK]);
+		cancel_delayed_work_sync(&lgr->free_work);
+		smc_lgr_free(lgr); /* free link group */
+	}
+}
net/smc/smc_core.h
@@ -23,10 +23,9 @@
 struct smc_lgr_list {			/* list of link group definition */
 	struct list_head	list;
 	spinlock_t		lock;	/* protects list of link groups */
+	u32			num;	/* unique link group number */
 };
 
-extern struct smc_lgr_list	smc_lgr_list;	/* list of link groups */
-
 enum smc_lgr_role {		/* possible roles of a link group */
 	SMC_CLNT,	/* client */
 	SMC_SERV	/* server */
@@ -210,6 +209,7 @@ struct smc_clc_msg_accept_confirm;
 void smc_lgr_free(struct smc_link_group *lgr);
 void smc_lgr_forget(struct smc_link_group *lgr);
 void smc_lgr_terminate(struct smc_link_group *lgr);
+void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport);
 int smc_buf_create(struct smc_sock *smc);
 int smc_rmb_rtoken_handling(struct smc_connection *conn,
 			    struct smc_clc_msg_accept_confirm *clc);
@@ -219,4 +219,5 @@ void smc_sndbuf_sync_sg_for_cpu(struct smc_connection *conn);
 void smc_sndbuf_sync_sg_for_device(struct smc_connection *conn);
 void smc_rmb_sync_sg_for_cpu(struct smc_connection *conn);
 void smc_rmb_sync_sg_for_device(struct smc_connection *conn);
+void smc_core_exit(void);
 #endif
net/smc/smc_ib.c
@@ -143,17 +143,6 @@ int smc_ib_ready_link(struct smc_link *lnk)
 	return rc;
 }
 
-static void smc_ib_port_terminate(struct smc_ib_device *smcibdev, u8 ibport)
-{
-	struct smc_link_group *lgr, *l;
-
-	list_for_each_entry_safe(lgr, l, &smc_lgr_list.list, list) {
-		if (lgr->lnk[SMC_SINGLE_LINK].smcibdev == smcibdev &&
-		    lgr->lnk[SMC_SINGLE_LINK].ibport == ibport)
-			smc_lgr_terminate(lgr);
-	}
-}
-
 /* process context wrapper for might_sleep smc_ib_remember_port_attr */
 static void smc_ib_port_event_work(struct work_struct *work)
 {
@@ -165,7 +154,7 @@ static void smc_ib_port_event_work(struct work_struct *work)
 		smc_ib_remember_port_attr(smcibdev, port_idx + 1);
 		clear_bit(port_idx, &smcibdev->port_event_mask);
 		if (!smc_ib_port_active(smcibdev, port_idx + 1))
-			smc_ib_port_terminate(smcibdev, port_idx + 1);
+			smc_port_terminate(smcibdev, port_idx + 1);
 	}
 }