Commit e7be235f authored by David S. Miller

Merge branch 'net-smc-improve-termination-handling-part-3'

Karsten Graul says:

====================
net/smc: improve termination handling (part 3)

Part 3 of the SMC termination patches improves the link group
termination processing and introduces the ability to immediately
terminate a link group.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 43da44c8 0b29ec64
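The key interface change in this series is the new "soft" argument of smc_lgr_terminate(): soft termination leaves the cleanup to the scheduled free worker, while hard termination cancels that worker and frees the link group at once (see the smc_core.c hunks below). The following caller sketch is illustrative only and not part of the commit; it assumes the prototype added to net/smc/smc_core.h by this series, and the wrapper name example_lgr_shutdown() is made up.

/* Illustration only -- hypothetical caller of the new two-argument API. */
static void example_lgr_shutdown(struct smc_link_group *lgr, bool device_gone)
{
        if (device_gone)
                /* hard path: __smc_lgr_terminate() cancels the delayed
                 * free worker and frees the link group immediately
                 */
                smc_lgr_terminate(lgr, false);
        else
                /* soft path: the link group is handed to the scheduled
                 * free worker and freed later
                 */
                smc_lgr_terminate(lgr, true);
}

Within the series itself the hard path is normally reached through smc_smcd_terminate_all() and smc_smcr_terminate_all(), which call __smc_lgr_terminate(lgr, false) directly.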
@@ -32,8 +32,6 @@
 #define ISM_UNREG_SBA 0x11
 #define ISM_UNREG_IEQ 0x12
 
-#define ISM_ERROR 0xFFFF
-
 struct ism_req_hdr {
         u32 cmd;
         u16 : 16;
...
@@ -37,6 +37,8 @@ struct smcd_dmb {
 #define ISM_EVENT_GID 1
 #define ISM_EVENT_SWR 2
 
+#define ISM_ERROR 0xFFFF
+
 struct smcd_event {
         u32 type;
         u32 code;
@@ -77,6 +79,8 @@ struct smcd_dev {
         bool pnetid_by_user;
         struct list_head lgr_list;
         spinlock_t lgr_lock;
+        atomic_t lgr_cnt;
+        wait_queue_head_t lgrs_deleted;
         u8 going_away : 1;
 };
...
@@ -131,6 +131,9 @@ int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn)
 {
         int rc;
 
+        if (!conn->lgr || (conn->lgr->is_smcd && conn->lgr->peer_shutdown))
+                return -EPIPE;
+
         if (conn->lgr->is_smcd) {
                 spin_lock_bh(&conn->send_lock);
                 rc = smcd_cdc_msg_send(conn);
...
@@ -349,7 +349,7 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
                 smc->peer_diagnosis = ntohl(dclc->peer_diagnosis);
                 if (((struct smc_clc_msg_decline *)buf)->hdr.flag) {
                         smc->conn.lgr->sync_err = 1;
-                        smc_lgr_terminate(smc->conn.lgr);
+                        smc_lgr_terminate(smc->conn.lgr, true);
                 }
         }
...
@@ -110,6 +110,17 @@ int smc_close_abort(struct smc_connection *conn)
         return smc_cdc_get_slot_and_msg_send(conn);
 }
 
+static void smc_close_cancel_work(struct smc_sock *smc)
+{
+        struct sock *sk = &smc->sk;
+
+        release_sock(sk);
+        cancel_work_sync(&smc->conn.close_work);
+        cancel_delayed_work_sync(&smc->conn.tx_work);
+        lock_sock(sk);
+        sk->sk_state = SMC_CLOSED;
+}
+
 /* terminate smc socket abnormally - active abort
  * link group is terminated, i.e. RDMA communication no longer possible
  */
@@ -126,23 +137,21 @@ void smc_close_active_abort(struct smc_sock *smc)
         switch (sk->sk_state) {
         case SMC_ACTIVE:
                 sk->sk_state = SMC_PEERABORTWAIT;
-                release_sock(sk);
-                cancel_delayed_work_sync(&smc->conn.tx_work);
-                lock_sock(sk);
+                smc_close_cancel_work(smc);
                 sk->sk_state = SMC_CLOSED;
                 sock_put(sk); /* passive closing */
                 break;
         case SMC_APPCLOSEWAIT1:
         case SMC_APPCLOSEWAIT2:
-                release_sock(sk);
-                cancel_delayed_work_sync(&smc->conn.tx_work);
-                lock_sock(sk);
+                smc_close_cancel_work(smc);
                 sk->sk_state = SMC_CLOSED;
                 sock_put(sk); /* postponed passive closing */
                 break;
         case SMC_PEERCLOSEWAIT1:
         case SMC_PEERCLOSEWAIT2:
         case SMC_PEERFINCLOSEWAIT:
+                sk->sk_state = SMC_PEERABORTWAIT;
+                smc_close_cancel_work(smc);
                 sk->sk_state = SMC_CLOSED;
                 smc_conn_free(&smc->conn);
                 release_clcsock = true;
@@ -150,7 +159,11 @@ void smc_close_active_abort(struct smc_sock *smc)
                 break;
         case SMC_PROCESSABORT:
         case SMC_APPFINCLOSEWAIT:
+                sk->sk_state = SMC_PEERABORTWAIT;
+                smc_close_cancel_work(smc);
                 sk->sk_state = SMC_CLOSED;
+                smc_conn_free(&smc->conn);
+                release_clcsock = true;
                 break;
         case SMC_INIT:
         case SMC_PEERABORTWAIT:
...
@@ -161,10 +161,10 @@ static void smc_lgr_unregister_conn(struct smc_connection *conn)
  * of the DELETE LINK sequence from server; or as server to
  * initiate the delete processing. See smc_llc_rx_delete_link().
  */
-static int smc_link_send_delete(struct smc_link *lnk)
+static int smc_link_send_delete(struct smc_link *lnk, bool orderly)
 {
         if (lnk->state == SMC_LNK_ACTIVE &&
-            !smc_llc_send_delete_link(lnk, SMC_LLC_REQ, true)) {
+            !smc_llc_send_delete_link(lnk, SMC_LLC_REQ, orderly)) {
                 smc_llc_link_deleting(lnk);
                 return 0;
         }
@@ -201,7 +201,7 @@ static void smc_lgr_free_work(struct work_struct *work)
         if (!lgr->is_smcd && !lgr->terminating) {
                 /* try to send del link msg, on error free lgr immediately */
                 if (lnk->state == SMC_LNK_ACTIVE &&
-                    !smc_link_send_delete(lnk)) {
+                    !smc_link_send_delete(lnk, true)) {
                         /* reschedule in case we never receive a response */
                         smc_lgr_schedule_free_work(lgr);
                         spin_unlock_bh(lgr_lock);
@@ -214,7 +214,7 @@ static void smc_lgr_free_work(struct work_struct *work)
         if (!lgr->is_smcd && lnk->state != SMC_LNK_INACTIVE)
                 smc_llc_link_inactive(lnk);
-        if (lgr->is_smcd)
+        if (lgr->is_smcd && !lgr->terminating)
                 smc_ism_signal_shutdown(lgr);
         smc_lgr_free(lgr);
 }
@@ -224,7 +224,7 @@ static void smc_lgr_terminate_work(struct work_struct *work)
         struct smc_link_group *lgr = container_of(work, struct smc_link_group,
                                                   terminate_work);
 
-        smc_lgr_terminate(lgr);
+        smc_lgr_terminate(lgr, true);
 }
 
 /* create a new SMC link group */
@@ -275,6 +275,8 @@ static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
                 lgr->smcd = ini->ism_dev;
                 lgr_list = &ini->ism_dev->lgr_list;
                 lgr_lock = &lgr->smcd->lgr_lock;
+                lgr->peer_shutdown = 0;
+                atomic_inc(&ini->ism_dev->lgr_cnt);
         } else {
                 /* SMC-R specific settings */
                 get_device(&ini->ib_dev->ibdev->dev);
@@ -380,7 +382,8 @@ void smc_conn_free(struct smc_connection *conn)
         if (!lgr)
                 return;
         if (lgr->is_smcd) {
-                smc_ism_unset_conn(conn);
+                if (!list_empty(&lgr->list))
+                        smc_ism_unset_conn(conn);
                 tasklet_kill(&conn->rx_tsklet);
         } else {
                 smc_cdc_tx_dismiss_slots(conn);
@@ -480,8 +483,12 @@ static void smc_lgr_free(struct smc_link_group *lgr)
 {
         smc_lgr_free_bufs(lgr);
         if (lgr->is_smcd) {
-                smc_ism_put_vlan(lgr->smcd, lgr->vlan_id);
-                put_device(&lgr->smcd->dev);
+                if (!lgr->terminating) {
+                        smc_ism_put_vlan(lgr->smcd, lgr->vlan_id);
+                        put_device(&lgr->smcd->dev);
+                }
+                if (!atomic_dec_return(&lgr->smcd->lgr_cnt))
+                        wake_up(&lgr->smcd->lgrs_deleted);
         } else {
                 smc_link_clear(&lgr->lnk[SMC_SINGLE_LINK]);
                 put_device(&lgr->lnk[SMC_SINGLE_LINK].smcibdev->ibdev->dev);
@@ -502,6 +509,20 @@ void smc_lgr_forget(struct smc_link_group *lgr)
         spin_unlock_bh(lgr_lock);
 }
 
+static void smcd_unregister_all_dmbs(struct smc_link_group *lgr)
+{
+        int i;
+
+        for (i = 0; i < SMC_RMBE_SIZES; i++) {
+                struct smc_buf_desc *buf_desc;
+
+                list_for_each_entry(buf_desc, &lgr->rmbs[i], list) {
+                        buf_desc->len += sizeof(struct smcd_cdc_msg);
+                        smc_ism_unregister_dmb(lgr->smcd, buf_desc);
+                }
+        }
+}
+
 static void smc_sk_wake_ups(struct smc_sock *smc)
 {
         smc->sk.sk_write_space(&smc->sk);
@@ -510,20 +531,50 @@ static void smc_sk_wake_ups(struct smc_sock *smc)
 }
 
 /* kill a connection */
-static void smc_conn_kill(struct smc_connection *conn)
+static void smc_conn_kill(struct smc_connection *conn, bool soft)
 {
         struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
 
-        smc_close_abort(conn);
+        if (conn->lgr->is_smcd && conn->lgr->peer_shutdown)
+                conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
+        else
+                smc_close_abort(conn);
         conn->killed = 1;
+        smc->sk.sk_err = ECONNABORTED;
         smc_sk_wake_ups(smc);
+        if (conn->lgr->is_smcd) {
+                smc_ism_unset_conn(conn);
+                if (soft)
+                        tasklet_kill(&conn->rx_tsklet);
+                else
+                        tasklet_unlock_wait(&conn->rx_tsklet);
+        } else {
+                smc_cdc_tx_dismiss_slots(conn);
+        }
         smc_lgr_unregister_conn(conn);
-        smc->sk.sk_err = ECONNABORTED;
         smc_close_active_abort(smc);
 }
 
+static void smc_lgr_cleanup(struct smc_link_group *lgr)
+{
+        if (lgr->is_smcd) {
+                smc_ism_signal_shutdown(lgr);
+                smcd_unregister_all_dmbs(lgr);
+                smc_ism_put_vlan(lgr->smcd, lgr->vlan_id);
+                put_device(&lgr->smcd->dev);
+        } else {
+                struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];
+
+                wake_up(&lnk->wr_reg_wait);
+                if (lnk->state != SMC_LNK_INACTIVE) {
+                        smc_link_send_delete(lnk, false);
+                        smc_llc_link_inactive(lnk);
+                }
+        }
+}
+
 /* terminate link group */
-static void __smc_lgr_terminate(struct smc_link_group *lgr)
+static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft)
 {
         struct smc_connection *conn;
         struct smc_sock *smc;
@@ -531,6 +582,8 @@ static void __smc_lgr_terminate(struct smc_link_group *lgr)
         if (lgr->terminating)
                 return; /* lgr already terminating */
+        if (!soft)
+                cancel_delayed_work_sync(&lgr->free_work);
         lgr->terminating = 1;
         if (!lgr->is_smcd)
                 smc_llc_link_inactive(&lgr->lnk[SMC_SINGLE_LINK]);
@@ -544,20 +597,25 @@ static void __smc_lgr_terminate(struct smc_link_group *lgr)
                 smc = container_of(conn, struct smc_sock, conn);
                 sock_hold(&smc->sk); /* sock_put below */
                 lock_sock(&smc->sk);
-                smc_conn_kill(conn);
+                smc_conn_kill(conn, soft);
                 release_sock(&smc->sk);
                 sock_put(&smc->sk); /* sock_hold above */
                 read_lock_bh(&lgr->conns_lock);
                 node = rb_first(&lgr->conns_all);
         }
         read_unlock_bh(&lgr->conns_lock);
-        if (!lgr->is_smcd)
-                wake_up(&lgr->lnk[SMC_SINGLE_LINK].wr_reg_wait);
-        smc_lgr_schedule_free_work_fast(lgr);
+        smc_lgr_cleanup(lgr);
+        if (soft)
+                smc_lgr_schedule_free_work_fast(lgr);
+        else
+                smc_lgr_free(lgr);
 }
 
-/* unlink and terminate link group */
-void smc_lgr_terminate(struct smc_link_group *lgr)
+/* unlink and terminate link group
+ * @soft: true if link group shutdown can take its time
+ *        false if immediate link group shutdown is required
+ */
+void smc_lgr_terminate(struct smc_link_group *lgr, bool soft)
 {
         spinlock_t *lgr_lock;
 
@@ -567,9 +625,11 @@ void smc_lgr_terminate(struct smc_link_group *lgr)
                 spin_unlock_bh(lgr_lock);
                 return; /* lgr already terminating */
         }
+        if (!soft)
+                lgr->freeing = 1;
         list_del_init(&lgr->list);
         spin_unlock_bh(lgr_lock);
-        __smc_lgr_terminate(lgr);
+        __smc_lgr_terminate(lgr, soft);
 }
 
 /* Called when IB port is terminated */
@@ -582,18 +642,20 @@ void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport)
         list_for_each_entry_safe(lgr, l, &smc_lgr_list.list, list) {
                 if (!lgr->is_smcd &&
                     lgr->lnk[SMC_SINGLE_LINK].smcibdev == smcibdev &&
-                    lgr->lnk[SMC_SINGLE_LINK].ibport == ibport)
+                    lgr->lnk[SMC_SINGLE_LINK].ibport == ibport) {
                         list_move(&lgr->list, &lgr_free_list);
+                        lgr->freeing = 1;
+                }
         }
         spin_unlock_bh(&smc_lgr_list.lock);
 
         list_for_each_entry_safe(lgr, l, &lgr_free_list, list) {
                 list_del_init(&lgr->list);
-                __smc_lgr_terminate(lgr);
+                __smc_lgr_terminate(lgr, false);
         }
 }
 
-/* Called when SMC-D device is terminated or peer is lost */
+/* Called when peer lgr shutdown (regularly or abnormally) is received */
 void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid, unsigned short vlan)
 {
         struct smc_link_group *lgr, *l;
@@ -604,6 +666,8 @@ void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid, unsigned short vlan)
         list_for_each_entry_safe(lgr, l, &dev->lgr_list, list) {
                 if ((!peer_gid || lgr->peer_gid == peer_gid) &&
                     (vlan == VLAN_VID_MASK || lgr->vlan_id == vlan)) {
+                        if (peer_gid) /* peer triggered termination */
+                                lgr->peer_shutdown = 1;
                         list_move(&lgr->list, &lgr_free_list);
                 }
         }
@@ -612,11 +676,58 @@ void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid, unsigned short vlan)
         /* cancel the regular free workers and actually free lgrs */
         list_for_each_entry_safe(lgr, l, &lgr_free_list, list) {
                 list_del_init(&lgr->list);
-                __smc_lgr_terminate(lgr);
-                cancel_delayed_work_sync(&lgr->free_work);
-                if (!peer_gid && vlan == VLAN_VID_MASK) /* dev terminated? */
-                        smc_ism_signal_shutdown(lgr);
-                smc_lgr_free(lgr);
+                schedule_work(&lgr->terminate_work);
+        }
+}
+
+/* Called when an SMCD device is removed or the smc module is unloaded */
+void smc_smcd_terminate_all(struct smcd_dev *smcd)
+{
+        struct smc_link_group *lgr, *lg;
+        LIST_HEAD(lgr_free_list);
+
+        spin_lock_bh(&smcd->lgr_lock);
+        list_splice_init(&smcd->lgr_list, &lgr_free_list);
+        list_for_each_entry(lgr, &lgr_free_list, list)
+                lgr->freeing = 1;
+        spin_unlock_bh(&smcd->lgr_lock);
+
+        list_for_each_entry_safe(lgr, lg, &lgr_free_list, list) {
+                list_del_init(&lgr->list);
+                __smc_lgr_terminate(lgr, false);
+        }
+
+        if (atomic_read(&smcd->lgr_cnt))
+                wait_event(smcd->lgrs_deleted, !atomic_read(&smcd->lgr_cnt));
+}
+
+/* Called when an SMCR device is removed or the smc module is unloaded.
+ * If smcibdev is given, all SMCR link groups using this device are terminated.
+ * If smcibdev is NULL, all SMCR link groups are terminated.
+ */
+void smc_smcr_terminate_all(struct smc_ib_device *smcibdev)
+{
+        struct smc_link_group *lgr, *lg;
+        LIST_HEAD(lgr_free_list);
+
+        spin_lock_bh(&smc_lgr_list.lock);
+        if (!smcibdev) {
+                list_splice_init(&smc_lgr_list.list, &lgr_free_list);
+                list_for_each_entry(lgr, &lgr_free_list, list)
+                        lgr->freeing = 1;
+        } else {
+                list_for_each_entry_safe(lgr, lg, &smc_lgr_list.list, list) {
+                        if (lgr->lnk[SMC_SINGLE_LINK].smcibdev == smcibdev) {
+                                list_move(&lgr->list, &lgr_free_list);
+                                lgr->freeing = 1;
+                        }
+                }
+        }
+        spin_unlock_bh(&smc_lgr_list.lock);
+
+        list_for_each_entry_safe(lgr, lg, &lgr_free_list, list) {
+                list_del_init(&lgr->list);
+                __smc_lgr_terminate(lgr, false);
         }
 }
@@ -1137,37 +1248,23 @@ static void smc_core_going_away(void)
         spin_unlock(&smcd_dev_list.lock);
 }
 
-/* Called (from smc_exit) when module is removed */
-void smc_core_exit(void)
+/* Clean up all SMC link groups */
+static void smc_lgrs_shutdown(void)
 {
-        struct smc_link_group *lgr, *lg;
-        LIST_HEAD(lgr_freeing_list);
         struct smcd_dev *smcd;
 
         smc_core_going_away();
 
-        spin_lock_bh(&smc_lgr_list.lock);
-        list_splice_init(&smc_lgr_list.list, &lgr_freeing_list);
-        spin_unlock_bh(&smc_lgr_list.lock);
+        smc_smcr_terminate_all(NULL);
 
         spin_lock(&smcd_dev_list.lock);
         list_for_each_entry(smcd, &smcd_dev_list.list, list)
-                list_splice_init(&smcd->lgr_list, &lgr_freeing_list);
+                smc_smcd_terminate_all(smcd);
         spin_unlock(&smcd_dev_list.lock);
+}
 
-        list_for_each_entry_safe(lgr, lg, &lgr_freeing_list, list) {
-                list_del_init(&lgr->list);
-                if (!lgr->is_smcd) {
-                        struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];
-
-                        if (lnk->state == SMC_LNK_ACTIVE)
-                                smc_llc_send_delete_link(lnk, SMC_LLC_REQ,
-                                                         false);
-                        smc_llc_link_inactive(lnk);
-                }
-                cancel_delayed_work_sync(&lgr->free_work);
-                if (lgr->is_smcd)
-                        smc_ism_signal_shutdown(lgr);
-                smc_lgr_free(lgr); /* free link group */
-        }
+/* Called (from smc_exit) when module is removed */
+void smc_core_exit(void)
+{
+        smc_lgrs_shutdown();
 }
@@ -228,6 +228,8 @@ struct smc_link_group {
                                         /* Peer GID (remote) */
                         struct smcd_dev *smcd;
                                         /* ISM device for VLAN reg. */
+                        u8 peer_shutdown : 1;
+                                        /* peer triggered shutdownn */
                 };
         };
 };
@@ -285,7 +287,7 @@ static inline struct smc_connection *smc_lgr_find_conn(
 
 static inline void smc_lgr_terminate_sched(struct smc_link_group *lgr)
 {
-        if (!lgr->terminating)
+        if (!lgr->terminating && !lgr->freeing)
                 schedule_work(&lgr->terminate_work);
 }
 
@@ -294,10 +296,12 @@ struct smc_clc_msg_accept_confirm;
 struct smc_clc_msg_local;
 
 void smc_lgr_forget(struct smc_link_group *lgr);
-void smc_lgr_terminate(struct smc_link_group *lgr);
+void smc_lgr_terminate(struct smc_link_group *lgr, bool soft);
 void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport);
 void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid,
                         unsigned short vlan);
+void smc_smcd_terminate_all(struct smcd_dev *dev);
+void smc_smcr_terminate_all(struct smc_ib_device *smcibdev);
 int smc_buf_create(struct smc_sock *smc, bool is_smcd);
 int smc_uncompress_bufsize(u8 compressed);
 int smc_rmb_rtoken_handling(struct smc_connection *conn,
...
@@ -520,9 +520,9 @@ static void smc_ib_cleanup_per_ibdev(struct smc_ib_device *smcibdev)
         if (!smcibdev->initialized)
                 return;
         smcibdev->initialized = 0;
-        smc_wr_remove_dev(smcibdev);
         ib_destroy_cq(smcibdev->roce_cq_recv);
         ib_destroy_cq(smcibdev->roce_cq_send);
+        smc_wr_remove_dev(smcibdev);
 }
 
 static struct ib_client smc_ib_client;
@@ -565,7 +565,7 @@ static void smc_ib_add_dev(struct ib_device *ibdev)
         schedule_work(&smcibdev->port_event_work);
 }
 
-/* callback function for ib_register_client() */
+/* callback function for ib_unregister_client() */
 static void smc_ib_remove_dev(struct ib_device *ibdev, void *client_data)
 {
         struct smc_ib_device *smcibdev;
@@ -575,6 +575,7 @@ static void smc_ib_remove_dev(struct ib_device *ibdev, void *client_data)
         spin_lock(&smc_ib_devices.lock);
         list_del_init(&smcibdev->list); /* remove from smc_ib_devices */
         spin_unlock(&smc_ib_devices.lock);
+        smc_smcr_terminate_all(smcibdev);
         smc_ib_cleanup_per_ibdev(smcibdev);
         ib_unregister_event_handler(&smcibdev->event_handler);
         kfree(smcibdev);
...
@@ -146,6 +146,10 @@ int smc_ism_put_vlan(struct smcd_dev *smcd, unsigned short vlanid)
 int smc_ism_unregister_dmb(struct smcd_dev *smcd, struct smc_buf_desc *dmb_desc)
 {
         struct smcd_dmb dmb;
+        int rc = 0;
+
+        if (!dmb_desc->dma_addr)
+                return rc;
 
         memset(&dmb, 0, sizeof(dmb));
         dmb.dmb_tok = dmb_desc->token;
@@ -153,7 +157,13 @@ int smc_ism_unregister_dmb(struct smcd_dev *smcd, struct smc_buf_desc *dmb_desc)
         dmb.cpu_addr = dmb_desc->cpu_addr;
         dmb.dma_addr = dmb_desc->dma_addr;
         dmb.dmb_len = dmb_desc->len;
-        return smcd->ops->unregister_dmb(smcd, &dmb);
+        rc = smcd->ops->unregister_dmb(smcd, &dmb);
+        if (!rc || rc == ISM_ERROR) {
+                dmb_desc->cpu_addr = NULL;
+                dmb_desc->dma_addr = 0;
+        }
+
+        return rc;
 }
 
 int smc_ism_register_dmb(struct smc_link_group *lgr, int dmb_len,
@@ -226,6 +236,9 @@ int smc_ism_signal_shutdown(struct smc_link_group *lgr)
         int rc;
         union smcd_sw_event_info ev_info;
 
+        if (lgr->peer_shutdown)
+                return 0;
+
         memcpy(ev_info.uid, lgr->id, SMC_LGR_ID_SIZE);
         ev_info.vlan_id = lgr->vlan_id;
         ev_info.code = ISM_EVENT_REQUEST;
@@ -289,6 +302,7 @@ struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name,
         spin_lock_init(&smcd->lgr_lock);
         INIT_LIST_HEAD(&smcd->vlan);
         INIT_LIST_HEAD(&smcd->lgr_list);
+        init_waitqueue_head(&smcd->lgrs_deleted);
         smcd->event_wq = alloc_ordered_workqueue("ism_evt_wq-%s)",
                                                  WQ_MEM_RECLAIM, name);
         if (!smcd->event_wq) {
@@ -313,12 +327,12 @@ EXPORT_SYMBOL_GPL(smcd_register_dev);
 void smcd_unregister_dev(struct smcd_dev *smcd)
 {
         spin_lock(&smcd_dev_list.lock);
-        list_del(&smcd->list);
+        list_del_init(&smcd->list);
         spin_unlock(&smcd_dev_list.lock);
         smcd->going_away = 1;
+        smc_smcd_terminate_all(smcd);
         flush_workqueue(smcd->event_wq);
         destroy_workqueue(smcd->event_wq);
-        smc_smcd_terminate(smcd, 0, VLAN_VID_MASK);
 
         device_del(&smcd->dev);
 }
@@ -372,7 +386,7 @@ void smcd_handle_irq(struct smcd_dev *smcd, unsigned int dmbno)
         spin_lock_irqsave(&smcd->lock, flags);
         conn = smcd->conn[dmbno];
-        if (conn)
+        if (conn && !conn->killed)
                 tasklet_schedule(&conn->rx_tsklet);
         spin_unlock_irqrestore(&smcd->lock, flags);
 }
...
@@ -614,7 +614,7 @@ static void smc_llc_testlink_work(struct work_struct *work)
         rc = wait_for_completion_interruptible_timeout(&link->llc_testlink_resp,
                                                        SMC_LLC_WAIT_TIME);
         if (rc <= 0) {
-                smc_lgr_terminate(smc_get_lgr(link));
+                smc_lgr_terminate(smc_get_lgr(link), true);
                 return;
         }
         next_interval = link->llc_testlink_time;
@@ -656,6 +656,7 @@ void smc_llc_link_active(struct smc_link *link, int testlink_time)
 void smc_llc_link_deleting(struct smc_link *link)
 {
         link->state = SMC_LNK_DELETING;
+        smc_wr_wakeup_tx_wait(link);
 }
 
 /* called in tasklet context */
@@ -663,6 +664,8 @@ void smc_llc_link_inactive(struct smc_link *link)
 {
         link->state = SMC_LNK_INACTIVE;
         cancel_delayed_work(&link->llc_testlink_wrk);
+        smc_wr_wakeup_reg_wait(link);
+        smc_wr_wakeup_tx_wait(link);
 }
 
 /* called in worker context */
@@ -695,9 +698,11 @@ int smc_llc_do_confirm_rkey(struct smc_link *link,
 int smc_llc_do_delete_rkey(struct smc_link *link,
                            struct smc_buf_desc *rmb_desc)
 {
-        int rc;
+        int rc = 0;
 
         mutex_lock(&link->llc_delete_rkey_mutex);
+        if (link->state != SMC_LNK_ACTIVE)
+                goto out;
         reinit_completion(&link->llc_delete_rkey);
         rc = smc_llc_send_delete_rkey(link, rmb_desc);
         if (rc)
...
@@ -284,7 +284,7 @@ static int smc_tx_rdma_write(struct smc_connection *conn, int peer_rmbe_offset,
         rdma_wr->rkey = lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].rkey;
         rc = ib_post_send(link->roce_qp, &rdma_wr->wr, NULL);
         if (rc)
-                smc_lgr_terminate(lgr);
+                smc_lgr_terminate(lgr, true);
         return rc;
 }
...
@@ -50,6 +50,26 @@ struct smc_wr_tx_pend { /* control data for a pending send request */
 
 /*------------------------------- completion --------------------------------*/
 
+/* returns true if at least one tx work request is pending on the given link */
+static inline bool smc_wr_is_tx_pend(struct smc_link *link)
+{
+        if (find_first_bit(link->wr_tx_mask, link->wr_tx_cnt) !=
+                        link->wr_tx_cnt) {
+                return true;
+        }
+        return false;
+}
+
+/* wait till all pending tx work requests on the given link are completed */
+static inline int smc_wr_tx_wait_no_pending_sends(struct smc_link *link)
+{
+        if (wait_event_timeout(link->wr_tx_wait, !smc_wr_is_tx_pend(link),
+                               SMC_WR_TX_WAIT_PENDING_TIME))
+                return 0;
+        else /* timeout */
+                return -EPIPE;
+}
+
 static inline int smc_wr_tx_find_pending_index(struct smc_link *link, u64 wr_id)
 {
         u32 i;
@@ -75,7 +95,7 @@ static inline void smc_wr_tx_process_cqe(struct ib_wc *wc)
                         link->wr_reg_state = FAILED;
                 else
                         link->wr_reg_state = CONFIRMED;
-                wake_up(&link->wr_reg_wait);
+                smc_wr_wakeup_reg_wait(link);
                 return;
         }
@@ -171,6 +191,7 @@ int smc_wr_tx_get_free_slot(struct smc_link *link,
                             struct smc_rdma_wr **wr_rdma_buf,
                             struct smc_wr_tx_pend_priv **wr_pend_priv)
 {
+        struct smc_link_group *lgr = smc_get_lgr(link);
         struct smc_wr_tx_pend *wr_pend;
         u32 idx = link->wr_tx_cnt;
         struct ib_send_wr *wr_ib;
@@ -179,19 +200,20 @@ int smc_wr_tx_get_free_slot(struct smc_link *link,
 
         *wr_buf = NULL;
         *wr_pend_priv = NULL;
-        if (in_softirq()) {
+        if (in_softirq() || lgr->terminating) {
                 rc = smc_wr_tx_get_free_slot_index(link, &idx);
                 if (rc)
                         return rc;
         } else {
-                rc = wait_event_timeout(
+                rc = wait_event_interruptible_timeout(
                         link->wr_tx_wait,
                         link->state == SMC_LNK_INACTIVE ||
+                        lgr->terminating ||
                         (smc_wr_tx_get_free_slot_index(link, &idx) != -EBUSY),
                         SMC_WR_TX_WAIT_FREE_SLOT_TIME);
                 if (!rc) {
                         /* timeout - terminate connections */
-                        smc_lgr_terminate_sched(smc_get_lgr(link));
+                        smc_lgr_terminate_sched(lgr);
                         return -EPIPE;
                 }
                 if (idx == link->wr_tx_cnt)
@@ -227,6 +249,7 @@ int smc_wr_tx_put_slot(struct smc_link *link,
                 memset(&link->wr_tx_bufs[idx], 0,
                        sizeof(link->wr_tx_bufs[idx]));
                 test_and_clear_bit(idx, link->wr_tx_mask);
+                wake_up(&link->wr_tx_wait);
                 return 1;
         }
@@ -510,8 +533,10 @@ void smc_wr_free_link(struct smc_link *lnk)
 {
         struct ib_device *ibdev;
 
-        memset(lnk->wr_tx_mask, 0,
-               BITS_TO_LONGS(SMC_WR_BUF_CNT) * sizeof(*lnk->wr_tx_mask));
+        if (smc_wr_tx_wait_no_pending_sends(lnk))
+                memset(lnk->wr_tx_mask, 0,
+                       BITS_TO_LONGS(SMC_WR_BUF_CNT) *
+                                sizeof(*lnk->wr_tx_mask));
 
         if (!lnk->smcibdev)
                 return;
...
@@ -60,6 +60,16 @@ static inline void smc_wr_tx_set_wr_id(atomic_long_t *wr_tx_id, long val)
         atomic_long_set(wr_tx_id, val);
 }
 
+static inline void smc_wr_wakeup_tx_wait(struct smc_link *lnk)
+{
+        wake_up_all(&lnk->wr_tx_wait);
+}
+
+static inline void smc_wr_wakeup_reg_wait(struct smc_link *lnk)
+{
+        wake_up(&lnk->wr_reg_wait);
+}
+
 /* post a new receive work request to fill a completed old work request entry */
 static inline int smc_wr_rx_post(struct smc_link *link)
 {
...
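The SMCD device-removal path above (smcd_unregister_dev(), and smc_core_exit() via smc_lgrs_shutdown()) hinges on a small refcount-and-wait pattern: smc_lgr_create() increments the new lgr_cnt counter, smc_lgr_free() decrements it and wakes lgrs_deleted on the last drop, and smc_smcd_terminate_all() waits on that queue until every link group is gone. A stand-alone sketch of the same pattern, with made-up names and no SMC specifics:

#include <linux/atomic.h>
#include <linux/wait.h>

/* Illustration only -- generic refcount-and-wait shutdown, not from the commit. */
struct example_dev {
        atomic_t obj_cnt;               /* number of live objects */
        wait_queue_head_t objs_deleted; /* woken when obj_cnt drops to zero */
};

static void example_dev_init(struct example_dev *dev)
{
        atomic_set(&dev->obj_cnt, 0);
        init_waitqueue_head(&dev->objs_deleted);
}

static void example_obj_create(struct example_dev *dev)
{
        atomic_inc(&dev->obj_cnt);
}

static void example_obj_free(struct example_dev *dev)
{
        /* last object gone: wake up a waiting shutdown path */
        if (!atomic_dec_return(&dev->obj_cnt))
                wake_up(&dev->objs_deleted);
}

static void example_dev_shutdown(struct example_dev *dev)
{
        /* after triggering termination of all objects, wait for them */
        if (atomic_read(&dev->obj_cnt))
                wait_event(dev->objs_deleted, !atomic_read(&dev->obj_cnt));
}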