Commit 4c082221 authored by David S. Miller

Merge branch 'net-smc-next'

Ursula Braun says:

====================
net/smc: patches 2020-02-17

here are patches for SMC that make link group termination more robust:
terminations are scheduled onto the link group's terminate_work instead of
running synchronously, and redundant port event work scheduling is avoided.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 790a9a7c 5613f20c
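
The central change of the series: callers no longer tear a link group down synchronously via smc_lgr_terminate(); they call smc_lgr_terminate_sched(), which unlinks the group and defers the actual teardown to the group's terminate_work. As a rough userspace illustration of that defer-to-worker idea (not kernel code: the struct, names, and flow below are invented for the sketch, with a pthread standing in for the workqueue and a C11 atomic for the already-scheduled check):

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Toy stand-in for struct smc_link_group, modeling only the handoff. */
struct lgr {
	atomic_bool scheduled;	/* has termination been handed off yet? */
	pthread_t worker;	/* stands in for the terminate_work item */
};

/* Worker body: plays the role of smc_lgr_terminate_work(), which in the
 * patched code calls __smc_lgr_terminate(lgr, true). */
static void *terminate_worker(void *arg)
{
	(void)arg;
	puts("link group teardown runs here, off the caller's path");
	return NULL;
}

/* Analogue of smc_lgr_terminate_sched(): cheap and non-blocking, so it
 * is usable from error paths that must not sleep; the atomic exchange
 * guarantees the worker is started at most once. */
static void lgr_terminate_sched(struct lgr *lgr)
{
	if (atomic_exchange(&lgr->scheduled, true))
		return;		/* already scheduled */
	pthread_create(&lgr->worker, NULL, terminate_worker, lgr);
}

int main(void)
{
	struct lgr lgr = { 0 };

	lgr_terminate_sched(&lgr);	/* e.g. from a send error path */
	lgr_terminate_sched(&lgr);	/* duplicate call starts nothing */
	pthread_join(lgr.worker, NULL);
	return 0;
}
```
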
net/smc/smc_clc.c
@@ -349,7 +349,7 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
 		smc->peer_diagnosis = ntohl(dclc->peer_diagnosis);
 		if (((struct smc_clc_msg_decline *)buf)->hdr.flag) {
 			smc->conn.lgr->sync_err = 1;
-			smc_lgr_terminate(smc->conn.lgr, true);
+			smc_lgr_terminate_sched(smc->conn.lgr);
 		}
 	}
net/smc/smc_core.c
@@ -46,6 +46,7 @@ static DECLARE_WAIT_QUEUE_HEAD(lgrs_deleted);
 
 static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
 			 struct smc_buf_desc *buf_desc);
+static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft);
 
 /* return head of link group list and its lock for a given link group */
 static inline struct list_head *smc_lgr_list_head(struct smc_link_group *lgr,
@@ -229,7 +230,7 @@ static void smc_lgr_terminate_work(struct work_struct *work)
 	struct smc_link_group *lgr = container_of(work, struct smc_link_group,
 						  terminate_work);
 
-	smc_lgr_terminate(lgr, true);
+	__smc_lgr_terminate(lgr, true);
 }
 
 /* create a new SMC link group */
@@ -576,15 +577,15 @@ static void smc_lgr_cleanup(struct smc_link_group *lgr)
 	} else {
 		struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];
 
-		wake_up(&lnk->wr_reg_wait);
-		if (lnk->state != SMC_LNK_INACTIVE) {
-			smc_link_send_delete(lnk, false);
+		if (lnk->state != SMC_LNK_INACTIVE)
 			smc_llc_link_inactive(lnk);
-		}
 	}
 }
 
-/* terminate link group */
+/* terminate link group
+ * @soft: true if link group shutdown can take its time
+ *        false if immediate link group shutdown is required
+ */
 static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft)
 {
 	struct smc_connection *conn;
@@ -622,25 +623,20 @@ static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft)
 	smc_lgr_free(lgr);
 }
 
-/* unlink and terminate link group
- * @soft: true if link group shutdown can take its time
- *        false if immediate link group shutdown is required
- */
-void smc_lgr_terminate(struct smc_link_group *lgr, bool soft)
+/* unlink link group and schedule termination */
+void smc_lgr_terminate_sched(struct smc_link_group *lgr)
 {
 	spinlock_t *lgr_lock;
 
 	smc_lgr_list_head(lgr, &lgr_lock);
 	spin_lock_bh(lgr_lock);
-	if (lgr->terminating) {
+	if (list_empty(&lgr->list) || lgr->terminating || lgr->freeing) {
 		spin_unlock_bh(lgr_lock);
 		return;	/* lgr already terminating */
 	}
-	if (!soft)
-		lgr->freeing = 1;
 	list_del_init(&lgr->list);
 	spin_unlock_bh(lgr_lock);
-	__smc_lgr_terminate(lgr, soft);
+	schedule_work(&lgr->terminate_work);
 }
 
 /* Called when IB port is terminated */
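
A note on the new smc_lgr_terminate_sched() above: the old inline helper in smc_core.h (removed in the next hunk) tested lgr->terminating and lgr->freeing without holding lgr_lock, so two racing callers could both schedule work. The replacement does every check plus the list_del_init() under the lock, so only the first caller proceeds. A compilable userspace sketch of that unlink-under-lock-then-defer shape (all names and types are toy stand-ins: a pthread mutex replaces the spinlock, and the schedule_work() call is left as a comment):

```c
#include <pthread.h>
#include <stdbool.h>

/* Toy doubly linked list, standing in for the kernel's list_head. */
struct list_head { struct list_head *next, *prev; };

static void list_del_init(struct list_head *e)
{
	e->next->prev = e->prev;
	e->prev->next = e->next;
	e->next = e->prev = e;		/* self-linked == "on no list" */
}

static bool list_empty(const struct list_head *e)
{
	return e->next == e;
}

struct lgr {
	struct list_head list;
	bool terminating, freeing;
};

static pthread_mutex_t lgr_lock = PTHREAD_MUTEX_INITIALIZER;

/* Analogue of smc_lgr_terminate_sched(): every check plus the unlink
 * happen under the lock, so two racing callers cannot both proceed. */
static bool lgr_terminate_sched(struct lgr *lgr)
{
	pthread_mutex_lock(&lgr_lock);
	if (list_empty(&lgr->list) || lgr->terminating || lgr->freeing) {
		pthread_mutex_unlock(&lgr_lock);
		return false;		/* already unlinked / shutting down */
	}
	list_del_init(&lgr->list);	/* later callers see list_empty() */
	pthread_mutex_unlock(&lgr_lock);
	/* the kernel code now calls schedule_work(&lgr->terminate_work) */
	return true;
}

int main(void)
{
	struct list_head groups = { &groups, &groups };
	struct lgr lgr = { .terminating = false, .freeing = false };

	/* link the group into a single-entry list */
	lgr.list.next = lgr.list.prev = &groups;
	groups.next = groups.prev = &lgr.list;

	lgr_terminate_sched(&lgr);		/* unlinks, would queue work */
	return lgr_terminate_sched(&lgr);	/* no-op: returns 0 (false) */
}
```

The detail doing the work here: list_del_init() leaves the entry self-linked, so list_empty() on the entry itself doubles as an "already unlinked" test, which is exactly the first condition the patched kernel function checks.
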
net/smc/smc_core.h
@@ -285,18 +285,12 @@ static inline struct smc_connection *smc_lgr_find_conn(
 	return res;
 }
 
-static inline void smc_lgr_terminate_sched(struct smc_link_group *lgr)
-{
-	if (!lgr->terminating && !lgr->freeing)
-		schedule_work(&lgr->terminate_work);
-}
-
 struct smc_sock;
 struct smc_clc_msg_accept_confirm;
 struct smc_clc_msg_local;
 
 void smc_lgr_forget(struct smc_link_group *lgr);
-void smc_lgr_terminate(struct smc_link_group *lgr, bool soft);
+void smc_lgr_terminate_sched(struct smc_link_group *lgr);
 void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport);
 void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid,
 			unsigned short vlan);
net/smc/smc_ib.c
@@ -257,6 +257,7 @@ static void smc_ib_global_event_handler(struct ib_event_handler *handler,
 					struct ib_event *ibevent)
 {
 	struct smc_ib_device *smcibdev;
+	bool schedule = false;
 	u8 port_idx;
 
 	smcibdev = container_of(handler, struct smc_ib_device, event_handler);
@@ -266,22 +267,35 @@ static void smc_ib_global_event_handler(struct ib_event_handler *handler,
 		/* terminate all ports on device */
 		for (port_idx = 0; port_idx < SMC_MAX_PORTS; port_idx++) {
 			set_bit(port_idx, &smcibdev->port_event_mask);
-			set_bit(port_idx, smcibdev->ports_going_away);
+			if (!test_and_set_bit(port_idx,
+					      smcibdev->ports_going_away))
+				schedule = true;
 		}
-		schedule_work(&smcibdev->port_event_work);
+		if (schedule)
+			schedule_work(&smcibdev->port_event_work);
 		break;
-	case IB_EVENT_PORT_ERR:
 	case IB_EVENT_PORT_ACTIVE:
-	case IB_EVENT_GID_CHANGE:
 		port_idx = ibevent->element.port_num - 1;
-		if (port_idx < SMC_MAX_PORTS) {
-			set_bit(port_idx, &smcibdev->port_event_mask);
-			if (ibevent->event == IB_EVENT_PORT_ERR)
-				set_bit(port_idx, smcibdev->ports_going_away);
-			else if (ibevent->event == IB_EVENT_PORT_ACTIVE)
-				clear_bit(port_idx, smcibdev->ports_going_away);
+		if (port_idx >= SMC_MAX_PORTS)
+			break;
+		set_bit(port_idx, &smcibdev->port_event_mask);
+		if (test_and_clear_bit(port_idx, smcibdev->ports_going_away))
 			schedule_work(&smcibdev->port_event_work);
-		}
+		break;
+	case IB_EVENT_PORT_ERR:
+		port_idx = ibevent->element.port_num - 1;
+		if (port_idx >= SMC_MAX_PORTS)
+			break;
+		set_bit(port_idx, &smcibdev->port_event_mask);
+		if (!test_and_set_bit(port_idx, smcibdev->ports_going_away))
+			schedule_work(&smcibdev->port_event_work);
+		break;
+	case IB_EVENT_GID_CHANGE:
+		port_idx = ibevent->element.port_num - 1;
+		if (port_idx >= SMC_MAX_PORTS)
+			break;
+		set_bit(port_idx, &smcibdev->port_event_mask);
+		schedule_work(&smcibdev->port_event_work);
 		break;
 	default:
 		break;
@@ -316,11 +330,11 @@ static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv)
 	case IB_EVENT_QP_FATAL:
 	case IB_EVENT_QP_ACCESS_ERR:
 		port_idx = ibevent->element.qp->port - 1;
-		if (port_idx < SMC_MAX_PORTS) {
-			set_bit(port_idx, &smcibdev->port_event_mask);
-			set_bit(port_idx, smcibdev->ports_going_away);
+		if (port_idx >= SMC_MAX_PORTS)
+			break;
+		set_bit(port_idx, &smcibdev->port_event_mask);
+		if (!test_and_set_bit(port_idx, smcibdev->ports_going_away))
 			schedule_work(&smcibdev->port_event_work);
-		}
 		break;
 	default:
 		break;
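
The smc_ib.c rework above leans on the atomic read-modify-write bitops: test_and_set_bit() returns the bit's previous value, so only the caller that actually flips a bit in ports_going_away schedules port_event_work, and test_and_clear_bit() schedules only when the port really was marked as going away (schedule_work() already coalesces an already-pending work item; the patch just avoids the redundant calls). A userspace sketch of that only-the-flipper-schedules idiom (C11 atomics in place of the kernel bitops; the helpers below mirror the kernel names but are reimplemented here purely for illustration):

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_ulong ports_going_away;	/* one bit per port, as in smcibdev */

/* Analogues of the kernel helpers: atomically flip a bit and report
 * its previous value. */
static bool test_and_set_bit(int nr, atomic_ulong *addr)
{
	return atomic_fetch_or(addr, 1UL << nr) & (1UL << nr);
}

static bool test_and_clear_bit(int nr, atomic_ulong *addr)
{
	return atomic_fetch_and(addr, ~(1UL << nr)) & (1UL << nr);
}

static void schedule_port_event_work(void)
{
	puts("port_event_work scheduled");
}

int main(void)
{
	/* PORT_ERR path: only the caller that sets the bit schedules,
	 * so repeated errors on the same port queue the work once. */
	for (int i = 0; i < 3; i++)
		if (!test_and_set_bit(1, &ports_going_away))
			schedule_port_event_work();	/* prints once */

	/* PORT_ACTIVE path: schedule only if the port was going away. */
	if (test_and_clear_bit(1, &ports_going_away))
		schedule_port_event_work();	/* prints once */
	return 0;
}
```
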
net/smc/smc_llc.c
@@ -614,7 +614,7 @@ static void smc_llc_testlink_work(struct work_struct *work)
 	rc = wait_for_completion_interruptible_timeout(&link->llc_testlink_resp,
 						       SMC_LLC_WAIT_TIME);
 	if (rc <= 0) {
-		smc_lgr_terminate(smc_get_lgr(link), true);
+		smc_lgr_terminate_sched(smc_get_lgr(link));
 		return;
 	}
 	next_interval = link->llc_testlink_time;
net/smc/smc_tx.c
@@ -284,7 +284,7 @@ static int smc_tx_rdma_write(struct smc_connection *conn, int peer_rmbe_offset,
 	rdma_wr->rkey = lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].rkey;
 	rc = ib_post_send(link->roce_qp, &rdma_wr->wr, NULL);
 	if (rc)
-		smc_lgr_terminate(lgr, true);
+		smc_lgr_terminate_sched(lgr);
 	return rc;
 }