Commit 72a231b7 authored by David S. Miller

Merge branch 'net-smc-fixes-2018-01-26'

Ursula Braun says:

====================
net/smc: fixes 2018-01-26

here are some more smc patches. The first 4 patches take care of
different aspects of smc socket closing; the 5th patch improves
coding style.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents a81e4aff a8fbf8e7
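Editor's note: the closing fixes in this series share one reference-counting pattern, visible throughout the diff below: a sock_hold() is taken before a close/listen work item is scheduled (or before a passive close is expected), and the matching sock_put() is issued either by the worker itself or immediately when schedule_work() returns false because the work was already queued. The following is a minimal sketch of that pattern only; the names my_conn, my_close_work and my_schedule_close are made up for illustration and are not part of the patches.

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <net/sock.h>

struct my_conn {			/* hypothetical connection container */
	struct sock		*sk;
	struct work_struct	close_work;
};

static void my_close_work(struct work_struct *work)
{
	struct my_conn *conn = container_of(work, struct my_conn, close_work);

	/* ... perform the passive closing steps ... */
	sock_put(conn->sk);		/* balances the sock_hold() below */
}

static void my_schedule_close(struct my_conn *conn)
{
	sock_hold(conn->sk);		/* keep sk alive until the worker runs */
	if (!schedule_work(&conn->close_work))
		sock_put(conn->sk);	/* already queued: drop the extra ref */
}

schedule_work() returns false when the work item is already pending; in that case the extra reference taken for it must be dropped right away, which is what the "!schedule_work(...)" hunks in smc_tcp_listen_work(), smc_listen(), smc_cdc_msg_recv_action() and smc_lgr_terminate() below implement.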
@@ -115,7 +115,6 @@ static int smc_release(struct socket *sock)
goto out;
smc = smc_sk(sk);
sock_hold(sk);
if (sk->sk_state == SMC_LISTEN)
/* smc_close_non_accepted() is called and acquires
* sock lock for child sockets again
@@ -124,10 +123,7 @@ static int smc_release(struct socket *sock)
else
lock_sock(sk);
if (smc->use_fallback) {
sk->sk_state = SMC_CLOSED;
sk->sk_state_change(sk);
} else {
if (!smc->use_fallback) {
rc = smc_close_active(smc);
sock_set_flag(sk, SOCK_DEAD);
sk->sk_shutdown |= SHUTDOWN_MASK;
@@ -136,20 +132,21 @@ static int smc_release(struct socket *sock)
sock_release(smc->clcsock);
smc->clcsock = NULL;
}
if (smc->use_fallback) {
sock_put(sk); /* passive closing */
sk->sk_state = SMC_CLOSED;
sk->sk_state_change(sk);
}
/* detach socket */
sock_orphan(sk);
sock->sk = NULL;
if (smc->use_fallback) {
schedule_delayed_work(&smc->sock_put_work, TCP_TIMEWAIT_LEN);
} else if (sk->sk_state == SMC_CLOSED) {
if (!smc->use_fallback && sk->sk_state == SMC_CLOSED)
smc_conn_free(&smc->conn);
schedule_delayed_work(&smc->sock_put_work,
SMC_CLOSE_SOCK_PUT_DELAY);
}
release_sock(sk);
sock_put(sk);
sk->sk_prot->unhash(sk);
sock_put(sk); /* final sock_put */
out:
return rc;
}
@@ -181,7 +178,6 @@ static struct sock *smc_sock_alloc(struct net *net, struct socket *sock)
INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
INIT_LIST_HEAD(&smc->accept_q);
spin_lock_init(&smc->accept_q_lock);
INIT_DELAYED_WORK(&smc->sock_put_work, smc_close_sock_put_work);
sk->sk_prot->hash(sk);
sk_refcnt_debug_inc(sk);
@@ -399,6 +395,8 @@ static int smc_connect_rdma(struct smc_sock *smc)
int rc = 0;
u8 ibport;
sock_hold(&smc->sk); /* sock put in passive closing */
if (!tcp_sk(smc->clcsock->sk)->syn_smc) {
/* peer has not signalled SMC-capability */
smc->use_fallback = true;
@@ -542,6 +540,8 @@ static int smc_connect_rdma(struct smc_sock *smc)
mutex_unlock(&smc_create_lgr_pending);
smc_conn_free(&smc->conn);
out_err:
if (smc->sk.sk_state == SMC_INIT)
sock_put(&smc->sk); /* passive closing */
return rc;
}
@@ -620,7 +620,7 @@ static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
new_sk->sk_state = SMC_CLOSED;
sock_set_flag(new_sk, SOCK_DEAD);
new_sk->sk_prot->unhash(new_sk);
sock_put(new_sk);
sock_put(new_sk); /* final */
*new_smc = NULL;
goto out;
}
@@ -637,7 +637,7 @@ static void smc_accept_enqueue(struct sock *parent, struct sock *sk)
{
struct smc_sock *par = smc_sk(parent);
sock_hold(sk);
sock_hold(sk); /* sock_put in smc_accept_unlink () */
spin_lock(&par->accept_q_lock);
list_add_tail(&smc_sk(sk)->accept_q, &par->accept_q);
spin_unlock(&par->accept_q_lock);
@@ -653,7 +653,7 @@ static void smc_accept_unlink(struct sock *sk)
list_del_init(&smc_sk(sk)->accept_q);
spin_unlock(&par->accept_q_lock);
sk_acceptq_removed(&smc_sk(sk)->listen_smc->sk);
sock_put(sk);
sock_put(sk); /* sock_hold in smc_accept_enqueue */
}
/* remove a sock from the accept queue to bind it to a new socket created
@@ -670,8 +670,12 @@ struct sock *smc_accept_dequeue(struct sock *parent,
smc_accept_unlink(new_sk);
if (new_sk->sk_state == SMC_CLOSED) {
if (isk->clcsock) {
sock_release(isk->clcsock);
isk->clcsock = NULL;
}
new_sk->sk_prot->unhash(new_sk);
sock_put(new_sk);
sock_put(new_sk); /* final */
continue;
}
if (new_sock)
@@ -686,14 +690,11 @@ void smc_close_non_accepted(struct sock *sk)
{
struct smc_sock *smc = smc_sk(sk);
sock_hold(sk);
lock_sock(sk);
if (!sk->sk_lingertime)
/* wait for peer closing */
sk->sk_lingertime = SMC_MAX_STREAM_WAIT_TIMEOUT;
if (smc->use_fallback) {
sk->sk_state = SMC_CLOSED;
} else {
if (!smc->use_fallback) {
smc_close_active(smc);
sock_set_flag(sk, SOCK_DEAD);
sk->sk_shutdown |= SHUTDOWN_MASK;
@@ -706,14 +707,15 @@ void smc_close_non_accepted(struct sock *sk)
sock_release(tcp);
}
if (smc->use_fallback) {
schedule_delayed_work(&smc->sock_put_work, TCP_TIMEWAIT_LEN);
} else if (sk->sk_state == SMC_CLOSED) {
sock_put(sk); /* passive closing */
sk->sk_state = SMC_CLOSED;
} else {
if (sk->sk_state == SMC_CLOSED)
smc_conn_free(&smc->conn);
schedule_delayed_work(&smc->sock_put_work,
SMC_CLOSE_SOCK_PUT_DELAY);
}
release_sock(sk);
sock_put(sk);
sk->sk_prot->unhash(sk);
sock_put(sk); /* final sock_put */
}
static int smc_serv_conf_first_link(struct smc_sock *smc)
@@ -937,6 +939,8 @@ static void smc_listen_work(struct work_struct *work)
smc_lgr_forget(new_smc->conn.lgr);
mutex_unlock(&smc_create_lgr_pending);
out_err:
if (newsmcsk->sk_state == SMC_INIT)
sock_put(&new_smc->sk); /* passive closing */
newsmcsk->sk_state = SMC_CLOSED;
smc_conn_free(&new_smc->conn);
goto enqueue; /* queue new sock with sk_err set */
@@ -963,12 +967,22 @@ static void smc_tcp_listen_work(struct work_struct *work)
sock_hold(lsk); /* sock_put in smc_listen_work */
INIT_WORK(&new_smc->smc_listen_work, smc_listen_work);
smc_copy_sock_settings_to_smc(new_smc);
schedule_work(&new_smc->smc_listen_work);
sock_hold(&new_smc->sk); /* sock_put in passive closing */
if (!schedule_work(&new_smc->smc_listen_work))
sock_put(&new_smc->sk);
}
out:
if (lsmc->clcsock) {
sock_release(lsmc->clcsock);
lsmc->clcsock = NULL;
}
release_sock(lsk);
lsk->sk_data_ready(lsk); /* no more listening, wake accept */
/* no more listening, wake up smc_close_wait_listen_clcsock and
* accept
*/
lsk->sk_state_change(lsk);
sock_put(&lsmc->sk); /* sock_hold in smc_listen */
}
static int smc_listen(struct socket *sock, int backlog)
@@ -1002,7 +1016,9 @@ static int smc_listen(struct socket *sock, int backlog)
sk->sk_ack_backlog = 0;
sk->sk_state = SMC_LISTEN;
INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
schedule_work(&smc->tcp_listen_work);
sock_hold(sk); /* sock_hold in tcp_listen_worker */
if (!schedule_work(&smc->tcp_listen_work))
sock_put(sk);
out:
release_sock(sk);
@@ -1019,6 +1035,7 @@ static int smc_accept(struct socket *sock, struct socket *new_sock,
int rc = 0;
lsmc = smc_sk(sk);
sock_hold(sk); /* sock_put below */
lock_sock(sk);
if (lsmc->sk.sk_state != SMC_LISTEN) {
@@ -1053,6 +1070,7 @@ static int smc_accept(struct socket *sock, struct socket *new_sock,
out:
release_sock(sk);
sock_put(sk); /* sock_hold above */
return rc;
}
@@ -1122,21 +1140,15 @@ static int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
static unsigned int smc_accept_poll(struct sock *parent)
{
struct smc_sock *isk;
struct sock *sk;
struct smc_sock *isk = smc_sk(parent);
int mask = 0;
lock_sock(parent);
list_for_each_entry(isk, &smc_sk(parent)->accept_q, accept_q) {
sk = (struct sock *)isk;
spin_lock(&isk->accept_q_lock);
if (!list_empty(&isk->accept_q))
mask = POLLIN | POLLRDNORM;
spin_unlock(&isk->accept_q_lock);
if (sk->sk_state == SMC_ACTIVE) {
release_sock(parent);
return POLLIN | POLLRDNORM;
}
}
release_sock(parent);
return 0;
return mask;
}
static unsigned int smc_poll(struct file *file, struct socket *sock,
@@ -1147,9 +1159,15 @@ static unsigned int smc_poll(struct file *file, struct socket *sock,
struct smc_sock *smc;
int rc;
if (!sk)
return POLLNVAL;
smc = smc_sk(sock->sk);
sock_hold(sk);
lock_sock(sk);
if ((sk->sk_state == SMC_INIT) || smc->use_fallback) {
/* delegate to CLC child sock */
release_sock(sk);
mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
/* if non-blocking connect finished ... */
lock_sock(sk);
@@ -1161,21 +1179,27 @@ static unsigned int smc_poll(struct file *file, struct socket *sock,
rc = smc_connect_rdma(smc);
if (rc < 0)
mask |= POLLERR;
else
/* success cases including fallback */
mask |= POLLOUT | POLLWRNORM;
}
}
release_sock(sk);
} else {
if (sk->sk_state != SMC_CLOSED) {
release_sock(sk);
sock_poll_wait(file, sk_sleep(sk), wait);
if (sk->sk_state == SMC_LISTEN)
/* woken up by sk_data_ready in smc_listen_work() */
mask |= smc_accept_poll(sk);
lock_sock(sk);
}
if (sk->sk_err)
mask |= POLLERR;
if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
(sk->sk_state == SMC_CLOSED))
mask |= POLLHUP;
if (sk->sk_state == SMC_LISTEN) {
/* woken up by sk_data_ready in smc_listen_work() */
mask = smc_accept_poll(sk);
} else {
if (atomic_read(&smc->conn.sndbuf_space) ||
(sk->sk_shutdown & SEND_SHUTDOWN)) {
sk->sk_shutdown & SEND_SHUTDOWN) {
mask |= POLLOUT | POLLWRNORM;
} else {
sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
@@ -1183,15 +1207,15 @@ static unsigned int smc_poll(struct file *file, struct socket *sock,
}
if (atomic_read(&smc->conn.bytes_to_rcv))
mask |= POLLIN | POLLRDNORM;
if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
(sk->sk_state == SMC_CLOSED))
mask |= POLLHUP;
if (sk->sk_shutdown & RCV_SHUTDOWN)
mask |= POLLIN | POLLRDNORM | POLLRDHUP;
if (sk->sk_state == SMC_APPCLOSEWAIT1)
mask |= POLLIN;
}
}
release_sock(sk);
sock_put(sk);
return mask;
}
@@ -178,7 +178,6 @@ struct smc_sock { /* smc sock container */
struct work_struct smc_listen_work;/* prepare new accept socket */
struct list_head accept_q; /* sockets to be accepted */
spinlock_t accept_q_lock; /* protects accept_q */
struct delayed_work sock_put_work; /* final socket freeing */
bool use_fallback; /* fallback to tcp */
u8 wait_close_tx_prepared : 1;
/* shutdown wr or close
@@ -253,12 +252,12 @@ static inline int smc_uncompress_bufsize(u8 compressed)
static inline bool using_ipsec(struct smc_sock *smc)
{
return (smc->clcsock->sk->sk_policy[0] ||
smc->clcsock->sk->sk_policy[1]) ? 1 : 0;
smc->clcsock->sk->sk_policy[1]) ? true : false;
}
#else
static inline bool using_ipsec(struct smc_sock *smc)
{
return 0;
return false;
}
#endif
@@ -212,6 +212,14 @@ static void smc_cdc_msg_recv_action(struct smc_sock *smc,
smc->sk.sk_data_ready(&smc->sk);
}
/* piggy backed tx info */
/* trigger sndbuf consumer: RDMA write into peer RMBE and CDC */
if (diff_cons && smc_tx_prepared_sends(conn)) {
smc_tx_sndbuf_nonempty(conn);
/* trigger socket release if connection closed */
smc_close_wake_tx_prepared(smc);
}
if (conn->local_rx_ctrl.conn_state_flags.peer_conn_abort) {
smc->sk.sk_err = ECONNRESET;
conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
@@ -221,15 +229,9 @@ static void smc_cdc_msg_recv_action(struct smc_sock *smc,
if (smc->clcsock && smc->clcsock->sk)
smc->clcsock->sk->sk_shutdown |= RCV_SHUTDOWN;
sock_set_flag(&smc->sk, SOCK_DONE);
schedule_work(&conn->close_work);
}
/* piggy backed tx info */
/* trigger sndbuf consumer: RDMA write into peer RMBE and CDC */
if (diff_cons && smc_tx_prepared_sends(conn)) {
smc_tx_sndbuf_nonempty(conn);
/* trigger socket release if connection closed */
smc_close_wake_tx_prepared(smc);
sock_hold(&smc->sk); /* sock_put in close_work */
if (!schedule_work(&conn->close_work))
sock_put(&smc->sk);
}
}
@@ -19,6 +19,8 @@
#include "smc_cdc.h"
#include "smc_close.h"
#define SMC_CLOSE_WAIT_LISTEN_CLCSOCK_TIME (5 * HZ)
static void smc_close_cleanup_listen(struct sock *parent)
{
struct sock *sk;
@@ -28,6 +30,27 @@ static void smc_close_cleanup_listen(struct sock *parent)
smc_close_non_accepted(sk);
}
static void smc_close_wait_listen_clcsock(struct smc_sock *smc)
{
DEFINE_WAIT_FUNC(wait, woken_wake_function);
struct sock *sk = &smc->sk;
signed long timeout;
timeout = SMC_CLOSE_WAIT_LISTEN_CLCSOCK_TIME;
add_wait_queue(sk_sleep(sk), &wait);
do {
release_sock(sk);
if (smc->clcsock)
timeout = wait_woken(&wait, TASK_UNINTERRUPTIBLE,
timeout);
sched_annotate_sleep();
lock_sock(sk);
if (!smc->clcsock)
break;
} while (timeout);
remove_wait_queue(sk_sleep(sk), &wait);
}
/* wait for sndbuf data being transmitted */
static void smc_close_stream_wait(struct smc_sock *smc, long timeout)
{
@@ -110,10 +133,10 @@ static void smc_close_active_abort(struct smc_sock *smc)
release_sock(sk);
cancel_delayed_work_sync(&smc->conn.tx_work);
lock_sock(sk);
sock_put(sk); /* passive closing */
break;
case SMC_APPCLOSEWAIT1:
case SMC_APPCLOSEWAIT2:
sock_release(smc->clcsock);
if (!smc_cdc_rxed_any_close(&smc->conn))
sk->sk_state = SMC_PEERABORTWAIT;
else
@@ -125,19 +148,20 @@ static void smc_close_active_abort(struct smc_sock *smc)
case SMC_PEERCLOSEWAIT1:
case SMC_PEERCLOSEWAIT2:
if (!txflags->peer_conn_closed) {
/* just SHUTDOWN_SEND done */
sk->sk_state = SMC_PEERABORTWAIT;
sock_release(smc->clcsock);
} else {
sk->sk_state = SMC_CLOSED;
}
sock_put(sk); /* passive closing */
break;
case SMC_PROCESSABORT:
case SMC_APPFINCLOSEWAIT:
if (!txflags->peer_conn_closed)
sock_release(smc->clcsock);
sk->sk_state = SMC_CLOSED;
break;
case SMC_PEERFINCLOSEWAIT:
sock_put(sk); /* passive closing */
break;
case SMC_PEERABORTWAIT:
case SMC_CLOSED:
break;
@@ -172,8 +196,6 @@ int smc_close_active(struct smc_sock *smc)
switch (sk->sk_state) {
case SMC_INIT:
sk->sk_state = SMC_CLOSED;
if (smc->smc_listen_work.func)
cancel_work_sync(&smc->smc_listen_work);
break;
case SMC_LISTEN:
sk->sk_state = SMC_CLOSED;
@@ -182,11 +204,9 @@ int smc_close_active(struct smc_sock *smc)
rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
/* wake up kernel_accept of smc_tcp_listen_worker */
smc->clcsock->sk->sk_data_ready(smc->clcsock->sk);
smc_close_wait_listen_clcsock(smc);
}
release_sock(sk);
smc_close_cleanup_listen(sk);
cancel_work_sync(&smc->smc_listen_work);
lock_sock(sk);
break;
case SMC_ACTIVE:
smc_close_stream_wait(smc, timeout);
@@ -229,12 +249,14 @@ int smc_close_active(struct smc_sock *smc)
rc = smc_close_final(conn);
if (rc)
break;
if (smc_cdc_rxed_any_close(conn))
if (smc_cdc_rxed_any_close(conn)) {
/* peer has closed the socket already */
sk->sk_state = SMC_CLOSED;
else
sock_put(sk); /* postponed passive closing */
} else {
/* peer has just issued a shutdown write */
sk->sk_state = SMC_PEERFINCLOSEWAIT;
}
break;
case SMC_PEERCLOSEWAIT1:
case SMC_PEERCLOSEWAIT2:
@@ -272,27 +294,33 @@ static void smc_close_passive_abort_received(struct smc_sock *smc)
struct sock *sk = &smc->sk;
switch (sk->sk_state) {
case SMC_INIT:
case SMC_ACTIVE:
case SMC_APPFINCLOSEWAIT:
case SMC_APPCLOSEWAIT1:
case SMC_APPCLOSEWAIT2:
sk->sk_state = SMC_PROCESSABORT;
sock_put(sk); /* passive closing */
break;
case SMC_APPFINCLOSEWAIT:
sk->sk_state = SMC_PROCESSABORT;
break;
case SMC_PEERCLOSEWAIT1:
case SMC_PEERCLOSEWAIT2:
if (txflags->peer_done_writing &&
!smc_close_sent_any_close(&smc->conn)) {
!smc_close_sent_any_close(&smc->conn))
/* just shutdown, but not yet closed locally */
sk->sk_state = SMC_PROCESSABORT;
} else {
else
sk->sk_state = SMC_CLOSED;
}
sock_put(sk); /* passive closing */
break;
case SMC_APPCLOSEWAIT2:
case SMC_PEERFINCLOSEWAIT:
sk->sk_state = SMC_CLOSED;
sock_put(sk); /* passive closing */
break;
case SMC_PEERABORTWAIT:
sk->sk_state = SMC_CLOSED;
break;
case SMC_INIT:
case SMC_PROCESSABORT:
/* nothing to do, add tracing in future patch */
break;
@@ -336,13 +364,18 @@ static void smc_close_passive_work(struct work_struct *work)
case SMC_INIT:
if (atomic_read(&conn->bytes_to_rcv) ||
(rxflags->peer_done_writing &&
!smc_cdc_rxed_any_close(conn)))
!smc_cdc_rxed_any_close(conn))) {
sk->sk_state = SMC_APPCLOSEWAIT1;
else
} else {
sk->sk_state = SMC_CLOSED;
sock_put(sk); /* passive closing */
}
break;
case SMC_ACTIVE:
sk->sk_state = SMC_APPCLOSEWAIT1;
/* postpone sock_put() for passive closing to cover
* received SEND_SHUTDOWN as well
*/
break;
case SMC_PEERCLOSEWAIT1:
if (rxflags->peer_done_writing)
@@ -360,13 +393,20 @@ static void smc_close_passive_work(struct work_struct *work)
/* just shutdown, but not yet closed locally */
sk->sk_state = SMC_APPFINCLOSEWAIT;
}
sock_put(sk); /* passive closing */
break;
case SMC_PEERFINCLOSEWAIT:
if (smc_cdc_rxed_any_close(conn))
if (smc_cdc_rxed_any_close(conn)) {
sk->sk_state = SMC_CLOSED;
sock_put(sk); /* passive closing */
}
break;
case SMC_APPCLOSEWAIT1:
case SMC_APPCLOSEWAIT2:
/* postpone sock_put() for passive closing to cover
* received SEND_SHUTDOWN as well
*/
break;
case SMC_APPFINCLOSEWAIT:
case SMC_PEERABORTWAIT:
case SMC_PROCESSABORT:
@@ -382,23 +422,11 @@ static void smc_close_passive_work(struct work_struct *work)
if (old_state != sk->sk_state) {
sk->sk_state_change(sk);
if ((sk->sk_state == SMC_CLOSED) &&
(sock_flag(sk, SOCK_DEAD) || !sk->sk_socket)) {
(sock_flag(sk, SOCK_DEAD) || !sk->sk_socket))
smc_conn_free(conn);
schedule_delayed_work(&smc->sock_put_work,
SMC_CLOSE_SOCK_PUT_DELAY);
}
}
release_sock(sk);
}
void smc_close_sock_put_work(struct work_struct *work)
{
struct smc_sock *smc = container_of(to_delayed_work(work),
struct smc_sock,
sock_put_work);
smc->sk.sk_prot->unhash(&smc->sk);
sock_put(&smc->sk);
sock_put(sk); /* sock_hold done by schedulers of close_work */
}
int smc_close_shutdown_write(struct smc_sock *smc)
@@ -21,7 +21,6 @@
void smc_close_wake_tx_prepared(struct smc_sock *smc);
int smc_close_active(struct smc_sock *smc);
void smc_close_sock_put_work(struct work_struct *work);
int smc_close_shutdown_write(struct smc_sock *smc);
void smc_close_init(struct smc_sock *smc);
@@ -328,13 +328,13 @@ void smc_lgr_terminate(struct smc_link_group *lgr)
while (node) {
conn = rb_entry(node, struct smc_connection, alert_node);
smc = container_of(conn, struct smc_sock, conn);
sock_hold(&smc->sk);
sock_hold(&smc->sk); /* sock_put in close work */
conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
__smc_lgr_unregister_conn(conn);
write_unlock_bh(&lgr->conns_lock);
schedule_work(&conn->close_work);
write_lock_bh(&lgr->conns_lock);
if (!schedule_work(&conn->close_work))
sock_put(&smc->sk);
write_lock_bh(&lgr->conns_lock);
node = rb_first(&lgr->conns_all);
}
write_unlock_bh(&lgr->conns_lock);
@@ -141,6 +141,17 @@ int smc_ib_ready_link(struct smc_link *lnk)
return rc;
}
static void smc_ib_port_terminate(struct smc_ib_device *smcibdev, u8 ibport)
{
struct smc_link_group *lgr, *l;
list_for_each_entry_safe(lgr, l, &smc_lgr_list.list, list) {
if (lgr->lnk[SMC_SINGLE_LINK].smcibdev == smcibdev &&
lgr->lnk[SMC_SINGLE_LINK].ibport == ibport)
smc_lgr_terminate(lgr);
}
}
/* process context wrapper for might_sleep smc_ib_remember_port_attr */
static void smc_ib_port_event_work(struct work_struct *work)
{
@@ -151,6 +162,8 @@ static void smc_ib_port_event_work(struct work_struct *work)
for_each_set_bit(port_idx, &smcibdev->port_event_mask, SMC_MAX_PORTS) {
smc_ib_remember_port_attr(smcibdev, port_idx + 1);
clear_bit(port_idx, &smcibdev->port_event_mask);
if (!smc_ib_port_active(smcibdev, port_idx + 1))
smc_ib_port_terminate(smcibdev, port_idx + 1);
}
}
@@ -165,15 +178,7 @@ static void smc_ib_global_event_handler(struct ib_event_handler *handler,
switch (ibevent->event) {
case IB_EVENT_PORT_ERR:
port_idx = ibevent->element.port_num - 1;
set_bit(port_idx, &smcibdev->port_event_mask);
schedule_work(&smcibdev->port_event_work);
/* fall through */
case IB_EVENT_DEVICE_FATAL:
/* tbd in follow-on patch:
* abnormal close of corresponding connections
*/
break;
case IB_EVENT_PORT_ACTIVE:
port_idx = ibevent->element.port_num - 1;
set_bit(port_idx, &smcibdev->port_event_mask);
@@ -186,6 +191,7 @@ void smc_ib_dealloc_protection_domain(struct smc_link *lnk)
void smc_ib_dealloc_protection_domain(struct smc_link *lnk)
{
if (lnk->roce_pd)
ib_dealloc_pd(lnk->roce_pd);
lnk->roce_pd = NULL;
}
@@ -203,14 +209,18 @@ static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv)
static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv)
{
struct smc_ib_device *smcibdev =
(struct smc_ib_device *)ibevent->device;
u8 port_idx;
switch (ibevent->event) {
case IB_EVENT_DEVICE_FATAL:
case IB_EVENT_GID_CHANGE:
case IB_EVENT_PORT_ERR:
case IB_EVENT_QP_ACCESS_ERR:
/* tbd in follow-on patch:
* abnormal close of corresponding connections
*/
port_idx = ibevent->element.port_num - 1;
set_bit(port_idx, &smcibdev->port_event_mask);
schedule_work(&smcibdev->port_event_work);
break;
default:
break;
@@ -219,6 +229,7 @@ static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv)
void smc_ib_destroy_queue_pair(struct smc_link *lnk)
{
if (lnk->roce_qp)
ib_destroy_qp(lnk->roce_qp);
lnk->roce_qp = NULL;
}
@@ -462,6 +473,7 @@ static void smc_ib_cleanup_per_ibdev(struct smc_ib_device *smcibdev)
{
if (!smcibdev->initialized)
return;
smcibdev->initialized = 0;
smc_wr_remove_dev(smcibdev);
ib_unregister_event_handler(&smcibdev->event_handler);
ib_destroy_cq(smcibdev->roce_cq_recv);