Commit 3163c507 authored by Ursula Braun, committed by David S. Miller

net/smc: use local struct sock variables consistently

Cleanup to use the local struct sock variables consistently.
No functional change.
Signed-off-by: Ursula Braun <ubraun@linux.vnet.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 9d5fd927
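
For readers skimming the diff below, here is a minimal, illustrative before/after sketch of the pattern this cleanup applies. The function names example_abort_old/example_abort_new are hypothetical and not part of the patch; the statements are lifted from the smc_close hunks.

/* Before: every access spells out the embedded sock, smc->sk. */
static void example_abort_old(struct smc_sock *smc)
{
        smc->sk.sk_err = ECONNABORTED;
        smc->sk.sk_state = SMC_CLOSED;
        sock_set_flag(&smc->sk, SOCK_DEAD);
        smc->sk.sk_state_change(&smc->sk);
}

/* After: take one local struct sock pointer and use it consistently. */
static void example_abort_new(struct smc_sock *smc)
{
        struct sock *sk = &smc->sk;

        sk->sk_err = ECONNABORTED;
        sk->sk_state = SMC_CLOSED;
        sock_set_flag(sk, SOCK_DEAD);
        sk->sk_state_change(sk);
}

Only the source text changes; the behaviour is identical, which is what the commit message means by "No functional change".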
net/smc/af_smc.c:

@@ -581,39 +581,39 @@ static int smc_connect(struct socket *sock, struct sockaddr *addr,
 static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
 {
-        struct sock *sk = &lsmc->sk;
-        struct socket *new_clcsock;
+        struct socket *new_clcsock = NULL;
+        struct sock *lsk = &lsmc->sk;
         struct sock *new_sk;
         int rc;
 
-        release_sock(&lsmc->sk);
-        new_sk = smc_sock_alloc(sock_net(sk), NULL);
+        release_sock(lsk);
+        new_sk = smc_sock_alloc(sock_net(lsk), NULL);
         if (!new_sk) {
                 rc = -ENOMEM;
-                lsmc->sk.sk_err = ENOMEM;
+                lsk->sk_err = ENOMEM;
                 *new_smc = NULL;
-                lock_sock(&lsmc->sk);
+                lock_sock(lsk);
                 goto out;
         }
         *new_smc = smc_sk(new_sk);
 
         rc = kernel_accept(lsmc->clcsock, &new_clcsock, 0);
-        lock_sock(&lsmc->sk);
+        lock_sock(lsk);
         if (rc < 0) {
-                lsmc->sk.sk_err = -rc;
+                lsk->sk_err = -rc;
                 new_sk->sk_state = SMC_CLOSED;
                 sock_set_flag(new_sk, SOCK_DEAD);
-                sk->sk_prot->unhash(new_sk);
+                new_sk->sk_prot->unhash(new_sk);
                 sock_put(new_sk);
                 *new_smc = NULL;
                 goto out;
         }
-        if (lsmc->sk.sk_state == SMC_CLOSED) {
+        if (lsk->sk_state == SMC_CLOSED) {
                 if (new_clcsock)
                         sock_release(new_clcsock);
                 new_sk->sk_state = SMC_CLOSED;
                 sock_set_flag(new_sk, SOCK_DEAD);
-                sk->sk_prot->unhash(new_sk);
+                new_sk->sk_prot->unhash(new_sk);
                 sock_put(new_sk);
                 *new_smc = NULL;
                 goto out;

@@ -936,11 +936,12 @@ static void smc_tcp_listen_work(struct work_struct *work)
 {
         struct smc_sock *lsmc = container_of(work, struct smc_sock,
                                              tcp_listen_work);
+        struct sock *lsk = &lsmc->sk;
         struct smc_sock *new_smc;
         int rc = 0;
 
-        lock_sock(&lsmc->sk);
-        while (lsmc->sk.sk_state == SMC_LISTEN) {
+        lock_sock(lsk);
+        while (lsk->sk_state == SMC_LISTEN) {
                 rc = smc_clcsock_accept(lsmc, &new_smc);
                 if (rc)
                         goto out;

@@ -949,15 +950,15 @@ static void smc_tcp_listen_work(struct work_struct *work)
                 new_smc->listen_smc = lsmc;
                 new_smc->use_fallback = false; /* assume rdma capability first*/
-                sock_hold(&lsmc->sk); /* sock_put in smc_listen_work */
+                sock_hold(lsk); /* sock_put in smc_listen_work */
                 INIT_WORK(&new_smc->smc_listen_work, smc_listen_work);
                 smc_copy_sock_settings_to_smc(new_smc);
                 schedule_work(&new_smc->smc_listen_work);
         }
 
 out:
-        release_sock(&lsmc->sk);
-        lsmc->sk.sk_data_ready(&lsmc->sk); /* no more listening, wake accept */
+        release_sock(lsk);
+        lsk->sk_data_ready(lsk); /* no more listening, wake accept */
 }
 
 static int smc_listen(struct socket *sock, int backlog)
net/smc/smc_close.c:

@@ -115,36 +115,38 @@ static int smc_close_abort(struct smc_connection *conn)
  */
 static void smc_close_active_abort(struct smc_sock *smc)
 {
+        struct sock *sk = &smc->sk;
         struct smc_cdc_conn_state_flags *txflags =
                 &smc->conn.local_tx_ctrl.conn_state_flags;
 
-        smc->sk.sk_err = ECONNABORTED;
+        sk->sk_err = ECONNABORTED;
         if (smc->clcsock && smc->clcsock->sk) {
                 smc->clcsock->sk->sk_err = ECONNABORTED;
                 smc->clcsock->sk->sk_state_change(smc->clcsock->sk);
         }
-        switch (smc->sk.sk_state) {
+        switch (sk->sk_state) {
         case SMC_INIT:
         case SMC_ACTIVE:
-                smc->sk.sk_state = SMC_PEERABORTWAIT;
+                sk->sk_state = SMC_PEERABORTWAIT;
                 break;
         case SMC_APPCLOSEWAIT1:
         case SMC_APPCLOSEWAIT2:
                 txflags->peer_conn_abort = 1;
                 sock_release(smc->clcsock);
                 if (!smc_cdc_rxed_any_close(&smc->conn))
-                        smc->sk.sk_state = SMC_PEERABORTWAIT;
+                        sk->sk_state = SMC_PEERABORTWAIT;
                 else
-                        smc->sk.sk_state = SMC_CLOSED;
+                        sk->sk_state = SMC_CLOSED;
                 break;
         case SMC_PEERCLOSEWAIT1:
         case SMC_PEERCLOSEWAIT2:
                 if (!txflags->peer_conn_closed) {
-                        smc->sk.sk_state = SMC_PEERABORTWAIT;
+                        sk->sk_state = SMC_PEERABORTWAIT;
                         txflags->peer_conn_abort = 1;
                         sock_release(smc->clcsock);
                 } else {
-                        smc->sk.sk_state = SMC_CLOSED;
+                        sk->sk_state = SMC_CLOSED;
                 }
                 break;
         case SMC_PROCESSABORT:

@@ -153,7 +155,7 @@ static void smc_close_active_abort(struct smc_sock *smc)
                         txflags->peer_conn_abort = 1;
                         sock_release(smc->clcsock);
                 }
-                smc->sk.sk_state = SMC_CLOSED;
+                sk->sk_state = SMC_CLOSED;
                 break;
         case SMC_PEERFINCLOSEWAIT:
         case SMC_PEERABORTWAIT:

@@ -161,8 +163,8 @@ static void smc_close_active_abort(struct smc_sock *smc)
                 break;
         }
 
-        sock_set_flag(&smc->sk, SOCK_DEAD);
-        smc->sk.sk_state_change(&smc->sk);
+        sock_set_flag(sk, SOCK_DEAD);
+        sk->sk_state_change(sk);
 }
 
 static inline bool smc_close_sent_any_close(struct smc_connection *conn)

@@ -278,7 +280,7 @@ int smc_close_active(struct smc_sock *smc)
         }
 
         if (old_state != sk->sk_state)
-                sk->sk_state_change(&smc->sk);
+                sk->sk_state_change(sk);
         return rc;
 }

@@ -331,7 +333,7 @@ static void smc_close_passive_work(struct work_struct *work)
         struct sock *sk = &smc->sk;
         int old_state;
 
-        lock_sock(&smc->sk);
+        lock_sock(sk);
         old_state = sk->sk_state;
 
         if (!conn->alert_token_local) {

@@ -340,7 +342,7 @@ static void smc_close_passive_work(struct work_struct *work)
                 goto wakeup;
         }
 
-        rxflags = &smc->conn.local_rx_ctrl.conn_state_flags;
+        rxflags = &conn->local_rx_ctrl.conn_state_flags;
         if (rxflags->peer_conn_abort) {
                 smc_close_passive_abort_received(smc);
                 goto wakeup;

@@ -348,7 +350,7 @@ static void smc_close_passive_work(struct work_struct *work)
         switch (sk->sk_state) {
         case SMC_INIT:
-                if (atomic_read(&smc->conn.bytes_to_rcv) ||
+                if (atomic_read(&conn->bytes_to_rcv) ||
                     (rxflags->peer_done_writing &&
                      !smc_cdc_rxed_any_close(conn)))
                         sk->sk_state = SMC_APPCLOSEWAIT1;

@@ -365,7 +367,7 @@ static void smc_close_passive_work(struct work_struct *work)
                 /* to check for closing */
         case SMC_PEERCLOSEWAIT2:
         case SMC_PEERFINCLOSEWAIT:
-                if (!smc_cdc_rxed_any_close(&smc->conn))
+                if (!smc_cdc_rxed_any_close(conn))
                         break;
                 if (sock_flag(sk, SOCK_DEAD) &&
                     smc_close_sent_any_close(conn)) {

@@ -394,12 +396,12 @@ static void smc_close_passive_work(struct work_struct *work)
                 sk->sk_state_change(sk);
                 if ((sk->sk_state == SMC_CLOSED) &&
                     (sock_flag(sk, SOCK_DEAD) || !sk->sk_socket)) {
-                        smc_conn_free(&smc->conn);
+                        smc_conn_free(conn);
                         schedule_delayed_work(&smc->sock_put_work,
                                               SMC_CLOSE_SOCK_PUT_DELAY);
                 }
         }
-        release_sock(&smc->sk);
+        release_sock(sk);
 }
 
 void smc_close_sock_put_work(struct work_struct *work)

@@ -462,7 +464,7 @@ int smc_close_shutdown_write(struct smc_sock *smc)
         }
 
         if (old_state != sk->sk_state)
-                sk->sk_state_change(&smc->sk);
+                sk->sk_state_change(sk);
         return rc;
 }