Commit 5fd88337 authored by James Simmons, committed by Greg Kroah-Hartman

staging: lustre: fix all conditional comparison to zero in LNet layer

Doing if (rc != 0) or if (rc == 0) is bad form. This patch corrects
the LNet code to behave according to kernel coding standards.
Signed-off-by: James Simmons <jsimmons@infradead.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 06ace26e
......@@ -72,8 +72,8 @@ static inline int lnet_is_wire_handle_none(lnet_handle_wire_t *wh)
static inline int lnet_md_exhausted(lnet_libmd_t *md)
{
return (md->md_threshold == 0 ||
((md->md_options & LNET_MD_MAX_SIZE) != 0 &&
return (!md->md_threshold ||
((md->md_options & LNET_MD_MAX_SIZE) &&
md->md_offset + md->md_max_size > md->md_length));
}
......@@ -85,13 +85,13 @@ static inline int lnet_md_unlinkable(lnet_libmd_t *md)
* LNetM[DE]Unlink, in the latter case md may not be exhausted).
* - auto unlink is on and md is exhausted.
*/
if (md->md_refcount != 0)
if (md->md_refcount)
return 0;
if ((md->md_flags & LNET_MD_FLAG_ZOMBIE) != 0)
if (md->md_flags & LNET_MD_FLAG_ZOMBIE)
return 1;
return ((md->md_flags & LNET_MD_FLAG_AUTO_UNLINK) != 0 &&
return ((md->md_flags & LNET_MD_FLAG_AUTO_UNLINK) &&
lnet_md_exhausted(md));
}
......@@ -186,12 +186,11 @@ lnet_md_alloc(lnet_md_t *umd)
unsigned int size;
unsigned int niov;
if ((umd->options & LNET_MD_KIOV) != 0) {
if (umd->options & LNET_MD_KIOV) {
niov = umd->length;
size = offsetof(lnet_libmd_t, md_iov.kiov[niov]);
} else {
niov = ((umd->options & LNET_MD_IOVEC) != 0) ?
umd->length : 1;
niov = umd->options & LNET_MD_IOVEC ? umd->length : 1;
size = offsetof(lnet_libmd_t, md_iov.iov[niov]);
}
......@@ -212,7 +211,7 @@ lnet_md_free(lnet_libmd_t *md)
{
unsigned int size;
if ((md->md_options & LNET_MD_KIOV) != 0)
if (md->md_options & LNET_MD_KIOV)
size = offsetof(lnet_libmd_t, md_iov.kiov[md->md_niov]);
else
size = offsetof(lnet_libmd_t, md_iov.iov[md->md_niov]);
......@@ -364,14 +363,14 @@ lnet_peer_decref_locked(lnet_peer_t *lp)
{
LASSERT(lp->lp_refcount > 0);
lp->lp_refcount--;
if (lp->lp_refcount == 0)
if (!lp->lp_refcount)
lnet_destroy_peer_locked(lp);
}
static inline int
lnet_isrouter(lnet_peer_t *lp)
{
return lp->lp_rtr_refcount != 0;
return lp->lp_rtr_refcount ? 1 : 0;
}
static inline void
......
......@@ -359,7 +359,7 @@ struct lnet_peer_table {
* peer aliveness is enabled only on routers for peers in a network where the
* lnet_ni_t::ni_peertimeout has been set to a positive value
*/
#define lnet_peer_aliveness_enabled(lp) (the_lnet.ln_routing != 0 && \
#define lnet_peer_aliveness_enabled(lp) (the_lnet.ln_routing && \
(lp)->lp_ni->ni_peertimeout > 0)
typedef struct {
......
......@@ -148,7 +148,7 @@ kiblnd_concurrent_sends_v1(void)
#define IBLND_MSG_SIZE (4 << 10) /* max size of queued messages (inc hdr) */
#define IBLND_MAX_RDMA_FRAGS LNET_MAX_IOV /* max # of fragments supported */
#define IBLND_CFG_RDMA_FRAGS (*kiblnd_tunables.kib_map_on_demand != 0 ? \
#define IBLND_CFG_RDMA_FRAGS (*kiblnd_tunables.kib_map_on_demand ? \
*kiblnd_tunables.kib_map_on_demand : \
IBLND_MAX_RDMA_FRAGS) /* max # of fragments configured by user */
#define IBLND_RDMA_FRAGS(v) ((v) == IBLND_MSG_VERSION_1 ? \
......@@ -611,7 +611,7 @@ kiblnd_dev_can_failover(kib_dev_t *dev)
if (!list_empty(&dev->ibd_fail_list)) /* already scheduled */
return 0;
if (*kiblnd_tunables.kib_dev_failover == 0) /* disabled */
if (!*kiblnd_tunables.kib_dev_failover) /* disabled */
return 0;
if (*kiblnd_tunables.kib_dev_failover > 1) /* force failover */
......@@ -710,16 +710,16 @@ kiblnd_need_noop(kib_conn_t *conn)
/* No tx to piggyback NOOP onto or no credit to send a tx */
return (list_empty(&conn->ibc_tx_queue) ||
conn->ibc_credits == 0);
!conn->ibc_credits);
}
if (!list_empty(&conn->ibc_tx_noops) || /* NOOP already queued */
!list_empty(&conn->ibc_tx_queue_nocred) || /* piggyback NOOP */
conn->ibc_credits == 0) /* no credit */
!conn->ibc_credits) /* no credit */
return 0;
if (conn->ibc_credits == 1 && /* last credit reserved for */
conn->ibc_outstanding_credits == 0) /* giving back credits */
!conn->ibc_outstanding_credits) /* giving back credits */
return 0;
/* No tx to piggyback NOOP onto or no credit to send a tx */
......@@ -765,8 +765,8 @@ kiblnd_ptr2wreqid(void *ptr, int type)
{
unsigned long lptr = (unsigned long)ptr;
LASSERT((lptr & IBLND_WID_MASK) == 0);
LASSERT((type & ~IBLND_WID_MASK) == 0);
LASSERT(!(lptr & IBLND_WID_MASK));
LASSERT(!(type & ~IBLND_WID_MASK));
return (__u64)(lptr | type);
}
......
......@@ -202,7 +202,7 @@ kiblnd_tunables_init(void)
if (*kiblnd_tunables.kib_map_on_demand == 1)
*kiblnd_tunables.kib_map_on_demand = 2; /* don't make sense to create map if only one fragment */
if (*kiblnd_tunables.kib_concurrent_sends == 0) {
if (!*kiblnd_tunables.kib_concurrent_sends) {
if (*kiblnd_tunables.kib_map_on_demand > 0 &&
*kiblnd_tunables.kib_map_on_demand <= IBLND_MAX_RDMA_FRAGS / 8)
*kiblnd_tunables.kib_concurrent_sends = (*kiblnd_tunables.kib_peertxcredits) * 2;
......
......@@ -45,13 +45,13 @@ ksocknal_lib_get_conn_addrs(ksock_conn_t *conn)
/* Didn't need the {get,put}connsock dance to deref ksnc_sock... */
LASSERT(!conn->ksnc_closing);
if (rc != 0) {
if (rc) {
CERROR("Error %d getting sock peer IP\n", rc);
return rc;
}
rc = lnet_sock_getaddr(conn->ksnc_sock, 0, &conn->ksnc_myipaddr, NULL);
if (rc != 0) {
if (rc) {
CERROR("Error %d getting sock local IP\n", rc);
return rc;
}
......@@ -71,7 +71,7 @@ ksocknal_lib_zc_capable(ksock_conn_t *conn)
* ZC if the socket supports scatter/gather and doesn't need software
* checksums
*/
return ((caps & NETIF_F_SG) != 0 && (caps & NETIF_F_CSUM_MASK) != 0);
return ((caps & NETIF_F_SG) && (caps & NETIF_F_CSUM_MASK));
}
int
......@@ -84,7 +84,7 @@ ksocknal_lib_send_iov(ksock_conn_t *conn, ksock_tx_t *tx)
if (*ksocknal_tunables.ksnd_enable_csum && /* checksum enabled */
conn->ksnc_proto == &ksocknal_protocol_v2x && /* V2.x connection */
tx->tx_nob == tx->tx_resid && /* first sending */
tx->tx_msg.ksm_csum == 0) /* not checksummed */
!tx->tx_msg.ksm_csum) /* not checksummed */
ksocknal_lib_csum_tx(tx);
/*
......@@ -132,7 +132,7 @@ ksocknal_lib_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx)
* NB we can't trust socket ops to either consume our iovs
* or leave them alone.
*/
if (tx->tx_msg.ksm_zc_cookies[0] != 0) {
if (tx->tx_msg.ksm_zc_cookies[0]) {
/* Zero copy is enabled */
struct sock *sk = sock->sk;
struct page *page = kiov->kiov_page;
......@@ -245,7 +245,7 @@ ksocknal_lib_recv_iov(ksock_conn_t *conn)
conn->ksnc_msg.ksm_csum = 0;
}
if (saved_csum != 0) {
if (saved_csum) {
/* accumulate checksum */
for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
LASSERT(i < niov);
......@@ -290,7 +290,7 @@ ksocknal_lib_kiov_vmap(lnet_kiov_t *kiov, int niov,
return NULL;
for (nob = i = 0; i < niov; i++) {
if ((kiov[i].kiov_offset != 0 && i > 0) ||
if ((kiov[i].kiov_offset && i > 0) ||
(kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_CACHE_SIZE && i < niov - 1))
return NULL;
......@@ -360,7 +360,7 @@ ksocknal_lib_recv_kiov(ksock_conn_t *conn)
rc = kernel_recvmsg(conn->ksnc_sock, &msg, (struct kvec *)scratchiov,
n, nob, MSG_DONTWAIT);
if (conn->ksnc_msg.ksm_csum != 0) {
if (conn->ksnc_msg.ksm_csum) {
for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
LASSERT(i < niov);
......@@ -439,14 +439,14 @@ ksocknal_lib_get_conn_tunables(ksock_conn_t *conn, int *txmem, int *rxmem, int *
int rc;
rc = ksocknal_connsock_addref(conn);
if (rc != 0) {
if (rc) {
LASSERT(conn->ksnc_closing);
*txmem = *rxmem = *nagle = 0;
return -ESHUTDOWN;
}
rc = lnet_sock_getbuf(sock, txmem, rxmem);
if (rc == 0) {
if (!rc) {
len = sizeof(*nagle);
rc = kernel_getsockopt(sock, SOL_TCP, TCP_NODELAY,
(char *)nagle, &len);
......@@ -454,7 +454,7 @@ ksocknal_lib_get_conn_tunables(ksock_conn_t *conn, int *txmem, int *rxmem, int *
ksocknal_connsock_decref(conn);
if (rc == 0)
if (!rc)
*nagle = !*nagle;
else
*txmem = *rxmem = *nagle = 0;
......@@ -484,7 +484,7 @@ ksocknal_lib_setup_sock(struct socket *sock)
rc = kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER, (char *)&linger,
sizeof(linger));
if (rc != 0) {
if (rc) {
CERROR("Can't set SO_LINGER: %d\n", rc);
return rc;
}
......@@ -492,7 +492,7 @@ ksocknal_lib_setup_sock(struct socket *sock)
option = -1;
rc = kernel_setsockopt(sock, SOL_TCP, TCP_LINGER2, (char *)&option,
sizeof(option));
if (rc != 0) {
if (rc) {
CERROR("Can't set SO_LINGER2: %d\n", rc);
return rc;
}
......@@ -502,7 +502,7 @@ ksocknal_lib_setup_sock(struct socket *sock)
rc = kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY,
(char *)&option, sizeof(option));
if (rc != 0) {
if (rc) {
CERROR("Can't disable nagle: %d\n", rc);
return rc;
}
......@@ -510,7 +510,7 @@ ksocknal_lib_setup_sock(struct socket *sock)
rc = lnet_sock_setbuf(sock, *ksocknal_tunables.ksnd_tx_buffer_size,
*ksocknal_tunables.ksnd_rx_buffer_size);
if (rc != 0) {
if (rc) {
CERROR("Can't set buffer tx %d, rx %d buffers: %d\n",
*ksocknal_tunables.ksnd_tx_buffer_size,
*ksocknal_tunables.ksnd_rx_buffer_size, rc);
......@@ -529,7 +529,7 @@ ksocknal_lib_setup_sock(struct socket *sock)
option = (do_keepalive ? 1 : 0);
rc = kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, (char *)&option,
sizeof(option));
if (rc != 0) {
if (rc) {
CERROR("Can't set SO_KEEPALIVE: %d\n", rc);
return rc;
}
......@@ -539,21 +539,21 @@ ksocknal_lib_setup_sock(struct socket *sock)
rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPIDLE, (char *)&keep_idle,
sizeof(keep_idle));
if (rc != 0) {
if (rc) {
CERROR("Can't set TCP_KEEPIDLE: %d\n", rc);
return rc;
}
rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPINTVL,
(char *)&keep_intvl, sizeof(keep_intvl));
if (rc != 0) {
if (rc) {
CERROR("Can't set TCP_KEEPINTVL: %d\n", rc);
return rc;
}
rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT, (char *)&keep_count,
sizeof(keep_count));
if (rc != 0) {
if (rc) {
CERROR("Can't set TCP_KEEPCNT: %d\n", rc);
return rc;
}
......@@ -571,7 +571,7 @@ ksocknal_lib_push_conn(ksock_conn_t *conn)
int rc;
rc = ksocknal_connsock_addref(conn);
if (rc != 0) /* being shut down */
if (rc) /* being shut down */
return;
sk = conn->ksnc_sock->sk;
......@@ -584,7 +584,7 @@ ksocknal_lib_push_conn(ksock_conn_t *conn)
rc = kernel_setsockopt(conn->ksnc_sock, SOL_TCP, TCP_NODELAY,
(char *)&val, sizeof(val));
LASSERT(rc == 0);
LASSERT(!rc);
lock_sock(sk);
tp->nonagle = nonagle;
......
......@@ -103,7 +103,7 @@ ksocknal_queue_tx_zcack_v2(ksock_conn_t *conn,
}
LASSERT(tx->tx_msg.ksm_type == KSOCK_MSG_LNET);
LASSERT(tx->tx_msg.ksm_zc_cookies[1] == 0);
LASSERT(!tx->tx_msg.ksm_zc_cookies[1]);
if (tx_ack)
cookie = tx_ack->tx_msg.ksm_zc_cookies[1];
......@@ -185,7 +185,7 @@ ksocknal_queue_tx_zcack_v3(ksock_conn_t *conn,
if (tx->tx_msg.ksm_zc_cookies[1] == SOCKNAL_KEEPALIVE_PING) {
/* replace the keepalive PING with a real ACK */
LASSERT(tx->tx_msg.ksm_zc_cookies[0] == 0);
LASSERT(!tx->tx_msg.ksm_zc_cookies[0]);
tx->tx_msg.ksm_zc_cookies[1] = cookie;
return 1;
}
......@@ -197,7 +197,7 @@ ksocknal_queue_tx_zcack_v3(ksock_conn_t *conn,
return 1; /* XXX return error in the future */
}
if (tx->tx_msg.ksm_zc_cookies[0] == 0) {
if (!tx->tx_msg.ksm_zc_cookies[0]) {
/* NOOP tx has only one ZC-ACK cookie, can carry at least one more */
if (tx->tx_msg.ksm_zc_cookies[1] > cookie) {
tx->tx_msg.ksm_zc_cookies[0] = tx->tx_msg.ksm_zc_cookies[1];
......@@ -233,7 +233,7 @@ ksocknal_queue_tx_zcack_v3(ksock_conn_t *conn,
tmp = tx->tx_msg.ksm_zc_cookies[0];
}
if (tmp != 0) {
if (tmp) {
/* range of cookies */
tx->tx_msg.ksm_zc_cookies[0] = tmp - 1;
tx->tx_msg.ksm_zc_cookies[1] = tmp + 1;
......@@ -394,7 +394,7 @@ ksocknal_handle_zcreq(ksock_conn_t *c, __u64 cookie, int remote)
return -ENOMEM;
rc = ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id);
if (rc == 0)
if (!rc)
return 0;
ksocknal_free_tx(tx);
......@@ -411,7 +411,7 @@ ksocknal_handle_zcack(ksock_conn_t *conn, __u64 cookie1, __u64 cookie2)
LIST_HEAD(zlist);
int count;
if (cookie1 == 0)
if (!cookie1)
cookie1 = cookie2;
count = (cookie1 > cookie2) ? 2 : (cookie2 - cookie1 + 1);
......@@ -433,7 +433,7 @@ ksocknal_handle_zcack(ksock_conn_t *conn, __u64 cookie1, __u64 cookie2)
list_del(&tx->tx_zc_list);
list_add(&tx->tx_zc_list, &zlist);
if (--count == 0)
if (!--count)
break;
}
}
......@@ -446,7 +446,7 @@ ksocknal_handle_zcack(ksock_conn_t *conn, __u64 cookie1, __u64 cookie2)
ksocknal_tx_decref(tx);
}
return count == 0 ? 0 : -EPROTO;
return !count ? 0 : -EPROTO;
}
static int
......@@ -476,14 +476,14 @@ ksocknal_send_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello)
hmv->version_major = cpu_to_le16(KSOCK_PROTO_V1_MAJOR);
hmv->version_minor = cpu_to_le16(KSOCK_PROTO_V1_MINOR);
if (the_lnet.ln_testprotocompat != 0) {
if (the_lnet.ln_testprotocompat) {
/* single-shot proto check */
LNET_LOCK();
if ((the_lnet.ln_testprotocompat & 1) != 0) {
if (the_lnet.ln_testprotocompat & 1) {
hmv->version_major++; /* just different! */
the_lnet.ln_testprotocompat &= ~1;
}
if ((the_lnet.ln_testprotocompat & 2) != 0) {
if (the_lnet.ln_testprotocompat & 2) {
hmv->magic = LNET_PROTO_MAGIC;
the_lnet.ln_testprotocompat &= ~2;
}
......@@ -498,13 +498,13 @@ ksocknal_send_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello)
hdr->msg.hello.incarnation = cpu_to_le64(hello->kshm_src_incarnation);
rc = lnet_sock_write(sock, hdr, sizeof(*hdr), lnet_acceptor_timeout());
if (rc != 0) {
if (rc) {
CNETERR("Error %d sending HELLO hdr to %pI4h/%d\n",
rc, &conn->ksnc_ipaddr, conn->ksnc_port);
goto out;
}
if (hello->kshm_nips == 0)
if (!hello->kshm_nips)
goto out;
for (i = 0; i < (int) hello->kshm_nips; i++)
......@@ -513,7 +513,7 @@ ksocknal_send_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello)
rc = lnet_sock_write(sock, hello->kshm_ips,
hello->kshm_nips * sizeof(__u32),
lnet_acceptor_timeout());
if (rc != 0) {
if (rc) {
CNETERR("Error %d sending HELLO payload (%d) to %pI4h/%d\n",
rc, hello->kshm_nips,
&conn->ksnc_ipaddr, conn->ksnc_port);
......@@ -533,10 +533,10 @@ ksocknal_send_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello)
hello->kshm_magic = LNET_PROTO_MAGIC;
hello->kshm_version = conn->ksnc_proto->pro_version;
if (the_lnet.ln_testprotocompat != 0) {
if (the_lnet.ln_testprotocompat) {
/* single-shot proto check */
LNET_LOCK();
if ((the_lnet.ln_testprotocompat & 1) != 0) {
if (the_lnet.ln_testprotocompat & 1) {
hello->kshm_version++; /* just different! */
the_lnet.ln_testprotocompat &= ~1;
}
......@@ -545,19 +545,19 @@ ksocknal_send_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello)
rc = lnet_sock_write(sock, hello, offsetof(ksock_hello_msg_t, kshm_ips),
lnet_acceptor_timeout());
if (rc != 0) {
if (rc) {
CNETERR("Error %d sending HELLO hdr to %pI4h/%d\n",
rc, &conn->ksnc_ipaddr, conn->ksnc_port);
return rc;
}
if (hello->kshm_nips == 0)
if (!hello->kshm_nips)
return 0;
rc = lnet_sock_write(sock, hello->kshm_ips,
hello->kshm_nips * sizeof(__u32),
lnet_acceptor_timeout());
if (rc != 0) {
if (rc) {
CNETERR("Error %d sending HELLO payload (%d) to %pI4h/%d\n",
rc, hello->kshm_nips,
&conn->ksnc_ipaddr, conn->ksnc_port);
......@@ -584,7 +584,7 @@ ksocknal_recv_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello,
rc = lnet_sock_read(sock, &hdr->src_nid,
sizeof(*hdr) - offsetof(lnet_hdr_t, src_nid),
timeout);
if (rc != 0) {
if (rc) {
CERROR("Error %d reading rest of HELLO hdr from %pI4h\n",
rc, &conn->ksnc_ipaddr);
LASSERT(rc < 0 && rc != -EALREADY);
......@@ -614,12 +614,12 @@ ksocknal_recv_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello,
goto out;
}
if (hello->kshm_nips == 0)
if (!hello->kshm_nips)
goto out;
rc = lnet_sock_read(sock, hello->kshm_ips,
hello->kshm_nips * sizeof(__u32), timeout);
if (rc != 0) {
if (rc) {
CERROR("Error %d reading IPs from ip %pI4h\n",
rc, &conn->ksnc_ipaddr);
LASSERT(rc < 0 && rc != -EALREADY);
......@@ -629,7 +629,7 @@ ksocknal_recv_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello,
for (i = 0; i < (int) hello->kshm_nips; i++) {
hello->kshm_ips[i] = __le32_to_cpu(hello->kshm_ips[i]);
if (hello->kshm_ips[i] == 0) {
if (!hello->kshm_ips[i]) {
CERROR("Zero IP[%d] from ip %pI4h\n",
i, &conn->ksnc_ipaddr);
rc = -EPROTO;
......@@ -658,7 +658,7 @@ ksocknal_recv_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello, int timeout
offsetof(ksock_hello_msg_t, kshm_ips) -
offsetof(ksock_hello_msg_t, kshm_src_nid),
timeout);
if (rc != 0) {
if (rc) {
CERROR("Error %d reading HELLO from %pI4h\n",
rc, &conn->ksnc_ipaddr);
LASSERT(rc < 0 && rc != -EALREADY);
......@@ -682,12 +682,12 @@ ksocknal_recv_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello, int timeout
return -EPROTO;
}
if (hello->kshm_nips == 0)
if (!hello->kshm_nips)
return 0;
rc = lnet_sock_read(sock, hello->kshm_ips,
hello->kshm_nips * sizeof(__u32), timeout);
if (rc != 0) {
if (rc) {
CERROR("Error %d reading IPs from ip %pI4h\n",
rc, &conn->ksnc_ipaddr);
LASSERT(rc < 0 && rc != -EALREADY);
......@@ -698,7 +698,7 @@ ksocknal_recv_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello, int timeout
if (conn->ksnc_flip)
__swab32s(&hello->kshm_ips[i]);
if (hello->kshm_ips[i] == 0) {
if (!hello->kshm_ips[i]) {
CERROR("Zero IP[%d] from ip %pI4h\n",
i, &conn->ksnc_ipaddr);
return -EPROTO;
......
......@@ -159,7 +159,7 @@ lnet_connect(struct socket **sockp, lnet_nid_t peer_nid,
rc = lnet_sock_connect(&sock, &fatal, local_ip, port, peer_ip,
peer_port);
if (rc != 0) {
if (rc) {
if (fatal)
goto failed;
continue;
......@@ -171,14 +171,14 @@ lnet_connect(struct socket **sockp, lnet_nid_t peer_nid,
cr.acr_version = LNET_PROTO_ACCEPTOR_VERSION;
cr.acr_nid = peer_nid;
if (the_lnet.ln_testprotocompat != 0) {
if (the_lnet.ln_testprotocompat) {
/* single-shot proto check */
lnet_net_lock(LNET_LOCK_EX);
if ((the_lnet.ln_testprotocompat & 4) != 0) {
if (the_lnet.ln_testprotocompat & 4) {
cr.acr_version++;
the_lnet.ln_testprotocompat &= ~4;
}
if ((the_lnet.ln_testprotocompat & 8) != 0) {
if (the_lnet.ln_testprotocompat & 8) {
cr.acr_magic = LNET_PROTO_MAGIC;
the_lnet.ln_testprotocompat &= ~8;
}
......@@ -186,7 +186,7 @@ lnet_connect(struct socket **sockp, lnet_nid_t peer_nid,
}
rc = lnet_sock_write(sock, &cr, sizeof(cr), accept_timeout);
if (rc != 0)
if (rc)
goto failed_sock;
*sockp = sock;
......@@ -220,7 +220,7 @@ lnet_accept(struct socket *sock, __u32 magic)
LASSERT(sizeof(cr) <= 16); /* not too big for the stack */
rc = lnet_sock_getaddr(sock, 1, &peer_ip, &peer_port);
LASSERT(rc == 0); /* we succeeded before */
LASSERT(!rc); /* we succeeded before */
if (!lnet_accept_magic(magic, LNET_PROTO_ACCEPTOR_MAGIC)) {
if (lnet_accept_magic(magic, LNET_PROTO_MAGIC)) {
......@@ -236,7 +236,7 @@ lnet_accept(struct socket *sock, __u32 magic)
rc = lnet_sock_write(sock, &cr, sizeof(cr),
accept_timeout);
if (rc != 0)
if (rc)
CERROR("Error sending magic+version in response to LNET magic from %pI4h: %d\n",
&peer_ip, rc);
return -EPROTO;
......@@ -256,7 +256,7 @@ lnet_accept(struct socket *sock, __u32 magic)
rc = lnet_sock_read(sock, &cr.acr_version, sizeof(cr.acr_version),
accept_timeout);
if (rc != 0) {
if (rc) {
CERROR("Error %d reading connection request version from %pI4h\n",
rc, &peer_ip);
return -EIO;
......@@ -279,7 +279,7 @@ lnet_accept(struct socket *sock, __u32 magic)
cr.acr_version = LNET_PROTO_ACCEPTOR_VERSION;
rc = lnet_sock_write(sock, &cr, sizeof(cr), accept_timeout);
if (rc != 0)
if (rc)
CERROR("Error sending magic+version in response to version %d from %pI4h: %d\n",
peer_version, &peer_ip, rc);
return -EPROTO;
......@@ -289,7 +289,7 @@ lnet_accept(struct socket *sock, __u32 magic)
sizeof(cr) -
offsetof(lnet_acceptor_connreq_t, acr_nid),
accept_timeout);
if (rc != 0) {
if (rc) {
CERROR("Error %d reading connection request from %pI4h\n",
rc, &peer_ip);
return -EIO;
......@@ -341,7 +341,7 @@ lnet_acceptor(void *arg)
rc = lnet_sock_listen(&lnet_acceptor_state.pta_sock, 0, accept_port,
accept_backlog);
if (rc != 0) {
if (rc) {
if (rc == -EADDRINUSE)
LCONSOLE_ERROR_MSG(0x122, "Can't start acceptor on port %d: port already in use\n",
accept_port);
......@@ -358,12 +358,12 @@ lnet_acceptor(void *arg)
lnet_acceptor_state.pta_shutdown = rc;
complete(&lnet_acceptor_state.pta_signal);
if (rc != 0)
if (rc)
return rc;
while (!lnet_acceptor_state.pta_shutdown) {
rc = lnet_sock_accept(&newsock, lnet_acceptor_state.pta_sock);
if (rc != 0) {
if (rc) {
if (rc != -EAGAIN) {
CWARN("Accept error %d: pausing...\n", rc);
set_current_state(TASK_UNINTERRUPTIBLE);
......@@ -379,7 +379,7 @@ lnet_acceptor(void *arg)
}
rc = lnet_sock_getaddr(newsock, 1, &peer_ip, &peer_port);
if (rc != 0) {
if (rc) {
CERROR("Can't determine new connection's address\n");
goto failed;
}
......@@ -392,14 +392,14 @@ lnet_acceptor(void *arg)
rc = lnet_sock_read(newsock, &magic, sizeof(magic),
accept_timeout);
if (rc != 0) {
if (rc) {
CERROR("Error %d reading connection request from %pI4h\n",
rc, &peer_ip);
goto failed;
}
rc = lnet_accept(newsock, magic);
if (rc != 0)
if (rc)
goto failed;
continue;
......@@ -446,7 +446,7 @@ lnet_acceptor_start(void)
LASSERT(!lnet_acceptor_state.pta_sock);
rc = lnet_acceptor_get_tunables();
if (rc != 0)
if (rc)
return rc;
init_completion(&lnet_acceptor_state.pta_signal);
......@@ -454,7 +454,7 @@ lnet_acceptor_start(void)
if (rc <= 0)
return rc;
if (lnet_count_acceptor_nis() == 0) /* not required */
if (!lnet_count_acceptor_nis()) /* not required */
return 0;
rc2 = PTR_ERR(kthread_run(lnet_acceptor,
......
This diff is collapsed.
......@@ -210,7 +210,7 @@ lnet_parse_networks(struct list_head *nilist, char *networks)
if (!ni)
goto failed;
while (str && *str != 0) {
while (str && *str) {
char *comma = strchr(str, ',');
char *bracket = strchr(str, '(');
char *square = strchr(str, '[');
......@@ -240,7 +240,7 @@ lnet_parse_networks(struct list_head *nilist, char *networks)
rc = cfs_expr_list_parse(square, tmp - square + 1,
0, LNET_CPT_NUMBER - 1, &el);
if (rc != 0) {
if (rc) {
tmp = square;
goto failed_syntax;
}
......@@ -309,7 +309,7 @@ lnet_parse_networks(struct list_head *nilist, char *networks)
*comma++ = 0;
iface = cfs_trimwhite(iface);
if (*iface == 0) {
if (!*iface) {
tmp = iface;
goto failed_syntax;
}
......@@ -330,7 +330,7 @@ lnet_parse_networks(struct list_head *nilist, char *networks)
if (comma) {
*comma = 0;
str = cfs_trimwhite(str);
if (*str != 0) {
if (*str) {
tmp = str;
goto failed_syntax;
}
......@@ -339,7 +339,7 @@ lnet_parse_networks(struct list_head *nilist, char *networks)
}
str = cfs_trimwhite(str);
if (*str != 0) {
if (*str) {
tmp = str;
goto failed_syntax;
}
......@@ -434,7 +434,7 @@ lnet_str2tbs_sep(struct list_head *tbs, char *str)
str++;
/* scan for separator or comment */
for (sep = str; *sep != 0; sep++)
for (sep = str; *sep; sep++)
if (lnet_issep(*sep) || *sep == '#')
break;
......@@ -461,10 +461,10 @@ lnet_str2tbs_sep(struct list_head *tbs, char *str)
/* scan for separator */
do {
sep++;
} while (*sep != 0 && !lnet_issep(*sep));
} while (*sep && !lnet_issep(*sep));
}
if (*sep == 0)
if (!*sep)
break;
str = sep + 1;
......@@ -539,7 +539,7 @@ lnet_str2tbs_expand(struct list_head *tbs, char *str)
/* simple string enumeration */
if (lnet_expand1tb(&pending, str, sep, sep2,
parsed,
(int)(enditem - parsed)) != 0) {
(int)(enditem - parsed))) {
goto failed;
}
continue;
......@@ -554,7 +554,7 @@ lnet_str2tbs_expand(struct list_head *tbs, char *str)
goto failed;
if (hi < 0 || lo < 0 || stride < 0 || hi < lo ||
(hi - lo) % stride != 0)
(hi - lo) % stride)
goto failed;
for (i = lo; i <= hi; i += stride) {
......@@ -564,7 +564,7 @@ lnet_str2tbs_expand(struct list_head *tbs, char *str)
goto failed;
if (lnet_expand1tb(&pending, str, sep, sep2,
num, nob) != 0)
num, nob))
goto failed;
}
}
......@@ -656,7 +656,7 @@ lnet_parse_route(char *str, int *im_a_router)
/* scan for token start */
while (isspace(*sep))
sep++;
if (*sep == 0) {
if (!*sep) {
if (ntokens < (got_hops ? 3 : 2))
goto token_error;
break;
......@@ -666,9 +666,9 @@ lnet_parse_route(char *str, int *im_a_router)
token = sep++;
/* scan for token end */
while (*sep != 0 && !isspace(*sep))
while (*sep && !isspace(*sep))
sep++;
if (*sep != 0)
if (*sep)
*sep++ = 0;
if (ntokens == 1) {
......@@ -745,7 +745,7 @@ lnet_parse_route(char *str, int *im_a_router)
}
rc = lnet_add_route(net, hops, nid, priority);
if (rc != 0) {
if (rc) {
CERROR("Can't create route to %s via %s\n",
libcfs_net2str(net),
libcfs_nid2str(nid));
......@@ -802,7 +802,7 @@ lnet_parse_routes(char *routes, int *im_a_router)
rc = lnet_parse_route_tbs(&tbs, im_a_router);
}
LASSERT(lnet_tbnob == 0);
LASSERT(!lnet_tbnob);
return rc;
}
......@@ -814,7 +814,7 @@ lnet_match_network_token(char *token, int len, __u32 *ipaddrs, int nip)
int i;
rc = cfs_ip_addr_parse(token, len, &list);
if (rc != 0)
if (rc)
return rc;
for (rc = i = 0; !rc && i < nip; i++)
......@@ -847,18 +847,18 @@ lnet_match_network_tokens(char *net_entry, __u32 *ipaddrs, int nip)
/* scan for token start */
while (isspace(*sep))
sep++;
if (*sep == 0)
if (!*sep)
break;
token = sep++;
/* scan for token end */
while (*sep != 0 && !isspace(*sep))
while (*sep && !isspace(*sep))
sep++;
if (*sep != 0)
if (*sep)
*sep++ = 0;
if (ntokens++ == 0) {
if (!ntokens++) {
net = token;
continue;
}
......@@ -872,7 +872,8 @@ lnet_match_network_tokens(char *net_entry, __u32 *ipaddrs, int nip)
return rc;
}
matched |= (rc != 0);
if (rc)
matched |= 1;
}
if (!matched)
......@@ -930,12 +931,12 @@ lnet_splitnets(char *source, struct list_head *nets)
bracket = strchr(bracket + 1, ')');
if (!bracket ||
!(bracket[1] == ',' || bracket[1] == 0)) {
!(bracket[1] == ',' || !bracket[1])) {
lnet_syntax("ip2nets", source, offset2, len);
return -EINVAL;
}
sep = (bracket[1] == 0) ? NULL : bracket + 1;
sep = !bracket[1] ? NULL : bracket + 1;
}
if (sep)
......@@ -1002,7 +1003,7 @@ lnet_match_networks(char **networksp, char *ip2nets, __u32 *ipaddrs, int nip)
INIT_LIST_HEAD(&raw_entries);
if (lnet_str2tbs_sep(&raw_entries, ip2nets) < 0) {
CERROR("Error parsing ip2nets\n");
LASSERT(lnet_tbnob == 0);
LASSERT(!lnet_tbnob);
return -EINVAL;
}
......@@ -1026,7 +1027,7 @@ lnet_match_networks(char **networksp, char *ip2nets, __u32 *ipaddrs, int nip)
list_del(&tb->ltb_list);
if (rc == 0) { /* no match */
if (!rc) { /* no match */
lnet_free_text_buf(tb);
continue;
}
......@@ -1072,7 +1073,7 @@ lnet_match_networks(char **networksp, char *ip2nets, __u32 *ipaddrs, int nip)
list_add_tail(&tb->ltb_list, &matched_nets);
len += snprintf(networks + len, sizeof(networks) - len,
"%s%s", (len == 0) ? "" : ",",
"%s%s", !len ? "" : ",",
tb->ltb_text);
if (len >= sizeof(networks)) {
......@@ -1089,7 +1090,7 @@ lnet_match_networks(char **networksp, char *ip2nets, __u32 *ipaddrs, int nip)
lnet_free_text_bufs(&raw_entries);
lnet_free_text_bufs(&matched_nets);
lnet_free_text_bufs(&current_nets);
LASSERT(lnet_tbnob == 0);
LASSERT(!lnet_tbnob);
if (rc < 0)
return rc;
......@@ -1126,7 +1127,7 @@ lnet_ipaddr_enumerate(__u32 **ipaddrsp)
continue;
rc = lnet_ipif_query(ifnames[i], &up, &ipaddrs[nip], &netmask);
if (rc != 0) {
if (rc) {
CWARN("Can't query interface %s: %d\n",
ifnames[i], rc);
continue;
......@@ -1177,7 +1178,7 @@ lnet_parse_ip2nets(char **networksp, char *ip2nets)
return nip;
}
if (nip == 0) {
if (!nip) {
LCONSOLE_ERROR_MSG(0x118,
"No local IP interfaces for ip2nets to match\n");
return -ENOENT;
......@@ -1191,7 +1192,7 @@ lnet_parse_ip2nets(char **networksp, char *ip2nets)
return rc;
}
if (rc == 0) {
if (!rc) {
LCONSOLE_ERROR_MSG(0x11a,
"ip2nets does not match any local IP interfaces\n");
return -ENOENT;
......
......@@ -83,21 +83,21 @@ LNetEQAlloc(unsigned int count, lnet_eq_handler_t callback,
if (count)
count = roundup_pow_of_two(count);
if (callback != LNET_EQ_HANDLER_NONE && count != 0)
if (callback != LNET_EQ_HANDLER_NONE && count)
CWARN("EQ callback is guaranteed to get every event, do you still want to set eqcount %d for polling event which will have locking overhead? Please contact with developer to confirm\n", count);
/*
* count can be 0 if only need callback, we can eliminate
* overhead of enqueue event
*/
if (count == 0 && callback == LNET_EQ_HANDLER_NONE)
if (!count && callback == LNET_EQ_HANDLER_NONE)
return -EINVAL;
eq = lnet_eq_alloc();
if (!eq)
return -ENOMEM;
if (count != 0) {
if (count) {
LIBCFS_ALLOC(eq->eq_events, count * sizeof(lnet_event_t));
if (!eq->eq_events)
goto failed;
......@@ -185,7 +185,7 @@ LNetEQFree(lnet_handle_eq_t eqh)
cfs_percpt_for_each(ref, i, eq->eq_refs) {
LASSERT(*ref >= 0);
if (*ref == 0)
if (!*ref)
continue;
CDEBUG(D_NET, "Event equeue (%d: %d) busy on destroy.\n",
......@@ -221,7 +221,7 @@ lnet_eq_enqueue_event(lnet_eq_t *eq, lnet_event_t *ev)
/* MUST called with resource lock hold but w/o lnet_eq_wait_lock */
int index;
if (eq->eq_size == 0) {
if (!eq->eq_size) {
LASSERT(eq->eq_callback != LNET_EQ_HANDLER_NONE);
eq->eq_callback(ev);
return;
......@@ -321,7 +321,7 @@ __must_hold(&the_lnet.ln_eq_wait_lock)
wait_queue_t wl;
unsigned long now;
if (tms == 0)
if (!tms)
return -1; /* don't want to wait and no new event */
init_waitqueue_entry(&wl, current);
......@@ -340,7 +340,7 @@ __must_hold(&the_lnet.ln_eq_wait_lock)
tms = 0;
}
wait = tms != 0; /* might need to call here again */
wait = tms; /* might need to call here again */
*timeout_ms = tms;
lnet_eq_wait_lock();
......@@ -401,14 +401,14 @@ LNetEQPoll(lnet_handle_eq_t *eventqs, int neq, int timeout_ms,
}
rc = lnet_eq_dequeue_event(eq, event);
if (rc != 0) {
if (rc) {
lnet_eq_wait_unlock();
*which = i;
return rc;
}
}
if (wait == 0)
if (!wait)
break;
/*
......
......@@ -46,7 +46,7 @@
void
lnet_md_unlink(lnet_libmd_t *md)
{
if ((md->md_flags & LNET_MD_FLAG_ZOMBIE) == 0) {
if (!(md->md_flags & LNET_MD_FLAG_ZOMBIE)) {
/* first unlink attempt... */
lnet_me_t *me = md->md_me;
......@@ -68,7 +68,7 @@ lnet_md_unlink(lnet_libmd_t *md)
lnet_res_lh_invalidate(&md->md_lh);
}
if (md->md_refcount != 0) {
if (md->md_refcount) {
CDEBUG(D_NET, "Queueing unlink of md %p\n", md);
return;
}
......@@ -105,8 +105,8 @@ lnet_md_build(lnet_libmd_t *lmd, lnet_md_t *umd, int unlink)
lmd->md_refcount = 0;
lmd->md_flags = (unlink == LNET_UNLINK) ? LNET_MD_FLAG_AUTO_UNLINK : 0;
if ((umd->options & LNET_MD_IOVEC) != 0) {
if ((umd->options & LNET_MD_KIOV) != 0) /* Can't specify both */
if (umd->options & LNET_MD_IOVEC) {
if (umd->options & LNET_MD_KIOV) /* Can't specify both */
return -EINVAL;
niov = umd->length;
......@@ -125,12 +125,12 @@ lnet_md_build(lnet_libmd_t *lmd, lnet_md_t *umd, int unlink)
lmd->md_length = total_length;
if ((umd->options & LNET_MD_MAX_SIZE) != 0 && /* use max size */
if ((umd->options & LNET_MD_MAX_SIZE) && /* use max size */
(umd->max_size < 0 ||
umd->max_size > total_length)) /* illegal max_size */
return -EINVAL;
} else if ((umd->options & LNET_MD_KIOV) != 0) {
} else if (umd->options & LNET_MD_KIOV) {
niov = umd->length;
lmd->md_niov = umd->length;
memcpy(lmd->md_iov.kiov, umd->start,
......@@ -147,7 +147,7 @@ lnet_md_build(lnet_libmd_t *lmd, lnet_md_t *umd, int unlink)
lmd->md_length = total_length;
if ((umd->options & LNET_MD_MAX_SIZE) != 0 && /* max size used */
if ((umd->options & LNET_MD_MAX_SIZE) && /* max size used */
(umd->max_size < 0 ||
umd->max_size > total_length)) /* illegal max_size */
return -EINVAL;
......@@ -158,7 +158,7 @@ lnet_md_build(lnet_libmd_t *lmd, lnet_md_t *umd, int unlink)
lmd->md_iov.iov[0].iov_base = umd->start;
lmd->md_iov.iov[0].iov_len = umd->length;
if ((umd->options & LNET_MD_MAX_SIZE) != 0 && /* max size used */
if ((umd->options & LNET_MD_MAX_SIZE) && /* max size used */
(umd->max_size < 0 ||
umd->max_size > (int)umd->length)) /* illegal max_size */
return -EINVAL;
......@@ -216,8 +216,8 @@ lnet_md_deconstruct(lnet_libmd_t *lmd, lnet_md_t *umd)
* and that's all.
*/
umd->start = lmd->md_start;
umd->length = ((lmd->md_options &
(LNET_MD_IOVEC | LNET_MD_KIOV)) == 0) ?
umd->length = !(lmd->md_options &
(LNET_MD_IOVEC | LNET_MD_KIOV)) ?
lmd->md_length : lmd->md_niov;
umd->threshold = lmd->md_threshold;
umd->max_size = lmd->md_max_size;
......@@ -229,13 +229,13 @@ lnet_md_deconstruct(lnet_libmd_t *lmd, lnet_md_t *umd)
static int
lnet_md_validate(lnet_md_t *umd)
{
if (!umd->start && umd->length != 0) {
if (!umd->start && umd->length) {
CERROR("MD start pointer can not be NULL with length %u\n",
umd->length);
return -EINVAL;
}
if ((umd->options & (LNET_MD_KIOV | LNET_MD_IOVEC)) != 0 &&
if ((umd->options & (LNET_MD_KIOV | LNET_MD_IOVEC)) &&
umd->length > LNET_MAX_IOV) {
CERROR("Invalid option: too many fragments %u, %d max\n",
umd->length, LNET_MAX_IOV);
......@@ -284,10 +284,10 @@ LNetMDAttach(lnet_handle_me_t meh, lnet_md_t umd,
LASSERT(the_lnet.ln_init);
LASSERT(the_lnet.ln_refcount > 0);
if (lnet_md_validate(&umd) != 0)
if (lnet_md_validate(&umd))
return -EINVAL;
if ((umd.options & (LNET_MD_OP_GET | LNET_MD_OP_PUT)) == 0) {
if (!(umd.options & (LNET_MD_OP_GET | LNET_MD_OP_PUT))) {
CERROR("Invalid option: no MD_OP set\n");
return -EINVAL;
}
......@@ -300,7 +300,7 @@ LNetMDAttach(lnet_handle_me_t meh, lnet_md_t umd,
cpt = lnet_cpt_of_cookie(meh.cookie);
lnet_res_lock(cpt);
if (rc != 0)
if (rc)
goto failed;
me = lnet_handle2me(&meh);
......@@ -311,7 +311,7 @@ LNetMDAttach(lnet_handle_me_t meh, lnet_md_t umd,
else
rc = lnet_md_link(md, umd.eq_handle, cpt);
if (rc != 0)
if (rc)
goto failed;
/*
......@@ -363,10 +363,10 @@ LNetMDBind(lnet_md_t umd, lnet_unlink_t unlink, lnet_handle_md_t *handle)
LASSERT(the_lnet.ln_init);
LASSERT(the_lnet.ln_refcount > 0);
if (lnet_md_validate(&umd) != 0)
if (lnet_md_validate(&umd))
return -EINVAL;
if ((umd.options & (LNET_MD_OP_GET | LNET_MD_OP_PUT)) != 0) {
if ((umd.options & (LNET_MD_OP_GET | LNET_MD_OP_PUT))) {
CERROR("Invalid option: GET|PUT illegal on active MDs\n");
return -EINVAL;
}
......@@ -378,11 +378,11 @@ LNetMDBind(lnet_md_t umd, lnet_unlink_t unlink, lnet_handle_md_t *handle)
rc = lnet_md_build(md, &umd, unlink);
cpt = lnet_res_lock_current();
if (rc != 0)
if (rc)
goto failed;
rc = lnet_md_link(md, umd.eq_handle, cpt);
if (rc != 0)
if (rc)
goto failed;
lnet_md2handle(handle, md);
......@@ -453,7 +453,7 @@ LNetMDUnlink(lnet_handle_md_t mdh)
* when the LND is done, the completion event flags that the MD was
* unlinked. Otherwise, we enqueue an event now...
*/
if (md->md_eq && md->md_refcount == 0) {
if (md->md_eq && !md->md_refcount) {
lnet_build_unlink_event(md, &ev);
lnet_eq_enqueue_event(md->md_eq, &ev);
}
......
......@@ -109,7 +109,7 @@ LNetMEAttach(unsigned int portal,
lnet_res_lh_initialize(the_lnet.ln_me_containers[mtable->mt_cpt],
&me->me_lh);
if (ignore_bits != 0)
if (ignore_bits)
head = &mtable->mt_mhash[LNET_MT_HASH_IGNORE];
else
head = lnet_mt_match_head(mtable, match_id, match_bits);
......@@ -248,7 +248,7 @@ LNetMEUnlink(lnet_handle_me_t meh)
md = me->me_md;
if (md) {
md->md_flags |= LNET_MD_FLAG_ABORTED;
if (md->md_eq && md->md_refcount == 0) {
if (md->md_eq && !md->md_refcount) {
lnet_build_unlink_event(md, &ev);
lnet_eq_enqueue_event(md->md_eq, &ev);
}
......
This diff is collapsed.
......@@ -172,7 +172,7 @@ lnet_msg_decommit_tx(lnet_msg_t *msg, int status)
lnet_event_t *ev = &msg->msg_ev;
LASSERT(msg->msg_tx_committed);
if (status != 0)
if (status)
goto out;
counters = the_lnet.ln_counters[msg->msg_tx_cpt];
......@@ -180,7 +180,7 @@ lnet_msg_decommit_tx(lnet_msg_t *msg, int status)
default: /* routed message */
LASSERT(msg->msg_routing);
LASSERT(msg->msg_rx_committed);
LASSERT(ev->type == 0);
LASSERT(!ev->type);
counters->route_length += msg->msg_len;
counters->route_count++;
......@@ -226,13 +226,13 @@ lnet_msg_decommit_rx(lnet_msg_t *msg, int status)
LASSERT(!msg->msg_tx_committed); /* decommitted or never committed */
LASSERT(msg->msg_rx_committed);
if (status != 0)
if (status)
goto out;
counters = the_lnet.ln_counters[msg->msg_rx_cpt];
switch (ev->type) {
default:
LASSERT(ev->type == 0);
LASSERT(!ev->type);
LASSERT(msg->msg_routing);
goto out;
......@@ -371,7 +371,7 @@ lnet_complete_msg_locked(lnet_msg_t *msg, int cpt)
LASSERT(msg->msg_onactivelist);
if (status == 0 && msg->msg_ack) {
if (!status && msg->msg_ack) {
/* Only send an ACK if the PUT completed successfully */
lnet_msg_decommit(msg, cpt, 0);
......@@ -410,7 +410,7 @@ lnet_complete_msg_locked(lnet_msg_t *msg, int cpt)
*/
return rc;
} else if (status == 0 && /* OK so far */
} else if (!status && /* OK so far */
(msg->msg_routing && !msg->msg_sending)) {
/* not forwarded */
LASSERT(!msg->msg_receiving); /* called back recv already */
......@@ -531,14 +531,14 @@ lnet_finalize(lnet_ni_t *ni, lnet_msg_t *msg, int status)
* anything, so my finalizing friends can chomp along too
*/
rc = lnet_complete_msg_locked(msg, cpt);
if (rc != 0)
if (rc)
break;
}
container->msc_finalizers[my_slot] = NULL;
lnet_net_unlock(cpt);
if (rc != 0)
if (rc)
goto again;
}
EXPORT_SYMBOL(lnet_finalize);
......@@ -548,7 +548,7 @@ lnet_msg_container_cleanup(struct lnet_msg_container *container)
{
int count = 0;
if (container->msc_init == 0)
if (!container->msc_init)
return;
while (!list_empty(&container->msc_active)) {
......@@ -592,7 +592,7 @@ lnet_msg_container_setup(struct lnet_msg_container *container, int cpt)
rc = lnet_freelist_init(&container->msc_freelist,
LNET_FL_MAX_MSGS, sizeof(lnet_msg_t));
if (rc != 0) {
if (rc) {
CERROR("Failed to init freelist for message container\n");
lnet_msg_container_cleanup(container);
return rc;
......@@ -649,7 +649,7 @@ lnet_msg_containers_create(void)
cfs_percpt_for_each(container, i, the_lnet.ln_msg_containers) {
rc = lnet_msg_container_setup(container, i);
if (rc != 0) {
if (rc) {
lnet_msg_containers_destroy();
return rc;
}
......
......@@ -50,7 +50,7 @@ lnet_ptl_match_type(unsigned int index, lnet_process_id_t match_id,
struct lnet_portal *ptl = the_lnet.ln_portals[index];
int unique;
unique = ignore_bits == 0 &&
unique = !ignore_bits &&
match_id.nid != LNET_NID_ANY &&
match_id.pid != LNET_PID_ANY;
......@@ -152,7 +152,7 @@ lnet_try_match_md(lnet_libmd_t *md,
return LNET_MATCHMD_NONE | LNET_MATCHMD_EXHAUSTED;
/* mismatched MD op */
if ((md->md_options & info->mi_opc) == 0)
if (!(md->md_options & info->mi_opc))
return LNET_MATCHMD_NONE;
/* mismatched ME nid/pid? */
......@@ -165,17 +165,17 @@ lnet_try_match_md(lnet_libmd_t *md,
return LNET_MATCHMD_NONE;
/* mismatched ME matchbits? */
if (((me->me_match_bits ^ info->mi_mbits) & ~me->me_ignore_bits) != 0)
if ((me->me_match_bits ^ info->mi_mbits) & ~me->me_ignore_bits)
return LNET_MATCHMD_NONE;
/* Hurrah! This _is_ a match; check it out... */
if ((md->md_options & LNET_MD_MANAGE_REMOTE) == 0)
if (!(md->md_options & LNET_MD_MANAGE_REMOTE))
offset = md->md_offset;
else
offset = info->mi_roffset;
if ((md->md_options & LNET_MD_MAX_SIZE) != 0) {
if (md->md_options & LNET_MD_MAX_SIZE) {
mlength = md->md_max_size;
LASSERT(md->md_offset + mlength <= md->md_length);
} else {
......@@ -184,7 +184,7 @@ lnet_try_match_md(lnet_libmd_t *md,
if (info->mi_rlength <= mlength) { /* fits in allowed space */
mlength = info->mi_rlength;
} else if ((md->md_options & LNET_MD_TRUNCATE) == 0) {
} else if (!(md->md_options & LNET_MD_TRUNCATE)) {
/* this packet _really_ is too big */
CERROR("Matching packet from %s, match %llu length %d too big: %d left, %d allowed\n",
libcfs_id2str(info->mi_id), info->mi_mbits,
......@@ -210,7 +210,7 @@ lnet_try_match_md(lnet_libmd_t *md,
* We bumped md->md_refcount above so the MD just gets flagged
* for unlink when it is finalized.
*/
if ((md->md_flags & LNET_MD_FLAG_AUTO_UNLINK) != 0)
if (md->md_flags & LNET_MD_FLAG_AUTO_UNLINK)
lnet_md_unlink(md);
return LNET_MATCHMD_OK | LNET_MATCHMD_EXHAUSTED;
......@@ -304,7 +304,7 @@ lnet_mt_of_match(struct lnet_match_info *info, struct lnet_msg *msg)
/* is there any active entry for this portal? */
nmaps = ptl->ptl_mt_nmaps;
/* map to an active mtable to avoid heavy "stealing" */
if (nmaps != 0) {
if (nmaps) {
/*
* NB: there is possibility that ptl_mt_maps is being
* changed because we are not under protection of
......@@ -339,7 +339,7 @@ lnet_mt_test_exhausted(struct lnet_match_table *mtable, int pos)
bmap = &mtable->mt_exhausted[pos >> LNET_MT_BITS_U64];
pos &= (1 << LNET_MT_BITS_U64) - 1;
return ((*bmap) & (1ULL << pos)) != 0;
return (*bmap & (1ULL << pos));
}
static void
......@@ -405,10 +405,10 @@ lnet_mt_match_md(struct lnet_match_table *mtable,
LASSERT(me == me->me_md->md_me);
rc = lnet_try_match_md(me->me_md, info, msg);
if ((rc & LNET_MATCHMD_EXHAUSTED) == 0)
if (!(rc & LNET_MATCHMD_EXHAUSTED))
exhausted = 0; /* mlist is not empty */
if ((rc & LNET_MATCHMD_FINISH) != 0) {
if (rc & LNET_MATCHMD_FINISH) {
/*
* don't return EXHAUSTED bit because we don't know
* whether the mlist is empty or not
......@@ -423,7 +423,7 @@ lnet_mt_match_md(struct lnet_match_table *mtable,
exhausted = 0;
}
if (exhausted == 0 && head == &mtable->mt_mhash[LNET_MT_HASH_IGNORE]) {
if (!exhausted && head == &mtable->mt_mhash[LNET_MT_HASH_IGNORE]) {
head = lnet_mt_match_head(mtable, info->mi_id, info->mi_mbits);
goto again; /* re-check MEs w/o ignore-bits */
}
......@@ -490,13 +490,13 @@ lnet_ptl_match_delay(struct lnet_portal *ptl,
cpt = (first + i) % LNET_CPT_NUMBER;
mtable = ptl->ptl_mtables[cpt];
if (i != 0 && i != LNET_CPT_NUMBER - 1 && !mtable->mt_enabled)
if (i && i != LNET_CPT_NUMBER - 1 && !mtable->mt_enabled)
continue;
lnet_res_lock(cpt);
lnet_ptl_lock(ptl);
if (i == 0) { /* the first try, attach on stealing list */
if (!i) { /* the first try, attach on stealing list */
list_add_tail(&msg->msg_list,
&ptl->ptl_msg_stealing);
}
......@@ -504,11 +504,11 @@ lnet_ptl_match_delay(struct lnet_portal *ptl,
if (!list_empty(&msg->msg_list)) { /* on stealing list */
rc = lnet_mt_match_md(mtable, info, msg);
if ((rc & LNET_MATCHMD_EXHAUSTED) != 0 &&
if ((rc & LNET_MATCHMD_EXHAUSTED) &&
mtable->mt_enabled)
lnet_ptl_disable_mt(ptl, cpt);
if ((rc & LNET_MATCHMD_FINISH) != 0)
if (rc & LNET_MATCHMD_FINISH)
list_del_init(&msg->msg_list);
} else {
......@@ -522,7 +522,7 @@ lnet_ptl_match_delay(struct lnet_portal *ptl,
if (!list_empty(&msg->msg_list) && /* not matched yet */
(i == LNET_CPT_NUMBER - 1 || /* the last CPT */
ptl->ptl_mt_nmaps == 0 || /* no active CPT */
!ptl->ptl_mt_nmaps || /* no active CPT */
(ptl->ptl_mt_nmaps == 1 && /* the only active CPT */
ptl->ptl_mt_maps[0] == cpt))) {
/* nothing to steal, delay or drop */
......@@ -541,7 +541,7 @@ lnet_ptl_match_delay(struct lnet_portal *ptl,
lnet_ptl_unlock(ptl);
lnet_res_unlock(cpt);
if ((rc & LNET_MATCHMD_FINISH) != 0 || msg->msg_rx_delayed)
if ((rc & LNET_MATCHMD_FINISH) || msg->msg_rx_delayed)
break;
}
......@@ -567,7 +567,7 @@ lnet_ptl_match_md(struct lnet_match_info *info, struct lnet_msg *msg)
ptl = the_lnet.ln_portals[info->mi_portal];
rc = lnet_ptl_match_early(ptl, msg);
if (rc != 0) /* matched or delayed early message */
if (rc) /* matched or delayed early message */
return rc;
mtable = lnet_mt_of_match(info, msg);
......@@ -579,13 +579,13 @@ lnet_ptl_match_md(struct lnet_match_info *info, struct lnet_msg *msg)
}
rc = lnet_mt_match_md(mtable, info, msg);
if ((rc & LNET_MATCHMD_EXHAUSTED) != 0 && mtable->mt_enabled) {
if ((rc & LNET_MATCHMD_EXHAUSTED) && mtable->mt_enabled) {
lnet_ptl_lock(ptl);
lnet_ptl_disable_mt(ptl, mtable->mt_cpt);
lnet_ptl_unlock(ptl);
}
if ((rc & LNET_MATCHMD_FINISH) != 0) /* matched or dropping */
if (rc & LNET_MATCHMD_FINISH) /* matched or dropping */
goto out1;
if (!msg->msg_rx_ready_delay)
......@@ -646,7 +646,7 @@ lnet_ptl_attach_md(lnet_me_t *me, lnet_libmd_t *md,
int exhausted = 0;
int cpt;
LASSERT(md->md_refcount == 0); /* a brand new MD */
LASSERT(!md->md_refcount); /* a brand new MD */
me->me_md = md;
md->md_me = me;
......@@ -680,15 +680,15 @@ lnet_ptl_attach_md(lnet_me_t *me, lnet_libmd_t *md,
rc = lnet_try_match_md(md, &info, msg);
exhausted = (rc & LNET_MATCHMD_EXHAUSTED) != 0;
if ((rc & LNET_MATCHMD_NONE) != 0) {
exhausted = (rc & LNET_MATCHMD_EXHAUSTED);
if (rc & LNET_MATCHMD_NONE) {
if (exhausted)
break;
continue;
}
/* Hurrah! This _is_ a match */
LASSERT((rc & LNET_MATCHMD_FINISH) != 0);
LASSERT(rc & LNET_MATCHMD_FINISH);
list_del_init(&msg->msg_list);
if (head == &ptl->ptl_msg_stealing) {
......@@ -698,7 +698,7 @@ lnet_ptl_attach_md(lnet_me_t *me, lnet_libmd_t *md,
continue;
}
if ((rc & LNET_MATCHMD_OK) != 0) {
if (rc & LNET_MATCHMD_OK) {
list_add_tail(&msg->msg_list, matches);
CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d match %llu offset %d length %d.\n",
......
......@@ -64,7 +64,7 @@ lnet_sock_ioctl(int cmd, unsigned long arg)
int rc;
rc = sock_create(PF_INET, SOCK_STREAM, 0, &sock);
if (rc != 0) {
if (rc) {
CERROR("Can't create socket: %d\n", rc);
return rc;
}
......@@ -101,12 +101,12 @@ lnet_ipif_query(char *name, int *up, __u32 *ip, __u32 *mask)
strcpy(ifr.ifr_name, name);
rc = lnet_sock_ioctl(SIOCGIFFLAGS, (unsigned long)&ifr);
if (rc != 0) {
if (rc) {
CERROR("Can't get flags for interface %s\n", name);
return rc;
}
if ((ifr.ifr_flags & IFF_UP) == 0) {
if (!(ifr.ifr_flags & IFF_UP)) {
CDEBUG(D_NET, "Interface %s down\n", name);
*up = 0;
*ip = *mask = 0;
......@@ -117,7 +117,7 @@ lnet_ipif_query(char *name, int *up, __u32 *ip, __u32 *mask)
strcpy(ifr.ifr_name, name);
ifr.ifr_addr.sa_family = AF_INET;
rc = lnet_sock_ioctl(SIOCGIFADDR, (unsigned long)&ifr);
if (rc != 0) {
if (rc) {
CERROR("Can't get IP address for interface %s\n", name);
return rc;
}
......@@ -128,7 +128,7 @@ lnet_ipif_query(char *name, int *up, __u32 *ip, __u32 *mask)
strcpy(ifr.ifr_name, name);
ifr.ifr_addr.sa_family = AF_INET;
rc = lnet_sock_ioctl(SIOCGIFNETMASK, (unsigned long)&ifr);
if (rc != 0) {
if (rc) {
CERROR("Can't get netmask for interface %s\n", name);
return rc;
}
......@@ -181,7 +181,7 @@ lnet_ipif_enumerate(char ***namesp)
goto out1;
}
LASSERT(rc == 0);
LASSERT(!rc);
nfound = ifc.ifc_len / sizeof(*ifr);
LASSERT(nfound <= nalloc);
......@@ -193,7 +193,7 @@ lnet_ipif_enumerate(char ***namesp)
nalloc *= 2;
}
if (nfound == 0)
if (!nfound)
goto out1;
LIBCFS_ALLOC(names, nfound * sizeof(*names));
......@@ -268,10 +268,10 @@ lnet_sock_write(struct socket *sock, void *buffer, int nob, int timeout)
.iov_len = nob
};
struct msghdr msg = {
.msg_flags = (timeout == 0) ? MSG_DONTWAIT : 0
.msg_flags = !timeout ? MSG_DONTWAIT : 0
};
if (timeout != 0) {
if (timeout) {
/* Set send timeout to remaining time */
tv = (struct timeval) {
.tv_sec = ticks / HZ,
......@@ -279,7 +279,7 @@ lnet_sock_write(struct socket *sock, void *buffer, int nob, int timeout)
};
rc = kernel_setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO,
(char *)&tv, sizeof(tv));
if (rc != 0) {
if (rc) {
CERROR("Can't set socket send timeout %ld.%06d: %d\n",
(long)tv.tv_sec, (int)tv.tv_usec, rc);
return rc;
......@@ -296,7 +296,7 @@ lnet_sock_write(struct socket *sock, void *buffer, int nob, int timeout)
if (rc < 0)
return rc;
if (rc == 0) {
if (!rc) {
CERROR("Unexpected zero rc\n");
return -ECONNABORTED;
}
......@@ -338,7 +338,7 @@ lnet_sock_read(struct socket *sock, void *buffer, int nob, int timeout)
};
rc = kernel_setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO,
(char *)&tv, sizeof(tv));
if (rc != 0) {
if (rc) {
CERROR("Can't set socket recv timeout %ld.%06d: %d\n",
(long)tv.tv_sec, (int)tv.tv_usec, rc);
return rc;
......@@ -351,13 +351,13 @@ lnet_sock_read(struct socket *sock, void *buffer, int nob, int timeout)
if (rc < 0)
return rc;
if (rc == 0)
if (!rc)
return -ECONNRESET;
buffer = ((char *)buffer) + rc;
nob -= rc;
if (nob == 0)
if (!nob)
return 0;
if (ticks <= 0)
......@@ -380,7 +380,7 @@ lnet_sock_create(struct socket **sockp, int *fatal, __u32 local_ip,
rc = sock_create(PF_INET, SOCK_STREAM, 0, &sock);
*sockp = sock;
if (rc != 0) {
if (rc) {
CERROR("Can't create socket: %d\n", rc);
return rc;
}
......@@ -388,16 +388,16 @@ lnet_sock_create(struct socket **sockp, int *fatal, __u32 local_ip,
option = 1;
rc = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
(char *)&option, sizeof(option));
if (rc != 0) {
if (rc) {
CERROR("Can't set SO_REUSEADDR for socket: %d\n", rc);
goto failed;
}
if (local_ip != 0 || local_port != 0) {
if (local_ip || local_port) {
memset(&locaddr, 0, sizeof(locaddr));
locaddr.sin_family = AF_INET;
locaddr.sin_port = htons(local_port);
locaddr.sin_addr.s_addr = (local_ip == 0) ?
locaddr.sin_addr.s_addr = !local_ip ?
INADDR_ANY : htonl(local_ip);
rc = kernel_bind(sock, (struct sockaddr *)&locaddr,
......@@ -407,7 +407,7 @@ lnet_sock_create(struct socket **sockp, int *fatal, __u32 local_ip,
*fatal = 0;
goto failed;
}
if (rc != 0) {
if (rc) {
CERROR("Error trying to bind to port %d: %d\n",
local_port, rc);
goto failed;
......@@ -426,22 +426,22 @@ lnet_sock_setbuf(struct socket *sock, int txbufsize, int rxbufsize)
int option;
int rc;
if (txbufsize != 0) {
if (txbufsize) {
option = txbufsize;
rc = kernel_setsockopt(sock, SOL_SOCKET, SO_SNDBUF,
(char *)&option, sizeof(option));
if (rc != 0) {
if (rc) {
CERROR("Can't set send buffer %d: %d\n",
option, rc);
return rc;
}
}
if (rxbufsize != 0) {
if (rxbufsize) {
option = rxbufsize;
rc = kernel_setsockopt(sock, SOL_SOCKET, SO_RCVBUF,
(char *)&option, sizeof(option));
if (rc != 0) {
if (rc) {
CERROR("Can't set receive buffer %d: %d\n",
option, rc);
return rc;
......@@ -462,7 +462,7 @@ lnet_sock_getaddr(struct socket *sock, bool remote, __u32 *ip, int *port)
rc = kernel_getpeername(sock, (struct sockaddr *)&sin, &len);
else
rc = kernel_getsockname(sock, (struct sockaddr *)&sin, &len);
if (rc != 0) {
if (rc) {
CERROR("Error %d getting sock %s IP/port\n",
rc, remote ? "peer" : "local");
return rc;
......@@ -499,7 +499,7 @@ lnet_sock_listen(struct socket **sockp, __u32 local_ip, int local_port,
int rc;
rc = lnet_sock_create(sockp, &fatal, local_ip, local_port);
if (rc != 0) {
if (rc) {
if (!fatal)
CERROR("Can't create socket: port %d already in use\n",
local_port);
......@@ -507,7 +507,7 @@ lnet_sock_listen(struct socket **sockp, __u32 local_ip, int local_port,
}
rc = kernel_listen(*sockp, backlog);
if (rc == 0)
if (!rc)
return 0;
CERROR("Can't set listen backlog %d: %d\n", backlog, rc);
......@@ -548,7 +548,7 @@ lnet_sock_accept(struct socket **newsockp, struct socket *sock)
rc = sock->ops->accept(sock, newsock, O_NONBLOCK);
}
if (rc != 0)
if (rc)
goto failed;
*newsockp = newsock;
......@@ -568,7 +568,7 @@ lnet_sock_connect(struct socket **sockp, int *fatal, __u32 local_ip,
int rc;
rc = lnet_sock_create(sockp, fatal, local_ip, local_port);
if (rc != 0)
if (rc)
return rc;
memset(&srvaddr, 0, sizeof(srvaddr));
......@@ -578,7 +578,7 @@ lnet_sock_connect(struct socket **sockp, int *fatal, __u32 local_ip,
rc = kernel_connect(*sockp, (struct sockaddr *)&srvaddr,
sizeof(srvaddr), 0);
if (rc == 0)
if (!rc)
return 0;
/*
......
......@@ -80,7 +80,7 @@ lnet_unconfigure(void)
mutex_unlock(&the_lnet.ln_api_mutex);
mutex_unlock(&lnet_config_mutex);
return (refcount == 0) ? 0 : -EBUSY;
return !refcount ? 0 : -EBUSY;
}
static int
......@@ -120,13 +120,13 @@ init_lnet(void)
mutex_init(&lnet_config_mutex);
rc = lnet_init();
if (rc != 0) {
if (rc) {
CERROR("lnet_init: error %d\n", rc);
return rc;
}
rc = libcfs_register_ioctl(&lnet_ioctl_handler);
LASSERT(rc == 0);
LASSERT(!rc);
if (config_on_load) {
/*
......@@ -145,7 +145,7 @@ fini_lnet(void)
int rc;
rc = libcfs_deregister_ioctl(&lnet_ioctl_handler);
LASSERT(rc == 0);
LASSERT(!rc);
lnet_fini();
}
......
......@@ -206,7 +206,7 @@ add_nidrange(const struct cfs_lstr *src,
if (!nf)
return NULL;
endlen = src->ls_len - strlen(nf->nf_name);
if (endlen == 0)
if (!endlen)
/* network name only, e.g. "elan" or "tcp" */
netnum = 0;
else {
......@@ -255,17 +255,17 @@ parse_nidrange(struct cfs_lstr *src, struct list_head *nidlist)
struct nidrange *nr;
tmp = *src;
if (cfs_gettok(src, '@', &addrrange) == 0)
if (!cfs_gettok(src, '@', &addrrange))
goto failed;
if (cfs_gettok(src, '@', &net) == 0 || src->ls_str)
if (!cfs_gettok(src, '@', &net) || src->ls_str)
goto failed;
nr = add_nidrange(&net, nidlist);
if (!nr)
goto failed;
if (parse_addrange(&addrrange, nr) != 0)
if (parse_addrange(&addrrange, nr))
goto failed;
return 1;
......@@ -344,12 +344,12 @@ cfs_parse_nidlist(char *str, int len, struct list_head *nidlist)
INIT_LIST_HEAD(nidlist);
while (src.ls_str) {
rc = cfs_gettok(&src, ' ', &res);
if (rc == 0) {
if (!rc) {
cfs_free_nidlist(nidlist);
return 0;
}
rc = parse_nidrange(&res, nidlist);
if (rc == 0) {
if (!rc) {
cfs_free_nidlist(nidlist);
return 0;
}
......@@ -397,7 +397,7 @@ cfs_print_network(char *buffer, int count, struct nidrange *nr)
{
struct netstrfns *nf = nr->nr_netstrfns;
if (nr->nr_netnum == 0)
if (!nr->nr_netnum)
return scnprintf(buffer, count, "@%s", nf->nf_name);
else
return scnprintf(buffer, count, "@%s%u",
......@@ -419,7 +419,7 @@ cfs_print_addrranges(char *buffer, int count, struct list_head *addrranges,
struct netstrfns *nf = nr->nr_netstrfns;
list_for_each_entry(ar, addrranges, ar_link) {
if (i != 0)
if (i)
i += scnprintf(buffer + i, count - i, " ");
i += nf->nf_print_addrlist(buffer + i, count - i,
&ar->ar_numaddr_ranges);
......@@ -444,10 +444,10 @@ int cfs_print_nidlist(char *buffer, int count, struct list_head *nidlist)
return 0;
list_for_each_entry(nr, nidlist, nr_link) {
if (i != 0)
if (i)
i += scnprintf(buffer + i, count - i, " ");
if (nr->nr_all != 0) {
if (nr->nr_all) {
LASSERT(list_empty(&nr->nr_addrranges));
i += scnprintf(buffer + i, count - i, "*");
i += cfs_print_network(buffer + i, count - i, nr);
......@@ -517,7 +517,7 @@ static void cfs_num_ar_min_max(struct addrrange *ar, __u32 *min_nid,
list_for_each_entry(el, &ar->ar_numaddr_ranges, el_link) {
list_for_each_entry(re, &el->el_exprs, re_link) {
if (re->re_lo < min_addr || min_addr == 0)
if (re->re_lo < min_addr || !min_addr)
min_addr = re->re_lo;
if (re->re_hi > max_addr)
max_addr = re->re_hi;
......@@ -553,7 +553,7 @@ bool cfs_nidrange_is_contiguous(struct list_head *nidlist)
if (netnum == -1)
netnum = nr->nr_netnum;
if (strcmp(lndname, nf->nf_name) != 0 ||
if (strcmp(lndname, nf->nf_name) ||
netnum != nr->nr_netnum)
return false;
}
......@@ -592,7 +592,7 @@ static bool cfs_num_is_contiguous(struct list_head *nidlist)
list_for_each_entry(ar, &nr->nr_addrranges, ar_link) {
cfs_num_ar_min_max(ar, &current_start_nid,
&current_end_nid);
if (last_end_nid != 0 &&
if (last_end_nid &&
(current_start_nid - last_end_nid != 1))
return false;
last_end_nid = current_end_nid;
......@@ -602,7 +602,7 @@ static bool cfs_num_is_contiguous(struct list_head *nidlist)
re_link) {
if (re->re_stride > 1)
return false;
else if (last_hi != 0 &&
else if (last_hi &&
re->re_hi - last_hi != 1)
return false;
last_hi = re->re_hi;
......@@ -642,7 +642,7 @@ static bool cfs_ip_is_contiguous(struct list_head *nidlist)
last_diff = 0;
cfs_ip_ar_min_max(ar, &current_start_nid,
&current_end_nid);
if (last_end_nid != 0 &&
if (last_end_nid &&
(current_start_nid - last_end_nid != 1))
return false;
last_end_nid = current_end_nid;
......@@ -726,7 +726,7 @@ static void cfs_num_min_max(struct list_head *nidlist, __u32 *min_nid,
list_for_each_entry(ar, &nr->nr_addrranges, ar_link) {
cfs_num_ar_min_max(ar, &tmp_min_addr,
&tmp_max_addr);
if (tmp_min_addr < min_addr || min_addr == 0)
if (tmp_min_addr < min_addr || !min_addr)
min_addr = tmp_min_addr;
if (tmp_max_addr > max_addr)
max_addr = tmp_min_addr;
......@@ -758,7 +758,7 @@ static void cfs_ip_min_max(struct list_head *nidlist, __u32 *min_nid,
list_for_each_entry(ar, &nr->nr_addrranges, ar_link) {
cfs_ip_ar_min_max(ar, &tmp_min_ip_addr,
&tmp_max_ip_addr);
if (tmp_min_ip_addr < min_ip_addr || min_ip_addr == 0)
if (tmp_min_ip_addr < min_ip_addr || !min_ip_addr)
min_ip_addr = tmp_min_ip_addr;
if (tmp_max_ip_addr > max_ip_addr)
max_ip_addr = tmp_max_ip_addr;
......@@ -806,8 +806,8 @@ libcfs_ip_str2addr(const char *str, int nob, __u32 *addr)
/* numeric IP? */
if (sscanf(str, "%u.%u.%u.%u%n", &a, &b, &c, &d, &n) >= 4 &&
n == nob &&
(a & ~0xff) == 0 && (b & ~0xff) == 0 &&
(c & ~0xff) == 0 && (d & ~0xff) == 0) {
!(a & ~0xff) && !(b & ~0xff) &&
!(c & ~0xff) && !(d & ~0xff)) {
*addr = ((a << 24) | (b << 16) | (c << 8) | d);
return 1;
}
......@@ -837,7 +837,7 @@ cfs_ip_addr_parse(char *str, int len, struct list_head *list)
}
rc = cfs_expr_list_parse(res.ls_str, res.ls_len, 0, 255, &el);
if (rc != 0)
if (rc)
goto out;
list_add_tail(&el->el_link, list);
......@@ -862,7 +862,7 @@ libcfs_ip_addr_range_print(char *buffer, int count, struct list_head *list)
list_for_each_entry(el, list, el_link) {
LASSERT(j++ < 4);
if (i != 0)
if (i)
i += scnprintf(buffer + i, count - i, ".");
i += cfs_expr_list_print(buffer + i, count - i, el);
}
......@@ -932,7 +932,7 @@ libcfs_num_parse(char *str, int len, struct list_head *list)
int rc;
rc = cfs_expr_list_parse(str, len, 0, MAX_NUMERIC_VALUE, &el);
if (rc == 0)
if (!rc)
list_add_tail(&el->el_link, list);
return rc;
......@@ -1114,7 +1114,7 @@ libcfs_net2str_r(__u32 net, char *buf, size_t buf_size)
nf = libcfs_lnd2netstrfns(lnd);
if (!nf)
snprintf(buf, buf_size, "<%u:%u>", lnd, nnum);
else if (nnum == 0)
else if (!nnum)
snprintf(buf, buf_size, "%s", nf->nf_name);
else
snprintf(buf, buf_size, "%s%u", nf->nf_name, nnum);
......@@ -1146,7 +1146,7 @@ libcfs_nid2str_r(lnet_nid_t nid, char *buf, size_t buf_size)
nf->nf_addr2str(addr, buf, buf_size);
addr_len = strlen(buf);
if (nnum == 0)
if (!nnum)
snprintf(buf + addr_len, buf_size - addr_len, "@%s",
nf->nf_name);
else
......@@ -1244,8 +1244,8 @@ libcfs_id2str(lnet_process_id_t id)
}
snprintf(str, LNET_NIDSTR_SIZE, "%s%u-%s",
((id.pid & LNET_PID_USERFLAG) != 0) ? "U" : "",
(id.pid & ~LNET_PID_USERFLAG), libcfs_nid2str(id.nid));
id.pid & LNET_PID_USERFLAG ? "U" : "",
id.pid & ~LNET_PID_USERFLAG, libcfs_nid2str(id.nid));
return str;
}
EXPORT_SYMBOL(libcfs_id2str);
......
......@@ -137,10 +137,10 @@ lnet_peer_tables_cleanup(void)
lnet_net_lock(i);
for (j = 3; ptable->pt_number != 0; j++) {
for (j = 3; ptable->pt_number; j++) {
lnet_net_unlock(i);
if ((j & (j - 1)) == 0) {
if (!(j & (j - 1))) {
CDEBUG(D_WARNING,
"Waiting for %d peers on peer table\n",
ptable->pt_number);
......@@ -167,11 +167,11 @@ lnet_destroy_peer_locked(lnet_peer_t *lp)
{
struct lnet_peer_table *ptable;
LASSERT(lp->lp_refcount == 0);
LASSERT(lp->lp_rtr_refcount == 0);
LASSERT(!lp->lp_refcount);
LASSERT(!lp->lp_rtr_refcount);
LASSERT(list_empty(&lp->lp_txq));
LASSERT(list_empty(&lp->lp_hashlist));
LASSERT(lp->lp_txqnob == 0);
LASSERT(!lp->lp_txqnob);
ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
LASSERT(ptable->pt_number > 0);
......@@ -317,7 +317,7 @@ lnet_debug_peer(lnet_nid_t nid)
lnet_net_lock(cpt);
rc = lnet_nid2peer_locked(&lp, nid, cpt);
if (rc != 0) {
if (rc) {
lnet_net_unlock(cpt);
CDEBUG(D_WARNING, "No peer %s\n", libcfs_nid2str(nid));
return;
......
......@@ -109,7 +109,7 @@ lnet_notify_locked(lnet_peer_t *lp, int notifylnd, int alive,
lp->lp_timestamp = when; /* update timestamp */
lp->lp_ping_deadline = 0; /* disable ping timeout */
if (lp->lp_alive_count != 0 && /* got old news */
if (lp->lp_alive_count && /* got old news */
(!lp->lp_alive) == (!alive)) { /* new date for old news */
CDEBUG(D_NET, "Old news\n");
return;
......@@ -201,7 +201,7 @@ lnet_rtr_decref_locked(lnet_peer_t *lp)
/* lnet_net_lock must be exclusively locked */
lp->lp_rtr_refcount--;
if (lp->lp_rtr_refcount == 0) {
if (!lp->lp_rtr_refcount) {
LASSERT(list_empty(&lp->lp_routes));
if (lp->lp_rcd) {
......@@ -283,7 +283,7 @@ lnet_add_route_to_rnet(lnet_remotenet_t *rnet, lnet_route_t *route)
/* len+1 positions to add a new entry, also prevents division by 0 */
offset = cfs_rand() % (len + 1);
list_for_each(e, &rnet->lrn_routes) {
if (offset == 0)
if (!offset)
break;
offset--;
}
......@@ -342,7 +342,7 @@ lnet_add_route(__u32 net, unsigned int hops, lnet_nid_t gateway,
lnet_net_lock(LNET_LOCK_EX);
rc = lnet_nid2peer_locked(&route->lr_gateway, gateway, LNET_LOCK_EX);
if (rc != 0) {
if (rc) {
lnet_net_unlock(LNET_LOCK_EX);
LIBCFS_FREE(route, sizeof(*route));
......@@ -565,7 +565,7 @@ lnet_get_route(int idx, __u32 *net, __u32 *hops,
list_for_each(e2, &rnet->lrn_routes) {
route = list_entry(e2, lnet_route_t, lr_list);
if (idx-- == 0) {
if (!idx--) {
*net = rnet->lrn_net;
*hops = route->lr_hops;
*priority = route->lr_priority;
......@@ -625,13 +625,13 @@ lnet_parse_rc_info(lnet_rc_data_t *rcd)
}
gw->lp_ping_feats = info->pi_features;
if ((gw->lp_ping_feats & LNET_PING_FEAT_MASK) == 0) {
if (!(gw->lp_ping_feats & LNET_PING_FEAT_MASK)) {
CDEBUG(D_NET, "%s: Unexpected features 0x%x\n",
libcfs_nid2str(gw->lp_nid), gw->lp_ping_feats);
return; /* nothing I can understand */
}
if ((gw->lp_ping_feats & LNET_PING_FEAT_NI_STATUS) == 0)
if (!(gw->lp_ping_feats & LNET_PING_FEAT_NI_STATUS))
return; /* can't carry NI status info */
list_for_each_entry(rtr, &gw->lp_routes, lr_gwlist) {
......@@ -722,7 +722,7 @@ lnet_router_checker_event(lnet_event_t *event)
if (event->type == LNET_EVENT_SEND) {
lp->lp_ping_notsent = 0;
if (event->status == 0)
if (!event->status)
goto out;
}
......@@ -733,7 +733,7 @@ lnet_router_checker_event(lnet_event_t *event)
* we ping alive routers to try to detect router death before
* apps get burned).
*/
lnet_notify_locked(lp, 1, (event->status == 0), cfs_time_current());
lnet_notify_locked(lp, 1, !event->status, cfs_time_current());
/*
* The router checker will wake up very shortly and do the
......@@ -741,7 +741,7 @@ lnet_router_checker_event(lnet_event_t *event)
* XXX If 'lp' stops being a router before then, it will still
* have the notification pending!!!
*/
if (avoid_asym_router_failure && event->status == 0)
if (avoid_asym_router_failure && !event->status)
lnet_parse_rc_info(rcd);
out:
......@@ -764,7 +764,7 @@ lnet_wait_known_routerstate(void)
list_for_each(entry, &the_lnet.ln_routers) {
rtr = list_entry(entry, lnet_peer_t, lp_rtr_list);
if (rtr->lp_alive_count == 0) {
if (!rtr->lp_alive_count) {
all_known = 0;
break;
}
......@@ -785,7 +785,7 @@ lnet_router_ni_update_locked(lnet_peer_t *gw, __u32 net)
{
lnet_route_t *rte;
if ((gw->lp_ping_feats & LNET_PING_FEAT_NI_STATUS) != 0) {
if ((gw->lp_ping_feats & LNET_PING_FEAT_NI_STATUS)) {
list_for_each_entry(rte, &gw->lp_routes, lr_gwlist) {
if (rte->lr_net == net) {
rte->lr_downis = 0;
......@@ -898,7 +898,7 @@ lnet_create_rc_data_locked(lnet_peer_t *gateway)
CERROR("Can't bind MD: %d\n", rc);
goto out;
}
LASSERT(rc == 0);
LASSERT(!rc);
lnet_net_lock(gateway->lp_cpt);
/* router table changed or someone has created rcd for this gateway */
......@@ -918,7 +918,7 @@ lnet_create_rc_data_locked(lnet_peer_t *gateway)
if (rcd) {
if (!LNetHandleIsInvalid(rcd->rcd_mdh)) {
rc = LNetMDUnlink(rcd->rcd_mdh);
LASSERT(rc == 0);
LASSERT(!rc);
}
lnet_destroy_rc_data(rcd);
}
......@@ -949,7 +949,7 @@ lnet_ping_router_locked(lnet_peer_t *rtr)
lnet_peer_addref_locked(rtr);
if (rtr->lp_ping_deadline != 0 && /* ping timed out? */
if (rtr->lp_ping_deadline && /* ping timed out? */
cfs_time_after(now, rtr->lp_ping_deadline))
lnet_notify_locked(rtr, 1, 0, now);
......@@ -977,7 +977,7 @@ lnet_ping_router_locked(lnet_peer_t *rtr)
rtr->lp_ping_deadline, rtr->lp_ping_notsent,
rtr->lp_alive, rtr->lp_alive_count, rtr->lp_ping_timestamp);
if (secs != 0 && !rtr->lp_ping_notsent &&
if (secs && !rtr->lp_ping_notsent &&
cfs_time_after(now, cfs_time_add(rtr->lp_ping_timestamp,
cfs_time_seconds(secs)))) {
int rc;
......@@ -993,7 +993,7 @@ lnet_ping_router_locked(lnet_peer_t *rtr)
mdh = rcd->rcd_mdh;
if (rtr->lp_ping_deadline == 0) {
if (!rtr->lp_ping_deadline) {
rtr->lp_ping_deadline =
cfs_time_shift(router_ping_timeout);
}
......@@ -1004,7 +1004,7 @@ lnet_ping_router_locked(lnet_peer_t *rtr)
LNET_PROTO_PING_MATCHBITS, 0);
lnet_net_lock(rtr->lp_cpt);
if (rc != 0)
if (rc)
rtr->lp_ping_notsent = 0; /* no event pending */
}
......@@ -1038,7 +1038,7 @@ lnet_router_checker_start(void)
eqsz = 0;
rc = LNetEQAlloc(eqsz, lnet_router_checker_event,
&the_lnet.ln_rc_eqh);
if (rc != 0) {
if (rc) {
CERROR("Can't allocate EQ(%d): %d\n", eqsz, rc);
return -ENOMEM;
}
......@@ -1051,7 +1051,7 @@ lnet_router_checker_start(void)
/* block until event callback signals exit */
down(&the_lnet.ln_rc_signal);
rc = LNetEQFree(the_lnet.ln_rc_eqh);
LASSERT(rc == 0);
LASSERT(!rc);
the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
return -ENOMEM;
}
......@@ -1084,7 +1084,7 @@ lnet_router_checker_stop(void)
LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN);
rc = LNetEQFree(the_lnet.ln_rc_eqh);
LASSERT(rc == 0);
LASSERT(!rc);
}
static void
......@@ -1288,7 +1288,7 @@ lnet_rtrpool_free_bufs(lnet_rtrbufpool_t *rbp)
int nbuffers = 0;
lnet_rtrbuf_t *rb;
if (rbp->rbp_nbuffers == 0) /* not initialized or already freed */
if (!rbp->rbp_nbuffers) /* not initialized or already freed */
return;
LASSERT(list_empty(&rbp->rbp_msgs));
......@@ -1317,7 +1317,7 @@ lnet_rtrpool_alloc_bufs(lnet_rtrbufpool_t *rbp, int nbufs, int cpt)
lnet_rtrbuf_t *rb;
int i;
if (rbp->rbp_nbuffers != 0) {
if (rbp->rbp_nbuffers) {
LASSERT(rbp->rbp_nbuffers == nbufs);
return 0;
}
......@@ -1484,17 +1484,17 @@ lnet_rtrpools_alloc(int im_a_router)
cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
lnet_rtrpool_init(&rtrp[0], 0);
rc = lnet_rtrpool_alloc_bufs(&rtrp[0], nrb_tiny, i);
if (rc != 0)
if (rc)
goto failed;
lnet_rtrpool_init(&rtrp[1], small_pages);
rc = lnet_rtrpool_alloc_bufs(&rtrp[1], nrb_small, i);
if (rc != 0)
if (rc)
goto failed;
lnet_rtrpool_init(&rtrp[2], large_pages);
rc = lnet_rtrpool_alloc_bufs(&rtrp[2], nrb_large, i);
if (rc != 0)
if (rc)
goto failed;
}
......
This diff is collapsed.
......@@ -218,7 +218,7 @@ stt_startup(void)
stt_data.stt_nthreads = 0;
init_waitqueue_head(&stt_data.stt_waitq);
rc = stt_start_timer_thread();
if (rc != 0)
if (rc)
CERROR("Can't spawn timer thread: %d\n", rc);
return rc;
......@@ -237,7 +237,7 @@ stt_shutdown(void)
stt_data.stt_shuttingdown = 1;
wake_up(&stt_data.stt_waitq);
lst_wait_until(stt_data.stt_nthreads == 0, stt_data.stt_lock,
lst_wait_until(!stt_data.stt_nthreads, stt_data.stt_lock,
"waiting for %d threads to terminate\n",
stt_data.stt_nthreads);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment