Commit 97d10d0a authored by Mike Shuey, committed by Greg Kroah-Hartman

staging: lustre: lnet: socklnd: code cleanup - align spacing

Unify variable declarations to use a single space.  Also include several
miscellaneous whitespace cleanups, particularly in socklnd.h.
Signed-off-by: Mike Shuey <shuey@purdue.edu>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent ec3d17c0
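To illustrate the convention this patch applies, here is a minimal hedged sketch. The functions ksocknal_example_before()/ksocknal_example_after() are invented for illustration (only the ksock_net_t/ksock_interface_t types and their ksnn_* fields appear in the diff below), and the "after" layout follows the subject line's column alignment:

/* Hypothetical sketch, before: uneven spacing between type and name. */
static int
ksocknal_example_before(lnet_ni_t *ni)
{
	ksock_net_t *net = ni->ni_data;
	ksock_interface_t  *iface = NULL;
	int i;

	/* Walk the net's interface table, remembering the last entry. */
	for (i = 0; i < net->ksnn_ninterfaces; i++)
		iface = &net->ksnn_interfaces[i];

	return iface != NULL;
}

/* Hypothetical sketch, after: declarations aligned into one column. */
static int
ksocknal_example_after(lnet_ni_t *ni)
{
	ksock_net_t       *net = ni->ni_data;
	ksock_interface_t *iface = NULL;
	int                i;

	/* Same body; only the declaration spacing above differs. */
	for (i = 0; i < net->ksnn_ninterfaces; i++)
		iface = &net->ksnn_interfaces[i];

	return iface != NULL;
}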
drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
@@ -49,8 +49,8 @@ ksock_nal_data_t ksocknal_data;
static ksock_interface_t *
ksocknal_ip2iface(lnet_ni_t *ni, __u32 ip)
{
ksock_net_t *net = ni->ni_data;
int i;
ksock_net_t *net = ni->ni_data;
int i;
ksock_interface_t *iface;
for (i = 0; i < net->ksnn_ninterfaces; i++) {
@@ -102,8 +102,8 @@ ksocknal_destroy_route(ksock_route_t *route)
static int
ksocknal_create_peer(ksock_peer_t **peerp, lnet_ni_t *ni, lnet_process_id_t id)
{
ksock_net_t *net = ni->ni_data;
ksock_peer_t *peer;
ksock_net_t *net = ni->ni_data;
ksock_peer_t *peer;
LASSERT(id.nid != LNET_NID_ANY);
LASSERT(id.pid != LNET_PID_ANY);
@@ -149,7 +149,7 @@ ksocknal_create_peer(ksock_peer_t **peerp, lnet_ni_t *ni, lnet_process_id_t id)
void
ksocknal_destroy_peer(ksock_peer_t *peer)
{
ksock_net_t *net = peer->ksnp_ni->ni_data;
ksock_net_t *net = peer->ksnp_ni->ni_data;
CDEBUG(D_NET, "peer %s %p deleted\n",
libcfs_id2str(peer->ksnp_id), peer);
@@ -175,9 +175,9 @@ ksocknal_destroy_peer(ksock_peer_t *peer)
ksock_peer_t *
ksocknal_find_peer_locked(lnet_ni_t *ni, lnet_process_id_t id)
{
struct list_head *peer_list = ksocknal_nid2peerlist(id.nid);
struct list_head *tmp;
ksock_peer_t *peer;
struct list_head *peer_list = ksocknal_nid2peerlist(id.nid);
struct list_head *tmp;
ksock_peer_t *peer;
list_for_each(tmp, peer_list) {
@@ -203,7 +203,7 @@ ksocknal_find_peer_locked(lnet_ni_t *ni, lnet_process_id_t id)
ksock_peer_t *
ksocknal_find_peer(lnet_ni_t *ni, lnet_process_id_t id)
{
ksock_peer_t *peer;
ksock_peer_t *peer;
read_lock(&ksocknal_data.ksnd_global_lock);
peer = ksocknal_find_peer_locked(ni, id);
@@ -217,8 +217,8 @@ ksocknal_find_peer(lnet_ni_t *ni, lnet_process_id_t id)
static void
ksocknal_unlink_peer_locked(ksock_peer_t *peer)
{
int i;
__u32 ip;
int i;
__u32 ip;
ksock_interface_t *iface;
for (i = 0; i < peer->ksnp_n_passive_ips; i++) {
@@ -249,13 +249,13 @@ ksocknal_get_peer_info(lnet_ni_t *ni, int index,
lnet_process_id_t *id, __u32 *myip, __u32 *peer_ip,
int *port, int *conn_count, int *share_count)
{
ksock_peer_t *peer;
struct list_head *ptmp;
ksock_route_t *route;
struct list_head *rtmp;
int i;
int j;
int rc = -ENOENT;
ksock_peer_t *peer;
struct list_head *ptmp;
ksock_route_t *route;
struct list_head *rtmp;
int i;
int j;
int rc = -ENOENT;
read_lock(&ksocknal_data.ksnd_global_lock);
@@ -322,8 +322,8 @@ ksocknal_get_peer_info(lnet_ni_t *ni, int index,
static void
ksocknal_associate_route_conn_locked(ksock_route_t *route, ksock_conn_t *conn)
{
ksock_peer_t *peer = route->ksnr_peer;
int type = conn->ksnc_type;
ksock_peer_t *peer = route->ksnr_peer;
int type = conn->ksnc_type;
ksock_interface_t *iface;
conn->ksnc_route = route;
@@ -366,9 +366,9 @@ ksocknal_associate_route_conn_locked(ksock_route_t *route, ksock_conn_t *conn)
static void
ksocknal_add_route_locked(ksock_peer_t *peer, ksock_route_t *route)
{
struct list_head *tmp;
ksock_conn_t *conn;
ksock_route_t *route2;
struct list_head *tmp;
ksock_conn_t *conn;
ksock_route_t *route2;
LASSERT(!peer->ksnp_closing);
LASSERT(route->ksnr_peer == NULL);
@@ -407,11 +407,11 @@ ksocknal_add_route_locked(ksock_peer_t *peer, ksock_route_t *route)
static void
ksocknal_del_route_locked(ksock_route_t *route)
{
ksock_peer_t *peer = route->ksnr_peer;
ksock_peer_t *peer = route->ksnr_peer;
ksock_interface_t *iface;
ksock_conn_t *conn;
struct list_head *ctmp;
struct list_head *cnxt;
ksock_conn_t *conn;
struct list_head *ctmp;
struct list_head *cnxt;
LASSERT(!route->ksnr_deleted);
@@ -447,12 +447,12 @@ ksocknal_del_route_locked(ksock_route_t *route)
int
ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port)
{
struct list_head *tmp;
ksock_peer_t *peer;
ksock_peer_t *peer2;
ksock_route_t *route;
ksock_route_t *route2;
int rc;
struct list_head *tmp;
ksock_peer_t *peer;
ksock_peer_t *peer2;
ksock_route_t *route;
ksock_route_t *route2;
int rc;
if (id.nid == LNET_NID_ANY ||
id.pid == LNET_PID_ANY)
@@ -509,11 +509,11 @@ ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port)
static void
ksocknal_del_peer_locked(ksock_peer_t *peer, __u32 ip)
{
ksock_conn_t *conn;
ksock_route_t *route;
struct list_head *tmp;
struct list_head *nxt;
int nshared;
ksock_conn_t *conn;
ksock_route_t *route;
struct list_head *tmp;
struct list_head *nxt;
int nshared;
LASSERT(!peer->ksnp_closing);
@@ -565,13 +565,13 @@ static int
ksocknal_del_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip)
{
LIST_HEAD(zombies);
struct list_head *ptmp;
struct list_head *pnxt;
ksock_peer_t *peer;
int lo;
int hi;
int i;
int rc = -ENOENT;
struct list_head *ptmp;
struct list_head *pnxt;
ksock_peer_t *peer;
int lo;
int hi;
int i;
int rc = -ENOENT;
write_lock_bh(&ksocknal_data.ksnd_global_lock);
@@ -623,11 +623,11 @@ ksocknal_del_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip)
static ksock_conn_t *
ksocknal_get_conn_by_idx(lnet_ni_t *ni, int index)
{
ksock_peer_t *peer;
struct list_head *ptmp;
ksock_conn_t *conn;
struct list_head *ctmp;
int i;
ksock_peer_t *peer;
struct list_head *ptmp;
ksock_conn_t *conn;
struct list_head *ctmp;
int i;
read_lock(&ksocknal_data.ksnd_global_lock);
@@ -661,8 +661,8 @@ static ksock_sched_t *
ksocknal_choose_scheduler_locked(unsigned int cpt)
{
struct ksock_sched_info *info = ksocknal_data.ksnd_sched_info[cpt];
ksock_sched_t *sched;
int i;
ksock_sched_t *sched;
int i;
LASSERT(info->ksi_nthreads > 0);
@@ -683,9 +683,9 @@ ksocknal_choose_scheduler_locked(unsigned int cpt)
static int
ksocknal_local_ipvec(lnet_ni_t *ni, __u32 *ipaddrs)
{
ksock_net_t *net = ni->ni_data;
int i;
int nip;
ksock_net_t *net = ni->ni_data;
int i;
int nip;
read_lock(&ksocknal_data.ksnd_global_lock);
@@ -711,12 +711,12 @@ ksocknal_local_ipvec(lnet_ni_t *ni, __u32 *ipaddrs)
static int
ksocknal_match_peerip(ksock_interface_t *iface, __u32 *ips, int nips)
{
int best_netmatch = 0;
int best_xor = 0;
int best = -1;
int this_xor;
int this_netmatch;
int i;
int best_netmatch = 0;
int best_xor = 0;
int best = -1;
int this_xor;
int this_netmatch;
int i;
for (i = 0; i < nips; i++) {
if (ips[i] == 0)
@@ -743,19 +743,19 @@ ksocknal_match_peerip(ksock_interface_t *iface, __u32 *ips, int nips)
static int
ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips)
{
rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
ksock_net_t *net = peer->ksnp_ni->ni_data;
ksock_interface_t *iface;
ksock_interface_t *best_iface;
int n_ips;
int i;
int j;
int k;
__u32 ip;
__u32 xor;
int this_netmatch;
int best_netmatch;
int best_npeers;
rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
ksock_net_t *net = peer->ksnp_ni->ni_data;
ksock_interface_t *iface;
ksock_interface_t *best_iface;
int n_ips;
int i;
int j;
int k;
__u32 ip;
__u32 xor;
int this_netmatch;
int best_netmatch;
int best_npeers;
/* CAVEAT EMPTOR: We do all our interface matching with an
* exclusive hold of global lock at IRQ priority. We're only
@@ -846,19 +846,19 @@ static void
ksocknal_create_routes(ksock_peer_t *peer, int port,
__u32 *peer_ipaddrs, int npeer_ipaddrs)
{
ksock_route_t *newroute = NULL;
rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
lnet_ni_t *ni = peer->ksnp_ni;
ksock_net_t *net = ni->ni_data;
struct list_head *rtmp;
ksock_route_t *route;
ksock_interface_t *iface;
ksock_interface_t *best_iface;
int best_netmatch;
int this_netmatch;
int best_nroutes;
int i;
int j;
ksock_route_t *newroute = NULL;
rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
lnet_ni_t *ni = peer->ksnp_ni;
ksock_net_t *net = ni->ni_data;
struct list_head *rtmp;
ksock_route_t *route;
ksock_interface_t *iface;
ksock_interface_t *best_iface;
int best_netmatch;
int this_netmatch;
int best_nroutes;
int i;
int j;
/* CAVEAT EMPTOR: We do all our interface matching with an
* exclusive hold of global lock at IRQ priority. We're only
@@ -963,10 +963,10 @@ ksocknal_create_routes(ksock_peer_t *peer, int port,
int
ksocknal_accept(lnet_ni_t *ni, struct socket *sock)
{
ksock_connreq_t *cr;
int rc;
__u32 peer_ip;
int peer_port;
ksock_connreq_t *cr;
int rc;
__u32 peer_ip;
int peer_port;
rc = libcfs_sock_getaddr(sock, 1, &peer_ip, &peer_port);
LASSERT(rc == 0); /* we succeeded before */
@@ -994,7 +994,7 @@ ksocknal_accept(lnet_ni_t *ni, struct socket *sock)
static int
ksocknal_connecting(ksock_peer_t *peer, __u32 ipaddr)
{
ksock_route_t *route;
ksock_route_t *route;
list_for_each_entry(route, &peer->ksnp_routes, ksnr_list) {
@@ -1008,23 +1008,23 @@ int
ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
struct socket *sock, int type)
{
rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
LIST_HEAD(zombies);
lnet_process_id_t peerid;
struct list_head *tmp;
__u64 incarnation;
ksock_conn_t *conn;
ksock_conn_t *conn2;
ksock_peer_t *peer = NULL;
ksock_peer_t *peer2;
ksock_sched_t *sched;
lnet_process_id_t peerid;
struct list_head *tmp;
__u64 incarnation;
ksock_conn_t *conn;
ksock_conn_t *conn2;
ksock_peer_t *peer = NULL;
ksock_peer_t *peer2;
ksock_sched_t *sched;
ksock_hello_msg_t *hello;
int cpt;
ksock_tx_t *tx;
ksock_tx_t *txtmp;
int rc;
int active;
char *warn = NULL;
int cpt;
ksock_tx_t *tx;
ksock_tx_t *txtmp;
int rc;
int active;
char *warn = NULL;
active = (route != NULL);
@@ -1396,10 +1396,10 @@ ksocknal_close_conn_locked(ksock_conn_t *conn, int error)
/* This just does the immediate housekeeping, and queues the
* connection for the reaper to terminate.
* Caller holds ksnd_global_lock exclusively in irq context */
ksock_peer_t *peer = conn->ksnc_peer;
ksock_route_t *route;
ksock_conn_t *conn2;
struct list_head *tmp;
ksock_peer_t *peer = conn->ksnc_peer;
ksock_route_t *route;
ksock_conn_t *conn2;
struct list_head *tmp;
LASSERT(peer->ksnp_error == 0);
LASSERT(!conn->ksnc_closing);
@@ -1479,7 +1479,7 @@ ksocknal_close_conn_locked(ksock_conn_t *conn, int error)
void
ksocknal_peer_failed(ksock_peer_t *peer)
{
int notify = 0;
int notify = 0;
unsigned long last_alive = 0;
/* There has been a connection failure or comms error; but I'll only
@@ -1506,9 +1506,9 @@ ksocknal_peer_failed(ksock_peer_t *peer)
void
ksocknal_finalize_zcreq(ksock_conn_t *conn)
{
ksock_peer_t *peer = conn->ksnc_peer;
ksock_tx_t *tx;
ksock_tx_t *tmp;
ksock_peer_t *peer = conn->ksnc_peer;
ksock_tx_t *tx;
ksock_tx_t *tmp;
LIST_HEAD(zlist);
/* NB safe to finalize TXs because closing of socket will
......@@ -1546,9 +1546,9 @@ ksocknal_terminate_conn(ksock_conn_t *conn)
* disengage the socket from its callbacks and close it.
* ksnc_refcount will eventually hit zero, and then the reaper will
* destroy it. */
ksock_peer_t *peer = conn->ksnc_peer;
ksock_sched_t *sched = conn->ksnc_scheduler;
int failed = 0;
ksock_peer_t *peer = conn->ksnc_peer;
ksock_sched_t *sched = conn->ksnc_scheduler;
int failed = 0;
LASSERT(conn->ksnc_closing);
@@ -1617,7 +1617,7 @@ ksocknal_queue_zombie_conn(ksock_conn_t *conn)
void
ksocknal_destroy_conn(ksock_conn_t *conn)
{
unsigned long last_rcv;
unsigned long last_rcv;
/* Final coup-de-grace of the reaper */
CDEBUG(D_NET, "connection %p\n", conn);
@@ -1677,10 +1677,10 @@ ksocknal_destroy_conn(ksock_conn_t *conn)
int
ksocknal_close_peer_conns_locked(ksock_peer_t *peer, __u32 ipaddr, int why)
{
ksock_conn_t *conn;
struct list_head *ctmp;
struct list_head *cnxt;
int count = 0;
ksock_conn_t *conn;
struct list_head *ctmp;
struct list_head *cnxt;
int count = 0;
list_for_each_safe(ctmp, cnxt, &peer->ksnp_conns) {
conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
@@ -1698,9 +1698,9 @@ ksocknal_close_peer_conns_locked(ksock_peer_t *peer, __u32 ipaddr, int why)
int
ksocknal_close_conn_and_siblings(ksock_conn_t *conn, int why)
{
ksock_peer_t *peer = conn->ksnc_peer;
__u32 ipaddr = conn->ksnc_ipaddr;
int count;
ksock_peer_t *peer = conn->ksnc_peer;
__u32 ipaddr = conn->ksnc_ipaddr;
int count;
write_lock_bh(&ksocknal_data.ksnd_global_lock);
@@ -1714,13 +1714,13 @@ ksocknal_close_conn_and_siblings(ksock_conn_t *conn, int why)
int
ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr)
{
ksock_peer_t *peer;
struct list_head *ptmp;
struct list_head *pnxt;
int lo;
int hi;
int i;
int count = 0;
ksock_peer_t *peer;
struct list_head *ptmp;
struct list_head *pnxt;
int lo;
int hi;
int i;
int count = 0;
write_lock_bh(&ksocknal_data.ksnd_global_lock);
@@ -1762,7 +1762,7 @@ ksocknal_notify(lnet_ni_t *ni, lnet_nid_t gw_nid, int alive)
{
/* The router is telling me she's been notified of a change in
* gateway state.... */
lnet_process_id_t id = {0};
lnet_process_id_t id = {0};
id.nid = gw_nid;
id.pid = LNET_PID_ANY;
@@ -1783,20 +1783,20 @@ ksocknal_notify(lnet_ni_t *ni, lnet_nid_t gw_nid, int alive)
void
ksocknal_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
{
int connect = 1;
unsigned long last_alive = 0;
unsigned long now = cfs_time_current();
ksock_peer_t *peer = NULL;
rwlock_t *glock = &ksocknal_data.ksnd_global_lock;
lnet_process_id_t id = {.nid = nid, .pid = LUSTRE_SRV_LNET_PID};
int connect = 1;
unsigned long last_alive = 0;
unsigned long now = cfs_time_current();
ksock_peer_t *peer = NULL;
rwlock_t *glock = &ksocknal_data.ksnd_global_lock;
lnet_process_id_t id = {.nid = nid, .pid = LUSTRE_SRV_LNET_PID};
read_lock(glock);
peer = ksocknal_find_peer_locked(ni, id);
if (peer != NULL) {
struct list_head *tmp;
ksock_conn_t *conn;
int bufnob;
struct list_head *tmp;
ksock_conn_t *conn;
int bufnob;
list_for_each(tmp, &peer->ksnp_conns) {
conn = list_entry(tmp, ksock_conn_t, ksnc_list);
@@ -1844,10 +1844,10 @@ ksocknal_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
static void
ksocknal_push_peer(ksock_peer_t *peer)
{
int index;
int i;
struct list_head *tmp;
ksock_conn_t *conn;
int index;
int i;
struct list_head *tmp;
ksock_conn_t *conn;
for (index = 0; ; index++) {
read_lock(&ksocknal_data.ksnd_global_lock);
@@ -1877,12 +1877,12 @@ ksocknal_push_peer(ksock_peer_t *peer)
static int
ksocknal_push(lnet_ni_t *ni, lnet_process_id_t id)
{
ksock_peer_t *peer;
struct list_head *tmp;
int index;
int i;
int j;
int rc = -ENOENT;
ksock_peer_t *peer;
struct list_head *tmp;
int index;
int i;
int j;
int rc = -ENOENT;
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
for (j = 0; ; j++) {
@@ -1926,15 +1926,15 @@ ksocknal_push(lnet_ni_t *ni, lnet_process_id_t id)
static int
ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask)
{
ksock_net_t *net = ni->ni_data;
ksock_net_t *net = ni->ni_data;
ksock_interface_t *iface;
int rc;
int i;
int j;
struct list_head *ptmp;
ksock_peer_t *peer;
struct list_head *rtmp;
ksock_route_t *route;
int rc;
int i;
int j;
struct list_head *ptmp;
ksock_peer_t *peer;
struct list_head *rtmp;
ksock_route_t *route;
if (ipaddress == 0 ||
netmask == 0)
@@ -1988,12 +1988,12 @@ ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask)
static void
ksocknal_peer_del_interface_locked(ksock_peer_t *peer, __u32 ipaddr)
{
struct list_head *tmp;
struct list_head *nxt;
ksock_route_t *route;
ksock_conn_t *conn;
int i;
int j;
struct list_head *tmp;
struct list_head *nxt;
ksock_route_t *route;
ksock_conn_t *conn;
int i;
int j;
for (i = 0; i < peer->ksnp_n_passive_ips; i++)
if (peer->ksnp_passive_ips[i] == ipaddr) {
@@ -2029,14 +2029,14 @@ ksocknal_peer_del_interface_locked(ksock_peer_t *peer, __u32 ipaddr)
static int
ksocknal_del_interface(lnet_ni_t *ni, __u32 ipaddress)
{
ksock_net_t *net = ni->ni_data;
int rc = -ENOENT;
struct list_head *tmp;
struct list_head *nxt;
ksock_peer_t *peer;
__u32 this_ip;
int i;
int j;
ksock_net_t *net = ni->ni_data;
int rc = -ENOENT;
struct list_head *tmp;
struct list_head *nxt;
ksock_peer_t *peer;
__u32 this_ip;
int i;
int j;
write_lock_bh(&ksocknal_data.ksnd_global_lock);
@@ -2114,11 +2114,11 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
data->ioc_u32[0]); /* IP address */
case IOC_LIBCFS_GET_PEER: {
__u32 myip = 0;
__u32 ip = 0;
int port = 0;
int conn_count = 0;
int share_count = 0;
__u32 myip = 0;
__u32 ip = 0;
int port = 0;
int conn_count = 0;
int share_count = 0;
rc = ksocknal_get_peer_info(ni, data->ioc_count,
&id, &myip, &ip, &port,
@@ -2150,9 +2150,9 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
data->ioc_u32[0]); /* IP */
case IOC_LIBCFS_GET_CONN: {
int txmem;
int rxmem;
int nagle;
int txmem;
int rxmem;
int nagle;
ksock_conn_t *conn = ksocknal_get_conn_by_idx(ni, data->ioc_count);
if (conn == NULL)
@@ -2207,8 +2207,8 @@ ksocknal_free_buffers(void)
LASSERT(atomic_read(&ksocknal_data.ksnd_nactive_txs) == 0);
if (ksocknal_data.ksnd_sched_info != NULL) {
struct ksock_sched_info *info;
int i;
struct ksock_sched_info *info;
int i;
cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
if (info->ksi_scheds != NULL) {
@@ -2227,8 +2227,8 @@ ksocknal_free_buffers(void)
spin_lock(&ksocknal_data.ksnd_tx_lock);
if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
struct list_head zlist;
ksock_tx_t *tx;
struct list_head zlist;
ksock_tx_t *tx;
list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs);
list_del_init(&ksocknal_data.ksnd_idle_noop_txs);
@@ -2248,9 +2248,9 @@ static void
ksocknal_base_shutdown(void)
{
struct ksock_sched_info *info;
ksock_sched_t *sched;
int i;
int j;
ksock_sched_t *sched;
int i;
int j;
CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
atomic_read(&libcfs_kmemory));
@@ -2351,8 +2351,8 @@ static int
ksocknal_base_startup(void)
{
struct ksock_sched_info *info;
int rc;
int i;
int rc;
int i;
LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING);
LASSERT(ksocknal_data.ksnd_nnets == 0);
@@ -2398,8 +2398,8 @@ ksocknal_base_startup(void)
goto failed;
cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
ksock_sched_t *sched;
int nthrs;
ksock_sched_t *sched;
int nthrs;
nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
if (*ksocknal_tunables.ksnd_nscheds > 0) {
@@ -2430,9 +2430,9 @@ ksocknal_base_startup(void)
}
}
ksocknal_data.ksnd_connd_starting = 0;
ksocknal_data.ksnd_connd_failed_stamp = 0;
ksocknal_data.ksnd_connd_starting_stamp = get_seconds();
ksocknal_data.ksnd_connd_starting = 0;
ksocknal_data.ksnd_connd_failed_stamp = 0;
ksocknal_data.ksnd_connd_starting_stamp = get_seconds();
/* must have at least 2 connds to remain responsive to accepts while
* connecting */
if (*ksocknal_tunables.ksnd_nconnds < SOCKNAL_CONND_RESV + 1)
@@ -2482,9 +2482,9 @@ ksocknal_base_startup(void)
static void
ksocknal_debug_peerhash(lnet_ni_t *ni)
{
ksock_peer_t *peer = NULL;
struct list_head *tmp;
int i;
ksock_peer_t *peer = NULL;
struct list_head *tmp;
int i;
read_lock(&ksocknal_data.ksnd_global_lock);
@@ -2536,12 +2536,12 @@ ksocknal_debug_peerhash(lnet_ni_t *ni)
void
ksocknal_shutdown(lnet_ni_t *ni)
{
ksock_net_t *net = ni->ni_data;
int i;
ksock_net_t *net = ni->ni_data;
int i;
lnet_process_id_t anyid = {0};
anyid.nid = LNET_NID_ANY;
anyid.pid = LNET_PID_ANY;
anyid.nid = LNET_NID_ANY;
anyid.pid = LNET_PID_ANY;
LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_ALL);
LASSERT(ksocknal_data.ksnd_nnets > 0);
@@ -2588,11 +2588,11 @@ ksocknal_shutdown(lnet_ni_t *ni)
static int
ksocknal_enumerate_interfaces(ksock_net_t *net)
{
char **names;
int i;
int j;
int rc;
int n;
char **names;
int i;
int j;
int rc;
int n;
n = libcfs_ipif_enumerate(&names);
if (n <= 0) {
@@ -2601,9 +2601,9 @@ ksocknal_enumerate_interfaces(ksock_net_t *net)
}
for (i = j = 0; i < n; i++) {
int up;
__u32 ip;
__u32 mask;
int up;
__u32 ip;
__u32 mask;
if (!strcmp(names[i], "lo")) /* skip the loopback IF */
continue;
@@ -2645,15 +2645,15 @@ ksocknal_enumerate_interfaces(ksock_net_t *net)
static int
ksocknal_search_new_ipif(ksock_net_t *net)
{
int new_ipif = 0;
int i;
int new_ipif = 0;
int i;
for (i = 0; i < net->ksnn_ninterfaces; i++) {
char *ifnam = &net->ksnn_interfaces[i].ksni_name[0];
char *colon = strchr(ifnam, ':');
int found = 0;
ksock_net_t *tmp;
int j;
char *ifnam = &net->ksnn_interfaces[i].ksni_name[0];
char *colon = strchr(ifnam, ':');
int found = 0;
ksock_net_t *tmp;
int j;
if (colon != NULL) /* ignore alias device */
*colon = 0;
@@ -2687,9 +2687,9 @@ ksocknal_search_new_ipif(ksock_net_t *net)
static int
ksocknal_start_schedulers(struct ksock_sched_info *info)
{
int nthrs;
int rc = 0;
int i;
int nthrs;
int rc = 0;
int i;
if (info->ksi_nthreads == 0) {
if (*ksocknal_tunables.ksnd_nscheds > 0) {
@@ -2708,9 +2708,9 @@ ksocknal_start_schedulers(struct ksock_sched_info *info)
}
for (i = 0; i < nthrs; i++) {
long id;
char name[20];
ksock_sched_t *sched;
long id;
char name[20];
ksock_sched_t *sched;
id = KSOCK_THREAD_ID(info->ksi_cpt, info->ksi_nthreads + i);
sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];
snprintf(name, sizeof(name), "socknal_sd%02d_%02d",
@@ -2733,14 +2733,14 @@ ksocknal_start_schedulers(struct ksock_sched_info *info)
static int
ksocknal_net_start_threads(ksock_net_t *net, __u32 *cpts, int ncpts)
{
int newif = ksocknal_search_new_ipif(net);
int rc;
int i;
int newif = ksocknal_search_new_ipif(net);
int rc;
int i;
LASSERT(ncpts > 0 && ncpts <= cfs_cpt_number(lnet_cpt_table()));
for (i = 0; i < ncpts; i++) {
struct ksock_sched_info *info;
struct ksock_sched_info *info;
int cpt = (cpts == NULL) ? i : cpts[i];
LASSERT(cpt < cfs_cpt_number(lnet_cpt_table()));
@@ -2759,9 +2759,9 @@ ksocknal_net_start_threads(ksock_net_t *net, __u32 *cpts, int ncpts)
int
ksocknal_startup(lnet_ni_t *ni)
{
ksock_net_t *net;
int rc;
int i;
ksock_net_t *net;
int rc;
int i;
LASSERT(ni->ni_lnd == &the_ksocklnd);
@@ -2791,7 +2791,7 @@ ksocknal_startup(lnet_ni_t *ni)
net->ksnn_ninterfaces = 1;
} else {
for (i = 0; i < LNET_MAX_INTERFACES; i++) {
int up;
int up;
if (ni->ni_interfaces[i] == NULL)
break;
@@ -2851,7 +2851,7 @@ ksocknal_module_fini(void)
static int __init
ksocknal_module_init(void)
{
int rc;
int rc;
/* check ksnr_connected/connecting field large enough */
CLASSERT(SOCKLND_CONN_NTYPES <= 4);
drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
@@ -36,15 +36,15 @@
#include "../../../include/linux/lnet/socklnd.h"
#include "../../../include/linux/lnet/lnet-sysctl.h"
#define SOCKNAL_PEER_HASH_SIZE 101 /* # peer lists */
#define SOCKNAL_RESCHED 100 /* # scheduler loops before reschedule */
#define SOCKNAL_INSANITY_RECONN 5000 /* connd is trying on reconn infinitely */
#define SOCKNAL_ENOMEM_RETRY CFS_TICK /* jiffies between retries */
#define SOCKNAL_PEER_HASH_SIZE 101 /* # peer lists */
#define SOCKNAL_RESCHED 100 /* # scheduler loops before reschedule */
#define SOCKNAL_INSANITY_RECONN 5000 /* connd is trying on reconn infinitely */
#define SOCKNAL_ENOMEM_RETRY CFS_TICK /* jiffies between retries */
#define SOCKNAL_SINGLE_FRAG_TX 0 /* disable multi-fragment sends */
#define SOCKNAL_SINGLE_FRAG_RX 0 /* disable multi-fragment receives */
#define SOCKNAL_SINGLE_FRAG_TX 0 /* disable multi-fragment sends */
#define SOCKNAL_SINGLE_FRAG_RX 0 /* disable multi-fragment receives */
#define SOCKNAL_VERSION_DEBUG 0 /* enable protocol version debugging */
#define SOCKNAL_VERSION_DEBUG 0 /* enable protocol version debugging */
/* risk kmap deadlock on multi-frag I/O (backs off to single-frag if disabled).
* no risk if we're not running on a CONFIG_HIGHMEM platform. */
@@ -58,33 +58,31 @@ struct ksock_sched_info;
typedef struct /* per scheduler state */
{
spinlock_t kss_lock; /* serialise */
struct list_head kss_rx_conns; /* conn waiting to be read */
/* conn waiting to be written */
struct list_head kss_tx_conns;
/* zombie noop tx list */
struct list_head kss_zombie_noop_txs;
wait_queue_head_t kss_waitq; /* where scheduler sleeps */
/* # connections assigned to this scheduler */
int kss_nconns;
struct ksock_sched_info *kss_info; /* owner of it */
struct page *kss_rx_scratch_pgs[LNET_MAX_IOV];
struct kvec kss_scratch_iov[LNET_MAX_IOV];
spinlock_t kss_lock; /* serialise */
struct list_head kss_rx_conns; /* conn waiting to be read */
struct list_head kss_tx_conns; /* conn waiting to be written */
struct list_head kss_zombie_noop_txs; /* zombie noop tx list */
wait_queue_head_t kss_waitq; /* where scheduler sleeps */
int kss_nconns; /* # connections assigned to
* this scheduler */
struct ksock_sched_info *kss_info; /* owner of it */
struct page *kss_rx_scratch_pgs[LNET_MAX_IOV];
struct kvec kss_scratch_iov[LNET_MAX_IOV];
} ksock_sched_t;
struct ksock_sched_info {
int ksi_nthreads_max; /* max allowed threads */
int ksi_nthreads; /* number of threads */
int ksi_cpt; /* CPT id */
ksock_sched_t *ksi_scheds; /* array of schedulers */
int ksi_nthreads_max; /* max allowed threads */
int ksi_nthreads; /* number of threads */
int ksi_cpt; /* CPT id */
ksock_sched_t *ksi_scheds; /* array of schedulers */
};
#define KSOCK_CPT_SHIFT 16
#define KSOCK_THREAD_ID(cpt, sid) (((cpt) << KSOCK_CPT_SHIFT) | (sid))
#define KSOCK_THREAD_CPT(id) ((id) >> KSOCK_CPT_SHIFT)
#define KSOCK_THREAD_SID(id) ((id) & ((1UL << KSOCK_CPT_SHIFT) - 1))
#define KSOCK_CPT_SHIFT 16
#define KSOCK_THREAD_ID(cpt, sid) (((cpt) << KSOCK_CPT_SHIFT) | (sid))
#define KSOCK_THREAD_CPT(id) ((id) >> KSOCK_CPT_SHIFT)
#define KSOCK_THREAD_SID(id) ((id) & ((1UL << KSOCK_CPT_SHIFT) - 1))
typedef struct /* in-use interface */
typedef struct /* in-use interface */
{
__u32 ksni_ipaddr; /* interface's IP address */
__u32 ksni_netmask; /* interface's network mask */
@@ -94,35 +92,48 @@ typedef struct /* in-use interface */
} ksock_interface_t;
typedef struct {
/* "stuck" socket timeout (seconds) */
int *ksnd_timeout;
/* # scheduler threads in each pool while starting */
int *ksnd_nscheds;
int *ksnd_nconnds; /* # connection daemons */
int *ksnd_nconnds_max; /* max # connection daemons */
int *ksnd_min_reconnectms; /* first connection retry after (ms)... */
int *ksnd_max_reconnectms; /* ...exponentially increasing to this */
int *ksnd_eager_ack; /* make TCP ack eagerly? */
int *ksnd_typed_conns; /* drive sockets by type? */
int *ksnd_min_bulk; /* smallest "large" message */
int *ksnd_tx_buffer_size; /* socket tx buffer size */
int *ksnd_rx_buffer_size; /* socket rx buffer size */
int *ksnd_nagle; /* enable NAGLE? */
int *ksnd_round_robin; /* round robin for multiple interfaces */
int *ksnd_keepalive; /* # secs for sending keepalive NOOP */
int *ksnd_keepalive_idle; /* # idle secs before 1st probe */
int *ksnd_keepalive_count; /* # probes */
int *ksnd_keepalive_intvl; /* time between probes */
int *ksnd_credits; /* # concurrent sends */
int *ksnd_peertxcredits; /* # concurrent sends to 1 peer */
int *ksnd_peerrtrcredits; /* # per-peer router buffer credits */
int *ksnd_peertimeout; /* seconds to consider peer dead */
int *ksnd_enable_csum; /* enable check sum */
int *ksnd_inject_csum_error; /* set non-zero to inject checksum error */
int *ksnd_nonblk_zcack; /* always send zc-ack on non-blocking connection */
unsigned int *ksnd_zc_min_payload; /* minimum zero copy payload size */
int *ksnd_zc_recv; /* enable ZC receive (for Chelsio TOE) */
int *ksnd_zc_recv_min_nfrags; /* minimum # of fragments to enable ZC receive */
int *ksnd_timeout; /* "stuck" socket timeout
* (seconds) */
int *ksnd_nscheds; /* # scheduler threads in each
* pool while starting */
int *ksnd_nconnds; /* # connection daemons */
int *ksnd_nconnds_max; /* max # connection daemons */
int *ksnd_min_reconnectms; /* first connection retry after
* (ms)... */
int *ksnd_max_reconnectms; /* ...exponentially increasing to
* this */
int *ksnd_eager_ack; /* make TCP ack eagerly? */
int *ksnd_typed_conns; /* drive sockets by type? */
int *ksnd_min_bulk; /* smallest "large" message */
int *ksnd_tx_buffer_size; /* socket tx buffer size */
int *ksnd_rx_buffer_size; /* socket rx buffer size */
int *ksnd_nagle; /* enable NAGLE? */
int *ksnd_round_robin; /* round robin for multiple
* interfaces */
int *ksnd_keepalive; /* # secs for sending keepalive
* NOOP */
int *ksnd_keepalive_idle; /* # idle secs before 1st probe
*/
int *ksnd_keepalive_count; /* # probes */
int *ksnd_keepalive_intvl; /* time between probes */
int *ksnd_credits; /* # concurrent sends */
int *ksnd_peertxcredits; /* # concurrent sends to 1 peer
*/
int *ksnd_peerrtrcredits; /* # per-peer router buffer
* credits */
int *ksnd_peertimeout; /* seconds to consider peer dead
*/
int *ksnd_enable_csum; /* enable check sum */
int *ksnd_inject_csum_error; /* set non-zero to inject
* checksum error */
int *ksnd_nonblk_zcack; /* always send zc-ack on
* non-blocking connection */
unsigned int *ksnd_zc_min_payload; /* minimum zero copy payload
* size */
int *ksnd_zc_recv; /* enable ZC receive (for
* Chelsio TOE) */
int *ksnd_zc_recv_min_nfrags; /* minimum # of fragments to
* enable ZC receive */
} ksock_tunables_t;
typedef struct {
@@ -141,55 +152,67 @@ typedef struct {
#define SOCKNAL_CONND_RESV 1
typedef struct {
int ksnd_init; /* initialisation state */
int ksnd_nnets; /* # networks set up */
struct list_head ksnd_nets; /* list of nets */
/* stabilize peer/conn ops */
rwlock_t ksnd_global_lock;
/* hash table of all my known peers */
struct list_head *ksnd_peers;
int ksnd_peer_hash_size; /* size of ksnd_peers */
int ksnd_nthreads; /* # live threads */
int ksnd_shuttingdown; /* tell threads to exit */
/* schedulers information */
struct ksock_sched_info **ksnd_sched_info;
atomic_t ksnd_nactive_txs; /* #active txs */
struct list_head ksnd_deathrow_conns; /* conns to close: reaper_lock*/
struct list_head ksnd_zombie_conns; /* conns to free: reaper_lock */
struct list_head ksnd_enomem_conns; /* conns to retry: reaper_lock*/
wait_queue_head_t ksnd_reaper_waitq; /* reaper sleeps here */
unsigned long ksnd_reaper_waketime;/* when reaper will wake */
spinlock_t ksnd_reaper_lock; /* serialise */
int ksnd_enomem_tx; /* test ENOMEM sender */
int ksnd_stall_tx; /* test sluggish sender */
int ksnd_stall_rx; /* test sluggish receiver */
struct list_head ksnd_connd_connreqs; /* incoming connection requests */
struct list_head ksnd_connd_routes; /* routes waiting to be connected */
wait_queue_head_t ksnd_connd_waitq; /* connds sleep here */
int ksnd_connd_connecting;/* # connds connecting */
/** time stamp of the last failed connecting attempt */
long ksnd_connd_failed_stamp;
/** # starting connd */
unsigned ksnd_connd_starting;
/** time stamp of the last starting connd */
long ksnd_connd_starting_stamp;
/** # running connd */
unsigned ksnd_connd_running;
spinlock_t ksnd_connd_lock; /* serialise */
struct list_head ksnd_idle_noop_txs; /* list head for freed noop tx */
spinlock_t ksnd_tx_lock; /* serialise, g_lock unsafe */
int ksnd_init; /* initialisation state
*/
int ksnd_nnets; /* # networks set up */
struct list_head ksnd_nets; /* list of nets */
rwlock_t ksnd_global_lock; /* stabilize peer/conn
* ops */
struct list_head *ksnd_peers; /* hash table of all my
* known peers */
int ksnd_peer_hash_size; /* size of ksnd_peers */
int ksnd_nthreads; /* # live threads */
int ksnd_shuttingdown; /* tell threads to exit
*/
struct ksock_sched_info **ksnd_sched_info; /* schedulers info */
atomic_t ksnd_nactive_txs; /* #active txs */
struct list_head ksnd_deathrow_conns; /* conns to close:
* reaper_lock*/
struct list_head ksnd_zombie_conns; /* conns to free:
* reaper_lock */
struct list_head ksnd_enomem_conns; /* conns to retry:
* reaper_lock*/
wait_queue_head_t ksnd_reaper_waitq; /* reaper sleeps here */
unsigned long ksnd_reaper_waketime; /* when reaper will wake
*/
spinlock_t ksnd_reaper_lock; /* serialise */
int ksnd_enomem_tx; /* test ENOMEM sender */
int ksnd_stall_tx; /* test sluggish sender
*/
int ksnd_stall_rx; /* test sluggish
* receiver */
struct list_head ksnd_connd_connreqs; /* incoming connection
* requests */
struct list_head ksnd_connd_routes; /* routes waiting to be
* connected */
wait_queue_head_t ksnd_connd_waitq; /* connds sleep here */
int ksnd_connd_connecting; /* # connds connecting
*/
long ksnd_connd_failed_stamp;/* time stamp of the
* last failed
* connecting attempt */
unsigned ksnd_connd_starting; /* # starting connd */
long ksnd_connd_starting_stamp;/* time stamp of the
* last starting connd
*/
unsigned ksnd_connd_running; /* # running connd */
spinlock_t ksnd_connd_lock; /* serialise */
struct list_head ksnd_idle_noop_txs; /* list head for freed
* noop tx */
spinlock_t ksnd_tx_lock; /* serialise, g_lock
* unsafe */
} ksock_nal_data_t;
#define SOCKNAL_INIT_NOTHING 0
#define SOCKNAL_INIT_DATA 1
#define SOCKNAL_INIT_ALL 2
#define SOCKNAL_INIT_NOTHING 0
#define SOCKNAL_INIT_DATA 1
#define SOCKNAL_INIT_ALL 2
/* A packet just assembled for transmission is represented by 1 or more
* struct iovec fragments (the first frag contains the portals header),
@@ -200,43 +223,45 @@ typedef struct {
* received into either struct iovec or lnet_kiov_t fragments, depending on
* what the header matched or whether the message needs forwarding. */
struct ksock_conn; /* forward ref */
struct ksock_peer; /* forward ref */
struct ksock_route; /* forward ref */
struct ksock_proto; /* forward ref */
struct ksock_conn; /* forward ref */
struct ksock_peer; /* forward ref */
struct ksock_route; /* forward ref */
struct ksock_proto; /* forward ref */
typedef struct /* transmit packet */
typedef struct /* transmit packet */
{
struct list_head tx_list; /* queue on conn for transmission etc */
struct list_head tx_zc_list; /* queue on peer for ZC request */
atomic_t tx_refcount; /* tx reference count */
int tx_nob; /* # packet bytes */
int tx_resid; /* residual bytes */
int tx_niov; /* # packet iovec frags */
struct kvec *tx_iov; /* packet iovec frags */
int tx_nkiov; /* # packet page frags */
unsigned short tx_zc_aborted; /* aborted ZC request */
unsigned short tx_zc_capable:1; /* payload is large enough for ZC */
unsigned short tx_zc_checked:1; /* Have I checked if I should ZC? */
unsigned short tx_nonblk:1; /* it's a non-blocking ACK */
lnet_kiov_t *tx_kiov; /* packet page frags */
struct ksock_conn *tx_conn; /* owning conn */
lnet_msg_t *tx_lnetmsg; /* lnet message for lnet_finalize() */
unsigned long tx_deadline; /* when (in jiffies) tx times out */
ksock_msg_t tx_msg; /* socklnd message buffer */
int tx_desc_size; /* size of this descriptor */
struct list_head tx_list; /* queue on conn for transmission etc
*/
struct list_head tx_zc_list; /* queue on peer for ZC request */
atomic_t tx_refcount; /* tx reference count */
int tx_nob; /* # packet bytes */
int tx_resid; /* residual bytes */
int tx_niov; /* # packet iovec frags */
struct kvec *tx_iov; /* packet iovec frags */
int tx_nkiov; /* # packet page frags */
unsigned short tx_zc_aborted; /* aborted ZC request */
unsigned short tx_zc_capable:1; /* payload is large enough for ZC */
unsigned short tx_zc_checked:1; /* Have I checked if I should ZC? */
unsigned short tx_nonblk:1; /* it's a non-blocking ACK */
lnet_kiov_t *tx_kiov; /* packet page frags */
struct ksock_conn *tx_conn; /* owning conn */
lnet_msg_t *tx_lnetmsg; /* lnet message for lnet_finalize()
*/
unsigned long tx_deadline; /* when (in jiffies) tx times out */
ksock_msg_t tx_msg; /* socklnd message buffer */
int tx_desc_size; /* size of this descriptor */
union {
struct {
struct kvec iov; /* virt hdr */
lnet_kiov_t kiov[0]; /* paged payload */
} paged;
struct kvec iov; /* virt hdr */
lnet_kiov_t kiov[0]; /* paged payload */
} paged;
struct {
struct kvec iov[1]; /* virt hdr + payload */
} virt;
} tx_frags;
struct kvec iov[1]; /* virt hdr + payload */
} virt;
} tx_frags;
} ksock_tx_t;
#define KSOCK_NOOP_TX_SIZE ((int)offsetof(ksock_tx_t, tx_frags.paged.kiov[0]))
#define KSOCK_NOOP_TX_SIZE ((int)offsetof(ksock_tx_t, tx_frags.paged.kiov[0]))
/* network zero copy callback descriptor embedded in ksock_tx_t */
@@ -247,148 +272,189 @@ typedef union {
lnet_kiov_t kiov[LNET_MAX_IOV];
} ksock_rxiovspace_t;
#define SOCKNAL_RX_KSM_HEADER 1 /* reading ksock message header */
#define SOCKNAL_RX_LNET_HEADER 2 /* reading lnet message header */
#define SOCKNAL_RX_PARSE 3 /* Calling lnet_parse() */
#define SOCKNAL_RX_PARSE_WAIT 4 /* waiting to be told to read the body */
#define SOCKNAL_RX_LNET_PAYLOAD 5 /* reading lnet payload (to deliver here) */
#define SOCKNAL_RX_SLOP 6 /* skipping body */
#define SOCKNAL_RX_KSM_HEADER 1 /* reading ksock message header */
#define SOCKNAL_RX_LNET_HEADER 2 /* reading lnet message header */
#define SOCKNAL_RX_PARSE 3 /* Calling lnet_parse() */
#define SOCKNAL_RX_PARSE_WAIT 4 /* waiting to be told to read the body */
#define SOCKNAL_RX_LNET_PAYLOAD 5 /* reading lnet payload (to deliver here) */
#define SOCKNAL_RX_SLOP 6 /* skipping body */
typedef struct ksock_conn {
struct ksock_peer *ksnc_peer; /* owning peer */
struct ksock_route *ksnc_route; /* owning route */
struct list_head ksnc_list; /* stash on peer's conn list */
struct socket *ksnc_sock; /* actual socket */
void *ksnc_saved_data_ready; /* socket's original data_ready() callback */
void *ksnc_saved_write_space; /* socket's original write_space() callback */
atomic_t ksnc_conn_refcount; /* conn refcount */
atomic_t ksnc_sock_refcount; /* sock refcount */
ksock_sched_t *ksnc_scheduler; /* who schedules this connection */
__u32 ksnc_myipaddr; /* my IP */
__u32 ksnc_ipaddr; /* peer's IP */
int ksnc_port; /* peer's port */
signed int ksnc_type:3; /* type of connection,
* should be signed value */
unsigned int ksnc_closing:1; /* being shut down */
unsigned int ksnc_flip:1; /* flip or not, only for V2.x */
unsigned int ksnc_zc_capable:1; /* enable to ZC */
struct ksock_proto *ksnc_proto; /* protocol for the connection */
struct ksock_peer *ksnc_peer; /* owning peer */
struct ksock_route *ksnc_route; /* owning route */
struct list_head ksnc_list; /* stash on peer's conn list */
struct socket *ksnc_sock; /* actual socket */
void *ksnc_saved_data_ready; /* socket's original
* data_ready() callback */
void *ksnc_saved_write_space; /* socket's original
* write_space() callback */
atomic_t ksnc_conn_refcount;/* conn refcount */
atomic_t ksnc_sock_refcount;/* sock refcount */
ksock_sched_t *ksnc_scheduler; /* who schedules this connection
*/
__u32 ksnc_myipaddr; /* my IP */
__u32 ksnc_ipaddr; /* peer's IP */
int ksnc_port; /* peer's port */
signed int ksnc_type:3; /* type of connection, should be
* signed value */
unsigned int ksnc_closing:1; /* being shut down */
unsigned int ksnc_flip:1; /* flip or not, only for V2.x */
unsigned int ksnc_zc_capable:1; /* enable to ZC */
struct ksock_proto *ksnc_proto; /* protocol for the connection */
/* reader */
struct list_head ksnc_rx_list; /* where I enq waiting input or a forwarding descriptor */
unsigned long ksnc_rx_deadline; /* when (in jiffies) receive times out */
__u8 ksnc_rx_started; /* started receiving a message */
__u8 ksnc_rx_ready; /* data ready to read */
__u8 ksnc_rx_scheduled;/* being progressed */
__u8 ksnc_rx_state; /* what is being read */
int ksnc_rx_nob_left; /* # bytes to next hdr/body */
int ksnc_rx_nob_wanted; /* bytes actually wanted */
int ksnc_rx_niov; /* # iovec frags */
struct kvec *ksnc_rx_iov; /* the iovec frags */
int ksnc_rx_nkiov; /* # page frags */
lnet_kiov_t *ksnc_rx_kiov; /* the page frags */
ksock_rxiovspace_t ksnc_rx_iov_space;/* space for frag descriptors */
__u32 ksnc_rx_csum; /* partial checksum for incoming data */
void *ksnc_cookie; /* rx lnet_finalize passthru arg */
ksock_msg_t ksnc_msg; /* incoming message buffer:
* V2.x message takes the
* whole struct
* V1.x message is a bare
* lnet_hdr_t, it's stored in
* ksnc_msg.ksm_u.lnetmsg */
struct list_head ksnc_rx_list; /* where I enq waiting input or a
* forwarding descriptor */
unsigned long ksnc_rx_deadline; /* when (in jiffies) receive times
* out */
__u8 ksnc_rx_started; /* started receiving a message */
__u8 ksnc_rx_ready; /* data ready to read */
__u8 ksnc_rx_scheduled; /* being progressed */
__u8 ksnc_rx_state; /* what is being read */
int ksnc_rx_nob_left; /* # bytes to next hdr/body */
int ksnc_rx_nob_wanted;/* bytes actually wanted */
int ksnc_rx_niov; /* # iovec frags */
struct kvec *ksnc_rx_iov; /* the iovec frags */
int ksnc_rx_nkiov; /* # page frags */
lnet_kiov_t *ksnc_rx_kiov; /* the page frags */
ksock_rxiovspace_t ksnc_rx_iov_space; /* space for frag descriptors */
__u32 ksnc_rx_csum; /* partial checksum for incoming
* data */
void *ksnc_cookie; /* rx lnet_finalize passthru arg
*/
ksock_msg_t ksnc_msg; /* incoming message buffer:
* V2.x message takes the
* whole struct
* V1.x message is a bare
* lnet_hdr_t, it's stored in
* ksnc_msg.ksm_u.lnetmsg */
/* WRITER */
struct list_head ksnc_tx_list; /* where I enq waiting for output space */
struct list_head ksnc_tx_queue; /* packets waiting to be sent */
ksock_tx_t *ksnc_tx_carrier; /* next TX that can carry a LNet message or ZC-ACK */
unsigned long ksnc_tx_deadline; /* when (in jiffies) tx times out */
int ksnc_tx_bufnob; /* send buffer marker */
atomic_t ksnc_tx_nob; /* # bytes queued */
int ksnc_tx_ready; /* write space */
int ksnc_tx_scheduled; /* being progressed */
unsigned long ksnc_tx_last_post; /* time stamp of the last posted TX */
struct list_head ksnc_tx_list; /* where I enq waiting for output
* space */
struct list_head ksnc_tx_queue; /* packets waiting to be sent */
ksock_tx_t *ksnc_tx_carrier; /* next TX that can carry a LNet
* message or ZC-ACK */
unsigned long ksnc_tx_deadline; /* when (in jiffies) tx times out
*/
int ksnc_tx_bufnob; /* send buffer marker */
atomic_t ksnc_tx_nob; /* # bytes queued */
int ksnc_tx_ready; /* write space */
int ksnc_tx_scheduled; /* being progressed */
unsigned long ksnc_tx_last_post; /* time stamp of the last posted
* TX */
} ksock_conn_t;
typedef struct ksock_route {
struct list_head ksnr_list; /* chain on peer route list */
struct list_head ksnr_connd_list; /* chain on ksnr_connd_routes */
struct ksock_peer *ksnr_peer; /* owning peer */
atomic_t ksnr_refcount; /* # users */
unsigned long ksnr_timeout; /* when (in jiffies) reconnection can happen next */
long ksnr_retry_interval; /* how long between retries */
__u32 ksnr_myipaddr; /* my IP */
__u32 ksnr_ipaddr; /* IP address to connect to */
int ksnr_port; /* port to connect to */
unsigned int ksnr_scheduled:1; /* scheduled for attention */
unsigned int ksnr_connecting:1;/* connection establishment in progress */
unsigned int ksnr_connected:4; /* connections established by type */
unsigned int ksnr_deleted:1; /* been removed from peer? */
unsigned int ksnr_share_count; /* created explicitly? */
int ksnr_conn_count; /* # conns established by this route */
struct list_head ksnr_list; /* chain on peer route list */
struct list_head ksnr_connd_list; /* chain on ksnr_connd_routes */
struct ksock_peer *ksnr_peer; /* owning peer */
atomic_t ksnr_refcount; /* # users */
unsigned long ksnr_timeout; /* when (in jiffies) reconnection
* can happen next */
long ksnr_retry_interval; /* how long between retries */
__u32 ksnr_myipaddr; /* my IP */
__u32 ksnr_ipaddr; /* IP address to connect to */
int ksnr_port; /* port to connect to */
unsigned int ksnr_scheduled:1; /* scheduled for attention */
unsigned int ksnr_connecting:1; /* connection establishment in
* progress */
unsigned int ksnr_connected:4; /* connections established by
* type */
unsigned int ksnr_deleted:1; /* been removed from peer? */
unsigned int ksnr_share_count; /* created explicitly? */
int ksnr_conn_count; /* # conns established by this
* route */
} ksock_route_t;
#define SOCKNAL_KEEPALIVE_PING 1 /* cookie for keepalive ping */
#define SOCKNAL_KEEPALIVE_PING 1 /* cookie for keepalive ping */
typedef struct ksock_peer {
struct list_head ksnp_list; /* stash on global peer list */
unsigned long ksnp_last_alive; /* when (in jiffies) I was last alive */
lnet_process_id_t ksnp_id; /* who's on the other end(s) */
atomic_t ksnp_refcount; /* # users */
int ksnp_sharecount; /* lconf usage counter */
int ksnp_closing; /* being closed */
int ksnp_accepting;/* # passive connections pending */
int ksnp_error; /* errno on closing last conn */
__u64 ksnp_zc_next_cookie;/* ZC completion cookie */
__u64 ksnp_incarnation; /* latest known peer incarnation */
struct ksock_proto *ksnp_proto; /* latest known peer protocol */
struct list_head ksnp_conns; /* all active connections */
struct list_head ksnp_routes; /* routes */
struct list_head ksnp_tx_queue; /* waiting packets */
spinlock_t ksnp_lock; /* serialize, g_lock unsafe */
struct list_head ksnp_zc_req_list; /* zero copy requests wait for ACK */
unsigned long ksnp_send_keepalive; /* time to send keepalive */
lnet_ni_t *ksnp_ni; /* which network */
int ksnp_n_passive_ips; /* # of... */
__u32 ksnp_passive_ips[LNET_MAX_INTERFACES]; /* preferred local interfaces */
struct list_head ksnp_list; /* stash on global peer list */
unsigned long ksnp_last_alive; /* when (in jiffies) I was last
* alive */
lnet_process_id_t ksnp_id; /* who's on the other end(s) */
atomic_t ksnp_refcount; /* # users */
int ksnp_sharecount; /* lconf usage counter */
int ksnp_closing; /* being closed */
int ksnp_accepting; /* # passive connections pending
*/
int ksnp_error; /* errno on closing last conn */
__u64 ksnp_zc_next_cookie; /* ZC completion cookie */
__u64 ksnp_incarnation; /* latest known peer incarnation
*/
struct ksock_proto *ksnp_proto; /* latest known peer protocol */
struct list_head ksnp_conns; /* all active connections */
struct list_head ksnp_routes; /* routes */
struct list_head ksnp_tx_queue; /* waiting packets */
spinlock_t ksnp_lock; /* serialize, g_lock unsafe */
struct list_head ksnp_zc_req_list; /* zero copy requests wait for
* ACK */
unsigned long ksnp_send_keepalive; /* time to send keepalive */
lnet_ni_t *ksnp_ni; /* which network */
int ksnp_n_passive_ips; /* # of... */
/* preferred local interfaces */
__u32 ksnp_passive_ips[LNET_MAX_INTERFACES];
} ksock_peer_t;
typedef struct ksock_connreq {
struct list_head ksncr_list; /* stash on ksnd_connd_connreqs */
lnet_ni_t *ksncr_ni; /* chosen NI */
struct socket *ksncr_sock; /* accepted socket */
struct list_head ksncr_list; /* stash on ksnd_connd_connreqs */
lnet_ni_t *ksncr_ni; /* chosen NI */
struct socket *ksncr_sock; /* accepted socket */
} ksock_connreq_t;
extern ksock_nal_data_t ksocknal_data;
extern ksock_tunables_t ksocknal_tunables;
#define SOCKNAL_MATCH_NO 0 /* TX can't match type of connection */
#define SOCKNAL_MATCH_YES 1 /* TX matches type of connection */
#define SOCKNAL_MATCH_MAY 2 /* TX can be sent on the connection, but not preferred */
#define SOCKNAL_MATCH_NO 0 /* TX can't match type of connection */
#define SOCKNAL_MATCH_YES 1 /* TX matches type of connection */
#define SOCKNAL_MATCH_MAY 2 /* TX can be sent on the connection, but not
* preferred */
typedef struct ksock_proto {
int pro_version; /* version number of protocol */
int (*pro_send_hello)(ksock_conn_t *, ksock_hello_msg_t *); /* handshake function */
int (*pro_recv_hello)(ksock_conn_t *, ksock_hello_msg_t *, int);/* handshake function */
void (*pro_pack)(ksock_tx_t *); /* message pack */
void (*pro_unpack)(ksock_msg_t *); /* message unpack */
ksock_tx_t *(*pro_queue_tx_msg)(ksock_conn_t *, ksock_tx_t *); /* queue tx on the connection */
int (*pro_queue_tx_zcack)(ksock_conn_t *, ksock_tx_t *, __u64); /* queue ZC ack on the connection */
int (*pro_handle_zcreq)(ksock_conn_t *, __u64, int); /* handle ZC request */
int (*pro_handle_zcack)(ksock_conn_t *, __u64, __u64); /* handle ZC ACK */
int (*pro_match_tx)(ksock_conn_t *, ksock_tx_t *, int); /* msg type matches the connection type:
* return value:
* return MATCH_NO : no
* return MATCH_YES : matching type
* return MATCH_MAY : can be backup */
/* version number of protocol */
int pro_version;
/* handshake function */
int (*pro_send_hello)(ksock_conn_t *, ksock_hello_msg_t *);
/* handshake function */
int (*pro_recv_hello)(ksock_conn_t *, ksock_hello_msg_t *, int);
/* message pack */
void (*pro_pack)(ksock_tx_t *);
/* message unpack */
void (*pro_unpack)(ksock_msg_t *);
/* queue tx on the connection */
ksock_tx_t *(*pro_queue_tx_msg)(ksock_conn_t *, ksock_tx_t *);
/* queue ZC ack on the connection */
int (*pro_queue_tx_zcack)(ksock_conn_t *, ksock_tx_t *, __u64);
/* handle ZC request */
int (*pro_handle_zcreq)(ksock_conn_t *, __u64, int);
/* handle ZC ACK */
int (*pro_handle_zcack)(ksock_conn_t *, __u64, __u64);
/* msg type matches the connection type:
* return value:
* return MATCH_NO : no
* return MATCH_YES : matching type
* return MATCH_MAY : can be backup */
int (*pro_match_tx)(ksock_conn_t *, ksock_tx_t *, int);
} ksock_proto_t;
extern ksock_proto_t ksocknal_protocol_v1x;
extern ksock_proto_t ksocknal_protocol_v2x;
extern ksock_proto_t ksocknal_protocol_v3x;
#define KSOCK_PROTO_V1_MAJOR LNET_PROTO_TCP_VERSION_MAJOR
#define KSOCK_PROTO_V1_MINOR LNET_PROTO_TCP_VERSION_MINOR
#define KSOCK_PROTO_V1 KSOCK_PROTO_V1_MAJOR
#define KSOCK_PROTO_V1_MAJOR LNET_PROTO_TCP_VERSION_MAJOR
#define KSOCK_PROTO_V1_MINOR LNET_PROTO_TCP_VERSION_MINOR
#define KSOCK_PROTO_V1 KSOCK_PROTO_V1_MAJOR
#ifndef CPU_MASK_NONE
#define CPU_MASK_NONE 0UL
@@ -434,7 +500,7 @@ ksocknal_conn_decref(ksock_conn_t *conn)
static inline int
ksocknal_connsock_addref(ksock_conn_t *conn)
{
int rc = -ESHUTDOWN;
int rc = -ESHUTDOWN;
read_lock(&ksocknal_data.ksnd_global_lock);
if (!conn->ksnc_closing) {
drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
@@ -75,13 +75,13 @@ ksocknal_alloc_tx_noop(__u64 cookie, int nonblk)
return NULL;
}
tx->tx_conn = NULL;
tx->tx_lnetmsg = NULL;
tx->tx_kiov = NULL;
tx->tx_nkiov = 0;
tx->tx_iov = tx->tx_frags.virt.iov;
tx->tx_niov = 1;
tx->tx_nonblk = nonblk;
tx->tx_conn = NULL;
tx->tx_lnetmsg = NULL;
tx->tx_kiov = NULL;
tx->tx_nkiov = 0;
tx->tx_iov = tx->tx_frags.virt.iov;
tx->tx_niov = 1;
tx->tx_nonblk = nonblk;
socklnd_init_msg(&tx->tx_msg, KSOCK_MSG_NOOP);
tx->tx_msg.ksm_zc_cookies[1] = cookie;
@@ -110,11 +110,11 @@ ksocknal_free_tx (ksock_tx_t *tx)
static int
ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
{
struct kvec *iov = tx->tx_iov;
int nob;
int rc;
struct kvec *iov = tx->tx_iov;
int nob;
int rc;
LASSERT (tx->tx_niov > 0);
LASSERT(tx->tx_niov > 0);
/* Never touch tx->tx_iov inside ksocknal_lib_send_iov() */
rc = ksocknal_lib_send_iov(conn, tx);
@@ -128,7 +128,7 @@ ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
/* "consume" iov */
do {
LASSERT (tx->tx_niov > 0);
LASSERT(tx->tx_niov > 0);
if (nob < (int) iov->iov_len) {
iov->iov_base = (void *)((char *)iov->iov_base + nob);
@@ -147,12 +147,12 @@ ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
static int
ksocknal_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
{
lnet_kiov_t *kiov = tx->tx_kiov;
int nob;
int rc;
lnet_kiov_t *kiov = tx->tx_kiov;
int nob;
int rc;
LASSERT (tx->tx_niov == 0);
LASSERT (tx->tx_nkiov > 0);
LASSERT(tx->tx_niov == 0);
LASSERT(tx->tx_nkiov > 0);
/* Never touch tx->tx_kiov inside ksocknal_lib_send_kiov() */
rc = ksocknal_lib_send_kiov(conn, tx);
@@ -185,15 +185,15 @@ ksocknal_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
static int
ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
{
int rc;
int bufnob;
int rc;
int bufnob;
if (ksocknal_data.ksnd_stall_tx != 0) {
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_tx));
}
LASSERT (tx->tx_resid != 0);
LASSERT(tx->tx_resid != 0);
rc = ksocknal_connsock_addref(conn);
if (rc != 0) {
@@ -252,10 +252,10 @@ static int
ksocknal_recv_iov (ksock_conn_t *conn)
{
struct kvec *iov = conn->ksnc_rx_iov;
int nob;
int rc;
int nob;
int rc;
LASSERT (conn->ksnc_rx_niov > 0);
LASSERT(conn->ksnc_rx_niov > 0);
/* Never touch conn->ksnc_rx_iov or change connection
* status inside ksocknal_lib_recv_iov */
@@ -277,7 +277,7 @@ ksocknal_recv_iov (ksock_conn_t *conn)
conn->ksnc_rx_nob_left -= nob;
do {
LASSERT (conn->ksnc_rx_niov > 0);
LASSERT(conn->ksnc_rx_niov > 0);
if (nob < (int)iov->iov_len) {
iov->iov_len -= nob;
@@ -296,10 +296,10 @@ ksocknal_recv_iov (ksock_conn_t *conn)
static int
ksocknal_recv_kiov (ksock_conn_t *conn)
{
lnet_kiov_t *kiov = conn->ksnc_rx_kiov;
int nob;
int rc;
LASSERT (conn->ksnc_rx_nkiov > 0);
lnet_kiov_t *kiov = conn->ksnc_rx_kiov;
int nob;
int rc;
LASSERT(conn->ksnc_rx_nkiov > 0);
/* Never touch conn->ksnc_rx_kiov or change connection
* status inside ksocknal_lib_recv_iov */
@@ -321,7 +321,7 @@ ksocknal_recv_kiov (ksock_conn_t *conn)
conn->ksnc_rx_nob_left -= nob;
do {
LASSERT (conn->ksnc_rx_nkiov > 0);
LASSERT(conn->ksnc_rx_nkiov > 0);
if (nob < (int) kiov->kiov_len) {
kiov->kiov_offset += nob;
@@ -343,7 +343,7 @@ ksocknal_receive (ksock_conn_t *conn)
/* Return 1 on success, 0 on EOF, < 0 on error.
* Caller checks ksnc_rx_nob_wanted to determine
* progress/completion. */
int rc;
int rc;
if (ksocknal_data.ksnd_stall_rx != 0) {
set_current_state(TASK_UNINTERRUPTIBLE);
@@ -388,8 +388,8 @@ ksocknal_receive (ksock_conn_t *conn)
void
ksocknal_tx_done (lnet_ni_t *ni, ksock_tx_t *tx)
{
lnet_msg_t *lnetmsg = tx->tx_lnetmsg;
int rc = (tx->tx_resid == 0 && !tx->tx_zc_aborted) ? 0 : -EIO;
lnet_msg_t *lnetmsg = tx->tx_lnetmsg;
int rc = (tx->tx_resid == 0 && !tx->tx_zc_aborted) ? 0 : -EIO;
LASSERT(ni != NULL || tx->tx_conn != NULL);
@@ -410,7 +410,7 @@ ksocknal_txlist_done (lnet_ni_t *ni, struct list_head *txlist, int error)
ksock_tx_t *tx;
while (!list_empty (txlist)) {
tx = list_entry (txlist->next, ksock_tx_t, tx_list);
tx = list_entry(txlist->next, ksock_tx_t, tx_list);
if (error && tx->tx_lnetmsg != NULL) {
CNETERR("Deleting packet type %d len %d %s->%s\n",
@@ -422,18 +422,18 @@ ksocknal_txlist_done (lnet_ni_t *ni, struct list_head *txlist, int error)
CNETERR("Deleting noop packet\n");
}
list_del (&tx->tx_list);
list_del(&tx->tx_list);
LASSERT (atomic_read(&tx->tx_refcount) == 1);
ksocknal_tx_done (ni, tx);
LASSERT(atomic_read(&tx->tx_refcount) == 1);
ksocknal_tx_done(ni, tx);
}
}
static void
ksocknal_check_zc_req(ksock_tx_t *tx)
{
ksock_conn_t *conn = tx->tx_conn;
ksock_peer_t *peer = conn->ksnc_peer;
ksock_conn_t *conn = tx->tx_conn;
ksock_peer_t *peer = conn->ksnc_peer;
/* Set tx_msg.ksm_zc_cookies[0] to a unique non-zero cookie and add tx
* to ksnp_zc_req_list if some fragment of this message should be sent
@@ -441,8 +441,8 @@ ksocknal_check_zc_req(ksock_tx_t *tx)
* she has received this message to tell us we can signal completion.
* tx_msg.ksm_zc_cookies[0] remains non-zero while tx is on
* ksnp_zc_req_list. */
LASSERT (tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
LASSERT (tx->tx_zc_capable);
LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
LASSERT(tx->tx_zc_capable);
tx->tx_zc_checked = 1;
@@ -461,7 +461,7 @@ ksocknal_check_zc_req(ksock_tx_t *tx)
tx->tx_deadline =
cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
LASSERT (tx->tx_msg.ksm_zc_cookies[0] == 0);
LASSERT(tx->tx_msg.ksm_zc_cookies[0] == 0);
tx->tx_msg.ksm_zc_cookies[0] = peer->ksnp_zc_next_cookie++;
@@ -476,7 +476,7 @@ ksocknal_check_zc_req(ksock_tx_t *tx)
static void
ksocknal_uncheck_zc_req(ksock_tx_t *tx)
{
ksock_peer_t *peer = tx->tx_conn->ksnc_peer;
ksock_peer_t *peer = tx->tx_conn->ksnc_peer;
LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
LASSERT(tx->tx_zc_capable);
@@ -502,14 +502,14 @@ ksocknal_uncheck_zc_req(ksock_tx_t *tx)
static int
ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
{
int rc;
int rc;
if (tx->tx_zc_capable && !tx->tx_zc_checked)
ksocknal_check_zc_req(tx);
rc = ksocknal_transmit (conn, tx);
CDEBUG (D_NET, "send(%d) %d\n", tx->tx_resid, rc);
CDEBUG(D_NET, "send(%d) %d\n", tx->tx_resid, rc);
if (tx->tx_resid == 0) {
/* Sent everything OK */
@@ -546,7 +546,7 @@ ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
}
/* Actual error */
LASSERT (rc < 0);
LASSERT(rc < 0);
if (!conn->ksnc_closing) {
switch (rc) {
@@ -582,9 +582,9 @@ ksocknal_launch_connection_locked (ksock_route_t *route)
/* called holding write lock on ksnd_global_lock */
LASSERT (!route->ksnr_scheduled);
LASSERT (!route->ksnr_connecting);
LASSERT ((ksocknal_route_mask() & ~route->ksnr_connected) != 0);
LASSERT(!route->ksnr_scheduled);
LASSERT(!route->ksnr_connecting);
LASSERT((ksocknal_route_mask() & ~route->ksnr_connected) != 0);
route->ksnr_scheduled = 1; /* scheduling conn for connd */
ksocknal_route_addref(route); /* extra ref for connd */
@@ -617,22 +617,22 @@ ksocknal_launch_all_connections_locked (ksock_peer_t *peer)
ksock_conn_t *
ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk)
{
struct list_head *tmp;
ksock_conn_t *conn;
ksock_conn_t *typed = NULL;
ksock_conn_t *fallback = NULL;
int tnob = 0;
int fnob = 0;
struct list_head *tmp;
ksock_conn_t *conn;
ksock_conn_t *typed = NULL;
ksock_conn_t *fallback = NULL;
int tnob = 0;
int fnob = 0;
list_for_each (tmp, &peer->ksnp_conns) {
ksock_conn_t *c = list_entry(tmp, ksock_conn_t, ksnc_list);
int nob = atomic_read(&c->ksnc_tx_nob) +
c->ksnc_sock->sk->sk_wmem_queued;
int rc;
int nob = atomic_read(&c->ksnc_tx_nob) +
c->ksnc_sock->sk->sk_wmem_queued;
int rc;
LASSERT (!c->ksnc_closing);
LASSERT (c->ksnc_proto != NULL &&
c->ksnc_proto->pro_match_tx != NULL);
LASSERT(!c->ksnc_closing);
LASSERT(c->ksnc_proto != NULL &&
c->ksnc_proto->pro_match_tx != NULL);
rc = c->ksnc_proto->pro_match_tx(c, tx, nonblk);
......@@ -656,7 +656,7 @@ ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk)
(fnob == nob && *ksocknal_tunables.ksnd_round_robin &&
cfs_time_after(fallback->ksnc_tx_last_post, c->ksnc_tx_last_post))) {
fallback = c;
fnob = nob;
fnob = nob;
}
break;
}
......@@ -685,9 +685,9 @@ void
ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
{
ksock_sched_t *sched = conn->ksnc_scheduler;
ksock_msg_t *msg = &tx->tx_msg;
ksock_tx_t *ztx = NULL;
int bufnob = 0;
ksock_msg_t *msg = &tx->tx_msg;
ksock_tx_t *ztx = NULL;
int bufnob = 0;
/* called holding global lock (read or irq-write) and caller may
* not have dropped this lock between finding conn and calling me,
......@@ -708,11 +708,11 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
*
* We always expect at least 1 mapped fragment containing the
* complete ksocknal message header. */
LASSERT (lnet_iov_nob (tx->tx_niov, tx->tx_iov) +
lnet_kiov_nob(tx->tx_nkiov, tx->tx_kiov) ==
(unsigned int)tx->tx_nob);
LASSERT (tx->tx_niov >= 1);
LASSERT (tx->tx_resid == tx->tx_nob);
LASSERT(lnet_iov_nob (tx->tx_niov, tx->tx_iov) +
lnet_kiov_nob(tx->tx_nkiov, tx->tx_kiov) ==
(unsigned int)tx->tx_nob);
LASSERT(tx->tx_niov >= 1);
LASSERT(tx->tx_resid == tx->tx_nob);
CDEBUG (D_NET, "Packet %p type %d, nob %d niov %d nkiov %d\n",
tx, (tx->tx_lnetmsg != NULL) ? tx->tx_lnetmsg->msg_hdr.type:
......@@ -739,8 +739,8 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
if (msg->ksm_type == KSOCK_MSG_NOOP) {
/* The packet is noop ZC ACK, try to piggyback the ack_cookie
* on a normal packet so I don't need to send it */
LASSERT (msg->ksm_zc_cookies[1] != 0);
LASSERT (conn->ksnc_proto->pro_queue_tx_zcack != NULL);
LASSERT(msg->ksm_zc_cookies[1] != 0);
LASSERT(conn->ksnc_proto->pro_queue_tx_zcack != NULL);
if (conn->ksnc_proto->pro_queue_tx_zcack(conn, tx, 0))
ztx = tx; /* ZC ACK piggybacked on ztx release tx later */
......@@ -748,8 +748,8 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
} else {
/* It's a normal packet - can it piggyback a noop zc-ack that
* has been queued already? */
LASSERT (msg->ksm_zc_cookies[1] == 0);
LASSERT (conn->ksnc_proto->pro_queue_tx_msg != NULL);
LASSERT(msg->ksm_zc_cookies[1] == 0);
LASSERT(conn->ksnc_proto->pro_queue_tx_msg != NULL);
ztx = conn->ksnc_proto->pro_queue_tx_msg(conn, tx);
/* ztx will be released later */
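Both branches above route through per-protocol hooks; a condensed sketch of the dispatch (V1.x never generates NOOP ZC ACKs, so its zcack hook is NULL, as the protocol tables at the end of this diff show):
static ksock_tx_t *
queue_tx_sketch(ksock_conn_t *conn, ksock_tx_t *tx)
{
	if (tx->tx_msg.ksm_type == KSOCK_MSG_NOOP) {
		/* NOOP carrying a ZC ACK: try to piggyback it on a
		 * message already queued; on success, release tx */
		if (conn->ksnc_proto->pro_queue_tx_zcack(conn, tx, 0))
			return tx;
		return NULL;
	}
	/* normal message: it may absorb a queued NOOP's ZC ACK */
	return conn->ksnc_proto->pro_queue_tx_msg(conn, tx);
}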
......@@ -777,14 +777,14 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
ksock_route_t *
ksocknal_find_connectable_route_locked (ksock_peer_t *peer)
{
unsigned long now = cfs_time_current();
struct list_head *tmp;
unsigned long now = cfs_time_current();
struct list_head *tmp;
ksock_route_t *route;
list_for_each (tmp, &peer->ksnp_routes) {
route = list_entry (tmp, ksock_route_t, ksnr_list);
LASSERT (!route->ksnr_connecting || route->ksnr_scheduled);
LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);
if (route->ksnr_scheduled) /* connections being established */
continue;
......@@ -813,13 +813,13 @@ ksocknal_find_connectable_route_locked (ksock_peer_t *peer)
ksock_route_t *
ksocknal_find_connecting_route_locked (ksock_peer_t *peer)
{
struct list_head *tmp;
ksock_route_t *route;
struct list_head *tmp;
ksock_route_t *route;
list_for_each (tmp, &peer->ksnp_routes) {
route = list_entry (tmp, ksock_route_t, ksnr_list);
LASSERT (!route->ksnr_connecting || route->ksnr_scheduled);
LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);
if (route->ksnr_scheduled)
return route;
......@@ -831,13 +831,13 @@ ksocknal_find_connecting_route_locked (ksock_peer_t *peer)
int
ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
{
ksock_peer_t *peer;
ksock_conn_t *conn;
rwlock_t *g_lock;
int retry;
int rc;
ksock_peer_t *peer;
ksock_conn_t *conn;
rwlock_t *g_lock;
int retry;
int rc;
LASSERT (tx->tx_conn == NULL);
LASSERT(tx->tx_conn == NULL);
g_lock = &ksocknal_data.ksnd_global_lock;
......@@ -922,17 +922,17 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
int
ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
{
int mpflag = 1;
int type = lntmsg->msg_type;
int mpflag = 1;
int type = lntmsg->msg_type;
lnet_process_id_t target = lntmsg->msg_target;
unsigned int payload_niov = lntmsg->msg_niov;
struct kvec *payload_iov = lntmsg->msg_iov;
lnet_kiov_t *payload_kiov = lntmsg->msg_kiov;
unsigned int payload_offset = lntmsg->msg_offset;
unsigned int payload_nob = lntmsg->msg_len;
ksock_tx_t *tx;
int desc_size;
int rc;
unsigned int payload_niov = lntmsg->msg_niov;
struct kvec *payload_iov = lntmsg->msg_iov;
lnet_kiov_t *payload_kiov = lntmsg->msg_kiov;
unsigned int payload_offset = lntmsg->msg_offset;
unsigned int payload_nob = lntmsg->msg_len;
ksock_tx_t *tx;
int desc_size;
int rc;
/* NB 'private' is different depending on what we're sending.
* Just ignore it... */
......@@ -940,8 +940,8 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
CDEBUG(D_NET, "sending %u bytes in %d frags to %s\n",
payload_nob, payload_niov, libcfs_id2str(target));
LASSERT (payload_nob == 0 || payload_niov > 0);
LASSERT (payload_niov <= LNET_MAX_IOV);
LASSERT(payload_nob == 0 || payload_niov > 0);
LASSERT(payload_niov <= LNET_MAX_IOV);
/* payload is either all vaddrs or all pages */
LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
LASSERT (!in_interrupt ());
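The descriptor sizing happens in the collapsed lines below; a hedged sketch of what it presumably looks like, where the tx_frags layout is an assumption rather than something shown in this hunk (one extra kvec slot carries the ksock_msg_t header in the vaddr case):
	if (payload_iov != NULL)
		desc_size = offsetof(ksock_tx_t,
				     tx_frags.virt.iov[1 + payload_niov]);
	else
		desc_size = offsetof(ksock_tx_t,
				     tx_frags.paged.kiov[payload_niov]);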
......@@ -1028,9 +1028,9 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip)
{
static char ksocknal_slop_buffer[4096];
int nob;
unsigned int niov;
int skipped;
int nob;
unsigned int niov;
int skipped;
LASSERT(conn->ksnc_proto != NULL);
......@@ -1063,7 +1063,7 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip)
conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
conn->ksnc_rx_iov[0].iov_base = &conn->ksnc_msg.ksm_u.lnetmsg;
conn->ksnc_rx_iov[0].iov_len = sizeof (lnet_hdr_t);
conn->ksnc_rx_iov[0].iov_len = sizeof (lnet_hdr_t);
break;
default:
......@@ -1108,18 +1108,18 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip)
static int
ksocknal_process_receive (ksock_conn_t *conn)
{
lnet_hdr_t *lhdr;
lnet_hdr_t *lhdr;
lnet_process_id_t *id;
int rc;
int rc;
LASSERT (atomic_read(&conn->ksnc_conn_refcount) > 0);
/* NB: sched lock NOT held */
/* SOCKNAL_RX_LNET_HEADER is here for backward compatibility */
LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_KSM_HEADER ||
conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD ||
conn->ksnc_rx_state == SOCKNAL_RX_LNET_HEADER ||
conn->ksnc_rx_state == SOCKNAL_RX_SLOP);
LASSERT(conn->ksnc_rx_state == SOCKNAL_RX_KSM_HEADER ||
conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD ||
conn->ksnc_rx_state == SOCKNAL_RX_LNET_HEADER ||
conn->ksnc_rx_state == SOCKNAL_RX_SLOP);
again:
if (conn->ksnc_rx_nob_wanted != 0) {
rc = ksocknal_receive(conn);
......@@ -1229,7 +1229,7 @@ ksocknal_process_receive (ksock_conn_t *conn)
if ((conn->ksnc_peer->ksnp_id.pid & LNET_PID_USERFLAG) != 0) {
/* Userspace peer */
lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr;
id = &conn->ksnc_peer->ksnp_id;
id = &conn->ksnc_peer->ksnp_id;
/* Substitute process ID assigned at connection time */
lhdr->src_pid = cpu_to_le32(id->pid);
......@@ -1277,7 +1277,7 @@ ksocknal_process_receive (ksock_conn_t *conn)
LASSERT(conn->ksnc_proto != &ksocknal_protocol_v1x);
lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr;
id = &conn->ksnc_peer->ksnp_id;
id = &conn->ksnc_peer->ksnp_id;
rc = conn->ksnc_proto->pro_handle_zcreq(conn,
conn->ksnc_msg.ksm_zc_cookies[0],
......@@ -1305,7 +1305,7 @@ ksocknal_process_receive (ksock_conn_t *conn)
}
/* Not Reached */
LBUG ();
LBUG();
return -EINVAL; /* keep gcc happy */
}
......@@ -1314,15 +1314,15 @@ ksocknal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
unsigned int offset, unsigned int mlen, unsigned int rlen)
{
ksock_conn_t *conn = (ksock_conn_t *)private;
ksock_conn_t *conn = (ksock_conn_t *)private;
ksock_sched_t *sched = conn->ksnc_scheduler;
LASSERT (mlen <= rlen);
LASSERT (niov <= LNET_MAX_IOV);
LASSERT(mlen <= rlen);
LASSERT(niov <= LNET_MAX_IOV);
conn->ksnc_cookie = msg;
conn->ksnc_rx_nob_wanted = mlen;
conn->ksnc_rx_nob_left = rlen;
conn->ksnc_rx_nob_left = rlen;
if (mlen == 0 || iov != NULL) {
conn->ksnc_rx_nkiov = 0;
......@@ -1333,18 +1333,18 @@ ksocknal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
niov, iov, offset, mlen);
} else {
conn->ksnc_rx_niov = 0;
conn->ksnc_rx_iov = NULL;
conn->ksnc_rx_iov = NULL;
conn->ksnc_rx_kiov = conn->ksnc_rx_iov_space.kiov;
conn->ksnc_rx_nkiov =
lnet_extract_kiov(LNET_MAX_IOV, conn->ksnc_rx_kiov,
niov, kiov, offset, mlen);
}
LASSERT (mlen ==
lnet_iov_nob (conn->ksnc_rx_niov, conn->ksnc_rx_iov) +
lnet_kiov_nob (conn->ksnc_rx_nkiov, conn->ksnc_rx_kiov));
LASSERT(mlen ==
lnet_iov_nob(conn->ksnc_rx_niov, conn->ksnc_rx_iov) +
lnet_kiov_nob(conn->ksnc_rx_nkiov, conn->ksnc_rx_kiov));
LASSERT (conn->ksnc_rx_scheduled);
LASSERT(conn->ksnc_rx_scheduled);
spin_lock_bh(&sched->kss_lock);
......@@ -1370,7 +1370,7 @@ ksocknal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
static inline int
ksocknal_sched_cansleep(ksock_sched_t *sched)
{
int rc;
int rc;
spin_lock_bh(&sched->kss_lock);
......@@ -1384,13 +1384,13 @@ ksocknal_sched_cansleep(ksock_sched_t *sched)
int ksocknal_scheduler(void *arg)
{
struct ksock_sched_info *info;
ksock_sched_t *sched;
ksock_conn_t *conn;
ksock_tx_t *tx;
int rc;
int nloops = 0;
long id = (long)arg;
struct ksock_sched_info *info;
ksock_sched_t *sched;
ksock_conn_t *conn;
ksock_tx_t *tx;
int rc;
int nloops = 0;
long id = (long)arg;
info = ksocknal_data.ksnd_sched_info[KSOCK_THREAD_CPT(id)];
sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];
......@@ -1455,7 +1455,7 @@ int ksocknal_scheduler(void *arg)
}
if (!list_empty (&sched->kss_tx_conns)) {
LIST_HEAD (zlist);
LIST_HEAD(zlist);
if (!list_empty(&sched->kss_zombie_noop_txs)) {
list_add(&zlist,
......@@ -1513,9 +1513,9 @@ int ksocknal_scheduler(void *arg)
/* Do nothing; after a short timeout, this
* conn will be reposted on kss_tx_conns. */
} else if (conn->ksnc_tx_ready &&
!list_empty (&conn->ksnc_tx_queue)) {
!list_empty(&conn->ksnc_tx_queue)) {
/* reschedule for tx */
list_add_tail (&conn->ksnc_tx_list,
list_add_tail(&conn->ksnc_tx_list,
&sched->kss_tx_conns);
} else {
conn->ksnc_tx_scheduled = 0;
......@@ -1606,7 +1606,7 @@ void ksocknal_write_callback (ksock_conn_t *conn)
static ksock_proto_t *
ksocknal_parse_proto_version (ksock_hello_msg_t *hello)
{
__u32 version = 0;
__u32 version = 0;
if (hello->kshm_magic == LNET_PROTO_MAGIC)
version = hello->kshm_version;
......@@ -1634,8 +1634,8 @@ ksocknal_parse_proto_version (ksock_hello_msg_t *hello)
if (hello->kshm_magic == le32_to_cpu(LNET_PROTO_TCP_MAGIC)) {
lnet_magicversion_t *hmv = (lnet_magicversion_t *)hello;
CLASSERT (sizeof (lnet_magicversion_t) ==
offsetof (ksock_hello_msg_t, kshm_src_nid));
CLASSERT(sizeof (lnet_magicversion_t) ==
offsetof (ksock_hello_msg_t, kshm_src_nid));
if (hmv->version_major == cpu_to_le16 (KSOCK_PROTO_V1_MAJOR) &&
hmv->version_minor == cpu_to_le16 (KSOCK_PROTO_V1_MINOR))
......@@ -1650,19 +1650,19 @@ ksocknal_send_hello (lnet_ni_t *ni, ksock_conn_t *conn,
lnet_nid_t peer_nid, ksock_hello_msg_t *hello)
{
/* CAVEAT EMPTOR: this byte flips 'ipaddrs' */
ksock_net_t *net = (ksock_net_t *)ni->ni_data;
ksock_net_t *net = (ksock_net_t *)ni->ni_data;
LASSERT (hello->kshm_nips <= LNET_MAX_INTERFACES);
LASSERT(hello->kshm_nips <= LNET_MAX_INTERFACES);
/* rely on caller to hold a ref on socket so it wouldn't disappear */
LASSERT (conn->ksnc_proto != NULL);
LASSERT(conn->ksnc_proto != NULL);
hello->kshm_src_nid = ni->ni_nid;
hello->kshm_dst_nid = peer_nid;
hello->kshm_src_pid = the_lnet.ln_pid;
hello->kshm_src_nid = ni->ni_nid;
hello->kshm_dst_nid = peer_nid;
hello->kshm_src_pid = the_lnet.ln_pid;
hello->kshm_src_incarnation = net->ksnn_incarnation;
hello->kshm_ctype = conn->ksnc_type;
hello->kshm_ctype = conn->ksnc_type;
return conn->ksnc_proto->pro_send_hello(conn, hello);
}
......@@ -1693,16 +1693,16 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
* EALREADY lost connection race
* EPROTO protocol version mismatch
*/
struct socket *sock = conn->ksnc_sock;
int active = (conn->ksnc_proto != NULL);
int timeout;
int proto_match;
int rc;
ksock_proto_t *proto;
lnet_process_id_t recv_id;
struct socket *sock = conn->ksnc_sock;
int active = (conn->ksnc_proto != NULL);
int timeout;
int proto_match;
int rc;
ksock_proto_t *proto;
lnet_process_id_t recv_id;
/* socket type set on active connections - not set on passive */
LASSERT (!active == !(conn->ksnc_type != SOCKLND_CONN_NONE));
LASSERT(!active == !(conn->ksnc_type != SOCKLND_CONN_NONE));
timeout = active ? *ksocknal_tunables.ksnd_timeout :
lnet_acceptor_timeout();
......@@ -1731,7 +1731,7 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
if (rc != 0) {
CERROR("Error %d reading HELLO from %pI4h\n",
rc, &conn->ksnc_ipaddr);
LASSERT (rc < 0);
LASSERT(rc < 0);
return rc;
}
......@@ -1765,7 +1765,7 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
if (rc != 0) {
CERROR("Error %d reading or checking hello from from %pI4h\n",
rc, &conn->ksnc_ipaddr);
LASSERT (rc < 0);
LASSERT(rc < 0);
return rc;
}
......@@ -1830,22 +1830,22 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
static int
ksocknal_connect (ksock_route_t *route)
{
LIST_HEAD (zombies);
ksock_peer_t *peer = route->ksnr_peer;
int type;
int wanted;
struct socket *sock;
unsigned long deadline;
int retry_later = 0;
int rc = 0;
LIST_HEAD(zombies);
ksock_peer_t *peer = route->ksnr_peer;
int type;
int wanted;
struct socket *sock;
unsigned long deadline;
int retry_later = 0;
int rc = 0;
deadline = cfs_time_add(cfs_time_current(),
cfs_time_seconds(*ksocknal_tunables.ksnd_timeout));
write_lock_bh(&ksocknal_data.ksnd_global_lock);
LASSERT (route->ksnr_scheduled);
LASSERT (!route->ksnr_connecting);
LASSERT(route->ksnr_scheduled);
LASSERT(!route->ksnr_connecting);
route->ksnr_connecting = 1;
......@@ -2101,7 +2101,7 @@ static ksock_route_t *
ksocknal_connd_get_route_locked(signed long *timeout_p)
{
ksock_route_t *route;
unsigned long now;
unsigned long now;
now = cfs_time_current();
......@@ -2124,13 +2124,13 @@ ksocknal_connd_get_route_locked(signed long *timeout_p)
int
ksocknal_connd (void *arg)
{
spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock;
ksock_connreq_t *cr;
wait_queue_t wait;
int nloops = 0;
int cons_retry = 0;
spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock;
ksock_connreq_t *cr;
wait_queue_t wait;
int nloops = 0;
int cons_retry = 0;
cfs_block_allsigs ();
cfs_block_allsigs();
init_waitqueue_entry(&wait, current);
......@@ -2144,7 +2144,7 @@ ksocknal_connd (void *arg)
ksock_route_t *route = NULL;
long sec = get_seconds();
long timeout = MAX_SCHEDULE_TIMEOUT;
int dropped_lock = 0;
int dropped_lock = 0;
if (ksocknal_connd_check_stop(sec, &timeout)) {
/* wakeup another one to check stop */
......@@ -2236,15 +2236,15 @@ static ksock_conn_t *
ksocknal_find_timed_out_conn (ksock_peer_t *peer)
{
/* We're called with a shared lock on ksnd_global_lock */
ksock_conn_t *conn;
struct list_head *ctmp;
ksock_conn_t *conn;
struct list_head *ctmp;
list_for_each (ctmp, &peer->ksnp_conns) {
int error;
int error;
conn = list_entry (ctmp, ksock_conn_t, ksnc_list);
/* Don't need the {get,put}connsock dance to deref ksnc_sock */
LASSERT (!conn->ksnc_closing);
LASSERT(!conn->ksnc_closing);
/* SOCK_ERROR will reset the error code of the socket on
* some platforms (like Darwin8.x) */
......@@ -2313,8 +2313,8 @@ ksocknal_find_timed_out_conn (ksock_peer_t *peer)
static inline void
ksocknal_flush_stale_txs(ksock_peer_t *peer)
{
ksock_tx_t *tx;
LIST_HEAD (stale_txs);
ksock_tx_t *tx;
LIST_HEAD(stale_txs);
write_lock_bh(&ksocknal_data.ksnd_global_lock);
......@@ -2338,9 +2338,9 @@ ksocknal_flush_stale_txs(ksock_peer_t *peer)
static int
ksocknal_send_keepalive_locked(ksock_peer_t *peer)
{
ksock_sched_t *sched;
ksock_conn_t *conn;
ksock_tx_t *tx;
ksock_sched_t *sched;
ksock_conn_t *conn;
ksock_tx_t *tx;
if (list_empty(&peer->ksnp_conns)) /* last_alive will be updated by create_conn */
return 0;
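The body of this function is collapsed; a minimal sketch of the test it needs to make, assuming a ksnp_last_alive timestamp on the peer (the ksnd_keepalive tunable is real and appears in the tunables table later in this diff):
static int
keepalive_due_sketch(ksock_peer_t *peer)
{
	int ka = *ksocknal_tunables.ksnd_keepalive;	/* seconds */

	if (ka <= 0)		/* keepalives disabled */
		return 0;
	/* due once the peer has been silent for the whole interval */
	return cfs_time_after(cfs_time_current(),
			      cfs_time_add(peer->ksnp_last_alive,
					   cfs_time_seconds(ka)));
}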
......@@ -2399,10 +2399,10 @@ ksocknal_send_keepalive_locked(ksock_peer_t *peer)
static void
ksocknal_check_peer_timeouts (int idx)
{
struct list_head *peers = &ksocknal_data.ksnd_peers[idx];
ksock_peer_t *peer;
ksock_conn_t *conn;
ksock_tx_t *tx;
struct list_head *peers = &ksocknal_data.ksnd_peers[idx];
ksock_peer_t *peer;
ksock_conn_t *conn;
ksock_tx_t *tx;
again:
/* NB. We expect to have a look at all the peers and not find any
......@@ -2411,9 +2411,9 @@ ksocknal_check_peer_timeouts (int idx)
read_lock(&ksocknal_data.ksnd_global_lock);
list_for_each_entry(peer, peers, ksnp_list) {
unsigned long deadline = 0;
int resid = 0;
int n = 0;
unsigned long deadline = 0;
int resid = 0;
int n = 0;
if (ksocknal_send_keepalive_locked(peer) != 0) {
read_unlock(&ksocknal_data.ksnd_global_lock);
......@@ -2476,8 +2476,8 @@ ksocknal_check_peer_timeouts (int idx)
tx = list_entry(peer->ksnp_zc_req_list.next,
ksock_tx_t, tx_zc_list);
deadline = tx->tx_deadline;
resid = tx->tx_resid;
conn = tx->tx_conn;
resid = tx->tx_resid;
conn = tx->tx_conn;
ksocknal_conn_addref(conn);
spin_unlock(&peer->ksnp_lock);
......@@ -2499,17 +2499,17 @@ ksocknal_check_peer_timeouts (int idx)
int
ksocknal_reaper (void *arg)
{
wait_queue_t wait;
ksock_conn_t *conn;
ksock_sched_t *sched;
struct list_head enomem_conns;
int nenomem_conns;
long timeout;
int i;
int peer_index = 0;
unsigned long deadline = cfs_time_current();
cfs_block_allsigs ();
wait_queue_t wait;
ksock_conn_t *conn;
ksock_sched_t *sched;
struct list_head enomem_conns;
int nenomem_conns;
long timeout;
int i;
int peer_index = 0;
unsigned long deadline = cfs_time_current();
cfs_block_allsigs();
INIT_LIST_HEAD(&enomem_conns);
init_waitqueue_entry(&wait, current);
......@@ -2580,7 +2580,7 @@ ksocknal_reaper (void *arg)
cfs_time_current())) <= 0) {
const int n = 4;
const int p = 1;
int chunk = ksocknal_data.ksnd_peer_hash_size;
int chunk = ksocknal_data.ksnd_peer_hash_size;
/* Time to check for timeouts on a few more peers: I do
* checks every 'p' seconds on a proportion of the peer
......
......@@ -64,7 +64,7 @@ ksocknal_lib_get_conn_addrs(ksock_conn_t *conn)
int
ksocknal_lib_zc_capable(ksock_conn_t *conn)
{
int caps = conn->ksnc_sock->sk->sk_route_caps;
int caps = conn->ksnc_sock->sk->sk_route_caps;
if (conn->ksnc_proto == &ksocknal_protocol_v1x)
return 0;
......@@ -78,8 +78,8 @@ int
ksocknal_lib_send_iov(ksock_conn_t *conn, ksock_tx_t *tx)
{
struct socket *sock = conn->ksnc_sock;
int nob;
int rc;
int nob;
int rc;
if (*ksocknal_tunables.ksnd_enable_csum && /* checksum enabled */
conn->ksnc_proto == &ksocknal_protocol_v2x && /* V2.x connection */
......@@ -92,15 +92,15 @@ ksocknal_lib_send_iov(ksock_conn_t *conn, ksock_tx_t *tx)
{
#if SOCKNAL_SINGLE_FRAG_TX
struct kvec scratch;
struct kvec *scratchiov = &scratch;
unsigned int niov = 1;
struct kvec scratch;
struct kvec *scratchiov = &scratch;
unsigned int niov = 1;
#else
struct kvec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
unsigned int niov = tx->tx_niov;
struct kvec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
unsigned int niov = tx->tx_niov;
#endif
struct msghdr msg = {.msg_flags = MSG_DONTWAIT};
int i;
int i;
for (nob = i = 0; i < niov; i++) {
scratchiov[i] = tx->tx_iov[i];
......@@ -120,9 +120,9 @@ int
ksocknal_lib_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx)
{
struct socket *sock = conn->ksnc_sock;
lnet_kiov_t *kiov = tx->tx_kiov;
int rc;
int nob;
lnet_kiov_t *kiov = tx->tx_kiov;
int rc;
int nob;
/* Not NOOP message */
LASSERT(tx->tx_lnetmsg != NULL);
......@@ -131,11 +131,11 @@ ksocknal_lib_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx)
* or leave them alone. */
if (tx->tx_msg.ksm_zc_cookies[0] != 0) {
/* Zero copy is enabled */
struct sock *sk = sock->sk;
struct page *page = kiov->kiov_page;
int offset = kiov->kiov_offset;
int fragsize = kiov->kiov_len;
int msgflg = MSG_DONTWAIT;
struct sock *sk = sock->sk;
struct page *page = kiov->kiov_page;
int offset = kiov->kiov_offset;
int fragsize = kiov->kiov_len;
int msgflg = MSG_DONTWAIT;
CDEBUG(D_NET, "page %p + offset %x for %d\n",
page, offset, kiov->kiov_len);
......@@ -153,18 +153,18 @@ ksocknal_lib_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx)
}
} else {
#if SOCKNAL_SINGLE_FRAG_TX || !SOCKNAL_RISK_KMAP_DEADLOCK
struct kvec scratch;
struct kvec scratch;
struct kvec *scratchiov = &scratch;
unsigned int niov = 1;
unsigned int niov = 1;
#else
#ifdef CONFIG_HIGHMEM
#warning "XXX risk of kmap deadlock on multiple frags..."
#endif
struct kvec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
unsigned int niov = tx->tx_nkiov;
unsigned int niov = tx->tx_nkiov;
#endif
struct msghdr msg = {.msg_flags = MSG_DONTWAIT};
int i;
int i;
for (nob = i = 0; i < niov; i++) {
scratchiov[i].iov_base = kmap(kiov[i].kiov_page) +
......@@ -187,7 +187,7 @@ ksocknal_lib_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx)
void
ksocknal_lib_eager_ack(ksock_conn_t *conn)
{
int opt = 1;
int opt = 1;
struct socket *sock = conn->ksnc_sock;
/* Remind the socket to ACK eagerly. If I don't, the socket might
......@@ -203,23 +203,23 @@ int
ksocknal_lib_recv_iov(ksock_conn_t *conn)
{
#if SOCKNAL_SINGLE_FRAG_RX
struct kvec scratch;
struct kvec scratch;
struct kvec *scratchiov = &scratch;
unsigned int niov = 1;
unsigned int niov = 1;
#else
struct kvec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
unsigned int niov = conn->ksnc_rx_niov;
unsigned int niov = conn->ksnc_rx_niov;
#endif
struct kvec *iov = conn->ksnc_rx_iov;
struct msghdr msg = {
.msg_flags = 0
.msg_flags = 0
};
int nob;
int i;
int rc;
int fragnob;
int sum;
__u32 saved_csum;
int nob;
int i;
int rc;
int fragnob;
int sum;
__u32 saved_csum;
/* NB we can't trust socket ops to either consume our iovs
* or leave them alone. */
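The comment explains why every send and receive path in this file copies its iovs into scratch space first. A minimal sketch of the pattern, assuming the in-kernel kernel_recvmsg() API of this era:
static int
recv_iov_sketch(ksock_conn_t *conn, struct kvec *scratchiov,
		struct kvec *iov, unsigned int niov)
{
	struct msghdr msg = { .msg_flags = 0 };
	int nob;
	int i;

	/* copy the live iovs; the socket may consume or rewrite
	 * whatever is passed to ->recvmsg() */
	for (nob = i = 0; i < niov; i++) {
		scratchiov[i] = iov[i];
		nob += iov[i].iov_len;
	}
	return kernel_recvmsg(conn->ksnc_sock, &msg, scratchiov,
			      niov, nob, MSG_DONTWAIT);
}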
......@@ -271,9 +271,9 @@ static void *
ksocknal_lib_kiov_vmap(lnet_kiov_t *kiov, int niov,
struct kvec *iov, struct page **pages)
{
void *addr;
int nob;
int i;
void *addr;
int nob;
int i;
if (!*ksocknal_tunables.ksnd_zc_recv || pages == NULL)
return NULL;
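After these guards, the function maps the fragment pages into one contiguous virtual range so the payload can be received with a single kvec. A hedged sketch that omits the contiguity and size checks the full function performs:
static void *
kiov_vmap_sketch(lnet_kiov_t *kiov, int niov, struct kvec *iov,
		 struct page **pages)
{
	void *addr;
	int nob;
	int i;

	for (nob = i = 0; i < niov; i++) {
		pages[i] = kiov[i].kiov_page;
		nob += kiov[i].kiov_len;
	}

	addr = vmap(pages, niov, VM_MAP, PAGE_KERNEL);
	if (addr == NULL)
		return NULL;

	iov->iov_base = addr + kiov[0].kiov_offset;
	iov->iov_len = nob;
	return addr;
}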
......@@ -307,29 +307,29 @@ int
ksocknal_lib_recv_kiov(ksock_conn_t *conn)
{
#if SOCKNAL_SINGLE_FRAG_RX || !SOCKNAL_RISK_KMAP_DEADLOCK
struct kvec scratch;
struct kvec *scratchiov = &scratch;
struct page **pages = NULL;
unsigned int niov = 1;
struct kvec scratch;
struct kvec *scratchiov = &scratch;
struct page **pages = NULL;
unsigned int niov = 1;
#else
#ifdef CONFIG_HIGHMEM
#warning "XXX risk of kmap deadlock on multiple frags..."
#endif
struct kvec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
struct page **pages = conn->ksnc_scheduler->kss_rx_scratch_pgs;
unsigned int niov = conn->ksnc_rx_nkiov;
struct kvec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
struct page **pages = conn->ksnc_scheduler->kss_rx_scratch_pgs;
unsigned int niov = conn->ksnc_rx_nkiov;
#endif
lnet_kiov_t *kiov = conn->ksnc_rx_kiov;
struct msghdr msg = {
.msg_flags = 0
.msg_flags = 0
};
int nob;
int i;
int rc;
void *base;
void *addr;
int sum;
int fragnob;
int nob;
int i;
int rc;
void *base;
void *addr;
int sum;
int fragnob;
int n;
/* NB we can't trust socket ops to either consume our iovs
......@@ -357,10 +357,10 @@ ksocknal_lib_recv_kiov(ksock_conn_t *conn)
for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
LASSERT(i < niov);
/* Dang! have to kmap again because I have nowhere to stash the
* mapped address. But by doing it while the page is still
* mapped, the kernel just bumps the map count and returns me
* the address it stashed. */
/* Dang! have to kmap again because I have nowhere to
* stash the mapped address. But by doing it while the
* page is still mapped, the kernel just bumps the map
* count and returns me the address it stashed. */
base = kmap(kiov[i].kiov_page) + kiov[i].kiov_offset;
fragnob = kiov[i].kiov_len;
if (fragnob > sum)
......@@ -386,9 +386,9 @@ ksocknal_lib_recv_kiov(ksock_conn_t *conn)
void
ksocknal_lib_csum_tx(ksock_tx_t *tx)
{
int i;
__u32 csum;
void *base;
int i;
__u32 csum;
void *base;
LASSERT(tx->tx_iov[0].iov_base == &tx->tx_msg);
LASSERT(tx->tx_conn != NULL);
......@@ -426,8 +426,8 @@ int
ksocknal_lib_get_conn_tunables(ksock_conn_t *conn, int *txmem, int *rxmem, int *nagle)
{
struct socket *sock = conn->ksnc_sock;
int len;
int rc;
int len;
int rc;
rc = ksocknal_connsock_addref(conn);
if (rc != 0) {
......@@ -456,13 +456,13 @@ ksocknal_lib_get_conn_tunables(ksock_conn_t *conn, int *txmem, int *rxmem, int *
int
ksocknal_lib_setup_sock(struct socket *sock)
{
int rc;
int option;
int keep_idle;
int keep_intvl;
int keep_count;
int do_keepalive;
struct linger linger;
int rc;
int option;
int keep_idle;
int keep_intvl;
int keep_count;
int do_keepalive;
struct linger linger;
sock->sk->sk_allocation = GFP_NOFS;
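The rest of the function is collapsed; a hedged sketch of the conditioning it applies, using the in-kernel kernel_setsockopt() API of this era and the tunables declared above (the exact option set and ordering are assumptions):
	linger.l_onoff = 0;	/* never linger on close */
	linger.l_linger = 0;
	rc = kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER,
			       (char *)&linger, sizeof(linger));
	if (rc != 0)
		return rc;

	option = (*ksocknal_tunables.ksnd_nagle == 0) ? 1 : 0;
	rc = kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY,
			       (char *)&option, sizeof(option));
	if (rc != 0)
		return rc;

	keep_idle = *ksocknal_tunables.ksnd_keepalive_idle;
	rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPIDLE,
			       (char *)&keep_idle, sizeof(keep_idle));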
......@@ -555,11 +555,11 @@ ksocknal_lib_setup_sock(struct socket *sock)
void
ksocknal_lib_push_conn(ksock_conn_t *conn)
{
struct sock *sk;
struct sock *sk;
struct tcp_sock *tp;
int nonagle;
int val = 1;
int rc;
int nonagle;
int val = 1;
int rc;
rc = ksocknal_connsock_addref(conn);
if (rc != 0) /* being shut down */
......@@ -592,7 +592,7 @@ extern void ksocknal_write_callback(ksock_conn_t *conn);
static void
ksocknal_data_ready(struct sock *sk)
{
ksock_conn_t *conn;
ksock_conn_t *conn;
/* interleave correctly with closing sockets... */
LASSERT(!in_irq());
......@@ -611,9 +611,9 @@ ksocknal_data_ready(struct sock *sk)
static void
ksocknal_write_space(struct sock *sk)
{
ksock_conn_t *conn;
int wspace;
int min_wpace;
ksock_conn_t *conn;
int wspace;
int min_wpace;
/* interleave correctly with closing sockets... */
LASSERT(!in_irq());
......@@ -689,7 +689,7 @@ ksocknal_lib_reset_callback(struct socket *sock, ksock_conn_t *conn)
int
ksocknal_lib_memory_pressure(ksock_conn_t *conn)
{
int rc = 0;
int rc = 0;
ksock_sched_t *sched;
sched = conn->ksnc_scheduler;
......
......@@ -145,40 +145,37 @@ ksock_tunables_t ksocknal_tunables;
int ksocknal_tunables_init(void)
{
/* initialize ksocknal_tunables structure */
ksocknal_tunables.ksnd_timeout = &sock_timeout;
ksocknal_tunables.ksnd_nscheds = &nscheds;
ksocknal_tunables.ksnd_nconnds = &nconnds;
ksocknal_tunables.ksnd_nconnds_max = &nconnds_max;
ksocknal_tunables.ksnd_timeout = &sock_timeout;
ksocknal_tunables.ksnd_nscheds = &nscheds;
ksocknal_tunables.ksnd_nconnds = &nconnds;
ksocknal_tunables.ksnd_nconnds_max = &nconnds_max;
ksocknal_tunables.ksnd_min_reconnectms = &min_reconnectms;
ksocknal_tunables.ksnd_max_reconnectms = &max_reconnectms;
ksocknal_tunables.ksnd_eager_ack = &eager_ack;
ksocknal_tunables.ksnd_typed_conns = &typed_conns;
ksocknal_tunables.ksnd_min_bulk = &min_bulk;
ksocknal_tunables.ksnd_eager_ack = &eager_ack;
ksocknal_tunables.ksnd_typed_conns = &typed_conns;
ksocknal_tunables.ksnd_min_bulk = &min_bulk;
ksocknal_tunables.ksnd_tx_buffer_size = &tx_buffer_size;
ksocknal_tunables.ksnd_rx_buffer_size = &rx_buffer_size;
ksocknal_tunables.ksnd_nagle = &nagle;
ksocknal_tunables.ksnd_round_robin = &round_robin;
ksocknal_tunables.ksnd_keepalive = &keepalive;
ksocknal_tunables.ksnd_nagle = &nagle;
ksocknal_tunables.ksnd_round_robin = &round_robin;
ksocknal_tunables.ksnd_keepalive = &keepalive;
ksocknal_tunables.ksnd_keepalive_idle = &keepalive_idle;
ksocknal_tunables.ksnd_keepalive_count = &keepalive_count;
ksocknal_tunables.ksnd_keepalive_intvl = &keepalive_intvl;
ksocknal_tunables.ksnd_credits = &credits;
ksocknal_tunables.ksnd_credits = &credits;
ksocknal_tunables.ksnd_peertxcredits = &peer_credits;
ksocknal_tunables.ksnd_peerrtrcredits = &peer_buffer_credits;
ksocknal_tunables.ksnd_peertimeout = &peer_timeout;
ksocknal_tunables.ksnd_enable_csum = &enable_csum;
ksocknal_tunables.ksnd_peertimeout = &peer_timeout;
ksocknal_tunables.ksnd_enable_csum = &enable_csum;
ksocknal_tunables.ksnd_inject_csum_error = &inject_csum_error;
ksocknal_tunables.ksnd_nonblk_zcack = &nonblk_zcack;
ksocknal_tunables.ksnd_zc_min_payload = &zc_min_payload;
ksocknal_tunables.ksnd_zc_recv = &zc_recv;
ksocknal_tunables.ksnd_zc_recv = &zc_recv;
ksocknal_tunables.ksnd_zc_recv_min_nfrags = &zc_recv_min_nfrags;
#if SOCKNAL_VERSION_DEBUG
ksocknal_tunables.ksnd_protocol = &protocol;
ksocknal_tunables.ksnd_protocol = &protocol;
#endif
if (*ksocknal_tunables.ksnd_zc_min_payload < (2 << 10))
......
......@@ -52,7 +52,7 @@ ksocknal_queue_tx_msg_v1(ksock_conn_t *conn, ksock_tx_t *tx_msg)
void
ksocknal_next_tx_carrier(ksock_conn_t *conn)
{
ksock_tx_t *tx = conn->ksnc_tx_carrier;
ksock_tx_t *tx = conn->ksnc_tx_carrier;
/* Called holding BH lock: conn->ksnc_scheduler->kss_lock */
LASSERT(!list_empty(&conn->ksnc_tx_queue));
......@@ -119,7 +119,7 @@ ksocknal_queue_tx_zcack_v2(ksock_conn_t *conn,
static ksock_tx_t *
ksocknal_queue_tx_msg_v2(ksock_conn_t *conn, ksock_tx_t *tx_msg)
{
ksock_tx_t *tx = conn->ksnc_tx_carrier;
ksock_tx_t *tx = conn->ksnc_tx_carrier;
/*
* Enqueue tx_msg:
......@@ -361,10 +361,10 @@ ksocknal_match_tx_v3(ksock_conn_t *conn, ksock_tx_t *tx, int nonblk)
static int
ksocknal_handle_zcreq(ksock_conn_t *c, __u64 cookie, int remote)
{
ksock_peer_t *peer = c->ksnc_peer;
ksock_conn_t *conn;
ksock_tx_t *tx;
int rc;
ksock_peer_t *peer = c->ksnc_peer;
ksock_conn_t *conn;
ksock_tx_t *tx;
int rc;
read_lock(&ksocknal_data.ksnd_global_lock);
......@@ -405,11 +405,11 @@ ksocknal_handle_zcreq(ksock_conn_t *c, __u64 cookie, int remote)
static int
ksocknal_handle_zcack(ksock_conn_t *conn, __u64 cookie1, __u64 cookie2)
{
ksock_peer_t *peer = conn->ksnc_peer;
ksock_tx_t *tx;
ksock_tx_t *tmp;
ksock_peer_t *peer = conn->ksnc_peer;
ksock_tx_t *tx;
ksock_tx_t *tmp;
LIST_HEAD(zlist);
int count;
int count;
if (cookie1 == 0)
cookie1 = cookie2;
......@@ -452,11 +452,11 @@ ksocknal_handle_zcack(ksock_conn_t *conn, __u64 cookie1, __u64 cookie2)
static int
ksocknal_send_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello)
{
struct socket *sock = conn->ksnc_sock;
lnet_hdr_t *hdr;
struct socket *sock = conn->ksnc_sock;
lnet_hdr_t *hdr;
lnet_magicversion_t *hmv;
int rc;
int i;
int rc;
int i;
CLASSERT(sizeof(lnet_magicversion_t) == offsetof(lnet_hdr_t, src_nid));
......@@ -470,7 +470,7 @@ ksocknal_send_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello)
/* Re-organize V2.x message header to V1.x (lnet_hdr_t)
* header and send out */
hmv->magic = cpu_to_le32 (LNET_PROTO_TCP_MAGIC);
hmv->magic = cpu_to_le32 (LNET_PROTO_TCP_MAGIC);
hmv->version_major = cpu_to_le16 (KSOCK_PROTO_V1_MAJOR);
hmv->version_minor = cpu_to_le16 (KSOCK_PROTO_V1_MINOR);
......@@ -488,9 +488,9 @@ ksocknal_send_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello)
LNET_UNLOCK();
}
hdr->src_nid = cpu_to_le64 (hello->kshm_src_nid);
hdr->src_pid = cpu_to_le32 (hello->kshm_src_pid);
hdr->type = cpu_to_le32 (LNET_MSG_HELLO);
hdr->src_nid = cpu_to_le64 (hello->kshm_src_nid);
hdr->src_pid = cpu_to_le32 (hello->kshm_src_pid);
hdr->type = cpu_to_le32 (LNET_MSG_HELLO);
hdr->payload_length = cpu_to_le32 (hello->kshm_nips * sizeof(__u32));
hdr->msg.hello.type = cpu_to_le32 (hello->kshm_ctype);
hdr->msg.hello.incarnation = cpu_to_le64 (hello->kshm_src_incarnation);
......@@ -529,7 +529,7 @@ static int
ksocknal_send_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello)
{
struct socket *sock = conn->ksnc_sock;
int rc;
int rc;
hello->kshm_magic = LNET_PROTO_MAGIC;
hello->kshm_version = conn->ksnc_proto->pro_version;
......@@ -572,10 +572,10 @@ static int
ksocknal_recv_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello,
int timeout)
{
struct socket *sock = conn->ksnc_sock;
lnet_hdr_t *hdr;
int rc;
int i;
struct socket *sock = conn->ksnc_sock;
lnet_hdr_t *hdr;
int rc;
int i;
LIBCFS_ALLOC(hdr, sizeof(*hdr));
if (hdr == NULL) {
......@@ -602,12 +602,12 @@ ksocknal_recv_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello,
goto out;
}
hello->kshm_src_nid = le64_to_cpu(hdr->src_nid);
hello->kshm_src_pid = le32_to_cpu(hdr->src_pid);
hello->kshm_src_nid = le64_to_cpu(hdr->src_nid);
hello->kshm_src_pid = le32_to_cpu(hdr->src_pid);
hello->kshm_src_incarnation = le64_to_cpu(hdr->msg.hello.incarnation);
hello->kshm_ctype = le32_to_cpu(hdr->msg.hello.type);
hello->kshm_nips = le32_to_cpu(hdr->payload_length) /
sizeof(__u32);
hello->kshm_ctype = le32_to_cpu(hdr->msg.hello.type);
hello->kshm_nips = le32_to_cpu(hdr->payload_length) /
sizeof(__u32);
if (hello->kshm_nips > LNET_MAX_INTERFACES) {
CERROR("Bad nips %d from ip %pI4h\n",
......@@ -647,9 +647,9 @@ ksocknal_recv_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello,
static int
ksocknal_recv_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello, int timeout)
{
struct socket *sock = conn->ksnc_sock;
int rc;
int i;
struct socket *sock = conn->ksnc_sock;
int rc;
int i;
if (hello->kshm_magic == LNET_PROTO_MAGIC)
conn->ksnc_flip = 0;
......@@ -746,9 +746,9 @@ ksocknal_pack_msg_v2(ksock_tx_t *tx)
static void
ksocknal_unpack_msg_v1(ksock_msg_t *msg)
{
msg->ksm_csum = 0;
msg->ksm_type = KSOCK_MSG_LNET;
msg->ksm_zc_cookies[0] = msg->ksm_zc_cookies[1] = 0;
msg->ksm_csum = 0;
msg->ksm_type = KSOCK_MSG_LNET;
msg->ksm_zc_cookies[0] = msg->ksm_zc_cookies[1] = 0;
}
static void
......@@ -758,40 +758,40 @@ ksocknal_unpack_msg_v2(ksock_msg_t *msg)
}
ksock_proto_t ksocknal_protocol_v1x = {
.pro_version = KSOCK_PROTO_V1,
.pro_send_hello = ksocknal_send_hello_v1,
.pro_recv_hello = ksocknal_recv_hello_v1,
.pro_pack = ksocknal_pack_msg_v1,
.pro_unpack = ksocknal_unpack_msg_v1,
.pro_queue_tx_msg = ksocknal_queue_tx_msg_v1,
.pro_handle_zcreq = NULL,
.pro_handle_zcack = NULL,
.pro_queue_tx_zcack = NULL,
.pro_match_tx = ksocknal_match_tx
.pro_version = KSOCK_PROTO_V1,
.pro_send_hello = ksocknal_send_hello_v1,
.pro_recv_hello = ksocknal_recv_hello_v1,
.pro_pack = ksocknal_pack_msg_v1,
.pro_unpack = ksocknal_unpack_msg_v1,
.pro_queue_tx_msg = ksocknal_queue_tx_msg_v1,
.pro_handle_zcreq = NULL,
.pro_handle_zcack = NULL,
.pro_queue_tx_zcack = NULL,
.pro_match_tx = ksocknal_match_tx
};
ksock_proto_t ksocknal_protocol_v2x = {
.pro_version = KSOCK_PROTO_V2,
.pro_send_hello = ksocknal_send_hello_v2,
.pro_recv_hello = ksocknal_recv_hello_v2,
.pro_pack = ksocknal_pack_msg_v2,
.pro_unpack = ksocknal_unpack_msg_v2,
.pro_queue_tx_msg = ksocknal_queue_tx_msg_v2,
.pro_queue_tx_zcack = ksocknal_queue_tx_zcack_v2,
.pro_handle_zcreq = ksocknal_handle_zcreq,
.pro_handle_zcack = ksocknal_handle_zcack,
.pro_match_tx = ksocknal_match_tx
.pro_version = KSOCK_PROTO_V2,
.pro_send_hello = ksocknal_send_hello_v2,
.pro_recv_hello = ksocknal_recv_hello_v2,
.pro_pack = ksocknal_pack_msg_v2,
.pro_unpack = ksocknal_unpack_msg_v2,
.pro_queue_tx_msg = ksocknal_queue_tx_msg_v2,
.pro_queue_tx_zcack = ksocknal_queue_tx_zcack_v2,
.pro_handle_zcreq = ksocknal_handle_zcreq,
.pro_handle_zcack = ksocknal_handle_zcack,
.pro_match_tx = ksocknal_match_tx
};
ksock_proto_t ksocknal_protocol_v3x = {
.pro_version = KSOCK_PROTO_V3,
.pro_send_hello = ksocknal_send_hello_v2,
.pro_recv_hello = ksocknal_recv_hello_v2,
.pro_pack = ksocknal_pack_msg_v2,
.pro_unpack = ksocknal_unpack_msg_v2,
.pro_queue_tx_msg = ksocknal_queue_tx_msg_v2,
.pro_queue_tx_zcack = ksocknal_queue_tx_zcack_v3,
.pro_handle_zcreq = ksocknal_handle_zcreq,
.pro_handle_zcack = ksocknal_handle_zcack,
.pro_match_tx = ksocknal_match_tx_v3
.pro_version = KSOCK_PROTO_V3,
.pro_send_hello = ksocknal_send_hello_v2,
.pro_recv_hello = ksocknal_recv_hello_v2,
.pro_pack = ksocknal_pack_msg_v2,
.pro_unpack = ksocknal_unpack_msg_v2,
.pro_queue_tx_msg = ksocknal_queue_tx_msg_v2,
.pro_queue_tx_zcack = ksocknal_queue_tx_zcack_v3,
.pro_handle_zcreq = ksocknal_handle_zcreq,
.pro_handle_zcack = ksocknal_handle_zcack,
.pro_match_tx = ksocknal_match_tx_v3
};
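The three tables above are selected during the HELLO exchange (see ksocknal_parse_proto_version earlier in this diff). A sketch of the mapping from negotiated version to table, illustrative only:
static ksock_proto_t *
proto_for_version_sketch(__u32 version)
{
	switch (version) {
	case KSOCK_PROTO_V3:
		return &ksocknal_protocol_v3x;
	case KSOCK_PROTO_V2:
		return &ksocknal_protocol_v2x;
	case KSOCK_PROTO_V1:
		return &ksocknal_protocol_v1x;
	default:
		return NULL;	/* no compatible protocol */
	}
}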