Commit 194730a9 authored by Guvenc Gulce, committed by David S. Miller

net/smc: Make SMC statistics network namespace aware

Make the gathered SMC statistics network namespace aware: collect a
separate set of statistics for each network namespace.
Signed-off-by: Guvenc Gulce <guvenc@linux.ibm.com>
Signed-off-by: Karsten Graul <kgraul@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent f0dd7bf5
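The core of the change is that the former global per-CPU counters and the fallback-reason table move into struct netns_smc, so every statistics update first resolves the namespace that owns the SMC socket. Below is a minimal sketch of the resulting access pattern; the helper is illustrative only (not part of the patch) and assumes the usual SMC headers are in scope, but the field names are taken from the patch itself.

/*
 * Illustrative sketch (not part of the patch): counters now live in
 * net->smc.smc_stats, a per-namespace, per-CPU structure.
 */
static inline void smc_stat_sketch_inc_cork(struct smc_sock *smc)
{
	struct net *net = sock_net(&smc->sk);	/* namespace owning the socket */

	/* per-namespace, per-CPU counter; names as used by this patch */
	this_cpu_inc(net->smc.smc_stats->smc[SMC_TYPE_R].cork_cnt);
}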
@@ -32,6 +32,7 @@
 #include <net/netns/mpls.h>
 #include <net/netns/can.h>
 #include <net/netns/xdp.h>
+#include <net/netns/smc.h>
 #include <net/netns/bpf.h>
 #include <linux/ns_common.h>
 #include <linux/idr.h>
@@ -170,6 +171,9 @@ struct net {
 	struct sock		*crypto_nlsk;
 #endif
 	struct sock		*diag_nlsk;
+#if IS_ENABLED(CONFIG_SMC)
+	struct netns_smc	smc;
+#endif
 } __randomize_layout;

 #include <linux/seq_file_net.h>
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NETNS_SMC_H__
#define __NETNS_SMC_H__
#include <linux/mutex.h>
#include <linux/percpu.h>

struct smc_stats_rsn;

struct smc_stats;

struct netns_smc {
	/* per cpu counters for SMC */
	struct smc_stats __percpu	*smc_stats;
	/* protect fback_rsn */
	struct mutex			mutex_fback_rsn;
	struct smc_stats_rsn		*fback_rsn;
};
#endif
@@ -529,15 +529,17 @@ static void smc_stat_inc_fback_rsn_cnt(struct smc_sock *smc,
 static void smc_stat_fallback(struct smc_sock *smc)
 {
-	mutex_lock(&smc_stat_fback_rsn);
+	struct net *net = sock_net(&smc->sk);
+
+	mutex_lock(&net->smc.mutex_fback_rsn);
 	if (smc->listen_smc) {
-		smc_stat_inc_fback_rsn_cnt(smc, fback_rsn.srv);
-		fback_rsn.srv_fback_cnt++;
+		smc_stat_inc_fback_rsn_cnt(smc, net->smc.fback_rsn->srv);
+		net->smc.fback_rsn->srv_fback_cnt++;
 	} else {
-		smc_stat_inc_fback_rsn_cnt(smc, fback_rsn.clnt);
-		fback_rsn.clnt_fback_cnt++;
+		smc_stat_inc_fback_rsn_cnt(smc, net->smc.fback_rsn->clnt);
+		net->smc.fback_rsn->clnt_fback_cnt++;
 	}
-	mutex_unlock(&smc_stat_fback_rsn);
+	mutex_unlock(&net->smc.mutex_fback_rsn);
 }

 static void smc_switch_to_fallback(struct smc_sock *smc, int reason_code)
@@ -568,10 +570,11 @@ static int smc_connect_fallback(struct smc_sock *smc, int reason_code)
 static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code,
					 u8 version)
 {
+	struct net *net = sock_net(&smc->sk);
 	int rc;

 	if (reason_code < 0) { /* error, fallback is not possible */
-		this_cpu_inc(smc_stats->clnt_hshake_err_cnt);
+		this_cpu_inc(net->smc.smc_stats->clnt_hshake_err_cnt);
 		if (smc->sk.sk_state == SMC_INIT)
 			sock_put(&smc->sk); /* passive closing */
 		return reason_code;
@@ -579,7 +582,7 @@ static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code,
 	if (reason_code != SMC_CLC_DECL_PEERDECL) {
 		rc = smc_clc_send_decline(smc, reason_code, version);
 		if (rc < 0) {
-			this_cpu_inc(smc_stats->clnt_hshake_err_cnt);
+			this_cpu_inc(net->smc.smc_stats->clnt_hshake_err_cnt);
 			if (smc->sk.sk_state == SMC_INIT)
 				sock_put(&smc->sk); /* passive closing */
 			return rc;
@@ -1027,7 +1030,7 @@ static int __smc_connect(struct smc_sock *smc)
 	if (rc)
 		goto vlan_cleanup;

-	SMC_STAT_CLNT_SUCC_INC(aclc);
+	SMC_STAT_CLNT_SUCC_INC(sock_net(smc->clcsock->sk), aclc);
 	smc_connect_ism_vlan_cleanup(smc, ini);
 	kfree(buf);
 	kfree(ini);
@@ -1343,8 +1346,9 @@ static void smc_listen_out_connected(struct smc_sock *new_smc)
 static void smc_listen_out_err(struct smc_sock *new_smc)
 {
 	struct sock *newsmcsk = &new_smc->sk;
+	struct net *net = sock_net(newsmcsk);

-	this_cpu_inc(smc_stats->srv_hshake_err_cnt);
+	this_cpu_inc(net->smc.smc_stats->srv_hshake_err_cnt);
 	if (newsmcsk->sk_state == SMC_INIT)
 		sock_put(&new_smc->sk); /* passive closing */
 	newsmcsk->sk_state = SMC_CLOSED;
@@ -1813,7 +1817,7 @@ static void smc_listen_work(struct work_struct *work)
 	}
 	smc_conn_save_peer_info(new_smc, cclc);
 	smc_listen_out_connected(new_smc);
-	SMC_STAT_SERV_SUCC_INC(ini);
+	SMC_STAT_SERV_SUCC_INC(sock_net(newclcsock->sk), ini);
 	goto out_free;

 out_unlock:
@@ -2242,7 +2246,7 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
 		    sk->sk_state != SMC_LISTEN &&
 		    sk->sk_state != SMC_CLOSED) {
 			if (val) {
-				SMC_STAT_INC(!smc->conn.lnk, ndly_cnt);
+				SMC_STAT_INC(smc, ndly_cnt);
 				mod_delayed_work(smc->conn.lgr->tx_wq,
 						 &smc->conn.tx_work, 0);
 			}
@@ -2253,7 +2257,7 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
 		    sk->sk_state != SMC_LISTEN &&
 		    sk->sk_state != SMC_CLOSED) {
 			if (!val) {
-				SMC_STAT_INC(!smc->conn.lnk, cork_cnt);
+				SMC_STAT_INC(smc, cork_cnt);
 				mod_delayed_work(smc->conn.lgr->tx_wq,
 						 &smc->conn.tx_work, 0);
 			}
@@ -2383,7 +2387,7 @@ static ssize_t smc_sendpage(struct socket *sock, struct page *page,
 		rc = kernel_sendpage(smc->clcsock, page, offset,
 				     size, flags);
 	} else {
-		SMC_STAT_INC(!smc->conn.lnk, sendpage_cnt);
+		SMC_STAT_INC(smc, sendpage_cnt);
 		rc = sock_no_sendpage(sock, page, offset, size, flags);
 	}
@@ -2434,7 +2438,7 @@ static ssize_t smc_splice_read(struct socket *sock, loff_t *ppos,
 			flags = MSG_DONTWAIT;
 		else
 			flags = 0;
-		SMC_STAT_INC(!smc->conn.lnk, splice_cnt);
+		SMC_STAT_INC(smc, splice_cnt);
 		rc = smc_rx_recvmsg(smc, NULL, pipe, len, flags);
 	}
 out:
@@ -2523,6 +2527,16 @@ static void __net_exit smc_net_exit(struct net *net)
 	smc_pnet_net_exit(net);
 }

+static __net_init int smc_net_stat_init(struct net *net)
+{
+	return smc_stats_init(net);
+}
+
+static void __net_exit smc_net_stat_exit(struct net *net)
+{
+	smc_stats_exit(net);
+}
+
 static struct pernet_operations smc_net_ops = {
 	.init = smc_net_init,
 	.exit = smc_net_exit,
@@ -2530,6 +2544,11 @@ static struct pernet_operations smc_net_ops = {
 	.size = sizeof(struct smc_net),
 };

+static struct pernet_operations smc_net_stat_ops = {
+	.init = smc_net_stat_init,
+	.exit = smc_net_stat_exit,
+};
+
 static int __init smc_init(void)
 {
 	int rc;
@@ -2538,6 +2557,10 @@ static int __init smc_init(void)
 	if (rc)
 		return rc;

+	rc = register_pernet_subsys(&smc_net_stat_ops);
+	if (rc)
+		return rc;
+
 	smc_ism_init();
 	smc_clc_init();
@@ -2558,16 +2581,10 @@ static int __init smc_init(void)
 	if (!smc_close_wq)
 		goto out_alloc_hs_wq;

-	rc = smc_stats_init();
-	if (rc) {
-		pr_err("%s: smc_stats_init fails with %d\n", __func__, rc);
-		goto out_alloc_wqs;
-	}
-
 	rc = smc_core_init();
 	if (rc) {
 		pr_err("%s: smc_core_init fails with %d\n", __func__, rc);
-		goto out_smc_stat;
+		goto out_alloc_wqs;
 	}

 	rc = smc_llc_init();
@@ -2619,8 +2636,6 @@ static int __init smc_init(void)
 	proto_unregister(&smc_proto);
 out_core:
 	smc_core_exit();
-out_smc_stat:
-	smc_stats_exit();
 out_alloc_wqs:
 	destroy_workqueue(smc_close_wq);
 out_alloc_hs_wq:
@@ -2643,11 +2658,11 @@ static void __exit smc_exit(void)
 	smc_ib_unregister_client();
 	destroy_workqueue(smc_close_wq);
 	destroy_workqueue(smc_hs_wq);
-	smc_stats_exit();
 	proto_unregister(&smc_proto6);
 	proto_unregister(&smc_proto);
 	smc_pnet_exit();
 	smc_nl_exit();
+	unregister_pernet_subsys(&smc_net_stat_ops);
 	unregister_pernet_subsys(&smc_net_ops);
 	rcu_barrier();
 }
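Registering smc_net_stat_ops is what makes the statistics per namespace: the pernet core calls the .init hook for the initial namespace and for every namespace created later, and the .exit hook when a namespace is torn down, which is why the one-time smc_stats_init()/smc_stats_exit() calls could be dropped from smc_init()/smc_exit() above. A hedged, self-contained sketch of that generic pattern with illustrative names (not code from the patch):

/* Illustrative only: the generic pernet-subsystem pattern this patch follows. */
static __net_init int example_stat_init(struct net *net)
{
	/* allocate this namespace's private state, e.g. percpu counters */
	return 0;
}

static void __net_exit example_stat_exit(struct net *net)
{
	/* release whatever .init allocated for this namespace */
}

static struct pernet_operations example_stat_ops = {
	.init = example_stat_init,
	.exit = example_stat_exit,
};

/*
 * register_pernet_subsys(&example_stat_ops) runs .init for all existing and
 * all future namespaces; unregister_pernet_subsys() runs .exit for every
 * namespace that still exists.
 */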
@@ -2058,8 +2058,8 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
 		/* check for reusable slot in the link group */
 		buf_desc = smc_buf_get_slot(bufsize_short, lock, buf_list);
 		if (buf_desc) {
-			SMC_STAT_RMB_SIZE(is_smcd, is_rmb, bufsize);
-			SMC_STAT_BUF_REUSE(is_smcd, is_rmb);
+			SMC_STAT_RMB_SIZE(smc, is_smcd, is_rmb, bufsize);
+			SMC_STAT_BUF_REUSE(smc, is_smcd, is_rmb);
 			memset(buf_desc->cpu_addr, 0, bufsize);
 			break; /* found reusable slot */
 		}
@@ -2074,13 +2074,13 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
 		if (IS_ERR(buf_desc)) {
 			if (!is_dgraded) {
 				is_dgraded = true;
-				SMC_STAT_RMB_DOWNGRADED(is_smcd, is_rmb);
+				SMC_STAT_RMB_DOWNGRADED(smc, is_smcd, is_rmb);
 			}
 			continue;
 		}

-		SMC_STAT_RMB_ALLOC(is_smcd, is_rmb);
-		SMC_STAT_RMB_SIZE(is_smcd, is_rmb, bufsize);
+		SMC_STAT_RMB_ALLOC(smc, is_smcd, is_rmb);
+		SMC_STAT_RMB_SIZE(smc, is_smcd, is_rmb, bufsize);
 		buf_desc->used = 1;
 		mutex_lock(lock);
 		list_add(&buf_desc->list, buf_list);
@@ -228,7 +228,7 @@ static int smc_rx_recv_urg(struct smc_sock *smc, struct msghdr *msg, int len,
 	    conn->urg_state == SMC_URG_READ)
 		return -EINVAL;

-	SMC_STAT_INC(!conn->lnk, urg_data_cnt);
+	SMC_STAT_INC(smc, urg_data_cnt);
 	if (conn->urg_state == SMC_URG_VALID) {
 		if (!(flags & MSG_PEEK))
 			smc->conn.urg_state = SMC_URG_READ;
@@ -307,10 +307,10 @@ int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg,
 	readable = atomic_read(&conn->bytes_to_rcv);
 	if (readable >= conn->rmb_desc->len)
-		SMC_STAT_RMB_RX_FULL(!conn->lnk);
+		SMC_STAT_RMB_RX_FULL(smc, !conn->lnk);

 	if (len < readable)
-		SMC_STAT_RMB_RX_SIZE_SMALL(!conn->lnk);
+		SMC_STAT_RMB_RX_SIZE_SMALL(smc, !conn->lnk);
 	/* we currently use 1 RMBE per RMB, so RMBE == RMB base addr */
 	rcvbuf_base = conn->rx_off + conn->rmb_desc->cpu_addr;
@@ -18,24 +18,28 @@
 #include "smc_netlink.h"
 #include "smc_stats.h"

-/* serialize fallback reason statistic gathering */
-DEFINE_MUTEX(smc_stat_fback_rsn);
-struct smc_stats __percpu *smc_stats;	/* per cpu counters for SMC */
-struct smc_stats_reason fback_rsn;
-
-int __init smc_stats_init(void)
+int smc_stats_init(struct net *net)
 {
-	memset(&fback_rsn, 0, sizeof(fback_rsn));
-	smc_stats = alloc_percpu(struct smc_stats);
-	if (!smc_stats)
-		return -ENOMEM;
+	net->smc.fback_rsn = kzalloc(sizeof(*net->smc.fback_rsn), GFP_KERNEL);
+	if (!net->smc.fback_rsn)
+		goto err_fback;
+	net->smc.smc_stats = alloc_percpu(struct smc_stats);
+	if (!net->smc.smc_stats)
+		goto err_stats;
+	mutex_init(&net->smc.mutex_fback_rsn);
 	return 0;
+
+err_stats:
+	kfree(net->smc.fback_rsn);
+err_fback:
+	return -ENOMEM;
 }

-void smc_stats_exit(void)
+void smc_stats_exit(struct net *net)
 {
-	free_percpu(smc_stats);
+	kfree(net->smc.fback_rsn);
+	if (net->smc.smc_stats)
+		free_percpu(net->smc.smc_stats);
 }

 static int smc_nl_fill_stats_rmb_data(struct sk_buff *skb,
@@ -256,6 +260,7 @@ int smc_nl_get_stats(struct sk_buff *skb,
 		     struct netlink_callback *cb)
 {
 	struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
+	struct net *net = sock_net(skb->sk);
 	struct smc_stats *stats;
 	struct nlattr *attrs;
 	int cpu, i, size;
@@ -279,7 +284,7 @@ int smc_nl_get_stats(struct sk_buff *skb,
 		goto erralloc;
 	size = sizeof(*stats) / sizeof(u64);
 	for_each_possible_cpu(cpu) {
-		src = (u64 *)per_cpu_ptr(smc_stats, cpu);
+		src = (u64 *)per_cpu_ptr(net->smc.smc_stats, cpu);
 		sum = (u64 *)stats;
 		for (i = 0; i < size; i++)
 			*(sum++) += *(src++);
@@ -318,6 +323,7 @@ static int smc_nl_get_fback_details(struct sk_buff *skb,
 				    bool is_srv)
 {
 	struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
+	struct net *net = sock_net(skb->sk);
 	int cnt_reported = cb_ctx->pos[2];
 	struct smc_stats_fback *trgt_arr;
 	struct nlattr *attrs;
@@ -325,9 +331,9 @@ static int smc_nl_get_fback_details(struct sk_buff *skb,
 	void *nlh;

 	if (is_srv)
-		trgt_arr = &fback_rsn.srv[0];
+		trgt_arr = &net->smc.fback_rsn->srv[0];
 	else
-		trgt_arr = &fback_rsn.clnt[0];
+		trgt_arr = &net->smc.fback_rsn->clnt[0];
 	if (!trgt_arr[pos].fback_code)
 		return -ENODATA;
 	nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
@@ -342,11 +348,11 @@ static int smc_nl_get_fback_details(struct sk_buff *skb,
 		goto errattr;
 	if (!cnt_reported) {
 		if (nla_put_u64_64bit(skb, SMC_NLA_FBACK_STATS_SRV_CNT,
-				      fback_rsn.srv_fback_cnt,
+				      net->smc.fback_rsn->srv_fback_cnt,
 				      SMC_NLA_FBACK_STATS_PAD))
 			goto errattr;
 		if (nla_put_u64_64bit(skb, SMC_NLA_FBACK_STATS_CLNT_CNT,
-				      fback_rsn.clnt_fback_cnt,
+				      net->smc.fback_rsn->clnt_fback_cnt,
 				      SMC_NLA_FBACK_STATS_PAD))
 			goto errattr;
 		cnt_reported = 1;
@@ -375,12 +381,13 @@ static int smc_nl_get_fback_details(struct sk_buff *skb,
 int smc_nl_get_fback_stats(struct sk_buff *skb, struct netlink_callback *cb)
 {
 	struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
+	struct net *net = sock_net(skb->sk);
 	int rc_srv = 0, rc_clnt = 0, k;
 	int skip_serv = cb_ctx->pos[1];
 	int snum = cb_ctx->pos[0];
 	bool is_srv = true;

-	mutex_lock(&smc_stat_fback_rsn);
+	mutex_lock(&net->smc.mutex_fback_rsn);
 	for (k = 0; k < SMC_MAX_FBACK_RSN_CNT; k++) {
 		if (k < snum)
 			continue;
@@ -399,7 +406,7 @@ int smc_nl_get_fback_stats(struct sk_buff *skb, struct netlink_callback *cb)
 		if (rc_clnt == ENODATA && rc_srv == ENODATA)
 			break;
 	}
-	mutex_unlock(&smc_stat_fback_rsn);
+	mutex_unlock(&net->smc.mutex_fback_rsn);
 	cb_ctx->pos[1] = skip_serv;
 	cb_ctx->pos[0] = k;
 	return skb->len;
@@ -21,10 +21,6 @@
 #define SMC_MAX_FBACK_RSN_CNT	30

-extern struct smc_stats __percpu *smc_stats;	/* per cpu counters for SMC */
-extern struct smc_stats_reason fback_rsn;
-extern struct mutex smc_stat_fback_rsn;
-
 enum {
 	SMC_BUF_8K,
 	SMC_BUF_16K,
@@ -43,7 +39,7 @@ struct smc_stats_fback {
 	u16	count;
 };

-struct smc_stats_reason {
+struct smc_stats_rsn {
 	struct smc_stats_fback	srv[SMC_MAX_FBACK_RSN_CNT];
 	struct smc_stats_fback	clnt[SMC_MAX_FBACK_RSN_CNT];
 	u64			srv_fback_cnt;
@@ -92,122 +88,135 @@ struct smc_stats {
 	u64	srv_hshake_err_cnt;
 };

-#define SMC_STAT_PAYLOAD_SUB(_tech, key, _len, _rc) \
+#define SMC_STAT_PAYLOAD_SUB(_smc_stats, _tech, key, _len, _rc) \
 do { \
+	typeof(_smc_stats) stats = (_smc_stats); \
 	typeof(_tech) t = (_tech); \
 	typeof(_len) l = (_len); \
 	int _pos = fls64((l) >> 13); \
 	typeof(_rc) r = (_rc); \
 	int m = SMC_BUF_MAX - 1; \
-	this_cpu_inc((*smc_stats).smc[t].key ## _cnt); \
+	this_cpu_inc((*stats).smc[t].key ## _cnt); \
 	if (r <= 0) \
 		break; \
 	_pos = (_pos < m) ? ((l == 1 << (_pos + 12)) ? _pos - 1 : _pos) : m; \
-	this_cpu_inc((*smc_stats).smc[t].key ## _pd.buf[_pos]); \
-	this_cpu_add((*smc_stats).smc[t].key ## _bytes, r); \
+	this_cpu_inc((*stats).smc[t].key ## _pd.buf[_pos]); \
+	this_cpu_add((*stats).smc[t].key ## _bytes, r); \
 } \
 while (0)

 #define SMC_STAT_TX_PAYLOAD(_smc, length, rcode) \
 do { \
 	typeof(_smc) __smc = _smc; \
+	struct net *_net = sock_net(&__smc->sk); \
+	struct smc_stats __percpu *_smc_stats = _net->smc.smc_stats; \
 	typeof(length) _len = (length); \
 	typeof(rcode) _rc = (rcode); \
 	bool is_smcd = !__smc->conn.lnk; \
 	if (is_smcd) \
-		SMC_STAT_PAYLOAD_SUB(SMC_TYPE_D, tx, _len, _rc); \
+		SMC_STAT_PAYLOAD_SUB(_smc_stats, SMC_TYPE_D, tx, _len, _rc); \
 	else \
-		SMC_STAT_PAYLOAD_SUB(SMC_TYPE_R, tx, _len, _rc); \
+		SMC_STAT_PAYLOAD_SUB(_smc_stats, SMC_TYPE_R, tx, _len, _rc); \
 } \
 while (0)

 #define SMC_STAT_RX_PAYLOAD(_smc, length, rcode) \
 do { \
 	typeof(_smc) __smc = _smc; \
+	struct net *_net = sock_net(&__smc->sk); \
+	struct smc_stats __percpu *_smc_stats = _net->smc.smc_stats; \
 	typeof(length) _len = (length); \
 	typeof(rcode) _rc = (rcode); \
 	bool is_smcd = !__smc->conn.lnk; \
 	if (is_smcd) \
-		SMC_STAT_PAYLOAD_SUB(SMC_TYPE_D, rx, _len, _rc); \
+		SMC_STAT_PAYLOAD_SUB(_smc_stats, SMC_TYPE_D, rx, _len, _rc); \
 	else \
-		SMC_STAT_PAYLOAD_SUB(SMC_TYPE_R, rx, _len, _rc); \
+		SMC_STAT_PAYLOAD_SUB(_smc_stats, SMC_TYPE_R, rx, _len, _rc); \
 } \
 while (0)

-#define SMC_STAT_RMB_SIZE_SUB(_tech, k, _len) \
+#define SMC_STAT_RMB_SIZE_SUB(_smc_stats, _tech, k, _len) \
 do { \
 	typeof(_len) _l = (_len); \
 	typeof(_tech) t = (_tech); \
 	int _pos = fls((_l) >> 13); \
 	int m = SMC_BUF_MAX - 1; \
 	_pos = (_pos < m) ? ((_l == 1 << (_pos + 12)) ? _pos - 1 : _pos) : m; \
-	this_cpu_inc((*smc_stats).smc[t].k ## _rmbsize.buf[_pos]); \
+	this_cpu_inc((*(_smc_stats)).smc[t].k ## _rmbsize.buf[_pos]); \
 } \
 while (0)

-#define SMC_STAT_RMB_SUB(type, t, key) \
-	this_cpu_inc((*smc_stats).smc[t].rmb ## _ ## key.type ## _cnt)
+#define SMC_STAT_RMB_SUB(_smc_stats, type, t, key) \
+	this_cpu_inc((*(_smc_stats)).smc[t].rmb ## _ ## key.type ## _cnt)

-#define SMC_STAT_RMB_SIZE(_is_smcd, _is_rx, _len) \
+#define SMC_STAT_RMB_SIZE(_smc, _is_smcd, _is_rx, _len) \
 do { \
+	struct net *_net = sock_net(&(_smc)->sk); \
+	struct smc_stats __percpu *_smc_stats = _net->smc.smc_stats; \
 	typeof(_is_smcd) is_d = (_is_smcd); \
 	typeof(_is_rx) is_r = (_is_rx); \
 	typeof(_len) l = (_len); \
 	if ((is_d) && (is_r)) \
-		SMC_STAT_RMB_SIZE_SUB(SMC_TYPE_D, rx, l); \
+		SMC_STAT_RMB_SIZE_SUB(_smc_stats, SMC_TYPE_D, rx, l); \
 	if ((is_d) && !(is_r)) \
-		SMC_STAT_RMB_SIZE_SUB(SMC_TYPE_D, tx, l); \
+		SMC_STAT_RMB_SIZE_SUB(_smc_stats, SMC_TYPE_D, tx, l); \
 	if (!(is_d) && (is_r)) \
-		SMC_STAT_RMB_SIZE_SUB(SMC_TYPE_R, rx, l); \
+		SMC_STAT_RMB_SIZE_SUB(_smc_stats, SMC_TYPE_R, rx, l); \
 	if (!(is_d) && !(is_r)) \
-		SMC_STAT_RMB_SIZE_SUB(SMC_TYPE_R, tx, l); \
+		SMC_STAT_RMB_SIZE_SUB(_smc_stats, SMC_TYPE_R, tx, l); \
 } \
 while (0)

-#define SMC_STAT_RMB(type, _is_smcd, _is_rx) \
+#define SMC_STAT_RMB(_smc, type, _is_smcd, _is_rx) \
 do { \
+	struct net *net = sock_net(&(_smc)->sk); \
+	struct smc_stats __percpu *_smc_stats = net->smc.smc_stats; \
 	typeof(_is_smcd) is_d = (_is_smcd); \
 	typeof(_is_rx) is_r = (_is_rx); \
 	if ((is_d) && (is_r)) \
-		SMC_STAT_RMB_SUB(type, SMC_TYPE_D, rx); \
+		SMC_STAT_RMB_SUB(_smc_stats, type, SMC_TYPE_D, rx); \
 	if ((is_d) && !(is_r)) \
-		SMC_STAT_RMB_SUB(type, SMC_TYPE_D, tx); \
+		SMC_STAT_RMB_SUB(_smc_stats, type, SMC_TYPE_D, tx); \
 	if (!(is_d) && (is_r)) \
-		SMC_STAT_RMB_SUB(type, SMC_TYPE_R, rx); \
+		SMC_STAT_RMB_SUB(_smc_stats, type, SMC_TYPE_R, rx); \
 	if (!(is_d) && !(is_r)) \
-		SMC_STAT_RMB_SUB(type, SMC_TYPE_R, tx); \
+		SMC_STAT_RMB_SUB(_smc_stats, type, SMC_TYPE_R, tx); \
 } \
 while (0)

-#define SMC_STAT_BUF_REUSE(is_smcd, is_rx) \
-	SMC_STAT_RMB(reuse, is_smcd, is_rx)
+#define SMC_STAT_BUF_REUSE(smc, is_smcd, is_rx) \
+	SMC_STAT_RMB(smc, reuse, is_smcd, is_rx)

-#define SMC_STAT_RMB_ALLOC(is_smcd, is_rx) \
-	SMC_STAT_RMB(alloc, is_smcd, is_rx)
+#define SMC_STAT_RMB_ALLOC(smc, is_smcd, is_rx) \
+	SMC_STAT_RMB(smc, alloc, is_smcd, is_rx)

-#define SMC_STAT_RMB_DOWNGRADED(is_smcd, is_rx) \
-	SMC_STAT_RMB(dgrade, is_smcd, is_rx)
+#define SMC_STAT_RMB_DOWNGRADED(smc, is_smcd, is_rx) \
+	SMC_STAT_RMB(smc, dgrade, is_smcd, is_rx)

-#define SMC_STAT_RMB_TX_PEER_FULL(is_smcd) \
-	SMC_STAT_RMB(buf_full_peer, is_smcd, false)
+#define SMC_STAT_RMB_TX_PEER_FULL(smc, is_smcd) \
+	SMC_STAT_RMB(smc, buf_full_peer, is_smcd, false)

-#define SMC_STAT_RMB_TX_FULL(is_smcd) \
-	SMC_STAT_RMB(buf_full, is_smcd, false)
+#define SMC_STAT_RMB_TX_FULL(smc, is_smcd) \
+	SMC_STAT_RMB(smc, buf_full, is_smcd, false)

-#define SMC_STAT_RMB_TX_PEER_SIZE_SMALL(is_smcd) \
-	SMC_STAT_RMB(buf_size_small_peer, is_smcd, false)
+#define SMC_STAT_RMB_TX_PEER_SIZE_SMALL(smc, is_smcd) \
+	SMC_STAT_RMB(smc, buf_size_small_peer, is_smcd, false)

-#define SMC_STAT_RMB_TX_SIZE_SMALL(is_smcd) \
-	SMC_STAT_RMB(buf_size_small, is_smcd, false)
+#define SMC_STAT_RMB_TX_SIZE_SMALL(smc, is_smcd) \
+	SMC_STAT_RMB(smc, buf_size_small, is_smcd, false)

-#define SMC_STAT_RMB_RX_SIZE_SMALL(is_smcd) \
-	SMC_STAT_RMB(buf_size_small, is_smcd, true)
+#define SMC_STAT_RMB_RX_SIZE_SMALL(smc, is_smcd) \
+	SMC_STAT_RMB(smc, buf_size_small, is_smcd, true)

-#define SMC_STAT_RMB_RX_FULL(is_smcd) \
-	SMC_STAT_RMB(buf_full, is_smcd, true)
+#define SMC_STAT_RMB_RX_FULL(smc, is_smcd) \
+	SMC_STAT_RMB(smc, buf_full, is_smcd, true)

-#define SMC_STAT_INC(is_smcd, type) \
+#define SMC_STAT_INC(_smc, type) \
 do { \
+	typeof(_smc) __smc = _smc; \
+	bool is_smcd = !(__smc)->conn.lnk; \
+	struct net *net = sock_net(&(__smc)->sk); \
+	struct smc_stats __percpu *smc_stats = net->smc.smc_stats; \
 	if ((is_smcd)) \
 		this_cpu_inc(smc_stats->smc[SMC_TYPE_D].type); \
 	else \
@@ -215,11 +224,12 @@ do { \
 } \
 while (0)

-#define SMC_STAT_CLNT_SUCC_INC(_aclc) \
+#define SMC_STAT_CLNT_SUCC_INC(net, _aclc) \
 do { \
 	typeof(_aclc) acl = (_aclc); \
 	bool is_v2 = (acl->hdr.version == SMC_V2); \
 	bool is_smcd = (acl->hdr.typev1 == SMC_TYPE_D); \
+	struct smc_stats __percpu *smc_stats = (net)->smc.smc_stats; \
 	if (is_v2 && is_smcd) \
 		this_cpu_inc(smc_stats->smc[SMC_TYPE_D].clnt_v2_succ_cnt); \
 	else if (is_v2 && !is_smcd) \
@@ -231,11 +241,12 @@ do { \
 } \
 while (0)

-#define SMC_STAT_SERV_SUCC_INC(_ini) \
+#define SMC_STAT_SERV_SUCC_INC(net, _ini) \
 do { \
 	typeof(_ini) i = (_ini); \
 	bool is_v2 = (i->smcd_version & SMC_V2); \
 	bool is_smcd = (i->is_smcd); \
+	typeof(net->smc.smc_stats) smc_stats = (net)->smc.smc_stats; \
 	if (is_v2 && is_smcd) \
 		this_cpu_inc(smc_stats->smc[SMC_TYPE_D].srv_v2_succ_cnt); \
 	else if (is_v2 && !is_smcd) \
@@ -249,7 +260,7 @@ while (0)
 int smc_nl_get_stats(struct sk_buff *skb, struct netlink_callback *cb);
 int smc_nl_get_fback_stats(struct sk_buff *skb, struct netlink_callback *cb);

-int smc_stats_init(void) __init;
-void smc_stats_exit(void);
+int smc_stats_init(struct net *net);
+void smc_stats_exit(struct net *net);

 #endif /* NET_SMC_SMC_STATS_H_ */
@@ -47,7 +47,7 @@ static void smc_tx_write_space(struct sock *sk)
 	/* similar to sk_stream_write_space */
 	if (atomic_read(&smc->conn.sndbuf_space) && sock) {
 		if (test_bit(SOCK_NOSPACE, &sock->flags))
-			SMC_STAT_RMB_TX_FULL(!smc->conn.lnk);
+			SMC_STAT_RMB_TX_FULL(smc, !smc->conn.lnk);
 		clear_bit(SOCK_NOSPACE, &sock->flags);
 		rcu_read_lock();
 		wq = rcu_dereference(sk->sk_wq);
@@ -155,13 +155,13 @@ int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len)
 	}

 	if (len > conn->sndbuf_desc->len)
-		SMC_STAT_RMB_TX_SIZE_SMALL(!conn->lnk);
+		SMC_STAT_RMB_TX_SIZE_SMALL(smc, !conn->lnk);

 	if (len > conn->peer_rmbe_size)
-		SMC_STAT_RMB_TX_PEER_SIZE_SMALL(!conn->lnk);
+		SMC_STAT_RMB_TX_PEER_SIZE_SMALL(smc, !conn->lnk);

 	if (msg->msg_flags & MSG_OOB)
-		SMC_STAT_INC(!conn->lnk, urg_data_cnt);
+		SMC_STAT_INC(smc, urg_data_cnt);

 	while (msg_data_left(msg)) {
 		if (sk->sk_state == SMC_INIT)
@@ -432,7 +432,9 @@ static int smc_tx_rdma_writes(struct smc_connection *conn,
 	/* cf. snd_wnd */
 	rmbespace = atomic_read(&conn->peer_rmbe_space);
 	if (rmbespace <= 0) {
-		SMC_STAT_RMB_TX_PEER_FULL(!conn->lnk);
+		struct smc_sock *smc = container_of(conn, struct smc_sock,
+						    conn);
+		SMC_STAT_RMB_TX_PEER_FULL(smc, !conn->lnk);
 		return 0;
 	}
 	smc_curs_copy(&prod, &conn->local_tx_ctrl.prod, conn);
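smc_tx_rdma_writes() only has the struct smc_connection at hand, so the hunk above recovers the owning socket with container_of before updating the per-namespace counter. A hedged sketch of that idiom in isolation; it relies on struct smc_sock embedding the connection as a member named conn, which the call above implies, and the helper name is illustrative only:

/*
 * Illustrative helper (not part of the patch): recover the enclosing
 * smc_sock from a pointer to its embedded 'conn' member.
 */
static inline struct smc_sock *smc_sock_from_conn(struct smc_connection *conn)
{
	return container_of(conn, struct smc_sock, conn);
}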