Commit c4c6bc31 authored by Raghavendra K T, committed by David S. Miller

net: Introduce helper functions to get the per cpu data

Signed-off-by: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 06fb4e70
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -202,10 +202,20 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
 #define NET_ADD_STATS_BH(net, field, adnd) SNMP_ADD_STATS_BH((net)->mib.net_statistics, field, adnd)
 #define NET_ADD_STATS_USER(net, field, adnd) SNMP_ADD_STATS_USER((net)->mib.net_statistics, field, adnd)
 
+u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offct);
+
 unsigned long snmp_fold_field(void __percpu *mib, int offt);
 #if BITS_PER_LONG==32
+u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
+			 size_t syncp_offset);
 u64 snmp_fold_field64(void __percpu *mib, int offt, size_t sync_off);
 #else
+static inline u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
+				       size_t syncp_offset)
+{
+	return snmp_get_cpu_field(mib, cpu, offct);
+}
+
 static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_off)
 {
 	return snmp_fold_field(mib, offt);
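The BITS_PER_LONG==32 split exists because a 64-bit MIB counter cannot be loaded atomically on a 32-bit kernel: the read may tear while the counter is mid-update. The 32-bit snmp_get_cpu_field64() therefore brackets the load with a u64_stats_sync sequence (see the af_inet.c hunk below), while on 64-bit it can simply forward to snmp_get_cpu_field(). As a rough illustration of how a caller supplies the syncp offset — a hypothetical helper, not part of this commit, modeled on the existing snmp_fold_field64() callers in net/ipv4/proc.c:

/* Hypothetical caller (not in this commit): read one CPU's copy of a
 * 64-bit IP counter. struct ipstats_mib holds a u64 counter array plus
 * a u64_stats_sync, so the latter's offset is passed as syncp_offset.
 */
#include <linux/stddef.h>
#include <net/net_namespace.h>
#include <net/snmp.h>
#include <net/ip.h>

static u64 ip_in_octets_on_cpu(struct net *net, int cpu)
{
	/* On 64-bit this is a plain load; on 32-bit it retries until
	 * it observes a consistent u64 value.
	 */
	return snmp_get_cpu_field64(net->mib.ip_statistics, cpu,
				    IPSTATS_MIB_INOCTETS,
				    offsetof(struct ipstats_mib, syncp));
}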
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1452,38 +1452,51 @@ int inet_ctl_sock_create(struct sock **sk, unsigned short family,
 }
 EXPORT_SYMBOL_GPL(inet_ctl_sock_create);
 
+u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offt)
+{
+	return *(((unsigned long *)per_cpu_ptr(mib, cpu)) + offt);
+}
+EXPORT_SYMBOL_GPL(snmp_get_cpu_field);
+
 unsigned long snmp_fold_field(void __percpu *mib, int offt)
 {
 	unsigned long res = 0;
 	int i;
 
 	for_each_possible_cpu(i)
-		res += *(((unsigned long *) per_cpu_ptr(mib, i)) + offt);
+		res += snmp_get_cpu_field(mib, i, offt);
 	return res;
 }
 EXPORT_SYMBOL_GPL(snmp_fold_field);
 
 #if BITS_PER_LONG==32
+u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offt,
+			 size_t syncp_offset)
+{
+	void *bhptr;
+	struct u64_stats_sync *syncp;
+	u64 v;
+	unsigned int start;
+
+	bhptr = per_cpu_ptr(mib, cpu);
+	syncp = (struct u64_stats_sync *)(bhptr + syncp_offset);
+	do {
+		start = u64_stats_fetch_begin_irq(syncp);
+		v = *(((u64 *)bhptr) + offt);
+	} while (u64_stats_fetch_retry_irq(syncp, start));
+
+	return v;
+}
+EXPORT_SYMBOL_GPL(snmp_get_cpu_field64);
+
 u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_offset)
 {
 	u64 res = 0;
 	int cpu;
 
 	for_each_possible_cpu(cpu) {
-		void *bhptr;
-		struct u64_stats_sync *syncp;
-		u64 v;
-		unsigned int start;
-
-		bhptr = per_cpu_ptr(mib, cpu);
-		syncp = (struct u64_stats_sync *)(bhptr + syncp_offset);
-		do {
-			start = u64_stats_fetch_begin_irq(syncp);
-			v = *(((u64 *) bhptr) + offt);
-		} while (u64_stats_fetch_retry_irq(syncp, start));
-
-		res += v;
+		res += snmp_get_cpu_field64(mib, cpu, offt, syncp_offset);
 	}
 	return res;
 }
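The commit message does not spell out the motivation, but exposing per-CPU accessors lets a statistics dumper invert the loop order: walk each CPU's per-cpu block once and accumulate every field from it, instead of calling snmp_fold_field() once per field and re-touching every CPU's cache lines for each counter. A minimal sketch of such an aggregation loop — hypothetical code, assuming a caller-supplied buffer and field count:

/* Hypothetical aggregation sketch (not in this commit): fold all fields
 * of an SNMP MIB while visiting each CPU's per-cpu block exactly once,
 * which is friendlier to the cache than one cross-CPU pass per field.
 */
#include <linux/cpumask.h>
#include <linux/string.h>
#include <net/ip.h>

static void snmp_fold_all_fields(void __percpu *mib, unsigned long *buff,
				 int max_fields)
{
	int cpu, i;

	memset(buff, 0, max_fields * sizeof(unsigned long));
	for_each_possible_cpu(cpu)
		for (i = 0; i < max_fields; i++)
			buff[i] += snmp_get_cpu_field(mib, cpu, i);
}

This per-CPU-outer-loop pattern is the shape later SNMP aggregation optimizations in the kernel take; the sketch above only illustrates the idea.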