Commit a466ca4e authored by Andreas Dilger's avatar Andreas Dilger Committed by Greg Kroah-Hartman

staging: lustre: lprocfs: cleanup stats locking code

Add comment blocks on lprocfs_stats_lock() and lprocfs_stats_unlock().
Move common NOPERCPU code out of the switch() statements to reduce
code size and complexity, since it doesn't depend on the opc at all.

Replace switch() in lprocfs_stats_unlock() with a simple if/else,
since the lock opc was already checked in lprocfs_stats_lock().

Add an enum for the lprocfs_stats_lock() operations to make it clear
what the valid values are and allow compiler checking.
Signed-off-by: default avatarAndreas Dilger <andreas.dilger@intel.com>
Intel-bug-id: https://jira.hpdd.intel.com/browse/LU-5946
Reviewed-on: http://review.whamcloud.com/12872
Reviewed-by: Bobi Jam <bobijam@hotmail.com>
Reviewed-by: default avatarJohn L. Hammond <john.hammond@intel.com>
Reviewed-by: default avatarOleg Drokin <oleg.drokin@intel.com>
Signed-off-by: default avatarJames Simmons <jsimmons@infradead.org>
Signed-off-by: default avatarGreg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 6ffc4b3b
...@@ -165,8 +165,10 @@ struct lprocfs_percpu { ...@@ -165,8 +165,10 @@ struct lprocfs_percpu {
struct lprocfs_counter lp_cntr[0]; struct lprocfs_counter lp_cntr[0];
}; };
/*
 * Operations accepted by lprocfs_stats_lock()/lprocfs_stats_unlock().
 * An enum (rather than bare #defines) so the compiler can type-check
 * the opc argument passed to the lock/unlock helpers.
 */
enum lprocfs_stats_lock_ops {
	LPROCFS_GET_NUM_CPU	= 0x0001, /* number allocated per-CPU stats */
	LPROCFS_GET_SMP_ID	= 0x0002, /* current stat to be updated */
};
enum lprocfs_stats_flags { enum lprocfs_stats_flags {
LPROCFS_STATS_FLAG_NONE = 0x0000, /* per cpu counter */ LPROCFS_STATS_FLAG_NONE = 0x0000, /* per cpu counter */
...@@ -371,31 +373,53 @@ int lprocfs_write_frac_helper(const char __user *buffer, ...@@ -371,31 +373,53 @@ int lprocfs_write_frac_helper(const char __user *buffer,
int lprocfs_read_frac_helper(char *buffer, unsigned long count, int lprocfs_read_frac_helper(char *buffer, unsigned long count,
long val, int mult); long val, int mult);
int lprocfs_stats_alloc_one(struct lprocfs_stats *stats, unsigned int cpuid); int lprocfs_stats_alloc_one(struct lprocfs_stats *stats, unsigned int cpuid);
/*
* \return value /**
* < 0 : on error (only possible for opc as LPROCFS_GET_SMP_ID) * Lock statistics structure for access, possibly only on this CPU.
*
* The statistics struct may be allocated with per-CPU structures for
* efficient concurrent update (usually only on server-wide stats), or
* as a single global struct (e.g. for per-client or per-job statistics),
* so the required locking depends on the type of structure allocated.
*
* For per-CPU statistics, pin the thread to the current cpuid so that
* will only access the statistics for that CPU. If the stats structure
* for the current CPU has not been allocated (or previously freed),
* allocate it now. The per-CPU statistics do not need locking since
* the thread is pinned to the CPU during update.
*
* For global statistics, lock the stats structure to prevent concurrent update.
*
* \param[in] stats statistics structure to lock
* \param[in] opc type of operation:
* LPROCFS_GET_SMP_ID: "lock" and return current CPU index
* for incrementing statistics for that CPU
* LPROCFS_GET_NUM_CPU: "lock" and return number of used
* CPU indices to iterate over all indices
* \param[out] flags CPU interrupt saved state for IRQ-safe locking
*
* \retval cpuid of current thread or number of allocated structs
* \retval negative on error (only for opc LPROCFS_GET_SMP_ID + per-CPU stats)
*/ */
static inline int lprocfs_stats_lock(struct lprocfs_stats *stats, int opc, static inline int lprocfs_stats_lock(struct lprocfs_stats *stats,
enum lprocfs_stats_lock_ops opc,
unsigned long *flags) unsigned long *flags)
{ {
int rc = 0;
switch (opc) {
default:
LBUG();
case LPROCFS_GET_SMP_ID:
if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) { if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) {
if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
spin_lock_irqsave(&stats->ls_lock, *flags); spin_lock_irqsave(&stats->ls_lock, *flags);
else else
spin_lock(&stats->ls_lock); spin_lock(&stats->ls_lock);
return 0; return opc == LPROCFS_GET_NUM_CPU ? 1 : 0;
} else { }
switch (opc) {
case LPROCFS_GET_SMP_ID: {
unsigned int cpuid = get_cpu(); unsigned int cpuid = get_cpu();
if (unlikely(!stats->ls_percpu[cpuid])) { if (unlikely(!stats->ls_percpu[cpuid])) {
rc = lprocfs_stats_alloc_one(stats, cpuid); int rc = lprocfs_stats_alloc_one(stats, cpuid);
if (rc < 0) { if (rc < 0) {
put_cpu(); put_cpu();
return rc; return rc;
...@@ -403,46 +427,38 @@ static inline int lprocfs_stats_lock(struct lprocfs_stats *stats, int opc, ...@@ -403,46 +427,38 @@ static inline int lprocfs_stats_lock(struct lprocfs_stats *stats, int opc,
} }
return cpuid; return cpuid;
} }
case LPROCFS_GET_NUM_CPU: case LPROCFS_GET_NUM_CPU:
if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) {
if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
spin_lock_irqsave(&stats->ls_lock, *flags);
else
spin_lock(&stats->ls_lock);
return 1;
}
return stats->ls_biggest_alloc_num; return stats->ls_biggest_alloc_num;
default:
LBUG();
} }
} }
static inline void lprocfs_stats_unlock(struct lprocfs_stats *stats, int opc, /**
* Unlock statistics structure after access.
*
* Unlock the lock acquired via lprocfs_stats_lock() for global statistics,
* or unpin this thread from the current cpuid for per-CPU statistics.
*
* This function must be called using the same arguments as used when calling
* lprocfs_stats_lock() so that the correct operation can be performed.
*
* \param[in] stats statistics structure to unlock
* \param[in] opc type of operation (current cpuid or number of structs)
* \param[in] flags CPU interrupt saved state for IRQ-safe locking
*/
static inline void lprocfs_stats_unlock(struct lprocfs_stats *stats,
enum lprocfs_stats_lock_ops opc,
unsigned long *flags) unsigned long *flags)
{ {
switch (opc) {
default:
LBUG();
case LPROCFS_GET_SMP_ID:
if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) { if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) {
if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
spin_unlock_irqrestore(&stats->ls_lock, *flags); spin_unlock_irqrestore(&stats->ls_lock, *flags);
else else
spin_unlock(&stats->ls_lock); spin_unlock(&stats->ls_lock);
} else { } else if (opc == LPROCFS_GET_SMP_ID) {
put_cpu(); put_cpu();
} }
return;
case LPROCFS_GET_NUM_CPU:
if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) {
if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
spin_unlock_irqrestore(&stats->ls_lock, *flags);
else
spin_unlock(&stats->ls_lock);
}
return;
}
} }
static inline unsigned int static inline unsigned int
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment