Commit 2dc09ea8 authored by Liang Zhen's avatar Liang Zhen Committed by Greg Kroah-Hartman

staging: lustre: libcfs: add lock-class for cfs_percpt_lock

initialise lock-class for each sublock of cfs_percpt_lock
to eliminate the false alarm "possible recursive locking detected"
Signed-off-by: Liang Zhen <liang.zhen@intel.com>
Intel-bug-id: https://jira.hpdd.intel.com/browse/LU-6432
Reviewed-on: http://review.whamcloud.com/14368
Reviewed-by: James Simmons <uja.ornl@yahoo.com>
Reviewed-by: Andreas Dilger <andreas.dilger@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 762d266d
...@@ -256,8 +256,8 @@ struct cfs_percpt_lock { ...@@ -256,8 +256,8 @@ struct cfs_percpt_lock {
* create a cpu-partition lock based on CPU partition table \a cptab, * create a cpu-partition lock based on CPU partition table \a cptab,
* each private lock has extra \a psize bytes padding data * each private lock has extra \a psize bytes padding data
*/ */
struct cfs_percpt_lock *cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab); struct cfs_percpt_lock *cfs_percpt_lock_create(struct cfs_cpt_table *cptab,
struct lock_class_key *keys);
/* destroy a cpu-partition lock */ /* destroy a cpu-partition lock */
void cfs_percpt_lock_free(struct cfs_percpt_lock *pcl); void cfs_percpt_lock_free(struct cfs_percpt_lock *pcl);
...@@ -267,6 +267,21 @@ void cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index); ...@@ -267,6 +267,21 @@ void cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index);
/* unlock private lock \a index of \a pcl */ /* unlock private lock \a index of \a pcl */
void cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index); void cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index);
#define CFS_PERCPT_LOCK_KEYS 256
/* NB: don't allocate keys dynamically, lockdep needs them to be in ".data" */
#define cfs_percpt_lock_alloc(cptab) \
({ \
static struct lock_class_key ___keys[CFS_PERCPT_LOCK_KEYS]; \
struct cfs_percpt_lock *___lk; \
\
if (cfs_cpt_number(cptab) > CFS_PERCPT_LOCK_KEYS) \
___lk = cfs_percpt_lock_create(cptab, NULL); \
else \
___lk = cfs_percpt_lock_create(cptab, ___keys); \
___lk; \
})
/** /**
* iterate over all CPU partitions in \a cptab * iterate over all CPU partitions in \a cptab
*/ */
......
...@@ -49,7 +49,8 @@ EXPORT_SYMBOL(cfs_percpt_lock_free); ...@@ -49,7 +49,8 @@ EXPORT_SYMBOL(cfs_percpt_lock_free);
* reason we always allocate cacheline-aligned memory block. * reason we always allocate cacheline-aligned memory block.
*/ */
struct cfs_percpt_lock * struct cfs_percpt_lock *
cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab) cfs_percpt_lock_create(struct cfs_cpt_table *cptab,
struct lock_class_key *keys)
{ {
struct cfs_percpt_lock *pcl; struct cfs_percpt_lock *pcl;
spinlock_t *lock; spinlock_t *lock;
...@@ -67,12 +68,18 @@ cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab) ...@@ -67,12 +68,18 @@ cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab)
return NULL; return NULL;
} }
cfs_percpt_for_each(lock, i, pcl->pcl_locks) if (!keys)
CWARN("Cannot setup class key for percpt lock, you may see recursive locking warnings which are actually fake.\n");
cfs_percpt_for_each(lock, i, pcl->pcl_locks) {
spin_lock_init(lock); spin_lock_init(lock);
if (keys != NULL)
lockdep_set_class(lock, &keys[i]);
}
return pcl; return pcl;
} }
EXPORT_SYMBOL(cfs_percpt_lock_alloc); EXPORT_SYMBOL(cfs_percpt_lock_create);
/** /**
* lock a CPU partition * lock a CPU partition
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment