Commit a10bf26b authored by Dmitry Kasatkin's avatar Dmitry Kasatkin Committed by Mimi Zohar

ima: replace iint spinlock with rwlock/read_lock

For performance, replace the iint spinlock with rwlock/read_lock.

Eric Paris questioned this change, from spinlocks to rwlocks, saying
"rwlocks have been shown to actually be slower on multi processor
systems in a number of cases due to the cache line bouncing required."

Based on performance measurements compiling the kernel on a cold
boot with multiple jobs with/without this patch, Dmitry Kasatkin
and I found that rwlocks performed better than spinlocks, but only
insignificantly.  For example, with a total compilation time of around
6 minutes, the rwlock version was consistently 1-3 seconds faster.

Changelog v2:
- new patch taken from the 'allocating iint improvements' patch
Signed-off-by: Dmitry Kasatkin <dmitry.kasatkin@intel.com>
Signed-off-by: Mimi Zohar <zohar@us.ibm.com>
parent bf2276d1
...@@ -22,7 +22,7 @@ ...@@ -22,7 +22,7 @@
#include "integrity.h" #include "integrity.h"
static struct rb_root integrity_iint_tree = RB_ROOT; static struct rb_root integrity_iint_tree = RB_ROOT;
static DEFINE_SPINLOCK(integrity_iint_lock); static DEFINE_RWLOCK(integrity_iint_lock);
static struct kmem_cache *iint_cache __read_mostly; static struct kmem_cache *iint_cache __read_mostly;
int iint_initialized; int iint_initialized;
...@@ -35,8 +35,6 @@ static struct integrity_iint_cache *__integrity_iint_find(struct inode *inode) ...@@ -35,8 +35,6 @@ static struct integrity_iint_cache *__integrity_iint_find(struct inode *inode)
struct integrity_iint_cache *iint; struct integrity_iint_cache *iint;
struct rb_node *n = integrity_iint_tree.rb_node; struct rb_node *n = integrity_iint_tree.rb_node;
assert_spin_locked(&integrity_iint_lock);
while (n) { while (n) {
iint = rb_entry(n, struct integrity_iint_cache, rb_node); iint = rb_entry(n, struct integrity_iint_cache, rb_node);
...@@ -63,9 +61,9 @@ struct integrity_iint_cache *integrity_iint_find(struct inode *inode) ...@@ -63,9 +61,9 @@ struct integrity_iint_cache *integrity_iint_find(struct inode *inode)
if (!IS_IMA(inode)) if (!IS_IMA(inode))
return NULL; return NULL;
spin_lock(&integrity_iint_lock); read_lock(&integrity_iint_lock);
iint = __integrity_iint_find(inode); iint = __integrity_iint_find(inode);
spin_unlock(&integrity_iint_lock); read_unlock(&integrity_iint_lock);
return iint; return iint;
} }
...@@ -100,7 +98,7 @@ struct integrity_iint_cache *integrity_inode_get(struct inode *inode) ...@@ -100,7 +98,7 @@ struct integrity_iint_cache *integrity_inode_get(struct inode *inode)
if (!iint) if (!iint)
return NULL; return NULL;
spin_lock(&integrity_iint_lock); write_lock(&integrity_iint_lock);
p = &integrity_iint_tree.rb_node; p = &integrity_iint_tree.rb_node;
while (*p) { while (*p) {
...@@ -119,7 +117,7 @@ struct integrity_iint_cache *integrity_inode_get(struct inode *inode) ...@@ -119,7 +117,7 @@ struct integrity_iint_cache *integrity_inode_get(struct inode *inode)
rb_link_node(node, parent, p); rb_link_node(node, parent, p);
rb_insert_color(node, &integrity_iint_tree); rb_insert_color(node, &integrity_iint_tree);
spin_unlock(&integrity_iint_lock); write_unlock(&integrity_iint_lock);
return iint; return iint;
} }
...@@ -136,10 +134,10 @@ void integrity_inode_free(struct inode *inode) ...@@ -136,10 +134,10 @@ void integrity_inode_free(struct inode *inode)
if (!IS_IMA(inode)) if (!IS_IMA(inode))
return; return;
spin_lock(&integrity_iint_lock); write_lock(&integrity_iint_lock);
iint = __integrity_iint_find(inode); iint = __integrity_iint_find(inode);
rb_erase(&iint->rb_node, &integrity_iint_tree); rb_erase(&iint->rb_node, &integrity_iint_tree);
spin_unlock(&integrity_iint_lock); write_unlock(&integrity_iint_lock);
iint_free(iint); iint_free(iint);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment