Commit 41448c61 authored by Imran Khan, committed by Greg Kroah-Hartman

kernfs: Introduce interface to access global kernfs_open_file_mutex.

This allows the underlying mutex locking to be changed without needing to change
the users of the lock. For example, the next patch modifies this interface to
use hashed mutexes in place of a single global kernfs_open_file_mutex.
Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Imran Khan <imran.f.khan@oracle.com>
Link: https://lore.kernel.org/r/20220615021059.862643-4-imran.f.khan@oracle.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent b8f35fa1
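
To illustrate what the commit message means by "hashed mutexes", here is a minimal sketch of how kernfs_open_file_mutex_ptr() could later map a kernfs_node to one of several mutexes without touching any caller. This is a hypothetical illustration only, not the follow-up patch itself; NR_KERNFS_LOCK_BITS and the mutex array are assumed names, while hash_ptr() is the existing helper from <linux/hash.h>.

#include <linux/hash.h>
#include <linux/mutex.h>

/* Assumed sizing for illustration: 2^10 hashed locks. */
#define NR_KERNFS_LOCK_BITS	10
#define NR_KERNFS_LOCKS		(1 << NR_KERNFS_LOCK_BITS)

/*
 * Would replace the single global kernfs_open_file_mutex; each entry
 * needs mutex_init() during kernfs initialization.
 */
static struct mutex kernfs_open_file_mutex[NR_KERNFS_LOCKS];

static inline struct mutex *kernfs_open_file_mutex_ptr(struct kernfs_node *kn)
{
	/* Pick a mutex based on the kernfs_node pointer. */
	int idx = hash_ptr(kn, NR_KERNFS_LOCK_BITS);

	return &kernfs_open_file_mutex[idx];
}

Because every user goes through kernfs_open_file_mutex_ptr()/kernfs_open_file_mutex_lock(), a change like the above stays confined to these two helpers.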
@@ -49,6 +49,22 @@ struct kernfs_open_node {
 
 static LLIST_HEAD(kernfs_notify_list);
 
+static inline struct mutex *kernfs_open_file_mutex_ptr(struct kernfs_node *kn)
+{
+	return &kernfs_open_file_mutex;
+}
+
+static inline struct mutex *kernfs_open_file_mutex_lock(struct kernfs_node *kn)
+{
+	struct mutex *lock;
+
+	lock = kernfs_open_file_mutex_ptr(kn);
+	mutex_lock(lock);
+
+	return lock;
+}
+
 /**
  * kernfs_deref_open_node - Get kernfs_open_node corresponding to @kn.
  *
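kernfs_open_file_mutex_lock() returns the mutex it acquired, so each caller keeps the returned pointer and releases that same mutex. The remaining hunks convert every caller to roughly this pattern (sketch only):

	struct mutex *mutex;

	mutex = kernfs_open_file_mutex_lock(kn);
	/* ... access or update kn->attr.open while holding the lock ... */
	mutex_unlock(mutex);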
@@ -79,9 +95,9 @@ kernfs_deref_open_node(struct kernfs_open_file *of, struct kernfs_node *kn)
  * @kn: target kernfs_node.
  *
  * Fetch and return ->attr.open of @kn when caller holds the
- * kernfs_open_file_mutex.
+ * kernfs_open_file_mutex_ptr(kn).
  *
- * Update of ->attr.open happens under kernfs_open_file_mutex. So when
+ * Update of ->attr.open happens under kernfs_open_file_mutex_ptr(kn). So when
  * the caller guarantees that this mutex is being held, other updaters can't
  * change ->attr.open and this means that we can safely deref ->attr.open
  * outside RCU read-side critical section.
@@ -92,7 +108,7 @@ static struct kernfs_open_node *
 kernfs_deref_open_node_protected(struct kernfs_node *kn)
 {
 	return rcu_dereference_protected(kn->attr.open,
-				lockdep_is_held(&kernfs_open_file_mutex));
+				lockdep_is_held(kernfs_open_file_mutex_ptr(kn)));
 }
 
 static struct kernfs_open_file *kernfs_of(struct file *file)
@@ -574,19 +590,20 @@ static int kernfs_get_open_node(struct kernfs_node *kn,
 				struct kernfs_open_file *of)
 {
 	struct kernfs_open_node *on, *new_on = NULL;
+	struct mutex *mutex = NULL;
 
-	mutex_lock(&kernfs_open_file_mutex);
+	mutex = kernfs_open_file_mutex_lock(kn);
 	on = kernfs_deref_open_node_protected(kn);
 
 	if (on) {
 		list_add_tail(&of->list, &on->files);
-		mutex_unlock(&kernfs_open_file_mutex);
+		mutex_unlock(mutex);
 		return 0;
 	} else {
 		/* not there, initialize a new one */
 		new_on = kmalloc(sizeof(*new_on), GFP_KERNEL);
 		if (!new_on) {
-			mutex_unlock(&kernfs_open_file_mutex);
+			mutex_unlock(mutex);
 			return -ENOMEM;
 		}
 		atomic_set(&new_on->event, 1);
@@ -595,7 +612,7 @@ static int kernfs_get_open_node(struct kernfs_node *kn,
 		list_add_tail(&of->list, &new_on->files);
 		rcu_assign_pointer(kn->attr.open, new_on);
 	}
-	mutex_unlock(&kernfs_open_file_mutex);
+	mutex_unlock(mutex);
 
 	return 0;
 }
@@ -617,12 +634,13 @@ static void kernfs_unlink_open_file(struct kernfs_node *kn,
 				 struct kernfs_open_file *of)
 {
 	struct kernfs_open_node *on;
+	struct mutex *mutex = NULL;
 
-	mutex_lock(&kernfs_open_file_mutex);
+	mutex = kernfs_open_file_mutex_lock(kn);
 	on = kernfs_deref_open_node_protected(kn);
 	if (!on) {
-		mutex_unlock(&kernfs_open_file_mutex);
+		mutex_unlock(mutex);
 		return;
 	}
 
@@ -634,7 +652,7 @@ static void kernfs_unlink_open_file(struct kernfs_node *kn,
 		kfree_rcu(on, rcu_head);
 	}
 
-	mutex_unlock(&kernfs_open_file_mutex);
+	mutex_unlock(mutex);
 }
 
 static int kernfs_fop_open(struct inode *inode, struct file *file)
@@ -772,11 +790,11 @@ static void kernfs_release_file(struct kernfs_node *kn,
 	/*
 	 * @of is guaranteed to have no other file operations in flight and
 	 * we just want to synchronize release and drain paths.
-	 * @kernfs_open_file_mutex is enough. @of->mutex can't be used
+	 * @kernfs_open_file_mutex_ptr(kn) is enough. @of->mutex can't be used
 	 * here because drain path may be called from places which can
 	 * cause circular dependency.
 	 */
-	lockdep_assert_held(&kernfs_open_file_mutex);
+	lockdep_assert_held(kernfs_open_file_mutex_ptr(kn));
 
 	if (!of->released) {
 		/*
@@ -793,11 +811,12 @@ static int kernfs_fop_release(struct inode *inode, struct file *filp)
 {
 	struct kernfs_node *kn = inode->i_private;
 	struct kernfs_open_file *of = kernfs_of(filp);
+	struct mutex *mutex = NULL;
 
 	if (kn->flags & KERNFS_HAS_RELEASE) {
-		mutex_lock(&kernfs_open_file_mutex);
+		mutex = kernfs_open_file_mutex_lock(kn);
 		kernfs_release_file(kn, of);
-		mutex_unlock(&kernfs_open_file_mutex);
+		mutex_unlock(mutex);
 	}
 
 	kernfs_unlink_open_file(kn, of);
@@ -812,6 +831,7 @@ void kernfs_drain_open_files(struct kernfs_node *kn)
 {
 	struct kernfs_open_node *on;
 	struct kernfs_open_file *of;
+	struct mutex *mutex = NULL;
 
 	if (!(kn->flags & (KERNFS_HAS_MMAP | KERNFS_HAS_RELEASE)))
 		return;
@@ -821,16 +841,16 @@ void kernfs_drain_open_files(struct kernfs_node *kn)
 	 * ->attr.open at this point of time. This check allows early bail out
 	 * if ->attr.open is already NULL. kernfs_unlink_open_file makes
 	 * ->attr.open NULL only while holding kernfs_open_file_mutex so below
-	 * check under kernfs_open_file_mutex will ensure bailing out if
+	 * check under kernfs_open_file_mutex_ptr(kn) will ensure bailing out if
 	 * ->attr.open became NULL while waiting for the mutex.
 	 */
 	if (!rcu_access_pointer(kn->attr.open))
 		return;
 
-	mutex_lock(&kernfs_open_file_mutex);
+	mutex = kernfs_open_file_mutex_lock(kn);
 	on = kernfs_deref_open_node_protected(kn);
 	if (!on) {
-		mutex_unlock(&kernfs_open_file_mutex);
+		mutex_unlock(mutex);
 		return;
 	}
 
@@ -844,7 +864,7 @@ void kernfs_drain_open_files(struct kernfs_node *kn)
 		kernfs_release_file(kn, of);
 	}
 
-	mutex_unlock(&kernfs_open_file_mutex);
+	mutex_unlock(mutex);
 }
 
 /*