Commit 41448c61 authored by Imran Khan, committed by Greg Kroah-Hartman

kernfs: Introduce interface to access global kernfs_open_file_mutex.

This allows changing the underlying mutex locking without needing to change
the users of the lock. For example, the next patch modifies this interface to
use hashed mutexes in place of the single global kernfs_open_file_mutex.
Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Imran Khan <imran.f.khan@oracle.com>
Link: https://lore.kernel.org/r/20220615021059.862643-4-imran.f.khan@oracle.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent b8f35fa1
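
The value of routing every lock and unlock site through kernfs_open_file_mutex_ptr() and kernfs_open_file_mutex_lock() is that only these helpers need to change when the locking scheme does. As a rough illustration, a hashed-mutex variant of the helper could look something like the sketch below; the NR_KERNFS_LOCK_BITS constant, the array name, and the use of hash_ptr() are illustrative assumptions here, not code from the follow-up patch:

/*
 * Sketch only: a possible hashed-mutex form of the helper. Constants,
 * names, and the hash choice are assumptions; per-mutex initialization
 * (mutex_init() on each array entry at boot) is omitted.
 */
#include <linux/hash.h>
#include <linux/mutex.h>

#define NR_KERNFS_LOCK_BITS	10
#define NR_KERNFS_LOCKS		(1 << NR_KERNFS_LOCK_BITS)

static struct mutex kernfs_open_file_mutexes[NR_KERNFS_LOCKS];

static inline struct mutex *kernfs_open_file_mutex_ptr(struct kernfs_node *kn)
{
	/* hash the node pointer to spread contention across the array */
	return &kernfs_open_file_mutexes[hash_ptr(kn, NR_KERNFS_LOCK_BITS)];
}

Callers are unaffected by such a change: they still obtain the lock through kernfs_open_file_mutex_lock(kn) and release it via the returned pointer.
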
@@ -49,6 +49,22 @@ struct kernfs_open_node {
 
 static LLIST_HEAD(kernfs_notify_list);
 
+static inline struct mutex *kernfs_open_file_mutex_ptr(struct kernfs_node *kn)
+{
+	return &kernfs_open_file_mutex;
+}
+
+static inline struct mutex *kernfs_open_file_mutex_lock(struct kernfs_node *kn)
+{
+	struct mutex *lock;
+
+	lock = kernfs_open_file_mutex_ptr(kn);
+
+	mutex_lock(lock);
+
+	return lock;
+}
+
 /**
  * kernfs_deref_open_node - Get kernfs_open_node corresponding to @kn.
  *
@@ -79,9 +95,9 @@ kernfs_deref_open_node(struct kernfs_open_file *of, struct kernfs_node *kn)
  * @kn: target kernfs_node.
  *
  * Fetch and return ->attr.open of @kn when caller holds the
- * kernfs_open_file_mutex.
+ * kernfs_open_file_mutex_ptr(kn).
  *
- * Update of ->attr.open happens under kernfs_open_file_mutex. So when
+ * Update of ->attr.open happens under kernfs_open_file_mutex_ptr(kn). So when
  * the caller guarantees that this mutex is being held, other updaters can't
  * change ->attr.open and this means that we can safely deref ->attr.open
  * outside RCU read-side critical section.
@@ -92,7 +108,7 @@ static struct kernfs_open_node *
 kernfs_deref_open_node_protected(struct kernfs_node *kn)
 {
 	return rcu_dereference_protected(kn->attr.open,
-					 lockdep_is_held(&kernfs_open_file_mutex));
+					 lockdep_is_held(kernfs_open_file_mutex_ptr(kn)));
 }
 
 static struct kernfs_open_file *kernfs_of(struct file *file)
@@ -574,19 +590,20 @@ static int kernfs_get_open_node(struct kernfs_node *kn,
 				struct kernfs_open_file *of)
 {
 	struct kernfs_open_node *on, *new_on = NULL;
+	struct mutex *mutex = NULL;
 
-	mutex_lock(&kernfs_open_file_mutex);
+	mutex = kernfs_open_file_mutex_lock(kn);
 	on = kernfs_deref_open_node_protected(kn);
 
 	if (on) {
 		list_add_tail(&of->list, &on->files);
-		mutex_unlock(&kernfs_open_file_mutex);
+		mutex_unlock(mutex);
 		return 0;
 	} else {
 		/* not there, initialize a new one */
 		new_on = kmalloc(sizeof(*new_on), GFP_KERNEL);
 		if (!new_on) {
-			mutex_unlock(&kernfs_open_file_mutex);
+			mutex_unlock(mutex);
 			return -ENOMEM;
 		}
 		atomic_set(&new_on->event, 1);
@@ -595,7 +612,7 @@ static int kernfs_get_open_node(struct kernfs_node *kn,
 		list_add_tail(&of->list, &new_on->files);
 		rcu_assign_pointer(kn->attr.open, new_on);
 	}
-	mutex_unlock(&kernfs_open_file_mutex);
+	mutex_unlock(mutex);
 
 	return 0;
 }
@@ -617,12 +634,13 @@ static void kernfs_unlink_open_file(struct kernfs_node *kn,
 				    struct kernfs_open_file *of)
 {
 	struct kernfs_open_node *on;
+	struct mutex *mutex = NULL;
 
-	mutex_lock(&kernfs_open_file_mutex);
+	mutex = kernfs_open_file_mutex_lock(kn);
 
 	on = kernfs_deref_open_node_protected(kn);
 	if (!on) {
-		mutex_unlock(&kernfs_open_file_mutex);
+		mutex_unlock(mutex);
 		return;
 	}
 
@@ -634,7 +652,7 @@ static void kernfs_unlink_open_file(struct kernfs_node *kn,
 		kfree_rcu(on, rcu_head);
 	}
 
-	mutex_unlock(&kernfs_open_file_mutex);
+	mutex_unlock(mutex);
 }
 
 static int kernfs_fop_open(struct inode *inode, struct file *file)
@@ -772,11 +790,11 @@ static void kernfs_release_file(struct kernfs_node *kn,
 	/*
 	 * @of is guaranteed to have no other file operations in flight and
 	 * we just want to synchronize release and drain paths.
-	 * @kernfs_open_file_mutex is enough. @of->mutex can't be used
+	 * @kernfs_open_file_mutex_ptr(kn) is enough. @of->mutex can't be used
 	 * here because drain path may be called from places which can
 	 * cause circular dependency.
 	 */
-	lockdep_assert_held(&kernfs_open_file_mutex);
+	lockdep_assert_held(kernfs_open_file_mutex_ptr(kn));
 
 	if (!of->released) {
 		/*
@@ -793,11 +811,12 @@ static int kernfs_fop_release(struct inode *inode, struct file *filp)
 {
 	struct kernfs_node *kn = inode->i_private;
 	struct kernfs_open_file *of = kernfs_of(filp);
+	struct mutex *mutex = NULL;
 
 	if (kn->flags & KERNFS_HAS_RELEASE) {
-		mutex_lock(&kernfs_open_file_mutex);
+		mutex = kernfs_open_file_mutex_lock(kn);
 		kernfs_release_file(kn, of);
-		mutex_unlock(&kernfs_open_file_mutex);
+		mutex_unlock(mutex);
 	}
 
 	kernfs_unlink_open_file(kn, of);
@@ -812,6 +831,7 @@ void kernfs_drain_open_files(struct kernfs_node *kn)
 {
 	struct kernfs_open_node *on;
 	struct kernfs_open_file *of;
+	struct mutex *mutex = NULL;
 
 	if (!(kn->flags & (KERNFS_HAS_MMAP | KERNFS_HAS_RELEASE)))
 		return;
@@ -821,16 +841,16 @@ void kernfs_drain_open_files(struct kernfs_node *kn)
 	 * ->attr.open at this point of time. This check allows early bail out
 	 * if ->attr.open is already NULL. kernfs_unlink_open_file makes
 	 * ->attr.open NULL only while holding kernfs_open_file_mutex so below
-	 * check under kernfs_open_file_mutex will ensure bailing out if
+	 * check under kernfs_open_file_mutex_ptr(kn) will ensure bailing out if
	 * ->attr.open became NULL while waiting for the mutex.
	 */
 	if (!rcu_access_pointer(kn->attr.open))
 		return;
 
-	mutex_lock(&kernfs_open_file_mutex);
+	mutex = kernfs_open_file_mutex_lock(kn);
 	on = kernfs_deref_open_node_protected(kn);
 	if (!on) {
-		mutex_unlock(&kernfs_open_file_mutex);
+		mutex_unlock(mutex);
 		return;
 	}
 
@@ -844,7 +864,7 @@ void kernfs_drain_open_files(struct kernfs_node *kn)
 		kernfs_release_file(kn, of);
 	}
 
-	mutex_unlock(&kernfs_open_file_mutex);
+	mutex_unlock(mutex);
 }
 
 /*
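
Taken together, the caller-side pattern established by this patch is: take the per-node lock with kernfs_open_file_mutex_lock(), keep the returned pointer, and unlock through that pointer rather than through a global. A condensed, hypothetical caller (not a function from this patch; the body is a placeholder):

/* Hypothetical caller, for illustration only. */
static void example_open_file_user(struct kernfs_node *kn)
{
	struct mutex *mutex;

	mutex = kernfs_open_file_mutex_lock(kn);	/* lock, remember which mutex */

	/* ... inspect or update kn->attr.open under the lock ... */

	mutex_unlock(mutex);				/* unlock via the saved pointer */
}

Saving the pointer matters because, once the helper can return different mutexes for different nodes, the unlock site must release exactly the mutex that was taken.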