Commit 18253e03 authored by Linus Torvalds

Merge branch 'work.dcache2' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs

Pull dcache and mountpoint updates from Al Viro:
 "Saner handling of refcounts to mountpoints.

  Transfer the counting reference from struct mount ->mnt_mountpoint
  over to struct mountpoint ->m_dentry. That allows us to get rid of the
  convoluted games with ordering of mount shutdowns.

  The cost is in teaching shrink_dcache_{parent,for_umount} to cope with
  mixed-filesystem shrink lists, which we'll also need for the Slab
  Movable Objects patchset"

* 'work.dcache2' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
  switch the remnants of releasing the mountpoint away from fs_pin
  get rid of detach_mnt()
  make struct mountpoint bear the dentry reference to mountpoint, not struct mount
  Teach shrink_dcache_parent() to cope with mixed-filesystem shrink lists
  fs/namespace.c: shift put_mountpoint() to callers of unhash_mnt()
  __detach_mounts(): lookup_mountpoint() can't return ERR_PTR() anymore
  nfs: dget_parent() never returns NULL
  ceph: don't open-code the check for dead lockref
Parents: abdfd52a 56cbb429
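
For orientation before the hunks: the series moves the counting dentry reference from each struct mount over to the shared struct mountpoint. A rough sketch of the resulting ownership (simplified from fs/mount.h; the helper body is illustrative, not the exact kernel code):

	struct mountpoint {
		struct hlist_node m_hash;
		struct dentry *m_dentry;	/* now the counted reference */
		struct hlist_head m_list;
		int m_count;			/* mounts attached here */
	};

	/*
	 * Conceptually: the last put of the mountpoint drops the dentry
	 * reference exactly once, instead of every mount pinning its own
	 * ->mnt_mountpoint. (Sketch only; the real put_mountpoint() also
	 * unhashes and clears DCACHE_MOUNTED under the proper locks.)
	 */
	static void put_mountpoint_sketch(struct mountpoint *mp)
	{
		if (!--mp->m_count)
			dput(mp->m_dentry);
	}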
fs/ceph/mds_client.c
@@ -1267,7 +1267,7 @@ __dentry_leases_walk(struct ceph_mds_client *mdsc,
 		if (!spin_trylock(&dentry->d_lock))
 			continue;
 
-		if (dentry->d_lockref.count < 0) {
+		if (__lockref_is_dead(&dentry->d_lockref)) {
 			list_del_init(&di->lease_list);
 			goto next;
 		}
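The helper is the existing predicate from include/linux/lockref.h; a dead lockref is one whose count has been forced negative, so the behaviour is unchanged, only the open-coded test is gone. For reference, its definition in that header:

	static inline bool __lockref_is_dead(const struct lockref *l)
	{
		return ((int)l->count < 0);
	}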
fs/dcache.c
@@ -861,6 +861,32 @@ void dput(struct dentry *dentry)
 }
 EXPORT_SYMBOL(dput);
 
+static void __dput_to_list(struct dentry *dentry, struct list_head *list)
+__must_hold(&dentry->d_lock)
+{
+	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
+		/* let the owner of the list it's on deal with it */
+		--dentry->d_lockref.count;
+	} else {
+		if (dentry->d_flags & DCACHE_LRU_LIST)
+			d_lru_del(dentry);
+		if (!--dentry->d_lockref.count)
+			d_shrink_add(dentry, list);
+	}
+}
+
+void dput_to_list(struct dentry *dentry, struct list_head *list)
+{
+	rcu_read_lock();
+	if (likely(fast_dput(dentry))) {
+		rcu_read_unlock();
+		return;
+	}
+	rcu_read_unlock();
+	if (!retain_dentry(dentry))
+		__dput_to_list(dentry, list);
+	spin_unlock(&dentry->d_lock);
+}
 
 /* This must be called with d_lock held */
 static inline void __dget_dlock(struct dentry *dentry)
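dput_to_list() is the batching variant of dput(): when the fast path fails and the dentry is not retained, it is parked on a caller-owned shrink list instead of being killed on the spot. A hypothetical caller, to show the intended pattern (names invented for illustration):

	/* Hypothetical sketch -- not a hunk from this series. */
	static void drop_collected_sketch(struct dentry **victims, int n)
	{
		LIST_HEAD(dispose);
		int i;

		for (i = 0; i < n; i++)
			dput_to_list(victims[i], &dispose);	/* may park on dispose */
		shrink_dentry_list(&dispose);			/* kill the batch in one pass */
	}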
@@ -1067,7 +1093,7 @@ static bool shrink_lock_dentry(struct dentry *dentry)
 	return false;
 }
 
-static void shrink_dentry_list(struct list_head *list)
+void shrink_dentry_list(struct list_head *list)
 {
 	while (!list_empty(list)) {
 		struct dentry *dentry, *parent;
@@ -1089,18 +1115,9 @@ static void shrink_dentry_list(struct list_head *list)
 		rcu_read_unlock();
 		d_shrink_del(dentry);
 		parent = dentry->d_parent;
+		if (parent != dentry)
+			__dput_to_list(parent, list);
 		__dentry_kill(dentry);
-		if (parent == dentry)
-			continue;
-		/*
-		 * We need to prune ancestors too. This is necessary to prevent
-		 * quadratic behavior of shrink_dcache_parent(), but is also
-		 * expected to be beneficial in reducing dentry cache
-		 * fragmentation.
-		 */
-		dentry = parent;
-		while (dentry && !lockref_put_or_lock(&dentry->d_lockref))
-			dentry = dentry_kill(dentry);
 	}
 }
@@ -1445,8 +1462,11 @@ int d_set_mounted(struct dentry *dentry)
 
 struct select_data {
 	struct dentry *start;
+	union {
+		long found;
+		struct dentry *victim;
+	};
 	struct list_head dispose;
-	int found;
 };
 
 static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
@@ -1478,6 +1498,37 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
 	return ret;
 }
 
+static enum d_walk_ret select_collect2(void *_data, struct dentry *dentry)
+{
+	struct select_data *data = _data;
+	enum d_walk_ret ret = D_WALK_CONTINUE;
+
+	if (data->start == dentry)
+		goto out;
+
+	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
+		if (!dentry->d_lockref.count) {
+			rcu_read_lock();
+			data->victim = dentry;
+			return D_WALK_QUIT;
+		}
+	} else {
+		if (dentry->d_flags & DCACHE_LRU_LIST)
+			d_lru_del(dentry);
+		if (!dentry->d_lockref.count)
+			d_shrink_add(dentry, &data->dispose);
+	}
+	/*
+	 * We can return to the caller if we have found some (this
+	 * ensures forward progress). We'll be coming back to find
+	 * the rest.
+	 */
+	if (!list_empty(&data->dispose))
+		ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
+out:
+	return ret;
+}
+
 /**
  * shrink_dcache_parent - prune dcache
  * @parent: parent of entries to prune
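select_collect2() follows the same d_walk() callback protocol as select_collect(); for reference, the return codes (the enum is from fs/dcache.c, the comments are a paraphrase of their use):

	enum d_walk_ret {
		D_WALK_CONTINUE,	/* keep walking */
		D_WALK_QUIT,		/* abort the walk */
		D_WALK_NORETRY,		/* quit rather than restart the walk from scratch */
		D_WALK_SKIP,		/* skip this dentry's subtree */
	};

Note the asymmetry: when a victim stuck on a foreign shrink list is found, the callback returns with rcu_read_lock() held, so the dentry cannot be freed before the caller has re-taken its ->d_lock.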
@@ -1487,12 +1538,9 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
 void shrink_dcache_parent(struct dentry *parent)
 {
 	for (;;) {
-		struct select_data data;
+		struct select_data data = {.start = parent};
 
 		INIT_LIST_HEAD(&data.dispose);
-		data.start = parent;
-		data.found = 0;
-
 		d_walk(parent, &data, select_collect);
 
 		if (!list_empty(&data.dispose)) {
@@ -1503,6 +1551,24 @@ void shrink_dcache_parent(struct dentry *parent)
 		cond_resched();
 		if (!data.found)
 			break;
+		data.victim = NULL;
+		d_walk(parent, &data, select_collect2);
+		if (data.victim) {
+			struct dentry *parent;
+			spin_lock(&data.victim->d_lock);
+			if (!shrink_lock_dentry(data.victim)) {
+				spin_unlock(&data.victim->d_lock);
+				rcu_read_unlock();
+			} else {
+				rcu_read_unlock();
+				parent = data.victim->d_parent;
+				if (parent != data.victim)
+					__dput_to_list(parent, &data.dispose);
+				__dentry_kill(data.victim);
+			}
+		}
+		if (!list_empty(&data.dispose))
+			shrink_dentry_list(&data.dispose);
 	}
 }
 EXPORT_SYMBOL(shrink_dcache_parent);
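The rcu_read_unlock() calls above pair with the rcu_read_lock() taken inside select_collect2() when it reports a victim. A condensed restatement of the loop, with hypothetical helper names (sketch only, not the diff):

	for (;;) {
		collect_movable(parent, &dispose);	/* select_collect() pass */
		if (!list_empty(&dispose)) {
			shrink_dentry_list(&dispose);
			continue;
		}
		if (!found)				/* data.found == 0: subtree is clean */
			break;
		kill_one_stuck_victim(parent, &dispose);	/* select_collect2() pass */
		shrink_dentry_list(&dispose);
	}

A zero-count dentry already on someone else's shrink list cannot be moved onto ours, so it is killed in place; __dput_to_list() then hands the parent's reference to our own dispose list, preserving the old ancestor-pruning behaviour without the removed inner loop.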
fs/fs_pin.c
@@ -19,20 +19,14 @@ void pin_remove(struct fs_pin *pin)
 	spin_unlock_irq(&pin->wait.lock);
 }
 
-void pin_insert_group(struct fs_pin *pin, struct vfsmount *m, struct hlist_head *p)
+void pin_insert(struct fs_pin *pin, struct vfsmount *m)
 {
 	spin_lock(&pin_lock);
-	if (p)
-		hlist_add_head(&pin->s_list, p);
+	hlist_add_head(&pin->s_list, &m->mnt_sb->s_pins);
 	hlist_add_head(&pin->m_list, &real_mount(m)->mnt_pins);
 	spin_unlock(&pin_lock);
 }
 
-void pin_insert(struct fs_pin *pin, struct vfsmount *m)
-{
-	pin_insert_group(pin, m, &m->mnt_sb->s_pins);
-}
-
 void pin_kill(struct fs_pin *p)
 {
 	wait_queue_entry_t wait;
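With mount shutdown no longer going through fs_pin, pin_insert() keeps a single caller-facing shape. The surviving usage pattern looks roughly like this (hypothetical names, modelled loosely on kernel/acct.c):

	struct my_watch {
		struct fs_pin pin;
	};

	static void my_watch_kill(struct fs_pin *p)
	{
		struct my_watch *w = container_of(p, struct my_watch, pin);

		pin_remove(p);		/* unhash from both pin lists */
		kfree(w);		/* illustration; real users do more */
	}

	static void my_watch_attach(struct my_watch *w, struct vfsmount *m)
	{
		init_fs_pin(&w->pin, my_watch_kill);
		pin_insert(&w->pin, m);	/* ->kill() runs when m goes away */
	}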
fs/internal.h
@@ -157,6 +157,8 @@ extern long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc);
 extern struct dentry *d_alloc_cursor(struct dentry *);
 extern struct dentry * d_alloc_pseudo(struct super_block *, const struct qstr *);
 extern char *simple_dname(struct dentry *, char *, int);
+extern void dput_to_list(struct dentry *, struct list_head *);
+extern void shrink_dentry_list(struct list_head *);
 
 /*
  * read_write.c
fs/mount.h
@@ -58,7 +58,10 @@ struct mount {
 	struct mount *mnt_master;	/* slave is on master->mnt_slave_list */
 	struct mnt_namespace *mnt_ns;	/* containing namespace */
 	struct mountpoint *mnt_mp;	/* where is it mounted */
-	struct hlist_node mnt_mp_list;	/* list mounts with the same mountpoint */
+	union {
+		struct hlist_node mnt_mp_list;	/* list mounts with the same mountpoint */
+		struct hlist_node mnt_umount;
+	};
 	struct list_head mnt_umounting; /* list entry for umount propagation */
 #ifdef CONFIG_FSNOTIFY
 	struct fsnotify_mark_connector __rcu *mnt_fsnotify_marks;
@@ -68,8 +71,7 @@ struct mount {
 	int mnt_group_id;		/* peer group identifier */
 	int mnt_expiry_mark;		/* true if marked for expiry */
 	struct hlist_head mnt_pins;
-	struct fs_pin mnt_umount;
-	struct dentry *mnt_ex_mountpoint;
+	struct hlist_head mnt_stuck_children;
 } __randomize_layout;
 
 #define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
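The union is safe because the two memberships never overlap in time: mnt_mp_list links a live mount into its mountpoint's m_list, while the new mnt_umount linkage is used only once the mount has been unhashed on the umount path (the same trick as found/victim in select_data above). A minimal stand-alone illustration of the pattern (hypothetical, not kernel code):

	#include <assert.h>

	enum phase { ATTACHED, DYING };

	struct fake_mount {
		enum phase phase;
		union {				/* one slot, two exclusive roles */
			int mp_list_slot;	/* valid only while ATTACHED */
			int umount_slot;	/* valid only while DYING */
		};
	};

	int main(void)
	{
		struct fake_mount m = { .phase = ATTACHED, .mp_list_slot = 1 };

		m.phase = DYING;		/* tear-down reuses the storage */
		m.umount_slot = 2;
		assert(m.phase == DYING && m.umount_slot == 2);
		return 0;
	}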
fs/namespace.c (large diff collapsed in this view)
fs/nfs/super.c
@@ -457,11 +457,9 @@ int nfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 		struct dentry *pd_dentry;
 
 		pd_dentry = dget_parent(dentry);
-		if (pd_dentry != NULL) {
-			nfs_zap_caches(d_inode(pd_dentry));
-			dput(pd_dentry);
-		}
+		nfs_zap_caches(d_inode(pd_dentry));
+		dput(pd_dentry);
 	}
 	nfs_free_fattr(res.fattr);
 	if (error < 0)
 		goto out_err;
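The justification for this cleanup: dget_parent() returns a counted reference to ->d_parent, which is never NULL, because in the dcache a root dentry is its own parent. The NULL check was therefore dead code, and the resulting caller pattern is simply:

	struct dentry *p = dget_parent(dentry);	/* never NULL */
	/* ... use d_inode(p) ... */
	dput(p);				/* always balanced, unconditionally */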
include/linux/fs_pin.h
@@ -20,6 +20,5 @@ static inline void init_fs_pin(struct fs_pin *p, void (*kill)(struct fs_pin *))
 }
 
 void pin_remove(struct fs_pin *);
-void pin_insert_group(struct fs_pin *, struct vfsmount *, struct hlist_head *);
 void pin_insert(struct fs_pin *, struct vfsmount *);
 void pin_kill(struct fs_pin *);