Commit 20e62ee6 authored by Eric W. Biederman, committed by Greg Kroah-Hartman

mnt: Honor MNT_LOCKED when detaching mounts

commit ce07d891 upstream.

Modify umount(MNT_DETACH) to keep mounts in the hash table that are
locked to their parent mounts when the parent is lazily unmounted.

In mntput_no_expire, detach the children from the hash table, relying
on mnt_pin_kill in cleanup_mnt to decrement the mnt_count of the children.

In __detach_mounts, if there are any mounts that have been unmounted
but are still on the list of mounts of a mountpoint, remove their
children from the mount hash table and move those children to the
unmounted list so they won't linger potentially indefinitely waiting
for their final mntput, now that the mounts serve no purpose.
Signed-off-by: default avatar"Eric W. Biederman" <ebiederm@xmission.com>
Signed-off-by: default avatarGreg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 2db70697
fs/namespace.c
@@ -1099,6 +1099,13 @@ static void mntput_no_expire(struct mount *mnt)
 	rcu_read_unlock();
 
 	list_del(&mnt->mnt_instance);
+
+	if (unlikely(!list_empty(&mnt->mnt_mounts))) {
+		struct mount *p, *tmp;
+		list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) {
+			umount_mnt(p);
+		}
+	}
 	unlock_mount_hash();
 
 	if (likely(!(mnt->mnt.mnt_flags & MNT_INTERNAL))) {
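Note: the block added above runs only when the last reference to the parent is being dropped; any children still hashed under it at that point (the locked ones kept attached by umount_tree() below) are finally unhashed, and, per the commit message, mnt_pin_kill() in cleanup_mnt() later drops their counts. The loop uses the _safe iterator because umount_mnt() unhashes each visited child, which unlinks it from its parent's mnt_mounts list. The following is a minimal userspace sketch of that iterate-while-unlinking idiom only; it is not kernel code, and the tiny list type and names are invented for illustration.

/*
 * Userspace sketch only -- not kernel code.  Models why the next element
 * must be cached before the loop body unlinks the current one, which is
 * what list_for_each_entry_safe() does in the hunk above.
 */
#include <stdio.h>

struct node { struct node *next, *prev; int id; };

/* unlink and self-link, in the spirit of list_del_init() on mnt_child */
static void unlink_node(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	n->next = n->prev = n;
}

int main(void)
{
	struct node head = { &head, &head, 0 };
	struct node c[3];

	for (int i = 0; i < 3; i++) {		/* build head -> c1 -> c2 -> c3 */
		c[i].id = i + 1;
		c[i].prev = head.prev;
		c[i].next = &head;
		head.prev->next = &c[i];
		head.prev = &c[i];
	}

	/* "safe" walk: remember the next node before the body unlinks this one */
	for (struct node *p = head.next, *tmp = p->next;
	     p != &head; p = tmp, tmp = p->next) {
		unlink_node(p);			/* body removes p from the list */
		printf("detached child %d\n", p->id);
	}
	return 0;
}

A naive walk that read p->next only after the body had unlinked p would see the self-linked node and never advance.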
@@ -1372,6 +1379,7 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
 	propagate_umount(&tmp_list);
 
 	while (!list_empty(&tmp_list)) {
+		bool disconnect;
 		p = list_first_entry(&tmp_list, struct mount, mnt_list);
 		list_del_init(&p->mnt_expire);
 		list_del_init(&p->mnt_list);
@@ -1380,10 +1388,18 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
 		if (how & UMOUNT_SYNC)
 			p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
 
-		pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt, &unmounted);
+		disconnect = !IS_MNT_LOCKED_AND_LAZY(p);
+
+		pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt,
+				 disconnect ? &unmounted : NULL);
 		if (mnt_has_parent(p)) {
 			mnt_add_count(p->mnt_parent, -1);
-			umount_mnt(p);
+			if (!disconnect) {
+				/* Don't forget about p */
+				list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts);
+			} else {
+				umount_mnt(p);
+			}
 		}
 		change_mnt_propagation(p, MS_PRIVATE);
 	}
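Note: because MNT_SYNC_UMOUNT is set before disconnect is computed, a synchronous umount still fully disconnects locked children exactly as before; only the lazy (MNT_DETACH) path keeps them hashed under their parent. Below is a minimal userspace model of that decision. The flag values and helper names (locked_and_lazy(), decide()) are invented for illustration; only the bit test mirrors the new IS_MNT_LOCKED_AND_LAZY() macro added in fs/pnode.h at the end of this patch.

/*
 * Userspace model only -- not kernel code.
 */
#include <stdio.h>

#define MNT_LOCKED	0x1	/* illustrative value: child must stay attached to its parent */
#define MNT_SYNC_UMOUNT	0x2	/* illustrative value: set for a sync (non-lazy) umount */

/* true only for a locked child being torn down by a lazy unmount */
static int locked_and_lazy(unsigned int flags)
{
	return (flags & (MNT_LOCKED | MNT_SYNC_UMOUNT)) == MNT_LOCKED;
}

static void decide(unsigned int flags, int umount_sync)
{
	if (umount_sync)
		flags |= MNT_SYNC_UMOUNT;	/* set before the test, as in the hunk */

	int disconnect = !locked_and_lazy(flags);

	printf("locked=%d sync=%d -> %s\n",
	       !!(flags & MNT_LOCKED), umount_sync,
	       disconnect ? "disconnect from parent" : "keep hashed under parent");
}

int main(void)
{
	decide(0, 0);			/* unlocked, lazy -> disconnect              */
	decide(MNT_LOCKED, 0);		/* locked,   lazy -> keep hashed (new case)  */
	decide(MNT_LOCKED, 1);		/* locked,   sync -> disconnect as before    */
	return 0;
}

Running it shows that only the locked-and-lazy combination keeps the child connected to its (unmounted) parent.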
@@ -1508,7 +1524,14 @@ void __detach_mounts(struct dentry *dentry)
 	lock_mount_hash();
 	while (!hlist_empty(&mp->m_list)) {
 		mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
-		umount_tree(mnt, 0);
+		if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
+			struct mount *p, *tmp;
+			list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) {
+				hlist_add_head(&p->mnt_umount.s_list, &unmounted);
+				umount_mnt(p);
+			}
+		}
+		else umount_tree(mnt, 0);
 	}
 	unlock_mount_hash();
 	put_mountpoint(mp);
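Note: a mount that already carries MNT_UMOUNT here was kept connected only because of locked children, so there is no tree left to take apart; its remaining children just need to be unhashed and queued on the unmounted list for their final mntput. Anything else still goes through a full umount_tree(). A small, purely illustrative userspace sketch of that dispatch follows; the flag value, enum, and function names are hypothetical.

/* Illustration only -- models just the new if/else in __detach_mounts(). */
#include <stdio.h>

#define MNT_UMOUNT 0x4	/* illustrative value: mount has already been taken down */

enum detach_action { REAP_REMAINING_CHILDREN, FULL_UMOUNT_TREE };

static enum detach_action detach_action_for(unsigned int mnt_flags)
{
	return (mnt_flags & MNT_UMOUNT) ? REAP_REMAINING_CHILDREN
					: FULL_UMOUNT_TREE;
}

int main(void)
{
	printf("%s\n", detach_action_for(MNT_UMOUNT) == REAP_REMAINING_CHILDREN ?
	       "unhash remaining children, queue them on 'unmounted'" :
	       "umount_tree(mnt, 0)");
	printf("%s\n", detach_action_for(0) == REAP_REMAINING_CHILDREN ?
	       "unhash remaining children, queue them on 'unmounted'" :
	       "umount_tree(mnt, 0)");
	return 0;
}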
fs/pnode.h
@@ -20,6 +20,8 @@
 #define SET_MNT_MARK(m) ((m)->mnt.mnt_flags |= MNT_MARKED)
 #define CLEAR_MNT_MARK(m) ((m)->mnt.mnt_flags &= ~MNT_MARKED)
 #define IS_MNT_LOCKED(m) ((m)->mnt.mnt_flags & MNT_LOCKED)
+#define IS_MNT_LOCKED_AND_LAZY(m) \
+	(((m)->mnt.mnt_flags & (MNT_LOCKED|MNT_SYNC_UMOUNT)) == MNT_LOCKED)
 
 #define CL_EXPIRE	0x01
 #define CL_SLAVE	0x02