Commit 2433480a authored by Jan Kara

xfs: Convert to use invalidate_lock

Use invalidate_lock instead of XFS internal i_mmap_lock. The intended
purpose of invalidate_lock is exactly the same. Note that the locking in
__xfs_filemap_fault() slightly changes as filemap_fault() already takes
invalidate_lock.
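
In effect, every operation on the XFS-private mrlock_t is mapped onto the
matching rw_semaphore operation on the mapping's invalidate_lock. A condensed
sketch of the correspondence (illustrative only; "subclass" stands in for
XFS_MMAPLOCK_DEP(lock_flags), and the real call sites are in xfs_ilock() and
xfs_iunlock() in the diff below):

        /* Before: XFS-private mmap lock (mrlock_t wraps an rwsem) */
        mraccess_nested(&ip->i_mmaplock, subclass);     /* take shared */
        mrupdate_nested(&ip->i_mmaplock, subclass);     /* take exclusive */
        mrunlock_shared(&ip->i_mmaplock);
        mrunlock_excl(&ip->i_mmaplock);

        /* After: VFS invalidate_lock, a plain rw_semaphore in struct address_space */
        down_read_nested(&VFS_I(ip)->i_mapping->invalidate_lock, subclass);
        down_write_nested(&VFS_I(ip)->i_mapping->invalidate_lock, subclass);
        up_read(&VFS_I(ip)->i_mapping->invalidate_lock);
        up_write(&VFS_I(ip)->i_mapping->invalidate_lock);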
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
CC: <linux-xfs@vger.kernel.org>
CC: "Darrick J. Wong" <djwong@kernel.org>
Signed-off-by: Jan Kara <jack@suse.cz>
parent e31cbde7
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -1302,7 +1302,7 @@ xfs_file_llseek(
  *
  * mmap_lock (MM)
  *   sb_start_pagefault(vfs, freeze)
- *     i_mmaplock (XFS - truncate serialisation)
+ *     invalidate_lock (vfs/XFS_MMAPLOCK - truncate serialisation)
  *       page_lock (MM)
  *         i_lock (XFS - extent map serialisation)
  */
@@ -1323,24 +1323,27 @@ __xfs_filemap_fault(
 		file_update_time(vmf->vma->vm_file);
 	}
 
-	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
 	if (IS_DAX(inode)) {
 		pfn_t pfn;
 
+		xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
 		ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL,
 				(write_fault && !vmf->cow_page) ?
 				 &xfs_direct_write_iomap_ops :
 				 &xfs_read_iomap_ops);
 		if (ret & VM_FAULT_NEEDDSYNC)
 			ret = dax_finish_sync_fault(vmf, pe_size, pfn);
+		xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
 	} else {
-		if (write_fault)
+		if (write_fault) {
+			xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
 			ret = iomap_page_mkwrite(vmf,
 					&xfs_buffered_write_iomap_ops);
-		else
+			xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
+		} else {
 			ret = filemap_fault(vmf);
+		}
 	}
-	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
 
 	if (write_fault)
 		sb_end_pagefault(inode->i_sb);
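
The asymmetry in the new code follows from the commit message: plain read
faults must not take XFS_MMAPLOCK themselves because filemap_fault() already
takes invalidate_lock in shared mode when it has to bring pages into the page
cache, while the DAX and page_mkwrite paths still need explicit locking. For
reference, the VFS helpers this relies on look roughly like this sketch
(paraphrased from include/linux/fs.h as added by the same series):

        static inline void filemap_invalidate_lock_shared(
                        struct address_space *mapping)
        {
                down_read(&mapping->invalidate_lock);
        }

        static inline void filemap_invalidate_unlock_shared(
                        struct address_space *mapping)
        {
                up_read(&mapping->invalidate_lock);
        }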
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -132,7 +132,7 @@ xfs_ilock_attr_map_shared(
 
 /*
  * In addition to i_rwsem in the VFS inode, the xfs inode contains 2
- * multi-reader locks: i_mmap_lock and the i_lock.  This routine allows
+ * multi-reader locks: invalidate_lock and the i_lock.  This routine allows
  * various combinations of the locks to be obtained.
  *
  * The 3 locks should always be ordered so that the IO lock is obtained first,
@@ -140,23 +140,23 @@ xfs_ilock_attr_map_shared(
  *
  * Basic locking order:
  *
- * i_rwsem -> i_mmap_lock -> page_lock -> i_ilock
+ * i_rwsem -> invalidate_lock -> page_lock -> i_ilock
  *
  * mmap_lock locking order:
  *
  * i_rwsem -> page lock -> mmap_lock
- * mmap_lock -> i_mmap_lock -> page_lock
+ * mmap_lock -> invalidate_lock -> page_lock
  *
  * The difference in mmap_lock locking order mean that we cannot hold the
- * i_mmap_lock over syscall based read(2)/write(2) based IO. These IO paths can
- * fault in pages during copy in/out (for buffered IO) or require the mmap_lock
- * in get_user_pages() to map the user pages into the kernel address space for
- * direct IO. Similarly the i_rwsem cannot be taken inside a page fault because
- * page faults already hold the mmap_lock.
+ * invalidate_lock over syscall based read(2)/write(2) based IO. These IO paths
+ * can fault in pages during copy in/out (for buffered IO) or require the
+ * mmap_lock in get_user_pages() to map the user pages into the kernel address
+ * space for direct IO. Similarly the i_rwsem cannot be taken inside a page
+ * fault because page faults already hold the mmap_lock.
  *
  * Hence to serialise fully against both syscall and mmap based IO, we need to
- * take both the i_rwsem and the i_mmap_lock. These locks should *only* be both
- * taken in places where we need to invalidate the page cache in a race
+ * take both the i_rwsem and the invalidate_lock. These locks should *only* be
+ * both taken in places where we need to invalidate the page cache in a race
  * free manner (e.g. truncate, hole punch and other extent manipulation
  * functions).
  */
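
To illustrate the last paragraph with a hypothetical caller (not code from
this patch): a truncate or hole-punch style operation would take both locks
exclusively, after which the page cache can be invalidated without racing
against either syscall IO or page faults:

        xfs_ilock(ip, XFS_IOLOCK_EXCL);         /* i_rwsem: blocks read()/write() */
        xfs_ilock(ip, XFS_MMAPLOCK_EXCL);       /* invalidate_lock: blocks page faults */

        truncate_pagecache_range(VFS_I(ip), start, end);
        /* ... manipulate extents under XFS_ILOCK_EXCL ... */

        xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
        xfs_iunlock(ip, XFS_IOLOCK_EXCL);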
@@ -188,10 +188,13 @@ xfs_ilock(
 				 XFS_IOLOCK_DEP(lock_flags));
 	}
 
-	if (lock_flags & XFS_MMAPLOCK_EXCL)
-		mrupdate_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));
-	else if (lock_flags & XFS_MMAPLOCK_SHARED)
-		mraccess_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));
+	if (lock_flags & XFS_MMAPLOCK_EXCL) {
+		down_write_nested(&VFS_I(ip)->i_mapping->invalidate_lock,
+				  XFS_MMAPLOCK_DEP(lock_flags));
+	} else if (lock_flags & XFS_MMAPLOCK_SHARED) {
+		down_read_nested(&VFS_I(ip)->i_mapping->invalidate_lock,
+				  XFS_MMAPLOCK_DEP(lock_flags));
+	}
 
 	if (lock_flags & XFS_ILOCK_EXCL)
 		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
@@ -240,10 +243,10 @@ xfs_ilock_nowait(
 	}
 	if (lock_flags & XFS_MMAPLOCK_EXCL) {
-		if (!mrtryupdate(&ip->i_mmaplock))
+		if (!down_write_trylock(&VFS_I(ip)->i_mapping->invalidate_lock))
 			goto out_undo_iolock;
 	} else if (lock_flags & XFS_MMAPLOCK_SHARED) {
-		if (!mrtryaccess(&ip->i_mmaplock))
+		if (!down_read_trylock(&VFS_I(ip)->i_mapping->invalidate_lock))
 			goto out_undo_iolock;
 	}
@@ -258,9 +261,9 @@ xfs_ilock_nowait(
 out_undo_mmaplock:
 	if (lock_flags & XFS_MMAPLOCK_EXCL)
-		mrunlock_excl(&ip->i_mmaplock);
+		up_write(&VFS_I(ip)->i_mapping->invalidate_lock);
 	else if (lock_flags & XFS_MMAPLOCK_SHARED)
-		mrunlock_shared(&ip->i_mmaplock);
+		up_read(&VFS_I(ip)->i_mapping->invalidate_lock);
 out_undo_iolock:
 	if (lock_flags & XFS_IOLOCK_EXCL)
 		up_write(&VFS_I(ip)->i_rwsem);
@@ -307,9 +310,9 @@ xfs_iunlock(
 		up_read(&VFS_I(ip)->i_rwsem);
 
 	if (lock_flags & XFS_MMAPLOCK_EXCL)
-		mrunlock_excl(&ip->i_mmaplock);
+		up_write(&VFS_I(ip)->i_mapping->invalidate_lock);
 	else if (lock_flags & XFS_MMAPLOCK_SHARED)
-		mrunlock_shared(&ip->i_mmaplock);
+		up_read(&VFS_I(ip)->i_mapping->invalidate_lock);
 
 	if (lock_flags & XFS_ILOCK_EXCL)
 		mrunlock_excl(&ip->i_lock);
@@ -335,7 +338,7 @@ xfs_ilock_demote(
 	if (lock_flags & XFS_ILOCK_EXCL)
 		mrdemote(&ip->i_lock);
 	if (lock_flags & XFS_MMAPLOCK_EXCL)
-		mrdemote(&ip->i_mmaplock);
+		downgrade_write(&VFS_I(ip)->i_mapping->invalidate_lock);
 	if (lock_flags & XFS_IOLOCK_EXCL)
 		downgrade_write(&VFS_I(ip)->i_rwsem);
@@ -375,9 +378,8 @@ xfs_isilocked(
 	}
 
 	if (lock_flags & (XFS_MMAPLOCK_EXCL|XFS_MMAPLOCK_SHARED)) {
-		if (!(lock_flags & XFS_MMAPLOCK_SHARED))
-			return !!ip->i_mmaplock.mr_writer;
-		return rwsem_is_locked(&ip->i_mmaplock.mr_lock);
+		return __xfs_rwsem_islocked(&VFS_I(ip)->i_mapping->invalidate_lock,
+				(lock_flags & XFS_MMAPLOCK_SHARED));
 	}
 
 	if (lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) {
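
__xfs_rwsem_islocked() comes from the parent commit e31cbde7, which
refactored xfs_isilocked(); turning XFS_MMAPLOCK into a plain rw_semaphore is
what lets the MMAPLOCK branch reuse it. Roughly (paraphrased, lockdep-aware):

        static inline bool
        __xfs_rwsem_islocked(struct rw_semaphore *rwsem, bool shared)
        {
                if (!debug_locks)
                        return rwsem_is_locked(rwsem);
                if (!shared)
                        return lockdep_is_held_type(rwsem, 0);
                /* held in any mode counts as "at least shared" */
                return lockdep_is_held_type(rwsem, -1);
        }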
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -40,7 +40,6 @@ typedef struct xfs_inode {
 	/* Transaction and locking information. */
 	struct xfs_inode_log_item *i_itemp;	/* logging information */
 	mrlock_t		i_lock;		/* inode lock */
-	mrlock_t		i_mmaplock;	/* inode mmap IO lock */
 	atomic_t		i_pincount;	/* inode pin count */
 
 	/*
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -709,8 +709,6 @@ xfs_fs_inode_init_once(
 	atomic_set(&ip->i_pincount, 0);
 	spin_lock_init(&ip->i_flags_lock);
 
-	mrlock_init(&ip->i_mmaplock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
-		     "xfsino", ip->i_ino);
 	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
 		     "xfsino", ip->i_ino);
 }