Commit ad6e0764 authored by Linus Torvalds

Merge tag 'zonefs-5.19-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/dlemoal/zonefs

Pull zonefs fixes from Damien Le Moal:

 - Fix handling of the explicit-open mount option, and in particular the
   conditions under which this option can be ignored.

 - Fix a problem with zonefs iomap_begin method, causing a hang in
   iomap_readahead() when a readahead request reaches the end of a file.

* tag 'zonefs-5.19-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/dlemoal/zonefs:
  zonefs: fix zonefs_iomap_begin() for reads
  zonefs: Do not ignore explicit_open with active zone limit
  zonefs: fix handling of explicit_open option on mount
parents f7a1d00e c1c1204c
...@@ -110,15 +110,51 @@ static inline void zonefs_i_size_write(struct inode *inode, loff_t isize) ...@@ -110,15 +110,51 @@ static inline void zonefs_i_size_write(struct inode *inode, loff_t isize)
} }
} }
static int zonefs_iomap_begin(struct inode *inode, loff_t offset, loff_t length, static int zonefs_read_iomap_begin(struct inode *inode, loff_t offset,
unsigned int flags, struct iomap *iomap, loff_t length, unsigned int flags,
struct iomap *srcmap) struct iomap *iomap, struct iomap *srcmap)
{ {
struct zonefs_inode_info *zi = ZONEFS_I(inode); struct zonefs_inode_info *zi = ZONEFS_I(inode);
struct super_block *sb = inode->i_sb; struct super_block *sb = inode->i_sb;
loff_t isize; loff_t isize;
/* All I/Os should always be within the file maximum size */ /*
* All blocks are always mapped below EOF. If reading past EOF,
* act as if there is a hole up to the file maximum size.
*/
mutex_lock(&zi->i_truncate_mutex);
iomap->bdev = inode->i_sb->s_bdev;
iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize);
isize = i_size_read(inode);
if (iomap->offset >= isize) {
iomap->type = IOMAP_HOLE;
iomap->addr = IOMAP_NULL_ADDR;
iomap->length = length;
} else {
iomap->type = IOMAP_MAPPED;
iomap->addr = (zi->i_zsector << SECTOR_SHIFT) + iomap->offset;
iomap->length = isize - iomap->offset;
}
mutex_unlock(&zi->i_truncate_mutex);
trace_zonefs_iomap_begin(inode, iomap);
return 0;
}
static const struct iomap_ops zonefs_read_iomap_ops = {
.iomap_begin = zonefs_read_iomap_begin,
};
static int zonefs_write_iomap_begin(struct inode *inode, loff_t offset,
loff_t length, unsigned int flags,
struct iomap *iomap, struct iomap *srcmap)
{
struct zonefs_inode_info *zi = ZONEFS_I(inode);
struct super_block *sb = inode->i_sb;
loff_t isize;
/* All write I/Os should always be within the file maximum size */
if (WARN_ON_ONCE(offset + length > zi->i_max_size)) if (WARN_ON_ONCE(offset + length > zi->i_max_size))
return -EIO; return -EIO;
...@@ -128,7 +164,7 @@ static int zonefs_iomap_begin(struct inode *inode, loff_t offset, loff_t length, ...@@ -128,7 +164,7 @@ static int zonefs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
* operation. * operation.
*/ */
if (WARN_ON_ONCE(zi->i_ztype == ZONEFS_ZTYPE_SEQ && if (WARN_ON_ONCE(zi->i_ztype == ZONEFS_ZTYPE_SEQ &&
(flags & IOMAP_WRITE) && !(flags & IOMAP_DIRECT))) !(flags & IOMAP_DIRECT)))
return -EIO; return -EIO;
/* /*
...@@ -137,46 +173,43 @@ static int zonefs_iomap_begin(struct inode *inode, loff_t offset, loff_t length, ...@@ -137,46 +173,43 @@ static int zonefs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
 * write pointer) and unwritten beyond. * write pointer) and unwritten beyond.
*/ */
mutex_lock(&zi->i_truncate_mutex); mutex_lock(&zi->i_truncate_mutex);
iomap->bdev = inode->i_sb->s_bdev;
iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize);
iomap->addr = (zi->i_zsector << SECTOR_SHIFT) + iomap->offset;
isize = i_size_read(inode); isize = i_size_read(inode);
if (offset >= isize) if (iomap->offset >= isize) {
iomap->type = IOMAP_UNWRITTEN; iomap->type = IOMAP_UNWRITTEN;
else iomap->length = zi->i_max_size - iomap->offset;
} else {
iomap->type = IOMAP_MAPPED; iomap->type = IOMAP_MAPPED;
if (flags & IOMAP_WRITE) iomap->length = isize - iomap->offset;
length = zi->i_max_size - offset; }
else
length = min(length, isize - offset);
mutex_unlock(&zi->i_truncate_mutex); mutex_unlock(&zi->i_truncate_mutex);
iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize);
iomap->length = ALIGN(offset + length, sb->s_blocksize) - iomap->offset;
iomap->bdev = inode->i_sb->s_bdev;
iomap->addr = (zi->i_zsector << SECTOR_SHIFT) + iomap->offset;
trace_zonefs_iomap_begin(inode, iomap); trace_zonefs_iomap_begin(inode, iomap);
return 0; return 0;
} }
static const struct iomap_ops zonefs_iomap_ops = { static const struct iomap_ops zonefs_write_iomap_ops = {
.iomap_begin = zonefs_iomap_begin, .iomap_begin = zonefs_write_iomap_begin,
}; };
static int zonefs_read_folio(struct file *unused, struct folio *folio) static int zonefs_read_folio(struct file *unused, struct folio *folio)
{ {
return iomap_read_folio(folio, &zonefs_iomap_ops); return iomap_read_folio(folio, &zonefs_read_iomap_ops);
} }
static void zonefs_readahead(struct readahead_control *rac) static void zonefs_readahead(struct readahead_control *rac)
{ {
iomap_readahead(rac, &zonefs_iomap_ops); iomap_readahead(rac, &zonefs_read_iomap_ops);
} }
/* /*
* Map blocks for page writeback. This is used only on conventional zone files, * Map blocks for page writeback. This is used only on conventional zone files,
* which implies that the page range can only be within the fixed inode size. * which implies that the page range can only be within the fixed inode size.
*/ */
static int zonefs_map_blocks(struct iomap_writepage_ctx *wpc, static int zonefs_write_map_blocks(struct iomap_writepage_ctx *wpc,
struct inode *inode, loff_t offset) struct inode *inode, loff_t offset)
{ {
struct zonefs_inode_info *zi = ZONEFS_I(inode); struct zonefs_inode_info *zi = ZONEFS_I(inode);
...@@ -191,12 +224,12 @@ static int zonefs_map_blocks(struct iomap_writepage_ctx *wpc, ...@@ -191,12 +224,12 @@ static int zonefs_map_blocks(struct iomap_writepage_ctx *wpc,
offset < wpc->iomap.offset + wpc->iomap.length) offset < wpc->iomap.offset + wpc->iomap.length)
return 0; return 0;
return zonefs_iomap_begin(inode, offset, zi->i_max_size - offset, return zonefs_write_iomap_begin(inode, offset, zi->i_max_size - offset,
IOMAP_WRITE, &wpc->iomap, NULL); IOMAP_WRITE, &wpc->iomap, NULL);
} }
static const struct iomap_writeback_ops zonefs_writeback_ops = { static const struct iomap_writeback_ops zonefs_writeback_ops = {
.map_blocks = zonefs_map_blocks, .map_blocks = zonefs_write_map_blocks,
}; };
static int zonefs_writepage(struct page *page, struct writeback_control *wbc) static int zonefs_writepage(struct page *page, struct writeback_control *wbc)
...@@ -226,7 +259,8 @@ static int zonefs_swap_activate(struct swap_info_struct *sis, ...@@ -226,7 +259,8 @@ static int zonefs_swap_activate(struct swap_info_struct *sis,
return -EINVAL; return -EINVAL;
} }
return iomap_swapfile_activate(sis, swap_file, span, &zonefs_iomap_ops); return iomap_swapfile_activate(sis, swap_file, span,
&zonefs_read_iomap_ops);
} }
static const struct address_space_operations zonefs_file_aops = { static const struct address_space_operations zonefs_file_aops = {
...@@ -647,7 +681,7 @@ static vm_fault_t zonefs_filemap_page_mkwrite(struct vm_fault *vmf) ...@@ -647,7 +681,7 @@ static vm_fault_t zonefs_filemap_page_mkwrite(struct vm_fault *vmf)
/* Serialize against truncates */ /* Serialize against truncates */
filemap_invalidate_lock_shared(inode->i_mapping); filemap_invalidate_lock_shared(inode->i_mapping);
ret = iomap_page_mkwrite(vmf, &zonefs_iomap_ops); ret = iomap_page_mkwrite(vmf, &zonefs_write_iomap_ops);
filemap_invalidate_unlock_shared(inode->i_mapping); filemap_invalidate_unlock_shared(inode->i_mapping);
sb_end_pagefault(inode->i_sb); sb_end_pagefault(inode->i_sb);
...@@ -899,7 +933,7 @@ static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from) ...@@ -899,7 +933,7 @@ static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
if (append) if (append)
ret = zonefs_file_dio_append(iocb, from); ret = zonefs_file_dio_append(iocb, from);
else else
ret = iomap_dio_rw(iocb, from, &zonefs_iomap_ops, ret = iomap_dio_rw(iocb, from, &zonefs_write_iomap_ops,
&zonefs_write_dio_ops, 0, NULL, 0); &zonefs_write_dio_ops, 0, NULL, 0);
if (zi->i_ztype == ZONEFS_ZTYPE_SEQ && if (zi->i_ztype == ZONEFS_ZTYPE_SEQ &&
(ret > 0 || ret == -EIOCBQUEUED)) { (ret > 0 || ret == -EIOCBQUEUED)) {
...@@ -948,7 +982,7 @@ static ssize_t zonefs_file_buffered_write(struct kiocb *iocb, ...@@ -948,7 +982,7 @@ static ssize_t zonefs_file_buffered_write(struct kiocb *iocb,
if (ret <= 0) if (ret <= 0)
goto inode_unlock; goto inode_unlock;
ret = iomap_file_buffered_write(iocb, from, &zonefs_iomap_ops); ret = iomap_file_buffered_write(iocb, from, &zonefs_write_iomap_ops);
if (ret > 0) if (ret > 0)
iocb->ki_pos += ret; iocb->ki_pos += ret;
else if (ret == -EIO) else if (ret == -EIO)
...@@ -1041,7 +1075,7 @@ static ssize_t zonefs_file_read_iter(struct kiocb *iocb, struct iov_iter *to) ...@@ -1041,7 +1075,7 @@ static ssize_t zonefs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
goto inode_unlock; goto inode_unlock;
} }
file_accessed(iocb->ki_filp); file_accessed(iocb->ki_filp);
ret = iomap_dio_rw(iocb, to, &zonefs_iomap_ops, ret = iomap_dio_rw(iocb, to, &zonefs_read_iomap_ops,
&zonefs_read_dio_ops, 0, NULL, 0); &zonefs_read_dio_ops, 0, NULL, 0);
} else { } else {
ret = generic_file_read_iter(iocb, to); ret = generic_file_read_iter(iocb, to);
...@@ -1085,7 +1119,8 @@ static int zonefs_seq_file_write_open(struct inode *inode) ...@@ -1085,7 +1119,8 @@ static int zonefs_seq_file_write_open(struct inode *inode)
if (sbi->s_mount_opts & ZONEFS_MNTOPT_EXPLICIT_OPEN) { if (sbi->s_mount_opts & ZONEFS_MNTOPT_EXPLICIT_OPEN) {
if (wro > sbi->s_max_wro_seq_files) { if (sbi->s_max_wro_seq_files
&& wro > sbi->s_max_wro_seq_files) {
atomic_dec(&sbi->s_wro_seq_files); atomic_dec(&sbi->s_wro_seq_files);
ret = -EBUSY; ret = -EBUSY;
goto unlock; goto unlock;
...@@ -1760,12 +1795,6 @@ static int zonefs_fill_super(struct super_block *sb, void *data, int silent) ...@@ -1760,12 +1795,6 @@ static int zonefs_fill_super(struct super_block *sb, void *data, int silent)
atomic_set(&sbi->s_wro_seq_files, 0); atomic_set(&sbi->s_wro_seq_files, 0);
sbi->s_max_wro_seq_files = bdev_max_open_zones(sb->s_bdev); sbi->s_max_wro_seq_files = bdev_max_open_zones(sb->s_bdev);
if (!sbi->s_max_wro_seq_files &&
sbi->s_mount_opts & ZONEFS_MNTOPT_EXPLICIT_OPEN) {
zonefs_info(sb, "No open zones limit. Ignoring explicit_open mount option\n");
sbi->s_mount_opts &= ~ZONEFS_MNTOPT_EXPLICIT_OPEN;
}
atomic_set(&sbi->s_active_seq_files, 0); atomic_set(&sbi->s_active_seq_files, 0);
sbi->s_max_active_seq_files = bdev_max_active_zones(sb->s_bdev); sbi->s_max_active_seq_files = bdev_max_active_zones(sb->s_bdev);
...@@ -1790,6 +1819,14 @@ static int zonefs_fill_super(struct super_block *sb, void *data, int silent) ...@@ -1790,6 +1819,14 @@ static int zonefs_fill_super(struct super_block *sb, void *data, int silent)
zonefs_info(sb, "Mounting %u zones", zonefs_info(sb, "Mounting %u zones",
blkdev_nr_zones(sb->s_bdev->bd_disk)); blkdev_nr_zones(sb->s_bdev->bd_disk));
if (!sbi->s_max_wro_seq_files &&
!sbi->s_max_active_seq_files &&
sbi->s_mount_opts & ZONEFS_MNTOPT_EXPLICIT_OPEN) {
zonefs_info(sb,
"No open and active zone limits. Ignoring explicit_open mount option\n");
sbi->s_mount_opts &= ~ZONEFS_MNTOPT_EXPLICIT_OPEN;
}
/* Create root directory inode */ /* Create root directory inode */
ret = -ENOMEM; ret = -ENOMEM;
inode = new_inode(sb); inode = new_inode(sb);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment