Commit 1c273e10 authored by Linus Torvalds

Merge tag 'zonefs-5.12-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/dlemoal/zonefs

Pull zonefs fixes from Damien Le Moal:

 - Fix inode write open reference count (Chao)

 - Fix wrong write offset for asynchronous O_APPEND writes (me)

 - Prevent use of sequential zone files as swap files (me)

* tag 'zonefs-5.12-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/dlemoal/zonefs:
  zonefs: fix to update .i_wr_refcnt correctly in zonefs_open_zone()
  zonefs: Fix O_APPEND async write handling
  zonefs: prevent use of seq files as swap file
parents d626c692 6980d29c
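
For context on the swap file restriction, here is a minimal user-space sketch (not part of the commit) of how it surfaces once zonefs_swap_activate() rejects non-conventional zone files. The mount point and file name below are hypothetical, and swapon(2) also requires a swap signature (mkswap) and CAP_SYS_ADMIN, so the exact errno seen in practice may come from an earlier check:

/* Hypothetical demo: try to enable swap on a zonefs sequential zone file. */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/swap.h>

int main(void)
{
        /* Assumed layout: zonefs mounted at /mnt/zonefs, sequential files under seq/. */
        const char *path = "/mnt/zonefs/seq/0";

        if (swapon(path, 0) < 0) {
                /* With this fix, zonefs refuses swap on sequential zone files. */
                fprintf(stderr, "swapon(%s) refused: %s\n", path, strerror(errno));
                return 0;
        }

        fprintf(stderr, "unexpected: swapon(%s) succeeded\n", path);
        return 1;
}

Conventional zone files can still be activated as swap space; while one is in use, zonefs_write_checks() additionally refuses regular writes to it with -ETXTBSY.
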
@@ -165,6 +165,21 @@ static int zonefs_writepages(struct address_space *mapping,
         return iomap_writepages(mapping, wbc, &wpc, &zonefs_writeback_ops);
 }
 
+static int zonefs_swap_activate(struct swap_info_struct *sis,
+                                struct file *swap_file, sector_t *span)
+{
+        struct inode *inode = file_inode(swap_file);
+        struct zonefs_inode_info *zi = ZONEFS_I(inode);
+
+        if (zi->i_ztype != ZONEFS_ZTYPE_CNV) {
+                zonefs_err(inode->i_sb,
+                           "swap file: not a conventional zone file\n");
+                return -EINVAL;
+        }
+
+        return iomap_swapfile_activate(sis, swap_file, span, &zonefs_iomap_ops);
+}
+
 static const struct address_space_operations zonefs_file_aops = {
         .readpage               = zonefs_readpage,
         .readahead              = zonefs_readahead,
@@ -177,6 +192,7 @@ static const struct address_space_operations zonefs_file_aops = {
         .is_partially_uptodate  = iomap_is_partially_uptodate,
         .error_remove_page      = generic_error_remove_page,
         .direct_IO              = noop_direct_IO,
+        .swap_activate          = zonefs_swap_activate,
 };
 
 static void zonefs_update_stats(struct inode *inode, loff_t new_isize)
@@ -727,6 +743,68 @@ static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from)
         return ret;
 }
 
+/*
+ * Do not exceed the LFS limits nor the file zone size. If pos is under the
+ * limit it becomes a short access. If it exceeds the limit, return -EFBIG.
+ */
+static loff_t zonefs_write_check_limits(struct file *file, loff_t pos,
+                                        loff_t count)
+{
+        struct inode *inode = file_inode(file);
+        struct zonefs_inode_info *zi = ZONEFS_I(inode);
+        loff_t limit = rlimit(RLIMIT_FSIZE);
+        loff_t max_size = zi->i_max_size;
+
+        if (limit != RLIM_INFINITY) {
+                if (pos >= limit) {
+                        send_sig(SIGXFSZ, current, 0);
+                        return -EFBIG;
+                }
+                count = min(count, limit - pos);
+        }
+
+        if (!(file->f_flags & O_LARGEFILE))
+                max_size = min_t(loff_t, MAX_NON_LFS, max_size);
+
+        if (unlikely(pos >= max_size))
+                return -EFBIG;
+
+        return min(count, max_size - pos);
+}
+
+static ssize_t zonefs_write_checks(struct kiocb *iocb, struct iov_iter *from)
+{
+        struct file *file = iocb->ki_filp;
+        struct inode *inode = file_inode(file);
+        struct zonefs_inode_info *zi = ZONEFS_I(inode);
+        loff_t count;
+
+        if (IS_SWAPFILE(inode))
+                return -ETXTBSY;
+
+        if (!iov_iter_count(from))
+                return 0;
+
+        if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT))
+                return -EINVAL;
+
+        if (iocb->ki_flags & IOCB_APPEND) {
+                if (zi->i_ztype != ZONEFS_ZTYPE_SEQ)
+                        return -EINVAL;
+                mutex_lock(&zi->i_truncate_mutex);
+                iocb->ki_pos = zi->i_wpoffset;
+                mutex_unlock(&zi->i_truncate_mutex);
+        }
+
+        count = zonefs_write_check_limits(file, iocb->ki_pos,
+                                          iov_iter_count(from));
+        if (count < 0)
+                return count;
+
+        iov_iter_truncate(from, count);
+        return iov_iter_count(from);
+}
+
 /*
  * Handle direct writes. For sequential zone files, this is the only possible
  * write path. For these files, check that the user is issuing writes
@@ -744,8 +822,7 @@ static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
         struct super_block *sb = inode->i_sb;
         bool sync = is_sync_kiocb(iocb);
         bool append = false;
-        size_t count;
-        ssize_t ret;
+        ssize_t ret, count;
 
         /*
          * For async direct IOs to sequential zone files, refuse IOCB_NOWAIT
@@ -763,12 +840,11 @@ static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
                 inode_lock(inode);
         }
 
-        ret = generic_write_checks(iocb, from);
-        if (ret <= 0)
+        count = zonefs_write_checks(iocb, from);
+        if (count <= 0) {
+                ret = count;
                 goto inode_unlock;
-
-        iov_iter_truncate(from, zi->i_max_size - iocb->ki_pos);
-        count = iov_iter_count(from);
+        }
 
         if ((iocb->ki_pos | count) & (sb->s_blocksize - 1)) {
                 ret = -EINVAL;
@@ -828,12 +904,10 @@ static ssize_t zonefs_file_buffered_write(struct kiocb *iocb,
                 inode_lock(inode);
         }
 
-        ret = generic_write_checks(iocb, from);
+        ret = zonefs_write_checks(iocb, from);
         if (ret <= 0)
                 goto inode_unlock;
 
-        iov_iter_truncate(from, zi->i_max_size - iocb->ki_pos);
-
         ret = iomap_file_buffered_write(iocb, from, &zonefs_iomap_ops);
         if (ret > 0)
                 iocb->ki_pos += ret;
@@ -966,9 +1040,7 @@ static int zonefs_open_zone(struct inode *inode)
 
         mutex_lock(&zi->i_truncate_mutex);
 
-        zi->i_wr_refcnt++;
-        if (zi->i_wr_refcnt == 1) {
-
+        if (!zi->i_wr_refcnt) {
                 if (atomic_inc_return(&sbi->s_open_zones) > sbi->s_max_open_zones) {
                         atomic_dec(&sbi->s_open_zones);
                         ret = -EBUSY;
@@ -978,7 +1050,6 @@ static int zonefs_open_zone(struct inode *inode)
                 if (i_size_read(inode) < zi->i_max_size) {
                         ret = zonefs_zone_mgmt(inode, REQ_OP_ZONE_OPEN);
                         if (ret) {
-                                zi->i_wr_refcnt--;
                                 atomic_dec(&sbi->s_open_zones);
                                 goto unlock;
                         }
@@ -986,6 +1057,8 @@ static int zonefs_open_zone(struct inode *inode)
                 }
         }
 
+        zi->i_wr_refcnt++;
+
 unlock:
         mutex_unlock(&zi->i_truncate_mutex);
......
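
The O_APPEND fix above makes zonefs_write_checks() take the append position from the zone write pointer (zi->i_wpoffset) rather than from i_size, which can lag behind while asynchronous writes are still in flight. A rough user-space sketch of the append path it governs follows; the mount point, file name, and block size are assumptions, and a plain synchronous direct write is used for brevity even though the fix specifically targets asynchronous (AIO) submissions:

/* Hypothetical demo: O_APPEND + O_DIRECT write to a zonefs sequential zone
 * file; the kernel places the data at the current zone write pointer. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        const size_t bs = 4096; /* assumed logical block size of the zoned device */
        void *buf = NULL;
        int fd;

        if (posix_memalign(&buf, bs, bs))
                return 1;
        memset(buf, 0xab, bs);

        /* Assumed layout: zonefs mounted at /mnt/zonefs, sequential files under seq/. */
        fd = open("/mnt/zonefs/seq/0", O_WRONLY | O_DIRECT | O_APPEND);
        if (fd < 0) {
                perror("open");
                return 1;
        }

        /* Appended at the zone write pointer, not at a stale file offset. */
        if (write(fd, buf, bs) != (ssize_t)bs)
                perror("write");

        close(fd);
        free(buf);
        return 0;
}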