Commit bb6b2062 authored by Linus Torvalds

Merge tag 'fs_for_v6.10-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs

Pull isofs, udf, quota, ext2, and reiserfs updates from Jan Kara:

 - convert isofs to the new mount API

 - cleanup isofs Makefile

 - udf conversion to folios

 - some other small udf cleanups and fixes

 - ext2 cleanups

 - removal of reiserfs .writepage method

 - update reiserfs README file

* tag 'fs_for_v6.10-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs:
  isofs: Use *-y instead of *-objs in Makefile
  ext2: Remove LEGACY_DIRECT_IO dependency
  isofs: Remove calls to set/clear the error flag
  ext2: Remove call to folio_set_error()
  udf: Use a folio in udf_write_end()
  udf: Convert udf_page_mkwrite() to use a folio
  udf: Convert udf_symlink_getattr() to use a folio
  udf: Convert udf_adinicb_readpage() to udf_adinicb_read_folio()
  udf: Convert udf_expand_file_adinicb() to use a folio
  udf: Convert udf_write_begin() to use a folio
  udf: Convert udf_symlink_filler() to use a folio
  reiserfs: Trim some README bits
  quota: fix to propagate error of mark_dquot_dirty() to caller
  reiserfs: Convert to writepages
  udf: udftime: prevent overflow in udf_disk_stamp_to_time()
  ext2: set FMODE_CAN_ODIRECT instead of a dummy direct_IO method
  udf: replace deprecated strncpy/strcpy with strscpy
  udf: Remove second semicolon
  isofs: convert isofs to use the new mount API
  fs: quota: use group allocation of per-cpu counters API
parents d0e71e23 1dd719a9
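The isofs conversion to the new mount API, the largest change in this pull, is not reproduced in the hunks below. For orientation, here is a minimal sketch of the fs_context pattern such a conversion moves to. All names ("examplefs", the "quiet" option) are hypothetical and only the core interfaces (fs_parse(), get_tree_bdev(), ->init_fs_context) are the real API; this is not the isofs code.

#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/module.h>

/* Hypothetical mount option table; isofs has its own, larger one. */
enum { Opt_quiet };

static const struct fs_parameter_spec examplefs_param_spec[] = {
	fsparam_flag("quiet", Opt_quiet),
	{}
};

static int examplefs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct fs_parse_result result;
	int opt = fs_parse(fc, examplefs_param_spec, param, &result);

	if (opt < 0)
		return opt;
	/* record the parsed option in fc->fs_private for fill_super */
	return 0;
}

static int examplefs_fill_super(struct super_block *sb, struct fs_context *fc)
{
	/* read the on-disk superblock, set sb->s_op, instantiate the root */
	return 0;
}

static int examplefs_get_tree(struct fs_context *fc)
{
	return get_tree_bdev(fc, examplefs_fill_super);
}

static const struct fs_context_operations examplefs_context_ops = {
	.parse_param	= examplefs_parse_param,
	.get_tree	= examplefs_get_tree,
};

static int examplefs_init_fs_context(struct fs_context *fc)
{
	fc->ops = &examplefs_context_ops;
	return 0;
}

static struct file_system_type examplefs_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "examplefs",
	.init_fs_context	= examplefs_init_fs_context,
	.parameters		= examplefs_param_spec,
	.kill_sb		= kill_block_super,
	.fs_flags		= FS_REQUIRES_DEV,
};

With this in place, option parsing happens one parameter at a time through ->parse_param(), whether the mount arrives via mount(2) or the newer fsconfig(2)/fsmount(2) calls.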
......@@ -3,7 +3,6 @@ config EXT2_FS
tristate "Second extended fs support (DEPRECATED)"
select BUFFER_HEAD
select FS_IOMAP
select LEGACY_DIRECT_IO
help
Ext2 is a standard Linux file system for hard disks.
......
......@@ -175,7 +175,6 @@ static bool ext2_check_folio(struct folio *folio, int quiet, char *kaddr)
(unsigned long) le32_to_cpu(p->inode));
}
fail:
folio_set_error(folio);
return false;
}
......
......@@ -302,6 +302,12 @@ static ssize_t ext2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
return generic_file_write_iter(iocb, from);
}
static int ext2_file_open(struct inode *inode, struct file *filp)
{
filp->f_mode |= FMODE_CAN_ODIRECT;
return dquot_file_open(inode, filp);
}
const struct file_operations ext2_file_operations = {
.llseek = generic_file_llseek,
.read_iter = ext2_file_read_iter,
......@@ -311,7 +317,7 @@ const struct file_operations ext2_file_operations = {
.compat_ioctl = ext2_compat_ioctl,
#endif
.mmap = ext2_file_mmap,
.open = dquot_file_open,
.open = ext2_file_open,
.release = ext2_release_file,
.fsync = ext2_fsync,
.get_unmapped_area = thp_get_unmapped_area,
......
......@@ -965,7 +965,6 @@ const struct address_space_operations ext2_aops = {
.write_begin = ext2_write_begin,
.write_end = ext2_write_end,
.bmap = ext2_bmap,
.direct_IO = noop_direct_IO,
.writepages = ext2_writepages,
.migrate_folio = buffer_migrate_folio,
.is_partially_uptodate = block_is_partially_uptodate,
......@@ -974,7 +973,6 @@ const struct address_space_operations ext2_aops = {
static const struct address_space_operations ext2_dax_aops = {
.writepages = ext2_dax_writepages,
.direct_IO = noop_direct_IO,
.dirty_folio = noop_dirty_folio,
};
......
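The file.c and inode.c hunks above go together: ext2 does O_DIRECT through iomap, so instead of keeping a dummy ->direct_IO entry only to satisfy the VFS open-time check, the open method now sets FMODE_CAN_ODIRECT. Roughly, the check in the VFS open path that this flag satisfies (paraphrased, not a verbatim quote of fs/open.c):

/* In do_dentry_open(), approximately: an O_DIRECT open is refused unless
 * the filesystem advertised support via FMODE_CAN_ODIRECT. */
if ((f->f_flags & O_DIRECT) && !(f->f_mode & FMODE_CAN_ODIRECT))
	return -EINVAL;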
......@@ -5,7 +5,6 @@
obj-$(CONFIG_ISO9660_FS) += isofs.o
isofs-objs-y := namei.o inode.o dir.o util.o rock.o export.o
isofs-objs-$(CONFIG_JOLIET) += joliet.o
isofs-objs-$(CONFIG_ZISOFS) += compress.o
isofs-objs := $(isofs-objs-y)
isofs-y := namei.o inode.o dir.o util.o rock.o export.o
isofs-$(CONFIG_JOLIET) += joliet.o
isofs-$(CONFIG_ZISOFS) += compress.o
......@@ -346,8 +346,6 @@ static int zisofs_read_folio(struct file *file, struct folio *folio)
for (i = 0; i < pcount; i++, index++) {
if (i != full_page)
pages[i] = grab_cache_page_nowait(mapping, index);
if (pages[i])
ClearPageError(pages[i]);
}
err = zisofs_fill_pages(inode, full_page, pcount, pages);
......@@ -356,8 +354,6 @@ static int zisofs_read_folio(struct file *file, struct folio *folio)
for (i = 0; i < pcount; i++) {
if (pages[i]) {
flush_dcache_page(pages[i]);
if (i == full_page && err)
SetPageError(pages[i]);
unlock_page(pages[i]);
if (i != full_page)
put_page(pages[i]);
......
......@@ -410,7 +410,7 @@ static inline int mark_all_dquot_dirty(struct dquot __rcu * const *dquots)
if (dquot)
/* Even in case of error we have to continue */
ret = mark_dquot_dirty(dquot);
if (!err)
if (!err && ret < 0)
err = ret;
}
return err;
......@@ -1737,7 +1737,7 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
if (reserve)
goto out_flush_warn;
mark_all_dquot_dirty(dquots);
ret = mark_all_dquot_dirty(dquots);
out_flush_warn:
srcu_read_unlock(&dquot_srcu, index);
flush_warnings(warn);
......@@ -1786,7 +1786,7 @@ int dquot_alloc_inode(struct inode *inode)
warn_put_all:
spin_unlock(&inode->i_lock);
if (ret == 0)
mark_all_dquot_dirty(dquots);
ret = mark_all_dquot_dirty(dquots);
srcu_read_unlock(&dquot_srcu, index);
flush_warnings(warn);
return ret;
......@@ -1990,7 +1990,7 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
qsize_t inode_usage = 1;
struct dquot __rcu **dquots;
struct dquot *transfer_from[MAXQUOTAS] = {};
int cnt, index, ret = 0;
int cnt, index, ret = 0, err;
char is_valid[MAXQUOTAS] = {};
struct dquot_warn warn_to[MAXQUOTAS];
struct dquot_warn warn_from_inodes[MAXQUOTAS];
......@@ -2087,8 +2087,12 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
* mark_all_dquot_dirty().
*/
index = srcu_read_lock(&dquot_srcu);
mark_all_dquot_dirty((struct dquot __rcu **)transfer_from);
mark_all_dquot_dirty((struct dquot __rcu **)transfer_to);
err = mark_all_dquot_dirty((struct dquot __rcu **)transfer_from);
if (err < 0)
ret = err;
err = mark_all_dquot_dirty((struct dquot __rcu **)transfer_to);
if (err < 0)
ret = err;
srcu_read_unlock(&dquot_srcu, index);
flush_warnings(warn_to);
......@@ -2098,7 +2102,7 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
if (is_valid[cnt])
transfer_to[cnt] = transfer_from[cnt];
return 0;
return ret;
over_quota:
/* Back out changes we already did */
for (cnt--; cnt >= 0; cnt--) {
......@@ -2726,6 +2730,7 @@ static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di)
struct mem_dqblk *dm = &dquot->dq_dqb;
int check_blim = 0, check_ilim = 0;
struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];
int ret;
if (di->d_fieldmask & ~VFS_QC_MASK)
return -EINVAL;
......@@ -2807,8 +2812,9 @@ static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di)
else
set_bit(DQ_FAKE_B, &dquot->dq_flags);
spin_unlock(&dquot->dq_dqb_lock);
mark_dquot_dirty(dquot);
ret = mark_dquot_dirty(dquot);
if (ret < 0)
return ret;
return 0;
}
......@@ -3016,11 +3022,10 @@ static int __init dquot_init(void)
if (!dquot_hash)
panic("Cannot create dquot hash table");
for (i = 0; i < _DQST_DQSTAT_LAST; i++) {
ret = percpu_counter_init(&dqstats.counter[i], 0, GFP_KERNEL);
if (ret)
panic("Cannot create dquot stat counters");
}
ret = percpu_counter_init_many(dqstats.counter, 0, GFP_KERNEL,
_DQST_DQSTAT_LAST);
if (ret)
panic("Cannot create dquot stat counters");
/* Find power-of-two hlist_heads which can fit into allocation */
nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head);
......
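The last dquot.c hunk switches dquot_init() from a loop of percpu_counter_init() calls to the group allocation API, which backs all counters with a single per-CPU allocation. A minimal sketch of the paired init/teardown, with a hypothetical stats array standing in for dqstats:

#include <linux/percpu_counter.h>

#define NR_MY_STATS 8	/* hypothetical number of counters */

static struct percpu_counter my_stats[NR_MY_STATS];

static int my_stats_init(void)
{
	/* One allocation provides the per-CPU storage for every counter. */
	int ret = percpu_counter_init_many(my_stats, 0, GFP_KERNEL, NR_MY_STATS);

	if (ret)
		return ret;
	percpu_counter_inc(&my_stats[0]);	/* used as usual afterwards */
	return 0;
}

static void my_stats_exit(void)
{
	percpu_counter_destroy_many(my_stats, NR_MY_STATS);
}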
......@@ -102,19 +102,9 @@ that start on a node aligned boundary (there are reasons to want to node
align files), and he invented and implemented indirect items and
unformatted nodes as the solution.
Konstantin Shvachko, with the help of the Russian version of a VC,
tried to put me in a position where I was forced into giving control
of the project to him. (Fortunately, as the person paying the money
for all salaries from my dayjob I owned all copyrights, and you can't
really force takeovers of sole proprietorships.) This was something
curious, because he never really understood the value of our project,
why we should do what we do, or why innovation was possible in
general, but he was sure that he ought to be controlling it. Every
innovation had to be forced past him while he was with us. He added
two years to the time required to complete reiserfs, and was a net
loss for me. Mikhail Gilula was a brilliant innovator who also left
in a destructive way that erased the value of his contributions, and
that he was shown much generosity just makes it more painful.
Konstantin Shvachko was taking part in the early days.
Mikhail Gilula was a brilliant innovator that has shown much generosity.
Grigory Zaigralin was an extremely effective system administrator for
our group.
......
......@@ -2503,8 +2503,8 @@ static int map_block_for_writepage(struct inode *inode,
* start/recovery path as __block_write_full_folio, along with special
* code to handle reiserfs tails.
*/
static int reiserfs_write_full_folio(struct folio *folio,
struct writeback_control *wbc)
static int reiserfs_write_folio(struct folio *folio,
struct writeback_control *wbc, void *data)
{
struct inode *inode = folio->mapping->host;
unsigned long end_index = inode->i_size >> PAGE_SHIFT;
......@@ -2721,12 +2721,11 @@ static int reiserfs_read_folio(struct file *f, struct folio *folio)
return block_read_full_folio(folio, reiserfs_get_block);
}
static int reiserfs_writepage(struct page *page, struct writeback_control *wbc)
static int reiserfs_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
struct folio *folio = page_folio(page);
struct inode *inode = folio->mapping->host;
reiserfs_wait_on_write_block(inode->i_sb);
return reiserfs_write_full_folio(folio, wbc);
reiserfs_wait_on_write_block(mapping->host->i_sb);
return write_cache_pages(mapping, wbc, reiserfs_write_folio, NULL);
}
static void reiserfs_truncate_failed_write(struct inode *inode)
......@@ -3405,7 +3404,7 @@ int reiserfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
}
const struct address_space_operations reiserfs_address_space_operations = {
.writepage = reiserfs_writepage,
.writepages = reiserfs_writepages,
.read_folio = reiserfs_read_folio,
.readahead = reiserfs_readahead,
.release_folio = reiserfs_release_folio,
......@@ -3415,4 +3414,5 @@ const struct address_space_operations reiserfs_address_space_operations = {
.bmap = reiserfs_aop_bmap,
.direct_IO = reiserfs_direct_IO,
.dirty_folio = reiserfs_dirty_folio,
.migrate_folio = buffer_migrate_folio,
};
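The reiserfs change removes the ->writepage method in favour of ->writepages built on write_cache_pages(), which walks the dirty folios and invokes a per-folio callback. A sketch of the resulting pair, with hypothetical myfs names; only the writepage_t callback signature is the real contract:

#include <linux/writeback.h>
#include <linux/pagemap.h>

static int myfs_write_folio(struct folio *folio, struct writeback_control *wbc,
			    void *data)
{
	/* Called with the folio locked; map blocks, submit I/O, unlock. */
	return 0;
}

static int myfs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	return write_cache_pages(mapping, wbc, myfs_write_folio, NULL);
}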
......@@ -39,7 +39,7 @@ static vm_fault_t udf_page_mkwrite(struct vm_fault *vmf)
struct vm_area_struct *vma = vmf->vma;
struct inode *inode = file_inode(vma->vm_file);
struct address_space *mapping = inode->i_mapping;
struct page *page = vmf->page;
struct folio *folio = page_folio(vmf->page);
loff_t size;
unsigned int end;
vm_fault_t ret = VM_FAULT_LOCKED;
......@@ -48,31 +48,31 @@ static vm_fault_t udf_page_mkwrite(struct vm_fault *vmf)
sb_start_pagefault(inode->i_sb);
file_update_time(vma->vm_file);
filemap_invalidate_lock_shared(mapping);
lock_page(page);
folio_lock(folio);
size = i_size_read(inode);
if (page->mapping != inode->i_mapping || page_offset(page) >= size) {
unlock_page(page);
if (folio->mapping != inode->i_mapping || folio_pos(folio) >= size) {
folio_unlock(folio);
ret = VM_FAULT_NOPAGE;
goto out_unlock;
}
/* Space is already allocated for in-ICB file */
if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
goto out_dirty;
if (page->index == size >> PAGE_SHIFT)
if (folio->index == size >> PAGE_SHIFT)
end = size & ~PAGE_MASK;
else
end = PAGE_SIZE;
err = __block_write_begin(page, 0, end, udf_get_block);
err = __block_write_begin(&folio->page, 0, end, udf_get_block);
if (err) {
unlock_page(page);
folio_unlock(folio);
ret = vmf_fs_error(err);
goto out_unlock;
}
block_commit_write(page, 0, end);
block_commit_write(&folio->page, 0, end);
out_dirty:
set_page_dirty(page);
wait_for_stable_page(page);
folio_mark_dirty(folio);
folio_wait_stable(folio);
out_unlock:
filemap_invalidate_unlock_shared(mapping);
sb_end_pagefault(inode->i_sb);
......
......@@ -208,19 +208,14 @@ static int udf_writepages(struct address_space *mapping,
return write_cache_pages(mapping, wbc, udf_adinicb_writepage, NULL);
}
static void udf_adinicb_readpage(struct page *page)
static void udf_adinicb_read_folio(struct folio *folio)
{
struct inode *inode = page->mapping->host;
char *kaddr;
struct inode *inode = folio->mapping->host;
struct udf_inode_info *iinfo = UDF_I(inode);
loff_t isize = i_size_read(inode);
kaddr = kmap_local_page(page);
memcpy(kaddr, iinfo->i_data + iinfo->i_lenEAttr, isize);
memset(kaddr + isize, 0, PAGE_SIZE - isize);
flush_dcache_page(page);
SetPageUptodate(page);
kunmap_local(kaddr);
folio_fill_tail(folio, 0, iinfo->i_data + iinfo->i_lenEAttr, isize);
folio_mark_uptodate(folio);
}
static int udf_read_folio(struct file *file, struct folio *folio)
......@@ -228,7 +223,7 @@ static int udf_read_folio(struct file *file, struct folio *folio)
struct udf_inode_info *iinfo = UDF_I(file_inode(file));
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
udf_adinicb_readpage(&folio->page);
udf_adinicb_read_folio(folio);
folio_unlock(folio);
return 0;
}
......@@ -254,7 +249,7 @@ static int udf_write_begin(struct file *file, struct address_space *mapping,
struct page **pagep, void **fsdata)
{
struct udf_inode_info *iinfo = UDF_I(file_inode(file));
struct page *page;
struct folio *folio;
int ret;
if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
......@@ -266,12 +261,13 @@ static int udf_write_begin(struct file *file, struct address_space *mapping,
}
if (WARN_ON_ONCE(pos >= PAGE_SIZE))
return -EIO;
page = grab_cache_page_write_begin(mapping, 0);
if (!page)
return -ENOMEM;
*pagep = page;
if (!PageUptodate(page))
udf_adinicb_readpage(page);
folio = __filemap_get_folio(mapping, 0, FGP_WRITEBEGIN,
mapping_gfp_mask(mapping));
if (IS_ERR(folio))
return PTR_ERR(folio);
*pagep = &folio->page;
if (!folio_test_uptodate(folio))
udf_adinicb_read_folio(folio);
return 0;
}
......@@ -280,17 +276,19 @@ static int udf_write_end(struct file *file, struct address_space *mapping,
struct page *page, void *fsdata)
{
struct inode *inode = file_inode(file);
struct folio *folio;
loff_t last_pos;
if (UDF_I(inode)->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB)
return generic_write_end(file, mapping, pos, len, copied, page,
fsdata);
folio = page_folio(page);
last_pos = pos + copied;
if (last_pos > inode->i_size)
i_size_write(inode, last_pos);
set_page_dirty(page);
unlock_page(page);
put_page(page);
folio_mark_dirty(folio);
folio_unlock(folio);
folio_put(folio);
return copied;
}
......@@ -341,7 +339,7 @@ const struct address_space_operations udf_aops = {
*/
int udf_expand_file_adinicb(struct inode *inode)
{
struct page *page;
struct folio *folio;
struct udf_inode_info *iinfo = UDF_I(inode);
int err;
......@@ -357,12 +355,13 @@ int udf_expand_file_adinicb(struct inode *inode)
return 0;
}
page = find_or_create_page(inode->i_mapping, 0, GFP_KERNEL);
if (!page)
return -ENOMEM;
folio = __filemap_get_folio(inode->i_mapping, 0,
FGP_LOCK | FGP_ACCESSED | FGP_CREAT, GFP_KERNEL);
if (IS_ERR(folio))
return PTR_ERR(folio);
if (!PageUptodate(page))
udf_adinicb_readpage(page);
if (!folio_test_uptodate(folio))
udf_adinicb_read_folio(folio);
down_write(&iinfo->i_data_sem);
memset(iinfo->i_data + iinfo->i_lenEAttr, 0x00,
iinfo->i_lenAlloc);
......@@ -371,22 +370,22 @@ int udf_expand_file_adinicb(struct inode *inode)
iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
else
iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
set_page_dirty(page);
unlock_page(page);
folio_mark_dirty(folio);
folio_unlock(folio);
up_write(&iinfo->i_data_sem);
err = filemap_fdatawrite(inode->i_mapping);
if (err) {
/* Restore everything back so that we don't lose data... */
lock_page(page);
folio_lock(folio);
down_write(&iinfo->i_data_sem);
memcpy_to_page(page, 0, iinfo->i_data + iinfo->i_lenEAttr,
inode->i_size);
unlock_page(page);
memcpy_from_folio(iinfo->i_data + iinfo->i_lenEAttr,
folio, 0, inode->i_size);
folio_unlock(folio);
iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
iinfo->i_lenAlloc = inode->i_size;
up_write(&iinfo->i_data_sem);
}
put_page(page);
folio_put(folio);
mark_inode_dirty(inode);
return err;
......
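udf_adinicb_read_folio() above replaces an open-coded kmap/memcpy/memset sequence with folio_fill_tail(), which copies the given bytes into the folio and zero-fills everything after them; it does not mark the folio uptodate, hence the explicit folio_mark_uptodate(). A sketch for a hypothetical filesystem that keeps small files inline in the inode; myfs_inline_data() is made up for the example:

static int myfs_inline_read_folio(struct file *file, struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	size_t isize = i_size_read(inode);

	/* Copy isize bytes of inline data, zero the rest of the folio. */
	folio_fill_tail(folio, 0, myfs_inline_data(inode), isize);
	folio_mark_uptodate(folio);
	folio_unlock(folio);
	return 0;
}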
......@@ -630,7 +630,7 @@ static int udf_parse_param(struct fs_context *fc, struct fs_parameter *param)
if (!uopt->nls_map) {
errorf(fc, "iocharset %s not found",
param->string);
return -EINVAL;;
return -EINVAL;
}
}
break;
......@@ -895,7 +895,7 @@ static int udf_load_pvoldesc(struct super_block *sb, sector_t block)
int ret;
struct timestamp *ts;
outstr = kmalloc(128, GFP_KERNEL);
outstr = kzalloc(128, GFP_KERNEL);
if (!outstr)
return -ENOMEM;
......@@ -921,11 +921,11 @@ static int udf_load_pvoldesc(struct super_block *sb, sector_t block)
ret = udf_dstrCS0toChar(sb, outstr, 31, pvoldesc->volIdent, 32);
if (ret < 0) {
strcpy(UDF_SB(sb)->s_volume_ident, "InvalidName");
strscpy_pad(UDF_SB(sb)->s_volume_ident, "InvalidName");
pr_warn("incorrect volume identification, setting to "
"'InvalidName'\n");
} else {
strncpy(UDF_SB(sb)->s_volume_ident, outstr, ret);
strscpy_pad(UDF_SB(sb)->s_volume_ident, outstr);
}
udf_debug("volIdent[] = '%s'\n", UDF_SB(sb)->s_volume_ident);
......
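On the strscpy change above: both call sites use the two-argument form, which works because the destination is a fixed-size array, so the macro can take the size from the destination itself. A small hedged comparison, with a hypothetical buf and src:

char buf[32];

/* strncpy() may leave buf without a NUL terminator and gives no
 * indication that src was truncated. */
strncpy(buf, src, sizeof(buf));

/* strscpy_pad() always NUL-terminates, zero-pads the remainder of buf,
 * and returns -E2BIG on truncation; with two arguments the destination
 * size is inferred from sizeof(buf). */
if (strscpy_pad(buf, src) < 0)
	pr_warn("volume name truncated\n");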
......@@ -99,18 +99,17 @@ static int udf_pc_to_char(struct super_block *sb, unsigned char *from,
static int udf_symlink_filler(struct file *file, struct folio *folio)
{
struct page *page = &folio->page;
struct inode *inode = page->mapping->host;
struct inode *inode = folio->mapping->host;
struct buffer_head *bh = NULL;
unsigned char *symlink;
int err = 0;
unsigned char *p = page_address(page);
unsigned char *p = folio_address(folio);
struct udf_inode_info *iinfo = UDF_I(inode);
/* We don't support symlinks longer than one block */
if (inode->i_size > inode->i_sb->s_blocksize) {
err = -ENAMETOOLONG;
goto out_unlock;
goto out;
}
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
......@@ -120,24 +119,15 @@ static int udf_symlink_filler(struct file *file, struct folio *folio)
if (!bh) {
if (!err)
err = -EFSCORRUPTED;
goto out_err;
goto out;
}
symlink = bh->b_data;
}
err = udf_pc_to_char(inode->i_sb, symlink, inode->i_size, p, PAGE_SIZE);
brelse(bh);
if (err)
goto out_err;
SetPageUptodate(page);
unlock_page(page);
return 0;
out_err:
SetPageError(page);
out_unlock:
unlock_page(page);
out:
folio_end_read(folio, err == 0);
return err;
}
......@@ -147,12 +137,12 @@ static int udf_symlink_getattr(struct mnt_idmap *idmap,
{
struct dentry *dentry = path->dentry;
struct inode *inode = d_backing_inode(dentry);
struct page *page;
struct folio *folio;
generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
page = read_mapping_page(inode->i_mapping, 0, NULL);
if (IS_ERR(page))
return PTR_ERR(page);
folio = read_mapping_folio(inode->i_mapping, 0, NULL);
if (IS_ERR(folio))
return PTR_ERR(folio);
/*
* UDF uses non-trivial encoding of symlinks so i_size does not match
* number of characters reported by readlink(2) which apparently some
......@@ -162,8 +152,8 @@ static int udf_symlink_getattr(struct mnt_idmap *idmap,
* let's report the length of string returned by readlink(2) for
* st_size.
*/
stat->size = strlen(page_address(page));
put_page(page);
stat->size = strlen(folio_address(folio));
folio_put(folio);
return 0;
}
......
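The symlink filler now funnels every exit through folio_end_read(), which sets the uptodate flag when the read succeeded and unlocks the folio in one call, replacing the separate SetPageUptodate()/SetPageError()/unlock_page() paths. The general shape in a ->read_folio implementation, with a hypothetical myfs_copy_data():

static int myfs_read_folio(struct file *file, struct folio *folio)
{
	int err = myfs_copy_data(folio);

	/* Marks the folio uptodate on success; unlocks it in either case. */
	folio_end_read(folio, err == 0);
	return err;
}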
......@@ -46,13 +46,18 @@ udf_disk_stamp_to_time(struct timespec64 *dest, struct timestamp src)
dest->tv_sec = mktime64(year, src.month, src.day, src.hour, src.minute,
src.second);
dest->tv_sec -= offset * 60;
dest->tv_nsec = 1000 * (src.centiseconds * 10000 +
src.hundredsOfMicroseconds * 100 + src.microseconds);
/*
* Sanitize nanosecond field since reportedly some filesystems are
* recorded with bogus sub-second values.
*/
dest->tv_nsec %= NSEC_PER_SEC;
if (src.centiseconds < 100 && src.hundredsOfMicroseconds < 100 &&
src.microseconds < 100) {
dest->tv_nsec = 1000 * (src.centiseconds * 10000 +
src.hundredsOfMicroseconds * 100 + src.microseconds);
} else {
dest->tv_nsec = 0;
}
}
void
......
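On the udftime fix: the on-disk sub-second fields of a UDF timestamp are single bytes, so a corrupted record can carry values far outside their valid 0-99 range. Taking the worst case of 255 in each field (assuming the 8-bit on-disk layout), the old expression evaluates to 1000 * (255 * 10000 + 255 * 100 + 255) = 2,575,755,000, which both exceeds NSEC_PER_SEC and overflows a 32-bit signed intermediate, so the earlier '%= NSEC_PER_SEC' only masked an already-overflowed value. The new code performs the computation only when every field is below 100 and otherwise drops the sub-second part.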