Commit 0a7b0ace authored by Linus Torvalds

Merge tag 'vfs-6.9-rc1.fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs

Pull vfs fixes from Christian Brauner:
 "This contains a few small fixes for this merge window:

   - Undo the hiding of silly-rename files in afs. If they're hidden,
     they can no longer be deleted manually with rm, causing regressions

   - Avoid caching the preferred address for an afs server to avoid
     accidentally overriding an explicitly specified preferred server
     address

   - Fix bad stat() and rmdir() interaction in afs

   - Take a passive reference on the superblock when opening a block
     device so the holder is available to concurrent callers from the
     block layer

   - Clear private data pointer in fscache_begin_operation() to avoid it
     being falsely treated as valid"

* tag 'vfs-6.9-rc1.fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs:
  fscache: Fix error handling in fscache_begin_operation()
  fs,block: get holder during claim
  afs: Fix occasional rmdir-then-VNOVNODE with generic/011
  afs: Don't cache preferred address
  afs: Revert "afs: Hide silly-rename files from userspace"
parents 4ae3dc83 449ac551
@@ -583,6 +583,9 @@ static void bd_finish_claiming(struct block_device *bdev, void *holder,
         mutex_unlock(&bdev->bd_holder_lock);
         bd_clear_claiming(whole, holder);
         mutex_unlock(&bdev_lock);
+
+        if (hops && hops->get_holder)
+                hops->get_holder(holder);
 }
 
 /**
@@ -605,6 +608,7 @@ EXPORT_SYMBOL(bd_abort_claiming);
 static void bd_end_claim(struct block_device *bdev, void *holder)
 {
         struct block_device *whole = bdev_whole(bdev);
+        const struct blk_holder_ops *hops = bdev->bd_holder_ops;
         bool unblock = false;
 
         /*
@@ -627,6 +631,9 @@ static void bd_end_claim(struct block_device *bdev, void *holder)
                 whole->bd_holder = NULL;
         mutex_unlock(&bdev_lock);
 
+        if (hops && hops->put_holder)
+                hops->put_holder(holder);
+
         /*
          * If this was the last claim, remove holder link and unblock evpoll if
          * it was a write holder.
...
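Note on the hunks above: the claim path now takes a reference on the holder via get_holder() once the claim is finalised, and bd_end_claim() drops it again via put_holder(), so the holder (for filesystems, the super_block pinned by fs_holder_ops further down) stays usable by concurrent callers from the block layer. The following is a minimal userspace sketch of that pairing; the toy_* types and functions are hypothetical stand-ins, not kernel code.

/* Minimal userspace model of the claim-time get / release-time put
 * added above. toy_holder, toy_holder_ops, claim_device and end_claim
 * are illustrative only: the point is that the reference is taken once
 * the claim has succeeded and dropped when the claim ends, so callbacks
 * invoked in between can safely dereference the holder.
 */
#include <assert.h>
#include <stdio.h>

struct toy_holder {
        int refcount;                           /* stands in for sb->s_count */
};

struct toy_holder_ops {
        void (*get_holder)(void *holder);       /* optional, may be NULL */
        void (*put_holder)(void *holder);       /* optional, may be NULL */
};

static void toy_get(void *holder)
{
        ((struct toy_holder *)holder)->refcount++;
}

static void toy_put(void *holder)
{
        ((struct toy_holder *)holder)->refcount--;
}

static const struct toy_holder_ops toy_ops = {
        .get_holder = toy_get,
        .put_holder = toy_put,
};

/* Mirrors bd_finish_claiming(): pin the holder once the claim sticks. */
static void claim_device(void *holder, const struct toy_holder_ops *hops)
{
        if (hops && hops->get_holder)
                hops->get_holder(holder);
}

/* Mirrors bd_end_claim(): drop the pin when the claim is released. */
static void end_claim(void *holder, const struct toy_holder_ops *hops)
{
        if (hops && hops->put_holder)
                hops->put_holder(holder);
}

int main(void)
{
        struct toy_holder sb = { .refcount = 1 };

        claim_device(&sb, &toy_ops);
        assert(sb.refcount == 2);               /* holder pinned while claimed */
        end_claim(&sb, &toy_ops);
        assert(sb.refcount == 1);
        printf("refcount back to %d\n", sb.refcount);
        return 0;
}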
@@ -474,16 +474,6 @@ static int afs_dir_iterate_block(struct afs_vnode *dvnode,
                         continue;
                 }
 
-                /* Don't expose silly rename entries to userspace. */
-                if (nlen > 6 &&
-                    dire->u.name[0] == '.' &&
-                    ctx->actor != afs_lookup_filldir &&
-                    ctx->actor != afs_lookup_one_filldir &&
-                    memcmp(dire->u.name, ".__afs", 6) == 0) {
-                        ctx->pos = blkoff + next * sizeof(union afs_xdr_dirent);
-                        continue;
-                }
-
                 /* found the next entry */
                 if (!dir_emit(ctx, dire->u.name, nlen,
                               ntohl(dire->u.vnode),
...
@@ -602,6 +602,8 @@ bool afs_select_fileserver(struct afs_operation *op)
                 goto wait_for_more_probe_results;
 
         alist = op->estate->addresses;
+        best_prio = -1;
+        addr_index = 0;
         for (i = 0; i < alist->nr_addrs; i++) {
                 if (alist->addrs[i].prio > best_prio) {
                         addr_index = i;
@@ -609,9 +611,7 @@ bool afs_select_fileserver(struct afs_operation *op)
                 }
         }
 
-        addr_index = READ_ONCE(alist->preferred);
-        if (!test_bit(addr_index, &set))
-                addr_index = __ffs(set);
+        alist->preferred = addr_index;
 
         op->addr_index = addr_index;
         set_bit(addr_index, &op->addr_tried);
@@ -656,12 +656,6 @@ bool afs_select_fileserver(struct afs_operation *op)
 
 next_server:
         trace_afs_rotate(op, afs_rotate_trace_next_server, 0);
         _debug("next");
-        ASSERT(op->estate);
-        alist = op->estate->addresses;
-        if (op->call_responded &&
-            op->addr_index != READ_ONCE(alist->preferred) &&
-            test_bit(alist->preferred, &op->addr_tried))
-                WRITE_ONCE(alist->preferred, op->addr_index);
         op->estate = NULL;
         goto pick_server;
@@ -690,14 +684,7 @@ bool afs_select_fileserver(struct afs_operation *op)
 failed:
         trace_afs_rotate(op, afs_rotate_trace_failed, 0);
         op->flags |= AFS_OPERATION_STOP;
-        if (op->estate) {
-                alist = op->estate->addresses;
-                if (op->call_responded &&
-                    op->addr_index != READ_ONCE(alist->preferred) &&
-                    test_bit(alist->preferred, &op->addr_tried))
-                        WRITE_ONCE(alist->preferred, op->addr_index);
-                op->estate = NULL;
-        }
+        op->estate = NULL;
         _leave(" = f [failed %d]", afs_op_error(op));
         return false;
 }
...
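With the cached preferred index gone, the selection loop above re-derives the best address from the endpoint priorities on every pass (best_prio and addr_index are reset first) and records the winner in alist->preferred. A standalone sketch of just that selection step, with made-up toy_addr/pick_address names rather than the afs structures:

/* Sketch of the selection loop above: reset the tracking variables and
 * rescan the list each time instead of reusing a previously cached
 * "preferred" index. struct toy_addr and pick_address() are
 * illustrative only.
 */
#include <stdio.h>

struct toy_addr {
        const char     *name;
        unsigned short  prio;
};

static int pick_address(const struct toy_addr *addrs, int nr)
{
        int best_prio = -1;     /* reset on every call, as in the fix */
        int addr_index = 0;
        int i;

        for (i = 0; i < nr; i++) {
                if (addrs[i].prio > best_prio) {
                        addr_index = i;
                        best_prio = addrs[i].prio;
                }
        }
        return addr_index;
}

int main(void)
{
        const struct toy_addr addrs[] = {
                { "192.0.2.1", 10 },
                { "192.0.2.2", 30 },
                { "192.0.2.3", 20 },
        };

        printf("picked index %d\n", pick_address(addrs, 3));    /* prints 1 */
        return 0;
}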
@@ -122,6 +122,9 @@ bool afs_check_validity(const struct afs_vnode *vnode)
         const struct afs_volume *volume = vnode->volume;
         time64_t deadline = ktime_get_real_seconds() + 10;
 
+        if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
+                return true;
+
         if (atomic_read(&volume->cb_v_check) != atomic_read(&volume->cb_v_break) ||
             atomic64_read(&vnode->cb_expires_at) <= deadline ||
             volume->cb_expires_at <= deadline ||
@@ -389,12 +392,17 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
                key_serial(key));
 
         if (afs_check_validity(vnode))
-                return 0;
+                return test_bit(AFS_VNODE_DELETED, &vnode->flags) ? -ESTALE : 0;
 
         ret = down_write_killable(&vnode->validate_lock);
         if (ret < 0)
                 goto error;
 
+        if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
+                ret = -ESTALE;
+                goto error_unlock;
+        }
+
         /* Validate a volume after the v_break has changed or the volume
          * callback expired. We only want to do this once per volume per
          * v_break change. The actual work will be done when parsing the
@@ -448,12 +456,6 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
         vnode->cb_ro_snapshot = cb_ro_snapshot;
         vnode->cb_scrub = cb_scrub;
 
-        if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
-                _debug("file already deleted");
-                ret = -ESTALE;
-                goto error_unlock;
-        }
-
         /* if the vnode's data version number changed then its contents are
          * different */
         zap |= test_and_clear_bit(AFS_VNODE_ZAP_DATA, &vnode->flags);
...
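The validation changes above make a deleted vnode short-circuit afs_check_validity() and turn into -ESTALE in afs_validate(), with the flag re-checked under validate_lock so a deletion that lands after the lockless fast path is still caught. A compressed sketch of that check-then-recheck flow, using a toy vnode and a pthread rwlock instead of the afs types:

/* Sketch of the check-then-recheck flow introduced above, with a toy
 * vnode carrying a "deleted" flag and an rwlock standing in for
 * validate_lock. All names here are illustrative.
 */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_vnode {
        bool                    deleted;
        pthread_rwlock_t        validate_lock;
};

static bool toy_check_validity(const struct toy_vnode *v)
{
        if (v->deleted)
                return true;    /* "still valid": nothing left to validate */
        /* ... callback/expiry checks would go here ... */
        return false;
}

static int toy_validate(struct toy_vnode *v)
{
        int ret = 0;

        if (toy_check_validity(v))
                return v->deleted ? -ESTALE : 0;        /* fast path */

        pthread_rwlock_wrlock(&v->validate_lock);

        /* Re-check under the lock: a concurrent rmdir may have marked
         * the vnode deleted after the lockless check above. */
        if (v->deleted) {
                ret = -ESTALE;
                goto out_unlock;
        }

        /* ... fetch status / refresh callbacks ... */

out_unlock:
        pthread_rwlock_unlock(&v->validate_lock);
        return ret;
}

int main(void)
{
        struct toy_vnode v = { .deleted = true,
                               .validate_lock = PTHREAD_RWLOCK_INITIALIZER };

        printf("validate -> %d (expect -ESTALE)\n", toy_validate(&v));
        return 0;
}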
@@ -83,8 +83,10 @@ static int fscache_begin_operation(struct netfs_cache_resources *cres,
         cres->debug_id = cookie->debug_id;
         cres->inval_counter = cookie->inval_counter;
 
-        if (!fscache_begin_cookie_access(cookie, why))
+        if (!fscache_begin_cookie_access(cookie, why)) {
+                cres->cache_priv = NULL;
                 return -ENOBUFS;
+        }
 
 again:
         spin_lock(&cookie->lock);
...
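The error-path fix above keeps cres->cache_priv from pointing at a cookie whose access was never begun, so later checks cannot mistake the resources for live ones. A toy illustration of the same pattern, with hypothetical names:

/* Toy illustration of the error-path fix above: if beginning access
 * fails, clear the private pointer that was optimistically set, so a
 * later "is this initialised?" test does not see a stale value.
 * toy_resources, toy_begin and friends are made-up names.
 */
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct toy_cookie {
        int usage;
};

struct toy_resources {
        void *cache_priv;       /* non-NULL means "operation in progress" */
};

static bool toy_begin_cookie_access(struct toy_cookie *c)
{
        return c->usage > 0;    /* pretend pinning the cookie can fail */
}

static int toy_begin(struct toy_resources *res, struct toy_cookie *c)
{
        res->cache_priv = c;    /* set optimistically, as in fscache */

        if (!toy_begin_cookie_access(c)) {
                res->cache_priv = NULL; /* the fix: no stale pointer on error */
                return -ENOBUFS;
        }
        return 0;
}

int main(void)
{
        struct toy_cookie dead = { .usage = 0 };
        struct toy_resources res = { .cache_priv = NULL };
        int ret = toy_begin(&res, &dead);

        printf("begin -> %d, cache_priv %s\n", ret,
               res.cache_priv ? "still set (bad)" : "cleared (good)");
        return 0;
}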
@@ -1515,11 +1515,29 @@ static int fs_bdev_thaw(struct block_device *bdev)
         return error;
 }
 
+static void fs_bdev_super_get(void *data)
+{
+        struct super_block *sb = data;
+
+        spin_lock(&sb_lock);
+        sb->s_count++;
+        spin_unlock(&sb_lock);
+}
+
+static void fs_bdev_super_put(void *data)
+{
+        struct super_block *sb = data;
+
+        put_super(sb);
+}
+
 const struct blk_holder_ops fs_holder_ops = {
         .mark_dead              = fs_bdev_mark_dead,
         .sync                   = fs_bdev_sync,
         .freeze                 = fs_bdev_freeze,
         .thaw                   = fs_bdev_thaw,
+        .get_holder             = fs_bdev_super_get,
+        .put_holder             = fs_bdev_super_put,
 };
 EXPORT_SYMBOL_GPL(fs_holder_ops);
...
@@ -1505,6 +1505,16 @@ struct blk_holder_ops {
          * Thaw the file system mounted on the block device.
          */
         int (*thaw)(struct block_device *bdev);
+
+        /*
+         * If needed, get a reference to the holder.
+         */
+        void (*get_holder)(void *holder);
+
+        /*
+         * Release the holder.
+         */
+        void (*put_holder)(void *holder);
 };
 
 /*