Commit 4f2d34b6 authored by Linus Torvalds

Merge tag 'fuse-update-6.10' of git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/fuse

Pull fuse updates from Miklos Szeredi:

 - Add fs-verity support (Richard Fung)

 - Add multi-queue support to virtio-fs (Peter-Jan Gootzen)

 - Fix a bug in NOTIFY_RESEND handling (Hou Tao)

 - page -> folio cleanup (Matthew Wilcox)

* tag 'fuse-update-6.10' of git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/fuse:
  virtio-fs: add multi-queue support
  virtio-fs: limit number of request queues
  fuse: clear FR_SENT when re-adding requests into pending list
  fuse: set FR_PENDING atomically in fuse_resend()
  fuse: Add initial support for fs-verity
  fuse: Convert fuse_readpages_end() to use folio_end_read()
parents 681ce862 529395d2
fs/fuse/dev.c

@@ -1813,7 +1813,8 @@ static void fuse_resend(struct fuse_conn *fc)
 	spin_unlock(&fc->lock);
 	list_for_each_entry_safe(req, next, &to_queue, list) {
-		__set_bit(FR_PENDING, &req->flags);
+		set_bit(FR_PENDING, &req->flags);
+		clear_bit(FR_SENT, &req->flags);
 		/* mark the request as resend request */
 		req->in.h.unique |= FUSE_UNIQUE_RESEND;
 	}
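A note on the two fixes above: __set_bit() is the non-atomic bitop, a plain load/modify/store on the flags word. fuse_resend() runs while other CPUs may be performing atomic bitops on the same req->flags, so the non-atomic variant can overwrite and lose a concurrently set bit; set_bit() does the whole read-modify-write atomically. Clearing FR_SENT pairs with this: a request being re-queued onto the pending list must no longer claim to have been sent. Below is a minimal user-space analogue of the lost-update race, with hypothetical names; it illustrates the difference, it is not kernel code.

#include <stdatomic.h>

#define FR_PENDING (1UL << 0)
#define FR_SENT    (1UL << 1)

static _Atomic unsigned long flags;

/* Racy analogue of __set_bit(): separate load and store. An atomic update
 * made by another thread between the two is overwritten and lost. */
static void racy_set_bit(unsigned long mask)
{
        unsigned long v = atomic_load_explicit(&flags, memory_order_relaxed);
        atomic_store_explicit(&flags, v | mask, memory_order_relaxed);
}

/* Analogue of set_bit(): one indivisible read-modify-write. */
static void atomic_set_bit(unsigned long mask)
{
        atomic_fetch_or_explicit(&flags, mask, memory_order_relaxed);
}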
fs/fuse/file.c

@@ -935,14 +935,10 @@ static void fuse_readpages_end(struct fuse_mount *fm, struct fuse_args *args,
 	}
 
 	for (i = 0; i < ap->num_pages; i++) {
-		struct page *page = ap->pages[i];
+		struct folio *folio = page_folio(ap->pages[i]);
 
-		if (!err)
-			SetPageUptodate(page);
-		else
-			SetPageError(page);
-		unlock_page(page);
-		put_page(page);
+		folio_end_read(folio, !err);
+		folio_put(folio);
 	}
 	if (ia->ff)
 		fuse_file_put(ia->ff, false);
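The folio conversion above is not purely mechanical. folio_end_read() sets the uptodate flag (when its second argument is true), clears the lock bit and wakes any waiters in a single atomic operation on the folio flags, replacing four separate calls; the SetPageError() branch disappears deliberately, since readers act on the uptodate flag rather than the error flag. A kernel-style sketch of the resulting completion pattern; my_read_done() is a hypothetical name, not part of this commit:

#include <linux/mm.h>
#include <linux/pagemap.h>

static void my_read_done(struct folio *folio, int err)
{
        /* Mark the folio uptodate iff the read succeeded, then unlock it
         * and wake waiters, all with one atomic flags operation. */
        folio_end_read(folio, err == 0);

        /* Drop the reference taken when the read was submitted. */
        folio_put(folio);
}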
fs/fuse/ioctl.c

@@ -8,6 +8,7 @@
 #include <linux/uio.h>
 #include <linux/compat.h>
 #include <linux/fileattr.h>
+#include <linux/fsverity.h>
 
 static ssize_t fuse_send_ioctl(struct fuse_mount *fm, struct fuse_args *args,
 			       struct fuse_ioctl_out *outarg)
@@ -117,6 +118,53 @@ static int fuse_copy_ioctl_iovec(struct fuse_conn *fc, struct iovec *dst,
 	return 0;
 }
 
+/* For fs-verity, determine iov lengths from input */
+static int fuse_setup_measure_verity(unsigned long arg, struct iovec *iov)
+{
+	__u16 digest_size;
+	struct fsverity_digest __user *uarg = (void __user *)arg;
+
+	if (copy_from_user(&digest_size, &uarg->digest_size, sizeof(digest_size)))
+		return -EFAULT;
+
+	if (digest_size > SIZE_MAX - sizeof(struct fsverity_digest))
+		return -EINVAL;
+
+	iov->iov_len = sizeof(struct fsverity_digest) + digest_size;
+
+	return 0;
+}
+
+static int fuse_setup_enable_verity(unsigned long arg, struct iovec *iov,
+				    unsigned int *in_iovs)
+{
+	struct fsverity_enable_arg enable;
+	struct fsverity_enable_arg __user *uarg = (void __user *)arg;
+	const __u32 max_buffer_len = FUSE_MAX_MAX_PAGES * PAGE_SIZE;
+
+	if (copy_from_user(&enable, uarg, sizeof(enable)))
+		return -EFAULT;
+
+	if (enable.salt_size > max_buffer_len || enable.sig_size > max_buffer_len)
+		return -ENOMEM;
+
+	if (enable.salt_size > 0) {
+		iov++;
+		(*in_iovs)++;
+
+		iov->iov_base = u64_to_user_ptr(enable.salt_ptr);
+		iov->iov_len = enable.salt_size;
+	}
+
+	if (enable.sig_size > 0) {
+		iov++;
+		(*in_iovs)++;
+
+		iov->iov_base = u64_to_user_ptr(enable.sig_ptr);
+		iov->iov_len = enable.sig_size;
+	}
+
+	return 0;
+}
+
 /*
  * For ioctls, there is no generic way to determine how much memory
@@ -227,6 +275,18 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
 			out_iov = iov;
 			out_iovs = 1;
 		}
+
+		err = 0;
+		switch (cmd) {
+		case FS_IOC_MEASURE_VERITY:
+			err = fuse_setup_measure_verity(arg, iov);
+			break;
+		case FS_IOC_ENABLE_VERITY:
+			err = fuse_setup_enable_verity(arg, iov, &in_iovs);
+			break;
+		}
+		if (err)
+			goto out;
 	}
 
 retry:
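With the two helpers wired into fuse_do_ioctl(), the generic fs-verity ioctls can be forwarded to a FUSE server that implements them. A user-space sketch of how a client exercises this; the mount path is a placeholder and error handling is minimal:

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fsverity.h>

int main(int argc, char **argv)
{
        const char *path = argc > 1 ? argv[1] : "/mnt/fuse/file"; /* placeholder */
        int fd = open(path, O_RDONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }

        /* Build the Merkle tree and seal the file. */
        struct fsverity_enable_arg enable = {
                .version = 1,
                .hash_algorithm = FS_VERITY_HASH_ALG_SHA256,
                .block_size = 4096,
        };
        if (ioctl(fd, FS_IOC_ENABLE_VERITY, &enable))
                perror("FS_IOC_ENABLE_VERITY");

        /* digest_size is in/out: pass capacity in, the actual size comes back. */
        struct fsverity_digest *d = malloc(sizeof(*d) + 64);
        if (!d)
                return 1;
        d->digest_size = 64;
        if (ioctl(fd, FS_IOC_MEASURE_VERITY, d) == 0)
                printf("digest: algorithm %u, %u bytes\n",
                       d->digest_algorithm, d->digest_size);
        else
                perror("FS_IOC_MEASURE_VERITY");

        free(d);
        close(fd);
        return 0;
}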
fs/fuse/virtio_fs.c

@@ -7,6 +7,8 @@
 #include <linux/fs.h>
 #include <linux/dax.h>
 #include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/group_cpus.h>
 #include <linux/pfn_t.h>
 #include <linux/memremap.h>
 #include <linux/module.h>
@@ -67,6 +69,8 @@ struct virtio_fs {
 	unsigned int num_request_queues; /* number of request queues */
 	struct dax_device *dax_dev;
 
+	unsigned int *mq_map; /* index = cpu id, value = request vq id */
+
 	/* DAX memory window where file contents are mapped */
 	void *window_kaddr;
 	phys_addr_t window_phys_addr;
@@ -185,6 +189,7 @@ static void virtio_fs_ktype_release(struct kobject *kobj)
 {
 	struct virtio_fs *vfs = container_of(kobj, struct virtio_fs, kobj);
 
+	kfree(vfs->mq_map);
 	kfree(vfs->vqs);
 	kfree(vfs);
 }
@@ -706,6 +711,44 @@ static void virtio_fs_requests_done_work(struct work_struct *work)
 	}
 }
 
+static void virtio_fs_map_queues(struct virtio_device *vdev, struct virtio_fs *fs)
+{
+	const struct cpumask *mask, *masks;
+	unsigned int q, cpu;
+
+	/* First attempt to map using existing transport layer affinities
+	 * e.g. PCIe MSI-X
+	 */
+	if (!vdev->config->get_vq_affinity)
+		goto fallback;
+
+	for (q = 0; q < fs->num_request_queues; q++) {
+		mask = vdev->config->get_vq_affinity(vdev, VQ_REQUEST + q);
+		if (!mask)
+			goto fallback;
+
+		for_each_cpu(cpu, mask)
+			fs->mq_map[cpu] = q;
+	}
+
+	return;
+fallback:
+	/* Attempt to map evenly in groups over the CPUs */
+	masks = group_cpus_evenly(fs->num_request_queues);
+	/* If even this fails we default to all CPUs use queue zero */
+	if (!masks) {
+		for_each_possible_cpu(cpu)
+			fs->mq_map[cpu] = 0;
+		return;
+	}
+
+	for (q = 0; q < fs->num_request_queues; q++) {
+		for_each_cpu(cpu, &masks[q])
+			fs->mq_map[cpu] = q;
+	}
+	kfree(masks);
+}
+
 /* Virtqueue interrupt handler */
 static void virtio_fs_vq_done(struct virtqueue *vq)
 {
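virtio_fs_map_queues() builds the cpu -> queue table that every request submission consults. Preference order: per-virtqueue transport affinities (e.g. PCIe MSI-X, via get_vq_affinity) when the transport provides them; otherwise group_cpus_evenly(), which also takes NUMA topology into account; and if that allocation fails, every CPU falls back to queue 0. A rough user-space analogue of the even-spread fallback (hypothetical helper, ignoring NUMA):

#include <stdio.h>

/* Give each of nq queues a contiguous, near-equal share of ncpus CPUs. */
static void map_cpus_evenly(unsigned int *mq_map, unsigned int ncpus,
                            unsigned int nq)
{
        for (unsigned int cpu = 0; cpu < ncpus; cpu++)
                mq_map[cpu] = cpu * nq / ncpus;
}

int main(void)
{
        unsigned int map[8];

        map_cpus_evenly(map, 8, 3);
        for (unsigned int cpu = 0; cpu < 8; cpu++)
                printf("cpu %u -> queue %u\n", cpu, map[cpu]); /* 0 0 0 1 1 1 2 2 */
        return 0;
}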
@@ -742,6 +785,11 @@ static int virtio_fs_setup_vqs(struct virtio_device *vdev,
 {
 	struct virtqueue **vqs;
 	vq_callback_t **callbacks;
+	/* Specify pre_vectors to ensure that the queues before the
+	 * request queues (e.g. hiprio) don't claim any of the CPUs in
+	 * the multi-queue mapping and interrupt affinities
+	 */
+	struct irq_affinity desc = { .pre_vectors = VQ_REQUEST };
 	const char **names;
 	unsigned int i;
 	int ret = 0;
@@ -751,6 +799,9 @@ static int virtio_fs_setup_vqs(struct virtio_device *vdev,
 	if (fs->num_request_queues == 0)
 		return -EINVAL;
 
+	/* Truncate nr of request queues to nr_cpu_id */
+	fs->num_request_queues = min_t(unsigned int, fs->num_request_queues,
+				       nr_cpu_ids);
 	fs->nvqs = VQ_REQUEST + fs->num_request_queues;
 	fs->vqs = kcalloc(fs->nvqs, sizeof(fs->vqs[VQ_HIPRIO]), GFP_KERNEL);
 	if (!fs->vqs)
@@ -760,7 +811,9 @@ static int virtio_fs_setup_vqs(struct virtio_device *vdev,
 	callbacks = kmalloc_array(fs->nvqs, sizeof(callbacks[VQ_HIPRIO]),
 				  GFP_KERNEL);
 	names = kmalloc_array(fs->nvqs, sizeof(names[VQ_HIPRIO]), GFP_KERNEL);
-	if (!vqs || !callbacks || !names) {
+	fs->mq_map = kcalloc_node(nr_cpu_ids, sizeof(*fs->mq_map), GFP_KERNEL,
+				  dev_to_node(&vdev->dev));
+	if (!vqs || !callbacks || !names || !fs->mq_map) {
 		ret = -ENOMEM;
 		goto out;
 	}
@@ -780,7 +833,7 @@ static int virtio_fs_setup_vqs(struct virtio_device *vdev,
 		names[i] = fs->vqs[i].name;
 	}
 
-	ret = virtio_find_vqs(vdev, fs->nvqs, vqs, callbacks, names, NULL);
+	ret = virtio_find_vqs(vdev, fs->nvqs, vqs, callbacks, names, &desc);
 	if (ret < 0)
 		goto out;
@@ -792,8 +845,10 @@ static int virtio_fs_setup_vqs(struct virtio_device *vdev,
 	kfree(names);
 	kfree(callbacks);
 	kfree(vqs);
-	if (ret)
+	if (ret) {
 		kfree(fs->vqs);
+		kfree(fs->mq_map);
+	}
 	return ret;
 }
@@ -939,7 +994,7 @@ static int virtio_fs_probe(struct virtio_device *vdev)
 	if (ret < 0)
 		goto out;
 
-	/* TODO vq affinity */
+	virtio_fs_map_queues(vdev, fs);
 
 	ret = virtio_fs_setup_dax(vdev, fs);
 	if (ret < 0)
@@ -1288,7 +1343,7 @@ static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
 static void virtio_fs_wake_pending_and_unlock(struct fuse_iqueue *fiq)
 __releases(fiq->lock)
 {
-	unsigned int queue_id = VQ_REQUEST; /* TODO multiqueue */
+	unsigned int queue_id;
 	struct virtio_fs *fs;
 	struct fuse_req *req;
 	struct virtio_fs_vq *fsvq;
@@ -1302,11 +1357,13 @@ __releases(fiq->lock)
 	spin_unlock(&fiq->lock);
 
 	fs = fiq->priv;
+	queue_id = VQ_REQUEST + fs->mq_map[raw_smp_processor_id()];
 
-	pr_debug("%s: opcode %u unique %#llx nodeid %#llx in.len %u out.len %u\n",
+	pr_debug("%s: opcode %u unique %#llx nodeid %#llx in.len %u out.len %u queue_id %u\n",
 		 __func__, req->in.h.opcode, req->in.h.unique,
 		 req->in.h.nodeid, req->in.h.len,
-		 fuse_len_args(req->args->out_numargs, req->args->out_args));
+		 fuse_len_args(req->args->out_numargs, req->args->out_args),
+		 queue_id);
 	fsvq = &fs->vqs[queue_id];
 	ret = virtio_fs_enqueue_req(fsvq, req, false);
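A design note on the last hunk: the submitting CPU steers each request with queue_id = VQ_REQUEST + fs->mq_map[raw_smp_processor_id()]. The raw_ variant looks deliberate: the caller may be preemptible, and a migration between the table lookup and the enqueue merely sends the request to another CPU's queue, costing locality rather than correctness, so there is no reason to pay for the preemption check that smp_processor_id() performs in debug builds.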