Commit dd892625 authored by Linus Torvalds

Merge tag 'staging-5.4-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging

Pull IIO fixes and a new staging driver from Greg KH:
 "Here is a mix of a number of IIO driver fixes for 5.4-rc7, and a whole
  new staging driver.

  The IIO fixes resolve some reported issues; all are tiny.

  The staging driver addition is the vboxsf filesystem, which is the
  VirtualBox guest shared folder code. Hans has been trying to get
  filesystem reviewers to review the code for many months now, and
  Christoph finally said to just merge it in staging now as it is
  stand-alone and the filesystem people can review it easier over time
  that way.

  I know it's late for this big of an addition, but it is stand-alone.

  The code has been in linux-next for a while, long enough to have
  already picked up a few tiny fixes, so people are looking at it.

  All of these have been in linux-next with no reported issues"

* tag 'staging-5.4-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging:
  staging: Fix error return code in vboxsf_fill_super()
  staging: vboxsf: fix dereference of pointer dentry before it is null checked
  staging: vboxsf: Remove unused including <linux/version.h>
  staging: Add VirtualBox guest shared folder (vboxsf) support
  iio: adc: stm32-adc: fix stopping dma
  iio: imu: inv_mpu6050: fix no data on MPU6050
  iio: srf04: fix wrong limitation in distance measuring
  iio: imu: adis16480: make sure provided frequency is positive
parents 3de2a3e9 e39fcaef
@@ -17339,6 +17339,12 @@ F: include/linux/vbox_utils.h
F: include/uapi/linux/vbox*.h
F: drivers/virt/vboxguest/
VIRTUAL BOX SHARED FOLDER VFS DRIVER:
M: Hans de Goede <hdegoede@redhat.com>
L: linux-fsdevel@vger.kernel.org
S: Maintained
F: drivers/staging/vboxsf/*
VIRTUAL SERIO DEVICE DRIVER
M: Stephen Chandler Paul <thatslyude@gmail.com>
S: Maintained
@@ -1399,7 +1399,7 @@ static int stm32_adc_dma_start(struct iio_dev *indio_dev)
cookie = dmaengine_submit(desc);
ret = dma_submit_error(cookie);
if (ret) {
dmaengine_terminate_all(adc->dma_chan);
dmaengine_terminate_sync(adc->dma_chan);
return ret;
}
@@ -1477,7 +1477,7 @@ static void __stm32_adc_buffer_predisable(struct iio_dev *indio_dev)
stm32_adc_conv_irq_disable(adc);
if (adc->dma_chan)
dmaengine_terminate_all(adc->dma_chan);
dmaengine_terminate_sync(adc->dma_chan);
if (stm32_adc_set_trig(indio_dev, NULL))
dev_err(&indio_dev->dev, "Can't clear trigger\n");
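For context on the two stm32-adc hunks above: dmaengine_terminate_all() is deprecated and returns without waiting for in-flight completion callbacks, while dmaengine_terminate_sync() terminates the channel and then waits for any running callback to finish. A minimal sketch of the resulting teardown pattern (the helper name and its caller are hypothetical, not part of the driver):

#include <linux/dmaengine.h>

/*
 * Hypothetical helper: stop a slave/cyclic transfer before freeing the
 * buffers its completion callback touches. dmaengine_terminate_sync()
 * waits until a callback that is already running has returned, so it
 * must not be called from atomic context (or from the callback itself).
 */
static void example_dma_stop(struct dma_chan *chan)
{
	dmaengine_terminate_sync(chan);
}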
@@ -317,8 +317,11 @@ static int adis16480_set_freq(struct iio_dev *indio_dev, int val, int val2)
struct adis16480 *st = iio_priv(indio_dev);
unsigned int t, reg;
if (val < 0 || val2 < 0)
return -EINVAL;
t = val * 1000 + val2 / 1000;
if (t <= 0)
if (t == 0)
return -EINVAL;
/*
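In the adis16480 change above, t is declared unsigned int, so a negative val wraps around to a huge positive value and the old "if (t <= 0)" test could never reject it; hence the new explicit check on val and val2, and the remaining "t == 0" test. A small stand-alone illustration of the wrap, with made-up input values:

#include <stdio.h>

int main(void)
{
	int val = -1, val2 = 0;                     /* made-up user input */
	unsigned int t = val * 1000 + val2 / 1000;  /* wraps to 4294966296 */

	/* prints: t = 4294966296, (t <= 0) = 0 */
	printf("t = %u, (t <= 0) = %d\n", t, t <= 0);
	return 0;
}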
@@ -114,54 +114,63 @@ static const struct inv_mpu6050_hw hw_info[] = {
.name = "MPU6050",
.reg = &reg_set_6050,
.config = &chip_config_6050,
.fifo_size = 1024,
},
{
.whoami = INV_MPU6500_WHOAMI_VALUE,
.name = "MPU6500",
.reg = &reg_set_6500,
.config = &chip_config_6050,
.fifo_size = 512,
},
{
.whoami = INV_MPU6515_WHOAMI_VALUE,
.name = "MPU6515",
.reg = &reg_set_6500,
.config = &chip_config_6050,
.fifo_size = 512,
},
{
.whoami = INV_MPU6000_WHOAMI_VALUE,
.name = "MPU6000",
.reg = &reg_set_6050,
.config = &chip_config_6050,
.fifo_size = 1024,
},
{
.whoami = INV_MPU9150_WHOAMI_VALUE,
.name = "MPU9150",
.reg = &reg_set_6050,
.config = &chip_config_6050,
.fifo_size = 1024,
},
{
.whoami = INV_MPU9250_WHOAMI_VALUE,
.name = "MPU9250",
.reg = &reg_set_6500,
.config = &chip_config_6050,
.fifo_size = 512,
},
{
.whoami = INV_MPU9255_WHOAMI_VALUE,
.name = "MPU9255",
.reg = &reg_set_6500,
.config = &chip_config_6050,
.fifo_size = 512,
},
{
.whoami = INV_ICM20608_WHOAMI_VALUE,
.name = "ICM20608",
.reg = &reg_set_6500,
.config = &chip_config_6050,
.fifo_size = 512,
},
{
.whoami = INV_ICM20602_WHOAMI_VALUE,
.name = "ICM20602",
.reg = &reg_set_icm20602,
.config = &chip_config_6050,
.fifo_size = 1008,
},
};
@@ -100,12 +100,14 @@ struct inv_mpu6050_chip_config {
* @name: name of the chip.
* @reg: register map of the chip.
* @config: configuration of the chip.
* @fifo_size: size of the FIFO in bytes.
*/
struct inv_mpu6050_hw {
u8 whoami;
u8 *name;
const struct inv_mpu6050_reg_map *reg;
const struct inv_mpu6050_chip_config *config;
size_t fifo_size;
};
/*
@@ -180,9 +180,6 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
"failed to ack interrupt\n");
goto flush_fifo;
}
/* handle fifo overflow by reseting fifo */
if (int_status & INV_MPU6050_BIT_FIFO_OVERFLOW_INT)
goto flush_fifo;
if (!(int_status & INV_MPU6050_BIT_RAW_DATA_RDY_INT)) {
dev_warn(regmap_get_device(st->map),
"spurious interrupt with status 0x%x\n", int_status);
@@ -211,6 +208,18 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
if (result)
goto end_session;
fifo_count = get_unaligned_be16(&data[0]);
/*
* Handle fifo overflow by resetting fifo.
* Reset if there is only 3 data set free remaining to mitigate
* possible delay between reading fifo count and fifo data.
*/
nb = 3 * bytes_per_datum;
if (fifo_count >= st->hw->fifo_size - nb) {
dev_warn(regmap_get_device(st->map), "fifo overflow reset\n");
goto flush_fifo;
}
/* compute and process all complete datum */
nb = fifo_count / bytes_per_datum;
inv_mpu6050_update_period(st, pf->timestamp, nb);
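A quick numeric check of the guard above, using the MPU6050's 1024-byte FIFO from hw_info[]; the 12-byte sample size is only an assumption here, since bytes_per_datum depends on which channels are enabled:

#include <stdio.h>

int main(void)
{
	unsigned int fifo_size = 1024;      /* MPU6050, per hw_info[] above */
	unsigned int bytes_per_datum = 12;  /* assumed: 3-axis accel + 3-axis gyro, 16 bit each */
	unsigned int nb = 3 * bytes_per_datum;

	/* a fifo_count at or above this triggers the "fifo overflow reset" path */
	printf("reset threshold: %u bytes\n", fifo_size - nb);  /* 988 */
	return 0;
}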
@@ -110,7 +110,7 @@ static int srf04_read(struct srf04_data *data)
udelay(data->cfg->trigger_pulse_us);
gpiod_set_value(data->gpiod_trig, 0);
/* it cannot take more than 20 ms */
/* it should not take more than 20 ms until echo is rising */
ret = wait_for_completion_killable_timeout(&data->rising, HZ/50);
if (ret < 0) {
mutex_unlock(&data->lock);
@@ -120,7 +120,8 @@ static int srf04_read(struct srf04_data *data)
return -ETIMEDOUT;
}
ret = wait_for_completion_killable_timeout(&data->falling, HZ/50);
/* it cannot take more than 50 ms until echo is falling */
ret = wait_for_completion_killable_timeout(&data->falling, HZ/20);
if (ret < 0) {
mutex_unlock(&data->lock);
return ret;
@@ -135,19 +136,19 @@ static int srf04_read(struct srf04_data *data)
dt_ns = ktime_to_ns(ktime_dt);
/*
* measuring more than 3 meters is beyond the capabilities of
* the sensor
* measuring more than 6,45 meters is beyond the capabilities of
* the supported sensors
* ==> filter out invalid results for not measuring echos of
* another us sensor
*
* formula:
*          distance           3 m
* time = ------------- = ------------- = 9404389 ns
*            speed          319 m/s
*          distance        6,45 * 2 m
* time = ------------- = ------------- = 40438871 ns
*            speed          319 m/s
*
* using a minimum speed at -20 °C of 319 m/s
*/
if (dt_ns > 9404389)
if (dt_ns > 40438871)
return -EIO;
time_ns = dt_ns;
@@ -159,20 +160,20 @@ static int srf04_read(struct srf04_data *data)
* with Temp in °C
* and speed in m/s
*
* use 343 m/s as ultrasonic speed at 20 °C here in absence of the
* use 343,5 m/s as ultrasonic speed at 20 °C here in absence of the
* temperature
*
* therefore:
*              time       343
* distance = -------- * -------
*              10^6        2
*              time      343,5      time * 106
* distance = -------- * ------- = --------------
*              10^6        2          617176
* with time in ns
* and distance in mm (one way)
*
* because we limit to 3 meters the multiplication with 343 just
* because we limit to 6,45 meters the multiplication with 106 just
* fits into 32 bit
*/
distance_mm = time_ns * 343 / 2000000;
distance_mm = time_ns * 106 / 617176;
return distance_mm;
}
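The constants in the comments above can be double-checked; in particular the "fits into 32 bit" claim holds because the largest possible intermediate product stays just under 2^32:

t_{\max} = \frac{2 \cdot 6.45\ \mathrm{m}}{319\ \mathrm{m/s}} \approx 40438871\ \mathrm{ns},
\qquad
d_{\mathrm{mm}} = t_{\mathrm{ns}} \cdot \frac{343.5}{2 \cdot 10^{6}}
                \approx t_{\mathrm{ns}} \cdot \frac{106}{617176},
\qquad
40438871 \cdot 106 = 4286520326 < 2^{32} = 4294967296.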
@@ -125,4 +125,6 @@ source "drivers/staging/exfat/Kconfig"
source "drivers/staging/qlge/Kconfig"
source "drivers/staging/vboxsf/Kconfig"
endif # STAGING
@@ -53,3 +53,4 @@ obj-$(CONFIG_UWB) += uwb/
obj-$(CONFIG_USB_WUSB) += wusbcore/
obj-$(CONFIG_EXFAT_FS) += exfat/
obj-$(CONFIG_QLGE) += qlge/
obj-$(CONFIG_VBOXSF_FS) += vboxsf/
config VBOXSF_FS
tristate "VirtualBox guest shared folder (vboxsf) support"
depends on X86 && VBOXGUEST
select NLS
help
VirtualBox hosts can share folders with guests, this driver
implements the Linux-guest side of this allowing folders exported
by the host to be mounted under Linux.
If you want to use shared folders in VirtualBox guests, answer Y or M.
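As a usage sketch for the option above: with the module loaded in the guest, a host share is mounted by its share name using filesystem type "vboxsf". The share name, mount point and option string below are hypothetical:

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/mount.h>

int main(void)
{
	/* the source is the share name exported by the host, not a device node */
	if (mount("work", "/mnt/work", "vboxsf", 0, "uid=1000,gid=1000") != 0) {
		fprintf(stderr, "mount vboxsf failed: %s\n", strerror(errno));
		return 1;
	}
	return 0;
}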
# SPDX-License-Identifier: MIT
obj-$(CONFIG_VBOXSF_FS) += vboxsf.o
vboxsf-y := dir.o file.o utils.o vboxsf_wrappers.o super.o
TODO:
- Find a file-system developer to review this and give their Reviewed-By
- Address any items coming up during review
- Move to fs/vboxfs
Please send any patches to Greg Kroah-Hartman <gregkh@linuxfoundation.org>
and Hans de Goede <hdegoede@redhat.com>
// SPDX-License-Identifier: MIT
/*
* VirtualBox Guest Shared Folders support: Regular file inode and file ops.
*
* Copyright (C) 2006-2018 Oracle Corporation
*/
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/sizes.h>
#include "vfsmod.h"
struct vboxsf_handle {
u64 handle;
u32 root;
u32 access_flags;
struct kref refcount;
struct list_head head;
};
static int vboxsf_file_open(struct inode *inode, struct file *file)
{
struct vboxsf_inode *sf_i = VBOXSF_I(inode);
struct shfl_createparms params = {};
struct vboxsf_handle *sf_handle;
u32 access_flags = 0;
int err;
sf_handle = kmalloc(sizeof(*sf_handle), GFP_KERNEL);
if (!sf_handle)
return -ENOMEM;
/*
* We check the value of params.handle afterwards to find out if
* the call succeeded or failed, as the API does not seem to cleanly
* distinguish error and informational messages.
*
* Furthermore, we must set params.handle to SHFL_HANDLE_NIL to
* make the shared folders host service use our mode parameter.
*/
params.handle = SHFL_HANDLE_NIL;
if (file->f_flags & O_CREAT) {
params.create_flags |= SHFL_CF_ACT_CREATE_IF_NEW;
/*
* We ignore O_EXCL, as the Linux kernel seems to call create
* beforehand itself, so O_EXCL should always fail.
*/
if (file->f_flags & O_TRUNC)
params.create_flags |= SHFL_CF_ACT_OVERWRITE_IF_EXISTS;
else
params.create_flags |= SHFL_CF_ACT_OPEN_IF_EXISTS;
} else {
params.create_flags |= SHFL_CF_ACT_FAIL_IF_NEW;
if (file->f_flags & O_TRUNC)
params.create_flags |= SHFL_CF_ACT_OVERWRITE_IF_EXISTS;
}
switch (file->f_flags & O_ACCMODE) {
case O_RDONLY:
access_flags |= SHFL_CF_ACCESS_READ;
break;
case O_WRONLY:
access_flags |= SHFL_CF_ACCESS_WRITE;
break;
case O_RDWR:
access_flags |= SHFL_CF_ACCESS_READWRITE;
break;
default:
WARN_ON(1);
}
if (file->f_flags & O_APPEND)
access_flags |= SHFL_CF_ACCESS_APPEND;
params.create_flags |= access_flags;
params.info.attr.mode = inode->i_mode;
err = vboxsf_create_at_dentry(file_dentry(file), &params);
if (err == 0 && params.handle == SHFL_HANDLE_NIL)
err = (params.result == SHFL_FILE_EXISTS) ? -EEXIST : -ENOENT;
if (err) {
kfree(sf_handle);
return err;
}
/* the host may have given us different attr than requested */
sf_i->force_restat = 1;
/* init our handle struct and add it to the inode's handles list */
sf_handle->handle = params.handle;
sf_handle->root = VBOXSF_SBI(inode->i_sb)->root;
sf_handle->access_flags = access_flags;
kref_init(&sf_handle->refcount);
mutex_lock(&sf_i->handle_list_mutex);
list_add(&sf_handle->head, &sf_i->handle_list);
mutex_unlock(&sf_i->handle_list_mutex);
file->private_data = sf_handle;
return 0;
}
static void vboxsf_handle_release(struct kref *refcount)
{
struct vboxsf_handle *sf_handle =
container_of(refcount, struct vboxsf_handle, refcount);
vboxsf_close(sf_handle->root, sf_handle->handle);
kfree(sf_handle);
}
static int vboxsf_file_release(struct inode *inode, struct file *file)
{
struct vboxsf_inode *sf_i = VBOXSF_I(inode);
struct vboxsf_handle *sf_handle = file->private_data;
/*
* When a file is closed on our (the guest) side, we want any subsequent
* accesses done on the host side to see all changes done from our side.
*/
filemap_write_and_wait(inode->i_mapping);
mutex_lock(&sf_i->handle_list_mutex);
list_del(&sf_handle->head);
mutex_unlock(&sf_i->handle_list_mutex);
kref_put(&sf_handle->refcount, vboxsf_handle_release);
return 0;
}
/*
* Write back dirty pages now, because there may not be any suitable
* open files later
*/
static void vboxsf_vma_close(struct vm_area_struct *vma)
{
filemap_write_and_wait(vma->vm_file->f_mapping);
}
static const struct vm_operations_struct vboxsf_file_vm_ops = {
.close = vboxsf_vma_close,
.fault = filemap_fault,
.map_pages = filemap_map_pages,
};
static int vboxsf_file_mmap(struct file *file, struct vm_area_struct *vma)
{
int err;
err = generic_file_mmap(file, vma);
if (!err)
vma->vm_ops = &vboxsf_file_vm_ops;
return err;
}
/*
* Note that since we are accessing files on the host's filesystem, files
* may always be changed underneath us by the host!
*
* The vboxsf API between the guest and the host does not offer any functions
* to deal with this. There is no inode-generation to check for changes, no
* events / callback on changes and no way to lock files.
*
* To avoid returning stale data when a file gets *opened* on our (the guest)
* side, we do a "stat" on the host side, then compare the mtime with the
* last known mtime and invalidate the page-cache if they differ.
* This is done from vboxsf_inode_revalidate().
*
* When reads are done through the read_iter fop, it is possible to do
* further cache revalidation then, there are 3 options to deal with this:
*
* 1) Rely solely on the revalidation done at open time
* 2) Do another "stat" and compare mtime again. Unfortunately the vboxsf
* host API does not allow stat on handles, so we would need to use
* file->f_path.dentry and the stat will then fail if the file was unlinked
* or renamed (and there is no thing like NFS' silly-rename). So we get:
* 2a) "stat" and compare mtime, on stat failure invalidate the cache
* 2b) "stat" and compare mtime, on stat failure do nothing
* 3) Simply always call invalidate_inode_pages2_range on the range of the read
*
* Currently we are keeping things KISS and using option 1. This allows
* directly using generic_file_read_iter without wrapping it.
*
* This means that only data written on the host side before open() on
* the guest side is guaranteed to be seen by the guest. If necessary
* we may provide other read-cache strategies in the future and make this
* configurable through a mount option.
*/
const struct file_operations vboxsf_reg_fops = {
.llseek = generic_file_llseek,
.read_iter = generic_file_read_iter,
.write_iter = generic_file_write_iter,
.mmap = vboxsf_file_mmap,
.open = vboxsf_file_open,
.release = vboxsf_file_release,
.fsync = noop_fsync,
.splice_read = generic_file_splice_read,
};
const struct inode_operations vboxsf_reg_iops = {
.getattr = vboxsf_getattr,
.setattr = vboxsf_setattr
};
static int vboxsf_readpage(struct file *file, struct page *page)
{
struct vboxsf_handle *sf_handle = file->private_data;
loff_t off = page_offset(page);
u32 nread = PAGE_SIZE;
u8 *buf;
int err;
buf = kmap(page);
err = vboxsf_read(sf_handle->root, sf_handle->handle, off, &nread, buf);
if (err == 0) {
memset(&buf[nread], 0, PAGE_SIZE - nread);
flush_dcache_page(page);
SetPageUptodate(page);
} else {
SetPageError(page);
}
kunmap(page);
unlock_page(page);
return err;
}
static struct vboxsf_handle *vboxsf_get_write_handle(struct vboxsf_inode *sf_i)
{
struct vboxsf_handle *h, *sf_handle = NULL;
mutex_lock(&sf_i->handle_list_mutex);
list_for_each_entry(h, &sf_i->handle_list, head) {
if (h->access_flags == SHFL_CF_ACCESS_WRITE ||
h->access_flags == SHFL_CF_ACCESS_READWRITE) {
kref_get(&h->refcount);
sf_handle = h;
break;
}
}
mutex_unlock(&sf_i->handle_list_mutex);
return sf_handle;
}
static int vboxsf_writepage(struct page *page, struct writeback_control *wbc)
{
struct inode *inode = page->mapping->host;
struct vboxsf_inode *sf_i = VBOXSF_I(inode);
struct vboxsf_handle *sf_handle;
loff_t off = page_offset(page);
loff_t size = i_size_read(inode);
u32 nwrite = PAGE_SIZE;
u8 *buf;
int err;
if (off + PAGE_SIZE > size)
nwrite = size & ~PAGE_MASK;
sf_handle = vboxsf_get_write_handle(sf_i);
if (!sf_handle)
return -EBADF;
buf = kmap(page);
err = vboxsf_write(sf_handle->root, sf_handle->handle,
off, &nwrite, buf);
kunmap(page);
kref_put(&sf_handle->refcount, vboxsf_handle_release);
if (err == 0) {
ClearPageError(page);
/* mtime changed */
sf_i->force_restat = 1;
} else {
ClearPageUptodate(page);
}
unlock_page(page);
return err;
}
static int vboxsf_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned int len, unsigned int copied,
struct page *page, void *fsdata)
{
struct inode *inode = mapping->host;
struct vboxsf_handle *sf_handle = file->private_data;
unsigned int from = pos & ~PAGE_MASK;
u32 nwritten = len;
u8 *buf;
int err;
buf = kmap(page);
err = vboxsf_write(sf_handle->root, sf_handle->handle,
pos, &nwritten, buf + from);
kunmap(page);
if (err) {
nwritten = 0;
goto out;
}
/* mtime changed */
VBOXSF_I(inode)->force_restat = 1;
if (!PageUptodate(page) && nwritten == PAGE_SIZE)
SetPageUptodate(page);
pos += nwritten;
if (pos > inode->i_size)
i_size_write(inode, pos);
out:
unlock_page(page);
put_page(page);
return nwritten;
}
const struct address_space_operations vboxsf_reg_aops = {
.readpage = vboxsf_readpage,
.writepage = vboxsf_writepage,
.set_page_dirty = __set_page_dirty_nobuffers,
.write_begin = simple_write_begin,
.write_end = vboxsf_write_end,
};
static const char *vboxsf_get_link(struct dentry *dentry, struct inode *inode,
struct delayed_call *done)
{
struct vboxsf_sbi *sbi = VBOXSF_SBI(inode->i_sb);
struct shfl_string *path;
char *link;
int err;
if (!dentry)
return ERR_PTR(-ECHILD);
path = vboxsf_path_from_dentry(sbi, dentry);
if (IS_ERR(path))
return (char *)path;
link = kzalloc(PATH_MAX, GFP_KERNEL);
if (!link) {
__putname(path);
return ERR_PTR(-ENOMEM);
}
err = vboxsf_readlink(sbi->root, path, PATH_MAX, link);
__putname(path);
if (err) {
kfree(link);
return ERR_PTR(err);
}
set_delayed_call(done, kfree_link, link);
return link;
}
const struct inode_operations vboxsf_lnk_iops = {
.get_link = vboxsf_get_link
};
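Picking up option 3 from the read-caching comment in file.c above, a rough sketch of what always invalidating the read range before calling generic_file_read_iter() might look like; this is purely illustrative and not part of the driver:

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/uio.h>

static ssize_t vboxsf_read_iter_option3(struct kiocb *iocb, struct iov_iter *to)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	size_t count = iov_iter_count(to);

	if (count) {
		/*
		 * Drop cached pages covering the requested range so the read
		 * goes back to the host, trading performance for freshness.
		 */
		invalidate_inode_pages2_range(mapping,
					      iocb->ki_pos >> PAGE_SHIFT,
					      (iocb->ki_pos + count - 1) >> PAGE_SHIFT);
	}

	return generic_file_read_iter(iocb, to);
}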
/* SPDX-License-Identifier: MIT */
/*
* VirtualBox Guest Shared Folders support: module header.
*
* Copyright (C) 2006-2018 Oracle Corporation
*/
#ifndef VFSMOD_H
#define VFSMOD_H
#include <linux/backing-dev.h>
#include <linux/idr.h>
#include "shfl_hostintf.h"
#define DIR_BUFFER_SIZE SZ_16K
/* The cast is to prevent assignment of void * to pointers of arbitrary type */
#define VBOXSF_SBI(sb) ((struct vboxsf_sbi *)(sb)->s_fs_info)
#define VBOXSF_I(i) container_of(i, struct vboxsf_inode, vfs_inode)
struct vboxsf_options {
unsigned long ttl;
kuid_t uid;
kgid_t gid;
bool dmode_set;
bool fmode_set;
umode_t dmode;
umode_t fmode;
umode_t dmask;
umode_t fmask;
};
struct vboxsf_fs_context {
struct vboxsf_options o;
char *nls_name;
};
/* per-shared folder information */
struct vboxsf_sbi {
struct vboxsf_options o;
struct shfl_fsobjinfo root_info;
struct idr ino_idr;
spinlock_t ino_idr_lock; /* This protects ino_idr */
struct nls_table *nls;
u32 next_generation;
u32 root;
int bdi_id;
};
/* per-inode information */
struct vboxsf_inode {
/* some information was changed, update data on next revalidate */
int force_restat;
/* list of open handles for this inode + lock protecting it */
struct list_head handle_list;
/* This mutex protects handle_list accesses */
struct mutex handle_list_mutex;
/* The VFS inode struct */
struct inode vfs_inode;
};
struct vboxsf_dir_info {
struct list_head info_list;
};
struct vboxsf_dir_buf {
size_t entries;
size_t free;
size_t used;
void *buf;
struct list_head head;
};
/* globals */
extern const struct inode_operations vboxsf_dir_iops;
extern const struct inode_operations vboxsf_lnk_iops;
extern const struct inode_operations vboxsf_reg_iops;
extern const struct file_operations vboxsf_dir_fops;
extern const struct file_operations vboxsf_reg_fops;
extern const struct address_space_operations vboxsf_reg_aops;
extern const struct dentry_operations vboxsf_dentry_ops;
/* from utils.c */
struct inode *vboxsf_new_inode(struct super_block *sb);
void vboxsf_init_inode(struct vboxsf_sbi *sbi, struct inode *inode,
const struct shfl_fsobjinfo *info);
int vboxsf_create_at_dentry(struct dentry *dentry,
struct shfl_createparms *params);
int vboxsf_stat(struct vboxsf_sbi *sbi, struct shfl_string *path,
struct shfl_fsobjinfo *info);
int vboxsf_stat_dentry(struct dentry *dentry, struct shfl_fsobjinfo *info);
int vboxsf_inode_revalidate(struct dentry *dentry);
int vboxsf_getattr(const struct path *path, struct kstat *kstat,
u32 request_mask, unsigned int query_flags);
int vboxsf_setattr(struct dentry *dentry, struct iattr *iattr);
struct shfl_string *vboxsf_path_from_dentry(struct vboxsf_sbi *sbi,
struct dentry *dentry);
int vboxsf_nlscpy(struct vboxsf_sbi *sbi, char *name, size_t name_bound_len,
const unsigned char *utf8_name, size_t utf8_len);
struct vboxsf_dir_info *vboxsf_dir_info_alloc(void);
void vboxsf_dir_info_free(struct vboxsf_dir_info *p);
int vboxsf_dir_read_all(struct vboxsf_sbi *sbi, struct vboxsf_dir_info *sf_d,
u64 handle);
/* from vboxsf_wrappers.c */
int vboxsf_connect(void);
void vboxsf_disconnect(void);
int vboxsf_create(u32 root, struct shfl_string *parsed_path,
struct shfl_createparms *create_parms);
int vboxsf_close(u32 root, u64 handle);
int vboxsf_remove(u32 root, struct shfl_string *parsed_path, u32 flags);
int vboxsf_rename(u32 root, struct shfl_string *src_path,
struct shfl_string *dest_path, u32 flags);
int vboxsf_read(u32 root, u64 handle, u64 offset, u32 *buf_len, u8 *buf);
int vboxsf_write(u32 root, u64 handle, u64 offset, u32 *buf_len, u8 *buf);
int vboxsf_dirinfo(u32 root, u64 handle,
struct shfl_string *parsed_path, u32 flags, u32 index,
u32 *buf_len, struct shfl_dirinfo *buf, u32 *file_count);
int vboxsf_fsinfo(u32 root, u64 handle, u32 flags,
u32 *buf_len, void *buf);
int vboxsf_map_folder(struct shfl_string *folder_name, u32 *root);
int vboxsf_unmap_folder(u32 root);
int vboxsf_readlink(u32 root, struct shfl_string *parsed_path,
u32 buf_len, u8 *buf);
int vboxsf_symlink(u32 root, struct shfl_string *new_path,
struct shfl_string *old_path, struct shfl_fsobjinfo *buf);
int vboxsf_set_utf8(void);
int vboxsf_set_symlinks(void);
#endif