Commit be3c2131 authored by Linus Torvalds

Merge tag 'ovl-update-6.5' of git://git.kernel.org/pub/scm/linux/kernel/git/overlayfs/vfs

Pull overlayfs update from Amir Goldstein:

 - fix two NULL pointer deref bugs (Zhihao Cheng)

 - add support for "data-only" lower layers destined to be used by
   composefs

 - port overlayfs to the new mount api (Christian Brauner)

* tag 'ovl-update-6.5' of git://git.kernel.org/pub/scm/linux/kernel/git/overlayfs/vfs: (26 commits)
  ovl: add Amir as co-maintainer
  ovl: reserve ability to reconfigure mount options with new mount api
  ovl: modify layer parameter parsing
  ovl: port to new mount api
  ovl: factor out ovl_parse_options() helper
  ovl: store enum redirect_mode in config instead of a string
  ovl: pass ovl_fs to xino helpers
  ovl: clarify ovl_get_root() semantics
  ovl: negate the ofs->share_whiteout boolean
  ovl: check type and offset of struct vfsmount in ovl_entry
  ovl: implement lazy lookup of lowerdata in data-only layers
  ovl: prepare for lazy lookup of lowerdata inode
  ovl: prepare to store lowerdata redirect for lazy lowerdata lookup
  ovl: implement lookup in data-only layers
  ovl: introduce data-only lower layers
  ovl: remove unneeded goto instructions
  ovl: deduplicate lowerdata and lowerstack[]
  ovl: deduplicate lowerpath and lowerstack[]
  ovl: move ovl_entry into ovl_inode
  ovl: factor out ovl_free_entry() and ovl_stack_*() helpers
  ...
parents eee9c708 62149a74
......@@ -231,12 +231,11 @@ Mount options:
Redirects are enabled.
- "redirect_dir=follow":
Redirects are not created, but followed.
- "redirect_dir=off":
Redirects are not created and only followed if "redirect_always_follow"
feature is enabled in the kernel/module config.
- "redirect_dir=nofollow":
Redirects are not created and not followed (equivalent to "redirect_dir=off"
if "redirect_always_follow" feature is not enabled).
Redirects are not created and not followed.
- "redirect_dir=off":
If "redirect_always_follow" is enabled in the kernel/module config,
this "off" translates to "follow", otherwise it translates to "nofollow".
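To make the resolution concrete, here is a minimal sketch (a hypothetical helper, not code from this series; it assumes the OVL_REDIRECT_* values that this series introduces in overlayfs.h and the existing CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW option):
	/* Sketch only: how a requested "redirect_dir=off" is resolved. */
	static int ovl_effective_redirect_mode(int requested)
	{
		if (requested != OVL_REDIRECT_OFF)
			return requested;	/* "on", "follow", "nofollow" apply as given */
		/* "off" never takes effect directly; it becomes one of the two below. */
		if (IS_ENABLED(CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW))
			return OVL_REDIRECT_FOLLOW;
		return OVL_REDIRECT_NOFOLLOW;
	}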
When the NFS export feature is enabled, every copied up directory is
indexed by the file handle of the lower inode and a file handle of the
......@@ -371,6 +370,41 @@ conflict with metacopy=on, and will result in an error.
[*] redirect_dir=follow only conflicts with metacopy=on if upperdir=... is
given.
Data-only lower layers
----------------------
With "metacopy" feature enabled, an overlayfs regular file may be a composition
of information from up to three different layers:
1) metadata from a file in the upper layer
2) st_ino and st_dev object identifier from a file in a lower layer
3) data from a file in another lower layer (further below)
The "lower data" file can be on any lower layer, except for the topmost
lower layer.
Below the topmost lower layer, any number of the lowermost layers may be
defined as "data-only" lower layers, using double colon ("::") separators.
A normal lower layer is not allowed to be below a data-only layer, so single
colon separators are not allowed to the right of double colon ("::") separators.
For example:
mount -t overlay overlay -olowerdir=/l1:/l2:/l3::/do1::/do2 /merged
The paths of files in the "data-only" lower layers are not visible in the
merged overlayfs directories and the metadata and st_ino/st_dev of files
in the "data-only" lower layers are not visible in overlayfs inodes.
Only the data of the files in the "data-only" lower layers may be visible
when a "metacopy" file in one of the lower layers above it has a "redirect"
to the absolute path of the "lower data" file in the "data-only" lower layer.
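For illustration, here is a hedged userspace sketch of mounting such a stack through the new mount API that this series switches overlayfs to. The paths, the metacopy=on/redirect_dir=on settings (data-only layers are expected to require metacopy), and the raw syscall(2) invocations are assumptions for the example, not taken from this pull:
	/* Sketch: overlayfs with data-only lower layers via fsopen()/fsconfig(). */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>	/* needs headers that define SYS_fsopen & friends */

	#ifndef FSCONFIG_SET_STRING
	#define FSCONFIG_SET_STRING	1	/* values from <linux/mount.h> */
	#define FSCONFIG_CMD_CREATE	6
	#endif
	#ifndef MOVE_MOUNT_F_EMPTY_PATH
	#define MOVE_MOUNT_F_EMPTY_PATH	0x00000004
	#endif

	static int ovl_set(int fsfd, const char *key, const char *val)
	{
		return syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, key, val, 0);
	}

	int main(void)
	{
		int fsfd, mntfd;

		fsfd = syscall(SYS_fsopen, "overlay", 0);
		if (fsfd < 0)
			return perror("fsopen"), 1;

		/* /do1 and /do2 are data-only layers ("::" separators). */
		ovl_set(fsfd, "lowerdir", "/l1:/l2:/l3::/do1::/do2");
		ovl_set(fsfd, "upperdir", "/upper");
		ovl_set(fsfd, "workdir", "/work");
		ovl_set(fsfd, "metacopy", "on");
		ovl_set(fsfd, "redirect_dir", "on");

		if (syscall(SYS_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0) < 0)
			return perror("FSCONFIG_CMD_CREATE"), 1;

		mntfd = syscall(SYS_fsmount, fsfd, 0, 0);
		if (mntfd < 0)
			return perror("fsmount"), 1;

		if (syscall(SYS_move_mount, mntfd, "", AT_FDCWD, "/merged",
			    MOVE_MOUNT_F_EMPTY_PATH) < 0)
			return perror("move_mount"), 1;

		return 0;	/* /merged now hides the data-only layer paths */
	}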
Sharing and copying layers
--------------------------
......
......@@ -15937,6 +15937,7 @@ F: include/media/i2c/ov2659.h
OVERLAY FILESYSTEM
M: Miklos Szeredi <miklos@szeredi.hu>
M: Amir Goldstein <amir73il@gmail.com>
L: linux-unionfs@vger.kernel.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/vfs.git
......
......@@ -6,4 +6,4 @@
obj-$(CONFIG_OVERLAY_FS) += overlay.o
overlay-objs := super.o namei.o util.o inode.o file.o dir.o readdir.o \
copy_up.o export.o
copy_up.o export.o params.o
......@@ -575,6 +575,7 @@ static int ovl_link_up(struct ovl_copy_up_ctx *c)
/* Restore timestamps on parent (best effort) */
ovl_set_timestamps(ofs, upperdir, &c->pstat);
ovl_dentry_set_upper_alias(c->dentry);
ovl_dentry_update_reval(c->dentry, upper);
}
}
inode_unlock(udir);
......@@ -894,6 +895,7 @@ static int ovl_do_copy_up(struct ovl_copy_up_ctx *c)
inode_unlock(udir);
ovl_dentry_set_upper_alias(c->dentry);
ovl_dentry_update_reval(c->dentry, ovl_dentry_upper(c->dentry));
}
out:
......@@ -1071,6 +1073,15 @@ static int ovl_copy_up_flags(struct dentry *dentry, int flags)
if (WARN_ON(disconnected && d_is_dir(dentry)))
return -EIO;
/*
* We may not need lowerdata if we are only doing metacopy up, but it is
* not very important to optimize this case, so do lazy lowerdata lookup
* before any copy up, so we can do it before taking ovl_inode_lock().
*/
err = ovl_maybe_lookup_lowerdata(dentry);
if (err)
return err;
old_cred = ovl_override_creds(dentry->d_sb);
while (!err) {
struct dentry *next;
......
......@@ -83,7 +83,7 @@ static struct dentry *ovl_whiteout(struct ovl_fs *ofs)
ofs->whiteout = whiteout;
}
if (ofs->share_whiteout) {
if (!ofs->no_shared_whiteout) {
whiteout = ovl_lookup_temp(ofs, workdir);
if (IS_ERR(whiteout))
goto out;
......@@ -95,7 +95,7 @@ static struct dentry *ovl_whiteout(struct ovl_fs *ofs)
if (err != -EMLINK) {
pr_warn("Failed to link whiteout - disabling whiteout inode sharing(nlink=%u, err=%i)\n",
ofs->whiteout->d_inode->i_nlink, err);
ofs->share_whiteout = false;
ofs->no_shared_whiteout = true;
}
dput(whiteout);
}
......@@ -269,8 +269,7 @@ static int ovl_instantiate(struct dentry *dentry, struct inode *inode,
ovl_dir_modified(dentry->d_parent, false);
ovl_dentry_set_upper_alias(dentry);
ovl_dentry_update_reval(dentry, newdentry,
DCACHE_OP_REVALIDATE | DCACHE_OP_WEAK_REVALIDATE);
ovl_dentry_init_reval(dentry, newdentry, NULL);
if (!hardlink) {
/*
......@@ -953,7 +952,7 @@ static bool ovl_type_merge_or_lower(struct dentry *dentry)
static bool ovl_can_move(struct dentry *dentry)
{
return ovl_redirect_dir(dentry->d_sb) ||
return ovl_redirect_dir(OVL_FS(dentry->d_sb)) ||
!d_is_dir(dentry) || !ovl_type_merge_or_lower(dentry);
}
......
......@@ -80,7 +80,7 @@ static int ovl_connectable_layer(struct dentry *dentry)
/* We can get overlay root from root of any layer */
if (dentry == dentry->d_sb->s_root)
return oe->numlower;
return ovl_numlower(oe);
/*
* If it's an unindexed merge dir, then it's not connectable with any
......@@ -91,7 +91,7 @@ static int ovl_connectable_layer(struct dentry *dentry)
return 0;
/* We can get upper/overlay path from indexed/lower dentry */
return oe->lowerstack[0].layer->idx;
return ovl_lowerstack(oe)->layer->idx;
}
/*
......@@ -105,6 +105,7 @@ static int ovl_connectable_layer(struct dentry *dentry)
static int ovl_connect_layer(struct dentry *dentry)
{
struct dentry *next, *parent = NULL;
struct ovl_entry *oe = OVL_E(dentry);
int origin_layer;
int err = 0;
......@@ -112,7 +113,7 @@ static int ovl_connect_layer(struct dentry *dentry)
WARN_ON(!ovl_dentry_lower(dentry)))
return -EIO;
origin_layer = OVL_E(dentry)->lowerstack[0].layer->idx;
origin_layer = ovl_lowerstack(oe)->layer->idx;
if (ovl_dentry_test_flag(OVL_E_CONNECTED, dentry))
return origin_layer;
......@@ -285,21 +286,29 @@ static struct dentry *ovl_obtain_alias(struct super_block *sb,
struct dentry *lower = lowerpath ? lowerpath->dentry : NULL;
struct dentry *upper = upper_alias ?: index;
struct dentry *dentry;
struct inode *inode;
struct inode *inode = NULL;
struct ovl_entry *oe;
struct ovl_inode_params oip = {
.lowerpath = lowerpath,
.index = index,
.numlower = !!lower
};
/* We get overlay directory dentries with ovl_lookup_real() */
if (d_is_dir(upper ?: lower))
return ERR_PTR(-EIO);
oe = ovl_alloc_entry(!!lower);
if (!oe)
return ERR_PTR(-ENOMEM);
oip.upperdentry = dget(upper);
if (lower) {
ovl_lowerstack(oe)->dentry = dget(lower);
ovl_lowerstack(oe)->layer = lowerpath->layer;
}
oip.oe = oe;
inode = ovl_get_inode(sb, &oip);
if (IS_ERR(inode)) {
ovl_free_entry(oe);
dput(upper);
return ERR_CAST(inode);
}
......@@ -314,20 +323,11 @@ static struct dentry *ovl_obtain_alias(struct super_block *sb,
dentry = d_alloc_anon(inode->i_sb);
if (unlikely(!dentry))
goto nomem;
oe = ovl_alloc_entry(lower ? 1 : 0);
if (!oe)
goto nomem;
if (lower) {
oe->lowerstack->dentry = dget(lower);
oe->lowerstack->layer = lowerpath->layer;
}
dentry->d_fsdata = oe;
if (upper_alias)
ovl_dentry_set_upper_alias(dentry);
ovl_dentry_update_reval(dentry, upper,
DCACHE_OP_REVALIDATE | DCACHE_OP_WEAK_REVALIDATE);
ovl_dentry_init_reval(dentry, upper, OVL_I_E(inode));
return d_instantiate_anon(dentry, inode);
......@@ -342,15 +342,16 @@ static struct dentry *ovl_obtain_alias(struct super_block *sb,
/* Get the upper or lower dentry in the stack that is on layer @idx */
static struct dentry *ovl_dentry_real_at(struct dentry *dentry, int idx)
{
struct ovl_entry *oe = dentry->d_fsdata;
struct ovl_entry *oe = OVL_E(dentry);
struct ovl_path *lowerstack = ovl_lowerstack(oe);
int i;
if (!idx)
return ovl_dentry_upper(dentry);
for (i = 0; i < oe->numlower; i++) {
if (oe->lowerstack[i].layer->idx == idx)
return oe->lowerstack[i].dentry;
for (i = 0; i < ovl_numlower(oe); i++) {
if (lowerstack[i].layer->idx == idx)
return lowerstack[i].dentry;
}
return NULL;
......
......@@ -107,14 +107,23 @@ static int ovl_real_fdget_meta(const struct file *file, struct fd *real,
{
struct dentry *dentry = file_dentry(file);
struct path realpath;
int err;
real->flags = 0;
real->file = file->private_data;
if (allow_meta)
if (allow_meta) {
ovl_path_real(dentry, &realpath);
else
} else {
/* lazy lookup of lowerdata */
err = ovl_maybe_lookup_lowerdata(dentry);
if (err)
return err;
ovl_path_realdata(dentry, &realpath);
}
if (!realpath.dentry)
return -EIO;
/* Has it been copied up since we'd opened it? */
if (unlikely(file_inode(real->file) != d_inode(realpath.dentry))) {
......@@ -150,6 +159,11 @@ static int ovl_open(struct inode *inode, struct file *file)
struct path realpath;
int err;
/* lazy lookup of lowerdata */
err = ovl_maybe_lookup_lowerdata(dentry);
if (err)
return err;
err = ovl_maybe_copy_up(dentry, file->f_flags);
if (err)
return err;
......@@ -158,6 +172,9 @@ static int ovl_open(struct inode *inode, struct file *file)
file->f_flags &= ~(O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC);
ovl_path_realdata(dentry, &realpath);
if (!realpath.dentry)
return -EIO;
realfile = ovl_open_realfile(file, &realpath);
if (IS_ERR(realfile))
return PTR_ERR(realfile);
......
......@@ -97,8 +97,9 @@ int ovl_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
static void ovl_map_dev_ino(struct dentry *dentry, struct kstat *stat, int fsid)
{
bool samefs = ovl_same_fs(dentry->d_sb);
unsigned int xinobits = ovl_xino_bits(dentry->d_sb);
struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
bool samefs = ovl_same_fs(ofs);
unsigned int xinobits = ovl_xino_bits(ofs);
unsigned int xinoshift = 64 - xinobits;
if (samefs) {
......@@ -123,7 +124,7 @@ static void ovl_map_dev_ino(struct dentry *dentry, struct kstat *stat, int fsid)
stat->ino |= ((u64)fsid) << (xinoshift + 1);
stat->dev = dentry->d_sb->s_dev;
return;
} else if (ovl_xino_warn(dentry->d_sb)) {
} else if (ovl_xino_warn(ofs)) {
pr_warn_ratelimited("inode number too big (%pd2, ino=%llu, xinobits=%d)\n",
dentry, stat->ino, xinobits);
}
......@@ -149,7 +150,7 @@ static void ovl_map_dev_ino(struct dentry *dentry, struct kstat *stat, int fsid)
* is unique per underlying fs, so we use the unique anonymous
* bdev assigned to the underlying fs.
*/
stat->dev = OVL_FS(dentry->d_sb)->fs[fsid].pseudo_dev;
stat->dev = ofs->fs[fsid].pseudo_dev;
}
}
......@@ -186,7 +187,7 @@ int ovl_getattr(struct mnt_idmap *idmap, const struct path *path,
* If lower filesystem supports NFS file handles, this also guarantees
* persistent st_ino across mount cycle.
*/
if (!is_dir || ovl_same_dev(dentry->d_sb)) {
if (!is_dir || ovl_same_dev(OVL_FS(dentry->d_sb))) {
if (!OVL_TYPE_UPPER(type)) {
fsid = ovl_layer_lower(dentry)->fsid;
} else if (OVL_TYPE_ORIGIN(type)) {
......@@ -240,15 +241,22 @@ int ovl_getattr(struct mnt_idmap *idmap, const struct path *path,
/*
* If lower is not same as lowerdata or if there was
* no origin on upper, we can end up here.
* With lazy lowerdata lookup, guess lowerdata blocks
* from size to avoid lowerdata lookup on stat(2).
*/
struct kstat lowerdatastat;
u32 lowermask = STATX_BLOCKS;
ovl_path_lowerdata(dentry, &realpath);
err = vfs_getattr(&realpath, &lowerdatastat,
lowermask, flags);
if (err)
goto out;
if (realpath.dentry) {
err = vfs_getattr(&realpath, &lowerdatastat,
lowermask, flags);
if (err)
goto out;
} else {
lowerdatastat.blocks =
round_up(stat->size, stat->blksize) >> 9;
}
stat->blocks = lowerdatastat.blocks;
}
}
......@@ -288,8 +296,8 @@ int ovl_permission(struct mnt_idmap *idmap,
int err;
/* Careful in RCU walk mode */
ovl_i_path_real(inode, &realpath);
if (!realpath.dentry) {
realinode = ovl_i_path_real(inode, &realpath);
if (!realinode) {
WARN_ON(!(mask & MAY_NOT_BLOCK));
return -ECHILD;
}
......@@ -302,7 +310,6 @@ int ovl_permission(struct mnt_idmap *idmap,
if (err)
return err;
realinode = d_inode(realpath.dentry);
old_cred = ovl_override_creds(inode->i_sb);
if (!upperinode &&
!special_file(realinode->i_mode) && mask & MAY_WRITE) {
......@@ -559,20 +566,20 @@ struct posix_acl *do_ovl_get_acl(struct mnt_idmap *idmap,
struct inode *inode, int type,
bool rcu, bool noperm)
{
struct inode *realinode = ovl_inode_real(inode);
struct inode *realinode;
struct posix_acl *acl;
struct path realpath;
if (!IS_POSIXACL(realinode))
return NULL;
/* Careful in RCU walk mode */
ovl_i_path_real(inode, &realpath);
if (!realpath.dentry) {
realinode = ovl_i_path_real(inode, &realpath);
if (!realinode) {
WARN_ON(!rcu);
return ERR_PTR(-ECHILD);
}
if (!IS_POSIXACL(realinode))
return NULL;
if (rcu) {
/*
* If the layer is idmapped drop out of RCU path walk
......@@ -710,6 +717,9 @@ static int ovl_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
struct inode *realinode = ovl_inode_realdata(inode);
const struct cred *old_cred;
if (!realinode)
return -EIO;
if (!realinode->i_op->fiemap)
return -EOPNOTSUPP;
......@@ -952,7 +962,7 @@ static inline void ovl_lockdep_annotate_inode_mutex_key(struct inode *inode)
static void ovl_next_ino(struct inode *inode)
{
struct ovl_fs *ofs = inode->i_sb->s_fs_info;
struct ovl_fs *ofs = OVL_FS(inode->i_sb);
inode->i_ino = atomic_long_inc_return(&ofs->last_ino);
if (unlikely(!inode->i_ino))
......@@ -961,7 +971,8 @@ static void ovl_next_ino(struct inode *inode)
static void ovl_map_ino(struct inode *inode, unsigned long ino, int fsid)
{
int xinobits = ovl_xino_bits(inode->i_sb);
struct ovl_fs *ofs = OVL_FS(inode->i_sb);
int xinobits = ovl_xino_bits(ofs);
unsigned int xinoshift = 64 - xinobits;
/*
......@@ -972,7 +983,7 @@ static void ovl_map_ino(struct inode *inode, unsigned long ino, int fsid)
* with d_ino also causes nfsd readdirplus to fail.
*/
inode->i_ino = ino;
if (ovl_same_fs(inode->i_sb)) {
if (ovl_same_fs(ofs)) {
return;
} else if (xinobits && likely(!(ino >> xinoshift))) {
inode->i_ino |= (unsigned long)fsid << (xinoshift + 1);
......@@ -1003,14 +1014,10 @@ void ovl_inode_init(struct inode *inode, struct ovl_inode_params *oip,
struct inode *realinode;
struct ovl_inode *oi = OVL_I(inode);
if (oip->upperdentry)
oi->__upperdentry = oip->upperdentry;
if (oip->lowerpath && oip->lowerpath->dentry) {
oi->lowerpath.dentry = dget(oip->lowerpath->dentry);
oi->lowerpath.layer = oip->lowerpath->layer;
}
if (oip->lowerdata)
oi->lowerdata = igrab(d_inode(oip->lowerdata));
oi->__upperdentry = oip->upperdentry;
oi->oe = oip->oe;
oi->redirect = oip->redirect;
oi->lowerdata_redirect = oip->lowerdata_redirect;
realinode = ovl_inode_real(inode);
ovl_copyattr(inode);
......@@ -1325,7 +1332,7 @@ struct inode *ovl_get_inode(struct super_block *sb,
{
struct ovl_fs *ofs = OVL_FS(sb);
struct dentry *upperdentry = oip->upperdentry;
struct ovl_path *lowerpath = oip->lowerpath;
struct ovl_path *lowerpath = ovl_lowerpath(oip->oe);
struct inode *realinode = upperdentry ? d_inode(upperdentry) : NULL;
struct inode *inode;
struct dentry *lowerdentry = lowerpath ? lowerpath->dentry : NULL;
......@@ -1369,7 +1376,9 @@ struct inode *ovl_get_inode(struct super_block *sb,
}
dput(upperdentry);
ovl_free_entry(oip->oe);
kfree(oip->redirect);
kfree(oip->lowerdata_redirect);
goto out;
}
......@@ -1398,14 +1407,12 @@ struct inode *ovl_get_inode(struct super_block *sb,
if (oip->index)
ovl_set_flag(OVL_INDEX, inode);
OVL_I(inode)->redirect = oip->redirect;
if (bylower)
ovl_set_flag(OVL_CONST_INO, inode);
/* Check for non-merge dir that may have whiteouts */
if (is_dir) {
if (((upperdentry && lowerdentry) || oip->numlower > 1) ||
if (((upperdentry && lowerdentry) || ovl_numlower(oip->oe) > 1) ||
ovl_path_check_origin_xattr(ofs, &realpath)) {
ovl_set_flag(OVL_WHITEOUTS, inode);
}
......
......@@ -57,12 +57,27 @@ enum ovl_entry_flag {
OVL_E_CONNECTED,
};
enum {
OVL_REDIRECT_OFF, /* "off" mode is never used. In effect */
OVL_REDIRECT_FOLLOW, /* ...it translates to either "follow" */
OVL_REDIRECT_NOFOLLOW, /* ...or "nofollow". */
OVL_REDIRECT_ON,
};
enum {
OVL_XINO_OFF,
OVL_XINO_AUTO,
OVL_XINO_ON,
};
/* The set of options that user requested explicitly via mount options */
struct ovl_opt_set {
bool metacopy;
bool redirect;
bool nfs_export;
bool index;
};
/*
* The tuple (fh,uuid) is a universal unique identifier for a copy up origin,
* where:
......@@ -353,17 +368,29 @@ static inline bool ovl_open_flags_need_copy_up(int flags)
return ((OPEN_FMODE(flags) & FMODE_WRITE) || (flags & O_TRUNC));
}
static inline bool ovl_allow_offline_changes(struct ovl_fs *ofs)
{
/*
* To avoid regressions in existing setups with overlay lower offline
* changes, we allow lower changes only if none of the new features
* are used.
*/
return (!ofs->config.index && !ofs->config.metacopy &&
!ofs->config.redirect_dir && ofs->config.xino != OVL_XINO_ON);
}
/* params.c */
#define OVL_MAX_STACK 500
struct ovl_fs_context_layer {
char *name;
struct path path;
};
struct ovl_fs_context {
struct path upper;
struct path work;
size_t capacity;
size_t nr; /* includes nr_data */
size_t nr_data;
struct ovl_opt_set set;
struct ovl_fs_context_layer *lower;
};
int ovl_parse_param_upperdir(const char *name, struct fs_context *fc,
bool workdir);
int ovl_parse_param_lowerdir(const char *name, struct fs_context *fc);
void ovl_parse_param_drop_lowerdir(struct ovl_fs_context *ctx);
/* util.c */
int ovl_want_write(struct dentry *dentry);
......@@ -374,21 +401,30 @@ int ovl_can_decode_fh(struct super_block *sb);
struct dentry *ovl_indexdir(struct super_block *sb);
bool ovl_index_all(struct super_block *sb);
bool ovl_verify_lower(struct super_block *sb);
struct ovl_path *ovl_stack_alloc(unsigned int n);
void ovl_stack_cpy(struct ovl_path *dst, struct ovl_path *src, unsigned int n);
void ovl_stack_put(struct ovl_path *stack, unsigned int n);
void ovl_stack_free(struct ovl_path *stack, unsigned int n);
struct ovl_entry *ovl_alloc_entry(unsigned int numlower);
void ovl_free_entry(struct ovl_entry *oe);
bool ovl_dentry_remote(struct dentry *dentry);
void ovl_dentry_update_reval(struct dentry *dentry, struct dentry *upperdentry,
unsigned int mask);
void ovl_dentry_update_reval(struct dentry *dentry, struct dentry *realdentry);
void ovl_dentry_init_reval(struct dentry *dentry, struct dentry *upperdentry,
struct ovl_entry *oe);
void ovl_dentry_init_flags(struct dentry *dentry, struct dentry *upperdentry,
struct ovl_entry *oe, unsigned int mask);
bool ovl_dentry_weird(struct dentry *dentry);
enum ovl_path_type ovl_path_type(struct dentry *dentry);
void ovl_path_upper(struct dentry *dentry, struct path *path);
void ovl_path_lower(struct dentry *dentry, struct path *path);
void ovl_path_lowerdata(struct dentry *dentry, struct path *path);
void ovl_i_path_real(struct inode *inode, struct path *path);
struct inode *ovl_i_path_real(struct inode *inode, struct path *path);
enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path);
enum ovl_path_type ovl_path_realdata(struct dentry *dentry, struct path *path);
struct dentry *ovl_dentry_upper(struct dentry *dentry);
struct dentry *ovl_dentry_lower(struct dentry *dentry);
struct dentry *ovl_dentry_lowerdata(struct dentry *dentry);
int ovl_dentry_set_lowerdata(struct dentry *dentry, struct ovl_path *datapath);
const struct ovl_layer *ovl_i_layer_lower(struct inode *inode);
const struct ovl_layer *ovl_layer_lower(struct dentry *dentry);
struct dentry *ovl_dentry_real(struct dentry *dentry);
......@@ -398,6 +434,7 @@ struct inode *ovl_inode_lower(struct inode *inode);
struct inode *ovl_inode_lowerdata(struct inode *inode);
struct inode *ovl_inode_real(struct inode *inode);
struct inode *ovl_inode_realdata(struct inode *inode);
const char *ovl_lowerdata_redirect(struct inode *inode);
struct ovl_dir_cache *ovl_dir_cache(struct inode *inode);
void ovl_set_dir_cache(struct inode *inode, struct ovl_dir_cache *cache);
void ovl_dentry_set_flag(unsigned long flag, struct dentry *dentry);
......@@ -412,7 +449,6 @@ bool ovl_dentry_needs_data_copy_up(struct dentry *dentry, int flags);
bool ovl_dentry_needs_data_copy_up_locked(struct dentry *dentry, int flags);
bool ovl_has_upperdata(struct inode *inode);
void ovl_set_upperdata(struct inode *inode);
bool ovl_redirect_dir(struct super_block *sb);
const char *ovl_dentry_get_redirect(struct dentry *dentry);
void ovl_dentry_set_redirect(struct dentry *dentry, const char *redirect);
void ovl_inode_update(struct inode *inode, struct dentry *upperdentry);
......@@ -480,31 +516,51 @@ static inline bool ovl_is_impuredir(struct super_block *sb,
return ovl_path_check_dir_xattr(ofs, &upperpath, OVL_XATTR_IMPURE);
}
static inline bool ovl_redirect_follow(struct ovl_fs *ofs)
{
return ofs->config.redirect_mode != OVL_REDIRECT_NOFOLLOW;
}
static inline bool ovl_redirect_dir(struct ovl_fs *ofs)
{
return ofs->config.redirect_mode == OVL_REDIRECT_ON;
}
/*
* With xino=auto, we do best effort to keep all inodes on same st_dev and
* d_ino consistent with st_ino.
* With xino=on, we do the same effort but we warn if we failed.
*/
static inline bool ovl_xino_warn(struct super_block *sb)
static inline bool ovl_xino_warn(struct ovl_fs *ofs)
{
return OVL_FS(sb)->config.xino == OVL_XINO_ON;
return ofs->config.xino == OVL_XINO_ON;
}
/*
* To avoid regressions in existing setups with overlay lower offline changes,
* we allow lower changes only if none of the new features are used.
*/
static inline bool ovl_allow_offline_changes(struct ovl_fs *ofs)
{
return (!ofs->config.index && !ofs->config.metacopy &&
!ovl_redirect_dir(ofs) && !ovl_xino_warn(ofs));
}
/* All layers on same fs? */
static inline bool ovl_same_fs(struct super_block *sb)
static inline bool ovl_same_fs(struct ovl_fs *ofs)
{
return OVL_FS(sb)->xino_mode == 0;
return ofs->xino_mode == 0;
}
/* All overlay inodes have same st_dev? */
static inline bool ovl_same_dev(struct super_block *sb)
static inline bool ovl_same_dev(struct ovl_fs *ofs)
{
return OVL_FS(sb)->xino_mode >= 0;
return ofs->xino_mode >= 0;
}
static inline unsigned int ovl_xino_bits(struct super_block *sb)
static inline unsigned int ovl_xino_bits(struct ovl_fs *ofs)
{
return ovl_same_dev(sb) ? OVL_FS(sb)->xino_mode : 0;
return ovl_same_dev(ofs) ? ofs->xino_mode : 0;
}
static inline void ovl_inode_lock(struct inode *inode)
......@@ -550,6 +606,7 @@ struct dentry *ovl_get_index_fh(struct ovl_fs *ofs, struct ovl_fh *fh);
struct dentry *ovl_lookup_index(struct ovl_fs *ofs, struct dentry *upper,
struct dentry *origin, bool verify);
int ovl_path_next(int idx, struct dentry *dentry, struct path *path);
int ovl_maybe_lookup_lowerdata(struct dentry *dentry);
struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
unsigned int flags);
bool ovl_lower_positive(struct dentry *dentry);
......@@ -646,11 +703,10 @@ bool ovl_is_private_xattr(struct super_block *sb, const char *name);
struct ovl_inode_params {
struct inode *newinode;
struct dentry *upperdentry;
struct ovl_path *lowerpath;
struct ovl_entry *oe;
bool index;
unsigned int numlower;
char *redirect;
struct dentry *lowerdata;
char *lowerdata_redirect;
};
void ovl_inode_init(struct inode *inode, struct ovl_inode_params *oip,
unsigned long ino, int fsid);
......
......@@ -6,13 +6,10 @@
*/
struct ovl_config {
char *lowerdir;
char *upperdir;
char *workdir;
bool default_permissions;
bool redirect_dir;
bool redirect_follow;
const char *redirect_mode;
int redirect_mode;
bool index;
bool uuid;
bool nfs_export;
......@@ -32,6 +29,7 @@ struct ovl_sb {
};
struct ovl_layer {
/* ovl_free_fs() relies on @mnt being the first member! */
struct vfsmount *mnt;
/* Trap in ovl inode cache */
struct inode *trap;
......@@ -40,18 +38,34 @@ struct ovl_layer {
int idx;
/* One fsid per unique underlying sb (upper fsid == 0) */
int fsid;
char *name;
};
/*
* ovl_free_fs() relies on @mnt being the first member when unmounting
* the private mounts created for each layer. Let's check both the
* offset and type.
*/
static_assert(offsetof(struct ovl_layer, mnt) == 0);
static_assert(__same_type(typeof_member(struct ovl_layer, mnt), struct vfsmount *));
struct ovl_path {
const struct ovl_layer *layer;
struct dentry *dentry;
};
struct ovl_entry {
unsigned int __numlower;
struct ovl_path __lowerstack[];
};
/* private information held for overlayfs's superblock */
struct ovl_fs {
unsigned int numlayer;
/* Number of unique fs among layers including upper fs */
unsigned int numfs;
/* Number of data-only lower layers */
unsigned int numdatalayer;
const struct ovl_layer *layers;
struct ovl_sb *fs;
/* workbasedir is the path at workdir= mount option */
......@@ -70,7 +84,6 @@ struct ovl_fs {
/* Did we take the inuse lock? */
bool upperdir_locked;
bool workdir_locked;
bool share_whiteout;
/* Traps in ovl inode cache */
struct inode *workbasedir_trap;
struct inode *workdir_trap;
......@@ -79,12 +92,19 @@ struct ovl_fs {
int xino_mode;
/* For allocation of non-persistent inode numbers */
atomic_long_t last_ino;
/* Whiteout dentry cache */
/* Shared whiteout cache */
struct dentry *whiteout;
bool no_shared_whiteout;
/* r/o snapshot of upperdir sb's only taken on volatile mounts */
errseq_t errseq;
};
/* Number of lower layers, not including data-only layers */
static inline unsigned int ovl_numlowerlayer(struct ovl_fs *ofs)
{
return ofs->numlayer - ofs->numdatalayer - 1;
}
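/*
 * Hypothetical worked example of this accounting (illustration only; it
 * assumes the data-only layers sit after the regular lower layers in
 * ofs->layers):
 *
 *   upperdir=/upper, lowerdir=/l1:/l2::/do1
 *     layers[0] = upper, layers[1] = /l1, layers[2] = /l2, layers[3] = /do1
 *     numlayer = 4, numdatalayer = 1
 *     ovl_numlowerlayer() = 4 - 1 - 1 = 2   (only the regular lower layers)
 */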
static inline struct vfsmount *ovl_upper_mnt(struct ovl_fs *ofs)
{
return ofs->layers[0].mnt;
......@@ -105,36 +125,53 @@ static inline bool ovl_should_sync(struct ovl_fs *ofs)
return !ofs->config.ovl_volatile;
}
/* private information held for every overlayfs dentry */
struct ovl_entry {
union {
struct {
unsigned long flags;
};
struct rcu_head rcu;
};
unsigned numlower;
struct ovl_path lowerstack[];
};
static inline unsigned int ovl_numlower(struct ovl_entry *oe)
{
return oe ? oe->__numlower : 0;
}
struct ovl_entry *ovl_alloc_entry(unsigned int numlower);
static inline struct ovl_path *ovl_lowerstack(struct ovl_entry *oe)
{
return ovl_numlower(oe) ? oe->__lowerstack : NULL;
}
static inline struct ovl_entry *OVL_E(struct dentry *dentry)
static inline struct ovl_path *ovl_lowerpath(struct ovl_entry *oe)
{
return (struct ovl_entry *) dentry->d_fsdata;
return ovl_lowerstack(oe);
}
static inline struct ovl_path *ovl_lowerdata(struct ovl_entry *oe)
{
struct ovl_path *lowerstack = ovl_lowerstack(oe);
return lowerstack ? &lowerstack[oe->__numlower - 1] : NULL;
}
/* May return NULL if lazy lookup of lowerdata is needed */
static inline struct dentry *ovl_lowerdata_dentry(struct ovl_entry *oe)
{
struct ovl_path *lowerdata = ovl_lowerdata(oe);
return lowerdata ? READ_ONCE(lowerdata->dentry) : NULL;
}
/* private information held for every overlayfs dentry */
static inline unsigned long *OVL_E_FLAGS(struct dentry *dentry)
{
return (unsigned long *) &dentry->d_fsdata;
}
struct ovl_inode {
union {
struct ovl_dir_cache *cache; /* directory */
struct inode *lowerdata; /* regular file */
const char *lowerdata_redirect; /* regular file */
};
const char *redirect;
u64 version;
unsigned long flags;
struct inode vfs_inode;
struct dentry *__upperdentry;
struct ovl_path lowerpath;
struct ovl_entry *oe;
/* synchronize copy up and more */
struct mutex lock;
......@@ -145,6 +182,16 @@ static inline struct ovl_inode *OVL_I(struct inode *inode)
return container_of(inode, struct ovl_inode, vfs_inode);
}
static inline struct ovl_entry *OVL_I_E(struct inode *inode)
{
return inode ? OVL_I(inode)->oe : NULL;
}
static inline struct ovl_entry *OVL_E(struct dentry *dentry)
{
return OVL_I_E(d_inode(dentry));
}
static inline struct dentry *ovl_upperdentry_dereference(struct ovl_inode *oi)
{
return READ_ONCE(oi->__upperdentry);
......
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/posix_acl_xattr.h>
#include <linux/xattr.h>
#include "overlayfs.h"
static ssize_t ovl_parse_param_split_lowerdirs(char *str)
{
ssize_t nr_layers = 1, nr_colons = 0;
char *s, *d;
for (s = d = str;; s++, d++) {
if (*s == '\\') {
s++;
} else if (*s == ':') {
bool next_colon = (*(s + 1) == ':');
nr_colons++;
if (nr_colons == 2 && next_colon) {
pr_err("only single ':' or double '::' sequences of unescaped colons in lowerdir mount option allowed.\n");
return -EINVAL;
}
/* count layers, not colons */
if (!next_colon)
nr_layers++;
*d = '\0';
continue;
}
*d = *s;
if (!*s) {
/* trailing colons */
if (nr_colons) {
pr_err("unescaped trailing colons in lowerdir mount option.\n");
return -EINVAL;
}
break;
}
nr_colons = 0;
}
return nr_layers;
}
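/*
 * Illustration (added comment, not part of the patch): for the input
 * "/l1:/l2::/do1" the splitter above rewrites the buffer in place to
 * "/l1\0/l2\0\0/do1" and returns 3.  An escaped colon ("\:") is unescaped
 * rather than treated as a separator, and the empty element left behind by
 * "::" is what ovl_parse_param_lowerdir() later uses to flag /do1 as a
 * data-only layer.
 */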
static int ovl_mount_dir_noesc(const char *name, struct path *path)
{
int err = -EINVAL;
if (!*name) {
pr_err("empty lowerdir\n");
goto out;
}
err = kern_path(name, LOOKUP_FOLLOW, path);
if (err) {
pr_err("failed to resolve '%s': %i\n", name, err);
goto out;
}
err = -EINVAL;
if (ovl_dentry_weird(path->dentry)) {
pr_err("filesystem on '%s' not supported\n", name);
goto out_put;
}
if (!d_is_dir(path->dentry)) {
pr_err("'%s' not a directory\n", name);
goto out_put;
}
return 0;
out_put:
path_put_init(path);
out:
return err;
}
static void ovl_unescape(char *s)
{
char *d = s;
for (;; s++, d++) {
if (*s == '\\')
s++;
*d = *s;
if (!*s)
break;
}
}
static int ovl_mount_dir(const char *name, struct path *path)
{
int err = -ENOMEM;
char *tmp = kstrdup(name, GFP_KERNEL);
if (tmp) {
ovl_unescape(tmp);
err = ovl_mount_dir_noesc(tmp, path);
if (!err && path->dentry->d_flags & DCACHE_OP_REAL) {
pr_err("filesystem on '%s' not supported as upperdir\n",
tmp);
path_put_init(path);
err = -EINVAL;
}
kfree(tmp);
}
return err;
}
int ovl_parse_param_upperdir(const char *name, struct fs_context *fc,
bool workdir)
{
int err;
struct ovl_fs *ofs = fc->s_fs_info;
struct ovl_config *config = &ofs->config;
struct ovl_fs_context *ctx = fc->fs_private;
struct path path;
char *dup;
err = ovl_mount_dir(name, &path);
if (err)
return err;
/*
* Check whether upper path is read-only here to report failures
* early. Don't forget to recheck when the superblock is created
* as the mount attributes could change.
*/
if (__mnt_is_readonly(path.mnt)) {
path_put(&path);
return -EINVAL;
}
dup = kstrdup(name, GFP_KERNEL);
if (!dup) {
path_put(&path);
return -ENOMEM;
}
if (workdir) {
kfree(config->workdir);
config->workdir = dup;
path_put(&ctx->work);
ctx->work = path;
} else {
kfree(config->upperdir);
config->upperdir = dup;
path_put(&ctx->upper);
ctx->upper = path;
}
return 0;
}
void ovl_parse_param_drop_lowerdir(struct ovl_fs_context *ctx)
{
for (size_t nr = 0; nr < ctx->nr; nr++) {
path_put(&ctx->lower[nr].path);
kfree(ctx->lower[nr].name);
ctx->lower[nr].name = NULL;
}
ctx->nr = 0;
ctx->nr_data = 0;
}
/*
* Parse lowerdir= mount option:
*
* (1) lowerdir=/lower1:/lower2:/lower3::/data1::/data2
* Set "/lower1", "/lower2", and "/lower3" as lower layers and
* "/data1" and "/data2" as data lower layers. Any existing lower
* layers are replaced.
* (2) lowerdir=:/lower4
* Append "/lower4" to current stack of lower layers. This requires
* that there already is at least one lower layer configured.
* (3) lowerdir=::/lower5
* Append data "/lower5" as data lower layer. This requires that
* there's at least one regular lower layer present.
*/
int ovl_parse_param_lowerdir(const char *name, struct fs_context *fc)
{
int err;
struct ovl_fs_context *ctx = fc->fs_private;
struct ovl_fs_context_layer *l;
char *dup = NULL, *dup_iter;
ssize_t nr_lower = 0, nr = 0, nr_data = 0;
bool append = false, data_layer = false;
/*
* Ensure we're backwards compatible with mount(2)
* by allowing relative paths.
*/
/* drop all existing lower layers */
if (!*name) {
ovl_parse_param_drop_lowerdir(ctx);
return 0;
}
if (strncmp(name, "::", 2) == 0) {
/*
* This is a data layer.
* There must be at least one regular lower layer
* specified.
*/
if (ctx->nr == 0) {
pr_err("data lower layers without regular lower layers not allowed");
return -EINVAL;
}
/* Skip the leading "::". */
name += 2;
data_layer = true;
/*
* A data layer is automatically an append as there
* must've been at least one regular lower layer.
*/
append = true;
} else if (*name == ':') {
/*
* This is a regular lower layer.
* If users want to append a layer enforce that they
* have already specified a first layer before. It's
* better to be strict.
*/
if (ctx->nr == 0) {
pr_err("cannot append layer if no previous layer has been specified");
return -EINVAL;
}
/*
* Once a sequence of data layers has started regular
* lower layers are forbidden.
*/
if (ctx->nr_data > 0) {
pr_err("regular lower layers cannot follow data lower layers");
return -EINVAL;
}
/* Skip the leading ":". */
name++;
append = true;
}
dup = kstrdup(name, GFP_KERNEL);
if (!dup)
return -ENOMEM;
err = -EINVAL;
nr_lower = ovl_parse_param_split_lowerdirs(dup);
if (nr_lower < 0)
goto out_err;
if ((nr_lower > OVL_MAX_STACK) ||
(append && (size_add(ctx->nr, nr_lower) > OVL_MAX_STACK))) {
pr_err("too many lower directories, limit is %d\n", OVL_MAX_STACK);
goto out_err;
}
if (!append)
ovl_parse_param_drop_lowerdir(ctx);
/*
* (1) append
*
* We want nr <= nr_lower <= capacity. We know nr > 0 and nr <=
* capacity. If nr == 0 this wouldn't be append. If nr +
* nr_lower is <= capacity then nr <= nr_lower <= capacity
* already holds. If nr + nr_lower exceeds capacity, we realloc.
*
* (2) replace
*
* Ensure we're backwards compatible with mount(2) which allows
* "lowerdir=/a:/b:/c,lowerdir=/d:/e:/f" causing the last
* specified lowerdir mount option to win.
*
* We want nr <= nr_lower <= capacity. We know either (i) nr == 0
* or (ii) nr > 0. We also know nr_lower > 0. The capacity
* could've been changed multiple times already so we only know
* nr <= capacity. If nr + nr_lower > capacity we realloc,
* otherwise nr <= nr_lower <= capacity holds already.
*/
nr_lower += ctx->nr;
if (nr_lower > ctx->capacity) {
err = -ENOMEM;
l = krealloc_array(ctx->lower, nr_lower, sizeof(*ctx->lower),
GFP_KERNEL_ACCOUNT);
if (!l)
goto out_err;
ctx->lower = l;
ctx->capacity = nr_lower;
}
/*
* (3) By (1) and (2) we know nr <= nr_lower <= capacity.
* (4) If ctx->nr == 0 => replace
* We have verified above that the lowerdir mount option
* isn't an append, i.e., the lowerdir mount option
* doesn't start with ":" or "::".
* (4.1) The lowerdir mount options only contains regular lower
* layers ":".
* => Nothing to verify.
* (4.2) The lowerdir mount options contains regular ":" and
* data "::" layers.
* => We need to verify that data lower layers "::" aren't
* followed by regular ":" lower layers
* (5) If ctx->nr > 0 => append
* We know that there's at least one regular layer
* otherwise we would've failed when parsing the previous
* lowerdir mount option.
* (5.1) The lowerdir mount option is a regular layer ":" append
* => We need to verify that no data layers have been
* specified before.
* (5.2) The lowerdir mount option is a data layer "::" append
* We know that there's at least one regular layer or
* other data layers. => There's nothing to verify.
*/
dup_iter = dup;
for (nr = ctx->nr; nr < nr_lower; nr++) {
l = &ctx->lower[nr];
memset(l, 0, sizeof(*l));
err = ovl_mount_dir_noesc(dup_iter, &l->path);
if (err)
goto out_put;
err = -ENOMEM;
l->name = kstrdup(dup_iter, GFP_KERNEL_ACCOUNT);
if (!l->name)
goto out_put;
if (data_layer)
nr_data++;
/* Calling strchr() again would overrun. */
if ((nr + 1) == nr_lower)
break;
err = -EINVAL;
dup_iter = strchr(dup_iter, '\0') + 1;
if (*dup_iter) {
/*
* This is a regular layer so we require that
* there are no data layers.
*/
if ((ctx->nr_data + nr_data) > 0) {
pr_err("regular lower layers cannot follow data lower layers");
goto out_put;
}
data_layer = false;
continue;
}
/* This is a data lower layer. */
data_layer = true;
dup_iter++;
}
ctx->nr = nr_lower;
ctx->nr_data += nr_data;
kfree(dup);
return 0;
out_put:
/*
* We know ctx->nr <= nr < nr_lower. If we failed somewhere
* we want to undo until nr == ctx->nr. This is correct for
* both ctx->nr == 0 and ctx->nr > 0.
*/
for (; nr >= ctx->nr; nr--) {
l = &ctx->lower[nr];
kfree(l->name);
l->name = NULL;
path_put(&l->path);
/* don't overflow */
if (nr == 0)
break;
}
out_err:
kfree(dup);
/* Intentionally don't realloc to a smaller size. */
return err;
}
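/*
 * Hypothetical userspace illustration (not from this patch) of the three
 * lowerdir modes documented above, as repeated fsconfig(2) calls on one
 * overlay fs_context (fsfd from fsopen("overlay", 0); most libcs need
 * syscall(2) to reach fsconfig):
 *
 *   fsconfig(fsfd, FSCONFIG_SET_STRING, "lowerdir", "/l1:/l2", 0); // (1) replace the stack
 *   fsconfig(fsfd, FSCONFIG_SET_STRING, "lowerdir", ":/l3", 0);    // (2) append a regular layer
 *   fsconfig(fsfd, FSCONFIG_SET_STRING, "lowerdir", "::/do1", 0);  // (3) append a data-only layer
 */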
......@@ -118,7 +118,7 @@ static bool ovl_calc_d_ino(struct ovl_readdir_data *rdd,
return false;
/* Always recalc d_ino when remapping lower inode numbers */
if (ovl_xino_bits(rdd->dentry->d_sb))
if (ovl_xino_bits(OVL_FS(rdd->dentry->d_sb)))
return true;
/* Always recalc d_ino for parent */
......@@ -460,13 +460,14 @@ static int ovl_cache_update_ino(const struct path *path, struct ovl_cache_entry
{
struct dentry *dir = path->dentry;
struct ovl_fs *ofs = OVL_FS(dir->d_sb);
struct dentry *this = NULL;
enum ovl_path_type type;
u64 ino = p->real_ino;
int xinobits = ovl_xino_bits(dir->d_sb);
int xinobits = ovl_xino_bits(ofs);
int err = 0;
if (!ovl_same_dev(dir->d_sb))
if (!ovl_same_dev(ofs))
goto out;
if (p->name[0] == '.') {
......@@ -515,7 +516,7 @@ static int ovl_cache_update_ino(const struct path *path, struct ovl_cache_entry
ino = ovl_remap_lower_ino(ino, xinobits,
ovl_layer_lower(this)->fsid,
p->name, p->len,
ovl_xino_warn(dir->d_sb));
ovl_xino_warn(ofs));
}
out:
......@@ -694,12 +695,13 @@ static int ovl_iterate_real(struct file *file, struct dir_context *ctx)
int err;
struct ovl_dir_file *od = file->private_data;
struct dentry *dir = file->f_path.dentry;
struct ovl_fs *ofs = OVL_FS(dir->d_sb);
const struct ovl_layer *lower_layer = ovl_layer_lower(dir);
struct ovl_readdir_translate rdt = {
.ctx.actor = ovl_fill_real,
.orig_ctx = ctx,
.xinobits = ovl_xino_bits(dir->d_sb),
.xinowarn = ovl_xino_warn(dir->d_sb),
.xinobits = ovl_xino_bits(ofs),
.xinowarn = ovl_xino_warn(ofs),
};
if (rdt.xinobits && lower_layer)
......@@ -735,6 +737,7 @@ static int ovl_iterate(struct file *file, struct dir_context *ctx)
{
struct ovl_dir_file *od = file->private_data;
struct dentry *dentry = file->f_path.dentry;
struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
struct ovl_cache_entry *p;
const struct cred *old_cred;
int err;
......@@ -749,8 +752,8 @@ static int ovl_iterate(struct file *file, struct dir_context *ctx)
* dir is impure then need to adjust d_ino for copied up
* entries.
*/
if (ovl_xino_bits(dentry->d_sb) ||
(ovl_same_fs(dentry->d_sb) &&
if (ovl_xino_bits(ofs) ||
(ovl_same_fs(ofs) &&
(ovl_is_impure_dir(file) ||
OVL_TYPE_MERGE(ovl_path_type(dentry->d_parent))))) {
err = ovl_iterate_real(file, ctx);
......