Commit 07f1e596 authored by Amir Goldstein, committed by Miklos Szeredi

ovl: generalize the lower_fs[] array

Rename lower_fs[] array to fs[], extend its size by one and use index fsid
(instead of fsid-1) to access the fs[] array.

Initialize fs[0] with upper fs values. fsid 0 is reserved even with lower
only overlay, so fs[0] remains null in this case.
Signed-off-by: Amir Goldstein <amir73il@gmail.com>
Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
parent 0f831ec8
...@@ -75,8 +75,7 @@ int ovl_setattr(struct dentry *dentry, struct iattr *attr) ...@@ -75,8 +75,7 @@ int ovl_setattr(struct dentry *dentry, struct iattr *attr)
return err; return err;
} }
static int ovl_map_dev_ino(struct dentry *dentry, struct kstat *stat, static int ovl_map_dev_ino(struct dentry *dentry, struct kstat *stat, int fsid)
struct ovl_layer *lower_layer)
{ {
bool samefs = ovl_same_fs(dentry->d_sb); bool samefs = ovl_same_fs(dentry->d_sb);
unsigned int xinobits = ovl_xino_bits(dentry->d_sb); unsigned int xinobits = ovl_xino_bits(dentry->d_sb);
...@@ -103,9 +102,7 @@ static int ovl_map_dev_ino(struct dentry *dentry, struct kstat *stat, ...@@ -103,9 +102,7 @@ static int ovl_map_dev_ino(struct dentry *dentry, struct kstat *stat,
pr_warn_ratelimited("inode number too big (%pd2, ino=%llu, xinobits=%d)\n", pr_warn_ratelimited("inode number too big (%pd2, ino=%llu, xinobits=%d)\n",
dentry, stat->ino, xinobits); dentry, stat->ino, xinobits);
} else { } else {
if (lower_layer) stat->ino |= ((u64)fsid) << shift;
stat->ino |= ((u64)lower_layer->fsid) << shift;
stat->dev = dentry->d_sb->s_dev; stat->dev = dentry->d_sb->s_dev;
return 0; return 0;
} }
...@@ -124,7 +121,7 @@ static int ovl_map_dev_ino(struct dentry *dentry, struct kstat *stat, ...@@ -124,7 +121,7 @@ static int ovl_map_dev_ino(struct dentry *dentry, struct kstat *stat,
*/ */
stat->dev = dentry->d_sb->s_dev; stat->dev = dentry->d_sb->s_dev;
stat->ino = dentry->d_inode->i_ino; stat->ino = dentry->d_inode->i_ino;
} else if (lower_layer && lower_layer->fsid) { } else if (fsid) {
/* /*
* For non-samefs setup, if we cannot map all layers st_ino * For non-samefs setup, if we cannot map all layers st_ino
* to a unified address space, we need to make sure that st_dev * to a unified address space, we need to make sure that st_dev
...@@ -132,7 +129,7 @@ static int ovl_map_dev_ino(struct dentry *dentry, struct kstat *stat, ...@@ -132,7 +129,7 @@ static int ovl_map_dev_ino(struct dentry *dentry, struct kstat *stat,
* lower layers use the unique anonymous bdev assigned to the * lower layers use the unique anonymous bdev assigned to the
* lower fs. * lower fs.
*/ */
stat->dev = lower_layer->fs->pseudo_dev; stat->dev = OVL_FS(dentry->d_sb)->fs[fsid].pseudo_dev;
} }
return 0; return 0;
...@@ -147,7 +144,7 @@ int ovl_getattr(const struct path *path, struct kstat *stat, ...@@ -147,7 +144,7 @@ int ovl_getattr(const struct path *path, struct kstat *stat,
const struct cred *old_cred; const struct cred *old_cred;
bool is_dir = S_ISDIR(dentry->d_inode->i_mode); bool is_dir = S_ISDIR(dentry->d_inode->i_mode);
bool samefs = ovl_same_fs(dentry->d_sb); bool samefs = ovl_same_fs(dentry->d_sb);
struct ovl_layer *lower_layer = NULL; int fsid = 0;
int err; int err;
bool metacopy_blocks = false; bool metacopy_blocks = false;
...@@ -170,7 +167,7 @@ int ovl_getattr(const struct path *path, struct kstat *stat, ...@@ -170,7 +167,7 @@ int ovl_getattr(const struct path *path, struct kstat *stat,
*/ */
if (!is_dir || ovl_same_dev(dentry->d_sb)) { if (!is_dir || ovl_same_dev(dentry->d_sb)) {
if (!OVL_TYPE_UPPER(type)) { if (!OVL_TYPE_UPPER(type)) {
lower_layer = ovl_layer_lower(dentry); fsid = ovl_layer_lower(dentry)->fsid;
} else if (OVL_TYPE_ORIGIN(type)) { } else if (OVL_TYPE_ORIGIN(type)) {
struct kstat lowerstat; struct kstat lowerstat;
u32 lowermask = STATX_INO | STATX_BLOCKS | u32 lowermask = STATX_INO | STATX_BLOCKS |
...@@ -200,13 +197,13 @@ int ovl_getattr(const struct path *path, struct kstat *stat, ...@@ -200,13 +197,13 @@ int ovl_getattr(const struct path *path, struct kstat *stat,
if (ovl_test_flag(OVL_INDEX, d_inode(dentry)) || if (ovl_test_flag(OVL_INDEX, d_inode(dentry)) ||
(!ovl_verify_lower(dentry->d_sb) && (!ovl_verify_lower(dentry->d_sb) &&
(is_dir || lowerstat.nlink == 1))) { (is_dir || lowerstat.nlink == 1))) {
lower_layer = ovl_layer_lower(dentry); fsid = ovl_layer_lower(dentry)->fsid;
/* /*
* Cannot use origin st_dev;st_ino because * Cannot use origin st_dev;st_ino because
* origin inode content may differ from overlay * origin inode content may differ from overlay
* inode content. * inode content.
*/ */
if (samefs || lower_layer->fsid) if (samefs || fsid)
stat->ino = lowerstat.ino; stat->ino = lowerstat.ino;
} }
...@@ -241,7 +238,7 @@ int ovl_getattr(const struct path *path, struct kstat *stat, ...@@ -241,7 +238,7 @@ int ovl_getattr(const struct path *path, struct kstat *stat,
} }
} }
err = ovl_map_dev_ino(dentry, stat, lower_layer); err = ovl_map_dev_ino(dentry, stat, fsid);
if (err) if (err)
goto out; goto out;
......
...@@ -46,10 +46,10 @@ struct ovl_path { ...@@ -46,10 +46,10 @@ struct ovl_path {
struct ovl_fs { struct ovl_fs {
struct vfsmount *upper_mnt; struct vfsmount *upper_mnt;
unsigned int numlayer; unsigned int numlayer;
/* Number of unique lower sb that differ from upper sb */ /* Number of unique fs among layers including upper fs */
unsigned int numlowerfs; unsigned int numfs;
struct ovl_layer *layers; struct ovl_layer *layers;
struct ovl_sb *lower_fs; struct ovl_sb *fs;
/* workbasedir is the path at workdir= mount option */ /* workbasedir is the path at workdir= mount option */
struct dentry *workbasedir; struct dentry *workbasedir;
/* workdir is the 'work' directory under workbasedir */ /* workdir is the 'work' directory under workbasedir */
......
...@@ -228,10 +228,11 @@ static void ovl_free_fs(struct ovl_fs *ofs) ...@@ -228,10 +228,11 @@ static void ovl_free_fs(struct ovl_fs *ofs)
iput(ofs->layers[i].trap); iput(ofs->layers[i].trap);
mntput(ofs->layers[i].mnt); mntput(ofs->layers[i].mnt);
} }
for (i = 0; i < ofs->numlowerfs; i++)
free_anon_bdev(ofs->lower_fs[i].pseudo_dev);
kfree(ofs->layers); kfree(ofs->layers);
kfree(ofs->lower_fs); /* fs[0].pseudo_dev is either null or real upper st_dev */
for (i = 1; i < ofs->numfs; i++)
free_anon_bdev(ofs->fs[i].pseudo_dev);
kfree(ofs->fs);
kfree(ofs->config.lowerdir); kfree(ofs->config.lowerdir);
kfree(ofs->config.upperdir); kfree(ofs->config.upperdir);
...@@ -1259,7 +1260,7 @@ static bool ovl_lower_uuid_ok(struct ovl_fs *ofs, const uuid_t *uuid) ...@@ -1259,7 +1260,7 @@ static bool ovl_lower_uuid_ok(struct ovl_fs *ofs, const uuid_t *uuid)
if (!ofs->config.nfs_export && !ofs->upper_mnt) if (!ofs->config.nfs_export && !ofs->upper_mnt)
return true; return true;
for (i = 0; i < ofs->numlowerfs; i++) { for (i = 1; i < ofs->numfs; i++) {
/* /*
* We use uuid to associate an overlay lower file handle with a * We use uuid to associate an overlay lower file handle with a
* lower layer, so we can accept lower fs with null uuid as long * lower layer, so we can accept lower fs with null uuid as long
...@@ -1267,8 +1268,8 @@ static bool ovl_lower_uuid_ok(struct ovl_fs *ofs, const uuid_t *uuid) ...@@ -1267,8 +1268,8 @@ static bool ovl_lower_uuid_ok(struct ovl_fs *ofs, const uuid_t *uuid)
* if we detect multiple lower fs with the same uuid, we * if we detect multiple lower fs with the same uuid, we
* disable lower file handle decoding on all of them. * disable lower file handle decoding on all of them.
*/ */
if (uuid_equal(&ofs->lower_fs[i].sb->s_uuid, uuid)) { if (uuid_equal(&ofs->fs[i].sb->s_uuid, uuid)) {
ofs->lower_fs[i].bad_uuid = true; ofs->fs[i].bad_uuid = true;
return false; return false;
} }
} }
...@@ -1284,13 +1285,9 @@ static int ovl_get_fsid(struct ovl_fs *ofs, const struct path *path) ...@@ -1284,13 +1285,9 @@ static int ovl_get_fsid(struct ovl_fs *ofs, const struct path *path)
int err; int err;
bool bad_uuid = false; bool bad_uuid = false;
/* fsid 0 is reserved for upper fs even with non upper overlay */ for (i = 0; i < ofs->numfs; i++) {
if (ofs->upper_mnt && ofs->upper_mnt->mnt_sb == sb) if (ofs->fs[i].sb == sb)
return 0; return i;
for (i = 0; i < ofs->numlowerfs; i++) {
if (ofs->lower_fs[i].sb == sb)
return i + 1;
} }
if (!ovl_lower_uuid_ok(ofs, &sb->s_uuid)) { if (!ovl_lower_uuid_ok(ofs, &sb->s_uuid)) {
...@@ -1311,12 +1308,11 @@ static int ovl_get_fsid(struct ovl_fs *ofs, const struct path *path) ...@@ -1311,12 +1308,11 @@ static int ovl_get_fsid(struct ovl_fs *ofs, const struct path *path)
return err; return err;
} }
ofs->lower_fs[ofs->numlowerfs].sb = sb; ofs->fs[ofs->numfs].sb = sb;
ofs->lower_fs[ofs->numlowerfs].pseudo_dev = dev; ofs->fs[ofs->numfs].pseudo_dev = dev;
ofs->lower_fs[ofs->numlowerfs].bad_uuid = bad_uuid; ofs->fs[ofs->numfs].bad_uuid = bad_uuid;
ofs->numlowerfs++;
return ofs->numlowerfs; return ofs->numfs++;
} }
static int ovl_get_layers(struct super_block *sb, struct ovl_fs *ofs, static int ovl_get_layers(struct super_block *sb, struct ovl_fs *ofs,
...@@ -1331,17 +1327,27 @@ static int ovl_get_layers(struct super_block *sb, struct ovl_fs *ofs, ...@@ -1331,17 +1327,27 @@ static int ovl_get_layers(struct super_block *sb, struct ovl_fs *ofs,
if (ofs->layers == NULL) if (ofs->layers == NULL)
goto out; goto out;
ofs->lower_fs = kcalloc(numlower, sizeof(struct ovl_sb), ofs->fs = kcalloc(numlower + 1, sizeof(struct ovl_sb), GFP_KERNEL);
GFP_KERNEL); if (ofs->fs == NULL)
if (ofs->lower_fs == NULL)
goto out; goto out;
/* idx 0 is reserved for upper fs even with lower only overlay */ /* idx/fsid 0 are reserved for upper fs even with lower only overlay */
ofs->numfs++;
ofs->layers[0].mnt = ofs->upper_mnt; ofs->layers[0].mnt = ofs->upper_mnt;
ofs->layers[0].idx = 0; ofs->layers[0].idx = 0;
ofs->layers[0].fsid = 0; ofs->layers[0].fsid = 0;
ofs->numlayer = 1; ofs->numlayer = 1;
/*
* All lower layers that share the same fs as upper layer, use the real
* upper st_dev.
*/
if (ofs->upper_mnt) {
ofs->fs[0].sb = ofs->upper_mnt->mnt_sb;
ofs->fs[0].pseudo_dev = ofs->upper_mnt->mnt_sb->s_dev;
}
for (i = 0; i < numlower; i++) { for (i = 0; i < numlower; i++) {
struct vfsmount *mnt; struct vfsmount *mnt;
struct inode *trap; struct inode *trap;
...@@ -1379,10 +1385,7 @@ static int ovl_get_layers(struct super_block *sb, struct ovl_fs *ofs, ...@@ -1379,10 +1385,7 @@ static int ovl_get_layers(struct super_block *sb, struct ovl_fs *ofs,
ofs->layers[ofs->numlayer].mnt = mnt; ofs->layers[ofs->numlayer].mnt = mnt;
ofs->layers[ofs->numlayer].idx = ofs->numlayer; ofs->layers[ofs->numlayer].idx = ofs->numlayer;
ofs->layers[ofs->numlayer].fsid = fsid; ofs->layers[ofs->numlayer].fsid = fsid;
if (fsid) { ofs->layers[ofs->numlayer].fs = &ofs->fs[fsid];
ofs->layers[ofs->numlayer].fs =
&ofs->lower_fs[fsid - 1];
}
ofs->numlayer++; ofs->numlayer++;
} }
...@@ -1394,18 +1397,18 @@ static int ovl_get_layers(struct super_block *sb, struct ovl_fs *ofs, ...@@ -1394,18 +1397,18 @@ static int ovl_get_layers(struct super_block *sb, struct ovl_fs *ofs,
* bits reserved for fsid, it emits a warning and uses the original * bits reserved for fsid, it emits a warning and uses the original
* inode number. * inode number.
*/ */
if (!ofs->numlowerfs || (ofs->numlowerfs == 1 && !ofs->upper_mnt)) { if (ofs->numfs - !ofs->upper_mnt == 1) {
if (ofs->config.xino == OVL_XINO_ON) if (ofs->config.xino == OVL_XINO_ON)
pr_info("\"xino=on\" is useless with all layers on same fs, ignore.\n"); pr_info("\"xino=on\" is useless with all layers on same fs, ignore.\n");
ofs->xino_mode = 0; ofs->xino_mode = 0;
} else if (ofs->config.xino == OVL_XINO_ON && ofs->xino_mode < 0) { } else if (ofs->config.xino == OVL_XINO_ON && ofs->xino_mode < 0) {
/* /*
* This is a roundup of number of bits needed for numlowerfs+1 * This is a roundup of number of bits needed for encoding
* (i.e. ilog2(numlowerfs+1 - 1) + 1). fsid 0 is reserved for * fsid, where fsid 0 is reserved for upper fs even with
* upper fs even with non upper overlay. * lower only overlay.
*/ */
BUILD_BUG_ON(ilog2(OVL_MAX_STACK) > 31); BUILD_BUG_ON(ilog2(OVL_MAX_STACK) > 31);
ofs->xino_mode = ilog2(ofs->numlowerfs) + 1; ofs->xino_mode = ilog2(ofs->numfs - 1) + 1;
} }
if (ofs->xino_mode > 0) { if (ofs->xino_mode > 0) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment