Commit 09edf4d3 authored by Matthew Bobrowski, committed by Theodore Ts'o

ext4: introduce new callback for IOMAP_REPORT

As part of the ext4_iomap_begin() cleanups that precede this patch, we
also split up the IOMAP_REPORT branch into a completely separate
->iomap_begin() callback named ext4_iomap_begin_report(). Again, the
rationale for this change is to reduce the overall clutter within
ext4_iomap_begin().
Signed-off-by: Matthew Bobrowski <mbobrowski@mbobrowski.org>
Reviewed-by: Jan Kara <jack@suse.cz>
Reviewed-by: Ritesh Harjani <riteshh@linux.ibm.com>
Link: https://lore.kernel.org/r/5c97a569e26ddb6696e3d3ac9fbde41317e029a0.1572949325.git.mbobrowski@mbobrowski.org
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
parent f063db5e
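Not part of the patch itself, but useful context for the fs/ext4 seek hunk below: userspace SEEK_HOLE/SEEK_DATA via lseek(2) is the interface that ext4_llseek() now serves through ext4_iomap_report_ops. A minimal, hypothetical userspace sketch (assumes Linux with glibc and _GNU_SOURCE) that walks a file's data segments:

/* seekmap.c - illustration only, not part of this commit. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        if (argc != 2) {
                fprintf(stderr, "usage: %s <file>\n", argv[0]);
                return 1;
        }

        int fd = open(argv[1], O_RDONLY);
        if (fd < 0) {
                perror("open");
                return 1;
        }

        off_t end = lseek(fd, 0, SEEK_END);
        off_t data = lseek(fd, 0, SEEK_DATA);

        while (data >= 0 && data < end) {
                /* SEEK_HOLE finds where the current data segment ends. */
                off_t hole = lseek(fd, data, SEEK_HOLE);
                if (hole < 0)
                        break;
                printf("data: [%lld, %lld)\n", (long long)data, (long long)hole);
                /* SEEK_DATA jumps to the next data segment (fails with ENXIO past EOF). */
                data = lseek(fd, hole, SEEK_DATA);
        }

        close(fd);
        return 0;
}

Running this against a sparse file on ext4 exercises exactly the iomap_seek_hole()/iomap_seek_data() calls that this patch repoints at the new report ops.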
@@ -3388,6 +3388,7 @@ static inline void ext4_clear_io_unwritten_flag(ext4_io_end_t *io_end)
 }
 extern const struct iomap_ops ext4_iomap_ops;
+extern const struct iomap_ops ext4_iomap_report_ops;
 static inline int ext4_buffer_uptodate(struct buffer_head *bh)
 {
...
@@ -494,12 +494,14 @@ loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
                                         maxbytes, i_size_read(inode));
         case SEEK_HOLE:
                 inode_lock_shared(inode);
-                offset = iomap_seek_hole(inode, offset, &ext4_iomap_ops);
+                offset = iomap_seek_hole(inode, offset,
+                                         &ext4_iomap_report_ops);
                 inode_unlock_shared(inode);
                 break;
         case SEEK_DATA:
                 inode_lock_shared(inode);
-                offset = iomap_seek_data(inode, offset, &ext4_iomap_ops);
+                offset = iomap_seek_data(inode, offset,
+                                         &ext4_iomap_report_ops);
                 inode_unlock_shared(inode);
                 break;
         }
...
@@ -3553,74 +3553,32 @@ static int ext4_iomap_alloc(struct inode *inode, struct ext4_map_blocks *map,
 static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
                             unsigned flags, struct iomap *iomap, struct iomap *srcmap)
 {
-        unsigned int blkbits = inode->i_blkbits;
-        unsigned long first_block, last_block;
-        struct ext4_map_blocks map;
-        bool delalloc = false;
         int ret;
+        struct ext4_map_blocks map;
+        u8 blkbits = inode->i_blkbits;
         if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK)
                 return -EINVAL;
-        first_block = offset >> blkbits;
-        last_block = min_t(loff_t, (offset + length - 1) >> blkbits,
-                           EXT4_MAX_LOGICAL_BLOCK);
-        if (flags & IOMAP_REPORT) {
-                if (ext4_has_inline_data(inode)) {
-                        ret = ext4_inline_data_iomap(inode, iomap);
-                        if (ret != -EAGAIN) {
-                                if (ret == 0 && offset >= iomap->length)
-                                        ret = -ENOENT;
-                                return ret;
-                        }
-                }
-        } else {
-                if (WARN_ON_ONCE(ext4_has_inline_data(inode)))
-                        return -ERANGE;
-        }
+        if (WARN_ON_ONCE(ext4_has_inline_data(inode)))
+                return -ERANGE;
-        map.m_lblk = first_block;
-        map.m_len = last_block - first_block + 1;
+        /*
+         * Calculate the first and last logical blocks respectively.
+         */
+        map.m_lblk = offset >> blkbits;
+        map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits,
+                          EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1;
-        if (flags & IOMAP_REPORT) {
-                ret = ext4_map_blocks(NULL, inode, &map, 0);
-                if (ret < 0)
-                        return ret;
-                if (ret == 0) {
-                        ext4_lblk_t end = map.m_lblk + map.m_len - 1;
-                        struct extent_status es;
-                        ext4_es_find_extent_range(inode, &ext4_es_is_delayed,
-                                                  map.m_lblk, end, &es);
-                        if (!es.es_len || es.es_lblk > end) {
-                                /* entire range is a hole */
-                        } else if (es.es_lblk > map.m_lblk) {
-                                /* range starts with a hole */
-                                map.m_len = es.es_lblk - map.m_lblk;
-                        } else {
-                                ext4_lblk_t offs = 0;
-                                if (es.es_lblk < map.m_lblk)
-                                        offs = map.m_lblk - es.es_lblk;
-                                map.m_lblk = es.es_lblk + offs;
-                                map.m_len = es.es_len - offs;
-                                delalloc = true;
-                        }
-                }
-        } else if (flags & IOMAP_WRITE) {
+        if (flags & IOMAP_WRITE)
                 ret = ext4_iomap_alloc(inode, &map, flags);
-        } else {
+        else
                 ret = ext4_map_blocks(NULL, inode, &map, 0);
-        }
         if (ret < 0)
                 return ret;
         ext4_set_iomap(inode, iomap, &map, offset, length);
-        if (delalloc && iomap->type == IOMAP_HOLE)
-                iomap->type = IOMAP_DELALLOC;
         return 0;
 }
@@ -3682,6 +3640,74 @@ const struct iomap_ops ext4_iomap_ops = {
         .iomap_end = ext4_iomap_end,
 };
+static bool ext4_iomap_is_delalloc(struct inode *inode,
+                                   struct ext4_map_blocks *map)
+{
+        struct extent_status es;
+        ext4_lblk_t offset = 0, end = map->m_lblk + map->m_len - 1;
+        ext4_es_find_extent_range(inode, &ext4_es_is_delayed,
+                                  map->m_lblk, end, &es);
+        if (!es.es_len || es.es_lblk > end)
+                return false;
+        if (es.es_lblk > map->m_lblk) {
+                map->m_len = es.es_lblk - map->m_lblk;
+                return false;
+        }
+        offset = map->m_lblk - es.es_lblk;
+        map->m_len = es.es_len - offset;
+        return true;
+}
+static int ext4_iomap_begin_report(struct inode *inode, loff_t offset,
+                                   loff_t length, unsigned int flags,
+                                   struct iomap *iomap, struct iomap *srcmap)
+{
+        int ret;
+        bool delalloc = false;
+        struct ext4_map_blocks map;
+        u8 blkbits = inode->i_blkbits;
+        if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK)
+                return -EINVAL;
+        if (ext4_has_inline_data(inode)) {
+                ret = ext4_inline_data_iomap(inode, iomap);
+                if (ret != -EAGAIN) {
+                        if (ret == 0 && offset >= iomap->length)
+                                ret = -ENOENT;
+                        return ret;
+                }
+        }
+        /*
+         * Calculate the first and last logical block respectively.
+         */
+        map.m_lblk = offset >> blkbits;
+        map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits,
+                          EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1;
+        ret = ext4_map_blocks(NULL, inode, &map, 0);
+        if (ret < 0)
+                return ret;
+        if (ret == 0)
+                delalloc = ext4_iomap_is_delalloc(inode, &map);
+        ext4_set_iomap(inode, iomap, &map, offset, length);
+        if (delalloc && iomap->type == IOMAP_HOLE)
+                iomap->type = IOMAP_DELALLOC;
+        return 0;
+}
+const struct iomap_ops ext4_iomap_report_ops = {
+        .iomap_begin = ext4_iomap_begin_report,
+};
 static int ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
                            ssize_t size, void *private)
 {
...
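For readers skimming the new helper: a small, self-contained sketch (hypothetical stand-in types, not kernel code) of the trimming that ext4_iomap_is_delalloc() performs on the queried range:

/* delalloc_trim.c - illustration only, with simplified stand-in types. */
#include <stdbool.h>
#include <stdio.h>

struct es_range  { unsigned int lblk, len; };   /* stand-in for struct extent_status */
struct map_range { unsigned int lblk, len; };   /* stand-in for struct ext4_map_blocks */

/* Returns true if the start of the queried range is delalloc; may trim map->len. */
static bool range_is_delalloc(const struct es_range *es, struct map_range *map)
{
        unsigned int end = map->lblk + map->len - 1;

        /* No delayed extent, or it starts past the range: all hole. */
        if (!es->len || es->lblk > end)
                return false;

        /* Delayed extent starts inside the range: report only the leading hole. */
        if (es->lblk > map->lblk) {
                map->len = es->lblk - map->lblk;
                return false;
        }

        /* Range starts inside the delayed extent: report the delalloc part. */
        map->len = es->len - (map->lblk - es->lblk);
        return true;
}

int main(void)
{
        struct es_range es = { .lblk = 10, .len = 5 };   /* delayed blocks 10..14 */
        struct map_range map = { .lblk = 8, .len = 20 }; /* query blocks 8..27 */

        bool delalloc = range_is_delalloc(&es, &map);
        printf("delalloc=%d lblk=%u len=%u\n", delalloc, map.lblk, map.len);
        /* Prints: delalloc=0 lblk=8 len=2 -> only the leading hole [8, 9] is reported. */
        return 0;
}

The point mirrored here is that a false return may still shrink the mapping length, so ext4_iomap_begin_report() reports just the leading hole before a delayed extent rather than the whole queried range.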