Commit e3df41f9, authored Nov 30, 2016 by Dave Chinner
Merge branch 'xfs-4.10-misc-fixes-2' into iomap-4.10-directio
Parents: 9484ab1b, f782088c
Showing 30 changed files with 961 additions and 1064 deletions.
Changed files:

  fs/iomap.c                      +2    -3
  fs/xfs/libxfs/xfs_bmap.c        +365  -366
  fs/xfs/libxfs/xfs_bmap.h        +9    -8
  fs/xfs/libxfs/xfs_btree.c       +1    -1
  fs/xfs/libxfs/xfs_defer.c       +5    -12
  fs/xfs/libxfs/xfs_dir2.c        +1    -1
  fs/xfs/libxfs/xfs_dquot_buf.c   +1    -2
  fs/xfs/libxfs/xfs_format.h      +0    -1
  fs/xfs/libxfs/xfs_inode_buf.c   +12   -1
  fs/xfs/libxfs/xfs_inode_buf.h   +2    -0
  fs/xfs/libxfs/xfs_inode_fork.c  +65   -12
  fs/xfs/libxfs/xfs_inode_fork.h  +7    -0
  fs/xfs/libxfs/xfs_types.h       +0    -1
  fs/xfs/xfs_aops.c               +28   -10
  fs/xfs/xfs_bmap_util.c          +15   -18
  fs/xfs/xfs_buf.h                +1    -0
  fs/xfs/xfs_file.c               +33   -199
  fs/xfs/xfs_icache.c             +24   -18
  fs/xfs/xfs_icreate_item.c       +1    -1
  fs/xfs/xfs_inode.h              +6    -5
  fs/xfs/xfs_inode_item.c         +2    -2
  fs/xfs/xfs_ioctl.c              +2    -4
  fs/xfs/xfs_iomap.c              +63   -48
  fs/xfs/xfs_mount.c              +1    -0
  fs/xfs/xfs_qm.c                 +1    -1
  fs/xfs/xfs_reflink.c            +294  -328
  fs/xfs/xfs_reflink.h            +6    -11
  fs/xfs/xfs_sysfs.c              +2    -2
  fs/xfs/xfs_trace.h              +1    -3
  include/linux/iomap.h           +11   -6
fs/iomap.c

@@ -433,8 +433,7 @@ iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
 	struct page *page = data;
 	int ret;
 
-	ret = __block_write_begin_int(page, pos & ~PAGE_MASK, length,
-			NULL, iomap);
+	ret = __block_write_begin_int(page, pos, length, NULL, iomap);
 	if (ret)
 		return ret;
@@ -562,7 +561,7 @@ int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
 	}
 
 	while (len > 0) {
-		ret = iomap_apply(inode, start, len, 0, ops, &ctx,
+		ret = iomap_apply(inode, start, len, IOMAP_REPORT, ops, &ctx,
 				iomap_fiemap_actor);
 		/* inode with no (attribute) mapping will give ENOENT */
 		if (ret == -ENOENT)
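Note on the second hunk: IOMAP_REPORT is a new flag added to include/linux/iomap.h by this same merge (+11 -6 there) so that an ->iomap_begin implementation can tell a FIEMAP reporting call apart from a real read or write. A minimal sketch of that caller-side convention follows; it is illustrative only, and my_fs_report_mapping()/my_fs_map_blocks() are made-up names, not functions from this commit.

    /* Illustrative sketch only, not part of this commit. */
    #include <linux/iomap.h>

    static int my_fs_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
    		unsigned flags, struct iomap *iomap)
    {
    	if (flags & IOMAP_REPORT)
    		/* FIEMAP-style reporting: describe the mapping, allocate nothing */
    		return my_fs_report_mapping(inode, pos, length, iomap);

    	/* normal read/write/zero path */
    	return my_fs_map_blocks(inode, pos, length, flags, iomap);
    }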
fs/xfs/libxfs/xfs_bmap.c

(This diff is collapsed in the page view and is not reproduced here: 365 additions, 366 deletions.)
fs/xfs/libxfs/xfs_bmap.h

@@ -190,6 +190,8 @@ void xfs_bmap_trace_exlist(struct xfs_inode *ip, xfs_extnum_t cnt,
 #define	XFS_BMAP_TRACE_EXLIST(ip,c,w)
 #endif
 
+void	xfs_trim_extent(struct xfs_bmbt_irec *irec, xfs_fileoff_t bno,
+		xfs_filblks_t len);
 int	xfs_bmap_add_attrfork(struct xfs_inode *ip, int size, int rsvd);
 void	xfs_bmap_local_to_extents_empty(struct xfs_inode *ip, int whichfork);
 void	xfs_bmap_add_free(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
@@ -221,7 +223,11 @@ int xfs_bunmapi(struct xfs_trans *tp, struct xfs_inode *ip,
 		xfs_fileoff_t bno, xfs_filblks_t len, int flags,
 		xfs_extnum_t nexts, xfs_fsblock_t *firstblock,
 		struct xfs_defer_ops *dfops, int *done);
-int	xfs_bunmapi_cow(struct xfs_inode *ip, struct xfs_bmbt_irec *del);
+int	xfs_bmap_del_extent_delay(struct xfs_inode *ip, int whichfork,
+		xfs_extnum_t *idx, struct xfs_bmbt_irec *got,
+		struct xfs_bmbt_irec *del);
+void	xfs_bmap_del_extent_cow(struct xfs_inode *ip, xfs_extnum_t *idx,
+		struct xfs_bmbt_irec *got, struct xfs_bmbt_irec *del);
 int	xfs_check_nostate_extents(struct xfs_ifork *ifp, xfs_extnum_t idx,
 		xfs_extnum_t num);
 uint	xfs_default_attroffset(struct xfs_inode *ip);
@@ -231,14 +237,9 @@ int xfs_bmap_shift_extents(struct xfs_trans *tp, struct xfs_inode *ip,
 		struct xfs_defer_ops *dfops, enum shift_direction direction,
 		int num_exts);
 int	xfs_bmap_split_extent(struct xfs_inode *ip, xfs_fileoff_t split_offset);
-struct xfs_bmbt_rec_host *
-	xfs_bmap_search_extents(struct xfs_inode *ip, xfs_fileoff_t bno,
-		int fork, int *eofp, xfs_extnum_t *lastxp,
-		struct xfs_bmbt_irec *gotp, struct xfs_bmbt_irec *prevp);
-int	xfs_bmapi_reserve_delalloc(struct xfs_inode *ip, int whichfork,
-		xfs_fileoff_t aoff, xfs_filblks_t len,
-		struct xfs_bmbt_irec *got, struct xfs_bmbt_irec *prev,
-		xfs_extnum_t *lastx, int eof);
+int	xfs_bmapi_reserve_delalloc(struct xfs_inode *ip, int whichfork,
+		xfs_fileoff_t off, xfs_filblks_t len, xfs_filblks_t prealloc,
+		struct xfs_bmbt_irec *got, xfs_extnum_t *lastx, int eof);
 
 enum xfs_bmap_intent_type {
 	XFS_BMAP_MAP = 1,
fs/xfs/libxfs/xfs_btree.c

@@ -4826,7 +4826,7 @@ xfs_btree_calc_size(
 	return rval;
 }
 
-int
+static int
 xfs_btree_count_blocks_helper(
 	struct xfs_btree_cur	*cur,
 	int			level,
fs/xfs/libxfs/xfs_defer.c

@@ -199,9 +199,9 @@ xfs_defer_intake_work(
 	struct xfs_defer_pending	*dfp;
 
 	list_for_each_entry(dfp, &dop->dop_intake, dfp_list) {
-		trace_xfs_defer_intake_work(tp->t_mountp, dfp);
 		dfp->dfp_intent = dfp->dfp_type->create_intent(tp,
 				dfp->dfp_count);
+		trace_xfs_defer_intake_work(tp->t_mountp, dfp);
 		list_sort(tp->t_mountp, &dfp->dfp_work,
 				dfp->dfp_type->diff_items);
 		list_for_each(li, &dfp->dfp_work)
@@ -221,21 +221,14 @@ xfs_defer_trans_abort(
 	struct xfs_defer_pending	*dfp;
 
 	trace_xfs_defer_trans_abort(tp->t_mountp, dop);
 
-	/*
-	 * If the transaction was committed, drop the intent reference
-	 * since we're bailing out of here. The other reference is
-	 * dropped when the intent hits the AIL. If the transaction
-	 * was not committed, the intent is freed by the intent item
-	 * unlock handler on abort.
-	 */
-	if (!dop->dop_committed)
-		return;
-
-	/* Abort intent items. */
+	/* Abort intent items that don't have a done item. */
 	list_for_each_entry(dfp, &dop->dop_pending, dfp_list) {
 		trace_xfs_defer_pending_abort(tp->t_mountp, dfp);
-		if (!dfp->dfp_done)
+		if (dfp->dfp_intent && !dfp->dfp_done) {
 			dfp->dfp_type->abort_intent(dfp->dfp_intent);
+			dfp->dfp_intent = NULL;
+		}
 	}
 
 	/* Shut down FS. */
fs/xfs/libxfs/xfs_dir2.c

@@ -93,7 +93,7 @@ xfs_ascii_ci_compname(
 	return result;
 }
 
-static struct xfs_nameops xfs_ascii_ci_nameops = {
+static const struct xfs_nameops xfs_ascii_ci_nameops = {
 	.hashname	= xfs_ascii_ci_hashname,
 	.compname	= xfs_ascii_ci_compname,
 };
fs/xfs/libxfs/xfs_dquot_buf.c

@@ -191,8 +191,7 @@ xfs_dquot_buf_verify_crc(
 	if (mp->m_quotainfo)
 		ndquots = mp->m_quotainfo->qi_dqperchunk;
 	else
-		ndquots = xfs_calc_dquots_per_chunk(
-					XFS_BB_TO_FSB(mp, bp->b_length));
+		ndquots = xfs_calc_dquots_per_chunk(bp->b_length);
 
 	for (i = 0; i < ndquots; i++, d++) {
 		if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk),
fs/xfs/libxfs/xfs_format.h

@@ -865,7 +865,6 @@ typedef struct xfs_timestamp {
  * padding field for v3 inodes.
  */
 #define	XFS_DINODE_MAGIC		0x494e	/* 'IN' */
-#define XFS_DINODE_GOOD_VERSION(v)	((v) >= 1 && (v) <= 3)
 typedef struct xfs_dinode {
 	__be16		di_magic;	/* inode magic # = XFS_DINODE_MAGIC */
 	__be16		di_mode;	/* mode and type of file */
fs/xfs/libxfs/xfs_inode_buf.c

@@ -57,6 +57,17 @@ xfs_inobp_check(
 }
 #endif
 
+bool
+xfs_dinode_good_version(
+	struct xfs_mount *mp,
+	__u8		version)
+{
+	if (xfs_sb_version_hascrc(&mp->m_sb))
+		return version == 3;
+
+	return version == 1 || version == 2;
+}
+
 /*
  * If we are doing readahead on an inode buffer, we might be in log recovery
  * reading an inode allocation buffer that hasn't yet been replayed, and hence
@@ -91,7 +102,7 @@ xfs_inode_buf_verify(
 		dip = xfs_buf_offset(bp, (i << mp->m_sb.sb_inodelog));
 		di_ok = dip->di_magic == cpu_to_be16(XFS_DINODE_MAGIC) &&
-			    XFS_DINODE_GOOD_VERSION(dip->di_version);
+			xfs_dinode_good_version(mp, dip->di_version);
 		if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
 						XFS_ERRTAG_ITOBP_INOTOBP,
 						XFS_RANDOM_ITOBP_INOTOBP))) {
fs/xfs/libxfs/xfs_inode_buf.h

@@ -74,6 +74,8 @@ void xfs_inode_from_disk(struct xfs_inode *ip, struct xfs_dinode *from);
 void	xfs_log_dinode_to_disk(struct xfs_log_dinode *from,
 			       struct xfs_dinode *to);
 
+bool	xfs_dinode_good_version(struct xfs_mount *mp, __u8 version);
+
 #if defined(DEBUG)
 void	xfs_inobp_check(struct xfs_mount *, struct xfs_buf *);
 #else
fs/xfs/libxfs/xfs_inode_fork.c

@@ -775,6 +775,13 @@ xfs_idestroy_fork(
 	}
 }
 
+/* Count number of incore extents based on if_bytes */
+xfs_extnum_t
+xfs_iext_count(struct xfs_ifork *ifp)
+{
+	return ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+}
+
 /*
  * Convert in-core extents to on-disk form
  *
@@ -803,7 +810,7 @@ xfs_iextents_copy(
 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
 	ASSERT(ifp->if_bytes > 0);
 
-	nrecs = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+	nrecs = xfs_iext_count(ifp);
 	XFS_BMAP_TRACE_EXLIST(ip, nrecs, whichfork);
 	ASSERT(nrecs > 0);
@@ -941,7 +948,7 @@ xfs_iext_get_ext(
 	xfs_extnum_t	idx)		/* index of target extent */
 {
 	ASSERT(idx >= 0);
-	ASSERT(idx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t));
+	ASSERT(idx < xfs_iext_count(ifp));
 
 	if ((ifp->if_flags & XFS_IFEXTIREC) && (idx == 0)) {
 		return ifp->if_u1.if_ext_irec->er_extbuf;
@@ -1017,7 +1024,7 @@ xfs_iext_add(
 	int		new_size;	/* size of extents after adding */
 	xfs_extnum_t	nextents;	/* number of extents in file */
 
-	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+	nextents = xfs_iext_count(ifp);
 	ASSERT((idx >= 0) && (idx <= nextents));
 	byte_diff = ext_diff * sizeof(xfs_bmbt_rec_t);
 	new_size = ifp->if_bytes + byte_diff;
@@ -1241,7 +1248,7 @@ xfs_iext_remove(
 	trace_xfs_iext_remove(ip, idx, state, _RET_IP_);
 
 	ASSERT(ext_diff > 0);
-	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+	nextents = xfs_iext_count(ifp);
 	new_size = (nextents - ext_diff) * sizeof(xfs_bmbt_rec_t);
 
 	if (new_size == 0) {
@@ -1270,7 +1277,7 @@ xfs_iext_remove_inline(
 	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
 	ASSERT(idx < XFS_INLINE_EXTS);
-	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+	nextents = xfs_iext_count(ifp);
 	ASSERT(((nextents - ext_diff) > 0) &&
 		(nextents - ext_diff) < XFS_INLINE_EXTS);
@@ -1309,7 +1316,7 @@ xfs_iext_remove_direct(
 	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
 	new_size = ifp->if_bytes -
 		(ext_diff * sizeof(xfs_bmbt_rec_t));
-	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+	nextents = xfs_iext_count(ifp);
 
 	if (new_size == 0) {
 		xfs_iext_destroy(ifp);
@@ -1546,7 +1553,7 @@ xfs_iext_indirect_to_direct(
 	int		size;		/* size of file extents */
 
 	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
-	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+	nextents = xfs_iext_count(ifp);
 	ASSERT(nextents <= XFS_LINEAR_EXTS);
 	size = nextents * sizeof(xfs_bmbt_rec_t);
@@ -1620,7 +1627,7 @@ xfs_iext_bno_to_ext(
 	xfs_extnum_t	nextents;	/* number of file extents */
 	xfs_fileoff_t	startoff = 0;	/* start offset of extent */
 
-	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+	nextents = xfs_iext_count(ifp);
 	if (nextents == 0) {
 		*idxp = 0;
 		return NULL;
@@ -1733,8 +1740,8 @@ xfs_iext_idx_to_irec(
 	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
 	ASSERT(page_idx >= 0);
-	ASSERT(page_idx <= ifp->if_bytes / sizeof(xfs_bmbt_rec_t));
-	ASSERT(page_idx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t) || realloc);
+	ASSERT(page_idx <= xfs_iext_count(ifp));
+	ASSERT(page_idx < xfs_iext_count(ifp) || realloc);
 
 	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
 	erp_idx = 0;
@@ -1782,7 +1789,7 @@ xfs_iext_irec_init(
 	xfs_extnum_t	nextents;	/* number of extents in file */
 
 	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
-	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+	nextents = xfs_iext_count(ifp);
 	ASSERT(nextents <= XFS_LINEAR_EXTS);
 
 	erp = kmem_alloc(sizeof(xfs_ext_irec_t), KM_NOFS);
@@ -1906,7 +1913,7 @@ xfs_iext_irec_compact(
 	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
 
 	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
-	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+	nextents = xfs_iext_count(ifp);
 
 	if (nextents == 0) {
 		xfs_iext_destroy(ifp);
@@ -1996,3 +2003,49 @@ xfs_ifork_init_cow(
 	ip->i_cformat = XFS_DINODE_FMT_EXTENTS;
 	ip->i_cnextents = 0;
 }
+
+/*
+ * Lookup the extent covering bno.
+ *
+ * If there is an extent covering bno return the extent index, and store the
+ * expanded extent structure in *gotp, and the extent index in *idx.
+ * If there is no extent covering bno, but there is an extent after it (e.g.
+ * it lies in a hole) return that extent in *gotp and its index in *idx
+ * instead.
+ * If bno is beyond the last extent return false, and return the index after
+ * the last valid index in *idxp.
+ */
+bool
+xfs_iext_lookup_extent(
+	struct xfs_inode	*ip,
+	struct xfs_ifork	*ifp,
+	xfs_fileoff_t		bno,
+	xfs_extnum_t		*idxp,
+	struct xfs_bmbt_irec	*gotp)
+{
+	struct xfs_bmbt_rec_host *ep;
+
+	XFS_STATS_INC(ip->i_mount, xs_look_exlist);
+
+	ep = xfs_iext_bno_to_ext(ifp, bno, idxp);
+	if (!ep)
+		return false;
+	xfs_bmbt_get_all(ep, gotp);
+	return true;
+}
+
+/*
+ * Return true if there is an extent at index idx, and return the expanded
+ * extent structure at idx in that case.  Else return false.
+ */
+bool
+xfs_iext_get_extent(
+	struct xfs_ifork	*ifp,
+	xfs_extnum_t		idx,
+	struct xfs_bmbt_irec	*gotp)
+{
+	if (idx < 0 || idx >= xfs_iext_count(ifp))
+		return false;
+	xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx), gotp);
+	return true;
+}
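The xfs_iext_count(), xfs_iext_lookup_extent() and xfs_iext_get_extent() helpers added above are what the remaining hunks in this merge switch callers to, replacing open-coded if_bytes arithmetic and xfs_bmap_search_extents(). A rough caller-side sketch of the lookup pattern, mirroring the xfs_file_iomap_begin_delay() hunk in fs/xfs/xfs_iomap.c further down (the local names here are illustrative, not part of the commit):

    /* Sketch only, not part of this commit. */
    struct xfs_bmbt_irec	got;
    xfs_extnum_t		idx;
    bool			found;

    found = xfs_iext_lookup_extent(ip, ifp, offset_fsb, &idx, &got);
    if (found && got.br_startoff <= offset_fsb) {
    	/* "got" covers offset_fsb: use the extent directly */
    } else if (found) {
    	/* offset_fsb sits in a hole; "got" is the next extent after it */
    } else {
    	/* offset_fsb is beyond the last extent; idx is one past the end */
    }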
fs/xfs/libxfs/xfs_inode_fork.h

@@ -152,6 +152,7 @@ void xfs_init_local_fork(struct xfs_inode *, int, const void *, int);
 struct xfs_bmbt_rec_host *
 	xfs_iext_get_ext(struct xfs_ifork *, xfs_extnum_t);
+xfs_extnum_t	xfs_iext_count(struct xfs_ifork *);
 void		xfs_iext_insert(struct xfs_inode *, xfs_extnum_t, xfs_extnum_t,
 			struct xfs_bmbt_irec *, int);
 void		xfs_iext_add(struct xfs_ifork *, xfs_extnum_t, int);
@@ -181,6 +182,12 @@ void xfs_iext_irec_compact_pages(struct xfs_ifork *);
 void		xfs_iext_irec_compact_full(struct xfs_ifork *);
 void		xfs_iext_irec_update_extoffs(struct xfs_ifork *, int, int);
 
+bool		xfs_iext_lookup_extent(struct xfs_inode *ip,
+			struct xfs_ifork *ifp, xfs_fileoff_t bno,
+			xfs_extnum_t *idxp, struct xfs_bmbt_irec *gotp);
+bool		xfs_iext_get_extent(struct xfs_ifork *ifp, xfs_extnum_t idx,
+			struct xfs_bmbt_irec *gotp);
+
 extern struct kmem_zone	*xfs_ifork_zone;
 
 extern void xfs_ifork_init_cow(struct xfs_inode *ip);
fs/xfs/libxfs/xfs_types.h

@@ -57,7 +57,6 @@ typedef __int64_t xfs_sfiloff_t;	/* signed block number in a file */
 #define	NULLAGBLOCK	((xfs_agblock_t)-1)
 #define NULLAGNUMBER	((xfs_agnumber_t)-1)
-#define NULLEXTNUM	((xfs_extnum_t)-1)
 
 #define NULLCOMMITLSN	((xfs_lsn_t)-1)
fs/xfs/xfs_aops.c

@@ -777,7 +777,7 @@ xfs_map_cow(
 {
 	struct xfs_inode	*ip = XFS_I(inode);
 	struct xfs_bmbt_irec	imap;
-	bool			is_cow = false, need_alloc = false;
+	bool			is_cow = false;
 	int			error;
 
 	/*
@@ -795,7 +795,7 @@ xfs_map_cow(
 	 * Else we need to check if there is a COW mapping at this offset.
 	 */
 	xfs_ilock(ip, XFS_ILOCK_SHARED);
-	is_cow = xfs_reflink_find_cow_mapping(ip, offset, &imap, &need_alloc);
+	is_cow = xfs_reflink_find_cow_mapping(ip, offset, &imap);
 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 
 	if (!is_cow)
@@ -805,7 +805,7 @@ xfs_map_cow(
 	 * And if the COW mapping has a delayed extent here we need to
 	 * allocate real space for it now.
 	 */
-	if (need_alloc) {
+	if (isnullstartblock(imap.br_startblock)) {
 		error = xfs_iomap_write_allocate(ip, XFS_COW_FORK, offset,
 				&imap);
 		if (error)
@@ -1311,7 +1311,6 @@ __xfs_get_blocks(
 	ssize_t			size;
 	int			new = 0;
 	bool			is_cow = false;
-	bool			need_alloc = false;
 
 	BUG_ON(create && !direct);
@@ -1337,9 +1336,11 @@ __xfs_get_blocks(
 	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
 	offset_fsb = XFS_B_TO_FSBT(mp, offset);
 
-	if (create && direct && xfs_is_reflink_inode(ip))
-		is_cow = xfs_reflink_find_cow_mapping(ip, offset, &imap,
-					&need_alloc);
+	if (create && direct && xfs_is_reflink_inode(ip)) {
+		is_cow = xfs_reflink_find_cow_mapping(ip, offset, &imap);
+		ASSERT(!is_cow || !isnullstartblock(imap.br_startblock));
+	}
+
 	if (!is_cow) {
 		error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
 					&imap, &nimaps, XFS_BMAPI_ENTIRE);
@@ -1356,10 +1357,29 @@ __xfs_get_blocks(
 			xfs_reflink_trim_irec_to_next_cow(ip, offset_fsb,
 					&imap);
 	}
-	ASSERT(!need_alloc);
 	if (error)
 		goto out_unlock;
 
+	/*
+	 * The only time we can ever safely find delalloc blocks on direct I/O
+	 * is a dio write to post-eof speculative preallocation. All other
+	 * scenarios are indicative of a problem or misuse (such as mixing
+	 * direct and mapped I/O).
+	 *
+	 * The file may be unmapped by the time we get here so we cannot
+	 * reliably fail the I/O based on mapping. Instead, fail the I/O if this
+	 * is a read or a write within eof. Otherwise, carry on but warn as a
+	 * precuation if the file happens to be mapped.
+	 */
+	if (direct && imap.br_startblock == DELAYSTARTBLOCK) {
+		if (!create || offset < i_size_read(VFS_I(ip))) {
+			WARN_ON_ONCE(1);
+			error = -EIO;
+			goto out_unlock;
+		}
+		WARN_ON_ONCE(mapping_mapped(VFS_I(ip)->i_mapping));
+	}
+
 	/* for DAX, we convert unwritten extents directly */
 	if (create &&
 	    (!nimaps ||
@@ -1444,8 +1464,6 @@ __xfs_get_blocks(
 	     (new || ISUNWRITTEN(&imap))))
 		set_buffer_new(bh_result);
 
-	BUG_ON(direct && imap.br_startblock == DELAYSTARTBLOCK);
-
 	return 0;
 
 out_unlock:
fs/xfs/xfs_bmap_util.c

@@ -359,9 +359,7 @@ xfs_bmap_count_blocks(
 	mp = ip->i_mount;
 	ifp = XFS_IFORK_PTR(ip, whichfork);
 	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS) {
-		xfs_bmap_count_leaves(ifp, 0,
-			ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t),
-			count);
+		xfs_bmap_count_leaves(ifp, 0, xfs_iext_count(ifp), count);
 		return 0;
 	}
@@ -426,7 +424,7 @@ xfs_getbmapx_fix_eof_hole(
 		ifp = XFS_IFORK_PTR(ip, whichfork);
 		if (!moretocome &&
 		    xfs_iext_bno_to_ext(ifp, fileblock, &lastx) &&
-		   (lastx == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) - 1))
+		   (lastx == xfs_iext_count(ifp) - 1))
 			out->bmv_oflags |= BMV_OF_LAST;
 	}
@@ -1792,6 +1790,7 @@ xfs_swap_extent_forks(
 	struct xfs_ifork	tempifp, *ifp, *tifp;
 	int			aforkblks = 0;
 	int			taforkblks = 0;
+	xfs_extnum_t		nextents;
 	__uint64_t		tmp;
 	int			error;
@@ -1877,14 +1876,13 @@ xfs_swap_extent_forks(
 	switch (ip->i_d.di_format) {
 	case XFS_DINODE_FMT_EXTENTS:
-		/* If the extents fit in the inode, fix the
-		 * pointer.  Otherwise it's already NULL or
-		 * pointing to the extent.
+		/*
+		 * If the extents fit in the inode, fix the pointer.  Otherwise
+		 * it's already NULL or pointing to the extent.
 		 */
-		if (ip->i_d.di_nextents <= XFS_INLINE_EXTS) {
-			ifp->if_u1.if_extents =
-				ifp->if_u2.if_inline_ext;
-		}
+		nextents = xfs_iext_count(&ip->i_df);
+		if (nextents <= XFS_INLINE_EXTS)
+			ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
 		(*src_log_flags) |= XFS_ILOG_DEXT;
 		break;
 	case XFS_DINODE_FMT_BTREE:
@@ -1896,14 +1894,13 @@ xfs_swap_extent_forks(
 	switch (tip->i_d.di_format) {
 	case XFS_DINODE_FMT_EXTENTS:
-		/* If the extents fit in the inode, fix the
-		 * pointer.  Otherwise it's already NULL or
-		 * pointing to the extent.
+		/*
+		 * If the extents fit in the inode, fix the pointer.  Otherwise
+		 * it's already NULL or pointing to the extent.
 		 */
-		if (tip->i_d.di_nextents <= XFS_INLINE_EXTS) {
-			tifp->if_u1.if_extents =
-				tifp->if_u2.if_inline_ext;
-		}
+		nextents = xfs_iext_count(&tip->i_df);
+		if (nextents <= XFS_INLINE_EXTS)
+			tifp->if_u1.if_extents = tifp->if_u2.if_inline_ext;
 		(*target_log_flags) |= XFS_ILOG_DEXT;
 		break;
 	case XFS_DINODE_FMT_BTREE:
fs/xfs/xfs_buf.h

@@ -71,6 +71,7 @@ typedef unsigned int xfs_buf_flags_t;
 	{ XBF_READ,		"READ" }, \
 	{ XBF_WRITE,		"WRITE" }, \
 	{ XBF_READ_AHEAD,	"READ_AHEAD" }, \
+	{ XBF_NO_IOACCT,	"NO_IOACCT" }, \
 	{ XBF_ASYNC,		"ASYNC" }, \
 	{ XBF_DONE,		"DONE" }, \
 	{ XBF_STALE,		"STALE" }, \
fs/xfs/xfs_file.c

@@ -249,6 +249,7 @@ xfs_file_dio_aio_read(
 	struct xfs_inode	*ip = XFS_I(inode);
 	loff_t			isize = i_size_read(inode);
 	size_t			count = iov_iter_count(to);
+	loff_t			end = iocb->ki_pos + count - 1;
 	struct iov_iter		data;
 	struct xfs_buftarg	*target;
 	ssize_t			ret = 0;
@@ -272,49 +273,21 @@ xfs_file_dio_aio_read(
 	file_accessed(iocb->ki_filp);
 
-	/*
-	 * Locking is a bit tricky here. If we take an exclusive lock for direct
-	 * IO, we effectively serialise all new concurrent read IO to this file
-	 * and block it behind IO that is currently in progress because IO in
-	 * progress holds the IO lock shared. We only need to hold the lock
-	 * exclusive to blow away the page cache, so only take lock exclusively
-	 * if the page cache needs invalidation. This allows the normal direct
-	 * IO case of no page cache pages to proceeed concurrently without
-	 * serialisation.
-	 */
 	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
 	if (mapping->nrpages) {
-		xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
-		xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);
+		ret = filemap_write_and_wait_range(mapping, iocb->ki_pos, end);
+		if (ret)
+			goto out_unlock;
 
 		/*
-		 * The generic dio code only flushes the range of the particular
-		 * I/O. Because we take an exclusive lock here, this whole
-		 * sequence is considerably more expensive for us. This has a
-		 * noticeable performance impact for any file with cached pages,
-		 * even when outside of the range of the particular I/O.
-		 *
-		 * Hence, amortize the cost of the lock against a full file
-		 * flush and reduce the chances of repeated iolock cycles going
-		 * forward.
+		 * Invalidate whole pages. This can return an error if we fail
+		 * to invalidate a page, but this should never happen on XFS.
+		 * Warn if it does fail.
 		 */
-		if (mapping->nrpages) {
-			ret = filemap_write_and_wait(mapping);
-			if (ret) {
-				xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
-				return ret;
-			}
-
-			/*
-			 * Invalidate whole pages. This can return an error if
-			 * we fail to invalidate a page, but this should never
-			 * happen on XFS. Warn if it does fail.
-			 */
-			ret = invalidate_inode_pages2(mapping);
-			WARN_ON_ONCE(ret);
-			ret = 0;
-		}
-		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
+		ret = invalidate_inode_pages2_range(mapping,
+				iocb->ki_pos >> PAGE_SHIFT, end >> PAGE_SHIFT);
+		WARN_ON_ONCE(ret);
+		ret = 0;
 	}
 
 	data = *to;
@@ -324,8 +297,9 @@ xfs_file_dio_aio_read(
 		iocb->ki_pos += ret;
 		iov_iter_advance(to, ret);
 	}
-	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
 
+out_unlock:
+	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
 	return ret;
 }
@@ -570,61 +544,49 @@ xfs_file_dio_aio_write(
 	if ((iocb->ki_pos | count) & target->bt_logical_sectormask)
 		return -EINVAL;
 
-	/* "unaligned" here means not aligned to a filesystem block */
-	if ((iocb->ki_pos & mp->m_blockmask) ||
-	    ((iocb->ki_pos + count) & mp->m_blockmask))
-		unaligned_io = 1;
-
 	/*
-	 * We don't need to take an exclusive lock unless there page cache needs
-	 * to be invalidated or unaligned IO is being executed. We don't need to
-	 * consider the EOF extension case here because
-	 * xfs_file_aio_write_checks() will relock the inode as necessary for
-	 * EOF zeroing cases and fill out the new inode size as appropriate.
+	 * Don't take the exclusive iolock here unless the I/O is unaligned to
+	 * the file system block size.  We don't need to consider the EOF
+	 * extension case here because xfs_file_aio_write_checks() will relock
+	 * the inode as necessary for EOF zeroing cases and fill out the new
+	 * inode size as appropriate.
 	 */
-	if (unaligned_io || mapping->nrpages)
+	if ((iocb->ki_pos & mp->m_blockmask) ||
+	    ((iocb->ki_pos + count) & mp->m_blockmask)) {
+		unaligned_io = 1;
 		iolock = XFS_IOLOCK_EXCL;
-	else
+	} else {
 		iolock = XFS_IOLOCK_SHARED;
-	xfs_rw_ilock(ip, iolock);
-
-	/*
-	 * Recheck if there are cached pages that need invalidate after we got
-	 * the iolock to protect against other threads adding new pages while
-	 * we were waiting for the iolock.
-	 */
-	if (mapping->nrpages && iolock == XFS_IOLOCK_SHARED) {
-		xfs_rw_iunlock(ip, iolock);
-		iolock = XFS_IOLOCK_EXCL;
-		xfs_rw_ilock(ip, iolock);
 	}
 
+	xfs_rw_ilock(ip, iolock);
+
 	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
 	if (ret)
 		goto out;
 	count = iov_iter_count(from);
 	end = iocb->ki_pos + count - 1;
 
-	/*
-	 * See xfs_file_dio_aio_read() for why we do a full-file flush here.
-	 */
 	if (mapping->nrpages) {
-		ret = filemap_write_and_wait(VFS_I(ip)->i_mapping);
+		ret = filemap_write_and_wait_range(mapping, iocb->ki_pos, end);
 		if (ret)
 			goto out;
+
 		/*
 		 * Invalidate whole pages. This can return an error if we fail
 		 * to invalidate a page, but this should never happen on XFS.
 		 * Warn if it does fail.
 		 */
-		ret = invalidate_inode_pages2(VFS_I(ip)->i_mapping);
+		ret = invalidate_inode_pages2_range(mapping,
+				iocb->ki_pos >> PAGE_SHIFT, end >> PAGE_SHIFT);
		WARN_ON_ONCE(ret);
 		ret = 0;
 	}
 
 	/*
 	 * If we are doing unaligned IO, wait for all other IO to drain,
-	 * otherwise demote the lock if we had to flush cached pages
+	 * otherwise demote the lock if we had to take the exclusive lock
+	 * for other reasons in xfs_file_aio_write_checks.
 	 */
 	if (unaligned_io)
 		inode_dio_wait(inode);
@@ -947,134 +909,6 @@ xfs_file_fallocate(
 	return error;
 }
 
-/*
- * Flush all file writes out to disk.
- */
-static int
-xfs_file_wait_for_io(
-	struct inode	*inode,
-	loff_t		offset,
-	size_t		len)
-{
-	loff_t		rounding;
-	loff_t		ioffset;
-	loff_t		iendoffset;
-	loff_t		bs;
-	int		ret;
-
-	bs = inode->i_sb->s_blocksize;
-	inode_dio_wait(inode);
-
-	rounding = max_t(xfs_off_t, bs, PAGE_SIZE);
-	ioffset = round_down(offset, rounding);
-	iendoffset = round_up(offset + len, rounding) - 1;
-	ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
-					   iendoffset);
-	return ret;
-}
-
-/* Hook up to the VFS reflink function */
-STATIC int
-xfs_file_share_range(
-	struct file	*file_in,
-	loff_t		pos_in,
-	struct file	*file_out,
-	loff_t		pos_out,
-	u64		len,
-	bool		is_dedupe)
-{
-	struct inode	*inode_in;
-	struct inode	*inode_out;
-	ssize_t		ret;
-	loff_t		bs;
-	loff_t		isize;
-	int		same_inode;
-	loff_t		blen;
-	unsigned int	flags = 0;
-
-	inode_in = file_inode(file_in);
-	inode_out = file_inode(file_out);
-	bs = inode_out->i_sb->s_blocksize;
-
-	/* Don't touch certain kinds of inodes */
-	if (IS_IMMUTABLE(inode_out))
-		return -EPERM;
-	if (IS_SWAPFILE(inode_in) || IS_SWAPFILE(inode_out))
-		return -ETXTBSY;
-
-	/* Reflink only works within this filesystem. */
-	if (inode_in->i_sb != inode_out->i_sb)
-		return -EXDEV;
-	same_inode = (inode_in->i_ino == inode_out->i_ino);
-
-	/* Don't reflink dirs, pipes, sockets... */
-	if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode))
-		return -EISDIR;
-	if (S_ISFIFO(inode_in->i_mode) || S_ISFIFO(inode_out->i_mode))
-		return -EINVAL;
-	if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode))
-		return -EINVAL;
-
-	/* Don't share DAX file data for now. */
-	if (IS_DAX(inode_in) || IS_DAX(inode_out))
-		return -EINVAL;
-
-	/* Are we going all the way to the end? */
-	isize = i_size_read(inode_in);
-	if (isize == 0)
-		return 0;
-	if (len == 0)
-		len = isize - pos_in;
-
-	/* Ensure offsets don't wrap and the input is inside i_size */
-	if (pos_in + len < pos_in || pos_out + len < pos_out ||
-	    pos_in + len > isize)
-		return -EINVAL;
-
-	/* Don't allow dedupe past EOF in the dest file */
-	if (is_dedupe) {
-		loff_t	disize;
-
-		disize = i_size_read(inode_out);
-		if (pos_out >= disize || pos_out + len > disize)
-			return -EINVAL;
-	}
-
-	/* If we're linking to EOF, continue to the block boundary. */
-	if (pos_in + len == isize)
-		blen = ALIGN(isize, bs) - pos_in;
-	else
-		blen = len;
-
-	/* Only reflink if we're aligned to block boundaries */
-	if (!IS_ALIGNED(pos_in, bs) || !IS_ALIGNED(pos_in + blen, bs) ||
-	    !IS_ALIGNED(pos_out, bs) || !IS_ALIGNED(pos_out + blen, bs))
-		return -EINVAL;
-
-	/* Don't allow overlapped reflink within the same file */
-	if (same_inode && pos_out + blen > pos_in &&
-	    pos_out < pos_in + blen)
-		return -EINVAL;
-
-	/* Wait for the completion of any pending IOs on srcfile */
-	ret = xfs_file_wait_for_io(inode_in, pos_in, len);
-	if (ret)
-		goto out;
-	ret = xfs_file_wait_for_io(inode_out, pos_out, len);
-	if (ret)
-		goto out;
-
-	if (is_dedupe)
-		flags |= XFS_REFLINK_DEDUPE;
-	ret = xfs_reflink_remap_range(XFS_I(inode_in), pos_in, XFS_I(inode_out),
-			pos_out, len, flags);
-	if (ret < 0)
-		goto out;
-
-out:
-	return ret;
-}
-
 STATIC ssize_t
 xfs_file_copy_range(
 	struct file	*file_in,
@@ -1086,7 +920,7 @@ xfs_file_copy_range(
 {
 	int		error;
 
-	error = xfs_file_share_range(file_in, pos_in, file_out, pos_out,
+	error = xfs_reflink_remap_range(file_in, pos_in, file_out, pos_out,
 				     len, false);
 	if (error)
 		return error;
@@ -1101,7 +935,7 @@ xfs_file_clone_range(
 	loff_t		pos_out,
 	u64		len)
 {
-	return xfs_file_share_range(file_in, pos_in, file_out, pos_out,
+	return xfs_reflink_remap_range(file_in, pos_in, file_out, pos_out,
 				     len, false);
 }
@@ -1124,7 +958,7 @@ xfs_file_dedupe_range(
 	if (len > XFS_MAX_DEDUPE_LEN)
 		len = XFS_MAX_DEDUPE_LEN;
 
-	error = xfs_file_share_range(src_file, loff, dst_file, dst_loff,
+	error = xfs_reflink_remap_range(src_file, loff, dst_file, dst_loff,
 				     len, true);
 	if (error)
 		return error;
fs/xfs/xfs_icache.c

@@ -123,7 +123,6 @@ __xfs_inode_free(
 {
 	/* asserts to verify all state is correct here */
 	ASSERT(atomic_read(&ip->i_pincount) == 0);
-	ASSERT(!xfs_isiflocked(ip));
 	XFS_STATS_DEC(ip->i_mount, vn_active);
 
 	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
@@ -133,6 +132,8 @@ void
 xfs_inode_free(
 	struct xfs_inode	*ip)
 {
+	ASSERT(!xfs_isiflocked(ip));
+
 	/*
 	 * Because we use RCU freeing we need to ensure the inode always
 	 * appears to be reclaimed with an invalid inode number when in the
@@ -981,6 +982,7 @@ xfs_reclaim_inode(
 	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
 		xfs_iunpin_wait(ip);
+		/* xfs_iflush_abort() drops the flush lock */
 		xfs_iflush_abort(ip, false);
 		goto reclaim;
 	}
@@ -989,10 +991,10 @@ xfs_reclaim_inode(
 			goto out_ifunlock;
 		xfs_iunpin_wait(ip);
 	}
-	if (xfs_iflags_test(ip, XFS_ISTALE))
-		goto reclaim;
-	if (xfs_inode_clean(ip))
+	if (xfs_iflags_test(ip, XFS_ISTALE) || xfs_inode_clean(ip)) {
+		xfs_ifunlock(ip);
 		goto reclaim;
+	}
 
 	/*
 	 * Never flush out dirty data during non-blocking reclaim, as it would
@@ -1030,25 +1032,24 @@ xfs_reclaim_inode(
 		xfs_buf_relse(bp);
 	}
 
-	xfs_iflock(ip);
 reclaim:
+	ASSERT(!xfs_isiflocked(ip));
+
 	/*
 	 * Because we use RCU freeing we need to ensure the inode always appears
 	 * to be reclaimed with an invalid inode number when in the free state.
-	 * We do this as early as possible under the ILOCK and flush lock so
-	 * that xfs_iflush_cluster() can be guaranteed to detect races with us
-	 * here. By doing this, we guarantee that once xfs_iflush_cluster has
-	 * locked both the XFS_ILOCK and the flush lock that it will see either
-	 * a valid, flushable inode that will serialise correctly against the
-	 * locks below, or it will see a clean (and invalid) inode that it can
-	 * skip.
+	 * We do this as early as possible under the ILOCK so that
+	 * xfs_iflush_cluster() can be guaranteed to detect races with us here.
+	 * By doing this, we guarantee that once xfs_iflush_cluster has locked
+	 * XFS_ILOCK that it will see either a valid, flushable inode that will
+	 * serialise correctly, or it will see a clean (and invalid) inode that
+	 * it can skip.
 	 */
 	spin_lock(&ip->i_flags_lock);
 	ip->i_flags = XFS_IRECLAIM;
 	ip->i_ino = 0;
 	spin_unlock(&ip->i_flags_lock);
 
-	xfs_ifunlock(ip);
 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 
 	XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
@@ -1580,10 +1581,15 @@ xfs_inode_free_cowblocks(
 	struct xfs_eofblocks *eofb = args;
 	bool need_iolock = true;
 	int match;
+	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
 
 	ASSERT(!eofb || (eofb && eofb->eof_scan_owner != 0));
 
-	if (!xfs_reflink_has_real_cow_blocks(ip)) {
+	/*
+	 * Just clear the tag if we have an empty cow fork or none at all. It's
+	 * possible the inode was fully unshared since it was originally tagged.
+	 */
+	if (!xfs_is_reflink_inode(ip) || !ifp->if_bytes) {
 		trace_xfs_inode_free_cowblocks_invalid(ip);
 		xfs_inode_clear_cowblocks_tag(ip);
 		return 0;
@@ -1656,9 +1662,9 @@ void
 xfs_inode_set_cowblocks_tag(
 	xfs_inode_t	*ip)
 {
-	trace_xfs_inode_set_eofblocks_tag(ip);
+	trace_xfs_inode_set_cowblocks_tag(ip);
 	return __xfs_inode_set_eofblocks_tag(ip, xfs_queue_cowblocks,
-			trace_xfs_perag_set_eofblocks,
+			trace_xfs_perag_set_cowblocks,
 			XFS_ICI_COWBLOCKS_TAG);
 }
@@ -1666,7 +1672,7 @@ void
 xfs_inode_clear_cowblocks_tag(
 	xfs_inode_t	*ip)
 {
-	trace_xfs_inode_clear_eofblocks_tag(ip);
+	trace_xfs_inode_clear_cowblocks_tag(ip);
 	return __xfs_inode_clear_eofblocks_tag(ip,
-			trace_xfs_perag_clear_eofblocks, XFS_ICI_COWBLOCKS_TAG);
+			trace_xfs_perag_clear_cowblocks, XFS_ICI_COWBLOCKS_TAG);
 }
fs/xfs/xfs_icreate_item.c

@@ -133,7 +133,7 @@ xfs_icreate_item_committing(
 /*
 * This is the ops vector shared by all buf log items.
 */
-static struct xfs_item_ops xfs_icreate_item_ops = {
+static const struct xfs_item_ops xfs_icreate_item_ops = {
 	.iop_size	= xfs_icreate_item_size,
 	.iop_format	= xfs_icreate_item_format,
 	.iop_pin	= xfs_icreate_item_pin,
fs/xfs/xfs_inode.h

@@ -246,6 +246,11 @@ static inline bool xfs_is_reflink_inode(struct xfs_inode *ip)
 * Synchronize processes attempting to flush the in-core inode back to disk.
 */
 
+static inline int xfs_isiflocked(struct xfs_inode *ip)
+{
+	return xfs_iflags_test(ip, XFS_IFLOCK);
+}
+
 extern void __xfs_iflock(struct xfs_inode *ip);
 
 static inline int xfs_iflock_nowait(struct xfs_inode *ip)
@@ -261,16 +266,12 @@ static inline void xfs_iflock(struct xfs_inode *ip)
 
 static inline void xfs_ifunlock(struct xfs_inode *ip)
 {
+	ASSERT(xfs_isiflocked(ip));
 	xfs_iflags_clear(ip, XFS_IFLOCK);
 	smp_mb();
 	wake_up_bit(&ip->i_flags, __XFS_IFLOCK_BIT);
 }
 
-static inline int xfs_isiflocked(struct xfs_inode *ip)
-{
-	return xfs_iflags_test(ip, XFS_IFLOCK);
-}
-
 /*
 * Flags for inode locking.
 * Bit ranges:	1<<1  - 1<<16-1 -- iolock/ilock modes (bitfield)
fs/xfs/xfs_inode_item.c

@@ -164,7 +164,7 @@ xfs_inode_item_format_data_fork(
 		struct xfs_bmbt_rec *p;
 
 		ASSERT(ip->i_df.if_u1.if_extents != NULL);
-		ASSERT(ip->i_df.if_bytes / sizeof(xfs_bmbt_rec_t) > 0);
+		ASSERT(xfs_iext_count(&ip->i_df) > 0);
 
 		p = xlog_prepare_iovec(lv, vecp, XLOG_REG_TYPE_IEXT);
 		data_bytes = xfs_iextents_copy(ip, p, XFS_DATA_FORK);
@@ -261,7 +261,7 @@ xfs_inode_item_format_attr_fork(
 	    ip->i_afp->if_bytes > 0) {
 		struct xfs_bmbt_rec *p;
 
-		ASSERT(ip->i_afp->if_bytes / sizeof(xfs_bmbt_rec_t) ==
+		ASSERT(xfs_iext_count(ip->i_afp) ==
 			ip->i_d.di_anextents);
 		ASSERT(ip->i_afp->if_u1.if_extents != NULL);
fs/xfs/xfs_ioctl.c

@@ -910,16 +910,14 @@ xfs_ioc_fsgetxattr(
 	if (attr) {
 		if (ip->i_afp) {
 			if (ip->i_afp->if_flags & XFS_IFEXTENTS)
-				fa.fsx_nextents = ip->i_afp->if_bytes /
-							sizeof(xfs_bmbt_rec_t);
+				fa.fsx_nextents = xfs_iext_count(ip->i_afp);
 			else
 				fa.fsx_nextents = ip->i_d.di_anextents;
 		} else
 			fa.fsx_nextents = 0;
 	} else {
 		if (ip->i_df.if_flags & XFS_IFEXTENTS)
-			fa.fsx_nextents = ip->i_df.if_bytes /
-						sizeof(xfs_bmbt_rec_t);
+			fa.fsx_nextents = xfs_iext_count(&ip->i_df);
 		else
 			fa.fsx_nextents = ip->i_d.di_nextents;
 	}
fs/xfs/xfs_iomap.c
View file @
e3df41f9
...
@@ -395,11 +395,12 @@ xfs_iomap_prealloc_size(
...
@@ -395,11 +395,12 @@ xfs_iomap_prealloc_size(
struct
xfs_inode
*
ip
,
struct
xfs_inode
*
ip
,
loff_t
offset
,
loff_t
offset
,
loff_t
count
,
loff_t
count
,
xfs_extnum_t
idx
,
xfs_extnum_t
idx
)
struct
xfs_bmbt_irec
*
prev
)
{
{
struct
xfs_mount
*
mp
=
ip
->
i_mount
;
struct
xfs_mount
*
mp
=
ip
->
i_mount
;
struct
xfs_ifork
*
ifp
=
XFS_IFORK_PTR
(
ip
,
XFS_DATA_FORK
);
xfs_fileoff_t
offset_fsb
=
XFS_B_TO_FSBT
(
mp
,
offset
);
xfs_fileoff_t
offset_fsb
=
XFS_B_TO_FSBT
(
mp
,
offset
);
struct
xfs_bmbt_irec
prev
;
int
shift
=
0
;
int
shift
=
0
;
int64_t
freesp
;
int64_t
freesp
;
xfs_fsblock_t
qblocks
;
xfs_fsblock_t
qblocks
;
...
@@ -419,8 +420,8 @@ xfs_iomap_prealloc_size(
...
@@ -419,8 +420,8 @@ xfs_iomap_prealloc_size(
*/
*/
if
((
mp
->
m_flags
&
XFS_MOUNT_DFLT_IOSIZE
)
||
if
((
mp
->
m_flags
&
XFS_MOUNT_DFLT_IOSIZE
)
||
XFS_ISIZE
(
ip
)
<
XFS_FSB_TO_B
(
mp
,
mp
->
m_dalign
)
||
XFS_ISIZE
(
ip
)
<
XFS_FSB_TO_B
(
mp
,
mp
->
m_dalign
)
||
idx
==
0
||
!
xfs_iext_get_extent
(
ifp
,
idx
-
1
,
&
prev
)
||
prev
->
br_startoff
+
prev
->
br_blockcount
<
offset_fsb
)
prev
.
br_startoff
+
prev
.
br_blockcount
<
offset_fsb
)
return
mp
->
m_writeio_blocks
;
return
mp
->
m_writeio_blocks
;
/*
/*
...
@@ -439,8 +440,8 @@ xfs_iomap_prealloc_size(
...
@@ -439,8 +440,8 @@ xfs_iomap_prealloc_size(
* always extends to MAXEXTLEN rather than falling short due to things
* always extends to MAXEXTLEN rather than falling short due to things
* like stripe unit/width alignment of real extents.
* like stripe unit/width alignment of real extents.
*/
*/
if
(
prev
->
br_blockcount
<=
(
MAXEXTLEN
>>
1
))
if
(
prev
.
br_blockcount
<=
(
MAXEXTLEN
>>
1
))
alloc_blocks
=
prev
->
br_blockcount
<<
1
;
alloc_blocks
=
prev
.
br_blockcount
<<
1
;
else
else
alloc_blocks
=
XFS_B_TO_FSB
(
mp
,
offset
);
alloc_blocks
=
XFS_B_TO_FSB
(
mp
,
offset
);
if
(
!
alloc_blocks
)
if
(
!
alloc_blocks
)
...
@@ -535,11 +536,11 @@ xfs_file_iomap_begin_delay(
...
@@ -535,11 +536,11 @@ xfs_file_iomap_begin_delay(
xfs_fileoff_t
offset_fsb
=
XFS_B_TO_FSBT
(
mp
,
offset
);
xfs_fileoff_t
offset_fsb
=
XFS_B_TO_FSBT
(
mp
,
offset
);
xfs_fileoff_t
maxbytes_fsb
=
xfs_fileoff_t
maxbytes_fsb
=
XFS_B_TO_FSB
(
mp
,
mp
->
m_super
->
s_maxbytes
);
XFS_B_TO_FSB
(
mp
,
mp
->
m_super
->
s_maxbytes
);
xfs_fileoff_t
end_fsb
,
orig_end_fsb
;
xfs_fileoff_t
end_fsb
;
int
error
=
0
,
eof
=
0
;
int
error
=
0
,
eof
=
0
;
struct
xfs_bmbt_irec
got
;
struct
xfs_bmbt_irec
got
;
struct
xfs_bmbt_irec
prev
;
xfs_extnum_t
idx
;
xfs_extnum_t
idx
;
xfs_fsblock_t
prealloc_blocks
=
0
;
ASSERT
(
!
XFS_IS_REALTIME_INODE
(
ip
));
ASSERT
(
!
XFS_IS_REALTIME_INODE
(
ip
));
ASSERT
(
!
xfs_get_extsz_hint
(
ip
));
ASSERT
(
!
xfs_get_extsz_hint
(
ip
));
...
@@ -563,9 +564,19 @@ xfs_file_iomap_begin_delay(
...
@@ -563,9 +564,19 @@ xfs_file_iomap_begin_delay(
goto
out_unlock
;
goto
out_unlock
;
}
}
xfs_bmap_search_extents
(
ip
,
offset_fsb
,
XFS_DATA_FORK
,
&
eof
,
&
idx
,
eof
=
!
xfs_iext_lookup_extent
(
ip
,
ifp
,
offset_fsb
,
&
idx
,
&
got
);
&
got
,
&
prev
);
if
(
!
eof
&&
got
.
br_startoff
<=
offset_fsb
)
{
if
(
!
eof
&&
got
.
br_startoff
<=
offset_fsb
)
{
if
(
xfs_is_reflink_inode
(
ip
))
{
bool
shared
;
end_fsb
=
min
(
XFS_B_TO_FSB
(
mp
,
offset
+
count
),
maxbytes_fsb
);
xfs_trim_extent
(
&
got
,
offset_fsb
,
end_fsb
-
offset_fsb
);
error
=
xfs_reflink_reserve_cow
(
ip
,
&
got
,
&
shared
);
if
(
error
)
goto
out_unlock
;
}
trace_xfs_iomap_found
(
ip
,
offset
,
count
,
0
,
&
got
);
trace_xfs_iomap_found
(
ip
,
offset
,
count
,
0
,
&
got
);
goto
done
;
goto
done
;
}
}
@@ -584,35 +595,32 @@ xfs_file_iomap_begin_delay(
 	 * the lower level functions are updated.
 	 */
 	count = min_t(loff_t, count, 1024 * PAGE_SIZE);
-	end_fsb = orig_end_fsb =
-		min(XFS_B_TO_FSB(mp, offset + count), maxbytes_fsb);
+	end_fsb = min(XFS_B_TO_FSB(mp, offset + count), maxbytes_fsb);
 
 	if (eof) {
-		xfs_fsblock_t	prealloc_blocks;
-
-		prealloc_blocks =
-			xfs_iomap_prealloc_size(ip, offset, count, idx, &prev);
+		prealloc_blocks = xfs_iomap_prealloc_size(ip, offset, count, idx);
 		if (prealloc_blocks) {
 			xfs_extlen_t	align;
 			xfs_off_t	end_offset;
+			xfs_fileoff_t	p_end_fsb;
 
 			end_offset = XFS_WRITEIO_ALIGN(mp, offset + count - 1);
-			end_fsb = XFS_B_TO_FSBT(mp, end_offset) +
-				prealloc_blocks;
+			p_end_fsb = XFS_B_TO_FSBT(mp, end_offset) +
+					prealloc_blocks;
 
 			align = xfs_eof_alignment(ip, 0);
 			if (align)
-				end_fsb = roundup_64(end_fsb, align);
+				p_end_fsb = roundup_64(p_end_fsb, align);
 
-			end_fsb = min(end_fsb, maxbytes_fsb);
-			ASSERT(end_fsb > offset_fsb);
+			p_end_fsb = min(p_end_fsb, maxbytes_fsb);
+			ASSERT(p_end_fsb > offset_fsb);
+			prealloc_blocks = p_end_fsb - end_fsb;
 		}
 	}
 
 retry:
 	error = xfs_bmapi_reserve_delalloc(ip, XFS_DATA_FORK, offset_fsb,
-			end_fsb - offset_fsb, &got,
-			&prev, &idx, eof);
+			end_fsb - offset_fsb, prealloc_blocks, &got, &idx, eof);
 	switch (error) {
 	case 0:
 		break;
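To make the new arithmetic concrete: end_fsb now covers only the range being written, p_end_fsb is the write-aligned end of the I/O plus the speculative preallocation (rounded up to the inode's EOF alignment and capped at maxbytes_fsb), and only the difference between the two is handed to xfs_bmapi_reserve_delalloc() as extra blocks, so the requested mapping itself no longer grows silently. A standalone walk-through with assumed numbers (the block size, write alignment and inputs are illustrative, not values taken from this commit; the maxbytes clamp is omitted):

#include <stdio.h>
#include <stdint.h>

/* Assumed geometry for the example only: 4k blocks, 64k write alignment. */
#define FSB_SHIFT	12
#define WRITE_ALIGN	(64 * 1024)

static uint64_t roundup_u64(uint64_t x, uint64_t align)
{
	return ((x + align - 1) / align) * align;
}

int main(void)
{
	uint64_t offset = 10 * 1024 * 1024;	/* buffered write starts at 10 MiB */
	uint64_t count = 64 * 1024;		/* 64 KiB worth of data */
	uint64_t prealloc_blocks = 4096;	/* output of the sizing heuristic */
	uint64_t align = 16;			/* assumed EOF alignment, in blocks */

	/* Blocks the write itself needs (rounded up to a whole block). */
	uint64_t offset_fsb = offset >> FSB_SHIFT;
	uint64_t end_fsb = (offset + count + (1 << FSB_SHIFT) - 1) >> FSB_SHIFT;

	/* Mirror of the hunk: write-aligned end of the I/O plus the prealloc. */
	uint64_t end_offset = (offset + count - 1) & ~(uint64_t)(WRITE_ALIGN - 1);
	uint64_t p_end_fsb = (end_offset >> FSB_SHIFT) + prealloc_blocks;

	p_end_fsb = roundup_u64(p_end_fsb, align);
	prealloc_blocks = p_end_fsb - end_fsb;

	printf("delalloc: %llu blocks at fsb %llu, plus %llu speculative blocks\n",
	       (unsigned long long)(end_fsb - offset_fsb),
	       (unsigned long long)offset_fsb,
	       (unsigned long long)prealloc_blocks);
	return 0;
}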
@@ -620,8 +628,8 @@ xfs_file_iomap_begin_delay(
 	case -EDQUOT:
 		/* retry without any preallocation */
 		trace_xfs_delalloc_enospc(ip, offset, count);
-		if (end_fsb != orig_end_fsb) {
-			end_fsb = orig_end_fsb;
+		if (prealloc_blocks) {
+			prealloc_blocks = 0;
 			goto retry;
 		}
 		/*FALLTHRU*/
@@ -629,13 +637,6 @@ xfs_file_iomap_begin_delay(
 		goto out_unlock;
 	}
 
-	/*
-	 * Tag the inode as speculatively preallocated so we can reclaim this
-	 * space on demand, if necessary.
-	 */
-	if (end_fsb != orig_end_fsb)
-		xfs_inode_set_eofblocks_tag(ip);
-
 	trace_xfs_iomap_alloc(ip, offset, count, 0, &got);
 done:
 	if (isnullstartblock(got.br_startblock))
@@ -961,19 +962,13 @@ xfs_file_iomap_begin(
 	struct xfs_mount	*mp = ip->i_mount;
 	struct xfs_bmbt_irec	imap;
 	xfs_fileoff_t		offset_fsb, end_fsb;
-	bool			shared, trimmed;
 	int			nimaps = 1, error = 0;
+	bool			shared = false, trimmed = false;
 	unsigned		lockmode;
 
 	if (XFS_FORCED_SHUTDOWN(mp))
 		return -EIO;
 
-	if ((flags & (IOMAP_WRITE | IOMAP_ZERO)) && xfs_is_reflink_inode(ip)) {
-		error = xfs_reflink_reserve_cow_range(ip, offset, length);
-		if (error < 0)
-			return error;
-	}
-
 	if ((flags & IOMAP_WRITE) && !IS_DAX(inode) &&
 	    !xfs_get_extsz_hint(ip)) {
 		/* Reserve delalloc blocks for regular writeback. */
@@ -981,7 +976,16 @@ xfs_file_iomap_begin(
 			iomap);
 	}
 
-	lockmode = xfs_ilock_data_map_shared(ip);
+	/*
+	 * COW writes will allocate delalloc space, so we need to make sure
+	 * to take the lock exclusively here.
+	 */
+	if ((flags & (IOMAP_WRITE | IOMAP_ZERO)) && xfs_is_reflink_inode(ip)) {
+		lockmode = XFS_ILOCK_EXCL;
+		xfs_ilock(ip, XFS_ILOCK_EXCL);
+	} else {
+		lockmode = xfs_ilock_data_map_shared(ip);
+	}
 
 	ASSERT(offset <= mp->m_super->s_maxbytes);
 	if ((xfs_fsize_t)offset + length > mp->m_super->s_maxbytes)
@@ -991,16 +995,24 @@ xfs_file_iomap_begin(
 	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
 			       &nimaps, 0);
-	if (error) {
-		xfs_iunlock(ip, lockmode);
-		return error;
-	}
-
-	/* Trim the mapping to the nearest shared extent boundary. */
-	error = xfs_reflink_trim_around_shared(ip, &imap, &shared, &trimmed);
-	if (error) {
-		xfs_iunlock(ip, lockmode);
-		return error;
-	}
+	if (error)
+		goto out_unlock;
+
+	if (flags & IOMAP_REPORT) {
+		/* Trim the mapping to the nearest shared extent boundary. */
+		error = xfs_reflink_trim_around_shared(ip, &imap, &shared,
+				&trimmed);
+		if (error)
+			goto out_unlock;
+	}
+
+	if ((flags & (IOMAP_WRITE | IOMAP_ZERO)) && xfs_is_reflink_inode(ip)) {
+		error = xfs_reflink_reserve_cow(ip, &imap, &shared);
+		if (error)
+			goto out_unlock;
+
+		end_fsb = imap.br_startoff + imap.br_blockcount;
+		length = XFS_FSB_TO_B(mp, end_fsb) - offset;
+	}
 
 	if ((flags & IOMAP_WRITE) && imap_needs_alloc(inode, &imap, nimaps)) {
@@ -1039,6 +1051,9 @@ xfs_file_iomap_begin(
 	if (shared)
 		iomap->flags |= IOMAP_F_SHARED;
 	return 0;
+out_unlock:
+	xfs_iunlock(ip, lockmode);
+	return error;
 }
 
 static int
fs/xfs/xfs_mount.c View file @ e3df41f9
@@ -1009,6 +1009,7 @@ xfs_mountfs(
  out_quota:
 	xfs_qm_unmount_quotas(mp);
  out_rtunmount:
+	mp->m_super->s_flags &= ~MS_ACTIVE;
 	xfs_rtunmount_inodes(mp);
  out_rele_rip:
 	IRELE(rip);
fs/xfs/xfs_qm.c View file @ e3df41f9
@@ -1135,7 +1135,7 @@ xfs_qm_get_rtblks(
 		return error;
 	}
 	rtblks = 0;
-	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+	nextents = xfs_iext_count(ifp);
 	for (idx = 0; idx < nextents; idx++)
 		rtblks += xfs_bmbt_get_blockcount(xfs_iext_get_ext(ifp, idx));
 	*O_rtblks = (xfs_qcnt_t)rtblks;
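The only functional content of this hunk is that the open-coded extent count, ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t), is now hidden behind a helper. Presumably xfs_iext_count() performs exactly that division; the sketch below models it with stand-in names and an assumed record size rather than quoting the real helper:

#include <stdio.h>

/*
 * Hypothetical model of the helper used above.  The removed line shows
 * the computation it replaces: the fork's incore byte count divided by
 * the size of one bmbt extent record.  Names and the record size are
 * stand-ins, not the kernel definitions.
 */
struct model_ifork {
	int	if_bytes;		/* bytes of incore extent records */
};

#define MODEL_BMBT_REC_SIZE	16	/* assumed sizeof(xfs_bmbt_rec_t) */

static int model_iext_count(const struct model_ifork *ifp)
{
	return ifp->if_bytes / MODEL_BMBT_REC_SIZE;
}

int main(void)
{
	struct model_ifork ifp = { .if_bytes = 160 };

	printf("%d extents\n", model_iext_count(&ifp));	/* prints 10 */
	return 0;
}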
fs/xfs/xfs_reflink.c View file @ e3df41f9
This diff is collapsed.
fs/xfs/xfs_reflink.h View file @ e3df41f9
@@ -26,13 +26,13 @@ extern int xfs_reflink_find_shared(struct xfs_mount *mp, xfs_agnumber_t agno,
 extern int xfs_reflink_trim_around_shared(struct xfs_inode *ip,
 		struct xfs_bmbt_irec *irec, bool *shared, bool *trimmed);
-extern int xfs_reflink_reserve_cow_range(struct xfs_inode *ip,
-		xfs_off_t offset, xfs_off_t count);
+extern int xfs_reflink_reserve_cow(struct xfs_inode *ip,
+		struct xfs_bmbt_irec *imap, bool *shared);
 extern int xfs_reflink_allocate_cow_range(struct xfs_inode *ip,
 		xfs_off_t offset, xfs_off_t count);
 extern bool xfs_reflink_find_cow_mapping(struct xfs_inode *ip, xfs_off_t offset,
-		struct xfs_bmbt_irec *imap, bool *need_alloc);
-extern int xfs_reflink_trim_irec_to_next_cow(struct xfs_inode *ip,
+		struct xfs_bmbt_irec *imap);
+extern void xfs_reflink_trim_irec_to_next_cow(struct xfs_inode *ip,
 		xfs_fileoff_t offset_fsb, struct xfs_bmbt_irec *imap);
 extern int xfs_reflink_cancel_cow_blocks(struct xfs_inode *ip,
@@ -43,16 +43,11 @@ extern int xfs_reflink_cancel_cow_range(struct xfs_inode *ip, xfs_off_t offset,
 extern int xfs_reflink_end_cow(struct xfs_inode *ip, xfs_off_t offset,
 		xfs_off_t count);
 extern int xfs_reflink_recover_cow(struct xfs_mount *mp);
-#define XFS_REFLINK_DEDUPE	1	/* only reflink if contents match */
-#define XFS_REFLINK_ALL		(XFS_REFLINK_DEDUPE)
-extern int xfs_reflink_remap_range(struct xfs_inode *src, xfs_off_t srcoff,
-		struct xfs_inode *dest, xfs_off_t destoff, xfs_off_t len,
-		unsigned int flags);
+extern int xfs_reflink_remap_range(struct file *file_in, loff_t pos_in,
+		struct file *file_out, loff_t pos_out, u64 len, bool is_dedupe);
 extern int xfs_reflink_clear_inode_flag(struct xfs_inode *ip,
 		struct xfs_trans **tpp);
 extern int xfs_reflink_unshare(struct xfs_inode *ip, xfs_off_t offset,
 		xfs_off_t len);
-extern bool xfs_reflink_has_real_cow_blocks(struct xfs_inode *ip);
 
 #endif /* __XFS_REFLINK_H */
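The prototype change above moves xfs_reflink_remap_range() from inode/offset/flags arguments to the file/pos/len shape used by the VFS clone and dedupe paths, with a single is_dedupe flag replacing the old XFS_REFLINK_DEDUPE bit. A hedged sketch of how a caller would use the new prototype follows; the wrapper names are made up for illustration, it compiles only in-tree against this header, and it is not the actual fs/xfs/xfs_file.c wiring:

/*
 * Illustrative callers of the prototype declared above; the wrapper
 * names are invented for this sketch.
 */
static int example_clone_range(struct file *src, loff_t src_pos,
			       struct file *dst, loff_t dst_pos, u64 len)
{
	/* plain reflink: share the blocks unconditionally */
	return xfs_reflink_remap_range(src, src_pos, dst, dst_pos, len, false);
}

static int example_dedupe_range(struct file *src, loff_t src_pos,
				struct file *dst, loff_t dst_pos, u64 len)
{
	/* only share blocks whose contents already match */
	return xfs_reflink_remap_range(src, src_pos, dst, dst_pos, len, true);
}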
fs/xfs/xfs_sysfs.c View file @ e3df41f9
@@ -512,13 +512,13 @@ static struct attribute *xfs_error_attrs[] = {
 };
 
-struct kobj_type xfs_error_cfg_ktype = {
+static struct kobj_type xfs_error_cfg_ktype = {
 	.release = xfs_sysfs_release,
 	.sysfs_ops = &xfs_sysfs_ops,
 	.default_attrs = xfs_error_attrs,
 };
 
-struct kobj_type xfs_error_ktype = {
+static struct kobj_type xfs_error_ktype = {
 	.release = xfs_sysfs_release,
 	.sysfs_ops = &xfs_sysfs_ops,
 };
fs/xfs/xfs_trace.h View file @ e3df41f9
@@ -3346,7 +3346,7 @@ DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_alloc);
 DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_found);
 DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_enospc);
-DEFINE_RW_EVENT(xfs_reflink_reserve_cow_range);
+DEFINE_RW_EVENT(xfs_reflink_reserve_cow);
 DEFINE_RW_EVENT(xfs_reflink_allocate_cow_range);
 DEFINE_INODE_IREC_EVENT(xfs_reflink_bounce_dio_write);
@@ -3356,9 +3356,7 @@ DEFINE_INODE_IREC_EVENT(xfs_reflink_trim_irec);
 DEFINE_SIMPLE_IO_EVENT(xfs_reflink_cancel_cow_range);
 DEFINE_SIMPLE_IO_EVENT(xfs_reflink_end_cow);
 DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_remap);
-DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_remap_piece);
 
-DEFINE_INODE_ERROR_EVENT(xfs_reflink_reserve_cow_range_error);
 DEFINE_INODE_ERROR_EVENT(xfs_reflink_allocate_cow_range_error);
 DEFINE_INODE_ERROR_EVENT(xfs_reflink_cancel_cow_range_error);
 DEFINE_INODE_ERROR_EVENT(xfs_reflink_end_cow_error);
include/linux/iomap.h View file @ e3df41f9
@@ -19,11 +19,15 @@ struct vm_fault;
 #define IOMAP_UNWRITTEN	0x04	/* blocks allocated @blkno in unwritten state */
 
 /*
- * Flags for iomap mappings:
+ * Flags for all iomap mappings:
  */
-#define IOMAP_F_MERGED	0x01	/* contains multiple blocks/extents */
-#define IOMAP_F_SHARED	0x02	/* block shared with another file */
-#define IOMAP_F_NEW	0x04	/* blocks have been newly allocated */
+#define IOMAP_F_NEW	0x01	/* blocks have been newly allocated */
+
+/*
+ * Flags that only need to be reported for IOMAP_REPORT requests:
+ */
+#define IOMAP_F_MERGED	0x10	/* contains multiple blocks/extents */
+#define IOMAP_F_SHARED	0x20	/* block shared with another file */
 
 /*
  * Magic value for blkno:
@@ -42,8 +46,9 @@ struct iomap {
 /*
  * Flags for iomap_begin / iomap_end. No flag implies a read.
  */
-#define IOMAP_WRITE		(1 << 0)
-#define IOMAP_ZERO		(1 << 1)
+#define IOMAP_WRITE		(1 << 0) /* writing, must allocate blocks */
+#define IOMAP_ZERO		(1 << 1) /* zeroing operation, may skip holes */
+#define IOMAP_REPORT		(1 << 2) /* report extent status, e.g. FIEMAP */
 #define IOMAP_FAULT		(1 << 3) /* mapping for page fault */
 
 struct iomap_ops {
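After this change IOMAP_F_NEW sits in the "valid for every mapping" namespace, while IOMAP_F_MERGED and IOMAP_F_SHARED only have to be computed when the caller asked for IOMAP_REPORT. An ->iomap_begin implementation can therefore skip the sharing and merging bookkeeping on the write and zero paths, roughly as sketched below. This is a generic in-kernel illustration against the header above, not any particular filesystem's code; the extent lookup, allocation and sharing checks are placeholders:

/*
 * Sketch of an ->iomap_begin honouring the flag split introduced above.
 * "newly_allocated", "shared" and "merged" stand in for real extent-map
 * state that a filesystem would compute.
 */
static int example_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
		unsigned flags, struct iomap *iomap)
{
	bool newly_allocated = false, shared = false, merged = false;

	/* ... fill iomap->blkno/offset/length from the extent map ... */

	if (flags & IOMAP_WRITE)
		newly_allocated = true;	/* placeholder: pretend we allocated */

	if (newly_allocated)
		iomap->flags |= IOMAP_F_NEW;

	/* The expensive sharing/merging checks only matter for FIEMAP etc. */
	if (flags & IOMAP_REPORT) {
		if (shared)
			iomap->flags |= IOMAP_F_SHARED;
		if (merged)
			iomap->flags |= IOMAP_F_MERGED;
	}
	return 0;
}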