Commit 4225441a authored Feb 24, 2015 by Dave Chinner

    Merge branch 'xfs-generic-sb-counters' into for-next

    Conflicts:
        fs/xfs/xfs_super.c

parents 3cabb836 964aa8d9
Showing 13 changed files, with 381 additions and 1129 deletions (+381 / -1129).
fs/xfs/libxfs/xfs_bmap.c      +16   -24
fs/xfs/libxfs/xfs_format.h    +0    -62
fs/xfs/libxfs/xfs_ialloc.c    +4    -2
fs/xfs/libxfs/xfs_sb.c        +7    -5
fs/xfs/xfs_fsops.c            +8    -12
fs/xfs/xfs_iomap.c            +1    -2
fs/xfs/xfs_linux.h            +0    -9
fs/xfs/xfs_log_recover.c      +2    -2
fs/xfs/xfs_mount.c            +108  -810
fs/xfs/xfs_mount.h            +13   -82
fs/xfs/xfs_super.c            +78   -27
fs/xfs/xfs_super.h            +2    -0
fs/xfs/xfs_trans.c            +142  -92
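Taken together, the hunks below retire XFS's hand-rolled per-cpu superblock
counters (the xfs_icsb_* machinery) in favour of the kernel's generic
struct percpu_counter for the allocated-inode, free-inode and free-block
counts. As a reading aid before the diffs, here is a minimal sketch of the
generic API the merge switches to; the counter and the wrapper functions are
illustrative, only the percpu_counter calls themselves are real:

    #include <linux/percpu_counter.h>

    static struct percpu_counter free_blocks;   /* illustrative counter */

    static int counters_init(void)
    {
            /* allocates one per-cpu slot plus a shared global count */
            return percpu_counter_init(&free_blocks, 0, GFP_KERNEL);
    }

    static void blocks_freed(s64 n)
    {
            /* cheap per-cpu update; drift bounded by nr_cpus * batch */
            percpu_counter_add(&free_blocks, n);
    }

    static s64 blocks_exact(void)
    {
            /* accurate but serialised: folds every CPU's pending delta */
            return percpu_counter_sum(&free_blocks);
    }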
fs/xfs/libxfs/xfs_bmap.c

@@ -2215,9 +2215,8 @@ xfs_bmap_add_extent_delay_real(
 		diff = (int)(temp + temp2 - startblockval(PREV.br_startblock) -
 			(bma->cur ? bma->cur->bc_private.b.allocated : 0));
 		if (diff > 0) {
-			error = xfs_icsb_modify_counters(bma->ip->i_mount,
-					XFS_SBS_FDBLOCKS,
-					-((int64_t)diff), 0);
+			error = xfs_mod_fdblocks(bma->ip->i_mount,
+						 -((int64_t)diff), false);
 			ASSERT(!error);
 			if (error)
 				goto done;
@@ -2268,9 +2267,8 @@ xfs_bmap_add_extent_delay_real(
 			temp += bma->cur->bc_private.b.allocated;
 		ASSERT(temp <= da_old);
 		if (temp < da_old)
-			xfs_icsb_modify_counters(bma->ip->i_mount,
-					XFS_SBS_FDBLOCKS,
-					(int64_t)(da_old - temp), 0);
+			xfs_mod_fdblocks(bma->ip->i_mount,
+					(int64_t)(da_old - temp), false);
 	}

 	/* clear out the allocated field, done with it now in any case. */
@@ -2948,8 +2946,8 @@ xfs_bmap_add_extent_hole_delay(
 	}
 	if (oldlen != newlen) {
 		ASSERT(oldlen > newlen);
-		xfs_icsb_modify_counters(ip->i_mount, XFS_SBS_FDBLOCKS,
-			(int64_t)(oldlen - newlen), 0);
+		xfs_mod_fdblocks(ip->i_mount, (int64_t)(oldlen - newlen),
+				 false);
 		/*
 		 * Nothing to do for disk quota accounting here.
 		 */
@@ -4166,18 +4164,15 @@ xfs_bmapi_reserve_delalloc(
 	ASSERT(indlen > 0);

 	if (rt) {
-		error = xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS,
-					  -((int64_t)extsz), 0);
+		error = xfs_mod_frextents(mp, -((int64_t)extsz));
 	} else {
-		error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
-						 -((int64_t)alen), 0);
+		error = xfs_mod_fdblocks(mp, -((int64_t)alen), false);
 	}

 	if (error)
 		goto out_unreserve_quota;

-	error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
-					 -((int64_t)indlen), 0);
+	error = xfs_mod_fdblocks(mp, -((int64_t)indlen), false);
 	if (error)
 		goto out_unreserve_blocks;

@@ -4204,9 +4199,9 @@ xfs_bmapi_reserve_delalloc(
 out_unreserve_blocks:
 	if (rt)
-		xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS, extsz, 0);
+		xfs_mod_frextents(mp, extsz);
 	else
-		xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, alen, 0);
+		xfs_mod_fdblocks(mp, alen, false);
 out_unreserve_quota:
 	if (XFS_IS_QUOTA_ON(mp))
 		xfs_trans_unreserve_quota_nblks(NULL, ip, (long)alen, 0, rt ?
@@ -5019,10 +5014,8 @@ xfs_bmap_del_extent(
 	 * Nothing to do for disk quota accounting here.
 	 */
 	ASSERT(da_old >= da_new);
-	if (da_old > da_new) {
-		xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
-			(int64_t)(da_old - da_new), 0);
-	}
+	if (da_old > da_new)
+		xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new), false);
 done:
 	*logflagsp = flags;
 	return error;
@@ -5291,14 +5284,13 @@ xfs_bunmapi(
 				rtexts = XFS_FSB_TO_B(mp, del.br_blockcount);
 				do_div(rtexts, mp->m_sb.sb_rextsize);
-				xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS,
-						(int64_t)rtexts, 0);
+				xfs_mod_frextents(mp, (int64_t)rtexts);
 				(void)xfs_trans_reserve_quota_nblks(NULL,
 					ip, -((long)del.br_blockcount), 0,
 					XFS_QMOPT_RES_RTBLKS);
 			} else {
-				xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
-						(int64_t)del.br_blockcount, 0);
+				xfs_mod_fdblocks(mp, (int64_t)del.br_blockcount,
+						 false);
 				(void)xfs_trans_reserve_quota_nblks(NULL,
 					ip, -((long)del.br_blockcount), 0,
 					XFS_QMOPT_RES_REGBLKS);
fs/xfs/libxfs/xfs_format.h

@@ -264,68 +264,6 @@ typedef struct xfs_dsb {
 	/* must be padded to 64 bit alignment */
 } xfs_dsb_t;

-/*
- * Sequence number values for the fields.
- */
-typedef enum {
-	XFS_SBS_MAGICNUM, XFS_SBS_BLOCKSIZE, XFS_SBS_DBLOCKS, XFS_SBS_RBLOCKS,
-	XFS_SBS_REXTENTS, XFS_SBS_UUID, XFS_SBS_LOGSTART, XFS_SBS_ROOTINO,
-	XFS_SBS_RBMINO, XFS_SBS_RSUMINO, XFS_SBS_REXTSIZE, XFS_SBS_AGBLOCKS,
-	XFS_SBS_AGCOUNT, XFS_SBS_RBMBLOCKS, XFS_SBS_LOGBLOCKS,
-	XFS_SBS_VERSIONNUM, XFS_SBS_SECTSIZE, XFS_SBS_INODESIZE,
-	XFS_SBS_INOPBLOCK, XFS_SBS_FNAME, XFS_SBS_BLOCKLOG,
-	XFS_SBS_SECTLOG, XFS_SBS_INODELOG, XFS_SBS_INOPBLOG, XFS_SBS_AGBLKLOG,
-	XFS_SBS_REXTSLOG, XFS_SBS_INPROGRESS, XFS_SBS_IMAX_PCT, XFS_SBS_ICOUNT,
-	XFS_SBS_IFREE, XFS_SBS_FDBLOCKS, XFS_SBS_FREXTENTS, XFS_SBS_UQUOTINO,
-	XFS_SBS_GQUOTINO, XFS_SBS_QFLAGS, XFS_SBS_FLAGS, XFS_SBS_SHARED_VN,
-	XFS_SBS_INOALIGNMT, XFS_SBS_UNIT, XFS_SBS_WIDTH, XFS_SBS_DIRBLKLOG,
-	XFS_SBS_LOGSECTLOG, XFS_SBS_LOGSECTSIZE, XFS_SBS_LOGSUNIT,
-	XFS_SBS_FEATURES2, XFS_SBS_BAD_FEATURES2, XFS_SBS_FEATURES_COMPAT,
-	XFS_SBS_FEATURES_RO_COMPAT, XFS_SBS_FEATURES_INCOMPAT,
-	XFS_SBS_FEATURES_LOG_INCOMPAT, XFS_SBS_CRC, XFS_SBS_PAD,
-	XFS_SBS_PQUOTINO, XFS_SBS_LSN,
-	XFS_SBS_FIELDCOUNT
-} xfs_sb_field_t;
-
-/*
- * Mask values, defined based on the xfs_sb_field_t values.
- * Only define the ones we're using.
- */
-#define XFS_SB_MVAL(x)		(1LL << XFS_SBS_ ## x)
-#define XFS_SB_UUID		XFS_SB_MVAL(UUID)
-#define XFS_SB_FNAME		XFS_SB_MVAL(FNAME)
-#define XFS_SB_ROOTINO		XFS_SB_MVAL(ROOTINO)
-#define XFS_SB_RBMINO		XFS_SB_MVAL(RBMINO)
-#define XFS_SB_RSUMINO		XFS_SB_MVAL(RSUMINO)
-#define XFS_SB_VERSIONNUM	XFS_SB_MVAL(VERSIONNUM)
-#define XFS_SB_UQUOTINO		XFS_SB_MVAL(UQUOTINO)
-#define XFS_SB_GQUOTINO		XFS_SB_MVAL(GQUOTINO)
-#define XFS_SB_QFLAGS		XFS_SB_MVAL(QFLAGS)
-#define XFS_SB_SHARED_VN	XFS_SB_MVAL(SHARED_VN)
-#define XFS_SB_UNIT		XFS_SB_MVAL(UNIT)
-#define XFS_SB_WIDTH		XFS_SB_MVAL(WIDTH)
-#define XFS_SB_ICOUNT		XFS_SB_MVAL(ICOUNT)
-#define XFS_SB_IFREE		XFS_SB_MVAL(IFREE)
-#define XFS_SB_FDBLOCKS		XFS_SB_MVAL(FDBLOCKS)
-#define XFS_SB_FEATURES2	(XFS_SB_MVAL(FEATURES2) | \
-				 XFS_SB_MVAL(BAD_FEATURES2))
-#define XFS_SB_FEATURES_COMPAT	XFS_SB_MVAL(FEATURES_COMPAT)
-#define XFS_SB_FEATURES_RO_COMPAT XFS_SB_MVAL(FEATURES_RO_COMPAT)
-#define XFS_SB_FEATURES_INCOMPAT XFS_SB_MVAL(FEATURES_INCOMPAT)
-#define XFS_SB_FEATURES_LOG_INCOMPAT XFS_SB_MVAL(FEATURES_LOG_INCOMPAT)
-#define XFS_SB_CRC		XFS_SB_MVAL(CRC)
-#define XFS_SB_PQUOTINO		XFS_SB_MVAL(PQUOTINO)
-#define XFS_SB_NUM_BITS		((int)XFS_SBS_FIELDCOUNT)
-#define XFS_SB_ALL_BITS		((1LL << XFS_SB_NUM_BITS) - 1)
-#define XFS_SB_MOD_BITS		\
-	(XFS_SB_UUID | XFS_SB_ROOTINO | XFS_SB_RBMINO | XFS_SB_RSUMINO | \
-	 XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO | XFS_SB_GQUOTINO | \
-	 XFS_SB_QFLAGS | XFS_SB_SHARED_VN | XFS_SB_UNIT | XFS_SB_WIDTH | \
-	 XFS_SB_ICOUNT | XFS_SB_IFREE | XFS_SB_FDBLOCKS | XFS_SB_FEATURES2 | \
-	 XFS_SB_FEATURES_COMPAT | XFS_SB_FEATURES_RO_COMPAT | \
-	 XFS_SB_FEATURES_INCOMPAT | XFS_SB_FEATURES_LOG_INCOMPAT | \
-	 XFS_SB_PQUOTINO)
-
 /*
  * Misc. Flags - warning - these will be cleared by xfs_repair unless
fs/xfs/libxfs/xfs_ialloc.c

@@ -376,7 +376,8 @@ xfs_ialloc_ag_alloc(
 	 */
 	newlen = args.mp->m_ialloc_inos;
 	if (args.mp->m_maxicount &&
-	    args.mp->m_sb.sb_icount + newlen > args.mp->m_maxicount)
+	    percpu_counter_read(&args.mp->m_icount) + newlen >
+							args.mp->m_maxicount)
 		return -ENOSPC;
 	args.minlen = args.maxlen = args.mp->m_ialloc_blks;
 	/*
@@ -1340,7 +1341,8 @@ xfs_dialloc(
 	 * inode.
 	 */
 	if (mp->m_maxicount &&
-	    mp->m_sb.sb_icount + mp->m_ialloc_inos > mp->m_maxicount) {
+	    percpu_counter_read(&mp->m_icount) + mp->m_ialloc_inos >
+							mp->m_maxicount) {
 		noroom = 1;
 		okalloc = 0;
 	}
fs/xfs/libxfs/xfs_sb.c

@@ -735,17 +735,15 @@ xfs_initialize_perag_data(
 		btree += pag->pagf_btreeblks;
 		xfs_perag_put(pag);
 	}
-	/*
-	 * Overwrite incore superblock counters with just-read data
-	 */
+
+	/* Overwrite incore superblock counters with just-read data */
 	spin_lock(&mp->m_sb_lock);
 	sbp->sb_ifree = ifree;
 	sbp->sb_icount = ialloc;
 	sbp->sb_fdblocks = bfree + bfreelst + btree;
 	spin_unlock(&mp->m_sb_lock);

-	/* Fixup the per-cpu counters as well. */
-	xfs_icsb_reinit_counters(mp);
+	xfs_reinit_percpu_counters(mp);

 	return 0;
 }
@@ -763,6 +761,10 @@ xfs_log_sb(
 	struct xfs_mount	*mp = tp->t_mountp;
 	struct xfs_buf		*bp = xfs_trans_getsb(tp, mp, 0);

+	mp->m_sb.sb_icount = percpu_counter_sum(&mp->m_icount);
+	mp->m_sb.sb_ifree = percpu_counter_sum(&mp->m_ifree);
+	mp->m_sb.sb_fdblocks = percpu_counter_sum(&mp->m_fdblocks);
+
 	xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb);
 	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
 	xfs_trans_log_buf(tp, bp, 0, sizeof(struct xfs_dsb));
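The xfs_log_sb() hunk above shows the pattern used whenever the superblock
image must be exact before it is written: percpu_counter_sum() takes the
counter's internal lock and folds in every CPU's pending delta, whereas
percpu_counter_read() only returns the cached global value. A two-line
contrast (the surrounding context is illustrative):

    s64 estimate = percpu_counter_read(&mp->m_icount); /* fast, may lag */
    s64 exact    = percpu_counter_sum(&mp->m_icount);  /* locks, folds all CPUs */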
fs/xfs/xfs_fsops.c

@@ -637,12 +637,13 @@ xfs_fs_counts(
 	xfs_mount_t		*mp,
 	xfs_fsop_counts_t	*cnt)
 {
-	xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);
+	cnt->allocino = percpu_counter_read_positive(&mp->m_icount);
+	cnt->freeino = percpu_counter_read_positive(&mp->m_ifree);
+	cnt->freedata = percpu_counter_read_positive(&mp->m_fdblocks) -
+							XFS_ALLOC_SET_ASIDE(mp);
+
 	spin_lock(&mp->m_sb_lock);
-	cnt->freedata = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
 	cnt->freertx = mp->m_sb.sb_frextents;
-	cnt->freeino = mp->m_sb.sb_ifree;
-	cnt->allocino = mp->m_sb.sb_icount;
 	spin_unlock(&mp->m_sb_lock);

 	return 0;
 }
@@ -692,14 +693,9 @@ xfs_reserve_blocks(
 	 * what to do. This means that the amount of free space can
 	 * change while we do this, so we need to retry if we end up
 	 * trying to reserve more space than is available.
-	 *
-	 * We also use the xfs_mod_incore_sb() interface so that we
-	 * don't have to care about whether per cpu counter are
-	 * enabled, disabled or even compiled in....
 	 */
 retry:
 	spin_lock(&mp->m_sb_lock);
-	xfs_icsb_sync_counters_locked(mp, 0);

 	/*
 	 * If our previous reservation was larger than the current value,
@@ -716,7 +712,8 @@ xfs_reserve_blocks(
 	} else {
 		__int64_t	free;

-		free = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
+		free = percpu_counter_sum(&mp->m_fdblocks) -
+							XFS_ALLOC_SET_ASIDE(mp);
 		if (!free)
 			goto out; /* ENOSPC and fdblks_delta = 0 */
@@ -755,8 +752,7 @@ xfs_reserve_blocks(
 		 * the extra reserve blocks from the reserve.....
 		 */
 		int error;
-		error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
-						 fdblks_delta, 0);
+		error = xfs_mod_fdblocks(mp, fdblks_delta, 0);
 		if (error == -ENOSPC)
 			goto retry;
 	}
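Note the reader chosen in xfs_fs_counts() above: percpu_counter_read_positive()
is the lock-free read that clamps a transiently negative global value to zero.
The fast value can briefly dip below the true total while other CPUs hold
unfolded positive deltas, which is harmless for statistics, so the clamped
reader is enough here. A hedged one-liner in the same spirit (context
illustrative):

    /* never reports negative free space, even mid-update */
    s64 freedata = percpu_counter_read_positive(&mp->m_fdblocks);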
fs/xfs/xfs_iomap.c

@@ -460,8 +460,7 @@ xfs_iomap_prealloc_size(
 	alloc_blocks = XFS_FILEOFF_MIN(roundup_pow_of_two(MAXEXTLEN),
 				       alloc_blocks);

-	xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);
-	freesp = mp->m_sb.sb_fdblocks;
+	freesp = percpu_counter_read_positive(&mp->m_fdblocks);
 	if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) {
 		shift = 2;
 		if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT])
fs/xfs/xfs_linux.h

@@ -116,15 +116,6 @@ typedef __uint64_t __psunsigned_t;
 #undef XFS_NATIVE_HOST
 #endif

-/*
- * Feature macros (disable/enable)
- */
-#ifdef CONFIG_SMP
-#define HAVE_PERCPU_SB	/* per cpu superblock counters are a 2.6 feature */
-#else
-#undef  HAVE_PERCPU_SB	/* per cpu superblock counters are a 2.6 feature */
-#endif
-
 #define irix_sgid_inherit	xfs_params.sgid_inherit.val
 #define irix_symlink_mode	xfs_params.symlink_mode.val
 #define xfs_panic_mask		xfs_params.panic_mask.val
fs/xfs/xfs_log_recover.c

@@ -4463,10 +4463,10 @@ xlog_do_recover(
 	xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
 	ASSERT(sbp->sb_magicnum == XFS_SB_MAGIC);
 	ASSERT(xfs_sb_good_version(sbp));
+	xfs_reinit_percpu_counters(log->l_mp);
+
 	xfs_buf_relse(bp);

-	/* We've re-read the superblock so re-initialize per-cpu counters */
-	xfs_icsb_reinit_counters(log->l_mp);
-
 	xlog_recover_check_summary(log);
fs/xfs/xfs_mount.c

@@ -43,18 +43,6 @@
 #include "xfs_sysfs.h"


-#ifdef HAVE_PERCPU_SB
-STATIC void	xfs_icsb_balance_counter(xfs_mount_t *, xfs_sb_field_t,
-						int);
-STATIC void	xfs_icsb_balance_counter_locked(xfs_mount_t *, xfs_sb_field_t,
-						int);
-STATIC void	xfs_icsb_disable_counter(xfs_mount_t *, xfs_sb_field_t);
-#else
-
-#define xfs_icsb_balance_counter(mp, a, b)		do { } while (0)
-#define xfs_icsb_balance_counter_locked(mp, a, b)	do { } while (0)
-#endif
-
 static DEFINE_MUTEX(xfs_uuid_table_mutex);
 static int xfs_uuid_table_size;
 static uuid_t *xfs_uuid_table;
@@ -347,8 +335,7 @@ xfs_readsb(
 		goto reread;
 	}

-	/* Initialize per-cpu counters */
-	xfs_icsb_reinit_counters(mp);
+	xfs_reinit_percpu_counters(mp);

 	/* no need to be quiet anymore, so reset the buf ops */
 	bp->b_ops = &xfs_sb_buf_ops;
@@ -1087,8 +1074,6 @@ xfs_log_sbcount(xfs_mount_t *mp)
 	if (!xfs_fs_writable(mp, SB_FREEZE_COMPLETE))
 		return 0;

-	xfs_icsb_sync_counters(mp, 0);
-
 	/*
 	 * we don't need to do this if we are updating the superblock
 	 * counters on every modification.
@@ -1099,253 +1084,136 @@ xfs_log_sbcount(xfs_mount_t *mp)
 	return xfs_sync_sb(mp, true);
 }

-/*
- * xfs_mod_incore_sb_unlocked() is a utility routine commonly used to apply
- * a delta to a specified field in the in-core superblock.  Simply
- * switch on the field indicated and apply the delta to that field.
- * Fields are not allowed to dip below zero, so if the delta would
- * do this do not apply it and return EINVAL.
- *
- * The m_sb_lock must be held when this routine is called.
- */
-STATIC int
-xfs_mod_incore_sb_unlocked(
-	xfs_mount_t	*mp,
-	xfs_sb_field_t	field,
-	int64_t		delta,
-	int		rsvd)
-{
-	int		scounter;	/* short counter for 32 bit fields */
-	long long	lcounter;	/* long counter for 64 bit fields */
-	long long	res_used, rem;
-
-	/*
-	 * With the in-core superblock spin lock held, switch
-	 * on the indicated field.  Apply the delta to the
-	 * proper field.  If the fields value would dip below
-	 * 0, then do not apply the delta and return EINVAL.
-	 */
-	switch (field) {
-	case XFS_SBS_ICOUNT:
-		lcounter = (long long)mp->m_sb.sb_icount;
-		lcounter += delta;
-		if (lcounter < 0) {
-			ASSERT(0);
-			return -EINVAL;
-		}
-		mp->m_sb.sb_icount = lcounter;
-		return 0;
-	case XFS_SBS_IFREE:
-		lcounter = (long long)mp->m_sb.sb_ifree;
-		lcounter += delta;
-		if (lcounter < 0) {
-			ASSERT(0);
-			return -EINVAL;
-		}
-		mp->m_sb.sb_ifree = lcounter;
-		return 0;
-	case XFS_SBS_FDBLOCKS:
-		lcounter = (long long)
-			mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
-		res_used = (long long)(mp->m_resblks - mp->m_resblks_avail);
-
-		if (delta > 0) {		/* Putting blocks back */
-			if (res_used > delta) {
-				mp->m_resblks_avail += delta;
-			} else {
-				rem = delta - res_used;
-				mp->m_resblks_avail = mp->m_resblks;
-				lcounter += rem;
-			}
-		} else {			/* Taking blocks away */
-			lcounter += delta;
-			if (lcounter >= 0) {
-				mp->m_sb.sb_fdblocks = lcounter +
-							XFS_ALLOC_SET_ASIDE(mp);
-				return 0;
-			}
-
-			/*
-			 * We are out of blocks, use any available reserved
-			 * blocks if were allowed to.
-			 */
-			if (!rsvd)
-				return -ENOSPC;
-
-			lcounter = (long long)mp->m_resblks_avail + delta;
-			if (lcounter >= 0) {
-				mp->m_resblks_avail = lcounter;
-				return 0;
-			}
-			printk_once(KERN_WARNING
-				"Filesystem \"%s\": reserve blocks depleted! "
-				"Consider increasing reserve pool size.",
-				mp->m_fsname);
-			return -ENOSPC;
-		}
-
-		mp->m_sb.sb_fdblocks = lcounter + XFS_ALLOC_SET_ASIDE(mp);
-		return 0;
-	case XFS_SBS_FREXTENTS:
-		lcounter = (long long)mp->m_sb.sb_frextents;
-		lcounter += delta;
-		if (lcounter < 0) {
-			return -ENOSPC;
-		}
-		mp->m_sb.sb_frextents = lcounter;
-		return 0;
-	case XFS_SBS_DBLOCKS:
-		lcounter = (long long)mp->m_sb.sb_dblocks;
-		lcounter += delta;
-		if (lcounter < 0) {
-			ASSERT(0);
-			return -EINVAL;
-		}
-		mp->m_sb.sb_dblocks = lcounter;
-		return 0;
-	case XFS_SBS_AGCOUNT:
-		scounter = mp->m_sb.sb_agcount;
-		scounter += delta;
-		if (scounter < 0) {
-			ASSERT(0);
-			return -EINVAL;
-		}
-		mp->m_sb.sb_agcount = scounter;
-		return 0;
-	case XFS_SBS_IMAX_PCT:
-		scounter = mp->m_sb.sb_imax_pct;
-		scounter += delta;
-		if (scounter < 0) {
-			ASSERT(0);
-			return -EINVAL;
-		}
-		mp->m_sb.sb_imax_pct = scounter;
-		return 0;
-	case XFS_SBS_REXTSIZE:
-		scounter = mp->m_sb.sb_rextsize;
-		scounter += delta;
-		if (scounter < 0) {
-			ASSERT(0);
-			return -EINVAL;
-		}
-		mp->m_sb.sb_rextsize = scounter;
-		return 0;
-	case XFS_SBS_RBMBLOCKS:
-		scounter = mp->m_sb.sb_rbmblocks;
-		scounter += delta;
-		if (scounter < 0) {
-			ASSERT(0);
-			return -EINVAL;
-		}
-		mp->m_sb.sb_rbmblocks = scounter;
-		return 0;
-	case XFS_SBS_RBLOCKS:
-		lcounter = (long long)mp->m_sb.sb_rblocks;
-		lcounter += delta;
-		if (lcounter < 0) {
-			ASSERT(0);
-			return -EINVAL;
-		}
-		mp->m_sb.sb_rblocks = lcounter;
-		return 0;
-	case XFS_SBS_REXTENTS:
-		lcounter = (long long)mp->m_sb.sb_rextents;
-		lcounter += delta;
-		if (lcounter < 0) {
-			ASSERT(0);
-			return -EINVAL;
-		}
-		mp->m_sb.sb_rextents = lcounter;
-		return 0;
-	case XFS_SBS_REXTSLOG:
-		scounter = mp->m_sb.sb_rextslog;
-		scounter += delta;
-		if (scounter < 0) {
-			ASSERT(0);
-			return -EINVAL;
-		}
-		mp->m_sb.sb_rextslog = scounter;
-		return 0;
-	default:
-		ASSERT(0);
-		return -EINVAL;
-	}
-}
-
-/*
- * xfs_mod_incore_sb() is used to change a field in the in-core
- * superblock structure by the specified delta.  This modification
- * is protected by the m_sb_lock.  Just use the xfs_mod_incore_sb_unlocked()
- * routine to do the work.
- */
-int
-xfs_mod_incore_sb(
-	struct xfs_mount	*mp,
-	xfs_sb_field_t		field,
-	int64_t			delta,
-	int			rsvd)
-{
-	int			status;
-
-#ifdef HAVE_PERCPU_SB
-	ASSERT(field < XFS_SBS_ICOUNT || field > XFS_SBS_FDBLOCKS);
-#endif
-	spin_lock(&mp->m_sb_lock);
-	status = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
-	spin_unlock(&mp->m_sb_lock);
-
-	return status;
-}
-
-/*
- * Change more than one field in the in-core superblock structure at a time.
- *
- * The fields and changes to those fields are specified in the array of
- * xfs_mod_sb structures passed in.  Either all of the specified deltas
- * will be applied or none of them will.  If any modified field dips below 0,
- * then all modifications will be backed out and EINVAL will be returned.
- *
- * Note that this function may not be used for the superblock values that
- * are tracked with the in-memory per-cpu counters - a direct call to
- * xfs_icsb_modify_counters is required for these.
- */
-int
-xfs_mod_incore_sb_batch(
-	struct xfs_mount	*mp,
-	xfs_mod_sb_t		*msb,
-	uint			nmsb,
-	int			rsvd)
-{
-	xfs_mod_sb_t		*msbp;
-	int			error = 0;
-
-	/*
-	 * Loop through the array of mod structures and apply each individually.
-	 * If any fail, then back out all those which have already been applied.
-	 * Do all of this within the scope of the m_sb_lock so that all of the
-	 * changes will be atomic.
-	 */
-	spin_lock(&mp->m_sb_lock);
-	for (msbp = msb; msbp < (msb + nmsb); msbp++) {
-		ASSERT(msbp->msb_field < XFS_SBS_ICOUNT ||
-		       msbp->msb_field > XFS_SBS_FDBLOCKS);
-
-		error = xfs_mod_incore_sb_unlocked(mp, msbp->msb_field,
-						   msbp->msb_delta, rsvd);
-		if (error)
-			goto unwind;
-	}
-	spin_unlock(&mp->m_sb_lock);
-	return 0;
-
-unwind:
-	while (--msbp >= msb) {
-		error = xfs_mod_incore_sb_unlocked(mp, msbp->msb_field,
-						   -msbp->msb_delta, rsvd);
-		ASSERT(error == 0);
-	}
-	spin_unlock(&mp->m_sb_lock);
-	return error;
-}
+int
+xfs_mod_icount(
+	struct xfs_mount	*mp,
+	int64_t			delta)
+{
+	/* deltas are +/-64, hence the large batch size of 128. */
+	__percpu_counter_add(&mp->m_icount, delta, 128);
+	if (percpu_counter_compare(&mp->m_icount, 0) < 0) {
+		ASSERT(0);
+		percpu_counter_add(&mp->m_icount, -delta);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int
+xfs_mod_ifree(
+	struct xfs_mount	*mp,
+	int64_t			delta)
+{
+	percpu_counter_add(&mp->m_ifree, delta);
+	if (percpu_counter_compare(&mp->m_ifree, 0) < 0) {
+		ASSERT(0);
+		percpu_counter_add(&mp->m_ifree, -delta);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int
+xfs_mod_fdblocks(
+	struct xfs_mount	*mp,
+	int64_t			delta,
+	bool			rsvd)
+{
+	int64_t			lcounter;
+	long long		res_used;
+	s32			batch;
+
+	if (delta > 0) {
+		/*
+		 * If the reserve pool is depleted, put blocks back into it
+		 * first. Most of the time the pool is full.
+		 */
+		if (likely(mp->m_resblks == mp->m_resblks_avail)) {
+			percpu_counter_add(&mp->m_fdblocks, delta);
+			return 0;
+		}
+
+		spin_lock(&mp->m_sb_lock);
+		res_used = (long long)(mp->m_resblks - mp->m_resblks_avail);
+
+		if (res_used > delta) {
+			mp->m_resblks_avail += delta;
+		} else {
+			delta -= res_used;
+			mp->m_resblks_avail = mp->m_resblks;
+			percpu_counter_add(&mp->m_fdblocks, delta);
+		}
+		spin_unlock(&mp->m_sb_lock);
+		return 0;
+	}
+
+	/*
+	 * Taking blocks away, need to be more accurate the closer we
+	 * are to zero.
+	 *
+	 * batch size is set to a maximum of 1024 blocks - if we are
+	 * allocating of freeing extents larger than this then we aren't
+	 * going to be hammering the counter lock so a lock per update
+	 * is not a problem.
+	 *
+	 * If the counter has a value of less than 2 * max batch size,
+	 * then make everything serialise as we are real close to
+	 * ENOSPC.
+	 */
+#define __BATCH	1024
+	if (percpu_counter_compare(&mp->m_fdblocks, 2 * __BATCH) < 0)
+		batch = 1;
+	else
+		batch = __BATCH;
+#undef __BATCH
+
+	__percpu_counter_add(&mp->m_fdblocks, delta, batch);
+	if (percpu_counter_compare(&mp->m_fdblocks,
+				   XFS_ALLOC_SET_ASIDE(mp)) >= 0) {
+		/* we had space! */
+		return 0;
+	}
+
+	/*
+	 * lock up the sb for dipping into reserves before releasing the space
+	 * that took us to ENOSPC.
+	 */
+	spin_lock(&mp->m_sb_lock);
+	percpu_counter_add(&mp->m_fdblocks, -delta);
+	if (!rsvd)
+		goto fdblocks_enospc;
+
+	lcounter = (long long)mp->m_resblks_avail + delta;
+	if (lcounter >= 0) {
+		mp->m_resblks_avail = lcounter;
+		spin_unlock(&mp->m_sb_lock);
+		return 0;
+	}
+	printk_once(KERN_WARNING
+		"Filesystem \"%s\": reserve blocks depleted! "
+		"Consider increasing reserve pool size.",
+		mp->m_fsname);
+fdblocks_enospc:
+	spin_unlock(&mp->m_sb_lock);
+	return -ENOSPC;
+}
+
+int
+xfs_mod_frextents(
+	struct xfs_mount	*mp,
+	int64_t			delta)
+{
+	int64_t			lcounter;
+	int			ret = 0;
+
+	spin_lock(&mp->m_sb_lock);
+	lcounter = mp->m_sb.sb_frextents + delta;
+	if (lcounter < 0)
+		ret = -ENOSPC;
+	else
+		mp->m_sb.sb_frextents = lcounter;
+	spin_unlock(&mp->m_sb_lock);
+	return ret;
+}

 /*
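The new xfs_mod_fdblocks() above is the heart of the series: when plenty of
space is free it updates the counter with a large batch (1024 blocks) so the
per-cpu fast path is taken, and it drops the batch to 1 near ENOSPC so the
comparison against XFS_ALLOC_SET_ASIDE(mp) stays accurate. A condensed sketch
of that trade-off; the function and its parameters are illustrative, and
__percpu_counter_add() is the batch-taking primitive of this kernel era
(later renamed percpu_counter_add_batch()):

    #define BATCH	1024

    static int take_blocks(struct percpu_counter *free, s64 delta, s64 set_aside)
    {
    	s32 batch = BATCH;

    	/* close to empty: serialise so the comparison below is exact */
    	if (percpu_counter_compare(free, 2 * BATCH) < 0)
    		batch = 1;

    	__percpu_counter_add(free, delta, batch);
    	if (percpu_counter_compare(free, set_aside) >= 0)
    		return 0;			/* we had space */

    	percpu_counter_add(free, -delta);	/* back the change out */
    	return -ENOSPC;
    }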
@@ -1407,573 +1275,3 @@ xfs_dev_is_read_only(
 	}
 	return 0;
 }
-
-#ifdef HAVE_PERCPU_SB
-/*
- * Per-cpu incore superblock counters
- *
- * Simple concept, difficult implementation
- *
- * Basically, replace the incore superblock counters with a distributed per cpu
- * counter for contended fields (e.g.  free block count).
- *
- * Difficulties arise in that the incore sb is used for ENOSPC checking, and
- * hence needs to be accurately read when we are running low on space. Hence
- * there is a method to enable and disable the per-cpu counters based on how
- * much "stuff" is available in them.
- *
- * Basically, a counter is enabled if there is enough free resource to justify
- * running a per-cpu fast-path. If the per-cpu counter runs out (i.e. a local
- * ENOSPC), then we disable the counters to synchronise all callers and
- * re-distribute the available resources.
- *
- * If, once we redistributed the available resources, we still get a failure,
- * we disable the per-cpu counter and go through the slow path.
- *
- * The slow path is the current xfs_mod_incore_sb() function.  This means that
- * when we disable a per-cpu counter, we need to drain its resources back to
- * the global superblock. We do this after disabling the counter to prevent
- * more threads from queueing up on the counter.
- *
- * Essentially, this means that we still need a lock in the fast path to enable
- * synchronisation between the global counters and the per-cpu counters. This
- * is not a problem because the lock will be local to a CPU almost all the time
- * and have little contention except when we get to ENOSPC conditions.
- *
- * Basically, this lock becomes a barrier that enables us to lock out the fast
- * path while we do things like enabling and disabling counters and
- * synchronising the counters.
- *
- * Locking rules:
- *
- * 	1. m_sb_lock before picking up per-cpu locks
- * 	2. per-cpu locks always picked up via for_each_online_cpu() order
- * 	3. accurate counter sync requires m_sb_lock + per cpu locks
- * 	4. modifying per-cpu counters requires holding per-cpu lock
- * 	5. modifying global counters requires holding m_sb_lock
- *	6. enabling or disabling a counter requires holding the m_sb_lock
- *	   and _none_ of the per-cpu locks.
- *
- * Disabled counters are only ever re-enabled by a balance operation
- * that results in more free resources per CPU than a given threshold.
- * To ensure counters don't remain disabled, they are rebalanced when
- * the global resource goes above a higher threshold (i.e. some hysteresis
- * is present to prevent thrashing).
- */
-
-#ifdef CONFIG_HOTPLUG_CPU
-/*
- * hot-plug CPU notifier support.
- *
- * We need a notifier per filesystem as we need to be able to identify
- * the filesystem to balance the counters out. This is achieved by
- * having a notifier block embedded in the xfs_mount_t and doing pointer
- * magic to get the mount pointer from the notifier block address.
- */
-STATIC int
-xfs_icsb_cpu_notify(
-	struct notifier_block *nfb,
-	unsigned long action,
-	void *hcpu)
-{
-	xfs_icsb_cnts_t *cntp;
-	xfs_mount_t	*mp;
-
-	mp = (xfs_mount_t *)container_of(nfb, xfs_mount_t, m_icsb_notifier);
-	cntp = (xfs_icsb_cnts_t *)
-			per_cpu_ptr(mp->m_sb_cnts, (unsigned long)hcpu);
-	switch (action) {
-	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
-		/* Easy Case - initialize the area and locks, and
-		 * then rebalance when online does everything else for us. */
-		memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
-		break;
-	case CPU_ONLINE:
-	case CPU_ONLINE_FROZEN:
-		xfs_icsb_lock(mp);
-		xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0);
-		xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0);
-		xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0);
-		xfs_icsb_unlock(mp);
-		break;
-	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
-		/* Disable all the counters, then fold the dead cpu's
-		 * count into the total on the global superblock and
-		 * re-enable the counters. */
-		xfs_icsb_lock(mp);
-		spin_lock(&mp->m_sb_lock);
-		xfs_icsb_disable_counter(mp, XFS_SBS_ICOUNT);
-		xfs_icsb_disable_counter(mp, XFS_SBS_IFREE);
-		xfs_icsb_disable_counter(mp, XFS_SBS_FDBLOCKS);
-
-		mp->m_sb.sb_icount += cntp->icsb_icount;
-		mp->m_sb.sb_ifree += cntp->icsb_ifree;
-		mp->m_sb.sb_fdblocks += cntp->icsb_fdblocks;
-
-		memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
-
-		xfs_icsb_balance_counter_locked(mp, XFS_SBS_ICOUNT, 0);
-		xfs_icsb_balance_counter_locked(mp, XFS_SBS_IFREE, 0);
-		xfs_icsb_balance_counter_locked(mp, XFS_SBS_FDBLOCKS, 0);
-		spin_unlock(&mp->m_sb_lock);
-		xfs_icsb_unlock(mp);
-		break;
-	}
-
-	return NOTIFY_OK;
-}
-#endif /* CONFIG_HOTPLUG_CPU */
-
-int
-xfs_icsb_init_counters(
-	xfs_mount_t	*mp)
-{
-	xfs_icsb_cnts_t *cntp;
-	int		i;
-
-	mp->m_sb_cnts = alloc_percpu(xfs_icsb_cnts_t);
-	if (mp->m_sb_cnts == NULL)
-		return -ENOMEM;
-
-	for_each_online_cpu(i) {
-		cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
-		memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
-	}
-
-	mutex_init(&mp->m_icsb_mutex);
-
-	/*
-	 * start with all counters disabled so that the
-	 * initial balance kicks us off correctly
-	 */
-	mp->m_icsb_counters = -1;
-
-#ifdef CONFIG_HOTPLUG_CPU
-	mp->m_icsb_notifier.notifier_call = xfs_icsb_cpu_notify;
-	mp->m_icsb_notifier.priority = 0;
-	register_hotcpu_notifier(&mp->m_icsb_notifier);
-#endif /* CONFIG_HOTPLUG_CPU */
-
-	return 0;
-}
-
-void
-xfs_icsb_reinit_counters(
-	xfs_mount_t	*mp)
-{
-	xfs_icsb_lock(mp);
-	/*
-	 * start with all counters disabled so that the
-	 * initial balance kicks us off correctly
-	 */
-	mp->m_icsb_counters = -1;
-	xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0);
-	xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0);
-	xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0);
-	xfs_icsb_unlock(mp);
-}
-
-void
-xfs_icsb_destroy_counters(
-	xfs_mount_t	*mp)
-{
-	if (mp->m_sb_cnts) {
-		unregister_hotcpu_notifier(&mp->m_icsb_notifier);
-		free_percpu(mp->m_sb_cnts);
-	}
-	mutex_destroy(&mp->m_icsb_mutex);
-}
-
-STATIC void
-xfs_icsb_lock_cntr(
-	xfs_icsb_cnts_t	*icsbp)
-{
-	while (test_and_set_bit(XFS_ICSB_FLAG_LOCK, &icsbp->icsb_flags)) {
-		ndelay(1000);
-	}
-}
-
-STATIC void
-xfs_icsb_unlock_cntr(
-	xfs_icsb_cnts_t	*icsbp)
-{
-	clear_bit(XFS_ICSB_FLAG_LOCK, &icsbp->icsb_flags);
-}
-
-
-STATIC void
-xfs_icsb_lock_all_counters(
-	xfs_mount_t	*mp)
-{
-	xfs_icsb_cnts_t *cntp;
-	int		i;
-
-	for_each_online_cpu(i) {
-		cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
-		xfs_icsb_lock_cntr(cntp);
-	}
-}
-
-STATIC void
-xfs_icsb_unlock_all_counters(
-	xfs_mount_t	*mp)
-{
-	xfs_icsb_cnts_t *cntp;
-	int		i;
-
-	for_each_online_cpu(i) {
-		cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
-		xfs_icsb_unlock_cntr(cntp);
-	}
-}
-
-STATIC void
-xfs_icsb_count(
-	xfs_mount_t	*mp,
-	xfs_icsb_cnts_t	*cnt,
-	int		flags)
-{
-	xfs_icsb_cnts_t *cntp;
-	int		i;
-
-	memset(cnt, 0, sizeof(xfs_icsb_cnts_t));
-
-	if (!(flags & XFS_ICSB_LAZY_COUNT))
-		xfs_icsb_lock_all_counters(mp);
-
-	for_each_online_cpu(i) {
-		cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
-		cnt->icsb_icount += cntp->icsb_icount;
-		cnt->icsb_ifree += cntp->icsb_ifree;
-		cnt->icsb_fdblocks += cntp->icsb_fdblocks;
-	}
-
-	if (!(flags & XFS_ICSB_LAZY_COUNT))
-		xfs_icsb_unlock_all_counters(mp);
-}
-
-STATIC int
-xfs_icsb_counter_disabled(
-	xfs_mount_t	*mp,
-	xfs_sb_field_t	field)
-{
-	ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));
-	return test_bit(field, &mp->m_icsb_counters);
-}
-
-STATIC void
-xfs_icsb_disable_counter(
-	xfs_mount_t	*mp,
-	xfs_sb_field_t	field)
-{
-	xfs_icsb_cnts_t	cnt;
-
-	ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));
-
-	/*
-	 * If we are already disabled, then there is nothing to do
-	 * here. We check before locking all the counters to avoid
-	 * the expensive lock operation when being called in the
-	 * slow path and the counter is already disabled. This is
-	 * safe because the only time we set or clear this state is under
-	 * the m_icsb_mutex.
-	 */
-	if (xfs_icsb_counter_disabled(mp, field))
-		return;
-
-	xfs_icsb_lock_all_counters(mp);
-	if (!test_and_set_bit(field, &mp->m_icsb_counters)) {
-		/* drain back to superblock */
-
-		xfs_icsb_count(mp, &cnt, XFS_ICSB_LAZY_COUNT);
-		switch(field) {
-		case XFS_SBS_ICOUNT:
-			mp->m_sb.sb_icount = cnt.icsb_icount;
-			break;
-		case XFS_SBS_IFREE:
-			mp->m_sb.sb_ifree = cnt.icsb_ifree;
-			break;
-		case XFS_SBS_FDBLOCKS:
-			mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks;
-			break;
-		default:
-			BUG();
-		}
-	}
-
-	xfs_icsb_unlock_all_counters(mp);
-}
-
-STATIC void
-xfs_icsb_enable_counter(
-	xfs_mount_t	*mp,
-	xfs_sb_field_t	field,
-	uint64_t	count,
-	uint64_t	resid)
-{
-	xfs_icsb_cnts_t	*cntp;
-	int		i;
-
-	ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));
-
-	xfs_icsb_lock_all_counters(mp);
-	for_each_online_cpu(i) {
-		cntp = per_cpu_ptr(mp->m_sb_cnts, i);
-		switch (field) {
-		case XFS_SBS_ICOUNT:
-			cntp->icsb_icount = count + resid;
-			break;
-		case XFS_SBS_IFREE:
-			cntp->icsb_ifree = count + resid;
-			break;
-		case XFS_SBS_FDBLOCKS:
-			cntp->icsb_fdblocks = count + resid;
-			break;
-		default:
-			BUG();
-			break;
-		}
-		resid = 0;
-	}
-	clear_bit(field, &mp->m_icsb_counters);
-	xfs_icsb_unlock_all_counters(mp);
-}
-
-void
-xfs_icsb_sync_counters_locked(
-	xfs_mount_t	*mp,
-	int		flags)
-{
-	xfs_icsb_cnts_t	cnt;
-
-	xfs_icsb_count(mp, &cnt, flags);
-
-	if (!xfs_icsb_counter_disabled(mp, XFS_SBS_ICOUNT))
-		mp->m_sb.sb_icount = cnt.icsb_icount;
-	if (!xfs_icsb_counter_disabled(mp, XFS_SBS_IFREE))
-		mp->m_sb.sb_ifree = cnt.icsb_ifree;
-	if (!xfs_icsb_counter_disabled(mp, XFS_SBS_FDBLOCKS))
-		mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks;
-}
-
-/*
- * Accurate update of per-cpu counters to incore superblock
- */
-void
-xfs_icsb_sync_counters(
-	xfs_mount_t	*mp,
-	int		flags)
-{
-	spin_lock(&mp->m_sb_lock);
-	xfs_icsb_sync_counters_locked(mp, flags);
-	spin_unlock(&mp->m_sb_lock);
-}
-
-/*
- * Balance and enable/disable counters as necessary.
- *
- * Thresholds for re-enabling counters are somewhat magic.  inode counts are
- * chosen to be the same number as single on disk allocation chunk per CPU, and
- * free blocks is something far enough zero that we aren't going thrash when we
- * get near ENOSPC. We also need to supply a minimum we require per cpu to
- * prevent looping endlessly when xfs_alloc_space asks for more than will
- * be distributed to a single CPU but each CPU has enough blocks to be
- * reenabled.
- *
- * Note that we can be called when counters are already disabled.
- * xfs_icsb_disable_counter() optimises the counter locking in this case to
- * prevent locking every per-cpu counter needlessly.
- */
-
-#define XFS_ICSB_INO_CNTR_REENABLE	(uint64_t)64
-#define XFS_ICSB_FDBLK_CNTR_REENABLE(mp) \
-		(uint64_t)(512 + XFS_ALLOC_SET_ASIDE(mp))
-STATIC void
-xfs_icsb_balance_counter_locked(
-	xfs_mount_t	*mp,
-	xfs_sb_field_t  field,
-	int		min_per_cpu)
-{
-	uint64_t	count, resid;
-	int		weight = num_online_cpus();
-	uint64_t	min = (uint64_t)min_per_cpu;
-
-	/* disable counter and sync counter */
-	xfs_icsb_disable_counter(mp, field);
-
-	/* update counters  - first CPU gets residual*/
-	switch (field) {
-	case XFS_SBS_ICOUNT:
-		count = mp->m_sb.sb_icount;
-		resid = do_div(count, weight);
-		if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE))
-			return;
-		break;
-	case XFS_SBS_IFREE:
-		count = mp->m_sb.sb_ifree;
-		resid = do_div(count, weight);
-		if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE))
-			return;
-		break;
-	case XFS_SBS_FDBLOCKS:
-		count = mp->m_sb.sb_fdblocks;
-		resid = do_div(count, weight);
-		if (count < max(min, XFS_ICSB_FDBLK_CNTR_REENABLE(mp)))
-			return;
-		break;
-	default:
-		BUG();
-		count = resid = 0;	/* quiet, gcc */
-		break;
-	}
-
-	xfs_icsb_enable_counter(mp, field, count, resid);
-}
-
-STATIC void
-xfs_icsb_balance_counter(
-	xfs_mount_t	*mp,
-	xfs_sb_field_t  fields,
-	int		min_per_cpu)
-{
-	spin_lock(&mp->m_sb_lock);
-	xfs_icsb_balance_counter_locked(mp, fields, min_per_cpu);
-	spin_unlock(&mp->m_sb_lock);
-}
-
-int
-xfs_icsb_modify_counters(
-	xfs_mount_t	*mp,
-	xfs_sb_field_t	field,
-	int64_t		delta,
-	int		rsvd)
-{
-	xfs_icsb_cnts_t	*icsbp;
-	long long	lcounter;	/* long counter for 64 bit fields */
-	int		ret = 0;
-
-	might_sleep();
-again:
-	preempt_disable();
-	icsbp = this_cpu_ptr(mp->m_sb_cnts);
-
-	/*
-	 * if the counter is disabled, go to slow path
-	 */
-	if (unlikely(xfs_icsb_counter_disabled(mp, field)))
-		goto slow_path;
-	xfs_icsb_lock_cntr(icsbp);
-	if (unlikely(xfs_icsb_counter_disabled(mp, field))) {
-		xfs_icsb_unlock_cntr(icsbp);
-		goto slow_path;
-	}
-
-	switch (field) {
-	case XFS_SBS_ICOUNT:
-		lcounter = icsbp->icsb_icount;
-		lcounter += delta;
-		if (unlikely(lcounter < 0))
-			goto balance_counter;
-		icsbp->icsb_icount = lcounter;
-		break;
-
-	case XFS_SBS_IFREE:
-		lcounter = icsbp->icsb_ifree;
-		lcounter += delta;
-		if (unlikely(lcounter < 0))
-			goto balance_counter;
-		icsbp->icsb_ifree = lcounter;
-		break;
-
-	case XFS_SBS_FDBLOCKS:
-		BUG_ON((mp->m_resblks - mp->m_resblks_avail) != 0);
-
-		lcounter = icsbp->icsb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
-		lcounter += delta;
-		if (unlikely(lcounter < 0))
-			goto balance_counter;
-		icsbp->icsb_fdblocks = lcounter + XFS_ALLOC_SET_ASIDE(mp);
-		break;
-	default:
-		BUG();
-		break;
-	}
-	xfs_icsb_unlock_cntr(icsbp);
-	preempt_enable();
-	return 0;
-
-slow_path:
-	preempt_enable();
-
-	/*
-	 * serialise with a mutex so we don't burn lots of cpu on
-	 * the superblock lock. We still need to hold the superblock
-	 * lock, however, when we modify the global structures.
-	 */
-	xfs_icsb_lock(mp);
-
-	/*
-	 * Now running atomically.
-	 *
-	 * If the counter is enabled, someone has beaten us to rebalancing.
-	 * Drop the lock and try again in the fast path....
-	 */
-	if (!(xfs_icsb_counter_disabled(mp, field))) {
-		xfs_icsb_unlock(mp);
-		goto again;
-	}
-
-	/*
-	 * The counter is currently disabled. Because we are
-	 * running atomically here, we know a rebalance cannot
-	 * be in progress. Hence we can go straight to operating
-	 * on the global superblock. We do not call xfs_mod_incore_sb()
-	 * here even though we need to get the m_sb_lock. Doing so
-	 * will cause us to re-enter this function and deadlock.
-	 * Hence we get the m_sb_lock ourselves and then call
-	 * xfs_mod_incore_sb_unlocked() as the unlocked path operates
-	 * directly on the global counters.
-	 */
-	spin_lock(&mp->m_sb_lock);
-	ret = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
-	spin_unlock(&mp->m_sb_lock);
-
-	/*
-	 * Now that we've modified the global superblock, we
-	 * may be able to re-enable the distributed counters
-	 * (e.g. lots of space just got freed). After that
-	 * we are done.
-	 */
-	if (ret != -ENOSPC)
-		xfs_icsb_balance_counter(mp, field, 0);
-	xfs_icsb_unlock(mp);
-	return ret;
-
-balance_counter:
-	xfs_icsb_unlock_cntr(icsbp);
-	preempt_enable();
-
-	/*
-	 * We may have multiple threads here if multiple per-cpu
-	 * counters run dry at the same time. This will mean we can
-	 * do more balances than strictly necessary but it is not
-	 * the common slowpath case.
-	 */
-	xfs_icsb_lock(mp);
-
-	/*
-	 * running atomically.
-	 *
-	 * This will leave the counter in the correct state for future
-	 * accesses. After the rebalance, we simply try again and our retry
-	 * will either succeed through the fast path or slow path without
-	 * another balance operation being required.
-	 */
-	xfs_icsb_balance_counter(mp, field, delta);
-	xfs_icsb_unlock(mp);
-	goto again;
-}
-
-#endif
fs/xfs/xfs_mount.h

@@ -18,8 +18,6 @@
 #ifndef __XFS_MOUNT_H__
 #define	__XFS_MOUNT_H__

-#ifdef __KERNEL__
-
 struct xlog;
 struct xfs_inode;
 struct xfs_mru_cache;
@@ -29,44 +27,6 @@ struct xfs_quotainfo;
 struct xfs_dir_ops;
 struct xfs_da_geometry;

-#ifdef HAVE_PERCPU_SB
-
-/*
- * Valid per-cpu incore superblock counters. Note that if you add new counters,
- * you may need to define new counter disabled bit field descriptors as there
- * are more possible fields in the superblock that can fit in a bitfield on a
- * 32 bit platform. The XFS_SBS_* values for the current current counters just
- * fit.
- */
-typedef struct xfs_icsb_cnts {
-	uint64_t	icsb_fdblocks;
-	uint64_t	icsb_ifree;
-	uint64_t	icsb_icount;
-	unsigned long	icsb_flags;
-} xfs_icsb_cnts_t;
-
-#define XFS_ICSB_FLAG_LOCK	(1 << 0)	/* counter lock bit */
-
-#define XFS_ICSB_LAZY_COUNT	(1 << 1)	/* accuracy not needed */
-
-extern int	xfs_icsb_init_counters(struct xfs_mount *);
-extern void	xfs_icsb_reinit_counters(struct xfs_mount *);
-extern void	xfs_icsb_destroy_counters(struct xfs_mount *);
-extern void	xfs_icsb_sync_counters(struct xfs_mount *, int);
-extern void	xfs_icsb_sync_counters_locked(struct xfs_mount *, int);
-extern int	xfs_icsb_modify_counters(struct xfs_mount *, xfs_sb_field_t,
-						int64_t, int);
-
-#else
-#define xfs_icsb_init_counters(mp)		(0)
-#define xfs_icsb_destroy_counters(mp)		do { } while (0)
-#define xfs_icsb_reinit_counters(mp)		do { } while (0)
-#define xfs_icsb_sync_counters(mp, flags)	do { } while (0)
-#define xfs_icsb_sync_counters_locked(mp, flags) do { } while (0)
-#define xfs_icsb_modify_counters(mp, field, delta, rsvd) \
-	xfs_mod_incore_sb(mp, field, delta, rsvd)
-#endif
-
 /* dynamic preallocation free space thresholds, 5% down to 1% */
 enum {
 	XFS_LOWSP_1_PCNT = 0,
@@ -81,8 +41,13 @@ typedef struct xfs_mount {
 	struct super_block	*m_super;
 	xfs_tid_t		m_tid;		/* next unused tid for fs */
 	struct xfs_ail		*m_ail;		/* fs active log item list */
-	xfs_sb_t		m_sb;		/* copy of fs superblock */
+
+	struct xfs_sb		m_sb;		/* copy of fs superblock */
 	spinlock_t		m_sb_lock;	/* sb counter lock */
+	struct percpu_counter	m_icount;	/* allocated inodes counter */
+	struct percpu_counter	m_ifree;	/* free inodes counter */
+	struct percpu_counter	m_fdblocks;	/* free block counter */
+
 	struct xfs_buf		*m_sb_bp;	/* buffer for superblock */
 	char			*m_fsname;	/* filesystem name */
 	int			m_fsname_len;	/* strlen of fs name */
@@ -152,12 +117,6 @@ typedef struct xfs_mount {
 	const struct xfs_dir_ops *m_nondir_inode_ops; /* !dir inode ops */
 	uint			m_chsize;	/* size of next field */
 	atomic_t		m_active_trans;	/* number trans frozen */
-#ifdef HAVE_PERCPU_SB
-	xfs_icsb_cnts_t __percpu *m_sb_cnts;	/* per-cpu superblock counters */
-	unsigned long	m_icsb_counters;	/* disabled per-cpu counters */
-	struct notifier_block	m_icsb_notifier; /* hotplug cpu notifier */
-	struct mutex		m_icsb_mutex;	/* balancer sync lock */
-#endif
 	struct xfs_mru_cache	*m_filestream;  /* per-mount filestream data */
 	struct delayed_work	m_reclaim_work;	/* background inode reclaim */
 	struct delayed_work	m_eofblocks_work; /* background eof blocks
@@ -300,35 +259,6 @@ xfs_daddr_to_agbno(struct xfs_mount *mp, xfs_daddr_t d)
 	return (xfs_agblock_t) do_div(ld, mp->m_sb.sb_agblocks);
 }

-/*
- * Per-cpu superblock locking functions
- */
-#ifdef HAVE_PERCPU_SB
-static inline void
-xfs_icsb_lock(xfs_mount_t *mp)
-{
-	mutex_lock(&mp->m_icsb_mutex);
-}
-
-static inline void
-xfs_icsb_unlock(xfs_mount_t *mp)
-{
-	mutex_unlock(&mp->m_icsb_mutex);
-}
-#else
-#define xfs_icsb_lock(mp)
-#define xfs_icsb_unlock(mp)
-#endif
-
-/*
- * This structure is for use by the xfs_mod_incore_sb_batch() routine.
- * xfs_growfs can specify a few fields which are more than int limit
- */
-typedef struct xfs_mod_sb {
-	xfs_sb_field_t	msb_field;	/* Field to modify, see below */
-	int64_t		msb_delta;	/* Change to make to specified field */
-} xfs_mod_sb_t;
-
 /*
  * Per-ag incore structure, copies of information in agf and agi, to improve the
  * performance of allocation group selection.
@@ -383,11 +313,14 @@ extern __uint64_t xfs_default_resblks(xfs_mount_t *mp);
 extern int	xfs_mountfs(xfs_mount_t *mp);
 extern int	xfs_initialize_perag(xfs_mount_t *mp, xfs_agnumber_t agcount,
 				     xfs_agnumber_t *maxagi);
 extern void	xfs_unmountfs(xfs_mount_t *);
-extern int	xfs_mod_incore_sb(xfs_mount_t *, xfs_sb_field_t, int64_t, int);
-extern int	xfs_mod_incore_sb_batch(xfs_mount_t *, xfs_mod_sb_t *,
-			uint, int);
+
+extern int	xfs_mod_icount(struct xfs_mount *mp, int64_t delta);
+extern int	xfs_mod_ifree(struct xfs_mount *mp, int64_t delta);
+extern int	xfs_mod_fdblocks(struct xfs_mount *mp, int64_t delta,
+				 bool reserved);
+extern int	xfs_mod_frextents(struct xfs_mount *mp, int64_t delta);
+
 extern int	xfs_mount_log_sb(xfs_mount_t *);
 extern struct xfs_buf *xfs_getsb(xfs_mount_t *, int);
 extern int	xfs_readsb(xfs_mount_t *, int);
@@ -399,6 +332,4 @@ extern int xfs_dev_is_read_only(struct xfs_mount *, char *);
 extern void	xfs_set_low_space_thresholds(struct xfs_mount *);

-#endif	/* __KERNEL__ */
-
 #endif	/* __XFS_MOUNT_H__ */
fs/xfs/xfs_super.c

@@ -1013,24 +1013,6 @@ xfs_free_fsname(
 	kfree(mp->m_logname);
 }

-STATIC void
-xfs_fs_put_super(
-	struct super_block	*sb)
-{
-	struct xfs_mount	*mp = XFS_M(sb);
-
-	xfs_notice(mp, "Unmounting Filesystem");
-	xfs_filestream_unmount(mp);
-	xfs_unmountfs(mp);
-
-	xfs_freesb(mp);
-	xfs_icsb_destroy_counters(mp);
-	xfs_destroy_mount_workqueues(mp);
-	xfs_close_devices(mp);
-	xfs_free_fsname(mp);
-	kfree(mp);
-}
-
 STATIC int
 xfs_fs_sync_fs(
 	struct super_block	*sb,
@@ -1066,6 +1048,9 @@ xfs_fs_statfs(
 	xfs_sb_t		*sbp = &mp->m_sb;
 	struct xfs_inode	*ip = XFS_I(dentry->d_inode);
 	__uint64_t		fakeinos, id;
+	__uint64_t		icount;
+	__uint64_t		ifree;
+	__uint64_t		fdblocks;
 	xfs_extlen_t		lsize;
 	__int64_t		ffree;
@@ -1076,17 +1061,21 @@ xfs_fs_statfs(
 	statp->f_fsid.val[0] = (u32)id;
 	statp->f_fsid.val[1] = (u32)(id >> 32);

-	xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);
+	icount = percpu_counter_sum(&mp->m_icount);
+	ifree = percpu_counter_sum(&mp->m_ifree);
+	fdblocks = percpu_counter_sum(&mp->m_fdblocks);

 	spin_lock(&mp->m_sb_lock);
 	statp->f_bsize = sbp->sb_blocksize;
 	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
 	statp->f_blocks = sbp->sb_dblocks - lsize;
-	statp->f_bfree = statp->f_bavail =
-			sbp->sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
+	spin_unlock(&mp->m_sb_lock);
+
+	statp->f_bfree = fdblocks - XFS_ALLOC_SET_ASIDE(mp);
+	statp->f_bavail = statp->f_bfree;
+
 	fakeinos = statp->f_bfree << sbp->sb_inopblog;
-	statp->f_files =
-	    MIN(sbp->sb_icount + fakeinos, (__uint64_t)XFS_MAXINUMBER);
+	statp->f_files = MIN(icount + fakeinos, (__uint64_t)XFS_MAXINUMBER);
 	if (mp->m_maxicount)
 		statp->f_files = min_t(typeof(statp->f_files),
 					statp->f_files,
@@ -1098,10 +1087,9 @@ xfs_fs_statfs(
 					sbp->sb_icount);

 	/* make sure statp->f_ffree does not underflow */
-	ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree);
+	ffree = statp->f_files - (icount - ifree);
 	statp->f_ffree = max_t(__int64_t, ffree, 0);

-	spin_unlock(&mp->m_sb_lock);

 	if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
 	    ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
@@ -1382,6 +1370,51 @@ xfs_finish_flags(
 	return 0;
 }

+static int
+xfs_init_percpu_counters(
+	struct xfs_mount	*mp)
+{
+	int		error;
+
+	error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
+	if (error)
+		return ENOMEM;
+
+	error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
+	if (error)
+		goto free_icount;
+
+	error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
+	if (error)
+		goto free_ifree;
+
+	return 0;
+
+free_ifree:
+	percpu_counter_destroy(&mp->m_ifree);
+free_icount:
+	percpu_counter_destroy(&mp->m_icount);
+	return -ENOMEM;
+}
+
+void
+xfs_reinit_percpu_counters(
+	struct xfs_mount	*mp)
+{
+	percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
+	percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
+	percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
+}
+
+static void
+xfs_destroy_percpu_counters(
+	struct xfs_mount	*mp)
+{
+	percpu_counter_destroy(&mp->m_icount);
+	percpu_counter_destroy(&mp->m_ifree);
+	percpu_counter_destroy(&mp->m_fdblocks);
+}
+
 STATIC int
 xfs_fs_fill_super(
 	struct super_block	*sb,
@@ -1430,7 +1463,7 @@ xfs_fs_fill_super(
 	if (error)
 		goto out_close_devices;

-	error = xfs_icsb_init_counters(mp);
+	error = xfs_init_percpu_counters(mp);
 	if (error)
 		goto out_destroy_workqueues;
@@ -1488,7 +1521,7 @@ xfs_fs_fill_super(
 out_free_sb:
 	xfs_freesb(mp);
 out_destroy_counters:
-	xfs_icsb_destroy_counters(mp);
+	xfs_destroy_percpu_counters(mp);
 out_destroy_workqueues:
 	xfs_destroy_mount_workqueues(mp);
 out_close_devices:
@@ -1505,6 +1538,24 @@ xfs_fs_fill_super(
 	goto out_free_sb;
 }

+STATIC void
+xfs_fs_put_super(
+	struct super_block	*sb)
+{
+	struct xfs_mount	*mp = XFS_M(sb);
+
+	xfs_notice(mp, "Unmounting Filesystem");
+	xfs_filestream_unmount(mp);
+	xfs_unmountfs(mp);
+
+	xfs_freesb(mp);
+	xfs_destroy_percpu_counters(mp);
+	xfs_destroy_mount_workqueues(mp);
+	xfs_close_devices(mp);
+	xfs_free_fsname(mp);
+	kfree(mp);
+}
+
 STATIC struct dentry *
 xfs_fs_mount(
 	struct file_system_type	*fs_type,
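xfs_init_percpu_counters() above uses the standard kernel unwind ladder: each
percpu_counter_init() allocates real per-cpu memory, so a mid-sequence failure
must destroy exactly the counters already set up, in reverse order. (Also
visible in the hunk: the first failure path returns positive ENOMEM while the
shared exit returns -ENOMEM; that quirk is in the merged code itself.) A
generic sketch of the ladder with placeholder counters a, b and c:

    if (percpu_counter_init(&a, 0, GFP_KERNEL))
            return -ENOMEM;
    if (percpu_counter_init(&b, 0, GFP_KERNEL))
            goto free_a;
    if (percpu_counter_init(&c, 0, GFP_KERNEL))
            goto free_b;
    return 0;

    free_b:
            percpu_counter_destroy(&b);
    free_a:
            percpu_counter_destroy(&a);
            return -ENOMEM;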
fs/xfs/xfs_super.h

@@ -72,6 +72,8 @@ extern const struct export_operations xfs_export_operations;
 extern const struct xattr_handler *xfs_xattr_handlers[];
 extern const struct quotactl_ops xfs_quotactl_operations;

+extern void xfs_reinit_percpu_counters(struct xfs_mount *mp);
+
 #define XFS_M(sb)		((struct xfs_mount *)((sb)->s_fs_info))

 #endif	/* __XFS_SUPER_H__ */
fs/xfs/xfs_trans.c
View file @ 4225441a
...
@@ -173,7 +173,7 @@ xfs_trans_reserve(
 	uint			rtextents)
 {
 	int			error = 0;
-	int			rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
+	bool			rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;

 	/* Mark this thread as being in a transaction */
 	current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
...
@@ -184,8 +184,7 @@ xfs_trans_reserve(
 	 * fail if the count would go below zero.
 	 */
 	if (blocks > 0) {
-		error = xfs_icsb_modify_counters(tp->t_mountp, XFS_SBS_FDBLOCKS,
-					  -((int64_t)blocks), rsvd);
+		error = xfs_mod_fdblocks(tp->t_mountp, -((int64_t)blocks), rsvd);
 		if (error != 0) {
 			current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
 			return -ENOSPC;
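xfs_mod_fdblocks() keeps the semantics xfs_icsb_modify_counters() provided on this path: the decrement must fail rather than drive the free-block count negative, and the rsvd flag selects whether the caller may dip into the reserve pool. A hedged user-space sketch of a checked decrement with a reserve floor; RESERVE_POOL and mod_fdblocks are illustrative, not the kernel implementation:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define RESERVE_POOL	64	/* illustrative: blocks set aside for XFS_TRANS_RESERVE users */

static int64_t fdblocks = 1024;

static int mod_fdblocks(int64_t delta, int rsvd)
{
	int64_t floor = rsvd ? 0 : RESERVE_POOL;

	if (fdblocks + delta < floor)
		return -ENOSPC;		/* reject; the counter is left untouched */
	fdblocks += delta;
	return 0;
}

int main(void)
{
	printf("%d\n", mod_fdblocks(-1000, 0));	/* -ENOSPC: would eat into the reserve */
	printf("%d\n", mod_fdblocks(-1000, 1));	/* 0: a reserved transaction may do so */
	return 0;
}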
...
@@ -236,8 +235,7 @@ xfs_trans_reserve(
 	 * fail if the count would go below zero.
 	 */
 	if (rtextents > 0) {
-		error = xfs_mod_incore_sb(tp->t_mountp, XFS_SBS_FREXTENTS,
-					  -((int64_t)rtextents), rsvd);
+		error = xfs_mod_frextents(tp->t_mountp, -((int64_t)rtextents));
 		if (error) {
 			error = -ENOSPC;
 			goto undo_log;
...
@@ -268,8 +266,7 @@ xfs_trans_reserve(
 undo_blocks:
 	if (blocks > 0) {
-		xfs_icsb_modify_counters(tp->t_mountp, XFS_SBS_FDBLOCKS,
-					 (int64_t)blocks, rsvd);
+		xfs_mod_fdblocks(tp->t_mountp, -((int64_t)blocks), rsvd);
 		tp->t_blk_res = 0;
 	}
@@ -488,6 +485,54 @@ xfs_trans_apply_sb_deltas(
...
@@ -488,6 +485,54 @@ xfs_trans_apply_sb_deltas(
sizeof
(
sbp
->
sb_frextents
)
-
1
);
sizeof
(
sbp
->
sb_frextents
)
-
1
);
}
}
+STATIC int
+xfs_sb_mod8(
+	uint8_t			*field,
+	int8_t			delta)
+{
+	int8_t			counter = *field;
+
+	counter += delta;
+	if (counter < 0) {
+		ASSERT(0);
+		return -EINVAL;
+	}
+	*field = counter;
+	return 0;
+}
+
+STATIC int
+xfs_sb_mod32(
+	uint32_t		*field,
+	int32_t			delta)
+{
+	int32_t			counter = *field;
+
+	counter += delta;
+	if (counter < 0) {
+		ASSERT(0);
+		return -EINVAL;
+	}
+	*field = counter;
+	return 0;
+}
+
+STATIC int
+xfs_sb_mod64(
+	uint64_t		*field,
+	int64_t			delta)
+{
+	int64_t			counter = *field;
+
+	counter += delta;
+	if (counter < 0) {
+		ASSERT(0);
+		return -EINVAL;
+	}
+	*field = counter;
+	return 0;
+}
+
 /*
  * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations
  * and apply superblock counter changes to the in-core superblock.  The
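The three xfs_sb_mod* helpers added in this hunk apply a signed delta to an unsigned superblock field and reject any change that would take the field below zero; they differ only in width. A compile-and-run sketch of the 32-bit case, with the kernel ASSERT replaced by a plain error return; sb_mod32 is an illustrative stand-in:

#include <assert.h>
#include <errno.h>
#include <stdint.h>

static int sb_mod32(uint32_t *field, int32_t delta)
{
	int32_t counter = *field;

	counter += delta;
	if (counter < 0)
		return -EINVAL;		/* would underflow: reject, leave field unchanged */
	*field = counter;
	return 0;
}

int main(void)
{
	uint32_t agcount = 4;

	assert(sb_mod32(&agcount, 2) == 0 && agcount == 6);
	assert(sb_mod32(&agcount, -10) == -EINVAL && agcount == 6);
	return 0;
}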
...
@@ -495,13 +540,6 @@ xfs_trans_apply_sb_deltas(
  * applied to the in-core superblock.  The idea is that that has already been
  * done.
  *
- * This is done efficiently with a single call to xfs_mod_incore_sb_batch().
- * However, we have to ensure that we only modify each superblock field only
- * once because the application of the delta values may not be atomic. That can
- * lead to ENOSPC races occurring if we have two separate modifcations of the
- * free space counter to put back the entire reservation and then take away
- * what we used.
- *
  * If we are not logging superblock counters, then the inode allocated/free and
  * used block counts are not updated in the on disk superblock. In this case,
  * XFS_TRANS_SB_DIRTY will not be set when the transaction is updated but we
...
@@ -509,21 +547,15 @@ xfs_trans_apply_sb_deltas(
  */
 void
 xfs_trans_unreserve_and_mod_sb(
-	xfs_trans_t	*tp)
+	struct xfs_trans	*tp)
 {
-	xfs_mod_sb_t	msb[9];	/* If you add cases, add entries */
-	xfs_mod_sb_t	*msbp;
-	xfs_mount_t	*mp = tp->t_mountp;
-	/* REFERENCED */
-	int		error;
-	int		rsvd;
-	int64_t		blkdelta = 0;
-	int64_t		rtxdelta = 0;
-	int64_t		idelta = 0;
-	int64_t		ifreedelta = 0;
+	struct xfs_mount	*mp = tp->t_mountp;
+	bool			rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
+	int64_t			blkdelta = 0;
+	int64_t			rtxdelta = 0;
+	int64_t			idelta = 0;
+	int64_t			ifreedelta = 0;
+	int			error;

-	msbp = msb;
-	rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
-
 	/* calculate deltas */
 	if (tp->t_blk_res > 0)
@@ -547,97 +579,115 @@ xfs_trans_unreserve_and_mod_sb(
...
@@ -547,97 +579,115 @@ xfs_trans_unreserve_and_mod_sb(
/* apply the per-cpu counters */
 	/* apply the per-cpu counters */
 	if (blkdelta) {
-		error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
-						 blkdelta, rsvd);
+		error = xfs_mod_fdblocks(mp, blkdelta, rsvd);
 		if (error)
 			goto out;
 	}

 	if (idelta) {
-		error = xfs_icsb_modify_counters(mp, XFS_SBS_ICOUNT,
-						 idelta, rsvd);
+		error = xfs_mod_icount(mp, idelta);
 		if (error)
 			goto out_undo_fdblocks;
 	}

 	if (ifreedelta) {
-		error = xfs_icsb_modify_counters(mp, XFS_SBS_IFREE,
-						 ifreedelta, rsvd);
+		error = xfs_mod_ifree(mp, ifreedelta);
 		if (error)
 			goto out_undo_icount;
 	}

+	if (rtxdelta == 0 && !(tp->t_flags & XFS_TRANS_SB_DIRTY))
+		return;
+
 	/* apply remaining deltas */
-	if (rtxdelta != 0) {
-		msbp->msb_field = XFS_SBS_FREXTENTS;
-		msbp->msb_delta = rtxdelta;
-		msbp++;
+	spin_lock(&mp->m_sb_lock);
+	if (rtxdelta) {
+		error = xfs_sb_mod64(&mp->m_sb.sb_frextents, rtxdelta);
+		if (error)
+			goto out_undo_ifree;
 	}

-	if (tp->t_flags & XFS_TRANS_SB_DIRTY) {
-		if (tp->t_dblocks_delta != 0) {
-			msbp->msb_field = XFS_SBS_DBLOCKS;
-			msbp->msb_delta = tp->t_dblocks_delta;
-			msbp++;
-		}
-		if (tp->t_agcount_delta != 0) {
-			msbp->msb_field = XFS_SBS_AGCOUNT;
-			msbp->msb_delta = tp->t_agcount_delta;
-			msbp++;
-		}
-		if (tp->t_imaxpct_delta != 0) {
-			msbp->msb_field = XFS_SBS_IMAX_PCT;
-			msbp->msb_delta = tp->t_imaxpct_delta;
-			msbp++;
-		}
-		if (tp->t_rextsize_delta != 0) {
-			msbp->msb_field = XFS_SBS_REXTSIZE;
-			msbp->msb_delta = tp->t_rextsize_delta;
-			msbp++;
-		}
-		if (tp->t_rbmblocks_delta != 0) {
-			msbp->msb_field = XFS_SBS_RBMBLOCKS;
-			msbp->msb_delta = tp->t_rbmblocks_delta;
-			msbp++;
-		}
-		if (tp->t_rblocks_delta != 0) {
-			msbp->msb_field = XFS_SBS_RBLOCKS;
-			msbp->msb_delta = tp->t_rblocks_delta;
-			msbp++;
-		}
-		if (tp->t_rextents_delta != 0) {
-			msbp->msb_field = XFS_SBS_REXTENTS;
-			msbp->msb_delta = tp->t_rextents_delta;
-			msbp++;
-		}
-		if (tp->t_rextslog_delta != 0) {
-			msbp->msb_field = XFS_SBS_REXTSLOG;
-			msbp->msb_delta = tp->t_rextslog_delta;
-			msbp++;
-		}
+	if (tp->t_dblocks_delta != 0) {
+		error = xfs_sb_mod64(&mp->m_sb.sb_dblocks, tp->t_dblocks_delta);
+		if (error)
+			goto out_undo_frextents;
 	}
-
-	/*
-	 * If we need to change anything, do it.
-	 */
-	if (msbp > msb) {
-		error = xfs_mod_incore_sb_batch(tp->t_mountp, msb,
-			(uint)(msbp - msb), rsvd);
+	if (tp->t_agcount_delta != 0) {
+		error = xfs_sb_mod32(&mp->m_sb.sb_agcount, tp->t_agcount_delta);
 		if (error)
-			goto out_undo_ifreecount;
+			goto out_undo_dblocks;
 	}
-
+	if (tp->t_imaxpct_delta != 0) {
+		error = xfs_sb_mod8(&mp->m_sb.sb_imax_pct, tp->t_imaxpct_delta);
+		if (error)
+			goto out_undo_agcount;
+	}
+	if (tp->t_rextsize_delta != 0) {
+		error = xfs_sb_mod32(&mp->m_sb.sb_rextsize,
+				     tp->t_rextsize_delta);
+		if (error)
+			goto out_undo_imaxpct;
+	}
+	if (tp->t_rbmblocks_delta != 0) {
+		error = xfs_sb_mod32(&mp->m_sb.sb_rbmblocks,
+				     tp->t_rbmblocks_delta);
+		if (error)
+			goto out_undo_rextsize;
+	}
+	if (tp->t_rblocks_delta != 0) {
+		error = xfs_sb_mod64(&mp->m_sb.sb_rblocks, tp->t_rblocks_delta);
+		if (error)
+			goto out_undo_rbmblocks;
+	}
+	if (tp->t_rextents_delta != 0) {
+		error = xfs_sb_mod64(&mp->m_sb.sb_rextents,
+				     tp->t_rextents_delta);
+		if (error)
+			goto out_undo_rblocks;
+	}
+	if (tp->t_rextslog_delta != 0) {
+		error = xfs_sb_mod8(&mp->m_sb.sb_rextslog,
+				    tp->t_rextslog_delta);
+		if (error)
+			goto out_undo_rextents;
+	}
+	spin_unlock(&mp->m_sb_lock);
 	return;

-out_undo_ifreecount:
+out_undo_rextents:
+	if (tp->t_rextents_delta)
+		xfs_sb_mod64(&mp->m_sb.sb_rextents, -tp->t_rextents_delta);
+out_undo_rblocks:
+	if (tp->t_rblocks_delta)
+		xfs_sb_mod64(&mp->m_sb.sb_rblocks, -tp->t_rblocks_delta);
+out_undo_rbmblocks:
+	if (tp->t_rbmblocks_delta)
+		xfs_sb_mod32(&mp->m_sb.sb_rbmblocks, -tp->t_rbmblocks_delta);
+out_undo_rextsize:
+	if (tp->t_rextsize_delta)
+		xfs_sb_mod32(&mp->m_sb.sb_rextsize, -tp->t_rextsize_delta);
+out_undo_imaxpct:
+	if (tp->t_rextsize_delta)
+		xfs_sb_mod8(&mp->m_sb.sb_imax_pct, -tp->t_imaxpct_delta);
+out_undo_agcount:
+	if (tp->t_agcount_delta)
+		xfs_sb_mod32(&mp->m_sb.sb_agcount, -tp->t_agcount_delta);
+out_undo_dblocks:
+	if (tp->t_dblocks_delta)
+		xfs_sb_mod64(&mp->m_sb.sb_dblocks, -tp->t_dblocks_delta);
+out_undo_frextents:
+	if (rtxdelta)
+		xfs_sb_mod64(&mp->m_sb.sb_frextents, -rtxdelta);
+out_undo_ifree:
+	spin_unlock(&mp->m_sb_lock);
 	if (ifreedelta)
-		xfs_icsb_modify_counters(mp, XFS_SBS_IFREE, -ifreedelta, rsvd);
+		xfs_mod_ifree(mp, -ifreedelta);
 out_undo_icount:
 	if (idelta)
-		xfs_icsb_modify_counters(mp, XFS_SBS_ICOUNT, -idelta, rsvd);
+		xfs_mod_icount(mp, -idelta);
 out_undo_fdblocks:
 	if (blkdelta)
-		xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, -blkdelta, rsvd);
+		xfs_mod_fdblocks(mp, -blkdelta, rsvd);
 out:
 	ASSERT(error == 0);
 	return;
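The rewritten tail of xfs_trans_unreserve_and_mod_sb() applies each delta in turn and, on failure, falls through a chain of labels that undo the earlier deltas in reverse order before reporting the error. A self-contained C sketch of that apply-then-unwind idiom, using three hypothetical counters and an injected underflow:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

static int64_t a, b, c;

static int apply(int64_t *ctr, int64_t delta)
{
	if (*ctr + delta < 0)
		return -EINVAL;
	*ctr += delta;
	return 0;
}

static int apply_all(int64_t da, int64_t db, int64_t dc)
{
	int error;

	if ((error = apply(&a, da)))
		goto out;
	if ((error = apply(&b, db)))
		goto undo_a;
	if ((error = apply(&c, dc)))
		goto undo_b;
	return 0;

undo_b:					/* reverse order: negate what was applied */
	apply(&b, -db);
undo_a:
	apply(&a, -da);
out:
	return error;
}

int main(void)
{
	a = b = c = 10;
	/* third delta underflows; the first two are rolled back */
	printf("error=%d a=%lld b=%lld c=%lld\n", apply_all(5, 5, -100),
	       (long long)a, (long long)b, (long long)c);
	return 0;
}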
...