Commit 5cd86e5a authored by Stephen Lord

[XFS] reduce byte swapping and spinlock usage in log write path

SGI Modid: 2.5.x-xfs:slinx:140714a
parent c43df29b
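The gist of the patch, as a minimal stand-alone sketch (this is not the kernel code: fake_log, fake_iclog, finish_copy, sync_header and the pthread mutex are invented stand-ins for xlog_t, xlog_in_core_t, xlog_state_finish_copy, LOG_LOCK and ARCH_CONVERT). The pattern the diff below moves to is: count copied regions and bytes in local variables, fold them into the in-core header under a single lock acquisition instead of one locked, byte-swapping update per region, and convert h_num_logops to disk byte order exactly once, at sync time.

/* Illustrative only -- all names here are hypothetical, not the XFS API. */
#include <arpa/inet.h>   /* htonl()/ntohl() stand in for ARCH_CONVERT */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct fake_iclog {
        uint32_t h_num_logops;  /* kept in host byte order while in core */
        uint32_t h_len;
        int      ic_offset;
};

struct fake_log {
        pthread_mutex_t l_icloglock;    /* plays the role of LOG_LOCK */
};

/* One locked section per batch of copied regions, not one per region. */
static void finish_copy(struct fake_log *log, struct fake_iclog *iclog,
                        int record_cnt, int copy_bytes)
{
        pthread_mutex_lock(&log->l_icloglock);
        iclog->h_num_logops += record_cnt;      /* plain add, no byte swap */
        iclog->ic_offset    += copy_bytes;
        pthread_mutex_unlock(&log->l_icloglock);
}

/* Byte swapping happens exactly once, when the header goes to disk. */
static void sync_header(struct fake_iclog *iclog)
{
        iclog->h_len        = htonl((uint32_t)iclog->ic_offset);
        iclog->h_num_logops = htonl(iclog->h_num_logops);
}

int main(void)
{
        struct fake_iclog iclog = { 0, 0, 0 };
        struct fake_log log = { PTHREAD_MUTEX_INITIALIZER };
        int record_cnt = 0, data_cnt = 0;

        /* xlog_write-style loop: count regions locally ... */
        for (int region = 0; region < 4; region++) {
                record_cnt++;
                data_cnt += 128;        /* pretend each region copied 128 bytes */
        }
        /* ... then update the shared header once. */
        finish_copy(&log, &iclog, record_cnt, data_cnt);
        sync_header(&iclog);

        printf("ops (big-endian on disk): %u\n", (unsigned)ntohl(iclog.h_num_logops));
        return 0;
}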
@@ -62,10 +62,6 @@ STATIC int xlog_write(xfs_mount_t *mp, xfs_log_iovec_t region[],
 /* local state machine functions */
 STATIC void xlog_state_done_syncing(xlog_in_core_t *iclog, int);
 STATIC void xlog_state_do_callback(xlog_t *log,int aborted, xlog_in_core_t *iclog);
-static inline void xlog_state_finish_copy(xlog_t *log,
-                                          xlog_in_core_t *iclog,
-                                          int first_write,
-                                          int bytes);
 STATIC int xlog_state_get_iclog_space(xlog_t *log,
                                       int len,
                                       xlog_in_core_t **iclog,
@@ -876,7 +872,7 @@ xfs_log_need_covered(xfs_mount_t *mp)
  * We may be holding the log iclog lock upon entering this routine.
  */
 xfs_lsn_t
-xlog_assign_tail_lsn(xfs_mount_t *mp, xlog_in_core_t *iclog)
+xlog_assign_tail_lsn(xfs_mount_t *mp)
 {
         xfs_lsn_t tail_lsn;
         SPLDECL(s);
@@ -888,8 +884,6 @@ xlog_assign_tail_lsn(xfs_mount_t *mp, xlog_in_core_t *iclog)
                 log->l_tail_lsn = tail_lsn;
         else
                 tail_lsn = log->l_tail_lsn = log->l_last_sync_lsn;
-        if (iclog)
-                INT_SET(iclog->ic_header.h_tail_lsn, ARCH_CONVERT, tail_lsn);
         GRANT_UNLOCK(log, s);
         return tail_lsn;
@@ -1402,7 +1396,7 @@ xlog_sync(xlog_t *log,
 {
         xfs_caddr_t     dptr;           /* pointer to byte sized element */
         xfs_buf_t       *bp;
-        int             i;
+        int             i, ops;
         uint            roundup;
         uint            count;          /* byte count of bwrite */
         int             split = 0;      /* split write into two regions */
@@ -1439,7 +1433,12 @@ xlog_sync(xlog_t *log,
         log->l_roundoff += iclog->ic_roundoff;
         xlog_pack_data(log, iclog);     /* put cycle number in every block */
-        INT_SET(iclog->ic_header.h_len, ARCH_CONVERT, iclog->ic_offset);       /* real byte length */
+        /* real byte length */
+        INT_SET(iclog->ic_header.h_len, ARCH_CONVERT, iclog->ic_offset);
+        /* put ops count in correct order */
+        ops = iclog->ic_header.h_num_logops;
+        INT_SET(iclog->ic_header.h_num_logops, ARCH_CONVERT, ops);
         bp          = iclog->ic_bp;
         ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == (unsigned long)1);
@@ -1596,6 +1595,28 @@ xlog_unalloc_log(xlog_t *log)
         kmem_free(log, sizeof(xlog_t));
 }      /* xlog_unalloc_log */
+
+/*
+ * Update counters atomically now that memcpy is done.
+ */
+/* ARGSUSED */
+static inline void
+xlog_state_finish_copy(xlog_t           *log,
+                       xlog_in_core_t   *iclog,
+                       int              record_cnt,
+                       int              copy_bytes)
+{
+        SPLDECL(s);
+
+        s = LOG_LOCK(log);
+
+        iclog->ic_header.h_num_logops += record_cnt;
+        iclog->ic_offset += copy_bytes;
+
+        LOG_UNLOCK(log, s);
+}      /* xlog_state_finish_copy */
+
 /*
  * Write some region out to in-core log
@@ -1663,6 +1684,7 @@ xlog_write(xfs_mount_t * mp,
     int              contwr;        /* continued write of in-core log? */
     int              firstwr = 0;   /* first write of transaction */
     int              error;
+    int              record_cnt = 0, data_cnt = 0;
     partial_copy_len = partial_copy = 0;
@@ -1725,7 +1747,8 @@ xlog_write(xfs_mount_t * mp,
            logop_head->oh_flags    = XLOG_START_TRANS;
            INT_ZERO(logop_head->oh_res2, ARCH_CONVERT);
            ticket->t_flags        &= ~XLOG_TIC_INITED;  /* clear bit */
-           firstwr++;                   /* increment log ops below */
+           firstwr = 1;                 /* increment log ops below */
+           record_cnt++;
            start_rec_copy = sizeof(xlog_op_header_t);
            xlog_write_adv_cnt(ptr, len, log_offset, start_rec_copy);
@@ -1793,10 +1816,13 @@ xlog_write(xfs_mount_t * mp,
        /* make copy_len total bytes copied, including headers */
        copy_len += start_rec_copy + sizeof(xlog_op_header_t);
-       xlog_state_finish_copy(log, iclog, firstwr, (contwr? copy_len : 0));
+       record_cnt++;
+       data_cnt += contwr ? copy_len : 0;
        firstwr = 0;
        if (partial_copy) {                     /* copied partial region */
                /* already marked WANT_SYNC by xlog_state_get_iclog_space */
+               xlog_state_finish_copy(log, iclog, record_cnt, data_cnt);
+               record_cnt = data_cnt = 0;
                if ((error = xlog_state_release_iclog(log, iclog)))
                        return (error);
                break;                          /* don't increment index */
@@ -1805,6 +1831,8 @@ xlog_write(xfs_mount_t * mp,
        partial_copy_len = partial_copy = 0;
        if (iclog->ic_size - log_offset <= sizeof(xlog_op_header_t)) {
+               xlog_state_finish_copy(log, iclog, record_cnt, data_cnt);
+               record_cnt = data_cnt = 0;
                xlog_state_want_sync(log, iclog);
                if (commit_iclog) {
                        ASSERT(flags & XLOG_COMMIT_TRANS);
@@ -1821,6 +1849,7 @@ xlog_write(xfs_mount_t * mp,
     } /* for (index = 0; index < nentries; ) */
     ASSERT(len == 0);
+    xlog_state_finish_copy(log, iclog, record_cnt, data_cnt);
     if (commit_iclog) {
        ASSERT(flags & XLOG_COMMIT_TRANS);
        *commit_iclog = iclog;
@@ -2207,30 +2236,6 @@ xlog_state_done_syncing(
 }      /* xlog_state_done_syncing */
-/*
- * Update counters atomically now that memcpy is done.
- */
-/* ARGSUSED */
-static inline void
-xlog_state_finish_copy(xlog_t           *log,
-                       xlog_in_core_t   *iclog,
-                       int              first_write,
-                       int              copy_bytes)
-{
-        SPLDECL(s);
-
-        s = LOG_LOCK(log);
-
-        if (first_write)
-                INT_MOD(iclog->ic_header.h_num_logops, ARCH_CONVERT, +1);
-        INT_MOD(iclog->ic_header.h_num_logops, ARCH_CONVERT, +1);
-        iclog->ic_offset += copy_bytes;
-
-        LOG_UNLOCK(log, s);
-}      /* xlog_state_finish_copy */
 /*
  * If the head of the in-core log ring is not (ACTIVE or DIRTY), then we must
  * sleep.  The flush semaphore is set to the number of in-core buffers and
@@ -2722,7 +2727,7 @@ xlog_state_release_iclog(xlog_t *log,
         SPLDECL(s);
         int            sync = 0;       /* do we sync? */
-        xlog_assign_tail_lsn(log->l_mp, 0);
+        xlog_assign_tail_lsn(log->l_mp);
         s = LOG_LOCK(log);
@@ -3594,7 +3599,10 @@ xlog_iclogs_empty(xlog_t *log)
         iclog = log->l_iclog;
         do {
-                if (INT_GET(iclog->ic_header.h_num_logops, ARCH_CONVERT))
+                /* endianness does not matter here, zero is zero in
+                 * any language.
+                 */
+                if (iclog->ic_header.h_num_logops)
                         return(0);
                 iclog = iclog->ic_next;
         } while (iclog != log->l_iclog);
...
@@ -536,8 +536,7 @@ typedef struct log {
 /* common routines */
-extern xfs_lsn_t xlog_assign_tail_lsn(struct xfs_mount *mp,
-                                      xlog_in_core_t *iclog);
+extern xfs_lsn_t xlog_assign_tail_lsn(struct xfs_mount *mp);
 extern int       xlog_find_head(xlog_t *log, xfs_daddr_t *head_blk);
 extern int       xlog_find_tail(xlog_t *log,
                                 xfs_daddr_t *head_blk,
...
@@ -3463,7 +3463,7 @@ xlog_do_recover(xlog_t *log,
          * or iunlinks they will have some entries in the AIL; so we look at
          * the AIL to determine how to set the tail_lsn.
          */
-        xlog_assign_tail_lsn(log->l_mp, NULL);
+        xlog_assign_tail_lsn(log->l_mp);
         /*
          * Now that we've finished replaying all buffer and inode
...