Commit 39e2defe73106ca2e1c85e5286038a0a13f49513
Committed by
Niv Sardi
1 parent
d9424b3c4a
Exists in
master
and in
7 other branches
reduce l_icloglock roundtrips
All but one caller of xlog_state_want_sync drop and re-acquire l_icloglock around the call to it, just so that xlog_state_want_sync can acquire and drop it. Move all lock operations out of xlog_state_want_sync and assert that the lock is held when it is called. Note that it would make sense to extend this scheme to xlog_state_release_iclog, but the locking in there is more complicated and we'd like to keep the atomic_dec_and_lock optimization for those callers not holding l_icloglock yet. Signed-off-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Dave Chinner <david@fromorbit.com> Signed-off-by: Niv Sardi <xaiki@sgi.com>
Showing 1 changed file with 6 additions and 7 deletions Side-by-side Diff
fs/xfs/xfs_log.c
... | ... | @@ -729,8 +729,8 @@ |
729 | 729 | spin_lock(&log->l_icloglock); |
730 | 730 | iclog = log->l_iclog; |
731 | 731 | atomic_inc(&iclog->ic_refcnt); |
732 | - spin_unlock(&log->l_icloglock); | |
733 | 732 | xlog_state_want_sync(log, iclog); |
733 | + spin_unlock(&log->l_icloglock); | |
734 | 734 | error = xlog_state_release_iclog(log, iclog); |
735 | 735 | |
736 | 736 | spin_lock(&log->l_icloglock); |
737 | 737 | |
... | ... | @@ -767,9 +767,9 @@ |
767 | 767 | spin_lock(&log->l_icloglock); |
768 | 768 | iclog = log->l_iclog; |
769 | 769 | atomic_inc(&iclog->ic_refcnt); |
770 | - spin_unlock(&log->l_icloglock); | |
771 | 770 | |
772 | 771 | xlog_state_want_sync(log, iclog); |
772 | + spin_unlock(&log->l_icloglock); | |
773 | 773 | error = xlog_state_release_iclog(log, iclog); |
774 | 774 | |
775 | 775 | spin_lock(&log->l_icloglock); |
776 | 776 | |
... | ... | @@ -1984,7 +1984,9 @@ |
1984 | 1984 | if (iclog->ic_size - log_offset <= sizeof(xlog_op_header_t)) { |
1985 | 1985 | xlog_state_finish_copy(log, iclog, record_cnt, data_cnt); |
1986 | 1986 | record_cnt = data_cnt = 0; |
1987 | + spin_lock(&log->l_icloglock); | |
1987 | 1988 | xlog_state_want_sync(log, iclog); |
1989 | + spin_unlock(&log->l_icloglock); | |
1988 | 1990 | if (commit_iclog) { |
1989 | 1991 | ASSERT(flags & XLOG_COMMIT_TRANS); |
1990 | 1992 | *commit_iclog = iclog; |
... | ... | @@ -3193,7 +3195,7 @@ |
3193 | 3195 | STATIC void |
3194 | 3196 | xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog) |
3195 | 3197 | { |
3196 | - spin_lock(&log->l_icloglock); | |
3198 | + ASSERT(spin_is_locked(&log->l_icloglock)); | |
3197 | 3199 | |
3198 | 3200 | if (iclog->ic_state == XLOG_STATE_ACTIVE) { |
3199 | 3201 | xlog_state_switch_iclogs(log, iclog, 0); |
... | ... | @@ -3201,10 +3203,7 @@ |
3201 | 3203 | ASSERT(iclog->ic_state & |
3202 | 3204 | (XLOG_STATE_WANT_SYNC|XLOG_STATE_IOERROR)); |
3203 | 3205 | } |
3204 | - | |
3205 | - spin_unlock(&log->l_icloglock); | |
3206 | -} /* xlog_state_want_sync */ | |
3207 | - | |
3206 | +} | |
3208 | 3207 | |
3209 | 3208 | |
3210 | 3209 | /***************************************************************************** |