Commit 8a072a4d4c6a5b6ec32836c467d2996393c76c6f
Committed by: Alex Elder
Parent: 97d3ac75e5
Exists in master and in 7 other branches
xfs: reduce the number of pagb_lock roundtrips in xfs_alloc_clear_busy
Instead of finding the per-ag structure and then taking and releasing the pagb_lock for every single busy extent completed, sort the list of busy extents and only switch between AGs where necessary. This becomes especially important with the online discard support, which will hit this lock more often.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Alex Elder <aelder@sgi.com>
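For illustration only, here is a minimal standalone userspace sketch of the locking pattern the patch adopts, not the kernel code itself: all names are hypothetical, qsort() stands in for list_sort(), and a pthread mutex per group stands in for the per-AG pagb_lock. The point is that after sorting, the lock is dropped and re-acquired only when the group key changes, rather than once per item.

/*
 * Hypothetical userspace demo of "sort by group, batch the lock switches".
 * Build with: cc demo.c -o demo -lpthread
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define NGROUPS	4

struct item {
	int	group;		/* plays the role of busyp->agno */
	int	value;
};

/* One lock per group, playing the role of the per-AG pagb_lock. */
static pthread_mutex_t group_lock[NGROUPS];

static int item_group_cmp(const void *a, const void *b)
{
	return ((const struct item *)a)->group -
	       ((const struct item *)b)->group;
}

static void process_all(struct item *items, int n)
{
	int cur = -1;		/* no group locked yet, like NULLAGNUMBER */
	int i;

	/* Sort first so that items of the same group are adjacent. */
	qsort(items, n, sizeof(*items), item_group_cmp);

	for (i = 0; i < n; i++) {
		if (items[i].group != cur) {
			/* Only switch locks when the group changes. */
			if (cur != -1)
				pthread_mutex_unlock(&group_lock[cur]);
			cur = items[i].group;
			pthread_mutex_lock(&group_lock[cur]);
		}
		/* Per-item work happens under the group lock. */
		printf("group %d: item %d\n", cur, items[i].value);
	}

	if (cur != -1)
		pthread_mutex_unlock(&group_lock[cur]);
}

int main(void)
{
	struct item items[] = {
		{ 2, 10 }, { 0, 11 }, { 2, 12 }, { 1, 13 }, { 0, 14 },
	};
	int i;

	for (i = 0; i < NGROUPS; i++)
		pthread_mutex_init(&group_lock[i], NULL);

	process_all(items, (int)(sizeof(items) / sizeof(items[0])));
	return 0;
}

Sorting costs O(n log n) once, but it reduces the number of lock round trips (and per-group lookups) from one per item to at most one per group that actually appears in the list.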
Showing 6 changed files with 61 additions and 19 deletions
fs/xfs/linux-2.6/xfs_buf.c
fs/xfs/linux-2.6/xfs_linux.h
fs/xfs/xfs_alloc.c
@@ -2964,25 +2964,61 @@
 	*rlen = 0;
 }
 
-void
-xfs_alloc_busy_clear(
+static void
+xfs_alloc_busy_clear_one(
 	struct xfs_mount	*mp,
+	struct xfs_perag	*pag,
 	struct xfs_busy_extent	*busyp)
 {
-	struct xfs_perag	*pag;
-
-	list_del_init(&busyp->list);
-
-	pag = xfs_perag_get(mp, busyp->agno);
-	spin_lock(&pag->pagb_lock);
 	if (busyp->length) {
 		trace_xfs_alloc_busy_clear(mp, busyp->agno, busyp->bno,
 						busyp->length);
 		rb_erase(&busyp->rb_node, &pag->pagb_tree);
 	}
-	spin_unlock(&pag->pagb_lock);
-	xfs_perag_put(pag);
 
+	list_del_init(&busyp->list);
 	kmem_free(busyp);
+}
+
+void
+xfs_alloc_busy_clear(
+	struct xfs_mount	*mp,
+	struct list_head	*list)
+{
+	struct xfs_busy_extent	*busyp, *n;
+	struct xfs_perag	*pag = NULL;
+	xfs_agnumber_t		agno = NULLAGNUMBER;
+
+	list_for_each_entry_safe(busyp, n, list, list) {
+		if (busyp->agno != agno) {
+			if (pag) {
+				spin_unlock(&pag->pagb_lock);
+				xfs_perag_put(pag);
+			}
+			pag = xfs_perag_get(mp, busyp->agno);
+			spin_lock(&pag->pagb_lock);
+			agno = busyp->agno;
+		}
+
+		xfs_alloc_busy_clear_one(mp, pag, busyp);
+	}
+
+	if (pag) {
+		spin_unlock(&pag->pagb_lock);
+		xfs_perag_put(pag);
+	}
+}
+
+/*
+ * Callback for list_sort to sort busy extents by the AG they reside in.
+ */
+int
+xfs_busy_extent_ag_cmp(
+	void			*priv,
+	struct list_head	*a,
+	struct list_head	*b)
+{
+	return container_of(a, struct xfs_busy_extent, list)->agno -
+	       container_of(b, struct xfs_busy_extent, list)->agno;
 }
fs/xfs/xfs_alloc.h
@@ -140,7 +140,7 @@
 	xfs_agblock_t bno, xfs_extlen_t len);
 
 void
-xfs_alloc_busy_clear(struct xfs_mount *mp, struct xfs_busy_extent *busyp);
+xfs_alloc_busy_clear(struct xfs_mount *mp, struct list_head *list);
 
 int
 xfs_alloc_busy_search(struct xfs_mount *mp, xfs_agnumber_t agno,
@@ -149,6 +149,15 @@
 void
 xfs_alloc_busy_reuse(struct xfs_mount *mp, xfs_agnumber_t agno,
 	xfs_agblock_t fbno, xfs_extlen_t flen, bool userdata);
+
+int
+xfs_busy_extent_ag_cmp(void *priv, struct list_head *a, struct list_head *b);
+
+static inline void xfs_alloc_busy_sort(struct list_head *list)
+{
+	list_sort(NULL, list, xfs_busy_extent_ag_cmp);
+}
+
 #endif /* __KERNEL__ */
 
 /*
fs/xfs/xfs_log_cil.c
@@ -361,13 +361,12 @@
 	int	abort)
 {
 	struct xfs_cil_ctx	*ctx = args;
-	struct xfs_busy_extent	*busyp, *n;
 
 	xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, ctx->lv_chain,
 					ctx->start_lsn, abort);
 
-	list_for_each_entry_safe(busyp, n, &ctx->busy_extents, list)
-		xfs_alloc_busy_clear(ctx->cil->xc_log->l_mp, busyp);
+	xfs_alloc_busy_sort(&ctx->busy_extents);
+	xfs_alloc_busy_clear(ctx->cil->xc_log->l_mp, &ctx->busy_extents);
 
 	spin_lock(&ctx->cil->xc_cil_lock);
 	list_del(&ctx->committing);
fs/xfs/xfs_trans.c
@@ -608,10 +608,8 @@
 xfs_trans_free(
 	struct xfs_trans	*tp)
 {
-	struct xfs_busy_extent	*busyp, *n;
-
-	list_for_each_entry_safe(busyp, n, &tp->t_busy, list)
-		xfs_alloc_busy_clear(tp->t_mountp, busyp);
+	xfs_alloc_busy_sort(&tp->t_busy);
+	xfs_alloc_busy_clear(tp->t_mountp, &tp->t_busy);
 
 	atomic_dec(&tp->t_mountp->m_active_trans);
 	xfs_trans_free_dqinfo(tp);