Commit aab2207cf8d9c343b6b5f0e4d27e1732f8618d14
Committed by
Linus Torvalds
1 parent
35386e3b0f
Exists in
master
and in
7 other branches
[PATCH] slab: make drain_array more universal by adding more parameters
Add a parameter to drain_array() to control the freeing of all objects and then use drain_array() to replace instances of drain_array_locked with drain_array. Doing so will avoid taking locks in those locations if the arrays are empty. Signed-off-by: Christoph Lameter <clameter@sgi.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Showing 1 changed file with 11 additions and 9 deletions Side-by-side Diff
mm/slab.c
... | ... | @@ -2126,6 +2126,10 @@ |
2126 | 2126 | static void drain_array_locked(struct kmem_cache *cachep, |
2127 | 2127 | struct array_cache *ac, int force, int node); |
2128 | 2128 | |
2129 | +static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3, | |
2130 | + struct array_cache *ac, | |
2131 | + int force, int node); | |
2132 | + | |
2129 | 2133 | static void do_drain(void *arg) |
2130 | 2134 | { |
2131 | 2135 | struct kmem_cache *cachep = arg; |
... | ... | @@ -2150,9 +2154,7 @@ |
2150 | 2154 | for_each_online_node(node) { |
2151 | 2155 | l3 = cachep->nodelists[node]; |
2152 | 2156 | if (l3) { |
2153 | - spin_lock_irq(&l3->list_lock); | |
2154 | - drain_array_locked(cachep, l3->shared, 1, node); | |
2155 | - spin_unlock_irq(&l3->list_lock); | |
2157 | + drain_array(cachep, l3, l3->shared, 1, node); | |
2156 | 2158 | if (l3->alien) |
2157 | 2159 | drain_alien_cache(cachep, l3->alien); |
2158 | 2160 | } |
2159 | 2161 | |
... | ... | @@ -3545,12 +3547,11 @@ |
3545 | 3547 | * necessary. |
3546 | 3548 | */ |
3547 | 3549 | static void drain_array(struct kmem_cache *searchp, struct kmem_list3 *l3, |
3548 | - struct array_cache *ac) | |
3550 | + struct array_cache *ac, int force, int node) | |
3549 | 3551 | { |
3550 | 3552 | if (ac && ac->avail) { |
3551 | 3553 | spin_lock_irq(&l3->list_lock); |
3552 | - drain_array_locked(searchp, ac, 0, | |
3553 | - numa_node_id()); | |
3554 | + drain_array_locked(searchp, ac, force, node); | |
3554 | 3555 | spin_unlock_irq(&l3->list_lock); |
3555 | 3556 | } |
3556 | 3557 | } |
... | ... | @@ -3571,6 +3572,7 @@ |
3571 | 3572 | { |
3572 | 3573 | struct list_head *walk; |
3573 | 3574 | struct kmem_list3 *l3; |
3575 | + int node = numa_node_id(); | |
3574 | 3576 | |
3575 | 3577 | if (!mutex_trylock(&cache_chain_mutex)) { |
3576 | 3578 | /* Give up. Setup the next iteration. */ |
3577 | 3579 | |
... | ... | @@ -3593,11 +3595,11 @@ |
3593 | 3595 | * have established with reasonable certainty that |
3594 | 3596 | * we can do some work if the lock was obtained. |
3595 | 3597 | */ |
3596 | - l3 = searchp->nodelists[numa_node_id()]; | |
3598 | + l3 = searchp->nodelists[node]; | |
3597 | 3599 | |
3598 | 3600 | reap_alien(searchp, l3); |
3599 | 3601 | |
3600 | - drain_array(searchp, l3, cpu_cache_get(searchp)); | |
3602 | + drain_array(searchp, l3, cpu_cache_get(searchp), 0, node); | |
3601 | 3603 | |
3602 | 3604 | /* |
3603 | 3605 | * These are racy checks but it does not matter |
... | ... | @@ -3608,7 +3610,7 @@ |
3608 | 3610 | |
3609 | 3611 | l3->next_reap = jiffies + REAPTIMEOUT_LIST3; |
3610 | 3612 | |
3611 | - drain_array(searchp, l3, l3->shared); | |
3613 | + drain_array(searchp, l3, l3->shared, 0, node); | |
3612 | 3614 | |
3613 | 3615 | if (l3->free_touched) { |
3614 | 3616 | l3->free_touched = 0; |