Commit 80bfed904c690642db9d4178950735299160950b
Committed by
Linus Torvalds
1 parent
210fe53030
Exists in
master
and in
20 other branches
[PATCH] consolidate lru_add_drain() and lru_drain_cache()
Cc: Christoph Lameter <clameter@engr.sgi.com>
Cc: Rajesh Shah <rajesh.shah@intel.com>
Cc: Li Shaohua <shaohua.li@intel.com>
Cc: Srivatsa Vaddagiri <vatsa@in.ibm.com>
Cc: Ashok Raj <ashok.raj@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Showing 1 changed file with 11 additions and 16 deletions Side-by-side Diff
mm/swap.c
... | ... | @@ -156,18 +156,24 @@ |
156 | 156 | put_cpu_var(lru_add_active_pvecs); |
157 | 157 | } |
158 | 158 | |
159 | -void lru_add_drain(void) | |
159 | +static void __lru_add_drain(int cpu) | |
160 | 160 | { |
161 | - struct pagevec *pvec = &get_cpu_var(lru_add_pvecs); | |
161 | + struct pagevec *pvec = &per_cpu(lru_add_pvecs, cpu); | |
162 | 162 | |
163 | + /* CPU is dead, so no locking needed. */ | |
163 | 164 | if (pagevec_count(pvec)) |
164 | 165 | __pagevec_lru_add(pvec); |
165 | - pvec = &__get_cpu_var(lru_add_active_pvecs); | |
166 | + pvec = &per_cpu(lru_add_active_pvecs, cpu); | |
166 | 167 | if (pagevec_count(pvec)) |
167 | 168 | __pagevec_lru_add_active(pvec); |
168 | - put_cpu_var(lru_add_pvecs); | |
169 | 169 | } |
170 | 170 | |
171 | +void lru_add_drain(void) | |
172 | +{ | |
173 | + __lru_add_drain(get_cpu()); | |
174 | + put_cpu(); | |
175 | +} | |
176 | + | |
171 | 177 | /* |
172 | 178 | * This path almost never happens for VM activity - pages are normally |
173 | 179 | * freed via pagevecs. But it gets used by networking. |
174 | 180 | |
... | ... | @@ -412,18 +418,7 @@ |
412 | 418 | } |
413 | 419 | |
414 | 420 | #ifdef CONFIG_HOTPLUG_CPU |
415 | -static void lru_drain_cache(unsigned int cpu) | |
416 | -{ | |
417 | - struct pagevec *pvec = &per_cpu(lru_add_pvecs, cpu); | |
418 | 421 | |
419 | - /* CPU is dead, so no locking needed. */ | |
420 | - if (pagevec_count(pvec)) | |
421 | - __pagevec_lru_add(pvec); | |
422 | - pvec = &per_cpu(lru_add_active_pvecs, cpu); | |
423 | - if (pagevec_count(pvec)) | |
424 | - __pagevec_lru_add_active(pvec); | |
425 | -} | |
426 | - | |
427 | 422 | /* Drop the CPU's cached committed space back into the central pool. */ |
428 | 423 | static int cpu_swap_callback(struct notifier_block *nfb, |
429 | 424 | unsigned long action, |
... | ... | @@ -435,7 +430,7 @@ |
435 | 430 | if (action == CPU_DEAD) { |
436 | 431 | atomic_add(*committed, &vm_committed_space); |
437 | 432 | *committed = 0; |
438 | - lru_drain_cache((long)hcpu); | |
433 | + __lru_add_drain((long)hcpu); | |
439 | 434 | } |
440 | 435 | return NOTIFY_OK; |
441 | 436 | } |