Commit a56dbddf06b653ef9c04ca3767f260fd31ccebab
Parent 9f7dcf224b
percpu: move fully free chunk reclamation into a work
Impact: code reorganization for later changes

Do fully free chunk reclamation using a work.  This change is to prepare
for locking changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
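[Context, not part of the commit: the mechanism being switched to is the standard kernel workqueue deferral API: declare a work item bound to a handler, then schedule_work() it from the fast path so the heavy lifting runs later in process context. Below is a minimal sketch of that pattern as a throwaway module; the demo_* names are made up for illustration and do not appear in the patch.]

#include <linux/module.h>
#include <linux/workqueue.h>

/* runs later in process context on the shared kernel workqueue */
static void demo_work_fn(struct work_struct *work)
{
        pr_info("deferred cleanup would run here\n");
}

static DECLARE_WORK(demo_work, demo_work_fn);

static int __init demo_init(void)
{
        /* fast path: just kick the work item and return */
        schedule_work(&demo_work);
        return 0;
}

static void __exit demo_exit(void)
{
        /* make sure the handler is not still running at unload time */
        cancel_work_sync(&demo_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");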
1 file changed, 38 insertions(+), 10 deletions(-)
diff --git a/mm/percpu.c b/mm/percpu.c
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -63,6 +63,7 @@
 #include <linux/rbtree.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
+#include <linux/workqueue.h>
 
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
@@ -118,6 +119,10 @@
 static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
 static struct rb_root pcpu_addr_root = RB_ROOT;   /* chunks by address */
 
+/* reclaim work to release fully free chunks, scheduled from free path */
+static void pcpu_reclaim(struct work_struct *work);
+static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);
+
 static int __pcpu_size_to_slot(int size)
 {
         int highbit = fls(size);        /* size is in bytes */
@@ -846,13 +851,37 @@
         return pcpu_alloc(size, align, true);
 }
 
-static void pcpu_kill_chunk(struct pcpu_chunk *chunk)
+/**
+ * pcpu_reclaim - reclaim fully free chunks, workqueue function
+ * @work: unused
+ *
+ * Reclaim all fully free chunks except for the first one.
+ */
+static void pcpu_reclaim(struct work_struct *work)
 {
-        WARN_ON(chunk->immutable);
-        pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size, false);
-        list_del(&chunk->list);
-        rb_erase(&chunk->rb_node, &pcpu_addr_root);
-        free_pcpu_chunk(chunk);
+        LIST_HEAD(todo);
+        struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
+        struct pcpu_chunk *chunk, *next;
+
+        mutex_lock(&pcpu_mutex);
+
+        list_for_each_entry_safe(chunk, next, head, list) {
+                WARN_ON(chunk->immutable);
+
+                /* spare the first one */
+                if (chunk == list_first_entry(head, struct pcpu_chunk, list))
+                        continue;
+
+                rb_erase(&chunk->rb_node, &pcpu_addr_root);
+                list_move(&chunk->list, &todo);
+        }
+
+        mutex_unlock(&pcpu_mutex);
+
+        list_for_each_entry_safe(chunk, next, &todo, list) {
+                pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size, false);
+                free_pcpu_chunk(chunk);
+        }
 }
 
 /**
@@ -877,14 +906,13 @@
 
         pcpu_free_area(chunk, off);
 
-        /* the chunk became fully free, kill one if there are other free ones */
+        /* if there are more than one fully free chunks, wake up grim reaper */
         if (chunk->free_size == pcpu_unit_size) {
                 struct pcpu_chunk *pos;
 
-                list_for_each_entry(pos,
-                                    &pcpu_slot[pcpu_chunk_slot(chunk)], list)
+                list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
                         if (pos != chunk) {
-                                pcpu_kill_chunk(pos);
+                                schedule_work(&pcpu_reclaim_work);
                                 break;
                         }
         }
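[Also context, not from the commit itself: pcpu_reclaim() above follows the common "collect under the lock, free outside it" idiom. Entries are detached onto a private list while the mutex is held, and the slow depopulate/free work happens only after the lock is dropped. A hedged sketch of that idiom with a made-up item type; all demo_* names are hypothetical.]

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct demo_item {
        struct list_head list;
};

static LIST_HEAD(demo_active);          /* protected by demo_lock */
static DEFINE_MUTEX(demo_lock);

static void demo_reclaim_all(void)
{
        LIST_HEAD(todo);
        struct demo_item *item, *next;

        /* detach everything while holding the lock... */
        mutex_lock(&demo_lock);
        list_for_each_entry_safe(item, next, &demo_active, list)
                list_move(&item->list, &todo);
        mutex_unlock(&demo_lock);

        /* ...then do the potentially slow freeing without it */
        list_for_each_entry_safe(item, next, &todo, list) {
                list_del(&item->list);
                kfree(item);
        }
}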