Commit 34bf6ef94a835a8f1d8abd3e7d38c6c08d205867
Committed by: Pekka Enberg
1 parent: 5f0985bb11
Exists in: master and in 13 other branches
mm: slab/slub: use page->lru consistently instead of page->list
'struct page' has two list_head fields: 'lru' and 'list'. Conveniently, they are unioned together. This means that code can use them interchangeably, which gets horribly confusing like with this nugget from slab.c:

>	list_del(&page->lru);
>	if (page->active == cachep->num)
>		list_add(&page->list, &n->slabs_full);

This patch makes the slab and slub code use page->lru universally instead of mixing ->list and ->lru.

So, the new rule is: page->lru is what you use if you want to keep your page on a list. Don't like the fact that it's not called ->list? Too bad.

Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Acked-by: Christoph Lameter <cl@linux.com>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
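For illustration, here is a minimal userspace sketch (a cut-down stand-in, not kernel code; the field names mirror the real ones but everything else is simplified) of why the old mixed usage still worked: because 'lru' and 'list' sit in the same union, &page->lru and &page->list are the same address, so list operations through either name manipulate the same links.

#include <assert.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's list_head and struct page. */
struct list_head {
	struct list_head *next, *prev;
};

struct page {
	union {
		struct list_head lru;	/* pageout list name */
		struct list_head list;	/* slob list name (removed by this patch) */
	};
};

int main(void)
{
	struct page page;

	/* Both union members occupy the same storage... */
	assert((void *)&page.lru == (void *)&page.list);

	/* ...so linking through one name is visible through the other. */
	page.lru.next = page.lru.prev = &page.lru;
	printf("lru aliases list: %d\n", page.list.next == &page.lru);
	return 0;
}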
Showing 3 changed files with 9 additions and 8 deletions
include/linux/mm_types.h
@@ -124,6 +124,8 @@
 	union {
 		struct list_head lru;	/* Pageout list, eg. active_list
 					 * protected by zone->lru_lock !
+					 * Can be used as a generic list
+					 * by the page owner.
 					 */
 		struct {		/* slub per cpu partial pages */
 			struct page *next;	/* Next partial slab */
@@ -136,7 +138,6 @@
 #endif
 	};
 
-	struct list_head list;		/* slobs list of pages */
 	struct slab *slab_page;		/* slab fields */
 	struct rcu_head rcu_head;	/* Used by SLAB
 					 * when destroying via RCU
mm/slab.c
@@ -2922,9 +2922,9 @@
 		/* move slabp to correct slabp list: */
 		list_del(&page->lru);
 		if (page->active == cachep->num)
-			list_add(&page->list, &n->slabs_full);
+			list_add(&page->lru, &n->slabs_full);
 		else
-			list_add(&page->list, &n->slabs_partial);
+			list_add(&page->lru, &n->slabs_partial);
 	}
 
must_grow:
mm/slob.c
@@ -111,13 +111,13 @@
 
 static void set_slob_page_free(struct page *sp, struct list_head *list)
 {
-	list_add(&sp->list, list);
+	list_add(&sp->lru, list);
 	__SetPageSlobFree(sp);
 }
 
 static inline void clear_slob_page_free(struct page *sp)
 {
-	list_del(&sp->list);
+	list_del(&sp->lru);
 	__ClearPageSlobFree(sp);
 }
 
@@ -282,7 +282,7 @@
 
 	spin_lock_irqsave(&slob_lock, flags);
 	/* Iterate through each partially free page, try to find room */
-	list_for_each_entry(sp, slob_list, list) {
+	list_for_each_entry(sp, slob_list, lru) {
 #ifdef CONFIG_NUMA
 		/*
 		 * If there's a node specification, search for a partial
@@ -296,7 +296,7 @@
 			continue;
 
 		/* Attempt to alloc */
-		prev = sp->list.prev;
+		prev = sp->lru.prev;
 		b = slob_page_alloc(sp, size, align);
 		if (!b)
 			continue;
@@ -322,7 +322,7 @@
 	spin_lock_irqsave(&slob_lock, flags);
 	sp->units = SLOB_UNITS(PAGE_SIZE);
 	sp->freelist = b;
-	INIT_LIST_HEAD(&sp->list);
+	INIT_LIST_HEAD(&sp->lru);
 	set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
 	set_slob_page_free(sp, slob_list);
 	b = slob_page_alloc(sp, size, align);