Commit 8b2e9b712f6139df9c754af0d67fecc4bbc88545

Authored by Linus Torvalds
1 parent b5898cd057

Revert "mm: create a separate slab for page->ptl allocation"

This reverts commit ea1e7ed33708c7a760419ff9ded0a6cb90586a50.

Al points out that while the commit *does* actually create a separate
slab for the page->ptl allocation, that slab is never actually used, and
the code continues to use kmalloc/kfree.

Damien Wyart points out that the original patch did have the conversion
to use kmem_cache_alloc/free, so it got lost somewhere on its way to me.

Revert the half-arsed attempt that didn't do anything.  If we really do
want the special slab (remember: this is all relevant just for debug
builds, so it's not necessarily all that critical) we might as well redo
the patch fully.

Reported-by: Al Viro <viro@zeniv.linux.org.uk>
Acked-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Kirill A Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
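
For context: the conversion that went missing would have made ptlock_alloc()/ptlock_free() draw from the new cache instead of calling kmalloc()/kfree(). A rough sketch of what the complete patch looks like (illustrative only, not the lost patch itself; the version that eventually landed upstream is authoritative):

#if USE_SPLIT_PTE_PTLOCKS && BLOATED_SPINLOCKS
static struct kmem_cache *page_ptl_cachep;

void __init ptlock_cache_init(void)
{
        /* This half existed in the reverted commit: create the slab. */
        page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
                        SLAB_PANIC, NULL);
}

/* The half that got lost: allocate page->ptl from the cache ... */
bool ptlock_alloc(struct page *page)
{
        spinlock_t *ptl;

        ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
        if (!ptl)
                return false;
        page->ptl = ptl;
        return true;
}

/* ... and release it back to the cache instead of kfree(). */
void ptlock_free(struct page *page)
{
        kmem_cache_free(page_ptl_cachep, page->ptl);
}
#endif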

Showing 3 changed files with 1 addition and 17 deletions

include/linux/mm.h
@@ -1318,7 +1318,6 @@
 
 #if USE_SPLIT_PTE_PTLOCKS
 #if BLOATED_SPINLOCKS
-void __init ptlock_cache_init(void);
 extern bool ptlock_alloc(struct page *page);
 extern void ptlock_free(struct page *page);
 
@@ -1327,7 +1326,6 @@
         return page->ptl;
 }
 #else /* BLOATED_SPINLOCKS */
-static inline void ptlock_cache_init(void) {}
 static inline bool ptlock_alloc(struct page *page)
 {
         return true;
@@ -1380,16 +1378,9 @@
 {
         return &mm->page_table_lock;
 }
-static inline void ptlock_cache_init(void) {}
 static inline bool ptlock_init(struct page *page) { return true; }
 static inline void pte_lock_deinit(struct page *page) {}
 #endif /* USE_SPLIT_PTE_PTLOCKS */
-
-static inline void pgtable_init(void)
-{
-        ptlock_cache_init();
-        pgtable_cache_init();
-}
 
 static inline bool pgtable_page_ctor(struct page *page)
 {
init/main.c
@@ -476,7 +476,7 @@
         mem_init();
         kmem_cache_init();
         percpu_init_late();
-        pgtable_init();
+        pgtable_cache_init();
         vmalloc_init();
 }
 
mm/memory.c
@@ -4272,13 +4272,6 @@
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
 
 #if USE_SPLIT_PTE_PTLOCKS && BLOATED_SPINLOCKS
-static struct kmem_cache *page_ptl_cachep;
-void __init ptlock_cache_init(void)
-{
-        page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
-                        SLAB_PANIC, NULL);
-}
-
 bool ptlock_alloc(struct page *page)
 {
         spinlock_t *ptl;
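
After the revert, the BLOATED_SPINLOCKS allocation path is back to the plain kmalloc()/kfree() pair it has been using all along, roughly:

bool ptlock_alloc(struct page *page)
{
        spinlock_t *ptl;

        /* Still a plain kmalloc; the dedicated slab was never wired up. */
        ptl = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
        if (!ptl)
                return false;
        page->ptl = ptl;
        return true;
}

void ptlock_free(struct page *page)
{
        kfree(page->ptl);
}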