Commit af4f8ba31a4e328677bec493ceeaf112ca193b65
Exists in smarc-l5.0.0_1.0.0-ga and in 5 other branches
Merge branch 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux
Pull slab updates from Pekka Enberg:
 "Mainly a bunch of SLUB fixes from Joonsoo Kim"

* 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux:
  slub: use __SetPageSlab function to set PG_slab flag
  slub: fix a memory leak in get_partial_node()
  slub: remove unused argument of init_kmem_cache_node()
  slub: fix a possible memory leak
  Documentations: Fix slabinfo.c directory in vm/slub.txt
  slub: fix incorrect return type of get_any_partial()
Showing 2 changed files
Documentation/vm/slub.txt
@@ -17,7 +17,7 @@
 slabs that have data in them. See "slabinfo -h" for more options when
 running the command. slabinfo can be compiled with
 
-gcc -o slabinfo tools/slub/slabinfo.c
+gcc -o slabinfo tools/vm/slabinfo.c
 
 Some of the modes of operation of slabinfo require that slub debugging
 be enabled on the command line. F.e. no tracking information will be
mm/slub.c
@@ -1369,7 +1369,7 @@
 
 	inc_slabs_node(s, page_to_nid(page), page->objects);
 	page->slab = s;
-	page->flags |= 1 << PG_slab;
+	__SetPageSlab(page);
 
 	start = page_address(page);
 
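The hunk above swaps the open-coded flag manipulation for the standard page-flag helper. A minimal user-space sketch of the idea follows (this is not the kernel's page-flags.h, where such helpers are generated by macros); the __SetPageSlab() form is the non-atomic setter, which is fine here because the freshly allocated slab page is not yet reachable by other CPUs.

/*
 * Sketch only: a non-atomic "__SetPageSlab()"-style helper is a plain bit
 * set on page->flags, equivalent to the open-coded
 * "page->flags |= 1 << PG_slab" it replaces, but spelled through the
 * standard accessor.
 */
#include <stdio.h>

enum pageflags { PG_locked, PG_referenced, PG_slab };

struct page { unsigned long flags; };

static inline void __SetPageSlab(struct page *page)
{
	page->flags |= 1UL << PG_slab;	/* non-atomic: caller owns the page */
}

static inline int PageSlab(const struct page *page)
{
	return !!(page->flags & (1UL << PG_slab));
}

int main(void)
{
	struct page page = { .flags = 0 };

	__SetPageSlab(&page);
	printf("PG_slab: %d\n", PageSlab(&page));
	return 0;
}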
@@ -1514,15 +1514,19 @@
 		freelist = page->freelist;
 		counters = page->counters;
 		new.counters = counters;
-		if (mode)
+		if (mode) {
 			new.inuse = page->objects;
+			new.freelist = NULL;
+		} else {
+			new.freelist = freelist;
+		}
 
 		VM_BUG_ON(new.frozen);
 		new.frozen = 1;
 
 	} while (!__cmpxchg_double_slab(s, page,
 			freelist, counters,
-			NULL, new.counters,
+			new.freelist, new.counters,
 			"lock and freeze"));
 
 	remove_partial(n, page);
@@ -1564,7 +1568,6 @@
 			object = t;
 			available = page->objects - page->inuse;
 		} else {
-			page->freelist = t;
 			available = put_cpu_partial(s, page, 0);
 			stat(s, CPU_PARTIAL_NODE);
 		}
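The two hunks above are the get_partial_node() leak fix: the replacement freelist is now chosen inside the cmpxchg loop (NULL when the slab is taken wholesale for the cpu slab, left in place when the page is only queued as a cpu partial), and the later unsynchronized page->freelist = t store is dropped, the apparent leak being that such a store could overwrite a freelist concurrently updated by a remote free and lose that object. A rough user-space model of the pattern, using C11 atomics in place of the kernel's __cmpxchg_double_slab():

#include <stdatomic.h>
#include <stdio.h>

struct object { struct object *next; };

/* Stand-in for page->freelist. */
static _Atomic(struct object *) freelist;

/* Take the whole list (mode != 0) or leave it on the "page" untouched. */
static struct object *acquire(int mode)
{
	struct object *old, *new;

	do {
		old = atomic_load(&freelist);
		/* Decide the replacement before the swap, so no separate
		 * fix-up store is needed afterwards. */
		new = mode ? NULL : old;
	} while (!atomic_compare_exchange_weak(&freelist, &old, new));

	return old;
}

int main(void)
{
	struct object a = { .next = NULL };

	atomic_store(&freelist, &a);
	printf("took %p, remaining %p\n",
	       (void *)acquire(1), (void *)atomic_load(&freelist));
	return 0;
}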
@@ -1579,7 +1582,7 @@
 /*
  * Get a page from somewhere. Search in increasing NUMA distances.
  */
-static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags,
+static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
 		struct kmem_cache_cpu *c)
 {
 #ifdef CONFIG_NUMA
@@ -2766,7 +2769,7 @@
 }
 
 static void
-init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
+init_kmem_cache_node(struct kmem_cache_node *n)
 {
 	n->nr_partial = 0;
 	spin_lock_init(&n->list_lock);
@@ -2836,7 +2839,7 @@
 	init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
 	init_tracking(kmem_cache_node, n);
 #endif
-	init_kmem_cache_node(n, kmem_cache_node);
+	init_kmem_cache_node(n);
 	inc_slabs_node(kmem_cache_node, node, page->objects);
 
 	add_partial(n, page, DEACTIVATE_TO_HEAD);
@@ -2876,7 +2879,7 @@
 		}
 
 		s->node[node] = n;
-		init_kmem_cache_node(n, s);
+		init_kmem_cache_node(n);
 	}
 	return 1;
 }
@@ -3625,7 +3628,7 @@
 			ret = -ENOMEM;
 			goto out;
 		}
-		init_kmem_cache_node(n, s);
+		init_kmem_cache_node(n);
 		s->node[nid] = n;
 	}
 out:
@@ -3968,9 +3971,9 @@
 			}
 			return s;
 		}
-		kfree(n);
 		kfree(s);
 	}
+	kfree(n);
 err:
 	up_write(&slub_lock);
 
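The final hunk is the "possible memory leak" fix: kfree(n), where n holds the kstrdup()'d cache name in this function, moves out of the if (s) block so it also runs when the kmalloc of s itself fails, not only when kmem_cache_open() fails. A small user-space sketch of the same error-path shape, with hypothetical names (create_cache, open_cache):

#include <stdlib.h>
#include <string.h>

struct cache { const char *name; int object_size; };

/* Hypothetical stand-in for kmem_cache_open(): setup that can fail. */
static int open_cache(struct cache *s, const char *n, int size)
{
	if (size <= 0)
		return -1;
	s->name = n;
	s->object_size = size;
	return 0;
}

static struct cache *create_cache(const char *name, int size)
{
	char *n = strdup(name);		/* first allocation: the cache name */
	struct cache *s;

	if (!n)
		goto err;

	s = malloc(sizeof(*s));		/* second allocation: the cache itself */
	if (s) {
		if (open_cache(s, n, size) == 0)
			return s;	/* success: s keeps n as its name */
		free(s);
	}
	free(n);	/* outside "if (s)": n is released both when
			 * open_cache() fails and when malloc() of s fails */
err:
	return NULL;
}

int main(void)
{
	struct cache *c = create_cache("demo", 64);

	if (c) {
		free((void *)c->name);
		free(c);
	}
	return 0;
}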