Commit 343e0d7a93951e35065fdb5e3dd61aece0ec6b3c
Committed by Linus Torvalds
1 parent: 9a2dba4b49
[PATCH] slab: replace kmem_cache_t with struct kmem_cache
Replace uses of kmem_cache_t with proper struct kmem_cache in mm/slab.c.

Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Showing 1 changed file with 98 additions and 97 deletions
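The change is purely mechanical: kmem_cache_t is a typedef for struct kmem_cache, so every prototype, declaration, cast, and sizeof in mm/slab.c that named the typedef is rewritten to spell out the struct tag, with no change in behaviour. A minimal standalone sketch of the pattern follows; the example_cache type and cache_obj_size function are hypothetical illustrations, not code from the patch.

```c
#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-in for struct kmem_cache. Previously callers would
 * have gone through a typedef such as
 *     typedef struct example_cache example_cache_t;
 * and written "example_cache_t *" in prototypes, casts and sizeof. */
struct example_cache {
	size_t obj_size;
	const char *name;
};

/* After the conversion the struct tag is used directly, exactly as the
 * patch does for obj_size(), kmem_cache_alloc() and friends. */
static size_t cache_obj_size(struct example_cache *cachep)
{
	return cachep->obj_size;
}

int main(void)
{
	struct example_cache c = { sizeof(long), "example" };

	printf("%s: %zu bytes per object, descriptor is %zu bytes\n",
	       c.name, cache_obj_size(&c), sizeof(struct example_cache));
	return 0;
}
```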
mm/slab.c
... | ... | @@ -55,7 +55,7 @@ |
55 | 55 | * |
56 | 56 | * SMP synchronization: |
57 | 57 | * constructors and destructors are called without any locking. |
58 | - * Several members in kmem_cache_t and struct slab never change, they | |
58 | + * Several members in struct kmem_cache and struct slab never change, they | |
59 | 59 | * are accessed without any locking. |
60 | 60 | * The per-cpu arrays are never accessed from the wrong cpu, no locking, |
61 | 61 | * and local interrupts are disabled so slab code is preempt-safe. |
... | ... | @@ -244,7 +244,7 @@ |
244 | 244 | */ |
245 | 245 | struct slab_rcu { |
246 | 246 | struct rcu_head head; |
247 | - kmem_cache_t *cachep; | |
247 | + struct kmem_cache *cachep; | |
248 | 248 | void *addr; |
249 | 249 | }; |
250 | 250 | |
... | ... | @@ -363,7 +363,7 @@ |
363 | 363 | } while (0) |
364 | 364 | |
365 | 365 | /* |
366 | - * kmem_cache_t | |
366 | + * struct kmem_cache | |
367 | 367 | * |
368 | 368 | * manages a cache. |
369 | 369 | */ |
370 | 370 | |
371 | 371 | |
... | ... | @@ -391,15 +391,15 @@ |
391 | 391 | size_t colour; /* cache colouring range */ |
392 | 392 | unsigned int colour_off; /* colour offset */ |
393 | 393 | unsigned int colour_next; /* cache colouring */ |
394 | - kmem_cache_t *slabp_cache; | |
394 | + struct kmem_cache *slabp_cache; | |
395 | 395 | unsigned int slab_size; |
396 | 396 | unsigned int dflags; /* dynamic flags */ |
397 | 397 | |
398 | 398 | /* constructor func */ |
399 | - void (*ctor) (void *, kmem_cache_t *, unsigned long); | |
399 | + void (*ctor) (void *, struct kmem_cache *, unsigned long); | |
400 | 400 | |
401 | 401 | /* de-constructor func */ |
402 | - void (*dtor) (void *, kmem_cache_t *, unsigned long); | |
402 | + void (*dtor) (void *, struct kmem_cache *, unsigned long); | |
403 | 403 | |
404 | 404 | /* 4) cache creation/removal */ |
405 | 405 | const char *name; |
406 | 406 | |
407 | 407 | |
408 | 408 | |
... | ... | @@ -509,23 +509,23 @@ |
509 | 509 | * cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long] |
510 | 510 | * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address [BYTES_PER_WORD long] |
511 | 511 | */ |
512 | -static int obj_offset(kmem_cache_t *cachep) | |
512 | +static int obj_offset(struct kmem_cache *cachep) | |
513 | 513 | { |
514 | 514 | return cachep->obj_offset; |
515 | 515 | } |
516 | 516 | |
517 | -static int obj_size(kmem_cache_t *cachep) | |
517 | +static int obj_size(struct kmem_cache *cachep) | |
518 | 518 | { |
519 | 519 | return cachep->obj_size; |
520 | 520 | } |
521 | 521 | |
522 | -static unsigned long *dbg_redzone1(kmem_cache_t *cachep, void *objp) | |
522 | +static unsigned long *dbg_redzone1(struct kmem_cache *cachep, void *objp) | |
523 | 523 | { |
524 | 524 | BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); |
525 | 525 | return (unsigned long*) (objp+obj_offset(cachep)-BYTES_PER_WORD); |
526 | 526 | } |
527 | 527 | |
528 | -static unsigned long *dbg_redzone2(kmem_cache_t *cachep, void *objp) | |
528 | +static unsigned long *dbg_redzone2(struct kmem_cache *cachep, void *objp) | |
529 | 529 | { |
530 | 530 | BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); |
531 | 531 | if (cachep->flags & SLAB_STORE_USER) |
... | ... | @@ -534,7 +534,7 @@ |
534 | 534 | return (unsigned long *)(objp + cachep->buffer_size - BYTES_PER_WORD); |
535 | 535 | } |
536 | 536 | |
537 | -static void **dbg_userword(kmem_cache_t *cachep, void *objp) | |
537 | +static void **dbg_userword(struct kmem_cache *cachep, void *objp) | |
538 | 538 | { |
539 | 539 | BUG_ON(!(cachep->flags & SLAB_STORE_USER)); |
540 | 540 | return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD); |
541 | 541 | |
542 | 542 | |
... | ... | @@ -636,16 +636,16 @@ |
636 | 636 | { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} }; |
637 | 637 | |
638 | 638 | /* internal cache of cache description objs */ |
639 | -static kmem_cache_t cache_cache = { | |
639 | +static struct kmem_cache cache_cache = { | |
640 | 640 | .batchcount = 1, |
641 | 641 | .limit = BOOT_CPUCACHE_ENTRIES, |
642 | 642 | .shared = 1, |
643 | - .buffer_size = sizeof(kmem_cache_t), | |
643 | + .buffer_size = sizeof(struct kmem_cache), | |
644 | 644 | .flags = SLAB_NO_REAP, |
645 | 645 | .spinlock = SPIN_LOCK_UNLOCKED, |
646 | 646 | .name = "kmem_cache", |
647 | 647 | #if DEBUG |
648 | - .obj_size = sizeof(kmem_cache_t), | |
648 | + .obj_size = sizeof(struct kmem_cache), | |
649 | 649 | #endif |
650 | 650 | }; |
651 | 651 | |
652 | 652 | |
653 | 653 | |
654 | 654 | |
... | ... | @@ -674,17 +674,17 @@ |
674 | 674 | |
675 | 675 | static DEFINE_PER_CPU(struct work_struct, reap_work); |
676 | 676 | |
677 | -static void free_block(kmem_cache_t *cachep, void **objpp, int len, int node); | |
678 | -static void enable_cpucache(kmem_cache_t *cachep); | |
677 | +static void free_block(struct kmem_cache *cachep, void **objpp, int len, int node); | |
678 | +static void enable_cpucache(struct kmem_cache *cachep); | |
679 | 679 | static void cache_reap(void *unused); |
680 | -static int __node_shrink(kmem_cache_t *cachep, int node); | |
680 | +static int __node_shrink(struct kmem_cache *cachep, int node); | |
681 | 681 | |
682 | -static inline struct array_cache *cpu_cache_get(kmem_cache_t *cachep) | |
682 | +static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep) | |
683 | 683 | { |
684 | 684 | return cachep->array[smp_processor_id()]; |
685 | 685 | } |
686 | 686 | |
687 | -static inline kmem_cache_t *__find_general_cachep(size_t size, gfp_t gfpflags) | |
687 | +static inline struct kmem_cache *__find_general_cachep(size_t size, gfp_t gfpflags) | |
688 | 688 | { |
689 | 689 | struct cache_sizes *csizep = malloc_sizes; |
690 | 690 | |
... | ... | @@ -708,7 +708,7 @@ |
708 | 708 | return csizep->cs_cachep; |
709 | 709 | } |
710 | 710 | |
711 | -kmem_cache_t *kmem_find_general_cachep(size_t size, gfp_t gfpflags) | |
711 | +struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags) | |
712 | 712 | { |
713 | 713 | return __find_general_cachep(size, gfpflags); |
714 | 714 | } |
... | ... | @@ -781,7 +781,7 @@ |
781 | 781 | |
782 | 782 | #define slab_error(cachep, msg) __slab_error(__FUNCTION__, cachep, msg) |
783 | 783 | |
784 | -static void __slab_error(const char *function, kmem_cache_t *cachep, char *msg) | |
784 | +static void __slab_error(const char *function, struct kmem_cache *cachep, char *msg) | |
785 | 785 | { |
786 | 786 | printk(KERN_ERR "slab error in %s(): cache `%s': %s\n", |
787 | 787 | function, cachep->name, msg); |
... | ... | @@ -828,7 +828,7 @@ |
828 | 828 | } |
829 | 829 | |
830 | 830 | #ifdef CONFIG_NUMA |
831 | -static void *__cache_alloc_node(kmem_cache_t *, gfp_t, int); | |
831 | +static void *__cache_alloc_node(struct kmem_cache *, gfp_t, int); | |
832 | 832 | |
833 | 833 | static struct array_cache **alloc_alien_cache(int node, int limit) |
834 | 834 | { |
... | ... | @@ -870,7 +870,7 @@ |
870 | 870 | kfree(ac_ptr); |
871 | 871 | } |
872 | 872 | |
873 | -static void __drain_alien_cache(kmem_cache_t *cachep, | |
873 | +static void __drain_alien_cache(struct kmem_cache *cachep, | |
874 | 874 | struct array_cache *ac, int node) |
875 | 875 | { |
876 | 876 | struct kmem_list3 *rl3 = cachep->nodelists[node]; |
... | ... | @@ -883,7 +883,7 @@ |
883 | 883 | } |
884 | 884 | } |
885 | 885 | |
886 | -static void drain_alien_cache(kmem_cache_t *cachep, struct kmem_list3 *l3) | |
886 | +static void drain_alien_cache(struct kmem_cache *cachep, struct kmem_list3 *l3) | |
887 | 887 | { |
888 | 888 | int i = 0; |
889 | 889 | struct array_cache *ac; |
... | ... | @@ -908,7 +908,7 @@ |
908 | 908 | unsigned long action, void *hcpu) |
909 | 909 | { |
910 | 910 | long cpu = (long)hcpu; |
911 | - kmem_cache_t *cachep; | |
911 | + struct kmem_cache *cachep; | |
912 | 912 | struct kmem_list3 *l3 = NULL; |
913 | 913 | int node = cpu_to_node(cpu); |
914 | 914 | int memsize = sizeof(struct kmem_list3); |
... | ... | @@ -1046,7 +1046,7 @@ |
1046 | 1046 | /* |
1047 | 1047 | * swap the static kmem_list3 with kmalloced memory |
1048 | 1048 | */ |
1049 | -static void init_list(kmem_cache_t *cachep, struct kmem_list3 *list, int nodeid) | |
1049 | +static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list, int nodeid) | |
1050 | 1050 | { |
1051 | 1051 | struct kmem_list3 *ptr; |
1052 | 1052 | |
1053 | 1053 | |
... | ... | @@ -1086,14 +1086,14 @@ |
1086 | 1086 | |
1087 | 1087 | /* Bootstrap is tricky, because several objects are allocated |
1088 | 1088 | * from caches that do not exist yet: |
1089 | - * 1) initialize the cache_cache cache: it contains the kmem_cache_t | |
1089 | + * 1) initialize the cache_cache cache: it contains the struct kmem_cache | |
1090 | 1090 | * structures of all caches, except cache_cache itself: cache_cache |
1091 | 1091 | * is statically allocated. |
1092 | 1092 | * Initially an __init data area is used for the head array and the |
1093 | 1093 | * kmem_list3 structures, it's replaced with a kmalloc allocated |
1094 | 1094 | * array at the end of the bootstrap. |
1095 | 1095 | * 2) Create the first kmalloc cache. |
1096 | - * The kmem_cache_t for the new cache is allocated normally. | |
1096 | + * The struct kmem_cache for the new cache is allocated normally. | |
1097 | 1097 | * An __init data area is used for the head array. |
1098 | 1098 | * 3) Create the remaining kmalloc caches, with minimally sized |
1099 | 1099 | * head arrays. |
... | ... | @@ -1224,7 +1224,7 @@ |
1224 | 1224 | |
1225 | 1225 | /* 6) resize the head arrays to their final sizes */ |
1226 | 1226 | { |
1227 | - kmem_cache_t *cachep; | |
1227 | + struct kmem_cache *cachep; | |
1228 | 1228 | mutex_lock(&cache_chain_mutex); |
1229 | 1229 | list_for_each_entry(cachep, &cache_chain, next) |
1230 | 1230 | enable_cpucache(cachep); |
... | ... | @@ -1267,7 +1267,7 @@ |
1267 | 1267 | * did not request dmaable memory, we might get it, but that |
1268 | 1268 | * would be relatively rare and ignorable. |
1269 | 1269 | */ |
1270 | -static void *kmem_getpages(kmem_cache_t *cachep, gfp_t flags, int nodeid) | |
1270 | +static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid) | |
1271 | 1271 | { |
1272 | 1272 | struct page *page; |
1273 | 1273 | void *addr; |
... | ... | @@ -1293,7 +1293,7 @@ |
1293 | 1293 | /* |
1294 | 1294 | * Interface to system's page release. |
1295 | 1295 | */ |
1296 | -static void kmem_freepages(kmem_cache_t *cachep, void *addr) | |
1296 | +static void kmem_freepages(struct kmem_cache *cachep, void *addr) | |
1297 | 1297 | { |
1298 | 1298 | unsigned long i = (1 << cachep->gfporder); |
1299 | 1299 | struct page *page = virt_to_page(addr); |
... | ... | @@ -1315,7 +1315,7 @@ |
1315 | 1315 | static void kmem_rcu_free(struct rcu_head *head) |
1316 | 1316 | { |
1317 | 1317 | struct slab_rcu *slab_rcu = (struct slab_rcu *)head; |
1318 | - kmem_cache_t *cachep = slab_rcu->cachep; | |
1318 | + struct kmem_cache *cachep = slab_rcu->cachep; | |
1319 | 1319 | |
1320 | 1320 | kmem_freepages(cachep, slab_rcu->addr); |
1321 | 1321 | if (OFF_SLAB(cachep)) |
... | ... | @@ -1325,7 +1325,7 @@ |
1325 | 1325 | #if DEBUG |
1326 | 1326 | |
1327 | 1327 | #ifdef CONFIG_DEBUG_PAGEALLOC |
1328 | -static void store_stackinfo(kmem_cache_t *cachep, unsigned long *addr, | |
1328 | +static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr, | |
1329 | 1329 | unsigned long caller) |
1330 | 1330 | { |
1331 | 1331 | int size = obj_size(cachep); |
... | ... | @@ -1358,7 +1358,7 @@ |
1358 | 1358 | } |
1359 | 1359 | #endif |
1360 | 1360 | |
1361 | -static void poison_obj(kmem_cache_t *cachep, void *addr, unsigned char val) | |
1361 | +static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val) | |
1362 | 1362 | { |
1363 | 1363 | int size = obj_size(cachep); |
1364 | 1364 | addr = &((char *)addr)[obj_offset(cachep)]; |
... | ... | @@ -1380,7 +1380,7 @@ |
1380 | 1380 | |
1381 | 1381 | #if DEBUG |
1382 | 1382 | |
1383 | -static void print_objinfo(kmem_cache_t *cachep, void *objp, int lines) | |
1383 | +static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines) | |
1384 | 1384 | { |
1385 | 1385 | int i, size; |
1386 | 1386 | char *realobj; |
... | ... | @@ -1409,7 +1409,7 @@ |
1409 | 1409 | } |
1410 | 1410 | } |
1411 | 1411 | |
1412 | -static void check_poison_obj(kmem_cache_t *cachep, void *objp) | |
1412 | +static void check_poison_obj(struct kmem_cache *cachep, void *objp) | |
1413 | 1413 | { |
1414 | 1414 | char *realobj; |
1415 | 1415 | int size, i; |
... | ... | @@ -1476,7 +1476,7 @@ |
1476 | 1476 | * slab_destroy_objs - call the registered destructor for each object in |
1477 | 1477 | * a slab that is to be destroyed. |
1478 | 1478 | */ |
1479 | -static void slab_destroy_objs(kmem_cache_t *cachep, struct slab *slabp) | |
1479 | +static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp) | |
1480 | 1480 | { |
1481 | 1481 | int i; |
1482 | 1482 | for (i = 0; i < cachep->num; i++) { |
... | ... | @@ -1508,7 +1508,7 @@ |
1508 | 1508 | } |
1509 | 1509 | } |
1510 | 1510 | #else |
1511 | -static void slab_destroy_objs(kmem_cache_t *cachep, struct slab *slabp) | |
1511 | +static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp) | |
1512 | 1512 | { |
1513 | 1513 | if (cachep->dtor) { |
1514 | 1514 | int i; |
... | ... | @@ -1525,7 +1525,7 @@ |
1525 | 1525 | * Before calling the slab must have been unlinked from the cache. |
1526 | 1526 | * The cache-lock is not held/needed. |
1527 | 1527 | */ |
1528 | -static void slab_destroy(kmem_cache_t *cachep, struct slab *slabp) | |
1528 | +static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp) | |
1529 | 1529 | { |
1530 | 1530 | void *addr = slabp->s_mem - slabp->colouroff; |
1531 | 1531 | |
... | ... | @@ -1546,7 +1546,7 @@ |
1546 | 1546 | |
1547 | 1547 | /* For setting up all the kmem_list3s for cache whose buffer_size is same |
1548 | 1548 | as size of kmem_list3. */ |
1549 | -static void set_up_list3s(kmem_cache_t *cachep, int index) | |
1549 | +static void set_up_list3s(struct kmem_cache *cachep, int index) | |
1550 | 1550 | { |
1551 | 1551 | int node; |
1552 | 1552 | |
... | ... | @@ -1566,7 +1566,7 @@ |
1566 | 1566 | * high order pages for slabs. When the gfp() functions are more friendly |
1567 | 1567 | * towards high-order requests, this should be changed. |
1568 | 1568 | */ |
1569 | -static inline size_t calculate_slab_order(kmem_cache_t *cachep, size_t size, | |
1569 | +static inline size_t calculate_slab_order(struct kmem_cache *cachep, size_t size, | |
1570 | 1570 | size_t align, gfp_t flags) |
1571 | 1571 | { |
1572 | 1572 | size_t left_over = 0; |
1573 | 1573 | |
1574 | 1574 | |
... | ... | @@ -1638,13 +1638,13 @@ |
1638 | 1638 | * cacheline. This can be beneficial if you're counting cycles as closely |
1639 | 1639 | * as davem. |
1640 | 1640 | */ |
1641 | -kmem_cache_t * | |
1641 | +struct kmem_cache * | |
1642 | 1642 | kmem_cache_create (const char *name, size_t size, size_t align, |
1643 | - unsigned long flags, void (*ctor)(void*, kmem_cache_t *, unsigned long), | |
1644 | - void (*dtor)(void*, kmem_cache_t *, unsigned long)) | |
1643 | + unsigned long flags, void (*ctor)(void*, struct kmem_cache *, unsigned long), | |
1644 | + void (*dtor)(void*, struct kmem_cache *, unsigned long)) | |
1645 | 1645 | { |
1646 | 1646 | size_t left_over, slab_size, ralign; |
1647 | - kmem_cache_t *cachep = NULL; | |
1647 | + struct kmem_cache *cachep = NULL; | |
1648 | 1648 | struct list_head *p; |
1649 | 1649 | |
1650 | 1650 | /* |
... | ... | @@ -1662,7 +1662,7 @@ |
1662 | 1662 | mutex_lock(&cache_chain_mutex); |
1663 | 1663 | |
1664 | 1664 | list_for_each(p, &cache_chain) { |
1665 | - kmem_cache_t *pc = list_entry(p, kmem_cache_t, next); | |
1665 | + struct kmem_cache *pc = list_entry(p, struct kmem_cache, next); | |
1666 | 1666 | mm_segment_t old_fs = get_fs(); |
1667 | 1667 | char tmp; |
1668 | 1668 | int res; |
1669 | 1669 | |
... | ... | @@ -1762,10 +1762,10 @@ |
1762 | 1762 | align = ralign; |
1763 | 1763 | |
1764 | 1764 | /* Get cache's description obj. */ |
1765 | - cachep = (kmem_cache_t *) kmem_cache_alloc(&cache_cache, SLAB_KERNEL); | |
1765 | + cachep = kmem_cache_alloc(&cache_cache, SLAB_KERNEL); | |
1766 | 1766 | if (!cachep) |
1767 | 1767 | goto oops; |
1768 | - memset(cachep, 0, sizeof(kmem_cache_t)); | |
1768 | + memset(cachep, 0, sizeof(struct kmem_cache)); | |
1769 | 1769 | |
1770 | 1770 | #if DEBUG |
1771 | 1771 | cachep->obj_size = size; |
... | ... | @@ -1941,7 +1941,7 @@ |
1941 | 1941 | BUG_ON(irqs_disabled()); |
1942 | 1942 | } |
1943 | 1943 | |
1944 | -static void check_spinlock_acquired(kmem_cache_t *cachep) | |
1944 | +static void check_spinlock_acquired(struct kmem_cache *cachep) | |
1945 | 1945 | { |
1946 | 1946 | #ifdef CONFIG_SMP |
1947 | 1947 | check_irq_off(); |
... | ... | @@ -1949,7 +1949,7 @@ |
1949 | 1949 | #endif |
1950 | 1950 | } |
1951 | 1951 | |
1952 | -static void check_spinlock_acquired_node(kmem_cache_t *cachep, int node) | |
1952 | +static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node) | |
1953 | 1953 | { |
1954 | 1954 | #ifdef CONFIG_SMP |
1955 | 1955 | check_irq_off(); |
1956 | 1956 | |
... | ... | @@ -1982,12 +1982,12 @@ |
1982 | 1982 | preempt_enable(); |
1983 | 1983 | } |
1984 | 1984 | |
1985 | -static void drain_array_locked(kmem_cache_t *cachep, struct array_cache *ac, | |
1985 | +static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac, | |
1986 | 1986 | int force, int node); |
1987 | 1987 | |
1988 | 1988 | static void do_drain(void *arg) |
1989 | 1989 | { |
1990 | - kmem_cache_t *cachep = (kmem_cache_t *) arg; | |
1990 | + struct kmem_cache *cachep = (struct kmem_cache *) arg; | |
1991 | 1991 | struct array_cache *ac; |
1992 | 1992 | int node = numa_node_id(); |
1993 | 1993 | |
... | ... | @@ -1999,7 +1999,7 @@ |
1999 | 1999 | ac->avail = 0; |
2000 | 2000 | } |
2001 | 2001 | |
2002 | -static void drain_cpu_caches(kmem_cache_t *cachep) | |
2002 | +static void drain_cpu_caches(struct kmem_cache *cachep) | |
2003 | 2003 | { |
2004 | 2004 | struct kmem_list3 *l3; |
2005 | 2005 | int node; |
... | ... | @@ -2020,7 +2020,7 @@ |
2020 | 2020 | spin_unlock_irq(&cachep->spinlock); |
2021 | 2021 | } |
2022 | 2022 | |
2023 | -static int __node_shrink(kmem_cache_t *cachep, int node) | |
2023 | +static int __node_shrink(struct kmem_cache *cachep, int node) | |
2024 | 2024 | { |
2025 | 2025 | struct slab *slabp; |
2026 | 2026 | struct kmem_list3 *l3 = cachep->nodelists[node]; |
... | ... | @@ -2049,7 +2049,7 @@ |
2049 | 2049 | return ret; |
2050 | 2050 | } |
2051 | 2051 | |
2052 | -static int __cache_shrink(kmem_cache_t *cachep) | |
2052 | +static int __cache_shrink(struct kmem_cache *cachep) | |
2053 | 2053 | { |
2054 | 2054 | int ret = 0, i = 0; |
2055 | 2055 | struct kmem_list3 *l3; |
... | ... | @@ -2075,7 +2075,7 @@ |
2075 | 2075 | * Releases as many slabs as possible for a cache. |
2076 | 2076 | * To help debugging, a zero exit status indicates all slabs were released. |
2077 | 2077 | */ |
2078 | -int kmem_cache_shrink(kmem_cache_t *cachep) | |
2078 | +int kmem_cache_shrink(struct kmem_cache *cachep) | |
2079 | 2079 | { |
2080 | 2080 | if (!cachep || in_interrupt()) |
2081 | 2081 | BUG(); |
... | ... | @@ -2088,7 +2088,7 @@ |
2088 | 2088 | * kmem_cache_destroy - delete a cache |
2089 | 2089 | * @cachep: the cache to destroy |
2090 | 2090 | * |
2091 | - * Remove a kmem_cache_t object from the slab cache. | |
2091 | + * Remove a struct kmem_cache object from the slab cache. | |
2092 | 2092 | * Returns 0 on success. |
2093 | 2093 | * |
2094 | 2094 | * It is expected this function will be called by a module when it is |
... | ... | @@ -2101,7 +2101,7 @@ |
2101 | 2101 | * The caller must guarantee that noone will allocate memory from the cache |
2102 | 2102 | * during the kmem_cache_destroy(). |
2103 | 2103 | */ |
2104 | -int kmem_cache_destroy(kmem_cache_t *cachep) | |
2104 | +int kmem_cache_destroy(struct kmem_cache *cachep) | |
2105 | 2105 | { |
2106 | 2106 | int i; |
2107 | 2107 | struct kmem_list3 *l3; |
... | ... | @@ -2152,7 +2152,7 @@ |
2152 | 2152 | EXPORT_SYMBOL(kmem_cache_destroy); |
2153 | 2153 | |
2154 | 2154 | /* Get the memory for a slab management obj. */ |
2155 | -static struct slab *alloc_slabmgmt(kmem_cache_t *cachep, void *objp, | |
2155 | +static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp, | |
2156 | 2156 | int colour_off, gfp_t local_flags) |
2157 | 2157 | { |
2158 | 2158 | struct slab *slabp; |
... | ... | @@ -2178,7 +2178,7 @@ |
2178 | 2178 | return (kmem_bufctl_t *) (slabp + 1); |
2179 | 2179 | } |
2180 | 2180 | |
2181 | -static void cache_init_objs(kmem_cache_t *cachep, | |
2181 | +static void cache_init_objs(struct kmem_cache *cachep, | |
2182 | 2182 | struct slab *slabp, unsigned long ctor_flags) |
2183 | 2183 | { |
2184 | 2184 | int i; |
... | ... | @@ -2227,7 +2227,7 @@ |
2227 | 2227 | slabp->free = 0; |
2228 | 2228 | } |
2229 | 2229 | |
2230 | -static void kmem_flagcheck(kmem_cache_t *cachep, gfp_t flags) | |
2230 | +static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags) | |
2231 | 2231 | { |
2232 | 2232 | if (flags & SLAB_DMA) { |
2233 | 2233 | if (!(cachep->gfpflags & GFP_DMA)) |
... | ... | @@ -2238,7 +2238,7 @@ |
2238 | 2238 | } |
2239 | 2239 | } |
2240 | 2240 | |
2241 | -static void *slab_get_obj(kmem_cache_t *cachep, struct slab *slabp, int nodeid) | |
2241 | +static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp, int nodeid) | |
2242 | 2242 | { |
2243 | 2243 | void *objp = slabp->s_mem + (slabp->free * cachep->buffer_size); |
2244 | 2244 | kmem_bufctl_t next; |
... | ... | @@ -2254,7 +2254,7 @@ |
2254 | 2254 | return objp; |
2255 | 2255 | } |
2256 | 2256 | |
2257 | -static void slab_put_obj(kmem_cache_t *cachep, struct slab *slabp, void *objp, | |
2257 | +static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp, void *objp, | |
2258 | 2258 | int nodeid) |
2259 | 2259 | { |
2260 | 2260 | unsigned int objnr = (unsigned)(objp-slabp->s_mem) / cachep->buffer_size; |
... | ... | @@ -2274,7 +2274,7 @@ |
2274 | 2274 | slabp->inuse--; |
2275 | 2275 | } |
2276 | 2276 | |
2277 | -static void set_slab_attr(kmem_cache_t *cachep, struct slab *slabp, void *objp) | |
2277 | +static void set_slab_attr(struct kmem_cache *cachep, struct slab *slabp, void *objp) | |
2278 | 2278 | { |
2279 | 2279 | int i; |
2280 | 2280 | struct page *page; |
... | ... | @@ -2293,7 +2293,7 @@ |
2293 | 2293 | * Grow (by 1) the number of slabs within a cache. This is called by |
2294 | 2294 | * kmem_cache_alloc() when there are no active objs left in a cache. |
2295 | 2295 | */ |
2296 | -static int cache_grow(kmem_cache_t *cachep, gfp_t flags, int nodeid) | |
2296 | +static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid) | |
2297 | 2297 | { |
2298 | 2298 | struct slab *slabp; |
2299 | 2299 | void *objp; |
... | ... | @@ -2404,7 +2404,7 @@ |
2404 | 2404 | } |
2405 | 2405 | } |
2406 | 2406 | |
2407 | -static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp, | |
2407 | +static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, | |
2408 | 2408 | void *caller) |
2409 | 2409 | { |
2410 | 2410 | struct page *page; |
... | ... | @@ -2478,7 +2478,7 @@ |
2478 | 2478 | return objp; |
2479 | 2479 | } |
2480 | 2480 | |
2481 | -static void check_slabp(kmem_cache_t *cachep, struct slab *slabp) | |
2481 | +static void check_slabp(struct kmem_cache *cachep, struct slab *slabp) | |
2482 | 2482 | { |
2483 | 2483 | kmem_bufctl_t i; |
2484 | 2484 | int entries = 0; |
... | ... | @@ -2511,7 +2511,7 @@ |
2511 | 2511 | #define check_slabp(x,y) do { } while(0) |
2512 | 2512 | #endif |
2513 | 2513 | |
2514 | -static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags) | |
2514 | +static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags) | |
2515 | 2515 | { |
2516 | 2516 | int batchcount; |
2517 | 2517 | struct kmem_list3 *l3; |
... | ... | @@ -2602,7 +2602,7 @@ |
2602 | 2602 | } |
2603 | 2603 | |
2604 | 2604 | static inline void |
2605 | -cache_alloc_debugcheck_before(kmem_cache_t *cachep, gfp_t flags) | |
2605 | +cache_alloc_debugcheck_before(struct kmem_cache *cachep, gfp_t flags) | |
2606 | 2606 | { |
2607 | 2607 | might_sleep_if(flags & __GFP_WAIT); |
2608 | 2608 | #if DEBUG |
... | ... | @@ -2611,7 +2611,7 @@ |
2611 | 2611 | } |
2612 | 2612 | |
2613 | 2613 | #if DEBUG |
2614 | -static void *cache_alloc_debugcheck_after(kmem_cache_t *cachep, gfp_t flags, | |
2614 | +static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, gfp_t flags, | |
2615 | 2615 | void *objp, void *caller) |
2616 | 2616 | { |
2617 | 2617 | if (!objp) |
... | ... | @@ -2660,7 +2660,7 @@ |
2660 | 2660 | #define cache_alloc_debugcheck_after(a,b,objp,d) (objp) |
2661 | 2661 | #endif |
2662 | 2662 | |
2663 | -static inline void *____cache_alloc(kmem_cache_t *cachep, gfp_t flags) | |
2663 | +static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags) | |
2664 | 2664 | { |
2665 | 2665 | void *objp; |
2666 | 2666 | struct array_cache *ac; |
... | ... | @@ -2687,7 +2687,7 @@ |
2687 | 2687 | return objp; |
2688 | 2688 | } |
2689 | 2689 | |
2690 | -static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags) | |
2690 | +static inline void *__cache_alloc(struct kmem_cache *cachep, gfp_t flags) | |
2691 | 2691 | { |
2692 | 2692 | unsigned long save_flags; |
2693 | 2693 | void *objp; |
... | ... | @@ -2707,7 +2707,7 @@ |
2707 | 2707 | /* |
2708 | 2708 | * A interface to enable slab creation on nodeid |
2709 | 2709 | */ |
2710 | -static void *__cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid) | |
2710 | +static void *__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) | |
2711 | 2711 | { |
2712 | 2712 | struct list_head *entry; |
2713 | 2713 | struct slab *slabp; |
... | ... | @@ -2769,7 +2769,7 @@ |
2769 | 2769 | /* |
2770 | 2770 | * Caller needs to acquire correct kmem_list's list_lock |
2771 | 2771 | */ |
2772 | -static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects, | |
2772 | +static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects, | |
2773 | 2773 | int node) |
2774 | 2774 | { |
2775 | 2775 | int i; |
... | ... | @@ -2807,7 +2807,7 @@ |
2807 | 2807 | } |
2808 | 2808 | } |
2809 | 2809 | |
2810 | -static void cache_flusharray(kmem_cache_t *cachep, struct array_cache *ac) | |
2810 | +static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac) | |
2811 | 2811 | { |
2812 | 2812 | int batchcount; |
2813 | 2813 | struct kmem_list3 *l3; |
... | ... | @@ -2866,7 +2866,7 @@ |
2866 | 2866 | * |
2867 | 2867 | * Called with disabled ints. |
2868 | 2868 | */ |
2869 | -static inline void __cache_free(kmem_cache_t *cachep, void *objp) | |
2869 | +static inline void __cache_free(struct kmem_cache *cachep, void *objp) | |
2870 | 2870 | { |
2871 | 2871 | struct array_cache *ac = cpu_cache_get(cachep); |
2872 | 2872 | |
... | ... | @@ -2925,7 +2925,7 @@ |
2925 | 2925 | * Allocate an object from this cache. The flags are only relevant |
2926 | 2926 | * if the cache has no available objects. |
2927 | 2927 | */ |
2928 | -void *kmem_cache_alloc(kmem_cache_t *cachep, gfp_t flags) | |
2928 | +void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) | |
2929 | 2929 | { |
2930 | 2930 | return __cache_alloc(cachep, flags); |
2931 | 2931 | } |
... | ... | @@ -2945,7 +2945,7 @@ |
2945 | 2945 | * |
2946 | 2946 | * Currently only used for dentry validation. |
2947 | 2947 | */ |
2948 | -int fastcall kmem_ptr_validate(kmem_cache_t *cachep, void *ptr) | |
2948 | +int fastcall kmem_ptr_validate(struct kmem_cache *cachep, void *ptr) | |
2949 | 2949 | { |
2950 | 2950 | unsigned long addr = (unsigned long)ptr; |
2951 | 2951 | unsigned long min_addr = PAGE_OFFSET; |
... | ... | @@ -2986,7 +2986,7 @@ |
2986 | 2986 | * New and improved: it will now make sure that the object gets |
2987 | 2987 | * put on the correct node list so that there is no false sharing. |
2988 | 2988 | */ |
2989 | -void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid) | |
2989 | +void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) | |
2990 | 2990 | { |
2991 | 2991 | unsigned long save_flags; |
2992 | 2992 | void *ptr; |
... | ... | @@ -3010,7 +3010,7 @@ |
3010 | 3010 | |
3011 | 3011 | void *kmalloc_node(size_t size, gfp_t flags, int node) |
3012 | 3012 | { |
3013 | - kmem_cache_t *cachep; | |
3013 | + struct kmem_cache *cachep; | |
3014 | 3014 | |
3015 | 3015 | cachep = kmem_find_general_cachep(size, flags); |
3016 | 3016 | if (unlikely(cachep == NULL)) |
... | ... | @@ -3043,7 +3043,7 @@ |
3043 | 3043 | */ |
3044 | 3044 | void *__kmalloc(size_t size, gfp_t flags) |
3045 | 3045 | { |
3046 | - kmem_cache_t *cachep; | |
3046 | + struct kmem_cache *cachep; | |
3047 | 3047 | |
3048 | 3048 | /* If you want to save a few bytes .text space: replace |
3049 | 3049 | * __ with kmem_. |
... | ... | @@ -3114,7 +3114,7 @@ |
3114 | 3114 | * Free an object which was previously allocated from this |
3115 | 3115 | * cache. |
3116 | 3116 | */ |
3117 | -void kmem_cache_free(kmem_cache_t *cachep, void *objp) | |
3117 | +void kmem_cache_free(struct kmem_cache *cachep, void *objp) | |
3118 | 3118 | { |
3119 | 3119 | unsigned long flags; |
3120 | 3120 | |
... | ... | @@ -3135,7 +3135,7 @@ |
3135 | 3135 | */ |
3136 | 3136 | void kfree(const void *objp) |
3137 | 3137 | { |
3138 | - kmem_cache_t *c; | |
3138 | + struct kmem_cache *c; | |
3139 | 3139 | unsigned long flags; |
3140 | 3140 | |
3141 | 3141 | if (unlikely(!objp)) |
3142 | 3142 | |
... | ... | @@ -3172,13 +3172,13 @@ |
3172 | 3172 | EXPORT_SYMBOL(free_percpu); |
3173 | 3173 | #endif |
3174 | 3174 | |
3175 | -unsigned int kmem_cache_size(kmem_cache_t *cachep) | |
3175 | +unsigned int kmem_cache_size(struct kmem_cache *cachep) | |
3176 | 3176 | { |
3177 | 3177 | return obj_size(cachep); |
3178 | 3178 | } |
3179 | 3179 | EXPORT_SYMBOL(kmem_cache_size); |
3180 | 3180 | |
3181 | -const char *kmem_cache_name(kmem_cache_t *cachep) | |
3181 | +const char *kmem_cache_name(struct kmem_cache *cachep) | |
3182 | 3182 | { |
3183 | 3183 | return cachep->name; |
3184 | 3184 | } |
... | ... | @@ -3187,7 +3187,7 @@ |
3187 | 3187 | /* |
3188 | 3188 | * This initializes kmem_list3 for all nodes. |
3189 | 3189 | */ |
3190 | -static int alloc_kmemlist(kmem_cache_t *cachep) | |
3190 | +static int alloc_kmemlist(struct kmem_cache *cachep) | |
3191 | 3191 | { |
3192 | 3192 | int node; |
3193 | 3193 | struct kmem_list3 *l3; |
... | ... | @@ -3243,7 +3243,7 @@ |
3243 | 3243 | } |
3244 | 3244 | |
3245 | 3245 | struct ccupdate_struct { |
3246 | - kmem_cache_t *cachep; | |
3246 | + struct kmem_cache *cachep; | |
3247 | 3247 | struct array_cache *new[NR_CPUS]; |
3248 | 3248 | }; |
3249 | 3249 | |
... | ... | @@ -3259,7 +3259,7 @@ |
3259 | 3259 | new->new[smp_processor_id()] = old; |
3260 | 3260 | } |
3261 | 3261 | |
3262 | -static int do_tune_cpucache(kmem_cache_t *cachep, int limit, int batchcount, | |
3262 | +static int do_tune_cpucache(struct kmem_cache *cachep, int limit, int batchcount, | |
3263 | 3263 | int shared) |
3264 | 3264 | { |
3265 | 3265 | struct ccupdate_struct new; |
... | ... | @@ -3305,7 +3305,7 @@ |
3305 | 3305 | return 0; |
3306 | 3306 | } |
3307 | 3307 | |
3308 | -static void enable_cpucache(kmem_cache_t *cachep) | |
3308 | +static void enable_cpucache(struct kmem_cache *cachep) | |
3309 | 3309 | { |
3310 | 3310 | int err; |
3311 | 3311 | int limit, shared; |
... | ... | @@ -3357,7 +3357,7 @@ |
3357 | 3357 | cachep->name, -err); |
3358 | 3358 | } |
3359 | 3359 | |
3360 | -static void drain_array_locked(kmem_cache_t *cachep, struct array_cache *ac, | |
3360 | +static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac, | |
3361 | 3361 | int force, int node) |
3362 | 3362 | { |
3363 | 3363 | int tofree; |
3364 | 3364 | |
... | ... | @@ -3402,12 +3402,12 @@ |
3402 | 3402 | } |
3403 | 3403 | |
3404 | 3404 | list_for_each(walk, &cache_chain) { |
3405 | - kmem_cache_t *searchp; | |
3405 | + struct kmem_cache *searchp; | |
3406 | 3406 | struct list_head *p; |
3407 | 3407 | int tofree; |
3408 | 3408 | struct slab *slabp; |
3409 | 3409 | |
3410 | - searchp = list_entry(walk, kmem_cache_t, next); | |
3410 | + searchp = list_entry(walk, struct kmem_cache, next); | |
3411 | 3411 | |
3412 | 3412 | if (searchp->flags & SLAB_NO_REAP) |
3413 | 3413 | goto next; |
3414 | 3414 | |
3415 | 3415 | |
... | ... | @@ -3510,15 +3510,15 @@ |
3510 | 3510 | if (p == &cache_chain) |
3511 | 3511 | return NULL; |
3512 | 3512 | } |
3513 | - return list_entry(p, kmem_cache_t, next); | |
3513 | + return list_entry(p, struct kmem_cache, next); | |
3514 | 3514 | } |
3515 | 3515 | |
3516 | 3516 | static void *s_next(struct seq_file *m, void *p, loff_t *pos) |
3517 | 3517 | { |
3518 | - kmem_cache_t *cachep = p; | |
3518 | + struct kmem_cache *cachep = p; | |
3519 | 3519 | ++*pos; |
3520 | 3520 | return cachep->next.next == &cache_chain ? NULL |
3521 | - : list_entry(cachep->next.next, kmem_cache_t, next); | |
3521 | + : list_entry(cachep->next.next, struct kmem_cache, next); | |
3522 | 3522 | } |
3523 | 3523 | |
3524 | 3524 | static void s_stop(struct seq_file *m, void *p) |
... | ... | @@ -3528,7 +3528,7 @@ |
3528 | 3528 | |
3529 | 3529 | static int s_show(struct seq_file *m, void *p) |
3530 | 3530 | { |
3531 | - kmem_cache_t *cachep = p; | |
3531 | + struct kmem_cache *cachep = p; | |
3532 | 3532 | struct list_head *q; |
3533 | 3533 | struct slab *slabp; |
3534 | 3534 | unsigned long active_objs; |
... | ... | @@ -3678,7 +3678,8 @@ |
3678 | 3678 | mutex_lock(&cache_chain_mutex); |
3679 | 3679 | res = -EINVAL; |
3680 | 3680 | list_for_each(p, &cache_chain) { |
3681 | - kmem_cache_t *cachep = list_entry(p, kmem_cache_t, next); | |
3681 | + struct kmem_cache *cachep = list_entry(p, struct kmem_cache, | |
3682 | + next); | |
3682 | 3683 | |
3683 | 3684 | if (!strcmp(cachep->name, kbuf)) { |
3684 | 3685 | if (limit < 1 || |