Commit fd37617e69fb865348d012eb1413aef0141ae2de

Authored by Pekka Enberg

Merge branches 'topic/fixes', 'topic/cleanups' and 'topic/documentation' into for-linus

Showing 3 changed files

include/linux/slab.h
... ... @@ -253,9 +253,9 @@
253 253 * request comes from.
254 254 */
255 255 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
256   -extern void *__kmalloc_track_caller(size_t, gfp_t, void*);
  256 +extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
257 257 #define kmalloc_track_caller(size, flags) \
258   - __kmalloc_track_caller(size, flags, __builtin_return_address(0))
  258 + __kmalloc_track_caller(size, flags, _RET_IP_)
259 259 #else
260 260 #define kmalloc_track_caller(size, flags) \
261 261 __kmalloc(size, flags)
262 262  
... ... @@ -271,10 +271,10 @@
271 271 * allocation request comes from.
272 272 */
273 273 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
274   -extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *);
  274 +extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
275 275 #define kmalloc_node_track_caller(size, flags, node) \
276 276 __kmalloc_node_track_caller(size, flags, node, \
277   - __builtin_return_address(0))
  277 + _RET_IP_)
278 278 #else
279 279 #define kmalloc_node_track_caller(size, flags, node) \
280 280 __kmalloc_node(size, flags, node)
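
The two hunks above change the caller-tracking helpers to take the caller as an unsigned long rather than a void *; _RET_IP_ is the kernel's shorthand from include/linux/kernel.h for (unsigned long)__builtin_return_address(0). Below is a minimal user-space sketch of the same pattern, with hypothetical names (my_alloc_track_caller, my_strdup); it builds with GCC or Clang, since __builtin_return_address is a GNU extension. The thin wrapper forwards its own return address so the allocator attributes the allocation to the wrapper's caller rather than to the wrapper itself.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Same definition the kernel uses in <linux/kernel.h>. */
#define _RET_IP_ ((unsigned long)__builtin_return_address(0))

/* Hypothetical allocator entry point that records where the request came from. */
static void *my_alloc_track_caller(size_t size, unsigned long caller)
{
	printf("alloc %zu bytes, requested from %#lx\n", size, caller);
	return malloc(size);
}

/*
 * A wrapper in the style of kstrdup(): the allocation should be charged
 * to whoever called the wrapper, so it forwards its own return address.
 */
static char *my_strdup(const char *s)
{
	size_t len = strlen(s) + 1;
	char *p = my_alloc_track_caller(len, _RET_IP_);

	if (p)
		memcpy(p, s, len);
	return p;
}

int main(void)
{
	char *copy = my_strdup("hello");

	free(copy);
	return 0;
}
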
mm/slab.c
... ... @@ -2123,6 +2123,8 @@
2123 2123 *
2124 2124 * @name must be valid until the cache is destroyed. This implies that
2125 2125 * the module calling this has to destroy the cache before getting unloaded.
  2126 + * Note that kmem_cache_name() is not guaranteed to return the same pointer,
  2127 + * therefore applications must manage it themselves.
2126 2128 *
2127 2129 * The flags are
2128 2130 *
... ... @@ -2997,7 +2999,7 @@
2997 2999 * there must be at least one object available for
2998 3000 * allocation.
2999 3001 */
3000   - BUG_ON(slabp->inuse < 0 || slabp->inuse >= cachep->num);
  3002 + BUG_ON(slabp->inuse >= cachep->num);
3001 3003  
3002 3004 while (slabp->inuse < cachep->num && batchcount--) {
3003 3005 STATS_INC_ALLOCED(cachep);
3004 3006  
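
The dropped half of the BUG_ON() could never trigger: slabp->inuse is declared unsigned int in mm/slab.c's struct slab, so the inuse < 0 comparison is always false; it added nothing and is exactly the kind of always-false comparison compilers warn about. A small illustrative sketch of the pitfall (struct slab_like is a made-up stand-in, not the kernel's struct slab):

#include <assert.h>

/* Illustrative stand-in for the relevant fields of struct slab. */
struct slab_like {
	unsigned int inuse;	/* objects currently allocated from the slab */
	unsigned int num;	/* total objects the slab can hold */
};

static void sanity_check(const struct slab_like *slabp)
{
	/*
	 * The old check read: inuse < 0 || inuse >= num.
	 * With an unsigned field the first comparison is always false,
	 * so only the upper-bound check carries any meaning.
	 */
	assert(slabp->inuse < slabp->num);
}

int main(void)
{
	struct slab_like s = { .inuse = 3, .num = 8 };

	sanity_check(&s);
	return 0;
}
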
... ... @@ -3686,9 +3688,9 @@
3686 3688 EXPORT_SYMBOL(__kmalloc_node);
3687 3689  
3688 3690 void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
3689   - int node, void *caller)
  3691 + int node, unsigned long caller)
3690 3692 {
3691   - return __do_kmalloc_node(size, flags, node, caller);
  3693 + return __do_kmalloc_node(size, flags, node, (void *)caller);
3692 3694 }
3693 3695 EXPORT_SYMBOL(__kmalloc_node_track_caller);
3694 3696 #else
3695 3697  
... ... @@ -3730,9 +3732,9 @@
3730 3732 }
3731 3733 EXPORT_SYMBOL(__kmalloc);
3732 3734  
3733   -void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller)
  3735 +void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
3734 3736 {
3735   - return __do_kmalloc(size, flags, caller);
  3737 + return __do_kmalloc(size, flags, (void *)caller);
3736 3738 }
3737 3739 EXPORT_SYMBOL(__kmalloc_track_caller);
3738 3740  
mm/slub.c
... ... @@ -153,6 +153,10 @@
153 153 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
154 154 #endif
155 155  
  156 +#define OO_SHIFT 16
  157 +#define OO_MASK ((1 << OO_SHIFT) - 1)
  158 +#define MAX_OBJS_PER_PAGE 65535 /* since page.objects is u16 */
  159 +
156 160 /* Internal SLUB flags */
157 161 #define __OBJECT_POISON 0x80000000 /* Poison object */
158 162 #define __SYSFS_ADD_DEFERRED 0x40000000 /* Not yet visible via sysfs */
... ... @@ -178,7 +182,7 @@
178 182 * Tracking user of a slab.
179 183 */
180 184 struct track {
181   - void *addr; /* Called from address */
  185 + unsigned long addr; /* Called from address */
182 186 int cpu; /* Was running on cpu */
183 187 int pid; /* Pid context */
184 188 unsigned long when; /* When did the operation occur */
... ... @@ -290,7 +294,7 @@
290 294 unsigned long size)
291 295 {
292 296 struct kmem_cache_order_objects x = {
293   - (order << 16) + (PAGE_SIZE << order) / size
  297 + (order << OO_SHIFT) + (PAGE_SIZE << order) / size
294 298 };
295 299  
296 300 return x;
297 301  
... ... @@ -298,12 +302,12 @@
298 302  
299 303 static inline int oo_order(struct kmem_cache_order_objects x)
300 304 {
301   - return x.x >> 16;
  305 + return x.x >> OO_SHIFT;
302 306 }
303 307  
304 308 static inline int oo_objects(struct kmem_cache_order_objects x)
305 309 {
306   - return x.x & ((1 << 16) - 1);
  310 + return x.x & OO_MASK;
307 311 }
308 312  
309 313 #ifdef CONFIG_SLUB_DEBUG
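
oo_make(), oo_order() and oo_objects() pack the page order and the per-slab object count into a single word; the cleanup above only replaces the open-coded 16 and (1 << 16) - 1 with the named OO_SHIFT and OO_MASK. The standalone sketch below shows the encoding; oo_make() is simplified here to take the object count directly rather than deriving it from (PAGE_SIZE << order) / size as the kernel does.

#include <stdio.h>

#define OO_SHIFT 16
#define OO_MASK  ((1 << OO_SHIFT) - 1)

struct kmem_cache_order_objects {
	unsigned long x;
};

/* High bits hold the page order, low 16 bits hold the object count. */
static struct kmem_cache_order_objects oo_make(int order, unsigned int objects)
{
	struct kmem_cache_order_objects x = {
		((unsigned long)order << OO_SHIFT) + objects
	};

	return x;
}

static int oo_order(struct kmem_cache_order_objects x)
{
	return x.x >> OO_SHIFT;
}

static int oo_objects(struct kmem_cache_order_objects x)
{
	return x.x & OO_MASK;
}

int main(void)
{
	/* e.g. an order-1 slab (two 4 KiB pages) holding 85 96-byte objects */
	struct kmem_cache_order_objects oo = oo_make(1, 85);

	printf("order=%d objects=%d\n", oo_order(oo), oo_objects(oo));
	return 0;
}
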
... ... @@ -367,7 +371,7 @@
367 371 }
368 372  
369 373 static void set_track(struct kmem_cache *s, void *object,
370   - enum track_item alloc, void *addr)
  374 + enum track_item alloc, unsigned long addr)
371 375 {
372 376 struct track *p;
373 377  
... ... @@ -391,8 +395,8 @@
391 395 if (!(s->flags & SLAB_STORE_USER))
392 396 return;
393 397  
394   - set_track(s, object, TRACK_FREE, NULL);
395   - set_track(s, object, TRACK_ALLOC, NULL);
  398 + set_track(s, object, TRACK_FREE, 0UL);
  399 + set_track(s, object, TRACK_ALLOC, 0UL);
396 400 }
397 401  
398 402 static void print_track(const char *s, struct track *t)
... ... @@ -401,7 +405,7 @@
401 405 return;
402 406  
403 407 printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
404   - s, t->addr, jiffies - t->when, t->cpu, t->pid);
  408 + s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
405 409 }
406 410  
407 411 static void print_tracking(struct kmem_cache *s, void *object)
... ... @@ -692,7 +696,7 @@
692 696 if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
693 697 object_err(s, page, p, "Freepointer corrupt");
694 698 /*
695   - * No choice but to zap it and thus loose the remainder
  699 + * No choice but to zap it and thus lose the remainder
696 700 * of the free objects in this slab. May cause
697 701 * another error because the object count is now wrong.
698 702 */
... ... @@ -764,8 +768,8 @@
764 768 }
765 769  
766 770 max_objects = (PAGE_SIZE << compound_order(page)) / s->size;
767   - if (max_objects > 65535)
768   - max_objects = 65535;
  771 + if (max_objects > MAX_OBJS_PER_PAGE)
  772 + max_objects = MAX_OBJS_PER_PAGE;
769 773  
770 774 if (page->objects != max_objects) {
771 775 slab_err(s, page, "Wrong number of objects. Found %d but "
... ... @@ -866,7 +870,7 @@
866 870 }
867 871  
868 872 static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
869   - void *object, void *addr)
  873 + void *object, unsigned long addr)
870 874 {
871 875 if (!check_slab(s, page))
872 876 goto bad;
... ... @@ -906,7 +910,7 @@
906 910 }
907 911  
908 912 static int free_debug_processing(struct kmem_cache *s, struct page *page,
909   - void *object, void *addr)
  913 + void *object, unsigned long addr)
910 914 {
911 915 if (!check_slab(s, page))
912 916 goto fail;
913 917  
... ... @@ -1029,10 +1033,10 @@
1029 1033 struct page *page, void *object) {}
1030 1034  
1031 1035 static inline int alloc_debug_processing(struct kmem_cache *s,
1032   - struct page *page, void *object, void *addr) { return 0; }
  1036 + struct page *page, void *object, unsigned long addr) { return 0; }
1033 1037  
1034 1038 static inline int free_debug_processing(struct kmem_cache *s,
1035   - struct page *page, void *object, void *addr) { return 0; }
  1039 + struct page *page, void *object, unsigned long addr) { return 0; }
1036 1040  
1037 1041 static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
1038 1042 { return 1; }
... ... @@ -1499,8 +1503,8 @@
1499 1503 * we need to allocate a new slab. This is the slowest path since it involves
1500 1504 * a call to the page allocator and the setup of a new slab.
1501 1505 */
1502   -static void *__slab_alloc(struct kmem_cache *s,
1503   - gfp_t gfpflags, int node, void *addr, struct kmem_cache_cpu *c)
  1506 +static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
  1507 + unsigned long addr, struct kmem_cache_cpu *c)
1504 1508 {
1505 1509 void **object;
1506 1510 struct page *new;
... ... @@ -1584,7 +1588,7 @@
1584 1588 * Otherwise we can simply pick the next object from the lockless free list.
1585 1589 */
1586 1590 static __always_inline void *slab_alloc(struct kmem_cache *s,
1587   - gfp_t gfpflags, int node, void *addr)
  1591 + gfp_t gfpflags, int node, unsigned long addr)
1588 1592 {
1589 1593 void **object;
1590 1594 struct kmem_cache_cpu *c;
1591 1595  
... ... @@ -1614,14 +1618,14 @@
1614 1618  
1615 1619 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
1616 1620 {
1617   - return slab_alloc(s, gfpflags, -1, __builtin_return_address(0));
  1621 + return slab_alloc(s, gfpflags, -1, _RET_IP_);
1618 1622 }
1619 1623 EXPORT_SYMBOL(kmem_cache_alloc);
1620 1624  
1621 1625 #ifdef CONFIG_NUMA
1622 1626 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
1623 1627 {
1624   - return slab_alloc(s, gfpflags, node, __builtin_return_address(0));
  1628 + return slab_alloc(s, gfpflags, node, _RET_IP_);
1625 1629 }
1626 1630 EXPORT_SYMBOL(kmem_cache_alloc_node);
1627 1631 #endif
... ... @@ -1635,7 +1639,7 @@
1635 1639 * handling required then we can return immediately.
1636 1640 */
1637 1641 static void __slab_free(struct kmem_cache *s, struct page *page,
1638   - void *x, void *addr, unsigned int offset)
  1642 + void *x, unsigned long addr, unsigned int offset)
1639 1643 {
1640 1644 void *prior;
1641 1645 void **object = (void *)x;
... ... @@ -1705,7 +1709,7 @@
1705 1709 * with all sorts of special processing.
1706 1710 */
1707 1711 static __always_inline void slab_free(struct kmem_cache *s,
1708   - struct page *page, void *x, void *addr)
  1712 + struct page *page, void *x, unsigned long addr)
1709 1713 {
1710 1714 void **object = (void *)x;
1711 1715 struct kmem_cache_cpu *c;
1712 1716  
... ... @@ -1732,11 +1736,11 @@
1732 1736  
1733 1737 page = virt_to_head_page(x);
1734 1738  
1735   - slab_free(s, page, x, __builtin_return_address(0));
  1739 + slab_free(s, page, x, _RET_IP_);
1736 1740 }
1737 1741 EXPORT_SYMBOL(kmem_cache_free);
1738 1742  
1739   -/* Figure out on which slab object the object resides */
  1743 +/* Figure out on which slab page the object resides */
1740 1744 static struct page *get_object_page(const void *x)
1741 1745 {
1742 1746 struct page *page = virt_to_head_page(x);
... ... @@ -1808,8 +1812,8 @@
1808 1812 int rem;
1809 1813 int min_order = slub_min_order;
1810 1814  
1811   - if ((PAGE_SIZE << min_order) / size > 65535)
1812   - return get_order(size * 65535) - 1;
  1815 + if ((PAGE_SIZE << min_order) / size > MAX_OBJS_PER_PAGE)
  1816 + return get_order(size * MAX_OBJS_PER_PAGE) - 1;
1813 1817  
1814 1818 for (order = max(min_order,
1815 1819 fls(min_objects * size - 1) - PAGE_SHIFT);
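
The renamed constant makes the intent of this clamp explicit: page.objects is a 16-bit field (see the comment on MAX_OBJS_PER_PAGE above), so a slab can describe at most 65535 objects, and for very small objects the requested order is capped before that limit could overflow. A worked example of the arithmetic with hypothetical numbers (4 KiB pages, 8-byte objects, a requested minimum order of 8) and a simplified stand-in for get_order():

#include <stdio.h>

#define PAGE_SHIFT 12			/* assume 4 KiB pages */
#define MAX_OBJS_PER_PAGE 65535		/* page.objects is a u16 */

/*
 * Simplified stand-in for the kernel's get_order(): smallest order
 * whose allocation (2^order pages) covers at least `size' bytes.
 */
static int get_order(unsigned long size)
{
	int order = 0;

	while ((1UL << (PAGE_SHIFT + order)) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long size = 8;		/* hypothetical tiny object */
	int min_order = 8;		/* hypothetical requested minimum order */
	unsigned long objs = ((1UL << PAGE_SHIFT) << min_order) / size;

	if (objs > MAX_OBJS_PER_PAGE) {
		int capped = get_order(size * MAX_OBJS_PER_PAGE) - 1;

		printf("order %d would give %lu objects per slab; "
		       "capped to order %d (%lu objects)\n",
		       min_order, objs, capped,
		       ((1UL << PAGE_SHIFT) << capped) / size);
	}
	return 0;
}
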
... ... @@ -2074,8 +2078,7 @@
2074 2078 * when allocating for the kmalloc_node_cache. This is used for bootstrapping
2075 2079 * memory on a fresh node that has no slab structures yet.
2076 2080 */
2077   -static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
2078   - int node)
  2081 +static void early_kmem_cache_node_alloc(gfp_t gfpflags, int node)
2079 2082 {
2080 2083 struct page *page;
2081 2084 struct kmem_cache_node *n;
... ... @@ -2113,7 +2116,6 @@
2113 2116 local_irq_save(flags);
2114 2117 add_partial(n, page, 0);
2115 2118 local_irq_restore(flags);
2116   - return n;
2117 2119 }
2118 2120  
2119 2121 static void free_kmem_cache_nodes(struct kmem_cache *s)
... ... @@ -2145,8 +2147,7 @@
2145 2147 n = &s->local_node;
2146 2148 else {
2147 2149 if (slab_state == DOWN) {
2148   - n = early_kmem_cache_node_alloc(gfpflags,
2149   - node);
  2150 + early_kmem_cache_node_alloc(gfpflags, node);
2150 2151 continue;
2151 2152 }
2152 2153 n = kmem_cache_alloc_node(kmalloc_caches,
... ... @@ -2660,7 +2661,7 @@
2660 2661 if (unlikely(ZERO_OR_NULL_PTR(s)))
2661 2662 return s;
2662 2663  
2663   - return slab_alloc(s, flags, -1, __builtin_return_address(0));
  2664 + return slab_alloc(s, flags, -1, _RET_IP_);
2664 2665 }
2665 2666 EXPORT_SYMBOL(__kmalloc);
2666 2667  
... ... @@ -2688,7 +2689,7 @@
2688 2689 if (unlikely(ZERO_OR_NULL_PTR(s)))
2689 2690 return s;
2690 2691  
2691   - return slab_alloc(s, flags, node, __builtin_return_address(0));
  2692 + return slab_alloc(s, flags, node, _RET_IP_);
2692 2693 }
2693 2694 EXPORT_SYMBOL(__kmalloc_node);
2694 2695 #endif
... ... @@ -2745,7 +2746,7 @@
2745 2746 put_page(page);
2746 2747 return;
2747 2748 }
2748   - slab_free(page->slab, page, object, __builtin_return_address(0));
  2749 + slab_free(page->slab, page, object, _RET_IP_);
2749 2750 }
2750 2751 EXPORT_SYMBOL(kfree);
2751 2752  
... ... @@ -3212,7 +3213,7 @@
3212 3213  
3213 3214 #endif
3214 3215  
3215   -void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
  3216 +void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
3216 3217 {
3217 3218 struct kmem_cache *s;
3218 3219  
... ... @@ -3228,7 +3229,7 @@
3228 3229 }
3229 3230  
3230 3231 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
3231   - int node, void *caller)
  3232 + int node, unsigned long caller)
3232 3233 {
3233 3234 struct kmem_cache *s;
3234 3235  
... ... @@ -3439,7 +3440,7 @@
3439 3440  
3440 3441 struct location {
3441 3442 unsigned long count;
3442   - void *addr;
  3443 + unsigned long addr;
3443 3444 long long sum_time;
3444 3445 long min_time;
3445 3446 long max_time;
... ... @@ -3487,7 +3488,7 @@
3487 3488 {
3488 3489 long start, end, pos;
3489 3490 struct location *l;
3490   - void *caddr;
  3491 + unsigned long caddr;
3491 3492 unsigned long age = jiffies - track->when;
3492 3493  
3493 3494 start = -1;
... ... @@ -4355,7 +4356,7 @@
4355 4356  
4356 4357 /*
4357 4358 * Need to buffer aliases during bootup until sysfs becomes
4358   - * available lest we loose that information.
  4359 + * available lest we lose that information.
4359 4360 */
4360 4361 struct saved_alias {
4361 4362 struct kmem_cache *s;