Commit 064287807c9dd64688084d34c6748a326b5f3ec8

Authored by Pekka Enberg
Committed by Christoph Lameter
1 parent 7c2e132c54

SLUB: Fix coding style violations

This fixes most of the obvious coding style violations in mm/slub.c as
reported by checkpatch.

Acked-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Christoph Lameter <clameter@sgi.com>

Showing 1 changed file with 23 additions and 23 deletions
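The hunks below are mechanical: each one replaces a spacing, pointer-declaration, or keyword-ordering pattern flagged by scripts/checkpatch.pl with the kernel's preferred form, and the hex-dump printk continuation lines additionally gain KERN_CONT so they are not treated as the start of new messages. For illustration only (this sketch is not part of the commit and uses plain userspace C rather than kernel code), the same conventions look like this:

#include <stdio.h>

/* checkpatch wants the inline keyword (or __always_inline) placed before the
 * return type: "static inline void *f(...)", not "static void inline *f(...)". */
static inline void *first_byte(char *buf)
{
	return buf;
}

int main(void)
{
	char line[16];
	char *p = first_byte(line);	/* "type *name": the '*' binds to the name, no space after it */
	int i;

	for (i = 0; i < 16 - 1; i++)	/* spaces around binary operators such as '-' */
		p[i] = '.';
	p[16 - 1] = '\0';

	printf("%s %d\n", p, i);	/* a space after each comma in argument lists */
	return 0;
}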

... ... @@ -357,22 +357,22 @@
357 357 printk(KERN_ERR "%8s 0x%p: ", text, addr + i);
358 358 newline = 0;
359 359 }
360   - printk(" %02x", addr[i]);
  360 + printk(KERN_CONT " %02x", addr[i]);
361 361 offset = i % 16;
362 362 ascii[offset] = isgraph(addr[i]) ? addr[i] : '.';
363 363 if (offset == 15) {
364   - printk(" %s\n",ascii);
  364 + printk(KERN_CONT " %s\n", ascii);
365 365 newline = 1;
366 366 }
367 367 }
368 368 if (!newline) {
369 369 i %= 16;
370 370 while (i < 16) {
371   - printk(" ");
  371 + printk(KERN_CONT " ");
372 372 ascii[i] = ' ';
373 373 i++;
374 374 }
375   - printk(" %s\n", ascii);
  375 + printk(KERN_CONT " %s\n", ascii);
376 376 }
377 377 }
378 378  
... ... @@ -532,7 +532,7 @@
532 532  
533 533 if (s->flags & __OBJECT_POISON) {
534 534 memset(p, POISON_FREE, s->objsize - 1);
535   - p[s->objsize -1] = POISON_END;
  535 + p[s->objsize - 1] = POISON_END;
536 536 }
537 537  
538 538 if (s->flags & SLAB_RED_ZONE)
... ... @@ -561,7 +561,7 @@
561 561  
562 562 static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
563 563 u8 *object, char *what,
564   - u8* start, unsigned int value, unsigned int bytes)
  564 + u8 *start, unsigned int value, unsigned int bytes)
565 565 {
566 566 u8 *fault;
567 567 u8 *end;
... ... @@ -695,7 +695,7 @@
695 695 (!check_bytes_and_report(s, page, p, "Poison", p,
696 696 POISON_FREE, s->objsize - 1) ||
697 697 !check_bytes_and_report(s, page, p, "Poison",
698   - p + s->objsize -1, POISON_END, 1)))
  698 + p + s->objsize - 1, POISON_END, 1)))
699 699 return 0;
700 700 /*
701 701 * check_pad_bytes cleans up on its own.
... ... @@ -903,8 +903,7 @@
903 903 "SLUB <none>: no slab for object 0x%p.\n",
904 904 object);
905 905 dump_stack();
906   - }
907   - else
  906 + } else
908 907 object_err(s, page, object,
909 908 "page slab pointer corrupt.");
910 909 goto fail;
... ... @@ -950,7 +949,7 @@
950 949 /*
951 950 * Determine which debug features should be switched on
952 951 */
953   - for ( ;*str && *str != ','; str++) {
  952 + for (; *str && *str != ','; str++) {
954 953 switch (tolower(*str)) {
955 954 case 'f':
956 955 slub_debug |= SLAB_DEBUG_FREE;
... ... @@ -969,7 +968,7 @@
969 968 break;
970 969 default:
971 970 printk(KERN_ERR "slub_debug option '%c' "
972   - "unknown. skipped\n",*str);
  971 + "unknown. skipped\n", *str);
973 972 }
974 973 }
975 974  
... ... @@ -1042,7 +1041,7 @@
1042 1041 */
1043 1042 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
1044 1043 {
1045   - struct page * page;
  1044 + struct page *page;
1046 1045 int pages = 1 << s->order;
1047 1046  
1048 1047 if (s->order)
... ... @@ -1138,7 +1137,7 @@
1138 1137 mod_zone_page_state(page_zone(page),
1139 1138 (s->flags & SLAB_RECLAIM_ACCOUNT) ?
1140 1139 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1141   - - pages);
  1140 + -pages);
1142 1141  
1143 1142 __free_pages(page, s->order);
1144 1143 }
... ... @@ -1542,7 +1541,7 @@
1542 1541 *
1543 1542 * Otherwise we can simply pick the next object from the lockless free list.
1544 1543 */
1545   -static void __always_inline *slab_alloc(struct kmem_cache *s,
  1544 +static __always_inline void *slab_alloc(struct kmem_cache *s,
1546 1545 gfp_t gfpflags, int node, void *addr)
1547 1546 {
1548 1547 void **object;
... ... @@ -1650,7 +1649,7 @@
1650 1649 * If fastpath is not possible then fall back to __slab_free where we deal
1651 1650 * with all sorts of special processing.
1652 1651 */
1653   -static void __always_inline slab_free(struct kmem_cache *s,
  1652 +static __always_inline void slab_free(struct kmem_cache *s,
1654 1653 struct page *page, void *x, void *addr)
1655 1654 {
1656 1655 void **object = (void *)x;
... ... @@ -2231,7 +2230,7 @@
2231 2230 */
2232 2231 int kmem_ptr_validate(struct kmem_cache *s, const void *object)
2233 2232 {
2234   - struct page * page;
  2233 + struct page *page;
2235 2234  
2236 2235 page = get_object_page(object);
2237 2236  
... ... @@ -2343,7 +2342,7 @@
2343 2342  
2344 2343 static int __init setup_slub_min_order(char *str)
2345 2344 {
2346   - get_option (&str, &slub_min_order);
  2345 + get_option(&str, &slub_min_order);
2347 2346  
2348 2347 return 1;
2349 2348 }
... ... @@ -2352,7 +2351,7 @@
2352 2351  
2353 2352 static int __init setup_slub_max_order(char *str)
2354 2353 {
2355   - get_option (&str, &slub_max_order);
  2354 + get_option(&str, &slub_max_order);
2356 2355  
2357 2356 return 1;
2358 2357 }
... ... @@ -2361,7 +2360,7 @@
2361 2360  
2362 2361 static int __init setup_slub_min_objects(char *str)
2363 2362 {
2364   - get_option (&str, &slub_min_objects);
  2363 + get_option(&str, &slub_min_objects);
2365 2364  
2366 2365 return 1;
2367 2366 }
... ... @@ -2946,7 +2945,7 @@
2946 2945 * Check if alignment is compatible.
2947 2946 * Courtesy of Adrian Drzewiecki
2948 2947 */
2949   - if ((s->size & ~(align -1)) != s->size)
  2948 + if ((s->size & ~(align - 1)) != s->size)
2950 2949 continue;
2951 2950  
2952 2951 if (s->size - size >= sizeof(void *))
... ... @@ -3055,8 +3054,9 @@
3055 3054 return NOTIFY_OK;
3056 3055 }
3057 3056  
3058   -static struct notifier_block __cpuinitdata slab_notifier =
3059   - { &slab_cpuup_callback, NULL, 0 };
  3057 +static struct notifier_block __cpuinitdata slab_notifier = {
  3058 + &slab_cpuup_callback, NULL, 0
  3059 +};
3060 3060  
3061 3061 #endif
3062 3062  
... ... @@ -3864,7 +3864,7 @@
3864 3864 SLAB_ATTR(remote_node_defrag_ratio);
3865 3865 #endif
3866 3866  
3867   -static struct attribute * slab_attrs[] = {
  3867 +static struct attribute *slab_attrs[] = {
3868 3868 &slab_size_attr.attr,
3869 3869 &object_size_attr.attr,
3870 3870 &objs_per_slab_attr.attr,