Commit 6a2d7a955d8de6cb19ed9cd194b3c83008a22c32

Authored by Eric Dumazet
Committed by Linus Torvalds
1 parent 02a0e53d82

[PATCH] SLAB: use a multiply instead of a divide in obj_to_index()

When some objects are allocated by one CPU but freed by another CPU we can
consume a lot of cycles doing divides in obj_to_index().

(Typical load on a dual-processor machine where network interrupts are
handled by one particular CPU (allocating skbufs), and the other CPU is
running the application (consuming and freeing skbufs))

Here on one production server (dual-core AMD Opteron 285), I noticed this
divide took 1.20% of CPU_CLK_UNHALTED events in the kernel.  But Opterons are
quite modern CPUs, and the divide is much more expensive on older
architectures:

On a 200 MHz sparcv9 machine, the division takes 64 cycles instead of 1
cycle for a multiply.

Doing some math, we can use a reciprocal multiplication instead of a divide.

If we want to compute V = (A / B) (A and B being u32 quantities),
we can instead use:

V = ((u64)A * RECIPROCAL(B)) >> 32;

where RECIPROCAL(B) is precomputed as ((1LL << 32) + (B - 1)) / B
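
For illustration, here is a minimal standalone C sketch of the technique
(the function names are mine, not part of the patch; B must be non-zero):

	#include <stdint.h>

	/* Precompute RECIPROCAL(B) = ((1LL << 32) + (B - 1)) / B, as above. */
	static uint32_t reciprocal(uint32_t B)
	{
		return (uint32_t)((((uint64_t)1 << 32) + B - 1) / B);
	}

	/* Compute A / B as a multiply and a shift, with R = reciprocal(B). */
	static uint32_t reciprocal_div(uint32_t A, uint32_t R)
	{
		return (uint32_t)(((uint64_t)A * R) >> 32);
	}

For example, reciprocal_div(1000, reciprocal(152)) yields 6, the same as
1000 / 152, without a hardware divide on the hot path.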

Note:

I wrote pure C code for clarity.  gcc output for i386 is not optimal but
acceptable:

mull   0x14(%ebx)
mov    %edx,%eax // part of the >> 32
xor     %edx,%edx // useless
mov    %eax,(%esp) // could be avoided
mov    %edx,0x4(%esp) // useless
mov    (%esp),%ebx

[akpm@osdl.org: small cleanups]
Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: David Miller <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Showing 4 changed files with 57 additions and 4 deletions

include/linux/reciprocal_div.h
  1 +#ifndef _LINUX_RECIPROCAL_DIV_H
  2 +#define _LINUX_RECIPROCAL_DIV_H
  3 +
  4 +#include <linux/types.h>
  5 +
  6 +/*
  7 + * This file describes reciprocal division.
  8 + *
  9 + * This optimizes the (A/B) problem, when A and B are two u32
  10 + * and B is a known value (but not known at compile time)
  11 + *
  12 + * The math principle used is :
  13 + * Let RECIPROCAL_VALUE(B) be (((1LL << 32) + (B - 1))/ B)
  14 + * Then A / B = (u32)(((u64)(A) * (R)) >> 32)
  15 + *
  16 + * This replaces a divide by a multiply (and a shift), and
  17 + * is generally less expensive in CPU cycles.
  18 + */
  19 +
  20 +/*
  21 + * Computes the reciprocal value (R) for the value B of the divisor.
  22 + * Should not be called before each reciprocal_divide(),
  23 + * or else the performance is slower than a normal divide.
  24 + */
  25 +extern u32 reciprocal_value(u32 B);
  26 +
  27 +
  28 +static inline u32 reciprocal_divide(u32 A, u32 R)
  29 +{
  30 + return (u32)(((u64)A * R) >> 32);
  31 +}
  32 +#endif
lib/Makefile
... ... @@ -5,7 +5,7 @@
5 5 lib-y := ctype.o string.o vsprintf.o cmdline.o \
6 6 bust_spinlocks.o rbtree.o radix-tree.o dump_stack.o \
7 7 idr.o div64.o int_sqrt.o bitmap.o extable.o prio_tree.o \
8   - sha1.o irq_regs.o
  8 + sha1.o irq_regs.o reciprocal_div.o
9 9  
10 10 lib-$(CONFIG_MMU) += ioremap.o
11 11 lib-$(CONFIG_SMP) += cpumask.o
lib/reciprocal_div.c
  1 +#include <asm/div64.h>
  2 +#include <linux/reciprocal_div.h>
  3 +
  4 +u32 reciprocal_value(u32 k)
  5 +{
  6 + u64 val = (1LL << 32) + (k - 1);
  7 + do_div(val, k);
  8 + return (u32)val;
  9 +}
mm/slab.c
... ... @@ -109,6 +109,7 @@
109 109 #include <linux/mutex.h>
110 110 #include <linux/fault-inject.h>
111 111 #include <linux/rtmutex.h>
  112 +#include <linux/reciprocal_div.h>
112 113  
113 114 #include <asm/cacheflush.h>
114 115 #include <asm/tlbflush.h>
... ... @@ -386,6 +387,7 @@
386 387 unsigned int shared;
387 388  
388 389 unsigned int buffer_size;
  390 + u32 reciprocal_buffer_size;
389 391 /* 3) touched by every alloc & free from the backend */
390 392 struct kmem_list3 *nodelists[MAX_NUMNODES];
391 393  
... ... @@ -627,10 +629,17 @@
627 629 return slab->s_mem + cache->buffer_size * idx;
628 630 }
629 631  
630   -static inline unsigned int obj_to_index(struct kmem_cache *cache,
631   - struct slab *slab, void *obj)
  632 +/*
  633 + * We want to avoid an expensive divide : (offset / cache->buffer_size)
  634 + * Using the fact that buffer_size is a constant for a particular cache,
  635 + * we can replace (offset / cache->buffer_size) by
  636 + * reciprocal_divide(offset, cache->reciprocal_buffer_size)
  637 + */
  638 +static inline unsigned int obj_to_index(const struct kmem_cache *cache,
  639 + const struct slab *slab, void *obj)
632 640 {
633   - return (unsigned)(obj - slab->s_mem) / cache->buffer_size;
  641 + u32 offset = (obj - slab->s_mem);
  642 + return reciprocal_divide(offset, cache->reciprocal_buffer_size);
634 643 }
635 644  
636 645 /*
... ... @@ -1427,6 +1436,8 @@
1427 1436  
1428 1437 cache_cache.buffer_size = ALIGN(cache_cache.buffer_size,
1429 1438 cache_line_size());
  1439 + cache_cache.reciprocal_buffer_size =
  1440 + reciprocal_value(cache_cache.buffer_size);
1430 1441  
1431 1442 for (order = 0; order < MAX_ORDER; order++) {
1432 1443 cache_estimate(order, cache_cache.buffer_size,
... ... @@ -2313,6 +2324,7 @@
2313 2324 if (flags & SLAB_CACHE_DMA)
2314 2325 cachep->gfpflags |= GFP_DMA;
2315 2326 cachep->buffer_size = size;
  2327 + cachep->reciprocal_buffer_size = reciprocal_value(size);
2316 2328  
2317 2329 if (flags & CFLGS_OFF_SLAB) {
2318 2330 cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);