Commit 1996bda2a42480c275656233e631ee0966574be4

Authored by Peter Zijlstra
Committed by Ingo Molnar
1 parent d57e34fdd6

arch: Implement local64_t

On 64-bit, local_t is of size long, so we simply make local64_t an alias.
On 32-bit, we fall back to atomic64_t. (An architecture can still provide
its own optimized 32-bit version.)

(This new facility is to be used by perf events optimizations.)

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: linux-arch@vger.kernel.org
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
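
For context, here is a minimal usage sketch of the interface this commit
introduces, in the spirit of the perf use case mentioned above. The struct,
variable and function names are hypothetical illustrations, not part of the
commit:

	#include <linux/percpu.h>
	#include <asm/local64.h>

	/* Hypothetical per-cpu event counter; illustration only. */
	struct event_count {
		local64_t count;
	};

	static DEFINE_PER_CPU(struct event_count, evt);

	/*
	 * Hot path, runs on the local CPU (possibly from IRQ/NMI context,
	 * otherwise with preemption disabled): local64_add() is atomic wrt
	 * interrupts on this CPU, and on 64-bit it compiles down to the
	 * arch's cheap local_t operation.
	 */
	static void event_hit(s64 delta)
	{
		local64_add(delta, &this_cpu_ptr(&evt)->count);
	}

	/* Snapshot the local CPU's counter. */
	static s64 event_read_local(void)
	{
		return local64_read(&this_cpu_ptr(&evt)->count);
	}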

Showing 23 changed files with 118 additions and 0 deletions

arch/alpha/include/asm/local64.h
  1 +#include <asm-generic/local64.h>
arch/arm/include/asm/local64.h
  1 +#include <asm-generic/local64.h>
arch/avr32/include/asm/local64.h
  1 +#include <asm-generic/local64.h>
arch/blackfin/include/asm/local64.h
  1 +#include <asm-generic/local64.h>
arch/cris/include/asm/local64.h
  1 +#include <asm-generic/local64.h>
arch/frv/include/asm/local64.h
  1 +#include <asm-generic/local64.h>
arch/frv/kernel/local64.h
  1 +#include <asm-generic/local64.h>
arch/h8300/include/asm/local64.h
  1 +#include <asm-generic/local64.h>
arch/ia64/include/asm/local64.h
  1 +#include <asm-generic/local64.h>
arch/m32r/include/asm/local64.h
  1 +#include <asm-generic/local64.h>
arch/m68k/include/asm/local64.h
  1 +#include <asm-generic/local64.h>
arch/microblaze/include/asm/local64.h
  1 +#include <asm-generic/local64.h>
arch/mips/include/asm/local64.h
  1 +#include <asm-generic/local64.h>
arch/mn10300/include/asm/local64.h
  1 +#include <asm-generic/local64.h>
arch/parisc/include/asm/local64.h
  1 +#include <asm-generic/local64.h>
arch/powerpc/include/asm/local64.h
  1 +#include <asm-generic/local64.h>
arch/s390/include/asm/local64.h
  1 +#include <asm-generic/local64.h>
arch/score/include/asm/local64.h
  1 +#include <asm-generic/local64.h>
arch/sh/include/asm/local64.h
  1 +#include <asm-generic/local64.h>
arch/sparc/include/asm/local64.h
  1 +#include <asm-generic/local64.h>
arch/x86/include/asm/local64.h
  1 +#include <asm-generic/local64.h>
arch/xtensa/include/asm/local64.h
  1 +#include <asm-generic/local64.h>
include/asm-generic/local64.h
  1 +#ifndef _ASM_GENERIC_LOCAL64_H
  2 +#define _ASM_GENERIC_LOCAL64_H
  3 +
  4 +#include <linux/percpu.h>
  5 +#include <asm/types.h>
  6 +
  7 +/*
  8 + * A signed 64-bit type for operations which are atomic on a single CPU.
  9 + * Usually used in combination with per-cpu variables.
  10 + *
  11 + * This is the default implementation; on 32-bit it merely wraps atomic64_t,
  12 + * which buys nothing. The whole point behind local64_t is that some processors
  13 + * can perform atomic adds and subtracts in a manner which is atomic wrt IRQs
  14 + * running on this CPU. local64_t allows exploitation of such capabilities.
  15 + */
  16 +
  17 +/* On 64-bit, wrap the arch's local_t; otherwise fall back to atomic64_t. */
  18 +
  19 +#if BITS_PER_LONG == 64
  20 +
  21 +#include <asm/local.h>
  22 +
  23 +typedef struct {
  24 + local_t a;
  25 +} local64_t;
  26 +
  27 +#define LOCAL64_INIT(i) { LOCAL_INIT(i) }
  28 +
  29 +#define local64_read(l) local_read(&(l)->a)
  30 +#define local64_set(l,i) local_set((&(l)->a),(i))
  31 +#define local64_inc(l) local_inc(&(l)->a)
  32 +#define local64_dec(l) local_dec(&(l)->a)
  33 +#define local64_add(i,l) local_add((i),(&(l)->a))
  34 +#define local64_sub(i,l) local_sub((i),(&(l)->a))
  35 +
  36 +#define local64_sub_and_test(i, l) local_sub_and_test((i), (&(l)->a))
  37 +#define local64_dec_and_test(l) local_dec_and_test(&(l)->a)
  38 +#define local64_inc_and_test(l) local_inc_and_test(&(l)->a)
  39 +#define local64_add_negative(i, l) local_add_negative((i), (&(l)->a))
  40 +#define local64_add_return(i, l) local_add_return((i), (&(l)->a))
  41 +#define local64_sub_return(i, l) local_sub_return((i), (&(l)->a))
  42 +#define local64_inc_return(l) local_inc_return(&(l)->a)
  43 +
  44 +#define local64_cmpxchg(l, o, n) local_cmpxchg((&(l)->a), (o), (n))
  45 +#define local64_xchg(l, n) local_xchg((&(l)->a), (n))
  46 +#define local64_add_unless(l, _a, u) local_add_unless((&(l)->a), (_a), (u))
  47 +#define local64_inc_not_zero(l) local_inc_not_zero(&(l)->a)
  48 +
  49 +/* Non-atomic variants, i.e. for use with preemption disabled and no
  50 + * access from interrupt context. Some archs can optimize this case well. */
  51 +#define __local64_inc(l) local64_set((l), local64_read(l) + 1)
  52 +#define __local64_dec(l) local64_set((l), local64_read(l) - 1)
  53 +#define __local64_add(i,l) local64_set((l), local64_read(l) + (i))
  54 +#define __local64_sub(i,l) local64_set((l), local64_read(l) - (i))
  55 +
  56 +#else /* BITS_PER_LONG != 64 */
  57 +
  58 +#include <asm/atomic.h>
  59 +
  60 +/* Don't use typedef: don't want them to be mixed with atomic_t's. */
  61 +typedef struct {
  62 + atomic64_t a;
  63 +} local64_t;
  64 +
  65 +#define LOCAL64_INIT(i) { ATOMIC64_INIT(i) }
  66 +
  67 +#define local64_read(l) atomic64_read(&(l)->a)
  68 +#define local64_set(l,i) atomic64_set((&(l)->a),(i))
  69 +#define local64_inc(l) atomic64_inc(&(l)->a)
  70 +#define local64_dec(l) atomic64_dec(&(l)->a)
  71 +#define local64_add(i,l) atomic64_add((i),(&(l)->a))
  72 +#define local64_sub(i,l) atomic64_sub((i),(&(l)->a))
  73 +
  74 +#define local64_sub_and_test(i, l) atomic64_sub_and_test((i), (&(l)->a))
  75 +#define local64_dec_and_test(l) atomic64_dec_and_test(&(l)->a)
  76 +#define local64_inc_and_test(l) atomic64_inc_and_test(&(l)->a)
  77 +#define local64_add_negative(i, l) atomic64_add_negative((i), (&(l)->a))
  78 +#define local64_add_return(i, l) atomic64_add_return((i), (&(l)->a))
  79 +#define local64_sub_return(i, l) atomic64_sub_return((i), (&(l)->a))
  80 +#define local64_inc_return(l) atomic64_inc_return(&(l)->a)
  81 +
  82 +#define local64_cmpxchg(l, o, n) atomic64_cmpxchg((&(l)->a), (o), (n))
  83 +#define local64_xchg(l, n) atomic64_xchg((&(l)->a), (n))
  84 +#define local64_add_unless(l, _a, u) atomic64_add_unless((&(l)->a), (_a), (u))
  85 +#define local64_inc_not_zero(l) atomic64_inc_not_zero(&(l)->a)
  86 +
  87 +/* Non-atomic variants, i.e. for use with preemption disabled and no
  88 + * access from interrupt context. Some archs can optimize this case well. */
  89 +#define __local64_inc(l) local64_set((l), local64_read(l) + 1)
  90 +#define __local64_dec(l) local64_set((l), local64_read(l) - 1)
  91 +#define __local64_add(i,l) local64_set((l), local64_read(l) + (i))
  92 +#define __local64_sub(i,l) local64_set((l), local64_read(l) - (i))
  93 +
  94 +#endif /* BITS_PER_LONG != 64 */
  95 +
  96 +#endif /* _ASM_GENERIC_LOCAL64_H */
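
The parenthetical in the changelog ("an architecture can still provide its
own optimized 32-bit version") works by file substitution: a 32-bit
architecture opts out of the atomic64_t fallback simply by shipping its own
<asm/local64.h> instead of the one-line include of the generic header. Below
is a hypothetical sketch of such an override using IRQ disabling on the
owning CPU; no architecture in this commit actually does this, and all names
are illustrative:

	/* arch/foo/include/asm/local64.h -- hypothetical, for illustration. */
	#ifndef _ASM_FOO_LOCAL64_H
	#define _ASM_FOO_LOCAL64_H

	#include <linux/types.h>
	#include <linux/irqflags.h>

	typedef struct {
		s64 v;		/* only ever updated by the owning CPU */
	} local64_t;

	#define LOCAL64_INIT(i)	{ (i) }

	/*
	 * local64_t only has to be atomic wrt IRQs on the owning CPU, so
	 * disabling local interrupts around the update suffices, and may be
	 * cheaper than a full atomic64_t on some 32-bit machines.
	 */
	static inline void local64_add(s64 i, local64_t *l)
	{
		unsigned long flags;

		local_irq_save(flags);
		l->v += i;
		local_irq_restore(flags);
	}

	/*
	 * Unlike the atomic64_t fallback, this read is only tear-free when
	 * performed on the owning CPU.
	 */
	static inline s64 local64_read(local64_t *l)
	{
		unsigned long flags;
		s64 ret;

		local_irq_save(flags);
		ret = l->v;
		local_irq_restore(flags);
		return ret;
	}

	/* Remaining local64_* ops omitted; the full set mirrors the generic header. */

	#endif /* _ASM_FOO_LOCAL64_H */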