lib/atomic64.c
/*
 * Generic implementation of 64-bit atomics using spinlocks,
 * useful on processors that don't have 64-bit atomic instructions.
 *
 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/module.h>
#include <asm/atomic.h>

/*
 * We use a hashed array of spinlocks to provide exclusive access
 * to each atomic64_t variable.  Since this is expected to be used on
 * systems with small numbers of CPUs (<= 4 or so), we use a
 * relatively small array of 16 spinlocks to avoid wasting too much
 * memory on the spinlock array.
 */
#define NR_LOCKS	16

/*
 * Ensure each lock is in a separate cacheline.
 */
static union {
	spinlock_t lock;
	char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp;

static inline spinlock_t *lock_addr(const atomic64_t *v)
{
	unsigned long addr = (unsigned long) v;

	addr >>= L1_CACHE_SHIFT;
	addr ^= (addr >> 8) ^ (addr >> 16);
	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}

long long atomic64_read(const atomic64_t *v)
{
	unsigned long flags;
	spinlock_t *lock = lock_addr(v);
	long long val;

	spin_lock_irqsave(lock, flags);
	val = v->counter;
	spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_read);
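/*
 * Illustrative sketch (not part of the original file): lock_addr()
 * hashes the cacheline address of an atomic64_t into the 16-entry
 * lock array, so unrelated variables usually hash to different locks
 * while every access to a given variable always takes the same lock.
 * With two hypothetical variables:
 *
 *	static atomic64_t a;
 *	static atomic64_t b;
 *
 *	atomic64_set(&a, 42);
 *	atomic64_set(&b, atomic64_read(&a));
 *
 * Correctness never depends on which lock a variable hashes to;
 * hashing only spreads contention across the array.
 */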
void atomic64_set(atomic64_t *v, long long i)
{
	unsigned long flags;
	spinlock_t *lock = lock_addr(v);

	spin_lock_irqsave(lock, flags);
	v->counter = i;
	spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_set);
void atomic64_add(long long a, atomic64_t *v)
{
	unsigned long flags;
	spinlock_t *lock = lock_addr(v);

	spin_lock_irqsave(lock, flags);
	v->counter += a;
	spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_add);
long long atomic64_add_return(long long a, atomic64_t *v)
{
	unsigned long flags;
	spinlock_t *lock = lock_addr(v);
	long long val;

	spin_lock_irqsave(lock, flags);
	val = v->counter += a;
	spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_add_return);
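/*
 * Illustrative use (not part of the original file): because
 * atomic64_add_return() returns the post-addition value, it can hand
 * out unique 64-bit sequence numbers; "seq" and next_seq() below are
 * hypothetical:
 *
 *	static atomic64_t seq = ATOMIC64_INIT(0);
 *
 *	long long next_seq(void)
 *	{
 *		return atomic64_add_return(1, &seq);
 *	}
 */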
void atomic64_sub(long long a, atomic64_t *v)
{
	unsigned long flags;
	spinlock_t *lock = lock_addr(v);

	spin_lock_irqsave(lock, flags);
	v->counter -= a;
	spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_sub);
long long atomic64_sub_return(long long a, atomic64_t *v)
{
	unsigned long flags;
	spinlock_t *lock = lock_addr(v);
	long long val;

	spin_lock_irqsave(lock, flags);
	val = v->counter -= a;
	spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_sub_return);
long long atomic64_dec_if_positive(atomic64_t *v)
{
	unsigned long flags;
	spinlock_t *lock = lock_addr(v);
	long long val;

	spin_lock_irqsave(lock, flags);
	val = v->counter - 1;
	if (val >= 0)
		v->counter = val;
	spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_dec_if_positive);
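/*
 * Illustrative use (not part of the original file):
 * atomic64_dec_if_positive() returns the decremented value, going
 * negative only when the counter was already zero or below, so a
 * hypothetical release path can detect the final reference ("obj"
 * and free_object() are made up):
 *
 *	if (atomic64_dec_if_positive(&obj->refcnt) == 0)
 *		free_object(obj);
 */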
long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
{
	unsigned long flags;
	spinlock_t *lock = lock_addr(v);
	long long val;

	spin_lock_irqsave(lock, flags);
	val = v->counter;
	if (val == o)
		v->counter = n;
	spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_cmpxchg);
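/*
 * Illustrative sketch (not part of the original file):
 * atomic64_cmpxchg() supports the usual read/compute/retry loop. A
 * hypothetical "store the maximum" helper might look like:
 *
 *	void atomic64_set_max(atomic64_t *v, long long new)
 *	{
 *		long long old;
 *
 *		do {
 *			old = atomic64_read(v);
 *			if (old >= new)
 *				return;
 *		} while (atomic64_cmpxchg(v, old, new) != old);
 *	}
 *
 * The store only succeeds when no other CPU changed the counter
 * between the read and the cmpxchg; otherwise the loop retries with
 * the fresh value.
 */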
long long atomic64_xchg(atomic64_t *v, long long new)
{
	unsigned long flags;
	spinlock_t *lock = lock_addr(v);
	long long val;

	spin_lock_irqsave(lock, flags);
	val = v->counter;
	v->counter = new;
	spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_xchg);
int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	unsigned long flags;
	spinlock_t *lock = lock_addr(v);
	int ret = 0;

	spin_lock_irqsave(lock, flags);
	if (v->counter != u) {
		v->counter += a;
		ret = 1;
	}
	spin_unlock_irqrestore(lock, flags);
	return ret;
}
EXPORT_SYMBOL(atomic64_add_unless);
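/*
 * Illustrative use (not part of the original file): following the
 * atomic_add_unless() convention, the function returns non-zero iff
 * the addition was performed, so an "increment unless zero" reference
 * grab reads naturally ("obj" and use_object() are hypothetical):
 *
 *	if (atomic64_add_unless(&obj->refcnt, 1, 0))
 *		use_object(obj);
 */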
static int init_atomic64_lock(void)
{
	int i;

	for (i = 0; i < NR_LOCKS; ++i)
		spin_lock_init(&atomic64_lock[i].lock);
	return 0;
}

pure_initcall(init_atomic64_lock);
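/*
 * Note (not part of the original file): pure_initcall() registers
 * init_atomic64_lock() at the earliest initcall level, so the lock
 * array is initialized before ordinary initcalls can touch an
 * atomic64_t. Architectures pick up this implementation by selecting
 * CONFIG_GENERIC_ATOMIC64, whose <asm-generic/atomic64.h> header
 * declares the type these functions operate on:
 *
 *	typedef struct {
 *		long long counter;
 *	} atomic64_t;
 *
 *	#define ATOMIC64_INIT(i)	{ (i) }
 */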