Blame view
lib/atomic64.c
4.09 KB
09d4e0edd
|
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 |
/* * Generic implementation of 64-bit atomics using spinlocks, * useful on processors that don't have 64-bit atomic instructions. * * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/types.h> #include <linux/cache.h> #include <linux/spinlock.h> #include <linux/init.h> |
8bc3bcc93
|
16 |
#include <linux/export.h> |
60063497a
|
17 |
#include <linux/atomic.h> |
09d4e0edd
|
18 19 20 21 22 23 24 25 26 27 28 29 30 31 |
/* * We use a hashed array of spinlocks to provide exclusive access * to each atomic64_t variable. Since this is expected to used on * systems with small numbers of CPUs (<= 4 or so), we use a * relatively small array of 16 spinlocks to avoid wasting too much * memory on the spinlock array. */ #define NR_LOCKS 16 /* * Ensure each lock is in a separate cacheline. */ static union { |
f59ca0587
|
32 |
raw_spinlock_t lock; |
09d4e0edd
|
33 |
char pad[L1_CACHE_BYTES]; |
fcc16882a
|
34 35 36 37 38 |
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = { [0 ... (NR_LOCKS - 1)] = { .lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock), }, }; |
09d4e0edd
|
39 |
|
cb475de3d
|
40 |
static inline raw_spinlock_t *lock_addr(const atomic64_t *v) |
09d4e0edd
|
41 42 43 44 45 46 47 48 49 50 51 |
{ unsigned long addr = (unsigned long) v; addr >>= L1_CACHE_SHIFT; addr ^= (addr >> 8) ^ (addr >> 16); return &atomic64_lock[addr & (NR_LOCKS - 1)].lock; } long long atomic64_read(const atomic64_t *v) { unsigned long flags; |
cb475de3d
|
52 |
raw_spinlock_t *lock = lock_addr(v); |
09d4e0edd
|
53 |
long long val; |
f59ca0587
|
54 |
raw_spin_lock_irqsave(lock, flags); |
09d4e0edd
|
55 |
val = v->counter; |
f59ca0587
|
56 |
raw_spin_unlock_irqrestore(lock, flags); |
09d4e0edd
|
57 58 |
return val; } |
3fc7b4b22
|
59 |
EXPORT_SYMBOL(atomic64_read); |
09d4e0edd
|
60 61 62 63 |
/* Store i into v->counter atomically, under the hashed lock. */
void atomic64_set(atomic64_t *v, long long i)
{
	raw_spinlock_t *lock = lock_addr(v);
	unsigned long flags;

	raw_spin_lock_irqsave(lock, flags);
	v->counter = i;
	raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_set);
09d4e0edd
|
71 |
|
560cb12a4
|
72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 |
#define ATOMIC64_OP(op, c_op) \ void atomic64_##op(long long a, atomic64_t *v) \ { \ unsigned long flags; \ raw_spinlock_t *lock = lock_addr(v); \ \ raw_spin_lock_irqsave(lock, flags); \ v->counter c_op a; \ raw_spin_unlock_irqrestore(lock, flags); \ } \ EXPORT_SYMBOL(atomic64_##op); #define ATOMIC64_OP_RETURN(op, c_op) \ long long atomic64_##op##_return(long long a, atomic64_t *v) \ { \ unsigned long flags; \ raw_spinlock_t *lock = lock_addr(v); \ long long val; \ \ raw_spin_lock_irqsave(lock, flags); \ val = (v->counter c_op a); \ raw_spin_unlock_irqrestore(lock, flags); \ return val; \ } \ EXPORT_SYMBOL(atomic64_##op##_return); #define ATOMIC64_OPS(op, c_op) \ ATOMIC64_OP(op, c_op) \ ATOMIC64_OP_RETURN(op, c_op) ATOMIC64_OPS(add, +=) ATOMIC64_OPS(sub, -=) #undef ATOMIC64_OPS #undef ATOMIC64_OP_RETURN #undef ATOMIC64_OP |
09d4e0edd
|
108 109 110 111 |
/*
 * Decrement v->counter only when the result would stay non-negative.
 * Returns the decremented value either way, so a negative return
 * means no store was performed.
 */
long long atomic64_dec_if_positive(atomic64_t *v)
{
	raw_spinlock_t *lock = lock_addr(v);
	unsigned long flags;
	long long ret;

	raw_spin_lock_irqsave(lock, flags);
	ret = v->counter - 1;
	if (ret >= 0)
		v->counter = ret;
	raw_spin_unlock_irqrestore(lock, flags);
	return ret;
}
EXPORT_SYMBOL(atomic64_dec_if_positive);
09d4e0edd
|
122 123 124 125 |
/*
 * Atomically replace v->counter with n if it currently equals o.
 * Returns the value observed before any store; callers compare it
 * against o to learn whether the exchange happened.
 */
long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
{
	raw_spinlock_t *lock = lock_addr(v);
	unsigned long flags;
	long long seen;

	raw_spin_lock_irqsave(lock, flags);
	seen = v->counter;
	if (seen == o)
		v->counter = n;
	raw_spin_unlock_irqrestore(lock, flags);
	return seen;
}
EXPORT_SYMBOL(atomic64_cmpxchg);
09d4e0edd
|
136 137 138 139 |
/* Atomically swap in new and hand back the previous counter value. */
long long atomic64_xchg(atomic64_t *v, long long new)
{
	raw_spinlock_t *lock = lock_addr(v);
	unsigned long flags;
	long long old;

	raw_spin_lock_irqsave(lock, flags);
	old = v->counter;
	v->counter = new;
	raw_spin_unlock_irqrestore(lock, flags);
	return old;
}
EXPORT_SYMBOL(atomic64_xchg);
09d4e0edd
|
149 150 151 152 |
/*
 * Add a to v->counter unless the counter currently holds u.
 * Returns 1 when the addition was performed, 0 when it was skipped.
 */
int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	raw_spinlock_t *lock = lock_addr(v);
	unsigned long flags;
	int ret = 0;

	raw_spin_lock_irqsave(lock, flags);
	if (v->counter != u) {
		v->counter += a;
		ret = 1;
	}
	raw_spin_unlock_irqrestore(lock, flags);
	return ret;
}
EXPORT_SYMBOL(atomic64_add_unless);