kernel/locking/mcs_spinlock.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * MCS lock defines
 *
 * This file contains the main data structure and API definitions of the
 * MCS lock.
 *
 * The MCS lock (proposed by Mellor-Crummey and Scott) is a simple spin-lock
 * with the desirable properties of being fair, and with each cpu trying
 * to acquire the lock spinning on a local variable.
 * It avoids the expensive cache bouncing that common test-and-set spin-lock
 * implementations incur.
 */
#ifndef __LINUX_MCS_SPINLOCK_H
#define __LINUX_MCS_SPINLOCK_H

#include <asm/mcs_spinlock.h>

struct mcs_spinlock {
	struct mcs_spinlock *next;
	int locked; /* 1 if lock acquired */
	int count;  /* nesting count, see qspinlock.c */
};

#ifndef arch_mcs_spin_lock_contended
/*
 * Using smp_cond_load_acquire() provides the acquire semantics
 * required so that subsequent operations happen after the
 * lock is acquired. Additionally, some architectures such as
 * ARM64 would like to do spin-waiting instead of purely
 * spinning, and smp_cond_load_acquire() provides that behavior.
 */
#define arch_mcs_spin_lock_contended(l)					\
do {									\
	smp_cond_load_acquire(l, VAL);					\
} while (0)
#endif

#ifndef arch_mcs_spin_unlock_contended
/*
 * smp_store_release() provides a memory barrier to ensure all
 * operations in the critical section have been completed before
 * unlocking.
 */
#define arch_mcs_spin_unlock_contended(l)				\
	smp_store_release((l), 1)
#endif

/*
 * Note: the smp_load_acquire/smp_store_release pair is not
 * sufficient to form a full memory barrier across
 * cpus for many architectures (except x86) for mcs_unlock and mcs_lock.
 * For applications that need a full barrier across multiple cpus
 * with the mcs_unlock and mcs_lock pair, smp_mb__after_unlock_lock() should
 * be used after mcs_lock.
 */

/*
 * In order to acquire the lock, the caller should declare a local node and
 * pass a reference to the node to this function in addition to the lock.
 * If the lock has already been acquired, then this will proceed to spin
 * on node->locked until the previous lock holder sets node->locked
 * in mcs_spin_unlock().
 */
static inline
void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
{
	struct mcs_spinlock *prev;

	/* Init node */
	node->locked = 0;
	node->next   = NULL;

	/*
	 * We rely on the full barrier with global transitivity implied by the
	 * below xchg() to order the initialization stores above against any
	 * observation of @node. And to provide the ACQUIRE ordering associated
	 * with a LOCK primitive.
	 */
	prev = xchg(lock, node);
	if (likely(prev == NULL)) {
		/*
		 * Lock acquired, no need to set node->locked to 1. Threads
		 * only spin on their own node->locked value for lock
		 * acquisition. However, since this thread can immediately
		 * acquire the lock and does not proceed to spin on its own
		 * node->locked, this value won't be used. If a debug mode is
		 * needed to audit lock status, then set the node->locked
		 * value here.
		 */
		return;
	}
	WRITE_ONCE(prev->next, node);

	/* Wait until the lock holder passes the lock down. */
	arch_mcs_spin_lock_contended(&node->locked);
}

/*
 * Releases the lock. The caller should pass in the corresponding node that
 * was used to acquire the lock.
 */
static inline
void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
{
	struct mcs_spinlock *next = READ_ONCE(node->next);

	if (likely(!next)) {
		/*
		 * Release the lock by setting it to NULL
		 */
		if (likely(cmpxchg_release(lock, node, NULL) == node))
			return;
		/* Wait until the next pointer is set */
		while (!(next = READ_ONCE(node->next)))
			cpu_relax();
	}

	/* Pass lock to next waiter. */
	arch_mcs_spin_unlock_contended(&next->locked);
}

#endif /* __LINUX_MCS_SPINLOCK_H */
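The comments above spell out the caller contract: declare a node (typically on the stack), pass it to mcs_spin_lock(), pass the same node to mcs_spin_unlock(), and add smp_mb__after_unlock_lock() after acquisition when a full cross-CPU barrier is needed. The sketch below illustrates that contract from kernel code that includes this header; example_lock, example_counter and example_counter_inc() are hypothetical names, not part of this file.

static struct mcs_spinlock *example_lock;	/* NULL == unlocked */
static unsigned long example_counter;

static void example_counter_inc(void)
{
	struct mcs_spinlock node;	/* local node; each waiter spins only on its own node->locked */

	mcs_spin_lock(&example_lock, &node);
	/*
	 * If the unlock/lock pair must act as a full barrier across CPUs
	 * (see the note above), add smp_mb__after_unlock_lock() here.
	 */
	example_counter++;		/* critical section */
	mcs_spin_unlock(&example_lock, &node);
}

Because the node lives on the caller's stack, it must remain in scope until mcs_spin_unlock() returns; the unlocker may still dereference it to find the next waiter.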