include/asm-generic/qspinlock.h
  /*
   * Queued spinlock
   *
   * This program is free software; you can redistribute it and/or modify
   * it under the terms of the GNU General Public License as published by
   * the Free Software Foundation; either version 2 of the License, or
   * (at your option) any later version.
   *
   * This program is distributed in the hope that it will be useful,
   * but WITHOUT ANY WARRANTY; without even the implied warranty of
   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   * GNU General Public License for more details.
   *
   * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
   * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
   *
   * Authors: Waiman Long <waiman.long@hpe.com>
   */
  #ifndef __ASM_GENERIC_QSPINLOCK_H
  #define __ASM_GENERIC_QSPINLOCK_H
  
  #include <asm-generic/qspinlock_types.h>
  
  /**
   * queued_spin_is_locked - is the spinlock locked?
   * @lock: Pointer to queued spinlock structure
   * Return: 1 if it is locked, 0 otherwise
   */
  #ifndef queued_spin_is_locked
  static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
  {
  	/*
  	 * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
  	 * isn't immediately observable.
  	 */
  	return atomic_read(&lock->val);
  }
  #endif
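
/*
 * The lock word packs the locked byte, a pending bit and a waiter tail
 * into a single atomic_t, so one atomic_read() is enough to answer
 * "locked?". Illustrative caller sketch; "my_lock" is a hypothetical
 * qspinlock initialized elsewhere:
 *
 *	if (queued_spin_is_locked(&my_lock))
 *		pr_debug("qspinlock is held or has waiters\n");
 */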
  
  /**
   * queued_spin_value_unlocked - is the spinlock structure unlocked?
   * @lock: queued spinlock structure
   * Return: 1 if it is unlocked, 0 otherwise
   *
 * N.B. Whenever there are tasks waiting for the lock, it is considered
 *      locked wrt the lockref code, to keep the lockref code from
 *      stealing the lock and changing things underneath it. This also
 *      allows some optimizations to be applied without conflicting with
 *      lockref.
   */
  static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
  {
  	return !atomic_read(&lock.val);
  }
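
/*
 * Because this helper takes the lock by value, a caller can test a
 * snapshot of the lock word without touching the live lock, which is how
 * the lockref fast path uses it. A rough sketch ("old" is hypothetical;
 * assumes the 4-byte lock word can be read atomically):
 *
 *	struct qspinlock old = READ_ONCE(*lock);
 *
 *	if (queued_spin_value_unlocked(old))
 *		;	// safe to try a cmpxchg-based fast path
 */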
  
  /**
   * queued_spin_is_contended - check if the lock is contended
   * @lock : Pointer to queued spinlock structure
   * Return: 1 if lock contended, 0 otherwise
   */
  static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
  {
  	return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
  }
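
/*
 * Masking off _Q_LOCKED_MASK discards the locked byte and keeps the
 * pending and tail bits, which only become non-zero once other CPUs are
 * queued up. A hypothetical lock holder could use this to yield under
 * contention (sketch only):
 *
 *	if (queued_spin_is_contended(&my_lock)) {
 *		queued_spin_unlock(&my_lock);
 *		cpu_relax();
 *		queued_spin_lock(&my_lock);
 *	}
 */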

/**
   * queued_spin_trylock - try to acquire the queued spinlock
   * @lock : Pointer to queued spinlock structure
   * Return: 1 if lock acquired, 0 if failed
   */
  static __always_inline int queued_spin_trylock(struct qspinlock *lock)
  {
  	if (!atomic_read(&lock->val) &&
  	   (atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL) == 0))
  		return 1;
  	return 0;
  }
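
/*
 * The plain atomic_read() screens out locks that are visibly taken before
 * the more expensive cmpxchg is attempted, which keeps the cache line in
 * a shared state while the lock is held by someone else. Minimal caller
 * sketch ("my_lock" is hypothetical):
 *
 *	if (queued_spin_trylock(&my_lock)) {
 *		// ... critical section ...
 *		queued_spin_unlock(&my_lock);
 *	} else {
 *		// back off instead of spinning
 *	}
 */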
  
  extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
  
  /**
   * queued_spin_lock - acquire a queued spinlock
   * @lock: Pointer to queued spinlock structure
   */
  static __always_inline void queued_spin_lock(struct qspinlock *lock)
  {
  	u32 val;
  
  	val = atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL);
  	if (likely(val == 0))
  		return;
  	queued_spin_lock_slowpath(lock, val);
  }
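
/*
 * Fast path: a single cmpxchg from 0 to _Q_LOCKED_VAL acquires an
 * uncontended lock; any other observed value is passed unmodified to the
 * MCS-based slowpath. On architectures that select this header the usual
 * spinlock API lands here, e.g. (sketch, hypothetical lock name):
 *
 *	DEFINE_SPINLOCK(my_lock);
 *
 *	spin_lock(&my_lock);	// reaches queued_spin_lock()
 *	// ... critical section ...
 *	spin_unlock(&my_lock);	// reaches queued_spin_unlock()
 */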
  
  #ifndef queued_spin_unlock
  /**
   * queued_spin_unlock - release a queued spinlock
   * @lock : Pointer to queued spinlock structure
   */
  static __always_inline void queued_spin_unlock(struct qspinlock *lock)
  {
  	/*
  	 * unlock() needs release semantics:
  	 */
  	(void)atomic_sub_return_release(_Q_LOCKED_VAL, &lock->val);
  }
  #endif
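
/*
 * Subtracting _Q_LOCKED_VAL clears only the locked byte and leaves any
 * pending/tail bits in place for the waiters to act on. Architectures
 * with cheap byte stores typically override this with a release store of
 * the locked byte, roughly (little-endian sketch, modelled on the x86
 * version):
 *
 *	static inline void queued_spin_unlock(struct qspinlock *lock)
 *	{
 *		smp_store_release((u8 *)lock, 0);
 *	}
 */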
  
  #ifndef virt_spin_lock
  static __always_inline bool virt_spin_lock(struct qspinlock *lock)
  {
  	return false;
  }
  #endif
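
/*
 * Hook for virtualized guests: strict FIFO queueing performs poorly when
 * vCPUs can be preempted, so an architecture may define virt_spin_lock()
 * to take over with a simple test-and-set loop and return true, bypassing
 * the queued path. Simplified sketch, modelled on the x86 override:
 *
 *	#define virt_spin_lock virt_spin_lock
 *	static inline bool virt_spin_lock(struct qspinlock *lock)
 *	{
 *		if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
 *			return false;
 *		do {
 *			while (atomic_read(&lock->val) != 0)
 *				cpu_relax();
 *		} while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);
 *		return true;
 *	}
 */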
  
/*
 * Remap the architecture-specific spinlock functions to the corresponding
 * queued spinlock functions.
 */
  #define arch_spin_is_locked(l)		queued_spin_is_locked(l)
  #define arch_spin_is_contended(l)	queued_spin_is_contended(l)
  #define arch_spin_value_unlocked(l)	queued_spin_value_unlocked(l)
  #define arch_spin_lock(l)		queued_spin_lock(l)
  #define arch_spin_trylock(l)		queued_spin_trylock(l)
  #define arch_spin_unlock(l)		queued_spin_unlock(l)
  #define arch_spin_lock_flags(l, f)	queued_spin_lock(l)
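
/*
 * An architecture opts in by defining arch_spinlock_t as a qspinlock and
 * pulling this header in from its asm/spinlock.h, along the lines of:
 *
 *	// arch/xxx/include/asm/spinlock.h (sketch)
 *	#include <asm/qspinlock.h>	// which includes asm-generic/qspinlock.h
 *
 * Note that arch_spin_lock_flags() deliberately drops the flags argument:
 * a queued spinlock never re-enables interrupts while waiting, so there is
 * nothing for the flags to do.
 */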
  
  #endif /* __ASM_GENERIC_QSPINLOCK_H */