Blame view

include/linux/osq_lock.h 1 KB
90631822c   Jason Low   locking/spinlocks...
1
2
3
4
5
6
7
  #ifndef __LINUX_OSQ_LOCK_H
  #define __LINUX_OSQ_LOCK_H
  
  /*
   * An MCS like lock especially tailored for optimistic spinning for sleeping
   * lock implementations (mutex, rwsem, etc).
   */
d84b6728c   Davidlohr Bueso   locking/mcs: Bett...
8
9
10
11
12
  struct optimistic_spin_node {
  	struct optimistic_spin_node *next, *prev;
  	int locked; /* 1 if lock acquired */
  	int cpu; /* encoded CPU # + 1 value */
  };
90631822c   Jason Low   locking/spinlocks...
13
14
15
16
17
18
19
20
  
/*
 * The lock word itself: a single atomic_t holding the encoded CPU number
 * of the tail node (see optimistic_spin_node.cpu).  Storing an encoded
 * CPU number rather than a node pointer keeps this struct to one 32-bit
 * word — presumably the point of the atomic_t encoding; confirm against
 * the osq_lock.c implementation.
 */
struct optimistic_spin_queue {
	/*
	 * Stores an encoded value of the CPU # of the tail node in the queue.
	 * If the queue is empty, then it's set to OSQ_UNLOCKED_VAL.
	 */
	atomic_t tail;
};
d84b6728c   Davidlohr Bueso   locking/mcs: Bett...
21
  #define OSQ_UNLOCKED_VAL (0)
4d9d951e6   Jason Low   locking/spinlocks...
22
23
24
25
26
27
28
/* Init macro and function. */

/* Static initializer: start with an empty queue (tail == OSQ_UNLOCKED_VAL). */
#define OSQ_LOCK_UNLOCKED { ATOMIC_INIT(OSQ_UNLOCKED_VAL) }

/*
 * Runtime initializer for dynamically allocated/embedded locks;
 * equivalent to OSQ_LOCK_UNLOCKED.
 */
static inline void osq_lock_init(struct optimistic_spin_queue *lock)
{
	atomic_set(&lock->tail, OSQ_UNLOCKED_VAL);
}
d84b6728c   Davidlohr Bueso   locking/mcs: Bett...
29
30
/*
 * Acquire/release; implemented out of line (not in this header).
 * NOTE(review): osq_lock() returns bool — presumably true on acquisition
 * and false when the spinner should give up; confirm against the
 * kernel/locking/osq_lock.c implementation.
 */
extern bool osq_lock(struct optimistic_spin_queue *lock);
extern void osq_unlock(struct optimistic_spin_queue *lock);
59aabfc7e   Waiman Long   locking/rwsem: Re...
31
32
33
34
  static inline bool osq_is_locked(struct optimistic_spin_queue *lock)
  {
  	return atomic_read(&lock->tail) != OSQ_UNLOCKED_VAL;
  }
90631822c   Jason Low   locking/spinlocks...
35
  #endif