/*
 * Mutexes: blocking mutual exclusion locks
 *
 * started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * This file contains the main data structure and API definitions.
 */
#ifndef __LINUX_MUTEX_H
#define __LINUX_MUTEX_H

#include <linux/list.h>
#include <linux/spinlock_types.h>
#include <linux/linkage.h>
#include <linux/lockdep.h>

#include <linux/atomic.h>
/*
 * Simple, straightforward mutexes with strict semantics:
 *
 * - only one task can hold the mutex at a time
 * - only the owner can unlock the mutex
 * - multiple unlocks are not permitted
 * - recursive locking is not permitted
 * - a mutex object must be initialized via the API
 * - a mutex object must not be initialized via memset or copying
 * - task may not exit with mutex held
 * - memory areas where held locks reside must not be freed
 * - held mutexes must not be reinitialized
 * - mutexes may not be used in hardware or software interrupt
 *   contexts such as tasklets and timers
 *
 * These semantics are fully enforced when DEBUG_MUTEXES is
 * enabled. Furthermore, besides enforcing the above rules, the mutex
 * debugging code also implements a number of additional features
 * that make lock debugging easier and faster:
 *
 * - uses symbolic names of mutexes, whenever they are printed in debug output
 * - point-of-acquire tracking, symbolic lookup of function names
 * - list of all locks held in the system, printout of them
 * - owner tracking
 * - detects self-recursing locks and prints out all relevant info
 * - detects multi-task circular deadlocks and prints out all affected
 *   locks and tasks (and only those tasks)
 */
struct mutex {
	/* 1: unlocked, 0: locked, negative: locked, possible waiters */
	atomic_t		count;
	/* protects wait_list and the slow-path state */
	spinlock_t		wait_lock;
	/* list of mutex_waiter entries for tasks blocked on this mutex */
	struct list_head	wait_list;
#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
	/*
	 * Task holding the lock; compiled in for debug checks and for
	 * SMP builds (NOTE(review): presumably used by the spinning
	 * fast path in kernel/mutex.c -- confirm there).
	 */
	struct task_struct	*owner;
#endif
#ifdef CONFIG_DEBUG_MUTEXES
	const char 		*name;	/* symbolic name for debug printouts */
	void			*magic;	/* debug-only validity cookie */
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;	/* lockdep class/state tracking */
#endif
};

/*
 * This is the control structure for tasks blocked on mutex,
 * which resides on the blocked task's kernel stack:
 */
struct mutex_waiter {
	struct list_head	list;	/* linked into mutex->wait_list */
	struct task_struct	*task;	/* the blocked task to wake */
#ifdef CONFIG_DEBUG_MUTEXES
	void			*magic;	/* debug-only validity cookie */
#endif
};

#ifdef CONFIG_DEBUG_MUTEXES
	/* Debug builds get mutex_init()/mutex_destroy() from here: */
# include <linux/mutex-debug.h>
#else
# define __DEBUG_MUTEX_INITIALIZER(lockname)
/**
 * mutex_init - initialize the mutex
 * @mutex: the mutex to be initialized
 *
 * Initialize the mutex to unlocked state.
 *
 * It is not allowed to initialize an already locked mutex.
 */
# define mutex_init(mutex) \
do {							\
	static struct lock_class_key __key;		\
							\
	__mutex_init((mutex), #mutex, &__key);		\
} while (0)
/* Non-debug builds have nothing to tear down. */
static inline void mutex_destroy(struct mutex *lock) {}
#endif
/*
 * When lockdep is enabled the static initializer must also set up the
 * dep_map class name; otherwise it expands to nothing.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
		, .dep_map = { .name = #lockname }
#else
# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
#endif

/* Static initializer: count == 1 is the unlocked state (see struct mutex). */
#define __MUTEX_INITIALIZER(lockname) \
		{ .count = ATOMIC_INIT(1) \
		, .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
		, .wait_list = LIST_HEAD_INIT(lockname.wait_list) \
		__DEBUG_MUTEX_INITIALIZER(lockname) \
		__DEP_MAP_MUTEX_INITIALIZER(lockname) }

/* Define and statically initialize an unlocked mutex. */
#define DEFINE_MUTEX(mutexname) \
	struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)

/* Runtime initializer backing mutex_init(); see kernel/mutex.c. */
extern void __mutex_init(struct mutex *lock, const char *name,
			 struct lock_class_key *key);
6053ee3b3 [PATCH] mutex sub... |
114 |
|
45f8bde0d [PATCH] fix vario... |
115 |
/** |
6053ee3b3 [PATCH] mutex sub... |
116 117 118 119 120 |
* mutex_is_locked - is the mutex locked * @lock: the mutex to be queried * * Returns 1 if the mutex is locked, 0 if unlocked. */ |
ec7015840 Remove fastcall f... |
121 |
static inline int mutex_is_locked(struct mutex *lock) |
6053ee3b3 [PATCH] mutex sub... |
122 123 124 125 126 127 128 129 |
{ return atomic_read(&lock->count) != 1; } /* * See kernel/mutex.c for detailed documentation of these APIs. * Also see Documentation/mutex-design.txt. */ |
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * With lockdep enabled, every lock operation carries a subclass (or an
 * outer "nest" lock) so lockdep can distinguish otherwise-identical
 * lock classes; the plain mutex_lock() family maps to subclass 0.
 */
extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
extern void _mutex_lock_nest_lock(struct mutex *lock,
				  struct lockdep_map *nest_lock);
extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
					unsigned int subclass);
extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
					unsigned int subclass);

#define mutex_lock(lock) mutex_lock_nested(lock, 0)
#define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0)
#define mutex_lock_killable(lock) mutex_lock_killable_nested(lock, 0)

/*
 * Acquire @lock annotated as nesting under @nest_lock; the typecheck()
 * forces @nest_lock to be a lock type that embeds a dep_map.
 */
#define mutex_lock_nest_lock(lock, nest_lock)				\
do {									\
	typecheck(struct lockdep_map *, &(nest_lock)->dep_map);		\
	_mutex_lock_nest_lock(lock, &(nest_lock)->dep_map);		\
} while (0)

#else
extern void mutex_lock(struct mutex *lock);
extern int __must_check mutex_lock_interruptible(struct mutex *lock);
extern int __must_check mutex_lock_killable(struct mutex *lock);

/* Without lockdep the annotated variants degrade to the plain calls. */
# define mutex_lock_nested(lock, subclass) mutex_lock(lock)
# define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
# define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
#endif
/*
 * NOTE: mutex_trylock() follows the spin_trylock() convention,
 * not the down_trylock() convention!
 *
 * Returns 1 if the mutex has been acquired successfully, and 0 on contention.
 */
extern int mutex_trylock(struct mutex *lock);
extern void mutex_unlock(struct mutex *lock);

/*
 * Decrement @cnt and, when it drops to zero, take @lock before
 * returning -- full semantics documented in kernel/mutex.c.
 */
extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
/*
 * Architectures may provide a tuned busy-wait relax primitive for the
 * mutex spin paths; fall back to the generic cpu_relax() otherwise.
 */
#ifndef CONFIG_HAVE_ARCH_MUTEX_CPU_RELAX
#define arch_mutex_cpu_relax()	cpu_relax()
#endif

#endif /* __LINUX_MUTEX_H */