arch/x86/include/asm/mutex_32.h

/*
 * Assembly implementation of the mutex fastpath, based on atomic
 * decrement/increment.
 *
 * started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
#ifndef _ASM_X86_MUTEX_32_H
#define _ASM_X86_MUTEX_32_H
|
#include <asm/alternative.h> |
|
/**
 * __mutex_fastpath_lock - try to take the lock by moving the count
 *			   from 1 to a 0 value
 * @count: pointer of type atomic_t
 * @fail_fn: function to call if the original value was not 1
 *
 * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
 * wasn't 1 originally. This function MUST leave the value lower than 1
 * even when the "1" assertion wasn't true.
 */
#define __mutex_fastpath_lock(count, fail_fn)			\
do {								\
	unsigned int dummy;					\
								\
	typecheck(atomic_t *, count);				\
	typecheck_fn(void (*)(atomic_t *), fail_fn);		\
								\
	asm volatile(LOCK_PREFIX "   decl (%%eax)\n"		\
		     "   jns 1f\n"				\
		     "   call " #fail_fn "\n"			\
		     "1:\n"					\
		     : "=a" (dummy)				\
		     : "a" (count)				\
		     : "memory", "ecx", "edx");			\
} while (0)
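
/*
 * For illustration: leaving aside the register-level calling convention
 * (count is still in %eax when <fail_fn> is called), the fastpath above
 * behaves roughly like the C fragment
 *
 *	if (atomic_dec_return(count) < 0)
 *		fail_fn(count);
 *
 * It is kept in assembly so that the uncontended case costs a single
 * locked decrement plus one conditional jump.
 */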

/**
 * __mutex_fastpath_lock_retval - try to take the lock by moving the count
 *				  from 1 to a 0 value
 * @count: pointer of type atomic_t
 * @fail_fn: function to call if the original value was not 1
 *
 * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
 * wasn't 1 originally. This function returns 0 if the fastpath succeeds,
 * or anything the slow path function returns.
 */
static inline int __mutex_fastpath_lock_retval(atomic_t *count,
					       int (*fail_fn)(atomic_t *))
{
	if (unlikely(atomic_dec_return(count) < 0))
		return fail_fn(count);
	else
		return 0;
}
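
/*
 * A hypothetical caller, sketched for illustration only (the demo_* names
 * below are made up, not taken from the generic mutex code): the retval
 * variant exists so that the slowpath can hand an error such as -EINTR
 * back out through the fastpath:
 *
 *	static int demo_lock_interruptible(atomic_t *count)
 *	{
 *		return __mutex_fastpath_lock_retval(count, demo_slowpath);
 *	}
 *
 * where demo_slowpath() returns 0 once the lock is acquired after
 * sleeping, or a negative error code if the wait was interrupted.
 */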

/**
 * __mutex_fastpath_unlock - try to promote the mutex from 0 to 1
 * @count: pointer of type atomic_t
 * @fail_fn: function to call if the original value was not 0
 *
 * Try to promote the mutex from 0 to 1. If it wasn't 0, call <fail_fn>.
 * In the failure case, this function is allowed to either set the value
 * to 1, or to set it to a value lower than 1.
 *
 * If the implementation sets it to a value lower than 1, the
 * __mutex_slowpath_needs_to_unlock() macro needs to return 1; otherwise
 * it needs to return 0.
 */
#define __mutex_fastpath_unlock(count, fail_fn)			\
do {								\
	unsigned int dummy;					\
								\
	typecheck(atomic_t *, count);				\
	typecheck_fn(void (*)(atomic_t *), fail_fn);		\
								\
	asm volatile(LOCK_PREFIX "   incl (%%eax)\n"		\
		     "   jg 1f\n"				\
		     "   call " #fail_fn "\n"			\
		     "1:\n"					\
		     : "=a" (dummy)				\
		     : "a" (count)				\
		     : "memory", "ecx", "edx");			\
} while (0)
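
/*
 * As with the lock fastpath (and again with count still in %eax when
 * <fail_fn> is called), the macro above is roughly equivalent to
 *
 *	if (atomic_inc_return(count) <= 0)
 *		fail_fn(count);
 *
 * i.e. if the count fails to become positive, there may be sleepers
 * waiting and the slowpath has to run.
 */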

#define __mutex_slowpath_needs_to_unlock()	1

/**
 * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
 * @count: pointer of type atomic_t
 * @fail_fn: fallback function
 *
 * Change the count from 1 to a value lower than 1, and return 0 (failure)
 * if it wasn't 1 originally, or return 1 (success) otherwise. This function
 * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
 * Additionally, if the value was < 0 originally, this function must not
 * leave it at 0 on failure.
 */
static inline int __mutex_fastpath_trylock(atomic_t *count,
					   int (*fail_fn)(atomic_t *))
{
	/*
	 * We have two variants here. The cmpxchg based one is the best one
	 * because it never induces a false contention state. It is included
	 * here because architectures using the inc/dec algorithms over the
	 * xchg ones are much more likely to support cmpxchg natively.
	 *
	 * If not, we fall back to the spinlock based variant - that is
	 * just as efficient as (and simpler than) a 'destructive' probing
	 * of the mutex state would be.
	 */
#ifdef __HAVE_ARCH_CMPXCHG
	if (likely(atomic_cmpxchg(count, 1, 0) == 1))
		return 1;
	return 0;
#else
	return fail_fn(count);
#endif
}
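
/*
 * Why cmpxchg here rather than the decrement trick the lock fastpath
 * uses? A sketch of the rejected 'destructive' probe makes the
 * difference clear:
 *
 *	if (atomic_dec_return(count) == 0)
 *		return 1;
 *	return 0;	(count is now below 1 even though we failed)
 *
 * A failed decrement-based trylock drags the count below 1, so the mutex
 * looks contended to everybody else - the false contention state the
 * comment above refers to. atomic_cmpxchg(count, 1, 0) only writes when
 * the lock really is free, so a failed trylock leaves the count untouched.
 */
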
#endif /* _ASM_X86_MUTEX_32_H */ |