Commit 0764d23cf066c52de42b653144605b481d3fbdbc

Authored by Steven Rostedt
Committed by Thomas Gleixner
1 parent 361943ad0b

ftrace: lockdep notrace annotations

Add notrace annotations to lockdep to keep ftrace from causing
recursive problems with lock tracing and debugging.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

Showing 2 changed files with 13 additions and 12 deletions (side-by-side diff)

... ... @@ -271,14 +271,14 @@
271 271 ((key1) >> (64-MAX_LOCKDEP_KEYS_BITS)) ^ \
272 272 (key2))
273 273  
274   -void lockdep_off(void)
  274 +notrace void lockdep_off(void)
275 275 {
276 276 current->lockdep_recursion++;
277 277 }
278 278  
279 279 EXPORT_SYMBOL(lockdep_off);
280 280  
281   -void lockdep_on(void)
  281 +notrace void lockdep_on(void)
282 282 {
283 283 current->lockdep_recursion--;
284 284 }
... ... @@ -1041,7 +1041,7 @@
1041 1041 * Return 1 otherwise and keep <backwards_match> unchanged.
1042 1042 * Return 0 on error.
1043 1043 */
1044   -static noinline int
  1044 +static noinline notrace int
1045 1045 find_usage_backwards(struct lock_class *source, unsigned int depth)
1046 1046 {
1047 1047 struct lock_list *entry;
... ... @@ -1591,7 +1591,7 @@
1591 1591 * We are building curr_chain_key incrementally, so double-check
1592 1592 * it from scratch, to make sure that it's done correctly:
1593 1593 */
1594   -static void check_chain_key(struct task_struct *curr)
  1594 +static notrace void check_chain_key(struct task_struct *curr)
1595 1595 {
1596 1596 #ifdef CONFIG_DEBUG_LOCKDEP
1597 1597 struct held_lock *hlock, *prev_hlock = NULL;
... ... @@ -1967,7 +1967,7 @@
1967 1967 /*
1968 1968 * Mark all held locks with a usage bit:
1969 1969 */
1970   -static int
  1970 +static notrace int
1971 1971 mark_held_locks(struct task_struct *curr, int hardirq)
1972 1972 {
1973 1973 enum lock_usage_bit usage_bit;
... ... @@ -2260,8 +2260,8 @@
2260 2260 /*
2261 2261 * Mark a lock with a usage bit, and validate the state transition:
2262 2262 */
2263   -static int mark_lock(struct task_struct *curr, struct held_lock *this,
2264   - enum lock_usage_bit new_bit)
  2263 +static notrace int mark_lock(struct task_struct *curr, struct held_lock *this,
  2264 + enum lock_usage_bit new_bit)
2265 2265 {
2266 2266 unsigned int new_mask = 1 << new_bit, ret = 1;
2267 2267  
... ... @@ -2663,7 +2663,7 @@
2663 2663 /*
2664 2664 * Check whether we follow the irq-flags state precisely:
2665 2665 */
2666   -static void check_flags(unsigned long flags)
  2666 +static notrace void check_flags(unsigned long flags)
2667 2667 {
2668 2668 #if defined(CONFIG_DEBUG_LOCKDEP) && defined(CONFIG_TRACE_IRQFLAGS)
2669 2669 if (!debug_locks)
... ... @@ -2700,8 +2700,8 @@
2700 2700 * We are not always called with irqs disabled - do that here,
2701 2701 * and also avoid lockdep recursion:
2702 2702 */
2703   -void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
2704   - int trylock, int read, int check, unsigned long ip)
  2703 +notrace void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
  2704 + int trylock, int read, int check, unsigned long ip)
2705 2705 {
2706 2706 unsigned long flags;
2707 2707  
... ... @@ -2723,7 +2723,8 @@
2723 2723  
2724 2724 EXPORT_SYMBOL_GPL(lock_acquire);
2725 2725  
2726   -void lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
  2726 +notrace void lock_release(struct lockdep_map *lock, int nested,
  2727 + unsigned long ip)
2727 2728 {
2728 2729 unsigned long flags;
2729 2730  
... ... @@ -436,7 +436,7 @@
436 436 }
437 437 EXPORT_SYMBOL(_spin_trylock_bh);
438 438  
439   -int in_lock_functions(unsigned long addr)
  439 +notrace int in_lock_functions(unsigned long addr)
440 440 {
441 441 /* Linker adds these: start and end of __lockfunc functions */
442 442 extern char __lock_text_start[], __lock_text_end[];