Commit 07354eb1a74d1e1ece29f8bafe0b46e8c77a95ef

Authored by Thomas Gleixner
Committed by Ingo Molnar
1 parent 5389f6fad2

locking, printk: Annotate logbuf_lock as raw

The logbuf_lock can be taken in atomic context and therefore must not
become a sleeping lock on -rt - annotate it as a raw spinlock.

In mainline this change documents the low-level nature of
the lock - otherwise there's no functional difference. Lockdep
and Sparse checking will work as usual.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
[ merged and fixed it ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
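
Background: on PREEMPT_RT a spinlock_t is substituted by a sleeping lock and may
only be taken from preemptible context, while a raw_spinlock_t keeps the classic
spinning, IRQ-safe behaviour on every configuration. A minimal sketch of the
resulting usage pattern (the lock and function names below are made up for
illustration and are not part of this commit):

#include <linux/spinlock.h>

/* Illustrative only: a lock that may be taken from hard-IRQ context,
 * so it must stay a raw spinlock even on PREEMPT_RT. */
static DEFINE_RAW_SPINLOCK(example_lock);

static void example_atomic_path(void)
{
	unsigned long flags;

	/* Never sleeps, on -rt or in mainline; safe in atomic context. */
	raw_spin_lock_irqsave(&example_lock, flags);
	/* ... touch the data example_lock protects ... */
	raw_spin_unlock_irqrestore(&example_lock, flags);
}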

Showing 3 changed files with 28 additions and 28 deletions

include/linux/ratelimit.h
... ... @@ -8,7 +8,7 @@
8 8 #define DEFAULT_RATELIMIT_BURST 10
9 9  
10 10 struct ratelimit_state {
11   - spinlock_t lock; /* protect the state */
  11 + raw_spinlock_t lock; /* protect the state */
12 12  
13 13 int interval;
14 14 int burst;
... ... @@ -20,7 +20,7 @@
20 20 #define DEFINE_RATELIMIT_STATE(name, interval_init, burst_init) \
21 21 \
22 22 struct ratelimit_state name = { \
23   - .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
  23 + .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
24 24 .interval = interval_init, \
25 25 .burst = burst_init, \
26 26 }
... ... @@ -28,7 +28,7 @@
28 28 static inline void ratelimit_state_init(struct ratelimit_state *rs,
29 29 int interval, int burst)
30 30 {
31   - spin_lock_init(&rs->lock);
  31 + raw_spin_lock_init(&rs->lock);
32 32 rs->interval = interval;
33 33 rs->burst = burst;
34 34 rs->printed = 0;
kernel/printk.c
... ... @@ -100,7 +100,7 @@
100 100 * It is also used in interesting ways to provide interlocking in
101 101 * console_unlock();.
102 102 */
103   -static DEFINE_SPINLOCK(logbuf_lock);
  103 +static DEFINE_RAW_SPINLOCK(logbuf_lock);
104 104  
105 105 #define LOG_BUF_MASK (log_buf_len-1)
106 106 #define LOG_BUF(idx) (log_buf[(idx) & LOG_BUF_MASK])
... ... @@ -212,7 +212,7 @@
212 212 return;
213 213 }
214 214  
215   - spin_lock_irqsave(&logbuf_lock, flags);
  215 + raw_spin_lock_irqsave(&logbuf_lock, flags);
216 216 log_buf_len = new_log_buf_len;
217 217 log_buf = new_log_buf;
218 218 new_log_buf_len = 0;
... ... @@ -230,7 +230,7 @@
230 230 log_start -= offset;
231 231 con_start -= offset;
232 232 log_end -= offset;
233   - spin_unlock_irqrestore(&logbuf_lock, flags);
  233 + raw_spin_unlock_irqrestore(&logbuf_lock, flags);
234 234  
235 235 pr_info("log_buf_len: %d\n", log_buf_len);
236 236 pr_info("early log buf free: %d(%d%%)\n",
... ... @@ -365,18 +365,18 @@
365 365 if (error)
366 366 goto out;
367 367 i = 0;
368   - spin_lock_irq(&logbuf_lock);
  368 + raw_spin_lock_irq(&logbuf_lock);
369 369 while (!error && (log_start != log_end) && i < len) {
370 370 c = LOG_BUF(log_start);
371 371 log_start++;
372   - spin_unlock_irq(&logbuf_lock);
  372 + raw_spin_unlock_irq(&logbuf_lock);
373 373 error = __put_user(c,buf);
374 374 buf++;
375 375 i++;
376 376 cond_resched();
377   - spin_lock_irq(&logbuf_lock);
  377 + raw_spin_lock_irq(&logbuf_lock);
378 378 }
379   - spin_unlock_irq(&logbuf_lock);
  379 + raw_spin_unlock_irq(&logbuf_lock);
380 380 if (!error)
381 381 error = i;
382 382 break;
... ... @@ -399,7 +399,7 @@
399 399 count = len;
400 400 if (count > log_buf_len)
401 401 count = log_buf_len;
402   - spin_lock_irq(&logbuf_lock);
  402 + raw_spin_lock_irq(&logbuf_lock);
403 403 if (count > logged_chars)
404 404 count = logged_chars;
405 405 if (do_clear)
... ... @@ -416,12 +416,12 @@
416 416 if (j + log_buf_len < log_end)
417 417 break;
418 418 c = LOG_BUF(j);
419   - spin_unlock_irq(&logbuf_lock);
  419 + raw_spin_unlock_irq(&logbuf_lock);
420 420 error = __put_user(c,&buf[count-1-i]);
421 421 cond_resched();
422   - spin_lock_irq(&logbuf_lock);
  422 + raw_spin_lock_irq(&logbuf_lock);
423 423 }
424   - spin_unlock_irq(&logbuf_lock);
  424 + raw_spin_unlock_irq(&logbuf_lock);
425 425 if (error)
426 426 break;
427 427 error = i;
... ... @@ -689,7 +689,7 @@
689 689 oops_timestamp = jiffies;
690 690  
691 691 /* If a crash is occurring, make sure we can't deadlock */
692   - spin_lock_init(&logbuf_lock);
  692 + raw_spin_lock_init(&logbuf_lock);
693 693 /* And make sure that we print immediately */
694 694 sema_init(&console_sem, 1);
695 695 }
... ... @@ -802,9 +802,9 @@
802 802 }
803 803 }
804 804 printk_cpu = UINT_MAX;
805   - spin_unlock(&logbuf_lock);
806 805 if (wake)
807 806 up(&console_sem);
  807 + raw_spin_unlock(&logbuf_lock);
808 808 return retval;
809 809 }
810 810 static const char recursion_bug_msg [] =
... ... @@ -864,7 +864,7 @@
864 864 }
865 865  
866 866 lockdep_off();
867   - spin_lock(&logbuf_lock);
  867 + raw_spin_lock(&logbuf_lock);
868 868 printk_cpu = this_cpu;
869 869  
870 870 if (recursion_bug) {
... ... @@ -1257,14 +1257,14 @@
1257 1257  
1258 1258 again:
1259 1259 for ( ; ; ) {
1260   - spin_lock_irqsave(&logbuf_lock, flags);
  1260 + raw_spin_lock_irqsave(&logbuf_lock, flags);
1261 1261 wake_klogd |= log_start - log_end;
1262 1262 if (con_start == log_end)
1263 1263 break; /* Nothing to print */
1264 1264 _con_start = con_start;
1265 1265 _log_end = log_end;
1266 1266 con_start = log_end; /* Flush */
1267   - spin_unlock(&logbuf_lock);
  1267 + raw_spin_unlock(&logbuf_lock);
1268 1268 stop_critical_timings(); /* don't trace print latency */
1269 1269 call_console_drivers(_con_start, _log_end);
1270 1270 start_critical_timings();
... ... @@ -1276,7 +1276,7 @@
1276 1276 if (unlikely(exclusive_console))
1277 1277 exclusive_console = NULL;
1278 1278  
1279   - spin_unlock(&logbuf_lock);
  1279 + raw_spin_unlock(&logbuf_lock);
1280 1280  
1281 1281 up(&console_sem);
1282 1282  
... ... @@ -1286,13 +1286,13 @@
1286 1286 * there's a new owner and the console_unlock() from them will do the
1287 1287 * flush, no worries.
1288 1288 */
1289   - spin_lock(&logbuf_lock);
  1289 + raw_spin_lock(&logbuf_lock);
1290 1290 if (con_start != log_end)
1291 1291 retry = 1;
1292   - spin_unlock_irqrestore(&logbuf_lock, flags);
1293 1292 if (retry && console_trylock())
1294 1293 goto again;
1295 1294  
  1295 + raw_spin_unlock_irqrestore(&logbuf_lock, flags);
1296 1296 if (wake_klogd)
1297 1297 wake_up_klogd();
1298 1298 }
... ... @@ -1522,9 +1522,9 @@
1522 1522 * console_unlock(); will print out the buffered messages
1523 1523 * for us.
1524 1524 */
1525   - spin_lock_irqsave(&logbuf_lock, flags);
  1525 + raw_spin_lock_irqsave(&logbuf_lock, flags);
1526 1526 con_start = log_start;
1527   - spin_unlock_irqrestore(&logbuf_lock, flags);
  1527 + raw_spin_unlock_irqrestore(&logbuf_lock, flags);
1528 1528 /*
1529 1529 * We're about to replay the log buffer. Only do this to the
1530 1530 * just-registered console to avoid excessive message spam to
... ... @@ -1731,10 +1731,10 @@
1731 1731 /* Theoretically, the log could move on after we do this, but
1732 1732 there's not a lot we can do about that. The new messages
1733 1733 will overwrite the start of what we dump. */
1734   - spin_lock_irqsave(&logbuf_lock, flags);
  1734 + raw_spin_lock_irqsave(&logbuf_lock, flags);
1735 1735 end = log_end & LOG_BUF_MASK;
1736 1736 chars = logged_chars;
1737   - spin_unlock_irqrestore(&logbuf_lock, flags);
  1737 + raw_spin_unlock_irqrestore(&logbuf_lock, flags);
1738 1738  
1739 1739 if (chars > end) {
1740 1740 s1 = log_buf + log_buf_len - chars + end;
lib/ratelimit.c
... ... @@ -39,7 +39,7 @@
39 39 * in addition to the one that will be printed by
40 40 * the entity that is holding the lock already:
41 41 */
42   - if (!spin_trylock_irqsave(&rs->lock, flags))
  42 + if (!raw_spin_trylock_irqsave(&rs->lock, flags))
43 43 return 0;
44 44  
45 45 if (!rs->begin)
... ... @@ -60,7 +60,7 @@
60 60 rs->missed++;
61 61 ret = 0;
62 62 }
63   - spin_unlock_irqrestore(&rs->lock, flags);
  63 + raw_spin_unlock_irqrestore(&rs->lock, flags);
64 64  
65 65 return ret;
66 66 }
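
For context: the ratelimit_state lock gets the same treatment because
___ratelimit() is routinely called from atomic context, for instance via
printk_ratelimited() inside an interrupt handler. A rough sketch of such a
caller (the driver and handler names are hypothetical, not from this commit):

#include <linux/interrupt.h>
#include <linux/printk.h>
#include <linux/ratelimit.h>

/* Hypothetical IRQ handler: runs with interrupts disabled, so the
 * ratelimit state's lock taken inside printk_ratelimited() must not
 * be a sleeping lock. */
static irqreturn_t example_irq_handler(int irq, void *dev_id)
{
	/* printk_ratelimited() expands to a static DEFINE_RATELIMIT_STATE
	 * plus a __ratelimit() check, which takes rs->lock. */
	printk_ratelimited(KERN_WARNING "example: spurious interrupt %d\n", irq);

	return IRQ_HANDLED;
}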