Commit 740969f91e950b64a18fdd0a25164cdee042abf0

Authored by Thomas Gleixner
Committed by Ingo Molnar
1 parent cdcc136ffd

locking, lib/proportions: Annotate prop_local_percpu::lock as raw

The prop_local_percpu::lock can be taken in atomic context, so on -rt it
must remain a true spinning lock rather than be converted to a preemptible
sleeping lock - annotate it as raw.

In mainline this change documents the low-level nature of
the lock - otherwise there's no functional difference. Lockdep
and Sparse checking will work as usual.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
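
The pattern being applied can be illustrated with a minimal, self-contained
sketch (not code from this commit; the names my_snapshot and
my_snapshot_update are hypothetical). A lock that must stay a true spinning
lock on -rt is declared raw_spinlock_t, initialized with
__RAW_SPIN_LOCK_UNLOCKED() or raw_spin_lock_init(), and taken only through
the raw_spin_*() API:

#include <linux/spinlock.h>

/* Hypothetical state guarded by a lock that is taken in atomic context. */
struct my_snapshot {
	unsigned long period;
	int shift;
	raw_spinlock_t lock;	/* must not become a sleeping lock on -rt */
};

/* Static initialization, mirroring the INIT_PROP_LOCAL_SINGLE() change below. */
static struct my_snapshot snap = {
	.lock = __RAW_SPIN_LOCK_UNLOCKED(snap.lock),
};

static void my_snapshot_update(struct my_snapshot *s, unsigned long period)
{
	unsigned long flags;

	/*
	 * raw_spin_lock_irqsave() disables local interrupts and spins.
	 * Unlike spin_lock_irqsave(), it is not converted into a
	 * preemptible sleeping lock by the -rt patches, so it is safe
	 * to take in atomic context.
	 */
	raw_spin_lock_irqsave(&s->lock, flags);
	s->period = period;
	raw_spin_unlock_irqrestore(&s->lock, flags);
}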

Showing 2 changed files with 9 additions and 9 deletions

include/linux/proportions.h
@@ -58,7 +58,7 @@
 	 */
 	int shift;
 	unsigned long period;
-	spinlock_t lock;		/* protect the snapshot state */
+	raw_spinlock_t lock;		/* protect the snapshot state */
 };
 
 int prop_local_init_percpu(struct prop_local_percpu *pl);
@@ -106,11 +106,11 @@
 	 */
 	unsigned long period;
 	int shift;
-	spinlock_t lock;	/* protect the snapshot state */
+	raw_spinlock_t lock;	/* protect the snapshot state */
 };
 
 #define INIT_PROP_LOCAL_SINGLE(name)	\
-{	.lock = __SPIN_LOCK_UNLOCKED(name.lock),	\
+{	.lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock),	\
 }
 
 int prop_local_init_single(struct prop_local_single *pl);

lib/proportions.c
@@ -190,7 +190,7 @@
 
 int prop_local_init_percpu(struct prop_local_percpu *pl)
 {
-	spin_lock_init(&pl->lock);
+	raw_spin_lock_init(&pl->lock);
 	pl->shift = 0;
 	pl->period = 0;
 	return percpu_counter_init(&pl->events, 0);
@@ -226,7 +226,7 @@
 	if (pl->period == global_period)
 		return;
 
-	spin_lock_irqsave(&pl->lock, flags);
+	raw_spin_lock_irqsave(&pl->lock, flags);
 	prop_adjust_shift(&pl->shift, &pl->period, pg->shift);
 
 	/*
@@ -247,7 +247,7 @@
 		percpu_counter_set(&pl->events, 0);
 
 	pl->period = global_period;
-	spin_unlock_irqrestore(&pl->lock, flags);
+	raw_spin_unlock_irqrestore(&pl->lock, flags);
 }
 
 /*
@@ -324,7 +324,7 @@
 
 int prop_local_init_single(struct prop_local_single *pl)
 {
-	spin_lock_init(&pl->lock);
+	raw_spin_lock_init(&pl->lock);
 	pl->shift = 0;
 	pl->period = 0;
 	pl->events = 0;
@@ -356,7 +356,7 @@
 	if (pl->period == global_period)
 		return;
 
-	spin_lock_irqsave(&pl->lock, flags);
+	raw_spin_lock_irqsave(&pl->lock, flags);
 	prop_adjust_shift(&pl->shift, &pl->period, pg->shift);
 	/*
 	 * For each missed period, we half the local counter.
@@ -367,7 +367,7 @@
 	else
 		pl->events = 0;
 	pl->period = global_period;
-	spin_unlock_irqrestore(&pl->lock, flags);
+	raw_spin_unlock_irqrestore(&pl->lock, flags);
 }
 
 /*
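
Why mainline is unaffected: in mainline, spinlock_t is a thin wrapper around
raw_spinlock_t, so both lock types compile to the same underlying
implementation. Simplified from <linux/spinlock_types.h> (debug and lockdep
fields omitted):

typedef struct spinlock {
	struct raw_spinlock rlock;	/* spin_lock() simply operates on rlock */
} spinlock_t;

The raw_ annotation only matters once the -rt patches substitute a
preemptible, rt_mutex-based lock for spinlock_t; raw_spinlock_t is left
alone and keeps its spinning, interrupt-disabling semantics.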