Commit 35a2af94c7ce7130ca292c68b1d27fcfdb648f6b
Committed by: Ingo Molnar
Parent: ebdc195f2e
sched/wait: Make the __wait_event*() interface more friendly
Change all __wait_event*() implementations to match the corresponding
wait_event*() signature for convenience.

In particular this does away with the weird 'ret' logic. Since there
are __wait_event*() users this requires we update them too.

Reviewed-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20131002092529.042563462@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Showing 5 changed files with 73 additions and 81 deletions
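To make the interface change concrete, here is a minimal before/after sketch of the new calling convention (illustrative only; the wait queue 'wq' and condition 'data_ready' are made-up names, not taken from this commit):

	/* Before: callers declared 'ret' and passed it as an out-parameter. */
	int ret = 0;
	__wait_event_interruptible(wq, data_ready, ret);
	if (ret)
		return ret;

	/* After: the macro expands to a statement expression that returns
	 * the result directly, matching wait_event_interruptible(). */
	int ret = __wait_event_interruptible(wq, data_ready);
	if (ret)
		return ret;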
arch/mips/kernel/rtlx.c
@@ -172,8 +172,9 @@
 	if (rtlx == NULL) {
 		if( (p = vpe_get_shared(tclimit)) == NULL) {
 			if (can_sleep) {
-				__wait_event_interruptible(channel_wqs[index].lx_queue,
-					(p = vpe_get_shared(tclimit)), ret);
+				ret = __wait_event_interruptible(
+					channel_wqs[index].lx_queue,
+					(p = vpe_get_shared(tclimit)));
 				if (ret)
 					goto out_fail;
 			} else {
@@ -263,11 +264,10 @@
 	/* data available to read? */
 	if (chan->lx_read == chan->lx_write) {
 		if (can_sleep) {
-			int ret = 0;
-
-			__wait_event_interruptible(channel_wqs[index].lx_queue,
+			int ret = __wait_event_interruptible(
+				channel_wqs[index].lx_queue,
 				(chan->lx_read != chan->lx_write) ||
-				sp_stopping, ret);
+				sp_stopping);
 			if (ret)
 				return ret;
 
@@ -440,14 +440,13 @@
 
 	/* any space left... */
 	if (!rtlx_write_poll(minor)) {
-		int ret = 0;
+		int ret;
 
 		if (file->f_flags & O_NONBLOCK)
 			return -EAGAIN;
 
-		__wait_event_interruptible(channel_wqs[minor].rt_queue,
-					   rtlx_write_poll(minor),
-					   ret);
+		ret = __wait_event_interruptible(channel_wqs[minor].rt_queue,
+						 rtlx_write_poll(minor));
 		if (ret)
 			return ret;
 	}
include/linux/tty.h
@@ -672,14 +672,14 @@
 #define wait_event_interruptible_tty(tty, wq, condition)		\
 ({									\
 	int __ret = 0;							\
-	if (!(condition)) {						\
-		__wait_event_interruptible_tty(tty, wq, condition, __ret); \
-	}								\
+	if (!(condition))						\
+		__ret = __wait_event_interruptible_tty(tty, wq,		\
+						       condition);	\
 	__ret;								\
 })
 
-#define __wait_event_interruptible_tty(tty, wq, condition, ret)	\
-	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, ret,	\
+#define __wait_event_interruptible_tty(tty, wq, condition)		\
+	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
 		      tty_unlock(tty);					\
 		      schedule();					\
 		      tty_lock(tty))
include/linux/wait.h
@@ -179,24 +179,23 @@
 #define wake_up_interruptible_sync_poll(x, m)				\
 	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
 
-#define ___wait_cond_timeout(condition, ret)				\
+#define ___wait_cond_timeout(condition)					\
 ({									\
 	bool __cond = (condition);					\
-	if (__cond && !ret)						\
-		ret = 1;						\
-	__cond || !ret;							\
+	if (__cond && !__ret)						\
+		__ret = 1;						\
+	__cond || !__ret;						\
 })
 
 #define ___wait_signal_pending(state)					\
 	((state == TASK_INTERRUPTIBLE && signal_pending(current)) ||	\
 	 (state == TASK_KILLABLE && fatal_signal_pending(current)))
 
-#define ___wait_nop_ret int ret __always_unused
-
 #define ___wait_event(wq, condition, state, exclusive, ret, cmd)	\
-do {									\
+({									\
 	__label__ __out;						\
 	DEFINE_WAIT(__wait);						\
+	long __ret = ret;						\
 									\
 	for (;;) {							\
 		if (exclusive)						\
@@ -208,7 +207,7 @@
 			break;						\
 									\
 		if (___wait_signal_pending(state)) {			\
-			ret = -ERESTARTSYS;				\
+			__ret = -ERESTARTSYS;				\
 			if (exclusive) {				\
 				abort_exclusive_wait(&wq, &__wait,	\
 						     state, NULL);	\
@@ -220,12 +219,12 @@
 		cmd;							\
 	}								\
 	finish_wait(&wq, &__wait);					\
-__out:	;								\
-} while (0)
+__out:	__ret;								\
+})
 
 #define __wait_event(wq, condition)					\
-	___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0,		\
-		      ___wait_nop_ret, schedule())
+	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    schedule())
 
 /**
  * wait_event - sleep until a condition gets true
@@ -246,10 +245,10 @@
 		__wait_event(wq, condition);				\
 } while (0)
 
-#define __wait_event_timeout(wq, condition, ret)			\
-	___wait_event(wq, ___wait_cond_timeout(condition, ret),	\
-		      TASK_UNINTERRUPTIBLE, 0, ret,			\
-		      ret = schedule_timeout(ret))
+#define __wait_event_timeout(wq, condition, timeout)			\
+	___wait_event(wq, ___wait_cond_timeout(condition),		\
+		      TASK_UNINTERRUPTIBLE, 0, timeout,			\
+		      __ret = schedule_timeout(__ret))
 
 /**
  * wait_event_timeout - sleep until a condition gets true or a timeout elapses
@@ -272,12 +271,12 @@
 ({									\
 	long __ret = timeout;						\
 	if (!(condition))						\
-		__wait_event_timeout(wq, condition, __ret);		\
+		__ret = __wait_event_timeout(wq, condition, timeout);	\
 	__ret;								\
 })
 
-#define __wait_event_interruptible(wq, condition, ret)			\
-	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, ret,	\
+#define __wait_event_interruptible(wq, condition)			\
+	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
 		      schedule())
 
 /**
@@ -299,14 +298,14 @@
 ({									\
 	int __ret = 0;							\
 	if (!(condition))						\
-		__wait_event_interruptible(wq, condition, __ret);	\
+		__ret = __wait_event_interruptible(wq, condition);	\
 	__ret;								\
 })
 
-#define __wait_event_interruptible_timeout(wq, condition, ret)		\
-	___wait_event(wq, ___wait_cond_timeout(condition, ret),	\
-		      TASK_INTERRUPTIBLE, 0, ret,			\
-		      ret = schedule_timeout(ret))
+#define __wait_event_interruptible_timeout(wq, condition, timeout)	\
+	___wait_event(wq, ___wait_cond_timeout(condition),		\
+		      TASK_INTERRUPTIBLE, 0, timeout,			\
+		      __ret = schedule_timeout(__ret))
 
 /**
  * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
@@ -330,7 +329,8 @@
 ({									\
 	long __ret = timeout;						\
 	if (!(condition))						\
-		__wait_event_interruptible_timeout(wq, condition, __ret); \
+		__ret = __wait_event_interruptible_timeout(wq,		\
+						condition, timeout);	\
 	__ret;								\
 })
 
@@ -347,7 +347,7 @@
 				      current->timer_slack_ns,		\
 				      HRTIMER_MODE_REL);		\
 									\
-	___wait_event(wq, condition, state, 0, __ret,			\
+	__ret = ___wait_event(wq, condition, state, 0, 0,		\
 		if (!__t.task) {					\
 			__ret = -ETIME;					\
 			break;						\
@@ -409,15 +409,15 @@
 	__ret;								\
 })
 
-#define __wait_event_interruptible_exclusive(wq, condition, ret)	\
-	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, ret,	\
+#define __wait_event_interruptible_exclusive(wq, condition)		\
+	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,		\
 		      schedule())
 
 #define wait_event_interruptible_exclusive(wq, condition)		\
 ({									\
 	int __ret = 0;							\
 	if (!(condition))						\
-		__wait_event_interruptible_exclusive(wq, condition, __ret);\
+		__ret = __wait_event_interruptible_exclusive(wq, condition);\
 	__ret;								\
 })
 
@@ -570,8 +570,8 @@
 
 
 
-#define __wait_event_killable(wq, condition, ret)			\
-	___wait_event(wq, condition, TASK_KILLABLE, 0, ret, schedule())
+#define __wait_event_killable(wq, condition)				\
+	___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())
 
 /**
  * wait_event_killable - sleep until a condition gets true
@@ -592,18 +592,17 @@
 ({									\
 	int __ret = 0;							\
 	if (!(condition))						\
-		__wait_event_killable(wq, condition, __ret);		\
+		__ret = __wait_event_killable(wq, condition);		\
 	__ret;								\
 })
 
 
 #define __wait_event_lock_irq(wq, condition, lock, cmd)			\
-	___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0,		\
-		      ___wait_nop_ret,					\
-		      spin_unlock_irq(&lock);				\
-		      cmd;						\
-		      schedule();					\
-		      spin_lock_irq(&lock))
+	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    spin_unlock_irq(&lock);			\
			    cmd;					\
			    schedule();					\
			    spin_lock_irq(&lock))
 
 /**
  * wait_event_lock_irq_cmd - sleep until a condition gets true. The
@@ -663,11 +662,11 @@
 } while (0)
 
 
-#define __wait_event_interruptible_lock_irq(wq, condition, lock, ret, cmd) \
-	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, ret,	\
-		      spin_unlock_irq(&lock);				\
-		      cmd;						\
-		      schedule();					\
+#define __wait_event_interruptible_lock_irq(wq, condition, lock, cmd)	\
+	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
+		      spin_unlock_irq(&lock);				\
+		      cmd;						\
+		      schedule();					\
 		      spin_lock_irq(&lock))
 
 /**
@@ -698,10 +697,9 @@
 #define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd) \
 ({									\
 	int __ret = 0;							\
-									\
 	if (!(condition))						\
-		__wait_event_interruptible_lock_irq(wq, condition,	\
-						    lock, __ret, cmd);	\
+		__ret = __wait_event_interruptible_lock_irq(wq,		\
+						condition, lock, cmd);	\
 	__ret;								\
 })
 
@@ -730,18 +728,18 @@
 #define wait_event_interruptible_lock_irq(wq, condition, lock)		\
 ({									\
 	int __ret = 0;							\
-									\
 	if (!(condition))						\
-		__wait_event_interruptible_lock_irq(wq, condition,	\
-						    lock, __ret, );	\
+		__ret = __wait_event_interruptible_lock_irq(wq,		\
+						condition, lock,)	\
 	__ret;								\
 })
 
-#define __wait_event_interruptible_lock_irq_timeout(wq, condition, lock, ret) \
-	___wait_event(wq, ___wait_cond_timeout(condition, ret),	\
-		      TASK_INTERRUPTIBLE, 0, ret,			\
-		      spin_unlock_irq(&lock);				\
-		      ret = schedule_timeout(ret);			\
+#define __wait_event_interruptible_lock_irq_timeout(wq, condition,	\
+						    lock, timeout)	\
+	___wait_event(wq, ___wait_cond_timeout(condition),		\
+		      TASK_INTERRUPTIBLE, 0, ret,			\
+		      spin_unlock_irq(&lock);				\
+		      __ret = schedule_timeout(__ret);			\
 		      spin_lock_irq(&lock));
 
 /**
@@ -771,11 +769,10 @@
 #define wait_event_interruptible_lock_irq_timeout(wq, condition, lock,	\
 						  timeout)		\
 ({									\
-	int __ret = timeout;						\
-									\
+	long __ret = timeout;						\
 	if (!(condition))						\
-		__wait_event_interruptible_lock_irq_timeout(		\
-					wq, condition, lock, __ret);	\
+		__ret = __wait_event_interruptible_lock_irq_timeout(	\
+					wq, condition, lock, timeout);	\
 	__ret;								\
 })
 
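The core of the change above is turning ___wait_event() from a do/while statement into a GCC statement expression, where the final expression (__ret at the __out label) becomes the value of the whole macro. A minimal standalone sketch of that construct, assuming GCC or Clang (plain C illustration, not kernel code):

	#include <stdio.h>

	/* A statement expression ({ ... }) evaluates to its final
	 * expression; ___wait_event() relies on this so the wait loop
	 * can run and still yield __ret as the macro's value. */
	#define CLAMP_NONNEG(x)			\
	({					\
		long __ret = (x);		\
		if (__ret < 0)			\
			__ret = 0;		\
		__ret;				\
	})

	int main(void)
	{
		printf("%ld\n", CLAMP_NONNEG(-5));	/* prints 0 */
		printf("%ld\n", CLAMP_NONNEG(7));	/* prints 7 */
		return 0;
	}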
net/irda/af_irda.c
@@ -2563,9 +2563,8 @@
 				  jiffies + msecs_to_jiffies(val));
 
 		/* Wait for IR-LMP to call us back */
-		__wait_event_interruptible(self->query_wait,
-			(self->cachedaddr != 0 || self->errno == -ETIME),
-			err);
+		err = __wait_event_interruptible(self->query_wait,
+			(self->cachedaddr != 0 || self->errno == -ETIME));
 
 		/* If watchdog is still activated, kill it! */
 		del_timer(&(self->watchdog));
net/netfilter/ipvs/ip_vs_sync.c
@@ -1637,12 +1637,9 @@
 			continue;
 		}
 		while (ip_vs_send_sync_msg(tinfo->sock, sb->mesg) < 0) {
-			int ret = 0;
-
-			__wait_event_interruptible(*sk_sleep(sk),
+			int ret = __wait_event_interruptible(*sk_sleep(sk),
 						   sock_writeable(sk) ||
-						   kthread_should_stop(),
-						   ret);
+						   kthread_should_stop());
 			if (unlikely(kthread_should_stop()))
 				goto done;
 		}