Commit 3fe4a975d662f11037cb710f8b4b158a3e38f9c0

Authored by Davide Libenzi
Committed by Linus Torvalds
1 parent 36025a812e

epoll: fix nested calls support

This fixes a regression in 2.6.30.

I unfortunately accepted a patch some time ago, to drop the "current" usage
from possible IRQ context, without proper thought over it.  The patch
switched to using the CPU id, by bounding the nested call callback with a
get_cpu()/put_cpu() pair.

Unfortunately the ep_call_nested() function can be called with a callback
that grabs sleeping locks (from its own f_op->poll()), which results in
serious failures.  The following patch uses the proper "context" depending
on the path where it is called, and on the kind of callback.

This has been reported by Stefan Richter, who has also verified the patch
in his previously failing environment.

Signed-off-by: Davide Libenzi <davidel@xmailserver.org>
Reported-by: Stefan Richter <stefanr@s5r6.in-berlin.de>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 1 changed file with 12 additions and 9 deletions Side-by-side Diff

... ... @@ -98,7 +98,7 @@
98 98 struct nested_call_node {
99 99 struct list_head llink;
100 100 void *cookie;
101   - int cpu;
  101 + void *ctx;
102 102 };
103 103  
104 104 /*
105 105  
106 106  
... ... @@ -317,17 +317,17 @@
317 317 * @nproc: Nested call core function pointer.
318 318 * @priv: Opaque data to be passed to the @nproc callback.
319 319 * @cookie: Cookie to be used to identify this nested call.
  320 + * @ctx: This instance context.
320 321 *
321 322 * Returns: Returns the code returned by the @nproc callback, or -1 if
322 323 * the maximum recursion limit has been exceeded.
323 324 */
324 325 static int ep_call_nested(struct nested_calls *ncalls, int max_nests,
325 326 int (*nproc)(void *, void *, int), void *priv,
326   - void *cookie)
  327 + void *cookie, void *ctx)
327 328 {
328 329 int error, call_nests = 0;
329 330 unsigned long flags;
330   - int this_cpu = get_cpu();
331 331 struct list_head *lsthead = &ncalls->tasks_call_list;
332 332 struct nested_call_node *tncur;
333 333 struct nested_call_node tnode;
... ... @@ -340,7 +340,7 @@
340 340 * very much limited.
341 341 */
342 342 list_for_each_entry(tncur, lsthead, llink) {
343   - if (tncur->cpu == this_cpu &&
  343 + if (tncur->ctx == ctx &&
344 344 (tncur->cookie == cookie || ++call_nests > max_nests)) {
345 345 /*
346 346 * Ops ... loop detected or maximum nest level reached.
... ... @@ -352,7 +352,7 @@
352 352 }
353 353  
354 354 /* Add the current task and cookie to the list */
355   - tnode.cpu = this_cpu;
  355 + tnode.ctx = ctx;
356 356 tnode.cookie = cookie;
357 357 list_add(&tnode.llink, lsthead);
358 358  
359 359  
... ... @@ -364,10 +364,9 @@
364 364 /* Remove the current task from the list */
365 365 spin_lock_irqsave(&ncalls->lock, flags);
366 366 list_del(&tnode.llink);
367   - out_unlock:
  367 +out_unlock:
368 368 spin_unlock_irqrestore(&ncalls->lock, flags);
369 369  
370   - put_cpu();
371 370 return error;
372 371 }
373 372  
374 373  
... ... @@ -408,8 +407,12 @@
408 407 */
409 408 static void ep_poll_safewake(wait_queue_head_t *wq)
410 409 {
  410 + int this_cpu = get_cpu();
  411 +
411 412 ep_call_nested(&poll_safewake_ncalls, EP_MAX_NESTS,
412   - ep_poll_wakeup_proc, NULL, wq);
  413 + ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu);
  414 +
  415 + put_cpu();
413 416 }
414 417  
415 418 /*
... ... @@ -663,7 +666,7 @@
663 666 * could re-enter here.
664 667 */
665 668 pollflags = ep_call_nested(&poll_readywalk_ncalls, EP_MAX_NESTS,
666   - ep_poll_readyevents_proc, ep, ep);
  669 + ep_poll_readyevents_proc, ep, ep, current);
667 670  
668 671 return pollflags != -1 ? pollflags : 0;
669 672 }