Commit 6b8268b17a1ffc942bc72d7d00274e433d6b6719

Authored by Jens Axboe
Committed by David Howells
1 parent 0160950297

SLOW_WORK: Add delayed_slow_work support

This adds support for starting slow work with a delay, similar
to the functionality we have for workqueues.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Signed-off-by: David Howells <dhowells@redhat.com>

Showing 3 changed files with 171 additions and 3 deletions

Documentation/slow-work.txt
... ... @@ -41,7 +41,14 @@
41 41 Operations of both types may sleep during execution, thus tying up the thread
42 42 loaned to it.
43 43  
  44 +A further class of work item is available, based on the slow work item class:
44 45  
  46 + (*) Delayed slow work items.
  47 +
  48 +These are slow work items that have a timer to defer queueing of the item for
  49 +a while.
  50 +
  51 +
45 52 THREAD-TO-CLASS ALLOCATION
46 53 --------------------------
47 54  
... ... @@ -95,6 +102,10 @@
95 102  
96 103 or:
97 104  
  105 + delayed_slow_work_init(&myitem, &myitem_ops);
  106 +
  107 + or:
  108 +
98 109 vslow_work_init(&myitem, &myitem_ops);
99 110  
100 111 depending on its class.
101 112  
102 113  
103 114  
... ... @@ -104,14 +115,17 @@
104 115 int ret = slow_work_enqueue(&myitem);
105 116  
106 117 This will return a -ve error if the thread pool is unable to gain a reference
107   -on the item, 0 otherwise.
  118 +on the item, 0 otherwise. For a delayed work item, instead use:
108 119  
  120 + int ret = delayed_slow_work_enqueue(&myitem, my_jiffy_delay);
109 121  
  122 +
110 123 The items are reference counted, so there ought to be no need for a flush
111 124 operation. But as the reference counting is optional, means to cancel
112 125 existing work items are also included:
113 126  
114 127 slow_work_cancel(&myitem);
  128 + delayed_slow_work_cancel(&myitem);
115 129  
116 130 can be used to cancel pending work. The above cancel functions wait for
117 131 existing work to have been executed (or prevent execution of it, depending
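
Putting the documented calls together, a minimal caller of the new API might
look like the following sketch (my_ops, its no-op reference helpers and the
five-second delay are illustrative; it assumes slow_work_register_user() has
already been called, and it elides error handling and any further ops fields
such as the module owner):

	static int my_get_ref(struct slow_work *work)
	{
		return 0;		/* reference counting is optional */
	}

	static void my_put_ref(struct slow_work *work)
	{
	}

	static void my_execute(struct slow_work *work)
	{
		/* the actual (sleepable) work goes here */
	}

	static const struct slow_work_ops my_ops = {
		.get_ref	= my_get_ref,
		.put_ref	= my_put_ref,
		.execute	= my_execute,
	};

	static struct delayed_slow_work myitem;

	delayed_slow_work_init(&myitem, &my_ops);
	ret = delayed_slow_work_enqueue(&myitem, 5 * HZ);
	...
	delayed_slow_work_cancel(&myitem);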
include/linux/slow-work.h
... ... @@ -17,6 +17,7 @@
17 17 #ifdef CONFIG_SLOW_WORK
18 18  
19 19 #include <linux/sysctl.h>
  20 +#include <linux/timer.h>
20 21  
21 22 struct slow_work;
22 23  
23 24  
... ... @@ -52,10 +53,16 @@
52 53 #define SLOW_WORK_ENQ_DEFERRED 2 /* item enqueue deferred */
53 54 #define SLOW_WORK_VERY_SLOW 3 /* item is very slow */
54 55 #define SLOW_WORK_CANCELLING 4 /* item is being cancelled, don't enqueue */
  56 +#define SLOW_WORK_DELAYED 5 /* item is struct delayed_slow_work with active timer */
55 57 const struct slow_work_ops *ops; /* operations table for this item */
56 58 struct list_head link; /* link in queue */
57 59 };
58 60  
  61 +struct delayed_slow_work {
  62 + struct slow_work work;
  63 + struct timer_list timer;
  64 +};
  65 +
59 66 /**
60 67 * slow_work_init - Initialise a slow work item
61 68 * @work: The work item to initialise
... ... @@ -72,6 +79,20 @@
72 79 }
73 80  
74 81 /**
  82 + * delayed_slow_work_init - Initialise a delayed slow work item
  83 + * @dwork: The delayed work item to initialise
  84 + * @ops: The operations to use to handle the slow work item
  85 + *
  86 + * Initialise a delayed slow work item.
  87 + */
  88 +static inline void delayed_slow_work_init(struct delayed_slow_work *dwork,
  89 + const struct slow_work_ops *ops)
  90 +{
  91 + init_timer(&dwork->timer);
  92 + slow_work_init(&dwork->work, ops);
  93 +}
  94 +
  95 +/**
75 96 * vslow_work_init - Initialise a very slow work item
76 97 * @work: The work item to initialise
77 98 * @ops: The operations to use to handle the slow work item
... ... @@ -92,6 +113,14 @@
92 113 extern void slow_work_cancel(struct slow_work *work);
93 114 extern int slow_work_register_user(struct module *owner);
94 115 extern void slow_work_unregister_user(struct module *owner);
  116 +
  117 +extern int delayed_slow_work_enqueue(struct delayed_slow_work *dwork,
  118 + unsigned long delay);
  119 +
  120 +static inline void delayed_slow_work_cancel(struct delayed_slow_work *dwork)
  121 +{
  122 + slow_work_cancel(&dwork->work);
  123 +}
95 124  
96 125 #ifdef CONFIG_SYSCTL
97 126 extern ctl_table slow_work_sysctls[];
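
Since struct delayed_slow_work merely embeds a struct slow_work, a caller
would normally nest it in a larger object and recover that object in the
callbacks with container_of(), just as the implementation below does with
the work pointer (struct my_object and its execute routine here are
hypothetical):

	struct my_object {
		struct delayed_slow_work dwork;
		/* ... caller-private state ... */
	};

	static void my_execute(struct slow_work *work)
	{
		struct delayed_slow_work *dwork =
			container_of(work, struct delayed_slow_work, work);
		struct my_object *obj =
			container_of(dwork, struct my_object, dwork);

		/* operate on obj */
	}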
kernel/slow-work.c
... ... @@ -406,11 +406,40 @@
406 406 bool wait = true, put = false;
407 407  
408 408 set_bit(SLOW_WORK_CANCELLING, &work->flags);
  409 + smp_mb();
409 410  
  411 + /* if the work item is a delayed work item with an active timer, we
  412 + * need to wait for the timer to finish _before_ getting the spinlock,
  413 + * lest we deadlock against the timer routine
  414 + *
  415 + * the timer routine will leave DELAYED set if it notices the
  416 + * CANCELLING flag in time
  417 + */
  418 + if (test_bit(SLOW_WORK_DELAYED, &work->flags)) {
  419 + struct delayed_slow_work *dwork =
  420 + container_of(work, struct delayed_slow_work, work);
  421 + del_timer_sync(&dwork->timer);
  422 + }
  423 +
410 424 spin_lock_irq(&slow_work_queue_lock);
411 425  
412   - if (test_bit(SLOW_WORK_PENDING, &work->flags) &&
413   - !list_empty(&work->link)) {
  426 + if (test_bit(SLOW_WORK_DELAYED, &work->flags)) {
  427 + /* the timer routine aborted or never happened, so we are left
  428 + * holding the timer's reference on the item and should just
  429 + * drop the pending flag and wait for any ongoing execution to
  430 + * finish */
  431 + struct delayed_slow_work *dwork =
  432 + container_of(work, struct delayed_slow_work, work);
  433 +
  434 + BUG_ON(timer_pending(&dwork->timer));
  435 + BUG_ON(!list_empty(&work->link));
  436 +
  437 + clear_bit(SLOW_WORK_DELAYED, &work->flags);
  438 + put = true;
  439 + clear_bit(SLOW_WORK_PENDING, &work->flags);
  440 +
  441 + } else if (test_bit(SLOW_WORK_PENDING, &work->flags) &&
  442 + !list_empty(&work->link)) {
414 443 /* the link in the pending queue holds a reference on the item
415 444 * that we will need to release */
416 445 list_del_init(&work->link);
... ... @@ -439,6 +468,102 @@
439 468 slow_work_put_ref(work);
440 469 }
441 470 EXPORT_SYMBOL(slow_work_cancel);
  471 +
  472 +/*
  473 + * Handle expiry of the delay timer, indicating that a delayed slow work item
  474 + * should now be queued if not cancelled
  475 + */
  476 +static void delayed_slow_work_timer(unsigned long data)
  477 +{
  478 + struct slow_work *work = (struct slow_work *) data;
  479 + unsigned long flags;
  480 + bool queued = false, put = false;
  481 +
  482 + spin_lock_irqsave(&slow_work_queue_lock, flags);
  483 + if (likely(!test_bit(SLOW_WORK_CANCELLING, &work->flags))) {
  484 + clear_bit(SLOW_WORK_DELAYED, &work->flags);
  485 +
  486 + if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) {
  487 + /* we discard the reference the timer was holding in
  488 + * favour of the one the executor holds */
  489 + set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags);
  490 + put = true;
  491 + } else {
  492 + if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
  493 + list_add_tail(&work->link, &vslow_work_queue);
  494 + else
  495 + list_add_tail(&work->link, &slow_work_queue);
  496 + queued = true;
  497 + }
  498 + }
  499 +
  500 + spin_unlock_irqrestore(&slow_work_queue_lock, flags);
  501 + if (put)
  502 + slow_work_put_ref(work);
  503 + if (queued)
  504 + wake_up(&slow_work_thread_wq);
  505 +}
  506 +
  507 +/**
  508 + * delayed_slow_work_enqueue - Schedule a delayed slow work item for processing
  509 + * @dwork: The delayed work item to queue
  510 + * @delay: When to start executing the work, in jiffies from now
  511 + *
  512 + * This is similar to slow_work_enqueue(), but it adds a delay before the work
  513 + * is actually queued for processing.
  514 + *
  515 + * The item can have delayed processing requested on it whilst it is being
  516 + * executed. The delay will begin immediately, and if it expires before the
  517 + * item finishes executing, the item will be placed back on the queue once
  518 + * execution completes.
  519 + */
  520 +int delayed_slow_work_enqueue(struct delayed_slow_work *dwork,
  521 + unsigned long delay)
  522 +{
  523 + struct slow_work *work = &dwork->work;
  524 + unsigned long flags;
  525 + int ret;
  526 +
  527 + if (delay == 0)
  528 + return slow_work_enqueue(&dwork->work);
  529 +
  530 + BUG_ON(slow_work_user_count <= 0);
  531 + BUG_ON(!work);
  532 + BUG_ON(!work->ops);
  533 +
  534 + if (test_bit(SLOW_WORK_CANCELLING, &work->flags))
  535 + return -ECANCELED;
  536 +
  537 + if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) {
  538 + spin_lock_irqsave(&slow_work_queue_lock, flags);
  539 +
  540 + if (test_bit(SLOW_WORK_CANCELLING, &work->flags))
  541 + goto cancelled;
  542 +
  543 + /* the timer holds a reference whilst it is pending */
  544 + ret = work->ops->get_ref(work);
  545 + if (ret < 0)
  546 + goto cant_get_ref;
  547 +
  548 + if (test_and_set_bit(SLOW_WORK_DELAYED, &work->flags))
  549 + BUG();
  550 + dwork->timer.expires = jiffies + delay;
  551 + dwork->timer.data = (unsigned long) work;
  552 + dwork->timer.function = delayed_slow_work_timer;
  553 + add_timer(&dwork->timer);
  554 +
  555 + spin_unlock_irqrestore(&slow_work_queue_lock, flags);
  556 + }
  557 +
  558 + return 0;
  559 +
  560 +cancelled:
  561 + ret = -ECANCELED;
  562 +cant_get_ref:
  563 + spin_unlock_irqrestore(&slow_work_queue_lock, flags);
  564 + return ret;
  565 +}
  566 +EXPORT_SYMBOL(delayed_slow_work_enqueue);
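
The semantics documented above (a delay that expires whilst the item is still
executing leaves the enqueue deferred until execution finishes) also permit a
self-rearming periodic pattern along these lines; this is a sketch, the HZ
jiffies (one second) interval is arbitrary, and the enqueue's return value
should really be checked:

	static void my_execute(struct slow_work *work)
	{
		struct delayed_slow_work *dwork =
			container_of(work, struct delayed_slow_work, work);

		/* ... do one round of work ... */

		/* rearm: if the timer fires before this function returns,
		 * SLOW_WORK_ENQ_DEFERRED is set and the item is requeued
		 * once execution completes */
		delayed_slow_work_enqueue(dwork, HZ);
	}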
442 567  
443 568 /*
444 569 * Schedule a cull of the thread pool at some time in the near future