Commit 91b745016c12d440386c40fb76ab69c8e08cbc06

Authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  workqueue: remove in_workqueue_context()
  workqueue: Clarify that schedule_on_each_cpu is synchronous
  memory_hotplug: drop spurious calls to flush_scheduled_work()
  shpchp: update workqueue usage
  pciehp: update workqueue usage
  isdn/eicon: don't call flush_scheduled_work() from diva_os_remove_soft_isr()
  workqueue: add and use WQ_MEM_RECLAIM flag
  workqueue: fix HIGHPRI handling in keep_working()
  workqueue: add queue_work and activate_work trace points
  workqueue: prepare for more tracepoints
  workqueue: implement flush[_delayed]_work_sync()
  workqueue: factor out start_flush_work()
  workqueue: cleanup flush/cancel functions
  workqueue: implement alloc_ordered_workqueue()

Fix up trivial conflict in fs/gfs2/main.c as per Tejun

Showing 17 changed files:

Documentation/workqueue.txt
... ... @@ -196,11 +196,11 @@
196 196 suspend operations. Work items on the wq are drained and no
197 197 new work item starts execution until thawed.
198 198  
199   - WQ_RESCUER
  199 + WQ_MEM_RECLAIM
200 200  
201 201 All wq which might be used in the memory reclaim paths _MUST_
202   - have this flag set. This reserves one worker exclusively for
203   - the execution of this wq under memory pressure.
  202 + have this flag set. The wq is guaranteed to have at least one
  203 + execution context regardless of memory pressure.
204 204  
205 205 WQ_HIGHPRI
206 206  
... ... @@ -356,11 +356,11 @@
356 356  
357 357 6. Guidelines
358 358  
359   -* Do not forget to use WQ_RESCUER if a wq may process work items which
360   - are used during memory reclaim. Each wq with WQ_RESCUER set has one
361   - rescuer thread reserved for it. If there is dependency among
362   - multiple work items used during memory reclaim, they should be
363   - queued to separate wq each with WQ_RESCUER.
  359 +* Do not forget to use WQ_MEM_RECLAIM if a wq may process work items
  360 + which are used during memory reclaim. Each wq with WQ_MEM_RECLAIM
  361 + set has an execution context reserved for it. If there is
  362 + dependency among multiple work items used during memory reclaim,
  363 + they should be queued to separate wq each with WQ_MEM_RECLAIM.
364 364  
365 365 * Unless strict ordering is required, there is no need to use ST wq.
366 366  
... ... @@ -368,12 +368,13 @@
368 368 recommended. In most use cases, concurrency level usually stays
369 369 well under the default limit.
370 370  
371   -* A wq serves as a domain for forward progress guarantee (WQ_RESCUER),
372   - flush and work item attributes. Work items which are not involved
373   - in memory reclaim and don't need to be flushed as a part of a group
374   - of work items, and don't require any special attribute, can use one
375   - of the system wq. There is no difference in execution
376   - characteristics between using a dedicated wq and a system wq.
  371 +* A wq serves as a domain for forward progress guarantee
  372 + (WQ_MEM_RECLAIM, flush and work item attributes. Work items which
  373 + are not involved in memory reclaim and don't need to be flushed as a
  374 + part of a group of work items, and don't require any special
  375 + attribute, can use one of the system wq. There is no difference in
  376 + execution characteristics between using a dedicated wq and a system
  377 + wq.
377 378  
378 379 * Unless work items are expected to consume a huge amount of CPU
379 380 cycles, using a bound wq is usually beneficial due to the increased
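
A minimal usage sketch of the renamed flag (the driver name and identifiers below are illustrative, not taken from this merge): any workqueue whose work items may run in the memory-reclaim path should be allocated with WQ_MEM_RECLAIM so the rescuer can keep it making forward progress when new worker threads cannot be created under memory pressure.

    #include <linux/errno.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *foo_reclaim_wq;    /* hypothetical */

    static int foo_init_wq(void)
    {
            /*
             * The flag, not max_active, is what guarantees forward
             * progress under memory pressure; one in-flight item is
             * enough for this example.
             */
            foo_reclaim_wq = alloc_workqueue("foo_reclaim", WQ_MEM_RECLAIM, 1);
            if (!foo_reclaim_wq)
                    return -ENOMEM;
            return 0;
    }
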
drivers/ata/libata-sff.c
... ... @@ -3335,7 +3335,7 @@
3335 3335  
3336 3336 int __init ata_sff_init(void)
3337 3337 {
3338   - ata_sff_wq = alloc_workqueue("ata_sff", WQ_RESCUER, WQ_MAX_ACTIVE);
  3338 + ata_sff_wq = alloc_workqueue("ata_sff", WQ_MEM_RECLAIM, WQ_MAX_ACTIVE);
3339 3339 if (!ata_sff_wq)
3340 3340 return -ENOMEM;
3341 3341  
drivers/isdn/hardware/eicon/divasmain.c
... ... @@ -15,7 +15,6 @@
15 15 #include <asm/uaccess.h>
16 16 #include <asm/io.h>
17 17 #include <linux/ioport.h>
18   -#include <linux/workqueue.h>
19 18 #include <linux/pci.h>
20 19 #include <linux/interrupt.h>
21 20 #include <linux/list.h>
... ... @@ -546,7 +545,6 @@
546 545 void *mem;
547 546  
548 547 tasklet_kill(&pdpc->divas_task);
549   - flush_scheduled_work();
550 548 mem = psoft_isr->object;
551 549 psoft_isr->object = NULL;
552 550 diva_os_free(0, mem);
drivers/pci/hotplug/pciehp.h
... ... @@ -36,6 +36,7 @@
36 36 #include <linux/sched.h> /* signal_pending() */
37 37 #include <linux/pcieport_if.h>
38 38 #include <linux/mutex.h>
  39 +#include <linux/workqueue.h>
39 40  
40 41 #define MY_NAME "pciehp"
41 42  
... ... @@ -44,6 +45,7 @@
44 45 extern int pciehp_debug;
45 46 extern int pciehp_force;
46 47 extern struct workqueue_struct *pciehp_wq;
  48 +extern struct workqueue_struct *pciehp_ordered_wq;
47 49  
48 50 #define dbg(format, arg...) \
49 51 do { \
drivers/pci/hotplug/pciehp_core.c
... ... @@ -43,6 +43,7 @@
43 43 int pciehp_poll_time;
44 44 int pciehp_force;
45 45 struct workqueue_struct *pciehp_wq;
  46 +struct workqueue_struct *pciehp_ordered_wq;
46 47  
47 48 #define DRIVER_VERSION "0.4"
48 49 #define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>"
49 50  
50 51  
51 52  
... ... @@ -340,18 +341,33 @@
340 341 {
341 342 int retval = 0;
342 343  
  344 + pciehp_wq = alloc_workqueue("pciehp", 0, 0);
  345 + if (!pciehp_wq)
  346 + return -ENOMEM;
  347 +
  348 + pciehp_ordered_wq = alloc_ordered_workqueue("pciehp_ordered", 0);
  349 + if (!pciehp_ordered_wq) {
  350 + destroy_workqueue(pciehp_wq);
  351 + return -ENOMEM;
  352 + }
  353 +
343 354 pciehp_firmware_init();
344 355 retval = pcie_port_service_register(&hpdriver_portdrv);
345 356 dbg("pcie_port_service_register = %d\n", retval);
346 357 info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
347   - if (retval)
  358 + if (retval) {
  359 + destroy_workqueue(pciehp_ordered_wq);
  360 + destroy_workqueue(pciehp_wq);
348 361 dbg("Failure to register service\n");
  362 + }
349 363 return retval;
350 364 }
351 365  
352 366 static void __exit pcied_cleanup(void)
353 367 {
354 368 dbg("unload_pciehpd()\n");
  369 + destroy_workqueue(pciehp_ordered_wq);
  370 + destroy_workqueue(pciehp_wq);
355 371 pcie_port_service_unregister(&hpdriver_portdrv);
356 372 info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n");
357 373 }
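
The allocation and teardown shape used above, condensed into a hedged sketch with hypothetical names: a regular workqueue for independent event work next to an ordered one for operations that must run strictly one at a time, each destroyed on every failure path and again on module exit.

    #include <linux/errno.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *foo_event_wq;      /* concurrent event work */
    static struct workqueue_struct *foo_ordered_wq;    /* strictly serialized ops */

    static int foo_wq_init(void)
    {
            /* max_active == 0 selects the default concurrency limit */
            foo_event_wq = alloc_workqueue("foo_event", 0, 0);
            if (!foo_event_wq)
                    return -ENOMEM;

            /* at most one work item executes at a time, in queueing order */
            foo_ordered_wq = alloc_ordered_workqueue("foo_ordered", 0);
            if (!foo_ordered_wq) {
                    destroy_workqueue(foo_event_wq);
                    return -ENOMEM;
            }
            return 0;
    }

    static void foo_wq_exit(void)
    {
            destroy_workqueue(foo_ordered_wq);
            destroy_workqueue(foo_event_wq);
    }
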
drivers/pci/hotplug/pciehp_ctrl.c
... ... @@ -32,7 +32,6 @@
32 32 #include <linux/types.h>
33 33 #include <linux/slab.h>
34 34 #include <linux/pci.h>
35   -#include <linux/workqueue.h>
36 35 #include "../pci.h"
37 36 #include "pciehp.h"
38 37  
... ... @@ -50,7 +49,7 @@
50 49 info->p_slot = p_slot;
51 50 INIT_WORK(&info->work, interrupt_event_handler);
52 51  
53   - schedule_work(&info->work);
  52 + queue_work(pciehp_wq, &info->work);
54 53  
55 54 return 0;
56 55 }
... ... @@ -345,7 +344,7 @@
345 344 kfree(info);
346 345 goto out;
347 346 }
348   - queue_work(pciehp_wq, &info->work);
  347 + queue_work(pciehp_ordered_wq, &info->work);
349 348 out:
350 349 mutex_unlock(&p_slot->lock);
351 350 }
... ... @@ -378,7 +377,7 @@
378 377 if (ATTN_LED(ctrl))
379 378 pciehp_set_attention_status(p_slot, 0);
380 379  
381   - schedule_delayed_work(&p_slot->work, 5*HZ);
  380 + queue_delayed_work(pciehp_wq, &p_slot->work, 5*HZ);
382 381 break;
383 382 case BLINKINGOFF_STATE:
384 383 case BLINKINGON_STATE:
... ... @@ -440,7 +439,7 @@
440 439 else
441 440 p_slot->state = POWERON_STATE;
442 441  
443   - queue_work(pciehp_wq, &info->work);
  442 + queue_work(pciehp_ordered_wq, &info->work);
444 443 }
445 444  
446 445 static void interrupt_event_handler(struct work_struct *work)
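
The conversion above, stated generically (all identifiers hypothetical): once work items ride on a driver-owned workqueue instead of the shared system one, teardown only has to flush that workqueue, which is what lets the flush_scheduled_work() calls disappear elsewhere in this series.

    #include <linux/kernel.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *foo_wq;    /* hypothetical driver wq */

    struct foo_event {
            struct work_struct work;
            int type;
    };

    static void foo_event_handler(struct work_struct *work)
    {
            struct foo_event *ev = container_of(work, struct foo_event, work);

            /* ... process ev->type ... */
            kfree(ev);
    }

    static void foo_queue_event(struct foo_event *ev)
    {
            INIT_WORK(&ev->work, foo_event_handler);
            queue_work(foo_wq, &ev->work);          /* was: schedule_work() */
    }

    static void foo_quiesce(void)
    {
            flush_workqueue(foo_wq);                /* was: flush_scheduled_work() */
    }
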
drivers/pci/hotplug/pciehp_hpc.c
... ... @@ -41,8 +41,6 @@
41 41 #include "../pci.h"
42 42 #include "pciehp.h"
43 43  
44   -static atomic_t pciehp_num_controllers = ATOMIC_INIT(0);
45   -
46 44 static inline int pciehp_readw(struct controller *ctrl, int reg, u16 *value)
47 45 {
48 46 struct pci_dev *dev = ctrl->pcie->port;
49 47  
... ... @@ -805,8 +803,8 @@
805 803 {
806 804 struct slot *slot = ctrl->slot;
807 805 cancel_delayed_work(&slot->work);
808   - flush_scheduled_work();
809 806 flush_workqueue(pciehp_wq);
  807 + flush_workqueue(pciehp_ordered_wq);
810 808 kfree(slot);
811 809 }
812 810  
... ... @@ -912,16 +910,6 @@
912 910 /* Disable sotfware notification */
913 911 pcie_disable_notification(ctrl);
914 912  
915   - /*
916   - * If this is the first controller to be initialized,
917   - * initialize the pciehp work queue
918   - */
919   - if (atomic_add_return(1, &pciehp_num_controllers) == 1) {
920   - pciehp_wq = create_singlethread_workqueue("pciehpd");
921   - if (!pciehp_wq)
922   - goto abort_ctrl;
923   - }
924   -
925 913 ctrl_info(ctrl, "HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n",
926 914 pdev->vendor, pdev->device, pdev->subsystem_vendor,
927 915 pdev->subsystem_device);
... ... @@ -941,12 +929,6 @@
941 929 {
942 930 pcie_shutdown_notification(ctrl);
943 931 pcie_cleanup_slot(ctrl);
944   - /*
945   - * If this is the last controller to be released, destroy the
946   - * pciehp work queue
947   - */
948   - if (atomic_dec_and_test(&pciehp_num_controllers))
949   - destroy_workqueue(pciehp_wq);
950 932 kfree(ctrl);
951 933 }
drivers/pci/hotplug/shpchp.h
... ... @@ -35,6 +35,7 @@
35 35 #include <linux/delay.h>
36 36 #include <linux/sched.h> /* signal_pending(), struct timer_list */
37 37 #include <linux/mutex.h>
  38 +#include <linux/workqueue.h>
38 39  
39 40 #if !defined(MODULE)
40 41 #define MY_NAME "shpchp"
... ... @@ -46,6 +47,7 @@
46 47 extern int shpchp_poll_time;
47 48 extern int shpchp_debug;
48 49 extern struct workqueue_struct *shpchp_wq;
  50 +extern struct workqueue_struct *shpchp_ordered_wq;
49 51  
50 52 #define dbg(format, arg...) \
51 53 do { \
drivers/pci/hotplug/shpchp_core.c
... ... @@ -33,7 +33,6 @@
33 33 #include <linux/types.h>
34 34 #include <linux/slab.h>
35 35 #include <linux/pci.h>
36   -#include <linux/workqueue.h>
37 36 #include "shpchp.h"
38 37  
39 38 /* Global variables */
... ... @@ -41,6 +40,7 @@
41 40 int shpchp_poll_mode;
42 41 int shpchp_poll_time;
43 42 struct workqueue_struct *shpchp_wq;
  43 +struct workqueue_struct *shpchp_ordered_wq;
44 44  
45 45 #define DRIVER_VERSION "0.4"
46 46 #define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>"
47 47  
... ... @@ -174,8 +174,8 @@
174 174 slot = list_entry(tmp, struct slot, slot_list);
175 175 list_del(&slot->slot_list);
176 176 cancel_delayed_work(&slot->work);
177   - flush_scheduled_work();
178 177 flush_workqueue(shpchp_wq);
  178 + flush_workqueue(shpchp_ordered_wq);
179 179 pci_hp_deregister(slot->hotplug_slot);
180 180 }
181 181 }
182 182  
... ... @@ -360,9 +360,23 @@
360 360 {
361 361 int retval = 0;
362 362  
  363 + shpchp_wq = alloc_ordered_workqueue("shpchp", 0);
  364 + if (!shpchp_wq)
  365 + return -ENOMEM;
  366 +
  367 + shpchp_ordered_wq = alloc_ordered_workqueue("shpchp_ordered", 0);
  368 + if (!shpchp_ordered_wq) {
  369 + destroy_workqueue(shpchp_wq);
  370 + return -ENOMEM;
  371 + }
  372 +
363 373 retval = pci_register_driver(&shpc_driver);
364 374 dbg("%s: pci_register_driver = %d\n", __func__, retval);
365 375 info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
  376 + if (retval) {
  377 + destroy_workqueue(shpchp_ordered_wq);
  378 + destroy_workqueue(shpchp_wq);
  379 + }
366 380 return retval;
367 381 }
368 382  
... ... @@ -370,6 +384,8 @@
370 384 {
371 385 dbg("unload_shpchpd()\n");
372 386 pci_unregister_driver(&shpc_driver);
  387 + destroy_workqueue(shpchp_ordered_wq);
  388 + destroy_workqueue(shpchp_wq);
373 389 info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n");
374 390 }
375 391  
drivers/pci/hotplug/shpchp_ctrl.c
... ... @@ -32,7 +32,6 @@
32 32 #include <linux/types.h>
33 33 #include <linux/slab.h>
34 34 #include <linux/pci.h>
35   -#include <linux/workqueue.h>
36 35 #include "../pci.h"
37 36 #include "shpchp.h"
38 37  
... ... @@ -52,7 +51,7 @@
52 51 info->p_slot = p_slot;
53 52 INIT_WORK(&info->work, interrupt_event_handler);
54 53  
55   - schedule_work(&info->work);
  54 + queue_work(shpchp_wq, &info->work);
56 55  
57 56 return 0;
58 57 }
... ... @@ -457,7 +456,7 @@
457 456 kfree(info);
458 457 goto out;
459 458 }
460   - queue_work(shpchp_wq, &info->work);
  459 + queue_work(shpchp_ordered_wq, &info->work);
461 460 out:
462 461 mutex_unlock(&p_slot->lock);
463 462 }
... ... @@ -505,7 +504,7 @@
505 504 p_slot->hpc_ops->green_led_blink(p_slot);
506 505 p_slot->hpc_ops->set_attention_status(p_slot, 0);
507 506  
508   - schedule_delayed_work(&p_slot->work, 5*HZ);
  507 + queue_delayed_work(shpchp_wq, &p_slot->work, 5*HZ);
509 508 break;
510 509 case BLINKINGOFF_STATE:
511 510 case BLINKINGON_STATE:
drivers/pci/hotplug/shpchp_hpc.c
... ... @@ -179,8 +179,6 @@
179 179 #define SLOT_EVENT_LATCH 0x2
180 180 #define SLOT_SERR_INT_MASK 0x3
181 181  
182   -static atomic_t shpchp_num_controllers = ATOMIC_INIT(0);
183   -
184 182 static irqreturn_t shpc_isr(int irq, void *dev_id);
185 183 static void start_int_poll_timer(struct controller *ctrl, int sec);
186 184 static int hpc_check_cmd_status(struct controller *ctrl);
... ... @@ -614,13 +612,6 @@
614 612  
615 613 iounmap(ctrl->creg);
616 614 release_mem_region(ctrl->mmio_base, ctrl->mmio_size);
617   -
618   - /*
619   - * If this is the last controller to be released, destroy the
620   - * shpchpd work queue
621   - */
622   - if (atomic_dec_and_test(&shpchp_num_controllers))
623   - destroy_workqueue(shpchp_wq);
624 615 }
625 616  
626 617 static int hpc_power_on_slot(struct slot * slot)
... ... @@ -1077,9 +1068,8 @@
1077 1068  
1078 1069 rc = request_irq(ctrl->pci_dev->irq, shpc_isr, IRQF_SHARED,
1079 1070 MY_NAME, (void *)ctrl);
1080   - ctrl_dbg(ctrl, "request_irq %d for hpc%d (returns %d)\n",
1081   - ctrl->pci_dev->irq,
1082   - atomic_read(&shpchp_num_controllers), rc);
  1071 + ctrl_dbg(ctrl, "request_irq %d (returns %d)\n",
  1072 + ctrl->pci_dev->irq, rc);
1083 1073 if (rc) {
1084 1074 ctrl_err(ctrl, "Can't get irq %d for the hotplug "
1085 1075 "controller\n", ctrl->pci_dev->irq);
... ... @@ -1090,18 +1080,6 @@
1090 1080  
1091 1081 shpc_get_max_bus_speed(ctrl);
1092 1082 shpc_get_cur_bus_speed(ctrl);
1093   -
1094   - /*
1095   - * If this is the first controller to be initialized,
1096   - * initialize the shpchpd work queue
1097   - */
1098   - if (atomic_add_return(1, &shpchp_num_controllers) == 1) {
1099   - shpchp_wq = create_singlethread_workqueue("shpchpd");
1100   - if (!shpchp_wq) {
1101   - rc = -ENOMEM;
1102   - goto abort_iounmap;
1103   - }
1104   - }
1105 1083  
1106 1084 /*
1107 1085 * Unmask all event interrupts of all slots
fs/gfs2/main.c
... ... @@ -144,7 +144,7 @@
144 144  
145 145 error = -ENOMEM;
146 146 gfs_recovery_wq = alloc_workqueue("gfs_recovery",
147   - WQ_RESCUER | WQ_FREEZEABLE, 0);
  147 + WQ_MEM_RECLAIM | WQ_FREEZEABLE, 0);
148 148 if (!gfs_recovery_wq)
149 149 goto fail_wq;
150 150  
fs/xfs/linux-2.6/xfs_buf.c
... ... @@ -1921,7 +1921,7 @@
1921 1921 goto out;
1922 1922  
1923 1923 xfslogd_workqueue = alloc_workqueue("xfslogd",
1924   - WQ_RESCUER | WQ_HIGHPRI, 1);
  1924 + WQ_MEM_RECLAIM | WQ_HIGHPRI, 1);
1925 1925 if (!xfslogd_workqueue)
1926 1926 goto out_free_buf_zone;
1927 1927  
include/linux/workqueue.h
... ... @@ -243,11 +243,12 @@
243 243 WQ_NON_REENTRANT = 1 << 0, /* guarantee non-reentrance */
244 244 WQ_UNBOUND = 1 << 1, /* not bound to any cpu */
245 245 WQ_FREEZEABLE = 1 << 2, /* freeze during suspend */
246   - WQ_RESCUER = 1 << 3, /* has an rescue worker */
  246 + WQ_MEM_RECLAIM = 1 << 3, /* may be used for memory reclaim */
247 247 WQ_HIGHPRI = 1 << 4, /* high priority */
248 248 WQ_CPU_INTENSIVE = 1 << 5, /* cpu instensive workqueue */
249 249  
250 250 WQ_DYING = 1 << 6, /* internal: workqueue is dying */
  251 + WQ_RESCUER = 1 << 7, /* internal: workqueue has rescuer */
251 252  
252 253 WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
253 254 WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */
254 255  
255 256  
256 257  
... ... @@ -306,12 +307,30 @@
306 307 __alloc_workqueue_key((name), (flags), (max_active), NULL, NULL)
307 308 #endif
308 309  
  310 +/**
  311 + * alloc_ordered_workqueue - allocate an ordered workqueue
  312 + * @name: name of the workqueue
  313 + * @flags: WQ_* flags (only WQ_FREEZEABLE and WQ_MEM_RECLAIM are meaningful)
  314 + *
  315 + * Allocate an ordered workqueue. An ordered workqueue executes at
  316 + * most one work item at any given time in the queued order. They are
  317 + * implemented as unbound workqueues with @max_active of one.
  318 + *
  319 + * RETURNS:
  320 + * Pointer to the allocated workqueue on success, %NULL on failure.
  321 + */
  322 +static inline struct workqueue_struct *
  323 +alloc_ordered_workqueue(const char *name, unsigned int flags)
  324 +{
  325 + return alloc_workqueue(name, WQ_UNBOUND | flags, 1);
  326 +}
  327 +
309 328 #define create_workqueue(name) \
310   - alloc_workqueue((name), WQ_RESCUER, 1)
  329 + alloc_workqueue((name), WQ_MEM_RECLAIM, 1)
311 330 #define create_freezeable_workqueue(name) \
312   - alloc_workqueue((name), WQ_FREEZEABLE | WQ_UNBOUND | WQ_RESCUER, 1)
  331 + alloc_workqueue((name), WQ_FREEZEABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
313 332 #define create_singlethread_workqueue(name) \
314   - alloc_workqueue((name), WQ_UNBOUND | WQ_RESCUER, 1)
  333 + alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
315 334  
316 335 extern void destroy_workqueue(struct workqueue_struct *wq);
317 336  
... ... @@ -325,7 +344,6 @@
325 344  
326 345 extern void flush_workqueue(struct workqueue_struct *wq);
327 346 extern void flush_scheduled_work(void);
328   -extern void flush_delayed_work(struct delayed_work *work);
329 347  
330 348 extern int schedule_work(struct work_struct *work);
331 349 extern int schedule_work_on(int cpu, struct work_struct *work);
332 350  
... ... @@ -337,9 +355,14 @@
337 355  
338 356 int execute_in_process_context(work_func_t fn, struct execute_work *);
339 357  
340   -extern int flush_work(struct work_struct *work);
341   -extern int cancel_work_sync(struct work_struct *work);
  358 +extern bool flush_work(struct work_struct *work);
  359 +extern bool flush_work_sync(struct work_struct *work);
  360 +extern bool cancel_work_sync(struct work_struct *work);
342 361  
  362 +extern bool flush_delayed_work(struct delayed_work *dwork);
  363 +extern bool flush_delayed_work_sync(struct delayed_work *work);
  364 +extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
  365 +
343 366 extern void workqueue_set_max_active(struct workqueue_struct *wq,
344 367 int max_active);
345 368 extern bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq);
346 369  
... ... @@ -352,9 +375,9 @@
352 375 * it returns 1 and the work doesn't re-arm itself. Run flush_workqueue() or
353 376 * cancel_work_sync() to wait on it.
354 377 */
355   -static inline int cancel_delayed_work(struct delayed_work *work)
  378 +static inline bool cancel_delayed_work(struct delayed_work *work)
356 379 {
357   - int ret;
  380 + bool ret;
358 381  
359 382 ret = del_timer_sync(&work->timer);
360 383 if (ret)
361 384  
... ... @@ -367,9 +390,9 @@
367 390 * if it returns 0 the timer function may be running and the queueing is in
368 391 * progress.
369 392 */
370   -static inline int __cancel_delayed_work(struct delayed_work *work)
  393 +static inline bool __cancel_delayed_work(struct delayed_work *work)
371 394 {
372   - int ret;
  395 + bool ret;
373 396  
374 397 ret = del_timer(&work->timer);
375 398 if (ret)
... ... @@ -377,8 +400,6 @@
377 400 return ret;
378 401 }
379 402  
380   -extern int cancel_delayed_work_sync(struct delayed_work *work);
381   -
382 403 /* Obsolete. use cancel_delayed_work_sync() */
383 404 static inline
384 405 void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
... ... @@ -408,10 +429,6 @@
408 429 extern bool freeze_workqueues_busy(void);
409 430 extern void thaw_workqueues(void);
410 431 #endif /* CONFIG_FREEZER */
411   -
412   -#ifdef CONFIG_LOCKDEP
413   -int in_workqueue_context(struct workqueue_struct *wq);
414   -#endif
415 432  
416 433 #endif
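
A hedged sketch of how a create_singlethread_workqueue() user might pick up the new helpers (names are made up for illustration): alloc_ordered_workqueue() is the explicit way to ask for one-at-a-time execution in queueing order, and the bool-returning cancel/flush helpers cover a self-rearming delayed work cleanly.

    #include <linux/errno.h>
    #include <linux/jiffies.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *foo_wq;    /* illustrative */
    static struct delayed_work foo_poll_work;

    static void foo_poll_fn(struct work_struct *work)
    {
            /* ... poll the hardware, then re-arm ... */
            queue_delayed_work(foo_wq, &foo_poll_work, HZ);
    }

    static int foo_start(void)
    {
            /* was: foo_wq = create_singlethread_workqueue("foo"); */
            foo_wq = alloc_ordered_workqueue("foo", 0);
            if (!foo_wq)
                    return -ENOMEM;

            INIT_DELAYED_WORK(&foo_poll_work, foo_poll_fn);
            queue_delayed_work(foo_wq, &foo_poll_work, HZ);
            return 0;
    }

    static void foo_stop(void)
    {
            /*
             * Stop the self-rearming poller and wait for it; use
             * flush_delayed_work_sync() instead when the pending work
             * should still run once before the caller proceeds.
             */
            cancel_delayed_work_sync(&foo_poll_work);
            destroy_workqueue(foo_wq);
    }
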
include/trace/events/workqueue.h
... ... @@ -7,38 +7,83 @@
7 7 #include <linux/tracepoint.h>
8 8 #include <linux/workqueue.h>
9 9  
  10 +DECLARE_EVENT_CLASS(workqueue_work,
  11 +
  12 + TP_PROTO(struct work_struct *work),
  13 +
  14 + TP_ARGS(work),
  15 +
  16 + TP_STRUCT__entry(
  17 + __field( void *, work )
  18 + ),
  19 +
  20 + TP_fast_assign(
  21 + __entry->work = work;
  22 + ),
  23 +
  24 + TP_printk("work struct %p", __entry->work)
  25 +);
  26 +
10 27 /**
11   - * workqueue_execute_start - called immediately before the workqueue callback
  28 + * workqueue_queue_work - called when a work gets queued
  29 + * @req_cpu: the requested cpu
  30 + * @cwq: pointer to struct cpu_workqueue_struct
12 31 * @work: pointer to struct work_struct
13 32 *
14   - * Allows to track workqueue execution.
  33 + * This event occurs when a work is queued immediately or once a
  34 + * delayed work is actually queued on a workqueue (ie: once the delay
  35 + * has been reached).
15 36 */
16   -TRACE_EVENT(workqueue_execute_start,
  37 +TRACE_EVENT(workqueue_queue_work,
17 38  
18   - TP_PROTO(struct work_struct *work),
  39 + TP_PROTO(unsigned int req_cpu, struct cpu_workqueue_struct *cwq,
  40 + struct work_struct *work),
19 41  
20   - TP_ARGS(work),
  42 + TP_ARGS(req_cpu, cwq, work),
21 43  
22 44 TP_STRUCT__entry(
23 45 __field( void *, work )
24 46 __field( void *, function)
  47 + __field( void *, workqueue)
  48 + __field( unsigned int, req_cpu )
  49 + __field( unsigned int, cpu )
25 50 ),
26 51  
27 52 TP_fast_assign(
28 53 __entry->work = work;
29 54 __entry->function = work->func;
  55 + __entry->workqueue = cwq->wq;
  56 + __entry->req_cpu = req_cpu;
  57 + __entry->cpu = cwq->gcwq->cpu;
30 58 ),
31 59  
32   - TP_printk("work struct %p: function %pf", __entry->work, __entry->function)
  60 + TP_printk("work struct=%p function=%pf workqueue=%p req_cpu=%u cpu=%u",
  61 + __entry->work, __entry->function, __entry->workqueue,
  62 + __entry->req_cpu, __entry->cpu)
33 63 );
34 64  
35 65 /**
36   - * workqueue_execute_end - called immediately before the workqueue callback
  66 + * workqueue_activate_work - called when a work gets activated
37 67 * @work: pointer to struct work_struct
38 68 *
  69 + * This event occurs when a queued work is put on the active queue,
  70 + * which happens immediately after queueing unless @max_active limit
  71 + * is reached.
  72 + */
  73 +DEFINE_EVENT(workqueue_work, workqueue_activate_work,
  74 +
  75 + TP_PROTO(struct work_struct *work),
  76 +
  77 + TP_ARGS(work)
  78 +);
  79 +
  80 +/**
  81 + * workqueue_execute_start - called immediately before the workqueue callback
  82 + * @work: pointer to struct work_struct
  83 + *
39 84 * Allows to track workqueue execution.
40 85 */
41   -TRACE_EVENT(workqueue_execute_end,
  86 +TRACE_EVENT(workqueue_execute_start,
42 87  
43 88 TP_PROTO(struct work_struct *work),
44 89  
45 90  
46 91  
47 92  
... ... @@ -46,15 +91,29 @@
46 91  
47 92 TP_STRUCT__entry(
48 93 __field( void *, work )
  94 + __field( void *, function)
49 95 ),
50 96  
51 97 TP_fast_assign(
52 98 __entry->work = work;
  99 + __entry->function = work->func;
53 100 ),
54 101  
55   - TP_printk("work struct %p", __entry->work)
  102 + TP_printk("work struct %p: function %pf", __entry->work, __entry->function)
56 103 );
57 104  
  105 +/**
  106 + * workqueue_execute_end - called immediately before the workqueue callback
  107 + * @work: pointer to struct work_struct
  108 + *
  109 + * Allows to track workqueue execution.
  110 + */
  111 +DEFINE_EVENT(workqueue_work, workqueue_execute_end,
  112 +
  113 + TP_PROTO(struct work_struct *work),
  114 +
  115 + TP_ARGS(work)
  116 +);
58 117  
59 118 #endif /* _TRACE_WORKQUEUE_H */
60 119  
kernel/workqueue.c
... ... @@ -42,9 +42,6 @@
42 42 #include <linux/lockdep.h>
43 43 #include <linux/idr.h>
44 44  
45   -#define CREATE_TRACE_POINTS
46   -#include <trace/events/workqueue.h>
47   -
48 45 #include "workqueue_sched.h"
49 46  
50 47 enum {
... ... @@ -257,6 +254,9 @@
257 254 EXPORT_SYMBOL_GPL(system_nrt_wq);
258 255 EXPORT_SYMBOL_GPL(system_unbound_wq);
259 256  
  257 +#define CREATE_TRACE_POINTS
  258 +#include <trace/events/workqueue.h>
  259 +
260 260 #define for_each_busy_worker(worker, i, pos, gcwq) \
261 261 for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++) \
262 262 hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)
... ... @@ -310,21 +310,6 @@
310 310 (cpu) < WORK_CPU_NONE; \
311 311 (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, (wq)))
312 312  
313   -#ifdef CONFIG_LOCKDEP
314   -/**
315   - * in_workqueue_context() - in context of specified workqueue?
316   - * @wq: the workqueue of interest
317   - *
318   - * Checks lockdep state to see if the current task is executing from
319   - * within a workqueue item. This function exists only if lockdep is
320   - * enabled.
321   - */
322   -int in_workqueue_context(struct workqueue_struct *wq)
323   -{
324   - return lock_is_held(&wq->lockdep_map);
325   -}
326   -#endif
327   -
328 313 #ifdef CONFIG_DEBUG_OBJECTS_WORK
329 314  
330 315 static struct debug_obj_descr work_debug_descr;
... ... @@ -604,7 +589,9 @@
604 589 {
605 590 atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);
606 591  
607   - return !list_empty(&gcwq->worklist) && atomic_read(nr_running) <= 1;
  592 + return !list_empty(&gcwq->worklist) &&
  593 + (atomic_read(nr_running) <= 1 ||
  594 + gcwq->flags & GCWQ_HIGHPRI_PENDING);
608 595 }
609 596  
610 597 /* Do we need a new worker? Called from manager. */
... ... @@ -997,6 +984,7 @@
997 984  
998 985 /* gcwq determined, get cwq and queue */
999 986 cwq = get_cwq(gcwq->cpu, wq);
  987 + trace_workqueue_queue_work(cpu, cwq, work);
1000 988  
1001 989 BUG_ON(!list_empty(&work->entry));
1002 990  
... ... @@ -1004,6 +992,7 @@
1004 992 work_flags = work_color_to_flags(cwq->work_color);
1005 993  
1006 994 if (likely(cwq->nr_active < cwq->max_active)) {
  995 + trace_workqueue_activate_work(work);
1007 996 cwq->nr_active++;
1008 997 worklist = gcwq_determine_ins_pos(gcwq, cwq);
1009 998 } else {
... ... @@ -1679,6 +1668,7 @@
1679 1668 struct work_struct, entry);
1680 1669 struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq);
1681 1670  
  1671 + trace_workqueue_activate_work(work);
1682 1672 move_linked_works(work, pos, NULL);
1683 1673 __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
1684 1674 cwq->nr_active++;
1685 1675  
1686 1676  
... ... @@ -2326,27 +2316,17 @@
2326 2316 }
2327 2317 EXPORT_SYMBOL_GPL(flush_workqueue);
2328 2318  
2329   -/**
2330   - * flush_work - block until a work_struct's callback has terminated
2331   - * @work: the work which is to be flushed
2332   - *
2333   - * Returns false if @work has already terminated.
2334   - *
2335   - * It is expected that, prior to calling flush_work(), the caller has
2336   - * arranged for the work to not be requeued, otherwise it doesn't make
2337   - * sense to use this function.
2338   - */
2339   -int flush_work(struct work_struct *work)
  2319 +static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
  2320 + bool wait_executing)
2340 2321 {
2341 2322 struct worker *worker = NULL;
2342 2323 struct global_cwq *gcwq;
2343 2324 struct cpu_workqueue_struct *cwq;
2344   - struct wq_barrier barr;
2345 2325  
2346 2326 might_sleep();
2347 2327 gcwq = get_work_gcwq(work);
2348 2328 if (!gcwq)
2349   - return 0;
  2329 + return false;
2350 2330  
2351 2331 spin_lock_irq(&gcwq->lock);
2352 2332 if (!list_empty(&work->entry)) {
2353 2333  
2354 2334  
2355 2335  
2356 2336  
2357 2337  
2358 2338  
... ... @@ -2359,28 +2339,127 @@
2359 2339 cwq = get_work_cwq(work);
2360 2340 if (unlikely(!cwq || gcwq != cwq->gcwq))
2361 2341 goto already_gone;
2362   - } else {
  2342 + } else if (wait_executing) {
2363 2343 worker = find_worker_executing_work(gcwq, work);
2364 2344 if (!worker)
2365 2345 goto already_gone;
2366 2346 cwq = worker->current_cwq;
2367   - }
  2347 + } else
  2348 + goto already_gone;
2368 2349  
2369   - insert_wq_barrier(cwq, &barr, work, worker);
  2350 + insert_wq_barrier(cwq, barr, work, worker);
2370 2351 spin_unlock_irq(&gcwq->lock);
2371 2352  
2372 2353 lock_map_acquire(&cwq->wq->lockdep_map);
2373 2354 lock_map_release(&cwq->wq->lockdep_map);
2374   -
2375   - wait_for_completion(&barr.done);
2376   - destroy_work_on_stack(&barr.work);
2377   - return 1;
  2355 + return true;
2378 2356 already_gone:
2379 2357 spin_unlock_irq(&gcwq->lock);
2380   - return 0;
  2358 + return false;
2381 2359 }
  2360 +
  2361 +/**
  2362 + * flush_work - wait for a work to finish executing the last queueing instance
  2363 + * @work: the work to flush
  2364 + *
  2365 + * Wait until @work has finished execution. This function considers
  2366 + * only the last queueing instance of @work. If @work has been
  2367 + * enqueued across different CPUs on a non-reentrant workqueue or on
  2368 + * multiple workqueues, @work might still be executing on return on
  2369 + * some of the CPUs from earlier queueing.
  2370 + *
  2371 + * If @work was queued only on a non-reentrant, ordered or unbound
  2372 + * workqueue, @work is guaranteed to be idle on return if it hasn't
  2373 + * been requeued since flush started.
  2374 + *
  2375 + * RETURNS:
  2376 + * %true if flush_work() waited for the work to finish execution,
  2377 + * %false if it was already idle.
  2378 + */
  2379 +bool flush_work(struct work_struct *work)
  2380 +{
  2381 + struct wq_barrier barr;
  2382 +
  2383 + if (start_flush_work(work, &barr, true)) {
  2384 + wait_for_completion(&barr.done);
  2385 + destroy_work_on_stack(&barr.work);
  2386 + return true;
  2387 + } else
  2388 + return false;
  2389 +}
2382 2390 EXPORT_SYMBOL_GPL(flush_work);
2383 2391  
  2392 +static bool wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work)
  2393 +{
  2394 + struct wq_barrier barr;
  2395 + struct worker *worker;
  2396 +
  2397 + spin_lock_irq(&gcwq->lock);
  2398 +
  2399 + worker = find_worker_executing_work(gcwq, work);
  2400 + if (unlikely(worker))
  2401 + insert_wq_barrier(worker->current_cwq, &barr, work, worker);
  2402 +
  2403 + spin_unlock_irq(&gcwq->lock);
  2404 +
  2405 + if (unlikely(worker)) {
  2406 + wait_for_completion(&barr.done);
  2407 + destroy_work_on_stack(&barr.work);
  2408 + return true;
  2409 + } else
  2410 + return false;
  2411 +}
  2412 +
  2413 +static bool wait_on_work(struct work_struct *work)
  2414 +{
  2415 + bool ret = false;
  2416 + int cpu;
  2417 +
  2418 + might_sleep();
  2419 +
  2420 + lock_map_acquire(&work->lockdep_map);
  2421 + lock_map_release(&work->lockdep_map);
  2422 +
  2423 + for_each_gcwq_cpu(cpu)
  2424 + ret |= wait_on_cpu_work(get_gcwq(cpu), work);
  2425 + return ret;
  2426 +}
  2427 +
  2428 +/**
  2429 + * flush_work_sync - wait until a work has finished execution
  2430 + * @work: the work to flush
  2431 + *
  2432 + * Wait until @work has finished execution. On return, it's
  2433 + * guaranteed that all queueing instances of @work which happened
  2434 + * before this function is called are finished. In other words, if
  2435 + * @work hasn't been requeued since this function was called, @work is
  2436 + * guaranteed to be idle on return.
  2437 + *
  2438 + * RETURNS:
  2439 + * %true if flush_work_sync() waited for the work to finish execution,
  2440 + * %false if it was already idle.
  2441 + */
  2442 +bool flush_work_sync(struct work_struct *work)
  2443 +{
  2444 + struct wq_barrier barr;
  2445 + bool pending, waited;
  2446 +
  2447 + /* we'll wait for executions separately, queue barr only if pending */
  2448 + pending = start_flush_work(work, &barr, false);
  2449 +
  2450 + /* wait for executions to finish */
  2451 + waited = wait_on_work(work);
  2452 +
  2453 + /* wait for the pending one */
  2454 + if (pending) {
  2455 + wait_for_completion(&barr.done);
  2456 + destroy_work_on_stack(&barr.work);
  2457 + }
  2458 +
  2459 + return pending || waited;
  2460 +}
  2461 +EXPORT_SYMBOL_GPL(flush_work_sync);
  2462 +
2384 2463 /*
2385 2464 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
2386 2465 * so this work can't be re-armed in any way.
... ... @@ -2423,39 +2502,7 @@
2423 2502 return ret;
2424 2503 }
2425 2504  
2426   -static void wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work)
2427   -{
2428   - struct wq_barrier barr;
2429   - struct worker *worker;
2430   -
2431   - spin_lock_irq(&gcwq->lock);
2432   -
2433   - worker = find_worker_executing_work(gcwq, work);
2434   - if (unlikely(worker))
2435   - insert_wq_barrier(worker->current_cwq, &barr, work, worker);
2436   -
2437   - spin_unlock_irq(&gcwq->lock);
2438   -
2439   - if (unlikely(worker)) {
2440   - wait_for_completion(&barr.done);
2441   - destroy_work_on_stack(&barr.work);
2442   - }
2443   -}
2444   -
2445   -static void wait_on_work(struct work_struct *work)
2446   -{
2447   - int cpu;
2448   -
2449   - might_sleep();
2450   -
2451   - lock_map_acquire(&work->lockdep_map);
2452   - lock_map_release(&work->lockdep_map);
2453   -
2454   - for_each_gcwq_cpu(cpu)
2455   - wait_on_cpu_work(get_gcwq(cpu), work);
2456   -}
2457   -
2458   -static int __cancel_work_timer(struct work_struct *work,
  2505 +static bool __cancel_work_timer(struct work_struct *work,
2459 2506 struct timer_list* timer)
2460 2507 {
2461 2508 int ret;
2462 2509  
2463 2510  
2464 2511  
2465 2512  
2466 2513  
2467 2514  
2468 2515  
2469 2516  
2470 2517  
2471 2518  
... ... @@ -2472,43 +2519,82 @@
2472 2519 }
2473 2520  
2474 2521 /**
2475   - * cancel_work_sync - block until a work_struct's callback has terminated
2476   - * @work: the work which is to be flushed
  2522 + * cancel_work_sync - cancel a work and wait for it to finish
  2523 + * @work: the work to cancel
2477 2524 *
2478   - * Returns true if @work was pending.
  2525 + * Cancel @work and wait for its execution to finish. This function
  2526 + * can be used even if the work re-queues itself or migrates to
  2527 + * another workqueue. On return from this function, @work is
  2528 + * guaranteed to be not pending or executing on any CPU.
2479 2529 *
2480   - * cancel_work_sync() will cancel the work if it is queued. If the work's
2481   - * callback appears to be running, cancel_work_sync() will block until it
2482   - * has completed.
  2530 + * cancel_work_sync(&delayed_work->work) must not be used for
  2531 + * delayed_work's. Use cancel_delayed_work_sync() instead.
2483 2532 *
2484   - * It is possible to use this function if the work re-queues itself. It can
2485   - * cancel the work even if it migrates to another workqueue, however in that
2486   - * case it only guarantees that work->func() has completed on the last queued
2487   - * workqueue.
2488   - *
2489   - * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
2490   - * pending, otherwise it goes into a busy-wait loop until the timer expires.
2491   - *
2492   - * The caller must ensure that workqueue_struct on which this work was last
  2533 + * The caller must ensure that the workqueue on which @work was last
2493 2534 * queued can't be destroyed before this function returns.
  2535 + *
  2536 + * RETURNS:
  2537 + * %true if @work was pending, %false otherwise.
2494 2538 */
2495   -int cancel_work_sync(struct work_struct *work)
  2539 +bool cancel_work_sync(struct work_struct *work)
2496 2540 {
2497 2541 return __cancel_work_timer(work, NULL);
2498 2542 }
2499 2543 EXPORT_SYMBOL_GPL(cancel_work_sync);
2500 2544  
2501 2545 /**
2502   - * cancel_delayed_work_sync - reliably kill off a delayed work.
2503   - * @dwork: the delayed work struct
  2546 + * flush_delayed_work - wait for a dwork to finish executing the last queueing
  2547 + * @dwork: the delayed work to flush
2504 2548 *
2505   - * Returns true if @dwork was pending.
  2549 + * Delayed timer is cancelled and the pending work is queued for
  2550 + * immediate execution. Like flush_work(), this function only
  2551 + * considers the last queueing instance of @dwork.
2506 2552 *
2507   - * It is possible to use this function if @dwork rearms itself via queue_work()
2508   - * or queue_delayed_work(). See also the comment for cancel_work_sync().
  2553 + * RETURNS:
  2554 + * %true if flush_work() waited for the work to finish execution,
  2555 + * %false if it was already idle.
2509 2556 */
2510   -int cancel_delayed_work_sync(struct delayed_work *dwork)
  2557 +bool flush_delayed_work(struct delayed_work *dwork)
2511 2558 {
  2559 + if (del_timer_sync(&dwork->timer))
  2560 + __queue_work(raw_smp_processor_id(),
  2561 + get_work_cwq(&dwork->work)->wq, &dwork->work);
  2562 + return flush_work(&dwork->work);
  2563 +}
  2564 +EXPORT_SYMBOL(flush_delayed_work);
  2565 +
  2566 +/**
  2567 + * flush_delayed_work_sync - wait for a dwork to finish
  2568 + * @dwork: the delayed work to flush
  2569 + *
  2570 + * Delayed timer is cancelled and the pending work is queued for
  2571 + * execution immediately. Other than timer handling, its behavior
  2572 + * is identical to flush_work_sync().
  2573 + *
  2574 + * RETURNS:
  2575 + * %true if flush_work_sync() waited for the work to finish execution,
  2576 + * %false if it was already idle.
  2577 + */
  2578 +bool flush_delayed_work_sync(struct delayed_work *dwork)
  2579 +{
  2580 + if (del_timer_sync(&dwork->timer))
  2581 + __queue_work(raw_smp_processor_id(),
  2582 + get_work_cwq(&dwork->work)->wq, &dwork->work);
  2583 + return flush_work_sync(&dwork->work);
  2584 +}
  2585 +EXPORT_SYMBOL(flush_delayed_work_sync);
  2586 +
  2587 +/**
  2588 + * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
  2589 + * @dwork: the delayed work cancel
  2590 + *
  2591 + * This is cancel_work_sync() for delayed works.
  2592 + *
  2593 + * RETURNS:
  2594 + * %true if @dwork was pending, %false otherwise.
  2595 + */
  2596 +bool cancel_delayed_work_sync(struct delayed_work *dwork)
  2597 +{
2512 2598 return __cancel_work_timer(&dwork->work, &dwork->timer);
2513 2599 }
2514 2600 EXPORT_SYMBOL(cancel_delayed_work_sync);
... ... @@ -2559,23 +2645,6 @@
2559 2645 EXPORT_SYMBOL(schedule_delayed_work);
2560 2646  
2561 2647 /**
2562   - * flush_delayed_work - block until a dwork_struct's callback has terminated
2563   - * @dwork: the delayed work which is to be flushed
2564   - *
2565   - * Any timeout is cancelled, and any pending work is run immediately.
2566   - */
2567   -void flush_delayed_work(struct delayed_work *dwork)
2568   -{
2569   - if (del_timer_sync(&dwork->timer)) {
2570   - __queue_work(get_cpu(), get_work_cwq(&dwork->work)->wq,
2571   - &dwork->work);
2572   - put_cpu();
2573   - }
2574   - flush_work(&dwork->work);
2575   -}
2576   -EXPORT_SYMBOL(flush_delayed_work);
2577   -
2578   -/**
2579 2648 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
2580 2649 * @cpu: cpu to use
2581 2650 * @dwork: job to be done
2582 2651  
2583 2652  
... ... @@ -2592,13 +2661,15 @@
2592 2661 EXPORT_SYMBOL(schedule_delayed_work_on);
2593 2662  
2594 2663 /**
2595   - * schedule_on_each_cpu - call a function on each online CPU from keventd
  2664 + * schedule_on_each_cpu - execute a function synchronously on each online CPU
2596 2665 * @func: the function to call
2597 2666 *
2598   - * Returns zero on success.
2599   - * Returns -ve errno on failure.
2600   - *
  2667 + * schedule_on_each_cpu() executes @func on each online CPU using the
  2668 + * system workqueue and blocks until all CPUs have completed.
2601 2669 * schedule_on_each_cpu() is very slow.
  2670 + *
  2671 + * RETURNS:
  2672 + * 0 on success, -errno on failure.
2602 2673 */
2603 2674 int schedule_on_each_cpu(work_func_t func)
2604 2675 {
... ... @@ -2762,6 +2833,13 @@
2762 2833 {
2763 2834 struct workqueue_struct *wq;
2764 2835 unsigned int cpu;
  2836 +
  2837 + /*
  2838 + * Workqueues which may be used during memory reclaim should
  2839 + * have a rescuer to guarantee forward progress.
  2840 + */
  2841 + if (flags & WQ_MEM_RECLAIM)
  2842 + flags |= WQ_RESCUER;
2765 2843  
2766 2844 /*
2767 2845 * Unbound workqueues aren't concurrency managed and should be
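
To make the distinction between the wait primitives reworked in kernel/workqueue.c above concrete, a small sketch with a hypothetical work item that can be queued from several CPUs:

    #include <linux/kernel.h>
    #include <linux/workqueue.h>

    static void foo_fn(struct work_struct *work)
    {
            /* may be queued concurrently from several CPUs or workqueues */
    }

    static DECLARE_WORK(foo_work, foo_fn);

    static void foo_sync_example(void)
    {
            /* waits only for the last queueing instance of foo_work */
            flush_work(&foo_work);

            /* waits for every queueing that happened before the call */
            flush_work_sync(&foo_work);

            /* also clears any pending instance; returns whether it was pending */
            if (cancel_work_sync(&foo_work))
                    pr_debug("foo_work was still pending\n");
    }
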
mm/memory_hotplug.c
... ... @@ -840,7 +840,6 @@
840 840 ret = 0;
841 841 if (drain) {
842 842 lru_add_drain_all();
843   - flush_scheduled_work();
844 843 cond_resched();
845 844 drain_all_pages();
846 845 }
... ... @@ -862,7 +861,6 @@
862 861 }
863 862 /* drain all zone's lru pagevec, this is asyncronous... */
864 863 lru_add_drain_all();
865   - flush_scheduled_work();
866 864 yield();
867 865 /* drain pcp pages , this is synchrouns. */
868 866 drain_all_pages();