/*
 * workqueue.h --- work queue handling for Linux.
 */
#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>

/* opaque to users; defined in kernel/workqueue.c */
struct workqueue_struct;

struct work_struct;
/* signature of the callback a worker invokes for each work item */
typedef void (*work_func_t)(struct work_struct *work);
/* timer callback used by delayed_work; __data is the delayed_work pointer */
void delayed_work_timer_fn(unsigned long __data);
/*
 * The first word is the work queue pointer and the flags rolled into
 * one
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

/*
 * Layout of work_struct->data: low bits are WORK_STRUCT_* flags plus the
 * flush color; the remaining high bits hold either a pwq pointer (when
 * queued) or the id of the last pool the item ran on (when off queue).
 */
enum {
        WORK_STRUCT_PENDING_BIT = 0,    /* work item is pending execution */
        WORK_STRUCT_DELAYED_BIT = 1,    /* work item is delayed */
        WORK_STRUCT_PWQ_BIT     = 2,    /* data points to pwq */
        WORK_STRUCT_LINKED_BIT  = 3,    /* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
        WORK_STRUCT_STATIC_BIT  = 4,    /* static initializer (debugobjects) */
        WORK_STRUCT_COLOR_SHIFT = 5,    /* color for workqueue flushing */
#else
        WORK_STRUCT_COLOR_SHIFT = 4,    /* color for workqueue flushing */
#endif

        WORK_STRUCT_COLOR_BITS  = 4,

        WORK_STRUCT_PENDING     = 1 << WORK_STRUCT_PENDING_BIT,
        WORK_STRUCT_DELAYED     = 1 << WORK_STRUCT_DELAYED_BIT,
        WORK_STRUCT_PWQ         = 1 << WORK_STRUCT_PWQ_BIT,
        WORK_STRUCT_LINKED      = 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
        WORK_STRUCT_STATIC      = 1 << WORK_STRUCT_STATIC_BIT,
#else
        WORK_STRUCT_STATIC      = 0,
#endif

        /*
         * The last color is no color used for works which don't
         * participate in workqueue flushing.
         */
        WORK_NR_COLORS          = (1 << WORK_STRUCT_COLOR_BITS) - 1,
        WORK_NO_COLOR           = WORK_NR_COLORS,

        /* not bound to any CPU, prefer the local CPU */
        WORK_CPU_UNBOUND        = NR_CPUS,

        /*
         * Reserve 7 bits off of pwq pointer w/ debugobjects turned off.
         * This makes pwqs aligned to 256 bytes and allows 15 workqueue
         * flush colors.
         */
        WORK_STRUCT_FLAG_BITS   = WORK_STRUCT_COLOR_SHIFT +
                                  WORK_STRUCT_COLOR_BITS,

        /* data contains off-queue information when !WORK_STRUCT_PWQ */
        WORK_OFFQ_FLAG_BASE     = WORK_STRUCT_COLOR_SHIFT,

        __WORK_OFFQ_CANCELING   = WORK_OFFQ_FLAG_BASE,
        WORK_OFFQ_CANCELING     = (1 << __WORK_OFFQ_CANCELING),

        /*
         * When a work item is off queue, its high bits point to the last
         * pool it was on.  Cap at 31 bits and use the highest number to
         * indicate that no pool is associated.
         */
        WORK_OFFQ_FLAG_BITS     = 1,
        WORK_OFFQ_POOL_SHIFT    = WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
        WORK_OFFQ_LEFT          = BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
        WORK_OFFQ_POOL_BITS     = WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
        WORK_OFFQ_POOL_NONE     = (1LU << WORK_OFFQ_POOL_BITS) - 1,

        /* convenience constants */
        WORK_STRUCT_FLAG_MASK   = (1UL << WORK_STRUCT_FLAG_BITS) - 1,
        WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
        WORK_STRUCT_NO_POOL     = (unsigned long)WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT,

        /* bit mask for work_busy() return values */
        WORK_BUSY_PENDING       = 1 << 0,
        WORK_BUSY_RUNNING       = 1 << 1,

        /* maximum string length for set_worker_desc() */
        WORKER_DESC_LEN         = 24,
};
/* a deferred unit of work; embedded in the owning object, not allocated here */
struct work_struct {
        atomic_long_t data;             /* pwq/pool pointer + WORK_STRUCT_* flags */
        struct list_head entry;         /* node on the pwq/pool worklist */
        work_func_t func;               /* callback invoked by a worker */
#ifdef CONFIG_LOCKDEP
        struct lockdep_map lockdep_map; /* tracks flush vs. locks held deadlocks */
#endif
};

/* initial ->data: no flags, no pool association */
#define WORK_DATA_INIT()        ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL)
/* as above, plus STATIC so debugobjects knows it wasn't runtime-initialized */
#define WORK_DATA_STATIC_INIT() \
        ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC)
/* a work item that is queued only after a timer expires */
struct delayed_work {
        struct work_struct work;
        struct timer_list timer;        /* fires delayed_work_timer_fn */

        /* target workqueue and CPU ->timer uses to queue ->work */
        struct workqueue_struct *wq;
        int cpu;
};
/*
 * A struct for workqueue attributes.  This can be used to change
 * attributes of an unbound workqueue.
 *
 * Unlike other fields, ->no_numa isn't a property of a worker_pool.  It
 * only modifies how apply_workqueue_attrs() select pools and thus doesn't
 * participate in pool hash calculations or equality comparisons.
 */
struct workqueue_attrs {
        int nice;               /* nice level */
        cpumask_var_t cpumask;  /* allowed CPUs */
        bool no_numa;           /* disable NUMA affinity */
};
/*
 * to_delayed_work - map an embedded work_struct back to its delayed_work
 * @work: must be the ->work member of a struct delayed_work
 */
static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
        return container_of(work, struct delayed_work, work);
}
/* wrapper used by execute_in_process_context() for its deferred path */
struct execute_work {
        struct work_struct work;
};
#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
        .lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

/* compile-time initializer for a statically defined work_struct */
#define __WORK_INITIALIZER(n, f) {                                      \
        .data = WORK_DATA_STATIC_INIT(),                                \
        .entry  = { &(n).entry, &(n).entry },                           \
        .func = (f),                                                    \
        __WORK_INIT_LOCKDEP_MAP(#n, &(n))                               \
        }

/* compile-time initializer for a statically defined delayed_work;
 * tflags are extra timer flags (e.g. TIMER_DEFERRABLE) */
#define __DELAYED_WORK_INITIALIZER(n, f, tflags) {                      \
        .work = __WORK_INITIALIZER((n).work, (f)),                      \
        .timer = __TIMER_INITIALIZER(delayed_work_timer_fn,             \
                                     0, (unsigned long)&(n),            \
                                     (tflags) | TIMER_IRQSAFE),         \
        }

#define DECLARE_WORK(n, f)                                              \
        struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)                                      \
        struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)

/* deferrable: the timer doesn't wake an idle CPU just to queue the work */
#define DECLARE_DEFERRABLE_WORK(n, f)                                   \
        struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)
/*
 * debugobjects hooks for work items; no-ops unless
 * CONFIG_DEBUG_OBJECTS_WORK is enabled.
 */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
extern void destroy_delayed_work_on_stack(struct delayed_work *work);
/* non-zero iff @work was set up with a static initializer */
static inline unsigned int work_static(struct work_struct *work)
{
        return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif
/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK(_work, _func, _onstack)                             \
        do {                                                            \
                static struct lock_class_key __key;                     \
                                                                        \
                __init_work((_work), _onstack);                         \
                (_work)->data = (atomic_long_t) WORK_DATA_INIT();       \
                lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0); \
                INIT_LIST_HEAD(&(_work)->entry);                        \
                (_work)->func = (_func);                                \
        } while (0)
#else
#define __INIT_WORK(_work, _func, _onstack)                             \
        do {                                                            \
                __init_work((_work), _onstack);                         \
                (_work)->data = (atomic_long_t) WORK_DATA_INIT();       \
                INIT_LIST_HEAD(&(_work)->entry);                        \
                (_work)->func = (_func);                                \
        } while (0)
#endif
/* runtime initializers; use the _ONSTACK variants for on-stack objects so
 * debugobjects tracks their lifetime correctly */
#define INIT_WORK(_work, _func)                                         \
        __INIT_WORK((_work), (_func), 0)

#define INIT_WORK_ONSTACK(_work, _func)                                 \
        __INIT_WORK((_work), (_func), 1)

#define __INIT_DELAYED_WORK(_work, _func, _tflags)                      \
        do {                                                            \
                INIT_WORK(&(_work)->work, (_func));                     \
                __setup_timer(&(_work)->timer, delayed_work_timer_fn,   \
                              (unsigned long)(_work),                   \
                              (_tflags) | TIMER_IRQSAFE);               \
        } while (0)

#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags)              \
        do {                                                            \
                INIT_WORK_ONSTACK(&(_work)->work, (_func));             \
                __setup_timer_on_stack(&(_work)->timer,                 \
                                       delayed_work_timer_fn,           \
                                       (unsigned long)(_work),          \
                                       (_tflags) | TIMER_IRQSAFE);      \
        } while (0)

#define INIT_DELAYED_WORK(_work, _func)                                 \
        __INIT_DELAYED_WORK(_work, _func, 0)

#define INIT_DELAYED_WORK_ONSTACK(_work, _func)                         \
        __INIT_DELAYED_WORK_ONSTACK(_work, _func, 0)

#define INIT_DEFERRABLE_WORK(_work, _func)                              \
        __INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE)

#define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func)                      \
        __INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)
/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work) \
        test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @work: The work item in question
 */
#define delayed_work_pending(w) \
        work_pending(&(w)->work)
/*
 * Workqueue flags and constants.  For details, please refer to
 * Documentation/workqueue.txt.
 */
enum {
        WQ_UNBOUND              = 1 << 1, /* not bound to any cpu */
        WQ_FREEZABLE            = 1 << 2, /* freeze during suspend */
        WQ_MEM_RECLAIM          = 1 << 3, /* may be used for memory reclaim */
        WQ_HIGHPRI              = 1 << 4, /* high priority */
        WQ_CPU_INTENSIVE        = 1 << 5, /* cpu intensive workqueue */
        WQ_SYSFS                = 1 << 6, /* visible in sysfs, see wq_sysfs_register() */

        /*
         * Per-cpu workqueues are generally preferred because they tend to
         * show better performance thanks to cache locality.  Per-cpu
         * workqueues exclude the scheduler from choosing the CPU to
         * execute the worker threads, which has an unfortunate side effect
         * of increasing power consumption.
         *
         * The scheduler considers a CPU idle if it doesn't have any task
         * to execute and tries to keep idle cores idle to conserve power;
         * however, for example, a per-cpu work item scheduled from an
         * interrupt handler on an idle CPU will force the scheduler to
         * execute the work item on that CPU breaking the idleness, which in
         * turn may lead to more scheduling choices which are sub-optimal
         * in terms of power consumption.
         *
         * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default
         * but become unbound if workqueue.power_efficient kernel param is
         * specified.  Per-cpu workqueues which are identified to
         * contribute significantly to power-consumption are identified and
         * marked with this flag and enabling the power_efficient mode
         * leads to noticeable power saving at the cost of small
         * performance disadvantage.
         *
         * http://thread.gmane.org/gmane.linux.kernel/1480396
         */
        WQ_POWER_EFFICIENT      = 1 << 7,

        __WQ_DRAINING           = 1 << 16, /* internal: workqueue is draining */
        __WQ_ORDERED            = 1 << 17, /* internal: workqueue is ordered */

        WQ_MAX_ACTIVE           = 512,    /* I like 512, better ideas? */
        WQ_MAX_UNBOUND_PER_CPU  = 4,      /* 4 * #cpus for unbound wq */
        WQ_DFL_ACTIVE           = WQ_MAX_ACTIVE / 2,
};
/* unbound wq's aren't per-cpu, scale max_active according to #cpus */
#define WQ_UNBOUND_MAX_ACTIVE   \
        max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)

/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queue flush time.  Don't queue works which can run for too
 * long.
 *
 * system_highpri_wq is similar to system_wq but for work items which
 * require WQ_HIGHPRI.
 *
 * system_long_wq is similar to system_wq but may host long running
 * works.  Queue flushing might take relatively long.
 *
 * system_unbound_wq is unbound workqueue.  Workers are not bound to
 * any specific CPU, not concurrency managed, and all queued works are
 * executed immediately as long as max_active limit is not reached and
 * resources are available.
 *
 * system_freezable_wq is equivalent to system_wq except that it's
 * freezable.
 *
 * *_power_efficient_wq are inclined towards saving power and converted
 * into WQ_UNBOUND variants if 'wq_power_efficient' is enabled; otherwise,
 * they are same as their non-power-efficient counterparts - e.g.
 * system_power_efficient_wq is identical to system_wq if
 * 'wq_power_efficient' is disabled.  See WQ_POWER_EFFICIENT for more info.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_highpri_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *system_power_efficient_wq;
extern struct workqueue_struct *system_freezable_power_efficient_wq;

/* backend for the alloc_workqueue() macros below; don't call directly */
extern struct workqueue_struct *
__alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
        struct lock_class_key *key, const char *lock_name, ...) __printf(1, 6);
/**
 * alloc_workqueue - allocate a workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 * @args: args for @fmt
 *
 * Allocate a workqueue with the specified parameters.  For detailed
 * information on WQ_* flags, please refer to Documentation/workqueue.txt.
 *
 * The __lock_name macro dance is to guarantee that single lock_class_key
 * doesn't end up with different names, which isn't allowed by lockdep.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#ifdef CONFIG_LOCKDEP
#define alloc_workqueue(fmt, flags, max_active, args...)                \
({                                                                      \
        static struct lock_class_key __key;                             \
        const char *__lock_name;                                        \
                                                                        \
        __lock_name = #fmt#args;                                        \
                                                                        \
        __alloc_workqueue_key((fmt), (flags), (max_active),             \
                              &__key, __lock_name, ##args);             \
})
#else
#define alloc_workqueue(fmt, flags, max_active, args...)                \
        __alloc_workqueue_key((fmt), (flags), (max_active),             \
                              NULL, NULL, ##args)
#endif
/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @args: args for @fmt
 *
 * Allocate an ordered workqueue.  An ordered workqueue executes at
 * most one work item at any given time in the queued order.  They are
 * implemented as unbound workqueues with @max_active of one.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#define alloc_ordered_workqueue(fmt, flags, args...)                    \
        alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
/* legacy constructors kept for old callers; new code should use
 * alloc_workqueue() / alloc_ordered_workqueue() directly */
#define create_workqueue(name)                                          \
        alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, (name))
#define create_freezable_workqueue(name)                                \
        alloc_workqueue("%s", WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, \
                        1, (name))
#define create_singlethread_workqueue(name)                             \
        alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name)

extern void destroy_workqueue(struct workqueue_struct *wq);
/* unbound-workqueue attribute management */
struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask);
void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
                          const struct workqueue_attrs *attrs);

/* queueing on an explicit CPU; the WORK_CPU_UNBOUND wrappers below
 * (queue_work() etc.) are the common entry points */
extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
                        struct work_struct *work);
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
                        struct delayed_work *work, unsigned long delay);
extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
                        struct delayed_work *dwork, unsigned long delay);

extern void flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);
extern void flush_scheduled_work(void);

extern int schedule_on_each_cpu(work_func_t func);

int execute_in_process_context(work_func_t fn, struct execute_work *);

/* flushing / cancellation of individual work items */
extern bool flush_work(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);

extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);

/* runtime tuning and introspection */
extern void workqueue_set_max_active(struct workqueue_struct *wq,
                                     int max_active);
extern bool current_is_workqueue_rescuer(void);
extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
extern unsigned int work_busy(struct work_struct *work);
extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
extern void print_worker_info(const char *log_lvl, struct task_struct *task);
extern void show_workqueue_state(void);
/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns %false if @work was already on a queue, %true otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
static inline bool queue_work(struct workqueue_struct *wq,
                              struct work_struct *work)
{
        return queue_work_on(WORK_CPU_UNBOUND, wq, work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
 */
static inline bool queue_delayed_work(struct workqueue_struct *wq,
                                      struct delayed_work *dwork,
                                      unsigned long delay)
{
        return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}

/**
 * mod_delayed_work - modify delay of or queue a delayed work
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * mod_delayed_work_on() on local CPU.
 */
static inline bool mod_delayed_work(struct workqueue_struct *wq,
                                    struct delayed_work *dwork,
                                    unsigned long delay)
{
        return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}

/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu
 */
static inline bool schedule_work_on(int cpu, struct work_struct *work)
{
        return queue_work_on(cpu, system_wq, work);
}

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns %false if @work was already on the kernel-global workqueue and
 * %true otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 */
static inline bool schedule_work(struct work_struct *work)
{
        return queue_work(system_wq, work);
}

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
                                            unsigned long delay)
{
        return queue_delayed_work_on(cpu, system_wq, dwork, delay);
}

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
static inline bool schedule_delayed_work(struct delayed_work *dwork,
                                         unsigned long delay)
{
        return queue_delayed_work(system_wq, dwork, delay);
}

/**
 * keventd_up - is workqueue initialized yet?
 */
static inline bool keventd_up(void)
{
        return system_wq != NULL;
}
/*
 * work_on_cpu - run @fn(@arg) on @cpu and return its result.  On !SMP
 * there is only one CPU, so just call @fn directly; @cpu is ignored.
 */
#ifndef CONFIG_SMP
static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
        return fn(arg);
}
#else
long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
#endif /* CONFIG_SMP */
/* freezer integration: quiesce freezable workqueues across suspend */
#ifdef CONFIG_FREEZER
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif /* CONFIG_FREEZER */

/* sysfs registration; harmless no-op when sysfs is compiled out */
#ifdef CONFIG_SYSFS
int workqueue_sysfs_register(struct workqueue_struct *wq);
#else /* CONFIG_SYSFS */
static inline int workqueue_sysfs_register(struct workqueue_struct *wq)
{ return 0; }
#endif /* CONFIG_SYSFS */

#endif