Blame view
include/linux/workqueue.h
21.5 KB
b24413180 License cleanup: ... |
1 |
/* SPDX-License-Identifier: GPL-2.0 */ |
1da177e4c Linux-2.6.12-rc2 |
2 3 4 5 6 7 8 9 10 11 |
/* * workqueue.h --- work queue handling for Linux. */ #ifndef _LINUX_WORKQUEUE_H #define _LINUX_WORKQUEUE_H #include <linux/timer.h> #include <linux/linkage.h> #include <linux/bitops.h> |
4e6045f13 workqueue: debug ... |
12 |
#include <linux/lockdep.h> |
7a22ad757 workqueue: carry ... |
13 |
#include <linux/threads.h> |
60063497a atomic: use <linu... |
14 |
#include <linux/atomic.h> |
7a4e344c5 workqueue: introd... |
15 |
#include <linux/cpumask.h> |
05f0fe6b7 RCU, workqueue: I... |
16 |
#include <linux/rcupdate.h> |
1da177e4c Linux-2.6.12-rc2 |
17 18 |
struct workqueue_struct; |
65f27f384 WorkStruct: Pass ... |
19 20 |
struct work_struct; typedef void (*work_func_t)(struct work_struct *work); |
8c20feb60 workqueue: Conver... |
21 |
void delayed_work_timer_fn(struct timer_list *t); |
6bb49e596 WorkStruct: Typed... |
22 |
|
a08727bae Make workqueue bi... |
23 24 25 26 27 |
/* * The first word is the work queue pointer and the flags rolled into * one */ #define work_data_bits(work) ((unsigned long *)(&(work)->data)) |
22df02bb3 workqueue: define... |
28 29 |
enum { WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */ |
8a2e8e5de workqueue: fix cw... |
30 |
WORK_STRUCT_DELAYED_BIT = 1, /* work item is delayed */ |
112202d90 workqueue: rename... |
31 |
WORK_STRUCT_PWQ_BIT = 2, /* data points to pwq */ |
8a2e8e5de workqueue: fix cw... |
32 |
WORK_STRUCT_LINKED_BIT = 3, /* next work is linked to this one */ |
22df02bb3 workqueue: define... |
33 |
#ifdef CONFIG_DEBUG_OBJECTS_WORK |
8a2e8e5de workqueue: fix cw... |
34 35 |
WORK_STRUCT_STATIC_BIT = 4, /* static initializer (debugobjects) */ WORK_STRUCT_COLOR_SHIFT = 5, /* color for workqueue flushing */ |
0f900049c workqueue: update... |
36 |
#else |
8a2e8e5de workqueue: fix cw... |
37 |
WORK_STRUCT_COLOR_SHIFT = 4, /* color for workqueue flushing */ |
22df02bb3 workqueue: define... |
38 |
#endif |
73f53c4aa workqueue: reimpl... |
39 |
WORK_STRUCT_COLOR_BITS = 4, |
22df02bb3 workqueue: define... |
40 |
WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT, |
8a2e8e5de workqueue: fix cw... |
41 |
WORK_STRUCT_DELAYED = 1 << WORK_STRUCT_DELAYED_BIT, |
112202d90 workqueue: rename... |
42 |
WORK_STRUCT_PWQ = 1 << WORK_STRUCT_PWQ_BIT, |
affee4b29 workqueue: reimpl... |
43 |
WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT, |
22df02bb3 workqueue: define... |
44 45 46 47 48 |
#ifdef CONFIG_DEBUG_OBJECTS_WORK WORK_STRUCT_STATIC = 1 << WORK_STRUCT_STATIC_BIT, #else WORK_STRUCT_STATIC = 0, #endif |
73f53c4aa workqueue: reimpl... |
49 50 51 52 53 54 |
/* * The last color is no color used for works which don't * participate in workqueue flushing. */ WORK_NR_COLORS = (1 << WORK_STRUCT_COLOR_BITS) - 1, WORK_NO_COLOR = WORK_NR_COLORS, |
79bc251f0 workqueue: remove... |
55 |
/* not bound to any CPU, prefer the local CPU */ |
f34217977 workqueue: implem... |
56 |
WORK_CPU_UNBOUND = NR_CPUS, |
bdbc5dd7d workqueue: prepar... |
57 |
|
73f53c4aa workqueue: reimpl... |
58 |
/* |
c39ba6b3a workqueue: fix a ... |
59 |
* Reserve 8 bits off of pwq pointer w/ debugobjects turned off. |
112202d90 workqueue: rename... |
60 61 |
* This makes pwqs aligned to 256 bytes and allows 15 workqueue * flush colors. |
73f53c4aa workqueue: reimpl... |
62 63 64 |
*/ WORK_STRUCT_FLAG_BITS = WORK_STRUCT_COLOR_SHIFT + WORK_STRUCT_COLOR_BITS, |
112202d90 workqueue: rename... |
65 |
/* data contains off-queue information when !WORK_STRUCT_PWQ */ |
45d9550a0 workqueue: allow ... |
66 |
WORK_OFFQ_FLAG_BASE = WORK_STRUCT_COLOR_SHIFT, |
bbb68dfab workqueue: mark a... |
67 |
|
8603e1b30 workqueue: fix ha... |
68 69 |
__WORK_OFFQ_CANCELING = WORK_OFFQ_FLAG_BASE, WORK_OFFQ_CANCELING = (1 << __WORK_OFFQ_CANCELING), |
bbb68dfab workqueue: mark a... |
70 |
|
715b06b86 workqueue: introd... |
71 72 |
/* * When a work item is off queue, its high bits point to the last |
7c3eed5cd workqueue: record... |
73 74 |
* pool it was on. Cap at 31 bits and use the highest number to * indicate that no pool is associated. |
715b06b86 workqueue: introd... |
75 |
*/ |
bbb68dfab workqueue: mark a... |
76 |
WORK_OFFQ_FLAG_BITS = 1, |
7c3eed5cd workqueue: record... |
77 78 79 80 |
WORK_OFFQ_POOL_SHIFT = WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS, WORK_OFFQ_LEFT = BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT, WORK_OFFQ_POOL_BITS = WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31, WORK_OFFQ_POOL_NONE = (1LU << WORK_OFFQ_POOL_BITS) - 1, |
b54900772 workqueue: introd... |
81 82 |
/* convenience constants */ |
0f900049c workqueue: update... |
83 |
WORK_STRUCT_FLAG_MASK = (1UL << WORK_STRUCT_FLAG_BITS) - 1, |
22df02bb3 workqueue: define... |
84 |
WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK, |
7c3eed5cd workqueue: record... |
85 |
WORK_STRUCT_NO_POOL = (unsigned long)WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT, |
dcd989cb7 workqueue: implem... |
86 87 88 89 |
/* bit mask for work_busy() return values */ WORK_BUSY_PENDING = 1 << 0, WORK_BUSY_RUNNING = 1 << 1, |
3d1cb2059 workqueue: includ... |
90 91 92 |
/* maximum string length for set_worker_desc() */ WORKER_DESC_LEN = 24, |
22df02bb3 workqueue: define... |
93 |
}; |
1da177e4c Linux-2.6.12-rc2 |
94 |
struct work_struct { |
a08727bae Make workqueue bi... |
95 |
atomic_long_t data; |
1da177e4c Linux-2.6.12-rc2 |
96 |
struct list_head entry; |
6bb49e596 WorkStruct: Typed... |
97 |
work_func_t func; |
4e6045f13 workqueue: debug ... |
98 99 100 |
#ifdef CONFIG_LOCKDEP struct lockdep_map lockdep_map; #endif |
52bad64d9 WorkStruct: Separ... |
101 |
}; |
a45463cbf workqueue: avoid ... |
102 |
#define WORK_DATA_INIT() ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL) |
7a22ad757 workqueue: carry ... |
103 |
#define WORK_DATA_STATIC_INIT() \ |
a45463cbf workqueue: avoid ... |
104 |
ATOMIC_LONG_INIT((unsigned long)(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC)) |
a08727bae Make workqueue bi... |
105 |
|
52bad64d9 WorkStruct: Separ... |
106 107 |
struct delayed_work { struct work_struct work; |
1da177e4c Linux-2.6.12-rc2 |
108 |
struct timer_list timer; |
60c057bca workqueue: add de... |
109 110 111 |
/* target workqueue and CPU ->timer uses to queue ->work */ struct workqueue_struct *wq; |
1265057fa workqueue: fix CP... |
112 |
int cpu; |
1da177e4c Linux-2.6.12-rc2 |
113 |
}; |
05f0fe6b7 RCU, workqueue: I... |
114 115 116 117 118 119 120 |
struct rcu_work { struct work_struct work; struct rcu_head rcu; /* target workqueue ->rcu uses to queue ->work */ struct workqueue_struct *wq; }; |
42412c3aa workqueue: kernel... |
121 122 |
/** * struct workqueue_attrs - A struct for workqueue attributes. |
d55262c4d workqueue: update... |
123 |
* |
42412c3aa workqueue: kernel... |
124 |
* This can be used to change attributes of an unbound workqueue. |
7a4e344c5 workqueue: introd... |
125 126 |
*/ struct workqueue_attrs { |
42412c3aa workqueue: kernel... |
127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 |
/** * @nice: nice level */ int nice; /** * @cpumask: allowed CPUs */ cpumask_var_t cpumask; /** * @no_numa: disable NUMA affinity * * Unlike other fields, ``no_numa`` isn't a property of a worker_pool. It * only modifies how :c:func:`apply_workqueue_attrs` selects pools and thus * doesn't participate in pool hash calculations or equality comparisons. */ bool no_numa;
7a4e344c5 workqueue: introd... |
145 |
}; |
bf6aede71 workqueue: add to... |
146 147 148 149 |
static inline struct delayed_work *to_delayed_work(struct work_struct *work) { return container_of(work, struct delayed_work, work); } |
05f0fe6b7 RCU, workqueue: I... |
150 151 152 153 |
static inline struct rcu_work *to_rcu_work(struct work_struct *work) { return container_of(work, struct rcu_work, work); } |
1fa44ecad [SCSI] add execut... |
154 155 156 |
struct execute_work { struct work_struct work; }; |
4e6045f13 workqueue: debug ... |
157 158 159 160 161 162 163 164 165 166 167 |
#ifdef CONFIG_LOCKDEP /* * NB: because we have to copy the lockdep_map, setting _key * here is required, otherwise it could get initialised to the * copy of the lockdep_map! */ #define __WORK_INIT_LOCKDEP_MAP(n, k) \ .lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k), #else #define __WORK_INIT_LOCKDEP_MAP(n, k) #endif |
ee64e7f69 workqueue: cosmet... |
168 169 170 171 172 |
#define __WORK_INITIALIZER(n, f) { \ .data = WORK_DATA_STATIC_INIT(), \ .entry = { &(n).entry, &(n).entry }, \ .func = (f), \ __WORK_INIT_LOCKDEP_MAP(#n, &(n)) \ |
65f27f384 WorkStruct: Pass ... |
173 |
} |
f991b318c workqueue: clean ... |
174 |
#define __DELAYED_WORK_INITIALIZER(n, f, tflags) { \ |
ee64e7f69 workqueue: cosmet... |
175 |
.work = __WORK_INITIALIZER((n).work, (f)), \ |
841b86f32 treewide: Remove ... |
176 |
.timer = __TIMER_INITIALIZER(delayed_work_timer_fn,\ |
e0aecdd87 workqueue: use ir... |
177 |
(tflags) | TIMER_IRQSAFE), \ |
dd6414b50 timer: Permit sta... |
178 |
} |
ee64e7f69 workqueue: cosmet... |
179 |
#define DECLARE_WORK(n, f) \ |
65f27f384 WorkStruct: Pass ... |
180 |
struct work_struct n = __WORK_INITIALIZER(n, f) |
ee64e7f69 workqueue: cosmet... |
181 |
#define DECLARE_DELAYED_WORK(n, f) \ |
f991b318c workqueue: clean ... |
182 |
struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0) |
65f27f384 WorkStruct: Pass ... |
183 |
|
203b42f73 workqueue: make d... |
184 |
#define DECLARE_DEFERRABLE_WORK(n, f) \ |
f991b318c workqueue: clean ... |
185 |
struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE) |
dd6414b50 timer: Permit sta... |
186 |
|
dc186ad74 workqueue: Add de... |
187 188 189 |
#ifdef CONFIG_DEBUG_OBJECTS_WORK extern void __init_work(struct work_struct *work, int onstack); extern void destroy_work_on_stack(struct work_struct *work); |
ea2e64f28 workqueue: Provid... |
190 |
extern void destroy_delayed_work_on_stack(struct delayed_work *work); |
4690c4ab5 workqueue: misc/c... |
191 192 |
static inline unsigned int work_static(struct work_struct *work) { |
22df02bb3 workqueue: define... |
193 |
return *work_data_bits(work) & WORK_STRUCT_STATIC; |
4690c4ab5 workqueue: misc/c... |
194 |
} |
dc186ad74 workqueue: Add de... |
195 196 197 |
#else static inline void __init_work(struct work_struct *work, int onstack) { } static inline void destroy_work_on_stack(struct work_struct *work) { } |
ea2e64f28 workqueue: Provid... |
198 |
static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { } |
4690c4ab5 workqueue: misc/c... |
199 |
static inline unsigned int work_static(struct work_struct *work) { return 0; } |
dc186ad74 workqueue: Add de... |
200 |
#endif |
1da177e4c Linux-2.6.12-rc2 |
201 |
/* |
52bad64d9 WorkStruct: Separ... |
202 |
* initialize all of a work item in one go |
a08727bae Make workqueue bi... |
203 |
* |
b9049df5a Change "useing" -... |
204 |
* NOTE! No point in using "atomic_long_set()": using a direct |
a08727bae Make workqueue bi... |
205 206 |
* assignment of the work data initializer allows the compiler * to generate better code. |
1da177e4c Linux-2.6.12-rc2 |
207 |
*/ |
4e6045f13 workqueue: debug ... |
208 |
#ifdef CONFIG_LOCKDEP |
dc186ad74 workqueue: Add de... |
209 |
#define __INIT_WORK(_work, _func, _onstack) \ |
65f27f384 WorkStruct: Pass ... |
210 |
do { \ |
4e6045f13 workqueue: debug ... |
211 212 |
static struct lock_class_key __key; \ \ |
dc186ad74 workqueue: Add de... |
213 |
__init_work((_work), _onstack); \ |
23b2e5991 workqueue: kill N... |
214 |
(_work)->data = (atomic_long_t) WORK_DATA_INIT(); \ |
fd1a5b04d workqueue: Remove... |
215 |
lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, &__key, 0); \ |
65f27f384 WorkStruct: Pass ... |
216 |
INIT_LIST_HEAD(&(_work)->entry); \ |
f073f9229 workqueue: remove... |
217 |
(_work)->func = (_func); \ |
65f27f384 WorkStruct: Pass ... |
218 |
} while (0) |
4e6045f13 workqueue: debug ... |
219 |
#else |
dc186ad74 workqueue: Add de... |
220 |
#define __INIT_WORK(_work, _func, _onstack) \ |
4e6045f13 workqueue: debug ... |
221 |
do { \ |
dc186ad74 workqueue: Add de... |
222 |
__init_work((_work), _onstack); \ |
4e6045f13 workqueue: debug ... |
223 224 |
(_work)->data = (atomic_long_t) WORK_DATA_INIT(); \ INIT_LIST_HEAD(&(_work)->entry); \ |
f073f9229 workqueue: remove... |
225 |
(_work)->func = (_func); \ |
4e6045f13 workqueue: debug ... |
226 227 |
} while (0) #endif |
65f27f384 WorkStruct: Pass ... |
228 |
|
ee64e7f69 workqueue: cosmet... |
229 |
#define INIT_WORK(_work, _func) \ |
9da7dae94 workqueue.h: remo... |
230 |
__INIT_WORK((_work), (_func), 0) |
dc186ad74 workqueue: Add de... |
231 |
|
ee64e7f69 workqueue: cosmet... |
232 |
#define INIT_WORK_ONSTACK(_work, _func) \ |
9da7dae94 workqueue.h: remo... |
233 |
__INIT_WORK((_work), (_func), 1) |
dc186ad74 workqueue: Add de... |
234 |
|
f991b318c workqueue: clean ... |
235 |
#define __INIT_DELAYED_WORK(_work, _func, _tflags) \ |
ee64e7f69 workqueue: cosmet... |
236 237 |
do { \ INIT_WORK(&(_work)->work, (_func)); \ |
919b250f8 timer: Remove red... |
238 239 240 |
__init_timer(&(_work)->timer, \ delayed_work_timer_fn, \ (_tflags) | TIMER_IRQSAFE); \ |
52bad64d9 WorkStruct: Separ... |
241 |
} while (0) |
f991b318c workqueue: clean ... |
242 |
#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags) \ |
ee64e7f69 workqueue: cosmet... |
243 244 |
do { \ INIT_WORK_ONSTACK(&(_work)->work, (_func)); \ |
919b250f8 timer: Remove red... |
245 246 247 |
__init_timer_on_stack(&(_work)->timer, \ delayed_work_timer_fn, \ (_tflags) | TIMER_IRQSAFE); \ |
6d612b0f9 locking, hpet: an... |
248 |
} while (0) |
f991b318c workqueue: clean ... |
249 250 251 252 253 |
#define INIT_DELAYED_WORK(_work, _func) \ __INIT_DELAYED_WORK(_work, _func, 0) #define INIT_DELAYED_WORK_ONSTACK(_work, _func) \ __INIT_DELAYED_WORK_ONSTACK(_work, _func, 0) |
203b42f73 workqueue: make d... |
254 |
#define INIT_DEFERRABLE_WORK(_work, _func) \ |
f991b318c workqueue: clean ... |
255 256 257 258 |
__INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE) #define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func) \ __INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE) |
28287033e Add a new deferra... |
259 |
|
05f0fe6b7 RCU, workqueue: I... |
260 261 262 263 264 |
#define INIT_RCU_WORK(_work, _func) \ INIT_WORK(&(_work)->work, (_func)) #define INIT_RCU_WORK_ONSTACK(_work, _func) \ INIT_WORK_ONSTACK(&(_work)->work, (_func)) |
365970a1e WorkStruct: Merge... |
265 266 267 268 269 |
/** * work_pending - Find out whether a work item is currently pending * @work: The work item in question */ #define work_pending(work) \ |
22df02bb3 workqueue: define... |
270 |
test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) |
365970a1e WorkStruct: Merge... |
271 272 273 274 |
/** * delayed_work_pending - Find out whether a delayable work item is currently * pending |
355c06633 workqueue: fix so... |
275 |
* @w: The work item in question |
365970a1e WorkStruct: Merge... |
276 |
*/ |
0221872a3 Fix "delayed_work... |
277 278 |
#define delayed_work_pending(w) \ work_pending(&(w)->work) |
365970a1e WorkStruct: Merge... |
279 |
|
c54fce6ef workqueue: add do... |
280 281 |
/* * Workqueue flags and constants. For details, please refer to |
42412c3aa workqueue: kernel... |
282 |
* Documentation/core-api/workqueue.rst. |
c54fce6ef workqueue: add do... |
283 |
*/ |
97e37d7b9 workqueue: merge ... |
284 |
enum { |
c7fc77f78 workqueue: remove... |
285 |
WQ_UNBOUND = 1 << 1, /* not bound to any cpu */ |
58a69cb47 workqueue, freeze... |
286 |
WQ_FREEZABLE = 1 << 2, /* freeze during suspend */ |
6370a6ad3 workqueue: add an... |
287 |
WQ_MEM_RECLAIM = 1 << 3, /* may be used for memory reclaim */ |
649027d73 workqueue: implem... |
288 |
WQ_HIGHPRI = 1 << 4, /* high priority */ |
41f50094b workqueue: Spelli... |
289 |
WQ_CPU_INTENSIVE = 1 << 5, /* cpu intensive workqueue */ |
226223ab3 workqueue: implem... |
290 |
WQ_SYSFS = 1 << 6, /* visible in sysfs, see wq_sysfs_register() */ |
b71ab8c20 workqueue: increa... |
291 |
|
cee22a150 workqueues: Intro... |
292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 |
/* * Per-cpu workqueues are generally preferred because they tend to * show better performance thanks to cache locality. Per-cpu * workqueues exclude the scheduler from choosing the CPU to * execute the worker threads, which has an unfortunate side effect * of increasing power consumption. * * The scheduler considers a CPU idle if it doesn't have any task * to execute and tries to keep idle cores idle to conserve power; * however, for example, a per-cpu work item scheduled from an * interrupt handler on an idle CPU will force the scheduler to * execute the work item on that CPU breaking the idleness, which in * turn may lead to more scheduling choices which are sub-optimal * in terms of power consumption. * * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default * but become unbound if workqueue.power_efficient kernel param is * specified. Per-cpu workqueues which contribute significantly to * power consumption are identified and * marked with this flag and enabling the power_efficient mode * leads to noticeable power saving at the cost of small * performance disadvantage. * * http://thread.gmane.org/gmane.linux.kernel/1480396 */ WQ_POWER_EFFICIENT = 1 << 7,
618b01eb4 workqueue: make i... |
318 |
__WQ_DRAINING = 1 << 16, /* internal: workqueue is draining */ |
8719dceae workqueue: reject... |
319 |
__WQ_ORDERED = 1 << 17, /* internal: workqueue is ordered */ |
23d11a58a workqueue: skip f... |
320 |
__WQ_LEGACY = 1 << 18, /* internal: create*_workqueue() */ |
fbf1c41fc workqueue: Fix fl... |
321 |
__WQ_ORDERED_EXPLICIT = 1 << 19, /* internal: alloc_ordered_workqueue() */ |
e41e704bc workqueue: improv... |
322 |
|
b71ab8c20 workqueue: increa... |
323 |
WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */ |
f34217977 workqueue: implem... |
324 |
WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */ |
b71ab8c20 workqueue: increa... |
325 |
WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2, |
97e37d7b9 workqueue: merge ... |
326 |
}; |
52bad64d9 WorkStruct: Separ... |
327 |
|
f34217977 workqueue: implem... |
328 329 330 |
/* unbound wq's aren't per-cpu, scale max_active according to #cpus */ #define WQ_UNBOUND_MAX_ACTIVE \ max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU) |
65f27f384 WorkStruct: Pass ... |
331 |
|
d320c0383 workqueue: s/__cr... |
332 333 334 335 336 337 338 339 |
/* * System-wide workqueues which are always present. * * system_wq is the one used by schedule[_delayed]_work[_on](). * Multi-CPU multi-threaded. There are users which expect relatively * short queue flush time. Don't queue works which can run for too * long. * |
73e435444 workqueue: declar... |
340 341 342 |
* system_highpri_wq is similar to system_wq but for work items which * require WQ_HIGHPRI. * |
d320c0383 workqueue: s/__cr... |
343 344 345 |
* system_long_wq is similar to system_wq but may host long running * works. Queue flushing might take relatively long. * |
f34217977 workqueue: implem... |
346 347 348 349 |
* system_unbound_wq is unbound workqueue. Workers are not bound to * any specific CPU, not concurrency managed, and all queued works are * executed immediately as long as max_active limit is not reached and * resources are available. |
4149efb22 workqueue: add sy... |
350 |
* |
24d51add7 workqueue: fix bu... |
351 352 |
* system_freezable_wq is equivalent to system_wq except that it's * freezable. |
0668106ca workqueue: Add sy... |
353 354 355 356 357 358 |
* * *_power_efficient_wq are inclined towards saving power and converted * into WQ_UNBOUND variants if 'wq_power_efficient' is enabled; otherwise, * they are same as their non-power-efficient counterparts - e.g. * system_power_efficient_wq is identical to system_wq if * 'wq_power_efficient' is disabled. See WQ_POWER_EFFICIENT for more info. |
d320c0383 workqueue: s/__cr... |
359 360 |
*/ extern struct workqueue_struct *system_wq; |
73e435444 workqueue: declar... |
361 |
extern struct workqueue_struct *system_highpri_wq; |
d320c0383 workqueue: s/__cr... |
362 |
extern struct workqueue_struct *system_long_wq; |
f34217977 workqueue: implem... |
363 |
extern struct workqueue_struct *system_unbound_wq; |
24d51add7 workqueue: fix bu... |
364 |
extern struct workqueue_struct *system_freezable_wq; |
0668106ca workqueue: Add sy... |
365 366 |
extern struct workqueue_struct *system_power_efficient_wq; extern struct workqueue_struct *system_freezable_power_efficient_wq; |
ae930e0f4 workqueue: gut sy... |
367 |
|
b196be89c workqueue: make a... |
368 369 370 371 372 |
/** * alloc_workqueue - allocate a workqueue * @fmt: printf format for the name of the workqueue * @flags: WQ_* flags * @max_active: max in-flight work items, 0 for default |
669de8bda kernel/workqueue:... |
373 |
* remaining args: args for @fmt |
b196be89c workqueue: make a... |
374 375 |
* * Allocate a workqueue with the specified parameters. For detailed |
42412c3aa workqueue: kernel... |
376 377 |
* information on WQ_* flags, please refer to * Documentation/core-api/workqueue.rst. |
b196be89c workqueue: make a... |
378 |
* |
b196be89c workqueue: make a... |
379 380 381 |
* RETURNS: * Pointer to the allocated workqueue on success, %NULL on failure. */ |
669de8bda kernel/workqueue:... |
382 383 384 |
struct workqueue_struct *alloc_workqueue(const char *fmt, unsigned int flags, int max_active, ...); |
4e6045f13 workqueue: debug ... |
385 |
|
81dcaf651 workqueue: implem... |
386 387 |
/** * alloc_ordered_workqueue - allocate an ordered workqueue |
b196be89c workqueue: make a... |
388 |
* @fmt: printf format for the name of the workqueue |
58a69cb47 workqueue, freeze... |
389 |
* @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful) |
355c06633 workqueue: fix so... |
390 |
* @args...: args for @fmt |
81dcaf651 workqueue: implem... |
391 392 393 394 395 396 397 398 |
* * Allocate an ordered workqueue. An ordered workqueue executes at * most one work item at any given time in the queued order. They are * implemented as unbound workqueues with @max_active of one. * * RETURNS: * Pointer to the allocated workqueue on success, %NULL on failure. */ |
ee64e7f69 workqueue: cosmet... |
399 |
#define alloc_ordered_workqueue(fmt, flags, args...) \ |
0a94efb5a workqueue: implic... |
400 401 |
alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | \ __WQ_ORDERED_EXPLICIT | (flags), 1, ##args) |
81dcaf651 workqueue: implem... |
402 |
|
ee64e7f69 workqueue: cosmet... |
403 |
#define create_workqueue(name) \ |
23d11a58a workqueue: skip f... |
404 |
alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name)) |
ee64e7f69 workqueue: cosmet... |
405 |
#define create_freezable_workqueue(name) \ |
23d11a58a workqueue: skip f... |
406 407 |
alloc_workqueue("%s", __WQ_LEGACY | WQ_FREEZABLE | WQ_UNBOUND | \ WQ_MEM_RECLAIM, 1, (name)) |
ee64e7f69 workqueue: cosmet... |
408 |
#define create_singlethread_workqueue(name) \ |
23d11a58a workqueue: skip f... |
409 |
alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, name) |
1da177e4c Linux-2.6.12-rc2 |
410 411 |
extern void destroy_workqueue(struct workqueue_struct *wq); |
513c98d08 workqueue: unconf... |
412 413 414 415 |
struct workqueue_attrs *alloc_workqueue_attrs(void); void free_workqueue_attrs(struct workqueue_attrs *attrs); int apply_workqueue_attrs(struct workqueue_struct *wq, const struct workqueue_attrs *attrs); |
042f7df15 workqueue: Allow ... |
416 |
int workqueue_set_unbound_cpumask(cpumask_var_t cpumask); |
7a4e344c5 workqueue: introd... |
417 |
|
d4283e937 workqueue: make q... |
418 |
extern bool queue_work_on(int cpu, struct workqueue_struct *wq, |
c1a220e7a pm: introduce new... |
419 |
struct work_struct *work); |
8204e0c11 workqueue: Provid... |
420 421 |
extern bool queue_work_node(int node, struct workqueue_struct *wq, struct work_struct *work); |
d4283e937 workqueue: make q... |
422 |
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, |
28e53bddf unify flush_work/... |
423 |
struct delayed_work *work, unsigned long delay); |
8376fe22c workqueue: implem... |
424 425 |
extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, struct delayed_work *dwork, unsigned long delay); |
05f0fe6b7 RCU, workqueue: I... |
426 |
extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork); |
28e53bddf unify flush_work/... |
427 |
|
b3c975286 include/linux: Re... |
428 |
extern void flush_workqueue(struct workqueue_struct *wq); |
9c5a2ba70 workqueue: separa... |
429 |
extern void drain_workqueue(struct workqueue_struct *wq); |
1da177e4c Linux-2.6.12-rc2 |
430 |
|
65f27f384 WorkStruct: Pass ... |
431 |
extern int schedule_on_each_cpu(work_func_t func); |
1da177e4c Linux-2.6.12-rc2 |
432 |
|
65f27f384 WorkStruct: Pass ... |
433 |
int execute_in_process_context(work_func_t fn, struct execute_work *); |
1da177e4c Linux-2.6.12-rc2 |
434 |
|
401a8d048 workqueue: cleanu... |
435 436 437 438 |
extern bool flush_work(struct work_struct *work); extern bool cancel_work_sync(struct work_struct *work); extern bool flush_delayed_work(struct delayed_work *dwork); |
57b30ae77 workqueue: reimpl... |
439 |
extern bool cancel_delayed_work(struct delayed_work *dwork); |
401a8d048 workqueue: cleanu... |
440 |
extern bool cancel_delayed_work_sync(struct delayed_work *dwork); |
28e53bddf unify flush_work/... |
441 |
|
05f0fe6b7 RCU, workqueue: I... |
442 |
extern bool flush_rcu_work(struct rcu_work *rwork); |
dcd989cb7 workqueue: implem... |
443 444 |
extern void workqueue_set_max_active(struct workqueue_struct *wq, int max_active); |
27d4ee030 workqueue: Allow ... |
445 |
extern struct work_struct *current_work(void); |
e62676169 workqueue: implem... |
446 |
extern bool current_is_workqueue_rescuer(void); |
d84ff0512 workqueue: consis... |
447 |
extern bool workqueue_congested(int cpu, struct workqueue_struct *wq); |
dcd989cb7 workqueue: implem... |
448 |
extern unsigned int work_busy(struct work_struct *work); |
3d1cb2059 workqueue: includ... |
449 450 |
extern __printf(1, 2) void set_worker_desc(const char *fmt, ...); extern void print_worker_info(const char *log_lvl, struct task_struct *task); |
3494fc308 workqueue: dump w... |
451 |
extern void show_workqueue_state(void); |
6b59808bf workqueue: Show t... |
452 |
extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task); |
dcd989cb7 workqueue: implem... |
453 |
|
8425e3d5b workqueue: inline... |
454 455 456 457 458 459 460 461 462 |
/** * queue_work - queue work on a workqueue * @wq: workqueue to use * @work: work to queue * * Returns %false if @work was already on a queue, %true otherwise. * * We queue the work to the CPU on which it was submitted, but if the CPU dies * it can be processed by another CPU. |
dbb92f886 workqueue: Docume... |
463 464 465 466 467 468 469 470 471 472 473 474 475 |
* * Memory-ordering properties: If it returns %true, guarantees that all stores * preceding the call to queue_work() in the program order will be visible from * the CPU which will execute @work by the time such work executes, e.g., * * { x is initially 0 } * * CPU0 CPU1 * * WRITE_ONCE(x, 1); [ @work is being executed ] * r0 = queue_work(wq, work); r1 = READ_ONCE(x); * * Forbids: r0 == true && r1 == 0 |
8425e3d5b workqueue: inline... |
476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 |
*/ static inline bool queue_work(struct workqueue_struct *wq, struct work_struct *work) { return queue_work_on(WORK_CPU_UNBOUND, wq, work); } /** * queue_delayed_work - queue work on a workqueue after delay * @wq: workqueue to use * @dwork: delayable work to queue * @delay: number of jiffies to wait before queueing * * Equivalent to queue_delayed_work_on() but tries to use the local CPU. */ static inline bool queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *dwork, unsigned long delay) { return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay); } /** * mod_delayed_work - modify delay of or queue a delayed work * @wq: workqueue to use * @dwork: work to queue * @delay: number of jiffies to wait before queueing * * mod_delayed_work_on() on local CPU. */ static inline bool mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dwork, unsigned long delay) { return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay); } /** * schedule_work_on - put work task on a specific cpu * @cpu: cpu to put the work task on * @work: job to be done * * This puts a job on a specific cpu */ static inline bool schedule_work_on(int cpu, struct work_struct *work) { return queue_work_on(cpu, system_wq, work); } /** * schedule_work - put work task in global workqueue * @work: job to be done * * Returns %false if @work was already on the kernel-global workqueue and * %true otherwise. * * This puts a job in the kernel-global workqueue if it was not already * queued and leaves it in the same position on the kernel-global * workqueue otherwise. |
dbb92f886 workqueue: Docume... |
535 536 537 |
* * Shares the same memory-ordering properties of queue_work(), cf. the * DocBook header of queue_work(). |
8425e3d5b workqueue: inline... |
538 539 540 541 542 543 544 |
*/ static inline bool schedule_work(struct work_struct *work) { return queue_work(system_wq, work); } /** |
37b1ef31a workqueue: move f... |
545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 |
* flush_scheduled_work - ensure that any scheduled work has run to completion. * * Forces execution of the kernel-global workqueue and blocks until its * completion. * * Think twice before calling this function! It's very easy to get into * trouble if you don't take great care. Either of the following situations * will lead to deadlock: * * One of the work items currently on the workqueue needs to acquire * a lock held by your code or its caller. * * Your code is running in the context of a work routine. * * They will be detected by lockdep when they occur, but the first might not * occur very often. It depends on what work items are on the workqueue and * what locks they need, which you have no control over. * * In most situations flushing the entire workqueue is overkill; you merely * need to know that a particular work item isn't queued and isn't running. * In such cases you should use cancel_delayed_work_sync() or * cancel_work_sync() instead. */ static inline void flush_scheduled_work(void) { flush_workqueue(system_wq); } /** |
8425e3d5b workqueue: inline... |
574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 |
* schedule_delayed_work_on - queue work in global workqueue on CPU after delay * @cpu: cpu to use * @dwork: job to be done * @delay: number of jiffies to wait * * After waiting for a given time this puts a job in the kernel-global * workqueue on the specified CPU. */ static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay) { return queue_delayed_work_on(cpu, system_wq, dwork, delay); } /** * schedule_delayed_work - put work task in global workqueue after delay * @dwork: job to be done * @delay: number of jiffies to wait or 0 for immediate execution * * After waiting for a given time this puts a job in the kernel-global * workqueue. */ static inline bool schedule_delayed_work(struct delayed_work *dwork, unsigned long delay) { return queue_delayed_work(system_wq, dwork, delay); } |
2d3854a37 cpumask: introduc... |
601 |
#ifndef CONFIG_SMP |
d84ff0512 workqueue: consis... |
602 |
static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg) |
2d3854a37 cpumask: introduc... |
603 604 605 |
{ return fn(arg); } |
0e8d6a933 workqueue: Provid... |
606 607 608 609 |
static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg) { return fn(arg); } |
2d3854a37 cpumask: introduc... |
610 |
#else |
d84ff0512 workqueue: consis... |
611 |
long work_on_cpu(int cpu, long (*fn)(void *), void *arg); |
0e8d6a933 workqueue: Provid... |
612 |
long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg); |
2d3854a37 cpumask: introduc... |
613 |
#endif /* CONFIG_SMP */ |
a25909a4d lockdep: Add an i... |
614 |
|
a0a1a5fd4 workqueue: reimpl... |
615 616 617 618 619 |
#ifdef CONFIG_FREEZER extern void freeze_workqueues_begin(void); extern bool freeze_workqueues_busy(void); extern void thaw_workqueues(void); #endif /* CONFIG_FREEZER */ |
226223ab3 workqueue: implem... |
620 621 622 623 624 625 |
#ifdef CONFIG_SYSFS int workqueue_sysfs_register(struct workqueue_struct *wq); #else /* CONFIG_SYSFS */ static inline int workqueue_sysfs_register(struct workqueue_struct *wq) { return 0; } #endif /* CONFIG_SYSFS */ |
82607adcf workqueue: implem... |
626 627 628 629 630 |
#ifdef CONFIG_WQ_WATCHDOG void wq_watchdog_touch(int cpu); #else /* CONFIG_WQ_WATCHDOG */ static inline void wq_watchdog_touch(int cpu) { } #endif /* CONFIG_WQ_WATCHDOG */ |
7ee681b25 workqueue: Conver... |
631 632 633 634 635 |
#ifdef CONFIG_SMP int workqueue_prepare_cpu(unsigned int cpu); int workqueue_online_cpu(unsigned int cpu); int workqueue_offline_cpu(unsigned int cpu); #endif |
2333e8299 workqueue: Make w... |
636 637 |
void __init workqueue_init_early(void); void __init workqueue_init(void); |
3347fa092 workqueue: make w... |
638 |
|
1da177e4c Linux-2.6.12-rc2 |
639 |
#endif |