Blame view
include/linux/kthread.h
6.75 KB
b24413180
|
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KTHREAD_H
#define _LINUX_KTHREAD_H
/* Simple interface for creating and stopping kernel threads without mess. */
#include <linux/err.h>
#include <linux/sched.h>

struct mm_struct;

/* __printf(4, 5): namefmt is argument 4, its varargs start at 5. */
__printf(4, 5)
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data,
					   int node,
					   const char namefmt[], ...);

/**
 * kthread_create - create a kthread on the current node
 * @threadfn: the function to run in the thread
 * @data: data pointer for @threadfn()
 * @namefmt: printf-style format string for the thread name
 * @arg...: arguments for @namefmt.
 *
 * This macro will create a kthread on the current node, leaving it in
 * the stopped state.  This is just a helper for kthread_create_on_node();
 * see the documentation there for more details.
 */
#define kthread_create(threadfn, data, namefmt, arg...) \
	kthread_create_on_node(threadfn, data, NUMA_NO_NODE, namefmt, ##arg)
207205a2b
|
27 |
|
1da177e4c
|
28 |
|
/* Like kthread_create(), but binds the new thread's wakeup to @cpu. */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
					  void *data,
					  unsigned int cpu,
					  const char *namefmt);

/**
 * kthread_run - create and wake a thread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @namefmt: printf-style name for the thread.
 *
 * Description: Convenient wrapper for kthread_create() followed by
 * wake_up_process().  Returns the kthread or ERR_PTR(-ENOMEM).
 */
#define kthread_run(threadfn, data, namefmt, ...)			   \
({									   \
	struct task_struct *__k						   \
		= kthread_create(threadfn, data, namefmt, ## __VA_ARGS__); \
	if (!IS_ERR(__k))						   \
		wake_up_process(__k);					   \
	__k;								   \
})

void free_kthread_struct(struct task_struct *k);
void kthread_bind(struct task_struct *k, unsigned int cpu);
void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask);
/*
 * NOTE(review): kthread_stop()'s int return presumably propagates
 * @threadfn's exit code — confirm against kernel/kthread.c.
 */
int kthread_stop(struct task_struct *k);
/* Polled from inside a kthread to learn whether to stop or park. */
bool kthread_should_stop(void);
bool kthread_should_park(void);
/* Same park query, but for an arbitrary task @k instead of current. */
bool __kthread_should_park(struct task_struct *k);
bool kthread_freezable_should_stop(bool *was_frozen);
void *kthread_func(struct task_struct *k);
void *kthread_data(struct task_struct *k);
/*
 * NOTE(review): "probe" naming suggests a best-effort/fault-tolerant
 * read of the data pointer — verify semantics in kernel/kthread.c.
 */
void *kthread_probe_data(struct task_struct *k);
int kthread_park(struct task_struct *k);
void kthread_unpark(struct task_struct *k);
void kthread_parkme(void);
1da177e4c
|
64 |
|
/* The kthreadd daemon: thread function and its task, spawned at boot. */
int kthreadd(void *unused);
extern struct task_struct *kthreadd_task;
extern int tsk_fork_get_node(struct task_struct *tsk);
73c279927
|
68 |
|
/*
 * Simple work processor based on kthread.
 *
 * This provides easier way to make use of kthreads.  A kthread_work
 * can be queued and flushed using queue/kthread_flush_work()
 * respectively.  Queued kthread_works are processed by a kthread
 * running kthread_worker_fn().
 */
struct kthread_work;
typedef void (*kthread_work_func_t)(struct kthread_work *work);
/* Timer callback used by delayed works (see KTHREAD_DELAYED_WORK_INIT). */
void kthread_delayed_work_timer_fn(struct timer_list *t);

/* Worker creation flags (kthread_create_worker*() @flags argument). */
enum {
	KTW_FREEZABLE		= 1 << 0,	/* freeze during suspend */
};
struct kthread_worker {
	unsigned int		flags;		/* KTW_* bits */
	raw_spinlock_t		lock;		/* protects the lists below */
	struct list_head	work_list;
	struct list_head	delayed_work_list;
	struct task_struct	*task;		/* the kthread servicing this worker */
	struct kthread_work	*current_work;	/* work being executed right now */
};

struct kthread_work {
	struct list_head	node;		/* entry in worker->work_list */
	kthread_work_func_t	func;
	struct kthread_worker	*worker;	/* worker the work is queued on */
	/* Number of canceling calls that are running at the moment. */
	int			canceling;
};

/* A kthread_work whose queueing is deferred by a timer. */
struct kthread_delayed_work {
	struct kthread_work work;
	struct timer_list timer;
};
/*
 * Static initializers; remaining members (flags, task, current_work)
 * are zero-initialized by the designated initializer.
 */
#define KTHREAD_WORKER_INIT(worker)	{				\
	.lock = __RAW_SPIN_LOCK_UNLOCKED((worker).lock),		\
	.work_list = LIST_HEAD_INIT((worker).work_list),		\
	.delayed_work_list = LIST_HEAD_INIT((worker).delayed_work_list),\
	}

#define KTHREAD_WORK_INIT(work, fn)	{				\
	.node = LIST_HEAD_INIT((work).node),				\
	.func = (fn),							\
	}

#define KTHREAD_DELAYED_WORK_INIT(dwork, fn) {				\
	.work = KTHREAD_WORK_INIT((dwork).work, (fn)),			\
	.timer = __TIMER_INITIALIZER(kthread_delayed_work_timer_fn,\
				     TIMER_IRQSAFE),			\
	}

/* Definition helpers: declare the object and statically initialize it. */
#define DEFINE_KTHREAD_WORKER(worker)					\
	struct kthread_worker worker = KTHREAD_WORKER_INIT(worker)

#define DEFINE_KTHREAD_WORK(work, fn)					\
	struct kthread_work work = KTHREAD_WORK_INIT(work, fn)

#define DEFINE_KTHREAD_DELAYED_WORK(dwork, fn)				\
	struct kthread_delayed_work dwork =				\
		KTHREAD_DELAYED_WORK_INIT(dwork, fn)
/*
 * kthread_worker.lock needs its own lockdep class key when defined on
 * stack with lockdep enabled.  Use the following macros in such cases.
 */
#ifdef CONFIG_LOCKDEP
# define KTHREAD_WORKER_INIT_ONSTACK(worker)				\
	({ kthread_init_worker(&worker); worker; })
# define DEFINE_KTHREAD_WORKER_ONSTACK(worker)				\
	struct kthread_worker worker = KTHREAD_WORKER_INIT_ONSTACK(worker)
#else
# define DEFINE_KTHREAD_WORKER_ONSTACK(worker) DEFINE_KTHREAD_WORKER(worker)
#endif

extern void __kthread_init_worker(struct kthread_worker *worker,
				  const char *name,
				  struct lock_class_key *key);

/*
 * Runtime initializer.  The static __key gives each expansion site its
 * own lockdep class; "(#worker)->lock" is the lock's lockdep name.
 */
#define kthread_init_worker(worker)					\
	do {								\
		static struct lock_class_key __key;			\
		__kthread_init_worker((worker), "("#worker")->lock", &__key); \
	} while (0)
/* Runtime (re)initialization of a work item; zeroes all state first. */
#define kthread_init_work(work, fn)					\
	do {								\
		memset((work), 0, sizeof(struct kthread_work));		\
		INIT_LIST_HEAD(&(work)->node);				\
		(work)->func = (fn);					\
	} while (0)

/* As above, plus timer setup for the delayed variant. */
#define kthread_init_delayed_work(dwork, fn)				\
	do {								\
		kthread_init_work(&(dwork)->work, (fn));		\
		timer_setup(&(dwork)->timer,				\
			    kthread_delayed_work_timer_fn,		\
			    TIMER_IRQSAFE);				\
	} while (0)
/* Thread function run by a worker's kthread (see intro comment above). */
int kthread_worker_fn(void *worker_ptr);

__printf(2, 3)
struct kthread_worker *
kthread_create_worker(unsigned int flags, const char namefmt[], ...);

__printf(3, 4) struct task_struct; /* no-op fwd decl removed */ struct kthread_worker *
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
			     const char namefmt[], ...);

/*
 * NOTE(review): the bool returns presumably report whether the work was
 * actually queued/modified/cancelled — confirm in kernel/kthread.c.
 */
bool kthread_queue_work(struct kthread_worker *worker,
			struct kthread_work *work);

bool kthread_queue_delayed_work(struct kthread_worker *worker,
				struct kthread_delayed_work *dwork,
				unsigned long delay);

bool kthread_mod_delayed_work(struct kthread_worker *worker,
			      struct kthread_delayed_work *dwork,
			      unsigned long delay);

void kthread_flush_work(struct kthread_work *work);
void kthread_flush_worker(struct kthread_worker *worker);

bool kthread_cancel_work_sync(struct kthread_work *work);
bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *work);

void kthread_destroy_worker(struct kthread_worker *worker);
/* Temporarily adopt / drop a user mm from a kthread's context. */
void kthread_use_mm(struct mm_struct *mm);
void kthread_unuse_mm(struct mm_struct *mm);

struct cgroup_subsys_state;
#ifdef CONFIG_BLK_CGROUP
void kthread_associate_blkcg(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *kthread_blkcg(void);
#else
/* Stubs when block cgroups are compiled out: no association, no lookup. */
static inline void kthread_associate_blkcg(struct cgroup_subsys_state *css) { }
static inline struct cgroup_subsys_state *kthread_blkcg(void)
{
	return NULL;
}
#endif
#endif /* _LINUX_KTHREAD_H */