kernel/async.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * async.c: Asynchronous function calls for boot performance
 *
 * (C) Copyright 2009 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 */

/*

Goals and Theory of Operation

The primary goal of this feature is to reduce the kernel boot time, by
running various independent hardware delay and discovery operations
decoupled rather than strictly serialized.

More specifically, the asynchronous function call concept allows
certain operations (primarily during system boot) to happen
asynchronously, out of order, while these operations still
have their externally visible parts happen sequentially and in-order.
(not unlike how out-of-order CPUs retire their instructions in order)

Key to the asynchronous function call implementation is the concept of
a "sequence cookie" (which, although it has an abstracted type, can be
thought of as a monotonically incrementing number).

The async core will assign each scheduled event such a sequence cookie and
pass this to the called functions.

Before doing a globally visible operation, such as registering device
numbers, the asynchronously called function should call
async_synchronize_cookie() and pass in its own cookie. The
async_synchronize_cookie() function will make sure that all asynchronous
operations that were scheduled prior to the operation corresponding with the
cookie have completed.

Subsystem/driver initialization code that scheduled asynchronous probe
functions, but which shares global resources with other drivers/subsystems
that do not use the asynchronous call feature, needs to do a full
synchronization with the async_synchronize_full() function before returning
from its init function. This is to maintain strict ordering between the
asynchronous and synchronous parts of the kernel.

A brief usage sketch follows the #include lines below.

*/

#include <linux/async.h>
#include <linux/atomic.h>
#include <linux/ktime.h>
#include <linux/export.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "workqueue_internal.h"

static async_cookie_t next_cookie = 1;

#define MAX_WORK		32768
#define ASYNC_COOKIE_MAX	ULLONG_MAX	/* infinity cookie */

static LIST_HEAD(async_global_pending);	/* pending from all registered doms */
static ASYNC_DOMAIN(async_dfl_domain);
static DEFINE_SPINLOCK(async_lock);

struct async_entry {
	struct list_head	domain_list;
	struct list_head	global_list;
	struct work_struct	work;
	async_cookie_t		cookie;
	async_func_t		func;
	void			*data;
	struct async_domain	*domain;
};

static DECLARE_WAIT_QUEUE_HEAD(async_done);

static atomic_t entry_count;

/*
 * Return the cookie of the oldest entry still pending in @domain, or in
 * all registered domains when @domain is NULL; ASYNC_COOKIE_MAX if
 * nothing is pending.
 */
static async_cookie_t lowest_in_progress(struct async_domain *domain)
{
	struct async_entry *first = NULL;
	async_cookie_t ret = ASYNC_COOKIE_MAX;
	unsigned long flags;

	spin_lock_irqsave(&async_lock, flags);

	if (domain) {
		if (!list_empty(&domain->pending))
			first = list_first_entry(&domain->pending,
					struct async_entry, domain_list);
	} else {
		if (!list_empty(&async_global_pending))
			first = list_first_entry(&async_global_pending,
					struct async_entry, global_list);
	}

	if (first)
		ret = first->cookie;

	spin_unlock_irqrestore(&async_lock, flags);
	return ret;
}

/*
 * pick the first pending entry and run it
 */
static void async_run_entry_fn(struct work_struct *work)
{
	struct async_entry *entry =
		container_of(work, struct async_entry, work);
	unsigned long flags;
	ktime_t uninitialized_var(calltime), delta, rettime;

	/* 1) run (and print duration) */
	if (initcall_debug && system_state < SYSTEM_RUNNING) {
		pr_debug("calling  %lli_%pS @ %i\n",
			 (long long)entry->cookie,
			 entry->func, task_pid_nr(current));
		calltime = ktime_get();
	}
	entry->func(entry->data, entry->cookie);
	if (initcall_debug && system_state < SYSTEM_RUNNING) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		pr_debug("initcall %lli_%pS returned 0 after %lld usecs\n",
			 (long long)entry->cookie,
			 entry->func,
			 (long long)ktime_to_ns(delta) >> 10);
	}

	/* 2) remove self from the pending queues */
	spin_lock_irqsave(&async_lock, flags);
	list_del_init(&entry->domain_list);
	list_del_init(&entry->global_list);

	/* 3) free the entry */
	kfree(entry);
	atomic_dec(&entry_count);

	spin_unlock_irqrestore(&async_lock, flags);

	/* 4) wake up any waiters */
	wake_up(&async_done);
}

/**
 * async_schedule_node_domain - NUMA specific version of async_schedule_domain
 * @func: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @node: NUMA node that we want to schedule this on or close to
 * @domain: the domain
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * @domain may be used in the async_synchronize_*_domain() functions to
 * wait within a certain synchronization domain rather than globally.
 *
 * Note: This function may be called from atomic or non-atomic contexts.
 *
 * The node requested will be honored on a best effort basis. If the node
 * has no CPUs associated with it then the work is distributed among all
 * available CPUs.
 */
async_cookie_t async_schedule_node_domain(async_func_t func, void *data,
					  int node, struct async_domain *domain)
{
	struct async_entry *entry;
	unsigned long flags;
	async_cookie_t newcookie;

	/* allow irq-off callers */
	entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);

	/*
	 * If we're out of memory or if there's too much work
	 * pending already, we execute synchronously.
	 */
	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
		kfree(entry);
		spin_lock_irqsave(&async_lock, flags);
		newcookie = next_cookie++;
		spin_unlock_irqrestore(&async_lock, flags);

		/* low on memory.. run synchronously */
		func(data, newcookie);
		return newcookie;
	}
	INIT_LIST_HEAD(&entry->domain_list);
	INIT_LIST_HEAD(&entry->global_list);
	INIT_WORK(&entry->work, async_run_entry_fn);
	entry->func = func;
	entry->data = data;
	entry->domain = domain;

	spin_lock_irqsave(&async_lock, flags);

	/* allocate cookie and queue */
	newcookie = entry->cookie = next_cookie++;

	list_add_tail(&entry->domain_list, &domain->pending);
	if (domain->registered)
		list_add_tail(&entry->global_list, &async_global_pending);

	atomic_inc(&entry_count);
	spin_unlock_irqrestore(&async_lock, flags);

	/* mark that this task has queued an async job, used by module init */
	current->flags |= PF_USED_ASYNC;

	/* schedule for execution */
	queue_work_node(node, system_unbound_wq, &entry->work);

	return newcookie;
}
EXPORT_SYMBOL_GPL(async_schedule_node_domain);
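
/*
 * A sketch of scheduling into a private synchronization domain (again
 * compiled out, using hypothetical example_* symbols): work queued into
 * the domain can be waited for with the *_domain() helpers without also
 * waiting for unrelated async work elsewhere in the kernel.
 */
#if 0
static ASYNC_DOMAIN_EXCLUSIVE(example_domain);

static int __init example_domain_init(void)
{
	async_schedule_domain(example_async_probe, &example_device,
			      &example_domain);

	/* wait only for the work queued into example_domain */
	async_synchronize_full_domain(&example_domain);
	return 0;
}
#endif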

/**
 * async_schedule_node - NUMA specific version of async_schedule
 * @func: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @node: NUMA node that we want to schedule this on or close to
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * Note: This function may be called from atomic or non-atomic contexts.
 *
 * The node requested will be honored on a best effort basis. If the node
 * has no CPUs associated with it then the work is distributed among all
 * available CPUs.
 */
async_cookie_t async_schedule_node(async_func_t func, void *data, int node)
{
	return async_schedule_node_domain(func, data, node, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_schedule_node);

/**
 * async_synchronize_full - synchronize all asynchronous function calls
 *
 * This function waits until all asynchronous function calls have been done.
 */
void async_synchronize_full(void)
{
	async_synchronize_full_domain(NULL);
}
EXPORT_SYMBOL_GPL(async_synchronize_full);

/**
 * async_unregister_domain - ensure no more anonymous waiters on this domain
 * @domain: idle domain to flush out of any async_synchronize_full instances
 *
 * async_synchronize_{cookie|full}_domain() are not flushed since callers
 * of these routines should know the lifetime of @domain
 *
 * Prefer ASYNC_DOMAIN_EXCLUSIVE() declarations over flushing
 */
void async_unregister_domain(struct async_domain *domain)
{
	spin_lock_irq(&async_lock);
	WARN_ON(!domain->registered || !list_empty(&domain->pending));
	domain->registered = 0;
	spin_unlock_irq(&async_lock);
}
EXPORT_SYMBOL_GPL(async_unregister_domain);

/**
 * async_synchronize_full_domain - synchronize all asynchronous function calls within a certain domain
 * @domain: the domain to synchronize
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by @domain have been done.
 */
void async_synchronize_full_domain(struct async_domain *domain)
{
	async_synchronize_cookie_domain(ASYNC_COOKIE_MAX, domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_full_domain);

/**
 * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 * @domain: the domain to synchronize (%NULL for all registered domains)
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by @domain submitted prior to @cookie
 * have been done.
 */
void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *domain)
{
	ktime_t uninitialized_var(starttime), delta, endtime;

	if (initcall_debug && system_state < SYSTEM_RUNNING) {
		pr_debug("async_waiting @ %i\n", task_pid_nr(current));
		starttime = ktime_get();
	}

	wait_event(async_done, lowest_in_progress(domain) >= cookie);

	if (initcall_debug && system_state < SYSTEM_RUNNING) {
		endtime = ktime_get();
		delta = ktime_sub(endtime, starttime);
		pr_debug("async_continuing @ %i after %lli usec\n",
			 task_pid_nr(current),
			 (long long)ktime_to_ns(delta) >> 10);
	}
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);

/**
 * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 *
 * This function waits until all asynchronous function calls prior to @cookie
 * have been done.
 */
void async_synchronize_cookie(async_cookie_t cookie)
{
	async_synchronize_cookie_domain(cookie, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie);

/**
 * current_is_async - is %current an async worker task?
 *
 * Returns %true if %current is an async worker task.
 */
bool current_is_async(void)
{
	struct worker *worker = current_wq_worker();

	return worker && worker->current_func == async_run_entry_fn;
}
EXPORT_SYMBOL_GPL(current_is_async); |
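
/*
 * A small sketch of how a caller might use current_is_async() (the
 * example functions are hypothetical): code reachable both synchronously
 * and from an async worker can pick a non-blocking path when it is
 * running asynchronously.
 */
#if 0
static int example_load_firmware(struct example_dev *dev)
{
	if (current_is_async())
		return example_request_firmware_nowait(dev);

	return example_request_firmware(dev);
}
#endif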