/*
 * async.c: Asynchronous function calls for boot performance
 *
 * (C) Copyright 2009 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

/*

Goals and Theory of Operation

The primary goal of this feature is to reduce the kernel boot time, by doing
various independent hardware delays and discovery operations decoupled and
not strictly serialized.

More specifically, the asynchronous function call concept allows certain
operations (primarily during system boot) to happen asynchronously, out of
order, while these operations still have their externally visible parts
happen sequentially and in-order (not unlike how out-of-order CPUs retire
their instructions in order).

Key to the asynchronous function call implementation is the concept of a
"sequence cookie" (which, although it has an abstracted type, can be thought
of as a monotonically incrementing number).

The async core will assign each scheduled event such a sequence cookie and
pass this to the called functions.

The asynchronously called function should, before doing a globally visible
operation such as registering device numbers, call the
async_synchronize_cookie() function and pass in its own cookie. The
async_synchronize_cookie() function will make sure that all asynchronous
operations that were scheduled prior to the operation corresponding with the
cookie have completed.

Subsystem/driver initialization code that has scheduled asynchronous probe
functions, but which shares global resources with other drivers/subsystems
that do not use the asynchronous call feature, needs to do a full
synchronization with the async_synchronize_full() function before returning
from its init function. This is to maintain strict ordering between the
asynchronous and synchronous parts of the kernel.

*/

#include <linux/async.h>
#include <linux/atomic.h>
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
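/*
 * A minimal usage sketch of the pattern described in the "Goals and
 * Theory of Operation" comment above. The names my_probe(), my_init(),
 * do_slow_discovery() and my_device_instance are hypothetical and only
 * illustrate the API:
 *
 *	static void my_probe(void *data, async_cookie_t cookie)
 *	{
 *		struct my_device *dev = data;
 *
 *		do_slow_discovery(dev);		// may run out of order
 *	}
 *
 *	static int __init my_init(void)
 *	{
 *		async_schedule(my_probe, &my_device_instance);
 *		// shares global state with non-async code, so wait:
 *		async_synchronize_full();
 *		return 0;
 *	}
 */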
static async_cookie_t next_cookie = 1;

#define MAX_WORK	32768

static LIST_HEAD(async_pending);
static LIST_HEAD(async_running);
static DEFINE_SPINLOCK(async_lock);

struct async_entry {
	struct list_head	list;
	struct work_struct	work;
	async_cookie_t		cookie;
	async_func_ptr		*func;
	void			*data;
	struct list_head	*running;
};

static DECLARE_WAIT_QUEUE_HEAD(async_done);

static atomic_t entry_count;
extern int initcall_debug;


/*
 * MUST be called with the lock held!
 */
static async_cookie_t __lowest_in_progress(struct list_head *running)
{
	struct async_entry *entry;

	if (!list_empty(running)) {
		entry = list_first_entry(running,
			struct async_entry, list);
		return entry->cookie;
	}

	list_for_each_entry(entry, &async_pending, list)
		if (entry->running == running)
			return entry->cookie;

	return next_cookie;	/* "infinity" value */
}
static async_cookie_t lowest_in_progress(struct list_head *running)
{
	unsigned long flags;
	async_cookie_t ret;

	spin_lock_irqsave(&async_lock, flags);
	ret = __lowest_in_progress(running);
	spin_unlock_irqrestore(&async_lock, flags);
	return ret;
}
/*
 * pick the first pending entry and run it
 */
static void async_run_entry_fn(struct work_struct *work)
{
	struct async_entry *entry =
		container_of(work, struct async_entry, work);
	unsigned long flags;
	ktime_t calltime, delta, rettime;

	/* 1) move self to the running queue */
	spin_lock_irqsave(&async_lock, flags);
	list_move_tail(&entry->list, entry->running);
	spin_unlock_irqrestore(&async_lock, flags);

	/* 2) run (and print duration) */
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		printk(KERN_DEBUG "calling  %lli_%pF @ %i\n",
			(long long)entry->cookie,
			entry->func, task_pid_nr(current));
		calltime = ktime_get();
	}
	entry->func(entry->data, entry->cookie);
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		printk(KERN_DEBUG "initcall %lli_%pF returned 0 after %lld usecs\n",
			(long long)entry->cookie,
			entry->func,
			(long long)ktime_to_ns(delta) >> 10);
	}

	/* 3) remove self from the running queue */
	spin_lock_irqsave(&async_lock, flags);
	list_del(&entry->list);

	/* 4) free the entry */
	kfree(entry);
	atomic_dec(&entry_count);
	spin_unlock_irqrestore(&async_lock, flags);

	/* 5) wake up any waiters */
	wake_up(&async_done);
}
static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct list_head *running)
{
	struct async_entry *entry;
	unsigned long flags;
	async_cookie_t newcookie;

	/* allow irq-off callers */
	entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);

	/*
	 * If we're out of memory or if there's too much work
	 * pending already, we execute synchronously.
	 */
	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
		kfree(entry);
		spin_lock_irqsave(&async_lock, flags);
		newcookie = next_cookie++;
		spin_unlock_irqrestore(&async_lock, flags);

		/* low on memory.. run synchronously */
		ptr(data, newcookie);
		return newcookie;
	}
	INIT_WORK(&entry->work, async_run_entry_fn);
	entry->func = ptr;
	entry->data = data;
	entry->running = running;

	spin_lock_irqsave(&async_lock, flags);
	newcookie = entry->cookie = next_cookie++;
	list_add_tail(&entry->list, &async_pending);
	atomic_inc(&entry_count);
	spin_unlock_irqrestore(&async_lock, flags);

	/* schedule for execution */
	queue_work(system_unbound_wq, &entry->work);

	return newcookie;
}
/**
 * async_schedule - schedule a function for asynchronous execution
 * @ptr: function to execute asynchronously
 * @data: data pointer to pass to the function
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * Note: This function may be called from atomic or non-atomic contexts.
 */
async_cookie_t async_schedule(async_func_ptr *ptr, void *data)
{
	return __async_schedule(ptr, data, &async_running);
}
EXPORT_SYMBOL_GPL(async_schedule);
/**
 * async_schedule_domain - schedule a function for asynchronous execution within a certain domain
 * @ptr: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @running: running list for the domain
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * A synchronization domain is specified via the running queue @running;
 * @running may then be used in the async_synchronize_*_domain() functions
 * to wait within that domain rather than globally.
 * Note: This function may be called from atomic or non-atomic contexts.
 */
async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data,
				     struct list_head *running)
{
	return __async_schedule(ptr, data, running);
}
EXPORT_SYMBOL_GPL(async_schedule_domain);
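/*
 * A minimal sketch of a private synchronization domain; my_domain and
 * my_async_work() are hypothetical. A subsystem that only needs to
 * order its own asynchronous calls can pass its own running list, so
 * its waiters are not held up by unrelated async work:
 *
 *	static LIST_HEAD(my_domain);
 *
 *	async_schedule_domain(my_async_work, data, &my_domain);
 *	...
 *	async_synchronize_full_domain(&my_domain);
 */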
/**
 * async_synchronize_full - synchronize all asynchronous function calls
 *
 * This function waits until all asynchronous function calls have been done.
 */
void async_synchronize_full(void)
{
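	/*
	 * An async function may itself schedule further async work, so
	 * re-check both lists and synchronize again until they drain.
	 */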
	do {
		async_synchronize_cookie(next_cookie);
	} while (!list_empty(&async_running) || !list_empty(&async_pending));
}
EXPORT_SYMBOL_GPL(async_synchronize_full);
/**
 * async_synchronize_full_domain - synchronize all asynchronous function calls within a certain domain
 * @list: running list to synchronize on
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by the running list @list have been done.
 */
void async_synchronize_full_domain(struct list_head *list)
{
	async_synchronize_cookie_domain(next_cookie, list);
}
EXPORT_SYMBOL_GPL(async_synchronize_full_domain);
/**
 * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 * @running: running list to synchronize on
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by the running list @running submitted
 * prior to @cookie have been done.
 */
void async_synchronize_cookie_domain(async_cookie_t cookie,
				     struct list_head *running)
{
	ktime_t starttime, delta, endtime;

	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		printk(KERN_DEBUG "async_waiting @ %i\n",
			task_pid_nr(current));
		starttime = ktime_get();
	}

	wait_event(async_done, lowest_in_progress(running) >= cookie);

	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		endtime = ktime_get();
		delta = ktime_sub(endtime, starttime);

		printk(KERN_DEBUG "async_continuing @ %i after %lli usec\n",
			task_pid_nr(current),
			(long long)ktime_to_ns(delta) >> 10);
	}
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);
/**
 * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 *
 * This function waits until all asynchronous function calls prior to @cookie
 * have been done.
 */
void async_synchronize_cookie(async_cookie_t cookie)
{
	async_synchronize_cookie_domain(cookie, &async_running);
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie);
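/*
 * A minimal checkpointing sketch; my_async_probe(), do_slow_discovery()
 * and register_my_device() are hypothetical. Before its externally
 * visible step, the async function synchronizes on its own cookie so
 * that registration happens in scheduling order even though the slow
 * work itself ran out of order:
 *
 *	static void my_async_probe(void *data, async_cookie_t cookie)
 *	{
 *		do_slow_discovery(data);		// out of order is fine
 *		async_synchronize_cookie(cookie);	// wait for earlier calls
 *		register_my_device(data);		// globally visible, in order
 *	}
 */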