Blame view
kernel/jump_label.c
11 KB
bf5438fca
|
1 2 3 4 |
/* * jump label support * * Copyright (C) 2009 Jason Baron <jbaron@redhat.com> |
d430d3d7e
|
5 |
* Copyright (C) 2011 Peter Zijlstra <pzijlstr@redhat.com> |
bf5438fca
|
6 7 |
* */ |
bf5438fca
|
8 9 10 11 |
#include <linux/memory.h> #include <linux/uaccess.h> #include <linux/module.h> #include <linux/list.h> |
bf5438fca
|
12 13 14 |
#include <linux/slab.h> #include <linux/sort.h> #include <linux/err.h> |
c5905afb0
|
15 |
#include <linux/static_key.h> |
851cf6e7d
|
16 |
#include <linux/jump_label_ratelimit.h> |
bf5438fca
|
17 18 |
#ifdef HAVE_JUMP_LABEL |
bf5438fca
|
19 20 |
/* mutex to protect coming/going of the the jump_label table */ static DEFINE_MUTEX(jump_label_mutex); |
91bad2f8d
|
21 22 23 24 25 26 27 28 29 |
void jump_label_lock(void) { mutex_lock(&jump_label_mutex); } void jump_label_unlock(void) { mutex_unlock(&jump_label_mutex); } |
bf5438fca
|
30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 |
static int jump_label_cmp(const void *a, const void *b) { const struct jump_entry *jea = a; const struct jump_entry *jeb = b; if (jea->key < jeb->key) return -1; if (jea->key > jeb->key) return 1; return 0; } static void |
d430d3d7e
|
45 |
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop) |
bf5438fca
|
46 47 48 49 50 51 52 |
{ unsigned long size; size = (((unsigned long)stop - (unsigned long)start) / sizeof(struct jump_entry)); sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL); } |
c5905afb0
|
53 |
static void jump_label_update(struct static_key *key, int enable); |
bf5438fca
|
54 |
|
c5905afb0
|
55 |
void static_key_slow_inc(struct static_key *key) |
bf5438fca
|
56 |
{ |
c4b2c0c5f
|
57 |
STATIC_KEY_CHECK_USE(); |
d430d3d7e
|
58 59 |
if (atomic_inc_not_zero(&key->enabled)) return; |
bf5438fca
|
60 |
|
d430d3d7e
|
61 |
jump_label_lock(); |
c5905afb0
|
62 63 64 65 66 67 |
if (atomic_read(&key->enabled) == 0) { if (!jump_label_get_branch_default(key)) jump_label_update(key, JUMP_LABEL_ENABLE); else jump_label_update(key, JUMP_LABEL_DISABLE); } |
bbbf7af4b
|
68 |
atomic_inc(&key->enabled); |
d430d3d7e
|
69 |
jump_label_unlock(); |
bf5438fca
|
70 |
} |
c5905afb0
|
71 |
EXPORT_SYMBOL_GPL(static_key_slow_inc); |
bf5438fca
|
72 |
|
c5905afb0
|
73 |
static void __static_key_slow_dec(struct static_key *key, |
b20295207
|
74 |
unsigned long rate_limit, struct delayed_work *work) |
bf5438fca
|
75 |
{ |
fadf0464b
|
76 77 78 79 |
if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) { WARN(atomic_read(&key->enabled) < 0, "jump label: negative count! "); |
d430d3d7e
|
80 |
return; |
fadf0464b
|
81 |
} |
bf5438fca
|
82 |
|
b20295207
|
83 84 85 |
if (rate_limit) { atomic_inc(&key->enabled); schedule_delayed_work(work, rate_limit); |
c5905afb0
|
86 87 88 89 90 91 |
} else { if (!jump_label_get_branch_default(key)) jump_label_update(key, JUMP_LABEL_DISABLE); else jump_label_update(key, JUMP_LABEL_ENABLE); } |
91bad2f8d
|
92 |
jump_label_unlock(); |
bf5438fca
|
93 |
} |
b20295207
|
94 95 |
static void jump_label_update_timeout(struct work_struct *work) { |
c5905afb0
|
96 97 98 |
struct static_key_deferred *key = container_of(work, struct static_key_deferred, work.work); __static_key_slow_dec(&key->key, 0, NULL); |
b20295207
|
99 |
} |
c5905afb0
|
100 |
void static_key_slow_dec(struct static_key *key) |
b20295207
|
101 |
{ |
c4b2c0c5f
|
102 |
STATIC_KEY_CHECK_USE(); |
c5905afb0
|
103 |
__static_key_slow_dec(key, 0, NULL); |
b20295207
|
104 |
} |
c5905afb0
|
105 |
EXPORT_SYMBOL_GPL(static_key_slow_dec); |
b20295207
|
106 |
|
c5905afb0
|
107 |
void static_key_slow_dec_deferred(struct static_key_deferred *key) |
b20295207
|
108 |
{ |
c4b2c0c5f
|
109 |
STATIC_KEY_CHECK_USE(); |
c5905afb0
|
110 |
__static_key_slow_dec(&key->key, key->timeout, &key->work); |
b20295207
|
111 |
} |
c5905afb0
|
112 |
EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred); |
b20295207
|
113 |
|
c5905afb0
|
114 |
void jump_label_rate_limit(struct static_key_deferred *key, |
b20295207
|
115 116 |
unsigned long rl) { |
c4b2c0c5f
|
117 |
STATIC_KEY_CHECK_USE(); |
b20295207
|
118 119 120 |
key->timeout = rl; INIT_DELAYED_WORK(&key->work, jump_label_update_timeout); } |
a181dc14e
|
121 |
EXPORT_SYMBOL_GPL(jump_label_rate_limit); |
b20295207
|
122 |
|
4c3ef6d79
|
123 124 125 126 127 128 129 130 |
static int addr_conflict(struct jump_entry *entry, void *start, void *end) { if (entry->code <= (unsigned long)end && entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start) return 1; return 0; } |
d430d3d7e
|
131 132 |
static int __jump_label_text_reserved(struct jump_entry *iter_start, struct jump_entry *iter_stop, void *start, void *end) |
4c3ef6d79
|
133 |
{ |
4c3ef6d79
|
134 |
struct jump_entry *iter; |
4c3ef6d79
|
135 |
|
4c3ef6d79
|
136 137 |
iter = iter_start; while (iter < iter_stop) { |
d430d3d7e
|
138 139 |
if (addr_conflict(iter, start, end)) return 1; |
4c3ef6d79
|
140 141 |
iter++; } |
d430d3d7e
|
142 143 |
return 0; } |
20284aa77
|
144 145 146 147 148 149 |
/* * Update code which is definitely not currently executing. * Architectures which need heavyweight synchronization to modify * running code can override this to make the non-live update case * cheaper. */ |
9cdbe1cba
|
150 |
void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry, |
20284aa77
|
151 152 153 154 |
enum jump_label_type type) { arch_jump_label_transform(entry, type); } |
c5905afb0
|
155 |
static void __jump_label_update(struct static_key *key, |
7cbc5b8d4
|
156 157 |
struct jump_entry *entry, struct jump_entry *stop, int enable) |
d430d3d7e
|
158 |
{ |
7cbc5b8d4
|
159 160 161 |
for (; (entry < stop) && (entry->key == (jump_label_t)(unsigned long)key); entry++) { |
d430d3d7e
|
162 163 164 165 166 167 168 169 |
/* * entry->code set to 0 invalidates module init text sections * kernel_text_address() verifies we are not in core kernel * init code, see jump_label_invalidate_module_init(). */ if (entry->code && kernel_text_address(entry->code)) arch_jump_label_transform(entry, enable); } |
4c3ef6d79
|
170 |
} |
c5905afb0
|
171 172 173 174 175 176 177 178 179 180 |
/*
 * Compute the patch direction for @key: enable when the default branch
 * sense and the current enabled state disagree (an XOR of the two bools).
 */
static enum jump_label_type jump_label_type(struct static_key *key)
{
	bool true_branch = jump_label_get_branch_default(key);
	bool state = static_key_enabled(key);

	return (true_branch != state) ? JUMP_LABEL_ENABLE
				      : JUMP_LABEL_DISABLE;
}
97ce2c88f
|
181 |
/*
 * Boot-time initialization: sort the core kernel jump table, patch every
 * entry to its initial state, and link each key to its first entry.
 */
void __init jump_label_init(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct static_key *key = NULL;
	struct jump_entry *iter;

	jump_label_lock();
	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *cur_key;

		cur_key = (struct static_key *)(unsigned long)iter->key;
		arch_jump_label_transform_static(iter, jump_label_type(cur_key));

		/* Only the first entry of each key needs the bookkeeping. */
		if (cur_key == key)
			continue;
		key = cur_key;

		/*
		 * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH
		 * (the low bits already stored in the field).
		 */
		*((unsigned long *)&key->entries) += (unsigned long)iter;
#ifdef CONFIG_MODULES
		key->next = NULL;
#endif
	}
	static_key_initialized = true;
	jump_label_unlock();
}
bf5438fca
|
209 210 |
#ifdef CONFIG_MODULES |
c5905afb0
|
211 212 |
/* Per-module chain node linking a key to the module's jump entries. */
struct static_key_mod {
	struct static_key_mod *next;
	struct jump_entry *entries;
	struct module *mod;
};

/* Check whether [start, end] overlaps a jump entry of the owning module. */
static int __jump_label_mod_text_reserved(void *start, void *end)
{
	struct module *mod = __module_text_address((unsigned long)start);

	if (!mod)
		return 0;

	/* A range spanning two modules would be a caller bug. */
	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);

	return __jump_label_text_reserved(mod->jump_entries,
				mod->jump_entries + mod->num_jump_entries,
				start, end);
}
c5905afb0
|
231 |
static void __jump_label_mod_update(struct static_key *key, int enable) |
d430d3d7e
|
232 |
{ |
c5905afb0
|
233 |
struct static_key_mod *mod = key->next; |
d430d3d7e
|
234 235 |
while (mod) { |
7cbc5b8d4
|
236 237 238 239 240 |
struct module *m = mod->mod; __jump_label_update(key, mod->entries, m->jump_entries + m->num_jump_entries, enable); |
d430d3d7e
|
241 242 243 244 245 246 247 248 249 250 251 252 253 |
mod = mod->next; } } /*** * apply_jump_label_nops - patch module jump labels with arch_get_jump_label_nop() * @mod: module to patch * * Allow for run-time selection of the optimal nops. Before the module * loads patch these with arch_get_jump_label_nop(), which is specified by * the arch specific jump label code. */ void jump_label_apply_nops(struct module *mod) |
bf5438fca
|
254 |
{ |
d430d3d7e
|
255 256 257 258 259 260 261 |
struct jump_entry *iter_start = mod->jump_entries; struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; struct jump_entry *iter; /* if the module doesn't have jump label entries, just return */ if (iter_start == iter_stop) return; |
ac99b862f
|
262 |
for (iter = iter_start; iter < iter_stop; iter++) { |
c5905afb0
|
263 |
arch_jump_label_transform_static(iter, JUMP_LABEL_DISABLE); |
ac99b862f
|
264 |
} |
bf5438fca
|
265 |
} |
d430d3d7e
|
266 |
static int jump_label_add_module(struct module *mod) |
bf5438fca
|
267 |
{ |
d430d3d7e
|
268 269 270 |
struct jump_entry *iter_start = mod->jump_entries; struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; struct jump_entry *iter; |
c5905afb0
|
271 272 |
struct static_key *key = NULL; struct static_key_mod *jlm; |
bf5438fca
|
273 274 |
/* if the module doesn't have jump label entries, just return */ |
d430d3d7e
|
275 |
if (iter_start == iter_stop) |
bf5438fca
|
276 |
return 0; |
d430d3d7e
|
277 278 279 |
jump_label_sort_entries(iter_start, iter_stop); for (iter = iter_start; iter < iter_stop; iter++) { |
c5905afb0
|
280 |
struct static_key *iterk; |
d430d3d7e
|
281 |
|
c5905afb0
|
282 283 284 |
iterk = (struct static_key *)(unsigned long)iter->key; if (iterk == key) continue; |
d430d3d7e
|
285 |
|
c5905afb0
|
286 |
key = iterk; |
d430d3d7e
|
287 |
if (__module_address(iter->key) == mod) { |
c5905afb0
|
288 289 290 291 |
/* * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH. */ *((unsigned long *)&key->entries) += (unsigned long)iter; |
d430d3d7e
|
292 293 |
key->next = NULL; continue; |
bf5438fca
|
294 |
} |
c5905afb0
|
295 |
jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL); |
d430d3d7e
|
296 297 |
if (!jlm) return -ENOMEM; |
d430d3d7e
|
298 299 300 301 |
jlm->mod = mod; jlm->entries = iter; jlm->next = key->next; key->next = jlm; |
c5905afb0
|
302 |
if (jump_label_type(key) == JUMP_LABEL_ENABLE) |
ac99b862f
|
303 |
__jump_label_update(key, iter, iter_stop, JUMP_LABEL_ENABLE); |
bf5438fca
|
304 |
} |
d430d3d7e
|
305 |
|
bf5438fca
|
306 307 |
return 0; } |
d430d3d7e
|
308 |
static void jump_label_del_module(struct module *mod) |
bf5438fca
|
309 |
{ |
d430d3d7e
|
310 311 312 |
struct jump_entry *iter_start = mod->jump_entries; struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; struct jump_entry *iter; |
c5905afb0
|
313 314 |
struct static_key *key = NULL; struct static_key_mod *jlm, **prev; |
bf5438fca
|
315 |
|
d430d3d7e
|
316 317 318 |
for (iter = iter_start; iter < iter_stop; iter++) { if (iter->key == (jump_label_t)(unsigned long)key) continue; |
c5905afb0
|
319 |
key = (struct static_key *)(unsigned long)iter->key; |
d430d3d7e
|
320 321 322 323 324 325 |
if (__module_address(iter->key) == mod) continue; prev = &key->next; jlm = key->next; |
bf5438fca
|
326 |
|
d430d3d7e
|
327 328 329 330 331 332 333 334 |
while (jlm && jlm->mod != mod) { prev = &jlm->next; jlm = jlm->next; } if (jlm) { *prev = jlm->next; kfree(jlm); |
bf5438fca
|
335 336 337 |
} } } |
d430d3d7e
|
338 |
static void jump_label_invalidate_module_init(struct module *mod) |
b842f8faf
|
339 |
{ |
d430d3d7e
|
340 341 |
struct jump_entry *iter_start = mod->jump_entries; struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; |
b842f8faf
|
342 |
struct jump_entry *iter; |
b842f8faf
|
343 |
|
d430d3d7e
|
344 345 346 |
for (iter = iter_start; iter < iter_stop; iter++) { if (within_module_init(iter->code, mod)) iter->code = 0; |
b842f8faf
|
347 348 |
} } |
bf5438fca
|
349 350 351 352 353 354 355 356 357 |
static int jump_label_module_notify(struct notifier_block *self, unsigned long val, void *data) { struct module *mod = data; int ret = 0; switch (val) { case MODULE_STATE_COMING: |
91bad2f8d
|
358 |
jump_label_lock(); |
d430d3d7e
|
359 |
ret = jump_label_add_module(mod); |
bf5438fca
|
360 |
if (ret) |
d430d3d7e
|
361 |
jump_label_del_module(mod); |
91bad2f8d
|
362 |
jump_label_unlock(); |
bf5438fca
|
363 364 |
break; case MODULE_STATE_GOING: |
91bad2f8d
|
365 |
jump_label_lock(); |
d430d3d7e
|
366 |
jump_label_del_module(mod); |
91bad2f8d
|
367 |
jump_label_unlock(); |
bf5438fca
|
368 |
break; |
b842f8faf
|
369 |
case MODULE_STATE_LIVE: |
91bad2f8d
|
370 |
jump_label_lock(); |
d430d3d7e
|
371 |
jump_label_invalidate_module_init(mod); |
91bad2f8d
|
372 |
jump_label_unlock(); |
b842f8faf
|
373 |
break; |
bf5438fca
|
374 |
} |
bf5438fca
|
375 |
|
d430d3d7e
|
376 |
return notifier_from_errno(ret); |
bf5438fca
|
377 378 379 380 |
} struct notifier_block jump_label_module_nb = { .notifier_call = jump_label_module_notify, |
d430d3d7e
|
381 |
.priority = 1, /* higher than tracepoints */ |
bf5438fca
|
382 |
}; |
d430d3d7e
|
383 |
static __init int jump_label_init_module(void) |
bf5438fca
|
384 385 386 |
{ return register_module_notifier(&jump_label_module_nb); } |
d430d3d7e
|
387 |
early_initcall(jump_label_init_module); |
bf5438fca
|
388 389 |
#endif /* CONFIG_MODULES */ |
d430d3d7e
|
390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 |
/***
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * checks if the text addr located between @start and @end
 * overlaps with any of the jump label patch addresses. Code
 * that wants to modify kernel text should first verify that
 * it does not overlap with any of the jump label addresses.
 * Caller must hold jump_label_mutex.
 *
 * returns 1 if there is an overlap, 0 otherwise
 */
int jump_label_text_reserved(void *start, void *end)
{
	int ret;

	/* Core kernel table first, then (if built in) module tables. */
	ret = __jump_label_text_reserved(__start___jump_table,
					 __stop___jump_table, start, end);
	if (ret)
		return ret;

#ifdef CONFIG_MODULES
	ret = __jump_label_mod_text_reserved(start, end);
#endif
	return ret;
}
c5905afb0
|
416 |
static void jump_label_update(struct static_key *key, int enable) |
d430d3d7e
|
417 |
{ |
c5905afb0
|
418 419 |
struct jump_entry *stop = __stop___jump_table; struct jump_entry *entry = jump_label_get_entries(key); |
d430d3d7e
|
420 421 |
#ifdef CONFIG_MODULES |
a746e3cc9
|
422 |
struct module *mod = __module_address((unsigned long)key); |
140fe3b1a
|
423 |
|
d430d3d7e
|
424 |
__jump_label_mod_update(key, enable); |
140fe3b1a
|
425 426 427 |
if (mod) stop = mod->jump_entries + mod->num_jump_entries; |
d430d3d7e
|
428 |
#endif |
140fe3b1a
|
429 430 431 |
/* if there are no users, entry can be NULL */ if (entry) __jump_label_update(key, entry, stop, enable); |
d430d3d7e
|
432 |
} |
bf5438fca
|
433 |
#endif |