kernel/jump_label.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra
 *
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/bug.h>
#include <linux/cpu.h>
#include <asm/sections.h>

/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
	mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
	mutex_unlock(&jump_label_mutex);
}

static int jump_label_cmp(const void *a, const void *b)
{
	const struct jump_entry *jea = a;
	const struct jump_entry *jeb = b;

	/*
	 * Entries are sorted by key.
	 */
	if (jump_entry_key(jea) < jump_entry_key(jeb))
		return -1;

	if (jump_entry_key(jea) > jump_entry_key(jeb))
		return 1;

	/*
	 * In the batching mode, entries should also be sorted by the code
	 * inside the already sorted list of entries, enabling a bsearch in
	 * the vector.
	 */
	if (jump_entry_code(jea) < jump_entry_code(jeb))
		return -1;

	if (jump_entry_code(jea) > jump_entry_code(jeb))
		return 1;

	return 0;
}

static void jump_label_swap(void *a, void *b, int size)
{
	long delta = (unsigned long)a - (unsigned long)b;
	struct jump_entry *jea = a;
	struct jump_entry *jeb = b;
	struct jump_entry tmp = *jea;

	jea->code	= jeb->code - delta;
	jea->target	= jeb->target - delta;
	jea->key	= jeb->key - delta;

	jeb->code	= tmp.code + delta;
	jeb->target	= tmp.target + delta;
	jeb->key	= tmp.key + delta;
}

static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
	unsigned long size;
	void *swapfn = NULL;

	if (IS_ENABLED(CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE))
		swapfn = jump_label_swap;

	size = (((unsigned long)stop - (unsigned long)start)
					/ sizeof(struct jump_entry));
	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, swapfn);
}

static void jump_label_update(struct static_key *key);

/*
 * There are similar definitions for the !CONFIG_JUMP_LABEL case in jump_label.h.
 * The use of 'atomic_read()' requires atomic.h and it's problematic for some
 * kernel headers such as kernel.h and others. Since static_key_count() is not
 * used in the branch statements as it is for the !CONFIG_JUMP_LABEL case it's ok
 * to have it be a function here. Similarly, for 'static_key_enable()' and
 * 'static_key_disable()', which require bug.h. This should allow jump_label.h
 * to be included from most/all places for CONFIG_JUMP_LABEL.
 */
int static_key_count(struct static_key *key)
{
	/*
	 * -1 means the first static_key_slow_inc() is in progress.
	 * static_key_enabled() must return true, so return 1 here.
	 */
	int n = atomic_read(&key->enabled);

	return n >= 0 ? n : 1;
}
EXPORT_SYMBOL_GPL(static_key_count);
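
/*
 * Overview of the key->enabled values used by the slow paths below:
 *    0 - the key is disabled and has no users;
 *   -1 - the first static_key_slow_inc()/static_key_enable() call is still
 *        patching code, forcing concurrent callers into the slow path;
 *  > 0 - the key is enabled; for the _slow_inc()/_slow_dec() interface the
 *        value is the reference count.
 */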

void static_key_slow_inc_cpuslocked(struct static_key *key)
{
	int v, v1;

	STATIC_KEY_CHECK_USE(key);
	lockdep_assert_cpus_held();

	/*
	 * Careful if we get concurrent static_key_slow_inc() calls;
	 * later calls must wait for the first one to _finish_ the
	 * jump_label_update() process.  At the same time, however,
	 * the jump_label_update() call below wants to see
	 * static_key_enabled(&key) for jumps to be updated properly.
	 *
	 * So give a special meaning to negative key->enabled: it sends
	 * static_key_slow_inc() down the slow path, and it is non-zero
	 * so it counts as "enabled" in jump_label_update().  Note that
	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
	 */
	for (v = atomic_read(&key->enabled); v > 0; v = v1) {
		v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
		if (likely(v1 == v))
			return;
	}

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		/*
		 * Ensure that if the above cmpxchg loop observes our positive
		 * value, it must also observe all the text changes.
		 */
		atomic_set_release(&key->enabled, 1);
	} else {
		atomic_inc(&key->enabled);
	}
	jump_label_unlock();
}

void static_key_slow_inc(struct static_key *key)
{
	cpus_read_lock();
	static_key_slow_inc_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);
bf5438fca
|
152 |
|
5a40527f8
|
153 |
void static_key_enable_cpuslocked(struct static_key *key) |
1dbb6704d
|
154 |
{ |
5cdda5117
|
155 |
STATIC_KEY_CHECK_USE(key); |
cb538267e
|
156 |
lockdep_assert_cpus_held(); |
5a40527f8
|
157 |
|
1dbb6704d
|
158 159 160 161 |
if (atomic_read(&key->enabled) > 0) { WARN_ON_ONCE(atomic_read(&key->enabled) != 1); return; } |
1dbb6704d
|
162 163 164 165 |
jump_label_lock(); if (atomic_read(&key->enabled) == 0) { atomic_set(&key->enabled, -1); jump_label_update(key); |
d0646a6f5
|
166 167 168 169 |
/* * See static_key_slow_inc(). */ atomic_set_release(&key->enabled, 1); |
1dbb6704d
|
170 171 |
} jump_label_unlock(); |
5a40527f8
|
172 173 174 175 176 177 178 |
} EXPORT_SYMBOL_GPL(static_key_enable_cpuslocked); void static_key_enable(struct static_key *key) { cpus_read_lock(); static_key_enable_cpuslocked(key); |
1dbb6704d
|
179 180 181 |
cpus_read_unlock(); } EXPORT_SYMBOL_GPL(static_key_enable); |
5a40527f8
|
182 |
void static_key_disable_cpuslocked(struct static_key *key) |
1dbb6704d
|
183 |
{ |
5cdda5117
|
184 |
STATIC_KEY_CHECK_USE(key); |
cb538267e
|
185 |
lockdep_assert_cpus_held(); |
5a40527f8
|
186 |
|
1dbb6704d
|
187 188 189 190 |
if (atomic_read(&key->enabled) != 1) { WARN_ON_ONCE(atomic_read(&key->enabled) != 0); return; } |
1dbb6704d
|
191 192 193 194 |
jump_label_lock(); if (atomic_cmpxchg(&key->enabled, 1, 0)) jump_label_update(key); jump_label_unlock(); |
5a40527f8
|
195 196 197 198 199 200 201 |
} EXPORT_SYMBOL_GPL(static_key_disable_cpuslocked); void static_key_disable(struct static_key *key) { cpus_read_lock(); static_key_disable_cpuslocked(key); |
1dbb6704d
|
202 203 204 |
cpus_read_unlock(); } EXPORT_SYMBOL_GPL(static_key_disable); |

static bool static_key_slow_try_dec(struct static_key *key)
{
	int val;

	val = atomic_fetch_add_unless(&key->enabled, -1, 1);
	if (val == 1)
		return false;

	/*
	 * The negative count check is valid even when a negative
	 * key->enabled is in use by static_key_slow_inc(); a
	 * __static_key_slow_dec() before the first static_key_slow_inc()
	 * returns is unbalanced, because all other static_key_slow_inc()
	 * instances block while the update is in progress.
	 */
	WARN(val < 0, "jump label: negative count!\n");
	return true;
}

static void __static_key_slow_dec_cpuslocked(struct static_key *key)
{
	lockdep_assert_cpus_held();

	if (static_key_slow_try_dec(key))
		return;

	jump_label_lock();
	if (atomic_dec_and_test(&key->enabled))
		jump_label_update(key);
	jump_label_unlock();
}

static void __static_key_slow_dec(struct static_key *key)
{
	cpus_read_lock();
	__static_key_slow_dec_cpuslocked(key);
	cpus_read_unlock();
}

void jump_label_update_timeout(struct work_struct *work)
{
	struct static_key_deferred *key =
		container_of(work, struct static_key_deferred, work.work);
	__static_key_slow_dec(&key->key);
}
EXPORT_SYMBOL_GPL(jump_label_update_timeout);

void static_key_slow_dec(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	__static_key_slow_dec(key);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);

void static_key_slow_dec_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	__static_key_slow_dec_cpuslocked(key);
}

void __static_key_slow_dec_deferred(struct static_key *key,
				    struct delayed_work *work,
				    unsigned long timeout)
{
	STATIC_KEY_CHECK_USE(key);

	if (static_key_slow_try_dec(key))
		return;

	schedule_delayed_work(work, timeout);
}
EXPORT_SYMBOL_GPL(__static_key_slow_dec_deferred);

void __static_key_deferred_flush(void *key, struct delayed_work *work)
{
	STATIC_KEY_CHECK_USE(key);
	flush_delayed_work(work);
}
EXPORT_SYMBOL_GPL(__static_key_deferred_flush);

void jump_label_rate_limit(struct static_key_deferred *key,
	unsigned long rl)
{
	STATIC_KEY_CHECK_USE(key);
	key->timeout = rl;
	INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
EXPORT_SYMBOL_GPL(jump_label_rate_limit);

static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
	if (jump_entry_code(entry) <= (unsigned long)end &&
	    jump_entry_code(entry) + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
		return 1;

	return 0;
}

static int __jump_label_text_reserved(struct jump_entry *iter_start,
		struct jump_entry *iter_stop, void *start, void *end)
{
	struct jump_entry *iter;

	iter = iter_start;
	while (iter < iter_stop) {
		if (addr_conflict(iter, start, end))
			return 1;
		iter++;
	}

	return 0;
}

/*
 * Update code which is definitely not currently executing.
 * Architectures which need heavyweight synchronization to modify
 * running code can override this to make the non-live update case
 * cheaper.
 */
void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
					    enum jump_label_type type)
{
	arch_jump_label_transform(entry, type);
}

static inline struct jump_entry *static_key_entries(struct static_key *key)
{
	WARN_ON_ONCE(key->type & JUMP_TYPE_LINKED);
	return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK);
}

static inline bool static_key_type(struct static_key *key)
{
	return key->type & JUMP_TYPE_TRUE;
}

static inline bool static_key_linked(struct static_key *key)
{
	return key->type & JUMP_TYPE_LINKED;
}

static inline void static_key_clear_linked(struct static_key *key)
{
	key->type &= ~JUMP_TYPE_LINKED;
}

static inline void static_key_set_linked(struct static_key *key)
{
	key->type |= JUMP_TYPE_LINKED;
}

/***
 * A 'struct static_key' uses a union such that it either points directly
 * to a table of 'struct jump_entry' or to a linked list of modules which in
 * turn point to 'struct jump_entry' tables.
 *
 * The two lower bits of the pointer are used to keep track of which pointer
 * type is in use and to store the initial branch direction; we use an access
 * function which preserves these bits.
 */
static void static_key_set_entries(struct static_key *key,
				   struct jump_entry *entries)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)entries & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->entries = entries;
	key->type |= type;
}

static enum jump_label_type jump_label_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool enabled = static_key_enabled(key);
	bool branch = jump_entry_is_branch(entry);

	/* See the comment in linux/jump_label.h */
	return enabled ^ branch;
}

static bool jump_label_can_update(struct jump_entry *entry, bool init)
{
	/*
	 * Cannot update code that was in an init text area.
	 */
	if (!init && jump_entry_is_init(entry))
		return false;

	if (!kernel_text_address(jump_entry_code(entry))) {
		WARN_ONCE(!jump_entry_is_init(entry),
			  "can't patch jump_label at %pS",
			  (void *)jump_entry_code(entry));
		return false;
	}

	return true;
}

#ifndef HAVE_JUMP_LABEL_BATCH
static void __jump_label_update(struct static_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop,
				bool init)
{
	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
		if (jump_label_can_update(entry, init))
			arch_jump_label_transform(entry, jump_label_type(entry));
	}
}
#else
static void __jump_label_update(struct static_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop,
				bool init)
{
	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
		if (!jump_label_can_update(entry, init))
			continue;

		if (!arch_jump_label_transform_queue(entry,
						     jump_label_type(entry))) {
			/*
			 * Queue is full: Apply the current queue and
			 * try again.
			 */
			arch_jump_label_transform_apply();
			BUG_ON(!arch_jump_label_transform_queue(entry,
								jump_label_type(entry)));
		}
	}
	arch_jump_label_transform_apply();
}
#endif

void __init jump_label_init(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct static_key *key = NULL;
	struct jump_entry *iter;

	/*
	 * Since we are initializing the static_key.enabled field with
	 * the 'raw' int values (to avoid pulling in atomic.h) in
	 * jump_label.h, let's make sure that is safe. There are only two
	 * cases to check since we initialize to 0 or 1.
	 */
	BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0);
	BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1);

	if (static_key_initialized)
		return;

	cpus_read_lock();
	jump_label_lock();
	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		/* rewrite NOPs */
		if (jump_label_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);

		if (init_section_contains((void *)jump_entry_code(iter), 1))
			jump_entry_set_init(iter);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		static_key_set_entries(key, iter);
	}
	static_key_initialized = true;
	jump_label_unlock();
	cpus_read_unlock();
}

#ifdef CONFIG_MODULES

static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool type = static_key_type(key);
	bool branch = jump_entry_is_branch(entry);

	/* See the comment in linux/jump_label.h */
	return type ^ branch;
}

struct static_key_mod {
	struct static_key_mod *next;
	struct jump_entry *entries;
	struct module *mod;
};

static inline struct static_key_mod *static_key_mod(struct static_key *key)
{
	WARN_ON_ONCE(!static_key_linked(key));
	return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK);
}

/***
 * key->type and key->next are the same via union.
 * This sets key->next and preserves the type bits.
 *
 * See additional comments above static_key_set_entries().
 */
static void static_key_set_mod(struct static_key *key,
			       struct static_key_mod *mod)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)mod & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->next = mod;
	key->type |= type;
}

static int __jump_label_mod_text_reserved(void *start, void *end)
{
	struct module *mod;

	preempt_disable();
	mod = __module_text_address((unsigned long)start);
	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
	preempt_enable();

	if (!mod)
		return 0;

	return __jump_label_text_reserved(mod->jump_entries,
				mod->jump_entries + mod->num_jump_entries,
				start, end);
}

static void __jump_label_mod_update(struct static_key *key)
{
	struct static_key_mod *mod;

	for (mod = static_key_mod(key); mod; mod = mod->next) {
		struct jump_entry *stop;
		struct module *m;

		/*
		 * NULL if the static_key is defined in a module
		 * that does not use it
		 */
		if (!mod->entries)
			continue;

		m = mod->mod;
		if (!m)
			stop = __stop___jump_table;
		else
			stop = m->jump_entries + m->num_jump_entries;
		__jump_label_update(key, mod->entries, stop,
				    m && m->state == MODULE_STATE_COMING);
	}
}

/***
 * jump_label_apply_nops - patch module jump labels with arch_get_jump_label_nop()
 * @mod: module to patch
 *
 * Allow for run-time selection of the optimal nops. Before the module
 * loads patch these with arch_get_jump_label_nop(), which is specified by
 * the arch specific jump label code.
 */
void jump_label_apply_nops(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return;

	for (iter = iter_start; iter < iter_stop; iter++) {
		/* Only write NOPs for arch_branch_static(). */
		if (jump_label_init_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
	}
}

static int jump_label_add_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, *jlm2;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return 0;

	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		if (within_module_init(jump_entry_code(iter), mod))
			jump_entry_set_init(iter);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		if (within_module((unsigned long)key, mod)) {
			static_key_set_entries(key, iter);
			continue;
		}
		jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
		if (!jlm)
			return -ENOMEM;
		if (!static_key_linked(key)) {
			jlm2 = kzalloc(sizeof(struct static_key_mod),
				       GFP_KERNEL);
			if (!jlm2) {
				kfree(jlm);
				return -ENOMEM;
			}
			preempt_disable();
			jlm2->mod = __module_address((unsigned long)key);
			preempt_enable();
			jlm2->entries = static_key_entries(key);
			jlm2->next = NULL;
			static_key_set_mod(key, jlm2);
			static_key_set_linked(key);
		}
		jlm->mod = mod;
		jlm->entries = iter;
		jlm->next = static_key_mod(key);
		static_key_set_mod(key, jlm);
		static_key_set_linked(key);

		/* Only update if we've changed from our initial state */
		if (jump_label_type(iter) != jump_label_init_type(iter))
			__jump_label_update(key, iter, iter_stop, true);
	}

	return 0;
}

static void jump_label_del_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, **prev;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (jump_entry_key(iter) == key)
			continue;

		key = jump_entry_key(iter);

		if (within_module((unsigned long)key, mod))
			continue;

		/* No memory during module load */
		if (WARN_ON(!static_key_linked(key)))
			continue;

		prev = &key->next;
		jlm = static_key_mod(key);

		while (jlm && jlm->mod != mod) {
			prev = &jlm->next;
			jlm = jlm->next;
		}

		/* No memory during module load */
		if (WARN_ON(!jlm))
			continue;

		if (prev == &key->next)
			static_key_set_mod(key, jlm->next);
		else
			*prev = jlm->next;

		kfree(jlm);

		jlm = static_key_mod(key);
		/* if only one entry is left, fold it back into the static_key */
		if (jlm->next == NULL) {
			static_key_set_entries(key, jlm->entries);
			static_key_clear_linked(key);
			kfree(jlm);
		}
	}
}

static int jump_label_module_notify(struct notifier_block *self,
				    unsigned long val, void *data)
{
	struct module *mod = data;
	int ret = 0;

	cpus_read_lock();
	jump_label_lock();

	switch (val) {
	case MODULE_STATE_COMING:
		ret = jump_label_add_module(mod);
		if (ret) {
			WARN(1, "Failed to allocate memory: jump_label may not work properly.\n");
			jump_label_del_module(mod);
		}
		break;
	case MODULE_STATE_GOING:
		jump_label_del_module(mod);
		break;
	}

	jump_label_unlock();
	cpus_read_unlock();

	return notifier_from_errno(ret);
}

static struct notifier_block jump_label_module_nb = {
	.notifier_call = jump_label_module_notify,
	.priority = 1, /* higher than tracepoints */
};

static __init int jump_label_init_module(void)
{
	return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */

/***
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * checks if the text addr located between @start and @end
 * overlaps with any of the jump label patch addresses. Code
 * that wants to modify kernel text should first verify that
 * it does not overlap with any of the jump label addresses.
 * Caller must hold jump_label_mutex.
 *
 * returns 1 if there is an overlap, 0 otherwise
 */
int jump_label_text_reserved(void *start, void *end)
{
	int ret = __jump_label_text_reserved(__start___jump_table,
			__stop___jump_table, start, end);

	if (ret)
		return ret;

#ifdef CONFIG_MODULES
	ret = __jump_label_mod_text_reserved(start, end);
#endif
	return ret;
}

static void jump_label_update(struct static_key *key)
{
	struct jump_entry *stop = __stop___jump_table;
	struct jump_entry *entry;
#ifdef CONFIG_MODULES
	struct module *mod;

	if (static_key_linked(key)) {
		__jump_label_mod_update(key);
		return;
	}

	preempt_disable();
	mod = __module_address((unsigned long)key);
	if (mod)
		stop = mod->jump_entries + mod->num_jump_entries;
	preempt_enable();
#endif
	entry = static_key_entries(key);
	/* if there are no users, entry can be NULL */
	if (entry)
		__jump_label_update(key, entry, stop,
				    system_state < SYSTEM_RUNNING);
}

#ifdef CONFIG_STATIC_KEYS_SELFTEST
static DEFINE_STATIC_KEY_TRUE(sk_true);
static DEFINE_STATIC_KEY_FALSE(sk_false);

static __init int jump_label_test(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		WARN_ON(static_key_enabled(&sk_true.key) != true);
		WARN_ON(static_key_enabled(&sk_false.key) != false);

		WARN_ON(!static_branch_likely(&sk_true));
		WARN_ON(!static_branch_unlikely(&sk_true));
		WARN_ON(static_branch_likely(&sk_false));
		WARN_ON(static_branch_unlikely(&sk_false));

		static_branch_disable(&sk_true);
		static_branch_enable(&sk_false);

		WARN_ON(static_key_enabled(&sk_true.key) == true);
		WARN_ON(static_key_enabled(&sk_false.key) == false);

		WARN_ON(static_branch_likely(&sk_true));
		WARN_ON(static_branch_unlikely(&sk_true));
		WARN_ON(!static_branch_likely(&sk_false));
		WARN_ON(!static_branch_unlikely(&sk_false));

		static_branch_enable(&sk_true);
		static_branch_disable(&sk_false);
	}

	return 0;
}
early_initcall(jump_label_test);
#endif /* STATIC_KEYS_SELFTEST */
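
For reference, a minimal caller-side sketch of how the slow paths implemented above are normally reached. The key, functions and message below are hypothetical; DEFINE_STATIC_KEY_FALSE(), static_branch_unlikely() and static_branch_inc()/static_branch_dec() are the wrappers from <linux/jump_label.h>, whose slow path ends up in static_key_slow_inc()/static_key_slow_dec() in this file:

#include <linux/jump_label.h>
#include <linux/printk.h>

/* Hypothetical example key; starts out disabled (patched to a NOP). */
static DEFINE_STATIC_KEY_FALSE(my_feature_key);

static void my_hot_path(void)
{
	/* Costs a NOP until the key is enabled, then becomes a jump. */
	if (static_branch_unlikely(&my_feature_key))
		pr_info("my_feature slow path\n");
}

static void my_feature_set(bool on)
{
	/* Reference-counted; takes jump_label_mutex, so may sleep. */
	if (on)
		static_branch_inc(&my_feature_key);
	else
		static_branch_dec(&my_feature_key);
}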