kernel/tracepoint.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2008-2014 Mathieu Desnoyers
 */
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/jhash.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/tracepoint.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/static_key.h>
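
/*
 * Number of probe callbacks currently attached to a tracepoint: none, one,
 * two, or more (see nr_func_state()). Used to pick the transition handling
 * when probes are added or removed.
 */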
enum tp_func_state {
	TP_FUNC_0,
	TP_FUNC_1,
	TP_FUNC_2,
	TP_FUNC_N,
};

extern tracepoint_ptr_t __start___tracepoints_ptrs[];
extern tracepoint_ptr_t __stop___tracepoints_ptrs[];

DEFINE_SRCU(tracepoint_srcu);
EXPORT_SYMBOL_GPL(tracepoint_srcu);
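
/*
 * Snapshots of RCU and SRCU grace-period state, taken when a tracepoint
 * transitions between probe counts, so that a later transition can wait
 * for (or skip) the grace periods it depends on.
 */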
enum tp_transition_sync {
	TP_TRANSITION_SYNC_1_0_1,
	TP_TRANSITION_SYNC_N_2_1,

	_NR_TP_TRANSITION_SYNC,
};

struct tp_transition_snapshot {
	unsigned long rcu;
	unsigned long srcu;
	bool ongoing;
};

/* Protected by tracepoints_mutex */
static struct tp_transition_snapshot tp_transition_snapshot[_NR_TP_TRANSITION_SYNC];

static void tp_rcu_get_state(enum tp_transition_sync sync)
{
	struct tp_transition_snapshot *snapshot = &tp_transition_snapshot[sync];

	/* Keep the latest get_state snapshot. */
	snapshot->rcu = get_state_synchronize_rcu();
	snapshot->srcu = start_poll_synchronize_srcu(&tracepoint_srcu);
	snapshot->ongoing = true;
}

static void tp_rcu_cond_sync(enum tp_transition_sync sync)
{
	struct tp_transition_snapshot *snapshot = &tp_transition_snapshot[sync];

	if (!snapshot->ongoing)
		return;
	cond_synchronize_rcu(snapshot->rcu);
	if (!poll_state_synchronize_srcu(&tracepoint_srcu, snapshot->srcu))
		synchronize_srcu(&tracepoint_srcu);
	snapshot->ongoing = false;
}

/* Set to 1 to enable tracepoint debug output */
static const int tracepoint_debug;

#ifdef CONFIG_MODULES
/*
 * Tracepoint module list mutex protects the local module list.
 */
static DEFINE_MUTEX(tracepoint_module_list_mutex);

/* Local list of struct tp_module */
static LIST_HEAD(tracepoint_module_list);
#endif /* CONFIG_MODULES */

/*
 * tracepoints_mutex protects the builtin and module tracepoints.
 * tracepoints_mutex nests inside tracepoint_module_list_mutex.
 */
static DEFINE_MUTEX(tracepoints_mutex);

static struct rcu_head *early_probes;
static bool ok_to_free_tracepoints;

/*
 * Note about RCU:
 * It is used to delay the free of multiple probes array until a quiescent
 * state is reached.
 */
struct tp_probes {
	struct rcu_head rcu;
	struct tracepoint_func probes[];
};

/* Called in removal of a func but failed to allocate a new tp_funcs */
static void tp_stub_func(void)
{
	return;
}
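
/*
 * Allocate a tp_probes with room for @count tracepoint_func entries and
 * return the embedded probes[] array; release_probes() recovers the
 * enclosing tp_probes via container_of() when freeing.
 */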
static inline void *allocate_probes(int count)
{
	struct tp_probes *p = kmalloc(struct_size(p, probes, count),
				      GFP_KERNEL);
	return p == NULL ? NULL : p->probes;
}

static void srcu_free_old_probes(struct rcu_head *head)
{
	kfree(container_of(head, struct tp_probes, rcu));
}

static void rcu_free_old_probes(struct rcu_head *head)
{
	call_srcu(&tracepoint_srcu, head, srcu_free_old_probes);
}
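
/*
 * Probes released before SRCU is initialized are queued on early_probes;
 * hand them to call_rcu() once it is safe to do so.
 */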
static __init int release_early_probes(void)
{
	struct rcu_head *tmp;

	ok_to_free_tracepoints = true;

	while (early_probes) {
		tmp = early_probes;
		early_probes = tmp->next;
		call_rcu(tmp, rcu_free_old_probes);
	}

	return 0;
}

/* SRCU is initialized at core_initcall */
postcore_initcall(release_early_probes);
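
/*
 * Queue the old probes array for freeing once both the sched RCU and SRCU
 * grace periods have elapsed (or park it on early_probes if SRCU is not
 * initialized yet).
 */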
static inline void release_probes(struct tracepoint_func *old)
{
	if (old) {
		struct tp_probes *tp_probes = container_of(old,
			struct tp_probes, probes[0]);

		/*
		 * We can't free probes if SRCU is not initialized yet.
		 * Postpone the freeing till after SRCU is initialized.
		 */
		if (unlikely(!ok_to_free_tracepoints)) {
			tp_probes->rcu.next = early_probes;
			early_probes = &tp_probes->rcu;
			return;
		}

		/*
		 * Tracepoint probes are protected by both sched RCU and SRCU,
		 * by calling the SRCU callback in the sched RCU callback we
		 * cover both cases. So let us chain the SRCU and sched RCU
		 * callbacks to wait for both grace periods.
		 */
		call_rcu(&tp_probes->rcu, rcu_free_old_probes);
	}
}

static void debug_print_probes(struct tracepoint_func *funcs)
{
	int i;

	if (!tracepoint_debug || !funcs)
		return;

	for (i = 0; funcs[i].func; i++)
		printk(KERN_DEBUG "Probe %d : %p\n", i, funcs[i].func);
}
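
/*
 * Build a new probes array with @tp_func inserted according to @prio
 * (higher priority first), skipping stub entries. Returns the old array,
 * which the caller must hand to release_probes(), or an ERR_PTR.
 */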
static struct tracepoint_func *
func_add(struct tracepoint_func **funcs, struct tracepoint_func *tp_func,
	 int prio)
{
	struct tracepoint_func *old, *new;
	int iter_probes;	/* Iterate over old probe array. */
	int nr_probes = 0;	/* Counter for probes */
	int pos = -1;		/* Insertion position into new array */

	if (WARN_ON(!tp_func->func))
		return ERR_PTR(-EINVAL);

	debug_print_probes(*funcs);
	old = *funcs;
	if (old) {
		/* (N -> N+1), (N != 0, 1) probes */
		for (iter_probes = 0; old[iter_probes].func; iter_probes++) {
			if (old[iter_probes].func == tp_stub_func)
				continue;	/* Skip stub functions. */
			if (old[iter_probes].func == tp_func->func &&
			    old[iter_probes].data == tp_func->data)
				return ERR_PTR(-EEXIST);
			nr_probes++;
		}
	}
	/* + 2 : one for new probe, one for NULL func */
	new = allocate_probes(nr_probes + 2);
	if (new == NULL)
		return ERR_PTR(-ENOMEM);
	if (old) {
		nr_probes = 0;
		for (iter_probes = 0; old[iter_probes].func; iter_probes++) {
			if (old[iter_probes].func == tp_stub_func)
				continue;
			/* Insert before probes of lower priority */
			if (pos < 0 && old[iter_probes].prio < prio)
				pos = nr_probes++;
			new[nr_probes++] = old[iter_probes];
		}
		if (pos < 0)
			pos = nr_probes++;
		/* nr_probes now points to the end of the new array */
	} else {
		pos = 0;
		nr_probes = 1; /* must point at end of array */
	}
	new[pos] = *tp_func;
	new[nr_probes].func = NULL;
	*funcs = new;
	debug_print_probes(*funcs);
	return old;
}
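
/*
 * Build a new probes array with @tp_func (and any stub entries) removed.
 * If allocating the smaller array fails, the matching entry in the old
 * array is replaced with tp_stub_func instead. Returns the old array or
 * an ERR_PTR.
 */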
static void *func_remove(struct tracepoint_func **funcs,
		struct tracepoint_func *tp_func)
{
	int nr_probes = 0, nr_del = 0, i;
	struct tracepoint_func *old, *new;

	old = *funcs;

	if (!old)
		return ERR_PTR(-ENOENT);

	debug_print_probes(*funcs);
	/* (N -> M), (N > 1, M >= 0) probes */
	if (tp_func->func) {
		for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
			if ((old[nr_probes].func == tp_func->func &&
			     old[nr_probes].data == tp_func->data) ||
			    old[nr_probes].func == tp_stub_func)
				nr_del++;
		}
	}

	/*
	 * If probe is NULL, then nr_probes = nr_del = 0, and then the
	 * entire entry will be removed.
	 */
	if (nr_probes - nr_del == 0) {
		/* N -> 0, (N > 1) */
		*funcs = NULL;
		debug_print_probes(*funcs);
		return old;
	} else {
		int j = 0;
		/* N -> M, (N > 1, M > 0) */
		/* + 1 for NULL */
		new = allocate_probes(nr_probes - nr_del + 1);
		if (new) {
			for (i = 0; old[i].func; i++) {
				if ((old[i].func != tp_func->func ||
				     old[i].data != tp_func->data) &&
				    old[i].func != tp_stub_func)
					new[j++] = old[i];
			}
			new[nr_probes - nr_del].func = NULL;
			*funcs = new;
		} else {
			/*
			 * Failed to allocate, replace the old function
			 * with calls to tp_stub_func.
			 */
			for (i = 0; old[i].func; i++) {
				if (old[i].func == tp_func->func &&
				    old[i].data == tp_func->data)
					WRITE_ONCE(old[i].func, tp_stub_func);
			}
			*funcs = old;
		}
	}
	debug_print_probes(*funcs);
	return old;
}

/*
 * Count the number of functions (enum tp_func_state) in a tp_funcs array.
 */
static enum tp_func_state nr_func_state(const struct tracepoint_func *tp_funcs)
{
	if (!tp_funcs)
		return TP_FUNC_0;
	if (!tp_funcs[1].func)
		return TP_FUNC_1;
	if (!tp_funcs[2].func)
		return TP_FUNC_2;
	return TP_FUNC_N;	/* 3 or more */
}

static void tracepoint_update_call(struct tracepoint *tp, struct tracepoint_func *tp_funcs)
{
	void *func = tp->iterator;

	/* Synthetic events do not have static call sites */
	if (!tp->static_call_key)
		return;
	if (nr_func_state(tp_funcs) == TP_FUNC_1)
		func = tp_funcs[0].func;
	__static_call_update(tp->static_call_key, tp->static_call_tramp, func);
}

/*
 * Add the probe function to a tracepoint.
 */
static int tracepoint_add_func(struct tracepoint *tp,
			       struct tracepoint_func *func, int prio,
			       bool warn)
{
	struct tracepoint_func *old, *tp_funcs;
	int ret;

	if (tp->regfunc && !static_key_enabled(&tp->key)) {
		ret = tp->regfunc();
		if (ret < 0)
			return ret;
	}

	tp_funcs = rcu_dereference_protected(tp->funcs,
			lockdep_is_held(&tracepoints_mutex));
	old = func_add(&tp_funcs, func, prio);
	if (IS_ERR(old)) {
		WARN_ON_ONCE(warn && PTR_ERR(old) != -ENOMEM);
		return PTR_ERR(old);
	}

	/*
	 * rcu_assign_pointer has a smp_store_release() which makes sure
	 * that the new probe callbacks array is consistent before setting
	 * a pointer to it. This array is referenced by __DO_TRACE from
	 * include/linux/tracepoint.h using rcu_dereference_sched().
	 */
	switch (nr_func_state(tp_funcs)) {
	case TP_FUNC_1:		/* 0->1 */
		/*
		 * Make sure new static func never uses old data after a
		 * 1->0->1 transition sequence.
		 */
		tp_rcu_cond_sync(TP_TRANSITION_SYNC_1_0_1);
		/* Set static call to first function */
		tracepoint_update_call(tp, tp_funcs);
		/* Both iterator and static call handle NULL tp->funcs */
		rcu_assign_pointer(tp->funcs, tp_funcs);
		static_key_enable(&tp->key);
		break;
	case TP_FUNC_2:		/* 1->2 */
		/* Set iterator static call */
		tracepoint_update_call(tp, tp_funcs);
		/*
		 * Iterator callback installed before updating tp->funcs.
		 * Requires ordering between RCU assign/dereference and
		 * static call update/call.
		 */
		fallthrough;
	case TP_FUNC_N:		/* N->N+1 (N>1) */
		rcu_assign_pointer(tp->funcs, tp_funcs);
		/*
		 * Make sure static func never uses incorrect data after a
		 * N->...->2->1 (N>1) transition sequence.
		 */
		if (tp_funcs[0].data != old[0].data)
			tp_rcu_get_state(TP_TRANSITION_SYNC_N_2_1);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	release_probes(old);
	return 0;
}

/*
 * Remove a probe function from a tracepoint.
 * Note: only waiting an RCU period after setting elem->call to the empty
 * function ensures that the original callback is not used anymore. This is
 * ensured by preempt_disable around the call site.
 */
static int tracepoint_remove_func(struct tracepoint *tp,
		struct tracepoint_func *func)
{
	struct tracepoint_func *old, *tp_funcs;

	tp_funcs = rcu_dereference_protected(tp->funcs,
			lockdep_is_held(&tracepoints_mutex));
	old = func_remove(&tp_funcs, func);
	if (WARN_ON_ONCE(IS_ERR(old)))
		return PTR_ERR(old);

	if (tp_funcs == old)
		/* Failed allocating new tp_funcs, replaced func with stub */
		return 0;

	switch (nr_func_state(tp_funcs)) {
	case TP_FUNC_0:		/* 1->0 */
		/* Removed last function */
		if (tp->unregfunc && static_key_enabled(&tp->key))
			tp->unregfunc();

		static_key_disable(&tp->key);
		/* Set iterator static call */
		tracepoint_update_call(tp, tp_funcs);
		/* Both iterator and static call handle NULL tp->funcs */
		rcu_assign_pointer(tp->funcs, NULL);
		/*
		 * Make sure new static func never uses old data after a
		 * 1->0->1 transition sequence.
		 */
		tp_rcu_get_state(TP_TRANSITION_SYNC_1_0_1);
		break;
	case TP_FUNC_1:		/* 2->1 */
		rcu_assign_pointer(tp->funcs, tp_funcs);
		/*
		 * Make sure static func never uses incorrect data after a
		 * N->...->2->1 (N>2) transition sequence. If the first
		 * element's data has changed, then force the synchronization
		 * to prevent current readers that have loaded the old data
		 * from calling the new function.
		 */
		if (tp_funcs[0].data != old[0].data)
			tp_rcu_get_state(TP_TRANSITION_SYNC_N_2_1);
		tp_rcu_cond_sync(TP_TRANSITION_SYNC_N_2_1);
		/* Set static call to first function */
		tracepoint_update_call(tp, tp_funcs);
		break;
	case TP_FUNC_2:		/* N->N-1 (N>2) */
		fallthrough;
	case TP_FUNC_N:
		rcu_assign_pointer(tp->funcs, tp_funcs);
		/*
		 * Make sure static func never uses incorrect data after a
		 * N->...->2->1 (N>2) transition sequence.
		 */
		if (tp_funcs[0].data != old[0].data)
			tp_rcu_get_state(TP_TRANSITION_SYNC_N_2_1);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
	release_probes(old);
	return 0;
}

/**
 * tracepoint_probe_register_prio_may_exist - Connect a probe to a tracepoint with priority
 * @tp: tracepoint
 * @probe: probe handler
 * @data: tracepoint data
 * @prio: priority of this function over other registered functions
 *
 * Same as tracepoint_probe_register_prio() except that it will not warn
 * if the tracepoint is already registered.
 */
int tracepoint_probe_register_prio_may_exist(struct tracepoint *tp, void *probe,
					     void *data, int prio)
{
	struct tracepoint_func tp_func;
	int ret;

	mutex_lock(&tracepoints_mutex);
	tp_func.func = probe;
	tp_func.data = data;
	tp_func.prio = prio;
	ret = tracepoint_add_func(tp, &tp_func, prio, false);
	mutex_unlock(&tracepoints_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_register_prio_may_exist);

/**
 * tracepoint_probe_register_prio - Connect a probe to a tracepoint with priority
 * @tp: tracepoint
 * @probe: probe handler
 * @data: tracepoint data
 * @prio: priority of this function over other registered functions
 *
 * Returns 0 if ok, error value on error.
 * Note: if @tp is within a module, the caller is responsible for
 * unregistering the probe before the module is gone. This can be
 * performed either with a tracepoint module going notifier, or from
 * within module exit functions.
 */
int tracepoint_probe_register_prio(struct tracepoint *tp, void *probe,
				   void *data, int prio)
{
	struct tracepoint_func tp_func;
	int ret;

	mutex_lock(&tracepoints_mutex);
	tp_func.func = probe;
	tp_func.data = data;
	tp_func.prio = prio;
	ret = tracepoint_add_func(tp, &tp_func, prio, true);
	mutex_unlock(&tracepoints_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_register_prio);

/**
 * tracepoint_probe_register - Connect a probe to a tracepoint
 * @tp: tracepoint
 * @probe: probe handler
 * @data: tracepoint data
 *
 * Returns 0 if ok, error value on error.
 * Note: if @tp is within a module, the caller is responsible for
 * unregistering the probe before the module is gone. This can be
 * performed either with a tracepoint module going notifier, or from
 * within module exit functions.
 */
int tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data)
{
	return tracepoint_probe_register_prio(tp, probe, data, TRACEPOINT_DEFAULT_PRIO);
}
EXPORT_SYMBOL_GPL(tracepoint_probe_register);

/**
 * tracepoint_probe_unregister - Disconnect a probe from a tracepoint
 * @tp: tracepoint
 * @probe: probe function pointer
 * @data: tracepoint data
 *
 * Returns 0 if ok, error value on error.
 */
int tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data)
{
	struct tracepoint_func tp_func;
	int ret;

	mutex_lock(&tracepoints_mutex);
	tp_func.func = probe;
	tp_func.data = data;
	ret = tracepoint_remove_func(tp, &tp_func);
	mutex_unlock(&tracepoints_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_unregister);
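
/*
 * Illustrative sketch (not from this file): registering and removing a probe
 * on a tracepoint declared elsewhere. The tracepoint name and probe below are
 * hypothetical; the probe's signature must match the tracepoint's prototype,
 * with the registered @data cookie passed back as the first argument:
 *
 *	static void my_probe(void *data, unsigned long arg)
 *	{
 *		// called each time the hypothetical trace_myevent(arg) fires
 *	}
 *
 *	tracepoint_probe_register(&__tracepoint_myevent, my_probe, my_data);
 *	...
 *	tracepoint_probe_unregister(&__tracepoint_myevent, my_probe, my_data);
 */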

static void for_each_tracepoint_range(
		tracepoint_ptr_t *begin, tracepoint_ptr_t *end,
		void (*fct)(struct tracepoint *tp, void *priv),
		void *priv)
{
	tracepoint_ptr_t *iter;

	if (!begin)
		return;
	for (iter = begin; iter < end; iter++)
		fct(tracepoint_ptr_deref(iter), priv);
}

#ifdef CONFIG_MODULES
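/*
 * Return true if @mod carries a taint other than out-of-tree, staging, or
 * unsigned; such modules are skipped by tracepoint_module_coming().
 */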
bool trace_module_has_bad_taint(struct module *mod)
{
	return mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP) |
			       (1 << TAINT_UNSIGNED_MODULE));
}

static BLOCKING_NOTIFIER_HEAD(tracepoint_notify_list);

/**
 * register_tracepoint_module_notifier - register tracepoint coming/going notifier
 * @nb: notifier block
 *
 * Notifiers registered with this function are called on module
 * coming/going with the tracepoint_module_list_mutex held.
 * The notifier block callback should expect a "struct tp_module" data
 * pointer.
 */
int register_tracepoint_module_notifier(struct notifier_block *nb)
{
	struct tp_module *tp_mod;
	int ret;

	mutex_lock(&tracepoint_module_list_mutex);
	ret = blocking_notifier_chain_register(&tracepoint_notify_list, nb);
	if (ret)
		goto end;
	list_for_each_entry(tp_mod, &tracepoint_module_list, list)
		(void) nb->notifier_call(nb, MODULE_STATE_COMING, tp_mod);
end:
	mutex_unlock(&tracepoint_module_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(register_tracepoint_module_notifier);

/**
 * unregister_tracepoint_module_notifier - unregister tracepoint coming/going notifier
 * @nb: notifier block
 *
 * The notifier block callback should expect a "struct tp_module" data
 * pointer.
 */
int unregister_tracepoint_module_notifier(struct notifier_block *nb)
{
	struct tp_module *tp_mod;
	int ret;

	mutex_lock(&tracepoint_module_list_mutex);
	ret = blocking_notifier_chain_unregister(&tracepoint_notify_list, nb);
	if (ret)
		goto end;
	list_for_each_entry(tp_mod, &tracepoint_module_list, list)
		(void) nb->notifier_call(nb, MODULE_STATE_GOING, tp_mod);
end:
	mutex_unlock(&tracepoint_module_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(unregister_tracepoint_module_notifier);

/*
 * Ensure the tracer unregistered the module's probes before the module
 * teardown is performed. Prevents leaks of probe and data pointers.
 */
static void tp_module_going_check_quiescent(struct tracepoint *tp, void *priv)
{
	WARN_ON_ONCE(tp->funcs);
}
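
/*
 * Track a freshly loaded module: allocate a tp_module, add it to the local
 * list, and notify "coming" subscribers. Tainted modules (other than
 * out-of-tree, staging, or unsigned) are ignored.
 */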
static int tracepoint_module_coming(struct module *mod)
{
	struct tp_module *tp_mod;
	int ret = 0;

	if (!mod->num_tracepoints)
		return 0;

	/*
	 * We skip modules that taint the kernel, especially those with different
	 * module headers (for forced load), to make sure we don't cause a crash.
	 * Staging, out-of-tree, and unsigned GPL modules are fine.
	 */
	if (trace_module_has_bad_taint(mod))
		return 0;
	mutex_lock(&tracepoint_module_list_mutex);
	tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);
	if (!tp_mod) {
		ret = -ENOMEM;
		goto end;
	}
	tp_mod->mod = mod;
	list_add_tail(&tp_mod->list, &tracepoint_module_list);
	blocking_notifier_call_chain(&tracepoint_notify_list,
			MODULE_STATE_COMING, tp_mod);
end:
	mutex_unlock(&tracepoint_module_list_mutex);
	return ret;
}

static void tracepoint_module_going(struct module *mod)
{
	struct tp_module *tp_mod;

	if (!mod->num_tracepoints)
		return;

	mutex_lock(&tracepoint_module_list_mutex);
	list_for_each_entry(tp_mod, &tracepoint_module_list, list) {
		if (tp_mod->mod == mod) {
			blocking_notifier_call_chain(&tracepoint_notify_list,
					MODULE_STATE_GOING, tp_mod);
			list_del(&tp_mod->list);
			kfree(tp_mod);
			/*
			 * Called the going notifier before checking for
			 * quiescence.
			 */
			for_each_tracepoint_range(mod->tracepoints_ptrs,
				mod->tracepoints_ptrs + mod->num_tracepoints,
				tp_module_going_check_quiescent, NULL);
			break;
		}
	}
	/*
	 * In the case of modules that were tainted at "coming", we'll simply
	 * walk through the list without finding it. We cannot use the "tainted"
	 * flag on "going", in case a module taints the kernel only after being
	 * loaded.
	 */
	mutex_unlock(&tracepoint_module_list_mutex);
}

static int tracepoint_module_notify(struct notifier_block *self,
		unsigned long val, void *data)
{
	struct module *mod = data;
	int ret = 0;

	switch (val) {
	case MODULE_STATE_COMING:
		ret = tracepoint_module_coming(mod);
		break;
	case MODULE_STATE_LIVE:
		break;
	case MODULE_STATE_GOING:
		tracepoint_module_going(mod);
		break;
	case MODULE_STATE_UNFORMED:
		break;
	}
	return notifier_from_errno(ret);
}

static struct notifier_block tracepoint_module_nb = {
	.notifier_call = tracepoint_module_notify,
	.priority = 0,
};

static __init int init_tracepoints(void)
{
	int ret;

	ret = register_module_notifier(&tracepoint_module_nb);
	if (ret)
		pr_warn("Failed to register tracepoint module enter notifier\n");

	return ret;
}
__initcall(init_tracepoints);
#endif /* CONFIG_MODULES */

/**
 * for_each_kernel_tracepoint - iteration on all kernel tracepoints
 * @fct: callback
 * @priv: private data
 */
void for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv),
		void *priv)
{
	for_each_tracepoint_range(__start___tracepoints_ptrs,
		__stop___tracepoints_ptrs, fct, priv);
}
EXPORT_SYMBOL_GPL(for_each_kernel_tracepoint);
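
/*
 * Illustrative sketch (not from this file): using the iterator above to look
 * up a tracepoint by name, e.g. before registering a probe on it. The helper
 * below is hypothetical:
 *
 *	static void find_tp(struct tracepoint *tp, void *priv)
 *	{
 *		struct tracepoint **found = priv;
 *
 *		if (!strcmp(tp->name, "myevent"))
 *			*found = tp;
 *	}
 *
 *	// struct tracepoint *tp = NULL;
 *	// for_each_kernel_tracepoint(find_tp, &tp);
 */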

#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS

/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
static int sys_tracepoint_refcount;
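
/*
 * syscall_regfunc()/syscall_unregfunc() flip the SYSCALL_TRACEPOINT work
 * flag on every task when the first syscall probe is registered and when
 * the last one is removed, so syscall entry/exit only take the tracing
 * path while probes are attached.
 */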
int syscall_regfunc(void)
{
	struct task_struct *p, *t;

	if (!sys_tracepoint_refcount) {
		read_lock(&tasklist_lock);
		for_each_process_thread(p, t) {
			set_task_syscall_work(t, SYSCALL_TRACEPOINT);
		}
		read_unlock(&tasklist_lock);
	}
	sys_tracepoint_refcount++;

	return 0;
}

void syscall_unregfunc(void)
{
	struct task_struct *p, *t;

	sys_tracepoint_refcount--;
	if (!sys_tracepoint_refcount) {
		read_lock(&tasklist_lock);
		for_each_process_thread(p, t) {
			clear_task_syscall_work(t, SYSCALL_TRACEPOINT);
		}
		read_unlock(&tasklist_lock);
	}
}
#endif