Blame view
kernel/tracepoint.c
14.5 KB
97e1c18e8
|
1 |
/* |
de7b29739
|
2 |
* Copyright (C) 2008-2014 Mathieu Desnoyers |
97e1c18e8
|
3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 |
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/module.h> #include <linux/mutex.h> #include <linux/types.h> #include <linux/jhash.h> #include <linux/list.h> #include <linux/rcupdate.h> #include <linux/tracepoint.h> #include <linux/err.h> #include <linux/slab.h> |
a871bd33a
|
27 |
#include <linux/sched.h> |
c5905afb0
|
28 |
#include <linux/static_key.h> |
97e1c18e8
|
29 |
|
/* Linker-provided bounds of the builtin tracepoint pointer array (see
 * for_each_kernel_tracepoint below) — presumably emitted by the kernel
 * linker script; confirm against vmlinux.lds. */
extern struct tracepoint * const __start___tracepoints_ptrs[];
extern struct tracepoint * const __stop___tracepoints_ptrs[];

/* Set to 1 to enable tracepoint debug output */
static const int tracepoint_debug;

#ifdef CONFIG_MODULES
/*
 * Tracepoint module list mutex protects the local module list.
 */
static DEFINE_MUTEX(tracepoint_module_list_mutex);

/* Local list of struct tp_module */
static LIST_HEAD(tracepoint_module_list);
#endif /* CONFIG_MODULES */

/*
 * tracepoints_mutex protects the builtin and module tracepoints.
 * tracepoints_mutex nests inside tracepoint_module_list_mutex.
 */
static DEFINE_MUTEX(tracepoints_mutex);

/*
 * Note about RCU :
 * It is used to delay the free of multiple probes array until a quiescent
 * state is reached.
 */
19dba33c4
|
55 |
struct tp_probes { |
0dea6d526
|
56 |
struct rcu_head rcu; |
38516ab59
|
57 |
struct tracepoint_func probes[0]; |
19dba33c4
|
58 |
}; |
97e1c18e8
|
59 |
|
19dba33c4
|
60 |
static inline void *allocate_probes(int count) |
97e1c18e8
|
61 |
{ |
38516ab59
|
62 |
struct tp_probes *p = kmalloc(count * sizeof(struct tracepoint_func) |
19dba33c4
|
63 64 |
+ sizeof(struct tp_probes), GFP_KERNEL); return p == NULL ? NULL : p->probes; |
97e1c18e8
|
65 |
} |
19dba33c4
|
66 |
static void rcu_free_old_probes(struct rcu_head *head) |
97e1c18e8
|
67 |
{ |
0dea6d526
|
68 |
kfree(container_of(head, struct tp_probes, rcu)); |
19dba33c4
|
69 |
} |
38516ab59
|
70 |
static inline void release_probes(struct tracepoint_func *old) |
19dba33c4
|
71 72 73 74 |
{ if (old) { struct tp_probes *tp_probes = container_of(old, struct tp_probes, probes[0]); |
0dea6d526
|
75 |
call_rcu_sched(&tp_probes->rcu, rcu_free_old_probes); |
19dba33c4
|
76 |
} |
97e1c18e8
|
77 |
} |
de7b29739
|
78 |
static void debug_print_probes(struct tracepoint_func *funcs) |
97e1c18e8
|
79 80 |
{ int i; |
de7b29739
|
81 |
if (!tracepoint_debug || !funcs) |
97e1c18e8
|
82 |
return; |
de7b29739
|
83 84 85 |
for (i = 0; funcs[i].func; i++) printk(KERN_DEBUG "Probe %d : %p ", i, funcs[i].func); |
97e1c18e8
|
86 |
} |
/*
 * Build a new probes array containing the entries of *@funcs plus
 * @tp_func, inserted before the first entry of strictly lower priority
 * (or appended if none).  On success *@funcs is pointed at the new
 * array and the OLD array is returned so the caller can defer its free
 * via release_probes().  Returns ERR_PTR(-EINVAL) for a NULL probe,
 * ERR_PTR(-EEXIST) if the (func, data) pair is already registered,
 * ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct tracepoint_func *
func_add(struct tracepoint_func **funcs, struct tracepoint_func *tp_func,
	 int prio)
{
	struct tracepoint_func *old, *new;
	int nr_probes = 0;
	int pos = -1;	/* insertion index; -1 means "append at the end" */

	if (WARN_ON(!tp_func->func))
		return ERR_PTR(-EINVAL);

	debug_print_probes(*funcs);
	old = *funcs;
	if (old) {
		/* (N -> N+1), (N != 0, 1) probes */
		for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
			/* Insert before probes of lower priority */
			if (pos < 0 && old[nr_probes].prio < prio)
				pos = nr_probes;
			/* Reject duplicate (func, data) registrations. */
			if (old[nr_probes].func == tp_func->func &&
			    old[nr_probes].data == tp_func->data)
				return ERR_PTR(-EEXIST);
		}
	}
	/* + 2 : one for new probe, one for NULL func */
	new = allocate_probes(nr_probes + 2);
	if (new == NULL)
		return ERR_PTR(-ENOMEM);
	if (old) {
		if (pos < 0) {
			/* No lower-priority entry found: append. */
			pos = nr_probes;
			memcpy(new, old, nr_probes * sizeof(struct tracepoint_func));
		} else {
			/* Copy higher priority probes ahead of the new probe */
			memcpy(new, old, pos * sizeof(struct tracepoint_func));
			/* Copy the rest after it. */
			memcpy(new + pos + 1, old + pos,
			       (nr_probes - pos) * sizeof(struct tracepoint_func));
		}
	} else
		pos = 0;
	new[pos] = *tp_func;
	/* Keep the array NULL-terminated for the iteration loops. */
	new[nr_probes + 1].func = NULL;
	*funcs = new;
	debug_print_probes(*funcs);
	return old;
}
/*
 * Build a new probes array with every entry of *@funcs matching
 * @tp_func's (func, data) pair removed.  A NULL @tp_func->func removes
 * all entries.  *@funcs is updated (to NULL when the array empties) and
 * the OLD array is returned for deferred freeing via release_probes().
 * Returns ERR_PTR(-ENOENT) if *@funcs is NULL, ERR_PTR(-ENOMEM) on
 * allocation failure.
 */
static void *func_remove(struct tracepoint_func **funcs,
		struct tracepoint_func *tp_func)
{
	int nr_probes = 0, nr_del = 0, i;
	struct tracepoint_func *old, *new;

	old = *funcs;

	if (!old)
		return ERR_PTR(-ENOENT);

	debug_print_probes(*funcs);
	/* (N -> M), (N > 1, M >= 0) probes */
	if (tp_func->func) {
		/* Count how many entries match and must be deleted. */
		for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
			if (old[nr_probes].func == tp_func->func &&
			    old[nr_probes].data == tp_func->data)
				nr_del++;
		}
	}

	/*
	 * If probe is NULL, then nr_probes = nr_del = 0, and then the
	 * entire entry will be removed.
	 */
	if (nr_probes - nr_del == 0) {
		/* N -> 0, (N > 1) */
		*funcs = NULL;
		debug_print_probes(*funcs);
		return old;
	} else {
		int j = 0;
		/* N -> M, (N > 1, M > 0) */
		/* + 1 for NULL */
		new = allocate_probes(nr_probes - nr_del + 1);
		if (new == NULL)
			return ERR_PTR(-ENOMEM);
		/* Copy every non-matching entry into the compacted array. */
		for (i = 0; old[i].func; i++)
			if (old[i].func != tp_func->func ||
			    old[i].data != tp_func->data)
				new[j++] = old[i];
		new[nr_probes - nr_del].func = NULL;
		*funcs = new;
	}
	debug_print_probes(*funcs);
	return old;
}

/*
 * Add the probe function to a tracepoint.
 * Called with tracepoints_mutex held (see the rcu_dereference_protected
 * condition below).  Returns 0 on success, negative errno from func_add
 * on failure.
 */
static int tracepoint_add_func(struct tracepoint *tp,
			       struct tracepoint_func *func, int prio)
{
	struct tracepoint_func *old, *tp_funcs;

	/* Run the per-tracepoint registration hook on first enable only. */
	if (tp->regfunc && !static_key_enabled(&tp->key))
		tp->regfunc();

	tp_funcs = rcu_dereference_protected(tp->funcs,
			lockdep_is_held(&tracepoints_mutex));
	old = func_add(&tp_funcs, func, prio);
	if (IS_ERR(old)) {
		WARN_ON_ONCE(1);
		return PTR_ERR(old);
	}

	/*
	 * rcu_assign_pointer has a smp_wmb() which makes sure that the new
	 * probe callbacks array is consistent before setting a pointer to it.
	 * This array is referenced by __DO_TRACE from
	 * include/linux/tracepoints.h. A matching smp_read_barrier_depends()
	 * is used.
	 */
	rcu_assign_pointer(tp->funcs, tp_funcs);
	/* Flip the static key on only after the array is published. */
	if (!static_key_enabled(&tp->key))
		static_key_slow_inc(&tp->key);
	/* Free the superseded array after an RCU grace period. */
	release_probes(old);
	return 0;
}

/*
 * Remove a probe function from a tracepoint.
 * Note: only waiting an RCU period after setting elem->call to the empty
 * function insures that the original callback is not used anymore. This insured
 * by preempt_disable around the call site.
 *
 * Called with tracepoints_mutex held.  Returns 0 on success, negative
 * errno from func_remove on failure.
 */
static int tracepoint_remove_func(struct tracepoint *tp,
		struct tracepoint_func *func)
{
	struct tracepoint_func *old, *tp_funcs;

	tp_funcs = rcu_dereference_protected(tp->funcs,
			lockdep_is_held(&tracepoints_mutex));
	old = func_remove(&tp_funcs, func);
	if (IS_ERR(old)) {
		WARN_ON_ONCE(1);
		return PTR_ERR(old);
	}

	if (!tp_funcs) {
		/* Removed last function */
		if (tp->unregfunc && static_key_enabled(&tp->key))
			tp->unregfunc();

		/* Disable the tracepoint's static key before publishing NULL. */
		if (static_key_enabled(&tp->key))
			static_key_slow_dec(&tp->key);
	}
	rcu_assign_pointer(tp->funcs, tp_funcs);
	/* Free the superseded array after an RCU grace period. */
	release_probes(old);
	return 0;
}
/**
 * tracepoint_probe_register_prio - Connect a probe to a tracepoint with priority
 * @tp: tracepoint
 * @probe: probe handler
 * @data: tracepoint data
 * @prio: priority of this function over other registered functions
 *
 * Returns 0 if ok, error value on error.
 * Note: if @tp is within a module, the caller is responsible for
 * unregistering the probe before the module is gone. This can be
 * performed either with a tracepoint module going notifier, or from
 * within module exit functions.
 */
int tracepoint_probe_register_prio(struct tracepoint *tp, void *probe,
				   void *data, int prio)
{
	struct tracepoint_func tp_func;
	int ret;

	mutex_lock(&tracepoints_mutex);
	tp_func.func = probe;
	tp_func.data = data;
	tp_func.prio = prio;
	ret = tracepoint_add_func(tp, &tp_func, prio);
	mutex_unlock(&tracepoints_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_register_prio);

/**
 * tracepoint_probe_register - Connect a probe to a tracepoint
 * @tp: tracepoint
 * @probe: probe handler
 * @data: tracepoint data
 *
 * Registers @probe at TRACEPOINT_DEFAULT_PRIO.
 * Returns 0 if ok, error value on error.
 * Note: if @tp is within a module, the caller is responsible for
 * unregistering the probe before the module is gone. This can be
 * performed either with a tracepoint module going notifier, or from
 * within module exit functions.
 */
int tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data)
{
	return tracepoint_probe_register_prio(tp, probe, data,
					      TRACEPOINT_DEFAULT_PRIO);
}
EXPORT_SYMBOL_GPL(tracepoint_probe_register);

/**
 * tracepoint_probe_unregister - Disconnect a probe from a tracepoint
 * @tp: tracepoint
 * @probe: probe function pointer
 * @data: tracepoint data
 *
 * Returns 0 if ok, error value on error.
 */
int tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data)
{
	struct tracepoint_func tp_func;
	int ret;

	mutex_lock(&tracepoints_mutex);
	tp_func.func = probe;
	tp_func.data = data;
	ret = tracepoint_remove_func(tp, &tp_func);
	mutex_unlock(&tracepoints_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_unregister);
227a83756
|
314 |
#ifdef CONFIG_MODULES |
45ab2813d
|
315 316 |
bool trace_module_has_bad_taint(struct module *mod) { |
66cc69e34
|
317 318 |
return mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP) | (1 << TAINT_UNSIGNED_MODULE)); |
45ab2813d
|
319 |
} |
static BLOCKING_NOTIFIER_HEAD(tracepoint_notify_list);

/**
 * register_tracepoint_module_notifier - register tracepoint coming/going notifier
 * @nb: notifier block
 *
 * Notifiers registered with this function are called on module
 * coming/going with the tracepoint_module_list_mutex held.
 * The notifier block callback should expect a "struct tp_module" data
 * pointer.
 */
int register_tracepoint_module_notifier(struct notifier_block *nb)
{
	struct tp_module *tp_mod;
	int ret;

	mutex_lock(&tracepoint_module_list_mutex);
	ret = blocking_notifier_chain_register(&tracepoint_notify_list, nb);
	if (ret)
		goto end;
	/* Replay a COMING event for every module already on the list. */
	list_for_each_entry(tp_mod, &tracepoint_module_list, list)
		(void) nb->notifier_call(nb, MODULE_STATE_COMING, tp_mod);
end:
	mutex_unlock(&tracepoint_module_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(register_tracepoint_module_notifier);

/**
 * unregister_tracepoint_module_notifier - unregister tracepoint coming/going notifier
 * @nb: notifier block
 *
 * The notifier block callback should expect a "struct tp_module" data
 * pointer.
 */
int unregister_tracepoint_module_notifier(struct notifier_block *nb)
{
	struct tp_module *tp_mod;
	int ret;

	mutex_lock(&tracepoint_module_list_mutex);
	ret = blocking_notifier_chain_unregister(&tracepoint_notify_list, nb);
	if (ret)
		goto end;
	/* Replay a GOING event so the notifier can drop per-module state. */
	list_for_each_entry(tp_mod, &tracepoint_module_list, list)
		(void) nb->notifier_call(nb, MODULE_STATE_GOING, tp_mod);
end:
	mutex_unlock(&tracepoint_module_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(unregister_tracepoint_module_notifier);

/*
 * Ensure the tracer unregistered the module's probes before the module
 * teardown is performed. Prevents leaks of probe and data pointers.
 */
static void tp_module_going_check_quiescent(struct tracepoint * const *begin,
		struct tracepoint * const *end)
{
	struct tracepoint * const *iter;

	if (!begin)
		return;
	/* Warn on any tracepoint that still has a live probes array. */
	for (iter = begin; iter < end; iter++)
		WARN_ON_ONCE((*iter)->funcs);
}
/*
 * Track a newly loaded module: add it to tracepoint_module_list and let
 * registered notifiers know, unless it has no tracepoints or carries a
 * bad taint.  Returns 0 on success or when skipped, -ENOMEM on
 * allocation failure.
 */
static int tracepoint_module_coming(struct module *mod)
{
	struct tp_module *tp_mod;
	int ret = 0;

	if (!mod->num_tracepoints)
		return 0;

	/*
	 * We skip modules that taint the kernel, especially those with different
	 * module headers (for forced load), to make sure we don't cause a crash.
	 * Staging, out-of-tree, and unsigned GPL modules are fine.
	 */
	if (trace_module_has_bad_taint(mod))
		return 0;
	mutex_lock(&tracepoint_module_list_mutex);
	tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);
	if (!tp_mod) {
		ret = -ENOMEM;
		goto end;
	}
	tp_mod->mod = mod;
	list_add_tail(&tp_mod->list, &tracepoint_module_list);
	/* Notify listeners while still holding the list mutex. */
	blocking_notifier_call_chain(&tracepoint_notify_list,
			MODULE_STATE_COMING, tp_mod);
end:
	mutex_unlock(&tracepoint_module_list_mutex);
	return ret;
}
/*
 * Stop tracking a module being unloaded: notify listeners, drop it from
 * tracepoint_module_list, and warn if any of its tracepoints still have
 * probes registered.
 */
static void tracepoint_module_going(struct module *mod)
{
	struct tp_module *tp_mod;

	if (!mod->num_tracepoints)
		return;

	mutex_lock(&tracepoint_module_list_mutex);
	list_for_each_entry(tp_mod, &tracepoint_module_list, list) {
		if (tp_mod->mod == mod) {
			blocking_notifier_call_chain(&tracepoint_notify_list,
					MODULE_STATE_GOING, tp_mod);
			list_del(&tp_mod->list);
			kfree(tp_mod);
			/*
			 * Called the going notifier before checking for
			 * quiescence.
			 */
			tp_module_going_check_quiescent(mod->tracepoints_ptrs,
				mod->tracepoints_ptrs + mod->num_tracepoints);
			break;
		}
	}
	/*
	 * In the case of modules that were tainted at "coming", we'll simply
	 * walk through the list without finding it. We cannot use the "tainted"
	 * flag on "going", in case a module taints the kernel only after being
	 * loaded.
	 */
	mutex_unlock(&tracepoint_module_list_mutex);
}
227a83756
|
445 |
|
de7b29739
|
446 447 |
static int tracepoint_module_notify(struct notifier_block *self, unsigned long val, void *data) |
32f857427
|
448 449 |
{ struct module *mod = data; |
b75ef8b44
|
450 |
int ret = 0; |
32f857427
|
451 452 453 |
switch (val) { case MODULE_STATE_COMING: |
b75ef8b44
|
454 455 456 457 |
ret = tracepoint_module_coming(mod); break; case MODULE_STATE_LIVE: break; |
32f857427
|
458 |
case MODULE_STATE_GOING: |
de7b29739
|
459 460 461 |
tracepoint_module_going(mod); break; case MODULE_STATE_UNFORMED: |
32f857427
|
462 463 |
break; } |
b75ef8b44
|
464 |
return ret; |
32f857427
|
465 |
} |
/* Hooked into the module notifier chain by init_tracepoints(). */
static struct notifier_block tracepoint_module_nb = {
	.notifier_call = tracepoint_module_notify,
	.priority = 0,
};
de7b29739
|
470 |
static __init int init_tracepoints(void) |
32f857427
|
471 |
{ |
de7b29739
|
472 473 474 |
int ret; ret = register_module_notifier(&tracepoint_module_nb); |
eb7d035c5
|
475 |
if (ret) |
a395d6a7e
|
476 477 |
pr_warn("Failed to register tracepoint module enter notifier "); |
eb7d035c5
|
478 |
|
de7b29739
|
479 |
return ret; |
32f857427
|
480 481 |
} __initcall(init_tracepoints); |
227a83756
|
482 |
#endif /* CONFIG_MODULES */ |
a871bd33a
|
483 |
|
/*
 * Invoke @fct(tp, priv) for every tracepoint pointer in [begin, end).
 * Tolerates a NULL @begin (empty section).
 */
static void for_each_tracepoint_range(struct tracepoint * const *begin,
		struct tracepoint * const *end,
		void (*fct)(struct tracepoint *tp, void *priv),
		void *priv)
{
	struct tracepoint * const *tp_ptr;

	if (!begin)
		return;
	for (tp_ptr = begin; tp_ptr < end; tp_ptr++)
		fct(*tp_ptr, priv);
}

/**
 * for_each_kernel_tracepoint - iteration on all kernel tracepoints
 * @fct: callback
 * @priv: private data
 */
void for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv),
		void *priv)
{
	for_each_tracepoint_range(__start___tracepoints_ptrs,
		__stop___tracepoints_ptrs, fct, priv);
}
EXPORT_SYMBOL_GPL(for_each_kernel_tracepoint);
3d27d8cb3
|
509 |
#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS |
60d970c25
|
510 |
|
/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
static int sys_tracepoint_refcount;

/*
 * Enable syscall tracepoints: on the 0 -> 1 refcount transition, set
 * TIF_SYSCALL_TRACEPOINT on every existing task under tasklist_lock.
 */
void syscall_regfunc(void)
{
	struct task_struct *p, *t;

	if (!sys_tracepoint_refcount) {
		read_lock(&tasklist_lock);
		for_each_process_thread(p, t) {
			set_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT);
		}
		read_unlock(&tasklist_lock);
	}
	sys_tracepoint_refcount++;
}

/*
 * Disable syscall tracepoints: on the 1 -> 0 refcount transition, clear
 * TIF_SYSCALL_TRACEPOINT on every existing task under tasklist_lock.
 */
void syscall_unregfunc(void)
{
	struct task_struct *p, *t;

	sys_tracepoint_refcount--;
	if (!sys_tracepoint_refcount) {
		read_lock(&tasklist_lock);
		for_each_process_thread(p, t) {
			clear_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT);
		}
		read_unlock(&tasklist_lock);
	}
}
60d970c25
|
541 |
#endif |