drivers/oprofile/buffer_sync.c
/**
 * @file buffer_sync.c
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Barry Kasindorf
 * @author Robert Richter <robert.richter@amd.com>
 *
 * This is the core of the buffer management. Each
 * CPU buffer is processed and entered into the
 * global event buffer. Such processing is necessary
 * in several circumstances, mentioned below.
 *
 * The processing does the job of converting the
 * transitory EIP value into a persistent dentry/offset
 * value that the profiler can record at its leisure.
 *
 * See fs/dcookies.c for a description of the dentry/offset
 * objects.
 */
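/*
 * Big picture: samples land in per-CPU buffers (see cpu_buffer.c) at
 * interrupt time; sync_buffer() below drains one CPU buffer at a time
 * into the single global event buffer that the userspace daemon reads
 * (see event_buffer.c).
 */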
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/dcookies.h>
#include <linux/profile.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/oprofile.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/gfp.h>
#include "oprofile_stats.h"
#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"
static LIST_HEAD(dying_tasks);
static LIST_HEAD(dead_tasks);
static cpumask_var_t marked_cpus;
static DEFINE_SPINLOCK(task_mortuary);
static void process_task_mortuary(void);
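/*
 * dying_tasks and dead_tasks implement a two-stage grace period:
 * task_free_notify() parks a departing task on dying_tasks, and each
 * process_task_mortuary() call frees the previous dead_tasks
 * generation while promoting dying_tasks to dead_tasks. A task is
 * therefore freed only after two full buffer syncs, once no CPU
 * buffer can still reference it; this is also why free_all_tasks()
 * below calls process_task_mortuary() twice.
 */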
/* Take ownership of the task struct and place it on the
 * list for processing. Only after two full buffer syncs
 * does the task eventually get freed, because by then
 * we are sure we will not reference it again.
 * Can be invoked from softirq via RCU callback due to
 * call_rcu() of the task struct, hence the _irqsave.
 */
static int
task_free_notify(struct notifier_block *self, unsigned long val, void *data)
{
        unsigned long flags;
        struct task_struct *task = data;
        spin_lock_irqsave(&task_mortuary, flags);
        list_add(&task->tasks, &dying_tasks);
        spin_unlock_irqrestore(&task_mortuary, flags);
        return NOTIFY_OK;
}


/* The task is on its way out. A sync of the buffer means we can catch
 * any remaining samples for this task.
 */
static int
task_exit_notify(struct notifier_block *self, unsigned long val, void *data)
{
        /* To avoid latency problems, we only process the current CPU,
         * hoping that most samples for the task are on this CPU
         */
        sync_buffer(raw_smp_processor_id());
        return 0;
}


/* The task is about to try a do_munmap(). We peek at what it's going to
 * do, and if it's an executable region, process the samples first, so
 * we don't lose any. This does not have to be exact, it's a QoI issue
 * only.
 */
static int
munmap_notify(struct notifier_block *self, unsigned long val, void *data)
{
        unsigned long addr = (unsigned long)data;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *mpnt;

        mmap_read_lock(mm);

        mpnt = find_vma(mm, addr);
        if (mpnt && mpnt->vm_file && (mpnt->vm_flags & VM_EXEC)) {
                mmap_read_unlock(mm);
                /* To avoid latency problems, we only process the current CPU,
                 * hoping that most samples for the task are on this CPU
                 */
                sync_buffer(raw_smp_processor_id());
                return 0;
        }

        mmap_read_unlock(mm);
        return 0;
}


/* We need to be told about new modules so we don't attribute to a previously
 * loaded module, or drop the samples on the floor.
 */
static int
module_load_notify(struct notifier_block *self, unsigned long val, void *data)
{
#ifdef CONFIG_MODULES
        if (val != MODULE_STATE_COMING)
                return NOTIFY_DONE;

        /* FIXME: should we process all CPU buffers ? */
        mutex_lock(&buffer_mutex);
        add_event_entry(ESCAPE_CODE);
        add_event_entry(MODULE_LOADED_CODE);
        mutex_unlock(&buffer_mutex);
#endif
        return NOTIFY_OK;
}
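/*
 * Note that only a bare MODULE_LOADED_CODE marker is emitted; no
 * module name or address range goes into the stream, so the consumer
 * presumably has to rescan the kernel's module list (e.g.
 * /proc/modules) when it sees this code.
 */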
static struct notifier_block task_free_nb = {
        .notifier_call  = task_free_notify,
};

static struct notifier_block task_exit_nb = {
        .notifier_call  = task_exit_notify,
};

static struct notifier_block munmap_nb = {
        .notifier_call  = munmap_notify,
};

static struct notifier_block module_load_nb = {
        .notifier_call = module_load_notify,
};

static void free_all_tasks(void)
{
        /* make sure we don't leak task structs */
        process_task_mortuary();
        process_task_mortuary();
}
int sync_start(void)
{
        int err;

        if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL))
                return -ENOMEM;

        err = task_handoff_register(&task_free_nb);
        if (err)
                goto out1;
        err = profile_event_register(PROFILE_TASK_EXIT, &task_exit_nb);
        if (err)
                goto out2;
        err = profile_event_register(PROFILE_MUNMAP, &munmap_nb);
        if (err)
                goto out3;
        err = register_module_notifier(&module_load_nb);
        if (err)
                goto out4;

        start_cpu_work();

out:
        return err;
out4:
        profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
out3:
        profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
out2:
        task_handoff_unregister(&task_free_nb);
        free_all_tasks();
out1:
        free_cpumask_var(marked_cpus);
        goto out;
}
void sync_stop(void)
{
        end_cpu_work();
        unregister_module_notifier(&module_load_nb);
        profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
        profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
        task_handoff_unregister(&task_free_nb);
        barrier();                      /* do all of the above first */

        flush_cpu_work();

        free_all_tasks();
        free_cpumask_var(marked_cpus);
}
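/*
 * The ordering in sync_stop() matters: all event sources are
 * unregistered first, then flush_cpu_work() drains any work already
 * queued, and only then can free_all_tasks() safely reap task structs
 * that the CPU buffers may still have referenced.
 */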
/* Optimisation. We can manage without taking the dcookie sem
 * because we cannot reach this code without at least one
 * dcookie user still being registered (namely, the reader
 * of the event buffer). */
static inline unsigned long fast_get_dcookie(const struct path *path)
{
        unsigned long cookie;

        if (path->dentry->d_flags & DCACHE_COOKIE)
                return (unsigned long)path->dentry;
        get_dcookie(path, &cookie);
        return cookie;
}
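/*
 * The fast path works because fs/dcookies.c hands out the dentry
 * address itself as the cookie value: once DCACHE_COOKIE is set on a
 * dentry, the pointer can be reused directly without repeating the
 * get_dcookie() hash lookup.
 */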
/* Look up the dcookie for the task's mm->exe_file,
 * which corresponds loosely to "application name". This is
 * not strictly necessary but allows oprofile to associate
 * shared-library samples with particular applications.
 */
static unsigned long get_exec_dcookie(struct mm_struct *mm)
{
        unsigned long cookie = NO_COOKIE;
        struct file *exe_file;

        if (!mm)
                goto done;

        exe_file = get_mm_exe_file(mm);
        if (!exe_file)
                goto done;

        cookie = fast_get_dcookie(&exe_file->f_path);
        fput(exe_file);
done:
        return cookie;
}


/* Convert the EIP value of a sample into a persistent dentry/offset
 * pair that can then be added to the global event buffer. We make
 * sure to do this lookup before a mm->mmap modification happens so
 * we don't lose track.
 *
 * The caller must ensure the mm is not nil (ie: not a kernel thread).
 */
static unsigned long
lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset)
{
        unsigned long cookie = NO_COOKIE;
        struct vm_area_struct *vma;

        mmap_read_lock(mm);
        for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {

                if (addr < vma->vm_start || addr >= vma->vm_end)
                        continue;

                if (vma->vm_file) {
                        cookie = fast_get_dcookie(&vma->vm_file->f_path);
                        *offset = (vma->vm_pgoff << PAGE_SHIFT) + addr -
                                vma->vm_start;
                } else {
                        /* must be an anonymous map */
                        *offset = addr;
                }
                break;
        }

        if (!vma)
                cookie = INVALID_COOKIE;
        mmap_read_unlock(mm);

        return cookie;
}
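/*
 * In effect, lookup_dcookie() yields one of three results:
 *   - file-backed VMA:  cookie of vma->vm_file, *offset = file offset
 *   - anonymous VMA:    NO_COOKIE, *offset = raw virtual address
 *   - no matching VMA:  INVALID_COOKIE, *offset left untouched
 */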
static unsigned long last_cookie = INVALID_COOKIE;
static void add_cpu_switch(int i)
{
        add_event_entry(ESCAPE_CODE);
        add_event_entry(CPU_SWITCH_CODE);
        add_event_entry(i);
        last_cookie = INVALID_COOKIE;
}

static void add_kernel_ctx_switch(unsigned int in_kernel)
{
        add_event_entry(ESCAPE_CODE);
        if (in_kernel)
                add_event_entry(KERNEL_ENTER_SWITCH_CODE);
        else
                add_event_entry(KERNEL_EXIT_SWITCH_CODE);
}
static void
add_user_ctx_switch(struct task_struct const *task, unsigned long cookie)
{
        add_event_entry(ESCAPE_CODE);
        add_event_entry(CTX_SWITCH_CODE);
        add_event_entry(task->pid);
        add_event_entry(cookie);
        /* Another code for daemon back-compat */
        add_event_entry(ESCAPE_CODE);
        add_event_entry(CTX_TGID_CODE);
        add_event_entry(task->tgid);
}

static void add_cookie_switch(unsigned long cookie)
{
        add_event_entry(ESCAPE_CODE);
        add_event_entry(COOKIE_SWITCH_CODE);
        add_event_entry(cookie);
}

static void add_trace_begin(void)
{
        add_event_entry(ESCAPE_CODE);
        add_event_entry(TRACE_BEGIN_CODE);
}
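/*
 * The helpers above emit variable-length records into the flat stream
 * of unsigned longs that makes up the event buffer. A drained CPU
 * buffer looks roughly like:
 *
 *   ESCAPE_CODE, CPU_SWITCH_CODE,    cpu
 *   ESCAPE_CODE, CTX_SWITCH_CODE,    pid, cookie
 *   ESCAPE_CODE, CTX_TGID_CODE,      tgid
 *   ESCAPE_CODE, COOKIE_SWITCH_CODE, cookie
 *   offset, event, offset, event, ...
 *
 * ESCAPE_CODE introduces a control record; bare pairs are samples.
 */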
static void add_data(struct op_entry *entry, struct mm_struct *mm)
{
        unsigned long code, pc, val;
        unsigned long cookie;
        off_t offset;

        if (!op_cpu_buffer_get_data(entry, &code))
                return;
        if (!op_cpu_buffer_get_data(entry, &pc))
                return;
        if (!op_cpu_buffer_get_size(entry))
                return;

        if (mm) {
                cookie = lookup_dcookie(mm, pc, &offset);

                if (cookie == NO_COOKIE)
                        offset = pc;
                if (cookie == INVALID_COOKIE) {
                        atomic_inc(&oprofile_stats.sample_lost_no_mapping);
                        offset = pc;
                }
                if (cookie != last_cookie) {
                        add_cookie_switch(cookie);
                        last_cookie = cookie;
                }
        } else
                offset = pc;

        add_event_entry(ESCAPE_CODE);
        add_event_entry(code);
        add_event_entry(offset);        /* Offset from Dcookie */

        while (op_cpu_buffer_get_data(entry, &val))
                add_event_entry(val);
}
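/*
 * add_data() handles extended records whose payload was stored in the
 * CPU buffer via the op_cpu_buffer_get_data() accessors: a code word,
 * a pc that is translated to a cookie/offset like any other sample,
 * then an arbitrary number of raw payload words (used, for example,
 * by hardware-specific samples such as AMD IBS).
 */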
static inline void add_sample_entry(unsigned long offset, unsigned long event)
{
        add_event_entry(offset);
        add_event_entry(event);
}


/*
 * Add a sample to the global event buffer. If possible the
 * sample is converted into a persistent dentry/offset pair
 * for later lookup from userspace. Return 0 on failure.
 */
static int
add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
{
        unsigned long cookie;
        off_t offset;

        if (in_kernel) {
                add_sample_entry(s->eip, s->event);
                return 1;
        }

        /* add userspace sample */

        if (!mm) {
                atomic_inc(&oprofile_stats.sample_lost_no_mm);
                return 0;
        }

        cookie = lookup_dcookie(mm, s->eip, &offset);

        if (cookie == INVALID_COOKIE) {
                atomic_inc(&oprofile_stats.sample_lost_no_mapping);
                return 0;
        }

        if (cookie != last_cookie) {
                add_cookie_switch(cookie);
                last_cookie = cookie;
        }

        add_sample_entry(offset, s->event);

        return 1;
}
static void release_mm(struct mm_struct *mm)
{
        if (!mm)
                return;
        mmput(mm);
}

static inline int is_code(unsigned long val)
{
        return val == ESCAPE_CODE;
}
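/*
 * is_code() relies on ESCAPE_CODE never being a valid sample eip:
 * when it shows up in the eip slot, the entry is a control record
 * (context switch, trace begin, extended data) rather than a
 * pc/event pair.
 */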
/* Move tasks along towards death. Any tasks on dead_tasks
 * will definitely have no remaining references in any
 * CPU buffers at this point, because we use two lists,
 * and to have reached the list, it must have gone through
 * one full sync already.
 */
static void process_task_mortuary(void)
{
        unsigned long flags;
        LIST_HEAD(local_dead_tasks);
        struct task_struct *task;
        struct task_struct *ttask;

        spin_lock_irqsave(&task_mortuary, flags);

        list_splice_init(&dead_tasks, &local_dead_tasks);
        list_splice_init(&dying_tasks, &dead_tasks);

        spin_unlock_irqrestore(&task_mortuary, flags);

        list_for_each_entry_safe(task, ttask, &local_dead_tasks, tasks) {
                list_del(&task->tasks);
                free_task(task);
        }
}


static void mark_done(int cpu)
{
        int i;
        cpumask_set_cpu(cpu, marked_cpus);

        for_each_online_cpu(i) {
                if (!cpumask_test_cpu(i, marked_cpus))
                        return;
        }

        /* All CPUs have been processed at least once,
         * we can process the mortuary once
         */
        process_task_mortuary();

        cpumask_clear(marked_cpus);
}


/* FIXME: this is not sufficient if we implement syscall barrier backtrace
 * traversal: the code switches to sb_sample_start at the first kernel
 * enter/exit switch, so we would need a fifth state and some special
 * handling in sync_buffer().
 */
typedef enum {
        sb_bt_ignore = -2,
        sb_buffer_start,
        sb_bt_start,
        sb_sample_start,
} sync_buffer_state;


/* Sync one of the CPU's buffers into the global event buffer.
 * Here we need to go through each batch of samples punctuated
 * by context switch notes, taking the task's mmap_lock and doing
 * lookup in task->mm->mmap to convert EIP into dcookie/offset
 * value.
 */
void sync_buffer(int cpu)
{
        struct mm_struct *mm = NULL;
        struct mm_struct *oldmm;
        unsigned long val;
        struct task_struct *new;
        unsigned long cookie = 0;
        int in_kernel = 1;
        sync_buffer_state state = sb_buffer_start;
        unsigned int i;
        unsigned long available;
        unsigned long flags;
        struct op_entry entry;
        struct op_sample *sample;

        mutex_lock(&buffer_mutex);

        add_cpu_switch(cpu);

        op_cpu_buffer_reset(cpu);
        available = op_cpu_buffer_entries(cpu);

        for (i = 0; i < available; ++i) {
                sample = op_cpu_buffer_read_entry(&entry, cpu);
                if (!sample)
                        break;

                if (is_code(sample->eip)) {
                        flags = sample->event;
                        if (flags & TRACE_BEGIN) {
                                state = sb_bt_start;
                                add_trace_begin();
                        }
                        if (flags & KERNEL_CTX_SWITCH) {
                                /* kernel/userspace switch */
                                in_kernel = flags & IS_KERNEL;
                                if (state == sb_buffer_start)
                                        state = sb_sample_start;
                                add_kernel_ctx_switch(flags & IS_KERNEL);
                        }
                        if (flags & USER_CTX_SWITCH
                            && op_cpu_buffer_get_data(&entry, &val)) {
                                /* userspace context switch */
                                new = (struct task_struct *)val;
                                oldmm = mm;
                                release_mm(oldmm);
                                mm = get_task_mm(new);
                                if (mm != oldmm)
                                        cookie = get_exec_dcookie(mm);
                                add_user_ctx_switch(new, cookie);
                        }
                        if (op_cpu_buffer_get_size(&entry))
                                add_data(&entry, mm);
                        continue;
                }

                if (state < sb_bt_start)
                        /* ignore sample */
                        continue;

                if (add_sample(mm, sample, in_kernel))
                        continue;

                /* ignore backtraces if failed to add a sample */
                if (state == sb_bt_start) {
                        state = sb_bt_ignore;
                        atomic_inc(&oprofile_stats.bt_lost_no_mapping);
                }
        }
        release_mm(mm);

        mark_done(cpu);

        mutex_unlock(&buffer_mutex);
}
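/*
 * To summarize the sync_buffer_state machine used above:
 *   sb_buffer_start - nothing seen yet; samples are ignored until the
 *                     first kernel enter/exit switch arrives
 *   sb_sample_start - normal operation; samples are recorded
 *   sb_bt_start     - a TRACE_BEGIN was seen; following entries are
 *                     backtrace pcs, recorded like samples
 *   sb_bt_ignore    - a backtrace pc failed to map, so the rest of
 *                     that backtrace is discarded
 */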
/* This function can be used to add a buffer worth of data directly to
 * the kernel buffer. The buffer is assumed to be a circular buffer.
 * Take the entries from index start and end at index stop, wrapping
 * at max.
 */
void oprofile_put_buff(unsigned long *buf, unsigned int start,
                       unsigned int stop, unsigned int max)
{
        int i;

        i = start;
        mutex_lock(&buffer_mutex);
        while (i != stop) {
                add_event_entry(buf[i++]);

                if (i >= max)
                        i = 0;
        }

        mutex_unlock(&buffer_mutex);
}
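/*
 * oprofile_put_buff() exists for code that collects samples in its
 * own ring buffer and wants to splice them into the event buffer
 * wholesale (historically, for example, the Cell SPU profiling code)
 * instead of going through the per-CPU buffers above.
 */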