drivers/oprofile/cpu_buffer.c
/**
 * @file cpu_buffer.c
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 * @author Robert Richter <robert.richter@amd.com>
 *
 * Each CPU has a local buffer that stores PC value/event
 * pairs. We also log context switches when we notice them.
 * Eventually each CPU's buffer is processed into the global
 * event buffer by sync_buffer().
 *
 * We use a local buffer for two reasons: an NMI or similar
 * interrupt cannot synchronise, and high sampling rates
 * would lead to catastrophic global synchronisation if
 * a global buffer was used.
 */

#include <linux/sched.h>
#include <linux/oprofile.h>
#include <linux/errno.h>

#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"
#include "oprof.h"

#define OP_BUFFER_FLAGS	0

static struct ring_buffer *op_ring_buffer;
DEFINE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer);

static void wq_sync_buffer(struct work_struct *work);

#define DEFAULT_TIMER_EXPIRE (HZ / 10)
static int work_enabled;

unsigned long oprofile_get_cpu_buffer_size(void)
{
	return oprofile_cpu_buffer_size;
}

void oprofile_cpu_buffer_inc_smpl_lost(void)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);

	cpu_buf->sample_lost_overflow++;
}

void free_cpu_buffers(void)
{
	if (op_ring_buffer)
		ring_buffer_free(op_ring_buffer);
	op_ring_buffer = NULL;
}

#define RB_EVENT_HDR_SIZE  4

int alloc_cpu_buffers(void)
{
	int i;

	unsigned long buffer_size = oprofile_cpu_buffer_size;
	unsigned long byte_size = buffer_size * (sizeof(struct op_sample) +
						 RB_EVENT_HDR_SIZE);

	op_ring_buffer = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
	if (!op_ring_buffer)
		goto fail;

	for_each_possible_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);

		b->last_task = NULL;
		b->last_is_kernel = -1;
		b->tracing = 0;
		b->buffer_size = buffer_size;
		b->sample_received = 0;
		b->sample_lost_overflow = 0;
		b->backtrace_aborted = 0;
		b->sample_invalid_eip = 0;
		b->cpu = i;
		INIT_DELAYED_WORK(&b->work, wq_sync_buffer);
	}
	return 0;

fail:
	free_cpu_buffers();
	return -ENOMEM;
}
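/*
 * Sizing note (added for illustration; the figures are assumptions,
 * not taken from this file): with a cpu buffer size of 8192 entries,
 * a 16-byte struct op_sample (two unsigned longs on a 64-bit build)
 * and RB_EVENT_HDR_SIZE = 4, the ring buffer is asked for
 * 8192 * (16 + 4) = 163840 bytes, i.e. 160 KiB per CPU.
 */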
void start_cpu_work(void)
{
	int i;

	work_enabled = 1;

	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);

		/*
		 * Spread the work by 1 jiffy per cpu so they don't all
		 * fire at once.
		 */
		schedule_delayed_work_on(i, &b->work, DEFAULT_TIMER_EXPIRE + i);
	}
}

void end_cpu_work(void)
{
	int i;

	work_enabled = 0;

	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);

		cancel_delayed_work(&b->work);
	}
}

/*
 * This function prepares the cpu buffer to write a sample.
 *
 * Struct op_entry is used during operations on the ring buffer while
 * struct op_sample contains the data that is stored in the ring
 * buffer. Struct entry can be uninitialized. The function reserves a
 * data array that is specified by size. Use
 * op_cpu_buffer_write_commit() after preparing the sample. In case of
 * errors a null pointer is returned, otherwise the pointer to the
 * sample.
 */
struct op_sample
*op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size)
{
	entry->event = ring_buffer_lock_reserve
		(op_ring_buffer, sizeof(struct op_sample) +
		 size * sizeof(entry->sample->data[0]));
	if (!entry->event)
		return NULL;
	entry->sample = ring_buffer_event_data(entry->event);
	entry->size = size;
	entry->data = entry->sample->data;

	return entry->sample;
}

int op_cpu_buffer_write_commit(struct op_entry *entry)
{
	return ring_buffer_unlock_commit(op_ring_buffer, entry->event);
}
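/*
 * Illustrative producer sequence (a sketch added for clarity, not
 * part of the original file): reserve room for one sample plus one
 * data word, fill it in, then commit. op_add_code() and
 * op_add_sample() below are the real in-file users of this API.
 */
static int __maybe_unused write_one_sample_example(unsigned long pc,
						   unsigned long event,
						   unsigned long datum)
{
	struct op_entry entry;
	struct op_sample *sample;

	sample = op_cpu_buffer_write_reserve(&entry, 1);
	if (!sample)
		return -ENOMEM;

	sample->eip = pc;
	sample->event = event;
	op_cpu_buffer_add_data(&entry, datum);	/* helper from cpu_buffer.h */

	return op_cpu_buffer_write_commit(&entry);
}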
struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu)
{
	struct ring_buffer_event *e;

	e = ring_buffer_consume(op_ring_buffer, cpu, NULL, NULL);
	if (!e)
		return NULL;

	entry->event = e;
	entry->sample = ring_buffer_event_data(e);
	entry->size = (ring_buffer_event_length(e) - sizeof(struct op_sample))
		/ sizeof(entry->sample->data[0]);
	entry->data = entry->sample->data;
	return entry->sample;
}

unsigned long op_cpu_buffer_entries(int cpu)
{
	return ring_buffer_entries_cpu(op_ring_buffer, cpu);
}
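/*
 * Illustrative consumer (a sketch added for clarity, not part of the
 * original file): sync_buffer() in buffer_sync.c drains a per-CPU
 * buffer with a loop of roughly this shape. Reading is destructive,
 * so this is for illustration only.
 */
static unsigned long __maybe_unused count_buffer_escapes(int cpu)
{
	struct op_entry entry;
	struct op_sample *sample;
	unsigned long escapes = 0;

	while ((sample = op_cpu_buffer_read_entry(&entry, cpu))) {
		/* entry.size data words are available in entry.data[] */
		if (sample->eip == ESCAPE_CODE)
			escapes++;
	}
	return escapes;
}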
static int
op_add_code(struct oprofile_cpu_buffer *cpu_buf, unsigned long backtrace,
	    int is_kernel, struct task_struct *task)
{
	struct op_entry entry;
	struct op_sample *sample;
	unsigned long flags;
	int size;

	flags = 0;

	if (backtrace)
		flags |= TRACE_BEGIN;

	/* notice a switch from user->kernel or vice versa */
	is_kernel = !!is_kernel;
	if (cpu_buf->last_is_kernel != is_kernel) {
		cpu_buf->last_is_kernel = is_kernel;
		flags |= KERNEL_CTX_SWITCH;
		if (is_kernel)
			flags |= IS_KERNEL;
	}

	/* notice a task switch */
	if (cpu_buf->last_task != task) {
		cpu_buf->last_task = task;
		flags |= USER_CTX_SWITCH;
	}

	if (!flags)
		/* nothing to do */
		return 0;

	if (flags & USER_CTX_SWITCH)
		size = 1;
	else
		size = 0;

	sample = op_cpu_buffer_write_reserve(&entry, size);
	if (!sample)
		return -ENOMEM;

	sample->eip = ESCAPE_CODE;
	sample->event = flags;

	if (size)
		op_cpu_buffer_add_data(&entry, (unsigned long)task);

	op_cpu_buffer_write_commit(&entry);

	return 0;
}
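/*
 * Record layout note (added for clarity): op_add_code() emits a
 * single escape record. For example, a user->kernel transition that
 * coincides with a task switch and the start of a backtrace becomes:
 *
 *   sample->eip   = ESCAPE_CODE
 *   sample->event = TRACE_BEGIN | KERNEL_CTX_SWITCH | IS_KERNEL
 *                   | USER_CTX_SWITCH
 *   data[0]       = (unsigned long)task
 *                   (present only when USER_CTX_SWITCH is set)
 */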
static inline int
op_add_sample(struct oprofile_cpu_buffer *cpu_buf,
	      unsigned long pc, unsigned long event)
{
	struct op_entry entry;
	struct op_sample *sample;

	sample = op_cpu_buffer_write_reserve(&entry, 0);
	if (!sample)
		return -ENOMEM;

	sample->eip = pc;
	sample->event = event;

	return op_cpu_buffer_write_commit(&entry);
}

/*
 * This must be safe from any context.
 *
 * is_kernel is needed because on some architectures you cannot
 * tell if you are in kernel or user space simply by looking at
 * pc. We tag this in the buffer by generating kernel enter/exit
 * events whenever is_kernel changes.
 */
static int
log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
	   unsigned long backtrace, int is_kernel, unsigned long event)
{
	cpu_buf->sample_received++;

	if (pc == ESCAPE_CODE) {
		cpu_buf->sample_invalid_eip++;
		return 0;
	}

	if (op_add_code(cpu_buf, backtrace, is_kernel, current))
		goto fail;

	if (op_add_sample(cpu_buf, pc, event))
		goto fail;

	return 1;

fail:
	cpu_buf->sample_lost_overflow++;
	return 0;
}

static inline void oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
{
	cpu_buf->tracing = 1;
}

static inline void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
{
	cpu_buf->tracing = 0;
}

static inline void
__oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
			  unsigned long event, int is_kernel)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
	unsigned long backtrace = oprofile_backtrace_depth;

	/*
	 * if log_sample() fails we can't backtrace since we lost the
	 * source of this event
	 */
	if (!log_sample(cpu_buf, pc, backtrace, is_kernel, event))
		/* failed */
		return;

	if (!backtrace)
		return;

	oprofile_begin_trace(cpu_buf);
	oprofile_ops.backtrace(regs, backtrace);
	oprofile_end_trace(cpu_buf);
}

void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
			     unsigned long event, int is_kernel)
{
	__oprofile_add_ext_sample(pc, regs, event, is_kernel);
}

void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
{
	int is_kernel;
	unsigned long pc;

	if (likely(regs)) {
		is_kernel = !user_mode(regs);
		pc = profile_pc(regs);
	} else {
		is_kernel = 0;    /* This value will not be used */
		pc = ESCAPE_CODE; /* as this causes an early return. */
	}

	__oprofile_add_ext_sample(pc, regs, event, is_kernel);
}
/*
 * Add samples with data to the ring buffer.
 *
 * Use oprofile_add_data(&entry, val) to add data and
 * oprofile_write_commit(&entry) to commit the sample.
 */
void
oprofile_write_reserve(struct op_entry *entry, struct pt_regs * const regs,
		       unsigned long pc, int code, int size)
{
	struct op_sample *sample;
	int is_kernel = !user_mode(regs);
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);

	cpu_buf->sample_received++;

	/* no backtraces for samples with data */
	if (op_add_code(cpu_buf, 0, is_kernel, current))
		goto fail;

	sample = op_cpu_buffer_write_reserve(entry, size + 2);
	if (!sample)
		goto fail;
	sample->eip = ESCAPE_CODE;
	sample->event = 0;		/* no flags */

	op_cpu_buffer_add_data(entry, code);
	op_cpu_buffer_add_data(entry, pc);

	return;

fail:
	entry->event = NULL;
	cpu_buf->sample_lost_overflow++;
}

int oprofile_add_data(struct op_entry *entry, unsigned long val)
{
	if (!entry->event)
		return 0;
	return op_cpu_buffer_add_data(entry, val);
}

int oprofile_add_data64(struct op_entry *entry, u64 val)
{
	if (!entry->event)
		return 0;
	if (op_cpu_buffer_get_size(entry) < 2)
		/*
		 * return 0 to indicate a buffer that is too small,
		 * even if there is some space left
		 */
		return 0;
	if (!op_cpu_buffer_add_data(entry, (u32)val))
		return 0;
	return op_cpu_buffer_add_data(entry, (u32)(val >> 32));
}
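/*
 * Note (added for clarity): oprofile_add_data64() stores the low
 * 32 bits first and the high 32 bits second, so a consumer
 * reassembles the value as lo | ((u64)hi << 32).
 */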
int oprofile_write_commit(struct op_entry *entry)
{
	if (!entry->event)
		return -EINVAL;
	return op_cpu_buffer_write_commit(entry);
}
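/*
 * Illustrative driver-side sequence (a sketch added for clarity, not
 * part of the original file; EXAMPLE_CODE is a made-up sample type
 * code standing in for a real one defined by an arch-specific
 * driver): emit one sample carrying two data words via the
 * reserve/add/commit API described above oprofile_write_reserve().
 */
#define EXAMPLE_CODE 13	/* hypothetical sample type code */

static void __maybe_unused
add_sample_with_data_example(struct pt_regs * const regs, unsigned long pc,
			     unsigned long val0, unsigned long val1)
{
	struct op_entry entry;

	oprofile_write_reserve(&entry, regs, pc, EXAMPLE_CODE, 2);
	oprofile_add_data(&entry, val0);
	oprofile_add_data(&entry, val1);
	/*
	 * if the reserve failed, entry.event is NULL, the adds were
	 * no-ops and the commit returns -EINVAL
	 */
	oprofile_write_commit(&entry);
}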
void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
	log_sample(cpu_buf, pc, 0, is_kernel, event);
}

void oprofile_add_trace(unsigned long pc)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);

	if (!cpu_buf->tracing)
		return;

	/*
	 * a broken frame can give an eip with the same value as an
	 * escape code, abort the trace if we get it
	 */
	if (pc == ESCAPE_CODE)
		goto fail;

	if (op_add_sample(cpu_buf, pc, 0))
		goto fail;

	return;
fail:
	cpu_buf->tracing = 0;
	cpu_buf->backtrace_aborted++;
	return;
}
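/*
 * Illustrative arch hook (a sketch added for clarity, not part of the
 * original file): an architecture's oprofile_ops.backtrace()
 * implementation calls oprofile_add_trace() once per caller address
 * while tracing is enabled by __oprofile_add_ext_sample() above.
 * example_first_frame(), example_frame_pc() and example_frame_next()
 * are hypothetical stand-ins for real arch unwinding helpers.
 */
static void __maybe_unused backtrace_example(struct pt_regs * const regs,
					     unsigned int depth)
{
	unsigned long frame = example_first_frame(regs);	/* hypothetical */

	while (depth-- && frame) {
		oprofile_add_trace(example_frame_pc(frame));	/* hypothetical */
		frame = example_frame_next(frame);		/* hypothetical */
	}
}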
/*
 * This serves to avoid cpu buffer overflow, and makes sure
 * the task mortuary progresses
 *
 * By using schedule_delayed_work_on and then schedule_delayed_work
 * we guarantee this will stay on the correct cpu
 */
static void wq_sync_buffer(struct work_struct *work)
{
	struct oprofile_cpu_buffer *b =
		container_of(work, struct oprofile_cpu_buffer, work.work);
	if (b->cpu != smp_processor_id()) {
		printk(KERN_DEBUG "WQ on CPU%d, prefer CPU%d\n",
		       smp_processor_id(), b->cpu);

		if (!cpu_online(b->cpu)) {
			cancel_delayed_work(&b->work);
			return;
		}
	}
	sync_buffer(b->cpu);

	/* don't re-add the work if we're shutting down */
	if (work_enabled)
		schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE);
}