Blame view
kernel/trace/ring_buffer_benchmark.c
10.5 KB
5092dbc96 ring-buffer: add ... |
1 2 3 4 5 6 7 8 9 10 |
/* * ring buffer tester and benchmark * * Copyright (C) 2009 Steven Rostedt <srostedt@redhat.com> */ #include <linux/ring_buffer.h> #include <linux/completion.h> #include <linux/kthread.h> #include <linux/module.h> #include <linux/time.h> |
79615760f local_t: Move loc... |
11 |
#include <asm/local.h> |
5092dbc96 ring-buffer: add ... |
12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 |
struct rb_page { u64 ts; local_t commit; char data[4080]; }; /* run time and sleep time in seconds */ #define RUN_TIME 10 #define SLEEP_TIME 10 /* number of events for writer to wake up the reader */ static int wakeup_interval = 100; static int reader_finish; static struct completion read_start; static struct completion read_done; static struct ring_buffer *buffer; static struct task_struct *producer; static struct task_struct *consumer; static unsigned long read; static int disable_reader; module_param(disable_reader, uint, 0644); MODULE_PARM_DESC(disable_reader, "only run producer"); |
a6f0eb6ad ring-buffer: Add ... |
38 39 40 |
static int write_iteration = 50; module_param(write_iteration, uint, 0644); MODULE_PARM_DESC(write_iteration, "# of writes between timestamp readings"); |
7ac074340 ring-buffer-bench... |
41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 |
static int producer_nice = 19; static int consumer_nice = 19; static int producer_fifo = -1; static int consumer_fifo = -1; module_param(producer_nice, uint, 0644); MODULE_PARM_DESC(producer_nice, "nice prio for producer"); module_param(consumer_nice, uint, 0644); MODULE_PARM_DESC(consumer_nice, "nice prio for consumer"); module_param(producer_fifo, uint, 0644); MODULE_PARM_DESC(producer_fifo, "fifo prio for producer"); module_param(consumer_fifo, uint, 0644); MODULE_PARM_DESC(consumer_fifo, "fifo prio for consumer"); |
/* toggled each run: read via events (1) or via whole pages (0) */
static int read_events;

/* latched to 1 on the first detected error; stops every loop below */
static int kill_test;

/*
 * Flag a test failure exactly once: latch kill_test and emit a single
 * WARN_ON() backtrace the first time something looks corrupted.
 */
#define KILL_TEST()				\
	do {					\
		if (!kill_test) {		\
			kill_test = 1;		\
			WARN_ON(1);		\
		}				\
	} while (0)

enum event_status {
	EVENT_FOUND,
	EVENT_DROPPED,
};

/*
 * Consume a single event from @cpu's buffer via ring_buffer_consume().
 * The producer writes its CPU id as the payload, so any other value
 * means corruption and kills the test.  Returns EVENT_FOUND for a good
 * event, EVENT_DROPPED when the buffer is empty or the data is bad.
 */
static enum event_status read_event(int cpu)
{
	struct ring_buffer_event *event;
	int *entry;
	u64 ts;

	event = ring_buffer_consume(buffer, cpu, &ts, NULL);
	if (!event)
		return EVENT_DROPPED;

	entry = ring_buffer_event_data(event);
	if (*entry != cpu) {
		KILL_TEST();
		return EVENT_DROPPED;
	}

	read++;
	return EVENT_FOUND;
}

/*
 * Read a whole page from @cpu's buffer with ring_buffer_read_page() and
 * walk its raw events by hand, validating each one.  "inc" is the byte
 * size of the current event; it is preset to -1 so a switch case that
 * fails to set it is caught by the "inc <= 0" check below.
 */
static enum event_status read_page(int cpu)
{
	struct ring_buffer_event *event;
	struct rb_page *rpage;
	unsigned long commit;
	void *bpage;
	int *entry;
	int ret;
	int inc;
	int i;

	bpage = ring_buffer_alloc_read_page(buffer, cpu);
	if (!bpage)
		return EVENT_DROPPED;

	ret = ring_buffer_read_page(buffer, &bpage, PAGE_SIZE, cpu, 1);
	if (ret >= 0) {
		rpage = bpage;
		/* The commit may have missed event flags set, clear them */
		commit = local_read(&rpage->commit) & 0xfffff;
		for (i = 0; i < commit && !kill_test; i += inc) {
			/* never walk past the data area of the page */
			if (i >= (PAGE_SIZE - offsetof(struct rb_page, data))) {
				KILL_TEST();
				break;
			}

			inc = -1;
			event = (void *)&rpage->data[i];
			switch (event->type_len) {
			case RINGBUF_TYPE_PADDING:
				/* failed writes may be discarded events */
				if (!event->time_delta)
					KILL_TEST();
				inc = event->array[0] + 4;
				break;
			case RINGBUF_TYPE_TIME_EXTEND:
				inc = 8;
				break;
			case 0:
				/* type_len 0: length stored in array[0] */
				entry = ring_buffer_event_data(event);
				if (*entry != cpu) {
					KILL_TEST();
					break;
				}
				read++;
				if (!event->array[0]) {
					KILL_TEST();
					break;
				}
				inc = event->array[0] + 4;
				break;
			default:
				/* small event: length encoded in type_len */
				entry = ring_buffer_event_data(event);
				if (*entry != cpu) {
					KILL_TEST();
					break;
				}
				read++;
				inc = ((event->type_len + 1) * 4);
			}
			if (kill_test)
				break;

			/* a case above failed to compute a valid size */
			if (inc <= 0) {
				KILL_TEST();
				break;
			}
		}
	}
	ring_buffer_free_read_page(buffer, bpage);

	if (ret < 0)
		return EVENT_DROPPED;
	return EVENT_FOUND;
}

/*
 * Consumer main loop: drain every online CPU's buffer until a full pass
 * finds nothing, then sleep until woken.  Exits when the producer sets
 * reader_finish (acknowledged via read_done) or on a test failure.
 */
static void ring_buffer_consumer(void)
{
	/* toggle between reading pages and events */
	read_events ^= 1;

	read = 0;
	while (!reader_finish && !kill_test) {
		int found;

		do {
			int cpu;

			found = 0;
			for_each_online_cpu(cpu) {
				enum event_status stat;

				if (read_events)
					stat = read_event(cpu);
				else
					stat = read_page(cpu);

				if (kill_test)
					break;
				if (stat == EVENT_FOUND)
					found = 1;
			}
		} while (found && !kill_test);

		set_current_state(TASK_INTERRUPTIBLE);
		if (reader_finish)
			break;

		schedule();
		__set_current_state(TASK_RUNNING);
	}
	reader_finish = 0;
	complete(&read_done);
}

/*
 * Producer: hammer the ring buffer with events for RUN_TIME seconds,
 * then stop the consumer and report statistics via trace_printk().
 */
static void ring_buffer_producer(void)
{
	struct timeval start_tv;
	struct timeval end_tv;
	unsigned long long time;
	unsigned long long entries;
	unsigned long long overruns;
	unsigned long missed = 0;	/* reserve failures */
	unsigned long hit = 0;		/* committed events */
	unsigned long avg;
	int cnt = 0;

	/*
	 * Hammer the buffer for 10 secs (this may
	 * make the system stall)
	 */
	trace_printk("Starting ring buffer hammer ");
	do_gettimeofday(&start_tv);
	do {
		struct ring_buffer_event *event;
		int *entry;
		int i;

		/* write a burst of events between timestamp readings */
		for (i = 0; i < write_iteration; i++) {
			event = ring_buffer_lock_reserve(buffer, 10);
			if (!event) {
				missed++;
			} else {
				hit++;
				entry = ring_buffer_event_data(event);
				/* payload is the writer's CPU id; readers verify it */
				*entry = smp_processor_id();
				ring_buffer_unlock_commit(buffer, event);
			}
		}
		do_gettimeofday(&end_tv);

		cnt++;
		if (consumer && !(cnt % wakeup_interval))
			wake_up_process(consumer);

#ifndef CONFIG_PREEMPT
		/*
		 * If we are a non preempt kernel, the 10 second run will
		 * stop everything while it runs. Instead, we will call
		 * cond_resched and also add any time that was lost by a
		 * reschedule.
		 *
		 * Do a cond resched at the same frequency we would wake up
		 * the reader.
		 */
		if (cnt % wakeup_interval)
			cond_resched();
#endif

	} while (end_tv.tv_sec < (start_tv.tv_sec + RUN_TIME) && !kill_test);
	trace_printk("End ring buffer hammer ");

	if (consumer) {
		/* Init both completions here to avoid races */
		init_completion(&read_start);
		init_completion(&read_done);
		/* the completions must be visible before the finish var */
		smp_wmb();
		reader_finish = 1;
		/* finish var visible before waking up the consumer */
		smp_wmb();
		wake_up_process(consumer);
		wait_for_completion(&read_done);
	}

	/* elapsed run time in microseconds */
	time = end_tv.tv_sec - start_tv.tv_sec;
	time *= USEC_PER_SEC;
	time += (long long)((long)end_tv.tv_usec - (long)start_tv.tv_usec);

	entries = ring_buffer_entries(buffer);
	overruns = ring_buffer_overruns(buffer);

	if (kill_test)
		trace_printk("ERROR! ");

	if (!disable_reader) {
		if (consumer_fifo < 0)
			trace_printk("Running Consumer at nice: %d ",
				     consumer_nice);
		else
			trace_printk("Running Consumer at SCHED_FIFO %d ",
				     consumer_fifo);
	}
	if (producer_fifo < 0)
		trace_printk("Running Producer at nice: %d ",
			     producer_nice);
	else
		trace_printk("Running Producer at SCHED_FIFO %d ",
			     producer_fifo);

	/* Let the user know that the test is running at low priority */
	if (producer_fifo < 0 && consumer_fifo < 0 &&
	    producer_nice == 19 && consumer_nice == 19)
		trace_printk("WARNING!!! This test is running at lowest priority. ");

	trace_printk("Time: %lld (usecs) ", time);
	trace_printk("Overruns: %lld ", overruns);
	if (disable_reader)
		trace_printk("Read: (reader disabled) ");
	else
		trace_printk("Read: %ld (by %s) ", read,
			read_events ? "events" : "pages");
	trace_printk("Entries: %lld ", entries);
	trace_printk("Total: %lld ", entries + overruns + read);
	trace_printk("Missed: %ld ", missed);
	trace_printk("Hit: %ld ", hit);

	/* Convert time from usecs to millisecs */
	do_div(time, USEC_PER_MSEC);
	if (time)
		hit /= (long)time;	/* hit is now events per millisec */
	else
		trace_printk("TIME IS ZERO?? ");

	trace_printk("Entries per millisec: %ld ", hit);

	if (hit) {
		/* Calculate the average time in nanosecs */
		avg = NSEC_PER_MSEC / hit;
		trace_printk("%ld ns per entry ", avg);
	}

	if (missed) {
		if (time)
			missed /= (long)time;

		trace_printk("Total iterations per millisec: %ld ",
			     hit + missed);

		/* it is possible that hit + missed will overflow and be zero */
		if (!(hit + missed)) {
			trace_printk("hit + missed overflowed and totalled zero! ");
			hit--; /* make it non zero */
		}

		/* Calculate the average time in nanosecs */
		avg = NSEC_PER_MSEC / (hit + missed);
		trace_printk("%ld ns per entry ", avg);
	}
5092dbc96 ring-buffer: add ... |
369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 |
} static void wait_to_die(void) { set_current_state(TASK_INTERRUPTIBLE); while (!kthread_should_stop()) { schedule(); set_current_state(TASK_INTERRUPTIBLE); } __set_current_state(TASK_RUNNING); } static int ring_buffer_consumer_thread(void *arg) { while (!kthread_should_stop() && !kill_test) { complete(&read_start); ring_buffer_consumer(); set_current_state(TASK_INTERRUPTIBLE); if (kthread_should_stop() || kill_test) break; schedule(); __set_current_state(TASK_RUNNING); } __set_current_state(TASK_RUNNING); if (kill_test) wait_to_die(); return 0; } static int ring_buffer_producer_thread(void *arg) { init_completion(&read_start); while (!kthread_should_stop() && !kill_test) { ring_buffer_reset(buffer); if (consumer) { smp_wmb(); wake_up_process(consumer); wait_for_completion(&read_start); } ring_buffer_producer(); |
4b221f031 ring-buffer: have... |
417 418 |
trace_printk("Sleeping for 10 secs "); |
5092dbc96 ring-buffer: add ... |
419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 |
set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(HZ * SLEEP_TIME); __set_current_state(TASK_RUNNING); } if (kill_test) wait_to_die(); return 0; } static int __init ring_buffer_benchmark_init(void) { int ret; /* make a one meg buffer in overwite mode */ buffer = ring_buffer_alloc(1000000, RB_FL_OVERWRITE); if (!buffer) return -ENOMEM; if (!disable_reader) { consumer = kthread_create(ring_buffer_consumer_thread, NULL, "rb_consumer"); ret = PTR_ERR(consumer); if (IS_ERR(consumer)) goto out_fail; } producer = kthread_run(ring_buffer_producer_thread, NULL, "rb_producer"); ret = PTR_ERR(producer); if (IS_ERR(producer)) goto out_kill; |
98e4833ba ring-buffer bench... |
453 454 455 |
/* * Run them as low-prio background tasks by default: */ |
7ac074340 ring-buffer-bench... |
456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 |
if (!disable_reader) { if (consumer_fifo >= 0) { struct sched_param param = { .sched_priority = consumer_fifo }; sched_setscheduler(consumer, SCHED_FIFO, ¶m); } else set_user_nice(consumer, consumer_nice); } if (producer_fifo >= 0) { struct sched_param param = { .sched_priority = consumer_fifo }; sched_setscheduler(producer, SCHED_FIFO, ¶m); } else set_user_nice(producer, producer_nice); |
98e4833ba ring-buffer bench... |
473 |
|
5092dbc96 ring-buffer: add ... |
474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 |
return 0; out_kill: if (consumer) kthread_stop(consumer); out_fail: ring_buffer_free(buffer); return ret; } static void __exit ring_buffer_benchmark_exit(void) { kthread_stop(producer); if (consumer) kthread_stop(consumer); ring_buffer_free(buffer); } module_init(ring_buffer_benchmark_init); module_exit(ring_buffer_benchmark_exit); MODULE_AUTHOR("Steven Rostedt"); MODULE_DESCRIPTION("ring_buffer_benchmark"); MODULE_LICENSE("GPL"); |