Commit 5a772b2b3c68e7e0b503c5a48469113bb0634314
Committed by Steven Rostedt
1 parent 79c5d3ce61
Exists in master and in 7 other branches
ring-buffer: replace constants with time macros in ring-buffer-benchmark
The use of numeric constants is discouraged. It is cleaner and more descriptive to use macros for constant time conversions.

This patch also removes an extra new line.

[ Impact: more descriptive time conversions ]

Reported-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
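For reference, the conversion macros this patch substitutes come from <linux/time.h>, which the benchmark already includes. An excerpt showing the values these macros expand to in the kernel headers of this era:

/* Fixed time-conversion constants from <linux/time.h> */
#define MSEC_PER_SEC    1000L
#define USEC_PER_MSEC   1000L
#define NSEC_PER_USEC   1000L
#define NSEC_PER_MSEC   1000000L
#define USEC_PER_SEC    1000000L
#define NSEC_PER_SEC    1000000000L

The patch is purely a readability change: each macro expands to exactly the numeric constant it replaces.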
Showing 1 changed file with 7 additions and 5 deletions
kernel/trace/ring_buffer_benchmark.c
 /*
  * ring buffer tester and benchmark
  *
  * Copyright (C) 2009 Steven Rostedt <srostedt@redhat.com>
  */
 #include <linux/ring_buffer.h>
 #include <linux/completion.h>
 #include <linux/kthread.h>
 #include <linux/module.h>
 #include <linux/time.h>

 struct rb_page {
         u64 ts;
         local_t commit;
         char data[4080];
 };

 /* run time and sleep time in seconds */
 #define RUN_TIME 10
 #define SLEEP_TIME 10

 /* number of events for writer to wake up the reader */
 static int wakeup_interval = 100;

 static int reader_finish;
 static struct completion read_start;
 static struct completion read_done;

 static struct ring_buffer *buffer;
 static struct task_struct *producer;
 static struct task_struct *consumer;
 static unsigned long read;

 static int disable_reader;
 module_param(disable_reader, uint, 0644);
 MODULE_PARM_DESC(disable_reader, "only run producer");

 static int read_events;

 static int kill_test;

 #define KILL_TEST()                             \
         do {                                    \
                 if (!kill_test) {               \
                         kill_test = 1;          \
                         WARN_ON(1);             \
                 }                               \
         } while (0)

 enum event_status {
         EVENT_FOUND,
         EVENT_DROPPED,
 };

 static enum event_status read_event(int cpu)
 {
         struct ring_buffer_event *event;
         int *entry;
         u64 ts;

         event = ring_buffer_consume(buffer, cpu, &ts);
         if (!event)
                 return EVENT_DROPPED;

         entry = ring_buffer_event_data(event);
         if (*entry != cpu) {
                 KILL_TEST();
                 return EVENT_DROPPED;
         }

         read++;
         return EVENT_FOUND;
 }

 static enum event_status read_page(int cpu)
 {
         struct ring_buffer_event *event;
         struct rb_page *rpage;
         unsigned long commit;
         void *bpage;
         int *entry;
         int ret;
         int inc;
         int i;

         bpage = ring_buffer_alloc_read_page(buffer);
         if (!bpage)
                 return EVENT_DROPPED;

         ret = ring_buffer_read_page(buffer, &bpage, PAGE_SIZE, cpu, 1);
         if (ret >= 0) {
                 rpage = bpage;
                 commit = local_read(&rpage->commit);
                 for (i = 0; i < commit && !kill_test; i += inc) {

                         if (i >= (PAGE_SIZE - offsetof(struct rb_page, data))) {
                                 KILL_TEST();
                                 break;
                         }

                         inc = -1;
                         event = (void *)&rpage->data[i];
                         switch (event->type_len) {
                         case RINGBUF_TYPE_PADDING:
                                 /* We don't expect any padding */
                                 KILL_TEST();
                                 break;
                         case RINGBUF_TYPE_TIME_EXTEND:
                                 inc = 8;
                                 break;
                         case 0:
                                 entry = ring_buffer_event_data(event);
                                 if (*entry != cpu) {
                                         KILL_TEST();
                                         break;
                                 }
                                 read++;
                                 if (!event->array[0]) {
                                         KILL_TEST();
                                         break;
                                 }
                                 inc = event->array[0];
                                 break;
                         default:
                                 entry = ring_buffer_event_data(event);
                                 if (*entry != cpu) {
                                         KILL_TEST();
                                         break;
                                 }
                                 read++;
                                 inc = ((event->type_len + 1) * 4);
                         }
                         if (kill_test)
                                 break;

                         if (inc <= 0) {
                                 KILL_TEST();
                                 break;
                         }
                 }
         }
         ring_buffer_free_read_page(buffer, bpage);

         if (ret < 0)
                 return EVENT_DROPPED;
         return EVENT_FOUND;
 }

 static void ring_buffer_consumer(void)
 {
         /* toggle between reading pages and events */
         read_events ^= 1;

         read = 0;
         while (!reader_finish && !kill_test) {
                 int found;

                 do {
                         int cpu;

                         found = 0;
                         for_each_online_cpu(cpu) {
                                 enum event_status stat;

                                 if (read_events)
                                         stat = read_event(cpu);
                                 else
                                         stat = read_page(cpu);

                                 if (kill_test)
                                         break;
                                 if (stat == EVENT_FOUND)
                                         found = 1;
                         }
                 } while (found && !kill_test);

                 set_current_state(TASK_INTERRUPTIBLE);
                 if (reader_finish)
                         break;

                 schedule();
                 __set_current_state(TASK_RUNNING);
         }
         reader_finish = 0;
         complete(&read_done);
 }

 static void ring_buffer_producer(void)
 {
         struct timeval start_tv;
         struct timeval end_tv;
         unsigned long long time;
         unsigned long long entries;
         unsigned long long overruns;
         unsigned long missed = 0;
         unsigned long hit = 0;
         unsigned long avg;
         int cnt = 0;

         /*
          * Hammer the buffer for 10 secs (this may
          * make the system stall)
          */
         pr_info("Starting ring buffer hammer\n");
         do_gettimeofday(&start_tv);
         do {
                 struct ring_buffer_event *event;
                 int *entry;

                 event = ring_buffer_lock_reserve(buffer, 10);
                 if (!event) {
                         missed++;
                 } else {
                         hit++;
                         entry = ring_buffer_event_data(event);
                         *entry = smp_processor_id();
                         ring_buffer_unlock_commit(buffer, event);
                 }
                 do_gettimeofday(&end_tv);

                 cnt++;
                 if (consumer && !(cnt % wakeup_interval))
                         wake_up_process(consumer);

 #ifndef CONFIG_PREEMPT
                 /*
                  * If we are a non preempt kernel, the 10 second run will
                  * stop everything while it runs. Instead, we will call
                  * cond_resched and also add any time that was lost by a
                  * reschedule.
                  *
                  * Do a cond resched at the same frequency we would wake up
                  * the reader.
                  */
                 if (cnt % wakeup_interval)
                         cond_resched();
 #endif

         } while (end_tv.tv_sec < (start_tv.tv_sec + RUN_TIME) && !kill_test);
         pr_info("End ring buffer hammer\n");

         if (consumer) {
                 /* Init both completions here to avoid races */
                 init_completion(&read_start);
                 init_completion(&read_done);
                 /* the completions must be visible before the finish var */
                 smp_wmb();
                 reader_finish = 1;
                 /* finish var visible before waking up the consumer */
                 smp_wmb();
                 wake_up_process(consumer);
                 wait_for_completion(&read_done);
         }

         time = end_tv.tv_sec - start_tv.tv_sec;
-        time *= 1000000;
+        time *= USEC_PER_SEC;
         time += (long long)((long)end_tv.tv_usec - (long)start_tv.tv_usec);

         entries = ring_buffer_entries(buffer);
         overruns = ring_buffer_overruns(buffer);

         if (kill_test)
                 pr_info("ERROR!\n");
         pr_info("Time: %lld (usecs)\n", time);
         pr_info("Overruns: %lld\n", overruns);
         if (disable_reader)
                 pr_info("Read: (reader disabled)\n");
         else
                 pr_info("Read: %ld (by %s)\n", read,
                         read_events ? "events" : "pages");
         pr_info("Entries: %lld\n", entries);
         pr_info("Total: %lld\n", entries + overruns + read);
         pr_info("Missed: %ld\n", missed);
         pr_info("Hit: %ld\n", hit);

-        do_div(time, 1000);
+        /* Convert time from usecs to millisecs */
+        do_div(time, USEC_PER_MSEC);
         if (time)
                 hit /= (long)time;
         else
                 pr_info("TIME IS ZERO??\n");

         pr_info("Entries per millisec: %ld\n", hit);

         if (hit) {
-                avg = 1000000 / hit;
+                /* Calculate the average time in nanosecs */
+                avg = NSEC_PER_MSEC / hit;
                 pr_info("%ld ns per entry\n", avg);
         }

-
         if (missed) {
                 if (time)
                         missed /= (long)time;

                 pr_info("Total iterations per millisec: %ld\n", hit + missed);

-                avg = 1000000 / (hit + missed);
+                /* Calculate the average time in nanosecs */
+                avg = NSEC_PER_MSEC / (hit + missed);
                 pr_info("%ld ns per entry\n", avg);
         }
 }

 static void wait_to_die(void)
 {
         set_current_state(TASK_INTERRUPTIBLE);
         while (!kthread_should_stop()) {
                 schedule();
                 set_current_state(TASK_INTERRUPTIBLE);
         }
         __set_current_state(TASK_RUNNING);
 }

 static int ring_buffer_consumer_thread(void *arg)
 {
         while (!kthread_should_stop() && !kill_test) {
                 complete(&read_start);

                 ring_buffer_consumer();

                 set_current_state(TASK_INTERRUPTIBLE);
                 if (kthread_should_stop() || kill_test)
                         break;

                 schedule();
                 __set_current_state(TASK_RUNNING);
         }
         __set_current_state(TASK_RUNNING);

         if (kill_test)
                 wait_to_die();

         return 0;
 }

 static int ring_buffer_producer_thread(void *arg)
 {
         init_completion(&read_start);

         while (!kthread_should_stop() && !kill_test) {
                 ring_buffer_reset(buffer);

                 if (consumer) {
                         smp_wmb();
                         wake_up_process(consumer);
                         wait_for_completion(&read_start);
                 }

                 ring_buffer_producer();

                 pr_info("Sleeping for 10 secs\n");
                 set_current_state(TASK_INTERRUPTIBLE);
                 schedule_timeout(HZ * SLEEP_TIME);
                 __set_current_state(TASK_RUNNING);
         }

         if (kill_test)
                 wait_to_die();

         return 0;
 }

 static int __init ring_buffer_benchmark_init(void)
 {
         int ret;

         /* make a one meg buffer in overwrite mode */
         buffer = ring_buffer_alloc(1000000, RB_FL_OVERWRITE);
         if (!buffer)
                 return -ENOMEM;

         if (!disable_reader) {
                 consumer = kthread_create(ring_buffer_consumer_thread,
                                           NULL, "rb_consumer");
                 ret = PTR_ERR(consumer);
                 if (IS_ERR(consumer))
                         goto out_fail;
         }

         producer = kthread_run(ring_buffer_producer_thread,
                                NULL, "rb_producer");
         ret = PTR_ERR(producer);

         if (IS_ERR(producer))
                 goto out_kill;

         return 0;

 out_kill:
         if (consumer)
                 kthread_stop(consumer);

 out_fail:
         ring_buffer_free(buffer);
         return ret;
 }

 static void __exit ring_buffer_benchmark_exit(void)
 {
         kthread_stop(producer);
         if (consumer)
                 kthread_stop(consumer);
         ring_buffer_free(buffer);
 }

 module_init(ring_buffer_benchmark_init);
 module_exit(ring_buffer_benchmark_exit);

 MODULE_AUTHOR("Steven Rostedt");
 MODULE_DESCRIPTION("ring_buffer_benchmark");
 MODULE_LICENSE("GPL");
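To make the reporting arithmetic concrete, here is a minimal standalone sketch (plain userspace C with hypothetical sample figures, not part of the module) that mirrors the conversions the patch rewrites: elapsed time is accumulated in microseconds, reduced to milliseconds with USEC_PER_MSEC, and per-entry latency is derived from NSEC_PER_MSEC because hit has already been normalized to events per millisecond.

#include <stdio.h>

#define USEC_PER_SEC    1000000L
#define USEC_PER_MSEC   1000L
#define NSEC_PER_MSEC   1000000L

int main(void)
{
        /* hypothetical run: 10 seconds elapsed, 12,345,678 committed events */
        unsigned long long time = 10 * USEC_PER_SEC;    /* elapsed time in usecs */
        unsigned long hit = 12345678;

        time /= USEC_PER_MSEC;          /* usecs -> msecs (the kernel uses do_div) */
        hit /= (unsigned long)time;     /* events per millisec */

        printf("Entries per millisec: %lu\n", hit);
        printf("%lu ns per entry\n", NSEC_PER_MSEC / hit);
        return 0;
}

For these sample figures the sketch prints 1234 entries per millisec and 810 ns per entry; dividing one millisecond's worth of nanoseconds by a per-millisecond rate is exactly why NSEC_PER_MSEC, not NSEC_PER_SEC, is the right macro in the patch.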