Commit 8789a9e7df6bf9b93739c4c7d4e380725bc9e936
Committed by Ingo Molnar
1 parent abc9b56d66
Exists in master and in 7 other branches
ring-buffer: read page interface
Impact: new API to ring buffer

This patch adds a new interface into the ring buffer that allows a
page to be read from the ring buffer on a given CPU. For every page
read, a page must also be supplied, to allow for a "swap" of the
pages.

 rpage = ring_buffer_alloc_read_page(buffer);
 if (!rpage)
 	goto err;
 ret = ring_buffer_read_page(buffer, &rpage, cpu, full);
 if (!ret)
 	goto empty;
 process_page(rpage);
 ring_buffer_free_read_page(buffer, rpage);

The caller of these functions must handle any waiting needed for
new data. ring_buffer_read_page() simply returns 0 if there is no
data, or if "full" is set and the writer is still on the current
page.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
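For illustration, a fuller consumer-loop sketch along the lines of the
snippet above; wait_for_data() and process_page() are hypothetical
caller-side hooks (the API leaves all waiting to the caller), and the
error handling is minimal:

 /*
  * Hypothetical consumer loop for one CPU.  Only the ring_buffer_*
  * calls are part of this patch's API; everything else is a sketch.
  */
 static int consume_cpu(struct ring_buffer *buffer, int cpu)
 {
 	void *rpage;
 	int ret;

 	rpage = ring_buffer_alloc_read_page(buffer);
 	if (!rpage)
 		return -ENOMEM;

 	for (;;) {
 		/* full == 1: only read pages the writer has moved off of */
 		ret = ring_buffer_read_page(buffer, &rpage, cpu, 1);
 		if (!ret) {
 			if (wait_for_data(buffer, cpu))	/* hypothetical */
 				break;
 			continue;
 		}
 		/*
 		 * On success the page we passed in was swapped for a page
 		 * of ring buffer data; rpage now points at that data page.
 		 */
 		process_page(rpage);			/* hypothetical */
 	}

 	ring_buffer_free_read_page(buffer, rpage);
 	return 0;
 }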
Showing 2 changed files with 171 additions and 0 deletions
include/linux/ring_buffer.h
1 | #ifndef _LINUX_RING_BUFFER_H | 1 | #ifndef _LINUX_RING_BUFFER_H |
2 | #define _LINUX_RING_BUFFER_H | 2 | #define _LINUX_RING_BUFFER_H |
3 | 3 | ||
4 | #include <linux/mm.h> | 4 | #include <linux/mm.h> |
5 | #include <linux/seq_file.h> | 5 | #include <linux/seq_file.h> |
6 | 6 | ||
7 | struct ring_buffer; | 7 | struct ring_buffer; |
8 | struct ring_buffer_iter; | 8 | struct ring_buffer_iter; |
9 | 9 | ||
10 | /* | 10 | /* |
11 | * Don't reference this struct directly, use functions below. | 11 | * Don't reference this struct directly, use functions below. |
12 | */ | 12 | */ |
13 | struct ring_buffer_event { | 13 | struct ring_buffer_event { |
14 | u32 type:2, len:3, time_delta:27; | 14 | u32 type:2, len:3, time_delta:27; |
15 | u32 array[]; | 15 | u32 array[]; |
16 | }; | 16 | }; |
17 | 17 | ||
18 | /** | 18 | /** |
19 | * enum ring_buffer_type - internal ring buffer types | 19 | * enum ring_buffer_type - internal ring buffer types |
20 | * | 20 | * |
21 | * @RINGBUF_TYPE_PADDING: Left over page padding | 21 | * @RINGBUF_TYPE_PADDING: Left over page padding |
22 | * array is ignored | 22 | * array is ignored |
23 | * size is variable depending on how much | 23 | * size is variable depending on how much |
24 | * padding is needed | 24 | * padding is needed |
25 | * | 25 | * |
26 | * @RINGBUF_TYPE_TIME_EXTEND: Extend the time delta | 26 | * @RINGBUF_TYPE_TIME_EXTEND: Extend the time delta |
27 | * array[0] = time delta (28 .. 59) | 27 | * array[0] = time delta (28 .. 59) |
28 | * size = 8 bytes | 28 | * size = 8 bytes |
29 | * | 29 | * |
30 | * @RINGBUF_TYPE_TIME_STAMP: Sync time stamp with external clock | 30 | * @RINGBUF_TYPE_TIME_STAMP: Sync time stamp with external clock |
31 | * array[0] = tv_nsec | 31 | * array[0] = tv_nsec |
32 | * array[1] = tv_sec | 32 | * array[1] = tv_sec |
33 | * size = 16 bytes | 33 | * size = 16 bytes |
34 | * | 34 | * |
35 | * @RINGBUF_TYPE_DATA: Data record | 35 | * @RINGBUF_TYPE_DATA: Data record |
36 | * If len is zero: | 36 | * If len is zero: |
37 | * array[0] holds the actual length | 37 | * array[0] holds the actual length |
38 | * array[1..(length+3)/4-1] holds data | 38 | * array[1..(length+3)/4-1] holds data |
39 | * else | 39 | * else |
40 | * length = len << 2 | 40 | * length = len << 2 |
41 | * array[0..(length+3)/4] holds data | 41 | * array[0..(length+3)/4] holds data |
42 | */ | 42 | */ |
43 | enum ring_buffer_type { | 43 | enum ring_buffer_type { |
44 | RINGBUF_TYPE_PADDING, | 44 | RINGBUF_TYPE_PADDING, |
45 | RINGBUF_TYPE_TIME_EXTEND, | 45 | RINGBUF_TYPE_TIME_EXTEND, |
46 | /* FIXME: RINGBUF_TYPE_TIME_STAMP not implemented */ | 46 | /* FIXME: RINGBUF_TYPE_TIME_STAMP not implemented */ |
47 | RINGBUF_TYPE_TIME_STAMP, | 47 | RINGBUF_TYPE_TIME_STAMP, |
48 | RINGBUF_TYPE_DATA, | 48 | RINGBUF_TYPE_DATA, |
49 | }; | 49 | }; |
50 | 50 | ||
51 | unsigned ring_buffer_event_length(struct ring_buffer_event *event); | 51 | unsigned ring_buffer_event_length(struct ring_buffer_event *event); |
52 | void *ring_buffer_event_data(struct ring_buffer_event *event); | 52 | void *ring_buffer_event_data(struct ring_buffer_event *event); |
53 | 53 | ||
54 | /** | 54 | /** |
55 | * ring_buffer_event_time_delta - return the delta timestamp of the event | 55 | * ring_buffer_event_time_delta - return the delta timestamp of the event |
56 | * @event: the event to get the delta timestamp of | 56 | * @event: the event to get the delta timestamp of |
57 | * | 57 | * |
58 | * The delta timestamp is the 27 bit timestamp since the last event. | 58 | * The delta timestamp is the 27 bit timestamp since the last event. |
59 | */ | 59 | */ |
60 | static inline unsigned | 60 | static inline unsigned |
61 | ring_buffer_event_time_delta(struct ring_buffer_event *event) | 61 | ring_buffer_event_time_delta(struct ring_buffer_event *event) |
62 | { | 62 | { |
63 | return event->time_delta; | 63 | return event->time_delta; |
64 | } | 64 | } |
65 | 65 | ||
66 | /* | 66 | /* |
67 | * size is in bytes for each per CPU buffer. | 67 | * size is in bytes for each per CPU buffer. |
68 | */ | 68 | */ |
69 | struct ring_buffer * | 69 | struct ring_buffer * |
70 | ring_buffer_alloc(unsigned long size, unsigned flags); | 70 | ring_buffer_alloc(unsigned long size, unsigned flags); |
71 | void ring_buffer_free(struct ring_buffer *buffer); | 71 | void ring_buffer_free(struct ring_buffer *buffer); |
72 | 72 | ||
73 | int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size); | 73 | int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size); |
74 | 74 | ||
75 | struct ring_buffer_event * | 75 | struct ring_buffer_event * |
76 | ring_buffer_lock_reserve(struct ring_buffer *buffer, | 76 | ring_buffer_lock_reserve(struct ring_buffer *buffer, |
77 | unsigned long length, | 77 | unsigned long length, |
78 | unsigned long *flags); | 78 | unsigned long *flags); |
79 | int ring_buffer_unlock_commit(struct ring_buffer *buffer, | 79 | int ring_buffer_unlock_commit(struct ring_buffer *buffer, |
80 | struct ring_buffer_event *event, | 80 | struct ring_buffer_event *event, |
81 | unsigned long flags); | 81 | unsigned long flags); |
82 | int ring_buffer_write(struct ring_buffer *buffer, | 82 | int ring_buffer_write(struct ring_buffer *buffer, |
83 | unsigned long length, void *data); | 83 | unsigned long length, void *data); |
84 | 84 | ||
85 | struct ring_buffer_event * | 85 | struct ring_buffer_event * |
86 | ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts); | 86 | ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts); |
87 | struct ring_buffer_event * | 87 | struct ring_buffer_event * |
88 | ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts); | 88 | ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts); |
89 | 89 | ||
90 | struct ring_buffer_iter * | 90 | struct ring_buffer_iter * |
91 | ring_buffer_read_start(struct ring_buffer *buffer, int cpu); | 91 | ring_buffer_read_start(struct ring_buffer *buffer, int cpu); |
92 | void ring_buffer_read_finish(struct ring_buffer_iter *iter); | 92 | void ring_buffer_read_finish(struct ring_buffer_iter *iter); |
93 | 93 | ||
94 | struct ring_buffer_event * | 94 | struct ring_buffer_event * |
95 | ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts); | 95 | ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts); |
96 | struct ring_buffer_event * | 96 | struct ring_buffer_event * |
97 | ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts); | 97 | ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts); |
98 | void ring_buffer_iter_reset(struct ring_buffer_iter *iter); | 98 | void ring_buffer_iter_reset(struct ring_buffer_iter *iter); |
99 | int ring_buffer_iter_empty(struct ring_buffer_iter *iter); | 99 | int ring_buffer_iter_empty(struct ring_buffer_iter *iter); |
100 | 100 | ||
101 | unsigned long ring_buffer_size(struct ring_buffer *buffer); | 101 | unsigned long ring_buffer_size(struct ring_buffer *buffer); |
102 | 102 | ||
103 | void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu); | 103 | void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu); |
104 | void ring_buffer_reset(struct ring_buffer *buffer); | 104 | void ring_buffer_reset(struct ring_buffer *buffer); |
105 | 105 | ||
106 | int ring_buffer_swap_cpu(struct ring_buffer *buffer_a, | 106 | int ring_buffer_swap_cpu(struct ring_buffer *buffer_a, |
107 | struct ring_buffer *buffer_b, int cpu); | 107 | struct ring_buffer *buffer_b, int cpu); |
108 | 108 | ||
109 | int ring_buffer_empty(struct ring_buffer *buffer); | 109 | int ring_buffer_empty(struct ring_buffer *buffer); |
110 | int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu); | 110 | int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu); |
111 | 111 | ||
112 | void ring_buffer_record_disable(struct ring_buffer *buffer); | 112 | void ring_buffer_record_disable(struct ring_buffer *buffer); |
113 | void ring_buffer_record_enable(struct ring_buffer *buffer); | 113 | void ring_buffer_record_enable(struct ring_buffer *buffer); |
114 | void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu); | 114 | void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu); |
115 | void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu); | 115 | void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu); |
116 | 116 | ||
117 | unsigned long ring_buffer_entries(struct ring_buffer *buffer); | 117 | unsigned long ring_buffer_entries(struct ring_buffer *buffer); |
118 | unsigned long ring_buffer_overruns(struct ring_buffer *buffer); | 118 | unsigned long ring_buffer_overruns(struct ring_buffer *buffer); |
119 | 119 | ||
120 | u64 ring_buffer_time_stamp(int cpu); | 120 | u64 ring_buffer_time_stamp(int cpu); |
121 | void ring_buffer_normalize_time_stamp(int cpu, u64 *ts); | 121 | void ring_buffer_normalize_time_stamp(int cpu, u64 *ts); |
122 | 122 | ||
123 | void tracing_on(void); | 123 | void tracing_on(void); |
124 | void tracing_off(void); | 124 | void tracing_off(void); |
125 | void tracing_off_permanent(void); | 125 | void tracing_off_permanent(void); |
126 | 126 | ||
127 | void *ring_buffer_alloc_read_page(struct ring_buffer *buffer); | ||
128 | void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data); | ||
129 | int ring_buffer_read_page(struct ring_buffer *buffer, | ||
130 | void **data_page, int cpu, int full); | ||
131 | |||
127 | enum ring_buffer_flags { | 132 | enum ring_buffer_flags { |
128 | RB_FL_OVERWRITE = 1 << 0, | 133 | RB_FL_OVERWRITE = 1 << 0, |
129 | }; | 134 | }; |
130 | 135 | ||
131 | #endif /* _LINUX_RING_BUFFER_H */ | 136 | #endif /* _LINUX_RING_BUFFER_H */ |
132 | 137 |
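One note on the event encoding documented above: a reader reconstructs
absolute timestamps by accumulating each event's 27-bit time_delta,
and a RINGBUF_TYPE_TIME_EXTEND event carries the upper bits of an
oversized delta in array[0]. A sketch of that accumulation, assuming
the TS_SHIFT value of 27 defined in ring_buffer.c below (this helper
is illustrative, not part of the patch):

 /* Sketch: advance a running timestamp across one event. */
 static u64 rb_advance_stamp_sketch(u64 stamp, struct ring_buffer_event *event)
 {
 	u64 delta = event->time_delta;			/* low 27 bits */

 	if (event->type == RINGBUF_TYPE_TIME_EXTEND)
 		delta += (u64)event->array[0] << 27;	/* TS_SHIFT */

 	return stamp + delta;
 }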
kernel/trace/ring_buffer.c
1 | /* | 1 | /* |
2 | * Generic ring buffer | 2 | * Generic ring buffer |
3 | * | 3 | * |
4 | * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com> | 4 | * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com> |
5 | */ | 5 | */ |
6 | #include <linux/ring_buffer.h> | 6 | #include <linux/ring_buffer.h> |
7 | #include <linux/spinlock.h> | 7 | #include <linux/spinlock.h> |
8 | #include <linux/debugfs.h> | 8 | #include <linux/debugfs.h> |
9 | #include <linux/uaccess.h> | 9 | #include <linux/uaccess.h> |
10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
11 | #include <linux/percpu.h> | 11 | #include <linux/percpu.h> |
12 | #include <linux/mutex.h> | 12 | #include <linux/mutex.h> |
13 | #include <linux/sched.h> /* used for sched_clock() (for now) */ | 13 | #include <linux/sched.h> /* used for sched_clock() (for now) */ |
14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
15 | #include <linux/hash.h> | 15 | #include <linux/hash.h> |
16 | #include <linux/list.h> | 16 | #include <linux/list.h> |
17 | #include <linux/fs.h> | 17 | #include <linux/fs.h> |
18 | 18 | ||
19 | #include "trace.h" | 19 | #include "trace.h" |
20 | 20 | ||
21 | /* | 21 | /* |
22 | * A fast way to enable or disable all ring buffers is to | 22 | * A fast way to enable or disable all ring buffers is to |
23 | * call tracing_on or tracing_off. Turning off the ring buffers | 23 | * call tracing_on or tracing_off. Turning off the ring buffers |
24 | * prevents all ring buffers from being recorded to. | 24 | * prevents all ring buffers from being recorded to. |
25 | * Turning this switch on makes it OK to write to the | 25 | * Turning this switch on makes it OK to write to the |
26 | * ring buffer, if the ring buffer is enabled itself. | 26 | * ring buffer, if the ring buffer is enabled itself. |
27 | * | 27 | * |
28 | * There are three layers that must be on in order to write | 28 | * There are three layers that must be on in order to write |
29 | * to the ring buffer. | 29 | * to the ring buffer. |
30 | * | 30 | * |
31 | * 1) This global flag must be set. | 31 | * 1) This global flag must be set. |
32 | * 2) The ring buffer must be enabled for recording. | 32 | * 2) The ring buffer must be enabled for recording. |
33 | * 3) The per cpu buffer must be enabled for recording. | 33 | * 3) The per cpu buffer must be enabled for recording. |
34 | * | 34 | * |
35 | * In case of an anomaly, this global flag has a bit set that | 35 | * In case of an anomaly, this global flag has a bit set that |
36 | * will permanently disable all ring buffers. | 36 | * will permanently disable all ring buffers. |
37 | */ | 37 | */ |
38 | 38 | ||
39 | /* | 39 | /* |
40 | * Global flag to disable all recording to ring buffers | 40 | * Global flag to disable all recording to ring buffers |
41 | * This has two bits: ON, DISABLED | 41 | * This has two bits: ON, DISABLED |
42 | * | 42 | * |
43 | * ON DISABLED | 43 | * ON DISABLED |
44 | * ---- ---------- | 44 | * ---- ---------- |
45 | * 0 0 : ring buffers are off | 45 | * 0 0 : ring buffers are off |
46 | * 1 0 : ring buffers are on | 46 | * 1 0 : ring buffers are on |
47 | * X 1 : ring buffers are permanently disabled | 47 | * X 1 : ring buffers are permanently disabled |
48 | */ | 48 | */ |
49 | 49 | ||
50 | enum { | 50 | enum { |
51 | RB_BUFFERS_ON_BIT = 0, | 51 | RB_BUFFERS_ON_BIT = 0, |
52 | RB_BUFFERS_DISABLED_BIT = 1, | 52 | RB_BUFFERS_DISABLED_BIT = 1, |
53 | }; | 53 | }; |
54 | 54 | ||
55 | enum { | 55 | enum { |
56 | RB_BUFFERS_ON = 1 << RB_BUFFERS_ON_BIT, | 56 | RB_BUFFERS_ON = 1 << RB_BUFFERS_ON_BIT, |
57 | RB_BUFFERS_DISABLED = 1 << RB_BUFFERS_DISABLED_BIT, | 57 | RB_BUFFERS_DISABLED = 1 << RB_BUFFERS_DISABLED_BIT, |
58 | }; | 58 | }; |
59 | 59 | ||
60 | static long ring_buffer_flags __read_mostly = RB_BUFFERS_ON; | 60 | static long ring_buffer_flags __read_mostly = RB_BUFFERS_ON; |
61 | 61 | ||
62 | /** | 62 | /** |
63 | * tracing_on - enable all tracing buffers | 63 | * tracing_on - enable all tracing buffers |
64 | * | 64 | * |
65 | * This function enables all tracing buffers that may have been | 65 | * This function enables all tracing buffers that may have been |
66 | * disabled with tracing_off. | 66 | * disabled with tracing_off. |
67 | */ | 67 | */ |
68 | void tracing_on(void) | 68 | void tracing_on(void) |
69 | { | 69 | { |
70 | set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags); | 70 | set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags); |
71 | } | 71 | } |
72 | 72 | ||
73 | /** | 73 | /** |
74 | * tracing_off - turn off all tracing buffers | 74 | * tracing_off - turn off all tracing buffers |
75 | * | 75 | * |
76 | * This function stops all tracing buffers from recording data. | 76 | * This function stops all tracing buffers from recording data. |
77 | * It does not disable any overhead the tracers themselves may | 77 | * It does not disable any overhead the tracers themselves may |
78 | * be causing. This function simply causes all recording to | 78 | * be causing. This function simply causes all recording to |
79 | * the ring buffers to fail. | 79 | * the ring buffers to fail. |
80 | */ | 80 | */ |
81 | void tracing_off(void) | 81 | void tracing_off(void) |
82 | { | 82 | { |
83 | clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags); | 83 | clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags); |
84 | } | 84 | } |
85 | 85 | ||
86 | /** | 86 | /** |
87 | * tracing_off_permanent - permanently disable ring buffers | 87 | * tracing_off_permanent - permanently disable ring buffers |
88 | * | 88 | * |
89 | * This function, once called, will disable all ring buffers | 89 | * This function, once called, will disable all ring buffers |
90 | * permanently. | 90 | * permanently. |
91 | */ | 91 | */ |
92 | void tracing_off_permanent(void) | 92 | void tracing_off_permanent(void) |
93 | { | 93 | { |
94 | set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags); | 94 | set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags); |
95 | } | 95 | } |
96 | 96 | ||
97 | #include "trace.h" | 97 | #include "trace.h" |
98 | 98 | ||
99 | /* Up this if you want to test the TIME_EXTENTS and normalization */ | 99 | /* Up this if you want to test the TIME_EXTENTS and normalization */ |
100 | #define DEBUG_SHIFT 0 | 100 | #define DEBUG_SHIFT 0 |
101 | 101 | ||
102 | /* FIXME!!! */ | 102 | /* FIXME!!! */ |
103 | u64 ring_buffer_time_stamp(int cpu) | 103 | u64 ring_buffer_time_stamp(int cpu) |
104 | { | 104 | { |
105 | u64 time; | 105 | u64 time; |
106 | 106 | ||
107 | preempt_disable_notrace(); | 107 | preempt_disable_notrace(); |
108 | /* shift to debug/test normalization and TIME_EXTENTS */ | 108 | /* shift to debug/test normalization and TIME_EXTENTS */ |
109 | time = sched_clock() << DEBUG_SHIFT; | 109 | time = sched_clock() << DEBUG_SHIFT; |
110 | preempt_enable_notrace(); | 110 | preempt_enable_notrace(); |
111 | 111 | ||
112 | return time; | 112 | return time; |
113 | } | 113 | } |
114 | 114 | ||
115 | void ring_buffer_normalize_time_stamp(int cpu, u64 *ts) | 115 | void ring_buffer_normalize_time_stamp(int cpu, u64 *ts) |
116 | { | 116 | { |
117 | /* Just stupid testing the normalize function and deltas */ | 117 | /* Just stupid testing the normalize function and deltas */ |
118 | *ts >>= DEBUG_SHIFT; | 118 | *ts >>= DEBUG_SHIFT; |
119 | } | 119 | } |
120 | 120 | ||
121 | #define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event)) | 121 | #define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event)) |
122 | #define RB_ALIGNMENT_SHIFT 2 | 122 | #define RB_ALIGNMENT_SHIFT 2 |
123 | #define RB_ALIGNMENT (1 << RB_ALIGNMENT_SHIFT) | 123 | #define RB_ALIGNMENT (1 << RB_ALIGNMENT_SHIFT) |
124 | #define RB_MAX_SMALL_DATA 28 | 124 | #define RB_MAX_SMALL_DATA 28 |
125 | 125 | ||
126 | enum { | 126 | enum { |
127 | RB_LEN_TIME_EXTEND = 8, | 127 | RB_LEN_TIME_EXTEND = 8, |
128 | RB_LEN_TIME_STAMP = 16, | 128 | RB_LEN_TIME_STAMP = 16, |
129 | }; | 129 | }; |
130 | 130 | ||
131 | /* inline for ring buffer fast paths */ | 131 | /* inline for ring buffer fast paths */ |
132 | static inline unsigned | 132 | static inline unsigned |
133 | rb_event_length(struct ring_buffer_event *event) | 133 | rb_event_length(struct ring_buffer_event *event) |
134 | { | 134 | { |
135 | unsigned length; | 135 | unsigned length; |
136 | 136 | ||
137 | switch (event->type) { | 137 | switch (event->type) { |
138 | case RINGBUF_TYPE_PADDING: | 138 | case RINGBUF_TYPE_PADDING: |
139 | /* undefined */ | 139 | /* undefined */ |
140 | return -1; | 140 | return -1; |
141 | 141 | ||
142 | case RINGBUF_TYPE_TIME_EXTEND: | 142 | case RINGBUF_TYPE_TIME_EXTEND: |
143 | return RB_LEN_TIME_EXTEND; | 143 | return RB_LEN_TIME_EXTEND; |
144 | 144 | ||
145 | case RINGBUF_TYPE_TIME_STAMP: | 145 | case RINGBUF_TYPE_TIME_STAMP: |
146 | return RB_LEN_TIME_STAMP; | 146 | return RB_LEN_TIME_STAMP; |
147 | 147 | ||
148 | case RINGBUF_TYPE_DATA: | 148 | case RINGBUF_TYPE_DATA: |
149 | if (event->len) | 149 | if (event->len) |
150 | length = event->len << RB_ALIGNMENT_SHIFT; | 150 | length = event->len << RB_ALIGNMENT_SHIFT; |
151 | else | 151 | else |
152 | length = event->array[0]; | 152 | length = event->array[0]; |
153 | return length + RB_EVNT_HDR_SIZE; | 153 | return length + RB_EVNT_HDR_SIZE; |
154 | default: | 154 | default: |
155 | BUG(); | 155 | BUG(); |
156 | } | 156 | } |
157 | /* not hit */ | 157 | /* not hit */ |
158 | return 0; | 158 | return 0; |
159 | } | 159 | } |
160 | 160 | ||
161 | /** | 161 | /** |
162 | * ring_buffer_event_length - return the length of the event | 162 | * ring_buffer_event_length - return the length of the event |
163 | * @event: the event to get the length of | 163 | * @event: the event to get the length of |
164 | */ | 164 | */ |
165 | unsigned ring_buffer_event_length(struct ring_buffer_event *event) | 165 | unsigned ring_buffer_event_length(struct ring_buffer_event *event) |
166 | { | 166 | { |
167 | return rb_event_length(event); | 167 | return rb_event_length(event); |
168 | } | 168 | } |
169 | 169 | ||
170 | /* inline for ring buffer fast paths */ | 170 | /* inline for ring buffer fast paths */ |
171 | static inline void * | 171 | static inline void * |
172 | rb_event_data(struct ring_buffer_event *event) | 172 | rb_event_data(struct ring_buffer_event *event) |
173 | { | 173 | { |
174 | BUG_ON(event->type != RINGBUF_TYPE_DATA); | 174 | BUG_ON(event->type != RINGBUF_TYPE_DATA); |
175 | /* If length is in len field, then array[0] has the data */ | 175 | /* If length is in len field, then array[0] has the data */ |
176 | if (event->len) | 176 | if (event->len) |
177 | return (void *)&event->array[0]; | 177 | return (void *)&event->array[0]; |
178 | /* Otherwise length is in array[0] and array[1] has the data */ | 178 | /* Otherwise length is in array[0] and array[1] has the data */ |
179 | return (void *)&event->array[1]; | 179 | return (void *)&event->array[1]; |
180 | } | 180 | } |
181 | 181 | ||
182 | /** | 182 | /** |
183 | * ring_buffer_event_data - return the data of the event | 183 | * ring_buffer_event_data - return the data of the event |
184 | * @event: the event to get the data from | 184 | * @event: the event to get the data from |
185 | */ | 185 | */ |
186 | void *ring_buffer_event_data(struct ring_buffer_event *event) | 186 | void *ring_buffer_event_data(struct ring_buffer_event *event) |
187 | { | 187 | { |
188 | return rb_event_data(event); | 188 | return rb_event_data(event); |
189 | } | 189 | } |
190 | 190 | ||
191 | #define for_each_buffer_cpu(buffer, cpu) \ | 191 | #define for_each_buffer_cpu(buffer, cpu) \ |
192 | for_each_cpu_mask(cpu, buffer->cpumask) | 192 | for_each_cpu_mask(cpu, buffer->cpumask) |
193 | 193 | ||
194 | #define TS_SHIFT 27 | 194 | #define TS_SHIFT 27 |
195 | #define TS_MASK ((1ULL << TS_SHIFT) - 1) | 195 | #define TS_MASK ((1ULL << TS_SHIFT) - 1) |
196 | #define TS_DELTA_TEST (~TS_MASK) | 196 | #define TS_DELTA_TEST (~TS_MASK) |
197 | 197 | ||
198 | struct buffer_data_page { | 198 | struct buffer_data_page { |
199 | u64 time_stamp; /* page time stamp */ | 199 | u64 time_stamp; /* page time stamp */ |
200 | local_t commit; /* write committed index */ | 200 | local_t commit; /* write committed index */ |
201 | unsigned char data[]; /* data of buffer page */ | 201 | unsigned char data[]; /* data of buffer page */ |
202 | }; | 202 | }; |
203 | 203 | ||
204 | struct buffer_page { | 204 | struct buffer_page { |
205 | local_t write; /* index for next write */ | 205 | local_t write; /* index for next write */ |
206 | unsigned read; /* index for next read */ | 206 | unsigned read; /* index for next read */ |
207 | struct list_head list; /* list of free pages */ | 207 | struct list_head list; /* list of free pages */ |
208 | struct buffer_data_page *page; /* Actual data page */ | 208 | struct buffer_data_page *page; /* Actual data page */ |
209 | }; | 209 | }; |
210 | 210 | ||
211 | static void rb_init_page(struct buffer_data_page *page) | 211 | static void rb_init_page(struct buffer_data_page *page) |
212 | { | 212 | { |
213 | local_set(&page->commit, 0); | 213 | local_set(&page->commit, 0); |
214 | } | 214 | } |
215 | 215 | ||
216 | /* | 216 | /* |
217 | * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing | 217 | * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing |
218 | * this issue out. | 218 | * this issue out. |
219 | */ | 219 | */ |
220 | static inline void free_buffer_page(struct buffer_page *bpage) | 220 | static inline void free_buffer_page(struct buffer_page *bpage) |
221 | { | 221 | { |
222 | if (bpage->page) | 222 | if (bpage->page) |
223 | free_page((unsigned long)bpage->page); | 223 | free_page((unsigned long)bpage->page); |
224 | kfree(bpage); | 224 | kfree(bpage); |
225 | } | 225 | } |
226 | 226 | ||
227 | /* | 227 | /* |
228 | * We need to fit the time_stamp delta into 27 bits. | 228 | * We need to fit the time_stamp delta into 27 bits. |
229 | */ | 229 | */ |
230 | static inline int test_time_stamp(u64 delta) | 230 | static inline int test_time_stamp(u64 delta) |
231 | { | 231 | { |
232 | if (delta & TS_DELTA_TEST) | 232 | if (delta & TS_DELTA_TEST) |
233 | return 1; | 233 | return 1; |
234 | return 0; | 234 | return 0; |
235 | } | 235 | } |
236 | 236 | ||
237 | #define BUF_PAGE_SIZE (PAGE_SIZE - sizeof(struct buffer_data_page)) | 237 | #define BUF_PAGE_SIZE (PAGE_SIZE - sizeof(struct buffer_data_page)) |
238 | 238 | ||
239 | /* | 239 | /* |
240 | * head_page == tail_page && head == tail then buffer is empty. | 240 | * head_page == tail_page && head == tail then buffer is empty. |
241 | */ | 241 | */ |
242 | struct ring_buffer_per_cpu { | 242 | struct ring_buffer_per_cpu { |
243 | int cpu; | 243 | int cpu; |
244 | struct ring_buffer *buffer; | 244 | struct ring_buffer *buffer; |
245 | spinlock_t reader_lock; /* serialize readers */ | 245 | spinlock_t reader_lock; /* serialize readers */ |
246 | raw_spinlock_t lock; | 246 | raw_spinlock_t lock; |
247 | struct lock_class_key lock_key; | 247 | struct lock_class_key lock_key; |
248 | struct list_head pages; | 248 | struct list_head pages; |
249 | struct buffer_page *head_page; /* read from head */ | 249 | struct buffer_page *head_page; /* read from head */ |
250 | struct buffer_page *tail_page; /* write to tail */ | 250 | struct buffer_page *tail_page; /* write to tail */ |
251 | struct buffer_page *commit_page; /* committed pages */ | 251 | struct buffer_page *commit_page; /* committed pages */ |
252 | struct buffer_page *reader_page; | 252 | struct buffer_page *reader_page; |
253 | unsigned long overrun; | 253 | unsigned long overrun; |
254 | unsigned long entries; | 254 | unsigned long entries; |
255 | u64 write_stamp; | 255 | u64 write_stamp; |
256 | u64 read_stamp; | 256 | u64 read_stamp; |
257 | atomic_t record_disabled; | 257 | atomic_t record_disabled; |
258 | }; | 258 | }; |
259 | 259 | ||
260 | struct ring_buffer { | 260 | struct ring_buffer { |
261 | unsigned long size; | 261 | unsigned long size; |
262 | unsigned pages; | 262 | unsigned pages; |
263 | unsigned flags; | 263 | unsigned flags; |
264 | int cpus; | 264 | int cpus; |
265 | cpumask_t cpumask; | 265 | cpumask_t cpumask; |
266 | atomic_t record_disabled; | 266 | atomic_t record_disabled; |
267 | 267 | ||
268 | struct mutex mutex; | 268 | struct mutex mutex; |
269 | 269 | ||
270 | struct ring_buffer_per_cpu **buffers; | 270 | struct ring_buffer_per_cpu **buffers; |
271 | }; | 271 | }; |
272 | 272 | ||
273 | struct ring_buffer_iter { | 273 | struct ring_buffer_iter { |
274 | struct ring_buffer_per_cpu *cpu_buffer; | 274 | struct ring_buffer_per_cpu *cpu_buffer; |
275 | unsigned long head; | 275 | unsigned long head; |
276 | struct buffer_page *head_page; | 276 | struct buffer_page *head_page; |
277 | u64 read_stamp; | 277 | u64 read_stamp; |
278 | }; | 278 | }; |
279 | 279 | ||
280 | /* buffer may be either ring_buffer or ring_buffer_per_cpu */ | 280 | /* buffer may be either ring_buffer or ring_buffer_per_cpu */ |
281 | #define RB_WARN_ON(buffer, cond) \ | 281 | #define RB_WARN_ON(buffer, cond) \ |
282 | ({ \ | 282 | ({ \ |
283 | int _____ret = unlikely(cond); \ | 283 | int _____ret = unlikely(cond); \ |
284 | if (_____ret) { \ | 284 | if (_____ret) { \ |
285 | atomic_inc(&buffer->record_disabled); \ | 285 | atomic_inc(&buffer->record_disabled); \ |
286 | WARN_ON(1); \ | 286 | WARN_ON(1); \ |
287 | } \ | 287 | } \ |
288 | _____ret; \ | 288 | _____ret; \ |
289 | }) | 289 | }) |
290 | 290 | ||
291 | /** | 291 | /** |
292 | * check_pages - integrity check of buffer pages | 292 | * check_pages - integrity check of buffer pages |
293 | * @cpu_buffer: CPU buffer with pages to test | 293 | * @cpu_buffer: CPU buffer with pages to test |
294 | * | 294 | * |
295 | * As a safety measure we check to make sure the data pages have not | 295 | * As a safety measure we check to make sure the data pages have not |
296 | * been corrupted. | 296 | * been corrupted. |
297 | */ | 297 | */ |
298 | static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer) | 298 | static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer) |
299 | { | 299 | { |
300 | struct list_head *head = &cpu_buffer->pages; | 300 | struct list_head *head = &cpu_buffer->pages; |
301 | struct buffer_page *page, *tmp; | 301 | struct buffer_page *page, *tmp; |
302 | 302 | ||
303 | if (RB_WARN_ON(cpu_buffer, head->next->prev != head)) | 303 | if (RB_WARN_ON(cpu_buffer, head->next->prev != head)) |
304 | return -1; | 304 | return -1; |
305 | if (RB_WARN_ON(cpu_buffer, head->prev->next != head)) | 305 | if (RB_WARN_ON(cpu_buffer, head->prev->next != head)) |
306 | return -1; | 306 | return -1; |
307 | 307 | ||
308 | list_for_each_entry_safe(page, tmp, head, list) { | 308 | list_for_each_entry_safe(page, tmp, head, list) { |
309 | if (RB_WARN_ON(cpu_buffer, | 309 | if (RB_WARN_ON(cpu_buffer, |
310 | page->list.next->prev != &page->list)) | 310 | page->list.next->prev != &page->list)) |
311 | return -1; | 311 | return -1; |
312 | if (RB_WARN_ON(cpu_buffer, | 312 | if (RB_WARN_ON(cpu_buffer, |
313 | page->list.prev->next != &page->list)) | 313 | page->list.prev->next != &page->list)) |
314 | return -1; | 314 | return -1; |
315 | } | 315 | } |
316 | 316 | ||
317 | return 0; | 317 | return 0; |
318 | } | 318 | } |
319 | 319 | ||
320 | static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, | 320 | static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, |
321 | unsigned nr_pages) | 321 | unsigned nr_pages) |
322 | { | 322 | { |
323 | struct list_head *head = &cpu_buffer->pages; | 323 | struct list_head *head = &cpu_buffer->pages; |
324 | struct buffer_page *page, *tmp; | 324 | struct buffer_page *page, *tmp; |
325 | unsigned long addr; | 325 | unsigned long addr; |
326 | LIST_HEAD(pages); | 326 | LIST_HEAD(pages); |
327 | unsigned i; | 327 | unsigned i; |
328 | 328 | ||
329 | for (i = 0; i < nr_pages; i++) { | 329 | for (i = 0; i < nr_pages; i++) { |
330 | page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()), | 330 | page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()), |
331 | GFP_KERNEL, cpu_to_node(cpu_buffer->cpu)); | 331 | GFP_KERNEL, cpu_to_node(cpu_buffer->cpu)); |
332 | if (!page) | 332 | if (!page) |
333 | goto free_pages; | 333 | goto free_pages; |
334 | list_add(&page->list, &pages); | 334 | list_add(&page->list, &pages); |
335 | 335 | ||
336 | addr = __get_free_page(GFP_KERNEL); | 336 | addr = __get_free_page(GFP_KERNEL); |
337 | if (!addr) | 337 | if (!addr) |
338 | goto free_pages; | 338 | goto free_pages; |
339 | page->page = (void *)addr; | 339 | page->page = (void *)addr; |
340 | rb_init_page(page->page); | 340 | rb_init_page(page->page); |
341 | } | 341 | } |
342 | 342 | ||
343 | list_splice(&pages, head); | 343 | list_splice(&pages, head); |
344 | 344 | ||
345 | rb_check_pages(cpu_buffer); | 345 | rb_check_pages(cpu_buffer); |
346 | 346 | ||
347 | return 0; | 347 | return 0; |
348 | 348 | ||
349 | free_pages: | 349 | free_pages: |
350 | list_for_each_entry_safe(page, tmp, &pages, list) { | 350 | list_for_each_entry_safe(page, tmp, &pages, list) { |
351 | list_del_init(&page->list); | 351 | list_del_init(&page->list); |
352 | free_buffer_page(page); | 352 | free_buffer_page(page); |
353 | } | 353 | } |
354 | return -ENOMEM; | 354 | return -ENOMEM; |
355 | } | 355 | } |
356 | 356 | ||
357 | static struct ring_buffer_per_cpu * | 357 | static struct ring_buffer_per_cpu * |
358 | rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu) | 358 | rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu) |
359 | { | 359 | { |
360 | struct ring_buffer_per_cpu *cpu_buffer; | 360 | struct ring_buffer_per_cpu *cpu_buffer; |
361 | struct buffer_page *page; | 361 | struct buffer_page *page; |
362 | unsigned long addr; | 362 | unsigned long addr; |
363 | int ret; | 363 | int ret; |
364 | 364 | ||
365 | cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()), | 365 | cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()), |
366 | GFP_KERNEL, cpu_to_node(cpu)); | 366 | GFP_KERNEL, cpu_to_node(cpu)); |
367 | if (!cpu_buffer) | 367 | if (!cpu_buffer) |
368 | return NULL; | 368 | return NULL; |
369 | 369 | ||
370 | cpu_buffer->cpu = cpu; | 370 | cpu_buffer->cpu = cpu; |
371 | cpu_buffer->buffer = buffer; | 371 | cpu_buffer->buffer = buffer; |
372 | spin_lock_init(&cpu_buffer->reader_lock); | 372 | spin_lock_init(&cpu_buffer->reader_lock); |
373 | cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; | 373 | cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; |
374 | INIT_LIST_HEAD(&cpu_buffer->pages); | 374 | INIT_LIST_HEAD(&cpu_buffer->pages); |
375 | 375 | ||
376 | page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()), | 376 | page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()), |
377 | GFP_KERNEL, cpu_to_node(cpu)); | 377 | GFP_KERNEL, cpu_to_node(cpu)); |
378 | if (!page) | 378 | if (!page) |
379 | goto fail_free_buffer; | 379 | goto fail_free_buffer; |
380 | 380 | ||
381 | cpu_buffer->reader_page = page; | 381 | cpu_buffer->reader_page = page; |
382 | addr = __get_free_page(GFP_KERNEL); | 382 | addr = __get_free_page(GFP_KERNEL); |
383 | if (!addr) | 383 | if (!addr) |
384 | goto fail_free_reader; | 384 | goto fail_free_reader; |
385 | page->page = (void *)addr; | 385 | page->page = (void *)addr; |
386 | rb_init_page(page->page); | 386 | rb_init_page(page->page); |
387 | 387 | ||
388 | INIT_LIST_HEAD(&cpu_buffer->reader_page->list); | 388 | INIT_LIST_HEAD(&cpu_buffer->reader_page->list); |
389 | 389 | ||
390 | ret = rb_allocate_pages(cpu_buffer, buffer->pages); | 390 | ret = rb_allocate_pages(cpu_buffer, buffer->pages); |
391 | if (ret < 0) | 391 | if (ret < 0) |
392 | goto fail_free_reader; | 392 | goto fail_free_reader; |
393 | 393 | ||
394 | cpu_buffer->head_page | 394 | cpu_buffer->head_page |
395 | = list_entry(cpu_buffer->pages.next, struct buffer_page, list); | 395 | = list_entry(cpu_buffer->pages.next, struct buffer_page, list); |
396 | cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page; | 396 | cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page; |
397 | 397 | ||
398 | return cpu_buffer; | 398 | return cpu_buffer; |
399 | 399 | ||
400 | fail_free_reader: | 400 | fail_free_reader: |
401 | free_buffer_page(cpu_buffer->reader_page); | 401 | free_buffer_page(cpu_buffer->reader_page); |
402 | 402 | ||
403 | fail_free_buffer: | 403 | fail_free_buffer: |
404 | kfree(cpu_buffer); | 404 | kfree(cpu_buffer); |
405 | return NULL; | 405 | return NULL; |
406 | } | 406 | } |
407 | 407 | ||
408 | static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) | 408 | static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) |
409 | { | 409 | { |
410 | struct list_head *head = &cpu_buffer->pages; | 410 | struct list_head *head = &cpu_buffer->pages; |
411 | struct buffer_page *page, *tmp; | 411 | struct buffer_page *page, *tmp; |
412 | 412 | ||
413 | list_del_init(&cpu_buffer->reader_page->list); | 413 | list_del_init(&cpu_buffer->reader_page->list); |
414 | free_buffer_page(cpu_buffer->reader_page); | 414 | free_buffer_page(cpu_buffer->reader_page); |
415 | 415 | ||
416 | list_for_each_entry_safe(page, tmp, head, list) { | 416 | list_for_each_entry_safe(page, tmp, head, list) { |
417 | list_del_init(&page->list); | 417 | list_del_init(&page->list); |
418 | free_buffer_page(page); | 418 | free_buffer_page(page); |
419 | } | 419 | } |
420 | kfree(cpu_buffer); | 420 | kfree(cpu_buffer); |
421 | } | 421 | } |
422 | 422 | ||
423 | /* | 423 | /* |
424 | * Causes compile errors if the struct buffer_page gets bigger | 424 | * Causes compile errors if the struct buffer_page gets bigger |
425 | * than the struct page. | 425 | * than the struct page. |
426 | */ | 426 | */ |
427 | extern int ring_buffer_page_too_big(void); | 427 | extern int ring_buffer_page_too_big(void); |
428 | 428 | ||
429 | /** | 429 | /** |
430 | * ring_buffer_alloc - allocate a new ring_buffer | 430 | * ring_buffer_alloc - allocate a new ring_buffer |
431 | * @size: the size in bytes that is needed. | 431 | * @size: the size in bytes that is needed. |
432 | * @flags: attributes to set for the ring buffer. | 432 | * @flags: attributes to set for the ring buffer. |
433 | * | 433 | * |
434 | * Currently the only flag that is available is the RB_FL_OVERWRITE | 434 | * Currently the only flag that is available is the RB_FL_OVERWRITE |
435 | * flag. This flag means that the buffer will overwrite old data | 435 | * flag. This flag means that the buffer will overwrite old data |
436 | * when the buffer wraps. If this flag is not set, the buffer will | 436 | * when the buffer wraps. If this flag is not set, the buffer will |
437 | * drop data when the tail hits the head. | 437 | * drop data when the tail hits the head. |
438 | */ | 438 | */ |
439 | struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags) | 439 | struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags) |
440 | { | 440 | { |
441 | struct ring_buffer *buffer; | 441 | struct ring_buffer *buffer; |
442 | int bsize; | 442 | int bsize; |
443 | int cpu; | 443 | int cpu; |
444 | 444 | ||
445 | /* Paranoid! Optimizes out when all is well */ | 445 | /* Paranoid! Optimizes out when all is well */ |
446 | if (sizeof(struct buffer_page) > sizeof(struct page)) | 446 | if (sizeof(struct buffer_page) > sizeof(struct page)) |
447 | ring_buffer_page_too_big(); | 447 | ring_buffer_page_too_big(); |
448 | 448 | ||
449 | 449 | ||
450 | /* keep it in its own cache line */ | 450 | /* keep it in its own cache line */ |
451 | buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()), | 451 | buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()), |
452 | GFP_KERNEL); | 452 | GFP_KERNEL); |
453 | if (!buffer) | 453 | if (!buffer) |
454 | return NULL; | 454 | return NULL; |
455 | 455 | ||
456 | buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); | 456 | buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); |
457 | buffer->flags = flags; | 457 | buffer->flags = flags; |
458 | 458 | ||
459 | /* need at least two pages */ | 459 | /* need at least two pages */ |
460 | if (buffer->pages == 1) | 460 | if (buffer->pages == 1) |
461 | buffer->pages++; | 461 | buffer->pages++; |
462 | 462 | ||
463 | buffer->cpumask = cpu_possible_map; | 463 | buffer->cpumask = cpu_possible_map; |
464 | buffer->cpus = nr_cpu_ids; | 464 | buffer->cpus = nr_cpu_ids; |
465 | 465 | ||
466 | bsize = sizeof(void *) * nr_cpu_ids; | 466 | bsize = sizeof(void *) * nr_cpu_ids; |
467 | buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()), | 467 | buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()), |
468 | GFP_KERNEL); | 468 | GFP_KERNEL); |
469 | if (!buffer->buffers) | 469 | if (!buffer->buffers) |
470 | goto fail_free_buffer; | 470 | goto fail_free_buffer; |
471 | 471 | ||
472 | for_each_buffer_cpu(buffer, cpu) { | 472 | for_each_buffer_cpu(buffer, cpu) { |
473 | buffer->buffers[cpu] = | 473 | buffer->buffers[cpu] = |
474 | rb_allocate_cpu_buffer(buffer, cpu); | 474 | rb_allocate_cpu_buffer(buffer, cpu); |
475 | if (!buffer->buffers[cpu]) | 475 | if (!buffer->buffers[cpu]) |
476 | goto fail_free_buffers; | 476 | goto fail_free_buffers; |
477 | } | 477 | } |
478 | 478 | ||
479 | mutex_init(&buffer->mutex); | 479 | mutex_init(&buffer->mutex); |
480 | 480 | ||
481 | return buffer; | 481 | return buffer; |
482 | 482 | ||
483 | fail_free_buffers: | 483 | fail_free_buffers: |
484 | for_each_buffer_cpu(buffer, cpu) { | 484 | for_each_buffer_cpu(buffer, cpu) { |
485 | if (buffer->buffers[cpu]) | 485 | if (buffer->buffers[cpu]) |
486 | rb_free_cpu_buffer(buffer->buffers[cpu]); | 486 | rb_free_cpu_buffer(buffer->buffers[cpu]); |
487 | } | 487 | } |
488 | kfree(buffer->buffers); | 488 | kfree(buffer->buffers); |
489 | 489 | ||
490 | fail_free_buffer: | 490 | fail_free_buffer: |
491 | kfree(buffer); | 491 | kfree(buffer); |
492 | return NULL; | 492 | return NULL; |
493 | } | 493 | } |
494 | 494 | ||
495 | /** | 495 | /** |
496 | * ring_buffer_free - free a ring buffer. | 496 | * ring_buffer_free - free a ring buffer. |
497 | * @buffer: the buffer to free. | 497 | * @buffer: the buffer to free. |
498 | */ | 498 | */ |
499 | void | 499 | void |
500 | ring_buffer_free(struct ring_buffer *buffer) | 500 | ring_buffer_free(struct ring_buffer *buffer) |
501 | { | 501 | { |
502 | int cpu; | 502 | int cpu; |
503 | 503 | ||
504 | for_each_buffer_cpu(buffer, cpu) | 504 | for_each_buffer_cpu(buffer, cpu) |
505 | rb_free_cpu_buffer(buffer->buffers[cpu]); | 505 | rb_free_cpu_buffer(buffer->buffers[cpu]); |
506 | 506 | ||
507 | kfree(buffer); | 507 | kfree(buffer); |
508 | } | 508 | } |
509 | 509 | ||
510 | static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer); | 510 | static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer); |
511 | 511 | ||
512 | static void | 512 | static void |
513 | rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages) | 513 | rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages) |
514 | { | 514 | { |
515 | struct buffer_page *page; | 515 | struct buffer_page *page; |
516 | struct list_head *p; | 516 | struct list_head *p; |
517 | unsigned i; | 517 | unsigned i; |
518 | 518 | ||
519 | atomic_inc(&cpu_buffer->record_disabled); | 519 | atomic_inc(&cpu_buffer->record_disabled); |
520 | synchronize_sched(); | 520 | synchronize_sched(); |
521 | 521 | ||
522 | for (i = 0; i < nr_pages; i++) { | 522 | for (i = 0; i < nr_pages; i++) { |
523 | if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages))) | 523 | if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages))) |
524 | return; | 524 | return; |
525 | p = cpu_buffer->pages.next; | 525 | p = cpu_buffer->pages.next; |
526 | page = list_entry(p, struct buffer_page, list); | 526 | page = list_entry(p, struct buffer_page, list); |
527 | list_del_init(&page->list); | 527 | list_del_init(&page->list); |
528 | free_buffer_page(page); | 528 | free_buffer_page(page); |
529 | } | 529 | } |
530 | if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages))) | 530 | if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages))) |
531 | return; | 531 | return; |
532 | 532 | ||
533 | rb_reset_cpu(cpu_buffer); | 533 | rb_reset_cpu(cpu_buffer); |
534 | 534 | ||
535 | rb_check_pages(cpu_buffer); | 535 | rb_check_pages(cpu_buffer); |
536 | 536 | ||
537 | atomic_dec(&cpu_buffer->record_disabled); | 537 | atomic_dec(&cpu_buffer->record_disabled); |
538 | 538 | ||
539 | } | 539 | } |
540 | 540 | ||
541 | static void | 541 | static void |
542 | rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer, | 542 | rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer, |
543 | struct list_head *pages, unsigned nr_pages) | 543 | struct list_head *pages, unsigned nr_pages) |
544 | { | 544 | { |
545 | struct buffer_page *page; | 545 | struct buffer_page *page; |
546 | struct list_head *p; | 546 | struct list_head *p; |
547 | unsigned i; | 547 | unsigned i; |
548 | 548 | ||
549 | atomic_inc(&cpu_buffer->record_disabled); | 549 | atomic_inc(&cpu_buffer->record_disabled); |
550 | synchronize_sched(); | 550 | synchronize_sched(); |
551 | 551 | ||
552 | for (i = 0; i < nr_pages; i++) { | 552 | for (i = 0; i < nr_pages; i++) { |
553 | if (RB_WARN_ON(cpu_buffer, list_empty(pages))) | 553 | if (RB_WARN_ON(cpu_buffer, list_empty(pages))) |
554 | return; | 554 | return; |
555 | p = pages->next; | 555 | p = pages->next; |
556 | page = list_entry(p, struct buffer_page, list); | 556 | page = list_entry(p, struct buffer_page, list); |
557 | list_del_init(&page->list); | 557 | list_del_init(&page->list); |
558 | list_add_tail(&page->list, &cpu_buffer->pages); | 558 | list_add_tail(&page->list, &cpu_buffer->pages); |
559 | } | 559 | } |
560 | rb_reset_cpu(cpu_buffer); | 560 | rb_reset_cpu(cpu_buffer); |
561 | 561 | ||
562 | rb_check_pages(cpu_buffer); | 562 | rb_check_pages(cpu_buffer); |
563 | 563 | ||
564 | atomic_dec(&cpu_buffer->record_disabled); | 564 | atomic_dec(&cpu_buffer->record_disabled); |
565 | } | 565 | } |
566 | 566 | ||
567 | /** | 567 | /** |
568 | * ring_buffer_resize - resize the ring buffer | 568 | * ring_buffer_resize - resize the ring buffer |
569 | * @buffer: the buffer to resize. | 569 | * @buffer: the buffer to resize. |
570 | * @size: the new size. | 570 | * @size: the new size. |
571 | * | 571 | * |
572 | * The tracer is responsible for making sure that the buffer is | 572 | * The tracer is responsible for making sure that the buffer is |
573 | * not being used while changing the size. | 573 | * not being used while changing the size. |
574 | * Note: We may be able to change the above requirement by using | 574 | * Note: We may be able to change the above requirement by using |
575 | * RCU synchronizations. | 575 | * RCU synchronizations. |
576 | * | 576 | * |
577 | * Minimum size is 2 * BUF_PAGE_SIZE. | 577 | * Minimum size is 2 * BUF_PAGE_SIZE. |
578 | * | 578 | * |
579 | * Returns -1 on failure. | 579 | * Returns -1 on failure. |
580 | */ | 580 | */ |
581 | int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size) | 581 | int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size) |
582 | { | 582 | { |
583 | struct ring_buffer_per_cpu *cpu_buffer; | 583 | struct ring_buffer_per_cpu *cpu_buffer; |
584 | unsigned nr_pages, rm_pages, new_pages; | 584 | unsigned nr_pages, rm_pages, new_pages; |
585 | struct buffer_page *page, *tmp; | 585 | struct buffer_page *page, *tmp; |
586 | unsigned long buffer_size; | 586 | unsigned long buffer_size; |
587 | unsigned long addr; | 587 | unsigned long addr; |
588 | LIST_HEAD(pages); | 588 | LIST_HEAD(pages); |
589 | int i, cpu; | 589 | int i, cpu; |
590 | 590 | ||
591 | /* | 591 | /* |
592 | * Always succeed at resizing a non-existent buffer: | 592 | * Always succeed at resizing a non-existent buffer: |
593 | */ | 593 | */ |
594 | if (!buffer) | 594 | if (!buffer) |
595 | return size; | 595 | return size; |
596 | 596 | ||
597 | size = DIV_ROUND_UP(size, BUF_PAGE_SIZE); | 597 | size = DIV_ROUND_UP(size, BUF_PAGE_SIZE); |
598 | size *= BUF_PAGE_SIZE; | 598 | size *= BUF_PAGE_SIZE; |
599 | buffer_size = buffer->pages * BUF_PAGE_SIZE; | 599 | buffer_size = buffer->pages * BUF_PAGE_SIZE; |
600 | 600 | ||
601 | /* we need a minimum of two pages */ | 601 | /* we need a minimum of two pages */ |
602 | if (size < BUF_PAGE_SIZE * 2) | 602 | if (size < BUF_PAGE_SIZE * 2) |
603 | size = BUF_PAGE_SIZE * 2; | 603 | size = BUF_PAGE_SIZE * 2; |
604 | 604 | ||
605 | if (size == buffer_size) | 605 | if (size == buffer_size) |
606 | return size; | 606 | return size; |
607 | 607 | ||
608 | mutex_lock(&buffer->mutex); | 608 | mutex_lock(&buffer->mutex); |
609 | 609 | ||
610 | nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); | 610 | nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); |
611 | 611 | ||
612 | if (size < buffer_size) { | 612 | if (size < buffer_size) { |
613 | 613 | ||
614 | /* easy case, just free pages */ | 614 | /* easy case, just free pages */ |
615 | if (RB_WARN_ON(buffer, nr_pages >= buffer->pages)) { | 615 | if (RB_WARN_ON(buffer, nr_pages >= buffer->pages)) { |
616 | mutex_unlock(&buffer->mutex); | 616 | mutex_unlock(&buffer->mutex); |
617 | return -1; | 617 | return -1; |
618 | } | 618 | } |
619 | 619 | ||
620 | rm_pages = buffer->pages - nr_pages; | 620 | rm_pages = buffer->pages - nr_pages; |
621 | 621 | ||
622 | for_each_buffer_cpu(buffer, cpu) { | 622 | for_each_buffer_cpu(buffer, cpu) { |
623 | cpu_buffer = buffer->buffers[cpu]; | 623 | cpu_buffer = buffer->buffers[cpu]; |
624 | rb_remove_pages(cpu_buffer, rm_pages); | 624 | rb_remove_pages(cpu_buffer, rm_pages); |
625 | } | 625 | } |
626 | goto out; | 626 | goto out; |
627 | } | 627 | } |
628 | 628 | ||
629 | /* | 629 | /* |
630 | * This is a bit more difficult. We only want to add pages | 630 | * This is a bit more difficult. We only want to add pages |
631 | * when we can allocate enough for all CPUs. We do this | 631 | * when we can allocate enough for all CPUs. We do this |
632 | * by allocating all the pages and storing them on a local | 632 | * by allocating all the pages and storing them on a local |
633 | * linked list. If we succeed in our allocation, then we | 633 | * linked list. If we succeed in our allocation, then we |
634 | * add these pages to the cpu_buffers. Otherwise we just free | 634 | * add these pages to the cpu_buffers. Otherwise we just free |
635 | * them all and return -ENOMEM; | 635 | * them all and return -ENOMEM; |
636 | */ | 636 | */ |
637 | if (RB_WARN_ON(buffer, nr_pages <= buffer->pages)) { | 637 | if (RB_WARN_ON(buffer, nr_pages <= buffer->pages)) { |
638 | mutex_unlock(&buffer->mutex); | 638 | mutex_unlock(&buffer->mutex); |
639 | return -1; | 639 | return -1; |
640 | } | 640 | } |
641 | 641 | ||
642 | new_pages = nr_pages - buffer->pages; | 642 | new_pages = nr_pages - buffer->pages; |
643 | 643 | ||
644 | for_each_buffer_cpu(buffer, cpu) { | 644 | for_each_buffer_cpu(buffer, cpu) { |
645 | for (i = 0; i < new_pages; i++) { | 645 | for (i = 0; i < new_pages; i++) { |
646 | page = kzalloc_node(ALIGN(sizeof(*page), | 646 | page = kzalloc_node(ALIGN(sizeof(*page), |
647 | cache_line_size()), | 647 | cache_line_size()), |
648 | GFP_KERNEL, cpu_to_node(cpu)); | 648 | GFP_KERNEL, cpu_to_node(cpu)); |
649 | if (!page) | 649 | if (!page) |
650 | goto free_pages; | 650 | goto free_pages; |
651 | list_add(&page->list, &pages); | 651 | list_add(&page->list, &pages); |
652 | addr = __get_free_page(GFP_KERNEL); | 652 | addr = __get_free_page(GFP_KERNEL); |
653 | if (!addr) | 653 | if (!addr) |
654 | goto free_pages; | 654 | goto free_pages; |
655 | page->page = (void *)addr; | 655 | page->page = (void *)addr; |
656 | rb_init_page(page->page); | 656 | rb_init_page(page->page); |
657 | } | 657 | } |
658 | } | 658 | } |
659 | 659 | ||
660 | for_each_buffer_cpu(buffer, cpu) { | 660 | for_each_buffer_cpu(buffer, cpu) { |
661 | cpu_buffer = buffer->buffers[cpu]; | 661 | cpu_buffer = buffer->buffers[cpu]; |
662 | rb_insert_pages(cpu_buffer, &pages, new_pages); | 662 | rb_insert_pages(cpu_buffer, &pages, new_pages); |
663 | } | 663 | } |
664 | 664 | ||
665 | if (RB_WARN_ON(buffer, !list_empty(&pages))) { | 665 | if (RB_WARN_ON(buffer, !list_empty(&pages))) { |
666 | mutex_unlock(&buffer->mutex); | 666 | mutex_unlock(&buffer->mutex); |
667 | return -1; | 667 | return -1; |
668 | } | 668 | } |
669 | 669 | ||
670 | out: | 670 | out: |
671 | buffer->pages = nr_pages; | 671 | buffer->pages = nr_pages; |
672 | mutex_unlock(&buffer->mutex); | 672 | mutex_unlock(&buffer->mutex); |
673 | 673 | ||
674 | return size; | 674 | return size; |
675 | 675 | ||
676 | free_pages: | 676 | free_pages: |
677 | list_for_each_entry_safe(page, tmp, &pages, list) { | 677 | list_for_each_entry_safe(page, tmp, &pages, list) { |
678 | list_del_init(&page->list); | 678 | list_del_init(&page->list); |
679 | free_buffer_page(page); | 679 | free_buffer_page(page); |
680 | } | 680 | } |
681 | mutex_unlock(&buffer->mutex); | 681 | mutex_unlock(&buffer->mutex); |
682 | return -ENOMEM; | 682 | return -ENOMEM; |
683 | } | 683 | } |
684 | 684 | ||
685 | static inline int rb_null_event(struct ring_buffer_event *event) | 685 | static inline int rb_null_event(struct ring_buffer_event *event) |
686 | { | 686 | { |
687 | return event->type == RINGBUF_TYPE_PADDING; | 687 | return event->type == RINGBUF_TYPE_PADDING; |
688 | } | 688 | } |
689 | 689 | ||
690 | static inline void * | ||
691 | __rb_data_page_index(struct buffer_data_page *page, unsigned index) | ||
692 | { | ||
693 | return page->data + index; | ||
694 | } | ||
695 | |||
690 | static inline void *__rb_page_index(struct buffer_page *page, unsigned index) | 696 | static inline void *__rb_page_index(struct buffer_page *page, unsigned index) |
691 | { | 697 | { |
692 | return page->page->data + index; | 698 | return page->page->data + index; |
693 | } | 699 | } |
694 | 700 | ||
695 | static inline struct ring_buffer_event * | 701 | static inline struct ring_buffer_event * |
696 | rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer) | 702 | rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer) |
697 | { | 703 | { |
698 | return __rb_page_index(cpu_buffer->reader_page, | 704 | return __rb_page_index(cpu_buffer->reader_page, |
699 | cpu_buffer->reader_page->read); | 705 | cpu_buffer->reader_page->read); |
700 | } | 706 | } |
701 | 707 | ||
702 | static inline struct ring_buffer_event * | 708 | static inline struct ring_buffer_event * |
703 | rb_head_event(struct ring_buffer_per_cpu *cpu_buffer) | 709 | rb_head_event(struct ring_buffer_per_cpu *cpu_buffer) |
704 | { | 710 | { |
705 | return __rb_page_index(cpu_buffer->head_page, | 711 | return __rb_page_index(cpu_buffer->head_page, |
706 | cpu_buffer->head_page->read); | 712 | cpu_buffer->head_page->read); |
707 | } | 713 | } |
708 | 714 | ||
709 | static inline struct ring_buffer_event * | 715 | static inline struct ring_buffer_event * |
710 | rb_iter_head_event(struct ring_buffer_iter *iter) | 716 | rb_iter_head_event(struct ring_buffer_iter *iter) |
711 | { | 717 | { |
712 | return __rb_page_index(iter->head_page, iter->head); | 718 | return __rb_page_index(iter->head_page, iter->head); |
713 | } | 719 | } |
714 | 720 | ||
715 | static inline unsigned rb_page_write(struct buffer_page *bpage) | 721 | static inline unsigned rb_page_write(struct buffer_page *bpage) |
716 | { | 722 | { |
717 | return local_read(&bpage->write); | 723 | return local_read(&bpage->write); |
718 | } | 724 | } |
719 | 725 | ||
720 | static inline unsigned rb_page_commit(struct buffer_page *bpage) | 726 | static inline unsigned rb_page_commit(struct buffer_page *bpage) |
721 | { | 727 | { |
722 | return local_read(&bpage->page->commit); | 728 | return local_read(&bpage->page->commit); |
723 | } | 729 | } |
724 | 730 | ||
725 | /* Size is determined by what has been committed */ | 731 | /* Size is determined by what has been committed */ |
726 | static inline unsigned rb_page_size(struct buffer_page *bpage) | 732 | static inline unsigned rb_page_size(struct buffer_page *bpage) |
727 | { | 733 | { |
728 | return rb_page_commit(bpage); | 734 | return rb_page_commit(bpage); |
729 | } | 735 | } |
730 | 736 | ||
731 | static inline unsigned | 737 | static inline unsigned |
732 | rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer) | 738 | rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer) |
733 | { | 739 | { |
734 | return rb_page_commit(cpu_buffer->commit_page); | 740 | return rb_page_commit(cpu_buffer->commit_page); |
735 | } | 741 | } |
736 | 742 | ||
737 | static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer) | 743 | static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer) |
738 | { | 744 | { |
739 | return rb_page_commit(cpu_buffer->head_page); | 745 | return rb_page_commit(cpu_buffer->head_page); |
740 | } | 746 | } |
741 | 747 | ||
742 | /* | 748 | /* |
743 | * When the tail hits the head and the buffer is in overwrite mode, | 749 | * When the tail hits the head and the buffer is in overwrite mode, |
744 | * the head jumps to the next page and all content on the previous | 750 | * the head jumps to the next page and all content on the previous |
745 | * page is discarded. But before doing so, we update the overrun | 751 | * page is discarded. But before doing so, we update the overrun |
746 | * variable of the buffer. | 752 | * variable of the buffer. |
747 | */ | 753 | */ |
748 | static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer) | 754 | static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer) |
749 | { | 755 | { |
750 | struct ring_buffer_event *event; | 756 | struct ring_buffer_event *event; |
751 | unsigned long head; | 757 | unsigned long head; |
752 | 758 | ||
753 | for (head = 0; head < rb_head_size(cpu_buffer); | 759 | for (head = 0; head < rb_head_size(cpu_buffer); |
754 | head += rb_event_length(event)) { | 760 | head += rb_event_length(event)) { |
755 | 761 | ||
756 | event = __rb_page_index(cpu_buffer->head_page, head); | 762 | event = __rb_page_index(cpu_buffer->head_page, head); |
757 | if (RB_WARN_ON(cpu_buffer, rb_null_event(event))) | 763 | if (RB_WARN_ON(cpu_buffer, rb_null_event(event))) |
758 | return; | 764 | return; |
759 | /* Only count data entries */ | 765 | /* Only count data entries */ |
760 | if (event->type != RINGBUF_TYPE_DATA) | 766 | if (event->type != RINGBUF_TYPE_DATA) |
761 | continue; | 767 | continue; |
762 | cpu_buffer->overrun++; | 768 | cpu_buffer->overrun++; |
763 | cpu_buffer->entries--; | 769 | cpu_buffer->entries--; |
764 | } | 770 | } |
765 | } | 771 | } |
766 | 772 | ||
767 | static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer, | 773 | static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer, |
768 | struct buffer_page **page) | 774 | struct buffer_page **page) |
769 | { | 775 | { |
770 | struct list_head *p = (*page)->list.next; | 776 | struct list_head *p = (*page)->list.next; |
771 | 777 | ||
772 | if (p == &cpu_buffer->pages) | 778 | if (p == &cpu_buffer->pages) |
773 | p = p->next; | 779 | p = p->next; |
774 | 780 | ||
775 | *page = list_entry(p, struct buffer_page, list); | 781 | *page = list_entry(p, struct buffer_page, list); |
776 | } | 782 | } |
777 | 783 | ||
778 | static inline unsigned | 784 | static inline unsigned |
779 | rb_event_index(struct ring_buffer_event *event) | 785 | rb_event_index(struct ring_buffer_event *event) |
780 | { | 786 | { |
781 | unsigned long addr = (unsigned long)event; | 787 | unsigned long addr = (unsigned long)event; |
782 | 788 | ||
783 | return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE); | 789 | return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE); |
784 | } | 790 | } |
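The index arithmetic above is easier to follow with numbers. A worked
example, assuming 4096-byte pages and a 16-byte buffer_data_page header
(so BUF_PAGE_SIZE is 4080); the event address is invented for
illustration:

	addr & ~PAGE_MASK         = 48   (the event sits 48 bytes into its page)
	PAGE_SIZE - BUF_PAGE_SIZE = 16   (the header that precedes data[])
	rb_event_index(event)     = 32   (48 - 16: offset within the data area)

The returned index is therefore relative to the page's data area, not to
the start of the page itself.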
785 | 791 | ||
786 | static inline int | 792 | static inline int |
787 | rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer, | 793 | rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer, |
788 | struct ring_buffer_event *event) | 794 | struct ring_buffer_event *event) |
789 | { | 795 | { |
790 | unsigned long addr = (unsigned long)event; | 796 | unsigned long addr = (unsigned long)event; |
791 | unsigned long index; | 797 | unsigned long index; |
792 | 798 | ||
793 | index = rb_event_index(event); | 799 | index = rb_event_index(event); |
794 | addr &= PAGE_MASK; | 800 | addr &= PAGE_MASK; |
795 | 801 | ||
796 | return cpu_buffer->commit_page->page == (void *)addr && | 802 | return cpu_buffer->commit_page->page == (void *)addr && |
797 | rb_commit_index(cpu_buffer) == index; | 803 | rb_commit_index(cpu_buffer) == index; |
798 | } | 804 | } |
799 | 805 | ||
800 | static inline void | 806 | static inline void |
801 | rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer, | 807 | rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer, |
802 | struct ring_buffer_event *event) | 808 | struct ring_buffer_event *event) |
803 | { | 809 | { |
804 | unsigned long addr = (unsigned long)event; | 810 | unsigned long addr = (unsigned long)event; |
805 | unsigned long index; | 811 | unsigned long index; |
806 | 812 | ||
807 | index = rb_event_index(event); | 813 | index = rb_event_index(event); |
808 | addr &= PAGE_MASK; | 814 | addr &= PAGE_MASK; |
809 | 815 | ||
810 | while (cpu_buffer->commit_page->page != (void *)addr) { | 816 | while (cpu_buffer->commit_page->page != (void *)addr) { |
811 | if (RB_WARN_ON(cpu_buffer, | 817 | if (RB_WARN_ON(cpu_buffer, |
812 | cpu_buffer->commit_page == cpu_buffer->tail_page)) | 818 | cpu_buffer->commit_page == cpu_buffer->tail_page)) |
813 | return; | 819 | return; |
814 | cpu_buffer->commit_page->page->commit = | 820 | cpu_buffer->commit_page->page->commit = |
815 | cpu_buffer->commit_page->write; | 821 | cpu_buffer->commit_page->write; |
816 | rb_inc_page(cpu_buffer, &cpu_buffer->commit_page); | 822 | rb_inc_page(cpu_buffer, &cpu_buffer->commit_page); |
817 | cpu_buffer->write_stamp = | 823 | cpu_buffer->write_stamp = |
818 | cpu_buffer->commit_page->page->time_stamp; | 824 | cpu_buffer->commit_page->page->time_stamp; |
819 | } | 825 | } |
820 | 826 | ||
821 | /* Now set the commit to the event's index */ | 827 | /* Now set the commit to the event's index */ |
822 | local_set(&cpu_buffer->commit_page->page->commit, index); | 828 | local_set(&cpu_buffer->commit_page->page->commit, index); |
823 | } | 829 | } |
824 | 830 | ||
825 | static inline void | 831 | static inline void |
826 | rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) | 832 | rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) |
827 | { | 833 | { |
828 | /* | 834 | /* |
829 | * We only race with interrupts and NMIs on this CPU. | 835 | * We only race with interrupts and NMIs on this CPU. |
830 | * If we own the commit event, then we can commit | 836 | * If we own the commit event, then we can commit |
831 | * all others that interrupted us, since the interruptions | 837 | * all others that interrupted us, since the interruptions |
832 | * are in stack format (they finish before they come | 838 | * are in stack format (they finish before they come |
833 | * back to us). This allows us to do a simple loop to | 839 | * back to us). This allows us to do a simple loop to |
834 | * assign the commit to the tail. | 840 | * assign the commit to the tail. |
835 | */ | 841 | */ |
836 | while (cpu_buffer->commit_page != cpu_buffer->tail_page) { | 842 | while (cpu_buffer->commit_page != cpu_buffer->tail_page) { |
837 | cpu_buffer->commit_page->page->commit = | 843 | cpu_buffer->commit_page->page->commit = |
838 | cpu_buffer->commit_page->write; | 844 | cpu_buffer->commit_page->write; |
839 | rb_inc_page(cpu_buffer, &cpu_buffer->commit_page); | 845 | rb_inc_page(cpu_buffer, &cpu_buffer->commit_page); |
840 | cpu_buffer->write_stamp = | 846 | cpu_buffer->write_stamp = |
841 | cpu_buffer->commit_page->page->time_stamp; | 847 | cpu_buffer->commit_page->page->time_stamp; |
842 | /* add barrier to keep gcc from optimizing too much */ | 848 | /* add barrier to keep gcc from optimizing too much */ |
843 | barrier(); | 849 | barrier(); |
844 | } | 850 | } |
845 | while (rb_commit_index(cpu_buffer) != | 851 | while (rb_commit_index(cpu_buffer) != |
846 | rb_page_write(cpu_buffer->commit_page)) { | 852 | rb_page_write(cpu_buffer->commit_page)) { |
847 | cpu_buffer->commit_page->page->commit = | 853 | cpu_buffer->commit_page->page->commit = |
848 | cpu_buffer->commit_page->write; | 854 | cpu_buffer->commit_page->write; |
849 | barrier(); | 855 | barrier(); |
850 | } | 856 | } |
851 | } | 857 | } |
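A concrete interleaving may make the comment above clearer. This is an
illustration with invented event names, not code from the patch:

	A = rb_reserve_next_event(...);     /* A owns the commit slot */
	    /* NMI nests in: */
	    B = rb_reserve_next_event(...); /* B sits behind A */
	    rb_commit(cpu_buffer, B);       /* not the owner: commit deferred */
	    /* NMI returns */
	rb_commit(cpu_buffer, A);           /* owner: the loops above sweep the
	                                       commit index forward past A and B */

Because interruptions nest in stack order, B always finishes before A
resumes, so the commit owner can safely walk the commit index over
everything that nested inside it.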
852 | 858 | ||
853 | static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer) | 859 | static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer) |
854 | { | 860 | { |
855 | cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp; | 861 | cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp; |
856 | cpu_buffer->reader_page->read = 0; | 862 | cpu_buffer->reader_page->read = 0; |
857 | } | 863 | } |
858 | 864 | ||
859 | static inline void rb_inc_iter(struct ring_buffer_iter *iter) | 865 | static inline void rb_inc_iter(struct ring_buffer_iter *iter) |
860 | { | 866 | { |
861 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; | 867 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; |
862 | 868 | ||
863 | /* | 869 | /* |
864 | * The iterator could be on the reader page (it starts there). | 870 | * The iterator could be on the reader page (it starts there). |
865 | * But the head could have moved, since the reader was | 871 | * But the head could have moved, since the reader was |
866 | * found. Check for this case and assign the iterator | 872 | * found. Check for this case and assign the iterator |
867 | * to the head page instead of next. | 873 | * to the head page instead of next. |
868 | */ | 874 | */ |
869 | if (iter->head_page == cpu_buffer->reader_page) | 875 | if (iter->head_page == cpu_buffer->reader_page) |
870 | iter->head_page = cpu_buffer->head_page; | 876 | iter->head_page = cpu_buffer->head_page; |
871 | else | 877 | else |
872 | rb_inc_page(cpu_buffer, &iter->head_page); | 878 | rb_inc_page(cpu_buffer, &iter->head_page); |
873 | 879 | ||
874 | iter->read_stamp = iter->head_page->page->time_stamp; | 880 | iter->read_stamp = iter->head_page->page->time_stamp; |
875 | iter->head = 0; | 881 | iter->head = 0; |
876 | } | 882 | } |
877 | 883 | ||
878 | /** | 884 | /** |
879 | * rb_update_event - update event type and data | 885 | * rb_update_event - update event type and data |
880 | * @event: the event to update | 886 | * @event: the event to update |
881 | * @type: the type of event | 887 | * @type: the type of event |
882 | * @length: the size of the event field in the ring buffer | 888 | * @length: the size of the event field in the ring buffer |
883 | * | 889 | * |
884 | * Update the type and data fields of the event. The length | 890 | * Update the type and data fields of the event. The length |
885 | * is the actual size that is written to the ring buffer, | 891 | * is the actual size that is written to the ring buffer, |
886 | * and with this, we can determine what to place into the | 892 | * and with this, we can determine what to place into the |
887 | * data field. | 893 | * data field. |
888 | */ | 894 | */ |
889 | static inline void | 895 | static inline void |
890 | rb_update_event(struct ring_buffer_event *event, | 896 | rb_update_event(struct ring_buffer_event *event, |
891 | unsigned type, unsigned length) | 897 | unsigned type, unsigned length) |
892 | { | 898 | { |
893 | event->type = type; | 899 | event->type = type; |
894 | 900 | ||
895 | switch (type) { | 901 | switch (type) { |
896 | 902 | ||
897 | case RINGBUF_TYPE_PADDING: | 903 | case RINGBUF_TYPE_PADDING: |
898 | break; | 904 | break; |
899 | 905 | ||
900 | case RINGBUF_TYPE_TIME_EXTEND: | 906 | case RINGBUF_TYPE_TIME_EXTEND: |
901 | event->len = | 907 | event->len = |
902 | (RB_LEN_TIME_EXTEND + (RB_ALIGNMENT-1)) | 908 | (RB_LEN_TIME_EXTEND + (RB_ALIGNMENT-1)) |
903 | >> RB_ALIGNMENT_SHIFT; | 909 | >> RB_ALIGNMENT_SHIFT; |
904 | break; | 910 | break; |
905 | 911 | ||
906 | case RINGBUF_TYPE_TIME_STAMP: | 912 | case RINGBUF_TYPE_TIME_STAMP: |
907 | event->len = | 913 | event->len = |
908 | (RB_LEN_TIME_STAMP + (RB_ALIGNMENT-1)) | 914 | (RB_LEN_TIME_STAMP + (RB_ALIGNMENT-1)) |
909 | >> RB_ALIGNMENT_SHIFT; | 915 | >> RB_ALIGNMENT_SHIFT; |
910 | break; | 916 | break; |
911 | 917 | ||
912 | case RINGBUF_TYPE_DATA: | 918 | case RINGBUF_TYPE_DATA: |
913 | length -= RB_EVNT_HDR_SIZE; | 919 | length -= RB_EVNT_HDR_SIZE; |
914 | if (length > RB_MAX_SMALL_DATA) { | 920 | if (length > RB_MAX_SMALL_DATA) { |
915 | event->len = 0; | 921 | event->len = 0; |
916 | event->array[0] = length; | 922 | event->array[0] = length; |
917 | } else | 923 | } else |
918 | event->len = | 924 | event->len = |
919 | (length + (RB_ALIGNMENT-1)) | 925 | (length + (RB_ALIGNMENT-1)) |
920 | >> RB_ALIGNMENT_SHIFT; | 926 | >> RB_ALIGNMENT_SHIFT; |
921 | break; | 927 | break; |
922 | default: | 928 | default: |
923 | BUG(); | 929 | BUG(); |
924 | } | 930 | } |
925 | } | 931 | } |
926 | 932 | ||
927 | static inline unsigned rb_calculate_event_length(unsigned length) | 933 | static inline unsigned rb_calculate_event_length(unsigned length) |
928 | { | 934 | { |
929 | struct ring_buffer_event event; /* Used only for sizeof array */ | 935 | struct ring_buffer_event event; /* Used only for sizeof array */ |
930 | 936 | ||
931 | /* zero length can cause confusion */ | 937 | /* zero length can cause confusion */ |
932 | if (!length) | 938 | if (!length) |
933 | length = 1; | 939 | length = 1; |
934 | 940 | ||
935 | if (length > RB_MAX_SMALL_DATA) | 941 | if (length > RB_MAX_SMALL_DATA) |
936 | length += sizeof(event.array[0]); | 942 | length += sizeof(event.array[0]); |
937 | 943 | ||
938 | length += RB_EVNT_HDR_SIZE; | 944 | length += RB_EVNT_HDR_SIZE; |
939 | length = ALIGN(length, RB_ALIGNMENT); | 945 | length = ALIGN(length, RB_ALIGNMENT); |
940 | 946 | ||
941 | return length; | 947 | return length; |
942 | } | 948 | } |
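A worked size round-trip, using the values these macros have in this
file (RB_EVNT_HDR_SIZE = 4, RB_ALIGNMENT = 4, RB_MAX_SMALL_DATA = 28):

	rb_calculate_event_length(10):
		10 <= 28, so no extra length word is needed
		10 + 4 (header) = 14; ALIGN(14, 4) = 16 bytes reserved

	rb_update_event(event, RINGBUF_TYPE_DATA, 16):
		16 - 4 = 12 bytes of data area
		12 <= 28, so event->len = (12 + 3) >> 2 = 3 (32-bit words)

A payload larger than RB_MAX_SMALL_DATA instead gets len = 0, one extra
word is reserved, and the size of the data area is stored in array[0].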
943 | 949 | ||
944 | static struct ring_buffer_event * | 950 | static struct ring_buffer_event * |
945 | __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, | 951 | __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, |
946 | unsigned type, unsigned long length, u64 *ts) | 952 | unsigned type, unsigned long length, u64 *ts) |
947 | { | 953 | { |
948 | struct buffer_page *tail_page, *head_page, *reader_page; | 954 | struct buffer_page *tail_page, *head_page, *reader_page; |
949 | unsigned long tail, write; | 955 | unsigned long tail, write; |
950 | struct ring_buffer *buffer = cpu_buffer->buffer; | 956 | struct ring_buffer *buffer = cpu_buffer->buffer; |
951 | struct ring_buffer_event *event; | 957 | struct ring_buffer_event *event; |
952 | unsigned long flags; | 958 | unsigned long flags; |
953 | 959 | ||
954 | tail_page = cpu_buffer->tail_page; | 960 | tail_page = cpu_buffer->tail_page; |
955 | write = local_add_return(length, &tail_page->write); | 961 | write = local_add_return(length, &tail_page->write); |
956 | tail = write - length; | 962 | tail = write - length; |
957 | 963 | ||
958 | /* See if we shot past the end of this buffer page */ | 964 | /* See if we shot past the end of this buffer page */ |
959 | if (write > BUF_PAGE_SIZE) { | 965 | if (write > BUF_PAGE_SIZE) { |
960 | struct buffer_page *next_page = tail_page; | 966 | struct buffer_page *next_page = tail_page; |
961 | 967 | ||
962 | local_irq_save(flags); | 968 | local_irq_save(flags); |
963 | __raw_spin_lock(&cpu_buffer->lock); | 969 | __raw_spin_lock(&cpu_buffer->lock); |
964 | 970 | ||
965 | rb_inc_page(cpu_buffer, &next_page); | 971 | rb_inc_page(cpu_buffer, &next_page); |
966 | 972 | ||
967 | head_page = cpu_buffer->head_page; | 973 | head_page = cpu_buffer->head_page; |
968 | reader_page = cpu_buffer->reader_page; | 974 | reader_page = cpu_buffer->reader_page; |
969 | 975 | ||
970 | /* we grabbed the lock before incrementing */ | 976 | /* we grabbed the lock before incrementing */ |
971 | if (RB_WARN_ON(cpu_buffer, next_page == reader_page)) | 977 | if (RB_WARN_ON(cpu_buffer, next_page == reader_page)) |
972 | goto out_unlock; | 978 | goto out_unlock; |
973 | 979 | ||
974 | /* | 980 | /* |
975 | * If, for some reason, we had an interrupt storm that made | 981 | * If, for some reason, we had an interrupt storm that made |
976 | * it all the way around the buffer, bail, and warn | 982 | * it all the way around the buffer, bail, and warn |
977 | * about it. | 983 | * about it. |
978 | */ | 984 | */ |
979 | if (unlikely(next_page == cpu_buffer->commit_page)) { | 985 | if (unlikely(next_page == cpu_buffer->commit_page)) { |
980 | WARN_ON_ONCE(1); | 986 | WARN_ON_ONCE(1); |
981 | goto out_unlock; | 987 | goto out_unlock; |
982 | } | 988 | } |
983 | 989 | ||
984 | if (next_page == head_page) { | 990 | if (next_page == head_page) { |
985 | if (!(buffer->flags & RB_FL_OVERWRITE)) { | 991 | if (!(buffer->flags & RB_FL_OVERWRITE)) { |
986 | /* reset write */ | 992 | /* reset write */ |
987 | if (tail <= BUF_PAGE_SIZE) | 993 | if (tail <= BUF_PAGE_SIZE) |
988 | local_set(&tail_page->write, tail); | 994 | local_set(&tail_page->write, tail); |
989 | goto out_unlock; | 995 | goto out_unlock; |
990 | } | 996 | } |
991 | 997 | ||
992 | /* tail_page has not moved yet? */ | 998 | /* tail_page has not moved yet? */ |
993 | if (tail_page == cpu_buffer->tail_page) { | 999 | if (tail_page == cpu_buffer->tail_page) { |
994 | /* count overflows */ | 1000 | /* count overflows */ |
995 | rb_update_overflow(cpu_buffer); | 1001 | rb_update_overflow(cpu_buffer); |
996 | 1002 | ||
997 | rb_inc_page(cpu_buffer, &head_page); | 1003 | rb_inc_page(cpu_buffer, &head_page); |
998 | cpu_buffer->head_page = head_page; | 1004 | cpu_buffer->head_page = head_page; |
999 | cpu_buffer->head_page->read = 0; | 1005 | cpu_buffer->head_page->read = 0; |
1000 | } | 1006 | } |
1001 | } | 1007 | } |
1002 | 1008 | ||
1003 | /* | 1009 | /* |
1004 | * If the tail page is still the same as what we think | 1010 | * If the tail page is still the same as what we think |
1005 | * it is, then it is up to us to update the tail | 1011 | * it is, then it is up to us to update the tail |
1006 | * pointer. | 1012 | * pointer. |
1007 | */ | 1013 | */ |
1008 | if (tail_page == cpu_buffer->tail_page) { | 1014 | if (tail_page == cpu_buffer->tail_page) { |
1009 | local_set(&next_page->write, 0); | 1015 | local_set(&next_page->write, 0); |
1010 | local_set(&next_page->page->commit, 0); | 1016 | local_set(&next_page->page->commit, 0); |
1011 | cpu_buffer->tail_page = next_page; | 1017 | cpu_buffer->tail_page = next_page; |
1012 | 1018 | ||
1013 | /* reread the time stamp */ | 1019 | /* reread the time stamp */ |
1014 | *ts = ring_buffer_time_stamp(cpu_buffer->cpu); | 1020 | *ts = ring_buffer_time_stamp(cpu_buffer->cpu); |
1015 | cpu_buffer->tail_page->page->time_stamp = *ts; | 1021 | cpu_buffer->tail_page->page->time_stamp = *ts; |
1016 | } | 1022 | } |
1017 | 1023 | ||
1018 | /* | 1024 | /* |
1019 | * The actual tail page has moved forward. | 1025 | * The actual tail page has moved forward. |
1020 | */ | 1026 | */ |
1021 | if (tail < BUF_PAGE_SIZE) { | 1027 | if (tail < BUF_PAGE_SIZE) { |
1022 | /* Mark the rest of the page with padding */ | 1028 | /* Mark the rest of the page with padding */ |
1023 | event = __rb_page_index(tail_page, tail); | 1029 | event = __rb_page_index(tail_page, tail); |
1024 | event->type = RINGBUF_TYPE_PADDING; | 1030 | event->type = RINGBUF_TYPE_PADDING; |
1025 | } | 1031 | } |
1026 | 1032 | ||
1027 | if (tail <= BUF_PAGE_SIZE) | 1033 | if (tail <= BUF_PAGE_SIZE) |
1028 | /* Set the write back to the previous setting */ | 1034 | /* Set the write back to the previous setting */ |
1029 | local_set(&tail_page->write, tail); | 1035 | local_set(&tail_page->write, tail); |
1030 | 1036 | ||
1031 | /* | 1037 | /* |
1032 | * If this was a commit entry that failed, | 1038 | * If this was a commit entry that failed, |
1033 | * increment that too | 1039 | * increment that too |
1034 | */ | 1040 | */ |
1035 | if (tail_page == cpu_buffer->commit_page && | 1041 | if (tail_page == cpu_buffer->commit_page && |
1036 | tail == rb_commit_index(cpu_buffer)) { | 1042 | tail == rb_commit_index(cpu_buffer)) { |
1037 | rb_set_commit_to_write(cpu_buffer); | 1043 | rb_set_commit_to_write(cpu_buffer); |
1038 | } | 1044 | } |
1039 | 1045 | ||
1040 | __raw_spin_unlock(&cpu_buffer->lock); | 1046 | __raw_spin_unlock(&cpu_buffer->lock); |
1041 | local_irq_restore(flags); | 1047 | local_irq_restore(flags); |
1042 | 1048 | ||
1043 | /* fail and let the caller try again */ | 1049 | /* fail and let the caller try again */ |
1044 | return ERR_PTR(-EAGAIN); | 1050 | return ERR_PTR(-EAGAIN); |
1045 | } | 1051 | } |
1046 | 1052 | ||
1047 | /* We reserved something on the buffer */ | 1053 | /* We reserved something on the buffer */ |
1048 | 1054 | ||
1049 | if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE)) | 1055 | if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE)) |
1050 | return NULL; | 1056 | return NULL; |
1051 | 1057 | ||
1052 | event = __rb_page_index(tail_page, tail); | 1058 | event = __rb_page_index(tail_page, tail); |
1053 | rb_update_event(event, type, length); | 1059 | rb_update_event(event, type, length); |
1054 | 1060 | ||
1055 | /* | 1061 | /* |
1056 | * If this is a commit and the tail is zero, then update | 1062 | * If this is a commit and the tail is zero, then update |
1057 | * this page's time stamp. | 1063 | * this page's time stamp. |
1058 | */ | 1064 | */ |
1059 | if (!tail && rb_is_commit(cpu_buffer, event)) | 1065 | if (!tail && rb_is_commit(cpu_buffer, event)) |
1060 | cpu_buffer->commit_page->page->time_stamp = *ts; | 1066 | cpu_buffer->commit_page->page->time_stamp = *ts; |
1061 | 1067 | ||
1062 | return event; | 1068 | return event; |
1063 | 1069 | ||
1064 | out_unlock: | 1070 | out_unlock: |
1065 | __raw_spin_unlock(&cpu_buffer->lock); | 1071 | __raw_spin_unlock(&cpu_buffer->lock); |
1066 | local_irq_restore(flags); | 1072 | local_irq_restore(flags); |
1067 | return NULL; | 1073 | return NULL; |
1068 | } | 1074 | } |
1069 | 1075 | ||
1070 | static int | 1076 | static int |
1071 | rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer, | 1077 | rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer, |
1072 | u64 *ts, u64 *delta) | 1078 | u64 *ts, u64 *delta) |
1073 | { | 1079 | { |
1074 | struct ring_buffer_event *event; | 1080 | struct ring_buffer_event *event; |
1075 | static int once; | 1081 | static int once; |
1076 | int ret; | 1082 | int ret; |
1077 | 1083 | ||
1078 | if (unlikely(*delta > (1ULL << 59) && !once++)) { | 1084 | if (unlikely(*delta > (1ULL << 59) && !once++)) { |
1079 | printk(KERN_WARNING "Delta way too big! %llu" | 1085 | printk(KERN_WARNING "Delta way too big! %llu" |
1080 | " ts=%llu write stamp = %llu\n", | 1086 | " ts=%llu write stamp = %llu\n", |
1081 | (unsigned long long)*delta, | 1087 | (unsigned long long)*delta, |
1082 | (unsigned long long)*ts, | 1088 | (unsigned long long)*ts, |
1083 | (unsigned long long)cpu_buffer->write_stamp); | 1089 | (unsigned long long)cpu_buffer->write_stamp); |
1084 | WARN_ON(1); | 1090 | WARN_ON(1); |
1085 | } | 1091 | } |
1086 | 1092 | ||
1087 | /* | 1093 | /* |
1088 | * The delta is too big, we need to add a | 1094 | * The delta is too big, we need to add a |
1089 | * new timestamp. | 1095 | * new timestamp. |
1090 | */ | 1096 | */ |
1091 | event = __rb_reserve_next(cpu_buffer, | 1097 | event = __rb_reserve_next(cpu_buffer, |
1092 | RINGBUF_TYPE_TIME_EXTEND, | 1098 | RINGBUF_TYPE_TIME_EXTEND, |
1093 | RB_LEN_TIME_EXTEND, | 1099 | RB_LEN_TIME_EXTEND, |
1094 | ts); | 1100 | ts); |
1095 | if (!event) | 1101 | if (!event) |
1096 | return -EBUSY; | 1102 | return -EBUSY; |
1097 | 1103 | ||
1098 | if (PTR_ERR(event) == -EAGAIN) | 1104 | if (PTR_ERR(event) == -EAGAIN) |
1099 | return -EAGAIN; | 1105 | return -EAGAIN; |
1100 | 1106 | ||
1101 | /* Only a committed time event can update the write stamp */ | 1107 | /* Only a committed time event can update the write stamp */ |
1102 | if (rb_is_commit(cpu_buffer, event)) { | 1108 | if (rb_is_commit(cpu_buffer, event)) { |
1103 | /* | 1109 | /* |
1104 | * If this is the first on the page, then we need to | 1110 | * If this is the first on the page, then we need to |
1105 | * update the page itself, and just put in a zero. | 1111 | * update the page itself, and just put in a zero. |
1106 | */ | 1112 | */ |
1107 | if (rb_event_index(event)) { | 1113 | if (rb_event_index(event)) { |
1108 | event->time_delta = *delta & TS_MASK; | 1114 | event->time_delta = *delta & TS_MASK; |
1109 | event->array[0] = *delta >> TS_SHIFT; | 1115 | event->array[0] = *delta >> TS_SHIFT; |
1110 | } else { | 1116 | } else { |
1111 | cpu_buffer->commit_page->page->time_stamp = *ts; | 1117 | cpu_buffer->commit_page->page->time_stamp = *ts; |
1112 | event->time_delta = 0; | 1118 | event->time_delta = 0; |
1113 | event->array[0] = 0; | 1119 | event->array[0] = 0; |
1114 | } | 1120 | } |
1115 | cpu_buffer->write_stamp = *ts; | 1121 | cpu_buffer->write_stamp = *ts; |
1116 | /* let the caller know this was the commit */ | 1122 | /* let the caller know this was the commit */ |
1117 | ret = 1; | 1123 | ret = 1; |
1118 | } else { | 1124 | } else { |
1119 | /* Darn, this is just wasted space */ | 1125 | /* Darn, this is just wasted space */ |
1120 | event->time_delta = 0; | 1126 | event->time_delta = 0; |
1121 | event->array[0] = 0; | 1127 | event->array[0] = 0; |
1122 | ret = 0; | 1128 | ret = 0; |
1123 | } | 1129 | } |
1124 | 1130 | ||
1125 | *delta = 0; | 1131 | *delta = 0; |
1126 | 1132 | ||
1127 | return ret; | 1133 | return ret; |
1128 | } | 1134 | } |
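The encode half above pairs with the decode in rb_update_read_stamp()
further down. Shown side by side, with TS_SHIFT = 27 matching the width
of the event's time_delta bitfield:

	/* write side: split the oversized delta */
	event->time_delta = *delta & TS_MASK;	/* low 27 bits */
	event->array[0]   = *delta >> TS_SHIFT;	/* upper bits  */

	/* read side: reassemble the full delta */
	delta   = event->array[0];
	delta <<= TS_SHIFT;
	delta  += event->time_delta;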
1129 | 1135 | ||
1130 | static struct ring_buffer_event * | 1136 | static struct ring_buffer_event * |
1131 | rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer, | 1137 | rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer, |
1132 | unsigned type, unsigned long length) | 1138 | unsigned type, unsigned long length) |
1133 | { | 1139 | { |
1134 | struct ring_buffer_event *event; | 1140 | struct ring_buffer_event *event; |
1135 | u64 ts, delta; | 1141 | u64 ts, delta; |
1136 | int commit = 0; | 1142 | int commit = 0; |
1137 | int nr_loops = 0; | 1143 | int nr_loops = 0; |
1138 | 1144 | ||
1139 | again: | 1145 | again: |
1140 | /* | 1146 | /* |
1141 | * We allow for interrupts to reenter here and do a trace. | 1147 | * We allow for interrupts to reenter here and do a trace. |
1142 | * If one does, it will cause this original code to loop | 1148 | * If one does, it will cause this original code to loop |
1143 | * back here. Even with heavy interrupts happening, this | 1149 | * back here. Even with heavy interrupts happening, this |
1144 | * should only happen a few times in a row. If this happens | 1150 | * should only happen a few times in a row. If this happens |
1145 | * 1000 times in a row, there must be either an interrupt | 1151 | * 1000 times in a row, there must be either an interrupt |
1146 | * storm or we have something buggy. | 1152 | * storm or we have something buggy. |
1147 | * Bail! | 1153 | * Bail! |
1148 | */ | 1154 | */ |
1149 | if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000)) | 1155 | if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000)) |
1150 | return NULL; | 1156 | return NULL; |
1151 | 1157 | ||
1152 | ts = ring_buffer_time_stamp(cpu_buffer->cpu); | 1158 | ts = ring_buffer_time_stamp(cpu_buffer->cpu); |
1153 | 1159 | ||
1154 | /* | 1160 | /* |
1155 | * Only the first commit can update the timestamp. | 1161 | * Only the first commit can update the timestamp. |
1156 | * Yes there is a race here. If an interrupt comes in | 1162 | * Yes there is a race here. If an interrupt comes in |
1157 | * just after the conditional and it traces too, then it | 1163 | * just after the conditional and it traces too, then it |
1158 | * will also check the deltas. More than one timestamp may | 1164 | * will also check the deltas. More than one timestamp may |
1159 | * also be made. But only the entry that did the actual | 1165 | * also be made. But only the entry that did the actual |
1160 | * commit will be something other than zero. | 1166 | * commit will be something other than zero. |
1161 | */ | 1167 | */ |
1162 | if (cpu_buffer->tail_page == cpu_buffer->commit_page && | 1168 | if (cpu_buffer->tail_page == cpu_buffer->commit_page && |
1163 | rb_page_write(cpu_buffer->tail_page) == | 1169 | rb_page_write(cpu_buffer->tail_page) == |
1164 | rb_commit_index(cpu_buffer)) { | 1170 | rb_commit_index(cpu_buffer)) { |
1165 | 1171 | ||
1166 | delta = ts - cpu_buffer->write_stamp; | 1172 | delta = ts - cpu_buffer->write_stamp; |
1167 | 1173 | ||
1168 | /* make sure this delta is calculated here */ | 1174 | /* make sure this delta is calculated here */ |
1169 | barrier(); | 1175 | barrier(); |
1170 | 1176 | ||
1171 | /* Did the write stamp get updated already? */ | 1177 | /* Did the write stamp get updated already? */ |
1172 | if (unlikely(ts < cpu_buffer->write_stamp)) | 1178 | if (unlikely(ts < cpu_buffer->write_stamp)) |
1173 | delta = 0; | 1179 | delta = 0; |
1174 | 1180 | ||
1175 | if (test_time_stamp(delta)) { | 1181 | if (test_time_stamp(delta)) { |
1176 | 1182 | ||
1177 | commit = rb_add_time_stamp(cpu_buffer, &ts, &delta); | 1183 | commit = rb_add_time_stamp(cpu_buffer, &ts, &delta); |
1178 | 1184 | ||
1179 | if (commit == -EBUSY) | 1185 | if (commit == -EBUSY) |
1180 | return NULL; | 1186 | return NULL; |
1181 | 1187 | ||
1182 | if (commit == -EAGAIN) | 1188 | if (commit == -EAGAIN) |
1183 | goto again; | 1189 | goto again; |
1184 | 1190 | ||
1185 | RB_WARN_ON(cpu_buffer, commit < 0); | 1191 | RB_WARN_ON(cpu_buffer, commit < 0); |
1186 | } | 1192 | } |
1187 | } else | 1193 | } else |
1188 | /* Non commits have zero deltas */ | 1194 | /* Non commits have zero deltas */ |
1189 | delta = 0; | 1195 | delta = 0; |
1190 | 1196 | ||
1191 | event = __rb_reserve_next(cpu_buffer, type, length, &ts); | 1197 | event = __rb_reserve_next(cpu_buffer, type, length, &ts); |
1192 | if (PTR_ERR(event) == -EAGAIN) | 1198 | if (PTR_ERR(event) == -EAGAIN) |
1193 | goto again; | 1199 | goto again; |
1194 | 1200 | ||
1195 | if (!event) { | 1201 | if (!event) { |
1196 | if (unlikely(commit)) | 1202 | if (unlikely(commit)) |
1197 | /* | 1203 | /* |
1198 | * Ouch! We needed a timestamp and it was committed. But | 1204 | * Ouch! We needed a timestamp and it was committed. But |
1199 | * we didn't get our event reserved. | 1205 | * we didn't get our event reserved. |
1200 | */ | 1206 | */ |
1201 | rb_set_commit_to_write(cpu_buffer); | 1207 | rb_set_commit_to_write(cpu_buffer); |
1202 | return NULL; | 1208 | return NULL; |
1203 | } | 1209 | } |
1204 | 1210 | ||
1205 | /* | 1211 | /* |
1206 | * If the timestamp was committed, make the commit our entry | 1212 | * If the timestamp was committed, make the commit our entry |
1207 | * now so that we will update it when needed. | 1213 | * now so that we will update it when needed. |
1208 | */ | 1214 | */ |
1209 | if (commit) | 1215 | if (commit) |
1210 | rb_set_commit_event(cpu_buffer, event); | 1216 | rb_set_commit_event(cpu_buffer, event); |
1211 | else if (!rb_is_commit(cpu_buffer, event)) | 1217 | else if (!rb_is_commit(cpu_buffer, event)) |
1212 | delta = 0; | 1218 | delta = 0; |
1213 | 1219 | ||
1214 | event->time_delta = delta; | 1220 | event->time_delta = delta; |
1215 | 1221 | ||
1216 | return event; | 1222 | return event; |
1217 | } | 1223 | } |
1218 | 1224 | ||
1219 | static DEFINE_PER_CPU(int, rb_need_resched); | 1225 | static DEFINE_PER_CPU(int, rb_need_resched); |
1220 | 1226 | ||
1221 | /** | 1227 | /** |
1222 | * ring_buffer_lock_reserve - reserve a part of the buffer | 1228 | * ring_buffer_lock_reserve - reserve a part of the buffer |
1223 | * @buffer: the ring buffer to reserve from | 1229 | * @buffer: the ring buffer to reserve from |
1224 | * @length: the length of the data to reserve (excluding event header) | 1230 | * @length: the length of the data to reserve (excluding event header) |
1225 | * @flags: a pointer to save the interrupt flags | 1231 | * @flags: a pointer to save the interrupt flags |
1226 | * | 1232 | * |
1227 | * Returns a reserved event on the ring buffer to copy directly to. | 1233 | * Returns a reserved event on the ring buffer to copy directly to. |
1228 | * The user of this interface will need to get the body to write into | 1234 | * The user of this interface will need to get the body to write into |
1229 | * and can use the ring_buffer_event_data() interface. | 1235 | * and can use the ring_buffer_event_data() interface. |
1230 | * | 1236 | * |
1231 | * The length is the length of the data needed, not the event length | 1237 | * The length is the length of the data needed, not the event length |
1232 | * which also includes the event header. | 1238 | * which also includes the event header. |
1233 | * | 1239 | * |
1234 | * Must be paired with ring_buffer_unlock_commit, unless NULL is returned. | 1240 | * Must be paired with ring_buffer_unlock_commit, unless NULL is returned. |
1235 | * If NULL is returned, then nothing has been allocated or locked. | 1241 | * If NULL is returned, then nothing has been allocated or locked. |
1236 | */ | 1242 | */ |
1237 | struct ring_buffer_event * | 1243 | struct ring_buffer_event * |
1238 | ring_buffer_lock_reserve(struct ring_buffer *buffer, | 1244 | ring_buffer_lock_reserve(struct ring_buffer *buffer, |
1239 | unsigned long length, | 1245 | unsigned long length, |
1240 | unsigned long *flags) | 1246 | unsigned long *flags) |
1241 | { | 1247 | { |
1242 | struct ring_buffer_per_cpu *cpu_buffer; | 1248 | struct ring_buffer_per_cpu *cpu_buffer; |
1243 | struct ring_buffer_event *event; | 1249 | struct ring_buffer_event *event; |
1244 | int cpu, resched; | 1250 | int cpu, resched; |
1245 | 1251 | ||
1246 | if (ring_buffer_flags != RB_BUFFERS_ON) | 1252 | if (ring_buffer_flags != RB_BUFFERS_ON) |
1247 | return NULL; | 1253 | return NULL; |
1248 | 1254 | ||
1249 | if (atomic_read(&buffer->record_disabled)) | 1255 | if (atomic_read(&buffer->record_disabled)) |
1250 | return NULL; | 1256 | return NULL; |
1251 | 1257 | ||
1252 | /* If we are tracing schedule, we don't want to recurse */ | 1258 | /* If we are tracing schedule, we don't want to recurse */ |
1253 | resched = ftrace_preempt_disable(); | 1259 | resched = ftrace_preempt_disable(); |
1254 | 1260 | ||
1255 | cpu = raw_smp_processor_id(); | 1261 | cpu = raw_smp_processor_id(); |
1256 | 1262 | ||
1257 | if (!cpu_isset(cpu, buffer->cpumask)) | 1263 | if (!cpu_isset(cpu, buffer->cpumask)) |
1258 | goto out; | 1264 | goto out; |
1259 | 1265 | ||
1260 | cpu_buffer = buffer->buffers[cpu]; | 1266 | cpu_buffer = buffer->buffers[cpu]; |
1261 | 1267 | ||
1262 | if (atomic_read(&cpu_buffer->record_disabled)) | 1268 | if (atomic_read(&cpu_buffer->record_disabled)) |
1263 | goto out; | 1269 | goto out; |
1264 | 1270 | ||
1265 | length = rb_calculate_event_length(length); | 1271 | length = rb_calculate_event_length(length); |
1266 | if (length > BUF_PAGE_SIZE) | 1272 | if (length > BUF_PAGE_SIZE) |
1267 | goto out; | 1273 | goto out; |
1268 | 1274 | ||
1269 | event = rb_reserve_next_event(cpu_buffer, RINGBUF_TYPE_DATA, length); | 1275 | event = rb_reserve_next_event(cpu_buffer, RINGBUF_TYPE_DATA, length); |
1270 | if (!event) | 1276 | if (!event) |
1271 | goto out; | 1277 | goto out; |
1272 | 1278 | ||
1273 | /* | 1279 | /* |
1274 | * Need to store resched state on this cpu. | 1280 | * Need to store resched state on this cpu. |
1275 | * Only the first needs to. | 1281 | * Only the first needs to. |
1276 | */ | 1282 | */ |
1277 | 1283 | ||
1278 | if (preempt_count() == 1) | 1284 | if (preempt_count() == 1) |
1279 | per_cpu(rb_need_resched, cpu) = resched; | 1285 | per_cpu(rb_need_resched, cpu) = resched; |
1280 | 1286 | ||
1281 | return event; | 1287 | return event; |
1282 | 1288 | ||
1283 | out: | 1289 | out: |
1284 | ftrace_preempt_enable(resched); | 1290 | ftrace_preempt_enable(resched); |
1285 | return NULL; | 1291 | return NULL; |
1286 | } | 1292 | } |
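A minimal usage sketch of the reserve/commit pair; the payload and the
"buffer" variable are assumptions for illustration, not part of this
patch:

	struct ring_buffer_event *event;
	unsigned long irq_flags;
	void *body;

	event = ring_buffer_lock_reserve(buffer, sizeof(data), &irq_flags);
	if (!event)
		return;		/* recording disabled, or no room */
	body = ring_buffer_event_data(event);
	memcpy(body, &data, sizeof(data));
	ring_buffer_unlock_commit(buffer, event, irq_flags);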
1287 | 1293 | ||
1288 | static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer, | 1294 | static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer, |
1289 | struct ring_buffer_event *event) | 1295 | struct ring_buffer_event *event) |
1290 | { | 1296 | { |
1291 | cpu_buffer->entries++; | 1297 | cpu_buffer->entries++; |
1292 | 1298 | ||
1293 | /* Only process further if we own the commit */ | 1299 | /* Only process further if we own the commit */ |
1294 | if (!rb_is_commit(cpu_buffer, event)) | 1300 | if (!rb_is_commit(cpu_buffer, event)) |
1295 | return; | 1301 | return; |
1296 | 1302 | ||
1297 | cpu_buffer->write_stamp += event->time_delta; | 1303 | cpu_buffer->write_stamp += event->time_delta; |
1298 | 1304 | ||
1299 | rb_set_commit_to_write(cpu_buffer); | 1305 | rb_set_commit_to_write(cpu_buffer); |
1300 | } | 1306 | } |
1301 | 1307 | ||
1302 | /** | 1308 | /** |
1303 | * ring_buffer_unlock_commit - commit a reserved event | 1309 | * ring_buffer_unlock_commit - commit a reserved event |
1304 | * @buffer: The buffer to commit to | 1310 | * @buffer: The buffer to commit to |
1305 | * @event: The event pointer to commit. | 1311 | * @event: The event pointer to commit. |
1306 | * @flags: the interrupt flags received from ring_buffer_lock_reserve. | 1312 | * @flags: the interrupt flags received from ring_buffer_lock_reserve. |
1307 | * | 1313 | * |
1308 | * This commits the data to the ring buffer, and releases any locks held. | 1314 | * This commits the data to the ring buffer, and releases any locks held. |
1309 | * | 1315 | * |
1310 | * Must be paired with ring_buffer_lock_reserve. | 1316 | * Must be paired with ring_buffer_lock_reserve. |
1311 | */ | 1317 | */ |
1312 | int ring_buffer_unlock_commit(struct ring_buffer *buffer, | 1318 | int ring_buffer_unlock_commit(struct ring_buffer *buffer, |
1313 | struct ring_buffer_event *event, | 1319 | struct ring_buffer_event *event, |
1314 | unsigned long flags) | 1320 | unsigned long flags) |
1315 | { | 1321 | { |
1316 | struct ring_buffer_per_cpu *cpu_buffer; | 1322 | struct ring_buffer_per_cpu *cpu_buffer; |
1317 | int cpu = raw_smp_processor_id(); | 1323 | int cpu = raw_smp_processor_id(); |
1318 | 1324 | ||
1319 | cpu_buffer = buffer->buffers[cpu]; | 1325 | cpu_buffer = buffer->buffers[cpu]; |
1320 | 1326 | ||
1321 | rb_commit(cpu_buffer, event); | 1327 | rb_commit(cpu_buffer, event); |
1322 | 1328 | ||
1323 | /* | 1329 | /* |
1324 | * Only the last preempt count needs to restore preemption. | 1330 | * Only the last preempt count needs to restore preemption. |
1325 | */ | 1331 | */ |
1326 | if (preempt_count() == 1) | 1332 | if (preempt_count() == 1) |
1327 | ftrace_preempt_enable(per_cpu(rb_need_resched, cpu)); | 1333 | ftrace_preempt_enable(per_cpu(rb_need_resched, cpu)); |
1328 | else | 1334 | else |
1329 | preempt_enable_no_resched_notrace(); | 1335 | preempt_enable_no_resched_notrace(); |
1330 | 1336 | ||
1331 | return 0; | 1337 | return 0; |
1332 | } | 1338 | } |
1333 | 1339 | ||
1334 | /** | 1340 | /** |
1335 | * ring_buffer_write - write data to the buffer without reserving | 1341 | * ring_buffer_write - write data to the buffer without reserving |
1336 | * @buffer: The ring buffer to write to. | 1342 | * @buffer: The ring buffer to write to. |
1337 | * @length: The length of the data being written (excluding the event header) | 1343 | * @length: The length of the data being written (excluding the event header) |
1338 | * @data: The data to write to the buffer. | 1344 | * @data: The data to write to the buffer. |
1339 | * | 1345 | * |
1340 | * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as | 1346 | * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as |
1341 | * one function. If you already have the data to write to the buffer, it | 1347 | * one function. If you already have the data to write to the buffer, it |
1342 | * may be easier to simply call this function. | 1348 | * may be easier to simply call this function. |
1343 | * | 1349 | * |
1344 | * Note, like ring_buffer_lock_reserve, the length is the length of the data | 1350 | * Note, like ring_buffer_lock_reserve, the length is the length of the data |
1345 | * and not the length of the event which would hold the header. | 1351 | * and not the length of the event which would hold the header. |
1346 | */ | 1352 | */ |
1347 | int ring_buffer_write(struct ring_buffer *buffer, | 1353 | int ring_buffer_write(struct ring_buffer *buffer, |
1348 | unsigned long length, | 1354 | unsigned long length, |
1349 | void *data) | 1355 | void *data) |
1350 | { | 1356 | { |
1351 | struct ring_buffer_per_cpu *cpu_buffer; | 1357 | struct ring_buffer_per_cpu *cpu_buffer; |
1352 | struct ring_buffer_event *event; | 1358 | struct ring_buffer_event *event; |
1353 | unsigned long event_length; | 1359 | unsigned long event_length; |
1354 | void *body; | 1360 | void *body; |
1355 | int ret = -EBUSY; | 1361 | int ret = -EBUSY; |
1356 | int cpu, resched; | 1362 | int cpu, resched; |
1357 | 1363 | ||
1358 | if (ring_buffer_flags != RB_BUFFERS_ON) | 1364 | if (ring_buffer_flags != RB_BUFFERS_ON) |
1359 | return -EBUSY; | 1365 | return -EBUSY; |
1360 | 1366 | ||
1361 | if (atomic_read(&buffer->record_disabled)) | 1367 | if (atomic_read(&buffer->record_disabled)) |
1362 | return -EBUSY; | 1368 | return -EBUSY; |
1363 | 1369 | ||
1364 | resched = ftrace_preempt_disable(); | 1370 | resched = ftrace_preempt_disable(); |
1365 | 1371 | ||
1366 | cpu = raw_smp_processor_id(); | 1372 | cpu = raw_smp_processor_id(); |
1367 | 1373 | ||
1368 | if (!cpu_isset(cpu, buffer->cpumask)) | 1374 | if (!cpu_isset(cpu, buffer->cpumask)) |
1369 | goto out; | 1375 | goto out; |
1370 | 1376 | ||
1371 | cpu_buffer = buffer->buffers[cpu]; | 1377 | cpu_buffer = buffer->buffers[cpu]; |
1372 | 1378 | ||
1373 | if (atomic_read(&cpu_buffer->record_disabled)) | 1379 | if (atomic_read(&cpu_buffer->record_disabled)) |
1374 | goto out; | 1380 | goto out; |
1375 | 1381 | ||
1376 | event_length = rb_calculate_event_length(length); | 1382 | event_length = rb_calculate_event_length(length); |
1377 | event = rb_reserve_next_event(cpu_buffer, | 1383 | event = rb_reserve_next_event(cpu_buffer, |
1378 | RINGBUF_TYPE_DATA, event_length); | 1384 | RINGBUF_TYPE_DATA, event_length); |
1379 | if (!event) | 1385 | if (!event) |
1380 | goto out; | 1386 | goto out; |
1381 | 1387 | ||
1382 | body = rb_event_data(event); | 1388 | body = rb_event_data(event); |
1383 | 1389 | ||
1384 | memcpy(body, data, length); | 1390 | memcpy(body, data, length); |
1385 | 1391 | ||
1386 | rb_commit(cpu_buffer, event); | 1392 | rb_commit(cpu_buffer, event); |
1387 | 1393 | ||
1388 | ret = 0; | 1394 | ret = 0; |
1389 | out: | 1395 | out: |
1390 | ftrace_preempt_enable(resched); | 1396 | ftrace_preempt_enable(resched); |
1391 | 1397 | ||
1392 | return ret; | 1398 | return ret; |
1393 | } | 1399 | } |
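The one-shot form, sketched with an invented message; "buffer" is again
an assumption for illustration:

	char msg[] = "some event";
	int ret;

	ret = ring_buffer_write(buffer, sizeof(msg), msg);
	if (ret)	/* -EBUSY: recording disabled or no space found */
		return ret;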
1394 | 1400 | ||
1395 | static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) | 1401 | static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) |
1396 | { | 1402 | { |
1397 | struct buffer_page *reader = cpu_buffer->reader_page; | 1403 | struct buffer_page *reader = cpu_buffer->reader_page; |
1398 | struct buffer_page *head = cpu_buffer->head_page; | 1404 | struct buffer_page *head = cpu_buffer->head_page; |
1399 | struct buffer_page *commit = cpu_buffer->commit_page; | 1405 | struct buffer_page *commit = cpu_buffer->commit_page; |
1400 | 1406 | ||
1401 | return reader->read == rb_page_commit(reader) && | 1407 | return reader->read == rb_page_commit(reader) && |
1402 | (commit == reader || | 1408 | (commit == reader || |
1403 | (commit == head && | 1409 | (commit == head && |
1404 | head->read == rb_page_commit(commit))); | 1410 | head->read == rb_page_commit(commit))); |
1405 | } | 1411 | } |
1406 | 1412 | ||
1407 | /** | 1413 | /** |
1408 | * ring_buffer_record_disable - stop all writes into the buffer | 1414 | * ring_buffer_record_disable - stop all writes into the buffer |
1409 | * @buffer: The ring buffer to stop writes to. | 1415 | * @buffer: The ring buffer to stop writes to. |
1410 | * | 1416 | * |
1411 | * This prevents all writes to the buffer. Any attempt to write | 1417 | * This prevents all writes to the buffer. Any attempt to write |
1412 | * to the buffer after this will fail and return NULL. | 1418 | * to the buffer after this will fail and return NULL. |
1413 | * | 1419 | * |
1414 | * The caller should call synchronize_sched() after this. | 1420 | * The caller should call synchronize_sched() after this. |
1415 | */ | 1421 | */ |
1416 | void ring_buffer_record_disable(struct ring_buffer *buffer) | 1422 | void ring_buffer_record_disable(struct ring_buffer *buffer) |
1417 | { | 1423 | { |
1418 | atomic_inc(&buffer->record_disabled); | 1424 | atomic_inc(&buffer->record_disabled); |
1419 | } | 1425 | } |
1420 | 1426 | ||
1421 | /** | 1427 | /** |
1422 | * ring_buffer_record_enable - enable writes to the buffer | 1428 | * ring_buffer_record_enable - enable writes to the buffer |
1423 | * @buffer: The ring buffer to enable writes | 1429 | * @buffer: The ring buffer to enable writes |
1424 | * | 1430 | * |
1425 | * Note, multiple disables will need the same number of enables | 1431 | * Note, multiple disables will need the same number of enables |
1426 | * to truly enable the writing (much like preempt_disable). | 1432 | * to truly enable the writing (much like preempt_disable). |
1427 | */ | 1433 | */ |
1428 | void ring_buffer_record_enable(struct ring_buffer *buffer) | 1434 | void ring_buffer_record_enable(struct ring_buffer *buffer) |
1429 | { | 1435 | { |
1430 | atomic_dec(&buffer->record_disabled); | 1436 | atomic_dec(&buffer->record_disabled); |
1431 | } | 1437 | } |
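A typical pairing, following the kernel-doc above: disable recording,
wait out in-flight writers, then touch the buffer safely. Sketch only:

	ring_buffer_record_disable(buffer);
	synchronize_sched();	/* wait for writers already in the buffer */

	/* ... read or reset the buffer without racing new writes ... */

	ring_buffer_record_enable(buffer);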
1432 | 1438 | ||
1433 | /** | 1439 | /** |
1434 | * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer | 1440 | * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer |
1435 | * @buffer: The ring buffer to stop writes to. | 1441 | * @buffer: The ring buffer to stop writes to. |
1436 | * @cpu: The CPU buffer to stop | 1442 | * @cpu: The CPU buffer to stop |
1437 | * | 1443 | * |
1438 | * This prevents all writes to the buffer. Any attempt to write | 1444 | * This prevents all writes to the buffer. Any attempt to write |
1439 | * to the buffer after this will fail and return NULL. | 1445 | * to the buffer after this will fail and return NULL. |
1440 | * | 1446 | * |
1441 | * The caller should call synchronize_sched() after this. | 1447 | * The caller should call synchronize_sched() after this. |
1442 | */ | 1448 | */ |
1443 | void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu) | 1449 | void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu) |
1444 | { | 1450 | { |
1445 | struct ring_buffer_per_cpu *cpu_buffer; | 1451 | struct ring_buffer_per_cpu *cpu_buffer; |
1446 | 1452 | ||
1447 | if (!cpu_isset(cpu, buffer->cpumask)) | 1453 | if (!cpu_isset(cpu, buffer->cpumask)) |
1448 | return; | 1454 | return; |
1449 | 1455 | ||
1450 | cpu_buffer = buffer->buffers[cpu]; | 1456 | cpu_buffer = buffer->buffers[cpu]; |
1451 | atomic_inc(&cpu_buffer->record_disabled); | 1457 | atomic_inc(&cpu_buffer->record_disabled); |
1452 | } | 1458 | } |
1453 | 1459 | ||
1454 | /** | 1460 | /** |
1455 | * ring_buffer_record_enable_cpu - enable writes to the buffer | 1461 | * ring_buffer_record_enable_cpu - enable writes to the buffer |
1456 | * @buffer: The ring buffer to enable writes | 1462 | * @buffer: The ring buffer to enable writes |
1457 | * @cpu: The CPU to enable. | 1463 | * @cpu: The CPU to enable. |
1458 | * | 1464 | * |
1459 | * Note, multiple disables will need the same number of enables | 1465 | * Note, multiple disables will need the same number of enables |
1460 | * to truly enable the writing (much like preempt_disable). | 1466 | * to truly enable the writing (much like preempt_disable). |
1461 | */ | 1467 | */ |
1462 | void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu) | 1468 | void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu) |
1463 | { | 1469 | { |
1464 | struct ring_buffer_per_cpu *cpu_buffer; | 1470 | struct ring_buffer_per_cpu *cpu_buffer; |
1465 | 1471 | ||
1466 | if (!cpu_isset(cpu, buffer->cpumask)) | 1472 | if (!cpu_isset(cpu, buffer->cpumask)) |
1467 | return; | 1473 | return; |
1468 | 1474 | ||
1469 | cpu_buffer = buffer->buffers[cpu]; | 1475 | cpu_buffer = buffer->buffers[cpu]; |
1470 | atomic_dec(&cpu_buffer->record_disabled); | 1476 | atomic_dec(&cpu_buffer->record_disabled); |
1471 | } | 1477 | } |
1472 | 1478 | ||
1473 | /** | 1479 | /** |
1474 | * ring_buffer_entries_cpu - get the number of entries in a cpu buffer | 1480 | * ring_buffer_entries_cpu - get the number of entries in a cpu buffer |
1475 | * @buffer: The ring buffer | 1481 | * @buffer: The ring buffer |
1476 | * @cpu: The per CPU buffer to get the entries from. | 1482 | * @cpu: The per CPU buffer to get the entries from. |
1477 | */ | 1483 | */ |
1478 | unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu) | 1484 | unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu) |
1479 | { | 1485 | { |
1480 | struct ring_buffer_per_cpu *cpu_buffer; | 1486 | struct ring_buffer_per_cpu *cpu_buffer; |
1481 | 1487 | ||
1482 | if (!cpu_isset(cpu, buffer->cpumask)) | 1488 | if (!cpu_isset(cpu, buffer->cpumask)) |
1483 | return 0; | 1489 | return 0; |
1484 | 1490 | ||
1485 | cpu_buffer = buffer->buffers[cpu]; | 1491 | cpu_buffer = buffer->buffers[cpu]; |
1486 | return cpu_buffer->entries; | 1492 | return cpu_buffer->entries; |
1487 | } | 1493 | } |
1488 | 1494 | ||
1489 | /** | 1495 | /** |
1490 | * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer | 1496 | * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer |
1491 | * @buffer: The ring buffer | 1497 | * @buffer: The ring buffer |
1492 | * @cpu: The per CPU buffer to get the number of overruns from | 1498 | * @cpu: The per CPU buffer to get the number of overruns from |
1493 | */ | 1499 | */ |
1494 | unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu) | 1500 | unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu) |
1495 | { | 1501 | { |
1496 | struct ring_buffer_per_cpu *cpu_buffer; | 1502 | struct ring_buffer_per_cpu *cpu_buffer; |
1497 | 1503 | ||
1498 | if (!cpu_isset(cpu, buffer->cpumask)) | 1504 | if (!cpu_isset(cpu, buffer->cpumask)) |
1499 | return 0; | 1505 | return 0; |
1500 | 1506 | ||
1501 | cpu_buffer = buffer->buffers[cpu]; | 1507 | cpu_buffer = buffer->buffers[cpu]; |
1502 | return cpu_buffer->overrun; | 1508 | return cpu_buffer->overrun; |
1503 | } | 1509 | } |
1504 | 1510 | ||
1505 | /** | 1511 | /** |
1506 | * ring_buffer_entries - get the number of entries in a buffer | 1512 | * ring_buffer_entries - get the number of entries in a buffer |
1507 | * @buffer: The ring buffer | 1513 | * @buffer: The ring buffer |
1508 | * | 1514 | * |
1509 | * Returns the total number of entries in the ring buffer | 1515 | * Returns the total number of entries in the ring buffer |
1510 | * (all CPU entries) | 1516 | * (all CPU entries) |
1511 | */ | 1517 | */ |
1512 | unsigned long ring_buffer_entries(struct ring_buffer *buffer) | 1518 | unsigned long ring_buffer_entries(struct ring_buffer *buffer) |
1513 | { | 1519 | { |
1514 | struct ring_buffer_per_cpu *cpu_buffer; | 1520 | struct ring_buffer_per_cpu *cpu_buffer; |
1515 | unsigned long entries = 0; | 1521 | unsigned long entries = 0; |
1516 | int cpu; | 1522 | int cpu; |
1517 | 1523 | ||
1518 | /* if you care about this being correct, lock the buffer */ | 1524 | /* if you care about this being correct, lock the buffer */ |
1519 | for_each_buffer_cpu(buffer, cpu) { | 1525 | for_each_buffer_cpu(buffer, cpu) { |
1520 | cpu_buffer = buffer->buffers[cpu]; | 1526 | cpu_buffer = buffer->buffers[cpu]; |
1521 | entries += cpu_buffer->entries; | 1527 | entries += cpu_buffer->entries; |
1522 | } | 1528 | } |
1523 | 1529 | ||
1524 | return entries; | 1530 | return entries; |
1525 | } | 1531 | } |
1526 | 1532 | ||
1527 | /** | 1533 | /** |
1528 | * ring_buffer_overruns - get the number of overruns in the buffer | 1534 | * ring_buffer_overruns - get the number of overruns in the buffer |
1529 | * @buffer: The ring buffer | 1535 | * @buffer: The ring buffer |
1530 | * | 1536 | * |
1531 | * Returns the total number of overruns in the ring buffer | 1537 | * Returns the total number of overruns in the ring buffer |
1532 | * (all CPU entries) | 1538 | * (all CPU entries) |
1533 | */ | 1539 | */ |
1534 | unsigned long ring_buffer_overruns(struct ring_buffer *buffer) | 1540 | unsigned long ring_buffer_overruns(struct ring_buffer *buffer) |
1535 | { | 1541 | { |
1536 | struct ring_buffer_per_cpu *cpu_buffer; | 1542 | struct ring_buffer_per_cpu *cpu_buffer; |
1537 | unsigned long overruns = 0; | 1543 | unsigned long overruns = 0; |
1538 | int cpu; | 1544 | int cpu; |
1539 | 1545 | ||
1540 | /* if you care about this being correct, lock the buffer */ | 1546 | /* if you care about this being correct, lock the buffer */ |
1541 | for_each_buffer_cpu(buffer, cpu) { | 1547 | for_each_buffer_cpu(buffer, cpu) { |
1542 | cpu_buffer = buffer->buffers[cpu]; | 1548 | cpu_buffer = buffer->buffers[cpu]; |
1543 | overruns += cpu_buffer->overrun; | 1549 | overruns += cpu_buffer->overrun; |
1544 | } | 1550 | } |
1545 | 1551 | ||
1546 | return overruns; | 1552 | return overruns; |
1547 | } | 1553 | } |
1548 | 1554 | ||
1549 | static void rb_iter_reset(struct ring_buffer_iter *iter) | 1555 | static void rb_iter_reset(struct ring_buffer_iter *iter) |
1550 | { | 1556 | { |
1551 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; | 1557 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; |
1552 | 1558 | ||
1553 | /* Iterator usage is expected to have record disabled */ | 1559 | /* Iterator usage is expected to have record disabled */ |
1554 | if (list_empty(&cpu_buffer->reader_page->list)) { | 1560 | if (list_empty(&cpu_buffer->reader_page->list)) { |
1555 | iter->head_page = cpu_buffer->head_page; | 1561 | iter->head_page = cpu_buffer->head_page; |
1556 | iter->head = cpu_buffer->head_page->read; | 1562 | iter->head = cpu_buffer->head_page->read; |
1557 | } else { | 1563 | } else { |
1558 | iter->head_page = cpu_buffer->reader_page; | 1564 | iter->head_page = cpu_buffer->reader_page; |
1559 | iter->head = cpu_buffer->reader_page->read; | 1565 | iter->head = cpu_buffer->reader_page->read; |
1560 | } | 1566 | } |
1561 | if (iter->head) | 1567 | if (iter->head) |
1562 | iter->read_stamp = cpu_buffer->read_stamp; | 1568 | iter->read_stamp = cpu_buffer->read_stamp; |
1563 | else | 1569 | else |
1564 | iter->read_stamp = iter->head_page->page->time_stamp; | 1570 | iter->read_stamp = iter->head_page->page->time_stamp; |
1565 | } | 1571 | } |
1566 | 1572 | ||
1567 | /** | 1573 | /** |
1568 | * ring_buffer_iter_reset - reset an iterator | 1574 | * ring_buffer_iter_reset - reset an iterator |
1569 | * @iter: The iterator to reset | 1575 | * @iter: The iterator to reset |
1570 | * | 1576 | * |
1571 | * Resets the iterator, so that it will start from the beginning | 1577 | * Resets the iterator, so that it will start from the beginning |
1572 | * again. | 1578 | * again. |
1573 | */ | 1579 | */ |
1574 | void ring_buffer_iter_reset(struct ring_buffer_iter *iter) | 1580 | void ring_buffer_iter_reset(struct ring_buffer_iter *iter) |
1575 | { | 1581 | { |
1576 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; | 1582 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; |
1577 | unsigned long flags; | 1583 | unsigned long flags; |
1578 | 1584 | ||
1579 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | 1585 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); |
1580 | rb_iter_reset(iter); | 1586 | rb_iter_reset(iter); |
1581 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | 1587 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); |
1582 | } | 1588 | } |
1583 | 1589 | ||
1584 | /** | 1590 | /** |
1585 | * ring_buffer_iter_empty - check if an iterator has no more to read | 1591 | * ring_buffer_iter_empty - check if an iterator has no more to read |
1586 | * @iter: The iterator to check | 1592 | * @iter: The iterator to check |
1587 | */ | 1593 | */ |
1588 | int ring_buffer_iter_empty(struct ring_buffer_iter *iter) | 1594 | int ring_buffer_iter_empty(struct ring_buffer_iter *iter) |
1589 | { | 1595 | { |
1590 | struct ring_buffer_per_cpu *cpu_buffer; | 1596 | struct ring_buffer_per_cpu *cpu_buffer; |
1591 | 1597 | ||
1592 | cpu_buffer = iter->cpu_buffer; | 1598 | cpu_buffer = iter->cpu_buffer; |
1593 | 1599 | ||
1594 | return iter->head_page == cpu_buffer->commit_page && | 1600 | return iter->head_page == cpu_buffer->commit_page && |
1595 | iter->head == rb_commit_index(cpu_buffer); | 1601 | iter->head == rb_commit_index(cpu_buffer); |
1596 | } | 1602 | } |
1597 | 1603 | ||
1598 | static void | 1604 | static void |
1599 | rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer, | 1605 | rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer, |
1600 | struct ring_buffer_event *event) | 1606 | struct ring_buffer_event *event) |
1601 | { | 1607 | { |
1602 | u64 delta; | 1608 | u64 delta; |
1603 | 1609 | ||
1604 | switch (event->type) { | 1610 | switch (event->type) { |
1605 | case RINGBUF_TYPE_PADDING: | 1611 | case RINGBUF_TYPE_PADDING: |
1606 | return; | 1612 | return; |
1607 | 1613 | ||
1608 | case RINGBUF_TYPE_TIME_EXTEND: | 1614 | case RINGBUF_TYPE_TIME_EXTEND: |
1609 | delta = event->array[0]; | 1615 | delta = event->array[0]; |
1610 | delta <<= TS_SHIFT; | 1616 | delta <<= TS_SHIFT; |
1611 | delta += event->time_delta; | 1617 | delta += event->time_delta; |
1612 | cpu_buffer->read_stamp += delta; | 1618 | cpu_buffer->read_stamp += delta; |
1613 | return; | 1619 | return; |
1614 | 1620 | ||
1615 | case RINGBUF_TYPE_TIME_STAMP: | 1621 | case RINGBUF_TYPE_TIME_STAMP: |
1616 | /* FIXME: not implemented */ | 1622 | /* FIXME: not implemented */ |
1617 | return; | 1623 | return; |
1618 | 1624 | ||
1619 | case RINGBUF_TYPE_DATA: | 1625 | case RINGBUF_TYPE_DATA: |
1620 | cpu_buffer->read_stamp += event->time_delta; | 1626 | cpu_buffer->read_stamp += event->time_delta; |
1621 | return; | 1627 | return; |
1622 | 1628 | ||
1623 | default: | 1629 | default: |
1624 | BUG(); | 1630 | BUG(); |
1625 | } | 1631 | } |
1626 | return; | 1632 | return; |
1627 | } | 1633 | } |
1628 | 1634 | ||
1629 | static void | 1635 | static void |
1630 | rb_update_iter_read_stamp(struct ring_buffer_iter *iter, | 1636 | rb_update_iter_read_stamp(struct ring_buffer_iter *iter, |
1631 | struct ring_buffer_event *event) | 1637 | struct ring_buffer_event *event) |
1632 | { | 1638 | { |
1633 | u64 delta; | 1639 | u64 delta; |
1634 | 1640 | ||
1635 | switch (event->type) { | 1641 | switch (event->type) { |
1636 | case RINGBUF_TYPE_PADDING: | 1642 | case RINGBUF_TYPE_PADDING: |
1637 | return; | 1643 | return; |
1638 | 1644 | ||
1639 | case RINGBUF_TYPE_TIME_EXTEND: | 1645 | case RINGBUF_TYPE_TIME_EXTEND: |
1640 | delta = event->array[0]; | 1646 | delta = event->array[0]; |
1641 | delta <<= TS_SHIFT; | 1647 | delta <<= TS_SHIFT; |
1642 | delta += event->time_delta; | 1648 | delta += event->time_delta; |
1643 | iter->read_stamp += delta; | 1649 | iter->read_stamp += delta; |
1644 | return; | 1650 | return; |
1645 | 1651 | ||
1646 | case RINGBUF_TYPE_TIME_STAMP: | 1652 | case RINGBUF_TYPE_TIME_STAMP: |
1647 | /* FIXME: not implemented */ | 1653 | /* FIXME: not implemented */ |
1648 | return; | 1654 | return; |
1649 | 1655 | ||
1650 | case RINGBUF_TYPE_DATA: | 1656 | case RINGBUF_TYPE_DATA: |
1651 | iter->read_stamp += event->time_delta; | 1657 | iter->read_stamp += event->time_delta; |
1652 | return; | 1658 | return; |
1653 | 1659 | ||
1654 | default: | 1660 | default: |
1655 | BUG(); | 1661 | BUG(); |
1656 | } | 1662 | } |
1657 | return; | 1663 | return; |
1658 | } | 1664 | } |
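
Both stamp-update helpers above rebuild an extended delta the same way: array[0] carries the high bits and the 27-bit time_delta field carries the low bits. A minimal worked sketch of that arithmetic, assuming TS_SHIFT is 27 (matching the width of the time_delta bitfield):

    /* Sketch only: reassembling a TIME_EXTEND delta. */
    u32 high = 0x5;          /* would come from event->array[0] */
    u32 low  = 0x3;          /* would come from event->time_delta */
    u64 delta;

    delta = high;
    delta <<= 27;            /* TS_SHIFT; 0x5 << 27 == 0x28000000 */
    delta += low;            /* full delta == 0x28000003 */
    /* the caller then adds delta to the running read_stamp */
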
1659 | 1665 | ||
1660 | static struct buffer_page * | 1666 | static struct buffer_page * |
1661 | rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) | 1667 | rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) |
1662 | { | 1668 | { |
1663 | struct buffer_page *reader = NULL; | 1669 | struct buffer_page *reader = NULL; |
1664 | unsigned long flags; | 1670 | unsigned long flags; |
1665 | int nr_loops = 0; | 1671 | int nr_loops = 0; |
1666 | 1672 | ||
1667 | local_irq_save(flags); | 1673 | local_irq_save(flags); |
1668 | __raw_spin_lock(&cpu_buffer->lock); | 1674 | __raw_spin_lock(&cpu_buffer->lock); |
1669 | 1675 | ||
1670 | again: | 1676 | again: |
1671 | /* | 1677 | /* |
1672 | * This should normally only loop twice. But because the | 1678 | * This should normally only loop twice. But because the |
1673 | * reader starts by inserting an empty page, there is one | 1679 | * reader starts by inserting an empty page, there is one |
1674 | * case where we will loop three times. There should be no | 1680 | * case where we will loop three times. There should be no |
1675 | * reason to loop a fourth time (that I know of). | 1681 | * reason to loop a fourth time (that I know of). |
1676 | */ | 1682 | */ |
1677 | if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) { | 1683 | if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) { |
1678 | reader = NULL; | 1684 | reader = NULL; |
1679 | goto out; | 1685 | goto out; |
1680 | } | 1686 | } |
1681 | 1687 | ||
1682 | reader = cpu_buffer->reader_page; | 1688 | reader = cpu_buffer->reader_page; |
1683 | 1689 | ||
1684 | /* If there's more to read, return this page */ | 1690 | /* If there's more to read, return this page */ |
1685 | if (cpu_buffer->reader_page->read < rb_page_size(reader)) | 1691 | if (cpu_buffer->reader_page->read < rb_page_size(reader)) |
1686 | goto out; | 1692 | goto out; |
1687 | 1693 | ||
1688 | /* Never should we have an index greater than the size */ | 1694 | /* Never should we have an index greater than the size */ |
1689 | if (RB_WARN_ON(cpu_buffer, | 1695 | if (RB_WARN_ON(cpu_buffer, |
1690 | cpu_buffer->reader_page->read > rb_page_size(reader))) | 1696 | cpu_buffer->reader_page->read > rb_page_size(reader))) |
1691 | goto out; | 1697 | goto out; |
1692 | 1698 | ||
1693 | /* check if we caught up to the tail */ | 1699 | /* check if we caught up to the tail */ |
1694 | reader = NULL; | 1700 | reader = NULL; |
1695 | if (cpu_buffer->commit_page == cpu_buffer->reader_page) | 1701 | if (cpu_buffer->commit_page == cpu_buffer->reader_page) |
1696 | goto out; | 1702 | goto out; |
1697 | 1703 | ||
1698 | /* | 1704 | /* |
1699 | * Splice the empty reader page into the list around the head. | 1705 | * Splice the empty reader page into the list around the head. |
1700 | * Reset the reader page to size zero. | 1706 | * Reset the reader page to size zero. |
1701 | */ | 1707 | */ |
1702 | 1708 | ||
1703 | reader = cpu_buffer->head_page; | 1709 | reader = cpu_buffer->head_page; |
1704 | cpu_buffer->reader_page->list.next = reader->list.next; | 1710 | cpu_buffer->reader_page->list.next = reader->list.next; |
1705 | cpu_buffer->reader_page->list.prev = reader->list.prev; | 1711 | cpu_buffer->reader_page->list.prev = reader->list.prev; |
1706 | 1712 | ||
1707 | local_set(&cpu_buffer->reader_page->write, 0); | 1713 | local_set(&cpu_buffer->reader_page->write, 0); |
1708 | local_set(&cpu_buffer->reader_page->page->commit, 0); | 1714 | local_set(&cpu_buffer->reader_page->page->commit, 0); |
1709 | 1715 | ||
1710 | /* Make the reader page now replace the head */ | 1716 | /* Make the reader page now replace the head */ |
1711 | reader->list.prev->next = &cpu_buffer->reader_page->list; | 1717 | reader->list.prev->next = &cpu_buffer->reader_page->list; |
1712 | reader->list.next->prev = &cpu_buffer->reader_page->list; | 1718 | reader->list.next->prev = &cpu_buffer->reader_page->list; |
1713 | 1719 | ||
1714 | /* | 1720 | /* |
1715 | * If the tail is on the reader, then we must set the head | 1721 | * If the tail is on the reader, then we must set the head |
1716 | * to the inserted page, otherwise we set it one before. | 1722 | * to the inserted page, otherwise we set it one before. |
1717 | */ | 1723 | */ |
1718 | cpu_buffer->head_page = cpu_buffer->reader_page; | 1724 | cpu_buffer->head_page = cpu_buffer->reader_page; |
1719 | 1725 | ||
1720 | if (cpu_buffer->commit_page != reader) | 1726 | if (cpu_buffer->commit_page != reader) |
1721 | rb_inc_page(cpu_buffer, &cpu_buffer->head_page); | 1727 | rb_inc_page(cpu_buffer, &cpu_buffer->head_page); |
1722 | 1728 | ||
1723 | /* Finally update the reader page to the new head */ | 1729 | /* Finally update the reader page to the new head */ |
1724 | cpu_buffer->reader_page = reader; | 1730 | cpu_buffer->reader_page = reader; |
1725 | rb_reset_reader_page(cpu_buffer); | 1731 | rb_reset_reader_page(cpu_buffer); |
1726 | 1732 | ||
1727 | goto again; | 1733 | goto again; |
1728 | 1734 | ||
1729 | out: | 1735 | out: |
1730 | __raw_spin_unlock(&cpu_buffer->lock); | 1736 | __raw_spin_unlock(&cpu_buffer->lock); |
1731 | local_irq_restore(flags); | 1737 | local_irq_restore(flags); |
1732 | 1738 | ||
1733 | return reader; | 1739 | return reader; |
1734 | } | 1740 | } |
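
The splice above is a plain substitution in a circular doubly-linked list: the empty reader page takes the head page's place, and the old head page becomes the new reader page. A standalone sketch of the same pointer moves, using generic list_head names rather than the buffer_page types (this is essentially what list_replace() in <linux/list.h> does):

    /* Substitute @new for @old in a circular doubly-linked list. */
    static void substitute_node(struct list_head *old, struct list_head *new)
    {
            new->next = old->next;
            new->prev = old->prev;
            new->prev->next = new;
            new->next->prev = new;
    }
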
1735 | 1741 | ||
1736 | static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer) | 1742 | static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer) |
1737 | { | 1743 | { |
1738 | struct ring_buffer_event *event; | 1744 | struct ring_buffer_event *event; |
1739 | struct buffer_page *reader; | 1745 | struct buffer_page *reader; |
1740 | unsigned length; | 1746 | unsigned length; |
1741 | 1747 | ||
1742 | reader = rb_get_reader_page(cpu_buffer); | 1748 | reader = rb_get_reader_page(cpu_buffer); |
1743 | 1749 | ||
1744 | /* This function should not be called when buffer is empty */ | 1750 | /* This function should not be called when buffer is empty */ |
1745 | if (RB_WARN_ON(cpu_buffer, !reader)) | 1751 | if (RB_WARN_ON(cpu_buffer, !reader)) |
1746 | return; | 1752 | return; |
1747 | 1753 | ||
1748 | event = rb_reader_event(cpu_buffer); | 1754 | event = rb_reader_event(cpu_buffer); |
1749 | 1755 | ||
1750 | if (event->type == RINGBUF_TYPE_DATA) | 1756 | if (event->type == RINGBUF_TYPE_DATA) |
1751 | cpu_buffer->entries--; | 1757 | cpu_buffer->entries--; |
1752 | 1758 | ||
1753 | rb_update_read_stamp(cpu_buffer, event); | 1759 | rb_update_read_stamp(cpu_buffer, event); |
1754 | 1760 | ||
1755 | length = rb_event_length(event); | 1761 | length = rb_event_length(event); |
1756 | cpu_buffer->reader_page->read += length; | 1762 | cpu_buffer->reader_page->read += length; |
1757 | } | 1763 | } |
1758 | 1764 | ||
1759 | static void rb_advance_iter(struct ring_buffer_iter *iter) | 1765 | static void rb_advance_iter(struct ring_buffer_iter *iter) |
1760 | { | 1766 | { |
1761 | struct ring_buffer *buffer; | 1767 | struct ring_buffer *buffer; |
1762 | struct ring_buffer_per_cpu *cpu_buffer; | 1768 | struct ring_buffer_per_cpu *cpu_buffer; |
1763 | struct ring_buffer_event *event; | 1769 | struct ring_buffer_event *event; |
1764 | unsigned length; | 1770 | unsigned length; |
1765 | 1771 | ||
1766 | cpu_buffer = iter->cpu_buffer; | 1772 | cpu_buffer = iter->cpu_buffer; |
1767 | buffer = cpu_buffer->buffer; | 1773 | buffer = cpu_buffer->buffer; |
1768 | 1774 | ||
1769 | /* | 1775 | /* |
1770 | * Check if we are at the end of the buffer. | 1776 | * Check if we are at the end of the buffer. |
1771 | */ | 1777 | */ |
1772 | if (iter->head >= rb_page_size(iter->head_page)) { | 1778 | if (iter->head >= rb_page_size(iter->head_page)) { |
1773 | if (RB_WARN_ON(buffer, | 1779 | if (RB_WARN_ON(buffer, |
1774 | iter->head_page == cpu_buffer->commit_page)) | 1780 | iter->head_page == cpu_buffer->commit_page)) |
1775 | return; | 1781 | return; |
1776 | rb_inc_iter(iter); | 1782 | rb_inc_iter(iter); |
1777 | return; | 1783 | return; |
1778 | } | 1784 | } |
1779 | 1785 | ||
1780 | event = rb_iter_head_event(iter); | 1786 | event = rb_iter_head_event(iter); |
1781 | 1787 | ||
1782 | length = rb_event_length(event); | 1788 | length = rb_event_length(event); |
1783 | 1789 | ||
1784 | /* | 1790 | /* |
1785 | * This should not be called to advance the iterator if we are | 1791 | * This should not be called to advance the iterator if we are |
1786 | * at the tail of the buffer. | 1792 | * at the tail of the buffer. |
1787 | */ | 1793 | */ |
1788 | if (RB_WARN_ON(cpu_buffer, | 1794 | if (RB_WARN_ON(cpu_buffer, |
1789 | (iter->head_page == cpu_buffer->commit_page) && | 1795 | (iter->head_page == cpu_buffer->commit_page) && |
1790 | (iter->head + length > rb_commit_index(cpu_buffer)))) | 1796 | (iter->head + length > rb_commit_index(cpu_buffer)))) |
1791 | return; | 1797 | return; |
1792 | 1798 | ||
1793 | rb_update_iter_read_stamp(iter, event); | 1799 | rb_update_iter_read_stamp(iter, event); |
1794 | 1800 | ||
1795 | iter->head += length; | 1801 | iter->head += length; |
1796 | 1802 | ||
1797 | /* check for end of page padding */ | 1803 | /* check for end of page padding */ |
1798 | if ((iter->head >= rb_page_size(iter->head_page)) && | 1804 | if ((iter->head >= rb_page_size(iter->head_page)) && |
1799 | (iter->head_page != cpu_buffer->commit_page)) | 1805 | (iter->head_page != cpu_buffer->commit_page)) |
1800 | rb_advance_iter(iter); | 1806 | rb_advance_iter(iter); |
1801 | } | 1807 | } |
1802 | 1808 | ||
1803 | static struct ring_buffer_event * | 1809 | static struct ring_buffer_event * |
1804 | rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts) | 1810 | rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts) |
1805 | { | 1811 | { |
1806 | struct ring_buffer_per_cpu *cpu_buffer; | 1812 | struct ring_buffer_per_cpu *cpu_buffer; |
1807 | struct ring_buffer_event *event; | 1813 | struct ring_buffer_event *event; |
1808 | struct buffer_page *reader; | 1814 | struct buffer_page *reader; |
1809 | int nr_loops = 0; | 1815 | int nr_loops = 0; |
1810 | 1816 | ||
1811 | if (!cpu_isset(cpu, buffer->cpumask)) | 1817 | if (!cpu_isset(cpu, buffer->cpumask)) |
1812 | return NULL; | 1818 | return NULL; |
1813 | 1819 | ||
1814 | cpu_buffer = buffer->buffers[cpu]; | 1820 | cpu_buffer = buffer->buffers[cpu]; |
1815 | 1821 | ||
1816 | again: | 1822 | again: |
1817 | /* | 1823 | /* |
1818 | * We repeat when a timestamp is encountered. It is possible | 1824 | * We repeat when a timestamp is encountered. It is possible |
1819 | * to get multiple timestamps from an interrupt entering just | 1825 | * to get multiple timestamps from an interrupt entering just |
1820 | * as one timestamp is about to be written. The max times | 1826 | * as one timestamp is about to be written. The max times |
1821 | * that this can happen is the number of nested interrupts we | 1827 | * that this can happen is the number of nested interrupts we |
1822 | * can have. Nesting 10 deep of interrupts is clearly | 1828 | * can have. Nesting 10 deep of interrupts is clearly |
1823 | * an anomaly. | 1829 | * an anomaly. |
1824 | */ | 1830 | */ |
1825 | if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10)) | 1831 | if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10)) |
1826 | return NULL; | 1832 | return NULL; |
1827 | 1833 | ||
1828 | reader = rb_get_reader_page(cpu_buffer); | 1834 | reader = rb_get_reader_page(cpu_buffer); |
1829 | if (!reader) | 1835 | if (!reader) |
1830 | return NULL; | 1836 | return NULL; |
1831 | 1837 | ||
1832 | event = rb_reader_event(cpu_buffer); | 1838 | event = rb_reader_event(cpu_buffer); |
1833 | 1839 | ||
1834 | switch (event->type) { | 1840 | switch (event->type) { |
1835 | case RINGBUF_TYPE_PADDING: | 1841 | case RINGBUF_TYPE_PADDING: |
1836 | RB_WARN_ON(cpu_buffer, 1); | 1842 | RB_WARN_ON(cpu_buffer, 1); |
1837 | rb_advance_reader(cpu_buffer); | 1843 | rb_advance_reader(cpu_buffer); |
1838 | return NULL; | 1844 | return NULL; |
1839 | 1845 | ||
1840 | case RINGBUF_TYPE_TIME_EXTEND: | 1846 | case RINGBUF_TYPE_TIME_EXTEND: |
1841 | /* Internal data, OK to advance */ | 1847 | /* Internal data, OK to advance */ |
1842 | rb_advance_reader(cpu_buffer); | 1848 | rb_advance_reader(cpu_buffer); |
1843 | goto again; | 1849 | goto again; |
1844 | 1850 | ||
1845 | case RINGBUF_TYPE_TIME_STAMP: | 1851 | case RINGBUF_TYPE_TIME_STAMP: |
1846 | /* FIXME: not implemented */ | 1852 | /* FIXME: not implemented */ |
1847 | rb_advance_reader(cpu_buffer); | 1853 | rb_advance_reader(cpu_buffer); |
1848 | goto again; | 1854 | goto again; |
1849 | 1855 | ||
1850 | case RINGBUF_TYPE_DATA: | 1856 | case RINGBUF_TYPE_DATA: |
1851 | if (ts) { | 1857 | if (ts) { |
1852 | *ts = cpu_buffer->read_stamp + event->time_delta; | 1858 | *ts = cpu_buffer->read_stamp + event->time_delta; |
1853 | ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts); | 1859 | ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts); |
1854 | } | 1860 | } |
1855 | return event; | 1861 | return event; |
1856 | 1862 | ||
1857 | default: | 1863 | default: |
1858 | BUG(); | 1864 | BUG(); |
1859 | } | 1865 | } |
1860 | 1866 | ||
1861 | return NULL; | 1867 | return NULL; |
1862 | } | 1868 | } |
1863 | 1869 | ||
1864 | static struct ring_buffer_event * | 1870 | static struct ring_buffer_event * |
1865 | rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts) | 1871 | rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts) |
1866 | { | 1872 | { |
1867 | struct ring_buffer *buffer; | 1873 | struct ring_buffer *buffer; |
1868 | struct ring_buffer_per_cpu *cpu_buffer; | 1874 | struct ring_buffer_per_cpu *cpu_buffer; |
1869 | struct ring_buffer_event *event; | 1875 | struct ring_buffer_event *event; |
1870 | int nr_loops = 0; | 1876 | int nr_loops = 0; |
1871 | 1877 | ||
1872 | if (ring_buffer_iter_empty(iter)) | 1878 | if (ring_buffer_iter_empty(iter)) |
1873 | return NULL; | 1879 | return NULL; |
1874 | 1880 | ||
1875 | cpu_buffer = iter->cpu_buffer; | 1881 | cpu_buffer = iter->cpu_buffer; |
1876 | buffer = cpu_buffer->buffer; | 1882 | buffer = cpu_buffer->buffer; |
1877 | 1883 | ||
1878 | again: | 1884 | again: |
1879 | /* | 1885 | /* |
1880 | * We repeat when a timestamp is encountered. It is possible | 1886 | * We repeat when a timestamp is encountered. It is possible |
1881 | * to get multiple timestamps from an interrupt entering just | 1887 | * to get multiple timestamps from an interrupt entering just |
1882 | * as one timestamp is about to be written. The max times | 1888 | * as one timestamp is about to be written. The max times |
1883 | * that this can happen is the number of nested interrupts we | 1889 | * that this can happen is the number of nested interrupts we |
1884 | * can have. Nesting 10 deep of interrupts is clearly | 1890 | * can have. Nesting 10 deep of interrupts is clearly |
1885 | * an anomaly. | 1891 | * an anomaly. |
1886 | */ | 1892 | */ |
1887 | if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10)) | 1893 | if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10)) |
1888 | return NULL; | 1894 | return NULL; |
1889 | 1895 | ||
1890 | if (rb_per_cpu_empty(cpu_buffer)) | 1896 | if (rb_per_cpu_empty(cpu_buffer)) |
1891 | return NULL; | 1897 | return NULL; |
1892 | 1898 | ||
1893 | event = rb_iter_head_event(iter); | 1899 | event = rb_iter_head_event(iter); |
1894 | 1900 | ||
1895 | switch (event->type) { | 1901 | switch (event->type) { |
1896 | case RINGBUF_TYPE_PADDING: | 1902 | case RINGBUF_TYPE_PADDING: |
1897 | rb_inc_iter(iter); | 1903 | rb_inc_iter(iter); |
1898 | goto again; | 1904 | goto again; |
1899 | 1905 | ||
1900 | case RINGBUF_TYPE_TIME_EXTEND: | 1906 | case RINGBUF_TYPE_TIME_EXTEND: |
1901 | /* Internal data, OK to advance */ | 1907 | /* Internal data, OK to advance */ |
1902 | rb_advance_iter(iter); | 1908 | rb_advance_iter(iter); |
1903 | goto again; | 1909 | goto again; |
1904 | 1910 | ||
1905 | case RINGBUF_TYPE_TIME_STAMP: | 1911 | case RINGBUF_TYPE_TIME_STAMP: |
1906 | /* FIXME: not implemented */ | 1912 | /* FIXME: not implemented */ |
1907 | rb_advance_iter(iter); | 1913 | rb_advance_iter(iter); |
1908 | goto again; | 1914 | goto again; |
1909 | 1915 | ||
1910 | case RINGBUF_TYPE_DATA: | 1916 | case RINGBUF_TYPE_DATA: |
1911 | if (ts) { | 1917 | if (ts) { |
1912 | *ts = iter->read_stamp + event->time_delta; | 1918 | *ts = iter->read_stamp + event->time_delta; |
1913 | ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts); | 1919 | ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts); |
1914 | } | 1920 | } |
1915 | return event; | 1921 | return event; |
1916 | 1922 | ||
1917 | default: | 1923 | default: |
1918 | BUG(); | 1924 | BUG(); |
1919 | } | 1925 | } |
1920 | 1926 | ||
1921 | return NULL; | 1927 | return NULL; |
1922 | } | 1928 | } |
1923 | 1929 | ||
1924 | /** | 1930 | /** |
1925 | * ring_buffer_peek - peek at the next event to be read | 1931 | * ring_buffer_peek - peek at the next event to be read |
1926 | * @buffer: The ring buffer to read | 1932 | * @buffer: The ring buffer to read |
1927 | * @cpu: The cpu to peek at | 1933 | * @cpu: The cpu to peek at |
1928 | * @ts: The timestamp counter of this event. | 1934 | * @ts: The timestamp counter of this event. |
1929 | * | 1935 | * |
1930 | * This will return the event that will be read next, but does | 1936 | * This will return the event that will be read next, but does |
1931 | * not consume the data. | 1937 | * not consume the data. |
1932 | */ | 1938 | */ |
1933 | struct ring_buffer_event * | 1939 | struct ring_buffer_event * |
1934 | ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts) | 1940 | ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts) |
1935 | { | 1941 | { |
1936 | struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; | 1942 | struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; |
1937 | struct ring_buffer_event *event; | 1943 | struct ring_buffer_event *event; |
1938 | unsigned long flags; | 1944 | unsigned long flags; |
1939 | 1945 | ||
1940 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | 1946 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); |
1941 | event = rb_buffer_peek(buffer, cpu, ts); | 1947 | event = rb_buffer_peek(buffer, cpu, ts); |
1942 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | 1948 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); |
1943 | 1949 | ||
1944 | return event; | 1950 | return event; |
1945 | } | 1951 | } |
1946 | 1952 | ||
1947 | /** | 1953 | /** |
1948 | * ring_buffer_iter_peek - peek at the next event to be read | 1954 | * ring_buffer_iter_peek - peek at the next event to be read |
1949 | * @iter: The ring buffer iterator | 1955 | * @iter: The ring buffer iterator |
1950 | * @ts: The timestamp counter of this event. | 1956 | * @ts: The timestamp counter of this event. |
1951 | * | 1957 | * |
1952 | * This will return the event that will be read next, but does | 1958 | * This will return the event that will be read next, but does |
1953 | * not increment the iterator. | 1959 | * not increment the iterator. |
1954 | */ | 1960 | */ |
1955 | struct ring_buffer_event * | 1961 | struct ring_buffer_event * |
1956 | ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts) | 1962 | ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts) |
1957 | { | 1963 | { |
1958 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; | 1964 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; |
1959 | struct ring_buffer_event *event; | 1965 | struct ring_buffer_event *event; |
1960 | unsigned long flags; | 1966 | unsigned long flags; |
1961 | 1967 | ||
1962 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | 1968 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); |
1963 | event = rb_iter_peek(iter, ts); | 1969 | event = rb_iter_peek(iter, ts); |
1964 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | 1970 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); |
1965 | 1971 | ||
1966 | return event; | 1972 | return event; |
1967 | } | 1973 | } |
1968 | 1974 | ||
1969 | /** | 1975 | /** |
1970 | * ring_buffer_consume - return an event and consume it | 1976 | * ring_buffer_consume - return an event and consume it |
1971 | * @buffer: The ring buffer to get the next event from | 1977 | * @buffer: The ring buffer to get the next event from |
1972 | * | 1978 | * |
1973 | * Returns the next event in the ring buffer, and that event is consumed. | 1979 | * Returns the next event in the ring buffer, and that event is consumed. |
1974 | * Meaning that sequential reads will keep returning different events, | 1980 | * Meaning that sequential reads will keep returning different events, |
1975 | * eventually emptying the ring buffer if the producer is slower. | 1981 | * eventually emptying the ring buffer if the producer is slower. |
1976 | */ | 1982 | */ |
1977 | struct ring_buffer_event * | 1983 | struct ring_buffer_event * |
1978 | ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts) | 1984 | ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts) |
1979 | { | 1985 | { |
1980 | struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; | 1986 | struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; |
1981 | struct ring_buffer_event *event; | 1987 | struct ring_buffer_event *event; |
1982 | unsigned long flags; | 1988 | unsigned long flags; |
1983 | 1989 | ||
1984 | if (!cpu_isset(cpu, buffer->cpumask)) | 1990 | if (!cpu_isset(cpu, buffer->cpumask)) |
1985 | return NULL; | 1991 | return NULL; |
1986 | 1992 | ||
1987 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | 1993 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); |
1988 | 1994 | ||
1989 | event = rb_buffer_peek(buffer, cpu, ts); | 1995 | event = rb_buffer_peek(buffer, cpu, ts); |
1990 | if (!event) | 1996 | if (!event) |
1991 | goto out; | 1997 | goto out; |
1992 | 1998 | ||
1993 | rb_advance_reader(cpu_buffer); | 1999 | rb_advance_reader(cpu_buffer); |
1994 | 2000 | ||
1995 | out: | 2001 | out: |
1996 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | 2002 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); |
1997 | 2003 | ||
1998 | return event; | 2004 | return event; |
1999 | } | 2005 | } |
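
A minimal consumer loop built on ring_buffer_consume(); process_event() is a hypothetical caller-supplied helper, and the loop simply ends once the buffer is drained:

    struct ring_buffer_event *event;
    u64 ts;

    /* Drain everything currently readable on this CPU. */
    while ((event = ring_buffer_consume(buffer, cpu, &ts)) != NULL)
            process_event(ring_buffer_event_data(event), ts);

Any waiting for new data is the caller's job; the ring buffer itself never sleeps or wakes anyone up.
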
2000 | 2006 | ||
2001 | /** | 2007 | /** |
2002 | * ring_buffer_read_start - start a non consuming read of the buffer | 2008 | * ring_buffer_read_start - start a non consuming read of the buffer |
2003 | * @buffer: The ring buffer to read from | 2009 | * @buffer: The ring buffer to read from |
2004 | * @cpu: The cpu buffer to iterate over | 2010 | * @cpu: The cpu buffer to iterate over |
2005 | * | 2011 | * |
2006 | * This starts up an iteration through the buffer. It also disables | 2012 | * This starts up an iteration through the buffer. It also disables |
2007 | * the recording to the buffer until the reading is finished. | 2013 | * the recording to the buffer until the reading is finished. |
2008 | * This prevents the reading from being corrupted. This is not | 2014 | * This prevents the reading from being corrupted. This is not |
2009 | * a consuming read, so a producer is not expected. | 2015 | * a consuming read, so a producer is not expected. |
2010 | * | 2016 | * |
2011 | * Must be paired with ring_buffer_read_finish. | 2017 | * Must be paired with ring_buffer_read_finish. |
2012 | */ | 2018 | */ |
2013 | struct ring_buffer_iter * | 2019 | struct ring_buffer_iter * |
2014 | ring_buffer_read_start(struct ring_buffer *buffer, int cpu) | 2020 | ring_buffer_read_start(struct ring_buffer *buffer, int cpu) |
2015 | { | 2021 | { |
2016 | struct ring_buffer_per_cpu *cpu_buffer; | 2022 | struct ring_buffer_per_cpu *cpu_buffer; |
2017 | struct ring_buffer_iter *iter; | 2023 | struct ring_buffer_iter *iter; |
2018 | unsigned long flags; | 2024 | unsigned long flags; |
2019 | 2025 | ||
2020 | if (!cpu_isset(cpu, buffer->cpumask)) | 2026 | if (!cpu_isset(cpu, buffer->cpumask)) |
2021 | return NULL; | 2027 | return NULL; |
2022 | 2028 | ||
2023 | iter = kmalloc(sizeof(*iter), GFP_KERNEL); | 2029 | iter = kmalloc(sizeof(*iter), GFP_KERNEL); |
2024 | if (!iter) | 2030 | if (!iter) |
2025 | return NULL; | 2031 | return NULL; |
2026 | 2032 | ||
2027 | cpu_buffer = buffer->buffers[cpu]; | 2033 | cpu_buffer = buffer->buffers[cpu]; |
2028 | 2034 | ||
2029 | iter->cpu_buffer = cpu_buffer; | 2035 | iter->cpu_buffer = cpu_buffer; |
2030 | 2036 | ||
2031 | atomic_inc(&cpu_buffer->record_disabled); | 2037 | atomic_inc(&cpu_buffer->record_disabled); |
2032 | synchronize_sched(); | 2038 | synchronize_sched(); |
2033 | 2039 | ||
2034 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | 2040 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); |
2035 | __raw_spin_lock(&cpu_buffer->lock); | 2041 | __raw_spin_lock(&cpu_buffer->lock); |
2036 | rb_iter_reset(iter); | 2042 | rb_iter_reset(iter); |
2037 | __raw_spin_unlock(&cpu_buffer->lock); | 2043 | __raw_spin_unlock(&cpu_buffer->lock); |
2038 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | 2044 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); |
2039 | 2045 | ||
2040 | return iter; | 2046 | return iter; |
2041 | } | 2047 | } |
2042 | 2048 | ||
2043 | /** | 2049 | /** |
2044 | * ring_buffer_read_finish - finish reading the iterator of the buffer | 2050 | * ring_buffer_read_finish - finish reading the iterator of the buffer |
2045 | * @iter: The iterator retrieved by ring_buffer_read_start | 2051 | * @iter: The iterator retrieved by ring_buffer_read_start |
2046 | * | 2052 | * |
2047 | * This re-enables the recording to the buffer, and frees the | 2053 | * This re-enables the recording to the buffer, and frees the |
2048 | * iterator. | 2054 | * iterator. |
2049 | */ | 2055 | */ |
2050 | void | 2056 | void |
2051 | ring_buffer_read_finish(struct ring_buffer_iter *iter) | 2057 | ring_buffer_read_finish(struct ring_buffer_iter *iter) |
2052 | { | 2058 | { |
2053 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; | 2059 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; |
2054 | 2060 | ||
2055 | atomic_dec(&cpu_buffer->record_disabled); | 2061 | atomic_dec(&cpu_buffer->record_disabled); |
2056 | kfree(iter); | 2062 | kfree(iter); |
2057 | } | 2063 | } |
2058 | 2064 | ||
2059 | /** | 2065 | /** |
2060 | * ring_buffer_read - read the next item in the ring buffer by the iterator | 2066 | * ring_buffer_read - read the next item in the ring buffer by the iterator |
2061 | * @iter: The ring buffer iterator | 2067 | * @iter: The ring buffer iterator |
2062 | * @ts: The time stamp of the event read. | 2068 | * @ts: The time stamp of the event read. |
2063 | * | 2069 | * |
2064 | * This reads the next event in the ring buffer and increments the iterator. | 2070 | * This reads the next event in the ring buffer and increments the iterator. |
2065 | */ | 2071 | */ |
2066 | struct ring_buffer_event * | 2072 | struct ring_buffer_event * |
2067 | ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts) | 2073 | ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts) |
2068 | { | 2074 | { |
2069 | struct ring_buffer_event *event; | 2075 | struct ring_buffer_event *event; |
2070 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; | 2076 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; |
2071 | unsigned long flags; | 2077 | unsigned long flags; |
2072 | 2078 | ||
2073 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | 2079 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); |
2074 | event = rb_iter_peek(iter, ts); | 2080 | event = rb_iter_peek(iter, ts); |
2075 | if (!event) | 2081 | if (!event) |
2076 | goto out; | 2082 | goto out; |
2077 | 2083 | ||
2078 | rb_advance_iter(iter); | 2084 | rb_advance_iter(iter); |
2079 | out: | 2085 | out: |
2080 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | 2086 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); |
2081 | 2087 | ||
2082 | return event; | 2088 | return event; |
2083 | } | 2089 | } |
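
Putting the iterator half of the API together; a sketch of a non-consuming pass over one CPU buffer (process_event() is again a hypothetical helper):

    struct ring_buffer_iter *iter;
    struct ring_buffer_event *event;
    u64 ts;

    iter = ring_buffer_read_start(buffer, cpu);
    if (!iter)
            return;

    /* Recording stays disabled until ring_buffer_read_finish(). */
    while ((event = ring_buffer_read(iter, &ts)) != NULL)
            process_event(ring_buffer_event_data(event), ts);

    ring_buffer_read_finish(iter);

Unlike the consuming read, the events are still in the buffer afterwards; only the iterator has advanced.
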
2084 | 2090 | ||
2085 | /** | 2091 | /** |
2086 | * ring_buffer_size - return the size of the ring buffer (in bytes) | 2092 | * ring_buffer_size - return the size of the ring buffer (in bytes) |
2087 | * @buffer: The ring buffer. | 2093 | * @buffer: The ring buffer. |
2088 | */ | 2094 | */ |
2089 | unsigned long ring_buffer_size(struct ring_buffer *buffer) | 2095 | unsigned long ring_buffer_size(struct ring_buffer *buffer) |
2090 | { | 2096 | { |
2091 | return BUF_PAGE_SIZE * buffer->pages; | 2097 | return BUF_PAGE_SIZE * buffer->pages; |
2092 | } | 2098 | } |
2093 | 2099 | ||
2094 | static void | 2100 | static void |
2095 | rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) | 2101 | rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) |
2096 | { | 2102 | { |
2097 | cpu_buffer->head_page | 2103 | cpu_buffer->head_page |
2098 | = list_entry(cpu_buffer->pages.next, struct buffer_page, list); | 2104 | = list_entry(cpu_buffer->pages.next, struct buffer_page, list); |
2099 | local_set(&cpu_buffer->head_page->write, 0); | 2105 | local_set(&cpu_buffer->head_page->write, 0); |
2100 | local_set(&cpu_buffer->head_page->page->commit, 0); | 2106 | local_set(&cpu_buffer->head_page->page->commit, 0); |
2101 | 2107 | ||
2102 | cpu_buffer->head_page->read = 0; | 2108 | cpu_buffer->head_page->read = 0; |
2103 | 2109 | ||
2104 | cpu_buffer->tail_page = cpu_buffer->head_page; | 2110 | cpu_buffer->tail_page = cpu_buffer->head_page; |
2105 | cpu_buffer->commit_page = cpu_buffer->head_page; | 2111 | cpu_buffer->commit_page = cpu_buffer->head_page; |
2106 | 2112 | ||
2107 | INIT_LIST_HEAD(&cpu_buffer->reader_page->list); | 2113 | INIT_LIST_HEAD(&cpu_buffer->reader_page->list); |
2108 | local_set(&cpu_buffer->reader_page->write, 0); | 2114 | local_set(&cpu_buffer->reader_page->write, 0); |
2109 | local_set(&cpu_buffer->reader_page->page->commit, 0); | 2115 | local_set(&cpu_buffer->reader_page->page->commit, 0); |
2110 | cpu_buffer->reader_page->read = 0; | 2116 | cpu_buffer->reader_page->read = 0; |
2111 | 2117 | ||
2112 | cpu_buffer->overrun = 0; | 2118 | cpu_buffer->overrun = 0; |
2113 | cpu_buffer->entries = 0; | 2119 | cpu_buffer->entries = 0; |
2114 | } | 2120 | } |
2115 | 2121 | ||
2116 | /** | 2122 | /** |
2117 | * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer | 2123 | * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer |
2118 | * @buffer: The ring buffer to reset a per cpu buffer of | 2124 | * @buffer: The ring buffer to reset a per cpu buffer of |
2119 | * @cpu: The CPU buffer to be reset | 2125 | * @cpu: The CPU buffer to be reset |
2120 | */ | 2126 | */ |
2121 | void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu) | 2127 | void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu) |
2122 | { | 2128 | { |
2123 | struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; | 2129 | struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; |
2124 | unsigned long flags; | 2130 | unsigned long flags; |
2125 | 2131 | ||
2126 | if (!cpu_isset(cpu, buffer->cpumask)) | 2132 | if (!cpu_isset(cpu, buffer->cpumask)) |
2127 | return; | 2133 | return; |
2128 | 2134 | ||
2129 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | 2135 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); |
2130 | 2136 | ||
2131 | __raw_spin_lock(&cpu_buffer->lock); | 2137 | __raw_spin_lock(&cpu_buffer->lock); |
2132 | 2138 | ||
2133 | rb_reset_cpu(cpu_buffer); | 2139 | rb_reset_cpu(cpu_buffer); |
2134 | 2140 | ||
2135 | __raw_spin_unlock(&cpu_buffer->lock); | 2141 | __raw_spin_unlock(&cpu_buffer->lock); |
2136 | 2142 | ||
2137 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | 2143 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); |
2138 | } | 2144 | } |
2139 | 2145 | ||
2140 | /** | 2146 | /** |
2141 | * ring_buffer_reset - reset a ring buffer | 2147 | * ring_buffer_reset - reset a ring buffer |
2142 | * @buffer: The ring buffer to reset all cpu buffers | 2148 | * @buffer: The ring buffer to reset all cpu buffers |
2143 | */ | 2149 | */ |
2144 | void ring_buffer_reset(struct ring_buffer *buffer) | 2150 | void ring_buffer_reset(struct ring_buffer *buffer) |
2145 | { | 2151 | { |
2146 | int cpu; | 2152 | int cpu; |
2147 | 2153 | ||
2148 | for_each_buffer_cpu(buffer, cpu) | 2154 | for_each_buffer_cpu(buffer, cpu) |
2149 | ring_buffer_reset_cpu(buffer, cpu); | 2155 | ring_buffer_reset_cpu(buffer, cpu); |
2150 | } | 2156 | } |
2151 | 2157 | ||
2152 | /** | 2158 | /** |
2153 | * ring_buffer_empty - is the ring buffer empty? | 2159 | * ring_buffer_empty - is the ring buffer empty? |
2154 | * @buffer: The ring buffer to test | 2160 | * @buffer: The ring buffer to test |
2155 | */ | 2161 | */ |
2156 | int ring_buffer_empty(struct ring_buffer *buffer) | 2162 | int ring_buffer_empty(struct ring_buffer *buffer) |
2157 | { | 2163 | { |
2158 | struct ring_buffer_per_cpu *cpu_buffer; | 2164 | struct ring_buffer_per_cpu *cpu_buffer; |
2159 | int cpu; | 2165 | int cpu; |
2160 | 2166 | ||
2161 | /* yes this is racy, but if you don't like the race, lock the buffer */ | 2167 | /* yes this is racy, but if you don't like the race, lock the buffer */ |
2162 | for_each_buffer_cpu(buffer, cpu) { | 2168 | for_each_buffer_cpu(buffer, cpu) { |
2163 | cpu_buffer = buffer->buffers[cpu]; | 2169 | cpu_buffer = buffer->buffers[cpu]; |
2164 | if (!rb_per_cpu_empty(cpu_buffer)) | 2170 | if (!rb_per_cpu_empty(cpu_buffer)) |
2165 | return 0; | 2171 | return 0; |
2166 | } | 2172 | } |
2167 | return 1; | 2173 | return 1; |
2168 | } | 2174 | } |
2169 | 2175 | ||
2170 | /** | 2176 | /** |
2171 | * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty? | 2177 | * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty? |
2172 | * @buffer: The ring buffer | 2178 | * @buffer: The ring buffer |
2173 | * @cpu: The CPU buffer to test | 2179 | * @cpu: The CPU buffer to test |
2174 | */ | 2180 | */ |
2175 | int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu) | 2181 | int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu) |
2176 | { | 2182 | { |
2177 | struct ring_buffer_per_cpu *cpu_buffer; | 2183 | struct ring_buffer_per_cpu *cpu_buffer; |
2178 | 2184 | ||
2179 | if (!cpu_isset(cpu, buffer->cpumask)) | 2185 | if (!cpu_isset(cpu, buffer->cpumask)) |
2180 | return 1; | 2186 | return 1; |
2181 | 2187 | ||
2182 | cpu_buffer = buffer->buffers[cpu]; | 2188 | cpu_buffer = buffer->buffers[cpu]; |
2183 | return rb_per_cpu_empty(cpu_buffer); | 2189 | return rb_per_cpu_empty(cpu_buffer); |
2184 | } | 2190 | } |
2185 | 2191 | ||
2186 | /** | 2192 | /** |
2187 | * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers | 2193 | * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers |
2188 | * @buffer_a: One buffer to swap with | 2194 | * @buffer_a: One buffer to swap with |
2189 | * @buffer_b: The other buffer to swap with | 2195 | * @buffer_b: The other buffer to swap with |
2190 | * | 2196 | * |
2191 | * This function is useful for tracers that want to take a "snapshot" | 2197 | * This function is useful for tracers that want to take a "snapshot" |
2192 | * of a CPU buffer and have another backup buffer lying around. | 2198 | * of a CPU buffer and have another backup buffer lying around. |
2193 | * It is expected that the tracer handles the cpu buffer not being | 2199 | * It is expected that the tracer handles the cpu buffer not being |
2194 | * used at the moment. | 2200 | * used at the moment. |
2195 | */ | 2201 | */ |
2196 | int ring_buffer_swap_cpu(struct ring_buffer *buffer_a, | 2202 | int ring_buffer_swap_cpu(struct ring_buffer *buffer_a, |
2197 | struct ring_buffer *buffer_b, int cpu) | 2203 | struct ring_buffer *buffer_b, int cpu) |
2198 | { | 2204 | { |
2199 | struct ring_buffer_per_cpu *cpu_buffer_a; | 2205 | struct ring_buffer_per_cpu *cpu_buffer_a; |
2200 | struct ring_buffer_per_cpu *cpu_buffer_b; | 2206 | struct ring_buffer_per_cpu *cpu_buffer_b; |
2201 | 2207 | ||
2202 | if (!cpu_isset(cpu, buffer_a->cpumask) || | 2208 | if (!cpu_isset(cpu, buffer_a->cpumask) || |
2203 | !cpu_isset(cpu, buffer_b->cpumask)) | 2209 | !cpu_isset(cpu, buffer_b->cpumask)) |
2204 | return -EINVAL; | 2210 | return -EINVAL; |
2205 | 2211 | ||
2206 | /* At least make sure the two buffers are somewhat the same */ | 2212 | /* At least make sure the two buffers are somewhat the same */ |
2207 | if (buffer_a->size != buffer_b->size || | 2213 | if (buffer_a->size != buffer_b->size || |
2208 | buffer_a->pages != buffer_b->pages) | 2214 | buffer_a->pages != buffer_b->pages) |
2209 | return -EINVAL; | 2215 | return -EINVAL; |
2210 | 2216 | ||
2211 | cpu_buffer_a = buffer_a->buffers[cpu]; | 2217 | cpu_buffer_a = buffer_a->buffers[cpu]; |
2212 | cpu_buffer_b = buffer_b->buffers[cpu]; | 2218 | cpu_buffer_b = buffer_b->buffers[cpu]; |
2213 | 2219 | ||
2214 | /* | 2220 | /* |
2215 | * We can't do a synchronize_sched here because this | 2221 | * We can't do a synchronize_sched here because this |
2216 | * function can be called in atomic context. | 2222 | * function can be called in atomic context. |
2217 | * Normally this will be called from the same CPU as cpu. | 2223 | * Normally this will be called from the same CPU as cpu. |
2218 | * If not it's up to the caller to protect this. | 2224 | * If not it's up to the caller to protect this. |
2219 | */ | 2225 | */ |
2220 | atomic_inc(&cpu_buffer_a->record_disabled); | 2226 | atomic_inc(&cpu_buffer_a->record_disabled); |
2221 | atomic_inc(&cpu_buffer_b->record_disabled); | 2227 | atomic_inc(&cpu_buffer_b->record_disabled); |
2222 | 2228 | ||
2223 | buffer_a->buffers[cpu] = cpu_buffer_b; | 2229 | buffer_a->buffers[cpu] = cpu_buffer_b; |
2224 | buffer_b->buffers[cpu] = cpu_buffer_a; | 2230 | buffer_b->buffers[cpu] = cpu_buffer_a; |
2225 | 2231 | ||
2226 | cpu_buffer_b->buffer = buffer_a; | 2232 | cpu_buffer_b->buffer = buffer_a; |
2227 | cpu_buffer_a->buffer = buffer_b; | 2233 | cpu_buffer_a->buffer = buffer_b; |
2228 | 2234 | ||
2229 | atomic_dec(&cpu_buffer_a->record_disabled); | 2235 | atomic_dec(&cpu_buffer_a->record_disabled); |
2230 | atomic_dec(&cpu_buffer_b->record_disabled); | 2236 | atomic_dec(&cpu_buffer_b->record_disabled); |
2231 | 2237 | ||
2232 | return 0; | 2238 | return 0; |
2239 | } | ||
2240 | |||
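
A sketch of the snapshot pattern the comment above describes. Here max_buffer is an assumed spare ring buffer allocated with the same size and page count as the live one, and read_snapshot() is a hypothetical reader:

    /* Freeze the current contents of @cpu by swapping in a spare buffer. */
    if (ring_buffer_swap_cpu(live_buffer, max_buffer, cpu) == 0) {
            /*
             * live_buffer now records into the old spare pages;
             * max_buffer holds the frozen snapshot for this cpu.
             */
            read_snapshot(max_buffer, cpu);
    }
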
2241 | static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer, | ||
2242 | struct buffer_data_page *page) | ||
2243 | { | ||
2244 | struct ring_buffer_event *event; | ||
2245 | unsigned long head; | ||
2246 | |||
2247 | __raw_spin_lock(&cpu_buffer->lock); | ||
2248 | for (head = 0; head < local_read(&page->commit); | ||
2249 | head += rb_event_length(event)) { | ||
2250 | |||
2251 | event = __rb_data_page_index(page, head); | ||
2252 | if (RB_WARN_ON(cpu_buffer, rb_null_event(event))) | ||
2253 | break; /* must not return with cpu_buffer->lock held */ |||
2254 | /* Only count data entries */ | ||
2255 | if (event->type != RINGBUF_TYPE_DATA) | ||
2256 | continue; | ||
2257 | cpu_buffer->entries--; | ||
2258 | } | ||
2259 | __raw_spin_unlock(&cpu_buffer->lock); | ||
2260 | } | ||
2261 | |||
2262 | /** | ||
2263 | * ring_buffer_alloc_read_page - allocate a page to read from buffer | ||
2264 | * @buffer: the buffer to allocate for. | ||
2265 | * | ||
2266 | * This function is used in conjunction with ring_buffer_read_page. | ||
2267 | * When reading a full page from the ring buffer, these functions | ||
2268 | * can be used to speed up the process. The calling function should | ||
2269 | * allocate a few pages first with this function. Then when it | ||
2270 | * needs to get pages from the ring buffer, it passes the result | ||
2271 | * of this function into ring_buffer_read_page, which will swap | ||
2272 | * the allocated page with the read page of the buffer. |||
2273 | * | ||
2274 | * Returns: | ||
2275 | * The page allocated, or NULL on error. | ||
2276 | */ | ||
2277 | void *ring_buffer_alloc_read_page(struct ring_buffer *buffer) | ||
2278 | { | ||
2279 | unsigned long addr; | ||
2280 | struct buffer_data_page *page; | ||
2281 | |||
2282 | addr = __get_free_page(GFP_KERNEL); | ||
2283 | if (!addr) | ||
2284 | return NULL; | ||
2285 | |||
2286 | page = (void *)addr; | ||
2287 | |||
2288 | return page; | ||
2289 | } | ||
2290 | |||
2291 | /** | ||
2292 | * ring_buffer_free_read_page - free an allocated read page | ||
2293 | * @buffer: the buffer the page was allocate for | ||
2294 | * @data: the page to free | ||
2295 | * | ||
2296 | * Free a page allocated from ring_buffer_alloc_read_page. | ||
2297 | */ | ||
2298 | void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data) | ||
2299 | { | ||
2300 | free_page((unsigned long)data); | ||
2301 | } | ||
2302 | |||
2303 | /** | ||
2304 | * ring_buffer_read_page - extract a page from the ring buffer | ||
2305 | * @buffer: buffer to extract from | ||
2306 | * @data_page: the page to use allocated from ring_buffer_alloc_read_page | ||
2307 | * @cpu: the cpu of the buffer to extract | ||
2308 | * @full: should the extraction only happen when the page is full. | ||
2309 | * | ||
2310 | * This function will pull out a page from the ring buffer and consume it. | ||
2311 | * @data_page must be the address of the variable that holds the page |||
2312 | * returned from ring_buffer_alloc_read_page. This is because the page |||
2313 | * might be swapped with a page in the ring buffer. |||
2314 | * | ||
2315 | * for example: | ||
2316 | * rpage = ring_buffer_alloc_read_page(buffer); |||
2317 | * if (!rpage) | ||
2318 | * return error; | ||
2319 | * ret = ring_buffer_read_page(buffer, &rpage, cpu, 0); | ||
2320 | * if (ret) | ||
2321 | * process_page(rpage); | ||
2322 | * | ||
2323 | * When @full is set, the function will not return 1 unless |||
2324 | * the writer has moved off the reader page. |||
2325 | * | ||
2326 | * Note: it is up to the calling functions to handle sleeps and wakeups. | ||
2327 | * The ring buffer can be used anywhere in the kernel and cannot |||
2328 | * blindly call wake_up. The layer that uses the ring buffer must be | ||
2329 | * responsible for that. | ||
2330 | * | ||
2331 | * Returns: | ||
2332 | * 1 if data has been transferred | ||
2333 | * 0 if no data has been transferred. | ||
2334 | */ | ||
2335 | int ring_buffer_read_page(struct ring_buffer *buffer, | ||
2336 | void **data_page, int cpu, int full) | ||
2337 | { | ||
2338 | struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; | ||
2339 | struct ring_buffer_event *event; | ||
2340 | struct buffer_data_page *page; | ||
2341 | unsigned long flags; | ||
2342 | int ret = 0; | ||
2343 | |||
2344 | if (!data_page) | ||
2345 | return 0; | ||
2346 | |||
2347 | page = *data_page; | ||
2348 | if (!page) | ||
2349 | return 0; | ||
2350 | |||
2351 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | ||
2352 | |||
2353 | /* | ||
2354 | * rb_buffer_peek will advance to the next reader page if |||
2355 | * the current reader page is empty. |||
2356 | */ | ||
2357 | event = rb_buffer_peek(buffer, cpu, NULL); | ||
2358 | if (!event) | ||
2359 | goto out; | ||
2360 | |||
2361 | /* check for data */ | ||
2362 | if (!local_read(&cpu_buffer->reader_page->page->commit)) | ||
2363 | goto out; | ||
2364 | /* | ||
2365 | * If the writer is already off of the read page, then simply | ||
2366 | * switch the read page with the given page. Otherwise | ||
2367 | * we need to copy the data from the reader to the writer. | ||
2368 | */ | ||
2369 | if (cpu_buffer->reader_page == cpu_buffer->commit_page) { |||
2370 | unsigned int read = cpu_buffer->reader_page->read; |||
2371 | unsigned int commit; |||
2372 | |||
2373 | if (full) |||
2374 | goto out; |||
2375 | /* The writer is still on the reader page, we must copy */ |||
2376 | commit = local_read(&cpu_buffer->reader_page->page->commit); |||
2377 | memcpy(page->data, |||
2378 | cpu_buffer->reader_page->page->data + read, |||
2379 | commit - read); |||
2380 | local_set(&page->commit, commit - read); /* length of copied data */ |||
2381 | /* consume what was read */ |||
2382 | cpu_buffer->reader_page->read = commit; |||
2383 | } else { |||
2384 | /* swap the pages */ |||
2385 | rb_init_page(page); |||
2386 | page = cpu_buffer->reader_page->page; |||
2387 | cpu_buffer->reader_page->page = *data_page; |||
2388 | cpu_buffer->reader_page->read = 0; |||
2389 | *data_page = page; |||
2390 | } |||
2391 | ret = 1; | ||
2392 | |||
2393 | /* update the entry counter */ | ||
2394 | rb_remove_entries(cpu_buffer, page); | ||
2395 | out: | ||
2396 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | ||
2397 | |||
2398 | return ret; | ||
2233 | } | 2399 | } |
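
Extending the kernel-doc example into a full (sketch) sequence, including the free and the @full flag; process_page() is a hypothetical consumer:

    void *rpage;
    int ret;

    rpage = ring_buffer_alloc_read_page(buffer);
    if (!rpage)
            return;

    /* full == 0: accept a partial page if that is all there is */
    ret = ring_buffer_read_page(buffer, &rpage, cpu, 0);
    if (ret)
            process_page(rpage);

    /* rpage may now be a different page than the one first allocated */
    ring_buffer_free_read_page(buffer, rpage);

Note that &rpage is passed, not rpage: on the swap path the function hands back a different page, which is why the kernel-doc insists on the address of the variable.
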
2234 | 2400 | ||
2235 | static ssize_t | 2401 | static ssize_t |
2236 | rb_simple_read(struct file *filp, char __user *ubuf, | 2402 | rb_simple_read(struct file *filp, char __user *ubuf, |
2237 | size_t cnt, loff_t *ppos) | 2403 | size_t cnt, loff_t *ppos) |
2238 | { | 2404 | { |
2239 | long *p = filp->private_data; | 2405 | long *p = filp->private_data; |
2240 | char buf[64]; | 2406 | char buf[64]; |
2241 | int r; | 2407 | int r; |
2242 | 2408 | ||
2243 | if (test_bit(RB_BUFFERS_DISABLED_BIT, p)) | 2409 | if (test_bit(RB_BUFFERS_DISABLED_BIT, p)) |
2244 | r = sprintf(buf, "permanently disabled\n"); | 2410 | r = sprintf(buf, "permanently disabled\n"); |
2245 | else | 2411 | else |
2246 | r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p)); | 2412 | r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p)); |
2247 | 2413 | ||
2248 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 2414 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); |
2249 | } | 2415 | } |
2250 | 2416 | ||
2251 | static ssize_t | 2417 | static ssize_t |
2252 | rb_simple_write(struct file *filp, const char __user *ubuf, | 2418 | rb_simple_write(struct file *filp, const char __user *ubuf, |
2253 | size_t cnt, loff_t *ppos) | 2419 | size_t cnt, loff_t *ppos) |
2254 | { | 2420 | { |
2255 | long *p = filp->private_data; | 2421 | long *p = filp->private_data; |
2256 | char buf[64]; | 2422 | char buf[64]; |
2257 | long val; | 2423 | long val; |
2258 | int ret; | 2424 | int ret; |
2259 | 2425 | ||
2260 | if (cnt >= sizeof(buf)) | 2426 | if (cnt >= sizeof(buf)) |
2261 | return -EINVAL; | 2427 | return -EINVAL; |
2262 | 2428 | ||
2263 | if (copy_from_user(&buf, ubuf, cnt)) | 2429 | if (copy_from_user(&buf, ubuf, cnt)) |
2264 | return -EFAULT; | 2430 | return -EFAULT; |
2265 | 2431 | ||
2266 | buf[cnt] = 0; | 2432 | buf[cnt] = 0; |
2267 | 2433 | ||
2268 | ret = strict_strtoul(buf, 10, &val); | 2434 | ret = strict_strtoul(buf, 10, &val); |
2269 | if (ret < 0) | 2435 | if (ret < 0) |
2270 | return ret; | 2436 | return ret; |
2271 | 2437 | ||
2272 | if (val) | 2438 | if (val) |
2273 | set_bit(RB_BUFFERS_ON_BIT, p); | 2439 | set_bit(RB_BUFFERS_ON_BIT, p); |
2274 | else | 2440 | else |
2275 | clear_bit(RB_BUFFERS_ON_BIT, p); | 2441 | clear_bit(RB_BUFFERS_ON_BIT, p); |
2276 | 2442 | ||
2277 | (*ppos)++; | 2443 | (*ppos)++; |
2278 | 2444 | ||
2279 | return cnt; | 2445 | return cnt; |
2280 | } | 2446 | } |
2281 | 2447 | ||
2282 | static struct file_operations rb_simple_fops = { | 2448 | static struct file_operations rb_simple_fops = { |
2283 | .open = tracing_open_generic, | 2449 | .open = tracing_open_generic, |
2284 | .read = rb_simple_read, | 2450 | .read = rb_simple_read, |
2285 | .write = rb_simple_write, | 2451 | .write = rb_simple_write, |
2286 | }; | 2452 | }; |
2287 | 2453 | ||
2288 | 2454 | ||
2289 | static __init int rb_init_debugfs(void) | 2455 | static __init int rb_init_debugfs(void) |
2290 | { | 2456 | { |
2291 | struct dentry *d_tracer; | 2457 | struct dentry *d_tracer; |
2292 | struct dentry *entry; | 2458 | struct dentry *entry; |
2293 | 2459 | ||
2294 | d_tracer = tracing_init_dentry(); | 2460 | d_tracer = tracing_init_dentry(); |
2295 | 2461 | ||
2296 | entry = debugfs_create_file("tracing_on", 0644, d_tracer, | 2462 | entry = debugfs_create_file("tracing_on", 0644, d_tracer, |
2297 | &ring_buffer_flags, &rb_simple_fops); | 2463 | &ring_buffer_flags, &rb_simple_fops); |
2298 | if (!entry) | 2464 | if (!entry) |
2299 | pr_warning("Could not create debugfs 'tracing_on' entry\n"); | 2465 | pr_warning("Could not create debugfs 'tracing_on' entry\n"); |
2300 | 2466 | ||
2301 | return 0; | 2467 | return 0; |
2302 | } | 2468 | } |
2303 | 2469 | ||
2304 | fs_initcall(rb_init_debugfs); | 2470 | fs_initcall(rb_init_debugfs); |
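
The debugfs file registered above gives userspace a global kill switch for all ring buffers. A small userspace sketch, assuming debugfs is mounted at /sys/kernel/debug so the file appears under the tracing directory as /sys/kernel/debug/tracing/tracing_on:

    #include <fcntl.h>
    #include <unistd.h>

    /* Sketch: write "1" or "0" to the tracing_on control file. */
    static int set_ring_buffers(int on)
    {
            int fd = open("/sys/kernel/debug/tracing/tracing_on", O_WRONLY);
            ssize_t n;

            if (fd < 0)
                    return -1;
            n = write(fd, on ? "1" : "0", 1);
            close(fd);
            return n == 1 ? 0 : -1;
    }
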
2305 | 2471 |