Commit 6a18037d4165f691063b43816be3152e9006eb06

Authored by Robert Richter
1 parent 5a289395bf

oprofile: fixing whitespaces in drivers/oprofile/*

Signed-off-by: Robert Richter <robert.richter@amd.com>

Showing 12 changed files with 60 additions and 60 deletions Inline Diff

drivers/oprofile/buffer_sync.h
1 /** 1 /**
2 * @file buffer_sync.h 2 * @file buffer_sync.h
3 * 3 *
4 * @remark Copyright 2002 OProfile authors 4 * @remark Copyright 2002 OProfile authors
5 * @remark Read the file COPYING 5 * @remark Read the file COPYING
6 * 6 *
7 * @author John Levon <levon@movementarian.org> 7 * @author John Levon <levon@movementarian.org>
8 */ 8 */
9 9
10 #ifndef OPROFILE_BUFFER_SYNC_H 10 #ifndef OPROFILE_BUFFER_SYNC_H
11 #define OPROFILE_BUFFER_SYNC_H 11 #define OPROFILE_BUFFER_SYNC_H
12 12
13 /* add the necessary profiling hooks */ 13 /* add the necessary profiling hooks */
14 int sync_start(void); 14 int sync_start(void);
15 15
16 /* remove the hooks */ 16 /* remove the hooks */
17 void sync_stop(void); 17 void sync_stop(void);
18 18
19 /* sync the given CPU's buffer */ 19 /* sync the given CPU's buffer */
20 void sync_buffer(int cpu); 20 void sync_buffer(int cpu);
21 21
22 #endif /* OPROFILE_BUFFER_SYNC_H */ 22 #endif /* OPROFILE_BUFFER_SYNC_H */
23 23
drivers/oprofile/cpu_buffer.c
1 /** 1 /**
2 * @file cpu_buffer.c 2 * @file cpu_buffer.c
3 * 3 *
4 * @remark Copyright 2002 OProfile authors 4 * @remark Copyright 2002 OProfile authors
5 * @remark Read the file COPYING 5 * @remark Read the file COPYING
6 * 6 *
7 * @author John Levon <levon@movementarian.org> 7 * @author John Levon <levon@movementarian.org>
8 * @author Barry Kasindorf <barry.kasindorf@amd.com> 8 * @author Barry Kasindorf <barry.kasindorf@amd.com>
9 * 9 *
10 * Each CPU has a local buffer that stores PC value/event 10 * Each CPU has a local buffer that stores PC value/event
11 * pairs. We also log context switches when we notice them. 11 * pairs. We also log context switches when we notice them.
12 * Eventually each CPU's buffer is processed into the global 12 * Eventually each CPU's buffer is processed into the global
13 * event buffer by sync_buffer(). 13 * event buffer by sync_buffer().
14 * 14 *
15 * We use a local buffer for two reasons: an NMI or similar 15 * We use a local buffer for two reasons: an NMI or similar
16 * interrupt cannot synchronise, and high sampling rates 16 * interrupt cannot synchronise, and high sampling rates
17 * would lead to catastrophic global synchronisation if 17 * would lead to catastrophic global synchronisation if
18 * a global buffer was used. 18 * a global buffer was used.
19 */ 19 */
20 20
21 #include <linux/sched.h> 21 #include <linux/sched.h>
22 #include <linux/oprofile.h> 22 #include <linux/oprofile.h>
23 #include <linux/vmalloc.h> 23 #include <linux/vmalloc.h>
24 #include <linux/errno.h> 24 #include <linux/errno.h>
25 25
26 #include "event_buffer.h" 26 #include "event_buffer.h"
27 #include "cpu_buffer.h" 27 #include "cpu_buffer.h"
28 #include "buffer_sync.h" 28 #include "buffer_sync.h"
29 #include "oprof.h" 29 #include "oprof.h"
30 30
31 DEFINE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer); 31 DEFINE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
32 32
33 static void wq_sync_buffer(struct work_struct *work); 33 static void wq_sync_buffer(struct work_struct *work);
34 34
35 #define DEFAULT_TIMER_EXPIRE (HZ / 10) 35 #define DEFAULT_TIMER_EXPIRE (HZ / 10)
36 static int work_enabled; 36 static int work_enabled;
37 37
38 void free_cpu_buffers(void) 38 void free_cpu_buffers(void)
39 { 39 {
40 int i; 40 int i;
41 41
42 for_each_online_cpu(i) { 42 for_each_online_cpu(i) {
43 vfree(per_cpu(cpu_buffer, i).buffer); 43 vfree(per_cpu(cpu_buffer, i).buffer);
44 per_cpu(cpu_buffer, i).buffer = NULL; 44 per_cpu(cpu_buffer, i).buffer = NULL;
45 } 45 }
46 } 46 }
47 47
48 int alloc_cpu_buffers(void) 48 int alloc_cpu_buffers(void)
49 { 49 {
50 int i; 50 int i;
51 51
52 unsigned long buffer_size = fs_cpu_buffer_size; 52 unsigned long buffer_size = fs_cpu_buffer_size;
53 53
54 for_each_online_cpu(i) { 54 for_each_online_cpu(i) {
55 struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i); 55 struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
56 56
57 b->buffer = vmalloc_node(sizeof(struct op_sample) * buffer_size, 57 b->buffer = vmalloc_node(sizeof(struct op_sample) * buffer_size,
58 cpu_to_node(i)); 58 cpu_to_node(i));
59 if (!b->buffer) 59 if (!b->buffer)
60 goto fail; 60 goto fail;
61 61
62 b->last_task = NULL; 62 b->last_task = NULL;
63 b->last_is_kernel = -1; 63 b->last_is_kernel = -1;
64 b->tracing = 0; 64 b->tracing = 0;
65 b->buffer_size = buffer_size; 65 b->buffer_size = buffer_size;
66 b->tail_pos = 0; 66 b->tail_pos = 0;
67 b->head_pos = 0; 67 b->head_pos = 0;
68 b->sample_received = 0; 68 b->sample_received = 0;
69 b->sample_lost_overflow = 0; 69 b->sample_lost_overflow = 0;
70 b->backtrace_aborted = 0; 70 b->backtrace_aborted = 0;
71 b->sample_invalid_eip = 0; 71 b->sample_invalid_eip = 0;
72 b->cpu = i; 72 b->cpu = i;
73 INIT_DELAYED_WORK(&b->work, wq_sync_buffer); 73 INIT_DELAYED_WORK(&b->work, wq_sync_buffer);
74 } 74 }
75 return 0; 75 return 0;
76 76
77 fail: 77 fail:
78 free_cpu_buffers(); 78 free_cpu_buffers();
79 return -ENOMEM; 79 return -ENOMEM;
80 } 80 }
81 81
82 void start_cpu_work(void) 82 void start_cpu_work(void)
83 { 83 {
84 int i; 84 int i;
85 85
86 work_enabled = 1; 86 work_enabled = 1;
87 87
88 for_each_online_cpu(i) { 88 for_each_online_cpu(i) {
89 struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i); 89 struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
90 90
91 /* 91 /*
92 * Spread the work by 1 jiffy per cpu so they dont all 92 * Spread the work by 1 jiffy per cpu so they dont all
93 * fire at once. 93 * fire at once.
94 */ 94 */
95 schedule_delayed_work_on(i, &b->work, DEFAULT_TIMER_EXPIRE + i); 95 schedule_delayed_work_on(i, &b->work, DEFAULT_TIMER_EXPIRE + i);
96 } 96 }
97 } 97 }
98 98
99 void end_cpu_work(void) 99 void end_cpu_work(void)
100 { 100 {
101 int i; 101 int i;
102 102
103 work_enabled = 0; 103 work_enabled = 0;
104 104
105 for_each_online_cpu(i) { 105 for_each_online_cpu(i) {
106 struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i); 106 struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
107 107
108 cancel_delayed_work(&b->work); 108 cancel_delayed_work(&b->work);
109 } 109 }
110 110
111 flush_scheduled_work(); 111 flush_scheduled_work();
112 } 112 }
113 113
114 /* Resets the cpu buffer to a sane state. */ 114 /* Resets the cpu buffer to a sane state. */
115 void cpu_buffer_reset(struct oprofile_cpu_buffer *cpu_buf) 115 void cpu_buffer_reset(struct oprofile_cpu_buffer *cpu_buf)
116 { 116 {
117 /* reset these to invalid values; the next sample 117 /* reset these to invalid values; the next sample
118 * collected will populate the buffer with proper 118 * collected will populate the buffer with proper
119 * values to initialize the buffer 119 * values to initialize the buffer
120 */ 120 */
121 cpu_buf->last_is_kernel = -1; 121 cpu_buf->last_is_kernel = -1;
122 cpu_buf->last_task = NULL; 122 cpu_buf->last_task = NULL;
123 } 123 }
124 124
125 /* compute number of available slots in cpu_buffer queue */ 125 /* compute number of available slots in cpu_buffer queue */
126 static unsigned long nr_available_slots(struct oprofile_cpu_buffer const *b) 126 static unsigned long nr_available_slots(struct oprofile_cpu_buffer const *b)
127 { 127 {
128 unsigned long head = b->head_pos; 128 unsigned long head = b->head_pos;
129 unsigned long tail = b->tail_pos; 129 unsigned long tail = b->tail_pos;
130 130
131 if (tail > head) 131 if (tail > head)
132 return (tail - head) - 1; 132 return (tail - head) - 1;
133 133
134 return tail + (b->buffer_size - head) - 1; 134 return tail + (b->buffer_size - head) - 1;
135 } 135 }
136 136
137 static void increment_head(struct oprofile_cpu_buffer *b) 137 static void increment_head(struct oprofile_cpu_buffer *b)
138 { 138 {
139 unsigned long new_head = b->head_pos + 1; 139 unsigned long new_head = b->head_pos + 1;
140 140
141 /* Ensure anything written to the slot before we 141 /* Ensure anything written to the slot before we
142 * increment is visible */ 142 * increment is visible */
143 wmb(); 143 wmb();
144 144
145 if (new_head < b->buffer_size) 145 if (new_head < b->buffer_size)
146 b->head_pos = new_head; 146 b->head_pos = new_head;
147 else 147 else
148 b->head_pos = 0; 148 b->head_pos = 0;
149 } 149 }
150 150
151 static inline void 151 static inline void
152 add_sample(struct oprofile_cpu_buffer *cpu_buf, 152 add_sample(struct oprofile_cpu_buffer *cpu_buf,
153 unsigned long pc, unsigned long event) 153 unsigned long pc, unsigned long event)
154 { 154 {
155 struct op_sample *entry = &cpu_buf->buffer[cpu_buf->head_pos]; 155 struct op_sample *entry = &cpu_buf->buffer[cpu_buf->head_pos];
156 entry->eip = pc; 156 entry->eip = pc;
157 entry->event = event; 157 entry->event = event;
158 increment_head(cpu_buf); 158 increment_head(cpu_buf);
159 } 159 }
160 160
161 static inline void 161 static inline void
162 add_code(struct oprofile_cpu_buffer *buffer, unsigned long value) 162 add_code(struct oprofile_cpu_buffer *buffer, unsigned long value)
163 { 163 {
164 add_sample(buffer, ESCAPE_CODE, value); 164 add_sample(buffer, ESCAPE_CODE, value);
165 } 165 }
166 166
167 /* This must be safe from any context. It's safe writing here 167 /* This must be safe from any context. It's safe writing here
168 * because of the head/tail separation of the writer and reader 168 * because of the head/tail separation of the writer and reader
169 * of the CPU buffer. 169 * of the CPU buffer.
170 * 170 *
171 * is_kernel is needed because on some architectures you cannot 171 * is_kernel is needed because on some architectures you cannot
172 * tell if you are in kernel or user space simply by looking at 172 * tell if you are in kernel or user space simply by looking at
173 * pc. We tag this in the buffer by generating kernel enter/exit 173 * pc. We tag this in the buffer by generating kernel enter/exit
174 * events whenever is_kernel changes 174 * events whenever is_kernel changes
175 */ 175 */
176 static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc, 176 static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
177 int is_kernel, unsigned long event) 177 int is_kernel, unsigned long event)
178 { 178 {
179 struct task_struct *task; 179 struct task_struct *task;
180 180
181 cpu_buf->sample_received++; 181 cpu_buf->sample_received++;
182 182
183 if (pc == ESCAPE_CODE) { 183 if (pc == ESCAPE_CODE) {
184 cpu_buf->sample_invalid_eip++; 184 cpu_buf->sample_invalid_eip++;
185 return 0; 185 return 0;
186 } 186 }
187 187
188 if (nr_available_slots(cpu_buf) < 3) { 188 if (nr_available_slots(cpu_buf) < 3) {
189 cpu_buf->sample_lost_overflow++; 189 cpu_buf->sample_lost_overflow++;
190 return 0; 190 return 0;
191 } 191 }
192 192
193 is_kernel = !!is_kernel; 193 is_kernel = !!is_kernel;
194 194
195 task = current; 195 task = current;
196 196
197 /* notice a switch from user->kernel or vice versa */ 197 /* notice a switch from user->kernel or vice versa */
198 if (cpu_buf->last_is_kernel != is_kernel) { 198 if (cpu_buf->last_is_kernel != is_kernel) {
199 cpu_buf->last_is_kernel = is_kernel; 199 cpu_buf->last_is_kernel = is_kernel;
200 add_code(cpu_buf, is_kernel); 200 add_code(cpu_buf, is_kernel);
201 } 201 }
202 202
203 /* notice a task switch */ 203 /* notice a task switch */
204 if (cpu_buf->last_task != task) { 204 if (cpu_buf->last_task != task) {
205 cpu_buf->last_task = task; 205 cpu_buf->last_task = task;
206 add_code(cpu_buf, (unsigned long)task); 206 add_code(cpu_buf, (unsigned long)task);
207 } 207 }
208 208
209 add_sample(cpu_buf, pc, event); 209 add_sample(cpu_buf, pc, event);
210 return 1; 210 return 1;
211 } 211 }
212 212
213 static int oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf) 213 static int oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
214 { 214 {
215 if (nr_available_slots(cpu_buf) < 4) { 215 if (nr_available_slots(cpu_buf) < 4) {
216 cpu_buf->sample_lost_overflow++; 216 cpu_buf->sample_lost_overflow++;
217 return 0; 217 return 0;
218 } 218 }
219 219
220 add_code(cpu_buf, CPU_TRACE_BEGIN); 220 add_code(cpu_buf, CPU_TRACE_BEGIN);
221 cpu_buf->tracing = 1; 221 cpu_buf->tracing = 1;
222 return 1; 222 return 1;
223 } 223 }
224 224
225 static void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf) 225 static void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
226 { 226 {
227 cpu_buf->tracing = 0; 227 cpu_buf->tracing = 0;
228 } 228 }
229 229
230 void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs, 230 void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
231 unsigned long event, int is_kernel) 231 unsigned long event, int is_kernel)
232 { 232 {
233 struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer); 233 struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
234 234
235 if (!backtrace_depth) { 235 if (!backtrace_depth) {
236 log_sample(cpu_buf, pc, is_kernel, event); 236 log_sample(cpu_buf, pc, is_kernel, event);
237 return; 237 return;
238 } 238 }
239 239
240 if (!oprofile_begin_trace(cpu_buf)) 240 if (!oprofile_begin_trace(cpu_buf))
241 return; 241 return;
242 242
243 /* if log_sample() fail we can't backtrace since we lost the source 243 /* if log_sample() fail we can't backtrace since we lost the source
244 * of this event */ 244 * of this event */
245 if (log_sample(cpu_buf, pc, is_kernel, event)) 245 if (log_sample(cpu_buf, pc, is_kernel, event))
246 oprofile_ops.backtrace(regs, backtrace_depth); 246 oprofile_ops.backtrace(regs, backtrace_depth);
247 oprofile_end_trace(cpu_buf); 247 oprofile_end_trace(cpu_buf);
248 } 248 }
249 249
250 void oprofile_add_sample(struct pt_regs * const regs, unsigned long event) 250 void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
251 { 251 {
252 int is_kernel = !user_mode(regs); 252 int is_kernel = !user_mode(regs);
253 unsigned long pc = profile_pc(regs); 253 unsigned long pc = profile_pc(regs);
254 254
255 oprofile_add_ext_sample(pc, regs, event, is_kernel); 255 oprofile_add_ext_sample(pc, regs, event, is_kernel);
256 } 256 }
257 257
258 #ifdef CONFIG_OPROFILE_IBS 258 #ifdef CONFIG_OPROFILE_IBS
259 259
260 #define MAX_IBS_SAMPLE_SIZE 14 260 #define MAX_IBS_SAMPLE_SIZE 14
261 261
262 void oprofile_add_ibs_sample(struct pt_regs *const regs, 262 void oprofile_add_ibs_sample(struct pt_regs *const regs,
263 unsigned int *const ibs_sample, int ibs_code) 263 unsigned int *const ibs_sample, int ibs_code)
264 { 264 {
265 int is_kernel = !user_mode(regs); 265 int is_kernel = !user_mode(regs);
266 struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer); 266 struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
267 struct task_struct *task; 267 struct task_struct *task;
268 268
269 cpu_buf->sample_received++; 269 cpu_buf->sample_received++;
270 270
271 if (nr_available_slots(cpu_buf) < MAX_IBS_SAMPLE_SIZE) { 271 if (nr_available_slots(cpu_buf) < MAX_IBS_SAMPLE_SIZE) {
272 /* we can't backtrace since we lost the source of this event */ 272 /* we can't backtrace since we lost the source of this event */
273 cpu_buf->sample_lost_overflow++; 273 cpu_buf->sample_lost_overflow++;
274 return; 274 return;
275 } 275 }
276 276
277 /* notice a switch from user->kernel or vice versa */ 277 /* notice a switch from user->kernel or vice versa */
278 if (cpu_buf->last_is_kernel != is_kernel) { 278 if (cpu_buf->last_is_kernel != is_kernel) {
279 cpu_buf->last_is_kernel = is_kernel; 279 cpu_buf->last_is_kernel = is_kernel;
280 add_code(cpu_buf, is_kernel); 280 add_code(cpu_buf, is_kernel);
281 } 281 }
282 282
283 /* notice a task switch */ 283 /* notice a task switch */
284 if (!is_kernel) { 284 if (!is_kernel) {
285 task = current; 285 task = current;
286 if (cpu_buf->last_task != task) { 286 if (cpu_buf->last_task != task) {
287 cpu_buf->last_task = task; 287 cpu_buf->last_task = task;
288 add_code(cpu_buf, (unsigned long)task); 288 add_code(cpu_buf, (unsigned long)task);
289 } 289 }
290 } 290 }
291 291
292 add_code(cpu_buf, ibs_code); 292 add_code(cpu_buf, ibs_code);
293 add_sample(cpu_buf, ibs_sample[0], ibs_sample[1]); 293 add_sample(cpu_buf, ibs_sample[0], ibs_sample[1]);
294 add_sample(cpu_buf, ibs_sample[2], ibs_sample[3]); 294 add_sample(cpu_buf, ibs_sample[2], ibs_sample[3]);
295 add_sample(cpu_buf, ibs_sample[4], ibs_sample[5]); 295 add_sample(cpu_buf, ibs_sample[4], ibs_sample[5]);
296 296
297 if (ibs_code == IBS_OP_BEGIN) { 297 if (ibs_code == IBS_OP_BEGIN) {
298 add_sample(cpu_buf, ibs_sample[6], ibs_sample[7]); 298 add_sample(cpu_buf, ibs_sample[6], ibs_sample[7]);
299 add_sample(cpu_buf, ibs_sample[8], ibs_sample[9]); 299 add_sample(cpu_buf, ibs_sample[8], ibs_sample[9]);
300 add_sample(cpu_buf, ibs_sample[10], ibs_sample[11]); 300 add_sample(cpu_buf, ibs_sample[10], ibs_sample[11]);
301 } 301 }
302 302
303 if (backtrace_depth) 303 if (backtrace_depth)
304 oprofile_ops.backtrace(regs, backtrace_depth); 304 oprofile_ops.backtrace(regs, backtrace_depth);
305 } 305 }
306 306
307 #endif 307 #endif
308 308
309 void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event) 309 void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
310 { 310 {
311 struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer); 311 struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
312 log_sample(cpu_buf, pc, is_kernel, event); 312 log_sample(cpu_buf, pc, is_kernel, event);
313 } 313 }
314 314
315 void oprofile_add_trace(unsigned long pc) 315 void oprofile_add_trace(unsigned long pc)
316 { 316 {
317 struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer); 317 struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
318 318
319 if (!cpu_buf->tracing) 319 if (!cpu_buf->tracing)
320 return; 320 return;
321 321
322 if (nr_available_slots(cpu_buf) < 1) { 322 if (nr_available_slots(cpu_buf) < 1) {
323 cpu_buf->tracing = 0; 323 cpu_buf->tracing = 0;
324 cpu_buf->sample_lost_overflow++; 324 cpu_buf->sample_lost_overflow++;
325 return; 325 return;
326 } 326 }
327 327
328 /* broken frame can give an eip with the same value as an escape code, 328 /* broken frame can give an eip with the same value as an escape code,
329 * abort the trace if we get it */ 329 * abort the trace if we get it */
330 if (pc == ESCAPE_CODE) { 330 if (pc == ESCAPE_CODE) {
331 cpu_buf->tracing = 0; 331 cpu_buf->tracing = 0;
332 cpu_buf->backtrace_aborted++; 332 cpu_buf->backtrace_aborted++;
333 return; 333 return;
334 } 334 }
335 335
336 add_sample(cpu_buf, pc, 0); 336 add_sample(cpu_buf, pc, 0);
337 } 337 }
338 338
339 /* 339 /*
340 * This serves to avoid cpu buffer overflow, and makes sure 340 * This serves to avoid cpu buffer overflow, and makes sure
341 * the task mortuary progresses 341 * the task mortuary progresses
342 * 342 *
343 * By using schedule_delayed_work_on and then schedule_delayed_work 343 * By using schedule_delayed_work_on and then schedule_delayed_work
344 * we guarantee this will stay on the correct cpu 344 * we guarantee this will stay on the correct cpu
345 */ 345 */
346 static void wq_sync_buffer(struct work_struct *work) 346 static void wq_sync_buffer(struct work_struct *work)
347 { 347 {
348 struct oprofile_cpu_buffer *b = 348 struct oprofile_cpu_buffer *b =
349 container_of(work, struct oprofile_cpu_buffer, work.work); 349 container_of(work, struct oprofile_cpu_buffer, work.work);
350 if (b->cpu != smp_processor_id()) { 350 if (b->cpu != smp_processor_id()) {
351 printk(KERN_DEBUG "WQ on CPU%d, prefer CPU%d\n", 351 printk(KERN_DEBUG "WQ on CPU%d, prefer CPU%d\n",
352 smp_processor_id(), b->cpu); 352 smp_processor_id(), b->cpu);
353 } 353 }
354 sync_buffer(b->cpu); 354 sync_buffer(b->cpu);
355 355
356 /* don't re-add the work if we're shutting down */ 356 /* don't re-add the work if we're shutting down */
357 if (work_enabled) 357 if (work_enabled)
358 schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE); 358 schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE);
359 } 359 }
360 360
drivers/oprofile/cpu_buffer.h
1 /** 1 /**
2 * @file cpu_buffer.h 2 * @file cpu_buffer.h
3 * 3 *
4 * @remark Copyright 2002 OProfile authors 4 * @remark Copyright 2002 OProfile authors
5 * @remark Read the file COPYING 5 * @remark Read the file COPYING
6 * 6 *
7 * @author John Levon <levon@movementarian.org> 7 * @author John Levon <levon@movementarian.org>
8 */ 8 */
9 9
10 #ifndef OPROFILE_CPU_BUFFER_H 10 #ifndef OPROFILE_CPU_BUFFER_H
11 #define OPROFILE_CPU_BUFFER_H 11 #define OPROFILE_CPU_BUFFER_H
12 12
13 #include <linux/types.h> 13 #include <linux/types.h>
14 #include <linux/spinlock.h> 14 #include <linux/spinlock.h>
15 #include <linux/workqueue.h> 15 #include <linux/workqueue.h>
16 #include <linux/cache.h> 16 #include <linux/cache.h>
17 #include <linux/sched.h> 17 #include <linux/sched.h>
18 18
19 struct task_struct; 19 struct task_struct;
20 20
21 int alloc_cpu_buffers(void); 21 int alloc_cpu_buffers(void);
22 void free_cpu_buffers(void); 22 void free_cpu_buffers(void);
23 23
24 void start_cpu_work(void); 24 void start_cpu_work(void);
25 void end_cpu_work(void); 25 void end_cpu_work(void);
26 26
27 /* CPU buffer is composed of such entries (which are 27 /* CPU buffer is composed of such entries (which are
28 * also used for context switch notes) 28 * also used for context switch notes)
29 */ 29 */
30 struct op_sample { 30 struct op_sample {
31 unsigned long eip; 31 unsigned long eip;
32 unsigned long event; 32 unsigned long event;
33 }; 33 };
34 34
35 struct oprofile_cpu_buffer { 35 struct oprofile_cpu_buffer {
36 volatile unsigned long head_pos; 36 volatile unsigned long head_pos;
37 volatile unsigned long tail_pos; 37 volatile unsigned long tail_pos;
38 unsigned long buffer_size; 38 unsigned long buffer_size;
39 struct task_struct *last_task; 39 struct task_struct *last_task;
40 int last_is_kernel; 40 int last_is_kernel;
41 int tracing; 41 int tracing;
42 struct op_sample *buffer; 42 struct op_sample *buffer;
43 unsigned long sample_received; 43 unsigned long sample_received;
44 unsigned long sample_lost_overflow; 44 unsigned long sample_lost_overflow;
45 unsigned long backtrace_aborted; 45 unsigned long backtrace_aborted;
46 unsigned long sample_invalid_eip; 46 unsigned long sample_invalid_eip;
47 int cpu; 47 int cpu;
48 struct delayed_work work; 48 struct delayed_work work;
49 }; 49 };
50 50
51 DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer); 51 DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
52 52
53 void cpu_buffer_reset(struct oprofile_cpu_buffer *cpu_buf); 53 void cpu_buffer_reset(struct oprofile_cpu_buffer *cpu_buf);
54 54
55 /* transient events for the CPU buffer -> event buffer */ 55 /* transient events for the CPU buffer -> event buffer */
56 #define CPU_IS_KERNEL 1 56 #define CPU_IS_KERNEL 1
57 #define CPU_TRACE_BEGIN 2 57 #define CPU_TRACE_BEGIN 2
58 #define IBS_FETCH_BEGIN 3 58 #define IBS_FETCH_BEGIN 3
59 #define IBS_OP_BEGIN 4 59 #define IBS_OP_BEGIN 4
60 60
61 #endif /* OPROFILE_CPU_BUFFER_H */ 61 #endif /* OPROFILE_CPU_BUFFER_H */
62 62
drivers/oprofile/event_buffer.c
1 /** 1 /**
2 * @file event_buffer.c 2 * @file event_buffer.c
3 * 3 *
4 * @remark Copyright 2002 OProfile authors 4 * @remark Copyright 2002 OProfile authors
5 * @remark Read the file COPYING 5 * @remark Read the file COPYING
6 * 6 *
7 * @author John Levon <levon@movementarian.org> 7 * @author John Levon <levon@movementarian.org>
8 * 8 *
9 * This is the global event buffer that the user-space 9 * This is the global event buffer that the user-space
10 * daemon reads from. The event buffer is an untyped array 10 * daemon reads from. The event buffer is an untyped array
11 * of unsigned longs. Entries are prefixed by the 11 * of unsigned longs. Entries are prefixed by the
12 * escape value ESCAPE_CODE followed by an identifying code. 12 * escape value ESCAPE_CODE followed by an identifying code.
13 */ 13 */
14 14
15 #include <linux/vmalloc.h> 15 #include <linux/vmalloc.h>
16 #include <linux/oprofile.h> 16 #include <linux/oprofile.h>
17 #include <linux/sched.h> 17 #include <linux/sched.h>
18 #include <linux/capability.h> 18 #include <linux/capability.h>
19 #include <linux/dcookies.h> 19 #include <linux/dcookies.h>
20 #include <linux/fs.h> 20 #include <linux/fs.h>
21 #include <asm/uaccess.h> 21 #include <asm/uaccess.h>
22 22
23 #include "oprof.h" 23 #include "oprof.h"
24 #include "event_buffer.h" 24 #include "event_buffer.h"
25 #include "oprofile_stats.h" 25 #include "oprofile_stats.h"
26 26
27 DEFINE_MUTEX(buffer_mutex); 27 DEFINE_MUTEX(buffer_mutex);
28 28
29 static unsigned long buffer_opened; 29 static unsigned long buffer_opened;
30 static DECLARE_WAIT_QUEUE_HEAD(buffer_wait); 30 static DECLARE_WAIT_QUEUE_HEAD(buffer_wait);
31 static unsigned long *event_buffer; 31 static unsigned long *event_buffer;
32 static unsigned long buffer_size; 32 static unsigned long buffer_size;
33 static unsigned long buffer_watershed; 33 static unsigned long buffer_watershed;
34 static size_t buffer_pos; 34 static size_t buffer_pos;
35 /* atomic_t because wait_event checks it outside of buffer_mutex */ 35 /* atomic_t because wait_event checks it outside of buffer_mutex */
36 static atomic_t buffer_ready = ATOMIC_INIT(0); 36 static atomic_t buffer_ready = ATOMIC_INIT(0);
37 37
38 /* Add an entry to the event buffer. When we 38 /* Add an entry to the event buffer. When we
39 * get near to the end we wake up the process 39 * get near to the end we wake up the process
40 * sleeping on the read() of the file. 40 * sleeping on the read() of the file.
41 */ 41 */
42 void add_event_entry(unsigned long value) 42 void add_event_entry(unsigned long value)
43 { 43 {
44 if (buffer_pos == buffer_size) { 44 if (buffer_pos == buffer_size) {
45 atomic_inc(&oprofile_stats.event_lost_overflow); 45 atomic_inc(&oprofile_stats.event_lost_overflow);
46 return; 46 return;
47 } 47 }
48 48
49 event_buffer[buffer_pos] = value; 49 event_buffer[buffer_pos] = value;
50 if (++buffer_pos == buffer_size - buffer_watershed) { 50 if (++buffer_pos == buffer_size - buffer_watershed) {
51 atomic_set(&buffer_ready, 1); 51 atomic_set(&buffer_ready, 1);
52 wake_up(&buffer_wait); 52 wake_up(&buffer_wait);
53 } 53 }
54 } 54 }
55 55
56 56
57 /* Wake up the waiting process if any. This happens 57 /* Wake up the waiting process if any. This happens
58 * on "echo 0 >/dev/oprofile/enable" so the daemon 58 * on "echo 0 >/dev/oprofile/enable" so the daemon
59 * processes the data remaining in the event buffer. 59 * processes the data remaining in the event buffer.
60 */ 60 */
61 void wake_up_buffer_waiter(void) 61 void wake_up_buffer_waiter(void)
62 { 62 {
63 mutex_lock(&buffer_mutex); 63 mutex_lock(&buffer_mutex);
64 atomic_set(&buffer_ready, 1); 64 atomic_set(&buffer_ready, 1);
65 wake_up(&buffer_wait); 65 wake_up(&buffer_wait);
66 mutex_unlock(&buffer_mutex); 66 mutex_unlock(&buffer_mutex);
67 } 67 }
68 68
69 69
70 int alloc_event_buffer(void) 70 int alloc_event_buffer(void)
71 { 71 {
72 int err = -ENOMEM; 72 int err = -ENOMEM;
73 unsigned long flags; 73 unsigned long flags;
74 74
75 spin_lock_irqsave(&oprofilefs_lock, flags); 75 spin_lock_irqsave(&oprofilefs_lock, flags);
76 buffer_size = fs_buffer_size; 76 buffer_size = fs_buffer_size;
77 buffer_watershed = fs_buffer_watershed; 77 buffer_watershed = fs_buffer_watershed;
78 spin_unlock_irqrestore(&oprofilefs_lock, flags); 78 spin_unlock_irqrestore(&oprofilefs_lock, flags);
79 79
80 if (buffer_watershed >= buffer_size) 80 if (buffer_watershed >= buffer_size)
81 return -EINVAL; 81 return -EINVAL;
82 82
83 event_buffer = vmalloc(sizeof(unsigned long) * buffer_size); 83 event_buffer = vmalloc(sizeof(unsigned long) * buffer_size);
84 if (!event_buffer) 84 if (!event_buffer)
85 goto out; 85 goto out;
86 86
87 err = 0; 87 err = 0;
88 out: 88 out:
89 return err; 89 return err;
90 } 90 }
91 91
92 92
93 void free_event_buffer(void) 93 void free_event_buffer(void)
94 { 94 {
95 vfree(event_buffer); 95 vfree(event_buffer);
96 96
97 event_buffer = NULL; 97 event_buffer = NULL;
98 } 98 }
99 99
100 100
101 static int event_buffer_open(struct inode *inode, struct file *file) 101 static int event_buffer_open(struct inode *inode, struct file *file)
102 { 102 {
103 int err = -EPERM; 103 int err = -EPERM;
104 104
105 if (!capable(CAP_SYS_ADMIN)) 105 if (!capable(CAP_SYS_ADMIN))
106 return -EPERM; 106 return -EPERM;
107 107
108 if (test_and_set_bit(0, &buffer_opened)) 108 if (test_and_set_bit(0, &buffer_opened))
109 return -EBUSY; 109 return -EBUSY;
110 110
111 /* Register as a user of dcookies 111 /* Register as a user of dcookies
112 * to ensure they persist for the lifetime of 112 * to ensure they persist for the lifetime of
113 * the open event file 113 * the open event file
114 */ 114 */
115 err = -EINVAL; 115 err = -EINVAL;
116 file->private_data = dcookie_register(); 116 file->private_data = dcookie_register();
117 if (!file->private_data) 117 if (!file->private_data)
118 goto out; 118 goto out;
119 119
120 if ((err = oprofile_setup())) 120 if ((err = oprofile_setup()))
121 goto fail; 121 goto fail;
122 122
123 /* NB: the actual start happens from userspace 123 /* NB: the actual start happens from userspace
124 * echo 1 >/dev/oprofile/enable 124 * echo 1 >/dev/oprofile/enable
125 */ 125 */
126 126
127 return 0; 127 return 0;
128 128
129 fail: 129 fail:
130 dcookie_unregister(file->private_data); 130 dcookie_unregister(file->private_data);
131 out: 131 out:
132 clear_bit(0, &buffer_opened); 132 clear_bit(0, &buffer_opened);
133 return err; 133 return err;
134 } 134 }
135 135
136 136
137 static int event_buffer_release(struct inode *inode, struct file *file) 137 static int event_buffer_release(struct inode *inode, struct file *file)
138 { 138 {
139 oprofile_stop(); 139 oprofile_stop();
140 oprofile_shutdown(); 140 oprofile_shutdown();
141 dcookie_unregister(file->private_data); 141 dcookie_unregister(file->private_data);
142 buffer_pos = 0; 142 buffer_pos = 0;
143 atomic_set(&buffer_ready, 0); 143 atomic_set(&buffer_ready, 0);
144 clear_bit(0, &buffer_opened); 144 clear_bit(0, &buffer_opened);
145 return 0; 145 return 0;
146 } 146 }
147 147
148 148
/*
 * Read the whole event buffer in one shot.  The caller must request
 * exactly the full buffer size; the read blocks until the buffer is
 * flagged ready, then copies out only what has actually been logged.
 */
static ssize_t event_buffer_read(struct file *file, char __user *buf,
				 size_t count, loff_t *offset)
{
	int retval = -EINVAL;
	size_t const max = buffer_size * sizeof(unsigned long);

	/* handling partial reads is more trouble than it's worth */
	if (count != max || *offset)
		return -EINVAL;

	wait_event_interruptible(buffer_wait, atomic_read(&buffer_ready));

	/* report an interrupted sleep as -EINTR (the return value of
	 * wait_event_interruptible() itself is deliberately unused) */
	if (signal_pending(current))
		return -EINTR;

	/* can't currently happen */
	if (!atomic_read(&buffer_ready))
		return -EAGAIN;

	/* serialise against the code filling the buffer (buffer_mutex
	 * is shared with the sync path, see event_buffer.h) */
	mutex_lock(&buffer_mutex);

	atomic_set(&buffer_ready, 0);

	retval = -EFAULT;

	/* copy out only the portion that has been written so far */
	count = buffer_pos * sizeof(unsigned long);

	if (copy_to_user(buf, event_buffer, count))
		goto out;

	retval = count;
	buffer_pos = 0;

 out:
	mutex_unlock(&buffer_mutex);
	return retval;
}
186 186
/* fops for the "buffer" file created in oprofile_create_files() */
const struct file_operations event_buffer_fops = {
	.open		= event_buffer_open,
	.release	= event_buffer_release,
	.read		= event_buffer_read,
};
192 192
drivers/oprofile/event_buffer.h
/**
 * @file event_buffer.h
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 */

#ifndef EVENT_BUFFER_H
#define EVENT_BUFFER_H

#include <linux/types.h>
#include <asm/mutex.h>

/* allocate/free the single global event buffer */
int alloc_event_buffer(void);

void free_event_buffer(void);

/* wake up the process sleeping on the event file */
void wake_up_buffer_waiter(void);

/* sentinel cookie values that cannot clash with a real dcookie */
#define INVALID_COOKIE ~0UL
#define NO_COOKIE 0UL

extern const struct file_operations event_buffer_fops;

/* mutex between sync_cpu_buffers() and the
 * file reading code.
 */
extern struct mutex buffer_mutex;

#endif /* EVENT_BUFFER_H */
34 34
drivers/oprofile/oprof.c
1 /** 1 /**
2 * @file oprof.c 2 * @file oprof.c
3 * 3 *
4 * @remark Copyright 2002 OProfile authors 4 * @remark Copyright 2002 OProfile authors
5 * @remark Read the file COPYING 5 * @remark Read the file COPYING
6 * 6 *
7 * @author John Levon <levon@movementarian.org> 7 * @author John Levon <levon@movementarian.org>
8 */ 8 */
9 9
10 #include <linux/kernel.h> 10 #include <linux/kernel.h>
11 #include <linux/module.h> 11 #include <linux/module.h>
12 #include <linux/init.h> 12 #include <linux/init.h>
13 #include <linux/oprofile.h> 13 #include <linux/oprofile.h>
14 #include <linux/moduleparam.h> 14 #include <linux/moduleparam.h>
15 #include <asm/mutex.h> 15 #include <asm/mutex.h>
16 16
17 #include "oprof.h" 17 #include "oprof.h"
18 #include "event_buffer.h" 18 #include "event_buffer.h"
19 #include "cpu_buffer.h" 19 #include "cpu_buffer.h"
20 #include "buffer_sync.h" 20 #include "buffer_sync.h"
21 #include "oprofile_stats.h" 21 #include "oprofile_stats.h"
22 22
/* the profiling backend currently in use (hardware or timer) */
struct oprofile_operations oprofile_ops;

/* non-zero while profiling is running; modified under start_mutex */
unsigned long oprofile_started;
/* requested backtrace depth (see oprofile_set_backtrace()) */
unsigned long backtrace_depth;
/* set once oprofile_setup() has completed successfully */
static unsigned long is_setup;
/* serialises setup/start/stop/shutdown transitions */
static DEFINE_MUTEX(start_mutex);
29 29
/* timer
 * 0 - use performance monitoring hardware if available
 * 1 - use the timer int mechanism regardless
 *
 * Static storage is zero-initialised by the C runtime, so the
 * explicit "= 0" was redundant (and flagged by checkpatch).
 */
static int timer;
35 35
36 int oprofile_setup(void) 36 int oprofile_setup(void)
37 { 37 {
38 int err; 38 int err;
39 39
40 mutex_lock(&start_mutex); 40 mutex_lock(&start_mutex);
41 41
42 if ((err = alloc_cpu_buffers())) 42 if ((err = alloc_cpu_buffers()))
43 goto out; 43 goto out;
44 44
45 if ((err = alloc_event_buffer())) 45 if ((err = alloc_event_buffer()))
46 goto out1; 46 goto out1;
47 47
48 if (oprofile_ops.setup && (err = oprofile_ops.setup())) 48 if (oprofile_ops.setup && (err = oprofile_ops.setup()))
49 goto out2; 49 goto out2;
50 50
51 /* Note even though this starts part of the 51 /* Note even though this starts part of the
52 * profiling overhead, it's necessary to prevent 52 * profiling overhead, it's necessary to prevent
53 * us missing task deaths and eventually oopsing 53 * us missing task deaths and eventually oopsing
54 * when trying to process the event buffer. 54 * when trying to process the event buffer.
55 */ 55 */
56 if (oprofile_ops.sync_start) { 56 if (oprofile_ops.sync_start) {
57 int sync_ret = oprofile_ops.sync_start(); 57 int sync_ret = oprofile_ops.sync_start();
58 switch (sync_ret) { 58 switch (sync_ret) {
59 case 0: 59 case 0:
60 goto post_sync; 60 goto post_sync;
61 case 1: 61 case 1:
62 goto do_generic; 62 goto do_generic;
63 case -1: 63 case -1:
64 goto out3; 64 goto out3;
65 default: 65 default:
66 goto out3; 66 goto out3;
67 } 67 }
68 } 68 }
69 do_generic: 69 do_generic:
70 if ((err = sync_start())) 70 if ((err = sync_start()))
71 goto out3; 71 goto out3;
72 72
73 post_sync: 73 post_sync:
74 is_setup = 1; 74 is_setup = 1;
75 mutex_unlock(&start_mutex); 75 mutex_unlock(&start_mutex);
76 return 0; 76 return 0;
77 77
78 out3: 78 out3:
79 if (oprofile_ops.shutdown) 79 if (oprofile_ops.shutdown)
80 oprofile_ops.shutdown(); 80 oprofile_ops.shutdown();
81 out2: 81 out2:
82 free_event_buffer(); 82 free_event_buffer();
83 out1: 83 out1:
84 free_cpu_buffers(); 84 free_cpu_buffers();
85 out: 85 out:
86 mutex_unlock(&start_mutex); 86 mutex_unlock(&start_mutex);
87 return err; 87 return err;
88 } 88 }
89 89
90 90
91 /* Actually start profiling (echo 1>/dev/oprofile/enable) */ 91 /* Actually start profiling (echo 1>/dev/oprofile/enable) */
92 int oprofile_start(void) 92 int oprofile_start(void)
93 { 93 {
94 int err = -EINVAL; 94 int err = -EINVAL;
95 95
96 mutex_lock(&start_mutex); 96 mutex_lock(&start_mutex);
97 97
98 if (!is_setup) 98 if (!is_setup)
99 goto out; 99 goto out;
100 100
101 err = 0; 101 err = 0;
102 102
103 if (oprofile_started) 103 if (oprofile_started)
104 goto out; 104 goto out;
105 105
106 oprofile_reset_stats(); 106 oprofile_reset_stats();
107 107
108 if ((err = oprofile_ops.start())) 108 if ((err = oprofile_ops.start()))
109 goto out; 109 goto out;
110 110
111 oprofile_started = 1; 111 oprofile_started = 1;
112 out: 112 out:
113 mutex_unlock(&start_mutex); 113 mutex_unlock(&start_mutex);
114 return err; 114 return err;
115 } 115 }
116 116
117 117
118 /* echo 0>/dev/oprofile/enable */ 118 /* echo 0>/dev/oprofile/enable */
119 void oprofile_stop(void) 119 void oprofile_stop(void)
120 { 120 {
121 mutex_lock(&start_mutex); 121 mutex_lock(&start_mutex);
122 if (!oprofile_started) 122 if (!oprofile_started)
123 goto out; 123 goto out;
124 oprofile_ops.stop(); 124 oprofile_ops.stop();
125 oprofile_started = 0; 125 oprofile_started = 0;
126 /* wake up the daemon to read what remains */ 126 /* wake up the daemon to read what remains */
127 wake_up_buffer_waiter(); 127 wake_up_buffer_waiter();
128 out: 128 out:
129 mutex_unlock(&start_mutex); 129 mutex_unlock(&start_mutex);
130 } 130 }
131 131
132 132
/* Undo oprofile_setup(): unhook the buffer sync, shut the backend
 * down and free all buffers.  The sync_stop() switch mirrors the
 * sync_start() protocol in oprofile_setup(): 0 means the backend
 * handled it, 1 means run the generic sync_stop(). */
void oprofile_shutdown(void)
{
	mutex_lock(&start_mutex);
	if (oprofile_ops.sync_stop) {
		int sync_ret = oprofile_ops.sync_stop();
		switch (sync_ret) {
		case 0:
			goto post_sync;
		case 1:
			goto do_generic;
		default:
			/* unknown result: skip the generic sync_stop() */
			goto post_sync;
		}
	}
do_generic:
	sync_stop();
post_sync:
	if (oprofile_ops.shutdown)
		oprofile_ops.shutdown();
	is_setup = 0;
	free_event_buffer();
	free_cpu_buffers();
	mutex_unlock(&start_mutex);
}
157 157
158 158
159 int oprofile_set_backtrace(unsigned long val) 159 int oprofile_set_backtrace(unsigned long val)
160 { 160 {
161 int err = 0; 161 int err = 0;
162 162
163 mutex_lock(&start_mutex); 163 mutex_lock(&start_mutex);
164 164
165 if (oprofile_started) { 165 if (oprofile_started) {
166 err = -EBUSY; 166 err = -EBUSY;
167 goto out; 167 goto out;
168 } 168 }
169 169
170 if (!oprofile_ops.backtrace) { 170 if (!oprofile_ops.backtrace) {
171 err = -EINVAL; 171 err = -EINVAL;
172 goto out; 172 goto out;
173 } 173 }
174 174
175 backtrace_depth = val; 175 backtrace_depth = val;
176 176
177 out: 177 out:
178 mutex_unlock(&start_mutex); 178 mutex_unlock(&start_mutex);
179 return err; 179 return err;
180 } 180 }
181 181
static int __init oprofile_init(void)
{
	int err;

	/* probe architecture-specific hardware support first */
	err = oprofile_arch_init(&oprofile_ops);

	/* fall back to the timer interrupt when hardware support is
	 * unavailable, or when the user forced it via the "timer"
	 * module parameter */
	if (err < 0 || timer) {
		printk(KERN_INFO "oprofile: using timer interrupt.\n");
		oprofile_timer_init(&oprofile_ops);
	}

	err = oprofilefs_register();
	if (err)
		oprofile_arch_exit();

	return err;
}
199 199
200 200
static void __exit oprofile_exit(void)
{
	/* tear down in reverse order of oprofile_init() */
	oprofilefs_unregister();
	oprofile_arch_exit();
}
206 206
207 207
module_init(oprofile_init);
module_exit(oprofile_exit);

/* "timer=1" forces the timer-interrupt fallback; writable at runtime
 * (0644) but only consulted in oprofile_init() */
module_param_named(timer, timer, int, 0644);
MODULE_PARM_DESC(timer, "force use of timer interrupt");

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Levon <levon@movementarian.org>");
MODULE_DESCRIPTION("OProfile system profiler");
217 217
drivers/oprofile/oprof.h
/**
 * @file oprof.h
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 */

#ifndef OPROF_H
#define OPROF_H

/* allocate buffers and hook the backend in / tear it all down */
int oprofile_setup(void);
void oprofile_shutdown(void);

int oprofilefs_register(void);
void oprofilefs_unregister(void);

/* start/stop sample collection; needs oprofile_setup() first */
int oprofile_start(void);
void oprofile_stop(void);

struct oprofile_operations;

/* buffer sizing knobs, defined in oprofile_files.c */
extern unsigned long fs_buffer_size;
extern unsigned long fs_cpu_buffer_size;
extern unsigned long fs_buffer_watershed;
extern struct oprofile_operations oprofile_ops;
extern unsigned long oprofile_started;
extern unsigned long backtrace_depth;

struct super_block;
struct dentry;

void oprofile_create_files(struct super_block *sb, struct dentry *root);
void oprofile_timer_init(struct oprofile_operations *ops);

int oprofile_set_backtrace(unsigned long depth);

#endif /* OPROF_H */
40 40
drivers/oprofile/oprofile_files.c
1 /** 1 /**
2 * @file oprofile_files.c 2 * @file oprofile_files.c
3 * 3 *
4 * @remark Copyright 2002 OProfile authors 4 * @remark Copyright 2002 OProfile authors
5 * @remark Read the file COPYING 5 * @remark Read the file COPYING
6 * 6 *
7 * @author John Levon <levon@movementarian.org> 7 * @author John Levon <levon@movementarian.org>
8 */ 8 */
9 9
10 #include <linux/fs.h> 10 #include <linux/fs.h>
11 #include <linux/oprofile.h> 11 #include <linux/oprofile.h>
12 12
13 #include "event_buffer.h" 13 #include "event_buffer.h"
14 #include "oprofile_stats.h" 14 #include "oprofile_stats.h"
15 #include "oprof.h" 15 #include "oprof.h"
16 16
/* defaults for the sizing tunables exported below via
 * oprofilefs_create_ulong(); units presumably match the buffer
 * element size used in event_buffer.c -- TODO confirm */
unsigned long fs_buffer_size = 131072;
unsigned long fs_cpu_buffer_size = 8192;
unsigned long fs_buffer_watershed = 32768; /* FIXME: tune */
20 20
21 static ssize_t depth_read(struct file *file, char __user *buf, size_t count, loff_t *offset) 21 static ssize_t depth_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
22 { 22 {
23 return oprofilefs_ulong_to_user(backtrace_depth, buf, count, offset); 23 return oprofilefs_ulong_to_user(backtrace_depth, buf, count, offset);
24 } 24 }
25 25
26 26
27 static ssize_t depth_write(struct file *file, char const __user *buf, size_t count, loff_t *offset) 27 static ssize_t depth_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
28 { 28 {
29 unsigned long val; 29 unsigned long val;
30 int retval; 30 int retval;
31 31
32 if (*offset) 32 if (*offset)
33 return -EINVAL; 33 return -EINVAL;
34 34
35 retval = oprofilefs_ulong_from_user(&val, buf, count); 35 retval = oprofilefs_ulong_from_user(&val, buf, count);
36 if (retval) 36 if (retval)
37 return retval; 37 return retval;
38 38
39 retval = oprofile_set_backtrace(val); 39 retval = oprofile_set_backtrace(val);
40 40
41 if (retval) 41 if (retval)
42 return retval; 42 return retval;
43 return count; 43 return count;
44 } 44 }
45 45
46 46
47 static const struct file_operations depth_fops = { 47 static const struct file_operations depth_fops = {
48 .read = depth_read, 48 .read = depth_read,
49 .write = depth_write 49 .write = depth_write
50 }; 50 };
51 51
52 52
/* report sizeof(void *) to userspace */
static ssize_t pointer_size_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
{
	return oprofilefs_ulong_to_user(sizeof(void *), buf, count, offset);
}


static const struct file_operations pointer_size_fops = {
	.read = pointer_size_read,
};
62 62
63 63
/* expose the backend's cpu_type identification string */
static ssize_t cpu_type_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
{
	return oprofilefs_str_to_user(oprofile_ops.cpu_type, buf, count, offset);
}


static const struct file_operations cpu_type_fops = {
	.read = cpu_type_read,
};
73 73
74 74
75 static ssize_t enable_read(struct file *file, char __user *buf, size_t count, loff_t *offset) 75 static ssize_t enable_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
76 { 76 {
77 return oprofilefs_ulong_to_user(oprofile_started, buf, count, offset); 77 return oprofilefs_ulong_to_user(oprofile_started, buf, count, offset);
78 } 78 }
79 79
80 80
81 static ssize_t enable_write(struct file *file, char const __user *buf, size_t count, loff_t *offset) 81 static ssize_t enable_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
82 { 82 {
83 unsigned long val; 83 unsigned long val;
84 int retval; 84 int retval;
85 85
86 if (*offset) 86 if (*offset)
87 return -EINVAL; 87 return -EINVAL;
88 88
89 retval = oprofilefs_ulong_from_user(&val, buf, count); 89 retval = oprofilefs_ulong_from_user(&val, buf, count);
90 if (retval) 90 if (retval)
91 return retval; 91 return retval;
92 92
93 if (val) 93 if (val)
94 retval = oprofile_start(); 94 retval = oprofile_start();
95 else 95 else
96 oprofile_stop(); 96 oprofile_stop();
97 97
98 if (retval) 98 if (retval)
99 return retval; 99 return retval;
100 return count; 100 return count;
101 } 101 }
102 102
103 103
104 static const struct file_operations enable_fops = { 104 static const struct file_operations enable_fops = {
105 .read = enable_read, 105 .read = enable_read,
106 .write = enable_write, 106 .write = enable_write,
107 }; 107 };
108 108
109 109
/* any write to "dump" wakes the process sleeping on the event
 * buffer so it drains what has been collected so far */
static ssize_t dump_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
{
	wake_up_buffer_waiter();
	return count;
}


static const struct file_operations dump_fops = {
	.write = dump_write,
};
120 120
/* populate the oprofilefs root with the control and stats files */
void oprofile_create_files(struct super_block *sb, struct dentry *root)
{
	oprofilefs_create_file(sb, root, "enable", &enable_fops);
	/* 0666: any user may poke "dump" to flush the buffer */
	oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666);
	oprofilefs_create_file(sb, root, "buffer", &event_buffer_fops);
	oprofilefs_create_ulong(sb, root, "buffer_size", &fs_buffer_size);
	oprofilefs_create_ulong(sb, root, "buffer_watershed", &fs_buffer_watershed);
	oprofilefs_create_ulong(sb, root, "cpu_buffer_size", &fs_cpu_buffer_size);
	oprofilefs_create_file(sb, root, "cpu_type", &cpu_type_fops);
	oprofilefs_create_file(sb, root, "backtrace_depth", &depth_fops);
	oprofilefs_create_file(sb, root, "pointer_size", &pointer_size_fops);
	oprofile_create_stats_files(sb, root);
	/* let the architecture backend add its own files */
	if (oprofile_ops.create_files)
		oprofile_ops.create_files(sb, root);
}
136 136
drivers/oprofile/oprofile_stats.c
1 /** 1 /**
2 * @file oprofile_stats.c 2 * @file oprofile_stats.c
3 * 3 *
4 * @remark Copyright 2002 OProfile authors 4 * @remark Copyright 2002 OProfile authors
5 * @remark Read the file COPYING 5 * @remark Read the file COPYING
6 * 6 *
7 * @author John Levon 7 * @author John Levon
8 */ 8 */
9 9
10 #include <linux/oprofile.h> 10 #include <linux/oprofile.h>
11 #include <linux/smp.h> 11 #include <linux/smp.h>
12 #include <linux/cpumask.h> 12 #include <linux/cpumask.h>
13 #include <linux/threads.h> 13 #include <linux/threads.h>
14 14
15 #include "oprofile_stats.h" 15 #include "oprofile_stats.h"
16 #include "cpu_buffer.h" 16 #include "cpu_buffer.h"
17 17
18 struct oprofile_stat_struct oprofile_stats; 18 struct oprofile_stat_struct oprofile_stats;
19 19
20 void oprofile_reset_stats(void) 20 void oprofile_reset_stats(void)
21 { 21 {
22 struct oprofile_cpu_buffer *cpu_buf; 22 struct oprofile_cpu_buffer *cpu_buf;
23 int i; 23 int i;
24 24
25 for_each_possible_cpu(i) { 25 for_each_possible_cpu(i) {
26 cpu_buf = &per_cpu(cpu_buffer, i); 26 cpu_buf = &per_cpu(cpu_buffer, i);
27 cpu_buf->sample_received = 0; 27 cpu_buf->sample_received = 0;
28 cpu_buf->sample_lost_overflow = 0; 28 cpu_buf->sample_lost_overflow = 0;
29 cpu_buf->backtrace_aborted = 0; 29 cpu_buf->backtrace_aborted = 0;
30 cpu_buf->sample_invalid_eip = 0; 30 cpu_buf->sample_invalid_eip = 0;
31 } 31 }
32 32
33 atomic_set(&oprofile_stats.sample_lost_no_mm, 0); 33 atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
34 atomic_set(&oprofile_stats.sample_lost_no_mapping, 0); 34 atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
35 atomic_set(&oprofile_stats.event_lost_overflow, 0); 35 atomic_set(&oprofile_stats.event_lost_overflow, 0);
36 } 36 }
37 37
38 38
39 void oprofile_create_stats_files(struct super_block *sb, struct dentry *root) 39 void oprofile_create_stats_files(struct super_block *sb, struct dentry *root)
40 { 40 {
41 struct oprofile_cpu_buffer *cpu_buf; 41 struct oprofile_cpu_buffer *cpu_buf;
42 struct dentry *cpudir; 42 struct dentry *cpudir;
43 struct dentry *dir; 43 struct dentry *dir;
44 char buf[10]; 44 char buf[10];
45 int i; 45 int i;
46 46
47 dir = oprofilefs_mkdir(sb, root, "stats"); 47 dir = oprofilefs_mkdir(sb, root, "stats");
48 if (!dir) 48 if (!dir)
49 return; 49 return;
50 50
51 for_each_possible_cpu(i) { 51 for_each_possible_cpu(i) {
52 cpu_buf = &per_cpu(cpu_buffer, i); 52 cpu_buf = &per_cpu(cpu_buffer, i);
53 snprintf(buf, 10, "cpu%d", i); 53 snprintf(buf, 10, "cpu%d", i);
54 cpudir = oprofilefs_mkdir(sb, dir, buf); 54 cpudir = oprofilefs_mkdir(sb, dir, buf);
55 55
56 /* Strictly speaking access to these ulongs is racy, 56 /* Strictly speaking access to these ulongs is racy,
57 * but we can't simply lock them, and they are 57 * but we can't simply lock them, and they are
58 * informational only. 58 * informational only.
59 */ 59 */
60 oprofilefs_create_ro_ulong(sb, cpudir, "sample_received", 60 oprofilefs_create_ro_ulong(sb, cpudir, "sample_received",
61 &cpu_buf->sample_received); 61 &cpu_buf->sample_received);
62 oprofilefs_create_ro_ulong(sb, cpudir, "sample_lost_overflow", 62 oprofilefs_create_ro_ulong(sb, cpudir, "sample_lost_overflow",
63 &cpu_buf->sample_lost_overflow); 63 &cpu_buf->sample_lost_overflow);
64 oprofilefs_create_ro_ulong(sb, cpudir, "backtrace_aborted", 64 oprofilefs_create_ro_ulong(sb, cpudir, "backtrace_aborted",
65 &cpu_buf->backtrace_aborted); 65 &cpu_buf->backtrace_aborted);
66 oprofilefs_create_ro_ulong(sb, cpudir, "sample_invalid_eip", 66 oprofilefs_create_ro_ulong(sb, cpudir, "sample_invalid_eip",
67 &cpu_buf->sample_invalid_eip); 67 &cpu_buf->sample_invalid_eip);
68 } 68 }
69 69
70 oprofilefs_create_ro_atomic(sb, dir, "sample_lost_no_mm", 70 oprofilefs_create_ro_atomic(sb, dir, "sample_lost_no_mm",
71 &oprofile_stats.sample_lost_no_mm); 71 &oprofile_stats.sample_lost_no_mm);
72 oprofilefs_create_ro_atomic(sb, dir, "sample_lost_no_mapping", 72 oprofilefs_create_ro_atomic(sb, dir, "sample_lost_no_mapping",
73 &oprofile_stats.sample_lost_no_mapping); 73 &oprofile_stats.sample_lost_no_mapping);
74 oprofilefs_create_ro_atomic(sb, dir, "event_lost_overflow", 74 oprofilefs_create_ro_atomic(sb, dir, "event_lost_overflow",
75 &oprofile_stats.event_lost_overflow); 75 &oprofile_stats.event_lost_overflow);
76 oprofilefs_create_ro_atomic(sb, dir, "bt_lost_no_mapping", 76 oprofilefs_create_ro_atomic(sb, dir, "bt_lost_no_mapping",
77 &oprofile_stats.bt_lost_no_mapping); 77 &oprofile_stats.bt_lost_no_mapping);
78 } 78 }
79 79
drivers/oprofile/oprofile_stats.h
/**
 * @file oprofile_stats.h
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon
 */

#ifndef OPROFILE_STATS_H
#define OPROFILE_STATS_H

#include <asm/atomic.h>

/* global event/sample loss counters; the names suggest the drop
 * reason, but they are incremented by code not visible here */
struct oprofile_stat_struct {
	atomic_t sample_lost_no_mm;
	atomic_t sample_lost_no_mapping;
	atomic_t bt_lost_no_mapping;
	atomic_t event_lost_overflow;
};

extern struct oprofile_stat_struct oprofile_stats;

/* reset all stats to zero */
void oprofile_reset_stats(void);

struct super_block;
struct dentry;

/* create the stats/ dir */
void oprofile_create_stats_files(struct super_block *sb, struct dentry *root);

#endif /* OPROFILE_STATS_H */
34 34
drivers/oprofile/oprofilefs.c
1 /** 1 /**
2 * @file oprofilefs.c 2 * @file oprofilefs.c
3 * 3 *
4 * @remark Copyright 2002 OProfile authors 4 * @remark Copyright 2002 OProfile authors
5 * @remark Read the file COPYING 5 * @remark Read the file COPYING
6 * 6 *
7 * @author John Levon 7 * @author John Levon
8 * 8 *
9 * A simple filesystem for configuration and 9 * A simple filesystem for configuration and
10 * access of oprofile. 10 * access of oprofile.
11 */ 11 */
12 12
13 #include <linux/init.h> 13 #include <linux/init.h>
14 #include <linux/module.h> 14 #include <linux/module.h>
15 #include <linux/oprofile.h> 15 #include <linux/oprofile.h>
16 #include <linux/fs.h> 16 #include <linux/fs.h>
17 #include <linux/pagemap.h> 17 #include <linux/pagemap.h>
18 #include <asm/uaccess.h> 18 #include <asm/uaccess.h>
19 19
20 #include "oprof.h" 20 #include "oprof.h"
21 21
/* filesystem magic number, ASCII "opro" */
#define OPROFILEFS_MAGIC 0x6f70726f

/* serialises the *val update in oprofilefs_ulong_from_user() */
DEFINE_SPINLOCK(oprofilefs_lock);
25 25
26 static struct inode *oprofilefs_get_inode(struct super_block *sb, int mode) 26 static struct inode *oprofilefs_get_inode(struct super_block *sb, int mode)
27 { 27 {
28 struct inode *inode = new_inode(sb); 28 struct inode *inode = new_inode(sb);
29 29
30 if (inode) { 30 if (inode) {
31 inode->i_mode = mode; 31 inode->i_mode = mode;
32 inode->i_uid = 0; 32 inode->i_uid = 0;
33 inode->i_gid = 0; 33 inode->i_gid = 0;
34 inode->i_blocks = 0; 34 inode->i_blocks = 0;
35 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; 35 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
36 } 36 }
37 return inode; 37 return inode;
38 } 38 }
39 39
40 40
/* minimal superblock ops: generic statfs, delete inodes immediately */
static struct super_operations s_ops = {
	.statfs		= simple_statfs,
	.drop_inode	= generic_delete_inode,
};
45 45
46 46
/* copy a NUL-terminated string out to userspace, honouring *offset */
ssize_t oprofilefs_str_to_user(char const *str, char __user *buf, size_t count, loff_t *offset)
{
	return simple_read_from_buffer(buf, count, offset, str, strlen(str));
}
51 51
52 52
#define TMPBUFSIZE 50

/* format an unsigned long as "%lu\n" and copy it to userspace;
 * the clamp guards against snprintf truncation (maxlen would then
 * be the would-be length, larger than the buffer) */
ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user *buf, size_t count, loff_t *offset)
{
	char tmpbuf[TMPBUFSIZE];
	size_t maxlen = snprintf(tmpbuf, TMPBUFSIZE, "%lu\n", val);
	if (maxlen > TMPBUFSIZE)
		maxlen = TMPBUFSIZE;
	return simple_read_from_buffer(buf, count, offset, tmpbuf, maxlen);
}
63 63
64 64
/* Parse a number written by userspace (base 0: decimal, hex or
 * octal) into *val.  Returns 0 on success, -EINVAL if the write is
 * too large for the scratch buffer, -EFAULT on a bad user pointer.
 * The spinlock makes the *val update atomic w.r.t. other writers. */
int oprofilefs_ulong_from_user(unsigned long *val, char const __user *buf, size_t count)
{
	char tmpbuf[TMPBUFSIZE];
	unsigned long flags;

	/* empty write: report success and leave *val untouched */
	if (!count)
		return 0;

	/* must fit with room for the terminating NUL */
	if (count > TMPBUFSIZE - 1)
		return -EINVAL;

	/* zero-fill guarantees NUL termination after the copy */
	memset(tmpbuf, 0x0, TMPBUFSIZE);

	if (copy_from_user(tmpbuf, buf, count))
		return -EFAULT;

	spin_lock_irqsave(&oprofilefs_lock, flags);
	*val = simple_strtoul(tmpbuf, NULL, 0);
	spin_unlock_irqrestore(&oprofilefs_lock, flags);
	return 0;
}
86 86
87 87
/* generic read handler: private_data points at the ulong to report */
static ssize_t ulong_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
{
	unsigned long *val = file->private_data;
	return oprofilefs_ulong_to_user(*val, buf, count, offset);
}
93 93
94 94
95 static ssize_t ulong_write_file(struct file *file, char const __user *buf, size_t count, loff_t *offset) 95 static ssize_t ulong_write_file(struct file *file, char const __user *buf, size_t count, loff_t *offset)
96 { 96 {
97 unsigned long *value = file->private_data; 97 unsigned long *value = file->private_data;
98 int retval; 98 int retval;
99 99
100 if (*offset) 100 if (*offset)
101 return -EINVAL; 101 return -EINVAL;
102 102
103 retval = oprofilefs_ulong_from_user(value, buf, count); 103 retval = oprofilefs_ulong_from_user(value, buf, count);
104 104
105 if (retval) 105 if (retval)
106 return retval; 106 return retval;
107 return count; 107 return count;
108 } 108 }
109 109
110 110
/* stash the inode's private pointer on the struct file so the
 * ulong read/write handlers above can reach their backing value */
static int default_open(struct inode *inode, struct file *filp)
{
	if (inode->i_private)
		filp->private_data = inode->i_private;
	return 0;
}


/* read/write ulong files (created with mode 0644 below) */
static const struct file_operations ulong_fops = {
	.read		= ulong_read_file,
	.write		= ulong_write_file,
	.open		= default_open,
};


/* read-only ulong files (created with mode 0444 below) */
static const struct file_operations ulong_ro_fops = {
	.read		= ulong_read_file,
	.open		= default_open,
};
130 130
131 131
/* Create a regular file named @name under @root, backed by @fops
 * with permissions @perm.  Returns the new dentry, or NULL when
 * either allocation fails (the dentry is dropped if the inode
 * allocation is what failed). */
static struct dentry *__oprofilefs_create_file(struct super_block *sb,
	struct dentry *root, char const *name, const struct file_operations *fops,
	int perm)
{
	struct dentry *dentry;
	struct inode *inode;

	dentry = d_alloc_name(root, name);
	if (!dentry)
		return NULL;
	inode = oprofilefs_get_inode(sb, S_IFREG | perm);
	if (!inode) {
		/* release the dentry we just allocated */
		dput(dentry);
		return NULL;
	}
	inode->i_fop = fops;
	d_add(dentry, inode);
	return dentry;
}
151 151
152 152
153 int oprofilefs_create_ulong(struct super_block *sb, struct dentry *root, 153 int oprofilefs_create_ulong(struct super_block *sb, struct dentry *root,
154 char const *name, unsigned long *val) 154 char const *name, unsigned long *val)
155 { 155 {
156 struct dentry *d = __oprofilefs_create_file(sb, root, name, 156 struct dentry *d = __oprofilefs_create_file(sb, root, name,
157 &ulong_fops, 0644); 157 &ulong_fops, 0644);
158 if (!d) 158 if (!d)
159 return -EFAULT; 159 return -EFAULT;
160 160
161 d->d_inode->i_private = val; 161 d->d_inode->i_private = val;
162 return 0; 162 return 0;
163 } 163 }
164 164
165 165
166 int oprofilefs_create_ro_ulong(struct super_block *sb, struct dentry *root, 166 int oprofilefs_create_ro_ulong(struct super_block *sb, struct dentry *root,
167 char const *name, unsigned long *val) 167 char const *name, unsigned long *val)
168 { 168 {
169 struct dentry *d = __oprofilefs_create_file(sb, root, name, 169 struct dentry *d = __oprofilefs_create_file(sb, root, name,
170 &ulong_ro_fops, 0444); 170 &ulong_ro_fops, 0444);
171 if (!d) 171 if (!d)
172 return -EFAULT; 172 return -EFAULT;
173 173
174 d->d_inode->i_private = val; 174 d->d_inode->i_private = val;
175 return 0; 175 return 0;
176 } 176 }
177 177
178 178
179 static ssize_t atomic_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset) 179 static ssize_t atomic_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
180 { 180 {
181 atomic_t *val = file->private_data; 181 atomic_t *val = file->private_data;
182 return oprofilefs_ulong_to_user(atomic_read(val), buf, count, offset); 182 return oprofilefs_ulong_to_user(atomic_read(val), buf, count, offset);
183 } 183 }
184
185 184
185
/* Read-only file operations for an atomic_t counter attribute. */
static const struct file_operations atomic_ro_fops = {
	.read = atomic_read_file,
	.open = default_open,
};
190
191 190
191
192 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root, 192 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
193 char const *name, atomic_t *val) 193 char const *name, atomic_t *val)
194 { 194 {
195 struct dentry *d = __oprofilefs_create_file(sb, root, name, 195 struct dentry *d = __oprofilefs_create_file(sb, root, name,
196 &atomic_ro_fops, 0444); 196 &atomic_ro_fops, 0444);
197 if (!d) 197 if (!d)
198 return -EFAULT; 198 return -EFAULT;
199 199
200 d->d_inode->i_private = val; 200 d->d_inode->i_private = val;
201 return 0; 201 return 0;
202 } 202 }
203 203
204 204
205 int oprofilefs_create_file(struct super_block *sb, struct dentry *root, 205 int oprofilefs_create_file(struct super_block *sb, struct dentry *root,
206 char const *name, const struct file_operations *fops) 206 char const *name, const struct file_operations *fops)
207 { 207 {
208 if (!__oprofilefs_create_file(sb, root, name, fops, 0644)) 208 if (!__oprofilefs_create_file(sb, root, name, fops, 0644))
209 return -EFAULT; 209 return -EFAULT;
210 return 0; 210 return 0;
211 } 211 }
212 212
213 213
214 int oprofilefs_create_file_perm(struct super_block *sb, struct dentry *root, 214 int oprofilefs_create_file_perm(struct super_block *sb, struct dentry *root,
215 char const *name, const struct file_operations *fops, int perm) 215 char const *name, const struct file_operations *fops, int perm)
216 { 216 {
217 if (!__oprofilefs_create_file(sb, root, name, fops, perm)) 217 if (!__oprofilefs_create_file(sb, root, name, fops, perm))
218 return -EFAULT; 218 return -EFAULT;
219 return 0; 219 return 0;
220 } 220 }
221 221
222 222
223 struct dentry *oprofilefs_mkdir(struct super_block *sb, 223 struct dentry *oprofilefs_mkdir(struct super_block *sb,
224 struct dentry *root, char const *name) 224 struct dentry *root, char const *name)
225 { 225 {
226 struct dentry *dentry; 226 struct dentry *dentry;
227 struct inode *inode; 227 struct inode *inode;
228 228
229 dentry = d_alloc_name(root, name); 229 dentry = d_alloc_name(root, name);
230 if (!dentry) 230 if (!dentry)
231 return NULL; 231 return NULL;
232 inode = oprofilefs_get_inode(sb, S_IFDIR | 0755); 232 inode = oprofilefs_get_inode(sb, S_IFDIR | 0755);
233 if (!inode) { 233 if (!inode) {
234 dput(dentry); 234 dput(dentry);
235 return NULL; 235 return NULL;
236 } 236 }
237 inode->i_op = &simple_dir_inode_operations; 237 inode->i_op = &simple_dir_inode_operations;
238 inode->i_fop = &simple_dir_operations; 238 inode->i_fop = &simple_dir_operations;
239 d_add(dentry, inode); 239 d_add(dentry, inode);
240 return dentry; 240 return dentry;
241 } 241 }
242 242
243 243
/*
 * Fill in a freshly allocated oprofilefs superblock: set the basic
 * parameters, create the root directory inode/dentry, and populate it
 * with oprofile's control files.  @data and @silent are unused.
 *
 * Returns 0 on success, -ENOMEM if an allocation fails.
 */
static int oprofilefs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *root_inode;
	struct dentry *root_dentry;

	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = OPROFILEFS_MAGIC;
	sb->s_op = &s_ops;
	sb->s_time_gran = 1;

	root_inode = oprofilefs_get_inode(sb, S_IFDIR | 0755);
	if (!root_inode)
		return -ENOMEM;
	root_inode->i_op = &simple_dir_inode_operations;
	root_inode->i_fop = &simple_dir_operations;
	root_dentry = d_alloc_root(root_inode);
	if (!root_dentry) {
		/* drop the inode we just allocated before bailing out */
		iput(root_inode);
		return -ENOMEM;
	}

	sb->s_root = root_dentry;

	oprofile_create_files(sb, root_dentry);

	/* FIXME: verify kill_litter_super removes our dentries */
	return 0;
}
273 273
274 274
/*
 * Mount callback: get_sb_single() makes every mount of oprofilefs
 * share one superblock, initialised by oprofilefs_fill_super().
 */
static int oprofilefs_get_sb(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_single(fs_type, flags, data, oprofilefs_fill_super, mnt);
}
280 280
281 281
/*
 * The "oprofilefs" filesystem type.  kill_litter_super handles
 * teardown of the dentries created by oprofile_create_files().
 */
static struct file_system_type oprofilefs_type = {
	.owner = THIS_MODULE,
	.name = "oprofilefs",
	.get_sb = oprofilefs_get_sb,
	.kill_sb = kill_litter_super,
};
288 288
289 289
/* Register "oprofilefs" with the VFS; called at driver initialisation. */
int __init oprofilefs_register(void)
{
	return register_filesystem(&oprofilefs_type);
}
294 294
295 295
/* Unregister the filesystem type on driver teardown. */
void __exit oprofilefs_unregister(void)
{
	unregister_filesystem(&oprofilefs_type);
}
drivers/oprofile/timer_int.c
1 /** 1 /**
2 * @file timer_int.c 2 * @file timer_int.c
3 * 3 *
4 * @remark Copyright 2002 OProfile authors 4 * @remark Copyright 2002 OProfile authors
5 * @remark Read the file COPYING 5 * @remark Read the file COPYING
6 * 6 *
7 * @author John Levon <levon@movementarian.org> 7 * @author John Levon <levon@movementarian.org>
8 */ 8 */
9 9
10 #include <linux/kernel.h> 10 #include <linux/kernel.h>
11 #include <linux/notifier.h> 11 #include <linux/notifier.h>
12 #include <linux/smp.h> 12 #include <linux/smp.h>
13 #include <linux/oprofile.h> 13 #include <linux/oprofile.h>
14 #include <linux/profile.h> 14 #include <linux/profile.h>
15 #include <linux/init.h> 15 #include <linux/init.h>
16 #include <asm/ptrace.h> 16 #include <asm/ptrace.h>
17 17
18 #include "oprof.h" 18 #include "oprof.h"
19 19
/*
 * Timer hook: record one profiling sample for the interrupted register
 * state.  The event number is always 0 in timer mode.
 */
static int timer_notify(struct pt_regs *regs)
{
	oprofile_add_sample(regs, 0);
	return 0;
}
25 25
/* Start profiling by hooking timer_notify() into the timer interrupt. */
static int timer_start(void)
{
	return register_timer_hook(timer_notify);
}
30 30
31 31
/* Stop profiling by removing the timer hook again. */
static void timer_stop(void)
{
	unregister_timer_hook(timer_notify);
}
36 36
37 37
38 void __init oprofile_timer_init(struct oprofile_operations *ops) 38 void __init oprofile_timer_init(struct oprofile_operations *ops)
39 { 39 {
40 ops->create_files = NULL; 40 ops->create_files = NULL;
41 ops->setup = NULL; 41 ops->setup = NULL;
42 ops->shutdown = NULL; 42 ops->shutdown = NULL;
43 ops->start = timer_start; 43 ops->start = timer_start;
44 ops->stop = timer_stop; 44 ops->stop = timer_stop;
45 ops->cpu_type = "timer"; 45 ops->cpu_type = "timer";
46 } 46 }
47 47