kernel/sched_debug.c
  /*
   * kernel/sched_debug.c
   *
   * Print the CFS rbtree
   *
   * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
   *
   * This program is free software; you can redistribute it and/or modify
   * it under the terms of the GNU General Public License version 2 as
   * published by the Free Software Foundation.
   */
  
  #include <linux/proc_fs.h>
  #include <linux/sched.h>
  #include <linux/seq_file.h>
  #include <linux/kallsyms.h>
  #include <linux/utsname.h>
  
  /*
   * This allows printing both to /proc/sched_debug and
   * to the console
   */
  #define SEQ_printf(m, x...)			\
   do {						\
  	if (m)					\
  		seq_printf(m, x);		\
  	else					\
  		printk(x);			\
   } while (0)
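  
  /*
   * Passing a NULL seq_file (as sysrq_sched_debug_show() below does via
   * sched_debug_show(NULL, NULL)) makes SEQ_printf() fall back to printk(),
   * so the same report can be dumped straight to the console.
   */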
  /*
   * Ease the printing of nsec fields:
   */
  static long long nsec_high(unsigned long long nsec)
  {
  	if ((long long)nsec < 0) {
  		nsec = -nsec;
  		do_div(nsec, 1000000);
  		return -nsec;
  	}
  	do_div(nsec, 1000000);
  
  	return nsec;
  }
  static unsigned long nsec_low(unsigned long long nsec)
  {
  	if ((long long)nsec < 0)
  		nsec = -nsec;
  
  	return do_div(nsec, 1000000);
  }
  
  #define SPLIT_NS(x) nsec_high(x), nsec_low(x)
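  
  /*
   * SPLIT_NS() expands to the two arguments expected by a "%Ld.%06ld"
   * format: the value scaled down to milliseconds plus the six-digit
   * remainder, so e.g. 1234567 ns prints as "1.234567".
   */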
  #ifdef CONFIG_FAIR_GROUP_SCHED
  static void print_cfs_group_stats(struct seq_file *m, int cpu,
  		struct task_group *tg)
  {
  	struct sched_entity *se = tg->se[cpu];
  	if (!se)
  		return;
  
  #define P(F) \
  	SEQ_printf(m, "  .%-30s: %lld
  ", #F, (long long)F)
  #define PN(F) \
  	SEQ_printf(m, "  .%-30s: %lld.%06ld
  ", #F, SPLIT_NS((long long)F))
  
  	PN(se->exec_start);
  	PN(se->vruntime);
  	PN(se->sum_exec_runtime);
  #ifdef CONFIG_SCHEDSTATS
  	PN(se->wait_start);
  	PN(se->sleep_start);
  	PN(se->block_start);
  	PN(se->sleep_max);
  	PN(se->block_max);
  	PN(se->exec_max);
  	PN(se->slice_max);
  	PN(se->wait_max);
  	PN(se->wait_sum);
  	P(se->wait_count);
  #endif
  	P(se->load.weight);
  #undef PN
  #undef P
  }
  #endif
  static void
  print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
  {
  	if (rq->curr == p)
  		SEQ_printf(m, "R");
  	else
  		SEQ_printf(m, " ");
  	SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
  		p->comm, p->pid,
  		SPLIT_NS(p->se.vruntime),
  		(long long)(p->nvcsw + p->nivcsw),
  		p->prio);
  #ifdef CONFIG_SCHEDSTATS
  	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
  		SPLIT_NS(p->se.vruntime),
  		SPLIT_NS(p->se.sum_exec_runtime),
  		SPLIT_NS(p->se.sum_sleep_runtime));
  #else
  	SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld",
  		0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L);
  #endif
  
  #ifdef CONFIG_CGROUP_SCHED
  	{
  		char path[64];
  
  		cgroup_path(task_group(p)->css.cgroup, path, sizeof(path));
  		SEQ_printf(m, " %s", path);
  	}
  #endif
  	SEQ_printf(m, "
  ");
  }
  static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
  {
  	struct task_struct *g, *p;
  	unsigned long flags;
  
  	SEQ_printf(m,
  	"\nrunnable tasks:\n"
  	"            task   PID         tree-key  switches  prio"
  	"     exec-runtime         sum-exec        sum-sleep\n"
  	"------------------------------------------------------"
  	"----------------------------------------------------\n");

  	read_lock_irqsave(&tasklist_lock, flags);
  
  	do_each_thread(g, p) {
  		if (!p->se.on_rq || task_cpu(p) != rq_cpu)
  			continue;
  		print_task(m, rq, p);
  	} while_each_thread(g, p);
  	read_unlock_irqrestore(&tasklist_lock, flags);
  }
  #if defined(CONFIG_CGROUP_SCHED) && \
  	(defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED))
  static void task_group_path(struct task_group *tg, char *buf, int buflen)
  {
  	/* may be NULL if the underlying cgroup isn't fully-created yet */
  	if (!tg->css.cgroup) {
  		buf[0] = '\0';
  		return;
  	}
  	cgroup_path(tg->css.cgroup, buf, buflen);
  }
  #endif
  void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
  {
  	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
  		spread, rq0_min_vruntime, spread0;
  	struct rq *rq = &per_cpu(runqueues, cpu);
  	struct sched_entity *last;
  	unsigned long flags;
  #if defined(CONFIG_CGROUP_SCHED) && defined(CONFIG_FAIR_GROUP_SCHED)
  	char path[128];
  	struct task_group *tg = cfs_rq->tg;
  	task_group_path(tg, path, sizeof(path));
  
  	SEQ_printf(m, "
  cfs_rq[%d]:%s
  ", cpu, path);
  #elif defined(CONFIG_USER_SCHED) && defined(CONFIG_FAIR_GROUP_SCHED)
  	{
  		uid_t uid = cfs_rq->tg->uid;
  		SEQ_printf(m, "
  cfs_rq[%d] for UID: %u
  ", cpu, uid);
  	}
  #else
  	SEQ_printf(m, "
  cfs_rq[%d]:
  ", cpu);
  #endif
  	SEQ_printf(m, "  .%-30s: %Ld.%06ld
  ", "exec_clock",
  			SPLIT_NS(cfs_rq->exec_clock));
  
  	spin_lock_irqsave(&rq->lock, flags);
  	if (cfs_rq->rb_leftmost)
  		MIN_vruntime = (__pick_next_entity(cfs_rq))->vruntime;
  	last = __pick_last_entity(cfs_rq);
  	if (last)
  		max_vruntime = last->vruntime;
  	min_vruntime = cfs_rq->min_vruntime;
  	rq0_min_vruntime = per_cpu(runqueues, 0).cfs.min_vruntime;
  	spin_unlock_irqrestore(&rq->lock, flags);
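  
  	/*
  	 * MIN_vruntime/max_vruntime are the vruntimes of the leftmost and
  	 * rightmost entities still queued in the rbtree (-1 if the tree is
  	 * empty); spread is the distance between them, and spread0 compares
  	 * this runqueue's min_vruntime against CPU 0's as a rough cross-CPU
  	 * reference point.
  	 */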
  	SEQ_printf(m, "  .%-30s: %Ld.%06ld
  ", "MIN_vruntime",
  			SPLIT_NS(MIN_vruntime));
  	SEQ_printf(m, "  .%-30s: %Ld.%06ld
  ", "min_vruntime",
  			SPLIT_NS(min_vruntime));
  	SEQ_printf(m, "  .%-30s: %Ld.%06ld
  ", "max_vruntime",
  			SPLIT_NS(max_vruntime));
  	spread = max_vruntime - MIN_vruntime;
  	SEQ_printf(m, "  .%-30s: %Ld.%06ld
  ", "spread",
  			SPLIT_NS(spread));
  	spread0 = min_vruntime - rq0_min_vruntime;
  	SEQ_printf(m, "  .%-30s: %Ld.%06ld
  ", "spread0",
  			SPLIT_NS(spread0));
  	SEQ_printf(m, "  .%-30s: %ld
  ", "nr_running", cfs_rq->nr_running);
  	SEQ_printf(m, "  .%-30s: %ld
  ", "load", cfs_rq->load.weight);

  	SEQ_printf(m, "  .%-30s: %d
  ", "nr_spread_over",
  			cfs_rq->nr_spread_over);
  #ifdef CONFIG_FAIR_GROUP_SCHED
  #ifdef CONFIG_SMP
  	SEQ_printf(m, "  .%-30s: %lu
  ", "shares", cfs_rq->shares);
  #endif
  	print_cfs_group_stats(m, cpu, cfs_rq->tg);
  #endif
  }
  void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
  {
  #if defined(CONFIG_CGROUP_SCHED) && defined(CONFIG_RT_GROUP_SCHED)
  	char path[128];
  	struct task_group *tg = rt_rq->tg;
  	task_group_path(tg, path, sizeof(path));
  
  	SEQ_printf(m, "
  rt_rq[%d]:%s
  ", cpu, path);
  #else
  	SEQ_printf(m, "
  rt_rq[%d]:
  ", cpu);
  #endif
  
  
  #define P(x) \
  	SEQ_printf(m, "  .%-30s: %Ld
  ", #x, (long long)(rt_rq->x))
  #define PN(x) \
  	SEQ_printf(m, "  .%-30s: %Ld.%06ld
  ", #x, SPLIT_NS(rt_rq->x))
  
  	P(rt_nr_running);
  	P(rt_throttled);
  	PN(rt_time);
  	PN(rt_runtime);
  
  #undef PN
  #undef P
  }
  static void print_cpu(struct seq_file *m, int cpu)
  {
  	struct rq *rq = &per_cpu(runqueues, cpu);
  
  #ifdef CONFIG_X86
  	{
  		unsigned int freq = cpu_khz ? : 1;
  
  		SEQ_printf(m, "
  cpu#%d, %u.%03u MHz
  ",
  			   cpu, freq / 1000, (freq % 1000));
  	}
  #else
  	SEQ_printf(m, "
  cpu#%d
  ", cpu);
  #endif
  
  #define P(x) \
  	SEQ_printf(m, "  .%-30s: %Ld
  ", #x, (long long)(rq->x))
  #define PN(x) \
  	SEQ_printf(m, "  .%-30s: %Ld.%06ld
  ", #x, SPLIT_NS(rq->x))
  
  	P(nr_running);
  	SEQ_printf(m, "  .%-30s: %lu
  ", "load",
  		   rq->load.weight);
  	P(nr_switches);
  	P(nr_load_updates);
  	P(nr_uninterruptible);
  	PN(next_balance);
  	P(curr->pid);
  	PN(clock);
  	P(cpu_load[0]);
  	P(cpu_load[1]);
  	P(cpu_load[2]);
  	P(cpu_load[3]);
  	P(cpu_load[4]);
  #undef P
  #undef PN

  #ifdef CONFIG_SCHEDSTATS
  #define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, rq->n);
  	P(yld_count);
  
  	P(sched_switch);
  	P(sched_count);
  	P(sched_goidle);
  
  	P(ttwu_count);
  	P(ttwu_local);
  
  	P(bkl_count);
  
  #undef P
  #endif
  	print_cfs_stats(m, cpu);
  	print_rt_stats(m, cpu);

  	print_rq(m, rq, cpu);
  }
  
  static int sched_debug_show(struct seq_file *m, void *v)
  {
  	u64 now = ktime_to_ns(ktime_get());
  	int cpu;
  	SEQ_printf(m, "Sched Debug Version: v0.09, %s %.*s
  ",
  		init_utsname()->release,
  		(int)strcspn(init_utsname()->version, " "),
  		init_utsname()->version);
  	SEQ_printf(m, "now at %Lu.%06ld msecs
  ", SPLIT_NS(now));

  #define P(x) \
  	SEQ_printf(m, "  .%-40s: %Ld
  ", #x, (long long)(x))
  #define PN(x) \
  	SEQ_printf(m, "  .%-40s: %Ld.%06ld
  ", #x, SPLIT_NS(x))
  	P(jiffies);
  	PN(sysctl_sched_latency);
  	PN(sysctl_sched_min_granularity);
  	PN(sysctl_sched_wakeup_granularity);
  	PN(sysctl_sched_child_runs_first);
  	P(sysctl_sched_features);
  #undef PN
  #undef P
  	for_each_online_cpu(cpu)
  		print_cpu(m, cpu);
  
  	SEQ_printf(m, "
  ");
  
  	return 0;
  }
  static void sysrq_sched_debug_show(void)
  {
  	sched_debug_show(NULL, NULL);
  }
  
  static int sched_debug_open(struct inode *inode, struct file *filp)
  {
  	return single_open(filp, sched_debug_show, NULL);
  }
  static const struct file_operations sched_debug_fops = {
  	.open		= sched_debug_open,
  	.read		= seq_read,
  	.llseek		= seq_lseek,
  	.release	= single_release,
  };
  
  static int __init init_sched_debug_procfs(void)
  {
  	struct proc_dir_entry *pe;
  	pe = proc_create("sched_debug", 0444, NULL, &sched_debug_fops);
  	if (!pe)
  		return -ENOMEM;
  	return 0;
  }
  
  __initcall(init_sched_debug_procfs);
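  
  /*
   * With this initcall in place the whole report can be read from
   * /proc/sched_debug (mode 0444), e.g. with "cat /proc/sched_debug",
   * while sysrq_sched_debug_show() above emits the same data via printk().
   */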
  
  void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
  {
  	unsigned long nr_switches;
  	unsigned long flags;
  	int num_threads = 1;
  	if (lock_task_sighand(p, &flags)) {
  		num_threads = atomic_read(&p->signal->count);
  		unlock_task_sighand(p, &flags);
  	}
  
  	SEQ_printf(m, "%s (%d, #threads: %d)
  ", p->comm, p->pid, num_threads);
  	SEQ_printf(m,
  		"---------------------------------------------------------
  ");
  #define __P(F) \
  	SEQ_printf(m, "%-35s:%21Ld
  ", #F, (long long)F)
  #define P(F) \
  	SEQ_printf(m, "%-35s:%21Ld
  ", #F, (long long)p->F)
  #define __PN(F) \
  	SEQ_printf(m, "%-35s:%14Ld.%06ld
  ", #F, SPLIT_NS((long long)F))
  #define PN(F) \
  	SEQ_printf(m, "%-35s:%14Ld.%06ld
  ", #F, SPLIT_NS((long long)p->F))

  	PN(se.exec_start);
  	PN(se.vruntime);
  	PN(se.sum_exec_runtime);
  	PN(se.avg_overlap);
  	PN(se.avg_wakeup);

  	nr_switches = p->nvcsw + p->nivcsw;
  #ifdef CONFIG_SCHEDSTATS
  	PN(se.wait_start);
  	PN(se.sleep_start);
  	PN(se.block_start);
  	PN(se.sleep_max);
  	PN(se.block_max);
  	PN(se.exec_max);
  	PN(se.slice_max);
  	PN(se.wait_max);
  	PN(se.wait_sum);
  	P(se.wait_count);
  	P(sched_info.bkl_count);
  	P(se.nr_migrations);
  	P(se.nr_migrations_cold);
  	P(se.nr_failed_migrations_affine);
  	P(se.nr_failed_migrations_running);
  	P(se.nr_failed_migrations_hot);
  	P(se.nr_forced_migrations);
  	P(se.nr_forced2_migrations);
  	P(se.nr_wakeups);
  	P(se.nr_wakeups_sync);
  	P(se.nr_wakeups_migrate);
  	P(se.nr_wakeups_local);
  	P(se.nr_wakeups_remote);
  	P(se.nr_wakeups_affine);
  	P(se.nr_wakeups_affine_attempts);
  	P(se.nr_wakeups_passive);
  	P(se.nr_wakeups_idle);
  
  	{
  		u64 avg_atom, avg_per_cpu;
  
  		avg_atom = p->se.sum_exec_runtime;
  		if (nr_switches)
  			do_div(avg_atom, nr_switches);
  		else
  			avg_atom = -1LL;
  
  		avg_per_cpu = p->se.sum_exec_runtime;
  		if (p->se.nr_migrations) {
  			avg_per_cpu = div64_u64(avg_per_cpu,
  						p->se.nr_migrations);
  		} else {
  			avg_per_cpu = -1LL;
  		}
  
  		__PN(avg_atom);
  		__PN(avg_per_cpu);
  	}
  #endif
  	__P(nr_switches);
  	SEQ_printf(m, "%-35s:%21Ld
  ",
cc367732f   Ingo Molnar   sched: debug, imp...
478
479
480
481
  		   "nr_voluntary_switches", (long long)p->nvcsw);
  	SEQ_printf(m, "%-35s:%21Ld
  ",
  		   "nr_involuntary_switches", (long long)p->nivcsw);
  	P(se.load.weight);
  	P(policy);
  	P(prio);
  #undef PN
  #undef __PN
  #undef P
  #undef __P
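  
  	/*
  	 * Two back-to-back cpu_clock() reads: the printed clock-delta gives
  	 * a feel for the granularity and overhead of the scheduler clock on
  	 * this machine.
  	 */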
  
  	{
  		unsigned int this_cpu = raw_smp_processor_id();
  		u64 t0, t1;
  		t0 = cpu_clock(this_cpu);
  		t1 = cpu_clock(this_cpu);
  		SEQ_printf(m, "%-35s:%21Ld
  ",
  			   "clock-delta", (long long)(t1-t0));
  	}
  }
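  
  /*
   * proc_sched_set_task() clears the per-task statistics printed above;
   * it is typically invoked from the /proc/<pid>/sched write path.
   */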
  
  void proc_sched_set_task(struct task_struct *p)
  {
  #ifdef CONFIG_SCHEDSTATS
  	p->se.wait_max				= 0;
  	p->se.wait_sum				= 0;
  	p->se.wait_count			= 0;
  	p->se.sleep_max				= 0;
  	p->se.sum_sleep_runtime			= 0;
  	p->se.block_max				= 0;
  	p->se.exec_max				= 0;
  	p->se.slice_max				= 0;
  	p->se.nr_migrations			= 0;
  	p->se.nr_migrations_cold		= 0;
  	p->se.nr_failed_migrations_affine	= 0;
  	p->se.nr_failed_migrations_running	= 0;
  	p->se.nr_failed_migrations_hot		= 0;
  	p->se.nr_forced_migrations		= 0;
  	p->se.nr_forced2_migrations		= 0;
  	p->se.nr_wakeups			= 0;
  	p->se.nr_wakeups_sync			= 0;
  	p->se.nr_wakeups_migrate		= 0;
  	p->se.nr_wakeups_local			= 0;
  	p->se.nr_wakeups_remote			= 0;
  	p->se.nr_wakeups_affine			= 0;
  	p->se.nr_wakeups_affine_attempts	= 0;
  	p->se.nr_wakeups_passive		= 0;
  	p->se.nr_wakeups_idle			= 0;
  	p->sched_info.bkl_count			= 0;
  #endif
  	p->se.sum_exec_runtime			= 0;
  	p->se.prev_sum_exec_runtime		= 0;
  	p->nvcsw				= 0;
  	p->nivcsw				= 0;
  }