Blame view

kernel/sched_debug.c 11.8 KB
43ae34cb4   Ingo Molnar   sched: scheduler ...
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
  /*
   * kernel/sched_debug.c
   *
   * Print the CFS rbtree
   *
   * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
   *
   * This program is free software; you can redistribute it and/or modify
   * it under the terms of the GNU General Public License version 2 as
   * published by the Free Software Foundation.
   */
  
  #include <linux/proc_fs.h>
  #include <linux/sched.h>
  #include <linux/seq_file.h>
  #include <linux/kallsyms.h>
  #include <linux/utsname.h>
efe25c2c7   Bharata B Rao   sched: Reinstate ...
18
  static DEFINE_SPINLOCK(sched_debug_lock);
43ae34cb4   Ingo Molnar   sched: scheduler ...
19
20
21
22
23
24
25
26
27
28
29
  /*
   * This allows printing both to /proc/sched_debug and
   * to the console
   */
  #define SEQ_printf(m, x...)			\
   do {						\
  	if (m)					\
  		seq_printf(m, x);		\
  	else					\
  		printk(x);			\
   } while (0)
ef83a5714   Ingo Molnar   sched: enhance de...
30
31
32
  /*
   * Ease the printing of nsec fields:
   */
90b2628f1   Ingo Molnar   sched: fix gcc wa...
33
  static long long nsec_high(unsigned long long nsec)
ef83a5714   Ingo Molnar   sched: enhance de...
34
  {
90b2628f1   Ingo Molnar   sched: fix gcc wa...
35
  	if ((long long)nsec < 0) {
ef83a5714   Ingo Molnar   sched: enhance de...
36
37
38
39
40
41
42
43
  		nsec = -nsec;
  		do_div(nsec, 1000000);
  		return -nsec;
  	}
  	do_div(nsec, 1000000);
  
  	return nsec;
  }
90b2628f1   Ingo Molnar   sched: fix gcc wa...
44
  static unsigned long nsec_low(unsigned long long nsec)
ef83a5714   Ingo Molnar   sched: enhance de...
45
  {
90b2628f1   Ingo Molnar   sched: fix gcc wa...
46
  	if ((long long)nsec < 0)
ef83a5714   Ingo Molnar   sched: enhance de...
47
48
49
50
51
52
  		nsec = -nsec;
  
  	return do_div(nsec, 1000000);
  }
  
  #define SPLIT_NS(x) nsec_high(x), nsec_low(x)
ff9b48c35   Bharata B Rao   sched: include gr...
53
  #ifdef CONFIG_FAIR_GROUP_SCHED
5091faa44   Mike Galbraith   sched: Add 'autog...
54
  static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
ff9b48c35   Bharata B Rao   sched: include gr...
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
  {
  	struct sched_entity *se = tg->se[cpu];
  	if (!se)
  		return;
  
  #define P(F) \
  	SEQ_printf(m, "  .%-30s: %lld
  ", #F, (long long)F)
  #define PN(F) \
  	SEQ_printf(m, "  .%-30s: %lld.%06ld
  ", #F, SPLIT_NS((long long)F))
  
  	PN(se->exec_start);
  	PN(se->vruntime);
  	PN(se->sum_exec_runtime);
  #ifdef CONFIG_SCHEDSTATS
41acab885   Lucas De Marchi   sched: Implement ...
71
72
73
74
75
76
77
78
79
80
  	PN(se->statistics.wait_start);
  	PN(se->statistics.sleep_start);
  	PN(se->statistics.block_start);
  	PN(se->statistics.sleep_max);
  	PN(se->statistics.block_max);
  	PN(se->statistics.exec_max);
  	PN(se->statistics.slice_max);
  	PN(se->statistics.wait_max);
  	PN(se->statistics.wait_sum);
  	P(se->statistics.wait_count);
ff9b48c35   Bharata B Rao   sched: include gr...
81
82
83
84
85
86
  #endif
  	P(se->load.weight);
  #undef PN
  #undef P
  }
  #endif
efe25c2c7   Bharata B Rao   sched: Reinstate ...
87
88
89
90
91
  #ifdef CONFIG_CGROUP_SCHED
  static char group_path[PATH_MAX];
  
  static char *task_group_path(struct task_group *tg)
  {
8ecedd7a0   Bharata B Rao   sched: Display au...
92
93
  	if (autogroup_path(tg, group_path, PATH_MAX))
  		return group_path;
efe25c2c7   Bharata B Rao   sched: Reinstate ...
94
95
96
97
98
99
100
101
102
103
104
  	/*
  	 * May be NULL if the underlying cgroup isn't fully-created yet
  	 */
  	if (!tg->css.cgroup) {
  		group_path[0] = '\0';
  		return group_path;
  	}
  	cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
  	return group_path;
  }
  #endif
43ae34cb4   Ingo Molnar   sched: scheduler ...
105
  static void
a48da48b4   Ingo Molnar   sched debug: remo...
106
  print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
43ae34cb4   Ingo Molnar   sched: scheduler ...
107
108
109
110
111
  {
  	if (rq->curr == p)
  		SEQ_printf(m, "R");
  	else
  		SEQ_printf(m, " ");
ef83a5714   Ingo Molnar   sched: enhance de...
112
  	SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
43ae34cb4   Ingo Molnar   sched: scheduler ...
113
  		p->comm, p->pid,
ef83a5714   Ingo Molnar   sched: enhance de...
114
  		SPLIT_NS(p->se.vruntime),
43ae34cb4   Ingo Molnar   sched: scheduler ...
115
  		(long long)(p->nvcsw + p->nivcsw),
6f605d83d   Al Viro   take sched_debug....
116
  		p->prio);
6cfb0d5d0   Ingo Molnar   [PATCH] sched: re...
117
  #ifdef CONFIG_SCHEDSTATS
d19ca3087   Peter Zijlstra   sched: debug: add...
118
  	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
ef83a5714   Ingo Molnar   sched: enhance de...
119
120
  		SPLIT_NS(p->se.vruntime),
  		SPLIT_NS(p->se.sum_exec_runtime),
41acab885   Lucas De Marchi   sched: Implement ...
121
  		SPLIT_NS(p->se.statistics.sum_sleep_runtime));
6cfb0d5d0   Ingo Molnar   [PATCH] sched: re...
122
  #else
d19ca3087   Peter Zijlstra   sched: debug: add...
123
  	SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld",
ef83a5714   Ingo Molnar   sched: enhance de...
124
  		0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L);
6cfb0d5d0   Ingo Molnar   [PATCH] sched: re...
125
  #endif
efe25c2c7   Bharata B Rao   sched: Reinstate ...
126
127
128
  #ifdef CONFIG_CGROUP_SCHED
  	SEQ_printf(m, " %s", task_group_path(task_group(p)));
  #endif
d19ca3087   Peter Zijlstra   sched: debug: add...
129

d19ca3087   Peter Zijlstra   sched: debug: add...
130
131
  	SEQ_printf(m, "
  ");
43ae34cb4   Ingo Molnar   sched: scheduler ...
132
  }
a48da48b4   Ingo Molnar   sched debug: remo...
133
  static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
43ae34cb4   Ingo Molnar   sched: scheduler ...
134
135
  {
  	struct task_struct *g, *p;
ab63a633c   Peter Zijlstra   sched: fix uncond...
136
  	unsigned long flags;
43ae34cb4   Ingo Molnar   sched: scheduler ...
137
138
139
140
141
  
  	SEQ_printf(m,
  	"
  runnable tasks:
  "
c86da3a3d   Mike Galbraith   sched: fix format...
142
143
144
  	"            task   PID         tree-key  switches  prio"
  	"     exec-runtime         sum-exec        sum-sleep
  "
1a75b94f7   Ingo Molnar   sched: prettify /...
145
  	"------------------------------------------------------"
c86da3a3d   Mike Galbraith   sched: fix format...
146
147
  	"----------------------------------------------------
  ");
43ae34cb4   Ingo Molnar   sched: scheduler ...
148

ab63a633c   Peter Zijlstra   sched: fix uncond...
149
  	read_lock_irqsave(&tasklist_lock, flags);
43ae34cb4   Ingo Molnar   sched: scheduler ...
150
151
  
  	do_each_thread(g, p) {
fd2f4419b   Peter Zijlstra   sched: Provide p-...
152
  		if (!p->on_rq || task_cpu(p) != rq_cpu)
43ae34cb4   Ingo Molnar   sched: scheduler ...
153
  			continue;
a48da48b4   Ingo Molnar   sched debug: remo...
154
  		print_task(m, rq, p);
43ae34cb4   Ingo Molnar   sched: scheduler ...
155
  	} while_each_thread(g, p);
ab63a633c   Peter Zijlstra   sched: fix uncond...
156
  	read_unlock_irqrestore(&tasklist_lock, flags);
43ae34cb4   Ingo Molnar   sched: scheduler ...
157
  }
5cef9eca3   Ingo Molnar   sched: remove the...
158
  void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
43ae34cb4   Ingo Molnar   sched: scheduler ...
159
  {
86d9560cb   Ingo Molnar   sched: add more v...
160
161
  	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
  		spread, rq0_min_vruntime, spread0;
348ec61e6   Hitoshi Mitake   sched: Hide runqu...
162
  	struct rq *rq = cpu_rq(cpu);
67e12eac3   Ingo Molnar   sched: add se->vr...
163
164
  	struct sched_entity *last;
  	unsigned long flags;
efe25c2c7   Bharata B Rao   sched: Reinstate ...
165
166
167
168
169
  #ifdef CONFIG_FAIR_GROUP_SCHED
  	SEQ_printf(m, "
  cfs_rq[%d]:%s
  ", cpu, task_group_path(cfs_rq->tg));
  #else
ada18de2e   Peter Zijlstra   sched: debug: add...
170
171
172
  	SEQ_printf(m, "
  cfs_rq[%d]:
  ", cpu);
efe25c2c7   Bharata B Rao   sched: Reinstate ...
173
  #endif
ef83a5714   Ingo Molnar   sched: enhance de...
174
175
176
  	SEQ_printf(m, "  .%-30s: %Ld.%06ld
  ", "exec_clock",
  			SPLIT_NS(cfs_rq->exec_clock));
67e12eac3   Ingo Molnar   sched: add se->vr...
177

05fa785cf   Thomas Gleixner   sched: Convert rq...
178
  	raw_spin_lock_irqsave(&rq->lock, flags);
67e12eac3   Ingo Molnar   sched: add se->vr...
179
  	if (cfs_rq->rb_leftmost)
ac53db596   Rik van Riel   sched: Use a budd...
180
  		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
67e12eac3   Ingo Molnar   sched: add se->vr...
181
182
183
  	last = __pick_last_entity(cfs_rq);
  	if (last)
  		max_vruntime = last->vruntime;
5ac5c4d60   Peter Zijlstra   sched: clean up d...
184
  	min_vruntime = cfs_rq->min_vruntime;
348ec61e6   Hitoshi Mitake   sched: Hide runqu...
185
  	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
05fa785cf   Thomas Gleixner   sched: Convert rq...
186
  	raw_spin_unlock_irqrestore(&rq->lock, flags);
ef83a5714   Ingo Molnar   sched: enhance de...
187
188
189
190
191
192
193
194
195
  	SEQ_printf(m, "  .%-30s: %Ld.%06ld
  ", "MIN_vruntime",
  			SPLIT_NS(MIN_vruntime));
  	SEQ_printf(m, "  .%-30s: %Ld.%06ld
  ", "min_vruntime",
  			SPLIT_NS(min_vruntime));
  	SEQ_printf(m, "  .%-30s: %Ld.%06ld
  ", "max_vruntime",
  			SPLIT_NS(max_vruntime));
67e12eac3   Ingo Molnar   sched: add se->vr...
196
  	spread = max_vruntime - MIN_vruntime;
ef83a5714   Ingo Molnar   sched: enhance de...
197
198
199
  	SEQ_printf(m, "  .%-30s: %Ld.%06ld
  ", "spread",
  			SPLIT_NS(spread));
86d9560cb   Ingo Molnar   sched: add more v...
200
  	spread0 = min_vruntime - rq0_min_vruntime;
ef83a5714   Ingo Molnar   sched: enhance de...
201
202
203
  	SEQ_printf(m, "  .%-30s: %Ld.%06ld
  ", "spread0",
  			SPLIT_NS(spread0));
5ac5c4d60   Peter Zijlstra   sched: clean up d...
204
205
  	SEQ_printf(m, "  .%-30s: %d
  ", "nr_spread_over",
ddc972975   Peter Zijlstra   sched debug: chec...
206
  			cfs_rq->nr_spread_over);
2069dd75c   Peter Zijlstra   sched: Rewrite tg...
207
208
209
210
  	SEQ_printf(m, "  .%-30s: %ld
  ", "nr_running", cfs_rq->nr_running);
  	SEQ_printf(m, "  .%-30s: %ld
  ", "load", cfs_rq->load.weight);
c09595f63   Peter Zijlstra   sched: revert rev...
211
212
  #ifdef CONFIG_FAIR_GROUP_SCHED
  #ifdef CONFIG_SMP
2069dd75c   Peter Zijlstra   sched: Rewrite tg...
213
214
215
216
217
218
219
220
221
222
223
  	SEQ_printf(m, "  .%-30s: %Ld.%06ld
  ", "load_avg",
  			SPLIT_NS(cfs_rq->load_avg));
  	SEQ_printf(m, "  .%-30s: %Ld.%06ld
  ", "load_period",
  			SPLIT_NS(cfs_rq->load_period));
  	SEQ_printf(m, "  .%-30s: %ld
  ", "load_contrib",
  			cfs_rq->load_contribution);
  	SEQ_printf(m, "  .%-30s: %d
  ", "load_tg",
5091faa44   Mike Galbraith   sched: Add 'autog...
224
  			atomic_read(&cfs_rq->tg->load_weight));
c09595f63   Peter Zijlstra   sched: revert rev...
225
  #endif
2069dd75c   Peter Zijlstra   sched: Rewrite tg...
226

ff9b48c35   Bharata B Rao   sched: include gr...
227
  	print_cfs_group_stats(m, cpu, cfs_rq->tg);
c09595f63   Peter Zijlstra   sched: revert rev...
228
  #endif
43ae34cb4   Ingo Molnar   sched: scheduler ...
229
  }
ada18de2e   Peter Zijlstra   sched: debug: add...
230
231
  void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
  {
efe25c2c7   Bharata B Rao   sched: Reinstate ...
232
233
234
235
236
  #ifdef CONFIG_RT_GROUP_SCHED
  	SEQ_printf(m, "
  rt_rq[%d]:%s
  ", cpu, task_group_path(rt_rq->tg));
  #else
ada18de2e   Peter Zijlstra   sched: debug: add...
237
238
239
  	SEQ_printf(m, "
  rt_rq[%d]:
  ", cpu);
efe25c2c7   Bharata B Rao   sched: Reinstate ...
240
  #endif
ada18de2e   Peter Zijlstra   sched: debug: add...
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
  
  #define P(x) \
  	SEQ_printf(m, "  .%-30s: %Ld
  ", #x, (long long)(rt_rq->x))
  #define PN(x) \
  	SEQ_printf(m, "  .%-30s: %Ld.%06ld
  ", #x, SPLIT_NS(rt_rq->x))
  
  	P(rt_nr_running);
  	P(rt_throttled);
  	PN(rt_time);
  	PN(rt_runtime);
  
  #undef PN
  #undef P
  }
5bb6b1ea6   Peter Zijlstra   sched: Add some c...
257
  extern __read_mostly int sched_clock_running;
a48da48b4   Ingo Molnar   sched debug: remo...
258
  static void print_cpu(struct seq_file *m, int cpu)
43ae34cb4   Ingo Molnar   sched: scheduler ...
259
  {
348ec61e6   Hitoshi Mitake   sched: Hide runqu...
260
  	struct rq *rq = cpu_rq(cpu);
efe25c2c7   Bharata B Rao   sched: Reinstate ...
261
  	unsigned long flags;
43ae34cb4   Ingo Molnar   sched: scheduler ...
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
  
  #ifdef CONFIG_X86
  	{
  		unsigned int freq = cpu_khz ? : 1;
  
  		SEQ_printf(m, "
  cpu#%d, %u.%03u MHz
  ",
  			   cpu, freq / 1000, (freq % 1000));
  	}
  #else
  	SEQ_printf(m, "
  cpu#%d
  ", cpu);
  #endif
  
  #define P(x) \
  	SEQ_printf(m, "  .%-30s: %Ld
  ", #x, (long long)(rq->x))
ef83a5714   Ingo Molnar   sched: enhance de...
281
282
283
  #define PN(x) \
  	SEQ_printf(m, "  .%-30s: %Ld.%06ld
  ", #x, SPLIT_NS(rq->x))
43ae34cb4   Ingo Molnar   sched: scheduler ...
284
285
286
287
  
  	P(nr_running);
  	SEQ_printf(m, "  .%-30s: %lu
  ", "load",
495eca494   Dmitry Adamushko   sched: clean up s...
288
  		   rq->load.weight);
43ae34cb4   Ingo Molnar   sched: scheduler ...
289
290
291
  	P(nr_switches);
  	P(nr_load_updates);
  	P(nr_uninterruptible);
ef83a5714   Ingo Molnar   sched: enhance de...
292
  	PN(next_balance);
43ae34cb4   Ingo Molnar   sched: scheduler ...
293
  	P(curr->pid);
ef83a5714   Ingo Molnar   sched: enhance de...
294
  	PN(clock);
43ae34cb4   Ingo Molnar   sched: scheduler ...
295
296
297
298
299
300
  	P(cpu_load[0]);
  	P(cpu_load[1]);
  	P(cpu_load[2]);
  	P(cpu_load[3]);
  	P(cpu_load[4]);
  #undef P
ef83a5714   Ingo Molnar   sched: enhance de...
301
  #undef PN
43ae34cb4   Ingo Molnar   sched: scheduler ...
302

5ac5c4d60   Peter Zijlstra   sched: clean up d...
303
304
305
  #ifdef CONFIG_SCHEDSTATS
  #define P(n) SEQ_printf(m, "  .%-30s: %d
  ", #n, rq->n);
1b9508f68   Mike Galbraith   sched: Rate-limit...
306
307
  #define P64(n) SEQ_printf(m, "  .%-30s: %Ld
  ", #n, rq->n);
5ac5c4d60   Peter Zijlstra   sched: clean up d...
308

5ac5c4d60   Peter Zijlstra   sched: clean up d...
309
310
311
312
313
  	P(yld_count);
  
  	P(sched_switch);
  	P(sched_count);
  	P(sched_goidle);
1b9508f68   Mike Galbraith   sched: Rate-limit...
314
315
316
  #ifdef CONFIG_SMP
  	P64(avg_idle);
  #endif
5ac5c4d60   Peter Zijlstra   sched: clean up d...
317
318
319
  
  	P(ttwu_count);
  	P(ttwu_local);
5ac5c4d60   Peter Zijlstra   sched: clean up d...
320
  #undef P
fce209798   Yong Zhang   sched: Replace rq...
321
  #undef P64
5ac5c4d60   Peter Zijlstra   sched: clean up d...
322
  #endif
efe25c2c7   Bharata B Rao   sched: Reinstate ...
323
  	spin_lock_irqsave(&sched_debug_lock, flags);
5cef9eca3   Ingo Molnar   sched: remove the...
324
  	print_cfs_stats(m, cpu);
ada18de2e   Peter Zijlstra   sched: debug: add...
325
  	print_rt_stats(m, cpu);
43ae34cb4   Ingo Molnar   sched: scheduler ...
326

efe25c2c7   Bharata B Rao   sched: Reinstate ...
327
  	rcu_read_lock();
a48da48b4   Ingo Molnar   sched debug: remo...
328
  	print_rq(m, rq, cpu);
efe25c2c7   Bharata B Rao   sched: Reinstate ...
329
330
  	rcu_read_unlock();
  	spin_unlock_irqrestore(&sched_debug_lock, flags);
43ae34cb4   Ingo Molnar   sched: scheduler ...
331
  }
1983a922a   Christian Ehrhardt   sched: Make tunab...
332
333
334
335
336
  static const char *sched_tunable_scaling_names[] = {
  	"none",
  	"logaritmic",
  	"linear"
  };
43ae34cb4   Ingo Molnar   sched: scheduler ...
337
338
  static int sched_debug_show(struct seq_file *m, void *v)
  {
5bb6b1ea6   Peter Zijlstra   sched: Add some c...
339
340
  	u64 ktime, sched_clk, cpu_clk;
  	unsigned long flags;
43ae34cb4   Ingo Molnar   sched: scheduler ...
341
  	int cpu;
5bb6b1ea6   Peter Zijlstra   sched: Add some c...
342
343
344
345
346
347
348
349
  	local_irq_save(flags);
  	ktime = ktime_to_ns(ktime_get());
  	sched_clk = sched_clock();
  	cpu_clk = local_clock();
  	local_irq_restore(flags);
  
  	SEQ_printf(m, "Sched Debug Version: v0.10, %s %.*s
  ",
43ae34cb4   Ingo Molnar   sched: scheduler ...
350
351
352
  		init_utsname()->release,
  		(int)strcspn(init_utsname()->version, " "),
  		init_utsname()->version);
5bb6b1ea6   Peter Zijlstra   sched: Add some c...
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
  #define P(x) \
  	SEQ_printf(m, "%-40s: %Ld
  ", #x, (long long)(x))
  #define PN(x) \
  	SEQ_printf(m, "%-40s: %Ld.%06ld
  ", #x, SPLIT_NS(x))
  	PN(ktime);
  	PN(sched_clk);
  	PN(cpu_clk);
  	P(jiffies);
  #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
  	P(sched_clock_stable);
  #endif
  #undef PN
  #undef P
  
  	SEQ_printf(m, "
  ");
  	SEQ_printf(m, "sysctl_sched
  ");
43ae34cb4   Ingo Molnar   sched: scheduler ...
373

1aa4731ef   Ingo Molnar   sched debug: prin...
374
  #define P(x) \
d822ceced   Ingo Molnar   sched debug: more...
375
376
  	SEQ_printf(m, "  .%-40s: %Ld
  ", #x, (long long)(x))
1aa4731ef   Ingo Molnar   sched debug: prin...
377
  #define PN(x) \
d822ceced   Ingo Molnar   sched debug: more...
378
379
  	SEQ_printf(m, "  .%-40s: %Ld.%06ld
  ", #x, SPLIT_NS(x))
1aa4731ef   Ingo Molnar   sched debug: prin...
380
  	PN(sysctl_sched_latency);
b2be5e96d   Peter Zijlstra   sched: reintroduc...
381
  	PN(sysctl_sched_min_granularity);
1aa4731ef   Ingo Molnar   sched debug: prin...
382
  	PN(sysctl_sched_wakeup_granularity);
eebef7469   Josh Hunt   sched: Use correc...
383
  	P(sysctl_sched_child_runs_first);
1aa4731ef   Ingo Molnar   sched debug: prin...
384
385
386
  	P(sysctl_sched_features);
  #undef PN
  #undef P
1983a922a   Christian Ehrhardt   sched: Make tunab...
387
388
389
390
  	SEQ_printf(m, "  .%-40s: %d (%s)
  ", "sysctl_sched_tunable_scaling",
  		sysctl_sched_tunable_scaling,
  		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
43ae34cb4   Ingo Molnar   sched: scheduler ...
391
  	for_each_online_cpu(cpu)
a48da48b4   Ingo Molnar   sched debug: remo...
392
  		print_cpu(m, cpu);
43ae34cb4   Ingo Molnar   sched: scheduler ...
393
394
395
396
397
398
  
  	SEQ_printf(m, "
  ");
  
  	return 0;
  }
f33734619   Josh Triplett   [PATCH] sched: ma...
399
  static void sysrq_sched_debug_show(void)
43ae34cb4   Ingo Molnar   sched: scheduler ...
400
401
402
403
404
405
406
407
  {
  	sched_debug_show(NULL, NULL);
  }
  
/* open() handler for /proc/sched_debug: wire up the seq_file show op. */
static int sched_debug_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_debug_show, NULL);
}
0dbee3a6b   Arjan van de Ven   Make scheduler de...
408
  static const struct file_operations sched_debug_fops = {
43ae34cb4   Ingo Molnar   sched: scheduler ...
409
410
411
  	.open		= sched_debug_open,
  	.read		= seq_read,
  	.llseek		= seq_lseek,
5ea473a1d   Alexey Dobriyan   Fix leaks on /pro...
412
  	.release	= single_release,
43ae34cb4   Ingo Molnar   sched: scheduler ...
413
414
415
416
417
  };
  
  static int __init init_sched_debug_procfs(void)
  {
  	struct proc_dir_entry *pe;
a9cf4ddb3   Li Zefan   sched: change sch...
418
  	pe = proc_create("sched_debug", 0444, NULL, &sched_debug_fops);
43ae34cb4   Ingo Molnar   sched: scheduler ...
419
420
  	if (!pe)
  		return -ENOMEM;
43ae34cb4   Ingo Molnar   sched: scheduler ...
421
422
423
424
425
426
427
  	return 0;
  }
  
  __initcall(init_sched_debug_procfs);
  
  void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
  {
cc367732f   Ingo Molnar   sched: debug, imp...
428
  	unsigned long nr_switches;
43ae34cb4   Ingo Molnar   sched: scheduler ...
429

5089a9768   Oleg Nesterov   proc_sched_show_t...
430
431
432
  	SEQ_printf(m, "%s (%d, #threads: %d)
  ", p->comm, p->pid,
  						get_nr_threads(p));
2d92f2278   Ingo Molnar   sched: debug: inc...
433
434
435
  	SEQ_printf(m,
  		"---------------------------------------------------------
  ");
cc367732f   Ingo Molnar   sched: debug, imp...
436
437
438
  #define __P(F) \
  	SEQ_printf(m, "%-35s:%21Ld
  ", #F, (long long)F)
43ae34cb4   Ingo Molnar   sched: scheduler ...
439
  #define P(F) \
2d92f2278   Ingo Molnar   sched: debug: inc...
440
441
  	SEQ_printf(m, "%-35s:%21Ld
  ", #F, (long long)p->F)
cc367732f   Ingo Molnar   sched: debug, imp...
442
443
444
  #define __PN(F) \
  	SEQ_printf(m, "%-35s:%14Ld.%06ld
  ", #F, SPLIT_NS((long long)F))
ef83a5714   Ingo Molnar   sched: enhance de...
445
  #define PN(F) \
2d92f2278   Ingo Molnar   sched: debug: inc...
446
447
  	SEQ_printf(m, "%-35s:%14Ld.%06ld
  ", #F, SPLIT_NS((long long)p->F))
43ae34cb4   Ingo Molnar   sched: scheduler ...
448

ef83a5714   Ingo Molnar   sched: enhance de...
449
450
451
  	PN(se.exec_start);
  	PN(se.vruntime);
  	PN(se.sum_exec_runtime);
6cfb0d5d0   Ingo Molnar   [PATCH] sched: re...
452

cc367732f   Ingo Molnar   sched: debug, imp...
453
  	nr_switches = p->nvcsw + p->nivcsw;
6cfb0d5d0   Ingo Molnar   [PATCH] sched: re...
454
  #ifdef CONFIG_SCHEDSTATS
41acab885   Lucas De Marchi   sched: Implement ...
455
456
457
458
459
460
461
462
463
464
465
466
  	PN(se.statistics.wait_start);
  	PN(se.statistics.sleep_start);
  	PN(se.statistics.block_start);
  	PN(se.statistics.sleep_max);
  	PN(se.statistics.block_max);
  	PN(se.statistics.exec_max);
  	PN(se.statistics.slice_max);
  	PN(se.statistics.wait_max);
  	PN(se.statistics.wait_sum);
  	P(se.statistics.wait_count);
  	PN(se.statistics.iowait_sum);
  	P(se.statistics.iowait_count);
cc367732f   Ingo Molnar   sched: debug, imp...
467
  	P(se.nr_migrations);
41acab885   Lucas De Marchi   sched: Implement ...
468
469
470
471
472
473
474
475
476
477
478
479
480
481
  	P(se.statistics.nr_migrations_cold);
  	P(se.statistics.nr_failed_migrations_affine);
  	P(se.statistics.nr_failed_migrations_running);
  	P(se.statistics.nr_failed_migrations_hot);
  	P(se.statistics.nr_forced_migrations);
  	P(se.statistics.nr_wakeups);
  	P(se.statistics.nr_wakeups_sync);
  	P(se.statistics.nr_wakeups_migrate);
  	P(se.statistics.nr_wakeups_local);
  	P(se.statistics.nr_wakeups_remote);
  	P(se.statistics.nr_wakeups_affine);
  	P(se.statistics.nr_wakeups_affine_attempts);
  	P(se.statistics.nr_wakeups_passive);
  	P(se.statistics.nr_wakeups_idle);
cc367732f   Ingo Molnar   sched: debug, imp...
482
483
484
485
486
487
488
489
490
491
492
  
  	{
  		u64 avg_atom, avg_per_cpu;
  
  		avg_atom = p->se.sum_exec_runtime;
  		if (nr_switches)
  			do_div(avg_atom, nr_switches);
  		else
  			avg_atom = -1LL;
  
  		avg_per_cpu = p->se.sum_exec_runtime;
c1a89740d   Ingo Molnar   sched: clean up o...
493
  		if (p->se.nr_migrations) {
6f6d6a1a6   Roman Zippel   rename div64_64 t...
494
495
  			avg_per_cpu = div64_u64(avg_per_cpu,
  						p->se.nr_migrations);
c1a89740d   Ingo Molnar   sched: clean up o...
496
  		} else {
cc367732f   Ingo Molnar   sched: debug, imp...
497
  			avg_per_cpu = -1LL;
c1a89740d   Ingo Molnar   sched: clean up o...
498
  		}
cc367732f   Ingo Molnar   sched: debug, imp...
499
500
501
502
  
  		__PN(avg_atom);
  		__PN(avg_per_cpu);
  	}
6cfb0d5d0   Ingo Molnar   [PATCH] sched: re...
503
  #endif
cc367732f   Ingo Molnar   sched: debug, imp...
504
  	__P(nr_switches);
2d92f2278   Ingo Molnar   sched: debug: inc...
505
506
  	SEQ_printf(m, "%-35s:%21Ld
  ",
cc367732f   Ingo Molnar   sched: debug, imp...
507
508
509
510
  		   "nr_voluntary_switches", (long long)p->nvcsw);
  	SEQ_printf(m, "%-35s:%21Ld
  ",
  		   "nr_involuntary_switches", (long long)p->nivcsw);
43ae34cb4   Ingo Molnar   sched: scheduler ...
511
512
513
  	P(se.load.weight);
  	P(policy);
  	P(prio);
ef83a5714   Ingo Molnar   sched: enhance de...
514
  #undef PN
cc367732f   Ingo Molnar   sched: debug, imp...
515
516
517
  #undef __PN
  #undef P
  #undef __P
43ae34cb4   Ingo Molnar   sched: scheduler ...
518
519
  
  	{
29d7b90c1   Ingo Molnar   sched: fix kernel...
520
  		unsigned int this_cpu = raw_smp_processor_id();
43ae34cb4   Ingo Molnar   sched: scheduler ...
521
  		u64 t0, t1;
29d7b90c1   Ingo Molnar   sched: fix kernel...
522
523
  		t0 = cpu_clock(this_cpu);
  		t1 = cpu_clock(this_cpu);
2d92f2278   Ingo Molnar   sched: debug: inc...
524
525
  		SEQ_printf(m, "%-35s:%21Ld
  ",
43ae34cb4   Ingo Molnar   sched: scheduler ...
526
527
528
529
530
531
  			   "clock-delta", (long long)(t1-t0));
  	}
  }
  
  void proc_sched_set_task(struct task_struct *p)
  {
6cfb0d5d0   Ingo Molnar   [PATCH] sched: re...
532
  #ifdef CONFIG_SCHEDSTATS
41acab885   Lucas De Marchi   sched: Implement ...
533
  	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
6cfb0d5d0   Ingo Molnar   [PATCH] sched: re...
534
  #endif
43ae34cb4   Ingo Molnar   sched: scheduler ...
535
  }