  /*
   * kernel/sched/debug.c
   *
   * Print the CFS rbtree
   *
   * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
   *
   * This program is free software; you can redistribute it and/or modify
   * it under the terms of the GNU General Public License version 2 as
   * published by the Free Software Foundation.
   */
  
  #include <linux/proc_fs.h>
  #include <linux/sched.h>
  #include <linux/seq_file.h>
  #include <linux/kallsyms.h>
  #include <linux/utsname.h>
  #include <linux/mempolicy.h>
  #include <linux/debugfs.h>

  #include "sched.h"
  static DEFINE_SPINLOCK(sched_debug_lock);
  /*
   * This allows printing both to /proc/sched_debug and
   * to the console
   */
  #define SEQ_printf(m, x...)			\
   do {						\
  	if (m)					\
  		seq_printf(m, x);		\
  	else					\
  		printk(x);			\
   } while (0)
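  /*
   * A NULL seq_file routes the output through printk(); this is how
   * sysrq_sched_debug_show() below dumps the same report to the console.
   */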
  /*
   * Ease the printing of nsec fields:
   */
  static long long nsec_high(unsigned long long nsec)
  {
  	if ((long long)nsec < 0) {
  		nsec = -nsec;
  		do_div(nsec, 1000000);
  		return -nsec;
  	}
  	do_div(nsec, 1000000);
  
  	return nsec;
  }
  
  static unsigned long nsec_low(unsigned long long nsec)
  {
  	if ((long long)nsec < 0)
  		nsec = -nsec;
  
  	return do_div(nsec, 1000000);
  }
  
  #define SPLIT_NS(x) nsec_high(x), nsec_low(x)
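  /*
   * Example (illustrative): for x == 2345678 (ns),
   * SEQ_printf(m, "%Ld.%06ld\n", SPLIT_NS(x)) prints "2.345678":
   * nsec_high() yields the milliseconds, nsec_low() the sub-millisecond
   * remainder in nanoseconds.
   */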
  #define SCHED_FEAT(name, enabled)	\
  	#name ,
  
  static const char * const sched_feat_names[] = {
  #include "features.h"
  };
  
  #undef SCHED_FEAT
  
  static int sched_feat_show(struct seq_file *m, void *v)
  {
  	int i;
  
  	for (i = 0; i < __SCHED_FEAT_NR; i++) {
  		if (!(sysctl_sched_features & (1UL << i)))
  			seq_puts(m, "NO_");
  		seq_printf(m, "%s ", sched_feat_names[i]);
  	}
  	seq_puts(m, "\n");
  
  	return 0;
  }
  
  #ifdef HAVE_JUMP_LABEL
  
  #define jump_label_key__true  STATIC_KEY_INIT_TRUE
  #define jump_label_key__false STATIC_KEY_INIT_FALSE
  
  #define SCHED_FEAT(name, enabled)	\
  	jump_label_key__##enabled ,
  
  struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
  #include "features.h"
  };
  
  #undef SCHED_FEAT
  
  static void sched_feat_disable(int i)
  {
  	static_key_disable(&sched_feat_keys[i]);
  }
  
  static void sched_feat_enable(int i)
  {
  	static_key_enable(&sched_feat_keys[i]);
  }
  #else
  static void sched_feat_disable(int i) { };
  static void sched_feat_enable(int i) { };
  #endif /* HAVE_JUMP_LABEL */
  
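  /*
   * Match @cmp (optionally prefixed with "NO_") against the feature names
   * and flip the corresponding bit. Returns the index of the matched
   * feature, or __SCHED_FEAT_NR when nothing matched (the write handler
   * turns that into -EINVAL).
   */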
  static int sched_feat_set(char *cmp)
  {
  	int i;
  	int neg = 0;
  
  	if (strncmp(cmp, "NO_", 3) == 0) {
  		neg = 1;
  		cmp += 3;
  	}
  
  	for (i = 0; i < __SCHED_FEAT_NR; i++) {
  		if (strcmp(cmp, sched_feat_names[i]) == 0) {
  			if (neg) {
  				sysctl_sched_features &= ~(1UL << i);
  				sched_feat_disable(i);
  			} else {
  				sysctl_sched_features |= (1UL << i);
  				sched_feat_enable(i);
  			}
  			break;
  		}
  	}
  
  	return i;
  }
  
  static ssize_t
  sched_feat_write(struct file *filp, const char __user *ubuf,
  		size_t cnt, loff_t *ppos)
  {
  	char buf[64];
  	char *cmp;
  	int i;
  	struct inode *inode;
  
  	if (cnt > 63)
  		cnt = 63;
  
  	if (copy_from_user(&buf, ubuf, cnt))
  		return -EFAULT;
  
  	buf[cnt] = 0;
  	cmp = strstrip(buf);
  
  	/* Ensure the static_key remains in a consistent state */
  	inode = file_inode(filp);
  	inode_lock(inode);
  	i = sched_feat_set(cmp);
  	inode_unlock(inode);
  	if (i == __SCHED_FEAT_NR)
  		return -EINVAL;
  
  	*ppos += cnt;
  
  	return cnt;
  }
  
  static int sched_feat_open(struct inode *inode, struct file *filp)
  {
  	return single_open(filp, sched_feat_show, NULL);
  }
  
  static const struct file_operations sched_feat_fops = {
  	.open		= sched_feat_open,
  	.write		= sched_feat_write,
  	.read		= seq_read,
  	.llseek		= seq_lseek,
  	.release	= single_release,
  };
  
  static __init int sched_init_debug(void)
  {
  	debugfs_create_file("sched_features", 0644, NULL, NULL,
  			&sched_feat_fops);
  
  	return 0;
  }
  late_initcall(sched_init_debug);
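  /*
   * Example usage (illustrative; the actual feature list depends on
   * kernel/sched/features.h and the kernel config):
   *
   *   # cat /sys/kernel/debug/sched_features
   *   GENTLE_FAIR_SLEEPERS START_DEBIT ... NO_HRTICK ...
   *   # echo NO_GENTLE_FAIR_SLEEPERS > /sys/kernel/debug/sched_features
   *
   * Writing a bare name sets the feature; a "NO_" prefix clears it.
   */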
  #ifdef CONFIG_SMP
  
  #ifdef CONFIG_SYSCTL
  
  static struct ctl_table sd_ctl_dir[] = {
  	{
  		.procname	= "sched_domain",
  		.mode		= 0555,
  	},
  	{}
  };
  
  static struct ctl_table sd_ctl_root[] = {
  	{
  		.procname	= "kernel",
  		.mode		= 0555,
  		.child		= sd_ctl_dir,
  	},
  	{}
  };
  
  static struct ctl_table *sd_alloc_ctl_entry(int n)
  {
  	struct ctl_table *entry =
  		kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
  
  	return entry;
  }
  
  static void sd_free_ctl_entry(struct ctl_table **tablep)
  {
  	struct ctl_table *entry;
  
  	/*
  	 * In the intermediate directories, both the child directory and
  	 * procname are dynamically allocated and could fail but the mode
  	 * will always be set. In the lowest directory the names are
  	 * static strings and all have proc handlers.
  	 */
  	for (entry = *tablep; entry->mode; entry++) {
  		if (entry->child)
  			sd_free_ctl_entry(&entry->child);
  		if (entry->proc_handler == NULL)
  			kfree(entry->procname);
  	}
  
  	kfree(*tablep);
  	*tablep = NULL;
  }
  
  static int min_load_idx = 0;
  static int max_load_idx = CPU_LOAD_IDX_MAX-1;
  
  static void
  set_table_entry(struct ctl_table *entry,
  		const char *procname, void *data, int maxlen,
  		umode_t mode, proc_handler *proc_handler,
  		bool load_idx)
  {
  	entry->procname = procname;
  	entry->data = data;
  	entry->maxlen = maxlen;
  	entry->mode = mode;
  	entry->proc_handler = proc_handler;
  
  	if (load_idx) {
  		entry->extra1 = &min_load_idx;
  		entry->extra2 = &max_load_idx;
  	}
  }
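  /*
   * For the load_idx entries above, extra1/extra2 make
   * proc_dointvec_minmax() clamp written values to [0, CPU_LOAD_IDX_MAX - 1].
   */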
  
  static struct ctl_table *
  sd_alloc_ctl_domain_table(struct sched_domain *sd)
  {
  	struct ctl_table *table = sd_alloc_ctl_entry(14);
  
  	if (table == NULL)
  		return NULL;
  
  	set_table_entry(&table[0], "min_interval", &sd->min_interval,
  		sizeof(long), 0644, proc_doulongvec_minmax, false);
  	set_table_entry(&table[1], "max_interval", &sd->max_interval,
  		sizeof(long), 0644, proc_doulongvec_minmax, false);
  	set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
  		sizeof(int), 0644, proc_dointvec_minmax, true);
  	set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
  		sizeof(int), 0644, proc_dointvec_minmax, true);
  	set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
  		sizeof(int), 0644, proc_dointvec_minmax, true);
  	set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
  		sizeof(int), 0644, proc_dointvec_minmax, true);
  	set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
  		sizeof(int), 0644, proc_dointvec_minmax, true);
  	set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
  		sizeof(int), 0644, proc_dointvec_minmax, false);
  	set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
  		sizeof(int), 0644, proc_dointvec_minmax, false);
  	set_table_entry(&table[9], "cache_nice_tries",
  		&sd->cache_nice_tries,
  		sizeof(int), 0644, proc_dointvec_minmax, false);
  	set_table_entry(&table[10], "flags", &sd->flags,
  		sizeof(int), 0644, proc_dointvec_minmax, false);
  	set_table_entry(&table[11], "max_newidle_lb_cost",
  		&sd->max_newidle_lb_cost,
  		sizeof(long), 0644, proc_doulongvec_minmax, false);
  	set_table_entry(&table[12], "name", sd->name,
  		CORENAME_MAX_SIZE, 0444, proc_dostring, false);
  	/* &table[13] is terminator */
  
  	return table;
  }
  
  static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
  {
  	struct ctl_table *entry, *table;
  	struct sched_domain *sd;
  	int domain_num = 0, i;
  	char buf[32];
  
  	for_each_domain(cpu, sd)
  		domain_num++;
  	entry = table = sd_alloc_ctl_entry(domain_num + 1);
  	if (table == NULL)
  		return NULL;
  
  	i = 0;
  	for_each_domain(cpu, sd) {
  		snprintf(buf, 32, "domain%d", i);
  		entry->procname = kstrdup(buf, GFP_KERNEL);
  		entry->mode = 0555;
  		entry->child = sd_alloc_ctl_domain_table(sd);
  		entry++;
  		i++;
  	}
  	return table;
  }
  
  static struct ctl_table_header *sd_sysctl_header;
  void register_sched_domain_sysctl(void)
  {
  	int i, cpu_num = num_possible_cpus();
  	struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
  	char buf[32];
  
  	WARN_ON(sd_ctl_dir[0].child);
  	sd_ctl_dir[0].child = entry;
  
  	if (entry == NULL)
  		return;
  
  	for_each_possible_cpu(i) {
  		snprintf(buf, 32, "cpu%d", i);
  		entry->procname = kstrdup(buf, GFP_KERNEL);
  		entry->mode = 0555;
  		entry->child = sd_alloc_ctl_cpu_table(i);
  		entry++;
  	}
  
  	WARN_ON(sd_sysctl_header);
  	sd_sysctl_header = register_sysctl_table(sd_ctl_root);
  }
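  /*
   * The resulting tree (illustrative) is
   * /proc/sys/kernel/sched_domain/cpu<N>/domain<M>/{min_interval, ...}:
   * one "cpu" directory per possible CPU, with one "domain" directory for
   * each sched_domain level above it.
   */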
  
  /* may be called multiple times per register */
  void unregister_sched_domain_sysctl(void)
  {
  	unregister_sysctl_table(sd_sysctl_header);
  	sd_sysctl_header = NULL;
  	if (sd_ctl_dir[0].child)
  		sd_free_ctl_entry(&sd_ctl_dir[0].child);
  }
  #endif /* CONFIG_SYSCTL */
  #endif /* CONFIG_SMP */
  #ifdef CONFIG_FAIR_GROUP_SCHED
  static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
  {
  	struct sched_entity *se = tg->se[cpu];
  
  #define P(F) \
  	SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)F)
  #define P_SCHEDSTAT(F) \
  	SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)schedstat_val(F))
  #define PN(F) \
  	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
  #define PN_SCHEDSTAT(F) \
  	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(F)))
  
  	if (!se)
  		return;
  
  	PN(se->exec_start);
  	PN(se->vruntime);
  	PN(se->sum_exec_runtime);
  	if (schedstat_enabled()) {
  		PN_SCHEDSTAT(se->statistics.wait_start);
  		PN_SCHEDSTAT(se->statistics.sleep_start);
  		PN_SCHEDSTAT(se->statistics.block_start);
  		PN_SCHEDSTAT(se->statistics.sleep_max);
  		PN_SCHEDSTAT(se->statistics.block_max);
  		PN_SCHEDSTAT(se->statistics.exec_max);
  		PN_SCHEDSTAT(se->statistics.slice_max);
  		PN_SCHEDSTAT(se->statistics.wait_max);
  		PN_SCHEDSTAT(se->statistics.wait_sum);
  		P_SCHEDSTAT(se->statistics.wait_count);
  	}
  	P(se->load.weight);
  #ifdef CONFIG_SMP
  	P(se->avg.load_avg);
  	P(se->avg.util_avg);
  #endif
  
  #undef PN_SCHEDSTAT
  #undef PN
  #undef P_SCHEDSTAT
  #undef P
  }
  #endif
  #ifdef CONFIG_CGROUP_SCHED
  static char group_path[PATH_MAX];
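  /*
   * A single static buffer: callers of task_group_path() are serialized
   * by sched_debug_lock, taken in print_cpu().
   */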
  
  static char *task_group_path(struct task_group *tg)
  {
  	if (autogroup_path(tg, group_path, PATH_MAX))
  		return group_path;
  	cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
  	return group_path;
  }
  #endif
  static void
  print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
  {
  	if (rq->curr == p)
  		SEQ_printf(m, "R");
  	else
  		SEQ_printf(m, " ");
  	SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
  		p->comm, task_pid_nr(p),
  		SPLIT_NS(p->se.vruntime),
  		(long long)(p->nvcsw + p->nivcsw),
  		p->prio);
  
  	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
  		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.wait_sum)),
  		SPLIT_NS(p->se.sum_exec_runtime),
  		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.sum_sleep_runtime)));
  
  #ifdef CONFIG_NUMA_BALANCING
  	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
  #endif
  #ifdef CONFIG_CGROUP_SCHED
  	SEQ_printf(m, " %s", task_group_path(task_group(p)));
  #endif
  
  	SEQ_printf(m, "\n");
  }
  static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
  {
  	struct task_struct *g, *p;
  
  	SEQ_printf(m,
  	"\nrunnable tasks:\n"
  	"            task   PID         tree-key  switches  prio"
  	"     wait-time             sum-exec        sum-sleep\n"
  	"------------------------------------------------------"
  	"----------------------------------------------------\n");
  
  	rcu_read_lock();
  	for_each_process_thread(g, p) {
  		if (task_cpu(p) != rq_cpu)
  			continue;
  		print_task(m, rq, p);
  	}
  	rcu_read_unlock();
  }
  void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
  {
  	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
  		spread, rq0_min_vruntime, spread0;
  	struct rq *rq = cpu_rq(cpu);
  	struct sched_entity *last;
  	unsigned long flags;
  
  #ifdef CONFIG_FAIR_GROUP_SCHED
  	SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
  #else
  	SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
  #endif
  	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
  			SPLIT_NS(cfs_rq->exec_clock));
  
  	raw_spin_lock_irqsave(&rq->lock, flags);
  	if (cfs_rq->rb_leftmost)
  		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
  	last = __pick_last_entity(cfs_rq);
  	if (last)
  		max_vruntime = last->vruntime;
  	min_vruntime = cfs_rq->min_vruntime;
  	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
  	raw_spin_unlock_irqrestore(&rq->lock, flags);
  	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
  			SPLIT_NS(MIN_vruntime));
  	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
  			SPLIT_NS(min_vruntime));
  	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
  			SPLIT_NS(max_vruntime));
  	spread = max_vruntime - MIN_vruntime;
  	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
  			SPLIT_NS(spread));
  	spread0 = min_vruntime - rq0_min_vruntime;
  	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
  			SPLIT_NS(spread0));
  	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
  			cfs_rq->nr_spread_over);
  	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
  	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
  #ifdef CONFIG_SMP
  	SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
  			cfs_rq->avg.load_avg);
  	SEQ_printf(m, "  .%-30s: %lu\n", "runnable_load_avg",
  			cfs_rq->runnable_load_avg);
  	SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
  			cfs_rq->avg.util_avg);
  	SEQ_printf(m, "  .%-30s: %ld\n", "removed_load_avg",
  			atomic_long_read(&cfs_rq->removed_load_avg));
  	SEQ_printf(m, "  .%-30s: %ld\n", "removed_util_avg",
  			atomic_long_read(&cfs_rq->removed_util_avg));
  #ifdef CONFIG_FAIR_GROUP_SCHED
  	SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
  			cfs_rq->tg_load_avg_contrib);
  	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
  			atomic_long_read(&cfs_rq->tg->load_avg));
  #endif
  #endif
  #ifdef CONFIG_CFS_BANDWIDTH
  	SEQ_printf(m, "  .%-30s: %d\n", "throttled",
  			cfs_rq->throttled);
  	SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
  			cfs_rq->throttle_count);
  #endif
  
  #ifdef CONFIG_FAIR_GROUP_SCHED
  	print_cfs_group_stats(m, cpu, cfs_rq->tg);
  #endif
  }
  void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
  {
  #ifdef CONFIG_RT_GROUP_SCHED
  	SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
  #else
  	SEQ_printf(m, "\nrt_rq[%d]:\n", cpu);
  #endif
  
  #define P(x) \
  	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
  #define PN(x) \
  	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))
  
  	P(rt_nr_running);
  	P(rt_throttled);
  	PN(rt_time);
  	PN(rt_runtime);
  
  #undef PN
  #undef P
  }
  void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
  {
  	struct dl_bw *dl_bw;
  
  	SEQ_printf(m, "\ndl_rq[%d]:\n", cpu);
  	SEQ_printf(m, "  .%-30s: %ld\n", "dl_nr_running", dl_rq->dl_nr_running);
  #ifdef CONFIG_SMP
  	dl_bw = &cpu_rq(cpu)->rd->dl_bw;
  #else
  	dl_bw = &dl_rq->dl_bw;
  #endif
  	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
  	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);
  }
  extern __read_mostly int sched_clock_running;
  static void print_cpu(struct seq_file *m, int cpu)
  {
  	struct rq *rq = cpu_rq(cpu);
  	unsigned long flags;
  
  #ifdef CONFIG_X86
  	{
  		unsigned int freq = cpu_khz ? : 1;
  
  		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
  			   cpu, freq / 1000, (freq % 1000));
  	}
  #else
  	SEQ_printf(m, "cpu#%d\n", cpu);
  #endif
  
  #define P(x)								\
  do {									\
  	if (sizeof(rq->x) == 4)						\
  		SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));	\
  	else								\
  		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
  } while (0)
  
  #define PN(x) \
  	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))
  
  	P(nr_running);
  	SEQ_printf(m, "  .%-30s: %lu\n", "load",
  		   rq->load.weight);
  	P(nr_switches);
  	P(nr_load_updates);
  	P(nr_uninterruptible);
  	PN(next_balance);
  	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
  	PN(clock);
  	PN(clock_task);
  	P(cpu_load[0]);
  	P(cpu_load[1]);
  	P(cpu_load[2]);
  	P(cpu_load[3]);
  	P(cpu_load[4]);
  #undef P
  #undef PN
  
  #ifdef CONFIG_SMP
  #define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
  	P64(avg_idle);
  	P64(max_idle_balance_cost);
  #undef P64
  #endif
  
  #define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
  	if (schedstat_enabled()) {
  		P(yld_count);
  		P(sched_count);
  		P(sched_goidle);
  		P(ttwu_count);
  		P(ttwu_local);
  	}
  #undef P
  
  	spin_lock_irqsave(&sched_debug_lock, flags);
  	print_cfs_stats(m, cpu);
  	print_rt_stats(m, cpu);
  	print_dl_stats(m, cpu);
  
  	print_rq(m, rq, cpu);
  	spin_unlock_irqrestore(&sched_debug_lock, flags);
  	SEQ_printf(m, "\n");
  }
  static const char *sched_tunable_scaling_names[] = {
  	"none",
  	"logarithmic",
  	"linear"
  };
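  /*
   * Indexed by sysctl_sched_tunable_scaling; see the
   * "sysctl_sched_tunable_scaling" line printed by sched_debug_header().
   */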
  static void sched_debug_header(struct seq_file *m)
  {
  	u64 ktime, sched_clk, cpu_clk;
  	unsigned long flags;
  
  	local_irq_save(flags);
  	ktime = ktime_to_ns(ktime_get());
  	sched_clk = sched_clock();
  	cpu_clk = local_clock();
  	local_irq_restore(flags);
  
  	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
  		init_utsname()->release,
  		(int)strcspn(init_utsname()->version, " "),
  		init_utsname()->version);
  
  #define P(x) \
  	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
  #define PN(x) \
  	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
  	PN(ktime);
  	PN(sched_clk);
  	PN(cpu_clk);
  	P(jiffies);
  #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
  	P(sched_clock_stable());
  #endif
  #undef PN
  #undef P
  
  	SEQ_printf(m, "\n");
  	SEQ_printf(m, "sysctl_sched\n");
  
  #define P(x) \
  	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
  #define PN(x) \
  	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
  	PN(sysctl_sched_latency);
  	PN(sysctl_sched_min_granularity);
  	PN(sysctl_sched_wakeup_granularity);
  	P(sysctl_sched_child_runs_first);
  	P(sysctl_sched_features);
  #undef PN
  #undef P
  
  	SEQ_printf(m, "  .%-40s: %d (%s)\n",
  		"sysctl_sched_tunable_scaling",
  		sysctl_sched_tunable_scaling,
  		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
  	SEQ_printf(m, "\n");
  }
  
  static int sched_debug_show(struct seq_file *m, void *v)
  {
  	int cpu = (unsigned long)(v - 2);
  
  	if (cpu != -1)
  		print_cpu(m, cpu);
  	else
  		sched_debug_header(m);
  
  	return 0;
  }
  
  void sysrq_sched_debug_show(void)
  {
  	int cpu;
  
  	sched_debug_header(NULL);
  	for_each_online_cpu(cpu)
  		print_cpu(NULL, cpu);
  
  }
  
  /*
   * This iterator needs some explanation.
   * It returns 1 for the header position.
   * This means 2 is cpu 0.
   * In a hotplugged system some cpus, including cpu 0, may be missing so we have
   * to use cpumask_* to iterate over the cpus.
   */
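  /*
   * Worked example (illustrative, CPUs 0-1 online): *offset == 0 returns
   * the header token (void *)1, *offset == 1 returns (void *)2 (cpu 0),
   * *offset == 2 returns (void *)3 (cpu 1), and the following call falls
   * off cpu_online_mask and returns NULL, ending the sequence.
   */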
  static void *sched_debug_start(struct seq_file *file, loff_t *offset)
  {
  	unsigned long n = *offset;
  
  	if (n == 0)
  		return (void *) 1;
  
  	n--;
  
  	if (n > 0)
  		n = cpumask_next(n - 1, cpu_online_mask);
  	else
  		n = cpumask_first(cpu_online_mask);
  
  	*offset = n + 1;
  
  	if (n < nr_cpu_ids)
  		return (void *)(unsigned long)(n + 2);
  	return NULL;
  }
  
  static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
  {
  	(*offset)++;
  	return sched_debug_start(file, offset);
  }
  
  static void sched_debug_stop(struct seq_file *file, void *data)
  {
  }
  
  static const struct seq_operations sched_debug_sops = {
  	.start = sched_debug_start,
  	.next = sched_debug_next,
  	.stop = sched_debug_stop,
  	.show = sched_debug_show,
  };
  
  static int sched_debug_release(struct inode *inode, struct file *file)
  {
  	seq_release(inode, file);
  
  	return 0;
  }
  
  static int sched_debug_open(struct inode *inode, struct file *filp)
  {
  	int ret = 0;
  
  	ret = seq_open(filp, &sched_debug_sops);
  
  	return ret;
  }
  
  static const struct file_operations sched_debug_fops = {
  	.open		= sched_debug_open,
  	.read		= seq_read,
  	.llseek		= seq_lseek,
  	.release	= sched_debug_release,
  };
  
  static int __init init_sched_debug_procfs(void)
  {
  	struct proc_dir_entry *pe;
  
  	pe = proc_create("sched_debug", 0444, NULL, &sched_debug_fops);
  	if (!pe)
  		return -ENOMEM;
  
  	return 0;
  }
  
  __initcall(init_sched_debug_procfs);
  
  #define __P(F) \
  	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
  #define P(F) \
  	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
  #define __PN(F) \
  	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
  #define PN(F) \
  	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))
  
  #ifdef CONFIG_NUMA_BALANCING
  void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
  		unsigned long tpf, unsigned long gsf, unsigned long gpf)
  {
  	SEQ_printf(m, "numa_faults node=%d ", node);
  	SEQ_printf(m, "task_private=%lu task_shared=%lu ", tsf, tpf);
  	SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gsf, gpf);
  }
  #endif
  
  static void sched_show_numa(struct task_struct *p, struct seq_file *m)
  {
  #ifdef CONFIG_NUMA_BALANCING
  	struct mempolicy *pol;
  
  	if (p->mm)
  		P(mm->numa_scan_seq);
  
  	task_lock(p);
  	pol = p->mempolicy;
  	if (pol && !(pol->flags & MPOL_F_MORON))
  		pol = NULL;
  	mpol_get(pol);
  	task_unlock(p);
  
  	P(numa_pages_migrated);
  	P(numa_preferred_nid);
  	P(total_numa_faults);
  	SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
  			task_node(p), task_numa_group_id(p));
  	show_numa_stats(p, m);
  	mpol_put(pol);
  #endif
  }
  
  void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
  {
  	unsigned long nr_switches;
  
  	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr(p),
  						get_nr_threads(p));
  	SEQ_printf(m,
  		"---------------------------------------------------------"
  		"----------\n");
  #define __P(F) \
  	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
  #define P(F) \
  	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
  #define P_SCHEDSTAT(F) \
  	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)schedstat_val(p->F))
  #define __PN(F) \
  	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
  #define PN(F) \
  	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))
  #define PN_SCHEDSTAT(F) \
  	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(p->F)))
  
  	PN(se.exec_start);
  	PN(se.vruntime);
  	PN(se.sum_exec_runtime);
  
  	nr_switches = p->nvcsw + p->nivcsw;
  	P(se.nr_migrations);
  
  	if (schedstat_enabled()) {
  		u64 avg_atom, avg_per_cpu;
  
  		PN_SCHEDSTAT(se.statistics.sum_sleep_runtime);
  		PN_SCHEDSTAT(se.statistics.wait_start);
  		PN_SCHEDSTAT(se.statistics.sleep_start);
  		PN_SCHEDSTAT(se.statistics.block_start);
  		PN_SCHEDSTAT(se.statistics.sleep_max);
  		PN_SCHEDSTAT(se.statistics.block_max);
  		PN_SCHEDSTAT(se.statistics.exec_max);
  		PN_SCHEDSTAT(se.statistics.slice_max);
  		PN_SCHEDSTAT(se.statistics.wait_max);
  		PN_SCHEDSTAT(se.statistics.wait_sum);
  		P_SCHEDSTAT(se.statistics.wait_count);
  		PN_SCHEDSTAT(se.statistics.iowait_sum);
  		P_SCHEDSTAT(se.statistics.iowait_count);
  		P_SCHEDSTAT(se.statistics.nr_migrations_cold);
  		P_SCHEDSTAT(se.statistics.nr_failed_migrations_affine);
  		P_SCHEDSTAT(se.statistics.nr_failed_migrations_running);
  		P_SCHEDSTAT(se.statistics.nr_failed_migrations_hot);
  		P_SCHEDSTAT(se.statistics.nr_forced_migrations);
  		P_SCHEDSTAT(se.statistics.nr_wakeups);
  		P_SCHEDSTAT(se.statistics.nr_wakeups_sync);
  		P_SCHEDSTAT(se.statistics.nr_wakeups_migrate);
  		P_SCHEDSTAT(se.statistics.nr_wakeups_local);
  		P_SCHEDSTAT(se.statistics.nr_wakeups_remote);
  		P_SCHEDSTAT(se.statistics.nr_wakeups_affine);
  		P_SCHEDSTAT(se.statistics.nr_wakeups_affine_attempts);
  		P_SCHEDSTAT(se.statistics.nr_wakeups_passive);
  		P_SCHEDSTAT(se.statistics.nr_wakeups_idle);
  
  		avg_atom = p->se.sum_exec_runtime;
  		if (nr_switches)
  			avg_atom = div64_ul(avg_atom, nr_switches);
  		else
  			avg_atom = -1LL;
  
  		avg_per_cpu = p->se.sum_exec_runtime;
  		if (p->se.nr_migrations) {
  			avg_per_cpu = div64_u64(avg_per_cpu,
  						p->se.nr_migrations);
  		} else {
  			avg_per_cpu = -1LL;
  		}
  
  		__PN(avg_atom);
  		__PN(avg_per_cpu);
  	}
  
  	__P(nr_switches);
  	SEQ_printf(m, "%-45s:%21Ld\n",
  		   "nr_voluntary_switches", (long long)p->nvcsw);
  	SEQ_printf(m, "%-45s:%21Ld\n",
  		   "nr_involuntary_switches", (long long)p->nivcsw);
  	P(se.load.weight);
  #ifdef CONFIG_SMP
  	P(se.avg.load_sum);
  	P(se.avg.util_sum);
  	P(se.avg.load_avg);
  	P(se.avg.util_avg);
  	P(se.avg.last_update_time);
  #endif
  	P(policy);
  	P(prio);
  #undef PN_SCHEDSTAT
  #undef PN
  #undef __PN
  #undef P_SCHEDSTAT
  #undef P
  #undef __P
  
  	{
  		unsigned int this_cpu = raw_smp_processor_id();
  		u64 t0, t1;
  
  		t0 = cpu_clock(this_cpu);
  		t1 = cpu_clock(this_cpu);
  		SEQ_printf(m, "%-45s:%21Ld\n",
  			   "clock-delta", (long long)(t1-t0));
  	}
  
  	sched_show_numa(p, m);
  }
  
  void proc_sched_set_task(struct task_struct *p)
  {
  #ifdef CONFIG_SCHEDSTATS
  	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
  #endif
  }