Blame view
kernel/sched/debug.c
11.8 KB
43ae34cb4 sched: scheduler ... |
1 |
/* |
391e43da7 sched: Move all s... |
2 |
* kernel/sched/debug.c |
43ae34cb4 sched: scheduler ... |
3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 |
* * Print the CFS rbtree * * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/proc_fs.h> #include <linux/sched.h> #include <linux/seq_file.h> #include <linux/kallsyms.h> #include <linux/utsname.h> |
029632fbb sched: Make separ... |
18 |
#include "sched.h" |
efe25c2c7 sched: Reinstate ... |
19 |
static DEFINE_SPINLOCK(sched_debug_lock); |
43ae34cb4 sched: scheduler ... |
20 21 22 23 24 25 26 27 28 29 30 |
/* * This allows printing both to /proc/sched_debug and * to the console */ #define SEQ_printf(m, x...) \ do { \ if (m) \ seq_printf(m, x); \ else \ printk(x); \ } while (0) |
ef83a5714 sched: enhance de... |
31 32 33 |
/* * Ease the printing of nsec fields: */ |
/*
 * Whole-millisecond part of @nsec, preserving the sign.
 * Pairs with nsec_low() via SPLIT_NS() to print "%Ld.%06ld" values.
 */
static long long nsec_high(unsigned long long nsec)
{
	int negative = (long long)nsec < 0;

	/* do_div() needs an unsigned dividend; divide the magnitude */
	if (negative)
		nsec = -nsec;
	do_div(nsec, 1000000);

	return negative ? -(long long)nsec : (long long)nsec;
}
/*
 * Sub-millisecond remainder of @nsec (always non-negative);
 * the sign is carried by nsec_high().
 */
static unsigned long nsec_low(unsigned long long nsec)
{
	/* work on the magnitude so do_div()'s remainder is meaningful */
	unsigned long long abs_nsec = ((long long)nsec < 0) ? -nsec : nsec;

	/* do_div() divides in place and returns the remainder */
	return do_div(abs_nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
#ifdef CONFIG_FAIR_GROUP_SCHED
/*
 * Print the per-cpu group sched_entity statistics of @tg.
 * Prints nothing when the group has no entity on @cpu.
 *
 * Fix: the "\n" escapes had been lost from the P()/PN() format
 * strings, gluing every field onto one line; restored.
 */
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

	if (!se)
		return;

#define P(F) \
	SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)F)
#define PN(F) \
	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);
#ifdef CONFIG_SCHEDSTATS
	PN(se->statistics.wait_start);
	PN(se->statistics.sleep_start);
	PN(se->statistics.block_start);
	PN(se->statistics.sleep_max);
	PN(se->statistics.block_max);
	PN(se->statistics.exec_max);
	PN(se->statistics.slice_max);
	PN(se->statistics.wait_max);
	PN(se->statistics.wait_sum);
	P(se->statistics.wait_count);
#endif
	P(se->load.weight);
#undef PN
#undef P
}
#endif
#ifdef CONFIG_CGROUP_SCHED
/*
 * Shared scratch buffer for group path names.
 * NOTE(review): presumably serialized by sched_debug_lock (print_cpu()
 * holds it around the printing paths) — confirm before adding callers.
 */
static char group_path[PATH_MAX];

/* Return a printable path for @tg, using the shared group_path buffer. */
static char *task_group_path(struct task_group *tg)
{
	struct cgroup *cgrp;

	/* autogroups are named by autogroup_path() and have no cgroup */
	if (autogroup_path(tg, group_path, PATH_MAX))
		return group_path;

	/*
	 * May be NULL if the underlying cgroup isn't fully-created yet
	 */
	cgrp = tg->css.cgroup;
	if (!cgrp) {
		group_path[0] = '\0';
		return group_path;
	}
	cgroup_path(cgrp, group_path, PATH_MAX);
	return group_path;
}
#endif
43ae34cb4 sched: scheduler ... |
106 |
static void |
a48da48b4 sched debug: remo... |
107 |
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p) |
43ae34cb4 sched: scheduler ... |
108 109 110 111 112 |
{ if (rq->curr == p) SEQ_printf(m, "R"); else SEQ_printf(m, " "); |
ef83a5714 sched: enhance de... |
113 |
SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ", |
43ae34cb4 sched: scheduler ... |
114 |
p->comm, p->pid, |
ef83a5714 sched: enhance de... |
115 |
SPLIT_NS(p->se.vruntime), |
43ae34cb4 sched: scheduler ... |
116 |
(long long)(p->nvcsw + p->nivcsw), |
6f605d83d take sched_debug.... |
117 |
p->prio); |
6cfb0d5d0 [PATCH] sched: re... |
118 |
#ifdef CONFIG_SCHEDSTATS |
d19ca3087 sched: debug: add... |
119 |
SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld", |
ef83a5714 sched: enhance de... |
120 121 |
SPLIT_NS(p->se.vruntime), SPLIT_NS(p->se.sum_exec_runtime), |
41acab885 sched: Implement ... |
122 |
SPLIT_NS(p->se.statistics.sum_sleep_runtime)); |
6cfb0d5d0 [PATCH] sched: re... |
123 |
#else |
d19ca3087 sched: debug: add... |
124 |
SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld", |
ef83a5714 sched: enhance de... |
125 |
0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L); |
6cfb0d5d0 [PATCH] sched: re... |
126 |
#endif |
efe25c2c7 sched: Reinstate ... |
127 128 129 |
#ifdef CONFIG_CGROUP_SCHED SEQ_printf(m, " %s", task_group_path(task_group(p))); #endif |
d19ca3087 sched: debug: add... |
130 |
|
d19ca3087 sched: debug: add... |
131 132 |
SEQ_printf(m, " "); |
43ae34cb4 sched: scheduler ... |
133 |
} |
a48da48b4 sched debug: remo... |
134 |
static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu) |
43ae34cb4 sched: scheduler ... |
135 136 |
{ struct task_struct *g, *p; |
ab63a633c sched: fix uncond... |
137 |
unsigned long flags; |
43ae34cb4 sched: scheduler ... |
138 139 140 141 142 |
SEQ_printf(m, " runnable tasks: " |
c86da3a3d sched: fix format... |
143 144 145 |
" task PID tree-key switches prio" " exec-runtime sum-exec sum-sleep " |
1a75b94f7 sched: prettify /... |
146 |
"------------------------------------------------------" |
c86da3a3d sched: fix format... |
147 148 |
"---------------------------------------------------- "); |
43ae34cb4 sched: scheduler ... |
149 |
|
ab63a633c sched: fix uncond... |
150 |
read_lock_irqsave(&tasklist_lock, flags); |
43ae34cb4 sched: scheduler ... |
151 152 |
do_each_thread(g, p) { |
fd2f4419b sched: Provide p-... |
153 |
if (!p->on_rq || task_cpu(p) != rq_cpu) |
43ae34cb4 sched: scheduler ... |
154 |
continue; |
a48da48b4 sched debug: remo... |
155 |
print_task(m, rq, p); |
43ae34cb4 sched: scheduler ... |
156 |
} while_each_thread(g, p); |
ab63a633c sched: fix uncond... |
157 |
read_unlock_irqrestore(&tasklist_lock, flags); |
43ae34cb4 sched: scheduler ... |
158 |
} |
5cef9eca3 sched: remove the... |
159 |
void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) |
43ae34cb4 sched: scheduler ... |
160 |
{ |
86d9560cb sched: add more v... |
161 162 |
s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1, spread, rq0_min_vruntime, spread0; |
348ec61e6 sched: Hide runqu... |
163 |
struct rq *rq = cpu_rq(cpu); |
67e12eac3 sched: add se->vr... |
164 165 |
struct sched_entity *last; unsigned long flags; |
efe25c2c7 sched: Reinstate ... |
166 167 168 169 170 |
#ifdef CONFIG_FAIR_GROUP_SCHED SEQ_printf(m, " cfs_rq[%d]:%s ", cpu, task_group_path(cfs_rq->tg)); #else |
ada18de2e sched: debug: add... |
171 172 173 |
SEQ_printf(m, " cfs_rq[%d]: ", cpu); |
efe25c2c7 sched: Reinstate ... |
174 |
#endif |
ef83a5714 sched: enhance de... |
175 176 177 |
SEQ_printf(m, " .%-30s: %Ld.%06ld ", "exec_clock", SPLIT_NS(cfs_rq->exec_clock)); |
67e12eac3 sched: add se->vr... |
178 |
|
05fa785cf sched: Convert rq... |
179 |
raw_spin_lock_irqsave(&rq->lock, flags); |
67e12eac3 sched: add se->vr... |
180 |
if (cfs_rq->rb_leftmost) |
ac53db596 sched: Use a budd... |
181 |
MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime; |
67e12eac3 sched: add se->vr... |
182 183 184 |
last = __pick_last_entity(cfs_rq); if (last) max_vruntime = last->vruntime; |
5ac5c4d60 sched: clean up d... |
185 |
min_vruntime = cfs_rq->min_vruntime; |
348ec61e6 sched: Hide runqu... |
186 |
rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime; |
05fa785cf sched: Convert rq... |
187 |
raw_spin_unlock_irqrestore(&rq->lock, flags); |
ef83a5714 sched: enhance de... |
188 189 190 191 192 193 194 195 196 |
SEQ_printf(m, " .%-30s: %Ld.%06ld ", "MIN_vruntime", SPLIT_NS(MIN_vruntime)); SEQ_printf(m, " .%-30s: %Ld.%06ld ", "min_vruntime", SPLIT_NS(min_vruntime)); SEQ_printf(m, " .%-30s: %Ld.%06ld ", "max_vruntime", SPLIT_NS(max_vruntime)); |
67e12eac3 sched: add se->vr... |
197 |
spread = max_vruntime - MIN_vruntime; |
ef83a5714 sched: enhance de... |
198 199 200 |
SEQ_printf(m, " .%-30s: %Ld.%06ld ", "spread", SPLIT_NS(spread)); |
86d9560cb sched: add more v... |
201 |
spread0 = min_vruntime - rq0_min_vruntime; |
ef83a5714 sched: enhance de... |
202 203 204 |
SEQ_printf(m, " .%-30s: %Ld.%06ld ", "spread0", SPLIT_NS(spread0)); |
5ac5c4d60 sched: clean up d... |
205 206 |
SEQ_printf(m, " .%-30s: %d ", "nr_spread_over", |
ddc972975 sched debug: chec... |
207 |
cfs_rq->nr_spread_over); |
2069dd75c sched: Rewrite tg... |
208 209 210 211 |
SEQ_printf(m, " .%-30s: %ld ", "nr_running", cfs_rq->nr_running); SEQ_printf(m, " .%-30s: %ld ", "load", cfs_rq->load.weight); |
c09595f63 sched: revert rev... |
212 213 |
#ifdef CONFIG_FAIR_GROUP_SCHED #ifdef CONFIG_SMP |
2069dd75c sched: Rewrite tg... |
214 215 216 217 218 219 220 221 222 223 224 |
SEQ_printf(m, " .%-30s: %Ld.%06ld ", "load_avg", SPLIT_NS(cfs_rq->load_avg)); SEQ_printf(m, " .%-30s: %Ld.%06ld ", "load_period", SPLIT_NS(cfs_rq->load_period)); SEQ_printf(m, " .%-30s: %ld ", "load_contrib", cfs_rq->load_contribution); SEQ_printf(m, " .%-30s: %d ", "load_tg", |
5091faa44 sched: Add 'autog... |
225 |
atomic_read(&cfs_rq->tg->load_weight)); |
c09595f63 sched: revert rev... |
226 |
#endif |
2069dd75c sched: Rewrite tg... |
227 |
|
ff9b48c35 sched: include gr... |
228 |
print_cfs_group_stats(m, cpu, cfs_rq->tg); |
c09595f63 sched: revert rev... |
229 |
#endif |
43ae34cb4 sched: scheduler ... |
230 |
} |
ada18de2e sched: debug: add... |
231 232 |
void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq) { |
efe25c2c7 sched: Reinstate ... |
233 234 235 236 237 |
#ifdef CONFIG_RT_GROUP_SCHED SEQ_printf(m, " rt_rq[%d]:%s ", cpu, task_group_path(rt_rq->tg)); #else |
ada18de2e sched: debug: add... |
238 239 240 |
SEQ_printf(m, " rt_rq[%d]: ", cpu); |
efe25c2c7 sched: Reinstate ... |
241 |
#endif |
ada18de2e sched: debug: add... |
242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 |
#define P(x) \ SEQ_printf(m, " .%-30s: %Ld ", #x, (long long)(rt_rq->x)) #define PN(x) \ SEQ_printf(m, " .%-30s: %Ld.%06ld ", #x, SPLIT_NS(rt_rq->x)) P(rt_nr_running); P(rt_throttled); PN(rt_time); PN(rt_runtime); #undef PN #undef P } |
5bb6b1ea6 sched: Add some c... |
258 |
extern __read_mostly int sched_clock_running; |
a48da48b4 sched debug: remo... |
259 |
static void print_cpu(struct seq_file *m, int cpu) |
43ae34cb4 sched: scheduler ... |
260 |
{ |
348ec61e6 sched: Hide runqu... |
261 |
struct rq *rq = cpu_rq(cpu); |
efe25c2c7 sched: Reinstate ... |
262 |
unsigned long flags; |
43ae34cb4 sched: scheduler ... |
263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 |
#ifdef CONFIG_X86 { unsigned int freq = cpu_khz ? : 1; SEQ_printf(m, " cpu#%d, %u.%03u MHz ", cpu, freq / 1000, (freq % 1000)); } #else SEQ_printf(m, " cpu#%d ", cpu); #endif #define P(x) \ SEQ_printf(m, " .%-30s: %Ld ", #x, (long long)(rq->x)) |
ef83a5714 sched: enhance de... |
282 283 284 |
#define PN(x) \ SEQ_printf(m, " .%-30s: %Ld.%06ld ", #x, SPLIT_NS(rq->x)) |
43ae34cb4 sched: scheduler ... |
285 286 287 288 |
P(nr_running); SEQ_printf(m, " .%-30s: %lu ", "load", |
495eca494 sched: clean up s... |
289 |
rq->load.weight); |
43ae34cb4 sched: scheduler ... |
290 291 292 |
P(nr_switches); P(nr_load_updates); P(nr_uninterruptible); |
ef83a5714 sched: enhance de... |
293 |
PN(next_balance); |
43ae34cb4 sched: scheduler ... |
294 |
P(curr->pid); |
ef83a5714 sched: enhance de... |
295 |
PN(clock); |
43ae34cb4 sched: scheduler ... |
296 297 298 299 300 301 |
P(cpu_load[0]); P(cpu_load[1]); P(cpu_load[2]); P(cpu_load[3]); P(cpu_load[4]); #undef P |
ef83a5714 sched: enhance de... |
302 |
#undef PN |
43ae34cb4 sched: scheduler ... |
303 |
|
5ac5c4d60 sched: clean up d... |
304 305 306 |
#ifdef CONFIG_SCHEDSTATS #define P(n) SEQ_printf(m, " .%-30s: %d ", #n, rq->n); |
1b9508f68 sched: Rate-limit... |
307 308 |
#define P64(n) SEQ_printf(m, " .%-30s: %Ld ", #n, rq->n); |
5ac5c4d60 sched: clean up d... |
309 |
|
5ac5c4d60 sched: clean up d... |
310 311 312 313 314 |
P(yld_count); P(sched_switch); P(sched_count); P(sched_goidle); |
1b9508f68 sched: Rate-limit... |
315 316 317 |
#ifdef CONFIG_SMP P64(avg_idle); #endif |
5ac5c4d60 sched: clean up d... |
318 319 320 |
P(ttwu_count); P(ttwu_local); |
5ac5c4d60 sched: clean up d... |
321 |
#undef P |
fce209798 sched: Replace rq... |
322 |
#undef P64 |
5ac5c4d60 sched: clean up d... |
323 |
#endif |
efe25c2c7 sched: Reinstate ... |
324 |
spin_lock_irqsave(&sched_debug_lock, flags); |
5cef9eca3 sched: remove the... |
325 |
print_cfs_stats(m, cpu); |
ada18de2e sched: debug: add... |
326 |
print_rt_stats(m, cpu); |
43ae34cb4 sched: scheduler ... |
327 |
|
efe25c2c7 sched: Reinstate ... |
328 |
rcu_read_lock(); |
a48da48b4 sched debug: remo... |
329 |
print_rq(m, rq, cpu); |
efe25c2c7 sched: Reinstate ... |
330 331 |
rcu_read_unlock(); spin_unlock_irqrestore(&sched_debug_lock, flags); |
43ae34cb4 sched: scheduler ... |
332 |
} |
/*
 * Human-readable names for sysctl_sched_tunable_scaling values,
 * indexed by the sysctl value.
 *
 * Fix: "logaritmic" -> "logarithmic" (user-visible typo).
 */
static const char *sched_tunable_scaling_names[] = {
	"none",
	"logarithmic",
	"linear"
};
43ae34cb4 sched: scheduler ... |
338 339 |
static int sched_debug_show(struct seq_file *m, void *v) { |
5bb6b1ea6 sched: Add some c... |
340 341 |
u64 ktime, sched_clk, cpu_clk; unsigned long flags; |
43ae34cb4 sched: scheduler ... |
342 |
int cpu; |
5bb6b1ea6 sched: Add some c... |
343 344 345 346 347 348 349 350 |
local_irq_save(flags); ktime = ktime_to_ns(ktime_get()); sched_clk = sched_clock(); cpu_clk = local_clock(); local_irq_restore(flags); SEQ_printf(m, "Sched Debug Version: v0.10, %s %.*s ", |
43ae34cb4 sched: scheduler ... |
351 352 353 |
init_utsname()->release, (int)strcspn(init_utsname()->version, " "), init_utsname()->version); |
5bb6b1ea6 sched: Add some c... |
354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 |
#define P(x) \ SEQ_printf(m, "%-40s: %Ld ", #x, (long long)(x)) #define PN(x) \ SEQ_printf(m, "%-40s: %Ld.%06ld ", #x, SPLIT_NS(x)) PN(ktime); PN(sched_clk); PN(cpu_clk); P(jiffies); #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK P(sched_clock_stable); #endif #undef PN #undef P SEQ_printf(m, " "); SEQ_printf(m, "sysctl_sched "); |
43ae34cb4 sched: scheduler ... |
374 |
|
1aa4731ef sched debug: prin... |
375 |
#define P(x) \ |
d822ceced sched debug: more... |
376 377 |
SEQ_printf(m, " .%-40s: %Ld ", #x, (long long)(x)) |
1aa4731ef sched debug: prin... |
378 |
#define PN(x) \ |
d822ceced sched debug: more... |
379 380 |
SEQ_printf(m, " .%-40s: %Ld.%06ld ", #x, SPLIT_NS(x)) |
1aa4731ef sched debug: prin... |
381 |
PN(sysctl_sched_latency); |
b2be5e96d sched: reintroduc... |
382 |
PN(sysctl_sched_min_granularity); |
1aa4731ef sched debug: prin... |
383 |
PN(sysctl_sched_wakeup_granularity); |
eebef7469 sched: Use correc... |
384 |
P(sysctl_sched_child_runs_first); |
1aa4731ef sched debug: prin... |
385 386 387 |
P(sysctl_sched_features); #undef PN #undef P |
1983a922a sched: Make tunab... |
388 389 390 391 |
SEQ_printf(m, " .%-40s: %d (%s) ", "sysctl_sched_tunable_scaling", sysctl_sched_tunable_scaling, sched_tunable_scaling_names[sysctl_sched_tunable_scaling]); |
43ae34cb4 sched: scheduler ... |
392 |
for_each_online_cpu(cpu) |
a48da48b4 sched debug: remo... |
393 |
print_cpu(m, cpu); |
43ae34cb4 sched: scheduler ... |
394 395 396 397 398 399 |
SEQ_printf(m, " "); return 0; } |
029632fbb sched: Make separ... |
400 |
void sysrq_sched_debug_show(void) |
43ae34cb4 sched: scheduler ... |
401 402 403 404 405 406 407 408 |
{ sched_debug_show(NULL, NULL); } static int sched_debug_open(struct inode *inode, struct file *filp) { return single_open(filp, sched_debug_show, NULL); } |
0dbee3a6b Make scheduler de... |
409 |
static const struct file_operations sched_debug_fops = { |
43ae34cb4 sched: scheduler ... |
410 411 412 |
.open = sched_debug_open, .read = seq_read, .llseek = seq_lseek, |
5ea473a1d Fix leaks on /pro... |
413 |
.release = single_release, |
43ae34cb4 sched: scheduler ... |
414 415 416 417 418 |
}; static int __init init_sched_debug_procfs(void) { struct proc_dir_entry *pe; |
a9cf4ddb3 sched: change sch... |
419 |
pe = proc_create("sched_debug", 0444, NULL, &sched_debug_fops); |
43ae34cb4 sched: scheduler ... |
420 421 |
if (!pe) return -ENOMEM; |
43ae34cb4 sched: scheduler ... |
422 423 424 425 426 427 428 |
return 0; } __initcall(init_sched_debug_procfs); void proc_sched_show_task(struct task_struct *p, struct seq_file *m) { |
cc367732f sched: debug, imp... |
429 |
unsigned long nr_switches; |
43ae34cb4 sched: scheduler ... |
430 |
|
5089a9768 proc_sched_show_t... |
431 432 433 |
SEQ_printf(m, "%s (%d, #threads: %d) ", p->comm, p->pid, get_nr_threads(p)); |
2d92f2278 sched: debug: inc... |
434 435 436 |
SEQ_printf(m, "--------------------------------------------------------- "); |
cc367732f sched: debug, imp... |
437 438 439 |
#define __P(F) \ SEQ_printf(m, "%-35s:%21Ld ", #F, (long long)F) |
43ae34cb4 sched: scheduler ... |
440 |
#define P(F) \ |
2d92f2278 sched: debug: inc... |
441 442 |
SEQ_printf(m, "%-35s:%21Ld ", #F, (long long)p->F) |
cc367732f sched: debug, imp... |
443 444 445 |
#define __PN(F) \ SEQ_printf(m, "%-35s:%14Ld.%06ld ", #F, SPLIT_NS((long long)F)) |
ef83a5714 sched: enhance de... |
446 |
#define PN(F) \ |
2d92f2278 sched: debug: inc... |
447 448 |
SEQ_printf(m, "%-35s:%14Ld.%06ld ", #F, SPLIT_NS((long long)p->F)) |
43ae34cb4 sched: scheduler ... |
449 |
|
ef83a5714 sched: enhance de... |
450 451 452 |
PN(se.exec_start); PN(se.vruntime); PN(se.sum_exec_runtime); |
6cfb0d5d0 [PATCH] sched: re... |
453 |
|
cc367732f sched: debug, imp... |
454 |
nr_switches = p->nvcsw + p->nivcsw; |
6cfb0d5d0 [PATCH] sched: re... |
455 |
#ifdef CONFIG_SCHEDSTATS |
41acab885 sched: Implement ... |
456 457 458 459 460 461 462 463 464 465 466 467 |
PN(se.statistics.wait_start); PN(se.statistics.sleep_start); PN(se.statistics.block_start); PN(se.statistics.sleep_max); PN(se.statistics.block_max); PN(se.statistics.exec_max); PN(se.statistics.slice_max); PN(se.statistics.wait_max); PN(se.statistics.wait_sum); P(se.statistics.wait_count); PN(se.statistics.iowait_sum); P(se.statistics.iowait_count); |
cc367732f sched: debug, imp... |
468 |
P(se.nr_migrations); |
41acab885 sched: Implement ... |
469 470 471 472 473 474 475 476 477 478 479 480 481 482 |
P(se.statistics.nr_migrations_cold); P(se.statistics.nr_failed_migrations_affine); P(se.statistics.nr_failed_migrations_running); P(se.statistics.nr_failed_migrations_hot); P(se.statistics.nr_forced_migrations); P(se.statistics.nr_wakeups); P(se.statistics.nr_wakeups_sync); P(se.statistics.nr_wakeups_migrate); P(se.statistics.nr_wakeups_local); P(se.statistics.nr_wakeups_remote); P(se.statistics.nr_wakeups_affine); P(se.statistics.nr_wakeups_affine_attempts); P(se.statistics.nr_wakeups_passive); P(se.statistics.nr_wakeups_idle); |
cc367732f sched: debug, imp... |
483 484 485 486 487 488 489 490 491 492 493 |
{ u64 avg_atom, avg_per_cpu; avg_atom = p->se.sum_exec_runtime; if (nr_switches) do_div(avg_atom, nr_switches); else avg_atom = -1LL; avg_per_cpu = p->se.sum_exec_runtime; |
c1a89740d sched: clean up o... |
494 |
if (p->se.nr_migrations) { |
6f6d6a1a6 rename div64_64 t... |
495 496 |
avg_per_cpu = div64_u64(avg_per_cpu, p->se.nr_migrations); |
c1a89740d sched: clean up o... |
497 |
} else { |
cc367732f sched: debug, imp... |
498 |
avg_per_cpu = -1LL; |
c1a89740d sched: clean up o... |
499 |
} |
cc367732f sched: debug, imp... |
500 501 502 503 |
__PN(avg_atom); __PN(avg_per_cpu); } |
6cfb0d5d0 [PATCH] sched: re... |
504 |
#endif |
cc367732f sched: debug, imp... |
505 |
__P(nr_switches); |
2d92f2278 sched: debug: inc... |
506 507 |
SEQ_printf(m, "%-35s:%21Ld ", |
cc367732f sched: debug, imp... |
508 509 510 511 |
"nr_voluntary_switches", (long long)p->nvcsw); SEQ_printf(m, "%-35s:%21Ld ", "nr_involuntary_switches", (long long)p->nivcsw); |
43ae34cb4 sched: scheduler ... |
512 513 514 |
P(se.load.weight); P(policy); P(prio); |
ef83a5714 sched: enhance de... |
515 |
#undef PN |
cc367732f sched: debug, imp... |
516 517 518 |
#undef __PN #undef P #undef __P |
43ae34cb4 sched: scheduler ... |
519 520 |
{ |
29d7b90c1 sched: fix kernel... |
521 |
unsigned int this_cpu = raw_smp_processor_id(); |
43ae34cb4 sched: scheduler ... |
522 |
u64 t0, t1; |
29d7b90c1 sched: fix kernel... |
523 524 |
t0 = cpu_clock(this_cpu); t1 = cpu_clock(this_cpu); |
2d92f2278 sched: debug: inc... |
525 526 |
SEQ_printf(m, "%-35s:%21Ld ", |
43ae34cb4 sched: scheduler ... |
527 528 529 530 531 532 |
"clock-delta", (long long)(t1-t0)); } } void proc_sched_set_task(struct task_struct *p) { |
6cfb0d5d0 [PATCH] sched: re... |
533 |
#ifdef CONFIG_SCHEDSTATS |
41acab885 sched: Implement ... |
534 |
memset(&p->se.statistics, 0, sizeof(p->se.statistics)); |
6cfb0d5d0 [PATCH] sched: re... |
535 |
#endif |
43ae34cb4 sched: scheduler ... |
536 |
} |