/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/proc_fs.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/utsname.h>
#include <linux/mempolicy.h>
#include <linux/debugfs.h>

#include "sched.h"

static DEFINE_SPINLOCK(sched_debug_lock);

/*
 * This allows printing both to /proc/sched_debug and
 * to the console
 */
#define SEQ_printf(m, x...)			\
 do {						\
	if (m)					\
		seq_printf(m, x);	\
	else					\
		printk(x);		\
 } while (0)

/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);

	return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;

	return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)

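/*
 * Stringify the feature names from "features.h"; the resulting table backs
 * the /sys/kernel/debug/sched_features interface below.
 */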
#define SCHED_FEAT(name, enabled)	\
	#name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT

static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}

#ifdef HAVE_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)	\
	jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
	static_key_disable(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
	static_key_enable(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { };
static void sched_feat_enable(int i) { };
#endif /* HAVE_JUMP_LABEL */

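/*
 * Look up @cmp in sched_feat_names[] and flip the corresponding bit in
 * sysctl_sched_features; a "NO_" prefix clears the feature instead of
 * setting it.  Returns the matched index, or __SCHED_FEAT_NR if the name
 * was not recognized.
 */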
static int sched_feat_set(char *cmp)
{
	int i;
	int neg = 0;

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (strcmp(cmp, sched_feat_names[i]) == 0) {
			if (neg) {
				sysctl_sched_features &= ~(1UL << i);
				sched_feat_disable(i);
			} else {
				sysctl_sched_features |= (1UL << i);
				sched_feat_enable(i);
			}
			break;
		}
	}

	return i;
}

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int i;
	struct inode *inode;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	/* Ensure the static_key remains in a consistent state */
	inode = file_inode(filp);
	inode_lock(inode);
	i = sched_feat_set(cmp);
	inode_unlock(inode);
	if (i == __SCHED_FEAT_NR)
		return -EINVAL;

	*ppos += cnt;

	return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static __init int sched_init_debug(void)
{
	debugfs_create_file("sched_features", 0644, NULL, NULL,
			&sched_feat_fops);

	return 0;
}
late_initcall(sched_init_debug);

#ifdef CONFIG_SMP

#ifdef CONFIG_SYSCTL

static struct ctl_table sd_ctl_dir[] = {
	{
		.procname	= "sched_domain",
		.mode		= 0555,
	},
	{}
};

static struct ctl_table sd_ctl_root[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= sd_ctl_dir,
	},
	{}
};

static struct ctl_table *sd_alloc_ctl_entry(int n)
{
	struct ctl_table *entry =
		kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);

	return entry;
}

static void sd_free_ctl_entry(struct ctl_table **tablep)
{
	struct ctl_table *entry;

	/*
	 * In the intermediate directories, both the child directory and
	 * procname are dynamically allocated and could fail but the mode
	 * will always be set. In the lowest directory the names are
	 * static strings and all have proc handlers.
	 */
	for (entry = *tablep; entry->mode; entry++) {
		if (entry->child)
			sd_free_ctl_entry(&entry->child);
		if (entry->proc_handler == NULL)
			kfree(entry->procname);
	}

	kfree(*tablep);
	*tablep = NULL;
}

static int min_load_idx = 0;
static int max_load_idx = CPU_LOAD_IDX_MAX-1;

static void
set_table_entry(struct ctl_table *entry,
		const char *procname, void *data, int maxlen,
		umode_t mode, proc_handler *proc_handler,
		bool load_idx)
{
	entry->procname = procname;
	entry->data = data;
	entry->maxlen = maxlen;
	entry->mode = mode;
	entry->proc_handler = proc_handler;

	if (load_idx) {
		entry->extra1 = &min_load_idx;
		entry->extra2 = &max_load_idx;
	}
}

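/*
 * Build the ctl_table describing one sched_domain's tunables: 13 entries
 * plus a zeroed terminator, exposed under
 * /proc/sys/kernel/sched_domain/cpuN/domainN/.
 */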
static struct ctl_table *
sd_alloc_ctl_domain_table(struct sched_domain *sd)
{
	struct ctl_table *table = sd_alloc_ctl_entry(14);

	if (table == NULL)
		return NULL;

	set_table_entry(&table[0], "min_interval", &sd->min_interval,
		sizeof(long), 0644, proc_doulongvec_minmax, false);
	set_table_entry(&table[1], "max_interval", &sd->max_interval,
		sizeof(long), 0644, proc_doulongvec_minmax, false);
	set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
		sizeof(int), 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
		sizeof(int), 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[9], "cache_nice_tries",
		&sd->cache_nice_tries,
		sizeof(int), 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[10], "flags", &sd->flags,
		sizeof(int), 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[11], "max_newidle_lb_cost",
		&sd->max_newidle_lb_cost,
		sizeof(long), 0644, proc_doulongvec_minmax, false);
	set_table_entry(&table[12], "name", sd->name,
		CORENAME_MAX_SIZE, 0444, proc_dostring, false);
	/* &table[13] is terminator */

	return table;
}

static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
{
	struct ctl_table *entry, *table;
	struct sched_domain *sd;
	int domain_num = 0, i;
	char buf[32];

	for_each_domain(cpu, sd)
		domain_num++;
	entry = table = sd_alloc_ctl_entry(domain_num + 1);
	if (table == NULL)
		return NULL;

	i = 0;
	for_each_domain(cpu, sd) {
		snprintf(buf, 32, "domain%d", i);
		entry->procname = kstrdup(buf, GFP_KERNEL);
		entry->mode = 0555;
		entry->child = sd_alloc_ctl_domain_table(sd);
		entry++;
		i++;
	}
	return table;
}

static cpumask_var_t sd_sysctl_cpus;
static struct ctl_table_header *sd_sysctl_header;

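/*
 * (Re)build the /proc/sys/kernel/sched_domain directory tree.  Only CPUs
 * marked in sd_sysctl_cpus (see dirty_sched_domain_sysctl()) get their
 * per-domain tables regenerated.
 */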
void register_sched_domain_sysctl(void)
{
	static struct ctl_table *cpu_entries;
	static struct ctl_table **cpu_idx;
	char buf[32];
	int i;

	if (!cpu_entries) {
		cpu_entries = sd_alloc_ctl_entry(num_possible_cpus() + 1);
		if (!cpu_entries)
			return;

		WARN_ON(sd_ctl_dir[0].child);
		sd_ctl_dir[0].child = cpu_entries;
	}

	if (!cpu_idx) {
		struct ctl_table *e = cpu_entries;

		cpu_idx = kcalloc(nr_cpu_ids, sizeof(struct ctl_table*), GFP_KERNEL);
		if (!cpu_idx)
			return;

		/* deal with sparse possible map */
		for_each_possible_cpu(i) {
			cpu_idx[i] = e;
			e++;
		}
	}

	if (!cpumask_available(sd_sysctl_cpus)) {
		if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
			return;

		/* init to possible to not have holes in @cpu_entries */
		cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
	}

	for_each_cpu(i, sd_sysctl_cpus) {
		struct ctl_table *e = cpu_idx[i];

		if (e->child)
			sd_free_ctl_entry(&e->child);

		if (!e->procname) {
			snprintf(buf, 32, "cpu%d", i);
			e->procname = kstrdup(buf, GFP_KERNEL);
		}
		e->mode = 0555;
		e->child = sd_alloc_ctl_cpu_table(i);

		__cpumask_clear_cpu(i, sd_sysctl_cpus);
	}

	WARN_ON(sd_sysctl_header);
	sd_sysctl_header = register_sysctl_table(sd_ctl_root);
}

void dirty_sched_domain_sysctl(int cpu)
{
	if (cpumask_available(sd_sysctl_cpus))
		__cpumask_set_cpu(cpu, sd_sysctl_cpus);
}

/* may be called multiple times per register */
void unregister_sched_domain_sysctl(void)
{
	unregister_sysctl_table(sd_sysctl_header);
	sd_sysctl_header = NULL;
}
#endif /* CONFIG_SYSCTL */
#endif /* CONFIG_SMP */

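/* Dump the per-CPU scheduling entity of a task group. */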
#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F) \
	SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)F)
#define P_SCHEDSTAT(F) \
	SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)schedstat_val(F))
#define PN(F) \
	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN_SCHEDSTAT(F) \
	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(F)))

	if (!se)
		return;

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);
	if (schedstat_enabled()) {
		PN_SCHEDSTAT(se->statistics.wait_start);
		PN_SCHEDSTAT(se->statistics.sleep_start);
		PN_SCHEDSTAT(se->statistics.block_start);
		PN_SCHEDSTAT(se->statistics.sleep_max);
		PN_SCHEDSTAT(se->statistics.block_max);
		PN_SCHEDSTAT(se->statistics.exec_max);
		PN_SCHEDSTAT(se->statistics.slice_max);
		PN_SCHEDSTAT(se->statistics.wait_max);
		PN_SCHEDSTAT(se->statistics.wait_sum);
		P_SCHEDSTAT(se->statistics.wait_count);
	}
	P(se->load.weight);
#ifdef CONFIG_SMP
	P(se->avg.load_avg);
	P(se->avg.util_avg);
#endif

#undef PN_SCHEDSTAT
#undef PN
#undef P_SCHEDSTAT
#undef P
}
#endif

#ifdef CONFIG_CGROUP_SCHED
static char group_path[PATH_MAX];

static char *task_group_path(struct task_group *tg)
{
	if (autogroup_path(tg, group_path, PATH_MAX))
		return group_path;

	cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
	return group_path;
}
#endif

static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;

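/* One line per task in the "runnable tasks" table of /proc/sched_debug. */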
static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	if (rq->curr == p)
		SEQ_printf(m, ">R");
	else
		SEQ_printf(m, " %c", task_state_to_char(p));

	SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
		p->comm, task_pid_nr(p),
		SPLIT_NS(p->se.vruntime),
		(long long)(p->nvcsw + p->nivcsw),
		p->prio);

	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.wait_sum)),
		SPLIT_NS(p->se.sum_exec_runtime),
		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.sum_sleep_runtime)));

#ifdef CONFIG_NUMA_BALANCING
	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf(m, " %s", task_group_path(task_group(p)));
#endif

	SEQ_printf(m, "\n");
}

static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;

	SEQ_printf(m,
	"\nrunnable tasks:\n"
	" S           task   PID         tree-key  switches  prio"
	"     wait-time             sum-exec        sum-sleep\n"
	"-------------------------------------------------------"
	"----------------------------------------------------\n");

	rcu_read_lock();
	for_each_process_thread(g, p) {
		if (task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	}
	rcu_read_unlock();
}

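/*
 * Dump the state of one cfs_rq: vruntime spread, load/PELT averages and,
 * with CFS bandwidth control, throttling state.
 */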
void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
		spread, rq0_min_vruntime, spread0;
	struct rq *rq = cpu_rq(cpu);
	struct sched_entity *last;
	unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
#else
	SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
#endif
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
			SPLIT_NS(cfs_rq->exec_clock));

	raw_spin_lock_irqsave(&rq->lock, flags);
	if (cfs_rq->rb_leftmost)
		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
	last = __pick_last_entity(cfs_rq);
	if (last)
		max_vruntime = last->vruntime;
	min_vruntime = cfs_rq->min_vruntime;
	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
	raw_spin_unlock_irqrestore(&rq->lock, flags);
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
			SPLIT_NS(MIN_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
			SPLIT_NS(min_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
			SPLIT_NS(max_vruntime));
	spread = max_vruntime - MIN_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
			SPLIT_NS(spread));
	spread0 = min_vruntime - rq0_min_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
			SPLIT_NS(spread0));
	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
			cfs_rq->nr_spread_over);
	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
	SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
			cfs_rq->avg.load_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "runnable_load_avg",
			cfs_rq->runnable_load_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
			cfs_rq->avg.util_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed_load_avg",
			atomic_long_read(&cfs_rq->removed_load_avg));
	SEQ_printf(m, "  .%-30s: %ld\n", "removed_util_avg",
			atomic_long_read(&cfs_rq->removed_util_avg));
#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
			cfs_rq->tg_load_avg_contrib);
	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
			atomic_long_read(&cfs_rq->tg->load_avg));
#endif
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	SEQ_printf(m, "  .%-30s: %d\n", "throttled",
			cfs_rq->throttled);
	SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
			cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}

void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
	SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
#else
	SEQ_printf(m, "\nrt_rq[%d]:\n", cpu);
#endif

#define P(x) \
	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

	PU(rt_nr_running);
#ifdef CONFIG_SMP
	PU(rt_nr_migratory);
#endif
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);

#undef PN
#undef PU
#undef P
}

void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{
	struct dl_bw *dl_bw;

	SEQ_printf(m, "\ndl_rq[%d]:\n", cpu);

#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))

	PU(dl_nr_running);
#ifdef CONFIG_SMP
	PU(dl_nr_migratory);
	dl_bw = &cpu_rq(cpu)->rd->dl_bw;
#else
	dl_bw = &dl_rq->dl_bw;
#endif
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);

#undef PU
}

extern __read_mostly int sched_clock_running;

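/*
 * Dump one CPU's runqueue: rq counters and clocks, then the cfs/rt/dl
 * runqueues and the runnable tasks on that CPU.
 */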
static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else
	SEQ_printf(m, "cpu#%d\n", cpu);
#endif

#define P(x)								\
do {									\
	if (sizeof(rq->x) == 4)						\
		SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));	\
	else								\
		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	P(nr_running);
	SEQ_printf(m, "  .%-30s: %lu\n", "load",
		   rq->load.weight);
	P(nr_switches);
	P(nr_load_updates);
	P(nr_uninterruptible);
	PN(next_balance);
	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
	PN(clock);
	PN(clock_task);
	P(cpu_load[0]);
	P(cpu_load[1]);
	P(cpu_load[2]);
	P(cpu_load[3]);
	P(cpu_load[4]);
#undef P
#undef PN

#ifdef CONFIG_SMP
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
	P64(avg_idle);
	P64(max_idle_balance_cost);
#undef P64
#endif

#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
	if (schedstat_enabled()) {
		P(yld_count);
		P(sched_count);
		P(sched_goidle);
		P(ttwu_count);
		P(ttwu_local);
	}
#undef P

	spin_lock_irqsave(&sched_debug_lock, flags);
	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);
	print_dl_stats(m, cpu);

	print_rq(m, rq, cpu);
	spin_unlock_irqrestore(&sched_debug_lock, flags);
	SEQ_printf(m, "\n");
}

static const char *sched_tunable_scaling_names[] = {
	"none",
	"logarithmic",
	"linear"
};

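/* Header of /proc/sched_debug: clock readings, jiffies and the sysctl_sched tunables. */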
static void sched_debug_header(struct seq_file *m)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;

	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable());
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_latency);
	PN(sysctl_sched_min_granularity);
	PN(sysctl_sched_wakeup_granularity);
	P(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
#undef PN
#undef P

	SEQ_printf(m, "  .%-40s: %d (%s)\n",
		"sysctl_sched_tunable_scaling",
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
	SEQ_printf(m, "\n");
}

static int sched_debug_show(struct seq_file *m, void *v)
{
	int cpu = (unsigned long)(v - 2);

	if (cpu != -1)
		print_cpu(m, cpu);
	else
		sched_debug_header(m);

	return 0;
}

void sysrq_sched_debug_show(void)
{
	int cpu;

	sched_debug_header(NULL);
	for_each_online_cpu(cpu)
		print_cpu(NULL, cpu);
}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is cpu 0.
 * In a hotplugged system some cpus, including cpu 0, may be missing so we have
 * to use cpumask_* to iterate over the cpus.
 */
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);
	return NULL;
}

static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations sched_debug_sops = {
	.start = sched_debug_start,
	.next = sched_debug_next,
	.stop = sched_debug_stop,
	.show = sched_debug_show,
};

static int sched_debug_release(struct inode *inode, struct file *file)
{
	seq_release(inode, file);

	return 0;
}

static int sched_debug_open(struct inode *inode, struct file *filp)
{
	int ret = 0;

	ret = seq_open(filp, &sched_debug_sops);

	return ret;
}

static const struct file_operations sched_debug_fops = {
	.open		= sched_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= sched_debug_release,
};

static int __init init_sched_debug_procfs(void)
{
	struct proc_dir_entry *pe;

	pe = proc_create("sched_debug", 0444, NULL, &sched_debug_fops);
	if (!pe)
		return -ENOMEM;
	return 0;
}

__initcall(init_sched_debug_procfs);

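/*
 * Print helpers that expect 'm' (seq_file) and 'p' (task_struct) to be in
 * scope; used by sched_show_numa() below.
 */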
#define __P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
#define P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
#define __PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))


#ifdef CONFIG_NUMA_BALANCING
void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
		unsigned long tpf, unsigned long gsf, unsigned long gpf)
{
	SEQ_printf(m, "numa_faults node=%d ", node);
	SEQ_printf(m, "task_private=%lu task_shared=%lu ", tsf, tpf);
	SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gsf, gpf);
}
#endif


static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
	struct mempolicy *pol;

	if (p->mm)
		P(mm->numa_scan_seq);

	task_lock(p);
	pol = p->mempolicy;
	if (pol && !(pol->flags & MPOL_F_MORON))
		pol = NULL;
	mpol_get(pol);
	task_unlock(p);

	P(numa_pages_migrated);
	P(numa_preferred_nid);
	P(total_numa_faults);
	SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
			task_node(p), task_numa_group_id(p));
	show_numa_stats(p, m);
	mpol_put(pol);
#endif
}

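/* Implements the read side of /proc/<pid>/sched. */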
void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
						  struct seq_file *m)
{
	unsigned long nr_switches;

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
						get_nr_threads(p));
	SEQ_printf(m,
		"---------------------------------------------------------"
		"----------\n");
#define __P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
#define P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
#define P_SCHEDSTAT(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)schedstat_val(p->F))
#define __PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))
#define PN_SCHEDSTAT(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(p->F)))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);

	nr_switches = p->nvcsw + p->nivcsw;

	P(se.nr_migrations);

	if (schedstat_enabled()) {
		u64 avg_atom, avg_per_cpu;

		PN_SCHEDSTAT(se.statistics.sum_sleep_runtime);
		PN_SCHEDSTAT(se.statistics.wait_start);
		PN_SCHEDSTAT(se.statistics.sleep_start);
		PN_SCHEDSTAT(se.statistics.block_start);
		PN_SCHEDSTAT(se.statistics.sleep_max);
		PN_SCHEDSTAT(se.statistics.block_max);
		PN_SCHEDSTAT(se.statistics.exec_max);
		PN_SCHEDSTAT(se.statistics.slice_max);
		PN_SCHEDSTAT(se.statistics.wait_max);
		PN_SCHEDSTAT(se.statistics.wait_sum);
		P_SCHEDSTAT(se.statistics.wait_count);
		PN_SCHEDSTAT(se.statistics.iowait_sum);
		P_SCHEDSTAT(se.statistics.iowait_count);
		P_SCHEDSTAT(se.statistics.nr_migrations_cold);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_affine);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_running);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_hot);
		P_SCHEDSTAT(se.statistics.nr_forced_migrations);
		P_SCHEDSTAT(se.statistics.nr_wakeups);
		P_SCHEDSTAT(se.statistics.nr_wakeups_sync);
		P_SCHEDSTAT(se.statistics.nr_wakeups_migrate);
		P_SCHEDSTAT(se.statistics.nr_wakeups_local);
		P_SCHEDSTAT(se.statistics.nr_wakeups_remote);
		P_SCHEDSTAT(se.statistics.nr_wakeups_affine);
		P_SCHEDSTAT(se.statistics.nr_wakeups_affine_attempts);
		P_SCHEDSTAT(se.statistics.nr_wakeups_passive);
		P_SCHEDSTAT(se.statistics.nr_wakeups_idle);

		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			avg_atom = div64_ul(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1LL;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);
	}

	__P(nr_switches);
	SEQ_printf(m, "%-45s:%21Ld\n",
		   "nr_voluntary_switches", (long long)p->nvcsw);
	SEQ_printf(m, "%-45s:%21Ld\n",
		   "nr_involuntary_switches", (long long)p->nivcsw);

	P(se.load.weight);
#ifdef CONFIG_SMP
	P(se.avg.load_sum);
	P(se.avg.util_sum);
	P(se.avg.load_avg);
	P(se.avg.util_avg);
	P(se.avg.last_update_time);
#endif
	P(policy);
	P(prio);
	if (p->policy == SCHED_DEADLINE) {
		P(dl.runtime);
		P(dl.deadline);
	}
#undef PN_SCHEDSTAT
#undef PN
#undef __PN
#undef P_SCHEDSTAT
#undef P
#undef __P

	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		SEQ_printf(m, "%-45s:%21Ld\n",
			   "clock-delta", (long long)(t1-t0));
	}

	sched_show_numa(p, m);
}

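/* Called when /proc/<pid>/sched is written to: reset the task's schedstats. */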
void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
}