/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree and other debugging details
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include "sched.h"

static DEFINE_SPINLOCK(sched_debug_lock);

/*
 * This allows printing both to /proc/sched_debug and
 * to the console
 */
#define SEQ_printf(m, x...)                     \
 do {                                           \
        if (m)                                  \
                seq_printf(m, x);               \
        else                                    \
                printk(x);                      \
 } while (0)
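/*
 * Example (from the callers below): sched_debug_show() passes a valid
 * seq_file, so SEQ_printf() behaves like seq_printf();
 * sysrq_sched_debug_show() passes m == NULL and the same output goes
 * to the kernel log via printk() instead.
 */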

/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
        if ((long long)nsec < 0) {
                nsec = -nsec;
                do_div(nsec, 1000000);
                return -nsec;
        }
        do_div(nsec, 1000000);

        return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
        if ((long long)nsec < 0)
                nsec = -nsec;

        return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
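/*
 * Worked example (illustrative): for nsec = 2345678901, nsec_high()
 * returns the do_div() quotient 2345 and nsec_low() the remainder
 * 678901, so a "%Ld.%06ld" format prints "2345.678901", i.e.
 * milliseconds with six fractional digits.
 */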

#define SCHED_FEAT(name, enabled)       \
        #name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT
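/*
 * The array above is built with the usual X-macro trick: features.h
 * holds one SCHED_FEAT(name, enabled) line per feature, e.g.
 * (illustrative) SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true), which the
 * definition above turns into the string literal "GENTLE_FAIR_SLEEPERS".
 * The jump-label key table below re-includes features.h the same way.
 */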

static int sched_feat_show(struct seq_file *m, void *v)
{
        int i;

        for (i = 0; i < __SCHED_FEAT_NR; i++) {
                if (!(sysctl_sched_features & (1UL << i)))
                        seq_puts(m, "NO_");
                seq_printf(m, "%s ", sched_feat_names[i]);
        }
        seq_puts(m, "\n");

        return 0;
}

#ifdef HAVE_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)       \
        jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
        static_key_disable(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
        static_key_enable(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { };
static void sched_feat_enable(int i) { };
#endif /* HAVE_JUMP_LABEL */

static int sched_feat_set(char *cmp)
{
        int i;
        int neg = 0;

        if (strncmp(cmp, "NO_", 3) == 0) {
                neg = 1;
                cmp += 3;
        }

        for (i = 0; i < __SCHED_FEAT_NR; i++) {
                if (strcmp(cmp, sched_feat_names[i]) == 0) {
                        if (neg) {
                                sysctl_sched_features &= ~(1UL << i);
                                sched_feat_disable(i);
                        } else {
                                sysctl_sched_features |= (1UL << i);
                                sched_feat_enable(i);
                        }
                        break;
                }
        }

        return i;
}

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
                size_t cnt, loff_t *ppos)
{
        char buf[64];
        char *cmp;
        int i;
        struct inode *inode;

        if (cnt > 63)
                cnt = 63;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        buf[cnt] = 0;
        cmp = strstrip(buf);

        /* Ensure the static_key remains in a consistent state */
        inode = file_inode(filp);
        inode_lock(inode);
        i = sched_feat_set(cmp);
        inode_unlock(inode);
        if (i == __SCHED_FEAT_NR)
                return -EINVAL;

        *ppos += cnt;

        return cnt;
}
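/*
 * Example usage from userspace, assuming debugfs is mounted at
 * /sys/kernel/debug and GENTLE_FAIR_SLEEPERS is a configured feature:
 *
 *   # cat /sys/kernel/debug/sched_features
 *   # echo NO_GENTLE_FAIR_SLEEPERS > /sys/kernel/debug/sched_features
 *
 * A leading "NO_" clears the feature; an unknown name leaves the loop
 * in sched_feat_set() at __SCHED_FEAT_NR and the write fails with
 * -EINVAL.
 */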

static int sched_feat_open(struct inode *inode, struct file *filp)
{
        return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
        .open           = sched_feat_open,
        .write          = sched_feat_write,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

__read_mostly bool sched_debug_enabled;

static __init int sched_init_debug(void)
{
        debugfs_create_file("sched_features", 0644, NULL, NULL,
                        &sched_feat_fops);

        debugfs_create_bool("sched_debug", 0644, NULL,
                        &sched_debug_enabled);

        return 0;
}
late_initcall(sched_init_debug);
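/*
 * Both files land in the debugfs root because the parent argument is
 * NULL: /sys/kernel/debug/sched_features (above) and
 * /sys/kernel/debug/sched_debug, which toggles the sched_debug_enabled
 * flag consulted by the sched domain debug code elsewhere.
 */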

#ifdef CONFIG_SMP

#ifdef CONFIG_SYSCTL

static struct ctl_table sd_ctl_dir[] = {
        {
                .procname       = "sched_domain",
                .mode           = 0555,
        },
        {}
};

static struct ctl_table sd_ctl_root[] = {
        {
                .procname       = "kernel",
                .mode           = 0555,
                .child          = sd_ctl_dir,
        },
        {}
};

static struct ctl_table *sd_alloc_ctl_entry(int n)
{
        struct ctl_table *entry =
                kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);

        return entry;
}

static void sd_free_ctl_entry(struct ctl_table **tablep)
{
        struct ctl_table *entry;

        /*
         * In the intermediate directories, both the child directory and
         * procname are dynamically allocated and could fail but the mode
         * will always be set. In the lowest directory the names are
         * static strings and all have proc handlers.
         */
        for (entry = *tablep; entry->mode; entry++) {
                if (entry->child)
                        sd_free_ctl_entry(&entry->child);
                if (entry->proc_handler == NULL)
                        kfree(entry->procname);
        }

        kfree(*tablep);
        *tablep = NULL;
}

static int min_load_idx = 0;
static int max_load_idx = CPU_LOAD_IDX_MAX-1;

static void
set_table_entry(struct ctl_table *entry,
                const char *procname, void *data, int maxlen,
                umode_t mode, proc_handler *proc_handler,
                bool load_idx)
{
        entry->procname = procname;
        entry->data = data;
        entry->maxlen = maxlen;
        entry->mode = mode;
        entry->proc_handler = proc_handler;

        if (load_idx) {
                entry->extra1 = &min_load_idx;
                entry->extra2 = &max_load_idx;
        }
}

static struct ctl_table *
sd_alloc_ctl_domain_table(struct sched_domain *sd)
{
        struct ctl_table *table = sd_alloc_ctl_entry(14);

        if (table == NULL)
                return NULL;

        set_table_entry(&table[0] , "min_interval",        &sd->min_interval,        sizeof(long), 0644, proc_doulongvec_minmax, false);
        set_table_entry(&table[1] , "max_interval",        &sd->max_interval,        sizeof(long), 0644, proc_doulongvec_minmax, false);
        set_table_entry(&table[2] , "busy_idx",            &sd->busy_idx,            sizeof(int) , 0644, proc_dointvec_minmax,   true );
        set_table_entry(&table[3] , "idle_idx",            &sd->idle_idx,            sizeof(int) , 0644, proc_dointvec_minmax,   true );
        set_table_entry(&table[4] , "newidle_idx",         &sd->newidle_idx,         sizeof(int) , 0644, proc_dointvec_minmax,   true );
        set_table_entry(&table[5] , "wake_idx",            &sd->wake_idx,            sizeof(int) , 0644, proc_dointvec_minmax,   true );
        set_table_entry(&table[6] , "forkexec_idx",        &sd->forkexec_idx,        sizeof(int) , 0644, proc_dointvec_minmax,   true );
        set_table_entry(&table[7] , "busy_factor",         &sd->busy_factor,         sizeof(int) , 0644, proc_dointvec_minmax,   false);
        set_table_entry(&table[8] , "imbalance_pct",       &sd->imbalance_pct,       sizeof(int) , 0644, proc_dointvec_minmax,   false);
        set_table_entry(&table[9] , "cache_nice_tries",    &sd->cache_nice_tries,    sizeof(int) , 0644, proc_dointvec_minmax,   false);
        set_table_entry(&table[10], "flags",               &sd->flags,               sizeof(int) , 0644, proc_dointvec_minmax,   false);
        set_table_entry(&table[11], "max_newidle_lb_cost", &sd->max_newidle_lb_cost, sizeof(long), 0644, proc_doulongvec_minmax, false);
        set_table_entry(&table[12], "name",                sd->name,                 CORENAME_MAX_SIZE, 0444, proc_dostring,     false);
        /* &table[13] is terminator */

        return table;
}

static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
{
        struct ctl_table *entry, *table;
        struct sched_domain *sd;
        int domain_num = 0, i;
        char buf[32];

        for_each_domain(cpu, sd)
                domain_num++;
        entry = table = sd_alloc_ctl_entry(domain_num + 1);
        if (table == NULL)
                return NULL;

        i = 0;
        for_each_domain(cpu, sd) {
                snprintf(buf, 32, "domain%d", i);
                entry->procname = kstrdup(buf, GFP_KERNEL);
                entry->mode = 0555;
                entry->child = sd_alloc_ctl_domain_table(sd);
                entry++;
                i++;
        }
        return table;
}
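/*
 * The resulting sysctl tree, one directory per scheduling domain of
 * each CPU (illustrative paths):
 *
 *   /proc/sys/kernel/sched_domain/cpu0/domain0/min_interval
 *   /proc/sys/kernel/sched_domain/cpu0/domain0/name
 *   /proc/sys/kernel/sched_domain/cpu1/domain0/...
 */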

static cpumask_var_t            sd_sysctl_cpus;
static struct ctl_table_header  *sd_sysctl_header;

void register_sched_domain_sysctl(void)
{
        static struct ctl_table *cpu_entries;
        static struct ctl_table **cpu_idx;
        char buf[32];
        int i;

        if (!cpu_entries) {
                cpu_entries = sd_alloc_ctl_entry(num_possible_cpus() + 1);
                if (!cpu_entries)
                        return;

                WARN_ON(sd_ctl_dir[0].child);
                sd_ctl_dir[0].child = cpu_entries;
        }

        if (!cpu_idx) {
                struct ctl_table *e = cpu_entries;

                cpu_idx = kcalloc(nr_cpu_ids, sizeof(struct ctl_table*), GFP_KERNEL);
                if (!cpu_idx)
                        return;

                /* deal with sparse possible map */
                for_each_possible_cpu(i) {
                        cpu_idx[i] = e;
                        e++;
                }
        }

        if (!cpumask_available(sd_sysctl_cpus)) {
                if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
                        return;

                /* init to possible to not have holes in @cpu_entries */
                cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
        }

        for_each_cpu(i, sd_sysctl_cpus) {
                struct ctl_table *e = cpu_idx[i];

                if (e->child)
                        sd_free_ctl_entry(&e->child);

                if (!e->procname) {
                        snprintf(buf, 32, "cpu%d", i);
                        e->procname = kstrdup(buf, GFP_KERNEL);
                }
                e->mode = 0555;
                e->child = sd_alloc_ctl_cpu_table(i);

                __cpumask_clear_cpu(i, sd_sysctl_cpus);
        }

        WARN_ON(sd_sysctl_header);
        sd_sysctl_header = register_sysctl_table(sd_ctl_root);
}

void dirty_sched_domain_sysctl(int cpu)
{
        if (cpumask_available(sd_sysctl_cpus))
                __cpumask_set_cpu(cpu, sd_sysctl_cpus);
}
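/*
 * Sketch of the update protocol (the callers live in the topology
 * code): when a CPU's domains are rebuilt, dirty_sched_domain_sysctl()
 * marks that CPU in sd_sysctl_cpus, and the next
 * register_sched_domain_sysctl() re-allocates tables only for the
 * marked CPUs instead of all of them.
 */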

/* may be called multiple times per register */
void unregister_sched_domain_sysctl(void)
{
        unregister_sysctl_table(sd_sysctl_header);
        sd_sysctl_header = NULL;
}
#endif /* CONFIG_SYSCTL */
#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
        struct sched_entity *se = tg->se[cpu];

#define P(F)            SEQ_printf(m, "  .%-30s: %lld\n",       #F, (long long)F)
#define P_SCHEDSTAT(F)  SEQ_printf(m, "  .%-30s: %lld\n",       #F, (long long)schedstat_val(F))
#define PN(F)           SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN_SCHEDSTAT(F) SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(F)))

        if (!se)
                return;

        PN(se->exec_start);
        PN(se->vruntime);
        PN(se->sum_exec_runtime);

        if (schedstat_enabled()) {
                PN_SCHEDSTAT(se->statistics.wait_start);
                PN_SCHEDSTAT(se->statistics.sleep_start);
                PN_SCHEDSTAT(se->statistics.block_start);
                PN_SCHEDSTAT(se->statistics.sleep_max);
                PN_SCHEDSTAT(se->statistics.block_max);
                PN_SCHEDSTAT(se->statistics.exec_max);
                PN_SCHEDSTAT(se->statistics.slice_max);
                PN_SCHEDSTAT(se->statistics.wait_max);
                PN_SCHEDSTAT(se->statistics.wait_sum);
                P_SCHEDSTAT(se->statistics.wait_count);
        }

        P(se->load.weight);
        P(se->runnable_weight);
#ifdef CONFIG_SMP
        P(se->avg.load_avg);
        P(se->avg.util_avg);
        P(se->avg.runnable_load_avg);
#endif

#undef PN_SCHEDSTAT
#undef PN
#undef P_SCHEDSTAT
#undef P
}
#endif

#ifdef CONFIG_CGROUP_SCHED
static char group_path[PATH_MAX];

static char *task_group_path(struct task_group *tg)
{
        if (autogroup_path(tg, group_path, PATH_MAX))
                return group_path;

        cgroup_path(tg->css.cgroup, group_path, PATH_MAX);

        return group_path;
}
#endif
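/*
 * Note: group_path[] is a single shared buffer. The print_*() callers
 * below run under sched_debug_lock (taken in print_cpu()), which is
 * presumably what makes handing out a pointer to it safe.
 */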

static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
        if (rq->curr == p)
                SEQ_printf(m, ">R");
        else
                SEQ_printf(m, " %c", task_state_to_char(p));

        SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
                p->comm, task_pid_nr(p),
                SPLIT_NS(p->se.vruntime),
                (long long)(p->nvcsw + p->nivcsw),
                p->prio);

        SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
                SPLIT_NS(schedstat_val_or_zero(p->se.statistics.wait_sum)),
                SPLIT_NS(p->se.sum_exec_runtime),
                SPLIT_NS(schedstat_val_or_zero(p->se.statistics.sum_sleep_runtime)));

#ifdef CONFIG_NUMA_BALANCING
        SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
        SEQ_printf(m, " %s", task_group_path(task_group(p)));
#endif

        SEQ_printf(m, "\n");
}

static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
        struct task_struct *g, *p;

        SEQ_printf(m,
        "\nrunnable tasks:\n"
        " S           task   PID         tree-key  switches  prio"
        "     wait-time             sum-exec        sum-sleep\n"
        "-------------------------------------------------------"
        "----------------------------------------------------\n");

        rcu_read_lock();
        for_each_process_thread(g, p) {
                if (task_cpu(p) != rq_cpu)
                        continue;

                print_task(m, rq, p);
        }
        rcu_read_unlock();
}
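/*
 * An output line then looks roughly like (illustrative values):
 *
 *  S           task   PID         tree-key  switches  prio ...
 * >R           bash  1234      1042.123456       317   120 ...
 */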

void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
        s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
                spread, rq0_min_vruntime, spread0;
        struct rq *rq = cpu_rq(cpu);
        struct sched_entity *last;
        unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
        SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
#else
        SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
#endif
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
                        SPLIT_NS(cfs_rq->exec_clock));

        raw_spin_lock_irqsave(&rq->lock, flags);
        if (rb_first_cached(&cfs_rq->tasks_timeline))
                MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
        last = __pick_last_entity(cfs_rq);
        if (last)
                max_vruntime = last->vruntime;
        min_vruntime = cfs_rq->min_vruntime;
        rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
        raw_spin_unlock_irqrestore(&rq->lock, flags);
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
                        SPLIT_NS(MIN_vruntime));
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
                        SPLIT_NS(min_vruntime));
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
                        SPLIT_NS(max_vruntime));
        spread = max_vruntime - MIN_vruntime;
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
                        SPLIT_NS(spread));
        spread0 = min_vruntime - rq0_min_vruntime;
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
                        SPLIT_NS(spread0));
        SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
                        cfs_rq->nr_spread_over);
        SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
        SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
        SEQ_printf(m, "  .%-30s: %ld\n", "runnable_weight", cfs_rq->runnable_weight);
        SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
                        cfs_rq->avg.load_avg);
        SEQ_printf(m, "  .%-30s: %lu\n", "runnable_load_avg",
                        cfs_rq->avg.runnable_load_avg);
        SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
                        cfs_rq->avg.util_avg);
        SEQ_printf(m, "  .%-30s: %ld\n", "removed.load_avg",
                        cfs_rq->removed.load_avg);
        SEQ_printf(m, "  .%-30s: %ld\n", "removed.util_avg",
                        cfs_rq->removed.util_avg);
        SEQ_printf(m, "  .%-30s: %ld\n", "removed.runnable_sum",
                        cfs_rq->removed.runnable_sum);
#ifdef CONFIG_FAIR_GROUP_SCHED
        SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
                        cfs_rq->tg_load_avg_contrib);
        SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
                        atomic_long_read(&cfs_rq->tg->load_avg));
#endif
#endif
#ifdef CONFIG_CFS_BANDWIDTH
        SEQ_printf(m, "  .%-30s: %d\n", "throttled",
                        cfs_rq->throttled);
        SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
                        cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
        print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}
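/*
 * Reading hint: spread0 compares this runqueue's min_vruntime with CPU
 * 0's, a quick cross-CPU sanity check; all vruntime-style fields go
 * through SPLIT_NS, so they read as msec.usec.
 */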

void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
        SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
#else
        SEQ_printf(m, "\nrt_rq[%d]:\n", cpu);
#endif

#define P(x) \
        SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PU(x) \
        SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(rt_rq->x))
#define PN(x) \
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

        PU(rt_nr_running);
#ifdef CONFIG_SMP
        PU(rt_nr_migratory);
#endif
        P(rt_throttled);
        PN(rt_time);
        PN(rt_runtime);

#undef PN
#undef PU
#undef P
}

void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{
        struct dl_bw *dl_bw;

        SEQ_printf(m, "\ndl_rq[%d]:\n", cpu);

#define PU(x) \
        SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))

        PU(dl_nr_running);
#ifdef CONFIG_SMP
        PU(dl_nr_migratory);
        dl_bw = &cpu_rq(cpu)->rd->dl_bw;
#else
        dl_bw = &dl_rq->dl_bw;
#endif
        SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
        SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);

#undef PU
}

extern __read_mostly int sched_clock_running;

static void print_cpu(struct seq_file *m, int cpu)
{
        struct rq *rq = cpu_rq(cpu);
        unsigned long flags;

#ifdef CONFIG_X86
        {
                unsigned int freq = cpu_khz ? : 1;

                SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
                           cpu, freq / 1000, (freq % 1000));
        }
#else
        SEQ_printf(m, "cpu#%d\n", cpu);
#endif

#define P(x)                                                            \
do {                                                                    \
        if (sizeof(rq->x) == 4)                                         \
                SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));    \
        else                                                            \
                SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)
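/*
 * The sizeof() dispatch above lets one macro print both 32-bit and
 * 64-bit rq fields with the matching format specifier.
 */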

#define PN(x) \
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

        P(nr_running);
        SEQ_printf(m, "  .%-30s: %lu\n", "load",
                   rq->load.weight);
        P(nr_switches);
        P(nr_load_updates);
        P(nr_uninterruptible);
        PN(next_balance);
        SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
        PN(clock);
        PN(clock_task);
        P(cpu_load[0]);
        P(cpu_load[1]);
        P(cpu_load[2]);
        P(cpu_load[3]);
        P(cpu_load[4]);
#undef P
#undef PN

#ifdef CONFIG_SMP
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
        P64(avg_idle);
        P64(max_idle_balance_cost);
#undef P64
#endif

#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
        if (schedstat_enabled()) {
                P(yld_count);
                P(sched_count);
                P(sched_goidle);
                P(ttwu_count);
                P(ttwu_local);
        }
#undef P

        spin_lock_irqsave(&sched_debug_lock, flags);
        print_cfs_stats(m, cpu);
        print_rt_stats(m, cpu);
        print_dl_stats(m, cpu);

        print_rq(m, rq, cpu);
        spin_unlock_irqrestore(&sched_debug_lock, flags);
        SEQ_printf(m, "\n");
}

static const char *sched_tunable_scaling_names[] = {
        "none",
        "logarithmic",
        "linear"
};

static void sched_debug_header(struct seq_file *m)
{
        u64 ktime, sched_clk, cpu_clk;
        unsigned long flags;

        local_irq_save(flags);
        ktime = ktime_to_ns(ktime_get());
        sched_clk = sched_clock();
        cpu_clk = local_clock();
        local_irq_restore(flags);

        SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
                init_utsname()->release,
                (int)strcspn(init_utsname()->version, " "),
                init_utsname()->version);

#define P(x) \
        SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
        SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
        PN(ktime);
        PN(sched_clk);
        PN(cpu_clk);
        P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
        P(sched_clock_stable());
#endif
#undef PN
#undef P

        SEQ_printf(m, "\n");
        SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
        SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
        SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
        PN(sysctl_sched_latency);
        PN(sysctl_sched_min_granularity);
        PN(sysctl_sched_wakeup_granularity);
        P(sysctl_sched_child_runs_first);
        P(sysctl_sched_features);
#undef PN
#undef P

        SEQ_printf(m, "  .%-40s: %d (%s)\n",
                "sysctl_sched_tunable_scaling",
                sysctl_sched_tunable_scaling,
                sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
        SEQ_printf(m, "\n");
}

static int sched_debug_show(struct seq_file *m, void *v)
{
        int cpu = (unsigned long)(v - 2);

        if (cpu != -1)
                print_cpu(m, cpu);
        else
                sched_debug_header(m);

        return 0;
}

void sysrq_sched_debug_show(void)
{
        int cpu;

        sched_debug_header(NULL);
        for_each_online_cpu(cpu)
                print_cpu(NULL, cpu);
}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is CPU 0.
 * In a hotplugged system some CPUs, including CPU 0, may be missing so we have
 * to use cpumask_* to iterate over the CPUs.
 */
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
        unsigned long n = *offset;

        if (n == 0)
                return (void *) 1;

        n--;

        if (n > 0)
                n = cpumask_next(n - 1, cpu_online_mask);
        else
                n = cpumask_first(cpu_online_mask);

        *offset = n + 1;

        if (n < nr_cpu_ids)
                return (void *)(unsigned long)(n + 2);

        return NULL;
}
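/*
 * Worked example (illustrative), CPUs 0 and 2 online: *offset == 0
 * returns (void *)1 (the header); *offset == 1 returns CPU 0 as
 * (void *)2; *offset == 2 returns CPU 2 as (void *)4; after that
 * cpumask_next() runs past nr_cpu_ids and the iteration ends.
 */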

static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
        (*offset)++;
        return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations sched_debug_sops = {
        .start          = sched_debug_start,
        .next           = sched_debug_next,
        .stop           = sched_debug_stop,
        .show           = sched_debug_show,
};

static int sched_debug_release(struct inode *inode, struct file *file)
{
        seq_release(inode, file);

        return 0;
}

static int sched_debug_open(struct inode *inode, struct file *filp)
{
        int ret = 0;

        ret = seq_open(filp, &sched_debug_sops);

        return ret;
}

static const struct file_operations sched_debug_fops = {
        .open           = sched_debug_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = sched_debug_release,
};

static int __init init_sched_debug_procfs(void)
{
        struct proc_dir_entry *pe;

        pe = proc_create("sched_debug", 0444, NULL, &sched_debug_fops);
        if (!pe)
                return -ENOMEM;
        return 0;
}

__initcall(init_sched_debug_procfs);
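/*
 * Usage: "cat /proc/sched_debug" prints the header once, then one
 * print_cpu() block per online CPU.
 */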

#define __P(F)  SEQ_printf(m, "%-45s:%21Ld\n",       #F, (long long)F)
#define   P(F)  SEQ_printf(m, "%-45s:%21Ld\n",       #F, (long long)p->F)
#define __PN(F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define   PN(F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))


#ifdef CONFIG_NUMA_BALANCING
void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
                unsigned long tpf, unsigned long gsf, unsigned long gpf)
{
        SEQ_printf(m, "numa_faults node=%d ", node);
        SEQ_printf(m, "task_private=%lu task_shared=%lu ", tsf, tpf);
        SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gsf, gpf);
}
#endif


static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
        struct mempolicy *pol;

        if (p->mm)
                P(mm->numa_scan_seq);

        task_lock(p);
        pol = p->mempolicy;
        if (pol && !(pol->flags & MPOL_F_MORON))
                pol = NULL;
        mpol_get(pol);
        task_unlock(p);

        P(numa_pages_migrated);
        P(numa_preferred_nid);
        P(total_numa_faults);
        SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
                        task_node(p), task_numa_group_id(p));
        show_numa_stats(p, m);
        mpol_put(pol);
#endif
}

void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
                                                  struct seq_file *m)
{
        unsigned long nr_switches;

        SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
                                                get_nr_threads(p));
        SEQ_printf(m,
                "---------------------------------------------------------"
                "----------\n");
#define __P(F) \
        SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
#define P(F) \
        SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
#define P_SCHEDSTAT(F) \
        SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)schedstat_val(p->F))
#define __PN(F) \
        SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
        SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))
#define PN_SCHEDSTAT(F) \
        SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(p->F)))

        PN(se.exec_start);
        PN(se.vruntime);
        PN(se.sum_exec_runtime);

        nr_switches = p->nvcsw + p->nivcsw;

        P(se.nr_migrations);

        if (schedstat_enabled()) {
                u64 avg_atom, avg_per_cpu;

                PN_SCHEDSTAT(se.statistics.sum_sleep_runtime);
                PN_SCHEDSTAT(se.statistics.wait_start);
                PN_SCHEDSTAT(se.statistics.sleep_start);
                PN_SCHEDSTAT(se.statistics.block_start);
                PN_SCHEDSTAT(se.statistics.sleep_max);
                PN_SCHEDSTAT(se.statistics.block_max);
                PN_SCHEDSTAT(se.statistics.exec_max);
                PN_SCHEDSTAT(se.statistics.slice_max);
                PN_SCHEDSTAT(se.statistics.wait_max);
                PN_SCHEDSTAT(se.statistics.wait_sum);
                P_SCHEDSTAT(se.statistics.wait_count);
                PN_SCHEDSTAT(se.statistics.iowait_sum);
                P_SCHEDSTAT(se.statistics.iowait_count);
                P_SCHEDSTAT(se.statistics.nr_migrations_cold);
                P_SCHEDSTAT(se.statistics.nr_failed_migrations_affine);
                P_SCHEDSTAT(se.statistics.nr_failed_migrations_running);
                P_SCHEDSTAT(se.statistics.nr_failed_migrations_hot);
                P_SCHEDSTAT(se.statistics.nr_forced_migrations);
                P_SCHEDSTAT(se.statistics.nr_wakeups);
                P_SCHEDSTAT(se.statistics.nr_wakeups_sync);
                P_SCHEDSTAT(se.statistics.nr_wakeups_migrate);
                P_SCHEDSTAT(se.statistics.nr_wakeups_local);
                P_SCHEDSTAT(se.statistics.nr_wakeups_remote);
                P_SCHEDSTAT(se.statistics.nr_wakeups_affine);
                P_SCHEDSTAT(se.statistics.nr_wakeups_affine_attempts);
                P_SCHEDSTAT(se.statistics.nr_wakeups_passive);
                P_SCHEDSTAT(se.statistics.nr_wakeups_idle);

                avg_atom = p->se.sum_exec_runtime;
                if (nr_switches)
                        avg_atom = div64_ul(avg_atom, nr_switches);
                else
                        avg_atom = -1LL;

                avg_per_cpu = p->se.sum_exec_runtime;
                if (p->se.nr_migrations) {
                        avg_per_cpu = div64_u64(avg_per_cpu,
                                                p->se.nr_migrations);
                } else {
                        avg_per_cpu = -1LL;
                }

                __PN(avg_atom);
                __PN(avg_per_cpu);
        }

        __P(nr_switches);
        SEQ_printf(m, "%-45s:%21Ld\n",
                   "nr_voluntary_switches", (long long)p->nvcsw);
        SEQ_printf(m, "%-45s:%21Ld\n",
                   "nr_involuntary_switches", (long long)p->nivcsw);

        P(se.load.weight);
        P(se.runnable_weight);
#ifdef CONFIG_SMP
        P(se.avg.load_sum);
        P(se.avg.runnable_load_sum);
        P(se.avg.util_sum);
        P(se.avg.load_avg);
        P(se.avg.runnable_load_avg);
        P(se.avg.util_avg);
        P(se.avg.last_update_time);
#endif
        P(policy);
        P(prio);
        if (p->policy == SCHED_DEADLINE) {
                P(dl.runtime);
                P(dl.deadline);
        }
#undef PN_SCHEDSTAT
#undef PN
#undef __PN
#undef P_SCHEDSTAT
#undef P
#undef __P

        {
                unsigned int this_cpu = raw_smp_processor_id();
                u64 t0, t1;

                t0 = cpu_clock(this_cpu);
                t1 = cpu_clock(this_cpu);
                SEQ_printf(m, "%-45s:%21Ld\n",
                           "clock-delta", (long long)(t1-t0));
        }

        sched_show_numa(p, m);
}

void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
        memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
}
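/*
 * These two functions back /proc/<pid>/sched (wired up in
 * fs/proc/base.c): reading prints the block above, and writing anything
 * to the file resets the task's schedstats via proc_sched_set_task().
 */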