/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/utsname.h>
#include <linux/mempolicy.h>

#include "sched.h"

static DEFINE_SPINLOCK(sched_debug_lock);

/*
 * This allows printing both to /proc/sched_debug and
 * to the console
 */
#define SEQ_printf(m, x...)			\
 do {						\
	if (m)					\
		seq_printf(m, x);		\
	else					\
		printk(x);			\
 } while (0)

/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);

	return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;

	return do_div(nsec, 1000000);
}

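/*
 * SPLIT_NS() expands to two arguments for a single "%Ld.%06ld" conversion:
 * the value in whole milliseconds and the 0..999999 nanosecond remainder.
 */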
#define SPLIT_NS(x) nsec_high(x), nsec_low(x)

#ifdef CONFIG_FAIR_GROUP_SCHED
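/* Dump the group's per-cpu sched_entity statistics, if the group has one. */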
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F) \
	SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)F)
#define PN(F) \
	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))

	if (!se)
		return;

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);
#ifdef CONFIG_SCHEDSTATS
	PN(se->statistics.wait_start);
	PN(se->statistics.sleep_start);
	PN(se->statistics.block_start);
	PN(se->statistics.sleep_max);
	PN(se->statistics.block_max);
	PN(se->statistics.exec_max);
	PN(se->statistics.slice_max);
	PN(se->statistics.wait_max);
	PN(se->statistics.wait_sum);
	P(se->statistics.wait_count);
#endif
	P(se->load.weight);
#ifdef CONFIG_SMP
	P(se->avg.load_avg);
	P(se->avg.util_avg);
#endif
#undef PN
#undef P
}
#endif

#ifdef CONFIG_CGROUP_SCHED
static char group_path[PATH_MAX];

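/*
 * Resolve a task group to its cgroup path (or its autogroup name),
 * using the shared group_path[] buffer.
 */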
static char *task_group_path(struct task_group *tg)
{
	if (autogroup_path(tg, group_path, PATH_MAX))
		return group_path;

	return cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
}
#endif

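/* Print one row of the "runnable tasks" table for task @p. */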
static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	if (rq->curr == p)
		SEQ_printf(m, "R");
	else
		SEQ_printf(m, " ");

	SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
		p->comm, task_pid_nr(p),
		SPLIT_NS(p->se.vruntime),
		(long long)(p->nvcsw + p->nivcsw),
		p->prio);
#ifdef CONFIG_SCHEDSTATS
	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
		SPLIT_NS(p->se.statistics.wait_sum),
		SPLIT_NS(p->se.sum_exec_runtime),
		SPLIT_NS(p->se.statistics.sum_sleep_runtime));
#else
	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
		0LL, 0L,
		SPLIT_NS(p->se.sum_exec_runtime),
		0LL, 0L);
#endif
#ifdef CONFIG_NUMA_BALANCING
	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf(m, " %s", task_group_path(task_group(p)));
#endif

	SEQ_printf(m, "\n");
}

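/* Walk all threads and print the ones currently placed on CPU @rq_cpu. */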
static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;

	SEQ_printf(m,
	"\nrunnable tasks:\n"
	"            task   PID         tree-key  switches  prio"
	"     wait-time             sum-exec        sum-sleep\n"
	"------------------------------------------------------"
	"----------------------------------------------------\n");

	rcu_read_lock();
	for_each_process_thread(g, p) {
		if (task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	}
	rcu_read_unlock();
}

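/*
 * Dump one CPU's cfs_rq: the vruntime spread (sampled under rq->lock),
 * load/utilization averages and, where configured, group and bandwidth state.
 */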
void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
		spread, rq0_min_vruntime, spread0;
	struct rq *rq = cpu_rq(cpu);
	struct sched_entity *last;
	unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
#else
	SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
#endif
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
			SPLIT_NS(cfs_rq->exec_clock));

	raw_spin_lock_irqsave(&rq->lock, flags);
	if (cfs_rq->rb_leftmost)
		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
	last = __pick_last_entity(cfs_rq);
	if (last)
		max_vruntime = last->vruntime;
	min_vruntime = cfs_rq->min_vruntime;
	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
	raw_spin_unlock_irqrestore(&rq->lock, flags);
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
			SPLIT_NS(MIN_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
			SPLIT_NS(min_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
			SPLIT_NS(max_vruntime));
	spread = max_vruntime - MIN_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
			SPLIT_NS(spread));
	spread0 = min_vruntime - rq0_min_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
			SPLIT_NS(spread0));
	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
			cfs_rq->nr_spread_over);
	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
	SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
			cfs_rq->avg.load_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "runnable_load_avg",
			cfs_rq->runnable_load_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
			cfs_rq->avg.util_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed_load_avg",
			atomic_long_read(&cfs_rq->removed_load_avg));
	SEQ_printf(m, "  .%-30s: %ld\n", "removed_util_avg",
			atomic_long_read(&cfs_rq->removed_util_avg));
#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
			cfs_rq->tg_load_avg_contrib);
	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
			atomic_long_read(&cfs_rq->tg->load_avg));
#endif
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	SEQ_printf(m, "  .%-30s: %d\n", "throttled",
			cfs_rq->throttled);
	SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
			cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}

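/* Dump one CPU's rt_rq: running/throttled state and its runtime accounting. */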
void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
	SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
#else
	SEQ_printf(m, "\nrt_rq[%d]:\n", cpu);
#endif

#define P(x) \
	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

	P(rt_nr_running);
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);

#undef PN
#undef P
}

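/* Dump one CPU's dl_rq: currently just the number of queued deadline tasks. */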
void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{
	SEQ_printf(m, "\ndl_rq[%d]:\n", cpu);
	SEQ_printf(m, "  .%-30s: %ld\n", "dl_nr_running", dl_rq->dl_nr_running);
}

extern __read_mostly int sched_clock_running;

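/*
 * Dump everything we know about one CPU: its rq counters, schedstats when
 * enabled, and then the per-class runqueues and runnable tasks, which are
 * printed under sched_debug_lock.
 */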
static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else
	SEQ_printf(m, "cpu#%d\n", cpu);
#endif

#define P(x)								\
do {									\
	if (sizeof(rq->x) == 4)						\
		SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));	\
	else								\
		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	P(nr_running);
	SEQ_printf(m, "  .%-30s: %lu\n", "load",
		   rq->load.weight);
	P(nr_switches);
	P(nr_load_updates);
	P(nr_uninterruptible);
	PN(next_balance);
	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
	PN(clock);
	PN(clock_task);
	P(cpu_load[0]);
	P(cpu_load[1]);
	P(cpu_load[2]);
	P(cpu_load[3]);
	P(cpu_load[4]);
#undef P
#undef PN

#ifdef CONFIG_SCHEDSTATS
#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, rq->n);
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);

	P(yld_count);

	P(sched_count);
	P(sched_goidle);
#ifdef CONFIG_SMP
	P64(avg_idle);
	P64(max_idle_balance_cost);
#endif

	P(ttwu_count);
	P(ttwu_local);

#undef P
#undef P64
#endif
	spin_lock_irqsave(&sched_debug_lock, flags);
	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);
	print_dl_stats(m, cpu);

	print_rq(m, rq, cpu);
	spin_unlock_irqrestore(&sched_debug_lock, flags);
	SEQ_printf(m, "\n");
}

static const char *sched_tunable_scaling_names[] = {
	"none",
	"logarithmic",
	"linear"
};

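/* Print the global header: clocks, jiffies and the sysctl_sched_* tunables. */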
static void sched_debug_header(struct seq_file *m)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;

	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable());
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_latency);
	PN(sysctl_sched_min_granularity);
	PN(sysctl_sched_wakeup_granularity);
	P(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
#undef PN
#undef P

	SEQ_printf(m, "  .%-40s: %d (%s)\n",
		"sysctl_sched_tunable_scaling",
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
	SEQ_printf(m, "\n");
}

static int sched_debug_show(struct seq_file *m, void *v)
{
	int cpu = (unsigned long)(v - 2);

	if (cpu != -1)
		print_cpu(m, cpu);
	else
		sched_debug_header(m);

	return 0;
}

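/*
 * Dump the header and every online CPU straight to the console:
 * with a NULL seq_file, SEQ_printf() falls back to printk().
 */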
void sysrq_sched_debug_show(void)
{
	int cpu;

	sched_debug_header(NULL);
	for_each_online_cpu(cpu)
		print_cpu(NULL, cpu);

}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is cpu 0.
 * In a hotplugged system some cpus, including cpu 0, may be missing so we have
 * to use cpumask_* to iterate over the cpus.
 */
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);
	return NULL;
}

static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations sched_debug_sops = {
	.start = sched_debug_start,
	.next = sched_debug_next,
	.stop = sched_debug_stop,
	.show = sched_debug_show,
};

static int sched_debug_release(struct inode *inode, struct file *file)
{
	seq_release(inode, file);

	return 0;
}

static int sched_debug_open(struct inode *inode, struct file *filp)
{
	int ret = 0;

	ret = seq_open(filp, &sched_debug_sops);

	return ret;
}

static const struct file_operations sched_debug_fops = {
	.open		= sched_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= sched_debug_release,
};

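/* Register the read-only /proc/sched_debug file at boot. */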
static int __init init_sched_debug_procfs(void)
{
	struct proc_dir_entry *pe;

	pe = proc_create("sched_debug", 0444, NULL, &sched_debug_fops);
	if (!pe)
		return -ENOMEM;
	return 0;
}

__initcall(init_sched_debug_procfs);

#define __P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
#define P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
#define __PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))


#ifdef CONFIG_NUMA_BALANCING
void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
		unsigned long tpf, unsigned long gsf, unsigned long gpf)
{
	SEQ_printf(m, "numa_faults node=%d ", node);
	SEQ_printf(m, "task_private=%lu task_shared=%lu ", tsf, tpf);
	SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gsf, gpf);
}
#endif


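/*
 * Append the NUMA-balancing view of @p (scan sequence, fault counts,
 * preferred node) to its per-task output; a no-op unless
 * CONFIG_NUMA_BALANCING is enabled.
 */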
static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
	struct mempolicy *pol;

	if (p->mm)
		P(mm->numa_scan_seq);

	task_lock(p);
	pol = p->mempolicy;
	if (pol && !(pol->flags & MPOL_F_MORON))
		pol = NULL;
	mpol_get(pol);
	task_unlock(p);

	P(numa_pages_migrated);
	P(numa_preferred_nid);
	P(total_numa_faults);
	SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
			task_node(p), task_numa_group_id(p));
	show_numa_stats(p, m);
	mpol_put(pol);
#endif
}

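/*
 * Dump a single task's scheduler state and statistics; this is what backs
 * /proc/<pid>/sched.
 */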
void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{
	unsigned long nr_switches;

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr(p),
						get_nr_threads(p));
	SEQ_printf(m,
		"---------------------------------------------------------"
		"----------\n");
#define __P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
#define P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
#define __PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);

	nr_switches = p->nvcsw + p->nivcsw;

#ifdef CONFIG_SCHEDSTATS
	PN(se.statistics.sum_sleep_runtime);
	PN(se.statistics.wait_start);
	PN(se.statistics.sleep_start);
	PN(se.statistics.block_start);
	PN(se.statistics.sleep_max);
	PN(se.statistics.block_max);
	PN(se.statistics.exec_max);
	PN(se.statistics.slice_max);
	PN(se.statistics.wait_max);
	PN(se.statistics.wait_sum);
	P(se.statistics.wait_count);
	PN(se.statistics.iowait_sum);
	P(se.statistics.iowait_count);
	P(se.nr_migrations);
	P(se.statistics.nr_migrations_cold);
	P(se.statistics.nr_failed_migrations_affine);
	P(se.statistics.nr_failed_migrations_running);
	P(se.statistics.nr_failed_migrations_hot);
	P(se.statistics.nr_forced_migrations);
	P(se.statistics.nr_wakeups);
	P(se.statistics.nr_wakeups_sync);
	P(se.statistics.nr_wakeups_migrate);
	P(se.statistics.nr_wakeups_local);
	P(se.statistics.nr_wakeups_remote);
	P(se.statistics.nr_wakeups_affine);
	P(se.statistics.nr_wakeups_affine_attempts);
	P(se.statistics.nr_wakeups_passive);
	P(se.statistics.nr_wakeups_idle);

	{
		u64 avg_atom, avg_per_cpu;

		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			avg_atom = div64_ul(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1LL;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);
	}
#endif
	__P(nr_switches);
	SEQ_printf(m, "%-45s:%21Ld\n",
		   "nr_voluntary_switches", (long long)p->nvcsw);
	SEQ_printf(m, "%-45s:%21Ld\n",
		   "nr_involuntary_switches", (long long)p->nivcsw);

	P(se.load.weight);
#ifdef CONFIG_SMP
	P(se.avg.load_sum);
	P(se.avg.util_sum);
	P(se.avg.load_avg);
	P(se.avg.util_avg);
	P(se.avg.last_update_time);
#endif
	P(policy);
	P(prio);
#undef PN
#undef __PN
#undef P
#undef __P

	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		SEQ_printf(m, "%-45s:%21Ld\n",
			   "clock-delta", (long long)(t1-t0));
	}

	sched_show_numa(p, m);
}

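/*
 * Called when /proc/<pid>/sched is written to: clear the task's accumulated
 * schedstats so a fresh measurement can start.
 */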
void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
}