/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/utsname.h>
#include <linux/mempolicy.h>

#include "sched.h"

static DEFINE_SPINLOCK(sched_debug_lock);

/*
 * This allows printing both to /proc/sched_debug and
 * to the console
 */
#define SEQ_printf(m, x...)			\
 do {						\
	if (m)					\
		seq_printf(m, x);		\
	else					\
		printk(x);			\
 } while (0)

/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);

	return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;

	return do_div(nsec, 1000000);
}

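/*
 * SPLIT_NS() expands to two printf arguments for a "%Ld.%06ld" format:
 * nsec_high() gives whole milliseconds (sign preserved) and nsec_low()
 * the nanosecond remainder, so a nanosecond value is printed as
 * milliseconds with six decimal places.
 */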
#define SPLIT_NS(x) nsec_high(x), nsec_low(x)

#ifdef CONFIG_FAIR_GROUP_SCHED
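/*
 * Per-group scheduling stats for one CPU; the root task_group has no
 * per-CPU sched_entity, so fall back to the runqueue-wide averages.
 */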
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F) \
	SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)F)
#define PN(F) \
	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))

	if (!se) {
		struct sched_avg *avg = &cpu_rq(cpu)->avg;
		P(avg->runnable_avg_sum);
		P(avg->runnable_avg_period);
		return;
	}

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);
#ifdef CONFIG_SCHEDSTATS
	PN(se->statistics.wait_start);
	PN(se->statistics.sleep_start);
	PN(se->statistics.block_start);
	PN(se->statistics.sleep_max);
	PN(se->statistics.block_max);
	PN(se->statistics.exec_max);
	PN(se->statistics.slice_max);
	PN(se->statistics.wait_max);
	PN(se->statistics.wait_sum);
	P(se->statistics.wait_count);
#endif
	P(se->load.weight);
#ifdef CONFIG_SMP
	P(se->avg.runnable_avg_sum);
	P(se->avg.runnable_avg_period);
	P(se->avg.load_avg_contrib);
	P(se->avg.decay_count);
#endif
#undef PN
#undef P
}
#endif

#ifdef CONFIG_CGROUP_SCHED
static char group_path[PATH_MAX];

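/*
 * Resolve a task_group to its cgroup path (or autogroup name) in the
 * shared static buffer above; callers serialize via sched_debug_lock.
 */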
static char *task_group_path(struct task_group *tg)
{
	if (autogroup_path(tg, group_path, PATH_MAX))
		return group_path;

	return cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
}
#endif

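/*
 * One line per runnable task; a leading "R" marks the task that is
 * currently running on this runqueue.
 */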
static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	if (rq->curr == p)
		SEQ_printf(m, "R");
	else
		SEQ_printf(m, " ");

	SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
		p->comm, task_pid_nr(p),
		SPLIT_NS(p->se.vruntime),
		(long long)(p->nvcsw + p->nivcsw),
		p->prio);
#ifdef CONFIG_SCHEDSTATS
	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
		SPLIT_NS(p->se.vruntime),
		SPLIT_NS(p->se.sum_exec_runtime),
		SPLIT_NS(p->se.statistics.sum_sleep_runtime));
#else
	SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld",
		0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L);
#endif
#ifdef CONFIG_NUMA_BALANCING
	SEQ_printf(m, " %d", task_node(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf(m, " %s", task_group_path(task_group(p)));
#endif

	SEQ_printf(m, "\n");
}

static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;
	unsigned long flags;

	SEQ_printf(m,
	"\nrunnable tasks:\n"
	"            task   PID         tree-key  switches  prio"
	"     exec-runtime         sum-exec        sum-sleep\n"
	"------------------------------------------------------"
	"----------------------------------------------------\n");

	read_lock_irqsave(&tasklist_lock, flags);

	do_each_thread(g, p) {
		if (task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	} while_each_thread(g, p);

	read_unlock_irqrestore(&tasklist_lock, flags);
}

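/*
 * Dump one cfs_rq: vruntime spread (also relative to CPU0), load-tracking
 * averages and, with group scheduling, the owning task_group's stats.
 */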
void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
		spread, rq0_min_vruntime, spread0;
	struct rq *rq = cpu_rq(cpu);
	struct sched_entity *last;
	unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
#else
	SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
#endif
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
			SPLIT_NS(cfs_rq->exec_clock));

	raw_spin_lock_irqsave(&rq->lock, flags);
	if (cfs_rq->rb_leftmost)
		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
	last = __pick_last_entity(cfs_rq);
	if (last)
		max_vruntime = last->vruntime;
	min_vruntime = cfs_rq->min_vruntime;
	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
	raw_spin_unlock_irqrestore(&rq->lock, flags);
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
			SPLIT_NS(MIN_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
			SPLIT_NS(min_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
			SPLIT_NS(max_vruntime));
	spread = max_vruntime - MIN_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
			SPLIT_NS(spread));
	spread0 = min_vruntime - rq0_min_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
			SPLIT_NS(spread0));
	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
			cfs_rq->nr_spread_over);
	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
	SEQ_printf(m, "  .%-30s: %ld\n", "runnable_load_avg",
			cfs_rq->runnable_load_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "blocked_load_avg",
			cfs_rq->blocked_load_avg);
#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_contrib",
			cfs_rq->tg_load_contrib);
	SEQ_printf(m, "  .%-30s: %d\n", "tg_runnable_contrib",
			cfs_rq->tg_runnable_contrib);
	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
			atomic_long_read(&cfs_rq->tg->load_avg));
	SEQ_printf(m, "  .%-30s: %d\n", "tg->runnable_avg",
			atomic_read(&cfs_rq->tg->runnable_avg));
#endif
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	SEQ_printf(m, "  .%-30s: %d\n", "tg->cfs_bandwidth.timer_active",
			cfs_rq->tg->cfs_bandwidth.timer_active);
	SEQ_printf(m, "  .%-30s: %d\n", "throttled",
			cfs_rq->throttled);
	SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
			cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}

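/* Dump one rt_rq: runnable count, throttling state and bandwidth. */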
void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
	SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
#else
	SEQ_printf(m, "\nrt_rq[%d]:\n", cpu);
#endif

#define P(x) \
	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

	P(rt_nr_running);
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);

#undef PN
#undef P
}

extern __read_mostly int sched_clock_running;

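/*
 * Dump one CPU's runqueue: clock and load fields, schedstats counters,
 * then the cfs and rt runqueues and finally the runnable tasks.
 */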
static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else
	SEQ_printf(m, "cpu#%d\n", cpu);
#endif

#define P(x)								\
do {									\
	if (sizeof(rq->x) == 4)						\
		SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));	\
	else								\
		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	P(nr_running);
	SEQ_printf(m, "  .%-30s: %lu\n", "load",
		   rq->load.weight);
	P(nr_switches);
	P(nr_load_updates);
	P(nr_uninterruptible);
	PN(next_balance);
	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
	PN(clock);
	P(cpu_load[0]);
	P(cpu_load[1]);
	P(cpu_load[2]);
	P(cpu_load[3]);
	P(cpu_load[4]);
#undef P
#undef PN

#ifdef CONFIG_SCHEDSTATS
#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, rq->n);
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);

	P(yld_count);

	P(sched_count);
	P(sched_goidle);
#ifdef CONFIG_SMP
	P64(avg_idle);
#endif

	P(ttwu_count);
	P(ttwu_local);

#undef P
#undef P64
#endif
	spin_lock_irqsave(&sched_debug_lock, flags);
	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);

	rcu_read_lock();
	print_rq(m, rq, cpu);
	rcu_read_unlock();
	spin_unlock_irqrestore(&sched_debug_lock, flags);
	SEQ_printf(m, "\n");
}

static const char *sched_tunable_scaling_names[] = {
	"none",
	"logarithmic",
	"linear"
};

static void sched_debug_header(struct seq_file *m)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;

	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable());
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_latency);
	PN(sysctl_sched_min_granularity);
	PN(sysctl_sched_wakeup_granularity);
	P(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
#undef PN
#undef P

	SEQ_printf(m, "  .%-40s: %d (%s)\n",
		"sysctl_sched_tunable_scaling",
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
	SEQ_printf(m, "\n");
}

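/*
 * seq_file ->show(): the iterator below hands us (cpu + 2) as the cookie,
 * or 1 for the header, so (v - 2) is either a valid CPU number or -1.
 */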
static int sched_debug_show(struct seq_file *m, void *v)
{
	int cpu = (unsigned long)(v - 2);

	if (cpu != -1)
		print_cpu(m, cpu);
	else
		sched_debug_header(m);

	return 0;
}

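/* Dump the header and every online CPU straight to the console (NULL seq_file). */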
void sysrq_sched_debug_show(void)
{
	int cpu;

	sched_debug_header(NULL);
	for_each_online_cpu(cpu)
		print_cpu(NULL, cpu);
}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is cpu 0.
 * In a hotplugged system some CPUs, including cpu 0, may be missing, so we
 * have to use cpumask_* to iterate over the CPUs.
 */
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);
	return NULL;
}

static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations sched_debug_sops = {
	.start = sched_debug_start,
	.next = sched_debug_next,
	.stop = sched_debug_stop,
	.show = sched_debug_show,
};

static int sched_debug_release(struct inode *inode, struct file *file)
{
	seq_release(inode, file);

	return 0;
}

static int sched_debug_open(struct inode *inode, struct file *filp)
{
	int ret = 0;

	ret = seq_open(filp, &sched_debug_sops);

	return ret;
}

static const struct file_operations sched_debug_fops = {
	.open		= sched_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= sched_debug_release,
};

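/* Create /proc/sched_debug, readable by everyone, backed by the seq_file ops above. */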
static int __init init_sched_debug_procfs(void)
{
	struct proc_dir_entry *pe;

	pe = proc_create("sched_debug", 0444, NULL, &sched_debug_fops);
	if (!pe)
		return -ENOMEM;
	return 0;
}

__initcall(init_sched_debug_procfs);

#define __P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
#define P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
#define __PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))

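/*
 * Dump @p's NUMA-balancing state: mm scan sequence, number of migrated
 * pages, and the per-node fault counters.
 */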
static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
	struct mempolicy *pol;
	int node, i;

	if (p->mm)
		P(mm->numa_scan_seq);

	task_lock(p);
	pol = p->mempolicy;
	if (pol && !(pol->flags & MPOL_F_MORON))
		pol = NULL;
	mpol_get(pol);
	task_unlock(p);

	SEQ_printf(m, "numa_migrations, %ld\n", xchg(&p->numa_pages_migrated, 0));

	for_each_online_node(node) {
		for (i = 0; i < 2; i++) {
			unsigned long nr_faults = -1;
			int cpu_current, home_node;

			if (p->numa_faults)
				nr_faults = p->numa_faults[2*node + i];

			cpu_current = !i ? (task_node(p) == node) :
				      (pol && node_isset(node, pol->v.nodes));

			home_node = (p->numa_preferred_nid == node);

			SEQ_printf(m, "numa_faults, %d, %d, %d, %d, %ld\n",
				   i, node, cpu_current, home_node, nr_faults);
		}
	}

	mpol_put(pol);
#endif
}

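/*
 * Backs /proc/<pid>/sched: one "name : value" line per scheduling
 * statistic of @p, followed by the per-task NUMA data above.
 */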
void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{
	unsigned long nr_switches;

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr(p),
						get_nr_threads(p));
	SEQ_printf(m,
		"---------------------------------------------------------"
		"----------\n");
#define __P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
#define P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
#define __PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);

	nr_switches = p->nvcsw + p->nivcsw;

#ifdef CONFIG_SCHEDSTATS
	PN(se.statistics.wait_start);
	PN(se.statistics.sleep_start);
	PN(se.statistics.block_start);
	PN(se.statistics.sleep_max);
	PN(se.statistics.block_max);
	PN(se.statistics.exec_max);
	PN(se.statistics.slice_max);
	PN(se.statistics.wait_max);
	PN(se.statistics.wait_sum);
	P(se.statistics.wait_count);
	PN(se.statistics.iowait_sum);
	P(se.statistics.iowait_count);
	P(se.nr_migrations);
	P(se.statistics.nr_migrations_cold);
	P(se.statistics.nr_failed_migrations_affine);
	P(se.statistics.nr_failed_migrations_running);
	P(se.statistics.nr_failed_migrations_hot);
	P(se.statistics.nr_forced_migrations);
	P(se.statistics.nr_wakeups);
	P(se.statistics.nr_wakeups_sync);
	P(se.statistics.nr_wakeups_migrate);
	P(se.statistics.nr_wakeups_local);
	P(se.statistics.nr_wakeups_remote);
	P(se.statistics.nr_wakeups_affine);
	P(se.statistics.nr_wakeups_affine_attempts);
	P(se.statistics.nr_wakeups_passive);
	P(se.statistics.nr_wakeups_idle);

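	/*
	 * avg_atom: average runtime between context switches;
	 * avg_per_cpu: average runtime between migrations
	 * (-1 when the task has not switched/migrated yet).
	 */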
	{
		u64 avg_atom, avg_per_cpu;

		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			do_div(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1LL;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);
	}
#endif
	__P(nr_switches);
	SEQ_printf(m, "%-45s:%21Ld\n",
		   "nr_voluntary_switches", (long long)p->nvcsw);
	SEQ_printf(m, "%-45s:%21Ld\n",
		   "nr_involuntary_switches", (long long)p->nivcsw);

	P(se.load.weight);
#ifdef CONFIG_SMP
	P(se.avg.runnable_avg_sum);
	P(se.avg.runnable_avg_period);
	P(se.avg.load_avg_contrib);
	P(se.avg.decay_count);
#endif
	P(policy);
	P(prio);
#undef PN
#undef __PN
#undef P
#undef __P

	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		SEQ_printf(m, "%-45s:%21Ld\n",
			   "clock-delta", (long long)(t1-t0));
	}

	sched_show_numa(p, m);
}

void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
}