/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

#ifdef CONFIG_SMP

static inline int rt_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	cpu_set(rq->cpu, rq->rd->rto_mask);
	/*
	 * Make sure the mask is visible before we set
	 * the overload count. That is checked to determine
	 * if we should look at the mask. It would be a shame
	 * if we looked at the mask, but the mask was not
	 * updated yet.
	 */
	wmb();
	atomic_inc(&rq->rd->rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	/* the order here really doesn't matter */
	atomic_dec(&rq->rd->rto_count);
	cpu_clear(rq->cpu, rq->rd->rto_mask);
}

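/*
 * Update the runqueue's "overloaded" state: an rq counts as RT-overloaded
 * when it has more than one runnable RT task and at least one of them can
 * migrate to another CPU.
 */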
static void update_rt_migration(struct rq *rq)
{
	if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) {
		if (!rq->rt.overloaded) {
			rt_set_overload(rq);
			rq->rt.overloaded = 1;
		}
	} else if (rq->rt.overloaded) {
		rt_clear_overload(rq);
		rq->rt.overloaded = 0;
	}
}
#endif /* CONFIG_SMP */

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
	return container_of(rt_se, struct task_struct, rt);
}

static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
	return !list_empty(&rt_se->run_list);
}

#ifdef CONFIG_RT_GROUP_SCHED

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	if (!rt_rq->tg)
		return RUNTIME_INF;

	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
	list_for_each_entry(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return rt_rq->rq;
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	return rt_se->rt_rq;
}

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = rt_se->parent)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return rt_se->my_q;
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se);

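/*
 * Enqueue/dequeue a group's own sched_rt_entity into/from its parent
 * runqueue, e.g. when the group rt_rq is unthrottled or throttled. On
 * enqueue, preempt the parent rq's current task if this group now has a
 * higher priority.
 */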
static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	struct sched_rt_entity *rt_se = rt_rq->rt_se;

	if (rt_se && !on_rt_rq(rt_se) && rt_rq->rt_nr_running) {
		struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;

		enqueue_rt_entity(rt_se);
		if (rt_rq->highest_prio < curr->prio)
			resched_task(curr);
	}
}

static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
	struct sched_rt_entity *rt_se = rt_rq->rt_se;

	if (rt_se && on_rt_rq(rt_se))
		dequeue_rt_entity(rt_se);
}

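/*
 * A group rt_rq is only treated as throttled while none of its tasks are
 * priority-boosted (e.g. via rt-mutex priority inheritance); boosted tasks
 * must be allowed to keep running.
 */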
static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}

static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = group_rt_rq(rt_se);
	struct task_struct *p;

	if (rt_rq)
		return !!rt_rq->rt_nr_boosted;

	p = rt_task_of(rt_se);
	return p->prio != p->normal_prio;
}

#ifdef CONFIG_SMP
static inline cpumask_t sched_rt_period_mask(void)
{
	return cpu_rq(smp_processor_id())->rd->span;
}
#else
static inline cpumask_t sched_rt_period_mask(void)
{
	return cpu_online_map;
}
#endif

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &rt_rq->tg->rt_bandwidth;
}

#else

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(def_rt_bandwidth.rt_period);
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
	for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return container_of(rt_rq, struct rq, rt);
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	struct task_struct *p = rt_task_of(rt_se);
	struct rq *rq = task_rq(p);

	return &rq->rt;
}

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = NULL)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return NULL;
}

static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
}

static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled;
}

static inline cpumask_t sched_rt_period_mask(void)
{
	return cpu_online_map;
}

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return &cpu_rq(cpu)->rt;
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &def_rt_bandwidth;
}

#endif

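/*
 * Runs when the rt bandwidth period timer fires: for every rt_rq in the
 * period's CPU span, pay back the runtime consumed during the period,
 * unthrottle runqueues that are below their runtime again, and report
 * whether everything was idle so the timer can be stopped.
 */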
static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
{
	int i, idle = 1;
	cpumask_t span;

	if (rt_b->rt_runtime == RUNTIME_INF)
		return 1;

	span = sched_rt_period_mask();
	for_each_cpu_mask(i, span) {
		int enqueue = 0;
		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
		struct rq *rq = rq_of_rt_rq(rt_rq);

		spin_lock(&rq->lock);
		if (rt_rq->rt_time) {
			u64 runtime;

			spin_lock(&rt_rq->rt_runtime_lock);
			runtime = rt_rq->rt_runtime;
			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
				rt_rq->rt_throttled = 0;
				enqueue = 1;
			}
			if (rt_rq->rt_time || rt_rq->rt_nr_running)
				idle = 0;
			spin_unlock(&rt_rq->rt_runtime_lock);
		}

		if (enqueue)
			sched_rt_rq_enqueue(rt_rq);
		spin_unlock(&rq->lock);
	}

	return idle;
}

#ifdef CONFIG_SMP
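/*
 * Try to borrow unused RT runtime from the other CPUs in the root domain:
 * each CPU with spare runtime donates an equal share, up to the point where
 * this rt_rq reaches the full period. Returns 1 if any runtime was moved.
 */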
static int balance_runtime(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
	struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
	int i, weight, more = 0;
	u64 rt_period;

	weight = cpus_weight(rd->span);

	spin_lock(&rt_b->rt_runtime_lock);
	rt_period = ktime_to_ns(rt_b->rt_period);
	for_each_cpu_mask(i, rd->span) {
		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
		s64 diff;

		if (iter == rt_rq)
			continue;

		spin_lock(&iter->rt_runtime_lock);
		diff = iter->rt_runtime - iter->rt_time;
		if (diff > 0) {
			do_div(diff, weight);
			if (rt_rq->rt_runtime + diff > rt_period)
				diff = rt_period - rt_rq->rt_runtime;
			iter->rt_runtime -= diff;
			rt_rq->rt_runtime += diff;
			more = 1;
			if (rt_rq->rt_runtime == rt_period) {
				spin_unlock(&iter->rt_runtime_lock);
				break;
			}
		}
		spin_unlock(&iter->rt_runtime_lock);
	}
	spin_unlock(&rt_b->rt_runtime_lock);

	return more;
}
#endif

static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_RT_GROUP_SCHED
	struct rt_rq *rt_rq = group_rt_rq(rt_se);

	if (rt_rq)
		return rt_rq->highest_prio;
#endif

	return rt_task_of(rt_se)->prio;
}

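/*
 * Check whether this rt_rq has used up its runtime for the current period.
 * Called with rt_rq->rt_runtime_lock held; may drop it briefly to balance
 * runtime from other CPUs before deciding to throttle.
 */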
static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
	u64 runtime = sched_rt_runtime(rt_rq);

	if (runtime == RUNTIME_INF)
		return 0;

	if (rt_rq->rt_throttled)
		return rt_rq_throttled(rt_rq);

	if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
		return 0;

#ifdef CONFIG_SMP
	if (rt_rq->rt_time > runtime) {
		int more;

		spin_unlock(&rt_rq->rt_runtime_lock);
		more = balance_runtime(rt_rq);
		spin_lock(&rt_rq->rt_runtime_lock);

		if (more)
			runtime = sched_rt_runtime(rt_rq);
	}
#endif

	if (rt_rq->rt_time > runtime) {
		rt_rq->rt_throttled = 1;
		if (rt_rq_throttled(rt_rq)) {
			sched_rt_rq_dequeue(rt_rq);
			return 1;
		}
	}

	return 0;
}

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct sched_rt_entity *rt_se = &curr->rt;
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	u64 delta_exec;

	if (!task_has_rt_policy(curr))
		return;

	delta_exec = rq->clock - curr->se.exec_start;
	if (unlikely((s64)delta_exec < 0))
		delta_exec = 0;

	schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	curr->se.exec_start = rq->clock;
	cpuacct_charge(curr, delta_exec);

	for_each_sched_rt_entity(rt_se) {
		rt_rq = rt_rq_of_se(rt_se);

		spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_time += delta_exec;
		if (sched_rt_runtime_exceeded(rt_rq))
			resched_task(curr);
		spin_unlock(&rt_rq->rt_runtime_lock);
	}
}

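/*
 * Bookkeeping on enqueue/dequeue of an rt entity: track the number of
 * runnable and migratable RT tasks, the runqueue's highest priority
 * (propagated to cpupri), the number of boosted tasks, and the overload
 * state.
 */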
static inline
void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
	rt_rq->rt_nr_running++;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	if (rt_se_prio(rt_se) < rt_rq->highest_prio) {
		struct rq *rq = rq_of_rt_rq(rt_rq);

		rt_rq->highest_prio = rt_se_prio(rt_se);
#ifdef CONFIG_SMP
		if (rq->online)
			cpupri_set(&rq->rd->cpupri, rq->cpu,
				   rt_se_prio(rt_se));
#endif
	}
#endif
#ifdef CONFIG_SMP
	if (rt_se->nr_cpus_allowed > 1) {
		struct rq *rq = rq_of_rt_rq(rt_rq);

		rq->rt.rt_nr_migratory++;
	}

	update_rt_migration(rq_of_rt_rq(rt_rq));
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted++;

	if (rt_rq->tg)
		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
#else
	start_rt_bandwidth(&def_rt_bandwidth);
#endif
}

static inline
void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
#ifdef CONFIG_SMP
	int highest_prio = rt_rq->highest_prio;
#endif

	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
	WARN_ON(!rt_rq->rt_nr_running);
	rt_rq->rt_nr_running--;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	if (rt_rq->rt_nr_running) {
		struct rt_prio_array *array;

		WARN_ON(rt_se_prio(rt_se) < rt_rq->highest_prio);
		if (rt_se_prio(rt_se) == rt_rq->highest_prio) {
			/* recalculate */
			array = &rt_rq->active;
			rt_rq->highest_prio =
				sched_find_first_bit(array->bitmap);
		} /* otherwise leave rt_rq->highest_prio alone */
	} else
		rt_rq->highest_prio = MAX_RT_PRIO;
#endif
#ifdef CONFIG_SMP
	if (rt_se->nr_cpus_allowed > 1) {
		struct rq *rq = rq_of_rt_rq(rt_rq);
		rq->rt.rt_nr_migratory--;
	}

	if (rt_rq->highest_prio != highest_prio) {
		struct rq *rq = rq_of_rt_rq(rt_rq);

		if (rq->online)
			cpupri_set(&rq->rd->cpupri, rq->cpu,
				   rt_rq->highest_prio);
	}

	update_rt_migration(rq_of_rt_rq(rt_rq));
#endif /* CONFIG_SMP */
#ifdef CONFIG_RT_GROUP_SCHED
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted--;

	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
#endif
}

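/*
 * Enqueue/dequeue a single rt entity into the priority array of its rt_rq.
 * Tasks restricted to one CPU go on the exclusive queue, all others on the
 * shared queue of their priority level.
 */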
static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;
	struct rt_rq *group_rq = group_rt_rq(rt_se);

	if (group_rq && rt_rq_throttled(group_rq))
		return;

	if (rt_se->nr_cpus_allowed == 1)
		list_add_tail(&rt_se->run_list,
			      array->xqueue + rt_se_prio(rt_se));
	else
		list_add_tail(&rt_se->run_list,
			      array->squeue + rt_se_prio(rt_se));

	__set_bit(rt_se_prio(rt_se), array->bitmap);

	inc_rt_tasks(rt_se, rt_rq);
}

static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;

	list_del_init(&rt_se->run_list);
	if (list_empty(array->squeue + rt_se_prio(rt_se))
	    && list_empty(array->xqueue + rt_se_prio(rt_se)))
		__clear_bit(rt_se_prio(rt_se), array->bitmap);

	dec_rt_tasks(rt_se, rt_rq);
}

/*
 * Because the prio of an upper entry depends on the lower
 * entries, we must remove entries top - down.
 */
static void dequeue_rt_stack(struct task_struct *p)
{
	struct sched_rt_entity *rt_se, *back = NULL;

	rt_se = &p->rt;
	for_each_sched_rt_entity(rt_se) {
		rt_se->back = back;
		back = rt_se;
	}

	for (rt_se = back; rt_se; rt_se = rt_se->back) {
		if (on_rt_rq(rt_se))
			dequeue_rt_entity(rt_se);
	}
}

/*
 * Adding/removing a task to/from a priority array:
 */
static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
{
	struct sched_rt_entity *rt_se = &p->rt;

	if (wakeup)
		rt_se->timeout = 0;

	dequeue_rt_stack(p);

	/*
	 * enqueue everybody, bottom - up.
	 */
	for_each_sched_rt_entity(rt_se)
		enqueue_rt_entity(rt_se);
}

static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
{
	struct sched_rt_entity *rt_se = &p->rt;
	struct rt_rq *rt_rq;

	update_curr_rt(rq);

	dequeue_rt_stack(p);

	/*
	 * re-enqueue all non-empty rt_rq entities.
	 */
	for_each_sched_rt_entity(rt_se) {
		rt_rq = group_rt_rq(rt_se);
		if (rt_rq && rt_rq->rt_nr_running)
			enqueue_rt_entity(rt_se);
	}
}

/*
 * Put task to the end of the run list without the overhead of dequeue
 * followed by enqueue.
 *
 * Note: We always enqueue the task to the shared-queue, regardless of its
 * previous position w.r.t. exclusive vs shared. This is so that exclusive RR
 * tasks fairly round-robin with all tasks on the runqueue, not just other
 * exclusive tasks.
 */
static
void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
{
	struct rt_prio_array *array = &rt_rq->active;

	list_del_init(&rt_se->run_list);
	list_add_tail(&rt_se->run_list, array->squeue + rt_se_prio(rt_se));
}

static void requeue_task_rt(struct rq *rq, struct task_struct *p)
{
	struct sched_rt_entity *rt_se = &p->rt;
	struct rt_rq *rt_rq;

	for_each_sched_rt_entity(rt_se) {
		rt_rq = rt_rq_of_se(rt_se);
		requeue_rt_entity(rt_rq, rt_se);
	}
}

static void yield_task_rt(struct rq *rq)
{
	requeue_task_rt(rq, rq->curr);
}

#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);

static int select_task_rq_rt(struct task_struct *p, int sync)
{
	struct rq *rq = task_rq(p);

	/*
	 * If the current task is an RT task, then
	 * try to see if we can wake this RT task up on another
	 * runqueue. Otherwise simply start this RT task
	 * on its current runqueue.
	 *
	 * We want to avoid overloading runqueues, even if
	 * the woken RT task is of higher priority than the current
	 * RT task. RT tasks behave differently than other tasks: if
	 * one gets preempted, we try to push it off to another queue.
	 * So trying to keep a preempting RT task on the same
	 * cache-hot CPU will force the running RT task to
	 * a cold CPU. So we waste all the cache for the lower
	 * RT task in hopes of saving some for an RT task
	 * that is just being woken and probably will have
	 * a cold cache anyway.
	 */
	if (unlikely(rt_task(rq->curr)) &&
	    (p->rt.nr_cpus_allowed > 1)) {
		int cpu = find_lowest_rq(p);

		return (cpu == -1) ? task_cpu(p) : cpu;
	}

	/*
	 * Otherwise, just let it ride on the affined RQ and the
	 * post-schedule router will push the preempted task away
	 */
	return task_cpu(p);
}
#endif /* CONFIG_SMP */

static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
						   struct rt_rq *rt_rq);

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
{
	if (p->prio < rq->curr->prio) {
		resched_task(rq->curr);
		return;
	}

#ifdef CONFIG_SMP
	/*
	 * If:
	 *
	 * - the newly woken task is of equal priority to the current task
	 * - the newly woken task is non-migratable while current is migratable
	 * - current will be preempted on the next reschedule
	 *
	 * we should check to see if current can readily move to a different
	 * cpu.  If so, we will reschedule to allow the push logic to try
	 * to move current somewhere else, making room for our non-migratable
	 * task.
	 */
	if ((p->prio == rq->curr->prio)
	    && p->rt.nr_cpus_allowed == 1
	    && rq->curr->rt.nr_cpus_allowed != 1
	    && pick_next_rt_entity(rq, &rq->rt) != &rq->curr->rt) {
		cpumask_t mask;

		if (cpupri_find(&rq->rd->cpupri, rq->curr, &mask))
			/*
			 * There appears to be other cpus that can accept
			 * current, so lets reschedule to try and push it away
			 */
			resched_task(rq->curr);
	}
#endif
}

static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
						   struct rt_rq *rt_rq)
{
	struct rt_prio_array *array = &rt_rq->active;
	struct sched_rt_entity *next = NULL;
	struct list_head *queue;
	int idx;

	idx = sched_find_first_bit(array->bitmap);
	BUG_ON(idx >= MAX_RT_PRIO);

	queue = array->xqueue + idx;
	if (!list_empty(queue))
		next = list_entry(queue->next, struct sched_rt_entity,
				  run_list);
	else {
		queue = array->squeue + idx;
		next = list_entry(queue->next, struct sched_rt_entity,
				  run_list);
	}

	return next;
}

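/*
 * Pick the highest priority runnable RT task: walk down the group
 * hierarchy, selecting the top entity at each level until a task is
 * reached.
 */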
static struct task_struct *pick_next_task_rt(struct rq *rq)
{
	struct sched_rt_entity *rt_se;
	struct task_struct *p;
	struct rt_rq *rt_rq;

	rt_rq = &rq->rt;

	if (unlikely(!rt_rq->rt_nr_running))
		return NULL;

	if (rt_rq_throttled(rt_rq))
		return NULL;

	do {
		rt_se = pick_next_rt_entity(rq, rt_rq);
		BUG_ON(!rt_se);
		rt_rq = group_rt_rq(rt_se);
	} while (rt_rq);

	p = rt_task_of(rt_se);
	p->se.exec_start = rq->clock;
	return p;
}

static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
	update_curr_rt(rq);
	p->se.exec_start = 0;
}

#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define RT_MAX_TRIES 3

static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);

static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
	if (!task_running(rq, p) &&
	    (cpu < 0 || cpu_isset(cpu, p->cpus_allowed)) &&
	    (p->rt.nr_cpus_allowed > 1))
		return 1;
	return 0;
}

/* Return the second highest RT task, NULL otherwise */
static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
{
	struct task_struct *next = NULL;
	struct sched_rt_entity *rt_se;
	struct rt_prio_array *array;
	struct rt_rq *rt_rq;
	int idx;

	for_each_leaf_rt_rq(rt_rq, rq) {
		array = &rt_rq->active;
		idx = sched_find_first_bit(array->bitmap);
 next_idx:
		if (idx >= MAX_RT_PRIO)
			continue;
		if (next && next->prio < idx)
			continue;
		list_for_each_entry(rt_se, array->squeue + idx, run_list) {
			struct task_struct *p = rt_task_of(rt_se);
			if (pick_rt_task(rq, p, cpu)) {
				next = p;
				break;
			}
		}
		if (!next) {
			idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
			goto next_idx;
		}
	}

	return next;
}

static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);

static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
{
	int first;

	/* "this_cpu" is cheaper to preempt than a remote processor */
	if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
		return this_cpu;

	first = first_cpu(*mask);
	if (first != NR_CPUS)
		return first;

	return -1;
}

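/*
 * Find a CPU whose running task has a lower priority than @task, using the
 * cpupri map, preferring CPUs that are cheap to reach: the task's last CPU,
 * then this CPU, then topologically close ones. Returns -1 if no suitable
 * CPU was found.
 */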
static int find_lowest_rq(struct task_struct *task)
{
	struct sched_domain *sd;
	cpumask_t *lowest_mask = &__get_cpu_var(local_cpu_mask);
	int this_cpu = smp_processor_id();
	int cpu = task_cpu(task);

	if (task->rt.nr_cpus_allowed == 1)
		return -1; /* No other targets possible */

	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
		return -1; /* No targets found */

	/*
	 * At this point we have built a mask of cpus representing the
	 * lowest priority tasks in the system.  Now we want to elect
	 * the best one based on our affinity and topology.
	 *
	 * We prioritize the last cpu that the task executed on since
	 * it is most likely cache-hot in that location.
	 */
	if (cpu_isset(cpu, *lowest_mask))
		return cpu;

	/*
	 * Otherwise, we consult the sched_domains span maps to figure
	 * out which cpu is logically closest to our hot cache data.
	 */
	if (this_cpu == cpu)
		this_cpu = -1; /* Skip this_cpu opt if the same */

	for_each_domain(cpu, sd) {
		if (sd->flags & SD_WAKE_AFFINE) {
			cpumask_t domain_mask;
			int best_cpu;

			cpus_and(domain_mask, sd->span, *lowest_mask);

			best_cpu = pick_optimal_cpu(this_cpu,
						    &domain_mask);
			if (best_cpu != -1)
				return best_cpu;
		}
	}

	/*
	 * And finally, if there were no matches within the domains
	 * just give the caller *something* to work with from the compatible
	 * locations.
	 */
	return pick_optimal_cpu(this_cpu, lowest_mask);
}

/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
{
	struct rq *lowest_rq = NULL;
	int tries;
	int cpu;

	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
		cpu = find_lowest_rq(task);

		if ((cpu == -1) || (cpu == rq->cpu))
			break;

		lowest_rq = cpu_rq(cpu);

		/* if the prio of this runqueue changed, try again */
		if (double_lock_balance(rq, lowest_rq)) {
			/*
			 * We had to unlock the run queue. In
			 * the meantime, the task could have
			 * migrated already or had its affinity changed.
			 * Also make sure that it wasn't scheduled on its rq.
			 */
			if (unlikely(task_rq(task) != rq ||
				     !cpu_isset(lowest_rq->cpu,
						task->cpus_allowed) ||
				     task_running(rq, task) ||
				     !task->se.on_rq)) {

				spin_unlock(&lowest_rq->lock);
				lowest_rq = NULL;
				break;
			}
		}

		/* If this rq is still suitable use it. */
		if (lowest_rq->rt.highest_prio > task->prio)
			break;

		/* try again */
		spin_unlock(&lowest_rq->lock);
		lowest_rq = NULL;
	}

	return lowest_rq;
}

/*
 * If the current CPU has more than one RT task, see if the non
 * running task can migrate over to a CPU that is running a task
 * of lesser priority.
 */
static int push_rt_task(struct rq *rq)
{
	struct task_struct *next_task;
	struct rq *lowest_rq;
	int ret = 0;
	int paranoid = RT_MAX_TRIES;

	if (!rq->rt.overloaded)
		return 0;

	next_task = pick_next_highest_task_rt(rq, -1);
	if (!next_task)
		return 0;

 retry:
	if (unlikely(next_task == rq->curr)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * It's possible that the next_task slipped in with a
	 * higher priority than current. If that's the case
	 * just reschedule current.
	 */
	if (unlikely(next_task->prio < rq->curr->prio)) {
		resched_task(rq->curr);
		return 0;
	}

	/* We might release rq lock */
	get_task_struct(next_task);

	/* find_lock_lowest_rq locks the rq if found */
	lowest_rq = find_lock_lowest_rq(next_task, rq);
	if (!lowest_rq) {
		struct task_struct *task;
		/*
		 * find_lock_lowest_rq releases rq->lock
		 * so it is possible that next_task has changed.
		 * If it has, then try again.
		 */
		task = pick_next_highest_task_rt(rq, -1);
		if (unlikely(task != next_task) && task && paranoid--) {
			put_task_struct(next_task);
			next_task = task;
			goto retry;
		}
		goto out;
	}

	deactivate_task(rq, next_task, 0);
	set_task_cpu(next_task, lowest_rq->cpu);
	activate_task(lowest_rq, next_task, 0);

	resched_task(lowest_rq->curr);

	spin_unlock(&lowest_rq->lock);

	ret = 1;
out:
	put_task_struct(next_task);

	return ret;
}

/*
 * TODO: Currently we just use the second highest prio task on
 *       the queue, and stop when it can't migrate (or there's
 *       no more RT tasks).  There may be a case where a lower
 *       priority RT task has a different affinity than the
 *       higher RT task. In this case the lower RT task could
 *       possibly be able to migrate whereas the higher priority
 *       RT task could not.  We currently ignore this issue.
 *       Enhancements are welcome!
 */
static void push_rt_tasks(struct rq *rq)
{
	/* push_rt_task will return true if it moved an RT */
	while (push_rt_task(rq))
		;
}

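/*
 * Pull RT tasks from overloaded runqueues: scan the root domain's rto_mask
 * and migrate over any queued RT task with a higher priority than what
 * this runqueue is about to run.
 */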
static int pull_rt_task(struct rq *this_rq)
{
	int this_cpu = this_rq->cpu, ret = 0, cpu;
	struct task_struct *p, *next;
	struct rq *src_rq;

	if (likely(!rt_overloaded(this_rq)))
		return 0;

	next = pick_next_task_rt(this_rq);

	for_each_cpu_mask(cpu, this_rq->rd->rto_mask) {
		if (this_cpu == cpu)
			continue;

		src_rq = cpu_rq(cpu);
		/*
		 * We can potentially drop this_rq's lock in
		 * double_lock_balance, and another CPU could
		 * steal our next task - hence we must cause
		 * the caller to recalculate the next task
		 * in that case:
		 */
		if (double_lock_balance(this_rq, src_rq)) {
			struct task_struct *old_next = next;

			next = pick_next_task_rt(this_rq);
			if (next != old_next)
				ret = 1;
		}

		/*
		 * Are there still pullable RT tasks?
		 */
		if (src_rq->rt.rt_nr_running <= 1)
			goto skip;

		p = pick_next_highest_task_rt(src_rq, this_cpu);

		/*
		 * Do we have an RT task that preempts
		 * the to-be-scheduled task?
		 */
		if (p && (!next || (p->prio < next->prio))) {
			WARN_ON(p == src_rq->curr);
			WARN_ON(!p->se.on_rq);

			/*
			 * There's a chance that p is higher in priority
			 * than what's currently running on its cpu.
			 * This is just that p is waking up and hasn't
			 * had a chance to schedule. We only pull
			 * p if it is lower in priority than the
			 * current task on the run queue or
			 * this_rq next task is lower in prio than
			 * the current task on that rq.
			 */
			if (p->prio < src_rq->curr->prio ||
			    (next && next->prio < src_rq->curr->prio))
				goto skip;

			ret = 1;

			deactivate_task(src_rq, p, 0);
			set_task_cpu(p, this_cpu);
			activate_task(this_rq, p, 0);
			/*
			 * We continue with the search, just in
			 * case there's an even higher prio task
			 * in another runqueue. (low likelihood
			 * but possible)
			 *
			 * Update next so that we won't pick a task
			 * on another cpu with a priority lower (or equal)
			 * than the one we just picked.
			 */
			next = p;

		}
 skip:
		spin_unlock(&src_rq->lock);
	}

	return ret;
}

static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
{
	/* Try to pull RT tasks here if we lower this rq's prio */
	if (unlikely(rt_task(prev)) && rq->rt.highest_prio > prev->prio)
		pull_rt_task(rq);
}

static void post_schedule_rt(struct rq *rq)
{
	/*
	 * If we have more than one rt_task queued, then
	 * see if we can push the other rt_tasks off to other CPUS.
	 * Note we may release the rq lock, and since
	 * the lock was owned by prev, we need to release it
	 * first via finish_lock_switch and then reacquire it here.
	 */
	if (unlikely(rq->rt.overloaded)) {
		spin_lock_irq(&rq->lock);
		push_rt_tasks(rq);
		spin_unlock_irq(&rq->lock);
	}
}

/*
 * If we are not running and we are not going to reschedule soon, we should
 * try to push tasks away now
 */
static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
{
	if (!task_running(rq, p) &&
	    !test_tsk_need_resched(rq->curr) &&
	    rq->rt.overloaded)
		push_rt_tasks(rq);
}

static unsigned long
load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
		unsigned long max_load_move,
		struct sched_domain *sd, enum cpu_idle_type idle,
		int *all_pinned, int *this_best_prio)
{
	/* don't touch RT tasks */
	return 0;
}

static int
move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
		 struct sched_domain *sd, enum cpu_idle_type idle)
{
	/* don't touch RT tasks */
	return 0;
}

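/*
 * Affinity changes can make a task migratable or pin it to one CPU, so
 * keep rt_nr_migratory, the overload state and the exclusive/shared queue
 * placement in sync with the new mask.
 */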
static void set_cpus_allowed_rt(struct task_struct *p,
				const cpumask_t *new_mask)
{
	int weight = cpus_weight(*new_mask);

	BUG_ON(!rt_task(p));

	/*
	 * Update the migration status of the RQ if we have an RT task
	 * which is running AND changing its weight value.
	 */
	if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
		struct rq *rq = task_rq(p);

		if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
			rq->rt.rt_nr_migratory++;
		} else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
			BUG_ON(!rq->rt.rt_nr_migratory);
			rq->rt.rt_nr_migratory--;
		}

		update_rt_migration(rq);

		if (unlikely(weight == 1 || p->rt.nr_cpus_allowed == 1))
			/*
			 * If either the new or old weight is a "1", we need
			 * to requeue to properly move between shared and
			 * exclusive queues.
			 */
			requeue_task_rt(rq, p);
	}

	p->cpus_allowed = *new_mask;
	p->rt.nr_cpus_allowed = weight;
}

/* Assumes rq->lock is held */
static void rq_online_rt(struct rq *rq)
{
	if (rq->rt.overloaded)
		rt_set_overload(rq);

	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio);
}

/* Assumes rq->lock is held */
static void rq_offline_rt(struct rq *rq)
{
	if (rq->rt.overloaded)
		rt_clear_overload(rq);

	cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
}

/*
 * When we switch from the rt queue, we bring ourselves to a position
 * that we might want to pull RT tasks from other runqueues.
 */
static void switched_from_rt(struct rq *rq, struct task_struct *p,
			   int running)
{
	/*
	 * If there are other RT tasks then we will reschedule
	 * and the scheduling of the other RT tasks will handle
	 * the balancing. But if we are the last RT task
	 * we may need to handle the pulling of RT tasks
	 * now.
	 */
	if (!rq->rt.rt_nr_running)
		pull_rt_task(rq);
}
#endif /* CONFIG_SMP */

/*
 * When switching a task to RT, we may overload the runqueue
 * with RT tasks. In this case we try to push them off to
 * other runqueues.
 */
static void switched_to_rt(struct rq *rq, struct task_struct *p,
			   int running)
{
	int check_resched = 1;

	/*
	 * If we are already running, then there's nothing
	 * that needs to be done. But if we are not running
	 * we may need to preempt the current running task.
	 * If that current running task is also an RT task
	 * then see if we can move to another run queue.
	 */
	if (!running) {
#ifdef CONFIG_SMP
		if (rq->rt.overloaded && push_rt_task(rq) &&
		    /* Don't resched if we changed runqueues */
		    rq != task_rq(p))
			check_resched = 0;
#endif /* CONFIG_SMP */
		if (check_resched && p->prio < rq->curr->prio)
			resched_task(rq->curr);
	}
}

/*
 * Priority of the task has changed. This may cause
 * us to initiate a push or pull.
 */
static void prio_changed_rt(struct rq *rq, struct task_struct *p,
			    int oldprio, int running)
{
	if (running) {
#ifdef CONFIG_SMP
		/*
		 * If our priority decreases while running, we
		 * may need to pull tasks to this runqueue.
		 */
		if (oldprio < p->prio)
			pull_rt_task(rq);
		/*
		 * If there's a higher priority task waiting to run
		 * then reschedule. Note, the above pull_rt_task
		 * can release the rq lock and p could migrate.
		 * Only reschedule if p is still on the same runqueue.
		 */
		if (p->prio > rq->rt.highest_prio && rq->curr == p)
			resched_task(p);
#else
		/* For UP simply resched on drop of prio */
		if (oldprio < p->prio)
			resched_task(p);
#endif /* CONFIG_SMP */
	} else {
		/*
		 * This task is not running, but if it is
		 * greater than the current running task
		 * then reschedule.
		 */
		if (p->prio < rq->curr->prio)
			resched_task(rq->curr);
	}
}

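/*
 * Enforce RLIMIT_RTTIME: count the scheduler ticks this task has run and,
 * once it exceeds the soft limit, arm it_sched_expires so the posix CPU
 * timer path notices the overrun.
 */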
static void watchdog(struct rq *rq, struct task_struct *p)
{
	unsigned long soft, hard;

	if (!p->signal)
		return;

	soft = p->signal->rlim[RLIMIT_RTTIME].rlim_cur;
	hard = p->signal->rlim[RLIMIT_RTTIME].rlim_max;

	if (soft != RLIM_INFINITY) {
		unsigned long next;

		p->rt.timeout++;
		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
		if (p->rt.timeout > next)
			p->it_sched_expires = p->se.sum_exec_runtime;
	}
}

static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
{
	update_curr_rt(rq);

	watchdog(rq, p);

	/*
	 * RR tasks need a special form of timeslice management.
	 * FIFO tasks have no timeslices.
	 */
	if (p->policy != SCHED_RR)
		return;

	if (--p->rt.time_slice)
		return;

	p->rt.time_slice = DEF_TIMESLICE;

	/*
	 * Requeue to the end of queue if we are not the only element
	 * on the queue:
	 */
	if (p->rt.run_list.prev != p->rt.run_list.next) {
		requeue_task_rt(rq, p);
		set_tsk_need_resched(p);
	}
}

static void set_curr_task_rt(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	p->se.exec_start = rq->clock;
}

static const struct sched_class rt_sched_class = {
	.next			= &fair_sched_class,
	.enqueue_task		= enqueue_task_rt,
	.dequeue_task		= dequeue_task_rt,
	.yield_task		= yield_task_rt,
#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_rt,
#endif /* CONFIG_SMP */

	.check_preempt_curr	= check_preempt_curr_rt,

	.pick_next_task		= pick_next_task_rt,
	.put_prev_task		= put_prev_task_rt,

#ifdef CONFIG_SMP
	.load_balance		= load_balance_rt,
	.move_one_task		= move_one_task_rt,
	.set_cpus_allowed	= set_cpus_allowed_rt,
	.rq_online		= rq_online_rt,
	.rq_offline		= rq_offline_rt,
	.pre_schedule		= pre_schedule_rt,
	.post_schedule		= post_schedule_rt,
	.task_wake_up		= task_wake_up_rt,
	.switched_from		= switched_from_rt,
#endif

	.set_curr_task		= set_curr_task_rt,
	.task_tick		= task_tick_rt,

	.prio_changed		= prio_changed_rt,
	.switched_to		= switched_to_rt,
};