/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
        struct task_struct *curr = rq->curr;
        u64 delta_exec;

        if (!task_has_rt_policy(curr))
                return;

        delta_exec = rq->clock - curr->se.exec_start;
        if (unlikely((s64)delta_exec < 0))
                delta_exec = 0;

        schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));

        curr->se.sum_exec_runtime += delta_exec;
        curr->se.exec_start = rq->clock;
}
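/*
 * Context (illustrative, not part of sched_rt.c): task_has_rt_policy() is a
 * helper defined in kernel/sched.c. A sketch of its 2.6.24-era form: it just
 * tests for the two real-time policies handled by this class.
 */
static inline int rt_policy(int policy)
{
        if (unlikely(policy == SCHED_FIFO) || unlikely(policy == SCHED_RR))
                return 1;
        return 0;
}

static inline int task_has_rt_policy(struct task_struct *p)
{
        return rt_policy(p->policy);
}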
static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
{
        struct rt_prio_array *array = &rq->rt.active;

        list_add_tail(&p->run_list, array->queue + p->prio);
        __set_bit(p->prio, array->bitmap);
}
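/*
 * Context (illustrative, defined in kernel/sched.c rather than here): the
 * active array used above is one list head per RT priority level plus a
 * bitmap with one bit per level, so enqueueing is a list_add_tail() plus a
 * __set_bit(), and the highest-priority non-empty queue can later be found
 * in O(1) by scanning the bitmap. Roughly:
 */
struct rt_prio_array {
        DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
        struct list_head queue[MAX_RT_PRIO];
};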
/*
 * Adding/removing a task to/from a priority array:
 */
static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
{
        struct rt_prio_array *array = &rq->rt.active;

        update_curr_rt(rq);

        list_del(&p->run_list);
        if (list_empty(array->queue + p->prio))
                __clear_bit(p->prio, array->bitmap);
}
/*
 * Put task to the end of the run list without the overhead of dequeue
 * followed by enqueue.
 */
static void requeue_task_rt(struct rq *rq, struct task_struct *p)
{
        struct rt_prio_array *array = &rq->rt.active;

        list_move_tail(&p->run_list, array->queue + p->prio);
}
static void
yield_task_rt(struct rq *rq)
{
        requeue_task_rt(rq, rq->curr);
}
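/*
 * User-space view (illustrative sketch, not kernel code): sched_yield()
 * from a SCHED_FIFO or SCHED_RR task ends up in yield_task_rt() above, so
 * the caller is only placed behind runnable tasks of the same RT priority;
 * higher- and lower-priority queues are untouched.
 */
#include <sched.h>

static void give_way_to_equal_priority_peers(void)
{
        sched_yield();          /* requeue at the tail of our priority list */
}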
/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
{
        if (p->prio < rq->curr->prio)
                resched_task(rq->curr);
}
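/*
 * Context for the comparison above (illustrative): kernel priorities are
 * "lower value means more urgent". For an RT task the user-visible
 * sched_priority of 1..MAX_USER_RT_PRIO-1 is mapped onto kernel prio
 * MAX_RT_PRIO-2..0 by normal_prio() in kernel/sched.c; the hypothetical
 * helper below paraphrases just the RT branch of that mapping:
 */
static inline int rt_task_kernel_prio(struct task_struct *p)
{
        return MAX_RT_PRIO - 1 - p->rt_priority;
}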
static struct task_struct *pick_next_task_rt(struct rq *rq)
{
        struct rt_prio_array *array = &rq->rt.active;
        struct task_struct *next;
        struct list_head *queue;
        int idx;

        idx = sched_find_first_bit(array->bitmap);
        if (idx >= MAX_RT_PRIO)
                return NULL;

        queue = array->queue + idx;
        next = list_entry(queue->next, struct task_struct, run_list);

        next->se.exec_start = rq->clock;

        return next;
}
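/*
 * Illustrative, self-contained sketch of the O(1) selection idea used by
 * pick_next_task_rt(): find the lowest set bit in the priority bitmap
 * (i.e. the most urgent non-empty level) and take the first task queued
 * there. sched_find_first_bit() is an optimized kernel variant of the
 * naive scan below, tuned for the small RT-priority bitmap; the names in
 * this sketch are made up for the example.
 */
#include <stdint.h>

#define NR_LEVELS 100                           /* mirrors MAX_RT_PRIO */

static int first_nonempty_level(const uint32_t *bitmap)
{
        int i;

        for (i = 0; i < NR_LEVELS; i++)
                if (bitmap[i / 32] & (1u << (i % 32)))
                        return i;
        return NR_LEVELS;                       /* nothing is queued */
}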
static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
        update_curr_rt(rq);
        p->se.exec_start = 0;
}
#ifdef CONFIG_SMP
/*
 * Load-balancing iterator. Note: while the runqueue stays locked
 * during the whole iteration, the current task might be
 * dequeued so the iterator has to be dequeue-safe. Here we
 * achieve that by always pre-iterating before returning
 * the current task:
 */
static struct task_struct *load_balance_start_rt(void *arg)
{
        struct rq *rq = arg;
        struct rt_prio_array *array = &rq->rt.active;
        struct list_head *head, *curr;
        struct task_struct *p;
        int idx;

        idx = sched_find_first_bit(array->bitmap);
        if (idx >= MAX_RT_PRIO)
                return NULL;

        head = array->queue + idx;
        curr = head->prev;

        p = list_entry(curr, struct task_struct, run_list);

        curr = curr->prev;

        rq->rt.rt_load_balance_idx = idx;
        rq->rt.rt_load_balance_head = head;
        rq->rt.rt_load_balance_curr = curr;

        return p;
}
static struct task_struct *load_balance_next_rt(void *arg)
{
        struct rq *rq = arg;
        struct rt_prio_array *array = &rq->rt.active;
        struct list_head *head, *curr;
        struct task_struct *p;
        int idx;

        idx = rq->rt.rt_load_balance_idx;
        head = rq->rt.rt_load_balance_head;
        curr = rq->rt.rt_load_balance_curr;

        /*
         * If we arrived back to the head again then
         * iterate to the next queue (if any):
         */
        if (unlikely(head == curr)) {
                int next_idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);

                if (next_idx >= MAX_RT_PRIO)
                        return NULL;

                idx = next_idx;
                head = array->queue + idx;
                curr = head->prev;

                rq->rt.rt_load_balance_idx = idx;
                rq->rt.rt_load_balance_head = head;
        }

        p = list_entry(curr, struct task_struct, run_list);

        curr = curr->prev;

        rq->rt.rt_load_balance_curr = curr;

        return p;
}
static unsigned long
load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
                unsigned long max_load_move,
                struct sched_domain *sd, enum cpu_idle_type idle,
                int *all_pinned, int *this_best_prio)
{
        struct rq_iterator rt_rq_iterator;

        rt_rq_iterator.start = load_balance_start_rt;
        rt_rq_iterator.next = load_balance_next_rt;
        /* pass 'busiest' rq argument into
         * load_balance_[start|next]_rt iterators
         */
        rt_rq_iterator.arg = busiest;

        return balance_tasks(this_rq, this_cpu, busiest, max_load_move, sd,
                             idle, all_pinned, this_best_prio, &rt_rq_iterator);
}
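/*
 * Context (illustrative, defined in kernel/sched.c rather than here): the
 * iterator handle filled in above is the generic struct rq_iterator that
 * balance_tasks() and iter_move_one_task() drive; they call ->start() once
 * and then ->next() repeatedly, passing ->arg (the busiest runqueue) back
 * into the callbacks. Roughly:
 */
struct rq_iterator {
        void *arg;
        struct task_struct *(*start)(void *);
        struct task_struct *(*next)(void *);
};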
static int
move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
                 struct sched_domain *sd, enum cpu_idle_type idle)
{
        struct rq_iterator rt_rq_iterator;

        rt_rq_iterator.start = load_balance_start_rt;
        rt_rq_iterator.next = load_balance_next_rt;
        rt_rq_iterator.arg = busiest;

        return iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
                                  &rt_rq_iterator);
}
#endif
static void task_tick_rt(struct rq *rq, struct task_struct *p)
{
        /*
         * RR tasks need a special form of timeslice management.
         * FIFO tasks have no timeslices.
         */
        if (p->policy != SCHED_RR)
                return;

        if (--p->time_slice)
                return;

        p->time_slice = DEF_TIMESLICE;

        /*
         * Requeue to the end of queue if we are not the only element
         * on the queue:
         */
        if (p->run_list.prev != p->run_list.next) {
                requeue_task_rt(rq, p);
                set_tsk_need_resched(p);
        }
}
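/*
 * User-space view of the policy split above (illustrative, standalone
 * program, not kernel code): a SCHED_FIFO task is never round-robined by
 * task_tick_rt(), while a SCHED_RR task is rotated to the tail of its
 * priority list when its timeslice expires. The quantum can be queried
 * with sched_rr_get_interval():
 */
#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
        struct sched_param param = { .sched_priority = 10 };
        struct timespec ts;

        /* Needs CAP_SYS_NICE/root; pid 0 means the calling process. */
        if (sched_setscheduler(0, SCHED_RR, &param) == -1) {
                perror("sched_setscheduler");
                return 1;
        }

        if (sched_rr_get_interval(0, &ts) == 0)
                printf("RR timeslice: %ld.%09ld s\n",
                       (long)ts.tv_sec, ts.tv_nsec);
        return 0;
}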
static void set_curr_task_rt(struct rq *rq)
{
        struct task_struct *p = rq->curr;

        p->se.exec_start = rq->clock;
}
const struct sched_class rt_sched_class = {
        .next                   = &fair_sched_class,
        .enqueue_task           = enqueue_task_rt,
        .dequeue_task           = dequeue_task_rt,
        .yield_task             = yield_task_rt,

        .check_preempt_curr     = check_preempt_curr_rt,

        .pick_next_task         = pick_next_task_rt,
        .put_prev_task          = put_prev_task_rt,

#ifdef CONFIG_SMP
        .load_balance           = load_balance_rt,
        .move_one_task          = move_one_task_rt,
#endif

        .set_curr_task          = set_curr_task_rt,
        .task_tick              = task_tick_rt,
};
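/*
 * Context (simplified, illustrative sketch paraphrasing kernel/sched.c):
 * the core scheduler walks the class chain built by the ->next pointers
 * above, asking each class for a runnable task. Because rt_sched_class is
 * the highest class and links to fair_sched_class, runnable real-time
 * tasks are always picked before any SCHED_NORMAL task:
 */
static struct task_struct *pick_next_task_sketch(struct rq *rq)
{
        const struct sched_class *class = sched_class_highest;
        struct task_struct *p;

        for ( ; ; ) {
                p = class->pick_next_task(rq);
                if (p)
                        return p;
                class = class->next;    /* ends at the idle class, never NULL */
        }
}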