Added new file for cbs functions
[cbs-scheduler.git] / kernel / sched_cbs.c
blob dffb7ff9834393a6765598c837d93dde72719133
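/*
 * kernel/sched_cbs.c
 *
 * CBS (Constant Bandwidth Server) scheduling class: each sched_cbs_entity
 * gets a budget that is replenished every period, and runnable entities are
 * dispatched in earliest-deadline-first order.
 */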
static inline struct task_struct *cbs_task_of(struct sched_cbs_entity *se)
{
        return container_of(se, struct task_struct, cbs_se);
}

static inline struct rq *cbs_rq_of(struct cbs_rq *cbs_rq)
{
        return container_of(cbs_rq, struct rq, cbs);
}
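/*
 * Group scheduling is not implemented for CBS, so the entity "hierarchy" is
 * a single level: this walk visits only the entity itself.
 */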
#define for_each_cbs_sched_entity(se) \
                for (; se; se = NULL)

static inline struct cbs_rq *task_cbs_rq(struct task_struct *p)
{
        return &task_rq(p)->cbs;
}

static inline struct cbs_rq *cbs_cbs_rq_of(struct sched_cbs_entity *se)
{
        struct task_struct *p = cbs_task_of(se);
        struct rq *rq = task_rq(p);

        return &rq->cbs;
}

/* runqueue "owned" by this group */
static inline struct cbs_rq *group_cbs_rq(struct sched_cbs_entity *grp)
{
        return NULL;
}

static inline int
is_same_cbs_group(struct sched_cbs_entity *se, struct sched_cbs_entity *pse)
{
        return 1;
}

static inline struct sched_cbs_entity *parent_cbs_entity(struct sched_cbs_entity *se)
{
        return NULL;
}
/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */
static inline u64 max_dl(u64 min_dl, u64 dl)
{
        s64 delta = (s64)(dl - min_dl);

        if (delta > 0)
                min_dl = dl;

        return min_dl;
}

static inline u64 min_dl(u64 min_dl, u64 dl)
{
        s64 delta = (s64)(dl - min_dl);

        if (delta < 0)
                min_dl = dl;

        return min_dl;
}
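/*
 * CBS budget replenishment: whenever the server has overrun its budget,
 * push the deadline one period into the future and recharge the budget by
 * max_budget, repeating until the budget is non-negative again.
 */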
static inline void deadline_postpone(struct sched_cbs_entity *cbs_se)
{
        while (cbs_se->budget < 0) {
                cbs_se->deadline += cbs_se->period;
                cbs_se->budget += cbs_se->max_budget;
        }
}

static inline s64 entity_deadline(struct cbs_rq *cbs_rq, struct sched_cbs_entity *se)
{
        return se->deadline - cbs_rq->min_deadline;
}
/*
 * Enqueue an entity into the rb-tree:
 */
static void __enqueue_cbs_entity(struct cbs_rq *cbs_rq, struct sched_cbs_entity *se)
{
        struct rb_node **link = &cbs_rq->tasks_timeline.rb_node;
        struct rb_node *parent = NULL;
        struct sched_cbs_entity *entry;
        s64 key = entity_deadline(cbs_rq, se);
        int leftmost = 1;

        /*
         * Find the right place in the rbtree:
         */
        while (*link) {
                parent = *link;
                entry = rb_entry(parent, struct sched_cbs_entity, run_node);
                /*
                 * We don't care about collisions. Nodes with
                 * the same key stay together.
                 */
                if (key < entity_deadline(cbs_rq, entry)) {
                        link = &parent->rb_left;
                } else {
                        link = &parent->rb_right;
                        leftmost = 0;
                }
        }

        /*
         * Maintain a cache of leftmost tree entries (it is frequently
         * used):
         */
        if (leftmost) {
                cbs_rq->rb_leftmost = &se->run_node;
                /*
                 * Maintain cbs_rq->min_deadline as a monotonically increasing
                 * value tracking the leftmost deadline in the tree.
                 */
                cbs_rq->min_deadline =
                        max_dl(cbs_rq->min_deadline, se->deadline);
        }

        rb_link_node(&se->run_node, parent, link);
        rb_insert_color(&se->run_node, &cbs_rq->tasks_timeline);
}
static void __dequeue_cbs_entity(struct cbs_rq *cbs_rq, struct sched_cbs_entity *se)
{
        if (cbs_rq->rb_leftmost == &se->run_node) {
                struct rb_node *next_node;
                struct sched_cbs_entity *next;

                next_node = rb_next(&se->run_node);
                cbs_rq->rb_leftmost = next_node;

                if (next_node) {
                        next = rb_entry(next_node,
                                        struct sched_cbs_entity, run_node);
                        cbs_rq->min_deadline =
                                max_dl(cbs_rq->min_deadline,
                                       next->deadline);
                }
        }

        rb_erase(&se->run_node, &cbs_rq->tasks_timeline);
}

static inline struct rb_node *earliest_deadline(struct cbs_rq *cbs_rq)
{
        return cbs_rq->rb_leftmost;
}

static struct sched_cbs_entity *__pick_next_cbs_entity(struct cbs_rq *cbs_rq)
{
        return rb_entry(earliest_deadline(cbs_rq), struct sched_cbs_entity, run_node);
}
/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static inline void
__update_curr_cbs(struct cbs_rq *cbs_rq, struct sched_cbs_entity *curr,
                  unsigned long delta_exec)
{
        schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));

        // curr->sum_exec_runtime += delta_exec;
        schedstat_add(cbs_rq, exec_clock, delta_exec);
        curr->budget -= delta_exec;
        deadline_postpone(curr);
}

static void update_curr_cbs(struct cbs_rq *cbs_rq)
{
        struct sched_cbs_entity *curr = cbs_rq->curr;
        u64 now = cbs_rq_of(cbs_rq)->clock;
        unsigned long delta_exec;

        if (unlikely(!curr))
                return;

        /*
         * Get the amount of time the current task was running
         * since the last accounting time.
         */
        delta_exec = (unsigned long)(now - curr->exec_start);

        __update_curr_cbs(cbs_rq, curr, delta_exec);
        curr->exec_start = now;
#if 0
        if (entity_is_task(curr)) {
                struct task_struct *curtask = cbs_task_of(curr);

                cpuacct_charge(curtask, delta_exec);
        }
#endif
}
/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start_cbs(struct cbs_rq *cbs_rq, struct sched_cbs_entity *se)
{
        /* We are starting a new run period: */
        se->exec_start = cbs_rq_of(cbs_rq)->clock;
}
/**************************************************
 * Scheduling class queueing methods:
 */
static void
account_cbs_entity_enqueue(struct cbs_rq *cbs_rq, struct sched_cbs_entity *se)
{
        cbs_rq->nr_running++;
        se->on_rq = 1;
}

static void
account_cbs_entity_dequeue(struct cbs_rq *cbs_rq, struct sched_cbs_entity *se)
{
        BUG_ON(se->on_rq == 0);
        BUG_ON(cbs_rq->nr_running == 0);
        cbs_rq->nr_running--;
        se->on_rq = 0;
}
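/*
 * CBS wakeup rule: the (budget, deadline) pair left over from the previous
 * activation may only be reused if the remaining budget, consumed at the
 * reserved bandwidth (max_budget / period), would be exhausted before the
 * current deadline. Otherwise the server is recharged with a full budget
 * and a deadline one period from now.
 */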
static void
enqueue_cbs_entity(struct cbs_rq *cbs_rq, struct sched_cbs_entity *se)
{
        u64 vt, now = cbs_rq_of(cbs_rq)->clock;

        /*
         * Update run-time statistics of the 'current'.
         */
        update_curr_cbs(cbs_rq);
        account_cbs_entity_enqueue(cbs_rq, se);

        vt = se->period * se->budget;
        do_div(vt, se->max_budget);

        if (vt + now > se->deadline) {
                se->budget = se->max_budget;
                se->deadline = se->period + now;
        }

        if (se != cbs_rq->curr)
                __enqueue_cbs_entity(cbs_rq, se);
}
static void
dequeue_cbs_entity(struct cbs_rq *cbs_rq, struct sched_cbs_entity *se)
{
        /*
         * Update run-time statistics of the 'current'.
         */
        update_curr_cbs(cbs_rq);

        if (se != cbs_rq->curr)
                __dequeue_cbs_entity(cbs_rq, se);
        account_cbs_entity_dequeue(cbs_rq, se);
}

static void
set_next_cbs_entity(struct cbs_rq *cbs_rq, struct sched_cbs_entity *se)
{
        /* 'current' is not kept within the tree. */
        if (se->on_rq) {
                __dequeue_cbs_entity(cbs_rq, se);
        }

        update_stats_curr_start_cbs(cbs_rq, se);
        cbs_rq->curr = se;
        // se->prev_sum_exec_runtime = se->sum_exec_runtime;
}

static int
wakeup_preempt_cbs_entity(struct sched_cbs_entity *curr, struct sched_cbs_entity *se)
{
        return se->deadline < curr->deadline;
}
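/*
 * EDF selection: the runnable entity with the earliest deadline is cached
 * as the leftmost node of the rb-tree, so picking the next entity is O(1).
 */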
static struct sched_cbs_entity *pick_next_cbs_entity(struct cbs_rq *cbs_rq)
{
        struct sched_cbs_entity *se = NULL;

        if (earliest_deadline(cbs_rq)) {
                se = __pick_next_cbs_entity(cbs_rq);
                set_next_cbs_entity(cbs_rq, se);
        }

        return se;
}

static void put_prev_cbs_entity(struct cbs_rq *cbs_rq, struct sched_cbs_entity *prev)
{
        /*
         * If still on the runqueue then deactivate_task()
         * was not called and update_curr() has to be done:
         */
        if (prev->on_rq)
                update_curr_cbs(cbs_rq);

        if (prev->on_rq) {
                /* Put 'current' back into the tree. */
                __enqueue_cbs_entity(cbs_rq, prev);
        }
        cbs_rq->curr = NULL;
}

static void
cbs_entity_tick(struct cbs_rq *cbs_rq, struct sched_cbs_entity *curr, int queued)
{
        /*
         * Update run-time statistics of the 'current'.
         */
        update_curr_cbs(cbs_rq);

        if (cbs_rq->nr_running > 1)
                resched_task(cbs_rq_of(cbs_rq)->curr); /* FIXME: Check! */
}
/**************************************************
 * CBS operations on tasks:
 */
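/*
 * When CONFIG_SCHED_HRTICK is available, program a high-resolution tick to
 * fire roughly when the remaining budget is expected to run out (but never
 * sooner than 10us), instead of waiting for the next periodic tick.
 */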
#ifdef CONFIG_SCHED_HRTICK
static void hrtick_start_cbs(struct rq *rq, struct task_struct *p)
{
        int requeue = rq->curr == p;
        struct sched_cbs_entity *se = &p->cbs_se;
        s64 delta;

        WARN_ON(task_rq(p) != rq);

        /*
         * Don't schedule timeouts shorter than 10000ns, that just
         * doesn't make sense.
         */
        delta = max(10000LL, se->budget);
        hrtick_start(rq, delta, requeue);
}
#else
static inline void
hrtick_start_cbs(struct rq *rq, struct task_struct *p)
{
}
#endif
/*
 * The enqueue_task method is called before nr_running is
 * increased. Here we update the CBS scheduling stats and
 * then put the task into the rbtree:
 */
static void enqueue_task_cbs(struct rq *rq, struct task_struct *p, int wakeup)
{
        struct cbs_rq *cbs_rq;
        struct sched_cbs_entity *se = &p->cbs_se;

        for_each_cbs_sched_entity(se) {
                if (se->on_rq)
                        break;
                cbs_rq = cbs_cbs_rq_of(se);
                enqueue_cbs_entity(cbs_rq, se);
        }

        hrtick_start_cbs(rq, rq->curr);
}

/*
 * The dequeue_task method is called before nr_running is
 * decreased. We remove the task from the rbtree and
 * update the CBS scheduling stats:
 */
static void dequeue_task_cbs(struct rq *rq, struct task_struct *p, int sleep)
{
        struct cbs_rq *cbs_rq;
        struct sched_cbs_entity *se = &p->cbs_se;

        for_each_cbs_sched_entity(se) {
                cbs_rq = cbs_cbs_rq_of(se);
                dequeue_cbs_entity(cbs_rq, se);
                /* FIXME: Don't dequeue parent if it has other entities besides us */
        }

        hrtick_start_cbs(rq, rq->curr);
}
/*
 * sched_yield() is broken on CBS: yielding is currently a no-op.
 */
static void yield_task_cbs(struct rq *rq)
{
}

/* return depth at which a sched entity is present in the hierarchy */
static inline int depth_se_cbs(struct sched_cbs_entity *se)
{
        int depth = 0;

        for_each_cbs_sched_entity(se)
                depth++;

        return depth;
}
/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_wakeup_cbs(struct rq *rq, struct task_struct *p)
{
        struct task_struct *curr = rq->curr;
        struct cbs_rq *cbs_rq = task_cbs_rq(curr);
        struct sched_cbs_entity *se = &curr->cbs_se, *pse = &p->cbs_se;
#if 0
        int se_depth, pse_depth;
#endif
        if (unlikely(rt_prio(p->prio))) {
                update_rq_clock(rq);
                update_curr_cbs(cbs_rq);
                resched_task(curr);
                return;
        }

        // se->last_wakeup = se->sum_exec_runtime;
        if (unlikely(se == pse))
                return;

#if 0
        /*
         * The preemption test can only be made between sibling entities that
         * are in the same cbs_rq, i.e. that have a common parent. Walk up the
         * hierarchy of both tasks until we find their ancestors that are
         * siblings of a common parent.
         */

        /* First walk up until both entities are at the same depth */
        se_depth = depth_se_cbs(se);
        pse_depth = depth_se_cbs(pse);

        while (se_depth > pse_depth) {
                se_depth--;
                se = parent_cbs_entity(se);
        }

        while (pse_depth > se_depth) {
                pse_depth--;
                pse = parent_cbs_entity(pse);
        }

        while (!is_same_cbs_group(se, pse)) {
                se = parent_cbs_entity(se);
                pse = parent_cbs_entity(pse);
        }
#endif
        if (wakeup_preempt_cbs_entity(se, pse) == 1)
                resched_task(curr);
}
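/*
 * Pick the CBS task to run next. group_cbs_rq() always returns NULL here,
 * so the descent loop terminates after selecting a single entity.
 */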
static struct task_struct *pick_next_task_cbs(struct rq *rq)
{
        struct task_struct *p;
        struct cbs_rq *cbs_rq = &rq->cbs;
        struct sched_cbs_entity *se;

        if (unlikely(!cbs_rq->nr_running))
                return NULL;

        do {
                se = pick_next_cbs_entity(cbs_rq);
                cbs_rq = group_cbs_rq(se);
        } while (cbs_rq);

        p = cbs_task_of(se);
        hrtick_start_cbs(rq, p);

        return p;
}
/*
 * Account for a descheduled task:
 */
static void put_prev_task_cbs(struct rq *rq, struct task_struct *prev)
{
        struct sched_cbs_entity *se = &prev->cbs_se;
        struct cbs_rq *cbs_rq;

        for_each_cbs_sched_entity(se) {
                cbs_rq = cbs_cbs_rq_of(se);
                put_prev_cbs_entity(cbs_rq, se);
        }
}

/*
 * scheduler tick hitting a task of our scheduling class:
 */
static void task_tick_cbs(struct rq *rq, struct task_struct *curr, int queued)
{
        struct cbs_rq *cbs_rq;
        struct sched_cbs_entity *se = &curr->cbs_se;

        for_each_cbs_sched_entity(se) {
                cbs_rq = cbs_cbs_rq_of(se);
                cbs_entity_tick(cbs_rq, se, queued);
        }
}
/*
 * FIXME!
 */
static void task_new_cbs(struct rq *rq, struct task_struct *p)
{
#warning Task New CBS is W R O N G ! ! !
        struct cbs_rq *cbs_rq = task_cbs_rq(p);
        printk("task_new_cbs has been called!\n");
        sched_info_queued(p);

        update_curr_cbs(cbs_rq);

        enqueue_task_cbs(rq, p, 0);
        resched_task(rq->curr);
}
/*
 * Priority of the task has changed. Check to see if we preempt
 * the current task.
 */
static void prio_changed_cbs(struct rq *rq, struct task_struct *p,
                             int oldprio, int running)
{
#warning Check prio_changed_cbs() implementation, thanks!
        printk("prio_changed_cbs has been called!\n");
        check_preempt_curr(rq, p);
}

/*
 * We switched to the sched_cbs class.
 */
static void switched_to_cbs(struct rq *rq, struct task_struct *p,
                            int running)
{
#warning Check switched_to_cbs() implementation, thanks!
        //printk("switched_to_cbs has been called!\n");
        check_preempt_curr(rq, p);
}
/*
 * Account for a task changing its policy or group.
 *
 * This routine is mostly called to set cbs_rq->curr field when a task
 * migrates between groups/classes.
 */
static void set_curr_task_cbs(struct rq *rq)
{
        struct sched_cbs_entity *se = &rq->curr->cbs_se;

        for_each_cbs_sched_entity(se)
                set_next_cbs_entity(cbs_cbs_rq_of(se), se);
}
/*
 * All the scheduling class methods:
 */
static const struct sched_class cbs_sched_class = {
        .next                   = &fair_sched_class,
        .enqueue_task           = enqueue_task_cbs,
        .dequeue_task           = dequeue_task_cbs,
        .yield_task             = yield_task_cbs,
#ifdef CONFIG_SMP
#error CBS SMP is still a No-No!
        .select_task_rq         = ,
#endif /* CONFIG_SMP */

        .check_preempt_curr     = check_preempt_wakeup_cbs,

        .pick_next_task         = pick_next_task_cbs,
        .put_prev_task          = put_prev_task_cbs,

#ifdef CONFIG_SMP
#error CBS SMP is still a No-No!
        .load_balance           = ,
        .move_one_task          = ,
#endif

        .set_curr_task          = set_curr_task_cbs,
        .task_tick              = task_tick_cbs,
        .task_new               = task_new_cbs,

        .prio_changed           = prio_changed_cbs,
        .switched_to            = switched_to_cbs,

#ifdef CONFIG_CBS_GROUP_SCHED
        .moved_group            = ,
#endif
};