/* sched.c - SPU scheduler.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * 2006-03-31	NUMA domains added.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/numa.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/pid_namespace.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <asm/mmu_context.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_priv1.h>
#include "spufs.h"

struct spu_prio_array {
	DECLARE_BITMAP(bitmap, MAX_PRIO);
	struct list_head runq[MAX_PRIO];
	spinlock_t runq_lock;
	int nr_waiting;
};

static unsigned long spu_avenrun[3];
static struct spu_prio_array *spu_prio;
static struct task_struct *spusched_task;
static struct timer_list spusched_timer;

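/*
 * The run queue is organized like the classic O(1) CPU scheduler's priority
 * arrays -- one list per priority level plus a bitmap of non-empty levels --
 * so the best waiting context can be found with roughly (sketch only, not
 * the actual lookup code):
 *
 *	best = find_first_bit(spu_prio->bitmap, MAX_PRIO);
 *	ctx = list_entry(spu_prio->runq[best].next, struct spu_context, rq);
 *
 * grab_runnable_context() below does essentially this under runq_lock, with
 * an additional NUMA node check per entry.
 */
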
/*
 * Priority of a normal, non-rt, non-niced process (aka nice level 0).
 */
#define NORMAL_PRIO		120

/*
 * Frequency of the spu scheduler tick.  By default we do one SPU scheduler
 * tick for every 10 CPU scheduler ticks.
 */
#define SPUSCHED_TICK		(10)

/*
 * These are the 'tuning knobs' of the scheduler:
 *
 * Minimum timeslice is 5 msecs (or 1 spu scheduler tick, whichever is
 * larger), default timeslice is 100 msecs, maximum timeslice is 800 msecs.
 */
#define MIN_SPU_TIMESLICE	max(5 * HZ / (1000 * SPUSCHED_TICK), 1)
#define DEF_SPU_TIMESLICE	(100 * HZ / (1000 * SPUSCHED_TICK))

#define MAX_USER_PRIO		(MAX_PRIO - MAX_RT_PRIO)
#define SCALE_PRIO(x, prio) \
	max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_SPU_TIMESLICE)

/*
 * scale user-nice values [ -20 ... 0 ... 19 ] to time slice values:
 * [800ms ... 100ms ... 5ms]
 *
 * The higher a thread's priority, the bigger timeslices
 * it gets during one round of execution. But even the lowest
 * priority thread gets MIN_SPU_TIMESLICE worth of execution time.
 */
void spu_set_timeslice(struct spu_context *ctx)
{
	if (ctx->prio < NORMAL_PRIO)
		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE * 4, ctx->prio);
	else
		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE, ctx->prio);
}

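/*
 * Worked example (illustrative, assuming HZ=1000; other HZ values scale
 * accordingly): DEF_SPU_TIMESLICE = 100 * 1000 / (1000 * 10) = 10 scheduler
 * ticks of SPUSCHED_TICK jiffies each, i.e. 100 msecs, and MIN_SPU_TIMESLICE
 * clamps to 1 tick.  With MAX_PRIO = 140 and MAX_USER_PRIO = 40:
 *
 *	nice  19 (prio 139):	10 * (140 - 139) / 20 = 0, clamped to 1 tick
 *	nice   0 (prio 120):	10 * (140 - 120) / 20 = 10 ticks (100 msecs)
 *	nice -20 (prio 100):	4 * 10 * (140 - 100) / 20 = 80 ticks (800 msecs)
 *
 * which gives the [800ms ... 100ms ... 5ms] range described above; the exact
 * low end depends on HZ and the MIN_SPU_TIMESLICE clamp.
 */
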
/*
 * Update scheduling information from the owning thread.
 */
void __spu_update_sched_info(struct spu_context *ctx)
{
	/*
	 * 32-bit assignments are atomic on powerpc, and we don't care about
	 * memory ordering here because retrieving the controlling thread is
	 * per definition racy.
	 */
	ctx->tid = current->pid;

	/*
	 * We do our own priority calculations, so we normally want
	 * ->static_prio to start with. Unfortunately this field
	 * contains junk for threads with a realtime scheduling
	 * policy so we have to look at ->prio in this case.
	 */
	if (rt_prio(current->prio))
		ctx->prio = current->prio;
	else
		ctx->prio = current->static_prio;
	ctx->policy = current->policy;

	/*
	 * A lot of places that don't hold list_mutex poke into
	 * cpus_allowed, including grab_runnable_context which
	 * already holds the runq_lock.  So abuse runq_lock
	 * to protect this field as well.
	 */
	spin_lock(&spu_prio->runq_lock);
	ctx->cpus_allowed = current->cpus_allowed;
	spin_unlock(&spu_prio->runq_lock);
}

void spu_update_sched_info(struct spu_context *ctx)
{
	int node = ctx->spu->node;

	mutex_lock(&cbe_spu_info[node].list_mutex);
	__spu_update_sched_info(ctx);
	mutex_unlock(&cbe_spu_info[node].list_mutex);
}

static int __node_allowed(struct spu_context *ctx, int node)
{
	if (nr_cpus_node(node)) {
		cpumask_t mask = node_to_cpumask(node);

		if (cpus_intersects(mask, ctx->cpus_allowed))
			return 1;
	}

	return 0;
}

static int node_allowed(struct spu_context *ctx, int node)
{
	int rval;

	spin_lock(&spu_prio->runq_lock);
	rval = __node_allowed(ctx, node);
	spin_unlock(&spu_prio->runq_lock);

	return rval;
}

static BLOCKING_NOTIFIER_HEAD(spu_switch_notifier);

void spu_switch_notify(struct spu *spu, struct spu_context *ctx)
{
	blocking_notifier_call_chain(&spu_switch_notifier,
				     ctx ? ctx->object_id : 0, spu);
}

static void notify_spus_active(void)
{
	int node;

	/*
	 * Wake up the active spu_contexts.
	 *
	 * When the awakened processes see their "notify_active" flag is set,
	 * they will call spu_switch_notify();
	 */
	for_each_online_node(node) {
		struct spu *spu;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if (spu->alloc_state != SPU_FREE) {
				struct spu_context *ctx = spu->ctx;
				set_bit(SPU_SCHED_NOTIFY_ACTIVE,
					&ctx->sched_flags);
				mb();
				wake_up_all(&ctx->stop_wq);
			}
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
}

int spu_switch_event_register(struct notifier_block *n)
{
	int ret;

	ret = blocking_notifier_chain_register(&spu_switch_notifier, n);
	if (!ret)
		notify_spus_active();
	return ret;
}
EXPORT_SYMBOL_GPL(spu_switch_event_register);

int spu_switch_event_unregister(struct notifier_block *n)
{
	return blocking_notifier_chain_unregister(&spu_switch_notifier, n);
}
EXPORT_SYMBOL_GPL(spu_switch_event_unregister);

/**
 * spu_bind_context - bind spu context to physical spu
 * @spu:	physical spu to bind to
 * @ctx:	context to bind
 */
static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
{
	pr_debug("%s: pid=%d SPU=%d NODE=%d\n", __FUNCTION__, current->pid,
		 spu->number, spu->node);

	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	if (ctx->flags & SPU_CREATE_NOSCHED)
		atomic_inc(&cbe_spu_info[spu->node].reserved_spus);

	ctx->stats.slb_flt_base = spu->stats.slb_flt;
	ctx->stats.class2_intr_base = spu->stats.class2_intr;

	spu->ctx = ctx;
	spu->flags = 0;
	ctx->spu = spu;
	ctx->ops = &spu_hw_ops;
	spu->pid = current->pid;
	spu->tgid = current->tgid;
	spu_associate_mm(spu, ctx->owner);
	spu->ibox_callback = spufs_ibox_callback;
	spu->wbox_callback = spufs_wbox_callback;
	spu->stop_callback = spufs_stop_callback;
	spu->mfc_callback = spufs_mfc_callback;
	spu->dma_callback = spufs_dma_callback;
	mb();
	spu_unmap_mappings(ctx);
	spu_restore(&ctx->csa, spu);
	spu->timestamp = jiffies;
	spu_cpu_affinity_set(spu, raw_smp_processor_id());
	spu_switch_notify(spu, ctx);
	ctx->state = SPU_STATE_RUNNABLE;

	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
}

/*
 * Must be used with the list_mutex held.
 */
static inline int sched_spu(struct spu *spu)
{
	BUG_ON(!mutex_is_locked(&cbe_spu_info[spu->node].list_mutex));

	return (!spu->ctx || !(spu->ctx->flags & SPU_CREATE_NOSCHED));
}

static void aff_merge_remaining_ctxs(struct spu_gang *gang)
{
	struct spu_context *ctx;

	list_for_each_entry(ctx, &gang->aff_list_head, aff_list) {
		if (list_empty(&ctx->aff_list))
			list_add(&ctx->aff_list, &gang->aff_list_head);
	}
	gang->aff_flags |= AFF_MERGED;
}

static void aff_set_offsets(struct spu_gang *gang)
{
	struct spu_context *ctx;
	int offset;

	offset = -1;
	list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
				    aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		ctx->aff_offset = offset--;
	}

	offset = 0;
	list_for_each_entry(ctx, gang->aff_ref_ctx->aff_list.prev, aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		ctx->aff_offset = offset++;
	}

	gang->aff_flags |= AFF_OFFSETS_SET;
}

static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff,
		int group_size, int lowest_offset)
{
	struct spu *spu;
	int node, n;

	/*
	 * TODO: A better algorithm could be used to find a good spu to be
	 *       used as reference location for the ctxs chain.
	 */
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;
		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if ((!mem_aff || spu->has_mem_affinity) &&
			    sched_spu(spu)) {
				mutex_unlock(&cbe_spu_info[node].list_mutex);
				return spu;
			}
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
	return NULL;
}

static void aff_set_ref_point_location(struct spu_gang *gang)
{
	int mem_aff, gs, lowest_offset;
	struct spu_context *ctx;
	struct spu_context *tmp;

	mem_aff = gang->aff_ref_ctx->flags & SPU_CREATE_AFFINITY_MEM;
	lowest_offset = 0;
	gs = 0;

	list_for_each_entry(tmp, &gang->aff_list_head, aff_list)
		gs++;

	list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
				    aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		lowest_offset = ctx->aff_offset;
	}

	gang->aff_ref_spu = aff_ref_location(gang->aff_ref_ctx, mem_aff, gs,
					     lowest_offset);
}

static struct spu *ctx_location(struct spu *ref, int offset, int node)
{
	struct spu *spu;

	spu = NULL;
	if (offset >= 0) {
		list_for_each_entry(spu, ref->aff_list.prev, aff_list) {
			BUG_ON(spu->node != node);
			if (offset == 0)
				break;
			if (sched_spu(spu))
				offset--;
		}
	} else {
		list_for_each_entry_reverse(spu, ref->aff_list.next, aff_list) {
			BUG_ON(spu->node != node);
			if (offset == 0)
				break;
			if (sched_spu(spu))
				offset++;
		}
	}

	return spu;
}

/*
 * has_affinity is called each time a context is going to be scheduled.
 * It returns whether the context's gang has an affinity reference spu,
 * setting one up first if necessary.
 */
static int has_affinity(struct spu_context *ctx)
{
	struct spu_gang *gang = ctx->gang;

	if (list_empty(&ctx->aff_list))
		return 0;

	if (!gang->aff_ref_spu) {
		if (!(gang->aff_flags & AFF_MERGED))
			aff_merge_remaining_ctxs(gang);
		if (!(gang->aff_flags & AFF_OFFSETS_SET))
			aff_set_offsets(gang);
		aff_set_ref_point_location(gang);
	}

	return gang->aff_ref_spu != NULL;
}

/**
 * spu_unbind_context - unbind spu context from physical spu
 * @spu:	physical spu to unbind from
 * @ctx:	context to unbind
 */
static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
{
	pr_debug("%s: unbind pid=%d SPU=%d NODE=%d\n", __FUNCTION__,
		 spu->pid, spu->number, spu->node);
	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	if (spu->ctx->flags & SPU_CREATE_NOSCHED)
		atomic_dec(&cbe_spu_info[spu->node].reserved_spus);

	if (ctx->gang) {
		mutex_lock(&ctx->gang->aff_mutex);
		if (has_affinity(ctx)) {
			if (atomic_dec_and_test(&ctx->gang->aff_sched_count))
				ctx->gang->aff_ref_spu = NULL;
		}
		mutex_unlock(&ctx->gang->aff_mutex);
	}

	spu_switch_notify(spu, NULL);
	spu_unmap_mappings(ctx);
	spu_save(&ctx->csa, spu);
	spu->timestamp = jiffies;
	ctx->state = SPU_STATE_SAVED;
	spu->ibox_callback = NULL;
	spu->wbox_callback = NULL;
	spu->stop_callback = NULL;
	spu->mfc_callback = NULL;
	spu->dma_callback = NULL;
	spu_associate_mm(spu, NULL);
	spu->pid = 0;
	spu->tgid = 0;
	ctx->ops = &spu_backing_ops;
	spu->flags = 0;
	spu->ctx = NULL;

	ctx->stats.slb_flt +=
		(spu->stats.slb_flt - ctx->stats.slb_flt_base);
	ctx->stats.class2_intr +=
		(spu->stats.class2_intr - ctx->stats.class2_intr_base);

	/* This maps the underlying spu state to idle */
	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
	ctx->spu = NULL;
}

/**
 * __spu_add_to_rq - add a context to the runqueue
 * @ctx:	context to add
 */
static void __spu_add_to_rq(struct spu_context *ctx)
{
	/*
	 * Unfortunately this code path can be called from multiple threads
	 * on behalf of a single context due to the way the problem state
	 * mmap support works.
	 *
	 * Fortunately we need to wake up all these threads at the same time
	 * and can simply skip the runqueue addition for all but the first
	 * thread getting into this codepath.
	 *
	 * It's still quite hacky, and long-term we should proxy all other
	 * threads through the owner thread so that spu_run is in control
	 * of all the scheduling activity for a given context.
	 */
	if (list_empty(&ctx->rq)) {
		list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
		set_bit(ctx->prio, spu_prio->bitmap);
		if (!spu_prio->nr_waiting++)
			__mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
	}
}

static void __spu_del_from_rq(struct spu_context *ctx)
{
	int prio = ctx->prio;

	if (!list_empty(&ctx->rq)) {
		if (!--spu_prio->nr_waiting)
			del_timer(&spusched_timer);
		list_del_init(&ctx->rq);

		if (list_empty(&spu_prio->runq[prio]))
			clear_bit(prio, spu_prio->bitmap);
	}
}

static void spu_prio_wait(struct spu_context *ctx)
{
	DEFINE_WAIT(wait);

	spin_lock(&spu_prio->runq_lock);
	prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
	if (!signal_pending(current)) {
		__spu_add_to_rq(ctx);
		spin_unlock(&spu_prio->runq_lock);
		mutex_unlock(&ctx->state_mutex);
		schedule();
		mutex_lock(&ctx->state_mutex);
		spin_lock(&spu_prio->runq_lock);
		__spu_del_from_rq(ctx);
	}
	spin_unlock(&spu_prio->runq_lock);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&ctx->stop_wq, &wait);
}

static struct spu *spu_get_idle(struct spu_context *ctx)
{
	struct spu *spu, *aff_ref_spu;
	int node, n;

	if (ctx->gang) {
		mutex_lock(&ctx->gang->aff_mutex);
		if (has_affinity(ctx)) {
			aff_ref_spu = ctx->gang->aff_ref_spu;
			atomic_inc(&ctx->gang->aff_sched_count);
			mutex_unlock(&ctx->gang->aff_mutex);
			node = aff_ref_spu->node;

			mutex_lock(&cbe_spu_info[node].list_mutex);
			spu = ctx_location(aff_ref_spu, ctx->aff_offset, node);
			if (spu && spu->alloc_state == SPU_FREE)
				goto found;
			mutex_unlock(&cbe_spu_info[node].list_mutex);

			mutex_lock(&ctx->gang->aff_mutex);
			if (atomic_dec_and_test(&ctx->gang->aff_sched_count))
				ctx->gang->aff_ref_spu = NULL;
			mutex_unlock(&ctx->gang->aff_mutex);

			return NULL;
		}
		mutex_unlock(&ctx->gang->aff_mutex);
	}
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if (spu->alloc_state == SPU_FREE)
				goto found;
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}

	return NULL;

 found:
	spu->alloc_state = SPU_USED;
	mutex_unlock(&cbe_spu_info[node].list_mutex);
	pr_debug("Got SPU %d %d\n", spu->number, spu->node);
	spu_init_channels(spu);
	return spu;
}

/**
 * find_victim - find a lower priority context to preempt
 * @ctx:	candidate context for running
 *
 * Returns the freed physical spu to run the new context on.
 */
static struct spu *find_victim(struct spu_context *ctx)
{
	struct spu_context *victim = NULL;
	struct spu *spu;
	int node, n;

	/*
	 * Look for a possible preemption candidate on the local node first.
	 * If there is no candidate look at the other nodes.  This isn't
	 * exactly fair, but so far the whole spu scheduler tries to keep
	 * a strong node affinity.  We might want to fine-tune this in
	 * the future.
	 */
 restart:
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			struct spu_context *tmp = spu->ctx;

			if (tmp->prio > ctx->prio &&
			    (!victim || tmp->prio > victim->prio))
				victim = spu->ctx;
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);

		if (victim) {
			/*
			 * This nests ctx->state_mutex, but we always lock
			 * higher priority contexts before lower priority
			 * ones, so this is safe until we introduce
			 * priority inheritance schemes.
			 */
			if (!mutex_trylock(&victim->state_mutex)) {
				victim = NULL;
				goto restart;
			}

			spu = victim->spu;
			if (!spu) {
				/*
				 * This race can happen because we've dropped
				 * the active list mutex.  Not a problem, just
				 * restart the search.
				 */
				mutex_unlock(&victim->state_mutex);
				victim = NULL;
				goto restart;
			}

			mutex_lock(&cbe_spu_info[node].list_mutex);
			cbe_spu_info[node].nr_active--;
			mutex_unlock(&cbe_spu_info[node].list_mutex);

			spu_unbind_context(spu, victim);
			victim->stats.invol_ctx_switch++;
			spu->stats.invol_ctx_switch++;
			mutex_unlock(&victim->state_mutex);
			/*
			 * We need to break out of the wait loop in spu_run
			 * manually to ensure this context gets put on the
			 * runqueue again ASAP.
			 */
			wake_up(&victim->stop_wq);
			return spu;
		}
	}

	return NULL;
}

/**
 * spu_activate - find a free spu for a context and execute it
 * @ctx:	spu context to schedule
 * @flags:	flags (currently ignored)
 *
 * Tries to find a free spu to run @ctx.  If no free spu is available
 * add the context to the runqueue so it gets woken up once an spu
 * is available.
 */
int spu_activate(struct spu_context *ctx, unsigned long flags)
{
	do {
		struct spu *spu;

		/*
		 * If there are multiple threads waiting for a single context
		 * only one actually binds the context while the others will
		 * only be able to acquire the state_mutex once the context
		 * already is in runnable state.
		 */
		if (ctx->spu)
			return 0;

		spu = spu_get_idle(ctx);
		/*
		 * If this is a realtime thread we try to get it running by
		 * preempting a lower priority thread.
		 */
		if (!spu && rt_prio(ctx->prio))
			spu = find_victim(ctx);
		if (spu) {
			int node = spu->node;

			mutex_lock(&cbe_spu_info[node].list_mutex);
			spu_bind_context(spu, ctx);
			cbe_spu_info[node].nr_active++;
			mutex_unlock(&cbe_spu_info[node].list_mutex);
			return 0;
		}

		spu_prio_wait(ctx);
	} while (!signal_pending(current));

	return -ERESTARTSYS;
}

/**
 * grab_runnable_context - try to find a runnable context
 *
 * Remove the highest priority context on the runqueue and return it
 * to the caller.  Returns %NULL if no runnable context was found.
 */
static struct spu_context *grab_runnable_context(int prio, int node)
{
	struct spu_context *ctx;
	int best;

	spin_lock(&spu_prio->runq_lock);
	best = find_first_bit(spu_prio->bitmap, prio);
	while (best < prio) {
		struct list_head *rq = &spu_prio->runq[best];

		list_for_each_entry(ctx, rq, rq) {
			/* XXX(hch): check for affinity here as well */
			if (__node_allowed(ctx, node)) {
				__spu_del_from_rq(ctx);
				goto found;
			}
		}
		best++;
	}
	ctx = NULL;
 found:
	spin_unlock(&spu_prio->runq_lock);
	return ctx;
}

static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
{
	struct spu *spu = ctx->spu;
	struct spu_context *new = NULL;

	if (spu) {
		new = grab_runnable_context(max_prio, spu->node);
		if (new || force) {
			int node = spu->node;

			mutex_lock(&cbe_spu_info[node].list_mutex);
			spu_unbind_context(spu, ctx);
			spu->alloc_state = SPU_FREE;
			cbe_spu_info[node].nr_active--;
			mutex_unlock(&cbe_spu_info[node].list_mutex);

			ctx->stats.vol_ctx_switch++;
			spu->stats.vol_ctx_switch++;

			if (new)
				wake_up(&new->stop_wq);
		}
	}

	return new != NULL;
}

/**
 * spu_deactivate - unbind a context from its physical spu
 * @ctx:	spu context to unbind
 *
 * Unbind @ctx from the physical spu it is running on and schedule
 * the highest priority context to run on the freed physical spu.
 */
void spu_deactivate(struct spu_context *ctx)
{
	__spu_deactivate(ctx, 1, MAX_PRIO);
}

/**
 * spu_yield - yield a physical spu if others are waiting
 * @ctx:	spu context to yield
 *
 * Check if there is a higher priority context waiting and if yes
 * unbind @ctx from the physical spu and schedule the highest
 * priority context to run on the freed physical spu instead.
 */
void spu_yield(struct spu_context *ctx)
{
	if (!(ctx->flags & SPU_CREATE_NOSCHED)) {
		mutex_lock(&ctx->state_mutex);
		__spu_deactivate(ctx, 0, MAX_PRIO);
		mutex_unlock(&ctx->state_mutex);
	}
}

static noinline void spusched_tick(struct spu_context *ctx)
{
	if (ctx->flags & SPU_CREATE_NOSCHED)
		return;
	if (ctx->policy == SCHED_FIFO)
		return;

	if (--ctx->time_slice)
		return;

	/*
	 * Unfortunately list_mutex ranks outside of state_mutex, so
	 * we have to trylock here.  If we fail give the context another
	 * tick and try again.
	 */
	if (mutex_trylock(&ctx->state_mutex)) {
		struct spu *spu = ctx->spu;
		struct spu_context *new;

		new = grab_runnable_context(ctx->prio + 1, spu->node);
		if (new) {
			spu_unbind_context(spu, ctx);
			ctx->stats.invol_ctx_switch++;
			spu->stats.invol_ctx_switch++;
			spu->alloc_state = SPU_FREE;
			cbe_spu_info[spu->node].nr_active--;
			wake_up(&new->stop_wq);
			/*
			 * We need to break out of the wait loop in
			 * spu_run manually to ensure this context
			 * gets put on the runqueue again ASAP.
			 */
			wake_up(&ctx->stop_wq);
		}
		spu_set_timeslice(ctx);
		mutex_unlock(&ctx->state_mutex);
	} else {
		ctx->time_slice++;
	}
}

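/*
 * Timing note: spusched_tick() is driven by spusched_wake() below, which
 * re-arms spusched_timer every SPUSCHED_TICK jiffies while contexts are
 * waiting (the timer is started in __spu_add_to_rq() and stopped in
 * __spu_del_from_rq()).  A context's time_slice is therefore consumed in
 * units of SPUSCHED_TICK jiffies; e.g. assuming HZ=1000, a time_slice of 10
 * lasts roughly 10 * 10 = 100 jiffies, i.e. about 100 msecs.
 */
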
/**
 * count_active_contexts - count nr of active tasks
 *
 * Return the number of tasks currently running or waiting to run.
 *
 * Note that we don't take runq_lock / list_mutex here.  Reading
 * a single 32bit value is atomic on powerpc, and we don't care
 * about memory ordering issues here.
 */
static unsigned long count_active_contexts(void)
{
	int nr_active = 0, node;

	for (node = 0; node < MAX_NUMNODES; node++)
		nr_active += cbe_spu_info[node].nr_active;
	nr_active += spu_prio->nr_waiting;

	return nr_active;
}

/**
 * spu_calc_load - given tick count, update the avenrun load estimates.
 *
 * No locking against reading these values from userspace, as for
 * the CPU loadavg code.
 */
static void spu_calc_load(unsigned long ticks)
{
	unsigned long active_tasks; /* fixed-point */
	static int count = LOAD_FREQ;

	count -= ticks;

	if (unlikely(count < 0)) {
		active_tasks = count_active_contexts() * FIXED_1;
		do {
			CALC_LOAD(spu_avenrun[0], EXP_1, active_tasks);
			CALC_LOAD(spu_avenrun[1], EXP_5, active_tasks);
			CALC_LOAD(spu_avenrun[2], EXP_15, active_tasks);
			count += LOAD_FREQ;
		} while (count < 0);
	}
}

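/*
 * For reference, CALC_LOAD() is the same fixed-point exponential average used
 * for the CPU loadavg (values quoted from the generic <linux/sched.h>
 * definitions: FSHIFT = 11, FIXED_1 = 2048 = "1.0"):
 *
 *	load = (load * exp + n * (FIXED_1 - exp)) >> FSHIFT;
 *
 * i.e. load = load * e + n * (1 - e), with e = EXP_1/FIXED_1 ~= 0.92 for the
 * 1-minute average, so a change in the number of active contexts shows up in
 * the averages gradually over the following minutes.
 */
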
static void spusched_wake(unsigned long data)
{
	mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
	wake_up_process(spusched_task);
	spu_calc_load(SPUSCHED_TICK);
}

static int spusched_thread(void *unused)
{
	struct spu *spu;
	int node;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
		for (node = 0; node < MAX_NUMNODES; node++) {
			mutex_lock(&cbe_spu_info[node].list_mutex);
			list_for_each_entry(spu, &cbe_spu_info[node].spus,
					    cbe_list)
				if (spu->ctx)
					spusched_tick(spu->ctx);
			mutex_unlock(&cbe_spu_info[node].list_mutex);
		}
	}

	return 0;
}

#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)

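/*
 * Worked example (assuming the usual FSHIFT = 11, FIXED_1 = 2048): a load of
 * 1.5 is stored as 3072, so LOAD_INT(3072) = 1 and
 * LOAD_FRAC(3072) = ((3072 & 2047) * 100) >> 11 = 50, printed as "1.50".
 * The FIXED_1/200 added in show_spu_loadavg() below is ~0.005 in fixed point,
 * i.e. rounding to the nearest hundredth.
 */
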
static int show_spu_loadavg(struct seq_file *s, void *private)
{
	int a, b, c;

	a = spu_avenrun[0] + (FIXED_1/200);
	b = spu_avenrun[1] + (FIXED_1/200);
	c = spu_avenrun[2] + (FIXED_1/200);

	/*
	 * Note that last_pid doesn't really make much sense for the
	 * SPU loadavg (it even seems very odd on the CPU side..),
	 * but we include it here to have a 100% compatible interface.
	 */
	seq_printf(s, "%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
		LOAD_INT(a), LOAD_FRAC(a),
		LOAD_INT(b), LOAD_FRAC(b),
		LOAD_INT(c), LOAD_FRAC(c),
		count_active_contexts(),
		atomic_read(&nr_spu_contexts),
		current->nsproxy->pid_ns->last_pid);
	return 0;
}

static int spu_loadavg_open(struct inode *inode, struct file *file)
{
	return single_open(file, show_spu_loadavg, NULL);
}

static const struct file_operations spu_loadavg_fops = {
	.open		= spu_loadavg_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

int __init spu_sched_init(void)
{
	struct proc_dir_entry *entry;
	int err = -ENOMEM, i;

	spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
	if (!spu_prio)
		goto out;

	for (i = 0; i < MAX_PRIO; i++) {
		INIT_LIST_HEAD(&spu_prio->runq[i]);
		__clear_bit(i, spu_prio->bitmap);
	}
	spin_lock_init(&spu_prio->runq_lock);

	setup_timer(&spusched_timer, spusched_wake, 0);

	spusched_task = kthread_run(spusched_thread, NULL, "spusched");
	if (IS_ERR(spusched_task)) {
		err = PTR_ERR(spusched_task);
		goto out_free_spu_prio;
	}

	entry = create_proc_entry("spu_loadavg", 0, NULL);
	if (!entry)
		goto out_stop_kthread;
	entry->proc_fops = &spu_loadavg_fops;

	pr_debug("spusched: tick: %d, min ticks: %d, default ticks: %d\n",
			SPUSCHED_TICK, MIN_SPU_TIMESLICE, DEF_SPU_TIMESLICE);
	return 0;

 out_stop_kthread:
	kthread_stop(spusched_task);
 out_free_spu_prio:
	kfree(spu_prio);
 out:
	return err;
}

void spu_sched_exit(void)
{
	struct spu *spu;
	int node;

	remove_proc_entry("spu_loadavg", NULL);

	del_timer_sync(&spusched_timer);
	kthread_stop(spusched_task);

	for (node = 0; node < MAX_NUMNODES; node++) {
		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list)
			if (spu->alloc_state != SPU_FREE)
				spu->alloc_state = SPU_FREE;
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
	kfree(spu_prio);
}