/*
 * Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/usched_bsd4.c,v 1.14 2006/06/10 20:19:38 dillon Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/rtprio.h>
#include <sys/sysctl.h>
#include <sys/resourcevar.h>
#include <sys/spinlock.h>
#include <machine/ipl.h>
#include <machine/cpu.h>
#include <machine/smp.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>
/*
 * Priorities.  Note that with 32 run queues per scheduler each queue
 * represents four priority levels.
 */

#define MAXPRI			128
#define PRIMASK			(MAXPRI - 1)
#define PRIBASE_REALTIME	0
#define PRIBASE_NORMAL		MAXPRI
#define PRIBASE_IDLE		(MAXPRI * 2)
#define PRIBASE_THREAD		(MAXPRI * 3)
#define PRIBASE_NULL		(MAXPRI * 4)

#define NQS	32			/* 32 run queues. */
#define PPQ	(MAXPRI / NQS)		/* priorities per queue */
#define PPQMASK	(PPQ - 1)
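
/*
 * Illustrative sketch (not part of the original source): with MAXPRI
 * priorities split across NQS (32) queues, PPQ consecutive priority
 * levels share one run queue.  A normal-class user priority is mapped
 * to a queue index by masking off the class base and dividing by PPQ,
 * the same computation bsd4_resetpriority() performs further below:
 *
 *	rqindex = (newpriority & PRIMASK) / PPQ;	(range 0 .. NQS-1)
 */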
/*
 * NICEPPQ	- number of nice units per priority queue
 * ESTCPURAMP	- number of scheduler ticks for estcpu to switch queues
 *
 * ESTCPUPPQ	- number of estcpu units per priority queue
 * ESTCPUMAX	- number of estcpu units
 * ESTCPUINCR	- amount we have to increment p_estcpu per scheduling tick at
 *		  100% cpu.
 */
#define ESTCPUMAX	(ESTCPUPPQ * NQS)
#define ESTCPUINCR	(ESTCPUPPQ / ESTCPURAMP)
#define PRIO_RANGE	(PRIO_MAX - PRIO_MIN + 1)

#define ESTCPULIM(v)	min((v), ESTCPUMAX)
#define lwp_priority	lwp_usdata.bsd4.priority
#define lwp_rqindex	lwp_usdata.bsd4.rqindex
#define lwp_origcpu	lwp_usdata.bsd4.origcpu
#define lwp_estcpu	lwp_usdata.bsd4.estcpu
#define lwp_rqtype	lwp_usdata.bsd4.rqtype
static void bsd4_acquire_curproc(struct lwp *lp);
static void bsd4_release_curproc(struct lwp *lp);
static void bsd4_select_curproc(globaldata_t gd);
static void bsd4_setrunqueue(struct lwp *lp);
static void bsd4_schedulerclock(struct lwp *lp, sysclock_t period,
		sysclock_t cpstamp);
static void bsd4_recalculate_estcpu(struct lwp *lp);
static void bsd4_resetpriority(struct lwp *lp);
static void bsd4_forking(struct lwp *plp, struct lwp *lp);
static void bsd4_exiting(struct lwp *plp, struct lwp *lp);

static void need_user_resched_remote(void *dummy);
static struct lwp *chooseproc_locked(struct lwp *chklp);
static void bsd4_remrunqueue_locked(struct lwp *lp);
static void bsd4_setrunqueue_locked(struct lwp *lp);
struct usched usched_bsd4 = {
	{ NULL },
	"bsd4", "Original DragonFly Scheduler",
	NULL,			/* default registration */
	NULL,			/* default deregistration */
	bsd4_acquire_curproc,
	bsd4_release_curproc,
	bsd4_setrunqueue,
	bsd4_schedulerclock,
	bsd4_recalculate_estcpu,
	bsd4_resetpriority,
	bsd4_forking,
	bsd4_exiting,
	NULL			/* setcpumask not supported */
};
struct usched_bsd4_pcpu {
	struct thread	helper_thread;
	short		rrcount;
	short		upri;
	struct lwp	*uschedcp;
};

typedef struct usched_bsd4_pcpu *bsd4_pcpu_t;
/*
 * We have NQS (32) run queues per scheduling class.  For the normal
 * class, there are 128 priorities scaled onto these 32 queues.  New
 * processes are added to the last entry in each queue, and processes
 * are selected for running by taking them from the head and maintaining
 * a simple FIFO arrangement.  Realtime and Idle priority processes have
 * an explicit 0-31 priority which maps directly onto their class queue
 * index.  When a queue has something in it, the corresponding bit is
 * set in the queuebits variable, allowing a single read to determine
 * the state of all 32 queues and then a ffs() to find the first busy
 * queue.
 */
static struct rq bsd4_queues[NQS];
static struct rq bsd4_rtqueues[NQS];
static struct rq bsd4_idqueues[NQS];
static u_int32_t bsd4_queuebits;
static u_int32_t bsd4_rtqueuebits;
static u_int32_t bsd4_idqueuebits;
static cpumask_t bsd4_curprocmask = -1;	/* currently running a user process */
static cpumask_t bsd4_rdyprocmask;	/* ready to accept a user process */
static int	 bsd4_runqcount;
static volatile int bsd4_scancpu;
static struct spinlock bsd4_spin;
static struct usched_bsd4_pcpu bsd4_pcpu[MAXCPU];
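
/*
 * Illustrative sketch (not part of the original source): a set bit in
 * bsd4_queuebits / bsd4_rtqueuebits / bsd4_idqueuebits means the
 * corresponding run queue is non-empty, so the best (lowest-index) busy
 * queue of a class can be located with a single find-first-set, roughly
 * what chooseproc_locked() does further below:
 *
 *	pri = bsfl(bsd4_rtqueuebits);	(index of lowest set bit)
 *	q   = &bsd4_rtqueues[pri];
 *	lp  = TAILQ_FIRST(q);
 */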
SYSCTL_INT(_debug, OID_AUTO, bsd4_runqcount, CTLFLAG_RD, &bsd4_runqcount, 0, "");

static int usched_nonoptimal;
SYSCTL_INT(_debug, OID_AUTO, usched_nonoptimal, CTLFLAG_RW,
	   &usched_nonoptimal, 0, "acquire_curproc() was not optimal");
static int usched_optimal;
SYSCTL_INT(_debug, OID_AUTO, usched_optimal, CTLFLAG_RW,
	   &usched_optimal, 0, "acquire_curproc() was optimal");

static int usched_debug = -1;
SYSCTL_INT(_debug, OID_AUTO, scdebug, CTLFLAG_RW, &usched_debug, 0, "");

static int remote_resched_nonaffinity;
static int remote_resched_affinity;
static int choose_affinity;
SYSCTL_INT(_debug, OID_AUTO, remote_resched_nonaffinity, CTLFLAG_RD,
	   &remote_resched_nonaffinity, 0, "Number of remote rescheds");
SYSCTL_INT(_debug, OID_AUTO, remote_resched_affinity, CTLFLAG_RD,
	   &remote_resched_affinity, 0, "Number of remote rescheds");
SYSCTL_INT(_debug, OID_AUTO, choose_affinity, CTLFLAG_RD,
	   &choose_affinity, 0, "chooseproc() was smart");

static int usched_bsd4_rrinterval = (ESTCPUFREQ + 9) / 10;
SYSCTL_INT(_kern, OID_AUTO, usched_bsd4_rrinterval, CTLFLAG_RW,
	   &usched_bsd4_rrinterval, 0, "");
static int usched_bsd4_decay = ESTCPUINCR / 2;
SYSCTL_INT(_kern, OID_AUTO, usched_bsd4_decay, CTLFLAG_RW,
	   &usched_bsd4_decay, 0, "");
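
/*
 * Worked example (illustrative only; the actual ESTCPUFREQ value is an
 * assumption here): the scheduler clock fires ESTCPUFREQ times a second,
 * so a round-robin interval of (ESTCPUFREQ + 9) / 10 ticks fires roughly
 * 10 times a second.  If ESTCPUFREQ were 50, usched_bsd4_rrinterval would
 * default to (50 + 9) / 10 == 5 ticks, i.e. about 100ms per round-robin.
 */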
/*
 * Initialize the run queues at boot time.
 */
static void
rqinit(void *dummy)
{
	int i;

	spin_init(&bsd4_spin);
	for (i = 0; i < NQS; i++) {
		TAILQ_INIT(&bsd4_queues[i]);
		TAILQ_INIT(&bsd4_rtqueues[i]);
		TAILQ_INIT(&bsd4_idqueues[i]);
	}
	atomic_clear_int(&bsd4_curprocmask, 1);
}
SYSINIT(runqueue, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, rqinit, NULL)
/*
 * BSD4_ACQUIRE_CURPROC
 *
 * This function is called when the kernel intends to return to userland.
 * It is responsible for making the thread the current designated userland
 * thread for this cpu, blocking if necessary.
 *
 * We are expected to handle userland reschedule requests here too.
 *
 * WARNING! THIS FUNCTION IS ALLOWED TO CAUSE THE CURRENT THREAD TO MIGRATE
 * TO ANOTHER CPU!  Because most of the kernel assumes that no migration will
 * occur, this function is called only under very controlled circumstances.
 *
 * Basically we recalculate our estcpu to hopefully give us a more
 * favorable disposition, setrunqueue, then wait for the curlwp
 * designation to be handed to us (if the setrunqueue didn't do it).
 */
static void
bsd4_acquire_curproc(struct lwp *lp)
{
	globaldata_t gd = mycpu;
	bsd4_pcpu_t dd = &bsd4_pcpu[gd->gd_cpuid];

	/*
	 * Possibly select another thread, or keep the current thread.
	 */
	if (user_resched_wanted())
		bsd4_select_curproc(gd);

	/*
	 * If uschedcp is still pointing to us, we're done
	 */
	if (dd->uschedcp == lp)
		return;

	/*
	 * If this cpu has no current thread, and the run queue is
	 * empty, we can safely select ourself.
	 */
	if (dd->uschedcp == NULL && bsd4_runqcount == 0) {
		atomic_set_int(&bsd4_curprocmask, gd->gd_cpumask);
		dd->uschedcp = lp;
		dd->upri = lp->lwp_priority;
		return;
	}

	/*
	 * Adjust estcpu and recalculate our priority, then put us back on
	 * the user process scheduler's runq.  Only increment the involuntary
	 * context switch count if the setrunqueue call did not immediately
	 * schedule us.
	 *
	 * Loop until we become the currently scheduled process.  Note that
	 * calling setrunqueue can cause us to be migrated to another cpu
	 * after we switch away.
	 */
	do {
		bsd4_recalculate_estcpu(lp);
		lwkt_deschedule_self(gd->gd_curthread);
		bsd4_setrunqueue(lp);
		if ((gd->gd_curthread->td_flags & TDF_RUNQ) == 0)
			++lp->lwp_stats->p_ru.ru_nivcsw;
		lwkt_switch();
		gd = mycpu;
		dd = &bsd4_pcpu[gd->gd_cpuid];
	} while (dd->uschedcp != lp);
	KKASSERT((lp->lwp_proc->p_flag & P_ONRUNQ) == 0);
}
/*
 * BSD4_RELEASE_CURPROC
 *
 * This routine detaches the current thread from the userland scheduler,
 * usually because the thread needs to run in the kernel (at kernel priority)
 * for a while.
 *
 * This routine is also responsible for selecting a new thread to
 * make the current thread.
 *
 * NOTE: This implementation differs from the dummy example in that
 * bsd4_select_curproc() is able to select the current process, whereas
 * dummy_select_curproc() is not able to select the current process.
 * This means we have to NULL out uschedcp.
 *
 * Additionally, note that we may already be on a run queue if releasing
 * via the lwkt_switch() in bsd4_setrunqueue().
 *
 * WARNING!  The MP lock may be in an unsynchronized state due to the
 * way get_mplock() works and the fact that this function may be called
 * from a passive release during a lwkt_switch().  try_mplock() will deal
 * with this for us but you should be aware that td_mpcount may not be
 * useable.
 */
static void
bsd4_release_curproc(struct lwp *lp)
{
	globaldata_t gd = mycpu;
	bsd4_pcpu_t dd = &bsd4_pcpu[gd->gd_cpuid];

	if (dd->uschedcp == lp) {
		/*
		 * Note: we leave our curprocmask bit set to prevent
		 * unnecessary scheduler helper wakeups.
		 * bsd4_select_curproc() will clean it up.
		 */
		KKASSERT((lp->lwp_proc->p_flag & P_ONRUNQ) == 0);
		dd->uschedcp = NULL;	/* don't let lp be selected */
		bsd4_select_curproc(gd);
	}
}
/*
 * BSD4_SELECT_CURPROC
 *
 * Select a new current process for this cpu.  This satisfies a user
 * scheduler reschedule request so clear that too.
 *
 * This routine is also responsible for equal-priority round-robining,
 * typically triggered from bsd4_schedulerclock().  In our dummy example
 * all the 'user' threads are LWKT scheduled all at once and we just
 * call lwkt_switch().
 */
static void
bsd4_select_curproc(globaldata_t gd)
{
	bsd4_pcpu_t dd = &bsd4_pcpu[gd->gd_cpuid];
	struct lwp *nlp;
	int cpuid = gd->gd_cpuid;

	clear_user_resched();	/* This satisfies the reschedule request */
	dd->rrcount = 0;	/* Reset the round-robin counter */

	spin_lock_wr(&bsd4_spin);
	if ((nlp = chooseproc_locked(dd->uschedcp)) != NULL) {
		atomic_set_int(&bsd4_curprocmask, 1 << cpuid);
		dd->upri = nlp->lwp_priority;
		dd->uschedcp = nlp;
		spin_unlock_wr(&bsd4_spin);
		lwkt_acquire(nlp->lwp_thread);
		lwkt_schedule(nlp->lwp_thread);
	} else if (dd->uschedcp) {
		dd->upri = dd->uschedcp->lwp_priority;
		spin_unlock_wr(&bsd4_spin);
		KKASSERT(bsd4_curprocmask & (1 << cpuid));
	} else if (bsd4_runqcount && (bsd4_rdyprocmask & (1 << cpuid))) {
		atomic_clear_int(&bsd4_curprocmask, 1 << cpuid);
		atomic_clear_int(&bsd4_rdyprocmask, 1 << cpuid);
		dd->uschedcp = NULL;
		dd->upri = PRIBASE_NULL;
		spin_unlock_wr(&bsd4_spin);
		lwkt_schedule(&dd->helper_thread);
	} else {
		dd->uschedcp = NULL;
		dd->upri = PRIBASE_NULL;
		atomic_clear_int(&bsd4_curprocmask, 1 << cpuid);
		spin_unlock_wr(&bsd4_spin);
	}
}
/*
 * This routine is called to schedule a new user process after a fork.
 *
 * The caller may set P_PASSIVE_ACQ in p_flag to indicate that we should
 * attempt to leave the thread on the current cpu.
 *
 * If P_PASSIVE_ACQ is set setrunqueue() will not wakeup potential target
 * cpus in an attempt to keep the process on the current cpu at least for
 * a little while to take advantage of locality of reference (e.g. fork/exec
 * or short fork/exit, and uio_yield()).
 *
 * CPU AFFINITY: cpu affinity is handled by attempting to either schedule
 * or (user level) preempt on the same cpu that a process was previously
 * scheduled to.  If we cannot do this but we are at enough of a higher
 * priority than the processes running on other cpus, we will allow the
 * process to be stolen by another cpu.
 *
 * WARNING! This routine cannot block.  bsd4_acquire_curproc() does
 * a deschedule/switch interlock and we can be moved to another cpu
 * the moment we are switched out.  Our LWKT run state is the only
 * thing preventing the transfer.
 *
 * The associated thread must NOT currently be scheduled (but can be the
 * current process after it has been LWKT descheduled).  It must NOT be on
 * a bsd4 scheduler queue either.  The purpose of this routine is to put
 * it on a scheduler queue or make it the current user process and LWKT
 * schedule it.  It is possible that the thread is in the middle of a LWKT
 * switchout on another cpu, lwkt_acquire() deals with that case.
 *
 * The process must be runnable.
 */
static void
bsd4_setrunqueue(struct lwp *lp)
{
	globaldata_t gd;
	bsd4_pcpu_t dd;
	int cpuid;
	cpumask_t mask;
	cpumask_t tmpmask;

	/*
	 * First validate the process state relative to the current cpu.
	 * We don't need the spinlock for this, just a critical section.
	 * We are in control of the process.
	 */
	KASSERT(lp->lwp_proc->p_stat == SRUN, ("setrunqueue: proc not SRUN"));
	KASSERT((lp->lwp_proc->p_flag & P_ONRUNQ) == 0,
	    ("lwp %d/%d already on runq! flag %08x", lp->lwp_proc->p_pid,
	     lp->lwp_tid, lp->lwp_proc->p_flag));
	KKASSERT((lp->lwp_thread->td_flags & TDF_RUNQ) == 0);

	/*
	 * Note: gd and dd are relative to the target thread's last cpu,
	 * NOT our current cpu.
	 */
	gd = lp->lwp_thread->td_gd;
	dd = &bsd4_pcpu[gd->gd_cpuid];
	/*
	 * If setrunqueue is being called due to being woken up, versus
	 * being called when acquiring the current process, recalculate
	 * estcpu.
	 *
	 * Because recalculate is only called once or twice for long sleeps,
	 * not every second forever while the process is sleeping, we have
	 * to manually call it to resynchronize p_cpbase on wakeup or it
	 * will wrap if the process was sleeping long enough (e.g. ~10 min
	 * with the ACPI timer) and really mess up the nticks calculation.
	 *
	 * NOTE: because P_ONRUNQ is not set, bsd4_recalculate_estcpu()'s
	 * calls to resetpriority will just play with the process's priority
	 * fields and not mess with any queues, so it is MPSAFE in this
	 * context.
	 */
	if (lp->lwp_slptime && (lp->lwp_thread->td_flags & TDF_RUNNING) == 0) {
		bsd4_recalculate_estcpu(lp);
	}
	/*
	 * This process is not supposed to be scheduled anywhere or assigned
	 * as the current process anywhere.  Assert the condition.
	 */
	KKASSERT(dd->uschedcp != lp);
	/*
	 * Check local cpu affinity.  The associated thread is stable at
	 * the moment.  Note that we may be checking another cpu here so we
	 * have to be careful.  We can only assign uschedcp on OUR cpu.
	 *
	 * This allows us to avoid actually queueing the process.
	 * acquire_curproc() will handle any threads we mistakenly schedule.
	 */
	cpuid = gd->gd_cpuid;
	if (gd == mycpu && (bsd4_curprocmask & (1 << cpuid)) == 0) {
		atomic_set_int(&bsd4_curprocmask, 1 << cpuid);
		dd->uschedcp = lp;
		dd->upri = lp->lwp_priority;
		lwkt_schedule(lp->lwp_thread);
		return;
	}
	/*
	 * gd and cpuid may still 'hint' at another cpu.  Even so we have
	 * to place this process on the userland scheduler's run queue for
	 * action by the target cpu.
	 */
	/*
	 * XXX fixme.  Could be part of a remrunqueue/setrunqueue
	 * operation when the priority is recalculated, so TDF_MIGRATING
	 * may already be set.
	 */
	if ((lp->lwp_thread->td_flags & TDF_MIGRATING) == 0)
		lwkt_giveaway(lp->lwp_thread);

	/*
	 * We lose control of lp the moment we release the spinlock after
	 * having placed lp on the queue.  i.e. another cpu could pick it
	 * up and it could exit, or its priority could be further adjusted,
	 * or something like that.
	 */
	spin_lock_wr(&bsd4_spin);
	bsd4_setrunqueue_locked(lp);
	/*
	 * gd, dd, and cpuid are still our target cpu 'hint', not our current
	 * cpu.
	 *
	 * We always try to schedule a LWP to its original cpu first.  It
	 * is possible for the scheduler helper or setrunqueue to assign
	 * the LWP to a different cpu before the one we asked for wakes
	 * up.
	 *
	 * If the LWP has higher priority (lower lwp_priority value) on
	 * its target cpu, reschedule on that cpu.
	 */
	if ((lp->lwp_thread->td_flags & TDF_NORESCHED) == 0) {
		if ((dd->upri & ~PRIMASK) > (lp->lwp_priority & ~PRIMASK)) {
			dd->upri = lp->lwp_priority;
			spin_unlock_wr(&bsd4_spin);
			lwkt_send_ipiq(gd, need_user_resched_remote, NULL);
			return;
		}
	}
	spin_unlock_wr(&bsd4_spin);
	/*
	 * Otherwise the LWP has a lower priority or we were asked not
	 * to reschedule.  Look for an idle cpu whose scheduler helper
	 * is ready to accept more work.
	 *
	 * Look for an idle cpu starting at our rotator (bsd4_scancpu).
	 *
	 * If no cpus are ready to accept work, just return.
	 */
	mask = ~bsd4_curprocmask & bsd4_rdyprocmask & mycpu->gd_other_cpus &
	       smp_active_mask;
	if (mask) {
		cpuid = bsd4_scancpu;
		if (++cpuid == ncpus)
			cpuid = 0;
		tmpmask = ~((1 << cpuid) - 1);
		if (mask & tmpmask)
			cpuid = bsfl(mask & tmpmask);
		else
			cpuid = bsfl(mask);
		atomic_clear_int(&bsd4_rdyprocmask, 1 << cpuid);
		bsd4_scancpu = cpuid;
		lwkt_schedule(&bsd4_pcpu[cpuid].helper_thread);
	}
}
/*
 * This routine is called from a systimer IPI.  It MUST be MP-safe and
 * the BGL IS NOT HELD ON ENTRY.  This routine is called at ESTCPUFREQ on
 * each cpu.
 *
 * Because this is effectively a 'fast' interrupt, we cannot safely
 * use spinlocks unless gd_spinlock_rd is NULL and gd_spinlocks_wr is 0,
 * even if the spinlocks are 'non conflicting'.  This is due to the way
 * spinlock conflicts against cached read locks are handled.
 */
static void
bsd4_schedulerclock(struct lwp *lp, sysclock_t period, sysclock_t cpstamp)
{
	globaldata_t gd = mycpu;
	bsd4_pcpu_t dd = &bsd4_pcpu[gd->gd_cpuid];

	/*
	 * Do we need to round-robin?  We round-robin 10 times a second.
	 * This should only occur for cpu-bound batch processes.
	 */
	if (++dd->rrcount >= usched_bsd4_rrinterval) {
		dd->rrcount = 0;
		need_user_resched();
	}

	/*
	 * As the process accumulates cpu time p_estcpu is bumped and may
	 * push the process into another scheduling queue.  It typically
	 * takes 4 ticks to bump the queue.
	 */
	lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUINCR);

	/*
	 * Reducing p_origcpu over time causes more of our estcpu to be
	 * returned to the parent when we exit.  This is a small tweak
	 * for the batch detection heuristic.
	 */
	if (lp->lwp_origcpu)
		--lp->lwp_origcpu;

	/*
	 * We can only safely call bsd4_resetpriority(), which uses spinlocks,
	 * if we aren't interrupting a thread that is using spinlocks.
	 * Otherwise we can deadlock with another cpu waiting for our read
	 * spinlocks to clear.
	 */
	if (gd->gd_spinlock_rd == NULL && gd->gd_spinlocks_wr == 0)
		bsd4_resetpriority(lp);
}
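
/*
 * Worked example (illustrative only): ESTCPUINCR is defined above as
 * ESTCPUPPQ / ESTCPURAMP, so a cpu-bound lwp charged ESTCPUINCR per
 * scheduler tick accumulates one full queue's worth of estcpu (ESTCPUPPQ)
 * every ESTCPURAMP ticks.  Since bsd4_resetpriority() converts estcpu to
 * priority units as lwp_estcpu * PPQ / ESTCPUPPQ, that is exactly one run
 * queue per ESTCPURAMP ticks, matching the "typically takes 4 ticks to
 * bump the queue" comment above.
 */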
/*
 * Called from acquire and from kern_synch's one-second timer (one of the
 * callout helper threads) with a critical section held.
 *
 * Decay p_estcpu based on the number of ticks we haven't been running
 * and our p_nice.  As the load increases each process observes a larger
 * number of idle ticks (because other processes are running in them).
 * This observation leads to a larger correction which tends to make the
 * system more 'batchy'.
 *
 * Note that no recalculation occurs for a process which sleeps and wakes
 * up in the same tick.  That is, a system doing thousands of context
 * switches per second will still only do serious estcpu calculations
 * ESTCPUFREQ times per second.
 */
static void
bsd4_recalculate_estcpu(struct lwp *lp)
{
	globaldata_t gd = mycpu;
	sysclock_t cpbase;
	int loadfac;
	int ndecay;
	int nticks;
	int nleft;

	/*
	 * We have to subtract periodic to get the last schedclock
	 * timeout time, otherwise we would get the upcoming timeout.
	 * Keep in mind that a process can migrate between cpus and
	 * while the scheduler clock should be very close, boundary
	 * conditions could lead to a small negative delta.
	 */
	cpbase = gd->gd_schedclock.time - gd->gd_schedclock.periodic;

	if (lp->lwp_slptime > 1) {
		/*
		 * Too much time has passed, do a coarse correction.
		 */
		lp->lwp_estcpu = lp->lwp_estcpu >> 1;
		bsd4_resetpriority(lp);
		lp->lwp_cpbase = cpbase;
		lp->lwp_cpticks = 0;
	} else if (lp->lwp_cpbase != cpbase) {
		/*
		 * Adjust estcpu if we are in a different tick.  Don't waste
		 * time if we are in the same tick.
		 *
		 * First calculate the number of ticks in the measurement
		 * interval.  The nticks calculation can wind up 0 due to
		 * a bug in the handling of lwp_slptime (as yet not found),
		 * so make sure we do not get a divide by 0 panic.
		 */
		nticks = (cpbase - lp->lwp_cpbase) / gd->gd_schedclock.periodic;
		if (nticks <= 0)
			nticks = 1;
		updatepcpu(lp, lp->lwp_cpticks, nticks);

		if ((nleft = nticks - lp->lwp_cpticks) < 0)
			nleft = 0;
		if (usched_debug == lp->lwp_proc->p_pid) {
			printf("pid %d tid %d estcpu %d cpticks %d nticks %d nleft %d",
				lp->lwp_proc->p_pid, lp->lwp_tid, lp->lwp_estcpu,
				lp->lwp_cpticks, nticks, nleft);
		}

		/*
		 * Calculate a decay value based on ticks remaining scaled
		 * down by the instantaneous load and p_nice.
		 */
		if ((loadfac = bsd4_runqcount) < 2)
			loadfac = 2;
		ndecay = nleft * usched_bsd4_decay * 2 *
			 (PRIO_MAX * 2 - lp->lwp_proc->p_nice) / (loadfac * PRIO_MAX * 2);

		/*
		 * Adjust p_estcpu.  Handle a border case where batch jobs
		 * can get stalled long enough to decay to zero when they
		 * are not running.
		 */
		if (lp->lwp_estcpu > ndecay * 2)
			lp->lwp_estcpu -= ndecay;
		else
			lp->lwp_estcpu >>= 1;

		if (usched_debug == lp->lwp_proc->p_pid)
			printf(" ndecay %d estcpu %d\n", ndecay, lp->lwp_estcpu);
		bsd4_resetpriority(lp);
		lp->lwp_cpbase = cpbase;
		lp->lwp_cpticks = 0;
	}
}
/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 *
 * This routine may be called with any process.
 *
 * This routine is called by fork1() for initial setup with the process
 * off the run queue, and also may be called normally with the process on or
 * off the run queue.
 */
static void
bsd4_resetpriority(struct lwp *lp)
{
	bsd4_pcpu_t dd;
	int newpriority;
	u_short newrqtype;
	int reschedcpu;

	/*
	 * Calculate the new priority and queue type
	 */
	spin_lock_wr(&bsd4_spin);

	newrqtype = lp->lwp_rtprio.type;

	switch(newrqtype) {
	case RTP_PRIO_REALTIME:
		newpriority = PRIBASE_REALTIME +
			      (lp->lwp_rtprio.prio & PRIMASK);
		break;
	case RTP_PRIO_NORMAL:
		newpriority = (lp->lwp_proc->p_nice - PRIO_MIN) * PPQ / NICEPPQ;
		newpriority += lp->lwp_estcpu * PPQ / ESTCPUPPQ;
		newpriority = newpriority * MAXPRI / (PRIO_RANGE * PPQ /
			      NICEPPQ + ESTCPUMAX * PPQ / ESTCPUPPQ);
		newpriority = PRIBASE_NORMAL + (newpriority & PRIMASK);
		break;
	case RTP_PRIO_IDLE:
		newpriority = PRIBASE_IDLE + (lp->lwp_rtprio.prio & PRIMASK);
		break;
	case RTP_PRIO_THREAD:
		newpriority = PRIBASE_THREAD + (lp->lwp_rtprio.prio & PRIMASK);
		break;
	default:
		panic("Bad RTP_PRIO %d", newrqtype);
		/* NOT REACHED */
	}

	/*
	 * The newpriority incorporates the queue type so do a simple masked
	 * check to determine if the process has moved to another queue.  If
	 * it has, and it is currently on a run queue, then move it.
	 */
	if ((lp->lwp_priority ^ newpriority) & ~PPQMASK) {
		lp->lwp_priority = newpriority;
		if (lp->lwp_proc->p_flag & P_ONRUNQ) {
			bsd4_remrunqueue_locked(lp);
			lp->lwp_rqtype = newrqtype;
			lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
			bsd4_setrunqueue_locked(lp);
			reschedcpu = lp->lwp_thread->td_gd->gd_cpuid;
		} else {
			lp->lwp_rqtype = newrqtype;
			lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
			reschedcpu = -1;
		}
	} else {
		lp->lwp_priority = newpriority;
		reschedcpu = -1;
	}
	spin_unlock_wr(&bsd4_spin);
	/*
	 * Determine if we need to reschedule the target cpu.  This only
	 * occurs if the LWP is already on a scheduler queue, which means
	 * that idle cpu notification has already occurred.  At most we
	 * need only issue a need_user_resched() on the appropriate cpu.
	 */
	if (reschedcpu >= 0) {
		dd = &bsd4_pcpu[reschedcpu];
		KKASSERT(dd->uschedcp != lp);
		if ((dd->upri & ~PRIMASK) > (lp->lwp_priority & ~PRIMASK)) {
			dd->upri = lp->lwp_priority;
			if (reschedcpu == mycpu->gd_cpuid) {
				need_user_resched();
			} else {
				lwkt_send_ipiq(lp->lwp_thread->td_gd,
					       need_user_resched_remote, NULL);
			}
		}
	}
}
/*
 * Called from fork1() when a new child process is being created.
 *
 * Give the child process an initial estcpu that is more batchy than
 * its parent and dock the parent for the fork (but do not
 * reschedule the parent).  This comprises the main part of our batch
 * detection heuristic for both parallel forking and sequential execs.
 *
 * Interactive processes will decay the boosted estcpu quickly while batch
 * processes will tend to compound it.
 * XXX lwp should be "spawning" instead of "forking"
 */
static void
bsd4_forking(struct lwp *plp, struct lwp *lp)
{
	lp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ);
	lp->lwp_origcpu = lp->lwp_estcpu;
	plp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ);
}
/*
 * Called when the parent reaps a child.  Propagate cpu use by the child
 * back to the parent.
 */
static void
bsd4_exiting(struct lwp *plp, struct lwp *lp)
{
	int delta;

	if (plp->lwp_proc->p_pid != 1) {
		delta = lp->lwp_estcpu - lp->lwp_origcpu;
		if (delta > 0)
			plp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + delta);
	}
}
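
/*
 * Worked example (illustrative only): if the parent has estcpu E at fork
 * time, bsd4_forking() starts the child at ESTCPULIM(E + ESTCPUPPQ) and
 * records that starting point in lwp_origcpu.  If the child later exits
 * with estcpu X, bsd4_exiting() folds only the excess max(0, X - origcpu)
 * back into the parent (clamped by ESTCPULIM), so a short-lived
 * interactive child returns little or nothing while a fork-heavy batch
 * job keeps compounding its parent's estcpu.
 */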
/*
 * chooseproc() is called when a cpu needs a user process to LWKT schedule,
 * it selects a user process and returns it.  If chklp is non-NULL and chklp
 * has a better or equal priority than the process that would otherwise be
 * chosen, NULL is returned.
 *
 * Until we fix the RUNQ code the chklp test has to be strict or we may
 * bounce between processes trying to acquire the current process designation.
 *
 * MPSAFE - must be called with bsd4_spin exclusive held.  The spinlock is
 *	    left intact through the entire routine.
 */
static struct lwp *
chooseproc_locked(struct lwp *chklp)
{
	struct lwp *lp;
	struct rq *q;
	u_int32_t *which, *which2;
	u_int32_t pri;
	u_int32_t rtqbits;
	u_int32_t tsqbits;
	u_int32_t idqbits;
	cpumask_t cpumask;

	rtqbits = bsd4_rtqueuebits;
	tsqbits = bsd4_queuebits;
	idqbits = bsd4_idqueuebits;
	cpumask = mycpu->gd_cpumask;

again:
	if (rtqbits) {
		pri = bsfl(rtqbits);
		q = &bsd4_rtqueues[pri];
		which = &bsd4_rtqueuebits;
		which2 = &rtqbits;
	} else if (tsqbits) {
		pri = bsfl(tsqbits);
		q = &bsd4_queues[pri];
		which = &bsd4_queuebits;
		which2 = &tsqbits;
	} else if (idqbits) {
		pri = bsfl(idqbits);
		q = &bsd4_idqueues[pri];
		which = &bsd4_idqueuebits;
		which2 = &idqbits;
	} else {
		return NULL;
	}
	lp = TAILQ_FIRST(q);
	KASSERT(lp, ("chooseproc: no lwp on busy queue"));

	while ((lp->lwp_cpumask & cpumask) == 0) {
		lp = TAILQ_NEXT(lp, lwp_procq);
		if (lp == NULL) {
			*which2 &= ~(1 << pri);
			goto again;
		}
	}

	/*
	 * If the passed lwp <chklp> is reasonably close to the selected
	 * lwp <lp>, return NULL (indicating that <chklp> should be kept).
	 *
	 * Note that we must error on the side of <chklp> to avoid bouncing
	 * between threads in the acquire code.
	 */
	if (chklp) {
		if (chklp->lwp_priority < lp->lwp_priority + PPQ)
			return(NULL);
	}

	/*
	 * If the chosen lwp does not reside on this cpu spend a few
	 * cycles looking for a better candidate at the same priority level.
	 * This is a fallback check, setrunqueue() tries to wakeup the
	 * correct cpu and is our front-line affinity.
	 */
	if (lp->lwp_thread->td_gd != mycpu &&
	    (chklp = TAILQ_NEXT(lp, lwp_procq)) != NULL
	) {
		if (chklp->lwp_thread->td_gd == mycpu) {
			++choose_affinity;
			lp = chklp;
		}
	}

	TAILQ_REMOVE(q, lp, lwp_procq);
	--bsd4_runqcount;
	if (TAILQ_EMPTY(q))
		*which &= ~(1 << pri);
	KASSERT((lp->lwp_proc->p_flag & P_ONRUNQ) != 0, ("not on runq6!"));
	lp->lwp_proc->p_flag &= ~P_ONRUNQ;
	return lp;
}
/*
 * Called via an ipi message to reschedule on another cpu.
 */
static void
need_user_resched_remote(void *dummy)
{
	need_user_resched();
}
/*
 * bsd4_remrunqueue_locked() removes a given process from the run queue
 * that it is on, clearing the queue busy bit if it becomes empty.
 *
 * Note that the user process scheduler is different from the LWKT scheduler.
 * The user process scheduler only manages user processes but it uses LWKT
 * underneath, and a user process operating in the kernel will often be
 * 'released' from our management.
 *
 * MPSAFE - bsd4_spin must be held exclusively on call
 */
static void
bsd4_remrunqueue_locked(struct lwp *lp)
{
	struct rq *q;
	u_int32_t *which;
	u_int8_t pri;

	KKASSERT(lp->lwp_proc->p_flag & P_ONRUNQ);
	lp->lwp_proc->p_flag &= ~P_ONRUNQ;
	--bsd4_runqcount;
	KKASSERT(bsd4_runqcount >= 0);

	pri = lp->lwp_rqindex;
	switch(lp->lwp_rqtype) {
	case RTP_PRIO_NORMAL:
		q = &bsd4_queues[pri];
		which = &bsd4_queuebits;
		break;
	case RTP_PRIO_REALTIME:
	case RTP_PRIO_FIFO:
		q = &bsd4_rtqueues[pri];
		which = &bsd4_rtqueuebits;
		break;
	case RTP_PRIO_IDLE:
		q = &bsd4_idqueues[pri];
		which = &bsd4_idqueuebits;
		break;
	default:
		panic("remrunqueue: invalid rtprio type");
		/* NOT REACHED */
	}
	TAILQ_REMOVE(q, lp, lwp_procq);
	if (TAILQ_EMPTY(q)) {
		KASSERT((*which & (1 << pri)) != 0,
			("remrunqueue: remove from empty queue"));
		*which &= ~(1 << pri);
	}
}
/*
 * bsd4_setrunqueue_locked()
 *
 * Add a process whose rqtype and rqindex had previously been calculated
 * onto the appropriate run queue.  Determine if the addition requires
 * a reschedule on a cpu and return the cpuid or -1.
 *
 * NOTE: Lower priorities are better priorities.
 *
 * MPSAFE - bsd4_spin must be held exclusively on call
 */
static void
bsd4_setrunqueue_locked(struct lwp *lp)
{
	struct rq *q;
	u_int32_t *which;
	int pri;

	KKASSERT((lp->lwp_proc->p_flag & P_ONRUNQ) == 0);
	lp->lwp_proc->p_flag |= P_ONRUNQ;
	++bsd4_runqcount;

	pri = lp->lwp_rqindex;

	switch(lp->lwp_rqtype) {
	case RTP_PRIO_NORMAL:
		q = &bsd4_queues[pri];
		which = &bsd4_queuebits;
		break;
	case RTP_PRIO_REALTIME:
	case RTP_PRIO_FIFO:
		q = &bsd4_rtqueues[pri];
		which = &bsd4_rtqueuebits;
		break;
	case RTP_PRIO_IDLE:
		q = &bsd4_idqueues[pri];
		which = &bsd4_idqueuebits;
		break;
	default:
		panic("remrunqueue: invalid rtprio type");
		/* NOT REACHED */
	}

	/*
	 * Add to the correct queue and set the appropriate bit.  If no
	 * lower priority (i.e. better) processes are in the queue then
	 * we want a reschedule, calculate the best cpu for the job.
	 *
	 * Always run reschedules on the LWPs original cpu.
	 */
	TAILQ_INSERT_TAIL(q, lp, lwp_procq);
	*which |= 1 << pri;
}
/*
 * For SMP systems a user scheduler helper thread is created for each
 * cpu and is used to allow one cpu to wakeup another for the purposes of
 * scheduling userland threads from setrunqueue().  UP systems do not
 * need the helper since there is only one cpu.  We can't use the idle
 * thread for this because we need to hold the MP lock.  Additionally,
 * doing things this way allows us to HLT idle cpus on MP systems.
 */
static void
sched_thread(void *dummy)
{
	globaldata_t gd;
	bsd4_pcpu_t dd;
	struct lwp *nlp;
	cpumask_t cpumask;
	cpumask_t tmpmask;
	int cpuid;
	int tmpid;

	gd = mycpu;
	cpuid = gd->gd_cpuid;		/* doesn't change */
	cpumask = 1 << cpuid;		/* doesn't change */
	dd = &bsd4_pcpu[cpuid];

	/*
	 * The scheduler thread does not need to hold the MP lock.  Since we
	 * are woken up only when no user processes are scheduled on a cpu, we
	 * can run at an ultra low priority.
	 */
	lwkt_setpri_self(TDPRI_USER_SCHEDULER);

	for (;;) {
		/*
		 * We use the LWKT deschedule-interlock trick to avoid racing
		 * bsd4_rdyprocmask.  This means we cannot block through to the
		 * manual lwkt_switch() call we make below.
		 */
		lwkt_deschedule_self(gd->gd_curthread);
		spin_lock_wr(&bsd4_spin);
		atomic_set_int(&bsd4_rdyprocmask, cpumask);
		if ((bsd4_curprocmask & cpumask) == 0) {
			if ((nlp = chooseproc_locked(NULL)) != NULL) {
				atomic_set_int(&bsd4_curprocmask, cpumask);
				dd->upri = nlp->lwp_priority;
				dd->uschedcp = nlp;
				spin_unlock_wr(&bsd4_spin);
				lwkt_acquire(nlp->lwp_thread);
				lwkt_schedule(nlp->lwp_thread);
			} else {
				spin_unlock_wr(&bsd4_spin);
			}
		} else {
			/*
			 * Someone scheduled us but raced.  In order to not lose
			 * track of the fact that there may be a LWP ready to go,
			 * forward the request to another cpu if available.
			 *
			 * Rotate through cpus starting with cpuid + 1.  Since cpuid
			 * is already masked out by gd_other_cpus, just use ~cpumask.
			 */
			tmpmask = ~bsd4_curprocmask & bsd4_rdyprocmask &
				  mycpu->gd_other_cpus;
			if (tmpmask) {
				if (tmpmask & ~(cpumask - 1))
					tmpid = bsfl(tmpmask & ~(cpumask - 1));
				else
					tmpid = bsfl(tmpmask);
				bsd4_scancpu = tmpid;
				atomic_clear_int(&bsd4_rdyprocmask, 1 << tmpid);
				spin_unlock_wr(&bsd4_spin);
				lwkt_schedule(&bsd4_pcpu[tmpid].helper_thread);
			} else {
				spin_unlock_wr(&bsd4_spin);
			}
		}
		lwkt_switch();
	}
}
/*
 * Setup our scheduler helpers.  Note that curprocmask bit 0 has already
 * been cleared by rqinit() and we should not mess with it further.
 */
static void
sched_thread_cpu_init(void)
{
	int i;

	printf("start scheduler helpers on cpus:");

	for (i = 0; i < ncpus; ++i) {
		bsd4_pcpu_t dd = &bsd4_pcpu[i];
		cpumask_t mask = 1 << i;

		if ((mask & smp_active_mask) == 0)
			continue;

		printf(" %d", i);

		lwkt_create(sched_thread, NULL, NULL, &dd->helper_thread,
			    TDF_STOPREQ, i, "usched %d", i);

		/*
		 * Allow user scheduling on the target cpu.  cpu #0 has already
		 * been enabled in rqinit().
		 */
		if (i)
			atomic_clear_int(&bsd4_curprocmask, mask);
		atomic_set_int(&bsd4_rdyprocmask, mask);
	}
	printf("\n");
}
SYSINIT(uschedtd, SI_SUB_FINISH_SMP, SI_ORDER_ANY, sched_thread_cpu_init, NULL)