/*
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 * $FreeBSD: src/sys/kern/kern_synch.c,v 1.87.2.6 2002/10/13 07:29:53 kbyanc Exp $
 * $DragonFly: src/sys/kern/kern_synch.c,v 1.41 2005/01/14 02:20:22 dillon Exp $
 */
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/thread2.h>
#include <sys/ktrace.h>
#include <sys/xwait.h>

#include <machine/cpu.h>
#include <machine/ipl.h>
#include <machine/smp.h>
static void sched_setup (void *dummy);
SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL)
int	sched_quantum;		/* Roundrobin scheduling quantum in ticks. */

int	ncpus2, ncpus2_shift, ncpus2_mask;

static struct callout loadav_callout;
static struct callout roundrobin_callout;
static struct callout schedcpu_callout;

struct loadavg averunnable =
	{ {0, 0, 0}, FSCALE };	/* load average, of runnable procs */
/*
 * Constants for averages over 1, 5, and 15 minutes
 * when sampling at 5 second intervals.
 */
static fixpt_t cexp[3] = {
	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
};
static void	endtsleep (void *);
static void	loadav (void *arg);
static void	roundrobin (void *arg);
static void	schedcpu (void *arg);
static void	updatepri (struct proc *p);
static void	crit_panicints (void);
static int
sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;

	new_val = sched_quantum * tick;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < tick)
		return (EINVAL);
	sched_quantum = new_val / tick;
	hogticks = 2 * sched_quantum;
	return (0);
}
SYSCTL_PROC(_kern, OID_AUTO, quantum, CTLTYPE_INT|CTLFLAG_RW,
	0, sizeof sched_quantum, sysctl_kern_quantum, "I", "");
int
roundrobin_interval(void)
{
	return (sched_quantum);
}
/*
 * Force switch among equal priority processes every 100ms.
 *
 * WARNING!  The MP lock is not held on ipi message remotes.
 */
static void
roundrobin_remote(void *arg)
{
	struct proc *p = lwkt_preempted_proc();
	if (p == NULL || RTP_PRIO_NEED_RR(p->p_rtprio.type))
		need_user_resched();
}

static void
roundrobin(void *arg)
{
	struct proc *p = lwkt_preempted_proc();
	if (p == NULL || RTP_PRIO_NEED_RR(p->p_rtprio.type))
		need_user_resched();
	lwkt_send_ipiq_mask(mycpu->gd_other_cpus, roundrobin_remote, NULL);
	callout_reset(&roundrobin_callout, sched_quantum, roundrobin, NULL);
}
void
resched_cpus(u_int32_t mask)
{
	lwkt_send_ipiq_mask(mask, roundrobin_remote, NULL);
}
/*
 * The load average is scaled by FSCALE (2048 typ).  The estimated cpu is
 * incremented at a rate of ESTCPUVFREQ per second (40hz typ), but this is
 * divided up across all cpu bound processes running in the system so an
 * individual process will get less under load.  ESTCPULIM typically caps
 * out at ESTCPUMAX (around 376, or 11 nice levels).
 *
 * Generally speaking the decay equation needs to break-even on growth
 * at the limit at all load levels >= 1.0, so if the estimated cpu for
 * a process increases by (ESTCPUVFREQ / load) per second, then the decay
 * should reach this value when estcpu reaches ESTCPUMAX.  That calculation
 * is:
 *
 *	ESTCPUMAX * decay = ESTCPUVFREQ / load
 *	decay = ESTCPUVFREQ / (load * ESTCPUMAX)
 *	decay = estcpu * 0.053 / load
 *
 * If the load is less than 1.0 we assume a load of 1.0.
 */

#define cload(loadav)	((loadav) < FSCALE ? FSCALE : (loadav))
#define decay_cpu(loadav,estcpu)	\
    ((estcpu) * (FSCALE * ESTCPUVFREQ / ESTCPUMAX) / cload(loadav))
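/*
 * Worked break-even check (illustrative only; assumes the typical values
 * quoted above: FSCALE = 2048, ESTCPUVFREQ = 40, ESTCPUMAX = 376).  With
 * a load average of 2.0 (loadav = 2 * FSCALE = 4096) and a process pegged
 * at the cap (estcpu = 376):
 *
 *	decay_cpu(4096, 376) = 376 * (2048 * 40 / 376) / 4096
 *			     = 376 * 217 / 4096 ~= 19
 *
 * while growth is ESTCPUVFREQ / load = 40 / 2 = 20 per second, so decay
 * and growth balance at the limit just as the derivation above requires.
 */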
/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
static fixpt_t	ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
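/*
 * Sanity check on the 95%/60s claim (illustrative): schedcpu() multiplies
 * p_pctcpu by ccpu once per second, so after 60 seconds the remaining
 * fraction is ccpu^60 = exp(-60/20) = exp(-3) ~= 0.0498, i.e. roughly 95%
 * of the old value has decayed away.
 */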
/* kernel uses `FSCALE', userland (SHOULD) use kern.fscale */
static int	fscale __unused = FSCALE;
SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, "");
/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the percentage of CPU used by a
 * process.
 */
#define	CCPU_SHIFT	11
/*
 * Recompute process priorities, once a second.
 */
/* ARGSUSED */
static void
schedcpu(void *arg)
{
	fixpt_t loadfac = averunnable.ldavg[0];
	struct proc *p;
	int ndecay;

	FOREACH_PROC_IN_SYSTEM(p) {
		/*
		 * Increment time in/out of memory and sleep time
		 * (if sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		p->p_swtime++;
		if (p->p_stat == SSLEEP || p->p_stat == SSTOP)
			p->p_slptime++;
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;

		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 *
		 * Note that interactive calculations do not occur for
		 * long sleeps (because that isn't necessarily indicative
		 * of an interactive process).
		 */
		if (p->p_slptime > 1)
			continue;

		/* prevent state changes and protect run queue */
		crit_enter();

		/*
		 * p_cpticks runs at ESTCPUFREQ but must be divided by the
		 * load average for par-100% use.  Higher p_interactive
		 * values mean less interactive, lower values mean more
		 * interactive.
		 */
		if ((((fixpt_t)p->p_cpticks * cload(loadfac)) >> FSHIFT) >
		    ESTCPUFREQ / 4) {	/* threshold elided in source;
					 * ESTCPUFREQ / 4 assumed */
			if (p->p_interactive < 127)
				++p->p_interactive;
		} else {
			if (p->p_interactive > -127)
				--p->p_interactive;
		}

		/*
		 * p_pctcpu is only for ps.
		 */
#if (FSHIFT >= CCPU_SHIFT)
		p->p_pctcpu += (ESTCPUFREQ == 100) ?
			((fixpt_t)p->p_cpticks) << (FSHIFT - CCPU_SHIFT) :
			100 * (((fixpt_t)p->p_cpticks)
				<< (FSHIFT - CCPU_SHIFT)) / ESTCPUFREQ;
#else
		p->p_pctcpu += ((FSCALE - ccpu) *
			(p->p_cpticks * FSCALE / ESTCPUFREQ)) >> FSHIFT;
#endif
		p->p_cpticks = 0;

		ndecay = decay_cpu(loadfac, p->p_estcpu);
		if (p->p_estcpu > ndecay)
			p->p_estcpu -= ndecay;
		else
			p->p_estcpu = 0;
		resetpriority(p);
		crit_exit();
	}
	wakeup((caddr_t)&lbolt);
	callout_reset(&schedcpu_callout, hz, schedcpu, NULL);
}
/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max p_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay p_estcpu to zero.
 */
static void
updatepri(struct proc *p)
{
	int ndecay;

	ndecay = decay_cpu(averunnable.ldavg[0], p->p_estcpu) * p->p_slptime;
	if (p->p_estcpu > ndecay)
		p->p_estcpu -= ndecay;
	else
		p->p_estcpu = 0;
	resetpriority(p);
}
/*
 * We're only looking at 7 bits of the address; everything is
 * aligned to 4, lots of things are aligned to greater powers
 * of 2.  Shift right by 8, i.e. drop the bottom 256 worth.
 */
#define TABLESIZE	128
static TAILQ_HEAD(slpquehead, thread) slpque[TABLESIZE];
#define LOOKUP(x)	(((intptr_t)(x) >> 8) & (TABLESIZE - 1))
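/*
 * Worked hash example (illustrative only): for a wait channel address of
 * 0x12345, LOOKUP() yields (0x12345 >> 8) & 127 = 0x123 & 0x7f = 0x23,
 * i.e. bucket 35.  Addresses that differ only in their low 8 bits (common
 * for fields of a single structure) share a bucket, while allocations
 * spaced 256 bytes or more apart spread across the 128 buckets.
 */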
/*
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 * The priority to be used (safepri) is machine-dependent, thus this
 * value is initialized and maintained in the machine-dependent layers.
 * This priority will typically be 0, or the lowest priority
 * that is safe for use on the interrupt stack; it can be made
 * higher to block network software interrupts after panics.
 */
int safepri;

void
sleepinit(void)
{
	int i;

	sched_quantum = hz/10;
	hogticks = 2 * sched_quantum;
	for (i = 0; i < TABLESIZE; i++)
		TAILQ_INIT(&slpque[i]);
}
/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If flags includes the PCATCH flag, signals are
 * checked before and after sleeping, else signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH
 * is set and a signal needs to be delivered, ERESTART is returned if the
 * current system call should be restarted if possible, and EINTR is
 * returned if the system call should be interrupted by the signal.
 *
 * Note that if we are a process, we release_curproc() before messing with
 * the LWKT scheduler.
 */
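/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * a driver waiting up to 5 seconds for an interrupt handler to post an
 * event on &sc->sc_event might do:
 *
 *	error = tsleep(&sc->sc_event, PCATCH, "scwait", 5 * hz);
 *	if (error == EWOULDBLOCK)
 *		...timed out...
 *	else if (error == EINTR || error == ERESTART)
 *		...interrupted by a signal...
 *	else
 *		...a wakeup(&sc->sc_event) occurred...
 */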
int
tsleep(void *ident, int flags, const char *wmesg, int timo)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;		/* may be NULL */
	int sig = 0, catch = flags & PCATCH;
	int id = LOOKUP(ident);
	struct callout thandle;

	/*
	 * NOTE: removed KTRPOINT, it could cause races due to blocking
	 * even in stable.  Just scrap it for now.
	 */
	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		crit_panicints();
		return (0);
	}
	KKASSERT(td != &mycpu->gd_idlethread);	/* you must be kidding! */
	crit_enter_quick(td);
	KASSERT(ident != NULL, ("tsleep: no ident"));
	KASSERT(p == NULL || p->p_stat == SRUN, ("tsleep %p %s %d",
		ident, wmesg, p->p_stat));

	td->td_wchan = ident;
	td->td_wmesg = wmesg;
	td->td_wdomain = flags & PDOMAIN_MASK;
	if (flags & PNORESCHED)
		td->td_flags |= TDF_NORESCHED;
	lwkt_deschedule_self(td);
	TAILQ_INSERT_TAIL(&slpque[id], td, td_threadq);
	if (timo) {
		callout_init(&thandle);
		callout_reset(&thandle, timo, endtsleep, td);
	}

	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling CURSIG, as we could stop there, and a wakeup
	 * or a SIGCONT (or both) could occur while we were stopped.
	 * A SIGCONT would cause us to be marked as SSLEEP
	 * without resuming us, thus we must be ready for sleep
	 * when CURSIG is called.  If the wakeup happens while we're
	 * stopped, td->td_wchan will be 0 upon return from CURSIG.
	 */
	if (p) {
		if (catch) {
			p->p_flag |= P_SINTR;
			if ((sig = CURSIG(p))) {
				if (td->td_wchan) {
					unsleep(td);
					lwkt_schedule_self(td);
				}
				p->p_stat = SRUN;
				goto resume;
			}
			if (td->td_wchan == NULL) {
				catch = 0;
				goto resume;
			}
		}

		/*
		 * If we are not the current process we have to remove ourself
		 * from the run queue.
		 */
		KASSERT(p->p_stat == SRUN, ("PSTAT NOT SRUN %d %d",
			p->p_pid, p->p_stat));
		/*
		 * If this is the current 'user' process schedule another one.
		 */
		clrrunnable(p, SSLEEP);
		p->p_stats->p_ru.ru_nvcsw++;
		mi_switch(p);
		KASSERT(p->p_stat == SRUN, ("tsleep: stat not srun"));
	} else {
		lwkt_switch();
	}
resume:
	if (p)
		p->p_flag &= ~P_SINTR;
	crit_exit_quick(td);
	td->td_flags &= ~TDF_NORESCHED;
	if (td->td_flags & TDF_TIMEOUT) {
		td->td_flags &= ~TDF_TIMEOUT;
		if (sig == 0)
			return (EWOULDBLOCK);
	} else if (timo) {
		callout_stop(&thandle);
	} else if (td->td_wmesg) {
		/*
		 * This can happen if a thread is woken up directly.  Clear
		 * wmesg to avoid debugging confusion.
		 */
		td->td_wmesg = NULL;
	}
	/* inline of iscaught() */
	if (p) {
		if (catch && (sig != 0 || (sig = CURSIG(p)))) {
			if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
				return (EINTR);
			return (ERESTART);
		}
	}
	return (0);
}
/*
 * Implement the timeout for tsleep.  We interlock against
 * wchan when setting TDF_TIMEOUT.  For processes we remove
 * the sleep if the process is stopped rather than sleeping,
 * so it remains stopped.
 */
static void
endtsleep(void *arg)
{
	thread_t td = arg;
	struct proc *p;

	crit_enter();
	if (td->td_wchan) {
		td->td_flags |= TDF_TIMEOUT;
		if ((p = td->td_proc) != NULL) {
			if (p->p_stat == SSLEEP)
				setrunnable(p);
			else
				unsleep(td);
		} else {
			unsleep(td);
			lwkt_schedule(td);
		}
	}
	crit_exit();
}
/*
 * Remove a process from its wait queue
 */
void
unsleep(struct thread *td)
{
	crit_enter();
	if (td->td_wchan) {
#if 0
		/* dead code: references the old proc-based xwait queues */
		if (p->p_flag & P_XSLEEP) {
			struct xwait *w = p->p_wchan;
			TAILQ_REMOVE(&w->waitq, p, p_procq);
			p->p_flag &= ~P_XSLEEP;
		} else
#endif
		TAILQ_REMOVE(&slpque[LOOKUP(td->td_wchan)], td, td_threadq);
		td->td_wchan = NULL;
	}
	crit_exit();
}
/*
 * Make all processes sleeping on the explicit lock structure runnable.
 */
void
xwakeup(struct xwait *w)
{
	struct proc *p;

	crit_enter();
	while ((p = TAILQ_FIRST(&w->waitq)) != NULL) {
		TAILQ_REMOVE(&w->waitq, p, p_procq);
		KASSERT(p->p_wchan == w && (p->p_flag & P_XSLEEP),
		    ("xwakeup: wchan mismatch for %p (%p/%p) %08x",
		    p, p->p_wchan, w, p->p_flag & P_XSLEEP));
		p->p_wchan = NULL;
		p->p_flag &= ~P_XSLEEP;
		if (p->p_stat == SSLEEP) {
			/* OPTIMIZED EXPANSION OF setrunnable(p); */
			if (p->p_slptime > 1)
				updatepri(p);
			p->p_slptime = 0;
			p->p_stat = SRUN;
			if (p->p_flag & P_INMEM) {
				lwkt_schedule(p->p_thread);
			} else {
				p->p_flag |= P_SWAPINREQ;
				wakeup((caddr_t)&proc0);
			}
			/* END INLINE EXPANSION */
		}
	}
	crit_exit();
}
/*
 * Make all processes sleeping on the specified identifier runnable.
 */
static void
_wakeup(void *ident, int domain, int count)
{
	struct slpquehead *qp;
	struct thread *td;
	struct thread *ntd;
	struct proc *p;
	int id = LOOKUP(ident);

	crit_enter();
	qp = &slpque[id];
	for (td = TAILQ_FIRST(qp); td != NULL; td = ntd) {
		ntd = TAILQ_NEXT(td, td_threadq);
		if (td->td_wchan == ident && td->td_wdomain == domain) {
			TAILQ_REMOVE(qp, td, td_threadq);
			td->td_wchan = NULL;
			if ((p = td->td_proc) != NULL &&
			    p->p_stat == SSLEEP) {
				/* OPTIMIZED EXPANSION OF setrunnable(p); */
				if (p->p_slptime > 1)
					updatepri(p);
				p->p_slptime = 0;
				p->p_stat = SRUN;
				if (p->p_flag & P_INMEM) {
					/*
					 * LWKT scheduled now, there is no
					 * userland runq interaction until
					 * the thread tries to return to user
					 * mode.
					 */
					lwkt_schedule(td);
				} else {
					p->p_flag |= P_SWAPINREQ;
					wakeup((caddr_t)&proc0);
				}
				/* END INLINE EXPANSION */
			} else if (p == NULL) {
				lwkt_schedule(td);
			}
			if (count && --count == 0)
				break;
		}
	}
	crit_exit();
}
void
wakeup(void *ident)
{
	_wakeup(ident, 0, 0);
}

void
wakeup_one(void *ident)
{
	_wakeup(ident, 0, 1);
}

void
wakeup_domain(void *ident, int domain)
{
	_wakeup(ident, domain, 0);
}

void
wakeup_domain_one(void *ident, int domain)
{
	_wakeup(ident, domain, 1);
}
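/*
 * Note on the wrappers above (editorial sketch, not original text): the
 * count argument of _wakeup() bounds how many matching sleepers are
 * released, with 0 meaning "all of them".  So for a producer/consumer
 * queue where only one consumer can make progress per queued item,
 * wakeup_one(&q) avoids the thundering herd that wakeup(&q) would cause,
 * and the _domain variants do the same for sleepers keyed by
 * (ident, domain), the domain having been taken from the tsleep flags
 * via PDOMAIN_MASK.
 */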
/*
 * The machine independent parts of mi_switch().
 *
 * 'p' must be the current process.
 */
void
mi_switch(struct proc *p)
{
	thread_t td = p->p_thread;
	struct rlimit *rlim;
	u_int64_t ttime;

	KKASSERT(td == mycpu->gd_curthread);

	crit_enter_quick(td);

	/*
	 * Check if the process exceeds its cpu resource allocation.
	 * If over max, kill it.  Time spent in interrupts is not
	 * included.  YYY 64 bit match is expensive.  Ick.
	 */
	ttime = td->td_sticks + td->td_uticks;
	if (p->p_stat != SZOMB && p->p_limit->p_cpulimit != RLIM_INFINITY &&
	    ttime > p->p_limit->p_cpulimit) {
		rlim = &p->p_rlimit[RLIMIT_CPU];
		if (ttime / (rlim_t)1000000 >= rlim->rlim_max) {
			killproc(p, "exceeded maximum CPU limit");
		} else {
			psignal(p, SIGXCPU);
			if (rlim->rlim_cur < rlim->rlim_max) {
				/* XXX: we should make a private copy */
				rlim->rlim_cur += 5;
			}
		}
	}

	/*
	 * If we are in a SSTOPped state we deschedule ourselves.
	 * YYY this needs to be cleaned up, remember that LWKTs stay on
	 * their run queue which works differently than the user scheduler
	 * which removes the process from the runq when it runs it.
	 */
	mycpu->gd_cnt.v_swtch++;
	if (p->p_stat == SSTOP)
		lwkt_deschedule_self(td);
	lwkt_switch();
	crit_exit_quick(td);
}
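/*
 * Worked units check for the limit test above (illustrative): td_sticks +
 * td_uticks accumulate cpu time in microseconds, while rlim_max is in
 * seconds.  With a hard limit of rlim_max = 10 the process is killed once
 * ttime reaches 10 * 1000000 = 10000000, i.e. when
 * ttime / (rlim_t)1000000 >= 10.  Between the soft and hard limits it
 * instead receives SIGXCPU, and the soft limit is nudged up 5 seconds to
 * pace subsequent warnings.
 */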
/*
 * Change process state to be runnable,
 * placing it on the run queue if it is in memory,
 * and awakening the swapper if it isn't in memory.
 */
void
setrunnable(struct proc *p)
{
	crit_enter();

	switch (p->p_stat) {
	case 0:
	case SRUN:
	case SZOMB:
	default:
		panic("setrunnable");
	case SSTOP:
	case SSLEEP:
		unsleep(p->p_thread);	/* e.g. when sending signals */
		break;
	case SIDL:
		break;
	}
	p->p_stat = SRUN;

	/*
	 * The process is controlled by LWKT at this point, we do not mess
	 * around with the userland scheduler until the thread tries to
	 * return to user mode.
	 */
	if (p->p_flag & P_INMEM)
		lwkt_schedule(p->p_thread);
	crit_exit();
	if (p->p_slptime > 1)
		updatepri(p);
	p->p_slptime = 0;
	if ((p->p_flag & P_INMEM) == 0) {
		p->p_flag |= P_SWAPINREQ;
		wakeup((caddr_t)&proc0);
	}
}
/*
 * Change the process state to NOT be runnable, removing it from the run
 * queue.
 */
void
clrrunnable(struct proc *p, int stat)
{
	crit_enter_quick(p->p_thread);
	if (p->p_stat == SRUN && (p->p_flag & P_ONRUNQ))
		remrunqueue(p);
	p->p_stat = stat;
	crit_exit_quick(p->p_thread);
}
/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
void
resetpriority(struct proc *p)
{
	int newpriority;
	int interactive;
	int opq;
	int npq;

	/*
	 * Set p_priority for general process comparisons
	 */
	switch(p->p_rtprio.type) {
	case RTP_PRIO_REALTIME:
		p->p_priority = PRIBASE_REALTIME + p->p_rtprio.prio;
		return;
	case RTP_PRIO_NORMAL:
		break;
	case RTP_PRIO_IDLE:
		p->p_priority = PRIBASE_IDLE + p->p_rtprio.prio;
		return;
	case RTP_PRIO_THREAD:
		p->p_priority = PRIBASE_THREAD + p->p_rtprio.prio;
		return;
	}

	/*
	 * NORMAL priorities fall through.  These are based on niceness
	 * and cpu use.  Lower numbers == higher priorities.
	 */
	newpriority = (int)(NICE_ADJUST(p->p_nice - PRIO_MIN) +
			p->p_estcpu / ESTCPURAMP);

	/*
	 * p_interactive is -128 to +127 and represents very long term
	 * interactivity or batch (whereas estcpu is a much faster variable).
	 * Interactivity can modify the priority by up to 8 units either way.
	 * (8 units == approximately 4 nice levels).
	 */
	interactive = p->p_interactive / 10;
	newpriority += interactive;

	newpriority = MIN(newpriority, MAXPRI);
	newpriority = MAX(newpriority, 0);
	npq = newpriority / PPQ;
	crit_enter();
	opq = (p->p_priority & PRIMASK) / PPQ;
	if (p->p_stat == SRUN && (p->p_flag & P_ONRUNQ) && opq != npq) {
		/*
		 * We have to move the process to another queue
		 */
		remrunqueue(p);
		p->p_priority = PRIBASE_NORMAL + newpriority;
		setrunqueue(p);
	} else {
		/*
		 * We can just adjust the priority and it will be picked
		 * up later.
		 */
		KKASSERT(opq == npq || (p->p_flag & P_ONRUNQ) == 0);
		p->p_priority = PRIBASE_NORMAL + newpriority;
	}
	crit_exit();
}
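/*
 * Illustrative queue-move example for the logic above (assumes PPQ = 4;
 * the real value comes from the scheduler headers): a runnable process
 * whose queued priority is 10 sits in run queue opq = 10 / 4 = 2.  If the
 * recomputation yields newpriority = 14, then npq = 14 / 4 = 3 != opq and
 * the process must be pulled off its old queue and requeued, whereas a
 * change to newpriority = 11 keeps npq == opq and only the stored
 * priority is rewritten.
 */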
/*
 * Compute a tenex style load average of a quantity on
 * 1, 5 and 15 minute intervals.
 */
static void
loadav(void *arg)
{
	struct loadavg *avg = &averunnable;
	struct thread *td;
	struct proc *p;
	int i;
	int nrun = 0;

	FOREACH_PROC_IN_SYSTEM(p) {
		if (p->p_stat != SRUN && p->p_stat != SIDL)
			continue;
		if ((td = p->p_thread) == NULL)
			continue;
		if (td->td_flags & TDF_BLOCKED)
			continue;
		++nrun;
	}
	for (i = 0; i < 3; i++) {
		avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
		    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
	}

	/*
	 * Schedule the next update to occur after 5 seconds, but add a
	 * random variation to avoid synchronisation with processes that
	 * run at regular intervals.
	 */
	callout_reset(&loadav_callout, hz * 4 + (int)(random() % (hz * 2 + 1)),
	    loadav, NULL);
}
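/*
 * Fixed-point sanity check for the update above (illustrative,
 * FSCALE = 2048):
 *
 *	ldavg' = (cexp * ldavg + nrun * FSCALE * (FSCALE - cexp)) >> FSHIFT
 *
 * is the exponential moving average ldavg' = a * ldavg +
 * (1 - a) * nrun * FSCALE with a = cexp / FSCALE.  With nrun steady at 1,
 * ldavg converges to 1 * FSCALE = 2048, which userland reports as a load
 * of 1.00; sixty 5-second samples (five minutes) of nrun = 0 then
 * multiply the 5-minute figure by cexp[1]^60 = exp(-60/60) ~= 0.37.
 */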
/* ARGSUSED */
static void
sched_setup(void *dummy)
{
	callout_init(&loadav_callout);
	callout_init(&roundrobin_callout);
	callout_init(&schedcpu_callout);

	/* Kick off timeout driven events by calling first time. */
	roundrobin(NULL);
	schedcpu(NULL);
	loadav(NULL);
}
/*
 * We adjust the priority of the current process.  The priority of
 * a process gets worse as it accumulates CPU time.  The cpu usage
 * estimator (p_estcpu) is increased here.  resetpriority() will
 * compute a different priority each time p_estcpu increases by
 * INVERSE_ESTCPU_WEIGHT (until MAXPRI is reached).
 *
 * The cpu usage estimator ramps up quite quickly when the process is
 * running (linearly), and decays away exponentially, at a rate which
 * is proportionally slower when the system is busy.  The basic principle
 * is that the system will 90% forget that the process used a lot of CPU
 * time in 5 * loadav seconds.  This causes the system to favor processes
 * which haven't run much recently, and to round-robin among other processes.
 *
 * The actual schedulerclock interrupt rate is ESTCPUFREQ, but we generally
 * want to ramp-up at a faster rate, ESTCPUVFREQ, so p_estcpu is scaled
 * by (ESTCPUVFREQ / ESTCPUFREQ).  You can control the ramp-up/ramp-down
 * rate by adjusting ESTCPUVFREQ in sys/proc.h in integer multiples
 * of ESTCPUFREQ.
 *
 * WARNING! called from a fast-int or an IPI, the MP lock MIGHT NOT BE HELD
 * and we cannot block.
 */
void
schedulerclock(void *dummy)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	if ((p = td->td_proc) != NULL) {
		p->p_cpticks++;		/* cpticks runs at ESTCPUFREQ */
		p->p_estcpu = ESTCPULIM(p->p_estcpu +
		    ESTCPUVFREQ / ESTCPUFREQ);
	}
}
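/*
 * Illustrative ramp arithmetic (assumed values: ESTCPUFREQ = 20 ticks/sec
 * and ESTCPUVFREQ = 40 per the "40hz typ" note above; check sys/proc.h
 * for the real definitions): each schedulerclock tick adds
 * ESTCPUVFREQ / ESTCPUFREQ = 2 to p_estcpu, so a process monopolizing the
 * cpu ramps up at 40 estcpu units per second and, with ESTCPUMAX ~= 376,
 * saturates ESTCPULIM() after roughly 9-10 seconds of uncontested running.
 */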
static void
crit_panicints(void)
{
	int s;
	int cpri;

	s = splhigh();
	cpri = crit_panic_save();
	splx(safepri);
	crit_panic_restore(cpri);
	splx(s);
}