/*
 * Copyright (c) 2012 The DragonFly Project.  All rights reserved.
 * Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>,
 * by Mihai Carabas <mihai.carabas@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/sysctl.h>
#include <sys/resourcevar.h>
#include <sys/spinlock.h>
#include <sys/cpu_topology.h>
#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>

#include <sys/ktr.h>

#include <machine/cpu.h>
#include <machine/smp.h>
/*
 * Priorities.  Note that with 32 run queues per scheduler each queue
 * represents four priority levels.
 */

#define PRIMASK			(MAXPRI - 1)
#define PRIBASE_REALTIME	0
#define PRIBASE_NORMAL		MAXPRI
#define PRIBASE_IDLE		(MAXPRI * 2)
#define PRIBASE_THREAD		(MAXPRI * 3)
#define PRIBASE_NULL		(MAXPRI * 4)

#define NQS	32			/* 32 run queues. */
#define PPQ	(MAXPRI / NQS)		/* priorities per queue */
#define PPQMASK	(PPQ - 1)
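/*
 * Illustrative note (not in the original sources): with the usual MAXPRI
 * of 128, PPQ works out to 128 / 32 = 4, so a normal-class user priority
 * p lands on run queue (p & PRIMASK) / PPQ, which is exactly how
 * bsd4_resetpriority() computes lwp_rqindex below.  A masked priority of
 * 63, for example, maps to queue index 15.
 */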
/*
 * NICEPPQ	- number of nice units per priority queue
 * ESTCPUPPQ	- number of estcpu units per priority queue
 * ESTCPUMAX	- number of estcpu units
 */
#define NICEPPQ		2
#define ESTCPUPPQ	512
#define ESTCPUMAX	(ESTCPUPPQ * NQS)
#define BATCHMAX	(ESTCPUFREQ * 30)
#define PRIO_RANGE	(PRIO_MAX - PRIO_MIN + 1)

#define ESTCPULIM(v)	min((v), ESTCPUMAX)
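/*
 * Illustrative note (not in the original sources): with ESTCPUPPQ at 512
 * and NQS at 32, ESTCPUMAX comes to 512 * 32 = 16384, which is the 16384
 * figure quoted in the priority table inside bsd4_resetpriority().
 * ESTCPULIM() simply clamps an estcpu value to that ceiling.
 */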
#define lwp_priority	lwp_usdata.bsd4.priority
#define lwp_rqindex	lwp_usdata.bsd4.rqindex
#define lwp_estcpu	lwp_usdata.bsd4.estcpu
#define lwp_batch	lwp_usdata.bsd4.batch
#define lwp_rqtype	lwp_usdata.bsd4.rqtype
static void bsd4_acquire_curproc(struct lwp *lp);
static void bsd4_release_curproc(struct lwp *lp);
static void bsd4_select_curproc(globaldata_t gd);
static void bsd4_setrunqueue(struct lwp *lp);
static void bsd4_schedulerclock(struct lwp *lp, sysclock_t period,
				sysclock_t cpstamp);
static void bsd4_recalculate_estcpu(struct lwp *lp);
static void bsd4_resetpriority(struct lwp *lp);
static void bsd4_forking(struct lwp *plp, struct lwp *lp);
static void bsd4_exiting(struct lwp *lp, struct proc *);
static void bsd4_uload_update(struct lwp *lp);
static void bsd4_yield(struct lwp *lp);
static void bsd4_need_user_resched_remote(void *dummy);
static int bsd4_batchy_looser_pri_test(struct lwp *lp);
static struct lwp *bsd4_chooseproc_locked_cache_coherent(struct lwp *chklp);
static void bsd4_kick_helper(struct lwp *lp);
static struct lwp *bsd4_chooseproc_locked(struct lwp *chklp);
static void bsd4_remrunqueue_locked(struct lwp *lp);
static void bsd4_setrunqueue_locked(struct lwp *lp);
static void bsd4_changedcpu(struct lwp *lp);
struct usched usched_bsd4 = {
	{ NULL },
	"bsd4", "Original DragonFly Scheduler",
	NULL,			/* default registration */
	NULL,			/* default deregistration */
	bsd4_acquire_curproc,
	bsd4_release_curproc,
	bsd4_setrunqueue,
	bsd4_schedulerclock,
	bsd4_recalculate_estcpu,
	bsd4_resetpriority,
	bsd4_forking,
	bsd4_exiting,
	bsd4_uload_update,
	NULL,			/* setcpumask not supported */
	bsd4_yield,
	bsd4_changedcpu
};
struct usched_bsd4_pcpu {
	struct thread	*helper_thread;
	short		rrcount;
	short		upri;
	struct lwp	*uschedcp;
	struct lwp	*old_uschedcp;
	cpu_node_t	*cpunode;
};

typedef struct usched_bsd4_pcpu	*bsd4_pcpu_t;
/*
 * We have NQS (32) run queues per scheduling class.  For the normal
 * class, there are 128 priorities scaled onto these 32 queues.  New
 * processes are added to the last entry in each queue, and processes
 * are selected for running by taking them from the head and maintaining
 * a simple FIFO arrangement.  Realtime and Idle priority processes have
 * an explicit 0-31 priority which maps directly onto their class queue
 * index.  When a queue has something in it, the corresponding bit is
 * set in the queuebits variable, allowing a single read to determine
 * the state of all 32 queues and then a ffs() to find the first busy
 * queue.
 */
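/*
 * Illustrative sketch (not part of the original sources): picking the
 * most urgent non-empty queue is a single bit scan over the queuebits
 * word, e.g.
 *
 *	pri = bsfl(bsd4_queuebits);	lowest set bit = best queue
 *	q   = &bsd4_queues[pri];	corresponding FIFO
 *
 * which is precisely the pattern bsd4_chooseproc_locked() uses below.
 */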
static struct rq bsd4_queues[NQS];
static struct rq bsd4_rtqueues[NQS];
static struct rq bsd4_idqueues[NQS];
static u_int32_t bsd4_queuebits;
static u_int32_t bsd4_rtqueuebits;
static u_int32_t bsd4_idqueuebits;
/* currently running a user process */
static cpumask_t bsd4_curprocmask = CPUMASK_INITIALIZER_ALLONES;
/* ready to accept a user process */
static cpumask_t bsd4_rdyprocmask;
static int	 bsd4_runqcount;
static volatile int bsd4_scancpu;
static struct spinlock bsd4_spin;
static struct usched_bsd4_pcpu bsd4_pcpu[MAXCPU];
static struct sysctl_ctx_list usched_bsd4_sysctl_ctx;
static struct sysctl_oid *usched_bsd4_sysctl_tree;
/* Debug info exposed through debug.* sysctl */

SYSCTL_INT(_debug, OID_AUTO, bsd4_runqcount, CTLFLAG_RD,
	   &bsd4_runqcount, 0,
	   "Number of run queues");

static int usched_bsd4_debug = -1;
SYSCTL_INT(_debug, OID_AUTO, bsd4_scdebug, CTLFLAG_RW,
	   &usched_bsd4_debug, 0,
	   "Print debug information for this pid");

static int usched_bsd4_pid_debug = -1;
SYSCTL_INT(_debug, OID_AUTO, bsd4_pid_debug, CTLFLAG_RW,
	   &usched_bsd4_pid_debug, 0,
	   "Print KTR debug information for this pid");
/* Tuning usched_bsd4 - configurable through kern.usched_bsd4.* */
static int usched_bsd4_smt = 0;
static int usched_bsd4_cache_coherent = 0;
static int usched_bsd4_upri_affinity = 16; /* 32 queues - half-way */
static int usched_bsd4_queue_checks = 5;
static int usched_bsd4_stick_to_level = 0;
static long usched_bsd4_kicks;
static int usched_bsd4_rrinterval = (ESTCPUFREQ + 9) / 10;
static int usched_bsd4_decay = 8;
static int usched_bsd4_batch_time = 10;
/* KTR debug printings */

KTR_INFO_MASTER_EXTERN(usched);

#if !defined(KTR_USCHED_BSD4)
#define	KTR_USCHED_BSD4	KTR_ALL
#endif

KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_acquire_curproc_urw, 0,
    "USCHED_BSD4(bsd4_acquire_curproc in user_resched_wanted "
    "after release: pid %d, cpuid %d, curr_cpuid %d)",
    pid_t pid, int cpuid, int curr);
KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_acquire_curproc_before_loop, 0,
    "USCHED_BSD4(bsd4_acquire_curproc before loop: pid %d, cpuid %d, "
    "curr_cpuid %d)",
    pid_t pid, int cpuid, int curr);
KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_acquire_curproc_not, 0,
    "USCHED_BSD4(bsd4_acquire_curproc couldn't acquire after "
    "bsd4_setrunqueue: pid %d, cpuid %d, curr_lp pid %d, curr_cpuid %d)",
    pid_t pid, int cpuid, pid_t curr_pid, int curr_cpuid);
KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_acquire_curproc_switch, 0,
    "USCHED_BSD4(bsd4_acquire_curproc after lwkt_switch: pid %d, "
    "cpuid %d, curr_cpuid %d)",
    pid_t pid, int cpuid, int curr);

KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_release_curproc, 0,
    "USCHED_BSD4(bsd4_release_curproc before select: pid %d, "
    "cpuid %d, curr_cpuid %d)",
    pid_t pid, int cpuid, int curr);
KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_select_curproc, 0,
    "USCHED_BSD4(bsd4_select_curproc before select: pid %d, "
    "cpuid %d, old_pid %d, old_cpuid %d, curr_cpuid %d)",
    pid_t pid, int cpuid, pid_t old_pid, int old_cpuid, int curr);
KTR_INFO(KTR_USCHED_BSD4, usched, batchy_test_false, 0,
    "USCHED_BSD4(batchy_looser_pri_test false: pid %d, "
    "cpuid %d, verify_mask %lu)",
    pid_t pid, int cpuid, unsigned long mask);
KTR_INFO(KTR_USCHED_BSD4, usched, batchy_test_true, 0,
    "USCHED_BSD4(batchy_looser_pri_test true: pid %d, "
    "cpuid %d, verify_mask %lu)",
    pid_t pid, int cpuid, unsigned long mask);

KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_setrunqueue_fc_smt, 0,
    "USCHED_BSD4(bsd4_setrunqueue free cpus smt: pid %d, cpuid %d, "
    "mask %lu, curr_cpuid %d)",
    pid_t pid, int cpuid, unsigned long mask, int curr);
KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_setrunqueue_fc_non_smt, 0,
    "USCHED_BSD4(bsd4_setrunqueue free cpus check non_smt: pid %d, "
    "cpuid %d, mask %lu, curr_cpuid %d)",
    pid_t pid, int cpuid, unsigned long mask, int curr);
KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_setrunqueue_rc, 0,
    "USCHED_BSD4(bsd4_setrunqueue running cpus check: pid %d, "
    "cpuid %d, mask %lu, curr_cpuid %d)",
    pid_t pid, int cpuid, unsigned long mask, int curr);
KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_setrunqueue_found, 0,
    "USCHED_BSD4(bsd4_setrunqueue found cpu: pid %d, cpuid %d, "
    "mask %lu, found_cpuid %d, curr_cpuid %d)",
    pid_t pid, int cpuid, unsigned long mask, int found_cpuid, int curr);
KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_setrunqueue_not_found, 0,
    "USCHED_BSD4(bsd4_setrunqueue not found cpu: pid %d, cpuid %d, "
    "try_cpuid %d, curr_cpuid %d)",
    pid_t pid, int cpuid, int try_cpuid, int curr);
KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_setrunqueue_found_best_cpuid, 0,
    "USCHED_BSD4(bsd4_setrunqueue found cpu: pid %d, cpuid %d, "
    "mask %lu, found_cpuid %d, curr_cpuid %d)",
    pid_t pid, int cpuid, unsigned long mask, int found_cpuid, int curr);
KTR_INFO(KTR_USCHED_BSD4, usched, bsd4_chooseproc, 0,
    "USCHED_BSD4(chooseproc: pid %d, old_cpuid %d, curr_cpuid %d)",
    pid_t pid, int old_cpuid, int curr);
KTR_INFO(KTR_USCHED_BSD4, usched, chooseproc_cc, 0,
    "USCHED_BSD4(chooseproc_cc: pid %d, old_cpuid %d, curr_cpuid %d)",
    pid_t pid, int old_cpuid, int curr);
KTR_INFO(KTR_USCHED_BSD4, usched, chooseproc_cc_not_good, 0,
    "USCHED_BSD4(chooseproc_cc not good: pid %d, old_cpumask %lu, "
    "sibling_mask %lu, curr_cpumask %lu)",
    pid_t pid, unsigned long old_cpumask, unsigned long sibling_mask,
    unsigned long curr);
KTR_INFO(KTR_USCHED_BSD4, usched, chooseproc_cc_elected, 0,
    "USCHED_BSD4(chooseproc_cc elected: pid %d, old_cpumask %lu, "
    "sibling_mask %lu, curr_cpumask: %lu)",
    pid_t pid, unsigned long old_cpumask, unsigned long sibling_mask,
    unsigned long curr);

KTR_INFO(KTR_USCHED_BSD4, usched, sched_thread_no_process, 0,
    "USCHED_BSD4(sched_thread %d no process scheduled: pid %d, old_cpuid %d)",
    int id, pid_t pid, int cpuid);
KTR_INFO(KTR_USCHED_BSD4, usched, sched_thread_process, 0,
    "USCHED_BSD4(sched_thread %d process scheduled: pid %d, old_cpuid %d)",
    int id, pid_t pid, int cpuid);
KTR_INFO(KTR_USCHED_BSD4, usched, sched_thread_no_process_found, 0,
    "USCHED_BSD4(sched_thread %d no process found; tmpmask %lu)",
    int id, unsigned long tmpmask);
/*
 * Initialize the run queues at boot time.
 */
static void
bsd4_rqinit(void *dummy)
{
	int i;

	spin_init(&bsd4_spin, "bsd4rq");
	for (i = 0; i < NQS; i++) {
		TAILQ_INIT(&bsd4_queues[i]);
		TAILQ_INIT(&bsd4_rtqueues[i]);
		TAILQ_INIT(&bsd4_idqueues[i]);
	}
	ATOMIC_CPUMASK_NANDBIT(bsd4_curprocmask, 0);
}
SYSINIT(runqueue, SI_BOOT2_USCHED, SI_ORDER_FIRST, bsd4_rqinit, NULL);
/*
 * BSD4_ACQUIRE_CURPROC
 *
 * This function is called when the kernel intends to return to userland.
 * It is responsible for making the thread the current designated userland
 * thread for this cpu, blocking if necessary.
 *
 * The kernel will not depress our LWKT priority until after we return,
 * in case we have to shove over to another cpu.
 *
 * We must determine our thread's disposition before we switch away.  This
 * is very sensitive code.
 *
 * WARNING! THIS FUNCTION IS ALLOWED TO CAUSE THE CURRENT THREAD TO MIGRATE
 * TO ANOTHER CPU!  Because most of the kernel assumes that no migration will
 * occur, this function is called only under very controlled circumstances.
 */
static void
bsd4_acquire_curproc(struct lwp *lp)
{
	globaldata_t gd;
	bsd4_pcpu_t dd;
	thread_t td;
	struct lwp *olp;

	/*
	 * Make sure we aren't sitting on a tsleep queue.
	 */
	td = lp->lwp_thread;
	crit_enter_quick(td);
	if (td->td_flags & TDF_TSLEEPQ)
		tsleep_remove(td);
	bsd4_recalculate_estcpu(lp);

	/*
	 * If a reschedule was requested give another thread the
	 * chance to run.
	 */
	if (user_resched_wanted()) {
		clear_user_resched();
		bsd4_release_curproc(lp);

		KTR_COND_LOG(usched_bsd4_acquire_curproc_urw,
		    lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
		    lp->lwp_proc->p_pid,
		    lp->lwp_thread->td_gd->gd_cpuid,
		    mycpu->gd_cpuid);
	}

	/*
	 * Loop until we are the current user thread
	 */
	gd = mycpu;
	dd = &bsd4_pcpu[gd->gd_cpuid];

	KTR_COND_LOG(usched_bsd4_acquire_curproc_before_loop,
	    lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
	    lp->lwp_proc->p_pid,
	    lp->lwp_thread->td_gd->gd_cpuid,
	    mycpu->gd_cpuid);

	do {
		/*
		 * Process any pending events and higher priority threads.
		 */
		lwkt_yield();

		/*
		 * Become the currently scheduled user thread for this cpu
		 * if we can do so trivially.
		 *
		 * We can steal another thread's current thread designation
		 * on this cpu since if we are running that other thread
		 * must not be, so we can safely deschedule it.
		 */
		if (dd->uschedcp == lp) {
			/*
			 * We are already the current lwp (hot path).
			 */
			dd->upri = lp->lwp_priority;
		} else if (dd->uschedcp == NULL) {
			/*
			 * We can trivially become the current lwp.
			 */
			ATOMIC_CPUMASK_ORBIT(bsd4_curprocmask, gd->gd_cpuid);
			dd->uschedcp = lp;
			dd->upri = lp->lwp_priority;
		} else if (dd->upri > lp->lwp_priority) {
			/*
			 * We can steal the current cpu's lwp designation
			 * away simply by replacing it.  The other thread
			 * will stall when it tries to return to userland.
			 */
			olp = dd->uschedcp;
			dd->uschedcp = lp;
			dd->upri = lp->lwp_priority;
			lwkt_deschedule(olp->lwp_thread);
			bsd4_setrunqueue(olp);
		} else {
			/*
			 * We cannot become the current lwp, place the lp
			 * on the bsd4 run-queue and deschedule ourselves.
			 *
			 * When we are reactivated we will have another
			 * chance.
			 */
			lwkt_deschedule(lp->lwp_thread);

			bsd4_setrunqueue(lp);

			KTR_COND_LOG(usched_bsd4_acquire_curproc_not,
			    lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
			    lp->lwp_proc->p_pid,
			    lp->lwp_thread->td_gd->gd_cpuid,
			    dd->uschedcp->lwp_proc->p_pid,
			    mycpu->gd_cpuid);

			lwkt_switch();

			/*
			 * Reload after a switch or setrunqueue/switch possibly
			 * moved us to another cpu.
			 */
			gd = mycpu;
			dd = &bsd4_pcpu[gd->gd_cpuid];

			KTR_COND_LOG(usched_bsd4_acquire_curproc_switch,
			    lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
			    lp->lwp_proc->p_pid,
			    lp->lwp_thread->td_gd->gd_cpuid,
			    mycpu->gd_cpuid);
		}
	} while (dd->uschedcp != lp);

	crit_exit_quick(td);
	KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
}
/*
 * BSD4_RELEASE_CURPROC
 *
 * This routine detaches the current thread from the userland scheduler,
 * usually because the thread needs to run or block in the kernel (at
 * kernel priority) for a while.
 *
 * This routine is also responsible for selecting a new thread to
 * make the current thread.
 *
 * NOTE: This implementation differs from the dummy example in that
 * bsd4_select_curproc() is able to select the current process, whereas
 * dummy_select_curproc() is not able to select the current process.
 * This means we have to NULL out uschedcp.
 *
 * Additionally, note that we may already be on a run queue if releasing
 * via the lwkt_switch() in bsd4_setrunqueue().
 */
static void
bsd4_release_curproc(struct lwp *lp)
{
	globaldata_t gd = mycpu;
	bsd4_pcpu_t dd = &bsd4_pcpu[gd->gd_cpuid];

	if (dd->uschedcp == lp) {
		crit_enter();
		KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);

		KTR_COND_LOG(usched_bsd4_release_curproc,
		    lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
		    lp->lwp_proc->p_pid,
		    lp->lwp_thread->td_gd->gd_cpuid,
		    mycpu->gd_cpuid);

		dd->uschedcp = NULL;	/* don't let lp be selected */
		dd->upri = PRIBASE_NULL;
		ATOMIC_CPUMASK_NANDBIT(bsd4_curprocmask, gd->gd_cpuid);
		dd->old_uschedcp = lp;	/* used only for KTR debug prints */
		bsd4_select_curproc(gd);
		crit_exit();
	}
}
/*
 * BSD4_SELECT_CURPROC
 *
 * Select a new current process for this cpu and clear any pending user
 * reschedule request.  The cpu currently has no current process.
 *
 * This routine is also responsible for equal-priority round-robining,
 * typically triggered from bsd4_schedulerclock().  In our dummy example
 * all the 'user' threads are LWKT scheduled all at once and we just
 * call lwkt_switch().
 *
 * The calling process is not on the queue and cannot be selected.
 */
static void
bsd4_select_curproc(globaldata_t gd)
{
	bsd4_pcpu_t dd = &bsd4_pcpu[gd->gd_cpuid];
	struct lwp *nlp;
	int cpuid = gd->gd_cpuid;

	crit_enter_gd(gd);

	spin_lock(&bsd4_spin);
	if (usched_bsd4_cache_coherent)
		nlp = bsd4_chooseproc_locked_cache_coherent(dd->uschedcp);
	else
		nlp = bsd4_chooseproc_locked(dd->uschedcp);

	if (nlp) {
		KTR_COND_LOG(usched_bsd4_select_curproc,
		    nlp->lwp_proc->p_pid == usched_bsd4_pid_debug,
		    nlp->lwp_proc->p_pid,
		    nlp->lwp_thread->td_gd->gd_cpuid,
		    dd->old_uschedcp->lwp_proc->p_pid,
		    dd->old_uschedcp->lwp_thread->td_gd->gd_cpuid,
		    mycpu->gd_cpuid);

		ATOMIC_CPUMASK_ORBIT(bsd4_curprocmask, cpuid);
		dd->upri = nlp->lwp_priority;
		dd->uschedcp = nlp;
		dd->rrcount = 0;		/* reset round robin */
		spin_unlock(&bsd4_spin);
		lwkt_acquire(nlp->lwp_thread);
		lwkt_schedule(nlp->lwp_thread);
	} else if (bsd4_runqcount && CPUMASK_TESTBIT(bsd4_rdyprocmask, cpuid)) {
		ATOMIC_CPUMASK_NANDBIT(bsd4_rdyprocmask, cpuid);
		spin_unlock(&bsd4_spin);
		lwkt_schedule(dd->helper_thread);
	} else {
		spin_unlock(&bsd4_spin);
	}
	crit_exit_gd(gd);
}
/*
 * batchy_looser_pri_test() - determine if a process is batchy or not
 * relative to the other processes running in the system
 */
static int
bsd4_batchy_looser_pri_test(struct lwp *lp)
{
	cpumask_t mask;
	bsd4_pcpu_t other_dd;
	int cpu;

	/* Current running processes */
	mask = bsd4_curprocmask;
	CPUMASK_ANDMASK(mask, smp_active_mask);
	CPUMASK_ANDMASK(mask, usched_global_cpumask);

	while (CPUMASK_TESTNZERO(mask)) {
		cpu = BSFCPUMASK(mask);
		other_dd = &bsd4_pcpu[cpu];
		if (other_dd->upri - lp->lwp_priority >
		    usched_bsd4_upri_affinity * PPQ) {

			KTR_COND_LOG(usched_batchy_test_false,
			    lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
			    lp->lwp_proc->p_pid,
			    lp->lwp_thread->td_gd->gd_cpuid,
			    (unsigned long)CPUMASK_LOWMASK(mask));

			return 0;
		}
		CPUMASK_NANDBIT(mask, cpu);
	}

	KTR_COND_LOG(usched_batchy_test_true,
	    lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
	    lp->lwp_proc->p_pid,
	    lp->lwp_thread->td_gd->gd_cpuid,
	    (unsigned long)CPUMASK_LOWMASK(mask));

	return 1;
}
/*
 * Place the specified lwp on the user scheduler's run queue.  This routine
 * must be called with the thread descheduled.  The lwp must be runnable.
 *
 * The thread may be the current thread as a special case.
 */
static void
bsd4_setrunqueue(struct lwp *lp)
{
	globaldata_t gd;
	bsd4_pcpu_t dd;
	int cpuid;
	cpumask_t mask;
	cpumask_t tmpmask;

	/*
	 * First validate the process state relative to the current cpu.
	 * We don't need the spinlock for this, just a critical section.
	 * We are in control of the process.
	 */
	crit_enter();
	KASSERT(lp->lwp_stat == LSRUN, ("setrunqueue: lwp not LSRUN"));
	KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0,
	    ("lwp %d/%d already on runq! flag %08x/%08x", lp->lwp_proc->p_pid,
	     lp->lwp_tid, lp->lwp_proc->p_flags, lp->lwp_flags));
	KKASSERT((lp->lwp_thread->td_flags & TDF_RUNQ) == 0);

	/*
	 * Note: gd and dd are relative to the target thread's last cpu,
	 * NOT our current cpu.
	 */
	gd = lp->lwp_thread->td_gd;
	dd = &bsd4_pcpu[gd->gd_cpuid];

	/*
	 * This process is not supposed to be scheduled anywhere or assigned
	 * as the current process anywhere.  Assert the condition.
	 */
	KKASSERT(dd->uschedcp != lp);

	/*
	 * XXX fixme.  Could be part of a remrunqueue/setrunqueue
	 * operation when the priority is recalculated, so TDF_MIGRATING
	 * may already be set.
	 */
	if ((lp->lwp_thread->td_flags & TDF_MIGRATING) == 0)
		lwkt_giveaway(lp->lwp_thread);

	/*
	 * We lose control of lp the moment we release the spinlock after
	 * having placed lp on the queue.  i.e. another cpu could pick it
	 * up and it could exit, or its priority could be further adjusted,
	 * or something like that.
	 */
	spin_lock(&bsd4_spin);
	bsd4_setrunqueue_locked(lp);
	lp->lwp_rebal_ticks = sched_ticks;

	/*
	 * Kick the scheduler helper on one of the other cpu's
	 * and request a reschedule if appropriate.
	 *
	 * NOTE: We check all cpus whose rdyprocmask is set.  First we
	 *	 look for cpus without designated lps, then we look for
	 *	 cpus with designated lps with a worse priority than our
	 *	 process.
	 */
	++bsd4_scancpu;

	if (usched_bsd4_smt) {
		/*
		 * SMT heuristic - Try to schedule on a free physical core.
		 * If no physical core found then choose the one that has
		 * an interactive thread.
		 */
		int best_cpuid = -1;
		int min_prio = MAXPRI * MAXPRI;
		int sibling;

		cpuid = (bsd4_scancpu & 0xFFFF) % ncpus;
		mask = bsd4_rdyprocmask;
		CPUMASK_NANDMASK(mask, bsd4_curprocmask);
		CPUMASK_ANDMASK(mask, lp->lwp_cpumask);
		CPUMASK_ANDMASK(mask, smp_active_mask);
		CPUMASK_ANDMASK(mask, usched_global_cpumask);

		KTR_COND_LOG(usched_bsd4_setrunqueue_fc_smt,
		    lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
		    lp->lwp_proc->p_pid,
		    lp->lwp_thread->td_gd->gd_cpuid,
		    (unsigned long)CPUMASK_LOWMASK(mask),
		    mycpu->gd_cpuid);

		while (CPUMASK_TESTNZERO(mask)) {
			CPUMASK_ASSNBMASK(tmpmask, cpuid);
			if (CPUMASK_TESTMASK(tmpmask, mask)) {
				CPUMASK_ANDMASK(tmpmask, mask);
				cpuid = BSFCPUMASK(tmpmask);
			} else {
				cpuid = BSFCPUMASK(mask);
			}
			gd = globaldata_find(cpuid);
			dd = &bsd4_pcpu[cpuid];

			if ((dd->upri & ~PPQMASK) >=
			    (lp->lwp_priority & ~PPQMASK)) {
				tmpmask = dd->cpunode->parent_node->members;
				CPUMASK_NANDMASK(tmpmask, dd->cpunode->members);
				CPUMASK_ANDMASK(tmpmask, mask);
				if (CPUMASK_TESTNZERO(tmpmask)) {
					KTR_COND_LOG(usched_bsd4_setrunqueue_found,
					    lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
					    lp->lwp_proc->p_pid,
					    lp->lwp_thread->td_gd->gd_cpuid,
					    (unsigned long)CPUMASK_LOWMASK(mask),
					    cpuid,
					    mycpu->gd_cpuid);

					goto found;
				} else {
					tmpmask =
					    dd->cpunode->parent_node->members;
					CPUMASK_NANDMASK(tmpmask,
					    dd->cpunode->members);
					sibling = BSFCPUMASK(tmpmask);
					if (min_prio >
					    bsd4_pcpu[sibling].upri) {
						min_prio =
						    bsd4_pcpu[sibling].upri;
						best_cpuid = cpuid;
					}
				}
			}
			CPUMASK_NANDBIT(mask, cpuid);
		}

		if (best_cpuid != -1) {
			cpuid = best_cpuid;
			gd = globaldata_find(cpuid);
			dd = &bsd4_pcpu[cpuid];

			KTR_COND_LOG(usched_bsd4_setrunqueue_found_best_cpuid,
			    lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
			    lp->lwp_proc->p_pid,
			    lp->lwp_thread->td_gd->gd_cpuid,
			    (unsigned long)CPUMASK_LOWMASK(mask),
			    cpuid,
			    mycpu->gd_cpuid);

			goto found;
		}
	} else {
		/* Fallback to the original heuristic */
		cpuid = (bsd4_scancpu & 0xFFFF) % ncpus;
		mask = bsd4_rdyprocmask;
		CPUMASK_NANDMASK(mask, bsd4_curprocmask);
		CPUMASK_ANDMASK(mask, lp->lwp_cpumask);
		CPUMASK_ANDMASK(mask, smp_active_mask);
		CPUMASK_ANDMASK(mask, usched_global_cpumask);

		KTR_COND_LOG(usched_bsd4_setrunqueue_fc_non_smt,
		    lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
		    lp->lwp_proc->p_pid,
		    lp->lwp_thread->td_gd->gd_cpuid,
		    (unsigned long)CPUMASK_LOWMASK(mask),
		    mycpu->gd_cpuid);

		while (CPUMASK_TESTNZERO(mask)) {
			CPUMASK_ASSNBMASK(tmpmask, cpuid);
			if (CPUMASK_TESTMASK(tmpmask, mask)) {
				CPUMASK_ANDMASK(tmpmask, mask);
				cpuid = BSFCPUMASK(tmpmask);
			} else {
				cpuid = BSFCPUMASK(mask);
			}
			gd = globaldata_find(cpuid);
			dd = &bsd4_pcpu[cpuid];

			if ((dd->upri & ~PPQMASK) >=
			    (lp->lwp_priority & ~PPQMASK)) {
				KTR_COND_LOG(usched_bsd4_setrunqueue_found,
				    lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
				    lp->lwp_proc->p_pid,
				    lp->lwp_thread->td_gd->gd_cpuid,
				    (unsigned long)CPUMASK_LOWMASK(mask),
				    cpuid,
				    mycpu->gd_cpuid);

				goto found;
			}
			CPUMASK_NANDBIT(mask, cpuid);
		}
	}

	/*
	 * Then cpus which might have a currently running lp
	 */
	mask = bsd4_curprocmask;
	CPUMASK_ANDMASK(mask, bsd4_rdyprocmask);
	CPUMASK_ANDMASK(mask, lp->lwp_cpumask);
	CPUMASK_ANDMASK(mask, smp_active_mask);
	CPUMASK_ANDMASK(mask, usched_global_cpumask);

	KTR_COND_LOG(usched_bsd4_setrunqueue_rc,
	    lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
	    lp->lwp_proc->p_pid,
	    lp->lwp_thread->td_gd->gd_cpuid,
	    (unsigned long)CPUMASK_LOWMASK(mask),
	    mycpu->gd_cpuid);

	while (CPUMASK_TESTNZERO(mask)) {
		CPUMASK_ASSNBMASK(tmpmask, cpuid);
		if (CPUMASK_TESTMASK(tmpmask, mask)) {
			CPUMASK_ANDMASK(tmpmask, mask);
			cpuid = BSFCPUMASK(tmpmask);
		} else {
			cpuid = BSFCPUMASK(mask);
		}
		gd = globaldata_find(cpuid);
		dd = &bsd4_pcpu[cpuid];

		if ((dd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
			KTR_COND_LOG(usched_bsd4_setrunqueue_found,
			    lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
			    lp->lwp_proc->p_pid,
			    lp->lwp_thread->td_gd->gd_cpuid,
			    (unsigned long)CPUMASK_LOWMASK(mask),
			    cpuid,
			    mycpu->gd_cpuid);

			goto found;
		}
		CPUMASK_NANDBIT(mask, cpuid);
	}

	/*
	 * If we cannot find a suitable cpu we reload from bsd4_scancpu
	 * and round-robin.  Other cpus will pickup as they release their
	 * current lwps or become ready.
	 *
	 * Avoid a degenerate system lockup case if usched_global_cpumask
	 * is set to 0 or otherwise does not cover lwp_cpumask.
	 *
	 * We only kick the target helper thread in this case, we do not
	 * set the user resched flag because
	 */
	cpuid = (bsd4_scancpu & 0xFFFF) % ncpus;
	if (CPUMASK_TESTBIT(usched_global_cpumask, cpuid) == 0)
		cpuid = 0;
	gd = globaldata_find(cpuid);
	dd = &bsd4_pcpu[cpuid];

	KTR_COND_LOG(usched_bsd4_setrunqueue_not_found,
	    lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
	    lp->lwp_proc->p_pid,
	    lp->lwp_thread->td_gd->gd_cpuid,
	    cpuid,
	    mycpu->gd_cpuid);

found:
	if (gd == mycpu) {
		spin_unlock(&bsd4_spin);
		if ((dd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
			if (dd->uschedcp == NULL) {
				wakeup_mycpu(dd->helper_thread);
			} else {
				need_user_resched();
			}
		}
	} else {
		ATOMIC_CPUMASK_NANDBIT(bsd4_rdyprocmask, cpuid);
		spin_unlock(&bsd4_spin);
		if ((dd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK))
			lwkt_send_ipiq(gd, bsd4_need_user_resched_remote, NULL);
		else
			wakeup(dd->helper_thread);
	}
	crit_exit();
}
/*
 * This routine is called from a systimer IPI.  It MUST be MP-safe and
 * the BGL IS NOT HELD ON ENTRY.  This routine is called at ESTCPUFREQ on
 * each cpu.
 *
 * This routine is called on every sched tick.  If the currently running
 * thread belongs to this scheduler it will be called with a non-NULL lp,
 * otherwise it will be called with a NULL lp.
 */
static void
bsd4_schedulerclock(struct lwp *lp, sysclock_t period, sysclock_t cpstamp)
{
	globaldata_t gd = mycpu;
	bsd4_pcpu_t dd = &bsd4_pcpu[gd->gd_cpuid];

	/*
	 * No impl if no lp running.
	 */
	if (lp == NULL)
		return;

	/*
	 * Do we need to round-robin?  We round-robin 10 times a second.
	 * This should only occur for cpu-bound batch processes.
	 */
	if (++dd->rrcount >= usched_bsd4_rrinterval) {
		dd->rrcount = 0;
		need_user_resched();
	}

	/*
	 * Adjust estcpu upward using a real time equivalent calculation.
	 */
	lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUMAX / ESTCPUFREQ + 1);

	/*
	 * Spinlocks also hold a critical section so there should not be
	 * any active.
	 */
	KKASSERT(gd->gd_spinlocks == 0);

	bsd4_resetpriority(lp);
}
/*
 * Called from acquire and from kern_synch's one-second timer (one of the
 * callout helper threads) with a critical section held.
 *
 * Decay p_estcpu based on the number of ticks we haven't been running
 * and our p_nice.  As the load increases each process observes a larger
 * number of idle ticks (because other processes are running in them).
 * This observation leads to a larger correction which tends to make the
 * system more 'batchy'.
 *
 * Note that no recalculation occurs for a process which sleeps and wakes
 * up in the same tick.  That is, a system doing thousands of context
 * switches per second will still only do serious estcpu calculations
 * ESTCPUFREQ times per second.
 */
static void
bsd4_recalculate_estcpu(struct lwp *lp)
{
	globaldata_t gd = mycpu;
	sysclock_t cpbase;
	sysclock_t ttlticks;
	int estcpu;
	int decay_factor;

	/*
	 * We have to subtract periodic to get the last schedclock
	 * timeout time, otherwise we would get the upcoming timeout.
	 * Keep in mind that a process can migrate between cpus and
	 * while the scheduler clock should be very close, boundary
	 * conditions could lead to a small negative delta.
	 */
	cpbase = gd->gd_schedclock.time - gd->gd_schedclock.periodic;

	if (lp->lwp_slptime > 1) {
		/*
		 * Too much time has passed, do a coarse correction.
		 */
		lp->lwp_estcpu = lp->lwp_estcpu >> 1;
		bsd4_resetpriority(lp);
		lp->lwp_cpbase = cpbase;
		lp->lwp_cpticks = 0;
		lp->lwp_batch -= ESTCPUFREQ;
		if (lp->lwp_batch < 0)
			lp->lwp_batch = 0;
	} else if (lp->lwp_cpbase != cpbase) {
		/*
		 * Adjust estcpu if we are in a different tick.  Don't waste
		 * time if we are in the same tick.
		 *
		 * First calculate the number of ticks in the measurement
		 * interval.  The ttlticks calculation can wind up 0 due to
		 * a bug in the handling of lwp_slptime (as yet not found),
		 * so make sure we do not get a divide by 0 panic.
		 */
		ttlticks = (cpbase - lp->lwp_cpbase) /
			   gd->gd_schedclock.periodic;
		if ((ssysclock_t)ttlticks < 0) {
			ttlticks = 0;
			lp->lwp_cpbase = cpbase;
		}
		if (ttlticks == 0)
			return;
		updatepcpu(lp, lp->lwp_cpticks, ttlticks);

		/*
		 * Calculate the percentage of one cpu used factoring in ncpus
		 * and the load and adjust estcpu.  Handle degenerate cases
		 * by adding 1 to bsd4_runqcount.
		 *
		 * estcpu is scaled by ESTCPUMAX.
		 *
		 * bsd4_runqcount is the excess number of user processes
		 * that cannot be immediately scheduled to cpus.  We want
		 * to count these as running to avoid range compression
		 * in the base calculation (which is the actual percentage
		 * of one cpu used).
		 */
		estcpu = (lp->lwp_cpticks * ESTCPUMAX) *
			 (bsd4_runqcount + ncpus) / (ncpus * ttlticks);

		/*
		 * If estcpu is > 50% we become more batch-like
		 * If estcpu is <= 50% we become less batch-like
		 *
		 * It takes 30 cpu seconds to traverse the entire range.
		 */
		if (estcpu > ESTCPUMAX / 2) {
			lp->lwp_batch += ttlticks;
			if (lp->lwp_batch > BATCHMAX)
				lp->lwp_batch = BATCHMAX;
		} else {
			lp->lwp_batch -= ttlticks;
			if (lp->lwp_batch < 0)
				lp->lwp_batch = 0;
		}

		if (usched_bsd4_debug == lp->lwp_proc->p_pid) {
			kprintf("pid %d lwp %p estcpu %3d %3d bat %d cp %d/%d",
				lp->lwp_proc->p_pid, lp,
				estcpu, lp->lwp_estcpu,
				lp->lwp_batch,
				lp->lwp_cpticks, ttlticks);
		}

		/*
		 * Adjust lp->lwp_estcpu.  The decay factor determines how
		 * quickly lwp_estcpu collapses to its realtime calculation.
		 * A slower collapse gives us a more accurate number but
		 * can cause a cpu hog to eat too much cpu before the
		 * scheduler decides to downgrade it.
		 *
		 * NOTE: p_nice is accounted for in bsd4_resetpriority(),
		 *	 and not here, but we must still ensure that a
		 *	 cpu-bound nice -20 process does not completely
		 *	 override a cpu-bound nice +20 process.
		 *
		 * NOTE: We must use ESTCPULIM() here to deal with any
		 *	 overshoot.
		 */
		decay_factor = usched_bsd4_decay;
		if (decay_factor < 1)
			decay_factor = 1;
		if (decay_factor > 1024)
			decay_factor = 1024;

		lp->lwp_estcpu = ESTCPULIM(
			(lp->lwp_estcpu * decay_factor + estcpu) /
			(decay_factor + 1));

		if (usched_bsd4_debug == lp->lwp_proc->p_pid)
			kprintf(" finalestcpu %d\n", lp->lwp_estcpu);
		bsd4_resetpriority(lp);
		lp->lwp_cpbase += ttlticks * gd->gd_schedclock.periodic;
		lp->lwp_cpticks = 0;
	}
}
/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 *
 * This routine may be called with any process.
 *
 * This routine is called by fork1() for initial setup with the process
 * off the run queue, and also may be called normally with the process on or
 * off the run queue.
 */
static void
bsd4_resetpriority(struct lwp *lp)
{
	bsd4_pcpu_t dd;
	int newpriority;
	u_short newrqtype;
	int reschedcpu;
	int checkpri;
	int estcpu;

	/*
	 * Calculate the new priority and queue type
	 */
	crit_enter();
	spin_lock(&bsd4_spin);

	newrqtype = lp->lwp_rtprio.type;

	switch(newrqtype) {
	case RTP_PRIO_REALTIME:
	case RTP_PRIO_FIFO:
		newpriority = PRIBASE_REALTIME +
			     (lp->lwp_rtprio.prio & PRIMASK);
		break;
	case RTP_PRIO_NORMAL:
		/*
		 * Detune estcpu based on batchiness.  lwp_batch ranges
		 * from 0 to BATCHMAX.  Limit estcpu for the sake of
		 * the priority calculation to between 50% and 100%.
		 */
		estcpu = lp->lwp_estcpu * (lp->lwp_batch + BATCHMAX) /
			 (BATCHMAX * 2);

		/*
		 * p_nice piece		Adds (0-40) * 2		0-80
		 * estcpu		Adds 16384  * 4 / 512	0-128
		 */
		newpriority = (lp->lwp_proc->p_nice - PRIO_MIN) * PPQ / NICEPPQ;
		newpriority += estcpu * PPQ / ESTCPUPPQ;
		newpriority = newpriority * MAXPRI / (PRIO_RANGE * PPQ /
			      NICEPPQ + ESTCPUMAX * PPQ / ESTCPUPPQ);
		newpriority = PRIBASE_NORMAL + (newpriority & PRIMASK);
		break;
	case RTP_PRIO_IDLE:
		newpriority = PRIBASE_IDLE + (lp->lwp_rtprio.prio & PRIMASK);
		break;
	case RTP_PRIO_THREAD:
		newpriority = PRIBASE_THREAD + (lp->lwp_rtprio.prio & PRIMASK);
		break;
	default:
		panic("Bad RTP_PRIO %d", newrqtype);
		/* NOT REACHED */
	}

	/*
	 * The newpriority incorporates the queue type so do a simple masked
	 * check to determine if the process has moved to another queue.  If
	 * it has, and it is currently on a run queue, then move it.
	 *
	 * td_upri has normal sense (higher values are more desirable), so
	 * negate it.
	 */
	lp->lwp_thread->td_upri = -(newpriority & ~PPQMASK);
	if ((lp->lwp_priority ^ newpriority) & ~PPQMASK) {
		lp->lwp_priority = newpriority;
		if (lp->lwp_mpflags & LWP_MP_ONRUNQ) {
			bsd4_remrunqueue_locked(lp);
			lp->lwp_rqtype = newrqtype;
			lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
			bsd4_setrunqueue_locked(lp);
			checkpri = 1;
		} else {
			lp->lwp_rqtype = newrqtype;
			lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
			checkpri = 0;
		}
		reschedcpu = lp->lwp_thread->td_gd->gd_cpuid;
	} else {
		lp->lwp_priority = newpriority;
		reschedcpu = -1;
		checkpri = 1;
	}

	/*
	 * Determine if we need to reschedule the target cpu.  This only
	 * occurs if the LWP is already on a scheduler queue, which means
	 * that idle cpu notification has already occurred.  At most we
	 * need only issue a need_user_resched() on the appropriate cpu.
	 *
	 * The LWP may be owned by a CPU different from the current one,
	 * in which case dd->uschedcp may be modified without an MP lock
	 * or a spinlock held.  The worst that happens is that the code
	 * below causes a spurious need_user_resched() on the target CPU
	 * and dd->upri to be wrong for a short period of time, both of
	 * which are harmless.
	 *
	 * If checkpri is 0 we are adjusting the priority of the current
	 * process, possibly higher (less desirable), so ignore the upri
	 * check which will fail in that case.
	 */
	if (reschedcpu >= 0) {
		dd = &bsd4_pcpu[reschedcpu];
		if (CPUMASK_TESTBIT(bsd4_rdyprocmask, reschedcpu) &&
		    (checkpri == 0 ||
		     (dd->upri & ~PRIMASK) > (lp->lwp_priority & ~PRIMASK))) {
			if (reschedcpu == mycpu->gd_cpuid) {
				spin_unlock(&bsd4_spin);
				need_user_resched();
			} else {
				spin_unlock(&bsd4_spin);
				ATOMIC_CPUMASK_NANDBIT(bsd4_rdyprocmask,
						       reschedcpu);
				lwkt_send_ipiq(lp->lwp_thread->td_gd,
					       bsd4_need_user_resched_remote,
					       NULL);
			}
		} else {
			spin_unlock(&bsd4_spin);
		}
	} else {
		spin_unlock(&bsd4_spin);
	}
	crit_exit();
}
static void
bsd4_yield(struct lwp *lp)
{
#if 0
	/* FUTURE (or something similar) */
	switch(lp->lwp_rqtype) {
	case RTP_PRIO_NORMAL:
		lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUINCR);
		break;
	default:
		break;
	}
#endif
	need_user_resched();
}

static void
bsd4_changedcpu(struct lwp *lp __unused)
{
}
/*
 * Called from fork1() when a new child process is being created.
 *
 * Give the child process an initial estcpu that is more batchy than
 * its parent and dock the parent for the fork (but do not
 * reschedule the parent).  This comprises the main part of our batch
 * detection heuristic for both parallel forking and sequential execs.
 *
 * XXX lwp should be "spawning" instead of "forking"
 */
static void
bsd4_forking(struct lwp *plp, struct lwp *lp)
{
	/*
	 * Put the child 4 queue slots (out of 32) higher than the parent
	 * (less desirable than the parent).
	 */
	lp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ * 4);

	/*
	 * The batch status of children always starts out centerline
	 * and will inch-up or inch-down as appropriate.  It takes roughly
	 * ~15 seconds of >50% cpu to hit the limit.
	 */
	lp->lwp_batch = BATCHMAX / 2;

	/*
	 * Dock the parent a cost for the fork, protecting us from fork
	 * bombs.  If the parent is forking quickly make the child more
	 * batchy.
	 */
	plp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ / 16);
}
/*
 * Called when a lwp is being removed from this scheduler, typically
 * during lwp_exit().
 */
static void
bsd4_exiting(struct lwp *lp, struct proc *child_proc)
{
}

static void
bsd4_uload_update(struct lwp *lp)
{
}
/*
 * chooseproc() is called when a cpu needs a user process to LWKT schedule,
 * it selects a user process and returns it.  If chklp is non-NULL and chklp
 * has a better or equal priority than the process that would otherwise be
 * chosen, NULL is returned.
 *
 * Until we fix the RUNQ code the chklp test has to be strict or we may
 * bounce between processes trying to acquire the current process designation.
 *
 * MPSAFE - must be called with bsd4_spin exclusive held.  The spinlock is
 *	    left intact through the entire routine.
 */
static struct lwp *
bsd4_chooseproc_locked(struct lwp *chklp)
{
	struct lwp *lp;
	struct rq *q;
	u_int32_t *which, *which2;
	u_int32_t pri;
	u_int32_t rtqbits;
	u_int32_t tsqbits;
	u_int32_t idqbits;
	cpumask_t cpumask;

	rtqbits = bsd4_rtqueuebits;
	tsqbits = bsd4_queuebits;
	idqbits = bsd4_idqueuebits;
	cpumask = mycpu->gd_cpumask;

again:
	if (rtqbits) {
		pri = bsfl(rtqbits);
		q = &bsd4_rtqueues[pri];
		which = &bsd4_rtqueuebits;
		which2 = &rtqbits;
	} else if (tsqbits) {
		pri = bsfl(tsqbits);
		q = &bsd4_queues[pri];
		which = &bsd4_queuebits;
		which2 = &tsqbits;
	} else if (idqbits) {
		pri = bsfl(idqbits);
		q = &bsd4_idqueues[pri];
		which = &bsd4_idqueuebits;
		which2 = &idqbits;
	} else {
		return NULL;
	}
	lp = TAILQ_FIRST(q);
	KASSERT(lp, ("chooseproc: no lwp on busy queue"));

	while (CPUMASK_TESTMASK(lp->lwp_cpumask, cpumask) == 0) {
		lp = TAILQ_NEXT(lp, lwp_procq);
		if (lp == NULL) {
			*which2 &= ~(1 << pri);
			goto again;
		}
	}

	/*
	 * If the passed lwp <chklp> is reasonably close to the selected
	 * lwp <lp>, return NULL (indicating that <chklp> should be kept).
	 *
	 * Note that we must error on the side of <chklp> to avoid bouncing
	 * between threads in the acquire code.
	 */
	if (chklp) {
		if (chklp->lwp_priority < lp->lwp_priority + PPQ)
			return NULL;
	}

	/*
	 * If the chosen lwp does not reside on this cpu spend a few
	 * cycles looking for a better candidate at the same priority level.
	 * This is a fallback check, setrunqueue() tries to wakeup the
	 * correct cpu and is our front-line affinity.
	 */
	if (lp->lwp_thread->td_gd != mycpu &&
	    (chklp = TAILQ_NEXT(lp, lwp_procq)) != NULL
	) {
		if (chklp->lwp_thread->td_gd == mycpu) {
			lp = chklp;
		}
	}

	KTR_COND_LOG(usched_bsd4_chooseproc,
	    lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
	    lp->lwp_proc->p_pid,
	    lp->lwp_thread->td_gd->gd_cpuid,
	    mycpu->gd_cpuid);

	TAILQ_REMOVE(q, lp, lwp_procq);
	--bsd4_runqcount;
	if (TAILQ_EMPTY(q))
		*which &= ~(1 << pri);
	KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) != 0, ("not on runq6!"));
	atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);

	return lp;
}
/*
 * chooseproc() - with a cache coherence heuristic.  Try to pull a process that
 * has its home on the current CPU.  If the process doesn't have its home here
 * and is a batchy one (see bsd4_batchy_looser_pri_test), we can wait for a
 * sched_tick, maybe its home will become free and we can pull it in.  Anyway,
 * we can't wait more than one tick.  If that tick expired, we pull in that
 * process, no matter what.
 */
static struct lwp *
bsd4_chooseproc_locked_cache_coherent(struct lwp *chklp)
{
	struct lwp *lp;
	struct rq *q;
	u_int32_t *which, *which2;
	u_int32_t pri;
	u_int32_t checks;
	u_int32_t rtqbits;
	u_int32_t tsqbits;
	u_int32_t idqbits;
	cpumask_t cpumask;

	struct lwp *min_level_lwp = NULL;
	struct rq *min_q = NULL;
	cpumask_t siblings;
	cpu_node_t *cpunode = NULL;
	u_int32_t min_level = MAXCPU;	/* number of levels < MAXCPU */
	u_int32_t *min_which = NULL;
	u_int32_t min_pri = 0;
	u_int32_t level = 0;

	rtqbits = bsd4_rtqueuebits;
	tsqbits = bsd4_queuebits;
	idqbits = bsd4_idqueuebits;
	cpumask = mycpu->gd_cpumask;

	/* Get the mask corresponding to the sysctl configured level */
	cpunode = bsd4_pcpu[mycpu->gd_cpuid].cpunode;
	level = usched_bsd4_stick_to_level;
	while (level) {
		cpunode = cpunode->parent_node;
		level--;
	}
	/* The cpus which can elect a process */
	siblings = cpunode->members;
	checks = 0;

again:
	if (rtqbits) {
		pri = bsfl(rtqbits);
		q = &bsd4_rtqueues[pri];
		which = &bsd4_rtqueuebits;
		which2 = &rtqbits;
	} else if (tsqbits) {
		pri = bsfl(tsqbits);
		q = &bsd4_queues[pri];
		which = &bsd4_queuebits;
		which2 = &tsqbits;
	} else if (idqbits) {
		pri = bsfl(idqbits);
		q = &bsd4_idqueues[pri];
		which = &bsd4_idqueuebits;
		which2 = &idqbits;
	} else {
		/*
		 * No more left and we didn't reach the checks limit.
		 */
		bsd4_kick_helper(min_level_lwp);
		return NULL;
	}
	lp = TAILQ_FIRST(q);
	KASSERT(lp, ("chooseproc: no lwp on busy queue"));

	/*
	 * Limit the number of checks/queue to a configurable value to
	 * minimize the contention (we are in a locked region
	 */
	while (checks < usched_bsd4_queue_checks) {
		if (CPUMASK_TESTMASK(lp->lwp_cpumask, cpumask) == 0 ||
		    (CPUMASK_TESTMASK(siblings,
				      lp->lwp_thread->td_gd->gd_cpumask) == 0 &&
		     (lp->lwp_rebal_ticks == sched_ticks ||
		      lp->lwp_rebal_ticks == (int)(sched_ticks - 1)) &&
		     bsd4_batchy_looser_pri_test(lp))) {

			KTR_COND_LOG(usched_chooseproc_cc_not_good,
			    lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
			    lp->lwp_proc->p_pid,
			    (unsigned long)CPUMASK_LOWMASK(
					lp->lwp_thread->td_gd->gd_cpumask),
			    (unsigned long)CPUMASK_LOWMASK(siblings),
			    (unsigned long)CPUMASK_LOWMASK(cpumask));

			cpunode = bsd4_pcpu[lp->lwp_thread->td_gd->gd_cpuid].cpunode;
			level = 0;
			while (cpunode) {
				if (CPUMASK_TESTMASK(cpunode->members,
						     cpumask)) {
					break;
				}
				cpunode = cpunode->parent_node;
				level++;
			}
			if (level < min_level ||
			    (level == min_level && min_level_lwp &&
			     lp->lwp_priority < min_level_lwp->lwp_priority)) {
				bsd4_kick_helper(min_level_lwp);
				min_level_lwp = lp;
				min_level = level;
				min_q = q;
				min_which = which;
				min_pri = pri;
			} else {
				bsd4_kick_helper(lp);
			}
			lp = TAILQ_NEXT(lp, lwp_procq);
			if (lp == NULL) {
				*which2 &= ~(1 << pri);
				goto again;
			}
		} else {
			KTR_COND_LOG(usched_chooseproc_cc_elected,
			    lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
			    lp->lwp_proc->p_pid,
			    (unsigned long)CPUMASK_LOWMASK(
					lp->lwp_thread->td_gd->gd_cpumask),
			    (unsigned long)CPUMASK_LOWMASK(siblings),
			    (unsigned long)CPUMASK_LOWMASK(cpumask));

			goto found;
		}
		++checks;
	}

	/*
	 * Checks exhausted, we tried to defer too many threads, so schedule
	 * the best of the worst.
	 */
	lp = min_level_lwp;
	q = min_q;
	which = min_which;
	pri = min_pri;
	KASSERT(lp, ("chooseproc: at least the first lp was good"));

found:

	/*
	 * If the passed lwp <chklp> is reasonably close to the selected
	 * lwp <lp>, return NULL (indicating that <chklp> should be kept).
	 *
	 * Note that we must error on the side of <chklp> to avoid bouncing
	 * between threads in the acquire code.
	 */
	if (chklp) {
		if (chklp->lwp_priority < lp->lwp_priority + PPQ) {
			bsd4_kick_helper(lp);
			return NULL;
		}
	}

	KTR_COND_LOG(usched_chooseproc_cc,
	    lp->lwp_proc->p_pid == usched_bsd4_pid_debug,
	    lp->lwp_proc->p_pid,
	    lp->lwp_thread->td_gd->gd_cpuid,
	    mycpu->gd_cpuid);

	TAILQ_REMOVE(q, lp, lwp_procq);
	--bsd4_runqcount;
	if (TAILQ_EMPTY(q))
		*which &= ~(1 << pri);
	KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) != 0, ("not on runq6!"));
	atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);

	return lp;
}
/*
 * If we aren't willing to schedule a ready process on our cpu, give its
 * target cpu a kick rather than wait for the next tick.
 *
 * Called with bsd4_spin held.
 */
static void
bsd4_kick_helper(struct lwp *lp)
{
	globaldata_t gd;
	bsd4_pcpu_t dd;
	cpumask_t tmpmask;

	if (lp == NULL)
		return;
	gd = lp->lwp_thread->td_gd;
	dd = &bsd4_pcpu[gd->gd_cpuid];

	tmpmask = smp_active_mask;
	CPUMASK_ANDMASK(tmpmask, usched_global_cpumask);
	CPUMASK_ANDMASK(tmpmask, bsd4_rdyprocmask);
	CPUMASK_ANDMASK(tmpmask, gd->gd_cpumask);
	if (CPUMASK_TESTZERO(tmpmask))
		return;

	++usched_bsd4_kicks;
	ATOMIC_CPUMASK_NANDBIT(bsd4_rdyprocmask, gd->gd_cpuid);
	if ((dd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
		lwkt_send_ipiq(gd, bsd4_need_user_resched_remote, NULL);
	} else {
		wakeup(dd->helper_thread);
	}
}
static void
bsd4_need_user_resched_remote(void *dummy)
{
	globaldata_t gd = mycpu;
	bsd4_pcpu_t dd = &bsd4_pcpu[gd->gd_cpuid];

	need_user_resched();

	/* Call wakeup_mycpu to avoid sending IPIs to other CPUs */
	wakeup_mycpu(dd->helper_thread);
}
/*
 * bsd4_remrunqueue_locked() removes a given process from the run queue
 * that it is on, clearing the queue busy bit if it becomes empty.
 *
 * Note that the user process scheduler is different from the LWKT scheduler.
 * The user process scheduler only manages user processes but it uses LWKT
 * underneath, and a user process operating in the kernel will often be
 * 'released' from our management.
 *
 * MPSAFE - bsd4_spin must be held exclusively on call
 */
static void
bsd4_remrunqueue_locked(struct lwp *lp)
{
	struct rq *q;
	u_int32_t *which;
	u_int8_t pri;

	KKASSERT(lp->lwp_mpflags & LWP_MP_ONRUNQ);
	atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
	--bsd4_runqcount;
	KKASSERT(bsd4_runqcount >= 0);

	pri = lp->lwp_rqindex;
	switch(lp->lwp_rqtype) {
	case RTP_PRIO_NORMAL:
		q = &bsd4_queues[pri];
		which = &bsd4_queuebits;
		break;
	case RTP_PRIO_REALTIME:
	case RTP_PRIO_FIFO:
		q = &bsd4_rtqueues[pri];
		which = &bsd4_rtqueuebits;
		break;
	case RTP_PRIO_IDLE:
		q = &bsd4_idqueues[pri];
		which = &bsd4_idqueuebits;
		break;
	default:
		panic("remrunqueue: invalid rtprio type");
		/* NOT REACHED */
	}
	TAILQ_REMOVE(q, lp, lwp_procq);
	if (TAILQ_EMPTY(q)) {
		KASSERT((*which & (1 << pri)) != 0,
			("remrunqueue: remove from empty queue"));
		*which &= ~(1 << pri);
	}
}
/*
 * bsd4_setrunqueue_locked()
 *
 * Add a process whose rqtype and rqindex had previously been calculated
 * onto the appropriate run queue.   Determine if the addition requires
 * a reschedule on a cpu and return the cpuid or -1.
 *
 * NOTE: Lower priorities are better priorities.
 *
 * MPSAFE - bsd4_spin must be held exclusively on call
 */
static void
bsd4_setrunqueue_locked(struct lwp *lp)
{
	struct rq *q;
	u_int32_t *which;
	int pri;

	KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
	atomic_set_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
	++bsd4_runqcount;

	pri = lp->lwp_rqindex;

	switch(lp->lwp_rqtype) {
	case RTP_PRIO_NORMAL:
		q = &bsd4_queues[pri];
		which = &bsd4_queuebits;
		break;
	case RTP_PRIO_REALTIME:
	case RTP_PRIO_FIFO:
		q = &bsd4_rtqueues[pri];
		which = &bsd4_rtqueuebits;
		break;
	case RTP_PRIO_IDLE:
		q = &bsd4_idqueues[pri];
		which = &bsd4_idqueuebits;
		break;
	default:
		panic("remrunqueue: invalid rtprio type");
		/* NOT REACHED */
	}

	/*
	 * Add to the correct queue and set the appropriate bit.  If no
	 * lower priority (i.e. better) processes are in the queue then
	 * we want a reschedule, calculate the best cpu for the job.
	 *
	 * Always run reschedules on the LWPs original cpu.
	 */
	TAILQ_INSERT_TAIL(q, lp, lwp_procq);
	*which |= 1 << pri;
}
/*
 * For SMP systems a user scheduler helper thread is created for each
 * cpu and is used to allow one cpu to wakeup another for the purposes of
 * scheduling userland threads from setrunqueue().
 *
 * UP systems do not need the helper since there is only one cpu.
 *
 * We can't use the idle thread for this because we might block.
 * Additionally, doing things this way allows us to HLT idle cpus
 * on MP systems.
 */
static void
sched_thread(void *dummy)
{
	globaldata_t gd;
	bsd4_pcpu_t dd;
	bsd4_pcpu_t tmpdd;
	struct lwp *nlp;
	cpumask_t mask;
	int cpuid;
	cpumask_t tmpmask;
	int tmpid;

	gd = mycpu;
	cpuid = gd->gd_cpuid;		/* doesn't change */
	mask = gd->gd_cpumask;		/* doesn't change */
	dd = &bsd4_pcpu[cpuid];

	/*
	 * Since we are woken up only when no user processes are scheduled
	 * on a cpu, we can run at an ultra low priority.
	 */
	lwkt_setpri_self(TDPRI_USER_SCHEDULER);

	tsleep(dd->helper_thread, 0, "sched_thread_sleep", 0);

	for (;;) {
		/*
		 * We use the LWKT deschedule-interlock trick to avoid racing
		 * bsd4_rdyprocmask.  This means we cannot block through to the
		 * manual lwkt_switch() call we make below.
		 */
		crit_enter_gd(gd);
		tsleep_interlock(dd->helper_thread, 0);
		spin_lock(&bsd4_spin);
		ATOMIC_CPUMASK_ORMASK(bsd4_rdyprocmask, mask);

		clear_user_resched();	/* This satisfied the reschedule request */
		dd->rrcount = 0;	/* Reset the round-robin counter */

		if (CPUMASK_TESTMASK(bsd4_curprocmask, mask) == 0) {
			/*
			 * No thread is currently scheduled.
			 */
			KKASSERT(dd->uschedcp == NULL);
			if ((nlp = bsd4_chooseproc_locked(NULL)) != NULL) {
				KTR_COND_LOG(usched_sched_thread_no_process,
				    nlp->lwp_proc->p_pid == usched_bsd4_pid_debug,
				    gd->gd_cpuid,
				    nlp->lwp_proc->p_pid,
				    nlp->lwp_thread->td_gd->gd_cpuid);

				ATOMIC_CPUMASK_ORMASK(bsd4_curprocmask, mask);
				dd->upri = nlp->lwp_priority;
				dd->uschedcp = nlp;
				dd->rrcount = 0;	/* reset round robin */
				spin_unlock(&bsd4_spin);
				lwkt_acquire(nlp->lwp_thread);
				lwkt_schedule(nlp->lwp_thread);
			} else {
				spin_unlock(&bsd4_spin);
			}
		} else if (bsd4_runqcount) {
			if ((nlp = bsd4_chooseproc_locked(dd->uschedcp)) != NULL) {
				KTR_COND_LOG(usched_sched_thread_process,
				    nlp->lwp_proc->p_pid == usched_bsd4_pid_debug,
				    gd->gd_cpuid,
				    nlp->lwp_proc->p_pid,
				    nlp->lwp_thread->td_gd->gd_cpuid);

				dd->upri = nlp->lwp_priority;
				dd->uschedcp = nlp;
				dd->rrcount = 0;	/* reset round robin */
				spin_unlock(&bsd4_spin);
				lwkt_acquire(nlp->lwp_thread);
				lwkt_schedule(nlp->lwp_thread);
			} else {
				/*
				 * CHAINING CONDITION TRAIN
				 *
				 * We could not deal with the scheduler wakeup
				 * request on this cpu, locate a ready scheduler
				 * with no current lp assignment and chain to it.
				 *
				 * This ensures that a wakeup race which fails due
				 * to priority test does not leave other unscheduled
				 * cpus idle when the runqueue is not empty.
				 */
				tmpmask = bsd4_rdyprocmask;
				CPUMASK_NANDMASK(tmpmask, bsd4_curprocmask);
				CPUMASK_ANDMASK(tmpmask, smp_active_mask);
				if (CPUMASK_TESTNZERO(tmpmask)) {
					tmpid = BSFCPUMASK(tmpmask);
					tmpdd = &bsd4_pcpu[tmpid];
					ATOMIC_CPUMASK_NANDBIT(bsd4_rdyprocmask, tmpid);
					spin_unlock(&bsd4_spin);
					wakeup(tmpdd->helper_thread);
				} else {
					spin_unlock(&bsd4_spin);
				}

				KTR_LOG(usched_sched_thread_no_process_found,
				    gd->gd_cpuid, (unsigned long)CPUMASK_LOWMASK(tmpmask));
			}
		} else {
			/*
			 * The runq is empty.
			 */
			spin_unlock(&bsd4_spin);
		}

		/*
		 * We're descheduled unless someone scheduled us.  Switch away.
		 * Exiting the critical section will cause splz() to be called
		 * for us if interrupts and such are pending.
		 */
		crit_exit_gd(gd);
		tsleep(dd->helper_thread, PINTERLOCKED, "schslp", 0);
	}
}
/* sysctl stick_to_level parameter */
static int
sysctl_usched_bsd4_stick_to_level(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;

	new_val = usched_bsd4_stick_to_level;

	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val > cpu_topology_levels_number - 1 || new_val < 0)
		return (EINVAL);
	usched_bsd4_stick_to_level = new_val;

	return (0);
}
/*
 * Setup our scheduler helpers.  Note that curprocmask bit 0 has already
 * been cleared by rqinit() and we should not mess with it further.
 */
static void
sched_thread_cpu_init(void)
{
	int i;
	int smt_not_supported = 0;
	int cache_coherent_not_supported = 0;

	kprintf("Start usched_bsd4 helpers on cpus:\n");

	sysctl_ctx_init(&usched_bsd4_sysctl_ctx);
	usched_bsd4_sysctl_tree =
		SYSCTL_ADD_NODE(&usched_bsd4_sysctl_ctx,
				SYSCTL_STATIC_CHILDREN(_kern), OID_AUTO,
				"usched_bsd4", CTLFLAG_RD, 0, "");

	for (i = 0; i < ncpus; ++i) {
		bsd4_pcpu_t dd = &bsd4_pcpu[i];
		cpumask_t mask;

		CPUMASK_ASSBIT(mask, i);

		if (CPUMASK_TESTMASK(mask, smp_active_mask) == 0)
			continue;

		dd->cpunode = get_cpu_node_by_cpuid(i);

		if (dd->cpunode == NULL) {
			smt_not_supported = 1;
			cache_coherent_not_supported = 1;
			kprintf ("    cpu%d - WARNING: No CPU NODE "
				 "found for cpu\n", i);
		} else {
			switch (dd->cpunode->type) {
			case THREAD_LEVEL:
				kprintf ("    cpu%d - HyperThreading "
					 "available. Core siblings: ",
					 i);
				break;
			case CORE_LEVEL:
				smt_not_supported = 1;

				kprintf ("    cpu%d - No HT available, "
					 "multi-core/physical "
					 "cpu. Physical siblings: ",
					 i);
				break;
			case CHIP_LEVEL:
				smt_not_supported = 1;

				kprintf ("    cpu%d - No HT available, "
					 "single-core/physical cpu. "
					 "Package Siblings: ",
					 i);
				break;
			default:
				/* Let's go for safe defaults here */
				smt_not_supported = 1;
				cache_coherent_not_supported = 1;
				kprintf ("    cpu%d - Unknown cpunode->"
					 "type=%u. Siblings: ",
					 i,
					 (u_int)dd->cpunode->type);
				break;
			}

			if (dd->cpunode->parent_node != NULL) {
				kprint_cpuset(&dd->cpunode->
						parent_node->members);
				kprintf("\n");
			} else {
				kprintf(" no siblings\n");
			}
		}

		lwkt_create(sched_thread, NULL, &dd->helper_thread, NULL,
			    0, i, "usched %d", i);

		/*
		 * Allow user scheduling on the target cpu.  cpu #0 has already
		 * been enabled in rqinit().
		 */
		ATOMIC_CPUMASK_NANDMASK(bsd4_curprocmask, mask);
		ATOMIC_CPUMASK_ORMASK(bsd4_rdyprocmask, mask);
		dd->upri = PRIBASE_NULL;
	}

	/* usched_bsd4 sysctl configurable parameters */

	SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
		       SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
		       OID_AUTO, "rrinterval", CTLFLAG_RW,
		       &usched_bsd4_rrinterval, 0, "");
	SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
		       SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
		       OID_AUTO, "decay", CTLFLAG_RW,
		       &usched_bsd4_decay, 0, "Extra decay when not running");
	SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
		       SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
		       OID_AUTO, "batch_time", CTLFLAG_RW,
		       &usched_bsd4_batch_time, 0, "Min batch counter value");
	SYSCTL_ADD_LONG(&usched_bsd4_sysctl_ctx,
			SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
			OID_AUTO, "kicks", CTLFLAG_RW,
			&usched_bsd4_kicks, "Number of kickstarts");

	/* Add enable/disable option for SMT scheduling if supported */
	if (smt_not_supported) {
		usched_bsd4_smt = 0;
		SYSCTL_ADD_STRING(&usched_bsd4_sysctl_ctx,
				  SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
				  OID_AUTO, "smt", CTLFLAG_RD,
				  "NOT SUPPORTED", 0, "SMT NOT SUPPORTED");
	} else {
		usched_bsd4_smt = 1;
		SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
			       SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
			       OID_AUTO, "smt", CTLFLAG_RW,
			       &usched_bsd4_smt, 0, "Enable SMT scheduling");
	}

	/*
	 * Add enable/disable option for cache coherent scheduling
	 * if supported
	 */
	if (cache_coherent_not_supported) {
		usched_bsd4_cache_coherent = 0;
		SYSCTL_ADD_STRING(&usched_bsd4_sysctl_ctx,
				  SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
				  OID_AUTO, "cache_coherent", CTLFLAG_RD,
				  "NOT SUPPORTED", 0,
				  "Cache coherence NOT SUPPORTED");
	} else {
		usched_bsd4_cache_coherent = 1;
		SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
			       SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
			       OID_AUTO, "cache_coherent", CTLFLAG_RW,
			       &usched_bsd4_cache_coherent, 0,
			       "Enable/Disable cache coherent scheduling");

		SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
			       SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
			       OID_AUTO, "upri_affinity", CTLFLAG_RW,
			       &usched_bsd4_upri_affinity, 1,
			       "Number of PPQs in user priority check");

		SYSCTL_ADD_INT(&usched_bsd4_sysctl_ctx,
			       SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
			       OID_AUTO, "queue_checks", CTLFLAG_RW,
			       &usched_bsd4_queue_checks, 5,
			       "LWPs to check from a queue before giving up");

		SYSCTL_ADD_PROC(&usched_bsd4_sysctl_ctx,
				SYSCTL_CHILDREN(usched_bsd4_sysctl_tree),
				OID_AUTO, "stick_to_level",
				CTLTYPE_INT | CTLFLAG_RW,
				NULL, sizeof usched_bsd4_stick_to_level,
				sysctl_usched_bsd4_stick_to_level, "I",
				"Stick a process to this level.  See sysctl "
				"parameter hw.cpu_topology.level_description");
	}
}
SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
	sched_thread_cpu_init, NULL);