sys/kern/usched_dfly.c (dragonfly.git)
blob 7c3d53f8b2f94e80147d5e09922670523f284e5c
1 /*
2 * Copyright (c) 2012 The DragonFly Project. All rights reserved.
3 * Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>. All rights reserved.
5 * This code is derived from software contributed to The DragonFly Project
6 * by Matthew Dillon <dillon@backplane.com>,
7 * by Mihai Carabas <mihai.carabas@gmail.com>
8 * and many others.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in
18 * the documentation and/or other materials provided with the
19 * distribution.
20 * 3. Neither the name of The DragonFly Project nor the names of its
21 * contributors may be used to endorse or promote products derived
22 * from this software without specific, prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
27 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
28 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
29 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
30 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
32 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
33 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
34 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/lock.h>
41 #include <sys/queue.h>
42 #include <sys/proc.h>
43 #include <sys/rtprio.h>
44 #include <sys/uio.h>
45 #include <sys/sysctl.h>
46 #include <sys/resourcevar.h>
47 #include <sys/spinlock.h>
48 #include <sys/cpu_topology.h>
49 #include <sys/thread2.h>
50 #include <sys/spinlock2.h>
51 #include <sys/mplock2.h>
53 #include <sys/ktr.h>
55 #include <machine/cpu.h>
56 #include <machine/smp.h>
59 * Priorities. Note that with 32 run queues per scheduler each queue
60 * represents four priority levels.
63 int dfly_rebalanced;
65 #define MAXPRI 128
66 #define PRIMASK (MAXPRI - 1)
67 #define PRIBASE_REALTIME 0
68 #define PRIBASE_NORMAL MAXPRI
69 #define PRIBASE_IDLE (MAXPRI * 2)
70 #define PRIBASE_THREAD (MAXPRI * 3)
71 #define PRIBASE_NULL (MAXPRI * 4)
73 #define NQS 32 /* 32 run queues. */
74 #define PPQ (MAXPRI / NQS) /* priorities per queue */
75 #define PPQMASK (PPQ - 1)
78 * NICEPPQ - number of nice units per priority queue
79 * ESTCPUPPQ - number of estcpu units per priority queue
80 * ESTCPUMAX - number of estcpu units
82 #define NICEPPQ 2
83 #define ESTCPUPPQ 512
84 #define ESTCPUMAX (ESTCPUPPQ * NQS)
85 #define BATCHMAX (ESTCPUFREQ * 30)
86 #define PRIO_RANGE (PRIO_MAX - PRIO_MIN + 1)
88 #define ESTCPULIM(v) min((v), ESTCPUMAX)
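/*
 * Editor's note: a minimal sketch, not part of the original file.  With
 * MAXPRI 128 and NQS 32, PPQ is 4, so a normal-class priority is folded
 * onto one of the 32 queues by masking with PRIMASK and dividing by PPQ,
 * exactly as dfly_resetpriority() does further down.  The helper name
 * below is hypothetical.
 */
#if 0
static __inline int
example_rqindex(int newpriority)
{
	/* 0..127 maps to queue 0..31; four priority levels share a queue */
	return ((newpriority & PRIMASK) / PPQ);
}
#endif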
90 TAILQ_HEAD(rq, lwp);
92 #define lwp_priority lwp_usdata.dfly.priority
93 #define lwp_forked lwp_usdata.dfly.forked
94 #define lwp_rqindex lwp_usdata.dfly.rqindex
95 #define lwp_estcpu lwp_usdata.dfly.estcpu
96 #define lwp_estfast lwp_usdata.dfly.estfast
97 #define lwp_uload lwp_usdata.dfly.uload
98 #define lwp_rqtype lwp_usdata.dfly.rqtype
99 #define lwp_qcpu lwp_usdata.dfly.qcpu
100 #define lwp_rrcount lwp_usdata.dfly.rrcount
102 struct usched_dfly_pcpu {
103 struct spinlock spin;
104 struct thread *helper_thread;
105 u_short scancpu;
106 short upri;
107 int uload;
108 int ucount;
109 struct lwp *uschedcp;
110 struct rq queues[NQS];
111 struct rq rtqueues[NQS];
112 struct rq idqueues[NQS];
113 u_int32_t queuebits;
114 u_int32_t rtqueuebits;
115 u_int32_t idqueuebits;
116 int runqcount;
117 int cpuid;
118 cpumask_t cpumask;
119 cpu_node_t *cpunode;
122 typedef struct usched_dfly_pcpu *dfly_pcpu_t;
124 static void dfly_acquire_curproc(struct lwp *lp);
125 static void dfly_release_curproc(struct lwp *lp);
126 static void dfly_select_curproc(globaldata_t gd);
127 static void dfly_setrunqueue(struct lwp *lp);
128 static void dfly_setrunqueue_dd(dfly_pcpu_t rdd, struct lwp *lp);
129 static void dfly_schedulerclock(struct lwp *lp, sysclock_t period,
130 sysclock_t cpstamp);
131 static void dfly_recalculate_estcpu(struct lwp *lp);
132 static void dfly_resetpriority(struct lwp *lp);
133 static void dfly_forking(struct lwp *plp, struct lwp *lp);
134 static void dfly_exiting(struct lwp *lp, struct proc *);
135 static void dfly_uload_update(struct lwp *lp);
136 static void dfly_yield(struct lwp *lp);
137 static void dfly_changeqcpu_locked(struct lwp *lp,
138 dfly_pcpu_t dd, dfly_pcpu_t rdd);
139 static dfly_pcpu_t dfly_choose_best_queue(struct lwp *lp);
140 static dfly_pcpu_t dfly_choose_worst_queue(dfly_pcpu_t dd);
141 static dfly_pcpu_t dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp);
142 static void dfly_need_user_resched_remote(void *dummy);
143 static struct lwp *dfly_chooseproc_locked(dfly_pcpu_t rdd, dfly_pcpu_t dd,
144 struct lwp *chklp, int worst);
145 static void dfly_remrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
146 static void dfly_setrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
147 static void dfly_changedcpu(struct lwp *lp);
149 struct usched usched_dfly = {
150 { NULL },
151 "dfly", "Original DragonFly Scheduler",
152 NULL, /* default registration */
153 NULL, /* default deregistration */
154 dfly_acquire_curproc,
155 dfly_release_curproc,
156 dfly_setrunqueue,
157 dfly_schedulerclock,
158 dfly_recalculate_estcpu,
159 dfly_resetpriority,
160 dfly_forking,
161 dfly_exiting,
162 dfly_uload_update,
163 NULL, /* setcpumask not supported */
164 dfly_yield,
165 dfly_changedcpu
169 * We have NQS (32) run queues per scheduling class. For the normal
170 * class, there are 128 priorities scaled onto these 32 queues. New
171 * processes are added to the last entry in each queue, and processes
172 * are selected for running by taking them from the head and maintaining
173 * a simple FIFO arrangement. Realtime and Idle priority processes have
174 * an explicit 0-31 priority which maps directly onto their class queue
175 * index. When a queue has something in it, the corresponding bit is
176 * set in the queuebits variable, allowing a single read to determine
177 * the state of all 32 queues and then a ffs() to find the first busy
178 * queue.
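/*
 * Editor's sketch (hypothetical helper, not in the original source): the
 * "single read plus ffs()" selection described above.  Each class keeps a
 * 32-bit word with one bit per queue; the lowest set bit is the best
 * non-empty queue, which is what dfly_chooseproc_locked() computes with
 * bsfl() below.
 */
#if 0
static __inline int
example_first_busy_queue(u_int32_t qbits)
{
	if (qbits == 0)
		return (-1);	/* no thread queued in this class */
	return (bsfl(qbits));	/* index of lowest set bit */
}
#endif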
180 /* currently running a user process */
181 static cpumask_t dfly_curprocmask = CPUMASK_INITIALIZER_ALLONES;
182 static cpumask_t dfly_rdyprocmask; /* ready to accept a user process */
183 static volatile int dfly_ucount; /* total running on whole system */
184 static struct usched_dfly_pcpu dfly_pcpu[MAXCPU];
185 static struct sysctl_ctx_list usched_dfly_sysctl_ctx;
186 static struct sysctl_oid *usched_dfly_sysctl_tree;
188 /* Debug info exposed through debug.* sysctl */
190 static int usched_dfly_debug = -1;
191 SYSCTL_INT(_debug, OID_AUTO, dfly_scdebug, CTLFLAG_RW,
192 &usched_dfly_debug, 0,
193 "Print debug information for this pid");
195 static int usched_dfly_pid_debug = -1;
196 SYSCTL_INT(_debug, OID_AUTO, dfly_pid_debug, CTLFLAG_RW,
197 &usched_dfly_pid_debug, 0,
198 "Print KTR debug information for this pid");
200 static int usched_dfly_chooser = 0;
201 SYSCTL_INT(_debug, OID_AUTO, dfly_chooser, CTLFLAG_RW,
202 &usched_dfly_chooser, 0,
203 "Print KTR debug information for this pid");
206 * Tuning usched_dfly - configurable through kern.usched_dfly.
208 * weight1 - Tries to keep threads on their current cpu. If you
209 * make this value too large the scheduler will not be
210 * able to load-balance large loads.
212 * weight2 - If non-zero, detects thread pairs undergoing synchronous
213 * communications and tries to move them closer together.
214 * Behavior is adjusted by bit 4 of features (0x10).
216 * WARNING! Weight2 is a ridiculously sensitive parameter,
217 * a small value is recommended.
219 * weight3 - Weighting based on the number of recently runnable threads
220 * on the userland scheduling queue (ignoring their loads).
221 * A nominal value here prevents high-priority (low-load)
222 * threads from accumulating on one cpu core when other
223 * cores are available.
225 * This value should be left fairly small relative to weight1
226 * and weight4.
228 * weight4 - Weighting based on other cpu queues being available
229 * or running processes with higher lwp_priority values.
231 * This allows a thread to migrate to another nearby cpu if it
232 * is unable to run on the current cpu based on the other cpu
233 * being idle or running a lower priority (higher lwp_priority)
234 * thread. This value should be large enough to override weight1
236 * features - These flags can be set or cleared to enable or disable various
237 * features.
239 * 0x01 Enable idle-cpu pulling (default)
240 * 0x02 Enable proactive pushing (default)
241 * 0x04 Enable rebalancing rover (default)
242 * 0x08 Enable more proactive pushing (default)
243 * 0x10 (flip weight2 limit on same cpu) (default)
244 * 0x20 choose best cpu for forked process
245 * 0x40 choose current cpu for forked process
246 * 0x80 choose random cpu for forked process (default)
248 static int usched_dfly_smt = 0;
249 static int usched_dfly_cache_coherent = 0;
250 static int usched_dfly_weight1 = 200; /* keep thread on current cpu */
251 static int usched_dfly_weight2 = 180; /* synchronous peer's current cpu */
252 static int usched_dfly_weight3 = 40; /* number of threads on queue */
253 static int usched_dfly_weight4 = 160; /* availability of idle cores */
254 static int usched_dfly_features = 0x8F; /* allow pulls */
255 static int usched_dfly_fast_resched = 0;/* delta priority / resched */
256 static int usched_dfly_swmask = ~PPQMASK; /* mask applied to td_upri hint */
257 static int usched_dfly_rrinterval = (ESTCPUFREQ + 9) / 10;
258 static int usched_dfly_decay = 8;
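/*
 * Editor's note (illustrative, based on the feature list above): the
 * default features value 0x8F sets 0x01 (idle-cpu pulling), 0x02 and 0x08
 * (both levels of proactive pushing), 0x04 (the rebalancing rover) and
 * 0x80 (random cpu placement for forked processes).  The fork-placement
 * bits 0x20/0x40/0x80 are tested in that order by dfly_setrunqueue().
 * All of these knobs live under the kern.usched_dfly sysctl tree.
 */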
260 /* KTR debug printings */
262 KTR_INFO_MASTER(usched);
264 #if !defined(KTR_USCHED_DFLY)
265 #define KTR_USCHED_DFLY KTR_ALL
266 #endif
268 KTR_INFO(KTR_USCHED_DFLY, usched, chooseproc, 0,
269 "USCHED_DFLY(chooseproc: pid %d, old_cpuid %d, curr_cpuid %d)",
270 pid_t pid, int old_cpuid, int curr);
273 * This function is called when the kernel intends to return to userland.
274 * It is responsible for making the thread the current designated userland
275 * thread for this cpu, blocking if necessary.
277 * The kernel will not depress our LWKT priority until after we return,
278 * in case we have to shove over to another cpu.
280 * We must determine our thread's disposition before we switch away. This
281 * is very sensitive code.
283 * WARNING! THIS FUNCTION IS ALLOWED TO CAUSE THE CURRENT THREAD TO MIGRATE
284 * TO ANOTHER CPU! Because most of the kernel assumes that no migration will
285 * occur, this function is called only under very controlled circumstances.
287 static void
288 dfly_acquire_curproc(struct lwp *lp)
290 globaldata_t gd;
291 dfly_pcpu_t dd;
292 dfly_pcpu_t rdd;
293 thread_t td;
294 int force_resched;
297 * Make sure we aren't sitting on a tsleep queue.
299 td = lp->lwp_thread;
300 crit_enter_quick(td);
301 if (td->td_flags & TDF_TSLEEPQ)
302 tsleep_remove(td);
303 dfly_recalculate_estcpu(lp);
305 gd = mycpu;
306 dd = &dfly_pcpu[gd->gd_cpuid];
309 * Process any pending interrupts/ipi's, then handle reschedule
310 * requests. dfly_release_curproc() will try to assign a new
311 * uschedcp that isn't us and otherwise NULL it out.
313 force_resched = 0;
314 if ((td->td_mpflags & TDF_MP_BATCH_DEMARC) &&
315 lp->lwp_rrcount >= usched_dfly_rrinterval / 2) {
316 force_resched = 1;
319 if (user_resched_wanted()) {
320 if (dd->uschedcp == lp)
321 force_resched = 1;
322 clear_user_resched();
323 dfly_release_curproc(lp);
327 * Loop until we are the current user thread.
329 * NOTE: dd spinlock not held at top of loop.
331 if (dd->uschedcp == lp)
332 lwkt_yield_quick();
334 while (dd->uschedcp != lp) {
335 lwkt_yield_quick();
337 spin_lock(&dd->spin);
339 if (force_resched &&
340 (usched_dfly_features & 0x08) &&
341 (rdd = dfly_choose_best_queue(lp)) != dd) {
343 * We are not or are no longer the current lwp and a
344 * forced reschedule was requested. Figure out the
345 * best cpu to run on (our current cpu will be given
346 * significant weight).
348 * (if a reschedule was not requested we want to
349 * move this step after the uschedcp tests).
351 dfly_changeqcpu_locked(lp, dd, rdd);
352 spin_unlock(&dd->spin);
353 lwkt_deschedule(lp->lwp_thread);
354 dfly_setrunqueue_dd(rdd, lp);
355 lwkt_switch();
356 gd = mycpu;
357 dd = &dfly_pcpu[gd->gd_cpuid];
358 continue;
362 * Either no reschedule was requested or the best queue was
363 * dd, and no current process has been selected. We can
364 * trivially become the current lwp on the current cpu.
366 if (dd->uschedcp == NULL) {
367 atomic_clear_int(&lp->lwp_thread->td_mpflags,
368 TDF_MP_DIDYIELD);
369 ATOMIC_CPUMASK_ORBIT(dfly_curprocmask, gd->gd_cpuid);
370 dd->uschedcp = lp;
371 dd->upri = lp->lwp_priority;
372 KKASSERT(lp->lwp_qcpu == dd->cpuid);
373 spin_unlock(&dd->spin);
374 break;
378 * Put us back on the same run queue unconditionally.
380 * Set rrinterval to force placement at end of queue.
381 * Select the worst queue to ensure we round-robin,
382 * but do not change estcpu.
384 if (lp->lwp_thread->td_mpflags & TDF_MP_DIDYIELD) {
385 u_int32_t tsqbits;
387 switch(lp->lwp_rqtype) {
388 case RTP_PRIO_NORMAL:
389 tsqbits = dd->queuebits;
390 spin_unlock(&dd->spin);
392 lp->lwp_rrcount = usched_dfly_rrinterval;
393 if (tsqbits)
394 lp->lwp_rqindex = bsrl(tsqbits);
395 break;
396 default:
397 spin_unlock(&dd->spin);
398 break;
400 lwkt_deschedule(lp->lwp_thread);
401 dfly_setrunqueue_dd(dd, lp);
402 atomic_clear_int(&lp->lwp_thread->td_mpflags,
403 TDF_MP_DIDYIELD);
404 lwkt_switch();
405 gd = mycpu;
406 dd = &dfly_pcpu[gd->gd_cpuid];
407 continue;
411 * Can we steal the current designated user thread?
413 * If we do the other thread will stall when it tries to
414 * return to userland, possibly rescheduling elsewhere.
416 * It is important to do a masked test to avoid the edge
417 * case where two near-equal-priority threads are constantly
418 * interrupting each other.
420 * In the exact match case another thread has already gained
421 * uschedcp and lowered its priority; if we steal it the
422 * other thread will stay stuck on the LWKT runq and not
423 * push to another cpu. So don't steal on equal-priority even
424 * though it might appear to be more beneficial due to not
425 * having to switch back to the other thread's context.
427 * usched_dfly_fast_resched requires that two threads be
428 * significantly far apart in priority in order to interrupt.
430 * If better but not sufficiently far apart, the current
431 * uschedcp will be interrupted at the next scheduler clock.
433 if (dd->uschedcp &&
434 (dd->upri & ~PPQMASK) >
435 (lp->lwp_priority & ~PPQMASK) + usched_dfly_fast_resched) {
436 dd->uschedcp = lp;
437 dd->upri = lp->lwp_priority;
438 KKASSERT(lp->lwp_qcpu == dd->cpuid);
439 spin_unlock(&dd->spin);
440 break;
443 * We are not the current lwp, figure out the best cpu
444 * to run on (our current cpu will be given significant
445 * weight). Loop on cpu change.
447 if ((usched_dfly_features & 0x02) &&
448 force_resched == 0 &&
449 (rdd = dfly_choose_best_queue(lp)) != dd) {
450 dfly_changeqcpu_locked(lp, dd, rdd);
451 spin_unlock(&dd->spin);
452 lwkt_deschedule(lp->lwp_thread);
453 dfly_setrunqueue_dd(rdd, lp);
454 lwkt_switch();
455 gd = mycpu;
456 dd = &dfly_pcpu[gd->gd_cpuid];
457 continue;
461 * We cannot become the current lwp; place the lp on the
462 * run-queue of this or another cpu and deschedule ourselves.
464 * When we are reactivated we will have another chance.
466 * Reload after a switch or setrunqueue/switch possibly
467 * moved us to another cpu.
469 spin_unlock(&dd->spin);
470 lwkt_deschedule(lp->lwp_thread);
471 dfly_setrunqueue_dd(dd, lp);
472 lwkt_switch();
473 gd = mycpu;
474 dd = &dfly_pcpu[gd->gd_cpuid];
478 * Make sure upri is synchronized, then yield to LWKT threads as
479 * needed before returning. This could result in another reschedule.
480 * XXX
482 crit_exit_quick(td);
484 KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
488 * DFLY_RELEASE_CURPROC
490 * This routine detaches the current thread from the userland scheduler,
491 * usually because the thread needs to run or block in the kernel (at
492 * kernel priority) for a while.
494 * This routine is also responsible for selecting a new thread to
495 * make the current thread.
497 * NOTE: This implementation differs from the dummy example in that
498 * dfly_select_curproc() is able to select the current process, whereas
499 * dummy_select_curproc() is not able to select the current process.
500 * This means we have to NULL out uschedcp.
502 * Additionally, note that we may already be on a run queue if releasing
503 * via the lwkt_switch() in dfly_setrunqueue().
505 static void
506 dfly_release_curproc(struct lwp *lp)
508 globaldata_t gd = mycpu;
509 dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
512 * Make sure td_wakefromcpu is defaulted. This will be overwritten
513 * by wakeup().
515 if (dd->uschedcp == lp) {
516 KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
517 spin_lock(&dd->spin);
518 if (dd->uschedcp == lp) {
519 dd->uschedcp = NULL; /* don't let lp be selected */
520 dd->upri = PRIBASE_NULL;
521 ATOMIC_CPUMASK_NANDBIT(dfly_curprocmask, gd->gd_cpuid);
522 spin_unlock(&dd->spin);
523 dfly_select_curproc(gd);
524 } else {
525 spin_unlock(&dd->spin);
531 * DFLY_SELECT_CURPROC
533 * Select a new current process for this cpu and clear any pending user
534 * reschedule request. The cpu currently has no current process.
536 * This routine is also responsible for equal-priority round-robining,
537 * typically triggered from dfly_schedulerclock(). In our dummy example
538 * all the 'user' threads are LWKT scheduled all at once and we just
539 * call lwkt_switch().
541 * The calling process is not on the queue and cannot be selected.
543 static
544 void
545 dfly_select_curproc(globaldata_t gd)
547 dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
548 struct lwp *nlp;
549 int cpuid = gd->gd_cpuid;
551 crit_enter_gd(gd);
553 spin_lock(&dd->spin);
554 nlp = dfly_chooseproc_locked(dd, dd, dd->uschedcp, 0);
556 if (nlp) {
557 ATOMIC_CPUMASK_ORBIT(dfly_curprocmask, cpuid);
558 dd->upri = nlp->lwp_priority;
559 dd->uschedcp = nlp;
560 #if 0
561 dd->rrcount = 0; /* reset round robin */
562 #endif
563 spin_unlock(&dd->spin);
564 lwkt_acquire(nlp->lwp_thread);
565 lwkt_schedule(nlp->lwp_thread);
566 } else {
567 spin_unlock(&dd->spin);
569 crit_exit_gd(gd);
573 * Place the specified lwp on the user scheduler's run queue. This routine
574 * must be called with the thread descheduled. The lwp must be runnable.
575 * It must not be possible for anyone else to explicitly schedule this thread.
577 * The thread may be the current thread as a special case.
579 static void
580 dfly_setrunqueue(struct lwp *lp)
582 dfly_pcpu_t dd;
583 dfly_pcpu_t rdd;
586 * First validate the process LWKT state.
588 KASSERT(lp->lwp_stat == LSRUN, ("setrunqueue: lwp not LSRUN"));
589 KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0,
590 ("lwp %d/%d already on runq! flag %08x/%08x", lp->lwp_proc->p_pid,
591 lp->lwp_tid, lp->lwp_proc->p_flags, lp->lwp_flags));
592 KKASSERT((lp->lwp_thread->td_flags & TDF_RUNQ) == 0);
595 * NOTE: dd/rdd do not necessarily represent the current cpu.
596 * Instead they may represent the cpu the thread was last
597 * scheduled on or inherited by its parent.
599 dd = &dfly_pcpu[lp->lwp_qcpu];
600 rdd = dd;
603 * This process is not supposed to be scheduled anywhere or assigned
604 * as the current process anywhere. Assert the condition.
606 KKASSERT(rdd->uschedcp != lp);
609 * Ok, we have to setrunqueue some target cpu and request a reschedule
610 * if necessary.
612 * We have to choose the best target cpu. It might not be the current
613 * target even if the current cpu has no running user thread (for
614 * example, because the current cpu might be a hyperthread and its
615 * sibling has a thread assigned).
617 * If we just forked it is most optimal to run the child on the same
618 * cpu just in case the parent decides to wait for it (thus getting
619 * off that cpu). As long as there is nothing else runnable on the
620 * cpu, that is. If we did this unconditionally a parent forking
621 * multiple children before waiting (e.g. make -j N) leaves other
622 * cpus idle that could be working.
624 if (lp->lwp_forked) {
625 lp->lwp_forked = 0;
626 if (usched_dfly_features & 0x20)
627 rdd = dfly_choose_best_queue(lp);
628 else if (usched_dfly_features & 0x40)
629 rdd = &dfly_pcpu[lp->lwp_qcpu];
630 else if (usched_dfly_features & 0x80)
631 rdd = dfly_choose_queue_simple(rdd, lp);
632 else if (dfly_pcpu[lp->lwp_qcpu].runqcount)
633 rdd = dfly_choose_best_queue(lp);
634 else
635 rdd = &dfly_pcpu[lp->lwp_qcpu];
636 } else {
637 rdd = dfly_choose_best_queue(lp);
638 /* rdd = &dfly_pcpu[lp->lwp_qcpu]; */
640 if (lp->lwp_qcpu != rdd->cpuid) {
641 spin_lock(&dd->spin);
642 dfly_changeqcpu_locked(lp, dd, rdd);
643 spin_unlock(&dd->spin);
645 dfly_setrunqueue_dd(rdd, lp);
649 * Change qcpu to rdd->cpuid. The dd the lp is CURRENTLY on must be
650 * spin-locked by the caller. rdd does not have to be.
652 static void
653 dfly_changeqcpu_locked(struct lwp *lp, dfly_pcpu_t dd, dfly_pcpu_t rdd)
655 if (lp->lwp_qcpu != rdd->cpuid) {
656 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
657 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
658 atomic_add_int(&dd->uload, -lp->lwp_uload);
659 atomic_add_int(&dd->ucount, -1);
660 atomic_add_int(&dfly_ucount, -1);
662 lp->lwp_qcpu = rdd->cpuid;
667 * Place lp on rdd's runqueue. Nothing is locked on call. This function
668 * also performs all necessary ancillary notification actions.
670 static void
671 dfly_setrunqueue_dd(dfly_pcpu_t rdd, struct lwp *lp)
673 globaldata_t rgd;
676 * We might be moving the lp to another cpu's run queue, and once
677 * on the runqueue (even if it is our cpu's), another cpu can rip
678 * it away from us.
680 * TDF_MIGRATING might already be set if this is part of a
681 * remrunqueue+setrunqueue sequence.
683 if ((lp->lwp_thread->td_flags & TDF_MIGRATING) == 0)
684 lwkt_giveaway(lp->lwp_thread);
686 rgd = globaldata_find(rdd->cpuid);
689 * We lose control of the lp the moment we release the spinlock
690 * after having placed it on the queue. i.e. another cpu could pick
691 * it up, or it could exit, or its priority could be further
692 * adjusted, or something like that.
694 * WARNING! rdd can point to a foreign cpu!
696 spin_lock(&rdd->spin);
697 dfly_setrunqueue_locked(rdd, lp);
700 * Potentially interrupt the currently-running thread
702 if ((rdd->upri & ~PPQMASK) <= (lp->lwp_priority & ~PPQMASK)) {
704 * Currently running thread is better or same, do not
705 * interrupt.
707 spin_unlock(&rdd->spin);
708 } else if ((rdd->upri & ~PPQMASK) <= (lp->lwp_priority & ~PPQMASK) +
709 usched_dfly_fast_resched) {
711 * Currently running thread is not better, but not so bad
712 * that we need to interrupt it. Let it run for one more
713 * scheduler tick.
715 if (rdd->uschedcp &&
716 rdd->uschedcp->lwp_rrcount < usched_dfly_rrinterval) {
717 rdd->uschedcp->lwp_rrcount = usched_dfly_rrinterval - 1;
719 spin_unlock(&rdd->spin);
720 } else if (rgd == mycpu) {
722 * We should interrupt the currently running thread, which
723 * is on the current cpu. However, if DIDYIELD is set we
724 * round-robin unconditionally and do not interrupt it.
726 spin_unlock(&rdd->spin);
727 if (rdd->uschedcp == NULL)
728 wakeup_mycpu(rdd->helper_thread); /* XXX */
729 if ((lp->lwp_thread->td_mpflags & TDF_MP_DIDYIELD) == 0)
730 need_user_resched();
731 } else {
733 * We should interrupt the currently running thread, which
734 * is on a different cpu.
736 spin_unlock(&rdd->spin);
737 lwkt_send_ipiq(rgd, dfly_need_user_resched_remote, NULL);
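/*
 * Editor's sketch (hypothetical helper restating the decision ladder
 * above): with PPQ-masked priorities, the newly queued lwp only preempts
 * the running uschedcp when it is better by more than
 * usched_dfly_fast_resched; when it is better but within that window the
 * running thread is simply clocked out at the next tick via lwp_rrcount.
 */
#if 0
static __inline int
example_should_preempt(short running_upri, int new_priority)
{
	if ((running_upri & ~PPQMASK) <= (new_priority & ~PPQMASK))
		return (0);	/* running thread is better or the same */
	if ((running_upri & ~PPQMASK) <=
	    (new_priority & ~PPQMASK) + usched_dfly_fast_resched)
		return (0);	/* close enough, let it finish its tick */
	return (1);		/* interrupt now */
}
#endif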
742 * This routine is called from a systimer IPI. It MUST be MP-safe and
743 * the BGL IS NOT HELD ON ENTRY. This routine is called at ESTCPUFREQ on
744 * each cpu.
746 static
747 void
748 dfly_schedulerclock(struct lwp *lp, sysclock_t period, sysclock_t cpstamp)
750 globaldata_t gd = mycpu;
751 dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
754 * Spinlocks also hold a critical section so there should not be
755 * any active.
757 KKASSERT(gd->gd_spinlocks == 0 || dumping);
760 * If lp is NULL we might be contended and lwkt_switch() may have
761 * cycled into the idle thread. Apply the tick to the current
762 * process on this cpu if it is contended.
764 if (gd->gd_curthread == &gd->gd_idlethread) {
765 lp = dd->uschedcp;
766 if (lp && (lp->lwp_thread == NULL ||
767 lp->lwp_thread->td_contended == 0)) {
768 lp = NULL;
773 * Dock thread for tick
775 if (lp) {
777 * Do we need to round-robin? We round-robin 10 times a
778 * second. This should only occur for cpu-bound batch
779 * processes.
781 if (++lp->lwp_rrcount >= usched_dfly_rrinterval) {
782 lp->lwp_thread->td_wakefromcpu = -1;
783 need_user_resched();
787 * Adjust estcpu upward using a real time equivalent
788 * calculation, and recalculate lp's priority.
790 lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu +
791 ESTCPUMAX / ESTCPUFREQ + 1);
792 dfly_resetpriority(lp);
796 * Rebalance two cpus every 8 ticks, pulling the worst thread
797 * from the worst cpu's queue into a rotating cpu number.
799 * This mechanic is needed because the push algorithms can
800 * steady-state in a non-optimal configuration. We need to mix it
801 * up a little, even if it means breaking up a paired thread, so
802 * the push algorithms can rebalance the degenerate conditions.
803 * This portion of the algorithm exists to ensure stability at the
804 * selected weightings.
806 * Because we might be breaking up optimal conditions we do not want
807 * to execute this too quickly, hence we only rebalance approximately
808 * ~7-8 times per second. The pushes, on the other hand, are capable
809 * of moving threads to other cpus at a much higher rate.
811 * We choose the most heavily loaded thread from the worst queue
812 * in order to ensure that multiple heavy-weight threads on the same
813 * queue get broken up, and also because these threads are the most
814 * likely to be able to remain in place. Hopefully then any pairings,
815 * if applicable, migrate to where these threads are.
817 if ((usched_dfly_features & 0x04) &&
818 ((u_int)sched_ticks & 7) == 0 &&
819 (u_int)sched_ticks / 8 % ncpus == gd->gd_cpuid) {
821 * Our cpu is up.
823 struct lwp *nlp;
824 dfly_pcpu_t rdd;
826 rdd = dfly_choose_worst_queue(dd);
827 if (rdd) {
828 spin_lock(&dd->spin);
829 if (spin_trylock(&rdd->spin)) {
830 nlp = dfly_chooseproc_locked(rdd, dd, NULL, 1);
831 spin_unlock(&rdd->spin);
832 if (nlp == NULL)
833 spin_unlock(&dd->spin);
834 } else {
835 spin_unlock(&dd->spin);
836 nlp = NULL;
838 } else {
839 nlp = NULL;
841 /* dd->spin held if nlp != NULL */
844 * Either schedule it or add it to our queue.
846 if (nlp &&
847 (nlp->lwp_priority & ~PPQMASK) < (dd->upri & ~PPQMASK)) {
848 ATOMIC_CPUMASK_ORMASK(dfly_curprocmask, dd->cpumask);
849 dd->upri = nlp->lwp_priority;
850 dd->uschedcp = nlp;
851 #if 0
852 dd->rrcount = 0; /* reset round robin */
853 #endif
854 spin_unlock(&dd->spin);
855 lwkt_acquire(nlp->lwp_thread);
856 lwkt_schedule(nlp->lwp_thread);
857 } else if (nlp) {
858 dfly_setrunqueue_locked(dd, nlp);
859 spin_unlock(&dd->spin);
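/*
 * Editor's sketch (hypothetical helper): the rover gating used above.
 * The low three bits of sched_ticks select every 8th tick, and the tick
 * count divided by 8, modulo ncpus, rotates which cpu performs the pull,
 * so exactly one cpu system-wide rebalances per 8-tick window.
 */
#if 0
static __inline int
example_rebalance_turn(u_int ticks, int cpuid)
{
	return ((ticks & 7) == 0 && (ticks / 8) % ncpus == cpuid);
}
#endif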
865 * Called from acquire and from kern_synch's one-second timer (one of the
866 * callout helper threads) with a critical section held.
868 * Adjust p_estcpu based on our single-cpu load, p_nice, and compensate for
869 * overall system load.
871 * Note that no recalculation occurs for a process which sleeps and wakes
872 * up in the same tick. That is, a system doing thousands of context
873 * switches per second will still only do serious estcpu calculations
874 * ESTCPUFREQ times per second.
876 static
877 void
878 dfly_recalculate_estcpu(struct lwp *lp)
880 globaldata_t gd = mycpu;
881 sysclock_t cpbase;
882 sysclock_t ttlticks;
883 int estcpu;
884 int decay_factor;
885 int ucount;
888 * We have to subtract periodic to get the last schedclock
889 * timeout time, otherwise we would get the upcoming timeout.
890 * Keep in mind that a process can migrate between cpus and
891 * while the scheduler clock should be very close, boundary
892 * conditions could lead to a small negative delta.
894 cpbase = gd->gd_schedclock.time - gd->gd_schedclock.periodic;
896 if (lp->lwp_slptime > 1) {
898 * Too much time has passed, do a coarse correction.
900 lp->lwp_estcpu = lp->lwp_estcpu >> 1;
901 dfly_resetpriority(lp);
902 lp->lwp_cpbase = cpbase;
903 lp->lwp_cpticks = 0;
904 lp->lwp_estfast = 0;
905 } else if (lp->lwp_cpbase != cpbase) {
907 * Adjust estcpu if we are in a different tick. Don't waste
908 * time if we are in the same tick.
910 * First calculate the number of ticks in the measurement
911 * interval. The ttlticks calculation can wind up 0 due to
912 * a bug in the handling of lwp_slptime (as yet not found),
913 * so make sure we do not get a divide by 0 panic.
915 ttlticks = (cpbase - lp->lwp_cpbase) /
916 gd->gd_schedclock.periodic;
917 if ((ssysclock_t)ttlticks < 0) {
918 ttlticks = 0;
919 lp->lwp_cpbase = cpbase;
921 if (ttlticks == 0)
922 return;
923 updatepcpu(lp, lp->lwp_cpticks, ttlticks);
926 * Calculate the percentage of one cpu being used then
927 * compensate for any system load in excess of ncpus.
929 * For example, if we have 8 cores and 16 running cpu-bound
930 * processes then all things being equal each process will
931 * get 50% of one cpu. We need to pump this value back
932 * up to 100% so the estcpu calculation properly adjusts
933 * the process's dynamic priority.
935 * estcpu is scaled by ESTCPUMAX, pctcpu is scaled by FSCALE.
937 estcpu = (lp->lwp_pctcpu * ESTCPUMAX) >> FSHIFT;
938 ucount = dfly_ucount;
939 if (ucount > ncpus) {
940 estcpu += estcpu * (ucount - ncpus) / ncpus;
943 if (usched_dfly_debug == lp->lwp_proc->p_pid) {
944 kprintf("pid %d lwp %p estcpu %3d %3d cp %d/%d",
945 lp->lwp_proc->p_pid, lp,
946 estcpu, lp->lwp_estcpu,
947 lp->lwp_cpticks, ttlticks);
951 * Adjust lp->lwp_estcpu. The decay factor determines how
952 * quickly lwp_estcpu collapses to its realtime calculation.
953 * A slower collapse gives us a more accurate number over
954 * the long term but can create problems with bursty threads
955 * or threads which become cpu hogs.
957 * To solve this problem, newly started lwps and lwps which
958 * are restarting after having been asleep for a while are
959 * given a much, much faster decay in order to quickly
960 * detect whether they become cpu-bound.
962 * NOTE: p_nice is accounted for in dfly_resetpriority(),
963 * and not here, but we must still ensure that a
964 * cpu-bound nice -20 process does not completely
965 * override a cpu-bound nice +20 process.
967 * NOTE: We must use ESTCPULIM() here to deal with any
968 * overshoot.
970 decay_factor = usched_dfly_decay;
971 if (decay_factor < 1)
972 decay_factor = 1;
973 if (decay_factor > 1024)
974 decay_factor = 1024;
976 if (lp->lwp_estfast < usched_dfly_decay) {
977 ++lp->lwp_estfast;
978 lp->lwp_estcpu = ESTCPULIM(
979 (lp->lwp_estcpu * lp->lwp_estfast + estcpu) /
980 (lp->lwp_estfast + 1));
981 } else {
982 lp->lwp_estcpu = ESTCPULIM(
983 (lp->lwp_estcpu * decay_factor + estcpu) /
984 (decay_factor + 1));
987 if (usched_dfly_debug == lp->lwp_proc->p_pid)
988 kprintf(" finalestcpu %d\n", lp->lwp_estcpu);
989 dfly_resetpriority(lp);
990 lp->lwp_cpbase += ttlticks * gd->gd_schedclock.periodic;
991 lp->lwp_cpticks = 0;
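/*
 * Editor's note with illustrative numbers (not from the original source):
 * with the default usched_dfly_decay of 8 each recalculation blends
 * roughly 1/9th of the newly measured estcpu into the running value,
 * while the lwp_estfast branch above gives freshly started or freshly
 * woken threads a much faster blend.  The helper name below is
 * hypothetical.
 */
#if 0
static __inline int
example_decay_step(int old_estcpu, int measured, int decay_factor)
{
	/* same blend dfly_recalculate_estcpu() applies once per recalc */
	return (ESTCPULIM((old_estcpu * decay_factor + measured) /
			  (decay_factor + 1)));
}
#endif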
996 * Compute the priority of a process when running in user mode.
997 * Arrange to reschedule if the resulting priority is better
998 * than that of the current process.
1000 * This routine may be called with any process.
1002 * This routine is called by fork1() for initial setup with the process off
1003 * the run queue, and also may be called normally with the process on or
1004 * off the run queue.
1006 static void
1007 dfly_resetpriority(struct lwp *lp)
1009 dfly_pcpu_t rdd;
1010 int newpriority;
1011 u_short newrqtype;
1012 int rcpu;
1013 int checkpri;
1014 int estcpu;
1015 int delta_uload;
1017 crit_enter();
1020 * Lock the scheduler (lp) belongs to. This can be on a different
1021 * cpu. Handle races. This loop breaks out with the appropriate
1022 * rdd locked.
1024 for (;;) {
1025 rcpu = lp->lwp_qcpu;
1026 cpu_ccfence();
1027 rdd = &dfly_pcpu[rcpu];
1028 spin_lock(&rdd->spin);
1029 if (rcpu == lp->lwp_qcpu)
1030 break;
1031 spin_unlock(&rdd->spin);
1035 * Calculate the new priority and queue type
1037 newrqtype = lp->lwp_rtprio.type;
1039 switch(newrqtype) {
1040 case RTP_PRIO_REALTIME:
1041 case RTP_PRIO_FIFO:
1042 newpriority = PRIBASE_REALTIME +
1043 (lp->lwp_rtprio.prio & PRIMASK);
1044 break;
1045 case RTP_PRIO_NORMAL:
1049 estcpu = lp->lwp_estcpu;
1052 * p_nice piece		Adds (0-40) * 2		0-80
1053 * estcpu			Adds 16384 * 4 / 512	0-128
1055 newpriority = (lp->lwp_proc->p_nice - PRIO_MIN) * PPQ / NICEPPQ;
1056 newpriority += estcpu * PPQ / ESTCPUPPQ;
1057 newpriority = newpriority * MAXPRI / (PRIO_RANGE * PPQ /
1058 NICEPPQ + ESTCPUMAX * PPQ / ESTCPUPPQ);
1059 newpriority = PRIBASE_NORMAL + (newpriority & PRIMASK);
1060 break;
1061 case RTP_PRIO_IDLE:
1062 newpriority = PRIBASE_IDLE + (lp->lwp_rtprio.prio & PRIMASK);
1063 break;
1064 case RTP_PRIO_THREAD:
1065 newpriority = PRIBASE_THREAD + (lp->lwp_rtprio.prio & PRIMASK);
1066 break;
1067 default:
1068 panic("Bad RTP_PRIO %d", newrqtype);
1069 /* NOT REACHED */
1073 * The LWKT scheduler doesn't dive usched structures, give it a hint
1074 * on the relative priority of user threads running in the kernel.
1075 * The LWKT scheduler will always ensure that a user thread running
1076 * in the kernel will get some cpu time, regardless of its upri,
1077 * but can decide not to instantly switch from one kernel or user
1078 * mode user thread to a kernel-mode user thread when it has a less
1079 * desirable user priority.
1081 * td_upri has normal sense (higher values are more desirable), so
1082 * negate it.
1084 lp->lwp_thread->td_upri = -(newpriority & usched_dfly_swmask);
1087 * The newpriority incorporates the queue type so do a simple masked
1088 * check to determine if the process has moved to another queue. If
1089 * it has, and it is currently on a run queue, then move it.
1091 * Since uload is ~PPQMASK masked, no modifications are necessary if
1092 * we end up in the same run queue.
1094 * Reset rrcount if moving to a higher-priority queue, otherwise
1095 * retain rrcount.
1097 if ((lp->lwp_priority ^ newpriority) & ~PPQMASK) {
1098 if (lp->lwp_priority < newpriority)
1099 lp->lwp_rrcount = 0;
1100 if (lp->lwp_mpflags & LWP_MP_ONRUNQ) {
1101 dfly_remrunqueue_locked(rdd, lp);
1102 lp->lwp_priority = newpriority;
1103 lp->lwp_rqtype = newrqtype;
1104 lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
1105 dfly_setrunqueue_locked(rdd, lp);
1106 checkpri = 1;
1107 } else {
1108 lp->lwp_priority = newpriority;
1109 lp->lwp_rqtype = newrqtype;
1110 lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
1111 checkpri = 0;
1113 } else {
1115 * In the same PPQ, uload cannot change.
1117 lp->lwp_priority = newpriority;
1118 checkpri = 1;
1119 rcpu = -1;
1123 * Adjust effective load.
1125 * Calculate load then scale up or down geometrically based on p_nice.
1126 * Processes niced up (positive) are less important, and processes
1127 * niced downward (negative) are more important. The higher the uload,
1128 * the more important the thread.
1130 /* 0-511, 0-100% cpu */
1131 delta_uload = lp->lwp_estcpu / NQS;
1132 delta_uload -= delta_uload * lp->lwp_proc->p_nice / (PRIO_MAX + 1);
1135 delta_uload -= lp->lwp_uload;
1136 lp->lwp_uload += delta_uload;
1137 if (lp->lwp_mpflags & LWP_MP_ULOAD)
1138 atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload, delta_uload);
1141 * Determine if we need to reschedule the target cpu. This only
1142 * occurs if the LWP is already on a scheduler queue, which means
1143 * that idle cpu notification has already occurred. At most we
1144 * need only issue a need_user_resched() on the appropriate cpu.
1146 * The LWP may be owned by a CPU different from the current one,
1147 * in which case dd->uschedcp may be modified without an MP lock
1148 * or a spinlock held. The worst that happens is that the code
1149 * below causes a spurious need_user_resched() on the target CPU
1150 * and dd->upri to be wrong for a short period of time, both of
1151 * which are harmless.
1153 * If checkpri is 0 we are adjusting the priority of the current
1154 * process, possibly higher (less desirable), so ignore the upri
1155 * check which will fail in that case.
1157 if (rcpu >= 0) {
1158 if (CPUMASK_TESTBIT(dfly_rdyprocmask, rcpu) &&
1159 (checkpri == 0 ||
1160 (rdd->upri & ~PRIMASK) >
1161 (lp->lwp_priority & ~PRIMASK))) {
1162 if (rcpu == mycpu->gd_cpuid) {
1163 spin_unlock(&rdd->spin);
1164 need_user_resched();
1165 } else {
1166 spin_unlock(&rdd->spin);
1167 lwkt_send_ipiq(globaldata_find(rcpu),
1168 dfly_need_user_resched_remote,
1169 NULL);
1171 } else {
1172 spin_unlock(&rdd->spin);
1174 } else {
1175 spin_unlock(&rdd->spin);
1177 crit_exit();
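/*
 * Editor's sketch (hypothetical helper restating the RTP_PRIO_NORMAL math
 * above, with one worked example): for nice 0 and estcpu at half of
 * ESTCPUMAX the raw score is 20*4/2 + 8192*4/512 = 104, which scales to
 * 104 * 128 / 210 = 63, so the final priority is PRIBASE_NORMAL + 63.
 */
#if 0
static __inline int
example_normal_priority(int nice, int estcpu)
{
	int p;

	p = (nice - PRIO_MIN) * PPQ / NICEPPQ + estcpu * PPQ / ESTCPUPPQ;
	p = p * MAXPRI /
	    (PRIO_RANGE * PPQ / NICEPPQ + ESTCPUMAX * PPQ / ESTCPUPPQ);
	return (PRIBASE_NORMAL + (p & PRIMASK));
}
#endif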
1180 static
1181 void
1182 dfly_yield(struct lwp *lp)
1184 if (lp->lwp_qcpu != mycpu->gd_cpuid)
1185 return;
1186 KKASSERT(lp == curthread->td_lwp);
1189 * Don't set need_user_resched() or mess with rrcount or anything.
1190 * The TDF flag will override everything as long as we release.
1192 atomic_set_int(&lp->lwp_thread->td_mpflags, TDF_MP_DIDYIELD);
1193 dfly_release_curproc(lp);
1197 * Thread was forcefully migrated to another cpu. Normally forced migrations
1198 * are used for iterations and the kernel returns to the original cpu before
1199 * returning and this is not needed. However, if the kernel migrates a
1200 * thread to another cpu and wants to leave it there, it has to call this
1201 * scheduler helper.
1203 * Note that the lwkt_migratecpu() function also released the thread, so
1204 * we don't have to worry about that.
1206 static
1207 void
1208 dfly_changedcpu(struct lwp *lp)
1210 dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1211 dfly_pcpu_t rdd = &dfly_pcpu[mycpu->gd_cpuid];
1213 if (dd != rdd) {
1214 spin_lock(&dd->spin);
1215 dfly_changeqcpu_locked(lp, dd, rdd);
1216 spin_unlock(&dd->spin);
1221 * Called from fork1() when a new child process is being created.
1223 * Give the child process an initial estcpu that is more batchy than
1224 * its parent and dock the parent for the fork (but do not
1225 * reschedule the parent).
1227 * fast
1229 * XXX lwp should be "spawning" instead of "forking"
1231 static void
1232 dfly_forking(struct lwp *plp, struct lwp *lp)
1235 * Put the child 4 queue slots (out of 32) higher than the parent
1236 * (less desirable than the parent).
1238 lp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ * 4);
1239 lp->lwp_forked = 1;
1240 lp->lwp_estfast = 0;
1243 * Even though the lp will be scheduled specially the first time
1244 * due to lp->lwp_forked, it is important to initialize lwp_qcpu
1245 * to avoid favoring a fixed cpu.
1247 #if 0
1248 static uint16_t save_cpu;
1249 lp->lwp_qcpu = ++save_cpu % ncpus;
1250 #else
1251 lp->lwp_qcpu = plp->lwp_qcpu;
1252 #endif
1255 * Dock the parent a cost for the fork, protecting us from fork
1256 * bombs. If the parent is forking quickly make the child more
1257 * batchy.
1259 plp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ / 16);
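/*
 * Editor's note (worked numbers, not from the original source): with
 * ESTCPUPPQ = 512 the child starts ESTCPUPPQ * 4 = 2048 estcpu units,
 * i.e. four of the 32 queues, more batchy than its parent, while each
 * fork costs the parent only ESTCPUPPQ / 16 = 32 units, so a parent has
 * to fork very rapidly before its own priority degrades noticeably.
 */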
1263 * Called when a lwp is being removed from this scheduler, typically
1264 * during lwp_exit(). We have to clean out any ULOAD accounting before
1265 * we can let the lp go. The dd->spin lock is not needed for uload
1266 * updates.
1268 * Scheduler dequeueing has already occurred, no further action in that
1269 * regard is needed.
1271 static void
1272 dfly_exiting(struct lwp *lp, struct proc *child_proc)
1274 dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1276 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1277 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1278 atomic_add_int(&dd->uload, -lp->lwp_uload);
1279 atomic_add_int(&dd->ucount, -1);
1280 atomic_add_int(&dfly_ucount, -1);
1285 * This function cannot block in any way, but spinlocks are ok.
1287 * Update the uload based on the state of the thread (whether it is going
1288 * to sleep or running again). The uload is meant to be a longer-term
1289 * load and not an instantaneous load.
1291 static void
1292 dfly_uload_update(struct lwp *lp)
1294 dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1296 if (lp->lwp_thread->td_flags & TDF_RUNQ) {
1297 if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1298 spin_lock(&dd->spin);
1299 if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1300 atomic_set_int(&lp->lwp_mpflags,
1301 LWP_MP_ULOAD);
1302 atomic_add_int(&dd->uload, lp->lwp_uload);
1303 atomic_add_int(&dd->ucount, 1);
1304 atomic_add_int(&dfly_ucount, 1);
1306 spin_unlock(&dd->spin);
1308 } else if (lp->lwp_slptime > 0) {
1309 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1310 spin_lock(&dd->spin);
1311 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1312 atomic_clear_int(&lp->lwp_mpflags,
1313 LWP_MP_ULOAD);
1314 atomic_add_int(&dd->uload, -lp->lwp_uload);
1315 atomic_add_int(&dd->ucount, -1);
1316 atomic_add_int(&dfly_ucount, -1);
1318 spin_unlock(&dd->spin);
1324 * chooseproc() is called when a cpu needs a user process to LWKT schedule;
1325 * it selects a user process and returns it. If chklp is non-NULL and chklp
1326 * has a better or equal priority than the process that would otherwise be
1327 * chosen, NULL is returned.
1329 * Until we fix the RUNQ code the chklp test has to be strict or we may
1330 * bounce between processes trying to acquire the current process designation.
1332 * Must be called with rdd->spin locked. The spinlock is left intact through
1333 * the entire routine. dd->spin does not have to be locked.
1335 * If worst is non-zero this function finds the worst thread instead of the
1336 * best thread (used by the schedulerclock-based rover).
1338 static
1339 struct lwp *
1340 dfly_chooseproc_locked(dfly_pcpu_t rdd, dfly_pcpu_t dd,
1341 struct lwp *chklp, int worst)
1343 struct lwp *lp;
1344 struct rq *q;
1345 u_int32_t *which;
1346 u_int32_t pri;
1347 u_int32_t rtqbits;
1348 u_int32_t tsqbits;
1349 u_int32_t idqbits;
1351 rtqbits = rdd->rtqueuebits;
1352 tsqbits = rdd->queuebits;
1353 idqbits = rdd->idqueuebits;
1355 if (worst) {
1356 if (idqbits) {
1357 pri = bsrl(idqbits);
1358 q = &rdd->idqueues[pri];
1359 which = &rdd->idqueuebits;
1360 } else if (tsqbits) {
1361 pri = bsrl(tsqbits);
1362 q = &rdd->queues[pri];
1363 which = &rdd->queuebits;
1364 } else if (rtqbits) {
1365 pri = bsrl(rtqbits);
1366 q = &rdd->rtqueues[pri];
1367 which = &rdd->rtqueuebits;
1368 } else {
1369 return (NULL);
1371 lp = TAILQ_LAST(q, rq);
1372 } else {
1373 if (rtqbits) {
1374 pri = bsfl(rtqbits);
1375 q = &rdd->rtqueues[pri];
1376 which = &rdd->rtqueuebits;
1377 } else if (tsqbits) {
1378 pri = bsfl(tsqbits);
1379 q = &rdd->queues[pri];
1380 which = &rdd->queuebits;
1381 } else if (idqbits) {
1382 pri = bsfl(idqbits);
1383 q = &rdd->idqueues[pri];
1384 which = &rdd->idqueuebits;
1385 } else {
1386 return (NULL);
1388 lp = TAILQ_FIRST(q);
1390 KASSERT(lp, ("chooseproc: no lwp on busy queue"));
1393 * If the passed lwp <chklp> is reasonably close to the selected
1394 * lwp <lp>, return NULL (indicating that <chklp> should be kept).
1396 * Note that we must err on the side of <chklp> to avoid bouncing
1397 * between threads in the acquire code.
1399 if (chklp) {
1400 if (chklp->lwp_priority < lp->lwp_priority + PPQ)
1401 return(NULL);
1404 KTR_COND_LOG(usched_chooseproc,
1405 lp->lwp_proc->p_pid == usched_dfly_pid_debug,
1406 lp->lwp_proc->p_pid,
1407 lp->lwp_thread->td_gd->gd_cpuid,
1408 mycpu->gd_cpuid);
1410 KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) != 0, ("not on runq6!"));
1411 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1412 TAILQ_REMOVE(q, lp, lwp_procq);
1413 --rdd->runqcount;
1414 if (TAILQ_EMPTY(q))
1415 *which &= ~(1 << pri);
1418 * If we are choosing a process from rdd with the intent to
1419 * move it to dd, lwp_qcpu must be adjusted while rdd's spinlock
1420 * is still held.
1422 if (rdd != dd) {
1423 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1424 atomic_add_int(&rdd->uload, -lp->lwp_uload);
1425 atomic_add_int(&rdd->ucount, -1);
1426 atomic_add_int(&dfly_ucount, -1);
1428 lp->lwp_qcpu = dd->cpuid;
1429 atomic_add_int(&dd->uload, lp->lwp_uload);
1430 atomic_add_int(&dd->ucount, 1);
1431 atomic_add_int(&dfly_ucount, 1);
1432 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1434 return lp;
1438 * USED TO PUSH RUNNABLE LWPS TO THE LEAST LOADED CPU.
1440 * Choose a cpu node to schedule lp on, hopefully nearby its current
1441 * node.
1443 * We give the current node a modest advantage for obvious reasons.
1445 * We also give the node the thread was woken up FROM a slight advantage
1446 * in order to try to schedule paired threads which synchronize/block waiting
1447 * for each other fairly close to each other. Similarly in a network setting
1448 * this feature will also attempt to place a user process near the kernel
1449 * protocol thread that is feeding it data. THIS IS A CRITICAL PART of the
1450 * algorithm as it heuristically groups synchronizing processes for locality
1451 * of reference in multi-socket systems.
1453 * We check against running processes and give a big advantage if there
1454 * are none running.
1456 * The caller will normally dfly_setrunqueue() lp on the returned queue.
1458 * When the topology is known choose a cpu whose group has, in aggregate,
1459 * the lowest weighted load.
1461 static
1462 dfly_pcpu_t
1463 dfly_choose_best_queue(struct lwp *lp)
1465 cpumask_t wakemask;
1466 cpumask_t mask;
1467 cpu_node_t *cpup;
1468 cpu_node_t *cpun;
1469 cpu_node_t *cpub;
1470 dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1471 dfly_pcpu_t rdd;
1472 int wakecpu;
1473 int cpuid;
1474 int n;
1475 int count;
1476 int load;
1477 int lowest_load;
1480 * When the topology is unknown choose a random cpu that is hopefully
1481 * idle.
1483 if (dd->cpunode == NULL)
1484 return (dfly_choose_queue_simple(dd, lp));
1487 * Pairing mask
1489 if ((wakecpu = lp->lwp_thread->td_wakefromcpu) >= 0)
1490 wakemask = dfly_pcpu[wakecpu].cpumask;
1491 else
1492 CPUMASK_ASSZERO(wakemask);
1495 * When the topology is known choose a cpu whose group has, in
1496 * aggregate, the lowest weighted load.
1498 cpup = root_cpu_node;
1499 rdd = dd;
1501 while (cpup) {
1503 * Degenerate case super-root
1505 if (cpup->child_no == 1) {
1506 cpup = cpup->child_node[0];
1507 continue;
1511 * Terminal cpunode
1513 if (cpup->child_no == 0) {
1514 rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
1515 break;
1518 cpub = NULL;
1519 lowest_load = 0x7FFFFFFF;
1521 for (n = 0; n < cpup->child_no; ++n) {
1523 * Accumulate load information for all cpus
1524 * which are members of this node.
1526 cpun = cpup->child_node[n];
1527 mask = cpun->members;
1528 CPUMASK_ANDMASK(mask, usched_global_cpumask);
1529 CPUMASK_ANDMASK(mask, smp_active_mask);
1530 CPUMASK_ANDMASK(mask, lp->lwp_cpumask);
1531 if (CPUMASK_TESTZERO(mask))
1532 continue;
1534 count = 0;
1535 load = 0;
1537 while (CPUMASK_TESTNZERO(mask)) {
1538 cpuid = BSFCPUMASK(mask);
1539 rdd = &dfly_pcpu[cpuid];
1540 load += rdd->uload;
1541 load += rdd->ucount * usched_dfly_weight3;
1543 if (rdd->uschedcp == NULL &&
1544 rdd->runqcount == 0 &&
1545 globaldata_find(cpuid)->gd_tdrunqcount == 0
1547 load -= usched_dfly_weight4;
1549 #if 0
1550 else if (rdd->upri > lp->lwp_priority + PPQ) {
1551 load -= usched_dfly_weight4 / 2;
1553 #endif
1554 CPUMASK_NANDBIT(mask, cpuid);
1555 ++count;
1559 * Compensate if the lp is already accounted for in
1560 * the aggregate uload for this mask set. We want
1561 * to calculate the loads as if lp were not present,
1562 * otherwise the calculation is bogus.
1564 if ((lp->lwp_mpflags & LWP_MP_ULOAD) &&
1565 CPUMASK_TESTMASK(dd->cpumask, cpun->members)) {
1566 load -= lp->lwp_uload;
1567 load -= usched_dfly_weight3;
1570 load /= count;
1573 * Advantage the cpu group (lp) is already on.
1575 if (CPUMASK_TESTMASK(cpun->members, dd->cpumask))
1576 load -= usched_dfly_weight1;
1579 * Advantage the cpu group we want to pair (lp) to,
1580 * but don't let it go to the exact same cpu as
1581 * the wakecpu target.
1583 * We do this by checking whether cpun is a
1584 * terminal node or not. All cpun's at the same
1585 * level will either all be terminal or all not
1586 * terminal.
1588 * If it is and we match we disadvantage the load.
1589 * If it is and we don't match we advantage the load.
1591 * Also note that we are effectively disadvantaging
1592 * all-but-one by the same amount, so it won't affect
1593 * the weight1 factor for the all-but-one nodes.
1595 if (CPUMASK_TESTMASK(cpun->members, wakemask)) {
1596 if (cpun->child_no != 0) {
1597 /* advantage */
1598 load -= usched_dfly_weight2;
1599 } else {
1600 if (usched_dfly_features & 0x10)
1601 load += usched_dfly_weight2;
1602 else
1603 load -= usched_dfly_weight2;
1608 * Calculate the best load
1610 if (cpub == NULL || lowest_load > load ||
1611 (lowest_load == load &&
1612 CPUMASK_TESTMASK(cpun->members, dd->cpumask))
1614 lowest_load = load;
1615 cpub = cpun;
1618 cpup = cpub;
1620 if (usched_dfly_chooser > 0) {
1621 --usched_dfly_chooser; /* only N lines */
1622 kprintf("lp %02d->%02d %s\n",
1623 lp->lwp_qcpu, rdd->cpuid, lp->lwp_proc->p_comm);
1625 return (rdd);
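/*
 * Editor's sketch (hypothetical helper, not in the original source): the
 * per-cpu contribution accumulated in the loop above.  Lower scores are
 * better; weight3 penalizes the count of runnable threads, weight4
 * rewards a completely idle cpu, and weight1/weight2 are then applied
 * per-group for current-cpu affinity and wakeup pairing.
 */
#if 0
static __inline int
example_cpu_load_score(const struct usched_dfly_pcpu *pdd, int cpu_is_idle)
{
	int load;

	load = pdd->uload + pdd->ucount * usched_dfly_weight3;
	if (cpu_is_idle)
		load -= usched_dfly_weight4;
	return (load);
}
#endif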
1629 * USED TO PULL RUNNABLE LWPS FROM THE MOST LOADED CPU.
1631 * Choose the worst queue close to dd's cpu node with a non-empty runq
1632 * that is NOT dd. Also require that the moving of the highest-load thread
1633 * from rdd to dd does not cause the uload values to cross each other.
1635 * This is used by the thread chooser when the current cpu's queues are
1636 * empty to steal a thread from another cpu's queue. We want to offload
1637 * the most heavily-loaded queue.
1639 static
1640 dfly_pcpu_t
1641 dfly_choose_worst_queue(dfly_pcpu_t dd)
1643 cpumask_t mask;
1644 cpu_node_t *cpup;
1645 cpu_node_t *cpun;
1646 cpu_node_t *cpub;
1647 dfly_pcpu_t rdd;
1648 int cpuid;
1649 int n;
1650 int count;
1651 int load;
1652 #if 0
1653 int pri;
1654 int hpri;
1655 #endif
1656 int highest_load;
1659 * When the topology is unknown choose a random cpu that is hopefully
1660 * idle.
1662 if (dd->cpunode == NULL) {
1663 return (NULL);
1667 * When the topology is known choose a cpu whose group has, in
1668 * aggregate, the highest weighted load.
1670 cpup = root_cpu_node;
1671 rdd = dd;
1672 while (cpup) {
1674 * Degenerate case super-root
1676 if (cpup->child_no == 1) {
1677 cpup = cpup->child_node[0];
1678 continue;
1682 * Terminal cpunode
1684 if (cpup->child_no == 0) {
1685 rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
1686 break;
1689 cpub = NULL;
1690 highest_load = 0;
1692 for (n = 0; n < cpup->child_no; ++n) {
1694 * Accumulate load information for all cpus
1695 * which are members of this node.
1697 cpun = cpup->child_node[n];
1698 mask = cpun->members;
1699 CPUMASK_ANDMASK(mask, usched_global_cpumask);
1700 CPUMASK_ANDMASK(mask, smp_active_mask);
1701 if (CPUMASK_TESTZERO(mask))
1702 continue;
1704 count = 0;
1705 load = 0;
1707 while (CPUMASK_TESTNZERO(mask)) {
1708 cpuid = BSFCPUMASK(mask);
1709 rdd = &dfly_pcpu[cpuid];
1710 load += rdd->uload;
1711 load += rdd->ucount * usched_dfly_weight3;
1713 if (rdd->uschedcp == NULL &&
1714 rdd->runqcount == 0 &&
1715 globaldata_find(cpuid)->gd_tdrunqcount == 0
1717 load -= usched_dfly_weight4;
1719 #if 0
1720 else if (rdd->upri > dd->upri + PPQ) {
1721 load -= usched_dfly_weight4 / 2;
1723 #endif
1724 CPUMASK_NANDBIT(mask, cpuid);
1725 ++count;
1727 load /= count;
1730 * Prefer candidates which are somewhat closer to
1731 * our cpu.
1733 if (CPUMASK_TESTMASK(dd->cpumask, cpun->members))
1734 load += usched_dfly_weight1;
1737 * The best candidate is the one with the worst
1738 * (highest) load.
1740 if (cpub == NULL || highest_load < load ||
1741 (highest_load == load &&
1742 CPUMASK_TESTMASK(cpun->members, dd->cpumask))) {
1743 highest_load = load;
1744 cpub = cpun;
1747 cpup = cpub;
1751 * We never return our own node (dd), and only return a remote
1752 * node if its load is significantly worse than ours (i.e. where
1753 * stealing a thread would be considered reasonable).
1755 * This also helps us avoid breaking paired threads apart which
1756 * can have disastrous effects on performance.
1758 if (rdd == dd)
1759 return(NULL);
1761 #if 0
1762 hpri = 0;
1763 if (rdd->rtqueuebits && hpri < (pri = bsrl(rdd->rtqueuebits)))
1764 hpri = pri;
1765 if (rdd->queuebits && hpri < (pri = bsrl(rdd->queuebits)))
1766 hpri = pri;
1767 if (rdd->idqueuebits && hpri < (pri = bsrl(rdd->idqueuebits)))
1768 hpri = pri;
1769 hpri *= PPQ;
1770 if (rdd->uload - hpri < dd->uload + hpri)
1771 return(NULL);
1772 #endif
1773 return (rdd);
1776 static
1777 dfly_pcpu_t
1778 dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp)
1780 dfly_pcpu_t rdd;
1781 cpumask_t tmpmask;
1782 cpumask_t mask;
1783 int cpubase;
1784 int cpuid;
1787 * Fall back to the original heuristic: select a random cpu,
1788 * first checking the cpus not currently running a user thread.
1790 * Use cpuid as the base cpu in our scan, first checking
1791 * cpuid...(ncpus-1), then 0...(cpuid-1). This avoids favoring
1792 * lower-numbered cpus.
1794 ++dd->scancpu; /* SMP race ok */
1795 mask = dfly_rdyprocmask;
1796 CPUMASK_NANDMASK(mask, dfly_curprocmask);
1797 CPUMASK_ANDMASK(mask, lp->lwp_cpumask);
1798 CPUMASK_ANDMASK(mask, smp_active_mask);
1799 CPUMASK_ANDMASK(mask, usched_global_cpumask);
1801 cpubase = (int)(dd->scancpu % ncpus);
1802 CPUMASK_ASSBMASK(tmpmask, cpubase);
1803 CPUMASK_INVMASK(tmpmask);
1804 CPUMASK_ANDMASK(tmpmask, mask);
1805 while (CPUMASK_TESTNZERO(tmpmask)) {
1806 cpuid = BSFCPUMASK(tmpmask);
1807 rdd = &dfly_pcpu[cpuid];
1809 if ((rdd->upri & ~PPQMASK) >= (lp->lwp_priority & ~PPQMASK))
1810 goto found;
1811 CPUMASK_NANDBIT(tmpmask, cpuid);
1814 CPUMASK_ASSBMASK(tmpmask, cpubase);
1815 CPUMASK_ANDMASK(tmpmask, mask);
1816 while (CPUMASK_TESTNZERO(tmpmask)) {
1817 cpuid = BSFCPUMASK(tmpmask);
1818 rdd = &dfly_pcpu[cpuid];
1820 if ((rdd->upri & ~PPQMASK) >= (lp->lwp_priority & ~PPQMASK))
1821 goto found;
1822 CPUMASK_NANDBIT(tmpmask, cpuid);
1826 * Then cpus which might have a currently running lp
1828 mask = dfly_rdyprocmask;
1829 CPUMASK_ANDMASK(mask, dfly_curprocmask);
1830 CPUMASK_ANDMASK(mask, lp->lwp_cpumask);
1831 CPUMASK_ANDMASK(mask, smp_active_mask);
1832 CPUMASK_ANDMASK(mask, usched_global_cpumask);
1834 CPUMASK_ASSBMASK(tmpmask, cpubase);
1835 CPUMASK_INVMASK(tmpmask);
1836 CPUMASK_ANDMASK(tmpmask, mask);
1837 while (CPUMASK_TESTNZERO(tmpmask)) {
1838 cpuid = BSFCPUMASK(tmpmask);
1839 rdd = &dfly_pcpu[cpuid];
1841 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK))
1842 goto found;
1843 CPUMASK_NANDBIT(tmpmask, cpuid);
1846 CPUMASK_ASSBMASK(tmpmask, cpubase);
1847 CPUMASK_ANDMASK(tmpmask, mask);
1848 while (CPUMASK_TESTNZERO(tmpmask)) {
1849 cpuid = BSFCPUMASK(tmpmask);
1850 rdd = &dfly_pcpu[cpuid];
1852 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK))
1853 goto found;
1854 CPUMASK_NANDBIT(tmpmask, cpuid);
1858 * If we cannot find a suitable cpu we round-robin using scancpu.
1859 * Other cpus will pick up as they release their current lwps or
1860 * become ready.
1862 * Avoid a degenerate system lockup case if usched_global_cpumask
1863 * is set to 0 or otherwise does not cover lwp_cpumask.
1865 * We only kick the target helper thread in this case, we do not
1866 * set the user resched flag because
1868 cpuid = cpubase;
1869 if (CPUMASK_TESTBIT(usched_global_cpumask, cpuid) == 0)
1870 cpuid = 0;
1871 rdd = &dfly_pcpu[cpuid];
1872 found:
1873 return (rdd);
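/*
 * Editor's sketch (hypothetical helper): the rotating scan base used
 * above.  The candidate mask is first restricted to cpus numbered
 * cpubase and higher (build the low-bit mask, invert it, AND it in),
 * then the second pass covers 0..cpubase-1, so successive calls spread
 * placements instead of always favoring low-numbered cpus.
 */
#if 0
static __inline cpumask_t
example_upper_scan_mask(cpumask_t mask, int cpubase)
{
	cpumask_t tmpmask;

	CPUMASK_ASSBMASK(tmpmask, cpubase);	/* bits 0..cpubase-1 */
	CPUMASK_INVMASK(tmpmask);		/* bits cpubase and up */
	CPUMASK_ANDMASK(tmpmask, mask);
	return (tmpmask);
}
#endif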
1876 static
1877 void
1878 dfly_need_user_resched_remote(void *dummy)
1880 globaldata_t gd = mycpu;
1881 dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
1884 * Flag reschedule needed
1886 need_user_resched();
1889 * If no user thread is currently running we need to kick the helper
1890 * on our cpu to recover. Otherwise the cpu will never schedule
1891 * anything again.
1893 * We cannot schedule the process ourselves because this is an
1894 * IPI callback and we cannot acquire spinlocks in an IPI callback.
1896 * Call wakeup_mycpu to avoid sending IPIs to other CPUs
1898 if (dd->uschedcp == NULL &&
1899 CPUMASK_TESTBIT(dfly_rdyprocmask, gd->gd_cpuid)) {
1900 ATOMIC_CPUMASK_NANDBIT(dfly_rdyprocmask, gd->gd_cpuid);
1901 wakeup_mycpu(dd->helper_thread);
1906 * dfly_remrunqueue_locked() removes a given process from the run queue
1907 * that it is on, clearing the queue busy bit if it becomes empty.
1909 * Note that the user process scheduler is different from the LWKT scheduler.
1910 * The user process scheduler only manages user processes but it uses LWKT
1911 * underneath, and a user process operating in the kernel will often be
1912 * 'released' from our management.
1914 * uload is NOT adjusted here. It is only adjusted if the lwkt_thread goes
1915 * to sleep or the lwp is moved to a different runq.
1917 static void
1918 dfly_remrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
1920 struct rq *q;
1921 u_int32_t *which;
1922 u_int8_t pri;
1924 KKASSERT(rdd->runqcount >= 0);
1926 pri = lp->lwp_rqindex;
1928 switch(lp->lwp_rqtype) {
1929 case RTP_PRIO_NORMAL:
1930 q = &rdd->queues[pri];
1931 which = &rdd->queuebits;
1932 break;
1933 case RTP_PRIO_REALTIME:
1934 case RTP_PRIO_FIFO:
1935 q = &rdd->rtqueues[pri];
1936 which = &rdd->rtqueuebits;
1937 break;
1938 case RTP_PRIO_IDLE:
1939 q = &rdd->idqueues[pri];
1940 which = &rdd->idqueuebits;
1941 break;
1942 default:
1943 panic("remrunqueue: invalid rtprio type");
1944 /* NOT REACHED */
1946 KKASSERT(lp->lwp_mpflags & LWP_MP_ONRUNQ);
1947 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1948 TAILQ_REMOVE(q, lp, lwp_procq);
1949 --rdd->runqcount;
1950 if (TAILQ_EMPTY(q)) {
1951 KASSERT((*which & (1 << pri)) != 0,
1952 ("remrunqueue: remove from empty queue"));
1953 *which &= ~(1 << pri);
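/*
 * Illustrative sketch: the invariant maintained above (and in
 * dfly_setrunqueue_locked() below) is that a bit in *which is set if and
 * only if the corresponding TAILQ is non-empty. The checker below is a
 * hypothetical debugging aid, not existing code; it assumes the caller
 * holds the per-cpu spinlock.
 */
#if 0
static void
dfly_check_queuebits(struct rq *queues, u_int32_t bits)
{
	int i;

	for (i = 0; i < NQS; ++i)
		KKASSERT(((bits >> i) & 1) == !TAILQ_EMPTY(&queues[i]));
}
#endif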
1958 * dfly_setrunqueue_locked()
1960 * Add a process whose rqtype and rqindex have previously been calculated
1961 * onto the appropriate run queue. Determine if the addition requires
1962 * a reschedule on a cpu and return the cpuid or -1.
1964 * NOTE: Lower priorities are better priorities.
1966 * NOTE ON ULOAD: This variable specifies the aggregate load on a cpu, the
1967 * sum of the rough lwp_priority for all running and runnable
1968 * processes. Lower priority processes (higher lwp_priority
1969 * values) actually DO count as more load, not less, because
1970 * these are the programs which require the most care with
1971 * regard to cpu selection.
1973 static void
1974 dfly_setrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
1976 u_int32_t *which;
1977 struct rq *q;
1978 int pri;
1980 KKASSERT(lp->lwp_qcpu == rdd->cpuid);
1982 if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1983 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1984 atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload, lp->lwp_uload);
1985 atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].ucount, 1);
1986 atomic_add_int(&dfly_ucount, 1);
1989 pri = lp->lwp_rqindex;
1991 switch(lp->lwp_rqtype) {
1992 case RTP_PRIO_NORMAL:
1993 q = &rdd->queues[pri];
1994 which = &rdd->queuebits;
1995 break;
1996 case RTP_PRIO_REALTIME:
1997 case RTP_PRIO_FIFO:
1998 q = &rdd->rtqueues[pri];
1999 which = &rdd->rtqueuebits;
2000 break;
2001 case RTP_PRIO_IDLE:
2002 q = &rdd->idqueues[pri];
2003 which = &rdd->idqueuebits;
2004 break;
2005 default:
2006 panic("setrunqueue: invalid rtprio type");
2007 /* NOT REACHED */
2011 * Place us on the selected queue. Determine if we should be
2012 * placed at the head of the queue or at the end.
2014 * We are placed at the tail if our round-robin count has expired,
2015 * or is about to expire and the system thinks it's a good place to
2016 * round-robin, or there is already a next thread on the queue
2017 * (it might be trying to pick up where it left off and we don't
2018 * want to interfere). The decision is sketched after this function.
2020 KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
2021 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
2022 ++rdd->runqcount;
2024 if (lp->lwp_rrcount >= usched_dfly_rrinterval ||
2025 (lp->lwp_rrcount >= usched_dfly_rrinterval / 2 &&
2026 (lp->lwp_thread->td_mpflags & TDF_MP_BATCH_DEMARC))
2029 * Place on tail
2031 atomic_clear_int(&lp->lwp_thread->td_mpflags,
2032 TDF_MP_BATCH_DEMARC);
2033 lp->lwp_rrcount = 0;
2034 TAILQ_INSERT_TAIL(q, lp, lwp_procq);
2035 } else {
2037 * Retain rrcount and place on head. Count is retained
2038 * even if the queue is empty.
2040 TAILQ_INSERT_HEAD(q, lp, lwp_procq);
2042 *which |= 1 << pri;
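/*
 * Illustrative sketch: the head-vs-tail decision above expressed as a
 * standalone predicate. The helper name is an assumption; the condition
 * mirrors the test in dfly_setrunqueue_locked() (round-robin count
 * expired, or half expired with a batch demarcation pending).
 */
#if 0
static __inline int
dfly_place_at_tail(struct lwp *lp)
{
	if (lp->lwp_rrcount >= usched_dfly_rrinterval)
		return (1);
	if (lp->lwp_rrcount >= usched_dfly_rrinterval / 2 &&
	    (lp->lwp_thread->td_mpflags & TDF_MP_BATCH_DEMARC)) {
		return (1);
	}
	return (0);
}
#endif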
2046 * For SMP systems a user scheduler helper thread is created for each
2047 * cpu and is used to allow one cpu to wake up another for the purposes of
2048 * scheduling userland threads from setrunqueue().
2050 * UP systems do not need the helper since there is only one cpu.
2052 * We can't use the idle thread for this because we might block.
2053 * Additionally, doing things this way allows us to HLT idle cpus
2054 * on MP systems.
2056 static void
2057 dfly_helper_thread(void *dummy)
2059 globaldata_t gd;
2060 dfly_pcpu_t dd;
2061 dfly_pcpu_t rdd;
2062 struct lwp *nlp;
2063 cpumask_t mask;
2064 int cpuid;
2066 gd = mycpu;
2067 cpuid = gd->gd_cpuid; /* doesn't change */
2068 mask = gd->gd_cpumask; /* doesn't change */
2069 dd = &dfly_pcpu[cpuid];
2072 * Since we only want to be woken up when no user processes
2073 * are scheduled on a cpu, run at an ultra low priority.
2075 lwkt_setpri_self(TDPRI_USER_SCHEDULER);
2077 tsleep(dd->helper_thread, 0, "schslp", 0);
2079 for (;;) {
2081 * We use the LWKT deschedule-interlock trick to avoid racing
2082 * dfly_rdyprocmask. This means we cannot block through to the
2083 * tsleep(..., PINTERLOCKED, ...) call we make below (sketched after this function).
2085 crit_enter_gd(gd);
2086 tsleep_interlock(dd->helper_thread, 0);
2088 spin_lock(&dd->spin);
2090 ATOMIC_CPUMASK_ORMASK(dfly_rdyprocmask, mask);
2091 clear_user_resched(); /* This satisfies the reschedule request */
2092 #if 0
2093 dd->rrcount = 0; /* Reset the round-robin counter */
2094 #endif
2096 if (dd->runqcount || dd->uschedcp != NULL) {
2098 * Threads are available. A thread may or may not be
2099 * currently scheduled. Get the best thread already queued
2100 * to this cpu.
2102 nlp = dfly_chooseproc_locked(dd, dd, dd->uschedcp, 0);
2103 if (nlp) {
2104 ATOMIC_CPUMASK_ORMASK(dfly_curprocmask, mask);
2105 dd->upri = nlp->lwp_priority;
2106 dd->uschedcp = nlp;
2107 #if 0
2108 dd->rrcount = 0; /* reset round robin */
2109 #endif
2110 spin_unlock(&dd->spin);
2111 lwkt_acquire(nlp->lwp_thread);
2112 lwkt_schedule(nlp->lwp_thread);
2113 } else {
2115 * This situation should not occur because we had
2116 * at least one thread available.
2118 spin_unlock(&dd->spin);
2120 } else if (usched_dfly_features & 0x01) {
2122 * This cpu is devoid of runnable threads, steal a thread
2123 * from another cpu. Since we're stealing, might as well
2124 * load balance at the same time.
2126 * We choose the highest-loaded thread from the worst queue.
2128 * NOTE! This function only returns a non-NULL rdd when
2129 * another cpu's queue is obviously overloaded. We
2130 * do not want to perform the type of rebalancing
2131 * the schedclock does here because it would result
2132 * in insane process pulling when 'steady' state is
2133 * partially unbalanced (e.g. 6 runnables and only
2134 * 4 cores).
2136 rdd = dfly_choose_worst_queue(dd);
2137 if (rdd && spin_trylock(&rdd->spin)) {
2138 nlp = dfly_chooseproc_locked(rdd, dd, NULL, 1);
2139 spin_unlock(&rdd->spin);
2140 } else {
2141 nlp = NULL;
2143 if (nlp) {
2144 ATOMIC_CPUMASK_ORMASK(dfly_curprocmask, mask);
2145 dd->upri = nlp->lwp_priority;
2146 dd->uschedcp = nlp;
2147 #if 0
2148 dd->rrcount = 0; /* reset round robin */
2149 #endif
2150 spin_unlock(&dd->spin);
2151 lwkt_acquire(nlp->lwp_thread);
2152 lwkt_schedule(nlp->lwp_thread);
2153 } else {
2155 * Leave the thread on our run queue. Another
2156 * scheduler will try to pull it later.
2158 spin_unlock(&dd->spin);
2160 } else {
2162 * This cpu is devoid of runnable threads and we are not allowed
2163 * to steal any.
2165 spin_unlock(&dd->spin);
2169 * We're descheduled unless someone scheduled us. Switch away.
2170 * Exiting the critical section will cause splz() to be called
2171 * for us if interrupts and such are pending.
2173 crit_exit_gd(gd);
2174 tsleep(dd->helper_thread, PINTERLOCKED, "schslp", 0);
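/*
 * Illustrative sketch of the sleep/wakeup interlock used by the loop
 * above: tsleep_interlock() registers the wait channel before the run
 * queues are re-checked, so a wakeup() issued between the check and the
 * tsleep(..., PINTERLOCKED, ...) is not lost. The function name and the
 * condition callback below are assumptions for illustration only.
 */
#if 0
static void
example_interlocked_wait(void *chan, int (*nothing_to_do)(void))
{
	tsleep_interlock(chan, 0);
	if (nothing_to_do())
		tsleep(chan, PINTERLOCKED, "exslp", 0);
}
#endif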
2178 #if 0
2179 static int
2180 sysctl_usched_dfly_stick_to_level(SYSCTL_HANDLER_ARGS)
2182 int error, new_val;
2184 new_val = usched_dfly_stick_to_level;
2186 error = sysctl_handle_int(oidp, &new_val, 0, req);
2187 if (error != 0 || req->newptr == NULL)
2188 return (error);
2189 if (new_val > cpu_topology_levels_number - 1 || new_val < 0)
2190 return (EINVAL);
2191 usched_dfly_stick_to_level = new_val;
2192 return (0);
2194 #endif
2197 * Set up the queues and scheduler helpers (scheduler helpers are SMP only).
2198 * Note that curprocmask bit 0 has already been cleared by rqinit() and
2199 * we should not mess with it further.
2201 static void
2202 usched_dfly_cpu_init(void)
2204 int i;
2205 int j;
2206 int smt_not_supported = 0;
2207 int cache_coherent_not_supported = 0;
2209 if (bootverbose)
2210 kprintf("Start usched_dfly helpers on cpus:\n");
2212 sysctl_ctx_init(&usched_dfly_sysctl_ctx);
2213 usched_dfly_sysctl_tree =
2214 SYSCTL_ADD_NODE(&usched_dfly_sysctl_ctx,
2215 SYSCTL_STATIC_CHILDREN(_kern), OID_AUTO,
2216 "usched_dfly", CTLFLAG_RD, 0, "");
2218 for (i = 0; i < ncpus; ++i) {
2219 dfly_pcpu_t dd = &dfly_pcpu[i];
2220 cpumask_t mask;
2222 CPUMASK_ASSBIT(mask, i);
2223 if (CPUMASK_TESTMASK(mask, smp_active_mask) == 0)
2224 continue;
2226 spin_init(&dd->spin, "uschedcpuinit");
2227 dd->cpunode = get_cpu_node_by_cpuid(i);
2228 dd->cpuid = i;
2229 CPUMASK_ASSBIT(dd->cpumask, i);
2230 for (j = 0; j < NQS; j++) {
2231 TAILQ_INIT(&dd->queues[j]);
2232 TAILQ_INIT(&dd->rtqueues[j]);
2233 TAILQ_INIT(&dd->idqueues[j]);
2235 ATOMIC_CPUMASK_NANDBIT(dfly_curprocmask, 0);
2237 if (dd->cpunode == NULL) {
2238 smt_not_supported = 1;
2239 cache_coherent_not_supported = 1;
2240 if (bootverbose)
2241 kprintf (" cpu%d - WARNING: No CPU NODE "
2242 "found for cpu\n", i);
2243 } else {
2244 switch (dd->cpunode->type) {
2245 case THREAD_LEVEL:
2246 if (bootverbose)
2247 kprintf (" cpu%d - HyperThreading "
2248 "available. Core siblings: ",
2250 break;
2251 case CORE_LEVEL:
2252 smt_not_supported = 1;
2254 if (bootverbose)
2255 kprintf (" cpu%d - No HT available, "
2256 "multi-core/physical "
2257 "cpu. Physical siblings: ",
2259 break;
2260 case CHIP_LEVEL:
2261 smt_not_supported = 1;
2263 if (bootverbose)
2264 kprintf (" cpu%d - No HT available, "
2265 "single-core/physical cpu. "
2266 "Package siblings: ",
2268 break;
2269 default:
2270 /* Let's go for safe defaults here */
2271 smt_not_supported = 1;
2272 cache_coherent_not_supported = 1;
2273 if (bootverbose)
2274 kprintf (" cpu%d - Unknown cpunode->"
2275 "type=%u. siblings: ",
2277 (u_int)dd->cpunode->type);
2278 break;
2281 if (bootverbose) {
2282 if (dd->cpunode->parent_node != NULL) {
2283 kprint_cpuset(&dd->cpunode->
2284 parent_node->members);
2285 kprintf("\n");
2286 } else {
2287 kprintf(" no siblings\n");
2292 lwkt_create(dfly_helper_thread, NULL, &dd->helper_thread, NULL,
2293 0, i, "usched %d", i);
2296 * Allow user scheduling on the target cpu. cpu #0 has already
2297 * been enabled in rqinit().
2299 if (i)
2300 ATOMIC_CPUMASK_NANDMASK(dfly_curprocmask, mask);
2301 ATOMIC_CPUMASK_ORMASK(dfly_rdyprocmask, mask);
2302 dd->upri = PRIBASE_NULL;
2306 /* usched_dfly sysctl configurable parameters */
2308 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2309 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2310 OID_AUTO, "rrinterval", CTLFLAG_RW,
2311 &usched_dfly_rrinterval, 0, "");
2312 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2313 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2314 OID_AUTO, "decay", CTLFLAG_RW,
2315 &usched_dfly_decay, 0, "Extra decay when not running");
2317 /* Add enable/disable option for SMT scheduling if supported */
2318 if (smt_not_supported) {
2319 usched_dfly_smt = 0;
2320 SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
2321 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2322 OID_AUTO, "smt", CTLFLAG_RD,
2323 "NOT SUPPORTED", 0, "SMT NOT SUPPORTED");
2324 } else {
2325 usched_dfly_smt = 1;
2326 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2327 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2328 OID_AUTO, "smt", CTLFLAG_RW,
2329 &usched_dfly_smt, 0, "Enable SMT scheduling");
2333 * Add enable/disable option for cache coherent scheduling
2334 * if supported
2336 if (cache_coherent_not_supported) {
2337 usched_dfly_cache_coherent = 0;
2338 SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
2339 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2340 OID_AUTO, "cache_coherent", CTLFLAG_RD,
2341 "NOT SUPPORTED", 0,
2342 "Cache coherence NOT SUPPORTED");
2343 } else {
2344 usched_dfly_cache_coherent = 1;
2345 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2346 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2347 OID_AUTO, "cache_coherent", CTLFLAG_RW,
2348 &usched_dfly_cache_coherent, 0,
2349 "Enable/Disable cache coherent scheduling");
2351 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2352 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2353 OID_AUTO, "weight1", CTLFLAG_RW,
2354 &usched_dfly_weight1, 200,
2355 "Weight selection for current cpu");
2357 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2358 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2359 OID_AUTO, "weight2", CTLFLAG_RW,
2360 &usched_dfly_weight2, 180,
2361 "Weight selection for wakefrom cpu");
2363 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2364 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2365 OID_AUTO, "weight3", CTLFLAG_RW,
2366 &usched_dfly_weight3, 40,
2367 "Weight selection for num threads on queue");
2369 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2370 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2371 OID_AUTO, "weight4", CTLFLAG_RW,
2372 &usched_dfly_weight4, 160,
2373 "Availability of other idle cpus");
2375 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2376 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2377 OID_AUTO, "fast_resched", CTLFLAG_RW,
2378 &usched_dfly_fast_resched, 0,
2379 "Availability of other idle cpus");
2381 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2382 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2383 OID_AUTO, "features", CTLFLAG_RW,
2384 &usched_dfly_features, 0x8F,
2385 "Allow pulls into empty queues");
2387 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2388 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2389 OID_AUTO, "swmask", CTLFLAG_RW,
2390 &usched_dfly_swmask, ~PPQMASK,
2391 "Queue mask to force thread switch");
2393 #if 0
2394 SYSCTL_ADD_PROC(&usched_dfly_sysctl_ctx,
2395 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2396 OID_AUTO, "stick_to_level",
2397 CTLTYPE_INT | CTLFLAG_RW,
2398 NULL, sizeof usched_dfly_stick_to_level,
2399 sysctl_usched_dfly_stick_to_level, "I",
2400 "Stick a process to this level. See sysctl"
2401 "paremter hw.cpu_topology.level_description");
2402 #endif
2405 SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
2406 usched_dfly_cpu_init, NULL);