kernel - Incidental MPLOCK removal (usched, affinity)
[dragonfly.git] / sys / kern / usched_dfly.c
blob 9691b176f69b44a59ac55ab7fd89103494087aa5
1 /*
2 * Copyright (c) 2012 The DragonFly Project. All rights reserved.
3 * Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>. All rights reserved.
5 * This code is derived from software contributed to The DragonFly Project
6 * by Matthew Dillon <dillon@backplane.com>,
7 * by Mihai Carabas <mihai.carabas@gmail.com>
8 * and many others.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in
18 * the documentation and/or other materials provided with the
19 * distribution.
20 * 3. Neither the name of The DragonFly Project nor the names of its
21 * contributors may be used to endorse or promote products derived
22 * from this software without specific, prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
27 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
28 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
29 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
30 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
32 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
33 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
34 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/lock.h>
41 #include <sys/queue.h>
42 #include <sys/proc.h>
43 #include <sys/rtprio.h>
44 #include <sys/uio.h>
45 #include <sys/sysctl.h>
46 #include <sys/resourcevar.h>
47 #include <sys/spinlock.h>
48 #include <sys/cpu_topology.h>
49 #include <sys/thread2.h>
50 #include <sys/spinlock2.h>
52 #include <sys/ktr.h>
54 #include <machine/cpu.h>
55 #include <machine/smp.h>
58 * Priorities. Note that with 32 run queues per scheduler each queue
59 * represents four priority levels.
62 int dfly_rebalanced;
64 #define MAXPRI 128
65 #define PRIMASK (MAXPRI - 1)
66 #define PRIBASE_REALTIME 0
67 #define PRIBASE_NORMAL MAXPRI
68 #define PRIBASE_IDLE (MAXPRI * 2)
69 #define PRIBASE_THREAD (MAXPRI * 3)
70 #define PRIBASE_NULL (MAXPRI * 4)
72 #define NQS 32 /* 32 run queues. */
73 #define PPQ (MAXPRI / NQS) /* priorities per queue */
74 #define PPQMASK (PPQ - 1)
77 * NICEPPQ - number of nice units per priority queue
78 * ESTCPUPPQ - number of estcpu units per priority queue
79 * ESTCPUMAX - number of estcpu units
81 #define NICEPPQ 2
82 #define ESTCPUPPQ 512
83 #define ESTCPUMAX (ESTCPUPPQ * NQS)
84 #define BATCHMAX (ESTCPUFREQ * 30)
85 #define PRIO_RANGE (PRIO_MAX - PRIO_MIN + 1)
87 #define ESTCPULIM(v) min((v), ESTCPUMAX)
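A quick worked example of how the constants above fit together, using only the macros defined in this file:

	/* MAXPRI / NQS    = 128 / 32 = 4     priorities per queue (PPQ)     */
	/* ESTCPUPPQ * NQS = 512 * 32 = 16384 = ESTCPUMAX                    */
	/* => every 512 units of accumulated estcpu push a normal-class      */
	/*    thread down by one of the 32 run queues, and ESTCPULIM()       */
	/*    clamps estcpu at 16384.                                        */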
89 TAILQ_HEAD(rq, lwp);
91 #define lwp_priority lwp_usdata.dfly.priority
92 #define lwp_forked lwp_usdata.dfly.forked
93 #define lwp_rqindex lwp_usdata.dfly.rqindex
94 #define lwp_estcpu lwp_usdata.dfly.estcpu
95 #define lwp_estfast lwp_usdata.dfly.estfast
96 #define lwp_uload lwp_usdata.dfly.uload
97 #define lwp_rqtype lwp_usdata.dfly.rqtype
98 #define lwp_qcpu lwp_usdata.dfly.qcpu
99 #define lwp_rrcount lwp_usdata.dfly.rrcount
101 struct usched_dfly_pcpu {
102 struct spinlock spin;
103 struct thread *helper_thread;
104 u_short scancpu;
105 short upri;
106 int uload;
107 int ucount;
108 struct lwp *uschedcp;
109 struct rq queues[NQS];
110 struct rq rtqueues[NQS];
111 struct rq idqueues[NQS];
112 u_int32_t queuebits;
113 u_int32_t rtqueuebits;
114 u_int32_t idqueuebits;
115 int runqcount;
116 int cpuid;
117 cpumask_t cpumask;
118 cpu_node_t *cpunode;
121 typedef struct usched_dfly_pcpu *dfly_pcpu_t;
123 static void dfly_acquire_curproc(struct lwp *lp);
124 static void dfly_release_curproc(struct lwp *lp);
125 static void dfly_select_curproc(globaldata_t gd);
126 static void dfly_setrunqueue(struct lwp *lp);
127 static void dfly_setrunqueue_dd(dfly_pcpu_t rdd, struct lwp *lp);
128 static void dfly_schedulerclock(struct lwp *lp, sysclock_t period,
129 sysclock_t cpstamp);
130 static void dfly_recalculate_estcpu(struct lwp *lp);
131 static void dfly_resetpriority(struct lwp *lp);
132 static void dfly_forking(struct lwp *plp, struct lwp *lp);
133 static void dfly_exiting(struct lwp *lp, struct proc *);
134 static void dfly_uload_update(struct lwp *lp);
135 static void dfly_yield(struct lwp *lp);
136 static void dfly_changeqcpu_locked(struct lwp *lp,
137 dfly_pcpu_t dd, dfly_pcpu_t rdd);
138 static dfly_pcpu_t dfly_choose_best_queue(struct lwp *lp);
139 static dfly_pcpu_t dfly_choose_worst_queue(dfly_pcpu_t dd);
140 static dfly_pcpu_t dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp);
141 static void dfly_need_user_resched_remote(void *dummy);
142 static struct lwp *dfly_chooseproc_locked(dfly_pcpu_t rdd, dfly_pcpu_t dd,
143 struct lwp *chklp, int worst);
144 static void dfly_remrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
145 static void dfly_setrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
146 static void dfly_changedcpu(struct lwp *lp);
148 struct usched usched_dfly = {
149 { NULL },
150 "dfly", "Original DragonFly Scheduler",
151 NULL, /* default registration */
152 NULL, /* default deregistration */
153 dfly_acquire_curproc,
154 dfly_release_curproc,
155 dfly_setrunqueue,
156 dfly_schedulerclock,
157 dfly_recalculate_estcpu,
158 dfly_resetpriority,
159 dfly_forking,
160 dfly_exiting,
161 dfly_uload_update,
162 NULL, /* setcpumask not supported */
163 dfly_yield,
164 dfly_changedcpu
168 * We have NQS (32) run queues per scheduling class. For the normal
169 * class, there are 128 priorities scaled onto these 32 queues. New
170 * processes are added to the last entry in each queue, and processes
171 * are selected for running by taking them from the head and maintaining
172 * a simple FIFO arrangement. Realtime and Idle priority processes have
173 * an explicit 0-31 priority which maps directly onto their class queue
174 * index. When a queue has something in it, the corresponding bit is
175 * set in the queuebits variable, allowing a single read to determine
176 * the state of all 32 queues and then a ffs() to find the first busy
177 * queue.
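A minimal sketch of that bitmask lookup, assuming only the per-cpu structure defined above; the real selection in dfly_chooseproc_locked() below also covers the realtime and idle classes and the worst-case (rover) scan:

static struct lwp *
example_first_normal_lwp(dfly_pcpu_t dd)
{
	u_int32_t bits = dd->queuebits;	/* bit N set => queues[N] non-empty */
	int pri;

	if (bits == 0)
		return (NULL);		/* no runnable normal-class lwps */
	pri = bsfl(bits);		/* lowest set bit = best busy queue */
	return (TAILQ_FIRST(&dd->queues[pri]));
}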
179 /* currently running a user process */
180 static cpumask_t dfly_curprocmask = CPUMASK_INITIALIZER_ALLONES;
181 static cpumask_t dfly_rdyprocmask; /* ready to accept a user process */
182 static volatile int dfly_ucount; /* total running on whole system */
183 static struct usched_dfly_pcpu dfly_pcpu[MAXCPU];
184 static struct sysctl_ctx_list usched_dfly_sysctl_ctx;
185 static struct sysctl_oid *usched_dfly_sysctl_tree;
187 /* Debug info exposed through debug.* sysctl */
189 static int usched_dfly_debug = -1;
190 SYSCTL_INT(_debug, OID_AUTO, dfly_scdebug, CTLFLAG_RW,
191 &usched_dfly_debug, 0,
192 "Print debug information for this pid");
194 static int usched_dfly_pid_debug = -1;
195 SYSCTL_INT(_debug, OID_AUTO, dfly_pid_debug, CTLFLAG_RW,
196 &usched_dfly_pid_debug, 0,
197 "Print KTR debug information for this pid");
199 static int usched_dfly_chooser = 0;
200 SYSCTL_INT(_debug, OID_AUTO, dfly_chooser, CTLFLAG_RW,
201 &usched_dfly_chooser, 0,
202 "Print KTR debug information for this pid");
205 * Tuning usched_dfly - configurable through kern.usched_dfly.
207 * weight1 - Tries to keep threads on their current cpu. If you
208 * make this value too large the scheduler will not be
209 * able to load-balance large loads.
211 * weight2 - If non-zero, detects thread pairs undergoing synchronous
212 * communications and tries to move them closer together.
213 * Behavior is adjusted by bit 4 of features (0x10).
215 * WARNING! Weight2 is a ridiculously sensitive parameter,
216 * a small value is recommended.
218 * weight3 - Weighting based on the number of recently runnable threads
219 * on the userland scheduling queue (ignoring their loads).
220 * A nominal value here prevents high-priority (low-load)
221 * threads from accumulating on one cpu core when other
222 * cores are available.
224 * This value should be left fairly small relative to weight1
225 * and weight4.
227 * weight4 - Weighting based on other cpu queues being available
228 * or running processes with higher lwp_priority's.
230 * This allows a thread to migrate to another nearby cpu if it
231 * is unable to run on the current cpu based on the other cpu
232 * being idle or running a lower priority (higher lwp_priority)
233 * thread. This value should be large enough to override weight1
235 * features - These flags can be set or cleared to enable or disable various
236 * features.
238 * 0x01 Enable idle-cpu pulling (default)
239 * 0x02 Enable proactive pushing (default)
240 * 0x04 Enable rebalancing rover (default)
241 * 0x08 Enable more proactive pushing (default)
242 * 0x10 (flip weight2 limit on same cpu) (default)
243 * 0x20 choose best cpu for forked process
244 * 0x40 choose current cpu for forked process
245 * 0x80 choose random cpu for forked process (default)
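For example, the default features value of 0x8F set below decodes as:

	/* 0x8F = 0x01 (idle-cpu pulling) | 0x02 (proactive pushing)        */
	/*      | 0x04 (rebalancing rover) | 0x08 (more proactive pushing)  */
	/*      | 0x80 (random cpu for forked processes)                    */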
247 static int usched_dfly_smt = 0;
248 static int usched_dfly_cache_coherent = 0;
249 static int usched_dfly_weight1 = 200; /* keep thread on current cpu */
250 static int usched_dfly_weight2 = 180; /* synchronous peer's current cpu */
251 static int usched_dfly_weight3 = 40; /* number of threads on queue */
252 static int usched_dfly_weight4 = 160; /* availability of idle cores */
253 static int usched_dfly_features = 0x8F; /* allow pulls */
254 static int usched_dfly_fast_resched = 0;/* delta priority / resched */
255 static int usched_dfly_swmask = ~PPQMASK; /* allow pulls */
256 static int usched_dfly_rrinterval = (ESTCPUFREQ + 9) / 10;
257 static int usched_dfly_decay = 8;
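The tunables above are intended to be exported under kern.usched_dfly (per the comment at the top of this block). A minimal userland sketch for reading one of them, assuming the sysctl node names follow the variable suffixes (e.g. kern.usched_dfly.weight2); adjust the node name to match the actual tree:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int w2;
	size_t len = sizeof(w2);

	/* hypothetical node name; verify against the installed sysctl tree */
	if (sysctlbyname("kern.usched_dfly.weight2", &w2, &len, NULL, 0) == 0)
		printf("weight2 = %d\n", w2);
	return (0);
}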
259 /* KTR debug printings */
261 KTR_INFO_MASTER(usched);
263 #if !defined(KTR_USCHED_DFLY)
264 #define KTR_USCHED_DFLY KTR_ALL
265 #endif
267 KTR_INFO(KTR_USCHED_DFLY, usched, chooseproc, 0,
268 "USCHED_DFLY(chooseproc: pid %d, old_cpuid %d, curr_cpuid %d)",
269 pid_t pid, int old_cpuid, int curr);
272 * This function is called when the kernel intends to return to userland.
273 * It is responsible for making the thread the current designated userland
274 * thread for this cpu, blocking if necessary.
276 * The kernel will not depress our LWKT priority until after we return,
277 * in case we have to shove over to another cpu.
279 * We must determine our thread's disposition before we switch away. This
280 * is very sensitive code.
282 * WARNING! THIS FUNCTION IS ALLOWED TO CAUSE THE CURRENT THREAD TO MIGRATE
283 * TO ANOTHER CPU! Because most of the kernel assumes that no migration will
284 * occur, this function is called only under very controlled circumstances.
286 static void
287 dfly_acquire_curproc(struct lwp *lp)
289 globaldata_t gd;
290 dfly_pcpu_t dd;
291 dfly_pcpu_t rdd;
292 thread_t td;
293 int force_resched;
296 * Make sure we aren't sitting on a tsleep queue.
298 td = lp->lwp_thread;
299 crit_enter_quick(td);
300 if (td->td_flags & TDF_TSLEEPQ)
301 tsleep_remove(td);
302 dfly_recalculate_estcpu(lp);
304 gd = mycpu;
305 dd = &dfly_pcpu[gd->gd_cpuid];
308 * Process any pending interrupts/ipi's, then handle reschedule
309 * requests. dfly_release_curproc() will try to assign a new
310 * uschedcp that isn't us and otherwise NULL it out.
312 force_resched = 0;
313 if ((td->td_mpflags & TDF_MP_BATCH_DEMARC) &&
314 lp->lwp_rrcount >= usched_dfly_rrinterval / 2) {
315 force_resched = 1;
318 if (user_resched_wanted()) {
319 if (dd->uschedcp == lp)
320 force_resched = 1;
321 clear_user_resched();
322 dfly_release_curproc(lp);
326 * Loop until we are the current user thread.
328 * NOTE: dd spinlock not held at top of loop.
330 if (dd->uschedcp == lp)
331 lwkt_yield_quick();
333 while (dd->uschedcp != lp) {
334 lwkt_yield_quick();
336 spin_lock(&dd->spin);
338 /* This lwp is an outcast; force reschedule. */
339 if (__predict_false(
340 CPUMASK_TESTBIT(lp->lwp_cpumask, gd->gd_cpuid) == 0) &&
341 (rdd = dfly_choose_best_queue(lp)) != dd) {
342 dfly_changeqcpu_locked(lp, dd, rdd);
343 spin_unlock(&dd->spin);
344 lwkt_deschedule(lp->lwp_thread);
345 dfly_setrunqueue_dd(rdd, lp);
346 lwkt_switch();
347 gd = mycpu;
348 dd = &dfly_pcpu[gd->gd_cpuid];
349 continue;
352 if (force_resched &&
353 (usched_dfly_features & 0x08) &&
354 (rdd = dfly_choose_best_queue(lp)) != dd) {
356 * We are not or are no longer the current lwp and a
357 * forced reschedule was requested. Figure out the
358 * best cpu to run on (our current cpu will be given
359 * significant weight).
361 * (if a reschedule was not requested we want to
362 * move this step after the uschedcp tests).
364 dfly_changeqcpu_locked(lp, dd, rdd);
365 spin_unlock(&dd->spin);
366 lwkt_deschedule(lp->lwp_thread);
367 dfly_setrunqueue_dd(rdd, lp);
368 lwkt_switch();
369 gd = mycpu;
370 dd = &dfly_pcpu[gd->gd_cpuid];
371 continue;
375 * Either no reschedule was requested or the best queue was
376 * dd, and no current process has been selected. We can
377 * trivially become the current lwp on the current cpu.
379 if (dd->uschedcp == NULL) {
380 atomic_clear_int(&lp->lwp_thread->td_mpflags,
381 TDF_MP_DIDYIELD);
382 ATOMIC_CPUMASK_ORBIT(dfly_curprocmask, gd->gd_cpuid);
383 dd->uschedcp = lp;
384 dd->upri = lp->lwp_priority;
385 KKASSERT(lp->lwp_qcpu == dd->cpuid);
386 spin_unlock(&dd->spin);
387 break;
391 * Put us back on the same run queue unconditionally.
393 * Set rrinterval to force placement at end of queue.
394 * Select the worst queue to ensure we round-robin,
395 * but do not change estcpu.
397 if (lp->lwp_thread->td_mpflags & TDF_MP_DIDYIELD) {
398 u_int32_t tsqbits;
400 switch(lp->lwp_rqtype) {
401 case RTP_PRIO_NORMAL:
402 tsqbits = dd->queuebits;
403 spin_unlock(&dd->spin);
405 lp->lwp_rrcount = usched_dfly_rrinterval;
406 if (tsqbits)
407 lp->lwp_rqindex = bsrl(tsqbits);
408 break;
409 default:
410 spin_unlock(&dd->spin);
411 break;
413 lwkt_deschedule(lp->lwp_thread);
414 dfly_setrunqueue_dd(dd, lp);
415 atomic_clear_int(&lp->lwp_thread->td_mpflags,
416 TDF_MP_DIDYIELD);
417 lwkt_switch();
418 gd = mycpu;
419 dd = &dfly_pcpu[gd->gd_cpuid];
420 continue;
424 * Can we steal the current designated user thread?
426 * If we do the other thread will stall when it tries to
427 * return to userland, possibly rescheduling elsewhere.
429 * It is important to do a masked test to avoid the edge
430 * case where two near-equal-priority threads are constantly
431 * interrupting each other.
433 * In the exact match case another thread has already gained
434 * uschedcp and lowered its priority, if we steal it the
435 * other thread will stay stuck on the LWKT runq and not
436 * push to another cpu. So don't steal on equal-priority even
437 * though it might appear to be more beneficial due to not
438 * having to switch back to the other thread's context.
440 * usched_dfly_fast_resched requires that two threads be
441 * significantly far apart in priority in order to interrupt.
443 * If better but not sufficiently far apart, the current
444 * uschedcp will be interrupted at the next scheduler clock.
446 if (dd->uschedcp &&
447 (dd->upri & ~PPQMASK) >
448 (lp->lwp_priority & ~PPQMASK) + usched_dfly_fast_resched) {
449 dd->uschedcp = lp;
450 dd->upri = lp->lwp_priority;
451 KKASSERT(lp->lwp_qcpu == dd->cpuid);
452 spin_unlock(&dd->spin);
453 break;
456 * We are not the current lwp, figure out the best cpu
457 * to run on (our current cpu will be given significant
458 * weight). Loop on cpu change.
460 if ((usched_dfly_features & 0x02) &&
461 force_resched == 0 &&
462 (rdd = dfly_choose_best_queue(lp)) != dd) {
463 dfly_changeqcpu_locked(lp, dd, rdd);
464 spin_unlock(&dd->spin);
465 lwkt_deschedule(lp->lwp_thread);
466 dfly_setrunqueue_dd(rdd, lp);
467 lwkt_switch();
468 gd = mycpu;
469 dd = &dfly_pcpu[gd->gd_cpuid];
470 continue;
474 * We cannot become the current lwp, place the lp on the
475 * run-queue of this or another cpu and deschedule ourselves.
477 * When we are reactivated we will have another chance.
479 * Reload after a switch or setrunqueue/switch possibly
480 * moved us to another cpu.
482 spin_unlock(&dd->spin);
483 lwkt_deschedule(lp->lwp_thread);
484 dfly_setrunqueue_dd(dd, lp);
485 lwkt_switch();
486 gd = mycpu;
487 dd = &dfly_pcpu[gd->gd_cpuid];
491 * Make sure upri is synchronized, then yield to LWKT threads as
492 * needed before returning. This could result in another reschedule.
493 * XXX
495 crit_exit_quick(td);
497 KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
501 * DFLY_RELEASE_CURPROC
503 * This routine detaches the current thread from the userland scheduler,
504 * usually because the thread needs to run or block in the kernel (at
505 * kernel priority) for a while.
507 * This routine is also responsible for selecting a new thread to
508 * make the current thread.
510 * NOTE: This implementation differs from the dummy example in that
511 * dfly_select_curproc() is able to select the current process, whereas
512 * dummy_select_curproc() is not able to select the current process.
513 * This means we have to NULL out uschedcp.
515 * Additionally, note that we may already be on a run queue if releasing
516 * via the lwkt_switch() in dfly_setrunqueue().
518 static void
519 dfly_release_curproc(struct lwp *lp)
521 globaldata_t gd = mycpu;
522 dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
525 * Make sure td_wakefromcpu is defaulted. This will be overwritten
526 * by wakeup().
528 if (dd->uschedcp == lp) {
529 KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
530 spin_lock(&dd->spin);
531 if (dd->uschedcp == lp) {
532 dd->uschedcp = NULL; /* don't let lp be selected */
533 dd->upri = PRIBASE_NULL;
534 ATOMIC_CPUMASK_NANDBIT(dfly_curprocmask, gd->gd_cpuid);
535 spin_unlock(&dd->spin);
536 dfly_select_curproc(gd);
537 } else {
538 spin_unlock(&dd->spin);
544 * DFLY_SELECT_CURPROC
546 * Select a new current process for this cpu and clear any pending user
547 * reschedule request. The cpu currently has no current process.
549 * This routine is also responsible for equal-priority round-robining,
550 * typically triggered from dfly_schedulerclock(). In our dummy example
551 * all the 'user' threads are LWKT scheduled all at once and we just
552 * call lwkt_switch().
554 * The calling process is not on the queue and cannot be selected.
556 static
557 void
558 dfly_select_curproc(globaldata_t gd)
560 dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
561 struct lwp *nlp;
562 int cpuid = gd->gd_cpuid;
564 crit_enter_gd(gd);
566 spin_lock(&dd->spin);
567 nlp = dfly_chooseproc_locked(dd, dd, dd->uschedcp, 0);
569 if (nlp) {
570 ATOMIC_CPUMASK_ORBIT(dfly_curprocmask, cpuid);
571 dd->upri = nlp->lwp_priority;
572 dd->uschedcp = nlp;
573 #if 0
574 dd->rrcount = 0; /* reset round robin */
575 #endif
576 spin_unlock(&dd->spin);
577 lwkt_acquire(nlp->lwp_thread);
578 lwkt_schedule(nlp->lwp_thread);
579 } else {
580 spin_unlock(&dd->spin);
582 crit_exit_gd(gd);
586 * Place the specified lwp on the user scheduler's run queue. This routine
587 * must be called with the thread descheduled. The lwp must be runnable.
588 * It must not be possible for anyone else to explicitly schedule this thread.
590 * The thread may be the current thread as a special case.
592 static void
593 dfly_setrunqueue(struct lwp *lp)
595 dfly_pcpu_t dd;
596 dfly_pcpu_t rdd;
599 * First validate the process LWKT state.
601 KASSERT(lp->lwp_stat == LSRUN, ("setrunqueue: lwp not LSRUN"));
602 KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0,
603 ("lwp %d/%d already on runq! flag %08x/%08x", lp->lwp_proc->p_pid,
604 lp->lwp_tid, lp->lwp_proc->p_flags, lp->lwp_flags));
605 KKASSERT((lp->lwp_thread->td_flags & TDF_RUNQ) == 0);
608 * NOTE: dd/rdd do not necessarily represent the current cpu.
609 * Instead they may represent the cpu the thread was last
610 * scheduled on or inherited by its parent.
612 dd = &dfly_pcpu[lp->lwp_qcpu];
613 rdd = dd;
616 * This process is not supposed to be scheduled anywhere or assigned
617 * as the current process anywhere. Assert the condition.
619 KKASSERT(rdd->uschedcp != lp);
622 * Ok, we have to setrunqueue some target cpu and request a reschedule
623 * if necessary.
625 * We have to choose the best target cpu. It might not be the current
626 * target even if the current cpu has no running user thread (for
627 * example, because the current cpu might be a hyperthread and its
628 * sibling has a thread assigned).
630 * If we just forked it is most optimal to run the child on the same
631 * cpu just in case the parent decides to wait for it (thus getting
632 * off that cpu). As long as there is nothing else runnable on the
633 * cpu, that is. If we did this unconditionally a parent forking
634 * multiple children before waiting (e.g. make -j N) leaves other
635 * cpus idle that could be working.
637 if (lp->lwp_forked) {
638 lp->lwp_forked = 0;
639 if (usched_dfly_features & 0x20)
640 rdd = dfly_choose_best_queue(lp);
641 else if (usched_dfly_features & 0x40)
642 rdd = &dfly_pcpu[lp->lwp_qcpu];
643 else if (usched_dfly_features & 0x80)
644 rdd = dfly_choose_queue_simple(rdd, lp);
645 else if (dfly_pcpu[lp->lwp_qcpu].runqcount)
646 rdd = dfly_choose_best_queue(lp);
647 else
648 rdd = &dfly_pcpu[lp->lwp_qcpu];
649 } else {
650 rdd = dfly_choose_best_queue(lp);
651 /* rdd = &dfly_pcpu[lp->lwp_qcpu]; */
653 if (lp->lwp_qcpu != rdd->cpuid) {
654 spin_lock(&dd->spin);
655 dfly_changeqcpu_locked(lp, dd, rdd);
656 spin_unlock(&dd->spin);
658 dfly_setrunqueue_dd(rdd, lp);
662 * Change qcpu to rdd->cpuid. The dd the lp is CURRENTLY on must be
663 * spin-locked on-call. rdd does not have to be.
665 static void
666 dfly_changeqcpu_locked(struct lwp *lp, dfly_pcpu_t dd, dfly_pcpu_t rdd)
668 if (lp->lwp_qcpu != rdd->cpuid) {
669 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
670 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
671 atomic_add_int(&dd->uload, -lp->lwp_uload);
672 atomic_add_int(&dd->ucount, -1);
673 atomic_add_int(&dfly_ucount, -1);
675 lp->lwp_qcpu = rdd->cpuid;
680 * Place lp on rdd's runqueue. Nothing is locked on call. This function
681 * also performs all necessary ancillary notification actions.
683 static void
684 dfly_setrunqueue_dd(dfly_pcpu_t rdd, struct lwp *lp)
686 globaldata_t rgd;
689 * We might be moving the lp to another cpu's run queue, and once
690 * on the runqueue (even if it is our cpu's), another cpu can rip
691 * it away from us.
693 * TDF_MIGRATING might already be set if this is part of a
694 * remrunqueue+setrunqueue sequence.
696 if ((lp->lwp_thread->td_flags & TDF_MIGRATING) == 0)
697 lwkt_giveaway(lp->lwp_thread);
699 rgd = globaldata_find(rdd->cpuid);
702 * We lose control of the lp the moment we release the spinlock
703 * after having placed it on the queue. i.e. another cpu could pick
704 * it up, or it could exit, or its priority could be further
705 * adjusted, or something like that.
707 * WARNING! rdd can point to a foreign cpu!
709 spin_lock(&rdd->spin);
710 dfly_setrunqueue_locked(rdd, lp);
713 * Potentially interrupt the currently-running thread
715 if ((rdd->upri & ~PPQMASK) <= (lp->lwp_priority & ~PPQMASK)) {
717 * Currently running thread is better or same, do not
718 * interrupt.
720 spin_unlock(&rdd->spin);
721 } else if ((rdd->upri & ~PPQMASK) <= (lp->lwp_priority & ~PPQMASK) +
722 usched_dfly_fast_resched) {
724 * Currently running thread is not better, but not so bad
725 * that we need to interrupt it. Let it run for one more
726 * scheduler tick.
728 if (rdd->uschedcp &&
729 rdd->uschedcp->lwp_rrcount < usched_dfly_rrinterval) {
730 rdd->uschedcp->lwp_rrcount = usched_dfly_rrinterval - 1;
732 spin_unlock(&rdd->spin);
733 } else if (rgd == mycpu) {
735 * We should interrupt the currently running thread, which
736 * is on the current cpu. However, if DIDYIELD is set we
737 * round-robin unconditionally and do not interrupt it.
739 spin_unlock(&rdd->spin);
740 if (rdd->uschedcp == NULL)
741 wakeup_mycpu(rdd->helper_thread); /* XXX */
742 if ((lp->lwp_thread->td_mpflags & TDF_MP_DIDYIELD) == 0)
743 need_user_resched();
744 } else {
746 * We should interrupt the currently running thread, which
747 * is on a different cpu.
749 spin_unlock(&rdd->spin);
750 lwkt_send_ipiq(rgd, dfly_need_user_resched_remote, NULL);
755 * This routine is called from a systimer IPI. It MUST be MP-safe and
756 * the BGL IS NOT HELD ON ENTRY. This routine is called at ESTCPUFREQ on
757 * each cpu.
759 static
760 void
761 dfly_schedulerclock(struct lwp *lp, sysclock_t period, sysclock_t cpstamp)
763 globaldata_t gd = mycpu;
764 dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
767 * Spinlocks also hold a critical section so there should not be
768 * any active.
770 KKASSERT(gd->gd_spinlocks == 0 || dumping);
773 * If lp is NULL we might be contended and lwkt_switch() may have
774 * cycled into the idle thread. Apply the tick to the current
775 * process on this cpu if it is contended.
777 if (gd->gd_curthread == &gd->gd_idlethread) {
778 lp = dd->uschedcp;
779 if (lp && (lp->lwp_thread == NULL ||
780 lp->lwp_thread->td_contended == 0)) {
781 lp = NULL;
786 * Dock thread for tick
788 if (lp) {
790 * Do we need to round-robin? We round-robin 10 times a
791 * second. This should only occur for cpu-bound batch
792 * processes.
794 if (++lp->lwp_rrcount >= usched_dfly_rrinterval) {
795 lp->lwp_thread->td_wakefromcpu = -1;
796 need_user_resched();
800 * Adjust estcpu upward using a real time equivalent
801 * calculation, and recalculate lp's priority.
803 lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu +
804 ESTCPUMAX / ESTCPUFREQ + 1);
805 dfly_resetpriority(lp);
809 * Rebalance two cpus every 8 ticks, pulling the worst thread
810 * from the worst cpu's queue into a rotating cpu number.
812 * This mechanic is needed because the push algorithms can
813 * steady-state in a non-optimal configuration. We need to mix it
814 * up a little, even if it means breaking up a paired thread, so
815 * the push algorithms can rebalance the degenerate conditions.
816 * This portion of the algorithm exists to ensure stability at the
817 * selected weightings.
819 * Because we might be breaking up optimal conditions we do not want
820 * to execute this too quickly, hence we only rebalance approximately
821 * ~7-8 times per second. The pushes, on the other hand, are capable of
822 * moving threads to other cpus at a much higher rate.
824 * We choose the most heavily loaded thread from the worst queue
825 * in order to ensure that multiple heavy-weight threads on the same
826 * queue get broken up, and also because these threads are the most
827 * likely to be able to remain in place. Hopefully then any pairings,
828 * if applicable, migrate to where these threads are.
830 if ((usched_dfly_features & 0x04) &&
831 ((u_int)sched_ticks & 7) == 0 &&
832 (u_int)sched_ticks / 8 % ncpus == gd->gd_cpuid) {
834 * Our cpu is up.
836 struct lwp *nlp;
837 dfly_pcpu_t rdd;
839 rdd = dfly_choose_worst_queue(dd);
840 if (rdd) {
841 spin_lock(&dd->spin);
842 if (spin_trylock(&rdd->spin)) {
843 nlp = dfly_chooseproc_locked(rdd, dd, NULL, 1);
844 spin_unlock(&rdd->spin);
845 if (nlp == NULL)
846 spin_unlock(&dd->spin);
847 } else {
848 spin_unlock(&dd->spin);
849 nlp = NULL;
851 } else {
852 nlp = NULL;
854 /* dd->spin held if nlp != NULL */
857 * Either schedule it or add it to our queue.
859 if (nlp &&
860 (nlp->lwp_priority & ~PPQMASK) < (dd->upri & ~PPQMASK)) {
861 ATOMIC_CPUMASK_ORMASK(dfly_curprocmask, dd->cpumask);
862 dd->upri = nlp->lwp_priority;
863 dd->uschedcp = nlp;
864 #if 0
865 dd->rrcount = 0; /* reset round robin */
866 #endif
867 spin_unlock(&dd->spin);
868 lwkt_acquire(nlp->lwp_thread);
869 lwkt_schedule(nlp->lwp_thread);
870 } else if (nlp) {
871 dfly_setrunqueue_locked(dd, nlp);
872 spin_unlock(&dd->spin);
878 * Called from acquire and from kern_synch's one-second timer (one of the
879 * callout helper threads) with a critical section held.
881 * Adjust p_estcpu based on our single-cpu load, p_nice, and compensate for
882 * overall system load.
884 * Note that no recalculation occurs for a process which sleeps and wakes
885 * up in the same tick. That is, a system doing thousands of context
886 * switches per second will still only do serious estcpu calculations
887 * ESTCPUFREQ times per second.
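To make the decay used below concrete, with the default usched_dfly_decay of 8 each pass computes roughly:

	/* estcpu_new = (estcpu_old * 8 + estcpu_measured) / 9                */
	/* e.g. an idle thread (estcpu_old = 0) that suddenly measures a full */
	/* cpu (estcpu_measured ~= ESTCPUMAX = 16384) rises to ~1820 after    */
	/* one pass; while lwp_estfast < 8 the faster                         */
	/* (estcpu_old * estfast + estcpu_measured) / (estfast + 1) blend is  */
	/* used instead, so new or freshly-woken cpu hogs are detected quickly*/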
889 static
890 void
891 dfly_recalculate_estcpu(struct lwp *lp)
893 globaldata_t gd = mycpu;
894 sysclock_t cpbase;
895 sysclock_t ttlticks;
896 int estcpu;
897 int decay_factor;
898 int ucount;
901 * We have to subtract periodic to get the last schedclock
902 * timeout time, otherwise we would get the upcoming timeout.
903 * Keep in mind that a process can migrate between cpus and
904 * while the scheduler clock should be very close, boundary
905 * conditions could lead to a small negative delta.
907 cpbase = gd->gd_schedclock.time - gd->gd_schedclock.periodic;
909 if (lp->lwp_slptime > 1) {
911 * Too much time has passed, do a coarse correction.
913 lp->lwp_estcpu = lp->lwp_estcpu >> 1;
914 dfly_resetpriority(lp);
915 lp->lwp_cpbase = cpbase;
916 lp->lwp_cpticks = 0;
917 lp->lwp_estfast = 0;
918 } else if (lp->lwp_cpbase != cpbase) {
920 * Adjust estcpu if we are in a different tick. Don't waste
921 * time if we are in the same tick.
923 * First calculate the number of ticks in the measurement
924 * interval. The ttlticks calculation can wind up 0 due to
925 * a bug in the handling of lwp_slptime (as yet not found),
926 * so make sure we do not get a divide by 0 panic.
928 ttlticks = (cpbase - lp->lwp_cpbase) /
929 gd->gd_schedclock.periodic;
930 if ((ssysclock_t)ttlticks < 0) {
931 ttlticks = 0;
932 lp->lwp_cpbase = cpbase;
934 if (ttlticks == 0)
935 return;
936 updatepcpu(lp, lp->lwp_cpticks, ttlticks);
939 * Calculate the percentage of one cpu being used then
940 * compensate for any system load in excess of ncpus.
942 * For example, if we have 8 cores and 16 running cpu-bound
943 * processes then all things being equal each process will
944 * get 50% of one cpu. We need to pump this value back
945 * up to 100% so the estcpu calculation properly adjusts
946 * the process's dynamic priority.
948 * estcpu is scaled by ESTCPUMAX, pctcpu is scaled by FSCALE.
950 estcpu = (lp->lwp_pctcpu * ESTCPUMAX) >> FSHIFT;
951 ucount = dfly_ucount;
952 if (ucount > ncpus) {
953 estcpu += estcpu * (ucount - ncpus) / ncpus;
956 if (usched_dfly_debug == lp->lwp_proc->p_pid) {
957 kprintf("pid %d lwp %p estcpu %3d %3d cp %d/%d",
958 lp->lwp_proc->p_pid, lp,
959 estcpu, lp->lwp_estcpu,
960 lp->lwp_cpticks, ttlticks);
964 * Adjust lp->lwp_estcpu. The decay factor determines how
965 * quickly lwp_estcpu collapses to its realtime calculation.
966 * A slower collapse gives us a more accurate number over
967 * the long term but can create problems with bursty threads
968 * or threads which become cpu hogs.
970 * To solve this problem, newly started lwps and lwps which
971 * are restarting after having been asleep for a while are
972 * given a much, much faster decay in order to quickly
973 * detect whether they become cpu-bound.
975 * NOTE: p_nice is accounted for in dfly_resetpriority(),
976 * and not here, but we must still ensure that a
977 * cpu-bound nice -20 process does not completely
978 * override a cpu-bound nice +20 process.
980 * NOTE: We must use ESTCPULIM() here to deal with any
981 * overshoot.
983 decay_factor = usched_dfly_decay;
984 if (decay_factor < 1)
985 decay_factor = 1;
986 if (decay_factor > 1024)
987 decay_factor = 1024;
989 if (lp->lwp_estfast < usched_dfly_decay) {
990 ++lp->lwp_estfast;
991 lp->lwp_estcpu = ESTCPULIM(
992 (lp->lwp_estcpu * lp->lwp_estfast + estcpu) /
993 (lp->lwp_estfast + 1));
994 } else {
995 lp->lwp_estcpu = ESTCPULIM(
996 (lp->lwp_estcpu * decay_factor + estcpu) /
997 (decay_factor + 1));
1000 if (usched_dfly_debug == lp->lwp_proc->p_pid)
1001 kprintf(" finalestcpu %d\n", lp->lwp_estcpu);
1002 dfly_resetpriority(lp);
1003 lp->lwp_cpbase += ttlticks * gd->gd_schedclock.periodic;
1004 lp->lwp_cpticks = 0;
1009 * Compute the priority of a process when running in user mode.
1010 * Arrange to reschedule if the resulting priority is better
1011 * than that of the current process.
1013 * This routine may be called with any process.
1015 * This routine is called by fork1() for initial setup with the process of
1016 * the run queue, and also may be called normally with the process on or
1017 * off the run queue.
1019 static void
1020 dfly_resetpriority(struct lwp *lp)
1022 dfly_pcpu_t rdd;
1023 int newpriority;
1024 u_short newrqtype;
1025 int rcpu;
1026 int checkpri;
1027 int estcpu;
1028 int delta_uload;
1030 crit_enter();
1033 * Lock the scheduler (lp) belongs to. This can be on a different
1034 * cpu. Handle races. This loop breaks out with the appropriate
1035 * rdd locked.
1037 for (;;) {
1038 rcpu = lp->lwp_qcpu;
1039 cpu_ccfence();
1040 rdd = &dfly_pcpu[rcpu];
1041 spin_lock(&rdd->spin);
1042 if (rcpu == lp->lwp_qcpu)
1043 break;
1044 spin_unlock(&rdd->spin);
1048 * Calculate the new priority and queue type
1050 newrqtype = lp->lwp_rtprio.type;
1052 switch(newrqtype) {
1053 case RTP_PRIO_REALTIME:
1054 case RTP_PRIO_FIFO:
1055 newpriority = PRIBASE_REALTIME +
1056 (lp->lwp_rtprio.prio & PRIMASK);
1057 break;
1058 case RTP_PRIO_NORMAL:
1062 estcpu = lp->lwp_estcpu;
1065 * p_nice piece Adds (0-40) * 2 0-80
1066 * estcpu Adds 16384 * 4 / 512 0-128
1068 newpriority = (lp->lwp_proc->p_nice - PRIO_MIN) * PPQ / NICEPPQ;
1069 newpriority += estcpu * PPQ / ESTCPUPPQ;
1070 newpriority = newpriority * MAXPRI / (PRIO_RANGE * PPQ /
1071 NICEPPQ + ESTCPUMAX * PPQ / ESTCPUPPQ);
1072 newpriority = PRIBASE_NORMAL + (newpriority & PRIMASK);
1073 break;
1074 case RTP_PRIO_IDLE:
1075 newpriority = PRIBASE_IDLE + (lp->lwp_rtprio.prio & PRIMASK);
1076 break;
1077 case RTP_PRIO_THREAD:
1078 newpriority = PRIBASE_THREAD + (lp->lwp_rtprio.prio & PRIMASK);
1079 break;
1080 default:
1081 panic("Bad RTP_PRIO %d", newrqtype);
1082 /* NOT REACHED */
1086 * The LWKT scheduler doesn't dive usched structures, give it a hint
1087 * on the relative priority of user threads running in the kernel.
1088 * The LWKT scheduler will always ensure that a user thread running
1089 * in the kernel will get cpu some time, regardless of its upri,
1090 * but can decide not to instantly switch from one kernel or user
1091 * mode user thread to a kernel-mode user thread when it has a less
1092 * desirable user priority.
1094 * td_upri has normal sense (higher values are more desirable), so
1095 * negate it.
1097 lp->lwp_thread->td_upri = -(newpriority & usched_dfly_swmask);
1100 * The newpriority incorporates the queue type so do a simple masked
1101 * check to determine if the process has moved to another queue. If
1102 * it has, and it is currently on a run queue, then move it.
1104 * Since uload is ~PPQMASK masked, no modifications are necessary if
1105 * we end up in the same run queue.
1107 * Reset rrcount if moving to a higher-priority queue, otherwise
1108 * retain rrcount.
1110 if ((lp->lwp_priority ^ newpriority) & ~PPQMASK) {
1111 if (lp->lwp_priority < newpriority)
1112 lp->lwp_rrcount = 0;
1113 if (lp->lwp_mpflags & LWP_MP_ONRUNQ) {
1114 dfly_remrunqueue_locked(rdd, lp);
1115 lp->lwp_priority = newpriority;
1116 lp->lwp_rqtype = newrqtype;
1117 lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
1118 dfly_setrunqueue_locked(rdd, lp);
1119 checkpri = 1;
1120 } else {
1121 lp->lwp_priority = newpriority;
1122 lp->lwp_rqtype = newrqtype;
1123 lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
1124 checkpri = 0;
1126 } else {
1128 * In the same PPQ, uload cannot change.
1130 lp->lwp_priority = newpriority;
1131 checkpri = 1;
1132 rcpu = -1;
1136 * Adjust effective load.
1138 * Calculate load then scale up or down geometrically based on p_nice.
1139 * Processes niced up (positive) are less important, and processes
1140 * niced downward (negative) are more important. The higher the uload,
1141 * the more important the thread.
1143 /* 0-511, 0-100% cpu */
1144 delta_uload = lp->lwp_estcpu / NQS;
1145 delta_uload -= delta_uload * lp->lwp_proc->p_nice / (PRIO_MAX + 1);
1148 delta_uload -= lp->lwp_uload;
1149 lp->lwp_uload += delta_uload;
1150 if (lp->lwp_mpflags & LWP_MP_ULOAD)
1151 atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload, delta_uload);
1154 * Determine if we need to reschedule the target cpu. This only
1155 * occurs if the LWP is already on a scheduler queue, which means
1156 * that idle cpu notification has already occurred. At most we
1157 * need only issue a need_user_resched() on the appropriate cpu.
1159 * The LWP may be owned by a CPU different from the current one,
1160 * in which case dd->uschedcp may be modified without an MP lock
1161 * or a spinlock held. The worst that happens is that the code
1162 * below causes a spurious need_user_resched() on the target CPU
1163 * and dd->upri to be wrong for a short period of time, both of
1164 * which are harmless.
1166 * If checkpri is 0 we are adjusting the priority of the current
1167 * process, possibly higher (less desirable), so ignore the upri
1168 * check which will fail in that case.
1170 if (rcpu >= 0) {
1171 if (CPUMASK_TESTBIT(dfly_rdyprocmask, rcpu) &&
1172 (checkpri == 0 ||
1173 (rdd->upri & ~PRIMASK) >
1174 (lp->lwp_priority & ~PRIMASK))) {
1175 if (rcpu == mycpu->gd_cpuid) {
1176 spin_unlock(&rdd->spin);
1177 need_user_resched();
1178 } else {
1179 spin_unlock(&rdd->spin);
1180 lwkt_send_ipiq(globaldata_find(rcpu),
1181 dfly_need_user_resched_remote,
1182 NULL);
1184 } else {
1185 spin_unlock(&rdd->spin);
1187 } else {
1188 spin_unlock(&rdd->spin);
1190 crit_exit();
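A worked pass through the RTP_PRIO_NORMAL arithmetic above, using the default constants (nice = 0, estcpu = 8192, i.e. half of ESTCPUMAX):

	/* nice piece   = (0 - PRIO_MIN) * PPQ / NICEPPQ = 20 * 4 / 2    =  40 */
	/* estcpu piece = 8192 * PPQ / ESTCPUPPQ         = 8192 * 4 / 512 = 64 */
	/* scaled       = (40 + 64) * MAXPRI / (82 + 128)                =  63 */
	/* lwp_priority = PRIBASE_NORMAL + 63                            = 191 */
	/* lwp_rqindex  = (191 & PRIMASK) / PPQ                          =  15 */
	/* and the thread's uload contribution is estcpu / NQS = 256 before   */
	/* the nice-based scaling.                                            */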
1193 static
1194 void
1195 dfly_yield(struct lwp *lp)
1197 if (lp->lwp_qcpu != mycpu->gd_cpuid)
1198 return;
1199 KKASSERT(lp == curthread->td_lwp);
1202 * Don't set need_user_resched() or mess with rrcount or anything.
1203 * the TDF flag will override everything as long as we release.
1205 atomic_set_int(&lp->lwp_thread->td_mpflags, TDF_MP_DIDYIELD);
1206 dfly_release_curproc(lp);
1210 * Thread was forcefully migrated to another cpu. Normally forced migrations
1211 * are used for iterations and the kernel returns to the original cpu before
1212 * returning and this is not needed. However, if the kernel migrates a
1213 * thread to another cpu and wants to leave it there, it has to call this
1214 * scheduler helper.
1216 * Note that the lwkt_migratecpu() function also released the thread, so
1217 * we don't have to worry about that.
1219 static
1220 void
1221 dfly_changedcpu(struct lwp *lp)
1223 dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1224 dfly_pcpu_t rdd = &dfly_pcpu[mycpu->gd_cpuid];
1226 if (dd != rdd) {
1227 spin_lock(&dd->spin);
1228 dfly_changeqcpu_locked(lp, dd, rdd);
1229 spin_unlock(&dd->spin);
1234 * Called from fork1() when a new child process is being created.
1236 * Give the child process an initial estcpu that is more batchy than
1237 * its parent and dock the parent for the fork (but do not
1238 * reschedule the parent).
1240 * fast
1242 * XXX lwp should be "spawning" instead of "forking"
1244 static void
1245 dfly_forking(struct lwp *plp, struct lwp *lp)
1248 * Put the child 4 queue slots (out of 32) higher than the parent
1249 * (less desirable than the parent).
1251 lp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ * 4);
1252 lp->lwp_forked = 1;
1253 lp->lwp_estfast = 0;
1256 * Even though the lp will be scheduled specially the first time
1257 * due to lp->lwp_forked, it is important to initialize lwp_qcpu
1258 * to avoid favoring a fixed cpu.
1260 #if 0
1261 static uint16_t save_cpu;
1262 lp->lwp_qcpu = ++save_cpu % ncpus;
1263 #else
1264 lp->lwp_qcpu = plp->lwp_qcpu;
1265 #endif
1268 * Dock the parent a cost for the fork, protecting us from fork
1269 * bombs. If the parent is forking quickly make the child more
1270 * batchy.
1272 plp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ / 16);
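With the defaults this works out to:

	/* child:  estcpu = parent estcpu + ESTCPUPPQ * 4 = parent + 2048     */
	/*         (four queues more batchy), starting on the parent's qcpu.  */
	/* parent: estcpu += ESTCPUPPQ / 16 = +32 per fork, so a tight fork   */
	/*         loop steadily degrades the parent's own priority as well.  */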
1276 * Called when a lwp is being removed from this scheduler, typically
1277 * during lwp_exit(). We have to clean out any ULOAD accounting before
1278 * we can let the lp go. The dd->spin lock is not needed for uload
1279 * updates.
1281 * Scheduler dequeueing has already occurred, no further action in that
1282 * regard is needed.
1284 static void
1285 dfly_exiting(struct lwp *lp, struct proc *child_proc)
1287 dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1289 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1290 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1291 atomic_add_int(&dd->uload, -lp->lwp_uload);
1292 atomic_add_int(&dd->ucount, -1);
1293 atomic_add_int(&dfly_ucount, -1);
1298 * This function cannot block in any way, but spinlocks are ok.
1300 * Update the uload based on the state of the thread (whether it is going
1301 * to sleep or running again). The uload is meant to be a longer-term
1302 * load and not an instantaneous load.
1304 static void
1305 dfly_uload_update(struct lwp *lp)
1307 dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1309 if (lp->lwp_thread->td_flags & TDF_RUNQ) {
1310 if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1311 spin_lock(&dd->spin);
1312 if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1313 atomic_set_int(&lp->lwp_mpflags,
1314 LWP_MP_ULOAD);
1315 atomic_add_int(&dd->uload, lp->lwp_uload);
1316 atomic_add_int(&dd->ucount, 1);
1317 atomic_add_int(&dfly_ucount, 1);
1319 spin_unlock(&dd->spin);
1321 } else if (lp->lwp_slptime > 0) {
1322 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1323 spin_lock(&dd->spin);
1324 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1325 atomic_clear_int(&lp->lwp_mpflags,
1326 LWP_MP_ULOAD);
1327 atomic_add_int(&dd->uload, -lp->lwp_uload);
1328 atomic_add_int(&dd->ucount, -1);
1329 atomic_add_int(&dfly_ucount, -1);
1331 spin_unlock(&dd->spin);
1337 * chooseproc() is called when a cpu needs a user process to LWKT schedule,
1338 * it selects a user process and returns it. If chklp is non-NULL and chklp
1339 * has a better or equal priority then the process that would otherwise be
1340 * chosen, NULL is returned.
1342 * Until we fix the RUNQ code the chklp test has to be strict or we may
1343 * bounce between processes trying to acquire the current process designation.
1345 * Must be called with rdd->spin locked. The spinlock is left intact through
1346 * the entire routine. dd->spin does not have to be locked.
1348 * If worst is non-zero this function finds the worst thread instead of the
1349 * best thread (used by the schedulerclock-based rover).
1351 static
1352 struct lwp *
1353 dfly_chooseproc_locked(dfly_pcpu_t rdd, dfly_pcpu_t dd,
1354 struct lwp *chklp, int worst)
1356 struct lwp *lp;
1357 struct rq *q;
1358 u_int32_t *which;
1359 u_int32_t pri;
1360 u_int32_t rtqbits;
1361 u_int32_t tsqbits;
1362 u_int32_t idqbits;
1364 rtqbits = rdd->rtqueuebits;
1365 tsqbits = rdd->queuebits;
1366 idqbits = rdd->idqueuebits;
1368 if (worst) {
1369 if (idqbits) {
1370 pri = bsrl(idqbits);
1371 q = &rdd->idqueues[pri];
1372 which = &rdd->idqueuebits;
1373 } else if (tsqbits) {
1374 pri = bsrl(tsqbits);
1375 q = &rdd->queues[pri];
1376 which = &rdd->queuebits;
1377 } else if (rtqbits) {
1378 pri = bsrl(rtqbits);
1379 q = &rdd->rtqueues[pri];
1380 which = &rdd->rtqueuebits;
1381 } else {
1382 return (NULL);
1384 lp = TAILQ_LAST(q, rq);
1385 } else {
1386 if (rtqbits) {
1387 pri = bsfl(rtqbits);
1388 q = &rdd->rtqueues[pri];
1389 which = &rdd->rtqueuebits;
1390 } else if (tsqbits) {
1391 pri = bsfl(tsqbits);
1392 q = &rdd->queues[pri];
1393 which = &rdd->queuebits;
1394 } else if (idqbits) {
1395 pri = bsfl(idqbits);
1396 q = &rdd->idqueues[pri];
1397 which = &rdd->idqueuebits;
1398 } else {
1399 return (NULL);
1401 lp = TAILQ_FIRST(q);
1403 KASSERT(lp, ("chooseproc: no lwp on busy queue"));
1406 * If the passed lwp <chklp> is reasonably close to the selected
1407 * lwp <lp>, return NULL (indicating that <chklp> should be kept).
1409 * Note that we must error on the side of <chklp> to avoid bouncing
1410 * between threads in the acquire code.
1412 if (chklp) {
1413 if (chklp->lwp_priority < lp->lwp_priority + PPQ)
1414 return(NULL);
1417 KTR_COND_LOG(usched_chooseproc,
1418 lp->lwp_proc->p_pid == usched_dfly_pid_debug,
1419 lp->lwp_proc->p_pid,
1420 lp->lwp_thread->td_gd->gd_cpuid,
1421 mycpu->gd_cpuid);
1423 KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) != 0, ("not on runq6!"));
1424 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1425 TAILQ_REMOVE(q, lp, lwp_procq);
1426 --rdd->runqcount;
1427 if (TAILQ_EMPTY(q))
1428 *which &= ~(1 << pri);
1431 * If we are choosing a process from rdd with the intent to
1432 * move it to dd, lwp_qcpu must be adjusted while rdd's spinlock
1433 * is still held.
1435 if (rdd != dd) {
1436 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1437 atomic_add_int(&rdd->uload, -lp->lwp_uload);
1438 atomic_add_int(&rdd->ucount, -1);
1439 atomic_add_int(&dfly_ucount, -1);
1441 lp->lwp_qcpu = dd->cpuid;
1442 atomic_add_int(&dd->uload, lp->lwp_uload);
1443 atomic_add_int(&dd->ucount, 1);
1444 atomic_add_int(&dfly_ucount, 1);
1445 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1447 return lp;
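For reference, the two call patterns that appear elsewhere in this listing:

	/* normal selection on the local cpu (dfly_select_curproc) */
	nlp = dfly_chooseproc_locked(dd, dd, dd->uschedcp, 0);

	/* rover pulling the worst thread from a remote cpu (dfly_schedulerclock) */
	nlp = dfly_chooseproc_locked(rdd, dd, NULL, 1);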
1451 * USED TO PUSH RUNNABLE LWPS TO THE LEAST LOADED CPU.
1453 * Choose a cpu node to schedule lp on, hopefully nearby its current
1454 * node.
1456 * We give the current node a modest advantage for obvious reasons.
1458 * We also give the node the thread was woken up FROM a slight advantage
1459 * in order to try to schedule paired threads which synchronize/block waiting
1460 * for each other fairly close to each other. Similarly in a network setting
1461 * this feature will also attempt to place a user process near the kernel
1462 * protocol thread that is feeding it data. THIS IS A CRITICAL PART of the
1463 * algorithm as it heuristically groups synchronizing processes for locality
1464 * of reference in multi-socket systems.
1466 * We check against running processes and give a big advantage if there
1467 * are none running.
1469 * The caller will normally dfly_setrunqueue() lp on the returned queue.
1471 * When the topology is known choose a cpu whose group has, in aggregate,
1472 * the lowest weighted load.
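A simplified sketch of the per-group score the loop below computes (assumptions: a single flat group, no wakemask/weight2 handling, and the gd_tdrunqcount check omitted):

static int
example_group_load(dfly_pcpu_t *members, int count, int is_current_group)
{
	int load = 0;
	int n;

	for (n = 0; n < count; ++n) {
		load += members[n]->uload;
		load += members[n]->ucount * usched_dfly_weight3;
		if (members[n]->uschedcp == NULL && members[n]->runqcount == 0)
			load -= usched_dfly_weight4;	/* idle cpu bonus */
	}
	load /= count;				/* average over the group */
	if (is_current_group)
		load -= usched_dfly_weight1;	/* stickiness advantage */
	return (load);				/* lowest score wins */
}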
1474 static
1475 dfly_pcpu_t
1476 dfly_choose_best_queue(struct lwp *lp)
1478 cpumask_t wakemask;
1479 cpumask_t mask;
1480 cpu_node_t *cpup;
1481 cpu_node_t *cpun;
1482 cpu_node_t *cpub;
1483 dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1484 dfly_pcpu_t rdd;
1485 int wakecpu;
1486 int cpuid;
1487 int n;
1488 int count;
1489 int load;
1490 int lowest_load;
1493 * When the topology is unknown choose a random cpu that is hopefully
1494 * idle.
1496 if (dd->cpunode == NULL)
1497 return (dfly_choose_queue_simple(dd, lp));
1500 * Pairing mask
1502 if ((wakecpu = lp->lwp_thread->td_wakefromcpu) >= 0)
1503 wakemask = dfly_pcpu[wakecpu].cpumask;
1504 else
1505 CPUMASK_ASSZERO(wakemask);
1508 * When the topology is known choose a cpu whose group has, in
1509 * aggregate, the lowest weighted load.
1511 cpup = root_cpu_node;
1512 rdd = dd;
1514 while (cpup) {
1516 * Degenerate case super-root
1518 if (cpup->child_no == 1) {
1519 cpup = cpup->child_node[0];
1520 continue;
1524 * Terminal cpunode
1526 if (cpup->child_no == 0) {
1527 rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
1528 break;
1531 cpub = NULL;
1532 lowest_load = 0x7FFFFFFF;
1534 for (n = 0; n < cpup->child_no; ++n) {
1536 * Accumulate load information for all cpus
1537 * which are members of this node.
1539 cpun = cpup->child_node[n];
1540 mask = cpun->members;
1541 CPUMASK_ANDMASK(mask, usched_global_cpumask);
1542 CPUMASK_ANDMASK(mask, smp_active_mask);
1543 CPUMASK_ANDMASK(mask, lp->lwp_cpumask);
1544 if (CPUMASK_TESTZERO(mask))
1545 continue;
1547 count = 0;
1548 load = 0;
1550 while (CPUMASK_TESTNZERO(mask)) {
1551 cpuid = BSFCPUMASK(mask);
1552 rdd = &dfly_pcpu[cpuid];
1553 load += rdd->uload;
1554 load += rdd->ucount * usched_dfly_weight3;
1556 if (rdd->uschedcp == NULL &&
1557 rdd->runqcount == 0 &&
1558 globaldata_find(cpuid)->gd_tdrunqcount == 0
1560 load -= usched_dfly_weight4;
1562 #if 0
1563 else if (rdd->upri > lp->lwp_priority + PPQ) {
1564 load -= usched_dfly_weight4 / 2;
1566 #endif
1567 CPUMASK_NANDBIT(mask, cpuid);
1568 ++count;
1572 * Compensate if the lp is already accounted for in
1573 * the aggregate uload for this mask set. We want
1574 * to calculate the loads as if lp were not present,
1575 * otherwise the calculation is bogus.
1577 if ((lp->lwp_mpflags & LWP_MP_ULOAD) &&
1578 CPUMASK_TESTMASK(dd->cpumask, cpun->members)) {
1579 load -= lp->lwp_uload;
1580 load -= usched_dfly_weight3;
1583 load /= count;
1586 * Advantage the cpu group (lp) is already on.
1588 if (CPUMASK_TESTMASK(cpun->members, dd->cpumask))
1589 load -= usched_dfly_weight1;
1592 * Advantage the cpu group we want to pair (lp) to,
1593 * but don't let it go to the exact same cpu as
1594 * the wakecpu target.
1596 * We do this by checking whether cpun is a
1597 * terminal node or not. All cpun's at the same
1598 * level will either all be terminal or all not
1599 * terminal.
1601 * If it is and we match we disadvantage the load.
1602 * If it is and we don't match we advantage the load.
1604 * Also note that we are effectively disadvantaging
1605 * all-but-one by the same amount, so it won't affect
1606 * the weight1 factor for the all-but-one nodes.
1608 if (CPUMASK_TESTMASK(cpun->members, wakemask)) {
1609 if (cpun->child_no != 0) {
1610 /* advantage */
1611 load -= usched_dfly_weight2;
1612 } else {
1613 if (usched_dfly_features & 0x10)
1614 load += usched_dfly_weight2;
1615 else
1616 load -= usched_dfly_weight2;
1621 * Calculate the best load
1623 if (cpub == NULL || lowest_load > load ||
1624 (lowest_load == load &&
1625 CPUMASK_TESTMASK(cpun->members, dd->cpumask))
1627 lowest_load = load;
1628 cpub = cpun;
1631 cpup = cpub;
1633 /* Dispatch this outcast to a proper CPU. */
1634 if (__predict_false(CPUMASK_TESTBIT(lp->lwp_cpumask, rdd->cpuid) == 0))
1635 rdd = &dfly_pcpu[BSFCPUMASK(lp->lwp_cpumask)];
1636 if (usched_dfly_chooser > 0) {
1637 --usched_dfly_chooser; /* only N lines */
1638 kprintf("lp %02d->%02d %s\n",
1639 lp->lwp_qcpu, rdd->cpuid, lp->lwp_proc->p_comm);
1641 return (rdd);
1645 * USED TO PULL RUNNABLE LWPS FROM THE MOST LOADED CPU.
1647 * Choose the worst queue close to dd's cpu node with a non-empty runq
1648 * that is NOT dd. Also require that the moving of the highest-load thread
1649 * from rdd to dd does not cause the uloads to cross each other.
1651 * This is used by the thread chooser when the current cpu's queues are
1652 * empty to steal a thread from another cpu's queue. We want to offload
1653 * the most heavily-loaded queue.
1655 static
1656 dfly_pcpu_t
1657 dfly_choose_worst_queue(dfly_pcpu_t dd)
1659 cpumask_t mask;
1660 cpu_node_t *cpup;
1661 cpu_node_t *cpun;
1662 cpu_node_t *cpub;
1663 dfly_pcpu_t rdd;
1664 int cpuid;
1665 int n;
1666 int count;
1667 int load;
1668 #if 0
1669 int pri;
1670 int hpri;
1671 #endif
1672 int highest_load;
1675 * When the topology is unknown choose a random cpu that is hopefully
1676 * idle.
1678 if (dd->cpunode == NULL) {
1679 return (NULL);
1683 * When the topology is known choose a cpu whose group has, in
1684 * aggregate, the highest weighted load.
1686 cpup = root_cpu_node;
1687 rdd = dd;
1688 while (cpup) {
1690 * Degenerate case super-root
1692 if (cpup->child_no == 1) {
1693 cpup = cpup->child_node[0];
1694 continue;
1698 * Terminal cpunode
1700 if (cpup->child_no == 0) {
1701 rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
1702 break;
1705 cpub = NULL;
1706 highest_load = 0;
1708 for (n = 0; n < cpup->child_no; ++n) {
1710 * Accumulate load information for all cpus
1711 * which are members of this node.
1713 cpun = cpup->child_node[n];
1714 mask = cpun->members;
1715 CPUMASK_ANDMASK(mask, usched_global_cpumask);
1716 CPUMASK_ANDMASK(mask, smp_active_mask);
1717 if (CPUMASK_TESTZERO(mask))
1718 continue;
1720 count = 0;
1721 load = 0;
1723 while (CPUMASK_TESTNZERO(mask)) {
1724 cpuid = BSFCPUMASK(mask);
1725 rdd = &dfly_pcpu[cpuid];
1726 load += rdd->uload;
1727 load += rdd->ucount * usched_dfly_weight3;
1729 if (rdd->uschedcp == NULL &&
1730 rdd->runqcount == 0 &&
1731 globaldata_find(cpuid)->gd_tdrunqcount == 0
1733 load -= usched_dfly_weight4;
1735 #if 0
1736 else if (rdd->upri > dd->upri + PPQ) {
1737 load -= usched_dfly_weight4 / 2;
1739 #endif
1740 CPUMASK_NANDBIT(mask, cpuid);
1741 ++count;
1743 load /= count;
1746 * Prefer candidates which are somewhat closer to
1747 * our cpu.
1749 if (CPUMASK_TESTMASK(dd->cpumask, cpun->members))
1750 load += usched_dfly_weight1;
1753 * The best candidate is the one with the worst
1754 * (highest) load.
1756 if (cpub == NULL || highest_load < load ||
1757 (highest_load == load &&
1758 CPUMASK_TESTMASK(cpun->members, dd->cpumask))) {
1759 highest_load = load;
1760 cpub = cpun;
1763 cpup = cpub;
1767 * We never return our own node (dd), and only return a remote
1768 * node if its load is significantly worse than ours (i.e. where
1769 * stealing a thread would be considered reasonable).
1771 * This also helps us avoid breaking paired threads apart which
1772 * can have disastrous effects on performance.
1774 if (rdd == dd)
1775 return(NULL);
1777 #if 0
1778 hpri = 0;
1779 if (rdd->rtqueuebits && hpri < (pri = bsrl(rdd->rtqueuebits)))
1780 hpri = pri;
1781 if (rdd->queuebits && hpri < (pri = bsrl(rdd->queuebits)))
1782 hpri = pri;
1783 if (rdd->idqueuebits && hpri < (pri = bsrl(rdd->idqueuebits)))
1784 hpri = pri;
1785 hpri *= PPQ;
1786 if (rdd->uload - hpri < dd->uload + hpri)
1787 return(NULL);
1788 #endif
1789 return (rdd);
1792 static
1793 dfly_pcpu_t
1794 dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp)
1796 dfly_pcpu_t rdd;
1797 cpumask_t tmpmask;
1798 cpumask_t mask;
1799 int cpubase;
1800 int cpuid;
1803 * Fallback to the original heuristic, select random cpu,
1804 * first checking the cpus not currently running a user thread.
1806 * Use cpuid as the base cpu in our scan, first checking
1807 * cpuid...(ncpus-1), then 0...(cpuid-1). This avoids favoring
1808 * lower-numbered cpus.
1810 ++dd->scancpu; /* SMP race ok */
1811 mask = dfly_rdyprocmask;
1812 CPUMASK_NANDMASK(mask, dfly_curprocmask);
1813 CPUMASK_ANDMASK(mask, lp->lwp_cpumask);
1814 CPUMASK_ANDMASK(mask, smp_active_mask);
1815 CPUMASK_ANDMASK(mask, usched_global_cpumask);
1817 cpubase = (int)(dd->scancpu % ncpus);
1818 CPUMASK_ASSBMASK(tmpmask, cpubase);
1819 CPUMASK_INVMASK(tmpmask);
1820 CPUMASK_ANDMASK(tmpmask, mask);
1821 while (CPUMASK_TESTNZERO(tmpmask)) {
1822 cpuid = BSFCPUMASK(tmpmask);
1823 rdd = &dfly_pcpu[cpuid];
1825 if ((rdd->upri & ~PPQMASK) >= (lp->lwp_priority & ~PPQMASK))
1826 goto found;
1827 CPUMASK_NANDBIT(tmpmask, cpuid);
1830 CPUMASK_ASSBMASK(tmpmask, cpubase);
1831 CPUMASK_ANDMASK(tmpmask, mask);
1832 while (CPUMASK_TESTNZERO(tmpmask)) {
1833 cpuid = BSFCPUMASK(tmpmask);
1834 rdd = &dfly_pcpu[cpuid];
1836 if ((rdd->upri & ~PPQMASK) >= (lp->lwp_priority & ~PPQMASK))
1837 goto found;
1838 CPUMASK_NANDBIT(tmpmask, cpuid);
1842 * Then cpus which might have a currently running lp
1844 mask = dfly_rdyprocmask;
1845 CPUMASK_ANDMASK(mask, dfly_curprocmask);
1846 CPUMASK_ANDMASK(mask, lp->lwp_cpumask);
1847 CPUMASK_ANDMASK(mask, smp_active_mask);
1848 CPUMASK_ANDMASK(mask, usched_global_cpumask);
1850 CPUMASK_ASSBMASK(tmpmask, cpubase);
1851 CPUMASK_INVMASK(tmpmask);
1852 CPUMASK_ANDMASK(tmpmask, mask);
1853 while (CPUMASK_TESTNZERO(tmpmask)) {
1854 cpuid = BSFCPUMASK(tmpmask);
1855 rdd = &dfly_pcpu[cpuid];
1857 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK))
1858 goto found;
1859 CPUMASK_NANDBIT(tmpmask, cpuid);
1862 CPUMASK_ASSBMASK(tmpmask, cpubase);
1863 CPUMASK_ANDMASK(tmpmask, mask);
1864 while (CPUMASK_TESTNZERO(tmpmask)) {
1865 cpuid = BSFCPUMASK(tmpmask);
1866 rdd = &dfly_pcpu[cpuid];
1868 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK))
1869 goto found;
1870 CPUMASK_NANDBIT(tmpmask, cpuid);
1874 * If we cannot find a suitable cpu we round-robin using scancpu.
1875 * Other cpus will pickup as they release their current lwps or
1876 * become ready.
1878 * Avoid a degenerate system lockup case if usched_global_cpumask
1879 * is set to 0 or otherwise does not cover lwp_cpumask.
1881 * We only kick the target helper thread in this case, we do not
1882 * set the user resched flag because
1884 cpuid = cpubase;
1885 if (CPUMASK_TESTBIT(lp->lwp_cpumask, cpuid) == 0)
1886 cpuid = BSFCPUMASK(lp->lwp_cpumask);
1887 else if (CPUMASK_TESTBIT(usched_global_cpumask, cpuid) == 0)
1888 cpuid = 0;
1889 rdd = &dfly_pcpu[cpuid];
1890 found:
1891 return (rdd);
1894 static
1895 void
1896 dfly_need_user_resched_remote(void *dummy)
1898 globaldata_t gd = mycpu;
1899 dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
1902 * Flag reschedule needed
1904 need_user_resched();
1907 * If no user thread is currently running we need to kick the helper
1908 * on our cpu to recover. Otherwise the cpu will never schedule
1909 * anything again.
1911 * We cannot schedule the process ourselves because this is an
1912 * IPI callback and we cannot acquire spinlocks in an IPI callback.
1914 * Call wakeup_mycpu to avoid sending IPIs to other CPUs
1916 if (dd->uschedcp == NULL &&
1917 CPUMASK_TESTBIT(dfly_rdyprocmask, gd->gd_cpuid)) {
1918 ATOMIC_CPUMASK_NANDBIT(dfly_rdyprocmask, gd->gd_cpuid);
1919 wakeup_mycpu(dd->helper_thread);
1924 * dfly_remrunqueue_locked() removes a given process from the run queue
1925 * that it is on, clearing the queue busy bit if it becomes empty.
1927 * Note that the user process scheduler is different from the LWKT scheduler.
1928 * The user process scheduler only manages user processes but it uses LWKT
1929 * underneath, and a user process operating in the kernel will often be
1930 * 'released' from our management.
1932 * uload is NOT adjusted here. It is only adjusted if the lwkt_thread goes
1933 * to sleep or the lwp is moved to a different runq.
1935 static void
1936 dfly_remrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
1938 struct rq *q;
1939 u_int32_t *which;
1940 u_int8_t pri;
1942 KKASSERT(rdd->runqcount >= 0);
1944 pri = lp->lwp_rqindex;
1946 switch(lp->lwp_rqtype) {
1947 case RTP_PRIO_NORMAL:
1948 q = &rdd->queues[pri];
1949 which = &rdd->queuebits;
1950 break;
1951 case RTP_PRIO_REALTIME:
1952 case RTP_PRIO_FIFO:
1953 q = &rdd->rtqueues[pri];
1954 which = &rdd->rtqueuebits;
1955 break;
1956 case RTP_PRIO_IDLE:
1957 q = &rdd->idqueues[pri];
1958 which = &rdd->idqueuebits;
1959 break;
1960 default:
1961 panic("remrunqueue: invalid rtprio type");
1962 /* NOT REACHED */
1964 KKASSERT(lp->lwp_mpflags & LWP_MP_ONRUNQ);
1965 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1966 TAILQ_REMOVE(q, lp, lwp_procq);
1967 --rdd->runqcount;
1968 if (TAILQ_EMPTY(q)) {
1969 KASSERT((*which & (1 << pri)) != 0,
1970 ("remrunqueue: remove from empty queue"));
1971 *which &= ~(1 << pri);
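/*
 * Illustrative sketch (not kernel code): the rqindex/bitmap bookkeeping used
 * above.  Each of the 32 queues owns one bit in its type's bitmap; the bit
 * is set while the queue is non-empty and cleared on the last removal, so
 * queue selection can use bit-scan instructions instead of walking all 32
 * queues.  A minimal userland model using per-queue counters in place of
 * the TAILQs:
 */
#if 0
#include <assert.h>
#include <stdio.h>

#define SKETCH_NQS	32

static unsigned int	queuebits;		/* one bit per queue */
static int		qlen[SKETCH_NQS];	/* stand-in for the TAILQs */

static void
sketch_setrunqueue(int pri)
{
	assert(pri >= 0 && pri < SKETCH_NQS);
	++qlen[pri];
	queuebits |= 1u << pri;
}

static void
sketch_remrunqueue(int pri)
{
	assert(qlen[pri] > 0);
	if (--qlen[pri] == 0) {
		assert(queuebits & (1u << pri));
		queuebits &= ~(1u << pri);
	}
}

int
main(void)
{
	sketch_setrunqueue(7);
	sketch_setrunqueue(7);
	sketch_remrunqueue(7);
	printf("bit 7 set: %d\n", (queuebits >> 7) & 1);	/* 1 */
	sketch_remrunqueue(7);
	printf("bit 7 set: %d\n", (queuebits >> 7) & 1);	/* 0 */
	return (0);
}
#endif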
1976 * dfly_setrunqueue_locked()
1978 * Add a process whose rqtype and rqindex have previously been calculated
1979 * onto the appropriate run queue.  Whether the addition requires a
1980 * reschedule on another cpu is decided by the caller.
1982 * NOTE: Lower priorities are better priorities.
1984 * NOTE ON ULOAD: This variable specifies the aggregate load on a cpu, the
1985 * sum of the rough lwp_priority for all running and runnable
1986 * processes. Lower priority processes (higher lwp_priority
1987 * values) actually DO count as more load, not less, because
1988 * these are the programs which require the most care with
1989 * regards to cpu selection.
1991 static void
1992 dfly_setrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
1994 u_int32_t *which;
1995 struct rq *q;
1996 int pri;
1998 KKASSERT(lp->lwp_qcpu == rdd->cpuid);
2000 if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
2001 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
2002 atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].uload, lp->lwp_uload);
2003 atomic_add_int(&dfly_pcpu[lp->lwp_qcpu].ucount, 1);
2004 atomic_add_int(&dfly_ucount, 1);
2007 pri = lp->lwp_rqindex;
2009 switch(lp->lwp_rqtype) {
2010 case RTP_PRIO_NORMAL:
2011 q = &rdd->queues[pri];
2012 which = &rdd->queuebits;
2013 break;
2014 case RTP_PRIO_REALTIME:
2015 case RTP_PRIO_FIFO:
2016 q = &rdd->rtqueues[pri];
2017 which = &rdd->rtqueuebits;
2018 break;
2019 case RTP_PRIO_IDLE:
2020 q = &rdd->idqueues[pri];
2021 which = &rdd->idqueuebits;
2022 break;
2023 default:
2024 panic("setrunqueue: invalid rtprio type");
2025 /* NOT REACHED */
2029 * Place us on the selected queue. Determine if we should be
2030 * placed at the head of the queue or at the end.
2032 * We are placed at the tail if our round-robin count has expired,
2033 * or is about to expire and the system thinks it's a good place to
2034 * round-robin, or there is already a next thread on the queue
2035 * (it might be trying to pick up where it left off and we don't
2036 * want to interfere).
2038 KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
2039 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
2040 ++rdd->runqcount;
2042 if (lp->lwp_rrcount >= usched_dfly_rrinterval ||
2043 (lp->lwp_rrcount >= usched_dfly_rrinterval / 2 &&
2044 (lp->lwp_thread->td_mpflags & TDF_MP_BATCH_DEMARC))
2047 * Place on tail
2049 atomic_clear_int(&lp->lwp_thread->td_mpflags,
2050 TDF_MP_BATCH_DEMARC);
2051 lp->lwp_rrcount = 0;
2052 TAILQ_INSERT_TAIL(q, lp, lwp_procq);
2053 } else {
2055 * Retain rrcount and place on head. Count is retained
2056 * even if the queue is empty.
2058 TAILQ_INSERT_HEAD(q, lp, lwp_procq);
2060 *which |= 1 << pri;
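/*
 * Illustrative sketch (not kernel code): the head-vs-tail decision above.
 * A thread whose round-robin count has expired (or is half-expired and was
 * flagged as a batch demarcation point) goes to the tail with its count
 * reset; otherwise it keeps its count and is placed at the head so it can
 * resume where it left off.  Minimal userland model, assuming a
 * round-robin interval of 8 ticks:
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

#define SKETCH_RRINTERVAL	8

/* Returns true for tail placement and resets *rrcount, as above. */
static bool
sketch_place_at_tail(int *rrcount, bool batch_demarc)
{
	if (*rrcount >= SKETCH_RRINTERVAL ||
	    (*rrcount >= SKETCH_RRINTERVAL / 2 && batch_demarc)) {
		*rrcount = 0;
		return (true);
	}
	return (false);			/* head, rrcount retained */
}

int
main(void)
{
	int rr = 5;

	printf("%d\n", sketch_place_at_tail(&rr, false));	/* 0: head */
	printf("%d\n", sketch_place_at_tail(&rr, true));	/* 1: tail */
	printf("rr=%d\n", rr);					/* rr=0 */
	return (0);
}
#endif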
2064 * For SMP systems a user scheduler helper thread is created for each
2065 * cpu and is used to allow one cpu to wake up another for the purpose of
2066 * scheduling userland threads from setrunqueue().
2068 * UP systems do not need the helper since there is only one cpu.
2070 * We can't use the idle thread for this because we might block.
2071 * Additionally, doing things this way allows us to HLT idle cpus
2072 * on MP systems.
2074 static void
2075 dfly_helper_thread(void *dummy)
2077 globaldata_t gd;
2078 dfly_pcpu_t dd;
2079 dfly_pcpu_t rdd;
2080 struct lwp *nlp;
2081 cpumask_t mask;
2082 int cpuid;
2084 gd = mycpu;
2085 cpuid = gd->gd_cpuid; /* doesn't change */
2086 mask = gd->gd_cpumask; /* doesn't change */
2087 dd = &dfly_pcpu[cpuid];
2090 * Since we want to be woken up only when no user processes
2091 * are scheduled on this cpu, run at an ultra low priority.
2093 lwkt_setpri_self(TDPRI_USER_SCHEDULER);
2095 tsleep(dd->helper_thread, 0, "schslp", 0);
2097 for (;;) {
2099 * We use the LWKT deschedule-interlock trick to avoid racing
2100 * dfly_rdyprocmask. This means we cannot block through to the
2101 * interlocked tsleep() call we make below.
2103 crit_enter_gd(gd);
2104 tsleep_interlock(dd->helper_thread, 0);
2106 spin_lock(&dd->spin);
2108 ATOMIC_CPUMASK_ORMASK(dfly_rdyprocmask, mask);
2109 clear_user_resched(); /* This satisfies the reschedule request */
2110 #if 0
2111 dd->rrcount = 0; /* Reset the round-robin counter */
2112 #endif
2114 if (dd->runqcount || dd->uschedcp != NULL) {
2116 * Threads are available. A thread may or may not be
2117 * currently scheduled. Get the best thread already queued
2118 * to this cpu.
2120 nlp = dfly_chooseproc_locked(dd, dd, dd->uschedcp, 0);
2121 if (nlp) {
2122 ATOMIC_CPUMASK_ORMASK(dfly_curprocmask, mask);
2123 dd->upri = nlp->lwp_priority;
2124 dd->uschedcp = nlp;
2125 #if 0
2126 dd->rrcount = 0; /* reset round robin */
2127 #endif
2128 spin_unlock(&dd->spin);
2129 lwkt_acquire(nlp->lwp_thread);
2130 lwkt_schedule(nlp->lwp_thread);
2131 } else {
2133 * This situation should not occur because we had
2134 * at least one thread available.
2136 spin_unlock(&dd->spin);
2138 } else if (usched_dfly_features & 0x01) {
2140 * This cpu is devoid of runnable threads, steal a thread
2141 * from another cpu. Since we're stealing, might as well
2142 * load balance at the same time.
2144 * We choose the highest-loaded thread from the worst queue.
2146 * NOTE! This function only returns a non-NULL rdd when
2147 * another cpu's queue is obviously overloaded. We
2148 * do not want to perform the type of rebalancing
2149 * the schedclock does here because it would result
2150 * in insane process pulling when 'steady' state is
2151 * partially unbalanced (e.g. 6 runnables and only
2152 * 4 cores).
2154 rdd = dfly_choose_worst_queue(dd);
2155 if (rdd && spin_trylock(&rdd->spin)) {
2156 nlp = dfly_chooseproc_locked(rdd, dd, NULL, 1);
2157 spin_unlock(&rdd->spin);
2158 } else {
2159 nlp = NULL;
2161 if (nlp) {
2162 ATOMIC_CPUMASK_ORMASK(dfly_curprocmask, mask);
2163 dd->upri = nlp->lwp_priority;
2164 dd->uschedcp = nlp;
2165 #if 0
2166 dd->rrcount = 0; /* reset round robin */
2167 #endif
2168 spin_unlock(&dd->spin);
2169 lwkt_acquire(nlp->lwp_thread);
2170 lwkt_schedule(nlp->lwp_thread);
2171 } else {
2173 * Leave the thread on our run queue. Another
2174 * scheduler will try to pull it later.
2176 spin_unlock(&dd->spin);
2178 } else {
2180 * This cpu is devoid of runnable threads and we are not allowed
2181 * to steal any.
2183 spin_unlock(&dd->spin);
2187 * We're descheduled unless someone scheduled us. Switch away.
2188 * Exiting the critical section will cause splz() to be called
2189 * for us if interrupts and such are pending.
2191 crit_exit_gd(gd);
2192 tsleep(dd->helper_thread, PINTERLOCKED, "schslp", 0);
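/*
 * Illustrative sketch (not kernel code): the interlock pattern used by the
 * helper loop above.  tsleep_interlock() registers the sleeper before the
 * spinlock is dropped, so a wakeup issued between the unlock and the final
 * tsleep(PINTERLOCKED) is not lost.  A hedged userland analogue of the same
 * idea is a condition variable, whose wait atomically drops the mutex:
 */
#if 0
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t	sketch_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t	sketch_cond = PTHREAD_COND_INITIALIZER;
static bool		sketch_work_ready;

/* Consumer: analogue of the helper's interlocked sleep. */
static void
sketch_wait_for_work(void)
{
	pthread_mutex_lock(&sketch_lock);
	while (!sketch_work_ready)
		pthread_cond_wait(&sketch_cond, &sketch_lock);
	sketch_work_ready = false;
	pthread_mutex_unlock(&sketch_lock);
}

/* Producer: analogue of wakeup() on the helper thread. */
static void *
sketch_post_work(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&sketch_lock);
	sketch_work_ready = true;
	pthread_cond_signal(&sketch_cond);
	pthread_mutex_unlock(&sketch_lock);
	return (NULL);
}

int
main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, sketch_post_work, NULL);
	sketch_wait_for_work();
	pthread_join(tid, NULL);
	return (0);
}
#endif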
2196 #if 0
2197 static int
2198 sysctl_usched_dfly_stick_to_level(SYSCTL_HANDLER_ARGS)
2200 int error, new_val;
2202 new_val = usched_dfly_stick_to_level;
2204 error = sysctl_handle_int(oidp, &new_val, 0, req);
2205 if (error != 0 || req->newptr == NULL)
2206 return (error);
2207 if (new_val > cpu_topology_levels_number - 1 || new_val < 0)
2208 return (EINVAL);
2209 usched_dfly_stick_to_level = new_val;
2210 return (0);
2212 #endif
2215 * Set up the queues and scheduler helpers (scheduler helpers are SMP only).
2216 * Note that curprocmask bit 0 has already been cleared by rqinit() and
2217 * we should not mess with it further.
2219 static void
2220 usched_dfly_cpu_init(void)
2222 int i;
2223 int j;
2224 int smt_not_supported = 0;
2225 int cache_coherent_not_supported = 0;
2227 if (bootverbose)
2228 kprintf("Start usched_dfly helpers on cpus:\n");
2230 sysctl_ctx_init(&usched_dfly_sysctl_ctx);
2231 usched_dfly_sysctl_tree =
2232 SYSCTL_ADD_NODE(&usched_dfly_sysctl_ctx,
2233 SYSCTL_STATIC_CHILDREN(_kern), OID_AUTO,
2234 "usched_dfly", CTLFLAG_RD, 0, "");
2236 for (i = 0; i < ncpus; ++i) {
2237 dfly_pcpu_t dd = &dfly_pcpu[i];
2238 cpumask_t mask;
2240 CPUMASK_ASSBIT(mask, i);
2241 if (CPUMASK_TESTMASK(mask, smp_active_mask) == 0)
2242 continue;
2244 spin_init(&dd->spin, "uschedcpuinit");
2245 dd->cpunode = get_cpu_node_by_cpuid(i);
2246 dd->cpuid = i;
2247 CPUMASK_ASSBIT(dd->cpumask, i);
2248 for (j = 0; j < NQS; j++) {
2249 TAILQ_INIT(&dd->queues[j]);
2250 TAILQ_INIT(&dd->rtqueues[j]);
2251 TAILQ_INIT(&dd->idqueues[j]);
2253 ATOMIC_CPUMASK_NANDBIT(dfly_curprocmask, 0);
2255 if (dd->cpunode == NULL) {
2256 smt_not_supported = 1;
2257 cache_coherent_not_supported = 1;
2258 if (bootverbose)
2259 kprintf (" cpu%d - WARNING: No CPU NODE "
2260 "found for cpu\n", i);
2261 } else {
2262 switch (dd->cpunode->type) {
2263 case THREAD_LEVEL:
2264 if (bootverbose)
2265 kprintf (" cpu%d - HyperThreading "
2266 "available. Core siblings: ",
2268 break;
2269 case CORE_LEVEL:
2270 smt_not_supported = 1;
2272 if (bootverbose)
2273 kprintf (" cpu%d - No HT available, "
2274 "multi-core/physical "
2275 "cpu. Physical siblings: ",
2277 break;
2278 case CHIP_LEVEL:
2279 smt_not_supported = 1;
2281 if (bootverbose)
2282 kprintf (" cpu%d - No HT available, "
2283 "single-core/physical cpu. "
2284 "Package siblings: ",
2286 break;
2287 default:
2288 /* Let's go for safe defaults here */
2289 smt_not_supported = 1;
2290 cache_coherent_not_supported = 1;
2291 if (bootverbose)
2292 kprintf (" cpu%d - Unknown cpunode->"
2293 "type=%u. siblings: ",
2295 (u_int)dd->cpunode->type);
2296 break;
2299 if (bootverbose) {
2300 if (dd->cpunode->parent_node != NULL) {
2301 kprint_cpuset(&dd->cpunode->
2302 parent_node->members);
2303 kprintf("\n");
2304 } else {
2305 kprintf(" no siblings\n");
2310 lwkt_create(dfly_helper_thread, NULL, &dd->helper_thread, NULL,
2311 0, i, "usched %d", i);
2314 * Allow user scheduling on the target cpu. cpu #0 has already
2315 * been enabled in rqinit().
2317 if (i)
2318 ATOMIC_CPUMASK_NANDMASK(dfly_curprocmask, mask);
2319 ATOMIC_CPUMASK_ORMASK(dfly_rdyprocmask, mask);
2320 dd->upri = PRIBASE_NULL;
2324 /* usched_dfly sysctl configurable parameters */
2326 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2327 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2328 OID_AUTO, "rrinterval", CTLFLAG_RW,
2329 &usched_dfly_rrinterval, 0, "");
2330 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2331 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2332 OID_AUTO, "decay", CTLFLAG_RW,
2333 &usched_dfly_decay, 0, "Extra decay when not running");
2335 /* Add enable/disable option for SMT scheduling if supported */
2336 if (smt_not_supported) {
2337 usched_dfly_smt = 0;
2338 SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
2339 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2340 OID_AUTO, "smt", CTLFLAG_RD,
2341 "NOT SUPPORTED", 0, "SMT NOT SUPPORTED");
2342 } else {
2343 usched_dfly_smt = 1;
2344 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2345 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2346 OID_AUTO, "smt", CTLFLAG_RW,
2347 &usched_dfly_smt, 0, "Enable SMT scheduling");
2351 * Add enable/disable option for cache coherent scheduling
2352 * if supported
2354 if (cache_coherent_not_supported) {
2355 usched_dfly_cache_coherent = 0;
2356 SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
2357 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2358 OID_AUTO, "cache_coherent", CTLFLAG_RD,
2359 "NOT SUPPORTED", 0,
2360 "Cache coherence NOT SUPPORTED");
2361 } else {
2362 usched_dfly_cache_coherent = 1;
2363 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2364 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2365 OID_AUTO, "cache_coherent", CTLFLAG_RW,
2366 &usched_dfly_cache_coherent, 0,
2367 "Enable/Disable cache coherent scheduling");
2369 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2370 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2371 OID_AUTO, "weight1", CTLFLAG_RW,
2372 &usched_dfly_weight1, 200,
2373 "Weight selection for current cpu");
2375 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2376 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2377 OID_AUTO, "weight2", CTLFLAG_RW,
2378 &usched_dfly_weight2, 180,
2379 "Weight selection for wakefrom cpu");
2381 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2382 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2383 OID_AUTO, "weight3", CTLFLAG_RW,
2384 &usched_dfly_weight3, 40,
2385 "Weight selection for num threads on queue");
2387 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2388 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2389 OID_AUTO, "weight4", CTLFLAG_RW,
2390 &usched_dfly_weight4, 160,
2391 "Availability of other idle cpus");
2393 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2394 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2395 OID_AUTO, "fast_resched", CTLFLAG_RW,
2396 &usched_dfly_fast_resched, 0,
2397 "Priority delta required for a fast remote reschedule");
2399 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2400 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2401 OID_AUTO, "features", CTLFLAG_RW,
2402 &usched_dfly_features, 0x8F,
2403 "Allow pulls into empty queues");
2405 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2406 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2407 OID_AUTO, "swmask", CTLFLAG_RW,
2408 &usched_dfly_swmask, ~PPQMASK,
2409 "Queue mask to force thread switch");
2411 #if 0
2412 SYSCTL_ADD_PROC(&usched_dfly_sysctl_ctx,
2413 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2414 OID_AUTO, "stick_to_level",
2415 CTLTYPE_INT | CTLFLAG_RW,
2416 NULL, sizeof usched_dfly_stick_to_level,
2417 sysctl_usched_dfly_stick_to_level, "I",
2418 "Stick a process to this level. See sysctl "
2419 "parameter hw.cpu_topology.level_description");
2420 #endif
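/*
 * Usage note: the nodes created above appear under kern.usched_dfly, and
 * the read/write ones can be inspected or tuned at runtime with sysctl(8),
 * e.g. (illustrative commands, not part of this file):
 *
 *	sysctl kern.usched_dfly
 *	sysctl kern.usched_dfly.rrinterval
 *	sysctl kern.usched_dfly.smt=0
 */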
2423 SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
2424 usched_dfly_cpu_init, NULL);