/* sys/kern/usched_dfly.c */
1 /*
2 * Copyright (c) 2012-2017 The DragonFly Project. All rights reserved.
3 * Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>. All rights reserved.
5 * This code is derived from software contributed to The DragonFly Project
6 * by Matthew Dillon <dillon@backplane.com>,
7 * by Mihai Carabas <mihai.carabas@gmail.com>
8 * and many others.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in
18 * the documentation and/or other materials provided with the
19 * distribution.
20 * 3. Neither the name of The DragonFly Project nor the names of its
21 * contributors may be used to endorse or promote products derived
22 * from this software without specific, prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
27 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
28 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
29 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
30 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
32 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
33 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
34 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/lock.h>
41 #include <sys/queue.h>
42 #include <sys/proc.h>
43 #include <sys/rtprio.h>
44 #include <sys/uio.h>
45 #include <sys/sysctl.h>
46 #include <sys/resourcevar.h>
47 #include <sys/spinlock.h>
48 #include <sys/cpu_topology.h>
49 #include <sys/thread2.h>
50 #include <sys/spinlock2.h>
52 #include <sys/ktr.h>
54 #include <machine/cpu.h>
55 #include <machine/smp.h>
58 * Priorities. Note that with 32 run queues per scheduler each queue
59 * represents four priority levels.
62 int dfly_rebalanced;
64 #define MAXPRI 128
65 #define PRIMASK (MAXPRI - 1)
66 #define PRIBASE_REALTIME 0
67 #define PRIBASE_NORMAL MAXPRI
68 #define PRIBASE_IDLE (MAXPRI * 2)
69 #define PRIBASE_THREAD (MAXPRI * 3)
70 #define PRIBASE_NULL (MAXPRI * 4)
72 #define NQS 32 /* 32 run queues. */
73 #define PPQ (MAXPRI / NQS) /* priorities per queue */
74 #define PPQMASK (PPQ - 1)
77 * NICE_QS - maximum queues nice can shift the process
78 * EST_QS - maximum queues estcpu can shift the process
80 * ESTCPUPPQ - number of estcpu units per priority queue
81 * ESTCPUMAX - number of estcpu units
83 * Remember that NICE runs over the whole -20 to +20 range.
85 #define NICE_QS 24 /* -20 to +20 shift in whole queues */
86 #define EST_QS 20 /* 0-MAX shift in whole queues */
87 #define ESTCPUPPQ 512
88 #define ESTCPUMAX (ESTCPUPPQ * EST_QS)
89 #define PRIO_RANGE (PRIO_MAX - PRIO_MIN + 1)
91 #define ESTCPULIM(v) min((v), ESTCPUMAX)
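/*
 * Illustrative, non-kernel sketch (wrapped in #if 0 so it never builds):
 * shows how the defines above map a 0-127 normal-class priority onto one
 * of the NQS run queues, and how ESTCPULIM() clamps estcpu.  min() is
 * spelled out with a ternary here only because this is standalone C.
 */
#if 0
#include <stdio.h>

#define MAXPRI		128
#define PRIMASK		(MAXPRI - 1)
#define NQS		32
#define PPQ		(MAXPRI / NQS)
#define EST_QS		20
#define ESTCPUPPQ	512
#define ESTCPUMAX	(ESTCPUPPQ * EST_QS)
#define ESTCPULIM(v)	((v) < ESTCPUMAX ? (v) : ESTCPUMAX)

int
main(void)
{
	int pri;

	for (pri = 0; pri < MAXPRI; pri += 31) {
		/* same mapping as lwp_rqindex = (priority & PRIMASK) / PPQ */
		printf("priority %3d -> run queue %2d\n",
		       pri, (pri & PRIMASK) / PPQ);
	}
	printf("ESTCPULIM(%d) = %d\n",
	       ESTCPUMAX + 1000, ESTCPULIM(ESTCPUMAX + 1000));
	return 0;
}
#endif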
93 TAILQ_HEAD(rq, lwp);
95 #define lwp_priority lwp_usdata.dfly.priority
96 #define lwp_forked lwp_usdata.dfly.forked
97 #define lwp_rqindex lwp_usdata.dfly.rqindex
98 #define lwp_estcpu lwp_usdata.dfly.estcpu
99 #define lwp_estfast lwp_usdata.dfly.estfast
100 #define lwp_uload lwp_usdata.dfly.uload
101 #define lwp_rqtype lwp_usdata.dfly.rqtype
102 #define lwp_qcpu lwp_usdata.dfly.qcpu
103 #define lwp_rrcount lwp_usdata.dfly.rrcount
static __inline int
lptouload(struct lwp *lp)
{
	int uload;

	uload = lp->lwp_estcpu / NQS;
	uload -= uload * lp->lwp_proc->p_nice / (PRIO_MAX + 1);

	return uload;
}
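/*
 * Illustrative, non-kernel sketch of the lptouload() arithmetic above:
 * uload is estcpu scaled down by NQS and then adjusted by p_nice, so
 * positive nice values shrink a thread's load contribution and negative
 * nice values grow it.  PRIO_MAX is assumed to be 20 as in
 * <sys/resource.h>; the block never builds.
 */
#if 0
#include <stdio.h>

#define NQS		32
#define PRIO_MAX	20

static int
lptouload_demo(int estcpu, int nice)
{
	int uload;

	uload = estcpu / NQS;
	uload -= uload * nice / (PRIO_MAX + 1);
	return uload;
}

int
main(void)
{
	int nice;

	for (nice = -20; nice <= 20; nice += 10)
		printf("estcpu 10240, nice %+3d -> uload %d\n",
		       nice, lptouload_demo(10240, nice));
	return 0;
}
#endif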
117 * DFly scheduler pcpu structure. Note that the pcpu uload field must
118 * be 64-bits to avoid overflowing in the situation where more than 32768
119 * processes are on a single cpu's queue. Since high-end systems can
120 * easily run 900,000+ processes, we have to deal with it.
122 struct usched_dfly_pcpu {
123 struct spinlock spin;
124 struct thread *helper_thread;
125 struct globaldata *gd;
126 u_short scancpu;
127 short upri;
128 long uload; /* 64-bits to avoid overflow (1) */
129 int ucount;
130 int flags;
131 struct lwp *uschedcp;
132 struct rq queues[NQS];
133 struct rq rtqueues[NQS];
134 struct rq idqueues[NQS];
135 u_int32_t queuebits;
136 u_int32_t rtqueuebits;
137 u_int32_t idqueuebits;
138 int runqcount;
139 int cpuid;
140 cpumask_t cpumask;
141 cpu_node_t *cpunode;
142 } __cachealign;
145 * Reflecting bits in the global atomic masks allows us to avoid
146 * a certain degree of global ping-ponging.
148 #define DFLY_PCPU_RDYMASK 0x0001 /* reflect rdyprocmask */
149 #define DFLY_PCPU_CURMASK 0x0002 /* reflect curprocmask */
151 typedef struct usched_dfly_pcpu *dfly_pcpu_t;
153 static void dfly_acquire_curproc(struct lwp *lp);
154 static void dfly_release_curproc(struct lwp *lp);
155 static void dfly_select_curproc(globaldata_t gd);
156 static void dfly_setrunqueue(struct lwp *lp);
157 static void dfly_setrunqueue_dd(dfly_pcpu_t rdd, struct lwp *lp);
158 static void dfly_schedulerclock(struct lwp *lp, sysclock_t period,
159 sysclock_t cpstamp);
160 static void dfly_recalculate_estcpu(struct lwp *lp);
161 static void dfly_resetpriority(struct lwp *lp);
162 static void dfly_forking(struct lwp *plp, struct lwp *lp);
163 static void dfly_exiting(struct lwp *lp, struct proc *);
164 static void dfly_uload_update(struct lwp *lp);
165 static void dfly_yield(struct lwp *lp);
166 static void dfly_changeqcpu_locked(struct lwp *lp,
167 dfly_pcpu_t dd, dfly_pcpu_t rdd);
168 static dfly_pcpu_t dfly_choose_best_queue(struct lwp *lp);
169 static dfly_pcpu_t dfly_choose_worst_queue(dfly_pcpu_t dd, int forceit);
170 static dfly_pcpu_t dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp);
171 static void dfly_need_user_resched_remote(void *dummy);
172 static struct lwp *dfly_chooseproc_locked(dfly_pcpu_t rdd, dfly_pcpu_t dd,
173 struct lwp *chklp, int worst);
174 static void dfly_remrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
175 static void dfly_setrunqueue_locked(dfly_pcpu_t dd, struct lwp *lp);
176 static void dfly_changedcpu(struct lwp *lp);
178 struct usched usched_dfly = {
179 { NULL },
180 "dfly", "Original DragonFly Scheduler",
181 NULL, /* default registration */
182 NULL, /* default deregistration */
183 dfly_acquire_curproc,
184 dfly_release_curproc,
185 dfly_setrunqueue,
186 dfly_schedulerclock,
187 dfly_recalculate_estcpu,
188 dfly_resetpriority,
189 dfly_forking,
190 dfly_exiting,
191 dfly_uload_update,
192 NULL, /* setcpumask not supported */
193 dfly_yield,
	dfly_changedcpu
};

/*
 * We have NQS (32) run queues per scheduling class.  For the normal
 * class, there are 128 priorities scaled onto these 32 queues.  New
 * processes are added to the last entry in each queue, and processes
 * are selected for running by taking them from the head and maintaining
 * a simple FIFO arrangement.  Realtime and Idle priority processes have
 * an explicit 0-31 priority which maps directly onto their class queue
 * index.  When a queue has something in it, the corresponding bit is
 * set in the queuebits variable, allowing a single read to determine
 * the state of all 32 queues and then a ffs() to find the first busy
 * queue.
 *
 * curprocmask is used to publish cpus with assigned curprocs to the rest
 * of the cpus.  In certain situations curprocmask may leave a bit set
 * (e.g. a yield or a token-based yield) even though dd->uschedcp is
 * NULL'd out temporarily.
 */
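/*
 * Illustrative, non-kernel sketch of the queuebits technique described
 * above: a 32-bit word has one bit per run queue and ffs() (here the
 * libc version from <strings.h>) finds the lowest-numbered, i.e.
 * highest-priority, non-empty queue in a single scan.
 */
#if 0
#include <stdio.h>
#include <strings.h>

int
main(void)
{
	unsigned int queuebits = 0;

	/* mark queues 7, 12 and 30 as non-empty */
	queuebits |= 1U << 7;
	queuebits |= 1U << 12;
	queuebits |= 1U << 30;

	/* drain in priority order; ffs() returns a 1-based bit index */
	while (queuebits) {
		int q = ffs(queuebits) - 1;

		printf("next non-empty run queue: %d\n", q);
		queuebits &= ~(1U << q);	/* pretend the queue emptied */
	}
	return 0;
}
#endif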
214 /* currently running a user process */
215 static cpumask_t dfly_curprocmask = CPUMASK_INITIALIZER_ALLONES;
216 static cpumask_t dfly_rdyprocmask; /* ready to accept a user process */
217 static struct usched_dfly_pcpu dfly_pcpu[MAXCPU];
218 static struct sysctl_ctx_list usched_dfly_sysctl_ctx;
219 static struct sysctl_oid *usched_dfly_sysctl_tree;
220 static struct lock usched_dfly_config_lk = LOCK_INITIALIZER("usdfs", 0, 0);
222 /* Debug info exposed through debug.* sysctl */
224 static int usched_dfly_debug = -1;
225 SYSCTL_INT(_debug, OID_AUTO, dfly_scdebug, CTLFLAG_RW,
226 &usched_dfly_debug, 0,
227 "Print debug information for this pid");
229 static int usched_dfly_pid_debug = -1;
230 SYSCTL_INT(_debug, OID_AUTO, dfly_pid_debug, CTLFLAG_RW,
231 &usched_dfly_pid_debug, 0,
232 "Print KTR debug information for this pid");
234 static int usched_dfly_chooser = 0;
235 SYSCTL_INT(_debug, OID_AUTO, dfly_chooser, CTLFLAG_RW,
236 &usched_dfly_chooser, 0,
237 "Print KTR debug information for this pid");
240 * WARNING!
242 * The fork bias can have a large effect on the system in the face of a
243 * make -j N or other high-forking applications.
245 * Larger values are much less invasive vs other things that
246 * might be running in the system, but can cause exec chains
247 * such as those typically generated by make to have higher
248 * latencies in the face of modest load.
250 * Lower values are more invasive but have reduced latencies
251 * for such exec chains.
253 * make -j 10 buildkernel example, build times:
255 * +0 3:04
256 * +1 3:14 -5.2% <-- default
257 * +2 3:22 -8.9%
259 * This issue occurs due to the way the scheduler affinity heuristics work.
260 * There is no way to really 'fix' the affinity heuristics because when it
261 * comes right down to it trying to instantly schedule a process on an
262 * available cpu (even if it will become unavailable a microsecond later)
263 * tends to cause processes to shift around between cpus and sockets too much
264 * and breaks the affinity.
266 * NOTE: Heavily concurrent builds typically have enough things on the pan
267 * that they remain time-efficient even with a higher bias.
269 static int usched_dfly_forkbias = 1;
270 SYSCTL_INT(_debug, OID_AUTO, dfly_forkbias, CTLFLAG_RW,
271 &usched_dfly_forkbias, 0,
272 "Fork bias for estcpu in whole queues");
 * Tuning usched_dfly - configurable through kern.usched_dfly.
277 * weight1 - Tries to keep threads on their current cpu. If you
278 * make this value too large the scheduler will not be
279 * able to load-balance large loads.
281 * Generally set to a fairly low value, but high enough
282 * such that estcpu jitter doesn't move threads around.
284 * weight2 - If non-zero, detects thread pairs undergoing synchronous
285 * communications and tries to move them closer together.
286 * Behavior is adjusted by bit 4 of features (0x10).
288 * WARNING! Weight2 is a ridiculously sensitive parameter,
289 * change the default at your peril.
291 * weight3 - Weighting based on the number of recently runnable threads
292 * on the userland scheduling queue (ignoring their loads).
294 * A nominal value here prevents high-priority (low-load)
295 * threads from accumulating on one cpu core when other
296 * cores are available.
298 * This value should be left fairly small because low-load
299 * high priority threads can still be mostly idle and too
300 * high a value will kick cpu-bound processes off the cpu
301 * unnecessarily.
303 * weight4 - Weighting based on other cpu queues being available
304 * or running processes with higher lwp_priority's.
306 * This allows a thread to migrate to another nearby cpu if it
307 * is unable to run on the current cpu based on the other cpu
308 * being idle or running a lower priority (higher lwp_priority)
309 * thread. This value should be large enough to override weight1
311 * weight5 - Weighting based on the relative amount of ram connected
312 * to the node a cpu resides on.
 * This value should remain fairly low to allow asymmetric
 * NUMA nodes to get threads scheduled to them.  Setting a very
 * high level will prevent scheduling on asymmetric NUMA nodes
 * with low amounts of directly-attached memory.
 *
 * Note that when testing e.g. N threads on a machine with N
 * cpu cores with asymmetric NUMA nodes, a non-zero value will
 * cause some cpu threads on the low-priority NUMA nodes to remain
 * idle even when a few process threads are doubled-up on other
 * cpus.  But this is typically more ideal because it deschedules
 * low-priority NUMA nodes at lighter loads.
326 * Values between 50 and 200 are recommended. Default is 50.
328 * weight6 - rdd transfer weight hysteresis. Defaults to 0, can be increased
 * to improve stability at the cost of more mis-schedules.
331 * features - These flags can be set or cleared to enable or disable various
332 * features.
334 * 0x01 Enable idle-cpu pulling (default)
335 * 0x02 Enable proactive pushing (default)
336 * 0x04 Enable rebalancing rover (default)
337 * 0x08 Enable more proactive pushing (default)
338 * 0x10 (flip weight2 limit on same cpu) (default)
339 * 0x20 choose best cpu for forked process
340 * 0x40 choose current cpu for forked process
341 * 0x80 choose random cpu for forked process (default)
343 static int usched_dfly_smt = 0;
344 static int usched_dfly_cache_coherent = 0;
345 static int usched_dfly_weight1 = 10; /* keep thread on current cpu */
346 static int usched_dfly_weight2 = 180; /* synchronous peer's current cpu */
347 static int usched_dfly_weight3 = 10; /* number of threads on queue */
348 static int usched_dfly_weight4 = 160; /* availability of idle cores */
349 static int usched_dfly_weight5 = 50; /* node attached memory */
static int usched_dfly_weight6 = 0;	/* rdd transfer weight */
351 static int usched_dfly_features = 0x8F; /* allow pulls */
352 static int usched_dfly_fast_resched = PPQ / 2; /* delta priority / resched */
353 static int usched_dfly_swmask = ~PPQMASK; /* allow pulls */
354 static int usched_dfly_rrinterval = (ESTCPUFREQ + 9) / 10;
355 static int usched_dfly_decay = 8;
356 static long usched_dfly_node_mem;
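/*
 * Illustrative, non-kernel sketch of how the weights above combine into a
 * per-cpu score, in the spirit of dfly_choose_best_queue() further down.
 * The struct and the sample numbers are made up for the example; the real
 * code also folds in topology, NUMA memory (weight5) and cpumask checks.
 */
#if 0
#include <stdio.h>

struct cpu_snap {			/* hypothetical per-cpu snapshot */
	long	uload;			/* aggregate lwp_uload */
	int	ucount;			/* runnable user thread count */
	int	idle;			/* no uschedcp, empty run queues */
	int	is_current;		/* the lwp already lives here */
	int	is_wakecpu;		/* the lwp was woken from here */
};

static long
score(const struct cpu_snap *cs, int w1, int w2, int w3)
{
	long load;

	if (cs->idle)			/* idle cpus count at half weight */
		load = cs->uload / 2 + cs->ucount * w3 / 2;
	else
		load = cs->uload + cs->ucount * w3;
	if (cs->is_current)
		load -= w1;		/* keep the thread where it is */
	if (cs->is_wakecpu)
		load -= w2;		/* pull it toward its waker */
	return load;
}

int
main(void)
{
	struct cpu_snap a = { 600, 2, 0, 1, 0 };
	struct cpu_snap b = { 400, 1, 1, 0, 1 };

	printf("cpu a %ld vs cpu b %ld (lower score wins)\n",
	       score(&a, 10, 180, 10), score(&b, 10, 180, 10));
	return 0;
}
#endif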
358 /* KTR debug printings */
360 KTR_INFO_MASTER(usched);
362 #if !defined(KTR_USCHED_DFLY)
363 #define KTR_USCHED_DFLY KTR_ALL
364 #endif
366 KTR_INFO(KTR_USCHED_DFLY, usched, chooseproc, 0,
367 "USCHED_DFLY(chooseproc: pid %d, old_cpuid %d, curr_cpuid %d)",
368 pid_t pid, int old_cpuid, int curr);
371 * This function is called when the kernel intends to return to userland.
372 * It is responsible for making the thread the current designated userland
373 * thread for this cpu, blocking if necessary.
375 * The kernel will not depress our LWKT priority until after we return,
376 * in case we have to shove over to another cpu.
378 * We must determine our thread's disposition before we switch away. This
379 * is very sensitive code.
381 * WARNING! THIS FUNCTION IS ALLOWED TO CAUSE THE CURRENT THREAD TO MIGRATE
382 * TO ANOTHER CPU! Because most of the kernel assumes that no migration will
383 * occur, this function is called only under very controlled circumstances.
385 static void
386 dfly_acquire_curproc(struct lwp *lp)
388 globaldata_t gd;
389 dfly_pcpu_t dd;
390 dfly_pcpu_t rdd;
391 thread_t td;
392 int force_resched;
395 * Make sure we aren't sitting on a tsleep queue.
397 td = lp->lwp_thread;
398 crit_enter_quick(td);
399 if (td->td_flags & TDF_TSLEEPQ)
400 tsleep_remove(td);
401 dfly_recalculate_estcpu(lp);
403 gd = mycpu;
404 dd = &dfly_pcpu[gd->gd_cpuid];
407 * Process any pending interrupts/ipi's, then handle reschedule
408 * requests. dfly_release_curproc() will try to assign a new
409 * uschedcp that isn't us and otherwise NULL it out.
411 force_resched = 0;
412 if ((td->td_mpflags & TDF_MP_BATCH_DEMARC) &&
413 lp->lwp_rrcount >= usched_dfly_rrinterval / 2) {
414 force_resched = 1;
417 if (user_resched_wanted()) {
418 if (dd->uschedcp == lp)
419 force_resched = 1;
420 clear_user_resched();
421 dfly_release_curproc(lp);
425 * Loop until we are the current user thread.
427 * NOTE: dd spinlock not held at top of loop.
429 if (dd->uschedcp == lp)
430 lwkt_yield_quick();
432 while (dd->uschedcp != lp) {
434 * Do not do a lwkt_yield_quick() here as it will prevent
435 * the lwp from being placed on the dfly_bsd runqueue for
436 * one cycle (possibly an entire round-robin), preventing
437 * it from being scheduled to another cpu.
439 /* lwkt_yield_quick(); */
441 spin_lock(&dd->spin);
443 /* This lwp is an outcast; force reschedule. */
444 if (__predict_false(
445 CPUMASK_TESTBIT(lp->lwp_cpumask, gd->gd_cpuid) == 0) &&
446 (rdd = dfly_choose_best_queue(lp)) != dd) {
447 dfly_changeqcpu_locked(lp, dd, rdd);
448 spin_unlock(&dd->spin);
449 lwkt_deschedule(lp->lwp_thread);
450 dfly_setrunqueue_dd(rdd, lp);
451 lwkt_switch();
452 gd = mycpu;
453 dd = &dfly_pcpu[gd->gd_cpuid];
454 continue;
458 * We are not or are no longer the current lwp and a forced
459 * reschedule was requested. Figure out the best cpu to
460 * run on (our current cpu will be given significant weight).
462 * Doing this on many cpus simultaneously leads to
463 * instability so pace the operation.
465 * (if a reschedule was not requested we want to move this
466 * step after the uschedcp tests).
468 if (force_resched &&
469 (usched_dfly_features & 0x08) &&
470 (u_int)sched_ticks / 8 % ncpus == gd->gd_cpuid &&
471 (rdd = dfly_choose_best_queue(lp)) != dd) {
472 dfly_changeqcpu_locked(lp, dd, rdd);
473 spin_unlock(&dd->spin);
474 lwkt_deschedule(lp->lwp_thread);
475 dfly_setrunqueue_dd(rdd, lp);
476 lwkt_switch();
477 gd = mycpu;
478 dd = &dfly_pcpu[gd->gd_cpuid];
479 continue;
483 * Either no reschedule was requested or the best queue was
484 * dd, and no current process has been selected. We can
485 * trivially become the current lwp on the current cpu.
487 if (dd->uschedcp == NULL) {
488 atomic_clear_int(&lp->lwp_thread->td_mpflags,
489 TDF_MP_DIDYIELD);
490 if ((dd->flags & DFLY_PCPU_CURMASK) == 0) {
491 ATOMIC_CPUMASK_ORBIT(dfly_curprocmask,
492 gd->gd_cpuid);
493 dd->flags |= DFLY_PCPU_CURMASK;
495 dd->uschedcp = lp;
496 dd->upri = lp->lwp_priority;
497 KKASSERT(lp->lwp_qcpu == dd->cpuid);
498 spin_unlock(&dd->spin);
499 break;
503 * Can we steal the current designated user thread?
505 * If we do the other thread will stall when it tries to
506 * return to userland, possibly rescheduling elsewhere.
507 * Set need_user_resched() to get the thread to cycle soonest.
509 * It is important to do a masked test to avoid the edge
510 * case where two near-equal-priority threads are constantly
511 * interrupting each other.
513 * In the exact match case another thread has already gained
514 * uschedcp and lowered its priority, if we steal it the
515 * other thread will stay stuck on the LWKT runq and not
516 * push to another cpu. So don't steal on equal-priority even
517 * though it might appear to be more beneficial due to not
518 * having to switch back to the other thread's context.
520 * usched_dfly_fast_resched requires that two threads be
521 * significantly far apart in priority in order to interrupt.
523 * If better but not sufficiently far apart, the current
524 * uschedcp will be interrupted at the next scheduler clock.
526 if (dd->uschedcp &&
527 (dd->upri & ~PPQMASK) >
528 (lp->lwp_priority & ~PPQMASK) + usched_dfly_fast_resched) {
529 dd->uschedcp = lp;
530 dd->upri = lp->lwp_priority;
531 KKASSERT(lp->lwp_qcpu == dd->cpuid);
532 need_user_resched();
533 spin_unlock(&dd->spin);
534 break;
538 * Requeue us at lwp_priority, which recalculate_estcpu()
539 * set for us. Reset the rrcount to force placement
540 * at the end of the queue.
542 * We used to move ourselves to the worst queue, but
543 * this creates a fairly serious priority inversion
544 * problem.
546 if (lp->lwp_thread->td_mpflags & TDF_MP_DIDYIELD) {
547 spin_unlock(&dd->spin);
548 lp->lwp_rrcount = usched_dfly_rrinterval;
549 lp->lwp_rqindex = (lp->lwp_priority & PRIMASK) / PPQ;
551 lwkt_deschedule(lp->lwp_thread);
552 dfly_setrunqueue_dd(dd, lp);
553 atomic_clear_int(&lp->lwp_thread->td_mpflags,
554 TDF_MP_DIDYIELD);
555 lwkt_switch();
556 gd = mycpu;
557 dd = &dfly_pcpu[gd->gd_cpuid];
558 continue;
562 * We are not the current lwp, figure out the best cpu
563 * to run on (our current cpu will be given significant
564 * weight). Loop on cpu change.
566 if ((usched_dfly_features & 0x02) &&
567 force_resched == 0 &&
568 (rdd = dfly_choose_best_queue(lp)) != dd) {
569 dfly_changeqcpu_locked(lp, dd, rdd);
570 spin_unlock(&dd->spin);
571 lwkt_deschedule(lp->lwp_thread);
572 dfly_setrunqueue_dd(rdd, lp);
573 lwkt_switch();
574 gd = mycpu;
575 dd = &dfly_pcpu[gd->gd_cpuid];
576 continue;
580 * We cannot become the current lwp, place the lp on the
581 * run-queue of this or another cpu and deschedule ourselves.
583 * When we are reactivated we will have another chance.
585 * Reload after a switch or setrunqueue/switch possibly
586 * moved us to another cpu.
588 spin_unlock(&dd->spin);
589 lwkt_deschedule(lp->lwp_thread);
590 dfly_setrunqueue_dd(dd, lp);
591 lwkt_switch();
592 gd = mycpu;
593 dd = &dfly_pcpu[gd->gd_cpuid];
	/*
	 * Make sure upri is synchronized, then yield to LWKT threads as
	 * needed before returning.  This could result in another reschedule.
	 * XXX
	 */
	crit_exit_quick(td);

	KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
}
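/*
 * Illustrative, non-kernel sketch of the masked priority test used above
 * when deciding whether to steal uschedcp: priorities are compared per
 * run queue (low bits masked off) and must differ by more than
 * usched_dfly_fast_resched (PPQ / 2 by default) before the incoming
 * thread interrupts the current one.  Sample values are made up.
 */
#if 0
#include <stdio.h>

#define MAXPRI		128
#define NQS		32
#define PPQ		(MAXPRI / NQS)
#define PPQMASK		(PPQ - 1)

static int
may_steal(int cur_upri, int new_pri, int fast_resched)
{
	/* lower numeric priority is more urgent */
	return (cur_upri & ~PPQMASK) > (new_pri & ~PPQMASK) + fast_resched;
}

int
main(void)
{
	printf("equal priorities:   steal=%d\n", may_steal(60, 60, PPQ / 2));
	printf("one queue better:   steal=%d\n", may_steal(60, 56, PPQ / 2));
	printf("four queues better: steal=%d\n", may_steal(60, 44, PPQ / 2));
	return 0;
}
#endif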
607 * DFLY_RELEASE_CURPROC
609 * This routine detaches the current thread from the userland scheduler,
610 * usually because the thread needs to run or block in the kernel (at
611 * kernel priority) for a while.
613 * This routine is also responsible for selecting a new thread to
614 * make the current thread.
616 * NOTE: This implementation differs from the dummy example in that
617 * dfly_select_curproc() is able to select the current process, whereas
618 * dummy_select_curproc() is not able to select the current process.
619 * This means we have to NULL out uschedcp.
621 * Additionally, note that we may already be on a run queue if releasing
622 * via the lwkt_switch() in dfly_setrunqueue().
624 static void
625 dfly_release_curproc(struct lwp *lp)
627 globaldata_t gd = mycpu;
628 dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
631 * Make sure td_wakefromcpu is defaulted. This will be overwritten
632 * by wakeup().
634 if (dd->uschedcp == lp) {
635 KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
636 spin_lock(&dd->spin);
637 if (dd->uschedcp == lp) {
638 dd->uschedcp = NULL; /* don't let lp be selected */
639 dd->upri = PRIBASE_NULL;
642 * We're just going to set it again, avoid the global
643 * cache line ping-pong.
645 if ((lp->lwp_thread->td_mpflags & TDF_MP_DIDYIELD) == 0) {
646 if (dd->flags & DFLY_PCPU_CURMASK) {
647 ATOMIC_CPUMASK_NANDBIT(dfly_curprocmask,
648 gd->gd_cpuid);
649 dd->flags &= ~DFLY_PCPU_CURMASK;
652 spin_unlock(&dd->spin);
653 dfly_select_curproc(gd);
654 } else {
655 spin_unlock(&dd->spin);
661 * DFLY_SELECT_CURPROC
663 * Select a new current process for this cpu and clear any pending user
664 * reschedule request. The cpu currently has no current process.
666 * This routine is also responsible for equal-priority round-robining,
667 * typically triggered from dfly_schedulerclock(). In our dummy example
668 * all the 'user' threads are LWKT scheduled all at once and we just
669 * call lwkt_switch().
671 * The calling process is not on the queue and cannot be selected.
673 static
674 void
675 dfly_select_curproc(globaldata_t gd)
677 dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
678 struct lwp *nlp;
679 int cpuid = gd->gd_cpuid;
681 crit_enter_gd(gd);
683 spin_lock(&dd->spin);
684 nlp = dfly_chooseproc_locked(dd, dd, dd->uschedcp, 0);
686 if (nlp) {
687 if ((dd->flags & DFLY_PCPU_CURMASK) == 0) {
688 ATOMIC_CPUMASK_ORBIT(dfly_curprocmask, cpuid);
689 dd->flags |= DFLY_PCPU_CURMASK;
691 dd->upri = nlp->lwp_priority;
692 dd->uschedcp = nlp;
693 #if 0
694 dd->rrcount = 0; /* reset round robin */
695 #endif
696 spin_unlock(&dd->spin);
697 lwkt_acquire(nlp->lwp_thread);
698 lwkt_schedule(nlp->lwp_thread);
699 } else {
700 spin_unlock(&dd->spin);
702 crit_exit_gd(gd);
706 * Place the specified lwp on the user scheduler's run queue. This routine
707 * must be called with the thread descheduled. The lwp must be runnable.
708 * It must not be possible for anyone else to explicitly schedule this thread.
710 * The thread may be the current thread as a special case.
712 static void
713 dfly_setrunqueue(struct lwp *lp)
715 dfly_pcpu_t dd;
716 dfly_pcpu_t rdd;
719 * First validate the process LWKT state.
721 KASSERT(lp->lwp_stat == LSRUN, ("setrunqueue: lwp not LSRUN"));
722 KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0,
723 ("lwp %d/%d already on runq! flag %08x/%08x", lp->lwp_proc->p_pid,
724 lp->lwp_tid, lp->lwp_proc->p_flags, lp->lwp_flags));
725 KKASSERT((lp->lwp_thread->td_flags & TDF_RUNQ) == 0);
728 * NOTE: dd/rdd do not necessarily represent the current cpu.
729 * Instead they may represent the cpu the thread was last
730 * scheduled on or inherited by its parent.
732 dd = &dfly_pcpu[lp->lwp_qcpu];
733 rdd = dd;
736 * This process is not supposed to be scheduled anywhere or assigned
737 * as the current process anywhere. Assert the condition.
739 KKASSERT(rdd->uschedcp != lp);
742 * Ok, we have to setrunqueue some target cpu and request a reschedule
743 * if necessary.
745 * We have to choose the best target cpu. It might not be the current
746 * target even if the current cpu has no running user thread (for
747 * example, because the current cpu might be a hyperthread and its
748 * sibling has a thread assigned).
750 * If we just forked it is most optimal to run the child on the same
751 * cpu just in case the parent decides to wait for it (thus getting
752 * off that cpu). As long as there is nothing else runnable on the
753 * cpu, that is. If we did this unconditionally a parent forking
754 * multiple children before waiting (e.g. make -j N) leaves other
755 * cpus idle that could be working.
757 if (lp->lwp_forked) {
758 lp->lwp_forked = 0;
759 if (usched_dfly_features & 0x20)
760 rdd = dfly_choose_best_queue(lp);
761 else if (usched_dfly_features & 0x40)
762 rdd = &dfly_pcpu[lp->lwp_qcpu];
763 else if (usched_dfly_features & 0x80)
764 rdd = dfly_choose_queue_simple(rdd, lp);
765 else if (dfly_pcpu[lp->lwp_qcpu].runqcount)
766 rdd = dfly_choose_best_queue(lp);
767 else
768 rdd = &dfly_pcpu[lp->lwp_qcpu];
769 } else {
770 rdd = dfly_choose_best_queue(lp);
771 /* rdd = &dfly_pcpu[lp->lwp_qcpu]; */
773 if (lp->lwp_qcpu != rdd->cpuid) {
774 spin_lock(&dd->spin);
775 dfly_changeqcpu_locked(lp, dd, rdd);
776 spin_unlock(&dd->spin);
778 dfly_setrunqueue_dd(rdd, lp);
782 * Change qcpu to rdd->cpuid. The dd the lp is CURRENTLY on must be
783 * spin-locked on-call. rdd does not have to be.
785 static void
786 dfly_changeqcpu_locked(struct lwp *lp, dfly_pcpu_t dd, dfly_pcpu_t rdd)
788 if (lp->lwp_qcpu != rdd->cpuid) {
789 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
790 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
791 atomic_add_long(&dd->uload, -lp->lwp_uload);
792 atomic_add_int(&dd->ucount, -1);
794 lp->lwp_qcpu = rdd->cpuid;
799 * Place lp on rdd's runqueue. Nothing is locked on call. This function
800 * also performs all necessary ancillary notification actions.
802 static void
803 dfly_setrunqueue_dd(dfly_pcpu_t rdd, struct lwp *lp)
805 globaldata_t rgd;
808 * We might be moving the lp to another cpu's run queue, and once
809 * on the runqueue (even if it is our cpu's), another cpu can rip
810 * it away from us.
812 * TDF_MIGRATING might already be set if this is part of a
813 * remrunqueue+setrunqueue sequence.
815 if ((lp->lwp_thread->td_flags & TDF_MIGRATING) == 0)
816 lwkt_giveaway(lp->lwp_thread);
818 rgd = rdd->gd;
821 * We lose control of the lp the moment we release the spinlock
822 * after having placed it on the queue. i.e. another cpu could pick
823 * it up, or it could exit, or its priority could be further
824 * adjusted, or something like that.
826 * WARNING! rdd can point to a foreign cpu!
828 spin_lock(&rdd->spin);
829 dfly_setrunqueue_locked(rdd, lp);
832 * Potentially interrupt the currently-running thread
834 if ((rdd->upri & ~PPQMASK) <= (lp->lwp_priority & ~PPQMASK)) {
836 * Currently running thread is better or same, do not
837 * interrupt.
839 spin_unlock(&rdd->spin);
840 } else if ((rdd->upri & ~PPQMASK) <= (lp->lwp_priority & ~PPQMASK) +
841 usched_dfly_fast_resched) {
843 * Currently running thread is not better, but not so bad
844 * that we need to interrupt it. Let it run for one more
845 * scheduler tick.
847 if (rdd->uschedcp &&
848 rdd->uschedcp->lwp_rrcount < usched_dfly_rrinterval) {
849 rdd->uschedcp->lwp_rrcount = usched_dfly_rrinterval - 1;
851 spin_unlock(&rdd->spin);
852 } else if (rgd == mycpu) {
854 * We should interrupt the currently running thread, which
855 * is on the current cpu. However, if DIDYIELD is set we
856 * round-robin unconditionally and do not interrupt it.
858 spin_unlock(&rdd->spin);
859 if (rdd->uschedcp == NULL)
860 wakeup_mycpu(rdd->helper_thread); /* XXX */
861 if ((lp->lwp_thread->td_mpflags & TDF_MP_DIDYIELD) == 0)
862 need_user_resched();
	} else {
		/*
		 * We should interrupt the currently running thread, which
		 * is on a different cpu.
		 */
		spin_unlock(&rdd->spin);
		lwkt_send_ipiq(rgd, dfly_need_user_resched_remote, NULL);
	}
}
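/*
 * Illustrative, non-kernel sketch of the three-way decision made above in
 * dfly_setrunqueue_dd(): leave the running thread alone if it is better or
 * the same, let it finish its tick if it is only slightly worse, otherwise
 * reschedule (locally via need_user_resched() or remotely via IPI).  The
 * enum names are invented for the example, and the second call raises
 * fast_resched so the middle case is visible.
 */
#if 0
#include <stdio.h>

#define MAXPRI	128
#define NQS	32
#define PPQ	(MAXPRI / NQS)
#define PPQMASK	(PPQ - 1)

enum action { LEAVE_ALONE, FINISH_TICK, RESCHED_NOW };

static enum action
placement(int running_upri, int new_pri, int fast_resched)
{
	if ((running_upri & ~PPQMASK) <= (new_pri & ~PPQMASK))
		return LEAVE_ALONE;
	if ((running_upri & ~PPQMASK) <= (new_pri & ~PPQMASK) + fast_resched)
		return FINISH_TICK;
	return RESCHED_NOW;
}

int
main(void)
{
	printf("better=%d  close=%d  much-worse=%d\n",
	       placement(40, 60, PPQ / 2),
	       placement(64, 60, PPQ),
	       placement(80, 40, PPQ / 2));
	return 0;
}
#endif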
874 * This routine is called from a systimer IPI. It MUST be MP-safe and
875 * the BGL IS NOT HELD ON ENTRY. This routine is called at ESTCPUFREQ on
876 * each cpu.
878 static
879 void
880 dfly_schedulerclock(struct lwp *lp, sysclock_t period, sysclock_t cpstamp)
882 globaldata_t gd = mycpu;
883 dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
886 * Spinlocks also hold a critical section so there should not be
887 * any active.
889 KKASSERT(gd->gd_spinlocks == 0 || dumping);
892 * If lp is NULL we might be contended and lwkt_switch() may have
893 * cycled into the idle thread. Apply the tick to the current
894 * process on this cpu if it is contended.
896 if (gd->gd_curthread == &gd->gd_idlethread) {
897 lp = dd->uschedcp;
898 if (lp && (lp->lwp_thread == NULL ||
899 lp->lwp_thread->td_contended == 0)) {
900 lp = NULL;
905 * Dock thread for tick
907 if (lp) {
909 * Do we need to round-robin? We round-robin 10 times a
910 * second. This should only occur for cpu-bound batch
911 * processes.
913 if (++lp->lwp_rrcount >= usched_dfly_rrinterval) {
914 lp->lwp_thread->td_wakefromcpu = -1;
915 need_user_resched();
919 * Adjust estcpu upward using a real time equivalent
920 * calculation, and recalculate lp's priority. Estcpu
921 * is increased such that it will cap-out over a period
922 * of one second.
924 lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu +
925 ESTCPUMAX / ESTCPUFREQ + 1);
926 dfly_resetpriority(lp);
930 * Rebalance two cpus every 8 ticks, pulling the worst thread
931 * from the worst cpu's queue into a rotating cpu number.
932 * Also require that the moving of the highest-load thread
933 * from rdd to dd does not cause the uload to cross over.
935 * This mechanic is needed because the push algorithms can
 * steady-state in a non-optimal configuration.  We need to mix it
937 * up a little, even if it means breaking up a paired thread, so
938 * the push algorithms can rebalance the degenerate conditions.
939 * This portion of the algorithm exists to ensure stability at the
940 * selected weightings.
942 * Because we might be breaking up optimal conditions we do not want
943 * to execute this too quickly, hence we only rebalance approximately
 * ~7-8 times per second.  The pushes, on the other hand, are capable of
 * moving threads to other cpus at a much higher rate.
947 * We choose the most heavily loaded thread from the worst queue
948 * in order to ensure that multiple heavy-weight threads on the same
949 * queue get broken up, and also because these threads are the most
950 * likely to be able to remain in place. Hopefully then any pairings,
951 * if applicable, migrate to where these threads are.
953 if ((usched_dfly_features & 0x04) &&
954 ((u_int)sched_ticks & 7) == 0 &&
955 (u_int)sched_ticks / 8 % ncpus == gd->gd_cpuid) {
957 * Our cpu is up.
959 struct lwp *nlp;
960 dfly_pcpu_t rdd;
962 rdd = dfly_choose_worst_queue(dd, 1);
963 if (rdd && dd->uload + usched_dfly_weight6 / 2 < rdd->uload) {
964 spin_lock(&dd->spin);
965 if (spin_trylock(&rdd->spin)) {
966 nlp = dfly_chooseproc_locked(rdd, dd, NULL, 1);
967 spin_unlock(&rdd->spin);
968 if (nlp == NULL)
969 spin_unlock(&dd->spin);
970 } else {
971 spin_unlock(&dd->spin);
972 nlp = NULL;
974 } else {
975 nlp = NULL;
977 /* dd->spin held if nlp != NULL */
980 * Either schedule it or add it to our queue.
982 if (nlp &&
983 (nlp->lwp_priority & ~PPQMASK) < (dd->upri & ~PPQMASK)) {
984 if ((dd->flags & DFLY_PCPU_CURMASK) == 0) {
985 ATOMIC_CPUMASK_ORMASK(dfly_curprocmask,
986 dd->cpumask);
987 dd->flags |= DFLY_PCPU_CURMASK;
989 dd->upri = nlp->lwp_priority;
990 dd->uschedcp = nlp;
991 #if 0
992 dd->rrcount = 0; /* reset round robin */
993 #endif
994 spin_unlock(&dd->spin);
995 lwkt_acquire(nlp->lwp_thread);
996 lwkt_schedule(nlp->lwp_thread);
		} else if (nlp) {
			dfly_setrunqueue_locked(dd, nlp);
			spin_unlock(&dd->spin);
		}
	}
}
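/*
 * Illustrative, non-kernel sketch of the estcpu ramp applied above: each
 * scheduler tick adds ESTCPUMAX / ESTCPUFREQ + 1, so a thread that runs
 * on every tick caps out at ESTCPUMAX after roughly one second.  The
 * ESTCPUFREQ value of 50 is assumed here purely for the simulation; the
 * kernel takes it from its own headers.
 */
#if 0
#include <stdio.h>

#define EST_QS		20
#define ESTCPUPPQ	512
#define ESTCPUMAX	(ESTCPUPPQ * EST_QS)
#define ESTCPUFREQ	50		/* assumed for this sketch only */
#define ESTCPULIM(v)	((v) < ESTCPUMAX ? (v) : ESTCPUMAX)

int
main(void)
{
	int estcpu = 0;
	int tick;

	for (tick = 1; tick <= ESTCPUFREQ + 10; ++tick) {
		estcpu = ESTCPULIM(estcpu + ESTCPUMAX / ESTCPUFREQ + 1);
		if (tick % 10 == 0)
			printf("tick %3d  estcpu %5d / %d\n",
			       tick, estcpu, ESTCPUMAX);
	}
	return 0;
}
#endif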
1005 * Called from acquire and from kern_synch's one-second timer (one of the
1006 * callout helper threads) with a critical section held.
1008 * Adjust p_estcpu based on our single-cpu load, p_nice, and compensate for
1009 * overall system load.
1011 * Note that no recalculation occurs for a process which sleeps and wakes
1012 * up in the same tick. That is, a system doing thousands of context
1013 * switches per second will still only do serious estcpu calculations
1014 * ESTCPUFREQ times per second.
1016 static
1017 void
1018 dfly_recalculate_estcpu(struct lwp *lp)
1020 globaldata_t gd = mycpu;
1021 sysclock_t cpbase;
1022 sysclock_t ttlticks;
1023 int estcpu;
1024 int decay_factor;
1025 int ucount;
1028 * We have to subtract periodic to get the last schedclock
1029 * timeout time, otherwise we would get the upcoming timeout.
1030 * Keep in mind that a process can migrate between cpus and
1031 * while the scheduler clock should be very close, boundary
1032 * conditions could lead to a small negative delta.
1034 cpbase = gd->gd_schedclock.time - gd->gd_schedclock.periodic;
1036 if (lp->lwp_slptime > 1) {
1038 * Too much time has passed, do a coarse correction.
1040 lp->lwp_estcpu = lp->lwp_estcpu >> 1;
1041 dfly_resetpriority(lp);
1042 lp->lwp_cpbase = cpbase;
1043 lp->lwp_cpticks = 0;
1044 lp->lwp_estfast = 0;
1045 } else if (lp->lwp_cpbase != cpbase) {
1047 * Adjust estcpu if we are in a different tick. Don't waste
1048 * time if we are in the same tick.
1050 * First calculate the number of ticks in the measurement
1051 * interval. The ttlticks calculation can wind up 0 due to
1052 * a bug in the handling of lwp_slptime (as yet not found),
1053 * so make sure we do not get a divide by 0 panic.
1055 ttlticks = (cpbase - lp->lwp_cpbase) /
1056 gd->gd_schedclock.periodic;
1057 if ((ssysclock_t)ttlticks < 0) {
1058 ttlticks = 0;
1059 lp->lwp_cpbase = cpbase;
1061 if (ttlticks < 4)
1062 return;
1063 updatepcpu(lp, lp->lwp_cpticks, ttlticks);
1066 * Calculate instant estcpu based percentage of (one) cpu
1067 * used and exponentially average it into the current
1068 * lwp_estcpu.
1070 ucount = dfly_pcpu[lp->lwp_qcpu].ucount;
1071 estcpu = lp->lwp_cpticks * ESTCPUMAX / ttlticks;
1074 * The higher ttlticks gets, the more meaning the calculation
1075 * has and the smaller our decay_factor in the exponential
1076 * average.
1078 * The uload calculation has been removed because it actually
1079 * makes things worse, causing processes which use less cpu
1080 * (such as a browser) to be pumped up and treated the same
1081 * as a cpu-bound process (such as a make). The same effect
1082 * can occur with sufficient load without the uload
1083 * calculation, but occurs less quickly and takes more load.
1084 * In addition, the less cpu a process uses the smaller the
1085 * effect of the overload.
1087 if (ttlticks >= hz)
1088 decay_factor = 1;
1089 else
1090 decay_factor = hz - ttlticks;
1092 lp->lwp_estcpu = ESTCPULIM(
1093 (lp->lwp_estcpu * ttlticks + estcpu) /
1094 (ttlticks + 1));
1095 if (usched_dfly_debug == lp->lwp_proc->p_pid)
1096 kprintf(" finalestcpu %d %d\n", estcpu, lp->lwp_estcpu);
		dfly_resetpriority(lp);
		lp->lwp_cpbase += ttlticks * gd->gd_schedclock.periodic;
		lp->lwp_cpticks = 0;
	}
}
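/*
 * Illustrative, non-kernel sketch of the averaging applied above: the
 * previous estcpu, weighted by the number of ticks in the measurement
 * interval, is blended with a single instantaneous estimate and clamped
 * by ESTCPULIM().  The numbers below are arbitrary.
 */
#if 0
#include <stdio.h>

#define EST_QS		20
#define ESTCPUPPQ	512
#define ESTCPUMAX	(ESTCPUPPQ * EST_QS)
#define ESTCPULIM(v)	((v) < ESTCPUMAX ? (v) : ESTCPUMAX)

static int
blend(int old_estcpu, int instant_estcpu, int ttlticks)
{
	return ESTCPULIM((old_estcpu * ttlticks + instant_estcpu) /
			 (ttlticks + 1));
}

int
main(void)
{
	int estcpu = ESTCPUMAX;		/* formerly cpu-bound thread... */
	int i;

	/* ...that has now gone idle (instantaneous estimate of 0) */
	for (i = 1; i <= 5; ++i) {
		estcpu = blend(estcpu, 0, 20);
		printf("interval %d: estcpu decays to %d\n", i, estcpu);
	}
	return 0;
}
#endif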
1105 * Compute the priority of a process when running in user mode.
1106 * Arrange to reschedule if the resulting priority is better
1107 * than that of the current process.
1109 * This routine may be called with any process.
1111 * This routine is called by fork1() for initial setup with the process of
1112 * the run queue, and also may be called normally with the process on or
1113 * off the run queue.
1115 static void
1116 dfly_resetpriority(struct lwp *lp)
1118 dfly_pcpu_t rdd;
1119 int newpriority;
1120 u_short newrqtype;
1121 int rcpu;
1122 int checkpri;
1123 int estcpu;
1124 int delta_uload;
1126 crit_enter();
1129 * Lock the scheduler (lp) belongs to. This can be on a different
1130 * cpu. Handle races. This loop breaks out with the appropriate
1131 * rdd locked.
1133 for (;;) {
1134 rcpu = lp->lwp_qcpu;
1135 cpu_ccfence();
1136 rdd = &dfly_pcpu[rcpu];
1137 spin_lock(&rdd->spin);
1138 if (rcpu == lp->lwp_qcpu)
1139 break;
1140 spin_unlock(&rdd->spin);
1144 * Calculate the new priority and queue type
1146 newrqtype = lp->lwp_rtprio.type;
1148 switch(newrqtype) {
1149 case RTP_PRIO_REALTIME:
1150 case RTP_PRIO_FIFO:
1151 newpriority = PRIBASE_REALTIME +
1152 (lp->lwp_rtprio.prio & PRIMASK);
1153 break;
1154 case RTP_PRIO_NORMAL:
1156 * Calculate the new priority.
1158 * nice contributes up to NICE_QS queues (typ 32 - full range)
1159 * estcpu contributes up to EST_QS queues (typ 24)
 * A nice +20 process receives 1/10 cpu vs nice +0.  Processes niced
 * more than 20 apart may receive no cpu, so a cpu-bound nice -20 can
 * prevent a nice +5 from getting any cpu.  A nice +0, being in the
 * middle, always gets some cpu no matter what.
1167 estcpu = lp->lwp_estcpu;
1168 newpriority = (lp->lwp_proc->p_nice - PRIO_MIN) *
1169 (NICE_QS * PPQ) / PRIO_RANGE;
1170 newpriority += estcpu * PPQ / ESTCPUPPQ;
1171 if (newpriority < 0)
1172 newpriority = 0;
1173 if (newpriority >= MAXPRI)
1174 newpriority = MAXPRI - 1;
1175 newpriority += PRIBASE_NORMAL;
1176 break;
1177 case RTP_PRIO_IDLE:
1178 newpriority = PRIBASE_IDLE + (lp->lwp_rtprio.prio & PRIMASK);
1179 break;
1180 case RTP_PRIO_THREAD:
1181 newpriority = PRIBASE_THREAD + (lp->lwp_rtprio.prio & PRIMASK);
1182 break;
1183 default:
1184 panic("Bad RTP_PRIO %d", newrqtype);
1185 /* NOT REACHED */
	/*
	 * The LWKT scheduler doesn't dive into usched structures, so give it
	 * a hint on the relative priority of user threads running in the
	 * kernel.  The LWKT scheduler will always ensure that a user thread
	 * running in the kernel will get cpu some time, regardless of its
	 * upri, but can decide not to instantly switch from one kernel or
	 * user mode user thread to a kernel-mode user thread when it has a
	 * less desirable user priority.
	 *
	 * td_upri has normal sense (higher values are more desirable), so
	 * negate it (this is a different field from lp->lwp_priority).
	 */
	lp->lwp_thread->td_upri = -(newpriority & usched_dfly_swmask);
1203 * The newpriority incorporates the queue type so do a simple masked
1204 * check to determine if the process has moved to another queue. If
1205 * it has, and it is currently on a run queue, then move it.
1207 * Since uload is ~PPQMASK masked, no modifications are necessary if
1208 * we end up in the same run queue.
1210 * Reset rrcount if moving to a higher-priority queue, otherwise
1211 * retain rrcount.
1213 if ((lp->lwp_priority ^ newpriority) & ~PPQMASK) {
1214 if (lp->lwp_priority < newpriority)
1215 lp->lwp_rrcount = 0;
1216 if (lp->lwp_mpflags & LWP_MP_ONRUNQ) {
1217 dfly_remrunqueue_locked(rdd, lp);
1218 lp->lwp_priority = newpriority;
1219 lp->lwp_rqtype = newrqtype;
1220 lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
1221 dfly_setrunqueue_locked(rdd, lp);
1222 checkpri = 1;
1223 } else {
1224 lp->lwp_priority = newpriority;
1225 lp->lwp_rqtype = newrqtype;
1226 lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
1227 checkpri = 0;
1229 } else {
1231 * In the same PPQ, uload cannot change.
1233 lp->lwp_priority = newpriority;
1234 checkpri = 1;
1235 rcpu = -1;
1239 * Adjust effective load.
1241 * Calculate load then scale up or down geometrically based on p_nice.
1242 * Processes niced up (positive) are less important, and processes
 * niced downward (negative) are more important.  The higher the uload,
1244 * the more important the thread.
1246 /* 0-511, 0-100% cpu */
1247 delta_uload = lptouload(lp);
1248 delta_uload -= lp->lwp_uload;
1249 if (lp->lwp_uload + delta_uload < -32767) {
1250 delta_uload = -32768 - lp->lwp_uload;
1251 } else if (lp->lwp_uload + delta_uload > 32767) {
1252 delta_uload = 32767 - lp->lwp_uload;
1254 lp->lwp_uload += delta_uload;
1255 if (lp->lwp_mpflags & LWP_MP_ULOAD)
1256 atomic_add_long(&dfly_pcpu[lp->lwp_qcpu].uload, delta_uload);
1259 * Determine if we need to reschedule the target cpu. This only
1260 * occurs if the LWP is already on a scheduler queue, which means
 * that idle cpu notification has already occurred.  At most we
1262 * need only issue a need_user_resched() on the appropriate cpu.
1264 * The LWP may be owned by a CPU different from the current one,
1265 * in which case dd->uschedcp may be modified without an MP lock
1266 * or a spinlock held. The worst that happens is that the code
1267 * below causes a spurious need_user_resched() on the target CPU
1268 * and dd->pri to be wrong for a short period of time, both of
1269 * which are harmless.
1271 * If checkpri is 0 we are adjusting the priority of the current
 * process, possibly higher (less desirable), so ignore the upri
1273 * check which will fail in that case.
1275 if (rcpu >= 0) {
1276 if (CPUMASK_TESTBIT(dfly_rdyprocmask, rcpu) &&
1277 (checkpri == 0 ||
1278 (rdd->upri & ~PRIMASK) >
1279 (lp->lwp_priority & ~PRIMASK))) {
1280 if (rcpu == mycpu->gd_cpuid) {
1281 spin_unlock(&rdd->spin);
1282 need_user_resched();
1283 } else {
1284 spin_unlock(&rdd->spin);
1285 lwkt_send_ipiq(globaldata_find(rcpu),
1286 dfly_need_user_resched_remote,
1287 NULL);
			}
		} else {
			spin_unlock(&rdd->spin);
		}
	} else {
		spin_unlock(&rdd->spin);
	}
	crit_exit();
}
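/*
 * Illustrative, non-kernel sketch of the RTP_PRIO_NORMAL calculation in
 * dfly_resetpriority() above: nice contributes up to NICE_QS queues,
 * estcpu up to EST_QS queues, and the sum is clamped to 0..MAXPRI-1
 * before PRIBASE_NORMAL is added.  PRIO_MIN/PRIO_MAX are the usual
 * -20/+20 from <sys/resource.h>.
 */
#if 0
#include <stdio.h>

#define MAXPRI		128
#define NQS		32
#define PPQ		(MAXPRI / NQS)
#define NICE_QS		24
#define EST_QS		20
#define ESTCPUPPQ	512
#define ESTCPUMAX	(ESTCPUPPQ * EST_QS)
#define PRIO_MIN	(-20)
#define PRIO_MAX	20
#define PRIO_RANGE	(PRIO_MAX - PRIO_MIN + 1)

static int
normal_priority(int nice, int estcpu)
{
	int pri;

	pri = (nice - PRIO_MIN) * (NICE_QS * PPQ) / PRIO_RANGE;
	pri += estcpu * PPQ / ESTCPUPPQ;
	if (pri < 0)
		pri = 0;
	if (pri >= MAXPRI)
		pri = MAXPRI - 1;
	return pri;			/* PRIBASE_NORMAL is added on top */
}

int
main(void)
{
	printf("nice   0, idle      -> %3d\n", normal_priority(0, 0));
	printf("nice   0, cpu-bound -> %3d\n", normal_priority(0, ESTCPUMAX));
	printf("nice -20, cpu-bound -> %3d\n", normal_priority(-20, ESTCPUMAX));
	printf("nice +20, idle      -> %3d\n", normal_priority(20, 0));
	return 0;
}
#endif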
1298 static
1299 void
1300 dfly_yield(struct lwp *lp)
1302 if (lp->lwp_qcpu != mycpu->gd_cpuid)
1303 return;
1304 KKASSERT(lp == curthread->td_lwp);
	/*
	 * Don't set need_user_resched() or mess with rrcount or anything.
	 * The TDF flag will override everything as long as we release.
	 */
1310 atomic_set_int(&lp->lwp_thread->td_mpflags, TDF_MP_DIDYIELD);
1311 dfly_release_curproc(lp);
1315 * Thread was forcefully migrated to another cpu. Normally forced migrations
1316 * are used for iterations and the kernel returns to the original cpu before
1317 * returning and this is not needed. However, if the kernel migrates a
1318 * thread to another cpu and wants to leave it there, it has to call this
1319 * scheduler helper.
1321 * Note that the lwkt_migratecpu() function also released the thread, so
1322 * we don't have to worry about that.
1324 static
1325 void
1326 dfly_changedcpu(struct lwp *lp)
1328 dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1329 dfly_pcpu_t rdd = &dfly_pcpu[mycpu->gd_cpuid];
1331 if (dd != rdd) {
1332 spin_lock(&dd->spin);
1333 dfly_changeqcpu_locked(lp, dd, rdd);
1334 spin_unlock(&dd->spin);
1339 * Called from fork1() when a new child process is being created.
 * Give the child process an initial estcpu that is more batch than
1342 * its parent and dock the parent for the fork (but do not
1343 * reschedule the parent).
1345 * fast
1347 * XXX lwp should be "spawning" instead of "forking"
1349 static void
1350 dfly_forking(struct lwp *plp, struct lwp *lp)
1352 int estcpu;
1355 * Put the child 4 queue slots (out of 32) higher than the parent
 * (less desirable than the parent).
1358 lp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu +
1359 ESTCPUPPQ * usched_dfly_forkbias);
1360 lp->lwp_forked = 1;
1361 lp->lwp_estfast = 0;
1364 * Even though the lp will be scheduled specially the first time
1365 * due to lp->lwp_forked, it is important to initialize lwp_qcpu
1366 * to avoid favoring a fixed cpu.
1368 #if 0
1369 static uint16_t save_cpu;
1370 lp->lwp_qcpu = ++save_cpu % ncpus;
1371 #else
1372 lp->lwp_qcpu = plp->lwp_qcpu;
1373 if (CPUMASK_TESTBIT(lp->lwp_cpumask, lp->lwp_qcpu) == 0)
1374 lp->lwp_qcpu = BSFCPUMASK(lp->lwp_cpumask);
1375 #endif
1378 * Dock the parent a cost for the fork, protecting us from fork
1379 * bombs. If the parent is forking quickly this makes both the
1380 * parent and child more batchy.
	estcpu = plp->lwp_estcpu + ESTCPUPPQ / 16;
	plp->lwp_estcpu = ESTCPULIM(estcpu);
}
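/*
 * Illustrative, non-kernel sketch of the estcpu adjustments made by
 * dfly_forking() above: each fork makes the child more batchy by
 * ESTCPUPPQ * forkbias and docks the parent ESTCPUPPQ / 16, so a
 * fork-heavy parent gradually loses priority too.  Starting values are
 * arbitrary.
 */
#if 0
#include <stdio.h>

#define EST_QS		20
#define ESTCPUPPQ	512
#define ESTCPUMAX	(ESTCPUPPQ * EST_QS)
#define ESTCPULIM(v)	((v) < ESTCPUMAX ? (v) : ESTCPUMAX)

int
main(void)
{
	int parent = 1024;		/* arbitrary starting estcpu */
	int forkbias = 1;		/* default usched_dfly_forkbias */
	int child;
	int i;

	for (i = 1; i <= 5; ++i) {
		child = ESTCPULIM(parent + ESTCPUPPQ * forkbias);
		parent = ESTCPULIM(parent + ESTCPUPPQ / 16);
		printf("fork %d: parent estcpu %5d, child estcpu %5d\n",
		       i, parent, child);
	}
	return 0;
}
#endif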
1387 * Called when a lwp is being removed from this scheduler, typically
1388 * during lwp_exit(). We have to clean out any ULOAD accounting before
1389 * we can let the lp go. The dd->spin lock is not needed for uload
1390 * updates.
1392 * Scheduler dequeueing has already occurred, no further action in that
1393 * regard is needed.
1395 static void
1396 dfly_exiting(struct lwp *lp, struct proc *child_proc)
1398 dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1400 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1401 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
1402 atomic_add_long(&dd->uload, -lp->lwp_uload);
1403 atomic_add_int(&dd->ucount, -1);
1408 * This function cannot block in any way, but spinlocks are ok.
1410 * Update the uload based on the state of the thread (whether it is going
1411 * to sleep or running again). The uload is meant to be a longer-term
 * load and not an instantaneous load.
1414 static void
1415 dfly_uload_update(struct lwp *lp)
1417 dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1419 if (lp->lwp_thread->td_flags & TDF_RUNQ) {
1420 if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1421 spin_lock(&dd->spin);
1422 if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
1423 atomic_set_int(&lp->lwp_mpflags,
1424 LWP_MP_ULOAD);
1425 atomic_add_long(&dd->uload, lp->lwp_uload);
1426 atomic_add_int(&dd->ucount, 1);
1428 spin_unlock(&dd->spin);
1430 } else if (lp->lwp_slptime > 0) {
1431 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1432 spin_lock(&dd->spin);
1433 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1434 atomic_clear_int(&lp->lwp_mpflags,
1435 LWP_MP_ULOAD);
1436 atomic_add_long(&dd->uload, -lp->lwp_uload);
1437 atomic_add_int(&dd->ucount, -1);
1439 spin_unlock(&dd->spin);
1445 * chooseproc() is called when a cpu needs a user process to LWKT schedule,
1446 * it selects a user process and returns it. If chklp is non-NULL and chklp
 * has a better or equal priority than the process that would otherwise be
1448 * chosen, NULL is returned.
1450 * Until we fix the RUNQ code the chklp test has to be strict or we may
1451 * bounce between processes trying to acquire the current process designation.
1453 * Must be called with rdd->spin locked. The spinlock is left intact through
1454 * the entire routine. dd->spin does not have to be locked.
1456 * If worst is non-zero this function finds the worst thread instead of the
1457 * best thread (used by the schedulerclock-based rover).
1459 static
1460 struct lwp *
1461 dfly_chooseproc_locked(dfly_pcpu_t rdd, dfly_pcpu_t dd,
1462 struct lwp *chklp, int worst)
1464 struct lwp *lp;
1465 struct rq *q;
1466 u_int32_t *which;
1467 u_int32_t pri;
1468 u_int32_t rtqbits;
1469 u_int32_t tsqbits;
1470 u_int32_t idqbits;
1473 * Select best or worst process. Once selected, clear the bit
1474 * in our local variable (idqbits, tsqbits, or rtqbits) just
1475 * in case we have to loop.
1477 rtqbits = rdd->rtqueuebits;
1478 tsqbits = rdd->queuebits;
1479 idqbits = rdd->idqueuebits;
1481 loopfar:
1482 if (worst) {
1483 if (idqbits) {
1484 pri = bsrl(idqbits);
1485 idqbits &= ~(1U << pri);
1486 q = &rdd->idqueues[pri];
1487 which = &rdd->idqueuebits;
1488 } else if (tsqbits) {
1489 pri = bsrl(tsqbits);
1490 tsqbits &= ~(1U << pri);
1491 q = &rdd->queues[pri];
1492 which = &rdd->queuebits;
1493 } else if (rtqbits) {
1494 pri = bsrl(rtqbits);
1495 rtqbits &= ~(1U << pri);
1496 q = &rdd->rtqueues[pri];
1497 which = &rdd->rtqueuebits;
1498 } else {
1499 return (NULL);
1501 lp = TAILQ_LAST(q, rq);
1502 } else {
1503 if (rtqbits) {
1504 pri = bsfl(rtqbits);
1505 rtqbits &= ~(1U << pri);
1506 q = &rdd->rtqueues[pri];
1507 which = &rdd->rtqueuebits;
1508 } else if (tsqbits) {
1509 pri = bsfl(tsqbits);
1510 tsqbits &= ~(1U << pri);
1511 q = &rdd->queues[pri];
1512 which = &rdd->queuebits;
1513 } else if (idqbits) {
1514 pri = bsfl(idqbits);
1515 idqbits &= ~(1U << pri);
1516 q = &rdd->idqueues[pri];
1517 which = &rdd->idqueuebits;
1518 } else {
1519 return (NULL);
1521 lp = TAILQ_FIRST(q);
1523 KASSERT(lp, ("chooseproc: no lwp on busy queue"));
1525 loopnear:
1527 * If the passed lwp <chklp> is reasonably close to the selected
1528 * lwp <lp>, return NULL (indicating that <chklp> should be kept).
1530 * Note that we must error on the side of <chklp> to avoid bouncing
1531 * between threads in the acquire code.
1533 if (chklp) {
1534 if (chklp->lwp_priority < lp->lwp_priority + PPQ)
1535 return(NULL);
1539 * When rdd != dd, we have to make sure that the process we
 * are pulling is allowed to run on our cpu.  This alternative
 * path is a bit more expensive but it's not considered to be
1542 * in the critical path.
1544 if (rdd != dd && CPUMASK_TESTBIT(lp->lwp_cpumask, dd->cpuid) == 0) {
1545 if (worst)
1546 lp = TAILQ_PREV(lp, rq, lwp_procq);
1547 else
1548 lp = TAILQ_NEXT(lp, lwp_procq);
1549 if (lp)
1550 goto loopnear;
1551 goto loopfar;
1554 KTR_COND_LOG(usched_chooseproc,
1555 lp->lwp_proc->p_pid == usched_dfly_pid_debug,
1556 lp->lwp_proc->p_pid,
1557 lp->lwp_thread->td_gd->gd_cpuid,
1558 mycpu->gd_cpuid);
1560 KASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) != 0, ("not on runq6!"));
1561 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
1562 TAILQ_REMOVE(q, lp, lwp_procq);
1563 --rdd->runqcount;
1564 if (TAILQ_EMPTY(q))
1565 *which &= ~(1 << pri);
1568 * If we are choosing a process from rdd with the intent to
1569 * move it to dd, lwp_qcpu must be adjusted while rdd's spinlock
1570 * is still held.
1572 if (rdd != dd) {
1573 if (lp->lwp_mpflags & LWP_MP_ULOAD) {
1574 atomic_add_long(&rdd->uload, -lp->lwp_uload);
1575 atomic_add_int(&rdd->ucount, -1);
1577 lp->lwp_qcpu = dd->cpuid;
1578 atomic_add_long(&dd->uload, lp->lwp_uload);
1579 atomic_add_int(&dd->ucount, 1);
1580 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
	return lp;
}
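/*
 * Illustrative, non-kernel sketch of the bit scans used above: the best
 * pick walks realtime, then normal, then idle bits and takes the lowest
 * set bit (bsfl), while the "worst" pick used by the rebalance rover
 * walks the classes in reverse and takes the highest set bit (bsrl).
 * The gcc/clang builtins stand in for the kernel's bsfl()/bsrl().
 */
#if 0
#include <stdio.h>

static int bsf32(unsigned int v) { return __builtin_ctz(v); }
static int bsr32(unsigned int v) { return 31 - __builtin_clz(v); }

int
main(void)
{
	unsigned int rtqbits = 0;			/* realtime: empty */
	unsigned int tsqbits = (1U << 5) | (1U << 20);	/* normal */
	unsigned int idqbits = (1U << 2);		/* idle */

	if (rtqbits)
		printf("best:  realtime queue %d\n", bsf32(rtqbits));
	else if (tsqbits)
		printf("best:  normal queue %d\n", bsf32(tsqbits));
	else if (idqbits)
		printf("best:  idle queue %d\n", bsf32(idqbits));

	if (idqbits)
		printf("worst: idle queue %d\n", bsr32(idqbits));
	else if (tsqbits)
		printf("worst: normal queue %d\n", bsr32(tsqbits));
	else if (rtqbits)
		printf("worst: realtime queue %d\n", bsr32(rtqbits));
	return 0;
}
#endif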
1586 * USED TO PUSH RUNNABLE LWPS TO THE LEAST LOADED CPU.
1588 * Choose a cpu node to schedule lp on, hopefully nearby its current
1589 * node.
1591 * We give the current node a modest advantage for obvious reasons.
1593 * We also give the node the thread was woken up FROM a slight advantage
1594 * in order to try to schedule paired threads which synchronize/block waiting
1595 * for each other fairly close to each other. Similarly in a network setting
1596 * this feature will also attempt to place a user process near the kernel
1597 * protocol thread that is feeding it data. THIS IS A CRITICAL PART of the
1598 * algorithm as it heuristically groups synchronizing processes for locality
1599 * of reference in multi-socket systems.
1601 * We check against running processes and give a big advantage if there
1602 * are none running.
1604 * The caller will normally dfly_setrunqueue() lp on the returned queue.
 * When the topology is known choose a cpu whose group has, in aggregate,
 * the lowest weighted load.
1609 static
1610 dfly_pcpu_t
1611 dfly_choose_best_queue(struct lwp *lp)
1613 cpumask_t wakemask;
1614 cpumask_t mask;
1615 cpu_node_t *cpup;
1616 cpu_node_t *cpun;
1617 cpu_node_t *cpub;
1618 dfly_pcpu_t dd = &dfly_pcpu[lp->lwp_qcpu];
1619 dfly_pcpu_t rdd;
1620 int wakecpu;
1621 int cpuid;
1622 int n;
1623 long load;
1624 long lowest_load;
1627 * When the topology is unknown choose a random cpu that is hopefully
1628 * idle.
1630 if (dd->cpunode == NULL)
1631 return (dfly_choose_queue_simple(dd, lp));
1634 * Pairing mask
1636 if ((wakecpu = lp->lwp_thread->td_wakefromcpu) >= 0)
1637 wakemask = dfly_pcpu[wakecpu].cpumask;
1638 else
1639 CPUMASK_ASSZERO(wakemask);
 * When the topology is known choose a cpu whose group has, in
 * aggregate, the lowest weighted load.
1645 cpup = root_cpu_node;
1646 rdd = dd;
1648 while (cpup) {
1650 * Degenerate case super-root
1652 if (cpup->child_no == 1) {
1653 cpup = cpup->child_node[0];
1654 continue;
1658 * Terminal cpunode
1660 if (cpup->child_no == 0) {
1661 rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
1662 break;
1665 cpub = NULL;
1666 lowest_load = 0x7FFFFFFFFFFFFFFFLL;
1668 for (n = 0; n < cpup->child_no; ++n) {
1670 * Accumulate load information for all cpus
1671 * which are members of this node.
1673 int count;
1675 cpun = cpup->child_node[n];
1676 mask = cpun->members;
1677 CPUMASK_ANDMASK(mask, usched_global_cpumask);
1678 CPUMASK_ANDMASK(mask, smp_active_mask);
1679 CPUMASK_ANDMASK(mask, lp->lwp_cpumask);
1680 if (CPUMASK_TESTZERO(mask))
1681 continue;
1683 load = 0;
1684 count = 0;
1686 while (CPUMASK_TESTNZERO(mask)) {
1687 cpuid = BSFCPUMASK(mask);
1688 rdd = &dfly_pcpu[cpuid];
1690 if (rdd->uschedcp == NULL &&
1691 rdd->runqcount == 0 &&
1692 rdd->gd->gd_tdrunqcount == 0
1694 load += rdd->uload / 2;
1695 load += rdd->ucount *
1696 usched_dfly_weight3 / 2;
1697 } else {
1698 load += rdd->uload;
1699 load += rdd->ucount *
1700 usched_dfly_weight3;
1702 CPUMASK_NANDBIT(mask, cpuid);
1703 ++count;
1707 * Compensate if the lp is already accounted for in
1708 * the aggregate uload for this mask set. We want
1709 * to calculate the loads as if lp were not present,
1710 * otherwise the calculation is bogus.
1712 if ((lp->lwp_mpflags & LWP_MP_ULOAD) &&
1713 CPUMASK_TESTMASK(dd->cpumask, cpun->members)) {
1714 load -= lp->lwp_uload;
1715 load -= usched_dfly_weight3; /* ucount */
1718 load /= count;
1721 * Advantage the cpu group (lp) is already on.
1723 if (CPUMASK_TESTMASK(cpun->members, dd->cpumask))
1724 load -= usched_dfly_weight1;
1727 * Advantage nodes with more memory
1729 if (usched_dfly_node_mem) {
1730 load -= cpun->phys_mem * usched_dfly_weight5 /
1731 usched_dfly_node_mem;
1735 * Advantage the cpu group we want to pair (lp) to,
1736 * but don't let it go to the exact same cpu as
1737 * the wakecpu target.
1739 * We do this by checking whether cpun is a
1740 * terminal node or not. All cpun's at the same
1741 * level will either all be terminal or all not
1742 * terminal.
1744 * If it is and we match we disadvantage the load.
1745 * If it is and we don't match we advantage the load.
1747 * Also note that we are effectively disadvantaging
 * all-but-one by the same amount, so it won't affect
1749 * the weight1 factor for the all-but-one nodes.
1751 if (CPUMASK_TESTMASK(cpun->members, wakemask)) {
1752 if (cpun->child_no != 0) {
1753 /* advantage */
1754 load -= usched_dfly_weight2;
1755 } else {
1756 if (usched_dfly_features & 0x10)
1757 load += usched_dfly_weight2;
1758 else
1759 load -= usched_dfly_weight2;
1764 * Calculate the best load
1766 if (cpub == NULL || lowest_load > load ||
1767 (lowest_load == load &&
1768 CPUMASK_TESTMASK(cpun->members, dd->cpumask))
1770 lowest_load = load;
1771 cpub = cpun;
1774 cpup = cpub;
1776 /* Dispatch this outcast to a proper CPU. */
1777 if (__predict_false(CPUMASK_TESTBIT(lp->lwp_cpumask, rdd->cpuid) == 0))
1778 rdd = &dfly_pcpu[BSFCPUMASK(lp->lwp_cpumask)];
1779 if (usched_dfly_chooser > 0) {
1780 --usched_dfly_chooser; /* only N lines */
1781 kprintf("lp %02d->%02d %s\n",
1782 lp->lwp_qcpu, rdd->cpuid, lp->lwp_proc->p_comm);
	return (rdd);
}
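/*
 * Illustrative, non-kernel sketch of the topology descent performed by
 * dfly_choose_best_queue() above: starting at the root of the cpu tree,
 * pick the child with the lowest weighted load at each level until a
 * terminal (single-cpu) node is reached.  The toynode struct and the
 * loads are invented; the real code computes the loads with the weights
 * documented near the top of this file.
 */
#if 0
#include <stdio.h>
#include <stddef.h>

struct toynode {
	const char	*name;
	long		load;		/* pre-aggregated weighted load */
	int		nchildren;
	struct toynode	*child[4];
};

static struct toynode *
descend(struct toynode *node)
{
	while (node != NULL && node->nchildren > 0) {
		struct toynode *best = node->child[0];
		int i;

		for (i = 1; i < node->nchildren; ++i) {
			if (node->child[i]->load < best->load)
				best = node->child[i];
		}
		node = best;		/* drop one level in the topology */
	}
	return node;
}

int
main(void)
{
	struct toynode cpu0 = { "cpu0", 300, 0, { NULL } };
	struct toynode cpu1 = { "cpu1", 100, 0, { NULL } };
	struct toynode cpu2 = { "cpu2", 500, 0, { NULL } };
	struct toynode cpu3 = { "cpu3", 400, 0, { NULL } };
	struct toynode skt0 = { "socket0", 400, 2, { &cpu0, &cpu1 } };
	struct toynode skt1 = { "socket1", 900, 2, { &cpu2, &cpu3 } };
	struct toynode root = { "root", 0, 2, { &skt0, &skt1 } };

	printf("chosen cpu: %s\n", descend(&root)->name);
	return 0;
}
#endif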
1788 * USED TO PULL RUNNABLE LWPS FROM THE MOST LOADED CPU.
1790 * Choose the worst queue close to dd's cpu node with a non-empty runq
1791 * that is NOT dd.
1793 * This is used by the thread chooser when the current cpu's queues are
1794 * empty to steal a thread from another cpu's queue. We want to offload
1795 * the most heavily-loaded queue.
 * However, we do not want to steal from far-away nodes that themselves
1798 * have idle cpu's that are more suitable to distribute the far-away
1799 * thread to.
1801 static
1802 dfly_pcpu_t
1803 dfly_choose_worst_queue(dfly_pcpu_t dd, int forceit)
1805 cpumask_t mask;
1806 cpu_node_t *cpup;
1807 cpu_node_t *cpun;
1808 cpu_node_t *cpub;
1809 dfly_pcpu_t rdd;
1810 int cpuid;
1811 int n;
1812 long load;
1813 long highest_load;
1814 #if 0
1815 int pri;
1816 int hpri;
1817 #endif
1820 * When the topology is unknown choose a random cpu that is hopefully
1821 * idle.
1823 if (dd->cpunode == NULL) {
1824 return (NULL);
 * When the topology is known choose a cpu whose group has, in
 * aggregate, the highest weighted load.
1831 cpup = root_cpu_node;
1832 rdd = dd;
1833 while (cpup) {
1835 * Degenerate case super-root
1837 if (cpup->child_no == 1) {
1838 cpup = cpup->child_node[0];
1839 continue;
1843 * Terminal cpunode
1845 if (cpup->child_no == 0) {
1846 rdd = &dfly_pcpu[BSFCPUMASK(cpup->members)];
1847 break;
1850 cpub = NULL;
1851 highest_load = -0x7FFFFFFFFFFFFFFFLL;
1853 for (n = 0; n < cpup->child_no; ++n) {
1854 /*
1855 * Accumulate load information for all cpus
1856 * which are members of this node.
1857 */
1858 int count;
1860 cpun = cpup->child_node[n];
1861 mask = cpun->members;
1862 CPUMASK_ANDMASK(mask, usched_global_cpumask);
1863 CPUMASK_ANDMASK(mask, smp_active_mask);
1864 if (CPUMASK_TESTZERO(mask))
1865 continue;
1867 load = 0;
1868 count = 0;
1870 while (CPUMASK_TESTNZERO(mask)) {
1871 cpuid = BSFCPUMASK(mask);
1872 rdd = &dfly_pcpu[cpuid];
1874 if (rdd->uschedcp == NULL &&
1875 rdd->runqcount == 0 &&
1876 rdd->gd->gd_tdrunqcount == 0
1877 ) {
1878 load += rdd->uload / 2;
1879 load += rdd->ucount *
1880 usched_dfly_weight3 / 2;
1881 } else {
1882 load += rdd->uload;
1883 load += rdd->ucount *
1884 usched_dfly_weight3;
1885 }
1886 CPUMASK_NANDBIT(mask, cpuid);
1887 ++count;
1888 }
1889 load /= count;
1891 /*
1892 * Advantage the cpu group (dd) is already on.
1893 *
1894 * When choosing the worst queue we reverse the
1895 * sign, but only count half the weight.
1896 *
1897 * weight1 needs to be high enough to be stable,
1898 * but this can also cause it to be too sticky,
1899 * so the iterator which rebalances the load sets
1900 * forceit to ignore it.
1901 */
1902 if (forceit == 0 &&
1903 CPUMASK_TESTMASK(dd->cpumask, cpun->members)) {
1904 load += usched_dfly_weight1 / 2;
1905 }
1907 /*
1908 * Disadvantage nodes with more memory (same sign).
1909 */
1910 if (usched_dfly_node_mem) {
1911 load -= cpun->phys_mem * usched_dfly_weight5 /
1912 usched_dfly_node_mem;
1913 }
1916 /*
1917 * The best candidate is the one with the worst
1918 * (highest) load.
1919 */
1920 if (cpub == NULL || highest_load < load ||
1921 (highest_load == load &&
1922 CPUMASK_TESTMASK(cpun->members, dd->cpumask))) {
1923 highest_load = load;
1924 cpub = cpun;
1925 }
1926 }
1927 cpup = cpub;
1928 }
1930 /*
1931 * We never return our own node (dd), and only return a remote
1932 * node if its load is significantly worse than ours (i.e. where
1933 * stealing a thread would be considered reasonable).
1934 *
1935 * This also helps us avoid breaking paired threads apart which
1936 * can have disastrous effects on performance.
1937 */
1938 if (rdd == dd)
1939 return(NULL);
1941 #if 0
1942 hpri = 0;
1943 if (rdd->rtqueuebits && hpri < (pri = bsrl(rdd->rtqueuebits)))
1944 hpri = pri;
1945 if (rdd->queuebits && hpri < (pri = bsrl(rdd->queuebits)))
1946 hpri = pri;
1947 if (rdd->idqueuebits && hpri < (pri = bsrl(rdd->idqueuebits)))
1948 hpri = pri;
1949 hpri *= PPQ;
1950 if (rdd->uload - hpri < dd->uload + hpri)
1951 return(NULL);
1952 #endif
1953 return (rdd);
1954 }
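/*
 * Note (not part of the original source): the caller is still expected
 * to apply its own transfer threshold to the queue returned here.
 * dfly_helper_thread() below, for example, only pulls from the returned
 * rdd when rdd->uload exceeds dd->uload by more than usched_dfly_weight6.
 */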
1956 static
1957 dfly_pcpu_t
1958 dfly_choose_queue_simple(dfly_pcpu_t dd, struct lwp *lp)
1959 {
1960 dfly_pcpu_t rdd;
1961 cpumask_t tmpmask;
1962 cpumask_t mask;
1963 int cpubase;
1964 int cpuid;
1966 /*
1967 * Fallback to the original heuristic, select random cpu,
1968 * first checking the cpus not currently running a user thread.
1969 *
1970 * Use cpuid as the base cpu in our scan, first checking
1971 * cpuid...(ncpus-1), then 0...(cpuid-1). This avoids favoring
1972 * lower-numbered cpus.
1973 */
1974 ++dd->scancpu; /* SMP race ok */
1975 mask = dfly_rdyprocmask;
1976 CPUMASK_NANDMASK(mask, dfly_curprocmask);
1977 CPUMASK_ANDMASK(mask, lp->lwp_cpumask);
1978 CPUMASK_ANDMASK(mask, smp_active_mask);
1979 CPUMASK_ANDMASK(mask, usched_global_cpumask);
1981 cpubase = (int)(dd->scancpu % ncpus);
1982 CPUMASK_ASSBMASK(tmpmask, cpubase);
1983 CPUMASK_INVMASK(tmpmask);
1984 CPUMASK_ANDMASK(tmpmask, mask);
1985 while (CPUMASK_TESTNZERO(tmpmask)) {
1986 cpuid = BSFCPUMASK(tmpmask);
1987 rdd = &dfly_pcpu[cpuid];
1989 if ((rdd->upri & ~PPQMASK) >= (lp->lwp_priority & ~PPQMASK))
1990 goto found;
1991 CPUMASK_NANDBIT(tmpmask, cpuid);
1992 }
1994 CPUMASK_ASSBMASK(tmpmask, cpubase);
1995 CPUMASK_ANDMASK(tmpmask, mask);
1996 while (CPUMASK_TESTNZERO(tmpmask)) {
1997 cpuid = BSFCPUMASK(tmpmask);
1998 rdd = &dfly_pcpu[cpuid];
2000 if ((rdd->upri & ~PPQMASK) >= (lp->lwp_priority & ~PPQMASK))
2001 goto found;
2002 CPUMASK_NANDBIT(tmpmask, cpuid);
2003 }
2005 /*
2006 * Then cpus which might have a currently running lp
2007 */
2008 mask = dfly_rdyprocmask;
2009 CPUMASK_ANDMASK(mask, dfly_curprocmask);
2010 CPUMASK_ANDMASK(mask, lp->lwp_cpumask);
2011 CPUMASK_ANDMASK(mask, smp_active_mask);
2012 CPUMASK_ANDMASK(mask, usched_global_cpumask);
2014 CPUMASK_ASSBMASK(tmpmask, cpubase);
2015 CPUMASK_INVMASK(tmpmask);
2016 CPUMASK_ANDMASK(tmpmask, mask);
2017 while (CPUMASK_TESTNZERO(tmpmask)) {
2018 cpuid = BSFCPUMASK(tmpmask);
2019 rdd = &dfly_pcpu[cpuid];
2021 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK))
2022 goto found;
2023 CPUMASK_NANDBIT(tmpmask, cpuid);
2024 }
2026 CPUMASK_ASSBMASK(tmpmask, cpubase);
2027 CPUMASK_ANDMASK(tmpmask, mask);
2028 while (CPUMASK_TESTNZERO(tmpmask)) {
2029 cpuid = BSFCPUMASK(tmpmask);
2030 rdd = &dfly_pcpu[cpuid];
2032 if ((rdd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK))
2033 goto found;
2034 CPUMASK_NANDBIT(tmpmask, cpuid);
2035 }
2037 /*
2038 * If we cannot find a suitable cpu we round-robin using scancpu.
2039 * Other cpus will pick up as they release their current lwps or
2040 * become ready.
2041 *
2042 * Avoid a degenerate system lockup case if usched_global_cpumask
2043 * is set to 0 or otherwise does not cover lwp_cpumask.
2044 *
2045 * We only kick the target helper thread in this case, we do not
2046 * set the user resched flag because
2047 */
2048 cpuid = cpubase;
2049 if (CPUMASK_TESTBIT(lp->lwp_cpumask, cpuid) == 0)
2050 cpuid = BSFCPUMASK(lp->lwp_cpumask);
2051 else if (CPUMASK_TESTBIT(usched_global_cpumask, cpuid) == 0)
2052 cpuid = 0;
2053 rdd = &dfly_pcpu[cpuid];
2054 found:
2055 return (rdd);
2056 }
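/*
 * Note (not part of the original source): as I read the mask arithmetic
 * above, CPUMASK_ASSBMASK(tmpmask, cpubase) builds a mask of the cpus
 * below cpubase, so the inverted form restricts the first pass of each
 * scan to cpus in [cpubase, ncpus) and the non-inverted form restricts
 * the second pass to [0, cpubase).  Together the two passes implement
 * the wrap-around scan described at the top of the function.
 */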
2058 static
2059 void
2060 dfly_need_user_resched_remote(void *dummy)
2061 {
2062 globaldata_t gd = mycpu;
2063 dfly_pcpu_t dd = &dfly_pcpu[gd->gd_cpuid];
2065 /*
2066 * Flag reschedule needed
2067 */
2068 need_user_resched();
2070 /*
2071 * If no user thread is currently running we need to kick the helper
2072 * on our cpu to recover. Otherwise the cpu will never schedule
2073 * anything again.
2074 *
2075 * We cannot schedule the process ourselves because this is an
2076 * IPI callback and we cannot acquire spinlocks in an IPI callback.
2077 *
2078 * Call wakeup_mycpu to avoid sending IPIs to other CPUs
2079 */
2080 if (dd->uschedcp == NULL && (dd->flags & DFLY_PCPU_RDYMASK)) {
2081 ATOMIC_CPUMASK_NANDBIT(dfly_rdyprocmask, gd->gd_cpuid);
2082 dd->flags &= ~DFLY_PCPU_RDYMASK;
2083 wakeup_mycpu(dd->helper_thread);
2084 }
2085 }
2087 /*
2088 * dfly_remrunqueue_locked() removes a given process from the run queue
2089 * that it is on, clearing the queue busy bit if it becomes empty.
2090 *
2091 * Note that the user process scheduler is different from the LWKT scheduler.
2092 * The user process scheduler only manages user processes but it uses LWKT
2093 * underneath, and a user process operating in the kernel will often be
2094 * 'released' from our management.
2095 *
2096 * uload is NOT adjusted here. It is only adjusted if the lwkt_thread goes
2097 * to sleep or the lwp is moved to a different runq.
2098 */
2099 static void
2100 dfly_remrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
2101 {
2102 struct rq *q;
2103 u_int32_t *which;
2104 u_int8_t pri;
2106 KKASSERT(rdd->runqcount >= 0);
2108 pri = lp->lwp_rqindex;
2110 switch(lp->lwp_rqtype) {
2111 case RTP_PRIO_NORMAL:
2112 q = &rdd->queues[pri];
2113 which = &rdd->queuebits;
2114 break;
2115 case RTP_PRIO_REALTIME:
2116 case RTP_PRIO_FIFO:
2117 q = &rdd->rtqueues[pri];
2118 which = &rdd->rtqueuebits;
2119 break;
2120 case RTP_PRIO_IDLE:
2121 q = &rdd->idqueues[pri];
2122 which = &rdd->idqueuebits;
2123 break;
2124 default:
2125 panic("remrunqueue: invalid rtprio type");
2126 /* NOT REACHED */
2127 }
2128 KKASSERT(lp->lwp_mpflags & LWP_MP_ONRUNQ);
2129 atomic_clear_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
2130 TAILQ_REMOVE(q, lp, lwp_procq);
2131 --rdd->runqcount;
2132 if (TAILQ_EMPTY(q)) {
2133 KASSERT((*which & (1 << pri)) != 0,
2134 ("remrunqueue: remove from empty queue"));
2135 *which &= ~(1 << pri);
2136 }
2137 }
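/*
 * Note (not part of the original source): the KASSERT above documents
 * the invariant that a set bit in *which always corresponds to a
 * non-empty TAILQ.  If the bit were left set after the last lwp was
 * removed, a later bitmask scan would try to dequeue from an empty
 * queue.
 */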
2139 /*
2140 * dfly_setrunqueue_locked()
2141 *
2142 * Add a process whose rqtype and rqindex had previously been calculated
2143 * onto the appropriate run queue. Determine if the addition requires
2144 * a reschedule on a cpu and return the cpuid or -1.
2145 *
2146 * NOTE: Lower priorities are better priorities.
2147 *
2148 * NOTE ON ULOAD: This variable specifies the aggregate load on a cpu, the
2149 * sum of the rough lwp_priority for all running and runnable
2150 * processes. Lower priority processes (higher lwp_priority
2151 * values) actually DO count as more load, not less, because
2152 * these are the programs which require the most care with
2153 * regards to cpu selection.
2154 */
2155 static void
2156 dfly_setrunqueue_locked(dfly_pcpu_t rdd, struct lwp *lp)
2157 {
2158 u_int32_t *which;
2159 struct rq *q;
2160 int pri;
2162 KKASSERT(lp->lwp_qcpu == rdd->cpuid);
2164 if ((lp->lwp_mpflags & LWP_MP_ULOAD) == 0) {
2165 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ULOAD);
2166 atomic_add_long(&rdd->uload, lp->lwp_uload);
2167 atomic_add_int(&rdd->ucount, 1);
2168 }
2170 pri = lp->lwp_rqindex;
2172 switch(lp->lwp_rqtype) {
2173 case RTP_PRIO_NORMAL:
2174 q = &rdd->queues[pri];
2175 which = &rdd->queuebits;
2176 break;
2177 case RTP_PRIO_REALTIME:
2178 case RTP_PRIO_FIFO:
2179 q = &rdd->rtqueues[pri];
2180 which = &rdd->rtqueuebits;
2181 break;
2182 case RTP_PRIO_IDLE:
2183 q = &rdd->idqueues[pri];
2184 which = &rdd->idqueuebits;
2185 break;
2186 default:
2187 panic("setrunqueue: invalid rtprio type");
2188 /* NOT REACHED */
2189 }
2191 /*
2192 * Place us on the selected queue. Determine if we should be
2193 * placed at the head of the queue or at the end.
2194 *
2195 * We are placed at the tail if our round-robin count has expired,
2196 * or is about to expire and the system thinks it's a good place to
2197 * round-robin, or there is already a next thread on the queue
2198 * (it might be trying to pick up where it left off and we don't
2199 * want to interfere).
2200 */
2201 KKASSERT((lp->lwp_mpflags & LWP_MP_ONRUNQ) == 0);
2202 atomic_set_int(&lp->lwp_mpflags, LWP_MP_ONRUNQ);
2203 ++rdd->runqcount;
2205 if (lp->lwp_rrcount >= usched_dfly_rrinterval ||
2206 (lp->lwp_rrcount >= usched_dfly_rrinterval / 2 &&
2207 (lp->lwp_thread->td_mpflags & TDF_MP_BATCH_DEMARC))
2208 ) {
2209 /*
2210 * Place on tail
2211 */
2212 atomic_clear_int(&lp->lwp_thread->td_mpflags,
2213 TDF_MP_BATCH_DEMARC);
2214 lp->lwp_rrcount = 0;
2215 TAILQ_INSERT_TAIL(q, lp, lwp_procq);
2216 } else {
2217 /*
2218 * Retain rrcount and place on head. Count is retained
2219 * even if the queue is empty.
2220 */
2221 TAILQ_INSERT_HEAD(q, lp, lwp_procq);
2222 }
2223 *which |= 1 << pri;
2224 }
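/*
 * Illustration (not part of the original source): an lwp whose
 * lwp_rqindex is 5 is linked onto queues[5] (for RTP_PRIO_NORMAL) and
 * bit 5 of rdd->queuebits is set above, so the chooser can locate the
 * most deserving non-empty queue with a find-first-set style scan of
 * the bitmask; dfly_remrunqueue_locked() clears the bit again once
 * queues[5] drains.
 */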
2226 /*
2227 * For SMP systems a user scheduler helper thread is created for each
2228 * cpu and is used to allow one cpu to wakeup another for the purposes of
2229 * scheduling userland threads from setrunqueue().
2230 *
2231 * UP systems do not need the helper since there is only one cpu.
2232 *
2233 * We can't use the idle thread for this because we might block.
2234 * Additionally, doing things this way allows us to HLT idle cpus
2235 * on MP systems.
2236 */
2237 static void
2238 dfly_helper_thread(void *dummy)
2239 {
2240 globaldata_t gd;
2241 dfly_pcpu_t dd;
2242 dfly_pcpu_t rdd;
2243 struct lwp *nlp;
2244 cpumask_t mask;
2245 int cpuid;
2247 gd = mycpu;
2248 cpuid = gd->gd_cpuid; /* doesn't change */
2249 mask = gd->gd_cpumask; /* doesn't change */
2250 dd = &dfly_pcpu[cpuid];
2252 /*
2253 * Initial interlock, make sure all dfly_pcpu[] structures have
2254 * been initialized before proceeding.
2255 */
2256 lockmgr(&usched_dfly_config_lk, LK_SHARED);
2257 lockmgr(&usched_dfly_config_lk, LK_RELEASE);
2259 /*
2260 * Since we only want to be woken up when no user processes
2261 * are scheduled on a cpu, run at an ultra low priority.
2262 */
2263 lwkt_setpri_self(TDPRI_USER_SCHEDULER);
2265 for (;;) {
2266 /*
2267 * We use the LWKT deschedule-interlock trick to avoid racing
2268 * dfly_rdyprocmask. This means we cannot block through to the
2269 * manual lwkt_switch() call we make below.
2270 */
2271 crit_enter_gd(gd);
2272 tsleep_interlock(dd->helper_thread, 0);
2274 spin_lock(&dd->spin);
2275 if ((dd->flags & DFLY_PCPU_RDYMASK) == 0) {
2276 ATOMIC_CPUMASK_ORMASK(dfly_rdyprocmask, mask);
2277 dd->flags |= DFLY_PCPU_RDYMASK;
2278 }
2279 clear_user_resched(); /* This satisfies the reschedule request */
2280 #if 0
2281 dd->rrcount = 0; /* Reset the round-robin counter */
2282 #endif
2284 if (dd->runqcount || dd->uschedcp != NULL) {
2285 /*
2286 * Threads are available. A thread may or may not be
2287 * currently scheduled. Get the best thread already queued
2288 * to this cpu.
2289 */
2290 nlp = dfly_chooseproc_locked(dd, dd, dd->uschedcp, 0);
2291 if (nlp) {
2292 if ((dd->flags & DFLY_PCPU_CURMASK) == 0) {
2293 ATOMIC_CPUMASK_ORMASK(dfly_curprocmask, mask);
2294 dd->flags |= DFLY_PCPU_CURMASK;
2295 }
2296 dd->upri = nlp->lwp_priority;
2297 dd->uschedcp = nlp;
2298 #if 0
2299 dd->rrcount = 0; /* reset round robin */
2300 #endif
2301 spin_unlock(&dd->spin);
2302 lwkt_acquire(nlp->lwp_thread);
2303 lwkt_schedule(nlp->lwp_thread);
2304 } else {
2305 /*
2306 * This situation should not occur because we had
2307 * at least one thread available.
2308 */
2309 spin_unlock(&dd->spin);
2310 }
2311 } else if (usched_dfly_features & 0x01) {
2312 /*
2313 * This cpu is devoid of runnable threads, steal a thread
2314 * from another cpu. Since we're stealing, might as well
2315 * load balance at the same time.
2316 *
2317 * We choose the highest-loaded thread from the worst queue.
2318 *
2319 * NOTE! This function only returns a non-NULL rdd when
2320 * another cpu's queue is obviously overloaded. We
2321 * do not want to perform the type of rebalancing
2322 * the schedclock does here because it would result
2323 * in insane process pulling when 'steady' state is
2324 * partially unbalanced (e.g. 6 runnables and only
2325 * 4 cores).
2326 */
2327 rdd = dfly_choose_worst_queue(dd, 0);
2328 if (rdd && dd->uload + usched_dfly_weight6 < rdd->uload &&
2329 spin_trylock(&rdd->spin)) {
2330 nlp = dfly_chooseproc_locked(rdd, dd, NULL, 1);
2331 spin_unlock(&rdd->spin);
2332 } else {
2333 nlp = NULL;
2334 }
2335 if (nlp) {
2336 if ((dd->flags & DFLY_PCPU_CURMASK) == 0) {
2337 ATOMIC_CPUMASK_ORMASK(dfly_curprocmask, mask);
2338 dd->flags |= DFLY_PCPU_CURMASK;
2339 }
2340 dd->upri = nlp->lwp_priority;
2341 dd->uschedcp = nlp;
2342 #if 0
2343 dd->rrcount = 0; /* reset round robin */
2344 #endif
2345 spin_unlock(&dd->spin);
2346 lwkt_acquire(nlp->lwp_thread);
2347 lwkt_schedule(nlp->lwp_thread);
2348 } else {
2349 /*
2350 * Leave the thread on our run queue. Another
2351 * scheduler will try to pull it later.
2352 */
2353 spin_unlock(&dd->spin);
2354 }
2355 } else {
2356 /*
2357 * devoid of runnable threads and not allowed to steal
2358 * any.
2359 */
2360 spin_unlock(&dd->spin);
2361 }
2363 /*
2364 * We're descheduled unless someone scheduled us. Switch away.
2365 * Exiting the critical section will cause splz() to be called
2366 * for us if interrupts and such are pending.
2367 */
2368 crit_exit_gd(gd);
2369 tsleep(dd->helper_thread, PINTERLOCKED, "schslp", 0);
2370 }
2371 }
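/*
 * Illustration (not part of the original source), assuming the weight6
 * value suggested by the sysctl registration below (150): with
 * dd->uload == 1000, the steal path above only acts on a victim whose
 * uload exceeds 1150, so rdd->uload == 1200 allows a pull while
 * rdd->uload == 1100 does not.
 */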
2373 #if 0
2374 static int
2375 sysctl_usched_dfly_stick_to_level(SYSCTL_HANDLER_ARGS)
2376 {
2377 int error, new_val;
2379 new_val = usched_dfly_stick_to_level;
2381 error = sysctl_handle_int(oidp, &new_val, 0, req);
2382 if (error != 0 || req->newptr == NULL)
2383 return (error);
2384 if (new_val > cpu_topology_levels_number - 1 || new_val < 0)
2385 return (EINVAL);
2386 usched_dfly_stick_to_level = new_val;
2387 return (0);
2388 }
2389 #endif
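/*
 * Note (not part of the original source): the disabled handler above
 * follows the usual SYSCTL_ADD_PROC/sysctl_handle_int() pattern: copy
 * the current value out, return early on error or when no new value was
 * supplied (req->newptr == NULL), validate the request against the
 * known topology depth, and only then commit it.
 */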
2391 /*
2392 * Setup the queues and scheduler helpers (scheduler helpers are SMP only).
2393 * Note that curprocmask bit 0 has already been cleared by rqinit() and
2394 * we should not mess with it further.
2395 */
2396 static void
2397 usched_dfly_cpu_init(void)
2398 {
2399 int i;
2400 int j;
2401 int smt_not_supported = 0;
2402 int cache_coherent_not_supported = 0;
2404 if (bootverbose)
2405 kprintf("Start usched_dfly helpers on cpus:\n");
2407 sysctl_ctx_init(&usched_dfly_sysctl_ctx);
2408 usched_dfly_sysctl_tree =
2409 SYSCTL_ADD_NODE(&usched_dfly_sysctl_ctx,
2410 SYSCTL_STATIC_CHILDREN(_kern), OID_AUTO,
2411 "usched_dfly", CTLFLAG_RD, 0, "");
2413 usched_dfly_node_mem = get_highest_node_memory();
2415 lockmgr(&usched_dfly_config_lk, LK_EXCLUSIVE);
2417 for (i = 0; i < ncpus; ++i) {
2418 dfly_pcpu_t dd = &dfly_pcpu[i];
2419 cpumask_t mask;
2421 CPUMASK_ASSBIT(mask, i);
2422 if (CPUMASK_TESTMASK(mask, smp_active_mask) == 0)
2423 continue;
2425 spin_init(&dd->spin, "uschedcpuinit");
2426 dd->cpunode = get_cpu_node_by_cpuid(i);
2427 dd->cpuid = i;
2428 dd->gd = globaldata_find(i);
2429 CPUMASK_ASSBIT(dd->cpumask, i);
2430 for (j = 0; j < NQS; j++) {
2431 TAILQ_INIT(&dd->queues[j]);
2432 TAILQ_INIT(&dd->rtqueues[j]);
2433 TAILQ_INIT(&dd->idqueues[j]);
2434 }
2435 ATOMIC_CPUMASK_NANDBIT(dfly_curprocmask, 0);
2436 if (i == 0)
2437 dd->flags &= ~DFLY_PCPU_CURMASK;
2439 if (dd->cpunode == NULL) {
2440 smt_not_supported = 1;
2441 cache_coherent_not_supported = 1;
2442 if (bootverbose)
2443 kprintf (" cpu%d - WARNING: No CPU NODE "
2444 "found for cpu\n", i);
2445 } else {
2446 switch (dd->cpunode->type) {
2447 case THREAD_LEVEL:
2448 if (bootverbose)
2449 kprintf (" cpu%d - HyperThreading "
2450 "available. Core siblings: ",
2451 i);
2452 break;
2453 case CORE_LEVEL:
2454 smt_not_supported = 1;
2456 if (bootverbose)
2457 kprintf (" cpu%d - No HT available, "
2458 "multi-core/physical "
2459 "cpu. Physical siblings: ",
2460 i);
2461 break;
2462 case CHIP_LEVEL:
2463 smt_not_supported = 1;
2465 if (bootverbose)
2466 kprintf (" cpu%d - No HT available, "
2467 "single-core/physical cpu. "
2468 "Package siblings: ",
2469 i);
2470 break;
2471 default:
2472 /* Let's go for safe defaults here */
2473 smt_not_supported = 1;
2474 cache_coherent_not_supported = 1;
2475 if (bootverbose)
2476 kprintf (" cpu%d - Unknown cpunode->"
2477 "type=%u. siblings: ",
2478 i,
2479 (u_int)dd->cpunode->type);
2480 break;
2481 }
2483 if (bootverbose) {
2484 if (dd->cpunode->parent_node != NULL) {
2485 kprint_cpuset(&dd->cpunode->
2486 parent_node->members);
2487 kprintf("\n");
2488 } else {
2489 kprintf(" no siblings\n");
2490 }
2491 }
2492 }
2494 lwkt_create(dfly_helper_thread, NULL, &dd->helper_thread, NULL,
2495 0, i, "usched %d", i);
2497 /*
2498 * Allow user scheduling on the target cpu. cpu #0 has already
2499 * been enabled in rqinit().
2500 */
2501 if (i) {
2502 ATOMIC_CPUMASK_NANDMASK(dfly_curprocmask, mask);
2503 dd->flags &= ~DFLY_PCPU_CURMASK;
2504 }
2505 if ((dd->flags & DFLY_PCPU_RDYMASK) == 0) {
2506 ATOMIC_CPUMASK_ORMASK(dfly_rdyprocmask, mask);
2507 dd->flags |= DFLY_PCPU_RDYMASK;
2508 }
2509 dd->upri = PRIBASE_NULL;
2510 }
2513 /* usched_dfly sysctl configurable parameters */
2515 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2516 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2517 OID_AUTO, "rrinterval", CTLFLAG_RW,
2518 &usched_dfly_rrinterval, 0, "");
2519 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2520 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2521 OID_AUTO, "decay", CTLFLAG_RW,
2522 &usched_dfly_decay, 0, "Extra decay when not running");
2524 /* Add enable/disable option for SMT scheduling if supported */
2525 if (smt_not_supported) {
2526 usched_dfly_smt = 0;
2527 SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
2528 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2529 OID_AUTO, "smt", CTLFLAG_RD,
2530 "NOT SUPPORTED", 0, "SMT NOT SUPPORTED");
2531 } else {
2532 usched_dfly_smt = 1;
2533 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2534 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2535 OID_AUTO, "smt", CTLFLAG_RW,
2536 &usched_dfly_smt, 0, "Enable SMT scheduling");
2537 }
2539 /*
2540 * Add enable/disable option for cache coherent scheduling
2541 * if supported
2542 */
2543 if (cache_coherent_not_supported) {
2544 usched_dfly_cache_coherent = 0;
2545 SYSCTL_ADD_STRING(&usched_dfly_sysctl_ctx,
2546 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2547 OID_AUTO, "cache_coherent", CTLFLAG_RD,
2548 "NOT SUPPORTED", 0,
2549 "Cache coherence NOT SUPPORTED");
2550 } else {
2551 usched_dfly_cache_coherent = 1;
2552 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2553 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2554 OID_AUTO, "cache_coherent", CTLFLAG_RW,
2555 &usched_dfly_cache_coherent, 0,
2556 "Enable/Disable cache coherent scheduling");
2557 }
2558 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2559 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2560 OID_AUTO, "weight1", CTLFLAG_RW,
2561 &usched_dfly_weight1, 200,
2562 "Weight selection for current cpu");
2564 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2565 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2566 OID_AUTO, "weight2", CTLFLAG_RW,
2567 &usched_dfly_weight2, 180,
2568 "Weight selection for wakefrom cpu");
2570 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2571 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2572 OID_AUTO, "weight3", CTLFLAG_RW,
2573 &usched_dfly_weight3, 40,
2574 "Weight selection for num threads on queue");
2576 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2577 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2578 OID_AUTO, "weight4", CTLFLAG_RW,
2579 &usched_dfly_weight4, 160,
2580 "Availability of other idle cpus");
2582 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2583 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2584 OID_AUTO, "weight5", CTLFLAG_RW,
2585 &usched_dfly_weight5, 50,
2586 "Memory attached to node");
2588 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2589 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2590 OID_AUTO, "weight6", CTLFLAG_RW,
2591 &usched_dfly_weight6, 150,
2592 "Transfer weight");
2594 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2595 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2596 OID_AUTO, "fast_resched", CTLFLAG_RW,
2597 &usched_dfly_fast_resched, 0,
2598 "Availability of other idle cpus");
2600 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2601 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2602 OID_AUTO, "features", CTLFLAG_RW,
2603 &usched_dfly_features, 0x8F,
2604 "Allow pulls into empty queues");
2606 SYSCTL_ADD_INT(&usched_dfly_sysctl_ctx,
2607 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2608 OID_AUTO, "swmask", CTLFLAG_RW,
2609 &usched_dfly_swmask, ~PPQMASK,
2610 "Queue mask to force thread switch");
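/*
 * Illustrative usage (not part of the original source): the knobs
 * registered above appear under the kern.usched_dfly sysctl tree, e.g.
 *
 *	sysctl kern.usched_dfly.rrinterval
 *	sysctl kern.usched_dfly.weight2=120
 *
 * The one-line descriptions passed to SYSCTL_ADD_INT() above remain the
 * authoritative summary of what each weight biases.
 */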
2612 #if 0
2613 SYSCTL_ADD_PROC(&usched_dfly_sysctl_ctx,
2614 SYSCTL_CHILDREN(usched_dfly_sysctl_tree),
2615 OID_AUTO, "stick_to_level",
2616 CTLTYPE_INT | CTLFLAG_RW,
2617 NULL, sizeof usched_dfly_stick_to_level,
2618 sysctl_usched_dfly_stick_to_level, "I",
2619 "Stick a process to this level. See sysctl "
2620 "parameter hw.cpu_topology.level_description");
2621 #endif
2623 lockmgr(&usched_dfly_config_lk, LK_RELEASE);
2624 }
2626 SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
2627 usched_dfly_cpu_init, NULL);