usr.sbin/makefs/ffs: Remove m_buf::b_is_hammer2
[dragonfly.git] / sys / kern / subr_sleepqueue.c
bloba6ceb028a1444190506a51f2d5a38f14d59ba0d0
1 /*
2 * Copyright (c) 2023 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
35 * DragonFly implementation of the FreeBSD sleepq*() API. FreeBSD's sleepq*()
36 * API differs from our tsleep*() API in several major ways.
38 * ONLY USE THIS FOR FREEBSD COMPATIBILITY, E.G. THE LINUX KPI
40 * - Explicit interlock via sleepq_lock(wchan)
41 * - Single global wchan hash table
42 * - Explicit blocking counts
43 * - Expects all blockers on the same wchan to be of the same type
45 * In addition, the API calls have a ton of unnecessary redundancy which
46 * creates problems with using on-stack structures for things. To make
47  * ends meet, DragonFly records information in the thread structure and
48 * only enacts the actual operation when the thread issues a *wait*() API
49 * call. Only the spin interlock is handled in real-time.
52 #include "opt_ddb.h"
54 #include <sys/cdefs.h>
55 #include <sys/param.h>
56 #include <sys/systm.h>
57 #include <sys/lock.h>
58 #include <sys/priv.h>
59 #include <sys/malloc.h>
60 #include <sys/queue.h>
61 #include <sys/sleepqueue.h>
62 #include <sys/objcache.h>
63 #include <sys/sysctl.h>
65 #include <sys/time.h>
67 #include <machine/atomic.h>
69 #ifdef DDB
70 #include <ddb/ddb.h>
71 #endif
73 #include <sys/signal2.h>
74 #include <sys/thread2.h>
75 #include <sys/spinlock2.h>
76 #include <sys/mutex2.h>
78 #include <vm/vm_extern.h>
/*
 * Global wchan hash table parameters.
 *
 * BUGFIX(review): as extracted, SLEEPQ_HASH XOR'd an *unmasked*
 * ((uintptr_t)wchan >> 10) term, so the result could exceed
 * SLEEPQ_HMASK and SLEEPQ_LOOKUP would index past sleepq_chains[].
 * Mask the final XOR so the hash always lands in [0, SLEEPQ_HSIZE).
 */
#define SLEEPQ_HSIZE		1024
#define SLEEPQ_HMASK		(SLEEPQ_HSIZE - 1)
#define SLEEPQ_HASH(wchan)	((((uintptr_t)(wchan) >> 10) ^		\
				  ((uintptr_t)(wchan))) & SLEEPQ_HMASK)
#define SLEEPQ_LOOKUP(wchan)	&sleepq_chains[SLEEPQ_HASH(wchan)]
#define SLEEPQ_NRQUEUES		2	/* FreeBSD uses 2 sub-queues */
#define SLEEPQ_FREEPERSLOT	4	/* cached free wchans per chain */
89 struct sleepqueue_wchan;
/*
 * One chain per hash bucket.  sc_wchead holds both active wchan records
 * and cached free ones (wc_wchan == NULL); sc_free_count counts the free
 * ones.  sc_spin protects every field of the chain and of its wchans.
 * (NOTE(review): the structs' closing braces are not visible in this
 * extract; only member lines survived.)
 */
91 struct sleepqueue_chain {
92 struct spinlock sc_spin;
93 TAILQ_HEAD(, sleepqueue_wchan) sc_wchead;
94 u_int sc_free_count;
/*
 * One record per active wait channel.  wc_refs counts lock holders plus
 * blocked threads; wc_blocked[] counts sleepers per FreeBSD sub-queue;
 * wc_type caches the SLEEPQ_TYPE flags from sleepq_add().
 */
97 struct sleepqueue_wchan {
98 TAILQ_ENTRY(sleepqueue_wchan) wc_entry;
99 const void *wc_wchan;
100 struct sleepqueue_chain *wc_sc;
101 u_int wc_refs;
102 int wc_type;
103 u_int wc_blocked[SLEEPQ_NRQUEUES];
/* global hash table, malloc type, and objcache backing the wchan records */
106 static struct sleepqueue_chain sleepq_chains[SLEEPQ_HSIZE];
107 static MALLOC_DEFINE(M_SLEEPQ, "sleepq", "fbsd sleepq api");
108 static struct objcache *sleepq_wc_cache;
111 * Return existing wchan, assert not NULL.
113 static __inline
114 struct sleepqueue_wchan *
115 sleepq_wclookup(const void *wchan)
117 struct sleepqueue_chain *sc;
118 struct sleepqueue_wchan *wc;
120 sc = SLEEPQ_LOOKUP(wchan);
121 KKASSERT(spin_held(&sc->sc_spin));
122 TAILQ_FOREACH(wc, &sc->sc_wchead, wc_entry) {
123 if (wc->wc_wchan == wchan)
124 return(wc);
126 panic("sleepq_wclookup: wchan %p not found\n", wc);
128 return NULL; /* not reached */
132 * Early initialization of sleep queues
134 static void
135 init_sleepqueues(void)
137 int i;
139 for (i = 0; i < SLEEPQ_HSIZE; ++i) {
140 spin_init(&sleepq_chains[i].sc_spin, "sleepq");
141 TAILQ_INIT(&sleepq_chains[i].sc_wchead);
143 /*thread0.td_sleepqueue = sleepq_alloc();*/
145 sleepq_wc_cache = objcache_create_simple(M_SLEEPQ,
146 sizeof(struct sleepqueue_wchan));
149 SYSINIT(sysinit_sleepqueues, SI_BOOT2_LWKT_INIT, SI_ORDER_SECOND,
150 init_sleepqueues, NULL);
153 * DragonFlyBSD Interface Functions
155 * Setup and teardown sleepq related thread structures
157 * sleepq_alloc() - not applicable
158 * sleepq_free() - not applicable
/* No per-thread sleepq state needs initialization on DragonFly (empty body). */
160 void
161 sleepq_setup_thread(struct thread *td)
/* No per-thread sleepq state needs teardown on DragonFly (empty body). */
165 void
166 sleepq_teardown_thread(struct thread *td)
171 * Lock the wchan, creating the structure if necessary. Returns with
172 * the spin-lock held.
174 * Also used as an interlock, so we need to actually lock something here.
176 void
177 sleepq_lock(const void *wchan)
179 struct sleepqueue_chain *sc;
180 struct sleepqueue_wchan *wc;
182 sc = SLEEPQ_LOOKUP(wchan);
183 spin_lock(&sc->sc_spin);
/*
 * All successful exits are via 'return' inside the loop, with
 * sc->sc_spin still held (released later by sleepq_release() or the
 * *wait*() paths).
 */
185 for (;;) {
187 * Look for the wchan, if not found then allocate one from
188 * the existing in-list cache if available.
190 TAILQ_FOREACH(wc, &sc->sc_wchead, wc_entry) {
/* active entry for this wchan: just add a reference */
191 if (wc->wc_wchan == wchan) {
192 ++wc->wc_refs;
193 return;
/* free cached entry: claim it for this wchan */
195 if (wc->wc_wchan == NULL) {
196 wc->wc_wchan = wchan;
197 ++wc->wc_refs;
198 --sc->sc_free_count;
199 return;
204 * Not found and no free entries available, allocate
205 * a new, free wc, then relock and repeat the search.
/*
 * NOTE: the spin-lock is dropped across objcache_get(), so another
 * CPU may create this wchan concurrently; the new entry is inserted
 * as a free one and the loop re-scans from the top, which resolves
 * the race either way.
 */
207 spin_unlock(&sc->sc_spin);
208 wc = objcache_get(sleepq_wc_cache, M_WAITOK);
209 KKASSERT(wc->wc_wchan == NULL && wc->wc_refs == 0);
210 wc->wc_sc = sc;
211 spin_lock(&sc->sc_spin);
212 TAILQ_INSERT_TAIL(&sc->sc_wchead, wc, wc_entry);
213 ++sc->sc_free_count;
214 /* loop re-search */
219 * Unlock the sleep queue chain associated with a given wait channel.
221 void
222 sleepq_release(const void *wchan)
224 struct sleepqueue_chain *sc;
225 struct sleepqueue_wchan *wc;
227 wc = sleepq_wclookup(wchan);
228 sc = wc->wc_sc;
229 KKASSERT(wc->wc_refs > 0);
/*
 * Drop the reference taken by sleepq_lock().  On the last reference,
 * return the record to the chain's free cache by clearing wc_wchan
 * (no sleepers may remain at that point).
 */
230 if (--wc->wc_refs == 0) {
231 /* just sanity-check one for brevity */
232 KKASSERT(wc->wc_blocked[0] == 0);
233 wc->wc_wchan = NULL;
234 wc->wc_type = 0;
235 ++sc->sc_free_count;
/* release the bucket lock acquired by sleepq_lock() */
238 spin_unlock(&sc->sc_spin);
242 * Place the current thread on the specified wait channel and sub-queue.
244 * The caller must be holding the wchan lock and it will remain locked
245 * on return. The caller must follow-up with a sleepq_*wait*() call.
247 * If INVARIANTS is enabled, then it associates the passed in lock with
248 * the sleepq to make sure it is held when that sleep queue is woken up
249 * (XXX not implemented)
251 void
252 sleepq_add(const void *wchan, struct lock_object *lock, const char *wmesg,
253 int flags, int queue)
255 struct sleepqueue_wchan *wc;
256 struct thread *td;
257 int domain;
260 * Locate the wc (also asserts that the lock is held). Bump
261 * wc->wc_refs and wc->wc_blocked[queue] to indicate that the
262 * thread has been placed on the sleepq.
264 wc = sleepq_wclookup(wchan);
265 ++wc->wc_refs;
266 ++wc->wc_blocked[queue];
/* assumes 0 <= queue < SLEEPQ_NRQUEUES — TODO confirm callers */
/* NOTE: 'lock' is accepted for FreeBSD API compatibility but unused here */
267 wc->wc_type = flags & SLEEPQ_TYPE;
270 * tsleep_interlock() sets TDF_TSLEEPQ, sets td_wchan, and td_wdomain,
271 * and places the thread on the appropriate sleepq.
/* each FreeBSD sub-queue maps to its own DragonFly PDOMAIN */
273 td = curthread;
274 td->td_wmesg = wmesg;
275 domain = PDOMAIN_FBSD0 + queue * PDOMAIN_FBSDINC;
276 tsleep_interlock(wchan, domain);
279 * Clear timeout to discern a possible later sleepq_set_timeout_sbt()
280 * call.
/* record deferred-sleep state; the actual sleep happens in sleepq_*wait*() */
282 td->td_sqwc = wc;
283 td->td_sqtimo = 0;
284 td->td_sqqueue = queue;
288 * Set a timeout for the thread after it has been added to a wait channel.
/*
 * Stores an *absolute* deadline (current sbticks + sbt) in td_sqtimo,
 * consumed later by sleepq_timedwait*().
 * NOTE(review): the precision hint 'pr' and 'flags' are not used by this
 * implementation — confirm callers do not depend on them.
 */
290 void
291 sleepq_set_timeout_sbt(const void *wchan, sbintime_t sbt, sbintime_t pr,
292 int flags)
294 struct sleepqueue_wchan *wc;
295 struct thread *td;
297 td = curthread;
298 wc = td->td_sqwc;
/* sleepq_add() must have been issued for this wchan first */
299 KKASSERT(wc && wc->wc_wchan == wchan);
300 td->td_sqtimo = sbticks + sbt;
304 * Return the number of actual sleepers for the specified queue.
306 * Caller must be holding the wchan locked
308 u_int
309 sleepq_sleepcnt(const void *wchan, int queue)
311 struct sleepqueue_wchan *wc;
313 wc = sleepq_wclookup(wchan);
314 return (wc->wc_blocked[queue]);
/*
 * Common sleep entry for the sleepq_*wait*() variants.  Releases the
 * chain spin-lock and issues the tsleep().  'timo' is either 0 (no
 * timeout) or an absolute sbticks deadline from sleepq_set_timeout_sbt();
 * a deadline already in the past returns EWOULDBLOCK without sleeping.
 * Returns the tsleep() error code (0 on normal wakeup).
 * NOTE(review): the 'int' return-type line is missing from this extract;
 * 'domain' is computed but not passed to tsleep() here — presumably the
 * domain was already folded in by tsleep_interlock(); confirm against
 * the full source.
 */
317 static __inline
319 _sleepq_wait_begin(struct thread *td, struct sleepqueue_chain *sc,
320 struct sleepqueue_wchan *wc, sbintime_t timo, int tflags)
322 int domain;
323 int ret;
325 spin_unlock(&sc->sc_spin);
327 domain = PDOMAIN_FBSD0 + td->td_sqqueue * PDOMAIN_FBSDINC;
328 if (timo) {
/* convert absolute deadline to a relative tsleep() timeout */
329 timo -= sbticks;
330 if (timo > 0) {
331 ret = tsleep(td->td_wchan, tflags, td->td_wmesg, timo);
332 } else {
333 ret = EWOULDBLOCK;
335 } else {
336 ret = tsleep(td->td_wchan, tflags, td->td_wmesg, 0);
339 return ret;
343 * Wait for completion
/*
 * Common sleep exit: detach the thread from the wchan record and drop
 * the references taken by sleepq_add().  On the last reference the
 * record becomes free again: it is either kept cached on the chain
 * (when at most SLEEPQ_FREEPERSLOT free records exist, shifting it
 * toward the tail past in-use entries) or removed and returned to the
 * objcache.
 */
345 static __inline
346 void
347 _sleepq_wait_complete(struct thread *td, struct sleepqueue_chain *sc,
348 struct sleepqueue_wchan *wc)
350 struct sleepqueue_wchan *wcn;
352 td->td_sqwc = NULL;
353 spin_lock(&sc->sc_spin);
354 --wc->wc_blocked[td->td_sqqueue];
355 if (--wc->wc_refs == 0) {
/* last reference: mark the record free */
356 wc->wc_wchan = NULL;
357 wc->wc_type = 0;
358 ++sc->sc_free_count;
359 if (sc->sc_free_count <= SLEEPQ_FREEPERSLOT) {
/* keep cached; if the next entry is active, move this one tailward */
360 wcn = TAILQ_NEXT(wc, wc_entry);
361 if (wcn && wcn->wc_wchan) {
362 TAILQ_REMOVE(&sc->sc_wchead, wc, wc_entry);
363 TAILQ_INSERT_TAIL(&sc->sc_wchead, wc, wc_entry);
365 spin_unlock(&sc->sc_spin);
366 } else {
/* cache full: unlink and return the record to the objcache */
367 --sc->sc_free_count;
368 TAILQ_REMOVE(&sc->sc_wchead, wc, wc_entry);
369 spin_unlock(&sc->sc_spin);
370 objcache_put(sleepq_wc_cache, wc);
372 } else {
373 spin_unlock(&sc->sc_spin);
378 * Various combinations of wait until unblocked, with and without
379 * signaling and timeouts.
381 * The wchan lock must be held by the caller and will be released upon
382 * return.
384 * NOTE: tsleep_interlock() was already issued by sleepq_add().
386 void
387 sleepq_wait(const void *wchan, int pri)
389 struct sleepqueue_chain *sc;
390 struct sleepqueue_wchan *wc;
391 struct thread *td;
393 td = curthread;
394 wc = td->td_sqwc;
395 KKASSERT(wc != NULL && wc->wc_wchan == wchan);
396 sc = wc->wc_sc;
398 (void)_sleepq_wait_begin(td, sc, wc, 0, PINTERLOCKED);
399 _sleepq_wait_complete(td, sc, wc);
403 sleepq_wait_sig(const void *wchan, int pri)
405 struct sleepqueue_chain *sc;
406 struct sleepqueue_wchan *wc;
407 struct thread *td;
408 int ret;
410 td = curthread;
411 wc = td->td_sqwc;
412 KKASSERT(wc != NULL && wc->wc_wchan == wchan);
413 sc = wc->wc_sc;
415 ret = _sleepq_wait_begin(td, sc, wc, 0, PINTERLOCKED | PCATCH);
416 _sleepq_wait_complete(td, sc, wc);
418 return ret;
422 sleepq_timedwait(const void *wchan, int pri)
424 struct sleepqueue_chain *sc;
425 struct sleepqueue_wchan *wc;
426 struct thread *td;
427 int ret;
429 td = curthread;
430 wc = td->td_sqwc;
431 KKASSERT(wc != NULL && wc->wc_wchan == wchan);
432 sc = wc->wc_sc;
434 ret = _sleepq_wait_begin(td, sc, wc, td->td_sqtimo, PINTERLOCKED);
435 _sleepq_wait_complete(td, sc, wc);
437 return ret;
441 sleepq_timedwait_sig(const void *wchan, int pri)
443 struct sleepqueue_chain *sc;
444 struct sleepqueue_wchan *wc;
445 struct thread *td;
446 int ret;
448 td = curthread;
449 wc = td->td_sqwc;
450 KKASSERT(wc != NULL && wc->wc_wchan == wchan);
451 sc = wc->wc_sc;
453 ret = _sleepq_wait_begin(td, sc, wc, td->td_sqtimo,
454 PINTERLOCKED | PCATCH);
455 _sleepq_wait_complete(td, sc, wc);
457 return ret;
461 * Returns the type of sleepqueue given a waitchannel. Returns 0 if
462  * sleepq_add() has not been called on the wchan.
464 * wchan must be locked.
467 sleepq_type(const void *wchan)
469 struct sleepqueue_wchan *wc;
470 int type;
472 wc = sleepq_wclookup(wchan);
473 type = wc->wc_type;
475 return type;
479 * Wakeup one thread sleeping on a wait channel.
481 * DragonFly: Presumably callers also deal with multiple wakeups,
482  * our wakeup_domain_one() function is a bit non-deterministic.
484 * Nominally returns whether the swapper should be woken up which
485 * is not applicable to DragonFlyBSD.
488 sleepq_signal(const void *wchan, int flags, int pri, int queue)
490 int domain;
492 domain = PDOMAIN_FBSD0 + queue * PDOMAIN_FBSDINC;
493 wakeup_domain_one(wchan, domain);
495 return 0;
499 * Resume everything sleeping on the specified wait channel and queue.
501 * Nominally returns whether the swapper should be woken up which is not
502 * applicable to DragonFlyBSD.
505 sleepq_broadcast(const void *wchan, int flags, int pri, int queue)
507 int domain;
509 domain = PDOMAIN_FBSD0 + queue * PDOMAIN_FBSDINC;
510 wakeup_domain(wchan, domain);
512 return 0;