More minor IPI work.
[dragonfly/vkernel-mp.git] / lib / libcaps / uthread.c
blob f7df0170a0dcfe1f03b59b0517e087c247856de6
1 /*
2 * Copyright (c) 2003 Galen Sampson <galen_sampson@yahoo.com>
3 * All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
26 * $DragonFly: src/lib/libcaps/uthread.c,v 1.5 2004/07/29 10:55:02 dillon Exp $
30 * Each cpu in a system has its own self-contained light weight kernel
31 * thread scheduler, which means that generally speaking we only need
32 * to use a critical section to avoid problems. Foreign thread
33 * scheduling is queued via (async) IPIs.
36 #include "defs.h"
38 void cpu_lwkt_switch(thread_t);
41 * system message port for the system call interface
43 lwkt_port_t sysport;
/*
 * Per-cpu idle loop.  Runs as this cpu's idle thread: halts the (virtual)
 * cpu when there is nothing to run, and wakes the 'main' thread when it is
 * the only non-system thread left anywhere.  Never returns.
 *
 * dummy is unused (standard thread-entry signature).
 */
static void
lwkt_idleloop(void *dummy)
{
    globaldata_t gd = mycpu;

    DBPRINTF(("idlestart cpu %d pri %d (should be < 32) mpcount %d (should be 0)\n",
	gd->gd_cpuid, curthread->td_pri, curthread->td_mpcount));

    /* Record the host pid backing this virtual cpu. */
    gd->gd_pid = getpid();

    for (;;) {
	/*
	 * If only our 'main' thread is left, schedule it.
	 */
	if (gd->gd_num_threads == gd->gd_sys_threads) {
	    int i;
	    globaldata_t tgd;

	    /*
	     * Only wake main_td once every OTHER cpu is also down to
	     * system threads; bail out of the scan on the first busy cpu.
	     */
	    for (i = 0; i < ncpus; ++i) {
		tgd = globaldata_find(i);
		if (tgd->gd_num_threads != tgd->gd_sys_threads)
		    break;
	    }
	    /* TDF_RUNQ check avoids re-queueing main_td if already runnable. */
	    if (i == ncpus && (main_td.td_flags & TDF_RUNQ) == 0)
		lwkt_schedule(&main_td);
	}

	/*
	 * Wait for an interrupt, aka wait for a signal or an upcall to
	 * occur, then switch away.
	 */
	crit_enter();
	if (gd->gd_runqmask || (curthread->td_flags & TDF_IDLE_NOHLT)) {
	    /* Work pending (or halt suppressed once); consume the flag. */
	    curthread->td_flags &= ~TDF_IDLE_NOHLT;
	} else {
	    printf("cpu %d halting\n", gd->gd_cpuid);
	    cpu_halt();
	    printf("cpu %d resuming\n", gd->gd_cpuid);
	}
	crit_exit();
	lwkt_switch();
    }
}
90 * Userland override of lwkt_init_thread. The only difference is
91 * the manipulation of gd->gd_num_threads.
93 static void
94 lwkt_init_thread_remote(void *arg)
96 thread_t td = arg;
97 globaldata_t gd = td->td_gd;
99 printf("init_thread_remote td %p on cpu %d\n", td, gd->gd_cpuid);
101 TAILQ_INSERT_TAIL(&gd->gd_tdallq, td, td_allq);
102 ++gd->gd_num_threads;
103 if (td->td_flags & TDF_SYSTHREAD)
104 ++gd->gd_sys_threads;
107 void
108 lwkt_init_thread(thread_t td, void *stack, int stksize, int flags,
109 struct globaldata *gd)
111 bzero(td, sizeof(struct thread));
112 td->td_kstack = stack;
113 td->td_kstack_size = stksize;
114 td->td_flags |= flags;
115 td->td_gd = gd;
116 td->td_pri = TDPRI_KERN_DAEMON + TDPRI_CRIT;
117 lwkt_initport(&td->td_msgport, td);
118 cpu_init_thread(td);
119 if (td == &gd->gd_idlethread) {
120 TAILQ_INSERT_TAIL(&gd->gd_tdallq, td, td_allq);
121 /* idle thread is not counted in gd_num_threads */
122 } else if (gd == mycpu) {
123 crit_enter();
124 TAILQ_INSERT_TAIL(&gd->gd_tdallq, td, td_allq);
125 ++gd->gd_num_threads;
126 if (td->td_flags & TDF_SYSTHREAD)
127 ++gd->gd_sys_threads;
128 crit_exit();
129 } else {
130 lwkt_send_ipiq(gd, lwkt_init_thread_remote, td);
/*
 * Userland override of lwkt_exit.  The only difference is
 * the manipulation of gd->gd_num_threads;
 *
 * Terminates the current thread: deschedules it, removes it from the
 * per-cpu accounting, and places it on the free queue for reuse.
 * Does not return (cpu_thread_exit switches away for the last time),
 * which is why the crit_enter() below is intentionally never paired
 * with a crit_exit().
 */
void
lwkt_exit(void)
{
    thread_t td = curthread;
    globaldata_t gd = mycpu;

    if (td->td_flags & TDF_VERBOSE)
	printf("kthread %p %s has exited\n", td, td->td_comm);
    crit_enter();
    lwkt_deschedule_self(td);
    /* Recycle the thread structure via the per-cpu free queue. */
    ++gd->gd_tdfreecount;
    if (td->td_flags & TDF_SYSTHREAD)
	--gd->gd_sys_threads;
    --gd->gd_num_threads;
    TAILQ_INSERT_TAIL(&gd->gd_tdfreeq, td, td_threadq);
    cpu_thread_exit();
}
157 * Userland override of lwkt_gdinit. Called from mi_gdinit(). Note that
158 * critical sections do not work until lwkt_init_thread() is called. The
159 * idle thread will be left in a critical section.
161 void
162 lwkt_gdinit(struct globaldata *gd)
164 int i;
166 for (i = 0; i < sizeof(gd->gd_tdrunq)/sizeof(gd->gd_tdrunq[0]); ++i)
167 TAILQ_INIT(&gd->gd_tdrunq[i]);
168 gd->gd_runqmask = 0;
169 gd->gd_curthread = &gd->gd_idlethread;
170 TAILQ_INIT(&gd->gd_tdallq);
172 /* Set up this cpu's idle thread */
173 lwkt_init_thread(&gd->gd_idlethread,
174 libcaps_alloc_stack(LWKT_THREAD_STACK), LWKT_THREAD_STACK,
175 0, gd);
176 cpu_set_thread_handler(&gd->gd_idlethread, lwkt_exit, lwkt_idleloop, NULL);
/*
 * Start threading.
 *
 * Hands the cpu over to the scheduler for the first time; td is unused
 * (presumably kept for signature compatibility with callers — verify).
 */
void
lwkt_start_threading(thread_t td)
{
    lwkt_switch();
}