Make smp_invltlb() a NOP.
[dragonfly/vkernel-mp.git] / sys/platform/vkernel/i386/mp.c
blob 36b6c5be45540a4af8225bb933d065136cf7f6c9

/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/platform/vkernel/i386/mp.c,v 1.1 2007/06/18 18:57:12 josepht Exp $
 */

#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/memrange.h>
#include <sys/tls.h>
#include <sys/types.h>

#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>

#include <machine/cpufunc.h>
#include <machine/globaldata.h>
#include <machine/md_var.h>
#include <machine/pmap.h>
#include <machine/smp.h>
#include <machine/tls.h>

#include <unistd.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>

extern pt_entry_t *KPTphys;

volatile u_int stopped_cpus;
cpumask_t smp_active_mask = 0;		/* which cpus are ready for IPIs etc? */
static int boot_address;
static cpumask_t smp_startup_mask = 1;	/* which cpus have been started */
int mp_naps;				/* # of Application processors */
static int mp_finish;

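/*
 * Relationship between the two masks (inferred from ap_init() below):
 * smp_startup_mask records which cpus have begun executing at all, while
 * smp_active_mask records which cpus are far enough along to process IPIs.
 * An AP sets its startup bit as soon as it is running and its active bit
 * only once it can safely receive IPI messages.
 */
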
/* function prototypes XXX these should go elsewhere */
void bootstrap_idle(void);
void single_cpu_ipi(int, int, int);
void selected_cpu_ipi(u_int, int, int);
#if 0
void ipi_handler(int);
#endif

pt_entry_t *SMPpt;

/* AP uses this during bootstrap.  Do not staticize. */
char *bootSTK;
static int bootAP;

/* XXX these need to go into the appropriate header file */
static int start_all_aps(u_int);
void init_secondary(void);
void *start_ap(void *);

/*
 * Get SMP fully working before we start initializing devices.
 */
static
void
ap_finish(void)
{
        int i;
        cpumask_t ncpus_mask = 0;

        for (i = 1; i <= ncpus; i++)
                ncpus_mask |= (1 << i);

        mp_finish = 1;
        if (bootverbose)
                kprintf("Finish MP startup\n");

        rel_mplock();

        /* Wait until every started AP has reported in as active. */
        while (smp_active_mask != smp_startup_mask)
                cpu_lfence();

        while (try_mplock() == 0)
                ;
        if (bootverbose)
                kprintf("Active CPU Mask: %08x\n", smp_active_mask);
}

SYSINIT(finishsmp, SI_BOOT2_FINISH_SMP, SI_ORDER_FIRST, ap_finish, NULL)

void *
start_ap(void *arg)
{
        init_secondary();

        bootstrap_idle();

        return(NULL);   /* NOTREACHED */
}

/* storage for AP thread IDs */
pthread_t ap_tids[MAXCPU];

void
mp_start(void)
{
        int shift;

        /* XXX testing 2 cpus */
        ncpus = 2;

        mp_naps = ncpus - 1;

        /* ncpus2 -- ncpus rounded down to the nearest power of 2 */
        for (shift = 0; (1 << shift) <= ncpus; ++shift)
                ;
        --shift;
        ncpus2_shift = shift;
        ncpus2 = 1 << shift;
        ncpus2_mask = ncpus2 - 1;

        /* ncpus_fit -- ncpus rounded up to the nearest power of 2 */
        if ((1 << shift) < ncpus)
                ++shift;
        ncpus_fit = 1 << shift;
        ncpus_fit_mask = ncpus_fit - 1;

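        /*
         * Worked example: with ncpus = 2 the loop above exits with shift
         * == 2, which --shift reduces to 1, so ncpus2 = 2 and ncpus2_mask
         * = 1; (1 << 1) is not less than ncpus, so ncpus_fit = 2 as well.
         * With ncpus = 3 the same code yields ncpus2 = 2 but ncpus_fit = 4.
         */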
        start_all_aps(boot_address);
}

void
mp_announce(void)
{
        int x;

        kprintf("DragonFly/MP: Multiprocessor\n");
        kprintf(" cpu0 (BSP)\n");

        for (x = 1; x <= mp_naps; ++x)
                kprintf(" cpu%d (AP)\n", x);
}

void
forward_fastint_remote(void *arg)
{
        panic("XXX forward_fastint_remote()");
}

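/*
 * Send an IPI to a single cpu.  Every vkernel cpu is backed by a host
 * pthread (see ap_tids[] and start_all_aps()), so an IPI is modeled by
 * delivering SIGUSR1 to the target cpu's thread with pthread_kill().
 * The SIGUSR1 handler, which is installed elsewhere and not shown in
 * this file, is presumably what drains the target's IPI message queue.
 */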
void
cpu_send_ipiq(int dcpu)
{
        kprintf("cpu_send_ipiq(%d), smp_active_mask = %x\n",
                dcpu, smp_active_mask);
        if ((1 << dcpu) & smp_active_mask) {
                if (pthread_kill(ap_tids[dcpu], SIGUSR1) != 0)
                        panic("pthread_kill failed in cpu_send_ipiq");
        }
#if 0
        panic("XXX cpu_send_ipiq()");
#endif
}

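/*
 * Per the commit message, smp_invltlb() is intentionally a NOP on the
 * vkernel.  All virtual cpus are threads within a single host process
 * sharing one host address space, so a cross-cpu TLB shootdown here is
 * presumably unnecessary, or handled by the vkernel pmap code instead.
 */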
void
smp_invltlb(void)
{
}

void
single_cpu_ipi(int cpu, int vector, int delivery_mode)
{
        kprintf("XXX single_cpu_ipi\n");
}

void
selected_cpu_ipi(u_int target, int vector, int delivery_mode)
{
        crit_enter();
        while (target) {
                int n = bsfl(target);
                target &= ~(1 << n);
                single_cpu_ipi(n, vector, delivery_mode);
        }
        crit_exit();
}

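/*
 * Stopping and restarting cpus maps onto host job control: stop_cpus()
 * sends SIGSTOP to each target cpu's pthread and restart_cpus() sends
 * SIGCONT.  Caveat (a host property, not stated in this file): POSIX
 * stop signals act on the whole process rather than a single thread,
 * so this is at best an approximation.
 */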
int
stop_cpus(u_int map)
{
        map &= smp_active_mask;

        crit_enter();
        while (map) {
                int n = bsfl(map);
                map &= ~(1 << n);
                if (pthread_kill(ap_tids[n], SIGSTOP) != 0)
                        panic("stop_cpus: pthread_kill failed");
        }
        crit_exit();
#if 0
        panic("XXX stop_cpus()");
#endif

        return(1);
}

int
restart_cpus(u_int map)
{
        map &= smp_active_mask;

        crit_enter();
        while (map) {
                int n = bsfl(map);
                map &= ~(1 << n);
                if (pthread_kill(ap_tids[n], SIGCONT) != 0)
                        panic("restart_cpus: pthread_kill failed");
        }
        crit_exit();
#if 0
        panic("XXX restart_cpus()");
#endif

        return(1);
}

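/*
 * AP bring-up handshake, pieced together from this file.  Note that
 * bootstrap_idle() lives elsewhere and is assumed here to be what ends
 * up calling ap_init():
 *
 *  1. start_all_aps() creates the AP's pthread, which runs start_ap() ->
 *     init_secondary() -> bootstrap_idle().
 *  2. The AP sets its bit in smp_startup_mask; start_all_aps() spins on
 *     exactly that bit before starting the next AP.
 *  3. The AP spins until ap_finish() sets mp_finish on the BSP, then
 *     acquires the MP lock.
 *  4. The AP sets its bit in smp_active_mask once it can process IPIs,
 *     which lets ap_finish() complete.
 */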
void
ap_init(void)
{
        /*
         * Adjust smp_startup_mask to signal the BSP that we have started
         * up successfully.  Note that we do not yet hold the BGL.  The BSP
         * is waiting for our signal.
         *
         * We can't set our bit in smp_active_mask yet because we are holding
         * interrupts physically disabled and remote cpus could deadlock
         * trying to send us an IPI.
         */
        smp_startup_mask |= 1 << mycpu->gd_cpuid;
        cpu_mfence();

        /*
         * Interlock for finalization.  Wait until mp_finish is non-zero,
         * then get the MP lock.
         *
         * Note: We are in a critical section.
         *
         * Note: We have to synchronize td_mpcount to our desired MP state
         * before calling cpu_try_mplock().
         *
         * Note: we are the idle thread, we can only spin.
         *
         * Note: The load fence is memory volatile and prevents the compiler
         * from improperly caching mp_finish, and the cpu from improperly
         * caching it.
         */
        while (mp_finish == 0) {
                cpu_lfence();
        }
        ++curthread->td_mpcount;
        while (cpu_try_mplock() == 0)
                ;

        /* BSP may have changed PTD while we're waiting for the lock */
        cpu_invltlb();

        /* Build our map of 'other' CPUs. */
        mycpu->gd_other_cpus = smp_startup_mask & ~(1 << mycpu->gd_cpuid);

        kprintf("SMP: AP CPU #%d Launched!\n", mycpu->gd_cpuid);

        /* Set memory range attributes for this CPU to match the BSP */
        mem_range_AP_init();

        /*
         * Once we go active we must process any IPIQ messages that may
         * have been queued, because no actual IPI will occur until we
         * set our bit in the smp_active_mask.  If we don't, the IPI
         * message interlock could be left set which would also prevent
         * further IPIs.
         *
         * The idle loop doesn't expect the BGL to be held and while
         * lwkt_switch() normally cleans things up, this is a special case
         * because we are returning almost directly into the idle loop.
         *
         * The idle thread is never placed on the runq, make sure
         * nothing we've done put it there.
         */
        KKASSERT(curthread->td_mpcount == 1);
        smp_active_mask |= 1 << mycpu->gd_cpuid;

        mdcpu->gd_fpending = 0;
        mdcpu->gd_ipending = 0;
        initclocks_pcpu();      /* clock interrupts (via IPIs) */
        lwkt_process_ipiq();

        /*
         * Releasing the mp lock lets the BSP finish up the SMP init
         */
        rel_mplock();
        KKASSERT((curthread->td_flags & TDF_RUNQ) == 0);
}

void
init_secondary(void)
{
        int myid = bootAP;
        struct mdglobaldata *md;
        struct privatespace *ps;

        ps = &CPU_prvspace[myid];

        ps->mdglobaldata.mi.gd_prvspace = ps;

        /*
         * Setup the %fs for cpu #n.  The mycpu macro works after this
         * point.
         */
        tls_set_fs(&CPU_prvspace[myid], sizeof(struct privatespace));

        md = mdcpu;     /* loaded through %fs:0 (mdglobaldata.mi.gd_prvspace) */

        md->gd_common_tss.tss_esp0 = 0; /* not used until after switch */
        md->gd_common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
        md->gd_common_tss.tss_ioopt = (sizeof md->gd_common_tss) << 16;

        /*
         * Set to a known state:
         * Set by mpboot.s: CR0_PG, CR0_PE
         * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
         */
}

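/*
 * Bring up the APs one at a time.  For each AP this allocates and maps
 * backing pages for the cpu's privatespace and idle stack, initializes
 * its globaldata and IPI queue, creates the host pthread that represents
 * the cpu, and spins until the AP has set its bit in smp_startup_mask.
 */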
static int
start_all_aps(u_int boot_addr)
{
        int x, i;
        int *arg;
        struct mdglobaldata *gd;
        struct privatespace *ps;
        vm_page_t m;
        vm_offset_t va;
#if 0
        struct lwp_params params;
#endif

#if 0
        /* store the mappings so we can populate gd_CMAP[0-2] and gd_PMAP3 */
        vpte_t *SMPpt2[4];
#endif

        /*
         * needed for ipis to initial thread
         * FIXME: rename ap_tids?
         */
        ap_tids[0] = pthread_self();

        for (x = 1; x <= mp_naps; x++) {
                /* Allocate space for the CPU's private space. */
                va = (vm_offset_t)&CPU_prvspace[x];
                for (i = 0; i < sizeof(struct mdglobaldata); i += PAGE_SIZE) {
                        va = (vm_offset_t)&CPU_prvspace[x].mdglobaldata + i;
                        m = vm_page_alloc(&kernel_object, va, VM_ALLOC_SYSTEM);
                        pmap_kenter_quick(va, m->phys_addr);
#if 0
                        KKASSERT(i < 4);
                        SMPpt2[i] = pmap_kpte(va);
#endif
                }

                for (i = 0; i < sizeof(CPU_prvspace[x].idlestack); i += PAGE_SIZE) {
                        va = (vm_offset_t)&CPU_prvspace[x].idlestack + i;
                        m = vm_page_alloc(&kernel_object, va, VM_ALLOC_SYSTEM);
                        pmap_kenter_quick(va, m->phys_addr);
                }

                gd = &CPU_prvspace[x].mdglobaldata;     /* official location */
                bzero(gd, sizeof(*gd));
                gd->mi.gd_prvspace = ps = &CPU_prvspace[x];

                /* prime data page for it to use */
                mi_gdinit(&gd->mi, x);
                cpu_gdinit(gd, x);

#if 0
                gd->gd_CMAP1 = SMPpt2[0];
                gd->gd_CMAP2 = SMPpt2[1];
                gd->gd_CMAP3 = SMPpt2[2];
                gd->gd_PMAP1 = SMPpt2[3];
                gd->gd_CADDR1 = ps->CPAGE1;
                gd->gd_CADDR2 = ps->CPAGE2;
                gd->gd_CADDR3 = ps->CPAGE3;
                gd->gd_PADDR1 = (unsigned *)ps->PPAGE1;
#endif

                gd->mi.gd_ipiq = (void *)kmem_alloc(&kernel_map,
                                 sizeof(lwkt_ipiq) * (mp_naps + 1));
                bzero(gd->mi.gd_ipiq, sizeof(lwkt_ipiq) * (mp_naps + 1));

                /*
                 * Setup the AP boot stack
                 */
                bootSTK = &ps->idlestack[UPAGES * PAGE_SIZE / 2];
                bootAP = x;

                /* build our map of 'other' CPUs */
                mycpu->gd_other_cpus = smp_startup_mask &
                                       ~(1 << mycpu->gd_cpuid);
                mycpu->gd_ipiq = (void *)kmem_alloc(&kernel_map,
                                 sizeof(lwkt_ipiq) * ncpus);
                bzero(mycpu->gd_ipiq, sizeof(lwkt_ipiq) * ncpus);

                /*
                 * Set up the AP's lwp; this is the 'cpu'
                 */
                arg = (int *)kmem_alloc(&kernel_map, sizeof(int));
                *arg = x;

                pthread_create(&ap_tids[x], NULL, start_ap, arg);

                /* XXX spin until the AP has started */
                while ((smp_startup_mask & (1 << x)) == 0)
                        cpu_lfence();
        }

        /* XXX hack, sleep for a second to let the APs start up */
        sleep(1);

        /* set cpu0 active so ap_finish() can run */
        smp_active_mask |= 1;

        return(ncpus - 1);
}