/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/memrange.h>
#include <sys/types.h>

#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>

#include <sys/mplock2.h>

#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/globaldata.h>
#include <machine/md_var.h>
#include <machine/pmap.h>
#include <machine/smp.h>
#include <machine/tls.h>
#include <machine/param.h>

/*
 * The vkernel runs as a host user process, so pthreads, signals and
 * mmap come from the host's libc headers (needed below for
 * pthread_kill(), mmap(), etc).
 */
#include <unistd.h>
#include <pthread.h>
#include <signal.h>
#include <sys/mman.h>
extern pt_entry_t *KPTphys;

extern int vmm_enabled;

volatile cpumask_t stopped_cpus;
/* which cpus are ready for IPIs etc? */
cpumask_t	smp_active_mask = CPUMASK_INITIALIZER_ONLYONE;
static int	boot_address;
/* which cpus have been started */
static cpumask_t smp_startup_mask = CPUMASK_INITIALIZER_ONLYONE;
static int	mp_finish;	/* set by the BSP once SMP init may finish */
int		mp_naps;	/* # of application processors */

/* Local data for detecting CPU TOPOLOGY */
static int core_bits = 0;
static int logical_CPU_bits = 0;
/* function prototypes XXX these should go elsewhere */
void bootstrap_idle(void);
void single_cpu_ipi(int, int, int);
void selected_cpu_ipi(cpumask_t, int, int);
void ipi_handler(int);

/* AP uses this during bootstrap.  Do not staticize. */
char *bootSTK;
static int bootAP;

/* XXX these need to go into the appropriate header file */
static int	start_all_aps(u_int);
void		init_secondary(void);
void		*start_ap(void *);
/*
 * Get SMP fully working before we start initializing devices.
 */
static void
ap_finish(void)
{
	mp_finish = 1;
	kprintf("Finish MP startup\n");

	/* build our map of 'other' CPUs */
	mycpu->gd_other_cpus = smp_startup_mask;
	CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid);

	/*
	 * Let the other cpus finish initializing and build their map
	 * of 'other' CPUs, then wait for them all to become active.
	 */
	rel_mplock();
	while (CPUMASK_CMPMASKNEQ(smp_active_mask, smp_startup_mask)) {
		cpu_lfence();
	}

	while (try_mplock() == 0)
		;
	kprintf("Active CPU Mask: %08lx\n",
		(long)CPUMASK_LOWMASK(smp_active_mask));
}

SYSINIT(finishsmp, SI_BOOT2_FINISH_SMP, SI_ORDER_FIRST, ap_finish, NULL);
/*
 * AP pthread entry point: initialize the cpu and drop into the
 * per-cpu idle loop, never to return.
 */
void *
start_ap(void *arg __unused)
{
	init_secondary();
	bootstrap_idle();

	return(NULL); /* NOTREACHED */
}
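
/*
 * vkernel cpu model recap: each virtual cpu is a host pthread, created
 * below in start_all_aps().  "Interrupts" arrive as signals (SIGUSR1
 * kicks the IPI queue, SIGXCPU implements stop/restart), so masking
 * signals is the vkernel analogue of physically disabling interrupts.
 */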
/* storage for AP thread IDs */
pthread_t ap_tids[MAXCPU];
void
mp_start(void)
{
	size_t ipiq_size;
	int shift;

	/* ncpus2 -- ncpus rounded down to the nearest power of 2 */
	for (shift = 0; (1 << shift) <= ncpus; ++shift)
		;
	--shift;
	ncpus2_shift = shift;
	ncpus2 = 1 << shift;
	ncpus2_mask = ncpus2 - 1;

	/* ncpus_fit -- ncpus rounded up to the nearest power of 2 */
	if ((1 << shift) < ncpus)
		++shift;
	ncpus_fit = 1 << shift;
	ncpus_fit_mask = ncpus_fit - 1;
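
	/*
	 * Worked example (illustrative): with ncpus == 6 the loop above
	 * exits with shift == 3 (since 1 << 3 == 8 > 6), so after --shift:
	 *
	 *	ncpus2      = 1 << 2 = 4,  ncpus2_mask    = 3  (rounded down)
	 *
	 * and because (1 << 2) < 6 the shift is bumped back up to 3:
	 *
	 *	ncpus_fit   = 1 << 3 = 8,  ncpus_fit_mask = 7  (rounded up)
	 */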
	/*
	 * cpu0 initialization
	 */
	ipiq_size = sizeof(struct lwkt_ipiq) * ncpus;
	mycpu->gd_ipiq = (void *)kmem_alloc(&kernel_map, ipiq_size,
					    VM_SUBSYS_IPIQ);
	bzero(mycpu->gd_ipiq, ipiq_size);

	/* launch the application processors */
	start_all_aps(boot_address);
}
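
/*
 * Layout note (from the lwkt ipiq design, see sys/kern/lwkt_ipiq.c):
 * every cpu owns an array of per-sender lwkt_ipiq FIFOs, which is why
 * gd_ipiq above is sized as one struct lwkt_ipiq per cpu.  A sender S
 * queues its message into D's array and then kicks D once with
 * cpu_send_ipiq(D).
 */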
/*
 * Print various information about the SMP system hardware and setup.
 */
void
mp_announce(void)
{
	int x;

	kprintf("DragonFly/MP: Multiprocessor\n");
	kprintf(" cpu0 (BSP)\n");

	for (x = 1; x <= mp_naps; ++x)
		kprintf(" cpu%d (AP)\n", x);
}
void
cpu_send_ipiq(int dcpu)
{
	if (CPUMASK_TESTBIT(smp_active_mask, dcpu)) {
		if (pthread_kill(ap_tids[dcpu], SIGUSR1) != 0)
			panic("pthread_kill failed in cpu_send_ipiq");
	}
#if 0
	panic("XXX cpu_send_ipiq()");
#endif
}
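
/*
 * Receive side (a sketch, not code from this file): each AP thread is
 * expected to have a SIGUSR1 handler installed elsewhere in the
 * platform code, conceptually along the lines of:
 *
 *	static void
 *	ipisig(int signo)		// hypothetical handler name
 *	{
 *		ipi_handler(signo);	// prototyped near the top of this file
 *	}
 *	...
 *	struct sigaction sa;
 *	bzero(&sa, sizeof(sa));
 *	sa.sa_handler = ipisig;
 *	sigaction(SIGUSR1, &sa, NULL);
 *
 * Only the pthread_kill() sender above is authoritative here.
 */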
void
single_cpu_ipi(int cpu, int vector, int delivery_mode)
{
	kprintf("XXX single_cpu_ipi\n");
}
void
selected_cpu_ipi(cpumask_t target, int vector, int delivery_mode)
{
	while (CPUMASK_TESTNZERO(target)) {
		int n = BSFCPUMASK(target);

		CPUMASK_NANDBIT(target, n);
		single_cpu_ipi(n, vector, delivery_mode);
	}
}
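
/*
 * Iteration note: BSFCPUMASK() yields the lowest set bit, so a target
 * mask of { cpu1, cpu3 } delivers to cpu 1 and then cpu 3; each
 * CPUMASK_NANDBIT() clears the bit just serviced from the local copy
 * until the mask drains to zero.
 */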
int
stop_cpus(cpumask_t map)
{
	CPUMASK_ANDMASK(map, smp_active_mask);

	while (CPUMASK_TESTNZERO(map)) {
		int n = BSFCPUMASK(map);

		CPUMASK_NANDBIT(map, n);
		ATOMIC_CPUMASK_ORBIT(stopped_cpus, n);
		if (pthread_kill(ap_tids[n], SIGXCPU) != 0)
			panic("stop_cpus: pthread_kill failed");
	}
	return(1);
#if 0
	panic("XXX stop_cpus()");
#endif
}
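
/*
 * Typical pairing (illustrative, callers live elsewhere):
 *
 *	stop_cpus(mycpu->gd_other_cpus);
 *	... inspect or modify global state ...
 *	restart_cpus(stopped_cpus);
 *
 * The SIGXCPU handler (installed elsewhere in the platform code) is
 * presumably expected to spin while this cpu's bit remains set in
 * stopped_cpus; restart_cpus() below clears the bit and signals again.
 */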
int
restart_cpus(cpumask_t map)
{
	CPUMASK_ANDMASK(map, smp_active_mask);

	while (CPUMASK_TESTNZERO(map)) {
		int n = BSFCPUMASK(map);

		CPUMASK_NANDBIT(map, n);
		ATOMIC_CPUMASK_NANDBIT(stopped_cpus, n);
		if (pthread_kill(ap_tids[n], SIGXCPU) != 0)
			panic("restart_cpus: pthread_kill failed");
	}
	return(1);
#if 0
	panic("XXX restart_cpus()");
#endif
}
/*
 * Called from the idle loop by each AP the first time through; finish
 * bringing the cpu online and interlock with the BSP.
 */
void
ap_init(void)
{
	/*
	 * Adjust smp_startup_mask to signal the BSP that we have started
	 * up successfully.  Note that we do not yet hold the BGL.  The BSP
	 * is waiting for our signal.
	 *
	 * We can't set our bit in smp_active_mask yet because we are holding
	 * interrupts physically disabled and remote cpus could deadlock
	 * trying to send us an IPI.
	 */
	ATOMIC_CPUMASK_ORBIT(smp_startup_mask, mycpu->gd_cpuid);

	/*
	 * Interlock for finalization.  Wait until mp_finish is non-zero,
	 * then get the MP lock.
	 *
	 * Note: We are in a critical section.
	 *
	 * Note: we are the idle thread, we can only spin.
	 *
	 * Note: The load fence is memory volatile and prevents the compiler
	 * from improperly caching mp_finish, and the cpu from improperly
	 * caching it.
	 */
	while (mp_finish == 0) {
		cpu_lfence();
	}
	while (try_mplock() == 0)
		;

	/* BSP may have changed PTD while we're waiting for the lock */
	cpu_invltlb();

	/* Build our map of 'other' CPUs. */
	mycpu->gd_other_cpus = smp_startup_mask;
	CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid);

	kprintf("SMP: AP CPU #%d Launched!\n", mycpu->gd_cpuid);

	/* Set memory range attributes for this CPU to match the BSP */
	mem_range_AP_init();

	/*
	 * Once we go active we must process any IPIQ messages that may
	 * have been queued, because no actual IPI will occur until we
	 * set our bit in the smp_active_mask.  If we don't the IPI
	 * message interlock could be left set which would also prevent
	 * further IPIs.
	 *
	 * The idle loop doesn't expect the BGL to be held and while
	 * lwkt_switch() normally cleans things up this is a special case
	 * because we are returning almost directly into the idle loop.
	 *
	 * The idle thread is never placed on the runq, make sure
	 * nothing we've done put it there.
	 */
	KKASSERT(get_mplock_count(curthread) == 1);
	ATOMIC_CPUMASK_ORBIT(smp_active_mask, mycpu->gd_cpuid);

	mdcpu->gd_fpending = 0;
	mdcpu->gd_ipending = 0;
	initclocks_pcpu();	/* clock interrupts (via IPIs) */

	/*
	 * Since we may have cleaned up the interrupt triggers, manually
	 * process any pending IPIs before exiting our critical section.
	 * Once the critical section has exited, normal interrupt processing
	 * may occur.
	 */
	atomic_swap_int(&mycpu->gd_npoll, 0);
	lwkt_process_ipiq();

	/*
	 * Releasing the mp lock lets the BSP finish up the SMP init
	 */
	rel_mplock();
	KKASSERT((curthread->td_flags & TDF_RUNQ) == 0);
}
void
init_secondary(void)
{
	int myid = bootAP;
	struct mdglobaldata *md;
	struct privatespace *ps;

	ps = &CPU_prvspace[myid];

	KKASSERT(ps->mdglobaldata.mi.gd_prvspace == ps);

	/*
	 * Setup the %gs for cpu #n.  The mycpu macro works after this
	 * point.  Note that %fs is used by pthreads.
	 */
	tls_set_gs(&CPU_prvspace[myid], sizeof(struct privatespace));
	md = mdcpu;	/* loaded through %gs:0 (mdglobaldata.mi.gd_prvspace) */

	md->gd_common_tss.tss_rsp0 = 0;	/* not used until after switch */
	//md->gd_common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
	//md->gd_common_tss.tss_ioopt = (sizeof md->gd_common_tss) << 16;

	/*
	 * Set to a known state:
	 * Set by mpboot.s: CR0_PG, CR0_PE
	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
}
static int
start_all_aps(u_int boot_addr)
{
	struct mdglobaldata *gd;
	struct privatespace *ps;
	vm_page_t m;
	vm_offset_t va;
	pthread_attr_t attr;
	size_t ipiq_size;
	void *stack;
	int x, i;
	struct lwp_params params;

	/*
	 * needed for ipis to initial thread
	 * FIXME: rename ap_tids?
	 */
	ap_tids[0] = pthread_self();
	pthread_attr_init(&attr);

	vm_object_hold(&kernel_object);
	for (x = 1; x <= mp_naps; x++)
	{
		/* Allocate space for the CPU's private space. */
		for (i = 0; i < sizeof(struct mdglobaldata); i += PAGE_SIZE) {
			va = (vm_offset_t)&CPU_prvspace[x].mdglobaldata + i;
			m = vm_page_alloc(&kernel_object, va, VM_ALLOC_SYSTEM);
			pmap_kenter_quick(va, m->phys_addr);
		}

		for (i = 0; i < sizeof(CPU_prvspace[x].idlestack); i += PAGE_SIZE) {
			va = (vm_offset_t)&CPU_prvspace[x].idlestack + i;
			m = vm_page_alloc(&kernel_object, va, VM_ALLOC_SYSTEM);
			pmap_kenter_quick(va, m->phys_addr);
		}

		gd = &CPU_prvspace[x].mdglobaldata;	/* official location */
		bzero(gd, sizeof(*gd));
		gd->mi.gd_prvspace = ps = &CPU_prvspace[x];

		/* prime data page for it to use */
		mi_gdinit(&gd->mi, x);

		gd->gd_CMAP1 = pmap_kpte((vm_offset_t)CPU_prvspace[x].CPAGE1);
		gd->gd_CMAP2 = pmap_kpte((vm_offset_t)CPU_prvspace[x].CPAGE2);
		gd->gd_CMAP3 = pmap_kpte((vm_offset_t)CPU_prvspace[x].CPAGE3);
		gd->gd_PMAP1 = pmap_kpte((vm_offset_t)CPU_prvspace[x].PPAGE1);
		gd->gd_CADDR1 = ps->CPAGE1;
		gd->gd_CADDR2 = ps->CPAGE2;
		gd->gd_CADDR3 = ps->CPAGE3;
		gd->gd_PADDR1 = (vpte_t *)ps->PPAGE1;

		ipiq_size = sizeof(struct lwkt_ipiq) * (mp_naps + 1);
		gd->mi.gd_ipiq = (void *)kmem_alloc(&kernel_map, ipiq_size,
						    VM_SUBSYS_IPIQ);
		bzero(gd->mi.gd_ipiq, ipiq_size);

		/*
		 * Setup the AP boot stack
		 */
		bootSTK = &ps->idlestack[UPAGES*PAGE_SIZE/2];
		bootAP = x;

		/*
		 * Setup the AP's lwp, this is the 'cpu'
		 *
		 * We have to make sure our signals are masked or the new LWP
		 * may pick up a signal that it isn't ready for yet.  SMP
		 * startup occurs after SI_BOOT2_LEAVE_CRIT so interrupts
		 * have already been enabled.
		 */
		cpu_disable_intr();

		stack = mmap(NULL, KERNEL_STACK_SIZE,
			     PROT_READ|PROT_WRITE|PROT_EXEC,
			     MAP_ANON, -1, 0);
		if (stack == MAP_FAILED) {
			panic("Unable to allocate stack for thread %d\n", x);
		}
		pthread_attr_setstack(&attr, stack, KERNEL_STACK_SIZE);

		pthread_create(&ap_tids[x], &attr, start_ap, NULL);
		cpu_enable_intr();

		while (CPUMASK_TESTBIT(smp_startup_mask, x) == 0) {
			cpu_lfence(); /* XXX spin until the AP has started */
		}
	}
	vm_object_drop(&kernel_object);
	pthread_attr_destroy(&attr);

	return(ncpus - 1);
}
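
/*
 * Startup handshake recap (BSP side above, AP side in ap_init()):
 *
 *	BSP: pthread_create(&ap_tids[x], ...)	-> AP runs start_ap()
 *	AP:  sets its bit in smp_startup_mask
 *	BSP: sees the bit, moves on to the next AP
 *	BSP: ap_finish() sets mp_finish and releases the mplock
 *	AP:  sees mp_finish != 0, takes the mplock, joins smp_active_mask
 *
 * Only once a cpu is in smp_active_mask will cpu_send_ipiq() actually
 * deliver SIGUSR1 based IPIs to it.
 */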
/*
 * CPU TOPOLOGY DETECTION FUNCTIONS.
 */
void
detect_cpu_topology(void)
{
	logical_CPU_bits = vkernel_b_arg;
	core_bits = vkernel_B_arg;
}

int
get_chip_ID(int cpuid)
{
	return get_apicid_from_cpuid(cpuid) >>
	    (logical_CPU_bits + core_bits);
}

int
get_core_number_within_chip(int cpuid)
{
	return (get_apicid_from_cpuid(cpuid) >> logical_CPU_bits) &
	    ((1 << core_bits) - 1);
}

int
get_logical_CPU_number_within_core(int cpuid)
{
	return get_apicid_from_cpuid(cpuid) &
	    ((1 << logical_CPU_bits) - 1);
}
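
/*
 * Worked example (illustrative): with vkernel_b_arg == 1 and
 * vkernel_B_arg == 2 an APIC ID decomposes as
 *
 *	[ chip ID | 2 core bits | 1 logical bit ]
 *
 * so an APIC ID of 0b1101 (13) yields:
 *
 *	get_chip_ID()                        -> 13 >> 3       = 1
 *	get_core_number_within_chip()        -> (13 >> 1) & 3 = 2
 *	get_logical_CPU_number_within_core() -> 13 & 1        = 1
 */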