/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2001 Eric Biederman
 * Copyright (C) 2001 Ronald G. Minnich
 * Copyright (C) 2005 Yinghai Lu
 * Copyright (C) 2008 coresystems GmbH
 * Copyright (C) 2015 Timothy Pearson <tpearson@raptorengineeringinc.com>, Raptor Engineering
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
20 #include <cpu/x86/cr.h>
21 #include <cpu/x86/gdt.h>
22 #include <cpu/x86/lapic.h>
23 #include <arch/acpi.h>
29 #include <console/console.h>
30 #include <device/device.h>
31 #include <device/path.h>
32 #include <smp/atomic.h>
33 #include <smp/spinlock.h>
35 #include <cpu/intel/speedstep.h>
38 #if CONFIG_SMP && CONFIG_MAX_CPUS > 1
/* This is a lot more paranoid now, since Linux can NOT handle
 * being told there is a CPU when none exists. So any errors
 * will return 0, meaning no CPU.
 *
 * We actually handle that case by noting which cpus start up
 * and not telling anyone about the ones that don't.
 */
47 /* Start-UP IPI vector must be 4kB aligned and below 1MB. */
48 #define AP_SIPI_VECTOR 0x1000
/* Backup of the low-memory region that the AP trampoline overwrites at
 * AP_SIPI_VECTOR.  Populated on the S3 resume path by
 * copy_secondary_start_to_lowest_1M() and restored by recover_lowest_1M(). */
static char *lowmem_backup;	/* heap copy of the clobbered region */
static char *lowmem_backup_ptr;	/* original location of that region (AP_SIPI_VECTOR) */
static int lowmem_backup_size;	/* size in bytes of the saved region */
54 static inline void setup_secondary_gdt(void)
63 gdt_limit
= (void *)&_secondary_gdt_addr
;
64 gdt_base
= (void *)&gdt_limit
[1];
66 *gdt_limit
= (uintptr_t)&gdt_end
- (uintptr_t)&gdt
- 1;
67 *gdt_base
= (uintptr_t)&gdt
;
70 static void copy_secondary_start_to_lowest_1M(void)
72 unsigned long code_size
;
74 /* Fill in secondary_start's local gdt. */
75 setup_secondary_gdt();
77 code_size
= (unsigned long)_secondary_start_end
- (unsigned long)_secondary_start
;
79 if (acpi_is_wakeup_s3()) {
80 /* need to save it for RAM resume */
81 lowmem_backup_size
= code_size
;
82 lowmem_backup
= malloc(code_size
);
83 lowmem_backup_ptr
= (char *)AP_SIPI_VECTOR
;
85 if (lowmem_backup
== NULL
)
86 die("Out of backup memory\n");
88 memcpy(lowmem_backup
, lowmem_backup_ptr
, lowmem_backup_size
);
91 /* copy the _secondary_start to the ram below 1M*/
92 memcpy((unsigned char *)AP_SIPI_VECTOR
, (unsigned char *)_secondary_start
, code_size
);
94 printk(BIOS_DEBUG
, "start_eip=0x%08lx, code_size=0x%08lx\n",
95 (long unsigned int)AP_SIPI_VECTOR
, code_size
);
98 static void recover_lowest_1M(void)
100 if (acpi_is_wakeup_s3())
101 memcpy(lowmem_backup_ptr
, lowmem_backup
, lowmem_backup_size
);
/* Kick one AP into life with the classic INIT / INIT-deassert / STARTUP
 * IPI sequence.  Per the comment at the top of the file, a failure makes
 * the caller treat the CPU as absent.
 *
 * NOTE(review): this extraction is missing several lines — the `timeout`
 * declaration, the `do {` loop openers, delay calls, the function braces
 * and return statements — reconcile against the upstream file before
 * building.
 */
static int lapic_start_cpu(unsigned long apicid)
	unsigned long send_status, accept_status;

	/* Starting actual IPI sequence... */
	printk(BIOS_SPEW, "Asserting INIT.\n");

	/* Turn INIT on target chip: program the destination first. */
	lapic_write_around(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(apicid));
	/* Assert INIT, level-triggered.
	 * NOTE(review): the tail of this call (presumably `| LAPIC_DM_INIT);`)
	 * is missing from this extraction. */
	lapic_write_around(LAPIC_ICR, LAPIC_INT_LEVELTRIG
		| LAPIC_INT_ASSERT
	printk(BIOS_SPEW, "Waiting for send to finish...\n");
		printk(BIOS_SPEW, "+");
		/* Poll the ICR delivery-status bit until the IPI is sent. */
		send_status = lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));
	if (timeout >= 1000) {
		printk(BIOS_ERR, "CPU %ld: First APIC write timed out. "
			"Disabling\n", apicid);
		/* Dump and try to clear the LAPIC error status register. */
		printk(BIOS_ERR, "ESR is 0x%lx\n", lapic_read(LAPIC_ESR));
		if (lapic_read(LAPIC_ESR)) {
			printk(BIOS_ERR, "Try to reset ESR\n");
			lapic_write_around(LAPIC_ESR, 0);
			printk(BIOS_ERR, "ESR is 0x%lx\n",
				lapic_read(LAPIC_ESR));

/* These CPU models skip the INIT de-assert step — presumably parts where
 * de-assert is a no-op; TODO confirm against upstream. */
#if !CONFIG_CPU_AMD_MODEL_10XXX && !CONFIG_CPU_INTEL_MODEL_206AX && !CONFIG_CPU_INTEL_MODEL_2065X
	printk(BIOS_SPEW, "Deasserting INIT.\n");

	/* Target chip, then de-assert INIT (level-triggered, no ASSERT bit). */
	lapic_write_around(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(apicid));
	lapic_write_around(LAPIC_ICR, LAPIC_INT_LEVELTRIG | LAPIC_DM_INIT);

	printk(BIOS_SPEW, "Waiting for send to finish...\n");
		printk(BIOS_SPEW, "+");
		send_status = lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));
	if (timeout >= 1000) {
		printk(BIOS_ERR, "CPU %ld: Second apic write timed out. "
			"Disabling\n", apicid);

	/* Run STARTUP IPI loop. */
	printk(BIOS_SPEW, "#startup loops: %d.\n", CONFIG_NUM_IPI_STARTS);

	for (j = 1; j <= CONFIG_NUM_IPI_STARTS; j++) {
		printk(BIOS_SPEW, "Sending STARTUP #%d to %lu.\n", j, apicid);
		/* Clear any pending APIC errors before sending STARTUP. */
		lapic_read_around(LAPIC_SPIV);
		lapic_write(LAPIC_ESR, 0);
		lapic_read(LAPIC_ESR);
		printk(BIOS_SPEW, "After apic_write.\n");

		lapic_write_around(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(apicid));

		/* Boot on the stack */
		/* Kick the second */
		/* STARTUP vector field = physical page number of trampoline. */
		lapic_write_around(LAPIC_ICR, LAPIC_DM_STARTUP
			| (AP_SIPI_VECTOR >> 12));

		/* Give the other CPU some time to accept the IPI. */
		printk(BIOS_SPEW, "Startup point 1.\n");

		printk(BIOS_SPEW, "Waiting for send to finish...\n");
			printk(BIOS_SPEW, "+");
			send_status = lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY;
		} while (send_status && (timeout++ < 1000));

		/* Give the other CPU some time to accept the IPI.
		 * Due to the Pentium erratum 3AP. */
		lapic_read_around(LAPIC_SPIV);
		lapic_write(LAPIC_ESR, 0);
		/* 0xEF masks out reserved/benign ESR bits. */
		accept_status = (lapic_read(LAPIC_ESR) & 0xEF);
		if (send_status || accept_status)
	printk(BIOS_SPEW, "After Startup.\n");
		printk(BIOS_WARNING, "APIC never delivered???\n");
		/* NOTE(review): the accept_status argument line is missing here. */
		printk(BIOS_WARNING, "APIC delivery error (%lx).\n",
	if (send_status || accept_status)
/* Number of cpus that are currently running in coreboot */
static atomic_t active_cpus = ATOMIC_INIT(1);

/* start_cpu_lock covers last_cpu_index and secondary_stack.
 * Only starting one cpu at a time lets me remove the logic
 * for selecting the stack from assembly language.
 *
 * In addition communicating by variables to the cpu I
 * am starting allows me to verify it has started before
 * (NOTE(review): the remainder of this comment was lost in extraction)
 */
static spinlock_t start_cpu_lock = SPIN_LOCK_UNLOCKED;
static unsigned int last_cpu_index = 0;	/* index of the last AP handed out */
static void *stacks[CONFIG_MAX_CPUS];	/* per-CPU stack bases, set by start_cpu() */
/* Mailbox variables read by the AP trampoline; written under start_cpu_lock. */
volatile unsigned long secondary_stack;
volatile unsigned int secondary_cpu_index;
/* Bring up one AP: carve out and poison a fresh stack below _estack,
 * publish the stack/index through the secondary_* mailbox variables,
 * send the INIT/SIPI sequence via lapic_start_cpu(), and wait for the
 * AP to check in.  Serialized by start_cpu_lock.
 *
 * NOTE(review): declarations of `index`, `i`, `result`, `count`, the
 * function braces, the wait-loop body and the return statement are
 * missing from this extraction — reconcile against upstream.
 */
int start_cpu(struct device *cpu)
	struct cpu_info *info;
	unsigned long stack_end;
	unsigned long stack_base;
	unsigned long *stack;
	unsigned long apicid;

	spin_lock(&start_cpu_lock);

	/* Get the CPU's apicid */
	apicid = cpu->path.apic.apic_id;

	/* Get an index for the new processor */
	index = ++last_cpu_index;

	/* Find end of the new processor's stack; a struct cpu_info lives
	 * at the very top of each per-CPU stack. */
	stack_end = ((unsigned long)_estack) - (CONFIG_STACK_SIZE*index) -
		sizeof(struct cpu_info);

	stack_base = ((unsigned long)_estack) - (CONFIG_STACK_SIZE*(index+1));
	printk(BIOS_SPEW, "CPU%d: stack_base %p, stack_end %p\n", index,
		(void *)stack_base, (void *)stack_end);
	/* Poison the stack.  i counts bytes, so each word is written
	 * sizeof(*stack) times — redundant but harmless. */
	for(stack = (void *)stack_base, i = 0; i < CONFIG_STACK_SIZE; i++)
		stack[i/sizeof(*stack)] = 0xDEADBEEF;
	stacks[index] = stack;
	/* Record the index and which CPU structure we are using */
	info = (struct cpu_info *)stack_end;

	thread_init_cpu_info_non_bsp(info);

	/* Advertise the new stack and index to the starting CPU. */
	secondary_stack = stack_end;
	secondary_cpu_index = index;

	/* Until the CPU starts up report the CPU is not enabled */
	cpu->initialized = 0;

	result = lapic_start_cpu(apicid);

	/* Wait 1s or until the new cpu calls in */
	for(count = 0; count < 100000 ; count++) {
		/* secondary_stack == 0 presumably means the AP has taken
		 * its stack and checked in — loop body lost in extraction. */
		if (secondary_stack == 0) {

	spin_unlock(&start_cpu_lock);
323 #if CONFIG_AP_IN_SIPI_WAIT
/*
 * Sending INIT IPI to self is equivalent of asserting #INIT with a bit of
 * delay.
 * An undefined number of instruction cycles will complete. All global locks
 * must be released before INIT IPI and no printk is allowed after this.
 * De-asserting INIT IPI is a no-op on later Intel CPUs.
 *
 * If you set DEBUG_HALT_SELF to 1, printk's after INIT IPI are enabled
 * but running thread may halt without releasing the lock and effectively
 * deadlock other CPUs.
 */
336 #define DEBUG_HALT_SELF 0
/*
 * Normally this function is defined in lapic.h as an always inline function
 * that just keeps the CPU in a hlt() loop. This does not work on all CPUs.
 * I think all hyperthreading CPUs might need this version, but I could only
 * verify this on the Intel Core Duo.
 */
/* Take the calling CPU offline by sending an INIT IPI to itself
 * (assert, then de-assert).  Per the comment above, all locks must be
 * released before calling this, and printk after the INIT IPI is only
 * safe with DEBUG_HALT_SELF.
 *
 * NOTE(review): the `id`/`timeout` declarations, `do {` loop openers,
 * function braces and the final halt tail are missing from this
 * extraction — reconcile against upstream.
 */
void stop_this_cpu(void)
	unsigned long send_status;

	/* Own LAPIC ID lives in bits 31:24 of the ID register. */
	id = lapic_read(LAPIC_ID) >> 24;

	printk(BIOS_DEBUG, "CPU %ld going down...\n", id);

	/* send an LAPIC INIT to myself */
	lapic_write_around(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(id));
	lapic_write_around(LAPIC_ICR, LAPIC_INT_LEVELTRIG |
		LAPIC_INT_ASSERT | LAPIC_DM_INIT);

	/* wait for the ipi send to finish */
	printk(BIOS_SPEW, "Waiting for send to finish...\n");
		printk(BIOS_SPEW, "+");
		/* Poll the ICR delivery-status bit. */
		send_status = lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));
	if (timeout >= 1000) {
		printk(BIOS_ERR, "timed out\n");

	printk(BIOS_SPEW, "Deasserting INIT.\n");

	/* Deassert the LAPIC INIT */
	lapic_write_around(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(id));
	lapic_write_around(LAPIC_ICR, LAPIC_INT_LEVELTRIG | LAPIC_DM_INIT);

	printk(BIOS_SPEW, "Waiting for send to finish...\n");
		printk(BIOS_SPEW, "+");
		send_status = lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));
	if (timeout >= 1000) {
		printk(BIOS_ERR, "timed out\n");
/* C entry point of secondary cpus */
/* Counts itself into active_cpus, optionally serializes against the BSP
 * via start_cpu_lock, re-enables the SSE bits in CR4 (cleared across the
 * INIT/SIPI sequence), and runs the normal per-CPU initialization.
 *
 * NOTE(review): the `cr4_val` declaration, the write_cr4() call that
 * should follow the |=, the surrounding SSE #if guard, and the function
 * tail (presumably stop_this_cpu()) are missing from this extraction.
 */
void asmlinkage secondary_cpu_init(unsigned int index)
	atomic_inc(&active_cpus);

	/* In serialized init the BSP holds this lock while waiting for us. */
	if (!IS_ENABLED(CONFIG_PARALLEL_CPU_INIT))
		spin_lock(&start_cpu_lock);

	/* Seems that CR4 was cleared when AP start via lapic_start_cpu().
	 * Turn on CR4.OSFXSR and CR4.OSXMMEXCPT when SSE options enabled. */
	cr4_val = read_cr4();
	cr4_val |= (CR4_OSFXSR | CR4_OSXMMEXCPT);

	cpu_initialize(index);

	if (!IS_ENABLED(CONFIG_PARALLEL_CPU_INIT))
		spin_unlock(&start_cpu_lock);

	atomic_dec(&active_cpus);
/* Walk the CPU bus and start every APIC-path device that is not already
 * initialized (skipping the BSP itself in parallel-init mode), logging
 * CPUs that fail to start.
 *
 * NOTE(review): the `cpu` declaration, function braces and the bodies of
 * several conditionals (the skip/continue paths and the final
 * non-parallel action) are missing from this extraction.
 */
static void start_other_cpus(struct bus *cpu_bus, struct device *bsp_cpu)
	/* Loop through the cpus once getting them started */
	for(cpu = cpu_bus->children; cpu ; cpu = cpu->sibling) {
		/* Only APIC-path devices are CPUs we can start. */
		if (cpu->path.type != DEVICE_PATH_APIC) {

		/* Don't try to restart the BSP from itself. */
		if (IS_ENABLED(CONFIG_PARALLEL_CPU_INIT) && (cpu==bsp_cpu))

		if (cpu->initialized) {

		if (!start_cpu(cpu)) {
			/* Record the error in cpu? */
			printk(BIOS_ERR, "CPU 0x%02x would not start!\n",
				cpu->path.apic.apic_id);

	if (!IS_ENABLED(CONFIG_PARALLEL_CPU_INIT))
/* Restart each AP one at a time so it runs through the SMM relocation
 * handler, waiting after each for active_cpus to fall back to its
 * starting value (the AP counts itself in and out).
 *
 * NOTE(review): the `cpu` declaration, function braces and several
 * conditional bodies are missing from this extraction.
 */
static void smm_other_cpus(struct bus *cpu_bus, device_t bsp_cpu)
	int pre_count = atomic_read(&active_cpus);

	/* Loop through the cpus once to let them run through SMM relocator */
	for(cpu = cpu_bus->children; cpu ; cpu = cpu->sibling) {
		if (cpu->path.type != DEVICE_PATH_APIC) {

		printk(BIOS_ERR, "considering CPU 0x%02x for SMM init\n",
			cpu->path.apic.apic_id);

		if (!start_cpu(cpu)) {
			/* Record the error in cpu? */
			printk(BIOS_ERR, "CPU 0x%02x would not start!\n",
				cpu->path.apic.apic_id);

		/* FIXME: endless loop — no timeout if the AP never returns. */
		while (atomic_read(&active_cpus) != pre_count) ;
/* Block until all APs have decremented active_cpus back to 1 (BSP only),
 * then verify each CPU device actually initialized and report per-CPU
 * stack usage via checkstack().
 *
 * NOTE(review): the `cpu`/`i`/`loopcount` declarations, delay inside the
 * wait loop, function braces and several conditional bodies are missing
 * from this extraction.
 */
static void wait_other_cpus_stop(struct bus *cpu_bus)
	int old_active_count, active_count;

	/* Now loop until the other cpus have finished initializing */
	old_active_count = 1;
	active_count = atomic_read(&active_cpus);
	while(active_count > 1) {
		/* Only log when the count changes, to avoid console spam. */
		if (active_count != old_active_count) {
			printk(BIOS_INFO, "Waiting for %d CPUS to stop\n",
			old_active_count = active_count;
		active_count = atomic_read(&active_cpus);
	for(cpu = cpu_bus->children; cpu; cpu = cpu->sibling) {
		if (cpu->path.type != DEVICE_PATH_APIC) {
		/* SPEEDSTEP_APIC_MAGIC is presumably a pseudo-CPU that is
		 * expected not to start — verify against speedstep.h. */
		if (cpu->path.apic.apic_id == SPEEDSTEP_APIC_MAGIC) {
		if (!cpu->initialized) {
			printk(BIOS_ERR, "CPU 0x%02x did not initialize!\n",
				cpu->path.apic.apic_id);
	printk(BIOS_DEBUG, "All AP CPUs stopped (%ld loops)\n", loopcount);
	/* Report stack high-water marks for the BSP and each AP stack. */
	checkstack(_estack, 0);
	for(i = 1; i <= last_cpu_index; i++)
		checkstack((void *)stacks[i] + CONFIG_STACK_SIZE, i);
538 #endif /* CONFIG_SMP */
/* Top-level CPU bring-up: set up the BSP's device path and cpu_info,
 * copy the AP trampoline below 1MB, start the APs (before or after BSP
 * init depending on CONFIG_PARALLEL_CPU_INIT), wait for them to stop,
 * and optionally run serialized SMM initialization.
 *
 * NOTE(review): several lines are missing from this extraction — the
 * cpu_info() lookup, the lapic enable call, the #else/#endif halves of
 * the preprocessor conditionals, the BSP cpu_initialize() call and the
 * smm_init() call — reconcile against upstream.
 */
void initialize_cpus(struct bus *cpu_bus)
	struct device_path cpu_path;
	struct cpu_info *info;

	/* Find the info struct for this cpu */

	/* Ensure the local apic is enabled */

	/* Get the device path of the boot cpu */
	cpu_path.type = DEVICE_PATH_APIC;
	cpu_path.apic.apic_id = lapicid();

	/* Get the device path of the boot cpu (non-LAPIC case — the
	 * guarding #else appears to have been lost in extraction). */
	cpu_path.type = DEVICE_PATH_CPU;

	/* Find the device structure for the boot cpu */
	info->cpu = alloc_find_dev(cpu_bus, &cpu_path);

#if CONFIG_SMP && CONFIG_MAX_CPUS > 1
	// why here? In case some day we can start core1 in amd_sibling_init
	copy_secondary_start_to_lowest_1M();

#if CONFIG_HAVE_SMI_HANDLER
	if (!IS_ENABLED(CONFIG_SERIALIZED_SMM_INITIALIZATION))

#if CONFIG_SMP && CONFIG_MAX_CPUS > 1
	/* start all aps at first, so we can init ECC all together */
	if (IS_ENABLED(CONFIG_PARALLEL_CPU_INIT))
		start_other_cpus(cpu_bus, info->cpu);

	/* Initialize the bootstrap processor */

#if CONFIG_SMP && CONFIG_MAX_CPUS > 1
	if (!IS_ENABLED(CONFIG_PARALLEL_CPU_INIT))
		start_other_cpus(cpu_bus, info->cpu);

	/* Now wait for the rest of the cpus to stop */
	wait_other_cpus_stop(cpu_bus);

	if (IS_ENABLED(CONFIG_SERIALIZED_SMM_INITIALIZATION)) {
		/* At this point, all APs are sleeping:
		 * smm_init() will queue a pending SMI on all cpus
		 * and smm_other_cpus() will start them one by one */
#if CONFIG_SMP && CONFIG_MAX_CPUS > 1
		smm_other_cpus(cpu_bus, info->cpu);
602 #if CONFIG_SMP && CONFIG_MAX_CPUS > 1