/*
 * This file is part of the coreboot project.
 *
 * Copyright 2013 Google Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA, 02110-1301 USA
 */
#include <stdint.h>

#include <arch/barrier.h>
#include <arch/lib_helpers.h>
#include <console/console.h>

#include "cpu-internal.h"
29 static struct cpu_info cpu_infos
[CONFIG_MAX_CPUS
];
30 struct cpu_info
*bsp_cpu_info
;
32 static inline struct cpu_info
*cpu_info_for_cpu(unsigned int id
)
34 return &cpu_infos
[id
];
/* Return the cpu_info structure for the CPU executing this code. */
struct cpu_info *cpu_info(void)
{
	unsigned int self = smp_processor_id();

	return cpu_info_for_cpu(self);
}
42 static int cpu_online(struct cpu_info
*ci
)
44 return load_acquire(&ci
->online
) != 0;
47 static void cpu_mark_online(struct cpu_info
*ci
)
49 store_release(&ci
->online
, 1);
52 static inline void cpu_disable_dev(device_t dev
)
57 static struct cpu_driver
*locate_cpu_driver(uint32_t midr
)
59 struct cpu_driver
*cur
;
61 for (cur
= cpu_drivers
; cur
!= ecpu_drivers
; cur
++) {
62 const struct cpu_device_id
*id_table
= cur
->id_table
;
64 for (; id_table
->midr
!= CPU_ID_END
; id_table
++) {
65 if (id_table
->midr
== midr
)
72 static int cpu_set_device_operations(device_t dev
)
75 struct cpu_driver
*driver
;
77 midr
= raw_read_midr_el1();
78 driver
= locate_cpu_driver(midr
);
81 printk(BIOS_WARNING
, "No CPU driver for MIDR %08x\n", midr
);
84 dev
->ops
= driver
->ops
;
88 /* Set up default SCR values. */
89 static void el3_init(void)
93 if (get_current_el() != EL3
)
96 scr
= raw_read_scr_el3();
97 /* Default to non-secure EL1 and EL0. */
98 scr
&= ~(SCR_NS_MASK
);
100 /* Disable IRQ, FIQ, and external abort interrupt routing. */
101 scr
&= ~(SCR_IRQ_MASK
| SCR_FIQ_MASK
| SCR_EA_MASK
);
102 scr
|= SCR_IRQ_DISABLE
| SCR_FIQ_DISABLE
| SCR_EA_DISABLE
;
104 scr
&= ~(SCR_HVC_MASK
);
105 scr
|= SCR_HVC_ENABLE
;
107 scr
&= ~(SCR_SMC_MASK
);
108 scr
|= SCR_SMC_DISABLE
;
109 /* Disable secure instruction fetches. */
110 scr
&= ~(SCR_SIF_MASK
);
111 scr
|= SCR_SIF_DISABLE
;
112 /* All lower exception levels 64-bit by default. */
113 scr
&= ~(SCR_RW_MASK
);
114 scr
|= SCR_LOWER_AARCH64
;
115 /* Disable secure EL1 access to secure timer. */
116 scr
&= ~(SCR_ST_MASK
);
117 scr
|= SCR_ST_DISABLE
;
118 /* Don't trap on WFE or WFI instructions. */
119 scr
&= ~(SCR_TWI_MASK
| SCR_TWE_MASK
);
120 scr
|= SCR_TWI_DISABLE
| SCR_TWE_DISABLE
;
121 raw_write_scr_el3(scr
);
125 static void init_this_cpu(void *arg
)
127 struct cpu_info
*ci
= arg
;
128 device_t dev
= ci
->cpu
;
130 cpu_set_device_operations(dev
);
134 /* Initialize the GIC. */
137 if (dev
->ops
!= NULL
&& dev
->ops
->init
!= NULL
) {
138 dev
->initialized
= 1;
139 printk(BIOS_DEBUG
, "%s init\n", dev_path(dev
));
144 /* Fill in cpu_info structures according to device tree. */
145 static void init_cpu_info(struct bus
*bus
)
149 for (cur
= bus
->children
; cur
!= NULL
; cur
= cur
->sibling
) {
151 unsigned int id
= cur
->path
.cpu
.id
;
153 if (cur
->path
.type
!= DEVICE_PATH_CPU
)
156 /* IDs are currently mapped 1:1 with logical CPU numbers. */
157 if (id
>= CONFIG_MAX_CPUS
) {
159 "CPU id %x too large. Disabling.\n", id
);
160 cpu_disable_dev(cur
);
164 ci
= cpu_info_for_cpu(id
);
165 if (ci
->cpu
!= NULL
) {
167 "Duplicate ID %x in device tree.\n", id
);
168 cpu_disable_dev(cur
);
172 ci
->id
= cur
->path
.cpu
.id
;
175 /* Mark current cpu online. */
176 cpu_mark_online(cpu_info());
179 static inline int action_queue_empty(struct cpu_action_queue
*q
)
181 return load_acquire_exclusive(&q
->todo
) == NULL
;
184 static inline int action_completed(struct cpu_action_queue
*q
,
185 struct cpu_action
*action
)
187 return load_acquire(&q
->completed
) == action
;
/*
 * Spin until @q's todo slot is free. The wfe() in the loop body was missing
 * from the mangled source; it lets the core sleep until a sev() from the
 * consumer side wakes it.
 */
static inline void wait_for_action_queue_slot(struct cpu_action_queue *q)
{
	while (!action_queue_empty(q))
		wfe();
}
/*
 * Block until @a has been marked completed on @q. The wfe() in the loop
 * body was missing from the mangled source; it pairs with the sev() issued
 * by action_queue_complete().
 */
static void wait_for_action_complete(struct cpu_action_queue *q,
					struct cpu_action *a)
{
	while (!action_completed(q, a))
		wfe();
}
203 static struct cpu_action
*wait_for_action(struct cpu_action_queue
*q
,
204 struct cpu_action
*local
)
206 struct cpu_action
*action
;
208 while (action_queue_empty(q
))
212 * Keep original address, but use a local copy for async processing.
215 action
= load_acquire_exclusive(&q
->todo
);
217 } while (!store_release_exclusive(&q
->todo
, NULL
));
222 static void queue_action(struct cpu_action_queue
*q
, struct cpu_action
*action
)
225 wait_for_action_queue_slot(q
);
226 if (load_acquire_exclusive(&q
->todo
) != NULL
)
228 } while (!store_release_exclusive(&q
->todo
, action
));
231 static void action_queue_complete(struct cpu_action_queue
*q
,
232 struct cpu_action
*action
)
234 /* Mark completion and send events to waiters. */
235 store_release(&q
->completed
, action
);
239 static void action_run(struct cpu_action
*action
)
241 action
->run(action
->arg
);
244 static void action_run_on_cpu(struct cpu_info
*ci
, struct cpu_action
*action
,
247 struct cpu_action_queue
*q
= &ci
->action_queue
;
249 /* Don't run actions on non-online or enabled devices. */
250 if (!cpu_online(ci
) || ci
->cpu
== NULL
|| !ci
->cpu
->enabled
)
253 if (ci
->id
== smp_processor_id()) {
254 action
->run(action
->arg
);
258 queue_action(q
, action
);
259 /* Wait for CPU to pick it up. Empty slot means it was picked up. */
260 wait_for_action_queue_slot(q
);
261 /* Wait for completion if requested. */
263 wait_for_action_complete(q
, action
);
266 static int __arch_run_on_cpu(unsigned int cpu
, struct cpu_action
*action
,
271 if (cpu
>= CONFIG_MAX_CPUS
)
274 ci
= cpu_info_for_cpu(cpu
);
276 action_run_on_cpu(ci
, action
, sync
);
/* Run @action on @cpu and wait for completion. */
int arch_run_on_cpu(unsigned int cpu, struct cpu_action *action)
{
	return __arch_run_on_cpu(cpu, action, 1);
}
/* Run @action on @cpu without waiting for completion. */
int arch_run_on_cpu_async(unsigned int cpu, struct cpu_action *action)
{
	return __arch_run_on_cpu(cpu, action, 0);
}
291 static int __arch_run_on_all_cpus(struct cpu_action
*action
, int sync
)
295 for (i
= 0; i
< CONFIG_MAX_CPUS
; i
++)
296 action_run_on_cpu(cpu_info_for_cpu(i
), action
, sync
);
301 static int __arch_run_on_all_cpus_but_self(struct cpu_action
*action
, int sync
)
304 struct cpu_info
*me
= cpu_info();
306 for (i
= 0; i
< CONFIG_MAX_CPUS
; i
++) {
307 struct cpu_info
*ci
= cpu_info_for_cpu(i
);
310 action_run_on_cpu(ci
, action
, sync
);
/* Run @action on every CPU, waiting for each to complete. */
int arch_run_on_all_cpus(struct cpu_action *action)
{
	return __arch_run_on_all_cpus(action, 1);
}
/* Run @action on every CPU without waiting for completion. */
int arch_run_on_all_cpus_async(struct cpu_action *action)
{
	return __arch_run_on_all_cpus(action, 0);
}
/* Run @action on every CPU but the caller, waiting for each to complete. */
int arch_run_on_all_cpus_but_self(struct cpu_action *action)
{
	return __arch_run_on_all_cpus_but_self(action, 1);
}
/* Run @action on every CPU but the caller, without waiting. */
int arch_run_on_all_cpus_but_self_async(struct cpu_action *action)
{
	return __arch_run_on_all_cpus_but_self(action, 0);
}
336 void arch_secondary_cpu_init(void)
338 struct cpu_info
*ci
= cpu_info();
339 struct cpu_action_queue
*q
= &ci
->action_queue
;
341 /* Mark this CPU online. */
345 struct cpu_action
*orig
;
346 struct cpu_action action
;
348 orig
= wait_for_action(q
, &action
);
351 action_queue_complete(q
, orig
);
355 void arch_initialize_cpus(device_t cluster
, struct cpu_control_ops
*cntrl_ops
)
363 if (cluster
->path
.type
!= DEVICE_PATH_CPU_CLUSTER
) {
365 "CPU init failed. Device is not a CPU_CLUSTER: %s\n",
370 bus
= cluster
->link_list
;
372 /* Check if no children under this device. */
376 entry
= prepare_secondary_cpu_startup();
378 /* Initialize the cpu_info structures. */
380 max_cpus
= cntrl_ops
->total_cpus();
382 if (max_cpus
> CONFIG_MAX_CPUS
) {
384 "max_cpus (%zu) exceeds CONFIG_MAX_CPUS (%zu).\n",
385 max_cpus
, (size_t)CONFIG_MAX_CPUS
);
386 max_cpus
= CONFIG_MAX_CPUS
;
389 for (i
= 0; i
< max_cpus
; i
++) {
391 struct cpu_action action
;
393 ci
= cpu_info_for_cpu(i
);
396 /* Disregard CPUs not in device tree. */
400 /* Skip disabled CPUs. */
404 if (!cpu_online(ci
)) {
406 printk(BIOS_DEBUG
, "Starting CPU%x\n", ci
->id
);
407 if (cntrl_ops
->start_cpu(ci
->id
, entry
)) {
409 "Failed to start CPU%x\n", ci
->id
);
412 /* Wait for CPU to come online. */
413 while (!cpu_online(ci
));
414 printk(BIOS_DEBUG
, "CPU%x online.\n", ci
->id
);
417 /* Send it the init action. */
418 action
.run
= init_this_cpu
;
420 action_run_on_cpu(ci
, &action
, 1);