/*
 *  PowerPC emulation helpers for QEMU.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "hw/ppc/ppc.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
/*****************************************************************************/
/* SPR accesses */
30 target_ulong
helper_load_tbl(CPUPPCState
*env
)
32 return (target_ulong
)cpu_ppc_load_tbl(env
);
35 target_ulong
helper_load_tbu(CPUPPCState
*env
)
37 return cpu_ppc_load_tbu(env
);
40 target_ulong
helper_load_atbl(CPUPPCState
*env
)
42 return (target_ulong
)cpu_ppc_load_atbl(env
);
45 target_ulong
helper_load_atbu(CPUPPCState
*env
)
47 return cpu_ppc_load_atbu(env
);
50 target_ulong
helper_load_vtb(CPUPPCState
*env
)
52 return cpu_ppc_load_vtb(env
);
55 #if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
56 target_ulong
helper_load_purr(CPUPPCState
*env
)
58 return (target_ulong
)cpu_ppc_load_purr(env
);
61 void helper_store_purr(CPUPPCState
*env
, target_ulong val
)
63 CPUState
*cs
= env_cpu(env
);
65 uint32_t nr_threads
= cs
->nr_threads
;
67 if (nr_threads
== 1 || !(env
->flags
& POWERPC_FLAG_SMT_1LPAR
)) {
68 cpu_ppc_store_purr(env
, val
);
72 THREAD_SIBLING_FOREACH(cs
, ccs
) {
73 CPUPPCState
*cenv
= &POWERPC_CPU(ccs
)->env
;
74 cpu_ppc_store_purr(cenv
, val
);
79 #if !defined(CONFIG_USER_ONLY)
80 void helper_store_tbl(CPUPPCState
*env
, target_ulong val
)
82 CPUState
*cs
= env_cpu(env
);
84 uint32_t nr_threads
= cs
->nr_threads
;
86 if (nr_threads
== 1 || !(env
->flags
& POWERPC_FLAG_SMT_1LPAR
)) {
87 cpu_ppc_store_tbl(env
, val
);
91 THREAD_SIBLING_FOREACH(cs
, ccs
) {
92 CPUPPCState
*cenv
= &POWERPC_CPU(ccs
)->env
;
93 cpu_ppc_store_tbl(cenv
, val
);
97 void helper_store_tbu(CPUPPCState
*env
, target_ulong val
)
99 CPUState
*cs
= env_cpu(env
);
101 uint32_t nr_threads
= cs
->nr_threads
;
103 if (nr_threads
== 1 || !(env
->flags
& POWERPC_FLAG_SMT_1LPAR
)) {
104 cpu_ppc_store_tbu(env
, val
);
108 THREAD_SIBLING_FOREACH(cs
, ccs
) {
109 CPUPPCState
*cenv
= &POWERPC_CPU(ccs
)->env
;
110 cpu_ppc_store_tbu(cenv
, val
);
114 void helper_store_atbl(CPUPPCState
*env
, target_ulong val
)
116 cpu_ppc_store_atbl(env
, val
);
119 void helper_store_atbu(CPUPPCState
*env
, target_ulong val
)
121 cpu_ppc_store_atbu(env
, val
);
124 target_ulong
helper_load_decr(CPUPPCState
*env
)
126 return cpu_ppc_load_decr(env
);
129 void helper_store_decr(CPUPPCState
*env
, target_ulong val
)
131 cpu_ppc_store_decr(env
, val
);
134 target_ulong
helper_load_hdecr(CPUPPCState
*env
)
136 return cpu_ppc_load_hdecr(env
);
139 void helper_store_hdecr(CPUPPCState
*env
, target_ulong val
)
141 CPUState
*cs
= env_cpu(env
);
143 uint32_t nr_threads
= cs
->nr_threads
;
145 if (nr_threads
== 1 || !(env
->flags
& POWERPC_FLAG_SMT_1LPAR
)) {
146 cpu_ppc_store_hdecr(env
, val
);
150 THREAD_SIBLING_FOREACH(cs
, ccs
) {
151 CPUPPCState
*cenv
= &POWERPC_CPU(ccs
)->env
;
152 cpu_ppc_store_hdecr(cenv
, val
);
156 void helper_store_vtb(CPUPPCState
*env
, target_ulong val
)
158 CPUState
*cs
= env_cpu(env
);
160 uint32_t nr_threads
= cs
->nr_threads
;
162 if (nr_threads
== 1 || !(env
->flags
& POWERPC_FLAG_SMT_1LPAR
)) {
163 cpu_ppc_store_vtb(env
, val
);
167 THREAD_SIBLING_FOREACH(cs
, ccs
) {
168 CPUPPCState
*cenv
= &POWERPC_CPU(ccs
)->env
;
169 cpu_ppc_store_vtb(cenv
, val
);
173 void helper_store_tbu40(CPUPPCState
*env
, target_ulong val
)
175 CPUState
*cs
= env_cpu(env
);
177 uint32_t nr_threads
= cs
->nr_threads
;
179 if (nr_threads
== 1 || !(env
->flags
& POWERPC_FLAG_SMT_1LPAR
)) {
180 cpu_ppc_store_tbu40(env
, val
);
184 THREAD_SIBLING_FOREACH(cs
, ccs
) {
185 CPUPPCState
*cenv
= &POWERPC_CPU(ccs
)->env
;
186 cpu_ppc_store_tbu40(cenv
, val
);
190 target_ulong
helper_load_40x_pit(CPUPPCState
*env
)
192 return load_40x_pit(env
);
195 void helper_store_40x_pit(CPUPPCState
*env
, target_ulong val
)
197 store_40x_pit(env
, val
);
200 void helper_store_40x_tcr(CPUPPCState
*env
, target_ulong val
)
202 store_40x_tcr(env
, val
);
205 void helper_store_40x_tsr(CPUPPCState
*env
, target_ulong val
)
207 store_40x_tsr(env
, val
);
210 void helper_store_booke_tcr(CPUPPCState
*env
, target_ulong val
)
212 store_booke_tcr(env
, val
);
215 void helper_store_booke_tsr(CPUPPCState
*env
, target_ulong val
)
217 store_booke_tsr(env
, val
);
220 #if defined(TARGET_PPC64)
/*
 * POWER processor Timebase Facility
 *
 * The TBST is the timebase state machine, which is a per-core machine that
 * is used to synchronize the core TB with the ChipTOD. States 3,4,5 are
 * not used in POWER8/9/10.
 *
 * The state machine gets driven by writes to TFMR SPR from the core, and
 * by signals from the ChipTOD. The state machine table for common
 * transitions is as follows (according to hardware specs, not necessarily
 * this implementation):
 *
 * | Cur            | Event                            | New |
 * +----------------+----------------------------------+-----+
 * | 0 RESET        | TFMR |= LOAD_TOD_MOD             |  1  |
 * | 1 SEND_TOD_MOD | "immediate transition"           |  2  |
 * | 2 NOT_SET      | mttbu/mttbu40/mttbl              |  2  |
 * | 2 NOT_SET      | TFMR |= MOVE_CHIP_TOD_TO_TB      |  6  |
 * | 6 SYNC_WAIT    | "sync pulse from ChipTOD"        |  7  |
 * | 7 GET_TOD      | ChipTOD xscom MOVE_TOD_TO_TB_REG |  8  |
 * | 8 TB_RUNNING   | mttbu/mttbu40                    |  8  |
 * | 8 TB_RUNNING   | TFMR |= LOAD_TOD_MOD             |  1  |
 * | 8 TB_RUNNING   | mttbl                            |  9  |
 * | 9 TB_ERROR     | TFMR |= CLEAR_TB_ERRORS          |  0  |
 *
 * - LOAD_TOD_MOD will also move states 2,6 to state 1, omitted from table
 *   because it's not a typical init flow.
 *
 * - The ERROR state can be entered from most/all other states on invalid
 *   states (e.g., if some TFMR control bit is set from a state where it's
 *   not listed to cause a transition away from), omitted to avoid clutter.
 *
 * Note: mttbl causes a timebase error because this inevitably causes
 * ticks to be lost and TB to become unsynchronized, whereas TB can be
 * adjusted using mttbu* without losing ticks. mttbl behaviour is not
 * modelled.
 *
 * Note: the TB state machine does not actually cause any real TB adjustment!
 * TB starts out synchronized across all vCPUs (hardware threads) in
 * QEMU, so for now the purpose of the TBST and ChipTOD model is simply
 * to step through firmware initialisation sequences.
 */
265 static unsigned int tfmr_get_tb_state(uint64_t tfmr
)
267 return (tfmr
& TFMR_TBST_ENCODED
) >> (63 - 31);
270 static uint64_t tfmr_new_tb_state(uint64_t tfmr
, unsigned int tbst
)
272 tfmr
&= ~TFMR_TBST_LAST
;
273 tfmr
|= (tfmr
& TFMR_TBST_ENCODED
) >> 4; /* move state to last state */
274 tfmr
&= ~TFMR_TBST_ENCODED
;
275 tfmr
|= (uint64_t)tbst
<< (63 - 31); /* move new state to state */
277 if (tbst
== TBST_TB_RUNNING
) {
278 tfmr
|= TFMR_TB_VALID
;
280 tfmr
&= ~TFMR_TB_VALID
;
286 static void write_tfmr(CPUPPCState
*env
, target_ulong val
)
288 CPUState
*cs
= env_cpu(env
);
290 if (cs
->nr_threads
== 1) {
291 env
->spr
[SPR_TFMR
] = val
;
294 THREAD_SIBLING_FOREACH(cs
, ccs
) {
295 CPUPPCState
*cenv
= &POWERPC_CPU(ccs
)->env
;
296 cenv
->spr
[SPR_TFMR
] = val
;
301 static void tb_state_machine_step(CPUPPCState
*env
)
303 uint64_t tfmr
= env
->spr
[SPR_TFMR
];
304 unsigned int tbst
= tfmr_get_tb_state(tfmr
);
306 if (!(tfmr
& TFMR_TB_ECLIPZ
) || tbst
== TBST_TB_ERROR
) {
310 if (env
->pnv_tod_tbst
.tb_sync_pulse_timer
) {
311 env
->pnv_tod_tbst
.tb_sync_pulse_timer
--;
313 tfmr
|= TFMR_TB_SYNC_OCCURED
;
314 write_tfmr(env
, tfmr
);
317 if (env
->pnv_tod_tbst
.tb_state_timer
) {
318 env
->pnv_tod_tbst
.tb_state_timer
--;
322 if (tfmr
& TFMR_LOAD_TOD_MOD
) {
323 tfmr
&= ~TFMR_LOAD_TOD_MOD
;
324 if (tbst
== TBST_GET_TOD
) {
325 tfmr
= tfmr_new_tb_state(tfmr
, TBST_TB_ERROR
);
326 tfmr
|= TFMR_FIRMWARE_CONTROL_ERROR
;
328 tfmr
= tfmr_new_tb_state(tfmr
, TBST_SEND_TOD_MOD
);
329 /* State seems to transition immediately */
330 tfmr
= tfmr_new_tb_state(tfmr
, TBST_NOT_SET
);
332 } else if (tfmr
& TFMR_MOVE_CHIP_TOD_TO_TB
) {
333 if (tbst
== TBST_SYNC_WAIT
) {
334 tfmr
= tfmr_new_tb_state(tfmr
, TBST_GET_TOD
);
335 env
->pnv_tod_tbst
.tb_state_timer
= 3;
336 } else if (tbst
== TBST_GET_TOD
) {
337 if (env
->pnv_tod_tbst
.tod_sent_to_tb
) {
338 tfmr
= tfmr_new_tb_state(tfmr
, TBST_TB_RUNNING
);
339 tfmr
&= ~TFMR_MOVE_CHIP_TOD_TO_TB
;
340 env
->pnv_tod_tbst
.tb_ready_for_tod
= 0;
341 env
->pnv_tod_tbst
.tod_sent_to_tb
= 0;
344 qemu_log_mask(LOG_GUEST_ERROR
, "TFMR error: MOVE_CHIP_TOD_TO_TB "
345 "state machine in invalid state 0x%x\n", tbst
);
346 tfmr
= tfmr_new_tb_state(tfmr
, TBST_TB_ERROR
);
347 tfmr
|= TFMR_FIRMWARE_CONTROL_ERROR
;
348 env
->pnv_tod_tbst
.tb_ready_for_tod
= 0;
352 write_tfmr(env
, tfmr
);
355 target_ulong
helper_load_tfmr(CPUPPCState
*env
)
357 tb_state_machine_step(env
);
359 return env
->spr
[SPR_TFMR
] | TFMR_TB_ECLIPZ
;
362 void helper_store_tfmr(CPUPPCState
*env
, target_ulong val
)
364 uint64_t tfmr
= env
->spr
[SPR_TFMR
];
365 uint64_t clear_on_write
;
366 unsigned int tbst
= tfmr_get_tb_state(tfmr
);
368 if (!(val
& TFMR_TB_ECLIPZ
)) {
369 qemu_log_mask(LOG_UNIMP
, "TFMR non-ECLIPZ mode not implemented\n");
370 tfmr
&= ~TFMR_TBST_ENCODED
;
371 tfmr
&= ~TFMR_TBST_LAST
;
375 /* Update control bits */
376 tfmr
= (tfmr
& ~TFMR_CONTROL_MASK
) | (val
& TFMR_CONTROL_MASK
);
378 /* Several bits are clear-on-write, only one is implemented so far */
379 clear_on_write
= val
& TFMR_FIRMWARE_CONTROL_ERROR
;
380 tfmr
&= ~clear_on_write
;
383 * mtspr always clears this. The sync pulse timer makes it come back
384 * after the second mfspr.
386 tfmr
&= ~TFMR_TB_SYNC_OCCURED
;
387 env
->pnv_tod_tbst
.tb_sync_pulse_timer
= 1;
389 if (ppc_cpu_tir(env_archcpu(env
)) != 0 &&
390 (val
& (TFMR_LOAD_TOD_MOD
| TFMR_MOVE_CHIP_TOD_TO_TB
))) {
391 qemu_log_mask(LOG_UNIMP
, "TFMR timebase state machine can only be "
392 "driven by thread 0\n");
396 if (((tfmr
| val
) & (TFMR_LOAD_TOD_MOD
| TFMR_MOVE_CHIP_TOD_TO_TB
)) ==
397 (TFMR_LOAD_TOD_MOD
| TFMR_MOVE_CHIP_TOD_TO_TB
)) {
398 qemu_log_mask(LOG_GUEST_ERROR
, "TFMR error: LOAD_TOD_MOD and "
399 "MOVE_CHIP_TOD_TO_TB both set\n");
400 tfmr
= tfmr_new_tb_state(tfmr
, TBST_TB_ERROR
);
401 tfmr
|= TFMR_FIRMWARE_CONTROL_ERROR
;
402 env
->pnv_tod_tbst
.tb_ready_for_tod
= 0;
406 if (tfmr
& TFMR_CLEAR_TB_ERRORS
) {
408 * Workbook says TFMR_CLEAR_TB_ERRORS should be written twice.
409 * This is not simulated/required here.
411 tfmr
= tfmr_new_tb_state(tfmr
, TBST_RESET
);
412 tfmr
&= ~TFMR_CLEAR_TB_ERRORS
;
413 tfmr
&= ~TFMR_LOAD_TOD_MOD
;
414 tfmr
&= ~TFMR_MOVE_CHIP_TOD_TO_TB
;
415 tfmr
&= ~TFMR_FIRMWARE_CONTROL_ERROR
; /* XXX: should this be cleared? */
416 env
->pnv_tod_tbst
.tb_ready_for_tod
= 0;
417 env
->pnv_tod_tbst
.tod_sent_to_tb
= 0;
421 if (tbst
== TBST_TB_ERROR
) {
422 qemu_log_mask(LOG_GUEST_ERROR
, "TFMR error: mtspr TFMR in TB_ERROR"
424 tfmr
|= TFMR_FIRMWARE_CONTROL_ERROR
;
428 if (tfmr
& TFMR_LOAD_TOD_MOD
) {
429 /* Wait for an arbitrary 3 mfspr until the next state transition. */
430 env
->pnv_tod_tbst
.tb_state_timer
= 3;
431 } else if (tfmr
& TFMR_MOVE_CHIP_TOD_TO_TB
) {
432 if (tbst
== TBST_NOT_SET
) {
433 tfmr
= tfmr_new_tb_state(tfmr
, TBST_SYNC_WAIT
);
434 env
->pnv_tod_tbst
.tb_ready_for_tod
= 1;
435 env
->pnv_tod_tbst
.tb_state_timer
= 3; /* arbitrary */
437 qemu_log_mask(LOG_GUEST_ERROR
, "TFMR error: MOVE_CHIP_TOD_TO_TB "
438 "not in TB not set state 0x%x\n",
440 tfmr
= tfmr_new_tb_state(tfmr
, TBST_TB_ERROR
);
441 tfmr
|= TFMR_FIRMWARE_CONTROL_ERROR
;
442 env
->pnv_tod_tbst
.tb_ready_for_tod
= 0;
447 write_tfmr(env
, tfmr
);
/*****************************************************************************/
/* Embedded PowerPC specific helpers */

/* XXX: to be improved to check access rights when in user-mode */
455 target_ulong
helper_load_dcr(CPUPPCState
*env
, target_ulong dcrn
)
459 if (unlikely(env
->dcr_env
== NULL
)) {
460 qemu_log_mask(LOG_GUEST_ERROR
, "No DCR environment\n");
461 raise_exception_err_ra(env
, POWERPC_EXCP_PROGRAM
,
463 POWERPC_EXCP_INVAL_INVAL
, GETPC());
468 ret
= ppc_dcr_read(env
->dcr_env
, (uint32_t)dcrn
, &val
);
470 if (unlikely(ret
!= 0)) {
471 qemu_log_mask(LOG_GUEST_ERROR
, "DCR read error %d %03x\n",
472 (uint32_t)dcrn
, (uint32_t)dcrn
);
473 raise_exception_err_ra(env
, POWERPC_EXCP_PROGRAM
,
475 POWERPC_EXCP_INVAL_INVAL
, GETPC());
481 void helper_store_dcr(CPUPPCState
*env
, target_ulong dcrn
, target_ulong val
)
483 if (unlikely(env
->dcr_env
== NULL
)) {
484 qemu_log_mask(LOG_GUEST_ERROR
, "No DCR environment\n");
485 raise_exception_err_ra(env
, POWERPC_EXCP_PROGRAM
,
487 POWERPC_EXCP_INVAL_INVAL
, GETPC());
491 ret
= ppc_dcr_write(env
->dcr_env
, (uint32_t)dcrn
, (uint32_t)val
);
493 if (unlikely(ret
!= 0)) {
494 qemu_log_mask(LOG_GUEST_ERROR
, "DCR write error %d %03x\n",
495 (uint32_t)dcrn
, (uint32_t)dcrn
);
496 raise_exception_err_ra(env
, POWERPC_EXCP_PROGRAM
,
498 POWERPC_EXCP_INVAL_INVAL
, GETPC());