/*
 * The per-CPU TranslationBlock jump cache.
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#ifndef ACCEL_TCG_TB_JMP_CACHE_H
#define ACCEL_TCG_TB_JMP_CACHE_H

#define TB_JMP_CACHE_BITS 12
#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)

/*
 * Accessed in parallel; all accesses to 'tb' must be atomic.
 * For TARGET_TB_PCREL, accesses to 'pc' must be protected by
 * a load_acquire/store_release to 'tb'.
 */
struct CPUJumpCache {
    struct rcu_head rcu;
    struct {
        TranslationBlock *tb;
#if TARGET_TB_PCREL
        target_ulong pc;
#endif
    } array[TB_JMP_CACHE_SIZE];
};

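/*
 * Note: this is a direct-mapped, per-vCPU cache from a hashed guest PC
 * to the most recently executed TranslationBlock for that slot, used to
 * short-circuit the full TB hash table lookup.  The 'pc' field exists
 * only for TARGET_TB_PCREL builds, where the TB itself does not record
 * an absolute PC; there, the release store in tb_jmp_cache_set() paired
 * with the acquire load in tb_jmp_cache_get_tb() keeps the cached pc
 * consistent with the tb it was stored alongside.
 */
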
static inline TranslationBlock *
tb_jmp_cache_get_tb(CPUJumpCache *jc, uint32_t hash)
{
#if TARGET_TB_PCREL
    /* Use acquire to ensure current load of pc from jc. */
    return qatomic_load_acquire(&jc->array[hash].tb);
#else
    /* Use rcu_read to ensure current load of pc from *tb. */
    return qatomic_rcu_read(&jc->array[hash].tb);
#endif
}

static inline target_ulong
tb_jmp_cache_get_pc(CPUJumpCache *jc, uint32_t hash, TranslationBlock *tb)
{
#if TARGET_TB_PCREL
    return jc->array[hash].pc;
#else
    return tb_pc(tb);
#endif
}

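/*
 * Typical read side, as a simplified sketch only: the real lookup in
 * cpu_exec() also matches cs_base, flags and cflags, and the hash
 * helper and CPUState field names below are assumptions borrowed from
 * the rest of accel/tcg.
 *
 *     uint32_t hash = tb_jmp_cache_hash_func(pc);
 *     CPUJumpCache *jc = cpu->tb_jmp_cache;
 *     TranslationBlock *tb = tb_jmp_cache_get_tb(jc, hash);
 *
 *     if (tb != NULL && tb_jmp_cache_get_pc(jc, hash, tb) == pc) {
 *         return tb;
 *     }
 *
 * On a hit, the acquire (or rcu_read) in tb_jmp_cache_get_tb() is what
 * makes the subsequent pc read valid.
 */
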
static inline void
tb_jmp_cache_set(CPUJumpCache *jc, uint32_t hash,
                 TranslationBlock *tb, target_ulong pc)
{
#if TARGET_TB_PCREL
    jc->array[hash].pc = pc;
    /* Use store_release on tb to ensure pc is written first. */
    qatomic_store_release(&jc->array[hash].tb, tb);
#else
    /* Use the pc value already stored in tb->pc. */
    qatomic_set(&jc->array[hash].tb, tb);
#endif
}

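/*
 * Write-side counterpart, again only a sketch with helper names
 * (tb_htable_lookup(), cpu->tb_jmp_cache) assumed from accel/tcg: on a
 * cache miss the caller finds or generates the TB, then publishes it,
 * so that the release store above orders the pc write before the tb
 * pointer becomes visible to readers.
 *
 *     TranslationBlock *tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
 *     if (tb != NULL) {
 *         tb_jmp_cache_set(cpu->tb_jmp_cache, hash, tb, pc);
 *     }
 */
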
#endif /* ACCEL_TCG_TB_JMP_CACHE_H */