/*
 * ARM v8.5-MemTag Operations
 *
 * Copyright (c) 2020 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/ram_addr.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"

static int choose_nonexcluded_tag(int tag, int offset, uint16_t exclude)
{
    if (exclude == 0xffff) {
        return 0;
    }

    if (offset == 0) {
        while (exclude & (1 << tag)) {
            tag = (tag + 1) & 15;
        }
    } else {
        do {
            do {
                tag = (tag + 1) & 15;
            } while (exclude & (1 << tag));
        } while (--offset > 0);
    }

    return tag;
}

/**
 * allocation_tag_mem:
 * @env: the cpu environment
 * @ptr_mmu_idx: the addressing regime to use for the virtual address
 * @ptr: the virtual address for which to look up tag memory
 * @ptr_access: the access to use for the virtual address
 * @ptr_size: the number of bytes in the normal memory access
 * @tag_access: the access to use for the tag memory
 * @tag_size: the number of bytes in the tag memory access
 * @ra: the return address for exception handling
 *
 * Our tag memory is formatted as a sequence of little-endian nibbles.
 * That is, the byte at (addr >> (LOG2_TAG_GRANULE + 1)) contains two
 * tags, with the tag at [3:0] for the lower addr and the tag at [7:4]
 * for the higher addr.
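 *
 * For example, with LOG2_TAG_GRANULE == 4 (16-byte granules), the
 * granules at 0x1000 and 0x1010 share the tag byte at offset 0x80 of
 * tag storage: the tag for 0x1000 is in bits [3:0] and the tag for
 * 0x1010 is in bits [7:4].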
 *
 * Here, resolve the physical address from the virtual address, and return
 * a pointer to the corresponding tag byte.  Exit with exception if the
 * virtual address is not accessible for @ptr_access.
 *
 * The @ptr_size and @tag_size values may not have an obvious relation
 * due to the alignment of @ptr, and the number of tag checks required.
 *
 * If there is no tag storage corresponding to @ptr, return NULL.
 */
static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
                                   uint64_t ptr, MMUAccessType ptr_access,
                                   int ptr_size, MMUAccessType tag_access,
                                   int tag_size, uintptr_t ra)
{
#ifdef CONFIG_USER_ONLY
    /* Tag storage not implemented. */
    return NULL;
#else
    uintptr_t index;
    CPUIOTLBEntry *iotlbentry;
    int in_page, flags;
    ram_addr_t ptr_ra;
    hwaddr ptr_paddr, tag_paddr, xlat;
    MemoryRegion *mr;
    ARMASIdx tag_asi;
    AddressSpace *tag_as;
    void *host;

    /*
     * Probe the first byte of the virtual address.  This raises an
     * exception for inaccessible pages, and resolves the virtual address
     * into the softmmu tlb.
     *
     * When RA == 0, this is for mte_probe1.  The page is expected to be
     * valid.  Indicate to probe_access_flags no-fault, then assert that
     * we received a valid page.
     */
    flags = probe_access_flags(env, ptr, ptr_access, ptr_mmu_idx,
                               ra == 0, &host, ra);
    assert(!(flags & TLB_INVALID_MASK));

    /*
     * Find the iotlbentry for ptr.  This *must* be present in the TLB
     * because we just found the mapping.
     * TODO: Perhaps there should be a cputlb helper that returns a
     * matching tlb entry + iotlb entry.
     */
    index = tlb_index(env, ptr_mmu_idx, ptr);
# ifdef CONFIG_DEBUG_TCG
    {
        CPUTLBEntry *entry = tlb_entry(env, ptr_mmu_idx, ptr);
        target_ulong comparator = (ptr_access == MMU_DATA_LOAD
                                   ? entry->addr_read
                                   : tlb_addr_write(entry));
        g_assert(tlb_hit(comparator, ptr));
    }
# endif
    iotlbentry = &env_tlb(env)->d[ptr_mmu_idx].iotlb[index];

    /* If the virtual page MemAttr != Tagged, access unchecked. */
    if (!arm_tlb_mte_tagged(&iotlbentry->attrs)) {
        return NULL;
    }

    /*
     * If not backed by host ram, there is no tag storage: access unchecked.
     * This is probably a guest os bug though, so log it.
     */
    if (unlikely(flags & TLB_MMIO)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Page @ 0x%" PRIx64 " indicates Tagged Normal memory "
                      "but is not backed by host ram\n", ptr);
        return NULL;
    }

    /*
     * The Normal memory access can extend to the next page.  E.g. a single
     * 8-byte access to the last byte of a page will check only the last
     * tag on the first page.
     * Any page access exception has priority over tag check exception.
     */
    in_page = -(ptr | TARGET_PAGE_MASK);
    if (unlikely(ptr_size > in_page)) {
        void *ignore;
        flags |= probe_access_flags(env, ptr + in_page, ptr_access,
                                    ptr_mmu_idx, ra == 0, &ignore, ra);
        assert(!(flags & TLB_INVALID_MASK));
    }

    /* Any debug exception has priority over a tag check exception. */
    if (unlikely(flags & TLB_WATCHPOINT)) {
        int wp = ptr_access == MMU_DATA_LOAD ? BP_MEM_READ : BP_MEM_WRITE;
        cpu_check_watchpoint(env_cpu(env), ptr, ptr_size,
                             iotlbentry->attrs, wp, ra);
    }

    /*
     * Find the physical address within the normal mem space.
     * The memory region lookup must succeed because TLB_MMIO was
     * not set in the cputlb lookup above.
     */
    mr = memory_region_from_host(host, &ptr_ra);
    tcg_debug_assert(mr != NULL);
    tcg_debug_assert(memory_region_is_ram(mr));
    ptr_paddr = ptr_ra;
    do {
        ptr_paddr += mr->addr;
        mr = mr->container;
    } while (mr);

    /* Convert to the physical address in tag space.  */
    tag_paddr = ptr_paddr >> (LOG2_TAG_GRANULE + 1);

    /* Look up the address in tag space. */
    tag_asi = iotlbentry->attrs.secure ? ARMASIdx_TagS : ARMASIdx_TagNS;
    tag_as = cpu_get_address_space(env_cpu(env), tag_asi);
    mr = address_space_translate(tag_as, tag_paddr, &xlat, NULL,
                                 tag_access == MMU_DATA_STORE,
                                 iotlbentry->attrs);

    /*
     * Note that @mr will never be NULL.  If there is nothing in the address
     * space at @tag_paddr, the translation will return the unallocated memory
     * region.  For our purposes, the result must be ram.
     */
    if (unlikely(!memory_region_is_ram(mr))) {
        /* ??? Failure is a board configuration error. */
        qemu_log_mask(LOG_UNIMP,
                      "Tag Memory @ 0x%" HWADDR_PRIx " not found for "
                      "Normal Memory @ 0x%" HWADDR_PRIx "\n",
                      tag_paddr, ptr_paddr);
        return NULL;
    }

    /*
     * Ensure the tag memory is dirty on write, for migration.
     * Tag memory can never contain code or display memory (vga).
     */
    if (tag_access == MMU_DATA_STORE) {
        ram_addr_t tag_ra = memory_region_get_ram_addr(mr) + xlat;
        cpu_physical_memory_set_dirty_flag(tag_ra, DIRTY_MEMORY_MIGRATION);
    }

    return memory_region_get_ram_ptr(mr) + xlat;
#endif /* CONFIG_USER_ONLY */
}

uint64_t HELPER(irg)(CPUARMState *env, uint64_t rn, uint64_t rm)
{
    int rtag;

    /*
     * Our IMPDEF choice for GCR_EL1.RRND==1 is to behave as if
     * GCR_EL1.RRND==0, always producing deterministic results.
     */
    uint16_t exclude = extract32(rm | env->cp15.gcr_el1, 0, 16);
    int start = extract32(env->cp15.rgsr_el1, 0, 4);
    int seed = extract32(env->cp15.rgsr_el1, 8, 16);
    int offset, i;

    for (i = offset = 0; i < 4; ++i) {
        /* NextRandomTagBit */
        int top = (extract32(seed, 5, 1) ^ extract32(seed, 3, 1) ^
                   extract32(seed, 2, 1) ^ extract32(seed, 0, 1));
        seed = (top << 15) | (seed >> 1);
        offset |= top << i;
    }
    rtag = choose_nonexcluded_tag(start, offset, exclude);
    env->cp15.rgsr_el1 = rtag | (seed << 8);

    return address_with_allocation_tag(rn, rtag);
}

uint64_t HELPER(addsubg)(CPUARMState *env, uint64_t ptr,
                         int32_t offset, uint32_t tag_offset)
{
    int start_tag = allocation_tag_from_addr(ptr);
    uint16_t exclude = extract32(env->cp15.gcr_el1, 0, 16);
    int rtag = choose_nonexcluded_tag(start_tag, tag_offset, exclude);

    return address_with_allocation_tag(ptr + offset, rtag);
}

static int load_tag1(uint64_t ptr, uint8_t *mem)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    return extract32(*mem, ofs, 4);
}

uint64_t HELPER(ldg)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uint8_t *mem;
    int rtag = 0;

    /* Trap if accessing an invalid page.  */
    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD, 1,
                             MMU_DATA_LOAD, 1, GETPC());

    /* Load if page supports tags. */
    if (mem) {
        rtag = load_tag1(ptr, mem);
    }

    return address_with_allocation_tag(xt, rtag);
}

static void check_tag_aligned(CPUARMState *env, uint64_t ptr, uintptr_t ra)
{
    if (unlikely(!QEMU_IS_ALIGNED(ptr, TAG_GRANULE))) {
        arm_cpu_do_unaligned_access(env_cpu(env), ptr, MMU_DATA_STORE,
                                    cpu_mmu_index(env, false), ra);
        g_assert_not_reached();
    }
}

/* For use in a non-parallel context, store to the given nibble.  */
static void store_tag1(uint64_t ptr, uint8_t *mem, int tag)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    *mem = deposit32(*mem, ofs, 4, tag);
}

/* For use in a parallel context, atomically store to the given nibble.  */
static void store_tag1_parallel(uint64_t ptr, uint8_t *mem, int tag)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    uint8_t old = atomic_read(mem);

    while (1) {
        uint8_t new = deposit32(old, ofs, 4, tag);
        uint8_t cmp = atomic_cmpxchg(mem, old, new);
        if (likely(cmp == old)) {
            return;
        }
        old = cmp;
    }
}

typedef void stg_store1(uint64_t, uint8_t *, int);

static inline void do_stg(CPUARMState *env, uint64_t ptr, uint64_t xt,
                          uintptr_t ra, stg_store1 store1)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uint8_t *mem;

    check_tag_aligned(env, ptr, ra);

    /* Trap if accessing an invalid page.  */
    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, TAG_GRANULE,
                             MMU_DATA_STORE, 1, ra);

    /* Store if page supports tags.  */
    if (mem) {
        store1(ptr, mem, allocation_tag_from_addr(xt));
    }
}

void HELPER(stg)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_stg(env, ptr, xt, GETPC(), store_tag1);
}

void HELPER(stg_parallel)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_stg(env, ptr, xt, GETPC(), store_tag1_parallel);
}

void HELPER(stg_stub)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();

    check_tag_aligned(env, ptr, ra);
    probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
}

static inline void do_st2g(CPUARMState *env, uint64_t ptr, uint64_t xt,
                           uintptr_t ra, stg_store1 store1)
{
    int mmu_idx = cpu_mmu_index(env, false);
    int tag = allocation_tag_from_addr(xt);
    uint8_t *mem1, *mem2;

    check_tag_aligned(env, ptr, ra);

    /*
     * Trap if accessing an invalid page(s).
     * This takes priority over !allocation_tag_access_enabled.
     */
    if (ptr & TAG_GRANULE) {
        /* Two stores unaligned mod TAG_GRANULE*2 -- modify two bytes. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                  TAG_GRANULE, MMU_DATA_STORE, 1, ra);
        mem2 = allocation_tag_mem(env, mmu_idx, ptr + TAG_GRANULE,
                                  MMU_DATA_STORE, TAG_GRANULE,
                                  MMU_DATA_STORE, 1, ra);

        /* Store if page(s) support tags. */
        if (mem1) {
            store1(TAG_GRANULE, mem1, tag);
        }
        if (mem2) {
            store1(0, mem2, tag);
        }
    } else {
        /* Two stores aligned mod TAG_GRANULE*2 -- modify one byte. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                  2 * TAG_GRANULE, MMU_DATA_STORE, 1, ra);
        if (mem1) {
            tag |= tag << 4;
            atomic_set(mem1, tag);
        }
    }
}

void HELPER(st2g)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_st2g(env, ptr, xt, GETPC(), store_tag1);
}

void HELPER(st2g_parallel)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_st2g(env, ptr, xt, GETPC(), store_tag1_parallel);
}

void HELPER(st2g_stub)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    int in_page = -(ptr | TARGET_PAGE_MASK);

    check_tag_aligned(env, ptr, ra);

    if (likely(in_page >= 2 * TAG_GRANULE)) {
        probe_write(env, ptr, 2 * TAG_GRANULE, mmu_idx, ra);
    } else {
        probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
        probe_write(env, ptr + TAG_GRANULE, TAG_GRANULE, mmu_idx, ra);
    }
}

#define LDGM_STGM_SIZE  (4 << GMID_EL1_BS)

uint64_t HELPER(ldgm)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    void *tag_mem;

    ptr = QEMU_ALIGN_DOWN(ptr, LDGM_STGM_SIZE);

    /* Trap if accessing an invalid page.  */
    tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD,
                                 LDGM_STGM_SIZE, MMU_DATA_LOAD,
                                 LDGM_STGM_SIZE / (2 * TAG_GRANULE), ra);

    /* The tag is squashed to zero if the page does not support tags. */
    if (!tag_mem) {
        return 0;
    }

    QEMU_BUILD_BUG_ON(GMID_EL1_BS != 6);
    /*
     * We are loading 64-bits worth of tags.  The ordering of elements
     * within the word corresponds to a 64-bit little-endian operation.
     */
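    /*
     * With GMID_EL1_BS == 6, LDGM_STGM_SIZE is 4 << 6 == 256 bytes,
     * i.e. 16 tag granules, whose 16 tag nibbles fill exactly the 8
     * tag bytes read by the single 64-bit load below.
     */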
    return ldq_le_p(tag_mem);
}

void HELPER(stgm)(CPUARMState *env, uint64_t ptr, uint64_t val)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    void *tag_mem;

    ptr = QEMU_ALIGN_DOWN(ptr, LDGM_STGM_SIZE);

    /* Trap if accessing an invalid page.  */
    tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                 LDGM_STGM_SIZE, MMU_DATA_LOAD,
                                 LDGM_STGM_SIZE / (2 * TAG_GRANULE), ra);

    /*
     * Tag store only happens if the page supports tags,
     * and if the OS has enabled access to the tags.
     */
    if (!tag_mem) {
        return;
    }

    QEMU_BUILD_BUG_ON(GMID_EL1_BS != 6);
    /*
     * We are storing 64-bits worth of tags.  The ordering of elements
     * within the word corresponds to a 64-bit little-endian operation.
     */
    stq_le_p(tag_mem, val);
}

void HELPER(stzgm_tags)(CPUARMState *env, uint64_t ptr, uint64_t val)
{
    uintptr_t ra = GETPC();
    int mmu_idx = cpu_mmu_index(env, false);
    int log2_dcz_bytes, log2_tag_bytes;
    intptr_t dcz_bytes, tag_bytes;
    uint8_t *mem;

    /*
     * In arm_cpu_realizefn, we assert that dcz > LOG2_TAG_GRANULE+1,
     * i.e. 32 bytes, which is an unreasonably small dcz anyway,
     * to make sure that we can access one complete tag byte here.
     */
    log2_dcz_bytes = env_archcpu(env)->dcz_blocksize + 2;
    log2_tag_bytes = log2_dcz_bytes - (LOG2_TAG_GRANULE + 1);
    dcz_bytes = (intptr_t)1 << log2_dcz_bytes;
    tag_bytes = (intptr_t)1 << log2_tag_bytes;
    ptr &= -dcz_bytes;

    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, dcz_bytes,
                             MMU_DATA_STORE, tag_bytes, ra);
    if (mem) {
        int tag_pair = (val & 0xf) * 0x11;
        memset(mem, tag_pair, tag_bytes);
    }
}

/* Record a tag check failure.  */
static void mte_check_fail(CPUARMState *env, int mmu_idx,
                           uint64_t dirty_ptr, uintptr_t ra)
{
    ARMMMUIdx arm_mmu_idx = core_to_aa64_mmu_idx(mmu_idx);
    int el, reg_el, tcf, select;
    uint64_t sctlr;

    reg_el = regime_el(env, arm_mmu_idx);
    sctlr = env->cp15.sctlr_el[reg_el];

    switch (arm_mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E20_0:
        el = 0;
        tcf = extract64(sctlr, 38, 2);
        break;
    default:
        el = reg_el;
        tcf = extract64(sctlr, 40, 2);
    }

    switch (tcf) {
    case 1:
        /*
         * Tag check fail causes a synchronous exception.
         *
         * In restore_state_to_opc, we set the exception syndrome
         * for the load or store operation.  Unwind first so we
         * may overwrite that with the syndrome for the tag check.
         */
        cpu_restore_state(env_cpu(env), ra, true);
        env->exception.vaddress = dirty_ptr;
        raise_exception(env, EXCP_DATA_ABORT,
                        syn_data_abort_no_iss(el != 0, 0, 0, 0, 0, 0, 0x11),
                        exception_target_el(env));
        /* noreturn, but fall through to the assert anyway */

    case 0:
        /*
         * Tag check fail does not affect the PE.
         * We eliminate this case by not setting MTE_ACTIVE
         * in tb_flags, so that we never make this runtime call.
         */
        g_assert_not_reached();

    case 2:
        /* Tag check fail causes asynchronous flag set.  */
        mmu_idx = arm_mmu_idx_el(env, el);
        if (regime_has_2_ranges(mmu_idx)) {
            select = extract64(dirty_ptr, 55, 1);
        } else {
            select = 0;
        }
        env->cp15.tfsr_el[el] |= 1 << select;
        break;

    default:
        /* Case 3: Reserved. */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Tag check failure with SCTLR_EL%d.TCF%s "
                      "set to reserved value %d\n",
                      reg_el, el ? "" : "0", tcf);
        break;
    }
}

/*
 * Perform an MTE checked access for a single logical or atomic access.
 */
static bool mte_probe1_int(CPUARMState *env, uint32_t desc, uint64_t ptr,
                           uintptr_t ra, int bit55)
{
    int mem_tag, mmu_idx, ptr_tag, size;
    MMUAccessType type;
    uint8_t *mem;

    ptr_tag = allocation_tag_from_addr(ptr);

    if (tcma_check(desc, bit55, ptr_tag)) {
        return true;
    }

    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    type = FIELD_EX32(desc, MTEDESC, WRITE) ? MMU_DATA_STORE : MMU_DATA_LOAD;
    size = FIELD_EX32(desc, MTEDESC, ESIZE);

    mem = allocation_tag_mem(env, mmu_idx, ptr, type, size,
                             MMU_DATA_LOAD, 1, ra);
    if (!mem) {
        return true;
    }

    mem_tag = load_tag1(ptr, mem);
    return ptr_tag == mem_tag;
}

/*
 * No-fault version of mte_check1, to be used by SVE for MemSingleNF.
 * Returns false if the access is Checked and the check failed.  This
 * is only intended to probe the tag -- the validity of the page must
 * be checked beforehand.
 */
bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    int bit55 = extract64(ptr, 55, 1);

    /* If TBI is disabled, the access is unchecked. */
    if (unlikely(!tbi_check(desc, bit55))) {
        return true;
    }

    return mte_probe1_int(env, desc, ptr, 0, bit55);
}

uint64_t mte_check1(CPUARMState *env, uint32_t desc,
                    uint64_t ptr, uintptr_t ra)
{
    int bit55 = extract64(ptr, 55, 1);

    /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
    if (unlikely(!tbi_check(desc, bit55))) {
        return ptr;
    }

    if (unlikely(!mte_probe1_int(env, desc, ptr, ra, bit55))) {
        int mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
        mte_check_fail(env, mmu_idx, ptr, ra);
    }

    return useronly_clean_ptr(ptr);
}

uint64_t HELPER(mte_check1)(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    return mte_check1(env, desc, ptr, GETPC());
}

/*
 * Perform an MTE checked access for multiple logical accesses.
 */

/**
 * checkN:
 * @mem: tag memory to test
 * @odd: true to begin testing at tags at odd nibble
 * @cmp: the tag to compare against
 * @count: number of tags to test
 *
 * Return the number of successful tests.
 * Thus a return value < @count indicates a failure.
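 *
 * For example, a call with @count == 3 that finds a mismatch at the
 * second granule returns 1.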
 *
 * A note about sizes: count is expected to be small.
 *
 * The most common use will be LDP/STP of two integer registers,
 * which means 16 bytes of memory touching at most 2 tags, but
 * often the access is aligned and thus just 1 tag.
 *
 * Using AdvSIMD LD/ST (multiple), one can access 64 bytes of memory,
 * touching at most 5 tags.  SVE LDR/STR (vector) with the default
 * vector length is also 64 bytes; the maximum architectural length
 * is 256 bytes touching at most 9 tags.
 *
 * The loop below uses 7 logical operations and 1 memory operation
 * per tag pair.  An implementation that loads an aligned word and
 * uses masking to ignore adjacent tags requires 18 logical operations
 * and thus does not begin to pay off until 6 tags.
 * Which, according to the survey above, is unlikely to be common.
 */
static int checkN(uint8_t *mem, int odd, int cmp, int count)
{
    int n = 0, diff;

    /* Replicate the test tag and compare.  */
    cmp *= 0x11;
    diff = *mem++ ^ cmp;

    if (odd) {
        goto start_odd;
    }

    while (1) {
        /* Test even tag. */
        if (unlikely((diff) & 0x0f)) {
            break;
        }
        if (++n == count) {
            break;
        }

 start_odd:
        /* Test odd tag. */
        if (unlikely((diff) & 0xf0)) {
            break;
        }
        if (++n == count) {
            break;
        }

        diff = *mem++ ^ cmp;
    }
    return n;
}

uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
                    uint64_t ptr, uintptr_t ra)
{
    int mmu_idx, ptr_tag, bit55;
    uint64_t ptr_last, ptr_end, prev_page, next_page;
    uint64_t tag_first, tag_end;
    uint64_t tag_byte_first, tag_byte_end;
    uint32_t esize, total, tag_count, tag_size, n, c;
    uint8_t *mem1, *mem2;
    MMUAccessType type;

    bit55 = extract64(ptr, 55, 1);

    /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
    if (unlikely(!tbi_check(desc, bit55))) {
        return ptr;
    }

    ptr_tag = allocation_tag_from_addr(ptr);

    if (tcma_check(desc, bit55, ptr_tag)) {
        goto done;
    }

    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    type = FIELD_EX32(desc, MTEDESC, WRITE) ? MMU_DATA_STORE : MMU_DATA_LOAD;
    esize = FIELD_EX32(desc, MTEDESC, ESIZE);
    total = FIELD_EX32(desc, MTEDESC, TSIZE);

    /* Find the addr of the end of the access, and of the last element. */
    ptr_end = ptr + total;
    ptr_last = ptr_end - esize;

    /* Round the bounds to the tag granule, and compute the number of tags. */
    tag_first = QEMU_ALIGN_DOWN(ptr, TAG_GRANULE);
    tag_end = QEMU_ALIGN_UP(ptr_last, TAG_GRANULE);
    tag_count = (tag_end - tag_first) / TAG_GRANULE;

    /* Round the bounds to twice the tag granule, and compute the bytes. */
    tag_byte_first = QEMU_ALIGN_DOWN(ptr, 2 * TAG_GRANULE);
    tag_byte_end = QEMU_ALIGN_UP(ptr_last, 2 * TAG_GRANULE);

    /* Locate the page boundaries. */
    prev_page = ptr & TARGET_PAGE_MASK;
    next_page = prev_page + TARGET_PAGE_SIZE;

    if (likely(tag_end - prev_page <= TARGET_PAGE_SIZE)) {
        /* Memory access stays on one page. */
        tag_size = (tag_byte_end - tag_byte_first) / (2 * TAG_GRANULE);
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, total,
                                  MMU_DATA_LOAD, tag_size, ra);
        if (!mem1) {
            goto done;
        }
        /* Perform all of the comparisons. */
        n = checkN(mem1, ptr & TAG_GRANULE, ptr_tag, tag_count);
    } else {
        /* Memory access crosses to next page. */
        tag_size = (next_page - tag_byte_first) / (2 * TAG_GRANULE);
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, next_page - ptr,
                                  MMU_DATA_LOAD, tag_size, ra);

        tag_size = (tag_byte_end - next_page) / (2 * TAG_GRANULE);
        mem2 = allocation_tag_mem(env, mmu_idx, next_page, type,
                                  ptr_end - next_page,
                                  MMU_DATA_LOAD, tag_size, ra);

        /*
         * Perform all of the comparisons.
         * Note the possible but unlikely case of the operation spanning
         * two pages that do not both have tagging enabled.
         */
        n = c = (next_page - tag_first) / TAG_GRANULE;
        if (mem1) {
            n = checkN(mem1, ptr & TAG_GRANULE, ptr_tag, c);
        }
        if (n == c) {
            if (!mem2) {
                goto done;
            }
            n += checkN(mem2, 0, ptr_tag, tag_count - c);
        }
    }

    /*
     * If we failed, we know which granule.  Compute the element that
     * is first in that granule, and signal failure on that element.
     */
    if (unlikely(n < tag_count)) {
        uint64_t fail_ofs;

        fail_ofs = tag_first + n * TAG_GRANULE - ptr;
        fail_ofs = ROUND_UP(fail_ofs, esize);
        mte_check_fail(env, mmu_idx, ptr + fail_ofs, ra);
    }

 done:
    return useronly_clean_ptr(ptr);
}

uint64_t HELPER(mte_checkN)(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    return mte_checkN(env, desc, ptr, GETPC());
}

/*
 * Perform an MTE checked access for DC_ZVA.
 */
uint64_t HELPER(mte_check_zva)(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    uintptr_t ra = GETPC();
    int log2_dcz_bytes, log2_tag_bytes;
    int mmu_idx, bit55;
    intptr_t dcz_bytes, tag_bytes, i;
    void *mem;
    uint64_t ptr_tag, mem_tag, align_ptr;

    bit55 = extract64(ptr, 55, 1);

    /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
    if (unlikely(!tbi_check(desc, bit55))) {
        return ptr;
    }

    ptr_tag = allocation_tag_from_addr(ptr);

    if (tcma_check(desc, bit55, ptr_tag)) {
        goto done;
    }

    /*
     * In arm_cpu_realizefn, we asserted that dcz > LOG2_TAG_GRANULE+1,
     * i.e. 32 bytes, which is an unreasonably small dcz anyway, to make
     * sure that we can access one complete tag byte here.
     */
    log2_dcz_bytes = env_archcpu(env)->dcz_blocksize + 2;
    log2_tag_bytes = log2_dcz_bytes - (LOG2_TAG_GRANULE + 1);
    dcz_bytes = (intptr_t)1 << log2_dcz_bytes;
    tag_bytes = (intptr_t)1 << log2_tag_bytes;
    align_ptr = ptr & -dcz_bytes;

    /*
     * Trap if accessing an invalid page.  DC_ZVA requires that we supply
     * the original pointer for an invalid page.  But watchpoints require
     * that we probe the actual space.  So do both.
     */
    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    (void) probe_write(env, ptr, 1, mmu_idx, ra);
    mem = allocation_tag_mem(env, mmu_idx, align_ptr, MMU_DATA_STORE,
                             dcz_bytes, MMU_DATA_LOAD, tag_bytes, ra);
    if (!mem) {
        goto done;
    }

    /*
     * Unlike the reasoning for checkN, DC_ZVA is always aligned, and thus
     * it is quite easy to perform all of the comparisons at once without
     * any extra masking.
     *
     * The most common zva block size is 64; some of the thunderx cpus use
     * a block size of 128.  For user-only, aarch64_max_initfn will set the
     * block size to 512.  Fill out the other cases for future-proofing.
     *
     * In order to be able to find the first miscompare later, we want the
     * tag bytes to be in little-endian order.
     */
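    /*
     * E.g. for a 64-byte block (log2_tag_bytes == 1), the two tag bytes
     * hold four tag nibbles; cpu_to_le16 places the byte for the lowest
     * pair of granules in the least significant position, so the ctz64
     * in the failure path below finds the first mismatching granule.
     */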
    switch (log2_tag_bytes) {
    case 0: /* zva_blocksize 32 */
        mem_tag = *(uint8_t *)mem;
        ptr_tag *= 0x11u;
        break;
    case 1: /* zva_blocksize 64 */
        mem_tag = cpu_to_le16(*(uint16_t *)mem);
        ptr_tag *= 0x1111u;
        break;
    case 2: /* zva_blocksize 128 */
        mem_tag = cpu_to_le32(*(uint32_t *)mem);
        ptr_tag *= 0x11111111u;
        break;
    case 3: /* zva_blocksize 256 */
        mem_tag = cpu_to_le64(*(uint64_t *)mem);
        ptr_tag *= 0x1111111111111111ull;
        break;

    default: /* zva_blocksize 512, 1024, 2048 */
        ptr_tag *= 0x1111111111111111ull;
        i = 0;
        do {
            mem_tag = cpu_to_le64(*(uint64_t *)(mem + i));
            if (unlikely(mem_tag != ptr_tag)) {
                goto fail;
            }
            i += 8;
            align_ptr += 16 * TAG_GRANULE;
        } while (i < tag_bytes);
        goto done;
    }

    if (likely(mem_tag == ptr_tag)) {
        goto done;
    }

 fail:
    /* Locate the first nibble that differs. */
    i = ctz64(mem_tag ^ ptr_tag) >> 4;
    mte_check_fail(env, mmu_idx, align_ptr + i * TAG_GRANULE, ra);

 done:
    return useronly_clean_ptr(ptr);
}