/*
 * ARM v8.5-MemTag Operations
 *
 * Copyright (c) 2020 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/ram_addr.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"

static int choose_nonexcluded_tag(int tag, int offset, uint16_t exclude)
{
    if (exclude == 0xffff) {
        return 0;
    }

    if (offset == 0) {
        while (exclude & (1 << tag)) {
            tag = (tag + 1) & 15;
        }
    } else {
        do {
            do {
                tag = (tag + 1) & 15;
            } while (exclude & (1 << tag));
        } while (--offset > 0);
    }
    return tag;
}
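
/*
 * Worked example for choose_nonexcluded_tag (illustrative, not part of
 * the original source): with exclude == 0x0005 (tags 0 and 2 excluded),
 * tag == 0 and offset == 2, the first pass advances to tag 1 (the first
 * non-excluded tag) and the second pass skips the excluded tag 2 to
 * land on tag 3, which is returned.
 */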

/**
 * allocation_tag_mem:
 * @env: the cpu environment
 * @ptr_mmu_idx: the addressing regime to use for the virtual address
 * @ptr: the virtual address for which to look up tag memory
 * @ptr_access: the access to use for the virtual address
 * @ptr_size: the number of bytes in the normal memory access
 * @tag_access: the access to use for the tag memory
 * @tag_size: the number of bytes in the tag memory access
 * @ra: the return address for exception handling
 *
 * Our tag memory is formatted as a sequence of little-endian nibbles.
 * That is, the byte at (addr >> (LOG2_TAG_GRANULE + 1)) contains two
 * tags, with the tag at [3:0] for the lower addr and the tag at [7:4]
 * for the higher addr.
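 *
 * For example, with LOG2_TAG_GRANULE == 4 (16-byte granules), the tags
 * for the granules at 0x100 and 0x110 share the tag byte at index
 * (0x100 >> 5) == 0x08: bits [3:0] hold the tag for 0x100 and bits
 * [7:4] hold the tag for 0x110.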
 *
 * Here, resolve the physical address from the virtual address, and return
 * a pointer to the corresponding tag byte.  Exit with exception if the
 * virtual address is not accessible for @ptr_access.
 *
 * The @ptr_size and @tag_size values may not have an obvious relation
 * due to the alignment of @ptr, and the number of tag checks required.
 *
 * If there is no tag storage corresponding to @ptr, return NULL.
 */
static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
                                   uint64_t ptr, MMUAccessType ptr_access,
                                   int ptr_size, MMUAccessType tag_access,
                                   int tag_size, uintptr_t ra)
{
#ifdef CONFIG_USER_ONLY
    /* Tag storage not implemented. */
    return NULL;
#else
    uintptr_t index;
    CPUIOTLBEntry *iotlbentry;
    int in_page, flags;
    ram_addr_t ptr_ra;
    hwaddr ptr_paddr, tag_paddr, xlat;
    MemoryRegion *mr;
    ARMASIdx tag_asi;
    AddressSpace *tag_as;
    void *host;

    /*
     * Probe the first byte of the virtual address.  This raises an
     * exception for inaccessible pages, and resolves the virtual address
     * into the softmmu tlb.
     *
     * When RA == 0, this is for mte_probe1.  The page is expected to be
     * valid.  Indicate to probe_access_flags no-fault, then assert that
     * we received a valid page.
     */
    flags = probe_access_flags(env, ptr, ptr_access, ptr_mmu_idx,
                               ra == 0, &host, ra);
    assert(!(flags & TLB_INVALID_MASK));

    /*
     * Find the iotlbentry for ptr.  This *must* be present in the TLB
     * because we just found the mapping.
     * TODO: Perhaps there should be a cputlb helper that returns a
     * matching tlb entry + iotlb entry.
     */
    index = tlb_index(env, ptr_mmu_idx, ptr);
# ifdef CONFIG_DEBUG_TCG
    {
        CPUTLBEntry *entry = tlb_entry(env, ptr_mmu_idx, ptr);
        target_ulong comparator = (ptr_access == MMU_DATA_LOAD
                                   ? entry->addr_read
                                   : tlb_addr_write(entry));
        g_assert(tlb_hit(comparator, ptr));
    }
# endif
    iotlbentry = &env_tlb(env)->d[ptr_mmu_idx].iotlb[index];

    /* If the virtual page MemAttr != Tagged, access unchecked. */
    if (!arm_tlb_mte_tagged(&iotlbentry->attrs)) {
        return NULL;
    }

    /*
     * If not backed by host ram, there is no tag storage: access unchecked.
     * This is probably a guest os bug though, so log it.
     */
    if (unlikely(flags & TLB_MMIO)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Page @ 0x%" PRIx64 " indicates Tagged Normal memory "
                      "but is not backed by host ram\n", ptr);
        return NULL;
    }

    /*
     * The Normal memory access can extend to the next page.  E.g. a single
     * 8-byte access to the last byte of a page will check only the last
     * tag on the first page.
     * Any page access exception has priority over tag check exception.
     */
    in_page = -(ptr | TARGET_PAGE_MASK);
    if (unlikely(ptr_size > in_page)) {
        void *ignore;
        flags |= probe_access_flags(env, ptr + in_page, ptr_access,
                                    ptr_mmu_idx, ra == 0, &ignore, ra);
        assert(!(flags & TLB_INVALID_MASK));
    }

    /* Any debug exception has priority over a tag check exception. */
    if (unlikely(flags & TLB_WATCHPOINT)) {
        int wp = ptr_access == MMU_DATA_LOAD ? BP_MEM_READ : BP_MEM_WRITE;
        assert(ra != 0);
        cpu_check_watchpoint(env_cpu(env), ptr, ptr_size,
                             iotlbentry->attrs, wp, ra);
    }

    /*
     * Find the physical address within the normal mem space.
     * The memory region lookup must succeed because TLB_MMIO was
     * not set in the cputlb lookup above.
     */
    mr = memory_region_from_host(host, &ptr_ra);
    tcg_debug_assert(mr != NULL);
    tcg_debug_assert(memory_region_is_ram(mr));
    ptr_paddr = ptr_ra;
    do {
        ptr_paddr += mr->addr;
        mr = mr->container;
    } while (mr);

    /* Convert to the physical address in tag space.  */
    tag_paddr = ptr_paddr >> (LOG2_TAG_GRANULE + 1);
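    /*
     * For example (illustrative): with LOG2_TAG_GRANULE == 4, normal
     * physical address 0x40001000 maps to tag-space address
     * 0x40001000 >> 5 == 0x02000080.
     */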

    /* Look up the address in tag space. */
    tag_asi = iotlbentry->attrs.secure ? ARMASIdx_TagS : ARMASIdx_TagNS;
    tag_as = cpu_get_address_space(env_cpu(env), tag_asi);
    mr = address_space_translate(tag_as, tag_paddr, &xlat, NULL,
                                 tag_access == MMU_DATA_STORE,
                                 iotlbentry->attrs);

    /*
     * Note that @mr will never be NULL.  If there is nothing in the address
     * space at @tag_paddr, the translation will return the unallocated memory
     * region.  For our purposes, the result must be ram.
     */
    if (unlikely(!memory_region_is_ram(mr))) {
        /* ??? Failure is a board configuration error. */
        qemu_log_mask(LOG_UNIMP,
                      "Tag Memory @ 0x%" HWADDR_PRIx " not found for "
                      "Normal Memory @ 0x%" HWADDR_PRIx "\n",
                      tag_paddr, ptr_paddr);
        return NULL;
    }

    /*
     * Ensure the tag memory is dirty on write, for migration.
     * Tag memory can never contain code or display memory (vga).
     */
    if (tag_access == MMU_DATA_STORE) {
        ram_addr_t tag_ra = memory_region_get_ram_addr(mr) + xlat;
        cpu_physical_memory_set_dirty_flag(tag_ra, DIRTY_MEMORY_MIGRATION);
    }

    return memory_region_get_ram_ptr(mr) + xlat;
#endif
}

uint64_t HELPER(irg)(CPUARMState *env, uint64_t rn, uint64_t rm)
{
    uint16_t exclude = extract32(rm | env->cp15.gcr_el1, 0, 16);
    int rrnd = extract32(env->cp15.gcr_el1, 16, 1);
    int start = extract32(env->cp15.rgsr_el1, 0, 4);
    int seed = extract32(env->cp15.rgsr_el1, 8, 16);
    int offset, i, rtag;

    /*
     * Our IMPDEF choice for GCR_EL1.RRND==1 is to continue to use the
     * deterministic algorithm.  Except that with RRND==1 the kernel is
     * not required to have set RGSR_EL1.SEED != 0, which is required for
     * the deterministic algorithm to function.  So we force a non-zero
     * SEED for that case.
     */
    if (unlikely(seed == 0) && rrnd) {
        do {
            Error *err = NULL;
            uint16_t two;

            if (qemu_guest_getrandom(&two, sizeof(two), &err) < 0) {
                /*
                 * Failed, for unknown reasons in the crypto subsystem.
                 * Best we can do is log the reason and use a constant seed.
                 */
                qemu_log_mask(LOG_UNIMP, "IRG: Crypto failure: %s\n",
                              error_get_pretty(err));
                error_free(err);
                two = 1;
            }
            seed = two;
        } while (seed == 0);
    }

    /* RandomTag */
    for (i = offset = 0; i < 4; ++i) {
        /* NextRandomTagBit */
        int top = (extract32(seed, 5, 1) ^ extract32(seed, 3, 1) ^
                   extract32(seed, 2, 1) ^ extract32(seed, 0, 1));
        seed = (top << 15) | (seed >> 1);
        offset |= top << i;
    }
    rtag = choose_nonexcluded_tag(start, offset, exclude);
    env->cp15.rgsr_el1 = rtag | (seed << 8);

    return address_with_allocation_tag(rn, rtag);
}
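
/*
 * A worked step of the LFSR in the RandomTag loop above (illustrative):
 * for seed == 0x0001, bits 5, 3 and 2 are clear and bit 0 is set, so
 * top == 1 and the next seed is (1 << 15) | (0x0001 >> 1) == 0x8000,
 * contributing bit i to offset.
 */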

uint64_t HELPER(addsubg)(CPUARMState *env, uint64_t ptr,
                         int32_t offset, uint32_t tag_offset)
{
    int start_tag = allocation_tag_from_addr(ptr);
    uint16_t exclude = extract32(env->cp15.gcr_el1, 0, 16);
    int rtag = choose_nonexcluded_tag(start_tag, tag_offset, exclude);

    return address_with_allocation_tag(ptr + offset, rtag);
}

static int load_tag1(uint64_t ptr, uint8_t *mem)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    return extract32(*mem, ofs, 4);
}
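
/*
 * Note: in load_tag1, bit LOG2_TAG_GRANULE of @ptr selects the nibble.
 * With 16-byte granules, ptr bit 4 clear reads bits [3:0] of the tag
 * byte and bit 4 set reads bits [7:4], matching the little-endian
 * nibble layout described above allocation_tag_mem.
 */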

uint64_t HELPER(ldg)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uint8_t *mem;
    int rtag = 0;

    /* Trap if accessing an invalid page.  */
    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD, 1,
                             MMU_DATA_LOAD, 1, GETPC());

    /* Load if page supports tags. */
    if (mem) {
        rtag = load_tag1(ptr, mem);
    }

    return address_with_allocation_tag(xt, rtag);
}

static void check_tag_aligned(CPUARMState *env, uint64_t ptr, uintptr_t ra)
{
    if (unlikely(!QEMU_IS_ALIGNED(ptr, TAG_GRANULE))) {
        arm_cpu_do_unaligned_access(env_cpu(env), ptr, MMU_DATA_STORE,
                                    cpu_mmu_index(env, false), ra);
        g_assert_not_reached();
    }
}

/* For use in a non-parallel context, store to the given nibble.  */
static void store_tag1(uint64_t ptr, uint8_t *mem, int tag)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    *mem = deposit32(*mem, ofs, 4, tag);
}

/* For use in a parallel context, atomically store to the given nibble.  */
static void store_tag1_parallel(uint64_t ptr, uint8_t *mem, int tag)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    uint8_t old = qatomic_read(mem);

    while (1) {
        uint8_t new = deposit32(old, ofs, 4, tag);
        uint8_t cmp = qatomic_cmpxchg(mem, old, new);
        if (likely(cmp == old)) {
            return;
        }
        old = cmp;
    }
}
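
/*
 * The compare-and-swap loop above retries with the freshly observed
 * byte whenever another vcpu concurrently updates the sibling nibble;
 * only the 4 bits at @ofs are modified by this store.
 */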

typedef void stg_store1(uint64_t, uint8_t *, int);

static inline void do_stg(CPUARMState *env, uint64_t ptr, uint64_t xt,
                          uintptr_t ra, stg_store1 store1)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uint8_t *mem;

    check_tag_aligned(env, ptr, ra);

    /* Trap if accessing an invalid page.  */
    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, TAG_GRANULE,
                             MMU_DATA_STORE, 1, ra);

    /* Store if page supports tags. */
    if (mem) {
        store1(ptr, mem, allocation_tag_from_addr(xt));
    }
}

void HELPER(stg)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_stg(env, ptr, xt, GETPC(), store_tag1);
}

void HELPER(stg_parallel)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_stg(env, ptr, xt, GETPC(), store_tag1_parallel);
}

void HELPER(stg_stub)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();

    check_tag_aligned(env, ptr, ra);
    probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
}

static inline void do_st2g(CPUARMState *env, uint64_t ptr, uint64_t xt,
                           uintptr_t ra, stg_store1 store1)
{
    int mmu_idx = cpu_mmu_index(env, false);
    int tag = allocation_tag_from_addr(xt);
    uint8_t *mem1, *mem2;

    check_tag_aligned(env, ptr, ra);

    /*
     * Trap if accessing an invalid page(s).
     * This takes priority over !allocation_tag_access_enabled.
     */
    if (ptr & TAG_GRANULE) {
        /* Two stores unaligned mod TAG_GRANULE*2 -- modify two bytes. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                  TAG_GRANULE, MMU_DATA_STORE, 1, ra);
        mem2 = allocation_tag_mem(env, mmu_idx, ptr + TAG_GRANULE,
                                  MMU_DATA_STORE, TAG_GRANULE,
                                  MMU_DATA_STORE, 1, ra);

        /* Store if page(s) support tags. */
        if (mem1) {
            store1(TAG_GRANULE, mem1, tag);
        }
        if (mem2) {
            store1(0, mem2, tag);
        }
    } else {
        /* Two stores aligned mod TAG_GRANULE*2 -- modify one byte. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                  2 * TAG_GRANULE, MMU_DATA_STORE, 1, ra);
        if (mem1) {
            tag |= tag << 4;
            qatomic_set(mem1, tag);
        }
    }
}
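
/*
 * In the aligned case above both nibbles of the tag byte receive the
 * same tag, so the byte is written whole (tag replicated via
 * tag |= tag << 4) with a single qatomic_set, which suffices for both
 * the parallel and non-parallel helpers.
 */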

void HELPER(st2g)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_st2g(env, ptr, xt, GETPC(), store_tag1);
}

void HELPER(st2g_parallel)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_st2g(env, ptr, xt, GETPC(), store_tag1_parallel);
}

void HELPER(st2g_stub)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    int in_page = -(ptr | TARGET_PAGE_MASK);

    check_tag_aligned(env, ptr, ra);

    if (likely(in_page >= 2 * TAG_GRANULE)) {
        probe_write(env, ptr, 2 * TAG_GRANULE, mmu_idx, ra);
    } else {
        probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
        probe_write(env, ptr + TAG_GRANULE, TAG_GRANULE, mmu_idx, ra);
    }
}

#define LDGM_STGM_SIZE  (4 << GMID_EL1_BS)
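
/*
 * With GMID_EL1_BS == 6 this is 256 bytes, i.e. 16 tag granules whose
 * tags occupy 8 tag bytes -- the 64 bits of tags moved by LDGM/STGM.
 */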

uint64_t HELPER(ldgm)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    void *tag_mem;

    ptr = QEMU_ALIGN_DOWN(ptr, LDGM_STGM_SIZE);

    /* Trap if accessing an invalid page.  */
    tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD,
                                 LDGM_STGM_SIZE, MMU_DATA_LOAD,
                                 LDGM_STGM_SIZE / (2 * TAG_GRANULE), ra);

    /* The tag is squashed to zero if the page does not support tags.  */
    if (!tag_mem) {
        return 0;
    }

    QEMU_BUILD_BUG_ON(GMID_EL1_BS != 6);
    /*
     * We are loading 64-bits worth of tags.  The ordering of elements
     * within the word corresponds to a 64-bit little-endian operation.
     */
    return ldq_le_p(tag_mem);
}

void HELPER(stgm)(CPUARMState *env, uint64_t ptr, uint64_t val)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    void *tag_mem;

    ptr = QEMU_ALIGN_DOWN(ptr, LDGM_STGM_SIZE);

    /* Trap if accessing an invalid page.  */
    tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                 LDGM_STGM_SIZE, MMU_DATA_LOAD,
                                 LDGM_STGM_SIZE / (2 * TAG_GRANULE), ra);

    /*
     * Tag store only happens if the page supports tags,
     * and if the OS has enabled access to the tags.
     */
    if (!tag_mem) {
        return;
    }

    QEMU_BUILD_BUG_ON(GMID_EL1_BS != 6);
    /*
     * We are storing 64-bits worth of tags.  The ordering of elements
     * within the word corresponds to a 64-bit little-endian operation.
     */
    stq_le_p(tag_mem, val);
}

void HELPER(stzgm_tags)(CPUARMState *env, uint64_t ptr, uint64_t val)
{
    uintptr_t ra = GETPC();
    int mmu_idx = cpu_mmu_index(env, false);
    int log2_dcz_bytes, log2_tag_bytes;
    intptr_t dcz_bytes, tag_bytes;
    uint8_t *mem;

    /*
     * In arm_cpu_realizefn, we assert that dcz > LOG2_TAG_GRANULE+1,
     * i.e. 32 bytes, which is an unreasonably small dcz anyway,
     * to make sure that we can access one complete tag byte here.
     */
    log2_dcz_bytes = env_archcpu(env)->dcz_blocksize + 2;
    log2_tag_bytes = log2_dcz_bytes - (LOG2_TAG_GRANULE + 1);
    dcz_bytes = (intptr_t)1 << log2_dcz_bytes;
    tag_bytes = (intptr_t)1 << log2_tag_bytes;
    ptr &= -dcz_bytes;

    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, dcz_bytes,
                             MMU_DATA_STORE, tag_bytes, ra);
    if (mem) {
        int tag_pair = (val & 0xf) * 0x11;
        memset(mem, tag_pair, tag_bytes);
    }
}
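
/*
 * Worked sizing for stzgm_tags (illustrative): with dcz_blocksize == 4,
 * i.e. a 64-byte DC ZVA block, log2_dcz_bytes == 6 and log2_tag_bytes
 * == 1, so the block covers 4 granules whose tags occupy tag_bytes == 2,
 * each byte filled with the replicated pair (val & 0xf) * 0x11.
 */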

/* Record a tag check failure.  */
static void mte_check_fail(CPUARMState *env, uint32_t desc,
                           uint64_t dirty_ptr, uintptr_t ra)
{
    int mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    ARMMMUIdx arm_mmu_idx = core_to_aa64_mmu_idx(mmu_idx);
    int el, reg_el, tcf, select, is_write, syn;
    uint64_t sctlr;

    reg_el = regime_el(env, arm_mmu_idx);
    sctlr = env->cp15.sctlr_el[reg_el];

    el = arm_current_el(env);
    if (el == 0) {
        tcf = extract64(sctlr, 38, 2);
    } else {
        tcf = extract64(sctlr, 40, 2);
    }

    switch (tcf) {
    case 1:
        /*
         * Tag check fail causes a synchronous exception.
         *
         * In restore_state_to_opc, we set the exception syndrome
         * for the load or store operation.  Unwind first so we
         * may overwrite that with the syndrome for the tag check.
         */
        cpu_restore_state(env_cpu(env), ra, true);
        env->exception.vaddress = dirty_ptr;

        is_write = FIELD_EX32(desc, MTEDESC, WRITE);
        syn = syn_data_abort_no_iss(el != 0, 0, 0, 0, 0, is_write, 0x11);
        raise_exception(env, EXCP_DATA_ABORT, syn, exception_target_el(env));
        /* noreturn, but fall through to the assert anyway */

    case 0:
        /*
         * Tag check fail does not affect the PE.
         * We eliminate this case by not setting MTE_ACTIVE
         * in tb_flags, so that we never make this runtime call.
         */
        g_assert_not_reached();

    case 2:
        /* Tag check fail causes asynchronous flag set.  */
        if (regime_has_2_ranges(arm_mmu_idx)) {
            select = extract64(dirty_ptr, 55, 1);
        } else {
            select = 0;
        }
        env->cp15.tfsr_el[el] |= 1 << select;
        break;

    default:
        /* Case 3: Reserved. */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Tag check failure with SCTLR_EL%d.TCF%s "
                      "set to reserved value %d\n",
                      reg_el, el ? "" : "0", tcf);
        break;
    }
}

/*
 * Perform an MTE checked access for a single logical or atomic access.
 */
static bool mte_probe1_int(CPUARMState *env, uint32_t desc, uint64_t ptr,
                           uintptr_t ra, int bit55)
{
    int mem_tag, mmu_idx, ptr_tag, size;
    MMUAccessType type;
    uint8_t *mem;

    ptr_tag = allocation_tag_from_addr(ptr);

    if (tcma_check(desc, bit55, ptr_tag)) {
        return true;
    }

    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    type = FIELD_EX32(desc, MTEDESC, WRITE) ? MMU_DATA_STORE : MMU_DATA_LOAD;
    size = FIELD_EX32(desc, MTEDESC, ESIZE);

    mem = allocation_tag_mem(env, mmu_idx, ptr, type, size,
                             MMU_DATA_LOAD, 1, ra);
    if (!mem) {
        return true;
    }

    mem_tag = load_tag1(ptr, mem);
    return ptr_tag == mem_tag;
}

/*
 * No-fault version of mte_check1, to be used by SVE for MemSingleNF.
 * Returns false if the access is Checked and the check failed.  This
 * is only intended to probe the tag -- the validity of the page must
 * be checked beforehand.
 */
bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    int bit55 = extract64(ptr, 55, 1);

    /* If TBI is disabled, the access is unchecked. */
    if (unlikely(!tbi_check(desc, bit55))) {
        return true;
    }

    return mte_probe1_int(env, desc, ptr, 0, bit55);
}

uint64_t mte_check1(CPUARMState *env, uint32_t desc,
                    uint64_t ptr, uintptr_t ra)
{
    int bit55 = extract64(ptr, 55, 1);

    /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
    if (unlikely(!tbi_check(desc, bit55))) {
        return ptr;
    }

    if (unlikely(!mte_probe1_int(env, desc, ptr, ra, bit55))) {
        mte_check_fail(env, desc, ptr, ra);
    }

    return useronly_clean_ptr(ptr);
}

uint64_t HELPER(mte_check1)(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    return mte_check1(env, desc, ptr, GETPC());
}

/*
 * Perform an MTE checked access for multiple logical accesses.
 */

/**
 * checkN:
 * @tag: tag memory to test
 * @odd: true to begin testing at tags at odd nibble
 * @cmp: the tag to compare against
 * @count: number of tags to test
 *
 * Return the number of successful tests.
 * Thus a return value < @count indicates a failure.
 *
 * A note about sizes: count is expected to be small.
 *
 * The most common use will be LDP/STP of two integer registers,
 * which means 16 bytes of memory touching at most 2 tags, but
 * often the access is aligned and thus just 1 tag.
 *
 * Using AdvSIMD LD/ST (multiple), one can access 64 bytes of memory,
 * touching at most 5 tags.  SVE LDR/STR (vector) with the default
 * vector length is also 64 bytes; the maximum architectural length
 * is 256 bytes touching at most 9 tags.
 *
 * The loop below uses 7 logical operations and 1 memory operation
 * per tag pair.  An implementation that loads an aligned word and
 * uses masking to ignore adjacent tags requires 18 logical operations
 * and thus does not begin to pay off until 6 tags.
 * Which, according to the survey above, is unlikely to be common.
 */
static int checkN(uint8_t *mem, int odd, int cmp, int count)
{
    int n = 0, diff;

    /* Replicate the test tag and compare.  */
    cmp *= 0x11;
    diff = *mem++ ^ cmp;

    if (odd) {
        goto start_odd;
    }

    while (1) {
        /* Test even tag. */
        if (unlikely((diff) & 0x0f)) {
            break;
        }
        if (++n == count) {
            break;
        }

 start_odd:
        /* Test odd tag. */
        if (unlikely((diff) & 0xf0)) {
            break;
        }
        if (++n == count) {
            break;
        }

        diff = *mem++ ^ cmp;
    }
    return n;
}
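
/*
 * The replicate-and-xor trick in checkN, concretely (illustrative):
 * for cmp == 0x3, cmp * 0x11 == 0x33; xor against a tag byte holding
 * tags 3 (even) and 5 (odd), i.e. 0x53, yields 0x60, so the even test
 * (& 0x0f) passes and the odd test (& 0xf0) fails.
 */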

uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
                    uint64_t ptr, uintptr_t ra)
{
    int mmu_idx, ptr_tag, bit55;
    uint64_t ptr_last, ptr_end, prev_page, next_page;
    uint64_t tag_first, tag_end;
    uint64_t tag_byte_first, tag_byte_end;
    uint32_t esize, total, tag_count, tag_size, n, c;
    uint8_t *mem1, *mem2;
    MMUAccessType type;

    bit55 = extract64(ptr, 55, 1);

    /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
    if (unlikely(!tbi_check(desc, bit55))) {
        return ptr;
    }

    ptr_tag = allocation_tag_from_addr(ptr);

    if (tcma_check(desc, bit55, ptr_tag)) {
        goto done;
    }

    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    type = FIELD_EX32(desc, MTEDESC, WRITE) ? MMU_DATA_STORE : MMU_DATA_LOAD;
    esize = FIELD_EX32(desc, MTEDESC, ESIZE);
    total = FIELD_EX32(desc, MTEDESC, TSIZE);

    /* Find the addr of the end of the access, and of the last element. */
    ptr_end = ptr + total;
    ptr_last = ptr_end - esize;

    /* Round the bounds to the tag granule, and compute the number of tags. */
    tag_first = QEMU_ALIGN_DOWN(ptr, TAG_GRANULE);
    tag_end = QEMU_ALIGN_UP(ptr_last, TAG_GRANULE);
    tag_count = (tag_end - tag_first) / TAG_GRANULE;

    /* Round the bounds to twice the tag granule, and compute the bytes. */
    tag_byte_first = QEMU_ALIGN_DOWN(ptr, 2 * TAG_GRANULE);
    tag_byte_end = QEMU_ALIGN_UP(ptr_last, 2 * TAG_GRANULE);
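
    /*
     * E.g. (illustrative): an aligned LDP at ptr == 0x1000 with
     * total == 16 and esize == 8 gives ptr_end == 0x1010 and
     * ptr_last == 0x1008, hence tag_first == 0x1000, tag_end == 0x1010
     * and tag_count == 1: a single granule to check.
     */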

    /* Locate the page boundaries. */
    prev_page = ptr & TARGET_PAGE_MASK;
    next_page = prev_page + TARGET_PAGE_SIZE;

    if (likely(tag_end - prev_page <= TARGET_PAGE_SIZE)) {
        /* Memory access stays on one page. */
        tag_size = (tag_byte_end - tag_byte_first) / (2 * TAG_GRANULE);
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, total,
                                  MMU_DATA_LOAD, tag_size, ra);
        if (!mem1) {
            goto done;
        }
        /* Perform all of the comparisons. */
        n = checkN(mem1, ptr & TAG_GRANULE, ptr_tag, tag_count);
    } else {
        /* Memory access crosses to next page. */
        tag_size = (next_page - tag_byte_first) / (2 * TAG_GRANULE);
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, next_page - ptr,
                                  MMU_DATA_LOAD, tag_size, ra);

        tag_size = (tag_byte_end - next_page) / (2 * TAG_GRANULE);
        mem2 = allocation_tag_mem(env, mmu_idx, next_page, type,
                                  ptr_end - next_page,
                                  MMU_DATA_LOAD, tag_size, ra);

        /*
         * Perform all of the comparisons.
         * Note the possible but unlikely case of the operation spanning
         * two pages that do not both have tagging enabled.
         */
        n = c = (next_page - tag_first) / TAG_GRANULE;
        if (mem1) {
            n = checkN(mem1, ptr & TAG_GRANULE, ptr_tag, c);
        }
        if (n == c) {
            if (!mem2) {
                goto done;
            }
            n += checkN(mem2, 0, ptr_tag, tag_count - c);
        }
    }

    /*
     * If we failed, we know which granule.  Compute the element that
     * is first in that granule, and signal failure on that element.
     */
    if (unlikely(n < tag_count)) {
        uint64_t fail_ofs;

        fail_ofs = tag_first + n * TAG_GRANULE - ptr;
        fail_ofs = ROUND_UP(fail_ofs, esize);
        mte_check_fail(env, desc, ptr + fail_ofs, ra);
    }

 done:
    return useronly_clean_ptr(ptr);
}

uint64_t HELPER(mte_checkN)(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    return mte_checkN(env, desc, ptr, GETPC());
}

/*
 * Perform an MTE checked access for DC_ZVA.
 */
uint64_t HELPER(mte_check_zva)(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    uintptr_t ra = GETPC();
    int log2_dcz_bytes, log2_tag_bytes;
    int mmu_idx, bit55;
    intptr_t dcz_bytes, tag_bytes, i;
    void *mem;
    uint64_t ptr_tag, mem_tag, align_ptr;

    bit55 = extract64(ptr, 55, 1);

    /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
    if (unlikely(!tbi_check(desc, bit55))) {
        return ptr;
    }

    ptr_tag = allocation_tag_from_addr(ptr);

    if (tcma_check(desc, bit55, ptr_tag)) {
        goto done;
    }

    /*
     * In arm_cpu_realizefn, we asserted that dcz > LOG2_TAG_GRANULE+1,
     * i.e. 32 bytes, which is an unreasonably small dcz anyway, to make
     * sure that we can access one complete tag byte here.
     */
    log2_dcz_bytes = env_archcpu(env)->dcz_blocksize + 2;
    log2_tag_bytes = log2_dcz_bytes - (LOG2_TAG_GRANULE + 1);
    dcz_bytes = (intptr_t)1 << log2_dcz_bytes;
    tag_bytes = (intptr_t)1 << log2_tag_bytes;
    align_ptr = ptr & -dcz_bytes;

    /*
     * Trap if accessing an invalid page.  DC_ZVA requires that we supply
     * the original pointer for an invalid page.  But watchpoints require
     * that we probe the actual space.  So do both.
     */
    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    (void) probe_write(env, ptr, 1, mmu_idx, ra);
    mem = allocation_tag_mem(env, mmu_idx, align_ptr, MMU_DATA_STORE,
                             dcz_bytes, MMU_DATA_LOAD, tag_bytes, ra);
    if (!mem) {
        goto done;
    }

    /*
     * Unlike the reasoning for checkN, DC_ZVA is always aligned, and thus
     * it is quite easy to perform all of the comparisons at once without
     * extra masking.
     *
     * The most common zva block size is 64; some of the thunderx cpus use
     * a block size of 128.  For user-only, aarch64_max_initfn will set the
     * block size to 512.  Fill out the other cases for future-proofing.
     *
     * In order to be able to find the first miscompare later, we want the
     * tag bytes to be in little-endian order.
     */
    switch (log2_tag_bytes) {
    case 0: /* zva_blocksize 32 */
        mem_tag = *(uint8_t *)mem;
        ptr_tag *= 0x11u;
        break;
    case 1: /* zva_blocksize 64 */
        mem_tag = cpu_to_le16(*(uint16_t *)mem);
        ptr_tag *= 0x1111u;
        break;
    case 2: /* zva_blocksize 128 */
        mem_tag = cpu_to_le32(*(uint32_t *)mem);
        ptr_tag *= 0x11111111u;
        break;
    case 3: /* zva_blocksize 256 */
        mem_tag = cpu_to_le64(*(uint64_t *)mem);
        ptr_tag *= 0x1111111111111111ull;
        break;

    default: /* zva_blocksize 512, 1024, 2048 */
        ptr_tag *= 0x1111111111111111ull;
        i = 0;
        do {
            mem_tag = cpu_to_le64(*(uint64_t *)(mem + i));
            if (unlikely(mem_tag != ptr_tag)) {
                goto fail;
            }
            i += 8;
            align_ptr += 16 * TAG_GRANULE;
        } while (i < tag_bytes);
        goto done;
    }

    if (likely(mem_tag == ptr_tag)) {
        goto done;
    }

 fail:
    /* Locate the first nibble that differs. */
    i = ctz64(mem_tag ^ ptr_tag) >> 4;
    mte_check_fail(env, desc, align_ptr + i * TAG_GRANULE, ra);

 done:
    return useronly_clean_ptr(ptr);
}