target/arm/tcg/mte_helper.c

/*
 * ARM v8.5-MemTag Operations
 *
 * Copyright (c) 2020 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "exec/ram_addr.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "hw/core/tcg-cpu-ops.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"

static int choose_nonexcluded_tag(int tag, int offset, uint16_t exclude)
{
    if (exclude == 0xffff) {
        return 0;
    }
    if (offset == 0) {
        while (exclude & (1 << tag)) {
            tag = (tag + 1) & 15;
        }
    } else {
        do {
            do {
                tag = (tag + 1) & 15;
            } while (exclude & (1 << tag));
        } while (--offset > 0);
    }
    return tag;
}
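
/*
 * Worked example (illustrative only): with exclude = 0x000f (tags 0-3
 * excluded), tag = 2 and offset = 0, the first loop advances 2 -> 3 -> 4
 * and returns 4, the first non-excluded tag.  With offset = 2, the nested
 * loops instead step past two non-excluded tags, 2 -> 4 -> 5, and
 * return 5.
 */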

/**
 * allocation_tag_mem_probe:
 * @env: the cpu environment
 * @ptr_mmu_idx: the addressing regime to use for the virtual address
 * @ptr: the virtual address for which to look up tag memory
 * @ptr_access: the access to use for the virtual address
 * @ptr_size: the number of bytes in the normal memory access
 * @tag_access: the access to use for the tag memory
 * @probe: true to merely probe, never taking an exception
 * @ra: the return address for exception handling
 *
 * Our tag memory is formatted as a sequence of little-endian nibbles.
 * That is, the byte at (addr >> (LOG2_TAG_GRANULE + 1)) contains two
 * tags, with the tag at [3:0] for the lower addr and the tag at [7:4]
 * for the higher addr.
 *
 * Here, resolve the physical address from the virtual address, and return
 * a pointer to the corresponding tag byte.
 *
 * If there is no tag storage corresponding to @ptr, return NULL.
 *
 * If the page is inaccessible for @ptr_access, or has a watchpoint, there are
 * three options:
 * (1) probe = true, ra = 0 : pure probe -- we return NULL if the page is not
 *     accessible, and do not take watchpoint traps.  The calling code must
 *     handle those cases in the right priority compared to MTE traps.
 * (2) probe = false, ra = 0 : probe, no fault expected -- the caller guarantees
 *     that the page is going to be accessible.  We will take watchpoint traps.
 * (3) probe = false, ra != 0 : non-probe -- we will take both memory access
 *     traps and watchpoint traps.
 * (probe = true, ra != 0 is invalid and will assert.)
 */
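
/*
 * Layout example: with TAG_GRANULE = 16 (LOG2_TAG_GRANULE = 4), tag byte 0
 * covers guest bytes [0x00, 0x1f]: the tag for granule [0x00, 0x0f] sits in
 * bits [3:0] and the tag for granule [0x10, 0x1f] in bits [7:4].  Guest
 * address 0x1008 thus maps to tag byte 0x1008 >> 5 = 0x80, low nibble.
 */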

static uint8_t *allocation_tag_mem_probe(CPUARMState *env, int ptr_mmu_idx,
                                         uint64_t ptr, MMUAccessType ptr_access,
                                         int ptr_size, MMUAccessType tag_access,
                                         bool probe, uintptr_t ra)
{
#ifdef CONFIG_USER_ONLY
    uint64_t clean_ptr = useronly_clean_ptr(ptr);
    int flags = page_get_flags(clean_ptr);
    uint8_t *tags;
    uintptr_t index;

    assert(!(probe && ra));

    if (!(flags & (ptr_access == MMU_DATA_STORE ? PAGE_WRITE_ORG : PAGE_READ))) {
        cpu_loop_exit_sigsegv(env_cpu(env), ptr, ptr_access,
                              !(flags & PAGE_VALID), ra);
    }

    /* Require both MAP_ANON and PROT_MTE for the page. */
    if (!(flags & PAGE_ANON) || !(flags & PAGE_MTE)) {
        return NULL;
    }

    tags = page_get_target_data(clean_ptr);

    index = extract32(ptr, LOG2_TAG_GRANULE + 1,
                      TARGET_PAGE_BITS - LOG2_TAG_GRANULE - 1);
    return tags + index;
#else
    CPUTLBEntryFull *full;
    MemTxAttrs attrs;
    int in_page, flags;
    hwaddr ptr_paddr, tag_paddr, xlat;
    MemoryRegion *mr;
    ARMASIdx tag_asi;
    AddressSpace *tag_as;
    void *host;

    /*
     * Probe the first byte of the virtual address.  This raises an
     * exception for inaccessible pages, and resolves the virtual address
     * into the softmmu tlb.
     *
     * When RA == 0, this is either a pure probe or a no-fault-expected probe.
     * Indicate to probe_access_flags no-fault, then either return NULL
     * for the pure probe, or assert that we received a valid page for the
     * no-fault-expected probe.
     */
    flags = probe_access_full(env, ptr, 0, ptr_access, ptr_mmu_idx,
                              ra == 0, &host, &full, ra);
    if (probe && (flags & TLB_INVALID_MASK)) {
        return NULL;
    }
    assert(!(flags & TLB_INVALID_MASK));

    /* If the virtual page MemAttr != Tagged, access unchecked. */
    if (full->extra.arm.pte_attrs != 0xf0) {
        return NULL;
    }

    /*
     * If not backed by host ram, there is no tag storage: access unchecked.
     * This is probably a guest os bug though, so log it.
     */
    if (unlikely(flags & TLB_MMIO)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Page @ 0x%" PRIx64 " indicates Tagged Normal memory "
                      "but is not backed by host ram\n", ptr);
        return NULL;
    }

    /*
     * Remember these values across the second lookup below,
     * which may invalidate this pointer via tlb resize.
     */
    ptr_paddr = full->phys_addr | (ptr & ~TARGET_PAGE_MASK);
    attrs = full->attrs;
    full = NULL;

    /*
     * The Normal memory access can extend to the next page.  E.g. a single
     * 8-byte access to the last byte of a page will check only the last
     * tag on the first page.
     * Any page access exception has priority over tag check exception.
     */
    in_page = -(ptr | TARGET_PAGE_MASK);
    if (unlikely(ptr_size > in_page)) {
        flags |= probe_access_full(env, ptr + in_page, 0, ptr_access,
                                   ptr_mmu_idx, ra == 0, &host, &full, ra);
        assert(!(flags & TLB_INVALID_MASK));
    }

    /* Any debug exception has priority over a tag check exception. */
    if (!probe && unlikely(flags & TLB_WATCHPOINT)) {
        int wp = ptr_access == MMU_DATA_LOAD ? BP_MEM_READ : BP_MEM_WRITE;
        assert(ra != 0);
        cpu_check_watchpoint(env_cpu(env), ptr, ptr_size, attrs, wp, ra);
    }

    /* Convert to the physical address in tag space. */
    tag_paddr = ptr_paddr >> (LOG2_TAG_GRANULE + 1);

    /* Look up the address in tag space. */
    tag_asi = attrs.secure ? ARMASIdx_TagS : ARMASIdx_TagNS;
    tag_as = cpu_get_address_space(env_cpu(env), tag_asi);
    mr = address_space_translate(tag_as, tag_paddr, &xlat, NULL,
                                 tag_access == MMU_DATA_STORE, attrs);

    /*
     * Note that @mr will never be NULL.  If there is nothing in the address
     * space at @tag_paddr, the translation will return the unallocated memory
     * region.  For our purposes, the result must be ram.
     */
    if (unlikely(!memory_region_is_ram(mr))) {
        /* ??? Failure is a board configuration error. */
        qemu_log_mask(LOG_UNIMP,
                      "Tag Memory @ 0x%" HWADDR_PRIx " not found for "
                      "Normal Memory @ 0x%" HWADDR_PRIx "\n",
                      tag_paddr, ptr_paddr);
        return NULL;
    }

    /*
     * Ensure the tag memory is dirty on write, for migration.
     * Tag memory can never contain code or display memory (vga).
     */
    if (tag_access == MMU_DATA_STORE) {
        ram_addr_t tag_ra = memory_region_get_ram_addr(mr) + xlat;
        cpu_physical_memory_set_dirty_flag(tag_ra, DIRTY_MEMORY_MIGRATION);
    }

    return memory_region_get_ram_ptr(mr) + xlat;
#endif
}

static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
                                   uint64_t ptr, MMUAccessType ptr_access,
                                   int ptr_size, MMUAccessType tag_access,
                                   uintptr_t ra)
{
    return allocation_tag_mem_probe(env, ptr_mmu_idx, ptr, ptr_access,
                                    ptr_size, tag_access, false, ra);
}

uint64_t HELPER(irg)(CPUARMState *env, uint64_t rn, uint64_t rm)
{
    uint16_t exclude = extract32(rm | env->cp15.gcr_el1, 0, 16);
    int rrnd = extract32(env->cp15.gcr_el1, 16, 1);
    int start = extract32(env->cp15.rgsr_el1, 0, 4);
    int seed = extract32(env->cp15.rgsr_el1, 8, 16);
    int offset, i, rtag;

    /*
     * Our IMPDEF choice for GCR_EL1.RRND==1 is to continue to use the
     * deterministic algorithm.  Except that with RRND==1 the kernel is
     * not required to have set RGSR_EL1.SEED != 0, which is required for
     * the deterministic algorithm to function.  So we force a non-zero
     * SEED for that case.
     */
    if (unlikely(seed == 0) && rrnd) {
        do {
            Error *err = NULL;
            uint16_t two;

            if (qemu_guest_getrandom(&two, sizeof(two), &err) < 0) {
                /*
                 * Failed, for unknown reasons in the crypto subsystem.
                 * Best we can do is log the reason and use a constant seed.
                 */
                qemu_log_mask(LOG_UNIMP, "IRG: Crypto failure: %s\n",
                              error_get_pretty(err));
                error_free(err);
                two = 1;
            }
            seed = two;
        } while (seed == 0);
    }

    /* RandomTag */
    for (i = offset = 0; i < 4; ++i) {
        /* NextRandomTagBit */
        int top = (extract32(seed, 5, 1) ^ extract32(seed, 3, 1) ^
                   extract32(seed, 2, 1) ^ extract32(seed, 0, 1));
        seed = (top << 15) | (seed >> 1);
        offset |= top << i;
    }
    rtag = choose_nonexcluded_tag(start, offset, exclude);
    env->cp15.rgsr_el1 = rtag | (seed << 8);

    return address_with_allocation_tag(rn, rtag);
}
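
/*
 * As used above, RGSR_EL1 keeps TAG in bits [3:0] and SEED in bits [23:8].
 * The seed update is a 16-bit LFSR whose feedback bit is the XOR of bits
 * 5, 3, 2 and 0, matching the pseudocode's NextRandomTagBit; four steps
 * produce the 4-bit offset fed to choose_nonexcluded_tag().
 */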

uint64_t HELPER(addsubg)(CPUARMState *env, uint64_t ptr,
                         int32_t offset, uint32_t tag_offset)
{
    int start_tag = allocation_tag_from_addr(ptr);
    uint16_t exclude = extract32(env->cp15.gcr_el1, 0, 16);
    int rtag = choose_nonexcluded_tag(start_tag, tag_offset, exclude);

    return address_with_allocation_tag(ptr + offset, rtag);
}

static int load_tag1(uint64_t ptr, uint8_t *mem)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    return extract32(*mem, ofs, 4);
}

uint64_t HELPER(ldg)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    int mmu_idx = arm_env_mmu_index(env);
    uint8_t *mem;
    int rtag = 0;

    /* Trap if accessing an invalid page. */
    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD, 1,
                             MMU_DATA_LOAD, GETPC());

    /* Load if page supports tags. */
    if (mem) {
        rtag = load_tag1(ptr, mem);
    }

    return address_with_allocation_tag(xt, rtag);
}

static void check_tag_aligned(CPUARMState *env, uint64_t ptr, uintptr_t ra)
{
    if (unlikely(!QEMU_IS_ALIGNED(ptr, TAG_GRANULE))) {
        arm_cpu_do_unaligned_access(env_cpu(env), ptr, MMU_DATA_STORE,
                                    arm_env_mmu_index(env), ra);
        g_assert_not_reached();
    }
}

/* For use in a non-parallel context, store to the given nibble. */
static void store_tag1(uint64_t ptr, uint8_t *mem, int tag)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    *mem = deposit32(*mem, ofs, 4, tag);
}

/* For use in a parallel context, atomically store to the given nibble. */
static void store_tag1_parallel(uint64_t ptr, uint8_t *mem, int tag)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    uint8_t old = qatomic_read(mem);

    while (1) {
        uint8_t new = deposit32(old, ofs, 4, tag);
        uint8_t cmp = qatomic_cmpxchg(mem, old, new);
        if (likely(cmp == old)) {
            return;
        }
        old = cmp;
    }
}
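
/*
 * The compare-and-swap loop above is needed because the two tags sharing
 * a byte belong to adjacent granules: a concurrent STG to the neighbouring
 * granule may update the other nibble between our read and our write, in
 * which case qatomic_cmpxchg() fails and we retry with the refreshed byte.
 */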

typedef void stg_store1(uint64_t, uint8_t *, int);

static inline void do_stg(CPUARMState *env, uint64_t ptr, uint64_t xt,
                          uintptr_t ra, stg_store1 store1)
{
    int mmu_idx = arm_env_mmu_index(env);
    uint8_t *mem;

    check_tag_aligned(env, ptr, ra);

    /* Trap if accessing an invalid page. */
    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, TAG_GRANULE,
                             MMU_DATA_STORE, ra);

    /* Store if page supports tags. */
    if (mem) {
        store1(ptr, mem, allocation_tag_from_addr(xt));
    }
}

void HELPER(stg)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_stg(env, ptr, xt, GETPC(), store_tag1);
}

void HELPER(stg_parallel)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_stg(env, ptr, xt, GETPC(), store_tag1_parallel);
}

void HELPER(stg_stub)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = arm_env_mmu_index(env);
    uintptr_t ra = GETPC();

    check_tag_aligned(env, ptr, ra);
    probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
}

static inline void do_st2g(CPUARMState *env, uint64_t ptr, uint64_t xt,
                           uintptr_t ra, stg_store1 store1)
{
    int mmu_idx = arm_env_mmu_index(env);
    int tag = allocation_tag_from_addr(xt);
    uint8_t *mem1, *mem2;

    check_tag_aligned(env, ptr, ra);

    /*
     * Trap if accessing invalid page(s).
     * This takes priority over !allocation_tag_access_enabled.
     */
    if (ptr & TAG_GRANULE) {
        /* Two stores unaligned mod TAG_GRANULE*2 -- modify two bytes. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                  TAG_GRANULE, MMU_DATA_STORE, ra);
        mem2 = allocation_tag_mem(env, mmu_idx, ptr + TAG_GRANULE,
                                  MMU_DATA_STORE, TAG_GRANULE,
                                  MMU_DATA_STORE, ra);

        /* Store if page(s) support tags. */
        if (mem1) {
            store1(TAG_GRANULE, mem1, tag);
        }
        if (mem2) {
            store1(0, mem2, tag);
        }
    } else {
        /* Two stores aligned mod TAG_GRANULE*2 -- modify one byte. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                  2 * TAG_GRANULE, MMU_DATA_STORE, ra);
        if (mem1) {
            tag |= tag << 4;
            qatomic_set(mem1, tag);
        }
    }
}
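
/*
 * Note the asymmetry above: an ST2G aligned mod 2*TAG_GRANULE touches a
 * single tag byte and can set both nibbles with one plain qatomic_set,
 * while the unaligned case touches the high nibble of one byte and the
 * low nibble of the next, each of which may live on a different page.
 */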

void HELPER(st2g)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_st2g(env, ptr, xt, GETPC(), store_tag1);
}

void HELPER(st2g_parallel)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_st2g(env, ptr, xt, GETPC(), store_tag1_parallel);
}

void HELPER(st2g_stub)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = arm_env_mmu_index(env);
    uintptr_t ra = GETPC();
    int in_page = -(ptr | TARGET_PAGE_MASK);

    check_tag_aligned(env, ptr, ra);

    if (likely(in_page >= 2 * TAG_GRANULE)) {
        probe_write(env, ptr, 2 * TAG_GRANULE, mmu_idx, ra);
    } else {
        probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
        probe_write(env, ptr + TAG_GRANULE, TAG_GRANULE, mmu_idx, ra);
    }
}

uint64_t HELPER(ldgm)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = arm_env_mmu_index(env);
    uintptr_t ra = GETPC();
    int gm_bs = env_archcpu(env)->gm_blocksize;
    int gm_bs_bytes = 4 << gm_bs;
    void *tag_mem;
    uint64_t ret;
    int shift;

    ptr = QEMU_ALIGN_DOWN(ptr, gm_bs_bytes);

    /* Trap if accessing an invalid page. */
    tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD,
                                 gm_bs_bytes, MMU_DATA_LOAD, ra);

    /* The tag is squashed to zero if the page does not support tags. */
    if (!tag_mem) {
        return 0;
    }

    /*
     * The ordering of elements within the word corresponds to
     * a little-endian operation.  Computation of shift comes from
     *
     *     index = address<LOG2_TAG_GRANULE+3:LOG2_TAG_GRANULE>
     *     data<index*4+3:index*4> = tag
     *
     * Because of the alignment of ptr above, BS=6 has shift=0.
     * All memory operations are aligned.  Defer support for BS=2,
     * requiring insertion or extraction of a nibble, until we
     * support a cpu that requires it.
     */
    switch (gm_bs) {
    case 3:
        /* 32 bytes -> 2 tags -> 8 result bits */
        ret = *(uint8_t *)tag_mem;
        break;
    case 4:
        /* 64 bytes -> 4 tags -> 16 result bits */
        ret = cpu_to_le16(*(uint16_t *)tag_mem);
        break;
    case 5:
        /* 128 bytes -> 8 tags -> 32 result bits */
        ret = cpu_to_le32(*(uint32_t *)tag_mem);
        break;
    case 6:
        /* 256 bytes -> 16 tags -> 64 result bits */
        return cpu_to_le64(*(uint64_t *)tag_mem);
    default:
        /*
         * CPU configured with unsupported/invalid gm blocksize.
         * This is detected early in arm_cpu_realizefn.
         */
        g_assert_not_reached();
    }
    shift = extract64(ptr, LOG2_TAG_GRANULE, 4) * 4;
    return ret << shift;
}
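
/*
 * Example of the shift computation above: with BS=4 (64-byte blocks), a
 * pointer at byte offset 0x40 within a naturally aligned 256-byte region
 * has address bits <7:4> = 4, so shift = 4 * 4 = 16 and the four tag
 * nibbles land in result bits [31:16], exactly where a BS=6
 * implementation would have placed them.
 */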

void HELPER(stgm)(CPUARMState *env, uint64_t ptr, uint64_t val)
{
    int mmu_idx = arm_env_mmu_index(env);
    uintptr_t ra = GETPC();
    int gm_bs = env_archcpu(env)->gm_blocksize;
    int gm_bs_bytes = 4 << gm_bs;
    void *tag_mem;
    int shift;

    ptr = QEMU_ALIGN_DOWN(ptr, gm_bs_bytes);

    /* Trap if accessing an invalid page. */
    tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                 gm_bs_bytes, MMU_DATA_LOAD, ra);

    /*
     * Tag store only happens if the page supports tags,
     * and if the OS has enabled access to the tags.
     */
    if (!tag_mem) {
        return;
    }

    /* See LDGM for comments on BS and on shift. */
    shift = extract64(ptr, LOG2_TAG_GRANULE, 4) * 4;
    val >>= shift;
    switch (gm_bs) {
    case 3:
        /* 32 bytes -> 2 tags -> 8 result bits */
        *(uint8_t *)tag_mem = val;
        break;
    case 4:
        /* 64 bytes -> 4 tags -> 16 result bits */
        *(uint16_t *)tag_mem = cpu_to_le16(val);
        break;
    case 5:
        /* 128 bytes -> 8 tags -> 32 result bits */
        *(uint32_t *)tag_mem = cpu_to_le32(val);
        break;
    case 6:
        /* 256 bytes -> 16 tags -> 64 result bits */
        *(uint64_t *)tag_mem = cpu_to_le64(val);
        break;
    default:
        /* cpu configured with unsupported gm blocksize. */
        g_assert_not_reached();
    }
}

void HELPER(stzgm_tags)(CPUARMState *env, uint64_t ptr, uint64_t val)
{
    uintptr_t ra = GETPC();
    int mmu_idx = arm_env_mmu_index(env);
    int log2_dcz_bytes, log2_tag_bytes;
    intptr_t dcz_bytes, tag_bytes;
    uint8_t *mem;

    /*
     * In arm_cpu_realizefn, we assert that dcz > LOG2_TAG_GRANULE+1,
     * i.e. 32 bytes, which is an unreasonably small dcz anyway,
     * to make sure that we can access one complete tag byte here.
     */
    log2_dcz_bytes = env_archcpu(env)->dcz_blocksize + 2;
    log2_tag_bytes = log2_dcz_bytes - (LOG2_TAG_GRANULE + 1);
    dcz_bytes = (intptr_t)1 << log2_dcz_bytes;
    tag_bytes = (intptr_t)1 << log2_tag_bytes;
    ptr &= -dcz_bytes;

    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, dcz_bytes,
                             MMU_DATA_STORE, ra);
    if (mem) {
        int tag_pair = (val & 0xf) * 0x11;
        memset(mem, tag_pair, tag_bytes);
    }
}
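
/*
 * For instance, with the common dcz_blocksize of 4 (64 bytes),
 * log2_dcz_bytes = 6 and tag_bytes = 2: DC GZVA zeroes 64 bytes of data
 * and writes four identical tag nibbles, i.e. a 2-byte memset of
 * (val & 0xf) * 0x11.
 */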

static void mte_sync_check_fail(CPUARMState *env, uint32_t desc,
                                uint64_t dirty_ptr, uintptr_t ra)
{
    int is_write, syn;

    env->exception.vaddress = dirty_ptr;

    is_write = FIELD_EX32(desc, MTEDESC, WRITE);
    syn = syn_data_abort_no_iss(arm_current_el(env) != 0, 0, 0, 0, 0, is_write,
                                0x11);
    raise_exception_ra(env, EXCP_DATA_ABORT, syn, exception_target_el(env), ra);
    g_assert_not_reached();
}

static void mte_async_check_fail(CPUARMState *env, uint64_t dirty_ptr,
                                 uintptr_t ra, ARMMMUIdx arm_mmu_idx, int el)
{
    int select;

    if (regime_has_2_ranges(arm_mmu_idx)) {
        select = extract64(dirty_ptr, 55, 1);
    } else {
        select = 0;
    }
    env->cp15.tfsr_el[el] |= 1 << select;
#ifdef CONFIG_USER_ONLY
    /*
     * Stand in for a timer irq, setting _TIF_MTE_ASYNC_FAULT,
     * which then sends a SIGSEGV when the thread is next scheduled.
     * This cpu will return to the main loop at the end of the TB,
     * which is rather sooner than "normal".  But the alternative
     * is waiting until the next syscall.
     */
    qemu_cpu_kick(env_cpu(env));
#endif
}

/* Record a tag check failure. */
void mte_check_fail(CPUARMState *env, uint32_t desc,
                    uint64_t dirty_ptr, uintptr_t ra)
{
    int mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    ARMMMUIdx arm_mmu_idx = core_to_aa64_mmu_idx(mmu_idx);
    int el, reg_el, tcf;
    uint64_t sctlr;

    reg_el = regime_el(env, arm_mmu_idx);
    sctlr = env->cp15.sctlr_el[reg_el];

    switch (arm_mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E20_0:
        el = 0;
        tcf = extract64(sctlr, 38, 2);
        break;
    default:
        el = reg_el;
        tcf = extract64(sctlr, 40, 2);
    }

    switch (tcf) {
    case 1:
        /* Tag check fail causes a synchronous exception. */
        mte_sync_check_fail(env, desc, dirty_ptr, ra);
        break;

    case 0:
        /*
         * Tag check fail does not affect the PE.
         * We eliminate this case by not setting MTE_ACTIVE
         * in tb_flags, so that we never make this runtime call.
         */
        g_assert_not_reached();

    case 2:
        /* Tag check fail causes asynchronous flag set. */
        mte_async_check_fail(env, dirty_ptr, ra, arm_mmu_idx, el);
        break;

    case 3:
        /*
         * Tag check fail causes asynchronous flag set for stores, or
         * a synchronous exception for loads.
         */
        if (FIELD_EX32(desc, MTEDESC, WRITE)) {
            mte_async_check_fail(env, dirty_ptr, ra, arm_mmu_idx, el);
        } else {
            mte_sync_check_fail(env, desc, dirty_ptr, ra);
        }
        break;
    }
}
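
/*
 * Summary of the SCTLR fields consulted above: SCTLR_ELx.TCF0 (bits
 * [39:38]) governs EL0 and SCTLR_ELx.TCF (bits [41:40]) the higher ELs,
 * both with the encoding 0 = ignored, 1 = synchronous, 2 = asynchronous,
 * 3 = asynchronous for stores / synchronous for loads.
 */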

/**
 * checkN:
 * @tag: tag memory to test
 * @odd: true to begin testing at tags at odd nibble
 * @cmp: the tag to compare against
 * @count: number of tags to test
 *
 * Return the number of successful tests.
 * Thus a return value < @count indicates a failure.
 *
 * A note about sizes: count is expected to be small.
 *
 * The most common use will be LDP/STP of two integer registers,
 * which means 16 bytes of memory touching at most 2 tags, but
 * often the access is aligned and thus just 1 tag.
 *
 * Using AdvSIMD LD/ST (multiple), one can access 64 bytes of memory,
 * touching at most 5 tags.  SVE LDR/STR (vector) with the default
 * vector length is also 64 bytes; the maximum architectural length
 * is 256 bytes touching at most 9 tags.
 *
 * The loop below uses 7 logical operations and 1 memory operation
 * per tag pair.  An implementation that loads an aligned word and
 * uses masking to ignore adjacent tags requires 18 logical operations
 * and thus does not begin to pay off until 6 tags.
 * Which, according to the survey above, is unlikely to be common.
 */
static int checkN(uint8_t *mem, int odd, int cmp, int count)
{
    int n = 0, diff;

    /* Replicate the test tag and compare. */
    cmp *= 0x11;
    diff = *mem++ ^ cmp;

    if (odd) {
        goto start_odd;
    }

    while (1) {
        /* Test even tag. */
        if (unlikely((diff) & 0x0f)) {
            break;
        }
        if (++n == count) {
            break;
        }

 start_odd:
        /* Test odd tag. */
        if (unlikely((diff) & 0xf0)) {
            break;
        }
        if (++n == count) {
            break;
        }

        diff = *mem++ ^ cmp;
    }
    return n;
}
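
/*
 * Worked example: cmp = 3 replicates to 0x33.  A tag byte of 0x53 gives
 * diff = 0x60: the even (low) nibble matches and the odd (high) nibble
 * fails, so the loop stops with only the even tag counted.
 */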

/**
 * checkNrev:
 * @tag: tag memory to test
 * @odd: true to begin testing at tags at odd nibble
 * @cmp: the tag to compare against
 * @count: number of tags to test
 *
 * Return the number of successful tests.
 * Thus a return value < @count indicates a failure.
 *
 * This is like checkN, but it runs backwards, checking the
 * tags starting with @tag and then the tags preceding it.
 * This is needed by the backwards-memory-copying operations.
 */
static int checkNrev(uint8_t *mem, int odd, int cmp, int count)
{
    int n = 0, diff;

    /* Replicate the test tag and compare. */
    cmp *= 0x11;
    diff = *mem-- ^ cmp;

    if (!odd) {
        goto start_even;
    }

    while (1) {
        /* Test odd tag. */
        if (unlikely((diff) & 0xf0)) {
            break;
        }
        if (++n == count) {
            break;
        }

 start_even:
        /* Test even tag. */
        if (unlikely((diff) & 0x0f)) {
            break;
        }
        if (++n == count) {
            break;
        }

        diff = *mem-- ^ cmp;
    }
    return n;
}

/**
 * mte_probe_int() - helper for mte_probe and mte_check
 * @env: CPU environment
 * @desc: MTEDESC descriptor
 * @ptr: virtual address of the base of the access
 * @fault: return virtual address of the first check failure
 *
 * Internal routine for both mte_probe and mte_check.
 * Return zero on failure, filling in *fault.
 * Return negative on trivial success for tbi disabled.
 * Return positive on success with tbi enabled.
 */
static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr,
                         uintptr_t ra, uint64_t *fault)
{
    int mmu_idx, ptr_tag, bit55;
    uint64_t ptr_last, prev_page, next_page;
    uint64_t tag_first, tag_last;
    uint32_t sizem1, tag_count, n, c;
    uint8_t *mem1, *mem2;
    MMUAccessType type;

    bit55 = extract64(ptr, 55, 1);
    *fault = ptr;

    /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
    if (unlikely(!tbi_check(desc, bit55))) {
        return -1;
    }

    ptr_tag = allocation_tag_from_addr(ptr);

    if (tcma_check(desc, bit55, ptr_tag)) {
        return 1;
    }

    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    type = FIELD_EX32(desc, MTEDESC, WRITE) ? MMU_DATA_STORE : MMU_DATA_LOAD;
    sizem1 = FIELD_EX32(desc, MTEDESC, SIZEM1);

    /* Find the addr of the end of the access */
    ptr_last = ptr + sizem1;

    /* Round the bounds to the tag granule, and compute the number of tags. */
    tag_first = QEMU_ALIGN_DOWN(ptr, TAG_GRANULE);
    tag_last = QEMU_ALIGN_DOWN(ptr_last, TAG_GRANULE);
    tag_count = ((tag_last - tag_first) / TAG_GRANULE) + 1;

    /* Locate the page boundaries. */
    prev_page = ptr & TARGET_PAGE_MASK;
    next_page = prev_page + TARGET_PAGE_SIZE;

    if (likely(tag_last - prev_page < TARGET_PAGE_SIZE)) {
        /* Memory access stays on one page. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, sizem1 + 1,
                                  MMU_DATA_LOAD, ra);
        if (!mem1) {
            return 1;
        }
        /* Perform all of the comparisons. */
        n = checkN(mem1, ptr & TAG_GRANULE, ptr_tag, tag_count);
    } else {
        /* Memory access crosses to next page. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, next_page - ptr,
                                  MMU_DATA_LOAD, ra);

        mem2 = allocation_tag_mem(env, mmu_idx, next_page, type,
                                  ptr_last - next_page + 1,
                                  MMU_DATA_LOAD, ra);

        /*
         * Perform all of the comparisons.
         * Note the possible but unlikely case of the operation spanning
         * two pages that do not both have tagging enabled.
         */
        n = c = (next_page - tag_first) / TAG_GRANULE;
        if (mem1) {
            n = checkN(mem1, ptr & TAG_GRANULE, ptr_tag, c);
        }
        if (n == c) {
            if (!mem2) {
                return 1;
            }
            n += checkN(mem2, 0, ptr_tag, tag_count - c);
        }
    }

    if (likely(n == tag_count)) {
        return 1;
    }

    /*
     * If we failed, we know which granule.  For the first granule, the
     * failure address is @ptr, the first byte accessed.  Otherwise the
     * failure address is the first byte of the nth granule.
     */
    if (n > 0) {
        *fault = tag_first + n * TAG_GRANULE;
    }
    return 0;
}
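
/*
 * Example of the bounds math above with 4KiB pages: a 16-byte store at
 * ptr = 0x10ff8 has ptr_last = 0x11007, so tag_first = 0x10ff0,
 * tag_last = 0x11000 and tag_count = 2.  Since tag_last reaches
 * next_page, the two tags are fetched via separate mem1/mem2 lookups.
 */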

uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra)
{
    uint64_t fault;
    int ret = mte_probe_int(env, desc, ptr, ra, &fault);

    if (unlikely(ret == 0)) {
        mte_check_fail(env, desc, fault, ra);
    } else if (ret < 0) {
        return ptr;
    }
    return useronly_clean_ptr(ptr);
}

uint64_t HELPER(mte_check)(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    /*
     * R_XCHFJ: Alignment check not caused by memory type is priority 1,
     * higher than any translation fault.  When MTE is disabled, tcg
     * performs the alignment check during the code generated for the
     * memory access.  With MTE enabled, we must check this here before
     * raising any translation fault in allocation_tag_mem.
     */
    unsigned align = FIELD_EX32(desc, MTEDESC, ALIGN);
    if (unlikely(align)) {
        align = (1u << align) - 1;
        if (unlikely(ptr & align)) {
            int idx = FIELD_EX32(desc, MTEDESC, MIDX);
            bool w = FIELD_EX32(desc, MTEDESC, WRITE);
            MMUAccessType type = w ? MMU_DATA_STORE : MMU_DATA_LOAD;
            arm_cpu_do_unaligned_access(env_cpu(env), ptr, type, idx, GETPC());
        }
    }

    return mte_check(env, desc, ptr, GETPC());
}

/*
 * No-fault version of mte_check, to be used by SVE for MemSingleNF.
 * Returns false if the access is Checked and the check failed.  This
 * is only intended to probe the tag -- the validity of the page must
 * be checked beforehand.
 */
bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    uint64_t fault;
    int ret = mte_probe_int(env, desc, ptr, 0, &fault);

    return ret != 0;
}

/*
 * Perform an MTE checked access for DC_ZVA.
 */
uint64_t HELPER(mte_check_zva)(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    uintptr_t ra = GETPC();
    int log2_dcz_bytes, log2_tag_bytes;
    int mmu_idx, bit55;
    intptr_t dcz_bytes, tag_bytes, i;
    void *mem;
    uint64_t ptr_tag, mem_tag, align_ptr;

    bit55 = extract64(ptr, 55, 1);

    /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
    if (unlikely(!tbi_check(desc, bit55))) {
        return ptr;
    }

    ptr_tag = allocation_tag_from_addr(ptr);

    if (tcma_check(desc, bit55, ptr_tag)) {
        goto done;
    }

    /*
     * In arm_cpu_realizefn, we asserted that dcz > LOG2_TAG_GRANULE+1,
     * i.e. 32 bytes, which is an unreasonably small dcz anyway, to make
     * sure that we can access one complete tag byte here.
     */
    log2_dcz_bytes = env_archcpu(env)->dcz_blocksize + 2;
    log2_tag_bytes = log2_dcz_bytes - (LOG2_TAG_GRANULE + 1);
    dcz_bytes = (intptr_t)1 << log2_dcz_bytes;
    tag_bytes = (intptr_t)1 << log2_tag_bytes;
    align_ptr = ptr & -dcz_bytes;

    /*
     * Trap if accessing an invalid page.  DC_ZVA requires that we supply
     * the original pointer for an invalid page.  But watchpoints require
     * that we probe the actual space.  So do both.
     */
    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    (void) probe_write(env, ptr, 1, mmu_idx, ra);
    mem = allocation_tag_mem(env, mmu_idx, align_ptr, MMU_DATA_STORE,
                             dcz_bytes, MMU_DATA_LOAD, ra);
    if (!mem) {
        goto done;
    }

    /*
     * Unlike the reasoning for checkN, DC_ZVA is always aligned, and thus
     * it is quite easy to perform all of the comparisons at once without
     * any extra masking.
     *
     * The most common zva block size is 64; some of the thunderx cpus use
     * a block size of 128.  For user-only, aarch64_max_initfn will set the
     * block size to 512.  Fill out the other cases for future-proofing.
     *
     * In order to be able to find the first miscompare later, we want the
     * tag bytes to be in little-endian order.
     */
    switch (log2_tag_bytes) {
    case 0: /* zva_blocksize 32 */
        mem_tag = *(uint8_t *)mem;
        ptr_tag *= 0x11u;
        break;
    case 1: /* zva_blocksize 64 */
        mem_tag = cpu_to_le16(*(uint16_t *)mem);
        ptr_tag *= 0x1111u;
        break;
    case 2: /* zva_blocksize 128 */
        mem_tag = cpu_to_le32(*(uint32_t *)mem);
        ptr_tag *= 0x11111111u;
        break;
    case 3: /* zva_blocksize 256 */
        mem_tag = cpu_to_le64(*(uint64_t *)mem);
        ptr_tag *= 0x1111111111111111ull;
        break;

    default: /* zva_blocksize 512, 1024, 2048 */
        ptr_tag *= 0x1111111111111111ull;
        i = 0;
        do {
            mem_tag = cpu_to_le64(*(uint64_t *)(mem + i));
            if (unlikely(mem_tag != ptr_tag)) {
                goto fail;
            }
            i += 8;
            align_ptr += 16 * TAG_GRANULE;
        } while (i < tag_bytes);
        goto done;
    }

    if (likely(mem_tag == ptr_tag)) {
        goto done;
    }

 fail:
    /* Locate the first nibble that differs. */
    i = ctz64(mem_tag ^ ptr_tag) >> 4;
    mte_check_fail(env, desc, align_ptr + i * TAG_GRANULE, ra);

 done:
    return useronly_clean_ptr(ptr);
}

uint64_t mte_mops_probe(CPUARMState *env, uint64_t ptr, uint64_t size,
                        uint32_t desc)
{
    int mmu_idx, tag_count;
    uint64_t ptr_tag, tag_first, tag_last;
    void *mem;
    bool w = FIELD_EX32(desc, MTEDESC, WRITE);
    uint32_t n;

    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    /* True probe; this will never fault */
    mem = allocation_tag_mem_probe(env, mmu_idx, ptr,
                                   w ? MMU_DATA_STORE : MMU_DATA_LOAD,
                                   size, MMU_DATA_LOAD, true, 0);
    if (!mem) {
        return size;
    }

    /*
     * TODO: checkN() is not designed for checks of the size we expect
     * for FEAT_MOPS operations, so we should implement this differently.
     * Maybe we should do something like
     *   if (region start and size are aligned nicely) {
     *      do direct loads of 64 tag bits at a time;
     *   } else {
     *      call checkN()
     *   }
     */
    /* Round the bounds to the tag granule, and compute the number of tags. */
    ptr_tag = allocation_tag_from_addr(ptr);
    tag_first = QEMU_ALIGN_DOWN(ptr, TAG_GRANULE);
    tag_last = QEMU_ALIGN_DOWN(ptr + size - 1, TAG_GRANULE);
    tag_count = ((tag_last - tag_first) / TAG_GRANULE) + 1;
    n = checkN(mem, ptr & TAG_GRANULE, ptr_tag, tag_count);
    if (likely(n == tag_count)) {
        return size;
    }

    /*
     * Failure; for the first granule, it's at @ptr.  Otherwise
     * it's at the first byte of the nth granule.  Calculate how
     * many bytes we can access without hitting that failure.
     */
    if (n == 0) {
        return 0;
    } else {
        return n * TAG_GRANULE - (ptr - tag_first);
    }
}
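
/*
 * E.g. if ptr = 0x1008 and the tag of the second granule mismatches
 * (n == 1), the result is 1 * TAG_GRANULE - (0x1008 - 0x1000) = 8: only
 * 8 bytes may be accessed before the failing granule at 0x1010.
 */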

uint64_t mte_mops_probe_rev(CPUARMState *env, uint64_t ptr, uint64_t size,
                            uint32_t desc)
{
    int mmu_idx, tag_count;
    uint64_t ptr_tag, tag_first, tag_last;
    void *mem;
    bool w = FIELD_EX32(desc, MTEDESC, WRITE);
    uint32_t n;

    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    /*
     * True probe; this will never fault.  Note that our caller passes
     * us a pointer to the end of the region, but allocation_tag_mem_probe()
     * wants a pointer to the start.  Because we know we don't span a page
     * boundary and that allocation_tag_mem_probe() doesn't otherwise care
     * about the size, pass in a size of 1 byte.  This is simpler than
     * adjusting the ptr to point to the start of the region and then having
     * to adjust the returned 'mem' to get the end of the tag memory.
     */
    mem = allocation_tag_mem_probe(env, mmu_idx, ptr,
                                   w ? MMU_DATA_STORE : MMU_DATA_LOAD,
                                   1, MMU_DATA_LOAD, true, 0);
    if (!mem) {
        return size;
    }

    /*
     * TODO: checkNrev() is not designed for checks of the size we expect
     * for FEAT_MOPS operations, so we should implement this differently.
     * Maybe we should do something like
     *   if (region start and size are aligned nicely) {
     *      do direct loads of 64 tag bits at a time;
     *   } else {
     *      call checkNrev()
     *   }
     */
    /* Round the bounds to the tag granule, and compute the number of tags. */
    ptr_tag = allocation_tag_from_addr(ptr);
    tag_first = QEMU_ALIGN_DOWN(ptr - (size - 1), TAG_GRANULE);
    tag_last = QEMU_ALIGN_DOWN(ptr, TAG_GRANULE);
    tag_count = ((tag_last - tag_first) / TAG_GRANULE) + 1;
    n = checkNrev(mem, ptr & TAG_GRANULE, ptr_tag, tag_count);
    if (likely(n == tag_count)) {
        return size;
    }

    /*
     * Failure; for the first granule, it's at @ptr.  Otherwise
     * it's at the last byte of the nth granule.  Calculate how
     * many bytes we can access without hitting that failure.
     */
    if (n == 0) {
        return 0;
    } else {
        return (n - 1) * TAG_GRANULE + ((ptr + 1) - tag_last);
    }
}
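
/*
 * Mirror example for the reverse case: with ptr = 0x100f (the last byte
 * of a granule) and n == 1, tag_last = 0x1000 and the result is
 * (1 - 1) * TAG_GRANULE + (0x1010 - 0x1000) = 16, i.e. the whole granule
 * containing @ptr is safe to copy backwards.
 */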

void mte_mops_set_tags(CPUARMState *env, uint64_t ptr, uint64_t size,
                       uint32_t desc)
{
    int mmu_idx, tag_count;
    uint64_t ptr_tag;
    void *mem;

    if (!desc) {
        /* Tags not actually enabled */
        return;
    }

    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    /* True probe: this will never fault */
    mem = allocation_tag_mem_probe(env, mmu_idx, ptr, MMU_DATA_STORE, size,
                                   MMU_DATA_STORE, true, 0);
    if (!mem) {
        return;
    }

    /*
     * We know that ptr and size are both TAG_GRANULE aligned; store
     * the tag from the pointer value into the tag memory.
     */
    ptr_tag = allocation_tag_from_addr(ptr);
    tag_count = size / TAG_GRANULE;
    if (ptr & TAG_GRANULE) {
        /* Not 2*TAG_GRANULE-aligned: store tag to first nibble */
        store_tag1_parallel(TAG_GRANULE, mem, ptr_tag);
        mem++;
        tag_count--;
    }
    memset(mem, ptr_tag | (ptr_tag << 4), tag_count / 2);
    if (tag_count & 1) {
        /* Final trailing unaligned nibble */
        mem += tag_count / 2;
        store_tag1_parallel(0, mem, ptr_tag);
    }
}