[qemu/armbru.git] / linux-user / aarch64 / target_prctl.h
/*
 * AArch64 specific prctl functions for linux-user
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#ifndef AARCH64_TARGET_PRCTL_H
#define AARCH64_TARGET_PRCTL_H
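
/*
 * The "#define name name" line after each helper presumably lets the
 * generic prctl dispatcher (linux-user/syscall.c) test with #ifndef
 * whether this target implements the hook and fall back to an EINVAL
 * stub otherwise.
 */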

static abi_long do_prctl_sve_get_vl(CPUArchState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    if (cpu_isar_feature(aa64_sve, cpu)) {
        /* PSTATE.SM is always unset on syscall entry. */
        return sve_vq(env) * 16;
    }
    return -TARGET_EINVAL;
}
#define do_prctl_sve_get_vl do_prctl_sve_get_vl

static abi_long do_prctl_sve_set_vl(CPUArchState *env, abi_long arg2)
{
    /*
     * We cannot support either PR_SVE_SET_VL_ONEXEC or PR_SVE_VL_INHERIT.
     * Note the kernel definition of sve_vl_valid allows for VQ=512,
     * i.e. VL=8192, even though the current architectural maximum is VQ=16.
     */
    if (cpu_isar_feature(aa64_sve, env_archcpu(env))
        && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
        uint32_t vq, old_vq;

        /* PSTATE.SM is always unset on syscall entry. */
        old_vq = sve_vq(env);

        /*
         * Bound the value of arg2, so that we know that it fits into
         * the 4-bit field in ZCR_EL1.  Rely on the hflags rebuild to
         * sort out the length supported by the cpu.
         */
        vq = MAX(arg2 / 16, 1);
        vq = MIN(vq, ARM_MAX_VQ);
        env->vfp.zcr_el[1] = vq - 1;
        arm_rebuild_hflags(env);

        vq = sve_vq(env);
        if (vq < old_vq) {
            aarch64_sve_narrow_vq(env, vq);
        }
        return vq * 16;
    }
    return -TARGET_EINVAL;
}
#define do_prctl_sve_set_vl do_prctl_sve_set_vl
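
/*
 * Guest-side sketch of the two SVE hooks above (hypothetical test code,
 * not part of QEMU; it only assumes the standard PR_SVE_* constants from
 * the kernel's <linux/prctl.h>).  PR_SVE_SET_VL takes the requested
 * vector length in bytes, so e.g. 32 asks for VQ = 32/16 = 2:
 *
 *     #include <sys/prctl.h>
 *
 *     int old_vl = prctl(PR_SVE_GET_VL, 0, 0, 0, 0);   // VL in bytes
 *     int new_vl = prctl(PR_SVE_SET_VL, 32, 0, 0, 0);  // request VL = 32
 *     // Both return -EINVAL here when the vcpu lacks SVE or the argument
 *     // is negative, above 512*16, or not a multiple of 16.
 */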

static abi_long do_prctl_sme_get_vl(CPUArchState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    if (cpu_isar_feature(aa64_sme, cpu)) {
        return sme_vq(env) * 16;
    }
    return -TARGET_EINVAL;
}
#define do_prctl_sme_get_vl do_prctl_sme_get_vl

static abi_long do_prctl_sme_set_vl(CPUArchState *env, abi_long arg2)
{
    /*
     * We cannot support either PR_SME_SET_VL_ONEXEC or PR_SME_VL_INHERIT.
     * Note the kernel definition of sve_vl_valid allows for VQ=512,
     * i.e. VL=8192, even though the architectural maximum is VQ=16.
     */
    if (cpu_isar_feature(aa64_sme, env_archcpu(env))
        && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
        int vq, old_vq;

        old_vq = sme_vq(env);

        /*
         * Bound the value of vq, so that we know that it fits into
         * the 4-bit field in SMCR_EL1.  Because PSTATE.SM is cleared
         * on syscall entry, we are not modifying the current SVE
         * vector length.
         */
        vq = MAX(arg2 / 16, 1);
        vq = MIN(vq, 16);
        env->vfp.smcr_el[1] =
            FIELD_DP64(env->vfp.smcr_el[1], SMCR, LEN, vq - 1);

        /* Delay rebuilding hflags until we know if ZA must change. */
        vq = sve_vqm1_for_el_sm(env, 0, true) + 1;

        if (vq != old_vq) {
            /*
             * PSTATE.ZA state is cleared on any change to SVL.
             * We need not call arm_rebuild_hflags because PSTATE.SM was
             * cleared on syscall entry, so this hasn't changed VL.
             */
            env->svcr = FIELD_DP64(env->svcr, SVCR, ZA, 0);
            arm_rebuild_hflags(env);
        }
        return vq * 16;
    }
    return -TARGET_EINVAL;
}
#define do_prctl_sme_set_vl do_prctl_sme_set_vl
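
/*
 * Guest-side sketch for the SME hooks, analogous to the SVE one above
 * (hypothetical test code, assuming the kernel's PR_SME_* constants):
 *
 *     #include <sys/prctl.h>
 *
 *     int svl = prctl(PR_SME_GET_VL, 0, 0, 0, 0);   // streaming VL in bytes
 *     int req = prctl(PR_SME_SET_VL, 64, 0, 0, 0);  // request SVL = 64 (VQ=4)
 *     // Note that changing SVL clears PSTATE.ZA, as the code above does
 *     // whenever the effective vq changes.
 */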

static abi_long do_prctl_reset_keys(CPUArchState *env, abi_long arg2)
{
    ARMCPU *cpu = env_archcpu(env);

    if (cpu_isar_feature(aa64_pauth, cpu)) {
        int all = (PR_PAC_APIAKEY | PR_PAC_APIBKEY |
                   PR_PAC_APDAKEY | PR_PAC_APDBKEY | PR_PAC_APGAKEY);
        int ret = 0;
        Error *err = NULL;

        if (arg2 == 0) {
            arg2 = all;
        } else if (arg2 & ~all) {
            return -TARGET_EINVAL;
        }
        if (arg2 & PR_PAC_APIAKEY) {
            ret |= qemu_guest_getrandom(&env->keys.apia,
                                        sizeof(ARMPACKey), &err);
        }
        if (arg2 & PR_PAC_APIBKEY) {
            ret |= qemu_guest_getrandom(&env->keys.apib,
                                        sizeof(ARMPACKey), &err);
        }
        if (arg2 & PR_PAC_APDAKEY) {
            ret |= qemu_guest_getrandom(&env->keys.apda,
                                        sizeof(ARMPACKey), &err);
        }
        if (arg2 & PR_PAC_APDBKEY) {
            ret |= qemu_guest_getrandom(&env->keys.apdb,
                                        sizeof(ARMPACKey), &err);
        }
        if (arg2 & PR_PAC_APGAKEY) {
            ret |= qemu_guest_getrandom(&env->keys.apga,
                                        sizeof(ARMPACKey), &err);
        }
        if (ret != 0) {
            /*
             * Some unknown failure in the crypto.  The best
             * we can do is log it and fail the syscall.
             * The real syscall cannot fail this way.
             */
            qemu_log_mask(LOG_UNIMP, "PR_PAC_RESET_KEYS: Crypto failure: %s",
                          error_get_pretty(err));
            error_free(err);
            return -TARGET_EIO;
        }
        return 0;
    }
    return -TARGET_EINVAL;
}
#define do_prctl_reset_keys do_prctl_reset_keys
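
/*
 * Guest-side sketch for PR_PAC_RESET_KEYS (hypothetical test code,
 * assuming the kernel's PR_PAC_* constants):
 *
 *     #include <sys/prctl.h>
 *
 *     prctl(PR_PAC_RESET_KEYS, 0, 0, 0, 0);               // reset all keys
 *     prctl(PR_PAC_RESET_KEYS, PR_PAC_APIAKEY, 0, 0, 0);  // only APIAKey
 *     // Passing any bit outside the five PR_PAC_AP*KEY flags fails with
 *     // -EINVAL, matching the arg2 & ~all check above.
 */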

static abi_long do_prctl_set_tagged_addr_ctrl(CPUArchState *env, abi_long arg2)
{
    abi_ulong valid_mask = PR_TAGGED_ADDR_ENABLE;
    ARMCPU *cpu = env_archcpu(env);

    if (cpu_isar_feature(aa64_mte, cpu)) {
        valid_mask |= PR_MTE_TCF_MASK;
        valid_mask |= PR_MTE_TAG_MASK;
    }

    if (arg2 & ~valid_mask) {
        return -TARGET_EINVAL;
    }
    env->tagged_addr_enable = arg2 & PR_TAGGED_ADDR_ENABLE;

    if (cpu_isar_feature(aa64_mte, cpu)) {
        switch (arg2 & PR_MTE_TCF_MASK) {
        case PR_MTE_TCF_NONE:
        case PR_MTE_TCF_SYNC:
        case PR_MTE_TCF_ASYNC:
            break;
        default:
            return -EINVAL;
        }

        /*
         * Write PR_MTE_TCF to SCTLR_EL1[TCF0].
         * Note that the syscall values are consistent with hw.
         */
        env->cp15.sctlr_el[1] =
            deposit64(env->cp15.sctlr_el[1], 38, 2, arg2 >> PR_MTE_TCF_SHIFT);

        /*
         * Write PR_MTE_TAG to GCR_EL1[Exclude].
         * Note that the syscall uses an include mask,
         * and hardware uses an exclude mask -- invert.
         */
        env->cp15.gcr_el1 =
            deposit64(env->cp15.gcr_el1, 0, 16, ~arg2 >> PR_MTE_TAG_SHIFT);
        arm_rebuild_hflags(env);
    }
    return 0;
}
#define do_prctl_set_tagged_addr_ctrl do_prctl_set_tagged_addr_ctrl

static abi_long do_prctl_get_tagged_addr_ctrl(CPUArchState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    abi_long ret = 0;

    if (env->tagged_addr_enable) {
        ret |= PR_TAGGED_ADDR_ENABLE;
    }
    if (cpu_isar_feature(aa64_mte, cpu)) {
        /* See do_prctl_set_tagged_addr_ctrl. */
        ret |= extract64(env->cp15.sctlr_el[1], 38, 2) << PR_MTE_TCF_SHIFT;
        ret = deposit64(ret, PR_MTE_TAG_SHIFT, 16, ~env->cp15.gcr_el1);
    }
    return ret;
}
#define do_prctl_get_tagged_addr_ctrl do_prctl_get_tagged_addr_ctrl
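
/*
 * Guest-side sketch for the tagged-address / MTE pair above (hypothetical
 * test code, assuming the kernel's PR_*_TAGGED_ADDR_CTRL and PR_MTE_*
 * constants):
 *
 *     #include <sys/prctl.h>
 *
 *     // Enable tagged addresses with synchronous tag checking and
 *     // allow tags 1..15 in the include mask.
 *     prctl(PR_SET_TAGGED_ADDR_CTRL,
 *           PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC |
 *           (0xfffe << PR_MTE_TAG_SHIFT),
 *           0, 0, 0);
 *
 *     long ctrl = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
 *     // ctrl reflects SCTLR_EL1.TCF0 and the inverse of GCR_EL1.Exclude,
 *     // i.e. the same include mask that was written above.
 */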

#endif /* AARCH64_TARGET_PRCTL_H */