/* Support for writing ELF notes for ARM architectures
 *
 * Copyright (C) 2015 Red Hat Inc.
 *
 * Author: Andrew Jones <drjones@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "elf.h"
#include "sysemu/dump.h"
#include "cpu-features.h"

/* struct user_pt_regs from arch/arm64/include/uapi/asm/ptrace.h */
struct aarch64_user_regs {
    uint64_t regs[31];
    uint64_t sp;
    uint64_t pc;
    uint64_t pstate;
} QEMU_PACKED;

QEMU_BUILD_BUG_ON(sizeof(struct aarch64_user_regs) != 272);

/* struct elf_prstatus from include/uapi/linux/elfcore.h */
struct aarch64_elf_prstatus {
    char pad1[32]; /* 32 == offsetof(struct elf_prstatus, pr_pid) */
    uint32_t pr_pid;
    char pad2[76]; /* 76 == offsetof(struct elf_prstatus, pr_reg) -
                      offsetof(struct elf_prstatus, pr_ppid) */
    struct aarch64_user_regs pr_reg;
    uint32_t pr_fpvalid;
    char pad3[4];
} QEMU_PACKED;

QEMU_BUILD_BUG_ON(sizeof(struct aarch64_elf_prstatus) != 392);

/* struct user_fpsimd_state from arch/arm64/include/uapi/asm/ptrace.h
 *
 * While the vregs member of user_fpsimd_state is of type __uint128_t,
 * QEMU uses an array of uint64_t, where the high half of the 128-bit
 * value is always in the 2n+1'th index. Thus we also break the 128-
 * bit values into two halves in this reproduction of user_fpsimd_state.
 */
struct aarch64_user_vfp_state {
    uint64_t vregs[64];
    uint32_t fpsr;
    uint32_t fpcr;
    char pad[8];
} QEMU_PACKED;

QEMU_BUILD_BUG_ON(sizeof(struct aarch64_user_vfp_state) != 528);

/* struct user_sve_header from arch/arm64/include/uapi/asm/ptrace.h */
struct aarch64_user_sve_header {
    uint32_t size;
    uint32_t max_size;
    uint16_t vl;
    uint16_t max_vl;
    uint16_t flags;
    uint16_t reserved;
} QEMU_PACKED;

struct aarch64_note {
    Elf64_Nhdr hdr;
    char name[8]; /* align_up(sizeof("CORE"), 4) */
    union {
        struct aarch64_elf_prstatus prstatus;
        struct aarch64_user_vfp_state vfp;
        struct aarch64_user_sve_header sve;
    };
} QEMU_PACKED;
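
/* Each note is an Elf64_Nhdr, the 8-byte padded name, and then the
 * type-specific descriptor, so the *_NOTE_SIZE macros below are the
 * common header size plus the size of the descriptor struct. */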
#define AARCH64_NOTE_HEADER_SIZE offsetof(struct aarch64_note, prstatus)
#define AARCH64_PRSTATUS_NOTE_SIZE \
            (AARCH64_NOTE_HEADER_SIZE + sizeof(struct aarch64_elf_prstatus))
#define AARCH64_PRFPREG_NOTE_SIZE \
            (AARCH64_NOTE_HEADER_SIZE + sizeof(struct aarch64_user_vfp_state))
#define AARCH64_SVE_NOTE_SIZE(env) \
            (AARCH64_NOTE_HEADER_SIZE + sve_size(env))

static void aarch64_note_init(struct aarch64_note *note, DumpState *s,
                              const char *name, Elf64_Word namesz,
                              Elf64_Word type, Elf64_Word descsz)
{
    memset(note, 0, sizeof(*note));

    note->hdr.n_namesz = cpu_to_dump32(s, namesz);
    note->hdr.n_descsz = cpu_to_dump32(s, descsz);
    note->hdr.n_type = cpu_to_dump32(s, type);

    memcpy(note->name, name, namesz);
}

static int aarch64_write_elf64_prfpreg(WriteCoreDumpFunction f,
                                       CPUARMState *env, int cpuid,
                                       DumpState *s)
{
    struct aarch64_note note;
    int ret, i;

    aarch64_note_init(&note, s, "CORE", 5, NT_PRFPREG, sizeof(note.vfp));
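
    /* Each Q register is written as two 64-bit halves, with the low
     * doubleword at index 2n (see aarch64_user_vfp_state above). */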
    for (i = 0; i < 32; ++i) {
        uint64_t *q = aa64_vfp_qreg(env, i);
        note.vfp.vregs[2 * i + 0] = cpu_to_dump64(s, q[0]);
        note.vfp.vregs[2 * i + 1] = cpu_to_dump64(s, q[1]);
    }

    if (s->dump_info.d_endian == ELFDATA2MSB) {
        /* For AArch64 we must always swap the vfp.vregs 2n and 2n+1
         * entries when generating BE notes, because even big endian
         * hosts use 2n+1 for the high half.
         */
        for (i = 0; i < 32; ++i) {
            uint64_t tmp = note.vfp.vregs[2 * i];
            note.vfp.vregs[2 * i] = note.vfp.vregs[2 * i + 1];
            note.vfp.vregs[2 * i + 1] = tmp;
        }
    }

    note.vfp.fpsr = cpu_to_dump32(s, vfp_get_fpsr(env));
    note.vfp.fpcr = cpu_to_dump32(s, vfp_get_fpcr(env));

    ret = f(&note, AARCH64_PRFPREG_NOTE_SIZE, s);
    if (ret < 0) {
        return -1;
    }

    return 0;
}

#ifdef TARGET_AARCH64
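/* The NT_ARM_SVE descriptor mirrors the kernel's SVE_PT_SVE_* regset
 * layout (arch/arm64/include/uapi/asm/ptrace.h): a user_sve_header,
 * then, 16-byte aligned, Z0-Z31 (vq * 16 bytes each), P0-P15 and FFR
 * (vq * 2 bytes each), then FPSR and FPCR, with the total rounded up
 * to 16 bytes. The helpers below compute those offsets. */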
static off_t sve_zreg_offset(uint32_t vq, int n)
{
    off_t off = sizeof(struct aarch64_user_sve_header);
    return ROUND_UP(off, 16) + vq * 16 * n;
}

static off_t sve_preg_offset(uint32_t vq, int n)
{
    return sve_zreg_offset(vq, 32) + vq * 16 / 8 * n;
}

static off_t sve_fpsr_offset(uint32_t vq)
{
    off_t off = sve_preg_offset(vq, 17);
    return ROUND_UP(off, 16);
}

static off_t sve_fpcr_offset(uint32_t vq)
{
    return sve_fpsr_offset(vq) + sizeof(uint32_t);
}
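
/* vq is the current vector length in 128-bit quadwords, so the vector
 * length in bytes (vl) reported in the header is vq * 16. */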
static uint32_t sve_current_vq(CPUARMState *env)
{
    return sve_vqm1_for_el(env, arm_current_el(env)) + 1;
}

static size_t sve_size_vq(uint32_t vq)
{
    off_t off = sve_fpcr_offset(vq) + sizeof(uint32_t);
    return ROUND_UP(off, 16);
}

static size_t sve_size(CPUARMState *env)
{
    return sve_size_vq(sve_current_vq(env));
}

static int aarch64_write_elf64_sve(WriteCoreDumpFunction f,
                                   CPUARMState *env, int cpuid,
                                   DumpState *s)
{
    struct aarch64_note *note;
    ARMCPU *cpu = env_archcpu(env);
    uint32_t vq = sve_current_vq(env);
    uint64_t tmp[ARM_MAX_VQ * 2], *r;
    uint32_t fpr;
    uint8_t *buf;
    int ret, i;

    note = g_malloc0(AARCH64_SVE_NOTE_SIZE(env));
    buf = (uint8_t *)&note->sve;

    aarch64_note_init(note, s, "LINUX", 6, NT_ARM_SVE, sve_size_vq(vq));

    note->sve.size = cpu_to_dump32(s, sve_size_vq(vq));
    note->sve.max_size = cpu_to_dump32(s, sve_size_vq(cpu->sve_max_vq));
    note->sve.vl = cpu_to_dump16(s, vq * 16);
    note->sve.max_vl = cpu_to_dump16(s, cpu->sve_max_vq * 16);
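    /* A flags value of 1 corresponds to the kernel's SVE_PT_REGS_SVE,
     * i.e. the payload carries full SVE register state rather than the
     * FPSIMD-only layout. */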
    note->sve.flags = cpu_to_dump16(s, 1);

    for (i = 0; i < 32; ++i) {
        r = sve_bswap64(tmp, &env->vfp.zregs[i].d[0], vq * 2);
        memcpy(&buf[sve_zreg_offset(vq, i)], r, vq * 16);
    }
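
    /* P0-P15 plus FFR, which QEMU keeps as pregs[16]; each predicate is
     * vq * 2 bytes (one bit per vector byte). */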
    for (i = 0; i < 17; ++i) {
        r = sve_bswap64(tmp, &env->vfp.pregs[i].p[0],
                        DIV_ROUND_UP(vq * 2, 8));
        memcpy(&buf[sve_preg_offset(vq, i)], r, vq * 16 / 8);
    }

    fpr = cpu_to_dump32(s, vfp_get_fpsr(env));
    memcpy(&buf[sve_fpsr_offset(vq)], &fpr, sizeof(uint32_t));

    fpr = cpu_to_dump32(s, vfp_get_fpcr(env));
    memcpy(&buf[sve_fpcr_offset(vq)], &fpr, sizeof(uint32_t));

    ret = f(note, AARCH64_SVE_NOTE_SIZE(env), s);
    g_free(note);

    if (ret < 0) {
        return -1;
    }

    return 0;
}
#endif

int arm_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
                             int cpuid, DumpState *s)
{
    struct aarch64_note note;
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint64_t pstate, sp;
    int ret, i;

    aarch64_note_init(&note, s, "CORE", 5, NT_PRSTATUS, sizeof(note.prstatus));

    note.prstatus.pr_pid = cpu_to_dump32(s, cpuid);
    note.prstatus.pr_fpvalid = cpu_to_dump32(s, 1);

    if (!is_a64(env)) {
        /* The CPU is in AArch32 state; sync the 32-bit registers into
         * the 64-bit view before filling in the regs below. */
        aarch64_sync_32_to_64(env);
        pstate = cpsr_read(env);
        sp = 0;
    } else {
        pstate = pstate_read(env);
        sp = env->xregs[31];
    }

    for (i = 0; i < 31; ++i) {
        note.prstatus.pr_reg.regs[i] = cpu_to_dump64(s, env->xregs[i]);
    }
    note.prstatus.pr_reg.sp = cpu_to_dump64(s, sp);
    note.prstatus.pr_reg.pc = cpu_to_dump64(s, env->pc);
    note.prstatus.pr_reg.pstate = cpu_to_dump64(s, pstate);

    ret = f(&note, AARCH64_PRSTATUS_NOTE_SIZE, s);
    if (ret < 0) {
        return -1;
    }

    ret = aarch64_write_elf64_prfpreg(f, env, cpuid, s);
    if (ret) {
        return ret;
    }

#ifdef TARGET_AARCH64
    if (cpu_isar_feature(aa64_sve, cpu)) {
        ret = aarch64_write_elf64_sve(f, env, cpuid, s);
    }
#endif

    return ret;
}

/* struct pt_regs from arch/arm/include/asm/ptrace.h */
struct arm_user_regs {
    uint32_t regs[17];
    char pad[4];
} QEMU_PACKED;

QEMU_BUILD_BUG_ON(sizeof(struct arm_user_regs) != 72);

/* struct elf_prstatus from include/uapi/linux/elfcore.h */
struct arm_elf_prstatus {
    char pad1[24]; /* 24 == offsetof(struct elf_prstatus, pr_pid) */
    uint32_t pr_pid;
    char pad2[44]; /* 44 == offsetof(struct elf_prstatus, pr_reg) -
                      offsetof(struct elf_prstatus, pr_ppid) */
    struct arm_user_regs pr_reg;
    uint32_t pr_fpvalid;
} QEMU_PACKED arm_elf_prstatus;

QEMU_BUILD_BUG_ON(sizeof(struct arm_elf_prstatus) != 148);

/* struct user_vfp from arch/arm/include/asm/user.h */
struct arm_user_vfp_state {
    uint64_t vregs[32];
    uint32_t fpscr;
} QEMU_PACKED;

QEMU_BUILD_BUG_ON(sizeof(struct arm_user_vfp_state) != 260);

struct arm_note {
    Elf32_Nhdr hdr;
    char name[8]; /* align_up(sizeof("LINUX"), 4) */
    union {
        struct arm_elf_prstatus prstatus;
        struct arm_user_vfp_state vfp;
    };
} QEMU_PACKED;

#define ARM_NOTE_HEADER_SIZE offsetof(struct arm_note, prstatus)
#define ARM_PRSTATUS_NOTE_SIZE \
            (ARM_NOTE_HEADER_SIZE + sizeof(struct arm_elf_prstatus))
#define ARM_VFP_NOTE_SIZE \
            (ARM_NOTE_HEADER_SIZE + sizeof(struct arm_user_vfp_state))

static void arm_note_init(struct arm_note *note, DumpState *s,
                          const char *name, Elf32_Word namesz,
                          Elf32_Word type, Elf32_Word descsz)
{
    memset(note, 0, sizeof(*note));

    note->hdr.n_namesz = cpu_to_dump32(s, namesz);
    note->hdr.n_descsz = cpu_to_dump32(s, descsz);
    note->hdr.n_type = cpu_to_dump32(s, type);

    memcpy(note->name, name, namesz);
}

static int arm_write_elf32_vfp(WriteCoreDumpFunction f, CPUARMState *env,
                               int cpuid, DumpState *s)
{
    struct arm_note note;
    int ret, i;

    arm_note_init(&note, s, "LINUX", 6, NT_ARM_VFP, sizeof(note.vfp));

    for (i = 0; i < 32; ++i) {
        note.vfp.vregs[i] = cpu_to_dump64(s, *aa32_vfp_dreg(env, i));
    }

    note.vfp.fpscr = cpu_to_dump32(s, vfp_get_fpscr(env));

    ret = f(&note, ARM_VFP_NOTE_SIZE, s);
    if (ret < 0) {
        return -1;
    }

    return 0;
}

int arm_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
                             int cpuid, DumpState *s)
{
    struct arm_note note;
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    int ret, i;
    bool fpvalid = cpu_isar_feature(aa32_vfp_simd, cpu);

    arm_note_init(&note, s, "CORE", 5, NT_PRSTATUS, sizeof(note.prstatus));

    note.prstatus.pr_pid = cpu_to_dump32(s, cpuid);
    note.prstatus.pr_fpvalid = cpu_to_dump32(s, fpvalid);

    for (i = 0; i < 16; ++i) {
        note.prstatus.pr_reg.regs[i] = cpu_to_dump32(s, env->regs[i]);
    }
    note.prstatus.pr_reg.regs[16] = cpu_to_dump32(s, cpsr_read(env));

    ret = f(&note, ARM_PRSTATUS_NOTE_SIZE, s);
    if (ret < 0) {
        return -1;
    } else if (fpvalid) {
        return arm_write_elf32_vfp(f, env, cpuid, s);
    }

    return 0;
}

int cpu_get_dump_info(ArchDumpInfo *info,
                      const GuestPhysBlockList *guest_phys_blocks)
{
    ARMCPU *cpu;
    CPUARMState *env;
    GuestPhysBlock *block;
    hwaddr lowest_addr = ULLONG_MAX;

    if (first_cpu == NULL) {
        return -1;
    }

    cpu = ARM_CPU(first_cpu);
    env = &cpu->env;

    /* Take a best guess at the phys_base. If we get it wrong then crash
     * will need '--machdep phys_offset=<phys-offset>' added to its command
     * line, which isn't any worse than assuming we can use zero, but being
     * wrong. This is the same algorithm the crash utility uses when
     * attempting to guess as it loads non-dumpfile formatted files.
     */
    QTAILQ_FOREACH(block, &guest_phys_blocks->head, next) {
        if (block->target_start < lowest_addr) {
            lowest_addr = block->target_start;
        }
    }

    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
        info->d_machine = EM_AARCH64;
        info->d_class = ELFCLASS64;
        info->page_size = (1 << 16); /* aarch64 max pagesize */
        if (lowest_addr != ULLONG_MAX) {
            info->phys_base = lowest_addr;
        }
    } else {
        info->d_machine = EM_ARM;
        info->d_class = ELFCLASS32;
        info->page_size = (1 << 12);
        if (lowest_addr < UINT_MAX) {
            info->phys_base = lowest_addr;
        }
    }

    /* We assume the relevant endianness is that of EL1; this is right
     * for kernels, but might give the wrong answer if you're trying to
     * dump a hypervisor that happens to be running an opposite-endian
     * kernel.
     */
    info->d_endian = (env->cp15.sctlr_el[1] & SCTLR_EE) != 0
                     ? ELFDATA2MSB : ELFDATA2LSB;

    return 0;
}

ssize_t cpu_get_note_size(int class, int machine, int nr_cpus)
{
    ARMCPU *cpu = ARM_CPU(first_cpu);
    size_t note_size;

    if (class == ELFCLASS64) {
        note_size = AARCH64_PRSTATUS_NOTE_SIZE;
        note_size += AARCH64_PRFPREG_NOTE_SIZE;
#ifdef TARGET_AARCH64
        if (cpu_isar_feature(aa64_sve, cpu)) {
            note_size += AARCH64_SVE_NOTE_SIZE(&cpu->env);
        }
#endif
    } else {
        note_size = ARM_PRSTATUS_NOTE_SIZE;
        if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
            note_size += ARM_VFP_NOTE_SIZE;
        }
    }

    return note_size * nr_cpus;
}