//===-- sanitizer_stoptheworld_linux_libcdep.cpp --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// See sanitizer_stoptheworld.h for details.
// This implementation was inspired by Markus Gutschke's linuxthreads.cc.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"

#if SANITIZER_LINUX &&                                                   \
    (defined(__x86_64__) || defined(__mips__) || defined(__aarch64__) || \
     defined(__powerpc64__) || defined(__s390__) || defined(__i386__) || \
     defined(__arm__) || SANITIZER_RISCV64 || SANITIZER_LOONGARCH64)
#include "sanitizer_stoptheworld.h"

#include "sanitizer_platform_limits_posix.h"
#include "sanitizer_atomic.h"
#include <errno.h>      // for EINTR, ESRCH
#include <sched.h>      // for CLONE_* definitions
#include <sys/prctl.h>  // for PR_* definitions
#include <sys/ptrace.h> // for PTRACE_* definitions
#include <sys/types.h>  // for pid_t
#include <sys/uio.h>    // for iovec
#include <elf.h>        // for NT_PRSTATUS
#if (defined(__aarch64__) || SANITIZER_RISCV64 || SANITIZER_LOONGARCH64) && \
    !SANITIZER_ANDROID
// GLIBC 2.20+ sys/user does not include asm/ptrace.h
# include <asm/ptrace.h>
#endif
#include <sys/user.h>   // for user_regs_struct
#if SANITIZER_ANDROID && SANITIZER_MIPS
# include <asm/reg.h>   // for mips SP register in sys/user.h
#endif
#include <sys/wait.h>   // for signal-related stuff
#include "sanitizer_common.h"
#include "sanitizer_flags.h"
#include "sanitizer_libc.h"
#include "sanitizer_linux.h"
#include "sanitizer_mutex.h"
#include "sanitizer_placement_new.h"
// Sufficiently old kernel headers don't provide this value, but we can still
// call prctl with it. If the runtime kernel is new enough, the prctl call will
// have the desired effect; if the kernel is too old, the call will fail and we
// can ignore the error.
#ifndef PR_SET_PTRACER
#define PR_SET_PTRACER 0x59616d61
#endif
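
// Note: 0x59616d61 is "Yama" in ASCII. PR_SET_PTRACER comes from the Yama
// Linux security module, which restricts which processes may ptrace this one.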
// This module works by spawning a Linux task which then attaches to every
// thread in the caller process with ptrace. This suspends the threads, and
// PTRACE_GETREGS can then be used to obtain their register state. The callback
// supplied to StopTheWorld() is run in the tracer task while the threads are
// suspended.
// The tracer task must be placed in a different thread group for ptrace to
// work, so it cannot be spawned as a pthread. Instead, we use the low-level
// clone() interface (we want to share the address space with the caller
// process, so we prefer clone() over fork()).
//
// We don't use any libc functions, relying instead on direct syscalls. There
// are two reasons for this:
// 1. calling a library function while threads are suspended could cause a
// deadlock, if one of the threads happens to be holding a libc lock;
// 2. it's generally not safe to call libc functions from the tracer task,
// because clone() does not set up thread-local storage for it. Any
// thread-local variables used by libc will be shared between the tracer task
// and the thread which spawned it.
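
// Usage sketch (illustration only; MyVisitor and my_data are hypothetical
// names, not part of this file): a typical caller supplies a callback that
// inspects the suspended threads, e.g.
//
//   static void MyVisitor(const SuspendedThreadsList &threads, void *arg) {
//     for (uptr i = 0; i < threads.ThreadCount(); i++)
//       Printf("suspended thread %zu\n", (uptr)threads.GetThreadID(i));
//   }
//   ...
//   StopTheWorld(MyVisitor, &my_data);  // runs MyVisitor in the tracer task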
namespace __sanitizer {

class SuspendedThreadsListLinux final : public SuspendedThreadsList {
 public:
  SuspendedThreadsListLinux() { thread_ids_.reserve(1024); }

  tid_t GetThreadID(uptr index) const override;
  uptr ThreadCount() const override;
  bool ContainsTid(tid_t thread_id) const;
  void Append(tid_t tid);

  PtraceRegistersStatus GetRegistersAndSP(uptr index,
                                          InternalMmapVector<uptr> *buffer,
                                          uptr *sp) const override;

 private:
  InternalMmapVector<tid_t> thread_ids_;
};
// Structure for passing arguments into the tracer thread.
struct TracerThreadArgument {
  StopTheWorldCallback callback;
  void *callback_argument;
  // The tracer thread waits on this mutex while the parent finishes its
  // preparations.
  Mutex mutex;
  // Tracer thread signals its completion by setting done.
  atomic_uintptr_t done;
  uptr parent_pid;
};
// This class handles thread suspending/unsuspending in the tracer thread.
class ThreadSuspender {
 public:
  explicit ThreadSuspender(pid_t pid, TracerThreadArgument *arg)
      : arg(arg), pid_(pid) {
    CHECK_GE(pid, 0);
  }
  bool SuspendAllThreads();
  void ResumeAllThreads();
  void KillAllThreads();
  SuspendedThreadsListLinux &suspended_threads_list() {
    return suspended_threads_list_;
  }
  TracerThreadArgument *arg;

 private:
  SuspendedThreadsListLinux suspended_threads_list_;
  pid_t pid_;
  bool SuspendThread(tid_t thread_id);
};
bool ThreadSuspender::SuspendThread(tid_t tid) {
  // Are we already attached to this thread?
  // Currently this check takes linear time, however the number of threads is
  // usually small.
  if (suspended_threads_list_.ContainsTid(tid))
    return false;
  int pterrno;
  if (internal_iserror(internal_ptrace(PTRACE_ATTACH, tid, nullptr, nullptr),
                       &pterrno)) {
    // Either the thread is dead, or something prevented us from attaching.
    // Log this event and move on.
    VReport(1, "Could not attach to thread %zu (errno %d).\n", (uptr)tid,
            pterrno);
    return false;
  } else {
    VReport(2, "Attached to thread %zu.\n", (uptr)tid);
    // The thread is not guaranteed to stop before ptrace returns, so we must
    // wait on it. Note: if the thread receives a signal concurrently,
    // we can get notification about the signal before notification about stop.
    // In such case we need to forward the signal to the thread, otherwise
    // the signal will be missed (as we do PTRACE_DETACH with arg=0) and
    // any logic relying on signals will break. After forwarding we need to
    // continue to wait for stopping, because the thread is not stopped yet.
    // We do ignore delivery of SIGSTOP, because we want to make stop-the-world
    // as invisible as possible.
    for (;;) {
      int status;
      uptr waitpid_status;
      HANDLE_EINTR(waitpid_status, internal_waitpid(tid, &status, __WALL));
      int wperrno;
      if (internal_iserror(waitpid_status, &wperrno)) {
        // Got an ECHILD error. I don't think this situation is possible, but
        // it doesn't hurt to report it.
        VReport(1, "Waiting on thread %zu failed, detaching (errno %d).\n",
                (uptr)tid, wperrno);
        internal_ptrace(PTRACE_DETACH, tid, nullptr, nullptr);
        return false;
      }
      if (WIFSTOPPED(status) && WSTOPSIG(status) != SIGSTOP) {
        // Forward the signal and keep waiting for the stop notification.
        internal_ptrace(PTRACE_CONT, tid, nullptr,
                        (void *)(uptr)WSTOPSIG(status));
        continue;
      }
      break;
    }
    suspended_threads_list_.Append(tid);
    return true;
  }
}
void ThreadSuspender::ResumeAllThreads() {
  for (uptr i = 0; i < suspended_threads_list_.ThreadCount(); i++) {
    pid_t tid = suspended_threads_list_.GetThreadID(i);
    int pterrno;
    if (!internal_iserror(internal_ptrace(PTRACE_DETACH, tid, nullptr, nullptr),
                          &pterrno)) {
      VReport(2, "Detached from thread %d.\n", tid);
    } else {
      // Either the thread is dead, or we are already detached.
      // The latter case is possible, for instance, if this function was called
      // from a signal handler.
      VReport(1, "Could not detach from thread %d (errno %d).\n", tid, pterrno);
    }
  }
}
void ThreadSuspender::KillAllThreads() {
  for (uptr i = 0; i < suspended_threads_list_.ThreadCount(); i++)
    internal_ptrace(PTRACE_KILL, suspended_threads_list_.GetThreadID(i),
                    nullptr, nullptr);
}
bool ThreadSuspender::SuspendAllThreads() {
  ThreadLister thread_lister(pid_);
  bool retry = true;
  InternalMmapVector<tid_t> threads;
  threads.reserve(128);
  for (int i = 0; i < 30 && retry; ++i) {
    retry = false;
    switch (thread_lister.ListThreads(&threads)) {
      case ThreadLister::Error:
        ResumeAllThreads();
        return false;
      case ThreadLister::Incomplete:
        retry = true;
        break;
      case ThreadLister::Ok:
        break;
    }
    for (tid_t tid : threads) {
      if (SuspendThread(tid))
        retry = true;
    }
  }
  return suspended_threads_list_.ThreadCount();
}
// Pointer to the ThreadSuspender instance for use in signal handler.
static ThreadSuspender *thread_suspender_instance = nullptr;

// Synchronous signals that should not be blocked.
static const int kSyncSignals[] = { SIGABRT, SIGILL, SIGFPE, SIGSEGV, SIGBUS,
                                    SIGXCPU, SIGXFSZ };
static void TracerThreadDieCallback() {
  // Generally a call to Die() in the tracer thread should be fatal to the
  // parent process as well, because they share the address space.
  // This really only works correctly if all the threads are suspended at this
  // point. So we correctly handle calls to Die() from within the callback, but
  // not those that happen before or after the callback. Hopefully there aren't
  // a lot of opportunities for that to happen...
  ThreadSuspender *inst = thread_suspender_instance;
  if (inst && stoptheworld_tracer_pid == internal_getpid()) {
    inst->KillAllThreads();
    thread_suspender_instance = nullptr;
  }
}
// Signal handler to wake up suspended threads when the tracer thread dies.
static void TracerThreadSignalHandler(int signum, __sanitizer_siginfo *siginfo,
                                      void *uctx) {
  SignalContext ctx(siginfo, uctx);
  Printf("Tracer caught signal %d: addr=0x%zx pc=0x%zx sp=0x%zx\n", signum,
         ctx.addr, ctx.pc, ctx.sp);
  ThreadSuspender *inst = thread_suspender_instance;
  if (inst) {
    if (signum == SIGABRT)
      inst->KillAllThreads();
    else
      inst->ResumeAllThreads();
    RAW_CHECK(RemoveDieCallback(TracerThreadDieCallback));
    thread_suspender_instance = nullptr;
    atomic_store(&inst->arg->done, 1, memory_order_relaxed);
  }
  internal__exit((signum == SIGABRT) ? 1 : 2);
}
// Size of alternative stack for signal handlers in the tracer thread.
static const int kHandlerStackSize = 8192;
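
// The tracer must run entirely on the stack passed to clone() and must not
// rely on libc or TLS: it registers a die callback, sets up an alternate
// signal stack, suspends every thread in the parent, runs the user callback,
// resumes the threads, and signals completion via the shared 'done' flag.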
// This function will be run as a cloned task.
static int TracerThread(void* argument) {
  TracerThreadArgument *tracer_thread_argument =
      (TracerThreadArgument *)argument;

  internal_prctl(PR_SET_PDEATHSIG, SIGKILL, 0, 0, 0);
  // Check if parent is already dead.
  if (internal_getppid() != tracer_thread_argument->parent_pid)
    internal__exit(4);

  // Wait for the parent thread to finish preparations.
  tracer_thread_argument->mutex.Lock();
  tracer_thread_argument->mutex.Unlock();

  RAW_CHECK(AddDieCallback(TracerThreadDieCallback));

  ThreadSuspender thread_suspender(internal_getppid(), tracer_thread_argument);
  // Global pointer for the signal handler.
  thread_suspender_instance = &thread_suspender;

  // Alternate stack for signal handling.
  InternalMmapVector<char> handler_stack_memory(kHandlerStackSize);
  stack_t handler_stack;
  internal_memset(&handler_stack, 0, sizeof(handler_stack));
  handler_stack.ss_sp = handler_stack_memory.data();
  handler_stack.ss_size = kHandlerStackSize;
  internal_sigaltstack(&handler_stack, nullptr);

  // Install our handler for synchronous signals. Other signals should be
  // blocked by the mask we inherited from the parent thread.
  for (uptr i = 0; i < ARRAY_SIZE(kSyncSignals); i++) {
    __sanitizer_sigaction act;
    internal_memset(&act, 0, sizeof(act));
    act.sigaction = TracerThreadSignalHandler;
    act.sa_flags = SA_ONSTACK | SA_SIGINFO;
    internal_sigaction_norestorer(kSyncSignals[i], &act, 0);
  }

  int exit_code = 0;
  if (!thread_suspender.SuspendAllThreads()) {
    VReport(1, "Failed suspending threads.\n");
    exit_code = 3;
  } else {
    tracer_thread_argument->callback(thread_suspender.suspended_threads_list(),
                                     tracer_thread_argument->callback_argument);
    thread_suspender.ResumeAllThreads();
  }
  RAW_CHECK(RemoveDieCallback(TracerThreadDieCallback));
  thread_suspender_instance = nullptr;
  atomic_store(&tracer_thread_argument->done, 1, memory_order_relaxed);
  return exit_code;
}
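
// On all architectures supported here the stack grows downwards, so the guard
// page sits at the low end of the mapping and Bottom() returns the high end,
// which is what clone() expects as the child's initial stack pointer.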
class ScopedStackSpaceWithGuard {
 public:
  explicit ScopedStackSpaceWithGuard(uptr stack_size) {
    stack_size_ = stack_size;
    guard_size_ = GetPageSizeCached();
    // FIXME: Omitting MAP_STACK here works in current kernels but might break
    // in the future.
    guard_start_ =
        (uptr)MmapOrDie(stack_size_ + guard_size_, "ScopedStackWithGuard");
    CHECK(MprotectNoAccess((uptr)guard_start_, guard_size_));
  }
  ~ScopedStackSpaceWithGuard() {
    UnmapOrDie((void *)guard_start_, stack_size_ + guard_size_);
  }
  void *Bottom() const {
    return (void *)(guard_start_ + stack_size_ + guard_size_);
  }

 private:
  uptr stack_size_;
  uptr guard_size_;
  uptr guard_start_;
};
// We have a limitation on the stack frame size, so some stuff had to be moved
// into globals.
static __sanitizer_sigset_t blocked_sigset;
static __sanitizer_sigset_t old_sigset;
class StopTheWorldScope {
 public:
  StopTheWorldScope() {
    // Make this process dumpable. Processes that are not dumpable cannot be
    // attached to.
    process_was_dumpable_ = internal_prctl(PR_GET_DUMPABLE, 0, 0, 0, 0);
    if (!process_was_dumpable_)
      internal_prctl(PR_SET_DUMPABLE, 1, 0, 0, 0);
  }

  ~StopTheWorldScope() {
    // Restore the dumpable flag.
    if (!process_was_dumpable_)
      internal_prctl(PR_SET_DUMPABLE, 0, 0, 0, 0);
  }

 private:
  int process_was_dumpable_;
};
// When sanitizer output is being redirected to file (i.e. by using log_path),
// the tracer should write to the parent's log instead of trying to open a new
// file. Alert the logging code to the fact that we have a tracer.
struct ScopedSetTracerPID {
  explicit ScopedSetTracerPID(uptr tracer_pid) {
    stoptheworld_tracer_pid = tracer_pid;
    stoptheworld_tracer_ppid = internal_getpid();
  }
  ~ScopedSetTracerPID() {
    stoptheworld_tracer_pid = 0;
    stoptheworld_tracer_ppid = 0;
  }
};
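
// StopTheWorld() ties the pieces together: block async signals, clone the
// tracer onto a guarded stack, grant it ptrace access via PR_SET_PTRACER,
// release it, then spin on the 'done' flag instead of calling waitpid()
// right away, because errno is shared with the tracer thread.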
void StopTheWorld(StopTheWorldCallback callback, void *argument) {
  StopTheWorldScope in_stoptheworld;
  // Prepare the arguments for TracerThread.
  struct TracerThreadArgument tracer_thread_argument;
  tracer_thread_argument.callback = callback;
  tracer_thread_argument.callback_argument = argument;
  tracer_thread_argument.parent_pid = internal_getpid();
  atomic_store(&tracer_thread_argument.done, 0, memory_order_relaxed);
  const uptr kTracerStackSize = 2 * 1024 * 1024;
  ScopedStackSpaceWithGuard tracer_stack(kTracerStackSize);
  // Block the execution of TracerThread until after we have set ptrace
  // permissions.
  tracer_thread_argument.mutex.Lock();
  // Signal handling story.
  // We don't want async signals to be delivered to the tracer thread,
  // so we block all async signals before creating the thread. An async signal
  // handler can temporarily modify errno, which is shared with this thread.
  // We ought to use pthread_sigmask here, because sigprocmask has undefined
  // behavior in multithreaded programs. However, on Linux sigprocmask is
  // equivalent to pthread_sigmask with the exception that pthread_sigmask
  // does not allow blocking some signals used internally in the pthread
  // implementation. We are fine with blocking them here; we are really not
  // going to pthread_cancel the thread.
  // The tracer thread should not raise any synchronous signals. But in case it
  // does, we set up a special handler for sync signals that properly kills the
  // parent as well. Note: we don't pass CLONE_SIGHAND to clone, so handlers
  // in the tracer thread won't interfere with the user program. Double note:
  // if a user does something along the lines of 'kill -11 pid', that can kill
  // the process even if the user has set up their own handler for SEGV.
  // Thing to watch out for: this code should not change the behavior of user
  // code in any observable way. In particular it should not override user
  // signal handlers.
  internal_sigfillset(&blocked_sigset);
  for (uptr i = 0; i < ARRAY_SIZE(kSyncSignals); i++)
    internal_sigdelset(&blocked_sigset, kSyncSignals[i]);
  int rv = internal_sigprocmask(SIG_BLOCK, &blocked_sigset, &old_sigset);
  CHECK_EQ(rv, 0);
  uptr tracer_pid = internal_clone(
      TracerThread, tracer_stack.Bottom(),
      CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_UNTRACED,
      &tracer_thread_argument, nullptr /* parent_tidptr */,
      nullptr /* newtls */, nullptr /* child_tidptr */);
  internal_sigprocmask(SIG_SETMASK, &old_sigset, 0);
  int local_errno = 0;
  if (internal_iserror(tracer_pid, &local_errno)) {
    VReport(1, "Failed spawning a tracer thread (errno %d).\n", local_errno);
    tracer_thread_argument.mutex.Unlock();
  } else {
    ScopedSetTracerPID scoped_set_tracer_pid(tracer_pid);
    // On some systems we have to explicitly declare that we want to be traced
    // by the tracer thread.
    internal_prctl(PR_SET_PTRACER, tracer_pid, 0, 0, 0);
    // Allow the tracer thread to start.
    tracer_thread_argument.mutex.Unlock();
    // NOTE: errno is shared between this thread and the tracer thread.
    // internal_waitpid() may call syscall() which can access/spoil errno,
    // so we can't call it now. Instead we wait for the tracer thread to finish
    // using the spin loop below. The man page for sched_yield() says "In the
    // Linux implementation, sched_yield() always succeeds", so let's hope it
    // does not spoil errno. Note that this spin loop runs only for brief
    // periods before the tracer thread has suspended us and when it starts
    // unblocking threads.
    while (atomic_load(&tracer_thread_argument.done, memory_order_relaxed) == 0)
      sched_yield();
    // Now the tracer thread is about to exit and does not touch errno,
    // wait for it.
    for (;;) {
      uptr waitpid_status = internal_waitpid(tracer_pid, nullptr, __WALL);
      if (!internal_iserror(waitpid_status, &local_errno))
        break;
      if (local_errno == EINTR)
        continue;
      VReport(1, "Waiting on the tracer thread failed (errno %d).\n",
              local_errno);
      break;
    }
  }
}
// Platform-specific methods from SuspendedThreadsList.
#if SANITIZER_ANDROID && defined(__arm__)
typedef pt_regs regs_struct;
#define REG_SP ARM_sp

#elif SANITIZER_LINUX && defined(__arm__)
typedef user_regs regs_struct;
#define REG_SP uregs[13]

#elif defined(__i386__) || defined(__x86_64__)
typedef user_regs_struct regs_struct;
#if defined(__i386__)
#define REG_SP esp
#else
#define REG_SP rsp
#endif
#define ARCH_IOVEC_FOR_GETREGSET
// Support ptrace extensions even when compiled without required kernel support
#ifndef NT_X86_XSTATE
#define NT_X86_XSTATE 0x202
#endif
#ifndef PTRACE_GETREGSET
#define PTRACE_GETREGSET 0x4204
#endif
// Compiler may use FP registers to store pointers.
static constexpr uptr kExtraRegs[] = {NT_X86_XSTATE, NT_FPREGSET};

#elif defined(__powerpc__) || defined(__powerpc64__)
typedef pt_regs regs_struct;
#define REG_SP gpr[PT_R1]

#elif defined(__mips__)
typedef struct user regs_struct;
# if SANITIZER_ANDROID
#  define REG_SP regs[EF_R29]
# else
#  define REG_SP regs[EF_REG29]
# endif

#elif defined(__aarch64__)
typedef struct user_pt_regs regs_struct;
#define REG_SP sp
static constexpr uptr kExtraRegs[] = {0};
#define ARCH_IOVEC_FOR_GETREGSET

#elif defined(__loongarch__)
typedef struct user_pt_regs regs_struct;
#define REG_SP regs[3]
static constexpr uptr kExtraRegs[] = {0};
#define ARCH_IOVEC_FOR_GETREGSET

#elif SANITIZER_RISCV64
typedef struct user_regs_struct regs_struct;
// sys/ucontext.h already defines REG_SP as 2. Undefine it first.
#undef REG_SP
#define REG_SP sp
static constexpr uptr kExtraRegs[] = {0};
#define ARCH_IOVEC_FOR_GETREGSET

#elif defined(__s390__)
typedef _user_regs_struct regs_struct;
#define REG_SP gprs[15]
static constexpr uptr kExtraRegs[] = {0};
#define ARCH_IOVEC_FOR_GETREGSET

#else
#error "Unsupported architecture"
#endif // SANITIZER_ANDROID && defined(__arm__)
tid_t SuspendedThreadsListLinux::GetThreadID(uptr index) const {
  CHECK_LT(index, thread_ids_.size());
  return thread_ids_[index];
}

uptr SuspendedThreadsListLinux::ThreadCount() const {
  return thread_ids_.size();
}

bool SuspendedThreadsListLinux::ContainsTid(tid_t thread_id) const {
  for (uptr i = 0; i < thread_ids_.size(); i++) {
    if (thread_ids_[i] == thread_id) return true;
  }
  return false;
}

void SuspendedThreadsListLinux::Append(tid_t tid) {
  thread_ids_.push_back(tid);
}
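
// GetRegistersAndSP has two paths. Where ARCH_IOVEC_FOR_GETREGSET is defined
// it uses PTRACE_GETREGSET with an iovec, growing the buffer until the size
// the kernel reports back is comfortably below the space offered (the kernel
// truncates silently, so a nearly-full iovec may indicate truncation).
// Otherwise it falls back to the fixed-size PTRACE_GETREGS.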
PtraceRegistersStatus SuspendedThreadsListLinux::GetRegistersAndSP(
    uptr index, InternalMmapVector<uptr> *buffer, uptr *sp) const {
  pid_t tid = GetThreadID(index);
  constexpr uptr uptr_sz = sizeof(uptr);
  int pterrno;
#ifdef ARCH_IOVEC_FOR_GETREGSET
  auto AppendF = [&](uptr regset) {
    uptr size = buffer->size();
    // NT_X86_XSTATE requires 64bit alignment.
    uptr size_up = RoundUpTo(size, 8 / uptr_sz);
    buffer->reserve(Max<uptr>(1024, size_up));
    struct iovec regset_io;
    for (;; buffer->resize(buffer->capacity() * 2)) {
      buffer->resize(buffer->capacity());
      uptr available_bytes = (buffer->size() - size_up) * uptr_sz;
      regset_io.iov_base = buffer->data() + size_up;
      regset_io.iov_len = available_bytes;
      bool fail =
          internal_iserror(internal_ptrace(PTRACE_GETREGSET, tid,
                                           (void *)regset, (void *)&regset_io),
                           &pterrno);
      if (fail) {
        VReport(1, "Could not get regset %p from thread %d (errno %d).\n",
                (void *)regset, tid, pterrno);
        buffer->resize(size);
        return false;
      }

      // Far enough from the buffer size, no need to resize and repeat.
      if (regset_io.iov_len + 64 < available_bytes)
        break;
    }
    buffer->resize(size_up + RoundUpTo(regset_io.iov_len, uptr_sz) / uptr_sz);
    return true;
  };

  buffer->clear();
  bool fail = !AppendF(NT_PRSTATUS);
  if (!fail) {
    // Accept the first available and do not report errors.
    for (uptr regs : kExtraRegs)
      if (regs && AppendF(regs))
        break;
  }
#else
  buffer->resize(RoundUpTo(sizeof(regs_struct), uptr_sz) / uptr_sz);
  bool fail = internal_iserror(
      internal_ptrace(PTRACE_GETREGS, tid, nullptr, buffer->data()), &pterrno);
  if (fail)
    VReport(1, "Could not get registers from thread %d (errno %d).\n", tid,
            pterrno);
#endif
  if (fail) {
    // ESRCH means that the given thread is not suspended or already dead.
    // Therefore it's unsafe to inspect its data (e.g. walk through stack) and
    // we should notify caller about this.
    return pterrno == ESRCH ? REGISTERS_UNAVAILABLE_FATAL
                            : REGISTERS_UNAVAILABLE;
  }

  *sp = reinterpret_cast<regs_struct *>(buffer->data())[0].REG_SP;
  return REGISTERS_AVAILABLE;
}
}  // namespace __sanitizer

#endif  // SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips__)
        // || defined(__aarch64__) || defined(__powerpc64__)
        // || defined(__s390__) || defined(__i386__) || defined(__arm__)
        // || SANITIZER_RISCV64 || SANITIZER_LOONGARCH64)